[multiview] New pipeline Photogrammetry and Camera Tracking

Fabien Castan 2021-05-27 17:02:39 +02:00
parent 48ed0a82fb
commit 1999b7c5b2
4 changed files with 71 additions and 22 deletions

@@ -472,7 +472,7 @@ def sfmAugmentation(graph, sourceSfm, withMVS=False):
     return sfmNodes, mvsNodes
 
 
-def cameraTrackingPipeline(graph):
+def cameraTrackingPipeline(graph, sourceSfm=None):
     """
     Instantiate a camera tracking pipeline inside 'graph'.
@@ -484,30 +484,33 @@ def cameraTrackingPipeline(graph):
     """
     with GraphModification(graph):
-        cameraInit, featureExtraction, imageMatching, featureMatching, structureFromMotion = sfmPipeline(graph)
+        if sourceSfm is None:
+            cameraInitT, featureExtractionT, imageMatchingT, featureMatchingT, structureFromMotionT = sfmPipeline(graph)
+        else:
+            sfmNodes, _ = sfmAugmentation(graph, sourceSfm)
+            cameraInitT, featureExtractionT, imageMatchingT, featureMatchingT, structureFromMotionT = sfmNodes
 
-        imageMatching.attribute("nbMatches").value = 5  # voctree nb matches
-        imageMatching.attribute("nbNeighbors").value = 10
+        imageMatchingT.attribute("nbMatches").value = 5  # voctree nb matches
+        imageMatchingT.attribute("nbNeighbors").value = 10
 
-        structureFromMotion.attribute("minNumberOfMatches").value = 0
-        structureFromMotion.attribute("minInputTrackLength").value = 5
-        structureFromMotion.attribute("minNumberOfObservationsForTriangulation").value = 3
-        structureFromMotion.attribute("minAngleForTriangulation").value = 1.0
-        structureFromMotion.attribute("minAngleForLandmark").value = 0.5
+        structureFromMotionT.attribute("minNumberOfMatches").value = 0
+        structureFromMotionT.attribute("minInputTrackLength").value = 5
+        structureFromMotionT.attribute("minNumberOfObservationsForTriangulation").value = 3
+        structureFromMotionT.attribute("minAngleForTriangulation").value = 1.0
+        structureFromMotionT.attribute("minAngleForLandmark").value = 0.5
 
-        exportAnimatedCamera = graph.addNewNode('ExportAnimatedCamera', input=structureFromMotion.output)
+        exportAnimatedCameraT = graph.addNewNode('ExportAnimatedCamera', input=structureFromMotionT.output)
 
         # store current pipeline version in graph header
         graph.header.update({'pipelineVersion': __version__})
 
     return [
-        cameraInit,
-        featureExtraction,
-        imageMatching,
-        featureMatching,
-        structureFromMotion,
-        exportAnimatedCamera,
+        cameraInitT,
+        featureExtractionT,
+        imageMatchingT,
+        featureMatchingT,
+        structureFromMotionT,
+        exportAnimatedCameraT,
     ]
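
The new sourceSfm parameter makes the tracking chain reusable: without it the function builds its own SfM chain through sfmPipeline(); with it, sfmAugmentation() grafts the tracking nodes onto an existing reconstruction. A minimal sketch of both call modes, assuming a Meshroom environment with the node plugins loaded and meshroom.multiview importable:

from meshroom import multiview
from meshroom.core.graph import Graph

# Standalone: the pipeline creates its own SfM chain internally.
g = Graph('Camera Tracking')
nodes = multiview.cameraTrackingPipeline(g)

# Seeded: reuse the StructureFromMotion node of an existing reconstruction,
# so the tracking branch is localized against it via sfmAugmentation().
g2 = Graph('Seeded Camera Tracking')
_, _, _, _, sfm = multiview.sfmPipeline(g2)
trackingNodes = multiview.cameraTrackingPipeline(g2, sourceSfm=sfm)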
@@ -527,3 +530,21 @@ def cameraTracking(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None):
     return graph
 
 
+def photogrammetryAndCameraTracking(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None):
+    if not graph:
+        graph = Graph('Photogrammetry And Camera Tracking')
+    with GraphModification(graph):
+        cameraInit, featureExtraction, imageMatching, featureMatching, structureFromMotion = sfmPipeline(graph)
+        cameraInitT, featureExtractionT, imageMatchingMultiT, featureMatchingT, structureFromMotionT, exportAnimatedCameraT = cameraTrackingPipeline(graph, structureFromMotion)
+
+        cameraInit.viewpoints.extend([{'path': image} for image in inputImages])
+        cameraInit.viewpoints.extend(inputViewpoints)
+        cameraInit.intrinsics.extend(inputIntrinsics)
+
+        if output:
+            graph.addNewNode('Publish', output=output, inputFiles=[exportAnimatedCameraT.output])
+
+    return graph
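
photogrammetryAndCameraTracking() then chains the two: a full photogrammetry SfM feeds cameraTrackingPipeline() as sourceSfm, and an optional Publish node collects the animated camera. A hypothetical invocation (image and output paths are placeholders; assumes Graph.save() from meshroom.core.graph):

from meshroom import multiview

graph = multiview.photogrammetryAndCameraTracking(
    inputImages=['/data/seq/frame_0001.jpg', '/data/seq/frame_0002.jpg'],
    output='/data/export',  # if set, a Publish node is fed ExportAnimatedCamera's output
)
graph.save('/data/pgAndCameraTracking.mg')  # persist as a Meshroom project file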

@@ -48,9 +48,16 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
         desc.ChoiceParam(
             name='method',
             label='Method',
-            description='Method used to select the image pairs to match.',
-            value='VocabularyTree',
-            values=['VocabularyTree', 'Sequential', 'SequentialAndVocabularyTree', 'Exhaustive', 'Frustum'],
+            description='Method used to select the image pairs to match:\n'
+                        ' * VocabularyTree: Uses image retrieval techniques to find images that share some content without the cost of resolving all\n'
+                        '   feature matches in detail. Each image is represented by a compact descriptor, which allows the distances between all image\n'
+                        '   descriptors to be computed very efficiently. If your scene contains fewer than "Voc Tree: Minimal Number of Images", all image pairs will be selected.\n'
+                        ' * SequentialAndVocabularyTree: Combines the sequential approach with VocTree to enable connections between keyframes at different times.\n'
+                        ' * Exhaustive: Exports all image pairs.\n'
+                        ' * Frustum: If images have known poses, computes the intersection between camera frustums to create the list of image pairs.\n'
+                        ' * FrustumOrVocabularyTree: If images have known poses, uses frustum intersection; otherwise uses VocabularyTree.\n',
+            value='SequentialAndVocabularyTree',
+            values=['VocabularyTree', 'SequentialAndVocabularyTree', 'Exhaustive', 'Frustum'],
             exclusive=True,
             uid=[0],
         ),
@@ -60,6 +67,7 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
             description='Input name for the vocabulary tree file.',
             value=os.environ.get('ALICEVISION_VOCTREE', ''),
             uid=[],
+            enabled=lambda node: 'VocabularyTree' in node.method.value,
         ),
         desc.File(
             name='weights',
@@ -68,6 +76,7 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
             value='',
             uid=[0],
             advanced=True,
+            enabled=lambda node: 'VocabularyTree' in node.method.value,
         ),
         desc.ChoiceParam(
             name='matchingMode',
@@ -86,6 +95,7 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
             range=(0, 500, 1),
             uid=[0],
             advanced=True,
+            enabled=lambda node: 'VocabularyTree' in node.method.value,
         ),
         desc.IntParam(
             name='maxDescriptors',
@@ -95,24 +105,27 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
             range=(0, 100000, 1),
             uid=[0],
             advanced=True,
+            enabled=lambda node: 'VocabularyTree' in node.method.value,
         ),
         desc.IntParam(
             name='nbMatches',
             label='Voc Tree: Nb Matches',
             description='The number of matches to retrieve for each image (If 0 it will retrieve all the matches).',
-            value=50,
+            value=40,
             range=(0, 1000, 1),
             uid=[0],
             advanced=True,
+            enabled=lambda node: 'VocabularyTree' in node.method.value,
         ),
         desc.IntParam(
             name='nbNeighbors',
             label='Sequential: Nb Neighbors',
             description='The number of neighbors to retrieve for each image (If 0 it will retrieve all the neighbors).',
-            value=50,
+            value=5,
             range=(0, 1000, 1),
             uid=[0],
             advanced=True,
+            enabled=lambda node: 'Sequential' in node.method.value,
         ),
         desc.ChoiceParam(
             name='verboseLevel',
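
The enabled callbacks added above gate each parameter on the selected method through plain substring matching, which is what lets the new combined default switch on both parameter groups at once:

# 'SequentialAndVocabularyTree' contains both substrings, so the voc-tree
# parameters (tree, weights, nbMatches, ...) and the sequential one
# (nbNeighbors) are enabled simultaneously.
method = 'SequentialAndVocabularyTree'
print('VocabularyTree' in method)    # True
print('Sequential' in method)        # True
print('Sequential' in 'Exhaustive')  # False -> nbNeighbors is greyed out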

@@ -408,6 +408,13 @@ ApplicationWindow {
         }
         Menu {
             title: "New Pipeline"
+            TextMetrics {
+                id: textMetrics
+                font: action_PG_CT.font
+                elide: Text.ElideNone
+                text: action_PG_CT.text
+            }
+            implicitWidth: textMetrics.width + 10 // largest text width + margin
             Action {
                 text: "Photogrammetry"
                 onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetry") })
@@ -420,6 +427,11 @@ ApplicationWindow {
                 text: "Panorama Fisheye HDR"
                 onTriggered: ensureSaved(function() { _reconstruction.new("panoramafisheyehdr") })
             }
+            Action {
+                id: action_PG_CT
+                text: "Photogrammetry and Camera Tracking (experimental)"
+                onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetryandcameratracking") })
+            }
             Action {
                 text: "Camera Tracking (experimental)"
                 onTriggered: ensureSaved(function() { _reconstruction.new("cameratracking") })

@@ -490,6 +490,9 @@ class Reconstruction(UIGraph):
             elif p.lower() == "panoramafisheyehdr":
                 # default panorama fisheye hdr pipeline
                 self.setGraph(multiview.panoramaFisheyeHdr())
+            elif p.lower() == "photogrammetryandcameratracking":
+                # default photogrammetry and camera tracking pipeline
+                self.setGraph(multiview.photogrammetryAndCameraTracking())
             elif p.lower() == "cameratracking":
                 # default camera tracking pipeline
                 self.setGraph(multiview.cameraTracking())
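
End to end, the UI glue is thin: the new QML Action passes the key "photogrammetryandcameratracking" to Reconstruction.new(), which lower-cases it and installs the default graph. A sketch of the equivalent calls from Python, assuming an existing Reconstruction instance r:

# What the menu click boils down to (the key is matched case-insensitively
# because of p.lower() above):
r.new("PhotogrammetryAndCameraTracking")
# ...which amounts to building the default graph directly:
r.setGraph(multiview.photogrammetryAndCameraTracking())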