Mirror of https://github.com/alicevision/Meshroom.git
[multiview] New pipeline Photogrammetry and Camera Tracking
parent 48ed0a82fb
commit 1999b7c5b2

4 changed files with 71 additions and 22 deletions
meshroom/multiview.py

@@ -472,7 +472,7 @@ def sfmAugmentation(graph, sourceSfm, withMVS=False):
     return sfmNodes, mvsNodes


-def cameraTrackingPipeline(graph):
+def cameraTrackingPipeline(graph, sourceSfm=None):
     """
     Instantiate a camera tracking pipeline inside 'graph'.
@@ -484,30 +484,33 @@ def cameraTrackingPipeline(graph):
     """

     with GraphModification(graph):
-        cameraInit, featureExtraction, imageMatching, featureMatching, structureFromMotion = sfmPipeline(graph)
+        if sourceSfm is None:
+            cameraInitT, featureExtractionT, imageMatchingT, featureMatchingT, structureFromMotionT = sfmPipeline(graph)
+        else:
+            sfmNodes, _ = sfmAugmentation(graph, sourceSfm)
+            cameraInitT, featureExtractionT, imageMatchingT, featureMatchingT, structureFromMotionT = sfmNodes

-        imageMatching.attribute("nbMatches").value = 5 # voctree nb matches
-        imageMatching.attribute("nbNeighbors").value = 10
+        imageMatchingT.attribute("nbMatches").value = 5 # voctree nb matches
+        imageMatchingT.attribute("nbNeighbors").value = 10

-        structureFromMotion.attribute("minNumberOfMatches").value = 0
-        structureFromMotion.attribute("minInputTrackLength").value = 5
-        structureFromMotion.attribute("minNumberOfObservationsForTriangulation").value = 3
-        structureFromMotion.attribute("minAngleForTriangulation").value = 1.0
-        structureFromMotion.attribute("minAngleForLandmark").value = 0.5
+        structureFromMotionT.attribute("minNumberOfMatches").value = 0
+        structureFromMotionT.attribute("minInputTrackLength").value = 5
+        structureFromMotionT.attribute("minNumberOfObservationsForTriangulation").value = 3
+        structureFromMotionT.attribute("minAngleForTriangulation").value = 1.0
+        structureFromMotionT.attribute("minAngleForLandmark").value = 0.5

-        exportAnimatedCamera = graph.addNewNode('ExportAnimatedCamera', input=structureFromMotion.output)
+        exportAnimatedCameraT = graph.addNewNode('ExportAnimatedCamera', input=structureFromMotionT.output)

         # store current pipeline version in graph header
         graph.header.update({'pipelineVersion': __version__})

     return [
-        cameraInit,
-        featureExtraction,
-        imageMatching,
-        featureMatching,
-        structureFromMotion,
-        exportAnimatedCamera,
+        cameraInitT,
+        featureExtractionT,
+        imageMatchingT,
+        featureMatchingT,
+        structureFromMotionT,
+        exportAnimatedCameraT,
     ]
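Note: the new `sourceSfm` parameter lets the tracking chain either build its own SfM (as before) or be grafted onto an existing reconstruction via `sfmAugmentation`. A minimal sketch of both call styles, assuming a Meshroom environment where the node plugins are already registered (as Meshroom's own entry points do at startup):

    from meshroom import multiview
    from meshroom.core.graph import Graph

    # Standalone: the tracking chain creates its own SfM via sfmPipeline().
    g = Graph('Camera Tracking')
    trackingNodes = multiview.cameraTrackingPipeline(g)

    # Augmented: graft the tracking chain onto an existing SfM result, so the
    # tracked camera can reuse the already-reconstructed scene.
    g2 = Graph('Photogrammetry And Camera Tracking')
    _, _, _, _, structureFromMotion = multiview.sfmPipeline(g2)
    trackingNodes2 = multiview.cameraTrackingPipeline(g2, sourceSfm=structureFromMotion)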
@@ -527,3 +530,21 @@ def cameraTracking(inputImages=list(), inputViewpoints=list(), inputIntrinsics=l

     return graph

+
+def photogrammetryAndCameraTracking(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None):
+    if not graph:
+        graph = Graph('Photogrammetry And Camera Tracking')
+    with GraphModification(graph):
+        cameraInit, featureExtraction, imageMatching, featureMatching, structureFromMotion = sfmPipeline(graph)
+
+        cameraInitT, featureExtractionT, imageMatchingMultiT, featureMatchingT, structureFromMotionT, exportAnimatedCameraT = cameraTrackingPipeline(graph, structureFromMotion)
+
+        cameraInit.viewpoints.extend([{'path': image} for image in inputImages])
+        cameraInit.viewpoints.extend(inputViewpoints)
+        cameraInit.intrinsics.extend(inputIntrinsics)
+
+        if output:
+            graph.addNewNode('Publish', output=output, inputFiles=[exportAnimatedCameraT.output])
+
+    return graph
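A hedged usage sketch of the new builder (hypothetical paths; `graph.save(filepath)` assumed available on Meshroom's core Graph, and node plugins registered as above):

    from meshroom import multiview

    # Hypothetical input frames and output folder.
    images = ['/data/shot/frame_%04d.jpg' % i for i in range(1, 101)]
    graph = multiview.photogrammetryAndCameraTracking(inputImages=images,
                                                      output='/data/shot/export')
    # The Publish node copies the exported animated camera to 'output' once
    # the graph has been computed (e.g. with meshroom_compute).
    graph.save('/data/shot/pg_and_tracking.mg')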
meshroom/nodes/aliceVision/ImageMatching.py

@@ -48,9 +48,16 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
         desc.ChoiceParam(
             name='method',
             label='Method',
-            description='Method used to select the image pairs to match.',
-            value='VocabularyTree',
-            values=['VocabularyTree', 'Sequential', 'SequentialAndVocabularyTree','Exhaustive','Frustum'],
+            description='Method used to select the image pairs to match:\n'
+                        ' * VocabularyTree: It uses image retrieval techniques to find images that share some content without the cost of resolving all \n'
+                        'feature matches in details. Each image is represented in a compact image descriptor which allows to compute the distance between all \n'
+                        'images descriptors very efficiently. If your scene contains less than "Voc Tree: Minimal Number of Images", all image pairs will be selected.\n'
+                        ' * SequentialAndVocabularyTree: Combines sequential approach with VocTree to enable connections between keyframes at different times.\n'
+                        ' * Exhaustive: Export all image pairs.\n'
+                        ' * Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n'
+                        ' * FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n',
+            value='SequentialAndVocabularyTree',
+            values=['VocabularyTree', 'SequentialAndVocabularyTree', 'Exhaustive', 'Frustum'],
             exclusive=True,
             uid=[0],
         ),
@@ -60,6 +67,7 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
             description='Input name for the vocabulary tree file.',
             value=os.environ.get('ALICEVISION_VOCTREE', ''),
             uid=[],
+            enabled=lambda node: 'VocabularyTree' in node.method.value,
         ),
         desc.File(
             name='weights',
@@ -68,6 +76,7 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
             value='',
             uid=[0],
             advanced=True,
+            enabled=lambda node: 'VocabularyTree' in node.method.value,
         ),
         desc.ChoiceParam(
             name='matchingMode',
@@ -86,6 +95,7 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
             range=(0, 500, 1),
             uid=[0],
             advanced=True,
+            enabled=lambda node: 'VocabularyTree' in node.method.value,
         ),
         desc.IntParam(
             name='maxDescriptors',
@@ -95,24 +105,27 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
             range=(0, 100000, 1),
             uid=[0],
             advanced=True,
+            enabled=lambda node: 'VocabularyTree' in node.method.value,
         ),
         desc.IntParam(
             name='nbMatches',
             label='Voc Tree: Nb Matches',
             description='The number of matches to retrieve for each image (If 0 it will retrieve all the matches).',
-            value=50,
+            value=40,
             range=(0, 1000, 1),
             uid=[0],
             advanced=True,
+            enabled=lambda node: 'VocabularyTree' in node.method.value,
         ),
         desc.IntParam(
             name='nbNeighbors',
             label='Sequential: Nb Neighbors',
             description='The number of neighbors to retrieve for each image (If 0 it will retrieve all the neighbors).',
-            value=50,
+            value=5,
             range=(0, 1000, 1),
             uid=[0],
             advanced=True,
+            enabled=lambda node: 'Sequential' in node.method.value,
         ),
         desc.ChoiceParam(
             name='verboseLevel',
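Note on the `enabled=lambda node: ...` arguments added above: they gate each parameter on the selected method with a substring test, so the new default 'SequentialAndVocabularyTree' keeps both the VocTree and Sequential parameter groups enabled. A minimal self-contained sketch of that pattern with plain-Python stand-ins (hypothetical `Param`/`Node` classes, not Meshroom's actual desc API):

    # Hypothetical stand-ins showing how an 'enabled' callback evaluated
    # against the node gates each parameter's availability.
    class Param(object):
        def __init__(self, name, value, enabled=lambda node: True):
            self.name, self.value, self.enabled = name, value, enabled

    class Node(object):
        def __init__(self, params):
            self.params = {p.name: p for p in params}
        def __getattr__(self, name):  # lets callbacks write node.method.value
            return self.params[name]
        def enabledParams(self):
            return [p for p in self.params.values() if p.enabled(self)]

    node = Node([
        Param('method', 'SequentialAndVocabularyTree'),
        Param('nbMatches', 40, enabled=lambda n: 'VocabularyTree' in n.method.value),
        Param('nbNeighbors', 5, enabled=lambda n: 'Sequential' in n.method.value),
    ])
    # Both substrings match the combined method, so both params stay enabled:
    print([p.name for p in node.enabledParams()])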
meshroom/ui/qml/main.qml

@@ -408,6 +408,13 @@ ApplicationWindow {
         }
         Menu {
             title: "New Pipeline"
+            TextMetrics {
+                id: textMetrics
+                font: action_PG_CT.font
+                elide: Text.ElideNone
+                text: action_PG_CT.text
+            }
+            implicitWidth: textMetrics.width + 10 // largest text width + margin
             Action {
                 text: "Photogrammetry"
                 onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetry") })
@@ -420,6 +427,11 @@ ApplicationWindow {
                 text: "Panorama Fisheye HDR"
                 onTriggered: ensureSaved(function() { _reconstruction.new("panoramafisheyehdr") })
             }
+            Action {
+                id: action_PG_CT
+                text: "Photogrammetry and Camera Tracking (experimental)"
+                onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetryandcameratracking") })
+            }
             Action {
                 text: "Camera Tracking (experimental)"
                 onTriggered: ensureSaved(function() { _reconstruction.new("cameratracking") })
meshroom/ui/reconstruction.py

@@ -490,6 +490,9 @@ class Reconstruction(UIGraph):
         elif p.lower() == "panoramafisheyehdr":
             # default panorama fisheye hdr pipeline
             self.setGraph(multiview.panoramaFisheyeHdr())
+        elif p.lower() == "photogrammetryandcameratracking":
+            # default photogrammetry and camera tracking pipeline
+            self.setGraph(multiview.photogrammetryAndCameraTracking())
         elif p.lower() == "cameratracking":
             # default camera tracking pipeline
             self.setGraph(multiview.cameraTracking())