From 32370d1670b0dec75b7dd02d6754b36d76358bf9 Mon Sep 17 00:00:00 2001 From: demoulinv Date: Thu, 21 Apr 2022 18:40:45 +0200 Subject: [PATCH 01/21] Update ImageProcessing node adding ACES and ACEScg in output color space menu. --- meshroom/nodes/aliceVision/ImageProcessing.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/meshroom/nodes/aliceVision/ImageProcessing.py b/meshroom/nodes/aliceVision/ImageProcessing.py index f2079f2b..e276c365 100644 --- a/meshroom/nodes/aliceVision/ImageProcessing.py +++ b/meshroom/nodes/aliceVision/ImageProcessing.py @@ -289,6 +289,15 @@ Convert or apply filtering to the input images. exclusive=True, uid=[0], ), + desc.ChoiceParam( + name='outputColorSpace', + label='Output Color Space', + description='Allows you to choose the color space of the output image.', + value='AUTO', + values=['AUTO', 'sRGB', 'Linear', 'ACES', 'ACEScg'], + exclusive=True, + uid=[0], + ), desc.ChoiceParam( name='storageDataType', label='Storage Data Type for EXR output', From a4598e0443ad64e1174d12ba6352a1848951ff20 Mon Sep 17 00:00:00 2001 From: demoulinv Date: Wed, 27 Apr 2022 14:26:40 +0200 Subject: [PATCH 02/21] [launch] Add ALICEVISION_ROOT setting example in start.bat --- start.bat | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/start.bat b/start.bat index 9044a1b4..65e27cbd 100644 --- a/start.bat +++ b/start.bat @@ -9,7 +9,8 @@ REM set MESHROOM_OUTPUT_QML_WARNINGS=1 REM set MESHROOM_INSTANT_CODING=1 REM set QT_PLUGIN_PATH=C:\dev\meshroom\install REM set QML2_IMPORT_PATH=C:\dev\meshroom\install\qml -REM set PATH=C:\dev\AliceVision\install\bin;C:\dev\vcpkg\installed\x64-windows\bin +REM set PATH=C:\dev\AliceVision\install\bin;C:\dev\vcpkg\installed\x64-windows\bin;%PATH% +REM set ALICEVISION_ROOT=C:\dev\AliceVision\install python meshroom\ui From e725f599c546359439f1aec98779ffd1a2316451 Mon Sep 17 00:00:00 2001 From: Povilas Kanapickas Date: Fri, 8 Jul 2022 00:00:31 +0300 Subject: [PATCH 03/21] [ui] Work around PySide2 bug affecting property decorators PySide 5.15.1 and newer have a bug (https://bugreports.qt.io/browse/PYSIDE-1426) which results in the following error emitted on certain @Property decorators: TypeError: A constant property cannot have a WRITE method or a NOTIFY signal. Until the bug is fixed on PySide2 side workaround is to not use Property as a decorator, but as a simple function wrapper emitting the property as a class member. Fixes #1239. --- meshroom/ui/app.py | 9 +++++---- meshroom/ui/reconstruction.py | 6 ++---- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/meshroom/ui/app.py b/meshroom/ui/app.py index 110daec4..16a3bfa4 100644 --- a/meshroom/ui/app.py +++ b/meshroom/ui/app.py @@ -284,8 +284,7 @@ class MeshroomApp(QApplication): return md return markdown(md) - @Property(QJsonValue, constant=True) - def systemInfo(self): + def _systemInfo(self): import platform import sys return { @@ -293,8 +292,9 @@ class MeshroomApp(QApplication): 'python': 'Python {}'.format(sys.version.split(" ")[0]) } - @Property("QVariantList", constant=True) - def licensesModel(self): + systemInfo = Property(QJsonValue, _systemInfo, constant=True) + + def _licensesModel(self): """ Get info about open-source licenses for the application. 
Model provides: @@ -316,6 +316,7 @@ class MeshroomApp(QApplication): } ] + licensesModel = Property("QVariantList", _licensesModel, constant=True) recentProjectFilesChanged = Signal() recentProjectFiles = Property("QVariantList", _recentProjectFiles, notify=recentProjectFilesChanged) diff --git a/meshroom/ui/reconstruction.py b/meshroom/ui/reconstruction.py index 3915c42a..0263d5ee 100755 --- a/meshroom/ui/reconstruction.py +++ b/meshroom/ui/reconstruction.py @@ -249,10 +249,8 @@ class ViewpointWrapper(QObject): self._undistortedImagePath = os.path.join(self._activeNode_PrepareDenseScene.node.output.value, filename) self.denseSceneParamsChanged.emit() - @Property(type=QObject, constant=True) - def attribute(self): - """ Get the underlying Viewpoint attribute wrapped by this Viewpoint. """ - return self._viewpoint + # Get the underlying Viewpoint attribute wrapped by this Viewpoint. + attribute = Property(QObject, lambda self: self._viewpoint, constant=True) @Property(type="QVariant", notify=initialParamsChanged) def initialIntrinsics(self): From 708eb85c9c250419d374a95979d1c133884f5c93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Tue, 12 Jul 2022 09:31:37 +0200 Subject: [PATCH 04/21] Evaluate variables in nodes' string parameters --- meshroom/core/attribute.py | 19 +++++++++++++++---- meshroom/core/desc.py | 5 +++-- .../qml/GraphEditor/AttributeItemDelegate.qml | 6 +++--- 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/meshroom/core/attribute.py b/meshroom/core/attribute.py index 81134918..61632d23 100644 --- a/meshroom/core/attribute.py +++ b/meshroom/core/attribute.py @@ -1,11 +1,13 @@ #!/usr/bin/env python # coding:utf-8 import copy +import os import re import weakref import types import logging +from string import Template from meshroom.common import BaseObject, Property, Variant, Signal, ListModel, DictModel, Slot from meshroom.core import desc, pyCompatibility, hashValue @@ -139,7 +141,9 @@ class Attribute(BaseObject): self.enabledChanged.emit() def _get_value(self): - return self.getLinkParam().value if self.isLink else self._value + if self.isLink: + return self.getLinkParam().value + return self._value def _set_value(self, value): if self._value == value: @@ -259,13 +263,18 @@ class Attribute(BaseObject): return self.defaultValue() return self._value + def getEvalValue(self): + if isinstance(self.value, pyCompatibility.basestring): + return Template(self.value).safe_substitute(os.environ) + return self.value + def getValueStr(self): if isinstance(self.attributeDesc, desc.ChoiceParam) and not self.attributeDesc.exclusive: assert(isinstance(self.value, pyCompatibility.Sequence) and not isinstance(self.value, pyCompatibility.basestring)) - return self.attributeDesc.joinChar.join(self.value) + return self.attributeDesc.joinChar.join(self.getEvalValue()) if isinstance(self.attributeDesc, (desc.StringParam, desc.File)): - return '"{}"'.format(self.value) - return str(self.value) + return '"{}"'.format(self.getEvalValue()) + return str(self.getEvalValue()) def defaultValue(self): if isinstance(self.desc.value, types.FunctionType): @@ -298,6 +307,8 @@ class Attribute(BaseObject): desc = Property(desc.Attribute, lambda self: self.attributeDesc, constant=True) valueChanged = Signal() value = Property(Variant, _get_value, _set_value, notify=valueChanged) + valueStr = Property(Variant, getValueStr, notify=valueChanged) + evalValue = Property(Variant, getEvalValue, notify=valueChanged) isOutput = Property(bool, isOutput.fget, constant=True) 
isLinkChanged = Signal() isLink = Property(bool, isLink.fget, notify=isLinkChanged) diff --git a/meshroom/core/desc.py b/meshroom/core/desc.py index 66b3d80c..3dcb7a07 100755 --- a/meshroom/core/desc.py +++ b/meshroom/core/desc.py @@ -1,12 +1,13 @@ from meshroom.common import BaseObject, Property, Variant, VariantList, JSValue from meshroom.core import pyCompatibility + from enum import Enum # available by default in python3. For python2: "pip install enum34" import math import os import psutil import ast import distutils.util - +import shlex class Attribute(BaseObject): """ @@ -505,7 +506,7 @@ class CommandLineNode(Node): chunk.saveStatusFile() print(' - commandLine: {}'.format(cmd)) print(' - logFile: {}'.format(chunk.logFile)) - chunk.subprocess = psutil.Popen(cmd, stdout=logF, stderr=logF, shell=True) + chunk.subprocess = psutil.Popen(shlex.split(cmd), stdout=logF, stderr=logF) # store process static info into the status file # chunk.status.env = node.proc.environ() diff --git a/meshroom/ui/qml/GraphEditor/AttributeItemDelegate.qml b/meshroom/ui/qml/GraphEditor/AttributeItemDelegate.qml index 83ab8ca3..9ee2692b 100644 --- a/meshroom/ui/qml/GraphEditor/AttributeItemDelegate.qml +++ b/meshroom/ui/qml/GraphEditor/AttributeItemDelegate.qml @@ -68,7 +68,7 @@ RowLayout { id: paramMenu property bool isFileAttribute: attribute.type == "File" - property bool isFilepath: isFileAttribute && Filepath.isFile(attribute.value) + property bool isFilepath: isFileAttribute && Filepath.isFile(attribute.evalValue) MenuItem { text: "Reset To Default Value" @@ -85,8 +85,8 @@ RowLayout { visible: paramMenu.isFileAttribute height: visible ? implicitHeight : 0 text: paramMenu.isFilepath ? "Open Containing Folder" : "Open Folder" - onClicked: paramMenu.isFilepath ? Qt.openUrlExternally(Filepath.dirname(attribute.value)) : - Qt.openUrlExternally(Filepath.stringToUrl(attribute.value)) + onClicked: paramMenu.isFilepath ? Qt.openUrlExternally(Filepath.dirname(attribute.evalValue)) : + Qt.openUrlExternally(Filepath.stringToUrl(attribute.evalValue)) } MenuItem { From 7ec65d828cec87224841eb8786c9cb4c63f7d201 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Tue, 12 Jul 2022 11:22:57 +0200 Subject: [PATCH 05/21] Do not evaluate variables of string parameters in the nodes' description Set the value of the string parameters in the nodes' description as the unevaluated variable instead of the evaluated variable. The evaluation will be made later, when the nodes with such parameters will be computed. 
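As an illustration of the late evaluation this relies on, here is a minimal sketch using the getEvalValue() mechanism added in the previous commit (the variable name is simply the one these nodes use; substitution happens only when the parameter is turned into a command line):

    import os
    from string import Template

    # Unevaluated value, as now stored in the node description.
    raw_value = '${ALICEVISION_SENSOR_DB}'

    # At compute time, variables found in the environment are substituted;
    # safe_substitute() leaves unknown variables untouched instead of raising.
    resolved = Template(raw_value).safe_substitute(os.environ)
    print(resolved)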
--- meshroom/nodes/aliceVision/CameraInit.py | 2 +- meshroom/nodes/aliceVision/CameraLocalization.py | 2 +- meshroom/nodes/aliceVision/CameraRigCalibration.py | 2 +- meshroom/nodes/aliceVision/CameraRigLocalization.py | 2 +- meshroom/nodes/aliceVision/ImageMatching.py | 2 +- meshroom/nodes/aliceVision/ImageMatchingMultiSfM.py | 2 +- meshroom/nodes/aliceVision/KeyframeSelection.py | 4 ++-- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/meshroom/nodes/aliceVision/CameraInit.py b/meshroom/nodes/aliceVision/CameraInit.py index 91cf5fef..9ff8a834 100644 --- a/meshroom/nodes/aliceVision/CameraInit.py +++ b/meshroom/nodes/aliceVision/CameraInit.py @@ -159,7 +159,7 @@ The metadata needed are: name='sensorDatabase', label='Sensor Database', description='''Camera sensor width database path.''', - value=os.environ.get('ALICEVISION_SENSOR_DB', ''), + value='${ALICEVISION_SENSOR_DB}', uid=[], ), desc.FloatParam( diff --git a/meshroom/nodes/aliceVision/CameraLocalization.py b/meshroom/nodes/aliceVision/CameraLocalization.py index 3afe3070..d8aabe9a 100644 --- a/meshroom/nodes/aliceVision/CameraLocalization.py +++ b/meshroom/nodes/aliceVision/CameraLocalization.py @@ -125,7 +125,7 @@ class CameraLocalization(desc.CommandLineNode): name='voctree', label='Voctree', description='''[voctree] Filename for the vocabulary tree''', - value=os.environ.get('ALICEVISION_VOCTREE', ''), + value='${ALICEVISION_VOCTREE}', uid=[0], ), desc.File( diff --git a/meshroom/nodes/aliceVision/CameraRigCalibration.py b/meshroom/nodes/aliceVision/CameraRigCalibration.py index 9b5eecc6..117457c3 100644 --- a/meshroom/nodes/aliceVision/CameraRigCalibration.py +++ b/meshroom/nodes/aliceVision/CameraRigCalibration.py @@ -109,7 +109,7 @@ class CameraRigCalibration(desc.CommandLineNode): name='voctree', label='Voctree', description='''[voctree] Filename for the vocabulary tree''', - value=os.environ.get('ALICEVISION_VOCTREE', ''), + value='${ALICEVISION_VOCTREE}', uid=[0], ), desc.File( diff --git a/meshroom/nodes/aliceVision/CameraRigLocalization.py b/meshroom/nodes/aliceVision/CameraRigLocalization.py index e5a4dd37..6cff2d31 100644 --- a/meshroom/nodes/aliceVision/CameraRigLocalization.py +++ b/meshroom/nodes/aliceVision/CameraRigLocalization.py @@ -116,7 +116,7 @@ class CameraRigLocalization(desc.CommandLineNode): name='voctree', label='Voctree', description='''[voctree] Filename for the vocabulary tree''', - value=os.environ.get('ALICEVISION_VOCTREE', ''), + value='${ALICEVISION_VOCTREE}', uid=[0], ), desc.File( diff --git a/meshroom/nodes/aliceVision/ImageMatching.py b/meshroom/nodes/aliceVision/ImageMatching.py index c09c701d..f2ac0d94 100644 --- a/meshroom/nodes/aliceVision/ImageMatching.py +++ b/meshroom/nodes/aliceVision/ImageMatching.py @@ -74,7 +74,7 @@ If images have known poses, use frustum intersection else use VocabularuTree. 
name='tree', label='Voc Tree: Tree', description='Input name for the vocabulary tree file.', - value=os.environ.get('ALICEVISION_VOCTREE', ''), + value='${ALICEVISION_VOCTREE}', uid=[], enabled=lambda node: 'VocabularyTree' in node.method.value, ), diff --git a/meshroom/nodes/aliceVision/ImageMatchingMultiSfM.py b/meshroom/nodes/aliceVision/ImageMatchingMultiSfM.py index f4e23bb9..313534b1 100644 --- a/meshroom/nodes/aliceVision/ImageMatchingMultiSfM.py +++ b/meshroom/nodes/aliceVision/ImageMatchingMultiSfM.py @@ -65,7 +65,7 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw name='tree', label='Voc Tree: Tree', description='Input name for the vocabulary tree file.', - value=os.environ.get('ALICEVISION_VOCTREE', ''), + value='${ALICEVISION_VOCTREE}', uid=[], enabled=lambda node: 'VocabularyTree' in node.method.value, ), diff --git a/meshroom/nodes/aliceVision/KeyframeSelection.py b/meshroom/nodes/aliceVision/KeyframeSelection.py index c8635a8c..c855cce3 100644 --- a/meshroom/nodes/aliceVision/KeyframeSelection.py +++ b/meshroom/nodes/aliceVision/KeyframeSelection.py @@ -95,14 +95,14 @@ You can extract frames at regular interval by configuring only the min/maxFrameS name='sensorDbPath', label='Sensor Db Path', description='''Camera sensor width database path.''', - value=os.environ.get('ALICEVISION_SENSOR_DB', ''), + value='${ALICEVISION_SENSOR_DB}', uid=[0], ), desc.File( name='voctreePath', label='Voctree Path', description='''Vocabulary tree path.''', - value=os.environ.get('ALICEVISION_VOCTREE', ''), + value='${ALICEVISION_VOCTREE}', uid=[0], ), desc.BoolParam( From 8fb0c778d12af5d3d51567407a6d38132e53d796 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Tue, 12 Jul 2022 11:50:34 +0200 Subject: [PATCH 06/21] Add support for external pipelines using project files - Support loading external pipelines as templates with project files - Add template files for some standard pipelines - Remove the hard-coded generation of new pipelines - Update multiviewPipeline test: the multiviewPipeline test relied on the hard-coded generation of pipelines. 
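For context, the template discovery added in meshroom/core/__init__.py below amounts to collecting every .mg file from the default pipelines folder plus any folders listed in MESHROOM_PIPELINE_TEMPLATES_PATH. A condensed sketch of that lookup (the function name and folder list here are illustrative; the actual implementation populates a module-level pipelineTemplates dict):

    import os

    def collectPipelineTemplates(folders):
        """Map template names (e.g. 'photogrammetry') to their .mg file paths."""
        templates = {}
        for folder in folders:
            if not os.path.isdir(folder):
                continue  # ignore folders that do not exist
            for f in os.listdir(folder):
                name, ext = os.path.splitext(f)
                # keep the first occurrence of a given template name
                if ext == '.mg' and name not in templates:
                    templates[name] = os.path.join(folder, f)
        return templates

    userFolders = [p for p in os.environ.get('MESHROOM_PIPELINE_TEMPLATES_PATH', '').split(os.pathsep) if p]
    print(collectPipelineTemplates(['meshroom/pipelines'] + userFolders))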
--- bin/meshroom_batch | 23 +- meshroom/core/__init__.py | 16 + meshroom/multiview.py | 368 ----------- meshroom/pipelines/cameraTracking.mg | 278 ++++++++ meshroom/pipelines/panoramaFisheyeHdr.mg | 591 ++++++++++++++++++ meshroom/pipelines/panoramaHdr.mg | 591 ++++++++++++++++++ meshroom/pipelines/photogrammetry.mg | 523 ++++++++++++++++ .../photogrammetryAndCameraTracking.mg | 486 ++++++++++++++ meshroom/pipelines/photogrammetryDraft.mg | 404 ++++++++++++ meshroom/ui/app.py | 5 +- meshroom/ui/reconstruction.py | 28 +- tests/test_multiviewPipeline.py | 36 +- 12 files changed, 2933 insertions(+), 416 deletions(-) create mode 100644 meshroom/pipelines/cameraTracking.mg create mode 100644 meshroom/pipelines/panoramaFisheyeHdr.mg create mode 100644 meshroom/pipelines/panoramaHdr.mg create mode 100644 meshroom/pipelines/photogrammetry.mg create mode 100644 meshroom/pipelines/photogrammetryAndCameraTracking.mg create mode 100644 meshroom/pipelines/photogrammetryDraft.mg diff --git a/bin/meshroom_batch b/bin/meshroom_batch index 7a917039..7883e1b6 100755 --- a/bin/meshroom_batch +++ b/bin/meshroom_batch @@ -20,8 +20,8 @@ parser.add_argument('-I', '--inputRecursive', metavar='FOLDERS/IMAGES', type=str default=[], help='Input folders containing all images recursively.') -parser.add_argument('-p', '--pipeline', metavar='photogrammetry/panoramaHdr/panoramaFisheyeHdr/cameraTracking/photogrammetryDraft/MG_FILE', type=str, default='photogrammetry', - help='"photogrammetry", "panoramaHdr", "panoramaFisheyeHdr", "cameraTracking", "photogrammetryDraft" pipeline or a Meshroom file containing a custom pipeline to run on input images. ' +parser.add_argument('-p', '--pipeline', metavar='FILE.mg/' + '/'.join(meshroom.core.pipelineTemplates), type=str, default='photogrammetry', + help='Template pipeline among those listed or a Meshroom file containing a custom pipeline to run on input images. 
' 'Requirements: the graph must contain one CameraInit node, ' 'and one Publish node if --output is set.') @@ -124,22 +124,13 @@ if hasSearchedForImages and not filesByType.images: graph = multiview.Graph(name=args.pipeline) with multiview.GraphModification(graph): - # initialize photogrammetry pipeline - if args.pipeline.lower() == "photogrammetry": - # default photogrammetry pipeline - multiview.photogrammetry(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph) - elif args.pipeline.lower() == "panoramahdr": - # default panorama Hdr pipeline - multiview.panoramaHdr(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph) - elif args.pipeline.lower() == "panoramafisheyehdr": - # default panorama Fisheye Hdr pipeline - multiview.panoramaFisheyeHdr(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph) - elif args.pipeline.lower() == "cameratracking": - # default panorama Fisheye Hdr pipeline - multiview.cameraTracking(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph) + # initialize template pipeline + loweredPipelineTemplates = dict((k.lower(), v) for k, v in meshroom.core.pipelineTemplates.items()) + if args.pipeline.lower() in loweredPipelineTemplates: + graph.load(loweredPipelineTemplates[args.pipeline.lower()], setupProjectFile=False) else: # custom pipeline - graph.load(args.pipeline) + graph.load(args.pipeline, setupProjectFile=False) # graph.update() cameraInit = getOnlyNodeOfType(graph, 'CameraInit') diff --git a/meshroom/core/__init__.py b/meshroom/core/__init__.py index 834ace0a..b39f2654 100644 --- a/meshroom/core/__init__.py +++ b/meshroom/core/__init__.py @@ -35,6 +35,7 @@ cacheFolderName = 'MeshroomCache' defaultCacheFolder = os.environ.get('MESHROOM_CACHE', os.path.join(tempfile.gettempdir(), cacheFolderName)) nodesDesc = {} submitters = {} +pipelineTemplates = {} def hashValue(value): @@ -270,6 +271,12 @@ def loadSubmitters(folder, packageName): return loadPlugins(folder, packageName, BaseSubmitter) +def loadPipelineTemplates(folder): + global pipelineTemplates + for file in os.listdir(folder): + if file.endswith(".mg") and file not in pipelineTemplates: + pipelineTemplates[os.path.splitext(file)[0]] = os.path.join(folder, file) + meshroomFolder = os.path.dirname(os.path.dirname(__file__)) additionalNodesPath = os.environ.get("MESHROOM_NODES_PATH", "").split(os.pathsep) @@ -288,3 +295,12 @@ subs = loadSubmitters(os.environ.get("MESHROOM_SUBMITTERS_PATH", meshroomFolder) for sub in subs: registerSubmitter(sub()) + +# Load pipeline templates: check in the default folder and any folder the user might have +# added to the environment variable +additionalPipelinesPath = os.environ.get("MESHROOM_PIPELINE_TEMPLATES_PATH", "").split(os.pathsep) +additionalPipelinesPath = [i for i in additionalPipelinesPath if i] +pipelineTemplatesFolders = [os.path.join(meshroomFolder, 'pipelines')] + additionalPipelinesPath + +for f in pipelineTemplatesFolders: + loadPipelineTemplates(f) diff --git a/meshroom/multiview.py b/meshroom/multiview.py index b8ad1821..74e4fafd 100644 --- a/meshroom/multiview.py +++ b/meshroom/multiview.py @@ -145,243 +145,6 @@ def findFilesByTypeInFolder(folder, recursive=False): return output -def panoramaHdr(inputImages=None, inputViewpoints=None, inputIntrinsics=None, output='', graph=None): - """ - Create a new Graph with a Panorama HDR pipeline. 
- - Args: - inputImages (list of str, optional): list of image file paths - inputViewpoints (list of Viewpoint, optional): list of Viewpoints - output (str, optional): the path to export reconstructed model to - - Returns: - Graph: the created graph - """ - if not graph: - graph = Graph('PanoramaHDR') - with GraphModification(graph): - nodes = panoramaHdrPipeline(graph) - cameraInit = nodes[0] - if inputImages: - cameraInit.viewpoints.extend([{'path': image} for image in inputImages]) - if inputViewpoints: - cameraInit.viewpoints.extend(inputViewpoints) - if inputIntrinsics: - cameraInit.intrinsics.extend(inputIntrinsics) - - if output: - imageProcessing = nodes[-1] - graph.addNewNode('Publish', output=output, inputFiles=[imageProcessing.outputImages]) - - return graph - -def panoramaFisheyeHdr(inputImages=None, inputViewpoints=None, inputIntrinsics=None, output='', graph=None): - if not graph: - graph = Graph('PanoramaFisheyeHDR') - with GraphModification(graph): - panoramaHdr(inputImages, inputViewpoints, inputIntrinsics, output, graph) - for panoramaInit in graph.nodesOfType("PanoramaInit"): - panoramaInit.attribute("useFisheye").value = True - for featureExtraction in graph.nodesOfType("FeatureExtraction"): - # when using fisheye images, 'sift' performs better than 'dspsift' - featureExtraction.attribute("describerTypes").value = ['sift'] - # when using fisheye images, the overlap between images can be small - # and thus requires many features to get enough correspondences for cameras estimation - featureExtraction.attribute("describerPreset").value = 'high' - return graph - -def panoramaHdrPipeline(graph): - """ - Instantiate an PanoramaHDR pipeline inside 'graph'. - Args: - graph (Graph/UIGraph): the graph in which nodes should be instantiated - - Returns: - list of Node: the created nodes - """ - cameraInit = graph.addNewNode('CameraInit') - try: - # fisheye4 does not work well in the ParoramaEstimation, so here we avoid to use it. 
- cameraInit.attribute('allowedCameraModels').value.remove("fisheye4") - except ValueError: - pass - - panoramaPrepareImages = graph.addNewNode('PanoramaPrepareImages', - input=cameraInit.output) - - ldr2hdrSampling = graph.addNewNode('LdrToHdrSampling', - input=panoramaPrepareImages.output) - - ldr2hdrCalibration = graph.addNewNode('LdrToHdrCalibration', - input=ldr2hdrSampling.input, - userNbBrackets=ldr2hdrSampling.userNbBrackets, - byPass=ldr2hdrSampling.byPass, - channelQuantizationPower=ldr2hdrSampling.channelQuantizationPower, - samples=ldr2hdrSampling.output) - - ldr2hdrMerge = graph.addNewNode('LdrToHdrMerge', - input=ldr2hdrCalibration.input, - userNbBrackets=ldr2hdrCalibration.userNbBrackets, - byPass=ldr2hdrCalibration.byPass, - channelQuantizationPower=ldr2hdrCalibration.channelQuantizationPower, - response=ldr2hdrCalibration.response) - - featureExtraction = graph.addNewNode('FeatureExtraction', - input=ldr2hdrMerge.outSfMData, - describerQuality='high') - - panoramaInit = graph.addNewNode('PanoramaInit', - input=featureExtraction.input, - dependency=[featureExtraction.output] # Workaround for tractor submission with a fake dependency - ) - - imageMatching = graph.addNewNode('ImageMatching', - input=panoramaInit.outSfMData, - featuresFolders=[featureExtraction.output], - method='FrustumOrVocabularyTree') - - featureMatching = graph.addNewNode('FeatureMatching', - input=imageMatching.input, - featuresFolders=imageMatching.featuresFolders, - imagePairsList=imageMatching.output, - describerTypes=featureExtraction.describerTypes) - - panoramaEstimation = graph.addNewNode('PanoramaEstimation', - input=featureMatching.input, - featuresFolders=featureMatching.featuresFolders, - matchesFolders=[featureMatching.output], - describerTypes=featureMatching.describerTypes) - - panoramaOrientation = graph.addNewNode('SfMTransform', - input=panoramaEstimation.output, - method='manual') - - panoramaWarping = graph.addNewNode('PanoramaWarping', - input=panoramaOrientation.output) - - panoramaSeams = graph.addNewNode('PanoramaSeams', - input=panoramaWarping.input, - warpingFolder=panoramaWarping.output - ) - - panoramaCompositing = graph.addNewNode('PanoramaCompositing', - input=panoramaSeams.input, - warpingFolder=panoramaSeams.warpingFolder, - labels=panoramaSeams.output - ) - - panoramaMerging = graph.addNewNode('PanoramaMerging', - input=panoramaCompositing.input, - compositingFolder=panoramaCompositing.output - ) - - imageProcessing = graph.addNewNode('ImageProcessing', - input=panoramaMerging.outputPanorama, - fixNonFinite=True, - fillHoles=True, - extension='exr') - - return [ - cameraInit, - featureExtraction, - panoramaInit, - imageMatching, - featureMatching, - panoramaEstimation, - panoramaOrientation, - panoramaWarping, - panoramaSeams, - panoramaCompositing, - panoramaMerging, - imageProcessing, - ] - - - -def photogrammetry(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None): - """ - Create a new Graph with a complete photogrammetry pipeline. 
- - Args: - inputImages (list of str, optional): list of image file paths - inputViewpoints (list of Viewpoint, optional): list of Viewpoints - output (str, optional): the path to export reconstructed model to - - Returns: - Graph: the created graph - """ - if not graph: - graph = Graph('Photogrammetry') - with GraphModification(graph): - sfmNodes, mvsNodes = photogrammetryPipeline(graph) - cameraInit = sfmNodes[0] - cameraInit.viewpoints.extend([{'path': image} for image in inputImages]) - cameraInit.viewpoints.extend(inputViewpoints) - cameraInit.intrinsics.extend(inputIntrinsics) - - if output: - texturing = mvsNodes[-1] - graph.addNewNode('Publish', output=output, inputFiles=[texturing.outputMesh, - texturing.outputMaterial, - texturing.outputTextures]) - - return graph - - -def photogrammetryPipeline(graph): - """ - Instantiate a complete photogrammetry pipeline inside 'graph'. - - Args: - graph (Graph/UIGraph): the graph in which nodes should be instantiated - - Returns: - list of Node: the created nodes - """ - sfmNodes = sfmPipeline(graph) - mvsNodes = mvsPipeline(graph, sfmNodes[-1]) - - # store current pipeline version in graph header - graph.header.update({'pipelineVersion': __version__}) - - return sfmNodes, mvsNodes - - -def sfmPipeline(graph): - """ - Instantiate a SfM pipeline inside 'graph'. - Args: - graph (Graph/UIGraph): the graph in which nodes should be instantiated - - Returns: - list of Node: the created nodes - """ - cameraInit = graph.addNewNode('CameraInit') - - featureExtraction = graph.addNewNode('FeatureExtraction', - input=cameraInit.output) - imageMatching = graph.addNewNode('ImageMatching', - input=featureExtraction.input, - featuresFolders=[featureExtraction.output]) - featureMatching = graph.addNewNode('FeatureMatching', - input=imageMatching.input, - featuresFolders=imageMatching.featuresFolders, - imagePairsList=imageMatching.output, - describerTypes=featureExtraction.describerTypes) - structureFromMotion = graph.addNewNode('StructureFromMotion', - input=featureMatching.input, - featuresFolders=featureMatching.featuresFolders, - matchesFolders=[featureMatching.output], - describerTypes=featureMatching.describerTypes) - return [ - cameraInit, - featureExtraction, - imageMatching, - featureMatching, - structureFromMotion - ] - - def mvsPipeline(graph, sfm=None): """ Instantiate a MVS pipeline inside 'graph'. @@ -470,134 +233,3 @@ def sfmAugmentation(graph, sourceSfm, withMVS=False): mvsNodes = mvsPipeline(graph, structureFromMotion) return sfmNodes, mvsNodes - - -def cameraTrackingPipeline(graph, sourceSfm=None): - """ - Instantiate a camera tracking pipeline inside 'graph'. 
- - Args: - graph (Graph/UIGraph): the graph in which nodes should be instantiated - - Returns: - list of Node: the created nodes - """ - - with GraphModification(graph): - if sourceSfm is None: - cameraInitT, featureExtractionT, imageMatchingT, featureMatchingT, structureFromMotionT = sfmPipeline(graph) - else: - sfmNodes, _ = sfmAugmentation(graph, sourceSfm) - cameraInitT, featureExtractionT, imageMatchingT, featureMatchingT, structureFromMotionT = sfmNodes - - distortionCalibrationT = graph.addNewNode('DistortionCalibration', - input=cameraInitT.output) - - graph.removeEdge(featureMatchingT.input) - graph.addEdge(distortionCalibrationT.outSfMData, featureMatchingT.input) - - imageMatchingT.attribute("nbMatches").value = 5 # voctree nb matches - imageMatchingT.attribute("nbNeighbors").value = 10 - - structureFromMotionT.attribute("minNumberOfMatches").value = 0 - structureFromMotionT.attribute("minInputTrackLength").value = 5 - structureFromMotionT.attribute("minNumberOfObservationsForTriangulation").value = 3 - structureFromMotionT.attribute("minAngleForTriangulation").value = 1.0 - structureFromMotionT.attribute("minAngleForLandmark").value = 0.5 - - exportAnimatedCameraT = graph.addNewNode('ExportAnimatedCamera', input=structureFromMotionT.output) - if sourceSfm: - graph.addEdge(sourceSfm.output, exportAnimatedCameraT.sfmDataFilter) - - # store current pipeline version in graph header - graph.header.update({'pipelineVersion': __version__}) - - return [ - cameraInitT, - featureExtractionT, - imageMatchingT, - featureMatchingT, - distortionCalibrationT, - structureFromMotionT, - exportAnimatedCameraT, - ] - - -def cameraTracking(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None): - if not graph: - graph = Graph('Camera Tracking') - with GraphModification(graph): - trackingNodes = cameraTrackingPipeline(graph) - cameraInit = trackingNodes[0] - cameraInit.viewpoints.extend([{'path': image} for image in inputImages]) - cameraInit.viewpoints.extend(inputViewpoints) - cameraInit.intrinsics.extend(inputIntrinsics) - - if output: - exportNode = trackingNodes[-1] - graph.addNewNode('Publish', output=output, inputFiles=[exportNode.output]) - - return graph - - -def photogrammetryAndCameraTracking(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None): - if not graph: - graph = Graph('Photogrammetry And Camera Tracking') - with GraphModification(graph): - cameraInit, featureExtraction, imageMatching, featureMatching, structureFromMotion = sfmPipeline(graph) - - cameraInitT, featureExtractionT, imageMatchingMultiT, featureMatchingT, distortionCalibrationT, structureFromMotionT, exportAnimatedCameraT = cameraTrackingPipeline(graph, structureFromMotion) - - cameraInit.viewpoints.extend([{'path': image} for image in inputImages]) - cameraInit.viewpoints.extend(inputViewpoints) - cameraInit.intrinsics.extend(inputIntrinsics) - - if output: - graph.addNewNode('Publish', output=output, inputFiles=[exportAnimatedCameraT.output]) - - return graph - - -def photogrammetryDraft(inputImages=None, inputViewpoints=None, inputIntrinsics=None, output='', graph=None): - """ - Create a new Graph with a complete photogrammetry pipeline without requiring a NVIDIA CUDA video card. Something also named Draft Meshing. 
- More information on that pipeline https://github.com/alicevision/meshroom/wiki/Draft-Meshing - - Args: - inputImages (list of str, optional): list of image file paths - inputViewpoints (list of Viewpoint, optional): list of Viewpoints - output (str, optional): the path to export reconstructed model to - - Returns: - Graph: the created graph - """ - if not graph: - graph = Graph('PhotogrammetryDraft') - with GraphModification(graph): - sfmNodes = sfmPipeline(graph) - sfmNode = sfmNodes[-1] - - meshing = graph.addNewNode('Meshing', - input=sfmNode.output) - - meshFiltering = graph.addNewNode('MeshFiltering', - inputMesh=meshing.outputMesh) - texturing = graph.addNewNode('Texturing', - input=meshing.output, - inputMesh=meshFiltering.outputMesh) - - cameraInit = sfmNodes[0] - - if inputImages: - cameraInit.viewpoints.extend([{'path': image} for image in inputImages]) - if inputViewpoints: - cameraInit.viewpoints.extend(inputViewpoints) - if inputIntrinsics: - cameraInit.intrinsics.extend(inputIntrinsics) - - if output: - graph.addNewNode('Publish', output=output, inputFiles=[texturing.outputMesh, - texturing.outputMaterial, - texturing.outputTextures]) - - return graph diff --git a/meshroom/pipelines/cameraTracking.mg b/meshroom/pipelines/cameraTracking.mg new file mode 100644 index 00000000..ccbdafd2 --- /dev/null +++ b/meshroom/pipelines/cameraTracking.mg @@ -0,0 +1,278 @@ +{ + "header": { + "pipelineVersion": "2.2", + "releaseVersion": "2021.1.0", + "fileVersion": "1.1", + "nodesVersions": { + "ExportAnimatedCamera": "2.0", + "FeatureMatching": "2.0", + "DistortionCalibration": "2.0", + "CameraInit": "7.0", + "ImageMatching": "2.0", + "FeatureExtraction": "1.1", + "StructureFromMotion": "2.0" + } + }, + "graph": { + "DistortionCalibration_1": { + "inputs": { + "verboseLevel": "info", + "input": "{CameraInit_1.output}", + "lensGrid": [] + }, + "nodeType": "DistortionCalibration", + "uids": { + "0": "8afea9d171904cdb6ba1c0b116cb60de3ccb6fb4" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm" + }, + "position": [ + 200, + 160 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "ImageMatching_1": { + "inputs": { + "minNbImages": 200, + "nbNeighbors": 10, + "tree": "${ALICEVISION_VOCTREE}", + "maxDescriptors": 500, + "verboseLevel": "info", + "weights": "", + "nbMatches": 5, + "input": "{FeatureExtraction_1.input}", + "method": "SequentialAndVocabularyTree", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ] + }, + "nodeType": "ImageMatching", + "uids": { + "0": "832b744de5fa804d7d63ea255419b1afaf24f723" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/imageMatches.txt" + }, + "position": [ + 400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureExtraction_1": { + "inputs": { + "verboseLevel": "info", + "maxThreads": 0, + "describerTypes": [ + "dspsift" + ], + "maxNbFeatures": 0, + "relativePeakThreshold": 0.01, + "forceCpuExtraction": true, + "masksFolder": "", + "contrastFiltering": "GridSort", + "describerQuality": "normal", + "gridFiltering": true, + "input": "{CameraInit_1.output}", + "describerPreset": "normal" + }, + "nodeType": "FeatureExtraction", + "uids": { + "0": "a07fb8d05b63327d05461954c2fd2a00f201275b" + }, + "parallelization": { + "blockSize": 40, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 200, 
+ 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "StructureFromMotion_1": { + "inputs": { + "localizerEstimatorMaxIterations": 4096, + "minAngleForLandmark": 0.5, + "filterTrackForks": false, + "minNumberOfObservationsForTriangulation": 3, + "maxAngleInitialPair": 40.0, + "observationConstraint": "Scale", + "maxNumberOfMatches": 0, + "localizerEstimator": "acransac", + "describerTypes": "{FeatureMatching_1.describerTypes}", + "lockScenePreviouslyReconstructed": false, + "localBAGraphDistance": 1, + "minNbCamerasToRefinePrincipalPoint": 3, + "lockAllIntrinsics": false, + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "useRigConstraint": true, + "rigMinNbCamerasForCalibration": 20, + "initialPairA": "", + "initialPairB": "", + "interFileExtension": ".abc", + "useLocalBA": true, + "computeStructureColor": true, + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "minInputTrackLength": 5, + "useOnlyMatchesFromInputFolder": false, + "verboseLevel": "info", + "minAngleForTriangulation": 1.0, + "maxReprojectionError": 4.0, + "minAngleInitialPair": 5.0, + "minNumberOfMatches": 0, + "localizerEstimatorError": 0.0 + }, + "nodeType": "StructureFromMotion", + "uids": { + "0": "4d198974784fd71f5a1c189e10c2914e56523585" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/sfm.abc", + "extraInfoFolder": "{cache}/{nodeType}/{uid0}/", + "outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm" + }, + "position": [ + 800, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "ExportAnimatedCamera_1": { + "inputs": { + "exportFullROD": false, + "undistortedImageType": "exr", + "exportUVMaps": true, + "verboseLevel": "info", + "sfmDataFilter": "", + "exportUndistortedImages": false, + "input": "{StructureFromMotion_1.output}", + "viewFilter": "", + "correctPrincipalPoint": true + }, + "nodeType": "ExportAnimatedCamera", + "uids": { + "0": "31413f19e51b239874733f13f9628286fd185c18" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 1 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/", + "outputUndistorted": "{cache}/{nodeType}/{uid0}/undistort", + "outputCamera": "{cache}/{nodeType}/{uid0}/camera.abc" + }, + "position": [ + 1000, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "CameraInit_1": { + "inputs": { + "groupCameraFallback": "folder", + "intrinsics": [], + "viewIdRegex": ".*?(\\d+)", + "defaultFieldOfView": 45.0, + "allowedCameraModels": [ + "pinhole", + "radial1", + "radial3", + "brown", + "fisheye4", + "fisheye1", + "3deanamorphic4", + "3deradial4", + "3declassicld" + ], + "verboseLevel": "info", + "viewIdMethod": "metadata", + "viewpoints": [], + "useInternalWhiteBalance": true, + "sensorDatabase": "${ALICEVISION_SENSOR_DB}" + }, + "nodeType": "CameraInit", + "uids": { + "0": "f9436e97e444fa71a05aa5cf7639b206df8ba282" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm" + }, + "position": [ + 0, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureMatching_1": { + "inputs": { + "verboseLevel": "info", + "describerTypes": "{FeatureExtraction_1.describerTypes}", + "exportDebugFiles": false, + "crossMatching": false, + "geometricError": 0.0, + "maxMatches": 0, + "matchFromKnownCameraPoses": false, + "savePutativeMatches": false, + "guidedMatching": false, + "imagePairsList": 
"{ImageMatching_1.output}", + "geometricEstimator": "acransac", + "geometricFilterType": "fundamental_matrix", + "maxIteration": 2048, + "distanceRatio": 0.8, + "input": "{DistortionCalibration_1.outSfMData}", + "photometricMatchingMethod": "ANN_L2", + "knownPosesGeometricErrorMax": 5.0, + "featuresFolders": "{ImageMatching_1.featuresFolders}" + }, + "nodeType": "FeatureMatching", + "uids": { + "0": "8386c096445d6988ea7d14f1ae3192978a4dd2e8" + }, + "parallelization": { + "blockSize": 20, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 600, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + } + } +} \ No newline at end of file diff --git a/meshroom/pipelines/panoramaFisheyeHdr.mg b/meshroom/pipelines/panoramaFisheyeHdr.mg new file mode 100644 index 00000000..b49093b9 --- /dev/null +++ b/meshroom/pipelines/panoramaFisheyeHdr.mg @@ -0,0 +1,591 @@ +{ + "header": { + "nodesVersions": { + "PanoramaSeams": "2.0", + "FeatureMatching": "2.0", + "ImageProcessing": "3.0", + "PanoramaCompositing": "2.0", + "LdrToHdrMerge": "4.0", + "LdrToHdrSampling": "4.0", + "LdrToHdrCalibration": "3.0", + "PanoramaEstimation": "1.0", + "PanoramaInit": "2.0", + "PanoramaMerging": "1.0", + "SfMTransform": "3.0", + "CameraInit": "7.0", + "ImageMatching": "2.0", + "FeatureExtraction": "1.1", + "PanoramaPrepareImages": "1.1", + "PanoramaWarping": "1.0" + }, + "releaseVersion": "2021.1.0", + "fileVersion": "1.1" + }, + "graph": { + "LdrToHdrMerge_1": { + "inputs": { + "verboseLevel": "info", + "fusionWeight": "gaussian", + "channelQuantizationPower": "{LdrToHdrCalibration_1.channelQuantizationPower}", + "nbBrackets": 0, + "enableHighlight": false, + "offsetRefBracketIndex": 1, + "storageDataType": "float", + "highlightTargetLux": 120000.0, + "byPass": "{LdrToHdrCalibration_1.byPass}", + "highlightCorrectionFactor": 1.0, + "input": "{LdrToHdrCalibration_1.input}", + "userNbBrackets": "{LdrToHdrCalibration_1.userNbBrackets}", + "response": "{LdrToHdrCalibration_1.response}" + }, + "nodeType": "LdrToHdrMerge", + "uids": { + "0": "9b90e3b468adc487fe2905e0cc78328216966317" + }, + "parallelization": { + "blockSize": 2, + "split": 0, + "size": 0 + }, + "outputs": { + "outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm" + }, + "position": [ + 800, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "ImageProcessing_1": { + "inputs": { + "outputFormat": "rgba", + "sharpenFilter": { + "threshold": 0.0, + "width": 3, + "sharpenFilterEnabled": false, + "contrast": 1.0 + }, + "extension": "exr", + "exposureCompensation": false, + "storageDataType": "float", + "inputFolders": [], + "verboseLevel": "info", + "metadataFolders": [], + "claheFilter": { + "claheClipLimit": 4.0, + "claheTileGridSize": 8, + "claheEnabled": false + }, + "medianFilter": 0, + "fillHoles": true, + "reconstructedViewsOnly": false, + "input": "{PanoramaMerging_1.outputPanorama}", + "noiseFilter": { + "noiseEnabled": false, + "noiseMethod": "uniform", + "noiseB": 1.0, + "noiseMono": true, + "noiseA": 0.0 + }, + "scaleFactor": 1.0, + "bilateralFilter": { + "bilateralFilterDistance": 0, + "bilateralFilterSigmaColor": 0.0, + "bilateralFilterSigmaSpace": 0.0, + "bilateralFilterEnabled": false + }, + "contrast": 1.0, + "fixNonFinite": true + }, + "nodeType": "ImageProcessing", + "uids": { + "0": "494b97af203ddbe4767c922a6c5795297cf53eef" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/", + "outputImages": 
"{cache}/{nodeType}/{uid0}/panorama.exr", + "outSfMData": "" + }, + "position": [ + 3000, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaWarping_1": { + "inputs": { + "panoramaWidth": 10000, + "maxPanoramaWidth": 70000, + "verboseLevel": "info", + "percentUpscale": 50, + "input": "{SfMTransform_1.output}", + "storageDataType": "float", + "estimateResolution": true + }, + "nodeType": "PanoramaWarping", + "uids": { + "0": "45cca14aba2a8c4f68c79a15d3fbc48f30ae9d66" + }, + "parallelization": { + "blockSize": 5, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 2200, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "LdrToHdrCalibration_1": { + "inputs": { + "samples": "{LdrToHdrSampling_1.output}", + "channelQuantizationPower": "{LdrToHdrSampling_1.channelQuantizationPower}", + "maxTotalPoints": 1000000, + "nbBrackets": 0, + "calibrationMethod": "debevec", + "calibrationWeight": "default", + "verboseLevel": "info", + "byPass": "{LdrToHdrSampling_1.byPass}", + "input": "{LdrToHdrSampling_1.input}", + "userNbBrackets": "{LdrToHdrSampling_1.userNbBrackets}" + }, + "nodeType": "LdrToHdrCalibration", + "uids": { + "0": "9225abd943d28be4387a8a8902711d0b7c604a2a" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "response": "{cache}/{nodeType}/{uid0}/response.csv" + }, + "position": [ + 600, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "LdrToHdrSampling_1": { + "inputs": { + "blockSize": 256, + "nbBrackets": 0, + "verboseLevel": "info", + "radius": 5, + "byPass": false, + "channelQuantizationPower": 10, + "debug": false, + "input": "{PanoramaPrepareImages_1.output}", + "maxCountSample": 200, + "userNbBrackets": 0 + }, + "nodeType": "LdrToHdrSampling", + "uids": { + "0": "af67674ecc8524592fe2b217259c241167e28dcd" + }, + "parallelization": { + "blockSize": 2, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "ImageMatching_1": { + "inputs": { + "minNbImages": 200, + "nbNeighbors": 5, + "tree": "${ALICEVISION_VOCTREE}", + "maxDescriptors": 500, + "verboseLevel": "info", + "weights": "", + "nbMatches": 40, + "input": "{PanoramaInit_1.outSfMData}", + "method": "FrustumOrVocabularyTree", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ] + }, + "nodeType": "ImageMatching", + "uids": { + "0": "a076f9e959d62b3a6f63d3f6493527b857eab8d6" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/imageMatches.txt" + }, + "position": [ + 1400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureExtraction_1": { + "inputs": { + "verboseLevel": "info", + "maxThreads": 0, + "describerTypes": [ + "sift" + ], + "maxNbFeatures": 0, + "relativePeakThreshold": 0.01, + "forceCpuExtraction": true, + "masksFolder": "", + "contrastFiltering": "GridSort", + "describerQuality": "high", + "gridFiltering": true, + "input": "{LdrToHdrMerge_1.outSfMData}", + "describerPreset": "high" + }, + "nodeType": "FeatureExtraction", + "uids": { + "0": "04f8824c2e2f206b47f05edaf76def15fa91446b" + }, + "parallelization": { + "blockSize": 40, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 1000, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaSeams_1": { + "inputs": 
{ + "verboseLevel": "info", + "input": "{PanoramaWarping_1.input}", + "warpingFolder": "{PanoramaWarping_1.output}", + "maxWidth": 5000, + "useGraphCut": true + }, + "nodeType": "PanoramaSeams", + "uids": { + "0": "dd02562c5c3b1e18e42561d99590cbf4ff5ba35a" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/labels.exr" + }, + "position": [ + 2400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaCompositing_1": { + "inputs": { + "warpingFolder": "{PanoramaSeams_1.warpingFolder}", + "maxThreads": 4, + "labels": "{PanoramaSeams_1.output}", + "verboseLevel": "info", + "overlayType": "none", + "compositerType": "multiband", + "input": "{PanoramaSeams_1.input}", + "storageDataType": "float" + }, + "nodeType": "PanoramaCompositing", + "uids": { + "0": "1f1e629021e2280291046226e009a52dbb7809c1" + }, + "parallelization": { + "blockSize": 5, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 2600, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "CameraInit_1": { + "inputs": { + "groupCameraFallback": "folder", + "intrinsics": [], + "viewIdRegex": ".*?(\\d+)", + "defaultFieldOfView": 45.0, + "allowedCameraModels": [ + "pinhole", + "radial1", + "radial3", + "brown", + "fisheye1", + "3deanamorphic4", + "3deradial4", + "3declassicld" + ], + "verboseLevel": "info", + "viewIdMethod": "metadata", + "viewpoints": [], + "useInternalWhiteBalance": true, + "sensorDatabase": "${ALICEVISION_SENSOR_DB}" + }, + "nodeType": "CameraInit", + "uids": { + "0": "f9436e97e444fa71a05aa5cf7639b206df8ba282" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm" + }, + "position": [ + 0, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaPrepareImages_1": { + "inputs": { + "verboseLevel": "info", + "input": "{CameraInit_1.output}" + }, + "nodeType": "PanoramaPrepareImages", + "uids": { + "0": "6956c52a8d18cb4cdb7ceb0db68f4deb84a37aee" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm" + }, + "position": [ + 200, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "SfMTransform_1": { + "inputs": { + "applyScale": true, + "scale": 1.0, + "applyTranslation": true, + "landmarksDescriberTypes": [ + "sift", + "dspsift", + "akaze" + ], + "markers": [], + "method": "manual", + "verboseLevel": "info", + "input": "{PanoramaEstimation_1.output}", + "applyRotation": true, + "manualTransform": { + "manualTranslation": { + "y": 0.0, + "x": 0.0, + "z": 0.0 + }, + "manualRotation": { + "y": 0.0, + "x": 0.0, + "z": 0.0 + }, + "manualScale": 1.0 + }, + "transformation": "" + }, + "nodeType": "SfMTransform", + "uids": { + "0": "b8568fb40b68b42ac80c18df2dcdf600744fe3e1" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/panorama.abc", + "outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm" + }, + "position": [ + 2000, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaMerging_1": { + "inputs": { + "verboseLevel": "info", + "compositingFolder": "{PanoramaCompositing_1.output}", + "outputFileType": "exr", + "storageDataType": "float", + "input": "{PanoramaCompositing_1.input}" + }, + "nodeType": "PanoramaMerging", + "uids": { + "0": 
"70edd7fe8194bf35dcb0b221141cd4abd2354547" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "outputPanorama": "{cache}/{nodeType}/{uid0}/panorama.{outputFileTypeValue}" + }, + "position": [ + 2800, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaEstimation_1": { + "inputs": { + "intermediateRefineWithFocalDist": false, + "offsetLongitude": 0.0, + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "filterMatches": false, + "rotationAveragingWeighting": true, + "offsetLatitude": 0.0, + "verboseLevel": "info", + "maxAngularError": 100.0, + "lockAllIntrinsics": false, + "refine": true, + "input": "{FeatureMatching_1.input}", + "intermediateRefineWithFocal": false, + "describerTypes": "{FeatureMatching_1.describerTypes}", + "relativeRotation": "rotation_matrix", + "maxAngleToPrior": 20.0, + "rotationAveraging": "L2_minimization", + "featuresFolders": "{FeatureMatching_1.featuresFolders}" + }, + "nodeType": "PanoramaEstimation", + "uids": { + "0": "47b0976fc98eefcbc0342bbb63e7d27ef3e0d4de" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/panorama.abc", + "outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm" + }, + "position": [ + 1800, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaInit_1": { + "inputs": { + "useFisheye": true, + "fisheyeCenterOffset": { + "fisheyeCenterOffset_y": 0.0, + "fisheyeCenterOffset_x": 0.0 + }, + "initializeCameras": "No", + "nbViewsPerLine": [], + "debugFisheyeCircleEstimation": false, + "verboseLevel": "info", + "dependency": [ + "{FeatureExtraction_1.output}" + ], + "estimateFisheyeCircle": true, + "input": "{FeatureExtraction_1.input}", + "yawCW": 1, + "config": "", + "fisheyeRadius": 96.0, + "inputAngle": "None" + }, + "nodeType": "PanoramaInit", + "uids": { + "0": "2fd95a957eb42ffc8fb1c24d2666afcd859ba079" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm" + }, + "position": [ + 1200, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureMatching_1": { + "inputs": { + "verboseLevel": "info", + "describerTypes": "{FeatureExtraction_1.describerTypes}", + "exportDebugFiles": false, + "crossMatching": false, + "geometricError": 0.0, + "maxMatches": 0, + "matchFromKnownCameraPoses": false, + "savePutativeMatches": false, + "guidedMatching": false, + "imagePairsList": "{ImageMatching_1.output}", + "geometricEstimator": "acransac", + "geometricFilterType": "fundamental_matrix", + "maxIteration": 2048, + "distanceRatio": 0.8, + "input": "{ImageMatching_1.input}", + "photometricMatchingMethod": "ANN_L2", + "knownPosesGeometricErrorMax": 5.0, + "featuresFolders": "{ImageMatching_1.featuresFolders}" + }, + "nodeType": "FeatureMatching", + "uids": { + "0": "c0fbe0b12fe47ada6a1ca8f74d266e99c1cc548c" + }, + "parallelization": { + "blockSize": 20, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 1600, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + } + } +} \ No newline at end of file diff --git a/meshroom/pipelines/panoramaHdr.mg b/meshroom/pipelines/panoramaHdr.mg new file mode 100644 index 00000000..5ca4c845 --- /dev/null +++ b/meshroom/pipelines/panoramaHdr.mg @@ -0,0 +1,591 @@ +{ + "header": { + "nodesVersions": { + "PanoramaSeams": "2.0", + "FeatureMatching": "2.0", + "ImageProcessing": "3.0", 
+ "PanoramaCompositing": "2.0", + "LdrToHdrMerge": "4.0", + "LdrToHdrSampling": "4.0", + "LdrToHdrCalibration": "3.0", + "PanoramaEstimation": "1.0", + "PanoramaInit": "2.0", + "PanoramaMerging": "1.0", + "SfMTransform": "3.0", + "CameraInit": "7.0", + "ImageMatching": "2.0", + "FeatureExtraction": "1.1", + "PanoramaPrepareImages": "1.1", + "PanoramaWarping": "1.0" + }, + "releaseVersion": "2021.1.0", + "fileVersion": "1.1" + }, + "graph": { + "LdrToHdrMerge_1": { + "inputs": { + "verboseLevel": "info", + "fusionWeight": "gaussian", + "channelQuantizationPower": "{LdrToHdrCalibration_1.channelQuantizationPower}", + "nbBrackets": 0, + "enableHighlight": false, + "offsetRefBracketIndex": 1, + "storageDataType": "float", + "highlightTargetLux": 120000.0, + "byPass": "{LdrToHdrCalibration_1.byPass}", + "highlightCorrectionFactor": 1.0, + "input": "{LdrToHdrCalibration_1.input}", + "userNbBrackets": "{LdrToHdrCalibration_1.userNbBrackets}", + "response": "{LdrToHdrCalibration_1.response}" + }, + "nodeType": "LdrToHdrMerge", + "uids": { + "0": "9b90e3b468adc487fe2905e0cc78328216966317" + }, + "parallelization": { + "blockSize": 2, + "split": 0, + "size": 0 + }, + "outputs": { + "outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm" + }, + "position": [ + 800, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "ImageProcessing_1": { + "inputs": { + "outputFormat": "rgba", + "sharpenFilter": { + "threshold": 0.0, + "width": 3, + "sharpenFilterEnabled": false, + "contrast": 1.0 + }, + "extension": "exr", + "exposureCompensation": false, + "storageDataType": "float", + "inputFolders": [], + "verboseLevel": "info", + "metadataFolders": [], + "claheFilter": { + "claheClipLimit": 4.0, + "claheTileGridSize": 8, + "claheEnabled": false + }, + "medianFilter": 0, + "fillHoles": true, + "reconstructedViewsOnly": false, + "input": "{PanoramaMerging_1.outputPanorama}", + "noiseFilter": { + "noiseEnabled": false, + "noiseMethod": "uniform", + "noiseB": 1.0, + "noiseMono": true, + "noiseA": 0.0 + }, + "scaleFactor": 1.0, + "bilateralFilter": { + "bilateralFilterDistance": 0, + "bilateralFilterSigmaColor": 0.0, + "bilateralFilterSigmaSpace": 0.0, + "bilateralFilterEnabled": false + }, + "contrast": 1.0, + "fixNonFinite": true + }, + "nodeType": "ImageProcessing", + "uids": { + "0": "d7845b276d97c3489223cce16a1e9d581d98a832" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/", + "outputImages": "{cache}/{nodeType}/{uid0}/panorama.exr", + "outSfMData": "" + }, + "position": [ + 3000, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaWarping_1": { + "inputs": { + "panoramaWidth": 10000, + "maxPanoramaWidth": 70000, + "verboseLevel": "info", + "percentUpscale": 50, + "input": "{SfMTransform_1.output}", + "storageDataType": "float", + "estimateResolution": true + }, + "nodeType": "PanoramaWarping", + "uids": { + "0": "f2971d0c73b15fa99cbccbc9515de346ca141a1e" + }, + "parallelization": { + "blockSize": 5, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 2200, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "LdrToHdrCalibration_1": { + "inputs": { + "samples": "{LdrToHdrSampling_1.output}", + "channelQuantizationPower": "{LdrToHdrSampling_1.channelQuantizationPower}", + "maxTotalPoints": 1000000, + "nbBrackets": 0, + "calibrationMethod": "debevec", + "calibrationWeight": "default", + "verboseLevel": "info", + "byPass": 
"{LdrToHdrSampling_1.byPass}", + "input": "{LdrToHdrSampling_1.input}", + "userNbBrackets": "{LdrToHdrSampling_1.userNbBrackets}" + }, + "nodeType": "LdrToHdrCalibration", + "uids": { + "0": "9225abd943d28be4387a8a8902711d0b7c604a2a" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "response": "{cache}/{nodeType}/{uid0}/response.csv" + }, + "position": [ + 600, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "LdrToHdrSampling_1": { + "inputs": { + "blockSize": 256, + "nbBrackets": 0, + "verboseLevel": "info", + "radius": 5, + "byPass": false, + "channelQuantizationPower": 10, + "debug": false, + "input": "{PanoramaPrepareImages_1.output}", + "maxCountSample": 200, + "userNbBrackets": 0 + }, + "nodeType": "LdrToHdrSampling", + "uids": { + "0": "af67674ecc8524592fe2b217259c241167e28dcd" + }, + "parallelization": { + "blockSize": 2, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "ImageMatching_1": { + "inputs": { + "minNbImages": 200, + "nbNeighbors": 5, + "tree": "${ALICEVISION_VOCTREE}", + "maxDescriptors": 500, + "verboseLevel": "info", + "weights": "", + "nbMatches": 40, + "input": "{PanoramaInit_1.outSfMData}", + "method": "FrustumOrVocabularyTree", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ] + }, + "nodeType": "ImageMatching", + "uids": { + "0": "7efc9cd43585003fc6eec0776a704e358f0a15de" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/imageMatches.txt" + }, + "position": [ + 1400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureExtraction_1": { + "inputs": { + "verboseLevel": "info", + "maxThreads": 0, + "describerTypes": [ + "dspsift" + ], + "maxNbFeatures": 0, + "relativePeakThreshold": 0.01, + "forceCpuExtraction": true, + "masksFolder": "", + "contrastFiltering": "GridSort", + "describerQuality": "high", + "gridFiltering": true, + "input": "{LdrToHdrMerge_1.outSfMData}", + "describerPreset": "normal" + }, + "nodeType": "FeatureExtraction", + "uids": { + "0": "1863cc0989ab0fd910d4fe293074ff94c4e586a1" + }, + "parallelization": { + "blockSize": 40, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 1000, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaSeams_1": { + "inputs": { + "verboseLevel": "info", + "input": "{PanoramaWarping_1.input}", + "warpingFolder": "{PanoramaWarping_1.output}", + "maxWidth": 5000, + "useGraphCut": true + }, + "nodeType": "PanoramaSeams", + "uids": { + "0": "0ee6da171bd684358b7c64dcc631f81ba743e1fa" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/labels.exr" + }, + "position": [ + 2400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaCompositing_1": { + "inputs": { + "warpingFolder": "{PanoramaSeams_1.warpingFolder}", + "maxThreads": 4, + "labels": "{PanoramaSeams_1.output}", + "verboseLevel": "info", + "overlayType": "none", + "compositerType": "multiband", + "input": "{PanoramaSeams_1.input}", + "storageDataType": "float" + }, + "nodeType": "PanoramaCompositing", + "uids": { + "0": "8aba78572808d012e0bb376503c2016df943b3f0" + }, + "parallelization": { + "blockSize": 5, + "split": 0, + "size": 0 + }, + "outputs": { + "output": 
"{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 2600, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "CameraInit_1": { + "inputs": { + "groupCameraFallback": "folder", + "intrinsics": [], + "viewIdRegex": ".*?(\\d+)", + "defaultFieldOfView": 45.0, + "allowedCameraModels": [ + "pinhole", + "radial1", + "radial3", + "brown", + "fisheye1", + "3deanamorphic4", + "3deradial4", + "3declassicld" + ], + "verboseLevel": "info", + "viewIdMethod": "metadata", + "viewpoints": [], + "useInternalWhiteBalance": true, + "sensorDatabase": "${ALICEVISION_SENSOR_DB}" + }, + "nodeType": "CameraInit", + "uids": { + "0": "f9436e97e444fa71a05aa5cf7639b206df8ba282" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm" + }, + "position": [ + 0, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaPrepareImages_1": { + "inputs": { + "verboseLevel": "info", + "input": "{CameraInit_1.output}" + }, + "nodeType": "PanoramaPrepareImages", + "uids": { + "0": "6956c52a8d18cb4cdb7ceb0db68f4deb84a37aee" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm" + }, + "position": [ + 200, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "SfMTransform_1": { + "inputs": { + "applyScale": true, + "scale": 1.0, + "applyTranslation": true, + "landmarksDescriberTypes": [ + "sift", + "dspsift", + "akaze" + ], + "markers": [], + "method": "manual", + "verboseLevel": "info", + "input": "{PanoramaEstimation_1.output}", + "applyRotation": true, + "manualTransform": { + "manualTranslation": { + "y": 0.0, + "x": 0.0, + "z": 0.0 + }, + "manualRotation": { + "y": 0.0, + "x": 0.0, + "z": 0.0 + }, + "manualScale": 1.0 + }, + "transformation": "" + }, + "nodeType": "SfMTransform", + "uids": { + "0": "c72641a2cca50759bcf5283ae6e0b6f7abc3fe4a" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/panorama.abc", + "outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm" + }, + "position": [ + 2000, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaMerging_1": { + "inputs": { + "verboseLevel": "info", + "compositingFolder": "{PanoramaCompositing_1.output}", + "outputFileType": "exr", + "storageDataType": "float", + "input": "{PanoramaCompositing_1.input}" + }, + "nodeType": "PanoramaMerging", + "uids": { + "0": "e007a4eb5fc5937b320638eba667cea183c0c642" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "outputPanorama": "{cache}/{nodeType}/{uid0}/panorama.{outputFileTypeValue}" + }, + "position": [ + 2800, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaEstimation_1": { + "inputs": { + "intermediateRefineWithFocalDist": false, + "offsetLongitude": 0.0, + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "filterMatches": false, + "rotationAveragingWeighting": true, + "offsetLatitude": 0.0, + "verboseLevel": "info", + "maxAngularError": 100.0, + "lockAllIntrinsics": false, + "refine": true, + "input": "{FeatureMatching_1.input}", + "intermediateRefineWithFocal": false, + "describerTypes": "{FeatureMatching_1.describerTypes}", + "relativeRotation": "rotation_matrix", + "maxAngleToPrior": 20.0, + "rotationAveraging": "L2_minimization", + "featuresFolders": "{FeatureMatching_1.featuresFolders}" + }, + "nodeType": 
"PanoramaEstimation", + "uids": { + "0": "de946a7c1080873d15c9eb8a0523b544cf548719" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/panorama.abc", + "outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm" + }, + "position": [ + 1800, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PanoramaInit_1": { + "inputs": { + "useFisheye": false, + "fisheyeCenterOffset": { + "fisheyeCenterOffset_y": 0.0, + "fisheyeCenterOffset_x": 0.0 + }, + "initializeCameras": "No", + "nbViewsPerLine": [], + "debugFisheyeCircleEstimation": false, + "verboseLevel": "info", + "dependency": [ + "{FeatureExtraction_1.output}" + ], + "estimateFisheyeCircle": true, + "input": "{FeatureExtraction_1.input}", + "yawCW": 1, + "config": "", + "fisheyeRadius": 96.0, + "inputAngle": "None" + }, + "nodeType": "PanoramaInit", + "uids": { + "0": "702d6b973342e9203b50afea1470b4c01eb90174" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm" + }, + "position": [ + 1200, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureMatching_1": { + "inputs": { + "verboseLevel": "info", + "describerTypes": "{FeatureExtraction_1.describerTypes}", + "exportDebugFiles": false, + "crossMatching": false, + "geometricError": 0.0, + "maxMatches": 0, + "matchFromKnownCameraPoses": false, + "savePutativeMatches": false, + "guidedMatching": false, + "imagePairsList": "{ImageMatching_1.output}", + "geometricEstimator": "acransac", + "geometricFilterType": "fundamental_matrix", + "maxIteration": 2048, + "distanceRatio": 0.8, + "input": "{ImageMatching_1.input}", + "photometricMatchingMethod": "ANN_L2", + "knownPosesGeometricErrorMax": 5.0, + "featuresFolders": "{ImageMatching_1.featuresFolders}" + }, + "nodeType": "FeatureMatching", + "uids": { + "0": "cec6da6e894230ab66683c2e959bc9581ea5430e" + }, + "parallelization": { + "blockSize": 20, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 1600, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + } + } +} \ No newline at end of file diff --git a/meshroom/pipelines/photogrammetry.mg b/meshroom/pipelines/photogrammetry.mg new file mode 100644 index 00000000..32419169 --- /dev/null +++ b/meshroom/pipelines/photogrammetry.mg @@ -0,0 +1,523 @@ +{ + "header": { + "pipelineVersion": "2.2", + "releaseVersion": "2021.1.0", + "fileVersion": "1.1", + "nodesVersions": { + "FeatureMatching": "2.0", + "MeshFiltering": "3.0", + "Texturing": "6.0", + "PrepareDenseScene": "3.0", + "DepthMap": "2.0", + "Meshing": "7.0", + "CameraInit": "7.0", + "ImageMatching": "2.0", + "FeatureExtraction": "1.1", + "StructureFromMotion": "2.0", + "DepthMapFilter": "3.0" + } + }, + "graph": { + "Texturing_1": { + "inputs": { + "imagesFolder": "{DepthMap_1.imagesFolder}", + "downscale": 2, + "bumpMapping": { + "normalFileType": "exr", + "enable": true, + "bumpType": "Normal", + "heightFileType": "exr" + }, + "forceVisibleByAllVertices": false, + "fillHoles": false, + "multiBandDownscale": 4, + "useScore": true, + "displacementMapping": { + "displacementMappingFileType": "exr", + "enable": true + }, + "outputMeshFileType": "obj", + "angleHardThreshold": 90.0, + "textureSide": 8192, + "processColorspace": "sRGB", + "input": "{Meshing_1.output}", + "useUDIM": true, + "subdivisionTargetRatio": 0.8, + "padding": 5, + "inputRefMesh": "", + "correctEV": false, + 
"visibilityRemappingMethod": "PullPush", + "inputMesh": "{MeshFiltering_1.outputMesh}", + "verboseLevel": "info", + "colorMapping": { + "enable": true, + "colorMappingFileType": "exr" + }, + "bestScoreThreshold": 0.1, + "unwrapMethod": "Basic", + "multiBandNbContrib": { + "high": 1, + "midHigh": 5, + "low": 0, + "midLow": 10 + }, + "flipNormals": false + }, + "nodeType": "Texturing", + "uids": { + "0": "09f72f6745c6b13aae56fc3876e6541fbeaa557d" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 1 + }, + "outputs": { + "outputTextures": "{cache}/{nodeType}/{uid0}/texture_*.exr", + "outputMesh": "{cache}/{nodeType}/{uid0}/texturedMesh.{outputMeshFileTypeValue}", + "outputMaterial": "{cache}/{nodeType}/{uid0}/texturedMesh.mtl", + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 2000, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "Meshing_1": { + "inputs": { + "exportDebugTetrahedralization": false, + "useBoundingBox": false, + "maxInputPoints": 50000000, + "repartition": "multiResolution", + "helperPointsGridSize": 10, + "seed": 0, + "voteFilteringForWeaklySupportedSurfaces": true, + "verboseLevel": "info", + "outputMeshFileType": "obj", + "simGaussianSizeInit": 10.0, + "nPixelSizeBehind": 4.0, + "fullWeight": 1.0, + "depthMapsFolder": "{DepthMapFilter_1.output}", + "densify": false, + "simFactor": 15.0, + "maskHelperPointsWeight": 1.0, + "densifyScale": 20.0, + "input": "{DepthMapFilter_1.input}", + "addLandmarksToTheDensePointCloud": false, + "voteMarginFactor": 4.0, + "saveRawDensePointCloud": false, + "contributeMarginFactor": 2.0, + "estimateSpaceMinObservationAngle": 10, + "nbSolidAngleFilteringIterations": 2, + "minStep": 2, + "colorizeOutput": false, + "pixSizeMarginFinalCoef": 4.0, + "densifyNbFront": 1, + "boundingBox": { + "bboxScale": { + "y": 1.0, + "x": 1.0, + "z": 1.0 + }, + "bboxTranslation": { + "y": 0.0, + "x": 0.0, + "z": 0.0 + }, + "bboxRotation": { + "y": 0.0, + "x": 0.0, + "z": 0.0 + } + }, + "minSolidAngleRatio": 0.2, + "maxPoints": 5000000, + "addMaskHelperPoints": false, + "maxPointsPerVoxel": 1000000, + "angleFactor": 15.0, + "partitioning": "singleBlock", + "estimateSpaceFromSfM": true, + "minAngleThreshold": 1.0, + "pixSizeMarginInitCoef": 2.0, + "refineFuse": true, + "maxNbConnectedHelperPoints": 50, + "estimateSpaceMinObservations": 3, + "invertTetrahedronBasedOnNeighborsNbIterations": 10, + "maskBorderSize": 4, + "simGaussianSize": 10.0, + "densifyNbBack": 1 + }, + "nodeType": "Meshing", + "uids": { + "0": "aeb66fceaacd37ecd5bae8364bd9e87ccff2a84c" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 1 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/densePointCloud.abc", + "outputMesh": "{cache}/{nodeType}/{uid0}/mesh.{outputMeshFileTypeValue}" + }, + "position": [ + 1600, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "DepthMapFilter_1": { + "inputs": { + "minNumOfConsistentCamsWithLowSimilarity": 4, + "computeNormalMaps": false, + "minNumOfConsistentCams": 3, + "depthMapsFolder": "{DepthMap_1.output}", + "verboseLevel": "info", + "nNearestCams": 10, + "pixSizeBallWithLowSimilarity": 0, + "pixToleranceFactor": 2.0, + "pixSizeBall": 0, + "minViewAngle": 2.0, + "maxViewAngle": 70.0, + "input": "{DepthMap_1.input}" + }, + "nodeType": "DepthMapFilter", + "uids": { + "0": "4de4649a857d7bd4f7fdfb27470a5087625ff8c9" + }, + "parallelization": { + "blockSize": 10, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 
1400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "ImageMatching_1": { + "inputs": { + "minNbImages": 200, + "nbNeighbors": 5, + "tree": "${ALICEVISION_VOCTREE}", + "maxDescriptors": 500, + "verboseLevel": "info", + "weights": "", + "nbMatches": 40, + "input": "{FeatureExtraction_1.input}", + "method": "SequentialAndVocabularyTree", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ] + }, + "nodeType": "ImageMatching", + "uids": { + "0": "46fb9072ac753d60bec7dda9c8674b0568506ddf" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/imageMatches.txt" + }, + "position": [ + 400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureExtraction_1": { + "inputs": { + "verboseLevel": "info", + "maxThreads": 0, + "describerTypes": [ + "dspsift" + ], + "maxNbFeatures": 0, + "relativePeakThreshold": 0.01, + "forceCpuExtraction": true, + "masksFolder": "", + "contrastFiltering": "GridSort", + "describerQuality": "normal", + "gridFiltering": true, + "input": "{CameraInit_1.output}", + "describerPreset": "normal" + }, + "nodeType": "FeatureExtraction", + "uids": { + "0": "a07fb8d05b63327d05461954c2fd2a00f201275b" + }, + "parallelization": { + "blockSize": 40, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 200, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "StructureFromMotion_1": { + "inputs": { + "localizerEstimatorMaxIterations": 4096, + "minAngleForLandmark": 2.0, + "filterTrackForks": false, + "minNumberOfObservationsForTriangulation": 2, + "maxAngleInitialPair": 40.0, + "observationConstraint": "Scale", + "maxNumberOfMatches": 0, + "localizerEstimator": "acransac", + "describerTypes": "{FeatureMatching_1.describerTypes}", + "lockScenePreviouslyReconstructed": false, + "localBAGraphDistance": 1, + "minNbCamerasToRefinePrincipalPoint": 3, + "lockAllIntrinsics": false, + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "useRigConstraint": true, + "rigMinNbCamerasForCalibration": 20, + "initialPairA": "", + "initialPairB": "", + "interFileExtension": ".abc", + "useLocalBA": true, + "computeStructureColor": true, + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "minInputTrackLength": 2, + "useOnlyMatchesFromInputFolder": false, + "verboseLevel": "info", + "minAngleForTriangulation": 3.0, + "maxReprojectionError": 4.0, + "minAngleInitialPair": 5.0, + "minNumberOfMatches": 0, + "localizerEstimatorError": 0.0 + }, + "nodeType": "StructureFromMotion", + "uids": { + "0": "89c3db0849ba07dfac5e97ca9e27dd690dc476ce" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/sfm.abc", + "extraInfoFolder": "{cache}/{nodeType}/{uid0}/", + "outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm" + }, + "position": [ + 800, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "PrepareDenseScene_1": { + "inputs": { + "imagesFolders": [], + "masksFolders": [], + "outputFileType": "exr", + "verboseLevel": "info", + "saveMatricesTxtFiles": false, + "saveMetadata": true, + "input": "{StructureFromMotion_1.output}", + "evCorrection": false + }, + "nodeType": "PrepareDenseScene", + "uids": { + "0": "894725f62ffeead1307d9d91852b07d7c8453625" + }, + "parallelization": { + "blockSize": 40, + "split": 0, + "size": 0 + }, + "outputs": { + "output": 
"{cache}/{nodeType}/{uid0}/", + "outputUndistorted": "{cache}/{nodeType}/{uid0}/*.{outputFileTypeValue}" + }, + "position": [ + 1000, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "CameraInit_1": { + "inputs": { + "groupCameraFallback": "folder", + "intrinsics": [], + "viewIdRegex": ".*?(\\d+)", + "defaultFieldOfView": 45.0, + "allowedCameraModels": [ + "pinhole", + "radial1", + "radial3", + "brown", + "fisheye4", + "fisheye1", + "3deanamorphic4", + "3deradial4", + "3declassicld" + ], + "verboseLevel": "info", + "viewIdMethod": "metadata", + "viewpoints": [], + "useInternalWhiteBalance": true, + "sensorDatabase": "${ALICEVISION_SENSOR_DB}" + }, + "nodeType": "CameraInit", + "uids": { + "0": "f9436e97e444fa71a05aa5cf7639b206df8ba282" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm" + }, + "position": [ + 0, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "DepthMap_1": { + "inputs": { + "sgmMaxDepthsPerTc": 1500, + "sgmP2": 100.0, + "imagesFolder": "{PrepareDenseScene_1.output}", + "downscale": 2, + "refineMaxTCams": 6, + "exportIntermediateResults": false, + "nbGPUs": 0, + "refineNiters": 100, + "refineGammaP": 8.0, + "refineGammaC": 15.5, + "sgmMaxDepths": 3000, + "sgmUseSfmSeeds": true, + "input": "{PrepareDenseScene_1.input}", + "refineWSH": 3, + "sgmP1": 10.0, + "sgmFilteringAxes": "YX", + "sgmMaxTCams": 10, + "refineSigma": 15, + "sgmScale": -1, + "minViewAngle": 2.0, + "maxViewAngle": 70.0, + "sgmGammaC": 5.5, + "sgmWSH": 4, + "refineNSamplesHalf": 150, + "sgmMaxSideXY": 700, + "refineUseTcOrRcPixSize": false, + "verboseLevel": "info", + "sgmGammaP": 8.0, + "sgmStepXY": -1, + "refineNDepthsToRefine": 31, + "sgmStepZ": -1 + }, + "nodeType": "DepthMap", + "uids": { + "0": "f5ef2fd13dad8f48fcb87e2364e1e821a9db7d2d" + }, + "parallelization": { + "blockSize": 3, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 1200, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "MeshFiltering_1": { + "inputs": { + "filteringSubset": "all", + "outputMeshFileType": "obj", + "inputMesh": "{Meshing_1.outputMesh}", + "filterTrianglesRatio": 0.0, + "smoothingSubset": "all", + "verboseLevel": "info", + "smoothingIterations": 5, + "filterLargeTrianglesFactor": 60.0, + "keepLargestMeshOnly": false, + "smoothingBoundariesNeighbours": 0, + "smoothingLambda": 1.0, + "filteringIterations": 1 + }, + "nodeType": "MeshFiltering", + "uids": { + "0": "febb162c4fbce195f6d312bbb80697720a2f52b9" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 1 + }, + "outputs": { + "outputMesh": "{cache}/{nodeType}/{uid0}/mesh.{outputMeshFileTypeValue}" + }, + "position": [ + 1800, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureMatching_1": { + "inputs": { + "verboseLevel": "info", + "describerTypes": "{FeatureExtraction_1.describerTypes}", + "exportDebugFiles": false, + "crossMatching": false, + "geometricError": 0.0, + "maxMatches": 0, + "matchFromKnownCameraPoses": false, + "savePutativeMatches": false, + "guidedMatching": false, + "imagePairsList": "{ImageMatching_1.output}", + "geometricEstimator": "acransac", + "geometricFilterType": "fundamental_matrix", + "maxIteration": 2048, + "distanceRatio": 0.8, + "input": "{ImageMatching_1.input}", + "photometricMatchingMethod": "ANN_L2", + "knownPosesGeometricErrorMax": 5.0, + "featuresFolders": "{ImageMatching_1.featuresFolders}" + }, + 
"nodeType": "FeatureMatching", + "uids": { + "0": "3b1f2c3fcfe0b94c65627c397a2671ba7594827d" + }, + "parallelization": { + "blockSize": 20, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 600, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + } + } +} \ No newline at end of file diff --git a/meshroom/pipelines/photogrammetryAndCameraTracking.mg b/meshroom/pipelines/photogrammetryAndCameraTracking.mg new file mode 100644 index 00000000..f01f7ffd --- /dev/null +++ b/meshroom/pipelines/photogrammetryAndCameraTracking.mg @@ -0,0 +1,486 @@ +{ + "header": { + "pipelineVersion": "2.2", + "releaseVersion": "2021.1.0", + "fileVersion": "1.1", + "nodesVersions": { + "ExportAnimatedCamera": "2.0", + "FeatureMatching": "2.0", + "DistortionCalibration": "2.0", + "CameraInit": "7.0", + "ImageMatchingMultiSfM": "1.0", + "ImageMatching": "2.0", + "FeatureExtraction": "1.1", + "StructureFromMotion": "2.0" + } + }, + "graph": { + "DistortionCalibration_1": { + "inputs": { + "verboseLevel": "info", + "input": "{CameraInit_2.output}", + "lensGrid": [] + }, + "nodeType": "DistortionCalibration", + "uids": { + "0": "8afea9d171904cdb6ba1c0b116cb60de3ccb6fb4" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm" + }, + "position": [ + 200, + 320 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "StructureFromMotion_1": { + "inputs": { + "localizerEstimatorMaxIterations": 4096, + "minAngleForLandmark": 2.0, + "filterTrackForks": false, + "minNumberOfObservationsForTriangulation": 2, + "maxAngleInitialPair": 40.0, + "observationConstraint": "Scale", + "maxNumberOfMatches": 0, + "localizerEstimator": "acransac", + "describerTypes": "{FeatureMatching_1.describerTypes}", + "lockScenePreviouslyReconstructed": false, + "localBAGraphDistance": 1, + "minNbCamerasToRefinePrincipalPoint": 3, + "lockAllIntrinsics": false, + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "useRigConstraint": true, + "rigMinNbCamerasForCalibration": 20, + "initialPairA": "", + "initialPairB": "", + "interFileExtension": ".abc", + "useLocalBA": true, + "computeStructureColor": true, + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "minInputTrackLength": 2, + "useOnlyMatchesFromInputFolder": false, + "verboseLevel": "info", + "minAngleForTriangulation": 3.0, + "maxReprojectionError": 4.0, + "minAngleInitialPair": 5.0, + "minNumberOfMatches": 0, + "localizerEstimatorError": 0.0 + }, + "nodeType": "StructureFromMotion", + "uids": { + "0": "89c3db0849ba07dfac5e97ca9e27dd690dc476ce" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/sfm.abc", + "extraInfoFolder": "{cache}/{nodeType}/{uid0}/", + "outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm" + }, + "position": [ + 800, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureExtraction_1": { + "inputs": { + "verboseLevel": "info", + "maxThreads": 0, + "describerTypes": [ + "dspsift" + ], + "maxNbFeatures": 0, + "relativePeakThreshold": 0.01, + "forceCpuExtraction": true, + "masksFolder": "", + "contrastFiltering": "GridSort", + "describerQuality": "normal", + "gridFiltering": true, + "input": "{CameraInit_1.output}", + "describerPreset": "normal" + }, + "nodeType": "FeatureExtraction", + "uids": { + "0": "a07fb8d05b63327d05461954c2fd2a00f201275b" + }, + 
"parallelization": { + "blockSize": 40, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 200, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "CameraInit_2": { + "inputs": { + "groupCameraFallback": "folder", + "intrinsics": [], + "viewIdRegex": ".*?(\\d+)", + "defaultFieldOfView": 45.0, + "allowedCameraModels": [ + "pinhole", + "radial1", + "radial3", + "brown", + "fisheye4", + "fisheye1", + "3deanamorphic4", + "3deradial4", + "3declassicld" + ], + "verboseLevel": "info", + "viewIdMethod": "metadata", + "viewpoints": [], + "useInternalWhiteBalance": true, + "sensorDatabase": "${ALICEVISION_SENSOR_DB}" + }, + "nodeType": "CameraInit", + "uids": { + "0": "f9436e97e444fa71a05aa5cf7639b206df8ba282" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm" + }, + "position": [ + 0, + 160 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "ExportAnimatedCamera_1": { + "inputs": { + "exportFullROD": false, + "undistortedImageType": "exr", + "exportUVMaps": true, + "verboseLevel": "info", + "sfmDataFilter": "{StructureFromMotion_1.output}", + "exportUndistortedImages": false, + "input": "{StructureFromMotion_2.output}", + "viewFilter": "", + "correctPrincipalPoint": true + }, + "nodeType": "ExportAnimatedCamera", + "uids": { + "0": "6f482ab9e161bd79341c5cd4a43ab9f8e39aec1f" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 1 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/", + "outputUndistorted": "{cache}/{nodeType}/{uid0}/undistort", + "outputCamera": "{cache}/{nodeType}/{uid0}/camera.abc" + }, + "position": [ + 1600, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "CameraInit_1": { + "inputs": { + "groupCameraFallback": "folder", + "intrinsics": [], + "viewIdRegex": ".*?(\\d+)", + "defaultFieldOfView": 45.0, + "allowedCameraModels": [ + "pinhole", + "radial1", + "radial3", + "brown", + "fisheye4", + "fisheye1", + "3deanamorphic4", + "3deradial4", + "3declassicld" + ], + "verboseLevel": "info", + "viewIdMethod": "metadata", + "viewpoints": [], + "useInternalWhiteBalance": true, + "sensorDatabase": "${ALICEVISION_SENSOR_DB}" + }, + "nodeType": "CameraInit", + "uids": { + "0": "f9436e97e444fa71a05aa5cf7639b206df8ba282" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm" + }, + "position": [ + 0, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "ImageMatchingMultiSfM_1": { + "inputs": { + "minNbImages": 200, + "matchingMode": "a/a+a/b", + "nbNeighbors": 10, + "tree": "${ALICEVISION_VOCTREE}", + "nbMatches": 5, + "verboseLevel": "info", + "weights": "", + "maxDescriptors": 500, + "input": "{FeatureExtraction_2.input}", + "inputB": "{StructureFromMotion_1.output}", + "method": "SequentialAndVocabularyTree", + "featuresFolders": [ + "{FeatureExtraction_2.output}" + ] + }, + "nodeType": "ImageMatchingMultiSfM", + "uids": { + "0": "ef147c1bc069c7689863c7e14cdbbaca86af4006" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/imageMatches.txt", + "outputCombinedSfM": "{cache}/{nodeType}/{uid0}/combineSfM.sfm" + }, + "position": [ + 1000, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureExtraction_2": { + "inputs": { + "verboseLevel": "info", + "maxThreads": 0, + 
"describerTypes": [ + "dspsift" + ], + "maxNbFeatures": 0, + "relativePeakThreshold": 0.01, + "forceCpuExtraction": true, + "masksFolder": "", + "contrastFiltering": "GridSort", + "describerQuality": "normal", + "gridFiltering": true, + "input": "{CameraInit_2.output}", + "describerPreset": "normal" + }, + "nodeType": "FeatureExtraction", + "uids": { + "0": "a07fb8d05b63327d05461954c2fd2a00f201275b" + }, + "parallelization": { + "blockSize": 40, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 200, + 160 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "ImageMatching_1": { + "inputs": { + "minNbImages": 200, + "nbNeighbors": 5, + "tree": "${ALICEVISION_VOCTREE}", + "maxDescriptors": 500, + "verboseLevel": "info", + "weights": "", + "nbMatches": 40, + "input": "{FeatureExtraction_1.input}", + "method": "SequentialAndVocabularyTree", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ] + }, + "nodeType": "ImageMatching", + "uids": { + "0": "46fb9072ac753d60bec7dda9c8674b0568506ddf" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/imageMatches.txt" + }, + "position": [ + 400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureMatching_2": { + "inputs": { + "verboseLevel": "info", + "describerTypes": "{FeatureExtraction_2.describerTypes}", + "exportDebugFiles": false, + "crossMatching": false, + "geometricError": 0.0, + "maxMatches": 0, + "matchFromKnownCameraPoses": false, + "savePutativeMatches": false, + "guidedMatching": false, + "imagePairsList": "{ImageMatchingMultiSfM_1.output}", + "geometricEstimator": "acransac", + "geometricFilterType": "fundamental_matrix", + "maxIteration": 2048, + "distanceRatio": 0.8, + "input": "{DistortionCalibration_1.outSfMData}", + "photometricMatchingMethod": "ANN_L2", + "knownPosesGeometricErrorMax": 5.0, + "featuresFolders": "{ImageMatchingMultiSfM_1.featuresFolders}" + }, + "nodeType": "FeatureMatching", + "uids": { + "0": "7bb42f40b3f607da7e9f5f432409ddf6ef9c5951" + }, + "parallelization": { + "blockSize": 20, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 1200, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureMatching_1": { + "inputs": { + "verboseLevel": "info", + "describerTypes": "{FeatureExtraction_1.describerTypes}", + "exportDebugFiles": false, + "crossMatching": false, + "geometricError": 0.0, + "maxMatches": 0, + "matchFromKnownCameraPoses": false, + "savePutativeMatches": false, + "guidedMatching": false, + "imagePairsList": "{ImageMatching_1.output}", + "geometricEstimator": "acransac", + "geometricFilterType": "fundamental_matrix", + "maxIteration": 2048, + "distanceRatio": 0.8, + "input": "{ImageMatching_1.input}", + "photometricMatchingMethod": "ANN_L2", + "knownPosesGeometricErrorMax": 5.0, + "featuresFolders": "{ImageMatching_1.featuresFolders}" + }, + "nodeType": "FeatureMatching", + "uids": { + "0": "3b1f2c3fcfe0b94c65627c397a2671ba7594827d" + }, + "parallelization": { + "blockSize": 20, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 600, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "StructureFromMotion_2": { + "inputs": { + "localizerEstimatorMaxIterations": 4096, + "minAngleForLandmark": 0.5, + "filterTrackForks": false, + "minNumberOfObservationsForTriangulation": 3, + 
"maxAngleInitialPair": 40.0, + "observationConstraint": "Scale", + "maxNumberOfMatches": 0, + "localizerEstimator": "acransac", + "describerTypes": "{FeatureMatching_2.describerTypes}", + "lockScenePreviouslyReconstructed": false, + "localBAGraphDistance": 1, + "minNbCamerasToRefinePrincipalPoint": 3, + "lockAllIntrinsics": false, + "input": "{FeatureMatching_2.input}", + "featuresFolders": "{FeatureMatching_2.featuresFolders}", + "useRigConstraint": true, + "rigMinNbCamerasForCalibration": 20, + "initialPairA": "", + "initialPairB": "", + "interFileExtension": ".abc", + "useLocalBA": true, + "computeStructureColor": true, + "matchesFolders": [ + "{FeatureMatching_2.output}" + ], + "minInputTrackLength": 5, + "useOnlyMatchesFromInputFolder": false, + "verboseLevel": "info", + "minAngleForTriangulation": 1.0, + "maxReprojectionError": 4.0, + "minAngleInitialPair": 5.0, + "minNumberOfMatches": 0, + "localizerEstimatorError": 0.0 + }, + "nodeType": "StructureFromMotion", + "uids": { + "0": "4bc466c45bc7b430553752d1eb1640c581c43e36" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/sfm.abc", + "extraInfoFolder": "{cache}/{nodeType}/{uid0}/", + "outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm" + }, + "position": [ + 1400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + } + } +} \ No newline at end of file diff --git a/meshroom/pipelines/photogrammetryDraft.mg b/meshroom/pipelines/photogrammetryDraft.mg new file mode 100644 index 00000000..4f37896d --- /dev/null +++ b/meshroom/pipelines/photogrammetryDraft.mg @@ -0,0 +1,404 @@ +{ + "header": { + "nodesVersions": { + "FeatureMatching": "2.0", + "MeshFiltering": "3.0", + "Texturing": "6.0", + "Meshing": "7.0", + "CameraInit": "7.0", + "ImageMatching": "2.0", + "FeatureExtraction": "1.1", + "StructureFromMotion": "2.0" + }, + "releaseVersion": "2021.1.0", + "fileVersion": "1.1" + }, + "graph": { + "Texturing_1": { + "inputs": { + "imagesFolder": "", + "downscale": 2, + "bumpMapping": { + "normalFileType": "exr", + "enable": true, + "bumpType": "Normal", + "heightFileType": "exr" + }, + "forceVisibleByAllVertices": false, + "fillHoles": false, + "multiBandDownscale": 4, + "useScore": true, + "displacementMapping": { + "displacementMappingFileType": "exr", + "enable": true + }, + "outputMeshFileType": "obj", + "angleHardThreshold": 90.0, + "textureSide": 8192, + "processColorspace": "sRGB", + "input": "{Meshing_1.output}", + "useUDIM": true, + "subdivisionTargetRatio": 0.8, + "padding": 5, + "inputRefMesh": "", + "correctEV": false, + "visibilityRemappingMethod": "PullPush", + "inputMesh": "{MeshFiltering_1.outputMesh}", + "verboseLevel": "info", + "colorMapping": { + "enable": true, + "colorMappingFileType": "exr" + }, + "bestScoreThreshold": 0.1, + "unwrapMethod": "Basic", + "multiBandNbContrib": { + "high": 1, + "midHigh": 5, + "low": 0, + "midLow": 10 + }, + "flipNormals": false + }, + "nodeType": "Texturing", + "uids": { + "0": "1ed1516bf83493071547e69146be3f1218012e25" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 1 + }, + "outputs": { + "outputTextures": "{cache}/{nodeType}/{uid0}/texture_*.exr", + "outputMesh": "{cache}/{nodeType}/{uid0}/texturedMesh.{outputMeshFileTypeValue}", + "outputMaterial": "{cache}/{nodeType}/{uid0}/texturedMesh.mtl", + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 1400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "Meshing_1": { + "inputs": { + 
"exportDebugTetrahedralization": false, + "useBoundingBox": false, + "maxInputPoints": 50000000, + "repartition": "multiResolution", + "helperPointsGridSize": 10, + "seed": 0, + "voteFilteringForWeaklySupportedSurfaces": true, + "verboseLevel": "info", + "outputMeshFileType": "obj", + "simGaussianSizeInit": 10.0, + "nPixelSizeBehind": 4.0, + "fullWeight": 1.0, + "depthMapsFolder": "", + "densify": false, + "simFactor": 15.0, + "maskHelperPointsWeight": 1.0, + "densifyScale": 20.0, + "input": "{StructureFromMotion_1.output}", + "addLandmarksToTheDensePointCloud": false, + "voteMarginFactor": 4.0, + "saveRawDensePointCloud": false, + "contributeMarginFactor": 2.0, + "estimateSpaceMinObservationAngle": 10, + "nbSolidAngleFilteringIterations": 2, + "minStep": 2, + "colorizeOutput": false, + "pixSizeMarginFinalCoef": 4.0, + "densifyNbFront": 1, + "boundingBox": { + "bboxScale": { + "y": 1.0, + "x": 1.0, + "z": 1.0 + }, + "bboxTranslation": { + "y": 0.0, + "x": 0.0, + "z": 0.0 + }, + "bboxRotation": { + "y": 0.0, + "x": 0.0, + "z": 0.0 + } + }, + "minSolidAngleRatio": 0.2, + "maxPoints": 5000000, + "addMaskHelperPoints": false, + "maxPointsPerVoxel": 1000000, + "angleFactor": 15.0, + "partitioning": "singleBlock", + "estimateSpaceFromSfM": true, + "minAngleThreshold": 1.0, + "pixSizeMarginInitCoef": 2.0, + "refineFuse": true, + "maxNbConnectedHelperPoints": 50, + "estimateSpaceMinObservations": 3, + "invertTetrahedronBasedOnNeighborsNbIterations": 10, + "maskBorderSize": 4, + "simGaussianSize": 10.0, + "densifyNbBack": 1 + }, + "nodeType": "Meshing", + "uids": { + "0": "dc3d06f150a2601334a44174aa8e5523d3055468" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 1 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/densePointCloud.abc", + "outputMesh": "{cache}/{nodeType}/{uid0}/mesh.{outputMeshFileTypeValue}" + }, + "position": [ + 1000, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "ImageMatching_1": { + "inputs": { + "minNbImages": 200, + "nbNeighbors": 5, + "tree": "${ALICEVISION_VOCTREE}", + "maxDescriptors": 500, + "verboseLevel": "info", + "weights": "", + "nbMatches": 40, + "input": "{FeatureExtraction_1.input}", + "method": "SequentialAndVocabularyTree", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ] + }, + "nodeType": "ImageMatching", + "uids": { + "0": "46fb9072ac753d60bec7dda9c8674b0568506ddf" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/imageMatches.txt" + }, + "position": [ + 400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureExtraction_1": { + "inputs": { + "verboseLevel": "info", + "maxThreads": 0, + "describerTypes": [ + "dspsift" + ], + "maxNbFeatures": 0, + "relativePeakThreshold": 0.01, + "forceCpuExtraction": true, + "masksFolder": "", + "contrastFiltering": "GridSort", + "describerQuality": "normal", + "gridFiltering": true, + "input": "{CameraInit_1.output}", + "describerPreset": "normal" + }, + "nodeType": "FeatureExtraction", + "uids": { + "0": "a07fb8d05b63327d05461954c2fd2a00f201275b" + }, + "parallelization": { + "blockSize": 40, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 200, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "StructureFromMotion_1": { + "inputs": { + "localizerEstimatorMaxIterations": 4096, + "minAngleForLandmark": 2.0, + "filterTrackForks": false, + "minNumberOfObservationsForTriangulation": 2, + 
"maxAngleInitialPair": 40.0, + "observationConstraint": "Scale", + "maxNumberOfMatches": 0, + "localizerEstimator": "acransac", + "describerTypes": "{FeatureMatching_1.describerTypes}", + "lockScenePreviouslyReconstructed": false, + "localBAGraphDistance": 1, + "minNbCamerasToRefinePrincipalPoint": 3, + "lockAllIntrinsics": false, + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "useRigConstraint": true, + "rigMinNbCamerasForCalibration": 20, + "initialPairA": "", + "initialPairB": "", + "interFileExtension": ".abc", + "useLocalBA": true, + "computeStructureColor": true, + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "minInputTrackLength": 2, + "useOnlyMatchesFromInputFolder": false, + "verboseLevel": "info", + "minAngleForTriangulation": 3.0, + "maxReprojectionError": 4.0, + "minAngleInitialPair": 5.0, + "minNumberOfMatches": 0, + "localizerEstimatorError": 0.0 + }, + "nodeType": "StructureFromMotion", + "uids": { + "0": "89c3db0849ba07dfac5e97ca9e27dd690dc476ce" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/sfm.abc", + "extraInfoFolder": "{cache}/{nodeType}/{uid0}/", + "outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm" + }, + "position": [ + 800, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "CameraInit_1": { + "inputs": { + "groupCameraFallback": "folder", + "intrinsics": [], + "viewIdRegex": ".*?(\\d+)", + "defaultFieldOfView": 45.0, + "allowedCameraModels": [ + "pinhole", + "radial1", + "radial3", + "brown", + "fisheye4", + "fisheye1", + "3deanamorphic4", + "3deradial4", + "3declassicld" + ], + "verboseLevel": "info", + "viewIdMethod": "metadata", + "viewpoints": [], + "useInternalWhiteBalance": true, + "sensorDatabase": "${ALICEVISION_SENSOR_DB}" + }, + "nodeType": "CameraInit", + "uids": { + "0": "f9436e97e444fa71a05aa5cf7639b206df8ba282" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm" + }, + "position": [ + 0, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "MeshFiltering_1": { + "inputs": { + "filteringSubset": "all", + "outputMeshFileType": "obj", + "inputMesh": "{Meshing_1.outputMesh}", + "filterTrianglesRatio": 0.0, + "smoothingSubset": "all", + "verboseLevel": "info", + "smoothingIterations": 5, + "filterLargeTrianglesFactor": 60.0, + "keepLargestMeshOnly": false, + "smoothingBoundariesNeighbours": 0, + "smoothingLambda": 1.0, + "filteringIterations": 1 + }, + "nodeType": "MeshFiltering", + "uids": { + "0": "057d1647de39a617f79aad02a721938e5625ff64" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 1 + }, + "outputs": { + "outputMesh": "{cache}/{nodeType}/{uid0}/mesh.{outputMeshFileTypeValue}" + }, + "position": [ + 1200, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureMatching_1": { + "inputs": { + "verboseLevel": "info", + "describerTypes": "{FeatureExtraction_1.describerTypes}", + "exportDebugFiles": false, + "crossMatching": false, + "geometricError": 0.0, + "maxMatches": 0, + "matchFromKnownCameraPoses": false, + "savePutativeMatches": false, + "guidedMatching": false, + "imagePairsList": "{ImageMatching_1.output}", + "geometricEstimator": "acransac", + "geometricFilterType": "fundamental_matrix", + "maxIteration": 2048, + "distanceRatio": 0.8, + "input": "{ImageMatching_1.input}", + "photometricMatchingMethod": "ANN_L2", + 
"knownPosesGeometricErrorMax": 5.0, + "featuresFolders": "{ImageMatching_1.featuresFolders}" + }, + "nodeType": "FeatureMatching", + "uids": { + "0": "3b1f2c3fcfe0b94c65627c397a2671ba7594827d" + }, + "parallelization": { + "blockSize": 20, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, + "position": [ + 600, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + } + } +} \ No newline at end of file diff --git a/meshroom/ui/app.py b/meshroom/ui/app.py index 110daec4..12eadbdf 100644 --- a/meshroom/ui/app.py +++ b/meshroom/ui/app.py @@ -71,8 +71,9 @@ class MeshroomApp(QApplication): help='Import images to reconstruct from specified folder and sub-folders.') parser.add_argument('-s', '--save', metavar='PROJECT.mg', type=str, default='', help='Save the created scene.') - parser.add_argument('-p', '--pipeline', metavar='MESHROOM_FILE/photogrammetry/panoramaHdr/panoramaFisheyeHdr', type=str, default=os.environ.get("MESHROOM_DEFAULT_PIPELINE", "photogrammetry"), - help='Override the default Meshroom pipeline with this external graph.') + parser.add_argument('-p', '--pipeline', metavar="FILE.mg/" + "/".join(meshroom.core.pipelineTemplates), type=str, + default=os.environ.get("MESHROOM_DEFAULT_PIPELINE", "photogrammetry"), + help='Override the default Meshroom pipeline with this external or template graph.') parser.add_argument("--verbose", help="Verbosity level", default='warning', choices=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],) diff --git a/meshroom/ui/reconstruction.py b/meshroom/ui/reconstruction.py index 3915c42a..2dd249f8 100755 --- a/meshroom/ui/reconstruction.py +++ b/meshroom/ui/reconstruction.py @@ -483,27 +483,15 @@ class Reconstruction(UIGraph): @Slot(str) def new(self, pipeline=None): p = pipeline if pipeline != None else self._defaultPipeline - """ Create a new photogrammetry pipeline. """ - if p.lower() == "photogrammetry": - # default photogrammetry pipeline - self.setGraph(multiview.photogrammetry()) - elif p.lower() == "panoramahdr": - # default panorama hdr pipeline - self.setGraph(multiview.panoramaHdr()) - elif p.lower() == "panoramafisheyehdr": - # default panorama fisheye hdr pipeline - self.setGraph(multiview.panoramaFisheyeHdr()) - elif p.lower() == "photogrammetryandcameratracking": - # default camera tracking pipeline - self.setGraph(multiview.photogrammetryAndCameraTracking()) - elif p.lower() == "cameratracking": - # default camera tracking pipeline - self.setGraph(multiview.cameraTracking()) - elif p.lower() == "photogrammetrydraft": - # photogrammetry pipeline in draft mode (no cuda) - self.setGraph(multiview.photogrammetryDraft()) + """ Create a new pipeline. """ + # Lower the input and the dictionary keys to make sure that all input types can be found: + # - correct pipeline name but the case does not match (e.g. 
panoramaHDR instead of panoramaHdr) + # - lowercase pipeline name given through the "New Pipeline" menu + loweredPipelineTemplates = dict((k.lower(), v) for k, v in meshroom.core.pipelineTemplates.items()) + if p.lower() in loweredPipelineTemplates: + self.load(loweredPipelineTemplates[p.lower()], setupProjectFile=False) else: - # use the user-provided default photogrammetry project file + # use the user-provided default project file self.load(p, setupProjectFile=False) @Slot(str, result=bool) diff --git a/tests/test_multiviewPipeline.py b/tests/test_multiviewPipeline.py index c5ef6d86..4c072fba 100644 --- a/tests/test_multiviewPipeline.py +++ b/tests/test_multiviewPipeline.py @@ -9,18 +9,34 @@ from meshroom.core.node import Node def test_multiviewPipeline(): - graph1 = meshroom.multiview.photogrammetry(inputImages=['/non/existing/fileA']) - graph2 = meshroom.multiview.photogrammetry(inputImages=[]) - graph2b = meshroom.multiview.photogrammetry(inputImages=[]) - graph3 = meshroom.multiview.photogrammetry(inputImages=['/non/existing/file1', '/non/existing/file2']) - graph4 = meshroom.multiview.photogrammetry(inputViewpoints=[ + graph1InputImages = ['/non/existing/fileA'] + graph1 = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"]) + graph1CameraInit = graph1.node("CameraInit_1") + graph1CameraInit.viewpoints.extend([{'path': image} for image in graph1InputImages]) + + graph2InputImages = [] # common to graph2 and graph2b + graph2 = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"]) + graph2CameraInit = graph2.node("CameraInit_1") + graph2CameraInit.viewpoints.extend([{'path': image} for image in graph2InputImages]) + graph2b = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"]) + graph2bCameraInit = graph2b.node("CameraInit_1") + graph2bCameraInit.viewpoints.extend([{'path': image} for image in graph2InputImages]) + + graph3InputImages = ['/non/existing/file1', '/non/existing/file2'] + graph3 = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"]) + graph3CameraInit = graph3.node("CameraInit_1") + graph3CameraInit.viewpoints.extend([{'path': image} for image in graph3InputImages]) + + graph4InputViewpoints = [ {'path': '/non/existing/file1', 'intrinsicId': 50}, {'path': '/non/existing/file2', 'intrinsicId': 55} - ]) - graph4b = meshroom.multiview.photogrammetry(inputViewpoints=[ - {'path': '/non/existing/file1', 'intrinsicId': 50}, - {'path': '/non/existing/file2', 'intrinsicId': 55} - ]) + ] # common to graph4 and graph4b + graph4 = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"]) + graph4CameraInit = graph4.node("CameraInit_1") + graph4CameraInit.viewpoints.extend(graph4InputViewpoints) + graph4b = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"]) + graph4bCameraInit = graph4b.node("CameraInit_1") + graph4bCameraInit.viewpoints.extend(graph4InputViewpoints) assert graph1.findNode('CameraInit').viewpoints.at(0).path.value == '/non/existing/fileA' assert len(graph2.findNode('CameraInit').viewpoints) == 0 From 578c1f01c031deda131aa0f8ee16ea7218d77f4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Wed, 13 Jul 2022 18:20:34 +0200 Subject: [PATCH 07/21] [ui] Generate the "New Pipeline" menu based on the found project files The "New Pipeline" menu is automatically filled with the list of the project files that were found dynamically. Pipelines can thus be initialized with templates without restarting Meshroom app. 
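As an aside, the display-name rule introduced in the change below can be illustrated in isolation. This standalone sketch simply mirrors the regex used in the diff and is not itself part of the patch:

    import re

    def displayName(key):
        # Split a camelCase template key on uppercase letters,
        # e.g. "panoramaHdr" -> "Panorama Hdr".
        return " ".join(re.findall('[A-Z][^A-Z]*', key[0].upper() + key[1:]))

    assert displayName("panoramaHdr") == "Panorama Hdr"
    assert displayName("photogrammetryDraft") == "Photogrammetry Draft"
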
--- meshroom/ui/app.py | 14 +++++++++- meshroom/ui/qml/main.qml | 60 ++++++++++++++++++++-------------------- 2 files changed, 43 insertions(+), 31 deletions(-) diff --git a/meshroom/ui/app.py b/meshroom/ui/app.py index 12eadbdf..38e1bcb1 100644 --- a/meshroom/ui/app.py +++ b/meshroom/ui/app.py @@ -1,5 +1,6 @@ import logging import os +import re import argparse from PySide2.QtCore import Qt, QUrl, Slot, QJsonValue, Property, Signal, qInstallMessageHandler, QtMsgType, QSettings @@ -177,6 +178,16 @@ class MeshroomApp(QApplication): self.engine.load(os.path.normpath(url)) + def _pipelineTemplateFiles(self): + templates = [] + for key in sorted(meshroom.core.pipelineTemplates.keys()): + # Use uppercase letters in the names as separators to format the templates' name nicely + # e.g: the template "panoramaHdr" will be shown as "Panorama Hdr" in the menu + name = " ".join(re.findall('[A-Z][^A-Z]*', key[0].upper() + key[1:])) + variant = {"name": name, "key": key, "path": meshroom.core.pipelineTemplates[key]} + templates.append(variant) + return templates + def _recentProjectFiles(self): projects = [] settings = QSettings() @@ -317,6 +328,7 @@ class MeshroomApp(QApplication): } ] + pipelineTemplateFilesChanged = Signal() recentProjectFilesChanged = Signal() + pipelineTemplateFiles = Property("QVariantList", _pipelineTemplateFiles, notify=pipelineTemplateFilesChanged) recentProjectFiles = Property("QVariantList", _recentProjectFiles, notify=recentProjectFilesChanged) - diff --git a/meshroom/ui/qml/main.qml b/meshroom/ui/qml/main.qml index 2b06ea18..5b4197d6 100755 --- a/meshroom/ui/qml/main.qml +++ b/meshroom/ui/qml/main.qml @@ -409,38 +409,38 @@ ApplicationWindow { onTriggered: ensureSaved(function() { _reconstruction.new() }) } Menu { + id: newPipelineMenu title: "New Pipeline" - TextMetrics { - id: textMetrics - font: action_PG_CT.font - elide: Text.ElideNone - text: action_PG_CT.text + enabled: newPipelineMenuItems.model != undefined && newPipelineMenuItems.model.length > 0 + property int maxWidth: 1000 + property int fullWidth: { + var result = 0; + for (var i = 0; i < count; ++i) { + var item = itemAt(i); + result = Math.max(item.implicitWidth + item.padding * 2, result); + } + return result; } - implicitWidth: textMetrics.width + 10 // largest text width + margin - Action { - text: "Photogrammetry" - onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetry") }) - } - Action { - text: "Panorama HDR" - onTriggered: ensureSaved(function() { _reconstruction.new("panoramahdr") }) - } - Action { - text: "Panorama Fisheye HDR" - onTriggered: ensureSaved(function() { _reconstruction.new("panoramafisheyehdr") }) - } - Action { - id: action_PG_CT - text: "Photogrammetry and Camera Tracking (experimental)" - onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetryandcameratracking") }) - } - Action { - text: "Camera Tracking (experimental)" - onTriggered: ensureSaved(function() { _reconstruction.new("cameratracking") }) - } - Action { - text: "Photogrammetry Draft (No CUDA)" - onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetrydraft") }) + implicitWidth: fullWidth + Repeater { + id: newPipelineMenuItems + model: MeshroomApp.pipelineTemplateFiles + MenuItem { + onTriggered: ensureSaved(function() { + _reconstruction.new(modelData["key"]) + }) + + text: fileTextMetrics.elidedText + TextMetrics { + id: fileTextMetrics + text: modelData["name"] + elide: Text.ElideLeft + elideWidth: newPipelineMenu.maxWidth + } + ToolTip.text: modelData["path"] + 
ToolTip.visible: hovered + ToolTip.delay: 200 + } } } Action { From 47a99963fb88ee630667e4ab0d61c1067879dbca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Tue, 19 Jul 2022 10:49:52 +0200 Subject: [PATCH 08/21] [ui] Ensure the edge connecting two nodes is following the mouse Moving the mouse rapidly when starting to connect two nodes' attributes could lead to an unwanted offset between the tip of the edge and the mouse's position; it forced the user to move the mouse further than the attribute pin they wanted to connect to in order to actually be able to connect to it. --- meshroom/ui/qml/GraphEditor/AttributePin.qml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/meshroom/ui/qml/GraphEditor/AttributePin.qml b/meshroom/ui/qml/GraphEditor/AttributePin.qml index 9b38b3d1..b020a98f 100755 --- a/meshroom/ui/qml/GraphEditor/AttributePin.qml +++ b/meshroom/ui/qml/GraphEditor/AttributePin.qml @@ -131,14 +131,16 @@ RowLayout { height: 4 Drag.keys: [inputDragTarget.objectName] Drag.active: inputConnectMA.drag.active - Drag.hotSpot.x: width*0.5 - Drag.hotSpot.y: height*0.5 + Drag.hotSpot.x: width * 0.5 + Drag.hotSpot.y: height * 0.5 } MouseArea { id: inputConnectMA drag.target: attribute.isReadOnly ? undefined : inputDragTarget drag.threshold: 0 + // Move the edge's tip straight to the the current mouse position instead of waiting after the drag operation has started + drag.smoothed: false enabled: !root.readOnly anchors.fill: parent // use the same negative margins as DropArea to ease pin selection @@ -281,6 +283,8 @@ RowLayout { id: outputConnectMA drag.target: outputDragTarget drag.threshold: 0 + // Move the edge's tip straight to the the current mouse position instead of waiting after the drag operation has started + drag.smoothed: false anchors.fill: parent // use the same negative margins as DropArea to ease pin selection anchors.margins: outputDropArea.anchors.margins @@ -346,6 +350,7 @@ RowLayout { } StateChangeScript { script: { + // Add the right offset if the initial click is not exactly at the center of the connection circle. var pos = inputDragTarget.mapFromItem(inputConnectMA, inputConnectMA.mouseX, inputConnectMA.mouseY); inputDragTarget.x = pos.x - inputDragTarget.width/2; inputDragTarget.y = pos.y - inputDragTarget.height/2; From 352c8403e10b342901a07a7b9e957cf3f0bd7cb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Tue, 19 Jul 2022 10:55:50 +0200 Subject: [PATCH 09/21] [ui] Fix vertical alignment of attributes' pins The attributes' pins are now vertically aligned with their matching labels. 
--- meshroom/ui/qml/GraphEditor/AttributePin.qml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/meshroom/ui/qml/GraphEditor/AttributePin.qml b/meshroom/ui/qml/GraphEditor/AttributePin.qml index b020a98f..184ae451 100755 --- a/meshroom/ui/qml/GraphEditor/AttributePin.qml +++ b/meshroom/ui/qml/GraphEditor/AttributePin.qml @@ -127,8 +127,8 @@ RowLayout { property bool dragAccepted: false anchors.verticalCenter: parent.verticalCenter anchors.horizontalCenter: parent.horizontalCenter - width: 4 - height: 4 + width: parent.width + height: parent.height Drag.keys: [inputDragTarget.objectName] Drag.active: inputConnectMA.drag.active Drag.hotSpot.x: width * 0.5 @@ -176,6 +176,8 @@ RowLayout { Layout.fillWidth: true implicitHeight: childrenRect.height + Layout.alignment: Qt.AlignVCenter + Label { id: nameLabel @@ -271,8 +273,8 @@ RowLayout { property bool dropAccepted: false anchors.horizontalCenter: parent.horizontalCenter anchors.verticalCenter: parent.verticalCenter - width: 4 - height: 4 + width: parent.width + height: parent.height Drag.keys: [outputDragTarget.objectName] Drag.active: outputConnectMA.drag.active Drag.hotSpot.x: width*0.5 From 8310b5c532a6a0c0f9c936a4b52907b47a98c3b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Tue, 19 Jul 2022 10:57:16 +0200 Subject: [PATCH 10/21] [ui] Highlight attributes' pins correctly Harmonize the highlighting of the attributes' pins when they are hovered and clicked on, may they already be connected to other nodes or not. --- meshroom/ui/qml/GraphEditor/AttributePin.qml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meshroom/ui/qml/GraphEditor/AttributePin.qml b/meshroom/ui/qml/GraphEditor/AttributePin.qml index 184ae451..d11ef2e9 100755 --- a/meshroom/ui/qml/GraphEditor/AttributePin.qml +++ b/meshroom/ui/qml/GraphEditor/AttributePin.qml @@ -59,7 +59,7 @@ RowLayout { color: Colors.sysPalette.base Rectangle { - visible: inputConnectMA.containsMouse || childrenRepeater.count > 0 || (attribute && attribute.isLink) + visible: inputConnectMA.containsMouse || childrenRepeater.count > 0 || (attribute && attribute.isLink) || inputConnectMA.drag.active || inputDropArea.containsDrag radius: isList ? 0 : 2 anchors.fill: parent anchors.margins: 2 @@ -209,7 +209,7 @@ RowLayout { color: Colors.sysPalette.base Rectangle { - visible: attribute.hasOutputConnections + visible: attribute.hasOutputConnections || outputConnectMA.containsMouse || outputConnectMA.drag.active || outputDropArea.containsDrag radius: isList ? 
0 : 2 anchors.fill: parent anchors.margins: 2 From f2a904ac9be93fc22ca9687ba2f6aa9c2040f34c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Tue, 19 Jul 2022 14:52:18 +0200 Subject: [PATCH 11/21] Fix compatibility with Python 3 - Import "collections" correctly depending on the version of Python - Fix regex that raised deprecation warnings in Python 3 --- meshroom/core/attribute.py | 2 +- meshroom/ui/reconstruction.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/meshroom/core/attribute.py b/meshroom/core/attribute.py index 81134918..2a23a465 100644 --- a/meshroom/core/attribute.py +++ b/meshroom/core/attribute.py @@ -37,7 +37,7 @@ def attributeFactory(description, value, isOutput, node, root=None, parent=None) class Attribute(BaseObject): """ """ - stringIsLinkRe = re.compile('^\{[A-Za-z]+[A-Za-z0-9_.]*\}$') + stringIsLinkRe = re.compile(r'^\{[A-Za-z]+[A-Za-z0-9_.]*\}$') def __init__(self, node, attributeDesc, isOutput, root=None, parent=None): """ diff --git a/meshroom/ui/reconstruction.py b/meshroom/ui/reconstruction.py index 3915c42a..2b414c8c 100755 --- a/meshroom/ui/reconstruction.py +++ b/meshroom/ui/reconstruction.py @@ -3,7 +3,6 @@ import logging import math import os from threading import Thread -from collections import Iterable from PySide2.QtCore import QObject, Slot, Property, Signal, QUrl, QSizeF from PySide2.QtGui import QMatrix4x4, QMatrix3x3, QQuaternion, QVector3D, QVector2D @@ -14,6 +13,7 @@ from meshroom import multiview from meshroom.common.qt import QObjectListModel from meshroom.core import Version from meshroom.core.node import Node, CompatibilityNode, Status, Position +from meshroom.core.pyCompatibility import Iterable from meshroom.ui.graph import UIGraph from meshroom.ui.utils import makeProperty From 945f59e00c490a18ca7e67c4b50d614b83917e36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Thu, 21 Jul 2022 15:28:36 +0200 Subject: [PATCH 12/21] Retrieve file features for templates When a project file is opened as such, its filepath is stored and all its features are retrieved as a consequence. Templates were hard-coded instead of opened as files, so there never was any need to load their features. Now that .mg files can be opened both as projects and pipelines, we need to retrieve the features in both cases, whether the filepath is stored (file opened as project) or not (file opened as template). This can be useful to retrieve the layout contained in the .mg file for the pipeline, for example. --- meshroom/core/graph.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/meshroom/core/graph.py b/meshroom/core/graph.py index 9baf1a59..f143eb51 100644 --- a/meshroom/core/graph.py +++ b/meshroom/core/graph.py @@ -236,8 +236,6 @@ class Graph(BaseObject): @property def fileFeatures(self): """ Get loaded file supported features based on its version. 
""" - if not self._filepath: - return [] return Graph.IO.getFeaturesForVersion(self.header.get(Graph.IO.Keys.FileVersion, "0.0")) @Slot(str) From 7186ca3ce540e09606ec707888c7e739638c844d Mon Sep 17 00:00:00 2001 From: Fabien Castan Date: Mon, 25 Jul 2022 19:17:53 +0200 Subject: [PATCH 13/21] [pipelines] photogAndTracking: nodes ui alignment --- .../photogrammetryAndCameraTracking.mg | 240 +++++++++--------- 1 file changed, 120 insertions(+), 120 deletions(-) diff --git a/meshroom/pipelines/photogrammetryAndCameraTracking.mg b/meshroom/pipelines/photogrammetryAndCameraTracking.mg index f01f7ffd..bf418dbb 100644 --- a/meshroom/pipelines/photogrammetryAndCameraTracking.mg +++ b/meshroom/pipelines/photogrammetryAndCameraTracking.mg @@ -33,9 +33,77 @@ "outputs": { "outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm" }, + "position": [ + 1024, + 393 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "ImageMatching_1": { + "inputs": { + "minNbImages": 200, + "nbNeighbors": 5, + "tree": "${ALICEVISION_VOCTREE}", + "maxDescriptors": 500, + "verboseLevel": "info", + "weights": "", + "nbMatches": 40, + "input": "{FeatureExtraction_1.input}", + "method": "SequentialAndVocabularyTree", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ] + }, + "nodeType": "ImageMatching", + "uids": { + "0": "46fb9072ac753d60bec7dda9c8674b0568506ddf" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/imageMatches.txt" + }, + "position": [ + 400, + 0 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "FeatureExtraction_1": { + "inputs": { + "verboseLevel": "info", + "maxThreads": 0, + "describerTypes": [ + "dspsift" + ], + "maxNbFeatures": 0, + "relativePeakThreshold": 0.01, + "forceCpuExtraction": true, + "masksFolder": "", + "contrastFiltering": "GridSort", + "describerQuality": "normal", + "gridFiltering": true, + "input": "{CameraInit_1.output}", + "describerPreset": "normal" + }, + "nodeType": "FeatureExtraction", + "uids": { + "0": "a07fb8d05b63327d05461954c2fd2a00f201275b" + }, + "parallelization": { + "blockSize": 40, + "split": 0, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/" + }, "position": [ 200, - 320 + 0 ], "internalFolder": "{cache}/{nodeType}/{uid0}/" }, @@ -95,82 +163,6 @@ ], "internalFolder": "{cache}/{nodeType}/{uid0}/" }, - "FeatureExtraction_1": { - "inputs": { - "verboseLevel": "info", - "maxThreads": 0, - "describerTypes": [ - "dspsift" - ], - "maxNbFeatures": 0, - "relativePeakThreshold": 0.01, - "forceCpuExtraction": true, - "masksFolder": "", - "contrastFiltering": "GridSort", - "describerQuality": "normal", - "gridFiltering": true, - "input": "{CameraInit_1.output}", - "describerPreset": "normal" - }, - "nodeType": "FeatureExtraction", - "uids": { - "0": "a07fb8d05b63327d05461954c2fd2a00f201275b" - }, - "parallelization": { - "blockSize": 40, - "split": 0, - "size": 0 - }, - "outputs": { - "output": "{cache}/{nodeType}/{uid0}/" - }, - "position": [ - 200, - 0 - ], - "internalFolder": "{cache}/{nodeType}/{uid0}/" - }, - "CameraInit_2": { - "inputs": { - "groupCameraFallback": "folder", - "intrinsics": [], - "viewIdRegex": ".*?(\\d+)", - "defaultFieldOfView": 45.0, - "allowedCameraModels": [ - "pinhole", - "radial1", - "radial3", - "brown", - "fisheye4", - "fisheye1", - "3deanamorphic4", - "3deradial4", - "3declassicld" - ], - "verboseLevel": "info", - "viewIdMethod": "metadata", - "viewpoints": [], - "useInternalWhiteBalance": true, - "sensorDatabase": 
"${ALICEVISION_SENSOR_DB}" - }, - "nodeType": "CameraInit", - "uids": { - "0": "f9436e97e444fa71a05aa5cf7639b206df8ba282" - }, - "parallelization": { - "blockSize": 0, - "split": 1, - "size": 0 - }, - "outputs": { - "output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm" - }, - "position": [ - 0, - 160 - ], - "internalFolder": "{cache}/{nodeType}/{uid0}/" - }, "ExportAnimatedCamera_1": { "inputs": { "exportFullROD": false, @@ -198,8 +190,8 @@ "outputCamera": "{cache}/{nodeType}/{uid0}/camera.abc" }, "position": [ - 1600, - 0 + 1629, + 212 ], "internalFolder": "{cache}/{nodeType}/{uid0}/" }, @@ -275,8 +267,49 @@ "outputCombinedSfM": "{cache}/{nodeType}/{uid0}/combineSfM.sfm" }, "position": [ - 1000, - 0 + 1029, + 212 + ], + "internalFolder": "{cache}/{nodeType}/{uid0}/" + }, + "CameraInit_2": { + "inputs": { + "groupCameraFallback": "folder", + "intrinsics": [], + "viewIdRegex": ".*?(\\d+)", + "defaultFieldOfView": 45.0, + "allowedCameraModels": [ + "pinhole", + "radial1", + "radial3", + "brown", + "fisheye4", + "fisheye1", + "3deanamorphic4", + "3deradial4", + "3declassicld" + ], + "verboseLevel": "info", + "viewIdMethod": "metadata", + "viewpoints": [], + "useInternalWhiteBalance": true, + "sensorDatabase": "${ALICEVISION_SENSOR_DB}" + }, + "nodeType": "CameraInit", + "uids": { + "0": "f9436e97e444fa71a05aa5cf7639b206df8ba282" + }, + "parallelization": { + "blockSize": 0, + "split": 1, + "size": 0 + }, + "outputs": { + "output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm" + }, + "position": [ + -2, + 223 ], "internalFolder": "{cache}/{nodeType}/{uid0}/" }, @@ -310,41 +343,8 @@ "output": "{cache}/{nodeType}/{uid0}/" }, "position": [ - 200, - 160 - ], - "internalFolder": "{cache}/{nodeType}/{uid0}/" - }, - "ImageMatching_1": { - "inputs": { - "minNbImages": 200, - "nbNeighbors": 5, - "tree": "${ALICEVISION_VOCTREE}", - "maxDescriptors": 500, - "verboseLevel": "info", - "weights": "", - "nbMatches": 40, - "input": "{FeatureExtraction_1.input}", - "method": "SequentialAndVocabularyTree", - "featuresFolders": [ - "{FeatureExtraction_1.output}" - ] - }, - "nodeType": "ImageMatching", - "uids": { - "0": "46fb9072ac753d60bec7dda9c8674b0568506ddf" - }, - "parallelization": { - "blockSize": 0, - "split": 1, - "size": 0 - }, - "outputs": { - "output": "{cache}/{nodeType}/{uid0}/imageMatches.txt" - }, - "position": [ - 400, - 0 + 198, + 223 ], "internalFolder": "{cache}/{nodeType}/{uid0}/" }, @@ -382,8 +382,8 @@ "output": "{cache}/{nodeType}/{uid0}/" }, "position": [ - 1200, - 0 + 1229, + 212 ], "internalFolder": "{cache}/{nodeType}/{uid0}/" }, @@ -477,8 +477,8 @@ "outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm" }, "position": [ - 1400, - 0 + 1429, + 212 ], "internalFolder": "{cache}/{nodeType}/{uid0}/" } From b77274a027ebc560ac1ba39a08dfffbfe2e5bcd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Wed, 20 Jul 2022 16:11:37 +0200 Subject: [PATCH 14/21] Store all duplicates of a node correctly upon their creation Duplicates used to be stored in a dictionary with an entry being "parent node": "duplicated node". On occasions where a single parent node was duplicated more than once, the latest duplicated node erased the previous one(s), and these older ones were "lost": after being created, there was no trace left of their existence in the duplication operation. Undoing that duplication operation was thus leaving these duplicated nodes out and not removing them. 
Duplicated nodes are now stored as "parent node": [list of duplicated nodes] to keep track of all the created nodes, effectively removing them upon an "undo". --- meshroom/core/graph.py | 8 +++++--- meshroom/ui/commands.py | 3 ++- tests/test_graph.py | 18 ++++++++++-------- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/meshroom/core/graph.py b/meshroom/core/graph.py index 9baf1a59..5e6a75f2 100644 --- a/meshroom/core/graph.py +++ b/meshroom/core/graph.py @@ -382,7 +382,7 @@ class Graph(BaseObject): node, edges = self.copyNode(srcNode, withEdges=False) duplicate = self.addNode(node) duplicateEdges.update(edges) - duplicates[srcNode] = duplicate # original node to duplicate map + duplicates.setdefault(srcNode, []).append(duplicate) # re-create edges taking into account what has been duplicated for attr, linkExpression in duplicateEdges.items(): @@ -390,8 +390,10 @@ class Graph(BaseObject): # get source node and attribute name edgeSrcNodeName, edgeSrcAttrName = link.split(".", 1) edgeSrcNode = self.node(edgeSrcNodeName) - # if the edge's source node has been duplicated, use the duplicate; otherwise use the original node - edgeSrcNode = duplicates.get(edgeSrcNode, edgeSrcNode) + # if the edge's source node has been duplicated (the key exists in the dictionary), + # use the duplicate; otherwise use the original node + if edgeSrcNode in duplicates: + edgeSrcNode = duplicates.get(edgeSrcNode)[0] self.addEdge(edgeSrcNode.attribute(edgeSrcAttrName), attr) return duplicates diff --git a/meshroom/ui/commands.py b/meshroom/ui/commands.py index 4ae6a4f3..615d6036 100755 --- a/meshroom/ui/commands.py +++ b/meshroom/ui/commands.py @@ -184,7 +184,8 @@ class DuplicateNodesCommand(GraphCommand): def redoImpl(self): srcNodes = [ self.graph.node(i) for i in self.srcNodeNames ] - duplicates = list(self.graph.duplicateNodes(srcNodes).values()) + # flatten the list of duplicated nodes to avoid lists within the list + duplicates = [ n for nodes in list(self.graph.duplicateNodes(srcNodes).values()) for n in nodes ] self.duplicates = [ n.name for n in duplicates ] return duplicates diff --git a/tests/test_graph.py b/tests/test_graph.py index 6ade2154..6960258d 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -266,14 +266,16 @@ def test_duplicate_nodes(): # duplicate from n1 nodes_to_duplicate, _ = g.dfsOnDiscover(startNodes=[n1], reverse=True, dependenciesOnly=True) nMap = g.duplicateNodes(srcNodes=nodes_to_duplicate) - for s, d in nMap.items(): - assert s.nodeType == d.nodeType + for s, duplicated in nMap.items(): + for d in duplicated: + assert s.nodeType == d.nodeType - # check number of duplicated nodes - assert len(nMap) == 3 + # check number of duplicated nodes and that every parent node has been duplicated once + assert len(nMap) == 3 and all([len(nMap[i]) == 1 for i in nMap.keys()]) # check connections - assert nMap[n1].input.getLinkParam() == n0.output - assert nMap[n2].input.getLinkParam() == nMap[n1].output - assert nMap[n3].input.getLinkParam() == nMap[n1].output - assert nMap[n3].input2.getLinkParam() == nMap[n2].output + # access directly index 0 because we know there is a single duplicate for each parent node + assert nMap[n1][0].input.getLinkParam() == n0.output + assert nMap[n2][0].input.getLinkParam() == nMap[n1][0].output + assert nMap[n3][0].input.getLinkParam() == nMap[n1][0].output + assert nMap[n3][0].input2.getLinkParam() == nMap[n2][0].output From c4c8b5c8d55467a59e626b2403b9ded872088b5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: 
Thu, 21 Jul 2022 08:37:11 +0200 Subject: [PATCH 15/21] [ui] Vertically align duplicated nodes correctly When a node is duplicated more than once in a single "duplicate" operation, it happens that several of the duplicated nodes overlap. This patch takes into account all the newly duplicated (and already moved) nodes before moving them into their final position. --- meshroom/ui/graph.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/meshroom/ui/graph.py b/meshroom/ui/graph.py index 250abcd2..91fa5424 100644 --- a/meshroom/ui/graph.py +++ b/meshroom/ui/graph.py @@ -574,6 +574,7 @@ class UIGraph(QObject): list[Node]: the list of duplicated nodes """ nodes = self.filterNodes(nodes) + nPositions = [] # enable updates between duplication and layout to get correct depths during layout with self.groupedGraphModification("Duplicate Selected Nodes", disableUpdates=False): # disable graph updates during duplication @@ -581,8 +582,19 @@ class UIGraph(QObject): duplicates = self.push(commands.DuplicateNodesCommand(self._graph, nodes)) # move nodes below the bounding box formed by the duplicated node(s) bbox = self._layout.boundingBox(duplicates) + for n in duplicates: - self.moveNode(n, Position(n.x, bbox[3] + self.layout.gridSpacing + n.y)) + idx = duplicates.index(n) + yPos = n.y + self.layout.gridSpacing + bbox[3] + if idx > 0 and (n.x, yPos) in nPositions: + # make sure the node will not be moved on top of another node + while (n.x, yPos) in nPositions: + yPos = yPos + self.layout.gridSpacing + self.layout.nodeHeight + self.moveNode(n, Position(n.x, yPos)) + else: + self.moveNode(n, Position(n.x, bbox[3] + self.layout.gridSpacing + n.y)) + nPositions.append((n.x, n.y)) + return duplicates @Slot(QObject, result="QVariantList") From 819d9e3e7037f0808e851e46d187930e21a6bd67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Thu, 21 Jul 2022 11:43:39 +0200 Subject: [PATCH 16/21] Prevent duplication/removal a node more than once in the same action --- meshroom/ui/graph.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/meshroom/ui/graph.py b/meshroom/ui/graph.py index 91fa5424..71846a9f 100644 --- a/meshroom/ui/graph.py +++ b/meshroom/ui/graph.py @@ -559,9 +559,11 @@ class UIGraph(QObject): """ with self.groupedGraphModification("Remove Nodes From Selected Nodes"): nodesToRemove, _ = self._graph.dfsOnDiscover(startNodes=nodes, reverse=True, dependenciesOnly=True) + # filter out nodes that will be removed more than once + uniqueNodesToRemove = list(dict.fromkeys(nodesToRemove)) # Perform nodes removal from leaves to start node so that edges # can be re-created in correct order on redo. 
- self.removeNodes(list(reversed(nodesToRemove))) + self.removeNodes(list(reversed(uniqueNodesToRemove))) @Slot(QObject, result="QVariantList") def duplicateNodes(self, nodes): @@ -609,7 +611,9 @@ class UIGraph(QObject): """ with self.groupedGraphModification("Duplicate Nodes From Selected Nodes"): nodesToDuplicate, _ = self._graph.dfsOnDiscover(startNodes=nodes, reverse=True, dependenciesOnly=True) - duplicates = self.duplicateNodes(nodesToDuplicate) + # filter out nodes that will be duplicated more than once + uniqueNodesToDuplicate = list(dict.fromkeys(nodesToDuplicate)) + duplicates = self.duplicateNodes(uniqueNodesToDuplicate) return duplicates @Slot(QObject) From a0913690472f2152941fb4eafb2bb473002c09a2 Mon Sep 17 00:00:00 2001 From: Simone Gasparini Date: Mon, 1 Aug 2022 11:19:17 +0200 Subject: [PATCH 17/21] [ci] add bug to the list of tag to skip the stale check --- .github/stale.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/stale.yml b/.github/stale.yml index 6e137fb3..9ef50bb8 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -8,6 +8,7 @@ exemptLabels: - "feature request" - "scope:doc" - "new feature" + - "bug" # Label to use when marking an issue as stale staleLabel: stale # Comment to post when marking an issue as stale. Set to `false` to disable From d5b1d43b274d1786f174d94e289e36f897704c66 Mon Sep 17 00:00:00 2001 From: Fabien Castan Date: Wed, 3 Aug 2022 23:41:55 +0200 Subject: [PATCH 18/21] [core] fix logging of nodes loading --- meshroom/core/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshroom/core/__init__.py b/meshroom/core/__init__.py index b39f2654..56d6351d 100644 --- a/meshroom/core/__init__.py +++ b/meshroom/core/__init__.py @@ -257,7 +257,7 @@ def loadAllNodes(folder): nodeTypes = loadNodes(folder, package) for nodeType in nodeTypes: registerNodeType(nodeType) - logging.debug('Plugins loaded: ', ', '.join([nodeType.__name__ for nodeType in nodeTypes])) + logging.debug('Nodes loaded [{}]: {}'.format(package, ', '.join([nodeType.__name__ for nodeType in nodeTypes]))) def registerSubmitter(s): From 659c8a05ed22a4cec8ba6c6b01d4400d5710612c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Thu, 4 Aug 2022 17:59:38 +0200 Subject: [PATCH 19/21] Add abstract InitNode InitNode is an abstract class which is meant to be inherited by all the initialization nodes (such as CameraInit), included those that might be created by the user. InitNode contains methods that can be reimplemented by the children classes if necessary. This abstract class allows to keep on using scripts such as meshroom_batch without having to modify them specifically or being limited to using a CameraInit node. 
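A hypothetical sketch (not part of this patch) of how a custom pipeline could provide its own initialization node by inheriting from both CommandLineNode and InitNode; the node name, command line and the "inputFiles" attribute are illustrative assumptions:

    from meshroom.core import desc

    class MyCustomInit(desc.CommandLineNode, desc.InitNode):
        # attribute declarations (inputs/outputs) omitted for brevity
        commandLine = 'my_custom_init {allParams}'

        def initialize(self, node, inputs, recursiveInputs):
            # clear previous values, then feed in the files passed to meshroom_batch
            self.resetAttributes(node, ["inputFiles"])
            self.extendAttributes(node, {"inputFiles": list(inputs) + list(recursiveInputs)})

meshroom_batch would then pick such a node up through findInitNodes() and call initialize() on it, as it does for CameraInit.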
--- bin/meshroom_batch | 55 ++++++++---------------- meshroom/core/desc.py | 52 ++++++++++++++++++++++ meshroom/core/graph.py | 8 ++++ meshroom/nodes/aliceVision/CameraInit.py | 34 ++++++++++++++- 4 files changed, 111 insertions(+), 38 deletions(-) diff --git a/bin/meshroom_batch b/bin/meshroom_batch index 7883e1b6..c5363acd 100755 --- a/bin/meshroom_batch +++ b/bin/meshroom_batch @@ -9,8 +9,10 @@ meshroom.setupEnvironment() import meshroom.core.graph from meshroom import multiview +from meshroom.core.desc import InitNode import logging + parser = argparse.ArgumentParser(description='Launch the full photogrammetry or Panorama HDR pipeline.') parser.add_argument('-i', '--input', metavar='SFM/FOLDERS/IMAGES', type=str, nargs='*', default=[], @@ -94,33 +96,24 @@ def getOnlyNodeOfType(g, nodeType): return nodes[0] +def getInitNode(g): + """ + Helper function to get the Init node in the graph 'g' and raise an exception if there is no or + multiple candidates. + """ + nodes = g.findInitNodes() + if len(nodes) == 0: + raise RuntimeError("meshroom_batch requires an Init node in the pipeline.") + elif len(nodes) > 1: + raise RuntimeError("meshroom_batch requires exactly one Init node in the pipeline, {} found: {}" + .format(len(nodes), str(nodes))) + return nodes[0] + + if not args.input and not args.inputRecursive: print('Nothing to compute. You need to set --input or --inputRecursive.') sys.exit(1) -views, intrinsics = [], [] -# Build image files list from inputImages arguments -filesByType = multiview.FilesByType() - -hasSearchedForImages = False - -if args.input: - if len(args.input) == 1 and os.path.isfile(args.input[0]) and os.path.splitext(args.input[0])[-1] in ('.json', '.sfm'): - # args.input is a sfmData file: setup pre-calibrated views and intrinsics - from meshroom.nodes.aliceVision.CameraInit import readSfMData - views, intrinsics = readSfMData(args.input[0]) - else: - filesByType.extend(multiview.findFilesByTypeInFolder(args.input, recursive=False)) - hasSearchedForImages = True - -if args.inputRecursive: - filesByType.extend(multiview.findFilesByTypeInFolder(args.inputRecursive, recursive=True)) - hasSearchedForImages = True - -if hasSearchedForImages and not filesByType.images: - print("No image found") - sys.exit(-1) - graph = multiview.Graph(name=args.pipeline) with multiview.GraphModification(graph): @@ -131,15 +124,10 @@ with multiview.GraphModification(graph): else: # custom pipeline graph.load(args.pipeline, setupProjectFile=False) - # graph.update() - cameraInit = getOnlyNodeOfType(graph, 'CameraInit') - # reset graph inputs - cameraInit.viewpoints.resetValue() - cameraInit.intrinsics.resetValue() - # add views and intrinsics (if any) read from args.input - cameraInit.viewpoints.extend(views) - cameraInit.intrinsics.extend(intrinsics) + # get init node and initialize it + initNode = getInitNode(graph) + initNode.nodeDesc.initialize(initNode, args.input, args.inputRecursive) if not graph.canComputeLeaves: raise RuntimeError("Graph cannot be computed. 
Check for compatibility issues.") @@ -151,11 +139,6 @@ with multiview.GraphModification(graph): publish = getOnlyNodeOfType(graph, 'Publish') publish.output.value = args.output - if filesByType.images: - views, intrinsics = cameraInit.nodeDesc.buildIntrinsics(cameraInit, filesByType.images) - cameraInit.viewpoints.value = views - cameraInit.intrinsics.value = intrinsics - if args.overrides: import io import json diff --git a/meshroom/core/desc.py b/meshroom/core/desc.py index 3dcb7a07..e66eb053 100755 --- a/meshroom/core/desc.py +++ b/meshroom/core/desc.py @@ -527,3 +527,55 @@ class CommandLineNode(Node): finally: chunk.subprocess = None + +# Test abstract node +class InitNode: + def __init__(self): + pass + + def initialize(self, node, inputs, recursiveInputs): + """ + Initialize the attributes that are needed for a node to start running. + + Args: + node (Node): the node whose attributes must be initialized + inputs (list): the user-provided list of input files/directories + recursiveInputs (list): the user-provided list of input directories to search recursively for images + """ + pass + + def resetAttributes(self, node, attributeNames): + """ + Reset the values of the provided attributes for a node. + + Args: + node (Node): the node whose attributes are to be reset + attributeNames (list): the list containing the names of the attributes to reset + """ + for attrName in attributeNames: + if node.hasAttribute(attrName): + node.attribute(attrName).resetValue() + + def extendAttributes(self, node, attributesDict): + """ + Extend the values of the provided attributes for a node. + + Args: + node (Node): the node whose attributes are to be extended + attributesDict (dict): the dictionary containing the attributes' names (as keys) and the values to extend with + """ + for attr in attributesDict.keys(): + if node.hasAttribute(attr): + node.attribute(attr).extend(attributesDict[attr]) + + def setAttributes(self, node, attributesDict): + """ + Set the values of the provided attributes for a node. 
+ + Args: + node (Node): the node whose attributes are to be extended + attributesDict (dict): the dictionary containing the attributes' names (as keys) and the values to set + """ + for attr in attributesDict: + if node.hasAttribute(attr): + node.attribute(attr).value = attributesDict[attr] diff --git a/meshroom/core/graph.py b/meshroom/core/graph.py index 1e9b8132..89faefb5 100644 --- a/meshroom/core/graph.py +++ b/meshroom/core/graph.py @@ -547,6 +547,14 @@ class Graph(BaseObject): nodes = [n for n in self._nodes.values() if n.nodeType == nodeType] return self.sortNodesByIndex(nodes) if sortedByIndex else nodes + def findInitNodes(self): + """ + Returns: + list[Node]: the list of Init nodes (nodes inheriting from InitNode) + """ + nodes = [n for n in self._nodes.values() if isinstance(n.nodeDesc, meshroom.core.desc.InitNode)] + return nodes + def findNodeCandidates(self, nodeNameExpr): pattern = re.compile(nodeNameExpr) return [v for k, v in self._nodes.objects.items() if pattern.match(k)] diff --git a/meshroom/nodes/aliceVision/CameraInit.py b/meshroom/nodes/aliceVision/CameraInit.py index 9ff8a834..dcc8182b 100644 --- a/meshroom/nodes/aliceVision/CameraInit.py +++ b/meshroom/nodes/aliceVision/CameraInit.py @@ -8,7 +8,7 @@ import tempfile import logging from meshroom.core import desc, Version - +from meshroom.multiview import FilesByType, findFilesByTypeInFolder Viewpoint = [ desc.IntParam(name="viewId", label="Id", description="Image UID", value=-1, uid=[0], range=None), @@ -119,7 +119,8 @@ def readSfMData(sfmFile): return views, intrinsics -class CameraInit(desc.CommandLineNode): + +class CameraInit(desc.CommandLineNode, desc.InitNode): commandLine = 'aliceVision_cameraInit {allParams} --allowSingleView 1' # don't throw an error if there is only one image size = desc.DynamicNodeSize('viewpoints') @@ -250,6 +251,35 @@ The metadata needed are: ), ] + def __init__(self): + super(CameraInit, self).__init__() + + def initialize(self, node, inputs, recursiveInputs): + # Reset graph inputs + self.resetAttributes(node, ["viewpoints", "intrinsics"]) + + filesByType = FilesByType() + searchedForImages = False + + if recursiveInputs: + filesByType.extend(findFilesByTypeInFolder(recursiveInputs, recursive=True)) + searchedForImages = True + + # Add views and intrinsics from a file if it was provided, or look for the images + if len(inputs) == 1 and os.path.isfile(inputs[0]) and os.path.splitext(inputs[0])[-1] in ('.json', '.sfm'): + views, intrinsics = readSfMData(inputs[0]) + self.extendAttributes(node, {"viewpoints": views, "intrinsics": intrinsics}) + else: + filesByType.extend(findFilesByTypeInFolder(inputs, recursive=False)) + searchedForImages = True + + # If there was no input file, check that the directories do contain images + if searchedForImages and not filesByType.images: + raise ValueError("No valid input file or no image in the provided directories") + + views, intrinsics = self.buildIntrinsics(node, filesByType.images) + self.setAttributes(node, {"viewpoints": views, "intrinsics": intrinsics}) + def upgradeAttributeValues(self, attrValues, fromVersion): # Starting with version 6, the principal point is now relative to the image center From 93a3ad8f16e45150d339fb680fc330f4c6cd928b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Candice=20Bent=C3=A9jac?= Date: Mon, 8 Aug 2022 10:44:58 +0200 Subject: [PATCH 20/21] [ui] Update intrinsics table when switching between groups --- meshroom/ui/qml/ImageGallery/ImageGallery.qml | 4 ++++ 1 file changed, 4 insertions(+) diff --git 
a/meshroom/ui/qml/ImageGallery/ImageGallery.qml b/meshroom/ui/qml/ImageGallery/ImageGallery.qml index 6e0256e2..6e84b75f 100644 --- a/meshroom/ui/qml/ImageGallery/ImageGallery.qml +++ b/meshroom/ui/qml/ImageGallery/ImageGallery.qml @@ -48,6 +48,10 @@ Panel { parseIntr() } + onCameraInitIndexChanged: { + parseIntr() + } + function changeCurrentIndex(newIndex) { _reconstruction.cameraInitIndex = newIndex } From 9365a37bde58d70364ce2284bc56a54021b38bb0 Mon Sep 17 00:00:00 2001 From: Povilas Kanapickas Date: Thu, 14 Jul 2022 13:13:29 +0300 Subject: [PATCH 21/21] [ui] Reduce confusion when qml loading fails Currently we disable all logging out of qml by default. This is problematic in case qml loading fails for any reason (e.g. missing dependencies) as the user will be presented with a message that is not actionable: "QQmlApplicationEngine failed to load component". There is no way to understand what's happening unless the user knows about MESHROOM_OUTPUT_QML_WARNINGS. This is improved by checking for presence of "QQmlApplicationEngine failed to load component" and warning if qml logging is disabled in that case. --- meshroom/ui/app.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/meshroom/ui/app.py b/meshroom/ui/app.py index 990d928b..7e5b84ff 100644 --- a/meshroom/ui/app.py +++ b/meshroom/ui/app.py @@ -51,9 +51,19 @@ class MessageHandler(object): @classmethod def handler(cls, messageType, context, message): """ Message handler remapping Qt logs to Python logging system. """ - # discard blacklisted Qt messages related to QML when 'output qml warnings' is set to false - if not cls.outputQmlWarnings and any(w in message for w in cls.qmlWarningsBlacklist): - return + + if not cls.outputQmlWarnings: + # If MESHROOM_OUTPUT_QML_WARNINGS is not set and an error in qml files happen we're + # left without any output except "QQmlApplicationEngine failed to load component". + # This is extremely hard to debug to someone who does not know about + # MESHROOM_OUTPUT_QML_WARNINGS beforehand because by default Qml will output errors to + # stdout. + if "QQmlApplicationEngine failed to load component" in message: + logging.warning("Set MESHROOM_OUTPUT_QML_WARNINGS=1 to get a detailed error message.") + + # discard blacklisted Qt messages related to QML when 'output qml warnings' is not enabled + elif any(w in message for w in cls.qmlWarningsBlacklist): + return MessageHandler.logFunctions[messageType](message)
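
A standalone sketch of the resulting control flow (simplified stand-in for the real handler, not part of the patch; the blacklist entry is hypothetical): a QML load failure now produces an actionable hint even when QML warnings are suppressed, while blacklisted QML noise is still discarded:

    import logging

    QML_WARNINGS_BLACKLIST = ("Binding loop detected",)  # hypothetical entry

    def handle(message, outputQmlWarnings):
        if not outputQmlWarnings:
            if "QQmlApplicationEngine failed to load component" in message:
                logging.warning("Set MESHROOM_OUTPUT_QML_WARNINGS=1 to get a detailed error message.")
            elif any(w in message for w in QML_WARNINGS_BLACKLIST):
                return  # discard blacklisted QML noise
        logging.error(message)  # stand-in for MessageHandler.logFunctions[messageType](message)

    handle("QQmlApplicationEngine failed to load component", outputQmlWarnings=False)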