Merge pull request #1727 from alicevision/dev/externalPipelines

Use project files to define pipelines
Fabien Castan 2022-07-26 15:06:16 +02:00 committed by GitHub
commit b291a9ed07
24 changed files with 3005 additions and 466 deletions

View file

@@ -20,8 +20,8 @@ parser.add_argument('-I', '--inputRecursive', metavar='FOLDERS/IMAGES', type=str
                     default=[],
                     help='Input folders containing all images recursively.')
-parser.add_argument('-p', '--pipeline', metavar='photogrammetry/panoramaHdr/panoramaFisheyeHdr/cameraTracking/photogrammetryDraft/MG_FILE', type=str, default='photogrammetry',
-                    help='"photogrammetry", "panoramaHdr", "panoramaFisheyeHdr", "cameraTracking", "photogrammetryDraft" pipeline or a Meshroom file containing a custom pipeline to run on input images. '
+parser.add_argument('-p', '--pipeline', metavar='FILE.mg/' + '/'.join(meshroom.core.pipelineTemplates), type=str, default='photogrammetry',
+                    help='Template pipeline among those listed or a Meshroom file containing a custom pipeline to run on input images. '
                          'Requirements: the graph must contain one CameraInit node, '
                          'and one Publish node if --output is set.')
@@ -124,22 +124,13 @@ if hasSearchedForImages and not filesByType.images:
 graph = multiview.Graph(name=args.pipeline)
 with multiview.GraphModification(graph):
-    # initialize photogrammetry pipeline
-    if args.pipeline.lower() == "photogrammetry":
-        # default photogrammetry pipeline
-        multiview.photogrammetry(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph)
-    elif args.pipeline.lower() == "panoramahdr":
-        # default panorama Hdr pipeline
-        multiview.panoramaHdr(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph)
-    elif args.pipeline.lower() == "panoramafisheyehdr":
-        # default panorama Fisheye Hdr pipeline
-        multiview.panoramaFisheyeHdr(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph)
-    elif args.pipeline.lower() == "cameratracking":
-        # default panorama Fisheye Hdr pipeline
-        multiview.cameraTracking(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph)
+    # initialize template pipeline
+    loweredPipelineTemplates = dict((k.lower(), v) for k, v in meshroom.core.pipelineTemplates.items())
+    if args.pipeline.lower() in loweredPipelineTemplates:
+        graph.load(loweredPipelineTemplates[args.pipeline.lower()], setupProjectFile=False)
     else:
         # custom pipeline
-        graph.load(args.pipeline)
+        graph.load(args.pipeline, setupProjectFile=False)
     # graph.update()
     cameraInit = getOnlyNodeOfType(graph, 'CameraInit')
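
With this change, --pipeline no longer maps to a hard-coded builder function: template names are looked up case-insensitively in meshroom.core.pipelineTemplates, and any other value is treated as a path to a custom .mg file. A minimal standalone sketch of that resolution logic (the 'templates' dict and paths here are made up and merely stand in for meshroom.core.pipelineTemplates):

def resolvePipeline(name, templates):
    # Template names win (case-insensitive); anything else is assumed to be a .mg file path.
    lowered = dict((k.lower(), v) for k, v in templates.items())
    return lowered.get(name.lower(), name)

templates = {'photogrammetry': '/meshroom/pipelines/photogrammetry.mg'}  # hypothetical path
print(resolvePipeline('Photogrammetry', templates))   # -> /meshroom/pipelines/photogrammetry.mg
print(resolvePipeline('/data/custom.mg', templates))  # -> /data/custom.mg (custom pipeline file)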

View file

@@ -35,6 +35,7 @@ cacheFolderName = 'MeshroomCache'
 defaultCacheFolder = os.environ.get('MESHROOM_CACHE', os.path.join(tempfile.gettempdir(), cacheFolderName))
 nodesDesc = {}
 submitters = {}
+pipelineTemplates = {}

 def hashValue(value):

@@ -270,6 +271,12 @@ def loadSubmitters(folder, packageName):
     return loadPlugins(folder, packageName, BaseSubmitter)

+def loadPipelineTemplates(folder):
+    global pipelineTemplates
+    for file in os.listdir(folder):
+        if file.endswith(".mg") and file not in pipelineTemplates:
+            pipelineTemplates[os.path.splitext(file)[0]] = os.path.join(folder, file)

 meshroomFolder = os.path.dirname(os.path.dirname(__file__))
 additionalNodesPath = os.environ.get("MESHROOM_NODES_PATH", "").split(os.pathsep)

@@ -288,3 +295,12 @@ subs = loadSubmitters(os.environ.get("MESHROOM_SUBMITTERS_PATH", meshroomFolder)
 for sub in subs:
     registerSubmitter(sub())

+# Load pipeline templates: check in the default folder and any folder the user might have
+# added to the environment variable
+additionalPipelinesPath = os.environ.get("MESHROOM_PIPELINE_TEMPLATES_PATH", "").split(os.pathsep)
+additionalPipelinesPath = [i for i in additionalPipelinesPath if i]
+pipelineTemplatesFolders = [os.path.join(meshroomFolder, 'pipelines')] + additionalPipelinesPath
+for f in pipelineTemplatesFolders:
+    loadPipelineTemplates(f)
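
Template discovery scans the bundled meshroom/pipelines folder plus any folders listed in MESHROOM_PIPELINE_TEMPLATES_PATH; the template name is simply the .mg file name without its extension. A small self-contained sketch of the same discovery behaviour applied to a temporary folder (the file name is a made-up example):

import os, tempfile

templates = {}

def loadTemplates(folder):
    # Same idea as loadPipelineTemplates: every *.mg file becomes a template named after the file.
    for f in os.listdir(folder):
        if f.endswith('.mg') and f not in templates:
            templates[os.path.splitext(f)[0]] = os.path.join(folder, f)

folder = tempfile.mkdtemp()
open(os.path.join(folder, 'myStudioPipeline.mg'), 'w').close()  # hypothetical custom template
loadTemplates(folder)
print(templates)  # {'myStudioPipeline': '<folder>/myStudioPipeline.mg'}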

View file

@@ -1,11 +1,13 @@
 #!/usr/bin/env python
 # coding:utf-8
 import copy
+import os
 import re
 import weakref
 import types
 import logging
+from string import Template

 from meshroom.common import BaseObject, Property, Variant, Signal, ListModel, DictModel, Slot
 from meshroom.core import desc, pyCompatibility, hashValue

@@ -139,7 +141,9 @@ class Attribute(BaseObject):
         self.enabledChanged.emit()

     def _get_value(self):
-        return self.getLinkParam().value if self.isLink else self._value
+        if self.isLink:
+            return self.getLinkParam().value
+        return self._value

     def _set_value(self, value):
         if self._value == value:

@@ -259,13 +263,18 @@ class Attribute(BaseObject):
             return self.defaultValue()
         return self._value

+    def getEvalValue(self):
+        if isinstance(self.value, pyCompatibility.basestring):
+            return Template(self.value).safe_substitute(os.environ)
+        return self.value
+
     def getValueStr(self):
         if isinstance(self.attributeDesc, desc.ChoiceParam) and not self.attributeDesc.exclusive:
             assert(isinstance(self.value, pyCompatibility.Sequence) and not isinstance(self.value, pyCompatibility.basestring))
-            return self.attributeDesc.joinChar.join(self.value)
+            return self.attributeDesc.joinChar.join(self.getEvalValue())
         if isinstance(self.attributeDesc, (desc.StringParam, desc.File)):
-            return '"{}"'.format(self.value)
-        return str(self.value)
+            return '"{}"'.format(self.getEvalValue())
+        return str(self.getEvalValue())

     def defaultValue(self):
         if isinstance(self.desc.value, types.FunctionType):

@@ -298,6 +307,8 @@ class Attribute(BaseObject):
     desc = Property(desc.Attribute, lambda self: self.attributeDesc, constant=True)
     valueChanged = Signal()
     value = Property(Variant, _get_value, _set_value, notify=valueChanged)
+    valueStr = Property(Variant, getValueStr, notify=valueChanged)
+    evalValue = Property(Variant, getEvalValue, notify=valueChanged)
     isOutput = Property(bool, isOutput.fget, constant=True)
     isLinkChanged = Signal()
     isLink = Property(bool, isLink.fget, notify=isLinkChanged)
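
getEvalValue expands ${VAR} references in string attribute values from the environment using string.Template.safe_substitute, so unresolved variables are left intact instead of raising. A quick standalone illustration of that behaviour (the database path is hypothetical):

import os
from string import Template

os.environ['ALICEVISION_SENSOR_DB'] = '/opt/aliceVision/share/cameraSensors.db'  # hypothetical path
print(Template('${ALICEVISION_SENSOR_DB}').safe_substitute(os.environ))
# -> /opt/aliceVision/share/cameraSensors.db
print(Template('${UNSET_VARIABLE}/db').safe_substitute(os.environ))
# -> ${UNSET_VARIABLE}/db  (safe_substitute leaves unknown variables untouched)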

View file

@ -1,12 +1,13 @@
from meshroom.common import BaseObject, Property, Variant, VariantList, JSValue from meshroom.common import BaseObject, Property, Variant, VariantList, JSValue
from meshroom.core import pyCompatibility from meshroom.core import pyCompatibility
from enum import Enum # available by default in python3. For python2: "pip install enum34" from enum import Enum # available by default in python3. For python2: "pip install enum34"
import math import math
import os import os
import psutil import psutil
import ast import ast
import distutils.util import distutils.util
import shlex
class Attribute(BaseObject): class Attribute(BaseObject):
""" """
@ -505,7 +506,7 @@ class CommandLineNode(Node):
chunk.saveStatusFile() chunk.saveStatusFile()
print(' - commandLine: {}'.format(cmd)) print(' - commandLine: {}'.format(cmd))
print(' - logFile: {}'.format(chunk.logFile)) print(' - logFile: {}'.format(chunk.logFile))
chunk.subprocess = psutil.Popen(cmd, stdout=logF, stderr=logF, shell=True) chunk.subprocess = psutil.Popen(shlex.split(cmd), stdout=logF, stderr=logF)
# store process static info into the status file # store process static info into the status file
# chunk.status.env = node.proc.environ() # chunk.status.env = node.proc.environ()
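
Switching from shell=True to shlex.split launches the process without an intermediate shell while still keeping quoted arguments together. A small standalone example of the splitting (the command line here is made up):

import shlex

cmd = 'aliceVision_cameraInit --sensorDatabase "/path/with spaces/sensors.db" --verboseLevel info'
print(shlex.split(cmd))
# ['aliceVision_cameraInit', '--sensorDatabase', '/path/with spaces/sensors.db', '--verboseLevel', 'info']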

View file

@@ -236,8 +236,6 @@ class Graph(BaseObject):
     @property
     def fileFeatures(self):
         """ Get loaded file supported features based on its version. """
-        if not self._filepath:
-            return []
         return Graph.IO.getFeaturesForVersion(self.header.get(Graph.IO.Keys.FileVersion, "0.0"))

     @Slot(str)

View file

@@ -145,243 +145,6 @@ def findFilesByTypeInFolder(folder, recursive=False):
    return output
def panoramaHdr(inputImages=None, inputViewpoints=None, inputIntrinsics=None, output='', graph=None):
"""
Create a new Graph with a Panorama HDR pipeline.
Args:
inputImages (list of str, optional): list of image file paths
inputViewpoints (list of Viewpoint, optional): list of Viewpoints
output (str, optional): the path to export reconstructed model to
Returns:
Graph: the created graph
"""
if not graph:
graph = Graph('PanoramaHDR')
with GraphModification(graph):
nodes = panoramaHdrPipeline(graph)
cameraInit = nodes[0]
if inputImages:
cameraInit.viewpoints.extend([{'path': image} for image in inputImages])
if inputViewpoints:
cameraInit.viewpoints.extend(inputViewpoints)
if inputIntrinsics:
cameraInit.intrinsics.extend(inputIntrinsics)
if output:
imageProcessing = nodes[-1]
graph.addNewNode('Publish', output=output, inputFiles=[imageProcessing.outputImages])
return graph
def panoramaFisheyeHdr(inputImages=None, inputViewpoints=None, inputIntrinsics=None, output='', graph=None):
if not graph:
graph = Graph('PanoramaFisheyeHDR')
with GraphModification(graph):
panoramaHdr(inputImages, inputViewpoints, inputIntrinsics, output, graph)
for panoramaInit in graph.nodesOfType("PanoramaInit"):
panoramaInit.attribute("useFisheye").value = True
for featureExtraction in graph.nodesOfType("FeatureExtraction"):
# when using fisheye images, 'sift' performs better than 'dspsift'
featureExtraction.attribute("describerTypes").value = ['sift']
# when using fisheye images, the overlap between images can be small
# and thus requires many features to get enough correspondences for cameras estimation
featureExtraction.attribute("describerPreset").value = 'high'
return graph
def panoramaHdrPipeline(graph):
"""
Instantiate an PanoramaHDR pipeline inside 'graph'.
Args:
graph (Graph/UIGraph): the graph in which nodes should be instantiated
Returns:
list of Node: the created nodes
"""
cameraInit = graph.addNewNode('CameraInit')
try:
# fisheye4 does not work well in the ParoramaEstimation, so here we avoid to use it.
cameraInit.attribute('allowedCameraModels').value.remove("fisheye4")
except ValueError:
pass
panoramaPrepareImages = graph.addNewNode('PanoramaPrepareImages',
input=cameraInit.output)
ldr2hdrSampling = graph.addNewNode('LdrToHdrSampling',
input=panoramaPrepareImages.output)
ldr2hdrCalibration = graph.addNewNode('LdrToHdrCalibration',
input=ldr2hdrSampling.input,
userNbBrackets=ldr2hdrSampling.userNbBrackets,
byPass=ldr2hdrSampling.byPass,
channelQuantizationPower=ldr2hdrSampling.channelQuantizationPower,
samples=ldr2hdrSampling.output)
ldr2hdrMerge = graph.addNewNode('LdrToHdrMerge',
input=ldr2hdrCalibration.input,
userNbBrackets=ldr2hdrCalibration.userNbBrackets,
byPass=ldr2hdrCalibration.byPass,
channelQuantizationPower=ldr2hdrCalibration.channelQuantizationPower,
response=ldr2hdrCalibration.response)
featureExtraction = graph.addNewNode('FeatureExtraction',
input=ldr2hdrMerge.outSfMData,
describerQuality='high')
panoramaInit = graph.addNewNode('PanoramaInit',
input=featureExtraction.input,
dependency=[featureExtraction.output] # Workaround for tractor submission with a fake dependency
)
imageMatching = graph.addNewNode('ImageMatching',
input=panoramaInit.outSfMData,
featuresFolders=[featureExtraction.output],
method='FrustumOrVocabularyTree')
featureMatching = graph.addNewNode('FeatureMatching',
input=imageMatching.input,
featuresFolders=imageMatching.featuresFolders,
imagePairsList=imageMatching.output,
describerTypes=featureExtraction.describerTypes)
panoramaEstimation = graph.addNewNode('PanoramaEstimation',
input=featureMatching.input,
featuresFolders=featureMatching.featuresFolders,
matchesFolders=[featureMatching.output],
describerTypes=featureMatching.describerTypes)
panoramaOrientation = graph.addNewNode('SfMTransform',
input=panoramaEstimation.output,
method='manual')
panoramaWarping = graph.addNewNode('PanoramaWarping',
input=panoramaOrientation.output)
panoramaSeams = graph.addNewNode('PanoramaSeams',
input=panoramaWarping.input,
warpingFolder=panoramaWarping.output
)
panoramaCompositing = graph.addNewNode('PanoramaCompositing',
input=panoramaSeams.input,
warpingFolder=panoramaSeams.warpingFolder,
labels=panoramaSeams.output
)
panoramaMerging = graph.addNewNode('PanoramaMerging',
input=panoramaCompositing.input,
compositingFolder=panoramaCompositing.output
)
imageProcessing = graph.addNewNode('ImageProcessing',
input=panoramaMerging.outputPanorama,
fixNonFinite=True,
fillHoles=True,
extension='exr')
return [
cameraInit,
featureExtraction,
panoramaInit,
imageMatching,
featureMatching,
panoramaEstimation,
panoramaOrientation,
panoramaWarping,
panoramaSeams,
panoramaCompositing,
panoramaMerging,
imageProcessing,
]
def photogrammetry(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None):
"""
Create a new Graph with a complete photogrammetry pipeline.
Args:
inputImages (list of str, optional): list of image file paths
inputViewpoints (list of Viewpoint, optional): list of Viewpoints
output (str, optional): the path to export reconstructed model to
Returns:
Graph: the created graph
"""
if not graph:
graph = Graph('Photogrammetry')
with GraphModification(graph):
sfmNodes, mvsNodes = photogrammetryPipeline(graph)
cameraInit = sfmNodes[0]
cameraInit.viewpoints.extend([{'path': image} for image in inputImages])
cameraInit.viewpoints.extend(inputViewpoints)
cameraInit.intrinsics.extend(inputIntrinsics)
if output:
texturing = mvsNodes[-1]
graph.addNewNode('Publish', output=output, inputFiles=[texturing.outputMesh,
texturing.outputMaterial,
texturing.outputTextures])
return graph
def photogrammetryPipeline(graph):
"""
Instantiate a complete photogrammetry pipeline inside 'graph'.
Args:
graph (Graph/UIGraph): the graph in which nodes should be instantiated
Returns:
list of Node: the created nodes
"""
sfmNodes = sfmPipeline(graph)
mvsNodes = mvsPipeline(graph, sfmNodes[-1])
# store current pipeline version in graph header
graph.header.update({'pipelineVersion': __version__})
return sfmNodes, mvsNodes
def sfmPipeline(graph):
"""
Instantiate a SfM pipeline inside 'graph'.
Args:
graph (Graph/UIGraph): the graph in which nodes should be instantiated
Returns:
list of Node: the created nodes
"""
cameraInit = graph.addNewNode('CameraInit')
featureExtraction = graph.addNewNode('FeatureExtraction',
input=cameraInit.output)
imageMatching = graph.addNewNode('ImageMatching',
input=featureExtraction.input,
featuresFolders=[featureExtraction.output])
featureMatching = graph.addNewNode('FeatureMatching',
input=imageMatching.input,
featuresFolders=imageMatching.featuresFolders,
imagePairsList=imageMatching.output,
describerTypes=featureExtraction.describerTypes)
structureFromMotion = graph.addNewNode('StructureFromMotion',
input=featureMatching.input,
featuresFolders=featureMatching.featuresFolders,
matchesFolders=[featureMatching.output],
describerTypes=featureMatching.describerTypes)
return [
cameraInit,
featureExtraction,
imageMatching,
featureMatching,
structureFromMotion
]
def mvsPipeline(graph, sfm=None):
    """
    Instantiate a MVS pipeline inside 'graph'.

@@ -470,134 +233,3 @@ def sfmAugmentation(graph, sourceSfm, withMVS=False):
        mvsNodes = mvsPipeline(graph, structureFromMotion)
    return sfmNodes, mvsNodes
def cameraTrackingPipeline(graph, sourceSfm=None):
"""
Instantiate a camera tracking pipeline inside 'graph'.
Args:
graph (Graph/UIGraph): the graph in which nodes should be instantiated
Returns:
list of Node: the created nodes
"""
with GraphModification(graph):
if sourceSfm is None:
cameraInitT, featureExtractionT, imageMatchingT, featureMatchingT, structureFromMotionT = sfmPipeline(graph)
else:
sfmNodes, _ = sfmAugmentation(graph, sourceSfm)
cameraInitT, featureExtractionT, imageMatchingT, featureMatchingT, structureFromMotionT = sfmNodes
distortionCalibrationT = graph.addNewNode('DistortionCalibration',
input=cameraInitT.output)
graph.removeEdge(featureMatchingT.input)
graph.addEdge(distortionCalibrationT.outSfMData, featureMatchingT.input)
imageMatchingT.attribute("nbMatches").value = 5 # voctree nb matches
imageMatchingT.attribute("nbNeighbors").value = 10
structureFromMotionT.attribute("minNumberOfMatches").value = 0
structureFromMotionT.attribute("minInputTrackLength").value = 5
structureFromMotionT.attribute("minNumberOfObservationsForTriangulation").value = 3
structureFromMotionT.attribute("minAngleForTriangulation").value = 1.0
structureFromMotionT.attribute("minAngleForLandmark").value = 0.5
exportAnimatedCameraT = graph.addNewNode('ExportAnimatedCamera', input=structureFromMotionT.output)
if sourceSfm:
graph.addEdge(sourceSfm.output, exportAnimatedCameraT.sfmDataFilter)
# store current pipeline version in graph header
graph.header.update({'pipelineVersion': __version__})
return [
cameraInitT,
featureExtractionT,
imageMatchingT,
featureMatchingT,
distortionCalibrationT,
structureFromMotionT,
exportAnimatedCameraT,
]
def cameraTracking(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None):
if not graph:
graph = Graph('Camera Tracking')
with GraphModification(graph):
trackingNodes = cameraTrackingPipeline(graph)
cameraInit = trackingNodes[0]
cameraInit.viewpoints.extend([{'path': image} for image in inputImages])
cameraInit.viewpoints.extend(inputViewpoints)
cameraInit.intrinsics.extend(inputIntrinsics)
if output:
exportNode = trackingNodes[-1]
graph.addNewNode('Publish', output=output, inputFiles=[exportNode.output])
return graph
def photogrammetryAndCameraTracking(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None):
if not graph:
graph = Graph('Photogrammetry And Camera Tracking')
with GraphModification(graph):
cameraInit, featureExtraction, imageMatching, featureMatching, structureFromMotion = sfmPipeline(graph)
cameraInitT, featureExtractionT, imageMatchingMultiT, featureMatchingT, distortionCalibrationT, structureFromMotionT, exportAnimatedCameraT = cameraTrackingPipeline(graph, structureFromMotion)
cameraInit.viewpoints.extend([{'path': image} for image in inputImages])
cameraInit.viewpoints.extend(inputViewpoints)
cameraInit.intrinsics.extend(inputIntrinsics)
if output:
graph.addNewNode('Publish', output=output, inputFiles=[exportAnimatedCameraT.output])
return graph
def photogrammetryDraft(inputImages=None, inputViewpoints=None, inputIntrinsics=None, output='', graph=None):
"""
Create a new Graph with a complete photogrammetry pipeline without requiring a NVIDIA CUDA video card. Something also named Draft Meshing.
More information on that pipeline https://github.com/alicevision/meshroom/wiki/Draft-Meshing
Args:
inputImages (list of str, optional): list of image file paths
inputViewpoints (list of Viewpoint, optional): list of Viewpoints
output (str, optional): the path to export reconstructed model to
Returns:
Graph: the created graph
"""
if not graph:
graph = Graph('PhotogrammetryDraft')
with GraphModification(graph):
sfmNodes = sfmPipeline(graph)
sfmNode = sfmNodes[-1]
meshing = graph.addNewNode('Meshing',
input=sfmNode.output)
meshFiltering = graph.addNewNode('MeshFiltering',
inputMesh=meshing.outputMesh)
texturing = graph.addNewNode('Texturing',
input=meshing.output,
inputMesh=meshFiltering.outputMesh)
cameraInit = sfmNodes[0]
if inputImages:
cameraInit.viewpoints.extend([{'path': image} for image in inputImages])
if inputViewpoints:
cameraInit.viewpoints.extend(inputViewpoints)
if inputIntrinsics:
cameraInit.intrinsics.extend(inputIntrinsics)
if output:
graph.addNewNode('Publish', output=output, inputFiles=[texturing.outputMesh,
texturing.outputMaterial,
texturing.outputTextures])
return graph

View file

@@ -159,7 +159,7 @@ The metadata needed are:
             name='sensorDatabase',
             label='Sensor Database',
             description='''Camera sensor width database path.''',
-            value=os.environ.get('ALICEVISION_SENSOR_DB', ''),
+            value='${ALICEVISION_SENSOR_DB}',
             uid=[],
         ),
         desc.FloatParam(

View file

@ -125,7 +125,7 @@ class CameraLocalization(desc.CommandLineNode):
name='voctree', name='voctree',
label='Voctree', label='Voctree',
description='''[voctree] Filename for the vocabulary tree''', description='''[voctree] Filename for the vocabulary tree''',
value=os.environ.get('ALICEVISION_VOCTREE', ''), value='${ALICEVISION_VOCTREE}',
uid=[0], uid=[0],
), ),
desc.File( desc.File(

View file

@ -109,7 +109,7 @@ class CameraRigCalibration(desc.CommandLineNode):
name='voctree', name='voctree',
label='Voctree', label='Voctree',
description='''[voctree] Filename for the vocabulary tree''', description='''[voctree] Filename for the vocabulary tree''',
value=os.environ.get('ALICEVISION_VOCTREE', ''), value='${ALICEVISION_VOCTREE}',
uid=[0], uid=[0],
), ),
desc.File( desc.File(

View file

@ -116,7 +116,7 @@ class CameraRigLocalization(desc.CommandLineNode):
name='voctree', name='voctree',
label='Voctree', label='Voctree',
description='''[voctree] Filename for the vocabulary tree''', description='''[voctree] Filename for the vocabulary tree''',
value=os.environ.get('ALICEVISION_VOCTREE', ''), value='${ALICEVISION_VOCTREE}',
uid=[0], uid=[0],
), ),
desc.File( desc.File(

View file

@ -74,7 +74,7 @@ If images have known poses, use frustum intersection else use VocabularuTree.
name='tree', name='tree',
label='Voc Tree: Tree', label='Voc Tree: Tree',
description='Input name for the vocabulary tree file.', description='Input name for the vocabulary tree file.',
value=os.environ.get('ALICEVISION_VOCTREE', ''), value='${ALICEVISION_VOCTREE}',
uid=[], uid=[],
enabled=lambda node: 'VocabularyTree' in node.method.value, enabled=lambda node: 'VocabularyTree' in node.method.value,
), ),

View file

@ -65,7 +65,7 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
name='tree', name='tree',
label='Voc Tree: Tree', label='Voc Tree: Tree',
description='Input name for the vocabulary tree file.', description='Input name for the vocabulary tree file.',
value=os.environ.get('ALICEVISION_VOCTREE', ''), value='${ALICEVISION_VOCTREE}',
uid=[], uid=[],
enabled=lambda node: 'VocabularyTree' in node.method.value, enabled=lambda node: 'VocabularyTree' in node.method.value,
), ),

View file

@ -95,14 +95,14 @@ You can extract frames at regular interval by configuring only the min/maxFrameS
name='sensorDbPath', name='sensorDbPath',
label='Sensor Db Path', label='Sensor Db Path',
description='''Camera sensor width database path.''', description='''Camera sensor width database path.''',
value=os.environ.get('ALICEVISION_SENSOR_DB', ''), value='${ALICEVISION_SENSOR_DB}',
uid=[0], uid=[0],
), ),
desc.File( desc.File(
name='voctreePath', name='voctreePath',
label='Voctree Path', label='Voctree Path',
description='''Vocabulary tree path.''', description='''Vocabulary tree path.''',
value=os.environ.get('ALICEVISION_VOCTREE', ''), value='${ALICEVISION_VOCTREE}',
uid=[0], uid=[0],
), ),
desc.BoolParam( desc.BoolParam(

View file

@@ -0,0 +1,278 @@
{
"header": {
"pipelineVersion": "2.2",
"releaseVersion": "2021.1.0",
"fileVersion": "1.1",
"nodesVersions": {
"ExportAnimatedCamera": "2.0",
"FeatureMatching": "2.0",
"DistortionCalibration": "2.0",
"CameraInit": "7.0",
"ImageMatching": "2.0",
"FeatureExtraction": "1.1",
"StructureFromMotion": "2.0"
}
},
"graph": {
"DistortionCalibration_1": {
"inputs": {
"verboseLevel": "info",
"input": "{CameraInit_1.output}",
"lensGrid": []
},
"nodeType": "DistortionCalibration",
"uids": {
"0": "8afea9d171904cdb6ba1c0b116cb60de3ccb6fb4"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm"
},
"position": [
200,
160
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"ImageMatching_1": {
"inputs": {
"minNbImages": 200,
"nbNeighbors": 10,
"tree": "${ALICEVISION_VOCTREE}",
"maxDescriptors": 500,
"verboseLevel": "info",
"weights": "",
"nbMatches": 5,
"input": "{FeatureExtraction_1.input}",
"method": "SequentialAndVocabularyTree",
"featuresFolders": [
"{FeatureExtraction_1.output}"
]
},
"nodeType": "ImageMatching",
"uids": {
"0": "832b744de5fa804d7d63ea255419b1afaf24f723"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/imageMatches.txt"
},
"position": [
400,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureExtraction_1": {
"inputs": {
"verboseLevel": "info",
"maxThreads": 0,
"describerTypes": [
"dspsift"
],
"maxNbFeatures": 0,
"relativePeakThreshold": 0.01,
"forceCpuExtraction": true,
"masksFolder": "",
"contrastFiltering": "GridSort",
"describerQuality": "normal",
"gridFiltering": true,
"input": "{CameraInit_1.output}",
"describerPreset": "normal"
},
"nodeType": "FeatureExtraction",
"uids": {
"0": "a07fb8d05b63327d05461954c2fd2a00f201275b"
},
"parallelization": {
"blockSize": 40,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
200,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"StructureFromMotion_1": {
"inputs": {
"localizerEstimatorMaxIterations": 4096,
"minAngleForLandmark": 0.5,
"filterTrackForks": false,
"minNumberOfObservationsForTriangulation": 3,
"maxAngleInitialPair": 40.0,
"observationConstraint": "Scale",
"maxNumberOfMatches": 0,
"localizerEstimator": "acransac",
"describerTypes": "{FeatureMatching_1.describerTypes}",
"lockScenePreviouslyReconstructed": false,
"localBAGraphDistance": 1,
"minNbCamerasToRefinePrincipalPoint": 3,
"lockAllIntrinsics": false,
"input": "{FeatureMatching_1.input}",
"featuresFolders": "{FeatureMatching_1.featuresFolders}",
"useRigConstraint": true,
"rigMinNbCamerasForCalibration": 20,
"initialPairA": "",
"initialPairB": "",
"interFileExtension": ".abc",
"useLocalBA": true,
"computeStructureColor": true,
"matchesFolders": [
"{FeatureMatching_1.output}"
],
"minInputTrackLength": 5,
"useOnlyMatchesFromInputFolder": false,
"verboseLevel": "info",
"minAngleForTriangulation": 1.0,
"maxReprojectionError": 4.0,
"minAngleInitialPair": 5.0,
"minNumberOfMatches": 0,
"localizerEstimatorError": 0.0
},
"nodeType": "StructureFromMotion",
"uids": {
"0": "4d198974784fd71f5a1c189e10c2914e56523585"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/sfm.abc",
"extraInfoFolder": "{cache}/{nodeType}/{uid0}/",
"outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm"
},
"position": [
800,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"ExportAnimatedCamera_1": {
"inputs": {
"exportFullROD": false,
"undistortedImageType": "exr",
"exportUVMaps": true,
"verboseLevel": "info",
"sfmDataFilter": "",
"exportUndistortedImages": false,
"input": "{StructureFromMotion_1.output}",
"viewFilter": "",
"correctPrincipalPoint": true
},
"nodeType": "ExportAnimatedCamera",
"uids": {
"0": "31413f19e51b239874733f13f9628286fd185c18"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 1
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/",
"outputUndistorted": "{cache}/{nodeType}/{uid0}/undistort",
"outputCamera": "{cache}/{nodeType}/{uid0}/camera.abc"
},
"position": [
1000,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"CameraInit_1": {
"inputs": {
"groupCameraFallback": "folder",
"intrinsics": [],
"viewIdRegex": ".*?(\\d+)",
"defaultFieldOfView": 45.0,
"allowedCameraModels": [
"pinhole",
"radial1",
"radial3",
"brown",
"fisheye4",
"fisheye1",
"3deanamorphic4",
"3deradial4",
"3declassicld"
],
"verboseLevel": "info",
"viewIdMethod": "metadata",
"viewpoints": [],
"useInternalWhiteBalance": true,
"sensorDatabase": "${ALICEVISION_SENSOR_DB}"
},
"nodeType": "CameraInit",
"uids": {
"0": "f9436e97e444fa71a05aa5cf7639b206df8ba282"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm"
},
"position": [
0,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureMatching_1": {
"inputs": {
"verboseLevel": "info",
"describerTypes": "{FeatureExtraction_1.describerTypes}",
"exportDebugFiles": false,
"crossMatching": false,
"geometricError": 0.0,
"maxMatches": 0,
"matchFromKnownCameraPoses": false,
"savePutativeMatches": false,
"guidedMatching": false,
"imagePairsList": "{ImageMatching_1.output}",
"geometricEstimator": "acransac",
"geometricFilterType": "fundamental_matrix",
"maxIteration": 2048,
"distanceRatio": 0.8,
"input": "{DistortionCalibration_1.outSfMData}",
"photometricMatchingMethod": "ANN_L2",
"knownPosesGeometricErrorMax": 5.0,
"featuresFolders": "{ImageMatching_1.featuresFolders}"
},
"nodeType": "FeatureMatching",
"uids": {
"0": "8386c096445d6988ea7d14f1ae3192978a4dd2e8"
},
"parallelization": {
"blockSize": 20,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
600,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
}
}
}
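
These templates are ordinary .mg project files; once discovered, a template can be loaded by name. A minimal sketch based only on the calls visible in this diff (the template name 'cameraTracking' is assumed here for the file above, and Meshroom's node plugins are expected to be loaded):

from meshroom import multiview
from meshroom.core import pipelineTemplates

graph = multiview.Graph(name='cameraTracking')
with multiview.GraphModification(graph):
    # setupProjectFile=False loads the template content without binding the graph to that .mg file
    graph.load(pipelineTemplates['cameraTracking'], setupProjectFile=False)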

View file

@@ -0,0 +1,591 @@
{
"header": {
"nodesVersions": {
"PanoramaSeams": "2.0",
"FeatureMatching": "2.0",
"ImageProcessing": "3.0",
"PanoramaCompositing": "2.0",
"LdrToHdrMerge": "4.0",
"LdrToHdrSampling": "4.0",
"LdrToHdrCalibration": "3.0",
"PanoramaEstimation": "1.0",
"PanoramaInit": "2.0",
"PanoramaMerging": "1.0",
"SfMTransform": "3.0",
"CameraInit": "7.0",
"ImageMatching": "2.0",
"FeatureExtraction": "1.1",
"PanoramaPrepareImages": "1.1",
"PanoramaWarping": "1.0"
},
"releaseVersion": "2021.1.0",
"fileVersion": "1.1"
},
"graph": {
"LdrToHdrMerge_1": {
"inputs": {
"verboseLevel": "info",
"fusionWeight": "gaussian",
"channelQuantizationPower": "{LdrToHdrCalibration_1.channelQuantizationPower}",
"nbBrackets": 0,
"enableHighlight": false,
"offsetRefBracketIndex": 1,
"storageDataType": "float",
"highlightTargetLux": 120000.0,
"byPass": "{LdrToHdrCalibration_1.byPass}",
"highlightCorrectionFactor": 1.0,
"input": "{LdrToHdrCalibration_1.input}",
"userNbBrackets": "{LdrToHdrCalibration_1.userNbBrackets}",
"response": "{LdrToHdrCalibration_1.response}"
},
"nodeType": "LdrToHdrMerge",
"uids": {
"0": "9b90e3b468adc487fe2905e0cc78328216966317"
},
"parallelization": {
"blockSize": 2,
"split": 0,
"size": 0
},
"outputs": {
"outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm"
},
"position": [
800,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"ImageProcessing_1": {
"inputs": {
"outputFormat": "rgba",
"sharpenFilter": {
"threshold": 0.0,
"width": 3,
"sharpenFilterEnabled": false,
"contrast": 1.0
},
"extension": "exr",
"exposureCompensation": false,
"storageDataType": "float",
"inputFolders": [],
"verboseLevel": "info",
"metadataFolders": [],
"claheFilter": {
"claheClipLimit": 4.0,
"claheTileGridSize": 8,
"claheEnabled": false
},
"medianFilter": 0,
"fillHoles": true,
"reconstructedViewsOnly": false,
"input": "{PanoramaMerging_1.outputPanorama}",
"noiseFilter": {
"noiseEnabled": false,
"noiseMethod": "uniform",
"noiseB": 1.0,
"noiseMono": true,
"noiseA": 0.0
},
"scaleFactor": 1.0,
"bilateralFilter": {
"bilateralFilterDistance": 0,
"bilateralFilterSigmaColor": 0.0,
"bilateralFilterSigmaSpace": 0.0,
"bilateralFilterEnabled": false
},
"contrast": 1.0,
"fixNonFinite": true
},
"nodeType": "ImageProcessing",
"uids": {
"0": "494b97af203ddbe4767c922a6c5795297cf53eef"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/",
"outputImages": "{cache}/{nodeType}/{uid0}/panorama.exr",
"outSfMData": ""
},
"position": [
3000,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaWarping_1": {
"inputs": {
"panoramaWidth": 10000,
"maxPanoramaWidth": 70000,
"verboseLevel": "info",
"percentUpscale": 50,
"input": "{SfMTransform_1.output}",
"storageDataType": "float",
"estimateResolution": true
},
"nodeType": "PanoramaWarping",
"uids": {
"0": "45cca14aba2a8c4f68c79a15d3fbc48f30ae9d66"
},
"parallelization": {
"blockSize": 5,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
2200,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"LdrToHdrCalibration_1": {
"inputs": {
"samples": "{LdrToHdrSampling_1.output}",
"channelQuantizationPower": "{LdrToHdrSampling_1.channelQuantizationPower}",
"maxTotalPoints": 1000000,
"nbBrackets": 0,
"calibrationMethod": "debevec",
"calibrationWeight": "default",
"verboseLevel": "info",
"byPass": "{LdrToHdrSampling_1.byPass}",
"input": "{LdrToHdrSampling_1.input}",
"userNbBrackets": "{LdrToHdrSampling_1.userNbBrackets}"
},
"nodeType": "LdrToHdrCalibration",
"uids": {
"0": "9225abd943d28be4387a8a8902711d0b7c604a2a"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"response": "{cache}/{nodeType}/{uid0}/response.csv"
},
"position": [
600,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"LdrToHdrSampling_1": {
"inputs": {
"blockSize": 256,
"nbBrackets": 0,
"verboseLevel": "info",
"radius": 5,
"byPass": false,
"channelQuantizationPower": 10,
"debug": false,
"input": "{PanoramaPrepareImages_1.output}",
"maxCountSample": 200,
"userNbBrackets": 0
},
"nodeType": "LdrToHdrSampling",
"uids": {
"0": "af67674ecc8524592fe2b217259c241167e28dcd"
},
"parallelization": {
"blockSize": 2,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
400,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"ImageMatching_1": {
"inputs": {
"minNbImages": 200,
"nbNeighbors": 5,
"tree": "${ALICEVISION_VOCTREE}",
"maxDescriptors": 500,
"verboseLevel": "info",
"weights": "",
"nbMatches": 40,
"input": "{PanoramaInit_1.outSfMData}",
"method": "FrustumOrVocabularyTree",
"featuresFolders": [
"{FeatureExtraction_1.output}"
]
},
"nodeType": "ImageMatching",
"uids": {
"0": "a076f9e959d62b3a6f63d3f6493527b857eab8d6"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/imageMatches.txt"
},
"position": [
1400,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureExtraction_1": {
"inputs": {
"verboseLevel": "info",
"maxThreads": 0,
"describerTypes": [
"sift"
],
"maxNbFeatures": 0,
"relativePeakThreshold": 0.01,
"forceCpuExtraction": true,
"masksFolder": "",
"contrastFiltering": "GridSort",
"describerQuality": "high",
"gridFiltering": true,
"input": "{LdrToHdrMerge_1.outSfMData}",
"describerPreset": "high"
},
"nodeType": "FeatureExtraction",
"uids": {
"0": "04f8824c2e2f206b47f05edaf76def15fa91446b"
},
"parallelization": {
"blockSize": 40,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
1000,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaSeams_1": {
"inputs": {
"verboseLevel": "info",
"input": "{PanoramaWarping_1.input}",
"warpingFolder": "{PanoramaWarping_1.output}",
"maxWidth": 5000,
"useGraphCut": true
},
"nodeType": "PanoramaSeams",
"uids": {
"0": "dd02562c5c3b1e18e42561d99590cbf4ff5ba35a"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/labels.exr"
},
"position": [
2400,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaCompositing_1": {
"inputs": {
"warpingFolder": "{PanoramaSeams_1.warpingFolder}",
"maxThreads": 4,
"labels": "{PanoramaSeams_1.output}",
"verboseLevel": "info",
"overlayType": "none",
"compositerType": "multiband",
"input": "{PanoramaSeams_1.input}",
"storageDataType": "float"
},
"nodeType": "PanoramaCompositing",
"uids": {
"0": "1f1e629021e2280291046226e009a52dbb7809c1"
},
"parallelization": {
"blockSize": 5,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
2600,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"CameraInit_1": {
"inputs": {
"groupCameraFallback": "folder",
"intrinsics": [],
"viewIdRegex": ".*?(\\d+)",
"defaultFieldOfView": 45.0,
"allowedCameraModels": [
"pinhole",
"radial1",
"radial3",
"brown",
"fisheye1",
"3deanamorphic4",
"3deradial4",
"3declassicld"
],
"verboseLevel": "info",
"viewIdMethod": "metadata",
"viewpoints": [],
"useInternalWhiteBalance": true,
"sensorDatabase": "${ALICEVISION_SENSOR_DB}"
},
"nodeType": "CameraInit",
"uids": {
"0": "f9436e97e444fa71a05aa5cf7639b206df8ba282"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm"
},
"position": [
0,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaPrepareImages_1": {
"inputs": {
"verboseLevel": "info",
"input": "{CameraInit_1.output}"
},
"nodeType": "PanoramaPrepareImages",
"uids": {
"0": "6956c52a8d18cb4cdb7ceb0db68f4deb84a37aee"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm"
},
"position": [
200,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"SfMTransform_1": {
"inputs": {
"applyScale": true,
"scale": 1.0,
"applyTranslation": true,
"landmarksDescriberTypes": [
"sift",
"dspsift",
"akaze"
],
"markers": [],
"method": "manual",
"verboseLevel": "info",
"input": "{PanoramaEstimation_1.output}",
"applyRotation": true,
"manualTransform": {
"manualTranslation": {
"y": 0.0,
"x": 0.0,
"z": 0.0
},
"manualRotation": {
"y": 0.0,
"x": 0.0,
"z": 0.0
},
"manualScale": 1.0
},
"transformation": ""
},
"nodeType": "SfMTransform",
"uids": {
"0": "b8568fb40b68b42ac80c18df2dcdf600744fe3e1"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/panorama.abc",
"outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm"
},
"position": [
2000,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaMerging_1": {
"inputs": {
"verboseLevel": "info",
"compositingFolder": "{PanoramaCompositing_1.output}",
"outputFileType": "exr",
"storageDataType": "float",
"input": "{PanoramaCompositing_1.input}"
},
"nodeType": "PanoramaMerging",
"uids": {
"0": "70edd7fe8194bf35dcb0b221141cd4abd2354547"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"outputPanorama": "{cache}/{nodeType}/{uid0}/panorama.{outputFileTypeValue}"
},
"position": [
2800,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaEstimation_1": {
"inputs": {
"intermediateRefineWithFocalDist": false,
"offsetLongitude": 0.0,
"matchesFolders": [
"{FeatureMatching_1.output}"
],
"filterMatches": false,
"rotationAveragingWeighting": true,
"offsetLatitude": 0.0,
"verboseLevel": "info",
"maxAngularError": 100.0,
"lockAllIntrinsics": false,
"refine": true,
"input": "{FeatureMatching_1.input}",
"intermediateRefineWithFocal": false,
"describerTypes": "{FeatureMatching_1.describerTypes}",
"relativeRotation": "rotation_matrix",
"maxAngleToPrior": 20.0,
"rotationAveraging": "L2_minimization",
"featuresFolders": "{FeatureMatching_1.featuresFolders}"
},
"nodeType": "PanoramaEstimation",
"uids": {
"0": "47b0976fc98eefcbc0342bbb63e7d27ef3e0d4de"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/panorama.abc",
"outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm"
},
"position": [
1800,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaInit_1": {
"inputs": {
"useFisheye": true,
"fisheyeCenterOffset": {
"fisheyeCenterOffset_y": 0.0,
"fisheyeCenterOffset_x": 0.0
},
"initializeCameras": "No",
"nbViewsPerLine": [],
"debugFisheyeCircleEstimation": false,
"verboseLevel": "info",
"dependency": [
"{FeatureExtraction_1.output}"
],
"estimateFisheyeCircle": true,
"input": "{FeatureExtraction_1.input}",
"yawCW": 1,
"config": "",
"fisheyeRadius": 96.0,
"inputAngle": "None"
},
"nodeType": "PanoramaInit",
"uids": {
"0": "2fd95a957eb42ffc8fb1c24d2666afcd859ba079"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm"
},
"position": [
1200,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureMatching_1": {
"inputs": {
"verboseLevel": "info",
"describerTypes": "{FeatureExtraction_1.describerTypes}",
"exportDebugFiles": false,
"crossMatching": false,
"geometricError": 0.0,
"maxMatches": 0,
"matchFromKnownCameraPoses": false,
"savePutativeMatches": false,
"guidedMatching": false,
"imagePairsList": "{ImageMatching_1.output}",
"geometricEstimator": "acransac",
"geometricFilterType": "fundamental_matrix",
"maxIteration": 2048,
"distanceRatio": 0.8,
"input": "{ImageMatching_1.input}",
"photometricMatchingMethod": "ANN_L2",
"knownPosesGeometricErrorMax": 5.0,
"featuresFolders": "{ImageMatching_1.featuresFolders}"
},
"nodeType": "FeatureMatching",
"uids": {
"0": "c0fbe0b12fe47ada6a1ca8f74d266e99c1cc548c"
},
"parallelization": {
"blockSize": 20,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
1600,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
}
}
}

View file

@@ -0,0 +1,591 @@
{
"header": {
"nodesVersions": {
"PanoramaSeams": "2.0",
"FeatureMatching": "2.0",
"ImageProcessing": "3.0",
"PanoramaCompositing": "2.0",
"LdrToHdrMerge": "4.0",
"LdrToHdrSampling": "4.0",
"LdrToHdrCalibration": "3.0",
"PanoramaEstimation": "1.0",
"PanoramaInit": "2.0",
"PanoramaMerging": "1.0",
"SfMTransform": "3.0",
"CameraInit": "7.0",
"ImageMatching": "2.0",
"FeatureExtraction": "1.1",
"PanoramaPrepareImages": "1.1",
"PanoramaWarping": "1.0"
},
"releaseVersion": "2021.1.0",
"fileVersion": "1.1"
},
"graph": {
"LdrToHdrMerge_1": {
"inputs": {
"verboseLevel": "info",
"fusionWeight": "gaussian",
"channelQuantizationPower": "{LdrToHdrCalibration_1.channelQuantizationPower}",
"nbBrackets": 0,
"enableHighlight": false,
"offsetRefBracketIndex": 1,
"storageDataType": "float",
"highlightTargetLux": 120000.0,
"byPass": "{LdrToHdrCalibration_1.byPass}",
"highlightCorrectionFactor": 1.0,
"input": "{LdrToHdrCalibration_1.input}",
"userNbBrackets": "{LdrToHdrCalibration_1.userNbBrackets}",
"response": "{LdrToHdrCalibration_1.response}"
},
"nodeType": "LdrToHdrMerge",
"uids": {
"0": "9b90e3b468adc487fe2905e0cc78328216966317"
},
"parallelization": {
"blockSize": 2,
"split": 0,
"size": 0
},
"outputs": {
"outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm"
},
"position": [
800,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"ImageProcessing_1": {
"inputs": {
"outputFormat": "rgba",
"sharpenFilter": {
"threshold": 0.0,
"width": 3,
"sharpenFilterEnabled": false,
"contrast": 1.0
},
"extension": "exr",
"exposureCompensation": false,
"storageDataType": "float",
"inputFolders": [],
"verboseLevel": "info",
"metadataFolders": [],
"claheFilter": {
"claheClipLimit": 4.0,
"claheTileGridSize": 8,
"claheEnabled": false
},
"medianFilter": 0,
"fillHoles": true,
"reconstructedViewsOnly": false,
"input": "{PanoramaMerging_1.outputPanorama}",
"noiseFilter": {
"noiseEnabled": false,
"noiseMethod": "uniform",
"noiseB": 1.0,
"noiseMono": true,
"noiseA": 0.0
},
"scaleFactor": 1.0,
"bilateralFilter": {
"bilateralFilterDistance": 0,
"bilateralFilterSigmaColor": 0.0,
"bilateralFilterSigmaSpace": 0.0,
"bilateralFilterEnabled": false
},
"contrast": 1.0,
"fixNonFinite": true
},
"nodeType": "ImageProcessing",
"uids": {
"0": "d7845b276d97c3489223cce16a1e9d581d98a832"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/",
"outputImages": "{cache}/{nodeType}/{uid0}/panorama.exr",
"outSfMData": ""
},
"position": [
3000,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaWarping_1": {
"inputs": {
"panoramaWidth": 10000,
"maxPanoramaWidth": 70000,
"verboseLevel": "info",
"percentUpscale": 50,
"input": "{SfMTransform_1.output}",
"storageDataType": "float",
"estimateResolution": true
},
"nodeType": "PanoramaWarping",
"uids": {
"0": "f2971d0c73b15fa99cbccbc9515de346ca141a1e"
},
"parallelization": {
"blockSize": 5,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
2200,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"LdrToHdrCalibration_1": {
"inputs": {
"samples": "{LdrToHdrSampling_1.output}",
"channelQuantizationPower": "{LdrToHdrSampling_1.channelQuantizationPower}",
"maxTotalPoints": 1000000,
"nbBrackets": 0,
"calibrationMethod": "debevec",
"calibrationWeight": "default",
"verboseLevel": "info",
"byPass": "{LdrToHdrSampling_1.byPass}",
"input": "{LdrToHdrSampling_1.input}",
"userNbBrackets": "{LdrToHdrSampling_1.userNbBrackets}"
},
"nodeType": "LdrToHdrCalibration",
"uids": {
"0": "9225abd943d28be4387a8a8902711d0b7c604a2a"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"response": "{cache}/{nodeType}/{uid0}/response.csv"
},
"position": [
600,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"LdrToHdrSampling_1": {
"inputs": {
"blockSize": 256,
"nbBrackets": 0,
"verboseLevel": "info",
"radius": 5,
"byPass": false,
"channelQuantizationPower": 10,
"debug": false,
"input": "{PanoramaPrepareImages_1.output}",
"maxCountSample": 200,
"userNbBrackets": 0
},
"nodeType": "LdrToHdrSampling",
"uids": {
"0": "af67674ecc8524592fe2b217259c241167e28dcd"
},
"parallelization": {
"blockSize": 2,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
400,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"ImageMatching_1": {
"inputs": {
"minNbImages": 200,
"nbNeighbors": 5,
"tree": "${ALICEVISION_VOCTREE}",
"maxDescriptors": 500,
"verboseLevel": "info",
"weights": "",
"nbMatches": 40,
"input": "{PanoramaInit_1.outSfMData}",
"method": "FrustumOrVocabularyTree",
"featuresFolders": [
"{FeatureExtraction_1.output}"
]
},
"nodeType": "ImageMatching",
"uids": {
"0": "7efc9cd43585003fc6eec0776a704e358f0a15de"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/imageMatches.txt"
},
"position": [
1400,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureExtraction_1": {
"inputs": {
"verboseLevel": "info",
"maxThreads": 0,
"describerTypes": [
"dspsift"
],
"maxNbFeatures": 0,
"relativePeakThreshold": 0.01,
"forceCpuExtraction": true,
"masksFolder": "",
"contrastFiltering": "GridSort",
"describerQuality": "high",
"gridFiltering": true,
"input": "{LdrToHdrMerge_1.outSfMData}",
"describerPreset": "normal"
},
"nodeType": "FeatureExtraction",
"uids": {
"0": "1863cc0989ab0fd910d4fe293074ff94c4e586a1"
},
"parallelization": {
"blockSize": 40,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
1000,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaSeams_1": {
"inputs": {
"verboseLevel": "info",
"input": "{PanoramaWarping_1.input}",
"warpingFolder": "{PanoramaWarping_1.output}",
"maxWidth": 5000,
"useGraphCut": true
},
"nodeType": "PanoramaSeams",
"uids": {
"0": "0ee6da171bd684358b7c64dcc631f81ba743e1fa"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/labels.exr"
},
"position": [
2400,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaCompositing_1": {
"inputs": {
"warpingFolder": "{PanoramaSeams_1.warpingFolder}",
"maxThreads": 4,
"labels": "{PanoramaSeams_1.output}",
"verboseLevel": "info",
"overlayType": "none",
"compositerType": "multiband",
"input": "{PanoramaSeams_1.input}",
"storageDataType": "float"
},
"nodeType": "PanoramaCompositing",
"uids": {
"0": "8aba78572808d012e0bb376503c2016df943b3f0"
},
"parallelization": {
"blockSize": 5,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
2600,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"CameraInit_1": {
"inputs": {
"groupCameraFallback": "folder",
"intrinsics": [],
"viewIdRegex": ".*?(\\d+)",
"defaultFieldOfView": 45.0,
"allowedCameraModels": [
"pinhole",
"radial1",
"radial3",
"brown",
"fisheye1",
"3deanamorphic4",
"3deradial4",
"3declassicld"
],
"verboseLevel": "info",
"viewIdMethod": "metadata",
"viewpoints": [],
"useInternalWhiteBalance": true,
"sensorDatabase": "${ALICEVISION_SENSOR_DB}"
},
"nodeType": "CameraInit",
"uids": {
"0": "f9436e97e444fa71a05aa5cf7639b206df8ba282"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm"
},
"position": [
0,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaPrepareImages_1": {
"inputs": {
"verboseLevel": "info",
"input": "{CameraInit_1.output}"
},
"nodeType": "PanoramaPrepareImages",
"uids": {
"0": "6956c52a8d18cb4cdb7ceb0db68f4deb84a37aee"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm"
},
"position": [
200,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"SfMTransform_1": {
"inputs": {
"applyScale": true,
"scale": 1.0,
"applyTranslation": true,
"landmarksDescriberTypes": [
"sift",
"dspsift",
"akaze"
],
"markers": [],
"method": "manual",
"verboseLevel": "info",
"input": "{PanoramaEstimation_1.output}",
"applyRotation": true,
"manualTransform": {
"manualTranslation": {
"y": 0.0,
"x": 0.0,
"z": 0.0
},
"manualRotation": {
"y": 0.0,
"x": 0.0,
"z": 0.0
},
"manualScale": 1.0
},
"transformation": ""
},
"nodeType": "SfMTransform",
"uids": {
"0": "c72641a2cca50759bcf5283ae6e0b6f7abc3fe4a"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/panorama.abc",
"outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm"
},
"position": [
2000,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaMerging_1": {
"inputs": {
"verboseLevel": "info",
"compositingFolder": "{PanoramaCompositing_1.output}",
"outputFileType": "exr",
"storageDataType": "float",
"input": "{PanoramaCompositing_1.input}"
},
"nodeType": "PanoramaMerging",
"uids": {
"0": "e007a4eb5fc5937b320638eba667cea183c0c642"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"outputPanorama": "{cache}/{nodeType}/{uid0}/panorama.{outputFileTypeValue}"
},
"position": [
2800,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaEstimation_1": {
"inputs": {
"intermediateRefineWithFocalDist": false,
"offsetLongitude": 0.0,
"matchesFolders": [
"{FeatureMatching_1.output}"
],
"filterMatches": false,
"rotationAveragingWeighting": true,
"offsetLatitude": 0.0,
"verboseLevel": "info",
"maxAngularError": 100.0,
"lockAllIntrinsics": false,
"refine": true,
"input": "{FeatureMatching_1.input}",
"intermediateRefineWithFocal": false,
"describerTypes": "{FeatureMatching_1.describerTypes}",
"relativeRotation": "rotation_matrix",
"maxAngleToPrior": 20.0,
"rotationAveraging": "L2_minimization",
"featuresFolders": "{FeatureMatching_1.featuresFolders}"
},
"nodeType": "PanoramaEstimation",
"uids": {
"0": "de946a7c1080873d15c9eb8a0523b544cf548719"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/panorama.abc",
"outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm"
},
"position": [
1800,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PanoramaInit_1": {
"inputs": {
"useFisheye": false,
"fisheyeCenterOffset": {
"fisheyeCenterOffset_y": 0.0,
"fisheyeCenterOffset_x": 0.0
},
"initializeCameras": "No",
"nbViewsPerLine": [],
"debugFisheyeCircleEstimation": false,
"verboseLevel": "info",
"dependency": [
"{FeatureExtraction_1.output}"
],
"estimateFisheyeCircle": true,
"input": "{FeatureExtraction_1.input}",
"yawCW": 1,
"config": "",
"fisheyeRadius": 96.0,
"inputAngle": "None"
},
"nodeType": "PanoramaInit",
"uids": {
"0": "702d6b973342e9203b50afea1470b4c01eb90174"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm"
},
"position": [
1200,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureMatching_1": {
"inputs": {
"verboseLevel": "info",
"describerTypes": "{FeatureExtraction_1.describerTypes}",
"exportDebugFiles": false,
"crossMatching": false,
"geometricError": 0.0,
"maxMatches": 0,
"matchFromKnownCameraPoses": false,
"savePutativeMatches": false,
"guidedMatching": false,
"imagePairsList": "{ImageMatching_1.output}",
"geometricEstimator": "acransac",
"geometricFilterType": "fundamental_matrix",
"maxIteration": 2048,
"distanceRatio": 0.8,
"input": "{ImageMatching_1.input}",
"photometricMatchingMethod": "ANN_L2",
"knownPosesGeometricErrorMax": 5.0,
"featuresFolders": "{ImageMatching_1.featuresFolders}"
},
"nodeType": "FeatureMatching",
"uids": {
"0": "cec6da6e894230ab66683c2e959bc9581ea5430e"
},
"parallelization": {
"blockSize": 20,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
1600,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
}
}
}

View file

@@ -0,0 +1,523 @@
{
"header": {
"pipelineVersion": "2.2",
"releaseVersion": "2021.1.0",
"fileVersion": "1.1",
"nodesVersions": {
"FeatureMatching": "2.0",
"MeshFiltering": "3.0",
"Texturing": "6.0",
"PrepareDenseScene": "3.0",
"DepthMap": "2.0",
"Meshing": "7.0",
"CameraInit": "7.0",
"ImageMatching": "2.0",
"FeatureExtraction": "1.1",
"StructureFromMotion": "2.0",
"DepthMapFilter": "3.0"
}
},
"graph": {
"Texturing_1": {
"inputs": {
"imagesFolder": "{DepthMap_1.imagesFolder}",
"downscale": 2,
"bumpMapping": {
"normalFileType": "exr",
"enable": true,
"bumpType": "Normal",
"heightFileType": "exr"
},
"forceVisibleByAllVertices": false,
"fillHoles": false,
"multiBandDownscale": 4,
"useScore": true,
"displacementMapping": {
"displacementMappingFileType": "exr",
"enable": true
},
"outputMeshFileType": "obj",
"angleHardThreshold": 90.0,
"textureSide": 8192,
"processColorspace": "sRGB",
"input": "{Meshing_1.output}",
"useUDIM": true,
"subdivisionTargetRatio": 0.8,
"padding": 5,
"inputRefMesh": "",
"correctEV": false,
"visibilityRemappingMethod": "PullPush",
"inputMesh": "{MeshFiltering_1.outputMesh}",
"verboseLevel": "info",
"colorMapping": {
"enable": true,
"colorMappingFileType": "exr"
},
"bestScoreThreshold": 0.1,
"unwrapMethod": "Basic",
"multiBandNbContrib": {
"high": 1,
"midHigh": 5,
"low": 0,
"midLow": 10
},
"flipNormals": false
},
"nodeType": "Texturing",
"uids": {
"0": "09f72f6745c6b13aae56fc3876e6541fbeaa557d"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 1
},
"outputs": {
"outputTextures": "{cache}/{nodeType}/{uid0}/texture_*.exr",
"outputMesh": "{cache}/{nodeType}/{uid0}/texturedMesh.{outputMeshFileTypeValue}",
"outputMaterial": "{cache}/{nodeType}/{uid0}/texturedMesh.mtl",
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
2000,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"Meshing_1": {
"inputs": {
"exportDebugTetrahedralization": false,
"useBoundingBox": false,
"maxInputPoints": 50000000,
"repartition": "multiResolution",
"helperPointsGridSize": 10,
"seed": 0,
"voteFilteringForWeaklySupportedSurfaces": true,
"verboseLevel": "info",
"outputMeshFileType": "obj",
"simGaussianSizeInit": 10.0,
"nPixelSizeBehind": 4.0,
"fullWeight": 1.0,
"depthMapsFolder": "{DepthMapFilter_1.output}",
"densify": false,
"simFactor": 15.0,
"maskHelperPointsWeight": 1.0,
"densifyScale": 20.0,
"input": "{DepthMapFilter_1.input}",
"addLandmarksToTheDensePointCloud": false,
"voteMarginFactor": 4.0,
"saveRawDensePointCloud": false,
"contributeMarginFactor": 2.0,
"estimateSpaceMinObservationAngle": 10,
"nbSolidAngleFilteringIterations": 2,
"minStep": 2,
"colorizeOutput": false,
"pixSizeMarginFinalCoef": 4.0,
"densifyNbFront": 1,
"boundingBox": {
"bboxScale": {
"y": 1.0,
"x": 1.0,
"z": 1.0
},
"bboxTranslation": {
"y": 0.0,
"x": 0.0,
"z": 0.0
},
"bboxRotation": {
"y": 0.0,
"x": 0.0,
"z": 0.0
}
},
"minSolidAngleRatio": 0.2,
"maxPoints": 5000000,
"addMaskHelperPoints": false,
"maxPointsPerVoxel": 1000000,
"angleFactor": 15.0,
"partitioning": "singleBlock",
"estimateSpaceFromSfM": true,
"minAngleThreshold": 1.0,
"pixSizeMarginInitCoef": 2.0,
"refineFuse": true,
"maxNbConnectedHelperPoints": 50,
"estimateSpaceMinObservations": 3,
"invertTetrahedronBasedOnNeighborsNbIterations": 10,
"maskBorderSize": 4,
"simGaussianSize": 10.0,
"densifyNbBack": 1
},
"nodeType": "Meshing",
"uids": {
"0": "aeb66fceaacd37ecd5bae8364bd9e87ccff2a84c"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 1
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/densePointCloud.abc",
"outputMesh": "{cache}/{nodeType}/{uid0}/mesh.{outputMeshFileTypeValue}"
},
"position": [
1600,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"DepthMapFilter_1": {
"inputs": {
"minNumOfConsistentCamsWithLowSimilarity": 4,
"computeNormalMaps": false,
"minNumOfConsistentCams": 3,
"depthMapsFolder": "{DepthMap_1.output}",
"verboseLevel": "info",
"nNearestCams": 10,
"pixSizeBallWithLowSimilarity": 0,
"pixToleranceFactor": 2.0,
"pixSizeBall": 0,
"minViewAngle": 2.0,
"maxViewAngle": 70.0,
"input": "{DepthMap_1.input}"
},
"nodeType": "DepthMapFilter",
"uids": {
"0": "4de4649a857d7bd4f7fdfb27470a5087625ff8c9"
},
"parallelization": {
"blockSize": 10,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
1400,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"ImageMatching_1": {
"inputs": {
"minNbImages": 200,
"nbNeighbors": 5,
"tree": "${ALICEVISION_VOCTREE}",
"maxDescriptors": 500,
"verboseLevel": "info",
"weights": "",
"nbMatches": 40,
"input": "{FeatureExtraction_1.input}",
"method": "SequentialAndVocabularyTree",
"featuresFolders": [
"{FeatureExtraction_1.output}"
]
},
"nodeType": "ImageMatching",
"uids": {
"0": "46fb9072ac753d60bec7dda9c8674b0568506ddf"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/imageMatches.txt"
},
"position": [
400,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureExtraction_1": {
"inputs": {
"verboseLevel": "info",
"maxThreads": 0,
"describerTypes": [
"dspsift"
],
"maxNbFeatures": 0,
"relativePeakThreshold": 0.01,
"forceCpuExtraction": true,
"masksFolder": "",
"contrastFiltering": "GridSort",
"describerQuality": "normal",
"gridFiltering": true,
"input": "{CameraInit_1.output}",
"describerPreset": "normal"
},
"nodeType": "FeatureExtraction",
"uids": {
"0": "a07fb8d05b63327d05461954c2fd2a00f201275b"
},
"parallelization": {
"blockSize": 40,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
200,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"StructureFromMotion_1": {
"inputs": {
"localizerEstimatorMaxIterations": 4096,
"minAngleForLandmark": 2.0,
"filterTrackForks": false,
"minNumberOfObservationsForTriangulation": 2,
"maxAngleInitialPair": 40.0,
"observationConstraint": "Scale",
"maxNumberOfMatches": 0,
"localizerEstimator": "acransac",
"describerTypes": "{FeatureMatching_1.describerTypes}",
"lockScenePreviouslyReconstructed": false,
"localBAGraphDistance": 1,
"minNbCamerasToRefinePrincipalPoint": 3,
"lockAllIntrinsics": false,
"input": "{FeatureMatching_1.input}",
"featuresFolders": "{FeatureMatching_1.featuresFolders}",
"useRigConstraint": true,
"rigMinNbCamerasForCalibration": 20,
"initialPairA": "",
"initialPairB": "",
"interFileExtension": ".abc",
"useLocalBA": true,
"computeStructureColor": true,
"matchesFolders": [
"{FeatureMatching_1.output}"
],
"minInputTrackLength": 2,
"useOnlyMatchesFromInputFolder": false,
"verboseLevel": "info",
"minAngleForTriangulation": 3.0,
"maxReprojectionError": 4.0,
"minAngleInitialPair": 5.0,
"minNumberOfMatches": 0,
"localizerEstimatorError": 0.0
},
"nodeType": "StructureFromMotion",
"uids": {
"0": "89c3db0849ba07dfac5e97ca9e27dd690dc476ce"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/sfm.abc",
"extraInfoFolder": "{cache}/{nodeType}/{uid0}/",
"outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm"
},
"position": [
800,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"PrepareDenseScene_1": {
"inputs": {
"imagesFolders": [],
"masksFolders": [],
"outputFileType": "exr",
"verboseLevel": "info",
"saveMatricesTxtFiles": false,
"saveMetadata": true,
"input": "{StructureFromMotion_1.output}",
"evCorrection": false
},
"nodeType": "PrepareDenseScene",
"uids": {
"0": "894725f62ffeead1307d9d91852b07d7c8453625"
},
"parallelization": {
"blockSize": 40,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/",
"outputUndistorted": "{cache}/{nodeType}/{uid0}/*.{outputFileTypeValue}"
},
"position": [
1000,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"CameraInit_1": {
"inputs": {
"groupCameraFallback": "folder",
"intrinsics": [],
"viewIdRegex": ".*?(\\d+)",
"defaultFieldOfView": 45.0,
"allowedCameraModels": [
"pinhole",
"radial1",
"radial3",
"brown",
"fisheye4",
"fisheye1",
"3deanamorphic4",
"3deradial4",
"3declassicld"
],
"verboseLevel": "info",
"viewIdMethod": "metadata",
"viewpoints": [],
"useInternalWhiteBalance": true,
"sensorDatabase": "${ALICEVISION_SENSOR_DB}"
},
"nodeType": "CameraInit",
"uids": {
"0": "f9436e97e444fa71a05aa5cf7639b206df8ba282"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm"
},
"position": [
0,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"DepthMap_1": {
"inputs": {
"sgmMaxDepthsPerTc": 1500,
"sgmP2": 100.0,
"imagesFolder": "{PrepareDenseScene_1.output}",
"downscale": 2,
"refineMaxTCams": 6,
"exportIntermediateResults": false,
"nbGPUs": 0,
"refineNiters": 100,
"refineGammaP": 8.0,
"refineGammaC": 15.5,
"sgmMaxDepths": 3000,
"sgmUseSfmSeeds": true,
"input": "{PrepareDenseScene_1.input}",
"refineWSH": 3,
"sgmP1": 10.0,
"sgmFilteringAxes": "YX",
"sgmMaxTCams": 10,
"refineSigma": 15,
"sgmScale": -1,
"minViewAngle": 2.0,
"maxViewAngle": 70.0,
"sgmGammaC": 5.5,
"sgmWSH": 4,
"refineNSamplesHalf": 150,
"sgmMaxSideXY": 700,
"refineUseTcOrRcPixSize": false,
"verboseLevel": "info",
"sgmGammaP": 8.0,
"sgmStepXY": -1,
"refineNDepthsToRefine": 31,
"sgmStepZ": -1
},
"nodeType": "DepthMap",
"uids": {
"0": "f5ef2fd13dad8f48fcb87e2364e1e821a9db7d2d"
},
"parallelization": {
"blockSize": 3,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
1200,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"MeshFiltering_1": {
"inputs": {
"filteringSubset": "all",
"outputMeshFileType": "obj",
"inputMesh": "{Meshing_1.outputMesh}",
"filterTrianglesRatio": 0.0,
"smoothingSubset": "all",
"verboseLevel": "info",
"smoothingIterations": 5,
"filterLargeTrianglesFactor": 60.0,
"keepLargestMeshOnly": false,
"smoothingBoundariesNeighbours": 0,
"smoothingLambda": 1.0,
"filteringIterations": 1
},
"nodeType": "MeshFiltering",
"uids": {
"0": "febb162c4fbce195f6d312bbb80697720a2f52b9"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 1
},
"outputs": {
"outputMesh": "{cache}/{nodeType}/{uid0}/mesh.{outputMeshFileTypeValue}"
},
"position": [
1800,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureMatching_1": {
"inputs": {
"verboseLevel": "info",
"describerTypes": "{FeatureExtraction_1.describerTypes}",
"exportDebugFiles": false,
"crossMatching": false,
"geometricError": 0.0,
"maxMatches": 0,
"matchFromKnownCameraPoses": false,
"savePutativeMatches": false,
"guidedMatching": false,
"imagePairsList": "{ImageMatching_1.output}",
"geometricEstimator": "acransac",
"geometricFilterType": "fundamental_matrix",
"maxIteration": 2048,
"distanceRatio": 0.8,
"input": "{ImageMatching_1.input}",
"photometricMatchingMethod": "ANN_L2",
"knownPosesGeometricErrorMax": 5.0,
"featuresFolders": "{ImageMatching_1.featuresFolders}"
},
"nodeType": "FeatureMatching",
"uids": {
"0": "3b1f2c3fcfe0b94c65627c397a2671ba7594827d"
},
"parallelization": {
"blockSize": 20,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
600,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
}
}
}
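The photogrammetry template above chains CameraInit, FeatureExtraction, ImageMatching, FeatureMatching, StructureFromMotion, PrepareDenseScene, DepthMap, DepthMapFilter, Meshing, MeshFiltering and Texturing. Below is a hedged sketch of how such a template can be driven headlessly, mirroring what the batch script and the tests further down do: load the graph without binding it to a project file, then feed viewpoints into CameraInit. The import location of loadGraph and the image paths are assumptions for illustration only.

# Sketch only: assumes meshroom is importable, its node plugins and pipeline templates are
# registered at startup, and loadGraph lives in meshroom.core.graph (the tests below use it
# the same way).
import meshroom.core
from meshroom.core.graph import loadGraph

graph = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"])
cameraInit = graph.node("CameraInit_1")
# Placeholder image paths, for illustration only.
cameraInit.viewpoints.extend([{'path': p} for p in ['/data/img_0001.jpg', '/data/img_0002.jpg']])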

View file

@ -0,0 +1,486 @@
{
"header": {
"pipelineVersion": "2.2",
"releaseVersion": "2021.1.0",
"fileVersion": "1.1",
"nodesVersions": {
"ExportAnimatedCamera": "2.0",
"FeatureMatching": "2.0",
"DistortionCalibration": "2.0",
"CameraInit": "7.0",
"ImageMatchingMultiSfM": "1.0",
"ImageMatching": "2.0",
"FeatureExtraction": "1.1",
"StructureFromMotion": "2.0"
}
},
"graph": {
"DistortionCalibration_1": {
"inputs": {
"verboseLevel": "info",
"input": "{CameraInit_2.output}",
"lensGrid": []
},
"nodeType": "DistortionCalibration",
"uids": {
"0": "8afea9d171904cdb6ba1c0b116cb60de3ccb6fb4"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"outSfMData": "{cache}/{nodeType}/{uid0}/sfmData.sfm"
},
"position": [
1024,
393
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"ImageMatching_1": {
"inputs": {
"minNbImages": 200,
"nbNeighbors": 5,
"tree": "${ALICEVISION_VOCTREE}",
"maxDescriptors": 500,
"verboseLevel": "info",
"weights": "",
"nbMatches": 40,
"input": "{FeatureExtraction_1.input}",
"method": "SequentialAndVocabularyTree",
"featuresFolders": [
"{FeatureExtraction_1.output}"
]
},
"nodeType": "ImageMatching",
"uids": {
"0": "46fb9072ac753d60bec7dda9c8674b0568506ddf"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/imageMatches.txt"
},
"position": [
400,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureExtraction_1": {
"inputs": {
"verboseLevel": "info",
"maxThreads": 0,
"describerTypes": [
"dspsift"
],
"maxNbFeatures": 0,
"relativePeakThreshold": 0.01,
"forceCpuExtraction": true,
"masksFolder": "",
"contrastFiltering": "GridSort",
"describerQuality": "normal",
"gridFiltering": true,
"input": "{CameraInit_1.output}",
"describerPreset": "normal"
},
"nodeType": "FeatureExtraction",
"uids": {
"0": "a07fb8d05b63327d05461954c2fd2a00f201275b"
},
"parallelization": {
"blockSize": 40,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
200,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"StructureFromMotion_1": {
"inputs": {
"localizerEstimatorMaxIterations": 4096,
"minAngleForLandmark": 2.0,
"filterTrackForks": false,
"minNumberOfObservationsForTriangulation": 2,
"maxAngleInitialPair": 40.0,
"observationConstraint": "Scale",
"maxNumberOfMatches": 0,
"localizerEstimator": "acransac",
"describerTypes": "{FeatureMatching_1.describerTypes}",
"lockScenePreviouslyReconstructed": false,
"localBAGraphDistance": 1,
"minNbCamerasToRefinePrincipalPoint": 3,
"lockAllIntrinsics": false,
"input": "{FeatureMatching_1.input}",
"featuresFolders": "{FeatureMatching_1.featuresFolders}",
"useRigConstraint": true,
"rigMinNbCamerasForCalibration": 20,
"initialPairA": "",
"initialPairB": "",
"interFileExtension": ".abc",
"useLocalBA": true,
"computeStructureColor": true,
"matchesFolders": [
"{FeatureMatching_1.output}"
],
"minInputTrackLength": 2,
"useOnlyMatchesFromInputFolder": false,
"verboseLevel": "info",
"minAngleForTriangulation": 3.0,
"maxReprojectionError": 4.0,
"minAngleInitialPair": 5.0,
"minNumberOfMatches": 0,
"localizerEstimatorError": 0.0
},
"nodeType": "StructureFromMotion",
"uids": {
"0": "89c3db0849ba07dfac5e97ca9e27dd690dc476ce"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/sfm.abc",
"extraInfoFolder": "{cache}/{nodeType}/{uid0}/",
"outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm"
},
"position": [
800,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"ExportAnimatedCamera_1": {
"inputs": {
"exportFullROD": false,
"undistortedImageType": "exr",
"exportUVMaps": true,
"verboseLevel": "info",
"sfmDataFilter": "{StructureFromMotion_1.output}",
"exportUndistortedImages": false,
"input": "{StructureFromMotion_2.output}",
"viewFilter": "",
"correctPrincipalPoint": true
},
"nodeType": "ExportAnimatedCamera",
"uids": {
"0": "6f482ab9e161bd79341c5cd4a43ab9f8e39aec1f"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 1
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/",
"outputUndistorted": "{cache}/{nodeType}/{uid0}/undistort",
"outputCamera": "{cache}/{nodeType}/{uid0}/camera.abc"
},
"position": [
1629,
212
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"CameraInit_1": {
"inputs": {
"groupCameraFallback": "folder",
"intrinsics": [],
"viewIdRegex": ".*?(\\d+)",
"defaultFieldOfView": 45.0,
"allowedCameraModels": [
"pinhole",
"radial1",
"radial3",
"brown",
"fisheye4",
"fisheye1",
"3deanamorphic4",
"3deradial4",
"3declassicld"
],
"verboseLevel": "info",
"viewIdMethod": "metadata",
"viewpoints": [],
"useInternalWhiteBalance": true,
"sensorDatabase": "${ALICEVISION_SENSOR_DB}"
},
"nodeType": "CameraInit",
"uids": {
"0": "f9436e97e444fa71a05aa5cf7639b206df8ba282"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm"
},
"position": [
0,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"ImageMatchingMultiSfM_1": {
"inputs": {
"minNbImages": 200,
"matchingMode": "a/a+a/b",
"nbNeighbors": 10,
"tree": "${ALICEVISION_VOCTREE}",
"nbMatches": 5,
"verboseLevel": "info",
"weights": "",
"maxDescriptors": 500,
"input": "{FeatureExtraction_2.input}",
"inputB": "{StructureFromMotion_1.output}",
"method": "SequentialAndVocabularyTree",
"featuresFolders": [
"{FeatureExtraction_2.output}"
]
},
"nodeType": "ImageMatchingMultiSfM",
"uids": {
"0": "ef147c1bc069c7689863c7e14cdbbaca86af4006"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/imageMatches.txt",
"outputCombinedSfM": "{cache}/{nodeType}/{uid0}/combineSfM.sfm"
},
"position": [
1029,
212
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"CameraInit_2": {
"inputs": {
"groupCameraFallback": "folder",
"intrinsics": [],
"viewIdRegex": ".*?(\\d+)",
"defaultFieldOfView": 45.0,
"allowedCameraModels": [
"pinhole",
"radial1",
"radial3",
"brown",
"fisheye4",
"fisheye1",
"3deanamorphic4",
"3deradial4",
"3declassicld"
],
"verboseLevel": "info",
"viewIdMethod": "metadata",
"viewpoints": [],
"useInternalWhiteBalance": true,
"sensorDatabase": "${ALICEVISION_SENSOR_DB}"
},
"nodeType": "CameraInit",
"uids": {
"0": "f9436e97e444fa71a05aa5cf7639b206df8ba282"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm"
},
"position": [
-2,
223
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureExtraction_2": {
"inputs": {
"verboseLevel": "info",
"maxThreads": 0,
"describerTypes": [
"dspsift"
],
"maxNbFeatures": 0,
"relativePeakThreshold": 0.01,
"forceCpuExtraction": true,
"masksFolder": "",
"contrastFiltering": "GridSort",
"describerQuality": "normal",
"gridFiltering": true,
"input": "{CameraInit_2.output}",
"describerPreset": "normal"
},
"nodeType": "FeatureExtraction",
"uids": {
"0": "a07fb8d05b63327d05461954c2fd2a00f201275b"
},
"parallelization": {
"blockSize": 40,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
198,
223
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureMatching_2": {
"inputs": {
"verboseLevel": "info",
"describerTypes": "{FeatureExtraction_2.describerTypes}",
"exportDebugFiles": false,
"crossMatching": false,
"geometricError": 0.0,
"maxMatches": 0,
"matchFromKnownCameraPoses": false,
"savePutativeMatches": false,
"guidedMatching": false,
"imagePairsList": "{ImageMatchingMultiSfM_1.output}",
"geometricEstimator": "acransac",
"geometricFilterType": "fundamental_matrix",
"maxIteration": 2048,
"distanceRatio": 0.8,
"input": "{DistortionCalibration_1.outSfMData}",
"photometricMatchingMethod": "ANN_L2",
"knownPosesGeometricErrorMax": 5.0,
"featuresFolders": "{ImageMatchingMultiSfM_1.featuresFolders}"
},
"nodeType": "FeatureMatching",
"uids": {
"0": "7bb42f40b3f607da7e9f5f432409ddf6ef9c5951"
},
"parallelization": {
"blockSize": 20,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
1229,
212
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureMatching_1": {
"inputs": {
"verboseLevel": "info",
"describerTypes": "{FeatureExtraction_1.describerTypes}",
"exportDebugFiles": false,
"crossMatching": false,
"geometricError": 0.0,
"maxMatches": 0,
"matchFromKnownCameraPoses": false,
"savePutativeMatches": false,
"guidedMatching": false,
"imagePairsList": "{ImageMatching_1.output}",
"geometricEstimator": "acransac",
"geometricFilterType": "fundamental_matrix",
"maxIteration": 2048,
"distanceRatio": 0.8,
"input": "{ImageMatching_1.input}",
"photometricMatchingMethod": "ANN_L2",
"knownPosesGeometricErrorMax": 5.0,
"featuresFolders": "{ImageMatching_1.featuresFolders}"
},
"nodeType": "FeatureMatching",
"uids": {
"0": "3b1f2c3fcfe0b94c65627c397a2671ba7594827d"
},
"parallelization": {
"blockSize": 20,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
600,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"StructureFromMotion_2": {
"inputs": {
"localizerEstimatorMaxIterations": 4096,
"minAngleForLandmark": 0.5,
"filterTrackForks": false,
"minNumberOfObservationsForTriangulation": 3,
"maxAngleInitialPair": 40.0,
"observationConstraint": "Scale",
"maxNumberOfMatches": 0,
"localizerEstimator": "acransac",
"describerTypes": "{FeatureMatching_2.describerTypes}",
"lockScenePreviouslyReconstructed": false,
"localBAGraphDistance": 1,
"minNbCamerasToRefinePrincipalPoint": 3,
"lockAllIntrinsics": false,
"input": "{FeatureMatching_2.input}",
"featuresFolders": "{FeatureMatching_2.featuresFolders}",
"useRigConstraint": true,
"rigMinNbCamerasForCalibration": 20,
"initialPairA": "",
"initialPairB": "",
"interFileExtension": ".abc",
"useLocalBA": true,
"computeStructureColor": true,
"matchesFolders": [
"{FeatureMatching_2.output}"
],
"minInputTrackLength": 5,
"useOnlyMatchesFromInputFolder": false,
"verboseLevel": "info",
"minAngleForTriangulation": 1.0,
"maxReprojectionError": 4.0,
"minAngleInitialPair": 5.0,
"minNumberOfMatches": 0,
"localizerEstimatorError": 0.0
},
"nodeType": "StructureFromMotion",
"uids": {
"0": "4bc466c45bc7b430553752d1eb1640c581c43e36"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/sfm.abc",
"extraInfoFolder": "{cache}/{nodeType}/{uid0}/",
"outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm"
},
"position": [
1429,
212
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
}
}
}
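The cameraTracking template above roughly runs two reconstructions back to back: a first chain (CameraInit_1 through StructureFromMotion_1) reconstructs the reference images, and a second chain (CameraInit_2, FeatureExtraction_2, DistortionCalibration_1, FeatureMatching_2, StructureFromMotion_2) localizes the tracked sequence against it via ImageMatchingMultiSfM_1, before ExportAnimatedCamera_1 writes out the animated camera. Since every connection is encoded as a "{Node.attribute}" string in the inputs, the edges can be recovered from the raw JSON; a small dependency-free sketch follows (the file name is a placeholder):

import json
import re

# Sketch: list node-to-node connections of a template by scanning "{Node.attr}" input values.
with open("cameraTracking.mg", "r") as f:  # placeholder path for the template above
    graph = json.load(f)["graph"]

linkPattern = re.compile(r"\{(\w+)\.\w+\}")
for name, node in graph.items():
    for value in node["inputs"].values():
        for v in (value if isinstance(value, list) else [value]):
            if isinstance(v, str):
                for src in linkPattern.findall(v):
                    if src in graph:  # only keep references to nodes defined in this graph
                        print(src, "->", name)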

View file

@ -0,0 +1,404 @@
{
"header": {
"nodesVersions": {
"FeatureMatching": "2.0",
"MeshFiltering": "3.0",
"Texturing": "6.0",
"Meshing": "7.0",
"CameraInit": "7.0",
"ImageMatching": "2.0",
"FeatureExtraction": "1.1",
"StructureFromMotion": "2.0"
},
"releaseVersion": "2021.1.0",
"fileVersion": "1.1"
},
"graph": {
"Texturing_1": {
"inputs": {
"imagesFolder": "",
"downscale": 2,
"bumpMapping": {
"normalFileType": "exr",
"enable": true,
"bumpType": "Normal",
"heightFileType": "exr"
},
"forceVisibleByAllVertices": false,
"fillHoles": false,
"multiBandDownscale": 4,
"useScore": true,
"displacementMapping": {
"displacementMappingFileType": "exr",
"enable": true
},
"outputMeshFileType": "obj",
"angleHardThreshold": 90.0,
"textureSide": 8192,
"processColorspace": "sRGB",
"input": "{Meshing_1.output}",
"useUDIM": true,
"subdivisionTargetRatio": 0.8,
"padding": 5,
"inputRefMesh": "",
"correctEV": false,
"visibilityRemappingMethod": "PullPush",
"inputMesh": "{MeshFiltering_1.outputMesh}",
"verboseLevel": "info",
"colorMapping": {
"enable": true,
"colorMappingFileType": "exr"
},
"bestScoreThreshold": 0.1,
"unwrapMethod": "Basic",
"multiBandNbContrib": {
"high": 1,
"midHigh": 5,
"low": 0,
"midLow": 10
},
"flipNormals": false
},
"nodeType": "Texturing",
"uids": {
"0": "1ed1516bf83493071547e69146be3f1218012e25"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 1
},
"outputs": {
"outputTextures": "{cache}/{nodeType}/{uid0}/texture_*.exr",
"outputMesh": "{cache}/{nodeType}/{uid0}/texturedMesh.{outputMeshFileTypeValue}",
"outputMaterial": "{cache}/{nodeType}/{uid0}/texturedMesh.mtl",
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
1400,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"Meshing_1": {
"inputs": {
"exportDebugTetrahedralization": false,
"useBoundingBox": false,
"maxInputPoints": 50000000,
"repartition": "multiResolution",
"helperPointsGridSize": 10,
"seed": 0,
"voteFilteringForWeaklySupportedSurfaces": true,
"verboseLevel": "info",
"outputMeshFileType": "obj",
"simGaussianSizeInit": 10.0,
"nPixelSizeBehind": 4.0,
"fullWeight": 1.0,
"depthMapsFolder": "",
"densify": false,
"simFactor": 15.0,
"maskHelperPointsWeight": 1.0,
"densifyScale": 20.0,
"input": "{StructureFromMotion_1.output}",
"addLandmarksToTheDensePointCloud": false,
"voteMarginFactor": 4.0,
"saveRawDensePointCloud": false,
"contributeMarginFactor": 2.0,
"estimateSpaceMinObservationAngle": 10,
"nbSolidAngleFilteringIterations": 2,
"minStep": 2,
"colorizeOutput": false,
"pixSizeMarginFinalCoef": 4.0,
"densifyNbFront": 1,
"boundingBox": {
"bboxScale": {
"y": 1.0,
"x": 1.0,
"z": 1.0
},
"bboxTranslation": {
"y": 0.0,
"x": 0.0,
"z": 0.0
},
"bboxRotation": {
"y": 0.0,
"x": 0.0,
"z": 0.0
}
},
"minSolidAngleRatio": 0.2,
"maxPoints": 5000000,
"addMaskHelperPoints": false,
"maxPointsPerVoxel": 1000000,
"angleFactor": 15.0,
"partitioning": "singleBlock",
"estimateSpaceFromSfM": true,
"minAngleThreshold": 1.0,
"pixSizeMarginInitCoef": 2.0,
"refineFuse": true,
"maxNbConnectedHelperPoints": 50,
"estimateSpaceMinObservations": 3,
"invertTetrahedronBasedOnNeighborsNbIterations": 10,
"maskBorderSize": 4,
"simGaussianSize": 10.0,
"densifyNbBack": 1
},
"nodeType": "Meshing",
"uids": {
"0": "dc3d06f150a2601334a44174aa8e5523d3055468"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 1
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/densePointCloud.abc",
"outputMesh": "{cache}/{nodeType}/{uid0}/mesh.{outputMeshFileTypeValue}"
},
"position": [
1000,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"ImageMatching_1": {
"inputs": {
"minNbImages": 200,
"nbNeighbors": 5,
"tree": "${ALICEVISION_VOCTREE}",
"maxDescriptors": 500,
"verboseLevel": "info",
"weights": "",
"nbMatches": 40,
"input": "{FeatureExtraction_1.input}",
"method": "SequentialAndVocabularyTree",
"featuresFolders": [
"{FeatureExtraction_1.output}"
]
},
"nodeType": "ImageMatching",
"uids": {
"0": "46fb9072ac753d60bec7dda9c8674b0568506ddf"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/imageMatches.txt"
},
"position": [
400,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureExtraction_1": {
"inputs": {
"verboseLevel": "info",
"maxThreads": 0,
"describerTypes": [
"dspsift"
],
"maxNbFeatures": 0,
"relativePeakThreshold": 0.01,
"forceCpuExtraction": true,
"masksFolder": "",
"contrastFiltering": "GridSort",
"describerQuality": "normal",
"gridFiltering": true,
"input": "{CameraInit_1.output}",
"describerPreset": "normal"
},
"nodeType": "FeatureExtraction",
"uids": {
"0": "a07fb8d05b63327d05461954c2fd2a00f201275b"
},
"parallelization": {
"blockSize": 40,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
200,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"StructureFromMotion_1": {
"inputs": {
"localizerEstimatorMaxIterations": 4096,
"minAngleForLandmark": 2.0,
"filterTrackForks": false,
"minNumberOfObservationsForTriangulation": 2,
"maxAngleInitialPair": 40.0,
"observationConstraint": "Scale",
"maxNumberOfMatches": 0,
"localizerEstimator": "acransac",
"describerTypes": "{FeatureMatching_1.describerTypes}",
"lockScenePreviouslyReconstructed": false,
"localBAGraphDistance": 1,
"minNbCamerasToRefinePrincipalPoint": 3,
"lockAllIntrinsics": false,
"input": "{FeatureMatching_1.input}",
"featuresFolders": "{FeatureMatching_1.featuresFolders}",
"useRigConstraint": true,
"rigMinNbCamerasForCalibration": 20,
"initialPairA": "",
"initialPairB": "",
"interFileExtension": ".abc",
"useLocalBA": true,
"computeStructureColor": true,
"matchesFolders": [
"{FeatureMatching_1.output}"
],
"minInputTrackLength": 2,
"useOnlyMatchesFromInputFolder": false,
"verboseLevel": "info",
"minAngleForTriangulation": 3.0,
"maxReprojectionError": 4.0,
"minAngleInitialPair": 5.0,
"minNumberOfMatches": 0,
"localizerEstimatorError": 0.0
},
"nodeType": "StructureFromMotion",
"uids": {
"0": "89c3db0849ba07dfac5e97ca9e27dd690dc476ce"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/sfm.abc",
"extraInfoFolder": "{cache}/{nodeType}/{uid0}/",
"outputViewsAndPoses": "{cache}/{nodeType}/{uid0}/cameras.sfm"
},
"position": [
800,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"CameraInit_1": {
"inputs": {
"groupCameraFallback": "folder",
"intrinsics": [],
"viewIdRegex": ".*?(\\d+)",
"defaultFieldOfView": 45.0,
"allowedCameraModels": [
"pinhole",
"radial1",
"radial3",
"brown",
"fisheye4",
"fisheye1",
"3deanamorphic4",
"3deradial4",
"3declassicld"
],
"verboseLevel": "info",
"viewIdMethod": "metadata",
"viewpoints": [],
"useInternalWhiteBalance": true,
"sensorDatabase": "${ALICEVISION_SENSOR_DB}"
},
"nodeType": "CameraInit",
"uids": {
"0": "f9436e97e444fa71a05aa5cf7639b206df8ba282"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/cameraInit.sfm"
},
"position": [
0,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"MeshFiltering_1": {
"inputs": {
"filteringSubset": "all",
"outputMeshFileType": "obj",
"inputMesh": "{Meshing_1.outputMesh}",
"filterTrianglesRatio": 0.0,
"smoothingSubset": "all",
"verboseLevel": "info",
"smoothingIterations": 5,
"filterLargeTrianglesFactor": 60.0,
"keepLargestMeshOnly": false,
"smoothingBoundariesNeighbours": 0,
"smoothingLambda": 1.0,
"filteringIterations": 1
},
"nodeType": "MeshFiltering",
"uids": {
"0": "057d1647de39a617f79aad02a721938e5625ff64"
},
"parallelization": {
"blockSize": 0,
"split": 1,
"size": 1
},
"outputs": {
"outputMesh": "{cache}/{nodeType}/{uid0}/mesh.{outputMeshFileTypeValue}"
},
"position": [
1200,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
},
"FeatureMatching_1": {
"inputs": {
"verboseLevel": "info",
"describerTypes": "{FeatureExtraction_1.describerTypes}",
"exportDebugFiles": false,
"crossMatching": false,
"geometricError": 0.0,
"maxMatches": 0,
"matchFromKnownCameraPoses": false,
"savePutativeMatches": false,
"guidedMatching": false,
"imagePairsList": "{ImageMatching_1.output}",
"geometricEstimator": "acransac",
"geometricFilterType": "fundamental_matrix",
"maxIteration": 2048,
"distanceRatio": 0.8,
"input": "{ImageMatching_1.input}",
"photometricMatchingMethod": "ANN_L2",
"knownPosesGeometricErrorMax": 5.0,
"featuresFolders": "{ImageMatching_1.featuresFolders}"
},
"nodeType": "FeatureMatching",
"uids": {
"0": "3b1f2c3fcfe0b94c65627c397a2671ba7594827d"
},
"parallelization": {
"blockSize": 20,
"split": 0,
"size": 0
},
"outputs": {
"output": "{cache}/{nodeType}/{uid0}/"
},
"position": [
600,
0
],
"internalFolder": "{cache}/{nodeType}/{uid0}/"
}
}
}

View file

@ -1,5 +1,6 @@
import logging
import os
+import re
import argparse
from PySide2.QtCore import Qt, QUrl, Slot, QJsonValue, Property, Signal, qInstallMessageHandler, QtMsgType, QSettings
@ -71,8 +72,9 @@ class MeshroomApp(QApplication):
                    help='Import images to reconstruct from specified folder and sub-folders.')
parser.add_argument('-s', '--save', metavar='PROJECT.mg', type=str, default='',
                    help='Save the created scene.')
-parser.add_argument('-p', '--pipeline', metavar='MESHROOM_FILE/photogrammetry/panoramaHdr/panoramaFisheyeHdr', type=str, default=os.environ.get("MESHROOM_DEFAULT_PIPELINE", "photogrammetry"),
-                    help='Override the default Meshroom pipeline with this external graph.')
+parser.add_argument('-p', '--pipeline', metavar="FILE.mg/" + "/".join(meshroom.core.pipelineTemplates), type=str,
+                    default=os.environ.get("MESHROOM_DEFAULT_PIPELINE", "photogrammetry"),
+                    help='Override the default Meshroom pipeline with this external or template graph.')
parser.add_argument("--verbose", help="Verbosity level", default='warning',
                    choices=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],)
@ -176,6 +178,16 @@ class MeshroomApp(QApplication):
self.engine.load(os.path.normpath(url))
+def _pipelineTemplateFiles(self):
+    templates = []
+    for key in sorted(meshroom.core.pipelineTemplates.keys()):
+        # Use uppercase letters in the names as separators to format the templates' name nicely
+        # e.g: the template "panoramaHdr" will be shown as "Panorama Hdr" in the menu
+        name = " ".join(re.findall('[A-Z][^A-Z]*', key[0].upper() + key[1:]))
+        variant = {"name": name, "key": key, "path": meshroom.core.pipelineTemplates[key]}
+        templates.append(variant)
+    return templates
def _recentProjectFiles(self):
    projects = []
    settings = QSettings()
@ -316,6 +328,7 @@ class MeshroomApp(QApplication):
}
]
+pipelineTemplateFilesChanged = Signal()
recentProjectFilesChanged = Signal()
+pipelineTemplateFiles = Property("QVariantList", _pipelineTemplateFiles, notify=pipelineTemplateFilesChanged)
recentProjectFiles = Property("QVariantList", _recentProjectFiles, notify=recentProjectFilesChanged)
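For the menu labels, _pipelineTemplateFiles splits each template key on its uppercase letters after capitalizing the first one. The same transformation is shown in isolation below, as an illustrative sketch; the helper name is not part of the change:

import re

def templateDisplayName(key):
    # Same regex as _pipelineTemplateFiles above: capitalize the first letter,
    # split on uppercase letters, and join the pieces with spaces.
    return " ".join(re.findall('[A-Z][^A-Z]*', key[0].upper() + key[1:]))

# Illustrative renderings:
print(templateDisplayName("panoramaHdr"))           # "Panorama Hdr"
print(templateDisplayName("photogrammetryDraft"))   # "Photogrammetry Draft"
print(templateDisplayName("cameraTracking"))        # "Camera Tracking"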

View file

@ -68,7 +68,7 @@ RowLayout {
id: paramMenu
property bool isFileAttribute: attribute.type == "File"
-property bool isFilepath: isFileAttribute && Filepath.isFile(attribute.value)
+property bool isFilepath: isFileAttribute && Filepath.isFile(attribute.evalValue)
MenuItem {
    text: "Reset To Default Value"
@ -85,8 +85,8 @@ RowLayout {
visible: paramMenu.isFileAttribute
height: visible ? implicitHeight : 0
text: paramMenu.isFilepath ? "Open Containing Folder" : "Open Folder"
-onClicked: paramMenu.isFilepath ? Qt.openUrlExternally(Filepath.dirname(attribute.value)) :
-                                  Qt.openUrlExternally(Filepath.stringToUrl(attribute.value))
+onClicked: paramMenu.isFilepath ? Qt.openUrlExternally(Filepath.dirname(attribute.evalValue)) :
+                                  Qt.openUrlExternally(Filepath.stringToUrl(attribute.evalValue))
}
MenuItem {

View file

@ -409,38 +409,38 @@ ApplicationWindow {
onTriggered: ensureSaved(function() { _reconstruction.new() })
}
Menu {
+    id: newPipelineMenu
    title: "New Pipeline"
+    enabled: newPipelineMenuItems.model != undefined && newPipelineMenuItems.model.length > 0
+    property int maxWidth: 1000
+    property int fullWidth: {
+        var result = 0;
+        for (var i = 0; i < count; ++i) {
+            var item = itemAt(i);
+            result = Math.max(item.implicitWidth + item.padding * 2, result);
+        }
+        return result;
+    }
+    implicitWidth: fullWidth
+    Repeater {
+        id: newPipelineMenuItems
+        model: MeshroomApp.pipelineTemplateFiles
+        MenuItem {
+            onTriggered: ensureSaved(function() {
+                _reconstruction.new(modelData["key"])
+            })
+            text: fileTextMetrics.elidedText
            TextMetrics {
-                id: textMetrics
-                font: action_PG_CT.font
-                elide: Text.ElideNone
-                text: action_PG_CT.text
+                id: fileTextMetrics
+                text: modelData["name"]
+                elide: Text.ElideLeft
+                elideWidth: newPipelineMenu.maxWidth
            }
-    implicitWidth: textMetrics.width + 10 // largest text width + margin
-    Action {
-        text: "Photogrammetry"
-        onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetry") })
-    }
-    Action {
-        text: "Panorama HDR"
-        onTriggered: ensureSaved(function() { _reconstruction.new("panoramahdr") })
-    }
-    Action {
-        text: "Panorama Fisheye HDR"
-        onTriggered: ensureSaved(function() { _reconstruction.new("panoramafisheyehdr") })
-    }
-    Action {
-        id: action_PG_CT
-        text: "Photogrammetry and Camera Tracking (experimental)"
-        onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetryandcameratracking") })
-    }
-    Action {
-        text: "Camera Tracking (experimental)"
-        onTriggered: ensureSaved(function() { _reconstruction.new("cameratracking") })
-    }
-    Action {
-        text: "Photogrammetry Draft (No CUDA)"
-        onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetrydraft") })
-    }
+            ToolTip.text: modelData["path"]
+            ToolTip.visible: hovered
+            ToolTip.delay: 200
+        }
+    }
}
Action {

View file

@ -483,27 +483,15 @@ class Reconstruction(UIGraph):
@Slot(str)
def new(self, pipeline=None):
    p = pipeline if pipeline != None else self._defaultPipeline
-    """ Create a new photogrammetry pipeline. """
-    if p.lower() == "photogrammetry":
-        # default photogrammetry pipeline
-        self.setGraph(multiview.photogrammetry())
-    elif p.lower() == "panoramahdr":
-        # default panorama hdr pipeline
-        self.setGraph(multiview.panoramaHdr())
-    elif p.lower() == "panoramafisheyehdr":
-        # default panorama fisheye hdr pipeline
-        self.setGraph(multiview.panoramaFisheyeHdr())
-    elif p.lower() == "photogrammetryandcameratracking":
-        # default camera tracking pipeline
-        self.setGraph(multiview.photogrammetryAndCameraTracking())
-    elif p.lower() == "cameratracking":
-        # default camera tracking pipeline
-        self.setGraph(multiview.cameraTracking())
-    elif p.lower() == "photogrammetrydraft":
-        # photogrammetry pipeline in draft mode (no cuda)
-        self.setGraph(multiview.photogrammetryDraft())
+    """ Create a new pipeline. """
+    # Lower the input and the dictionary keys to make sure that all input types can be found:
+    # - correct pipeline name but the case does not match (e.g. panoramaHDR instead of panoramaHdr)
+    # - lowercase pipeline name given through the "New Pipeline" menu
+    loweredPipelineTemplates = dict((k.lower(), v) for k, v in meshroom.core.pipelineTemplates.items())
+    if p.lower() in loweredPipelineTemplates:
+        self.load(loweredPipelineTemplates[p.lower()], setupProjectFile=False)
    else:
-        # use the user-provided default photogrammetry project file
+        # use the user-provided default project file
        self.load(p, setupProjectFile=False)

@Slot(str, result=bool)
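The lookup in new() is deliberately case-insensitive, so both a user-typed name such as "panoramaHDR" and the lowercased keys coming from the "New Pipeline" menu resolve to the same template, while anything that does not match falls through to being loaded as a project file. Here is a stand-alone sketch of that logic with a stand-in dictionary; the paths are placeholders, not the real template locations:

# Stand-in for meshroom.core.pipelineTemplates; the paths are placeholders.
pipelineTemplates = {
    "photogrammetry": "/templates/photogrammetry.mg",
    "panoramaHdr": "/templates/panoramaHdr.mg",
}

def resolveTemplate(name):
    lowered = dict((k.lower(), v) for k, v in pipelineTemplates.items())
    # Returns the template path for a known name (any case); None means the caller
    # should treat the name as a path to a custom project file instead.
    return lowered.get(name.lower())

assert resolveTemplate("panoramaHDR") == "/templates/panoramaHdr.mg"
assert resolveTemplate("Photogrammetry") == "/templates/photogrammetry.mg"
assert resolveTemplate("/custom/pipeline.mg") is None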

View file

@ -9,18 +9,34 @@ from meshroom.core.node import Node
def test_multiviewPipeline():
-    graph1 = meshroom.multiview.photogrammetry(inputImages=['/non/existing/fileA'])
-    graph2 = meshroom.multiview.photogrammetry(inputImages=[])
-    graph2b = meshroom.multiview.photogrammetry(inputImages=[])
-    graph3 = meshroom.multiview.photogrammetry(inputImages=['/non/existing/file1', '/non/existing/file2'])
-    graph4 = meshroom.multiview.photogrammetry(inputViewpoints=[
-        {'path': '/non/existing/file1', 'intrinsicId': 50},
-        {'path': '/non/existing/file2', 'intrinsicId': 55}
-    ])
-    graph4b = meshroom.multiview.photogrammetry(inputViewpoints=[
-        {'path': '/non/existing/file1', 'intrinsicId': 50},
-        {'path': '/non/existing/file2', 'intrinsicId': 55}
-    ])
+    graph1InputImages = ['/non/existing/fileA']
+    graph1 = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"])
+    graph1CameraInit = graph1.node("CameraInit_1")
+    graph1CameraInit.viewpoints.extend([{'path': image} for image in graph1InputImages])
+
+    graph2InputImages = []  # common to graph2 and graph2b
+    graph2 = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"])
+    graph2CameraInit = graph2.node("CameraInit_1")
+    graph2CameraInit.viewpoints.extend([{'path': image} for image in graph2InputImages])
+    graph2b = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"])
+    graph2bCameraInit = graph2b.node("CameraInit_1")
+    graph2bCameraInit.viewpoints.extend([{'path': image} for image in graph2InputImages])
+
+    graph3InputImages = ['/non/existing/file1', '/non/existing/file2']
+    graph3 = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"])
+    graph3CameraInit = graph3.node("CameraInit_1")
+    graph3CameraInit.viewpoints.extend([{'path': image} for image in graph3InputImages])
+
+    graph4InputViewpoints = [
+        {'path': '/non/existing/file1', 'intrinsicId': 50},
+        {'path': '/non/existing/file2', 'intrinsicId': 55}
+    ]  # common to graph4 and graph4b
+    graph4 = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"])
+    graph4CameraInit = graph4.node("CameraInit_1")
+    graph4CameraInit.viewpoints.extend(graph4InputViewpoints)
+    graph4b = loadGraph(meshroom.core.pipelineTemplates["photogrammetry"])
+    graph4bCameraInit = graph4b.node("CameraInit_1")
+    graph4bCameraInit.viewpoints.extend(graph4InputViewpoints)

    assert graph1.findNode('CameraInit').viewpoints.at(0).path.value == '/non/existing/fileA'
    assert len(graph2.findNode('CameraInit').viewpoints) == 0