Mirror of https://github.com/alicevision/Meshroom.git (synced 2025-06-03 11:21:52 +02:00)

Commit 991aca989b: Merge develop into dev/nodesAndTaskManager
28 changed files with 1570 additions and 452 deletions
@@ -10,7 +10,7 @@ meshroom.setupEnvironment()
import meshroom.core.graph
from meshroom import multiview

parser = argparse.ArgumentParser(description='Launch the full photogrammetry pipeline.')
parser = argparse.ArgumentParser(description='Launch the full photogrammetry or HDRI pipeline.')
parser.add_argument('-i', '--input', metavar='SFM/FOLDERS/IMAGES', type=str, nargs='*',
                    default=[],
                    help='Input folder containing images or folders of images or file (.sfm or .json) '
@@ -19,9 +19,8 @@ parser.add_argument('-I', '--inputRecursive', metavar='FOLDERS/IMAGES', type=str
                    default=[],
                    help='Input folders containing all images recursively.')

parser.add_argument('-p', '--pipeline', metavar='MESHROOM_FILE', type=str, required=False,
                    help='Meshroom file containing a pre-configured photogrammetry pipeline to run on input images. '
                         'If not set, the default photogrammetry pipeline will be used. '
parser.add_argument('-p', '--pipeline', metavar='photogrammetry/hdri/MG_FILE', type=str, default='photogrammetry',
                    help='"photogrammetry" pipeline, "hdri" pipeline or a Meshroom file containing a custom pipeline to run on input images. '
                         'Requirements: the graph must contain one CameraInit node, '
                         'and one Publish node if --output is set.')
@@ -60,6 +59,13 @@ parser.add_argument('--forceStatus', help='Force computation if status is RUNNIN
parser.add_argument('--forceCompute', help='Compute in all cases even if already computed.',
                    action='store_true')

parser.add_argument('--submit', help='Submit on renderfarm instead of local computation.',
                    action='store_true')
parser.add_argument('--submitter',
                    type=str,
                    default='SimpleFarm',
                    help='Execute job with a specific submitter.')

args = parser.parse_args()
@@ -78,7 +84,7 @@ if not args.input and not args.inputRecursive:
views, intrinsics = [], []
# Build image files list from inputImages arguments
images = []
filesByType = multiview.FilesByType()

hasSearchedForImages = False
@@ -88,21 +94,32 @@ if args.input:
        from meshroom.nodes.aliceVision.CameraInit import readSfMData
        views, intrinsics = readSfMData(args.input[0])
    else:
        images += multiview.findImageFiles(args.input, recursive=False)
        filesByType.extend(multiview.findFilesByTypeInFolder(args.input, recursive=False))
        hasSearchedForImages = True

if args.inputRecursive:
    images += multiview.findImageFiles(args.inputRecursive, recursive=True)
    filesByType.extend(multiview.findFilesByTypeInFolder(args.inputRecursive, recursive=True))
    hasSearchedForImages = True

if hasSearchedForImages and not images:
if hasSearchedForImages and not filesByType.images:
    print("No image found")
    exit(-1)

# initialize photogrammetry pipeline
if args.pipeline:
    graph = multiview.Graph(name=args.pipeline)

    with multiview.GraphModification(graph):
        # initialize photogrammetry pipeline
        if args.pipeline.lower() == "photogrammetry":
            # default photogrammetry pipeline
            multiview.photogrammetry(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph)
        elif args.pipeline.lower() == "hdri":
            # default hdri pipeline
            graph = multiview.hdri(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph)
        else:
            # custom pipeline
            graph = meshroom.core.graph.loadGraph(args.pipeline)
            graph.load(args.pipeline)
            # graph.update()

        cameraInit = getOnlyNodeOfType(graph, 'CameraInit')
        # reset graph inputs
        cameraInit.viewpoints.resetValue()
@@ -117,17 +134,13 @@ if args.pipeline:
    if args.output:
        publish = getOnlyNodeOfType(graph, 'Publish')
        publish.output.value = args.output
else:
    # default pipeline
    graph = multiview.photogrammetry(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output)
    cameraInit = getOnlyNodeOfType(graph, 'CameraInit')

if images:
    views, intrinsics = cameraInit.nodeDesc.buildIntrinsics(cameraInit, images)
if filesByType.images:
    views, intrinsics = cameraInit.nodeDesc.buildIntrinsics(cameraInit, filesByType.images)
    cameraInit.viewpoints.value = views
    cameraInit.intrinsics.value = intrinsics

if args.overrides:
    import io
    import json
    with io.open(args.overrides, 'r', encoding='utf-8', errors='ignore') as f:
@@ -136,7 +149,7 @@ if args.overrides:
        for attrName, value in overrides.items():
            graph.findNode(nodeName).attribute(attrName).value = value

if args.paramOverrides:
    print("\n")
    import re
    reExtract = re.compile('(\w+)([:.])(\w+)=(.*)')
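The --paramOverrides handling relies on the reExtract pattern above; a minimal sketch of how one override token is split (the interpretation of ':' as a node-type selector versus '.' as a node-name selector is an assumption based on the surrounding script, not shown in this hunk):

    # Illustration only, not part of the diff.
    import re

    reExtract = re.compile(r'(\w+)([:.])(\w+)=(.*)')
    m = reExtract.match('FeatureExtraction.describerPreset=ultra')
    node, sep, attr, value = m.groups()
    # node='FeatureExtraction', sep='.', attr='describerPreset', value='ultra'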
@@ -159,15 +172,15 @@ if args.paramOverrides:
            raise ValueError('Invalid param override: ' + str(p))
    print("\n")

# setup DepthMap downscaling
if args.scale > 0:
    for node in graph.nodesByType('DepthMap'):
        node.downscale.value = args.scale

# setup cache directory
graph.cacheDir = args.cache if args.cache else meshroom.core.defaultCacheFolder

if args.save:
    graph.save(args.save, setupProjectFile=not bool(args.cache))
    print('File successfully saved: "{}"'.format(args.save))
@@ -177,6 +190,11 @@ if not args.output:
# find end nodes (None will compute all graph)
toNodes = graph.findNodes(args.toNode) if args.toNode else None

if args.compute:
if args.submit:
    if not args.save:
        raise ValueError('Need to save the project to file to submit on renderfarm.')
    # submit on renderfarm
    meshroom.core.graph.submit(args.save, args.submitter, toNode=toNodes)
elif args.compute:
    # start computation
    meshroom.core.graph.executeGraph(graph, toNodes=toNodes, forceCompute=args.forceCompute, forceStatus=args.forceStatus)
@@ -260,10 +260,7 @@ meshroomFolder = os.path.dirname(os.path.dirname(__file__))
# - Nodes
loadAllNodes(folder=os.path.join(meshroomFolder, 'nodes'))
# - Submitters
subs = loadSubmitters(meshroomFolder, 'submitters')
# - additional 3rd party submitters
if "MESHROOM_SUBMITTERS_PATH" in os.environ:
    subs += loadSubmitters(os.environ["MESHROOM_SUBMITTERS_PATH"], 'submitters')
subs = loadSubmitters(os.environ.get("MESHROOM_SUBMITTERS_PATH", meshroomFolder), 'submitters')

for sub in subs:
    registerSubmitter(sub())
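Submitter discovery now falls back to the Meshroom folder when the environment variable is unset; a minimal sketch of the lookup (the plugin path is a hypothetical example):

    import os

    # Hypothetical third-party plugin location; only used when the variable is set.
    os.environ["MESHROOM_SUBMITTERS_PATH"] = "/opt/studio/meshroom_plugins"

    meshroomFolder = "/path/to/meshroom"  # placeholder for the real package folder
    submittersFolder = os.environ.get("MESHROOM_SUBMITTERS_PATH", meshroomFolder)
    # loadSubmitters(submittersFolder, 'submitters') then scans this single folder,
    # so an external path replaces (rather than extends) the built-in search path.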
@@ -318,13 +318,14 @@ class DynamicNodeSize(object):
    def computeSize(self, node):
        param = node.attribute(self._param)
        assert param.isInput
        # Link: use linked node's size
        if param.isLink:
            return param.getLinkParam().node.size
        # ListAttribute: use list size
        if isinstance(param.desc, ListAttribute):
            return len(param)
        if isinstance(param.desc, IntParam):
            return param.value
        return 1
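With the new IntParam branch above, a node's size can now follow an integer attribute directly (in addition to a linked input or a ListAttribute). A rough sketch of a descriptor using it; the node and attribute names are made up for illustration:

    from meshroom.core import desc

    class FakeSplit(desc.CommandLineNode):
        # Hypothetical node: its size follows the integer 'count' input,
        # thanks to the IntParam case added in computeSize().
        commandLine = 'true {allParams}'
        size = desc.DynamicNodeSize('count')
        inputs = [
            desc.IntParam(name='count', label='Count', description='Drives the node size.',
                          value=4, range=(1, 100, 1), uid=[0]),
        ]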
@@ -383,7 +384,26 @@ class Node(object):
    def __init__(self):
        pass

    def updateInternals(self, node):
    @classmethod
    def update(cls, node):
        """ Method call before node's internal update on invalidation.

        Args:
            node: the BaseNode instance being updated
        See Also:
            BaseNode.updateInternals
        """
        pass

    @classmethod
    def postUpdate(cls, node):
        """ Method call after node's internal update on invalidation.

        Args:
            node: the BaseNode instance being updated
        See Also:
            NodeBase.updateInternals
        """
        pass

    def stopProcess(self, chunk):
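These two class-level hooks let a node description react before and after the graph rebuilds a node's internals on invalidation. A minimal sketch of a descriptor overriding them; the node and attribute names are hypothetical, but LDRToHDR further down uses the same mechanism to derive nbBrackets:

    from meshroom.core import desc

    class AutoBrackets(desc.CommandLineNode):
        # Hypothetical node illustrating the update/postUpdate hooks.
        commandLine = 'true {allParams}'
        inputs = [
            desc.IntParam(name='userNbBrackets', label='User Nb Brackets', description='0 means automatic.',
                          value=0, range=(0, 15, 1), uid=[0]),
            desc.IntParam(name='nbBrackets', label='Nb Brackets', description='Resolved value.',
                          value=0, range=(0, 15, 1), uid=[]),
        ]

        @classmethod
        def update(cls, node):
            # Called before the node recomputes uids and command variables.
            if node.userNbBrackets.value != 0:
                node.nbBrackets.value = node.userNbBrackets.value

        @classmethod
        def postUpdate(cls, node):
            # Called after uids and command variables have been rebuilt.
            pass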
@@ -502,6 +502,11 @@ class BaseNode(BaseObject):
        for name, attr in self._attributes.objects.items():
            if attr.isInput:
                continue  # skip inputs

            # Only consider File attributes for command output parameters
            if not isinstance(attr.attributeDesc, desc.File):
                continue

            attr.value = attr.attributeDesc.value.format(**self._cmdVars)
            attr._invalidationValue = attr.attributeDesc.value.format(**cmdVarsNoCache)
            v = attr.getValueStr()
@@ -599,6 +604,8 @@ class BaseNode(BaseObject):
        Args:
            cacheDir (str): (optional) override graph's cache directory with custom path
        """
        if self.nodeDesc:
            self.nodeDesc.update(self)
        # Update chunks splitting
        self._updateChunks()
        # Retrieve current internal folder (if possible)
@@ -613,6 +620,8 @@ class BaseNode(BaseObject):
        }
        self._computeUids()
        self._buildCmdVars()
        if self.nodeDesc:
            self.nodeDesc.postUpdate(self)
        # Notify internal folder change if needed
        if self.internalFolder != folder:
            self.internalFolderChanged.emit()
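Taken together, the last two hunks fix when the new descriptor hooks fire inside BaseNode.updateInternals; a simplified outline of the resulting call order (not the full method):

    def updateInternals(self, cacheDir=None):
        # Simplified sketch of the order implied by the hunks above.
        folder = self.internalFolder              # value remembered before the update
        if self.nodeDesc:
            self.nodeDesc.update(self)            # pre-update hook (e.g. derive nbBrackets)
        self._updateChunks()                      # recompute chunk splitting
        self._computeUids()                       # recompute invalidation uids
        self._buildCmdVars()                      # rebuild command-line variables
        if self.nodeDesc:
            self.nodeDesc.postUpdate(self)        # post-update hook
        if self.internalFolder != folder:
            self.internalFolderChanged.emit()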
@@ -6,15 +6,53 @@ import os
from meshroom.core.graph import Graph, GraphModification

# Supported image extensions
imageExtensions = ('.jpg', '.jpeg', '.tif', '.tiff', '.png', '.exr', '.rw2', '.cr2', '.nef', '.arw', '.dng')
imageExtensions = ('.jpg', '.jpeg', '.tif', '.tiff', '.png', '.exr', '.rw2', '.cr2', '.nef', '.arw')
videoExtensions = ('.avi', '.mov', '.qt',
                   '.mkv', '.webm',
                   '.mp4', '.mpg', '.mpeg', '.m2v', '.m4v',
                   '.wmv',
                   '.ogv', '.ogg',
                   '.mxf')
panoramaInfoExtensions = ('.xml')


def isImageFile(filepath):
    """ Return whether filepath is a path to an image file supported by Meshroom. """
    return os.path.splitext(filepath)[1].lower() in imageExtensions
def hasExtension(filepath, extensions):
    """ Return whether filepath is one of the following extensions. """
    return os.path.splitext(filepath)[1].lower() in extensions


def findImageFiles(folder, recursive=False):
class FilesByType:
    def __init__(self):
        self.images = []
        self.videos = []
        self.panoramaInfo = []
        self.other = []

    def __bool__(self):
        return self.images or self.videos or self.panoramaInfo

    def extend(self, other):
        self.images.extend(other.images)
        self.videos.extend(other.videos)
        self.panoramaInfo.extend(other.panoramaInfo)
        self.other.extend(other.other)

    def addFile(self, file):
        if hasExtension(file, imageExtensions):
            self.images.append(file)
        elif hasExtension(file, videoExtensions):
            self.videos.append(file)
        elif hasExtension(file, panoramaInfoExtensions):
            self.panoramaInfo.append(file)
        else:
            self.other.append(file)

    def addFiles(self, files):
        for file in files:
            self.addFile(file)


def findFilesByTypeInFolder(folder, recursive=False):
    """
    Return all files that are images in 'folder' based on their extensions.
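FilesByType groups whatever the input scan finds by extension; a quick sketch of how it behaves (the file names are made up):

    from meshroom import multiview

    files = multiview.FilesByType()
    files.addFiles(['/data/pano/IMG_0001.CR2',    # -> images (extension check is case-insensitive)
                    '/data/pano/capture.mp4',     # -> videos
                    '/data/pano/papywizard.xml',  # -> panoramaInfo
                    '/data/pano/notes.txt'])      # -> other
    if files.images:
        print(len(files.images), "image(s) found")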
@@ -30,23 +68,111 @@ def findImageFiles(folder, recursive=False):
    else:
        inputFolders.append(folder)

    output = []
    output = FilesByType()
    for currentFolder in inputFolders:
        if os.path.isfile(currentFolder):
            if isImageFile(currentFolder):
                output.append(currentFolder)
            output.addFile(currentFolder)
            continue
        elif os.path.isdir(currentFolder):
            if recursive:
                for root, directories, files in os.walk(currentFolder):
                    for filename in files:
                        if isImageFile(filename):
                            output.append(os.path.join(root, filename))
                        output.addFile(os.path.join(root, filename))
            else:
                output.extend([os.path.join(currentFolder, filename) for filename in os.listdir(currentFolder) if isImageFile(filename)])
                output.addFiles([os.path.join(currentFolder, filename) for filename in os.listdir(currentFolder)])
        else:
            # if not a diretory or a file, it may be an expression
            import glob
            paths = glob.glob(currentFolder)
            filesByType = findFilesByTypeInFolder(paths, recursive=recursive)
            output.extend(filesByType)

    return output


def photogrammetry(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output=''):
def hdri(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None):
    """
    Create a new Graph with a complete HDRI pipeline.

    Args:
        inputImages (list of str, optional): list of image file paths
        inputViewpoints (list of Viewpoint, optional): list of Viewpoints
        output (str, optional): the path to export reconstructed model to

    Returns:
        Graph: the created graph
    """
    if not graph:
        graph = Graph('HDRI')
    with GraphModification(graph):
        nodes = hdriPipeline(graph)
        cameraInit = nodes[0]
        cameraInit.viewpoints.extend([{'path': image} for image in inputImages])
        cameraInit.viewpoints.extend(inputViewpoints)
        cameraInit.intrinsics.extend(inputIntrinsics)

        if output:
            stitching = nodes[-1]
            graph.addNewNode('Publish', output=output, inputFiles=[stitching.output])

    return graph


def hdriPipeline(graph):
    """
    Instantiate an HDRI pipeline inside 'graph'.
    Args:
        graph (Graph/UIGraph): the graph in which nodes should be instantiated

    Returns:
        list of Node: the created nodes
    """
    cameraInit = graph.addNewNode('CameraInit')

    ldr2hdr = graph.addNewNode('LDRToHDR',
                               input=cameraInit.output)

    featureExtraction = graph.addNewNode('FeatureExtraction',
                                         input=ldr2hdr.outSfMDataFilename)
    featureExtraction.describerPreset.value = 'ultra'
    imageMatching = graph.addNewNode('ImageMatching',
                                     input=featureExtraction.input,
                                     featuresFolders=[featureExtraction.output])
    featureMatching = graph.addNewNode('FeatureMatching',
                                       input=imageMatching.input,
                                       featuresFolders=imageMatching.featuresFolders,
                                       imagePairsList=imageMatching.output)

    panoramaExternalInfo = graph.addNewNode('PanoramaExternalInfo',
                                            input=ldr2hdr.outSfMDataFilename,
                                            matchesFolders=[featureMatching.output]  # Workaround for tractor submission with a fake dependency
                                            )

    panoramaEstimation = graph.addNewNode('PanoramaEstimation',
                                          input=panoramaExternalInfo.outSfMDataFilename,
                                          featuresFolders=featureMatching.featuresFolders,
                                          matchesFolders=[featureMatching.output])

    panoramaWarping = graph.addNewNode('PanoramaWarping',
                                       input=panoramaEstimation.outSfMDataFilename)

    panoramaCompositing = graph.addNewNode('PanoramaCompositing',
                                           input=panoramaWarping.output)

    return [
        cameraInit,
        featureExtraction,
        imageMatching,
        featureMatching,
        panoramaExternalInfo,
        panoramaEstimation,
        panoramaWarping,
        panoramaCompositing,
    ]


def photogrammetry(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None):
    """
    Create a new Graph with a complete photogrammetry pipeline.
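A rough sketch of how the new hdri() entry point can be used from a script, assuming Meshroom's nodes are registered; the image paths and save location are placeholders:

    from meshroom import multiview
    import meshroom.core

    # Build an HDRI graph from a couple of placeholder image paths and publish to 'output'.
    graph = multiview.hdri(inputImages=['/data/pano/IMG_0001.jpg', '/data/pano/IMG_0002.jpg'],
                           output='/data/pano/out')
    graph.cacheDir = meshroom.core.defaultCacheFolder
    graph.save('/data/pano/hdri.mg', setupProjectFile=False)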
@@ -58,6 +184,7 @@ def photogrammetry(inputImages=list(), inputViewpoints=list(), inputIntrinsics=l
    Returns:
        Graph: the created graph
    """
    if not graph:
        graph = Graph('Photogrammetry')
    with GraphModification(graph):
        sfmNodes, mvsNodes = photogrammetryPipeline(graph)
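Both pipeline builders now accept an existing graph, which is how the command-line script names the graph after the chosen pipeline; a small sketch mirroring that usage:

    from meshroom import multiview

    # Reuse a pre-created (e.g. pre-named) graph instead of letting the builder create one.
    graph = multiview.Graph(name='photogrammetry')
    with multiview.GraphModification(graph):
        multiview.photogrammetry(inputViewpoints=[], inputIntrinsics=[], output='', graph=graph)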
meshroom/nodes/aliceVision/CameraDownscale.py (new file, 49 lines)
@@ -0,0 +1,49 @@
|
|||
__version__ = "1.0"
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
from meshroom.core import desc
|
||||
|
||||
|
||||
class CameraDownscale(desc.CommandLineNode):
|
||||
commandLine = 'aliceVision_cameraDownscale {allParams}'
|
||||
size = desc.DynamicNodeSize('input')
|
||||
|
||||
inputs = [
|
||||
desc.File(
|
||||
name='input',
|
||||
label='Input',
|
||||
description="SfM Data File",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
desc.FloatParam(
|
||||
name='rescalefactor',
|
||||
label='RescaleFactor',
|
||||
description='Newsize = rescalefactor * oldsize',
|
||||
value=0.5,
|
||||
range=(0.0, 1.0, 0.1),
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='verboseLevel',
|
||||
label='Verbose Level',
|
||||
description='Verbosity level (fatal, error, warning, info, debug, trace).',
|
||||
value='info',
|
||||
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
|
||||
exclusive=True,
|
||||
uid=[],
|
||||
),
|
||||
]
|
||||
|
||||
outputs = [
|
||||
desc.File(
|
||||
name='outSfMDataFilename',
|
||||
label='Output SfMData File',
|
||||
description='Path to the output sfmdata file',
|
||||
value=desc.Node.internalFolder + 'sfmData.abc',
|
||||
uid=[],
|
||||
)
|
||||
]
|
|
@@ -186,7 +186,7 @@ class CameraInit(desc.CommandLineNode):
            # logging.debug(' - commandLine:', cmd)
            proc = psutil.Popen(cmd, stdout=None, stderr=None, shell=True)
            stdout, stderr = proc.communicate()
            proc.wait()
            # proc.wait()
            if proc.returncode != 0:
                raise RuntimeError('CameraInit failed with error code {}.\nCommand was: "{}".\n'.format(
                    proc.returncode, cmd)
|
||||
|
|
meshroom/nodes/aliceVision/ExportMatches.py (new file, 71 lines)
@@ -0,0 +1,71 @@
|
|||
__version__ = "1.1"
|
||||
|
||||
from meshroom.core import desc
|
||||
|
||||
|
||||
class ExportMatches(desc.CommandLineNode):
|
||||
commandLine = 'aliceVision_exportMatches {allParams}'
|
||||
size = desc.DynamicNodeSize('input')
|
||||
|
||||
inputs = [
|
||||
desc.File(
|
||||
name='input',
|
||||
label='Input',
|
||||
description='SfMData file.',
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='describerTypes',
|
||||
label='Describer Types',
|
||||
description='Describer types used to describe an image.',
|
||||
value=['sift'],
|
||||
values=['sift', 'sift_float', 'sift_upright', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
|
||||
exclusive=False,
|
||||
uid=[0],
|
||||
joinChar=',',
|
||||
),
|
||||
desc.ListAttribute(
|
||||
elementDesc=desc.File(
|
||||
name="featuresFolder",
|
||||
label="Features Folder",
|
||||
description="",
|
||||
value="",
|
||||
uid=[0],
|
||||
),
|
||||
name="featuresFolders",
|
||||
label="Features Folders",
|
||||
description="Folder(s) containing the extracted features and descriptors."
|
||||
),
|
||||
desc.ListAttribute(
|
||||
elementDesc=desc.File(
|
||||
name="matchesFolder",
|
||||
label="Matches Folder",
|
||||
description="",
|
||||
value="",
|
||||
uid=[0],
|
||||
),
|
||||
name="matchesFolders",
|
||||
label="Matches Folders",
|
||||
description="Folder(s) in which computed matches are stored."
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='verboseLevel',
|
||||
label='Verbose Level',
|
||||
description='verbosity level (fatal, error, warning, info, debug, trace).',
|
||||
value='info',
|
||||
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
|
||||
exclusive=True,
|
||||
uid=[],
|
||||
)
|
||||
]
|
||||
|
||||
outputs = [
|
||||
desc.File(
|
||||
name='output',
|
||||
label='Output Folder',
|
||||
description='Output path for the features and descriptors files (*.feat, *.desc).',
|
||||
value=desc.Node.internalFolder,
|
||||
uid=[],
|
||||
),
|
||||
]
|
meshroom/nodes/aliceVision/GlobalSfM.py (new file, 114 lines)
@@ -0,0 +1,114 @@
|
|||
__version__ = "1.0"
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
from meshroom.core import desc
|
||||
|
||||
|
||||
class GlobalSfM(desc.CommandLineNode):
|
||||
commandLine = 'aliceVision_globalSfM {allParams}'
|
||||
size = desc.DynamicNodeSize('input')
|
||||
|
||||
inputs = [
|
||||
desc.File(
|
||||
name='input',
|
||||
label='Input',
|
||||
description="SfM Data File",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
desc.ListAttribute(
|
||||
elementDesc=desc.File(
|
||||
name='featuresFolder',
|
||||
label='Features Folder',
|
||||
description="",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
name='featuresFolders',
|
||||
label='Features Folders',
|
||||
description="Folder(s) containing the extracted features."
|
||||
),
|
||||
desc.ListAttribute(
|
||||
elementDesc=desc.File(
|
||||
name='matchesFolder',
|
||||
label='Matches Folder',
|
||||
description="",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
name='matchesFolders',
|
||||
label='Matches Folders',
|
||||
description="Folder(s) in which computed matches are stored."
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='describerTypes',
|
||||
label='Describer Types',
|
||||
description='Describer types used to describe an image.',
|
||||
value=['sift'],
|
||||
values=['sift', 'sift_float', 'sift_upright', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4',
|
||||
'sift_ocv', 'akaze_ocv'],
|
||||
exclusive=False,
|
||||
uid=[0],
|
||||
joinChar=',',
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='rotationAveraging',
|
||||
label='Rotation Averaging Method',
|
||||
description="Method for rotation averaging :\n"
|
||||
" * L1 minimization\n"
|
||||
" * L2 minimization\n",
|
||||
values=['L1_minimization', 'L2_minimization'],
|
||||
value='L2_minimization',
|
||||
exclusive=True,
|
||||
uid=[0],
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='translationAveraging',
|
||||
label='Translation Averaging Method',
|
||||
description="Method for translation averaging :\n"
|
||||
" * L1 minimization\n"
|
||||
" * L2 minimization of sum of squared Chordal distances\n"
|
||||
" * L1 soft minimization",
|
||||
values=['L1_minimization', 'L2_minimization', 'L1_soft_minimization'],
|
||||
value='L1_soft_minimization',
|
||||
exclusive=True,
|
||||
uid=[0],
|
||||
),
|
||||
desc.BoolParam(
|
||||
name='lockAllIntrinsics',
|
||||
label='Force Lock of All Intrinsic Camera Parameters.',
|
||||
description='Force to keep constant all the intrinsics parameters of the cameras (focal length, \n'
|
||||
'principal point, distortion if any) during the reconstruction.\n'
|
||||
'This may be helpful if the input cameras are already fully calibrated.',
|
||||
value=False,
|
||||
uid=[0],
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='verboseLevel',
|
||||
label='Verbose Level',
|
||||
description='Verbosity level (fatal, error, warning, info, debug, trace).',
|
||||
value='info',
|
||||
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
|
||||
exclusive=True,
|
||||
uid=[],
|
||||
)
|
||||
]
|
||||
|
||||
outputs = [
|
||||
desc.File(
|
||||
name='output',
|
||||
label='Output Folder',
|
||||
description='',
|
||||
value=desc.Node.internalFolder,
|
||||
uid=[],
|
||||
),
|
||||
desc.File(
|
||||
name='outSfMDataFilename',
|
||||
label='Output SfMData File',
|
||||
description='Path to the output sfmdata file',
|
||||
value=desc.Node.internalFolder + 'SfmData.abc',
|
||||
uid=[],
|
||||
),
|
||||
]
|
meshroom/nodes/aliceVision/HDRIstitching.py (new file, 89 lines)
@@ -0,0 +1,89 @@
|
|||
__version__ = "1.0"
|
||||
|
||||
from meshroom.core import desc
|
||||
|
||||
|
||||
class HDRIstitching(desc.CommandLineNode):
|
||||
commandLine = 'aliceVision_utils_fisheyeProjection {allParams}'
|
||||
|
||||
inputs = [
|
||||
desc.ListAttribute(
|
||||
elementDesc=desc.File(
|
||||
name='inputFile',
|
||||
label='Input File/Folder',
|
||||
description="",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
name='input',
|
||||
label='Input Folder',
|
||||
description="List of fisheye images or folder containing them."
|
||||
),
|
||||
desc.FloatParam(
|
||||
name='blurWidth',
|
||||
label='Blur Width',
|
||||
description="Blur width of alpha channel for all fisheye (between 0 and 1). \n"
|
||||
"Determine the transitions sharpness.",
|
||||
value=0.2,
|
||||
range=(0, 1, 0.1),
|
||||
uid=[0],
|
||||
),
|
||||
desc.ListAttribute(
|
||||
elementDesc=desc.FloatParam(
|
||||
name='imageXRotation',
|
||||
label='Image X Rotation',
|
||||
description="",
|
||||
value=0,
|
||||
range=(-20, 20, 1),
|
||||
uid=[0],
|
||||
),
|
||||
name='xRotation',
|
||||
label='X Rotations',
|
||||
description="Rotations in degree on axis X (horizontal axis) for each image.",
|
||||
),
|
||||
desc.ListAttribute(
|
||||
elementDesc=desc.FloatParam(
|
||||
name='imageYRotation',
|
||||
label='Image Y Rotation',
|
||||
description="",
|
||||
value=0,
|
||||
range=(-30, 30, 5),
|
||||
uid=[0],
|
||||
),
|
||||
name='yRotation',
|
||||
label='Y Rotations',
|
||||
description="Rotations in degree on axis Y (vertical axis) for each image.",
|
||||
),
|
||||
desc.ListAttribute(
|
||||
elementDesc=desc.FloatParam(
|
||||
name='imageZRotation',
|
||||
label='Image Z Rotation',
|
||||
description="",
|
||||
value=0,
|
||||
range=(-10, 10, 1),
|
||||
uid=[0],
|
||||
),
|
||||
name='zRotation',
|
||||
label='Z Rotations',
|
||||
description="Rotations in degree on axis Z (depth axis) for each image.",
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='verboseLevel',
|
||||
label='Verbose Level',
|
||||
description="Verbosity level (fatal, error, warning, info, debug, trace).",
|
||||
value='info',
|
||||
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
|
||||
exclusive=True,
|
||||
uid=[],
|
||||
),
|
||||
]
|
||||
|
||||
outputs = [
|
||||
desc.File(
|
||||
name='output',
|
||||
label='Output Panorama',
|
||||
description="Output folder for panorama",
|
||||
value=desc.Node.internalFolder,
|
||||
uid=[],
|
||||
),
|
||||
]
|
|
@@ -1,23 +1,93 @@
|
|||
__version__ = "1.0"
|
||||
__version__ = "2.0"
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
from meshroom.core import desc
|
||||
|
||||
|
||||
class DividedInputNodeSize(desc.DynamicNodeSize):
|
||||
"""
|
||||
The LDR2HDR will reduce the amount of views in the SfMData.
|
||||
This class converts the number of LDR input views into the number of HDR output views.
|
||||
"""
|
||||
def __init__(self, param, divParam):
|
||||
super(DividedInputNodeSize, self).__init__(param)
|
||||
self._divParam = divParam
|
||||
def computeSize(self, node):
|
||||
s = super(DividedInputNodeSize, self).computeSize(node)
|
||||
divParam = node.attribute(self._divParam)
|
||||
if divParam.value == 0:
|
||||
return s
|
||||
return s / divParam.value
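DividedInputNodeSize converts the number of LDR input views into the number of HDR output views; a worked example of the arithmetic (the counts are illustrative):

    # With 15 input LDR views and nbBrackets = 3, the node size becomes 15 / 3 = 5,
    # i.e. one HDR output view per bracket group; nbBrackets = 0 keeps the input size.
    size = DividedInputNodeSize('input', 'nbBrackets')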
|
||||
|
||||
|
||||
class LDRToHDR(desc.CommandLineNode):
|
||||
commandLine = 'aliceVision_convertLDRToHDR {allParams}'
|
||||
size = DividedInputNodeSize('input', 'nbBrackets')
|
||||
|
||||
cpu = desc.Level.INTENSIVE
|
||||
ram = desc.Level.NORMAL
|
||||
|
||||
inputs = [
|
||||
desc.ListAttribute(
|
||||
elementDesc=desc.File(
|
||||
name='inputFolder',
|
||||
label='Input File/Folder',
|
||||
description="Folder containing LDR images",
|
||||
desc.File(
|
||||
name='input',
|
||||
label='Input',
|
||||
description="SfM Data File",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
name="input",
|
||||
label="Input Files or Folders",
|
||||
description='Folders containing LDR images.',
|
||||
desc.IntParam(
|
||||
name='userNbBrackets',
|
||||
label='Number of Brackets',
|
||||
description='Number of exposure brackets per HDR image (0 for automatic).',
|
||||
value=0,
|
||||
range=(0, 15, 1),
|
||||
uid=[0],
|
||||
group='user', # not used directly on the command line
|
||||
),
|
||||
desc.IntParam(
|
||||
name='nbBrackets',
|
||||
label='Automatic Nb Brackets',
|
||||
description='Number of exposure brackets used per HDR image. It is detected automatically from input Viewpoints metadata if "userNbBrackets" is 0, else it is equal to "userNbBrackets".',
|
||||
value=0,
|
||||
range=(0, 10, 1),
|
||||
uid=[],
|
||||
advanced=True,
|
||||
),
|
||||
desc.FloatParam(
|
||||
name='highlightCorrectionFactor',
|
||||
label='Highlights Correction',
|
||||
description='Pixels saturated in all input images have a partial information about their real luminance.\n'
|
||||
'We only know that the value should be >= to the standard hdr fusion.\n'
|
||||
'This parameter allows to perform a post-processing step to put saturated pixels to a constant '
|
||||
'value defined by the `highlightsMaxLuminance` parameter.\n'
|
||||
'This parameter is float to enable to weight this correction.',
|
||||
value=1.0,
|
||||
range=(0.0, 1.0, 0.01),
|
||||
uid=[0],
|
||||
),
|
||||
desc.FloatParam(
|
||||
name='highlightTargetLux',
|
||||
label='Highlight Target Luminance (Lux)',
|
||||
description='This is an arbitrary target value (in Lux) used to replace the unknown luminance value of the saturated pixels.\n'
|
||||
'\n'
|
||||
'Some Outdoor Reference Light Levels:\n'
|
||||
' * 120,000 lux : Brightest sunlight\n'
|
||||
' * 110,000 lux : Bright sunlight\n'
|
||||
' * 20,000 lux : Shade illuminated by entire clear blue sky, midday\n'
|
||||
' * 1,000 lux : Typical overcast day, midday\n'
|
||||
' * 400 lux : Sunrise or sunset on a clear day\n'
|
||||
' * 40 lux : Fully overcast, sunset/sunrise\n'
|
||||
'\n'
|
||||
'Some Indoor Reference Light Levels:\n'
|
||||
' * 20000 lux : Max Usually Used Indoor\n'
|
||||
' * 750 lux : Supermarkets\n'
|
||||
' * 500 lux : Office Work\n'
|
||||
' * 150 lux : Home\n',
|
||||
value=120000.0,
|
||||
range=(1000.0, 150000.0, 1.0),
|
||||
uid=[0],
|
||||
),
|
||||
desc.BoolParam(
|
||||
name='fisheyeLens',
|
||||
|
@ -25,7 +95,21 @@ class LDRToHDR(desc.CommandLineNode):
|
|||
description="Enable if a fisheye lens has been used.\n "
|
||||
"This will improve the estimation of the Camera's Response Function by considering only the pixels in the center of the image\n"
|
||||
"and thus ignore undefined/noisy pixels outside the circle defined by the fisheye lens.",
|
||||
value=True,
|
||||
value=False,
|
||||
uid=[0],
|
||||
),
|
||||
desc.BoolParam(
|
||||
name='calibrationRefineExposures',
|
||||
label='Refine Exposures',
|
||||
description="Refine exposures provided by metadata (shutter speed, f-number, iso). Only available for 'laguerre' calibration method.",
|
||||
value=False,
|
||||
uid=[0],
|
||||
),
|
||||
desc.BoolParam(
|
||||
name='byPass',
|
||||
label='bypass convert',
|
||||
description="Bypass HDR creation and use the medium bracket as the source for the next steps",
|
||||
value=False,
|
||||
uid=[0],
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
|
@ -35,26 +119,13 @@ class LDRToHDR(desc.CommandLineNode):
|
|||
" * linear \n"
|
||||
" * robertson \n"
|
||||
" * debevec \n"
|
||||
" * grossberg",
|
||||
values=['linear', 'robertson', 'debevec', 'grossberg'],
|
||||
value='linear',
|
||||
" * grossberg \n"
|
||||
" * laguerre",
|
||||
values=['linear', 'robertson', 'debevec', 'grossberg', 'laguerre'],
|
||||
value='debevec',
|
||||
exclusive=True,
|
||||
uid=[0],
|
||||
),
|
||||
desc.File(
|
||||
name='inputResponse',
|
||||
label='Input Response',
|
||||
description="external camera response file path to fuse all LDR images together.",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
desc.StringParam(
|
||||
name='targetExposureImage',
|
||||
label='Target Exposure Image',
|
||||
description="LDR image(s) name(s) at the target exposure for the output HDR image(s) to be centered.",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='calibrationWeight',
|
||||
label='Calibration Weight',
|
||||
|
@ -80,49 +151,109 @@ class LDRToHDR(desc.CommandLineNode):
|
|||
exclusive=True,
|
||||
uid=[0],
|
||||
),
|
||||
desc.FloatParam(
|
||||
name='expandDynamicRange',
|
||||
label='Expand Dynamic Range',
|
||||
description="Correction of clamped high values in dynamic range: \n"
|
||||
" - use 0 for no correction \n"
|
||||
" - use 0.5 for interior lighting \n"
|
||||
" - use 1 for outdoor lighting",
|
||||
value=1,
|
||||
range=(0, 1, 0.1),
|
||||
desc.IntParam(
|
||||
name='calibrationNbPoints',
|
||||
label='Calibration Nb Points',
|
||||
description='Internal number of points used for calibration.',
|
||||
value=0,
|
||||
range=(0, 10000000, 1000),
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.IntParam(
|
||||
name='calibrationDownscale',
|
||||
label='Calibration Downscale',
|
||||
description='Scaling factor applied to images before calibration of the response function to reduce the impact of misalignment.',
|
||||
value=4,
|
||||
range=(1, 16, 1),
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.IntParam(
|
||||
name='channelQuantizationPower',
|
||||
label='Channel Quantization Power',
|
||||
description='Quantization level like 8 bits or 10 bits.',
|
||||
value=10,
|
||||
range=(8, 14, 1),
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='verboseLevel',
|
||||
label='Verbose Level',
|
||||
description="Verbosity level (fatal, error, warning, info, debug, trace).",
|
||||
description='Verbosity level (fatal, error, warning, info, debug, trace).',
|
||||
value='info',
|
||||
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
|
||||
exclusive=True,
|
||||
uid=[],
|
||||
),
|
||||
desc.File(
|
||||
name='recoverPath',
|
||||
label='Output Recovered Files',
|
||||
description="(debug) Folder for recovered LDR images at target exposures.",
|
||||
advanced=True,
|
||||
value='',
|
||||
uid=[],
|
||||
),
|
||||
]
|
||||
|
||||
outputs = [
|
||||
desc.File(
|
||||
name='output',
|
||||
label='Output Folder',
|
||||
description="Output folder for HDR images",
|
||||
value=desc.Node.internalFolder,
|
||||
name='outSfMDataFilename',
|
||||
label='Output SfMData File',
|
||||
description='Path to the output sfmdata file',
|
||||
value=desc.Node.internalFolder + 'sfmData.abc',
|
||||
uid=[],
|
||||
),
|
||||
desc.File(
|
||||
name='outputResponse',
|
||||
label='Output Response',
|
||||
description="Output response function path.",
|
||||
value=desc.Node.internalFolder + 'response.csv',
|
||||
uid=[],
|
||||
),
|
||||
)
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def update(cls, node):
|
||||
if not isinstance(node.nodeDesc, cls):
|
||||
raise ValueError("Node {} is not an instance of type {}".format(node, cls))
|
||||
# TODO: use Node version for this test
|
||||
if 'userNbBrackets' not in node.getAttributes().keys():
|
||||
# Old version of the node
|
||||
return
|
||||
if node.userNbBrackets.value != 0:
|
||||
node.nbBrackets.value = node.userNbBrackets.value
|
||||
return
|
||||
# logging.info("[LDRToHDR] Update start: version:" + str(node.packageVersion))
|
||||
cameraInitOutput = node.input.getLinkParam()
|
||||
if not cameraInitOutput:
|
||||
node.nbBrackets.value = 0
|
||||
return
|
||||
viewpoints = cameraInitOutput.node.viewpoints.value
|
||||
|
||||
# logging.info("[LDRToHDR] Update start: nb viewpoints:" + str(len(viewpoints)))
|
||||
inputs = []
|
||||
for viewpoint in viewpoints:
|
||||
jsonMetadata = viewpoint.metadata.value
|
||||
if not jsonMetadata:
|
||||
# no metadata, we cannot found the number of brackets
|
||||
node.nbBrackets.value = 0
|
||||
return
|
||||
d = json.loads(jsonMetadata)
|
||||
fnumber = d.get("FNumber", d.get("Exif:ApertureValue", ""))
|
||||
shutterSpeed = d.get("Exif:ShutterSpeedValue", "") # also "ExposureTime"?
|
||||
iso = d.get("Exif:ISOSpeedRatings", "")
|
||||
if not fnumber and not shutterSpeed:
|
||||
# if one image without shutter or fnumber, we cannot found the number of brackets
|
||||
node.nbBrackets.value = 0
|
||||
return
|
||||
inputs.append((viewpoint.path.value, (fnumber, shutterSpeed, iso)))
|
||||
inputs.sort()
|
||||
|
||||
exposureGroups = []
|
||||
exposures = []
|
||||
for path, exp in inputs:
|
||||
if exposures and exp != exposures[-1] and exp == exposures[0]:
|
||||
exposureGroups.append(exposures)
|
||||
exposures = [exp]
|
||||
else:
|
||||
exposures.append(exp)
|
||||
exposureGroups.append(exposures)
|
||||
exposures = None
|
||||
bracketSizes = set()
|
||||
for expGroup in exposureGroups:
|
||||
bracketSizes.add(len(expGroup))
|
||||
if len(bracketSizes) == 1:
|
||||
node.nbBrackets.value = bracketSizes.pop()
|
||||
# logging.info("[LDRToHDR] nb bracket size:" + str(node.nbBrackets.value))
|
||||
else:
|
||||
node.nbBrackets.value = 0
|
||||
# logging.info("[LDRToHDR] Update end")
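The grouping loop above starts a new exposure group whenever the current exposure differs from the previous one but matches the first exposure of the running group; a small standalone sketch of the same rule on a bare list (the exposure values are invented):

    def groupBrackets(exposures):
        # Same grouping rule as LDRToHDR.update above, applied to a plain list.
        groups, current = [], []
        for exp in exposures:
            if current and exp != current[-1] and exp == current[0]:
                groups.append(current)
                current = [exp]
            else:
                current.append(exp)
        groups.append(current)
        return groups

    # groupBrackets(['A', 'B', 'C', 'A', 'B', 'C']) -> [['A', 'B', 'C'], ['A', 'B', 'C']]
    # All groups have the same length (3), so nbBrackets resolves to 3; mixed group
    # lengths leave nbBrackets at 0, as in the code above.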
|
||||
|
||||
|
||||
|
|
meshroom/nodes/aliceVision/PanoramaCompositing.py (new file, 59 lines)
@@ -0,0 +1,59 @@
|
|||
__version__ = "1.0"
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
from meshroom.core import desc
|
||||
|
||||
|
||||
class PanoramaCompositing(desc.CommandLineNode):
|
||||
commandLine = 'aliceVision_panoramaCompositing {allParams}'
|
||||
size = desc.DynamicNodeSize('input')
|
||||
|
||||
inputs = [
|
||||
desc.File(
|
||||
name='input',
|
||||
label='Input',
|
||||
description="Panorama Warping result",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='outputFileType',
|
||||
label='Output File Type',
|
||||
description='Output file type for the undistorted images.',
|
||||
value='exr',
|
||||
values=['jpg', 'png', 'tif', 'exr'],
|
||||
exclusive=True,
|
||||
uid=[0],
|
||||
group='', # not part of allParams, as this is not a parameter for the command line
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='compositerType',
|
||||
label='Compositer Type',
|
||||
description='Which compositer should be used to blend images',
|
||||
value='multiband',
|
||||
values=['replace', 'alpha', 'multiband'],
|
||||
exclusive=True,
|
||||
uid=[0]
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='verboseLevel',
|
||||
label='Verbose Level',
|
||||
description='Verbosity level (fatal, error, warning, info, debug, trace).',
|
||||
value='info',
|
||||
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
|
||||
exclusive=True,
|
||||
uid=[],
|
||||
),
|
||||
]
|
||||
|
||||
outputs = [
|
||||
desc.File(
|
||||
name='output',
|
||||
label='Output Panorama',
|
||||
description='',
|
||||
value=desc.Node.internalFolder + 'panorama.{outputFileTypeValue}',
|
||||
uid=[],
|
||||
),
|
||||
]
|
meshroom/nodes/aliceVision/PanoramaEstimation.py (new file, 149 lines)
@@ -0,0 +1,149 @@
|
|||
__version__ = "1.0"
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
from meshroom.core import desc
|
||||
|
||||
|
||||
class PanoramaEstimation(desc.CommandLineNode):
|
||||
commandLine = 'aliceVision_panoramaEstimation {allParams}'
|
||||
size = desc.DynamicNodeSize('input')
|
||||
|
||||
inputs = [
|
||||
desc.File(
|
||||
name='input',
|
||||
label='Input',
|
||||
description="SfM Data File",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
desc.ListAttribute(
|
||||
elementDesc=desc.File(
|
||||
name='featuresFolder',
|
||||
label='Features Folder',
|
||||
description="",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
name='featuresFolders',
|
||||
label='Features Folders',
|
||||
description="Folder(s) containing the extracted features."
|
||||
),
|
||||
desc.ListAttribute(
|
||||
elementDesc=desc.File(
|
||||
name='matchesFolder',
|
||||
label='Matches Folder',
|
||||
description="",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
name='matchesFolders',
|
||||
label='Matches Folders',
|
||||
description="Folder(s) in which computed matches are stored."
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='describerTypes',
|
||||
label='Describer Types',
|
||||
description='Describer types used to describe an image.',
|
||||
value=['sift'],
|
||||
values=['sift', 'sift_float', 'sift_upright', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4',
|
||||
'sift_ocv', 'akaze_ocv'],
|
||||
exclusive=False,
|
||||
uid=[0],
|
||||
joinChar=',',
|
||||
),
|
||||
desc.IntParam(
|
||||
name='orientation',
|
||||
label='Orientation',
|
||||
description='Orientation',
|
||||
value=0,
|
||||
range=(0, 6, 1),
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.FloatParam(
|
||||
name='offsetLongitude',
|
||||
label='Longitude offset (deg.)',
|
||||
description='''Offset to the panorama longitude''',
|
||||
value=0.0,
|
||||
range=(-180.0, 180.0, 1.0),
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.FloatParam(
|
||||
name='offsetLatitude',
|
||||
label='Latitude offset (deg.)',
|
||||
description='''Offset to the panorama latitude''',
|
||||
value=0.0,
|
||||
range=(-90.0, 90.0, 1.0),
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='rotationAveraging',
|
||||
label='Rotation Averaging Method',
|
||||
description="Method for rotation averaging :\n"
|
||||
" * L1 minimization\n"
|
||||
" * L2 minimization\n",
|
||||
values=['L1_minimization', 'L2_minimization'],
|
||||
value='L2_minimization',
|
||||
exclusive=True,
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='relativeRotation',
|
||||
label='Relative Rotation Method',
|
||||
description="Method for relative rotation :\n"
|
||||
" * from essential matrix\n"
|
||||
" * from homography matrix",
|
||||
values=['essential_matrix', 'homography_matrix'],
|
||||
value='homography_matrix',
|
||||
exclusive=True,
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.BoolParam(
|
||||
name='refine',
|
||||
label='Refine',
|
||||
description='Refine camera relative poses, points and optionally internal camera parameter',
|
||||
value=True,
|
||||
uid=[0],
|
||||
),
|
||||
desc.BoolParam(
|
||||
name='lockAllIntrinsics',
|
||||
label='Force Lock of All Intrinsic Camera Parameters.',
|
||||
description='Force to keep constant all the intrinsics parameters of the cameras (focal length, \n'
|
||||
'principal point, distortion if any) during the reconstruction.\n'
|
||||
'This may be helpful if the input cameras are already fully calibrated.',
|
||||
value=False,
|
||||
uid=[0],
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='verboseLevel',
|
||||
label='Verbose Level',
|
||||
description='Verbosity level (fatal, error, warning, info, debug, trace).',
|
||||
value='info',
|
||||
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
|
||||
exclusive=True,
|
||||
uid=[],
|
||||
),
|
||||
]
|
||||
|
||||
outputs = [
|
||||
desc.File(
|
||||
name='output',
|
||||
label='Output Folder',
|
||||
description='',
|
||||
value=desc.Node.internalFolder,
|
||||
uid=[],
|
||||
),
|
||||
desc.File(
|
||||
name='outSfMDataFilename',
|
||||
label='Output SfMData File',
|
||||
description='Path to the output sfmdata file',
|
||||
value=desc.Node.internalFolder + 'sfmData.abc',
|
||||
uid=[],
|
||||
),
|
||||
]
|
meshroom/nodes/aliceVision/PanoramaExternalInfo.py (new file, 60 lines)
@@ -0,0 +1,60 @@
|
|||
__version__ = "1.0"
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
from meshroom.core import desc
|
||||
|
||||
|
||||
class PanoramaExternalInfo(desc.CommandLineNode):
|
||||
commandLine = 'aliceVision_panoramaExternalInfo {allParams}'
|
||||
size = desc.DynamicNodeSize('input')
|
||||
|
||||
inputs = [
|
||||
desc.File(
|
||||
name='input',
|
||||
label='Input',
|
||||
description="SfM Data File",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
desc.File(
|
||||
name='config',
|
||||
label='Xml Config',
|
||||
description="XML Data File",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
desc.ListAttribute(
|
||||
elementDesc=desc.File(
|
||||
name='matchesFolder',
|
||||
label='Matches Folder',
|
||||
description="",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
name='matchesFolders',
|
||||
label='Matches Folders',
|
||||
description="Folder(s) in which computed matches are stored. (WORKAROUND for valid Tractor graph submission)",
|
||||
group='forDependencyOnly',
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='verboseLevel',
|
||||
label='Verbose Level',
|
||||
description='Verbosity level (fatal, error, warning, info, debug, trace).',
|
||||
value='info',
|
||||
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
|
||||
exclusive=True,
|
||||
uid=[],
|
||||
),
|
||||
]
|
||||
|
||||
outputs = [
|
||||
desc.File(
|
||||
name='outSfMDataFilename',
|
||||
label='Output SfMData File',
|
||||
description='Path to the output sfmdata file',
|
||||
value=desc.Node.internalFolder + 'sfmData.abc',
|
||||
uid=[],
|
||||
)
|
||||
]
|
meshroom/nodes/aliceVision/PanoramaWarping.py (new file, 48 lines)
@@ -0,0 +1,48 @@
|
|||
__version__ = "1.0"
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
from meshroom.core import desc
|
||||
|
||||
|
||||
class PanoramaWarping(desc.CommandLineNode):
|
||||
commandLine = 'aliceVision_panoramaWarping {allParams}'
|
||||
size = desc.DynamicNodeSize('input')
|
||||
|
||||
inputs = [
|
||||
desc.File(
|
||||
name='input',
|
||||
label='Input',
|
||||
description="SfM Data File",
|
||||
value='',
|
||||
uid=[0],
|
||||
),
|
||||
desc.IntParam(
|
||||
name='panoramaWidth',
|
||||
label='Panorama Width',
|
||||
description='Panorama width (pixels). 0 For automatic size',
|
||||
value=10000,
|
||||
range=(0, 50000, 1000),
|
||||
uid=[0]
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='verboseLevel',
|
||||
label='Verbose Level',
|
||||
description='Verbosity level (fatal, error, warning, info, debug, trace).',
|
||||
value='info',
|
||||
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
|
||||
exclusive=True,
|
||||
uid=[],
|
||||
),
|
||||
]
|
||||
|
||||
outputs = [
|
||||
desc.File(
|
||||
name='output',
|
||||
label='Output directory',
|
||||
description='',
|
||||
value=desc.Node.internalFolder,
|
||||
uid=[],
|
||||
),
|
||||
]
|
|
@ -91,32 +91,6 @@ class Texturing(desc.CommandLineNode):
|
|||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.BoolParam(
|
||||
name='correctEV',
|
||||
label='Correct Exposure',
|
||||
description='Uniformize images exposure values.',
|
||||
value=False,
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.BoolParam(
|
||||
name='useScore',
|
||||
label='Use Score',
|
||||
description='Use triangles scores for multiband blending.',
|
||||
value=True,
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='processColorspace',
|
||||
label='Process Colorspace',
|
||||
description="Colorspace for the texturing internal computation (does not impact the output file colorspace).",
|
||||
value='sRGB',
|
||||
values=('sRGB', 'LAB', 'XYZ'),
|
||||
exclusive=True,
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.IntParam(
|
||||
name='multiBandDownscale',
|
||||
label='Multi Band Downscale',
|
||||
|
@ -138,6 +112,14 @@ class Texturing(desc.CommandLineNode):
|
|||
description='''Number of contributions per frequency band for multiband blending (each frequency band also contributes to lower bands)''',
|
||||
advanced=True,
|
||||
),
|
||||
desc.BoolParam(
|
||||
name='useScore',
|
||||
label='Use Score',
|
||||
description='Use triangles scores (ie. reprojection area) for multiband blending.',
|
||||
value=True,
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.FloatParam(
|
||||
name='bestScoreThreshold',
|
||||
label='Best Score Threshold',
|
||||
|
@ -156,6 +138,23 @@ class Texturing(desc.CommandLineNode):
|
|||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='processColorspace',
|
||||
label='Process Colorspace',
|
||||
description="Colorspace for the texturing internal computation (does not impact the output file colorspace).",
|
||||
value='sRGB',
|
||||
values=('sRGB', 'LAB', 'XYZ'),
|
||||
exclusive=True,
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.BoolParam(
|
||||
name='correctEV',
|
||||
label='Correct Exposure',
|
||||
description='Uniformize images exposure values.',
|
||||
value=False,
|
||||
uid=[0],
|
||||
),
|
||||
desc.BoolParam(
|
||||
name='forceVisibleByAllVertices',
|
||||
label='Force Visible By All Vertices',
|
||||
|
@ -182,6 +181,15 @@ class Texturing(desc.CommandLineNode):
|
|||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.FloatParam(
|
||||
name='subdivisionTargetRatio',
|
||||
label='Subdivision Target Ratio',
|
||||
description='''Percentage of the density of the reconstruction as the target for the subdivision (0: disable subdivision, 0.5: half density of the reconstruction, 1: full density of the reconstruction).''',
|
||||
value=0.8,
|
||||
range=(0.0, 1.0, 0.001),
|
||||
uid=[0],
|
||||
advanced=True,
|
||||
),
|
||||
desc.ChoiceParam(
|
||||
name='verboseLevel',
|
||||
label='Verbose Level',
|
||||
|
|
|
@ -65,16 +65,31 @@ class MeshroomApp(QApplication):
|
|||
help='Import images or folder with images to reconstruct.')
|
||||
parser.add_argument('-I', '--importRecursive', metavar='FOLDERS', type=str, nargs='*',
|
||||
help='Import images to reconstruct from specified folder and sub-folders.')
|
||||
parser.add_argument('-p', '--pipeline', metavar='MESHROOM_FILE', type=str, required=False,
|
||||
parser.add_argument('-s', '--save', metavar='PROJECT.mg', type=str, default='',
|
||||
help='Save the created scene.')
|
||||
parser.add_argument('-p', '--pipeline', metavar='MESHROOM_FILE/photogrammetry/hdri', type=str, default=os.environ.get("MESHROOM_DEFAULT_PIPELINE", "photogrammetry"),
|
||||
help='Override the default Meshroom pipeline with this external graph.')
|
||||
parser.add_argument("--verbose", help="Verbosity level", default='warning',
|
||||
choices=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],)
|
||||
|
||||
args = parser.parse_args(args[1:])
|
||||
|
||||
logStringToPython = {
|
||||
'fatal': logging.FATAL,
|
||||
'error': logging.ERROR,
|
||||
'warning': logging.WARNING,
|
||||
'info': logging.INFO,
|
||||
'debug': logging.DEBUG,
|
||||
'trace': logging.DEBUG,
|
||||
}
|
||||
logging.getLogger().setLevel(logStringToPython[args.verbose])
|
||||
|
||||
QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
|
||||
|
||||
super(MeshroomApp, self).__init__(QtArgs)
|
||||
|
||||
self.setOrganizationName('AliceVision')
|
||||
self.setApplicationName('Meshroom')
|
||||
self.setAttribute(Qt.AA_EnableHighDpiScaling)
|
||||
self.setApplicationVersion(meshroom.__version_name__)
|
||||
|
||||
font = self.font()
|
||||
|
@ -101,7 +116,7 @@ class MeshroomApp(QApplication):
|
|||
self.engine.rootContext().setContextProperty("_nodeTypes", sorted(nodesDesc.keys()))
|
||||
|
||||
# instantiate Reconstruction object
|
||||
r = Reconstruction(parent=self)
|
||||
r = Reconstruction(defaultPipeline=args.pipeline, parent=self)
|
||||
self.engine.rootContext().setContextProperty("_reconstruction", r)
|
||||
|
||||
# those helpers should be available from QML Utils module as singletons, but:
|
||||
|
@ -119,15 +134,6 @@ class MeshroomApp(QApplication):
|
|||
# request any potential computation to stop on exit
|
||||
self.aboutToQuit.connect(r.stopChildThreads)
|
||||
|
||||
if args.pipeline:
|
||||
# the pipeline from the command line has the priority
|
||||
r.setDefaultPipeline(args.pipeline)
|
||||
else:
|
||||
# consider the environment variable
|
||||
defaultPipeline = os.environ.get("MESHROOM_DEFAULT_PIPELINE", "")
|
||||
if defaultPipeline:
|
||||
r.setDefaultPipeline(args.pipeline)
|
||||
|
||||
if args.project and not os.path.isfile(args.project):
|
||||
raise RuntimeError(
|
||||
"Meshroom Command Line Error: 'PROJECT' argument should be a Meshroom project file (.mg).\n"
|
||||
|
@ -135,6 +141,8 @@ class MeshroomApp(QApplication):
|
|||
|
||||
if args.project:
|
||||
r.load(args.project)
|
||||
else:
|
||||
r.new()
|
||||
|
||||
# import is a python keyword, so we have to access the attribute by a string
|
||||
if getattr(args, "import", None):
|
||||
|
@ -143,6 +151,20 @@ class MeshroomApp(QApplication):
|
|||
if args.importRecursive:
|
||||
r.importImagesFromFolder(args.importRecursive, recursive=True)
|
||||
|
||||
if args.save:
|
||||
if os.path.isfile(args.save):
|
||||
raise RuntimeError(
|
||||
"Meshroom Command Line Error: Cannot save the new Meshroom project as the file (.mg) already exists.\n"
|
||||
"Invalid value: '{}'".format(args.save))
|
||||
projectFolder = os.path.dirname(args.save)
|
||||
if not os.path.isdir(projectFolder):
|
||||
if not os.path.isdir(os.path.dirname(projectFolder)):
|
||||
raise RuntimeError(
|
||||
"Meshroom Command Line Error: Cannot save the new Meshroom project file (.mg) as the parent of the folder does not exists.\n"
|
||||
"Invalid value: '{}'".format(args.save))
|
||||
os.mkdir(projectFolder)
|
||||
r.saveAs(args.save)
|
||||
|
||||
self.engine.load(os.path.normpath(url))
|
||||
|
||||
@Slot(str, result=str)
|
||||
|
|
|
@ -78,3 +78,14 @@ class FilepathHelper(QObject):
|
|||
def normpath(self, path):
|
||||
""" Returns native normalized path """
|
||||
return os.path.normpath(self.asStr(path))
|
||||
|
||||
@Slot(str, result=str)
|
||||
@Slot(QUrl, result=str)
|
||||
def globFirst(self, path):
|
||||
""" Returns the first from a list of paths matching a pathname pattern. """
|
||||
import glob
|
||||
fileList = glob.glob(self.asStr(path))
|
||||
fileList.sort()
|
||||
if fileList:
|
||||
return fileList[0]
|
||||
return ""
|
||||
|
|
|
@ -9,6 +9,7 @@ from multiprocessing.pool import ThreadPool
|
|||
|
||||
from PySide2.QtCore import Slot, QJsonValue, QObject, QUrl, Property, Signal, QPoint
|
||||
|
||||
from meshroom import multiview
|
||||
from meshroom.common.qt import QObjectListModel
|
||||
from meshroom.core.attribute import Attribute, ListAttribute
|
||||
from meshroom.core.graph import Graph, Edge, submitGraph
|
||||
|
@ -245,7 +246,7 @@ class UIGraph(QObject):
|
|||
UIGraph exposes undoable methods on its graph and computation in a separate thread.
|
||||
It also provides a monitoring of all its computation units (NodeChunks).
|
||||
"""
|
||||
def __init__(self, filepath='', parent=None):
|
||||
def __init__(self, parent=None):
|
||||
super(UIGraph, self).__init__(parent)
|
||||
self._undoStack = commands.UndoStack(self)
|
||||
self._graph = Graph('', self)
|
||||
|
@ -260,9 +261,6 @@ class UIGraph(QObject):
|
|||
self._layout = GraphLayout(self)
|
||||
self._selectedNode = None
|
||||
self._hoveredNode = None
|
||||
self._defaultPipelineFilepath = None
|
||||
if filepath:
|
||||
self.load(filepath)
|
||||
|
||||
def setGraph(self, g):
|
||||
""" Set the internal graph. """
|
||||
|
@ -319,10 +317,6 @@ class UIGraph(QObject):
|
|||
self.stopExecution()
|
||||
self._chunksMonitor.stop()
|
||||
|
||||
def setDefaultPipeline(self, pipelineFilepath):
|
||||
self._defaultPipelineFilepath = pipelineFilepath
|
||||
self._graph.load(pipelineFilepath, setupProjectFile=False)
|
||||
|
||||
def load(self, filepath, setupProjectFile=True):
|
||||
g = Graph('')
|
||||
g.load(filepath, setupProjectFile)
|
||||
|
@ -336,6 +330,9 @@ class UIGraph(QObject):
|
|||
|
||||
@Slot(QUrl)
|
||||
def saveAs(self, url):
|
||||
if isinstance(url, (str)):
|
||||
localFile = url
|
||||
else:
|
||||
localFile = url.toLocalFile()
|
||||
# ensure file is saved with ".mg" extension
|
||||
if os.path.splitext(localFile)[-1] != ".mg":
|
||||
|
|
|
@ -59,6 +59,14 @@ Item {
|
|||
}
|
||||
}
|
||||
|
||||
// Whether an attribute can be displayed as an attribute pin on the node
|
||||
function isDisplayableAsPin(attribute) {
|
||||
// ATM, only File attributes are meant to be connected
|
||||
// TODO: review this if we want to connect something else
|
||||
return attribute.type == "File"
|
||||
|| (attribute.type == "ListAttribute" && attribute.desc.elementDesc.type == "File")
|
||||
}
|
||||
|
||||
|
||||
// Main Layout
|
||||
MouseArea {
|
||||
|
@ -223,7 +231,7 @@ Item {
|
|||
|
||||
delegate: Loader {
|
||||
id: outputLoader
|
||||
active: object.isOutput
|
||||
active: object.isOutput && isDisplayableAsPin(object)
|
||||
anchors.right: parent.right
|
||||
width: outputs.width
|
||||
|
||||
|
@@ -248,11 +256,9 @@ Item {
             Repeater {
                 model: node.attributes
                 delegate: Loader {
-                    active: !object.isOutput && object.type == "File"
-                            || (object.type == "ListAttribute" && object.desc.elementDesc.type == "File") // TODO: review this
+                    active: !object.isOutput && isDisplayableAsPin(object)
                     width: inputs.width
-
 
                     sourceComponent: AttributePin {
                         id: inPin
                         nodeItem: root
@@ -286,5 +292,5 @@ Item {
            }
        }
    }

}
@@ -1,4 +1,4 @@
-import DepthMapEntity 2.0
+import DepthMapEntity 2.1
 
 /**
  * Support for Depth Map files (EXR) in Qt3d.
meshroom/ui/qml/Viewer3D/EnvironmentMapEntity.qml (new file, 51 lines)
@@ -0,0 +1,51 @@
+import QtQuick 2.9
+import Qt3D.Core 2.1
+import Qt3D.Render 2.1
+import Qt3D.Extras 2.10
+
+
+/**
+ * EnvironmentMap maps an equirectangular image on a Sphere.
+ * The 'position' property can be used to virually attach it to a camera
+ * and get the impression of an environment at an infinite distance.
+ */
+Entity {
+    id: root
+
+    /// Source of the equirectangular image
+    property url source
+    /// Radius of the sphere
+    property alias radius: sphereMesh.radius
+    /// Number of slices of the sphere
+    property alias slices: sphereMesh.slices
+    /// Number of rings of the sphere
+    property alias rings: sphereMesh.rings
+    /// Position of the sphere
+    property alias position: transform.translation
+    /// Texture loading status
+    property alias status: textureLoader.status
+
+    components: [
+        SphereMesh {
+            id: sphereMesh
+            radius: 1000
+            slices: 50
+            rings: 50
+        },
+        Transform {
+            id: transform
+            translation: root.position
+        },
+        DiffuseMapMaterial {
+            ambient: "#FFF"
+            shininess: 0
+            specular: "#000"
+            diffuse: TextureLoader {
+                id: textureLoader
+                magnificationFilter: Texture.Linear
+                mirrored: true
+                source: root.source
+            }
+        }
+    ]
+}
@@ -16,6 +16,9 @@ Entity {
     property bool pickingEnabled: false
     readonly property alias count: instantiator.count // number of instantiated media delegates
 
+    /// Camera to consider for positionning
+    property Camera camera: null
+
     /// True while at least one media is being loaded
     readonly property bool loading: {
         for(var i=0; i<m.mediaModel.count; ++i) {
@@ -173,6 +176,7 @@ Entity {
                 // source based on currentSource + "requested" property
                 readonly property string finalSource: model.requested ? currentSource : ""
 
+                camera: root.camera
                 renderMode: root.renderMode
                 enabled: visible
 
@@ -1,7 +1,7 @@
 import QtQuick 2.9
 import Qt3D.Core 2.1
 import Qt3D.Render 2.1
-import Qt3D.Extras 2.1
+import Qt3D.Extras 2.10
 import QtQuick.Scene3D 2.0
 import "Materials"
 import Utils 1.0
@@ -20,6 +20,9 @@ import Utils 1.0
     property var object: null
     property int renderMode
 
+    /// Scene's current camera
+    property Camera camera: null
+
     property bool cached: false
 
     onSourceChanged: {
@@ -44,7 +47,7 @@ import Utils 1.0
 
         switch(Filepath.extension(source)) {
             case ".abc": if(Viewer3DSettings.supportAlembic) component = abcLoaderEntityComponent; break;
-            case ".exr": if(Viewer3DSettings.supportDepthMap) component = depthMapLoaderComponent; break;
+            case ".exr": if(Viewer3DSettings.supportDepthMap) component = exrLoaderComponent; break;
            case ".obj":
            default: component = sceneLoaderEntityComponent; break;
        }
@@ -103,15 +106,35 @@ import Utils 1.0
     }
 
     Component {
-        id: depthMapLoaderComponent
+        id: exrLoaderComponent
         MediaLoaderEntity {
-            id: depthMapLoaderEntity
+            id: exrLoaderEntity
             Component.onCompleted: {
-                var obj = Viewer3DSettings.depthMapLoaderComp.createObject(depthMapLoaderEntity, {
+                // EXR loading strategy:
+                // - [1] as a depth map
+                var obj = Viewer3DSettings.depthMapLoaderComp.createObject(
+                    exrLoaderEntity, {
                         'source': source
                     });
+
+                if(obj.status === SceneLoader.Ready)
+                {
                     faceCount = Scene3DHelper.faceCount(obj);
                     root.status = SceneLoader.Ready;
+                    return;
+                }
+
+                // - [2] as an environment map
+                obj.destroy();
+                root.status = SceneLoader.Loading;
+                obj = Qt.createComponent("EnvironmentMapEntity.qml").createObject(
+                    exrLoaderEntity, {
+                        'source': source,
+                        'position': Qt.binding(function() { return root.camera.position })
+                    });
+                obj.statusChanged.connect(function() {
+                    root.status = obj.status;
+                });
             }
         }
     }
@@ -221,6 +221,7 @@ FocusScope {
             // Picking to set focus point (camera view center)
             // Only activate it when a double click may happen or when the 'Control' key is pressed
             pickingEnabled: cameraController.pickingActive || doubleClickTimer.running
+            camera: cameraSelector.camera
 
             components: [
                 Transform {
@@ -23,7 +23,7 @@ Item {
     readonly property variant cameraInits: _reconstruction.cameraInits
     property bool readOnly: false
     readonly property Viewer3D viewer3D: viewer3D
-
+    readonly property Viewer2D viewer2D: viewer2D
 
     implicitWidth: 300
     implicitHeight: 400
@@ -518,6 +518,13 @@ ApplicationWindow {
             reconstruction: _reconstruction
             readOnly: _reconstruction.computing
 
+            function viewAttribute(attribute, mouse) {
+                let viewable = false;
+                viewable = workspaceView.viewIn2D(attribute);
+                viewable |= workspaceView.viewIn3D(attribute, mouse);
+                return viewable;
+            }
+
             function viewIn3D(attribute, mouse) {
                 var loaded = viewer3D.view(attribute);
                 // solo media if Control modifier was held
@@ -525,6 +532,29 @@ ApplicationWindow {
                     viewer3D.solo(attribute);
                 return loaded;
             }
 
+            function viewIn2D(attribute) {
+                var imageExts = ['.exr', '.jpg', '.tif', '.png'];
+                var ext = Filepath.extension(attribute.value);
+                if(imageExts.indexOf(ext) == -1)
+                {
+                    return false;
+                }
+
+                if(attribute.value.includes('*'))
+                {
+                    // For now, the viewer only supports a single image.
+                    var firstFile = Filepath.globFirst(attribute.value)
+                    viewer2D.source = Filepath.stringToUrl(firstFile);
+                }
+                else
+                {
+                    viewer2D.source = Filepath.stringToUrl(attribute.value);
+                    return true;
+                }
+
+                return false;
+            }
         }
     }
@@ -576,28 +606,16 @@ ApplicationWindow {
                 nodeTypesModel: _nodeTypes
 
                 onNodeDoubleClicked: {
-                    if(node.nodeType === "StructureFromMotion")
-                    {
-                        _reconstruction.sfm = node;
-                    }
-                    else if(node.nodeType === "FeatureExtraction")
-                    {
-                        _reconstruction.featureExtraction = node;
-                    }
-                    else if(node.nodeType === "CameraInit")
-                    {
-                        _reconstruction.cameraInit = node;
-                    }
+                    _reconstruction.setActiveNodeOfType(node);
 
+                    let viewable = false;
                     for(var i=0; i < node.attributes.count; ++i)
                     {
                         var attr = node.attributes.at(i)
-                        if(attr.isOutput
-                           && workspaceView.viewIn3D(attr, mouse))
-                        {
+                        if(attr.isOutput && workspaceView.viewAttribute(attr))
                             break;
-                        }
                     }
                 }
                 onComputeRequest: computeManager.compute(node)
                 onSubmitRequest: computeManager.submit(node)
             }
@@ -10,7 +10,7 @@ from PySide2.QtGui import QMatrix4x4, QMatrix3x3, QQuaternion, QVector3D, QVecto
 from meshroom import multiview
 from meshroom.common.qt import QObjectListModel
 from meshroom.core import Version
-from meshroom.core.node import Node, Status
+from meshroom.core.node import Node, Status, Position
 from meshroom.ui.graph import UIGraph
 from meshroom.ui.utils import makeProperty
 
@@ -102,7 +102,7 @@ class LiveSfmManager(QObject):
         to include those images to the reconstruction.
         """
         # Get all new images in the watched folder
-        imagesInFolder = multiview.findImageFiles(self._folder)
+        imagesInFolder = multiview.findFilesByTypeInFolder(self._folder)
         newImages = set(imagesInFolder).difference(self.allImages)
         for imagePath in newImages:
             # print('[LiveSfmManager] New image file : {}'.format(imagePath))
@@ -207,7 +207,7 @@ class ViewpointWrapper(QObject):
             self._metadata = {}
         else:
             self._initialIntrinsics = self._reconstruction.getIntrinsic(self._viewpoint)
-            self._metadata = json.loads(self._viewpoint.metadata.value)
+            self._metadata = json.loads(self._viewpoint.metadata.value) if self._viewpoint.metadata.value else {}
         self.initialParamsChanged.emit()
 
     def _updateSfMParams(self):
@@ -358,8 +358,8 @@ class Reconstruction(UIGraph):
     Specialization of a UIGraph designed to manage a 3D reconstruction.
     """
 
-    def __init__(self, graphFilepath='', parent=None):
-        super(Reconstruction, self).__init__(graphFilepath, parent)
+    def __init__(self, defaultPipeline='', parent=None):
+        super(Reconstruction, self).__init__(parent)
 
         # initialize member variables for key steps of the 3D reconstruction pipeline
 
@@ -393,20 +393,23 @@ class Reconstruction(UIGraph):
         # react to internal graph changes to update those variables
         self.graphChanged.connect(self.onGraphChanged)
 
-        if graphFilepath:
-            self.onGraphChanged()
-        else:
-            self.new()
+        self.setDefaultPipeline(defaultPipeline)
+
+    def setDefaultPipeline(self, defaultPipeline):
+        self._defaultPipeline = defaultPipeline
 
     @Slot()
     def new(self):
         """ Create a new photogrammetry pipeline. """
-        if self._defaultPipelineFilepath:
-            # use the user-provided default photogrammetry project file
-            self.load(self._defaultPipelineFilepath, setupProjectFile=False)
-        else:
+        if self._defaultPipeline.lower() == "photogrammetry":
             # default photogrammetry pipeline
             self.setGraph(multiview.photogrammetry())
+        elif self._defaultPipeline.lower() == "hdri":
+            # default hdri pipeline
+            self.setGraph(multiview.hdri())
+        else:
+            # use the user-provided default photogrammetry project file
+            self.load(self._defaultPipeline, setupProjectFile=False)
 
     def load(self, filepath, setupProjectFile=True):
         try:
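To summarize the hunk above: Reconstruction.new() now resolves its default pipeline the same way the command line does, accepting "photogrammetry", "hdri" or a path to a custom .mg file. A hedged sketch of that resolution logic as a standalone helper (resolve_default_pipeline is an illustrative name, not part of the commit):

from meshroom import multiview

def resolve_default_pipeline(reconstruction, defaultPipeline):
    # Pick the startup graph from a pipeline name or a Meshroom project file path.
    name = defaultPipeline.lower()
    if name == "photogrammetry":
        reconstruction.setGraph(multiview.photogrammetry())  # built-in photogrammetry template
    elif name == "hdri":
        reconstruction.setGraph(multiview.hdri())  # built-in HDRI/panorama template
    else:
        # anything else is treated as a path to a pre-configured .mg project file
        reconstruction.load(defaultPipeline, setupProjectFile=False)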
@@ -557,21 +560,67 @@ class Reconstruction(UIGraph):
         Fetching urls from dropEvent is generally expensive in QML/JS (bug ?).
         This method allows to reduce process time by doing it on Python side.
         """
-        images, urls = self.getImageFilesFromDrop(drop)
-        if not images:
-            extensions = set([os.path.splitext(url)[1] for url in urls])
-            self.error.emit(
-                Message(
-                    "No Recognized Image",
-                    "No recognized image file in the {} dropped files".format(len(urls)),
-                    "File extensions: " + ', '.join(extensions)
-                )
-            )
-            return
-        self.importImagesAsync(images, cameraInit)
+        filesByType = self.getFilesByTypeFromDrop(drop)
+        if filesByType.images:
+            self.importImagesAsync(filesByType.images, cameraInit)
+        if filesByType.videos:
+            boundingBox = self.layout.boundingBox()
+            keyframeNode = self.addNewNode("KeyframeSelection", position=Position(boundingBox[0], boundingBox[1] + boundingBox[3]))
+            keyframeNode.mediaPaths.value = filesByType.videos
+            if len(filesByType.videos) == 1:
+                newVideoNodeMessage = "New node '{}' added for the input video.".format(keyframeNode.getLabel())
+            else:
+                newVideoNodeMessage = "New node '{}' added for a rig of {} synchronized cameras.".format(keyframeNode.getLabel(), len(filesByType.videos))
+            self.info.emit(
+                Message(
+                    "Video Input",
+                    newVideoNodeMessage,
+                    "Warning: You need to manually compute the KeyframeSelection node \n"
+                    "and then reimport the created images into Meshroom for the reconstruction.\n\n"
+                    "If you know the Camera Make/Model, it is highly recommended to declare them in the Node."
+                ))
+
+        if filesByType.panoramaInfo:
+            if len(filesByType.panoramaInfo) > 1:
+                self.error.emit(
+                    Message(
+                        "Multiple XML files in input",
+                        "Ignore the xml Panorama files:\n\n'{}'.".format(',\n'.join(filesByType.panoramaInfo)),
+                        "",
+                    ))
+            else:
+                panoramaExternalInfoNodes = self.graph.nodesByType('PanoramaExternalInfo')
+                for panoramaInfoFile in filesByType.panoramaInfo:
+                    for panoramaInfoNode in panoramaExternalInfoNodes:
+                        panoramaInfoNode.attribute('config').value = panoramaInfoFile
+                if panoramaExternalInfoNodes:
+                    self.info.emit(
+                        Message(
+                            "Panorama XML",
+                            "XML file declared on PanoramaExternalInfo node",
+                            "XML file '{}' set on node '{}'".format(','.join(filesByType.panoramaInfo), ','.join([n.getLabel() for n in panoramaExternalInfoNodes])),
+                        ))
+                else:
+                    self.error.emit(
+                        Message(
+                            "No PanoramaExternalInfo Node",
+                            "No PanoramaExternalInfo Node to set the Panorama file:\n'{}'.".format(','.join(filesByType.panoramaInfo)),
+                            "",
+                        ))
+
+        if not filesByType.images and not filesByType.videos and not filesByType.panoramaInfo:
+            if filesByType.other:
+                extensions = set([os.path.splitext(url)[1] for url in filesByType.other])
+                self.error.emit(
+                    Message(
+                        "No Recognized Input File",
+                        "No recognized input file in the {} dropped files".format(len(filesByType.other)),
+                        "Unknown file extensions: " + ', '.join(extensions)
+                    )
+                )
 
     @staticmethod
-    def getImageFilesFromDrop(drop):
+    def getFilesByTypeFromDrop(drop):
         """
 
         Args:
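One detail of the video handling above worth spelling out: the new KeyframeSelection node is positioned from the graph layout's bounding box, assumed here to be an (x, y, width, height) tuple as the indexing suggests, so the node lands just below the existing nodes. A small illustrative sketch of that placement arithmetic (node_drop_position is not part of the commit):

def node_drop_position(boundingBox):
    # (x, y, width, height) -> bottom-left corner of the current graph layout,
    # i.e. Position(boundingBox[0], boundingBox[1] + boundingBox[3]) in the code above.
    x, y, width, height = boundingBox
    return (x, y + height)

# e.g. a layout occupying (0, 0, 1200, 400) would place the new node at (0, 400)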
@@ -582,17 +631,14 @@ class Reconstruction(UIGraph):
         """
         urls = drop.property("urls")
         # Build the list of images paths
-        images = []
-        otherFiles = []
+        filesByType = multiview.FilesByType()
         for url in urls:
             localFile = url.toLocalFile()
             if os.path.isdir(localFile): # get folder content
-                images.extend(multiview.findImageFiles(localFile))
-            elif multiview.isImageFile(localFile):
-                images.append(localFile)
+                filesByType.extend(multiview.findFilesByTypeInFolder(localFile))
             else:
-                otherFiles.append(localFile)
-        return images, otherFiles
+                filesByType.addFile(localFile)
+        return filesByType
 
     def importImagesFromFolder(self, path, recursive=False):
         """
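The FilesByType helper used throughout these hunks lives in meshroom.multiview and its implementation is not part of this diff. Below is a minimal stand-in with the members the new code relies on (images, videos, panoramaInfo, other, addFile, extend); the extension lists are assumptions for illustration only, not the real classification rules:

import os

class FilesByTypeSketch(object):
    # Illustrative stand-in for multiview.FilesByType, not the real implementation.
    IMAGE_EXTS = ('.jpg', '.jpeg', '.png', '.tif', '.tiff', '.exr', '.dng')
    VIDEO_EXTS = ('.avi', '.mov', '.mp4', '.mkv')

    def __init__(self):
        self.images, self.videos, self.panoramaInfo, self.other = [], [], [], []

    def addFile(self, path):
        ext = os.path.splitext(path)[1].lower()
        if ext in self.IMAGE_EXTS:
            self.images.append(path)
        elif ext in self.VIDEO_EXTS:
            self.videos.append(path)
        elif ext == '.xml':  # panorama rig metadata consumed by PanoramaExternalInfo
            self.panoramaInfo.append(path)
        else:
            self.other.append(path)

    def extend(self, other):
        # merge another FilesByType-like object into this one
        self.images += other.images
        self.videos += other.videos
        self.panoramaInfo += other.panoramaInfo
        self.other += other.other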
@@ -602,19 +648,9 @@ class Reconstruction(UIGraph):
             recursive: List files in folders recursively.
 
         """
-        images = []
-        paths = []
-        if isinstance(path, (list, tuple)):
-            paths = path
-        else:
-            paths.append(path)
-        for p in paths:
-            if os.path.isdir(p): # get folder content
-                images.extend(multiview.findImageFiles(p, recursive))
-            elif multiview.isImageFile(p):
-                images.append(p)
-        if images:
-            self.buildIntrinsics(self.cameraInit, images)
+        filesByType = multiview.findFilesByTypeInFolder(path, recursive)
+        if filesByType.images:
+            self.buildIntrinsics(self.cameraInit, filesByType.images)
 
     def importImagesAsync(self, images, cameraInit):
         """ Add the given list of images to the Reconstruction. """