Add chunk notion for parallelization and implement a specific updateInternals in the CameraInit node

* Add a chunk notion for parallelization
* Allow a Node desc to implement a custom updateInternals
* The CameraInit node implements a specific updateInternals to update
  the input image list
* FeatureExtraction, FeatureMatching, DepthMap, DepthMapFilter:
  implement parallelization
Fabien Castan 2017-11-07 15:47:14 +01:00
parent 39f6ef3d64
commit 1e4f8f8a61
14 changed files with 614 additions and 271 deletions
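The gist of the change: a parallelized node no longer computes as one monolithic process. Its input image list is cut into fixed-size blocks, each block becomes a chunk with its own status, statistics and log files, and chunks can run one by one locally or be submitted as independent jobs to a render farm. A minimal illustrative sketch of the block arithmetic (not the patch's code; the real logic lives in the new desc.Range/desc.Parallelization classes below):

    import math

    def makeBlocks(fullSize, blockSize):
        # One (start, size) pair per chunk; the last block may be smaller.
        nbBlocks = int(math.ceil(float(fullSize) / float(blockSize)))
        return [(i * blockSize, min(blockSize, fullSize - i * blockSize))
                for i in range(nbBlocks)]

    print(makeBlocks(25, 10))  # [(0, 10), (10, 10), (20, 5)]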

View file

@@ -3,6 +3,8 @@ import argparse

 import meshroom.core.graph
 from meshroom.core.graph import Status
+from meshroom.core.desc import Range

 parser = argparse.ArgumentParser(description='Execute a Graph of processes.')
 parser.add_argument('graphFile', metavar='GRAPHFILE.mg', type=str,
@@ -11,7 +13,9 @@ parser.add_argument('--node', metavar='NODE_NAME', type=str,
                     help='Process the node. It will generate an error if the dependencies are not already computed.')
 parser.add_argument('--toNode', metavar='NODE_NAME', type=str,
                     help='Process the node with its dependencies.')
-parser.add_argument('--force', help='Force recompute',
+parser.add_argument('--forceStatus', help='Force computation if status is RUNNING or SUBMITTED.',
+                    action='store_true')
+parser.add_argument('--forceCompute', help='Compute in all cases even if already computed.',
                     action='store_true')
 parser.add_argument('--extern', help='Use this option when you compute externally after submission to a render farm from meshroom.',
                     action='store_true')
@@ -19,6 +23,9 @@ parser.add_argument('--cache', metavar='FOLDER', type=str,
                     default=None,
                     help='Override the cache folder')
+parser.add_argument('-i', '--iteration', type=int,
+                    default=-1, help='')

 args = parser.parse_args()

 graph = meshroom.core.graph.loadGraph(args.graphFile)
@@ -32,14 +39,22 @@ if args.node:
     submittedStatuses = [Status.SUBMITTED_LOCAL, Status.RUNNING]
     if not args.extern:
         submittedStatuses.append(Status.SUBMITTED_EXTERN)
-    if node.status.status in submittedStatuses:
-        print('Error: Node is already submitted with status "{}"'.format(node.status.status.name))
-        exit(-1)
-    if args.force or node.status.status != pg.Status.SUCCESS:
-        node.process()
+    if not args.forceStatus and not args.forceCompute:
+        for range in node.ranges:
+            if node.status[range.iteration].status in submittedStatuses:
+                print('Error: Node is already submitted with status "{}". See file: "{}"'.format(node.status[range.iteration].status.name, node.statusFile(range)))
+                exit(-1)
+    if not node.hasStatus(Status.SUCCESS) or args.forceCompute:
+        if args.iteration != -1:
+            node.processIteration(args.iteration)
+        else:
+            node.process()
 else:
+    if args.iteration != -1:
+        print('Error: "--iteration" only make sense when used with "--node".')
+        exit(-1)
     toNodes = None
     if args.toNode:
         toNodes = graph.findNodes(args.toNode)
-    pg.execute(graph, toNodes=toNodes, force=args.force)
+    meshroom.core.graph.execute(graph, toNodes=toNodes, forceCompute=args.forceCompute, forceStatus=args.forceStatus)

View file

@@ -3,8 +3,8 @@ import argparse
 import os

 import meshroom.core
+import meshroom.core.graph
 from meshroom import multiview
-from meshroom.core import graph as pg

 parser = argparse.ArgumentParser(description='Launch the full photogrammetry pipeline.')
@@ -25,6 +25,10 @@ parser.add_argument('--cache', metavar='FOLDER', type=str,
                     help='Choose a custom cache folder')
 parser.add_argument('--save', metavar='FOLDER', type=str, required=False,
                     help='Save the workflow to a meshroom files.')
+parser.add_argument('--forceStatus', help='Force computation if status is RUNNING or SUBMITTED.',
+                    action='store_true')
+parser.add_argument('--forceCompute', help='Compute in all cases even if already computed.',
+                    action='store_true')
 parser.add_argument('--scale', type=int, default=2,
                     help='Downscale factor for MVS steps. Possible values are: 1, 2, 4, 8, 16.')
@@ -54,9 +58,8 @@ if not graph.cacheDir:
     graph.cacheDir = meshroom.core.defaultCacheFolder
 if args.output:
-graph.update()

 toNodes = None
 if args.toNode:
     toNodes = graph.findNodes(args.toNode)

-pg.execute(graph, toNodes=toNodes)
+meshroom.core.graph.execute(graph, toNodes=toNodes, forceCompute=args.forceCompute, forceStatus=args.forceStatus)

View file

@@ -64,18 +64,20 @@ else:
     nodes, edges = graph.dfsOnFinish(startNodes=startNodes)

 for node in nodes:
-    print('{}: {}'.format(node.name, node.statistics.toDict()))
+    for chunk in node.chunks:
+        print('{}: {}\n'.format(chunk.name, chunk.statistics.toDict()))

 if args.exportHtml:
     with open(args.exportHtml, 'w') as fileObj:
         for node in nodes:
-            for curves in (node.statistics.computer.curves, node.statistics.process.curves):
-                exportCurves = defaultdict(list)
-                for name, curve in curves.items():
-                    s = name.split('.')
-                    figName = s[0]
-                    curveName = ''.join(s[1:])
-                    exportCurves[figName].append((curveName, curve))
+            for chunk in node.chunks:
+                for curves in (chunk.statistics.computer.curves, chunk.statistics.process.curves):
+                    exportCurves = defaultdict(list)
+                    for name, curve in curves.items():
+                        s = name.split('.')
+                        figName = s[0]
+                        curveName = ''.join(s[1:])
+                        exportCurves[figName].append((curveName, curve))

                 for name, curves in exportCurves.items():
                     addPlots(curves, name, fileObj)

View file

@@ -3,14 +3,14 @@ import argparse
 import os
 from pprint import pprint

-from meshroom.core import graph as pg
+import meshroom.core.graph

 parser = argparse.ArgumentParser(description='Query the status of nodes in a Graph of processes.')
 parser.add_argument('graphFile', metavar='GRAPHFILE.mg', type=str,
                     help='Filepath to a graph file.')
 parser.add_argument('--node', metavar='NODE_NAME', type=str,
                     help='Process the node alone.')
-parser.add_argument('--graph', metavar='NODE_NAME', type=str,
+parser.add_argument('--toNode', metavar='NODE_NAME', type=str,
                     help='Process the node and all previous nodes needed.')
 parser.add_argument("--verbose", help="Print full status information",
                     action="store_true")
@@ -21,7 +21,7 @@ if not os.path.exists(args.graphFile):
     print('ERROR: No graph file "{}".'.format(args.node, args.graphFile))
     exit(-1)

-graph = pg.loadGraph(args.graphFile)
+graph = meshroom.core.graph.loadGraph(args.graphFile)
 graph.update()
@@ -30,17 +30,19 @@ if args.node:
     if node is None:
         print('ERROR: node "{}" does not exist in file "{}".'.format(args.node, args.graphFile))
         exit(-1)
-    print('{}: {}'.format(node.name, node.status.status.name))
+    for chunk in node.chunks:
+        print('{}: {}'.format(chunk.name, chunk.status.status.name))
     if args.verbose:
         print('statusFile: ', node.statusFile())
         pprint(node.status.toDict())
 else:
     startNodes = None
-    if args.graph:
-        startNodes = [graph.nodes(args.graph)]
+    if args.toNode:
+        startNodes = [graph.nodes(args.toNode)]
     nodes, edges = graph.dfsOnFinish(startNodes=startNodes)
     for node in nodes:
-        print('{}: {}'.format(node.name, node.status.status.name))
+        for chunk in node.chunks:
+            print('{}: {}'.format(chunk.name, chunk.status.status.name))
     if args.verbose:
         pprint([n.status.toDict() for n in nodes])

View file

@@ -12,14 +12,16 @@ ENGINE = ''
 DEFAULT_TAGS = {'prod': ''}

-def createTask(meshroomFile, node, nbFrames=1, parallel=False, rangeSize=0):
+def createTask(meshroomFile, node):
     tags = DEFAULT_TAGS.copy()  # copy to not modify default tags
+    nbFrames = 1
     arguments = {}
     parallelArgs = ''
-    if parallel:
-        parallelArgs = ' --rangeStart @start --rangeSize {rangeSize}'.format(rangeSize=rangeSize)
-        arguments.update({'start': 0, 'end': nbFrames-1, 'step': rangeSize})
+    print('node: ', node.name)
+    if node.isParallelized:
+        blockSize, fullSize, nbBlocks = node.nodeDesc.parallelization.getSizes(node)
+        parallelArgs = ' --iteration @start'
+        arguments.update({'start': 0, 'end': nbBlocks-1, 'step': 1})

     tags['nbFrames'] = nbFrames
     allRequirements = list(BASE_REQUIREMENTS)
@@ -68,9 +70,14 @@ if args.toNode:
 nodesToProcess, edgesToProcess = graph.dfsToProcess(startNodes=toNodes)
+print("edgesToProcess:", edgesToProcess)
 flowEdges = graph.flowEdges(startNodes=toNodes)
 edgesToProcess = set(edgesToProcess).intersection(flowEdges)
+print("nodesToProcess:", nodesToProcess)
+print("edgesToProcess:", edgesToProcess)

 if not nodesToProcess:
     print('Nothing to compute')
     exit(-1)
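For context on the arguments above: '@start' is a per-task placeholder that the farm engine is assumed to expand for every value from 'start' to 'end' (step 1), so a node split into 3 blocks yields three jobs, each calling back into meshroom with a single --iteration index. A rough emulation of that assumed expansion (the compute-script name is hypothetical):

    # Rough emulation of the assumed farm-side expansion of '@start'.
    arguments = {'start': 0, 'end': 2, 'step': 1}  # e.g. nbBlocks = 3
    parallelArgs = ' --iteration @start'
    for i in range(arguments['start'], arguments['end'] + 1, arguments['step']):
        print('meshroom_compute scene.mg --node DepthMap_1' + parallelArgs.replace('@start', str(i)))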

View file

@@ -1,10 +1,10 @@
 from meshroom.common import BaseObject, Property, Variant
 from enum import Enum  # available by default in python3. For python2: "pip install enum34"
 import collections
+import math
 import os
 import psutil


 class Attribute(BaseObject):
     """
     """
@@ -181,6 +181,76 @@ class Level(Enum):
     INTENSIVE = 2


+class Range:
+    def __init__(self, iteration=0, blockSize=0, fullSize=0):
+        self.iteration = iteration
+        self.blockSize = blockSize
+        self.fullSize = fullSize
+
+    @property
+    def start(self):
+        return self.iteration * self.blockSize
+
+    @property
+    def effectiveBlockSize(self):
+        remaining = (self.fullSize - self.start) + 1
+        return self.blockSize if remaining >= self.blockSize else remaining
+
+    @property
+    def end(self):
+        return self.start + self.effectiveBlockSize
+
+    @property
+    def last(self):
+        return self.end - 1
+
+    def toDict(self):
+        return {
+            "rangeIteration": self.iteration,
+            "rangeStart": self.start,
+            "rangeEnd": self.end,
+            "rangeLast": self.last,
+            "rangeBlockSize": self.effectiveBlockSize,
+            "rangeFullSize": self.fullSize,
+        }
+
+
+class Parallelization:
+    def __init__(self, inputListParamName='', staticNbBlocks=0, blockSize=0):
+        self.inputListParamName = inputListParamName
+        self.staticNbBlocks = staticNbBlocks
+        self.blockSize = blockSize
+
+    def getSizes(self, node):
+        """
+        Args:
+            node:
+        Returns: (blockSize, fullSize, nbBlocks)
+        """
+        if self.inputListParamName:
+            parentNodes, edges = node.graph.dfsOnFinish(startNodes=[node])
+            for parentNode in parentNodes:
+                if self.inputListParamName in parentNode.getAttributes().keys():
+                    fullSize = len(parentNode.attribute(self.inputListParamName))
+                    nbBlocks = int(math.ceil(float(fullSize) / float(self.blockSize)))
+                    return (self.blockSize, fullSize, nbBlocks)
+            raise RuntimeError('Cannot find the "inputListParamName": "{}" in the list of input nodes: {} for node: {}'.format(self.inputListParamName, parentNodes, node.name))
+        if self.staticNbBlocks:
+            return (1, self.staticNbBlocks, self.staticNbBlocks)
+        return None
+
+    def getRange(self, node, iteration):
+        blockSize, fullSize, nbBlocks = self.getSizes(node)
+        return Range(iteration=iteration, blockSize=blockSize, fullSize=fullSize)
+
+    def getRanges(self, node):
+        blockSize, fullSize, nbBlocks = self.getSizes(node)
+        ranges = []
+        for i in range(nbBlocks):
+            ranges.append(Range(iteration=i, blockSize=blockSize, fullSize=fullSize))
+        return ranges
+
+
 class Node(object):
     """
     """
@@ -192,56 +262,67 @@ class Node(object):
     packageVersion = ''
     inputs = []
     outputs = []
+    parallelization = None

     def __init__(self):
         pass

+    def updateInternals(self, node):
+        pass
+
     def stop(self, node):
         pass

-    def process(self, node):
-        raise NotImplementedError('No process implementation on this node')
+    def processChunk(self, node, range):
+        raise NotImplementedError('No process implementation on node: "{}"'.format(node.name))


 class CommandLineNode(Node):
     """
     """
+    internalFolder = '{cache}/{nodeType}/{uid0}/'
+    commandLine = ''  # need to be defined on the node
+    parallelization = None
+    commandLineRange = ''

-    def buildCommandLine(self, node):
+    def buildCommandLine(self, chunk):
         cmdPrefix = ''
         if 'REZ_ENV' in os.environ:
-            cmdPrefix = '{rez} {packageFullName} -- '.format(rez=os.environ.get('REZ_ENV'), packageFullName=node.packageFullName)
-        return cmdPrefix + node.nodeDesc.commandLine.format(**node._cmdVars)
+            cmdPrefix = '{rez} {packageFullName} -- '.format(rez=os.environ.get('REZ_ENV'), packageFullName=chunk.node.packageFullName)
+        cmdSuffix = ''
+        if chunk.range:
+            cmdSuffix = ' ' + self.commandLineRange.format(**chunk.range.toDict())
+        return cmdPrefix + chunk.node.nodeDesc.commandLine.format(**chunk.node._cmdVars) + cmdSuffix

     def stop(self, node):
         if node.subprocess:
             node.subprocess.terminate()

-    def process(self, node):
+    def processChunk(self, chunk):
         try:
-            with open(node.logFile(), 'w') as logF:
-                cmd = self.buildCommandLine(node)
+            with open(chunk.logFile(), 'w') as logF:
+                cmd = self.buildCommandLine(chunk)
                 print(' - commandLine:', cmd)
-                print(' - logFile:', node.logFile())
-                node.subprocess = psutil.Popen(cmd, stdout=logF, stderr=logF, shell=True)
+                print(' - logFile:', chunk.logFile())
+                chunk.subprocess = psutil.Popen(cmd, stdout=logF, stderr=logF, shell=True)
                 # store process static info into the status file
-                node.status.commandLine = cmd
-                # node.status.env = node.proc.environ()
-                # node.status.createTime = node.proc.create_time()
+                chunk.status.commandLine = cmd
+                # chunk.status.env = node.proc.environ()
+                # chunk.status.createTime = node.proc.create_time()

-                node.statThread.proc = node.subprocess
-                stdout, stderr = node.subprocess.communicate()
-                node.subprocess.wait()
+                chunk.statThread.proc = chunk.subprocess
+                stdout, stderr = chunk.subprocess.communicate()
+                chunk.subprocess.wait()

-                node.status.returnCode = node.subprocess.returncode
+                chunk.status.returnCode = chunk.subprocess.returncode

-            if node.subprocess.returncode != 0:
-                with open(node.logFile(), 'r') as logF:
+            if chunk.subprocess.returncode != 0:
+                with open(chunk.logFile(), 'r') as logF:
                     logContent = ''.join(logF.readlines())
-                raise RuntimeError('Error on node "{}":\nLog:\n{}'.format(node.name, logContent))
+                raise RuntimeError('Error on node "{}":\nLog:\n{}'.format(chunk.name, logContent))
         except:
             raise
         finally:
-            node.subprocess = None
+            chunk.subprocess = None
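Usage sketch for the new classes: for an input list of 25 viewpoints and blockSize=10, getSizes returns (10, 25, 3) and each chunk's Range exposes exactly the placeholder keys consumed by commandLineRange. The values below follow the code as written:

    from meshroom.core.desc import Range

    r = Range(iteration=1, blockSize=10, fullSize=25)
    assert r.start == 10 and r.effectiveBlockSize == 10 and r.last == 19
    print(r.toDict())
    # {'rangeIteration': 1, 'rangeStart': 10, 'rangeEnd': 20,
    #  'rangeLast': 19, 'rangeBlockSize': 10, 'rangeFullSize': 25}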

View file

@@ -386,8 +386,9 @@ class Status(Enum):
     SUBMITTED_LOCAL = 3
     RUNNING = 4
     ERROR = 5
-    KILLED = 6
-    SUCCESS = 7
+    STOPPED = 6
+    KILLED = 7
+    SUCCESS = 8


 class StatusData:
@@ -429,6 +430,143 @@ def clearProcessesStatus():
             v.upgradeStatusTo(Status.KILLED)


+class NodeChunk(BaseObject):
+    def __init__(self, node, range):
+        super(NodeChunk, self).__init__(node)
+        self.node = node
+        self.range = range
+        self.status = StatusData(node.name, node.nodeType)
+        self.statistics = stats.Statistics()
+        self._subprocess = None
+
+    @property
+    def index(self):
+        return self.range.iteration
+
+    @property
+    def name(self):
+        if self.range.blockSize:
+            return "{}({})".format(self.node.name, self.index)
+        else:
+            return self.node.name
+
+    @property
+    def statusName(self):
+        return self.status.name
+
+    def updateStatusFromCache(self):
+        """
+        Update node status based on status file content/existence.
+        """
+        statusFile = self.statusFile()
+        oldStatus = self.status.status
+        # No status file => reset status to Status.None
+        if not os.path.exists(statusFile):
+            self.status.reset()
+        else:
+            with open(statusFile, 'r') as jsonFile:
+                statusData = json.load(jsonFile)
+            self.status.fromDict(statusData)
+        if oldStatus != self.status.status:
+            self.statusChanged.emit()
+
+    def statusFile(self):
+        if self.range.blockSize == 0:
+            return os.path.join(self.node.graph.cacheDir, self.node.internalFolder, 'status')
+        else:
+            return os.path.join(self.node.graph.cacheDir, self.node.internalFolder, str(self.index) + '.status')
+
+    def statisticsFile(self):
+        if self.range.blockSize == 0:
+            return os.path.join(self.node.graph.cacheDir, self.node.internalFolder, 'statistics')
+        else:
+            return os.path.join(self.node.graph.cacheDir, self.node.internalFolder, str(self.index) + '.statistics')
+
+    def logFile(self):
+        if self.range.blockSize == 0:
+            return os.path.join(self.node.graph.cacheDir, self.node.internalFolder, 'log')
+        else:
+            return os.path.join(self.node.graph.cacheDir, self.node.internalFolder, str(self.index) + '.log')
+
+    def saveStatusFile(self):
+        """
+        Write node status on disk.
+        """
+        data = self.status.toDict()
+        statusFilepath = self.statusFile()
+        folder = os.path.dirname(statusFilepath)
+        if not os.path.exists(folder):
+            os.makedirs(folder)
+        statusFilepathWriting = statusFilepath + '.writing.' + str(uuid.uuid4())
+        with open(statusFilepathWriting, 'w') as jsonFile:
+            json.dump(data, jsonFile, indent=4)
+        shutil.move(statusFilepathWriting, statusFilepath)
+
+    def upgradeStatusTo(self, newStatus):
+        if newStatus.value <= self.status.status.value:
+            print('WARNING: downgrade status on node "{}" from {} to {}'.format(self.name, self.status.status,
+                                                                                newStatus))
+        self.status.status = newStatus
+        self.statusChanged.emit()
+        self.saveStatusFile()
+
+    def updateStatisticsFromCache(self):
+        """
+        """
+        oldTimes = self.statistics.times
+        statisticsFile = self.statisticsFile()
+        if not os.path.exists(statisticsFile):
+            return
+        with open(statisticsFile, 'r') as jsonFile:
+            statisticsData = json.load(jsonFile)
+        self.statistics.fromDict(statisticsData)
+        if oldTimes != self.statistics.times:
+            self.statisticsChanged.emit()
+
+    def saveStatistics(self):
+        data = self.statistics.toDict()
+        statisticsFilepath = self.statisticsFile()
+        folder = os.path.dirname(statisticsFilepath)
+        if not os.path.exists(folder):
+            os.makedirs(folder)
+        statisticsFilepathWriting = statisticsFilepath + '.writing.' + str(uuid.uuid4())
+        with open(statisticsFilepathWriting, 'w') as jsonFile:
+            json.dump(data, jsonFile, indent=4)
+        shutil.move(statisticsFilepathWriting, statisticsFilepath)
+
+    def isAlreadySubmitted(self):
+        return self.status.status in (Status.SUBMITTED_EXTERN, Status.SUBMITTED_LOCAL, Status.RUNNING)
+
+    def process(self):
+        global runningProcesses
+        runningProcesses[self.name] = self
+        self.upgradeStatusTo(Status.RUNNING)
+        self.statThread = stats.StatisticsThread(self)
+        self.statThread.start()
+        startTime = time.time()
+        try:
+            self.node.nodeDesc.processChunk(self)
+        except Exception as e:
+            self.upgradeStatusTo(Status.ERROR)
+            raise
+        except (KeyboardInterrupt, SystemError, GeneratorExit) as e:
+            self.upgradeStatusTo(Status.STOPPED)
+            raise
+        finally:
+            elapsedTime = time.time() - startTime
+            print(' - elapsed time:', elapsedTime)
+            # ask and wait for the stats thread to stop
+            self.statThread.stopRequest()
+            self.statThread.join()
+            del runningProcesses[self.name]
+
+        self.upgradeStatusTo(Status.SUCCESS)
+
+    statusChanged = Signal()
+    statusName = Property(str, statusName.fget, notify=statusChanged)
+    statisticsChanged = Signal()
+
+
 class Node(BaseObject):
     """
     """
@@ -439,11 +577,13 @@ class Node(BaseObject):
     def __init__(self, nodeDesc, parent=None, **kwargs):
         super(Node, self).__init__(parent)
-        self._name = None  # type: str
-        self.graph = None  # type: Graph
         self.nodeDesc = meshroom.core.nodesDesc[nodeDesc]()
         self.packageName = self.nodeDesc.packageName
         self.packageVersion = self.nodeDesc.packageVersion
+        self._name = None  # type: str
+        self.graph = None  # type: Graph
+        self._chunks = []
         self._cmdVars = {}
         self._attributes = DictModel(keyAttrName='name', parent=self)
         self.attributesPerUid = defaultdict(set)
@@ -451,9 +591,6 @@ class Node(BaseObject):
         for k, v in kwargs.items():
             self.attribute(k).value = v

-        self.status = StatusData(self.name, self.nodeType)
-        self.statistics = stats.Statistics()
-        self._subprocess = None

     def __getattr__(self, k):
         try:
@@ -538,15 +675,11 @@ class Node(BaseObject):
             'attributes': {k: v for k, v in attributes.items() if v is not None},  # filter empty values
         }

-    def updateInternals(self):
-        self._cmdVars = {
-            'cache': self.graph.cacheDir,
-            'nodeType': self.nodeType,
-        }
+    def _buildCmdVars(self, cmdVars):
         for uidIndex, associatedAttributes in self.attributesPerUid.items():
             assAttr = [(a.getName(), a.uid(uidIndex)) for a in associatedAttributes]
             assAttr.sort()
-            self._cmdVars['uid{}'.format(uidIndex)] = hash(tuple([b for a, b in assAttr]))
+            cmdVars['uid{}'.format(uidIndex)] = hash(tuple([b for a, b in assAttr]))

         # Evaluate input params
         for name, attr in self._attributes.objects.items():
@@ -557,15 +690,15 @@ class Node(BaseObject):
                 assert(isinstance(v, collections.Sequence) and not isinstance(v, basestring))
                 v = attr.attributeDesc.joinChar.join(v)

-            self._cmdVars[name] = '--{name} {value}'.format(name=name, value=v)
-            self._cmdVars[name + 'Value'] = str(v)
+            cmdVars[name] = '--{name} {value}'.format(name=name, value=v)
+            cmdVars[name + 'Value'] = str(v)

             if v is not None and v is not '':
-                self._cmdVars[attr.attributeDesc.group] = self._cmdVars.get(attr.attributeDesc.group, '') + \
-                                                          ' ' + self._cmdVars[name]
+                cmdVars[attr.attributeDesc.group] = cmdVars.get(attr.attributeDesc.group, '') + \
+                                                    ' ' + cmdVars[name]

         # For updating output attributes invalidation values
-        cmdVarsNoCache = self._cmdVars.copy()
+        cmdVarsNoCache = cmdVars.copy()
         cmdVarsNoCache['cache'] = ''

         # Evaluate output params
@@ -573,17 +706,81 @@ class Node(BaseObject):
             if attr.isInput:
                 continue  # skip inputs
             attr.value = attr.attributeDesc.value.format(
-                **self._cmdVars)
+                **cmdVars)
             attr._invalidationValue = attr.attributeDesc.value.format(
                 **cmdVarsNoCache)
             v = attr.value

-            self._cmdVars[name] = '--{name} {value}'.format(name=name, value=v)
-            self._cmdVars[name + 'Value'] = str(v)
+            cmdVars[name] = '--{name} {value}'.format(name=name, value=v)
+            cmdVars[name + 'Value'] = str(v)

             if v is not None and v is not '':
-                self._cmdVars[attr.attributeDesc.group] = self._cmdVars.get(attr.attributeDesc.group, '') + \
-                                                          ' ' + self._cmdVars[name]
+                cmdVars[attr.attributeDesc.group] = cmdVars.get(attr.attributeDesc.group, '') + \
+                                                    ' ' + cmdVars[name]
+
+    @property
+    def isParallelized(self):
+        return bool(self.nodeDesc.parallelization)
+
+    @property
+    def nbParallelizationBlocks(self):
+        return len(self.chunks)
+
+    def hasStatus(self, status):
+        if not self.chunks:
+            return False
+        for chunk in self.chunks:
+            if chunk.status.status != status:
+                return False
+        return True
+
+    def isAlreadySubmitted(self):
+        for chunk in self.chunks:
+            if chunk.isAlreadySubmitted():
+                return True
+        return False
+
+    def alreadySubmittedChunks(self):
+        submittedChunks = []
+        for chunk in self.chunks:
+            if chunk.isAlreadySubmitted():
+                submittedChunks.append(chunk)
+        return submittedChunks
+
+    def upgradeStatusTo(self, newStatus):
+        """
+        Upgrade node to the given status and save it on disk.
+        """
+        for chunk in self.chunks:
+            chunk.upgradeStatusTo(newStatus)
+
+    def updateStatisticsFromCache(self):
+        for chunk in self.chunks:
+            chunk.updateStatisticsFromCache()
+
+    def updateInternals(self):
+        if self.isParallelized:
+            ranges = self.nodeDesc.parallelization.getRanges(self)
+            if len(ranges) != len(self.chunks):
+                self._chunks = [NodeChunk(self, range) for range in ranges]
+                self.chunksChanged.emit()
+            else:
+                for chunk, range in zip(self.chunks, ranges):
+                    chunk.range = range
+        else:
+            if len(self._chunks) != 1:
+                self._chunks = [NodeChunk(self, desc.Range())]
+                self.chunksChanged.emit()
+            else:
+                self._chunks[0].range = desc.Range()
+
+        self._cmdVars = {
+            'cache': self.graph.cacheDir,
+            'nodeType': self.nodeType,
+        }
+        self._buildCmdVars(self._cmdVars)
+        self.nodeDesc.updateInternals(self)
         self.internalFolderChanged.emit()
@@ -591,79 +788,12 @@ class Node(BaseObject):
     def internalFolder(self):
         return self.nodeDesc.internalFolder.format(**self._cmdVars)

-    def statusFile(self):
-        return os.path.join(self.graph.cacheDir, self.internalFolder, 'status')
-
-    def statisticsFile(self):
-        return os.path.join(self.graph.cacheDir, self.internalFolder, 'statistics')
-
-    def logFile(self):
-        return os.path.join(self.graph.cacheDir, self.internalFolder, 'log')
-
     def updateStatusFromCache(self):
         """
         Update node status based on status file content/existence.
         """
-        statusFile = self.statusFile()
-        oldStatus = self.status.status
-        # No status file => reset status to Status.None
-        if not os.path.exists(statusFile):
-            self.status.reset()
-        else:
-            with open(statusFile, 'r') as jsonFile:
-                statusData = json.load(jsonFile)
-            self.status.fromDict(statusData)
-        if oldStatus != self.status.status:
-            self.statusChanged.emit()
-
-    def saveStatusFile(self):
-        """
-        Write node status on disk.
-        """
-        data = self.status.toDict()
-        statusFilepath = self.statusFile()
-        folder = os.path.dirname(statusFilepath)
-        if not os.path.exists(folder):
-            os.makedirs(folder)
-        statusFilepathWriting = statusFilepath + '.writing.' + str(uuid.uuid4())
-        with open(statusFilepathWriting, 'w') as jsonFile:
-            json.dump(data, jsonFile, indent=4)
-        shutil.move(statusFilepathWriting, statusFilepath)
-
-    def upgradeStatusTo(self, newStatus):
-        """
-        Upgrade node to the given status and save it on disk.
-        """
-        if newStatus.value <= self.status.status.value:
-            print('WARNING: downgrade status on node "{}" from {} to {}'.format(self._name, self.status.status,
-                                                                                newStatus))
-        self.status.status = newStatus
-        self.statusChanged.emit()
-        self.saveStatusFile()
-
-    def updateStatisticsFromCache(self):
-        """
-        """
-        statisticsFile = self.statisticsFile()
-        if not os.path.exists(statisticsFile):
-            return
-        with open(statisticsFile, 'r') as jsonFile:
-            statisticsData = json.load(jsonFile)
-        self.statistics.fromDict(statisticsData)
-
-    def saveStatistics(self):
-        data = self.statistics.toDict()
-        statisticsFilepath = self.statisticsFile()
-        folder = os.path.dirname(statisticsFilepath)
-        if not os.path.exists(folder):
-            os.makedirs(folder)
-        statisticsFilepathWriting = statisticsFilepath + '.writing.' + str(uuid.uuid4())
-        with open(statisticsFilepathWriting, 'w') as jsonFile:
-            json.dump(data, jsonFile, indent=4)
-        shutil.move(statisticsFilepathWriting, statisticsFilepath)
-
-    def isAlreadySubmitted(self):
-        return self.status.status in (Status.SUBMITTED_EXTERN, Status.SUBMITTED_LOCAL, Status.RUNNING)
+        for chunk in self.chunks:
+            chunk.updateStatusFromCache()

     def submit(self):
         self.upgradeStatusTo(Status.SUBMITTED_EXTERN)
@@ -674,27 +804,12 @@ class Node(BaseObject):
     def stopProcess(self):
         self.nodeDesc.stop(self)

-    def process(self):
-        global runningProcesses
-        runningProcesses[self.name] = self
-        self.upgradeStatusTo(Status.RUNNING)
-        self.statThread = stats.StatisticsThread(self)
-        self.statThread.start()
-        startTime = time.time()
-        try:
-            self.nodeDesc.process(self)
-        except BaseException:
-            self.upgradeStatusTo(Status.ERROR)
-            raise
-        finally:
-            elapsedTime = time.time() - startTime
-            print(' - elapsed time:', elapsedTime)
-            # ask and wait for the stats thread to stop
-            self.statThread.stopRequest()
-            self.statThread.join()
-            del runningProcesses[self.name]
-
-        self.upgradeStatusTo(Status.SUCCESS)
+    def processIteration(self, iteration):
+        self.chunks[iteration].process()
+
+    def process(self):
+        for chunk in self.chunks:
+            chunk.process()

     def endSequence(self):
         pass
@@ -702,9 +817,12 @@ class Node(BaseObject):
     def getStatus(self):
         return self.status

+    def getChunks(self):
+        return self._chunks
+
     @property
-    def statusName(self):
-        return self.status.status.name
+    def statusNames(self):
+        return [s.status.name for s in self.status]

     def __repr__(self):
         return self.name
@@ -716,9 +834,8 @@ class Node(BaseObject):
     internalFolder = Property(str, internalFolder.fget, notify=internalFolderChanged)
     depthChanged = Signal()
     depth = Property(int, depth.fget, notify=depthChanged)
-    statusChanged = Signal()
-    statusName = Property(str, statusName.fget, notify=statusChanged)
+    chunksChanged = Signal()
+    chunks = Property(Variant, getChunks, notify=chunksChanged)


 WHITE = 0
 GRAY = 1
@@ -794,14 +911,16 @@ class Graph(BaseObject):
         if not isinstance(graphData, dict):
             raise RuntimeError('loadGraph error: Graph is not a dict. File: {}'.format(filepath))

-        self.cacheDir = os.path.join(os.path.abspath(os.path.dirname(filepath)), meshroom.core.cacheFolderName)
-        self.name = os.path.splitext(os.path.basename(filepath))[0]
-        for nodeName, nodeData in graphData.items():
-            if not isinstance(nodeData, dict):
-                raise RuntimeError('loadGraph error: Node is not a dict. File: {}'.format(filepath))
-            n = Node(nodeData['nodeType'], parent=self, **nodeData['attributes'])
-            # Add node to the graph with raw attributes values
-            self._addNode(n, nodeName)
+        with GraphModification(self):
+            # Init name and cache directory from input filepath
+            self.cacheDir = os.path.join(os.path.abspath(os.path.dirname(filepath)), meshroom.core.cacheFolderName)
+            self.name = os.path.splitext(os.path.basename(filepath))[0]
+            for nodeName, nodeData in graphData.items():
+                if not isinstance(nodeData, dict):
+                    raise RuntimeError('loadGraph error: Node is not a dict. File: {}'.format(filepath))
+                n = Node(nodeData['nodeType'], parent=self, **nodeData['attributes'])
+                # Add node to the graph with raw attributes values
+                self._addNode(n, nodeName)

         # Create graph edges by resolving attributes expressions
         self._applyExpr()
@@ -1036,16 +1155,22 @@ class Graph(BaseObject):
         visitor = Visitor()

         def finishVertex(vertex, graph):
-            if vertex.status.status in (Status.SUBMITTED_EXTERN,
-                                        Status.SUBMITTED_LOCAL):
-                print('WARNING: node "{}" is already submitted.'.format(vertex.name))
-            if vertex.status.status is Status.RUNNING:
-                print('WARNING: node "{}" is already running.'.format(vertex.name))
-            if vertex.status.status is not Status.SUCCESS:
-                nodes.append(vertex)
+            chunksToProcess = []
+            for chunk in vertex.chunks:
+                if chunk.status.status in (Status.SUBMITTED_EXTERN,
+                                           Status.SUBMITTED_LOCAL):
+                    logging.warning('Node "{}" is already submitted.'.format(vertex.name))
+                if chunk.status.status is Status.RUNNING:
+                    logging.warning('Node "{}" is already running.'.format(vertex.name))
+                if chunk.status.status is not Status.SUCCESS:
+                    chunksToProcess.append(chunk)
+            if chunksToProcess:
+                nodes.append(vertex)  # We could collect specific chunks

         def finishEdge(edge, graph):
-            if (edge[0].status.status is not Status.SUCCESS) and (edge[1].status.status is not Status.SUCCESS):
+            if edge[0].hasStatus(Status.SUCCESS) or edge[1].hasStatus(Status.SUCCESS):
+                return
+            else:
                 edges.append(edge)

         visitor.finishVertex = finishVertex
@@ -1171,13 +1296,11 @@ class Graph(BaseObject):
         """ Request graph execution to be stopped """
         self.stopExecutionRequested.emit()

-    def submittedNodes(self):
-        """ Return the list of submitted nodes inside this Graph """
-        return [node for node in self.nodes if node.isAlreadySubmitted()]
-
     def clearSubmittedNodes(self):
         """ Reset the status of already submitted nodes to Status.NONE """
-        [node.upgradeStatusTo(Status.NONE) for node in self.submittedNodes()]
+        for node in self.nodes:
+            for chunk in node.alreadySubmittedChunks():
+                chunk.upgradeStatusTo(Status.NONE)

     @property
     def nodes(self):
@@ -1222,25 +1345,26 @@ def getAlreadySubmittedNodes(nodes):
     return out


-def execute(graph, toNodes=None, force=False):
+def execute(graph, toNodes=None, forceCompute=False, forceStatus=False):
     """
     """
-    if force:
+    if forceCompute:
         nodes, edges = graph.dfsOnFinish(startNodes=toNodes)
     else:
         nodes, edges = graph.dfsToProcess(startNodes=toNodes)
         nodesInConflict = getAlreadySubmittedNodes(nodes)

         if nodesInConflict:
-            nodesStatus = set([node.status.status.name for node in nodesInConflict])
+            nodesStatus = set([status.status.name for node in nodesInConflict for status in node.status])
             nodesName = [node.name for node in nodesInConflict]
-            #raise RuntimeError(
-            print(
-                'WARNING: Some nodes are already submitted with status: {}\n'
-                'Nodes: {}'.format(
-                ', '.join(nodesStatus),
-                ', '.join(nodesName),
-                ))
+            msg = 'WARNING: Some nodes are already submitted with status: {}\nNodes: {}'.format(
+                  ', '.join(nodesStatus),
+                  ', '.join(nodesName)
+                  )
+            if forceStatus:
+                print(msg)
+            else:
+                raise RuntimeError(msg)

     print('Nodes to execute: ', str([n.name for n in nodes]))
@@ -1254,7 +1378,7 @@ def execute(graph, toNodes=None, force=False):
         except Exception as e:
             logging.error("Error on node computation: {}".format(e))
             graph.clearSubmittedNodes()
-            return
+            raise

     for node in nodes:
         node.endSequence()
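Since NodeChunk now owns the status/statistics/log files (see statusFile, statisticsFile and logFile above), a parallelized node's cache folder carries one file set per chunk index, while a non-parallelized node (blockSize == 0) keeps the historical unsuffixed names. Illustrative layout for a node with two chunks:

    {cache}/{nodeType}/{uid0}/0.status    0.statistics    0.log
    {cache}/{nodeType}/{uid0}/1.status    1.statistics    1.log
    # blockSize == 0:         status      statistics      log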

View file

@@ -2,6 +2,7 @@ from collections import defaultdict
 import psutil
 import time
 import threading
+import signal


 def bytes2human(n):
@@ -187,27 +188,31 @@ bytesPerGiga = 1024. * 1024. * 1024.

 class StatisticsThread(threading.Thread):
-    def __init__(self, node):
+    def __init__(self, chunk):
         threading.Thread.__init__(self)
-        self.node = node
+        signal.signal(signal.SIGINT, signal.SIG_IGN)  # lambda signal, frame: self.stopRequest())
+        self.chunk = chunk
         self.proc = psutil.Process()  # by default current process pid
-        self.statistics = self.node.statistics
+        self.statistics = chunk.statistics
         self._stopFlag = threading.Event()

     def updateStats(self):
         self.lastTime = time.time()
-        if self.statistics.update(self.proc):
-            self.node.saveStatistics()
+        if self.chunk.statistics.update(self.proc):
+            self.chunk.saveStatistics()

     def run(self):
-        while True:
-            self.updateStats()
-            if self._stopFlag.wait(60):
-                # stopFlag has been set
-                # update stats one last time and exit main loop
-                if self.proc.is_running():
-                    self.updateStats()
-                return
+        try:
+            while True:
+                self.updateStats()
+                if self._stopFlag.wait(60):
+                    # stopFlag has been set
+                    # update stats one last time and exit main loop
+                    if self.proc.is_running():
+                        self.updateStats()
+                    return
+        except (KeyboardInterrupt, SystemError, GeneratorExit):
+            pass

     def stopRequest(self):
         """ Request the thread to exit as soon as possible. """

View file

@@ -1,12 +1,32 @@
+import os
 import sys
 from collections import OrderedDict
-import os
+import json
+import psutil
+import shutil
+import tempfile
+import logging

 from meshroom.core import desc
+from meshroom.core.graph import GraphModification


 Viewpoint = [
-    desc.File(name="image", label="Image", description="Image filepath", value="", uid=[0]),
-    desc.FloatParam(name="focal", label="Focal Length", description="Focal Length", value=0.0, uid=[0], range=(5, 200, 1)),
+    desc.IntParam(name="id", label="Id", description="Image UID", value=-1, uid=[0], range=(0, 200, 1)),
+    desc.File(name="image", label="Image", description="Image Filepath", value="", uid=[0, 1]),
+    desc.IntParam(name="intrinsicId", label="Intrinsic", description="Internal Camera Parameters", value=-1, uid=[0], range=(0, 200, 1)),
+    desc.IntParam(name="rigId", label="Rig", description="Rig Parameters", value=-1, uid=[0], range=(0, 200, 1)),
+    desc.IntParam(name="rigSubPoseId", label="Rig Sub-Pose", description="Rig Sub-Pose Parameters", value=-1, uid=[0], range=(0, 200, 1)),
+]
+
+Intrinsic = [
+    desc.IntParam(name="id", label="Id", description="Intrinsic UID", value=-1, uid=[0], range=(0, 200, 1)),
+    desc.IntParam(name="initialFocalLength", label="Initial Focal Length", description="Initial Guess on the Focal Length", value=-1, uid=[0], range=(0, 200, 1)),
+    desc.IntParam(name="focalLength", label="Focal Length", description="Known/Calibrated Focal Length", value=-1, uid=[0], range=(0, 200, 1)),
+    desc.ChoiceParam(name="cameraType", label="Camera Type", description="Camera Type", value="", values=['', 'pinhole', 'radial1', 'radial3', 'brown', 'fisheye4'], exclusive=True, uid=[0]),
+    desc.StringParam(name="deviceMake", label="Make", description="Camera Make", value="", uid=[]),
+    desc.StringParam(name="deviceModel", label="Model", description="Camera Model", value="", uid=[]),
+    desc.StringParam(name="sensorWidth", label="Sensor Width", description="Camera Sensor Width", value="", uid=[0]),
 ]
@@ -16,34 +36,27 @@ class CameraInit(desc.CommandLineNode):

     inputs = [
         desc.ListAttribute(
-            name='viewpoints',
-            elementDesc=desc.GroupAttribute(name='viewpoint', label="Viewpoint", description="", groupDesc=Viewpoint,
-                                            group='allParams'),
+            name="viewpoints",
+            elementDesc=desc.GroupAttribute(name="viewpoint", label="Viewpoint", description="", groupDesc=Viewpoint,
+                                            group="allParams"),
             label="Viewpoints",
             description="Input viewpoints",
-            group=""
+            group="",
         ),
-        desc.File(
-            name='imageDirectory',
-            label='Image Directory',
-            description='''Input images folder.''',
-            value='',
-            uid=[0],
-        ),
-        desc.File(
-            name='jsonFile',
-            label='Json File',
-            description='''Input file with all the user options. '''
-                        '''It can be used to provide a list of images instead of a directory.''',
-            value='',
-            uid=[0],
+        desc.ListAttribute(
+            name="intrinsics",
+            elementDesc=desc.GroupAttribute(name="intrinsic", label="Intrinsic", description="", groupDesc=Intrinsic,
+                                            group="allParams"),
+            label="Intrinsics",
+            description="Camera Intrinsics",
+            group="",
         ),
         desc.File(
             name='sensorDatabase',
             label='Sensor Database',
             description='''Camera sensor width database path.''',
             value=os.environ.get('ALICEVISION_SENSOR_DB', ''),
-            uid=[0],
+            uid=[],
         ),
         desc.IntParam(
             name='defaultFocalLengthPix',
@@ -98,17 +111,33 @@ class CameraInit(desc.CommandLineNode):
             values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
             exclusive=True,
             uid=[],
-        )
+        ),
+        desc.StringParam(
+            name='_viewpointsUid',
+            label='Internal Intrinsics Uid',
+            description='',
+            value='',
+            uid=[],
+            group='',
+        ),
     ]

     outputs = [
         desc.File(
             name='output',
             label='Output',
-            description='''Output directory for the new SfMData file Optional parameters:''',
-            value='{cache}/{nodeType}/{uid0}/',
+            description='''Output SfMData.''',
+            value='{cache}/{nodeType}/{uid0}',  # TODO
             uid=[],
         ),
+        desc.File(  # TODO: TO REMOVE
+            name='outputSfM',
+            label='Output SfM',
+            description='''Output SfMData.''',
+            value='{cache}/{nodeType}/{uid0}/sfm_data.json',
+            uid=[],
+            group="",
+        ),
         desc.File(
             name='outputSfm',
             label='Output SfM',
@@ -118,3 +147,81 @@ class CameraInit(desc.CommandLineNode):
             group='',  # not a command line argument
         )
     ]
+
+    def updateInternals(self, node):
+        if not node.viewpoints:
+            return
+        lastViewpointsUid = node.attribute("_viewpointsUid").value
+        if lastViewpointsUid == node.viewpoints.uid(1):
+            return
+
+        origCmdVars = node._cmdVars.copy()
+        # Python3: with tempfile.TemporaryDirectory(prefix="Meshroom_CameraInit") as tmpCache
+        tmpCache = tempfile.mkdtemp()
+        localCmdVars = {
+            'cache': tmpCache,
+            'nodeType': node.nodeType,
+        }
+        node._buildCmdVars(localCmdVars)
+        node._cmdVars = localCmdVars
+        try:
+            os.makedirs(os.path.join(tmpCache, node.internalFolder))
+            self.createViewpointsFile(node)
+            cmd = self.buildCommandLine(node.chunks[0])
+            # logging.debug(' - commandLine:', cmd)
+            subprocess = psutil.Popen(cmd, stdout=None, stderr=None, shell=True)
+            stdout, stderr = subprocess.communicate()
+            subprocess.wait()
+            if subprocess.returncode != 0:
+                logging.warning('CameraInit: Error on updateInternals of node "{}".'.format(node.name))
+        except Exception:
+            logging.warning('CameraInit: Error on updateInternals of node "{}".'.format(node.name))
+            raise
+        finally:
+            node._cmdVars = origCmdVars
+            shutil.rmtree(tmpCache)
+
+        # TODO: reload result of aliceVision_cameraInit
+        # cameraInitSfM = node.viewpointsFile  # localCmdVars['outputSfMValue']
+        # jsonData = open(cameraInitSfM, 'r').read()
+        # data = json.loads(jsonData)
+        # with GraphModification(node.graph):
+        #     node.viewpoints.value = data.get("views", [])
+        #     node.intrinsics.value = data.get("intrinsics", [])
+
+        node.attribute("_viewpointsUid").value = node.viewpoints.uid(1)
+
+    def createViewpointsFile_new(self, node):
+        if node.viewpoints:
+            sfmData = {
+                "version": [1, 0, 0],
+                "views": node.viewpoints.getPrimitiveValue(exportDefault=False),
+                "intrinsics": node.intrinsics.getPrimitiveValue(exportDefault=False),
+            }
+            node.viewpointsFile = '{cache}/{nodeType}/{uid0}/viewpoints.json'.format(**node._cmdVars)
+            with open(node.viewpointsFile, 'w') as f:
+                f.write(json.dumps(sfmData, indent=4))
+                # python3: json.dumps(node.viewpoints, f, indent=4)
+
+    def createViewpointsFile(self, node):
+        """
+        Temporary compatibility method.
+        """
+        if node.viewpoints:
+            sfmData = {
+                "resources": [v["image"] for v in node.viewpoints.getPrimitiveValue(exportDefault=False)],
+            }
+            node.viewpointsFile = '{cache}/{nodeType}/{uid0}/viewpoints.json'.format(**node._cmdVars)
+            with open(node.viewpointsFile, 'w') as f:
+                f.write(json.dumps(sfmData, indent=4))
+                # python3: json.dumps(node.viewpoints, f, indent=4)
+
+    def buildCommandLine(self, chunk):
+        cmd = desc.CommandLineNode.buildCommandLine(self, chunk)
+        if len(chunk.node.viewpoints):
+            cmd += ' --jsonFile ' + chunk.node.viewpointsFile
+        return cmd
+
+    def processChunk(self, chunk):
+        self.createViewpointsFile(chunk.node)
+        desc.CommandLineNode.processChunk(self, chunk)
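For reference, the temporary-compatibility createViewpointsFile above writes a viewpoints.json reduced to the image paths (illustrative paths below; the createViewpointsFile_new variant would instead emit version, views and intrinsics):

    {
        "resources": [
            "/path/to/IMG_0001.JPG",
            "/path/to/IMG_0002.JPG"
        ]
    }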

View file

@@ -4,6 +4,7 @@ class DepthMap(desc.CommandLineNode):
     internalFolder = '{cache}/{nodeType}/{uid0}/'
     commandLine = 'aliceVision_depthMapEstimation {allParams}'
     gpu = desc.Level.INTENSIVE
+    parallelization = desc.Parallelization(inputListParamName='viewpoints', blockSize=3)

     inputs = [
         desc.File(

View file

@@ -4,6 +4,7 @@ class DepthMapFilter(desc.CommandLineNode):
     internalFolder = '{cache}/{nodeType}/{uid0}/'
     commandLine = 'aliceVision_depthMapFiltering {allParams}'
     gpu = desc.Level.NORMAL
+    parallelization = desc.Parallelization(inputListParamName='viewpoints', blockSize=10)

     inputs = [
         desc.File(

View file

@@ -5,6 +5,8 @@ from meshroom.core import desc
 class FeatureExtraction(desc.CommandLineNode):
     internalFolder = '{cache}/{nodeType}/{uid0}/'
     commandLine = 'aliceVision_featureExtraction {allParams}'
+    parallelization = desc.Parallelization(inputListParamName='viewpoints', blockSize=10)
+    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

     inputs = [
         desc.File(
@@ -40,22 +42,6 @@ class FeatureExtraction(desc.CommandLineNode):
             value='',
             uid=[0],
         ),
-        desc.IntParam(
-            name='rangeStart',
-            label='Range Start',
-            description='''Range image index start.''',
-            value=-1,
-            range=(-sys.maxsize, sys.maxsize, 1),
-            uid=[0],
-        ),
-        desc.IntParam(
-            name='rangeSize',
-            label='Range Size',
-            description='''Range size. Log parameters:''',
-            value=1,
-            range=(-sys.maxsize, sys.maxsize, 1),
-            uid=[0],
-        ),
         desc.ChoiceParam(
             name='verboseLevel',
             label='Verbose Level',
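The removed rangeStart/rangeSize input attributes are superseded by the commandLineRange template above, which CommandLineNode.buildCommandLine fills from Range.toDict(). Expansion sketch for the second chunk of 25 viewpoints with blockSize=10:

    from meshroom.core.desc import Range

    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
    print(commandLineRange.format(**Range(iteration=1, blockSize=10, fullSize=25).toDict()))
    # --rangeStart 10 --rangeSize 10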

View file

@@ -5,6 +5,8 @@ from meshroom.core import desc
 class FeatureMatching(desc.CommandLineNode):
     internalFolder = '{cache}/{nodeType}/{uid0}/'
     commandLine = 'aliceVision_featureMatching {allParams}'
+    parallelization = desc.Parallelization(inputListParamName='viewpoints', blockSize=20)
+    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

     inputs = [
         desc.File(

View file

@@ -4,41 +4,48 @@ import meshroom.multiview

 def test_multiviewPipeline():
-    graph1 = meshroom.multiview.photogrammetryPipeline(inputFolder='/non/existing/folder')
+    graph1 = meshroom.multiview.photogrammetryPipeline(inputImages=['/non/existing/fileA'])
     graph2 = meshroom.multiview.photogrammetryPipeline(inputImages=[])
     graph2b = meshroom.multiview.photogrammetryPipeline(inputImages=[])
     graph3 = meshroom.multiview.photogrammetryPipeline(inputImages=['/non/existing/file1', '/non/existing/file2'])
     graph4 = meshroom.multiview.photogrammetryPipeline(inputViewpoints=[
-        {'image': '/non/existing/file1', 'focal': 50},
-        {'image': '/non/existing/file2', 'focal': 55}
+        {'image': '/non/existing/file1', 'intrinsicId': 50},
+        {'image': '/non/existing/file2', 'intrinsicId': 55}
     ])
     graph4b = meshroom.multiview.photogrammetryPipeline(inputViewpoints=[
-        {'image': '/non/existing/file1', 'focal': 50},
-        {'image': '/non/existing/file2', 'focal': 55}
+        {'image': '/non/existing/file1', 'intrinsicId': 50},
+        {'image': '/non/existing/file2', 'intrinsicId': 55}
     ])

-    assert graph1.findNode('CameraInit').imageDirectory.value == '/non/existing/folder'
-    assert graph2.findNode('CameraInit').imageDirectory.value == ''
-    assert graph3.findNode('CameraInit').imageDirectory.value == ''
-    assert graph4.findNode('CameraInit').imageDirectory.value == ''
+    assert graph1.findNode('CameraInit').viewpoints[0].image.value == '/non/existing/fileA'
+    assert len(graph2.findNode('CameraInit').viewpoints) == 0
+    assert graph3.findNode('CameraInit').viewpoints[0].image.value == '/non/existing/file1'
+    assert graph4.findNode('CameraInit').viewpoints[0].image.value == '/non/existing/file1'

-    assert len(graph1.findNode('CameraInit').viewpoints) == 0
+    assert len(graph1.findNode('CameraInit').viewpoints) == 1
     assert len(graph2.findNode('CameraInit').viewpoints) == 0
     assert len(graph3.findNode('CameraInit').viewpoints) == 2
     assert len(graph4.findNode('CameraInit').viewpoints) == 2

     viewpoints = graph3.findNode('CameraInit').viewpoints
     assert viewpoints[0].image.value == '/non/existing/file1'
-    assert viewpoints[0].focal.value == -1
+    assert viewpoints[0].intrinsicId.value == -1
     assert viewpoints[1].image.value == '/non/existing/file2'
-    assert viewpoints[1].focal.value == -1
+    assert viewpoints[1].intrinsicId.value == -1
+
+    assert viewpoints[0].image.isDefault() == False
+    assert viewpoints[0].intrinsicId.isDefault() == True
+    assert viewpoints.getPrimitiveValue(exportDefault=False) == [
+        {"image": '/non/existing/file1'},
+        {"image": '/non/existing/file2'},
+    ]

     for graph in (graph4, graph4b):
         viewpoints = graph.findNode('CameraInit').viewpoints
         assert viewpoints[0].image.value == '/non/existing/file1'
-        assert viewpoints[0].focal.value == 50
+        assert viewpoints[0].intrinsicId.value == 50
         assert viewpoints[1].image.value == '/non/existing/file2'
-        assert viewpoints[1].focal.value == 55
+        assert viewpoints[1].intrinsicId.value == 55

     # Ensure that all output UIDs are different as the input is different:
     # graph1 != graph2 != graph3 != graph4