Add chunk notion for parallelization and implement a specific updateInternals in the CameraInit node

* Add a chunk notion for parallelization
* Allow a node desc to implement a custom updateInternals
* CameraInit node implements a specific updateInternals to update the input image list
* FeatureExtraction, FeatureMatching, DepthMap, DepthMapFilter: implement parallelization
Fabien Castan 2017-11-07 15:47:14 +01:00
parent 39f6ef3d64
commit 1e4f8f8a61
14 changed files with 614 additions and 271 deletions
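
In short, a node desc opts into parallelization by declaring a desc.Parallelization and, for command-line nodes, a commandLineRange template. A minimal sketch of the pattern, not part of the commit itself (the node name and tool are hypothetical; the attribute names come from the diff below):

from meshroom.core import desc

class MyParallelNode(desc.CommandLineNode):
    internalFolder = '{cache}/{nodeType}/{uid0}/'
    commandLine = 'myTool {allParams}'
    # Split the work into blocks of 10 entries of the 'viewpoints' input list.
    parallelization = desc.Parallelization(inputListParamName='viewpoints', blockSize=10)
    # Appended per chunk by CommandLineNode.buildCommandLine, formatted from Range.toDict().
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'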

View file

@@ -3,6 +3,8 @@ import argparse
import meshroom.core.graph
from meshroom.core.graph import Status
from meshroom.core.desc import Range
parser = argparse.ArgumentParser(description='Execute a Graph of processes.')
parser.add_argument('graphFile', metavar='GRAPHFILE.mg', type=str,
@@ -11,7 +13,9 @@ parser.add_argument('--node', metavar='NODE_NAME', type=str,
help='Process the node. It will generate an error if the dependencies are not already computed.')
parser.add_argument('--toNode', metavar='NODE_NAME', type=str,
help='Process the node with its dependencies.')
parser.add_argument('--force', help='Force recompute',
parser.add_argument('--forceStatus', help='Force computation if status is RUNNING or SUBMITTED.',
action='store_true')
parser.add_argument('--forceCompute', help='Compute in all cases even if already computed.',
action='store_true')
parser.add_argument('--extern', help='Use this option when you compute externally after submission to a render farm from meshroom.',
action='store_true')
@@ -19,6 +23,9 @@ parser.add_argument('--cache', metavar='FOLDER', type=str,
default=None,
help='Override the cache folder')
parser.add_argument('-i', '--iteration', type=int,
default=-1, help='')
args = parser.parse_args()
graph = meshroom.core.graph.loadGraph(args.graphFile)
@@ -32,14 +39,22 @@ if args.node:
submittedStatuses = [Status.SUBMITTED_LOCAL, Status.RUNNING]
if not args.extern:
submittedStatuses.append(Status.SUBMITTED_EXTERN)
if node.status.status in submittedStatuses:
print('Error: Node is already submitted with status "{}"'.format(node.status.status.name))
if not args.forceStatus and not args.forceCompute:
for range in node.ranges:
if node.status[range.iteration].status in submittedStatuses:
print('Error: Node is already submitted with status "{}". See file: "{}"'.format(node.status[range.iteration].status.name, node.statusFile(range)))
exit(-1)
if args.force or node.status.status != pg.Status.SUCCESS:
if not node.hasStatus(Status.SUCCESS) or args.forceCompute:
if args.iteration != -1:
node.processIteration(args.iteration)
else:
node.process()
else:
if args.iteration != -1:
print('Error: "--iteration" only make sense when used with "--node".')
exit(-1)
toNodes = None
if args.toNode:
toNodes = graph.findNodes(args.toNode)
pg.execute(graph, toNodes=toNodes, force=args.force)
meshroom.core.graph.execute(graph, toNodes=toNodes, forceCompute=args.forceCompute, forceStatus=args.forceStatus)
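
With these flags, a single block of a parallelized node can be recomputed in isolation by combining --node and --iteration, e.g. meshroom_compute GRAPHFILE.mg --node FeatureExtraction_1 --iteration 0 (script and node names here are illustrative); as the check above shows, --iteration without --node is rejected.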

View file

@@ -3,8 +3,8 @@ import argparse
import os
import meshroom.core
import meshroom.core.graph
from meshroom import multiview
from meshroom.core import graph as pg
parser = argparse.ArgumentParser(description='Launch the full photogrammetry pipeline.')
@@ -25,6 +25,10 @@ parser.add_argument('--cache', metavar='FOLDER', type=str,
help='Choose a custom cache folder')
parser.add_argument('--save', metavar='FOLDER', type=str, required=False,
help='Save the workflow to a meshroom file.')
parser.add_argument('--forceStatus', help='Force computation if status is RUNNING or SUBMITTED.',
action='store_true')
parser.add_argument('--forceCompute', help='Compute in all cases even if already computed.',
action='store_true')
parser.add_argument('--scale', type=int, default=2,
help='Downscale factor for MVS steps. Possible values are: 1, 2, 4, 8, 16.')
@@ -54,9 +58,8 @@ if not graph.cacheDir:
graph.cacheDir = meshroom.core.defaultCacheFolder
if args.output:
graph.update()
toNodes = None
if args.toNode:
toNodes = graph.findNodes(args.toNode)
pg.execute(graph, toNodes=toNodes)
meshroom.core.graph.execute(graph, toNodes=toNodes, forceCompute=args.forceCompute, forceStatus=args.forceStatus)

View file

@@ -64,12 +64,14 @@ else:
nodes, edges = graph.dfsOnFinish(startNodes=startNodes)
for node in nodes:
print('{}: {}'.format(node.name, node.statistics.toDict()))
for chunk in node.chunks:
print('{}: {}\n'.format(chunk.name, chunk.statistics.toDict()))
if args.exportHtml:
with open(args.exportHtml, 'w') as fileObj:
for node in nodes:
for curves in (node.statistics.computer.curves, node.statistics.process.curves):
for chunk in node.chunks:
for curves in (chunk.statistics.computer.curves, chunk.statistics.process.curves):
exportCurves = defaultdict(list)
for name, curve in curves.items():
s = name.split('.')

View file

@@ -3,14 +3,14 @@ import argparse
import os
from pprint import pprint
from meshroom.core import graph as pg
import meshroom.core.graph
parser = argparse.ArgumentParser(description='Query the status of nodes in a Graph of processes.')
parser.add_argument('graphFile', metavar='GRAPHFILE.mg', type=str,
help='Filepath to a graph file.')
parser.add_argument('--node', metavar='NODE_NAME', type=str,
help='Process the node alone.')
parser.add_argument('--graph', metavar='NODE_NAME', type=str,
parser.add_argument('--toNode', metavar='NODE_NAME', type=str,
help='Process the node and all previous nodes needed.')
parser.add_argument("--verbose", help="Print full status information",
action="store_true")
@@ -21,7 +21,7 @@ if not os.path.exists(args.graphFile):
print('ERROR: No graph file "{}".'.format(args.graphFile))
exit(-1)
graph = pg.loadGraph(args.graphFile)
graph = meshroom.core.graph.loadGraph(args.graphFile)
graph.update()
@@ -30,17 +30,19 @@ if args.node:
if node is None:
print('ERROR: node "{}" does not exist in file "{}".'.format(args.node, args.graphFile))
exit(-1)
print('{}: {}'.format(node.name, node.status.status.name))
for chunk in node.chunks:
print('{}: {}'.format(chunk.name, chunk.status.status.name))
if args.verbose:
print('statusFile: ', node.statusFile())
pprint(node.status.toDict())
else:
startNodes = None
if args.graph:
startNodes = [graph.nodes(args.graph)]
if args.toNode:
startNodes = [graph.nodes(args.toNode)]
nodes, edges = graph.dfsOnFinish(startNodes=startNodes)
for node in nodes:
print('{}: {}'.format(node.name, node.status.status.name))
for chunk in node.chunks:
print('{}: {}'.format(chunk.name, chunk.status.status.name))
if args.verbose:
pprint([n.status.toDict() for n in nodes])

View file

@@ -12,14 +12,16 @@ ENGINE = ''
DEFAULT_TAGS = {'prod': ''}
def createTask(meshroomFile, node, nbFrames=1, parallel=False, rangeSize=0):
def createTask(meshroomFile, node):
tags = DEFAULT_TAGS.copy()  # copy so we don't modify the default tags
nbFrames = 1
arguments = {}
parallelArgs = ''
if parallel:
parallelArgs = ' --rangeStart @start --rangeSize {rangeSize}'.format(rangeSize=rangeSize)
arguments.update({'start': 0, 'end': nbFrames-1, 'step': rangeSize})
print('node: ', node.name)
if node.isParallelized:
blockSize, fullSize, nbBlocks = node.nodeDesc.parallelization.getSizes(node)
parallelArgs = ' --iteration @start'
arguments.update({'start': 0, 'end': nbBlocks-1, 'step': 1})
tags['nbFrames'] = nbFrames
allRequirements = list(BASE_REQUIREMENTS)
@@ -68,9 +70,14 @@ if args.toNode:
nodesToProcess, edgesToProcess = graph.dfsToProcess(startNodes=toNodes)
print("edgesToProcess:", edgesToProcess)
flowEdges = graph.flowEdges(startNodes=toNodes)
edgesToProcess = set(edgesToProcess).intersection(flowEdges)
print("nodesToProcess:", nodesToProcess)
print("edgesToProcess:", edgesToProcess)
if not nodesToProcess:
print('Nothing to compute')
exit(-1)
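
As a worked example of the block math used by the submitter, not part of the commit (sizes are illustrative, using the blockSize=3 that DepthMap declares below):

import math

# 25 input viewpoints split into blocks of 3 (DepthMap's blockSize below).
blockSize, fullSize = 3, 25
nbBlocks = int(math.ceil(float(fullSize) / float(blockSize)))  # -> 9
# The farm task then spans frames start=0 .. end=nbBlocks-1=8 with step=1,
# and each frame runs the node with the extra ' --iteration @start' argument.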

View file

@@ -1,10 +1,10 @@
from meshroom.common import BaseObject, Property, Variant
from enum import Enum # available by default in python3. For python2: "pip install enum34"
import collections
import math
import os
import psutil
class Attribute(BaseObject):
"""
"""
@@ -181,6 +181,76 @@ class Level(Enum):
INTENSIVE = 2
class Range:
def __init__(self, iteration=0, blockSize=0, fullSize=0):
self.iteration = iteration
self.blockSize = blockSize
self.fullSize = fullSize
@property
def start(self):
return self.iteration * self.blockSize
@property
def effectiveBlockSize(self):
remaining = (self.fullSize - self.start) + 1
return self.blockSize if remaining >= self.blockSize else remaining
@property
def end(self):
return self.start + self.effectiveBlockSize
@property
def last(self):
return self.end - 1
def toDict(self):
return {
"rangeIteration": self.iteration,
"rangeStart": self.start,
"rangeEnd": self.end,
"rangeLast": self.last,
"rangeBlockSize": self.effectiveBlockSize,
"rangeFullSize": self.fullSize,
}
class Parallelization:
def __init__(self, inputListParamName='', staticNbBlocks=0, blockSize=0):
self.inputListParamName = inputListParamName
self.staticNbBlocks = staticNbBlocks
self.blockSize = blockSize
def getSizes(self, node):
"""
Args:
node:
Returns: (blockSize, fullSize, nbBlocks)
"""
if self.inputListParamName:
parentNodes, edges = node.graph.dfsOnFinish(startNodes=[node])
for parentNode in parentNodes:
if self.inputListParamName in parentNode.getAttributes().keys():
fullSize = len(parentNode.attribute(self.inputListParamName))
nbBlocks = int(math.ceil(float(fullSize) / float(self.blockSize)))
return (self.blockSize, fullSize, nbBlocks)
raise RuntimeError('Cannot find the "inputListParamName": "{}" in the list of input nodes: {} for node: {}'.format(self.inputListParamName, parentNodes, node.name))
if self.staticNbBlocks:
return (1, self.staticNbBlocks, self.staticNbBlocks)
return None
def getRange(self, node, iteration):
blockSize, fullSize, nbBlocks = self.getSizes(node)
return Range(iteration=iteration, blockSize=blockSize, fullSize=fullSize)
def getRanges(self, node):
blockSize, fullSize, nbBlocks = self.getSizes(node)
ranges = []
for i in range(nbBlocks):
ranges.append(Range(iteration=i, blockSize=blockSize, fullSize=fullSize))
return ranges
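
A quick worked example of the Range arithmetic above, not part of the commit (sizes are illustrative):

r = Range(iteration=2, blockSize=10, fullSize=30)
assert r.start == 20                # iteration * blockSize
assert r.effectiveBlockSize == 10   # a full block here; the tail block may be smaller
assert (r.end, r.last) == (30, 29)  # end is exclusive, last is inclusive
# r.toDict() -> {'rangeIteration': 2, 'rangeStart': 20, 'rangeEnd': 30,
#                'rangeLast': 29, 'rangeBlockSize': 10, 'rangeFullSize': 30}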
class Node(object):
"""
"""
@@ -192,56 +262,67 @@ class Node(object):
packageVersion = ''
inputs = []
outputs = []
parallelization = None
def __init__(self):
pass
def updateInternals(self, node):
pass
def stop(self, node):
pass
def process(self, node):
raise NotImplementedError('No process implementation on this node')
def processChunk(self, node, range):
raise NotImplementedError('No process implementation on node: "{}"'.format(node.name))
class CommandLineNode(Node):
"""
"""
internalFolder = '{cache}/{nodeType}/{uid0}/'
commandLine = '' # need to be defined on the node
parallelization = None
commandLineRange = ''
def buildCommandLine(self, node):
def buildCommandLine(self, chunk):
cmdPrefix = ''
if 'REZ_ENV' in os.environ:
cmdPrefix = '{rez} {packageFullName} -- '.format(rez=os.environ.get('REZ_ENV'), packageFullName=node.packageFullName)
return cmdPrefix + node.nodeDesc.commandLine.format(**node._cmdVars)
cmdPrefix = '{rez} {packageFullName} -- '.format(rez=os.environ.get('REZ_ENV'), packageFullName=chunk.node.packageFullName)
cmdSuffix = ''
if chunk.range:
cmdSuffix = ' ' + self.commandLineRange.format(**chunk.range.toDict())
return cmdPrefix + chunk.node.nodeDesc.commandLine.format(**chunk.node._cmdVars) + cmdSuffix
def stop(self, node):
if node.subprocess:
node.subprocess.terminate()
def process(self, node):
def processChunk(self, chunk):
try:
with open(node.logFile(), 'w') as logF:
cmd = self.buildCommandLine(node)
with open(chunk.logFile(), 'w') as logF:
cmd = self.buildCommandLine(chunk)
print(' - commandLine:', cmd)
print(' - logFile:', node.logFile())
node.subprocess = psutil.Popen(cmd, stdout=logF, stderr=logF, shell=True)
print(' - logFile:', chunk.logFile())
chunk.subprocess = psutil.Popen(cmd, stdout=logF, stderr=logF, shell=True)
# store process static info into the status file
node.status.commandLine = cmd
# node.status.env = node.proc.environ()
# node.status.createTime = node.proc.create_time()
chunk.status.commandLine = cmd
# chunk.status.env = node.proc.environ()
# chunk.status.createTime = node.proc.create_time()
node.statThread.proc = node.subprocess
stdout, stderr = node.subprocess.communicate()
node.subprocess.wait()
chunk.statThread.proc = chunk.subprocess
stdout, stderr = chunk.subprocess.communicate()
chunk.subprocess.wait()
node.status.returnCode = node.subprocess.returncode
chunk.status.returnCode = chunk.subprocess.returncode
if node.subprocess.returncode != 0:
with open(node.logFile(), 'r') as logF:
if chunk.subprocess.returncode != 0:
with open(chunk.logFile(), 'r') as logF:
logContent = ''.join(logF.readlines())
raise RuntimeError('Error on node "{}":\nLog:\n{}'.format(node.name, logContent))
raise RuntimeError('Error on node "{}":\nLog:\n{}'.format(chunk.name, logContent))
except:
raise
finally:
node.subprocess = None
chunk.subprocess = None
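
To make the per-chunk suffix concrete, here is how a commandLineRange template expands for one chunk (an aside, not part of the commit; the template string is the one FeatureExtraction uses below, values illustrative):

commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
suffix = ' ' + commandLineRange.format(**Range(iteration=1, blockSize=10, fullSize=30).toDict())
# suffix == ' --rangeStart 10 --rangeSize 10', appended after the regular {allParams} arguments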

View file

@@ -386,8 +386,9 @@ class Status(Enum):
SUBMITTED_LOCAL = 3
RUNNING = 4
ERROR = 5
KILLED = 6
SUCCESS = 7
STOPPED = 6
KILLED = 7
SUCCESS = 8
class StatusData:
@@ -429,6 +430,143 @@ def clearProcessesStatus():
v.upgradeStatusTo(Status.KILLED)
class NodeChunk(BaseObject):
def __init__(self, node, range):
super(NodeChunk, self).__init__(node)
self.node = node
self.range = range
self.status = StatusData(node.name, node.nodeType)
self.statistics = stats.Statistics()
self._subprocess = None
@property
def index(self):
return self.range.iteration
@property
def name(self):
if self.range.blockSize:
return "{}({})".format(self.node.name, self.index)
else:
return self.node.name
@property
def statusName(self):
return self.status.name
def updateStatusFromCache(self):
"""
Update node status based on status file content/existence.
"""
statusFile = self.statusFile()
oldStatus = self.status.status
# No status file => reset status to Status.NONE
if not os.path.exists(statusFile):
self.status.reset()
else:
with open(statusFile, 'r') as jsonFile:
statusData = json.load(jsonFile)
self.status.fromDict(statusData)
if oldStatus != self.status.status:
self.statusChanged.emit()
def statusFile(self):
if self.range.blockSize == 0:
return os.path.join(self.node.graph.cacheDir, self.node.internalFolder, 'status')
else:
return os.path.join(self.node.graph.cacheDir, self.node.internalFolder, str(self.index) + '.status')
def statisticsFile(self):
if self.range.blockSize == 0:
return os.path.join(self.node.graph.cacheDir, self.node.internalFolder, 'statistics')
else:
return os.path.join(self.node.graph.cacheDir, self.node.internalFolder, str(self.index) + '.statistics')
def logFile(self):
if self.range.blockSize == 0:
return os.path.join(self.node.graph.cacheDir, self.node.internalFolder, 'log')
else:
return os.path.join(self.node.graph.cacheDir, self.node.internalFolder, str(self.index) + '.log')
def saveStatusFile(self):
"""
Write node status on disk.
"""
data = self.status.toDict()
statusFilepath = self.statusFile()
folder = os.path.dirname(statusFilepath)
if not os.path.exists(folder):
os.makedirs(folder)
statusFilepathWriting = statusFilepath + '.writing.' + str(uuid.uuid4())
with open(statusFilepathWriting, 'w') as jsonFile:
json.dump(data, jsonFile, indent=4)
shutil.move(statusFilepathWriting, statusFilepath)
def upgradeStatusTo(self, newStatus):
if newStatus.value <= self.status.status.value:
print('WARNING: downgrade status on node "{}" from {} to {}'.format(self.name, self.status.status,
newStatus))
self.status.status = newStatus
self.statusChanged.emit()
self.saveStatusFile()
def updateStatisticsFromCache(self):
"""
"""
oldTimes = self.statistics.times
statisticsFile = self.statisticsFile()
if not os.path.exists(statisticsFile):
return
with open(statisticsFile, 'r') as jsonFile:
statisticsData = json.load(jsonFile)
self.statistics.fromDict(statisticsData)
if oldTimes != self.statistics.times:
self.statisticsChanged.emit()
def saveStatistics(self):
data = self.statistics.toDict()
statisticsFilepath = self.statisticsFile()
folder = os.path.dirname(statisticsFilepath)
if not os.path.exists(folder):
os.makedirs(folder)
statisticsFilepathWriting = statisticsFilepath + '.writing.' + str(uuid.uuid4())
with open(statisticsFilepathWriting, 'w') as jsonFile:
json.dump(data, jsonFile, indent=4)
shutil.move(statisticsFilepathWriting, statisticsFilepath)
def isAlreadySubmitted(self):
return self.status.status in (Status.SUBMITTED_EXTERN, Status.SUBMITTED_LOCAL, Status.RUNNING)
def process(self):
global runningProcesses
runningProcesses[self.name] = self
self.upgradeStatusTo(Status.RUNNING)
self.statThread = stats.StatisticsThread(self)
self.statThread.start()
startTime = time.time()
try:
self.node.nodeDesc.processChunk(self)
except Exception as e:
self.upgradeStatusTo(Status.ERROR)
raise
except (KeyboardInterrupt, SystemError, GeneratorExit) as e:
self.upgradeStatusTo(Status.STOPPED)
raise
finally:
elapsedTime = time.time() - startTime
print(' - elapsed time:', elapsedTime)
# ask and wait for the stats thread to stop
self.statThread.stopRequest()
self.statThread.join()
del runningProcesses[self.name]
self.upgradeStatusTo(Status.SUCCESS)
statusChanged = Signal()
statusName = Property(str, statusName.fget, notify=statusChanged)
statisticsChanged = Signal()
class Node(BaseObject):
"""
"""
@@ -439,11 +577,13 @@ class Node(BaseObject):
def __init__(self, nodeDesc, parent=None, **kwargs):
super(Node, self).__init__(parent)
self._name = None # type: str
self.graph = None # type: Graph
self.nodeDesc = meshroom.core.nodesDesc[nodeDesc]()
self.packageName = self.nodeDesc.packageName
self.packageVersion = self.nodeDesc.packageVersion
self._name = None # type: str
self.graph = None # type: Graph
self._chunks = []
self._cmdVars = {}
self._attributes = DictModel(keyAttrName='name', parent=self)
self.attributesPerUid = defaultdict(set)
@@ -451,9 +591,6 @@ class Node(BaseObject):
for k, v in kwargs.items():
self.attribute(k).value = v
self.status = StatusData(self.name, self.nodeType)
self.statistics = stats.Statistics()
self._subprocess = None
def __getattr__(self, k):
try:
@@ -538,15 +675,11 @@ class Node(BaseObject):
'attributes': {k: v for k, v in attributes.items() if v is not None}, # filter empty values
}
def updateInternals(self):
self._cmdVars = {
'cache': self.graph.cacheDir,
'nodeType': self.nodeType,
}
def _buildCmdVars(self, cmdVars):
for uidIndex, associatedAttributes in self.attributesPerUid.items():
assAttr = [(a.getName(), a.uid(uidIndex)) for a in associatedAttributes]
assAttr.sort()
self._cmdVars['uid{}'.format(uidIndex)] = hash(tuple([b for a, b in assAttr]))
cmdVars['uid{}'.format(uidIndex)] = hash(tuple([b for a, b in assAttr]))
# Evaluate input params
for name, attr in self._attributes.objects.items():
@@ -557,15 +690,15 @@ class Node(BaseObject):
assert(isinstance(v, collections.Sequence) and not isinstance(v, basestring))
v = attr.attributeDesc.joinChar.join(v)
self._cmdVars[name] = '--{name} {value}'.format(name=name, value=v)
self._cmdVars[name + 'Value'] = str(v)
cmdVars[name] = '--{name} {value}'.format(name=name, value=v)
cmdVars[name + 'Value'] = str(v)
if v is not None and v is not '':
self._cmdVars[attr.attributeDesc.group] = self._cmdVars.get(attr.attributeDesc.group, '') + \
' ' + self._cmdVars[name]
cmdVars[attr.attributeDesc.group] = cmdVars.get(attr.attributeDesc.group, '') + \
' ' + cmdVars[name]
# For updating output attributes invalidation values
cmdVarsNoCache = self._cmdVars.copy()
cmdVarsNoCache = cmdVars.copy()
cmdVarsNoCache['cache'] = ''
# Evaluate output params
@@ -573,17 +706,81 @@ class Node(BaseObject):
if attr.isInput:
continue # skip inputs
attr.value = attr.attributeDesc.value.format(
**self._cmdVars)
**cmdVars)
attr._invalidationValue = attr.attributeDesc.value.format(
**cmdVarsNoCache)
v = attr.value
self._cmdVars[name] = '--{name} {value}'.format(name=name, value=v)
self._cmdVars[name + 'Value'] = str(v)
cmdVars[name] = '--{name} {value}'.format(name=name, value=v)
cmdVars[name + 'Value'] = str(v)
if v is not None and v is not '':
self._cmdVars[attr.attributeDesc.group] = self._cmdVars.get(attr.attributeDesc.group, '') + \
' ' + self._cmdVars[name]
cmdVars[attr.attributeDesc.group] = cmdVars.get(attr.attributeDesc.group, '') + \
' ' + cmdVars[name]
@property
def isParallelized(self):
return bool(self.nodeDesc.parallelization)
@property
def nbParallelizationBlocks(self):
return len(self.chunks)
def hasStatus(self, status):
if not self.chunks:
return False
for chunk in self.chunks:
if chunk.status.status != status:
return False
return True
def isAlreadySubmitted(self):
for chunk in self.chunks:
if chunk.isAlreadySubmitted():
return True
return False
def alreadySubmittedChunks(self):
submittedChunks = []
for chunk in self.chunks:
if chunk.isAlreadySubmitted():
submittedChunks.append(chunk)
return submittedChunks
def upgradeStatusTo(self, newStatus):
"""
Upgrade node to the given status and save it on disk.
"""
for chunk in self.chunks:
chunk.upgradeStatusTo(newStatus)
def updateStatisticsFromCache(self):
for chunk in self.chunks:
chunk.updateStatisticsFromCache()
def updateInternals(self):
if self.isParallelized:
ranges = self.nodeDesc.parallelization.getRanges(self)
if len(ranges) != len(self.chunks):
self._chunks = [NodeChunk(self, range) for range in ranges]
self.chunksChanged.emit()
else:
for chunk, range in zip(self.chunks, ranges):
chunk.range = range
else:
if len(self._chunks) != 1:
self._chunks = [NodeChunk(self, desc.Range())]
self.chunksChanged.emit()
else:
self._chunks[0].range = desc.Range()
self._cmdVars = {
'cache': self.graph.cacheDir,
'nodeType': self.nodeType,
}
self._buildCmdVars(self._cmdVars)
self.nodeDesc.updateInternals(self)
self.internalFolderChanged.emit()
@@ -591,79 +788,12 @@ class Node(BaseObject):
def internalFolder(self):
return self.nodeDesc.internalFolder.format(**self._cmdVars)
def statusFile(self):
return os.path.join(self.graph.cacheDir, self.internalFolder, 'status')
def statisticsFile(self):
return os.path.join(self.graph.cacheDir, self.internalFolder, 'statistics')
def logFile(self):
return os.path.join(self.graph.cacheDir, self.internalFolder, 'log')
def updateStatusFromCache(self):
"""
Update node status based on status file content/existence.
"""
statusFile = self.statusFile()
oldStatus = self.status.status
# No status file => reset status to Status.None
if not os.path.exists(statusFile):
self.status.reset()
else:
with open(statusFile, 'r') as jsonFile:
statusData = json.load(jsonFile)
self.status.fromDict(statusData)
if oldStatus != self.status.status:
self.statusChanged.emit()
def saveStatusFile(self):
"""
Write node status on disk.
"""
data = self.status.toDict()
statusFilepath = self.statusFile()
folder = os.path.dirname(statusFilepath)
if not os.path.exists(folder):
os.makedirs(folder)
statusFilepathWriting = statusFilepath + '.writing.' + str(uuid.uuid4())
with open(statusFilepathWriting, 'w') as jsonFile:
json.dump(data, jsonFile, indent=4)
shutil.move(statusFilepathWriting, statusFilepath)
def upgradeStatusTo(self, newStatus):
"""
Upgrade node to the given status and save it on disk.
"""
if newStatus.value <= self.status.status.value:
print('WARNING: downgrade status on node "{}" from {} to {}'.format(self._name, self.status.status,
newStatus))
self.status.status = newStatus
self.statusChanged.emit()
self.saveStatusFile()
def updateStatisticsFromCache(self):
"""
"""
statisticsFile = self.statisticsFile()
if not os.path.exists(statisticsFile):
return
with open(statisticsFile, 'r') as jsonFile:
statisticsData = json.load(jsonFile)
self.statistics.fromDict(statisticsData)
def saveStatistics(self):
data = self.statistics.toDict()
statisticsFilepath = self.statisticsFile()
folder = os.path.dirname(statisticsFilepath)
if not os.path.exists(folder):
os.makedirs(folder)
statisticsFilepathWriting = statisticsFilepath + '.writing.' + str(uuid.uuid4())
with open(statisticsFilepathWriting, 'w') as jsonFile:
json.dump(data, jsonFile, indent=4)
shutil.move(statisticsFilepathWriting, statisticsFilepath)
def isAlreadySubmitted(self):
return self.status.status in (Status.SUBMITTED_EXTERN, Status.SUBMITTED_LOCAL, Status.RUNNING)
for chunk in self.chunks:
chunk.updateStatusFromCache()
def submit(self):
self.upgradeStatusTo(Status.SUBMITTED_EXTERN)
@@ -674,27 +804,12 @@ class Node(BaseObject):
def stopProcess(self):
self.nodeDesc.stop(self)
def process(self):
global runningProcesses
runningProcesses[self.name] = self
self.upgradeStatusTo(Status.RUNNING)
self.statThread = stats.StatisticsThread(self)
self.statThread.start()
startTime = time.time()
try:
self.nodeDesc.process(self)
except BaseException:
self.upgradeStatusTo(Status.ERROR)
raise
finally:
elapsedTime = time.time() - startTime
print(' - elapsed time:', elapsedTime)
# ask and wait for the stats thread to stop
self.statThread.stopRequest()
self.statThread.join()
del runningProcesses[self.name]
def processIteration(self, iteration):
self.chunks[iteration].process()
self.upgradeStatusTo(Status.SUCCESS)
def process(self):
for chunk in self.chunks:
chunk.process()
def endSequence(self):
pass
@@ -702,9 +817,12 @@ class Node(BaseObject):
def getStatus(self):
return self.status
def getChunks(self):
return self._chunks
@property
def statusName(self):
return self.status.status.name
def statusNames(self):
return [s.status.name for s in self.status]
def __repr__(self):
return self.name
@@ -716,9 +834,8 @@ class Node(BaseObject):
internalFolder = Property(str, internalFolder.fget, notify=internalFolderChanged)
depthChanged = Signal()
depth = Property(int, depth.fget, notify=depthChanged)
statusChanged = Signal()
statusName = Property(str, statusName.fget, notify=statusChanged)
chunksChanged = Signal()
chunks = Property(Variant, getChunks, notify=chunksChanged)
WHITE = 0
GRAY = 1
@@ -794,6 +911,8 @@ class Graph(BaseObject):
if not isinstance(graphData, dict):
raise RuntimeError('loadGraph error: Graph is not a dict. File: {}'.format(filepath))
with GraphModification(self):
# Init name and cache directory from input filepath
self.cacheDir = os.path.join(os.path.abspath(os.path.dirname(filepath)), meshroom.core.cacheFolderName)
self.name = os.path.splitext(os.path.basename(filepath))[0]
for nodeName, nodeData in graphData.items():
@@ -1036,16 +1155,22 @@ class Graph(BaseObject):
visitor = Visitor()
def finishVertex(vertex, graph):
if vertex.status.status in (Status.SUBMITTED_EXTERN,
chunksToProcess = []
for chunk in vertex.chunks:
if chunk.status.status in (Status.SUBMITTED_EXTERN,
Status.SUBMITTED_LOCAL):
print('WARNING: node "{}" is already submitted.'.format(vertex.name))
if vertex.status.status is Status.RUNNING:
print('WARNING: node "{}" is already running.'.format(vertex.name))
if vertex.status.status is not Status.SUCCESS:
nodes.append(vertex)
logging.warning('Node "{}" is already submitted.'.format(vertex.name))
if chunk.status.status is Status.RUNNING:
logging.warning('Node "{}" is already running.'.format(vertex.name))
if chunk.status.status is not Status.SUCCESS:
chunksToProcess.append(chunk)
if chunksToProcess:
nodes.append(vertex) # We could collect specific chunks
def finishEdge(edge, graph):
if (edge[0].status.status is not Status.SUCCESS) and (edge[1].status.status is not Status.SUCCESS):
if edge[0].hasStatus(Status.SUCCESS) or edge[1].hasStatus(Status.SUCCESS):
return
else:
edges.append(edge)
visitor.finishVertex = finishVertex
@@ -1171,13 +1296,11 @@
""" Request graph execution to be stopped """
self.stopExecutionRequested.emit()
def submittedNodes(self):
""" Return the list of submitted nodes inside this Graph """
return [node for node in self.nodes if node.isAlreadySubmitted()]
def clearSubmittedNodes(self):
""" Reset the status of already submitted nodes to Status.NONE """
[node.upgradeStatusTo(Status.NONE) for node in self.submittedNodes()]
for node in self.nodes:
for chunk in node.alreadySubmittedChunks():
chunk.upgradeStatusTo(Status.NONE)
@property
def nodes(self):
@@ -1222,25 +1345,26 @@ def getAlreadySubmittedNodes(nodes):
return out
def execute(graph, toNodes=None, force=False):
def execute(graph, toNodes=None, forceCompute=False, forceStatus=False):
"""
"""
if force:
if forceCompute:
nodes, edges = graph.dfsOnFinish(startNodes=toNodes)
else:
nodes, edges = graph.dfsToProcess(startNodes=toNodes)
nodesInConflict = getAlreadySubmittedNodes(nodes)
if nodesInConflict:
nodesStatus = set([node.status.status.name for node in nodesInConflict])
nodesStatus = set([status.status.name for node in nodesInConflict for status in node.status])
nodesName = [node.name for node in nodesInConflict]
#raise RuntimeError(
print(
'WARNING: Some nodes are already submitted with status: {}\n'
'Nodes: {}'.format(
msg = 'WARNING: Some nodes are already submitted with status: {}\nNodes: {}'.format(
', '.join(nodesStatus),
', '.join(nodesName),
))
', '.join(nodesName)
)
if forceStatus:
print(msg)
else:
raise RuntimeError(msg)
print('Nodes to execute: ', str([n.name for n in nodes]))
@@ -1254,7 +1378,7 @@ def execute(graph, toNodes=None, force=False):
except Exception as e:
logging.error("Error on node computation: {}".format(e))
graph.clearSubmittedNodes()
return
raise
for node in nodes:
node.endSequence()
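
For reference, the NodeChunk file naming above yields the following cache layout (node type and uid are illustrative):

<cacheDir>/FeatureExtraction/<uid0>/0.status  0.log  0.statistics   # chunk 0
<cacheDir>/FeatureExtraction/<uid0>/1.status  1.log  1.statistics   # chunk 1
<cacheDir>/FeatureExtraction/<uid0>/status    log    statistics     # non-parallelized node (blockSize == 0)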

View file

@@ -2,6 +2,7 @@ from collections import defaultdict
import psutil
import time
import threading
import signal
def bytes2human(n):
@@ -187,19 +188,21 @@ bytesPerGiga = 1024. * 1024. * 1024.
class StatisticsThread(threading.Thread):
def __init__(self, node):
def __init__(self, chunk):
threading.Thread.__init__(self)
self.node = node
signal.signal(signal.SIGINT, signal.SIG_IGN) # lambda signal, frame: self.stopRequest())
self.chunk = chunk
self.proc = psutil.Process() # by default current process pid
self.statistics = self.node.statistics
self.statistics = chunk.statistics
self._stopFlag = threading.Event()
def updateStats(self):
self.lastTime = time.time()
if self.statistics.update(self.proc):
self.node.saveStatistics()
if self.chunk.statistics.update(self.proc):
self.chunk.saveStatistics()
def run(self):
try:
while True:
self.updateStats()
if self._stopFlag.wait(60):
@@ -208,6 +211,8 @@ class StatisticsThread(threading.Thread):
if self.proc.is_running():
self.updateStats()
return
except (KeyboardInterrupt, SystemError, GeneratorExit):
pass
def stopRequest(self):
""" Request the thread to exit as soon as possible. """

View file

@@ -1,12 +1,32 @@
import os
import sys
from collections import OrderedDict
import os
import json
import psutil
import shutil
import tempfile
import logging
from meshroom.core import desc
from meshroom.core.graph import GraphModification
Viewpoint = [
desc.File(name="image", label="Image", description="Image filepath", value="", uid=[0]),
desc.FloatParam(name="focal", label="Focal Length", description="Focal Length", value=0.0, uid=[0], range=(5, 200, 1)),
desc.IntParam(name="id", label="Id", description="Image UID", value=-1, uid=[0], range=(0, 200, 1)),
desc.File(name="image", label="Image", description="Image Filepath", value="", uid=[0, 1]),
desc.IntParam(name="intrinsicId", label="Intrinsic", description="Internal Camera Parameters", value=-1, uid=[0], range=(0, 200, 1)),
desc.IntParam(name="rigId", label="Rig", description="Rig Parameters", value=-1, uid=[0], range=(0, 200, 1)),
desc.IntParam(name="rigSubPoseId", label="Rig Sub-Pose", description="Rig Sub-Pose Parameters", value=-1, uid=[0], range=(0, 200, 1)),
]
Intrinsic = [
desc.IntParam(name="id", label="Id", description="Intrinsic UID", value=-1, uid=[0], range=(0, 200, 1)),
desc.IntParam(name="initialFocalLength", label="Initial Focal Length", description="Initial Guess on the Focal Length", value=-1, uid=[0], range=(0, 200, 1)),
desc.IntParam(name="focalLength", label="Focal Length", description="Known/Calibrated Focal Length", value=-1, uid=[0], range=(0, 200, 1)),
desc.ChoiceParam(name="cameraType", label="Camera Type", description="Camera Type", value="", values=['', 'pinhole', 'radial1', 'radial3', 'brown', 'fisheye4'], exclusive=True, uid=[0]),
desc.StringParam(name="deviceMake", label="Make", description="Camera Make", value="", uid=[]),
desc.StringParam(name="deviceModel", label="Model", description="Camera Model", value="", uid=[]),
desc.StringParam(name="sensorWidth", label="Sensor Width", description="Camera Sensor Width", value="", uid=[0]),
]
@@ -16,34 +36,27 @@ class CameraInit(desc.CommandLineNode):
inputs = [
desc.ListAttribute(
name='viewpoints',
elementDesc=desc.GroupAttribute(name='viewpoint', label="Viewpoint", description="", groupDesc=Viewpoint,
group='allParams'),
name="viewpoints",
elementDesc=desc.GroupAttribute(name="viewpoint", label="Viewpoint", description="", groupDesc=Viewpoint,
group="allParams"),
label="Viewpoints",
description="Input viewpoints",
group=""
group="",
),
desc.File(
name='imageDirectory',
label='Image Directory',
description='''Input images folder.''',
value='',
uid=[0],
),
desc.File(
name='jsonFile',
label='Json File',
description='''Input file with all the user options. '''
'''It can be used to provide a list of images instead of a directory.''',
value='',
uid=[0],
desc.ListAttribute(
name="intrinsics",
elementDesc=desc.GroupAttribute(name="intrinsic", label="Intrinsic", description="", groupDesc=Intrinsic,
group="allParams"),
label="Intrinsics",
description="Camera Intrinsics",
group="",
),
desc.File(
name='sensorDatabase',
label='Sensor Database',
description='''Camera sensor width database path.''',
value=os.environ.get('ALICEVISION_SENSOR_DB', ''),
uid=[0],
uid=[],
),
desc.IntParam(
name='defaultFocalLengthPix',
@@ -98,17 +111,33 @@
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
exclusive=True,
uid=[],
)
),
desc.StringParam(
name='_viewpointsUid',
label='Internal Intrinsics Uid',
description='',
value='',
uid=[],
group='',
),
]
outputs = [
desc.File(
name='output',
label='Output',
description='''Output directory for the new SfMData file Optional parameters:''',
value='{cache}/{nodeType}/{uid0}/',
description='''Output SfMData.''',
value='{cache}/{nodeType}/{uid0}', # TODO
uid=[],
),
desc.File( # TODO: TO REMOVE
name='outputSfM',
label='Output SfM',
description='''Output SfMData.''',
value='{cache}/{nodeType}/{uid0}/sfm_data.json',
uid=[],
group="",
),
desc.File(
name='outputSfm',
label='Output SfM',
@@ -118,3 +147,81 @@
group='', # not a command line argument
)
]
def updateInternals(self, node):
if not node.viewpoints:
return
lastViewpointsUid = node.attribute("_viewpointsUid").value
if lastViewpointsUid == node.viewpoints.uid(1):
return
origCmdVars = node._cmdVars.copy()
# Python3: with tempfile.TemporaryDirectory(prefix="Meshroom_CameraInit") as tmpCache
tmpCache = tempfile.mkdtemp()
localCmdVars = {
'cache': tmpCache,
'nodeType': node.nodeType,
}
node._buildCmdVars(localCmdVars)
node._cmdVars = localCmdVars
try:
os.makedirs(os.path.join(tmpCache, node.internalFolder))
self.createViewpointsFile(node)
cmd = self.buildCommandLine(node.chunks[0])
# logging.debug(' - commandLine:', cmd)
subprocess = psutil.Popen(cmd, stdout=None, stderr=None, shell=True)
stdout, stderr = subprocess.communicate()
subprocess.wait()
if subprocess.returncode != 0:
logging.warning('CameraInit: Error on updateInternals of node "{}".'.format(node.name))
except Exception:
logging.warning('CameraInit: Error on updateInternals of node "{}".'.format(node.name))
raise
finally:
node._cmdVars = origCmdVars
shutil.rmtree(tmpCache)
# TODO: reload result of aliceVision_cameraInit
# cameraInitSfM = node.viewpointsFile # localCmdVars['outputSfMValue']
# jsonData = open(cameraInitSfM, 'r').read()
# data = json.loads(jsonData)
# with GraphModification(node.graph):
# node.viewpoints.value = data.get("views", [])
# node.intrinsics.value = data.get("intrinsics", [])
node.attribute("_viewpointsUid").value = node.viewpoints.uid(1)
def createViewpointsFile_new(self, node):
if node.viewpoints:
sfmData = {
"version": [1, 0, 0],
"views": node.viewpoints.getPrimitiveValue(exportDefault=False),
"intrinsics": node.intrinsics.getPrimitiveValue(exportDefault=False),
}
node.viewpointsFile = '{cache}/{nodeType}/{uid0}/viewpoints.json'.format(**node._cmdVars)
with open(node.viewpointsFile, 'w') as f:
f.write(json.dumps(sfmData, indent=4))
# python3: json.dumps(node.viewpoints, f, indent=4)
def createViewpointsFile(self, node):
"""
Temporary compatibility method.
"""
if node.viewpoints:
sfmData = {
"resources": [v["image"] for v in node.viewpoints.getPrimitiveValue(exportDefault=False)],
}
node.viewpointsFile = '{cache}/{nodeType}/{uid0}/viewpoints.json'.format(**node._cmdVars)
with open(node.viewpointsFile, 'w') as f:
f.write(json.dumps(sfmData, indent=4))
# python3: json.dumps(node.viewpoints, f, indent=4)
def buildCommandLine(self, chunk):
cmd = desc.CommandLineNode.buildCommandLine(self, chunk)
if len(chunk.node.viewpoints):
cmd += ' --jsonFile ' + chunk.node.viewpointsFile
return cmd
def processChunk(self, chunk):
self.createViewpointsFile(chunk.node)
desc.CommandLineNode.processChunk(self, chunk)
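
For reference, the temporary compatibility file written by createViewpointsFile only lists the image paths; for two input images it would look like this (paths illustrative, not part of the commit):

{
    "resources": [
        "/datasets/monument/IMG_0001.jpg",
        "/datasets/monument/IMG_0002.jpg"
    ]
}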

View file

@@ -4,6 +4,7 @@ class DepthMap(desc.CommandLineNode):
internalFolder = '{cache}/{nodeType}/{uid0}/'
commandLine = 'aliceVision_depthMapEstimation {allParams}'
gpu = desc.Level.INTENSIVE
parallelization = desc.Parallelization(inputListParamName='viewpoints', blockSize=3)
inputs = [
desc.File(

View file

@@ -4,6 +4,7 @@ class DepthMapFilter(desc.CommandLineNode):
internalFolder = '{cache}/{nodeType}/{uid0}/'
commandLine = 'aliceVision_depthMapFiltering {allParams}'
gpu = desc.Level.NORMAL
parallelization = desc.Parallelization(inputListParamName='viewpoints', blockSize=10)
inputs = [
desc.File(

View file

@@ -5,6 +5,8 @@ from meshroom.core import desc
class FeatureExtraction(desc.CommandLineNode):
internalFolder = '{cache}/{nodeType}/{uid0}/'
commandLine = 'aliceVision_featureExtraction {allParams}'
parallelization = desc.Parallelization(inputListParamName='viewpoints', blockSize=10)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
inputs = [
desc.File(
@@ -40,22 +42,6 @@
value='',
uid=[0],
),
desc.IntParam(
name='rangeStart',
label='Range Start',
description='''Range image index start.''',
value=-1,
range=(-sys.maxsize, sys.maxsize, 1),
uid=[0],
),
desc.IntParam(
name='rangeSize',
label='Range Size',
description='''Range size. Log parameters:''',
value=1,
range=(-sys.maxsize, sys.maxsize, 1),
uid=[0],
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',

View file

@@ -5,6 +5,8 @@ from meshroom.core import desc
class FeatureMatching(desc.CommandLineNode):
internalFolder = '{cache}/{nodeType}/{uid0}/'
commandLine = 'aliceVision_featureMatching {allParams}'
parallelization = desc.Parallelization(inputListParamName='viewpoints', blockSize=20)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
inputs = [
desc.File(

View file

@@ -4,41 +4,48 @@ import meshroom.multiview
def test_multiviewPipeline():
graph1 = meshroom.multiview.photogrammetryPipeline(inputFolder='/non/existing/folder')
graph1 = meshroom.multiview.photogrammetryPipeline(inputImages=['/non/existing/fileA'])
graph2 = meshroom.multiview.photogrammetryPipeline(inputImages=[])
graph2b = meshroom.multiview.photogrammetryPipeline(inputImages=[])
graph3 = meshroom.multiview.photogrammetryPipeline(inputImages=['/non/existing/file1', '/non/existing/file2'])
graph4 = meshroom.multiview.photogrammetryPipeline(inputViewpoints=[
{'image': '/non/existing/file1', 'focal': 50},
{'image': '/non/existing/file2', 'focal': 55}
{'image': '/non/existing/file1', 'intrinsicId': 50},
{'image': '/non/existing/file2', 'intrinsicId': 55}
])
graph4b = meshroom.multiview.photogrammetryPipeline(inputViewpoints=[
{'image': '/non/existing/file1', 'focal': 50},
{'image': '/non/existing/file2', 'focal': 55}
{'image': '/non/existing/file1', 'intrinsicId': 50},
{'image': '/non/existing/file2', 'intrinsicId': 55}
])
assert graph1.findNode('CameraInit').imageDirectory.value == '/non/existing/folder'
assert graph2.findNode('CameraInit').imageDirectory.value == ''
assert graph3.findNode('CameraInit').imageDirectory.value == ''
assert graph4.findNode('CameraInit').imageDirectory.value == ''
assert graph1.findNode('CameraInit').viewpoints[0].image.value == '/non/existing/fileA'
assert len(graph2.findNode('CameraInit').viewpoints) == 0
assert graph3.findNode('CameraInit').viewpoints[0].image.value == '/non/existing/file1'
assert graph4.findNode('CameraInit').viewpoints[0].image.value == '/non/existing/file1'
assert len(graph1.findNode('CameraInit').viewpoints) == 0
assert len(graph1.findNode('CameraInit').viewpoints) == 1
assert len(graph2.findNode('CameraInit').viewpoints) == 0
assert len(graph3.findNode('CameraInit').viewpoints) == 2
assert len(graph4.findNode('CameraInit').viewpoints) == 2
viewpoints = graph3.findNode('CameraInit').viewpoints
assert viewpoints[0].image.value == '/non/existing/file1'
assert viewpoints[0].focal.value == -1
assert viewpoints[0].intrinsicId.value == -1
assert viewpoints[1].image.value == '/non/existing/file2'
assert viewpoints[1].focal.value == -1
assert viewpoints[1].intrinsicId.value == -1
assert viewpoints[0].image.isDefault() == False
assert viewpoints[0].intrinsicId.isDefault() == True
assert viewpoints.getPrimitiveValue(exportDefault=False) == [
{"image": '/non/existing/file1'},
{"image": '/non/existing/file2'},
]
for graph in (graph4, graph4b):
viewpoints = graph.findNode('CameraInit').viewpoints
assert viewpoints[0].image.value == '/non/existing/file1'
assert viewpoints[0].focal.value == 50
assert viewpoints[0].intrinsicId.value == 50
assert viewpoints[1].image.value == '/non/existing/file2'
assert viewpoints[1].focal.value == 55
assert viewpoints[1].intrinsicId.value == 55
# Ensure that all output UIDs are different as the input is different:
# graph1 != graph2 != graph3 != graph4