Merge branch 'develop' into dev/nodesAndTaskManager

This commit is contained in:
Fabien Castan 2020-07-29 22:07:42 +02:00
commit bab908d2eb
99 changed files with 5659 additions and 1150 deletions

6
.github/stale.yml vendored
View file

@ -3,7 +3,11 @@ daysUntilStale: 120
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
# Issues with these labels will never be considered stale
exemptLabels: false
exemptLabels:
- "do not close"
- "feature request"
- "scope:doc"
- "new feature"
# Label to use when marking an issue as stale
staleLabel: stale
# Comment to post when marking an issue as stale. Set to `false` to disable

View file

@ -1,11 +1,10 @@
language: python
dist: xenial # required for Python >= 3.7
dist: bionic
python:
- "2.7"
- "3.6"
- "3.7"
- "3.8"
install:
- "pip install -r requirements.txt -r dev_requirements.txt --timeout 45"

View file

@ -1,6 +1,7 @@
ARG CUDA_TAG=7.0
ARG OS_TAG=7
FROM alicevision/alicevision:2.2.0-centos${OS_TAG}-cuda${CUDA_TAG}
ARG MR_VERSION
ARG CUDA_VERSION=9.0
ARG OS_VERSION=7
FROM alicevision/meshroom-deps:${MR_VERSION}-centos${OS_VERSION}-cuda${CUDA_VERSION}
LABEL maintainer="AliceVision Team alicevision-team@googlegroups.com"
# Execute with nvidia docker (https://github.com/nvidia/nvidia-docker/wiki/Installation-(version-2.0))
@ -9,55 +10,19 @@ LABEL maintainer="AliceVision Team alicevision-team@googlegroups.com"
ENV MESHROOM_DEV=/opt/Meshroom \
MESHROOM_BUILD=/tmp/Meshroom_build \
MESHROOM_BUNDLE=/opt/Meshroom_bundle \
QT_DIR=/opt/qt/5.13.0/gcc_64 \
QT_DIR=/opt/Qt5.14.1/5.14.1/gcc_64 \
PATH="${PATH}:${MESHROOM_BUNDLE}"
# Workaround for qmlAlembic/qtAliceVision builds: fuse lib/lib64 folders
RUN cp -rf ${AV_INSTALL}/lib/* ${AV_INSTALL}/lib64 && rm -rf ${AV_INSTALL}/lib && ln -s ${AV_INSTALL}/lib64 ${AV_INSTALL}/lib
# Install libs needed by Qt
RUN yum install -y \
flex \
fontconfig \
freetype \
glib2 \
libICE \
libX11 \
libxcb \
libXext \
libXi \
libXrender \
libSM \
libXt-devel \
libGLU-devel \
mesa-libOSMesa-devel \
mesa-libGL-devel \
mesa-libGLU-devel \
xcb-util-keysyms \
xcb-util-image
# Install Python3
RUN yum install -y centos-release-scl && yum install -y rh-python36
COPY . "${MESHROOM_DEV}"
WORKDIR "${MESHROOM_DEV}"
WORKDIR ${MESHROOM_DEV}
# Install Meshroom requirements and freeze bundle
RUN source scl_source enable rh-python36 && pip install -r dev_requirements.txt -r requirements.txt && python setup.py install_exe -d "${MESHROOM_BUNDLE}" && \
RUN source scl_source enable rh-python36 && python setup.py install_exe -d "${MESHROOM_BUNDLE}" && \
find ${MESHROOM_BUNDLE} -name "*Qt5Web*" -delete && \
find ${MESHROOM_BUNDLE} -name "*Qt5Designer*" -delete && \
rm -rf ${MESHROOM_BUNDLE}/lib/PySide2/typesystems/ ${MESHROOM_BUNDLE}/lib/PySide2/examples/ ${MESHROOM_BUNDLE}/lib/PySide2/include/ ${MESHROOM_BUNDLE}/lib/PySide2/Qt/translations/ ${MESHROOM_BUNDLE}/lib/PySide2/Qt/resources/ && \
rm ${MESHROOM_BUNDLE}/lib/PySide2/QtWeb* && \
rm ${MESHROOM_BUNDLE}/lib/PySide2/pyside2-lupdate ${MESHROOM_BUNDLE}/lib/PySide2/pyside2-rcc
# Install Qt (to build plugins)
WORKDIR /tmp/qt
# Qt version in specified in docker/qt-installer-noninteractive.qs
RUN curl -LO http://download.qt.io/official_releases/online_installers/qt-unified-linux-x64-online.run && \
chmod u+x qt-unified-linux-x64-online.run && \
./qt-unified-linux-x64-online.run --verbose --platform minimal --script "${MESHROOM_DEV}/docker/qt-installer-noninteractive.qs" && \
rm ./qt-unified-linux-x64-online.run
rm ${MESHROOM_BUNDLE}/lib/PySide2/pyside2-lupdate ${MESHROOM_BUNDLE}/lib/PySide2/rcc ${MESHROOM_BUNDLE}/lib/PySide2/designer
WORKDIR ${MESHROOM_BUILD}

62
Dockerfile_deps Normal file
View file

@ -0,0 +1,62 @@
ARG AV_VERSION
ARG CUDA_VERSION=9.0
ARG OS_VERSION=7
FROM alicevision/alicevision:${AV_VERSION}-centos${OS_VERSION}-cuda${CUDA_VERSION}
LABEL maintainer="AliceVision Team alicevision-team@googlegroups.com"
# Execute with nvidia docker (https://github.com/nvidia/nvidia-docker/wiki/Installation-(version-2.0))
# docker run -it --runtime=nvidia meshroom
ENV MESHROOM_DEV=/opt/Meshroom \
MESHROOM_BUILD=/tmp/Meshroom_build \
QT_DIR=/opt/Qt5.14.1/5.14.1/gcc_64 \
QT_CI_LOGIN=alicevisionjunk@gmail.com \
QT_CI_PASSWORD=azerty1.
# Workaround for qmlAlembic/qtAliceVision builds: fuse lib/lib64 folders
RUN cp -rf ${AV_INSTALL}/lib/* ${AV_INSTALL}/lib64 && rm -rf ${AV_INSTALL}/lib && ln -s ${AV_INSTALL}/lib64 ${AV_INSTALL}/lib
# Install libs needed by Qt
RUN yum install -y \
flex \
fontconfig \
freetype \
glib2 \
libICE \
libX11 \
libxcb \
libXext \
libXi \
libXrender \
libSM \
libXt-devel \
libGLU-devel \
mesa-libOSMesa-devel \
mesa-libGL-devel \
mesa-libGLU-devel \
xcb-util-keysyms \
xcb-util-image \
libxkbcommon-x11
# Install Python3
RUN yum install -y centos-release-scl && yum install -y rh-python36 && source scl_source enable rh-python36 && pip install --upgrade pip
COPY ./*requirements.txt ./setup.py "${MESHROOM_DEV}/"
# Install Meshroom requirements and freeze bundle
WORKDIR "${MESHROOM_DEV}"
RUN source scl_source enable rh-python36 && pip install -r dev_requirements.txt -r requirements.txt
COPY ./docker/qt-installer-noninteractive.qs "${MESHROOM_DEV}/docker/"
# Install Qt (to build plugins)
ENV QT_VERSION_A=5.14 \
QT_VERSION_B=5.14.1
WORKDIR /tmp/qt
RUN wget https://download.qt.io/archive/qt/${QT_VERSION_A}/${QT_VERSION_B}/qt-opensource-linux-x64-${QT_VERSION_B}.run && \
chmod +x qt-opensource-linux-x64-${QT_VERSION_B}.run && \
./qt-opensource-linux-x64-${QT_VERSION_B}.run --verbose --platform minimal --script "${MESHROOM_DEV}/docker/qt-installer-noninteractive.qs" && \
rm qt-opensource-linux-x64-${QT_VERSION_B}.run

View file

@ -1,7 +1,6 @@
image: Visual Studio 2019
environment:
matrix:
- PYTHON: "C:\\Python36-x64"
PYTHON: "C:\\Python38-x64"
install:
- "set PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
@ -14,3 +13,7 @@ test_script:
after_test:
- "python setup.py build"
- 7z a meshroomWin64snapshot%APPVEYOR_PULL_REQUEST_HEAD_COMMIT%.zip ./build/*
artifacts:
- path: meshroomWin64*.zip

View file

@ -114,7 +114,10 @@ with multiview.GraphModification(graph):
multiview.photogrammetry(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph)
elif args.pipeline.lower() == "hdri":
# default hdri pipeline
graph = multiview.hdri(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph)
multiview.hdri(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph)
elif args.pipeline.lower() == "hdrifisheye":
# default hdriFisheye pipeline
multiview.hdriFisheye(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph)
else:
# custom pipeline
graph.load(args.pipeline)

View file

@ -1,5 +1,6 @@
# packaging
cx_Freeze==5.1.1
cx_Freeze
# use cx_Freeze==5.1.1 for Python-2
# testing
pytest

View file

@ -1,83 +1,72 @@
// Emacs mode hint: -*- mode: JavaScript -*-
function Controller() {
installer.autoRejectMessageBoxes();
installer.installationFinished.connect(function() {
gui.clickButton(buttons.NextButton);
})
// Copied from https://bugreports.qt.io/browse/QTIFW-1072?jql=project%20%3D%20QTIFW
// there are some changes between Qt Online installer 3.0.1 and 3.0.2. Welcome page does some network
// queries that is why the next button is called too early.
var page = gui.pageWidgetByObjectName("WelcomePage")
page.completeChanged.connect(welcomepageFinished)
});
installer.setMessageBoxAutomaticAnswer("OverwriteTargetDirectory", QMessageBox.Yes);
installer.setMessageBoxAutomaticAnswer("installationErrorWithRetry", QMessageBox.Ignore);
installer.setMessageBoxAutomaticAnswer("cancelInstallation", QMessageBox.Yes);
}
Controller.prototype.WelcomePageCallback = function() {
gui.clickButton(buttons.NextButton);
console.log("Welcome Page");
gui.clickButton(buttons.NextButton, 3000);
}
welcomepageFinished = function()
{
//completeChange() -function is called also when other pages visible
//Make sure that next button is clicked only when in welcome page
if(gui.currentPageWidget().objectName == "WelcomePage") {
gui.clickButton( buttons.NextButton);
}
}
Controller.prototype.CredentialsPageCallback = function() {
gui.clickButton(buttons.NextButton);
console.log("Credentials Page");
var login = installer.environmentVariable("QT_CI_LOGIN");
var password = installer.environmentVariable("QT_CI_PASSWORD");
if( login === "" || password === "" ) {
console.log("No credentials provided - could stuck here forever");
gui.clickButton(buttons.CommitButton);
}
Controller.prototype.IntroductionPageCallback = function() {
gui.clickButton(buttons.NextButton);
}
Controller.prototype.TargetDirectoryPageCallback = function()
{
gui.currentPageWidget().TargetDirectoryLineEdit.setText("/opt/qt");
gui.clickButton(buttons.NextButton);
}
Controller.prototype.ComponentSelectionPageCallback = function() {
var widget = gui.currentPageWidget();
widget.loginWidget.EmailLineEdit.setText(login);
widget.loginWidget.PasswordLineEdit.setText(password);
gui.clickButton(buttons.CommitButton);
}
Controller.prototype.ComponentSelectionPageCallback = function() {
console.log("Select components");
var widget = gui.currentPageWidget();
widget.deselectAll();
// widget.selectComponent("qt");
// widget.selectComponent("qt.qt5.5130");
widget.selectComponent("qt.qt5.5130.gcc_64");
// widget.selectComponent("qt.qt5.5130.qtscript");
// widget.selectComponent("qt.qt5.5130.qtscript.gcc_64");
// widget.selectComponent("qt.qt5.5130.qtwebengine");
// widget.selectComponent("qt.qt5.5130.qtwebengine.gcc_64");
// widget.selectComponent("qt.qt5.5130.qtwebglplugin");
// widget.selectComponent("qt.qt5.5130.qtwebglplugin.gcc_64");
// widget.selectComponent("qt.tools");
widget.selectComponent("qt.qt5.5141.gcc_64");
gui.clickButton(buttons.NextButton);
}
Controller.prototype.IntroductionPageCallback = function() {
console.log("Introduction Page");
console.log("Retrieving meta information from remote repository");
gui.clickButton(buttons.NextButton);
}
Controller.prototype.TargetDirectoryPageCallback = function() {
gui.clickButton(buttons.NextButton);
}
Controller.prototype.LicenseAgreementPageCallback = function() {
gui.currentPageWidget().AcceptLicenseRadioButton.setChecked(true);
console.log("Accept license agreement");
var widget = gui.currentPageWidget();
if (widget != null) {
widget.AcceptLicenseRadioButton.setChecked(true);
}
gui.clickButton(buttons.NextButton);
}
Controller.prototype.StartMenuDirectoryPageCallback = function() {
Controller.prototype.ObligationsPageCallback = function() {
console.log("Accept obligation agreement");
var page = gui.pageWidgetByObjectName("ObligationsPage");
page.obligationsAgreement.setChecked(true);
page.completeChanged();
gui.clickButton(buttons.NextButton);
}
Controller.prototype.ReadyForInstallationPageCallback = function()
{
gui.clickButton(buttons.NextButton);
Controller.prototype.ReadyForInstallationPageCallback = function() {
console.log("Ready to install");
gui.clickButton(buttons.CommitButton);
}
Controller.prototype.FinishedPageCallback = function() {
var checkBoxForm = gui.currentPageWidget().LaunchQtCreatorCheckBoxForm
if (checkBoxForm && checkBoxForm.launchQtCreatorCheckBox) {
checkBoxForm.launchQtCreatorCheckBox.checked = false;
var widget = gui.currentPageWidget();
if (widget.LaunchQtCreatorCheckBoxForm) {
// No this form for minimal platform
widget.LaunchQtCreatorCheckBoxForm.launchQtCreatorCheckBox.setChecked(false);
}
gui.clickButton(buttons.FinishButton);
}
Controller.prototype.DynamicTelemetryPluginFormCallback = function() {
var page = gui.pageWidgetByObjectName("DynamicTelemetryPluginForm");
page.statisticGroupBox.disableStatisticRadioButton.setChecked(true);
gui.clickButton(buttons.NextButton);
}

View file

@ -1,10 +1,11 @@
__version__ = "2019.2.0"
__version_name__ = __version__
from distutils import util
from enum import Enum
import logging
import os
import sys
import logging
from enum import Enum
# sys.frozen is initialized by cx_Freeze and identifies a release package
isFrozen = getattr(sys, "frozen", False)
@ -21,6 +22,8 @@ if not isFrozen:
# Allow override from env variable
__version_name__ = os.environ.get("REZ_MESHROOM_VERSION", __version_name__)
useMultiChunks = util.strtobool(os.environ.get("MESHROOM_USE_MULTI_CHUNKS", "True"))
class Backend(Enum):
STANDALONE = 1

View file

@ -86,9 +86,9 @@ def loadPlugins(folder, packageName, classType):
errors.append(' * {}: {}'.format(pluginName, str(e)))
if errors:
logging.warning('== The following plugins could not be loaded ==\n'
'{}\n'
.format('\n'.join(errors)))
logging.warning('== The following "{package}" plugins could not be loaded ==\n'
'{errorMsg}\n'
.format(package=packageName, errorMsg='\n'.join(errors)))
return pluginTypes

View file

@ -1,8 +1,10 @@
#!/usr/bin/env python
# coding:utf-8
import collections
import copy
import re
import weakref
import types
import logging
from meshroom.common import BaseObject, Property, Variant, Signal, ListModel, DictModel, Slot
from meshroom.core import desc, pyCompatibility, hashValue
@ -54,8 +56,9 @@ class Attribute(BaseObject):
self._node = weakref.ref(node)
self.attributeDesc = attributeDesc
self._isOutput = isOutput
self._value = attributeDesc.value
self._value = copy.copy(attributeDesc.value)
self._label = attributeDesc.label
self._enabled = True
# invalidation value for output attributes
self._invalidationValue = ""
@ -93,6 +96,21 @@ class Attribute(BaseObject):
def getLabel(self):
return self._label
def getEnabled(self):
if isinstance(self.desc.enabled, types.FunctionType):
try:
return self.desc.enabled(self.node)
except:
# Node implementation may fail due to version mismatch
return True
return self.attributeDesc.enabled
def setEnabled(self, v):
if self._enabled == v:
return
self._enabled = v
self.enabledChanged.emit()
def _get_value(self):
return self.getLinkParam().value if self.isLink else self._value
@ -151,10 +169,8 @@ class Attribute(BaseObject):
@property
def isLink(self):
""" Whether the attribute is a link to another attribute. """
# Note: Need to test self.node.graph.edges before accessing to edges.keys() to avoid errors in particular conditions.
# For instance: open a scene, modify something and close without saving it.
if not self.node.graph or not self.node.graph.edges or not self.isInput:
return False
# note: directly use self.node.graph._edges to avoid using the property that may become invalid at some point
return self.node.graph and self.isInput and self in self.node.graph._edges.keys()
return self in self.node.graph.edges.keys()
@ -166,8 +182,15 @@ class Attribute(BaseObject):
"""
return isinstance(value, pyCompatibility.basestring) and Attribute.stringIsLinkRe.match(value)
def getLinkParam(self):
return self.node.graph.edge(self).src if self.isLink else None
def getLinkParam(self, recursive=False):
if not self.isLink:
return None
linkParam = self.node.graph.edge(self).src
if not recursive:
return linkParam
if linkParam.isLink:
return linkParam.getLinkParam(recursive)
return linkParam
@property
def hasOutputConnections(self):
@ -194,26 +217,32 @@ class Attribute(BaseObject):
# value is a link to another attribute
link = v[1:-1]
linkNode, linkAttr = link.split('.')
try:
g.addEdge(g.node(linkNode).attribute(linkAttr), self)
except KeyError as err:
logging.warning('Connect Attribute from Expression failed.\nExpression: "{exp}"\nError: "{err}".'.format(exp=v, err=err))
self.resetValue()
def getExportValue(self):
if self.isLink:
return self.getLinkParam().asLinkExpr()
if self.isOutput:
return self.desc.value
return self.defaultValue()
return self._value
def getValueStr(self):
if isinstance(self.attributeDesc, desc.ChoiceParam) and not self.attributeDesc.exclusive:
assert(isinstance(self.value, collections.Sequence) and not isinstance(self.value, pyCompatibility.basestring))
assert(isinstance(self.value, pyCompatibility.Sequence) and not isinstance(self.value, pyCompatibility.basestring))
return self.attributeDesc.joinChar.join(self.value)
if isinstance(self.attributeDesc, (desc.StringParam, desc.File)):
return '"{}"'.format(self.value)
return str(self.value)
def defaultValue(self):
return self.desc.value
if isinstance(self.desc.value, types.FunctionType):
return self.desc.value(self)
# Need to force a copy, for the case where the value is a list (avoid reference to the desc value)
return copy.copy(self.desc.value)
def _isDefault(self):
return self._value == self.defaultValue()
@ -221,6 +250,11 @@ class Attribute(BaseObject):
def getPrimitiveValue(self, exportDefault=True):
return self._value
def updateInternals(self):
# Emit if the enable status has changed
self.setEnabled(self.getEnabled())
name = Property(str, getName, constant=True)
fullName = Property(str, getFullName, constant=True)
label = Property(str, getLabel, constant=True)
@ -236,6 +270,8 @@ class Attribute(BaseObject):
isDefault = Property(bool, _isDefault, notify=valueChanged)
linkParam = Property(BaseObject, getLinkParam, notify=isLinkChanged)
node = Property(BaseObject, node.fget, constant=True)
enabledChanged = Signal()
enabled = Property(bool, getEnabled, setEnabled, notify=enabledChanged)
def raiseIfLink(func):
@ -352,6 +388,11 @@ class ListAttribute(Attribute):
return self.attributeDesc.joinChar.join([v.getValueStr() for v in self.value])
return super(ListAttribute, self).getValueStr()
def updateInternals(self):
super(ListAttribute, self).updateInternals()
for attr in self._value:
attr.updateInternals()
# Override value property setter
value = Property(Variant, Attribute._get_value, _set_value, notify=Attribute.valueChanged)
isDefault = Property(bool, _isDefault, notify=Attribute.valueChanged)
@ -433,6 +474,11 @@ class GroupAttribute(Attribute):
sortedSubValues = [self._value.get(attr.name).getValueStr() for attr in self.attributeDesc.groupDesc]
return self.attributeDesc.joinChar.join(sortedSubValues)
def updateInternals(self):
super(GroupAttribute, self).updateInternals()
for attr in self._value:
attr.updateInternals()
# Override value property
value = Property(Variant, Attribute._get_value, _set_value, notify=Attribute.valueChanged)
isDefault = Property(bool, _isDefault, notify=Attribute.valueChanged)

77
meshroom/core/desc.py Executable file → Normal file
View file

@ -1,7 +1,6 @@
from meshroom.common import BaseObject, Property, Variant, VariantList
from meshroom.core import pyCompatibility
from enum import Enum # available by default in python3. For python2: "pip install enum34"
import collections
import math
import os
import psutil
@ -11,7 +10,7 @@ class Attribute(BaseObject):
"""
"""
def __init__(self, name, label, description, value, advanced, uid, group):
def __init__(self, name, label, description, value, advanced, uid, group, enabled):
super(Attribute, self).__init__()
self._name = name
self._label = label
@ -20,6 +19,7 @@ class Attribute(BaseObject):
self._uid = uid
self._group = group
self._advanced = advanced
self._enabled = enabled
name = Property(str, lambda self: self._name, constant=True)
label = Property(str, lambda self: self._label, constant=True)
@ -28,6 +28,7 @@ class Attribute(BaseObject):
uid = Property(Variant, lambda self: self._uid, constant=True)
group = Property(str, lambda self: self._group, constant=True)
advanced = Property(bool, lambda self: self._advanced, constant=True)
enabled = Property(Variant, lambda self: self._enabled, constant=True)
type = Property(str, lambda self: self.__class__.__name__, constant=True)
def validateValue(self, value):
@ -38,8 +39,13 @@ class Attribute(BaseObject):
"""
return value
def matchDescription(self, value):
""" Returns whether the value perfectly match attribute's description. """
def matchDescription(self, value, conform=False):
""" Returns whether the value perfectly match attribute's description.
Args:
value: the value
conform: try to adapt value to match the description
"""
try:
self.validateValue(value)
except ValueError:
@ -49,13 +55,13 @@ class Attribute(BaseObject):
class ListAttribute(Attribute):
""" A list of Attributes """
def __init__(self, elementDesc, name, label, description, group='allParams', advanced=False, joinChar=' '):
def __init__(self, elementDesc, name, label, description, group='allParams', advanced=False, enabled=True, joinChar=' '):
"""
:param elementDesc: the Attribute description of elements to store in that list
"""
self._elementDesc = elementDesc
self._joinChar = joinChar
super(ListAttribute, self).__init__(name=name, label=label, description=description, value=[], uid=(), group=group, advanced=advanced)
super(ListAttribute, self).__init__(name=name, label=label, description=description, value=[], uid=(), group=group, advanced=advanced, enabled=enabled)
elementDesc = Property(Attribute, lambda self: self._elementDesc, constant=True)
uid = Property(Variant, lambda self: self.elementDesc.uid, constant=True)
@ -66,25 +72,25 @@ class ListAttribute(Attribute):
raise ValueError('ListAttribute only supports list/tuple input values (param:{}, value:{}, type:{})'.format(self.name, value, type(value)))
return value
def matchDescription(self, value):
def matchDescription(self, value, conform=False):
""" Check that 'value' content matches ListAttribute's element description. """
if not super(ListAttribute, self).matchDescription(value):
if not super(ListAttribute, self).matchDescription(value, conform):
return False
# list must be homogeneous: only test first element
if value:
return self._elementDesc.matchDescription(value[0])
return self._elementDesc.matchDescription(value[0], conform)
return True
class GroupAttribute(Attribute):
""" A macro Attribute composed of several Attributes """
def __init__(self, groupDesc, name, label, description, group='allParams', advanced=False, joinChar=' '):
def __init__(self, groupDesc, name, label, description, group='allParams', advanced=False, enabled=True, joinChar=' '):
"""
:param groupDesc: the description of the Attributes composing this group
"""
self._groupDesc = groupDesc
self._joinChar = joinChar
super(GroupAttribute, self).__init__(name=name, label=label, description=description, value={}, uid=(), group=group, advanced=advanced)
super(GroupAttribute, self).__init__(name=name, label=label, description=description, value={}, uid=(), group=group, advanced=advanced, enabled=enabled)
groupDesc = Property(Variant, lambda self: self._groupDesc, constant=True)
@ -97,20 +103,32 @@ class GroupAttribute(Attribute):
raise ValueError('Value contains key that does not match group description : {}'.format(invalidKeys))
return value
def matchDescription(self, value):
def matchDescription(self, value, conform=False):
"""
Check that 'value' contains the exact same set of keys as GroupAttribute's group description
and that every child value match corresponding child attribute description.
Args:
value: the value
conform: remove entries that don't exist in the description.
"""
if not super(GroupAttribute, self).matchDescription(value):
return False
attrMap = {attr.name: attr for attr in self._groupDesc}
if conform:
# remove invalid keys
invalidKeys = set(value.keys()).difference([attr.name for attr in self._groupDesc])
for k in invalidKeys:
del self._groupDesc[k]
else:
# must have the exact same child attributes
if sorted(value.keys()) != sorted(attrMap.keys()):
return False
for k, v in value.items():
# each child value must match corresponding child attribute description
if not attrMap[k].matchDescription(v):
if not attrMap[k].matchDescription(v, conform):
return False
return True
@ -127,15 +145,15 @@ class GroupAttribute(Attribute):
class Param(Attribute):
"""
"""
def __init__(self, name, label, description, value, uid, group, advanced):
super(Param, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced)
def __init__(self, name, label, description, value, uid, group, advanced, enabled):
super(Param, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced, enabled=enabled)
class File(Attribute):
"""
"""
def __init__(self, name, label, description, value, uid, group='allParams', advanced=False):
super(File, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced)
def __init__(self, name, label, description, value, uid, group='allParams', advanced=False, enabled=True):
super(File, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced, enabled=enabled)
def validateValue(self, value):
if not isinstance(value, pyCompatibility.basestring):
@ -146,8 +164,8 @@ class File(Attribute):
class BoolParam(Param):
"""
"""
def __init__(self, name, label, description, value, uid, group='allParams', advanced=False):
super(BoolParam, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced)
def __init__(self, name, label, description, value, uid, group='allParams', advanced=False, enabled=True):
super(BoolParam, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced, enabled=enabled)
def validateValue(self, value):
try:
@ -159,9 +177,9 @@ class BoolParam(Param):
class IntParam(Param):
"""
"""
def __init__(self, name, label, description, value, range, uid, group='allParams', advanced=False):
def __init__(self, name, label, description, value, range, uid, group='allParams', advanced=False, enabled=True):
self._range = range
super(IntParam, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced)
super(IntParam, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced, enabled=enabled)
def validateValue(self, value):
# handle unsigned int values that are translated to int by shiboken and may overflow
@ -178,9 +196,9 @@ class IntParam(Param):
class FloatParam(Param):
"""
"""
def __init__(self, name, label, description, value, range, uid, group='allParams', advanced=False):
def __init__(self, name, label, description, value, range, uid, group='allParams', advanced=False, enabled=True):
self._range = range
super(FloatParam, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced)
super(FloatParam, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced, enabled=enabled)
def validateValue(self, value):
try:
@ -194,13 +212,13 @@ class FloatParam(Param):
class ChoiceParam(Param):
"""
"""
def __init__(self, name, label, description, value, values, exclusive, uid, group='allParams', joinChar=' ', advanced=False):
def __init__(self, name, label, description, value, values, exclusive, uid, group='allParams', joinChar=' ', advanced=False, enabled=True):
assert values
self._values = values
self._exclusive = exclusive
self._joinChar = joinChar
self._valueType = type(self._values[0]) # cast to value type
super(ChoiceParam, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced)
super(ChoiceParam, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced, enabled=enabled)
def conformValue(self, val):
""" Conform 'val' to the correct type and check for its validity """
@ -213,7 +231,7 @@ class ChoiceParam(Param):
if self.exclusive:
return self.conformValue(value)
if not isinstance(value, collections.Iterable):
if not isinstance(value, pyCompatibility.Iterable):
raise ValueError('Non exclusive ChoiceParam value should be iterable (param:{}, value:{}, type:{})'.format(self.name, value, type(value)))
return [self.conformValue(v) for v in value]
@ -225,8 +243,8 @@ class ChoiceParam(Param):
class StringParam(Param):
"""
"""
def __init__(self, name, label, description, value, uid, group='allParams', advanced=False):
super(StringParam, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced)
def __init__(self, name, label, description, value, uid, group='allParams', advanced=False, enabled=True):
super(StringParam, self).__init__(name=name, label=label, description=description, value=value, uid=uid, group=group, advanced=advanced, enabled=enabled)
def validateValue(self, value):
if not isinstance(value, pyCompatibility.basestring):
@ -380,6 +398,7 @@ class Node(object):
outputs = []
size = StaticNodeSize(1)
parallelization = None
documentation = ''
def __init__(self):
pass
@ -429,7 +448,7 @@ class CommandLineNode(Node):
if not alreadyInEnv:
cmdPrefix = '{rez} {packageFullName} -- '.format(rez=os.environ.get('REZ_ENV'), packageFullName=chunk.node.packageFullName)
cmdSuffix = ''
if chunk.range:
if chunk.node.isParallelized:
cmdSuffix = ' ' + self.commandLineRange.format(**chunk.range.toDict())
return cmdPrefix + chunk.node.nodeDesc.commandLine.format(**chunk.node._cmdVars) + cmdSuffix

View file

@ -273,13 +273,16 @@ class Graph(BaseObject):
# Add node to the graph with raw attributes values
self._addNode(n, nodeName)
if setupProjectFile:
# Update filepath related members
self._setFilepath(filepath)
# Create graph edges by resolving attributes expressions
self._applyExpr()
if setupProjectFile:
# Update filepath related members
# Note: needs to be done at the end as it will trigger an updateInternals.
self._setFilepath(filepath)
return True
@property
def updateEnabled(self):
return self._updateEnabled
@ -558,7 +561,7 @@ class Graph(BaseObject):
candidates = self.findNodeCandidates('^' + nodeExpr)
if not candidates:
raise KeyError('No node candidate for "{}"'.format(nodeExpr))
elif len(candidates) > 1:
if len(candidates) > 1:
raise KeyError('Multiple node candidates for "{}": {}'.format(nodeExpr, str([c.name for c in candidates])))
return candidates[0]
@ -680,11 +683,11 @@ class Graph(BaseObject):
# (u,v) is a tree edge
self.dfsVisit(v, visitor, colors, nodeChildren, longestPathFirst) # TODO: avoid recursion
elif colors[v] == GRAY:
# (u,v) is a back edge
visitor.backEdge((u, v), self)
pass # (u,v) is a back edge
elif colors[v] == BLACK:
# (u,v) is a cross or forward edge
visitor.forwardOrCrossEdge((u, v), self)
pass # (u,v) is a cross or forward edge
visitor.finishEdge((u, v), self)
colors[u] = BLACK
visitor.finishVertex(u, self)
@ -739,7 +742,6 @@ class Graph(BaseObject):
def finishEdge(edge, graph):
if edge[0].hasStatus(Status.SUCCESS) or edge[1].hasStatus(Status.SUCCESS):
return
else:
edges.append(edge)
visitor.finishVertex = finishVertex
@ -871,23 +873,23 @@ class Graph(BaseObject):
flowEdges.append(link)
return flowEdges
def nodesFromNode(self, startNode, filterType=None):
def nodesFromNode(self, startNode, filterTypes=None):
"""
Return the node chain from startNode to the graph leaves.
Args:
startNode (Node): the node to start the visit from.
filterType (str): (optional) only return the nodes of the given type
filterTypes (str list): (optional) only return the nodes of the given types
(does not stop the visit, this is a post-process only)
Returns:
The list of nodes from startNode to the graph leaves following edges.
The list of nodes and edges, from startNode to the graph leaves following edges.
"""
nodes = []
edges = []
visitor = Visitor()
def discoverVertex(vertex, graph):
if not filterType or vertex.nodeType == filterType:
if not filterTypes or vertex.nodeType in filterTypes:
nodes.append(vertex)
visitor.discoverVertex = discoverVertex

View file

@ -127,6 +127,92 @@ class StatusData:
self.sessionUid = d.get('sessionUid', '')
class LogManager:
    """
    Manage the log file of a NodeChunk: a python ``logging`` logger writing to the
    chunk's log file, plus an optional ASCII progress bar appended to the same file.
    """

    # Time format used for every log record timestamp.
    dateTimeFormatting = '%H:%M:%S'

    def __init__(self, chunk):
        self.chunk = chunk
        # One named logger per node; handlers are (re)attached in configureLogger().
        self.logger = logging.getLogger(chunk.node.getName())

    class Formatter(logging.Formatter):
        """Standard formatter that lower-cases the level name (e.g. 'info' instead of 'INFO')."""
        def format(self, record):
            # Make level name lower case
            record.levelname = record.levelname.lower()
            return logging.Formatter.format(self, record)

    def configureLogger(self):
        """(Re)attach a single FileHandler writing to the chunk's log file."""
        # Drop handlers from any previous run so records are not duplicated.
        for handler in self.logger.handlers[:]:
            self.logger.removeHandler(handler)
        handler = logging.FileHandler(self.chunk.logFile)
        formatter = self.Formatter('[%(asctime)s.%(msecs)03d][%(levelname)s] %(message)s', self.dateTimeFormatting)
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)

    def start(self, level):
        """
        Begin a logging session at the given textual level ('debug', 'info', ...).

        Truncates the log file and resets the progress bar state.
        """
        # Clear log file
        open(self.chunk.logFile, 'w').close()
        self.configureLogger()
        self.logger.setLevel(self.textToLevel(level))
        self.progressBar = False

    def end(self):
        """Close all handlers so the log file is no longer locked (e.g. on Windows)."""
        for handler in self.logger.handlers[:]:
            # Stops the file being locked
            handler.close()

    def makeProgressBar(self, end, message=''):
        """
        Append a 51-column ASCII progress bar scale to the log file.

        Args:
            end (int): the value that represents 100% progress (must be > 0).
            message (str): optional text written above the bar.
        """
        assert end > 0
        assert not self.progressBar
        self.progressEnd = end
        self.currentProgressTics = 0
        self.progressBar = True
        # The 'with' blocks close the file automatically; no explicit close needed.
        with open(self.chunk.logFile, 'a') as f:
            if message:
                f.write(message+'\n')
            f.write('0% 10 20 30 40 50 60 70 80 90 100%\n')
            f.write('|----|----|----|----|----|----|----|----|----|----|\n\n')
        with open(self.chunk.logFile, 'r') as f:
            content = f.read()
            # Remember where tics must be inserted: just before the trailing newline.
            self.progressBarPosition = content.rfind('\n')

    def updateProgressBar(self, value):
        """
        Advance the progress bar to 'value' (0 <= value <= the 'end' given to makeProgressBar).

        Inserts '*' characters at the recorded bar position; the file only ever grows,
        so rewriting from the start without truncation is safe.
        """
        assert self.progressBar
        assert value <= self.progressEnd
        tics = round((value/self.progressEnd)*51)
        with open(self.chunk.logFile, 'r+') as f:
            text = f.read()
            for i in range(tics-self.currentProgressTics):
                text = text[:self.progressBarPosition]+'*'+text[self.progressBarPosition:]
            f.seek(0)
            f.write(text)
        self.currentProgressTics = tics

    def completeProgressBar(self):
        """Mark the current progress bar as finished, allowing a new one to be created."""
        assert self.progressBar
        self.progressBar = False

    def textToLevel(self, text):
        """Map a textual level name to the logging module constant (NOTSET when unknown)."""
        return {
            'critical': logging.CRITICAL,
            'error': logging.ERROR,
            'warning': logging.WARNING,
            'info': logging.INFO,
            'debug': logging.DEBUG,
        }.get(text, logging.NOTSET)
runningProcesses = {}
@ -142,6 +228,7 @@ class NodeChunk(BaseObject):
super(NodeChunk, self).__init__(parent)
self.node = node
self.range = range
self.logManager = LogManager(self)
self.status = StatusData(node.name, node.nodeType, node.packageName, node.packageVersion)
self.statistics = stats.Statistics()
self.statusFileLastModTime = -1
@ -164,6 +251,10 @@ class NodeChunk(BaseObject):
def statusName(self):
return self.status.status.name
@property
def logger(self):
    """The python logging.Logger of this chunk (owned by its LogManager)."""
    return self.logManager.logger
@property
def execModeName(self):
return self.status.execMode.name
@ -402,6 +493,9 @@ class BaseNode(BaseObject):
t, idx = name.split("_")
return "{}{}".format(t, idx if int(idx) > 1 else "")
def getDocumentation(self):
    """Return the documentation text declared on this node's description (nodeDesc)."""
    return self.nodeDesc.documentation
@property
def packageFullName(self):
return '-'.join([self.packageName, self.packageVersion])
@ -432,6 +526,9 @@ class BaseNode(BaseObject):
def getAttributes(self):
return self._attributes
def hasAttribute(self, name):
    """Return whether this node has an attribute named 'name'."""
    # NOTE(review): membership is tested against _attributes.keys(); whether
    # 'name in self._attributes' would be equivalent depends on the container's
    # __contains__ (not visible here) — left as-is.
    return name in self._attributes.keys()
def _applyExpr(self):
for attr in self._attributes:
attr._applyExpr()
@ -472,11 +569,29 @@ class BaseNode(BaseObject):
""" Compute node uids by combining associated attributes' uids. """
for uidIndex, associatedAttributes in self.attributesPerUid.items():
# uid is computed by hashing the sorted list of tuple (name, value) of all attributes impacting this uid
uidAttributes = [(a.getName(), a.uid(uidIndex)) for a in associatedAttributes]
uidAttributes = [(a.getName(), a.uid(uidIndex)) for a in associatedAttributes if a.enabled]
uidAttributes.sort()
self._uids[uidIndex] = hashValue(uidAttributes)
def _buildCmdVars(self):
def _buildAttributeCmdVars(cmdVars, name, attr):
if attr.enabled:
if attr.attributeDesc.group is not None:
# if there is a valid command line "group"
v = attr.getValueStr()
cmdVars[name] = '--{name} {value}'.format(name=name, value=v)
cmdVars[name + 'Value'] = str(v)
if v:
cmdVars[attr.attributeDesc.group] = cmdVars.get(attr.attributeDesc.group, '') + \
' ' + cmdVars[name]
elif isinstance(attr, GroupAttribute):
assert isinstance(attr.value, DictModel)
# if the GroupAttribute is not set in a single command line argument,
# the sub-attributes may need to be exposed individually
for v in attr._value:
_buildAttributeCmdVars(cmdVars, v.name, v)
""" Generate command variables using input attributes and resolved output attributes names and values. """
for uidIndex, value in self._uids.items():
self._cmdVars['uid{}'.format(uidIndex)] = value
@ -485,14 +600,7 @@ class BaseNode(BaseObject):
for name, attr in self._attributes.objects.items():
if attr.isOutput:
continue # skip outputs
v = attr.getValueStr()
self._cmdVars[name] = '--{name} {value}'.format(name=name, value=v)
self._cmdVars[name + 'Value'] = str(v)
if v:
self._cmdVars[attr.attributeDesc.group] = self._cmdVars.get(attr.attributeDesc.group, '') + \
' ' + self._cmdVars[name]
_buildAttributeCmdVars(self._cmdVars, name, attr)
# For updating output attributes invalidation values
cmdVarsNoCache = self._cmdVars.copy()
@ -507,8 +615,14 @@ class BaseNode(BaseObject):
if not isinstance(attr.attributeDesc, desc.File):
continue
attr.value = attr.attributeDesc.value.format(**self._cmdVars)
attr._invalidationValue = attr.attributeDesc.value.format(**cmdVarsNoCache)
defaultValue = attr.defaultValue()
try:
attr.value = defaultValue.format(**self._cmdVars)
attr._invalidationValue = defaultValue.format(**cmdVarsNoCache)
except KeyError as e:
logging.warning('Invalid expression with missing key on "{nodeName}.{attrName}" with value "{defaultValue}".\nError: {err}'.format(nodeName=self.name, attrName=attr.name, defaultValue=defaultValue, err=str(e)))
except ValueError as e:
logging.warning('Invalid expression value on "{nodeName}.{attrName}" with value "{defaultValue}".\nError: {err}'.format(nodeName=self.name, attrName=attr.name, defaultValue=defaultValue, err=str(e)))
v = attr.getValueStr()
self._cmdVars[name] = '--{name} {value}'.format(name=name, value=v)
@ -520,7 +634,7 @@ class BaseNode(BaseObject):
@property
def isParallelized(self):
return bool(self.nodeDesc.parallelization)
return bool(self.nodeDesc.parallelization) if meshroom.useMultiChunks else False
@property
def nbParallelizationBlocks(self):
@ -534,6 +648,9 @@ class BaseNode(BaseObject):
return False
return True
def _isComputed(self):
    """Whether this node has the SUCCESS status (delegates aggregation to hasStatus)."""
    return self.hasStatus(Status.SUCCESS)
@Slot()
def clearData(self):
""" Delete this Node internal folder.
@ -604,6 +721,10 @@ class BaseNode(BaseObject):
"""
if self.nodeDesc:
self.nodeDesc.update(self)
for attr in self._attributes:
attr.updateInternals()
# Update chunks splitting
self._updateChunks()
# Retrieve current internal folder (if possible)
@ -695,6 +816,7 @@ class BaseNode(BaseObject):
name = Property(str, getName, constant=True)
label = Property(str, getLabel, constant=True)
nodeType = Property(str, nodeType.fget, constant=True)
documentation = Property(str, getDocumentation, constant=True)
positionChanged = Signal()
position = Property(Variant, position.fget, position.fset, notify=positionChanged)
x = Property(float, lambda self: self._position.x, notify=positionChanged)
@ -711,6 +833,7 @@ class BaseNode(BaseObject):
size = Property(int, getSize, notify=sizeChanged)
globalStatusChanged = Signal()
globalStatus = Property(str, lambda self: self.getGlobalStatus().name, notify=globalStatusChanged)
isComputed = Property(bool, _isComputed, notify=globalStatusChanged)
class Node(BaseNode):
@ -822,14 +945,9 @@ class CompatibilityNode(BaseNode):
self.splitCount = self.parallelization.get("split", 1)
self.setSize(self.parallelization.get("size", 1))
# inputs matching current type description
self._commonInputs = []
# create input attributes
for attrName, value in self._inputs.items():
matchDesc = self._addAttribute(attrName, value, False)
# store attributes that could be used during node upgrade
if matchDesc:
self._commonInputs.append(attrName)
self._addAttribute(attrName, value, False)
# create outputs attributes
for attrName, value in self.outputs.items():
@ -893,7 +1011,7 @@ class CompatibilityNode(BaseNode):
return desc.StringParam(**params)
@staticmethod
def attributeDescFromName(refAttributes, name, value):
def attributeDescFromName(refAttributes, name, value, conform=False):
"""
Try to find a matching attribute description in refAttributes for given attribute 'name' and 'value'.
@ -910,8 +1028,9 @@ class CompatibilityNode(BaseNode):
# consider this value matches description:
# - if it's a serialized link expression (no proper value to set/evaluate)
# - or if it passes the 'matchDescription' test
if attrDesc and (Attribute.isLinkExpression(value) or attrDesc.matchDescription(value)):
if attrDesc and (Attribute.isLinkExpression(value) or attrDesc.matchDescription(value, conform)):
return attrDesc
return None
def _addAttribute(self, name, val, isOutput):
@ -985,8 +1104,16 @@ class CompatibilityNode(BaseNode):
if not self.canUpgrade:
raise NodeUpgradeError(self.name, "no matching node type")
# TODO: use upgrade method of node description if available
# inputs matching current type description
commonInputs = []
for attrName, value in self._inputs.items():
if self.attributeDescFromName(self.nodeDesc.inputs, attrName, value, conform=True):
# store attributes that could be used during node upgrade
commonInputs.append(attrName)
return Node(self.nodeType, position=self.position,
**{key: value for key, value in self.inputs.items() if key in self._commonInputs})
**{key: value for key, value in self.inputs.items() if key in commonInputs})
compatibilityIssue = Property(int, lambda self: self.issue.value, constant=True)
canUpgrade = Property(bool, canUpgrade.fget, constant=True)

View file

@ -13,3 +13,10 @@ else:
unicode = unicode
bytes = str
basestring = basestring
try:
    # Python 3.3+: the abstract base classes live in collections.abc
    from collections.abc import Sequence, Iterable
except ImportError:
    # Python 2 fallback: the ABCs were exposed directly from collections
    from collections import Sequence, Iterable

View file

@ -303,7 +303,7 @@ class StatisticsThread(threading.Thread):
if self.proc.is_running():
self.updateStats()
return
except (KeyboardInterrupt, SystemError, GeneratorExit):
except (KeyboardInterrupt, SystemError, GeneratorExit, psutil.NoSuchProcess):
pass
def stopRequest(self):

View file

@ -6,13 +6,66 @@ import os
from meshroom.core.graph import Graph, GraphModification
# Supported image extensions
imageExtensions = ('.jpg', '.jpeg', '.tif', '.tiff', '.png', '.exr', '.rw2', '.cr2', '.nef', '.arw')
videoExtensions = ('.avi', '.mov', '.qt',
# Supported image extensions, grouped by format family.
# Every entry must be a dot-prefixed, comma-terminated string: a missing comma
# silently merges two adjacent literals into one bogus extension.
imageExtensions = (
    # bmp:
    '.bmp',
    # cineon:
    '.cin',
    # dds:
    '.dds',
    # dpx:
    '.dpx',
    # gif:
    '.gif',
    # hdr:
    '.hdr', '.rgbe',
    # ico:
    '.ico',
    # iff:
    '.iff', '.z',
    # jpeg:
    '.jpg', '.jpe', '.jpeg', '.jif', '.jfif', '.jfi',
    # jpeg2000:
    '.jp2', '.j2k', '.j2c',
    # openexr:
    '.exr', '.sxr', '.mxr',
    # png:
    '.png',
    # pnm:
    '.ppm', '.pgm', '.pbm', '.pnm', '.pfm',
    # psd:
    '.psd', '.pdd', '.psb',
    # ptex:
    '.ptex', '.ptx',
    # raw:
    '.bay', '.bmq', '.cr2', '.cr3', '.crw', '.cs1', '.dc2', '.dcr', '.dng', '.erf', '.fff', '.k25', '.kdc', '.mdc', '.mos', '.mrw', '.nef', '.orf', '.pef', '.pxn', '.raf', '.raw', '.rdc', '.sr2', '.srf', '.x3f', '.arw', '.3fr', '.cine', '.ia', '.kc2', '.mef', '.nrw', '.qtk', '.rw2', '.sti', '.rwl', '.srw', '.drf', '.dsc', '.cap', '.iiq', '.rwz',
    # rla:
    '.rla',
    # sgi:
    '.sgi', '.rgb', '.rgba', '.bw', '.int', '.inta',
    # socket:
    '.socket',
    # softimage:
    '.pic',
    # tiff:
    '.tiff', '.tif', '.tx', '.env', '.sm', '.vsm',
    # targa:
    '.tga', '.tpic',
    # webp:
    '.webp',
    # zfile:
    '.zfile',
    # osl:
    '.osl', '.oso', '.oslgroup', '.oslbody',
)
# Supported video extensions.
videoExtensions = (
    '.avi', '.mov', '.qt',
    '.mkv', '.webm',
    '.mp4', '.mpg', '.mpeg', '.m2v', '.m4v',
    '.wmv',
    '.ogv', '.ogg',
    '.mxf',
)
# Extensions of files carrying panorama metadata.
# The trailing comma is required: ('.xml') without it is a plain string,
# which gives substring semantics to membership tests instead of tuple membership.
panoramaInfoExtensions = ('.xml',)
@ -90,7 +143,7 @@ def findFilesByTypeInFolder(folder, recursive=False):
return output
def hdri(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None):
def hdri(inputImages=None, inputViewpoints=None, inputIntrinsics=None, output='', graph=None):
"""
Create a new Graph with a complete HDRI pipeline.
@ -107,16 +160,27 @@ def hdri(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), out
with GraphModification(graph):
nodes = hdriPipeline(graph)
cameraInit = nodes[0]
if inputImages:
cameraInit.viewpoints.extend([{'path': image} for image in inputImages])
if inputViewpoints:
cameraInit.viewpoints.extend(inputViewpoints)
if inputIntrinsics:
cameraInit.intrinsics.extend(inputIntrinsics)
if output:
stitching = nodes[-1]
graph.addNewNode('Publish', output=output, inputFiles=[stitching.output])
imageProcessing = nodes[-1]
graph.addNewNode('Publish', output=output, inputFiles=[imageProcessing.outputImages])
return graph
def hdriFisheye(inputImages=None, inputViewpoints=None, inputIntrinsics=None, output='', graph=None):
    """
    Create a new Graph with a complete HDRI pipeline configured for full-fisheye optics.

    Args:
        inputImages (list of str): paths to the input images.
        inputViewpoints (list): viewpoints to add to the CameraInit node.
        inputIntrinsics (list): intrinsics to add to the CameraInit node.
        output (str): path to the output folder for the Publish node.
        graph (Graph): (optional) the graph to fill; a new one is created otherwise.

    Returns:
        Graph: the created (or given) graph with the fisheye HDRI pipeline.
    """
    if not graph:
        graph = Graph('HDRI-Fisheye')
    with GraphModification(graph):
        # Build the regular HDRI pipeline into the graph...
        hdri(inputImages, inputViewpoints, inputIntrinsics, output, graph)
        # ...then switch every PanoramaInit node to the fisheye workflow.
        for node in graph.nodesByType("PanoramaInit"):
            node.attribute("useFisheye").value = True
    return graph
def hdriPipeline(graph):
"""
@ -128,46 +192,77 @@ def hdriPipeline(graph):
list of Node: the created nodes
"""
cameraInit = graph.addNewNode('CameraInit')
try:
# fisheye4 does not work well in the ParoramaEstimation, so here we avoid to use it.
cameraInit.attribute('allowedCameraModels').value.remove("fisheye4")
except ValueError:
pass
ldr2hdr = graph.addNewNode('LDRToHDR',
panoramaPrepareImages = graph.addNewNode('PanoramaPrepareImages',
input=cameraInit.output)
ldr2hdrSampling = graph.addNewNode('LdrToHdrSampling',
input=panoramaPrepareImages.output)
ldr2hdrCalibration = graph.addNewNode('LdrToHdrCalibration',
input=ldr2hdrSampling.input,
samples=ldr2hdrSampling.output)
ldr2hdrMerge = graph.addNewNode('LdrToHdrMerge',
input=ldr2hdrCalibration.input,
response=ldr2hdrCalibration.response)
featureExtraction = graph.addNewNode('FeatureExtraction',
input=ldr2hdr.outSfMDataFilename)
featureExtraction.describerPreset.value = 'ultra'
imageMatching = graph.addNewNode('ImageMatching',
input=ldr2hdrMerge.outSfMData,
describerPreset='high')
panoramaInit = graph.addNewNode('PanoramaInit',
input=featureExtraction.input,
featuresFolders=[featureExtraction.output])
dependency=[featureExtraction.output] # Workaround for tractor submission with a fake dependency
)
imageMatching = graph.addNewNode('ImageMatching',
input=panoramaInit.outSfMData,
featuresFolders=[featureExtraction.output],
method='FrustumOrVocabularyTree')
featureMatching = graph.addNewNode('FeatureMatching',
input=imageMatching.input,
featuresFolders=imageMatching.featuresFolders,
imagePairsList=imageMatching.output)
panoramaExternalInfo = graph.addNewNode('PanoramaExternalInfo',
input=ldr2hdr.outSfMDataFilename,
matchesFolders=[featureMatching.output] # Workaround for tractor submission with a fake dependency
)
panoramaEstimation = graph.addNewNode('PanoramaEstimation',
input=panoramaExternalInfo.outSfMDataFilename,
input=featureMatching.input,
featuresFolders=featureMatching.featuresFolders,
matchesFolders=[featureMatching.output])
panoramaOrientation = graph.addNewNode('SfMTransform',
input=panoramaEstimation.output,
method='from_single_camera')
panoramaWarping = graph.addNewNode('PanoramaWarping',
input=panoramaEstimation.outSfMDataFilename)
input=panoramaOrientation.output)
panoramaCompositing = graph.addNewNode('PanoramaCompositing',
input=panoramaWarping.output)
input=panoramaWarping.input,
warpingFolder=panoramaWarping.output)
imageProcessing = graph.addNewNode('ImageProcessing',
input=panoramaCompositing.output,
fillHoles=True,
extension='exr')
return [
cameraInit,
featureExtraction,
panoramaInit,
imageMatching,
featureMatching,
panoramaExternalInfo,
panoramaEstimation,
panoramaOrientation,
panoramaWarping,
panoramaCompositing,
imageProcessing,
]
@ -278,8 +373,7 @@ def mvsPipeline(graph, sfm=None):
depthMapsFolder=depthMap.output)
meshing = graph.addNewNode('Meshing',
input=depthMapFilter.input,
depthMapsFolder=depthMapFilter.depthMapsFolder,
depthMapsFilterFolder=depthMapFilter.output)
depthMapsFolder=depthMapFilter.output)
meshFiltering = graph.addNewNode('MeshFiltering',
inputMesh=meshing.outputMesh)
texturing = graph.addNewNode('Texturing',

View file

@ -1,49 +0,0 @@
__version__ = "1.0"
import json
import os
from meshroom.core import desc
class CameraDownscale(desc.CommandLineNode):
    """
    Wrapper node for the 'aliceVision_cameraDownscale' command line:
    rescales the cameras/images of an SfM scene by a given factor
    (per the 'rescalefactor' description: newsize = rescalefactor * oldsize).
    """
    # {allParams} expands all declared input/output parameters on the command line.
    commandLine = 'aliceVision_cameraDownscale {allParams}'
    # The node's size follows the 'input' attribute.
    size = desc.DynamicNodeSize('input')

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description="SfM Data File",
            value='',
            uid=[0],  # participates in the node's uid (invalidates on change)
        ),
        desc.FloatParam(
            name='rescalefactor',
            label='RescaleFactor',
            description='Newsize = rescalefactor * oldsize',
            value=0.5,
            range=(0.0, 1.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='Verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],  # does not affect the uid: changing verbosity never invalidates
        ),
    ]

    outputs = [
        desc.File(
            name='outSfMDataFilename',
            label='Output SfMData File',
            description='Path to the output sfmdata file',
            value=desc.Node.internalFolder + 'sfmData.abc',
            uid=[],
        )
    ]

View file

@ -1,10 +1,11 @@
__version__ = "2.0"
__version__ = "3.0"
import os
import json
import psutil
import shutil
import tempfile
import logging
from meshroom.core import desc
@ -16,18 +17,39 @@ Viewpoint = [
desc.IntParam(name="intrinsicId", label="Intrinsic", description="Internal Camera Parameters", value=-1, uid=[0], range=None),
desc.IntParam(name="rigId", label="Rig", description="Rig Parameters", value=-1, uid=[0], range=None),
desc.IntParam(name="subPoseId", label="Rig Sub-Pose", description="Rig Sub-Pose Parameters", value=-1, uid=[0], range=None),
desc.StringParam(name="metadata", label="Image Metadata", description="", value="", uid=[], advanced=True),
desc.StringParam(name="metadata", label="Image Metadata",
description="The configuration of the Viewpoints is based on the images metadata.\n"
"The important ones are:\n"
" * Focal Length: the focal length in mm.\n"
" * Make and Model: this information allows to convert the focal in mm into a focal length in pixel using an embedded sensor database.\n"
" * Serial Number: allows to uniquely identify a device so multiple devices with the same Make, Model can be differentiated and their internal parameters are optimized separately.",
value="", uid=[], advanced=True),
]
Intrinsic = [
desc.IntParam(name="intrinsicId", label="Id", description="Intrinsic UID", value=-1, uid=[0], range=None),
desc.FloatParam(name="pxInitialFocalLength", label="Initial Focal Length", description="Initial Guess on the Focal Length", value=-1.0, uid=[0], range=None),
desc.FloatParam(name="pxFocalLength", label="Focal Length", description="Known/Calibrated Focal Length", value=-1.0, uid=[0], range=None),
desc.ChoiceParam(name="type", label="Camera Type", description="Camera Type", value="", values=['', 'pinhole', 'radial1', 'radial3', 'brown', 'fisheye4'], exclusive=True, uid=[0]),
desc.FloatParam(name="pxInitialFocalLength", label="Initial Focal Length",
description="Initial Guess on the Focal Length (in pixels). \n"
"When we have an initial value from EXIF, this value is not accurate but cannot be wrong. \n"
"So this value is used to limit the range of possible values in the optimization. \n"
"If you put -1, this value will not be used and the focal length will not be bounded.",
value=-1.0, uid=[0], range=None),
desc.FloatParam(name="pxFocalLength", label="Focal Length", description="Known/Calibrated Focal Length (in pixels)", value=-1.0, uid=[0], range=None),
desc.ChoiceParam(name="type", label="Camera Type",
description="Mathematical Model used to represent a camera:\n"
" * pinhole: Simplest projective camera model without optical distortion (focal and optical center).\n"
" * radial1: Pinhole camera with one radial distortion parameter\n"
" * radial3: Pinhole camera with 3 radial distortion parameters\n"
" * brown: Pinhole camera with 3 radial and 2 tangential distortion parameters\n"
" * fisheye4: Pinhole camera with 4 distortion parameters suited for fisheye optics (like 120deg FoV)\n"
" * equidistant_r3: Non-projective camera model suited for full-fisheye optics (like 180deg FoV)\n",
value="", values=['', 'pinhole', 'radial1', 'radial3', 'brown', 'fisheye4', 'equidistant_r3'], exclusive=True, uid=[0]),
desc.IntParam(name="width", label="Width", description="Image Width", value=0, uid=[], range=(0, 10000, 1)),
desc.IntParam(name="height", label="Height", description="Image Height", value=0, uid=[], range=(0, 10000, 1)),
desc.StringParam(name="serialNumber", label="Serial Number", description="Device Serial Number (camera and lens combined)", value="", uid=[]),
desc.GroupAttribute(name="principalPoint", label="Principal Point", description="", groupDesc=[
desc.FloatParam(name="sensorWidth", label="Sensor Width", description="Sensor Width (mm)", value=36, uid=[], range=(0, 1000, 1)),
desc.FloatParam(name="sensorHeight", label="Sensor Height", description="Sensor Height (mm)", value=24, uid=[], range=(0, 1000, 1)),
desc.StringParam(name="serialNumber", label="Serial Number", description="Device Serial Number (Camera UID and Lens UID combined)", value="", uid=[]),
desc.GroupAttribute(name="principalPoint", label="Principal Point", description="Position of the Optical Center in the Image (i.e. the sensor surface).", groupDesc=[
desc.FloatParam(name="x", label="x", description="", value=0, uid=[], range=(0, 10000, 1)),
desc.FloatParam(name="y", label="y", description="", value=0, uid=[], range=(0, 10000, 1)),
]),
@ -94,6 +116,21 @@ class CameraInit(desc.CommandLineNode):
size = desc.DynamicNodeSize('viewpoints')
documentation = '''
This node describes your dataset. It lists the Viewpoints candidates, the guess about the type of optic, the initial focal length
and which images are sharing the same internal camera parameters, as well as potential cameras rigs.
When you import new images into Meshroom, this node is automatically configured from the analysis of the image metadata.
The software can support images without any metadata but it is recommended to have them for robustness.
### Metadata
Metadata allows images to be grouped together and provides an initialization of the focal length (in pixel unit).
The metadata needed are:
* **Focal Length**: the focal length in mm.
* **Make** & **Model**: this information allows to convert the focal in mm into a focal length in pixel using an embedded sensor database.
* **Serial Number**: allows to uniquely identify a device so multiple devices with the same Make, Model can be differentiated and their internal parameters are optimized separately (in the photogrammetry case).
'''
inputs = [
desc.ListAttribute(
name="viewpoints",
@ -122,7 +159,8 @@ class CameraInit(desc.CommandLineNode):
description='Empirical value for the field of view in degree.',
value=45.0,
range=(0, 180.0, 1),
uid=[0],
uid=[],
advanced=True,
),
desc.ChoiceParam(
name='groupCameraFallback',
@ -136,8 +174,44 @@ class CameraInit(desc.CommandLineNode):
values=['global', 'folder', 'image'],
value='folder',
exclusive=True,
uid=[0],
advanced=True
uid=[],
advanced=True,
),
desc.ChoiceParam(
name='allowedCameraModels',
label='Allowed Camera Models',
description='the Camera Models that can be attributed.',
value=['pinhole', 'radial1', 'radial3', 'brown', 'fisheye4', 'fisheye1'],
values=['pinhole', 'radial1', 'radial3', 'brown', 'fisheye4', 'fisheye1'],
exclusive=False,
uid=[],
joinChar=',',
advanced=True,
),
desc.ChoiceParam(
name='viewIdMethod',
label='ViewId Method',
description="Allows to choose the way the viewID is generated:\n"
" * metadata : Generate viewId from image metadata.\n"
" * filename : Generate viewId from file names using regex.",
value='metadata',
values=['metadata', 'filename'],
exclusive=True,
uid=[],
advanced=True,
),
desc.StringParam(
name='viewIdRegex',
label='ViewId Regex',
description='Regex used to catch number used as viewId in filename.'
'You should capture specific parts of the filename with parenthesis to define matching elements. (only number will works)\n'
'Some examples of patterns:\n'
' - Match the longest number at the end of filename (default value): ".*?(\d+)"\n'
' - Match the first number found in filename : "(\d+).*"\n',
value='.*?(\d+)',
uid=[],
advanced=True,
enabled=lambda node: node.viewIdMethod.value == 'filename',
),
desc.ChoiceParam(
name='verboseLevel',
@ -160,6 +234,9 @@ class CameraInit(desc.CommandLineNode):
),
]
def readSfMData(self, sfmFile):
return readSfMData(sfmFile)
def buildIntrinsics(self, node, additionalViews=()):
""" Build intrinsics from node current views and optional additional views
@ -183,7 +260,7 @@ class CameraInit(desc.CommandLineNode):
os.makedirs(os.path.join(tmpCache, node.internalFolder))
self.createViewpointsFile(node, additionalViews)
cmd = self.buildCommandLine(node.chunks[0])
# logging.debug(' - commandLine:', cmd)
logging.debug(' - commandLine: {}'.format(cmd))
proc = psutil.Popen(cmd, stdout=None, stderr=None, shell=True)
stdout, stderr = proc.communicate()
# proc.wait()
@ -196,9 +273,12 @@ class CameraInit(desc.CommandLineNode):
cameraInitSfM = node.output.value
return readSfMData(cameraInitSfM)
except Exception:
except Exception as e:
logging.debug("[CameraInit] Error while building intrinsics: {}".format(str(e)))
raise
finally:
if os.path.exists(tmpCache):
logging.debug("[CameraInit] Remove temp files in: {}".format(tmpCache))
shutil.rmtree(tmpCache)
def createViewpointsFile(self, node, additionalViews=()):

View file

@ -7,6 +7,11 @@ class ConvertSfMFormat(desc.CommandLineNode):
commandLine = 'aliceVision_convertSfMFormat {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
Convert an SfM scene from one file format to another.
It can also be used to remove specific parts of from an SfM scene (like filter all 3D landmarks or filter 2D observations).
'''
inputs = [
desc.File(
name='input',

View file

@ -10,6 +10,16 @@ class DepthMap(desc.CommandLineNode):
parallelization = desc.Parallelization(blockSize=3)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
For each camera that have been estimated by the Structure-From-Motion, it estimates the depth value per pixel.
Adjust the downscale factor to compute depth maps at a higher/lower resolution.
Use a downscale factor of one (full-resolution) only if the quality of the input images is really high (camera on a tripod with high-quality optics).
## Online
[https://alicevision.org/#photogrammetry/depth_maps_estimation](https://alicevision.org/#photogrammetry/depth_maps_estimation)
'''
inputs = [
desc.File(
name='input',

View file

@ -10,6 +10,11 @@ class DepthMapFilter(desc.CommandLineNode):
parallelization = desc.Parallelization(blockSize=10)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
Filter depth map values that are not coherent in multiple depth maps.
This allows to filter unstable points before starting the fusion of all depth maps in the Meshing node.
'''
inputs = [
desc.File(
name='input',

View file

@ -6,6 +6,11 @@ from meshroom.core import desc
class ExportAnimatedCamera(desc.CommandLineNode):
commandLine = 'aliceVision_exportAnimatedCamera {allParams}'
documentation = '''
Convert cameras from an SfM scene into an animated cameras in Alembic file format.
Based on the input image filenames, it will recognize the input video sequence to create an animated camera.
'''
inputs = [
desc.File(
name='input',

View file

@ -6,6 +6,13 @@ from meshroom.core import desc
class ExportMaya(desc.CommandLineNode):
commandLine = 'aliceVision_exportMeshroomMaya {allParams}'
documentation = '''
Export a scene for Autodesk Maya, with an Alembic file describing the SfM: cameras and 3D points.
It will export half-size undistorted images to use as image planes for cameras and also export thumbnails.
Use the MeshroomMaya plugin, to load the ABC file. It will recognize the file structure and will setup the scene.
MeshroomMaya contains a user interface to browse all cameras.
'''
inputs = [
desc.File(
name='input',

View file

@ -9,6 +9,26 @@ class FeatureExtraction(desc.CommandLineNode):
parallelization = desc.Parallelization(blockSize=40)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
This node extracts distinctive groups of pixels that are, to some extent, invariant to changing camera viewpoints during image acquisition.
Hence, a feature in the scene should have similar feature descriptions in all images.
This node implements multiple methods:
* **SIFT**
The most standard method. This is the default and recommended value for all use cases.
* **AKAZE**
AKAZE can be interesting solution to extract features in challenging condition. It could be able to match wider angle than SIFT but has drawbacks.
It may extract to many features, the repartition is not always good.
It is known to be good on challenging surfaces such as skin.
* **CCTAG**
CCTag is a marker type with 3 or 4 crowns. You can put markers in the scene during the shooting session to automatically re-orient and re-scale the scene to a known size.
It is robust to motion-blur, depth-of-field, occlusion. Be careful to have enough white margin around your CCTags.
## Online
[https://alicevision.org/#photogrammetry/natural_feature_extraction](https://alicevision.org/#photogrammetry/natural_feature_extraction)
'''
inputs = [
desc.File(
name='input',

View file

@ -9,6 +9,28 @@ class FeatureMatching(desc.CommandLineNode):
parallelization = desc.Parallelization(blockSize=20)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
This node performs the matching of all features between the candidate image pairs.
It is performed in 2 steps:
1/ **Photometric Matches**
It performs the photometric matches between the set of features descriptors from the 2 input images.
For each feature descriptor on the first image, it looks for the 2 closest descriptors in the second image and uses a relative threshold between them.
This assumption kill features on repetitive structure but has proved to be a robust criterion.
2/ **Geometric Filtering**
It performs a geometric filtering of the photometric match candidates.
It uses the features positions in the images to make a geometric filtering by using epipolar geometry in an outlier detection framework
called RANSAC (RANdom SAmple Consensus). It randomly selects a small set of feature correspondences and compute the fundamental (or essential) matrix,
then it checks the number of features that validates this model and iterate through the RANSAC framework.
## Online
[https://alicevision.org/#photogrammetry/feature_matching](https://alicevision.org/#photogrammetry/feature_matching)
'''
inputs = [
desc.File(
name='input',
@ -77,12 +99,13 @@ class FeatureMatching(desc.CommandLineNode):
label='Geometric Filter Type',
description='Geometric validation method to filter features matches: \n'
' * fundamental_matrix\n'
' * fundamental_with_distortion\n'
' * essential_matrix\n'
' * homography_matrix\n'
' * homography_growing\n'
' * no_filtering',
value='fundamental_matrix',
values=['fundamental_matrix', 'essential_matrix', 'homography_matrix', 'homography_growing', 'no_filtering'],
values=['fundamental_matrix', 'fundamental_with_distortion', 'essential_matrix', 'homography_matrix', 'homography_growing', 'no_filtering'],
exclusive=True,
uid=[0],
advanced=True,
@ -116,6 +139,16 @@ class FeatureMatching(desc.CommandLineNode):
uid=[0],
advanced=True,
),
desc.FloatParam(
name='knownPosesGeometricErrorMax',
label='Known Poses Geometric Error Max',
description='Maximum error (in pixels) allowed for features matching guided by geometric information from known camera poses.\n'
'If set to 0 it lets the ACRansac select an optimal value.',
value=5.0,
range=(0.0, 100.0, 1.0),
uid=[0],
advanced=True,
),
desc.IntParam(
name='maxMatches',
label='Max Matches',
@ -140,6 +173,14 @@ class FeatureMatching(desc.CommandLineNode):
value=False,
uid=[0],
),
desc.BoolParam(
name='matchFromKnownCameraPoses',
label='Match From Known Camera Poses',
description='Enable the usage of geometric information from known camera poses to guide the feature matching.\n'
'If some cameras have unknown poses (so there is no geometric prior), the standard feature matching will be performed.',
value=False,
uid=[0],
),
desc.BoolParam(
name='exportDebugFiles',
label='Export Debug Files',

View file

@ -10,6 +10,11 @@ class GlobalSfM(desc.CommandLineNode):
commandLine = 'aliceVision_globalSfM {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
Performs the Structure-From-Motion with a global approach.
It is known to be faster but less robust to challenging datasets than the Incremental approach.
'''
inputs = [
desc.File(
name='input',
@ -99,16 +104,23 @@ class GlobalSfM(desc.CommandLineNode):
outputs = [
desc.File(
name='output',
label='Output Folder',
description='',
value=desc.Node.internalFolder,
label='Output SfMData File',
description='Path to the output sfmdata file',
value=desc.Node.internalFolder + 'sfm.abc',
uid=[],
),
desc.File(
name='outSfMDataFilename',
label='Output SfMData File',
description='Path to the output sfmdata file',
value=desc.Node.internalFolder + 'SfmData.abc',
name='outputViewsAndPoses',
label='Output Poses',
description='''Path to the output sfmdata file with cameras (views and poses).''',
value=desc.Node.internalFolder + 'cameras.sfm',
uid=[],
),
desc.File(
name='extraInfoFolder',
label='Output Folder',
description='Folder for intermediate reconstruction files and additional reconstruction information files.',
value=desc.Node.internalFolder,
uid=[],
),
]

View file

@ -1,89 +0,0 @@
__version__ = "1.0"
from meshroom.core import desc
class HDRIstitching(desc.CommandLineNode):
commandLine = 'aliceVision_utils_fisheyeProjection {allParams}'
inputs = [
desc.ListAttribute(
elementDesc=desc.File(
name='inputFile',
label='Input File/Folder',
description="",
value='',
uid=[0],
),
name='input',
label='Input Folder',
description="List of fisheye images or folder containing them."
),
desc.FloatParam(
name='blurWidth',
label='Blur Width',
description="Blur width of alpha channel for all fisheye (between 0 and 1). \n"
"Determine the transitions sharpness.",
value=0.2,
range=(0, 1, 0.1),
uid=[0],
),
desc.ListAttribute(
elementDesc=desc.FloatParam(
name='imageXRotation',
label='Image X Rotation',
description="",
value=0,
range=(-20, 20, 1),
uid=[0],
),
name='xRotation',
label='X Rotations',
description="Rotations in degree on axis X (horizontal axis) for each image.",
),
desc.ListAttribute(
elementDesc=desc.FloatParam(
name='imageYRotation',
label='Image Y Rotation',
description="",
value=0,
range=(-30, 30, 5),
uid=[0],
),
name='yRotation',
label='Y Rotations',
description="Rotations in degree on axis Y (vertical axis) for each image.",
),
desc.ListAttribute(
elementDesc=desc.FloatParam(
name='imageZRotation',
label='Image Z Rotation',
description="",
value=0,
range=(-10, 10, 1),
uid=[0],
),
name='zRotation',
label='Z Rotations',
description="Rotations in degree on axis Z (depth axis) for each image.",
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
exclusive=True,
uid=[],
),
]
outputs = [
desc.File(
name='output',
label='Output Panorama',
description="Output folder for panorama",
value=desc.Node.internalFolder,
uid=[],
),
]

View file

@ -1,4 +1,4 @@
__version__ = "1.0"
__version__ = "2.0"
import os
from meshroom.core import desc
@ -8,6 +8,30 @@ class ImageMatching(desc.CommandLineNode):
commandLine = 'aliceVision_imageMatching {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
The goal of this node is to select the image pairs to match. The ambition is to find the images that are looking to the same areas of the scene.
Thanks to this node, the FeatureMatching node will only compute the matches between the selected image pairs.
It provides multiple methods:
* **VocabularyTree**
It uses image retrieval techniques to find images that share some content without the cost of resolving all feature matches in details.
Each image is represented in a compact image descriptor which allows to compute the distance between all images descriptors very efficiently.
If your scene contains less than "Voc Tree: Minimal Number of Images", all image pairs will be selected.
* **Sequential**
If your input is a video sequence, you can use this option to link images between them over time.
* **SequentialAndVocabularyTree**
Combines sequential approach with Voc Tree to enable connections between keyframes at different times.
* **Exhaustive**
Export all image pairs.
* **Frustum**
If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.
* **FrustumOrVocabularyTree**
If images have known poses, use frustum intersection else use VocabularyTree.
## Online
[https://alicevision.org/#photogrammetry/image_matching](https://alicevision.org/#photogrammetry/image_matching)
'''
inputs = [
desc.File(
name='input',
@ -28,47 +52,79 @@ class ImageMatching(desc.CommandLineNode):
label="Features Folders",
description="Folder(s) containing the extracted features and descriptors."
),
desc.ChoiceParam(
name='method',
label='Method',
description='Method used to select the image pairs to match:\n'
' * VocabularyTree: It uses image retrieval techniques to find images that share some content without the cost of resolving all \n'
'feature matches in details. Each image is represented in a compact image descriptor which allows to compute the distance between all \n'
'images descriptors very efficiently. If your scene contains less than "Voc Tree: Minimal Number of Images", all image pairs will be selected.\n'
' * Sequential: If your input is a video sequence, you can use this option to link images between them over time.\n'
' * SequentialAndVocabularyTree: Combines sequential approach with VocTree to enable connections between keyframes at different times.\n'
' * Exhaustive: Export all image pairs.\n'
' * Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n'
' * FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n',
value='VocabularyTree',
values=['VocabularyTree', 'Sequential', 'SequentialAndVocabularyTree', 'Exhaustive', 'Frustum', 'FrustumOrVocabularyTree'],
exclusive=True,
uid=[0],
),
desc.File(
name='tree',
label='Tree',
label='Voc Tree: Tree',
description='Input name for the vocabulary tree file.',
value=os.environ.get('ALICEVISION_VOCTREE', ''),
uid=[],
enabled=lambda node: 'VocabularyTree' in node.method.value,
),
desc.File(
name='weights',
label='Weights',
label='Voc Tree: Weights',
description='Input name for the weight file, if not provided the weights will be computed on the database built with the provided set.',
value='',
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
),
desc.IntParam(
name='minNbImages',
label='Minimal Number of Images',
label='Voc Tree: Minimal Number of Images',
description='Minimal number of images to use the vocabulary tree. If we have less features than this threshold, we will compute all matching combinations.',
value=200,
range=(0, 500, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
),
desc.IntParam(
name='maxDescriptors',
label='Max Descriptors',
label='Voc Tree: Max Descriptors',
description='Limit the number of descriptors you load per image. Zero means no limit.',
value=500,
range=(0, 100000, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
),
desc.IntParam(
name='nbMatches',
label='Nb Matches',
label='Voc Tree: Nb Matches',
description='The number of matches to retrieve for each image (If 0 it will retrieve all the matches).',
value=50,
range=(0, 1000, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
),
desc.IntParam(
name='nbNeighbors',
label='Sequential: Nb Neighbors',
description='The number of neighbors to retrieve for each image (If 0 it will retrieve all the neighbors).',
value=50,
range=(0, 1000, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'Sequential' in node.method.value,
),
desc.ChoiceParam(
name='verboseLevel',

View file

@ -9,6 +9,14 @@ class ImageMatchingMultiSfM(desc.CommandLineNode):
# use both SfM inputs to define Node's size
size = desc.MultiDynamicNodeSize(['input', 'inputB'])
documentation = '''
The goal of this node is to select the image pairs to match in the context of an SfM augmentation.
The ambition is to find the images that are looking to the same areas of the scene.
Thanks to this node, the FeatureMatching node will only compute the matches between the selected image pairs.
## Online
[https://alicevision.org/#photogrammetry/image_matching](https://alicevision.org/#photogrammetry/image_matching)
'''
inputs = [
desc.File(
name='input',
@ -36,16 +44,25 @@ class ImageMatchingMultiSfM(desc.CommandLineNode):
label="Features Folders",
description="Folder(s) containing the extracted features and descriptors."
),
desc.ChoiceParam(
name='method',
label='Method',
description='Method used to select the image pairs to match.',
value='VocabularyTree',
values=['VocabularyTree', 'Sequential', 'SequentialAndVocabularyTree','Exhaustive','Frustum'],
exclusive=True,
uid=[0],
),
desc.File(
name='tree',
label='Tree',
label='Voc Tree: Tree',
description='Input name for the vocabulary tree file.',
value=os.environ.get('ALICEVISION_VOCTREE', ''),
uid=[],
),
desc.File(
name='weights',
label='Weights',
label='Voc Tree: Weights',
description='Input name for the weight file, if not provided the weights will be computed on the database built with the provided set.',
value='',
uid=[0],
@ -62,7 +79,7 @@ class ImageMatchingMultiSfM(desc.CommandLineNode):
),
desc.IntParam(
name='minNbImages',
label='Minimal Number of Images',
label='Voc Tree: Minimal Number of Images',
description='Minimal number of images to use the vocabulary tree. If we have less features than this threshold, we will compute all matching combinations.',
value=200,
range=(0, 500, 1),
@ -71,7 +88,7 @@ class ImageMatchingMultiSfM(desc.CommandLineNode):
),
desc.IntParam(
name='maxDescriptors',
label='Max Descriptors',
label='Voc Tree: Max Descriptors',
description='Limit the number of descriptors you load per image. Zero means no limit.',
value=500,
range=(0, 100000, 1),
@ -80,13 +97,22 @@ class ImageMatchingMultiSfM(desc.CommandLineNode):
),
desc.IntParam(
name='nbMatches',
label='Nb Matches',
label='Voc Tree: Nb Matches',
description='The number of matches to retrieve for each image (If 0 it will retrieve all the matches).',
value=50,
range=(0, 1000, 1),
uid=[0],
advanced=True,
),
desc.IntParam(
name='nbNeighbors',
label='Sequential: Nb Neighbors',
description='The number of neighbors to retrieve for each image (If 0 it will retrieve all the neighbors).',
value=50,
range=(0, 1000, 1),
uid=[0],
advanced=True,
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',

View file

@ -0,0 +1,318 @@
__version__ = "3.0"
from meshroom.core import desc
import os.path
def outputImagesValueFunct(attr):
    """Build the output path/pattern for the node's 'outputImages' attribute.

    The result depends on what the 'input' attribute points at: an SfM scene,
    a single file, a filename expression, or a plain folder.
    """
    baseName = os.path.basename(attr.node.input.value)
    stem, inExt = os.path.splitext(baseName)
    # Extension explicitly chosen on the node, if any ('' means "keep input format").
    chosenExt = ('.' + attr.node.extension.value) if attr.node.extension.value else None
    if inExt in ('.abc', '.sfm'):
        # SfM scene in input: one image per view, names unknown at this point.
        return desc.Node.internalFolder + '*' + (chosenExt or '.*')
    if inExt:
        # One or multiple explicit files in input.
        return desc.Node.internalFolder + stem + (chosenExt or inExt)
    if '*' in stem:
        # No extension but a wildcard: the input parameter is a filename
        # expression, so it still designates files.
        return desc.Node.internalFolder + stem + (chosenExt or '.*')
    # No extension and no expression: the input parameter is a folder path.
    return desc.Node.internalFolder + '*' + (chosenExt or '.*')
class ImageProcessing(desc.CommandLineNode):
commandLine = 'aliceVision_utils_imageProcessing {allParams}'
size = desc.DynamicNodeSize('input')
# parallelization = desc.Parallelization(blockSize=40)
# commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
Convert or apply filtering to the input images.
'''
inputs = [
desc.File(
name='input',
label='Input',
description='SfMData file input, image filenames or regex(es) on the image file path.\nsupported regex: \'#\' matches a single digit, \'@\' one or more digits, \'?\' one character and \'*\' zero or more.',
value='',
uid=[0],
),
desc.ListAttribute(
elementDesc=desc.File(
name="inputFolder",
label="input Folder",
description="",
value="",
uid=[0],
),
name="inputFolders",
label="Images input Folders",
description='Use images from specific folder(s).',
),
desc.ListAttribute(
elementDesc=desc.StringParam(
name="metadataFolder",
label="Metadata Folder",
description="",
value="",
uid=[0],
),
name="metadataFolders",
label="Metadata input Folders",
description='Use images metadata from specific folder(s).',
),
desc.ChoiceParam(
name='extension',
label='Output File Extension',
description='Output Image File Extension.',
value='',
values=['', 'exr', 'jpg', 'tiff', 'png'],
exclusive=True,
uid=[0],
),
desc.BoolParam(
name='reconstructedViewsOnly',
label='Only Reconstructed Views',
description='Process Only Reconstructed Views',
value=False,
uid=[0],
),
desc.BoolParam(
name='exposureCompensation',
label='Exposure Compensation',
description='Exposure Compensation',
value=False,
uid=[0],
),
desc.FloatParam(
name='scaleFactor',
label='ScaleFactor',
description='Scale Factor.',
value=1.0,
range=(0.0, 1.0, 0.01),
uid=[0],
),
desc.FloatParam(
name='contrast',
label='Contrast',
description='Contrast.',
value=1.0,
range=(0.0, 100.0, 0.1),
uid=[0],
),
desc.IntParam(
name='medianFilter',
label='Median Filter',
description='Median Filter.',
value=0,
range=(0, 10, 1),
uid=[0],
),
desc.BoolParam(
name='fillHoles',
label='Fill holes',
description='Fill holes.',
value=False,
uid=[0],
),
desc.GroupAttribute(name="sharpenFilter", label="Sharpen Filter", description="Sharpen Filtering Parameters.", joinChar=":", groupDesc=[
desc.BoolParam(
name='sharpenFilterEnabled',
label='Enable',
description='Use sharpen.',
value=False,
uid=[0],
),
desc.IntParam(
name='width',
label='Width',
description='Sharpen Width.',
value=3,
range=(1, 9, 2),
uid=[0],
enabled=lambda node: node.sharpenFilter.sharpenFilterEnabled.value,
),
desc.FloatParam(
name='contrast',
label='Contrast',
description='Sharpen Contrast.',
value=1.0,
range=(0.0, 100.0, 0.1),
uid=[0],
enabled=lambda node: node.sharpenFilter.sharpenFilterEnabled.value,
),
desc.FloatParam(
name='threshold',
label='Threshold',
description='Sharpen Threshold.',
value=0.0,
range=(0.0, 1.0, 0.01),
uid=[0],
enabled=lambda node: node.sharpenFilter.sharpenFilterEnabled.value,
),
]),
desc.GroupAttribute(name="bilateralFilter", label="Bilateral Filter", description="Bilateral Filtering Parameters.", joinChar=":", groupDesc=[
desc.BoolParam(
name='bilateralFilterEnabled',
label='Enable',
description='Bilateral Filter.',
value=False,
uid=[0],
),
desc.IntParam(
name='bilateralFilterDistance',
label='Distance',
description='Diameter of each pixel neighborhood that is used during bilateral filtering.\nCould be very slow for large filters, so it is recommended to use 5.',
value=0,
range=(0, 9, 1),
uid=[0],
enabled=lambda node: node.bilateralFilter.bilateralFilterEnabled.value,
),
desc.FloatParam(
name='bilateralFilterSigmaSpace',
label='Sigma Coordinate Space',
description='Bilateral Filter sigma in the coordinate space.',
value=0.0,
range=(0.0, 150.0, 0.01),
uid=[0],
enabled=lambda node: node.bilateralFilter.bilateralFilterEnabled.value,
),
desc.FloatParam(
name='bilateralFilterSigmaColor',
label='Sigma Color Space',
description='Bilateral Filter sigma in the color space.',
value=0.0,
range=(0.0, 150.0, 0.01),
uid=[0],
enabled=lambda node: node.bilateralFilter.bilateralFilterEnabled.value,
),
]),
desc.GroupAttribute(name="claheFilter", label="Clahe Filter", description="Clahe Filtering Parameters.", joinChar=":", groupDesc=[
desc.BoolParam(
name='claheEnabled',
label='Enable',
description='Use Contrast Limited Adaptive Histogram Equalization (CLAHE) Filter.',
value=False,
uid=[0],
),
desc.FloatParam(
name='claheClipLimit',
label='Clip Limit',
description='Sets Threshold For Contrast Limiting.',
value=4.0,
range=(0.0, 8.0, 1.0),
uid=[0],
enabled=lambda node: node.claheFilter.claheEnabled.value,
),
desc.IntParam(
name='claheTileGridSize',
label='Tile Grid Size',
description='Sets Size Of Grid For Histogram Equalization. Input Image Will Be Divided Into Equally Sized Rectangular Tiles.',
value=8,
range=(4, 64, 4),
uid=[0],
enabled=lambda node: node.claheFilter.claheEnabled.value,
),
]),
desc.GroupAttribute(name="noiseFilter", label="Noise Filter", description="Noise Filtering Parameters.", joinChar=":", groupDesc=[
desc.BoolParam(
name='noiseEnabled',
label='Enable',
description='Add Noise.',
value=False,
uid=[0],
),
desc.ChoiceParam(
name='noiseMethod',
label='Method',
description=" * method: There are several noise types to choose from:\n"
" * uniform: adds noise values uniformly distributed on range [A,B).\n"
" * gaussian: adds Gaussian (normal distribution) noise values with mean value A and standard deviation B.\n"
" * salt: changes to value A a portion of pixels given by B.\n",
value='uniform',
values=['uniform', 'gaussian', 'salt'],
exclusive=True,
uid=[0],
enabled=lambda node: node.noiseFilter.noiseEnabled.value,
),
desc.FloatParam(
name='noiseA',
label='A',
description='Parameter that has a different interpretation depending on the method chosen.',
value=0.0,
range=(0.0, 1.0, 0.0001),
uid=[0],
enabled=lambda node: node.noiseFilter.noiseEnabled.value,
),
desc.FloatParam(
name='noiseB',
label='B',
description='Parameter that has a different interpretation depending on the method chosen.',
value=1.0,
range=(0.0, 1.0, 0.0001),
uid=[0],
enabled=lambda node: node.noiseFilter.noiseEnabled.value,
),
desc.BoolParam(
name='noiseMono',
label='Mono',
description='If checked, a single noise value will be applied to all channels otherwise a separate noise value will be computed for each channel.',
value=True,
uid=[0],
enabled=lambda node: node.noiseFilter.noiseEnabled.value,
),
]),
desc.ChoiceParam(
name='outputFormat',
label='Output Image Format',
description='Allows you to choose the format of the output image.',
value='rgba',
values=['rgba', 'rgb', 'grayscale'],
exclusive=True,
uid=[0],
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='verbosity level (fatal, error, warning, info, debug, trace).',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
exclusive=True,
uid=[],
)
]
outputs = [
desc.File(
name='outSfMData',
label='Output sfmData',
description='Output sfmData.',
value=lambda attr: (desc.Node.internalFolder + os.path.basename(attr.node.input.value)) if (os.path.splitext(attr.node.input.value)[1] in ['.abc', '.sfm']) else '',
uid=[],
group='', # do not export on the command line
),
desc.File(
name='output',
label='Output Folder',
description='Output Images Folder.',
value=desc.Node.internalFolder,
uid=[],
),
desc.File(
name='outputImages',
label='Output Images',
description='Output Image Files.',
value= outputImagesValueFunct,
group='', # do not export on the command line
uid=[],
),
]

View file

@ -7,6 +7,13 @@ from meshroom.core import desc
class KeyframeSelection(desc.CommandLineNode):
commandLine = 'aliceVision_utils_keyframeSelection {allParams}'
documentation = '''
Allows to extract keyframes from a video and insert metadata.
It can extract frames from a synchronized multi-cameras rig.
You can extract frames at regular interval by configuring only the min/maxFrameStep.
'''
inputs = [
desc.ListAttribute(
elementDesc=desc.File(

View file

@ -0,0 +1,197 @@
__version__ = "2.0"
import json
from meshroom.core import desc
def findMetadata(d, keys, defaultValue):
    """Return the first metadata value found in dict `d` for any of `keys`.

    For each candidate key (in priority order), an exact dictionary lookup is
    tried first. Failing that, every entry of `d` is scanned with a normalized
    comparison: dictionary keys are lowercased, space-stripped and reduced to
    the last ':' / '/'-separated component, so e.g. 'Exif:ApertureValue'
    matches the candidate 'ApertureValue'.

    :param d: metadata dictionary (typically parsed EXIF-like data).
    :param keys: iterable of candidate key names, in priority order.
    :param defaultValue: value returned when no candidate matches.
    """
    for key in keys:
        v = d.get(key, None)
        if v is not None:
            return v
        # Precompute the normalized candidate once (was recomputed per entry).
        k = key.lower()
        for dk, dv in d.items():
            dkm = dk.lower().replace(" ", "")
            if dkm == k:
                return dv
            # Strip metadata namespaces, e.g. 'exif:fnumber' -> 'fnumber'.
            dkm = dkm.split(":")[-1]
            dkm = dkm.split("/")[-1]
            if dkm == k:
                return dv
    return defaultValue
class LdrToHdrCalibration(desc.CommandLineNode):
commandLine = 'aliceVision_LdrToHdrCalibration {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
Calibrate LDR to HDR response curve from samples
'''
inputs = [
desc.File(
name='input',
label='Input',
description='SfMData file.',
value='',
uid=[0],
),
desc.File(
name='samples',
label='Samples folder',
description='Samples folder',
value=desc.Node.internalFolder,
uid=[0],
),
desc.ChoiceParam(
name='calibrationMethod',
label='Calibration Method',
description="Method used for camera calibration \n"
" * Linear: Disable the calibration and assumes a linear Camera Response Function. If images are encoded in a known colorspace (like sRGB for JPEG), the images will be automatically converted to linear. \n"
" * Debevec: This is the standard method for HDR calibration. \n"
" * Grossberg: Based on learned database of cameras, it allows to reduce the CRF to few parameters while keeping all the precision. \n"
" * Laguerre: Simple but robust method estimating the minimal number of parameters. \n"
" * Robertson: First method for HDR calibration in the literature. \n",
values=['linear', 'debevec', 'grossberg', 'laguerre'],
value='debevec',
exclusive=True,
uid=[0],
),
desc.ChoiceParam(
name='calibrationWeight',
label='Calibration Weight',
description="Weight function used to calibrate camera response \n"
" * default (automatically selected according to the calibrationMethod) \n"
" * gaussian \n"
" * triangle \n"
" * plateau",
value='default',
values=['default', 'gaussian', 'triangle', 'plateau'],
exclusive=True,
uid=[0],
),
desc.IntParam(
name='userNbBrackets',
label='Number of Brackets',
description='Number of exposure brackets per HDR image (0 for automatic detection).',
value=0,
range=(0, 15, 1),
uid=[0],
group='user', # not used directly on the command line
),
desc.IntParam(
name='nbBrackets',
label='Automatic Nb Brackets',
description='Number of exposure brackets used per HDR image. It is detected automatically from input Viewpoints metadata if "userNbBrackets" is 0, else it is equal to "userNbBrackets".',
value=0,
range=(0, 10, 1),
uid=[],
),
desc.IntParam(
name='channelQuantizationPower',
label='Channel Quantization Power',
description='Quantization level like 8 bits or 10 bits.',
value=10,
range=(8, 14, 1),
uid=[0],
advanced=True,
),
desc.IntParam(
name='maxTotalPoints',
label='Max Number of Points',
description='Max number of points selected by the sampling strategy.\n'
'This ensures that this sampling step will extract a number of pixels values\n'
'that the calibration step can manage (in term of computation time and memory usage).',
value=1000000,
range=(8, 10000000, 1000),
uid=[0],
advanced=True,
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='verbosity level (fatal, error, warning, info, debug, trace).',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
exclusive=True,
uid=[],
)
]
outputs = [
desc.File(
name='response',
label='Output response File',
description='Path to the output response file',
value=desc.Node.internalFolder + 'response.csv',
uid=[],
)
]
@classmethod
def update(cls, node):
    """Auto-detect the number of exposure brackets from upstream viewpoint metadata.

    Reads exposure-related metadata (f-number, shutter speed, ISO) from each
    viewpoint of the upstream CameraInit node, groups images by exposure
    settings, and stores the detected bracket count in node.nbBrackets
    (0 meaning "unknown / detection failed").
    """
    if not isinstance(node.nodeDesc, cls):
        raise ValueError("Node {} is not an instance of type {}".format(node, cls))
    # TODO: use Node version for this test
    if 'userNbBrackets' not in node.getAttributes().keys():
        # Old version of the node
        return
    if node.userNbBrackets.value != 0:
        # Explicit user value: propagate it and skip automatic detection.
        node.nbBrackets.value = node.userNbBrackets.value
        return
    # logging.info("[LDRToHDR] Update start: version:" + str(node.packageVersion))
    cameraInitOutput = node.input.getLinkParam(recursive=True)
    if not cameraInitOutput:
        # Input not connected: bracket count unknown.
        node.nbBrackets.value = 0
        return
    if not cameraInitOutput.node.hasAttribute('viewpoints'):
        # Not a CameraInit node: follow one more 'input' link upstream to reach it.
        if cameraInitOutput.node.hasAttribute('input'):
            cameraInitOutput = cameraInitOutput.node.input.getLinkParam(recursive=True)
    viewpoints = cameraInitOutput.node.viewpoints.value
    # logging.info("[LDRToHDR] Update start: nb viewpoints:" + str(len(viewpoints)))
    inputs = []
    for viewpoint in viewpoints:
        jsonMetadata = viewpoint.metadata.value
        if not jsonMetadata:
            # No metadata at all: we cannot find the number of brackets.
            node.nbBrackets.value = 0
            return
        d = json.loads(jsonMetadata)
        # Tolerant lookups: metadata key names vary across cameras/namespaces.
        fnumber = findMetadata(d, ["FNumber", "Exif:ApertureValue", "ApertureValue", "Aperture"], "")
        shutterSpeed = findMetadata(d, ["Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed"], "")
        iso = findMetadata(d, ["Exif:ISOSpeedRatings", "ISOSpeedRatings", "ISO"], "")
        if not fnumber and not shutterSpeed:
            # One image without shutter speed or f-number: we cannot find the
            # number of brackets, so assume no multi-bracketing (nothing to do).
            node.nbBrackets.value = 1
            return
        inputs.append((viewpoint.path.value, (fnumber, shutterSpeed, iso)))
    inputs.sort()  # sort by file path so bracket sequences are contiguous
    exposureGroups = []
    exposures = []
    for path, exp in inputs:
        # A new group starts when the exposure settings cycle back to the first
        # exposure of the current group.
        # NOTE(review): assumes every bracket sequence starts with the same
        # exposure and has no immediate duplicates — confirm with capture workflow.
        if exposures and exp != exposures[-1] and exp == exposures[0]:
            exposureGroups.append(exposures)
            exposures = [exp]
        else:
            exposures.append(exp)
    exposureGroups.append(exposures)
    exposures = None
    bracketSizes = set()
    if len(exposureGroups) == 1:
        # Single group: treat input as non-bracketed.
        node.nbBrackets.value = 1
    else:
        for expGroup in exposureGroups:
            bracketSizes.add(len(expGroup))
        if len(bracketSizes) == 1:
            # All groups have the same size: that is the bracket count.
            node.nbBrackets.value = bracketSizes.pop()
            # logging.info("[LDRToHDR] nb bracket size:" + str(node.nbBrackets.value))
        else:
            # Inconsistent group sizes: detection failed.
            node.nbBrackets.value = 0
    # logging.info("[LDRToHDR] Update end")

View file

@ -1,46 +1,56 @@
__version__ = "2.0"
__version__ = "3.0"
import json
import os
from meshroom.core import desc
class DividedInputNodeSize(desc.DynamicNodeSize):
    """
    The LDR2HDR will reduce the amount of views in the SfMData.
    This class converts the number of LDR input views into the number of HDR output views.
    """
    def __init__(self, param, divParam):
        # param: attribute driving the base size; divParam: name of the
        # attribute holding the divider (e.g. the bracket count).
        super(DividedInputNodeSize, self).__init__(param)
        self._divParam = divParam
    def computeSize(self, node):
        # Base size from the input attribute (number of LDR views).
        s = super(DividedInputNodeSize, self).computeSize(node)
        divParam = node.attribute(self._divParam)
        if divParam.value == 0:
            # Divider not known yet (e.g. bracket count undetected): keep base size.
            return s
        # NOTE(review): true division — yields a float in Python 3; confirm callers accept that.
        return s / divParam.value
def findMetadata(d, keys, defaultValue):
    """Return the first metadata value found in dict `d` for any of `keys`.

    Tries an exact dictionary lookup first; otherwise scans all entries,
    comparing keys case-insensitively, with spaces stripped and reduced to the
    last ':' / '/'-separated component (e.g. 'Exif:FNumber' matches 'FNumber').
    Returns `defaultValue` when nothing matches.
    """
    v = None
    for key in keys:
        v = d.get(key, None)
        k = key.lower()
        if v is not None:
            return v
        # Fuzzy pass: normalize each dictionary key and compare again.
        for dk, dv in d.items():
            dkm = dk.lower().replace(" ", "")
            if dkm == key.lower():
                return dv
            # Strip metadata namespaces, e.g. 'exif:fnumber' -> 'fnumber'.
            dkm = dkm.split(":")[-1]
            dkm = dkm.split("/")[-1]
            if dkm == k:
                return dv
    return defaultValue
class LDRToHDR(desc.CommandLineNode):
commandLine = 'aliceVision_convertLDRToHDR {allParams}'
size = DividedInputNodeSize('input', 'nbBrackets')
class LdrToHdrMerge(desc.CommandLineNode):
commandLine = 'aliceVision_LdrToHdrMerge {allParams}'
size = desc.DynamicNodeSize('input')
parallelization = desc.Parallelization(blockSize=2)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
cpu = desc.Level.INTENSIVE
ram = desc.Level.NORMAL
documentation = '''
Calibrate LDR to HDR response curve from samples
'''
inputs = [
desc.File(
name='input',
label='Input',
description="SfM Data File",
description='SfMData file.',
value='',
uid=[0],
),
desc.File(
name='response',
label='Response file',
description='Response file',
value='',
uid=[0],
),
desc.IntParam(
name='userNbBrackets',
label='Number of Brackets',
description='Number of exposure brackets per HDR image (0 for automatic).',
description='Number of exposure brackets per HDR image (0 for automatic detection).',
value=0,
range=(0, 15, 1),
uid=[0],
@ -53,19 +63,59 @@ class LDRToHDR(desc.CommandLineNode):
value=0,
range=(0, 10, 1),
uid=[],
),
desc.IntParam(
name='offsetRefBracketIndex',
label='Offset Ref Bracket Index',
description='Zero to use the center bracket. +N to use a more exposed bracket or -N to use a less exposed bracket.',
value=1,
range=(-4, 4, 1),
uid=[0],
enabled= lambda node: node.nbBrackets.value != 1,
),
desc.BoolParam(
name='byPass',
label='Bypass',
description="Bypass HDR creation and use the medium bracket as the source for the next steps.",
value=False,
uid=[0],
enabled= lambda node: node.nbBrackets.value != 1,
),
desc.ChoiceParam(
name='fusionWeight',
label='Fusion Weight',
description="Weight function used to fuse all LDR images together:\n"
" * gaussian \n"
" * triangle \n"
" * plateau",
value='gaussian',
values=['gaussian', 'triangle', 'plateau'],
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name='channelQuantizationPower',
label='Channel Quantization Power',
description='Quantization level like 8 bits or 10 bits.',
value=10,
range=(8, 14, 1),
uid=[0],
advanced=True,
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.FloatParam(
name='highlightCorrectionFactor',
label='Highlights Correction',
description='Pixels saturated in all input images have a partial information about their real luminance.\n'
'We only know that the value should be >= to the standard hdr fusion.\n'
'This parameter allows to perform a post-processing step to put saturated pixels to a constant '
'This parameter allows to perform a post-processing step to put saturated pixels to a constant\n'
'value defined by the `highlightsMaxLuminance` parameter.\n'
'This parameter is float to enable to weight this correction.',
value=1.0,
range=(0.0, 1.0, 0.01),
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.FloatParam(
name='highlightTargetLux',
@ -88,113 +138,25 @@ class LDRToHDR(desc.CommandLineNode):
value=120000.0,
range=(1000.0, 150000.0, 1.0),
uid=[0],
),
desc.BoolParam(
name='fisheyeLens',
label='Fisheye Lens',
description="Enable if a fisheye lens has been used.\n "
"This will improve the estimation of the Camera's Response Function by considering only the pixels in the center of the image\n"
"and thus ignore undefined/noisy pixels outside the circle defined by the fisheye lens.",
value=False,
uid=[0],
),
desc.BoolParam(
name='calibrationRefineExposures',
label='Refine Exposures',
description="Refine exposures provided by metadata (shutter speed, f-number, iso). Only available for 'laguerre' calibration method.",
value=False,
uid=[0],
),
desc.BoolParam(
name='byPass',
label='bypass convert',
description="Bypass HDR creation and use the medium bracket as the source for the next steps",
value=False,
uid=[0],
),
desc.ChoiceParam(
name='calibrationMethod',
label='Calibration Method',
description="Method used for camera calibration \n"
" * linear \n"
" * robertson \n"
" * debevec \n"
" * grossberg \n"
" * laguerre",
values=['linear', 'robertson', 'debevec', 'grossberg', 'laguerre'],
value='debevec',
exclusive=True,
uid=[0],
),
desc.ChoiceParam(
name='calibrationWeight',
label='Calibration Weight',
description="Weight function used to calibrate camera response \n"
" * default (automatically selected according to the calibrationMethod) \n"
" * gaussian \n"
" * triangle \n"
" * plateau",
value='default',
values=['default', 'gaussian', 'triangle', 'plateau'],
exclusive=True,
uid=[0],
),
desc.ChoiceParam(
name='fusionWeight',
label='Fusion Weight',
description="Weight function used to fuse all LDR images together \n"
" * gaussian \n"
" * triangle \n"
" * plateau",
value='gaussian',
values=['gaussian', 'triangle', 'plateau'],
exclusive=True,
uid=[0],
),
desc.IntParam(
name='calibrationNbPoints',
label='Calibration Nb Points',
description='Internal number of points used for calibration.',
value=0,
range=(0, 10000000, 1000),
uid=[0],
advanced=True,
),
desc.IntParam(
name='calibrationDownscale',
label='Calibration Downscale',
description='Scaling factor applied to images before calibration of the response function to reduce the impact of misalignment.',
value=4,
range=(1, 16, 1),
uid=[0],
advanced=True,
),
desc.IntParam(
name='channelQuantizationPower',
label='Channel Quantization Power',
description='Quantization level like 8 bits or 10 bits.',
value=10,
range=(8, 14, 1),
uid=[0],
advanced=True,
enabled= lambda node: node.byPass.enabled and not node.byPass.value and node.highlightCorrectionFactor.value != 0,
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='Verbosity level (fatal, error, warning, info, debug, trace).',
description='verbosity level (fatal, error, warning, info, debug, trace).',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
exclusive=True,
uid=[],
),
)
]
outputs = [
desc.File(
name='outSfMDataFilename',
name='outSfMData',
label='Output SfMData File',
description='Path to the output sfmdata file',
value=desc.Node.internalFolder + 'sfmData.abc',
value=desc.Node.internalFolder + 'sfmData.sfm',
uid=[],
)
]
@ -211,10 +173,13 @@ class LDRToHDR(desc.CommandLineNode):
node.nbBrackets.value = node.userNbBrackets.value
return
# logging.info("[LDRToHDR] Update start: version:" + str(node.packageVersion))
cameraInitOutput = node.input.getLinkParam()
cameraInitOutput = node.input.getLinkParam(recursive=True)
if not cameraInitOutput:
node.nbBrackets.value = 0
return
if not cameraInitOutput.node.hasAttribute('viewpoints'):
if cameraInitOutput.node.hasAttribute('input'):
cameraInitOutput = cameraInitOutput.node.input.getLinkParam(recursive=True)
viewpoints = cameraInitOutput.node.viewpoints.value
# logging.info("[LDRToHDR] Update start: nb viewpoints:" + str(len(viewpoints)))
@ -226,12 +191,13 @@ class LDRToHDR(desc.CommandLineNode):
node.nbBrackets.value = 0
return
d = json.loads(jsonMetadata)
fnumber = d.get("FNumber", d.get("Exif:ApertureValue", ""))
shutterSpeed = d.get("Exif:ShutterSpeedValue", "") # also "ExposureTime"?
iso = d.get("Exif:ISOSpeedRatings", "")
fnumber = findMetadata(d, ["FNumber", "Exif:ApertureValue", "ApertureValue", "Aperture"], "")
shutterSpeed = findMetadata(d, ["Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed"], "")
iso = findMetadata(d, ["Exif:ISOSpeedRatings", "ISOSpeedRatings", "ISO"], "")
if not fnumber and not shutterSpeed:
# if one image without shutter or fnumber, we cannot found the number of brackets
node.nbBrackets.value = 0
# If one image has neither shutter speed nor f-number, we cannot find the number of brackets.
# We assume that there is no multi-bracketing, so there is nothing to do.
node.nbBrackets.value = 1
return
inputs.append((viewpoint.path.value, (fnumber, shutterSpeed, iso)))
inputs.sort()
@ -247,6 +213,9 @@ class LDRToHDR(desc.CommandLineNode):
exposureGroups.append(exposures)
exposures = None
bracketSizes = set()
if len(exposureGroups) == 1:
node.nbBrackets.value = 1
else:
for expGroup in exposureGroups:
bracketSizes.add(len(expGroup))
if len(bracketSizes) == 1:
@ -256,4 +225,3 @@ class LDRToHDR(desc.CommandLineNode):
node.nbBrackets.value = 0
# logging.info("[LDRToHDR] Update end")

View file

@ -0,0 +1,223 @@
__version__ = "3.0"
import json
from meshroom.core import desc
def findMetadata(d, keys, defaultValue):
    """Look up a metadata value in dict 'd', trying several candidate keys.

    For each candidate key (in priority order):
      1. try an exact lookup;
      2. try a fuzzy match against every key of 'd': lower-cased, spaces removed,
         then also with any namespace prefix stripped (last ':' or '/' component,
         e.g. "Exif:FNumber" -> "fnumber").

    Returns the first value found, or 'defaultValue' when no candidate matches.
    """
    for key in keys:
        v = d.get(key, None)
        if v is not None:
            return v
        k = key.lower()
        for dk, dv in d.items():
            dkm = dk.lower().replace(" ", "")
            if dkm == k:
                return dv
            # Keep only the last component of namespaced keys.
            dkm = dkm.split(":")[-1]
            dkm = dkm.split("/")[-1]
            if dkm == k:
                return dv
    return defaultValue
class DividedInputNodeSize(desc.DynamicNodeSize):
    """
    The LDR2HDR node reduces the number of views in the SfMData.
    This class converts the number of LDR input views into the number of HDR output views.
    """
    def __init__(self, param, divParam):
        # param: name of the attribute driving the base size (the input SfMData).
        # divParam: name of the attribute holding the divisor (number of brackets).
        super(DividedInputNodeSize, self).__init__(param)
        self._divParam = divParam
    def computeSize(self, node):
        # Base size computed by DynamicNodeSize from the 'param' attribute.
        s = super(DividedInputNodeSize, self).computeSize(node)
        divParam = node.attribute(self._divParam)
        if divParam.value == 0:
            # Divisor not known yet (e.g. bracket auto-detection failed): keep the base size.
            return s
        # NOTE(review): true division — yields a float under Python 3; presumably
        # callers tolerate a non-integer size. TODO confirm.
        return s / divParam.value
class LdrToHdrSampling(desc.CommandLineNode):
    # Wraps the aliceVision_LdrToHdrSampling executable: extracts pixel samples
    # from the LDR brackets, later used to calibrate the camera response function.
    commandLine = 'aliceVision_LdrToHdrSampling {allParams}'
    # One output HDR view per group of 'nbBrackets' LDR input views.
    size = DividedInputNodeSize('input', 'nbBrackets')
    parallelization = desc.Parallelization(blockSize=2)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    documentation = '''
Sample pixels from Low range images for HDR creation
'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.IntParam(
            name='userNbBrackets',
            label='Number of Brackets',
            description='Number of exposure brackets per HDR image (0 for automatic detection).',
            value=0,
            range=(0, 15, 1),
            uid=[0],
            group='user',  # not used directly on the command line
        ),
        desc.IntParam(
            name='nbBrackets',
            label='Automatic Nb Brackets',
            description='Number of exposure brackets used per HDR image. It is detected automatically from input Viewpoints metadata if "userNbBrackets" is 0, else it is equal to "userNbBrackets".',
            value=0,
            range=(0, 10, 1),
            uid=[],
        ),
        desc.BoolParam(
            name='byPass',
            label='Bypass',
            description="Bypass HDR creation and use the medium bracket as the source for the next steps",
            value=False,
            uid=[0],
            group='internal',
            # Bypassing only makes sense when there is actual multi-bracketing.
            enabled= lambda node: node.nbBrackets.value != 1,
        ),
        desc.IntParam(
            name='channelQuantizationPower',
            label='Channel Quantization Power',
            description='Quantization level like 8 bits or 10 bits.',
            value=10,
            range=(8, 14, 1),
            uid=[0],
            advanced=True,
            enabled= lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.IntParam(
            name='blockSize',
            label='Block Size',
            description='Size of the image tile to extract a sample.',
            value=256,
            range=(8, 1024, 1),
            uid=[0],
            advanced=True,
            enabled= lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.IntParam(
            name='radius',
            label='Patch Radius',
            description='Radius of the patch used to analyze the sample statistics.',
            value=5,
            range=(0, 10, 1),
            uid=[0],
            advanced=True,
            enabled= lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.IntParam(
            name='maxCountSample',
            label='Max Number of Samples',
            description='Max number of samples per image group.',
            value=200,
            range=(10, 1000, 10),
            uid=[0],
            advanced=True,
            enabled= lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.BoolParam(
            name='debug',
            label='Export Debug Files',
            description="Export debug files to analyze the sampling strategy.",
            value=False,
            uid=[],
            enabled= lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        )
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output Folder',
            description='Output path for the samples.',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]

    def processChunk(self, chunk):
        # Skip the external process entirely when sampling is pointless:
        # a single bracket or an explicit bypass of HDR fusion.
        if chunk.node.nbBrackets.value == 1 or chunk.node.byPass.value:
            return
        super(LdrToHdrSampling, self).processChunk(chunk)

    @classmethod
    def update(cls, node):
        # Auto-detect the number of exposure brackets from the upstream
        # CameraInit viewpoints metadata and store it into 'nbBrackets'.
        if not isinstance(node.nodeDesc, cls):
            raise ValueError("Node {} is not an instance of type {}".format(node, cls))
        # TODO: use Node version for this test
        if 'userNbBrackets' not in node.getAttributes().keys():
            # Old version of the node
            return
        if node.userNbBrackets.value != 0:
            # User override: trust the explicit value, no detection needed.
            node.nbBrackets.value = node.userNbBrackets.value
            return
        # logging.info("[LDRToHDR] Update start: version:" + str(node.packageVersion))
        cameraInitOutput = node.input.getLinkParam(recursive=True)
        if not cameraInitOutput:
            node.nbBrackets.value = 0
            return
        if not cameraInitOutput.node.hasAttribute('viewpoints'):
            # The upstream node is not a CameraInit: follow its own 'input'
            # link one step further to reach the node that owns the viewpoints.
            if cameraInitOutput.node.hasAttribute('input'):
                cameraInitOutput = cameraInitOutput.node.input.getLinkParam(recursive=True)
        viewpoints = cameraInitOutput.node.viewpoints.value

        # logging.info("[LDRToHDR] Update start: nb viewpoints:" + str(len(viewpoints)))
        inputs = []
        for viewpoint in viewpoints:
            jsonMetadata = viewpoint.metadata.value
            if not jsonMetadata:
                # No metadata: we cannot find the number of brackets.
                node.nbBrackets.value = 0
                return
            d = json.loads(jsonMetadata)
            # Exposure triplet identifying a bracket (fuzzy metadata-key lookup).
            fnumber = findMetadata(d, ["FNumber", "Exif:ApertureValue", "ApertureValue", "Aperture"], "")
            shutterSpeed = findMetadata(d, ["Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed"], "")
            iso = findMetadata(d, ["Exif:ISOSpeedRatings", "ISOSpeedRatings", "ISO"], "")
            if not fnumber and not shutterSpeed:
                # If one image has neither shutter speed nor f-number, we cannot find the number of brackets.
                # We assume that there is no multi-bracketing, so nothing to do.
                node.nbBrackets.value = 1
                return
            inputs.append((viewpoint.path.value, (fnumber, shutterSpeed, iso)))
        # Sort by file path so consecutive shots of the same bracket group are adjacent.
        inputs.sort()
        exposureGroups = []
        exposures = []
        for path, exp in inputs:
            # A group ends when the exposure cycles back to the first value of the group.
            if exposures and exp != exposures[-1] and exp == exposures[0]:
                exposureGroups.append(exposures)
                exposures = [exp]
            else:
                exposures.append(exp)
        exposureGroups.append(exposures)
        exposures = None
        bracketSizes = set()
        if len(exposureGroups) == 1:
            # A single group means no bracketing cycle was detected.
            node.nbBrackets.value = 1
        else:
            for expGroup in exposureGroups:
                bracketSizes.add(len(expGroup))
            if len(bracketSizes) == 1:
                # All groups have the same size: that size is the bracket count.
                node.nbBrackets.value = bracketSizes.pop()
                # logging.info("[LDRToHDR] nb bracket size:" + str(node.nbBrackets.value))
            else:
                # Inconsistent group sizes: detection failed.
                node.nbBrackets.value = 0
        # logging.info("[LDRToHDR] Update end")

View file

@ -9,6 +9,10 @@ class MeshDecimate(desc.CommandLineNode):
cpu = desc.Level.NORMAL
ram = desc.Level.NORMAL
documentation = '''
This node allows to reduce the density of the Mesh.
'''
inputs = [
desc.File(
name="input",

View file

@ -6,6 +6,11 @@ from meshroom.core import desc
class MeshDenoising(desc.CommandLineNode):
commandLine = 'aliceVision_meshDenoising {allParams}'
documentation = '''
This experimental node allows to reduce noise from a Mesh.
For now, the parameters are difficult to control and vary a lot from one dataset to another.
'''
inputs = [
desc.File(
name='input',
@ -69,7 +74,7 @@ class MeshDenoising(desc.CommandLineNode):
label='Mesh Update Method',
description='Mesh Update Method\n'
' * ITERATIVE_UPDATE (default): ShapeUp styled iterative solver \n'
' * POISSON_UPDATE: Poisson-based update from [Want et al. 2015]',
' * POISSON_UPDATE: Poisson-based update from [Wang et al. 2015] "Rolling guidance normal filter for geometric processing"',
value=0,
values=(0, 1),
exclusive=True,

View file

@ -6,6 +6,11 @@ from meshroom.core import desc
class MeshFiltering(desc.CommandLineNode):
commandLine = 'aliceVision_meshFiltering {allParams}'
documentation = '''
This node applies a Laplacian filtering to remove local defects from the raw Meshing cut.
'''
inputs = [
desc.File(
name='inputMesh',

View file

@ -9,6 +9,10 @@ class MeshResampling(desc.CommandLineNode):
cpu = desc.Level.NORMAL
ram = desc.Level.NORMAL
documentation = '''
This node allows to recompute the mesh surface with a new topology and uniform density.
'''
inputs = [
desc.File(
name="input",

View file

@ -1,4 +1,4 @@
__version__ = "3.0"
__version__ = "5.0"
from meshroom.core import desc
@ -9,6 +9,17 @@ class Meshing(desc.CommandLineNode):
cpu = desc.Level.INTENSIVE
ram = desc.Level.INTENSIVE
documentation = '''
This node creates a dense geometric surface representation of the scene.
First, it fuses all the depth maps into a global dense point cloud with an adaptive resolution.
It then performs a 3D Delaunay tetrahedralization and a voting procedure is done to compute weights on cells and weights on facets connecting the cells.
A Graph Cut Max-Flow is applied to optimally cut the volume. This cut represents the extracted mesh surface.
## Online
[https://alicevision.org/#photogrammetry/meshing](https://alicevision.org/#photogrammetry/meshing)
'''
inputs = [
desc.File(
name='input',
@ -20,14 +31,7 @@ class Meshing(desc.CommandLineNode):
desc.File(
name="depthMapsFolder",
label='Depth Maps Folder',
description='Input depth maps folder',
value='',
uid=[0],
),
desc.File(
name="depthMapsFilterFolder",
label='Filtered Depth Maps Folder',
description='Input filtered depth maps folder',
description='Input depth maps folder.',
value='',
uid=[0],
),
@ -47,6 +51,7 @@ class Meshing(desc.CommandLineNode):
range=(0, 100, 1),
uid=[0],
advanced=True,
enabled=lambda node: node.estimateSpaceFromSfM.value,
),
desc.FloatParam(
name='estimateSpaceMinObservationAngle',
@ -55,6 +60,7 @@ class Meshing(desc.CommandLineNode):
value=10,
range=(0, 120, 1),
uid=[0],
enabled=lambda node: node.estimateSpaceFromSfM.value,
),
desc.IntParam(
name='maxInputPoints',

View file

@ -10,11 +10,25 @@ class PanoramaCompositing(desc.CommandLineNode):
commandLine = 'aliceVision_panoramaCompositing {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
Once the images have been transformed geometrically (in PanoramaWarping),
they have to be fused together in a single panorama image which looks like a single photography.
The Multi-band Blending method provides the best quality. It averages the pixel values using multiple bands in the frequency domain.
Multiple cameras are contributing to the low frequencies and only the best one contributes to the high frequencies.
'''
inputs = [
desc.File(
name='input',
label='Input',
description="Panorama Warping result",
label='Input SfMData',
description="Input SfMData.",
value='',
uid=[0],
),
desc.File(
name='warpingFolder',
label='Warping Folder',
description="Panorama Warping results",
value='',
uid=[0],
),
@ -31,12 +45,28 @@ class PanoramaCompositing(desc.CommandLineNode):
desc.ChoiceParam(
name='compositerType',
label='Compositer Type',
description='Which compositer should be used to blend images',
description='Which compositer should be used to blend images:\n'
' * multiband: high quality transition by fusing images by frequency bands\n'
' * replace: debug option with straight transitions\n'
' * alpha: debug option with linear transitions\n',
value='multiband',
values=['replace', 'alpha', 'multiband'],
exclusive=True,
uid=[0]
),
desc.ChoiceParam(
name='overlayType',
label='Overlay Type',
description='Overlay on top of panorama to analyze transitions:\n'
' * none: no overlay\n'
' * borders: display image borders\n'
' * seams: display transitions between images\n',
value='none',
values=['none', 'borders', 'seams'],
exclusive=True,
advanced=True,
uid=[0]
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',

View file

@ -10,6 +10,10 @@ class PanoramaEstimation(desc.CommandLineNode):
commandLine = 'aliceVision_panoramaEstimation {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
Estimate relative camera rotations between input images.
'''
inputs = [
desc.File(
name='input',
@ -53,15 +57,6 @@ class PanoramaEstimation(desc.CommandLineNode):
uid=[0],
joinChar=',',
),
desc.IntParam(
name='orientation',
label='Orientation',
description='Orientation',
value=0,
range=(0, 6, 1),
uid=[0],
advanced=True,
),
desc.FloatParam(
name='offsetLongitude',
label='Longitude offset (deg.)',
@ -69,7 +64,6 @@ class PanoramaEstimation(desc.CommandLineNode):
value=0.0,
range=(-180.0, 180.0, 1.0),
uid=[0],
advanced=True,
),
desc.FloatParam(
name='offsetLatitude',
@ -78,7 +72,6 @@ class PanoramaEstimation(desc.CommandLineNode):
value=0.0,
range=(-90.0, 90.0, 1.0),
uid=[0],
advanced=True,
),
desc.ChoiceParam(
name='rotationAveraging',
@ -97,9 +90,10 @@ class PanoramaEstimation(desc.CommandLineNode):
label='Relative Rotation Method',
description="Method for relative rotation :\n"
" * from essential matrix\n"
" * from homography matrix",
values=['essential_matrix', 'homography_matrix'],
value='homography_matrix',
" * from homography matrix\n"
" * from rotation matrix",
values=['essential_matrix', 'homography_matrix', 'rotation_matrix'],
value='rotation_matrix',
exclusive=True,
uid=[0],
advanced=True,
@ -113,13 +107,47 @@ class PanoramaEstimation(desc.CommandLineNode):
),
desc.BoolParam(
name='lockAllIntrinsics',
label='Force Lock of All Intrinsic Camera Parameters.',
label='Force Lock of All Intrinsics',
description='Force to keep constant all the intrinsics parameters of the cameras (focal length, \n'
'principal point, distortion if any) during the reconstruction.\n'
'This may be helpful if the input cameras are already fully calibrated.',
value=False,
uid=[0],
),
desc.FloatParam(
name='maxAngleToPrior',
label='Max Angle To Priors (deg.)',
description='''Maximal angle allowed regarding the input prior (in degrees).''',
value=20.0,
range=(0.0, 360.0, 1.0),
uid=[0],
advanced=True,
),
desc.FloatParam(
name='maxAngularError',
label='Max Angular Error (deg.)',
description='''Maximal angular error in global rotation averging (in degrees).''',
value=100.0,
range=(0.0, 360.0, 1.0),
uid=[0],
advanced=True,
),
desc.BoolParam(
name='intermediateRefineWithFocal',
label='Intermediate Refine: Focal',
description='Intermediate refine with rotation and focal length only.',
value=False,
uid=[0],
advanced=True,
),
desc.BoolParam(
name='intermediateRefineWithFocalDist',
label='Intermediate Refine: Focal And Distortion',
description='Intermediate refine with rotation, focal length and distortion.',
value=False,
uid=[0],
advanced=True,
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
@ -134,16 +162,16 @@ class PanoramaEstimation(desc.CommandLineNode):
outputs = [
desc.File(
name='output',
label='Output Folder',
description='',
value=desc.Node.internalFolder,
label='Output SfMData File',
description='Path to the output sfmdata file',
value=desc.Node.internalFolder + 'panorama.abc',
uid=[],
),
desc.File(
name='outSfMDataFilename',
label='Output SfMData File',
description='Path to the output sfmdata file',
value=desc.Node.internalFolder + 'sfmData.abc',
name='outputViewsAndPoses',
label='Output Poses',
description='''Path to the output sfmdata file with cameras (views and poses).''',
value=desc.Node.internalFolder + 'cameras.sfm',
uid=[],
),
]

View file

@ -1,60 +0,0 @@
__version__ = "1.0"
import json
import os
from meshroom.core import desc
class PanoramaExternalInfo(desc.CommandLineNode):
    # Wraps the aliceVision_panoramaExternalInfo executable.
    # NOTE(review): presumably injects externally-provided panorama information
    # (from the XML config) into the SfM scene — inferred from the tool name
    # and parameters; confirm against the aliceVision documentation.
    commandLine = 'aliceVision_panoramaExternalInfo {allParams}'
    # One task per input view.
    size = desc.DynamicNodeSize('input')

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description="SfM Data File",
            value='',
            uid=[0],
        ),
        desc.File(
            name='config',
            label='Xml Config',
            description="XML Data File",
            value='',
            uid=[0],
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name='matchesFolder',
                label='Matches Folder',
                description="",
                value='',
                uid=[0],
            ),
            name='matchesFolders',
            label='Matches Folders',
            description="Folder(s) in which computed matches are stored. (WORKAROUND for valid Tractor graph submission)",
            # Declared only to create a graph dependency; not passed on the command line.
            group='forDependencyOnly',
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='Verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='outSfMDataFilename',
            label='Output SfMData File',
            description='Path to the output sfmdata file',
            value=desc.Node.internalFolder + 'sfmData.abc',
            uid=[],
        )
    ]

View file

@ -0,0 +1,112 @@
__version__ = "2.0"
from meshroom.core import desc
class PanoramaInit(desc.CommandLineNode):
    # Wraps the aliceVision_panoramaInit executable: initial setup of the
    # panorama pipeline (known poses from XML, full-fisheye optics, fisheye
    # circle estimation).
    commandLine = 'aliceVision_panoramaInit {allParams}'
    # One task per input view.
    size = desc.DynamicNodeSize('input')

    documentation = '''
This node allows to setup the Panorama:
1/ Enables the initialization the cameras from known position in an XML file (provided by
["Roundshot VR Drive"](https://www.roundshot.com/xml_1/internet/fr/application/d394/d395/f396.cfm) ).
2/ Enables to setup Full Fisheye Optics (to use an Equirectangular camera model).
3/ To automatically detects the Fisheye Circle (radius + center) in input images or manually adjust it.
'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description="SfM Data File",
            value='',
            uid=[0],
        ),
        desc.File(
            name='config',
            label='Xml Config',
            description="XML Data File",
            value='',
            uid=[0],
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name='dependency',
                label='',
                description="",
                value='',
                uid=[],
            ),
            name='dependency',
            label='Dependency',
            description="Folder(s) in which computed features are stored. (WORKAROUND for valid Tractor graph submission)",
            group='forDependencyOnly',  # not a command line argument
        ),
        desc.BoolParam(
            name='useFisheye',
            label='Full Fisheye',
            description='To declare a full fisheye panorama setup',
            value=False,
            uid=[0],
        ),
        desc.BoolParam(
            name='estimateFisheyeCircle',
            label='Estimate Fisheye Circle',
            description='Automatically estimate the Fisheye Circle center and radius instead of using user values.',
            value=True,
            uid=[0],
            # Only relevant for fisheye setups.
            enabled=lambda node: node.useFisheye.value,
        ),
        desc.GroupAttribute(
            name="fisheyeCenterOffset",
            label="Fisheye Center",
            description="Center of the Fisheye circle (XY offset to the center in pixels).",
            groupDesc=[
                desc.FloatParam(
                    name="fisheyeCenterOffset_x", label="x", description="X Offset in pixels",
                    value=0.0,
                    uid=[0],
                    range=(-1000.0, 10000.0, 1.0)),
                desc.FloatParam(
                    name="fisheyeCenterOffset_y", label="y", description="Y Offset in pixels",
                    value=0.0,
                    uid=[0],
                    range=(-1000.0, 10000.0, 1.0)),
            ],
            group=None,  # skip group from command line
            # Manual values only apply when automatic estimation is disabled.
            enabled=lambda node: node.useFisheye.value and not node.estimateFisheyeCircle.value,
        ),
        desc.FloatParam(
            name='fisheyeRadius',
            label='Radius',
            description='Fisheye visibillity circle radius (% of image shortest side).',
            value=96.0,
            range=(0.0, 150.0, 0.01),
            uid=[0],
            enabled=lambda node: node.useFisheye.value and not node.estimateFisheyeCircle.value,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='Verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='outSfMData',
            label='Output SfMData File',
            description='Path to the output sfmdata file',
            value=desc.Node.internalFolder + 'sfmData.sfm',
            uid=[],
        )
    ]

View file

@ -0,0 +1,43 @@
__version__ = "1.1"
from meshroom.core import desc
import os.path
class PanoramaPrepareImages(desc.CommandLineNode):
    # Wraps the aliceVision_panoramaPrepareImages executable.
    commandLine = 'aliceVision_panoramaPrepareImages {allParams}'
    # One task per input view.
    size = desc.DynamicNodeSize('input')

    documentation = '''
Prepare images for Panorama pipeline: ensures that images orientations are coherent.
'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        )
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output sfmData',
            description='Output sfmData.',
            # Output file name mirrors the input file name inside the node's internal folder.
            value=lambda attr: desc.Node.internalFolder + os.path.basename(attr.node.input.value),
            uid=[],
        ),
    ]

View file

@ -10,6 +10,13 @@ class PanoramaWarping(desc.CommandLineNode):
commandLine = 'aliceVision_panoramaWarping {allParams}'
size = desc.DynamicNodeSize('input')
parallelization = desc.Parallelization(blockSize=5)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
Compute the image warping for each input image in the panorama coordinate system.
'''
inputs = [
desc.File(
name='input',
@ -21,7 +28,8 @@ class PanoramaWarping(desc.CommandLineNode):
desc.IntParam(
name='panoramaWidth',
label='Panorama Width',
description='Panorama width (pixels). 0 For automatic size',
description='Panorama Width (in pixels).\n'
'Set 0 to let the software choose the size automatically, so that on average the input resolution is kept (to limit over/under sampling).',
value=10000,
range=(0, 50000, 1000),
uid=[0]

View file

@ -9,6 +9,10 @@ class PrepareDenseScene(desc.CommandLineNode):
parallelization = desc.Parallelization(blockSize=40)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
This node export undistorted images so the depth map and texturing can be computed on Pinhole images without distortion.
'''
inputs = [
desc.File(
name='input',

View file

@ -1,6 +1,6 @@
from __future__ import print_function
__version__ = "1.1"
__version__ = "1.2"
from meshroom.core import desc
import shutil
@ -10,6 +10,11 @@ import os
class Publish(desc.Node):
size = desc.DynamicNodeSize('inputFiles')
documentation = '''
This node allows to copy files into a specific folder.
'''
inputs = [
desc.ListAttribute(
elementDesc=desc.File(
@ -31,6 +36,15 @@ class Publish(desc.Node):
value="",
uid=[0],
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='''verbosity level (critical, error, warning, info, debug).''',
value='info',
values=['critical', 'error', 'warning', 'info', 'debug'],
exclusive=True,
uid=[],
),
]
def resolvedPaths(self, inputFiles, outDir):
@ -41,9 +55,11 @@ class Publish(desc.Node):
return paths
def processChunk(self, chunk):
print("Publish")
try:
chunk.logManager.start(chunk.node.verboseLevel.value)
if not chunk.node.inputFiles:
print("Nothing to publish")
chunk.logger.warning('Nothing to publish')
return
if not chunk.node.output.value:
return
@ -51,13 +67,17 @@ class Publish(desc.Node):
outFiles = self.resolvedPaths(chunk.node.inputFiles.value, chunk.node.output.value)
if not outFiles:
raise RuntimeError("Publish: input files listed, but nothing to publish. "
"Listed input files: {}".format(chunk.node.inputFiles.value))
error = 'Publish: input files listed, but nothing to publish'
chunk.logger.error(error)
chunk.logger.info('Listed input files: {}'.format([i.value for i in chunk.node.inputFiles.value]))
raise RuntimeError(error)
if not os.path.exists(chunk.node.output.value):
os.mkdir(chunk.node.output.value)
for iFile, oFile in outFiles.items():
print('Publish file', iFile, 'into', oFile)
chunk.logger.info('Publish file {} into {}'.format(iFile, oFile))
shutil.copyfile(iFile, oFile)
print('Publish end')
chunk.logger.info('Publish end')
finally:
chunk.logManager.end()

View file

@ -1,12 +1,26 @@
__version__ = "1.0"
__version__ = "2.0"
from meshroom.core import desc
import os.path
class SfMAlignment(desc.CommandLineNode):
commandLine = 'aliceVision_utils_sfmAlignment {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
This node allows to change the coordinate system of one SfM scene to align it on another one.
The alignment can be based on:
* from_cameras_viewid: Align cameras in both SfM on the specified viewId
* from_cameras_poseid: Align cameras in both SfM on the specified poseId
* from_cameras_filepath: Align cameras with a filepath matching, using 'fileMatchingPattern'
* from_cameras_metadata: Align cameras with matching metadata, using 'metadataMatchingList'
* from_markers: Align from markers with the same Id
'''
inputs = [
desc.File(
name='input',
@ -95,9 +109,16 @@ class SfMAlignment(desc.CommandLineNode):
outputs = [
desc.File(
name='output',
label='Output',
description='''Aligned SfMData file .''',
value=desc.Node.internalFolder + 'alignedSfM.abc',
label='Output SfMData File',
description='SfMData file.',
value=lambda attr: desc.Node.internalFolder + (os.path.splitext(os.path.basename(attr.node.input.value))[0] or 'sfmData') + '.abc',
uid=[],
),
desc.File(
name='outputViewsAndPoses',
label='Output Poses',
description='''Path to the output sfmdata file with cameras (views and poses).''',
value=desc.Node.internalFolder + 'cameras.sfm',
uid=[],
),
]

View file

@ -1,12 +1,18 @@
__version__ = "1.0"
__version__ = "2.0"
from meshroom.core import desc
import os.path
class SfMTransfer(desc.CommandLineNode):
commandLine = 'aliceVision_utils_sfmTransfer {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
This node allows to transfer poses and/or intrinsics from one SfM scene onto another one.
'''
inputs = [
desc.File(
name='input',
@ -86,9 +92,16 @@ class SfMTransfer(desc.CommandLineNode):
outputs = [
desc.File(
name='output',
label='Output',
label='Output SfMData File',
description='SfMData file.',
value=desc.Node.internalFolder + 'sfmData.abc',
value=lambda attr: desc.Node.internalFolder + (os.path.splitext(os.path.basename(attr.node.input.value))[0] or 'sfmData') + '.abc',
uid=[],
),
desc.File(
name='outputViewsAndPoses',
label='Output Poses',
description='''Path to the output sfmdata file with cameras (views and poses).''',
value=desc.Node.internalFolder + 'cameras.sfm',
uid=[],
),
]

View file

@ -1,12 +1,26 @@
__version__ = "1.1"
__version__ = "2.0"
from meshroom.core import desc
import os.path
class SfMTransform(desc.CommandLineNode):
commandLine = 'aliceVision_utils_sfmTransform {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
This node allows to change the coordinate system of one SfM scene.
The transformation can be based on:
* transformation: Apply a given transformation
* auto_from_cameras: Fit all cameras into a box [-1,1]
* auto_from_landmarks: Fit all landmarks into a box [-1,1]
* from_single_camera: Use a specific camera as the origin of the coordinate system
* from_markers: Align specific markers to custom coordinates
'''
inputs = [
desc.File(
name='input',
@ -104,9 +118,16 @@ class SfMTransform(desc.CommandLineNode):
outputs = [
desc.File(
name='output',
label='Output',
label='Output SfMData File',
description='''Aligned SfMData file .''',
value=desc.Node.internalFolder + 'transformedSfM.abc',
value=lambda attr: desc.Node.internalFolder + (os.path.splitext(os.path.basename(attr.node.input.value))[0] or 'sfmData') + '.abc',
uid=[],
),
desc.File(
name='outputViewsAndPoses',
label='Output Poses',
description='''Path to the output sfmdata file with cameras (views and poses).''',
value=desc.Node.internalFolder + 'cameras.sfm',
uid=[],
),
]

View file

@ -0,0 +1,287 @@
__version__ = "1.0"
from meshroom.core import desc
import glob
import os
import json
import zipfile
import requests
import io
class BufferReader(io.BytesIO):  # object to call the callback while the file is being uploaded
    """In-memory byte buffer that reports read progress.

    Wraps an upload payload so that each read() call (issued by the HTTP
    library while streaming the request body) notifies 'callback' with the
    total size and the number of bytes consumed so far, and aborts the
    transfer with RuntimeError when the 'stopped' predicate returns True.
    """
    def __init__(self, buf=b'',
                 callback=None,
                 cb_args=(),
                 cb_kwargs=None,
                 stopped=None):
        self._callback = callback
        self._cb_args = cb_args
        # Copy into a fresh dict: read() mutates it, and the original mutable
        # default argument ({}) leaked progress state between instances.
        self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else {}
        self._stopped = stopped
        self._progress = 0
        self._len = len(buf)
        io.BytesIO.__init__(self, buf)

    def __len__(self):
        # Total payload size in bytes (len() of the initial buffer).
        return self._len

    def read(self, n=-1):
        """Read up to n bytes, update progress and notify the callback.

        Raises:
            RuntimeError: if the 'stopped' predicate reports a user abort.
        """
        chunk = io.BytesIO.read(self, n)
        self._progress += int(len(chunk))
        self._cb_kwargs.update({
            'size'    : self._len,
            'progress': self._progress
        })
        if self._callback:
            try:
                self._callback(*self._cb_args, **self._cb_kwargs)
            except Exception as e:  # catches exception from the callback
                # NOTE(review): assumes 'logManager' was passed in cb_kwargs; a
                # missing key would raise here — TODO confirm all callers provide it.
                self._cb_kwargs['logManager'].logger.warning('Error at callback: {}'.format(e))
        # Guard against the default stopped=None (the original crashed here
        # with a TypeError whenever no predicate was supplied).
        if self._stopped and self._stopped():
            raise RuntimeError('Node stopped by user')
        return chunk
def progressUpdate(size=None, progress=None, logManager=None):
    """Upload-progress callback for BufferReader.

    Lazily creates the log manager's progress bar on first invocation (sized to
    the full payload), then advances it to the current byte count.
    """
    barMissing = not logManager.progressBar
    if barMissing:
        logManager.makeProgressBar(size, 'Upload progress:')
    logManager.updateProgressBar(progress)
class SketchfabUpload(desc.Node):
size = desc.DynamicNodeSize('inputFiles')
documentation = '''
Upload a textured mesh on Sketchfab.
'''
inputs = [
desc.ListAttribute(
elementDesc=desc.File(
name="input",
label="Input",
description="",
value="",
uid=[0],
),
name="inputFiles",
label="Input Files",
description="Input Files to export.",
group="",
),
desc.StringParam(
name='apiToken',
label='API Token',
description='Get your token from https://sketchfab.com/settings/password',
value='',
uid=[0],
),
desc.StringParam(
name='title',
label='Title',
description='Title cannot be longer than 48 characters.',
value='',
uid=[0],
),
desc.StringParam(
name='description',
label='Description',
description='Description cannot be longer than 1024 characters.',
value='',
uid=[0],
),
desc.ChoiceParam(
name='license',
label='License',
description='License label.',
value='CC Attribution',
values=['CC Attribution',
'CC Attribution-ShareAlike',
'CC Attribution-NoDerivs',
'CC Attribution-NonCommercial',
'CC Attribution-NonCommercial-ShareAlike',
'CC Attribution-NonCommercial-NoDerivs'],
exclusive=True,
uid=[0],
),
desc.ListAttribute(
elementDesc=desc.StringParam(
name='tag',
label='Tag',
description='Tag cannot be longer than 48 characters.',
value='',
uid=[0],
),
name="tags",
label="Tags",
description="Maximum of 42 separate tags.",
group="",
),
desc.ChoiceParam(
name='category',
label='Category',
description='Adding categories helps improve the discoverability of your model.',
value='none',
values=['none',
'animals-pets',
'architecture',
'art-abstract',
'cars-vehicles',
'characters-creatures',
'cultural-heritage-history',
'electronics-gadgets',
'fashion-style',
'food-drink',
'furniture-home',
'music',
'nature-plants',
'news-politics',
'people',
'places-travel',
'science-technology',
'sports-fitness',
'weapons-military'],
exclusive=True,
uid=[0],
),
desc.BoolParam(
name='isPublished',
label='Publish',
description='If the model is not published it will be saved as a draft.',
value=False,
uid=[0],
),
desc.BoolParam(
name='isInspectable',
label='Inspectable',
description='Allow 2D view in model inspector.',
value=True,
uid=[0],
),
desc.BoolParam(
name='isPrivate',
label='Private',
description='Requires a pro account.',
value=False,
uid=[0],
),
desc.StringParam(
name='password',
label='Password',
description='Requires a pro account.',
value='',
uid=[0],
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='''verbosity level (critical, error, warning, info, debug).''',
value='info',
values=['critical', 'error', 'warning', 'info', 'debug'],
exclusive=True,
uid=[],
),
]
def upload(self, apiToken, modelFile, data, chunk):
    """
    Upload a model file and its metadata to the Sketchfab API.

    Args:
        apiToken (str): Sketchfab API token used for authorization.
        modelFile (str): path of the (zipped) model file to upload.
        data (dict): model metadata (name, description, license, tags, ...).
        chunk: the NodeChunk providing logger/logManager for progress reporting.

    Raises:
        RuntimeError: if the request fails or the API does not answer 201 Created.
    """
    modelEndpoint = 'https://api.sketchfab.com/v3/models'
    # Context manager ensures the file handle is released even if read() raises
    # (the original opened/closed manually and leaked the handle on error).
    with open(modelFile, 'rb') as f:
        payload = {'modelFile': (os.path.basename(modelFile), f.read())}
    payload.update(data)
    # Encode everything as a single multipart/form-data body.
    (files, contentType) = requests.packages.urllib3.filepost.encode_multipart_formdata(payload)
    headers = {'Authorization': 'Token {}'.format(apiToken), 'Content-Type': contentType}
    # BufferReader streams the body and reports progress; it polls self.stopped
    # so the upload can be cancelled from stopProcess().
    body = BufferReader(files, progressUpdate, cb_kwargs={'logManager': chunk.logManager}, stopped=self.stopped)
    chunk.logger.info('Uploading...')
    try:
        r = requests.post(modelEndpoint, data=body, headers=headers)
        chunk.logManager.completeProgressBar()
    except requests.exceptions.RequestException as e:
        chunk.logger.error(u'An error occured: {}'.format(e))
        raise RuntimeError()
    if r.status_code != requests.codes.created:
        chunk.logger.error(u'Upload failed with error: {}'.format(r.json()))
        raise RuntimeError()
def resolvedPaths(self, inputFiles):
    """
    Resolve a list of file attributes into concrete file paths.

    Each attribute's value is either a directory (walked recursively, every
    contained file collected) or a path / glob pattern (expanded with glob).

    Args:
        inputFiles: iterable of attributes whose ``value`` is a path string.

    Returns:
        list of str: the resolved file paths.
    """
    resolved = []
    for attr in inputFiles:
        location = attr.value
        if os.path.isdir(location):
            # Collect every file under the directory tree.
            for dirPath, _, fileNames in os.walk(location):
                resolved.extend(os.path.join(dirPath, name) for name in fileNames)
        else:
            # Expand glob patterns (an existing plain file matches itself).
            resolved.extend(glob.glob(location))
    return resolved
def stopped(self):
    # Callback handed to BufferReader by upload(): lets the streaming request
    # poll whether processing has been cancelled (flag set by stopProcess()).
    return self._stopped
def processChunk(self, chunk):
    """
    Validate the node inputs, pack all resolved input files into a zip
    archive and upload it to Sketchfab.

    Raises:
        RuntimeError: on invalid inputs, upload failure or any unexpected error.
    """
    try:
        self._stopped = False
        chunk.logManager.start(chunk.node.verboseLevel.value)
        uploadFile = ''

        # --- input validation (limits mirror the parameter descriptions) ---
        if not chunk.node.inputFiles:
            chunk.logger.warning('Nothing to upload')
            return
        if chunk.node.apiToken.value == '':
            chunk.logger.error('Need API token.')
            raise RuntimeError()
        if len(chunk.node.title.value) > 48:
            chunk.logger.error('Title cannot be longer than 48 characters.')
            raise RuntimeError()
        if len(chunk.node.description.value) > 1024:
            chunk.logger.error('Description cannot be longer than 1024 characters.')
            raise RuntimeError()
        tags = [ i.value.replace(' ', '-') for i in chunk.node.tags.value.values() ]
        # Bug fix: reject as soon as ANY tag exceeds the limit. The previous
        # check used all(), which only triggered when every tag was too long.
        if any(len(i) > 48 for i in tags):
            chunk.logger.error('Tags cannot be longer than 48 characters.')
            raise RuntimeError()
        if len(tags) > 42:
            chunk.logger.error('Maximum of 42 separate tags.')
            raise RuntimeError()

        # --- metadata payload expected by the Sketchfab API ---
        data = {
            'name': chunk.node.title.value,
            'description': chunk.node.description.value,
            'license': chunk.node.license.value,
            'tags': str(tags),
            'isPublished': chunk.node.isPublished.value,
            'isInspectable': chunk.node.isInspectable.value,
            'private': chunk.node.isPrivate.value,
            'password': chunk.node.password.value
        }
        if chunk.node.category.value != 'none':
            data.update({'categories': chunk.node.category.value})
        chunk.logger.debug('Data to be sent: {}'.format(str(data)))

        # pack files into .zip to reduce file size and simplify process
        uploadFile = os.path.join(chunk.node.internalFolder, 'temp.zip')
        files = self.resolvedPaths(chunk.node.inputFiles.value)
        # Context manager guarantees the archive is finalized/closed even if a
        # write fails (the original left the handle open on error).
        with zipfile.ZipFile(uploadFile, 'w') as zf:
            for f in files:
                zf.write(f, os.path.basename(f))
        chunk.logger.debug('Files added to zip: {}'.format(str(files)))
        chunk.logger.debug('Created {}'.format(uploadFile))
        chunk.logger.info('File size: {}MB'.format(round(os.path.getsize(uploadFile)/(1024*1024), 3)))

        self.upload(chunk.node.apiToken.value, uploadFile, data, chunk)
        chunk.logger.info('Upload successful. Your model is being processed on Sketchfab. It may take some time to show up on your "models" page.')
    except Exception as e:
        chunk.logger.error(e)
        raise RuntimeError()
    finally:
        # Always delete the temporary archive, whatever the outcome.
        if os.path.isfile(uploadFile):
            os.remove(uploadFile)
            chunk.logger.debug('Deleted {}'.format(uploadFile))
        chunk.logManager.end()
def stopProcess(self, chunk):
    # Signal the in-flight upload that it should abort: BufferReader polls
    # this flag through the stopped() callback while streaming the request.
    self._stopped = True

View file

@ -1,8 +1,5 @@
__version__ = "2.0"
import json
import os
from meshroom.core import desc
@ -10,6 +7,59 @@ class StructureFromMotion(desc.CommandLineNode):
commandLine = 'aliceVision_incrementalSfM {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
This node will analyze feature matches to understand the geometric relationship behind all the 2D observations,
and infer the rigid scene structure (3D points) with the pose (position and orientation) and internal calibration of all cameras.
The pipeline is a growing reconstruction process (called incremental SfM): it first computes an initial two-view reconstruction that is iteratively extended by adding new views.
1/ Fuse 2-View Matches into Tracks
It fuses all feature matches between image pairs into tracks. Each track represents a candidate point in space, visible from multiple cameras.
However, at this step of the pipeline, it still contains many outliers.
2/ Initial Image Pair
It chooses the best initial image pair. This choice is critical for the quality of the final reconstruction.
It should indeed provide robust matches and contain reliable geometric information.
So, this image pair should maximize the number of matches and the repartition of the corresponding features in each image.
But at the same time, the angle between the cameras should also be large enough to provide reliable geometric information.
3/ Initial 2-View Geometry
It computes the fundamental matrix between the 2 selected images and considers that the first one is the origin of the coordinate system.
4/ Triangulate
Now with the pose of the 2 first cameras, it triangulates the corresponding 2D features into 3D points.
5/ Next Best View Selection
After that, it selects all the images that have enough associations with the features that are already reconstructed in 3D.
6/ Estimate New Cameras
Based on these 2D-3D associations it performs the resectioning of each of these new cameras.
The resectioning is a Perspective-n-Point algorithm (PnP) in a RANSAC framework to find the pose of the camera that validates most of the features associations.
On each camera, a non-linear minimization is performed to refine the pose.
7/ Triangulate
From these new cameras poses, some tracks become visible by 2 or more resected cameras and it triangulates them.
8/ Optimize
It performs a Bundle Adjustment to refine everything: extrinsics and intrinsics parameters of all cameras as well as the position of all 3D points.
It filters the results of the Bundle Adjustment by removing all observations that have high reprojection error or insufficient angles between observations.
9/ Loop from 5 to 9
As we have triangulated new points, we get more image candidates for next best views selection and we can iterate from 5 to 9.
It iterates like that, adding cameras and triangulating new 2D features into 3D points and removing 3D points that became invalidated, until we cannot localize new views.
## Online
[https://alicevision.org/#photogrammetry/sfm](https://alicevision.org/#photogrammetry/sfm)
'''
inputs = [
desc.File(
name='input',
@ -62,6 +112,18 @@ class StructureFromMotion(desc.CommandLineNode):
uid=[0],
advanced=True,
),
desc.ChoiceParam(
name='observationConstraint',
label='Observation Constraint',
description='Observation contraint mode used in the optimization:\n'
' * Basic: Use standard reprojection error in pixel coordinates\n'
' * Scale: Use reprojection error in pixel coordinates but relative to the feature scale',
value='Basic',
values=['Basic', 'Scale'],
exclusive=True,
uid=[0],
advanced=True,
),
desc.IntParam(
name='localizerEstimatorMaxIterations',
label='Localizer Max Ransac Iterations',
@ -116,6 +178,15 @@ class StructureFromMotion(desc.CommandLineNode):
range=(0, 50000, 1),
uid=[0],
),
desc.IntParam(
name='minNumberOfMatches',
label='Minimum Number of Matches',
description='Minimum number of matches per image pair (and per feature type). \n'
'This can be useful to have a meaningful reconstruction with accurate keypoints. 0 means no limit.',
value=0,
range=(0, 50000, 1),
uid=[0],
),
desc.IntParam(
name='minInputTrackLength',
label='Min Input Track Length',
@ -207,6 +278,14 @@ class StructureFromMotion(desc.CommandLineNode):
value=False,
uid=[0],
),
desc.BoolParam(
name='filterTrackForks',
label='Filter Track Forks',
description='Enable/Disable the track forks removal. A track contains a fork when incoherent matches \n'
'lead to multiple features in the same image for a single track. \n',
value=True,
uid=[0],
),
desc.File(
name='initialPairA',
label='Initial Pair A',
@ -265,30 +344,3 @@ class StructureFromMotion(desc.CommandLineNode):
uid=[],
),
]
@staticmethod
def getResults(node):
    """
    Parse the SfM report file of a node.

    Args:
        node: node whose ``outputViewsAndPoses`` attribute points to the report file.

    Returns:
        tuple of dict: (views keyed by viewId, poses keyed by poseId,
        intrinsics keyed by intrinsicId); three empty dicts when the
        report file does not exist.
    """
    reportFile = node.outputViewsAndPoses.value
    if not os.path.exists(reportFile):
        return {}, {}, {}
    with open(reportFile) as jsonFile:
        report = json.load(jsonFile)
    views = {view['viewId']: view for view in report['views']}
    poses = {pose['poseId']: pose['pose'] for pose in report['poses']}
    intrinsics = {intrinsic['intrinsicId']: intrinsic for intrinsic in report['intrinsics']}
    return views, poses, intrinsics

View file

@ -7,6 +7,20 @@ class Texturing(desc.CommandLineNode):
commandLine = 'aliceVision_texturing {allParams}'
cpu = desc.Level.INTENSIVE
ram = desc.Level.INTENSIVE
documentation = '''
This node computes the texturing on the mesh.
If the mesh has no associated UV, it automatically computes UV maps.
For each triangle, it uses the visibility information associated to each vertex to retrieve the texture candidates.
It selects the best cameras based on the resolution covering the triangle. Finally, it averages the pixel values using multiple bands in the frequency domain.
Many cameras are contributing to the low frequencies and only the best ones contributes to the high frequencies.
## Online
[https://alicevision.org/#photogrammetry/texturing](https://alicevision.org/#photogrammetry/texturing)
'''
inputs = [
desc.File(
name='input',

View file

@ -2,12 +2,14 @@ import logging
import os
import argparse
from PySide2.QtCore import Qt, QUrl, Slot, QJsonValue, Property, qInstallMessageHandler, QtMsgType
from PySide2.QtCore import Qt, QUrl, Slot, QJsonValue, Property, Signal, qInstallMessageHandler, QtMsgType, QSettings
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QApplication
import meshroom
from meshroom.core import nodesDesc
from meshroom.core import pyCompatibility
from meshroom.ui import components
from meshroom.ui.components.clipboard import ClipboardHelper
from meshroom.ui.components.filepath import FilepathHelper
@ -141,6 +143,7 @@ class MeshroomApp(QApplication):
if args.project:
r.load(args.project)
self.addRecentProjectFile(args.project)
else:
r.new()
@ -164,9 +167,100 @@ class MeshroomApp(QApplication):
"Invalid value: '{}'".format(args.save))
os.mkdir(projectFolder)
r.saveAs(args.save)
self.addRecentProjectFile(args.save)
self.engine.load(os.path.normpath(url))
def _recentProjectFiles(self):
    """
    Read the list of recently opened project files from the persistent
    application settings (RecentFiles/Projects array).

    Returns:
        list of str: stored project file paths (empty entries skipped).
    """
    settings = QSettings()
    settings.beginGroup("RecentFiles")
    count = settings.beginReadArray("Projects")
    filepaths = []
    for index in range(count):
        settings.setArrayIndex(index)
        entry = settings.value("filepath")
        # Skip empty/None entries left over from corrupt settings.
        if entry:
            filepaths.append(entry)
    settings.endArray()
    return filepaths
@Slot(str)
@Slot(QUrl)
def addRecentProjectFile(self, projectFile):
    """
    Add a project file to the top of the recent-files list stored in QSettings
    and notify listeners via recentProjectFilesChanged.

    Args:
        projectFile (str or QUrl): path or URL of the project file.

    Raises:
        TypeError: if projectFile is neither a string nor a QUrl.
    """
    if not isinstance(projectFile, (QUrl, pyCompatibility.basestring)):
        raise TypeError("Unexpected data type: {}".format(projectFile.__class__))
    # Normalize to a local file path, falling back to the raw URL string
    # for non-local URLs.
    if isinstance(projectFile, QUrl):
        projectFileNorm = projectFile.toLocalFile()
        if not projectFileNorm:
            projectFileNorm = projectFile.toString()
    else:
        projectFileNorm = QUrl(projectFile).toLocalFile()
        if not projectFileNorm:
            projectFileNorm = QUrl.fromLocalFile(projectFile).toLocalFile()

    projects = self._recentProjectFiles()

    # remove duplicates while preserving order
    from collections import OrderedDict
    uniqueProjects = OrderedDict.fromkeys(projects)
    projects = list(uniqueProjects)
    # remove previous usage of the value
    if projectFileNorm in uniqueProjects:
        projects.remove(projectFileNorm)
    # add the new value in the first place
    projects.insert(0, projectFileNorm)
    # keep only the 20 most recent entries
    projects = projects[0:20]

    # rewrite the whole settings array (beginWriteArray truncates/replaces it)
    settings = QSettings()
    settings.beginGroup("RecentFiles")
    size = settings.beginWriteArray("Projects")
    for i, p in enumerate(projects):
        settings.setArrayIndex(i)
        settings.setValue("filepath", p)
    settings.endArray()
    settings.sync()

    self.recentProjectFilesChanged.emit()
@Slot(str)
@Slot(QUrl)
def removeRecentProjectFile(self, projectFile):
    """
    Remove a project file from the recent-files list stored in QSettings
    and notify listeners via recentProjectFilesChanged. No-op when the
    file is not in the list.

    Args:
        projectFile (str or QUrl): path or URL of the project file.

    Raises:
        TypeError: if projectFile is neither a string nor a QUrl.
    """
    if not isinstance(projectFile, (QUrl, pyCompatibility.basestring)):
        raise TypeError("Unexpected data type: {}".format(projectFile.__class__))
    # Normalize to a local file path, falling back to the raw URL string
    # for non-local URLs (same normalization as addRecentProjectFile).
    if isinstance(projectFile, QUrl):
        projectFileNorm = projectFile.toLocalFile()
        if not projectFileNorm:
            projectFileNorm = projectFile.toString()
    else:
        projectFileNorm = QUrl(projectFile).toLocalFile()
        if not projectFileNorm:
            projectFileNorm = QUrl.fromLocalFile(projectFile).toLocalFile()

    projects = self._recentProjectFiles()

    # remove duplicates while preserving order
    from collections import OrderedDict
    uniqueProjects = OrderedDict.fromkeys(projects)
    projects = list(uniqueProjects)
    # nothing to do if the file is not in the list
    if projectFileNorm not in uniqueProjects:
        return
    projects.remove(projectFileNorm)

    # rewrite the whole settings array (beginWriteArray truncates/replaces it)
    settings = QSettings()
    settings.beginGroup("RecentFiles")
    size = settings.beginWriteArray("Projects")
    for i, p in enumerate(projects):
        settings.setArrayIndex(i)
        settings.setValue("filepath", p)
    settings.endArray()
    settings.sync()

    self.recentProjectFilesChanged.emit()
@Slot(str, result=str)
def markdownToHtml(self, md):
"""
@ -216,3 +310,7 @@ class MeshroomApp(QApplication):
"onlineUrl": "https://raw.githubusercontent.com/alicevision/AliceVision/develop/COPYING.md"
}
]
recentProjectFilesChanged = Signal()
recentProjectFiles = Property("QVariantList", _recentProjectFiles, notify=recentProjectFilesChanged)

View file

@ -197,6 +197,30 @@ class GraphLayout(QObject):
""" Perform auto-layout on the whole graph. """
self.autoLayout()
def positionBoundingBox(self, nodes=None):
    """
    Return the bounding box of the node positions (node size not included).

    Args:
        nodes (list of Node): the list of nodes or the whole graph if None

    Returns:
        list of int: the resulting bounding box (x, y, width, height);
        [0, 0, 0, 0] when there are no nodes.
    """
    if nodes is None:
        # Materialize as a list: a dict values() view is not indexable.
        nodes = list(self.graph.nodes.values())
    if not nodes:
        # Degenerate empty bounding box instead of an IndexError on nodes[0].
        return [0, 0, 0, 0]
    first = nodes[0]
    bbox = [first.x, first.y, first.x, first.y]
    for n in nodes:
        bbox[0] = min(bbox[0], n.x)
        bbox[1] = min(bbox[1], n.y)
        bbox[2] = max(bbox[2], n.x)
        bbox[3] = max(bbox[3], n.y)
    # Convert the max corner into width/height.
    bbox[2] -= bbox[0]
    bbox[3] -= bbox[1]
    return bbox
def boundingBox(self, nodes=None):
"""
Return bounding box for a set of nodes as (x, y, width, height).
@ -205,22 +229,12 @@ class GraphLayout(QObject):
nodes (list of Node): the list of nodes or the whole graph if None
Returns:
tuple of int: the resulting bounding box (x, y, width, height)
list of int: the resulting bounding box (x, y, width, height)
"""
if nodes is None:
nodes = self.graph.nodes.values()
first = nodes[0]
bbox = [first.x, first.y, first.x + self._nodeWidth, first.y + self._nodeHeight]
for n in nodes:
bbox[0] = min(bbox[0], n.x)
bbox[1] = min(bbox[1], n.y)
bbox[2] = max(bbox[2], n.x + self._nodeWidth)
bbox[3] = max(bbox[3], n.y + self._nodeHeight)
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
return tuple(bbox)
bbox = self.positionBoundingBox(nodes)
bbox[2] += self._nodeWidth
bbox[3] += self._nodeHeight
return bbox
def setDepthMode(self, mode):
""" Set node depth mode to use. """
@ -274,7 +288,14 @@ class UIGraph(QObject):
# perform auto-layout if graph does not provide nodes positions
if Graph.IO.Features.NodesPositions not in self._graph.fileFeatures:
self._layout.reset()
self._undoStack.clear() # clear undo-stack after layout
# clear undo-stack after layout
self._undoStack.clear()
else:
bbox = self._layout.positionBoundingBox()
if bbox[2] == 0 and bbox[3] == 0:
self._layout.reset()
# clear undo-stack after layout
self._undoStack.clear()
self.graphChanged.emit()
def onGraphUpdated(self):
@ -317,16 +338,14 @@ class UIGraph(QObject):
self.stopExecution()
self._chunksMonitor.stop()
def load(self, filepath, setupProjectFile=True):
@Slot(str, result=bool)
def loadGraph(self, filepath, setupProjectFile=True):
g = Graph('')
g.load(filepath, setupProjectFile)
status = g.load(filepath, setupProjectFile)
if not os.path.exists(g.cacheDir):
os.mkdir(g.cacheDir)
self.setGraph(g)
@Slot(QUrl)
def loadUrl(self, url):
self.load(url.toLocalFile())
return status
@Slot(QUrl)
def saveAs(self, url):

Binary file not shown.

After

Width:  |  Height:  |  Size: 102 B

View file

@ -0,0 +1,57 @@
import QtQuick 2.9
import QtQuick.Controls 2.3
import QtQuick.Layouts 1.3
import MaterialIcons 2.2
import QtPositioning 5.8
import QtLocation 5.9
import QtCharts 2.13
import Controls 1.0
import Utils 1.0
// ChartView with basic mouse interaction: click anywhere on the plot area
// to reset the zoom. (Wheel-zoom is drafted below but disabled.)
ChartView {
    id: root
    antialiasing: true

    // Invisible overlay matching the chart's plot area, so mouse events are
    // only caught over the plotting surface (not the axes or legend).
    Rectangle {
        id: plotZone
        x: root.plotArea.x
        y: root.plotArea.y
        width: root.plotArea.width
        height: root.plotArea.height
        color: "transparent"

        MouseArea {
            anchors.fill: parent
            property double degreeToScale: 1.0 / 120.0 // default mouse scroll is 15 degree
            acceptedButtons: Qt.LeftButton | Qt.RightButton | Qt.MiddleButton

            // Draft of wheel-based zoom, kept for reference but disabled.
            // onWheel: {
            //     console.warn("root.plotArea before: " + root.plotArea)
            //     var zoomFactor = wheel.angleDelta.y > 0 ? 1.0 / (1.0 + wheel.angleDelta.y * degreeToScale) : (1.0 + Math.abs(wheel.angleDelta.y) * degreeToScale)
            //     // var mouse_screen = Qt.point(wheel.x, wheel.y)
            //     var mouse_screen = mapToItem(root, wheel.x, wheel.y)
            //     var mouse_normalized = Qt.point(mouse_screen.x / plotZone.width, mouse_screen.y / plotZone.height)
            //     var mouse_plot = Qt.point(mouse_normalized.x * plotZone.width, mouse_normalized.y * plotZone.height)
            //     // var p = mapToValue(mouse_screen, root.series(0))
            //     // var pMin = mapToValue(mouse_screen, Qt.point(root.axisX().min, root.axisY().min))
            //     // var pMax = mapToValue(mouse_screen, Qt.point(root.axisX().max, root.axisY().max))
            //     // console.warn("p: " + p)
            //     // Qt.rect()
            //     var r = Qt.rect(mouse_plot.x, mouse_plot.y, plotZone.width * zoomFactor, plotZone.height * zoomFactor)
            //     //var r = Qt.rect(pMin.x, pMin.y, (pMax.x-pMin.x) / 2, (pMax.y-pMin.y) / 2)
            //     root.zoomIn(r)
            // }

            // Any click restores the default (unzoomed) view.
            onClicked: {
                root.zoomReset();
            }
        }
    }
}

View file

@ -2,3 +2,4 @@ module Charts
ChartViewLegend 1.0 ChartViewLegend.qml
ChartViewCheckBox 1.0 ChartViewCheckBox.qml
InteractiveChartView 1.0 InteractiveChartView.qml

View file

@ -18,6 +18,8 @@ Page {
property alias headerBar: headerLayout.data
property alias footerContent: footerLayout.data
property alias icon: iconPlaceHolder.data
property alias loading: loadingIndicator.running
property alias loadingText: loadingLabal.text
clip: true
@ -46,18 +48,37 @@ Page {
width: childrenRect.width
height: childrenRect.height
Layout.alignment: Qt.AlignVCenter
visible: icon != ""
visible: icon !== ""
}
// Title
Label {
text: root.title
Layout.fillWidth: true
elide: Text.ElideRight
topPadding: m.vPadding
bottomPadding: m.vPadding
}
//
Item {
width: 10
}
// Feature loading status
BusyIndicator {
id: loadingIndicator
padding: 0
implicitWidth: 12
implicitHeight: 12
running: false
}
Label {
id: loadingLabal
text: ""
font.italic: true
}
Item {
Layout.fillWidth: true
}
// Header menu
Row { id: headerLayout }
}
}

View file

@ -10,7 +10,6 @@ import Utils 1.0
ListView {
id: root
property variant attributes: null
property bool readOnly: false
property int labelWidth: 180
@ -23,10 +22,8 @@ ListView {
clip: true
ScrollBar.vertical: ScrollBar { id: scrollBar }
model: attributes
delegate: Loader {
active: !object.desc.advanced || GraphEditorSettings.showAdvancedAttributes
active: object.enabled && (!object.desc.advanced || GraphEditorSettings.showAdvancedAttributes)
visible: active
height: item ? item.implicitHeight : -spacing // compensate for spacing if item is hidden

View file

@ -235,13 +235,27 @@ RowLayout {
property string displayValue: String(slider.active && slider.item.pressed ? slider.item.formattedValue : attribute.value)
text: displayValue
selectByMouse: true
// Note: Use autoScroll as a workaround for alignment
// When the value change keep the text align to the left to be able to read the most important part
// of the number. When we are editing (item is in focus), the content should follow the editing.
autoScroll: activeFocus
validator: attribute.type == "FloatParam" ? doubleValidator : intValidator
onEditingFinished: setTextFieldAttribute(text)
onAccepted: setTextFieldAttribute(text)
onAccepted: {
setTextFieldAttribute(text)
// When the text is too long, display the left part
// (with the most important values and cut the floating point details)
ensureVisible(0)
}
Component.onDestruction: {
if(activeFocus)
setTextFieldAttribute(text)
}
Component.onCompleted: {
// When the text is too long, display the left part
// (with the most important values and cut the floating point details)
ensureVisible(0)
}
}
Loader {
@ -356,7 +370,7 @@ RowLayout {
Component.onCompleted: {
var cpt = Qt.createComponent("AttributeEditor.qml");
var obj = cpt.createObject(groupItem,
{'attributes': Qt.binding(function() { return attribute.value }),
{'model': Qt.binding(function() { return attribute.value }),
'readOnly': Qt.binding(function() { return root.readOnly }),
'labelWidth': 100, // reduce label width for children (space gain)
})

View file

@ -0,0 +1,66 @@
import QtQuick 2.11
import QtQuick.Controls 2.3
import QtQuick.Controls 1.4 as Controls1 // SplitView
import QtQuick.Layouts 1.3
import MaterialIcons 2.2
import Controls 1.0
import "common.js" as Common
/**
* ChunkListView
*/
// Vertical list of a node's computation chunks; each entry shows the chunk
// index with a colored strip reflecting its computation status.
ListView {
    id: chunksLV

    // model: node.chunks
    // The chunk object currently selected in the list (undefined when empty).
    property variant currentChunk: currentItem ? currentItem.chunk : undefined

    width: 60
    Layout.fillHeight: true
    highlightFollowsCurrentItem: true
    keyNavigationEnabled: true
    focus: true
    currentIndex: 0

    // Emitted when the user selects another chunk; the owner view is
    // responsible for updating currentIndex.
    signal changeCurrentChunk(int chunkIndex)

    header: Component {
        Label {
            width: chunksLV.width
            elide: Label.ElideRight
            text: "Chunks"
            padding: 4
            z: 10
            background: Rectangle { color: parent.palette.window }
        }
    }
    highlight: Component {
        Rectangle {
            color: activePalette.highlight
            opacity: 0.3
            z: 2
        }
    }
    highlightMoveDuration: 0
    highlightResizeDuration: 0

    delegate: ItemDelegate {
        id: chunkDelegate
        property var chunk: object
        text: index
        width: parent.width
        leftPadding: 8
        onClicked: {
            chunksLV.forceActiveFocus()
            chunksLV.changeCurrentChunk(index)
        }
        // Status strip: color encodes the chunk's computation state.
        Rectangle {
            width: 4
            height: parent.height
            color: Common.getChunkColor(parent.chunk)
        }
    }
}

View file

@ -16,7 +16,7 @@ MessageDialog {
// alias to underlying compatibilityNodes model
readonly property var nodesModel: uigraph.graph.compatibilityNodes
// the total number of compatibility issues
readonly property int issueCount: nodesModel.count
readonly property int issueCount: (nodesModel != undefined) ? nodesModel.count : 0
// the number of CompatibilityNodes that can be upgraded
readonly property int upgradableCount: {
var count = 0

View file

@ -0,0 +1,38 @@
import QtQuick 2.11
import QtQuick.Controls 2.3
import QtQuick.Layouts 1.3
import Controls 1.0
import "common.js" as Common
/**
* Displays Node documentation
*/
// Scrollable, read-only Markdown view of a node's documentation string.
FocusScope {
    id: root

    // The node whose 'documentation' text is displayed.
    property variant node

    SystemPalette { id: activePalette }

    ScrollView {
        width: parent.width
        height: parent.height
        ScrollBar.vertical.policy: ScrollBar.AlwaysOn
        ScrollBar.horizontal.policy: ScrollBar.AlwaysOff
        clip: true

        TextEdit {
            // Bind to the ScrollView's width (parent is the content item).
            width: parent.parent.width
            height: parent.height
            padding: 8
            textFormat: TextEdit.MarkdownText
            // Read-only display, but text remains selectable/copyable.
            selectByMouse: true
            selectionColor: activePalette.highlight
            color: activePalette.text
            text: node.documentation
            wrapMode: TextEdit.Wrap
        }
    }
}

View file

@ -19,6 +19,15 @@ Panel {
signal attributeDoubleClicked(var mouse, var attribute)
signal upgradeRequest()
Item {
id: m
property int chunkCurrentIndex: 0
}
onNodeChanged: {
m.chunkCurrentIndex = 0 // Needed to avoid invalid state of ChunksListView
}
title: "Node" + (node !== null ? " - <b>" + node.label + "</b>" : "")
icon: MaterialLabel { text: MaterialIcons.tune }
@ -113,8 +122,7 @@ Panel {
currentIndex: tabBar.currentIndex
AttributeEditor {
Layout.fillWidth: true
attributes: root.node.attributes
model: root.node.attributes
readOnly: root.readOnly || root.isCompatibilityNode
onAttributeDoubleClicked: root.attributeDoubleClicked(mouse, attribute)
onUpgradeRequest: root.upgradeRequest()
@ -122,8 +130,27 @@ Panel {
NodeLog {
id: nodeLog
node: root.node
chunkCurrentIndex: m.chunkCurrentIndex
onChangeCurrentChunk: { m.chunkCurrentIndex = chunkIndex }
}
Layout.fillHeight: true
NodeStatistics {
id: nodeStatistics
node: root.node
chunkCurrentIndex: m.chunkCurrentIndex
onChangeCurrentChunk: { m.chunkCurrentIndex = chunkIndex }
}
NodeStatus {
id: nodeStatus
node: root.node
chunkCurrentIndex: m.chunkCurrentIndex
onChangeCurrentChunk: { m.chunkCurrentIndex = chunkIndex }
}
NodeDocumentation {
id: nodeDocumentation
Layout.fillWidth: true
node: root.node
}
@ -152,6 +179,24 @@ Panel {
leftPadding: 8
rightPadding: leftPadding
}
TabButton {
text: "Statistics"
width: implicitWidth
leftPadding: 8
rightPadding: leftPadding
}
TabButton {
text: "Status"
width: implicitWidth
leftPadding: 8
rightPadding: leftPadding
}
TabButton {
text: "Documentation"
width: implicitWidth
leftPadding: 8
rightPadding: leftPadding
}
}
}
}

View file

@ -14,7 +14,10 @@ import "common.js" as Common
* if the related NodeChunk is being computed.
*/
FocusScope {
id: root
property variant node
property alias chunkCurrentIndex: chunksLV.currentIndex
signal changeCurrentChunk(int chunkIndex)
SystemPalette { id: activePalette }
@ -22,127 +25,44 @@ FocusScope {
anchors.fill: parent
// The list of chunks
ListView {
ChunksListView {
id: chunksLV
property variant currentChunk: currentItem ? currentItem.chunk : undefined
width: 60
Layout.fillHeight: true
model: node.chunks
highlightFollowsCurrentItem: true
keyNavigationEnabled: true
focus: true
currentIndex: 0
header: Component {
Label {
width: chunksLV.width
elide: Label.ElideRight
text: "Chunks"
padding: 4
z: 10
background: Rectangle { color: parent.palette.window }
}
onChangeCurrentChunk: root.changeCurrentChunk(chunkIndex)
}
highlight: Component {
Rectangle {
color: activePalette.highlight
opacity: 0.3
z: 2
}
}
highlightMoveDuration: 0
highlightResizeDuration: 0
delegate: ItemDelegate {
id: chunkDelegate
property var chunk: object
text: index
width: parent.width
leftPadding: 8
onClicked: {
chunksLV.forceActiveFocus()
chunksLV.currentIndex = index
}
Rectangle {
width: 4
height: parent.height
color: Common.getChunkColor(parent.chunk)
}
}
}
ColumnLayout {
Loader {
id: componentLoader
clip: true
Layout.fillWidth: true
Layout.fillHeight: true
Layout.margins: 1
property url source
spacing: 1
TabBar {
id: fileSelector
Layout.fillWidth: true
property string currentFile: chunksLV.currentChunk ? chunksLV.currentChunk[currentItem.fileProperty] : ""
property string currentFile: chunksLV.currentChunk ? chunksLV.currentChunk["logFile"] : ""
onCurrentFileChanged: {
// only set text file viewer source when ListView is fully ready
// (either empty or fully populated with a valid currentChunk)
// to avoid going through an empty url when switching between two nodes
if(!chunksLV.count || chunksLV.currentChunk)
logComponentLoader.source = Filepath.stringToUrl(currentFile);
componentLoader.source = Filepath.stringToUrl(currentFile);
}
TabButton {
property string fileProperty: "logFile"
text: "Output"
padding: 4
}
TabButton {
property string fileProperty: "statisticsFile"
text: "Statistics"
padding: 4
}
TabButton {
property string fileProperty: "statusFile"
text: "Status"
padding: 4
}
}
Loader {
id: logComponentLoader
clip: true
Layout.fillWidth: true
Layout.fillHeight: true
property url source
sourceComponent: fileSelector.currentItem.fileProperty === "statisticsFile" ? statViewerComponent : textFileViewerComponent
sourceComponent: textFileViewerComponent
}
Component {
id: textFileViewerComponent
TextFileViewer {
id: textFileViewer
source: logComponentLoader.source
source: componentLoader.source
Layout.fillWidth: true
Layout.fillHeight: true
autoReload: chunksLV.currentChunk !== undefined && chunksLV.currentChunk.statusName === "RUNNING"
// source is set in fileSelector
}
}
Component {
id: statViewerComponent
StatViewer {
id: statViewer
Layout.fillWidth: true
Layout.fillHeight: true
source: logComponentLoader.source
}
}
}
}
}

View file

@ -0,0 +1,65 @@
import QtQuick 2.11
import QtQuick.Controls 2.3
import QtQuick.Controls 1.4 as Controls1 // SplitView
import QtQuick.Layouts 1.3
import MaterialIcons 2.2
import Controls 1.0
import "common.js" as Common
/**
* NodeLog displays log and statistics data of Node's chunks (NodeChunks)
*
* To ease monitoring, it provides periodic auto-reload of the opened file
* if the related NodeChunk is being computed.
*/
// Displays the statistics file of a node's chunks: a chunk selector on the
// left and a StatViewer rendering the selected chunk's statisticsFile.
FocusScope {
    id: root

    // The node whose chunk statistics are displayed.
    property variant node
    // Expose/share the selected chunk index with sibling views.
    property alias chunkCurrentIndex: chunksLV.currentIndex
    signal changeCurrentChunk(int chunkIndex)

    SystemPalette { id: activePalette }

    Controls1.SplitView {
        anchors.fill: parent

        // The list of chunks
        ChunksListView {
            id: chunksLV
            Layout.fillHeight: true
            model: node.chunks
            onChangeCurrentChunk: root.changeCurrentChunk(chunkIndex)
        }

        Loader {
            id: componentLoader
            clip: true
            Layout.fillWidth: true
            Layout.fillHeight: true
            property url source
            // Path of the selected chunk's statistics file ("" when no chunk).
            property string currentFile: chunksLV.currentChunk ? chunksLV.currentChunk["statisticsFile"] : ""
            onCurrentFileChanged: {
                // only set text file viewer source when ListView is fully ready
                // (either empty or fully populated with a valid currentChunk)
                // to avoid going through an empty url when switching between two nodes
                if(!chunksLV.count || chunksLV.currentChunk)
                    componentLoader.source = Filepath.stringToUrl(currentFile);
            }
            sourceComponent: statViewerComponent
        }

        Component {
            id: statViewerComponent
            StatViewer {
                id: statViewer
                Layout.fillWidth: true
                Layout.fillHeight: true
                source: componentLoader.source
            }
        }
    }
}

View file

@ -0,0 +1,187 @@
import QtQuick 2.11
import QtQuick.Controls 2.3
import QtQuick.Controls 1.4 as Controls1 // SplitView
import QtQuick.Layouts 1.3
import MaterialIcons 2.2
import Controls 1.0
import "common.js" as Common
/**
* NodeLog displays log and statistics data of Node's chunks (NodeChunks)
*
* To ease monitoring, it provides periodic auto-reload of the opened file
* if the related NodeChunk is being computed.
*/
// Displays the status file of a node's chunks as a key/value table:
// a chunk selector on the left, and a two-column list (resizable split)
// built from the JSON content of the selected chunk's statusFile.
FocusScope {
    id: root

    // The node whose chunk status is displayed.
    property variant node
    // Expose/share the selected chunk index with sibling views.
    property alias chunkCurrentIndex: chunksLV.currentIndex
    signal changeCurrentChunk(int chunkIndex)

    SystemPalette { id: activePalette }

    Controls1.SplitView {
        anchors.fill: parent

        // The list of chunks
        ChunksListView {
            id: chunksLV
            Layout.fillHeight: true
            model: node.chunks
            onChangeCurrentChunk: root.changeCurrentChunk(chunkIndex)
        }

        Loader {
            id: componentLoader
            clip: true
            Layout.fillWidth: true
            Layout.fillHeight: true
            property url source
            // Path of the selected chunk's status file ("" when no chunk).
            property string currentFile: chunksLV.currentChunk ? chunksLV.currentChunk["statusFile"] : ""
            onCurrentFileChanged: {
                // only set text file viewer source when ListView is fully ready
                // (either empty or fully populated with a valid currentChunk)
                // to avoid going through an empty url when switching between two nodes
                if(!chunksLV.count || chunksLV.currentChunk)
                    componentLoader.source = Filepath.stringToUrl(currentFile);
            }
            sourceComponent: statViewerComponent
        }

        Component {
            id: statViewerComponent
            Item {
                id: statusViewer
                property url source: componentLoader.source
                // Last-Modified header of the previous read; used to skip
                // reparsing when the file has not changed.
                property var lastModified: undefined
                onSourceChanged: {
                    statusListModel.readSourceFile()
                }

                ListModel {
                    id: statusListModel

                    // Load the status JSON file and populate the model with
                    // one {key, value} entry per top-level JSON property.
                    function readSourceFile() {
                        // make sure we are trying to load a statistics file
                        if(!Filepath.urlToString(source).endsWith("status"))
                            return;
                        var xhr = new XMLHttpRequest;
                        xhr.open("GET", source);
                        xhr.onreadystatechange = function() {
                            if (xhr.readyState === XMLHttpRequest.DONE && xhr.status === 200) {
                                // console.warn("StatusListModel: read valid file")
                                if(lastModified === undefined || lastModified !== xhr.getResponseHeader('Last-Modified')) {
                                    lastModified = xhr.getResponseHeader('Last-Modified')
                                    try {
                                        var jsonObject = JSON.parse(xhr.responseText);
                                        var entries = [];
                                        // prepare data to populate the ListModel from the input json object
                                        for(var key in jsonObject)
                                        {
                                            var entry = {};
                                            entry["key"] = key;
                                            entry["value"] = String(jsonObject[key]);
                                            entries.push(entry);
                                        }
                                        // reset the model with prepared data (limit to one update event)
                                        statusListModel.clear();
                                        statusListModel.append(entries);
                                    }
                                    catch(exc)
                                    {
                                        // console.warn("StatusListModel: failed to read file")
                                        lastModified = undefined;
                                        statusListModel.clear();
                                    }
                                }
                            }
                            else
                            {
                                // console.warn("StatusListModel: invalid file")
                                lastModified = undefined;
                                statusListModel.clear();
                            }
                        };
                        xhr.send();
                    }
                }

                // Key/value table rendering the parsed status entries.
                ListView {
                    id: statusListView
                    anchors.fill: parent
                    spacing: 3
                    model: statusListModel

                    delegate: Rectangle {
                        color: activePalette.window
                        width: parent.width
                        height: childrenRect.height
                        RowLayout {
                            width: parent.width
                            // Key column; its width follows the drag handle.
                            Rectangle {
                                id: statusKey
                                anchors.margins: 2
                                // height: statusValue.height
                                color: Qt.darker(activePalette.window, 1.1)
                                Layout.preferredWidth: sizeHandle.x
                                Layout.minimumWidth: 10.0 * Qt.application.font.pixelSize
                                Layout.maximumWidth: 15.0 * Qt.application.font.pixelSize
                                Layout.fillWidth: false
                                Layout.fillHeight: true
                                Label {
                                    text: key
                                    anchors.fill: parent
                                    anchors.top: parent.top
                                    topPadding: 4
                                    leftPadding: 6
                                    verticalAlignment: TextEdit.AlignTop
                                    elide: Text.ElideRight
                                }
                            }
                            // Value column: selectable, word-wrapped text.
                            TextArea {
                                id: statusValue
                                text: value
                                anchors.margins: 2
                                Layout.fillWidth: true
                                wrapMode: Label.WrapAtWordBoundaryOrAnywhere
                                textFormat: TextEdit.PlainText
                                readOnly: true
                                selectByMouse: true
                                background: Rectangle { anchors.fill: parent; color: Qt.darker(activePalette.window, 1.05) }
                            }
                        }
                    }
                }

                // Categories resize handle
                Rectangle {
                    id: sizeHandle
                    height: parent.contentHeight
                    width: 1
                    x: parent.width * 0.2
                    MouseArea {
                        anchors.fill: parent
                        anchors.margins: -4
                        cursorShape: Qt.SizeHorCursor
                        drag {
                            target: parent
                            axis: Drag.XAxis
                            threshold: 0
                            minimumX: statusListView.width * 0.2
                            maximumX: statusListView.width * 0.8
                        }
                    }
                }
            }
        }
    }
}

View file

@ -6,6 +6,7 @@ import Utils 1.0
import Charts 1.0
import MaterialIcons 2.2
Item {
id: root
@ -364,7 +365,7 @@ Item {
}
}
ChartView {
InteractiveChartView {
id: cpuChart
Layout.fillWidth: true
@ -419,7 +420,7 @@ Item {
ColumnLayout {
ChartView {
InteractiveChartView {
id: ramChart
margins.top: 0
margins.bottom: 0
@ -487,7 +488,7 @@ Item {
ColumnLayout {
ChartView {
InteractiveChartView {
id: gpuChart
Layout.fillWidth: true

View file

@ -56,6 +56,12 @@ Item {
enabled: !root.readOnly
onClicked: removeRequest()
}
MenuItem {
text: "Define As Center Image"
property var activeNode: _reconstruction.activeNodes.get("SfMTransform").node
enabled: !root.readOnly && _viewpoint.viewId != -1 && _reconstruction && activeNode
onClicked: activeNode.attribute("transformation").value = _viewpoint.viewId.toString()
}
}
ColumnLayout {

View file

@ -16,13 +16,15 @@ Panel {
property variant cameraInits
property variant cameraInit
property variant tempCameraInit
readonly property alias currentItem: grid.currentItem
readonly property string currentItemSource: grid.currentItem ? grid.currentItem.source : ""
readonly property var currentItemMetadata: grid.currentItem ? grid.currentItem.metadata : undefined
readonly property int centerViewId: (_reconstruction && _reconstruction.sfmTransform) ? parseInt(_reconstruction.sfmTransform.attribute("transformation").value) : 0
property int defaultCellSize: 160
property int currentIndex: 0
property bool readOnly: false
readonly property variant viewpoints: cameraInit.attribute('viewpoints').value
signal removeImageRequest(var attribute)
signal filesDropped(var drop, var augmentSfm)
@ -30,6 +32,17 @@ Panel {
title: "Images"
implicitWidth: (root.defaultCellSize + 2) * 2
function changeCurrentIndex(newIndex) {
_reconstruction.cameraInitIndex = newIndex
}
QtObject {
id: m
property variant currentCameraInit: _reconstruction.tempCameraInit ? _reconstruction.tempCameraInit : root.cameraInit
property variant viewpoints: currentCameraInit ? currentCameraInit.attribute('viewpoints').value : undefined
property bool readOnly: root.readOnly || displayHDR.checked
}
headerBar: RowLayout {
MaterialToolButton {
text: MaterialIcons.more_vert
@ -99,7 +112,7 @@ Panel {
model: SortFilterDelegateModel {
id: sortedModel
model: _reconstruction.viewpoints
model: m.viewpoints
sortRole: "path"
// TODO: provide filtering on reconstruction status
// filterRole: _reconstruction.sfmReport ? "reconstructed" : ""
@ -123,7 +136,7 @@ Panel {
viewpoint: object.value
width: grid.cellWidth
height: grid.cellHeight
readOnly: root.readOnly
readOnly: m.readOnly
displayViewId: displayViewIdsAction.checked
isCurrentItem: GridView.isCurrentItem
@ -178,6 +191,16 @@ Panel {
}
}
// Center of SfMTransform
Loader {
id: sfmTransformIndicator
active: viewpoint && (viewpoint.get("viewId").value == centerViewId)
sourceComponent: ImageBadge {
text: MaterialIcons.gamepad
ToolTip.text: "Camera used to define the center of the scene."
}
}
Item { Layout.fillWidth: true }
// Reconstruction status indicator
@ -202,9 +225,9 @@ Panel {
{
event.accepted = true
if(event.key == Qt.Key_Right)
root.currentIndex = Math.min(root.cameraInits.count - 1, root.currentIndex + 1)
root.changeCurrentIndex(Math.min(root.cameraInits.count - 1, root.currentIndex + 1))
else if(event.key == Qt.Key_Left)
root.currentIndex = Math.max(0, root.currentIndex - 1)
root.changeCurrentIndex(Math.max(0, root.currentIndex - 1))
}
}
@ -227,7 +250,7 @@ Panel {
DropArea {
id: dropArea
anchors.fill: parent
enabled: !root.readOnly
enabled: !m.readOnly
keys: ["text/uri-list"]
// TODO: onEntered: call specific method to filter files based on extension
onDropped: {
@ -274,7 +297,7 @@ Panel {
text: "Augment Reconstruction"
font.bold: true
wrapMode: Text.WrapAtWordBoundaryOrAnywhere
visible: viewpoints.count > 0
visible: m.viewpoints ? m.viewpoints.count > 0 : false
background: Rectangle {
color: parent.hovered ? palette.highlight : palette.window
opacity: 0.8
@ -299,14 +322,13 @@ Panel {
enabled: nodesCB.currentIndex > 0
onClicked: nodesCB.decrementCurrentIndex()
}
Label { text: "Group " }
Label { id: groupLabel; text: "Group " }
ComboBox {
id: nodesCB
model: root.cameraInits.count
implicitWidth: 40
currentIndex: root.currentIndex
onActivated: root.currentIndex = currentIndex
onActivated: root.changeCurrentIndex(currentIndex)
}
Label { text: "/ " + (root.cameraInits.count - 1) }
ToolButton {
@ -321,27 +343,118 @@ Panel {
}
footerContent: RowLayout {
// Image count
RowLayout {
Layout.fillWidth: true
spacing: 8
RowLayout {
MaterialLabel { text: MaterialIcons.image }
Label { text: grid.model.count }
}
RowLayout {
visible: _reconstruction.cameraInit && _reconstruction.nbCameras
MaterialLabel { text: MaterialIcons.videocam }
Label { text: _reconstruction.cameraInit ? _reconstruction.nbCameras : 0 }
// Images count
MaterialToolLabel {
ToolTip.text: grid.model.count + " Input Images"
iconText: MaterialIcons.image
label: grid.model.count.toString()
// enabled: grid.model.count > 0
// margin: 4
}
// cameras count
MaterialToolLabel {
ToolTip.text: label + " Estimated Cameras"
iconText: MaterialIcons.videocam
label: _reconstruction ? _reconstruction.nbCameras.toString() : "0"
// margin: 4
// enabled: _reconstruction.cameraInit && _reconstruction.nbCameras
}
Item { Layout.fillHeight: true; Layout.fillWidth: true }
MaterialToolLabelButton {
id: displayHDR
property var activeNode: _reconstruction.activeNodes.get("LdrToHdrMerge").node
ToolTip.text: "Visualize HDR images: " + (activeNode ? activeNode.label : "No Node")
iconText: MaterialIcons.filter
label: activeNode ? activeNode.attribute("nbBrackets").value : ""
visible: activeNode
enabled: activeNode && activeNode.isComputed
property string nodeID: activeNode ? (activeNode.label + activeNode.isComputed) : ""
onNodeIDChanged: {
if(checked) {
open();
}
}
onEnabledChanged: {
// Reset the toggle to avoid getting stuck
// with the HDR node checked but disabled.
if(checked) {
checked = false;
close();
}
}
checkable: true
checked: false
onClicked: {
if(checked) {
open();
} else {
close();
}
}
function open() {
if(imageProcessing.checked)
imageProcessing.checked = false;
_reconstruction.setupTempCameraInit(activeNode, "outSfMData");
}
function close() {
_reconstruction.clearTempCameraInit();
}
}
MaterialToolButton {
id: imageProcessing
property var activeNode: _reconstruction.activeNodes.get("ImageProcessing").node
font.pointSize: 15
padding: 0
ToolTip.text: "Preprocessed Images: " + (activeNode ? activeNode.label : "No Node")
text: MaterialIcons.wallpaper
visible: activeNode && activeNode.attribute("outSfMData").value
enabled: activeNode && activeNode.isComputed
property string nodeID: activeNode ? (activeNode.label + activeNode.isComputed) : ""
onNodeIDChanged: {
if(checked) {
open();
}
}
onEnabledChanged: {
// Reset the toggle to avoid getting stuck
// with the HDR node checked but disabled.
if(checked) {
checked = false;
close();
}
}
checkable: true
checked: false
onClicked: {
if(checked) {
open();
} else {
close();
}
}
function open() {
if(displayHDR.checked)
displayHDR.checked = false;
_reconstruction.setupTempCameraInit(activeNode, "outSfMData");
}
function close() {
_reconstruction.clearTempCameraInit();
}
}
Item { Layout.fillHeight: true; width: 1 }
// Thumbnail size icon and slider
MaterialLabel {
MaterialToolButton {
text: MaterialIcons.photo_size_select_large
ToolTip.text: "Thumbnails Scale"
padding: 0
anchors.margins: 0
font.pointSize: 11
onClicked: { thumbnailSizeSlider.value = defaultCellSize; }
}
Slider {
id: thumbnailSizeSlider
@ -351,5 +464,4 @@ Panel {
implicitWidth: 70
}
}
}

View file

@ -18,13 +18,22 @@ ImageBadge {
readonly property string distortionModel: intrinsic ? childAttributeValue(intrinsic, "type", "") : ""
property var metadata: ({})
function findMetadata(key) {
var keyLower = key.toLowerCase()
for(var mKey in metadata)
{
if(mKey.toLowerCase().endsWith(keyLower))
return metadata[mKey]
}
return ""
}
// access useful metadata
readonly property var make: metadata["Make"]
readonly property var model: metadata["Model"]
readonly property var focalLength: metadata["Exif:FocalLength"]
readonly property var focalLength35: metadata["Exif:FocalLengthIn35mmFilm"]
readonly property var bodySerialNumber: metadata["Exif:BodySerialNumber"]
readonly property var lensSerialNumber: metadata["Exif:LensSerialNumber"]
readonly property var make: findMetadata("Make")
readonly property var model: findMetadata("Model")
readonly property var focalLength: findMetadata("FocalLength")
readonly property var focalLength35: findMetadata("FocalLengthIn35mmFilm")
readonly property var bodySerialNumber: findMetadata("BodySerialNumber")
readonly property var lensSerialNumber: findMetadata("LensSerialNumber")
readonly property var sensorWidth: metadata["AliceVision:SensorWidth"]
readonly property var sensorWidthEstimation: metadata["AliceVision:SensorWidthEstimation"]

View file

@ -0,0 +1,23 @@
import QtQuick 2.9
import QtQuick.Controls 2.4
/**
* MLabel is a standard Label.
 * If ToolTip.text is set, it shows a tooltip when hovered.
*/
Label {
    padding: 4

    // Invisible hover tracker: reports containsMouse only and never
    // intercepts clicks (Qt.NoButton).
    MouseArea {
        id: hoverArea
        anchors.fill: parent
        acceptedButtons: Qt.NoButton
        hoverEnabled: true
    }

    // Attached tooltip appears after a short delay while hovered.
    ToolTip.delay: 500
    ToolTip.visible: hoverArea.containsMouse

    // Darken the label background slightly while the cursor is over it.
    background: Rectangle {
        anchors.fill: parent
        color: hoverArea.containsMouse ? Qt.darker(parent.palette.base, 0.6)
                                       : "transparent"
    }
}

View file

@ -1,5 +1,6 @@
import QtQuick 2.9
import QtQuick.Controls 2.3
import QtQuick.Layouts 1.3
/**
@ -7,6 +8,7 @@ import QtQuick.Controls 2.3
* It also shows up its tooltip when hovered.
*/
ToolButton {
id: control
font.family: MaterialIcons.fontFamily
padding: 4
font.pointSize: 13

View file

@ -0,0 +1,45 @@
import QtQuick 2.9
import QtQuick.Controls 2.3
import QtQuick.Layouts 1.3
/**
* MaterialToolLabel is a Label with an icon (using MaterialIcons).
 * It shows its tooltip when hovered.
*/
Item {
    id: control
    // MaterialIcons glyph displayed on the left
    property alias iconText: icon.text
    property alias iconSize: icon.font.pointSize
    // text displayed next to the icon
    property alias label: labelItem.text

    width: childrenRect.width
    height: childrenRect.height

    RowLayout {
        // icon (MaterialIcons font)
        Label {
            id: icon
            font.family: MaterialIcons.fontFamily
            font.pointSize: 13
            padding: 0
            text: ""
            color: palette.text
        }
        // text label
        Label {
            id: labelItem
            text: ""
            color: palette.text
        }
        // trailing spacer
        Item {
            width: 5
        }
    }
    // Hover tracker only; does not consume mouse buttons.
    MouseArea {
        id: mouseArea
        anchors.fill: parent
        hoverEnabled: true
        acceptedButtons: Qt.NoButton
    }
    // Only show the tooltip when some text has been set (consistent with
    // MaterialToolButton); otherwise an empty bubble would pop up on hover.
    ToolTip.visible: ToolTip.text && mouseArea.containsMouse
    ToolTip.delay: 500
}

View file

@ -0,0 +1,51 @@
import QtQuick 2.9
import QtQuick.Controls 2.3
import QtQuick.Layouts 1.3
/**
 * MaterialToolLabelButton is a ToolButton with an icon and a text label, using the MaterialIcons font.
 * It also shows its tooltip when hovered.
*/
ToolButton {
    id: control

    // MaterialIcons glyph shown before the text label.
    property alias iconText: iconLabel.text
    property alias iconSize: iconLabel.font.pointSize
    // Plain text shown after the icon.
    property alias label: textLabel.text

    padding: 0
    width: childrenRect.width
    height: childrenRect.height

    // Tooltip only when text was provided.
    ToolTip.visible: ToolTip.text && hovered
    ToolTip.delay: 100

    contentItem: RowLayout {
        Layout.margins: 0
        // icon (MaterialIcons font)
        Label {
            id: iconLabel
            text: ""
            padding: 0
            font.family: MaterialIcons.fontFamily
            font.pointSize: 13
            color: (checked ? palette.highlight : palette.text)
        }
        // text label
        Label {
            id: textLabel
            text: ""
            padding: 0
            color: (checked ? palette.highlight : palette.text)
        }
    }

    background: Rectangle {
        // pressed/checked take precedence over hover; idle is invisible.
        color: (pressed || checked) ? Qt.darker(parent.palette.base, 1.3)
             : hovered              ? Qt.darker(parent.palette.base, 0.6)
                                    : "transparent"
        border.color: checked ? Qt.darker(parent.palette.base, 1.4) : "transparent"
    }
}

View file

@ -1,4 +1,7 @@
module MaterialIcons
singleton MaterialIcons 2.2 MaterialIcons.qml
MaterialToolButton 2.2 MaterialToolButton.qml
MaterialToolLabelButton 2.2 MaterialToolLabelButton.qml
MaterialToolLabel 2.2 MaterialToolLabel.qml
MaterialLabel 2.2 MaterialLabel.qml
MLabel 2.2 MLabel.qml

View file

@ -0,0 +1,100 @@
import QtQuick 2.11
// Interactive circle gizmo: draggable, resizable with Ctrl+wheel.
// `readOnly` disables all interaction (green border instead of yellow).
Rectangle {
    id: root
    property bool readOnly: false
    // Emitted when a drag gesture ends (not continuously during the drag).
    signal moved()
    // Emitted on Ctrl+wheel; radiusOffset is in wheel notches (±1 per 120 units).
    signal incrementRadius(real radiusOffset)

    // The item is sized from its `radius` (inherited Rectangle property),
    // so the rounded rectangle renders as a full circle.
    width: radius * 2
    height: width
    color: "transparent"
    border.width: 5
    border.color: readOnly ? "green" : "yellow"

    /*
    // visualize top-left corner for debugging purpose
    Rectangle {
        color: "red"
        width: 500
        height: 50
    }
    Rectangle {
        color: "red"
        width: 50
        height: 500
    }
    */

    // Cross to visualize the circle center
    // horizontal bar
    Rectangle {
        color: parent.border.color
        anchors.centerIn: parent
        width: parent.width * 0.2
        height: parent.border.width * 0.5
    }
    // vertical bar
    Rectangle {
        color: parent.border.color
        anchors.centerIn: parent
        width: parent.border.width * 0.5
        height: parent.height * 0.2
    }

    // Smooth out position/size changes with short animations.
    Behavior on x {
        NumberAnimation {
            duration: 100
        }
    }
    Behavior on y {
        NumberAnimation {
            duration: 100
        }
    }
    Behavior on radius {
        NumberAnimation {
            duration: 100
        }
    }

    // Interaction layer, only instantiated when the gizmo is editable.
    Loader {
        anchors.fill: parent
        active: !root.readOnly
        sourceComponent: MouseArea {
            id: mArea
            anchors.fill: parent
            // Cursor reflects the available action: resize (Ctrl held),
            // grabbing (pressed) or grab (idle).
            cursorShape: root.readOnly ? Qt.ArrowCursor : (controlModifierEnabled ? Qt.SizeBDiagCursor : (pressed ? Qt.ClosedHandCursor : Qt.OpenHandCursor))
            propagateComposedEvents: true
            // Tracks whether Ctrl is currently held (updated on move/wheel).
            property bool controlModifierEnabled: false
            onPositionChanged: {
                mArea.controlModifierEnabled = (mouse.modifiers & Qt.ControlModifier)
                // Let the event propagate so underlying views still see moves.
                mouse.accepted = false;
            }
            acceptedButtons: Qt.LeftButton
            hoverEnabled: true
            drag.target: root
            drag.onActiveChanged: {
                // Notify listeners once, when the drag is released.
                if(!drag.active) {
                    moved();
                }
            }
            onPressed: {
                forceActiveFocus();
            }
            onWheel: {
                mArea.controlModifierEnabled = (wheel.modifiers & Qt.ControlModifier)
                // Ctrl+wheel resizes the gizmo; plain wheel is passed through.
                if (wheel.modifiers & Qt.ControlModifier) {
                    incrementRadius(wheel.angleDelta.y / 120.0);
                    wheel.accepted = true;
                } else {
                    wheel.accepted = false;
                }
            }
        }
    }
}

View file

@ -18,12 +18,11 @@ FloatingPane {
property var featureExtractionNode: null
ColumnLayout {
// Header
RowLayout {
// FeatureExtraction node name
Label {
text: featureExtractionNode.label
text: featureExtractionNode ? featureExtractionNode.label : ""
Layout.fillWidth: true
}
// Settings menu
@ -46,8 +45,9 @@ FloatingPane {
id: displayModeCB
flat: true
Layout.fillWidth: true
model: featuresViewer.displayModes
onActivated: featuresViewer.displayMode = currentIndex
model: root.featuresViewer ? root.featuresViewer.displayModes : null
currentIndex: root.featuresViewer ? root.featuresViewer.displayMode : 0
onActivated: root.featuresViewer.displayMode = currentIndex
}
}
}
@ -67,37 +67,75 @@ FloatingPane {
implicitHeight: contentHeight
implicitWidth: contentItem.childrenRect.width
model: featuresViewer !== null ? featuresViewer.model : 0
model: root.featuresViewer !== null ? root.featuresViewer.model : 0
delegate: RowLayout {
id: featureType
property var viewer: featuresViewer.itemAt(index)
property var viewer: root.featuresViewer.itemAt(index)
spacing: 4
// Visibility toogle
// Features visibility toggle
MaterialToolButton {
text: featureType.viewer.visible ? MaterialIcons.visibility : MaterialIcons.visibility_off
onClicked: featureType.viewer.visible = !featureType.viewer.visible
id: featuresVisibilityButton
checkable: true
checked: true
text: MaterialIcons.center_focus_strong
onClicked: {
featureType.viewer.displayfeatures = featuresVisibilityButton.checked;
}
font.pointSize: 10
opacity: featureType.viewer.visible ? 1.0 : 0.6
}
// Tracks visibility toggle
MaterialToolButton {
id: tracksVisibilityButton
checkable: true
checked: true
text: MaterialIcons.timeline
onClicked: {
featureType.viewer.displayTracks = tracksVisibilityButton.checked;
}
font.pointSize: 10
}
// Landmarks visibility toggle
MaterialToolButton {
id: landmarksVisibilityButton
checkable: true
checked: true
text: MaterialIcons.fiber_manual_record
onClicked: {
featureType.viewer.displayLandmarks = landmarksVisibilityButton.checked;
}
font.pointSize: 10
}
// ColorChart picker
ColorChart {
implicitWidth: 12
implicitHeight: implicitWidth
colors: featuresViewer.colors
colors: root.featuresViewer.colors
currentIndex: featureType.viewer.colorIndex
// offset FeaturesViewer color set when changing the color of one feature type
onColorPicked: featuresViewer.colorOffset = colorIndex - index
// offset featuresViewer color set when changing the color of one feature type
onColorPicked: featureType.viewer.colorOffset = colorIndex - index
}
// Feature type name
Label {
text: featureType.viewer.describerType + (featureType.viewer.loading ? "" : ": " + featureType.viewer.features.length)
text: {
if(featureType.viewer.loadingFeatures)
return featureType.viewer.describerType;
return featureType.viewer.describerType + ": " +
((featureExtractionNode && featureExtractionNode.isComputed) ? featureType.viewer.features.length : " - ") + " / " +
(featureType.viewer.haveValidTracks ? featureType.viewer.nbTracks : " - ") + " / " +
(featureType.viewer.haveValidLandmarks ? featureType.viewer.nbLandmarks : " - ");
}
}
// Feature loading status
Loader {
active: featureType.viewer.loading
active: featureType.viewer.loadingFeatures
sourceComponent: BusyIndicator {
padding: 0
implicitWidth: 12
@ -105,6 +143,7 @@ FloatingPane {
running: true
}
}
}
}
}

View file

@ -10,31 +10,37 @@ import Utils 1.0
Repeater {
id: root
/// ViewID to display the features of
/// ViewID to display the features of a specific view
property int viewId
/// SfMData to display the data of SfM
property var sfmData
/// Folder containing the features files
property string folder
property string featureFolder
/// Tracks object loading all the matches files
property var tracks
/// The list of describer types to load
property alias describerTypes: root.model
/// List of available display modes
readonly property var displayModes: ['Points', 'Squares', 'Oriented Squares']
/// Current display mode index
property int displayMode: 0
property int displayMode: 2
/// The list of colors used for displaying several describers
property var colors: [Colors.blue, Colors.red, Colors.yellow, Colors.green, Colors.orange, Colors.cyan, Colors.pink, Colors.lime]
/// Offset the color list
property int colorOffset: 0
property var colors: [Colors.blue, Colors.green, Colors.yellow, Colors.orange, Colors.cyan, Colors.pink, Colors.lime] //, Colors.red
model: root.describerTypes
// instantiate one FeaturesViewer by describer type
delegate: AliceVision.FeaturesViewer {
readonly property int colorIndex: (index+root.colorOffset)%root.colors.length
readonly property int colorIndex: (index + colorOffset) % root.colors.length
property int colorOffset: 0
describerType: modelData
folder: root.folder
featureFolder: root.featureFolder
mtracks: root.tracks
viewId: root.viewId
color: root.colors[colorIndex]
landmarkColor: Colors.red
displayMode: root.displayMode
msfmData: root.sfmData
}
}

View file

@ -0,0 +1,56 @@
import QtQuick 2.11
import Utils 1.0
import AliceVision 1.0 as AliceVision
/**
* FloatImage displays an Image with gamma / offset / channel controls
* Requires QtAliceVision plugin.
*/
AliceVision.FloatImageViewer {
    id: root

    width: textureSize.width
    height: textureSize.height
    visible: (status === Image.Ready)

    // paintedWidth / paintedHeight / status for compatibility with standard Image
    property int paintedWidth: textureSize.width
    property int paintedHeight: textureSize.height
    property var status: {
        if(root.loading)
            return Image.Loading;
        // Bugfix: the original tested sourceSize.height twice and never
        // checked sourceSize.width.
        else if((root.source === "") ||
                (root.sourceSize.width <= 0) ||
                (root.sourceSize.height <= 0))
            return Image.Null;
        return Image.Ready;
    }

    // Channel selection as a string ("rgba", "rgb", "r", "g", "b", "a"),
    // mapped onto the plugin's EChannelMode enum below.
    property string channelModeString : "rgba"
    channelMode: {
        switch(channelModeString)
        {
            case "rgb": return AliceVision.FloatImageViewer.EChannelMode.RGB
            case "r": return AliceVision.FloatImageViewer.EChannelMode.R
            case "g": return AliceVision.FloatImageViewer.EChannelMode.G
            case "b": return AliceVision.FloatImageViewer.EChannelMode.B
            case "a": return AliceVision.FloatImageViewer.EChannelMode.A
            default: return AliceVision.FloatImageViewer.EChannelMode.RGBA
        }
    }
    clearBeforeLoad: true

    // Expose hover state/coordinates for overlays (e.g. pixel color probe).
    property alias containsMouse: mouseArea.containsMouse
    property alias mouseX: mouseArea.mouseX
    property alias mouseY: mouseArea.mouseY
    MouseArea {
        id: mouseArea
        anchors.fill: parent
        hoverEnabled: true
        // Do not intercept mouse events, only get the mouse over information
        acceptedButtons: Qt.NoButton
    }
}

View file

@ -0,0 +1,237 @@
import QtQuick 2.11
import QtQuick.Controls 2.0
import QtQuick.Layouts 1.3
import MaterialIcons 2.2
import Controls 1.0
FloatingPane {
id: root
anchors.margins: 0
padding: 5
radius: 0
property real gammaDefaultValue: 1
property real offsetDefaultValue: 0
property real gammaValue: gammaCtrl.value
property real offsetValue: offsetCtrl.value
property string channelModeValue: channelsCtrl.value
property variant colorRGBA: null
background: Rectangle { color: root.palette.window }
DoubleValidator {
id: doubleValidator
locale: 'C' // use '.' decimal separator disregarding of the system locale
}
RowLayout {
id: toolLayout
// anchors.verticalCenter: parent
anchors.fill: parent
// channel mode
ComboBox {
id: channelsCtrl
// set min size to 4 characters + one margin for the combobox
Layout.minimumWidth: 5.0 * Qt.application.font.pixelSize
Layout.preferredWidth: Layout.minimumWidth
flat: true
property var channels: ["rgba", "rgb", "r", "g", "b","a"]
property string value: channels[currentIndex]
model: channels
}
// offset slider
RowLayout {
spacing: 5
ToolButton {
text: "Gain"
ToolTip.visible: ToolTip.text && hovered
ToolTip.delay: 100
ToolTip.text: "Reset Gain"
onClicked: {
offsetCtrl.value = offsetDefaultValue;
}
}
TextField {
id: offsetLabel
ToolTip.visible: ToolTip.text && hovered
ToolTip.delay: 100
ToolTip.text: "Color Gain (in linear colorspace)"
text: offsetValue.toFixed(2)
Layout.preferredWidth: textMetrics_offsetValue.width
selectByMouse: true
validator: doubleValidator
onAccepted: {
offsetCtrl.value = Number(offsetLabel.text)
}
}
Slider {
id: offsetCtrl
Layout.fillWidth: true
from: -1
to: 1
value: 0
stepSize: 0.01
}
}
// gamma slider
RowLayout {
    spacing: 5
    // Reset-gamma button, labelled with the gamma symbol.
    ToolButton {
        text: "γ"
        ToolTip.visible: ToolTip.text && hovered
        ToolTip.delay: 100
        ToolTip.text: "Reset Gamma"
        onClicked: {
            gammaCtrl.value = gammaDefaultValue;
        }
    }
    // Editable readout of the current gamma value.
    TextField {
        id: gammaLabel
        ToolTip.visible: ToolTip.text && hovered
        ToolTip.delay: 100
        ToolTip.text: "Apply Gamma (after Gain and in linear colorspace)"
        text: gammaValue.toFixed(2)
        Layout.preferredWidth: textMetrics_offsetValue.width
        selectByMouse: true
        validator: doubleValidator
        onAccepted: {
            // Bugfix: parse this field's own text — the original read
            // offsetLabel.text (copy-paste slip from the gain row), so
            // typing a gamma value applied the gain value instead.
            gammaCtrl.value = Number(gammaLabel.text)
        }
    }
    Slider {
        id: gammaCtrl
        Layout.fillWidth: true
        from: 0.01
        to: 16
        value: 1
        stepSize: 0.01
    }
}
Rectangle {
Layout.preferredWidth: 20
implicitWidth: 20
implicitHeight: parent.height
color: root.colorRGBA ? Qt.rgba(red.value_gamma, green.value_gamma, blue.value_gamma, 1.0) : "black"
}
// gamma slider
RowLayout {
spacing: 1
TextField {
id: red
property real value: root.colorRGBA ? root.colorRGBA.x : 0.0
property real value_gamma: Math.pow(value, 1.0/2.2)
text: root.colorRGBA ? value.toFixed(6) : "--"
Layout.preferredWidth: textMetrics_colorValue.width
selectByMouse: true
validator: doubleValidator
horizontalAlignment: TextInput.AlignLeft
readOnly: true
// autoScroll: When the text is too long, display the left part
// (with the most important values and cut the floating point details)
autoScroll: false
Rectangle {
anchors.verticalCenter: parent.bottom
width: parent.width
height: 3
color: Qt.rgba(red.value_gamma, 0.0, 0.0, 1.0)
}
}
TextField {
id: green
property real value: root.colorRGBA ? root.colorRGBA.y : 0.0
property real value_gamma: Math.pow(value, 1.0/2.2)
text: root.colorRGBA ? value.toFixed(6) : "--"
Layout.preferredWidth: textMetrics_colorValue.width
selectByMouse: true
validator: doubleValidator
horizontalAlignment: TextInput.AlignLeft
readOnly: true
// autoScroll: When the text is too long, display the left part
// (with the most important values and cut the floating point details)
autoScroll: false
Rectangle {
anchors.verticalCenter: parent.bottom
width: parent.width
height: 3
color: Qt.rgba(0.0, green.value_gamma, 0.0, 1.0)
}
}
TextField {
id: blue
property real value: root.colorRGBA ? root.colorRGBA.z : 0.0
property real value_gamma: Math.pow(value, 1.0/2.2)
text: root.colorRGBA ? value.toFixed(6) : "--"
Layout.preferredWidth: textMetrics_colorValue.width
selectByMouse: true
validator: doubleValidator
horizontalAlignment: TextInput.AlignLeft
readOnly: true
// autoScroll: When the text is too long, display the left part
// (with the most important values and cut the floating point details)
autoScroll: false
Rectangle {
anchors.verticalCenter: parent.bottom
width: parent.width
height: 3
color: Qt.rgba(0.0, 0.0, blue.value_gamma, 1.0)
}
}
TextField {
id: alpha
property real value: root.colorRGBA ? root.colorRGBA.w : 0.0
property real value_gamma: Math.pow(value, 1.0/2.2)
text: root.colorRGBA ? value.toFixed(6) : "--"
Layout.preferredWidth: textMetrics_colorValue.width
selectByMouse: true
validator: doubleValidator
horizontalAlignment: TextInput.AlignLeft
readOnly: true
// autoScroll: When the text is too long, display the left part
// (with the most important values and cut the floating point details)
autoScroll: false
Rectangle {
anchors.verticalCenter: parent.bottom
width: parent.width
height: 3
color: Qt.rgba(alpha.value_gamma, alpha.value_gamma, alpha.value_gamma, 1.0)
}
}
}
}
TextMetrics {
id: textMetrics_colorValue
font: red.font
text: "1.2345" // use one more than expected to get the correct value (probably needed due to TextField margin)
}
TextMetrics {
id: textMetrics_offsetValue
font: offsetLabel.font
text: "-10.01"
}
}

View file

@ -19,6 +19,7 @@ FloatingPane {
clip: true
padding: 4
anchors.rightMargin: 0
/**
* Convert GPS metadata to degree coordinates.
@ -44,7 +45,7 @@ FloatingPane {
function getGPSCoordinates(metadata)
{
// GPS data available
if(metadata["GPS:Longitude"] != undefined && metadata["GPS:Latitude"] != undefined)
if(metadata && metadata["GPS:Longitude"] != undefined && metadata["GPS:Latitude"] != undefined)
{
var latitude = gpsMetadataToCoordinates(metadata["GPS:Latitude"], metadata["GPS:LatitudeRef"])
var longitude = gpsMetadataToCoordinates(metadata["GPS:Longitude"], metadata["GPS:LongitudeRef"])
@ -76,13 +77,16 @@ FloatingPane {
for(var key in metadata)
{
var entry = {}
entry["raw"] = key
// split on ":" to get group and key
var sKey = key.split(":", 2)
if(sKey.length === 2)
var i = key.lastIndexOf(":")
if(i == -1)
{
entry["group"] = sKey[0]
entry["key"] = sKey[1]
i = key.lastIndexOf("/")
}
if(i != -1)
{
entry["group"] = key.substr(0, i)
entry["key"] = key.substr(i+1)
}
else
{

View file

@ -0,0 +1,7 @@
import QtQuick 2.11
import AliceVision 1.0 as AliceVision
// Data from the SfM
// Thin QML handle on the MSfMData component exposed by the QtAliceVision
// plugin; adds only a root id for this instantiation.
AliceVision.MSfMData {
    id: root
}

View file

@ -0,0 +1,6 @@
import QtQuick 2.11
import AliceVision 1.0 as AliceVision
// Thin QML handle on the MTracks component exposed by the QtAliceVision
// plugin; adds only a root id for this instantiation.
AliceVision.MTracks {
    id: root
}

View file

@ -0,0 +1,327 @@
import QtQuick 2.9
import QtQuick.Controls 2.3
import QtQuick.Layouts 1.3
import MaterialIcons 2.2
import QtPositioning 5.8
import QtLocation 5.9
import QtCharts 2.13
import Charts 1.0
import Controls 1.0
import Utils 1.0
import AliceVision 1.0 as AliceVision
FloatingPane {
id: root
property var msfmData
property var mTracks
property color textColor: Colors.sysPalette.text
visible: (_reconstruction.sfm && _reconstruction.sfm.isComputed) ? root.visible : false
clip: true
padding: 4
// To avoid interaction with components in background
MouseArea {
anchors.fill: parent
acceptedButtons: Qt.LeftButton | Qt.RightButton | Qt.MiddleButton
onPressed: {}
onReleased: {}
onWheel: {}
}
InteractiveChartView {
id: residualsPerViewChart
width: parent.width * 0.5
height: parent.height * 0.5
title: "Residuals Per View"
legend.visible: false
antialiasing: true
ValueAxis {
id: residualsPerViewValueAxisX
labelFormat: "%i"
titleText: "Ordered Views"
min: 0
max: sfmDataStat.residualsPerViewMaxAxisX
}
ValueAxis {
id: residualsPerViewValueAxisY
titleText: "Reprojection Error (pix)"
min: 0
max: sfmDataStat.residualsPerViewMaxAxisY
tickAnchor: 0
tickInterval: 0.50
tickCount: sfmDataStat.residualsPerViewMaxAxisY * 2
}
LineSeries {
id: residualsMinPerViewLineSerie
axisX: residualsPerViewValueAxisX
axisY: residualsPerViewValueAxisY
name: "Min"
}
LineSeries {
id: residualsMaxPerViewLineSerie
axisX: residualsPerViewValueAxisX
axisY: residualsPerViewValueAxisY
name: "Max"
}
LineSeries {
id: residualsMeanPerViewLineSerie
axisX: residualsPerViewValueAxisX
axisY: residualsPerViewValueAxisY
name: "Mean"
}
LineSeries {
id: residualsMedianPerViewLineSerie
axisX: residualsPerViewValueAxisX
axisY: residualsPerViewValueAxisY
name: "Median"
}
LineSeries {
id: residualsFirstQuartilePerViewLineSerie
axisX: residualsPerViewValueAxisX
axisY: residualsPerViewValueAxisY
name: "Q1"
}
LineSeries {
id: residualsThirdQuartilePerViewLineSerie
axisX: residualsPerViewValueAxisX
axisY: residualsPerViewValueAxisY
name: "Q3"
}
}
Item {
id: residualsPerViewBtnContainer
Layout.fillWidth: true
anchors.bottom: residualsPerViewChart.bottom
anchors.bottomMargin: 35
anchors.left: residualsPerViewChart.left
anchors.leftMargin: residualsPerViewChart.width * 0.25
RowLayout {
ChartViewCheckBox {
id: allObservations
text: "ALL"
color: textColor
checkState: residualsPerViewLegend.buttonGroup.checkState
onClicked: {
var _checked = checked;
for(var i = 0; i < residualsPerViewChart.count; ++i)
{
residualsPerViewChart.series(i).visible = _checked;
}
}
}
ChartViewLegend {
id: residualsPerViewLegend
chartView: residualsPerViewChart
}
}
}
InteractiveChartView {
id: observationsLengthsPerViewChart
width: parent.width * 0.5
height: parent.height * 0.5
anchors.top: parent.top
anchors.topMargin: (parent.height) * 0.5
title: "Observations Lengths Per View"
legend.visible: false
antialiasing: true
ValueAxis {
id: observationsLengthsPerViewValueAxisX
labelFormat: "%i"
titleText: "Ordered Views"
min: 0
max: sfmDataStat.observationsLengthsPerViewMaxAxisX
}
ValueAxis {
id: observationsLengthsPerViewValueAxisY
titleText: "Observations Lengths"
min: 0
max: sfmDataStat.observationsLengthsPerViewMaxAxisY
tickAnchor: 0
tickInterval: 0.50
tickCount: sfmDataStat.observationsLengthsPerViewMaxAxisY * 2
}
LineSeries {
id: observationsLengthsMinPerViewLineSerie
axisX: observationsLengthsPerViewValueAxisX
axisY: observationsLengthsPerViewValueAxisY
name: "Min"
}
LineSeries {
id: observationsLengthsMaxPerViewLineSerie
axisX: observationsLengthsPerViewValueAxisX
axisY: observationsLengthsPerViewValueAxisY
name: "Max"
}
LineSeries {
id: observationsLengthsMeanPerViewLineSerie
axisX: observationsLengthsPerViewValueAxisX
axisY: observationsLengthsPerViewValueAxisY
name: "Mean"
}
LineSeries {
id: observationsLengthsMedianPerViewLineSerie
axisX: observationsLengthsPerViewValueAxisX
axisY: observationsLengthsPerViewValueAxisY
name: "Median"
}
LineSeries {
id: observationsLengthsFirstQuartilePerViewLineSerie
axisX: observationsLengthsPerViewValueAxisX
axisY: observationsLengthsPerViewValueAxisY
name: "Q1"
}
LineSeries {
id: observationsLengthsThirdQuartilePerViewLineSerie
axisX: observationsLengthsPerViewValueAxisX
axisY: observationsLengthsPerViewValueAxisY
name: "Q3"
}
}
Item {
id: observationsLengthsPerViewBtnContainer
Layout.fillWidth: true
anchors.bottom: observationsLengthsPerViewChart.bottom
anchors.bottomMargin: 35
anchors.left: observationsLengthsPerViewChart.left
anchors.leftMargin: observationsLengthsPerViewChart.width * 0.25
RowLayout {
ChartViewCheckBox {
id: allModes
text: "ALL"
color: textColor
checkState: observationsLengthsPerViewLegend.buttonGroup.checkState
onClicked: {
var _checked = checked;
for(var i = 0; i < observationsLengthsPerViewChart.count; ++i)
{
observationsLengthsPerViewChart.series(i).visible = _checked;
}
}
}
ChartViewLegend {
id: observationsLengthsPerViewLegend
chartView: observationsLengthsPerViewChart
}
}
}
InteractiveChartView {
id: landmarksPerViewChart
width: parent.width * 0.5
height: parent.height * 0.5
anchors.left: parent.left
anchors.leftMargin: (parent.width) * 0.5
anchors.top: parent.top
title: "Landmarks Per View"
legend.visible: false
antialiasing: true
ValueAxis {
id: landmarksPerViewValueAxisX
titleText: "Ordered Views"
min: 0.0
max: sfmDataStat.landmarksPerViewMaxAxisX
}
ValueAxis {
id: landmarksPerViewValueAxisY
labelFormat: "%i"
titleText: "Number of Landmarks"
min: 0
max: sfmDataStat.landmarksPerViewMaxAxisY
}
LineSeries {
id: landmarksPerViewLineSerie
axisX: landmarksPerViewValueAxisX
axisY: landmarksPerViewValueAxisY
name: "Landmarks"
}
LineSeries {
id: tracksPerViewLineSerie
axisX: landmarksPerViewValueAxisX
axisY: landmarksPerViewValueAxisY
name: "Tracks"
}
}
Item {
id: landmarksFeatTracksPerViewBtnContainer
Layout.fillWidth: true
anchors.bottom: landmarksPerViewChart.bottom
anchors.bottomMargin: 35
anchors.left: landmarksPerViewChart.left
anchors.leftMargin: landmarksPerViewChart.width * 0.25
RowLayout {
ChartViewCheckBox {
id: allFeatures
text: "ALL"
color: textColor
checkState: landmarksFeatTracksPerViewLegend.buttonGroup.checkState
onClicked: {
var _checked = checked;
for(var i = 0; i < landmarksPerViewChart.count; ++i)
{
landmarksPerViewChart.series(i).visible = _checked;
}
}
}
ChartViewLegend {
id: landmarksFeatTracksPerViewLegend
chartView: landmarksPerViewChart
}
}
}
// Stats from the sfmData
AliceVision.MSfMDataStats {
id: sfmDataStat
msfmData: root.msfmData
mTracks: root.mTracks
onAxisChanged: {
fillLandmarksPerViewSerie(landmarksPerViewLineSerie);
fillTracksPerViewSerie(tracksPerViewLineSerie);
fillResidualsMinPerViewSerie(residualsMinPerViewLineSerie);
fillResidualsMaxPerViewSerie(residualsMaxPerViewLineSerie);
fillResidualsMeanPerViewSerie(residualsMeanPerViewLineSerie);
fillResidualsMedianPerViewSerie(residualsMedianPerViewLineSerie);
fillResidualsFirstQuartilePerViewSerie(residualsFirstQuartilePerViewLineSerie);
fillResidualsThirdQuartilePerViewSerie(residualsThirdQuartilePerViewLineSerie);
fillObservationsLengthsMinPerViewSerie(observationsLengthsMinPerViewLineSerie);
fillObservationsLengthsMaxPerViewSerie(observationsLengthsMaxPerViewLineSerie);
fillObservationsLengthsMeanPerViewSerie(observationsLengthsMeanPerViewLineSerie);
fillObservationsLengthsMedianPerViewSerie(observationsLengthsMedianPerViewLineSerie);
fillObservationsLengthsFirstQuartilePerViewSerie(observationsLengthsFirstQuartilePerViewLineSerie);
fillObservationsLengthsThirdQuartilePerViewSerie(observationsLengthsThirdQuartilePerViewLineSerie);
}
}
}

View file

@ -0,0 +1,263 @@
import QtQuick 2.9
import QtQuick.Controls 2.3
import QtQuick.Layouts 1.3
import MaterialIcons 2.2
import QtPositioning 5.8
import QtLocation 5.9
import QtCharts 2.13
import Charts 1.0
import Controls 1.0
import Utils 1.0
import AliceVision 1.0 as AliceVision
FloatingPane {
id: root
property var msfmData: null
property int viewId
property color textColor: Colors.sysPalette.text
visible: (_reconstruction.sfm && _reconstruction.sfm.isComputed) ? root.visible : false
clip: true
padding: 4
// To avoid interaction with components in background
MouseArea {
anchors.fill: parent
acceptedButtons: Qt.LeftButton | Qt.RightButton | Qt.MiddleButton
onPressed: {}
onReleased: {}
onWheel: {}
}
InteractiveChartView {
id: residualChart
width: parent.width * 0.5
height: parent.height * 0.5
title: "Reprojection Errors"
legend.visible: false
antialiasing: true
ValueAxis {
id: residualValueAxisX
titleText: "Reprojection Error"
min: 0.0
max: viewStat.residualMaxAxisX
}
ValueAxis {
id: residualValueAxisY
labelFormat: "%i"
titleText: "Number of Points"
min: 0
max: viewStat.residualMaxAxisY
}
LineSeries {
id: residualFullLineSerie
axisX: residualValueAxisX
axisY: residualValueAxisY
name: "Average on All Cameras"
}
LineSeries {
id: residualViewLineSerie
axisX: residualValueAxisX
axisY: residualValueAxisY
name: "Current"
}
}
Item {
id: residualBtnContainer
Layout.fillWidth: true
anchors.bottom: residualChart.bottom
anchors.bottomMargin: 35
anchors.left: residualChart.left
anchors.leftMargin: residualChart.width * 0.15
RowLayout {
ChartViewCheckBox {
id: allResiduals
text: "ALL"
color: textColor
checkState: residualLegend.buttonGroup.checkState
onClicked: {
var _checked = checked;
for(var i = 0; i < residualChart.count; ++i)
{
residualChart.series(i).visible = _checked;
}
}
}
ChartViewLegend {
id: residualLegend
chartView: residualChart
}
}
}
InteractiveChartView {
id: observationsLengthsChart
width: parent.width * 0.5
height: parent.height * 0.5
anchors.top: parent.top
anchors.topMargin: (parent.height) * 0.5
legend.visible: false
title: "Observations Lengths"
ValueAxis {
id: observationsLengthsvalueAxisX
labelFormat: "%i"
titleText: "Observations Length"
min: 2
max: viewStat.observationsLengthsMaxAxisX
tickAnchor: 2
tickInterval: 1
tickCount: 5
}
ValueAxis {
id: observationsLengthsvalueAxisY
labelFormat: "%i"
titleText: "Number of Points"
min: 0
max: viewStat.observationsLengthsMaxAxisY
}
LineSeries {
id: observationsLengthsFullLineSerie
axisX: observationsLengthsvalueAxisX
axisY: observationsLengthsvalueAxisY
name: "All Cameras"
}
LineSeries {
id: observationsLengthsViewLineSerie
axisX: observationsLengthsvalueAxisX
axisY: observationsLengthsvalueAxisY
name: "Current"
}
}
Item {
id: observationsLengthsBtnContainer
Layout.fillWidth: true
anchors.bottom: observationsLengthsChart.bottom
anchors.bottomMargin: 35
anchors.left: observationsLengthsChart.left
anchors.leftMargin: observationsLengthsChart.width * 0.25
RowLayout {
ChartViewCheckBox {
id: allObservations
text: "ALL"
color: textColor
checkState: observationsLengthsLegend.buttonGroup.checkState
onClicked: {
var _checked = checked;
for(var i = 0; i < observationsLengthsChart.count; ++i)
{
observationsLengthsChart.series(i).visible = _checked;
}
}
}
ChartViewLegend {
id: observationsLengthsLegend
chartView: observationsLengthsChart
}
}
}
InteractiveChartView {
id: observationsScaleChart
width: parent.width * 0.5
height: parent.height * 0.5
anchors.left: parent.left
anchors.leftMargin: (parent.width) * 0.5
anchors.top: parent.top
legend.visible: false
title: "Observations Scale"
ValueAxis {
id: observationsScaleValueAxisX
titleText: "Scale"
min: 0
max: viewStat.observationsScaleMaxAxisX
}
ValueAxis {
id: observationsScaleValueAxisY
titleText: "Number of Points"
min: 0
max: viewStat.observationsScaleMaxAxisY
}
LineSeries {
id: observationsScaleFullLineSerie
axisX: observationsScaleValueAxisX
axisY: observationsScaleValueAxisY
name: " Average on All Cameras"
}
LineSeries {
id: observationsScaleViewLineSerie
axisX: observationsScaleValueAxisX
axisY: observationsScaleValueAxisY
name: "Current"
}
}
Item {
id: observationsScaleBtnContainer
Layout.fillWidth: true
anchors.bottom: observationsScaleChart.bottom
anchors.bottomMargin: 35
anchors.left: observationsScaleChart.left
anchors.leftMargin: observationsScaleChart.width * 0.15
RowLayout {
ChartViewCheckBox {
id: allObservationsScales
text: "ALL"
color: textColor
checkState: observationsScaleLegend.buttonGroup.checkState
onClicked: {
var _checked = checked;
for(var i = 0; i < observationsScaleChart.count; ++i)
{
observationsScaleChart.series(i).visible = _checked;
}
}
}
ChartViewLegend {
id: observationsScaleLegend
chartView: observationsScaleChart
}
}
}
// Stats from a view the sfmData
AliceVision.MViewStats {
id: viewStat
msfmData: (root.visible && root.msfmData && root.msfmData.status === AliceVision.MSfMData.Ready) ? root.msfmData : null
viewId: root.viewId
onViewStatsChanged: {
fillResidualFullSerie(residualFullLineSerie);
fillResidualViewSerie(residualViewLineSerie);
fillObservationsLengthsFullSerie(observationsLengthsFullLineSerie);
fillObservationsLengthsViewSerie(observationsLengthsViewLineSerie);
fillObservationsScaleFullSerie(observationsScaleFullLineSerie);
fillObservationsScaleViewSerie(observationsScaleViewLineSerie);
}
}
}

View file

@ -0,0 +1,9 @@
import AliceVision 1.0
import QtQuick 2.7
/**
* To evaluate if the QtAliceVision plugin is available.
*/
Item {
id: root
}

View file

@ -0,0 +1,10 @@
import DepthMapEntity 2.1
import QtQuick 2.7
/**
* To evaluate if the QtOIIO plugin is available.
* DepthMapEntity is in the same plugin than the imageformats plugin, that we cannot check from qml.
*/
Item {
id: root
}

View file

@ -2,15 +2,70 @@ import QtQuick 2.7
import QtQuick.Controls 2.0
import QtQuick.Layouts 1.3
import MaterialIcons 2.2
import Controls 1.0
FocusScope {
id: root
clip: true
property url source
property var metadata
property var viewIn3D
property Component floatViewerComp: Qt.createComponent("FloatImage.qml")
property alias useFloatImageViewer: displayHDR.checked
Loader {
id: aliceVisionPluginLoader
active: true
source: "TestAliceVisionPlugin.qml"
}
Loader {
id: oiioPluginLoader
active: true
source: "TestOIIOPlugin.qml"
}
readonly property bool aliceVisionPluginAvailable: aliceVisionPluginLoader.status === Component.Ready
readonly property bool oiioPluginAvailable: oiioPluginLoader.status === Component.Ready
Component.onCompleted: {
if(!aliceVisionPluginAvailable)
console.warn("Missing plugin qtAliceVision.")
if(!oiioPluginAvailable)
console.warn("Missing plugin qtOIIO.")
}
property string loadingModules: {
if(!imgContainer.image)
return "";
var res = "";
if(imgContainer.image.status === Image.Loading)
res += " Image";
if(featuresViewerLoader.status === Loader.Ready && featuresViewerLoader.item)
{
for (var i = 0; i < featuresViewerLoader.item.count; ++i) {
if(featuresViewerLoader.item.itemAt(i).loadingFeatures)
{
res += " Features";
break;
}
}
}
if(mtracksLoader.status === Loader.Ready)
{
if(mtracksLoader.item.status === MTracks.Loading)
res += " Tracks";
}
if(msfmDataLoader.status === Loader.Ready)
{
if(msfmDataLoader.item.status === MSfMData.Loading)
{
res += " SfMData";
}
}
return res;
}
function clear()
{
@ -26,106 +81,15 @@ FocusScope {
}
}
// functions
function fit() {
if(image.status != Image.Ready)
return;
image.scale = Math.min(root.width/image.width, root.height/image.height)
image.x = Math.max((root.width-image.width*image.scale)*0.5, 0)
image.y = Math.max((root.height-image.height*image.scale)*0.5, 0)
}
// context menu
property Component contextMenu: Menu {
MenuItem {
text: "Fit"
onTriggered: fit()
}
MenuItem {
text: "Zoom 100%"
onTriggered: image.scale = 1
}
}
// Main Image
Image {
id: image
transformOrigin: Item.TopLeft
asynchronous: true
smooth: false
fillMode: Image.PreserveAspectFit
autoTransform: true
onWidthChanged: if(status==Image.Ready) fit()
source: root.source
onStatusChanged: {
// update cache source when image is loaded
if(status === Image.Ready)
cache.source = source
}
// Image cache of the last loaded image
// Only visible when the main one is loading, to keep an image
// displayed at all time and smoothen transitions
Image {
id: cache
anchors.fill: parent
asynchronous: true
smooth: parent.smooth
fillMode: parent.fillMode
autoTransform: parent.autoTransform
visible: image.status === Image.Loading
}
// FeatureViewer: display view extracted feature points
// note: requires QtAliceVision plugin - use a Loader to evaluate plugin avaibility at runtime
Loader {
id: featuresViewerLoader
active: displayFeatures.checked
// handle rotation/position based on available metadata
rotation: {
var orientation = metadata ? metadata["Orientation"] : 0
switch(orientation) {
case "6": return 90;
case "8": return -90;
default: return 0;
}
}
x: rotation === 90 ? image.paintedWidth : 0
y: rotation === -90 ? image.paintedHeight : 0
Component.onCompleted: {
// instantiate and initialize a FeaturesViewer component dynamically using Loader.setSource
setSource("FeaturesViewer.qml", {
'active': Qt.binding(function() { return displayFeatures.checked; }),
'viewId': Qt.binding(function() { return _reconstruction.selectedViewId; }),
'model': Qt.binding(function() { return _reconstruction.featureExtraction.attribute("describerTypes").value; }),
'folder': Qt.binding(function() { return Filepath.stringToUrl(_reconstruction.featureExtraction.attribute("output").value); }),
})
}
}
}
// Busy indicator
BusyIndicator {
anchors.centerIn: parent
// running property binding seems broken, only dynamic binding assignment works
Component.onCompleted: running = Qt.binding(function() { return image.status === Image.Loading })
}
// mouse area
MouseArea {
anchors.fill: parent
property double factor: 1.2
acceptedButtons: Qt.LeftButton | Qt.RightButton | Qt.MiddleButton
onPressed: {
image.forceActiveFocus()
imgContainer.forceActiveFocus()
if(mouse.button & Qt.MiddleButton || (mouse.button & Qt.LeftButton && mouse.modifiers & Qt.ShiftModifier))
drag.target = image // start drag
drag.target = imgContainer // start drag
}
onReleased: {
drag.target = undefined // stop drag
@ -138,39 +102,301 @@ FocusScope {
}
onWheel: {
var zoomFactor = wheel.angleDelta.y > 0 ? factor : 1/factor
if(Math.min(image.width*image.scale*zoomFactor, image.height*image.scale*zoomFactor) < 10)
if(Math.min(imgContainer.width, imgContainer.image.height) * imgContainer.scale * zoomFactor < 10)
return
var point = mapToItem(image, wheel.x, wheel.y)
image.x += (1-zoomFactor) * point.x * image.scale
image.y += (1-zoomFactor) * point.y * image.scale
image.scale *= zoomFactor
var point = mapToItem(imgContainer, wheel.x, wheel.y)
imgContainer.x += (1-zoomFactor) * point.x * imgContainer.scale
imgContainer.y += (1-zoomFactor) * point.y * imgContainer.scale
imgContainer.scale *= zoomFactor
}
}
// functions
function fit() {
if(imgContainer.image.status != Image.Ready)
return;
imgContainer.scale = Math.min(imgLayout.width / imgContainer.image.width, root.height / imgContainer.image.height)
imgContainer.x = Math.max((imgLayout.width - imgContainer.image.width * imgContainer.scale)*0.5, 0)
imgContainer.y = Math.max((imgLayout.height - imgContainer.image.height * imgContainer.scale)*0.5, 0)
// console.warn("fit: imgLayout.width: " + imgContainer.scale + ", imgContainer.image.width: " + imgContainer.image.width)
// console.warn("fit: imgContainer.scale: " + imgContainer.scale + ", x: " + imgContainer.x + ", y: " + imgContainer.y)
}
function getImageFile(type) {
var depthMapNode = _reconstruction.activeNodes.get('allDepthMap').node;
if (type == "image") {
return root.source;
} else if (depthMapNode != undefined && _reconstruction.selectedViewId >= 0) {
return Filepath.stringToUrl(depthMapNode.internalFolder+_reconstruction.selectedViewId+"_"+type+"Map.exr");
}
return "";
}
// context menu
property Component contextMenu: Menu {
MenuItem {
text: "Fit"
onTriggered: fit()
}
MenuItem {
text: "Zoom 100%"
onTriggered: {
imgContainer.scale = 1
imgContainer.x = Math.max((imgLayout.width-imgContainer.width*imgContainer.scale)*0.5, 0)
imgContainer.y = Math.max((imgLayout.height-imgContainer.height*imgContainer.scale)*0.5, 0)
}
}
}
ColumnLayout {
anchors.fill: parent
HdrImageToolbar {
id: hdrImageToolbar
anchors.margins: 0
visible: displayImageToolBarAction.checked && displayImageToolBarAction.enabled
Layout.fillWidth: true
colorRGBA: {
if(!floatImageViewerLoader.item ||
floatImageViewerLoader.item.status !== Image.Ready)
{
return null;
}
if(floatImageViewerLoader.item.containsMouse == false)
{
// console.warn("floatImageViewerLoader: does not contain mouse");
return null;
}
var pix = floatImageViewerLoader.item.pixelValueAt(Math.floor(floatImageViewerLoader.item.mouseX), Math.floor(floatImageViewerLoader.item.mouseY));
// console.warn("floatImageViewerLoader: pixel value at (" << floatImageViewerLoader.item.mouseX << "," << floatImageViewerLoader.item.mouseY << "): ", pix);
return pix;
}
}
// Image
Item {
id: imgLayout
Layout.fillWidth: true
Layout.fillHeight: true
clip: true
Image {
id: alphaBackground
anchors.fill: parent
visible: displayAlphaBackground.checked
fillMode: Image.Tile
horizontalAlignment: Image.AlignLeft
verticalAlignment: Image.AlignTop
source: "../../img/checkerboard_light.png"
scale: 4
smooth: false
}
Item {
id: imgContainer
transformOrigin: Item.TopLeft
// qtAliceVision Image Viewer
Loader {
id: floatImageViewerLoader
active: root.aliceVisionPluginAvailable && root.useFloatImageViewer
visible: (floatImageViewerLoader.status === Loader.Ready)
anchors.centerIn: parent
onActiveChanged: {
if(active) {
// instantiate and initialize a FeaturesViewer component dynamically using Loader.setSource
// Note: It does not work to use previously created component, so we re-create it with setSource.
// floatViewerComp.createObject(floatImageViewerLoader, {
setSource("FloatImage.qml", {
'source': Qt.binding(function() { return getImageFile(imageType.type); }),
'gamma': Qt.binding(function() { return hdrImageToolbar.gammaValue; }),
'offset': Qt.binding(function() { return hdrImageToolbar.offsetValue; }),
'channelModeString': Qt.binding(function() { return hdrImageToolbar.channelModeValue; }),
})
} else {
// Force the unload (instead of using Component.onCompleted to load it once and for all) is necessary since Qt 5.14
setSource("", {})
}
}
}
// Simple QML Image Viewer (using Qt or qtOIIO to load images)
Loader {
id: qtImageViewerLoader
active: !floatImageViewerLoader.active
anchors.centerIn: parent
sourceComponent: Image {
id: qtImageViewer
asynchronous: true
smooth: false
fillMode: Image.PreserveAspectFit
autoTransform: true
onWidthChanged: if(status==Image.Ready) fit()
source: getImageFile(imageType.type)
onStatusChanged: {
// update cache source when image is loaded
if(status === Image.Ready)
qtImageViewerCache.source = source
}
// Image cache of the last loaded image
// Only visible when the main one is loading, to keep an image
// displayed at all time and smoothen transitions
Image {
id: qtImageViewerCache
anchors.fill: parent
asynchronous: true
smooth: parent.smooth
fillMode: parent.fillMode
autoTransform: parent.autoTransform
visible: qtImageViewer.status === Image.Loading
}
}
}
property var image: qtImageViewerLoader.active ? qtImageViewerLoader.item : floatImageViewerLoader.item
width: image ? image.width : 1
height: image ? image.height : 1
scale: 1.0
// FeatureViewer: display view extracted feature points
// note: requires QtAliceVision plugin - use a Loader to evaluate plugin availability at runtime
Loader {
id: featuresViewerLoader
active: displayFeatures.checked
property var activeNode: _reconstruction.activeNodes.get("FeatureExtraction").node
// handle rotation/position based on available metadata
rotation: {
var orientation = metadata ? metadata["Orientation"] : 0
switch(orientation) {
case "6": return 90;
case "8": return -90;
default: return 0;
}
}
x: (imgContainer.image && rotation === 90) ? imgContainer.image.paintedWidth : 0
y: (imgContainer.image && rotation === -90) ? imgContainer.image.paintedHeight : 0
onActiveChanged: {
if(active) {
// instantiate and initialize a FeaturesViewer component dynamically using Loader.setSource
setSource("FeaturesViewer.qml", {
'viewId': Qt.binding(function() { return _reconstruction.selectedViewId; }),
'model': Qt.binding(function() { return activeNode ? activeNode.attribute("describerTypes").value : ""; }),
'featureFolder': Qt.binding(function() { return activeNode ? Filepath.stringToUrl(activeNode.attribute("output").value) : ""; }),
'tracks': Qt.binding(function() { return mtracksLoader.status === Loader.Ready ? mtracksLoader.item : null; }),
'sfmData': Qt.binding(function() { return msfmDataLoader.status === Loader.Ready ? msfmDataLoader.item : null; }),
})
} else {
// Force the unload (instead of using Component.onCompleted to load it once and for all) is necessary since Qt 5.14
setSource("", {})
}
}
}
// FisheyeCircleViewer: display fisheye circle
// note: use a Loader to evaluate if a PanoramaInit node exist and displayFisheyeCircle checked at runtime
Loader {
anchors.centerIn: parent
property var activeNode: _reconstruction.activeNodes.get("PanoramaInit").node
active: (displayFisheyeCircleLoader.checked && activeNode)
// handle rotation/position based on available metadata
rotation: {
var orientation = metadata ? metadata["Orientation"] : 0
switch(orientation) {
case "6": return 90;
case "8": return -90;
default: return 0;
}
}
sourceComponent: CircleGizmo {
property bool useAuto: activeNode.attribute("estimateFisheyeCircle").value
readOnly: useAuto
visible: (!useAuto) || activeNode.isComputed
property real userFisheyeRadius: activeNode.attribute("fisheyeRadius").value
property variant fisheyeAutoParams: _reconstruction.getAutoFisheyeCircle(activeNode)
x: useAuto ? fisheyeAutoParams.x : activeNode.attribute("fisheyeCenterOffset.fisheyeCenterOffset_x").value
y: useAuto ? fisheyeAutoParams.y : activeNode.attribute("fisheyeCenterOffset.fisheyeCenterOffset_y").value
radius: useAuto ? fisheyeAutoParams.z : ((imgContainer.image ? Math.min(imgContainer.image.width, imgContainer.image.height) : 1.0) * 0.5 * (userFisheyeRadius * 0.01))
border.width: Math.max(1, (3.0 / imgContainer.scale))
onMoved: {
if(!useAuto)
{
_reconstruction.setAttribute(activeNode.attribute("fisheyeCenterOffset.fisheyeCenterOffset_x"), x)
_reconstruction.setAttribute(activeNode.attribute("fisheyeCenterOffset.fisheyeCenterOffset_y"), y)
}
}
onIncrementRadius: {
if(!useAuto)
{
_reconstruction.setAttribute(activeNode.attribute("fisheyeRadius"), activeNode.attribute("fisheyeRadius").value + radiusOffset)
}
}
}
}
}
ColumnLayout {
anchors.fill: parent
spacing: 0
FloatingPane {
id: topToolbar
id: imagePathToolbar
Layout.fillWidth: true
Layout.fillHeight: false
Layout.preferredHeight: childrenRect.height
visible: displayImagePathAction.checked
RowLayout {
width: parent.width
radius: 0
padding: 4
height: childrenRect.height
// selectable filepath to source image
TextField {
width: parent.width
padding: 0
background: Item {}
horizontalAlignment: TextInput.AlignLeft
Layout.fillWidth: true
height: contentHeight
font.pointSize: 8
readOnly: true
selectByMouse: true
text: Filepath.urlToString(source)
text: Filepath.urlToString(getImageFile(imageType.type))
}
// show which depthmap node is active
Label {
id: depthMapNodeName
property var activeNode: root.oiioPluginAvailable ? _reconstruction.activeNodes.get("allDepthMap").node : null
visible: (imageType.type != "image") && activeNode
text: activeNode ? activeNode.label : ""
font.pointSize: 8
horizontalAlignment: TextInput.AlignLeft
Layout.fillWidth: false
Layout.preferredWidth: contentWidth
height: contentHeight
}
}
}
Item {
id: imgPlaceholder
Layout.fillWidth: true
Layout.fillHeight: true
// Image Metadata overlay Pane
ImageMetadataView {
width: 350
anchors {
top: topToolbar.bottom
top: parent.top
right: parent.right
bottom: bottomToolbar.top
bottom: parent.bottom
}
visible: metadataCB.checked
@ -178,58 +404,317 @@ FocusScope {
metadata: visible ? root.metadata : {}
}
Loader {
id: msfmDataLoader
property bool isUsed: displayFeatures.checked || displaySfmStatsView.checked || displaySfmDataGlobalStats.checked
property var activeNode: root.aliceVisionPluginAvailable ? _reconstruction.activeNodes.get('sfm').node : null
property bool isComputed: activeNode && activeNode.isComputed
property string filepath: Filepath.stringToUrl(isComputed ? activeNode.attribute("output").value : "")
active: false
// It takes time to load tracks, so keep them looaded, if we may use it again.
// If we load another node, we can trash them (to eventually load the new node data).
onIsUsedChanged: {
if(!active && isUsed && isComputed)
{
active = true;
}
}
onIsComputedChanged: {
if(!isComputed)
{
active = false;
}
else if(!active && isUsed)
{
active = true;
}
}
onActiveNodeChanged: {
if(!isUsed)
{
active = false;
}
else if(!isComputed)
{
active = false;
}
else
{
active = true;
}
}
onActiveChanged: {
if(active) {
// instantiate and initialize a SfmStatsView component dynamically using Loader.setSource
// so it can fail safely if the c++ plugin is not available
setSource("MSfMData.qml", {
'sfmDataPath': Qt.binding(function() { return filepath; }),
})
} else {
// Force the unload (instead of using Component.onCompleted to load it once and for all) is necessary since Qt 5.14
setSource("", {})
}
}
}
Loader {
id: mtracksLoader
property bool isUsed: displayFeatures.checked || displaySfmStatsView.checked || displaySfmDataGlobalStats.checked
property var activeNode: root.aliceVisionPluginAvailable ? _reconstruction.activeNodes.get('FeatureMatching').node : null
property bool isComputed: activeNode && activeNode.isComputed
active: false
// It takes time to load tracks, so keep them looaded, if we may use it again.
// If we load another node, we can trash them (to eventually load the new node data).
onIsUsedChanged: {
if(!active && isUsed && isComputed) {
active = true;
}
}
onIsComputedChanged: {
if(!isComputed) {
active = false;
}
else if(!active && isUsed) {
active = true;
}
}
onActiveNodeChanged: {
if(!isUsed) {
active = false;
}
else if(!isComputed) {
active = false;
}
else {
active = true;
}
}
onActiveChanged: {
if(active) {
// instantiate and initialize a SfmStatsView component dynamically using Loader.setSource
// so it can fail safely if the c++ plugin is not available
setSource("MTracks.qml", {
'matchingFolder': Qt.binding(function() { return Filepath.stringToUrl(isComputed ? activeNode.attribute("output").value : ""); }),
})
} else {
// Force the unload (instead of using Component.onCompleted to load it once and for all) is necessary since Qt 5.14
setSource("", {})
}
}
}
Loader {
id: sfmStatsView
anchors.fill: parent
active: msfmDataLoader.status === Loader.Ready && displaySfmStatsView.checked
Component.onCompleted: {
// instantiate and initialize a SfmStatsView component dynamically using Loader.setSource
// so it can fail safely if the c++ plugin is not available
setSource("SfmStatsView.qml", {
'msfmData': Qt.binding(function() { return msfmDataLoader.item; }),
'viewId': Qt.binding(function() { return _reconstruction.selectedViewId; }),
})
}
}
Loader {
id: sfmGlobalStats
anchors.fill: parent
active: msfmDataLoader.status === Loader.Ready && displaySfmDataGlobalStats.checked
Component.onCompleted: {
// instantiate and initialize a SfmStatsView component dynamically using Loader.setSource
// so it can fail safely if the c++ plugin is not available
setSource("SfmGlobalStats.qml", {
'msfmData': Qt.binding(function() { return msfmDataLoader.item; }),
'mTracks': Qt.binding(function() { return mtracksLoader.item; }),
})
}
}
Loader {
id: featuresOverlay
anchors.bottom: bottomToolbar.top
anchors.left: parent.left
anchors.margins: 2
active: displayFeatures.checked
anchors {
bottom: parent.bottom
left: parent.left
margins: 2
}
active: root.aliceVisionPluginAvailable && displayFeatures.checked && featuresViewerLoader.status === Loader.Ready
sourceComponent: FeaturesInfoOverlay {
featureExtractionNode: _reconstruction.featureExtraction
featureExtractionNode: _reconstruction.activeNodes.get('FeatureExtraction').node
pluginStatus: featuresViewerLoader.status
featuresViewer: featuresViewerLoader.item
}
}
}
FloatingPane {
id: bottomToolbar
anchors.bottom: parent.bottom
anchors.margins: 0
width: parent.width
topPadding: 2
bottomPadding: topPadding
padding: 4
Layout.fillWidth: true
Layout.preferredHeight: childrenRect.height
RowLayout {
anchors.fill: parent
// zoom label
Label {
text: (image.status == Image.Ready ? image.scale.toFixed(2) : "1.00") + "x"
state: "xsmall"
MLabel {
text: ((imgContainer.image && (imgContainer.image.status === Image.Ready)) ? imgContainer.scale.toFixed(2) : "1.00") + "x"
MouseArea {
anchors.fill: parent
acceptedButtons: Qt.LeftButton | Qt.RightButton
onClicked: {
if(mouse.button & Qt.LeftButton) {
fit()
}
else if(mouse.button & Qt.RightButton) {
var menu = contextMenu.createObject(root);
var point = mapToItem(root, mouse.x, mouse.y)
menu.x = point.x;
menu.y = point.y;
menu.open()
}
}
}
ToolTip.text: "Zoom"
}
MaterialToolButton {
id: displayAlphaBackground
ToolTip.text: "Alpha Background"
text: MaterialIcons.texture
font.pointSize: 11
Layout.minimumWidth: 0
checkable: true
}
MaterialToolButton {
id: displayHDR
ToolTip.text: "High-Dynamic-Range Image Viewer"
text: MaterialIcons.hdr_on
// larger font but smaller padding,
// so it is visually similar.
font.pointSize: 20
padding: 0
Layout.minimumWidth: 0
checkable: true
checked: false
enabled: root.aliceVisionPluginAvailable
}
MaterialToolButton {
id: displayFeatures
font.pointSize: 11
ToolTip.text: "Display Features"
checkable: true
text: MaterialIcons.scatter_plot
font.pointSize: 11
Layout.minimumWidth: 0
checkable: true
checked: false
enabled: root.aliceVisionPluginAvailable
}
MaterialToolButton {
id: displayFisheyeCircleLoader
property var activeNode: _reconstruction.activeNodes.get('PanoramaInit').node
ToolTip.text: "Display Fisheye Circle: " + (activeNode ? activeNode.label : "No Node")
text: MaterialIcons.vignette
// text: MaterialIcons.panorama_fish_eye
font.pointSize: 11
Layout.minimumWidth: 0
checkable: true
checked: false
enabled: activeNode && activeNode.attribute("useFisheye").value
visible: activeNode
}
Item {
Layout.fillWidth: true
Label {
id: resolutionLabel
text: image.sourceSize.width + "x" + image.sourceSize.height
anchors.centerIn: parent
elide: Text.ElideMiddle
Layout.fillWidth: true
text: (imgContainer.image && imgContainer.image.sourceSize.width > 0) ? (imgContainer.image.sourceSize.width + "x" + imgContainer.image.sourceSize.height) : ""
elide: Text.ElideRight
horizontalAlignment: Text.AlignHCenter
}
ComboBox {
id: imageType
property var activeNode: root.oiioPluginAvailable ? _reconstruction.activeNodes.get('allDepthMap').node : null
// set min size to 5 characters + one margin for the combobox
clip: true
Layout.minimumWidth: 0
Layout.preferredWidth: 6.0 * Qt.application.font.pixelSize
flat: true
property var types: ["image", "depth", "sim"]
property string type: enabled ? types[currentIndex] : types[0]
model: types
enabled: activeNode
}
MaterialToolButton {
property var activeNode: root.oiioPluginAvailable ? _reconstruction.activeNodes.get('allDepthMap').node : null
enabled: activeNode
ToolTip.text: "View Depth Map in 3D (" + (activeNode ? activeNode.label : "No DepthMap Node Selected") + ")"
text: MaterialIcons.input
font.pointSize: 11
Layout.minimumWidth: 0
onClicked: {
root.viewIn3D(root.getImageFile("depth"))
}
}
ToolButton {
MaterialToolButton {
id: displaySfmStatsView
property var activeNode: root.aliceVisionPluginAvailable ? _reconstruction.activeNodes.get('sfm').node : null
font.family: MaterialIcons.fontFamily
text: MaterialIcons.assessment
ToolTip.text: "StructureFromMotion Statistics"
ToolTip.visible: hovered
font.pointSize: 14
padding: 2
smooth: false
flat: true
checkable: enabled
enabled: activeNode && activeNode.isComputed && _reconstruction.selectedViewId >= 0
onCheckedChanged: {
if(checked == true) {
displaySfmDataGlobalStats.checked = false
metadataCB.checked = false
}
}
}
MaterialToolButton {
id: displaySfmDataGlobalStats
property var activeNode: root.aliceVisionPluginAvailable ? _reconstruction.activeNodes.get('sfm').node : null
font.family: MaterialIcons.fontFamily
text: MaterialIcons.language
ToolTip.text: "StructureFromMotion Global Statistics"
ToolTip.visible: hovered
font.pointSize: 14
padding: 2
smooth: false
flat: true
checkable: enabled
enabled: activeNode && activeNode.isComputed
onCheckedChanged: {
if(checked == true) {
displaySfmStatsView.checked = false
metadataCB.checked = false
}
}
}
MaterialToolButton {
id: metadataCB
padding: 3
font.family: MaterialIcons.fontFamily
text: MaterialIcons.info_outline
@ -237,11 +722,35 @@ FocusScope {
ToolTip.text: "Image Metadata"
ToolTip.visible: hovered
font.pointSize: 12
font.pointSize: 14
padding: 2
smooth: false
flat: true
checkable: enabled
enabled: _reconstruction.selectedViewId >= 0
onCheckedChanged: {
if(checked == true)
{
displaySfmDataGlobalStats.checked = false
displaySfmStatsView.checked = false
}
}
}
}
}
}
}
}
// Busy indicator
BusyIndicator {
anchors.centerIn: parent
// running property binding seems broken, only dynamic binding assignment works
Component.onCompleted: {
running = Qt.binding(function() { return imgContainer.image && imgContainer.image.status === Image.Loading })
}
// disable the visibility when unused to avoid stealing the mouseEvent to the image color picker
visible: running
}
}

View file

@ -4,7 +4,7 @@ import QtQuick.Layouts 1.12
/**
* ImageOverlay enables to display a Viewpoint image on top of a 3D View.
* It takes the principal point correction into account and handle image ratio to
correctly fit or crop according to original image ratio and parent Item ratio.
* correctly fit or crop according to original image ratio and parent Item ratio.
*/
Item {
id: root

View file

@ -44,9 +44,10 @@ Item {
// Load reconstruction's current SfM file
function viewSfM() {
if(!reconstruction.sfm)
var activeNode = _reconstruction.activeNodes.get('sfm').node;
if(!activeNode)
return;
viewer3D.view(reconstruction.sfm.attribute('output'));
viewer3D.view(activeNode.attribute('output'));
}
SystemPalette { id: activePalette }
@ -64,9 +65,9 @@ Item {
Layout.fillHeight: true
readOnly: root.readOnly
cameraInits: root.cameraInits
cameraInit: _reconstruction.cameraInit
cameraInit: reconstruction.cameraInit
tempCameraInit: reconstruction.tempCameraInit
currentIndex: reconstruction.cameraInitIndex
onCurrentIndexChanged: reconstruction.cameraInitIndex = currentIndex
onRemoveImageRequest: reconstruction.removeAttribute(attribute)
onFilesDropped: reconstruction.handleFilesDrop(drop, augmentSfm ? null : cameraInit)
}
@ -81,11 +82,45 @@ Item {
title: "Image Viewer"
Layout.fillHeight: true
Layout.fillWidth: true
Layout.minimumWidth: 40
Layout.minimumWidth: 50
loading: viewer2D.loadingModules.length > 0
loadingText: loading ? "Loading " + viewer2D.loadingModules : ""
headerBar: RowLayout {
MaterialToolButton {
text: MaterialIcons.more_vert
font.pointSize: 11
padding: 2
checkable: true
checked: imageViewerMenu.visible
onClicked: imageViewerMenu.open()
Menu {
id: imageViewerMenu
y: parent.height
x: -width + parent.width
Action {
id: displayImageToolBarAction
text: "Display HDR Toolbar"
checkable: true
checked: true
enabled: viewer2D.useFloatImageViewer
}
Action {
id: displayImagePathAction
text: "Display Image Path"
checkable: true
checked: true
}
}
}
}
Viewer2D {
id: viewer2D
anchors.fill: parent
viewIn3D: root.load3DMedia
Connections {
target: imageGallery
onCurrentItemChanged: {
@ -157,7 +192,7 @@ Item {
mediaLibrary: viewer3D.library
camera: viewer3D.mainCamera
uigraph: reconstruction
onNodeActivated: _reconstruction.setActiveNodeOfType(node)
onNodeActivated: _reconstruction.setActiveNode(node)
}
}
}

View file

@ -4,7 +4,10 @@ import QtQuick.Controls 1.4 as Controls1 // For SplitView
import QtQuick.Layouts 1.1
import QtQuick.Window 2.3
import QtQml.Models 2.2
import Qt.labs.platform 1.0 as Platform
import QtQuick.Dialogs 1.3
import Qt.labs.settings 1.0
import GraphEditor 1.0
import MaterialIcons 2.2
@ -136,6 +139,7 @@ ApplicationWindow {
onAccepted: {
_reconstruction.saveAs(file)
closed(Platform.Dialog.Accepted)
MeshroomApp.addRecentProjectFile(file.toString())
}
onRejected: closed(Platform.Dialog.Rejected)
}
@ -205,12 +209,27 @@ ApplicationWindow {
}
}
Platform.FileDialog {
FileDialog {
id: openFileDialog
title: "Open File"
nameFilters: ["Meshroom Graphs (*.mg)"]
onAccepted: {
_reconstruction.loadUrl(file.toString())
if(_reconstruction.loadUrl(fileUrl))
{
MeshroomApp.addRecentProjectFile(fileUrl.toString())
}
}
}
FileDialog {
id: importFilesDialog
title: "Import Images"
selectExisting: true
selectMultiple: true
nameFilters: []
onAccepted: {
console.warn("importFilesDialog fileUrls: " + importFilesDialog.fileUrls)
_reconstruction.importImagesUrls(importFilesDialog.fileUrls)
}
}
@ -318,11 +337,73 @@ ApplicationWindow {
shortcut: "Ctrl+N"
onTriggered: ensureSaved(function() { _reconstruction.new() })
}
Menu {
title: "New Pipeline"
Action {
text: "Photogrammetry"
onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetry") })
}
Action {
text: "HDRI"
onTriggered: ensureSaved(function() { _reconstruction.new("hdri") })
}
Action {
text: "HDRI Fisheye"
onTriggered: ensureSaved(function() { _reconstruction.new("hdriFisheye") })
}
}
Action {
id: openActionItem
text: "Open"
shortcut: "Ctrl+O"
onTriggered: ensureSaved(function() { openFileDialog.open() })
}
Menu {
id: openRecentMenu
title: "Open Recent"
enabled: recentFilesMenuItems.model != undefined && recentFilesMenuItems.model.length > 0
property int maxWidth: 1000
property int fullWidth: {
var result = 0;
for (var i = 0; i < count; ++i) {
var item = itemAt(i);
result = Math.max(item.implicitWidth + item.padding * 2, result);
}
return result;
}
implicitWidth: fullWidth
Repeater {
id: recentFilesMenuItems
model: MeshroomApp.recentProjectFiles
MenuItem {
onTriggered: ensureSaved(function() {
openRecentMenu.dismiss();
if(_reconstruction.loadUrl(modelData))
{
MeshroomApp.addRecentProjectFile(modelData);
}
else
{
MeshroomApp.removeRecentProjectFile(modelData);
}
})
text: fileTextMetrics.elidedText
TextMetrics {
id: fileTextMetrics
text: modelData
elide: Text.ElideLeft
elideWidth: openRecentMenu.maxWidth
}
}
}
}
Action {
id: importActionItem
text: "Import Images"
shortcut: "Ctrl+I"
onTriggered: importFilesDialog.open()
}
Action {
id: saveAction
text: "Save"
@ -571,6 +652,20 @@ ApplicationWindow {
tabs: ["Graph Editor", "Task Manager"]
headerBar: RowLayout {
MaterialToolButton {
text: MaterialIcons.refresh
ToolTip.text: "Refresh Nodes Status"
ToolTip.visible: hovered
font.pointSize: 11
padding: 2
onClicked: {
updatingStatus = true
_reconstruction.forceNodesStatusUpdate()
updatingStatus = false
}
property bool updatingStatus: false
enabled: !updatingStatus && !_reconstruction.computingLocally
}
MaterialToolButton {
text: MaterialIcons.more_vert
font.pointSize: 11
@ -587,11 +682,6 @@ ApplicationWindow {
enabled: !_reconstruction.computingLocally
onTriggered: _reconstruction.graph.clearSubmittedNodes()
}
MenuItem {
text: "Refresh Nodes Status"
enabled: !_reconstruction.computingLocally
onTriggered: _reconstruction.forceNodesStatusUpdate()
}
}
}
}
@ -606,13 +696,13 @@ ApplicationWindow {
nodeTypesModel: _nodeTypes
onNodeDoubleClicked: {
_reconstruction.setActiveNodeOfType(node);
_reconstruction.setActiveNode(node);
let viewable = false;
for(var i=0; i < node.attributes.count; ++i)
{
var attr = node.attributes.at(i)
if(attr.isOutput && workspaceView.viewAttribute(attr))
if(attr.isOutput && workspaceView.viewAttribute(attr, mouse))
break;
}
}
@ -665,7 +755,7 @@ ApplicationWindow {
}
}
onAttributeDoubleClicked: workspaceView.viewIn3D(attribute, mouse)
onAttributeDoubleClicked: workspaceView.viewAttribute(attribute, mouse)
onUpgradeRequest: {
var n = _reconstruction.upgradeNode(node);
_reconstruction.selectedNode = n;

View file

@ -3,17 +3,26 @@ import logging
import math
import os
from threading import Thread
from collections import Iterable
from PySide2.QtCore import QObject, Slot, Property, Signal, QUrl, QSizeF
from PySide2.QtGui import QMatrix4x4, QMatrix3x3, QQuaternion, QVector3D, QVector2D
import meshroom.core
import meshroom.common
from meshroom import multiview
from meshroom.common.qt import QObjectListModel
from meshroom.core import Version
from meshroom.core.node import Node, Status, Position
from meshroom.core.node import Node, CompatibilityNode, Status, Position
from meshroom.ui.graph import UIGraph
from meshroom.ui.utils import makeProperty
# Python2 compatibility
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
class Message(QObject):
""" Simple structure wrapping a high-level message. """
@ -189,6 +198,7 @@ class ViewpointWrapper(QObject):
self._reconstructed = False
# PrepareDenseScene
self._undistortedImagePath = ''
self._activeNode_PrepareDenseScene = self._reconstruction.activeNodes.get("PrepareDenseScene")
# update internally cached variables
self._updateInitialParams()
@ -198,16 +208,22 @@ class ViewpointWrapper(QObject):
# trigger internal members updates when reconstruction members changes
self._reconstruction.cameraInitChanged.connect(self._updateInitialParams)
self._reconstruction.sfmReportChanged.connect(self._updateSfMParams)
self._reconstruction.prepareDenseSceneChanged.connect(self._updateDenseSceneParams)
self._activeNode_PrepareDenseScene.nodeChanged.connect(self._updateDenseSceneParams)
def _updateInitialParams(self):
""" Update internal members depending on CameraInit. """
if not self._reconstruction.cameraInit:
self.initialIntrinsics = None
self._initialIntrinsics = None
self._metadata = {}
else:
self._initialIntrinsics = self._reconstruction.getIntrinsic(self._viewpoint)
self._metadata = json.loads(self._viewpoint.metadata.value) if self._viewpoint.metadata.value else {}
try:
self._metadata = json.loads(self._viewpoint.metadata.value) if self._viewpoint.metadata.value else None
except Exception as e:
logging.warning("Failed to parse Viewpoint metadata: '{}', '{}'".format(str(e), str(self._viewpoint.metadata.value)))
self._metadata = {}
if not self._metadata:
self._metadata = {}
self.initialParamsChanged.emit()
def _updateSfMParams(self):
@ -226,11 +242,11 @@ class ViewpointWrapper(QObject):
def _updateDenseSceneParams(self):
""" Update internal members depending on PrepareDenseScene. """
# undistorted image path
if not self._reconstruction.prepareDenseScene:
if not self._activeNode_PrepareDenseScene.node:
self._undistortedImagePath = ''
else:
filename = "{}.{}".format(self._viewpoint.viewId.value, self._reconstruction.prepareDenseScene.outputFileType.value)
self._undistortedImagePath = os.path.join(self._reconstruction.prepareDenseScene.output.value, filename)
filename = "{}.{}".format(self._viewpoint.viewId.value, self._activeNode_PrepareDenseScene.node.outputFileType.value)
self._undistortedImagePath = os.path.join(self._activeNode_PrepareDenseScene.node.output.value, filename)
self.denseSceneParamsChanged.emit()
@Property(type=QObject, constant=True)
@ -353,27 +369,75 @@ class ViewpointWrapper(QObject):
return QUrl.fromLocalFile(self._undistortedImagePath)
def parseSfMJsonFile(sfmJsonFile):
    """
    Parse an SfM JSON file and index its content by id.

    Args:
        sfmJsonFile (str): path to the SfM JSON file.

    Returns:
        (dict, dict, dict): views, poses and intrinsics, keyed by
        viewId, poseId and intrinsicId respectively. Three empty dicts
        are returned if the file does not exist.
    """
    if not os.path.exists(sfmJsonFile):
        return {}, {}, {}

    with open(sfmJsonFile) as jsonFile:
        report = json.load(jsonFile)

    # Use .get() with an empty default: depending on the pipeline state,
    # an SfM file may not contain all three sections (e.g. no 'poses'
    # before any reconstruction happened) — avoid raising KeyError.
    views = {view['viewId']: view for view in report.get('views', [])}
    poses = {pose['poseId']: pose['pose'] for pose in report.get('poses', [])}
    intrinsics = {intrinsic['intrinsicId']: intrinsic for intrinsic in report.get('intrinsics', [])}
    return views, poses, intrinsics
class ActiveNode(QObject):
    """
    Hold one active node for a given NodeType (or node category).

    Wraps a single node reference behind a Qt property so QML can bind
    to the currently active node of each type and react when it changes.
    """
    def __init__(self, nodeType, parent=None):
        super(ActiveNode, self).__init__(parent)
        # Node type (or category name) this entry tracks.
        self.nodeType = nodeType
        # Currently active node for this type; None when unset.
        self._node = None

    nodeChanged = Signal()
    # resetOnDestroy: the property automatically falls back to None
    # if the referenced node object is destroyed.
    node = makeProperty(QObject, "_node", nodeChanged, resetOnDestroy=True)
class Reconstruction(UIGraph):
"""
Specialization of a UIGraph designed to manage a 3D reconstruction.
"""
activeNodeCategories = {
"sfm": ["StructureFromMotion", "GlobalSfM", "PanoramaEstimation", "SfMTransfer", "SfMTransform",
"SfMAlignment"],
"undistort": ["PrepareDenseScene", "PanoramaWarping"],
"allDepthMap": ["DepthMap", "DepthMapFilter"],
}
def __init__(self, defaultPipeline='', parent=None):
super(Reconstruction, self).__init__(parent)
# initialize member variables for key steps of the 3D reconstruction pipeline
self._activeNodes = meshroom.common.DictModel(keyAttrName="nodeType")
self.initActiveNodes()
# - CameraInit
self._cameraInit = None # current CameraInit node
self._cameraInits = QObjectListModel(parent=self) # all CameraInit nodes
self._buildingIntrinsics = False
self.intrinsicsBuilt.connect(self.onIntrinsicsAvailable)
self.importImagesFailed.connect(self.onImportImagesFailed)
self.cameraInitChanged.connect(self.onCameraInitChanged)
# - Feature Extraction
self._featureExtraction = None
self.cameraInitChanged.connect(self.updateFeatureExtraction)
self._tempCameraInit = None
self.importImagesFailed.connect(self.onImportImagesFailed)
# - SfM
self._sfm = None
@ -384,12 +448,6 @@ class Reconstruction(UIGraph):
self._selectedViewpoint = None
self._liveSfmManager = LiveSfmManager(self)
# - Prepare Dense Scene (undistorted images)
self._prepareDenseScene = None
# - Texturing
self._texturing = None
# react to internal graph changes to update those variables
self.graphChanged.connect(self.onGraphChanged)
@ -398,22 +456,40 @@ class Reconstruction(UIGraph):
def setDefaultPipeline(self, defaultPipeline):
self._defaultPipeline = defaultPipeline
def initActiveNodes(self):
    """ Pre-populate the active nodes model with every possible entry:
    one per node category, then one per registered node type. """
    entryNames = list(self.activeNodeCategories) + list(meshroom.core.nodesDesc)
    for name in entryNames:
        self._activeNodes.add(ActiveNode(name, self))
def onCameraInitChanged(self):
    # Update active nodes when the current CameraInit changes:
    # consider every node reachable from the new CameraInit node.
    nodes = self._graph.nodesFromNode(self._cameraInit)[0]
    self.setActiveNodes(nodes)
@Slot()
def new(self):
@Slot(str)
def new(self, pipeline=None):
p = pipeline if pipeline != None else self._defaultPipeline
""" Create a new photogrammetry pipeline. """
if self._defaultPipeline.lower() == "photogrammetry":
if p.lower() == "photogrammetry":
# default photogrammetry pipeline
self.setGraph(multiview.photogrammetry())
elif self._defaultPipeline.lower() == "hdri":
elif p.lower() == "hdri":
# default hdri pipeline
self.setGraph(multiview.hdri())
elif p.lower() == "hdrifisheye":
# default hdri pipeline
self.setGraph(multiview.hdriFisheye())
else:
# use the user-provided default photogrammetry project file
self.load(self._defaultPipeline, setupProjectFile=False)
self.load(p, setupProjectFile=False)
@Slot(str, result=bool)
def load(self, filepath, setupProjectFile=True):
try:
super(Reconstruction, self).load(filepath, setupProjectFile)
status = super(Reconstruction, self).loadGraph(filepath, setupProjectFile)
# warn about pre-release projects being automatically upgraded
if Version(self._graph.fileReleaseVersion).major == "0":
self.warning.emit(Message(
@ -422,26 +498,48 @@ class Reconstruction(UIGraph):
"Data might have been lost in the process.",
"Open it with the corresponding version of Meshroom to recover your data."
))
return status
except FileNotFoundError as e:
self.error.emit(
Message(
"No Such File",
"Error While Loading '{}': No Such File.".format(os.path.basename(filepath)),
""
)
)
logging.error("Error while loading '{}': No Such File.".format(os.path.basename(filepath)))
return False
except Exception as e:
import traceback
trace = traceback.format_exc()
self.error.emit(
Message(
"Error while loading {}".format(os.path.basename(filepath)),
"An unexpected error has occurred",
"Error While Loading Project File",
"An unexpected error has occurred while loading file: '{}'".format(os.path.basename(filepath)),
trace
)
)
logging.error(trace)
return False
@Slot(QUrl, result=bool)
def loadUrl(self, url):
    """ Load a Meshroom project from a QUrl or a plain path string.

    Returns:
        bool: whether the project was successfully loaded.
    """
    if isinstance(url, QUrl):
        # depending how the QUrl has been initialized,
        # toLocalFile() may return the local path or an empty string
        localFile = url.toLocalFile() or url.toString()
    else:
        localFile = url
    return self.load(localFile)
def onGraphChanged(self):
""" React to the change of the internal graph. """
self._liveSfmManager.reset()
self.selectedViewId = "-1"
self.featureExtraction = None
self.sfm = None
self.prepareDenseScene = None
self.texturing = None
self.tempCameraInit = None
self.updateCameraInits()
if not self._graph:
return
@ -457,6 +555,7 @@ class Reconstruction(UIGraph):
thread.start()
return thread
@Slot(QObject)
def getViewpoints(self):
""" Return the Viewpoints model. """
# TODO: handle multiple Viewpoints models
@ -471,6 +570,10 @@ class Reconstruction(UIGraph):
def getCameraInitIndex(self):
if not self._cameraInit:
# No CameraInit node
return -1
if not self._cameraInit.graph:
# The CameraInit node is a temporary one not attached to a graph
return -1
return self._cameraInits.indexOf(self._cameraInit)
@ -478,21 +581,60 @@ class Reconstruction(UIGraph):
camInit = self._cameraInits[idx] if self._cameraInits else None
self.cameraInit = camInit
def updateFeatureExtraction(self):
""" Set the current FeatureExtraction node based on the current CameraInit node. """
self.featureExtraction = self.lastNodeOfType('FeatureExtraction', self.cameraInit) if self.cameraInit else None
@Slot()
def clearTempCameraInit(self):
    """ Reset the temporary CameraInit node. """
    self.tempCameraInit = None
@Slot(QObject, str)
def setupTempCameraInit(self, node, attrName):
    """ Build a temporary CameraInit node from the SfM data file referenced
    by attribute 'attrName' on 'node' and set it as the temp camera init.
    Clears the temp camera init when inputs are invalid or the file is missing. """
    sfmFile = node.attribute(attrName).value if node and attrName else None
    if not sfmFile or not os.path.isfile(sfmFile):
        self.tempCameraInit = None
        return
    cameraInitDesc = meshroom.core.nodesDesc["CameraInit"]()
    views, intrinsics = cameraInitDesc.readSfMData(sfmFile)
    self.tempCameraInit = Node("CameraInit", viewpoints=views, intrinsics=intrinsics)
@Slot(QObject, result=QVector3D)
def getAutoFisheyeCircle(self, panoramaInit):
    """ Return the estimated fisheye circle from the given PanoramaInit node
    as (center offset x, center offset y, radius), or a null vector when the
    estimation is unavailable. """
    nullCircle = QVector3D(0.0, 0.0, 0.0)
    if not panoramaInit or not panoramaInit.isComputed:
        return nullCircle
    if not panoramaInit.attribute("estimateFisheyeCircle").value:
        return nullCircle
    sfmFile = panoramaInit.attribute('outSfMData').value
    if not os.path.exists(sfmFile):
        return nullCircle
    import io  # use io.open for Python2/3 compatibility (allow to specify encoding + errors handling)
    # skip decoding errors to avoid potential exceptions due to non utf-8 characters in images metadata
    with io.open(sfmFile, 'r', encoding='utf-8', errors='ignore') as f:
        data = json.load(f)
    intrinsics = data.get('intrinsics', [])
    if not intrinsics:
        return nullCircle
    intrinsic = intrinsics[0]
    halfWidth = float(intrinsic.get("width", 0.0)) * 0.5
    halfHeight = float(intrinsic.get("height", 0.0)) * 0.5
    return QVector3D(float(intrinsic.get("fisheyeCircleCenterX", 0.0)) - halfWidth,
                     float(intrinsic.get("fisheyeCircleCenterY", 0.0)) - halfHeight,
                     float(intrinsic.get("fisheyeCircleRadius", 0.0)))
def lastSfmNode(self):
""" Retrieve the last SfM node from the initial CameraInit node. """
return self.lastNodeOfType("StructureFromMotion", self._cameraInit, Status.SUCCESS)
return self.lastNodeOfType(self.activeNodeCategories['sfm'], self._cameraInit, Status.SUCCESS)
def lastNodeOfType(self, nodeType, startNode, preferredStatus=None):
def lastNodeOfType(self, nodeTypes, startNode, preferredStatus=None):
"""
Returns the last node of the given type starting from 'startNode'.
If 'preferredStatus' is specified, the last node with this status will be considered in priority.
Args:
nodeType (str): the node type
nodeTypes (str list): the node types
startNode (Node): the node to start from
preferredStatus (Status): (optional) the node status to prioritize
@ -501,7 +643,7 @@ class Reconstruction(UIGraph):
"""
if not startNode:
return None
nodes = self._graph.nodesFromNode(startNode, nodeType)[0]
nodes = self._graph.nodesFromNode(startNode, nodeTypes)[0]
if not nodes:
return None
node = nodes[-1]
@ -589,22 +731,22 @@ class Reconstruction(UIGraph):
"",
))
else:
panoramaExternalInfoNodes = self.graph.nodesByType('PanoramaExternalInfo')
panoramaInitNodes = self.graph.nodesByType('PanoramaInit')
for panoramaInfoFile in filesByType.panoramaInfo:
for panoramaInfoNode in panoramaExternalInfoNodes:
panoramaInfoNode.attribute('config').value = panoramaInfoFile
if panoramaExternalInfoNodes:
for panoramaInitNode in panoramaInitNodes:
panoramaInitNode.attribute('config').value = panoramaInfoFile
if panoramaInitNodes:
self.info.emit(
Message(
"Panorama XML",
"XML file declared on PanoramaExternalInfo node",
"XML file '{}' set on node '{}'".format(','.join(filesByType.panoramaInfo), ','.join([n.getLabel() for n in panoramaExternalInfoNodes])),
"XML file declared on PanoramaInit node",
"XML file '{}' set on node '{}'".format(','.join(filesByType.panoramaInfo), ','.join([n.getLabel() for n in panoramaInitNodes])),
))
else:
self.error.emit(
Message(
"No PanoramaExternalInfo Node",
"No PanoramaExternalInfo Node to set the Panorama file:\n'{}'.".format(','.join(filesByType.panoramaInfo)),
"No PanoramaInit Node",
"No PanoramaInit Node to set the Panorama file:\n'{}'.".format(','.join(filesByType.panoramaInfo)),
"",
))
@ -648,10 +790,24 @@ class Reconstruction(UIGraph):
recursive: List files in folders recursively.
"""
logging.debug("importImagesFromFolder: " + str(path))
filesByType = multiview.findFilesByTypeInFolder(path, recursive)
if filesByType.images:
self.buildIntrinsics(self.cameraInit, filesByType.images)
@Slot("QVariant")
def importImagesUrls(self, imagePaths, recursive=False):
paths = []
for imagePath in imagePaths:
if isinstance(imagePath, (QUrl)):
p = imagePath.toLocalFile()
if not p:
p = imagePath.toString()
else:
p = imagePath
paths.append(p)
self.importImagesFromFolder(paths)
def importImagesAsync(self, images, cameraInit):
""" Add the given list of images to the Reconstruction. """
# Start the process of updating views and intrinsics
@ -765,8 +921,11 @@ class Reconstruction(UIGraph):
self._buildingIntrinsics = value
self.buildingIntrinsicsChanged.emit()
activeNodes = makeProperty(QObject, "_activeNodes", resetOnDestroy=True)
cameraInitChanged = Signal()
cameraInit = makeProperty(QObject, "_cameraInit", cameraInitChanged, resetOnDestroy=True)
tempCameraInitChanged = Signal()
tempCameraInit = makeProperty(QObject, "_tempCameraInit", tempCameraInitChanged, resetOnDestroy=True)
cameraInitIndex = Property(int, getCameraInitIndex, setCameraInitIndex, notify=cameraInitChanged)
viewpoints = Property(QObject, getViewpoints, notify=cameraInitChanged)
cameraInits = Property(QObject, lambda self: self._cameraInits, constant=True)
@ -777,27 +936,46 @@ class Reconstruction(UIGraph):
liveSfmManager = Property(QObject, lambda self: self._liveSfmManager, constant=True)
@Slot(QObject)
def setActiveNodeOfType(self, node):
def setActiveNode(self, node):
""" Set node as the active node of its type. """
if node.nodeType == "StructureFromMotion":
self.sfm = node
elif node.nodeType == "FeatureExtraction":
self.featureExtraction = node
elif node.nodeType == "CameraInit":
self.cameraInit = node
elif node.nodeType == "PrepareDenseScene":
self.prepareDenseScene = node
for category, nodeTypes in self.activeNodeCategories.items():
if node.nodeType in nodeTypes:
self.activeNodes.get(category).node = node
if category == 'sfm':
self.setSfm(node)
self.activeNodes.get(node.nodeType).node = node
@Slot(QObject)
def setActiveNodes(self, nodes):
    """ Set each given node as the active node of its category and type. """
    validNodes = [n for n in nodes if n is not None]

    # For each category, keep only the last matching node in the list.
    lastPerCategory = {}
    for n in validNodes:
        for category, nodeTypes in self.activeNodeCategories.items():
            if n.nodeType in nodeTypes:
                lastPerCategory[category] = n

    for category, n in lastPerCategory.items():
        self.activeNodes.get(category).node = n
        if category == 'sfm':
            self.setSfm(n)

    # Also expose each node as the active one of its exact type
    # (skip CompatibilityNode placeholders).
    for n in validNodes:
        if not isinstance(n, CompatibilityNode):
            self.activeNodes.get(n.nodeType).node = n
def updateSfMResults(self):
"""
Update internal views, poses and solved intrinsics based on the current SfM node.
"""
if not self._sfm:
if not self._sfm or ('outputViewsAndPoses' not in self._sfm.getAttributes().keys()):
self._views = dict()
self._poses = dict()
self._solvedIntrinsics = dict()
else:
self._views, self._poses, self._solvedIntrinsics = self._sfm.nodeDesc.getResults(self._sfm)
self._views, self._poses, self._solvedIntrinsics = parseSfMJsonFile(self._sfm.outputViewsAndPoses.value)
self.sfmReportChanged.emit()
def getSfm(self):
@ -835,9 +1013,6 @@ class Reconstruction(UIGraph):
self._sfm.destroyed.disconnect(self._unsetSfm)
self._setSfm(node)
self.texturing = self.lastNodeOfType("Texturing", self._sfm, Status.SUCCESS)
self.prepareDenseScene = self.lastNodeOfType("PrepareDenseScene", self._sfm, Status.SUCCESS)
@Slot(QObject, result=bool)
def isInViews(self, viewpoint):
if not viewpoint:
@ -897,7 +1072,11 @@ class Reconstruction(UIGraph):
def reconstructedCamerasCount(self):
""" Get the number of reconstructed cameras in the current context. """
return len([v for v in self.getViewpoints() if self.isReconstructed(v)])
viewpoints = self.getViewpoints()
# Check that the object is iterable to avoid error with undefined Qt Property
if not isinstance(viewpoints, Iterable):
return 0
return len([v for v in viewpoints if self.isReconstructed(v)])
@Slot(QObject, result="QVariant")
def getSolvedIntrinsics(self, viewpoint):
@ -940,17 +1119,10 @@ class Reconstruction(UIGraph):
sfmChanged = Signal()
sfm = Property(QObject, getSfm, setSfm, notify=sfmChanged)
featureExtractionChanged = Signal()
featureExtraction = makeProperty(QObject, "_featureExtraction", featureExtractionChanged, resetOnDestroy=True)
sfmReportChanged = Signal()
# convenient property for QML binding re-evaluation when sfm report changes
sfmReport = Property(bool, lambda self: len(self._poses) > 0, notify=sfmReportChanged)
sfmAugmented = Signal(Node, Node)
prepareDenseSceneChanged = Signal()
prepareDenseScene = makeProperty(QObject, "_prepareDenseScene", notify=prepareDenseSceneChanged, resetOnDestroy=True)
texturingChanged = Signal()
texturing = makeProperty(QObject, "_texturing", notify=texturingChanged)
nbCameras = Property(int, reconstructedCamerasCount, notify=sfmReportChanged)

View file

@ -1,5 +1,6 @@
# runtime
psutil>=5.6.3
enum34;python_version<"3.4"
PySide2==5.13.0
PySide2==5.14.1
markdown==2.6.11
requests==2.22.0

View file

@ -41,6 +41,9 @@ class PlatformExecutable(Executable):
build_exe_options = {
# include dynamically loaded plugins
"packages": ["meshroom.nodes", "meshroom.submitters"],
"includes": [
"idna.idnadata", # Dependency needed by SketchfabUpload node, but not detected by cx_Freeze
],
"include_files": ["CHANGES.md", "COPYING.md", "LICENSE-MPL2.md", "README.md"]
}

View file

@ -34,6 +34,18 @@ SampleGroupV2 = [
)
]
# SampleGroupV3 is SampleGroupV2 with one more int parameter
SampleGroupV3 = [
    desc.IntParam(name="a", label="a", description="", value=0, uid=[0], range=None),
    # extra parameter absent from SampleGroupV2, used to exercise value conformation on upgrade
    desc.IntParam(name="notInSampleGroupV2", label="notInSampleGroupV2", description="", value=0, uid=[0], range=None),
    desc.ListAttribute(
        name="b",
        elementDesc=desc.GroupAttribute(name="p", label="", description="", groupDesc=SampleGroupV1),
        label="b",
        description="",
    )
]
class SampleNodeV1(desc.Node):
""" Version 1 Sample Node """
@ -103,6 +115,21 @@ class SampleNodeV5(desc.Node):
desc.File(name='output', label='Output', description='', value=desc.Node.internalFolder, uid=[])
]
class SampleNodeV6(desc.Node):
    """
    Version 6 Sample Node.

    Changes from V5:
        * 'paramA' elementDesc has changed from SampleGroupV2 to SampleGroupV3
    """
    inputs = [
        desc.File(name='in', label='Input', description='', value='', uid=[0]),
        desc.ListAttribute(name='paramA', label='ParamA',
                           elementDesc=desc.GroupAttribute(
                               groupDesc=SampleGroupV3, name='gA', label='gA', description=''),
                           description='')
    ]
    outputs = [
        desc.File(name='output', label='Output', description='', value=desc.Node.internalFolder, uid=[])
    ]
def test_unknown_node_type():
"""
@ -289,3 +316,48 @@ def test_upgradeAllNodes():
assert n2Name in g.compatibilityNodes.keys()
unregisterNodeType(SampleNodeV1)
def test_conformUpgrade():
    """ A node saved with SampleNodeV5 can be upgraded to SampleNodeV6
    (whose 'paramA' element description gained a field), and its saved
    values are conformed to the new description. """
    registerNodeType(SampleNodeV5)
    registerNodeType(SampleNodeV6)

    g = Graph('')
    n1 = g.addNewNode("SampleNodeV5")
    n1.paramA.value = [{'a': 0, 'b': [{'a': 0, 'b': [1.0, 2.0]}, {'a': 1, 'b': [1.0, 2.0]}]}]
    n1Name = n1.name
    graphFile = os.path.join(tempfile.mkdtemp(), "test_conform_upgrade.mg")
    g.save(graphFile)

    # replace SampleNodeV5 by SampleNodeV6
    meshroom.core.nodesDesc[SampleNodeV5.__name__] = SampleNodeV6

    # reload file
    g = loadGraph(graphFile)
    os.remove(graphFile)

    # node is a CompatibilityNode
    assert len(g.compatibilityNodes) == 1
    assert g.node(n1Name).canUpgrade

    # upgrade all upgradable nodes
    g.upgradeAllNodes()

    # every compatibility node has been upgraded
    assert len(g.compatibilityNodes) == 0

    upgradedNode = g.node(n1Name)

    # check upgrade
    assert isinstance(upgradedNode, Node) and isinstance(upgradedNode.nodeDesc, SampleNodeV6)

    # check conformation: the saved list value is preserved through the upgrade
    assert len(upgradedNode.paramA.value) == 1

    unregisterNodeType(SampleNodeV5)
    unregisterNodeType(SampleNodeV6)

View file

@ -180,7 +180,7 @@ def test_graph_reverse_dfs():
nodes = graph.nodesFromNode(B)[0]
assert set(nodes) == {B, D, C, E, F}
# Get all nodes of type AppendText from B
nodes = graph.nodesFromNode(B, filterType='AppendText')[0]
nodes = graph.nodesFromNode(B, filterTypes=['AppendText'])[0]
assert set(nodes) == {B, D, C, F}
# Get all nodes from C (order guaranteed)
nodes = graph.nodesFromNode(C)[0]