Mirror of https://github.com/alicevision/Meshroom.git, synced 2025-06-10 23:01:59 +02:00
Merge remote-tracking branch 'origin/develop' into dev_depthMapOptim

Commit 8214f0a0e8
53 changed files with 134 additions and 28 deletions
@@ -461,6 +461,7 @@ class Node(object):
    size = StaticNodeSize(1)
    parallelization = None
    documentation = ''
    category = 'Other'

    def __init__(self):
        pass
@@ -529,6 +529,8 @@ class BaseNode(BaseObject):
        return "{}{}".format(t, idx if int(idx) > 1 else "")

    def getDocumentation(self):
        if not self.nodeDesc:
            return ""
        return self.nodeDesc.documentation

    @property
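The two hunks above introduce a `category` class attribute on the node description base class (defaulting to 'Other') and expose a node's documentation through `BaseNode.getDocumentation()`. A rough sketch of how a node description is expected to declare these attributes; the `MyFilter` node and its command line are hypothetical and not part of this commit:

from meshroom.core import desc


class MyFilter(desc.CommandLineNode):
    # Hypothetical node, used only to illustrate the new attribute.
    commandLine = 'aliceVision_myFilter {allParams}'

    # Declared category; falls back to 'Other' when a node does not set it.
    category = 'Utils'
    documentation = '''Short text returned by BaseNode.getDocumentation().'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='Input file.',
            value='',
            uid=[0],
        ),
    ]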
@@ -6,6 +6,8 @@ from meshroom.core import desc
class CameraCalibration(desc.CommandLineNode):
    commandLine = 'aliceVision_cameraCalibration {allParams}'

    category = 'Utils'

    inputs = [
        desc.File(
            name='input',
@@ -116,6 +116,7 @@ class CameraInit(desc.CommandLineNode):
    size = desc.DynamicNodeSize('viewpoints')

    category = 'Sparse Reconstruction'
    documentation = '''
This node describes your dataset. It lists the Viewpoints candidates, the guess about the type of optic, the initial focal length
and which images are sharing the same internal camera parameters, as well as potential cameras rigs.
@@ -7,6 +7,8 @@ from meshroom.core import desc
class CameraLocalization(desc.CommandLineNode):
    commandLine = 'aliceVision_cameraLocalization {allParams}'

    category = 'Utils'

    inputs = [
        desc.File(
            name='sfmdata',
@@ -7,6 +7,8 @@ from meshroom.core import desc
class CameraRigCalibration(desc.CommandLineNode):
    commandLine = 'aliceVision_rigCalibration {allParams}'

    category = 'Utils'

    inputs = [
        desc.File(
            name='sfmdata',
@@ -7,6 +7,8 @@ from meshroom.core import desc
class CameraRigLocalization(desc.CommandLineNode):
    commandLine = 'aliceVision_rigLocalization {allParams}'

    category = 'Utils'

    inputs = [
        desc.File(
            name='sfmdata',
@@ -5,6 +5,8 @@ from meshroom.core import desc

class ConvertMesh(desc.CommandLineNode):
    commandLine = 'aliceVision_convertMesh {allParams}'

    category = 'Utils'
    documentation = '''This node allows to convert a mesh to another format.'''

    inputs = [
@@ -7,6 +7,7 @@ class ConvertSfMFormat(desc.CommandLineNode):
    commandLine = 'aliceVision_convertSfMFormat {allParams}'
    size = desc.DynamicNodeSize('input')

    category = 'Utils'
    documentation = '''
Convert an SfM scene from one file format to another.
It can also be used to remove specific parts of from an SfM scene (like filter all 3D landmarks or filter 2D observations).
@@ -10,6 +10,7 @@ class DepthMap(desc.CommandLineNode):
    parallelization = desc.Parallelization(blockSize=3)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Dense Reconstruction'
    documentation = '''
For each camera that have been estimated by the Structure-From-Motion, it estimates the depth value per pixel.
@@ -10,6 +10,7 @@ class DepthMapFilter(desc.CommandLineNode):
    parallelization = desc.Parallelization(blockSize=10)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Dense Reconstruction'
    documentation = '''
Filter depth map values that are not coherent in multiple depth maps.
This allows to filter unstable points before starting the fusion of all depth maps in the Meshing node.
@@ -6,6 +6,7 @@ from meshroom.core import desc
class ExportAnimatedCamera(desc.CommandLineNode):
    commandLine = 'aliceVision_exportAnimatedCamera {allParams}'

    category = 'Export'
    documentation = '''
Convert cameras from an SfM scene into an animated cameras in Alembic file format.
Based on the input image filenames, it will recognize the input video sequence to create an animated camera.
@@ -6,6 +6,8 @@ from meshroom.core import desc
class ExportColoredPointCloud(desc.CommandLineNode):
    commandLine = 'aliceVision_exportColoredPointCloud {allParams}'

    category = 'Export'

    inputs = [
        desc.File(
            name='input',
@@ -7,6 +7,8 @@ class ExportMatches(desc.CommandLineNode):
    commandLine = 'aliceVision_exportMatches {allParams}'
    size = desc.DynamicNodeSize('input')

    category = 'Export'

    inputs = [
        desc.File(
            name='input',
@@ -6,6 +6,7 @@ from meshroom.core import desc
class ExportMaya(desc.CommandLineNode):
    commandLine = 'aliceVision_exportMeshroomMaya {allParams}'

    category = 'Export'
    documentation = '''
Export a scene for Autodesk Maya, with an Alembic file describing the SfM: cameras and 3D points.
It will export half-size undistorted images to use as image planes for cameras and also export thumbnails.
@@ -9,6 +9,7 @@ class FeatureExtraction(desc.CommandLineNode):
    parallelization = desc.Parallelization(blockSize=40)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Sparse Reconstruction'
    documentation = '''
This node extracts distinctive groups of pixels that are, to some extent, invariant to changing camera viewpoints during image acquisition.
Hence, a feature in the scene should have similar feature descriptions in all images.
@@ -9,6 +9,7 @@ class FeatureMatching(desc.CommandLineNode):
    parallelization = desc.Parallelization(blockSize=20)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Sparse Reconstruction'
    documentation = '''
This node performs the matching of all features between the candidate image pairs.
@@ -9,7 +9,9 @@ class FeatureRepeatability(desc.CommandLineNode):
    # parallelization = desc.Parallelization(blockSize=40)
    # commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Utils'
    documentation = '''
Compare feature/descriptor matching repeatability on some dataset with known homography motions.
'''

    inputs = [
@@ -10,6 +10,7 @@ class GlobalSfM(desc.CommandLineNode):
    commandLine = 'aliceVision_globalSfM {allParams}'
    size = desc.DynamicNodeSize('input')

    category = 'Sparse Reconstruction'
    documentation = '''
Performs the Structure-From-Motion with a global approach.
It is known to be faster but less robust to challenging datasets than the Incremental approach.
@@ -8,6 +8,7 @@ class ImageMatching(desc.CommandLineNode):
    commandLine = 'aliceVision_imageMatching {allParams}'
    size = desc.DynamicNodeSize('input')

    category = 'Sparse Reconstruction'
    documentation = '''
The goal of this node is to select the image pairs to match. The ambition is to find the images that are looking to the same areas of the scene.
Thanks to this node, the FeatureMatching node will only compute the matches between the selected image pairs.
@@ -9,6 +9,7 @@ class ImageMatchingMultiSfM(desc.CommandLineNode):
    # use both SfM inputs to define Node's size
    size = desc.MultiDynamicNodeSize(['input', 'inputB'])

    category = 'Sparse Reconstruction'
    documentation = '''
The goal of this node is to select the image pairs to match in the context of an SfM augmentation.
The ambition is to find the images that are looking to the same areas of the scene.
@@ -34,6 +34,7 @@ class ImageProcessing(desc.CommandLineNode):
    # parallelization = desc.Parallelization(blockSize=40)
    # commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Utils'
    documentation = '''
Convert or apply filtering to the input images.
'''
@@ -7,6 +7,7 @@ from meshroom.core import desc
class KeyframeSelection(desc.CommandLineNode):
    commandLine = 'aliceVision_utils_keyframeSelection {allParams}'

    category = 'Utils'
    documentation = '''
Allows to extract keyframes from a video and insert metadata.
It can extract frames from a synchronized multi-cameras rig.
@@ -26,10 +26,10 @@ def findMetadata(d, keys, defaultValue):
class LdrToHdrCalibration(desc.CommandLineNode):
    commandLine = 'aliceVision_LdrToHdrCalibration {allParams}'
    size = desc.DynamicNodeSize('input')

    cpu = desc.Level.INTENSIVE
    ram = desc.Level.NORMAL

    category = 'Panorama HDR'
    documentation = '''
Calibrate LDR to HDR response curve from samples
'''
@@ -28,6 +28,7 @@ class LdrToHdrMerge(desc.CommandLineNode):
    parallelization = desc.Parallelization(blockSize=2)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Panorama HDR'
    documentation = '''
Calibrate LDR to HDR response curve from samples
'''
@@ -45,6 +45,7 @@ class LdrToHdrSampling(desc.CommandLineNode):
    parallelization = desc.Parallelization(blockSize=2)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Panorama HDR'
    documentation = '''
Sample pixels from Low range images for HDR creation
'''
@@ -6,6 +6,8 @@ from meshroom.core import desc
class LightingEstimation(desc.CommandLineNode):
    commandLine = 'aliceVision_utils_lightingEstimation {allParams}'

    category = 'Utils'

    inputs = [
        desc.File(
            name='input',
@@ -5,6 +5,8 @@ from meshroom.core import desc

class MergeMeshes(desc.CommandLineNode):
    commandLine = 'aliceVision_utils_mergeMeshes {allParams}'

    category = 'Utils'
    documentation = '''
This node allows to merge two meshes in one.
@@ -5,10 +5,10 @@ from meshroom.core import desc

class MeshDecimate(desc.CommandLineNode):
    commandLine = 'aliceVision_meshDecimate {allParams}'

    cpu = desc.Level.NORMAL
    ram = desc.Level.NORMAL

    category = 'Mesh Post-Processing'
    documentation = '''
This node allows to reduce the density of the Mesh.
'''
@@ -6,6 +6,7 @@ from meshroom.core import desc
class MeshDenoising(desc.CommandLineNode):
    commandLine = 'aliceVision_meshDenoising {allParams}'

    category = 'Mesh Post-Processing'
    documentation = '''
This experimental node allows to reduce noise from a Mesh.
for now, the parameters are difficult to control and vary a lot from one dataset to another.
@@ -5,6 +5,8 @@ from meshroom.core import desc

class MeshFiltering(desc.CommandLineNode):
    commandLine = 'aliceVision_meshFiltering {allParams}'

    category = 'Dense Reconstruction'
    documentation = '''
This node applies a Laplacian filtering to remove local defects from the raw Meshing cut.
'''
@@ -5,10 +5,10 @@ from meshroom.core import desc

class MeshResampling(desc.CommandLineNode):
    commandLine = 'aliceVision_meshResampling {allParams}'

    cpu = desc.Level.NORMAL
    ram = desc.Level.NORMAL

    category = 'Mesh Post-Processing'
    documentation = '''
This node allows to recompute the mesh surface with a new topology and uniform density.
'''
@@ -9,6 +9,7 @@ class Meshing(desc.CommandLineNode):
    cpu = desc.Level.INTENSIVE
    ram = desc.Level.INTENSIVE

    category = 'Dense Reconstruction'
    documentation = '''
This node creates a dense geometric surface representation of the scene.
@@ -9,13 +9,12 @@ from meshroom.core import desc
class PanoramaCompositing(desc.CommandLineNode):
    commandLine = 'aliceVision_panoramaCompositing {allParams}'
    size = desc.DynamicNodeSize('input')

    parallelization = desc.Parallelization(blockSize=5)
    commandLineRange = '--rangeIteration {rangeIteration} --rangeSize {rangeBlockSize}'

    cpu = desc.Level.INTENSIVE
    ram = desc.Level.INTENSIVE

    category = 'Panorama HDR'
    documentation = '''
Once the images have been transformed geometrically (in PanoramaWarping),
they have to be fused together in a single panorama image which looks like a single photography.
@@ -10,6 +10,7 @@ class PanoramaEstimation(desc.CommandLineNode):
    commandLine = 'aliceVision_panoramaEstimation {allParams}'
    size = desc.DynamicNodeSize('input')

    category = 'Panorama HDR'
    documentation = '''
Estimate relative camera rotations between input images.
'''
@@ -7,6 +7,7 @@ class PanoramaInit(desc.CommandLineNode):
    commandLine = 'aliceVision_panoramaInit {allParams}'
    size = desc.DynamicNodeSize('input')

    category = 'Panorama HDR'
    documentation = '''
This node allows to setup the Panorama:
@@ -9,11 +9,10 @@ from meshroom.core import desc
class PanoramaMerging(desc.CommandLineNode):
    commandLine = 'aliceVision_panoramaMerging {allParams}'
    size = desc.DynamicNodeSize('input')

    cpu = desc.Level.NORMAL
    ram = desc.Level.INTENSIVE

    category = 'Panorama HDR'
    documentation = '''
Merge all inputs coming from PanoramaComposiring
'''
@@ -9,6 +9,7 @@ class PanoramaPrepareImages(desc.CommandLineNode):
    commandLine = 'aliceVision_panoramaPrepareImages {allParams}'
    size = desc.DynamicNodeSize('input')

    category = 'Panorama HDR'
    documentation = '''
Prepare images for Panorama pipeline: ensures that images orientations are coherent.
'''
@@ -9,10 +9,10 @@ from meshroom.core import desc
class PanoramaSeams(desc.CommandLineNode):
    commandLine = 'aliceVision_panoramaSeams {allParams}'
    size = desc.DynamicNodeSize('input')

    cpu = desc.Level.INTENSIVE
    ram = desc.Level.INTENSIVE

    category = 'Panorama HDR'
    documentation = '''
Estimate the seams lines between the inputs to provide an optimal compositing in a further node
'''
@@ -9,10 +9,10 @@ from meshroom.core import desc
class PanoramaWarping(desc.CommandLineNode):
    commandLine = 'aliceVision_panoramaWarping {allParams}'
    size = desc.DynamicNodeSize('input')

    parallelization = desc.Parallelization(blockSize=5)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Panorama HDR'
    documentation = '''
Compute the image warping for each input image in the panorama coordinate system.
'''
@@ -9,6 +9,7 @@ class PrepareDenseScene(desc.CommandLineNode):
    parallelization = desc.Parallelization(blockSize=40)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Dense Reconstruction'
    documentation = '''
This node export undistorted images so the depth map and texturing can be computed on Pinhole images without distortion.
'''
@@ -11,6 +11,7 @@ import os
class Publish(desc.Node):
    size = desc.DynamicNodeSize('inputFiles')

    category = 'Export'
    documentation = '''
This node allows to copy files into a specific folder.
'''
@@ -9,6 +9,7 @@ class SfMAlignment(desc.CommandLineNode):
    commandLine = 'aliceVision_utils_sfmAlignment {allParams}'
    size = desc.DynamicNodeSize('input')

    category = 'Utils'
    documentation = '''
This node allows to change the coordinate system of one SfM scene to align it on another one.
@@ -9,6 +9,7 @@ class SfMTransfer(desc.CommandLineNode):
    commandLine = 'aliceVision_utils_sfmTransfer {allParams}'
    size = desc.DynamicNodeSize('input')

    category = 'Utils'
    documentation = '''
This node allows to transfer poses and/or intrinsics form one SfM scene onto another one.
'''
@@ -9,6 +9,7 @@ class SfMTransform(desc.CommandLineNode):
    commandLine = 'aliceVision_utils_sfmTransform {allParams}'
    size = desc.DynamicNodeSize('input')

    category = 'Utils'
    documentation = '''
This node allows to change the coordinate system of one SfM scene.
@@ -38,9 +39,10 @@ The transformation can be based on:
                " * auto_from_cameras: Use cameras\n"
                " * auto_from_landmarks: Use landmarks\n"
                " * from_single_camera: Use a specific camera as the origin of the coordinate system\n"
                " * from_center_camera: Use the center camera as the origin of the coordinate system\n"
                " * from_markers: Align specific markers to custom coordinates",
            value='auto_from_landmarks',
            values=['transformation', 'manual', 'auto_from_cameras', 'auto_from_landmarks', 'from_single_camera', 'from_markers'],
            values=['transformation', 'manual', 'auto_from_cameras', 'auto_from_landmarks', 'from_single_camera', 'from_center_camera', 'from_markers'],
            exclusive=True,
            uid=[0],
        ),
@@ -52,6 +52,7 @@ def progressUpdate(size=None, progress=None, logManager=None):
class SketchfabUpload(desc.Node):
    size = desc.DynamicNodeSize('inputFiles')

    category = 'Export'
    documentation = '''
Upload a textured mesh on Sketchfab.
'''
@@ -7,6 +7,7 @@ class StructureFromMotion(desc.CommandLineNode):
    commandLine = 'aliceVision_incrementalSfM {allParams}'
    size = desc.DynamicNodeSize('input')

    category = 'Sparse Reconstruction'
    documentation = '''
This node will analyze feature matches to understand the geometric relationship behind all the 2D observations,
and infer the rigid scene structure (3D points) with the pose (position and orientation) and internal calibration of all cameras.
@@ -8,6 +8,7 @@ class Texturing(desc.CommandLineNode):
    cpu = desc.Level.INTENSIVE
    ram = desc.Level.INTENSIVE

    category = 'Dense Reconstruction'
    documentation = '''
This node computes the texturing on the mesh.
@@ -117,7 +117,7 @@ class MeshroomApp(QApplication):
        components.registerTypes()

        # expose available node types that can be instantiated
        self.engine.rootContext().setContextProperty("_nodeTypes", sorted(nodesDesc.keys()))
        self.engine.rootContext().setContextProperty("_nodeTypes", {n: {"category": nodesDesc[n].category} for n in sorted(nodesDesc.keys())})

        # instantiate Reconstruction object
        self._undoStack = commands.UndoStack(self)
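The hunk above changes the `_nodeTypes` context property from a plain sorted list of type names to a mapping that also carries each type's category, which the QML node-creation menu uses for grouping. A minimal sketch of the before/after shape, with illustrative stand-in data that is not taken from the commit:

# Stand-in for meshroom.core.nodesDesc: name -> description object with a 'category' attribute.
class FakeDesc:
    def __init__(self, category):
        self.category = category

nodesDesc = {
    "FeatureExtraction": FakeDesc("Sparse Reconstruction"),
    "Publish": FakeDesc("Export"),
}

# Before: a sorted list of node type names.
before = sorted(nodesDesc.keys())
# ['FeatureExtraction', 'Publish']

# After: each name maps to a dict carrying its category.
after = {n: {"category": nodesDesc[n].category} for n in sorted(nodesDesc.keys())}
# {'FeatureExtraction': {'category': 'Sparse Reconstruction'}, 'Publish': {'category': 'Export'}}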
@@ -34,6 +34,11 @@ FocusScope {
            focus: true
            Layout.fillWidth: true
            selectByMouse: true

            // ensure the field has focus when the text is modified
            onTextChanged: {
                forceActiveFocus()
            }
        }
    }
}
@@ -134,6 +134,22 @@ Item {
            // add node via the proper command in uigraph
            var node = uigraph.addNewNode(nodeType, spawnPosition)
            selectNode(node)
            close()
        }

        function parseCategories()
        {
            // organize nodes based on their category
            // {"category1": ["node1", "node2"], "category2": ["node3", "node4"]}
            let categories = {};
            for (const [name, data] of Object.entries(root.nodeTypesModel)) {
                let category = data["category"];
                if (categories[category] === undefined) {
                    categories[category] = [];
                }
                categories[category].push(name)
            }
            return categories
        }

        onVisibleChanged: {
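The new `parseCategories()` helper groups the exposed node types by category before the sub-menus are instantiated. The same grouping logic, sketched in Python for clarity; the sample node types are illustrative only:

# Illustrative input, shaped like the _nodeTypes mapping exposed to QML.
node_types = {
    "CameraInit": {"category": "Sparse Reconstruction"},
    "FeatureExtraction": {"category": "Sparse Reconstruction"},
    "Publish": {"category": "Export"},
}


def parse_categories(types):
    """Group node type names by their category, mirroring the QML helper."""
    categories = {}
    for name, data in types.items():
        categories.setdefault(data["category"], []).append(name)
    return categories


print(parse_categories(node_types))
# {'Sparse Reconstruction': ['CameraInit', 'FeatureExtraction'], 'Export': ['Publish']}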
@@ -150,11 +166,10 @@ Item {
            width: parent.width
        }

        Repeater {
            model: root.nodeTypesModel

            // Create Menu items from available node types model
            delegate: MenuItem {
        // menuItemDelegate is wrapped in a component so it can be used in both the search bar and sub-menus
        Component {
            id: menuItemDelegateComponent
            MenuItem {
                id: menuItemDelegate
                font.pointSize: 8
                padding: 3
@@ -169,17 +184,20 @@ Item {
                Keys.forwardTo: [searchBar.textField]
                Keys.onPressed: {
                    event.accepted = false;
                    switch(event.key)
                    {
                    switch(event.key) {
                        case Qt.Key_Return:
                        case Qt.Key_Enter:
                            // create node on validation (Enter/Return keys)
                            newNodeMenu.createNode(modelData);
                            newNodeMenu.close();
                            event.accepted = true;
                            break;
                        case Qt.Key_Up:
                        case Qt.Key_Down:
                        case Qt.Key_Left:
                        case Qt.Key_Right:
                            break; // ignore if arrow key was pressed to let the menu be controlled
                        default:
                            searchBar.textField.forceActiveFocus();
                            searchBar.forceActiveFocus();
                    }
                }
                // Create node on mouse click
@@ -199,6 +217,33 @@ Item {
                ]
            }
        }

        Repeater {
            id: nodeMenuRepeater
            model: searchBar.text != "" ? Object.keys(root.nodeTypesModel) : undefined

            // create Menu items from available items
            delegate: menuItemDelegateComponent
        }

        // Dynamically add the menu categories
        Instantiator {
            model: !(searchBar.text != "") ? Object.keys(newNodeMenu.parseCategories()).sort() : undefined
            onObjectAdded: newNodeMenu.insertMenu(index+1, object ) // add sub-menu under the search bar
            onObjectRemoved: newNodeMenu.removeMenu(object)

            delegate: Menu {
                title: modelData
                id: newNodeSubMenu

                Instantiator {
                    model: newNodeMenu.visible && newNodeSubMenu.activeFocus ? newNodeMenu.parseCategories()[modelData] : undefined
                    onObjectAdded: newNodeSubMenu.insertItem(index, object)
                    onObjectRemoved: newNodeSubMenu.removeItem(object)
                    delegate: menuItemDelegateComponent
                }
            }
        }
    }

    // Informative contextual menu when graph is read-only
@@ -66,7 +66,7 @@ FloatingPane {
            MaterialLabel { text: MaterialIcons.grain; padding: 2 }
            RowLayout {
                Slider {
                    Layout.fillWidth: true; from: 0; to: 5; stepSize: 0.1
                    Layout.fillWidth: true; from: 0; to: 5; stepSize: 0.001
                    value: Viewer3DSettings.pointSize
                    onValueChanged: Viewer3DSettings.pointSize = value
                    ToolTip.text: "Point Size: " + value.toFixed(2)
@@ -548,6 +548,10 @@ ApplicationWindow {
        }
        Menu {
            title: "Help"
            Action {
                text: "Online Documentation"
                onTriggered: Qt.openUrlExternally("https://meshroom-manual.readthedocs.io")
            }
            Action {
                text: "About Meshroom"
                onTriggered: aboutDialog.open()