[nodes] Clean-up: Harmonize nodes' descriptions

Candice Bentéjac 2023-10-17 15:43:34 +02:00
parent e463f0dce2
commit f2d6770651
25 changed files with 173 additions and 173 deletions
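
For reference, the harmonization applied across these 25 files follows one consistent pattern: double quotes for all string literals, title-cased labels, full-sentence descriptions ending with a period, no stray space after "=" in keyword arguments (e.g. enabled=lambda node: ...), and lists rather than tuples for "values". Below is a minimal before/after sketch of that convention on a generic input-file attribute; it is illustrative only (not copied from any single node) and assumes Meshroom's meshroom.core.desc module is available.

from meshroom.core import desc

# Before: single quotes, terse description
inputFile = desc.File(
    name='input',
    label='Input',
    description='SfMData file.',
    value='',
    uid=[0],
)

# After: double quotes, full-sentence description ending with a period
inputFile = desc.File(
    name="input",
    label="Input",
    description="Input SfMData file.",
    value="",
    uid=[0],
)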


@ -38,8 +38,8 @@ Intrinsic = [
value=-1.0, uid=[0], range=None),
desc.FloatParam(name="focalLength", label="Focal Length", description="Known/calibrated focal length (in mm).", value=1000.0, uid=[0], range=(0.0, 10000.0, 1.0)),
desc.FloatParam(name="pixelRatio", label="Pixel Ratio", description="Ratio between the pixel width and the pixel height.", value=1.0, uid=[0], range=(0.0, 10.0, 0.1)),
desc.BoolParam(name='pixelRatioLocked', label='Pixel Ratio Locked',
description='The pixel ratio value is locked for estimation.',
desc.BoolParam(name="pixelRatioLocked", label="Pixel Ratio Locked",
description="The pixel ratio value is locked for estimation.",
value=True, uid=[0]),
desc.ChoiceParam(name="type", label="Camera Type",
description="Mathematical model used to represent a camera:\n"
@ -241,7 +241,7 @@ The needed metadata are:
desc.ChoiceParam(
name="allowedCameraModels",
label="Allowed Camera Models",
description='List of the camera models that can be attributed.',
description="List of the camera models that can be attributed.",
value=["pinhole", "radial1", "radial3", "brown", "fisheye4", "fisheye1", "3deanamorphic4", "3deradial4", "3declassicld"],
values=["pinhole", "radial1", "radial3", "brown", "fisheye4", "fisheye1", "3deanamorphic4", "3deradial4", "3declassicld"],
exclusive=False,
@ -320,10 +320,10 @@ The needed metadata are:
outputs = [
desc.File(
name='output',
label='SfMData',
description='''Output SfMData.''',
value=desc.Node.internalFolder + 'cameraInit.sfm',
name="output",
label="SfMData",
description="Output SfMData.",
value=desc.Node.internalFolder + "cameraInit.sfm",
uid=[],
),
]


@ -14,7 +14,7 @@ class CameraRigCalibration(desc.AVCommandLineNode):
inputs = [
desc.File(
name="sfmdata",
label='SfMData',
label="SfMData",
description="Input SfMData file.",
value="",
uid=[0],


@ -17,30 +17,30 @@ The detection method also supports nested calibration grids.
inputs = [
desc.File(
name='input',
label='Input',
description='SfMData File. Viewpoints must correspond to lens calibration grids.',
value='',
name="input",
label="Input",
description="Input SfMData file. Viewpoints must correspond to lens calibration grids.",
value="",
uid=[0],
),
desc.BoolParam(
name='useNestedGrids',
label='Nested calibration grid',
description='Images contain nested calibration grids. These grids must be centered on the image center.',
name="useNestedGrids",
label="Nested Calibration Grid",
description="Enable if images contain nested calibration grids. These grids must be centered on the image center.",
value=False,
uid=[0],
),
desc.BoolParam(
name='doubleSize',
label='Double Size',
description='Double the image size prior to processing',
name="doubleSize",
label="Double Size",
description="Double the image size prior to processing.",
value=False,
uid=[0],
),
desc.BoolParam(
name='exportDebugImages',
label='Export Debug Images',
description='Export Debug Images',
name="exportDebugImages",
label="Export Debug Images",
description="Export debug images.",
value=False,
uid=[0],
),
@ -48,20 +48,20 @@ The detection method also supports nested calibration grids.
outputs = [
desc.File(
name='output',
label='Folder',
description='',
name="output",
label="Folder",
description="Output folder.",
value=desc.Node.internalFolder,
uid=[],
),
desc.File(
name='checkerLines',
enabled= lambda node: node.exportDebugImages.value,
label='Checker Lines',
description='Debug Images.',
semantic='image',
value=desc.Node.internalFolder + '<VIEW_ID>.png',
group='', # do not export on the command line
name="checkerLines",
enabled=lambda node: node.exportDebugImages.value,
label="Checker Lines",
description="Debug images.",
semantic="image",
value=desc.Node.internalFolder + "<VIEW_ID>.png",
group="", # do not export on the command line
uid=[],
),
]


@ -19,10 +19,10 @@ class ConvertMesh(desc.AVCommandLineNode):
),
desc.ChoiceParam(
name="outputMeshFileType",
label="File Type",
label="Output File Type",
description="Output mesh format (*.obj, *.gltf, *.fbx, *.stl).",
value="obj",
values=("gltf", "obj", "fbx", "stl"),
values=["gltf", "obj", "fbx", "stl"],
exclusive=True,
uid=[0],
group="",


@ -17,7 +17,7 @@ It can also be used to remove specific parts of from an SfM scene (like filter a
desc.File(
name="input",
label="Input",
description="SfMData file.",
description="Input SfMData file.",
value="",
uid=[0],
),
@ -45,7 +45,7 @@ It can also be used to remove specific parts of from an SfM scene (like filter a
elementDesc=desc.File(
name="imageId",
label="Image ID",
description="UID or path of an image to add to the whitelist.",
description="UID or path of an image to add to the white list.",
value="",
uid=[0],
),


@ -299,7 +299,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
value=1,
range=(-1, 10, 1),
uid=[0],
enabled= lambda node: node.refine.refineEnabled.value,
enabled=lambda node: node.refine.refineEnabled.value,
),
desc.IntParam(
name="refineStepXY",
@ -308,7 +308,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
value=1,
range=(-1, 10, 1),
uid=[0],
enabled= lambda node: node.refine.refineEnabled.value,
enabled=lambda node: node.refine.refineEnabled.value,
),
desc.IntParam(
name="refineMaxTCamsPerTile",
@ -317,7 +317,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
value=4,
range=(1, 20, 1),
uid=[0],
enabled= lambda node: node.refine.refineEnabled.value,
enabled=lambda node: node.refine.refineEnabled.value,
),
desc.IntParam(
name="refineSubsampling",
@ -327,7 +327,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
range=(1, 30, 1),
uid=[0],
advanced=True,
enabled= lambda node: node.refine.refineEnabled.value,
enabled=lambda node: node.refine.refineEnabled.value,
),
desc.IntParam(
name="refineHalfNbDepths",
@ -339,7 +339,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
range=(1, 50, 1),
uid=[0],
advanced=True,
enabled= lambda node: node.refine.refineEnabled.value,
enabled=lambda node: node.refine.refineEnabled.value,
),
desc.IntParam(
name="refineWSH",
@ -349,7 +349,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
range=(1, 20, 1),
uid=[0],
advanced=True,
enabled= lambda node: node.refine.refineEnabled.value,
enabled=lambda node: node.refine.refineEnabled.value,
),
desc.FloatParam(
name="refineSigma",
@ -359,7 +359,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
range=(0.0, 30.0, 0.5),
uid=[0],
advanced=True,
enabled= lambda node: node.refine.refineEnabled.value,
enabled=lambda node: node.refine.refineEnabled.value,
),
desc.FloatParam(
name="refineGammaC",
@ -369,7 +369,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
range=(0.0, 30.0, 0.5),
uid=[0],
advanced=True,
enabled= lambda node: node.refine.refineEnabled.value,
enabled=lambda node: node.refine.refineEnabled.value,
),
desc.FloatParam(
name="refineGammaP",
@ -379,7 +379,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
range=(0.0, 30.0, 0.5),
uid=[0],
advanced=True,
enabled= lambda node: node.refine.refineEnabled.value,
enabled=lambda node: node.refine.refineEnabled.value,
),
desc.BoolParam(
name="refineInterpolateMiddleDepth",
@ -387,7 +387,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
description="Enable middle depth bilinear interpolation.",
value=False,
uid=[0],
enabled= lambda node: node.refine.refineEnabled.value,
enabled=lambda node: node.refine.refineEnabled.value,
),
desc.BoolParam(
name="refineUseConsistentScale",
@ -395,7 +395,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
description="Compare patch with consistent scale for similarity volume computation.",
value=False,
uid=[0],
enabled= lambda node: node.refine.refineEnabled.value,
enabled=lambda node: node.refine.refineEnabled.value,
),
]
),
@ -420,7 +420,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
range=(1, 500, 10),
uid=[0],
advanced=True,
enabled= lambda node: node.colorOptimization.colorOptimizationEnabled.value,
enabled=lambda node: node.colorOptimization.colorOptimizationEnabled.value,
),
]
),
@ -452,7 +452,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
label="Subparts",
description="User custom patch pattern subparts for similarity volume computation.",
advanced=True,
enabled= lambda node: (node.customPatchPattern.sgmUseCustomPatchPattern.value or node.customPatchPattern.refineUseCustomPatchPattern.value),
enabled=lambda node: (node.customPatchPattern.sgmUseCustomPatchPattern.value or node.customPatchPattern.refineUseCustomPatchPattern.value),
elementDesc=desc.GroupAttribute(
name="customPatchPatternSubpart",
label="Patch Pattern Subpart",
@ -511,7 +511,7 @@ Use a downscale factor of one (full-resolution) only if the quality of the input
value=False,
uid=[0],
advanced=True,
enabled= lambda node: (node.customPatchPattern.sgmUseCustomPatchPattern.value or node.customPatchPattern.refineUseCustomPatchPattern.value),
enabled=lambda node: (node.customPatchPattern.sgmUseCustomPatchPattern.value or node.customPatchPattern.refineUseCustomPatchPattern.value),
),
]
),


@ -59,7 +59,7 @@ Based on the input image filenames, it will recognize the input video sequence t
values=["jpg", "png", "tif", "exr"],
exclusive=True,
uid=[0],
enabled= lambda node: node.exportUndistortedImages.value,
enabled=lambda node: node.exportUndistortedImages.value,
),
desc.BoolParam(
name="exportFullROD",


@ -12,38 +12,38 @@ Export the distortion model and parameters of cameras in a SfM scene.
inputs = [
desc.File(
name='input',
label='Input SfMData',
description='SfMData file.',
value='',
name="input",
label="Input SfMData",
description="Input SfMData file.",
value="",
uid=[0],
),
]
outputs = [
desc.File(
name='output',
label='Folder',
description='Output folder.',
name="output",
label="Folder",
description="Output folder.",
value=desc.Node.internalFolder,
uid=[],
),
desc.File(
name='distoStMap',
label='Distortion ST Map',
description='Calibrated distortion ST map.',
semantic='image',
value=desc.Node.internalFolder + '<INTRINSIC_ID>_distort.exr',
group='', # do not export on the command line
name="distoStMap",
label="Distortion ST Map",
description="Calibrated distortion ST map.",
semantic="image",
value=desc.Node.internalFolder + "<INTRINSIC_ID>_distort.exr",
group="", # do not export on the command line
uid=[],
),
desc.File(
name='undistoStMap',
label='Undistortion ST Map',
description='Calibrated undistortion ST map.',
semantic='image',
value=desc.Node.internalFolder + '<INTRINSIC_ID>_undistort.exr',
group='', # do not export on the command line
name="undistoStMap",
label="Undistortion ST Map",
description="Calibrated undistortion ST map.",
semantic="image",
value=desc.Node.internalFolder + "<INTRINSIC_ID>_undistort.exr",
group="", # do not export on the command line
uid=[],
),
]


@ -413,7 +413,7 @@ Convert or apply filtering to the input images.
desc.BoolParam(
name="nlmFilterEnabled",
label="Enable",
description='Use Non-Local Mean Denoising from OpenCV to denoise images.',
description="Use Non-Local Mean Denoising from OpenCV to denoise images.",
value=False,
uid=[0],
),
@ -587,17 +587,17 @@ Convert or apply filtering to the input images.
),
desc.File(
name='lensCorrectionProfileInfo',
label='Lens Correction Profile Info',
description='''Lens Correction Profile filepath or database directory.''',
value='${ALICEVISION_LENS_PROFILE_INFO}',
name="lensCorrectionProfileInfo",
label="Lens Correction Profile Info",
description="Lens Correction Profile filepath or database directory.",
value="${ALICEVISION_LENS_PROFILE_INFO}",
uid=[],
),
desc.BoolParam(
name='lensCorrectionProfileSearchIgnoreCameraModel',
label='LCP Generic Search',
description='The lens name and camera maker are used to match the LCP database, but the camera model is ignored.',
name="lensCorrectionProfileSearchIgnoreCameraModel",
label="LCP Generic Search",
description="The lens name and camera maker are used to match the LCP database, but the camera model is ignored.",
value=True,
uid=[0],
advanced=True,
@ -689,7 +689,7 @@ Convert or apply filtering to the input images.
label="Images",
description="Output images.",
semantic="image",
value= outputImagesValueFunct,
value=outputImagesValueFunct,
group="", # do not export on the command line
uid=[],
),


@ -80,7 +80,7 @@ Calibrate LDR to HDR response curve from samples.
description="Bypass HDR creation and use the medium bracket as the source for the next steps.",
value=False,
uid=[0],
enabled= lambda node: node.nbBrackets.value != 1,
enabled=lambda node: node.nbBrackets.value != 1,
),
desc.ChoiceParam(
name="calibrationMethod",
@ -95,7 +95,7 @@ Calibrate LDR to HDR response curve from samples.
value="auto",
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.ChoiceParam(
name="calibrationWeight",
@ -109,7 +109,7 @@ Calibrate LDR to HDR response curve from samples.
values=["default", "gaussian", "triangle", "plateau"],
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name="channelQuantizationPower",
@ -119,7 +119,7 @@ Calibrate LDR to HDR response curve from samples.
range=(8, 14, 1),
uid=[0],
advanced=True,
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.ChoiceParam(
name="workingColorSpace",
@ -131,7 +131,7 @@ Calibrate LDR to HDR response curve from samples.
exclusive=True,
uid=[],
group="user", # not used directly on the command line
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name="maxTotalPoints",
@ -143,7 +143,7 @@ Calibrate LDR to HDR response curve from samples.
range=(8, 10000000, 1000),
uid=[0],
advanced=True,
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.ChoiceParam(
name="verboseLevel",


@ -89,7 +89,7 @@ Merge LDR images into HDR images.
value=1,
range=(-4, 4, 1),
uid=[0],
enabled= lambda node: (node.nbBrackets.value != 1 and node.offsetRefBracketIndexEnabled.value),
enabled=lambda node: (node.nbBrackets.value != 1 and node.offsetRefBracketIndexEnabled.value),
),
desc.FloatParam(
name="meanTargetedLumaForMerging",
@ -98,33 +98,33 @@ Merge LDR images into HDR images.
value=0.4,
range=(0.0, 1.0, 0.01),
uid=[0],
enabled= lambda node: (node.nbBrackets.value != 1 and not node.offsetRefBracketIndexEnabled.value),
enabled=lambda node: (node.nbBrackets.value != 1 and not node.offsetRefBracketIndexEnabled.value),
),
desc.FloatParam(
name='minSignificantValue',
label='Minimum Significant Value',
description='Minimum channel input value to be considered in advanced pixelwise merging.',
name="minSignificantValue",
label="Minimum Significant Value",
description="Minimum channel input value to be considered in advanced pixelwise merging.",
value=0.05,
range=(0.0, 1.0, 0.001),
uid=[0],
enabled= lambda node: (node.nbBrackets.value != 1),
enabled=lambda node: (node.nbBrackets.value != 1),
),
desc.FloatParam(
name='maxSignificantValue',
label='Maximum Significant Value',
description='Maximum channel input value to be considered in advanced pixelwise merging.',
name="maxSignificantValue",
label="Maximum Significant Value",
description="Maximum channel input value to be considered in advanced pixelwise merging.",
value=0.995,
range=(0.0, 1.0, 0.001),
uid=[0],
enabled= lambda node: (node.nbBrackets.value != 1),
enabled=lambda node: (node.nbBrackets.value != 1),
),
desc.BoolParam(
name='computeLightMasks',
label='Compute Light Masks',
name="computeLightMasks",
label="Compute Light Masks",
description="Compute masks of low and high lights and missing info.",
value=False,
uid=[0],
enabled= lambda node: node.nbBrackets.value != 1,
enabled=lambda node: node.nbBrackets.value != 1,
),
desc.BoolParam(
name="byPass",
@ -132,7 +132,7 @@ Merge LDR images into HDR images.
description="Bypass HDR creation and use the medium bracket as the source for the next steps.",
value=False,
uid=[0],
enabled= lambda node: node.nbBrackets.value != 1,
enabled=lambda node: node.nbBrackets.value != 1,
),
desc.BoolParam(
name="keepSourceImageName",
@ -152,7 +152,7 @@ Merge LDR images into HDR images.
values=["gaussian", "triangle", "plateau"],
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name="channelQuantizationPower",
@ -162,7 +162,7 @@ Merge LDR images into HDR images.
range=(8, 14, 1),
uid=[0],
advanced=True,
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.ChoiceParam(
name="workingColorSpace",
@ -173,7 +173,7 @@ Merge LDR images into HDR images.
values=["auto", "sRGB", "Linear", "ACES2065-1", "ACEScg", "no_conversion"],
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.BoolParam(
name="enableHighlight",
@ -182,7 +182,7 @@ Merge LDR images into HDR images.
value=False,
uid=[0],
group="user", # not used directly on the command line
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.FloatParam(
name="highlightCorrectionFactor",
@ -195,7 +195,7 @@ Merge LDR images into HDR images.
value=1.0,
range=(0.0, 1.0, 0.01),
uid=[0],
enabled= lambda node: node.enableHighlight.enabled and node.enableHighlight.value,
enabled=lambda node: node.enableHighlight.enabled and node.enableHighlight.value,
),
desc.FloatParam(
name="highlightTargetLux",
@ -218,7 +218,7 @@ Merge LDR images into HDR images.
value=120000.0,
range=(1000.0, 150000.0, 1.0),
uid=[0],
enabled= lambda node: node.enableHighlight.enabled and node.enableHighlight.value and node.highlightCorrectionFactor.value != 0,
enabled=lambda node: node.enableHighlight.enabled and node.enableHighlight.value and node.highlightCorrectionFactor.value != 0,
),
desc.ChoiceParam(
name="storageDataType",


@ -94,7 +94,7 @@ Sample pixels from Low range images for HDR creation.
description="Bypass HDR creation and use the medium bracket as the source for the next steps.",
value=False,
uid=[0],
enabled= lambda node: node.nbBrackets.value != 1,
enabled=lambda node: node.nbBrackets.value != 1,
),
desc.ChoiceParam(
name="calibrationMethod",
@ -109,7 +109,7 @@ Sample pixels from Low range images for HDR creation.
value="auto",
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name="channelQuantizationPower",
@ -119,7 +119,7 @@ Sample pixels from Low range images for HDR creation.
range=(8, 14, 1),
uid=[0],
advanced=True,
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.ChoiceParam(
name="workingColorSpace",
@ -130,7 +130,7 @@ Sample pixels from Low range images for HDR creation.
values=["auto", "sRGB", "Linear", "ACES2065-1", "ACEScg", "no_conversion"],
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name="blockSize",
@ -140,7 +140,7 @@ Sample pixels from Low range images for HDR creation.
range=(8, 1024, 1),
uid=[0],
advanced=True,
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name="radius",
@ -150,7 +150,7 @@ Sample pixels from Low range images for HDR creation.
range=(0, 10, 1),
uid=[0],
advanced=True,
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name="maxCountSample",
@ -160,7 +160,7 @@ Sample pixels from Low range images for HDR creation.
range=(10, 1000, 10),
uid=[0],
advanced=True,
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.BoolParam(
name="debug",
@ -168,7 +168,7 @@ Sample pixels from Low range images for HDR creation.
description="Export debug files to analyze the sampling strategy.",
value=False,
uid=[],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
enabled=lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.ChoiceParam(
name="verboseLevel",


@ -14,7 +14,7 @@ Can also be used to calibrate a lighting dome (RTI type).
inputs = [
desc.File(
name="inputPath",
label="SfMData",
label="Input SfMData",
description="Input SfMData file.",
value="",
uid=[0]


@ -13,7 +13,7 @@ class LightingEstimation(desc.AVCommandLineNode):
inputs = [
desc.File(
name="input",
label="SfMData",
label="Input SfMData",
description="Input SfMData file.",
value="",
uid=[0],


@ -39,7 +39,7 @@ This node allows to recompute the mesh surface with a new topology and uniform d
),
desc.IntParam(
name="minVertices",
label='Min Vertices',
label="Min Vertices",
description="Minimum number of output vertices.",
value=0,
range=(0, 1000000, 1),


@ -326,7 +326,7 @@ A Graph Cut Max-Flow is applied to optimally cut the volume. This cut represents
),
desc.IntParam(
name="helperPointsGridSize",
label='Helper Points Grid Size',
label="Helper Points Grid Size",
description="Grid size for the helper points.",
value=10,
range=(0, 50, 1),


@ -14,10 +14,10 @@ A Structure-From-Motion node specifically designed to handle pure rotation camer
inputs = [
desc.File(
name='input',
label='SfMData',
description='SfMData file.',
value='',
name="input",
label="SfMData",
description="Input SfMData file.",
value="",
uid=[0],
),
desc.ListAttribute(
@ -33,35 +33,35 @@ A Structure-From-Motion node specifically designed to handle pure rotation camer
description="Folder(s) containing the extracted features and descriptors."
),
desc.File(
name='tracksFilename',
label='Tracks file',
description='Tracks file.',
value='',
name="tracksFilename",
label="Tracks File",
description="Input tracks file.",
value="",
uid=[0],
),
desc.File(
name='pairs',
label='Pairs file',
description='Information on pairs.',
value='',
name="pairs",
label="Pairs File",
description="Information on pairs.",
value="",
uid=[0],
),
desc.ChoiceParam(
name='describerTypes',
label='Describer Types',
description='Describer types used to describe an image.',
value=['dspsift'],
values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv', 'tag16h5'],
name="describerTypes",
label="Describer Types",
description="Describer types used to describe an image.",
value=["dspsift"],
values=["sift", "sift_float", "sift_upright", "dspsift", "akaze", "akaze_liop", "akaze_mldb", "cctag3", "cctag4", "sift_ocv", "akaze_ocv", "tag16h5"],
exclusive=False,
uid=[0],
joinChar=',',
joinChar=",",
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='Verbosity level (fatal, error, warning, info, debug, trace).',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value="info",
values=["fatal", "error", "warning", "info", "debug", "trace"],
exclusive=True,
uid=[],
)
@ -69,10 +69,10 @@ A Structure-From-Motion node specifically designed to handle pure rotation camer
outputs = [
desc.File(
name='output',
label='SfMData',
description='Path to the output sfmdata file',
value=desc.Node.internalFolder + 'sfm.abc',
name="output",
label="SfMData",
description="Path to the output SfMData file.",
value=desc.Node.internalFolder + "sfm.abc",
uid=[],
)
]


@ -22,7 +22,7 @@ Merge all inputs coming from the PanoramaCompositing node.
name="input",
label="Input SfMData",
description="Input SfMData file.",
value='',
value="",
uid=[0],
),
desc.File(


@ -135,6 +135,6 @@ Post process the panorama.
description="Downscaled versions of the generated panorama.",
value=lambda attr: desc.Node.internalFolder + os.path.splitext(attr.node.panoramaName.value)[0] + "_level_*.exr",
uid=[],
group='',
group="",
),
]


@ -22,7 +22,7 @@ This node allows to copy files into a specific folder.
elementDesc=desc.File(
name="input",
label="Input",
description="",
description="File or folder to publish.",
value="",
uid=[0],
),


@ -9,25 +9,25 @@ class SfMSplitReconstructed(desc.AVCommandLineNode):
category = 'Utils'
documentation = '''
This nodes takes a sfmData file and split it in two
- One sfmData with the reconstructed views
- One sfmData with the non reconstructed views
This nodes takes a SfMData file as an input and splits it in two output SfMData files:
- One SfMData containing the reconstructed views
- One SfMData containing the non-reconstructed views
'''
inputs = [
desc.File(
name='input',
label='Input',
description='''SfMData file .''',
value='',
name="input",
label="Input SfMData",
description="Input SfMData file.",
value="",
uid=[0],
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='''verbosity level (fatal, error, warning, info, debug, trace).''',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value="info",
values=["fatal", "error", "warning", "info", "debug", "trace"],
exclusive=True,
uid=[],
),
@ -35,17 +35,17 @@ class SfMSplitReconstructed(desc.AVCommandLineNode):
outputs = [
desc.File(
name='reconstructedOutput',
label='Reconstructed SfMData File',
description='SfMData file with reconstructed cameras',
value=desc.Node.internalFolder + 'sfmReconstructed.abc',
name="reconstructedOutput",
label="Reconstructed SfMData File",
description="SfMData file containing the reconstructed cameras.",
value=desc.Node.internalFolder + "sfmReconstructed.abc",
uid=[],
),
desc.File(
name='notReconstructedOutput',
label='Not Reconstructed SfMData File',
description='SfMData file with non reconstructed cameras',
value=desc.Node.internalFolder + 'sfmNonReconstructed.abc',
name="notReconstructedOutput",
label="Not Reconstructed SfMData File",
description="SfMData file containing the non-reconstructed cameras.",
value=desc.Node.internalFolder + "sfmNonReconstructed.abc",
uid=[],
)
]


@ -131,7 +131,7 @@ Contrary to the StructureFromMotion node, this node does not infer the camera po
),
desc.ChoiceParam(
name="interFileExtension",
label='Inter File Extension',
label="Inter File Extension",
description="Extension of the intermediate file export.",
value=".abc",
values=(".abc", ".ply"),


@ -31,7 +31,7 @@ class Split360Images(desc.AVCommandLineNode):
name="input",
label="Input",
description="Single image, image folder or SfMData file.",
value='',
value="",
uid=[0],
),
desc.ChoiceParam(
@ -80,7 +80,7 @@ class Split360Images(desc.AVCommandLineNode):
groupDesc=[
desc.IntParam(
name="equirectangularNbSplits",
label='Nb Splits',
label="Nb Splits",
description="Equirectangular number of splits.",
value=2,
range=(1, 100, 1),


@ -65,7 +65,7 @@ It iterates like that, adding cameras and triangulating new 2D features into 3D
desc.File(
name="input",
label="SfMData",
description='SfMData file.',
description="Input SfMData file.",
value="",
uid=[0],
),
@ -323,7 +323,7 @@ It iterates like that, adding cameras and triangulating new 2D features into 3D
),
desc.IntParam(
name="minNbCamerasToRefinePrincipalPoint",
label='Min Nb Cameras To Refine Principal Point',
label="Min Nb Cameras To Refine Principal Point",
description="Minimum number of cameras to refine the principal point of the cameras (one of the intrinsic parameters of the camera).\n"
"If we do not have enough cameras, the principal point is considered to be in the center of the image.\n"
"If minNbCamerasToRefinePrincipalPoint <= 0, the principal point is never refined."


@ -350,7 +350,7 @@ Many cameras are contributing to the low frequencies and only the best ones cont
),
desc.File(
name="outputMaterial",
enabled= lambda node: node.outputMeshFileType.value == "obj",
enabled=lambda node: node.outputMeshFileType.value == "obj",
label="Material",
description="Output material file.",
value=desc.Node.internalFolder + "texturedMesh.mtl",
@ -361,7 +361,7 @@ Many cameras are contributing to the low frequencies and only the best ones cont
name="outputTextures",
label="Textures",
description="Output texture files.",
value= lambda attr: desc.Node.internalFolder + "texture_*." + attr.node.colorMapping.colorMappingFileType.value if attr.node.colorMapping.enable.value else "",
value=lambda attr: desc.Node.internalFolder + "texture_*." + attr.node.colorMapping.colorMappingFileType.value if attr.node.colorMapping.enable.value else "",
uid=[],
group=""
)