[nodes] I-L: Harmonize and improve labels and descriptions

Use CamelCase for all labels, always end descriptions with periods, and
replace the mixed use of single and double quotes with double quotes
only.
Candice Bentéjac 2023-06-16 10:31:18 +02:00
parent 5c2865968d
commit 3146dcface
11 changed files with 978 additions and 889 deletions
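As an illustration of the convention, here is a generic before/after sketch of a single attribute (hypothetical, mirroring the "input" attribute of the ImageMasking node below):

# Before: single quotes, terse label.
desc.File(
    name='input',
    label='Input',
    description='''SfMData file.''',
    value='',
    uid=[0],
),

# After: double quotes only, harmonized label, description as a full sentence ending with a period.
desc.File(
    name="input",
    label="SfMData",
    description="Input SfMData file.",
    value="",
    uid=[0],
),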


@@ -9,138 +9,143 @@ class ImageMasking(desc.AVCommandLineNode):
parallelization = desc.Parallelization(blockSize=40)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
'''
inputs = [
desc.File(
name='input',
label='Input',
description='''SfMData file.''',
value='',
name="input",
label="SfMData",
description="Input SfMData file.",
value="",
uid=[0],
),
desc.ChoiceParam(
name='algorithm',
label='Algorithm',
description='',
value='HSV',
values=['HSV', 'AutoGrayscaleThreshold'],
name="algorithm",
label="Algorithm",
description="",
value="HSV",
values=["HSV", "AutoGrayscaleThreshold"],
exclusive=True,
uid=[0],
),
desc.GroupAttribute(
name="hsv",
label="HSV Parameters",
description="""Values to select:
- Green: default values
- White: Tolerance = 1, minSaturation = 0, maxSaturation = 0.1, minValue = 0.8, maxValue = 1
- Black: Tolerance = 1, minSaturation = 0, maxSaturation = 0.1, minValue = 0, maxValue = 0.2
""",
description="Values to select:\n"
" - Green: default values\n"
" - White: Tolerance = 1, minSaturation = 0, maxSaturation = 0.1, minValue = 0.8, maxValue = 1\n"
" - Black: Tolerance = 1, minSaturation = 0, maxSaturation = 0.1, minValue = 0, maxValue = 0.2",
group=None,
enabled=lambda node: node.algorithm.value == 'HSV',
enabled=lambda node: node.algorithm.value == "HSV",
groupDesc=[
desc.FloatParam(
name='hsvHue',
label='Hue',
description='Hue value to isolate in [0,1] range. 0 = red, 0.33 = green, 0.66 = blue, 1 = red.',
semantic='color/hue',
name="hsvHue",
label="Hue",
description="Hue value to isolate in [0,1] range.\n"
"0 = red, 0.33 = green, 0.66 = blue, 1 = red.",
semantic="color/hue",
value=0.33,
range=(0.0, 1.0, 0.01),
uid=[0]
),
desc.FloatParam(
name='hsvHueRange',
label='Tolerance',
description='Tolerance around the hue value to isolate.',
name="hsvHueRange",
label="Tolerance",
description="Tolerance around the hue value to isolate.",
value=0.1,
range=(0.0, 1.0, 0.01),
uid=[0]
),
desc.FloatParam(
name='hsvMinSaturation',
label='Min Saturation',
description='Hue is meaningless if saturation is low. Do not mask pixels below this threshold.',
name="hsvMinSaturation",
label="Min Saturation",
description="Hue is meaningless if saturation is low. Do not mask pixels below this threshold.",
value=0.3,
range=(0.0, 1.0, 0.01),
uid=[0]
),
desc.FloatParam(
name='hsvMaxSaturation',
label='Max Saturation',
description='Do not mask pixels above this threshold. It might be useful to mask white/black pixels.',
name="hsvMaxSaturation",
label="Max Saturation",
description="Do not mask pixels above this threshold. It might be useful to mask white/black pixels.",
value=1.0,
range=(0.0, 1.0, 0.01),
uid=[0]
),
desc.FloatParam(
name='hsvMinValue',
label='Min Value',
description='Hue is meaningless if value is low. Do not mask pixels below this threshold.',
name="hsvMinValue",
label="Min Value",
description="Hue is meaningless if the value is low. Do not mask pixels below this threshold.",
value=0.3,
range=(0.0, 1.0, 0.01),
uid=[0]
),
desc.FloatParam(
name='hsvMaxValue',
label='Max Value',
description='Do not mask pixels above this threshold. It might be useful to mask white/black pixels.',
name="hsvMaxValue",
label="Max Value",
description="Do not mask pixels above this threshold. It might be useful to mask white/black pixels.",
value=1.0,
range=(0.0, 1.0, 0.01),
uid=[0]
),
]),
]
),
desc.BoolParam(
name='invert',
label='Invert',
description='''If ticked, the selected area is ignored.
If not, only the selected area is considered.''',
name="invert",
label="Invert",
description="If selected, the selected area is ignored.\n"
"If not, only the selected area is considered.",
value=True,
uid=[0]
),
desc.IntParam(
name='growRadius',
label='Grow Radius',
description='Grow the selected area. It might be used to fill the holes: then use shrinkRadius to restore the initial coutours.',
name="growRadius",
label="Grow Radius",
description="Grow the selected area.\n"
"It might be used to fill the holes: then use shrinkRadius to restore the initial coutours.",
value=0,
range=(0, 50, 1),
uid=[0]
),
desc.IntParam(
name='shrinkRadius',
label='Shrink Radius',
description='Shrink the selected area.',
name="shrinkRadius",
label="Shrink Radius",
description="Shrink the selected area.",
value=0,
range=(0, 50, 1),
uid=[0]
),
desc.File(
name='depthMapFolder',
label='Depth Mask Folder',
description='''Depth Mask Folder''',
value='',
name="depthMapFolder",
label="Depth Mask Folder",
description="Depth mask folder.",
value="",
uid=[0],
),
desc.StringParam(
name='depthMapExp',
label='Depth Mask Expression',
description='''Depth Mask Expression, like "{inputFolder}/{stem}-depth.{ext}".''',
value='',
name="depthMapExp",
label="Depth Mask Expression",
description="Depth mask expression, like '{inputFolder}/{stem}-depth.{ext}'.",
value="",
uid=[0],
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='''verbosity level (fatal, error, warning, info, debug, trace).''',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value="info",
values=["fatal", "error", "warning", "info", "debug", "trace"],
exclusive=True,
uid=[],
),
)
]
outputs = [
desc.File(
name='output',
label='Output',
description='''Output folder.''',
name="output",
label="Output",
description="Output folder.",
value=desc.Node.internalFolder,
uid=[],
),

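For context, the HSV parameters above amount to a plain threshold test in HSV space. A minimal NumPy sketch of the idea (illustration only, with a hypothetical helper name and HSV channels assumed normalized to [0, 1]; not the AliceVision implementation):

import numpy as np

def hsv_mask(h, s, v, hue=0.33, tol=0.1,
             min_sat=0.3, max_sat=1.0, min_val=0.3, max_val=1.0, invert=True):
    # h, s, v: float arrays of identical shape, values in [0, 1].
    # Hue is circular (0 and 1 are both red), so wrap the distance.
    hue_dist = np.minimum(np.abs(h - hue), 1.0 - np.abs(h - hue))
    selected = ((hue_dist <= tol)
                & (s >= min_sat) & (s <= max_sat)
                & (v >= min_val) & (v <= max_val))
    # invert=True (the node's default): the selected area is the part to ignore.
    return ~selected if invert else selected

The growRadius and shrinkRadius parameters would then roughly correspond to a morphological dilation followed by an erosion of this boolean mask.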

@@ -35,17 +35,17 @@ If images have known poses, use frustum intersection else use VocabularyTree.
inputs = [
desc.File(
name='input',
label='SfmData',
description='SfMData file .',
value='',
name="input",
label="SfMData",
description="Input SfMData file.",
value="",
uid=[0],
),
desc.ListAttribute(
elementDesc=desc.File(
name="featuresFolder",
label="Features Folder",
description="",
description="Folder containing some extracted features and descriptors.",
value="",
uid=[0],
),
@@ -54,85 +54,87 @@ If images have known poses, use frustum intersection else use VocabularyTree.
description="Folder(s) containing the extracted features and descriptors."
),
desc.ChoiceParam(
name='method',
label='Method',
description='Method used to select the image pairs to match:\n'
' * VocabularyTree: It uses image retrieval techniques to find images that share some content without the cost of resolving all \n'
'feature matches in details. Each image is represented in a compact image descriptor which allows to compute the distance between all \n'
'images descriptors very efficiently. If your scene contains less than "Voc Tree: Minimal Number of Images", all image pairs will be selected.\n'
' * Sequential: If your input is a video sequence, you can use this option to link images between them over time.\n'
' * SequentialAndVocabularyTree: Combines sequential approach with VocTree to enable connections between keyframes at different times.\n'
' * Exhaustive: Export all image pairs.\n'
' * Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n'
' * FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n',
value='SequentialAndVocabularyTree',
values=['VocabularyTree', 'Sequential', 'SequentialAndVocabularyTree', 'Exhaustive', 'Frustum', 'FrustumOrVocabularyTree'],
name="method",
label="Method",
description="Method used to select the image pairs to match:\n"
" - VocabularyTree: It uses image retrieval techniques to find images that share some content without the cost of resolving all \n"
"feature matches in details. Each image is represented in a compact image descriptor which allows to compute the distance between all \n"
"images descriptors very efficiently. If your scene contains less than 'Voc Tree: Minimal Number of Images', all image pairs will be selected.\n"
" - Sequential: If your input is a video sequence, you can use this option to link images between them over time.\n"
" - SequentialAndVocabularyTree: Combines sequential approach with VocTree to enable connections between keyframes at different times.\n"
" - Exhaustive: Export all image pairs.\n"
" - Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n"
" - FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n",
value="SequentialAndVocabularyTree",
values=["VocabularyTree", "Sequential", "SequentialAndVocabularyTree", "Exhaustive", "Frustum", "FrustumOrVocabularyTree"],
exclusive=True,
uid=[0],
),
desc.File(
name='tree',
label='Voc Tree: Tree',
description='Input name for the vocabulary tree file.',
value='${ALICEVISION_VOCTREE}',
name="tree",
label="Voc Tree: Tree",
description="Input name for the vocabulary tree file.",
value="${ALICEVISION_VOCTREE}",
uid=[],
enabled=lambda node: 'VocabularyTree' in node.method.value,
enabled=lambda node: "VocabularyTree" in node.method.value,
),
desc.File(
name='weights',
label='Voc Tree: Weights',
description='Input name for the weight file, if not provided the weights will be computed on the database built with the provided set.',
value='',
name="weights",
label="Voc Tree: Weights",
description="Input name for the weight file.\n"
"If not provided, the weights will be computed on the database built with the provided set.",
value="",
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
enabled=lambda node: "VocabularyTree" in node.method.value,
),
desc.IntParam(
name='minNbImages',
label='Voc Tree: Minimal Number of Images',
description='Minimal number of images to use the vocabulary tree. If we have less features than this threshold, we will compute all matching combinations.',
name="minNbImages",
label="Voc Tree: Minimum Number Of Images",
description="Minimum number of images to use the vocabulary tree.\n"
"If we have less features than this threshold, we will compute all matching combinations.",
value=200,
range=(0, 500, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
enabled=lambda node: "VocabularyTree" in node.method.value,
),
desc.IntParam(
name='maxDescriptors',
label='Voc Tree: Max Descriptors',
description='Limit the number of descriptors you load per image. Zero means no limit.',
name="maxDescriptors",
label="Voc Tree: Max Descriptors",
description="Limit the number of descriptors you load per image. 0 means no limit.",
value=500,
range=(0, 100000, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
enabled=lambda node: "VocabularyTree" in node.method.value,
),
desc.IntParam(
name='nbMatches',
label='Voc Tree: Nb Matches',
description='The number of matches to retrieve for each image (If 0 it will retrieve all the matches).',
name="nbMatches",
label="Voc Tree: Nb Matches",
description="The number of matches to retrieve for each image. (If 0, it will retrieve all the matches).",
value=40,
range=(0, 1000, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
enabled=lambda node: "VocabularyTree" in node.method.value,
),
desc.IntParam(
name='nbNeighbors',
label='Sequential: Nb Neighbors',
description='The number of neighbors to retrieve for each image (If 0 it will retrieve all the neighbors).',
name="nbNeighbors",
label="Sequential: Nb Neighbors",
description="The number of neighbors to retrieve for each image. (If 0, it will retrieve all the neighbors).",
value=5,
range=(0, 1000, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'Sequential' in node.method.value,
enabled=lambda node: "Sequential" in node.method.value,
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='verbosity level (fatal, error, warning, info, debug, trace).',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value="info",
values=["fatal", "error", "warning", "info", "debug", "trace"],
exclusive=True,
uid=[],
)
@@ -140,10 +142,10 @@ If images have known poses, use frustum intersection else use VocabularyTree.
outputs = [
desc.File(
name='output',
label='Image Pairs',
description='Filepath to the output file with the list of selected image pairs.',
value=desc.Node.internalFolder + 'imageMatches.txt',
name="output",
label="Image Pairs",
description="Filepath to the output file with the list of selected image pairs.",
value=desc.Node.internalFolder + "imageMatches.txt",
uid=[],
),
]


@@ -20,24 +20,24 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
'''
inputs = [
desc.File(
name='input',
label='Input A',
description='SfMData file .',
value='',
name="input",
label="Input A",
description="First input SfMData file.",
value="",
uid=[0],
),
desc.File(
name='inputB',
label='Input B',
description='SfMData file .',
value='',
name="inputB",
label="Input B",
description="Second input SfMData file.",
value="",
uid=[0],
),
desc.ListAttribute(
elementDesc=desc.File(
name="featuresFolder",
label="Features Folder",
description="",
description="Folder containing some extracted features and descriptors.",
value="",
uid=[0],
),
@@ -46,93 +46,98 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
description="Folder(s) containing the extracted features and descriptors."
),
desc.ChoiceParam(
name='method',
label='Method',
description='Method used to select the image pairs to match:\n'
' * VocabularyTree: It uses image retrieval techniques to find images that share some content without the cost of resolving all \n'
'feature matches in details. Each image is represented in a compact image descriptor which allows to compute the distance between all \n'
'images descriptors very efficiently. If your scene contains less than "Voc Tree: Minimal Number of Images", all image pairs will be selected.\n'
' * SequentialAndVocabularyTree: Combines sequential approach with VocTree to enable connections between keyframes at different times.\n'
' * Exhaustive: Export all image pairs.\n'
' * Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n'
' * FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n',
value='SequentialAndVocabularyTree',
values=['VocabularyTree', 'SequentialAndVocabularyTree', 'Exhaustive', 'Frustum'],
name="method",
label="Method",
description="Method used to select the image pairs to match:\n"
" - VocabularyTree: It uses image retrieval techniques to find images that share some content without the cost of resolving all \n"
"feature matches in details. Each image is represented in a compact image descriptor which allows to compute the distance between all \n"
"images descriptors very efficiently. If your scene contains less than 'Voc Tree: Minimal Number of Images', all image pairs will be selected.\n"
" - SequentialAndVocabularyTree: Combines sequential approach with VocTree to enable connections between keyframes at different times.\n"
" - Exhaustive: Export all image pairs.\n"
" - Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n"
" - FrustumOrVocabularyTree: If images have known poses, use frustum intersection. Otherwise, use VocabularyTree.\n",
value="SequentialAndVocabularyTree",
values=["VocabularyTree", "SequentialAndVocabularyTree", "Exhaustive", "Frustum"],
exclusive=True,
uid=[0],
),
desc.File(
name='tree',
label='Voc Tree: Tree',
description='Input name for the vocabulary tree file.',
value='${ALICEVISION_VOCTREE}',
name="tree",
label="Voc Tree: Tree",
description="Input name for the vocabulary tree file.",
value="${ALICEVISION_VOCTREE}",
uid=[],
enabled=lambda node: 'VocabularyTree' in node.method.value,
enabled=lambda node: "VocabularyTree" in node.method.value,
),
desc.File(
name='weights',
label='Voc Tree: Weights',
description='Input name for the weight file, if not provided the weights will be computed on the database built with the provided set.',
value='',
name="weights",
label="Voc Tree: Weights",
description="Input name for the weight file.\n"
"If not provided, the weights will be computed on the database built with the provided set.",
value="",
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
enabled=lambda node: "VocabularyTree" in node.method.value,
),
desc.ChoiceParam(
name='matchingMode',
label='Matching Mode',
description='The mode to combine image matching between the input SfMData A and B:\n"a/a+a/b" for A with A + A with B.\n"a/ab" for A with A and B.\n"a/b" for A with B.',
value='a/a+a/b',
values=['a/a+a/b','a/ab', 'a/b'],
name="matchingMode",
label="Matching Mode",
description="The mode to combine image matching between the input SfMData A and B:\n"
"- 'a/a+a/b' for A with A + A with B.\n"
"- 'a/ab' for A with A and B.\n"
"- 'a/b' for A with B.",
value="a/a+a/b",
values=["a/a+a/b","a/ab", "a/b"],
exclusive=True,
uid=[0],
),
desc.IntParam(
name='minNbImages',
label='Voc Tree: Minimal Number of Images',
description='Minimal number of images to use the vocabulary tree. If we have less features than this threshold, we will compute all matching combinations.',
name="minNbImages",
label="Voc Tree: Minimum Number Of Images",
description="Minimum number of images to use the vocabulary tree.\n"
"If we have less features than this threshold, we will compute all the matching combinations.",
value=200,
range=(0, 500, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
enabled=lambda node: "VocabularyTree" in node.method.value,
),
desc.IntParam(
name='maxDescriptors',
label='Voc Tree: Max Descriptors',
description='Limit the number of descriptors you load per image. Zero means no limit.',
name="maxDescriptors",
label="Voc Tree: Max Descriptors",
description="Limit the number of descriptors you load per image. 0 means no limit.",
value=500,
range=(0, 100000, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
enabled=lambda node: "VocabularyTree" in node.method.value,
),
desc.IntParam(
name='nbMatches',
label='Voc Tree: Nb Matches',
description='The number of matches to retrieve for each image (If 0 it will retrieve all the matches).',
name="nbMatches",
label="Voc Tree: Nb Matches",
description="The number of matches to retrieve for each image. (If 0, it will retrieve all the matches).",
value=40,
range=(0, 1000, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
enabled=lambda node: "VocabularyTree" in node.method.value,
),
desc.IntParam(
name='nbNeighbors',
label='Sequential: Nb Neighbors',
description='The number of neighbors to retrieve for each image (If 0 it will retrieve all the neighbors).',
name="nbNeighbors",
label="Sequential: Nb Neighbors",
description="The number of neighbors to retrieve for each image. (If 0, it will retrieve all the neighbors).",
value=5,
range=(0, 1000, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'Sequential' in node.method.value,
enabled=lambda node: "Sequential" in node.method.value,
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='verbosity level (fatal, error, warning, info, debug, trace).',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value="info",
values=["fatal", "error", "warning", "info", "debug", "trace"],
exclusive=True,
uid=[],
)
@@ -140,17 +145,17 @@ Thanks to this node, the FeatureMatching node will only compute the matches betw
outputs = [
desc.File(
name='output',
label='List File',
description='Filepath to the output file with the list of selected image pairs.',
value=desc.Node.internalFolder + 'imageMatches.txt',
name="output",
label="List File",
description="Filepath to the output file with the list of selected image pairs.",
value=desc.Node.internalFolder + "imageMatches.txt",
uid=[],
),
desc.File(
name='outputCombinedSfM',
label='Combined SfM',
description='Path for the combined SfMData file',
value=desc.Node.internalFolder + 'combineSfM.sfm',
name="outputCombinedSfM",
label="Combined SfM",
description="Path for the combined SfMData file.",
value=desc.Node.internalFolder + "combineSfM.sfm",
uid=[],
),
]


@@ -41,467 +41,520 @@ Convert or apply filtering to the input images.
inputs = [
desc.File(
name='input',
label='Input',
description='SfMData file input, image filenames or regex(es) on the image file path.\nsupported regex: \'#\' matches a single digit, \'@\' one or more digits, \'?\' one character and \'*\' zero or more.',
value='',
name="input",
label="Input",
description="SfMData file input, image filenames or regex(es) on the image file path.\n"
"Supported regex:\n"
" - '#' matches a single digit.\n"
" - '@' matches one or more digits.\n"
" - '?' matches one character.\n"
" - '*' matches zero character or more.",
value="",
uid=[0],
),
desc.ListAttribute(
elementDesc=desc.File(
name="inputFolder",
label="input Folder",
description="",
label="Input Folder",
description="Folder containing images.",
value="",
uid=[0],
),
name="inputFolders",
label="Images input Folders",
description='Use images from specific folder(s).',
label="Input Images Folders",
description="Use images from specific folder(s).",
),
desc.ListAttribute(
elementDesc=desc.StringParam(
name="metadataFolder",
label="Metadata Folder",
description="",
description="Specific folder containing images with metadata.",
value="",
uid=[0],
),
name="metadataFolders",
label="Metadata input Folders",
description='Use images metadata from specific folder(s).',
label="Input Metadata Folders",
description="Use images metadata from specific folder(s).",
),
desc.ChoiceParam(
name='extension',
label='Output File Extension',
description='Output Image File Extension.',
value='',
values=['', 'exr', 'jpg', 'tiff', 'png'],
name="extension",
label="Output File Extension",
description="Output image file extension.\n"
"If unset, the output file extension will match the input's if possible.",
value="",
values=["", "exr", "jpg", "tiff", "png"],
exclusive=True,
uid=[0],
),
desc.BoolParam(
name='reconstructedViewsOnly',
label='Only Reconstructed Views',
description='Process Only Reconstructed Views',
name="reconstructedViewsOnly",
label="Only Reconstructed Views",
description="Only process reconstructed views.",
value=False,
uid=[0],
),
desc.BoolParam(
name='keepImageFilename',
label='Keep Image Name',
description='Keep original image name instead of view name',
name="keepImageFilename",
label="Keep Image Name",
description="Keep the original image name instead of the view name.",
value=False,
uid=[0],
),
desc.BoolParam(
name='fixNonFinite',
label='Fix Non-Finite',
description='Fix non-finite pixels based on neighboring pixels average.',
name="fixNonFinite",
label="Fix Non-Finite",
description="Fix non-finite pixels based on neighboring pixels average.",
value=False,
uid=[0],
),
desc.BoolParam(
name='exposureCompensation',
label='Exposure Compensation',
description='Exposure compensation (only valid for SfMData)',
name="exposureCompensation",
label="Exposure Compensation",
description="Exposure compensation (only valid for SfMData).",
value=False,
uid=[0],
),
desc.BoolParam(
name='rawAutoBright',
label='RAW Auto Bright',
description='Enable automatic exposure adjustment for RAW images',
name="rawAutoBright",
label="RAW Auto Bright",
description="Enable automatic exposure adjustment for RAW images.",
value=False,
uid=[0],
),
desc.FloatParam(
name='rawExposureAdjust',
label='RAW Exposure Adjustment',
description='Manual exposure adjustment in fstops for RAW images',
name="rawExposureAdjust",
label="RAW Exposure Adjustment",
description="Manual exposure adjustment in fstops for RAW images.",
value=0.0,
range=(-2.0, 3.0, 0.125),
uid=[0],
),
desc.GroupAttribute(name="lensCorrection", label="Lens Correction", description="Automatic lens correction settings.", joinChar=":", groupDesc=[
desc.GroupAttribute(
name="lensCorrection",
label="Lens Correction",
description="Automatic lens correction settings.",
joinChar=":",
groupDesc=[
desc.BoolParam(
name='lensCorrectionEnabled',
label='Enable',
description='Enable lens correction.',
name="lensCorrectionEnabled",
label="Enable",
description="Enable lens correction.",
value=False,
uid=[0],
),
desc.BoolParam(
name='geometry',
label='Geometry',
description='Geometry correction if a model is available in SfM data.',
name="geometry",
label="Geometry",
description="Geometry correction if a model is available in the SfMData.",
value=False,
uid=[0],
enabled=lambda node: node.lensCorrection.lensCorrectionEnabled.value,
),
desc.BoolParam(
name='vignetting',
label='Vignetting',
description='Vignetting correction if model parameters are available in metadata.',
name="vignetting",
label="Vignetting",
description="Vignetting correction if the model parameters are available in the metadata.",
value=False,
uid=[0],
enabled=lambda node: node.lensCorrection.lensCorrectionEnabled.value,
),
desc.BoolParam(
name='chromaticAberration',
label='Chromatic Aberration',
description='Chromatic aberration (fringing) correction if model parameters are available in metadata.',
name="chromaticAberration",
label="Chromatic Aberration",
description="Chromatic aberration (fringing) correction if the model parameters are available in the metadata.",
value=False,
uid=[0],
enabled=False # To be replaced with the line below when the correction of chromatic aberration becomes available
# enabled=lambda node: node.lensCorrection.lensCorrectionEnabled.value
)
]),
]
),
desc.FloatParam(
name='scaleFactor',
label='Scale Factor',
description='Scale Factor.',
name="scaleFactor",
label="Scale Factor",
description="Scale factor.",
value=1.0,
range=(0.0, 1.0, 0.01),
uid=[0],
),
desc.IntParam(
name='maxWidth',
label='Max Width',
description='Maximal width of the output images (0: ignored).',
name="maxWidth",
label="Max Width",
description="Maximum width of the output images (0: ignored).",
value=0,
range=(0, 10000, 1),
uid=[0],
),
desc.IntParam(
name='maxHeight',
label='Max Height',
description='Maximal height of the output images (0: ignored).',
name="maxHeight",
label="Max Height",
description="Maximum height of the output images (0: ignored).",
value=0,
range=(0, 10000, 1),
uid=[0],
),
desc.FloatParam(
name='contrast',
label='Contrast',
description='Contrast.',
name="contrast",
label="Contrast",
description="Contrast.",
value=1.0,
range=(0.0, 100.0, 0.1),
uid=[0],
),
desc.IntParam(
name='medianFilter',
label='Median Filter',
description='Median Filter.',
name="medianFilter",
label="Median Filter",
description="Median filter.",
value=0,
range=(0, 10, 1),
uid=[0],
),
desc.BoolParam(
name='fillHoles',
label='Fill Holes',
description='Fill holes based on the alpha channel.\n'
'Note: It will enable fixNonFinite, as it is required for the image pyramid construction used to fill holes.',
name="fillHoles",
label="Fill Holes",
description="Fill holes based on the alpha channel.\n"
"Note: It will enable 'fixNonFinite', as it is required for the image pyramid construction used to fill holes.",
value=False,
uid=[0],
),
desc.GroupAttribute(name="sharpenFilter", label="Sharpen Filter", description="Sharpen Filtering Parameters.", joinChar=":", groupDesc=[
desc.GroupAttribute(
name="sharpenFilter",
label="Sharpen Filter",
description="Sharpen filter parameters.",
joinChar=":",
groupDesc=[
desc.BoolParam(
name='sharpenFilterEnabled',
label='Enable',
description='Use sharpen.',
name="sharpenFilterEnabled",
label="Enable",
description="Use sharpen filter.",
value=False,
uid=[0],
),
desc.IntParam(
name='width',
label='Width',
description='Sharpen Width.',
name="width",
label="Width",
description="Sharpening width.",
value=3,
range=(1, 9, 2),
uid=[0],
enabled=lambda node: node.sharpenFilter.sharpenFilterEnabled.value,
),
desc.FloatParam(
name='contrast',
label='Contrast',
description='Sharpen Contrast.',
name="contrast",
label="Contrast",
description="Sharpening contrast.",
value=1.0,
range=(0.0, 100.0, 0.1),
uid=[0],
enabled=lambda node: node.sharpenFilter.sharpenFilterEnabled.value,
),
desc.FloatParam(
name='threshold',
label='Threshold',
description='Sharpen Threshold.',
name="threshold",
label="Threshold",
description="Sharpening threshold.",
value=0.0,
range=(0.0, 1.0, 0.01),
uid=[0],
enabled=lambda node: node.sharpenFilter.sharpenFilterEnabled.value,
),
]),
desc.GroupAttribute(name="bilateralFilter", label="Bilateral Filter", description="Bilateral Filtering Parameters.", joinChar=":", groupDesc=[
]
),
desc.GroupAttribute(
name="bilateralFilter",
label="Bilateral Filter",
description="Bilateral filter parameters.",
joinChar=":",
groupDesc=[
desc.BoolParam(
name='bilateralFilterEnabled',
label='Enable',
description='Bilateral Filter.',
name="bilateralFilterEnabled",
label="Enable",
description="Use bilateral filter.",
value=False,
uid=[0],
),
desc.IntParam(
name='bilateralFilterDistance',
label='Distance',
description='Diameter of each pixel neighborhood that is used during bilateral filtering.\nCould be very slow for large filters, so it is recommended to use 5.',
name="bilateralFilterDistance",
label="Distance",
description="Diameter of each pixel neighborhood that is used during bilateral filtering.\n"
"Could be very slow for large filters, so it is recommended to use 5.",
value=0,
range=(0, 9, 1),
uid=[0],
enabled=lambda node: node.bilateralFilter.bilateralFilterEnabled.value,
),
desc.FloatParam(
name='bilateralFilterSigmaSpace',
label='Sigma Coordinate Space',
description='Bilateral Filter sigma in the coordinate space.',
name="bilateralFilterSigmaSpace",
label="Sigma Coordinate Space",
description="Bilateral filter sigma in the coordinate space.",
value=0.0,
range=(0.0, 150.0, 0.01),
uid=[0],
enabled=lambda node: node.bilateralFilter.bilateralFilterEnabled.value,
),
desc.FloatParam(
name='bilateralFilterSigmaColor',
label='Sigma Color Space',
description='Bilateral Filter sigma in the color space.',
name="bilateralFilterSigmaColor",
label="Sigma Color Space",
description="Bilateral filter sigma in the color space.",
value=0.0,
range=(0.0, 150.0, 0.01),
uid=[0],
enabled=lambda node: node.bilateralFilter.bilateralFilterEnabled.value,
),
]),
desc.GroupAttribute(name="claheFilter", label="Clahe Filter", description="Clahe Filtering Parameters.", joinChar=":", groupDesc=[
]
),
desc.GroupAttribute(
name="claheFilter",
label="Clahe Filter",
description="Clahe filter parameters.",
joinChar=":",
groupDesc=[
desc.BoolParam(
name='claheEnabled',
label='Enable',
description='Use Contrast Limited Adaptive Histogram Equalization (CLAHE) Filter.',
name="claheEnabled",
label="Enable",
description="Use Contrast Limited Adaptive Histogram Equalization (CLAHE) filter.",
value=False,
uid=[0],
),
desc.FloatParam(
name='claheClipLimit',
label='Clip Limit',
description='Sets Threshold For Contrast Limiting.',
name="claheClipLimit",
label="Clip Limit",
description="Threshold for contrast limiting.",
value=4.0,
range=(0.0, 8.0, 1.0),
uid=[0],
enabled=lambda node: node.claheFilter.claheEnabled.value,
),
desc.IntParam(
name='claheTileGridSize',
label='Tile Grid Size',
description='Sets Size Of Grid For Histogram Equalization. Input Image Will Be Divided Into Equally Sized Rectangular Tiles.',
name="claheTileGridSize",
label="Tile Grid Size",
description="Size of the grid for histogram equalization.\n"
"Input image will be divided into equally sized rectangular tiles.",
value=8,
range=(4, 64, 4),
uid=[0],
enabled=lambda node: node.claheFilter.claheEnabled.value,
),
]),
desc.GroupAttribute(name="noiseFilter", label="Noise Filter", description="Noise Filtering Parameters.", joinChar=":", groupDesc=[
]
),
desc.GroupAttribute(
name="noiseFilter",
label="Noise Filter",
description="Noise filter parameters.",
joinChar=":",
groupDesc=[
desc.BoolParam(
name='noiseEnabled',
label='Enable',
description='Add Noise.',
name="noiseEnabled",
label="Enable",
description="Add noise.",
value=False,
uid=[0],
),
desc.ChoiceParam(
name='noiseMethod',
label='Method',
description=" * method: There are several noise types to choose from:\n"
" * uniform: adds noise values uninformly distributed on range [A,B).\n"
" * gaussian: adds Gaussian (normal distribution) noise values with mean value A and standard deviation B.\n"
" * salt: changes to value A a portion of pixels given by B.\n",
value='uniform',
values=['uniform', 'gaussian', 'salt'],
name="noiseMethod",
label="Method",
description="There are several noise types to choose from:\n"
" - uniform: adds noise values uniformly distributed on range [A,B).\n"
" - gaussian: adds Gaussian (normal distribution) noise values with mean value A and standard deviation B.\n"
" - salt: changes to value A a portion of pixels given by B.\n",
value="uniform",
values=["uniform", "gaussian", "salt"],
exclusive=True,
uid=[0],
enabled=lambda node: node.noiseFilter.noiseEnabled.value,
),
desc.FloatParam(
name='noiseA',
label='A',
description='Parameter that have a different interpretation depending on the method chosen.',
name="noiseA",
label="A",
description="Parameter that has a different interpretation depending on the chosen method:\n"
" - uniform: lower bound of the range on which the noise is uniformly distributed.\n"
" - gaussian: the mean value of the Gaussian noise.\n"
" - salt: the value of the specified portion of pixels.",
value=0.0,
range=(0.0, 1.0, 0.0001),
uid=[0],
enabled=lambda node: node.noiseFilter.noiseEnabled.value,
),
desc.FloatParam(
name='noiseB',
label='B',
description='Parameter that have a different interpretation depending on the method chosen.',
name="noiseB",
label="B",
description="Parameter that has a different interpretation depending on the chosen method:\n"
" - uniform: higher bound of the range on which the noise is uniformly distributed.\n"
" - gaussian: the standard deviation of the Gaussian noise.\n"
" - salt: the portion of pixels to set to a specified value.",
value=1.0,
range=(0.0, 1.0, 0.0001),
uid=[0],
enabled=lambda node: node.noiseFilter.noiseEnabled.value,
),
desc.BoolParam(
name='noiseMono',
label='Mono',
description='If is Checked, a single noise value will be applied to all channels otherwise a separate noise value will be computed for each channel.',
name="noiseMono",
label="Mono",
description="If selected, a single noise value will be applied to all channels.\n"
"Otherwise, a separate noise value will be computed for each channel.",
value=True,
uid=[0],
enabled=lambda node: node.noiseFilter.noiseEnabled.value,
),
]),
desc.GroupAttribute(name="nlmFilter", label="NL Means Denoising (8 bits)",
description="NL Means Denoising Parameters.\n This implementation only works on 8-bit images, so the colors can be reduced and clamped.",
joinChar=":", groupDesc=[
]
),
desc.GroupAttribute(
name="nlmFilter",
label="NL Means Denoising (8 bits)",
description="NL Means Denoising Parameters.\n"
"This implementation only works on 8-bit images, so the colors can be reduced and clamped.",
joinChar=":",
groupDesc=[
desc.BoolParam(
name='nlmFilterEnabled',
label='Enable',
description='Use Non-local Mean Denoising from OpenCV to denoise images',
name="nlmFilterEnabled",
label="Enable",
description="Use Non-Local Means Denoising from OpenCV to denoise images.",
value=False,
uid=[0],
),
desc.FloatParam(
name='nlmFilterH',
label='H',
description='Parameter regulating filter strength for luminance component.\n'
'Bigger H value perfectly removes noise but also removes image details, smaller H value preserves details but also preserves some noise.',
name="nlmFilterH",
label="H",
description="Parameter regulating the filter strength for the luminance component.\n"
"Bigger H value perfectly removes noise but also removes image details,\n"
"smaller H value preserves details but also preserves some noise.",
value=5.0,
range=(1.0, 1000.0, 0.01),
uid=[0],
enabled=lambda node: node.nlmFilter.nlmFilterEnabled.value,
),
desc.FloatParam(
name='nlmFilterHColor',
label='HColor',
description='Parameter regulating filter strength for color components. Not necessary for grayscale images.\n'
'Bigger HColor value perfectly removes noise but also removes image details, smaller HColor value preserves details but also preserves some noise.',
name="nlmFilterHColor",
label="HColor",
description="Parameter regulating filter strength for color components. Not necessary for grayscale images.\n"
"Bigger HColor value perfectly removes noise but also removes image details,\n"
"smaller HColor value preserves details but also preserves some noise.",
value=10.0,
range=(0.0, 1000.0, 0.01),
uid=[0],
enabled=lambda node: node.nlmFilter.nlmFilterEnabled.value,
),
desc.IntParam(
name='nlmFilterTemplateWindowSize',
label='Template Window Size',
description='Size in pixels of the template patch that is used to compute weights. Should be odd.',
name="nlmFilterTemplateWindowSize",
label="Template Window Size",
description="Size in pixels of the template patch that is used to compute weights. Should be odd.",
value=7,
range=(1, 101, 2),
uid=[0],
enabled=lambda node: node.nlmFilter.nlmFilterEnabled.value,
),
desc.IntParam(
name='nlmFilterSearchWindowSize',
label='Search Window Size',
description='Size in pixels of the window that is used to compute weighted average for given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater denoising time.',
name="nlmFilterSearchWindowSize",
label="Search Window Size",
description="Size in pixels of the window that is used to compute weighted average for a given pixel.\n"
"Should be odd. Affect performance linearly: greater searchWindowsSize - greater denoising time.",
value=21,
range=(1, 1001, 2),
uid=[0],
enabled=lambda node: node.nlmFilter.nlmFilterEnabled.value,
),
]),
]
),
desc.ChoiceParam(
name='outputFormat',
label='Output Image Format',
description='Allows you to choose the format of the output image.',
value='rgba',
values=['rgba', 'rgb', 'grayscale'],
name="outputFormat",
label="Output Image Format",
description="Allows you to choose the format of the output image.",
value="rgba",
values=["rgba", "rgb", "grayscale"],
exclusive=True,
uid=[0],
),
desc.ChoiceParam(
name='outputColorSpace',
label='Output Color Space',
description='Allows you to choose the color space of the output image.',
value='AUTO',
values=['AUTO', 'sRGB', 'rec709', 'Linear', 'ACES2065-1', 'ACEScg', 'no_conversion'],
name="outputColorSpace",
label="Output Color Space",
description="Allows you to choose the color space of the output image.",
value="AUTO",
values=["AUTO", "sRGB", "rec709", "Linear", "ACES2065-1", "ACEScg", "no_conversion"],
exclusive=True,
uid=[0],
),
desc.ChoiceParam(
name='workingColorSpace',
label='Working Color Space',
description='Allows you to choose the color space in which the data are processed.',
value='Linear',
values=['sRGB', 'rec709', 'Linear', 'ACES2065-1', 'ACEScg', 'no_conversion'],
name="workingColorSpace",
label="Working Color Space",
description="Allows you to choose the color space in which the data are processed.",
value="Linear",
values=["sRGB", "rec709", "Linear", "ACES2065-1", "ACEScg", "no_conversion"],
exclusive=True,
uid=[0],
enabled=lambda node: not node.applyDcpMetadata.value,
),
desc.ChoiceParam(
name='rawColorInterpretation',
label='RAW Color Interpretation',
description='Allows you to choose how raw data are color processed.',
value='DCPLinearProcessing' if os.environ.get('ALICEVISION_COLOR_PROFILE_DB', '') else 'LibRawWhiteBalancing',
values=['None', 'LibRawNoWhiteBalancing', 'LibRawWhiteBalancing', 'DCPLinearProcessing', 'DCPMetadata', 'Auto'],
name="rawColorInterpretation",
label="RAW Color Interpretation",
description="Allows you to choose how RAW data are color processed.",
value="DCPLinearProcessing" if os.environ.get("ALICEVISION_COLOR_PROFILE_DB", "") else "LibRawWhiteBalancing",
values=["None", "LibRawNoWhiteBalancing", "LibRawWhiteBalancing", "DCPLinearProcessing", "DCPMetadata", "Auto"],
exclusive=True,
uid=[0],
),
desc.BoolParam(
name='applyDcpMetadata',
label='Apply DCP metadata',
description='If the image contains some DCP metadata then generate a DCP profile from them and apply it on the image content',
name="applyDcpMetadata",
label="Apply DCP Metadata",
description="If the image contains some DCP metadata, then generate a DCP profile from them and apply it to the image content.",
value=False,
uid=[0],
),
desc.File(
name='colorProfileDatabase',
label='Color Profile Database',
description='''Color Profile database directory path.''',
value='${ALICEVISION_COLOR_PROFILE_DB}',
name="colorProfileDatabase",
label="Color Profile Database",
description="Color profile database directory path.",
value="${ALICEVISION_COLOR_PROFILE_DB}",
uid=[],
enabled=lambda node: (node.rawColorInterpretation.value=='DCPLinearProcessing') or (node.rawColorInterpretation.value=='DCPMetadata'),
enabled=lambda node: (node.rawColorInterpretation.value == "DCPLinearProcessing") or (node.rawColorInterpretation.value == "DCPMetadata"),
),
desc.BoolParam(
name='errorOnMissingColorProfile',
label='Error On Missing DCP Color Profile',
description='If a color profile database is specified but no color profile is found for at least one image, then an error is thrown',
name="errorOnMissingColorProfile",
label="Error On Missing DCP Color Profile",
description="If a color profile database is specified but no color profile is found for at least one image, then an error is thrown.",
value=True,
uid=[0],
enabled=lambda node: (node.rawColorInterpretation.value=='DCPLinearProcessing') or (node.rawColorInterpretation.value=='DCPMetadata'),
enabled=lambda node: (node.rawColorInterpretation.value == "DCPLinearProcessing") or (node.rawColorInterpretation.value == "DCPMetadata"),
),
desc.BoolParam(
name='useDCPColorMatrixOnly',
label='Use DCP Color Matrix Only',
description='Use only the Color Matrix information from the DCP and ignore the Forward Matrix.',
name="useDCPColorMatrixOnly",
label="Use DCP Color Matrix Only",
description="Use only the Color Matrix information from the DCP and ignore the Forward Matrix.",
value=True,
uid=[0],
enabled=lambda node: (node.rawColorInterpretation.value=='DCPLinearProcessing') or (node.rawColorInterpretation.value=='DCPMetadata'),
enabled=lambda node: (node.rawColorInterpretation.value == "DCPLinearProcessing") or (node.rawColorInterpretation.value == "DCPMetadata"),
),
desc.BoolParam(
name='doWBAfterDemosaicing',
label='WB After Demosaicing',
description='Do White Balance after demosaicing, just before DCP profile application',
name="doWBAfterDemosaicing",
label="WB After Demosaicing",
description="Do White Balance after demosaicing, just before DCP profile application.",
value=False,
uid=[0],
enabled=lambda node: (node.rawColorInterpretation.value=='DCPLinearProcessing') or (node.rawColorInterpretation.value=='DCPMetadata'),
enabled=lambda node: (node.rawColorInterpretation.value == "DCPLinearProcessing") or (node.rawColorInterpretation.value == "DCPMetadata"),
),
desc.ChoiceParam(
name='demosaicingAlgo',
label='Demosaicing Algorithm',
description='LibRaw Demosaicing Algorithm',
value='AHD',
values=['linear', 'VNG', 'PPG', 'AHD', 'DCB', 'AHD-Mod', 'AFD', 'VCD', 'Mixed', 'LMMSE', 'AMaZE', 'DHT', 'AAHD', 'none'],
name="demosaicingAlgo",
label="Demosaicing Algorithm",
description="LibRaw demosaicing algorithm to use.",
value="AHD",
values=["linear", "VNG", "PPG", "AHD", "DCB", "AHD-Mod", "AFD", "VCD", "Mixed", "LMMSE", "AMaZE", "DHT", "AAHD", "none"],
exclusive=True,
uid=[0],
),
desc.ChoiceParam(
name='highlightMode',
label='Highlight Mode',
description='LibRaw highlight mode:\n'
' * 0: Clip (default)\n'
' * 1: Unclip\n'
' * 2: Blend\n'
' * 3-9: Rebuild',
name="highlightMode",
label="Highlight Mode",
description="LibRaw highlight mode:\n"
" - 0: Clip (default)\n"
" - 1: Unclip\n"
" - 2: Blend\n"
" - 3-9: Rebuild",
value=0,
values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
exclusive=True,
@@ -509,62 +562,63 @@ Convert or apply filtering to the input images.
),
desc.FloatParam(
name='correlatedColorTemperature',
label='Illuminant Color Temperature',
description='Scene illuminant color temperature in Kelvin. A negative or null value indicates that the metadata information will be used.',
name="correlatedColorTemperature",
label="Illuminant Color Temperature",
description="Scene illuminant color temperature in Kelvin.\n"
"A negative or null value indicates that the metadata information will be used.",
value=-1.0,
range=(-1.0, 10000.0, 1.0),
uid=[0],
),
desc.ChoiceParam(
name='storageDataType',
label='Storage Data Type For EXR Output',
description='Storage image data type:\n'
' * float: Use full floating point (32 bits per channel)\n'
' * half: Use half float (16 bits per channel)\n'
' * halfFinite: Use half float, but clamp values to avoid non-finite values\n'
' * auto: Use half float if all values can fit, else use full float\n',
value='float',
values=['float', 'half', 'halfFinite', 'auto'],
name="storageDataType",
label="Storage Data Type For EXR Output",
description="Storage image data type for EXR outputs:\n"
" - float: Use full floating point (32 bits per channel).\n"
" - half: Use half float (16 bits per channel).\n"
" - halfFinite: Use half float, but clamp values to avoid non-finite values.\n"
" - auto: Use half float if all values can fit, else use full float.",
value="float",
values=["float", "half", "halfFinite", "auto"],
exclusive=True,
uid=[0],
),
desc.ChoiceParam(
name='exrCompressionMethod',
label='EXR Compression Method',
description='Compression method for EXR images.',
value='auto',
values=['none', 'auto', 'rle', 'zip', 'zips', 'piz', 'pxr24', 'b44', 'b44a', 'dwaa', 'dwab'],
name="exrCompressionMethod",
label="EXR Compression Method",
description="Compression method for EXR output images.",
value="auto",
values=["none", "auto", "rle", "zip", "zips", "piz", "pxr24", "b44", "b44a", "dwaa", "dwab"],
exclusive=True,
uid=[0],
),
desc.IntParam(
name='exrCompressionLevel',
label='EXR Compression Level',
description='Level of compression for EXR images, range depends on method used.\n'
'For zip/zips methods, values must be between 1 and 9.\n'
'A value of 0 will be ignored, default value for the selected method will be used.',
name="exrCompressionLevel",
label="EXR Compression Level",
description="Level of compression for EXR images. The range depends on the used method.\n"
"For the zip/zips methods, values must be between 1 and 9.\n"
"A value of 0 will be ignored, and the default value for the selected method will be used.",
value=0,
range=(0, 500, 1),
uid=[0],
enabled=lambda node: node.exrCompressionMethod.value in ['dwaa', 'dwab', 'zip', 'zips']
enabled=lambda node: node.exrCompressionMethod.value in ["dwaa", "dwab", "zip", "zips"]
),
desc.BoolParam(
name='jpegCompress',
label='JPEG Compress',
description='Enable JPEG compression.',
name="jpegCompress",
label="JPEG Compress",
description="Enable JPEG compression.",
value=True,
uid=[0],
),
desc.IntParam(
name='jpegQuality',
label='JPEG Quality',
description='JPEG images quality after compression.',
name="jpegQuality",
label="JPEG Quality",
description="JPEG images quality after compression.",
value=90,
range=(0, 100, 1),
uid=[0],
@@ -572,11 +626,11 @@ Convert or apply filtering to the input images.
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='verbosity level (fatal, error, warning, info, debug, trace).',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value="info",
values=["fatal", "error", "warning", "info", "debug", "trace"],
exclusive=True,
uid=[],
)
@@ -584,27 +638,27 @@ Convert or apply filtering to the input images.
outputs = [
desc.File(
name='outSfMData',
label='SfMData',
description='Output SfMData.',
value=lambda attr: (desc.Node.internalFolder + os.path.basename(attr.node.input.value)) if (os.path.splitext(attr.node.input.value)[1] in ['.abc', '.sfm']) else '',
name="outSfMData",
label="SfMData",
description="Output SfMData file.",
value=lambda attr: (desc.Node.internalFolder + os.path.basename(attr.node.input.value)) if (os.path.splitext(attr.node.input.value)[1] in [".abc", ".sfm"]) else "",
uid=[],
group='', # do not export on the command line
group="", # do not export on the command line
),
desc.File(
name='output',
label='Folder',
description='Output Images Folder.',
name="output",
label="Folder",
description="Output images folder.",
value=desc.Node.internalFolder,
uid=[],
),
desc.File(
name='outputImages',
label='Images',
description='Output Image Files.',
semantic='image',
name="outputImages",
label="Images",
description="Output images.",
semantic="image",
value= outputImagesValueFunct,
group='', # do not export on the command line
group="", # do not export on the command line
uid=[],
),
]

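The noiseFilter descriptions above give A and B a method-dependent meaning. A short NumPy sketch spelling out that mapping (hypothetical helper, assuming a float image of shape (H, W, C); not the AliceVision code):

import numpy as np

def add_noise(img, method="uniform", A=0.0, B=1.0, mono=True, rng=None):
    # img: float array of shape (H, W, C).
    rng = np.random.default_rng() if rng is None else rng
    # One noise value per pixel (mono) or one per channel.
    shape = img.shape[:2] + ((1,) if mono else img.shape[2:])
    if method == "uniform":
        return img + rng.uniform(A, B, shape)   # A, B: bounds of the range [A, B)
    if method == "gaussian":
        return img + rng.normal(A, B, shape)    # A: mean, B: standard deviation
    if method == "salt":
        hit = rng.random(shape) < B             # B: portion of pixels to change
        return np.where(hit, A, img)            # A: value written to those pixels
    raise ValueError(f"Unknown noise method: {method}")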

@@ -13,26 +13,35 @@ class ImportKnownPoses(desc.AVCommandLineNode):
inputs = [
desc.File(
name='sfmData',
label='SfmData',
description='SfMData file.',
value='',
name="sfmData",
label="SfMData",
description="Input SfMData file.",
value="",
uid=[0],
),
desc.File(
name='knownPosesData',
label='KnownPosesData',
description='KnownPoses data in the json or xmp format',
value='',
name="knownPosesData",
label="Known Poses Data",
description="Known poses data in the JSON or XMP format.",
value="",
uid=[0],
),
desc.ChoiceParam(
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value="info",
values=["fatal", "error", "warning", "info", "debug", "trace"],
exclusive=True,
uid=[],
)
]
outputs = [
desc.File(
name='output',
label='Output',
description='Path to the output smfData file.',
name="output",
label="Output",
description="Path to the output SfMData file.",
value=desc.Node.internalFolder + "/sfmData.abc",
uid=[],
),


@@ -102,14 +102,14 @@ You can extract frames at regular interval by configuring only the min/maxFrameS
desc.ListAttribute(
elementDesc=desc.FloatParam(
name="mmFocal",
label="mmFocal",
label="Focal",
description="Focal in mm (will be used if not 0).",
value=0.0,
range=(0.0, 500.0, 1.0),
uid=[0],
),
name="mmFocals",
label="mmFocals",
label="Focals",
description="Focals in mm (will be used if not 0)."
),
desc.File(
@@ -128,8 +128,8 @@ You can extract frames at regular interval by configuring only the min/maxFrameS
group=None, # skip group from command line
groupDesc=[
desc.BoolParam(
name='useSmartSelection',
label='Use Smart Keyframe Selection',
name="useSmartSelection",
label="Use Smart Keyframe Selection",
description="Use the smart keyframe selection.",
value=True,
uid=[0]
@@ -137,7 +137,8 @@ You can extract frames at regular interval by configuring only the min/maxFrameS
desc.GroupAttribute(
name="regularSelection",
label="Regular Keyframe Selection",
description="Parameters for the regular keyframe selection.\nKeyframes are selected regularly over the sequence with respect to the set parameters.",
description="Parameters for the regular keyframe selection.\n"
"Keyframes are selected regularly over the sequence with respect to the set parameters.",
group=None, # skip group from command line
enabled=lambda node: node.selectionMethod.useSmartSelection.value is False,
groupDesc=[
@@ -175,14 +176,15 @@ You can extract frames at regular interval by configuring only the min/maxFrameS
desc.GroupAttribute(
name="smartSelection",
label="Smart Keyframe Selection",
description="Parameters for the smart keyframe selection.\nKeyframes are selected based on their sharpness and optical flow scores.",
description="Parameters for the smart keyframe selection.\n"
"Keyframes are selected based on their sharpness and optical flow scores.",
group=None, # skip group from command line
enabled=lambda node: node.selectionMethod.useSmartSelection.value,
groupDesc=[
desc.FloatParam(
name="pxDisplacement",
label="Pixel Displacement",
description="The percentage of pixels in the frame that need to have moved since the last keyframe to be considered for the selection",
description="The percentage of pixels in the frame that need to have moved since the last keyframe to be considered for the selection.",
value=10.0,
range=(0.0, 100.0, 1.0),
uid=[0],
@@ -277,10 +279,10 @@ You can extract frames at regular interval by configuring only the min/maxFrameS
name="storageDataType",
label="EXR Storage Data Type",
description="Storage image data type for keyframes written to EXR files:\n"
" * float: Use full floating point (32 bits per channel)\n"
" * half: Use half float (16 bits per channel)\n"
" * halfFinite: Use half float, but clamp values to avoid non-finite values\n"
" * auto: Use half float if all values can fit, else use full float\n",
" - float: Use full floating point (32 bits per channel).\n"
" - half: Use half float (16 bits per channel).\n"
" - halfFinite: Use half float, but clamp values to avoid non-finite values.\n"
" - auto: Use half float if all values can fit, else use full float.",
value="float",
values=["float", "half", "halfFinite", "auto"],
exclusive=True,


@@ -36,76 +36,78 @@ Calibrate LDR to HDR response curve from samples.
inputs = [
desc.File(
name='input',
label='Input',
description='SfMData file.',
value='',
name="input",
label="SfMData",
description="Input SfMData file.",
value="",
uid=[0],
),
desc.File(
name='samples',
label='Samples folder',
description='Samples folder',
name="samples",
label="Samples Folder",
description="Samples folder.",
value=desc.Node.internalFolder,
uid=[0],
),
desc.IntParam(
name='userNbBrackets',
label='Number of Brackets',
description='Number of exposure brackets per HDR image (0 for automatic detection).',
name="userNbBrackets",
label="Number Of Brackets",
description="Number of exposure brackets per HDR image (0 for automatic detection).",
value=0,
range=(0, 15, 1),
uid=[],
group='user', # not used directly on the command line
group="user", # not used directly on the command line
),
desc.IntParam(
name='nbBrackets',
label='Automatic Nb Brackets',
description='Number of exposure brackets used per HDR image. It is detected automatically from input Viewpoints metadata if "userNbBrackets" is 0, else it is equal to "userNbBrackets".',
name="nbBrackets",
label="Automatic Nb Brackets",
description="Number of exposure brackets used per HDR image.\n"
"It is detected automatically from input Viewpoints metadata if 'userNbBrackets' is 0,\n"
"else it is equal to 'userNbBrackets'.",
value=0,
range=(0, 10, 1),
uid=[0],
),
desc.BoolParam(
name='byPass',
label='Bypass',
description="Bypass HDR creation and use the medium bracket as the source for the next steps",
name="byPass",
label="Bypass",
description="Bypass HDR creation and use the medium bracket as the source for the next steps.",
value=False,
uid=[0],
enabled= lambda node: node.nbBrackets.value != 1,
),
desc.ChoiceParam(
name='calibrationMethod',
label='Calibration Method',
description="Method used for camera calibration \n"
" * Linear: Disable the calibration and assumes a linear Camera Response Function. If images are encoded in a known colorspace (like sRGB for JPEG), the images will be automatically converted to linear. \n"
" * Debevec: This is the standard method for HDR calibration. \n"
" * Grossberg: Based on learned database of cameras, it allows to reduce the CRF to few parameters while keeping all the precision. \n"
" * Laguerre: Simple but robust method estimating the minimal number of parameters.",
values=['linear', 'debevec', 'grossberg', 'laguerre'],
value='debevec',
name="calibrationMethod",
label="Calibration Method",
description="Method used for camera calibration:\n"
" - Linear: Disables the calibration and assumes a linear Camera Response Function. If images are encoded in a known colorspace (like sRGB for JPEG), the images will be automatically converted to linear.\n"
" - Debevec: This is the standard method for HDR calibration.\n"
" - Grossberg: Based on learned database of cameras, it allows to reduce the CRF to few parameters while keeping all the precision.\n"
" - Laguerre: Simple but robust method estimating the minimal number of parameters.",
values=["linear", "debevec", "grossberg", "laguerre"],
value="debevec",
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
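To make the "Debevec" option concrete, here is a textbook-style sketch of the Debevec & Malik least-squares solve for the log response curve. It is an 8-bit illustration using numpy; the actual aliceVision implementation, quantization level and weighting scheme may differ.

import numpy as np

def gsolve(Z, log_dt, lam, w):
    """Classic Debevec & Malik solve for the log response curve g.

    Z      : (N, P) int array of pixel values (0..255), N sample locations, P exposures.
    log_dt : (P,) log exposure times.
    lam    : smoothness weight.
    w      : (256,) weighting function over pixel values.
    Returns g (256,) such that g[z] = ln(E) + ln(dt) for a pixel value z.
    """
    n = 256
    N, P = Z.shape
    A = np.zeros((N * P + n - 1, n + N))
    b = np.zeros(A.shape[0])

    k = 0
    for i in range(N):
        for j in range(P):
            wij = w[Z[i, j]]
            A[k, Z[i, j]] = wij     # g(Z_ij)
            A[k, n + i] = -wij      # -ln(E_i)
            b[k] = wij * log_dt[j]  # ln(dt_j)
            k += 1

    A[k, n // 2] = 1.0              # pin the curve: g(mid-range) = 0
    k += 1

    for z in range(1, n - 1):       # smoothness term lam * g''(z)
        A[k, z - 1] = lam * w[z]
        A[k, z] = -2.0 * lam * w[z]
        A[k, z + 1] = lam * w[z]
        k += 1

    g, *_ = np.linalg.lstsq(A, b, rcond=None)
    return g[:n]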
desc.ChoiceParam(
name='calibrationWeight',
label='Calibration Weight',
description="Weight function used to calibrate camera response \n"
" * default (automatically selected according to the calibrationMethod) \n"
" * gaussian \n"
" * triangle \n"
" * plateau",
value='default',
values=['default', 'gaussian', 'triangle', 'plateau'],
name="calibrationWeight",
label="Calibration Weight",
description="Weight function used to calibrate camera response:\n"
" - default (automatically selected according to the calibrationMethod)\n"
" - gaussian\n"
" - triangle\n"
" - plateau",
value="default",
values=["default", "gaussian", "triangle", "plateau"],
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
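The three named weight functions all downweight pixel values near the extremes of the range, where sensors clip. A rough sketch of their typical shapes over normalized values in [0, 1]; the formulas are illustrative only and not necessarily the exact ones used by aliceVision.

import numpy as np

def triangle_weight(z):
    # Hat function peaking at mid-range, zero at the extremes.
    return 1.0 - np.abs(2.0 * z - 1.0)

def gaussian_weight(z, sigma=0.2):
    # Bell curve centred on mid-range values.
    return np.exp(-((z - 0.5) ** 2) / (2.0 * sigma ** 2))

def plateau_weight(z, k=8):
    # Nearly flat over most of the range, falling off near 0 and 1 (even exponent).
    return 1.0 - (2.0 * z - 1.0) ** k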
desc.IntParam(
name='channelQuantizationPower',
label='Channel Quantization Power',
description='Quantization level like 8 bits or 10 bits.',
name="channelQuantizationPower",
label="Channel Quantization Power",
description="Quantization level like 8 bits or 10 bits.",
value=10,
range=(8, 14, 1),
uid=[0],
@@ -113,21 +115,22 @@ Calibrate LDR to HDR response curve from samples.
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.ChoiceParam(
name='workingColorSpace',
label='Working Color Space',
description='Allows you to choose the color space in which the data are processed.',
value='sRGB',
values=['sRGB', 'Linear', 'ACES2065-1', 'ACEScg'],
name="workingColorSpace",
label="Working Color Space",
description="Allows you to choose the color space in which the data are processed.",
value="sRGB",
values=["sRGB", "Linear", "ACES2065-1", "ACEScg"],
exclusive=True,
uid=[],
group='user', # not used directly on the command line
group="user", # not used directly on the command line
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name='maxTotalPoints',
label='Max Number of Points',
description='Max number of points used from the sampling. This ensures that the number of pixels values extracted by the sampling\n'
'can be managed by the calibration step (in term of computation time and memory usage).',
name="maxTotalPoints",
label="Max Number Of Points",
description="Maximum number of points used from the sampling.\n"
"This ensures that the number of pixels values extracted by the sampling\n"
"can be managed by the calibration step (in term of computation time and memory usage).",
value=1000000,
range=(8, 10000000, 1000),
uid=[0],
@@ -135,11 +138,11 @@ Calibrate LDR to HDR response curve from samples.
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='verbosity level (fatal, error, warning, info, debug, trace).',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value="info",
values=["fatal", "error", "warning", "info", "debug", "trace"],
exclusive=True,
uid=[],
)
@@ -147,10 +150,10 @@ Calibrate LDR to HDR response curve from samples.
outputs = [
desc.File(
name='response',
label='Response File',
description='Path to the output response file',
value=desc.Node.internalFolder + 'response.csv',
name="response",
label="Response File",
description="Path to the output response file.",
value=desc.Node.internalFolder + "response.csv",
uid=[],
)
]


@@ -35,94 +35,97 @@ Merge LDR images into HDR images.
inputs = [
desc.File(
name='input',
label='Input',
description='SfMData file.',
value='',
name="input",
label="SfMData",
description="Input SfMData file.",
value="",
uid=[0],
),
desc.File(
name='response',
label='Response file',
description='Response file',
value='',
name="response",
label="Response File",
description="Response file.",
value="",
uid=[0],
),
desc.IntParam(
name='userNbBrackets',
label='Number of Brackets',
description='Number of exposure brackets per HDR image (0 for automatic detection).',
name="userNbBrackets",
label="Number Of Brackets",
description="Number of exposure brackets per HDR image (0 for automatic detection).",
value=0,
range=(0, 15, 1),
uid=[],
group='user', # not used directly on the command line
group="user", # not used directly on the command line
),
desc.IntParam(
name='nbBrackets',
label='Automatic Nb Brackets',
description='Number of exposure brackets used per HDR image. It is detected automatically from input Viewpoints metadata if "userNbBrackets" is 0, else it is equal to "userNbBrackets".',
name="nbBrackets",
label="Automatic Nb Brackets",
description="Number of exposure brackets used per HDR image.\n"
"It is detected automatically from input Viewpoints metadata if 'userNbBrackets'\n"
"is 0, else it is equal to 'userNbBrackets'.",
value=0,
range=(0, 10, 1),
uid=[0],
),
desc.BoolParam(
name='offsetRefBracketIndexEnabled',
label='Manually Specify Ref Bracket',
description='Manually specify the reference bracket index to control the exposure of the HDR image.',
name="offsetRefBracketIndexEnabled",
label="Manually Specify Ref Bracket",
description="Manually specify the reference bracket index to control the exposure of the HDR image.",
value=False,
uid=[0],
group='user', # not used directly on the command line
group="user", # not used directly on the command line
),
desc.IntParam(
name='offsetRefBracketIndex',
label='Offset Ref Bracket Index',
description='Zero to use the center bracket. +N to use a more exposed bracket or -N to use a less exposed backet.',
name="offsetRefBracketIndex",
label="Offset Ref Bracket Index",
description="0 to use the center bracket.\n"
"+N to use a more exposed bracket or -N to use a less exposed bracket.",
value=1,
range=(-4, 4, 1),
uid=[0],
enabled= lambda node: (node.nbBrackets.value != 1 and node.offsetRefBracketIndexEnabled.value),
),
desc.FloatParam(
name='meanTargetedLumaForMerging',
label='Targeted Luminance For Merging',
description='Expected mean luminance of the HDR images used to compute the final panorama',
name="meanTargetedLumaForMerging",
label="Targeted Luminance For Merging",
description="Expected mean luminance of the HDR images used to compute the final panorama.",
value=0.4,
range=(0.0, 1.0, 0.01),
uid=[0],
enabled= lambda node: (node.nbBrackets.value != 1 and not node.offsetRefBracketIndexEnabled.value),
),
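These two parameters are alternative ways of picking the reference exposure: either an explicit offset from the center bracket, or the bracket whose mean luminance best matches the target. A hedged sketch of both selection rules; the helper name and the "closest mean luminance" criterion are assumptions for illustration.

import numpy as np

def pick_reference_bracket(brackets, offset_enabled, offset=1, target_luma=0.4):
    # brackets: list of images (H x W x 3 float arrays in [0, 1]), ordered by exposure.
    center = len(brackets) // 2
    if offset_enabled:
        # 'offsetRefBracketIndex': 0 = center, +N more exposed, -N less exposed.
        return int(np.clip(center + offset, 0, len(brackets) - 1))
    # Otherwise pick the bracket whose mean luminance is closest to
    # 'meanTargetedLumaForMerging'.
    mean_lumas = [img.mean() for img in brackets]
    return int(np.argmin([abs(m - target_luma) for m in mean_lumas]))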
desc.BoolParam(
name='byPass',
label='Bypass',
name="byPass",
label="Bypass",
description="Bypass HDR creation and use the medium bracket as the source for the next steps.",
value=False,
uid=[0],
enabled= lambda node: node.nbBrackets.value != 1,
),
desc.BoolParam(
name='keepSourceImageName',
label='Keep Source Image Name',
name="keepSourceImageName",
label="Keep Source Image Name",
description="Keep the filename of the input image selected as central image for the output image filename.",
value=False,
uid=[0],
),
desc.ChoiceParam(
name='fusionWeight',
label='Fusion Weight',
name="fusionWeight",
label="Fusion Weight",
description="Weight function used to fuse all LDR images together:\n"
" * gaussian \n"
" * triangle \n"
" * plateau",
value='gaussian',
values=['gaussian', 'triangle', 'plateau'],
" - gaussian\n"
" - triangle\n"
" - plateau",
value="gaussian",
values=["gaussian", "triangle", "plateau"],
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
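For context, the classic weighted HDR fusion these weight functions feed into looks roughly like the following, a Debevec-style per-pixel merge in log-radiance space. This is a sketch only, not aliceVision's code.

import numpy as np

def merge_hdr(images, exposures, g, weight):
    # images   : list of (H, W) uint8 arrays (one channel per call, one image per bracket).
    # exposures: list of exposure times (seconds), same length as images.
    # g        : (256,) log response curve, e.g. from a Debevec calibration.
    # weight   : callable mapping pixel values 0..255 to fusion weights.
    num = np.zeros(images[0].shape, dtype=np.float64)
    den = np.zeros_like(num)
    for img, dt in zip(images, exposures):
        w = weight(img)
        num += w * (g[img] - np.log(dt))   # weighted log-radiance estimate
        den += w
    return np.exp(num / np.maximum(den, 1e-6))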
desc.IntParam(
name='channelQuantizationPower',
label='Channel Quantization Power',
description='Quantization level like 8 bits or 10 bits.',
name="channelQuantizationPower",
label="Channel Quantization Power",
description="Quantization level like 8 bits or 10 bits.",
value=10,
range=(8, 14, 1),
uid=[0],
@@ -130,79 +133,79 @@ Merge LDR images into HDR images.
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.ChoiceParam(
name='workingColorSpace',
label='Working Color Space',
description='Allows you to choose the color space in which the data are processed.',
value='sRGB',
values=['sRGB', 'Linear', 'ACES2065-1', 'ACEScg', 'no_conversion'],
name="workingColorSpace",
label="Working Color Space",
description="Allows you to choose the color space in which the data are processed.",
value="sRGB",
values=["sRGB", "Linear", "ACES2065-1", "ACEScg", "no_conversion"],
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.BoolParam(
name='enableHighlight',
label='Enable Highlight',
name="enableHighlight",
label="Enable Highlight",
description="Enable highlights correction.",
value=False,
uid=[0],
group='user', # not used directly on the command line
group="user", # not used directly on the command line
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.FloatParam(
name='highlightCorrectionFactor',
label='Highlights Correction',
description='Pixels saturated in all input images have a partial information about their real luminance.\n'
'We only know that the value should be >= to the standard hdr fusion.\n'
'This parameter allows to perform a post-processing step to put saturated pixels to a constant\n'
'value defined by the `highlightsMaxLuminance` parameter.\n'
'This parameter is float to enable to weight this correction.',
name="highlightCorrectionFactor",
label="Highlights Correction",
description="Pixels saturated in all input images have a partial information about their real luminance.\n"
"We only know that the value should be >= to the standard HDRfusion.\n"
"This parameter allows to perform a post-processing step to put saturated pixels to a constant\n"
"value defined by the `highlightsMaxLuminance` parameter.\n"
"This parameter is float to enable to weight this correction.",
value=1.0,
range=(0.0, 1.0, 0.01),
uid=[0],
enabled= lambda node: node.enableHighlight.enabled and node.enableHighlight.value,
),
desc.FloatParam(
name='highlightTargetLux',
label='Highlight Target Luminance (Lux)',
description='This is an arbitrary target value (in Lux) used to replace the unknown luminance value of the saturated pixels.\n'
'\n'
'Some Outdoor Reference Light Levels:\n'
' * 120,000 lux: Brightest sunlight\n'
' * 110,000 lux: Bright sunlight\n'
' * 20,000 lux: Shade illuminated by entire clear blue sky, midday\n'
' * 1,000 lux: Typical overcast day, midday\n'
' * 400 lux: Sunrise or sunset on a clear day\n'
' * 40 lux: Fully overcast, sunset/sunrise\n'
'\n'
'Some Indoor Reference Light Levels:\n'
' * 20000 lux: Max Usually Used Indoor\n'
' * 750 lux: Supermarkets\n'
' * 500 lux: Office Work\n'
' * 150 lux: Home\n',
name="highlightTargetLux",
label="Highlight Target Luminance (Lux)",
description="This is an arbitrary target value (in Lux) used to replace the unknown luminance value of the saturated pixels.\n"
"\n"
"Some Outdoor Reference Light Levels:\n"
" - 120,000 lux: Brightest sunlight\n"
" - 110,000 lux: Bright sunlight\n"
" - 20,000 lux: Shade illuminated by entire clear blue sky, midday\n"
" - 1,000 lux: Typical overcast day, midday\n"
" - 400 lux: Sunrise or sunset on a clear day\n"
" - 40 lux: Fully overcast, sunset/sunrise\n"
"\n"
"Some Indoor Reference Light Levels:\n"
" - 20000 lux: Max Usually Used Indoor\n"
" - 750 lux: Supermarkets\n"
" - 500 lux: Office Work\n"
" - 150 lux: Home\n",
value=120000.0,
range=(1000.0, 150000.0, 1.0),
uid=[0],
enabled= lambda node: node.enableHighlight.enabled and node.enableHighlight.value and node.highlightCorrectionFactor.value != 0,
),
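A hedged sketch of the post-processing these two parameters describe: pixels saturated in every bracket are pushed toward a constant luminance derived from the target lux value, blended by the correction factor. The lux-to-luminance conversion shown here is a placeholder assumption, as is the function name.

import numpy as np

def correct_highlights(hdr, saturated_mask, correction_factor, target_lux,
                       lux_to_luminance=1.0 / 683.0):
    # hdr            : (H, W) merged luminance image.
    # saturated_mask : boolean mask of pixels saturated in all input brackets.
    # lux_to_luminance: placeholder conversion factor, not aliceVision's actual model.
    target = target_lux * lux_to_luminance
    corrected = hdr.copy()
    # Only raise values: the description states the true value is >= the fused one.
    boosted = np.maximum(hdr[saturated_mask], target)
    corrected[saturated_mask] = ((1.0 - correction_factor) * hdr[saturated_mask]
                                 + correction_factor * boosted)
    return corrected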
desc.ChoiceParam(
name='storageDataType',
label='Storage Data Type',
description='Storage image data type:\n'
' * float: Use full floating point (32 bits per channel)\n'
' * half: Use half float (16 bits per channel)\n'
' * halfFinite: Use half float, but clamp values to avoid non-finite values\n'
' * auto: Use half float if all values can fit, else use full float\n',
value='float',
values=['float', 'half', 'halfFinite', 'auto'],
name="storageDataType",
label="Storage Data Type",
description="Storage image data type:\n"
" - float: Use full floating point (32 bits per channel).\n"
" - half: Use half float (16 bits per channel).\n"
" - halfFinite: Use half float, but clamp values to avoid non-finite values.\n"
" - auto: Use half float if all values can fit, else use full float.",
value="float",
values=["float", "half", "halfFinite", "auto"],
exclusive=True,
uid=[0],
),
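To illustrate what "halfFinite" and "auto" mean in practice, a small numpy sketch; 65504 is the largest finite float16 value, and the actual EXR writing is out of scope here.

import numpy as np

HALF_MAX = 65504.0  # largest finite value representable in float16

def apply_storage_data_type(pixels, mode):
    pixels = np.asarray(pixels, dtype=np.float32)
    if mode == "float":
        return pixels
    if mode == "half":
        return pixels.astype(np.float16)  # may overflow to +/-inf
    if mode == "halfFinite":
        return np.clip(pixels, -HALF_MAX, HALF_MAX).astype(np.float16)
    if mode == "auto":
        fits = np.all(np.abs(pixels[np.isfinite(pixels)]) <= HALF_MAX)
        return pixels.astype(np.float16) if fits else pixels
    raise ValueError(f"unknown storage data type: {mode}")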
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='verbosity level (fatal, error, warning, info, debug, trace).',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value="info",
values=["fatal", "error", "warning", "info", "debug", "trace"],
exclusive=True,
uid=[],
)
@@ -210,18 +213,18 @@ Merge LDR images into HDR images.
outputs = [
desc.File(
name='outputFolder',
label='Output Folder',
description='Path to the folder containing the merged HDR images.',
name="outputFolder",
label="Folder",
description="Path to the folder containing the merged HDR images.",
value=desc.Node.internalFolder,
uid=[],
group='', # do not export on the command line
group="", # do not export on the command line
),
desc.File(
name='outSfMData',
label='SfMData File',
description='Path to the output sfmdata file',
value=desc.Node.internalFolder + 'sfmData.sfm',
name="outSfMData",
label="SfMData",
description="Path to the output SfMData file.",
value=desc.Node.internalFolder + "sfmData.sfm",
uid=[],
)
]


@@ -53,55 +53,57 @@ Sample pixels from Low range images for HDR creation.
inputs = [
desc.File(
name='input',
label='Input',
description='SfMData file.',
value='',
name="input",
label="SfMData",
description="Input SfMData file.",
value="",
uid=[0],
),
desc.IntParam(
name='userNbBrackets',
label='Number of Brackets',
description='Number of exposure brackets per HDR image (0 for automatic detection).',
name="userNbBrackets",
label="Number Of Brackets",
description="Number of exposure brackets per HDR image (0 for automatic detection).",
value=0,
range=(0, 15, 1),
uid=[],
group='user', # not used directly on the command line
group="user", # not used directly on the command line
),
desc.IntParam(
name='nbBrackets',
label='Automatic Nb Brackets',
description='Number of exposure brackets used per HDR image. It is detected automatically from input Viewpoints metadata if "userNbBrackets" is 0, else it is equal to "userNbBrackets".',
name="nbBrackets",
label="Automatic Nb Brackets",
description="Number of exposure brackets used per HDR image.\n"
"It is detected automatically from input Viewpoints metadata if 'userNbBrackets'\n"
"is 0, else it is equal to 'userNbBrackets'.",
value=0,
range=(0, 10, 1),
uid=[0],
),
desc.BoolParam(
name='byPass',
label='Bypass',
description="Bypass HDR creation and use the medium bracket as the source for the next steps",
name="byPass",
label="Bypass",
description="Bypass HDR creation and use the medium bracket as the source for the next steps.",
value=False,
uid=[0],
enabled= lambda node: node.nbBrackets.value != 1,
),
desc.ChoiceParam(
name='calibrationMethod',
label='Calibration Method',
description="Method used for camera calibration \n"
" * Linear: Disable the calibration and assumes a linear Camera Response Function. If images are encoded in a known colorspace (like sRGB for JPEG), the images will be automatically converted to linear. \n"
" * Debevec: This is the standard method for HDR calibration. \n"
" * Grossberg: Based on learned database of cameras, it allows to reduce the CRF to few parameters while keeping all the precision. \n"
" * Laguerre: Simple but robust method estimating the minimal number of parameters.",
values=['linear', 'debevec', 'grossberg', 'laguerre'],
value='debevec',
name="calibrationMethod",
label="Calibration Method",
description="Method used for camera calibration:\n"
" - Linear: Disable the calibration and assumes a linear Camera Response Function. If images are encoded in a known colorspace (like sRGB for JPEG), the images will be automatically converted to linear.\n"
" - Debevec: This is the standard method for HDR calibration.\n"
" - Grossberg: Based on learned database of cameras, it allows to reduce the CRF to few parameters while keeping all the precision.\n"
" - Laguerre: Simple but robust method estimating the minimal number of parameters.",
values=["linear", "debevec", "grossberg", "laguerre"],
value="debevec",
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name='channelQuantizationPower',
label='Channel Quantization Power',
description='Quantization level like 8 bits or 10 bits.',
name="channelQuantizationPower",
label="Channel Quantization Power",
description="Quantization level like 8 bits or 10 bits.",
value=10,
range=(8, 14, 1),
uid=[0],
@@ -109,19 +111,19 @@ Sample pixels from Low range images for HDR creation.
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.ChoiceParam(
name='workingColorSpace',
label='Working Color Space',
description='Allows you to choose the color space in which the data are processed.',
value='sRGB',
values=['sRGB', 'Linear', 'ACES2065-1', 'ACEScg', 'no_conversion'],
name="workingColorSpace",
label="Working Color Space",
description="Allows you to choose the color space in which the data are processed.",
value="sRGB",
values=["sRGB", "Linear", "ACES2065-1", "ACEScg", "no_conversion"],
exclusive=True,
uid=[0],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name='blockSize',
label='Block Size',
description='Size of the image tile to extract a sample.',
name="blockSize",
label="Block Size",
description="Size of the image tile to extract a sample.",
value=256,
range=(8, 1024, 1),
uid=[0],
@@ -129,9 +131,9 @@ Sample pixels from Low range images for HDR creation.
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name='radius',
label='Patch Radius',
description='Radius of the patch used to analyze the sample statistics.',
name="radius",
label="Patch Radius",
description="Radius of the patch used to analyze the sample statistics.",
value=5,
range=(0, 10, 1),
uid=[0],
@@ -139,9 +141,9 @@ Sample pixels from Low range images for HDR creation.
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.IntParam(
name='maxCountSample',
label='Max Number of Samples',
description='Max number of samples per image group.',
name="maxCountSample",
label="Max Number Of Samples",
description="Maximum number of samples per image group.",
value=200,
range=(10, 1000, 10),
uid=[0],
@@ -149,19 +151,19 @@ Sample pixels from Low range images for HDR creation.
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
desc.BoolParam(
name='debug',
label='Export Debug Files',
name="debug",
label="Export Debug Files",
description="Export debug files to analyze the sampling strategy.",
value=False,
uid=[],
enabled= lambda node: node.byPass.enabled and not node.byPass.value,
),
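Taken together, "Block Size", "Patch Radius" and "Max Number Of Samples" describe a tiled sampling strategy. A hedged sketch of what such a strategy could look like; the real per-block selection criterion is not shown and the function name is hypothetical.

import numpy as np

def sample_image(image, block_size=256, radius=5, max_count=200):
    # image: (H, W) grayscale array. One candidate sample per tile of block_size pixels,
    # with patch statistics gathered around it, capped at max_count samples.
    h, w = image.shape
    samples = []
    for y in range(0, h, block_size):
        for x in range(0, w, block_size):
            if len(samples) >= max_count:
                return samples
            # Hypothetical choice: the center of the tile, clamped so the patch fits.
            cy = int(np.clip(y + block_size // 2, radius, h - radius - 1))
            cx = int(np.clip(x + block_size // 2, radius, w - radius - 1))
            patch = image[cy - radius:cy + radius + 1, cx - radius:cx + radius + 1]
            samples.append(((cy, cx), patch.mean(), patch.std()))
    return samples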
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='verbosity level (fatal, error, warning, info, debug, trace).',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value="info",
values=["fatal", "error", "warning", "info", "debug", "trace"],
exclusive=True,
uid=[],
)
@@ -169,9 +171,9 @@ Sample pixels from Low range images for HDR creation.
outputs = [
desc.File(
name='output',
label='Folder',
description='Output path for the samples.',
name="output",
label="Folder",
description="Output path for the samples.",
value=desc.Node.internalFolder,
uid=[],
),


@@ -13,52 +13,53 @@ Can also be used to calibrate a lighting dome (RTI type).
inputs = [
desc.File(
name='inputPath',
label='SfMData',
description='Input SfMData file.',
value='',
name="inputPath",
label="SfMData",
description="Input SfMData file.",
value="",
uid=[0]
),
desc.File(
name='inputJSON',
label='Sphere Detection File',
description='Input JSON file containing sphere centers and radiuses.',
value='',
name="inputJSON",
label="Sphere Detection File",
description="Input JSON file containing sphere centers and radiuses.",
value="",
uid=[0]
),
desc.BoolParam(
name='saveAsModel',
label='Save As Model',
description='Check if this calibration file will be used with other datasets.',
name="saveAsModel",
label="Save As Model",
description="Check if this calibration file will be used with other datasets.",
value=False,
uid=[0]
),
desc.ChoiceParam(
name='method',
label='Calibration Method',
description='Method used for light calibration. Use "brightestPoint" for shiny spheres and "whiteSphere" for white matte spheres.',
values=['brightestPoint', 'whiteSphere'],
value='brightestPoint',
name="method",
label="Calibration Method",
description="Method used for light calibration.\n"
"Use 'brightestPoint' for shiny spheres and 'whiteSphere' for white matte spheres.",
values=["brightestPoint", "whiteSphere"],
value="brightestPoint",
exclusive=True,
uid=[0]
),
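For the "brightestPoint" case, the usual geometric idea is to read the sphere normal at the brightest pixel and reflect the viewing direction about it. A hedged sketch assuming an orthographic camera looking along +Z; aliceVision's exact camera model and conventions may differ.

import numpy as np

def light_direction_from_brightest_point(brightest_xy, sphere_center_xy, sphere_radius):
    # Normal of the sphere surface at the brightest pixel (image coordinates).
    nx = (brightest_xy[0] - sphere_center_xy[0]) / sphere_radius
    ny = (brightest_xy[1] - sphere_center_xy[1]) / sphere_radius
    nz = np.sqrt(max(0.0, 1.0 - nx * nx - ny * ny))
    n = np.array([nx, ny, nz])
    view = np.array([0.0, 0.0, 1.0])           # orthographic viewing direction (assumption)
    light = 2.0 * np.dot(n, view) * n - view   # mirror reflection of the view about the normal
    return light / np.linalg.norm(light)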
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='Verbosity level (fatal, error, warning, info, debug, trace).',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value="info",
values=["fatal", "error", "warning", "info", "debug", "trace"],
exclusive=True,
uid=[]
uid=[],
)
]
outputs = [
desc.File(
name='outputFile',
label='Light File',
description='Light information will be written here.',
value=desc.Node.internalFolder +'/lights.json' ,
name="outputFile",
label="Light File",
description="Light information will be written here.",
value=desc.Node.internalFolder + "/lights.json",
uid=[]
)
]


@@ -7,84 +7,87 @@ class LightingEstimation(desc.AVCommandLineNode):
commandLine = 'aliceVision_lightingEstimation {allParams}'
category = 'Utils'
documentation = '''
'''
inputs = [
desc.File(
name='input',
label='Input',
description='SfMData file.',
value='',
name="input",
label="SfMData",
description="Input SfMData file.",
value="",
uid=[0],
),
desc.File(
name="depthMapsFilterFolder",
label='Filtered Depth Maps Folder',
description='Input filtered depth maps folder',
value='',
label="Filtered Depth Maps Folder",
description="Input filtered depth maps folder.",
value="",
uid=[0],
),
desc.File(
name='imagesFolder',
label='Images Folder',
description='Use images from a specific folder instead of those specify in the SfMData file.\nFilename should be the image uid.',
value='',
name="imagesFolder",
label="Images Folder",
description="Use images from a specific folder instead of those specify in the SfMData file.\n"
"Filename should be the image UID.",
value="",
uid=[0],
),
desc.ChoiceParam(
name='lightingEstimationMode',
label='Lighting Estimation Mode',
description='Lighting Estimation Mode.',
value='global',
values=['global', 'per_image'],
name="lightingEstimationMode",
label="Lighting Estimation Mode",
description="Lighting estimation mode.",
value="global",
values=["global", "per_image"],
exclusive=True,
uid=[0],
advanced=True,
),
desc.ChoiceParam(
name='lightingColor',
label='Lighting Color Mode',
description='Lighting Color Mode.',
value='RGB',
values=['RGB', 'Luminance'],
name="lightingColor",
label="Lighting Color Mode",
description="Lighting color mode.",
value="RGB",
values=["RGB", "Luminance"],
exclusive=True,
uid=[0],
advanced=True,
),
desc.ChoiceParam(
name='albedoEstimationName',
label='Albedo Estimation Name',
description='Albedo estimation method used for light estimation.',
value='constant',
values=['constant', 'picture', 'median_filter', 'blur_filter'],
name="albedoEstimationName",
label="Albedo Estimation Name",
description="Albedo estimation method used for light estimation.",
value="constant",
values=["constant", "picture", "median_filter", "blur_filter"],
exclusive=True,
uid=[0],
advanced=True,
),
desc.IntParam(
name='albedoEstimationFilterSize',
label='Albedo Estimation Filter Size',
description='Albedo filter size for estimation method using filter.',
name="albedoEstimationFilterSize",
label="Albedo Estimation Filter Size",
description="Albedo filter size for estimation method using filter.",
value=3,
range=(0, 100, 1),
uid=[0],
advanced=True,
),
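The role of the albedo estimate is to make the shading equation linear in the lighting vector. A hedged sketch of a first-order (4-coefficient) Lambertian least-squares fit, which is one common formulation; it is not necessarily the exact model used by aliceVision.

import numpy as np

def estimate_lighting(intensity, normals, albedo):
    # intensity: (N,) observed pixel intensities.
    # normals  : (N, 3) per-pixel surface normals.
    # albedo   : (N,) per-pixel albedo estimate (e.g. constant, or from a median filter).
    # Assumed model: I = albedo * (l0 + l1*nx + l2*ny + l3*nz).
    basis = np.hstack([np.ones((normals.shape[0], 1)), normals])  # (N, 4)
    A = albedo[:, None] * basis
    lighting, *_ = np.linalg.lstsq(A, intensity, rcond=None)
    return lighting  # (l0, l1, l2, l3)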
desc.ChoiceParam(
name='verboseLevel',
label='Verbose Level',
description='Verbosity level (fatal, error, warning, info, debug, trace).',
value='info',
values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
value="info",
values=["fatal", "error", "warning", "info", "debug", "trace"],
exclusive=True,
uid=[],
),
)
]
outputs = [
desc.File(
name='output',
label='Folder',
description='Folder for output lighting vector files.',
name="output",
label="Folder",
description="Folder for output lighting vector files.",
value=desc.Node.internalFolder,
uid=[],
),