[nodes] add some nodes documentation

Fabien Castan 2020-03-26 11:20:44 +01:00
parent 0e606eef4e
commit 64a4c9426a
30 changed files with 352 additions and 18 deletions

@ -16,20 +16,39 @@ Viewpoint = [
desc.IntParam(name="intrinsicId", label="Intrinsic", description="Internal Camera Parameters", value=-1, uid=[0], range=None),
desc.IntParam(name="rigId", label="Rig", description="Rig Parameters", value=-1, uid=[0], range=None),
desc.IntParam(name="subPoseId", label="Rig Sub-Pose", description="Rig Sub-Pose Parameters", value=-1, uid=[0], range=None),
desc.StringParam(name="metadata", label="Image Metadata", description="", value="", uid=[], advanced=True),
desc.StringParam(name="metadata", label="Image Metadata",
description="The configuration of the Viewpoints is based on the images metadata.\n"
"The important ones are:\n"
" * Focal Length: the focal length in mm.\n"
" * Make and Model: this information allows to convert the focal in mm into a focal length in pixel using an embedded sensor database.\n"
" * Serial Number: allows to uniquely identify a device so multiple devices with the same Make, Model can be differentiated and their internal parameters are optimized separately.",
value="", uid=[], advanced=True),
]
Intrinsic = [
desc.IntParam(name="intrinsicId", label="Id", description="Intrinsic UID", value=-1, uid=[0], range=None),
desc.FloatParam(name="pxInitialFocalLength", label="Initial Focal Length", description="Initial Guess on the Focal Length", value=-1.0, uid=[0], range=None),
desc.FloatParam(name="pxFocalLength", label="Focal Length", description="Known/Calibrated Focal Length", value=-1.0, uid=[0], range=None),
desc.ChoiceParam(name="type", label="Camera Type", description="Camera Type", value="", values=['', 'pinhole', 'radial1', 'radial3', 'brown', 'fisheye4', 'equidistant_r3'], exclusive=True, uid=[0]),
desc.FloatParam(name="pxInitialFocalLength", label="Initial Focal Length",
description="Initial Guess on the Focal Length (in pixels). \n"
"When we have an initial value from EXIF, this value is not accurate but cannot be wrong. \n"
"So this value is used to limit the range of possible values in the optimization. \n"
"If you put -1, this value will not be used and the focal length will not be bounded.",
value=-1.0, uid=[0], range=None),
desc.FloatParam(name="pxFocalLength", label="Focal Length", description="Known/Calibrated Focal Length (in pixels)", value=-1.0, uid=[0], range=None),
desc.ChoiceParam(name="type", label="Camera Type",
description="Mathematical Model used to represent a camera:\n"
" * pinhole: Simplest projective camera model without optical distortion (focal and optical center).\n"
" * radial1: Pinhole camera with one radial distortion parameter\n"
" * radial3: Pinhole camera with 3 radial distortion parameters\n"
" * brown: Pinhole camera with 3 radial and 2 tangential distortion parameters\n"
" * fisheye4: Pinhole camera with 4 distortion parameters suited for fisheye optics (like 120° FoV)\n"
" * equidistant_r3: Non-projective camera model suited for full-fisheye optics (like 180° FoV)\n",
value="", values=['', 'pinhole', 'radial1', 'radial3', 'brown', 'fisheye4', 'equidistant_r3'], exclusive=True, uid=[0]),
desc.IntParam(name="width", label="Width", description="Image Width", value=0, uid=[], range=(0, 10000, 1)),
desc.IntParam(name="height", label="Height", description="Image Height", value=0, uid=[], range=(0, 10000, 1)),
desc.FloatParam(name="sensorWidth", label="Sensor Width", description="Sensor Width (mm)", value=36, uid=[], range=(0, 1000, 1)),
desc.FloatParam(name="sensorHeight", label="Sensor Height", description="Sensor Height (mm)", value=24, uid=[], range=(0, 1000, 1)),
desc.StringParam(name="serialNumber", label="Serial Number", description="Device Serial Number (camera and lens combined)", value="", uid=[]),
desc.GroupAttribute(name="principalPoint", label="Principal Point", description="", groupDesc=[
desc.StringParam(name="serialNumber", label="Serial Number", description="Device Serial Number (Camera UID and Lens UID combined)", value="", uid=[]),
desc.GroupAttribute(name="principalPoint", label="Principal Point", description="Position of the Optical Center in the Image (i.e. the sensor surface).", groupDesc=[
desc.FloatParam(name="x", label="x", description="", value=0, uid=[], range=(0, 10000, 1)),
desc.FloatParam(name="y", label="y", description="", value=0, uid=[], range=(0, 10000, 1)),
]),
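
A small illustration of the "radial3" model listed above, assuming the usual polynomial radial distortion formulation (a hypothetical sketch, not code taken from AliceVision):

# Hypothetical sketch: standard radial distortion with 3 coefficients (k1, k2, k3).
def apply_radial3(x, y, k1, k2, k3):
    """x, y: normalized image coordinates centered on the principal point."""
    r2 = x * x + y * y                                    # squared distance to the optical center
    factor = 1.0 + k1 * r2 + k2 * r2 ** 2 + k3 * r2 ** 3  # radial displacement polynomial
    return x * factor, y * factor                         # distorted coordinates
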
@ -96,6 +115,21 @@ class CameraInit(desc.CommandLineNode):
size = desc.DynamicNodeSize('viewpoints')
documentation = '''
This node describes your dataset. It lists the Viewpoint candidates, the guess about the type of optics, the initial focal length
and which images share the same internal camera parameters, as well as potential camera rigs.
When you import new images into Meshroom, this node is automatically configured from the analysis of the image metadata.
The software can work with images that have no metadata, but it is recommended to have it for robustness.
### Metadata
Metadata allows images to be grouped together and provides an initialization of the focal length (in pixels).
The needed metadata are:
* **Focal Length**: the focal length in mm.
* **Make** & **Model**: this information makes it possible to convert the focal length in mm into a focal length in pixels, using an embedded sensor database.
* **Serial Number**: uniquely identifies a device, so multiple devices with the same Make and Model can be differentiated and their internal parameters optimized separately (in the photogrammetry case).
'''
inputs = [
desc.ListAttribute(
name="viewpoints",

@ -7,6 +7,11 @@ class ConvertSfMFormat(desc.CommandLineNode):
commandLine = 'aliceVision_convertSfMFormat {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
Convert an SfM scene from one file format to another.
It can also be used to remove specific parts of an SfM scene (like filtering out all 3D landmarks or filtering 2D observations).
'''
inputs = [
desc.File(
name='input',

@ -10,6 +10,16 @@ class DepthMap(desc.CommandLineNode):
parallelization = desc.Parallelization(blockSize=3)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
For each camera that has been estimated by Structure-From-Motion, this node estimates a depth value per pixel.
Adjust the downscale factor to compute depth maps at a higher/lower resolution.
Use a downscale factor of one (full-resolution) only if the quality of the input images is really high (camera on a tripod with high-quality optics).
## Online
[https://alicevision.org/#photogrammetry/depth_maps_estimation](https://alicevision.org/#photogrammetry/depth_maps_estimation)
'''
inputs = [
desc.File(
name='input',

@ -10,6 +10,11 @@ class DepthMapFilter(desc.CommandLineNode):
parallelization = desc.Parallelization(blockSize=10)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
Filter depth map values that are not coherent in multiple depth maps.
This allows unstable points to be filtered out before starting the fusion of all depth maps in the Meshing node.
'''
inputs = [
desc.File(
name='input',

@ -6,6 +6,11 @@ from meshroom.core import desc
class ExportAnimatedCamera(desc.CommandLineNode):
commandLine = 'aliceVision_exportAnimatedCamera {allParams}'
documentation = '''
Convert cameras from an SfM scene into an animated camera in the Alembic file format.
Based on the input image filenames, it will recognize the input video sequence to create an animated camera.
'''
inputs = [
desc.File(
name='input',

@ -6,6 +6,13 @@ from meshroom.core import desc
class ExportMaya(desc.CommandLineNode):
commandLine = 'aliceVision_exportMeshroomMaya {allParams}'
documentation = '''
Export a scene for Autodesk Maya, with an Alembic file describing the SfM: cameras and 3D points.
It will export half-size undistorted images to use as image planes for cameras and also export thumbnails.
Use the MeshroomMaya plugin to load the ABC file. It will recognize the file structure and set up the scene.
MeshroomMaya contains a user interface to browse all cameras.
'''
inputs = [
desc.File(
name='input',

@ -9,6 +9,26 @@ class FeatureExtraction(desc.CommandLineNode):
parallelization = desc.Parallelization(blockSize=40)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
This node extracts distinctive groups of pixels that are, to some extent, invariant to changing camera viewpoints during image acquisition.
Hence, a feature in the scene should have similar feature descriptions in all images.
This node implements multiple methods:
* **SIFT**
The most standard method. This is the default and recommended value for all use cases.
* **AKAZE**
AKAZE can be an interesting solution to extract features in challenging conditions. It may be able to match wider viewpoint angles than SIFT but has drawbacks:
it may extract too many features, and their distribution across the image is not always good.
It is known to perform well on challenging surfaces such as skin.
* **CCTAG**
CCTag is a marker type with 3 or 4 crowns. You can put markers in the scene during the shooting session to automatically re-orient and re-scale the scene to a known size.
It is robust to motion blur, depth of field and occlusion. Be careful to keep enough white margin around your CCTags.
## Online
[https://alicevision.org/#photogrammetry/natural_feature_extraction](https://alicevision.org/#photogrammetry/natural_feature_extraction)
'''
inputs = [
desc.File(
name='input',
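
For intuition, extracting SIFT features with OpenCV gives an idea of what this node produces per image (a sketch only; AliceVision uses its own extractor, not OpenCV):

import cv2

# Hypothetical input file; any photograph works.
img = cv2.imread("image.jpg", cv2.IMREAD_GRAYSCALE)
sift = cv2.SIFT_create()
keypoints, descriptors = sift.detectAndCompute(img, None)
# Typically a few thousand keypoints, each with a 128-dimensional descriptor.
print(len(keypoints), descriptors.shape)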

@ -9,6 +9,28 @@ class FeatureMatching(desc.CommandLineNode):
parallelization = desc.Parallelization(blockSize=20)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
This node performs the matching of all features between the candidate image pairs.
It is performed in 2 steps:
1/ **Photometric Matches**
It performs photometric matching between the sets of feature descriptors from the 2 input images.
For each feature descriptor on the first image, it looks for the 2 closest descriptors in the second image and uses a relative threshold between them.
This criterion discards features on repetitive structures but has proved to be robust.
2/ **Geometric Filtering**
It performs a geometric filtering of the photometric match candidates.
It uses the feature positions in the images to perform a geometric filtering based on epipolar geometry, in an outlier detection framework
called RANSAC (RANdom SAmple Consensus). It randomly selects a small set of feature correspondences and computes the fundamental (or essential) matrix,
then it checks the number of features that validate this model and iterates through the RANSAC framework.
## Online
[https://alicevision.org/#photogrammetry/feature_matching](https://alicevision.org/#photogrammetry/feature_matching)
'''
inputs = [
desc.File(
name='input',
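
A minimal sketch of the photometric step with the relative threshold described above, assuming descriptors are stored as NumPy arrays (illustrative, not the AliceVision implementation):

import numpy as np

def ratio_test_matches(desc_a, desc_b, ratio=0.8):
    """For each descriptor of image A, keep its nearest neighbour in image B
    only if it is clearly closer than the second nearest."""
    matches = []
    for i, d in enumerate(desc_a):
        dists = np.linalg.norm(desc_b - d, axis=1)   # distance to every descriptor of B
        j1, j2 = np.argsort(dists)[:2]               # two closest candidates
        if dists[j1] < ratio * dists[j2]:            # relative threshold between them
            matches.append((i, int(j1)))
    return matches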

@ -10,6 +10,11 @@ class GlobalSfM(desc.CommandLineNode):
commandLine = 'aliceVision_globalSfM {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
Performs the Structure-From-Motion with a global approach.
It is known to be faster but less robust to challenging datasets than the Incremental approach.
'''
inputs = [
desc.File(
name='input',

@ -8,6 +8,28 @@ class ImageMatching(desc.CommandLineNode):
commandLine = 'aliceVision_imageMatching {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
The goal of this node is to select the image pairs to match. The ambition is to find the images that are looking at the same areas of the scene.
Thanks to this node, the FeatureMatching node will only compute the matches between the selected image pairs.
It provides multiple methods:
* **VocabularyTree**
It uses image retrieval techniques to find images that share some content without the cost of resolving all feature matches in detail.
Each image is represented by a compact image descriptor, which makes it possible to compute the distance between all image descriptors very efficiently.
If your scene contains fewer images than "Voc Tree: Minimal Number of Images", all image pairs will be selected.
* **Sequential**
If your input is a video sequence, you can use this option to link images between them over time.
* **SequentialAndVocabularyTree**
Combines the sequential approach with the Vocabulary Tree to enable connections between keyframes at different times.
* **Exhaustive**
Exports all image pairs.
* **Frustum**
If images have known poses, computes the intersection between camera frustums to create the list of image pairs.
## Online
[https://alicevision.org/#photogrammetry/image_matching](https://alicevision.org/#photogrammetry/image_matching)
'''
inputs = [
desc.File(
name='input',
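
The VocabularyTree idea above can be illustrated with a naive pair selection on compact per-image descriptors (a sketch with hypothetical inputs, far simpler than a real vocabulary tree):

import numpy as np

def select_pairs(descriptors, num_matches=5):
    """descriptors: (num_images, dim) array of compact global image descriptors."""
    pairs = set()
    for i, d in enumerate(descriptors):
        dists = np.linalg.norm(descriptors - d, axis=1)
        dists[i] = np.inf                            # never pair an image with itself
        for j in np.argsort(dists)[:num_matches]:    # keep only the closest images
            pairs.add(tuple(sorted((i, int(j)))))
    return sorted(pairs)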

@ -9,6 +9,14 @@ class ImageMatchingMultiSfM(desc.CommandLineNode):
# use both SfM inputs to define Node's size
size = desc.MultiDynamicNodeSize(['input', 'inputB'])
documentation = '''
The goal of this node is to select the image pairs to match in the context of an SfM augmentation.
The ambition is to find the images that are looking at the same areas of the scene.
Thanks to this node, the FeatureMatching node will only compute the matches between the selected image pairs.
## Online
[https://alicevision.org/#photogrammetry/image_matching](https://alicevision.org/#photogrammetry/image_matching)
'''
inputs = [
desc.File(
name='input',

@ -7,6 +7,13 @@ from meshroom.core import desc
class KeyframeSelection(desc.CommandLineNode):
commandLine = 'aliceVision_utils_keyframeSelection {allParams}'
documentation = '''
Allows you to extract keyframes from a video and insert metadata.
It can extract frames from a synchronized multi-camera rig.
You can extract frames at a regular interval by configuring only the min/maxFrameStep.
'''
inputs = [
desc.ListAttribute(
elementDesc=desc.File(

@ -29,6 +29,17 @@ class LDRToHDR(desc.CommandLineNode):
cpu = desc.Level.INTENSIVE
ram = desc.Level.NORMAL
documentation='''
This node fuses LDR (Low Dynamic Range) images with multi-bracketing into HDR (High Dynamic Range) images.
It is done in 2 steps:
1/ Estimation of the Camera Response Function (CRF)
2/ HDR fusion relying on the CRF
'''
inputs = [
desc.File(
name='input',
@ -40,7 +51,7 @@ class LDRToHDR(desc.CommandLineNode):
desc.IntParam(
name='userNbBrackets',
label='Number of Brackets',
description='Number of exposure brackets per HDR image (0 for automatic).',
description='Number of exposure brackets per HDR image (0 for automatic detection).',
value=0,
range=(0, 15, 1),
uid=[0],
@ -111,17 +122,18 @@ class LDRToHDR(desc.CommandLineNode):
description="Bypass HDR creation and use the medium bracket as the source for the next steps",
value=False,
uid=[0],
advanced=True,
),
desc.ChoiceParam(
name='calibrationMethod',
label='Calibration Method',
description="Method used for camera calibration \n"
" * linear \n"
" * robertson \n"
" * debevec \n"
" * grossberg \n"
" * laguerre",
values=['linear', 'robertson', 'debevec', 'grossberg', 'laguerre'],
" * Linear: Disable the calibration and assumes a linear Camera Response Function. If images are encoded in a known colorspace (like sRGB for JPEG), the images will be automatically converted to linear. \n"
" * Debevec: This is the standard method for HDR calibration. \n"
" * Grossberg: Based on learned database of cameras, it allows to reduce the CRF to few parameters while keeping all the precision. \n"
" * Laguerre: Simple but robust method estimating the minimal number of parameters. \n"
" * Robertson: First method for HDR calibration in the literature. \n",
values=['linear', 'debevec', 'grossberg', 'laguerre', 'robertson'],
value='debevec',
exclusive=True,
uid=[0],
@ -142,7 +154,7 @@ class LDRToHDR(desc.CommandLineNode):
desc.ChoiceParam(
name='fusionWeight',
label='Fusion Weight',
description="Weight function used to fuse all LDR images together \n"
description="Weight function used to fuse all LDR images together:\n"
" * gaussian \n"
" * triangle \n"
" * plateau",

@ -9,6 +9,10 @@ class MeshDecimate(desc.CommandLineNode):
cpu = desc.Level.NORMAL
ram = desc.Level.NORMAL
documentation = '''
This node allows you to reduce the density of the Mesh.
'''
inputs = [
desc.File(
name="input",

@ -6,6 +6,11 @@ from meshroom.core import desc
class MeshDenoising(desc.CommandLineNode):
commandLine = 'aliceVision_meshDenoising {allParams}'
documentation = '''
This experimental node allows you to reduce noise on a Mesh.
For now, the parameters are difficult to control and vary a lot from one dataset to another.
'''
inputs = [
desc.File(
name='input',

@ -6,6 +6,11 @@ from meshroom.core import desc
class MeshFiltering(desc.CommandLineNode):
commandLine = 'aliceVision_meshFiltering {allParams}'
documentation = '''
This node applies Laplacian filtering to remove local defects from the raw Meshing cut.
'''
inputs = [
desc.File(
name='inputMesh',

@ -9,6 +9,10 @@ class MeshResampling(desc.CommandLineNode):
cpu = desc.Level.NORMAL
ram = desc.Level.NORMAL
documentation = '''
This node allows you to recompute the mesh surface with a new topology and a uniform density.
'''
inputs = [
desc.File(
name="input",

@ -9,6 +9,17 @@ class Meshing(desc.CommandLineNode):
cpu = desc.Level.INTENSIVE
ram = desc.Level.INTENSIVE
documentation = '''
This node creates a dense geometric surface representation of the scene.
First, it fuses all the depth maps into a global dense point cloud with an adaptive resolution.
It then performs a 3D Delaunay tetrahedralization, and a voting procedure computes weights on the cells and on the facets connecting them.
A Graph Cut Max-Flow is applied to optimally cut the volume. This cut represents the extracted mesh surface.
## Online
[https://alicevision.org/#photogrammetry/meshing](https://alicevision.org/#photogrammetry/meshing)
'''
inputs = [
desc.File(
name='input',
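
The tetrahedralization step can be illustrated with SciPy (only the Delaunay part; the voting and graph-cut steps described above are not sketched here):

import numpy as np
from scipy.spatial import Delaunay

points = np.random.rand(1000, 3)      # stand-in for the fused dense point cloud
tetra = Delaunay(points)              # 3D Delaunay tetrahedralization
print(tetra.simplices.shape)          # (num_cells, 4): vertex indices of each tetrahedron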

@ -10,6 +10,13 @@ class PanoramaCompositing(desc.CommandLineNode):
commandLine = 'aliceVision_panoramaCompositing {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
Once the images have been transformed geometrically (in PanoramaWarping),
they have to be fused together into a single panorama image, which looks like a single photograph.
The Multi-band Blending method provides the best quality. It averages the pixel values using multiple bands in the frequency domain.
Multiple cameras contribute to the low frequencies and only the best one contributes to the high frequencies.
'''
inputs = [
desc.File(
name='input',
@ -31,7 +38,10 @@ class PanoramaCompositing(desc.CommandLineNode):
desc.ChoiceParam(
name='compositerType',
label='Compositer Type',
description='Which compositer should be used to blend images',
description='Which compositer should be used to blend images:\n'
' * multiband: high quality transition by fusing images by frequency bands\n'
' * replace: debug option with straight transitions\n'
' * alpha: debug option with linear transitions\n',
value='multiband',
values=['replace', 'alpha', 'multiband'],
exclusive=True,
@ -40,7 +50,10 @@ class PanoramaCompositing(desc.CommandLineNode):
desc.ChoiceParam(
name='overlayType',
label='Overlay Type',
description='Which overlay to display on top of panorama for debug',
description='Overlay on top of panorama to analyze transitions:\n'
' * none: no overlay\n'
' * borders: display image borders\n'
' * seams: display transitions between images\n',
value='none',
values=['none', 'borders', 'seams'],
exclusive=True,
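
A very reduced, two-band illustration of the multi-band idea (low frequencies averaged across contributors, high frequencies taken from a single image); this is an assumption-laden sketch on grayscale float arrays, not the node's algorithm:

import numpy as np
from scipy.ndimage import gaussian_filter

def blend_two_bands(images, weights, best_index, sigma=5.0):
    """images: list of 2D float arrays; weights: per-image masks summing to 1 per pixel."""
    low = sum(w * gaussian_filter(img, sigma) for img, w in zip(images, weights))
    best = images[best_index]
    high = best - gaussian_filter(best, sigma)   # high-frequency detail of the chosen image
    return low + high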

@ -10,6 +10,10 @@ class PanoramaEstimation(desc.CommandLineNode):
commandLine = 'aliceVision_panoramaEstimation {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
Estimate relative camera rotations between input images.
'''
inputs = [
desc.File(
name='input',

@ -10,6 +10,18 @@ class PanoramaInit(desc.CommandLineNode):
commandLine = 'aliceVision_panoramaInit {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
This node allows you to set up the Panorama:
1/ Initialize the cameras from known positions in an XML file (provided by
["Roundshot VR Drive"](https://www.roundshot.com/xml_1/internet/fr/application/d394/d395/f396.cfm)).
2/ Set up Full Fisheye Optics (to use an Equirectangular camera model).
3/ Automatically detect the Fisheye Circle (radius + center) in the input images, or adjust it manually.
'''
inputs = [
desc.File(
name='input',

@ -10,6 +10,10 @@ class PanoramaWarping(desc.CommandLineNode):
commandLine = 'aliceVision_panoramaWarping {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
Compute the image warping for each input image in the panorama coordinate system.
'''
inputs = [
desc.File(
name='input',
@ -21,7 +25,8 @@ class PanoramaWarping(desc.CommandLineNode):
desc.IntParam(
name='panoramaWidth',
label='Panorama Width',
description='Panorama width (pixels). 0 For automatic size',
description='Panorama Width (in pixels).\n'
'Set 0 to let the software choose the size automatically, so that on average the input resolution is kept (to limit over/under sampling).',
value=10000,
range=(0, 50000, 1000),
uid=[0]
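
The automatic size described above amounts to keeping roughly one panorama pixel per input pixel; under the assumption of an equirectangular output covering 360 degrees, a back-of-the-envelope version is:

def auto_panorama_width(image_width_px, hfov_degrees):
    """Width of a 360 degree panorama that keeps the input resolution on average."""
    return int(round(360.0 / hfov_degrees * image_width_px))

print(auto_panorama_width(6000, 90.0))   # 90 degree FoV images of 6000 px -> 24000 px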

@ -9,6 +9,10 @@ class PrepareDenseScene(desc.CommandLineNode):
parallelization = desc.Parallelization(blockSize=40)
commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
documentation = '''
This node exports undistorted images so that the depth maps and texturing can be computed on Pinhole images without distortion.
'''
inputs = [
desc.File(
name='input',

@ -10,6 +10,11 @@ import os
class Publish(desc.Node):
size = desc.DynamicNodeSize('inputFiles')
documentation = '''
This node allows you to copy files into a specific folder.
'''
inputs = [
desc.ListAttribute(
elementDesc=desc.File(

@ -7,6 +7,18 @@ class SfMAlignment(desc.CommandLineNode):
commandLine = 'aliceVision_utils_sfmAlignment {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
This node allows you to change the coordinate system of one SfM scene to align it with another one.
The alignment can be based on:
* from_cameras_viewid: Align cameras in both SfM on the specified viewId
* from_cameras_poseid: Align cameras in both SfM on the specified poseId
* from_cameras_filepath: Align cameras with a filepath matching, using 'fileMatchingPattern'
* from_cameras_metadata: Align cameras with matching metadata, using 'metadataMatchingList'
* from_markers: Align from markers with the same Id
'''
inputs = [
desc.File(
name='input',

@ -7,6 +7,10 @@ class SfMTransfer(desc.CommandLineNode):
commandLine = 'aliceVision_utils_sfmTransfer {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
This node allows you to transfer poses and/or intrinsics from one SfM scene onto another one.
'''
inputs = [
desc.File(
name='input',

@ -7,6 +7,18 @@ class SfMTransform(desc.CommandLineNode):
commandLine = 'aliceVision_utils_sfmTransform {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
This node allows you to change the coordinate system of an SfM scene.
The transformation can be based on:
* transformation: Apply a given transformation
* auto_from_cameras: Fit all cameras into a box [-1,1]
* auto_from_landmarks: Fit all landmarks into a box [-1,1]
* from_single_camera: Use a specific camera as the origin of the coordinate system
* from_markers: Align specific markers to custom coordinates
'''
inputs = [
desc.File(
name='input',
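
The "auto_from_cameras" option above can be pictured as computing a uniform scale and translation that fits the camera centers into the [-1, 1] box (a sketch with hypothetical inputs):

import numpy as np

def fit_to_unit_box(camera_centers):
    """camera_centers: (N, 3) array. Returns (scale, translation) such that
    scale * (camera_centers + translation) lies inside [-1, 1]^3."""
    lo, hi = camera_centers.min(axis=0), camera_centers.max(axis=0)
    translation = -(lo + hi) / 2.0                      # recenter the bounding box on the origin
    scale = 2.0 / max(float(np.max(hi - lo)), 1e-9)     # map the largest extent to 2
    return scale, translation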

@ -51,6 +51,11 @@ def progressUpdate(size=None, progress=None, logManager=None):
class SketchfabUpload(desc.Node):
size = desc.DynamicNodeSize('inputFiles')
documentation = '''
Upload a textured mesh to Sketchfab.
'''
inputs = [
desc.ListAttribute(
elementDesc=desc.File(

@ -10,6 +10,59 @@ class StructureFromMotion(desc.CommandLineNode):
commandLine = 'aliceVision_incrementalSfM {allParams}'
size = desc.DynamicNodeSize('input')
documentation = '''
This node will analyze feature matches to understand the geometric relationship behind all the 2D observations,
and infer the rigid scene structure (3D points) with the pose (position and orientation) and internal calibration of all cameras.
The pipeline is a growing reconstruction process (called incremental SfM): it first computes an initial two-view reconstruction that is iteratively extended by adding new views.
1/ Fuse 2-View Matches into Tracks
It fuses all feature matches between image pairs into tracks. Each track represents a candidate point in space, visible from multiple cameras.
However, at this step of the pipeline, it still contains many outliers.
2/ Initial Image Pair
It chooses the best initial image pair. This choice is critical for the quality of the final reconstruction.
It should indeed provide robust matches and contain reliable geometric information.
So, this image pair should maximize the number of matches and the distribution of the corresponding features in each image.
But at the same time, the angle between the cameras should also be large enough to provide reliable geometric information.
3/ Initial 2-View Geometry
It computes the fundamental matrix between the 2 selected images and considers that the first one is the origin of the coordinate system.
4/ Triangulate
Now with the pose of the 2 first cameras, it triangulates the corresponding 2D features into 3D points.
5/ Next Best View Selection
After that, it selects all the images that have enough associations with the features that are already reconstructed in 3D.
6/ Estimate New Cameras
Based on these 2D-3D associations, it performs the resectioning of each of these new cameras.
The resectioning is a Perspective-n-Point algorithm (PnP) in a RANSAC framework to find the pose of the camera that validates most of the feature associations.
On each camera, a non-linear minimization is performed to refine the pose.
7/ Triangulate
From these new camera poses, some tracks become visible from 2 or more resected cameras, and it triangulates them.
8/ Optimize
It performs a Bundle Adjustment to refine everything: the extrinsic and intrinsic parameters of all cameras as well as the positions of all 3D points.
It filters the results of the Bundle Adjustment by removing all observations that have high reprojection error or insufficient angles between observations.
9/ Loop from 5 to 9
As we have triangulated new points, we get more image candidates for the next best view selection, and we can iterate from 5 to 9.
It iterates like that, adding cameras and triangulating new 2D features into 3D points and removing 3D points that have become invalidated, until we cannot localize new views.
## Online
[https://alicevision.org/#photogrammetry/sfm](https://alicevision.org/#photogrammetry/sfm)
'''
inputs = [
desc.File(
name='input',
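
The triangulation steps (4/ and 7/) can be sketched with the classic linear DLT method, assuming two 3x4 projection matrices and matching 2D observations (illustrative only):

import numpy as np

def triangulate_point(P1, P2, x1, x2):
    """P1, P2: 3x4 projection matrices; x1, x2: 2D points (pixels) in each image."""
    A = np.array([
        x1[0] * P1[2] - P1[0],
        x1[1] * P1[2] - P1[1],
        x2[0] * P2[2] - P2[0],
        x2[1] * P2[2] - P2[1],
    ])
    _, _, vt = np.linalg.svd(A)       # the solution is the last right singular vector
    X = vt[-1]                        # homogeneous 3D point
    return X[:3] / X[3]               # back to Euclidean coordinates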

@ -7,6 +7,20 @@ class Texturing(desc.CommandLineNode):
commandLine = 'aliceVision_texturing {allParams}'
cpu = desc.Level.INTENSIVE
ram = desc.Level.INTENSIVE
documentation = '''
This node computes the texturing on the mesh.
If the mesh has no associated UV, it automatically computes UV maps.
For each triangle, it uses the visibility information associated with each vertex to retrieve the texture candidates.
It selects the best cameras based on the resolution covering the triangle. Finally, it averages the pixel values using multiple bands in the frequency domain.
Many cameras contribute to the low frequencies and only the best ones contribute to the high frequencies.
## Online
[https://alicevision.org/#photogrammetry/texturing](https://alicevision.org/#photogrammetry/texturing)
'''
inputs = [
desc.File(
name='input',
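
The camera selection described above can be illustrated by picking, for a given triangle, the view where its projection covers the most pixels (a simplified sketch, not the node's exact criterion):

def projected_area(p0, p1, p2):
    """Area in pixels of a triangle projected into an image (2D points as (x, y))."""
    ax, ay = p1[0] - p0[0], p1[1] - p0[1]
    bx, by = p2[0] - p0[0], p2[1] - p0[1]
    return 0.5 * abs(ax * by - ay * bx)

def best_camera(projections):
    """projections: per-camera tuple of the 3 projected vertices, or None if not visible."""
    areas = [projected_area(*tri) if tri is not None else -1.0 for tri in projections]
    return areas.index(max(areas))    # index of the camera seeing the triangle at highest resolution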