diff --git a/meshroom/nodes/blender/RenderAnimatedCamera.py b/meshroom/nodes/blender/RenderAnimatedCamera.py index 2d5d9b20..803fdac6 100644 --- a/meshroom/nodes/blender/RenderAnimatedCamera.py +++ b/meshroom/nodes/blender/RenderAnimatedCamera.py @@ -8,32 +8,17 @@ currentDir = os.path.dirname(os.path.abspath(__file__)) class RenderAnimatedCamera(desc.CommandLineNode): commandLine = '{blenderPathValue} -b --python {scriptPathValue} -- {allParams}' - category = 'Rendition' + category = 'Export' documentation = ''' - The goal of this node is to make a render of the sfmData a Blender scene using Blender's API. - It supports both Point Clouds (.abc) and Meshes (.obj) and can use a background image of you use undistorted images. - We have several inputs: - **blenderPath points to the blender executable - **scriptPath point to the script containing the code. - **sfMCameraPath point to the AnimatedCamera we are going to be tracking. - **useBackground determines if you want to use images as a background. - **undistortedImages path toward the images you can use as background. - **sfMData the data you want to render. - (point cloud) - **pointCloudDensity changes the density of the point cloud rendered. - **particleSize changes the size of each point in the point cloud rendered. - **particleColor changes the color of each point in the point cloud rendered. - (Mesh) - **edgeColor is the color of the outline of the mesh rendered. - **outputFormat is the video format we want to export of rendition in. - **outputPath point to where is video is going to be saved. + This node makes a rendering of the sfmData scene through an animated camera using the Blender rendering engine. + It supports both Point Clouds (.abc) and Meshes (.obj). ''' inputs = [ desc.File( name='blenderPath', label='Blender Path', - description='''Path to blender binary.''', + description='''Path to blender executable''', value=os.environ.get('BLENDER',"C:/Program Files/Blender Foundation/Blender 2.91/blender.exe"), uid=[], group='', @@ -41,93 +26,102 @@ class RenderAnimatedCamera(desc.CommandLineNode): desc.File( name='scriptPath', label='Script Path', - description='''Path to the script in the project.''', + description='''Path to the internal script for rendering in Blender''', value=os.path.join(currentDir, 'scripts' ,'renderAnimatedCameraInBlender.py'), uid=[], group='', ), desc.File( - name='sfMCameraPath', - label='Camera Path', - description='''Input Camera path from the sfm.''', + name='sfmCameraPath', + label='SfmData with Animated Camera', + description='''SfmData with the animated camera to render''', + value='', + uid=[0], + ), + desc.File( + name='model', + label='Model', + description='Point Cloud or Mesh used in the rendering', value='', uid=[0], ), desc.BoolParam( name='useBackground', label='Display Background', - description='Tick if you want to use original image dataset as background', + description='Use the undistorted images as background', value=True, uid=[0], ), desc.File( name='undistortedImages', - label='Images Folder', - description='''Input the processed images.''', + label='Undistorted Images Folder', + description='''Input folder with the undistorted images''', value='', uid=[0], - enabled=lambda node: node.displayBackground.useBackground.value, + enabled=lambda node: node.useBackground.value, ), - desc.File( - name='sfMData', - label='SFM Data', - description='''Input the previously used SFM Data.''', - value='', - uid=[0], + desc.GroupAttribute( + name="pointCloudParams", + label="Point Cloud Settings", + 
group=None, + enabled=lambda node: node.model.value.lower().endswith('.abc'), + description="Setting of the render if we use a Point Cloud", + groupDesc=[ + desc.FloatParam( + name='pointCloudDensity', + label='Density', + description='''Reduce the points density for the point cloud rendering''', + value=0.25, + range=(0.01, 0.5, 0.01), + uid=[0], + ), + desc.FloatParam( + name='particleSize', + label='Particle Size', + description='''Scale of particles used to show the point cloud''', + value=0.1, + range=(0.01, 1, 0.01), + uid=[0], + ), + desc.ChoiceParam( + name='particleColor', + label='Particle Color', + description='''Color of particles used to show the point cloud''', + value='Red', + values=['Grey', 'White', 'Red', 'Green', 'Magenta'], + exclusive=True, + uid=[0], + joinChar=',', + ), + ] + ), + desc.GroupAttribute( + name="meshParams", + label="Mesh Settings", + group=None, + enabled=lambda node: node.model.value.lower().endswith('.obj'), + description="Setting of the render if we use a Mesh", + groupDesc=[ + desc.ChoiceParam( + name='edgeColor', + label='Edge Color', + description='''Color of the edges of the rendered object''', + value='Red', + values=['Grey', 'White', 'Red', 'Green', 'Magenta'], + exclusive=True, + uid=[0], + joinChar=',', + ), + ] ), - desc.GroupAttribute(name="isCloudPoint", label="Point Cloud Settings", group=None, enabled=lambda node: node.sfMData.value.endswith('.abc'), description="Setting of the render if we use a Point Cloud. (SFM Data is .abc)", groupDesc=[ - desc.FloatParam( - name='pointCloudDensity', - label='Point Cloud Density', - description='''Number of point from the point cloud rendered''', - value=0.25, - range=(0.01, 0.5, 0.01), - uid=[0], - enabled=lambda node: node.sfMData.value.endswith('.abc'), - ), - desc.FloatParam( - name='particleSize', - label='Particle Size', - description='''Scale of every particle used to show the point cloud''', - value=0.25, - range=(0.01, 1, 0.01), - uid=[0], - enabled=lambda node: node.sfMData.value.endswith('.abc'), - ), - desc.ChoiceParam( - name='particleColor', - label='Particle Color', - description='''Color of every particle used to show the point cloud (SFM Data is .abc)''', - value='Red', - values=['Grey', 'White', 'Red', 'Green', 'Magenta'], - exclusive=True, - uid=[0], - joinChar=',', - enabled=lambda node: node.sfMData.value.endswith('.abc'), - ), - ]), - desc.GroupAttribute(name="isMesh", label="Mesh Settings", group=None, enabled=lambda node: node.sfMData.value.endswith('.obj'), description="Setting of the render if we use a Mesh. 
(SFM Data is .obj)", groupDesc=[ - desc.ChoiceParam( - name='edgeColor', - label='Edge Color', - description='''Color of the edges of the rendered object (SFM Data is .obj)''', - value='Red', - values=['Grey', 'White', 'Red', 'Green', 'Magenta'], - exclusive=True, - uid=[0], - joinChar=',', - enabled=lambda node: node.sfMData.value.endswith('.obj'), - ), - ]), desc.ChoiceParam( - name='outputFormat', - label='Output Format', + name='videoFormat', + label='Video Format', description='''Choose the format of the output among this list of supported format''', value='mkv', values=['mkv', 'mp4', 'mov', 'avi'], exclusive=True, uid=[0], - joinChar=',', ), ] @@ -135,7 +129,7 @@ class RenderAnimatedCamera(desc.CommandLineNode): desc.File( name='outputPath', label='Output Path', - description='''Output folder.''', + description='''Output Folder''', value=desc.Node.internalFolder, uid=[], ) diff --git a/meshroom/nodes/blender/scripts/renderAnimatedCameraInBlender.py b/meshroom/nodes/blender/scripts/renderAnimatedCameraInBlender.py index 1cfce3ce..7549c990 100644 --- a/meshroom/nodes/blender/scripts/renderAnimatedCameraInBlender.py +++ b/meshroom/nodes/blender/scripts/renderAnimatedCameraInBlender.py @@ -7,6 +7,7 @@ import sys # to get command line args import argparse # to parse options for us and print a nice help message from distutils.util import strtobool + def main(): argv = sys.argv @@ -23,104 +24,82 @@ def main(): parser = argparse.ArgumentParser(description=usage_text) - parser.add_argument( - "--sfMCameraPath", dest="SFM_cam_path", metavar='FILE', required=True, - help="This text will be used to render an image", + "--sfmCameraPath", metavar='FILE', required=True, + help="sfmData with the animated camera.", ) parser.add_argument( - "--useBackground", dest="Use_Background", type=strtobool, required=True, + "--useBackground", type=strtobool, required=True, help="Diplay the background image or not.", ) parser.add_argument( - "--undistortedImages", dest="undisto_images", metavar='FILE', required=False, + "--undistortedImages", metavar='FILE', required=False, help="Save the generated file to the specified path", ) parser.add_argument( - "--sfMData", dest="SFM_Data", metavar='FILE', required=True, - help="These info carry the Point Cloud or mesh we need.", + "--model", metavar='FILE', required=True, + help="Point Cloud or Mesh used in the rendering.", ) - #Point Cloud Arguments (When SFM Data is .abc) + # Point Cloud Arguments (when SFM Data is .abc) parser.add_argument( - "--pointCloudDensity", dest="Point_Cloud_Density", type=float, required=False, + "--pointCloudDensity", type=float, required=False, help="Number of point from the cloud rendered", ) parser.add_argument( - "--particleSize", dest="Particle_Size", type=float, required=False, - help="Scale of every particle used to show the point cloud", + "--particleSize", type=float, required=False, + help="Scale of particles used to show the point cloud", ) parser.add_argument( - "--particleColor", dest="Particle_Color", type=str, required=False, - help="Color of every particle used to show the point cloud (SFM Data is .abc)", + "--particleColor", type=str, required=False, + help="Color of particles used to show the point cloud (SFM Data is .abc)", ) - #Mesh Arguments (When SFM Data is .obj) + # Mesh Arguments (when SFM Data is .obj) parser.add_argument( - "--edgeColor", dest="Edge_Color", type=str, required=False, + "--edgeColor", type=str, required=False, help="Color of the edges of the rendered object (SFM Data is .obj)", ) - - #Output 
Arguments + # Output Arguments parser.add_argument( - "--outputFormat", dest="Output_Format", type=str, required=True, + "--videoFormat", type=str, required=True, help="Format of the video output", ) parser.add_argument( - "--outputPath", dest="output_path", metavar='FILE', required=True, + "--outputPath", metavar='FILE', required=True, help="Render an image to the specified path", ) - args = parser.parse_args(argv) if not argv: parser.print_help() - return + return -1 - if not args.undisto_images and args.Use_Background : - print("Error: --undisto_images argument not given, aborting.") + if not args.undistortedImages and args.useBackground: + print("Error: --undistortedImages argument not given, aborting.") parser.print_help() - return - - if not args.Point_Cloud_Density and args.SFM_Data.endswith('.abc'): - print("Error: --Point_Cloud_Density argument not given, aborting.") - parser.print_help() - return + return -1 - if not args.Particle_Size and args.SFM_Data.endswith('.abc'): - print("Error: --Particle_Size argument not given, aborting.") - parser.print_help() - return - - if not args.Particle_Color and args.SFM_Data.endswith('.abc'): - print("Error: --Particle_Color argument not given, aborting.") - parser.print_help() - return - - if not args.Edge_Color and args.SFM_Data.endswith('.obj'): - print("Error: --Edge_Color argument not given, aborting.") - parser.print_help() - return - - #Clear Current Scene + # Clear Current Scene try: for objects in bpy.data.objects: bpy.data.objects.remove(objects) except RuntimeError: - print("Error: While clearing current scene") + print("Error while clearing current scene") + raise - #The Switcher is the setting for most of the colors (if you want to add some, do it here and in the arguments of the node) + # The Switcher is the setting for most of the colors (if you want to add some, do it here and in the arguments of the node) # Keep in mind that we use the same switcher for both the Edge Color and the Particle Color settings. # So if you add a color to one of them in the node, might has well add it to the other. @@ -132,10 +111,10 @@ def main(): 'Magenta':(1.0, 0, 0.75, 1) } - # import Undistorted Images + print("Import Undistorted Images") undis_imgs = [] - #Some of these variable will be very useful in the next steps keep them in mind + # Some of these variable will be very useful in the next steps keep them in mind number_of_frame = 0 offset = 0 first_image_name = "" @@ -143,21 +122,23 @@ def main(): # In this part of the code we take the undistorted images and we process some info about them # undis_imgs is the list of the images' names # first_image_name says it all in the name - # The offset is important, it corresponds to the last part of the name of the first frame - # In most case it will hopefully be 0 but the sequence may start from a more advanced frame - if args.Use_Background : - files = os.listdir(args.undisto_images) + # The offset is important, it corresponds to the last part of the name of the first frame. + # In most case, it will be 0 but the sequence may start from a more advanced frame. 
+ if args.useBackground: + files = os.listdir(args.undistortedImages) for f in files : if f.endswith(".exr") and not f.__contains__("UVMap"): undis_imgs.append({"name":f}) number_of_frame = len(undis_imgs) + print("undis_imgs: " + str(undis_imgs)) first_image_name = undis_imgs[0]['name'] offset = int(re.findall(r'\d+', first_image_name)[-1]) - 1 except RuntimeError: - print("Error: while importing the undistorted images.") + print("Error while importing the undistorted images.") + raise - #import abc (Animated Camera) + print("Import Animated Camera") try: @@ -167,14 +148,12 @@ def main(): # Once the cam has been found we select the main camera of the scene. # The rest of the code is setting up the display of the background image, - # Since it's not a simple image but an image Sequence, we have to use the offset and the number of frame - # Information taken from the previous block of code. - # The frame method is the one that align with the Point Cloud althought this may change, - # so feel free to try out the two other settings if something changes on previous nodes. - # We also have to make the scene render film transparent because we want to be able to display - # our background afterward in the next block of code + # As it is not a simple image but an image Sequence, we have to use the offset and the number of frames. - bpy.ops.wm.alembic_import(filepath=args.SFM_cam_path) + # We also have to make the scene render film transparent because we want to be able to display + # our background afterwards. + + bpy.ops.wm.alembic_import(filepath=args.sfmCameraPath) animated_cams = bpy.context.selected_editable_objects[:] cam_location = mathutils.Vector((0, 0, 0)) cam_obj = None @@ -185,8 +164,8 @@ def main(): bpy.context.scene.camera = obj cam_location = obj.location cam_obj = obj - if args.Use_Background : - bpy.ops.image.open(filepath=args.undisto_images + "/" + first_image_name, directory=args.undisto_images, files=undis_imgs, relative_path=True, show_multiview=False) + if args.useBackground : + bpy.ops.image.open(filepath=args.undistortedImages + "/" + first_image_name, directory=args.undistortedImages, files=undis_imgs, relative_path=True, show_multiview=False) bpy.data.cameras[obj.data.name].background_images.new() bpy.data.cameras[obj.data.name].show_background_images = True bpy.data.cameras[obj.data.name].background_images[0].image = bpy.data.images[first_image_name] @@ -196,26 +175,20 @@ def main(): bpy.data.cameras[obj.data.name].background_images[0].image_user.frame_start = 1 bpy.context.scene.render.film_transparent = True except RuntimeError: - print("Error: while importing the alembic file (Animated Camera).") + print("Error while importing the alembic file (Animated Camera): " + args.sfmCameraPath) + raise + + print("Create the particle plane") - #Place the particle plane try: - # This is a key step if you are displaying a Point Cloud. # We are using a particle system later in the code to display the Point Cloud. - # To make it so, we need a model for the particle, a object that will be repeated a lot to make a shape. - # In order to do that we need a plane (one face only for optimisation purpose) that always face the camera. - # So we made a plane and made it a child (in the parenting system) of the camera. That way whenever the cam - # moves, the plane moves and turn accordingly. + # We need to setup a model for the particle, a plane that always face the camera. 
+ # It is declared as a child of the camera in the parenting system, so when the camera moves, the plane moves accordingly. - # Bmesh creates the plane and put it into the mesh. We change the size of the plane according to - # the scale given in arguments. We need to adjust the plane's location because putting it at the - # exact location of the camera blocks the view. Then, the switcher gives a RGBA color according to - # the given argument. We have to use a material that uses 'Emission' - # otherwise the particle is going to react to lights and we don't really need that (the color wouldn't be clear). + # We use an 'Emission' material so it does not react to lights. # To do that we have to use the shader 'node_tree' we clear all links between nodes, create the emission node - # and connect it to the 'Material Output' node (which is what we will see in render). - # Finally we use the switcher to color the model. + # and connect it to the 'Material Output' node. plane = bpy.data.meshes.new('Plane') objectsPlane = bpy.data.objects.new(name="Plane", object_data=plane) @@ -223,8 +196,8 @@ def main(): bmesh.ops.create_grid(bm, x_segments = 1, y_segments = 1, size = 1.0) bm.to_mesh(plane) bm.free() - if (args.SFM_Data.endswith('.abc')): - objectsPlane.scale = mathutils.Vector((args.Particle_Size, args.Particle_Size, args.Particle_Size)) + if args.model.lower().endswith('.abc'): + objectsPlane.scale = mathutils.Vector((args.particleSize, args.particleSize, args.particleSize)) cam_location.y += -2.0 objectsPlane.location = cam_location bpy.context.scene.collection.objects.link(objectsPlane) @@ -237,29 +210,29 @@ def main(): objectsPlane.active_material.node_tree.links.clear() objectsPlane.active_material.node_tree.nodes.new(type='ShaderNodeEmission') objectsPlane.active_material.node_tree.links.new(objectsPlane.active_material.node_tree.nodes['Emission'].outputs['Emission'], objectsPlane.active_material.node_tree.nodes['Material Output'].inputs['Surface']) - if (args.SFM_Data.endswith('.abc')): - objectsPlane.active_material.node_tree.nodes['Emission'].inputs[0].default_value = switcher.get(args.Particle_Color, 'Invalid Color') + if args.model.lower().endswith('.abc'): + objectsPlane.active_material.node_tree.nodes['Emission'].inputs[0].default_value = switcher.get(args.particleColor, 'Invalid Color') except RuntimeError: print("Error: while setting up the particle model.") + raise - if (args.SFM_Data.endswith('.abc')): - # This part is all about importing the Point Cloud and setting up the Particle System. - # After importing the alembic, we look for a specific mesh in the file. Again the hardcoded name would be a - # problem if the previous nodes hadn't name it specificaly that (.001 because a mesh with the same name has - # been imported with the animated camera). - # Once the Point Cloud has been found. We make it the active object (important for the node_tree later). - # Then, we create a particle system on it. Render_type set to object and the said object is the plane, - # thanks to that the particle format is set to repeat the plane. Emit_from 'vert' so the points of the - # point cloud are the one rendering the particle. - # The count is the number of particle repeated on the point cloud. We use the rate given as arguments - # to give a number. Most of the following settings are just formalities but use_rotation and use_rotation_instance, - # those two make sure to use the same rotaion than the model (which is needed to have the particle always facing the camera). 
+    if args.model.lower().endswith('.abc'):
 
-    #import abc (Point Cloud)
+        print("Import ABC Point Cloud")
+
+        # After importing the alembic, we look for a specific Point Cloud in the file.
+        # We make it the active object (important for the node_tree later).
+        # Then, we create a particle system on it: render_type is set to 'OBJECT' and the instanced object is the plane.
+        # emit_from is set to 'VERT' so that each point of the point cloud emits one particle.
+        # The count is the number of particles instanced on the point cloud. We use the density given as an argument
+        # to compute that number.
+        # use_rotation and use_rotation_instance ensure that we use the same rotation as the model (which is needed to keep the particles facing the camera).
+
+        # Import Point Cloud
         try:
-            bpy.ops.wm.alembic_import(filepath=args.SFM_Data)
+            bpy.ops.wm.alembic_import(filepath=args.model)
             all_abc_info = bpy.context.selected_editable_objects[:]
             for obj in all_abc_info:
                 if obj.name == 'mvgPointCloud.001': #May have a problem with such hard code
@@ -272,8 +245,8 @@ def main():
                     particle_system.instance_object = bpy.data.objects["Plane"]
                     particle_system.emit_from = 'VERT'
 
-                    if (args.SFM_Data.endswith('.abc')):
-                        particle_system.count = int(args.Point_Cloud_Density * len(obj.data.vertices.values()))
+                    if args.model.lower().endswith('.abc'):
+                        particle_system.count = int(args.pointCloudDensity * len(obj.data.vertices.values()))
                     particle_system.frame_end = 1.0
                     particle_system.use_emit_random = False
                     particle_system.particle_size = 0.02
@@ -283,15 +256,18 @@ def main():
                     particle_system.rotation_mode = 'GLOB_X'
 
         except RuntimeError:
-            print("Error: while importing the alembic file (Point Cloud).")
-    #Or import obj directly
+            print("Error while importing the alembic file (Point Cloud): " + args.model)
+            raise
 
-    # The import via obj needs a bit of work too. For showing an outline of the object, we need to add two materials to the mesh :
+
+    # To show an outline of the object, we need to add two materials to the mesh:
     # Center and Edge, we are using a method that consists in having a "bold" effect on the Edge Material so we can see it
-    # around the Center material. We do that by using a Solidify Modifier on which we flip normals and reduce Thickness to bellow zero.
+    # around the Center material. We use a Solidify Modifier on which we flip normals and reduce Thickness to below zero.
     # The more the thickness get bellow zero, the more the egde will be largely revealed.
- elif (args.SFM_Data.endswith('.obj')): - bpy.ops.import_scene.obj(filepath=args.SFM_Data) + elif args.model.lower().endswith('.obj'): + print("Import OBJ") + + bpy.ops.import_scene.obj(filepath=args.model) center = bpy.data.materials.new('Center') center.use_nodes = True @@ -300,10 +276,9 @@ def main(): center.node_tree.links.new(center.node_tree.nodes['Emission'].outputs['Emission'], center.node_tree.nodes['Material Output'].inputs['Surface']) center.node_tree.nodes['Emission'].inputs[0].default_value = (0,0,0,0) - if not args.Use_Background and args.SFM_Data.endswith('.obj'): + if not args.useBackground and args.model.lower().endswith('.obj'): center.node_tree.nodes['Emission'].inputs[0].default_value = (0.05, 0.05, 0.05, 1) #Same Color as the no background color in blender - edge = bpy.data.materials.new('Edge') edge.use_nodes = True @@ -311,8 +286,8 @@ def main(): edge.node_tree.nodes.new(type='ShaderNodeEmission') edge.use_backface_culling = True edge.node_tree.links.new(edge.node_tree.nodes['Emission'].outputs['Emission'], edge.node_tree.nodes['Material Output'].inputs['Surface']) - edge.node_tree.nodes['Emission'].inputs[0].default_value = switcher.get(args.Edge_Color, 'Invalid Color') - + edge.node_tree.nodes['Emission'].inputs[0].default_value = switcher.get(args.edgeColor, 'Invalid Color') + bpy.data.meshes['mesh'].materials.clear() bpy.data.meshes['mesh'].materials.append(bpy.data.materials['Center']) bpy.data.meshes['mesh'].materials.append(bpy.data.materials['Edge']) @@ -325,17 +300,17 @@ def main(): bpy.data.objects['mesh'].modifiers["New"].use_flip_normals = True bpy.data.objects['mesh'].modifiers["New"].material_offset = 1 else: - print("SFM_Data isn't in the right format, alembics(.abc) and object(.obj) only are supported") + raise ValueError("sfmData: unknown file format, only alembic (.abc) and object (.obj) are supported: " + args.model) - #WE HAVE TO USE THE COMPOSITING GRAPH TO MAKE THE BACKGROUND IMAGE VISIBLE - # We setup all the nodes in the first place, even if we don't need them in our configuration. We put the setting in all of them. - # Only after having done that we can control which of the node we link in the graph according to the option we were given. + print("Create compositing graph") + + # We use the compositing graph to add the background image. # If the SFM Data is a Mesh, its extension is .obj so we have to build the graph accordingly. If the Background image setting was activated, # we need to include it in our node tree through the "Image" and Scale node. 
try: bpy.context.scene.use_nodes = True - #CREATE ALL NODES WE NEED (regardless of the options) + # Create all the nodes that we could need bpy.context.scene.node_tree.nodes.new(type="CompositorNodeAlphaOver") bpy.context.scene.node_tree.nodes.new(type="CompositorNodeScale") bpy.context.scene.node_tree.nodes.new(type="CompositorNodeImage") @@ -343,19 +318,18 @@ def main(): bpy.context.scene.node_tree.nodes.new(type="CompositorNodePremulKey") bpy.context.scene.node_tree.nodes.new(type="CompositorNodeMixRGB") - #SET THEM UP CORRECTLY (still regardless of the option) bpy.data.scenes["Scene"].node_tree.nodes["Mix"].blend_type = 'LIGHTEN' bpy.data.scenes["Scene"].node_tree.nodes["Image"].frame_duration = number_of_frame bpy.data.scenes["Scene"].node_tree.nodes["Image"].frame_offset = offset bpy.data.scenes["Scene"].node_tree.nodes["Scale"].space = 'RENDER_SIZE' bpy.data.scenes["Scene"].node_tree.nodes["Scale"].frame_method = 'CROP' - #LINKS THE NODES THAT NEEDS TO BE LINKED - if args.Use_Background : - if args.SFM_Data.endswith('.obj'): + # create links between nodes + if args.useBackground : + if args.model.lower().endswith('.obj'): bpy.context.scene.node_tree.nodes["Image"].image = bpy.data.images[first_image_name] bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Mix'].outputs['Image'], bpy.context.scene.node_tree.nodes['Composite'].inputs['Image']) - #Two Inputs of AlphaOver are named "Image" so we'll use index instead + # Two Inputs of AlphaOver are named "Image" so we use indexes instead bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Render Layers'].outputs['Image'], bpy.context.scene.node_tree.nodes['Alpha Convert'].inputs['Image']) bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Alpha Convert'].outputs['Image'], bpy.context.scene.node_tree.nodes['Mix'].inputs[2]) bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Scale'].outputs['Image'], bpy.context.scene.node_tree.nodes['Mix'].inputs[1]) @@ -363,43 +337,55 @@ def main(): else: bpy.context.scene.node_tree.nodes["Image"].image = bpy.data.images[first_image_name] bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Alpha Over'].outputs['Image'], bpy.context.scene.node_tree.nodes['Composite'].inputs['Image']) - #Two Inputs of AlphaOver are named "Image" so we'll use index instead + # Two Inputs of AlphaOver are named "Image" so we use indexes instead bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Render Layers'].outputs['Image'], bpy.context.scene.node_tree.nodes['Alpha Over'].inputs[2]) bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Scale'].outputs['Image'], bpy.context.scene.node_tree.nodes['Alpha Over'].inputs[1]) bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Image'].outputs['Image'], bpy.context.scene.node_tree.nodes['Scale'].inputs['Image']) else: - if args.SFM_Data.endswith('.obj'): + if args.model.lower().endswith('.obj'): bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Mix'].outputs['Image'], bpy.context.scene.node_tree.nodes['Composite'].inputs['Image']) - #Two Inputs of AlphaOver are named "Image" so we'll use index instead + # Two Inputs of AlphaOver are named "Image" so we use indexes instead bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Render Layers'].outputs['Image'], bpy.context.scene.node_tree.nodes['Alpha Convert'].inputs['Image']) 
bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Alpha Convert'].outputs['Image'], bpy.context.scene.node_tree.nodes['Mix'].inputs[2]) bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Scale'].outputs['Image'], bpy.context.scene.node_tree.nodes['Mix'].inputs[1]) bpy.context.scene.node_tree.links.new(bpy.context.scene.node_tree.nodes['Image'].outputs['Image'], bpy.context.scene.node_tree.nodes['Scale'].inputs['Image']) except RuntimeError: - print("Error: while composing the compositing graph.") - - ## Starts the rendering and launchs it with a blender animator player + print("Error while creating the compositing graph.") + raise try: # Setup the render format and filepath bpy.context.scene.render.image_settings.file_format = 'FFMPEG' - if args.Output_Format == 'mkv': + if args.videoFormat == 'mkv': bpy.context.scene.render.ffmpeg.format = 'MKV' - elif args.Output_Format == 'avi': + elif args.videoFormat == 'avi': bpy.context.scene.render.ffmpeg.format = 'AVI' - elif args.Output_Format == 'mov': + elif args.videoFormat == 'mov': bpy.context.scene.render.ffmpeg.format = 'QUICKTIME' else: bpy.context.scene.render.ffmpeg.format = 'MPEG4' - bpy.context.scene.render.filepath = args.output_path + '/render.' + args.Output_Format + bpy.context.scene.render.filepath = args.outputPath + '/render.' + args.videoFormat + + print("Start Rendering") # Render everything on to the filepath bpy.ops.render.render(animation=True) - # Starts a player automatically to play the output (Usefull for developpers to see what they do but it doesn't really have its place in a software) + print("Rendering Done") + # Starts a player automatically to play the output # bpy.ops.render.play_rendered_anim() except RuntimeError: - print("Error: while rendering the scene.") - + print("Error while rendering the scene") + raise + + return 0 if __name__ == "__main__": - main() \ No newline at end of file + + err = 1 + try: + err = main() + except Exception as e: + print("\n" + str(e)) + sys.exit(err) + sys.exit(err) +
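Reviewer note, not part of the patch: the node's commandLine runs Blender headless ("{blenderPathValue} -b --python {scriptPathValue} -- {allParams}"), and everything after the "--" separator is ignored by Blender and parsed by the script itself. The argv-splitting line sits outside the hunks above, so the sketch below shows the usual pattern under that assumption, reusing two of the patch's argument names.

import sys
import argparse

def parse_script_args():
    # Typical invocation, mirroring the node's command line:
    #   blender -b --python renderAnimatedCameraInBlender.py -- --model scene.abc --videoFormat mkv
    argv = sys.argv
    # Blender consumes its own options; the script only parses what follows "--".
    argv = argv[argv.index("--") + 1:] if "--" in argv else []

    parser = argparse.ArgumentParser(description="Render an animated camera (sketch).")
    parser.add_argument("--model", metavar='FILE', required=True,
                        help="Point Cloud (.abc) or Mesh (.obj) used in the rendering.")
    parser.add_argument("--videoFormat", default='mkv',
                        choices=['mkv', 'mp4', 'mov', 'avi'],
                        help="Container format of the output video.")
    return parser.parse_args(argv)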
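Not part of the patch: a sketch of how the undistorted EXR sequence is attached as a camera background once the animated camera has been imported. The new()/show_background_images/image/frame_start calls appear in the hunks; the frame_duration and frame_offset assignments are assumptions about the unchanged lines hidden between them.

import bpy

def attach_background_sequence(cam_obj, image_dir, image_files, frame_count, frame_offset):
    # image_files is a list of {"name": <filename>} dicts, as built from the folder listing.
    first_name = image_files[0]['name']
    bpy.ops.image.open(filepath=image_dir + "/" + first_name, directory=image_dir,
                       files=image_files, relative_path=True, show_multiview=False)

    cam_data = bpy.data.cameras[cam_obj.data.name]
    cam_data.show_background_images = True
    background = cam_data.background_images.new()
    background.image = bpy.data.images[first_name]
    # Drive the image sequence with the frame count and name offset computed earlier.
    background.image_user.frame_duration = frame_count
    background.image_user.frame_offset = frame_offset
    background.image_user.frame_start = 1

    # The render must stay transparent so the compositor can put the background behind it.
    bpy.context.scene.render.film_transparent = True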
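Not part of the patch: a condensed sketch of the particle-system technique used for .abc input, where every vertex of the imported point cloud instances the small emissive plane parented to the camera. Settings visible in the hunks are reused as-is; render_type, physics_type and use_rotations are assumptions about the surrounding context lines.

import bpy

def show_point_cloud(cloud_obj, plane_obj, density=0.25):
    # One particle system on the point cloud; each emitting vertex instances the plane.
    cloud_obj.modifiers.new("PointCloudParticles", 'PARTICLE_SYSTEM')
    settings = cloud_obj.particle_systems[0].settings

    settings.render_type = 'OBJECT'           # instance an object...
    settings.instance_object = plane_obj      # ...the emissive plane parented to the camera
    settings.emit_from = 'VERT'               # one particle per emitting vertex
    settings.count = int(density * len(cloud_obj.data.vertices))
    settings.frame_end = 1.0                  # emit everything on the first frame
    settings.use_emit_random = False
    settings.particle_size = 0.02
    settings.physics_type = 'NO'              # static points, no simulation
    settings.use_rotations = True
    settings.use_rotation_instance = True     # keep the plane's camera-facing rotation
    settings.rotation_mode = 'GLOB_X'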
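Not part of the patch: a sketch of the outline technique described in the .obj branch, i.e. two emission materials plus a Solidify modifier with flipped normals and a small negative thickness, so the 'Edge' material shows as a silhouette around 'Center'. The helper name, the -0.01 thickness and the dark fill colour are illustrative values, not taken from the hunks.

import bpy

def add_outline(mesh_obj, edge_color=(1.0, 0.0, 0.0, 1.0)):
    def emission_material(name, color):
        # Emission-only material so the colour does not react to scene lighting.
        mat = bpy.data.materials.new(name)
        mat.use_nodes = True
        nodes, links = mat.node_tree.nodes, mat.node_tree.links
        links.clear()
        emission = nodes.new(type='ShaderNodeEmission')
        emission.inputs[0].default_value = color
        links.new(emission.outputs['Emission'], nodes['Material Output'].inputs['Surface'])
        return mat

    center = emission_material('Center', (0.05, 0.05, 0.05, 1.0))
    edge = emission_material('Edge', edge_color)
    edge.use_backface_culling = True

    mesh_obj.data.materials.clear()
    mesh_obj.data.materials.append(center)
    mesh_obj.data.materials.append(edge)    # slot 1, targeted by material_offset below

    solidify = mesh_obj.modifiers.new("Outline", 'SOLIDIFY')
    solidify.thickness = -0.01              # more negative => thicker outline
    solidify.use_rim = False
    solidify.use_flip_normals = True
    solidify.material_offset = 1            # the inverted shell uses the 'Edge' material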
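Not part of the patch: a sketch of the compositor wiring built for the point-cloud-with-background case. The image sequence is scaled to the render size and the transparent render layer is composited over it with AlphaOver; node types and socket indexes follow the hunks (the mesh case inserts an extra Alpha Convert / Mix pair in the same way).

import bpy

def build_background_graph(background_image, frame_count, frame_offset):
    scene = bpy.context.scene
    scene.use_nodes = True
    nodes, links = scene.node_tree.nodes, scene.node_tree.links

    image = nodes.new(type="CompositorNodeImage")
    image.image = background_image
    image.frame_duration = frame_count
    image.frame_offset = frame_offset

    scale = nodes.new(type="CompositorNodeScale")
    scale.space = 'RENDER_SIZE'
    scale.frame_method = 'CROP'

    alpha_over = nodes.new(type="CompositorNodeAlphaOver")

    links.new(image.outputs['Image'], scale.inputs['Image'])
    # Both image inputs of AlphaOver are named "Image", hence the indexes.
    links.new(scale.outputs['Image'], alpha_over.inputs[1])                   # background
    links.new(nodes['Render Layers'].outputs['Image'], alpha_over.inputs[2])  # foreground
    links.new(alpha_over.outputs['Image'], nodes['Composite'].inputs['Image'])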
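Not part of the patch: the FFMPEG output setup at the end of the script, sketched with the format-to-container mapping as a dictionary. The behaviour matches the if/elif chain in the hunks, with unknown formats falling back to MPEG4.

import bpy

CONTAINERS = {'mkv': 'MKV', 'avi': 'AVI', 'mov': 'QUICKTIME', 'mp4': 'MPEG4'}

def setup_video_output(output_path, video_format='mkv'):
    render = bpy.context.scene.render
    render.image_settings.file_format = 'FFMPEG'
    render.ffmpeg.format = CONTAINERS.get(video_format, 'MPEG4')
    render.filepath = output_path + '/render.' + video_format

# Rendering the whole camera animation then writes the video to that path:
#   setup_video_output(args.outputPath, args.videoFormat)
#   bpy.ops.render.render(animation=True)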