[Bf-blender-cvs] SVN commit: /data/svn/bf-blender [42099] trunk/blender/release/scripts/ startup: Camera tracking: operator to setup scene for compositing things into footage

Sergey Sharybin sergey.vfx at gmail.com
Wed Nov 23 18:30:47 CET 2011


Revision: 42099
          http://projects.blender.org/scm/viewvc.php?view=rev&root=bf-blender&revision=42099
Author:   nazgul
Date:     2011-11-23 17:30:47 +0000 (Wed, 23 Nov 2011)
Log Message:
-----------
Camera tracking: operator to setup scene for compositing things into footage

This operator makes the needed changes to

- 3D viewport
- Scene settings
- World settings
- Compositor
- Scene objects

so that the scene becomes ready to be composited into the footage.

Known issue: the preview doesn't work "out-of-the-box" after running this script;
selecting the View node and hitting Tab helps. Not sure it can be solved
in a nicer way at this moment.

Modified Paths:
--------------
    trunk/blender/release/scripts/startup/bl_operators/clip.py
    trunk/blender/release/scripts/startup/bl_ui/space_clip.py

Modified: trunk/blender/release/scripts/startup/bl_operators/clip.py
===================================================================
--- trunk/blender/release/scripts/startup/bl_operators/clip.py	2011-11-23 17:25:25 UTC (rev 42098)
+++ trunk/blender/release/scripts/startup/bl_operators/clip.py	2011-11-23 17:30:47 UTC (rev 42099)
@@ -21,9 +21,47 @@
 import os
 import shutil
 from bpy.types import Operator
-from bpy_extras.io_utils import unpack_list
+from bpy_extras.io_utils import unpack_list, unpack_face_list
 
+from mathutils import Vector, Matrix
 
+
+def CLIP_spacees_walk(context, all_screens, tarea, tspace, callback, *args):
+    screens = bpy.data.screens if all_screens else [context.screen]
+
+    for screen in screens:
+        for area in screen.areas:
+            if area.type == tarea:
+                for space in area.spaces:
+                    if space.type == tspace:
+                        callback(space, *args)
+
+
+def CLIP_set_viewport_background(context, all_screens, clip, clip_user):
+    def set_background(space_v3d, clip, user):
+        bgpic = None
+
+        for x in space_v3d.background_images:
+            if x.source == 'MOVIE':
+                bgpic = x
+                break
+
+        if not bgpic:
+            bgpic = space_v3d.background_images.new()
+
+        bgpic.source = 'MOVIE'
+        bgpic.clip = clip
+        bgpic.clip_user.proxy_render_size = user.proxy_render_size
+        bgpic.clip_user.use_render_undistorted = True
+        bgpic.use_camera_clip = False
+        bgpic.view_axis = 'CAMERA'
+
+        space_v3d.show_background_images = True
+
+    CLIP_spacees_walk(context, all_screens, 'VIEW_3D', 'VIEW_3D',
+                      set_background, clip, clip_user)
+
+
 def CLIP_track_view_selected(sc, track):
     if track.select_anchor:
         return True
@@ -51,8 +89,8 @@
 
         ob = bpy.data.objects.new(name=track.name, object_data=None)
         ob.select = True
-        bpy.context.scene.objects.link(ob)
-        bpy.context.scene.objects.active = ob
+        context.scene.objects.link(ob)
+        context.scene.objects.active = ob
 
         for con in ob.constraints:
             if con.type == 'FOLLOW_TRACK':
@@ -106,7 +144,7 @@
 
         ob = bpy.data.objects.new(name="Tracks", object_data=mesh)
 
-        bpy.context.scene.objects.link(ob)
+        context.scene.objects.link(ob)
 
         return {'FINISHED'}
 
@@ -202,36 +240,10 @@
 
         return sc.clip
 
-    def _set_background(self, space_v3d, clip, user):
-        bgpic = None
-
-        for x in space_v3d.background_images:
-            if x.source == 'MOVIE':
-                bgpic = x
-                break
-
-        if not bgpic:
-            bgpic = space_v3d.background_images.new()
-
-        bgpic.source = 'MOVIE'
-        bgpic.clip = clip
-        bgpic.clip_user.proxy_render_size = user.proxy_render_size
-        bgpic.clip_user.use_render_undistorted = user.use_render_undistorted
-        bgpic.use_camera_clip = False
-        bgpic.view_axis = 'CAMERA'
-
-        space_v3d.show_background_images = True
-
     def execute(self, context):
         sc = context.space_data
-        clip = sc.clip
+        CLIP_set_viewport_background(context, False, sc.clip, sc.clip_user)
 
-        for area in context.window.screen.areas:
-            if area.type == 'VIEW_3D':
-                for space in area.spaces:
-                    if space.type == 'VIEW_3D':
-                        self._set_background(space, clip, sc.clip_user)
-
         return {'FINISHED'}
 
 
@@ -332,3 +344,430 @@
                 self._bake_object(scene, ob)
 
         return {'FINISHED'}
+
+
+class CLIP_OT_setup_tracking_scene(Operator):
+    """Prepare scene for compositing 3D objects into this footage"""
+
+    bl_idname = "clip.setup_tracking_scene"
+    bl_label = "Setup Tracking Scene"
+    bl_options = {'UNDO', 'REGISTER'}
+
+    @classmethod
+    def poll(cls, context):
+        sc = context.space_data
+
+        if sc.type != 'CLIP_EDITOR':
+            return False
+
+        clip = sc.clip
+
+        return clip and clip.tracking.reconstruction.is_valid
+
+    @staticmethod
+    def _setupScene(context):
+        scene = context.scene
+        scene.active_clip = context.space_data.clip
+
+    @staticmethod
+    def _setupWorld(context):
+        scene = context.scene
+        world = scene.world
+
+        if not world:
+            world = bpy.data.worlds.new(name="World")
+            scene.world = world
+
+        world.light_settings.use_ambient_occlusion = True
+        world.light_settings.ao_blend_type = 'MULTIPLY'
+
+        world.light_settings.use_environment_light = True
+        world.light_settings.environment_energy = 0.1
+
+        world.light_settings.distance = 1.0
+        world.light_settings.sample_method = 'ADAPTIVE_QMC'
+        world.light_settings.samples = 7
+        world.light_settings.threshold = 0.005
+
+    @staticmethod
+    def _findOrCreateCamera(context):
+        scene = context.scene
+
+        if scene.camera:
+            return scene.camera
+
+        cam = bpy.data.cameras.new(name="Camera")
+        camob = bpy.data.objects.new(name="Camera", object_data=cam)
+        scene.objects.link(camob)
+
+        scene.camera = camob
+
+        camob.matrix_local = (Matrix.Translation((7.481, -6.508, 5.344)) *
+            Matrix.Rotation(0.815, 4, 'Z') *
+            Matrix.Rotation(0.011, 4, 'Y') *
+            Matrix.Rotation(1.109, 4, 'X'))
+
+        return camob
+
+    @staticmethod
+    def _setupCamera(context):
+        camob = CLIP_OT_setup_tracking_scene._findOrCreateCamera(context)
+
+        # Remove all constraints to be sure motion is fine
+        camob.constraints.clear()
+
+        # Append camera solver constraint
+        con = camob.constraints.new(type='CAMERA_SOLVER')
+        con.use_active_clip = True
+        con.influence = 1.0
+
+    @staticmethod
+    def _setupViewport(context):
+        sc = context.space_data
+        CLIP_set_viewport_background(context, True, sc.clip, sc.clip_user)
+
+    @staticmethod
+    def _setupRenderLayers(context):
+        scene = context.scene
+        rlayers = scene.render.layers
+
+        if not scene.render.layers.get("Foreground"):
+            if len(rlayers) == 1:
+                fg = rlayers[0]
+                fg.name = 'Foreground'
+            else:
+                fg = scene.render.layers.new('Foreground')
+
+            fg.use_sky = False
+            fg.layers = [True] + [False] * 19
+            fg.layers_zmask = [False] * 10 + [True] + [False] * 9
+            fg.use_pass_vector = True
+
+        if not scene.render.layers.get("Background"):
+            bg = scene.render.layers.new('Background')
+            bg.use_pass_shadow = True
+            bg.use_pass_ambient_occlusion = True
+            bg.layers = [False] * 10 + [True] + [False] * 9
+
+    @staticmethod
+    def _findNode(tree, type):
+        for node in tree.nodes:
+            if node.type == type:
+                return node
+
+        return None
+
+    @staticmethod
+    def _findOrCreateNode(tree, type):
+        node = CLIP_OT_setup_tracking_scene._findNode(tree, type)
+
+        if not node:
+            node = tree.nodes.new(type=type)
+
+        return node
+
+    @staticmethod
+    def _needSetupNodes(context):
+        scene = context.scene
+        tree = scene.node_tree
+
+        if not tree:
+            # No compositor node tree found, time to create it!
+            return True
+
+        for node in tree.nodes:
+            if node.type in {'MOVIECLIP', 'MOVIEDISTORTION'}:
+                return False
+
+        return True
+
+    @staticmethod
+    def _offsetNodes(tree):
+        for a in tree.nodes:
+            for b in tree.nodes:
+                if a != b and a.location == b.location:
+                    b.location += Vector((40.0, 20.0))
+
+    def _setupNodes(self, context):
+        if not self._needSetupNodes(context):
+            # compositor nodes were already setup or even changes already
+            # do nothing to prevent nodes damage
+            return
+
+        # Enable backdrop for all compositor spaces
+        def setup_space(space):
+            space.show_backdrop = True
+
+        CLIP_spacees_walk(context, True, 'NODE_EDITOR', 'NODE_EDITOR',
+                          setup_space)
+
+        sc = context.space_data
+        scene = context.scene
+        scene.use_nodes = True
+        tree = scene.node_tree
+        clip = sc.clip
+
+        need_stabilization = False
+
+        # create nodes
+        rlayer_fg = self._findOrCreateNode(tree, 'R_LAYERS')
+        rlayer_bg = tree.nodes.new(type='R_LAYERS')
+        composite = self._findOrCreateNode(tree, 'COMPOSITE')
+
+        movieclip = tree.nodes.new(type='MOVIECLIP')
+        distortion = tree.nodes.new(type='MOVIEDISTORTION')
+
+        if need_stabilization:
+            stabilize = tree.nodes.new(type='STABILIZE2D')
+
+        scale = tree.nodes.new(type='SCALE')
+        invert = tree.nodes.new(type='INVERT')
+        add_ao = tree.nodes.new(type='MIX_RGB')
+        add_shadow = tree.nodes.new(type='MIX_RGB')
+        mul_shadow = tree.nodes.new(type='MIX_RGB')
+        mul_image = tree.nodes.new(type='MIX_RGB')
+        vector_blur = tree.nodes.new(type='VECBLUR')
+        alphaover = tree.nodes.new(type='ALPHAOVER')
+        viewer = tree.nodes.new(type='VIEWER')
+
+        # setup nodes
+        movieclip.clip = clip
+
+        distortion.clip = clip
+        distortion.distortion_type = 'UNDISTORT'
+
+        if need_stabilization:
+            stabilize.clip = clip
+
+        scale.space = 'RENDER_SIZE'
+
+        rlayer_bg.scene = scene
+        rlayer_bg.layer = "Background"
+
+        rlayer_fg.scene = scene
+        rlayer_fg.layer = "Foreground"
+
+        add_ao.blend_type = 'ADD'
+        add_shadow.blend_type = 'ADD'
+
+        mul_shadow.blend_type = 'MULTIPLY'
+        mul_shadow.inputs['Fac'].default_value = 0.8
+
+        mul_image.blend_type = 'MULTIPLY'
+        mul_image.inputs['Fac'].default_value = 0.8
+
+        vector_blur.factor = 0.75
+
+        # create links
+        tree.links.new(movieclip.outputs['Image'], distortion.inputs['Image'])
+
+        if need_stabilization:
+            tree.links.new(distortion.outputs['Image'],
+                stabilize.inputs['Image'])
+            tree.links.new(stabilize.outputs['Image'], scale.inputs['Image'])
+        else:
+            tree.links.new(distortion.outputs['Image'], scale.inputs['Image'])
+

@@ Diff output truncated at 10240 characters. @@



More information about the Bf-blender-cvs mailing list