[Bf-extensions-cvs] SVN commit: /data/svn/bf-extensions [2843] trunk/py/scripts/addons/ netrender: netrender

Martin Poirier theeth at yahoo.com
Sat Dec 31 19:25:05 CET 2011


Revision: 2843
          http://projects.blender.org/scm/viewvc.php?view=rev&root=bf-extensions&revision=2843
Author:   theeth
Date:     2011-12-31 18:25:00 +0000 (Sat, 31 Dec 2011)
Log Message:
-----------
netrender
wip feature: distributed point cache baking. Distributed baking works, but results are not sent back to the master/client yet. The feature is disabled in the UI for this reason.
new feature: job and slave tags, enabling slaves to be filtered for specific jobs
  a job is dispatched to a slave only if the slave has no tags or has all of the job's tags (see the sketch below)
  Render jobs have the tag "render" by default, while baking jobs have the tag "baking"
UI: Web interface additions to reflect tags and job subtypes (render/baking)
bug fix: resetting a completed job now correctly resets its status to queued
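
For illustration, a minimal sketch of the dispatch rule above, assuming job.tags
and slave.tags are plain Python sets (the actual master-side check may differ):

    def slave_can_run(slave_tags, job_tags):
        # An untagged slave accepts any job; a tagged slave must carry
        # every tag the job requires.
        return not slave_tags or job_tags.issubset(slave_tags)

    assert slave_can_run(set(), {"baking"})          # untagged slave takes everything
    assert not slave_can_run({"render"}, {"baking"}) # render-only slave skips baking jobs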

Modified Paths:
--------------
    trunk/py/scripts/addons/netrender/__init__.py
    trunk/py/scripts/addons/netrender/client.py
    trunk/py/scripts/addons/netrender/master.py
    trunk/py/scripts/addons/netrender/master_html.py
    trunk/py/scripts/addons/netrender/model.py
    trunk/py/scripts/addons/netrender/operators.py
    trunk/py/scripts/addons/netrender/repath.py
    trunk/py/scripts/addons/netrender/slave.py
    trunk/py/scripts/addons/netrender/ui.py
    trunk/py/scripts/addons/netrender/utils.py

Added Paths:
-----------
    trunk/py/scripts/addons/netrender/baking.py

Modified: trunk/py/scripts/addons/netrender/__init__.py
===================================================================
--- trunk/py/scripts/addons/netrender/__init__.py	2011-12-31 11:45:52 UTC (rev 2842)
+++ trunk/py/scripts/addons/netrender/__init__.py	2011-12-31 18:25:00 UTC (rev 2843)
@@ -21,8 +21,8 @@
 bl_info = {
     "name": "Network Renderer",
     "author": "Martin Poirier",
-    "version": (1, 4),
-    "blender": (2, 5, 6),
+    "version": (1, 7),
+    "blender": (2, 6, 0),
     "api": 35011,
     "location": "Render > Engine > Network Render",
     "description": "Distributed rendering for Blender",
@@ -45,6 +45,7 @@
     imp.reload(ui)
     imp.reload(repath)
     imp.reload(versioning)
+    imp.reload(baking)
 else:
     from netrender import model
     from netrender import operators
@@ -57,6 +58,7 @@
     from netrender import ui
     from netrender import repath
     from netrender import versioning
+    from netrender import baking
 
 jobs = []
 slaves = []

Added: trunk/py/scripts/addons/netrender/baking.py
===================================================================
--- trunk/py/scripts/addons/netrender/baking.py	                        (rev 0)
+++ trunk/py/scripts/addons/netrender/baking.py	2011-12-31 18:25:00 UTC (rev 2843)
@@ -0,0 +1,87 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import bpy
+import sys, subprocess
+
+BLENDER_PATH = sys.argv[0]
+
+def bake(job, tasks):
+    main_file = job.files[0]
+    job_full_path = main_file.filepath
+    
+    task_commands = []
+    for task in tasks:
+        task_commands.extend(task)
+    
+    process = subprocess.Popen([BLENDER_PATH, "-b", "-noaudio", job_full_path, "-P", __file__, "--"] + task_commands, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    
+    return process
+
+def process_cache(obj, point_cache):
+    if point_cache.is_baked:
+        bpy.ops.ptcache.free_bake({"point_cache": point_cache})
+        
+    point_cache.use_disk_cache = True
+    
+    bpy.ops.ptcache.bake({"point_cache": point_cache}, bake=True)
+
+def process_generic(obj, index):
+    modifier = obj.modifiers[index]
+    point_cache = modifier.point_cache
+    process_cache(obj, point_cache)
+
+def process_smoke(obj, index):
+    modifier = obj.modifiers[index]
+    point_cache = modifier.domain_settings.point_cache
+    process_cache(obj, point_cache)
+
+def process_particle(obj, index):
+    psys = obj.particle_systems[index]
+    point_cache = psys.point_cache
+    process_cache(obj, point_cache)
+
+def process_paint(obj, index):
+    modifier = obj.modifiers[index]
+    for surface in modifier.canvas_settings.canvas_surfaces:
+        process_cache(obj, surface.point_cache)
+
+def process_null(obj, index):
+    raise ValueError("No baking possible with arguments: " + " ".join(sys.argv))
+
+bake_funcs = {}
+bake_funcs["CLOTH"] = process_generic
+bake_funcs["SOFT_BODY"] = process_generic
+bake_funcs["PARTICLE_SYSTEM"] = process_particle
+bake_funcs["SMOKE"] = process_smoke
+bake_funcs["DYNAMIC_PAINT"] = process_paint
+
+if __name__ == "__main__":
+    try:
+        i = sys.argv.index("--")
+    except:
+        i = 0
+    
+    if i:
+        task_args = sys.argv[i+1:]
+        for i in range(0, len(task_args), 3):
+            bake_type = task_args[i]
+            obj = bpy.data.objects[task_args[i+1]]
+            index = int(task_args[i+2])
+            
+            bake_funcs.get(bake_type, process_null)(obj, index)


Property changes on: trunk/py/scripts/addons/netrender/baking.py
___________________________________________________________________
Added: svn:keywords
   + Author Date Id Revision
Added: svn:eol-style
   + native
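
For illustration, a hedged sketch of the command line that bake() above assembles
for two hypothetical tasks (a cloth modifier at index 0 on an object named "Cube"
and particle system 0 on "Emitter"; the blender path, .blend path and object
names are made up):

    # Sketch only: the same list building as in bake(); nothing is executed here.
    tasks = [["CLOTH", "Cube", "0"], ["PARTICLE_SYSTEM", "Emitter", "0"]]
    command = ["/usr/bin/blender", "-b", "-noaudio", "/tmp/job.blend",
               "-P", "baking.py", "--"]
    for task in tasks:
        command.extend(task)
    # The tail is now: -- CLOTH Cube 0 PARTICLE_SYSTEM Emitter 0
    # The __main__ block reads these three at a time and dispatches to the
    # matching bake_funcs entry (process_generic, process_particle, ...).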

Modified: trunk/py/scripts/addons/netrender/client.py
===================================================================
--- trunk/py/scripts/addons/netrender/client.py	2011-12-31 11:45:52 UTC (rev 2842)
+++ trunk/py/scripts/addons/netrender/client.py	2011-12-31 18:25:00 UTC (rev 2843)
@@ -112,14 +112,14 @@
     elif netsettings.job_type == "JOB_VCS":
         job.type = netrender.model.JOB_VCS
 
-def clientSendJob(conn, scene, anim = False):
+def sendJob(conn, scene, anim = False):
     netsettings = scene.network_render
     if netsettings.job_type == "JOB_BLENDER":
-        return clientSendJobBlender(conn, scene, anim)
+        return sendJobBlender(conn, scene, anim)
     elif netsettings.job_type == "JOB_VCS":
-        return clientSendJobVCS(conn, scene, anim)
+        return sendJobVCS(conn, scene, anim)
 
-def clientSendJobVCS(conn, scene, anim = False):
+def sendJobVCS(conn, scene, anim = False):
     netsettings = scene.network_render
     job = netrender.model.RenderJob()
 
@@ -140,8 +140,6 @@
     if filename[0] in (os.sep, os.altsep):
         filename = filename[1:]
     
-    print("CREATING VCS JOB", filename)
-    
     job.addFile(filename, signed=False)
 
     job_name = netsettings.job_name
@@ -158,6 +156,8 @@
     job.version_info.wpath = netsettings.vcs_wpath
     job.version_info.rpath = netsettings.vcs_rpath
     job.version_info.revision = netsettings.vcs_revision
+    
+    job.tags.add(netrender.model.TAG_RENDER)
 
     # try to send path first
     with ConnectionContext():
@@ -171,10 +171,87 @@
 
     return job_id
 
-def clientSendJobBlender(conn, scene, anim = False):
+def sendJobBaking(conn, scene):
     netsettings = scene.network_render
     job = netrender.model.RenderJob()
 
+    filename = bpy.data.filepath
+    
+    if not os.path.exists(filename):
+        raise RuntimeError("Current file path not defined\nSave your file before sending a job")
+    
+    job.addFile(filename)
+
+    job_name = netsettings.job_name
+    path, name = os.path.split(filename)
+    if job_name == "[default]":
+        job_name = name
+        
+    ###############################
+    # LIBRARIES (needed for baking)
+    ###############################
+    for lib in bpy.data.libraries:
+        file_path = bpy.path.abspath(lib.filepath)
+        if os.path.exists(file_path):
+            job.addFile(file_path)
+
+    tasks = set()
+    
+    ####################################
+    # FLUID + POINT CACHE (what we bake)
+    ####################################
+    def pointCacheFunc(object, owner, point_cache):
+        if type(owner) == bpy.types.ParticleSystem:
+            index = [index for index, data in enumerate(object.particle_systems) if data == owner][0]
+            tasks.add(("PARTICLE_SYSTEM", object.name, str(index)))
+        else: # owner is modifier
+            index = [index for index, data in enumerate(object.modifiers) if data == owner][0]
+            tasks.add((owner.type, object.name, str(index)))
+        
+    def fluidFunc(object, modifier, cache_path):
+        pass
+        
+    def multiresFunc(object, modifier, cache_path):
+        pass
+        
+    processObjectDependencies(pointCacheFunc, fluidFunc, multiresFunc)
+
+    fillCommonJobSettings(job, job_name, netsettings)
+    
+    job.tags.add(netrender.model.TAG_BAKING)
+    job.subtype = netrender.model.JOB_SUB_BAKING
+    job.chunks = 1 # No chunking for baking
+
+    for i, task in enumerate(tasks):
+        job.addFrame(i + 1)
+        job.frames[-1].command = "|".join(task)
+        
+    # try to send path first
+    with ConnectionContext():
+        conn.request("POST", "/job", json.dumps(job.serialize()))
+    response = conn.getresponse()
+    response.read()
+
+    job_id = response.getheader("job-id")
+
+    # if ACCEPTED (but not yet processed), send files
+    if response.status == http.client.ACCEPTED:
+        for rfile in job.files:
+            f = open(rfile.filepath, "rb")
+            with ConnectionContext():
+                conn.request("PUT", fileURL(job_id, rfile.index), f)
+            f.close()
+            response = conn.getresponse()
+            response.read()
+
+    # server will reply with ACCEPTED until all files are found
+
+    return job_id
+    
+def sendJobBlender(conn, scene, anim = False):
+    netsettings = scene.network_render
+    job = netrender.model.RenderJob()
+
     if anim:
         for f in range(scene.frame_start, scene.frame_end + 1):
             job.addFrame(f)
@@ -219,7 +296,7 @@
     ###########################
     default_path = cachePath(filename)
     
-    def pointCacheFunc(object, point_cache):
+    def pointCacheFunc(object, owner, point_cache):
         addPointCache(job, object, point_cache, default_path)
         
     def fluidFunc(object, modifier, cache_path):
@@ -233,6 +310,8 @@
     #print(job.files)
 
     fillCommonJobSettings(job, job_name, netsettings)
+    
+    job.tags.add(netrender.model.TAG_RENDER)
 
     # try to send path first
     with ConnectionContext():
@@ -322,7 +401,7 @@
 
             if response.status == http.client.NO_CONTENT:
                 new_job = True
-                netsettings.job_id = clientSendJob(conn, scene)
+                netsettings.job_id = sendJob(conn, scene)
                 job_id = netsettings.job_id
 
                 requestResult(conn, job_id, scene.frame_current)
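
As a side note, each baking task collected in sendJobBaking is stored as a frame
command joined with "|". A sketch of that encoding and of how the slave side could
split it back into the triple expected by baking.py (the object name is hypothetical;
the corresponding slave.py change is not shown in this truncated diff):

    task = ("CLOTH", "Cube", "0")
    command = "|".join(task)                          # "CLOTH|Cube|0"
    bake_type, obj_name, index = command.split("|")
    assert (bake_type, obj_name, index) == task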

Modified: trunk/py/scripts/addons/netrender/master.py
===================================================================
--- trunk/py/scripts/addons/netrender/master.py	2011-12-31 11:45:52 UTC (rev 2842)
+++ trunk/py/scripts/addons/netrender/master.py	2011-12-31 18:25:00 UTC (rev 2843)
@@ -54,12 +54,9 @@
 
 
 class MRenderSlave(netrender.model.RenderSlave):
-    def __init__(self, name, address, stats):
-        super().__init__()
-        self.id = hashlib.md5(bytes(repr(name) + repr(address), encoding='utf8')).hexdigest()
-        self.name = name
-        self.address = address
-        self.stats = stats
+    def __init__(self, slave_info):
+        super().__init__(slave_info)

@@ Diff output truncated at 10240 characters. @@

