[Bf-blender-cvs] [2ace89ebb5f] temp-cycles-tbb: Cycles: make TBB a required library dependency, and use in a few places

Brecht Van Lommel noreply at git.blender.org
Sat Jun 6 21:05:05 CEST 2020


Commit: 2ace89ebb5f3aaebd918627e32832c94e151b185
Author: Brecht Van Lommel
Date:   Fri Jun 5 12:53:38 2020 +0200
Branches: temp-cycles-tbb
https://developer.blender.org/rB2ace89ebb5f3aaebd918627e32832c94e151b185

Cycles: make TBB a required library dependency, and use in a few places

Now that the rest of Blender also relies on TBB, no point in maintaining custom
code for parallel_for and thread local storage.

===================================================================

M	CMakeLists.txt
M	intern/cycles/CMakeLists.txt
M	intern/cycles/bvh/bvh_build.cpp
M	intern/cycles/bvh/bvh_build.h
M	intern/cycles/render/light.cpp
M	intern/cycles/render/object.cpp
M	intern/cycles/render/svm.cpp
M	intern/cycles/util/CMakeLists.txt
M	intern/cycles/util/util_task.h

===================================================================

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9c7482901d1..28d10b44a42 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -655,6 +655,7 @@ if(WITH_BOOST AND NOT (WITH_CYCLES OR WITH_OPENIMAGEIO OR WITH_INTERNATIONAL OR
   set(WITH_BOOST OFF)
 endif()
 
+set_and_warn_dependency(WITH_TBB WITH_CYCLES            OFF)
 set_and_warn_dependency(WITH_TBB WITH_USD               OFF)
 set_and_warn_dependency(WITH_TBB WITH_OPENIMAGEDENOISE  OFF)
 set_and_warn_dependency(WITH_TBB WITH_OPENVDB           OFF)
diff --git a/intern/cycles/CMakeLists.txt b/intern/cycles/CMakeLists.txt
index 121c8bdad6e..e5a5e9773d3 100644
--- a/intern/cycles/CMakeLists.txt
+++ b/intern/cycles/CMakeLists.txt
@@ -286,6 +286,7 @@ include_directories(
   ${OPENEXR_INCLUDE_DIR}
   ${OPENEXR_INCLUDE_DIRS}
   ${PUGIXML_INCLUDE_DIR}
+  ${TBB_INCLUDE_DIRS}
 )
 
 if(CYCLES_STANDALONE_REPOSITORY)
diff --git a/intern/cycles/bvh/bvh_build.cpp b/intern/cycles/bvh/bvh_build.cpp
index 814b5ced5d2..ad555535a17 100644
--- a/intern/cycles/bvh/bvh_build.cpp
+++ b/intern/cycles/bvh/bvh_build.cpp
@@ -423,22 +423,6 @@ BVHNode *BVHBuild::run()
   }
 
   spatial_min_overlap = root.bounds().safe_area() * params.spatial_split_alpha;
-  if (params.use_spatial_split) {
-    /* NOTE: The API here tries to be as much ready for multi-threaded build
-     * as possible, but at the same time it tries not to introduce any
-     * changes in behavior for until all refactoring needed for threading is
-     * finished.
-     *
-     * So we currently allocate single storage for now, which is only used by
-     * the only thread working on the spatial BVH build.
-     */
-    spatial_storage.resize(TaskScheduler::num_threads() + 1);
-    size_t num_bins = max(root.size(), (int)BVHParams::NUM_SPATIAL_BINS) - 1;
-    foreach (BVHSpatialStorage &storage, spatial_storage) {
-      storage.right_bounds.clear();
-    }
-    spatial_storage[0].right_bounds.resize(num_bins);
-  }
   spatial_free_index = 0;
 
   need_prim_time = params.num_motion_curve_steps > 0 || params.num_motion_triangle_steps > 0;
@@ -475,6 +459,9 @@ BVHNode *BVHBuild::run()
     task_pool.wait_work();
   }
 
+  /* clean up temporary memory usage by threads */
+  spatial_storage.clear();
+
   /* delete if we canceled */
   if (rootnode) {
     if (progress.get_cancel()) {
@@ -551,19 +538,18 @@ void BVHBuild::thread_build_node(InnerNode *inner, int child, BVHObjectBinning *
   }
 }
 
-void BVHBuild::thread_build_spatial_split_node(InnerNode *inner,
-                                               int child,
-                                               BVHRange *range,
-                                               vector<BVHReference> *references,
-                                               int level,
-                                               int thread_id)
+void BVHBuild::thread_build_spatial_split_node(
+    InnerNode *inner, int child, BVHRange *range, vector<BVHReference> *references, int level)
 {
   if (progress.get_cancel()) {
     return;
   }
 
+  /* Get per-thread memory for spatial split. */
+  BVHSpatialStorage *local_storage = &spatial_storage.local();
+
   /* build nodes */
-  BVHNode *node = build_node(*range, references, level, thread_id);
+  BVHNode *node = build_node(*range, references, level, local_storage);
 
   /* set child in inner node */
   inner->children[child] = node;
@@ -690,7 +676,7 @@ BVHNode *BVHBuild::build_node(const BVHObjectBinning &range, int level)
 BVHNode *BVHBuild::build_node(const BVHRange &range,
                               vector<BVHReference> *references,
                               int level,
-                              int thread_id)
+                              BVHSpatialStorage *storage)
 {
   /* Update progress.
    *
@@ -712,7 +698,6 @@ BVHNode *BVHBuild::build_node(const BVHRange &range,
   }
 
   /* Perform splitting test. */
-  BVHSpatialStorage *storage = &spatial_storage[thread_id];
   BVHMixedSplit split(this, storage, range, references, level);
 
   if (!(range.size() > 0 && params.top_level && level == 0)) {
diff --git a/intern/cycles/bvh/bvh_build.h b/intern/cycles/bvh/bvh_build.h
index 3fe4c3799e2..df2aa2ae1a7 100644
--- a/intern/cycles/bvh/bvh_build.h
+++ b/intern/cycles/bvh/bvh_build.h
@@ -76,7 +76,7 @@ class BVHBuild {
   BVHNode *build_node(const BVHRange &range,
                       vector<BVHReference> *references,
                       int level,
-                      int thread_id);
+                      BVHSpatialStorage *storage);
   BVHNode *build_node(const BVHObjectBinning &range, int level);
   BVHNode *create_leaf_node(const BVHRange &range, const vector<BVHReference> &references);
   BVHNode *create_object_leaf_nodes(const BVHReference *ref, int start, int num);
@@ -87,12 +87,8 @@ class BVHBuild {
   /* Threads. */
   enum { THREAD_TASK_SIZE = 4096 };
   void thread_build_node(InnerNode *node, int child, BVHObjectBinning *range, int level);
-  void thread_build_spatial_split_node(InnerNode *node,
-                                       int child,
-                                       BVHRange *range,
-                                       vector<BVHReference> *references,
-                                       int level,
-                                       int thread_id);
+  void thread_build_spatial_split_node(
+      InnerNode *node, int child, BVHRange *range, vector<BVHReference> *references, int level);
   thread_mutex build_mutex;
 
   /* Progress. */
@@ -127,7 +123,7 @@ class BVHBuild {
 
   /* Spatial splitting. */
   float spatial_min_overlap;
-  vector<BVHSpatialStorage> spatial_storage;
+  enumerable_thread_specific<BVHSpatialStorage> spatial_storage;
   size_t spatial_free_index;
   thread_spin_lock spatial_spin_lock;
 
diff --git a/intern/cycles/render/light.cpp b/intern/cycles/render/light.cpp
index 3896695b873..6174454854b 100644
--- a/intern/cycles/render/light.cpp
+++ b/intern/cycles/render/light.cpp
@@ -625,29 +625,13 @@ void LightManager::device_update_background(Device *device,
   float2 *cond_cdf = dscene->light_background_conditional_cdf.alloc(cdf_width * res.y);
 
   double time_start = time_dt();
-  if (max(res.x, res.y) < 512) {
-    /* Small enough resolution, faster to do single-threaded. */
-    background_cdf(0, res.y, res.x, res.y, &pixels, cond_cdf);
-  }
-  else {
-    /* Threaded evaluation for large resolution. */
-    const int num_blocks = TaskScheduler::num_threads();
-    const int chunk_size = res.y / num_blocks;
-    int start_row = 0;
-    TaskPool pool;
-    for (int i = 0; i < num_blocks; ++i) {
-      const int current_chunk_size = (i != num_blocks - 1) ? chunk_size : (res.y - i * chunk_size);
-      pool.push(function_bind(&background_cdf,
-                              start_row,
-                              start_row + current_chunk_size,
-                              res.x,
-                              res.y,
-                              &pixels,
-                              cond_cdf));
-      start_row += current_chunk_size;
-    }
-    pool.wait_work();
-  }
+
+  /* Create CDF in parallel. */
+  const int rows_per_task = divide_up(10240, res.x);
+  parallel_for(blocked_range<size_t>(0, res.y, rows_per_task),
+               [&](const blocked_range<size_t> &r) {
+                 background_cdf(r.begin(), r.end(), res.x, res.y, &pixels, cond_cdf);
+               });
 
   /* marginal CDFs (column, V direction, sum of rows) */
   marg_cdf[0].x = cond_cdf[res.x].x;
diff --git a/intern/cycles/render/object.cpp b/intern/cycles/render/object.cpp
index 752350ad76e..28337ef1a21 100644
--- a/intern/cycles/render/object.cpp
+++ b/intern/cycles/render/object.cpp
@@ -78,7 +78,6 @@ struct UpdateObjectTransformState {
   Scene *scene;
 
   /* Some locks to keep everything thread-safe. */
-  thread_spin_lock queue_lock;
   thread_spin_lock surface_area_lock;
 
   /* First unused object index in the queue. */
@@ -551,41 +550,6 @@ void ObjectManager::device_update_object_transform(UpdateObjectTransformState *s
   }
 }
 
-bool ObjectManager::device_update_object_transform_pop_work(UpdateObjectTransformState *state,
-                                                            int *start_index,
-                                                            int *num_objects)
-{
-  /* Tweakable parameter, number of objects per chunk.
-   * Too small value will cause some extra overhead due to spin lock,
-   * too big value might not use all threads nicely.
-   */
-  static const int OBJECTS_PER_TASK = 32;
-  bool have_work = false;
-  state->queue_lock.lock();
-  int num_scene_objects = state->scene->objects.size();
-  if (state->queue_start_object < num_scene_objects) {
-    int count = min(OBJECTS_PER_TASK, num_scene_objects - state->queue_start_object);
-    *start_index = state->queue_start_object;
-    *num_objects = count;
-    state->queue_start_object += count;
-    have_work = true;
-  }
-  state->queue_lock.unlock();
-  return have_work;
-}
-
-void ObjectManager::device_update_object_transform_task(UpdateObjectTransformState *state)
-{
-  int start_index, num_objects;
-  while (device_update_object_transform_pop_work(state, &start_index, &num_objects)) {
-    for (int i = 0; i < num_objects; ++i) {
-      const int object_index = start_index + i;
-      Object *ob = state->scene->objects[object_index];
-      device_update_object_transform(state, ob);
-    }
-  }
-}
-
 void ObjectManager::device_update_transforms(DeviceScene *dscene, Scene *scene, Progress &progress)
 {
   UpdateObjectTransformState state;
@@ -631,29 +595,16 @@ void ObjectManager::device_update_transforms(DeviceScene *dscene, Scene *scene,
     numparticles += psys->particles.size();
   }
 
-  /* NOTE: If it's just a handful of objects we deal with them in a single
-   * thread to avoid threading overhead. However, this threshold is might
-   * need some tweaks to make mid-complex scenes optimal.
-   */
-  if (scene->objects.size() < 64) {
-    foreach (Object *ob, scene->objects) {
-      device_update_object_transform(&state, ob);
-      if (progress.get_cancel()) {
-        return;
-      }
-    }
-  }
-  else {
-    const int num_threads = TaskScheduler::num_threads();
-    TaskPool pool;
-    for (int i = 0; i < num_thread

@@ Diff output truncated at 10240 characters. @@



More information about the Bf-blender-cvs mailing list