[Bf-blender-cvs] [4ada1d26774] temp-tbb-task-scheduler: Tasks: split task.c into task_pool.cc and task_iterator.c

Brecht Van Lommel noreply at git.blender.org
Tue Nov 5 15:10:47 CET 2019


Commit: 4ada1d267749931ca934a74b14a82479bcaa92e0
Author: Brecht Van Lommel
Date:   Sat Oct 12 17:11:36 2019 +0200
Branches: temp-tbb-task-scheduler
https://developer.blender.org/rB4ada1d267749931ca934a74b14a82479bcaa92e0

Tasks: split task.c into task_pool.cc and task_iterator.c

===================================================================

M	source/blender/blenlib/BLI_task.h
M	source/blender/blenlib/CMakeLists.txt
A	source/blender/blenlib/intern/task_iterator.c
R056	source/blender/blenlib/intern/task.c	source/blender/blenlib/intern/task_pool.cc

===================================================================

diff --git a/source/blender/blenlib/BLI_task.h b/source/blender/blenlib/BLI_task.h
index 9832bf9a0f2..b68c675c605 100644
--- a/source/blender/blenlib/BLI_task.h
+++ b/source/blender/blenlib/BLI_task.h
@@ -116,6 +116,9 @@ void *BLI_task_pool_userdata(TaskPool *pool);
 /* optional mutex to use from run function */
 ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool);
 
+/* Thread ID of the thread that created the task pool. */
+int BLI_task_pool_creator_thread_id(TaskPool *pool);
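+/* A minimal usage sketch (hypothetical caller, assuming the delayed-push API
+ * declared below): the creator thread ID is the ID to hand to the delayed-push
+ * calls when tasks are queued from the thread that made the pool:
+ *
+ *   const int tid = BLI_task_pool_creator_thread_id(pool);
+ *   BLI_task_pool_delayed_push_begin(pool, tid);
+ *   for (int i = 0; i < num_jobs; i++) {
+ *     BLI_task_pool_push(pool, ...);
+ *   }
+ *   BLI_task_pool_delayed_push_end(pool, tid);
+ */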
+
 /* Delayed push, use this to reduce thread overhead by accumulating
  * all new tasks into a local queue first, then pushing them to the scheduler
  * from within a single mutex lock.
diff --git a/source/blender/blenlib/CMakeLists.txt b/source/blender/blenlib/CMakeLists.txt
index f3740b5d39f..f1bd3089834 100644
--- a/source/blender/blenlib/CMakeLists.txt
+++ b/source/blender/blenlib/CMakeLists.txt
@@ -119,7 +119,8 @@ set(SRC
   intern/string_utf8.c
   intern/string_utils.c
   intern/system.c
-  intern/task.c
+  intern/task_iterator.c
+  intern/task_pool.cc
   intern/threads.c
   intern/time.c
   intern/timecode.c
diff --git a/source/blender/blenlib/intern/task_iterator.c b/source/blender/blenlib/intern/task_iterator.c
new file mode 100644
index 00000000000..a0fea107e88
--- /dev/null
+++ b/source/blender/blenlib/intern/task_iterator.c
@@ -0,0 +1,636 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/** \file
+ * \ingroup bli
+ *
+ * A generic task system which can be used for any task-based subsystem.
+ */
+
+#include <stdlib.h>
+#include <string.h> /* For memcpy(). */
+
+#include "MEM_guardedalloc.h"
+
+#include "DNA_listBase.h"
+
+#include "BLI_listbase.h"
+#include "BLI_math.h"
+#include "BLI_mempool.h"
+#include "BLI_task.h"
+#include "BLI_threads.h"
+
+#include "atomic_ops.h"
+
+/* Parallel range routines */
+
+/**
+ * Main functions:
+ * - #BLI_task_parallel_range
+ * - #BLI_task_parallel_listbase (#ListBase - doubly-linked list)
+ *
+ * TODO:
+ * - #BLI_task_parallel_foreach_link (#Link - singly-linked list)
+ * - #BLI_task_parallel_foreach_ghash/gset (#GHash/#GSet - hash & set)
+ * - #BLI_task_parallel_foreach_mempool (#BLI_mempool - iterate over mempools)
+ */
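+
+/* A minimal usage sketch of the range iterator (hypothetical callback and
+ * data names, shown purely for illustration):
+ *
+ *   static void square_cb(void *__restrict userdata,
+ *                         const int i,
+ *                         const TaskParallelTLS *__restrict tls)
+ *   {
+ *     float *values = userdata;
+ *     values[i] *= values[i];
+ *   }
+ *
+ *   TaskParallelSettings settings;
+ *   BLI_parallel_range_settings_defaults(&settings);
+ *   BLI_task_parallel_range(0, len, values, square_cb, &settings);
+ */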
+
+/* Allows avoiding the use of malloc for userdata_chunk in tasks, when it is small enough. */
+#define MALLOCA(_size) (((_size) <= 8192) ? alloca((_size)) : MEM_mallocN((_size), __func__))
+#define MALLOCA_FREE(_mem, _size) \
+  do { \
+    if (((_mem) != NULL) && ((_size) > 8192)) { \
+      MEM_freeN((_mem)); \
+    } \
+  } while (0)
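+
+/* Typical pairing, as in parallel_range_single_thread() below; the same size
+ * must be passed to both macros so the free matches the allocation:
+ *
+ *   void *chunk = MALLOCA(size);
+ *   ...
+ *   MALLOCA_FREE(chunk, size);
+ */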
+
+typedef struct ParallelRangeState {
+  int start, stop;
+  void *userdata;
+
+  TaskParallelRangeFunc func;
+
+  int iter;
+  int chunk_size;
+} ParallelRangeState;
+
+BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settings,
+                                              const int tot_items,
+                                              int num_tasks,
+                                              int *r_chunk_size)
+{
+  int chunk_size = 0;
+
+  if (!settings->use_threading) {
+    /* Some users of this helper will still need a valid chunk size in case processing is not
+     * threaded. We can then use a bigger one than in the default threaded case. */
+    chunk_size = 1024;
+    num_tasks = 1;
+  }
+  else if (settings->min_iter_per_thread > 0) {
+    /* Already set by user, no need to do anything here. */
+    chunk_size = settings->min_iter_per_thread;
+  }
+  else {
+    /* Multiplier used in the heuristics below to define an "optimal" chunk size.
+     * The idea here is to increase the chunk size to compensate for the rather measurable
+     * threading overhead caused by fetching tasks. With too many CPU threads we start
+     * spending too much time in that overhead.
+     * First values are: 1 if num_tasks < 16;
+     *              else 2 if num_tasks < 32;
+     *              else 3 if num_tasks < 48;
+     *              else 4 if num_tasks < 64;
+     *                   etc.
+     * Note: If we wanted to keep the 'power of two' multiplier, we'd need something like:
+     *     1 << max_ii(0, (int)(sizeof(int) * 8) - 1 - bitscan_reverse_i(num_tasks) - 3)
+     */
+    const int num_tasks_factor = max_ii(1, num_tasks >> 3);
+
+    /* We could make that 'base' 32 number configurable in TaskParallelSettings too, or maybe
+     * just always apply this heuristic, using TaskParallelSettings.min_iter_per_thread as a
+     * basis? */
+    chunk_size = 32 * num_tasks_factor;
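+    /* Worked example: when called from BLI_task_parallel_range on a 16-thread
+     * machine, num_tasks is typically 18 (threads + 2), so
+     * num_tasks_factor = max_ii(1, 18 >> 3) = 2 and chunk_size = 64. */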
+
+    /* Basic heuristic to avoid threading on a low number of items.
+     * We could make that limit configurable in the settings too. */
+    if (tot_items > 0 && tot_items < max_ii(256, chunk_size * 2)) {
+      chunk_size = tot_items;
+    }
+  }
+
+  BLI_assert(chunk_size > 0);
+
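+  /* E.g. with tot_items = 10000, num_tasks = 10 and a base chunk_size of 32,
+   * static scheduling yields max_ii(32, 10000 / 10) = 1000 (one large chunk
+   * per task), while dynamic scheduling keeps the small 32-item chunks. */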
+  if (tot_items > 0) {
+    switch (settings->scheduling_mode) {
+      case TASK_SCHEDULING_STATIC:
+        *r_chunk_size = max_ii(chunk_size, tot_items / num_tasks);
+        break;
+      case TASK_SCHEDULING_DYNAMIC:
+        *r_chunk_size = chunk_size;
+        break;
+    }
+  }
+  else {
+    /* If the total number of items is unknown, we can only use dynamic scheduling. */
+    *r_chunk_size = chunk_size;
+  }
+}
+
+BLI_INLINE void task_parallel_range_calc_chunk_size(const TaskParallelSettings *settings,
+                                                    const int num_tasks,
+                                                    ParallelRangeState *state)
+{
+  task_parallel_calc_chunk_size(
+      settings, state->stop - state->start, num_tasks, &state->chunk_size);
+}
+
+BLI_INLINE bool parallel_range_next_iter_get(ParallelRangeState *__restrict state,
+                                             int *__restrict iter,
+                                             int *__restrict count)
+{
+  int previter = atomic_fetch_and_add_int32(&state->iter, state->chunk_size);
+
+  *iter = previter;
+  *count = max_ii(0, min_ii(state->chunk_size, state->stop - previter));
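+  /* E.g. with start = 0, stop = 100 and chunk_size = 32, successive calls
+   * claim [0, 32), [32, 64), [64, 96) and [96, 100); the next call then sees
+   * previter >= stop, yields count = 0 and returns false. */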
+
+  return (previter < state->stop);
+}
+
+static void parallel_range_func(TaskPool *__restrict pool, void *userdata_chunk, int thread_id)
+{
+  ParallelRangeState *__restrict state = BLI_task_pool_userdata(pool);
+  TaskParallelTLS tls = {
+      .thread_id = thread_id,
+      .userdata_chunk = userdata_chunk,
+  };
+  int iter, count;
+  while (parallel_range_next_iter_get(state, &iter, &count)) {
+    for (int i = 0; i < count; i++) {
+      state->func(state->userdata, iter + i, &tls);
+    }
+  }
+}
+
+static void parallel_range_single_thread(const int start,
+                                         const int stop,
+                                         void *userdata,
+                                         TaskParallelRangeFunc func,
+                                         const TaskParallelSettings *settings)
+{
+  void *userdata_chunk = settings->userdata_chunk;
+  const size_t userdata_chunk_size = settings->userdata_chunk_size;
+  void *userdata_chunk_local = NULL;
+  const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
+  if (use_userdata_chunk) {
+    userdata_chunk_local = MALLOCA(userdata_chunk_size);
+    memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
+  }
+  TaskParallelTLS tls = {
+      .thread_id = 0,
+      .userdata_chunk = userdata_chunk_local,
+  };
+  for (int i = start; i < stop; i++) {
+    func(userdata, i, &tls);
+  }
+  if (settings->func_finalize != NULL) {
+    settings->func_finalize(userdata, userdata_chunk_local);
+  }
+  MALLOCA_FREE(userdata_chunk_local, userdata_chunk_size);
+}
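+
+/* A sketch of the userdata_chunk / func_finalize contract (a hypothetical
+ * reduction; SumTLS, SumData and finalize_cb are illustrative names): each
+ * task works on its own copy of settings->userdata_chunk, and func_finalize
+ * merges every copy back into the shared userdata:
+ *
+ *   typedef struct { float sum; } SumTLS;
+ *
+ *   static void finalize_cb(void *__restrict userdata, void *__restrict tls_v)
+ *   {
+ *     ((SumData *)userdata)->total += ((SumTLS *)tls_v)->sum;
+ *   }
+ *
+ *   SumTLS tls_init = {0.0f};
+ *   settings.userdata_chunk = &tls_init;
+ *   settings.userdata_chunk_size = sizeof(tls_init);
+ *   settings.func_finalize = finalize_cb;
+ */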
+
+/**
+ * This function allows parallelizing for-loops in a way similar to OpenMP's
+ * 'parallel for' statement.
+ *
+ * See the public API documentation of ParallelRangeSettings for a description of all settings.
+ */
+void BLI_task_parallel_range(const int start,
+                             const int stop,
+                             void *userdata,
+                             TaskParallelRangeFunc func,
+                             const TaskParallelSettings *settings)
+{
+  TaskScheduler *task_scheduler;
+  TaskPool *task_pool;
+  ParallelRangeState state;
+  int i, num_threads, num_tasks;
+
+  void *userdata_chunk = settings->userdata_chunk;
+  const size_t userdata_chunk_size = settings->userdata_chunk_size;
+  void *userdata_chunk_local = NULL;
+  void *userdata_chunk_array = NULL;
+  const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
+
+  if (start == stop) {
+    return;
+  }
+
+  BLI_assert(start < stop);
+  if (userdata_chunk_size != 0) {
+    BLI_assert(userdata_chunk != NULL);
+  }
+
+  /* If there is not enough data to be crunched, don't bother with tasks at all;
+   * do everything from the main thread. */
+  if (!settings->use_threading) {
+    parallel_range_single_thread(start, stop, userdata, func, settings);
+    return;
+  }
+
+  task_scheduler = BLI_task_scheduler_get();
+  num_threads = BLI_task_scheduler_num_threads(task_scheduler);
+
+  /* The idea here is to prevent creating a task for each loop iteration,
+   * and instead have tasks that are evenly distributed across CPU cores,
+   * each pulling the next iteration to be crunched from the queue. */
+  num_tasks = num_threads + 2;
+
+  state.start = start;
+  state.stop = stop;
+  state.userdata = userdata;
+  state.func = func;
+  state.iter = start;
+
+  task_parallel_range_calc_chunk_size(settings, num_tasks, &state);
+  num_tasks = min_ii(num_tasks, max_ii(1, (stop - start) / state.chunk_size));
+
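+  /* E.g. with only 100 items, the chunk-size heuristic clamps chunk_size to
+   * 100, so (stop - start) / chunk_size is 1, num_tasks becomes 1, and the
+   * whole range runs on the calling thread via the single-threaded path. */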
+  if (num_tasks == 1) {
+    parallel_range_single_thread(start, stop, userdata, func, settings);
+    return;
+  }
+
+  task_pool = BLI_task_pool_create_suspended(task_schedul

@@ Diff output truncated at 10240 characters. @@


