[Bf-blender-cvs] [7046e37eded] master: Cleanup: WorkScheduler.

Jeroen Bakker noreply at git.blender.org
Tue Mar 23 16:15:07 CET 2021


Commit: 7046e37eded55ac8879835500788a1e734437bbf
Author: Jeroen Bakker
Date:   Tue Mar 23 15:33:51 2021 +0100
Branches: master
https://developer.blender.org/rB7046e37eded55ac8879835500788a1e734437bbf

Cleanup: WorkScheduler.

- Use constexpr for better readability (see the sketch below).
- Split into functions per backend.
- Split the work scheduler global struct into smaller structs.
- Replace std::vector with blender::Vector.
- Remove the threading defines from COM_defines.h.
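
A minimal standalone sketch of the constexpr pattern these points describe (all names here other than ThreadingModel are illustrative stand-ins, and the dispatch is only an assumption about how the per-backend split is wired up, not code taken from the commit):

#include <cstdio>

enum class ThreadingModel { SingleThreaded, Queue, Task };

/* Compile-time selection of the active backend, in the spirit of
 * COM_threading_model() in the diff below. */
constexpr ThreadingModel active_model()
{
  return ThreadingModel::Queue;
}

/* Stand-ins for the real per-backend code paths. */
static void schedule_single_threaded() { std::printf("single-threaded\n"); }
static void schedule_queue()           { std::printf("queue backend\n"); }
static void schedule_task()            { std::printf("task backend\n"); }

static void schedule()
{
  /* Because active_model() is a constant expression, the untaken branches
   * can be discarded at compile time, replacing the old preprocessor
   * checks on COM_CURRENT_THREADING_MODEL. */
  if constexpr (active_model() == ThreadingModel::SingleThreaded) {
    schedule_single_threaded();
  }
  else if constexpr (active_model() == ThreadingModel::Queue) {
    schedule_queue();
  }
  else {
    schedule_task();
  }
}

int main()
{
  schedule();  /* Prints "queue backend" with the default model. */
  return 0;
}

Compared to the removed COM_TM_* macros, the selection is now a typed value the compiler checks, and the unused backends are still syntax-checked instead of being hidden behind #if blocks.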

===================================================================

M	source/blender/compositor/COM_compositor.h
M	source/blender/compositor/COM_defines.h
M	source/blender/compositor/intern/COM_WorkScheduler.cc

===================================================================

diff --git a/source/blender/compositor/COM_compositor.h b/source/blender/compositor/COM_compositor.h
index 7c230c8379a..8e3caf7aaf5 100644
--- a/source/blender/compositor/COM_compositor.h
+++ b/source/blender/compositor/COM_compositor.h
@@ -250,8 +250,8 @@ extern "C" {
  *
  * \subsection singlethread Single threaded
  * For debugging reasons the multi-threading can be disabled.
- * This is done by changing the COM_CURRENT_THREADING_MODEL
- * to COM_TM_NOTHREAD. When compiling the work-scheduler
+ * This is done by changing the `COM_threading_model`
+ * to `ThreadingModel::SingleThreaded`. When compiling the work-scheduler
  * will be changes to support no threading and run everything on the CPU.
  *
  * \section devices Devices
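
Concretely, the single-threaded debugging mode described in the doc-comment change above is now enabled by editing the body of COM_threading_model() in COM_WorkScheduler.cc (shown further down in this diff) rather than by redefining a macro. A local, debug-only change would look like this (sketch, using the ThreadingModel enum from that same file):

/* In COM_WorkScheduler.cc, for debugging only: run every work package in
 * the caller thread instead of the queue backend. */
constexpr ThreadingModel COM_threading_model()
{
  return ThreadingModel::SingleThreaded;
}
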
diff --git a/source/blender/compositor/COM_defines.h b/source/blender/compositor/COM_defines.h
index 6aa42c45212..266f532ebb8 100644
--- a/source/blender/compositor/COM_defines.h
+++ b/source/blender/compositor/COM_defines.h
@@ -62,26 +62,8 @@ enum class CompositorPriority {
 
 // chunk size determination
 #define COM_PREVIEW_SIZE 140.0f
-#define COM_OPENCL_ENABLED
 //#define COM_DEBUG
 
-// workscheduler threading models
-/**
- * COM_TM_QUEUE is a multi-threaded model, which uses the BLI_thread_queue pattern.
- * This is the default option.
- */
-#define COM_TM_QUEUE 1
-
-/**
- * COM_TM_NOTHREAD is a single threading model, everything is executed in the caller thread.
- * easy for debugging
- */
-#define COM_TM_NOTHREAD 0
-
-/**
- * COM_CURRENT_THREADING_MODEL can be one of the above, COM_TM_QUEUE is currently default.
- */
-#define COM_CURRENT_THREADING_MODEL COM_TM_QUEUE
 // chunk order
 /**
  * \brief The order of chunks to be scheduled
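
The removed COM_TM_* and COM_OPENCL_ENABLED defines are superseded by the ThreadingModel enum and the constexpr helpers added to COM_WorkScheduler.cc in the next file. A guard that previously had to be a preprocessor conditional, e.g.

#ifdef COM_OPENCL_ENABLED
  /* OpenCL code path. */
#endif

can now be an ordinary branch on a compile-time constant (a sketch of equivalent usage, not code copied from the commit):

if (COM_is_opencl_enabled()) {
  /* OpenCL code path; the condition is a constant expression, so the
   * compiler can fold the branch away when OpenCL support is disabled. */
}
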
diff --git a/source/blender/compositor/intern/COM_WorkScheduler.cc b/source/blender/compositor/intern/COM_WorkScheduler.cc
index 5d3f232221f..2bc3ff936b1 100644
--- a/source/blender/compositor/intern/COM_WorkScheduler.cc
+++ b/source/blender/compositor/intern/COM_WorkScheduler.cc
@@ -25,66 +25,89 @@
 #include "COM_WorkScheduler.h"
 #include "COM_WriteBufferOperation.h"
 #include "COM_compositor.h"
+
 #include "clew.h"
 
 #include "MEM_guardedalloc.h"
 
+#include "BLI_task.h"
 #include "BLI_threads.h"
+#include "BLI_vector.hh"
 #include "PIL_time.h"
 
 #include "BKE_global.h"
 
-#if COM_CURRENT_THREADING_MODEL == COM_TM_NOTHREAD
-#  ifndef DEBUG /* Test this so we don't get warnings in debug builds. */
-#    warning COM_CURRENT_THREADING_MODEL COM_TM_NOTHREAD is activated. Use only for debugging.
-#  endif
-#elif COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
-/* do nothing - default */
-#else
-#  error COM_CURRENT_THREADING_MODEL No threading model selected
-#endif
+enum class ThreadingModel {
+  /** Everything is executed in the caller thread. easy for debugging. */
+  SingleThreaded,
+  /** Multi-threaded model, which uses the BLI_thread_queue pattern. */
+  Queue,
+  /** Uses BLI_task as threading backend. */
+  Task
+};
+
+/**
+ * Returns the active threading model.
+ *
+ * Default is `ThreadingModel::Queue`.
+ */
+constexpr ThreadingModel COM_threading_model()
+{
+  return ThreadingModel::Queue;
+}
+
+/**
+ * Does the active threading model support opencl?
+ */
+constexpr bool COM_is_opencl_enabled()
+{
+  return COM_threading_model() != ThreadingModel::SingleThreaded;
+}
 
 static ThreadLocal(CPUDevice *) g_thread_device;
 static struct {
-  /** \brief list of all CPUDevices. for every hardware thread an instance of CPUDevice is created
-   */
-  std::vector<CPUDevice *> cpu_devices;
-
-#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
-  /** \brief list of all thread for every CPUDevice in cpudevices a thread exists. */
-  ListBase cpu_threads;
-  bool cpu_initialized = false;
-  /** \brief all scheduled work for the cpu */
-  ThreadQueue *cpu_queue;
-  ThreadQueue *gpu_queue;
-#  ifdef COM_OPENCL_ENABLED
-  cl_context opencl_context;
-  cl_program opencl_program;
-  /** \brief list of all OpenCLDevices. for every OpenCL GPU device an instance of OpenCLDevice is
-   * created. */
-  std::vector<OpenCLDevice *> gpu_devices;
-  /** \brief list of all thread for every GPUDevice in cpudevices a thread exists. */
-  ListBase gpu_threads;
-  /** \brief all scheduled work for the GPU. */
-  bool opencl_active = false;
-  bool opencl_initialized = false;
-#  endif
-#endif
+  struct {
+    /** \brief list of all CPUDevices. for every hardware thread an instance of CPUDevice is
+     * created
+     */
+    blender::Vector<CPUDevice *> devices;
+
+    /** \brief list of all thread for every CPUDevice in cpudevices a thread exists. */
+    ListBase threads;
+    bool initialized = false;
+    /** \brief all scheduled work for the cpu */
+    ThreadQueue *queue;
+  } queue;
 
+  struct {
+    TaskPool *pool;
+  } task;
+
+  struct {
+    ThreadQueue *queue;
+    cl_context context;
+    cl_program program;
+    /** \brief list of all OpenCLDevices. for every OpenCL GPU device an instance of OpenCLDevice
+     * is created. */
+    blender::Vector<OpenCLDevice *> devices;
+    /** \brief list of all thread for every GPUDevice in cpudevices a thread exists. */
+    ListBase threads;
+    /** \brief all scheduled work for the GPU. */
+    bool active = false;
+    bool initialized = false;
+  } opencl;
 } g_work_scheduler;
 
-#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
-static void *thread_execute_cpu(void *data)
-{
-  CPUDevice *device = (CPUDevice *)data;
-  WorkPackage *work;
-  BLI_thread_local_set(g_thread_device, device);
-  while ((work = (WorkPackage *)BLI_thread_queue_pop(g_work_scheduler.cpu_queue))) {
-    device->execute(work);
-    delete work;
-  }
+/* -------------------------------------------------------------------- */
+/** \name OpenCL Scheduling
+ * \{ */
 
-  return nullptr;
+static void CL_CALLBACK clContextError(const char *errinfo,
+                                       const void * /*private_info*/,
+                                       size_t /*cb*/,
+                                       void * /*user_data*/)
+{
+  printf("OPENCL error: %s\n", errinfo);
 }
 
 static void *thread_execute_gpu(void *data)
@@ -92,156 +115,69 @@ static void *thread_execute_gpu(void *data)
   Device *device = (Device *)data;
   WorkPackage *work;
 
-  while ((work = (WorkPackage *)BLI_thread_queue_pop(g_work_scheduler.gpu_queue))) {
+  while ((work = (WorkPackage *)BLI_thread_queue_pop(g_work_scheduler.opencl.queue))) {
     device->execute(work);
     delete work;
   }
 
   return nullptr;
 }
-#endif
-
-void WorkScheduler::schedule(ExecutionGroup *group, int chunkNumber)
-{
-  WorkPackage *package = new WorkPackage(group, chunkNumber);
-#if COM_CURRENT_THREADING_MODEL == COM_TM_NOTHREAD
-  CPUDevice device(0);
-  device.execute(package);
-  delete package;
-#elif COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
-#  ifdef COM_OPENCL_ENABLED
-  if (group->isOpenCL() && g_work_scheduler.opencl_active) {
-    BLI_thread_queue_push(g_work_scheduler.gpu_queue, package);
-  }
-  else {
-    BLI_thread_queue_push(g_work_scheduler.cpu_queue, package);
-  }
-#  else
-  BLI_thread_queue_push(g_work_scheduler.cpu_queue, package);
-#  endif
-#endif
-}
 
-void WorkScheduler::start(CompositorContext &context)
+static void opencl_start(CompositorContext &context)
 {
-#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
-  unsigned int index;
-  g_work_scheduler.cpu_queue = BLI_thread_queue_init();
-  BLI_threadpool_init(
-      &g_work_scheduler.cpu_threads, thread_execute_cpu, g_work_scheduler.cpu_devices.size());
-  for (index = 0; index < g_work_scheduler.cpu_devices.size(); index++) {
-    Device *device = g_work_scheduler.cpu_devices[index];
-    BLI_threadpool_insert(&g_work_scheduler.cpu_threads, device);
-  }
-#  ifdef COM_OPENCL_ENABLED
   if (context.getHasActiveOpenCLDevices()) {
-    g_work_scheduler.gpu_queue = BLI_thread_queue_init();
-    BLI_threadpool_init(
-        &g_work_scheduler.gpu_threads, thread_execute_gpu, g_work_scheduler.gpu_devices.size());
-    for (index = 0; index < g_work_scheduler.gpu_devices.size(); index++) {
-      Device *device = g_work_scheduler.gpu_devices[index];
-      BLI_threadpool_insert(&g_work_scheduler.gpu_threads, device);
+    g_work_scheduler.opencl.queue = BLI_thread_queue_init();
+    BLI_threadpool_init(&g_work_scheduler.opencl.threads,
+                        thread_execute_gpu,
+                        g_work_scheduler.opencl.devices.size());
+    for (int index = 0; index < g_work_scheduler.opencl.devices.size(); index++) {
+      Device *device = g_work_scheduler.opencl.devices[index];
+      BLI_threadpool_insert(&g_work_scheduler.opencl.threads, device);
     }
-    g_work_scheduler.opencl_active = true;
+    g_work_scheduler.opencl.active = true;
   }
   else {
-    g_work_scheduler.opencl_active = false;
+    g_work_scheduler.opencl.active = false;
   }
-#  endif
-#endif
 }
-void WorkScheduler::finish()
+
+static bool opencl_schedule(WorkPackage *package)
 {
-#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
-#  ifdef COM_OPENCL_ENABLED
-  if (g_work_scheduler.opencl_active) {
-    BLI_thread_queue_wait_finish(g_work_scheduler.gpu_queue);
-    BLI_thread_queue_wait_finish(g_work_scheduler.cpu_queue);
-  }
-  else {
-    BLI_thread_queue_wait_finish(g_work_scheduler.cpu_queue);
+  if (package->execution_group->isOpenCL() && g_work_scheduler.opencl.active) {
+    BLI_thread_queue_push(g_work_scheduler.opencl.queue, package);
+    return true;
   }
-#  else
-  BLI_thread_queue_wait_finish(cpuqueue);
-#  endif
-#endif
+  return false;
 }
-void WorkScheduler::stop()
+
+static void opencl_finish()
 {
-#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
-  BLI_thread_queue_nowait(g_work_scheduler.cpu_queue);
-  BLI_threadpool_end(&g_work_scheduler.cpu_threads);
-  BLI_thread_queue_free(g_work_scheduler.cpu_queue);
-  g_work_scheduler.cpu_queue = nullptr;
-#  ifdef COM_OPENCL_ENABLED
-  if (g_work_scheduler.opencl_active) {
-    BLI_thread_queue_nowait(g_work_scheduler.gpu_queue);
-    BLI_threadpool_end(&g_work_scheduler.gpu_threads);
-    BLI_thread_queue_free(g_work_scheduler.gpu_queue);
-    g_work_scheduler.gpu_queue = nullptr;
+  if (g_work_scheduler.opencl.active) {
+    BLI_thread_queue_wait_finish(g_work_scheduler.opencl.queue);
   }
-#  endif
-#endi

@@ Diff output truncated at 10240 characters. @@


