[Bf-blender-cvs] [1324659dee4] master: GTests: BLI_task: Add basic tests for BLI_task_parallel_listbase(), and some performance benchmarks.

Bastien Montagne noreply at git.blender.org
Tue Jun 4 23:51:14 CEST 2019


Commit: 1324659dee4981bce37557febb446710547fb646
Author: Bastien Montagne
Date:   Tue Jun 4 23:23:55 2019 +0200
Branches: master
https://developer.blender.org/rB1324659dee4981bce37557febb446710547fb646

GTests: BLI_task: Add basic tests for BLI_task_parallel_listbase(), and some performance benchmarks.

Nothing special to mention about the regression test itself; it basically
mimics the one for `BLI_task_parallel_mempool()`.
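
For reference, here is a minimal sketch of the pattern that test exercises
(the real code is in the `BLI_task_test.cc` hunk below; `tag_with_index_func`
and `example_usage` are illustrative names only, not part of the commit):

/* Callback matching TaskParallelListbaseFunc: it receives the shared userdata
 * pointer, the current Link and its index in the list. */
static void tag_with_index_func(void *UNUSED(userdata), Link *item, int index)
{
  LinkData *data = (LinkData *)item;
  /* Tag each item with its index so the caller can verify every item was visited once. */
  data->data = POINTER_FROM_INT(index);
}

static void example_usage(void)
{
  ListBase list = {NULL, NULL};
  LinkData items[10] = {{NULL, NULL, NULL}};

  BLI_threadapi_init();
  for (int i = 0; i < 10; i++) {
    BLI_addtail(&list, &items[i]);
  }
  /* Iterate over all items, potentially from multiple threads. */
  BLI_task_parallel_listbase(&list, NULL, tag_with_index_func, true);
  BLI_threadapi_exit();
}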

The basic performance benchmarks do not tell us much, besides the fact that
for very light per-item processing of a listbase, even with 100k items,
single-threaded iteration remains an order of magnitude faster than the
threaded code. Synchronization is simply too expensive in that case with the
current code. This should be partially solvable with much bigger (and
configurable) chunk sizes, though (the current ones are far too small for
such cases ;) ).
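
To give a feel for that synchronization cost: the "membarrier" variants in the
benchmark below pay for one atomic operation per list item. A purely
illustrative sketch of the chunking idea follows (this is *not* how BLI_task
is implemented; `CHUNK_SIZE` and `process_chunk` are hypothetical names):

/* Illustrative only: amortize a single atomic update over CHUNK_SIZE items
 * instead of paying for one per item. */
#define CHUNK_SIZE 1024

static void process_chunk(LinkData *first, int first_index, int count, uint32_t *remaining)
{
  LinkData *data = first;
  for (int i = 0; i < count && data != NULL; i++, data = data->next) {
    data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + first_index + i);
  }
  /* One synchronization point for the whole chunk, not one per item. */
  atomic_sub_and_fetch_uint32(remaining, (uint32_t)count);
}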

===================================================================

A	tests/gtests/blenlib/BLI_task_performance_test.cc
M	tests/gtests/blenlib/BLI_task_test.cc
M	tests/gtests/blenlib/CMakeLists.txt

===================================================================

diff --git a/tests/gtests/blenlib/BLI_task_performance_test.cc b/tests/gtests/blenlib/BLI_task_performance_test.cc
new file mode 100644
index 00000000000..ecc012aa47a
--- /dev/null
+++ b/tests/gtests/blenlib/BLI_task_performance_test.cc
@@ -0,0 +1,192 @@
+/* Apache License, Version 2.0 */
+
+#include "testing/testing.h"
+#include "BLI_ressource_strings.h"
+
+#include "atomic_ops.h"
+
+#define GHASH_INTERNAL_API
+
+extern "C" {
+#include "BLI_utildefines.h"
+
+#include "BLI_listbase.h"
+#include "BLI_mempool.h"
+#include "BLI_task.h"
+
+#include "PIL_time.h"
+
+#include "MEM_guardedalloc.h"
+}
+
+/* *** Parallel iterations over double-linked list items. *** */
+
+#define NUM_RUN_AVERAGED 100
+
+static uint gen_pseudo_random_number(uint num)
+{
+  /* Note: this is taken from BLI_ghashutil_uinthash(); we don't want to depend here on
+   * external code that might change... */
+  num += ~(num << 16);
+  num ^= (num >> 5);
+  num += (num << 3);
+  num ^= (num >> 13);
+  num += ~(num << 9);
+  num ^= (num >> 17);
+
+  /* Clamp the final number to the [1 - 16321] range. */
+  return ((num & 255) << 6) + 1;
+}
+
+static void task_listbase_light_iter_func(void *UNUSED(userdata), Link *item, int index)
+{
+  LinkData *data = (LinkData *)item;
+
+  data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + index);
+}
+
+static void task_listbase_light_membarrier_iter_func(void *userdata, Link *item, int index)
+{
+  LinkData *data = (LinkData *)item;
+  int *count = (int *)userdata;
+
+  data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + index);
+  atomic_sub_and_fetch_uint32((uint32_t *)count, 1);
+}
+
+static void task_listbase_heavy_iter_func(void *UNUSED(userdata), Link *item, int index)
+{
+  LinkData *data = (LinkData *)item;
+
+  /* 'Random' number of iterations. */
+  const uint num = gen_pseudo_random_number((uint)index);
+
+  for (uint i = 0; i < num; i++) {
+    data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + ((i % 2) ? -index : index));
+  }
+}
+
+static void task_listbase_heavy_membarrier_iter_func(void *userdata, Link *item, int index)
+{
+  LinkData *data = (LinkData *)item;
+  int *count = (int *)userdata;
+
+  /* 'Random' number of iterations. */
+  const uint num = gen_pseudo_random_number((uint)index);
+
+  for (uint i = 0; i < num; i++) {
+    data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + ((i % 2) ? -index : index));
+  }
+  atomic_sub_and_fetch_uint32((uint32_t *)count, 1);
+}
+
+static void task_listbase_test_do(ListBase *list,
+                                  const int num_items,
+                                  int *num_items_tmp,
+                                  const char *id,
+                                  TaskParallelListbaseFunc func,
+                                  const bool use_threads,
+                                  const bool check_num_items_tmp)
+{
+  double averaged_timing = 0.0;
+  for (int i = 0; i < NUM_RUN_AVERAGED; i++) {
+    const double init_time = PIL_check_seconds_timer();
+    BLI_task_parallel_listbase(list, num_items_tmp, func, use_threads);
+    averaged_timing += PIL_check_seconds_timer() - init_time;
+
+    /* These checks ensure that all items of the listbase were processed once,
+     * and only once, as expected. */
+    if (check_num_items_tmp) {
+      EXPECT_EQ(*num_items_tmp, 0);
+    }
+    LinkData *item;
+    int j;
+    for (j = 0, item = (LinkData *)list->first; j < num_items && item != NULL;
+         j++, item = item->next) {
+      EXPECT_EQ(POINTER_AS_INT(item->data), j);
+      item->data = POINTER_FROM_INT(0);
+    }
+    EXPECT_EQ(num_items, j);
+
+    *num_items_tmp = num_items;
+  }
+
+  printf("\t%s: done in %fs on average over %d runs\n",
+         id,
+         averaged_timing / NUM_RUN_AVERAGED,
+         NUM_RUN_AVERAGED);
+}
+
+static void task_listbase_test(const char *id, const int nbr, const bool use_threads)
+{
+  printf("\n========== STARTING %s ==========\n", id);
+
+  ListBase list = {NULL, NULL};
+  LinkData *items_buffer = (LinkData *)MEM_calloc_arrayN(nbr, sizeof(*items_buffer), __func__);
+
+  BLI_threadapi_init();
+
+  int num_items = 0;
+  for (int i = 0; i < nbr; i++) {
+    BLI_addtail(&list, &items_buffer[i]);
+    num_items++;
+  }
+  int num_items_tmp = num_items;
+
+  task_listbase_test_do(&list,
+                        num_items,
+                        &num_items_tmp,
+                        "Light iter",
+                        task_listbase_light_iter_func,
+                        use_threads,
+                        false);
+
+  task_listbase_test_do(&list,
+                        num_items,
+                        &num_items_tmp,
+                        "Light iter with mem barrier",
+                        task_listbase_light_membarrier_iter_func,
+                        use_threads,
+                        true);
+
+  task_listbase_test_do(&list,
+                        num_items,
+                        &num_items_tmp,
+                        "Heavy iter",
+                        task_listbase_heavy_iter_func,
+                        use_threads,
+                        false);
+
+  task_listbase_test_do(&list,
+                        num_items,
+                        &num_items_tmp,
+                        "Heavy iter with mem barrier",
+                        task_listbase_heavy_membarrier_iter_func,
+                        use_threads,
+                        true);
+
+  MEM_freeN(items_buffer);
+  BLI_threadapi_exit();
+
+  printf("========== ENDED %s ==========\n\n", id);
+}
+
+TEST(task, ListBaseIterNoThread10k)
+{
+  task_listbase_test("ListBase parallel iteration - Single thread - 10000 items", 10000, false);
+}
+
+TEST(task, ListBaseIter10k)
+{
+  task_listbase_test("ListBase parallel iteration - Threaded - 10000 items", 10000, true);
+}
+
+TEST(task, ListBaseIterNoThread100k)
+{
+  task_listbase_test("ListBase parallel iteration - Single thread - 100000 items", 100000, false);
+}
+
+TEST(task, ListBaseIter100k)
+{
+  task_listbase_test("ListBase parallel iteration - Threaded - 100000 items", 100000, true);
+}
diff --git a/tests/gtests/blenlib/BLI_task_test.cc b/tests/gtests/blenlib/BLI_task_test.cc
index 96c6b572e19..0c1868380da 100644
--- a/tests/gtests/blenlib/BLI_task_test.cc
+++ b/tests/gtests/blenlib/BLI_task_test.cc
@@ -6,13 +6,19 @@
 #include "atomic_ops.h"
 
 extern "C" {
+#include "BLI_utildefines.h"
+
+#include "BLI_listbase.h"
 #include "BLI_mempool.h"
 #include "BLI_task.h"
-#include "BLI_utildefines.h"
+
+#include "MEM_guardedalloc.h"
 };
 
 #define NUM_ITEMS 10000
 
+/* *** Parallel iterations over mempool items. *** */
+
 static void task_mempool_iter_func(void *userdata, MempoolIterData *item)
 {
   int *data = (int *)item;
@@ -79,3 +85,45 @@ TEST(task, MempoolIter)
   BLI_mempool_destroy(mempool);
   BLI_threadapi_exit();
 }
+
+/* *** Parallel iterations over double-linked list items. *** */
+
+static void task_listbase_iter_func(void *userdata, Link *item, int index)
+{
+  LinkData *data = (LinkData *)item;
+  int *count = (int *)userdata;
+
+  data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + index);
+  atomic_sub_and_fetch_uint32((uint32_t *)count, 1);
+}
+
+TEST(task, ListBaseIter)
+{
+  ListBase list = {NULL, NULL};
+  LinkData *items_buffer = (LinkData *)MEM_calloc_arrayN(
+      NUM_ITEMS, sizeof(*items_buffer), __func__);
+  BLI_threadapi_init();
+
+  int i;
+
+  int num_items = 0;
+  for (i = 0; i < NUM_ITEMS; i++) {
+    BLI_addtail(&list, &items_buffer[i]);
+    num_items++;
+  }
+
+  BLI_task_parallel_listbase(&list, &num_items, task_listbase_iter_func, true);
+
+  /* These checks ensure that all items of the listbase were processed once,
+   * and only once, as expected. */
+  EXPECT_EQ(num_items, 0);
+  LinkData *item;
+  for (i = 0, item = (LinkData *)list.first; i < NUM_ITEMS && item != NULL;
+       i++, item = item->next) {
+    EXPECT_EQ(POINTER_AS_INT(item->data), i);
+  }
+  EXPECT_EQ(NUM_ITEMS, i);
+
+  MEM_freeN(items_buffer);
+  BLI_threadapi_exit();
+}
diff --git a/tests/gtests/blenlib/CMakeLists.txt b/tests/gtests/blenlib/CMakeLists.txt
index 547ed99295c..c2b5930e649 100644
--- a/tests/gtests/blenlib/CMakeLists.txt
+++ b/tests/gtests/blenlib/CMakeLists.txt
@@ -61,5 +61,6 @@ BLENDER_TEST(BLI_string_utf8 "bf_blenlib")
 BLENDER_TEST(BLI_task "bf_blenlib;bf_intern_numaapi")
 
 BLENDER_TEST_PERFORMANCE(BLI_ghash_performance "bf_blenlib")
+BLENDER_TEST_PERFORMANCE(BLI_task_performance "bf_blenlib")
 
 unset(BLI_path_util_extra_libs)


