[Bf-blender-cvs] [1abd978] new-filebrowser-preview-temp: BLI_task: add optional 'forced background' behavior to pools, and freefunc to tasks.

Bastien Montagne noreply at git.blender.org
Thu Oct 1 20:46:42 CEST 2015


Commit: 1abd97850373ac3372331464be8e0f5eb8b98279
Author: Bastien Montagne
Date:   Tue Sep 29 22:19:36 2015 +0200
Branches: new-filebrowser-preview-temp
https://developer.blender.org/rB1abd97850373ac3372331464be8e0f5eb8b98279

BLI_task: add optional 'forced background' behavior to pools, and freefunc to tasks.

Currently, the task scheduler in a mono-threaded context does not have any worker thread,
which means you have to 'work_and_wait' on a pool to get its tasks done in this context.
This is not suitable for tasks that are to be done in a completely asynchronous, background
fashion, so this commit adds:
* One worker thread to scheduler in case none is created by default;
* That worker thread only executes tasks from pools which have the relevant option set
  (so-called 'forced background' pools).

This commit also adds an optional freefunc callback to tasks, in case we need
more sophisticated behavior than a mere MEM_freeN...

===================================================================

M	source/blender/blenkernel/intern/mesh_evaluate.c
M	source/blender/blenkernel/intern/particle.c
M	source/blender/blenkernel/intern/particle_distribute.c
M	source/blender/blenkernel/intern/scene.c
M	source/blender/blenlib/BLI_task.h
M	source/blender/blenlib/intern/task.c
M	source/blender/depsgraph/intern/depsgraph_eval.cc
M	source/blender/editors/armature/editarmature_retarget.c
M	source/blender/editors/mask/mask_draw.c
M	source/blender/editors/space_clip/clip_editor.c
M	source/blender/editors/space_clip/clip_ops.c
M	source/blender/editors/space_file/filelist.c
M	source/blender/imbuf/intern/imageprocess.c
M	source/blender/render/intern/source/volume_precache.c
M	source/gameengine/Ketsji/KX_Scene.cpp

===================================================================

diff --git a/source/blender/blenkernel/intern/mesh_evaluate.c b/source/blender/blenkernel/intern/mesh_evaluate.c
index 2fc5350..4d0afd0 100644
--- a/source/blender/blenkernel/intern/mesh_evaluate.c
+++ b/source/blender/blenkernel/intern/mesh_evaluate.c
@@ -1300,7 +1300,7 @@ void BKE_mesh_normals_loop_split(
 		common_data.task_queue = BLI_thread_queue_init();
 
 		task_scheduler = BLI_task_scheduler_get();
-		task_pool = BLI_task_pool_create(task_scheduler, NULL);
+		task_pool = BLI_task_pool_create(task_scheduler, NULL, false);
 
 		nbr_workers = max_ii(2, BLI_task_scheduler_num_threads(task_scheduler));
 		for (i = 1; i < nbr_workers; i++) {
diff --git a/source/blender/blenkernel/intern/particle.c b/source/blender/blenkernel/intern/particle.c
index 9aacba8..cfa3644 100644
--- a/source/blender/blenkernel/intern/particle.c
+++ b/source/blender/blenkernel/intern/particle.c
@@ -2358,7 +2358,7 @@ void psys_cache_child_paths(ParticleSimulationData *sim, float cfra, int editupd
 		return;
 	
 	task_scheduler = BLI_task_scheduler_get();
-	task_pool = BLI_task_pool_create(task_scheduler, &ctx);
+	task_pool = BLI_task_pool_create(task_scheduler, &ctx, false);
 	totchild = ctx.totchild;
 	totparent = ctx.totparent;
 	
diff --git a/source/blender/blenkernel/intern/particle_distribute.c b/source/blender/blenkernel/intern/particle_distribute.c
index 87bc355..b0601be 100644
--- a/source/blender/blenkernel/intern/particle_distribute.c
+++ b/source/blender/blenkernel/intern/particle_distribute.c
@@ -1118,7 +1118,7 @@ static void distribute_particles_on_dm(ParticleSimulationData *sim, int from)
 		return;
 	
 	task_scheduler = BLI_task_scheduler_get();
-	task_pool = BLI_task_pool_create(task_scheduler, &ctx);
+	task_pool = BLI_task_pool_create(task_scheduler, &ctx, false);
 	
 	totpart = (from == PART_FROM_CHILD ? sim->psys->totchild : sim->psys->totpart);
 	psys_tasks_create(&ctx, 0, totpart, &tasks, &numtasks);
diff --git a/source/blender/blenkernel/intern/scene.c b/source/blender/blenkernel/intern/scene.c
index 1ccc213..a6a9ab9 100644
--- a/source/blender/blenkernel/intern/scene.c
+++ b/source/blender/blenkernel/intern/scene.c
@@ -1650,7 +1650,7 @@ static void scene_update_objects(EvaluationContext *eval_ctx, Main *bmain, Scene
 	state.has_mballs = false;
 #endif
 
-	task_pool = BLI_task_pool_create(task_scheduler, &state);
+	task_pool = BLI_task_pool_create(task_scheduler, &state, false);
 	if (G.debug & G_DEBUG_DEPSGRAPH_NO_THREADS) {
 		BLI_pool_set_num_threads(task_pool, 1);
 	}
diff --git a/source/blender/blenlib/BLI_task.h b/source/blender/blenlib/BLI_task.h
index 780b0bf..5f901ce 100644
--- a/source/blender/blenlib/BLI_task.h
+++ b/source/blender/blenlib/BLI_task.h
@@ -74,10 +74,14 @@ typedef enum TaskPriority {
 
 typedef struct TaskPool TaskPool;
 typedef void (*TaskRunFunction)(TaskPool *__restrict pool, void *taskdata, int threadid);
+typedef void (*TaskFreeFunction)(TaskPool *__restrict pool, void *taskdata, int threadid);
 
-TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata);
+TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata, const bool is_background);
 void BLI_task_pool_free(TaskPool *pool);
 
+void BLI_task_pool_push_ex(
+        TaskPool *pool, TaskRunFunction run, void *taskdata,
+        bool free_taskdata, TaskFreeFunction freedata, TaskPriority priority);
 void BLI_task_pool_push(TaskPool *pool, TaskRunFunction run,
 	void *taskdata, bool free_taskdata, TaskPriority priority);
 
diff --git a/source/blender/blenlib/intern/task.c b/source/blender/blenlib/intern/task.c
index 08d40a1..1bb3a01 100644
--- a/source/blender/blenlib/intern/task.c
+++ b/source/blender/blenlib/intern/task.c
@@ -43,6 +43,7 @@ typedef struct Task {
 	TaskRunFunction run;
 	void *taskdata;
 	bool free_taskdata;
+	TaskFreeFunction freedata;
 	TaskPool *pool;
 } Task;
 
@@ -60,6 +61,10 @@ struct TaskPool {
 	ThreadMutex user_mutex;
 
 	volatile bool do_cancel;
+
+	/* If set, this pool may never be work_and_wait'ed, which means TaskScheduler has to use its special
+	 * background fallback thread in case we are in mono-threaded situation. */
+	bool use_force_background;
 };
 
 struct TaskScheduler {
@@ -107,7 +112,7 @@ static void task_pool_num_increase(TaskPool *pool)
 	BLI_mutex_unlock(&pool->num_mutex);
 }
 
-static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
+static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task, const bool forced_background)
 {
 	bool found_task = false;
 	BLI_mutex_lock(&scheduler->queue_mutex);
@@ -127,6 +132,11 @@ static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task
 		     current_task = current_task->next)
 		{
 			TaskPool *pool = current_task->pool;
+
+			if (forced_background && !pool->use_force_background) {
+				continue;
+			}
+
 			if (pool->num_threads == 0 ||
 			    pool->currently_running_tasks < pool->num_threads)
 			{
@@ -146,7 +156,7 @@ static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task
 	return true;
 }
 
-static void *task_scheduler_thread_run(void *thread_p)
+static void *task_scheduler_thread_run_ex(void *thread_p, const bool forced_background)
 {
 	TaskThread *thread = (TaskThread *) thread_p;
 	TaskScheduler *scheduler = thread->scheduler;
@@ -154,15 +164,21 @@ static void *task_scheduler_thread_run(void *thread_p)
 	Task *task;
 
 	/* keep popping off tasks */
-	while (task_scheduler_thread_wait_pop(scheduler, &task)) {
+	while (task_scheduler_thread_wait_pop(scheduler, &task, forced_background)) {
 		TaskPool *pool = task->pool;
 
 		/* run task */
 		task->run(pool, task->taskdata, thread_id);
 
 		/* delete task */
-		if (task->free_taskdata)
-			MEM_freeN(task->taskdata);
+		if (task->free_taskdata) {
+			if (task->freedata) {
+				task->freedata(pool, task->taskdata, thread_id);
+			}
+			else {
+				MEM_freeN(task->taskdata);
+			}
+		}
 		MEM_freeN(task);
 
 		/* notify pool task was done */
@@ -172,6 +188,16 @@ static void *task_scheduler_thread_run(void *thread_p)
 	return NULL;
 }
 
+static void *task_scheduler_thread_run(void *thread_p)
+{
+	return task_scheduler_thread_run_ex(thread_p, false);
+}
+
+static void *task_scheduler_thread_run_forced_background(void *thread_p)
+{
+	return task_scheduler_thread_run_ex(thread_p, true);
+}
+
 TaskScheduler *BLI_task_scheduler_create(int num_threads)
 {
 	TaskScheduler *scheduler = MEM_callocN(sizeof(TaskScheduler), "TaskScheduler");
@@ -207,11 +233,25 @@ TaskScheduler *BLI_task_scheduler_create(int num_threads)
 
 			if (pthread_create(&scheduler->threads[i], NULL, task_scheduler_thread_run, thread) != 0) {
 				fprintf(stderr, "TaskScheduler failed to launch thread %d/%d\n", i, num_threads);
-				MEM_freeN(thread);
 			}
 		}
 	}
-	
+	else {
+		/* We create a thread, but only for pools that are 'forced background'. */
+		TaskThread *thread;
+
+		scheduler->num_threads = 1;
+		scheduler->threads = MEM_callocN(sizeof(pthread_t), "TaskScheduler threads");
+		thread = scheduler->task_threads = MEM_callocN(sizeof(TaskThread), "TaskScheduler task threads");
+
+		thread->scheduler = scheduler;
+		thread->id = 1;
+
+		if (pthread_create(&scheduler->threads[0], NULL, task_scheduler_thread_run_forced_background, thread) != 0) {
+			fprintf(stderr, "TaskScheduler failed to launch forced background thread\n");
+		}
+	}
+
 	return scheduler;
 }
 
@@ -244,8 +284,14 @@ void BLI_task_scheduler_free(TaskScheduler *scheduler)
 
 	/* delete leftover tasks */
 	for (task = scheduler->queue.first; task; task = task->next) {
-		if (task->free_taskdata)
-			MEM_freeN(task->taskdata);
+		if (task->free_taskdata) {
+			if (task->freedata) {
+				task->freedata(task->pool, task->taskdata, 0);
+			}
+			else {
+				MEM_freeN(task->taskdata);
+			}
+		}
 	}
 	BLI_freelistN(&scheduler->queue);
 
@@ -289,8 +335,14 @@ static void task_scheduler_clear(TaskScheduler *scheduler, TaskPool *pool)
 		nexttask = task->next;
 
 		if (task->pool == pool) {
-			if (task->free_taskdata)
-				MEM_freeN(task->taskdata);
+			if (task->free_taskdata) {
+				if (task->freedata) {
+					task->freedata(pool, task->taskdata, 0);
+				}
+				else {
+					MEM_freeN(task->taskdata);
+				}
+			}
 			BLI_freelinkN(&scheduler->queue, task);
 
 			done++;
@@ -305,7 +357,7 @@ static void task_scheduler_clear(TaskScheduler *scheduler, TaskPool *pool)
 
 /* Task Pool */
 
-TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata)
+TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata, const bool is_background)
 {
 	TaskPool *pool = MEM_callocN(sizeof(TaskPool), "TaskPool");
 
@@ -314,6 +366,7 @@ TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata)
 	pool->num_threads = 0;
 	pool->currently_running_tasks = 0;
 	pool->do_cancel = false;
+	pool->use_force_background = is_background;
 
 	BLI_mutex_init(&pool->num_mutex);
 	BLI_condition_init(&pool->num_cond);
@@ -346,19 +399,27 @@ void BLI_task_pool_free(TaskPool *pool)
 	BLI_end_threaded_malloc();
 }
 
-void BLI_task_pool_push(TaskPool *pool, TaskRunFunction run,
-	void *taskdata, bool free_taskdata, TaskPriority priority)
+void BLI_task_pool_push_ex(
+        TaskPool *pool, TaskRunFunction run, void *taskdata,
+        bool free_taskdata, TaskFreeFunction freedata, TaskPriority priority)
 {
 	Task *task = MEM_callocN(sizeof(Task), "Task");
 
 	task->run = run;
 	task->taskdata = taskdata;
 	task->free_taskdata = free_taskdata;
+	task->freedata = freedata;
 	task->pool = pool;
 
 	task_scheduler_push(pool->scheduler, task, priority);
 }
 
+void BLI_task_pool_push(
+        TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskPriority priority)
+{
+	BLI_task_pool_push_ex(pool, run, taskdata, free_taskdata, NULL, priority);
+}
+
 void BLI_task_pool_work_and_wait(TaskPool *pool)
 {
 	TaskScheduler *scheduler = pool->scheduler;
@@ -398,8 +459,14 @@ void BLI_task_pool_work_and_wait(TaskPool *pool)
 			work_task->run(pool, work_task->taskdata, 0);
 
 			/* delete task */
-			if (work_task->free_taskdata)
-				MEM_freeN(work_task->taskdata);
+			if (task->free_taskdata) {
+				if (task->freedata) {
+					task->freedata(pool, task->taskdata, 0);
+				}


@@ Diff output truncated at 10240 characters. @@




More information about the Bf-blender-cvs mailing list