[Bf-blender-cvs] [cdbda1c3d80] blender2.8: GPUPass: Refactor gpupass caching system to allow fast gpumaterial creation.

Clément Foucault noreply at git.blender.org
Thu Jun 7 12:02:44 CEST 2018


Commit: cdbda1c3d8017b994366b101b34c79a5df58902f
Author: Clément Foucault
Date:   Thu Jun 7 11:58:15 2018 +0200
Branches: blender2.8
https://developer.blender.org/rBcdbda1c3d8017b994366b101b34c79a5df58902f

GPUPass: Refactor gpupass caching system to allow fast gpumaterial creation.

This is part of the work needed to refactor the material parameters update.

Now the gpupass cache is polled before adding the gpumaterial to the
deferred compilation queue.

We store gpupasses in a single linked list, grouped by their hashes.
This is not the most efficient approach, but it can be improved upon later.

===================================================================

M	source/blender/draw/intern/draw_manager_data.c
M	source/blender/draw/intern/draw_manager_shader.c
M	source/blender/gpu/GPU_material.h
M	source/blender/gpu/intern/gpu_codegen.c
M	source/blender/gpu/intern/gpu_codegen.h
M	source/blender/gpu/intern/gpu_material.c
M	source/blender/windowmanager/intern/wm_init_exit.c

===================================================================

diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c
index c419e9e2535..dab3336b3ed 100644
--- a/source/blender/draw/intern/draw_manager_data.c
+++ b/source/blender/draw/intern/draw_manager_data.c
@@ -732,7 +732,14 @@ static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass
 		return NULL;
 	}
 
-	DRWShadingGroup *grp = drw_shgroup_create_ex(GPU_pass_shader(gpupass), pass);
+	GPUShader *sh = GPU_pass_shader_get(gpupass);
+
+	if (!sh) {
+		/* Shader not yet compiled */
+		return NULL;
+	}
+
+	DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass);
 	return grp;
 }
 
@@ -808,7 +815,7 @@ DRWShadingGroup *DRW_shgroup_material_create(
 	DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
 
 	if (shgroup) {
-		drw_shgroup_init(shgroup, GPU_pass_shader(gpupass));
+		drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
 		drw_shgroup_material_inputs(shgroup, material);
 	}
 
@@ -825,7 +832,7 @@ DRWShadingGroup *DRW_shgroup_material_instance_create(
 		shgroup->type = DRW_SHG_INSTANCE;
 		shgroup->instance_geom = geom;
 		drw_call_calc_orco(ob, shgroup->instance_orcofac);
-		drw_shgroup_instance_init(shgroup, GPU_pass_shader(gpupass), geom, format);
+		drw_shgroup_instance_init(shgroup, GPU_pass_shader_get(gpupass), geom, format);
 		drw_shgroup_material_inputs(shgroup, material);
 	}
 
@@ -843,7 +850,7 @@ DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
 
 	if (shgroup) {
 		/* Calling drw_shgroup_init will cause it to call GWN_draw_primitive(). */
-		drw_shgroup_init(shgroup, GPU_pass_shader(gpupass));
+		drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
 		shgroup->type = DRW_SHG_TRIANGLE_BATCH;
 		shgroup->instance_count = tri_count * 3;
 		drw_shgroup_material_inputs(shgroup, material);
diff --git a/source/blender/draw/intern/draw_manager_shader.c b/source/blender/draw/intern/draw_manager_shader.c
index 56a7c5db08e..dd46248a781 100644
--- a/source/blender/draw/intern/draw_manager_shader.c
+++ b/source/blender/draw/intern/draw_manager_shader.c
@@ -62,7 +62,6 @@ typedef struct DRWDeferredShader {
 	struct DRWDeferredShader *prev, *next;
 
 	GPUMaterial *mat;
-	char *vert, *geom, *frag, *defs;
 } DRWDeferredShader;
 
 typedef struct DRWShaderCompiler {
@@ -80,11 +79,6 @@ typedef struct DRWShaderCompiler {
 static void drw_deferred_shader_free(DRWDeferredShader *dsh)
 {
 	/* Make sure it is not queued before freeing. */
-	MEM_SAFE_FREE(dsh->vert);
-	MEM_SAFE_FREE(dsh->geom);
-	MEM_SAFE_FREE(dsh->frag);
-	MEM_SAFE_FREE(dsh->defs);
-
 	MEM_freeN(dsh);
 }
 
@@ -129,12 +123,7 @@ static void drw_deferred_shader_compilation_exec(void *custom_data, short *stop,
 		BLI_spin_unlock(&comp->list_lock);
 
 		/* Do the compilation. */
-		GPU_material_generate_pass(
-		        comp->mat_compiling->mat,
-		        comp->mat_compiling->vert,
-		        comp->mat_compiling->geom,
-		        comp->mat_compiling->frag,
-		        comp->mat_compiling->defs);
+		GPU_material_compile(comp->mat_compiling->mat);
 
 		*progress = (float)comp->shaders_done / (float)total;
 		*do_update = true;
@@ -165,25 +154,21 @@ static void drw_deferred_shader_compilation_free(void *custom_data)
 	MEM_freeN(comp);
 }
 
-static void drw_deferred_shader_add(
-        GPUMaterial *mat, const char *vert, const char *geom, const char *frag_lib, const char *defines)
+static void drw_deferred_shader_add(GPUMaterial *mat)
 {
 	/* Do not deferre the compilation if we are rendering for image. */
 	if (DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION) {
 		/* Double checking that this GPUMaterial is not going to be
 		 * compiled by another thread. */
 		DRW_deferred_shader_remove(mat);
-		GPU_material_generate_pass(mat, vert, geom, frag_lib, defines);
+		printf("%s GPUMaterial %p\n", __func__, mat);
+		GPU_material_compile(mat);
 		return;
 	}
 
 	DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");
 
 	dsh->mat = mat;
-	if (vert)     dsh->vert = BLI_strdup(vert);
-	if (geom)     dsh->geom = BLI_strdup(geom);
-	if (frag_lib) dsh->frag = BLI_strdup(frag_lib);
-	if (defines)  dsh->defs = BLI_strdup(defines);
 
 	BLI_assert(DST.draw_ctx.evil_C);
 	wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
@@ -361,10 +346,13 @@ GPUMaterial *DRW_shader_create_from_world(
 
 	if (mat == NULL) {
 		mat = GPU_material_from_nodetree(
-		        scene, wo->nodetree, &wo->gpumaterial, engine_type, options);
+		        scene, wo->nodetree, &wo->gpumaterial, engine_type, options,
+		        vert, geom, frag_lib, defines);
 	}
 
-	drw_deferred_shader_add(mat, vert, geom, frag_lib, defines);
+	if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+		drw_deferred_shader_add(mat);
+	}
 
 	return mat;
 }
@@ -380,10 +368,13 @@ GPUMaterial *DRW_shader_create_from_material(
 
 	if (mat == NULL) {
 		mat = GPU_material_from_nodetree(
-		        scene, ma->nodetree, &ma->gpumaterial, engine_type, options);
+		        scene, ma->nodetree, &ma->gpumaterial, engine_type, options,
+		        vert, geom, frag_lib, defines);
 	}
 
-	drw_deferred_shader_add(mat, vert, geom, frag_lib, defines);
+	if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+		drw_deferred_shader_add(mat);
+	}
 
 	return mat;
 }
diff --git a/source/blender/gpu/GPU_material.h b/source/blender/gpu/GPU_material.h
index 95492016f25..f780eda62fe 100644
--- a/source/blender/gpu/GPU_material.h
+++ b/source/blender/gpu/GPU_material.h
@@ -246,9 +246,9 @@ struct GPUUniformBuffer *GPU_material_sss_profile_get(
 GPUMaterial *GPU_material_from_nodetree_find(
         struct ListBase *gpumaterials, const void *engine_type, int options);
 GPUMaterial *GPU_material_from_nodetree(
-        struct Scene *scene, struct bNodeTree *ntree, struct ListBase *gpumaterials, const void *engine_type, int options);
-void GPU_material_generate_pass(
-        GPUMaterial *mat, const char *vert_code, const char *geom_code, const char *frag_lib, const char *defines);
+        struct Scene *scene, struct bNodeTree *ntree, struct ListBase *gpumaterials, const void *engine_type, int options,
+        const char *vert_code, const char *geom_code, const char *frag_lib, const char *defines);
+void GPU_material_compile(GPUMaterial *mat);
 void GPU_material_free(struct ListBase *gpumaterial);
 
 void GPU_materials_free(void);
@@ -270,6 +270,7 @@ bool GPU_material_do_color_management(GPUMaterial *mat);
 bool GPU_material_use_domain_surface(GPUMaterial *mat);
 bool GPU_material_use_domain_volume(GPUMaterial *mat);
 
+void GPU_pass_cache_init(void);
 void GPU_pass_cache_garbage_collect(void);
 void GPU_pass_cache_free(void);
 
diff --git a/source/blender/gpu/intern/gpu_codegen.c b/source/blender/gpu/intern/gpu_codegen.c
index 22665e2c0bf..91d0d9dbecc 100644
--- a/source/blender/gpu/intern/gpu_codegen.c
+++ b/source/blender/gpu/intern/gpu_codegen.c
@@ -40,10 +40,11 @@
 
 #include "BLI_blenlib.h"
 #include "BLI_hash_mm2a.h"
-#include "BLI_linklist.h"
+#include "BLI_link_utils.h"
 #include "BLI_utildefines.h"
 #include "BLI_dynstr.h"
 #include "BLI_ghash.h"
+#include "BLI_threads.h"
 
 #include "PIL_time.h"
 
@@ -75,39 +76,54 @@ static char *glsl_material_library = NULL;
  * same for 2 different Materials. Unused GPUPasses are free by Garbage collection.
  **/
 
-static LinkNode *pass_cache = NULL; /* GPUPass */
+/* Only use one linklist that contains the GPUPasses grouped by hash. */
+static GPUPass *pass_cache = NULL;
+static SpinLock pass_cache_spin;
 
-static uint32_t gpu_pass_hash(const char *vert, const char *geom, const char *frag, const char *defs)
+static uint32_t gpu_pass_hash(const char *frag_gen, const char *defs)
 {
 	BLI_HashMurmur2A hm2a;
 	BLI_hash_mm2a_init(&hm2a, 0);
-	BLI_hash_mm2a_add(&hm2a, (unsigned char *)frag, strlen(frag));
-	BLI_hash_mm2a_add(&hm2a, (unsigned char *)vert, strlen(vert));
+	BLI_hash_mm2a_add(&hm2a, (unsigned char *)frag_gen, strlen(frag_gen));
 	if (defs)
 		BLI_hash_mm2a_add(&hm2a, (unsigned char *)defs, strlen(defs));
-	if (geom)
-		BLI_hash_mm2a_add(&hm2a, (unsigned char *)geom, strlen(geom));
 
 	return BLI_hash_mm2a_end(&hm2a);
 }
 
-/* Search by hash then by exact string match. */
-static GPUPass *gpu_pass_cache_lookup(
-        const char *vert, const char *geom, const char *frag, const char *defs, uint32_t hash)
+/* Search by hash only. Return first pass with the same hash.
+ * There is hash collision if (pass->next && pass->next->hash == hash) */
+static GPUPass *gpu_pass_cache_lookup(uint32_t hash)
 {
-	for (LinkNode *ln = pass_cache; ln; ln = ln->next) {
-		GPUPass *pass = (GPUPass *)ln->link;
+	BLI_spin_lock(&pass_cache_spin);
+	/* Could be optimized with a Lookup table. */
+	for (GPUPass *pass = pass_cache; pass; pass = pass->next) {
 		if (pass->hash == hash) {
-			/* Note: Could be made faster if that becomes a real bottleneck. */
-			if ((defs != NULL) && (strcmp(pass->defines, defs) != 0)) { /* Pass */ }
-			else if ((geom != NULL) && (strcmp(pass->geometrycode, geom) != 0)) { /* Pass */ }
-			else if ((strcmp(pass->fragmentcode, frag) == 0) &&
-			         (strcmp(pass->vertexcode, vert) == 0))
-			{
-				return pass;
-			}
+			BLI_spin_unlock(&pass_cache_spin);
+			return pass;
 		}
 	}
+	BLI_spin_unlock(&pass_cache_spin);
+	return NULL;
+}
+
+/* Check all possible passes with the same hash. */
+static GPUPass *gpu_pass_cache_resolve_collision(
+        GPUPass *pass, const char *vert, const char *geom, const char *frag, const char *defs, uint32_t hash)
+{
+	BLI_spin_lock(&pass_cache_spin);
+	/* Collision, need to strcmp the whole shader. */
+	for (; pass && (pass->hash == hash); pass = pass->next) {
+		if ((defs != NULL) && (strcmp(pass->defines, defs) != 0)) { /* Pass */ }
+		else if ((geom != NULL) && (strcmp(pass->geometrycode, geom) != 0)) { /* Pass */ }
+		else if ((strcmp(pass->fragmentcode, frag) == 0) &&
+		         (strcmp(pass->vertexcode, vert) == 0))
+		{
+			BLI_spin_unlock(&pass_cache_spin);
+			return pass;
+		}
+	}
+	BLI_spin_unlock(&pass_cache_spin);
 	return NULL;
 }
 
@@ -1099,12 +1115,12 @@ void GPU_code_generate_glsl_lib(void)
 
 /* GPU pass binding/unbinding */
 
-GPUShader *GPU_pass_shader(GPUPass *pass)
+GPUShader *GPU_pass_shader_get(GPUPass *pass)
 {
 	return pas

@@ Diff output truncated at 10240 characters. @@



More information about the Bf-blender-cvs mailing list