[Bf-blender-cvs] [efc97b3919e] master: GPUBatch: GL backend isolation

Clément Foucault noreply at git.blender.org
Thu Aug 13 14:47:19 CEST 2020


Commit: efc97b3919ea4fd46b9d2e931ca3fea27e7ea31c
Author: Clément Foucault
Date:   Tue Aug 11 01:31:40 2020 +0200
Branches: master
https://developer.blender.org/rBefc97b3919ea4fd46b9d2e931ca3fea27e7ea31c

GPUBatch: GL backend isolation

This changes the drawing paradigm a bit. The VAO configuration is done
JIT-style and depends on the context's active shader.

This is to allow more flexibility for implementations to do optimizations
at a lower level.

The VAO cache is now its own class to isolate the concept. It is this
class that is referenced by the GLContext for ownership of the contained
VAO ids.

===================================================================

M	source/blender/gpu/CMakeLists.txt
M	source/blender/gpu/GPU_batch.h
M	source/blender/gpu/GPU_shader_interface.h
M	source/blender/gpu/intern/gpu_batch.cc
M	source/blender/gpu/intern/gpu_batch_private.hh
M	source/blender/gpu/intern/gpu_context.cc
M	source/blender/gpu/intern/gpu_context_private.hh
M	source/blender/gpu/intern/gpu_shader.cc
M	source/blender/gpu/intern/gpu_shader_interface.cc
M	source/blender/gpu/opengl/gl_batch.cc
M	source/blender/gpu/opengl/gl_batch.hh
M	source/blender/gpu/opengl/gl_context.cc
M	source/blender/gpu/opengl/gl_context.hh
A	source/blender/gpu/opengl/gl_vertex_array.cc
A	source/blender/gpu/opengl/gl_vertex_array.hh

===================================================================

diff --git a/source/blender/gpu/CMakeLists.txt b/source/blender/gpu/CMakeLists.txt
index fcbe53e599a..906ae31fbc7 100644
--- a/source/blender/gpu/CMakeLists.txt
+++ b/source/blender/gpu/CMakeLists.txt
@@ -92,6 +92,7 @@ set(SRC
   opengl/gl_batch.cc
   opengl/gl_context.cc
   opengl/gl_drawlist.cc
+  opengl/gl_vertex_array.cc
 
   GPU_attr_binding.h
   GPU_batch.h
@@ -143,6 +144,7 @@ set(SRC
   opengl/gl_batch.hh
   opengl/gl_context.hh
   opengl/gl_drawlist.hh
+  opengl/gl_vertex_array.hh
 )
 
 set(LIB
diff --git a/source/blender/gpu/GPU_batch.h b/source/blender/gpu/GPU_batch.h
index d71d4d5435f..33d539e3a9e 100644
--- a/source/blender/gpu/GPU_batch.h
+++ b/source/blender/gpu/GPU_batch.h
@@ -30,7 +30,6 @@
 
 #include "GPU_element.h"
 #include "GPU_shader.h"
-#include "GPU_shader_interface.h"
 #include "GPU_vertex_buffer.h"
 
 #define GPU_BATCH_VBO_MAX_LEN 6
@@ -59,9 +58,7 @@ typedef enum eGPUBatchFlag {
   /** Batch is initialized but it's VBOs are still being populated. (optional) */
   GPU_BATCH_BUILDING = (1 << 16),
   /** Cached data need to be rebuild. (VAO, PSO, ...) */
-  GPU_BATCH_DIRTY_BINDINGS = (1 << 17),
-  GPU_BATCH_DIRTY_INTERFACE = (1 << 18),
-  GPU_BATCH_DIRTY = (GPU_BATCH_DIRTY_BINDINGS | GPU_BATCH_DIRTY_INTERFACE),
+  GPU_BATCH_DIRTY = (1 << 17),
 } eGPUBatchFlag;
 
 #define GPU_BATCH_OWNS_NONE GPU_BATCH_INVALID
@@ -78,6 +75,7 @@ extern "C" {
 /**
  * IMPORTANT: Do not allocate manually as the real struct is bigger (i.e: GLBatch). This is only
  * the common and "public" part of the struct. Use the provided allocator.
+ * TODO(fclem) Make the content of this struct hidden and expose getters/setters.
  **/
 typedef struct GPUBatch {
   /** verts[0] is required, others can be NULL */
@@ -90,32 +88,8 @@ typedef struct GPUBatch {
   eGPUBatchFlag flag;
   /** Type of geometry to draw. */
   GPUPrimType prim_type;
-
-  /** Current assigned shader. */
+  /** Current assigned shader. DEPRECATED. Here only for uniform binding. */
   struct GPUShader *shader;
-  /** Last context used to draw this batch. */
-  struct GPUContext *context;
-
-  struct GPUShaderInterface *interface;
-  GLuint vao_id;
-
-  /* Vao management: remembers all geometry state (vertex attribute bindings & element buffer)
-   * for each shader interface. Start with a static number of vaos and fallback to dynamic count
-   * if necessary. Once a batch goes dynamic it does not go back. */
-  bool is_dynamic_vao_count;
-  union {
-    /** Static handle count */
-    struct {
-      const struct GPUShaderInterface *interfaces[GPU_BATCH_VAO_STATIC_LEN];
-      uint32_t vao_ids[GPU_BATCH_VAO_STATIC_LEN];
-    } static_vaos;
-    /** Dynamic handle count */
-    struct {
-      uint count;
-      const struct GPUShaderInterface **interfaces;
-      uint32_t *vao_ids;
-    } dynamic_vaos;
-  };
 } GPUBatch;
 
 GPUBatch *GPU_batch_calloc(void);
diff --git a/source/blender/gpu/GPU_shader_interface.h b/source/blender/gpu/GPU_shader_interface.h
index 8aba1236b65..47e4e432d66 100644
--- a/source/blender/gpu/GPU_shader_interface.h
+++ b/source/blender/gpu/GPU_shader_interface.h
@@ -80,7 +80,7 @@ typedef struct GPUShaderInterface {
   /** Buffer containing all inputs names separated by '\0'. */
   char *name_buffer;
   /** Reference to GPUBatches using this interface */
-  struct GPUBatch **batches;
+  void **batches;
   uint batches_len;
   /** Input counts. */
   uint attribute_len;
@@ -109,8 +109,8 @@ const GPUShaderInput *GPU_shaderinterface_ubo(const GPUShaderInterface *, const
 const GPUShaderInput *GPU_shaderinterface_attr(const GPUShaderInterface *, const char *name);
 
 /* keep track of batches using this interface */
-void GPU_shaderinterface_add_batch_ref(GPUShaderInterface *, struct GPUBatch *);
-void GPU_shaderinterface_remove_batch_ref(GPUShaderInterface *, struct GPUBatch *);
+void GPU_shaderinterface_add_batch_ref(GPUShaderInterface *interface, void *cache);
+void GPU_shaderinterface_remove_batch_ref(GPUShaderInterface *interface, void *cache);
 
 #ifdef __cplusplus
 }
diff --git a/source/blender/gpu/intern/gpu_batch.cc b/source/blender/gpu/intern/gpu_batch.cc
index 27196413b20..995e1afb236 100644
--- a/source/blender/gpu/intern/gpu_batch.cc
+++ b/source/blender/gpu/intern/gpu_batch.cc
@@ -26,6 +26,8 @@
 
 #include "MEM_guardedalloc.h"
 
+#include "BLI_math_base.h"
+
 #include "GPU_batch.h"
 #include "GPU_batch_presets.h"
 #include "GPU_extensions.h"
@@ -46,49 +48,15 @@
 
 using namespace blender::gpu;
 
-static GLuint g_default_attr_vbo = 0;
-
-static void gpu_batch_bind(GPUBatch *batch);
-static void batch_update_program_bindings(GPUBatch *batch, uint i_first);
-
-void GPU_batch_vao_cache_clear(GPUBatch *batch)
+void GPU_batch_vao_cache_clear(GPUBatch *UNUSED(batch))
 {
-  if (batch->context == NULL) {
-    return;
-  }
-  if (batch->is_dynamic_vao_count) {
-    for (int i = 0; i < batch->dynamic_vaos.count; i++) {
-      if (batch->dynamic_vaos.vao_ids[i]) {
-        GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
-      }
-      if (batch->dynamic_vaos.interfaces[i]) {
-        GPU_shaderinterface_remove_batch_ref(
-            (GPUShaderInterface *)batch->dynamic_vaos.interfaces[i], batch);
-      }
-    }
-    MEM_freeN((void *)batch->dynamic_vaos.interfaces);
-    MEM_freeN(batch->dynamic_vaos.vao_ids);
-  }
-  else {
-    for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; i++) {
-      if (batch->static_vaos.vao_ids[i]) {
-        GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
-      }
-      if (batch->static_vaos.interfaces[i]) {
-        GPU_shaderinterface_remove_batch_ref(
-            (GPUShaderInterface *)batch->static_vaos.interfaces[i], batch);
-      }
-    }
-  }
-  batch->is_dynamic_vao_count = false;
-  for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; i++) {
-    batch->static_vaos.vao_ids[i] = 0;
-    batch->static_vaos.interfaces[i] = NULL;
-  }
-  gpu_context_remove_batch(batch->context, batch);
-  batch->context = NULL;
+  /* TODO remove */
 }
 
+/* -------------------------------------------------------------------- */
+/** \name Creation & Deletion
+ * \{ */
+
 GPUBatch *GPU_batch_calloc(void)
 {
   GPUBatch *batch = GPUBackend::get()->batch_alloc();
@@ -126,7 +94,6 @@ void GPU_batch_init_ex(GPUBatch *batch,
   batch->elem = elem;
   batch->prim_type = prim_type;
   batch->flag = owns_flag | GPU_BATCH_INIT | GPU_BATCH_DIRTY;
-  batch->context = NULL;
   batch->shader = NULL;
 }
 
@@ -144,7 +111,6 @@ void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
 
 void GPU_batch_clear(GPUBatch *batch)
 {
-  GPU_batch_vao_cache_clear(batch);
   if (batch->flag & GPU_BATCH_OWNS_INDEX) {
     GPU_indexbuf_discard(batch->elem);
   }
@@ -172,11 +138,17 @@ void GPU_batch_discard(GPUBatch *batch)
   delete static_cast<Batch *>(batch);
 }
 
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Buffers Management
+ * \{ */
+
 /* NOTE: Override ONLY the first instance vbo (and free them if owned). */
 void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
 {
   BLI_assert(inst);
-  batch->flag |= GPU_BATCH_DIRTY_BINDINGS;
+  batch->flag |= GPU_BATCH_DIRTY;
 
   if (batch->inst[0] && (batch->flag & GPU_BATCH_OWNS_INST_VBO)) {
     GPU_vertbuf_discard(batch->inst[0]);
@@ -190,7 +162,7 @@ void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
 void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo)
 {
   BLI_assert(elem);
-  batch->flag |= GPU_BATCH_DIRTY_BINDINGS;
+  batch->flag |= GPU_BATCH_DIRTY;
 
   if (batch->elem && (batch->flag & GPU_BATCH_OWNS_INDEX)) {
     GPU_indexbuf_discard(batch->elem);
@@ -203,7 +175,7 @@ void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo)
 int GPU_batch_instbuf_add_ex(GPUBatch *batch, GPUVertBuf *insts, bool own_vbo)
 {
   BLI_assert(insts);
-  batch->flag |= GPU_BATCH_DIRTY_BINDINGS;
+  batch->flag |= GPU_BATCH_DIRTY;
 
   for (uint v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
     if (batch->inst[v] == NULL) {
@@ -228,7 +200,7 @@ int GPU_batch_instbuf_add_ex(GPUBatch *batch, GPUVertBuf *insts, bool own_vbo)
 int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
 {
   BLI_assert(verts);
-  batch->flag |= GPU_BATCH_DIRTY_BINDINGS;
+  batch->flag |= GPU_BATCH_DIRTY;
 
   for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
     if (batch->verts[v] == NULL) {
@@ -246,254 +218,20 @@ int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
   return -1;
 }
 
-static GLuint batch_vao_get(GPUBatch *batch)
-{
-  /* Search through cache */
-  if (batch->is_dynamic_vao_count) {
-    for (int i = 0; i < batch->dynamic_vaos.count; i++) {
-      if (batch->dynamic_vaos.interfaces[i] == batch->interface) {
-        return batch->dynamic_vaos.vao_ids[i];
-      }
-    }
-  }
-  else {
-    for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; i++) {
-      if (batch->static_vaos.interfaces[i] == batch->interface) {
-        return batch->static_vaos.vao_ids[i];
-      }
-    }
-  }
-
-  /* Set context of this batch.
-   * It will be bound to it until GPU_batch_vao_cache_clear is called.
-   * Until then it can only be drawn with this context. */
-  if (batch->context == NULL) {
-    batch->context = GPU_context_active_get();
-    gpu_context_add_batch(batch->context, batch);
-  }
-#if TRUST_NO_ONE
-  else {
-    /* Make sure you are not trying to draw this batch in another context. */
-    assert(batch->context == GPU_context_active_get());
-  }
-#endif
-
-  /* Cache miss, time to add a new entry! */
-  GLuint new_vao = 0;
-  if (!batch->is_dynamic_vao_count) {
-    int i; /* find first unused slot */
-    for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; i++) {
-      if (batch->static_vaos.vao_ids[i] == 0) {
-        break;
-      }
-    }
-
-    if (i < GPU_BATCH_VAO_STATIC_LEN) {
-      batch->static_vaos.interfaces[i] = batch->interface;
-      batch->static_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
-    }
-    else {
-      /* Not enough place switch to dynamic. */
-      batch->is_dynamic_vao_count = true;
-      /* Erase previous entries, they will be added back if drawn again. */
-      for (int j = 0; j < GPU_BATCH_VAO_STATIC_LEN; j++) {


@@ Diff output truncated at 10240 characters. @@



More information about the Bf-blender-cvs mailing list