[Bf-blender-cvs] [d4292bbd28d] temp_bmesh_multires: * Added clang-cl support to BLI_strict_flags.h

Joseph Eagar noreply at git.blender.org
Mon May 24 05:58:35 CEST 2021


Commit: d4292bbd28d2e1db2ed01928bd751b8dd3d45f07
Author: Joseph Eagar
Date:   Sun May 23 20:39:52 2021 -0700
Branches: temp_bmesh_multires
https://developer.blender.org/rBd4292bbd28d2e1db2ed01928bd751b8dd3d45f07

* Added clang-cl support to BLI_strict_flags.h

===================================================================

M	intern/atomic/intern/atomic_ops_msvc.h
M	source/blender/blenkernel/intern/subdiv_ccg.c
M	source/blender/blenlib/BLI_compiler_attrs.h
M	source/blender/blenlib/BLI_strict_flags.h
M	source/blender/blenlib/intern/BLI_ghash_utils.c
M	source/blender/blenlib/intern/BLI_mempool.c
M	source/blender/bmesh/intern/bmesh_log.c
M	source/blender/render/intern/bake.c

===================================================================
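The BLI_strict_flags.h hunk named in the subject falls past the truncation
point below, so it is not visible in this message. As a rough illustrative
sketch only (not the committed patch): clang-cl defines both __clang__ and
_MSC_VER, so a strict-flags header has to test for Clang before plain MSVC
in order to use GCC-style diagnostic pragmas:

/* Illustrative sketch, not the actual BLI_strict_flags.h change. */
#if defined(__clang__)
#  pragma clang diagnostic error "-Wsign-conversion"
#  pragma clang diagnostic error "-Wsign-compare"
#elif defined(__GNUC__)
#  pragma GCC diagnostic error "-Wsign-conversion"
#  pragma GCC diagnostic error "-Wsign-compare"
#elif defined(_MSC_VER)
#  pragma warning(error : 4018) /* signed/unsigned mismatch */
#  pragma warning(error : 4245) /* signed/unsigned conversion mismatch */
#endif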

diff --git a/intern/atomic/intern/atomic_ops_msvc.h b/intern/atomic/intern/atomic_ops_msvc.h
index c9ad1a46ab9..57589d9bcc3 100644
--- a/intern/atomic/intern/atomic_ops_msvc.h
+++ b/intern/atomic/intern/atomic_ops_msvc.h
@@ -49,27 +49,27 @@
 /* Unsigned */
 ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
-  return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x) + x;
+  return (uint64_t)(InterlockedExchangeAdd64((int64_t *)p, (int64_t)x) + (int64_t)x);
 }
 
 ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
-  return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x)) - x;
+  return (uint64_t)(InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x)) - (int64_t)x);
 }
 
 ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
 {
-  return InterlockedCompareExchange64((int64_t *)v, _new, old);
+  return (uint64_t)(InterlockedCompareExchange64((int64_t *)v, _new, old));
 }
 
 ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
 {
-  return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x);
+  return (uint64_t)InterlockedExchangeAdd64((int64_t *)p, (int64_t)x);
 }
 
 ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
 {
-  return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x));
+  return (uint64_t)InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x));
 }
 
 /* Signed */
@@ -103,32 +103,32 @@ ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)
 /* Unsigned */
 ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
-  return InterlockedExchangeAdd(p, x) + x;
+  return (uint32_t)InterlockedExchangeAdd(p, x) + x;
 }
 
 ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
-  return InterlockedExchangeAdd(p, -((int32_t)x)) - x;
+  return (uint32_t)InterlockedExchangeAdd(p, -((int32_t)x)) - x;
 }
 
 ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
 {
-  return InterlockedCompareExchange((long *)v, _new, old);
+  return (uint32_t)InterlockedCompareExchange((long *)v, _new, old);
 }
 
 ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x)
 {
-  return InterlockedExchangeAdd(p, x);
+  return (uint32_t)InterlockedExchangeAdd(p, x);
 }
 
 ATOMIC_INLINE uint32_t atomic_fetch_and_or_uint32(uint32_t *p, uint32_t x)
 {
-  return InterlockedOr((long *)p, x);
+  return (uint32_t)InterlockedOr((long *)p, x);
 }
 
 ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x)
 {
-  return InterlockedAnd((long *)p, x);
+  return (uint32_t)InterlockedAnd((long *)p, x);
 }
 
 /* Signed */
@@ -205,9 +205,9 @@ ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b)
 ATOMIC_INLINE int8_t atomic_fetch_and_and_int8(int8_t *p, int8_t b)
 {
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
-  return InterlockedAnd8((char *)p, (char)b);
+  return (int8_t)InterlockedAnd8((char *)p, (char)b);
 #else
-  return _InterlockedAnd8((char *)p, (char)b);
+  return (int8_t)_InterlockedAnd8((char *)p, (char)b);
 #endif
 }
 
@@ -215,9 +215,9 @@ ATOMIC_INLINE int8_t atomic_fetch_and_and_int8(int8_t *p, int8_t b)
 ATOMIC_INLINE int8_t atomic_fetch_and_or_int8(int8_t *p, int8_t b)
 {
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
-  return InterlockedOr8((char *)p, (char)b);
+  return (int8_t)InterlockedOr8((char *)p, (char)b);
 #else
-  return _InterlockedOr8((char *)p, (char)b);
+  return (int8_t)_InterlockedOr8((char *)p, (char)b);
 #endif
 }
 
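Every changed return in atomic_ops_msvc.h above follows one pattern: the
MSVC Interlocked* intrinsics take and return signed types, so once
sign-conversion warnings are promoted to errors (as strict flags do under
clang-cl), the implicit conversion back to the unsigned wrapper type needs
an explicit cast. A self-contained sketch of the fix, where
fake_interlocked_add64() is a stand-in for the real intrinsic:

#include <stdint.h>

/* Stand-in for InterlockedExchangeAdd64(): returns the previous value. */
static int64_t fake_interlocked_add64(int64_t *p, int64_t x)
{
  const int64_t prev = *p;
  *p = prev + x;
  return prev;
}

static uint64_t add_and_fetch_u64(uint64_t *p, uint64_t x)
{
  /* Keep the arithmetic signed and cast exactly once at the end; an
   * implicit int64_t -> uint64_t conversion here would be rejected
   * under -Werror=sign-conversion. */
  return (uint64_t)(fake_interlocked_add64((int64_t *)p, (int64_t)x) + (int64_t)x);
}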
diff --git a/source/blender/blenkernel/intern/subdiv_ccg.c b/source/blender/blenkernel/intern/subdiv_ccg.c
index a59f9e0c633..265cab01dc7 100644
--- a/source/blender/blenkernel/intern/subdiv_ccg.c
+++ b/source/blender/blenkernel/intern/subdiv_ccg.c
@@ -28,6 +28,7 @@
 
 #include "MEM_guardedalloc.h"
 
+#include "BLI_ghash.h"
 #include "BLI_math_bits.h"
 #include "BLI_math_vector.h"
 #include "BLI_task.h"
@@ -50,6 +51,11 @@ static void subdiv_ccg_average_inner_face_grids(SubdivCCG *subdiv_ccg,
                                                 CCGKey *key,
                                                 SubdivCCGFace *face);
 
+void subdiv_ccg_average_faces_boundaries_and_corners(SubdivCCG *subdiv_ccg,
+                                                     CCGKey *key,
+                                                     struct CCGFace **effected_faces,
+                                                     int num_effected_faces);
+
 /** \} */
 
 /* -------------------------------------------------------------------- */
@@ -889,11 +895,12 @@ void BKE_subdiv_ccg_update_normals(SubdivCCG *subdiv_ccg,
     return;
   }
   subdiv_ccg_recalc_modified_inner_grid_normals(subdiv_ccg, effected_faces, num_effected_faces);
-  /* TODO(sergey): Only average elements which are adjacent to modified
-   * faces. */
+
   CCGKey key;
   BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
-  subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
+
+  subdiv_ccg_average_faces_boundaries_and_corners(
+      subdiv_ccg, &key, effected_faces, num_effected_faces);
 }
 
 /** \} */
@@ -1032,6 +1039,9 @@ static void subdiv_ccg_average_inner_grids_task(void *__restrict userdata_v,
 typedef struct AverageGridsBoundariesData {
   SubdivCCG *subdiv_ccg;
   CCGKey *key;
+
+  /* Optional lookup table. Maps task range index to index in subdiv_ccg->adjacent_edges. */
+  int *idxmap;
 } AverageGridsBoundariesData;
 
 typedef struct AverageGridsBoundariesTLSData {
@@ -1079,10 +1089,12 @@ static void subdiv_ccg_average_grids_boundary(SubdivCCG *subdiv_ccg,
 }
 
 static void subdiv_ccg_average_grids_boundaries_task(void *__restrict userdata_v,
-                                                     const int adjacent_edge_index,
+                                                     const int n,
                                                      const TaskParallelTLS *__restrict tls_v)
 {
   AverageGridsBoundariesData *data = userdata_v;
+  const int adjacent_edge_index = data->idxmap ? data->idxmap[n] : n;
+
   AverageGridsBoundariesTLSData *tls = tls_v->userdata_chunk;
   SubdivCCG *subdiv_ccg = data->subdiv_ccg;
   CCGKey *key = data->key;
@@ -1100,6 +1112,9 @@ static void subdiv_ccg_average_grids_boundaries_free(const void *__restrict UNUS
 typedef struct AverageGridsCornerData {
   SubdivCCG *subdiv_ccg;
   CCGKey *key;
+
+  /* Optional lookup table. Maps task range index to index in subdiv_ccg->adjacent_vertices. */
+  int *idxmap;
 } AverageGridsCornerData;
 
 static void subdiv_ccg_average_grids_corners(SubdivCCG *subdiv_ccg,
@@ -1128,10 +1143,11 @@ static void subdiv_ccg_average_grids_corners(SubdivCCG *subdiv_ccg,
 }
 
 static void subdiv_ccg_average_grids_corners_task(void *__restrict userdata_v,
-                                                  const int adjacent_vertex_index,
+                                                  const int n,
                                                   const TaskParallelTLS *__restrict UNUSED(tls_v))
 {
   AverageGridsCornerData *data = userdata_v;
+  const int adjacent_vertex_index = data->idxmap ? data->idxmap[n] : n;
   SubdivCCG *subdiv_ccg = data->subdiv_ccg;
   CCGKey *key = data->key;
   SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[adjacent_vertex_index];
@@ -1143,9 +1159,7 @@ static void subdiv_ccg_average_all_boundaries(SubdivCCG *subdiv_ccg, CCGKey *key
   TaskParallelSettings parallel_range_settings;
   BLI_parallel_range_settings_defaults(&parallel_range_settings);
   AverageGridsBoundariesData boundaries_data = {
-      .subdiv_ccg = subdiv_ccg,
-      .key = key,
-  };
+      .subdiv_ccg = subdiv_ccg, .key = key, .idxmap = NULL};
   AverageGridsBoundariesTLSData tls_data = {NULL};
   parallel_range_settings.userdata_chunk = &tls_data;
   parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
@@ -1161,10 +1175,7 @@ static void subdiv_ccg_average_all_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
 {
   TaskParallelSettings parallel_range_settings;
   BLI_parallel_range_settings_defaults(&parallel_range_settings);
-  AverageGridsCornerData corner_data = {
-      .subdiv_ccg = subdiv_ccg,
-      .key = key,
-  };
+  AverageGridsCornerData corner_data = {.subdiv_ccg = subdiv_ccg, .key = key, .idxmap = NULL};
   BLI_task_parallel_range(0,
                           subdiv_ccg->num_adjacent_vertices,
                           &corner_data,
@@ -1198,6 +1209,108 @@ void BKE_subdiv_ccg_average_grids(SubdivCCG *subdiv_ccg)
   subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
 }
 
+void subdiv_ccg_average_faces_boundaries_and_corners(SubdivCCG *subdiv_ccg,
+                                                     CCGKey *key,
+                                                     struct CCGFace **effected_faces,
+                                                     int num_effected_faces)
+{
+  Subdiv *subdiv = subdiv_ccg->subdiv;
+  GSet *adjacent_verts = BLI_gset_ptr_new(__func__);
+  GSet *adjacent_edges = BLI_gset_ptr_new(__func__);
+  OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
+  GSetIterator gi;
+
+  StaticOrHeapIntStorage face_vertices_storage;
+  StaticOrHeapIntStorage face_edges_storage;
+  static_or_heap_storage_init(&face_vertices_storage);
+  static_or_heap_storage_init(&face_edges_storage);
+
+  for (int i = 0; i < num_effected_faces; i++) {
+    SubdivCCGFace *face = (SubdivCCGFace *)effected_faces[i];
+    int face_index = face - subdiv_ccg->faces;
+    const int num_face_grids = face->num_grids;
+    const int num_face_edges = num_face_grids;
+    int *face_vertices = static_or_heap_storage_get(&face_vertices_storage, num_face_edges);
+    topology_refiner->getFaceVertices(topology_refiner, face_index, face_vertices);
+
+    /* Note that the order of edges is the same as the order of MLoops, which
+     * also means it is the same as the order of grids. */
+    int *face_edges = static_or_heap_storage_get(&face_edges_storage, num_face_edges);
+    topology_refiner->getFaceEdges(topology_refiner, face_index, face_edges);
+    for (int corner = 0; corner < num_face_edges; corner++) {
+      const int vertex_index = face_vertices[corner];
+      const int edge_index = face_ed

@@ Diff output truncated at 10240 characters. @@
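The visible part of subdiv_ccg_average_faces_boundaries_and_corners() shows
the intent of the subdiv_ccg.c change: instead of averaging every boundary
and corner (the old TODO path), the affected adjacent edges and vertices are
collected into GSets, flattened into the new idxmap lookup table, and the
unchanged task callbacks translate their compact range index back through
it. A minimal sketch of that pattern, under the same assumptions (the
collection loop itself is cut off above; the names mirror the visible code):

/* Sketch, not the committed code: flatten the GSet of affected
 * adjacent-edge indices, then iterate only over those. */
const int count = (int)BLI_gset_len(adjacent_edges);
int *idxmap = MEM_mallocN(sizeof(int) * (size_t)count, __func__);
int i = 0;
GSET_ITER (gi, adjacent_edges) {
  idxmap[i++] = POINTER_AS_INT(BLI_gsetIterator_getKey(&gi));
}

AverageGridsBoundariesData boundaries_data = {
    .subdiv_ccg = subdiv_ccg, .key = key, .idxmap = idxmap};
/* Each task invocation gets n in [0, count) and resolves the real index
 * via data->idxmap[n], as subdiv_ccg_average_grids_boundaries_task() does. */
BLI_task_parallel_range(0,
                        count,
                        &boundaries_data,
                        subdiv_ccg_average_grids_boundaries_task,
                        &parallel_range_settings);
MEM_freeN(idxmap);
BLI_gset_free(adjacent_edges, NULL);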


