[Bf-blender-cvs] [79b1e267ef9] master: Cleanup: Move subdiv_ccg.c to C++
Hans Goudey
noreply at git.blender.org
Fri Oct 7 00:28:50 CEST 2022
Commit: 79b1e267ef9ab21afccd4a569d83ae2b8c9d4654
Author: Hans Goudey
Date: Thu Oct 6 17:24:16 2022 -0500
Branches: master
https://developer.blender.org/rB79b1e267ef9ab21afccd4a569d83ae2b8c9d4654
Cleanup: Move subdiv_ccg.c to C++
In preparation for moving mesh runtime data to C++
===================================================================
M source/blender/blenkernel/CMakeLists.txt
R091 source/blender/blenkernel/intern/subdiv_ccg.c source/blender/blenkernel/intern/subdiv_ccg.cc
===================================================================
diff --git a/source/blender/blenkernel/CMakeLists.txt b/source/blender/blenkernel/CMakeLists.txt
index 12fb6792f93..97bdff217d0 100644
--- a/source/blender/blenkernel/CMakeLists.txt
+++ b/source/blender/blenkernel/CMakeLists.txt
@@ -266,7 +266,7 @@ set(SRC
intern/speaker.c
intern/studiolight.c
intern/subdiv.c
- intern/subdiv_ccg.c
+ intern/subdiv_ccg.cc
intern/subdiv_ccg_mask.c
intern/subdiv_ccg_material.c
intern/subdiv_converter.c
diff --git a/source/blender/blenkernel/intern/subdiv_ccg.c b/source/blender/blenkernel/intern/subdiv_ccg.cc
similarity index 91%
rename from source/blender/blenkernel/intern/subdiv_ccg.c
rename to source/blender/blenkernel/intern/subdiv_ccg.cc
index e7f89a625b3..8c44d53e5bd 100644
--- a/source/blender/blenkernel/intern/subdiv_ccg.c
+++ b/source/blender/blenkernel/intern/subdiv_ccg.cc
@@ -38,7 +38,7 @@ static void subdiv_ccg_average_inner_face_grids(SubdivCCG *subdiv_ccg,
void subdiv_ccg_average_faces_boundaries_and_corners(SubdivCCG *subdiv_ccg,
CCGKey *key,
- struct CCGFace **effected_faces,
+ CCGFace **effected_faces,
int num_effected_faces);
/** \} */
@@ -126,20 +126,21 @@ static void subdiv_ccg_alloc_elements(SubdivCCG *subdiv_ccg, Subdiv *subdiv)
const int grid_area = grid_size * grid_size;
subdiv_ccg->grid_element_size = element_size;
subdiv_ccg->num_grids = num_grids;
- subdiv_ccg->grids = MEM_calloc_arrayN(num_grids, sizeof(CCGElem *), "subdiv ccg grids");
- subdiv_ccg->grids_storage = MEM_calloc_arrayN(
- num_grids, ((size_t)grid_area) * element_size, "subdiv ccg grids storage");
+ subdiv_ccg->grids = static_cast<CCGElem **>(
+ MEM_calloc_arrayN(num_grids, sizeof(CCGElem *), "subdiv ccg grids"));
+ subdiv_ccg->grids_storage = static_cast<unsigned char *>(MEM_calloc_arrayN(
+ num_grids, ((size_t)grid_area) * element_size, "subdiv ccg grids storage"));
const size_t grid_size_in_bytes = (size_t)grid_area * element_size;
for (int grid_index = 0; grid_index < num_grids; grid_index++) {
const size_t grid_offset = grid_size_in_bytes * grid_index;
subdiv_ccg->grids[grid_index] = (CCGElem *)&subdiv_ccg->grids_storage[grid_offset];
}
/* Grid material flags. */
- subdiv_ccg->grid_flag_mats = MEM_calloc_arrayN(
- num_grids, sizeof(DMFlagMat), "ccg grid material flags");
+ subdiv_ccg->grid_flag_mats = static_cast<DMFlagMat *>(
+ MEM_calloc_arrayN(num_grids, sizeof(DMFlagMat), "ccg grid material flags"));
/* Grid hidden flags. */
- subdiv_ccg->grid_hidden = MEM_calloc_arrayN(
- num_grids, sizeof(BLI_bitmap *), "ccg grid material flags");
+ subdiv_ccg->grid_hidden = static_cast<BLI_bitmap **>(
+ MEM_calloc_arrayN(num_grids, sizeof(BLI_bitmap *), "ccg grid material flags"));
for (int grid_index = 0; grid_index < num_grids; grid_index++) {
subdiv_ccg->grid_hidden[grid_index] = BLI_BITMAP_NEW(grid_area, "ccg grid hidden");
}
@@ -147,9 +148,10 @@ static void subdiv_ccg_alloc_elements(SubdivCCG *subdiv_ccg, Subdiv *subdiv)
/* Allocate memory for faces. */
subdiv_ccg->num_faces = num_faces;
if (num_faces) {
- subdiv_ccg->faces = MEM_calloc_arrayN(num_faces, sizeof(SubdivCCGFace), "Subdiv CCG faces");
- subdiv_ccg->grid_faces = MEM_calloc_arrayN(
- num_grids, sizeof(SubdivCCGFace *), "Subdiv CCG grid faces");
+ subdiv_ccg->faces = static_cast<SubdivCCGFace *>(
+ MEM_calloc_arrayN(num_faces, sizeof(SubdivCCGFace), "Subdiv CCG faces"));
+ subdiv_ccg->grid_faces = static_cast<SubdivCCGFace **>(
+ MEM_calloc_arrayN(num_grids, sizeof(SubdivCCGFace *), "Subdiv CCG grid faces"));
}
}
@@ -159,13 +161,13 @@ static void subdiv_ccg_alloc_elements(SubdivCCG *subdiv_ccg, Subdiv *subdiv)
/** \name Grids evaluation
* \{ */
-typedef struct CCGEvalGridsData {
+struct CCGEvalGridsData {
SubdivCCG *subdiv_ccg;
Subdiv *subdiv;
int *face_ptex_offset;
SubdivCCGMaskEvaluator *mask_evaluator;
SubdivCCGMaterialFlagsEvaluator *material_flags_evaluator;
-} CCGEvalGridsData;
+};
static void subdiv_ccg_eval_grid_element_limit(CCGEvalGridsData *data,
const int ptex_face_index,
@@ -175,7 +177,7 @@ static void subdiv_ccg_eval_grid_element_limit(CCGEvalGridsData *data,
{
Subdiv *subdiv = data->subdiv;
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
- if (subdiv->displacement_evaluator != NULL) {
+ if (subdiv->displacement_evaluator != nullptr) {
BKE_subdiv_eval_final_point(subdiv, ptex_face_index, u, v, (float *)element);
}
else if (subdiv_ccg->has_normal) {
@@ -202,7 +204,7 @@ static void subdiv_ccg_eval_grid_element_mask(CCGEvalGridsData *data,
return;
}
float *mask_value_ptr = (float *)(element + subdiv_ccg->mask_offset);
- if (data->mask_evaluator != NULL) {
+ if (data->mask_evaluator != nullptr) {
*mask_value_ptr = data->mask_evaluator->eval_mask(data->mask_evaluator, ptex_face_index, u, v);
}
else {
@@ -286,7 +288,7 @@ static void subdiv_ccg_eval_grids_task(void *__restrict userdata_v,
const int face_index,
const TaskParallelTLS *__restrict UNUSED(tls))
{
- CCGEvalGridsData *data = userdata_v;
+ CCGEvalGridsData *data = static_cast<CCGEvalGridsData *>(userdata_v);
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
SubdivCCGFace *face = &subdiv_ccg->faces[face_index];
if (face->num_grids == 4) {
@@ -318,7 +320,7 @@ static bool subdiv_ccg_evaluate_grids(SubdivCCG *subdiv_ccg,
0, num_faces, &data, subdiv_ccg_eval_grids_task, &parallel_range_settings);
/* If displacement is used, need to calculate normals after all final
* coordinates are known. */
- if (subdiv->displacement_evaluator != NULL) {
+ if (subdiv->displacement_evaluator != nullptr) {
BKE_subdiv_ccg_recalc_normals(subdiv_ccg);
}
return true;
@@ -341,17 +343,17 @@ static void subdiv_ccg_init_faces(SubdivCCG *subdiv_ccg)
}
/* TODO(sergey): Consider making it generic enough to be fit into BLI. */
-typedef struct StaticOrHeapIntStorage {
+struct StaticOrHeapIntStorage {
int static_storage[64];
int static_storage_len;
int *heap_storage;
int heap_storage_len;
-} StaticOrHeapIntStorage;
+};
static void static_or_heap_storage_init(StaticOrHeapIntStorage *storage)
{
storage->static_storage_len = sizeof(storage->static_storage) / sizeof(*storage->static_storage);
- storage->heap_storage = NULL;
+ storage->heap_storage = nullptr;
storage->heap_storage_len = 0;
}
@@ -364,7 +366,8 @@ static int *static_or_heap_storage_get(StaticOrHeapIntStorage *storage, int heap
/* Make sure heap ius big enough. */
if (heap_len > storage->heap_storage_len) {
MEM_SAFE_FREE(storage->heap_storage);
- storage->heap_storage = MEM_malloc_arrayN(heap_len, sizeof(int), "int storage");
+ storage->heap_storage = static_cast<int *>(
+ MEM_malloc_arrayN(heap_len, sizeof(int), "int storage"));
storage->heap_storage_len = heap_len;
}
return storage->heap_storage;
@@ -378,13 +381,16 @@ static void static_or_heap_storage_free(StaticOrHeapIntStorage *storage)
static void subdiv_ccg_allocate_adjacent_edges(SubdivCCG *subdiv_ccg, const int num_edges)
{
subdiv_ccg->num_adjacent_edges = num_edges;
- subdiv_ccg->adjacent_edges = MEM_calloc_arrayN(
- subdiv_ccg->num_adjacent_edges, sizeof(*subdiv_ccg->adjacent_edges), "ccg adjacent edges");
+ subdiv_ccg->adjacent_edges = static_cast<SubdivCCGAdjacentEdge *>(MEM_calloc_arrayN(
+ subdiv_ccg->num_adjacent_edges, sizeof(*subdiv_ccg->adjacent_edges), "ccg adjacent edges"));
}
static SubdivCCGCoord subdiv_ccg_coord(int grid_index, int x, int y)
{
- SubdivCCGCoord coord = {.grid_index = grid_index, .x = x, .y = y};
+ SubdivCCGCoord coord{};
+ coord.grid_index = grid_index;
+ coord.x = x;
+ coord.y = y;
return coord;
}
@@ -403,11 +409,11 @@ static SubdivCCGCoord *subdiv_ccg_adjacent_edge_add_face(SubdivCCG *subdiv_ccg,
const int adjacent_face_index = adjacent_edge->num_adjacent_faces;
++adjacent_edge->num_adjacent_faces;
/* Allocate memory for the boundary elements. */
- adjacent_edge->boundary_coords = MEM_reallocN(adjacent_edge->boundary_coords,
- adjacent_edge->num_adjacent_faces *
- sizeof(*adjacent_edge->boundary_coords));
- adjacent_edge->boundary_coords[adjacent_face_index] = MEM_malloc_arrayN(
- grid_size * 2, sizeof(SubdivCCGCoord), "ccg adjacent boundary");
+ adjacent_edge->boundary_coords = static_cast<SubdivCCGCoord **>(
+ MEM_reallocN(adjacent_edge->boundary_coords,
+ adjacent_edge->num_adjacent_faces * sizeof(*adjacent_edge->boundary_coords)));
+ adjacent_edge->boundary_coords[adjacent_face_index] = static_cast<SubdivCCGCoord *>(
+ MEM_malloc_arrayN(grid_size * 2, sizeof(SubdivCCGCoord), "ccg adjacent boundary"));
return adjacent_edge->boundary_coords[adjacent_face_index];
}
@@ -487,9 +493,10 @@ static void subdiv_ccg_init_faces_edge_neighborhood(SubdivCCG *subdiv_ccg)
static void subdiv_ccg_allocate_adjacent_vertices(SubdivCCG *subdiv_ccg, const int num_vertices)
{
subdiv_ccg->num_adjacent_vertices = num_vertices;
- subdiv_ccg->adjacent_vertices = MEM_calloc_arrayN(subdiv_ccg->num_adjacent_vertices,
- sizeof(*subdiv_ccg->adjacent_vertices),
- "ccg adjacent vertices");
+ subdiv_ccg->adjacent_vertices = static_cast<SubdivCCGAdjacentVertex *>(
+ MEM_calloc_arrayN(subdiv_ccg->num_adjacent_vertices,
+ sizeof(*subdiv_ccg->adjacent_vertices),
+ "ccg adjacent vertices"));
}
/* Returns storage where corner elements are to be stored. This is a pointer
@@ -500,9 +507,9 @@ static SubdivCCGCoord *subdiv_ccg_adjacent_vertex_add_face(
const int adjacent_face_index = adjacent_vertex->num_adjacent_faces;
++adjacent_ve
@@ Diff output truncated at 10240 characters. @@
More information about the Bf-blender-cvs
mailing list