[Bf-blender-cvs] [44ea61a] compositor-2016: Editmesh undo memory optimization

Campbell Barton noreply at git.blender.org
Wed Jun 8 21:51:15 CEST 2016


Commit: 44ea61ab470cc26b6bbc12f5d2bc3b659c72c0c0
Author: Campbell Barton
Date:   Mon May 30 15:31:31 2016 +1000
Branches: compositor-2016
https://developer.blender.org/rB44ea61ab470cc26b6bbc12f5d2bc3b659c72c0c0

Editmesh undo memory optimization

Previously a whole mesh was stored for each undo step.
This commit uses BLI_array_store to de-duplicate memory use between undo steps.

Memory savings depend entirely on the kinds of edits performed;
in my own tests, 5x-15x less memory use is common.

Compacting the memory adds some overhead, however it's done in a background thread
so it's not blocking in most cases.

The new behavior and the threading can each be ifdef'd out to check for regressions.

See D2026 for details.
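
For readers skimming the diff below, the core of the change is this de-duplication
pattern: each undo step adds its arrays to a BArrayStore as a new state, passing the
previous step's state as a reference so unchanged chunks are shared rather than
copied. A minimal sketch follows, using only the BLI_array_store calls that appear
in the diff; the sketch_* wrapper names are hypothetical, not part of the patch:

/* Hypothetical wrappers illustrating the de-duplication pattern;
 * only the BLI_array_store_* calls are from the actual API. */
#include "BLI_array_store.h"

/* Store one undo step: the reference state (from the previous step, may be
 * NULL) lets the store share chunks whose contents did not change. */
static BArrayState *sketch_state_store(
        BArrayStore *bs, const void *data, size_t data_len, size_t stride,
        BArrayState *state_prev)
{
	return BLI_array_store_state_add(bs, data, data_len * stride, state_prev);
}

/* Read an undo step back: expand the compacted state into a fresh
 * allocation that the mesh code then owns. */
static void *sketch_state_load(BArrayState *state, size_t *r_len)
{
	return BLI_array_store_state_data_get_alloc(state, r_len);
}

/* Free an undo step: chunks still referenced by other states persist. */
static void sketch_state_free(BArrayStore *bs, BArrayState *state)
{
	BLI_array_store_state_remove(bs, state);
}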

===================================================================

M	source/blender/editors/mesh/editmesh_undo.c

===================================================================

diff --git a/source/blender/editors/mesh/editmesh_undo.c b/source/blender/editors/mesh/editmesh_undo.c
index 287ee97..b9d3fd6 100644
--- a/source/blender/editors/mesh/editmesh_undo.c
+++ b/source/blender/editors/mesh/editmesh_undo.c
@@ -39,18 +39,39 @@
 #include "ED_mesh.h"
 #include "ED_util.h"
 
+#define USE_ARRAY_STORE
 
-/* for callbacks */
+#ifdef USE_ARRAY_STORE
+// #  define DEBUG_PRINT
+// #  define DEBUG_TIME
+#  ifdef DEBUG_TIME
+#    include "PIL_time_utildefines.h"
+#  endif
 
-static void *getEditMesh(bContext *C)
-{
-	Object *obedit = CTX_data_edit_object(C);
-	if (obedit && obedit->type == OB_MESH) {
-		Mesh *me = obedit->data;
-		return me->edit_btmesh;
-	}
-	return NULL;
-}
+#  include "BLI_array_store.h"
+#  include "BLI_math_base.h"
+   /* check on best size later... */
+#  define ARRAY_CHUNK_SIZE 256
+
+#  define USE_ARRAY_STORE_THREAD
+#endif
+
+#ifdef USE_ARRAY_STORE_THREAD
+#  include "BLI_task.h"
+#endif
+
+
+#ifdef USE_ARRAY_STORE
+
+/* Singly-linked list of layers stored per type */
+typedef struct BArrayCustomData {
+	struct BArrayCustomData *next;
+	CustomDataType type;
+	int states_len;  /* number of layers for each type */
+	BArrayState *states[0];
+} BArrayCustomData;
+
+#endif
 
 typedef struct UndoMesh {
 	Mesh me;
@@ -65,11 +86,463 @@ typedef struct UndoMesh {
 	 * There are a few ways this could be made to work but for now it's a known limitation with mixing
 	 * object and editmode operations - Campbell */
 	int shapenr;
+
+#ifdef USE_ARRAY_STORE
+	/* NULL arrays are considered empty */
+	struct {
+		/* most data is stored as 'custom' data */
+		BArrayCustomData *vdata, *edata, *ldata, *pdata;
+		BArrayState **keyblocks;
+		BArrayState *mselect;
+	} store;
+#endif  /* USE_ARRAY_STORE */
 } UndoMesh;
 
+
+#ifdef USE_ARRAY_STORE
+
+/** \name Array Store
+ * \{ */
+
+static struct {
+	BArrayStore **bs_all;
+	int          bs_all_len;
+	int users;
+
+	/* We could have the undo API pass in the previous state, for now store a local list */
+	ListBase local_links;
+
+#ifdef USE_ARRAY_STORE_THREAD
+	TaskPool *task_pool;
+#endif
+
+} um_arraystore = {NULL};
+
+static BArrayStore *array_store_at_size_ensure(const int stride)
+{
+	if (um_arraystore.bs_all_len < stride) {
+		um_arraystore.bs_all_len = stride;
+		um_arraystore.bs_all = MEM_recallocN(um_arraystore.bs_all, sizeof(*um_arraystore.bs_all) * stride);
+	}
+	BArrayStore **bs_p = &um_arraystore.bs_all[stride - 1];
+
+	if ((*bs_p) == NULL) {
+#if 0
+		unsigned int chunk_count = ARRAY_CHUNK_SIZE;
+#else
+		/* calculate best chunk-count to fit a power of two */
+		unsigned int chunk_count = ARRAY_CHUNK_SIZE;
+		{
+			unsigned int size = chunk_count * stride;
+			size = power_of_2_max_u(size);
+			size = MEM_SIZE_OPTIMAL(size);
+			chunk_count = size / stride;
+		}
+#endif
+
+		(*bs_p) = BLI_array_store_create(stride, chunk_count);
+	}
+	return *bs_p;
+}
+
+static BArrayStore *array_store_at_size_get(const int stride)
+{
+	BLI_assert(stride > 0 && stride <= um_arraystore.bs_all_len);
+	return um_arraystore.bs_all[stride - 1];
+}
+
+#ifdef DEBUG_PRINT
+static void um_arraystore_memory_usage(size_t *r_size_expanded, size_t *r_size_compacted)
+{
+	size_t size_compacted = 0;
+	size_t size_expanded  = 0;
+	for (int i = 0; i < um_arraystore.bs_all_len; i++) {
+		BArrayStore *bs = um_arraystore.bs_all[i];
+		if (bs) {
+			size_compacted += BLI_array_store_calc_size_compacted_get(bs);
+			size_expanded  += BLI_array_store_calc_size_expanded_get(bs);
+		}
+	}
+
+	*r_size_expanded = size_expanded;
+	*r_size_compacted = size_compacted;
+}
+#endif
+
+static void um_arraystore_cd_compact(
+        struct CustomData *cdata, const size_t data_len,
+        bool create,
+        const BArrayCustomData *bcd_reference,
+        BArrayCustomData **r_bcd_first)
+{
+	if (data_len == 0) {
+		if (create) {
+			*r_bcd_first = NULL;
+		}
+	}
+
+	const BArrayCustomData *bcd_reference_current = bcd_reference;
+	BArrayCustomData *bcd = NULL, *bcd_first = NULL, *bcd_prev = NULL;
+	for (int layer_start = 0, layer_end; layer_start < cdata->totlayer; layer_start = layer_end) {
+		const CustomDataType type = cdata->layers[layer_start].type;
+
+		layer_end = layer_start + 1;
+		while ((layer_end < cdata->totlayer) &&
+		       (type == cdata->layers[layer_end].type))
+		{
+			layer_end++;
+		}
+
+		const int stride = CustomData_sizeof(type);
+		BArrayStore *bs = create ? array_store_at_size_ensure(stride) : NULL;
+		const int layer_len = layer_end - layer_start;
+
+		if (create) {
+			if (bcd_reference_current && (bcd_reference_current->type == type)) {
+				/* common case, the reference is aligned */
+			}
+			else {
+				bcd_reference_current = NULL;
+
+				/* do a full lookup when un-aligned */
+				if (bcd_reference) {
+					const BArrayCustomData *bcd_iter = bcd_reference;
+					while (bcd_iter) {
+						if (bcd_iter->type == type) {
+							bcd_reference_current = bcd_iter;
+							break;
+						}
+						bcd_iter = bcd_iter->next;
+					}
+				}
+			}
+		}
+
+		if (create) {
+			bcd = MEM_callocN(sizeof(BArrayCustomData) + (layer_len * sizeof(BArrayState *)), __func__);
+			bcd->next = NULL;
+			bcd->type = type;
+			bcd->states_len = layer_end - layer_start;
+
+			if (bcd_prev) {
+				bcd_prev->next = bcd;
+				bcd_prev = bcd;
+			}
+			else {
+				bcd_first = bcd;
+				bcd_prev  = bcd;
+			}
+		}
+
+		CustomDataLayer *layer = &cdata->layers[layer_start];
+		for (int i = 0; i < layer_len; i++, layer++) {
+			if (create) {
+				if (layer->data) {
+					BArrayState *state_reference =
+					        (bcd_reference_current && i < bcd_reference_current->states_len) ?
+					         bcd_reference_current->states[i] : NULL;
+					bcd->states[i] = BLI_array_store_state_add(
+					        bs, layer->data, (size_t)data_len * stride, state_reference);
+				}
+				else {
+					bcd->states[i] = NULL;
+				}
+			}
+
+			if (layer->data) {
+				MEM_freeN(layer->data);
+				layer->data = NULL;
+			}
+		}
+
+		if (create) {
+			if (bcd_reference_current) {
+				bcd_reference_current = bcd_reference_current->next;
+			}
+		}
+	}
+
+	if (create) {
+		*r_bcd_first = bcd_first;
+	}
+}
+
+/**
+ * \note There is no room for data going out of sync here.
+ * The layers and the states are stored together so this can be kept working.
+ */
+static void um_arraystore_cd_expand(
+        const BArrayCustomData *bcd, struct CustomData *cdata, const size_t data_len)
+{
+	CustomDataLayer *layer = cdata->layers;
+	while (bcd) {
+		const int stride = CustomData_sizeof(bcd->type);
+		for (int i = 0; i < bcd->states_len; i++) {
+			BLI_assert(bcd->type == layer->type);
+			if (bcd->states[i]) {
+				size_t state_len;
+				layer->data = BLI_array_store_state_data_get_alloc(bcd->states[i], &state_len);
+				BLI_assert(stride * data_len == state_len);
+				UNUSED_VARS_NDEBUG(stride, data_len);
+			}
+			else {
+				layer->data = NULL;
+			}
+			layer++;
+		}
+		bcd = bcd->next;
+	}
+}
+
+static void um_arraystore_cd_free(BArrayCustomData *bcd)
+{
+	while (bcd) {
+		BArrayCustomData *bcd_next = bcd->next;
+		const int stride = CustomData_sizeof(bcd->type);
+		BArrayStore *bs = array_store_at_size_get(stride);
+		for (int i = 0; i < bcd->states_len; i++) {
+			if (bcd->states[i]) {
+				BLI_array_store_state_remove(bs, bcd->states[i]);
+			}
+		}
+		MEM_freeN(bcd);
+		bcd = bcd_next;
+	}
+}
+
+/**
+ * \param create: When false, only free the arrays.
+ * This is done since, when reading from an undo state, the arrays must be temporarily expanded,
+ * then discarded afterwards; having this argument avoids 2x code paths.
+ */
+static void um_arraystore_compact_ex(
+        UndoMesh *um, const UndoMesh *um_ref,
+        bool create)
+{
+	Mesh *me = &um->me;
+
+	um_arraystore_cd_compact(&me->vdata, me->totvert, create, um_ref ? um_ref->store.vdata : NULL, &um->store.vdata);
+	um_arraystore_cd_compact(&me->edata, me->totedge, create, um_ref ? um_ref->store.edata : NULL, &um->store.edata);
+	um_arraystore_cd_compact(&me->ldata, me->totloop, create, um_ref ? um_ref->store.ldata : NULL, &um->store.ldata);
+	um_arraystore_cd_compact(&me->pdata, me->totpoly, create, um_ref ? um_ref->store.pdata : NULL, &um->store.pdata);
+
+	if (me->key && me->key->totkey) {
+		const size_t stride = me->key->elemsize;
+		BArrayStore *bs = create ? array_store_at_size_ensure(stride) : NULL;
+		if (create) {
+			um->store.keyblocks = MEM_mallocN(me->key->totkey * sizeof(*um->store.keyblocks), __func__);
+		}
+		KeyBlock *keyblock = me->key->block.first;
+		for (int i = 0; i < me->key->totkey; i++, keyblock = keyblock->next) {
+			if (create) {
+				BArrayState *state_reference =
+				        (um_ref && um_ref->me.key && (i < um_ref->me.key->totkey)) ?
+				         um_ref->store.keyblocks[i] : NULL;
+				um->store.keyblocks[i] = BLI_array_store_state_add(
+				        bs, keyblock->data, (size_t)keyblock->totelem * stride,
+				        state_reference);
+			}
+
+			if (keyblock->data) {
+				MEM_freeN(keyblock->data);
+				keyblock->data = NULL;
+			}
+		}
+	}
+
+	if (me->mselect && me->totselect) {
+		BLI_assert(create == (um->store.mselect == NULL));
+		if (create) {
+			BArrayState *state_reference = um_ref ? um_ref->store.mselect : NULL;
+			const size_t stride = sizeof(*me->mselect);
+			BArrayStore *bs = array_store_at_size_ensure(stride);
+			um->store.mselect = BLI_array_store_state_add(
+			        bs, me->mselect, (size_t)me->totselect * stride, state_reference);
+		}
+
+		/* keep me->totselect for validation */
+		MEM_freeN(me->mselect);
+		me->mselect = NULL;
+	}
+
+	if (create) {
+		um_arraystore.users += 1;
+	}
+
+	BKE_mesh_update_customdata_pointers(me, false);
+}
+
+/**
+ * Move data from allocated arrays to de-duplicated states and clear arrays.
+ */
+static void um_arraystore_compact(UndoMesh *um, const UndoMesh *um_ref)
+{
+	um_arraystore_compact_ex(um, um_ref, true);
+}
+
+static void um_arraystore_compact_with_info(UndoMesh *um, const UndoMesh *um_ref)
+{
+#ifdef DEBUG_PRINT
+	size_t size_expanded_prev, size_compacted_prev;
+	um_arraystore_memory_usage(&size_expanded_prev, &size_compacted_prev);
+#endif
+
+#ifdef DEBUG_TIME
+	TIMEIT_START(mesh_undo_compact);
+#endif
+
+	um_arraystore_compact(um, um_ref);
+
+#ifdef DEBUG_TIME
+	TIMEIT_END(mesh_u

@@ Diff output truncated at 10240 characters. @@
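
The threaded compaction falls past the truncation point above. As a reader's aid
only, here is a hedged sketch of how the compact step can be deferred to a
background task, assuming Blender's 2016-era BLI_task API (BLI_task_pool_push,
BLI_task_pool_work_and_wait). The struct and function names below that do not
appear in the truncated diff are invented for illustration; see the full patch
(D2026 and the commit link above) for the actual code.

/* Hypothetical sketch: deferring um_arraystore_compact_with_info() to a
 * background thread via the task pool held in um_arraystore. */
#include "BLI_task.h"
#include "BLI_utildefines.h"

struct UMArrayData {          /* hypothetical name */
	UndoMesh *um;
	const UndoMesh *um_ref;   /* previous step to de-duplicate against */
};

static void sketch_compact_cb(TaskPool *__restrict UNUSED(pool),
                              void *taskdata, int UNUSED(threadid))
{
	struct UMArrayData *um_data = taskdata;
	um_arraystore_compact_with_info(um_data->um, um_data->um_ref);
}

static void sketch_compact_defer(struct UMArrayData *um_data)
{
	/* um_arraystore.task_pool is created once elsewhere in the patch. */
	BLI_task_pool_push(um_arraystore.task_pool, sketch_compact_cb,
	                   um_data, false, TASK_PRIORITY_LOW);
}

/* Before expanding any state back into mesh arrays, pending compaction
 * must finish first. */
static void sketch_compact_wait(void)
{
	BLI_task_pool_work_and_wait(um_arraystore.task_pool);
}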




More information about the Bf-blender-cvs mailing list