[Bf-blender-cvs] [7c324e7] temp-ghash-experiments: GHash: Add difference and symmetric_difference (with tests).

Bastien Montagne noreply at git.blender.org
Tue Mar 3 09:02:05 CET 2015


Commit: 7c324e76a984f81caae43e1600da007ed4afd892
Author: Bastien Montagne
Date:   Mon Mar 2 18:02:21 2015 +0100
Branches: temp-ghash-experiments
https://developer.blender.org/rB7c324e76a984f81caae43e1600da007ed4afd892

GHash: Add difference and symmetric_difference (with tests).

Also fix some issues in previous commits.

Notes about our 'symmetric_difference':
    * The name is taken from Python, but our function takes multiple arguments
      and returns the keys that are present in exactly one of the given ghashes
      (see the usage sketch below). So we may want a better name for it
      (a real symmetric difference only accepts/works with two args)!
    * Its code is a bit more complex than the others', but I think it's worth it,
      XOR is a really useful operation imho.
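
For illustration only (not part of the commit), here is a minimal usage sketch of
the new GSet variant. It assumes the variadic list is NULL-terminated, as the
'for ( ; ghn; ghn = va_arg(arg, GHash *))' loops in the implementation suggest,
and that NULL copy/free callbacks simply share the key pointers:

    int a, b;  /* dummy objects, their addresses serve as pointer keys */

    GSet *gs_1 = BLI_gset_ptr_new(__func__);
    GSet *gs_2 = BLI_gset_ptr_new(__func__);
    GSet *gs_3 = BLI_gset_ptr_new(__func__);

    BLI_gset_insert(gs_1, &a);  /* &a ends up in two sets -> dropped from the result */
    BLI_gset_insert(gs_2, &a);
    BLI_gset_insert(gs_3, &b);  /* &b is in exactly one set -> kept */

    /* NULL keycopyfp/keyfreefp: keys are shared, nothing is freed;
     * the trailing NULL is assumed to terminate the argument list. */
    GSet *gs_xor = BLI_gset_symmetric_difference(NULL, NULL, gs_1, gs_2, gs_3, NULL);

    /* Expected: BLI_gset_haskey(gs_xor, &b) && !BLI_gset_haskey(gs_xor, &a) */

Whether the result is gs_1 modified in place or a freshly allocated set follows the
behaviour of the other set operations; the sketch does not rely on either.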

===================================================================

M	source/blender/blenlib/BLI_ghash.h
M	source/blender/blenlib/intern/BLI_ghash.c

===================================================================

diff --git a/source/blender/blenlib/BLI_ghash.h b/source/blender/blenlib/BLI_ghash.h
index 2a2b8a9..78f8aa3 100644
--- a/source/blender/blenlib/BLI_ghash.h
+++ b/source/blender/blenlib/BLI_ghash.h
@@ -95,11 +95,18 @@ GHash *BLI_ghash_union_reversed(
         GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp,
         GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp,
         GHash *gh1, GHash *gh2, ...);
-
 GHash *BLI_ghash_intersection(
         GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp,
         GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp,
         GHash *gh1, GHash *gh2, ...);
+GHash *BLI_ghash_difference(
+        GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp,
+        GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp,
+        GHash *gh1, GHash *gh2, ...);
+GHash *BLI_ghash_symmetric_difference(
+        GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp,
+        GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp,
+        GHash *gh1, GHash *gh2, ...);
 
 /* *** */
 
@@ -250,6 +257,8 @@ bool   BLI_gset_issuperset(GSet *gs1, GSet *gs2);
 
 GSet *BLI_gset_union(GSetKeyCopyFP keycopyfp, GSet *gs1, GSet *gs2, ...);
 GSet *BLI_gset_intersection(GSetKeyCopyFP keycopyfp, GSetKeyFreeFP keyfreefp, GSet *gs1, GSet *gs2, ...);
+GSet *BLI_gset_difference(GSetKeyCopyFP keycopyfp, GSetKeyFreeFP keyfreefp, GSet *gs1, GSet *gs2, ...);
+GSet *BLI_gset_symmetric_difference(GSetKeyCopyFP keycopyfp, GSetKeyFreeFP keyfreefp, GSet *gs1, GSet *gs2, ...);
 
 GSet *BLI_gset_ptr_new_ex(const char *info,
                           const unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
diff --git a/source/blender/blenlib/intern/BLI_ghash.c b/source/blender/blenlib/intern/BLI_ghash.c
index 69c0729..7875631 100644
--- a/source/blender/blenlib/intern/BLI_ghash.c
+++ b/source/blender/blenlib/intern/BLI_ghash.c
@@ -89,6 +89,20 @@ typedef struct Entry {
 	void *val;  /* This pointer ***must*** remain the last one, since it is 'virtually removed' for gset. */
 } Entry;
 
+#define GHASH_ENTRY_SIZE sizeof(Entry)
+#define GSET_ENTRY_SIZE (sizeof(Entry) - sizeof(void *))
+
+BLI_INLINE void entry_copy(
+        Entry *dst, Entry *src, const size_t entry_size, GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp)
+{
+	const size_t start = offsetof(Entry, hash);
+	const size_t size = entry_size - start;
+
+	memcpy(((char *)dst) + start, ((char *)src) + start, size);
+	if (keycopyfp) dst->key = keycopyfp(src->key);
+	if (valcopyfp) dst->val = valcopyfp(src->val);
+}
+
 struct GHash {
 	GHashHashFP hashfp;
 	GHashCmpFP cmpfp;
@@ -205,7 +219,8 @@ BLI_INLINE void ghash_resize_buckets(GHash *gh, const unsigned int nbuckets)
  * Check if the number of items in the GHash is large enough to require more buckets,
  * or small enough to require less buckets, and resize \a gh accordingly.
  */
-BLI_INLINE void ghash_expand_buckets(GHash *gh, const unsigned int nentries, const bool user_defined)
+BLI_INLINE void ghash_expand_buckets(
+        GHash *gh, const unsigned int nentries, const bool user_defined, const bool force_shrink)
 {
 	unsigned int new_nbuckets;
 
@@ -227,7 +242,7 @@ BLI_INLINE void ghash_expand_buckets(GHash *gh, const unsigned int nentries, con
 #endif
 		gh->limit_grow = GHASH_LIMIT_GROW(new_nbuckets);
 	}
-	if (gh->flag & GHASH_FLAG_ALLOW_SHRINK) {
+	if (force_shrink || (gh->flag & GHASH_FLAG_ALLOW_SHRINK)) {
 		while ((nentries < gh->limit_shrink) &&
 #ifdef GHASH_USE_MODULO_BUCKETS
 			   (gh->cursize > gh->size_min))
@@ -284,7 +299,7 @@ BLI_INLINE void ghash_buckets_reset(GHash *gh, const unsigned int nentries)
 	gh->nentries = 0;
 	gh->flag = 0;
 
-	ghash_expand_buckets(gh, nentries, (nentries != 0));
+	ghash_expand_buckets(gh, nentries, (nentries != 0), false);
 }
 
 /**
@@ -318,7 +333,7 @@ BLI_INLINE Entry *ghash_lookup_entry(GHash *gh, const void *key)
 
 static GHash *ghash_new(GHashHashFP hashfp, GHashCmpFP cmpfp, const char *info,
                         const unsigned int nentries_reserve,
-                        const unsigned int entry_size)
+                        const size_t entry_size)
 {
 	GHash *gh = MEM_mallocN(sizeof(*gh), info);
 
@@ -327,7 +342,7 @@ static GHash *ghash_new(GHashHashFP hashfp, GHashCmpFP cmpfp, const char *info,
 
 	gh->buckets = NULL;
 	ghash_buckets_reset(gh, nentries_reserve);
-	gh->entrypool = BLI_mempool_create(entry_size, 64, 64, BLI_MEMPOOL_NOP);
+	gh->entrypool = BLI_mempool_create((unsigned int)entry_size, 64, 64, BLI_MEMPOOL_NOP);
 
 	return gh;
 }
@@ -349,7 +364,7 @@ BLI_INLINE void ghash_insert_ex(
 	e->val = val;
 	gh->buckets[bucket_hash] = e;
 
-	ghash_expand_buckets(gh, ++gh->nentries, false);
+	ghash_expand_buckets(gh, ++gh->nentries, false, false);
 }
 
 /**
@@ -367,7 +382,7 @@ BLI_INLINE void ghash_insert_ex_keyonly(
 	/* intentionally leave value unset */
 	gh->buckets[bucket_hash] = e;
 
-	ghash_expand_buckets(gh, ++gh->nentries, false);
+	ghash_expand_buckets(gh, ++gh->nentries, false, false);
 }
 
 BLI_INLINE void ghash_insert(GHash *gh, void *key, void *val)
@@ -398,7 +413,7 @@ static Entry *ghash_remove_ex(
 				if (e_prev) e_prev->next = e_next;
 				else   gh->buckets[bucket_hash] = e_next;
 
-				ghash_expand_buckets(gh, --gh->nentries, false);
+				ghash_expand_buckets(gh, --gh->nentries, false, false);
 				return e;
 			}
 		}
@@ -434,12 +449,13 @@ static void ghash_free_cb(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP va
 /**
  * Copy the GHash.
  */
-static GHash *ghash_copy(GHash *gh, const unsigned int entry_size, GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp)
+static GHash *ghash_copy(GHash *gh, const size_t entry_size, GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp)
 {
 	GHash *gh_new;
 	unsigned int i;
 
-	gh_new = ghash_new(gh->hashfp, gh->cmpfp, __func__, gh->nentries, entry_size);
+	gh_new = ghash_new(gh->hashfp, gh->cmpfp, __func__, 0, entry_size);
+	ghash_expand_buckets(gh_new, gh->nentries, false, false);
 
 	for (i = 0; i < gh->nbuckets; i++) {
 		Entry *e;
@@ -448,9 +464,7 @@ static GHash *ghash_copy(GHash *gh, const unsigned int entry_size, GHashKeyCopyF
 			Entry *e_new = BLI_mempool_alloc(gh_new->entrypool);
 			const unsigned int gh_new_bucket_hash = ghash_bucket_hash(gh_new, e->hash);
 
-			e_new->hash = e->hash;
-			e_new->key = (keycopyfp) ? keycopyfp(e->key) : e->key;
-			e_new->val = (valcopyfp) ? valcopyfp(e->val) : e->val;
+			entry_copy(e_new, e, entry_size, keycopyfp, valcopyfp);
 
 			/* Warning! This means entries in buckets in new copy will be in reversed order!
 			 *          This shall not be an issue though, since order should never be assumed in ghash. */
@@ -471,7 +485,7 @@ static GHash *ghash_copy(GHash *gh, const unsigned int entry_size, GHashKeyCopyF
  * If \a reverse is True, entries present in latest GHash will override those in former GHash.
  */
 static GHash *ghash_union(
-        const bool reverse, const unsigned int entry_size,
+        const bool reverse, const size_t entry_size,
         GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp,
         GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp,
         GHash *gh1, GHash *gh2, va_list arg)
@@ -488,6 +502,9 @@ static GHash *ghash_union(
 	for ( ; ghn; ghn = va_arg(arg, GHash *)) {
 		unsigned int i;
 
+		BLI_assert(gh1->cmpfp == ghn->cmpfp);
+		BLI_assert(gh1->hashfp == ghn->hashfp);
+
 		for (i = 0; i < ghn->nbuckets; i++) {
 			Entry *e;
 
@@ -498,22 +515,18 @@ static GHash *ghash_union(
 				if ((e_gh1 = ghash_lookup_entry_ex(gh1, e->key, e->hash, gh1_bucket_hash)) == NULL) {
 					Entry *e_new = BLI_mempool_alloc(gh1->entrypool);
 
-					memcpy(e_new, e, (size_t)entry_size);
-					if (keycopyfp) e_new->key = keycopyfp(e->key);
-					if (valcopyfp) e_new->val = valcopyfp(e->val);
+					entry_copy(e_new, e, entry_size, keycopyfp, valcopyfp);
 
 					/* As with copy, this does not preserve order (but this would be even less meaningful here). */
 					e_new->next = gh1->buckets[gh1_bucket_hash];
 					gh1->buckets[gh1_bucket_hash] = e_new;
-					ghash_expand_buckets(gh1, ++gh1->nentries, false);
+					ghash_expand_buckets(gh1, ++gh1->nentries, false, false);
 				}
 				else if (reverse) {
 					if (keyfreefp) keyfreefp(e_gh1->key);
 					if (valfreefp) valfreefp(e_gh1->val);
 
-					e_gh1->hash = e->hash;
-					e_gh1->key = (keycopyfp) ? keycopyfp(e->key) : e->key;
-					e_gh1->val = (valcopyfp) ? valcopyfp(e->val) : e->val;
+					entry_copy(e_gh1, e, entry_size, keycopyfp, valcopyfp);
 				}
 			}
 		}
@@ -527,7 +540,7 @@ static GHash *ghash_union(
  * If \a gh1 is NULL, a new GHash will be created first (avoids modifying \a gh1 in place).
  */
 static GHash *ghash_intersection(
-        const unsigned int entry_size,
+        const size_t entry_size,
         GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp,
         GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp,
         GHash *gh1, GHash *gh2, va_list arg)
@@ -542,32 +555,245 @@ static GHash *ghash_intersection(
 	}
 
 	for ( ; ghn; ghn = va_arg(arg, GHash *)) {
+		unsigned int new_gh1_nentries = gh1->nentries;
 		unsigned int i;
 
+		BLI_assert(gh1->cmpfp == ghn->cmpfp);
+		BLI_assert(gh1->hashfp == ghn->hashfp);
+
 		for (i = 0; i < gh1->nbuckets; i++) {
-			Entry *e, *e_prev = NULL;
+			Entry *e, *e_prev = NULL, *e_next;
 
-			for (e = gh1->buckets[i]; e; e_prev = e, e = e->next) {
+			for (e = gh1->buckets[i]; e; e = e_next) {
 				const unsigned int ghn_bucket_hash = ghash_bucket_hash(ghn, e->hash);
 
+				e_next = e->next;
+
 				if (ghash_lookup_entry_ex(ghn, e->key, e->hash, ghn_bucket_hash) == NULL) {
-					Entry *e_next = e->next;
+					if (keyfreefp) keyfreefp(e->key);
+					if (valfreefp) valfreefp(e->val);
+
+					if (e_prev) e_prev->next = e_next;
+					else        gh1->buckets[i] = e_next;
+
+					/* We cannot resize gh1 while we are looping on it!!! */
+					new_gh1_nentries--;
+					BLI_mempool_free(gh1->entrypool, e);
+				}
+				else {
+					e_prev = e;
+				}
+			}
+		}
+
+		gh1->nentries = new_gh1_nentries;
+		/* We force shrinking here (if needed). */
+		ghash_expand_buckets(gh1, gh1->nentries, false, true);
+	}
+
+	return gh1;
+}
+
+/**
+ * Remove all entries in \a gh1 which keys are present in \a gh2 or any subsequent given GHash.
+ * If \a gh1 is NULL, a ne

@@ Diff output truncated at 10240 characters. @@
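
The doc comment cut off above describes BLI_ghash_difference: remove from gh1 every
entry whose key is present in gh2 or any subsequent given GHash. As a purely
illustrative sketch (again not part of the diff), assuming the same NULL-terminated
variadic convention and pre-existing, compatible ghashes gh_a, gh_b and gh_c:

    /* Keep in the result only the entries of gh_a whose keys appear in
     * neither gh_b nor gh_c. NULL copy/free callbacks: keys and values
     * are shared and nothing is freed. */
    GHash *gh_diff = BLI_ghash_difference(NULL, NULL, NULL, NULL,
                                          gh_a, gh_b, gh_c, NULL);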



