[Bf-blender-cvs] SVN commit: /data/svn/bf-blender [59520] trunk/blender/source/blender/blenlib/intern: internal changes to ghash/edgehash, reorganize to split out resizing the hash from insertion.

Campbell Barton ideasman42 at gmail.com
Mon Aug 26 15:41:13 CEST 2013


Revision: 59520
          http://projects.blender.org/scm/viewvc.php?view=rev&root=bf-blender&revision=59520
Author:   campbellbarton
Date:     2013-08-26 13:41:13 +0000 (Mon, 26 Aug 2013)
Log Message:
-----------
internal changes to ghash/edgehash, reorganize to split out resizing the hash from insertion.

Modified Paths:
--------------
    trunk/blender/source/blender/blenlib/intern/BLI_ghash.c
    trunk/blender/source/blender/blenlib/intern/edgehash.c

Modified: trunk/blender/source/blender/blenlib/intern/BLI_ghash.c
===================================================================
--- trunk/blender/source/blender/blenlib/intern/BLI_ghash.c	2013-08-26 12:41:48 UTC (rev 59519)
+++ trunk/blender/source/blender/blenlib/intern/BLI_ghash.c	2013-08-26 13:41:13 UTC (rev 59520)
@@ -98,6 +98,14 @@
  * \{ */
 
 /**
+ * Get the hash for a key.
+ */
+BLI_INLINE unsigned int ghash_keyhash(GHash *gh, const void *key)
+{
+	return gh->hashfp(key) % gh->nbuckets;
+}
+
+/**
  * Check if the number of items in the GHash is large enough to require more buckets.
  */
 BLI_INLINE bool ghash_test_expand_buckets(const unsigned int nentries, const unsigned int nbuckets)
@@ -106,21 +114,43 @@
 }
 
 /**
- * Increase initial bucket size to match a reserved ammount.
+ * Expand buckets to the next size up.
  */
-BLI_INLINE void ghash_buckets_reserve(GHash *gh, const unsigned int nentries_reserve)
+BLI_INLINE void ghash_resize_buckets(GHash *gh, const unsigned int nbuckets)
 {
-	while (ghash_test_expand_buckets(nentries_reserve, gh->nbuckets)) {
-		gh->nbuckets = hashsizes[++gh->cursize];
+	Entry **buckets_old = gh->buckets;
+	Entry **buckets_new;
+	const unsigned int nbuckets_old = gh->nbuckets;
+	unsigned int i;
+	Entry *e;
+
+	BLI_assert(gh->nbuckets != nbuckets);
+
+	gh->nbuckets = nbuckets;
+	buckets_new = (Entry **)MEM_callocN(gh->nbuckets * sizeof(*gh->buckets), "buckets");
+
+	for (i = 0; i < nbuckets_old; i++) {
+		Entry *e_next;
+		for (e = buckets_old[i]; e; e = e_next) {
+			const unsigned hash = ghash_keyhash(gh, e->key);
+			e_next = e->next;
+			e->next = buckets_new[hash];
+			buckets_new[hash] = e;
+		}
 	}
+
+	gh->buckets = buckets_new;
+	MEM_freeN(buckets_old);
 }
 
 /**
- * Get the hash for a key.
+ * Increase initial bucket size to match a reserved ammount.
  */
-BLI_INLINE unsigned int ghash_keyhash(GHash *gh, const void *key)
+BLI_INLINE void ghash_buckets_reserve(GHash *gh, const unsigned int nentries_reserve)
 {
-	return gh->hashfp(key) % gh->nbuckets;
+	while (ghash_test_expand_buckets(nentries_reserve, gh->nbuckets)) {
+		gh->nbuckets = hashsizes[++gh->cursize];
+	}
 }
 
 /**
@@ -133,7 +163,7 @@
 	Entry *e;
 
 	for (e = gh->buckets[hash]; e; e = e->next) {
-		if (gh->cmpfp(key, e->key) == 0) {
+		if (UNLIKELY(gh->cmpfp(key, e->key) == 0)) {
 			return e;
 		}
 	}
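
A note on the UNLIKELY() hint introduced in the lookup loop above: it is the usual branch-prediction macro from Blender's utility headers, conventionally built on GCC/Clang's __builtin_expect and reduced to a no-op on other compilers. A rough sketch of that common pattern (the exact definition lives in Blender's headers and may differ):

/* Typical branch-hint macro pattern; illustrative only. */
#ifdef __GNUC__
#  define LIKELY(x)    __builtin_expect(!!(x), 1)
#  define UNLIKELY(x)  __builtin_expect(!!(x), 0)
#else
#  define LIKELY(x)    (x)
#  define UNLIKELY(x)  (x)
#endif

Wrapping the key comparison in UNLIKELY() tells the compiler to lay the loop out for the common case where the current entry does not match.
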
@@ -178,41 +208,45 @@
  * Internal insert function.
  * Takes a hash argument to avoid calling #ghash_keyhash multiple times.
  */
-static Entry *ghash_insert_ex(GHash *gh, void *key,
-                              unsigned int hash)
+BLI_INLINE void ghash_insert_ex(GHash *gh, void *key, void *val,
+                                unsigned int hash)
 {
 	Entry *e = (Entry *)BLI_mempool_alloc(gh->entrypool);
-
 	BLI_assert((gh->flag & GHASH_FLAG_ALLOW_DUPES) || (BLI_ghash_haskey(gh, key) == 0));
+	IS_GHASH_ASSERT(gh);
 
 	e->next = gh->buckets[hash];
 	e->key = key;
-	/* intentionally don't set the value */
+	e->val = val;
 	gh->buckets[hash] = e;
 
 	if (UNLIKELY(ghash_test_expand_buckets(++gh->nentries, gh->nbuckets))) {
-		Entry *e_iter;
-		Entry **old = gh->buckets;
-		const unsigned nold = gh->nbuckets;
-		unsigned int i;
+		ghash_resize_buckets(gh, hashsizes[++gh->cursize]);
+	}
+}
 
-		gh->nbuckets = hashsizes[++gh->cursize];
-		gh->buckets = (Entry **)MEM_callocN(gh->nbuckets * sizeof(*gh->buckets), "buckets");
+/**
+ * Insert function that doesn't set the value (use for GSet)
+ */
+BLI_INLINE void ghash_insert_ex_keyonly(GHash *gh, void *key,
+                                        unsigned int hash)
+{
+	Entry *e = (Entry *)BLI_mempool_alloc(gh->entrypool);
+	BLI_assert((gh->flag & GHASH_FLAG_ALLOW_DUPES) || (BLI_ghash_haskey(gh, key) == 0));
+	e->next = gh->buckets[hash];
+	e->key = key;
+	/* intentionally leave value unset */
+	gh->buckets[hash] = e;
 
-		for (i = 0; i < nold; i++) {
-			Entry *e_next;
-			for (e_iter = old[i]; e_iter; e_iter = e_next) {
-				e_next = e_iter->next;
-				hash = ghash_keyhash(gh, e_iter->key);
-				e_iter->next = gh->buckets[hash];
-				gh->buckets[hash] = e_iter;
-			}
-		}
-
-		MEM_freeN(old);
+	if (UNLIKELY(ghash_test_expand_buckets(++gh->nentries, gh->nbuckets))) {
+		ghash_resize_buckets(gh, hashsizes[++gh->cursize]);
 	}
+}
 
-	return e;
+BLI_INLINE void ghash_insert(GHash *gh, void *key, void *val)
+{
+	const unsigned int hash = ghash_keyhash(gh, key);
+	return ghash_insert_ex(gh, key, val, hash);
 }
 
 /**
@@ -225,7 +259,7 @@
 	Entry *e_prev = NULL;
 
 	for (e = gh->buckets[hash]; e; e = e->next) {
-		if (gh->cmpfp(key, e->key) == 0) {
+		if (UNLIKELY(gh->cmpfp(key, e->key) == 0)) {
 			Entry *e_next = e->next;
 
 			if (keyfreefp) keyfreefp(e->key);
@@ -314,9 +348,7 @@
  */
 void BLI_ghash_insert(GHash *gh, void *key, void *val)
 {
-	const unsigned int hash = ghash_keyhash(gh, key);
-	Entry *e = ghash_insert_ex(gh, key, hash);
-	e->val = val;
+	ghash_insert(gh, key, val);
 }
 
 /**
@@ -338,8 +370,7 @@
 		return false;
 	}
 	else {
-		e = ghash_insert_ex(gh, key, hash);
-		e->val = val;
+		ghash_insert_ex(gh, key, val, hash);
 		return true;
 	}
 }
@@ -811,7 +842,7 @@
 void BLI_gset_insert(GSet *gs, void *key)
 {
 	const unsigned int hash = ghash_keyhash((GHash *)gs, key);
-	ghash_insert_ex((GHash *)gs, key, hash);
+	ghash_insert_ex_keyonly((GHash *)gs, key, hash);
 }
 
 /**
@@ -830,7 +861,7 @@
 		return false;
 	}
 	else {
-		ghash_insert_ex((GHash *)gs, key, hash);
+		ghash_insert_ex_keyonly((GHash *)gs, key, hash);
 		return true;
 	}
 }
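
To see the shape of the ghash change at a glance: the grow-and-rehash loop that used to live inside ghash_insert_ex() is now the standalone ghash_resize_buckets(), and both insert variants (the value-setting one and the key-only one used by GSet) simply call it once the table passes the load-factor check. Below is a minimal standalone sketch of that structure; the names (SimpleHash, simplehash_*) are hypothetical, and the allocator, size progression, flags and asserts are all simplified relative to the actual BLI_ghash code.

#include <stdlib.h>

/* Hypothetical, simplified chained hash table; not the BLI_ghash API. */
typedef struct SimpleEntry {
	struct SimpleEntry *next;
	const char *key;
	void *val;
} SimpleEntry;

typedef struct SimpleHash {
	SimpleEntry **buckets;
	unsigned int nbuckets;
	unsigned int nentries;
} SimpleHash;

unsigned int simplehash_keyhash(const SimpleHash *sh, const char *key)
{
	unsigned int h = 5381;
	while (*key) {
		h = (h * 33) ^ (unsigned char)*key++;
	}
	return h % sh->nbuckets;
}

/* Re-bucket every entry into a larger table; kept in one place so both
 * insert variants can share it. */
void simplehash_resize_buckets(SimpleHash *sh, unsigned int nbuckets_new)
{
	SimpleEntry **buckets_old = sh->buckets;
	const unsigned int nbuckets_old = sh->nbuckets;
	unsigned int i;

	sh->nbuckets = nbuckets_new;
	sh->buckets = calloc(sh->nbuckets, sizeof(*sh->buckets));

	for (i = 0; i < nbuckets_old; i++) {
		SimpleEntry *e, *e_next;
		for (e = buckets_old[i]; e; e = e_next) {
			const unsigned int hash = simplehash_keyhash(sh, e->key);
			e_next = e->next;
			e->next = sh->buckets[hash];
			sh->buckets[hash] = e;
		}
	}
	free(buckets_old);
}

/* Insert only links the new entry and checks the load factor; growing is
 * delegated to the resize helper (the real code steps through a predefined
 * hashsizes[] table rather than doubling). */
void simplehash_insert(SimpleHash *sh, const char *key, void *val)
{
	const unsigned int hash = simplehash_keyhash(sh, key);
	SimpleEntry *e = malloc(sizeof(*e));

	e->next = sh->buckets[hash];
	e->key = key;
	e->val = val;
	sh->buckets[hash] = e;

	if (++sh->nentries > sh->nbuckets * 3) {
		simplehash_resize_buckets(sh, sh->nbuckets * 2 + 1);
	}
}

Keeping the rehash in one helper is what lets the commit add ghash_insert_ex_keyonly() for GSet without duplicating the resize logic.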

Modified: trunk/blender/source/blender/blenlib/intern/edgehash.c
===================================================================
--- trunk/blender/source/blender/blenlib/intern/edgehash.c	2013-08-26 12:41:48 UTC (rev 59519)
+++ trunk/blender/source/blender/blenlib/intern/edgehash.c	2013-08-26 13:41:13 UTC (rev 59520)
@@ -95,12 +95,55 @@
 /** \name Internal Utility API
  * \{ */
 
+/**
+ * Get the hash for a key.
+ */
+BLI_INLINE unsigned int edgehash_keyhash(EdgeHash *eh, unsigned int v0, unsigned int v1)
+{
+	BLI_assert(v0 < v1);
+
+	return ((v0 * 39) ^ (v1 * 31)) % eh->nbuckets;
+}
+
+/**
+ * Check if the number of items in the GHash is large enough to require more buckets.
+ */
 BLI_INLINE bool edgehash_test_expand_buckets(const unsigned int nentries, const unsigned int nbuckets)
 {
 	return (nentries > nbuckets * 3);
 }
 
 /**
+ * Expand buckets to the next size up.
+ */
+BLI_INLINE void edgehash_resize_buckets(EdgeHash *eh, const unsigned int nbuckets)
+{
+	EdgeEntry **buckets_old = eh->buckets;
+	EdgeEntry **buckets_new;
+	const unsigned int nbuckets_old = eh->nbuckets;
+	unsigned int i;
+	EdgeEntry *e;
+
+	BLI_assert(eh->nbuckets != nbuckets);
+
+	eh->nbuckets = nbuckets;
+	buckets_new = MEM_callocN(eh->nbuckets * sizeof(*eh->buckets), "eh buckets");
+
+	for (i = 0; i < nbuckets_old; i++) {
+		EdgeEntry *e_next;
+		for (e = buckets_old[i]; e; e = e_next) {
+			const unsigned hash = edgehash_keyhash(eh, e->v0, e->v1);
+			e_next = e->next;
+			e->next = buckets_new[hash];
+			buckets_new[hash] = e;
+		}
+	}
+
+	eh->buckets = buckets_new;
+	MEM_freeN(buckets_old);
+}
+
+/**
  * Increase initial bucket size to match a reserved ammount.
  */
 BLI_INLINE void edgehash_buckets_reserve(EdgeHash *eh, const unsigned int nentries_reserve)
@@ -110,26 +153,26 @@
 	}
 }
 
-BLI_INLINE unsigned int edgehash_keyhash(EdgeHash *eh, unsigned int v0, unsigned int v1)
-{
-	BLI_assert(v0 < v1);
-
-	return ((v0 * 39) ^ (v1 * 31)) % eh->nbuckets;
-}
-
+/**
+ * Internal lookup function.
+ * Takes a hash argument to avoid calling #ghash_keyhash multiple times.
+ */
 BLI_INLINE EdgeEntry *edgehash_lookup_entry_ex(EdgeHash *eh, unsigned int v0, unsigned int v1,
                                                const unsigned int hash)
 {
 	EdgeEntry *e;
 	BLI_assert(v0 < v1);
 	for (e = eh->buckets[hash]; e; e = e->next) {
-		if (v0 == e->v0 && v1 == e->v1) {
+		if (UNLIKELY(v0 == e->v0 && v1 == e->v1)) {
 			return e;
 		}
 	}
 	return NULL;
 }
 
+/**
+ * Internal lookup function. Only wraps #edgehash_lookup_entry_ex
+ */
 BLI_INLINE EdgeEntry *edgehash_lookup_entry(EdgeHash *eh, unsigned int v0, unsigned int v1)
 {
 	unsigned int hash;
@@ -138,6 +181,7 @@
 	return edgehash_lookup_entry_ex(eh, v0, v1, hash);
 }
 
+
 static EdgeHash *edgehash_new(const char *info,
                               const unsigned int nentries_reserve,
                               const size_t entry_size)
@@ -160,12 +204,17 @@
 	return eh;
 }
 
-static EdgeEntry *edgehash_insert_ex(EdgeHash *eh, unsigned int v0, unsigned int v1,
-                                     unsigned int hash)
+/**
+ * Internal insert function.
+ * Takes a hash argument to avoid calling #edgehash_keyhash multiple times.
+ */
+BLI_INLINE void edgehash_insert_ex(EdgeHash *eh, unsigned int v0, unsigned int v1, void *val,
+                                   unsigned int hash)
 {
 	EdgeEntry *e = BLI_mempool_alloc(eh->epool);
 
 	BLI_assert((eh->flag & EDGEHASH_FLAG_ALLOW_DUPES) || (BLI_edgehash_haskey(eh, v0, v1) == 0));
+	IS_EDGEHASH_ASSERT(eh);
 
 	/* this helps to track down errors with bad edge data */
 	BLI_assert(v0 < v1);
@@ -174,33 +223,47 @@
 	e->next = eh->buckets[hash];
 	e->v0 = v0;
 	e->v1 = v1;
-	/* intentionally don't set the value */
+	e->val = val;
 	eh->buckets[hash] = e;
 
 	if (UNLIKELY(edgehash_test_expand_buckets(++eh->nentries, eh->nbuckets))) {
-		EdgeEntry *e_iter;
-		EdgeEntry **old = eh->buckets;
-		const unsigned int nold = eh->nbuckets;
-		unsigned int i;
+		edgehash_resize_buckets(eh, _ehash_hashsizes[++eh->cursize]);
+	}
+}
 
-		eh->nbuckets = _ehash_hashsizes[++eh->cursize];
-		eh->buckets = MEM_callocN(eh->nbuckets * sizeof(*eh->buckets), "eh buckets");
+/**
+ * Insert function that doesn't set the value (use for EdgeSet)
+ */
+BLI_INLINE void edgehash_insert_ex_keyonly(EdgeHash *eh, unsigned int v0, unsigned int v1,
+                                           unsigned int hash)
+{
+	EdgeEntry *e = BLI_mempool_alloc(eh->epool);
 
-		for (i = 0; i < nold; i++) {
-			EdgeEntry *e_next;
-			for (e_iter = old[i]; e_iter; e_iter = e_next) {
-				e_next = e_iter->next;
-				hash = edgehash_keyhash(eh, e_iter->v0, e_iter->v1);
-				e_iter->next = eh->buckets[hash];
-				eh->buckets[hash] = e_iter;
-			}
-		}
+	BLI_assert((eh->flag & EDGEHASH_FLAG_ALLOW_DUPES) || (BLI_edgehash_haskey(eh, v0, v1) == 0));
 
-		MEM_freeN(old);
+	/* this helps to track down errors with bad edge data */
+	BLI_assert(v0 < v1);
+	BLI_assert(v0 != v1);
+
+	e->next = eh->buckets[hash];
+	e->v0 = v0;
+	e->v1 = v1;
+	/* intentionally leave value unset */

@@ Diff output truncated at 10240 characters. @@
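
The edgehash.c hunks shown above mirror the ghash changes: the same resize split (edgehash_resize_buckets), a key-only insert variant for EdgeSet, and UNLIKELY() hints in the lookup; the rest of the file's diff is truncated. One convention worth noting from those hunks is the key order: edgehash_keyhash() asserts v0 < v1 and hashes ((v0 * 39) ^ (v1 * 31)) % nbuckets, so edges must be handed over in canonical vertex order. A small standalone sketch of that convention (hypothetical helper names, not the BLI_edgehash API):

#include <assert.h>

/* Put an unordered edge (a, b) into the canonical (v0 < v1) form the hash
 * expects; degenerate edges (a == b) are rejected, matching the asserts in
 * the diff. Hypothetical helpers, not part of BLI_edgehash. */
void edge_key_canonical(unsigned int a, unsigned int b,
                        unsigned int *r_v0, unsigned int *r_v1)
{
	assert(a != b);
	if (a < b) { *r_v0 = a; *r_v1 = b; }
	else       { *r_v0 = b; *r_v1 = a; }
}

/* The hash from edgehash_keyhash(), applied to a canonical key. */
unsigned int edge_key_hash(unsigned int v0, unsigned int v1, unsigned int nbuckets)
{
	assert(v0 < v1);
	return ((v0 * 39) ^ (v1 * 31)) % nbuckets;
}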


