[Bf-blender-cvs] [9396124] temp-ghash-basis: minor edits

Campbell Barton noreply at git.blender.org
Thu Mar 19 16:19:43 CET 2015


Commit: 9396124c14f7712ea4c3a7380ec97ea92acd5682
Author: Campbell Barton
Date:   Fri Mar 20 02:05:50 2015 +1100
Branches: temp-ghash-basis
https://developer.blender.org/rB9396124c14f7712ea4c3a7380ec97ea92acd5682

minor edits

===================================================================

M	source/blender/blenlib/intern/BLI_ghash.c

===================================================================

diff --git a/source/blender/blenlib/intern/BLI_ghash.c b/source/blender/blenlib/intern/BLI_ghash.c
index 6e730ed..9597024 100644
--- a/source/blender/blenlib/intern/BLI_ghash.c
+++ b/source/blender/blenlib/intern/BLI_ghash.c
@@ -67,11 +67,12 @@ const unsigned int hashsizes[] = {
 #  define GHASH_BUCKET_BIT_MAX 28  /* About 268M of buckets... */
 #endif
 
-/* Note: Max load (GHASH_LIMIT_GROW) used to be 3.
- *       Python uses 0.6666, tommyhaslib even goes down to 0.5.
- *       Reducing our from 3 to 0.75 gives huge speedup (about twice quicker pure GHash insertions/lookup,
- *       about 25% - 30% quicker dyntopo stroke drawing e.g.).
- *       Min load (GHASH_LIMIT_SHRINK) is a quarter of max load, to avoid resizing to quickly.
+/**
+ * \note Max load #GHASH_LIMIT_GROW used to be 3. (pre 2.74).
+ * Python uses 0.6666, tommyhashlib even goes down to 0.5.
+ * Reducing ours from 3 to 0.75 gives huge speedup (about twice quicker pure GHash insertions/lookup,
+ * about 25% - 30% quicker 'dynamic-topology' stroke drawing e.g.).
+ * Min load #GHASH_LIMIT_SHRINK is a quarter of max load, to avoid resizing too quickly.
  */
 #define GHASH_LIMIT_GROW(_nbkt) ((_nbkt) * 3) / 4
 #define GHASH_LIMIT_SHRINK(_nbkt) ((_nbkt) * 3) / 16
@@ -119,17 +120,14 @@ BLI_INLINE void entry_copy(
         GHash *gh_dst, Entry *dst, GHash *gh_src, Entry *src,
         const unsigned int UNUSED(hash), GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp)
 {
-	const bool is_gset_dst = (gh_dst->flag & GHASH_FLAG_IS_GSET) != 0;
-	const bool is_gset_src = (gh_src->flag & GHASH_FLAG_IS_GSET) != 0;
-
 	dst->key = (keycopyfp) ? keycopyfp(src->key) : src->key;
 
-	if (!is_gset_dst) {
-		if (is_gset_src) {
-			((GHashEntry *)dst)->val = NULL;
+	if ((gh_dst->flag & GHASH_FLAG_IS_GSET) == 0) {
+		if ((gh_src->flag & GHASH_FLAG_IS_GSET) == 0) {
+			((GHashEntry *)dst)->val = (valcopyfp) ? valcopyfp(((GHashEntry *)src)->val) : ((GHashEntry *)src)->val;
 		}
 		else {
-			((GHashEntry *)dst)->val = (valcopyfp) ? valcopyfp(((GHashEntry *)src)->val) : ((GHashEntry *)src)->val;
+			((GHashEntry *)dst)->val = NULL;
 		}
 	}
 }
@@ -251,13 +249,15 @@ BLI_INLINE void ghash_expand_buckets(
 	new_nbuckets = gh->nbuckets;
 
 #ifdef GHASH_USE_MODULO_BUCKETS
-	while ((nentries > gh->limit_grow) && (gh->cursize < GHASH_MAX_SIZE - 1))
+	while ((nentries    > gh->limit_grow) &&
+	       (gh->cursize < GHASH_MAX_SIZE - 1))
 	{
 		new_nbuckets = hashsizes[++gh->cursize];
 		gh->limit_grow = GHASH_LIMIT_GROW(new_nbuckets);
 	}
 #else
-	while ((nentries > gh->limit_grow) && (gh->bucket_bit < GHASH_BUCKET_BIT_MAX))
+	while ((nentries       > gh->limit_grow) &&
+	       (gh->bucket_bit < GHASH_BUCKET_BIT_MAX))
 	{
 		new_nbuckets = 1u << ++gh->bucket_bit;
 		gh->limit_grow = GHASH_LIMIT_GROW(new_nbuckets);
@@ -266,17 +266,19 @@ BLI_INLINE void ghash_expand_buckets(
 
 	if (force_shrink || (gh->flag & GHASH_FLAG_ALLOW_SHRINK)) {
 #ifdef GHASH_USE_MODULO_BUCKETS
-		while ((nentries < gh->limit_shrink) && (gh->cursize > gh->size_min))
+		while ((nentries    < gh->limit_shrink) &&
+		       (gh->cursize > gh->size_min))
 		{
 			new_nbuckets = hashsizes[--gh->cursize];
 			gh->limit_shrink = GHASH_LIMIT_SHRINK(new_nbuckets);
 		}
 #else
-		while ((nentries < gh->limit_shrink) && (gh->bucket_bit > gh->bucket_bit_min))
-		 {
-			 new_nbuckets = 1u << --gh->bucket_bit;
-			 gh->limit_shrink = GHASH_LIMIT_SHRINK(new_nbuckets);
-		 }
+		while ((nentries       < gh->limit_shrink) &&
+		       (gh->bucket_bit > gh->bucket_bit_min))
+		{
+			new_nbuckets = 1u << --gh->bucket_bit;
+			gh->limit_shrink = GHASH_LIMIT_SHRINK(new_nbuckets);
+		}
 #endif
 	}
 
@@ -349,19 +351,20 @@ BLI_INLINE Entry *ghash_lookup_entry_ex(
  * Useful when modifying buckets somehow (like removing an entry...).
  */
 BLI_INLINE Entry *ghash_lookup_entry_prev_ex(
-        GHash *gh, const void *key, Entry **e_prev_r, const unsigned int bucket_hash)
+        GHash *gh, const void *key, Entry **r_e_prev, const unsigned int bucket_hash)
 {
-	Entry *e;
+	Entry *e, *e_prev = NULL;
 
-	*e_prev_r = NULL;
 	/* If we do not store GHash, not worth computing it for each entry here!
 	 * Typically, comparison function will be quicker, and since it's needed in the end anyway... */
-	for (e = gh->buckets[bucket_hash]; e; *e_prev_r = e, e = e->next) {
+	for (e = gh->buckets[bucket_hash]; e; e_prev = e, e = e->next) {
 		if (UNLIKELY(gh->cmpfp(key, e->key) == false)) {
+			*r_e_prev = e_prev;
 			return e;
 		}
 	}
 
+	*r_e_prev = NULL;
 	return NULL;
 }




More information about the Bf-blender-cvs mailing list