[Bf-blender-cvs] [d140e70] temp-ghash-experiments: Merge branch 'master' into temp-ghash-experiments

Bastien Montagne noreply at git.blender.org
Mon Jun 29 17:18:39 CEST 2015


Commit: d140e70c496122915eb5c05aba83153e2e0d7998
Author: Bastien Montagne
Date:   Mon Jun 29 16:41:00 2015 +0200
Branches: temp-ghash-experiments
https://developer.blender.org/rBd140e70c496122915eb5c05aba83153e2e0d7998

Merge branch 'master' into temp-ghash-experiments

Note that the 'store hash' feature was removed for now - too complex to maintain (conflicts)
and relatively easy to re-add if we ever really want this one day.

Conflicts:
	source/blender/blenlib/BLI_ghash.h
	source/blender/blenlib/intern/BLI_ghash.c
	source/blender/blenlib/intern/hash_mm2a.c
	source/blender/bmesh/tools/bmesh_region_match.c
	tests/gtests/blenlib/BLI_ghash_performance_test.cc
	tests/gtests/blenlib/BLI_ghash_test.cc
	tests/gtests/blenlib/CMakeLists.txt

===================================================================



===================================================================

diff --cc source/blender/blenlib/intern/BLI_ghash.c
index ff5c20f,9287d62..f6f203c
--- a/source/blender/blenlib/intern/BLI_ghash.c
+++ b/source/blender/blenlib/intern/BLI_ghash.c
@@@ -637,321 -614,6 +617,321 @@@ static GHash *ghash_copy(GHash *gh, GHa
  	return gh_new;
  }
  
 +/**
 + * Merge \a gh2 into \a gh1 (keeping entries already in \a gh1 unchanged), and then each subsequent given GHash.
 + * If \a gh1 is NULL, a new GHash will be created first (avoids modifying \a gh1 in place).
 + * If \a reverse is True, entries present in latest GHash will override those in former GHash.
 + */
 +static GHash *ghash_union(
 +        const bool reverse,
 +        GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp,
 +        GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp,
 +        GHash *gh1, GHash *gh2, va_list arg)
 +{
 +	GHash *ghn = gh2;
 +
 +	BLI_assert(ghn);
 +
 +	if (!gh1) {
 +		gh1 = ghash_copy(ghn, keycopyfp, valcopyfp);
 +		ghn = va_arg(arg, GHash *);
 +	}
 +
 +	for ( ; ghn; ghn = va_arg(arg, GHash *)) {
 +		unsigned int i;
 +
 +		BLI_assert(gh1->cmpfp == ghn->cmpfp);
 +		BLI_assert(gh1->hashfp == ghn->hashfp);
 +		BLI_assert(gh1->is_gset == ghn->is_gset);
 +
 +		for (i = 0; i < ghn->nbuckets; i++) {
 +			Entry *e;
 +
 +			for (e = ghn->buckets[i]; e; e = e->next) {
 +				Entry *e_gh1;
 +				const unsigned int hash = ghash_entryhash(gh1, e);
- 				const unsigned int gh1_bucket_hash = ghash_bucket_hash(gh1, hash);
++				const unsigned int gh1_bucket_index = ghash_bucket_index(gh1, hash);
 +
- 				if ((e_gh1 = ghash_lookup_entry_ex(gh1, e->key, hash, gh1_bucket_hash)) == NULL) {
++				if ((e_gh1 = ghash_lookup_entry_ex(gh1, e->key, gh1_bucket_index)) == NULL) {
 +					Entry *e_new = BLI_mempool_alloc(gh1->entrypool);
 +
- 					entry_copy(gh1, e_new, ghn, e, hash, keycopyfp, valcopyfp);
++					ghash_entry_copy(gh1, e_new, ghn, e, keycopyfp, valcopyfp);
 +
 +					/* As with copy, this does not preserve order (but this would be even less meaningful here). */
- 					e_new->next = gh1->buckets[gh1_bucket_hash];
- 					gh1->buckets[gh1_bucket_hash] = e_new;
- 					ghash_expand_buckets(gh1, ++gh1->nentries, false, false);
++					e_new->next = gh1->buckets[gh1_bucket_index];
++					gh1->buckets[gh1_bucket_index] = e_new;
++					ghash_buckets_expand(gh1, ++gh1->nentries, false);
 +				}
 +				else if (reverse) {
 +					if (keyfreefp) keyfreefp(e_gh1->key);
 +					if (valfreefp) valfreefp(((GHashEntry *)e_gh1)->val);
 +
- 					entry_copy(gh1, e_gh1, ghn, e, hash, keycopyfp, valcopyfp);
++					ghash_entry_copy(gh1, e_gh1, ghn, e, keycopyfp, valcopyfp);
 +				}
 +			}
 +		}
 +	}
 +
 +	return gh1;
 +}
 +
 +/**
 + * Remove all entries in \a gh1 which keys are not present in \a gh2 and all subsequent given GHash.
 + * If \a gh1 is NULL, a new GHash will be created first (avoids modifying \a gh1 in place).
 + */
 +static GHash *ghash_intersection(
 +        GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp,
 +        GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp,
 +        GHash *gh1, GHash *gh2, va_list arg)
 +{
 +	GHash *ghn = gh2;
 +
 +	BLI_assert(ghn);
 +
 +	if (!gh1) {
 +		gh1 = ghash_copy(ghn, keycopyfp, valcopyfp);
 +		ghn = va_arg(arg, GHash *);
 +	}
 +
 +	BLI_assert(!valfreefp || !gh1->is_gset);
 +
 +	for ( ; ghn; ghn = va_arg(arg, GHash *)) {
 +		unsigned int new_gh1_nentries = gh1->nentries;
 +		unsigned int i;
 +
 +		BLI_assert(gh1->cmpfp == ghn->cmpfp);
 +		BLI_assert(gh1->hashfp == ghn->hashfp);
 +
 +		for (i = 0; i < gh1->nbuckets; i++) {
 +			Entry *e, *e_prev = NULL, *e_next;
 +
 +			for (e = gh1->buckets[i]; e; e = e_next) {
 +				const unsigned int hash = ghash_entryhash(gh1, e);
- 				const unsigned int ghn_bucket_hash = ghash_bucket_hash(ghn, hash);
++				const unsigned int ghn_bucket_index = ghash_bucket_index(ghn, hash);
 +
 +				e_next = e->next;
 +
- 				if (ghash_lookup_entry_ex(ghn, e->key, hash, ghn_bucket_hash) == NULL) {
++				if (ghash_lookup_entry_ex(ghn, e->key, ghn_bucket_index) == NULL) {
 +					if (keyfreefp) keyfreefp(e->key);
 +					if (valfreefp) valfreefp(((GHashEntry *)e)->val);
 +
 +					if (e_prev) e_prev->next = e_next;
 +					else        gh1->buckets[i] = e_next;
 +
 +					/* We cannot resize gh1 while we are looping on it!!! */
 +					new_gh1_nentries--;
 +					BLI_mempool_free(gh1->entrypool, e);
 +				}
 +				else {
 +					e_prev = e;
 +				}
 +			}
 +		}
 +
 +		gh1->nentries = new_gh1_nentries;
 +		/* We force shrinking here (if needed). */
- 		ghash_expand_buckets(gh1, gh1->nentries, false, true);
++		ghash_buckets_contract(gh1, gh1->nentries, false, true);
 +	}
 +
 +	return gh1;
 +}
 +
 +/**
 + * Remove all entries in \a gh1 which keys are present in \a gh2 or any subsequent given GHash.
 + * If \a gh1 is NULL, a new GHash will be created first (avoids modifying \a gh1 in place).
 + */
 +static GHash *ghash_difference(
 +        GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp,
 +        GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp,
 +        GHash *gh1, GHash *gh2, va_list arg)
 +{
 +	GHash *ghn = gh2;
 +
 +	BLI_assert(ghn);
 +
 +	if (!gh1) {
 +		gh1 = ghash_copy(ghn, keycopyfp, valcopyfp);
 +		ghn = va_arg(arg, GHash *);
 +	}
 +
 +	BLI_assert(!valfreefp || !gh1->is_gset);
 +
 +	for ( ; ghn; ghn = va_arg(arg, GHash *)) {
 +		unsigned int new_gh1_nentries = gh1->nentries;
 +		unsigned int i;
 +
 +		BLI_assert(gh1->cmpfp == ghn->cmpfp);
 +		BLI_assert(gh1->hashfp == ghn->hashfp);
 +
 +		for (i = 0; i < gh1->nbuckets; i++) {
 +			Entry *e, *e_prev = NULL, *e_next;
 +
 +			for (e = gh1->buckets[i]; e; e = e_next) {
 +				const unsigned int hash = ghash_entryhash(gh1, e);
- 				const unsigned int ghn_bucket_hash = ghash_bucket_hash(ghn, hash);
++				const unsigned int ghn_bucket_index = ghash_bucket_index(ghn, hash);
 +
 +				e_next = e->next;
 +
- 				if (ghash_lookup_entry_ex(ghn, e->key, hash, ghn_bucket_hash) != NULL) {
++				if (ghash_lookup_entry_ex(ghn, e->key, ghn_bucket_index) != NULL) {
 +					if (keyfreefp) keyfreefp(e->key);
 +					if (valfreefp) valfreefp(((GHashEntry *)e)->val);
 +
 +					if (e_prev) e_prev->next = e_next;
 +					else        gh1->buckets[i] = e_next;
 +
 +					/* We cannot resize gh1 while we are looping on it!!! */
 +					new_gh1_nentries--;
 +					BLI_mempool_free(gh1->entrypool, e);
 +				}
 +				else {
 +					e_prev = e;
 +				}
 +			}
 +		}
 +
 +		gh1->nentries = new_gh1_nentries;
 +		/* We force shrinking here (if needed). */
- 		ghash_expand_buckets(gh1, gh1->nentries, false, true);
++		ghash_buckets_contract(gh1, gh1->nentries, false, true);
 +	}
 +
 +	return gh1;
 +}
 +
 +/**
 + * Set \a gh1 to only contain entries which keys are present in one and only one of all given ghash.
 + * If \a gh1 is NULL, a new GHash will be created first (avoids modifying \a gh1 in place).
 + */
 +static GHash *ghash_symmetric_difference(
 +        GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp,
 +        GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp,
 +        GHash *gh1, GHash *gh2, va_list arg)
 +{
 +	GHash *ghn = gh2;
 +	unsigned int i;
 +
 +	/* Temp storage, we never copy key/values here, just borrow them from real ghash. */
 +	/* Warning! rem_keys is used as gset (i.e. no val memory reserved). */
 +	GHash *keys, *rem_keys;
 +
 +	BLI_assert(ghn);
 +
 +	if (!gh1) {
 +		gh1 = ghash_copy(ghn, keycopyfp, valcopyfp);
 +		ghn = va_arg(arg, GHash *);
 +	}
 +
 +	BLI_assert(!valfreefp || !gh1->is_gset);
 +
 +	keys = ghash_copy(gh1, NULL, NULL);
- 	rem_keys = ghash_new(gh1->hashfp, gh1->cmpfp, __func__, 64, true, gh1->use_store_hash);
++	rem_keys = ghash_new(gh1->hashfp, gh1->cmpfp, __func__, 64, true);
 +
 +	/* First pass: all key found at least once is in keys, all key found at least twice is in rem_keys. */
 +	for ( ; ghn; ghn = va_arg(arg, GHash *)) {
 +		BLI_assert(gh1->cmpfp == ghn->cmpfp);
 +		BLI_assert(gh1->hashfp == ghn->hashfp);
- 		BLI_assert(gh1->is_gset == ghn->is_gset);
++		BLI_assert((gh1->flag & GHASH_FLAG_IS_GSET) == (ghn->is_gset & GHASH_FLAG_IS_GSET));
 +
 +		for (i = 0; i < ghn->nbuckets; i++) {
 +			Entry *e;
 +
 +			for (e = ghn->buckets[i]; e; e = e->next) {
 +				const unsigned int hash = ghash_entryhash(ghn, e);
- 				const unsigned int keys_bucket_hash = ghash_bucket_hash(keys, hash);
++				const unsigned int keys_bucket_index = ghash_bucket_index(keys, hash);
 +
- 				if (ghash_lookup_entry_ex(keys, e->key, hash, keys_bucket_hash) != NULL) {
- 					const unsigned int rem_keys_bucket_hash = ghash_bucket_hash(rem_keys, hash);
++				if (ghash_lookup_entry_ex(keys, e->key, keys_bucket_index) != NULL) {
++					const unsigned int rem_keys_bucket_index = ghash_bucket_index(rem_keys, hash);
 +					Entry *e_new = BLI_mempool_alloc(rem_keys->entrypool);
 +
- 					entry_copy(rem_keys, e_new, ghn, e, hash, NULL, NULL);
++					ghash_entry_copy(rem_keys, e_new, ghn, e, NULL, NULL);
 +
 +					/* As with copy, this does not preserve order (but this would be even less meaningful here). */
- 					e_new->next = rem_keys->buckets[rem_keys_bucket_hash];
- 					rem_keys->buckets[rem_keys_bucket_hash] = e_new;
- 					ghash_expand_buckets(rem_keys, ++rem_keys->nentries, false, false);
++					e_new->next = rem_keys->buckets[rem_keys_bucket_index];
++					rem_keys->buckets[rem_keys_bucket_index] = e_new;
++					ghash_buckets_expand(rem_keys, ++rem_keys->nentries, false);
 +				}
 +				else {
 +					Entry *e_new = BLI_mempool_alloc(keys->entrypool);
 +
- 					entry_copy(keys, e_new, ghn, e, hash, NULL, NULL);
++					ghash_entry_copy(keys, e_new, ghn, e, NULL, NULL);
 +
 +					/* As with copy, this does not preserve order (but this would be even less meaningful here). */
- 					e_new->next = keys->buckets[keys_bucket_hash];
- 					keys->buckets[keys_bucket_hash] = e_new;
- 					ghash_expand_buckets(keys, ++keys->nentries, false, false);
++					e_new->next = keys->buckets[keys_bucket_index];
++					keys->buckets[keys_bucket_index] = e_new;
++					ghash_buckets_expand(keys, ++keys->nentries, false);
 +				}
 +			}
 +		}
 +	}
 +
 +	/* Now, keys we actually want are (keys - rem_keys) */
 +	for (i = 0; i < rem_keys->nbuckets; i++) {
 +		Entry *e;
 +
 +		for (e = rem_keys->buckets[i]; e; e = e->next) {
 +			Entry *e_prev, *e_curr;
 +			const unsigned int hash = ghash_entryhash(re

@@ Diff output truncated at 10240 characters. @@




More information about the Bf-blender-cvs mailing list