[Bf-blender-cvs] [4ee08e9] master: Atomics: Make naming more obvious about which value is being returned

Sergey Sharybin noreply at git.blender.org
Tue Nov 15 12:16:50 CET 2016


Commit: 4ee08e9533593b0e7cf7f50b3c4c61eb5598c13e
Author: Sergey Sharybin
Date:   Tue Nov 15 12:16:26 2016 +0100
Branches: master
https://developer.blender.org/rB4ee08e9533593b0e7cf7f50b3c4c61eb5598c13e

Atomics: Make naming more obvious about which value is being returned

===================================================================

M	intern/atomic/atomic_ops.h
M	intern/atomic/intern/atomic_ops_ext.h
M	intern/atomic/intern/atomic_ops_msvc.h
M	intern/atomic/intern/atomic_ops_unix.h
M	intern/cycles/kernel/kernel_passes.h
M	intern/cycles/util/util_atomic.h
M	intern/cycles/util/util_stats.h
M	intern/guardedalloc/intern/mallocn_guarded_impl.c
M	intern/guardedalloc/intern/mallocn_lockfree_impl.c
M	source/blender/blenkernel/intern/depsgraph.c
M	source/blender/blenkernel/intern/dynamicpaint.c
M	source/blender/blenkernel/intern/mesh_evaluate.c
M	source/blender/blenkernel/intern/pbvh.c
M	source/blender/blenlib/intern/task.c
M	source/blender/compositor/intern/COM_ExecutionGroup.cpp
M	source/blender/depsgraph/intern/eval/deg_eval.cc
M	source/blender/editors/space_file/filelist.c
M	source/gameengine/VideoTexture/VideoDeckLink.cpp

===================================================================

diff --git a/intern/atomic/atomic_ops.h b/intern/atomic/atomic_ops.h
index f78eab7..c3926fd 100644
--- a/intern/atomic/atomic_ops.h
+++ b/intern/atomic/atomic_ops.h
@@ -77,13 +77,13 @@
 /* Function prototypes. */
 
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x);
 ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new);
 #endif
 
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x);
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x);
 ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new);
 
 ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x);
@@ -93,18 +93,18 @@ ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x);
 ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b);
 ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b);
 
-ATOMIC_INLINE size_t atomic_add_z(size_t *p, size_t x);
-ATOMIC_INLINE size_t atomic_sub_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x);
 ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new);
 
-ATOMIC_INLINE unsigned atomic_add_u(unsigned *p, unsigned x);
-ATOMIC_INLINE unsigned atomic_sub_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_add_and_fetch_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x);
 ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new);
 
 /* WARNING! Float 'atomics' are really faked ones, those are actually closer to some kind of spinlock-sync'ed operation,
  *          which means they are only efficient if collisions are highly unlikely (i.e. if probability of two threads
  *          working on the same pointer at the same time is very low). */
-ATOMIC_INLINE float atomic_add_fl(float *p, const float x);
+ATOMIC_INLINE float atomic_add_and_fetch_fl(float *p, const float x);
 
 /******************************************************************************/
 /* Include system-dependent implementations. */
diff --git a/intern/atomic/intern/atomic_ops_ext.h b/intern/atomic/intern/atomic_ops_ext.h
index 4065299..74ed327 100644
--- a/intern/atomic/intern/atomic_ops_ext.h
+++ b/intern/atomic/intern/atomic_ops_ext.h
@@ -56,25 +56,25 @@
 
 /******************************************************************************/
 /* size_t operations. */
-ATOMIC_INLINE size_t atomic_add_z(size_t *p, size_t x)
+ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x)
 {
 	assert(sizeof(size_t) == LG_SIZEOF_PTR);
 
 #if (LG_SIZEOF_PTR == 8)
-	return (size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x);
+	return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
 #elif (LG_SIZEOF_PTR == 4)
-	return (size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x);
+	return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
 #endif
 }
 
-ATOMIC_INLINE size_t atomic_sub_z(size_t *p, size_t x)
+ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x)
 {
 	assert(sizeof(size_t) == LG_SIZEOF_PTR);
 
 #if (LG_SIZEOF_PTR == 8)
-	return (size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+	return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
 #elif (LG_SIZEOF_PTR == 4)
-	return (size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+	return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
 #endif
 }
 
@@ -91,25 +91,25 @@ ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new)
 
 /******************************************************************************/
 /* unsigned operations. */
-ATOMIC_INLINE unsigned atomic_add_u(unsigned *p, unsigned x)
+ATOMIC_INLINE unsigned atomic_add_and_fetch_u(unsigned *p, unsigned x)
 {
 	assert(sizeof(unsigned) == LG_SIZEOF_INT);
 
 #if (LG_SIZEOF_INT == 8)
-	return (unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x);
+	return (unsigned)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
 #elif (LG_SIZEOF_INT == 4)
-	return (unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x);
+	return (unsigned)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
 #endif
 }
 
-ATOMIC_INLINE unsigned atomic_sub_u(unsigned *p, unsigned x)
+ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x)
 {
 	assert(sizeof(unsigned) == LG_SIZEOF_INT);
 
 #if (LG_SIZEOF_INT == 8)
-	return (unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+	return (unsigned)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
 #elif (LG_SIZEOF_INT == 4)
-	return (unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+	return (unsigned)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
 #endif
 }
 
@@ -127,7 +127,7 @@ ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new)
 /******************************************************************************/
 /* float operations. */
 
-ATOMIC_INLINE float atomic_add_fl(float *p, const float x)
+ATOMIC_INLINE float atomic_add_and_fetch_fl(float *p, const float x)
 {
 	assert(sizeof(float) == sizeof(uint32_t));
 
diff --git a/intern/atomic/intern/atomic_ops_msvc.h b/intern/atomic/intern/atomic_ops_msvc.h
index 3461719..e7aae4a 100644
--- a/intern/atomic/intern/atomic_ops_msvc.h
+++ b/intern/atomic/intern/atomic_ops_msvc.h
@@ -43,12 +43,12 @@
 /******************************************************************************/
 /* 64-bit operations. */
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
 	return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x) + x;
 }
 
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
 	return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x)) - x;
 }
@@ -61,12 +61,12 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
 
 /******************************************************************************/
 /* 32-bit operations. */
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
 	return InterlockedExchangeAdd(p, x) + x;
 }
 
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
 	return InterlockedExchangeAdd(p, -((int32_t)x)) - x;
 }
diff --git a/intern/atomic/intern/atomic_ops_unix.h b/intern/atomic/intern/atomic_ops_unix.h
index e63f09c..3d00f91 100644
--- a/intern/atomic/intern/atomic_ops_unix.h
+++ b/intern/atomic/intern/atomic_ops_unix.h
@@ -58,12 +58,12 @@
 /* 64-bit operations. */
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
 #  if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
 	return __sync_add_and_fetch(p, x);
 }
 
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
 	return __sync_sub_and_fetch(p, x);
 }
@@ -73,7 +73,7 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
 	return __sync_val_compare_and_swap(v, old, _new);
 }
 #  elif (defined(__amd64__) || defined(__x86_64__))
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
 	asm volatile (
 	    "lock; xaddq %0, %1;"
@@ -83,7 +83,7 @@ ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
 	return x;
 }
 
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
 	x = (uint64_t)(-(int64_t)x);
 	asm volatile (
@@ -112,12 +112,12 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
 /******************************************************************************/
 /* 32-bit operations. */
 #if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
 	return __sync_add_and_fetch(p, x);
 }
 
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
 	return __sync_sub_and_fetch(p, x);
 }
@@ -127,7 +127,7 @@ ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _ne
    return __sync_val_compare_and_swap(v, old, _new);
 }
 #elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
 	uint32_t ret = x;
 	asm volatile (
@@ -138,7 +138,7 @@ ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
 	return ret+x;
 }
 
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
 	uint32_t ret = (uint32_t)(-(int32_t)x);
 	asm volatile (
diff --git a/intern/cycles/kernel/kernel_passes.h b/intern/cycles/kernel/kernel_passes.h
index 20cf3fa..7aec47e 100644
--- a/intern/cycles/kernel/kernel_passes.h
+++ b/intern/cycles/kernel/kernel_passes.h

@@ Diff output truncated at 10240 characters. @@




More information about the Bf-blender-cvs mailing list