[Bf-blender-cvs] [c1bfbcb] decklink: Atomic ops: return value of *add* and *sub* consistent across OSes.

Benoit Bolsee noreply at git.blender.org
Fri Apr 17 00:00:11 CEST 2015


Commit: c1bfbcbbb567c5e3ef1082f3d2973d8d4edf3811
Author: Benoit Bolsee
Date:   Thu Apr 16 23:56:23 2015 +0200
Branches: decklink
https://developer.blender.org/rBc1bfbcbbb567c5e3ef1082f3d2973d8d4edf3811

Atomic ops: return value of *add* and *sub* consistent across OSes.

The Windows and asm variants were returning the value of the variable
before the add or sub operation. All the other variants were returning
the value after the operation. Now all variants return the new value.

===================================================================

M	intern/atomic/atomic_ops.h

===================================================================

diff --git a/intern/atomic/atomic_ops.h b/intern/atomic/atomic_ops.h
index 06a5c8d..a6b7312 100644
--- a/intern/atomic/atomic_ops.h
+++ b/intern/atomic/atomic_ops.h
@@ -102,13 +102,13 @@ atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
 ATOMIC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
 {
-	return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x);
+    return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x)+x;
 }
 
 ATOMIC_INLINE uint64_t
 atomic_sub_uint64(uint64_t *p, uint64_t x)
 {
-	return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x));
+    return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x))-((int64_t)x);
 }
 
 ATOMIC_INLINE uint64_t
@@ -140,24 +140,25 @@ atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
 ATOMIC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
 {
+    uint64_t ret = x;
 	asm volatile (
 	    "lock; xaddq %0, %1;"
-	    : "+r" (x), "=m" (*p) /* Outputs. */
+        : "+r" (ret), "=m" (*p) /* Outputs. */
 	    : "m" (*p) /* Inputs. */
 	    );
-	return x;
+    return ret+x;
 }
 
 ATOMIC_INLINE uint64_t
 atomic_sub_uint64(uint64_t *p, uint64_t x)
 {
-	x = (uint64_t)(-(int64_t)x);
+    uint64_t ret = (uint64_t)(-(int64_t)x);
 	asm volatile (
 	    "lock; xaddq %0, %1;"
-	    : "+r" (x), "=m" (*p) /* Outputs. */
+        : "+r" (ret), "=m" (*p) /* Outputs. */
 	    : "m" (*p) /* Inputs. */
 	    );
-	return x;
+    return ret-x;
 }
 
 ATOMIC_INLINE uint64_t
@@ -247,13 +248,13 @@ atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
 ATOMIC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)
 {
-	return InterlockedExchangeAdd(p, x);
+    return InterlockedExchangeAdd(p, x)+x;
 }
 
 ATOMIC_INLINE uint32_t
 atomic_sub_uint32(uint32_t *p, uint32_t x)
 {
-	return InterlockedExchangeAdd(p, -((int32_t)x));
+    return InterlockedExchangeAdd(p, -((int32_t)x))-((int32_t)x);
 }
 
 ATOMIC_INLINE uint32_t
@@ -285,24 +286,25 @@ atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
 ATOMIC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)
 {
+    uint32_t ret = x;
 	asm volatile (
 	    "lock; xaddl %0, %1;"
-	    : "+r" (x), "=m" (*p) /* Outputs. */
+        : "+r" (ret), "=m" (*p) /* Outputs. */
 	    : "m" (*p) /* Inputs. */
 	    );
-	return x;
+    return ret+x;
 }
 
 ATOMIC_INLINE uint32_t
 atomic_sub_uint32(uint32_t *p, uint32_t x)
 {
-	x = (uint32_t)(-(int32_t)x);
+    uint32_t ret = (uint32_t)(-(int32_t)x);
 	asm volatile (
 	    "lock; xaddl %0, %1;"
-	    : "+r" (x), "=m" (*p) /* Outputs. */
+        : "+r" (ret), "=m" (*p) /* Outputs. */
 	    : "m" (*p) /* Inputs. */
 	    );
-	return x;
+    return ret-x;
 }
 
 ATOMIC_INLINE uint32_t




More information about the Bf-blender-cvs mailing list