-int32_t OSAtomicAdd32( int32_t theAmount, int32_t *theValue );
-inline static
-int32_t OSAtomicIncrement32( int32_t *theValue ) { return OSAtomicAdd32( 1, theValue); }
-inline static
-int32_t OSAtomicDecrement32( int32_t *theValue ) { return OSAtomicAdd32( -1, theValue); }
-int32_t OSAtomicOr32( uint32_t theMask, uint32_t *theValue );
-int32_t OSAtomicAnd32( uint32_t theMask, uint32_t *theValue );
-int32_t OSAtomicXor32( uint32_t theMask, uint32_t *theValue );
-#if defined(__ppc64__) || defined(__i386__)
-int64_t OSAtomicAdd64( int64_t theAmount, int64_t *theValue );
-inline static
-int64_t OSAtomicIncrement64( int64_t *theValue ) { return OSAtomicAdd64( 1, theValue); }
-inline static
-int64_t OSAtomicDecrement64( int64_t *theValue ) { return OSAtomicAdd64( -1, theValue); }
-#endif /* defined(__ppc64__) || defined(__i386__) */
-
-/* Compare and swap. They do not incorporate memory barriers and thus cannot be used
- * by themselved to synchronize shared memory. They return true if the swap occured.
+int32_t OSAtomicAdd32( int32_t __theAmount, volatile int32_t *__theValue );
+int32_t OSAtomicAdd32Barrier( int32_t __theAmount, volatile int32_t *__theValue );
+
+__inline static
+int32_t OSAtomicIncrement32( volatile int32_t *__theValue )
+ { return OSAtomicAdd32( 1, __theValue); }
+__inline static
+int32_t OSAtomicIncrement32Barrier( volatile int32_t *__theValue )
+ { return OSAtomicAdd32Barrier( 1, __theValue); }
+
+__inline static
+int32_t OSAtomicDecrement32( volatile int32_t *__theValue )
+ { return OSAtomicAdd32( -1, __theValue); }
+__inline static
+int32_t OSAtomicDecrement32Barrier( volatile int32_t *__theValue )
+ { return OSAtomicAdd32Barrier( -1, __theValue); }
+
+#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+
+int64_t OSAtomicAdd64( int64_t __theAmount, volatile int64_t *__theValue );
+int64_t OSAtomicAdd64Barrier( int64_t __theAmount, volatile int64_t *__theValue );
+
+__inline static
+int64_t OSAtomicIncrement64( volatile int64_t *__theValue )
+ { return OSAtomicAdd64( 1, __theValue); }
+__inline static
+int64_t OSAtomicIncrement64Barrier( volatile int64_t *__theValue )
+ { return OSAtomicAdd64Barrier( 1, __theValue); }
+
+__inline static
+int64_t OSAtomicDecrement64( volatile int64_t *__theValue )
+ { return OSAtomicAdd64( -1, __theValue); }
+__inline static
+int64_t OSAtomicDecrement64Barrier( volatile int64_t *__theValue )
+ { return OSAtomicAdd64Barrier( -1, __theValue); }
+
+#endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */
+
+
+/* Boolean functions (and, or, xor). These come in four versions for each operation:
+ * with and without barriers, and returning the old or new value of the operation.
+ * The "Orig" versions return the original value, i.e. before the operation; the non-Orig
+ * versions return the value after the operation. All are layered on top of
+ * compare-and-swap.