* This header declares the OSAtomic group of functions for atomic
* reading and updating of values.
*/
-
+
/*!
* @function OSCompareAndSwap64
*
* See OSCompareAndSwap.
*/
extern Boolean OSCompareAndSwap64(
- UInt64 oldValue,
- UInt64 newValue,
- volatile UInt64 * address);
+ UInt64 oldValue,
+ UInt64 newValue,
+ volatile UInt64 * address);
#define OSCompareAndSwap64(a, b, c) \
(OSCompareAndSwap64(a, b, __SAFE_CAST_PTR(volatile UInt64*,c)))
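/*
 * Usage sketch (editorial example, not part of this patch; names are
 * hypothetical): OSCompareAndSwap64 writes newValue only if the location
 * still holds oldValue, and reports whether the swap happened.
 */
static volatile UInt64 gGenerationCount = 0;

static Boolean
BumpGenerationIfUnchanged(UInt64 expected)
{
	/* true only if gGenerationCount still equals 'expected' */
	return OSCompareAndSwap64(expected, expected + 1, &gGenerationCount);
}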
* See OSAddAtomic.
*/
extern SInt64 OSAddAtomic64(
- SInt64 theAmount,
- volatile SInt64 * address);
+ SInt64 theAmount,
+ volatile SInt64 * address);
#define OSAddAtomic64(a, b) \
(OSAddAtomic64(a, __SAFE_CAST_PTR(volatile SInt64*,b)))
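/*
 * Usage sketch (hypothetical counter name): OSAddAtomic64 returns the value
 * the counter held before the addition.
 */
static volatile SInt64 gBytesQueued = 0;

static SInt64
AccountBytes(SInt64 len)
{
	SInt64 previous = OSAddAtomic64(len, &gBytesQueued);
	/* value immediately after this caller's addition */
	return previous + len;
}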
*
* @discussion
* See OSIncrementAtomic.
-*/
-inline static SInt64 OSIncrementAtomic64(volatile SInt64 * address)
+ */
+inline static SInt64
+OSIncrementAtomic64(volatile SInt64 * address)
{
- return OSAddAtomic64(1LL, address);
+ return OSAddAtomic64(1LL, address);
}
/*!
*
* @discussion
* See OSDecrementAtomic.
-*/
-inline static SInt64 OSDecrementAtomic64(volatile SInt64 * address)
+ */
+inline static SInt64
+OSDecrementAtomic64(volatile SInt64 * address)
{
- return OSAddAtomic64(-1LL, address);
+ return OSAddAtomic64(-1LL, address);
}
#if XNU_KERNEL_PRIVATE
* See OSAddAtomic.
*/
extern long OSAddAtomicLong(
- long theAmount,
- volatile long * address);
+ long theAmount,
+ volatile long * address);
#define OSAddAtomicLong(a, b) \
(OSAddAtomicLong(a, __SAFE_CAST_PTR(volatile long*,b)))
*
* @discussion
* See OSIncrementAtomic.
-*/
-inline static long OSIncrementAtomicLong(volatile long * address)
+ */
+inline static long
+OSIncrementAtomicLong(volatile long * address)
{
- return OSAddAtomicLong(1L, address);
+ return OSAddAtomicLong(1L, address);
}
/* Not to be included in headerdoc.
* 32/64-bit decrement, depending on sizeof(long)
 * @discussion See OSDecrementAtomic.
*/
-inline static long OSDecrementAtomicLong(volatile long * address)
+inline static long
+OSDecrementAtomicLong(volatile long * address)
{
- return OSAddAtomicLong(-1L, address);
+ return OSAddAtomicLong(-1L, address);
}
#endif /* XNU_KERNEL_PRIVATE */
* @result true if newValue was written to the address.
*/
extern Boolean OSCompareAndSwap8(
- UInt8 oldValue,
- UInt8 newValue,
- volatile UInt8 * address);
+ UInt8 oldValue,
+ UInt8 newValue,
+ volatile UInt8 * address);
#define OSCompareAndSwap8(a, b, c) \
(OSCompareAndSwap8(a, b, __SAFE_CAST_PTR(volatile UInt8*,c)))
* @result true if newValue was written to the address.
*/
extern Boolean OSCompareAndSwap16(
- UInt16 oldValue,
- UInt16 newValue,
- volatile UInt16 * address);
+ UInt16 oldValue,
+ UInt16 newValue,
+ volatile UInt16 * address);
#define OSCompareAndSwap16(a, b, c) \
(OSCompareAndSwap16(a, b, __SAFE_CAST_PTR(volatile UInt16*,c)))
* @result true if newValue was written to the address.
*/
extern Boolean OSCompareAndSwap(
- UInt32 oldValue,
- UInt32 newValue,
- volatile UInt32 * address);
+ UInt32 oldValue,
+ UInt32 newValue,
+ volatile UInt32 * address);
#define OSCompareAndSwap(a, b, c) \
(OSCompareAndSwap(a, b, __SAFE_CAST_PTR(volatile UInt32*,c)))
* @result true if newValue was written to the address.
*/
extern Boolean OSCompareAndSwapPtr(
- void * oldValue,
- void * newValue,
- void * volatile * address);
+ void * oldValue,
+ void * newValue,
+ void * volatile * address);
#define OSCompareAndSwapPtr(a, b, c) \
(OSCompareAndSwapPtr(a, b, __SAFE_CAST_PTR(void * volatile *,c)))
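/*
 * Usage sketch (hypothetical names): claim a shared slot exactly once by
 * swapping a NULL pointer for a caller-supplied object.
 */
static void * volatile gSharedSlot = NULL;

static Boolean
PublishOnce(void * object)
{
	/* succeeds only for the first caller; later callers get false */
	return OSCompareAndSwapPtr(NULL, object, &gSharedSlot);
}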
* @result The value before the addition
*/
extern SInt32 OSAddAtomic(
- SInt32 amount,
- volatile SInt32 * address);
+ SInt32 amount,
+ volatile SInt32 * address);
#define OSAddAtomic(a, b) \
(OSAddAtomic(a, __SAFE_CAST_PTR(volatile SInt32*,b)))
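/*
 * Usage sketch (hypothetical reference count): because OSAddAtomic returns
 * the value before the addition, a return of 1 on a -1 add means this call
 * dropped the last reference.
 */
static volatile SInt32 gRefCount = 1;

static Boolean
ReleaseRef(void)
{
	return OSAddAtomic(-1, &gRefCount) == 1;
}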
* @result The value before the addition
*/
extern SInt16 OSAddAtomic16(
- SInt32 amount,
- volatile SInt16 * address);
+ SInt32 amount,
+ volatile SInt16 * address);
/*!
* @function OSAddAtomic8
* @result The value before the addition.
*/
extern SInt8 OSAddAtomic8(
- SInt32 amount,
- volatile SInt8 * address);
+ SInt32 amount,
+ volatile SInt8 * address);
/*!
* @function OSIncrementAtomic
* @result The value before the bitwise operation
*/
extern UInt32 OSBitAndAtomic(
- UInt32 mask,
- volatile UInt32 * address);
+ UInt32 mask,
+ volatile UInt32 * address);
#define OSBitAndAtomic(a, b) \
(OSBitAndAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
* @result The value before the bitwise operation.
*/
extern UInt16 OSBitAndAtomic16(
- UInt32 mask,
- volatile UInt16 * address);
+ UInt32 mask,
+ volatile UInt16 * address);
/*!
* @function OSBitAndAtomic8
* @result The value before the bitwise operation.
*/
extern UInt8 OSBitAndAtomic8(
- UInt32 mask,
- volatile UInt8 * address);
+ UInt32 mask,
+ volatile UInt8 * address);
/*!
* @function OSBitOrAtomic
* @result The value before the bitwise operation.
*/
extern UInt32 OSBitOrAtomic(
- UInt32 mask,
- volatile UInt32 * address);
+ UInt32 mask,
+ volatile UInt32 * address);
#define OSBitOrAtomic(a, b) \
(OSBitOrAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
* @result The value before the bitwise operation.
*/
extern UInt16 OSBitOrAtomic16(
- UInt32 mask,
- volatile UInt16 * address);
+ UInt32 mask,
+ volatile UInt16 * address);
/*!
* @function OSBitOrAtomic8
* @result The value before the bitwise operation.
*/
extern UInt8 OSBitOrAtomic8(
- UInt32 mask,
- volatile UInt8 * address);
+ UInt32 mask,
+ volatile UInt8 * address);
/*!
* @function OSBitXorAtomic
* @result The value before the bitwise operation.
*/
extern UInt32 OSBitXorAtomic(
- UInt32 mask,
- volatile UInt32 * address);
+ UInt32 mask,
+ volatile UInt32 * address);
#define OSBitXorAtomic(a, b) \
(OSBitXorAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
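/*
 * Usage sketch (hypothetical flag word): each bitwise primitive returns the
 * word as it was before the update, so the caller can tell whether the bit
 * actually changed.
 */
static volatile UInt32 gFlags = 0;

#define kFlagOnline 0x00000001

static Boolean
SetOnlineFlag(void)
{
	/* true if the flag was clear before this call set it */
	return (OSBitOrAtomic(kFlagOnline, &gFlags) & kFlagOnline) == 0;
}

static Boolean
ClearOnlineFlag(void)
{
	/* true if the flag was set before this call cleared it */
	return (OSBitAndAtomic((UInt32)~kFlagOnline, &gFlags) & kFlagOnline) != 0;
}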
* @result The value before the bitwise operation.
*/
extern UInt16 OSBitXorAtomic16(
- UInt32 mask,
- volatile UInt16 * address);
+ UInt32 mask,
+ volatile UInt16 * address);
/*!
* @function OSBitXorAtomic8
* @result The value before the bitwise operation.
*/
extern UInt8 OSBitXorAtomic8(
- UInt32 mask,
- volatile UInt8 * address);
+ UInt32 mask,
+ volatile UInt8 * address);
/*!
* @function OSTestAndSet
* @result true if the bit was already set, false otherwise.
*/
extern Boolean OSTestAndSet(
- UInt32 bit,
- volatile UInt8 * startAddress);
+ UInt32 bit,
+ volatile UInt8 * startAddress);
/*!
* @function OSTestAndClear
* @result true if the bit was already clear, false otherwise.
*/
extern Boolean OSTestAndClear(
- UInt32 bit,
- volatile UInt8 * startAddress);
+ UInt32 bit,
+ volatile UInt8 * startAddress);
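/*
 * Usage sketch (hypothetical flag byte): the return value reports the bit's
 * prior state, so a false return from OSTestAndSet means this caller is the
 * one that set it, and a false return from OSTestAndClear means this caller
 * is the one that cleared it.
 */
static volatile UInt8 gInitFlag = 0;

static Boolean
ClaimInitialization(void)
{
	/* false => the bit was clear before, so we won the race */
	return !OSTestAndSet(0, &gInitFlag);
}

static Boolean
ReleaseInitialization(void)
{
	/* false => the bit was set before, so this call actually cleared it */
	return !OSTestAndClear(0, &gInitFlag);
}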
/*!
* @defined OS_SPINLOCK_INIT
* @discussion
* The convention is that unlocked is zero, locked is nonzero.
*/
-#define OS_SPINLOCK_INIT 0
+#define OS_SPINLOCK_INIT 0
-/*!
+/*!
* @typedef OSSpinLock
*
* @abstract
* Multiprocessor locks used within the shared memory area between the kernel and event system. These must work in both user and kernel mode.
*
* @result
- * Returns false if the lock was already held by another thread, true if it took the lock successfully.
+ * Returns false if the lock was already held by another thread, true if it took the lock successfully.
*/
extern Boolean OSSpinLockTry(volatile OSSpinLock * lock);
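/*
 * Usage sketch: OSSpinLockTry does not block, so a caller that cannot afford
 * to spin can fall back to other work. Assumes the companion OSSpinLockUnlock
 * routine declared elsewhere in this header; variable names are hypothetical.
 */
static volatile OSSpinLock gEventLock = OS_SPINLOCK_INIT;

static Boolean
TryTouchSharedEventState(void)
{
	if (!OSSpinLockTry(&gEventLock)) {
		return false;   /* another thread holds the lock */
	}
	/* ... update the shared state ... */
	OSSpinLockUnlock(&gEventLock);
	return true;
}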
#if defined(__arm__) || defined(__arm64__)
extern void OSSynchronizeIO(void);
#else
-static __inline__ void OSSynchronizeIO(void)
+static __inline__ void
+OSSynchronizeIO(void)
{
}
#endif
-#if defined(KERNEL_PRIVATE)
+#if defined(KERNEL_PRIVATE)
-#if defined(__arm__) || defined(__arm64__)
-static inline void OSMemoryBarrier(void) {
- __asm__ volatile("dmb ish" ::: "memory");
+#if defined(__arm__) || defined(__arm64__)
+static inline void
+OSMemoryBarrier(void)
+{
+ __asm__ volatile ("dmb ish" ::: "memory");
}
#elif defined(__i386__) || defined(__x86_64__)
-#if defined(XNU_KERNEL_PRIVATE)
-static inline void OSMemoryBarrier(void) {
- __asm__ volatile("mfence" ::: "memory");
+#if defined(XNU_KERNEL_PRIVATE)
+static inline void
+OSMemoryBarrier(void)
+{
+ __asm__ volatile ("mfence" ::: "memory");
}
#endif /* XNU_KERNEL_PRIVATE */
#endif
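/*
 * Usage sketch (hypothetical producer fields; assumes a KERNEL_PRIVATE
 * configuration where OSMemoryBarrier is available): a full barrier between
 * writing the payload and publishing the ready flag keeps the two stores
 * from being reordered past each other.
 */
static volatile UInt32 gPayload;
static volatile UInt32 gPayloadReady;

static void
PublishPayload(UInt32 value)
{
	gPayload = value;
	OSMemoryBarrier();      /* make the payload store visible first */
	gPayloadReady = 1;
}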