/*
 * Copyright (c) 2000-2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <libkern/OSAtomic.h>
#include <kern/debug.h>
#include <machine/atomic.h>
enum {
	false	= 0,
	true	= 1
};

#ifndef NULL
#define NULL ((void *)0)
#endif

#define ATOMIC_DEBUG DEBUG

#if ATOMIC_DEBUG
#define ALIGN_TEST(p,t) do{if((uintptr_t)p&(sizeof(t)-1)) panic("Unaligned atomic pointer %p\n",p);}while(0)
#else
#define ALIGN_TEST(p,t) do{}while(0)
#endif
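/*
 * The pragma pair tagged 19831745 suppresses -Wcast-qual for the whole
 * file: every operation below casts a volatile pointer to an _Atomic
 * pointer, which would otherwise warn about the dropped qualifier.
 */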
// 19831745 - start of big hammer!
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
/*
 * atomic operations
 *	These are _the_ atomic operations, now implemented via compiler built-ins.
 *	It is expected that this C implementation is a candidate for Link-Time-
 *	Optimization inlining, whereas the assembler implementations they replace
 *	were not.
 */
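/*
 * Ordering note: the compare-and-swap variants use memory_order_acq_rel_smp
 * (from <machine/atomic.h>) on the success path and relaxed ordering on
 * failure; the arithmetic and bit operations below are all relaxed.
 */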
#undef OSCompareAndSwap8
Boolean OSCompareAndSwap8(UInt8 oldValue, UInt8 newValue, volatile UInt8 *address)
{
	return __c11_atomic_compare_exchange_strong((_Atomic UInt8 *)address, &oldValue, newValue,
			memory_order_acq_rel_smp, memory_order_relaxed);
}
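/*
 * Note that oldValue is received by value: a failed exchange updates only
 * the local copy, so the caller learns nothing beyond the FALSE return.
 */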
#undef OSCompareAndSwap16
Boolean OSCompareAndSwap16(UInt16 oldValue, UInt16 newValue, volatile UInt16 *address)
{
	return __c11_atomic_compare_exchange_strong((_Atomic UInt16 *)address, &oldValue, newValue,
			memory_order_acq_rel_smp, memory_order_relaxed);
}
#undef OSCompareAndSwap
Boolean OSCompareAndSwap(UInt32 oldValue, UInt32 newValue, volatile UInt32 *address)
{
	ALIGN_TEST(address, UInt32);
	return __c11_atomic_compare_exchange_strong((_Atomic UInt32 *)address, &oldValue, newValue,
			memory_order_acq_rel_smp, memory_order_relaxed);
}
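/*
 * ALIGN_TEST guards only the 32- and 64-bit operations: a one-byte access
 * cannot be misaligned, and the 16-bit variants are left unchecked.
 */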
#undef OSCompareAndSwap64
Boolean OSCompareAndSwap64(UInt64 oldValue, UInt64 newValue, volatile UInt64 *address)
{
	/*
	 * _Atomic uint64 requires 8-byte alignment on all architectures.
	 * This silences the compiler cast warning.  ALIGN_TEST() verifies
	 * that the cast was legal, if defined.
	 */
	_Atomic UInt64 *aligned_addr = (_Atomic UInt64 *)(uintptr_t)address;

	ALIGN_TEST(address, UInt64);
	return __c11_atomic_compare_exchange_strong(aligned_addr, &oldValue, newValue,
			memory_order_acq_rel_smp, memory_order_relaxed);
}
#undef OSCompareAndSwapPtr
Boolean OSCompareAndSwapPtr(void *oldValue, void *newValue, void * volatile *address)
{
#if defined(__LP64__)
	return OSCompareAndSwap64((UInt64)oldValue, (UInt64)newValue, (volatile UInt64 *)address);
#else
	return OSCompareAndSwap((UInt32)oldValue, (UInt32)newValue, (volatile UInt32 *)address);
#endif
}
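/*
 * Illustrative sketch only ('node' and 'list_head' are hypothetical, not
 * part of this API): the pointer-sized CAS is what a lock-free list push
 * is built on.
 *
 *	void * volatile list_head;	// hypothetical shared list head
 *
 *	do {
 *		node->next = list_head;
 *	} while (!OSCompareAndSwapPtr(node->next, node, &list_head));
 */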
SInt8	OSAddAtomic8(SInt32 amount, volatile SInt8 *address)
{
	return __c11_atomic_fetch_add((_Atomic SInt8*)address, amount, memory_order_relaxed);
}
SInt16	OSAddAtomic16(SInt32 amount, volatile SInt16 *address)
{
	return __c11_atomic_fetch_add((_Atomic SInt16*)address, amount, memory_order_relaxed);
}
#undef OSAddAtomic
SInt32	OSAddAtomic(SInt32 amount, volatile SInt32 *address)
{
	ALIGN_TEST(address, UInt32);
	return __c11_atomic_fetch_add((_Atomic SInt32*)address, amount, memory_order_relaxed);
}
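/*
 * Like all the fetch_add-based routines here, OSAddAtomic returns the
 * value the variable held before the addition, not the new value.
 */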
#undef OSAddAtomic64
SInt64	OSAddAtomic64(SInt64 amount, volatile SInt64 *address)
{
	_Atomic SInt64* aligned_address = (_Atomic SInt64*)(uintptr_t)address;

	ALIGN_TEST(address, SInt64);
	return __c11_atomic_fetch_add(aligned_address, amount, memory_order_relaxed);
}
#undef OSAddAtomicLong
long
OSAddAtomicLong(long theAmount, volatile long *address)
{
#ifdef __LP64__
	return (long)OSAddAtomic64((SInt64)theAmount, (SInt64*)address);
#else
	return (long)OSAddAtomic((SInt32)theAmount, address);
#endif
}
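/*
 * 'long' is 64 bits under __LP64__ and 32 bits otherwise, so the dispatch
 * above always matches the operand width.
 */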
#undef OSIncrementAtomic
SInt32	OSIncrementAtomic(volatile SInt32 * value)
{
	return OSAddAtomic(1, value);
}
#undef OSDecrementAtomic
SInt32	OSDecrementAtomic(volatile SInt32 * value)
{
	return OSAddAtomic(-1, value);
}
#undef OSBitAndAtomic
UInt32	OSBitAndAtomic(UInt32 mask, volatile UInt32 * value)
{
	return __c11_atomic_fetch_and((_Atomic UInt32*)value, mask, memory_order_relaxed);
}
#undef OSBitOrAtomic
UInt32	OSBitOrAtomic(UInt32 mask, volatile UInt32 * value)
{
	return __c11_atomic_fetch_or((_Atomic UInt32*)value, mask, memory_order_relaxed);
}
#undef OSBitXorAtomic
UInt32	OSBitXorAtomic(UInt32 mask, volatile UInt32 * value)
{
	return __c11_atomic_fetch_xor((_Atomic UInt32*)value, mask, memory_order_relaxed);
}
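/*
 * OSTestAndSetClear numbers bits big-endian within each byte: bit 0 is the
 * most significant bit of the byte at startAddress.  It returns TRUE when
 * the bit already held the requested value, i.e. when no change was made.
 */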
static Boolean	OSTestAndSetClear(UInt32 bit, Boolean wantSet, volatile UInt8 * startAddress)
{
	UInt8		mask = 1;
	UInt8		oldValue;
	UInt8		wantValue;

	startAddress += (bit / 8);
	mask <<= (7 - (bit % 8));
	wantValue = wantSet ? mask : 0;

	do {
		oldValue = *startAddress;
		if ((oldValue & mask) == wantValue) {
			break;
		}
	} while (! __c11_atomic_compare_exchange_strong((_Atomic UInt8 *)startAddress,
		&oldValue, (oldValue & ~mask) | wantValue, memory_order_relaxed, memory_order_relaxed));

	return (oldValue & mask) == wantValue;
}
Boolean OSTestAndSet(UInt32 bit, volatile UInt8 * startAddress)
{
	return OSTestAndSetClear(bit, true, startAddress);
}
Boolean OSTestAndClear(UInt32 bit, volatile UInt8 * startAddress)
{
	return OSTestAndSetClear(bit, false, startAddress);
}
/*
 * silly unaligned versions
 */
SInt8	OSIncrementAtomic8(volatile SInt8 * value)
{
	return OSAddAtomic8(1, value);
}
SInt8	OSDecrementAtomic8(volatile SInt8 * value)
{
	return OSAddAtomic8(-1, value);
}
UInt8	OSBitAndAtomic8(UInt32 mask, volatile UInt8 * value)
{
	return __c11_atomic_fetch_and((_Atomic UInt8 *)value, mask, memory_order_relaxed);
}
UInt8	OSBitOrAtomic8(UInt32 mask, volatile UInt8 * value)
{
	return __c11_atomic_fetch_or((_Atomic UInt8 *)value, mask, memory_order_relaxed);
}
UInt8	OSBitXorAtomic8(UInt32 mask, volatile UInt8 * value)
{
	return __c11_atomic_fetch_xor((_Atomic UInt8 *)value, mask, memory_order_relaxed);
}
SInt16	OSIncrementAtomic16(volatile SInt16 * value)
{
	return OSAddAtomic16(1, value);
}
SInt16	OSDecrementAtomic16(volatile SInt16 * value)
{
	return OSAddAtomic16(-1, value);
}
UInt16	OSBitAndAtomic16(UInt32 mask, volatile UInt16 * value)
{
	return __c11_atomic_fetch_and((_Atomic UInt16 *)value, mask, memory_order_relaxed);
}
UInt16	OSBitOrAtomic16(UInt32 mask, volatile UInt16 * value)
{
	return __c11_atomic_fetch_or((_Atomic UInt16 *)value, mask, memory_order_relaxed);
}
UInt16	OSBitXorAtomic16(UInt32 mask, volatile UInt16 * value)
{
	return __c11_atomic_fetch_xor((_Atomic UInt16 *)value, mask, memory_order_relaxed);
}
// 19831745 - end of big hammer!
#pragma clang diagnostic pop