/*
 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */
#ifndef __DISPATCH_SHIMS_ATOMIC__
#define __DISPATCH_SHIMS_ATOMIC__
/* x86 & cortex-a8 have a 64 byte cacheline */
#define DISPATCH_CACHELINE_SIZE 64
#define ROUND_UP_TO_CACHELINE_SIZE(x) \
        (((x) + (DISPATCH_CACHELINE_SIZE - 1)) & ~(DISPATCH_CACHELINE_SIZE - 1))
#define ROUND_UP_TO_VECTOR_SIZE(x) \
        (((x) + 15) & ~15)
#define DISPATCH_CACHELINE_ALIGN \
        __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE)))
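/*
 * Illustrative usage (not part of the original header): the rounding macros
 * compute the next multiple of the cacheline size (the vector variant assumes
 * 16-byte vectors, as filled in above), and DISPATCH_CACHELINE_ALIGN aligns a
 * type or object to a cacheline boundary. For example:
 *
 *      ROUND_UP_TO_CACHELINE_SIZE(1)  == 64
 *      ROUND_UP_TO_CACHELINE_SIZE(64) == 64
 *      ROUND_UP_TO_CACHELINE_SIZE(65) == 128
 *
 *      struct example_counters_s {     // hypothetical type
 *              long counter;
 *      } DISPATCH_CACHELINE_ALIGN;     // instances start on a 64-byte boundary
 */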

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)

#define _dispatch_atomic_barrier() __sync_synchronize()
// see comment in dispatch_once.c
#define dispatch_atomic_maximally_synchronizing_barrier() \
        _dispatch_atomic_barrier()
// assume atomic builtins provide barriers
#define dispatch_atomic_barrier()
#define dispatch_atomic_acquire_barrier()
#define dispatch_atomic_release_barrier()
#define dispatch_atomic_store_barrier()

#define _dispatch_hardware_pause() asm("")
#define _dispatch_debugger() asm("trap")
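/*
 * Illustrative usage (not part of the original header): _dispatch_hardware_pause()
 * is meant for the body of a spin-wait loop; on this generic path it expands to
 * an empty asm statement, and the x86 section below overrides it with "pause".
 * For example, with a hypothetical flag:
 *
 *      extern int volatile example_ready;
 *
 *      while (!example_ready) {
 *              _dispatch_hardware_pause();     // hint that we are busy-waiting
 *      }
 */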

#define dispatch_atomic_cmpxchg(p, e, n) \
        __sync_bool_compare_and_swap((p), (e), (n))
#if __has_builtin(__sync_swap)
#define dispatch_atomic_xchg(p, n) \
        ((typeof(*(p)))__sync_swap((p), (n)))
#else
#define dispatch_atomic_xchg(p, n) \
        ((typeof(*(p)))__sync_lock_test_and_set((p), (n)))
#endif
#define dispatch_atomic_add(p, v) __sync_add_and_fetch((p), (v))
#define dispatch_atomic_sub(p, v) __sync_sub_and_fetch((p), (v))
#define dispatch_atomic_or(p, v) __sync_fetch_and_or((p), (v))
#define dispatch_atomic_and(p, v) __sync_fetch_and_and((p), (v))

#define dispatch_atomic_inc(p) dispatch_atomic_add((p), 1)
#define dispatch_atomic_dec(p) dispatch_atomic_sub((p), 1)
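/*
 * Illustrative usage (not part of the original header), with hypothetical
 * globals and helpers:
 *
 *      static uint32_t example_refcnt = 1;
 *      static void *example_singleton;
 *
 *      // atomically take a reference (add-and-fetch returns the new value)
 *      (void)dispatch_atomic_inc(&example_refcnt);
 *
 *      // publish a value exactly once: only the first CAS can succeed
 *      void *p = example_make_value();
 *      if (!dispatch_atomic_cmpxchg(&example_singleton, NULL, p)) {
 *              example_free_value(p);  // lost the race; another thread published
 *      }
 */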
// really just a low level abort()
#define _dispatch_hardware_crash() __builtin_trap()

#define dispatch_atomic_cmpxchg2o(p, f, e, n) \
        dispatch_atomic_cmpxchg(&(p)->f, (e), (n))
#define dispatch_atomic_xchg2o(p, f, n) \
        dispatch_atomic_xchg(&(p)->f, (n))
#define dispatch_atomic_add2o(p, f, v) \
        dispatch_atomic_add(&(p)->f, (v))
#define dispatch_atomic_sub2o(p, f, v) \
        dispatch_atomic_sub(&(p)->f, (v))
#define dispatch_atomic_or2o(p, f, v) \
        dispatch_atomic_or(&(p)->f, (v))
#define dispatch_atomic_and2o(p, f, v) \
        dispatch_atomic_and(&(p)->f, (v))
#define dispatch_atomic_inc2o(p, f) \
        dispatch_atomic_add2o((p), f, 1)
#define dispatch_atomic_dec2o(p, f) \
        dispatch_atomic_sub2o((p), f, 1)
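/*
 * Illustrative expansion (not part of the original header): the "2o" variants
 * take an object pointer and a field name and apply the corresponding atomic
 * operation to that member. With a hypothetical object:
 *
 *      struct example_object_s {
 *              int volatile suspend_cnt;
 *      } *obj;
 *
 *      dispatch_atomic_inc2o(obj, suspend_cnt);
 *      // expands to dispatch_atomic_add(&(obj)->suspend_cnt, 1)
 */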

#else
#error "Please upgrade to GCC 4.2 or newer."
#endif

#if defined(__x86_64__) || defined(__i386__)

// GCC emits nothing for __sync_synchronize() on x86_64 & i386
#undef _dispatch_atomic_barrier
#define _dispatch_atomic_barrier() \
        __asm__ __volatile__( \
        "mfence" \
        : : : "memory")
#undef dispatch_atomic_maximally_synchronizing_barrier
#ifdef __LP64__
#define dispatch_atomic_maximally_synchronizing_barrier() \
        do { unsigned long _clbr; __asm__ __volatile__( \
        "cpuid" \
        : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \
        ); } while(0)
#else
#ifdef __llvm__
#define dispatch_atomic_maximally_synchronizing_barrier() \
        do { unsigned long _clbr; __asm__ __volatile__( \
        "cpuid" \
        : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \
        ); } while(0)
#else // gcc does not allow inline i386 asm to clobber ebx
#define dispatch_atomic_maximally_synchronizing_barrier() \
        do { unsigned long _clbr; __asm__ __volatile__( \
        "pushl %%ebx\n\t" \
        "cpuid\n\t" \
        "popl %%ebx" \
        : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory" \
        ); } while(0)
#endif
#endif
#undef _dispatch_hardware_pause
#define _dispatch_hardware_pause() asm("pause")
#undef _dispatch_debugger
#define _dispatch_debugger() asm("int3")

#elif defined(__ppc__) || defined(__ppc64__)

// GCC emits "sync" for __sync_synchronize() on ppc & ppc64
#undef _dispatch_atomic_barrier
#ifdef __LP64__
#define _dispatch_atomic_barrier() \
        __asm__ __volatile__( \
        "isync\n\t" \
        "lwsync" \
        : : : "memory")
#else
#define _dispatch_atomic_barrier() \
        __asm__ __volatile__( \
        "isync\n\t" \
        "eieio" \
        : : : "memory")
#endif
#undef dispatch_atomic_maximally_synchronizing_barrier
#define dispatch_atomic_maximally_synchronizing_barrier() \
        __asm__ __volatile__( \
        "sync" \
        : : : "memory")

#endif

#endif // __DISPATCH_SHIMS_ATOMIC__