-#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)
-#pragma mark -
-#pragma mark gnu99
-
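-// Full (seq_cst) barrier via the legacy __sync interface; overridden
-// below on x86, where some GCC versions mis-compile __sync_synchronize().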
-#define _os_atomic_full_barrier() \
- __sync_synchronize()
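-// Conditional barrier: the __sync builtins offer no orderings weaker than
-// a full fence, so everything except relaxed maps to the full barrier.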
-#define _os_atomic_barrier(m) \
- ({ switch(os_atomic_memory_order_##m) { \
- case _os_atomic_memory_order_relaxed: \
- break; \
- default: \
- _os_atomic_full_barrier(); break; \
- } })
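-// Load first, fence after: that yields acquire (and, conservatively,
-// seq_cst) semantics; for relaxed, _os_atomic_barrier(m) is a no-op.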
-#define os_atomic_load(p, m) \
- ({ typeof(*(p)) _r = *(p); \
- switch(os_atomic_memory_order_##m) { \
- case _os_atomic_memory_order_relaxed: \
- case _os_atomic_memory_order_acquire: \
- case _os_atomic_memory_order_seq_cst: \
- _os_atomic_barrier(m); \
- break; \
- default: \
- _os_atomic_unimplemented(); break; \
- } _r; })
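-// Fence before the store for release semantics, then fence again after
-// it for seq_cst; relaxed stores emit no fence in either switch.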
-#define os_atomic_store(p, v, m) \
- ({ switch(os_atomic_memory_order_##m) { \
- case _os_atomic_memory_order_relaxed: \
- case _os_atomic_memory_order_release: \
- case _os_atomic_memory_order_seq_cst: \
- _os_atomic_barrier(m); \
- *(p) = (v); break; \
- default: \
- _os_atomic_unimplemented(); break; \
- } switch(os_atomic_memory_order_##m) { \
- case _os_atomic_memory_order_seq_cst: \
- _os_atomic_barrier(m); break; \
- default: \
- break; \
- } })
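-// Prefer Clang's __sync_swap, a full-barrier atomic swap; otherwise fall
-// back to __sync_lock_test_and_set, which GCC documents as an acquire
-// barrier only. This assumes __has_builtin is defined (Clang) or stubbed
-// out to 0 earlier in the header.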
-#if __has_builtin(__sync_swap)
-#define os_atomic_xchg(p, v, m) \
- ((typeof(*(p)))__sync_swap((p), (v)))
-#else
-#define os_atomic_xchg(p, v, m) \
- ((typeof(*(p)))__sync_lock_test_and_set((p), (v)))
-#endif
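-// os_atomic_cmpxchg returns bool; the cmpxchgv variant also writes the
-// value actually observed back through g, so a CAS loop can retry without
-// reloading. The __sync CAS builtins are full barriers; m is ignored.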
-#define os_atomic_cmpxchg(p, e, v, m) \
- __sync_bool_compare_and_swap((p), (e), (v))
-#define os_atomic_cmpxchgv(p, e, v, g, m) \
- ({ typeof(*(g)) _e = (e), _r = \
- __sync_val_compare_and_swap((p), _e, (v)); \
- bool _b = (_e == _r); *(g) = _r; _b; })
-#define os_atomic_cmpxchgvw(p, e, v, g, m) \
- os_atomic_cmpxchgv((p), (e), (v), (g), m)
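-// Minimal retry-loop sketch (hypothetical caller; 'head' and 'node' are
-// illustrative only):
-//   node_t *old = os_atomic_load(&head, relaxed);
-//   do { node->next = old;
-//   } while (!os_atomic_cmpxchgvw(&head, old, node, &old, release));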
-
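-// Arithmetic and bitwise RMW ops. The __sync builtins always imply a full
-// barrier, so m is accepted only for API symmetry; the _orig variants
-// return the value held before the operation.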
-#define os_atomic_add(p, v, m) \
- __sync_add_and_fetch((p), (v))
-#define os_atomic_add_orig(p, v, m) \
- __sync_fetch_and_add((p), (v))
-#define os_atomic_sub(p, v, m) \
- __sync_sub_and_fetch((p), (v))
-#define os_atomic_sub_orig(p, v, m) \
- __sync_fetch_and_sub((p), (v))
-#define os_atomic_and(p, v, m) \
- __sync_and_and_fetch((p), (v))
-#define os_atomic_and_orig(p, v, m) \
- __sync_fetch_and_and((p), (v))
-#define os_atomic_or(p, v, m) \
- __sync_or_and_fetch((p), (v))
-#define os_atomic_or_orig(p, v, m) \
- __sync_fetch_and_or((p), (v))
-#define os_atomic_xor(p, v, m) \
- __sync_xor_and_fetch((p), (v))
-#define os_atomic_xor_orig(p, v, m) \
- __sync_fetch_and_xor((p), (v))
-
-#if defined(__x86_64__) || defined(__i386__)
-// Some GCC versions emit no fence at all for __sync_synchronize() on
-// x86_64 & i386 (GCC bug 36793, fixed in 4.4), so force a real mfence.
-#undef _os_atomic_full_barrier
-#define _os_atomic_full_barrier() \
- ({ __asm__ __volatile__( \
- "mfence" \
- : : : "memory"); })
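-// x86 is TSO: plain loads already have acquire semantics, and seq_cst
-// stores below carry the full barrier, so loads need no fence here.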
-#undef os_atomic_load
-#define os_atomic_load(p, m) \
- ({ switch(os_atomic_memory_order_##m) { \
- case _os_atomic_memory_order_relaxed: \
- case _os_atomic_memory_order_acquire: \
- case _os_atomic_memory_order_seq_cst: \
- break; \
- default: \
- _os_atomic_unimplemented(); break; \
- } *(p); })
-// For seq_cst, an implicitly locked xchg is faster than store + mfence.
-#undef os_atomic_store
-#define os_atomic_store(p, v, m) \
- ({ switch(os_atomic_memory_order_##m) { \
- case _os_atomic_memory_order_relaxed: \
- case _os_atomic_memory_order_release: \
- *(p) = (v); break; \
- case _os_atomic_memory_order_seq_cst: \
- (void)os_atomic_xchg((p), (v), m); break; \
- default: \
- _os_atomic_unimplemented(); break; \
- } })
-#endif
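-// Usage sketch (hypothetical; 'refcnt' is illustrative only):
-//   long refcnt;
-//   os_atomic_store(&refcnt, 1, relaxed);
-//   if (os_atomic_sub(&refcnt, 1, release) == 0) { /* last reference */ }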
-
-#else
-#error "Please upgrade to GCC 4.2 or newer."
-#endif