/* apple/libc (Libc-825.40.1): darwin/SpinlocksLoadStoreEx.c */
1 /*
2 * Copyright (c) 2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 // OSAtomic.h is included by files that include this C file.
25 /* #include "OSAtomic.h" */
26
/* Lock word: 0 = unlocked; the lock routines below store 1 to acquire
 * and 0 to release. */
typedef int32_t OSSpinLock;
28
29 /*
30 * Bear with me, there's method to this madness. clang actually produces more optimal
31 * fastpath code if the lock code is split in half like this.
32 *
33 * Once we're spinning in userspace/kernel like this then we can be more lenient about
34 * how much effort is spent doing the spin. Where the lock is uncontended, this split
35 * arrangement produces a very good fastpath at the cost of some code duplication.
36 *
 * Similarly, the gotos produce less wasteful code during spins than the equivalent code
38 * defined with loops.
39 */
40
41 #import <mach/mach_traps.h>
42
/*
 * Slow path for OSSpinLockLock: entered once the fast path has observed
 * the lock held.  On ARMv7 MP builds it busy-waits a bounded number of
 * iterations, then yields to the kernel via the thread_switch Mach trap
 * before retrying the exclusive load/store acquisition sequence.
 */
static inline void _OSSpinLockSlow(volatile OSSpinLock * lock) _OSATOMIC_VARIANT(_OSSpinLockSlow);
static inline void _OSSpinLockSlow(volatile OSSpinLock * lock)
{
	int32_t r;	/* value observed by the exclusive load (0 == free) */
	uint32_t t;	/* store-exclusive status: non-zero means the store failed */

_spin: ;
#if (defined(_ARM_ARCH_7) && !defined(_OSATOMIC_NO_BARRIERS))
	/* Bounded userspace spin before involving the kernel;
	 * _osatomic_pause() is a CPU spin-wait hint. */
	uint32_t tries = MP_SPIN_TRIES;
	do {
		if (*lock == 0) goto _try_store;
		_osatomic_pause();
	} while (--tries);
#endif

	/* Yield: materialize the three constant arguments in r0-r2 and call
	 * the thread_switch trap stub directly.  NOTE(review): args (0, 1, 1)
	 * presumably mean thread=MACH_PORT_NULL, option=SWITCH_OPTION_DEPRESS,
	 * time=1ms — confirm against <mach/thread_switch.h>.  The clobber list
	 * covers the registers the trap stub may scribble on. */
	__asm__ ("mov r0, %[_a] ;"
		 "mov r1, %[_b] ;"
		 "mov r2, %[_c] ;"
		 "bl _syscall_thread_switch ;"
		 : : [_a] "i" (0), [_b] "i" (1), [_c] "i" (1)
		 : "r0", "r1", "r2", "r9", "r12", "lr" );

_try_store:
	/* Standard load-exclusive / store-exclusive acquisition loop: if the
	 * lock is seen held, go back to spinning; if the exclusive store is
	 * interrupted (t != 0), retry the pair. */
	do {
		_osatomic_load_exclusive(lock, r);
		if (slowpath(r)) goto _spin;
		_osatomic_store_exclusive(lock, 1, t);
	} while (slowpath(t));

	/* Barrier after a successful acquire so critical-section accesses
	 * are not reordered before the lock store. */
	_osatomic_barrier();
}
74
/*
 * OSSpinLockLock: acquire the spin lock, blocking (spinning/yielding)
 * until it is held.  The fast path is a single load-exclusive /
 * store-exclusive attempt; any observed contention tail-calls into
 * _OSSpinLockSlow.  This fast/slow split is deliberate — see the
 * rationale comment at the top of this file.
 */
void OSSpinLockLock(volatile OSSpinLock * lock) _OSATOMIC_VARIANT(OSSpinLockLock);
void OSSpinLockLock(volatile OSSpinLock * lock)
{
	/* Export the legacy spin_lock/_spin_lock symbol names as aliases. */
	_OSATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	_OSATOMIC_ALIAS(_spin_lock, OSSpinLockLock);

	int32_t r;	/* value observed by the exclusive load (0 == free) */
	uint32_t t;	/* store-exclusive status: non-zero means retry */

	do {
		_osatomic_load_exclusive(lock, r);
		/* Lock held by someone else: take the contended slow path. */
		if (slowpath(r)) return _OSSpinLockSlow(lock);
		_osatomic_store_exclusive(lock, 1, t);
	} while (slowpath(t));

	/* Barrier after a successful acquire so critical-section accesses
	 * are not reordered before the lock store. */
	_osatomic_barrier();
}
92
93 #ifndef _OSATOMIC_WFE
94
/*
 * OSSpinLockTry: attempt to acquire the lock without blocking.
 * Returns true iff the lock was free and is now held by the caller;
 * returns false immediately if it was observed held.
 */
bool OSSpinLockTry(OSSpinLock * lock) _OSATOMIC_VARIANT(OSSpinLockTry);
bool OSSpinLockTry(OSSpinLock * lock)
{
	/* Export the legacy spin_lock_try/_spin_lock_try symbol names. */
	_OSATOMIC_ALIAS(spin_lock_try, OSSpinLockTry);
	_OSATOMIC_ALIAS(_spin_lock_try, OSSpinLockTry);

	int32_t r;	/* value observed by the exclusive load (0 == free) */
	uint32_t t;	/* store-exclusive status: non-zero means retry */

	do {
		_osatomic_load_exclusive(lock, r);
		if (slowpath(r)) return false;	/* held: give up immediately */
		_osatomic_store_exclusive(lock, 1, t);
	} while (slowpath(t));

	/* Barrier after a successful acquire so critical-section accesses
	 * are not reordered before the lock store. */
	_osatomic_barrier();
	/* r is necessarily 0 on this path (the r != 0 case returned above),
	 * so this always yields true. */
	return (r == 0);
}
113
/*
 * OSSpinLockUnlock: release the lock by storing 0.  The barrier is
 * issued BEFORE the store so that writes made inside the critical
 * section cannot be reordered past the point where the lock becomes
 * visible as free.
 */
void OSSpinLockUnlock(OSSpinLock * lock) _OSATOMIC_VARIANT(OSSpinLockUnlock);
void OSSpinLockUnlock(OSSpinLock * lock)
{
	/* Export the legacy spin_unlock/_spin_unlock symbol names. */
	_OSATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	_OSATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);

	_osatomic_barrier();
	*lock = 0;
}
123
124 #endif // _OSATOMIC_WFE