/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/appleapiopts.h>
#include <ppc/asm.h>                            // EXT, LEXT
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>

        .text
        .align  2
        .globl  EXT(spinlock_32_try_mp)
        .globl  EXT(spinlock_32_try_up)
        .globl  EXT(spinlock_32_lock_mp)
        .globl  EXT(spinlock_32_lock_up)
        .globl  EXT(spinlock_32_unlock_mp)
        .globl  EXT(spinlock_32_unlock_up)

        .globl  EXT(spinlock_64_try_mp)
        .globl  EXT(spinlock_64_try_up)
        .globl  EXT(spinlock_64_lock_mp)
        .globl  EXT(spinlock_64_lock_up)
        .globl  EXT(spinlock_64_unlock_mp)
        .globl  EXT(spinlock_64_unlock_up)

        .globl  EXT(spinlock_relinquish)

#define MP_SPIN_TRIES   1000


// The user mode spinlock library.  There are many versions,
// in order to take advantage of a few special cases:
//      - no barrier instructions (SYNC, ISYNC) are needed if UP
//      - 64-bit processors can use LWSYNC instead of SYNC (if MP)
//      - branch hints appropriate to the processor (+ vs ++ etc)
//      - potentially custom relinquish strategies (not used at present)
//      - fixes for errata as necessary

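// Illustrative sketch (not part of the commpage code itself): each routine
// below is published at a fixed commpage address (see the COMMPAGE_DESCRIPTOR
// lines).  Per the normal ABI the lockword ptr arrives in r3, "try" returns
// 1 or 0 in r3, and "lock"/"unlock" return void.  A hedged caller-side C
// sketch, assuming only the _COMM_PAGE_* constants from
// <machine/cpu_capabilities.h> (all other names are illustrative):
//
//      typedef volatile unsigned int commpage_spinlock_t;
//      typedef int  (*spinlock_try_fn)(commpage_spinlock_t *);
//      typedef void (*spinlock_void_fn)(commpage_spinlock_t *);
//
//      static const spinlock_try_fn  spin_try    = (spinlock_try_fn)  _COMM_PAGE_SPINLOCK_TRY;
//      static const spinlock_void_fn spin_lock   = (spinlock_void_fn) _COMM_PAGE_SPINLOCK_LOCK;
//      static const spinlock_void_fn spin_unlock = (spinlock_void_fn) _COMM_PAGE_SPINLOCK_UNLOCK;
//
//      static commpage_spinlock_t mylock;              // lock word, 0 means unlocked
//
//      void with_lock_held(void (*work)(void))
//      {
//          if (!spin_try(&mylock))                     // fast path failed...
//              spin_lock(&mylock);                     // ...spin (and maybe relinquish) until held
//          work();
//          spin_unlock(&mylock);
//      }
//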
spinlock_32_try_mp:
        mr      r5,r3                           // save lockword ptr (r3 will hold the result)
        li      r3,1                            // assume success: return 1
1:
        lwarx   r4,0,r5                         // load the lockword and take a reservation
        cmpwi   r4,0
        bne-    2f                              // nonzero means the lock is already held
        stwcx.  r5,0,r5                         // try to claim it (any nonzero value will do)
        isync                                   // cancel speculative execution
        beqlr+                                  // store succeeded: return 1
        b       1b
2:
        li      r3,0                            // lock was busy: return 0
        blr

        COMMPAGE_DESCRIPTOR(spinlock_32_try_mp,_COMM_PAGE_SPINLOCK_TRY,0,k64Bit+kUP,0)

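// For reference: the lwarx/stwcx. loop in spinlock_32_try_mp above is the
// PPC load-reserve / store-conditional idiom for an atomic compare-and-swap.
// A hedged C11 sketch of the same logic (names are illustrative, not part of
// xnu; the asm stores the lock's own address as the "held" value, and the
// isync plays the role of the acquire ordering requested here):
//
//      #include <stdatomic.h>
//      #include <stdbool.h>
//      #include <stdint.h>
//
//      static bool spinlock_try_sketch(_Atomic(uint32_t) *lock)
//      {
//          uint32_t expected = 0;                      // the lock is free iff the word is 0
//          return atomic_compare_exchange_strong_explicit(
//              lock, &expected, (uint32_t)(uintptr_t)lock,
//              memory_order_acquire, memory_order_relaxed);
//      }
//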
spinlock_32_try_up:
        mr      r5,r3                           // save lockword ptr (r3 will hold the result)
        li      r3,1                            // assume success: return 1
1:
        lwarx   r4,0,r5
        cmpwi   r4,0
        bne-    2f
        stwcx.  r5,0,r5
        beqlr+                                  // no barrier needed on UP
        b       1b
2:
        li      r3,0                            // lock was busy: return 0
        blr

        COMMPAGE_DESCRIPTOR(spinlock_32_try_up,_COMM_PAGE_SPINLOCK_TRY,kUP,k64Bit,0)


spinlock_32_lock_mp:
        li      r5,MP_SPIN_TRIES
1:
        lwarx   r4,0,r3
        cmpwi   r4,0
        bne-    2f
        stwcx.  r3,0,r3
        isync                                   // cancel speculative execution
        beqlr+                                  // we return void
        b       1b
2:
        subic.  r5,r5,1                         // try again before relinquish?
        bne     1b
        ba      _COMM_PAGE_RELINQUISH

        COMMPAGE_DESCRIPTOR(spinlock_32_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,0,k64Bit+kUP,0)

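// For reference: a hedged C-level rendering of the MP lock strategy above
// (illustrative only, not part of xnu).  Spin on the lockword, and after
// MP_SPIN_TRIES failed attempts depress this thread's priority for ~1 ms via
// the thread_switch() trap so the lock holder can run, then start over:
//
//      #include <stdatomic.h>
//      #include <stdint.h>
//      #include <mach/mach_traps.h>                    // thread_switch()
//      #include <mach/thread_switch.h>                 // SWITCH_OPTION_DEPRESS
//      #include <mach/port.h>                          // MACH_PORT_NULL
//
//      #define MP_SPIN_TRIES_SKETCH 1000               // mirrors MP_SPIN_TRIES above
//
//      static void spinlock_lock_sketch(_Atomic(uint32_t) *lock)
//      {
//          for (;;) {
//              for (int tries = MP_SPIN_TRIES_SKETCH; tries > 0; tries--) {
//                  uint32_t expected = 0;
//                  if (atomic_compare_exchange_weak_explicit(
//                          lock, &expected, (uint32_t)(uintptr_t)lock,
//                          memory_order_acquire, memory_order_relaxed))
//                      return;                         // lock acquired
//              }
//              // Out of tries: relinquish, as spinlock_relinquish does below.
//              (void) thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 1);
//          }
//      }
//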
spinlock_32_lock_up:
1:
        lwarx   r4,0,r3
        cmpwi   r4,0
        bnea-   _COMM_PAGE_RELINQUISH           // always depress on UP (let lock owner run)
        stwcx.  r3,0,r3
        beqlr+                                  // we return void
        b       1b

        COMMPAGE_DESCRIPTOR(spinlock_32_lock_up,_COMM_PAGE_SPINLOCK_LOCK,kUP,k64Bit,0)


spinlock_32_unlock_mp:
        li      r4,0
        sync                                    // complete prior stores before unlock
        stw     r4,0(r3)
        blr

        COMMPAGE_DESCRIPTOR(spinlock_32_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,0,k64Bit+kUP,0)


spinlock_32_unlock_up:
        li      r4,0
        stw     r4,0(r3)
        blr

        COMMPAGE_DESCRIPTOR(spinlock_32_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,kUP,k64Bit,0)


spinlock_64_try_mp:
        mr      r5,r3                           // save lockword ptr (r3 will hold the result)
        li      r3,1                            // assume success: return 1
1:
        lwarx   r4,0,r5
        cmpwi   r4,0
        bne--   2f
        stwcx.  r5,0,r5
        isync                                   // cancel speculative execution
        beqlr++
        b       1b
2:
        li      r6,-4                           // use a scratch word in the red zone below SP
        stwcx.  r5,r6,r1                        // clear the pending reservation (using red zone)
        li      r3,0                            // Pass failure
        blr

        COMMPAGE_DESCRIPTOR(spinlock_64_try_mp,_COMM_PAGE_SPINLOCK_TRY,k64Bit,kUP,0)


spinlock_64_try_up:
        mr      r5,r3                           // save lockword ptr (r3 will hold the result)
        li      r3,1                            // assume success: return 1
1:
        lwarx   r4,0,r5
        cmpwi   r4,0
        bne--   2f
        stwcx.  r5,0,r5
        beqlr++
        b       1b
2:
        li      r6,-4
        stwcx.  r5,r6,r1                        // clear the pending reservation (using red zone)
        li      r3,0                            // Pass failure
        blr

        COMMPAGE_DESCRIPTOR(spinlock_64_try_up,_COMM_PAGE_SPINLOCK_TRY,k64Bit+kUP,0,0)


spinlock_64_lock_mp:
        li      r5,MP_SPIN_TRIES
1:
        lwarx   r4,0,r3
        cmpwi   r4,0
        bne--   2f
        stwcx.  r3,0,r3
        isync                                   // cancel speculative execution
        beqlr++                                 // we return void
        b       1b
2:
        li      r6,-4
        stwcx.  r3,r6,r1                        // clear the pending reservation (using red zone)
        subic.  r5,r5,1                         // try again before relinquish?
        bne--   1b                              // mispredict this one (a cheap back-off)
        ba      _COMM_PAGE_RELINQUISH

        COMMPAGE_DESCRIPTOR(spinlock_64_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,k64Bit,kUP,0)


spinlock_64_lock_up:
1:
        lwarx   r4,0,r3
        cmpwi   r4,0
        bne--   2f
        stwcx.  r3,0,r3
        beqlr++                                 // we return void
        b       1b
2:                                              // always relinquish on UP (let lock owner run)
        li      r6,-4
        stwcx.  r3,r6,r1                        // clear the pending reservation (using red zone)
        ba      _COMM_PAGE_RELINQUISH

        COMMPAGE_DESCRIPTOR(spinlock_64_lock_up,_COMM_PAGE_SPINLOCK_LOCK,k64Bit+kUP,0,0)


spinlock_64_unlock_mp:
        li      r4,0
        lwsync                                  // complete prior stores before unlock
        stw     r4,0(r3)
        blr

        COMMPAGE_DESCRIPTOR(spinlock_64_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit,kUP,0)


spinlock_64_unlock_up:
        li      r4,0
        stw     r4,0(r3)
        blr

        COMMPAGE_DESCRIPTOR(spinlock_64_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit+kUP,0,0)


spinlock_relinquish:
        mr      r12,r3                          // preserve lockword ptr across relinquish
        li      r3,0                            // THREAD_NULL
        li      r4,1                            // SWITCH_OPTION_DEPRESS
        li      r5,1                            // timeout (ms)
        li      r0,-61                          // SYSCALL_THREAD_SWITCH
        sc                                      // relinquish
        mr      r3,r12                          // restore lockword ptr
        ba      _COMM_PAGE_SPINLOCK_LOCK        // retry the lock

        COMMPAGE_DESCRIPTOR(spinlock_relinquish,_COMM_PAGE_RELINQUISH,0,0,0)
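// The trap sequence above corresponds to the user-level Mach call declared
// in <mach/mach_traps.h>:
//
//      kern_return_t thread_switch(mach_port_name_t thread_name, int option,
//                                  mach_msg_timeout_t option_time);
//
// with r3 = thread_name (THREAD_NULL), r4 = option (SWITCH_OPTION_DEPRESS),
// r5 = option_time (1 ms), and r0 = the negated Mach trap number.  On return,
// the lockword ptr is restored and the lock is retried from the top of
// _COMM_PAGE_SPINLOCK_LOCK.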