/*
2 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <sys/appleapiopts.h>
30 #include <ppc/asm.h> // EXT, LEXT
31 #include <machine/cpu_capabilities.h>
32 #include <machine/commpage.h>
37 #define MP_SPIN_TRIES 1000
40 /* The user mode spinlock library. There are many versions,
41 * in order to take advantage of a few special cases:
42 * - no barrier instructions (SYNC,ISYNC) are needed if UP
43 * - 64-bit processors can use LWSYNC instead of SYNC (if MP)
44 * - 32-bit processors can use ISYNC/EIEIO instead of SYNC (if MP)
45 * - branch hints appropriate to the processor (+ vs ++ etc)
46 * - potentially custom relinquish strategies (not used at present)
47 * - fixes for errata as necessary
49 * The convention for lockwords is that 0==free and -1==locked.
 */
58 li r6,-1 // locked == -1
62 isync // cancel speculative execution
66 li r3,0 // we did not get the lock
69 COMMPAGE_DESCRIPTOR(spinlock_32_try_mp,_COMM_PAGE_SPINLOCK_TRY,0,k64Bit+kUP,kCommPage32)
77 li r6,-1 // locked == -1
84 li r3,0 // we did not get the lock
87 COMMPAGE_DESCRIPTOR(spinlock_32_try_up,_COMM_PAGE_SPINLOCK_TRY,kUP,k64Bit,kCommPage32)
94 li r6,-1 // locked == -1
98 isync // cancel speculative execution
99 beqlr+ // we return void
102 subic. r5,r5,1 // try again before relinquish?
104 ba _COMM_PAGE_RELINQUISH
106 COMMPAGE_DESCRIPTOR(spinlock_32_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,0,k64Bit+kUP,kCommPage32)
112 li r6,-1 // locked == -1
114 bnea- _COMM_PAGE_RELINQUISH // always depress on UP (let lock owner run)
116 beqlr+ // we return void
119 COMMPAGE_DESCRIPTOR(spinlock_32_lock_up,_COMM_PAGE_SPINLOCK_LOCK,kUP,k64Bit,kCommPage32)
122 spinlock_32_unlock_mp:
124 isync // complete prior stores before unlock
125 eieio // (using isync/eieio is faster than a sync)
129 COMMPAGE_DESCRIPTOR(spinlock_32_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,0,k64Bit+kUP,kCommPage32)
132 spinlock_32_unlock_up:
137 COMMPAGE_DESCRIPTOR(spinlock_32_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,kUP,k64Bit,kCommPage32)
145 li r6,-1 // locked == -1
149 isync // cancel speculative execution
154 stwcx. r5,r6,r1 // clear the pending reservation (using red zone)
155 li r3,0 // we did not get the lock
158 COMMPAGE_DESCRIPTOR(spinlock_64_try_mp,_COMM_PAGE_SPINLOCK_TRY,k64Bit,kUP,kCommPageBoth)
166 li r6,-1 // locked == -1
174 stwcx. r5,r6,r1 // clear the pending reservation (using red zone)
175 li r3,0 // we did not get the lock
178 COMMPAGE_DESCRIPTOR(spinlock_64_try_up,_COMM_PAGE_SPINLOCK_TRY,k64Bit+kUP,0,kCommPageBoth)
185 li r6,-1 // locked == -1
189 isync // cancel speculative execution
190 beqlr++ // we return void
194 stwcx. r3,r6,r1 // clear the pending reservation (using red zone)
195 subic. r5,r5,1 // try again before relinquish?
196 bne-- 1b // mispredict this one (a cheap back-off)
197 ba _COMM_PAGE_RELINQUISH
199 COMMPAGE_DESCRIPTOR(spinlock_64_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,k64Bit,kUP,kCommPageBoth)
205 li r6,-1 // locked == -1
209 beqlr++ // we return void
211 2: // always relinquish on UP (let lock owner run)
213 stwcx. r3,r6,r1 // clear the pending reservation (using red zone)
214 ba _COMM_PAGE_RELINQUISH
216 COMMPAGE_DESCRIPTOR(spinlock_64_lock_up,_COMM_PAGE_SPINLOCK_LOCK,k64Bit+kUP,0,kCommPageBoth)
219 spinlock_64_unlock_mp:
220 lwsync // complete prior stores before unlock
225 COMMPAGE_DESCRIPTOR(spinlock_64_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit,kUP,kCommPageBoth)
228 spinlock_64_unlock_up:
233 COMMPAGE_DESCRIPTOR(spinlock_64_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit+kUP,0,kCommPageBoth)
237 mr r12,r3 // preserve lockword ptr across relinquish
238 li r3,0 // THREAD_NULL
239 li r4,1 // SWITCH_OPTION_DEPRESS
240 li r5,1 // timeout (ms)
241 li r0,-61 // SYSCALL_THREAD_SWITCH
244 ba _COMM_PAGE_SPINLOCK_LOCK
246 COMMPAGE_DESCRIPTOR(spinlock_relinquish,_COMM_PAGE_RELINQUISH,0,0,kCommPageBoth)