2 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license agreement.
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
31 #include <sys/appleapiopts.h>
32 #include <ppc/asm.h> // EXT, LEXT
33 #include <machine/cpu_capabilities.h>
34 #include <machine/commpage.h>
39 #define MP_SPIN_TRIES 1000
42 /* The user mode spinlock library. There are many versions,
43 * in order to take advantage of a few special cases:
44 * - no barrier instructions (SYNC,ISYNC) are needed if UP
45 * - 64-bit processors can use LWSYNC instead of SYNC (if MP)
46 * - 32-bit processors can use ISYNC/EIEIO instead of SYNC (if MP)
47 * - branch hints appropriate to the processor (+ vs ++ etc)
48 * - potentially custom relinquish strategies (not used at present)
49 * - fixes for errata as necessary
51 * The convention for lockwords is that 0==free and -1==locked.
// spinlock_32_try_mp -- 32-bit MP variant of the commpage spinlock "try" op.
// Lockword convention (see block comment above): 0==free, -1==locked.
// Per the descriptor below this variant is installed only when neither
// k64Bit nor kUP is set (32-bit multiprocessors); the isync is the acquire
// barrier that cancels speculative execution past the lock acquisition.
// On the failure path r3 is set to 0 ("we did not get the lock").
// NOTE(review): the routine label and the lwarx/stwcx. reservation loop are
// not visible in this excerpt -- confirm against the full source before editing.
60 li r6,-1 // locked == -1
64 isync // cancel speculative execution
68 li r3,0 // we did not get the lock
71 COMMPAGE_DESCRIPTOR(spinlock_32_try_mp,_COMM_PAGE_SPINLOCK_TRY,0,k64Bit+kUP,kCommPage32)
// spinlock_32_try_up -- 32-bit UP variant of the commpage spinlock "try" op.
// Installed only when kUP is set and k64Bit is not (descriptor below); per
// the header comment, no barrier instructions are needed on uniprocessors,
// which is why this variant has no isync. Failure path returns r3 = 0.
// NOTE(review): the routine label and the reservation loop are not visible
// in this excerpt -- only fragments remain.
79 li r6,-1 // locked == -1
86 li r3,0 // we did not get the lock
89 COMMPAGE_DESCRIPTOR(spinlock_32_try_up,_COMM_PAGE_SPINLOCK_TRY,kUP,k64Bit,kCommPage32)
// spinlock_32_lock_mp -- 32-bit MP blocking lock (_COMM_PAGE_SPINLOCK_LOCK).
// Returns void on success (beqlr+ after the acquire isync). On contention it
// decrements a spin counter (subic. r5,r5,1 -- presumably seeded from
// MP_SPIN_TRIES; the initializing load is not visible here -- TODO confirm)
// and, when the tries are exhausted, branches absolute into the commpage
// relinquish stub to depress priority and let the lock owner run.
// NOTE(review): the routine label and the lwarx/stwcx. loop are missing from
// this excerpt.
96 li r6,-1 // locked == -1
100 isync // cancel speculative execution
101 beqlr+ // we return void
104 subic. r5,r5,1 // try again before relinquish?
106 ba _COMM_PAGE_RELINQUISH
108 COMMPAGE_DESCRIPTOR(spinlock_32_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,0,k64Bit+kUP,kCommPage32)
// spinlock_32_lock_up -- 32-bit UP blocking lock. On a uniprocessor spinning
// is pointless (the holder cannot be running concurrently), so any failure to
// acquire branches straight to _COMM_PAGE_RELINQUISH ("always depress on UP")
// instead of spinning; success falls through to beqlr+ and returns void.
// No barriers are needed on UP (see header comment above).
// NOTE(review): routine label and reservation sequence not visible in this
// excerpt.
114 li r6,-1 // locked == -1
116 bnea- _COMM_PAGE_RELINQUISH // always depress on UP (let lock owner run)
118 beqlr+ // we return void
121 COMMPAGE_DESCRIPTOR(spinlock_32_lock_up,_COMM_PAGE_SPINLOCK_LOCK,kUP,k64Bit,kCommPage32)
// spinlock_32_unlock_mp -- 32-bit MP unlock. Uses the isync/eieio pair as
// the release barrier (per the header comment: on 32-bit MP processors
// ISYNC/EIEIO is faster than a full SYNC) so that all prior stores are
// visible before the lockword is cleared.
// NOTE(review): the store that actually clears the lockword to 0 is not
// visible in this excerpt -- presumably it follows the eieio; confirm
// against the full source.
124 spinlock_32_unlock_mp:
126 isync // complete prior stores before unlock
127 eieio // (using isync/eieio is faster than a sync)
131 COMMPAGE_DESCRIPTOR(spinlock_32_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,0,k64Bit+kUP,kCommPage32)
// spinlock_32_unlock_up -- 32-bit UP unlock. No barrier instructions are
// needed on a uniprocessor (see header comment above).
// NOTE(review): the body (clearing the lockword to 0) is not visible in this
// excerpt; only the label and descriptor remain.
134 spinlock_32_unlock_up:
139 COMMPAGE_DESCRIPTOR(spinlock_32_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,kUP,k64Bit,kCommPage32)
// spinlock_64_try_mp -- 64-bit MP "try" variant (kCommPageBoth: mapped into
// both 32- and 64-bit commpages on 64-bit hardware). On the failure path it
// deliberately performs a stwcx. into the caller's red zone (negative offset
// from r1 via r6=-1) purely to cancel the pending lwarx reservation, then
// returns r3 = 0. The isync is the acquire barrier on success.
// NOTE(review): the routine label and the lwarx/stwcx. acquire sequence are
// not visible in this excerpt.
147 li r6,-1 // locked == -1
151 isync // cancel speculative execution
156 stwcx. r5,r6,r1 // clear the pending reservation (using red zone)
157 li r3,0 // we did not get the lock
160 COMMPAGE_DESCRIPTOR(spinlock_64_try_mp,_COMM_PAGE_SPINLOCK_TRY,k64Bit,kUP,kCommPageBoth)
// spinlock_64_try_up -- 64-bit UP "try" variant. Same shape as the MP
// version but without the acquire isync (no barriers needed on UP, per the
// header comment). The failure path clears the pending reservation with a
// stwcx. into the red zone and returns r3 = 0.
// NOTE(review): routine label and acquire sequence not visible in this
// excerpt.
168 li r6,-1 // locked == -1
176 stwcx. r5,r6,r1 // clear the pending reservation (using red zone)
177 li r3,0 // we did not get the lock
180 COMMPAGE_DESCRIPTOR(spinlock_64_try_up,_COMM_PAGE_SPINLOCK_TRY,k64Bit+kUP,0,kCommPageBoth)
// spinlock_64_lock_mp -- 64-bit MP blocking lock. Success path: acquire
// isync then beqlr++ (returns void). Contention path: cancel the pending
// reservation with a red-zone stwcx., decrement the spin counter in r5
// (presumably seeded from MP_SPIN_TRIES; the load is not visible here --
// TODO confirm), loop back with a deliberately mispredicted bne-- as a cheap
// back-off, and finally branch absolute to the relinquish stub when the
// tries run out. Uses 64-bit branch hints (++/--) per the header comment.
// NOTE(review): the routine label, the lwarx/stwcx. loop, and the "1:" loop
// target are not visible in this excerpt.
187 li r6,-1 // locked == -1
191 isync // cancel speculative execution
192 beqlr++ // we return void
196 stwcx. r3,r6,r1 // clear the pending reservation (using red zone)
197 subic. r5,r5,1 // try again before relinquish?
198 bne-- 1b // mispredict this one (a cheap back-off)
199 ba _COMM_PAGE_RELINQUISH
201 COMMPAGE_DESCRIPTOR(spinlock_64_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,k64Bit,kUP,kCommPageBoth)
// spinlock_64_lock_up -- 64-bit UP blocking lock. Success returns void via
// beqlr++. On contention there is no point spinning on a uniprocessor, so
// the "2:" path clears the pending reservation (red-zone stwcx.) and
// branches straight to _COMM_PAGE_RELINQUISH so the lock owner can run.
// NOTE(review): the routine label and the acquire sequence preceding the
// beqlr++ are not visible in this excerpt.
207 li r6,-1 // locked == -1
211 beqlr++ // we return void
213 2: // always relinquish on UP (let lock owner run)
215 stwcx. r3,r6,r1 // clear the pending reservation (using red zone)
216 ba _COMM_PAGE_RELINQUISH
218 COMMPAGE_DESCRIPTOR(spinlock_64_lock_up,_COMM_PAGE_SPINLOCK_LOCK,k64Bit+kUP,0,kCommPageBoth)
// spinlock_64_unlock_mp -- 64-bit MP unlock. Uses lwsync as the release
// barrier (per the header comment: 64-bit processors can use LWSYNC instead
// of the slower SYNC) so all prior stores complete before the lock is freed.
// NOTE(review): the store clearing the lockword to 0 is not visible in this
// excerpt -- presumably it follows the lwsync; confirm against the full
// source.
221 spinlock_64_unlock_mp:
222 lwsync // complete prior stores before unlock
227 COMMPAGE_DESCRIPTOR(spinlock_64_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit,kUP,kCommPageBoth)
// spinlock_64_unlock_up -- 64-bit UP unlock. No barrier needed on a
// uniprocessor (see header comment above).
// NOTE(review): the body (clearing the lockword to 0) is not visible in this
// excerpt; only the label and descriptor remain.
230 spinlock_64_unlock_up:
235 COMMPAGE_DESCRIPTOR(spinlock_64_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit+kUP,0,kCommPageBoth)
// spinlock_relinquish -- shared back-off path for all _LOCK variants above
// (reached via "ba _COMM_PAGE_RELINQUISH"; kCommPageBoth, no feature gates).
// Saves the lockword pointer in r12 (volatile across the trap but preserved
// by convention here), then issues the Mach thread_switch trap (r0 = -61,
// SYSCALL_THREAD_SWITCH) with THREAD_NULL, SWITCH_OPTION_DEPRESS, and a 1 ms
// timeout to depress priority and let the lock holder run, then branches
// back to _COMM_PAGE_SPINLOCK_LOCK to retry.
// NOTE(review): the "sc" trap instruction and the restore of r3 from r12
// before the retry are not visible in this excerpt -- confirm against the
// full source.
239 mr r12,r3 // preserve lockword ptr across relinquish
240 li r3,0 // THREAD_NULL
241 li r4,1 // SWITCH_OPTION_DEPRESS
242 li r5,1 // timeout (ms)
243 li r0,-61 // SYSCALL_THREAD_SWITCH
246 ba _COMM_PAGE_SPINLOCK_LOCK
248 COMMPAGE_DESCRIPTOR(spinlock_relinquish,_COMM_PAGE_RELINQUISH,0,0,kCommPageBoth)