2 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 #include <sys/appleapiopts.h>
24 #include <ppc/asm.h> // EXT, LEXT
25 #include <machine/cpu_capabilities.h>
26 #include <machine/commpage.h>
// MP_SPIN_TRIES: number of spin iterations the MP "lock" variants attempt
// (counted down in r5 via "subic. r5,r5,1" below) before branching to
// _COMM_PAGE_RELINQUISH to yield the processor to the lock holder.
31 #define MP_SPIN_TRIES 1000
34 /* The user mode spinlock library. There are many versions,
35 * in order to take advantage of a few special cases:
36 * - no barrier instructions (SYNC,ISYNC) are needed if UP
37 * - 64-bit processors can use LWSYNC instead of SYNC (if MP)
38 * - 32-bit processors can use ISYNC/EIEIO instead of SYNC (if MP)
39 * - branch hints appropriate to the processor (+ vs ++ etc)
40 * - potentially custom relinquish strategies (not used at present)
41 * - fixes for errata as necessary
43 * The convention for lockwords is that 0==free and -1==locked.
// spinlock_try, 32-bit MP variant: single attempt to acquire the lock.
// Lockword convention: 0 == free, -1 == locked (r6 holds the locked value).
// On the failure path r3 is set to 0 ("did not get the lock"); the success
// path runs isync so speculative loads from inside the critical section
// cannot complete before the lock is actually held.
// NOTE(review): this excerpt appears truncated — the entry label and the
// lwarx/stwcx. reservation sequence are not visible here; confirm these
// notes against the complete source file.
52 li r6,-1 // locked == -1
56 isync // cancel speculative execution
60 li r3,0 // we did not get the lock
63 COMMPAGE_DESCRIPTOR(spinlock_32_try_mp,_COMM_PAGE_SPINLOCK_TRY,0,k64Bit+kUP,kCommPage32)
// spinlock_try, 32-bit UP (uniprocessor) variant.  Per the header comment,
// no barrier instructions are needed when UP, so there is no isync here.
// Failure path returns 0 in r3.
// NOTE(review): truncated excerpt — entry label and reservation loop not
// visible; verify against the full file.
71 li r6,-1 // locked == -1
78 li r3,0 // we did not get the lock
81 COMMPAGE_DESCRIPTOR(spinlock_32_try_up,_COMM_PAGE_SPINLOCK_TRY,kUP,k64Bit,kCommPage32)
// spinlock_lock, 32-bit MP variant: spin until the lock is acquired.
// r5 is a retry countdown (presumably initialized to MP_SPIN_TRIES —
// the initialization is not visible in this excerpt); when it reaches
// zero, branch to the relinquish routine to yield the CPU.
// Success path: isync barrier, then beqlr+ returns (routine returns void).
// NOTE(review): truncated excerpt — label, lwarx/stwcx. loop, and counter
// setup are not visible; confirm against the complete source.
88 li r6,-1 // locked == -1
92 isync // cancel speculative execution
93 beqlr+ // we return void
96 subic. r5,r5,1 // try again before relinquish?
98 ba _COMM_PAGE_RELINQUISH
100 COMMPAGE_DESCRIPTOR(spinlock_32_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,0,k64Bit+kUP,kCommPage32)
// spinlock_lock, 32-bit UP variant.  On a uniprocessor, spinning cannot
// help (the lock holder is not running), so a busy lock immediately
// branches to _COMM_PAGE_RELINQUISH to depress priority and let the owner
// run.  No barriers needed when UP.  Returns void on success (beqlr+).
// NOTE(review): truncated excerpt — entry label and reservation sequence
// not visible; verify against the full file.
106 li r6,-1 // locked == -1
108 bnea- _COMM_PAGE_RELINQUISH // always depress on UP (let lock owner run)
110 beqlr+ // we return void
113 COMMPAGE_DESCRIPTOR(spinlock_32_lock_up,_COMM_PAGE_SPINLOCK_LOCK,kUP,k64Bit,kCommPage32)
// spinlock_unlock, 32-bit MP variant.  The isync/eieio pair orders all
// prior stores ahead of the store that clears the lockword (the header
// notes this pairing is faster than a full sync on 32-bit processors).
// NOTE(review): the store releasing the lockword is not visible in this
// excerpt; confirm against the complete source.
116 spinlock_32_unlock_mp:
118 isync // complete prior stores before unlock
119 eieio // (using isync/eieio is faster than a sync)
123 COMMPAGE_DESCRIPTOR(spinlock_32_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,0,k64Bit+kUP,kCommPage32)
// spinlock_unlock, 32-bit UP variant: no barriers needed on a
// uniprocessor.
// NOTE(review): the releasing store is not visible in this excerpt;
// confirm against the complete source.
126 spinlock_32_unlock_up:
131 COMMPAGE_DESCRIPTOR(spinlock_32_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,kUP,k64Bit,kCommPage32)
// spinlock_try, 64-bit MP variant (kCommPageBoth: served to both 32- and
// 64-bit user processes on 64-bit hardware).  On failure the pending
// lwarx reservation is explicitly cancelled with a dummy stwcx. into the
// caller's red zone (addressed off r1), then r3 = 0 is returned.
// Success path uses isync to fence speculative execution.
// NOTE(review): truncated excerpt — entry label and the lwarx/stwcx.
// acquisition sequence are not visible; verify against the full file.
139 li r6,-1 // locked == -1
143 isync // cancel speculative execution
148 stwcx. r5,r6,r1 // clear the pending reservation (using red zone)
149 li r3,0 // we did not get the lock
152 COMMPAGE_DESCRIPTOR(spinlock_64_try_mp,_COMM_PAGE_SPINLOCK_TRY,k64Bit,kUP,kCommPageBoth)
// spinlock_try, 64-bit UP variant: no barriers needed when UP, but the
// pending reservation is still cancelled on failure via a dummy stwcx.
// into the red zone before returning r3 = 0.
// NOTE(review): truncated excerpt — entry label and acquisition sequence
// not visible; verify against the full file.
160 li r6,-1 // locked == -1
168 stwcx. r5,r6,r1 // clear the pending reservation (using red zone)
169 li r3,0 // we did not get the lock
172 COMMPAGE_DESCRIPTOR(spinlock_64_try_up,_COMM_PAGE_SPINLOCK_TRY,k64Bit+kUP,0,kCommPageBoth)
// spinlock_lock, 64-bit MP variant: spin with an r5 retry countdown.
// On each failed attempt the reservation is cancelled (dummy stwcx. into
// the red zone) and the loop retries via "bne-- 1b" — the ++/-- branch
// hints are the 64-bit-processor forms noted in the header.  The bne-- is
// deliberately mispredicted as a cheap back-off.  When the countdown hits
// zero, branch to the relinquish routine.  Success: isync, then return.
// NOTE(review): truncated excerpt — entry label, the "1:" loop target,
// and the lwarx/stwcx. sequence are not visible; verify against the
// complete source.
179 li r6,-1 // locked == -1
183 isync // cancel speculative execution
184 beqlr++ // we return void
188 stwcx. r3,r6,r1 // clear the pending reservation (using red zone)
189 subic. r5,r5,1 // try again before relinquish?
190 bne-- 1b // mispredict this one (a cheap back-off)
191 ba _COMM_PAGE_RELINQUISH
193 COMMPAGE_DESCRIPTOR(spinlock_64_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,k64Bit,kUP,kCommPageBoth)
// spinlock_lock, 64-bit UP variant.  On a uniprocessor a busy lock means
// the owner is not running, so instead of spinning the "2:" path cancels
// the reservation (dummy stwcx. into the red zone) and branches straight
// to _COMM_PAGE_RELINQUISH.  Returns void on success (beqlr++).
// NOTE(review): truncated excerpt — entry label and acquisition sequence
// not visible; verify against the full file.
199 li r6,-1 // locked == -1
203 beqlr++ // we return void
205 2: // always relinquish on UP (let lock owner run)
207 stwcx. r3,r6,r1 // clear the pending reservation (using red zone)
208 ba _COMM_PAGE_RELINQUISH
210 COMMPAGE_DESCRIPTOR(spinlock_64_lock_up,_COMM_PAGE_SPINLOCK_LOCK,k64Bit+kUP,0,kCommPageBoth)
// spinlock_unlock, 64-bit MP variant.  Uses lwsync (lighter than sync on
// 64-bit processors, per the header comment) to order all prior stores
// ahead of the lockword release.
// NOTE(review): the releasing store is not visible in this excerpt;
// confirm against the complete source.
213 spinlock_64_unlock_mp:
214 lwsync // complete prior stores before unlock
219 COMMPAGE_DESCRIPTOR(spinlock_64_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit,kUP,kCommPageBoth)
// spinlock_unlock, 64-bit UP variant: no barrier needed on a
// uniprocessor.
// NOTE(review): the releasing store is not visible in this excerpt;
// confirm against the complete source.
222 spinlock_64_unlock_up:
227 COMMPAGE_DESCRIPTOR(spinlock_64_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit+kUP,0,kCommPageBoth)
// spinlock_relinquish: yield the processor so the lock holder can run,
// then retry the lock.  Saves the lockword pointer in r12 (a volatile
// register the trap does not reuse here) across the kernel call, then
// sets up thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1 /* ms */)
// — r0 = -61 is the Mach trap number (negative => Mach trap, not a Unix
// syscall) — and finally branches back to the commpage lock entry point
// to try again.
// NOTE(review): the sc (trap) instruction and the restore of r3 from r12
// are not visible in this excerpt; confirm against the complete source.
231 mr r12,r3 // preserve lockword ptr across relinquish
232 li r3,0 // THREAD_NULL
233 li r4,1 // SWITCH_OPTION_DEPRESS
234 li r5,1 // timeout (ms)
235 li r0,-61 // SYSCALL_THREAD_SWITCH
238 ba _COMM_PAGE_SPINLOCK_LOCK
240 COMMPAGE_DESCRIPTOR(spinlock_relinquish,_COMM_PAGE_RELINQUISH,0,0,kCommPageBoth)