/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_assert.h>
#include <mach_ldebug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <assym.s>


#include <config_dtrace.h>
#if	CONFIG_DTRACE
#define	LOCKSTAT_LABEL(lab) \
	.data __ASMNL__ \
	.globl lab __ASMNL__ \
	lab: __ASMNL__ \
	.long 9f __ASMNL__ \
	.text __ASMNL__ \
	9: __ASMNL__ \

	.globl	_dtrace_probe, _lockstat_probemap
#define	LOCKSTAT_RECORD(id) \
	lis	r6,hi16(_lockstat_probemap) __ASMNL__ \
	ori	r6,r6,lo16(_lockstat_probemap) __ASMNL__ \
	lwz	r5,4*id(r6) __ASMNL__ \
	mr.	r5,r5 __ASMNL__ \
	beqlr-- __ASMNL__ \
	mr	r4,r3 __ASMNL__ \
	mr	r3,r5 __ASMNL__ \
	li	r5,0 __ASMNL__ \
	li	r6,0 __ASMNL__ \
	li	r7,0 __ASMNL__ \
	li	r8,0 __ASMNL__ \
	PROLOG(0) __ASMNL__ \
	bl	_dtrace_probe __ASMNL__ \
	EPILOG
#endif



#define	STRING		ascii

#define	ILK_LOCKED	0x01
#define	WAIT_FLAG	0x02
#define	WANT_UPGRADE	0x04
#define	WANT_EXCL	0x08
#define	PRIV_EXCL	0x8000

#define	TH_FN_OWNED	0x01

# volatile CR bits
#define hwtimeout	20
#define mlckmiss	21

#define	RW_DATA		0

#define PROLOG(space) \
	stwu	r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
	mfcr	r2 __ASMNL__ \
	mflr	r0 __ASMNL__ \
	stw	r3,FM_ARG0(r1) __ASMNL__ \
	stw	r11,FM_ARG0+0x04(r1) __ASMNL__ \
	stw	r2,(FM_ALIGN(space)+FM_SIZE+FM_CR_SAVE)(r1) __ASMNL__ \
	stw	r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__

#define EPILOG \
	lwz	r1,0(r1) __ASMNL__ \
	lwz	r0,FM_LR_SAVE(r1) __ASMNL__ \
	mtlr	r0 __ASMNL__

/*
 *	void hw_lock_init(hw_lock_t)
 *
 *	Initialize a hardware lock.
 */
	.align	5
	.globl	EXT(hw_lock_init)

LEXT(hw_lock_init)

	li	r0, 0				; set lock to free == 0
	stw	r0, 0(r3)			; Initialize the lock
	blr

/*
 *	unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
 *
 *	Try to acquire spin-lock. The second parameter is the bit mask to test and set.
 *	Multiple bits may be set. Return success (1) or failure (0).
 *	Attempt will fail after timeout ticks of the timebase.
 */
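/*
 *	Illustrative C sketch of the semantics (not part of the build; the
 *	routine below does this atomically with lwarx/stwcx. and also bumps
 *	the preemption count, and the timeout is measured in timebase ticks,
 *	not loop iterations as in this sketch):
 *
 *		unsigned int hw_lock_bit_sketch(volatile unsigned int *lock,
 *		    unsigned int bits, unsigned int timeout)
 *		{
 *			do {
 *				unsigned int old = *lock;
 *				if ((old & bits) == 0 &&
 *				    __sync_bool_compare_and_swap(lock, old, old | bits))
 *					return 1;	// all mask bits were clear; now set
 *			} while (timeout--);
 *			return 0;			// timed out
 *		}
 */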
	.align	5
	.globl	EXT(hw_lock_bit)

LEXT(hw_lock_bit)

	crset	hwtimeout			; timeout option
	mr	r12,r4				; Load bit mask
	mr	r4,r5				; Load timeout value
	b	lckcomm				; Join on up...

/*
 *	void hw_lock_lock(hw_lock_t)
 *
 *	Acquire lock, spinning until it becomes available.
 *	Return with preemption disabled.
 *	We will just set a default timeout and jump into the NORMAL timeout lock.
 */
	.align	5
	.globl	EXT(hw_lock_lock)

LEXT(hw_lock_lock)
	crclr	hwtimeout			; no timeout option
	li	r4,0				; request default timeout value
	li	r12,ILK_LOCKED			; Load bit mask
	b	lckcomm				; Join on up...

lockDisa:
	crset	hwtimeout			; timeout option
	li	r4,0				; request default timeout value
	li	r12,ILK_LOCKED			; Load bit mask
	b	lckcomm				; Join on up...

/*
 *	unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
 *
 *	Try to acquire spin-lock. Return success (1) or failure (0).
 *	Attempt will fail after timeout ticks of the timebase.
 *	We try fairly hard to get this lock. We disable for interruptions, but
 *	reenable after a "short" timeout (128 ticks; we may want to change this).
 *	After checking to see if the large timeout value (passed in) has expired and a
 *	sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
 *	we return either in abject failure, or disable and go back to the lock sniff routine.
 *	If the sniffer finds the lock free, it jumps right up and tries to grab it.
 */
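/*
 *	The strategy above as a C-like sketch (illustrative only;
 *	lock_is_free(), try_grab() and the interrupt helpers are hypothetical
 *	stand-ins for the lcksniff/lcktry code and MSR manipulation below):
 *
 *		while (timeout > 0) {
 *			disable_interrupts();
 *			start = timebase();
 *			while (!lock_is_free(lock) && timebase() - start < 128)
 *				;			// sniff, interrupts disabled
 *			if (lock_is_free(lock) && try_grab(lock))
 *				return 1;		// grabbed; stay disabled
 *			enable_interrupts();		// window for pending 'rupts
 *			timeout -= 128;
 *		}
 *		return 0;				// abject failure
 */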
	.align	5
	.globl	EXT(hw_lock_to)

LEXT(hw_lock_to)
	crset	hwtimeout			; timeout option
	li	r12,ILK_LOCKED			; Load bit mask
lckcomm:
	mfsprg	r6,1				; Get the current activation
	lwz	r5,ACT_PREEMPT_CNT(r6)		; Get the preemption level
	addi	r5,r5,1				; Bring up the disable count
	stw	r5,ACT_PREEMPT_CNT(r6)		; Save it back
	mr	r5,r3				; Get the address of the lock
	li	r8,0				; Set r8 to zero

lcktry:	lwarx	r6,0,r5				; Grab the lock value
	and.	r3,r6,r12			; Is it locked?
	or	r6,r6,r12			; Set interlock
	bne--	lckspin				; Yeah, wait for it to clear...
	stwcx.	r6,0,r5				; Try to seize that there durn lock
	bne--	lcktry				; Couldn't get it...
	li	r3,1				; return true
	.globl	EXT(hwllckPatch_isync)
LEXT(hwllckPatch_isync)
	isync					; Make sure we don't use a speculatively loaded value
	blr					; Go on home...

lckspin: li	r6,lgKillResv			; Get killing field
	stwcx.	r6,0,r6				; Kill reservation

	mr.	r4,r4				; Test timeout value
	bne++	lockspin0
	lis	r4,hi16(EXT(LockTimeOut))	; Get the high part
	ori	r4,r4,lo16(EXT(LockTimeOut))	; And the low part
	lwz	r4,0(r4)			; Get the timeout value
lockspin0:
	mr.	r8,r8				; Is r8 set to zero
	bne++	lockspin1			; No, not the first try; MSR already saved...
	lis	r0,hi16(MASK(MSR_VEC))		; Get vector enable
	mfmsr	r9				; Get the MSR value
	ori	r0,r0,lo16(MASK(MSR_FP))	; Get FP enable
	ori	r7,r0,lo16(MASK(MSR_EE))	; Get EE bit on too
	andc	r9,r9,r0			; Clear FP and VEC
	andc	r7,r9,r7			; Clear EE as well
	mtmsr	r7				; Turn off interruptions
	isync					; May have turned off vec and fp here
	mftb	r8				; Get timestamp on entry
	b	lcksniff

lockspin1: mtmsr	r7			; Turn off interruptions
	mftb	r8				; Get timestamp on entry

lcksniff: lwz	r3,0(r5)			; Get that lock in here
	and.	r3,r3,r12			; Is it free yet?
	beq++	lckretry			; Yeah, try for it again...

	mftb	r10				; Time stamp us now
	sub	r10,r10,r8			; Get the elapsed time
	cmplwi	r10,128				; Have we been spinning for 128 tb ticks?
	blt++	lcksniff			; Not yet...

	mtmsr	r9				; Say, any interrupts pending?

; The following instructions force the pipeline to be interlocked so that only one
; instruction is issued per cycle. This ensures that we stay enabled for a long enough
; time; if it's too short, pending interruptions will not have a chance to be taken

	subi	r4,r4,128			; Back off elapsed time from timeout value
	or	r4,r4,r4			; Do nothing here but force a single cycle delay
	mr.	r4,r4				; See if we used the whole timeout
	li	r3,0				; Assume a timeout return code
	or	r4,r4,r4			; Do nothing here but force a single cycle delay

	ble--	lckfail				; We failed
	b	lockspin1			; Now that we've opened an enable window, keep trying...
lckretry:
	mtmsr	r9				; Restore interrupt state
	li	r8,1				; Ensure that r8 is not 0
	b	lcktry
lckfail:					; We couldn't get the lock
	bf	hwtimeout,lckpanic
	li	r3,0				; Set failure return code
	blr					; Return, head hanging low...
lckpanic:
	mr	r4,r5
	mr	r5,r3
	lis	r3,hi16(lckpanic_str)		; Get the failed lck message
	ori	r3,r3,lo16(lckpanic_str)	; Get the failed lck message
	bl	EXT(panic)
	BREAKPOINT_TRAP				; We die here anyway
	.data
lckpanic_str:
	STRINGD	"timeout on attempt to acquire lock (0x%08X), value = 0x%08X\n\000"
	.text

/*
 *	void hw_lock_unlock(hw_lock_t)
 *
 *	Unconditionally release lock.
 *	Release preemption level.
 */
	.align	5
	.globl	EXT(hw_lock_unlock)

LEXT(hw_lock_unlock)

	.globl	EXT(hwulckPatch_isync)
LEXT(hwulckPatch_isync)
	isync
	.globl	EXT(hwulckPatch_eieio)
LEXT(hwulckPatch_eieio)
	eieio
	li	r0, 0				; set lock to free
	stw	r0, 0(r3)

	b	epStart				; Go enable preemption...

/*
 *	unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
 *
 *	Release bit based spin-lock. The second parameter is the bit mask to clear.
 *	Multiple bits may be cleared.
 *
 */
	.align	5
	.globl	EXT(hw_unlock_bit)

LEXT(hw_unlock_bit)

	.globl	EXT(hwulckbPatch_isync)
LEXT(hwulckbPatch_isync)
	isync
	.globl	EXT(hwulckbPatch_eieio)
LEXT(hwulckbPatch_eieio)
	eieio
ubittry: lwarx	r0,0,r3				; Grab the lock value
	andc	r0,r0,r4			; Clear the lock bits
	stwcx.	r0,0,r3				; Try to clear that there durn lock
	bne-	ubittry				; Try again, couldn't save it...

	b	epStart				; Go enable preemption...

/*
 *	unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
 *		unsigned int newb, unsigned int timeout)
 *
 *	Try to acquire spin-lock. The second parameter is the bit mask to check.
 *	The third is the value of those bits and the 4th is what to set them to.
 *	Return success (1) or failure (0).
 *	Attempt will fail after timeout ticks of the timebase.
 *	We try fairly hard to get this lock. We disable for interruptions, but
 *	reenable after a "short" timeout (128 ticks; we may want to shorten this).
 *	After checking to see if the large timeout value (passed in) has expired and a
 *	sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
 *	we return either in abject failure, or disable and go back to the lock sniff routine.
 *	If the sniffer finds the lock free, it jumps right up and tries to grab it.
 */
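/*
 *	Conceptually, the atomic part is (a sketch; the code below performs
 *	it with lwarx/stwcx. and otherwise spins and times out exactly as
 *	hw_lock_to does):
 *
 *		if ((*lock & bits) == value) {		// right bits?
 *			*lock = (*lock & ~bits) | newb;	// set them to newb
 *			return 1;
 *		}
 */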
	.align	5
	.globl	EXT(hw_lock_mbits)

LEXT(hw_lock_mbits)

	li	r10,0

mbittry: lwarx	r12,0,r3			; Grab the lock value
	and	r0,r12,r4			; Clear extra bits
	andc	r12,r12,r4			; Clear all bits in the bit mask
	or	r12,r12,r6			; Turn on the lock bits
	cmplw	r0,r5				; Are these the right bits?
	bne--	mbitspin			; Nope, wait for it to clear...
	stwcx.	r12,0,r3			; Try to seize that there durn lock
	beq++	mbitgot				; We got it, yahoo...
	b	mbittry				; Just start up again if the store failed...

	.align	5
mbitspin: li	r11,lgKillResv			; Point to killing field
	stwcx.	r11,0,r11			; Kill it

	mr.	r10,r10				; Is r10 set to zero
	bne++	mbitspin0			; No, not the first try; MSR already saved...
	lis	r0,hi16(MASK(MSR_VEC))		; Get vector enable
	mfmsr	r9				; Get the MSR value
	ori	r0,r0,lo16(MASK(MSR_FP))	; Get FP enable
	ori	r8,r0,lo16(MASK(MSR_EE))	; Get EE bit on too
	andc	r9,r9,r0			; Clear FP and VEC
	andc	r8,r9,r8			; Clear EE as well
	mtmsr	r8				; Turn off interruptions
	isync					; May have turned off vectors or float here
	mftb	r10				; Get the low part of the time base
	b	mbitsniff
mbitspin0:
	mtmsr	r8				; Turn off interruptions
	mftb	r10				; Get the low part of the time base
mbitsniff:
	lwz	r12,0(r3)			; Get that lock in here
	and	r0,r12,r4			; Clear extra bits
	cmplw	r0,r5				; Are these the right bits?
	beq++	mbitretry			; Yeah, try for it again...

	mftb	r11				; Time stamp us now
	sub	r11,r11,r10			; Get the elapsed time
	cmplwi	r11,128				; Have we been spinning for 128 tb ticks?
	blt++	mbitsniff			; Not yet...

	mtmsr	r9				; Say, any interrupts pending?

; The following instructions force the pipeline to be interlocked so that only one
; instruction is issued per cycle. This ensures that we stay enabled for a long enough
; time; if it is too short, pending interruptions will not have a chance to be taken

	subi	r7,r7,128			; Back off elapsed time from timeout value
	or	r7,r7,r7			; Do nothing here but force a single cycle delay
	mr.	r7,r7				; See if we used the whole timeout
	or	r7,r7,r7			; Do nothing here but force a single cycle delay

	ble--	mbitfail			; We failed
	b	mbitspin0			; Now that we have opened an enable window, keep trying...
mbitretry:
	mtmsr	r9				; Enable for interruptions
	li	r10,1				; Make sure this is non-zero
	b	mbittry

	.align	5
mbitgot:
	li	r3,1				; Set good return code
	.globl	EXT(hwlmlckPatch_isync)
LEXT(hwlmlckPatch_isync)
	isync					; Make sure we do not use a speculatively loaded value
	blr

mbitfail: li	r3,0				; Set failure return code
	blr					; Return, head hanging low...

/*
 *	unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
 *
 *	Spin until word hits 0 or timeout.
 *	Return success (1) or failure (0).
 *	Attempt will fail after timeout ticks of the timebase.
 *
 *	The theory is that a processor will bump a counter as it signals
 *	other processors. Then it will spin until the counter hits 0 (or
 *	times out). The other processors, as they receive the signal, will
 *	decrement the counter.
 *
 *	The other processors use an interlocked update to decrement; this one
 *	does not need to interlock.
 */
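/*
 *	Sketch of the rendezvous (illustrative; counter, target[] and
 *	signal_cpu() are hypothetical names):
 *
 *		// signalling processor:
 *		for (i = 0; i < ntargets; i++) {
 *			hw_atomic_add(&counter, 1);
 *			signal_cpu(target[i]);
 *		}
 *		if (!hw_cpu_sync(&counter, timeout))	// spin until it hits 0
 *			give_up();	// some target never answered
 *
 *		// each target, on taking the signal:
 *		hw_atomic_sub(&counter, 1);		// interlocked decrement
 */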
	.align	5
	.globl	EXT(hw_cpu_sync)

LEXT(hw_cpu_sync)

	mftb	r10				; Get the low part of the time base
	mr	r9,r3				; Save the sync word address
	li	r3,1				; Assume we work

csynctry: lwz	r11,0(r9)			; Grab the sync value
	mr.	r11,r11				; Counter hit 0?
	beqlr-					; Yeah, we are done...
	mftb	r12				; Time stamp us now

	sub	r12,r12,r10			; Get the elapsed time
	cmplw	r4,r12				; Have we gone too long?
	bge+	csynctry			; Not yet...

	li	r3,0				; Set failure...
	blr					; Return, head hanging low...

/*
 *	unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
 *
 *	Spin until word changes or timeout.
 *	Return success (1) or failure (0).
 *	Attempt will fail after timeout ticks of the timebase.
 *
 *	This is used to ensure that a processor passes a certain point.
 *	An example of use is to monitor the last interrupt time in the
 *	per_proc block. This can be used to ensure that the other processor
 *	has seen at least one interrupt since a specific time.
 */
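/*
 *	Illustrative use (the per_proc field and signalling helper here are
 *	hypothetical names, not real offsets):
 *
 *		old = pp->last_interrupt_time;
 *		cause_interrupt(cpu);
 *		if (hw_cpu_wcng(&pp->last_interrupt_time, old, timeout))
 *			;	// that cpu has taken an interrupt since "old"
 */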
	.align	5
	.globl	EXT(hw_cpu_wcng)

LEXT(hw_cpu_wcng)

	mftb	r10				; Get the low part of the time base
	mr	r9,r3				; Save the sync word address
	li	r3,1				; Assume we work

wcngtry: lwz	r11,0(r9)			; Grab the value
	cmplw	r11,r4				; Do they still match?
	bnelr-					; Nope, cool...
	mftb	r12				; Time stamp us now

	sub	r12,r12,r10			; Get the elapsed time
	cmplw	r5,r12				; Have we gone too long?
	bge+	wcngtry				; Not yet...

	li	r3,0				; Set failure...
	blr					; Return, head hanging low...


/*
 *	unsigned int hw_lock_try(hw_lock_t)
 *
 *	Try to acquire spin-lock. Return success (1) or failure (0).
 *	Returns with preemption disabled on success.
 *
 */
	.align	5
	.globl	EXT(hw_lock_try)

LEXT(hw_lock_try)

	lis	r0,hi16(MASK(MSR_VEC))		; Get vector enable
	mfmsr	r9				; Get the MSR value
	ori	r0,r0,lo16(MASK(MSR_FP))	; Get FP enable
	ori	r7,r0,lo16(MASK(MSR_EE))	; Get EE bit on too
	andc	r9,r9,r0			; Clear FP and VEC
	andc	r7,r9,r7			; Clear EE as well

	mtmsr	r7				; Disable interruptions and thus, preemption

	lwz	r5,0(r3)			; Quick load
	andi.	r6,r5,ILK_LOCKED		; TEST...
	bne--	.L_lock_try_failed		; No go...

.L_lock_try_loop:
	lwarx	r5,0,r3				; Ld from addr of arg and reserve

	andi.	r6,r5,ILK_LOCKED		; TEST...
	ori	r5,r5,ILK_LOCKED
	bne--	.L_lock_try_failedX		; branch if taken. Predict free

	stwcx.	r5,0,r3				; And SET (if still reserved)
	bne--	.L_lock_try_loop		; If set failed, loop back

	.globl	EXT(hwltlckPatch_isync)
LEXT(hwltlckPatch_isync)
	isync

	mfsprg	r6,1				; Get current activation
	lwz	r5,ACT_PREEMPT_CNT(r6)		; Get the preemption level
	addi	r5,r5,1				; Bring up the disable count
	stw	r5,ACT_PREEMPT_CNT(r6)		; Save it back

	mtmsr	r9				; Allow interruptions now
	li	r3,1				; Set that the lock was free
	blr

.L_lock_try_failedX:
	li	r6,lgKillResv			; Killing field
	stwcx.	r6,0,r6				; Kill reservation

.L_lock_try_failed:
	mtmsr	r9				; Allow interruptions now
	li	r3,0				; FAILURE - lock was taken
	blr

/*
 *	unsigned int hw_lock_held(hw_lock_t)
 *
 *	Return 1 if lock is held.
 *	Doesn't change preemption state.
 *	N.B. Racy, of course.
 */
	.align	5
	.globl	EXT(hw_lock_held)

LEXT(hw_lock_held)

	isync					; Make sure we don't use a speculatively fetched lock
	lwz	r3, 0(r3)			; Get lock value
	andi.	r6,r3,ILK_LOCKED		; Extract the ILK_LOCKED bit
	blr

/*
 *	uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
 *
 *	Compare oldval to the area; if they are equal, store newval and return true.
 *	Otherwise return false and do not store.
 *	This is an atomic operation.
 */
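/*
 *	Equivalent C (a sketch; expressed with a GCC atomic builtin rather
 *	than the lwarx/stwcx. loop used below):
 *
 *		uint32_t hw_compare_and_store_sketch(uint32_t oldval,
 *		    uint32_t newval, uint32_t *dest)
 *		{
 *			return __sync_bool_compare_and_swap(dest, oldval, newval);
 *		}
 */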
	.align	5
	.globl	EXT(hw_compare_and_store)

LEXT(hw_compare_and_store)

	mr	r6,r3				; Save the old value

cstry:	lwarx	r9,0,r5				; Grab the area value
	li	r3,1				; Assume it works
	cmplw	cr0,r9,r6			; Does it match the old value?
	bne--	csfail				; No, it must have changed...
	stwcx.	r4,0,r5				; Try to save the new value
	bne--	cstry				; Didn't get it, try again...
	.globl	EXT(hwcsatomicPatch_isync)
LEXT(hwcsatomicPatch_isync)
	isync					; Just hold up prefetch
	blr					; Return...

csfail:	li	r3,lgKillResv			; Killing field
	stwcx.	r3,0,r3				; Blow reservation

	li	r3,0				; Set failure
	blr					; Better luck next time...


/*
 *	uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
 *
 *	Atomically add the second parameter to the first.
 *	Returns the result.
 *
 */
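/*
 *	Sketch of the whole hw_atomic_* family in C (illustrative; each
 *	routine below is a lwarx/stwcx. retry loop):
 *
 *		uint32_t hw_atomic_add_sketch(uint32_t *dest, uint32_t delt)
 *		{
 *			uint32_t oldv, newv;
 *			do {
 *				oldv = *dest;
 *				newv = oldv + delt;	// sub/or/and are analogous
 *			} while (!__sync_bool_compare_and_swap(dest, oldv, newv));
 *			return newv;		// the result, not the old value
 *		}
 */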
	.align	5
	.globl	EXT(hw_atomic_add)

LEXT(hw_atomic_add)

	mr	r6,r3				; Save the area

addtry:	lwarx	r3,0,r6				; Grab the area value
	add	r3,r3,r4			; Add the value
	stwcx.	r3,0,r6				; Try to save the new value
	bne--	addtry				; Didn't get it, try again...
	blr					; Return...


/*
 *	uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt)
 *
 *	Atomically subtract the second parameter from the first.
 *	Returns the result.
 *
 */
	.align	5
	.globl	EXT(hw_atomic_sub)

LEXT(hw_atomic_sub)

	mr	r6,r3				; Save the area

subtry:	lwarx	r3,0,r6				; Grab the area value
	sub	r3,r3,r4			; Subtract the value
	stwcx.	r3,0,r6				; Try to save the new value
	bne--	subtry				; Didn't get it, try again...
	blr					; Return...


/*
 *	uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask)
 *
 *	Atomically ORs the second parameter into the first.
 *	Returns the result.
 */
	.align	5
	.globl	EXT(hw_atomic_or)
LEXT(hw_atomic_or)
	.globl	EXT(hw_atomic_or_noret)
LEXT(hw_atomic_or_noret)
	mr	r6,r3				; Save the area

ortry:	lwarx	r3,0,r6				; Grab the area value
	or	r3,r3,r4			; OR the value
	stwcx.	r3,0,r6				; Try to save the new value
	bne--	ortry				; Did not get it, try again...
	blr					; Return...


/*
 *	uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask)
 *
 *	Atomically ANDs the second parameter with the first.
 *	Returns the result.
 *
 */
	.align	5
	.globl	EXT(hw_atomic_and)
LEXT(hw_atomic_and)
	.globl	EXT(hw_atomic_and_noret)
LEXT(hw_atomic_and_noret)
	mr	r6,r3				; Save the area

andtry:	lwarx	r3,0,r6				; Grab the area value
	and	r3,r3,r4			; AND the value
	stwcx.	r3,0,r6				; Try to save the new value
	bne--	andtry				; Did not get it, try again...
	blr					; Return...


/*
 *	void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
 *
 *	Atomically inserts the element at the head of the list
 *	anchor is the pointer to the first element
 *	element is the pointer to the element to insert
 *	disp is the displacement into the element to the chain pointer
 *
 *	NOTE: OSEnqueueAtomic() is aliased to this, see xnu/libkern/Makefile
 */
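/*
 *	C sketch of the push (illustrative; the eieio in the common code
 *	below is what orders the element's link store ahead of the anchor
 *	update):
 *
 *		void hw_queue_atomic_sketch(unsigned int **anchor,
 *		    unsigned int *elem, unsigned int disp)
 *		{
 *			unsigned int **link = (unsigned int **)((char *)elem + disp);
 *			unsigned int *head;
 *			do {
 *				head = *anchor;
 *				*link = head;	// chain old head behind us
 *			} while (!__sync_bool_compare_and_swap(anchor, head, elem));
 *		}
 */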
	.align	5
	.globl	EXT(hw_queue_atomic)

LEXT(hw_queue_atomic)

	mr	r7,r4				; Make end point the same as start
	mr	r8,r5				; Copy the displacement also
	b	hw_queue_comm			; Join common code...

/*
 *	void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
 *
 *	Atomically inserts the list of elements at the head of the list
 *	anchor is the pointer to the first element
 *	first is the pointer to the first element to insert
 *	last is the pointer to the last element to insert
 *	disp is the displacement into the element to the chain pointer
 */
	.align	5
	.globl	EXT(hw_queue_atomic_list)

LEXT(hw_queue_atomic_list)

	mr	r7,r5				; Make end point the same as start
	mr	r8,r6				; Copy the displacement also

hw_queue_comm:
	lwarx	r9,0,r3				; Pick up the anchor
	stwx	r9,r8,r7			; Chain that to the end of the new stuff
	eieio					; Make sure this store makes it before the anchor update
	stwcx.	r4,0,r3				; Try to chain into the front
	bne--	hw_queue_comm			; Didn't make it, try again...

	blr					; Return...

/*
 *	unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
 *
 *	Atomically removes the first element in a list and returns it.
 *	anchor is the pointer to the first element
 *	disp is the displacement into the element to the chain pointer
 *	Returns element if found, 0 if empty.
 *
 *	NOTE: OSDequeueAtomic() is aliased to this, see xnu/libkern/Makefile
 */
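/*
 *	C sketch of the pop (illustrative):
 *
 *		unsigned int *hw_dequeue_atomic_sketch(unsigned int **anchor,
 *		    unsigned int disp)
 *		{
 *			unsigned int *first, *next;
 *			do {
 *				first = *anchor;
 *				if (first == 0)
 *					return 0;	// list is empty
 *				next = *(unsigned int **)((char *)first + disp);
 *			} while (!__sync_bool_compare_and_swap(anchor, first, next));
 *			return first;
 *		}
 *
 *	Unlike this compare-and-swap sketch, the lwarx/stwcx. reservation
 *	below is also lost whenever anyone else stores to the anchor, which
 *	is what protects the dequeue against the classic ABA hazard.
 */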
	.align	5
	.globl	EXT(hw_dequeue_atomic)

LEXT(hw_dequeue_atomic)

	mr	r5,r3				; Save the anchor

hw_dequeue_comm:
	lwarx	r3,0,r5				; Pick up the anchor
	mr.	r3,r3				; Is the list empty?
	beq--	hdcFail				; Leave if the list is empty...
	lwzx	r9,r4,r3			; Get the next in line
	stwcx.	r9,0,r5				; Try to chain into the front
	beqlr++					; Got the thing, go away with it...
	b	hw_dequeue_comm			; Did not make it, try again...

hdcFail: li	r4,lgKillResv			; Killing field
	stwcx.	r4,0,r4				; Dump reservation
	blr					; Leave...


/*
 *	Routines for mutex lock debugging.
 */

/*
 *	Gets lock check flags in CR6: CR bits 24-27
 */
#define CHECK_SETUP(rg) \
	lbz	rg,lglcksWork(0) __ASMNL__ \
	mtcrf	2,rg __ASMNL__


/*
 *	Checks for expected lock type.
 */
#define CHECK_MUTEX_TYPE() \
	bf	MUTEX_ATTR_DEBUGb,1f __ASMNL__ \
	bt	24+disLktypeb,1f __ASMNL__ \
	lwz	r10,MUTEX_TYPE(r3) __ASMNL__ \
	cmpwi	r10,MUTEX_TAG __ASMNL__ \
	beq++	1f __ASMNL__ \
	PROLOG(0) __ASMNL__ \
	mr	r4,r11 __ASMNL__ \
	mr	r5,r10 __ASMNL__ \
	lis	r3,hi16(not_a_mutex) __ASMNL__ \
	ori	r3,r3,lo16(not_a_mutex) __ASMNL__ \
	bl	EXT(panic) __ASMNL__ \
	BREAKPOINT_TRAP __ASMNL__ \
1:

	.data
not_a_mutex:
	STRINGD	"mutex (0x%08X) not a mutex type (0x%08X)\n\000"
	.text

/*
 *	Verifies return to the correct thread in "unlock" situations.
 */
#define CHECK_THREAD(thread_offset) \
	bf	MUTEX_ATTR_DEBUGb,3f __ASMNL__ \
	bt	24+disLkThreadb,3f __ASMNL__ \
	mfsprg	r10,1 __ASMNL__ \
	lwz	r5,MUTEX_DATA(r3) __ASMNL__ \
	rlwinm.	r9,r5,0,0,29 __ASMNL__ \
	bne++	1f __ASMNL__ \
	lis	r3,hi16(not_held) __ASMNL__ \
	ori	r3,r3,lo16(not_held) __ASMNL__ \
	b	2f __ASMNL__ \
1: __ASMNL__ \
	cmpw	r9,r10 __ASMNL__ \
	beq++	3f __ASMNL__ \
	mr	r5,r10 __ASMNL__ \
	mr	r6,r9 __ASMNL__ \
	lis	r3,hi16(wrong_thread) __ASMNL__ \
	ori	r3,r3,lo16(wrong_thread) __ASMNL__ \
2: __ASMNL__ \
	mr	r4,r11 __ASMNL__ \
	PROLOG(0) __ASMNL__ \
	bl	EXT(panic) __ASMNL__ \
	BREAKPOINT_TRAP __ASMNL__ \
3:

	.data
not_held:
	STRINGD	"mutex (0x%08X) not held\n\000"
wrong_thread:
	STRINGD	"mutex (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n\000"
	.text

#define CHECK_MYLOCK() \
	bf	MUTEX_ATTR_DEBUGb,1f __ASMNL__ \
	bt	24+disLkMyLckb,1f __ASMNL__ \
	mfsprg	r10,1 __ASMNL__ \
	lwz	r9,MUTEX_DATA(r3) __ASMNL__ \
	rlwinm	r9,r9,0,0,29 __ASMNL__ \
	cmpw	r9,r10 __ASMNL__ \
	bne++	1f __ASMNL__ \
	mr	r4,r11 __ASMNL__ \
	lis	r3, hi16(mylock_attempt) __ASMNL__ \
	ori	r3,r3,lo16(mylock_attempt) __ASMNL__ \
	bl	EXT(panic) __ASMNL__ \
	BREAKPOINT_TRAP __ASMNL__ \
1:

	.data
mylock_attempt:
	STRINGD	"mutex (0x%08X) recursive lock attempt\n\000"
	.text

#define LCK_STACK(lck, stack, lck_stack, frame_cnt, lr_save, tmp) \
	bf	24+enaLkExtStckb,3f __ASMNL__ \
	addi	lck_stack,lck,MUTEX_STACK __ASMNL__ \
	li	frame_cnt,MUTEX_FRAMES-1 __ASMNL__ \
1: __ASMNL__ \
	mr	tmp,stack __ASMNL__ \
	lwz	stack,0(stack) __ASMNL__ \
	xor	tmp,stack,tmp __ASMNL__ \
	cmplwi	tmp,8192 __ASMNL__ \
	bge--	2f __ASMNL__ \
	lwz	lr_save,FM_LR_SAVE(stack) __ASMNL__ \
	stwu	lr_save,4(lck_stack) __ASMNL__ \
	subi	frame_cnt,frame_cnt,1 __ASMNL__ \
	cmpi	cr0,frame_cnt,0 __ASMNL__ \
	bne	1b __ASMNL__ \
	b	3f __ASMNL__ \
2: __ASMNL__ \
	li	tmp,0 __ASMNL__ \
	stwu	tmp,4(lck_stack) __ASMNL__ \
	subi	frame_cnt,frame_cnt,1 __ASMNL__ \
	cmpi	cr0,frame_cnt,0 __ASMNL__ \
	bne	2b __ASMNL__ \
3:

/*
 *	void mutex_init(mutex_t* l, etap_event_t etap)
 *
 */
	.align	5
	.globl	EXT(mutex_init)
LEXT(mutex_init)

	PROLOG(0)
	li	r10,0
	stw	r10,MUTEX_DATA(r3)		; clear lock word
	sth	r10,MUTEX_WAITERS(r3)		; init waiter count
	sth	r10,MUTEX_PROMOTED_PRI(r3)
#if	MACH_LDEBUG
	li	r11,MUTEX_ATTR_DEBUG
	stw	r10,MUTEX_STACK(r3)		; init caller pc
	stw	r10,MUTEX_THREAD(r3)		; and owning thread
	li	r9, MUTEX_TAG
	stw	r9, MUTEX_TYPE(r3)		; set lock type
	stw	r11,MUTEX_ATTR(r3)
	addi	r8,r3,MUTEX_STACK-4
	li	r9,MUTEX_FRAMES
mlistck:
	stwu	r10,4(r8)			; init stack
	subi	r9,r9,1
	cmpi	cr0,r9,0
	bne	mlistck
#endif	/* MACH_LDEBUG */
	EPILOG
	blr

/*
 *	void lck_mtx_lock_ext(lck_mtx_ext_t*)
 *
 */
	.align	5
	.globl	EXT(lck_mtx_lock_ext)
LEXT(lck_mtx_lock_ext)
#if	MACH_LDEBUG
	.globl	EXT(mutex_lock)
LEXT(mutex_lock)

	.globl	EXT(_mutex_lock)
LEXT(_mutex_lock)
#endif
	mr	r11,r3				; Save lock addr
mlckeEnter:
	lwz	r0,MUTEX_ATTR(r3)
	mtcrf	1,r0				; Set cr7
	CHECK_SETUP(r12)
	CHECK_MUTEX_TYPE()

	bf	MUTEX_ATTR_DEBUGb,L_mutex_lock_assert_wait_2
	PROLOG(0)
	bl	EXT(assert_wait_possible)
	mr.	r3,r3
	bne	L_mutex_lock_assert_wait_1
	lis	r3,hi16(L_mutex_lock_assert_wait_panic_str)
	ori	r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
	bl	EXT(panic)
	BREAKPOINT_TRAP				; We die here anyway

	.data
L_mutex_lock_assert_wait_panic_str:
	STRINGD	"mutex lock attempt with assert_wait_possible false\n\000"
	.text

L_mutex_lock_assert_wait_1:
	lwz	r3,FM_ARG0(r1)
	lwz	r11,FM_ARG0+0x04(r1)
	lwz	r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
	mtcr	r2
	EPILOG
L_mutex_lock_assert_wait_2:

	mfsprg	r6,1				; load the current thread
	bf	MUTEX_ATTR_STATb,mlckestatskip	; Branch if no stat
	lwz	r5,MUTEX_GRP(r3)		; Load lock group
	li	r7,GRP_MTX_STAT_UTIL+4		; Set stat util offset
mlckestatloop:
	lwarx	r8,r7,r5			; Load stat util cnt
	addi	r8,r8,1				; Increment stat util cnt
	stwcx.	r8,r7,r5			; Store stat util cnt
	bne--	mlckestatloop			; Retry if failed
	mr.	r8,r8				; Test for zero
	bne++	mlckestatskip			; Did stat util cnt wrap?
	lwz	r8,GRP_MTX_STAT_UTIL(r5)	; Load upper stat util cnt
	addi	r8,r8,1				; Increment upper stat util cnt
	stw	r8,GRP_MTX_STAT_UTIL(r5)	; Store upper stat util cnt
mlckestatskip:
	lwz	r5,MUTEX_DATA(r3)		; Get the lock quickly
	li	r4,0
	li	r8,0
	lis	r0,hi16(MASK(MSR_VEC))		; Get vector enable
	mfmsr	r9				; Get the MSR value
	ori	r0,r0,lo16(MASK(MSR_FP))	; Get FP enable
	ori	r7,r0,lo16(MASK(MSR_EE))	; Get EE bit on too
	andc	r9,r9,r0			; Clear FP and VEC
	andc	r7,r9,r7			; Clear EE as well
	mtmsr	r7				; Turn off interruptions
	isync					; May have turned off vec and fp here
	mr.	r5,r5				; Quick check
	bne--	mlckespin01			; Can not get it right now...

mlcketry:
	lwarx	r5,MUTEX_DATA,r3		; load the mutex lock
	mr.	r5,r5
	bne--	mlckespin0			; Can not get it right now...
	stwcx.	r6,MUTEX_DATA,r3		; grab the lock
	bne--	mlcketry			; loop back if failed
	.globl	EXT(mlckePatch_isync)
LEXT(mlckePatch_isync)
	isync					; stop prefetching
	mflr	r12
	bf	MUTEX_ATTR_DEBUGb,mlckedebskip
	mr	r8,r6				; Get the active thread
	stw	r12,MUTEX_STACK(r3)		; Save our caller
	stw	r8,MUTEX_THREAD(r3)		; Set the mutex's holding thread
	mr	r5,r1
	LCK_STACK(r3,r5,r6,r7,r8,r10)
mlckedebskip:
	mtmsr	r9				; Say, any interrupts pending?
	blr

mlckespin0:
	li	r5,lgKillResv			; Killing field
	stwcx.	r5,0,r5				; Kill reservation
mlckespin01:
	mflr	r12
	mtmsr	r9				; Say, any interrupts pending?
	bl	mlckspin1
	mtmsr	r7				; Turn off interruptions, vec and fp off already
	mtlr	r12
	b	mlcketry

/*
 *	void lck_mtx_lock(lck_mtx_t*)
 *
 */
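/*
 *	Shape of the path (illustrative; MUTEX_DATA holds the owning thread
 *	pointer when held, with ILK_LOCKED/WAIT_FLAG in the low bits):
 *
 *		if (__sync_bool_compare_and_swap(&m->data, 0, current_thread()))
 *			return;		// uncontended fast path: one CAS
 *		// otherwise spin (up to MutexSpin timebase ticks) while the
 *		// owner is on a processor and not idling, then fall into the
 *		// slow path: take the interlock, set WAIT_FLAG and block in
 *		// lck_mtx_lock_wait() until the owner wakes us.
 */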
	.align	5
	.globl	EXT(lck_mtx_lock)
LEXT(lck_mtx_lock)

#if	!MACH_LDEBUG
	.globl	EXT(mutex_lock)
LEXT(mutex_lock)

	.globl	EXT(_mutex_lock)
LEXT(_mutex_lock)
#endif

	mfsprg	r6,1				; load the current thread
	lwz	r5,MUTEX_DATA(r3)		; Get the lock quickly
	mr	r11,r3				; Save lock addr
	li	r4,0
	li	r8,0
	li	r9,0
	mr.	r5,r5				; Quick check
	bne--	mlckspin00			; Indirect, or can not get it right now...

mlcktry:
	lwarx	r5,MUTEX_DATA,r3		; load the mutex lock
	mr.	r5,r5
	bne--	mlckspin01			; Can not get it right now...
	stwcx.	r6,MUTEX_DATA,r3		; grab the lock
	bne--	mlcktry				; loop back if failed
	.globl	EXT(mlckPatch_isync)
LEXT(mlckPatch_isync)
	isync					; stop prefetching
	blr
; Need to debug making blr above a patch point and record:
;	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE)

mlckspin00:
	cmpli	cr0,r5,MUTEX_IND		; Is it a mutex indirect
	bne--	mlckspin02			; No, go handle contention
	lwz	r3,MUTEX_PTR(r3)		; load mutex ext pointer
	b	mlckeEnter
mlckspin01:
	li	r5,lgKillResv			; Killing field
	stwcx.	r5,0,r5				; Kill reservation
mlckspin02:
	mflr	r12
	li	r0,0
	mtcrf	1,r0				; Set cr7 to zero
	bl	mlckspin1
	mtlr	r12
	b	mlcktry


mlckspin1:
	mr.	r4,r4				; Test timeout value
	bne++	mlckspin2
	lis	r4,hi16(EXT(MutexSpin))		; Get the high part
	ori	r4,r4,lo16(EXT(MutexSpin))	; And the low part
	lwz	r4,0(r4)			; Get spin timeout value
	mr.	r4,r4				; Test spin timeout value
	bne++	mlckspin2			; Is spin timeout requested
	crclr	mlckmiss			; Clear miss test
	b	mlckslow1			; Don't try to spin

mlckspin2: mr.	r8,r8				; Is r8 set to zero
	bne++	mlckspin3			; No, not the first try; MSR already saved...
	crclr	mlckmiss			; Clear miss test
	mr.	r9,r9				; Is r9 set to zero
	bne++	mlckspin3			; No, r9 already holds the msr value
	lis	r0,hi16(MASK(MSR_VEC))		; Get vector enable
	mfmsr	r9				; Get the MSR value
	ori	r0,r0,lo16(MASK(MSR_FP))	; Get FP enable
	ori	r7,r0,lo16(MASK(MSR_EE))	; Get EE bit on too
	andc	r9,r9,r0			; Clear FP and VEC
	andc	r7,r9,r7			; Clear EE as well
	mtmsr	r7				; Turn off interruptions
	isync					; May have turned off vec and fp here
	mftb	r8				; Get timestamp on entry
	b	mlcksniff

mlckspin3: mtmsr	r7			; Turn off interruptions
	mftb	r8				; Get timestamp on entry

mlcksniff: lwz	r5,MUTEX_DATA(r3)		; Get that lock in here
	mr.	r5,r5				; Is the lock held
	beq++	mlckretry			; No, try for it again...
	rlwinm.	r10,r5,0,0,29			; Extract the lock owner
	beq++	mlckslow0			; InterLock is held
	bf	MUTEX_ATTR_STATb,mlStatSkip	; Branch if no stat
	andi.	r5,r5,ILK_LOCKED		; extract interlocked?
	bne	mlStatSkip			; yes, skip
	bt	mlckmiss,mlStatSkip		; miss already counted
	crset	mlckmiss			; Remember miss recorded
	lwz	r5,MUTEX_GRP(r3)		; Load lock group
	addi	r5,r5,GRP_MTX_STAT_MISS+4	; Add stat miss offset
mlStatLoop:
	lwarx	r6,0,r5				; Load stat miss cnt
	addi	r6,r6,1				; Increment stat miss cnt
	stwcx.	r6,0,r5				; Update stat miss cnt
	bne--	mlStatLoop			; Retry if failed
	mfsprg	r6,1				; Reload current thread
mlStatSkip:
	lwz	r2,ACT_MACT_SPF(r10)		; Get the special flags
	rlwinm.	r2,r2,0,OnProcbit,OnProcbit	; Is OnProcbit set?
	beq	mlckslow0			; Lock owner isn't running
	lis	r2,hi16(TH_IDLE)		; Get thread idle state
	ori	r2,r2,lo16(TH_IDLE)		; Get thread idle state
	lwz	r10,THREAD_STATE(r10)		; Get the thread state
	and.	r10,r10,r2			; Is idle set?
	bne	mlckslow0			; Lock owner is idling

	mftb	r10				; Time stamp us now
	sub	r10,r10,r8			; Get the elapsed time
	cmplwi	r10,128				; Have we been spinning for 128 tb ticks?
	blt++	mlcksniff			; Not yet...

	mtmsr	r9				; Say, any interrupts pending?

; The following instructions force the pipeline to be interlocked so that only one
; instruction is issued per cycle. This ensures that we stay enabled for a long enough
; time; if it's too short, pending interruptions will not have a chance to be taken

	subi	r4,r4,128			; Back off elapsed time from timeout value
	or	r4,r4,r4			; Do nothing here but force a single cycle delay
	mr.	r4,r4				; See if we used the whole timeout
	or	r4,r4,r4			; Do nothing here but force a single cycle delay

	ble--	mlckslow1			; We failed
	b	mlckspin3			; Now that we've opened an enable window, keep trying...
mlckretry:
	mtmsr	r9				; Restore interrupt state
	li	r8,1				; Show already through once
	blr

mlckslow0:					; We couldn't get the lock
	mtmsr	r9				; Restore interrupt state

mlckslow1:
	mtlr	r12

	PROLOG(0)
.L_ml_retry:
	bl	lockDisa			; Go get a lock on the mutex's interlock lock
	mr.	r4,r3				; Did we get it?
	lwz	r3,FM_ARG0(r1)			; Restore the lock address
	bne++	mlGotInt			; We got it just fine...
	mr	r4,r11				; Saved lock addr
	lis	r3,hi16(mutex_failed1)		; Get the failed mutex message
	ori	r3,r3,lo16(mutex_failed1)	; Get the failed mutex message
	bl	EXT(panic)			; Call panic
	BREAKPOINT_TRAP				; We die here anyway, can not get the lock

	.data
mutex_failed1:
	STRINGD	"attempt to interlock mutex (0x%08X) failed on mutex lock\n\000"
	.text

mlGotInt:

; Note that there is no reason to do a load and reserve here. We already
; hold the interlock lock and no one can touch this field unless they
; have that, so, we're free to play

	lwz	r4,MUTEX_DATA(r3)		; Get the mutex's lock field
	rlwinm.	r9,r4,30,2,31			; So, can we have it?
	bne-	mlInUse				; Nope, somebody's playing already...

	bf++	MUTEX_ATTR_DEBUGb,mlDebSkip
	CHECK_SETUP(r5)
	mfsprg	r9,1				; Get the current activation
	lwz	r5,0(r1)			; Get previous save frame
	lwz	r6,FM_LR_SAVE(r5)		; Get our caller's address
	mr	r8,r9				; Get the active thread
	stw	r6,MUTEX_STACK(r3)		; Save our caller
	stw	r8,MUTEX_THREAD(r3)		; Set the mutex's holding thread
	LCK_STACK(r3,r5,r6,r7,r8,r10)
mlDebSkip:
	mr	r3,r11				; Get the based lock address
	bl	EXT(lck_mtx_lock_acquire)
	lwz	r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
	mfsprg	r5,1
	mtcr	r2
	mr.	r4,r3
	lwz	r3,FM_ARG0(r1)			; restore r3 (saved in prolog)
	lwz	r11,FM_ARG0+0x04(r1)		; restore r11 (saved in prolog)
	beq	mlUnlock
	ori	r5,r5,WAIT_FLAG

mlUnlock: eieio
	stw	r5,MUTEX_DATA(r3)		; grab the mutexlock and free the interlock

	EPILOG					; Restore all saved registers
	b	epStart				; Go enable preemption...

; We come to here when we have a resource conflict. In other words,
; the mutex is held.

mlInUse:

	CHECK_SETUP(r12)
	CHECK_MYLOCK()				; Assert we don't own the lock already

; Note that we come in here with the interlock set. The wait routine
; will unlock it before waiting.

	bf	MUTEX_ATTR_STATb,mlStatSkip2	; Branch if no stat
	lwz	r5,MUTEX_GRP(r3)		; Load lck group
	bt	mlckmiss,mlStatSkip1		; Skip miss already counted
	crset	mlckmiss			; Remember miss recorded
	li	r9,GRP_MTX_STAT_MISS+4		; Get stat miss offset
mlStatLoop1:
	lwarx	r8,r9,r5			; Load stat miss cnt
	addi	r8,r8,1				; Increment stat miss cnt
	stwcx.	r8,r9,r5			; Store stat miss cnt
	bne--	mlStatLoop1			; Retry if failed
mlStatSkip1:
	lwz	r9,GRP_MTX_STAT_WAIT+4(r5)	; Load wait cnt
	addi	r9,r9,1				; Increment wait cnt
	stw	r9,GRP_MTX_STAT_WAIT+4(r5)	; Update wait cnt
mlStatSkip2:
	ori	r4,r4,WAIT_FLAG			; Set the wait flag
	stw	r4,MUTEX_DATA(r3)
	rlwinm	r4,r4,0,0,29			; Extract the lock owner
	mfcr	r2
	stw	r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
	mr	r3,r11				; Get the based lock address
	bl	EXT(lck_mtx_lock_wait)		; Wait for our turn at the lock

	lwz	r3,FM_ARG0(r1)			; restore r3 (saved in prolog)
	lwz	r11,FM_ARG0+0x04(r1)		; restore r11 (saved in prolog)
	lwz	r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
	mtcr	r2
	b	.L_ml_retry			; and try again...


/*
 *	void lck_mtx_try_lock_ext(lck_mtx_ext_t*)
 *
 */
	.align	5
	.globl	EXT(lck_mtx_try_lock_ext)
LEXT(lck_mtx_try_lock_ext)
#if	MACH_LDEBUG
	.globl	EXT(mutex_try)
LEXT(mutex_try)
	.globl	EXT(_mutex_try)
LEXT(_mutex_try)
#endif
	mr	r11,r3				; Save lock addr
mlteEnter:
	lwz	r0,MUTEX_ATTR(r3)
	mtcrf	1,r0				; Set cr7
	CHECK_SETUP(r12)
	CHECK_MUTEX_TYPE()

	bf	MUTEX_ATTR_STATb,mlteStatSkip	; Branch if no stat
	lwz	r5,MUTEX_GRP(r3)		; Load lock group
	li	r7,GRP_MTX_STAT_UTIL+4		; Set stat util offset
mlteStatLoop:
	lwarx	r8,r7,r5			; Load stat util cnt
	addi	r8,r8,1				; Increment stat util cnt
	stwcx.	r8,r7,r5			; Store stat util cnt
	bne--	mlteStatLoop			; Retry if failed
	mr.	r8,r8				; Test for zero
	bne++	mlteStatSkip			; Did stat util cnt wrap?
	lwz	r8,GRP_MTX_STAT_UTIL(r5)	; Load upper stat util cnt
	addi	r8,r8,1				; Increment upper stat util cnt
	stw	r8,GRP_MTX_STAT_UTIL(r5)	; Store upper stat util cnt
mlteStatSkip:
	mfsprg	r6,1				; load the current thread
	lwz	r5,MUTEX_DATA(r3)		; Get the lock value
	mr.	r5,r5				; Quick check
	bne--	L_mutex_try_slow		; Can not get it now...
	mfmsr	r9				; Get the MSR value
	lis	r0,hi16(MASK(MSR_VEC))		; Get vector enable
	ori	r0,r0,lo16(MASK(MSR_FP))	; Get FP enable
	ori	r7,r0,lo16(MASK(MSR_EE))	; Get EE bit on too
	andc	r9,r9,r0			; Clear FP and VEC
	andc	r7,r9,r7			; Clear EE as well
	mtmsr	r7				; Turn off interruptions
	isync					; May have turned off vec and fp here

mlteLoopTry:
	lwarx	r5,MUTEX_DATA,r3		; load the lock value
	mr.	r5,r5
	bne--	mlteSlowX			; branch to the slow path
	stwcx.	r6,MUTEX_DATA,r3		; grab the lock
	bne--	mlteLoopTry			; retry if failed
	.globl	EXT(mltelckPatch_isync)
LEXT(mltelckPatch_isync)
	isync					; stop prefetching
	mflr	r12
	bf	MUTEX_ATTR_DEBUGb,mlteDebSkip
	mr	r8,r6				; Get the active thread
	stw	r12,MUTEX_STACK(r3)		; Save our caller
	stw	r8,MUTEX_THREAD(r3)		; Set the mutex's holding thread
	mr	r5,r1
	LCK_STACK(r3,r5,r6,r7,r8,r10)
mlteDebSkip:
	li	r3, 1
	mtmsr	r9				; Say, any interrupts pending?
	blr
mlteSlowX:
	li	r5,lgKillResv			; Killing field
	stwcx.	r5,0,r5				; Kill reservation
	mtmsr	r9				; Say, any interrupts pending?
	b	L_mutex_try_slow


/*
 *	void lck_mtx_try_lock(lck_mtx_t*)
 *
 */
	.align	5
	.globl	EXT(lck_mtx_try_lock)
LEXT(lck_mtx_try_lock)
#if	!MACH_LDEBUG
	.globl	EXT(mutex_try)
LEXT(mutex_try)
	.globl	EXT(_mutex_try)
LEXT(_mutex_try)
#endif

	mfsprg	r6,1				; load the current thread
	lwz	r5,MUTEX_DATA(r3)		; Get the lock value
	mr	r11,r3				; Save lock addr
	mr.	r5,r5				; Quick check
	bne--	mltSlow00			; Indirect, or can not get it now...

mltLoopTry:
	lwarx	r5,MUTEX_DATA,r3		; load the lock value
	mr.	r5,r5
	bne--	mltSlow01			; branch to the slow path
	stwcx.	r6,MUTEX_DATA,r3		; grab the lock
	bne--	mltLoopTry			; retry if failed
	.globl	EXT(mltlckPatch_isync)
LEXT(mltlckPatch_isync)
	isync					; stop prefetching
	li	r3, 1
	blr

mltSlow00:
	cmpli	cr0,r5,MUTEX_IND		; Is it a mutex indirect
	bne--	mltSlow02			; No, go handle contention
	lwz	r3,MUTEX_PTR(r3)		; load mutex ext pointer
	b	mlteEnter
mltSlow01:
	li	r5,lgKillResv			; Killing field
	stwcx.	r5,0,r5				; Kill reservation

mltSlow02:
	li	r0,0
	mtcrf	1,r0				; Set cr7 to zero

L_mutex_try_slow:
	PROLOG(0)

	lwz	r6,MUTEX_DATA(r3)		; Quick check
	rlwinm.	r6,r6,30,2,31			; to see if someone has this lock already
	bne-	mtFail				; Someone's got it already...

	bl	lockDisa			; Go get a lock on the mutex's interlock lock
	mr.	r4,r3				; Did we get it?
	lwz	r3,FM_ARG0(r1)			; Restore the lock address
	bne++	mtGotInt			; We got it just fine...
	mr	r4,r11				; Saved lock addr
	lis	r3,hi16(mutex_failed2)		; Get the failed mutex message
	ori	r3,r3,lo16(mutex_failed2)	; Get the failed mutex message
	bl	EXT(panic)			; Call panic
	BREAKPOINT_TRAP				; We die here anyway, can not get the lock

	.data
mutex_failed2:
	STRINGD	"attempt to interlock mutex (0x%08X) failed on mutex lock try\n\000"
	.text

mtGotInt:

; Note that there is no reason to do a load and reserve here. We already
; hold the interlock and no one can touch this field unless they
; have that, so, we're free to play

	lwz	r4,MUTEX_DATA(r3)		; Get the mutex's lock field
	rlwinm.	r9,r4,30,2,31			; So, can we have it?
	bne-	mtInUse				; Nope, somebody's playing already...

	bf++	MUTEX_ATTR_DEBUGb,mtDebSkip
	CHECK_SETUP(r5)
	mfsprg	r9,1				; Get the current activation
	lwz	r5,0(r1)			; Get previous save frame
	lwz	r6,FM_LR_SAVE(r5)		; Get our caller's address
	mr	r8,r9				; Get the active thread
	stw	r6,MUTEX_STACK(r3)		; Save our caller
	stw	r8,MUTEX_THREAD(r3)		; Set the mutex's holding thread
	LCK_STACK(r3,r5,r6,r7,r8,r10)
mtDebSkip:
	mr	r3,r11				; Get the based lock address
	bl	EXT(lck_mtx_lock_acquire)
	mfsprg	r5,1
	mr.	r4,r3
	lwz	r3,FM_ARG0(r1)			; restore r3 (saved in prolog)
	lwz	r11,FM_ARG0+0x04(r1)		; restore r11 (saved in prolog)
	beq	mtUnlock
	ori	r5,r5,WAIT_FLAG

mtUnlock: eieio
	stw	r5,MUTEX_DATA(r3)		; grab the mutexlock and free the interlock

	bl	epStart				; Go enable preemption...

	li	r3, 1
	EPILOG					; Restore all saved registers
	blr					; Return...

; We come to here when we have a resource conflict. In other words,
; the mutex is held.

mtInUse:
	bf++	MUTEX_ATTR_STATb,mtStatSkip	; Branch if no stat
	lwz	r5,MUTEX_GRP(r3)		; Load lock group
	li	r9,GRP_MTX_STAT_MISS+4		; Get stat miss offset
mtStatLoop:
	lwarx	r8,r9,r5			; Load stat miss cnt
	addi	r8,r8,1				; Increment stat miss cnt
	stwcx.	r8,r9,r5			; Store stat miss cnt
	bne--	mtStatLoop			; Retry if failed
mtStatSkip:
	rlwinm	r4,r4,0,0,30			; Get the unlock value
	stw	r4,MUTEX_DATA(r3)		; free the interlock
	bl	epStart				; Go enable preemption...

mtFail:	li	r3,0				; Set failure code
	EPILOG					; Restore all saved registers
	blr					; Return...


/*
 *	void mutex_unlock(mutex_t* l)
 *
 */
	.align	5
	.globl	EXT(mutex_unlock)
LEXT(mutex_unlock)

	sync
	mr	r11,r3				; Save lock addr
#if	MACH_LDEBUG
	b	mlueEnter1
#else
	b	mluEnter1
#endif

/*
 *	void lck_mtx_ext_unlock(lck_mtx_ext_t* l)
 *
 */
	.align	5
	.globl	EXT(lck_mtx_ext_unlock)
LEXT(lck_mtx_ext_unlock)
#if	MACH_LDEBUG
	.globl	EXT(mutex_unlock_rwcmb)
LEXT(mutex_unlock_rwcmb)
#endif
mlueEnter:
	.globl	EXT(mulckePatch_isync)
LEXT(mulckePatch_isync)
	isync
	.globl	EXT(mulckePatch_eieio)
LEXT(mulckePatch_eieio)
	eieio
	mr	r11,r3				; Save lock addr
mlueEnter1:
	lwz	r0,MUTEX_ATTR(r3)
	mtcrf	1,r0				; Set cr7
	CHECK_SETUP(r12)
	CHECK_MUTEX_TYPE()
	CHECK_THREAD(MUTEX_THREAD)

	lwz	r5,MUTEX_DATA(r3)		; Get the lock
	rlwinm.	r4,r5,0,30,31			; Quick check
	bne--	L_mutex_unlock_slow		; Can not get it now...
	mfmsr	r9				; Get the MSR value
	lis	r0,hi16(MASK(MSR_VEC))		; Get vector enable
	ori	r0,r0,lo16(MASK(MSR_FP))	; Get FP enable
	ori	r7,r0,lo16(MASK(MSR_EE))	; Get EE bit on too
	andc	r9,r9,r0			; Clear FP and VEC
	andc	r7,r9,r7			; Clear EE as well
	mtmsr	r7				; Turn off interruptions
	isync					; May have turned off vec and fp here

mlueLoop:
	lwarx	r5,MUTEX_DATA,r3
	rlwinm.	r4,r5,0,30,31			; Bail if pending waiter or interlock set
	li	r5,0				; Clear the mutexlock
	bne--	mlueSlowX
	stwcx.	r5,MUTEX_DATA,r3
	bne--	mlueLoop
	mtmsr	r9				; Say, any interrupts pending?
	blr

mlueSlowX:
	li	r5,lgKillResv			; Killing field
	stwcx.	r5,0,r5				; Dump reservation
	mtmsr	r9				; Say, any interrupts pending?
	b	L_mutex_unlock_slow		; Join slow path...

/*
 *	void lck_mtx_unlock(lck_mtx_t* l)
 *
 */
	.align	5
	.globl	EXT(lck_mtx_unlock)
LEXT(lck_mtx_unlock)
#if	!MACH_LDEBUG
	.globl	EXT(mutex_unlock_rwcmb)
LEXT(mutex_unlock_rwcmb)
#endif
mluEnter:
	.globl	EXT(mulckPatch_isync)
LEXT(mulckPatch_isync)
	isync
	.globl	EXT(mulckPatch_eieio)
LEXT(mulckPatch_eieio)
	eieio
	mr	r11,r3				; Save lock addr
mluEnter1:
	lwz	r5,MUTEX_DATA(r3)		; Get the lock
	rlwinm.	r4,r5,0,30,31			; Quick check
	bne--	mluSlow0			; Indirect, or can not get it now...

mluLoop:
	lwarx	r5,MUTEX_DATA,r3
	rlwinm.	r4,r5,0,30,31			; Bail if pending waiter or interlock set
	li	r5,0				; Clear the mutexlock
	bne--	mluSlowX
	stwcx.	r5,MUTEX_DATA,r3
	bne--	mluLoop
#if	CONFIG_DTRACE
/* lock released - LS_LCK_MTX_UNLOCK_RELEASE */
	LOCKSTAT_LABEL(_lck_mtx_unlock_lockstat_patch_point)
	blr

	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE)
#endif
	blr


mluSlow0:
	cmpli	cr0,r5,MUTEX_IND		; Is it a mutex indirect
	bne--	L_mutex_unlock_slow		; No, go handle contention
	lwz	r3,MUTEX_PTR(r3)		; load mutex ext pointer
	b	mlueEnter1
mluSlowX:
	li	r5,lgKillResv			; Killing field
	stwcx.	r5,0,r5				; Dump reservation

L_mutex_unlock_slow:

	PROLOG(0)

	bl	lockDisa			; Go get a lock on the mutex's interlock lock
	mr.	r4,r3				; Did we get it?
	lwz	r3,FM_ARG0(r1)			; Restore the lock address
	bne++	muGotInt			; We got it just fine...
	mr	r4,r11				; Saved lock addr
	lis	r3,hi16(mutex_failed3)		; Get the failed mutex message
	ori	r3,r3,lo16(mutex_failed3)	; Get the failed mutex message
	bl	EXT(panic)			; Call panic
	BREAKPOINT_TRAP				; We die here anyway, can not get the lock

	.data
mutex_failed3:
	STRINGD	"attempt to interlock mutex (0x%08X) failed on mutex unlock\n\000"
	.text


muGotInt:
	lwz	r4,MUTEX_DATA(r3)
	andi.	r5,r4,WAIT_FLAG			; are there any waiters ?
	rlwinm	r4,r4,0,0,29
	beq+	muUnlock			; Nope, we're done...

	mr	r3,r11				; Get the based lock address
	bl	EXT(lck_mtx_unlock_wakeup)	; yes, wake a thread
	lwz	r3,FM_ARG0(r1)			; restore r3 (saved in prolog)
	lwz	r11,FM_ARG0+0x04(r1)		; restore r11 (saved in prolog)
	lwz	r5,MUTEX_DATA(r3)		; load the lock

muUnlock:
	andi.	r5,r5,WAIT_FLAG			; Get the unlock value
	eieio
	stw	r5,MUTEX_DATA(r3)		; unlock the interlock and lock

	EPILOG					; Deal with the stack now, enable_preemption doesn't always want one
	b	epStart				; Go enable preemption...

/*
 *	void lck_mtx_assert(lck_mtx_t* l, unsigned int)
 *
 */
	.align	5
	.globl	EXT(lck_mtx_assert)
LEXT(lck_mtx_assert)
	.globl	EXT(_mutex_assert)
LEXT(_mutex_assert)
	mr	r11,r3
maEnter:
	lwz	r5,MUTEX_DATA(r3)
	cmpli	cr0,r5,MUTEX_IND		; Is it a mutex indirect
	bne--	maCheck				; No, go check the assertion
	lwz	r3,MUTEX_PTR(r3)		; load mutex ext pointer
	b	maEnter
maCheck:
	mfsprg	r6,1				; load the current thread
	rlwinm	r5,r5,0,0,29			; Extract the lock owner
	cmpwi	r4,MUTEX_ASSERT_OWNED
	cmplw	cr1,r6,r5			; Is the lock held by current act
	crandc	cr0_eq,cr0_eq,cr1_eq		; Check owned assertion
	bne--	maNext
	mr	r4,r11
	lis	r3,hi16(mutex_assert1)		; Get the failed mutex message
	ori	r3,r3,lo16(mutex_assert1)	; Get the failed mutex message
	b	maPanic				; Panic path
maNext:
	cmpwi	r4,MUTEX_ASSERT_NOTOWNED	; Check not owned assertion
	crand	cr0_eq,cr0_eq,cr1_eq
	bnelr++
maPanic:
	PROLOG(0)
	mr	r4,r11
	lis	r3,hi16(mutex_assert2)		; Get the failed mutex message
	ori	r3,r3,lo16(mutex_assert2)	; Get the failed mutex message
	bl	EXT(panic)			; Call panic
	BREAKPOINT_TRAP				; We die here anyway

	.data
mutex_assert1:
	STRINGD	"mutex (0x%08X) not owned\n\000"
mutex_assert2:
	STRINGD	"mutex (0x%08X) owned\n\000"
	.text


/*
 *	void lck_mtx_ilk_unlock(lck_mtx *lock)
 */
	.globl	EXT(lck_mtx_ilk_unlock)
LEXT(lck_mtx_ilk_unlock)

	lwz	r10,MUTEX_DATA(r3)
	rlwinm	r10,r10,0,0,30
	eieio
	stw	r10,MUTEX_DATA(r3)

	b	epStart				; Go enable preemption...

/*
 *	void _enable_preemption_no_check(void)
 *
 *	This version does not check if we get preempted or not
 */
	.align	4
	.globl	EXT(_enable_preemption_no_check)

LEXT(_enable_preemption_no_check)

	cmplw	cr1,r1,r1			; Force zero cr so we know not to check if preempted
	b	epCommn				; Join up with the other enable code...

/*
 *	void _enable_preemption(void)
 *
 *	This version checks if we get preempted or not
 */
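/*
 *	Sketch of the check (illustrative; field names approximate the
 *	assym.s offsets used below):
 *
 *		if (--act->preempt_cnt == 0 && interrupts_enabled() &&
 *		    (act->per_proc->pending_ast & AST_URGENT))
 *			sc();	// DoPreemptCall firmware call preempts us
 */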
1c79356b
A
1652 .align 5
1653 .globl EXT(_enable_preemption)
1654
1655LEXT(_enable_preemption)
1656
91447636 1657; Here is where we enable preemption.
55e303ae
A
1658
1659epStart:
1660 cmplwi cr1,r1,0 ; Force non-zero cr so we know to check if preempted
1661
1662epCommn:
1663 mfsprg r3,1 ; Get current activation
1664 li r8,-1 ; Get a decrementer
1665 lwz r5,ACT_PREEMPT_CNT(r3) ; Get the preemption level
1666 add. r5,r5,r8 ; Bring down the disable count
1667 blt- epTooFar ; Yeah, we did...
1668 stw r5,ACT_PREEMPT_CNT(r3) ; Save it back
1669 crandc cr0_eq,cr0_eq,cr1_eq
1670 beq+ epCheckPreempt ; Go check if we need to be preempted...
1671 blr ; Leave...
1c79356b 1672epTooFar:
55e303ae
A
1673 mr r4,r5
1674 lis r3,hi16(epTooFarStr) ; First half of panic string
1675 ori r3,r3,lo16(epTooFarStr) ; Second half of panic string
1676 PROLOG(0)
1677 bl EXT(panic)
1678 BREAKPOINT_TRAP ; We die here anyway
1c79356b
A
1679
1680 .data
1681epTooFarStr:
91447636 1682 STRINGD "enable_preemption: preemption_level %d\n\000"
de355530 1683
55e303ae 1684 .text
1c79356b 1685 .align 5
1c79356b 1686epCheckPreempt:
55e303ae
A
1687 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1688 mfmsr r9 ; Get the MSR value
1689 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
91447636 1690 andi. r4,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off
55e303ae
A
1691 beq+ epCPno ; No preemption here...
1692 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1693 andc r9,r9,r0 ; Clear FP and VEC
1694 andc r7,r9,r7 ; Clear EE as well
1695 mtmsr r7 ; Turn off interruptions
1696 isync ; May have turned off vec and fp here
91447636
A
1697 lwz r3,ACT_PER_PROC(r3) ; Get the per_proc block
1698 lwz r7,PP_PENDING_AST(r3) ; Get pending AST mask
55e303ae 1699 li r5,AST_URGENT ; Get the requests we do honor
55e303ae
A
1700 lis r0,hi16(DoPreemptCall) ; Just in case, get the top of firmware call
1701 and. r7,r7,r5 ; Should we preempt?
1702 ori r0,r0,lo16(DoPreemptCall) ; Merge in bottom part
1703 mtmsr r9 ; Allow interrupts if we can
1704epCPno:
1705 beqlr+ ; We probably will not preempt...
1706 sc ; Do the preemption
1707 blr ; Now, go away now...
1c79356b
A
1708
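/*
 * Editor's note: in C terms, the enable/check path above behaves roughly like
 * the sketch below. This is a paraphrase for readability, not kernel source;
 * names such as cur_thread, pending_ast, and do_preempt_trap are stand-ins
 * for the per-thread and per-processor state the assembly reads
 * (ACT_PREEMPT_CNT, PP_PENDING_AST, the DoPreemptCall firmware call):
 *
 *	void enable_preemption_sketch(void)
 *	{
 *		if (--cur_thread->preempt_cnt < 0)
 *			panic("enable_preemption: preemption_level %d",
 *			    cur_thread->preempt_cnt);
 *		if (cur_thread->preempt_cnt == 0 &&
 *		    interrupts_enabled() &&
 *		    (pending_ast() & AST_URGENT))
 *			do_preempt_trap();	// the 'sc' above
 *	}
 */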
/*
 * void _disable_preemption(void)
 *
 * Here is where we disable preemption.
 */
	.align	5
	.globl	EXT(_disable_preemption)

LEXT(_disable_preemption)

	mfsprg	r6,1				; Get the current activation
	lwz	r5,ACT_PREEMPT_CNT(r6)		; Get the preemption level
	addi	r5,r5,1				; Bring up the disable count
	stw	r5,ACT_PREEMPT_CNT(r6)		; Save it back
	blr					; Return...

/*
 * int get_preemption_level(void)
 *
 * Return the current preemption level
 */
	.align	5
	.globl	EXT(get_preemption_level)

LEXT(get_preemption_level)

	mfsprg	r6,1				; Get current activation
	lwz	r3,ACT_PREEMPT_CNT(r6)		; Get the preemption level
	blr					; Return...

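/*
 * Usage note (editor's addition): the preemption count is per thread and
 * nests, so every disable must be balanced by exactly one enable. A minimal
 * sketch, assuming the osfmk-internal entry points above; my_percpu_op is
 * hypothetical:
 *
 *	extern void _disable_preemption(void);
 *	extern void _enable_preemption(void);
 *	extern int get_preemption_level(void);
 *
 *	void
 *	my_percpu_op(void)
 *	{
 *		_disable_preemption();	// count goes up; we stay on this CPU
 *		// ... touch per-processor state safely ...
 *		_enable_preemption();	// count drops; may take a pending AST_URGENT
 *		assert(get_preemption_level() >= 0);
 *	}
 */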
/*
 * void ppc_usimple_lock_init(simple_lock_t, etap_event_t)
 *
 * Initialize a simple lock.
 */
	.align	5
	.globl	EXT(ppc_usimple_lock_init)

LEXT(ppc_usimple_lock_init)

	li	r0,0				; set lock to free == 0
	stw	r0,0(r3)			; Initialize the lock
	blr

/*
 * void lck_spin_lock(lck_spin_t *)
 * void ppc_usimple_lock(simple_lock_t *)
 *
 */
	.align	5
	.globl	EXT(lck_spin_lock)
LEXT(lck_spin_lock)
	.globl	EXT(ppc_usimple_lock)
LEXT(ppc_usimple_lock)

	mfsprg	r6,1				; Get the current activation
	lwz	r5,ACT_PREEMPT_CNT(r6)		; Get the preemption level
	addi	r5,r5,1				; Bring up the disable count
	stw	r5,ACT_PREEMPT_CNT(r6)		; Save it back
	mr	r5,r3				; Get the address of the lock
	li	r8,0				; Set r8 to zero
	li	r4,0				; Set r4 to zero

slcktry:	lwarx	r11,SLOCK_ILK,r5	; Grab the lock value
	andi.	r3,r11,ILK_LOCKED		; Is it locked?
	ori	r11,r6,ILK_LOCKED		; Set interlock
	bne--	slckspin			; Yeah, wait for it to clear...
	stwcx.	r11,SLOCK_ILK,r5		; Try to seize that there durn lock
	bne--	slcktry				; Couldn't get it...
	.globl	EXT(slckPatch_isync)
LEXT(slckPatch_isync)
	isync					; Make sure we don't use a speculatively loaded value
	blr					; Go on home...

slckspin:	li	r11,lgKillResv		; Killing field
	stwcx.	r11,0,r11			; Kill reservation

	mr.	r4,r4				; Test timeout value
	bne++	slockspin0
	lis	r4,hi16(EXT(LockTimeOut))	; Get the high part
	ori	r4,r4,lo16(EXT(LockTimeOut))	; And the low part
	lwz	r4,0(r4)			; Get the timeout value

slockspin0:	mr.	r8,r8			; Is this the first spin attempt?
	bne++	slockspin1			; No, MSR values are already set up...
	lis	r0,hi16(MASK(MSR_VEC))		; Get vector enable
	mfmsr	r9				; Get the MSR value
	ori	r0,r0,lo16(MASK(MSR_FP))	; Get FP enable
	ori	r7,r0,lo16(MASK(MSR_EE))	; Get EE bit on too
	andc	r9,r9,r0			; Clear FP and VEC
	andc	r7,r9,r7			; Clear EE as well
	mtmsr	r7				; Turn off interruptions
	isync					; May have turned off vec and fp here
	mftb	r8				; Get timestamp on entry
	b	slcksniff

slockspin1:	mtmsr	r7			; Turn off interruptions
	mftb	r8				; Get timestamp on entry

slcksniff:	lwz	r3,SLOCK_ILK(r5)	; Get that lock in here
	andi.	r3,r3,ILK_LOCKED		; Is it free yet?
	beq++	slckretry			; Yeah, try for it again...

	mftb	r10				; Time stamp us now
	sub	r10,r10,r8			; Get the elapsed time
	cmplwi	r10,128				; Have we been spinning for 128 tb ticks?
	blt++	slcksniff			; Not yet...

	mtmsr	r9				; Restore interrupt state so pending interrupts can be taken

; The following instructions force the pipeline to be interlocked so that only one
; instruction is issued per cycle. This ensures that we stay enabled for a long enough
; time; if the window is too short, pending interruptions will not have a chance to be taken.

	subi	r4,r4,128			; Back off elapsed time from timeout value
	or	r4,r4,r4			; Do nothing here but force a single cycle delay
	mr.	r4,r4				; See if we used the whole timeout
	li	r3,0				; Assume a timeout return code
	or	r4,r4,r4			; Do nothing here but force a single cycle delay

	ble--	slckfail			; We failed
	b	slockspin1			; Now that we've opened an enable window, keep trying...
slckretry:
	mtmsr	r9				; Restore interrupt state
	li	r8,1				; Show already through once
	b	slcktry
slckfail:					; We couldn't get the lock
	lis	r3,hi16(slckpanic_str)
	ori	r3,r3,lo16(slckpanic_str)
	mr	r4,r5
	mflr	r5
	PROLOG(0)
	bl	EXT(panic)
	BREAKPOINT_TRAP				; We die here anyway

	.data
slckpanic_str:
	STRINGD	"simple lock (0x%08X) deadlock detection, pc=0x%08X\n\000"
	.text

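/*
 * Usage note (editor's addition): from C this path is simply
 * lck_spin_lock()/lck_spin_unlock(). Preemption stays disabled for as long
 * as the lock is held, so the critical section must be short and must not
 * block. A minimal sketch, assuming <kern/locks.h>; my_spin is hypothetical:
 *
 *	#include <kern/locks.h>
 *
 *	static lck_spin_t *my_spin;
 *
 *	void
 *	my_counter_bump(unsigned int *counter)
 *	{
 *		lck_spin_lock(my_spin);		// spins; panics after LockTimeOut ticks
 *		(*counter)++;			// short, non-blocking critical section
 *		lck_spin_unlock(my_spin);	// also re-enables preemption
 *	}
 */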
/*
 * boolean_t lck_spin_try_lock(lck_spin_t *)
 * unsigned int ppc_usimple_lock_try(simple_lock_t *)
 *
 */
	.align	5
	.globl	EXT(lck_spin_try_lock)
LEXT(lck_spin_try_lock)
	.globl	EXT(ppc_usimple_lock_try)
LEXT(ppc_usimple_lock_try)

	lis	r0,hi16(MASK(MSR_VEC))		; Get vector enable
	mfmsr	r9				; Get the MSR value
	ori	r0,r0,lo16(MASK(MSR_FP))	; Get FP enable
	ori	r7,r0,lo16(MASK(MSR_EE))	; Get EE bit on too
	andc	r9,r9,r0			; Clear FP and VEC
	andc	r7,r9,r7			; Clear EE as well
	mtmsr	r7				; Disable interruptions, and thus preemption
	mfsprg	r6,1				; Get current activation

	lwz	r11,SLOCK_ILK(r3)		; Get the lock
	andi.	r5,r11,ILK_LOCKED		; Check it...
	bne--	slcktryfail			; Quickly fail...

slcktryloop:
	lwarx	r11,SLOCK_ILK,r3		; Load from lock address and reserve

	andi.	r5,r11,ILK_LOCKED		; Test the interlock bit
	ori	r5,r6,ILK_LOCKED
	bne--	slcktryfailX			; Branch if taken. Predict free

	stwcx.	r5,SLOCK_ILK,r3			; And set (if still reserved)
	bne--	slcktryloop			; If set failed, loop back

	.globl	EXT(stlckPatch_isync)
LEXT(stlckPatch_isync)
	isync

	lwz	r5,ACT_PREEMPT_CNT(r6)		; Get the preemption level
	addi	r5,r5,1				; Bring up the disable count
	stw	r5,ACT_PREEMPT_CNT(r6)		; Save it back

	mtmsr	r9				; Allow interruptions now
	li	r3,1				; Set that the lock was free
	blr

slcktryfailX:
	li	r5,lgKillResv			; Killing field
	stwcx.	r5,0,r5				; Kill reservation

slcktryfail:
	mtmsr	r9				; Allow interruptions now
	li	r3,0				; FAILURE - lock was taken
	blr

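/*
 * Usage note (editor's addition): the try variant returns immediately instead
 * of spinning, so callers must be prepared for failure. A minimal sketch,
 * assuming <kern/locks.h>; my_spin and my_fallback are hypothetical:
 *
 *	if (lck_spin_try_lock(my_spin)) {	// TRUE: lock acquired, preemption off
 *		// ... quick work ...
 *		lck_spin_unlock(my_spin);
 *	} else {
 *		my_fallback();			// FALSE: lock was held; do not unlock
 *	}
 */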

/*
 * void lck_spin_unlock(lck_spin_t *)
 * void ppc_usimple_unlock_rwcmb(simple_lock_t *)
 *
 */
	.align	5
	.globl	EXT(lck_spin_unlock)
LEXT(lck_spin_unlock)
	.globl	EXT(ppc_usimple_unlock_rwcmb)
LEXT(ppc_usimple_unlock_rwcmb)

	li	r0,0
	.globl	EXT(sulckPatch_isync)
LEXT(sulckPatch_isync)
	isync
	.globl	EXT(sulckPatch_eieio)
LEXT(sulckPatch_eieio)
	eieio
	stw	r0,SLOCK_ILK(r3)

	b	epStart				; Go enable preemption...

/*
 * void ppc_usimple_unlock_rwmb(simple_lock_t *)
 *
 */
	.align	5
	.globl	EXT(ppc_usimple_unlock_rwmb)

LEXT(ppc_usimple_unlock_rwmb)

	li	r0,0
	sync
	stw	r0,SLOCK_ILK(r3)

	b	epStart				; Go enable preemption...

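/*
 * Editor's note: the read-write lock fast paths below all work on a single
 * 32-bit lock word. As the code reads it, the upper 16 bits hold the shared
 * (reader) count -- which is why "addis r6,r5,1" increments it -- and the low
 * halfword holds the flag bits named at the top of this file. A hypothetical
 * C-style decoding, for illustration only:
 *
 *	#define RW_READER_COUNT(data)	((data) >> 16)		// shared holders
 *	#define RW_INTERLOCKED(data)	((data) & ILK_LOCKED)	// word being updated
 *	#define RW_HAS_WAITERS(data)	((data) & WAIT_FLAG)	// someone is blocked
 *	#define RW_UPGRADING(data)	((data) & WANT_UPGRADE)	// reader wants exclusive
 *	#define RW_EXCLUSIVE(data)	((data) & WANT_EXCL)	// writer holds or wants it
 *	#define RW_PRIV_EXCL(data)	((data) & PRIV_EXCL)	// writers beat new readers
 */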
/*
 * void lck_rw_lock_exclusive(lck_rw_t*)
 *
 */
	.align	5
	.globl	EXT(lck_rw_lock_exclusive)
LEXT(lck_rw_lock_exclusive)
#if !MACH_LDEBUG
	.globl	EXT(lock_write)
LEXT(lock_write)
#endif
	lis	r7,0xFFFF			; Get read count mask
	ori	r7,r7,(WANT_EXCL|WANT_UPGRADE|ILK_LOCKED) ; Include exclusive, upgrade, and interlock flags
rwleloop:	lwarx	r5,RW_DATA,r3		; Grab the lock value
	and.	r8,r5,r7			; Can we have it?
	ori	r6,r5,WANT_EXCL			; Mark Exclusive
	bne--	rwlespin			; Branch if cannot be held
	stwcx.	r6,RW_DATA,r3			; Update lock word
	bne--	rwleloop
	.globl	EXT(rwlePatch_isync)
LEXT(rwlePatch_isync)
	isync
	blr
rwlespin:
	li	r4,lgKillResv			; Killing field
	stwcx.	r4,0,r4				; Kill it
	cmpli	cr0,r5,RW_IND			; Is it an indirect lock?
	bne--	rwlespin1			; No, go handle contention
	mr	r4,r3				; pass lock pointer
	lwz	r3,RW_PTR(r3)			; load lock ext pointer
	b	EXT(lck_rw_lock_exclusive_ext)
rwlespin1:
	b	EXT(lck_rw_lock_exclusive_gen)

/*
 * void lck_rw_lock_shared(lck_rw_t*)
 *
 */
	.align	5
	.globl	EXT(lck_rw_lock_shared)
LEXT(lck_rw_lock_shared)
#if !MACH_LDEBUG
	.globl	EXT(lock_read)
LEXT(lock_read)
#endif
rwlsloop:	lwarx	r5,RW_DATA,r3		; Grab the lock value
	andi.	r7,r5,WANT_EXCL|WANT_UPGRADE|ILK_LOCKED	; Can we have it?
	bne--	rwlsopt				; Branch if cannot be held
rwlsloopres:
	addis	r6,r5,1				; Increment read cnt
	stwcx.	r6,RW_DATA,r3			; Update lock word
	bne--	rwlsloop
	.globl	EXT(rwlsPatch_isync)
LEXT(rwlsPatch_isync)
	isync
	blr
rwlsopt:
	andi.	r7,r5,PRIV_EXCL|ILK_LOCKED	; Can we have it?
	bne--	rwlsspin			; Branch if cannot be held
	lis	r7,0xFFFF			; Get read cnt mask
	and.	r8,r5,r7			; Is it shared?
	bne	rwlsloopres			; Branch if can be held
rwlsspin:
	li	r4,lgKillResv			; Killing field
	stwcx.	r4,0,r4				; Kill it
	cmpli	cr0,r5,RW_IND			; Is it an indirect lock?
	bne--	rwlsspin1			; No, go handle contention
	mr	r4,r3				; pass lock pointer
	lwz	r3,RW_PTR(r3)			; load lock ext pointer
	b	EXT(lck_rw_lock_shared_ext)
rwlsspin1:
	b	EXT(lck_rw_lock_shared_gen)

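/*
 * Usage note (editor's addition): a minimal reader/writer pairing, assuming
 * <kern/locks.h>; my_rw and the data fields are hypothetical. Both paths
 * release through lck_rw_done() below:
 *
 *	#include <kern/locks.h>
 *
 *	static lck_rw_t *my_rw;
 *
 *	int
 *	reader(int *val)
 *	{
 *		int v;
 *		lck_rw_lock_shared(my_rw);	// many readers may hold this at once
 *		v = *val;
 *		lck_rw_done(my_rw);		// returns LCK_RW_TYPE_SHARED here
 *		return v;
 *	}
 *
 *	void
 *	writer(int *val, int v)
 *	{
 *		lck_rw_lock_exclusive(my_rw);	// sole owner; readers are held off
 *		*val = v;
 *		lck_rw_done(my_rw);		// returns LCK_RW_TYPE_EXCLUSIVE here
 *	}
 */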
/*
 * boolean_t lck_rw_lock_shared_to_exclusive(lck_rw_t*)
 *
 */
	.align	5
	.globl	EXT(lck_rw_lock_shared_to_exclusive)
LEXT(lck_rw_lock_shared_to_exclusive)
#if !MACH_LDEBUG
	.globl	EXT(lock_read_to_write)
LEXT(lock_read_to_write)
#endif
rwlseloop:	lwarx	r5,RW_DATA,r3		; Grab the lock value
	addis	r6,r5,0xFFFF			; Decrement read cnt
	lis	r8,0xFFFF			; Get read count mask
	ori	r8,r8,WANT_UPGRADE|ILK_LOCKED	; Include interlock and upgrade flags
	and.	r7,r6,r8			; Can we have it?
	ori	r9,r6,WANT_UPGRADE		; Mark Exclusive
	bne--	rwlsespin			; Branch if cannot be held
	stwcx.	r9,RW_DATA,r3			; Update lock word
	bne--	rwlseloop
	.globl	EXT(rwlsePatch_isync)
LEXT(rwlsePatch_isync)
	isync
	li	r3,1				; Succeed, return TRUE...
	blr
rwlsespin:
	li	r4,lgKillResv			; Killing field
	stwcx.	r4,0,r4				; Kill it
	cmpli	cr0,r5,RW_IND			; Is it an indirect lock?
	bne--	rwlsespin1			; No, go handle contention
	mr	r4,r3				; pass lock pointer
	lwz	r3,RW_PTR(r3)			; load lock ext pointer
	b	EXT(lck_rw_lock_shared_to_exclusive_ext)
rwlsespin1:
	b	EXT(lck_rw_lock_shared_to_exclusive_gen)

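/*
 * Usage note (editor's addition): an upgrade can fail under contention, and
 * in that case the caller's shared hold has been dropped, so the lock must
 * be re-taken from scratch. A minimal sketch; my_rw is hypothetical:
 *
 *	lck_rw_lock_shared(my_rw);
 *	if (!lck_rw_lock_shared_to_exclusive(my_rw)) {
 *		// FALSE: we no longer hold the lock at all
 *		lck_rw_lock_exclusive(my_rw);
 *		// ... revalidate anything read under the shared hold ...
 *	}
 *	// exclusive here either way
 *	lck_rw_done(my_rw);
 */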

/*
 * void lck_rw_lock_exclusive_to_shared(lck_rw_t*)
 *
 */
	.align	5
	.globl	EXT(lck_rw_lock_exclusive_to_shared)
LEXT(lck_rw_lock_exclusive_to_shared)
#if !MACH_LDEBUG
	.globl	EXT(lock_write_to_read)
LEXT(lock_write_to_read)
#endif
	.globl	EXT(rwlesPatch_isync)
LEXT(rwlesPatch_isync)
	isync
	.globl	EXT(rwlesPatch_eieio)
LEXT(rwlesPatch_eieio)
	eieio
rwlesloop:	lwarx	r5,RW_DATA,r3		; Grab the lock value
	andi.	r7,r5,ILK_LOCKED		; Test interlock flag
	bne--	rwlesspin			; Branch if interlocked
	lis	r6,1				; Get 1 for read count
	andi.	r10,r5,WANT_UPGRADE		; Is it held with upgrade?
	li	r9,WANT_UPGRADE|WAIT_FLAG	; Get upgrade and wait flags mask
	bne	rwlesexcl1			; Skip if held with upgrade
	li	r9,WANT_EXCL|WAIT_FLAG		; Get exclusive and wait flags mask
rwlesexcl1:
	andc	r7,r5,r9			; Mark free
	rlwimi	r6,r7,0,16,31			; Set shared cnt to one
	stwcx.	r6,RW_DATA,r3			; Update lock word
	bne--	rwlesloop
	andi.	r7,r5,WAIT_FLAG			; Test wait flag
	beqlr++					; Return if no waiters
	addi	r3,r3,RW_EVENT			; Get lock event address
	b	EXT(thread_wakeup)		; wakeup waiters
rwlesspin:
	li	r4,lgKillResv			; Killing field
	stwcx.	r4,0,r4				; Kill it
	cmpli	cr0,r5,RW_IND			; Is it an indirect lock?
	bne--	rwlesspin1			; No, go handle contention
	mr	r4,r3				; pass lock pointer
	lwz	r3,RW_PTR(r3)			; load lock ext pointer
	b	EXT(lck_rw_lock_exclusive_to_shared_ext)
rwlesspin1:
	b	EXT(lck_rw_lock_exclusive_to_shared_gen)

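/*
 * Usage note (editor's addition): the downgrade cannot fail -- the exclusive
 * hold simply becomes a shared hold with a read count of one, and any waiters
 * are woken so other readers can get in. A minimal sketch; my_rw is
 * hypothetical:
 *
 *	lck_rw_lock_exclusive(my_rw);
 *	// ... modify the data ...
 *	lck_rw_lock_exclusive_to_shared(my_rw);	// keep reading without blocking readers
 *	// ... read-only use of the data ...
 *	lck_rw_done(my_rw);
 */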

/*
 * boolean_t lck_rw_try_lock_exclusive(lck_rw_t*)
 *
 */
	.align	5
	.globl	EXT(lck_rw_try_lock_exclusive)
LEXT(lck_rw_try_lock_exclusive)
	lis	r10,0xFFFF			; Load read count mask
	ori	r10,r10,WANT_EXCL|WANT_UPGRADE	; Include exclusive and upgrade flags
rwtleloop:	lwarx	r5,RW_DATA,r3		; Grab the lock value
	andi.	r7,r5,ILK_LOCKED		; Test interlock flag
	bne--	rwtlespin			; Branch if interlocked
	and.	r7,r5,r10			; Can we have it?
	ori	r6,r5,WANT_EXCL			; Mark Exclusive
	bne--	rwtlefail
	stwcx.	r6,RW_DATA,r3			; Update lock word
	bne--	rwtleloop
	.globl	EXT(rwtlePatch_isync)
LEXT(rwtlePatch_isync)
	isync
	li	r3,1				; Return TRUE
	blr
rwtlefail:
	li	r4,lgKillResv			; Killing field
	stwcx.	r4,0,r4				; Kill it
	li	r3,0				; Return FALSE
	blr
rwtlespin:
	li	r4,lgKillResv			; Killing field
	stwcx.	r4,0,r4				; Kill it
	cmpli	cr0,r5,RW_IND			; Is it an indirect lock?
	bne--	rwtlespin1			; No, go handle contention
	mr	r4,r3				; pass lock pointer
	lwz	r3,RW_PTR(r3)			; load lock ext pointer
	b	EXT(lck_rw_try_lock_exclusive_ext)
rwtlespin1:
	b	EXT(lck_rw_try_lock_exclusive_gen)


/*
 * boolean_t lck_rw_try_lock_shared(lck_rw_t*)
 *
 */
	.align	5
	.globl	EXT(lck_rw_try_lock_shared)
LEXT(lck_rw_try_lock_shared)
rwtlsloop:	lwarx	r5,RW_DATA,r3		; Grab the lock value
	andi.	r7,r5,ILK_LOCKED		; Test interlock flag
	bne--	rwtlsspin			; Branch if interlocked
	andi.	r7,r5,WANT_EXCL|WANT_UPGRADE	; So, can we have it?
	bne--	rwtlsopt			; Branch if held exclusive
rwtlsloopres:
	addis	r6,r5,1				; Increment read cnt
	stwcx.	r6,RW_DATA,r3			; Update lock word
	bne--	rwtlsloop
	.globl	EXT(rwtlsPatch_isync)
LEXT(rwtlsPatch_isync)
	isync
	li	r3,1				; Return TRUE
	blr
rwtlsopt:
	andi.	r7,r5,PRIV_EXCL			; Can we have it?
	bne--	rwtlsfail			; Branch if cannot be held
	lis	r7,0xFFFF			; Get read cnt mask
	and.	r8,r5,r7			; Is it shared?
	bne	rwtlsloopres			; Branch if can be held
rwtlsfail:
	li	r3,0				; Return FALSE
	blr
rwtlsspin:
	li	r4,lgKillResv			; Killing field
	stwcx.	r4,0,r4				; Kill it
	cmpli	cr0,r5,RW_IND			; Is it an indirect lock?
	bne--	rwtlsspin1			; No, go handle contention
	mr	r4,r3				; pass lock pointer
	lwz	r3,RW_PTR(r3)			; load lock ext pointer
	b	EXT(lck_rw_try_lock_shared_ext)
rwtlsspin1:
	b	EXT(lck_rw_try_lock_shared_gen)


/*
 * lck_rw_type_t lck_rw_done(lck_rw_t*)
 *
 */
	.align	5
	.globl	EXT(lck_rw_done)
LEXT(lck_rw_done)
#if !MACH_LDEBUG
	.globl	EXT(lock_done)
LEXT(lock_done)
#endif
	.globl	EXT(rwldPatch_isync)
LEXT(rwldPatch_isync)
	isync
	.globl	EXT(rwldPatch_eieio)
LEXT(rwldPatch_eieio)
	eieio
	li	r10,WAIT_FLAG			; Get wait flag
	lis	r7,0xFFFF			; Get read cnt mask
	mr	r12,r3				; Save lock addr
rwldloop:	lwarx	r5,RW_DATA,r3		; Grab the lock value
	andi.	r8,r5,ILK_LOCKED		; Test interlock flag
	bne--	rwldspin			; Branch if interlocked
	and.	r8,r5,r7			; Is it shared?
	cmpi	cr1,r8,0			; Remember in cr1 whether it was shared
	beq	cr1,rwldexcl			; No, check exclusive
	li	r11,RW_SHARED			; Set return value
	addis	r6,r5,0xFFFF			; Decrement read count
	and.	r8,r6,r7			; Is it still shared?
	li	r8,0				; Assume no wakeup
	bne	rwldshared1			; Skip if still held shared
	and	r8,r6,r10			; Extract wait flag
	andc	r6,r6,r10			; Clear wait flag
rwldshared1:
	b	rwldstore
rwldexcl:
	li	r11,RW_EXCL			; Set return value
	li	r9,WANT_UPGRADE			; Get upgrade flag
	and.	r6,r5,r9			; Is it held with upgrade?
	li	r9,WANT_UPGRADE|WAIT_FLAG	; Mask upgrade and wait flags
	bne	rwldexcl1			; Skip if held with upgrade
	li	r9,WANT_EXCL|WAIT_FLAG		; Mask exclusive and wait flags
rwldexcl1:
	andc	r6,r5,r9			; Mark free
	and	r8,r5,r10			; Null if no waiter
rwldstore:
	stwcx.	r6,RW_DATA,r3			; Update lock word
	bne--	rwldloop
	mr.	r8,r8				; wakeup needed?
	mr	r3,r11				; Return lock held type
	beqlr++
	mr	r3,r12				; Restore lock address
	PROLOG(0)
	addi	r3,r3,RW_EVENT			; Get lock event address
	bl	EXT(thread_wakeup)		; wakeup threads
	lwz	r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
	mtcr	r2				; Restore the CR (including cr1) saved by PROLOG
	EPILOG
	li	r3,RW_SHARED			; Assume lock type shared
	bne	cr1,rwldret			; Branch if it was held shared
	li	r3,RW_EXCL			; Return lock type exclusive
rwldret:
	blr
rwldspin:
	li	r4,lgKillResv			; Killing field
	stwcx.	r4,0,r4				; Kill it
	cmpli	cr0,r5,RW_IND			; Is it an indirect lock?
	bne--	rwldspin1			; No, go handle contention
	mr	r4,r3				; pass lock pointer
	lwz	r3,RW_PTR(r3)			; load lock ext pointer
	b	EXT(lck_rw_done_ext)
rwldspin1:
	b	EXT(lck_rw_done_gen)

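/*
 * Usage note (editor's addition): the return value tells the caller what kind
 * of hold was just released, which is handy for drop-and-reacquire patterns.
 * A minimal sketch; my_rw is hypothetical:
 *
 *	lck_rw_type_t held = lck_rw_done(my_rw);	// release either hold type
 *	// ... block, allocate, etc. with the lock dropped ...
 *	if (held == LCK_RW_TYPE_EXCLUSIVE)
 *		lck_rw_lock_exclusive(my_rw);		// restore the same hold
 *	else
 *		lck_rw_lock_shared(my_rw);
 */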
/*
 * void lck_rw_ilk_lock(lck_rw_t *lock)
 */
	.globl	EXT(lck_rw_ilk_lock)
LEXT(lck_rw_ilk_lock)
	crclr	hwtimeout			; no timeout option
	li	r4,0				; request default timeout value
	li	r12,ILK_LOCKED			; Load bit mask
	b	lckcomm				; Join on up...

/*
 * void lck_rw_ilk_unlock(lck_rw_t *lock)
 */
	.globl	EXT(lck_rw_ilk_unlock)
LEXT(lck_rw_ilk_unlock)
	li	r4,1
	b	EXT(hw_unlock_bit)