1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 #include <mach_assert.h>
32 #include <mach_ldebug.h>
33 #include <ppc/asm.h>
34 #include <ppc/proc_reg.h>
35 #include <assym.s>
36
37 #define STRING ascii
38
39 #define ILK_LOCKED 0x01
40 #define WAIT_FLAG 0x02
41 #define WANT_UPGRADE 0x04
42 #define WANT_EXCL 0x08
43
44 #define TH_FN_OWNED 0x01
45
46 # volatile CR bits
47 #define hwtimeout 20
48 #define mlckmiss 21
49
50 #define RW_DATA 0
51
52 #define PROLOG(space) \
53 stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
54 mfcr r2 __ASMNL__ \
55 mflr r0 __ASMNL__ \
56 stw r3,FM_ARG0(r1) __ASMNL__ \
57 stw r11,FM_ARG0+0x04(r1) __ASMNL__ \
58 stw r2,(FM_ALIGN(space)+FM_SIZE+FM_CR_SAVE)(r1) __ASMNL__ \
59 stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__
60
61 #define EPILOG \
62 lwz r1,0(r1) __ASMNL__ \
63 lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
64 mtlr r0 __ASMNL__
65
66 /*
67 * void hw_lock_init(hw_lock_t)
68 *
69 * Initialize a hardware lock.
70 */
71 .align 5
72 .globl EXT(hw_lock_init)
73
74 LEXT(hw_lock_init)
75
76 li r0, 0 ; set lock to free == 0
77 stw r0, 0(r3) ; Initialize the lock
78 blr
79
80 /*
81 * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
82 *
83 * Try to acquire spin-lock. The second parameter is the bit mask to test and set.
84 * Multiple bits may be set. Return success (1) or failure (0).
85 * Attempt will fail after timeout ticks of the timebase.
86 */
87 .align 5
88 .globl EXT(hw_lock_bit)
89
90 LEXT(hw_lock_bit)
91
92 crset hwtimeout ; timeout option
93 mr r12,r4 ; Load bit mask
94 mr r4,r5 ; Load timeout value
95 b lckcomm ; Join on up...
96
97 /*
98 * void hw_lock_lock(hw_lock_t)
99 *
100 * Acquire lock, spinning until it becomes available.
101 * Return with preemption disabled.
102 * We will just set a default timeout and jump into the NORMAL timeout lock.
103 */
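/*
 * Typical caller-side usage, sketched in C (illustration only; `data' and
 * its lock field are hypothetical names, not taken from this file):
 *
 *	hw_lock_init(&data->lock);
 *	...
 *	hw_lock_lock(&data->lock);		// spins; returns with preemption disabled
 *	// short critical section protected by the lock
 *	hw_lock_unlock(&data->lock);		// frees the lock and drops the preemption hold
 */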
104 .align 5
105 .globl EXT(hw_lock_lock)
106
107 LEXT(hw_lock_lock)
108 crclr hwtimeout ; no timeout option
109 li r4,0 ; request default timeout value
110 li r12,ILK_LOCKED ; Load bit mask
111 b lckcomm ; Join on up...
112
113 lockDisa:
114 crset hwtimeout ; timeout option
115 li r4,0 ; request default timeout value
116 li r12,ILK_LOCKED ; Load bit mask
117 b lckcomm ; Join on up...
118
119 /*
120 * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
121 *
122 * Try to acquire spin-lock. Return success (1) or failure (0).
123 * Attempt will fail after timeout ticks of the timebase.
124 * We try fairly hard to get this lock. We disable for interruptions, but
125 * reenable after a "short" timeout (128 ticks, we may want to change this).
126 * After checking to see if the large timeout value (passed in) has expired and a
127 * sufficient number of cycles have gone by (to ensure pending interrupts are taken),
128 * we return either in abject failure, or disable and go back to the lock sniff routine.
129 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
130 */
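/*
 * The strategy above, sketched loosely in C (illustration only, not the code
 * assembled below; timebase(), the interrupt helpers and try_to_set() are
 * informal stand-ins for the mftb/mtmsr/lwarx-stwcx. sequences that follow):
 *
 *	unsigned int hw_lock_to(hw_lock_t lock, unsigned int timeout)
 *	{
 *		while (timeout > 0) {
 *			disable_interrupts();
 *			uint32_t start = timebase();
 *			while (timebase() - start < 128) {	// short sniff window
 *				if (lock_is_free(lock) && try_to_set(lock))
 *					return 1;		// got it
 *			}
 *			enable_interrupts();			// give pending 'rupts a chance
 *			timeout -= 128;
 *		}
 *		return 0;					// abject failure
 *	}
 */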
131 .align 5
132 .globl EXT(hw_lock_to)
133
134 LEXT(hw_lock_to)
135 crset hwtimeout ; timeout option
136 li r12,ILK_LOCKED ; Load bit mask
137 lckcomm:
138 mfsprg r6,1 ; Get the current activation
139 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
140 addi r5,r5,1 ; Bring up the disable count
141 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
142 mr r5,r3 ; Get the address of the lock
143 li r8,0 ; Set r8 to zero
144
145 lcktry: lwarx r6,0,r5 ; Grab the lock value
146 and. r3,r6,r12 ; Is it locked?
147 or r6,r6,r12 ; Set interlock
148 bne-- lckspin ; Yeah, wait for it to clear...
149 stwcx. r6,0,r5 ; Try to seize that there durn lock
150 bne-- lcktry ; Couldn't get it...
151 li r3,1 ; return true
152 .globl EXT(hwllckPatch_isync)
153 LEXT(hwllckPatch_isync)
154 isync ; Make sure we don't use a speculatively loaded value
155 blr ; Go on home...
156
157 lckspin: li r6,lgKillResv ; Get killing field
158 stwcx. r6,0,r6 ; Kill reservation
159
160 mr. r4,r4 ; Test timeout value
161 bne++ lockspin0
162 lis r4,hi16(EXT(LockTimeOut)) ; Get the high part
163 ori r4,r4,lo16(EXT(LockTimeOut)) ; And the low part
164 lwz r4,0(r4) ; Get the timeout value
165 lockspin0:
166 mr. r8,r8 ; Is r8 set to zero
167 bne++ lockspin1 ; No, not the first spin attempt...
168 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
169 mfmsr r9 ; Get the MSR value
170 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
171 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
172 andc r9,r9,r0 ; Clear FP and VEC
173 andc r7,r9,r7 ; Clear EE as well
174 mtmsr r7 ; Turn off interruptions
175 isync ; May have turned off vec and fp here
176 mftb r8 ; Get timestamp on entry
177 b lcksniff
178
179 lockspin1: mtmsr r7 ; Turn off interruptions
180 mftb r8 ; Get timestamp on entry
181
182 lcksniff: lwz r3,0(r5) ; Get that lock in here
183 and. r3,r3,r12 ; Is it free yet?
184 beq++ lckretry ; Yeah, try for it again...
185
186 mftb r10 ; Time stamp us now
187 sub r10,r10,r8 ; Get the elapsed time
188 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
189 blt++ lcksniff ; Not yet...
190
191 mtmsr r9 ; Say, any interrupts pending?
192
193 ; The following instructions force the pipeline to be interlocked so that only one
194 ; instruction is issued per cycle. This ensures that we stay enabled for a long enough
195 ; time; if it's too short, pending interruptions will not have a chance to be taken
196
197 subi r4,r4,128 ; Back off elapsed time from timeout value
198 or r4,r4,r4 ; Do nothing here but force a single cycle delay
199 mr. r4,r4 ; See if we used the whole timeout
200 li r3,0 ; Assume a timeout return code
201 or r4,r4,r4 ; Do nothing here but force a single cycle delay
202
203 ble-- lckfail ; We failed
204 b lockspin1 ; Now that we've opened an enable window, keep trying...
205 lckretry:
206 mtmsr r9 ; Restore interrupt state
207 li r8,1 ; Ensure that R8 is not 0
208 b lcktry
209 lckfail: ; We couldn't get the lock
210 bf hwtimeout,lckpanic
211 li r3,0 ; Set failure return code
212 blr ; Return, head hanging low...
213 lckpanic:
214 mr r4,r5
215 mr r5,r3
216 lis r3,hi16(lckpanic_str) ; Get the failed lck message
217 ori r3,r3,lo16(lckpanic_str) ; Get the failed lck message
218 bl EXT(panic)
219 BREAKPOINT_TRAP ; We die here anyway
220 .data
221 lckpanic_str:
222 STRINGD "timeout on attempt to acquire lock (0x%08X), value = 0x%08X\n\000"
223 .text
224
225 /*
226 * void hw_lock_unlock(hw_lock_t)
227 *
228 * Unconditionally release lock.
229 * Release preemption level.
230 */
231 .align 5
232 .globl EXT(hw_lock_unlock)
233
234 LEXT(hw_lock_unlock)
235
236 .globl EXT(hwulckPatch_isync)
237 LEXT(hwulckPatch_isync)
238 isync
239 .globl EXT(hwulckPatch_eieio)
240 LEXT(hwulckPatch_eieio)
241 eieio
242 li r0, 0 ; set lock to free
243 stw r0, 0(r3)
244
245 b epStart ; Go enable preemption...
246
247 /*
248 * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
249 *
250 * Release bit based spin-lock. The second parameter is the bit mask to clear.
251 * Multiple bits may be cleared.
252 *
253 */
254 .align 5
255 .globl EXT(hw_unlock_bit)
256
257 LEXT(hw_unlock_bit)
258
259 .globl EXT(hwulckbPatch_isync)
260 LEXT(hwulckbPatch_isync)
261 isync
262 .globl EXT(hwulckbPatch_eieio)
263 LEXT(hwulckbPatch_eieio)
264 eieio
265 ubittry: lwarx r0,0,r3 ; Grab the lock value
266 andc r0,r0,r4 ; Clear the lock bits
267 stwcx. r0,0,r3 ; Try to clear that there durn lock
268 bne- ubittry ; Try again, couldn't save it...
269
270 b epStart ; Go enable preemption...
271
272 /*
273 * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
274 * unsigned int newb, unsigned int timeout)
275 *
276 * Try to acquire spin-lock. The second parameter is the bit mask to check.
277 * The third is the value of those bits and the 4th is what to set them to.
278 * Return success (1) or failure (0).
279 * Attempt will fail after timeout ticks of the timebase.
280 * We try fairly hard to get this lock. We disable for interruptions, but
281 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
282 * After checking to see if the large timeout value (passed in) has expired and a
283 * sufficient number of cycles have gone by (to ensure pending interrupts are taken),
284 * we return either in abject failure, or disable and go back to the lock sniff routine.
285 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
286 */
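/*
 * The masked-bit semantics, sketched in C (a rough equivalent of the atomic
 * update done with lwarx/stwcx. below; atomic_cas() and timed_out() are
 * informal stand-ins, not real routines in this file):
 *
 *	// succeeds only while (*lock & bits) == value; then replaces those bits with newb
 *	unsigned int hw_lock_mbits(hw_lock_t lock, unsigned int bits,
 *				   unsigned int value, unsigned int newb,
 *				   unsigned int timeout)
 *	{
 *		do {
 *			unsigned int old = *lock;
 *			if ((old & bits) == value &&
 *			    atomic_cas(lock, old, (old & ~bits) | newb))
 *				return 1;
 *		} while (!timed_out(timeout));
 *		return 0;
 *	}
 */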
287 .align 5
288 .globl EXT(hw_lock_mbits)
289
290 LEXT(hw_lock_mbits)
291
292 li r10,0
293
294 mbittry: lwarx r12,0,r3 ; Grab the lock value
295 and r0,r12,r4 ; Clear extra bits
296 andc r12,r12,r4 ; Clear all bits in the bit mask
297 or r12,r12,r6 ; Turn on the lock bits
298 cmplw r0,r5 ; Are these the right bits?
299 bne-- mbitspin ; Nope, wait for it to clear...
300 stwcx. r12,0,r3 ; Try to seize that there durn lock
301 beq++ mbitgot ; We got it, yahoo...
302 b mbittry ; Just start up again if the store failed...
303
304 .align 5
305 mbitspin: li r11,lgKillResv ; Point to killing field
306 stwcx. r11,0,r11 ; Kill it
307
308 mr. r10,r10 ; Is r10 set to zero
309 bne++ mbitspin0 ; No, not the first spin attempt...
310 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
311 mfmsr r9 ; Get the MSR value
312 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
313 ori r8,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
314 andc r9,r9,r0 ; Clear FP and VEC
315 andc r8,r9,r8 ; Clear EE as well
316 mtmsr r8 ; Turn off interruptions
317 isync ; May have turned off vectors or float here
318 mftb r10 ; Get the low part of the time base
319 b mbitsniff
320 mbitspin0:
321 mtmsr r8 ; Turn off interruptions
322 mftb r10 ; Get the low part of the time base
323 mbitsniff:
324 lwz r12,0(r3) ; Get that lock in here
325 and r0,r12,r4 ; Clear extra bits
326 cmplw r0,r5 ; Are these the right bits?
327 beq++ mbitretry ; Yeah, try for it again...
328
329 mftb r11 ; Time stamp us now
330 sub r11,r11,r10 ; Get the elapsed time
331 cmplwi r11,128 ; Have we been spinning for 128 tb ticks?
332 blt++ mbitsniff ; Not yet...
333
334 mtmsr r9 ; Say, any interrupts pending?
335
336 ; The following instructions force the pipeline to be interlocked so that only one
337 ; instruction is issued per cycle. This ensures that we stay enabled for a long enough
338 ; time. If it is too short, pending interruptions will not have a chance to be taken
339
340 subi r7,r7,128 ; Back off elapsed time from timeout value
341 or r7,r7,r7 ; Do nothing here but force a single cycle delay
342 mr. r7,r7 ; See if we used the whole timeout
343 or r7,r7,r7 ; Do nothing here but force a single cycle delay
344
345 ble-- mbitfail ; We failed
346 b mbitspin0 ; Now that we have opened an enable window, keep trying...
347 mbitretry:
348 mtmsr r9 ; Enable for interruptions
349 li r10,1 ; Make sure this is non-zero
350 b mbittry
351
352 .align 5
353 mbitgot:
354 li r3,1 ; Set good return code
355 .globl EXT(hwlmlckPatch_isync)
356 LEXT(hwlmlckPatch_isync)
357 isync ; Make sure we do not use a speculatively loaded value
358 blr
359
360 mbitfail: li r3,0 ; Set failure return code
361 blr ; Return, head hanging low...
362
363 /*
364 * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
365 *
366 * Spin until word hits 0 or timeout.
367 * Return success (1) or failure (0).
368 * Attempt will fail after timeout ticks of the timebase.
369 *
370 * The theory is that a processor will bump a counter as it signals
371 * other processors. Then it will spin until the counter hits 0 (or
372 * times out). The other processors, as they receive the signal, will
373 * decrement the counter.
374 *
375 * The other processors use interlocked update to decrement, this one
376 * does not need to interlock.
377 */
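/*
 * The rendezvous described above, sketched in C (illustrative only; the
 * variable names are made up for the example):
 *
 *	// signalling processor
 *	sync_count = number_of_cpus_signalled;		// bumped as each signal is sent
 *	if (!hw_cpu_sync(&sync_count, LockTimeOut))
 *		panic("cpu sync timed out");
 *
 *	// each signalled processor, in its signal handler
 *	(void) hw_atomic_sub(&sync_count, 1);		// interlocked decrement
 */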
378 .align 5
379 .globl EXT(hw_cpu_sync)
380
381 LEXT(hw_cpu_sync)
382
383 mftb r10 ; Get the low part of the time base
384 mr r9,r3 ; Save the sync word address
385 li r3,1 ; Assume we work
386
387 csynctry: lwz r11,0(r9) ; Grab the sync value
388 mr. r11,r11 ; Counter hit 0?
389 beqlr- ; Yeah, we are sunk...
390 mftb r12 ; Time stamp us now
391
392 sub r12,r12,r10 ; Get the elapsed time
393 cmplw r4,r12 ; Have we gone too long?
394 bge+ csynctry ; Not yet...
395
396 li r3,0 ; Set failure...
397 blr ; Return, head hanging low...
398
399 /*
400 * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
401 *
402 * Spin until word changes or timeout.
403 * Return success (1) or failure (0).
404 * Attempt will fail after timeout ticks of the timebase.
405 *
406 * This is used to ensure that a processor passes a certain point.
407 * An example of use is to monitor the last interrupt time in the
408 * per_proc block. This can be used to ensure that the other processor
409 * has seen at least one interrupt since a specific time.
410 */
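/*
 * Example use, sketched in C (illustrative; the per_proc field name here is
 * informal, not the actual structure layout):
 *
 *	unsigned int last = other_per_proc->last_interrupt_time;
 *	... interrupt the other processor ...
 *	if (!hw_cpu_wcng(&other_per_proc->last_interrupt_time, last, LockTimeOut))
 *		// the word never changed: no interrupt was observed in time
 */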
411 .align 5
412 .globl EXT(hw_cpu_wcng)
413
414 LEXT(hw_cpu_wcng)
415
416 mftb r10 ; Get the low part of the time base
417 mr r9,r3 ; Save the sync word address
418 li r3,1 ; Assume we work
419
420 wcngtry: lwz r11,0(r9) ; Grab the value
421 cmplw r11,r4 ; Do they still match?
422 bnelr- ; Nope, cool...
423 mftb r12 ; Time stamp us now
424
425 sub r12,r12,r10 ; Get the elapsed time
426 cmplw r5,r12 ; Have we gone too long?
427 bge+ wcngtry ; Not yet...
428
429 li r3,0 ; Set failure...
430 blr ; Return, head hanging low...
431
432
433 /*
434 * unsigned int hw_lock_try(hw_lock_t)
435 *
436 * Try to acquire spin-lock. Return success (1) or failure (0)
437 * Returns with preemption disabled on success.
438 *
439 */
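/*
 * Caller-side pattern, sketched in C (illustration only; `data' is a
 * hypothetical structure guarded by the lock):
 *
 *	if (hw_lock_try(&data->lock)) {			// preemption now disabled
 *		// short critical section
 *		hw_lock_unlock(&data->lock);		// also drops the preemption hold
 *	} else {
 *		// lock was busy; back off, block, or retry
 *	}
 */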
440 .align 5
441 .globl EXT(hw_lock_try)
442
443 LEXT(hw_lock_try)
444
445 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
446 mfmsr r9 ; Get the MSR value
447 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
448 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
449 andc r9,r9,r0 ; Clear FP and VEC
450 andc r7,r9,r7 ; Clear EE as well
451
452 mtmsr r7 ; Disable interruptions and thus, preemption
453
454 lwz r5,0(r3) ; Quick load
455 andi. r6,r5,ILK_LOCKED ; TEST...
456 bne-- .L_lock_try_failed ; No go...
457
458 .L_lock_try_loop:
459 lwarx r5,0,r3 ; Ld from addr of arg and reserve
460
461 andi. r6,r5,ILK_LOCKED ; TEST...
462 ori r5,r5,ILK_LOCKED
463 bne-- .L_lock_try_failedX ; branch if taken. Predict free
464
465 stwcx. r5,0,r3 ; And SET (if still reserved)
466 bne-- .L_lock_try_loop ; If set failed, loop back
467
468 .globl EXT(hwltlckPatch_isync)
469 LEXT(hwltlckPatch_isync)
470 isync
471
472 mfsprg r6,1 ; Get current activation
473 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
474 addi r5,r5,1 ; Bring up the disable count
475 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
476
477 mtmsr r9 ; Allow interruptions now
478 li r3,1 ; Set that the lock was free
479 blr
480
481 .L_lock_try_failedX:
482 li r6,lgKillResv ; Killing field
483 stwcx. r6,0,r6 ; Kill reservation
484
485 .L_lock_try_failed:
486 mtmsr r9 ; Allow interruptions now
487 li r3,0 ; FAILURE - lock was taken
488 blr
489
490 /*
491 * unsigned int hw_lock_held(hw_lock_t)
492 *
493 * Return 1 if lock is held
494 * Doesn't change preemption state.
495 * N.B. Racy, of course.
496 */
497 .align 5
498 .globl EXT(hw_lock_held)
499
500 LEXT(hw_lock_held)
501
502 isync ; Make sure we don't use a speculatively fetched lock
503 lwz r3, 0(r3) ; Get lock value
504 andi. r6,r3,ILK_LOCKED ; Extract the ILK_LOCKED bit
505 blr
506
507 /*
508 * uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
509 *
510 * Compare old to the area; if equal, store new and return true,
511 * else return false and do not store.
512 * This is an atomic operation
513 */
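/*
 * Equivalent semantics in C, as if the whole body executed atomically
 * (a sketch of what the lwarx/stwcx. loop below achieves):
 *
 *	uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
 *	{
 *		if (*dest != oldval)
 *			return 0;		// mismatch, nothing stored
 *		*dest = newval;
 *		return 1;			// stored
 *	}
 */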
514 .align 5
515 .globl EXT(hw_compare_and_store)
516
517 LEXT(hw_compare_and_store)
518
519 mr r6,r3 ; Save the old value
520
521 cstry: lwarx r9,0,r5 ; Grab the area value
522 li r3,1 ; Assume it works
523 cmplw cr0,r9,r6 ; Does it match the old value?
524 bne-- csfail ; No, it must have changed...
525 stwcx. r4,0,r5 ; Try to save the new value
526 bne-- cstry ; Didn't get it, try again...
527 .globl EXT(hwcsatomicPatch_isync)
528 LEXT(hwcsatomicPatch_isync)
529 isync ; Just hold up prefetch
530 blr ; Return...
531
532 csfail: li r3,lgKillResv ; Killing field
533 stwcx. r3,0,r3 ; Blow reservation
534
535 li r3,0 ; Set failure
536 blr ; Better luck next time...
537
538
539 /*
540 * uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
541 *
542 * Atomically add the second parameter to the first.
543 * Returns the result.
544 *
545 */
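/*
 * For reference, the same effect written as a compare-and-store loop in C
 * (a sketch built on hw_compare_and_store above; the routine below actually
 * uses lwarx/stwcx. directly rather than calling that function):
 *
 *	uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
 *	{
 *		uint32_t old, new;
 *		do {
 *			old = *dest;
 *			new = old + delt;
 *		} while (!hw_compare_and_store(old, new, dest));
 *		return new;
 *	}
 */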
546 .align 5
547 .globl EXT(hw_atomic_add)
548
549 LEXT(hw_atomic_add)
550
551 mr r6,r3 ; Save the area
552
553 addtry: lwarx r3,0,r6 ; Grab the area value
554 add r3,r3,r4 ; Add the value
555 stwcx. r3,0,r6 ; Try to save the new value
556 bne-- addtry ; Didn't get it, try again...
557 blr ; Return...
558
559
560 /*
561 * uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt)
562 *
563 * Atomically subtract the second parameter from the first.
564 * Returns the result.
565 *
566 */
567 .align 5
568 .globl EXT(hw_atomic_sub)
569
570 LEXT(hw_atomic_sub)
571
572 mr r6,r3 ; Save the area
573
574 subtry: lwarx r3,0,r6 ; Grab the area value
575 sub r3,r3,r4 ; Subtract the value
576 stwcx. r3,0,r6 ; Try to save the new value
577 bne-- subtry ; Didn't get it, try again...
578 blr ; Return...
579
580
581 /*
582 * uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask)
583 *
584 * Atomically ORs the second parameter into the first.
585 * Returns the result.
586 */
587 .align 5
588 .globl EXT(hw_atomic_or)
589
590 LEXT(hw_atomic_or)
591
592 mr r6,r3 ; Save the area
593
594 ortry: lwarx r3,0,r6 ; Grab the area value
595 or r3,r3,r4 ; OR the value
596 stwcx. r3,0,r6 ; Try to save the new value
597 bne-- ortry ; Did not get it, try again...
598 blr ; Return...
599
600
601 /*
602 * uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask)
603 *
604 * Atomically ANDs the second parameter with the first.
605 * Returns the result.
606 *
607 */
608 .align 5
609 .globl EXT(hw_atomic_and)
610
611 LEXT(hw_atomic_and)
612
613 mr r6,r3 ; Save the area
614
615 andtry: lwarx r3,0,r6 ; Grab the area value
616 and r3,r3,r4 ; AND the value
617 stwcx. r3,0,r6 ; Try to save the new value
618 bne-- andtry ; Did not get it, try again...
619 blr ; Return...
620
621
622 /*
623 * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
624 *
625 * Atomically inserts the element at the head of the list
626 * anchor is the pointer to the first element
627 * element is the pointer to the element to insert
628 * disp is the displacement into the element to the chain pointer
629 *
630 * NOTE: OSEnqueueAtomic() is aliased to this, see xnu/libkern/Makefile
631 */
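/*
 * Logical equivalent in C (a sketch; the chain pointer lives `disp' bytes
 * into each element, and only the anchor update needs to be atomic):
 *
 *	void hw_queue_atomic(unsigned int *anchor, unsigned int *elem, unsigned int disp)
 *	{
 *		unsigned int *chain = (unsigned int *)((char *)elem + disp);
 *		do {
 *			*chain = *anchor;		// point new element at current head
 *		} while (!hw_compare_and_store(*chain, (unsigned int)elem, anchor));
 *	}
 */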
632 .align 5
633 .globl EXT(hw_queue_atomic)
634
635 LEXT(hw_queue_atomic)
636
637 mr r7,r4 ; Make end point the same as start
638 mr r8,r5 ; Copy the displacement also
639 b hw_queue_comm ; Join common code...
640
641 /*
642 * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
643 *
644 * Atomically inserts the list of elements at the head of the list
645 * anchor is the pointer to the first element
646 * first is the pointer to the first element to insert
647 * last is the pointer to the last element to insert
648 * disp is the displacement into the element to the chain pointer
649 */
650 .align 5
651 .globl EXT(hw_queue_atomic_list)
652
653 LEXT(hw_queue_atomic_list)
654
655 mr r7,r5 ; Make end point the same as start
656 mr r8,r6 ; Copy the displacement also
657
658 hw_queue_comm:
659 lwarx r9,0,r3 ; Pick up the anchor
660 stwx r9,r8,r7 ; Chain that to the end of the new stuff
661 eieio ; Make sure this store makes it before the anchor update
662 stwcx. r4,0,r3 ; Try to chain into the front
663 bne-- hw_queue_comm ; Didn't make it, try again...
664
665 blr ; Return...
666
667 /*
668 * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
669 *
670 * Atomically removes the first element in a list and returns it.
671 * anchor is the pointer to the first element
672 * disp is the displacement into the element to the chain pointer
673 * Returns element if found, 0 if empty.
674 *
675 * NOTE: OSDequeueAtomic() is aliased to this, see xnu/libkern/Makefile
676 */
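/*
 * Logical equivalent in C (a sketch of the semantics, not the lwarx/stwcx.
 * loop used below):
 *
 *	unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
 *	{
 *		unsigned int *elem;
 *		do {
 *			elem = (unsigned int *)*anchor;
 *			if (elem == 0)
 *				return 0;		// list is empty
 *		} while (!hw_compare_and_store((unsigned int)elem,
 *				*(unsigned int *)((char *)elem + disp), anchor));
 *		return elem;
 *	}
 */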
677 .align 5
678 .globl EXT(hw_dequeue_atomic)
679
680 LEXT(hw_dequeue_atomic)
681
682 mr r5,r3 ; Save the anchor
683
684 hw_dequeue_comm:
685 lwarx r3,0,r5 ; Pick up the anchor
686 mr. r3,r3 ; Is the list empty?
687 beq-- hdcFail ; Leave if list is empty...
688 lwzx r9,r4,r3 ; Get the next in line
689 stwcx. r9,0,r5 ; Try to chain into the front
690 beqlr++ ; Got the thing, go away with it...
691 b hw_dequeue_comm ; Did not make it, try again...
692
693 hdcFail: li r4,lgKillResv ; Killing field
694 stwcx. r4,0,r4 ; Dump reservation
695 blr ; Leave...
696
697
698 /*
699 * Routines for mutex lock debugging.
700 */
701
702 /*
703 * Gets lock check flags in CR6: CR bits 24-27
704 */
705 #define CHECK_SETUP(rg) \
706 lbz rg,lglcksWork(0) __ASMNL__ \
707 mtcrf 2,rg __ASMNL__
708
709
710 /*
711 * Checks for expected lock type.
712 */
713 #define CHECK_MUTEX_TYPE() \
714 bf MUTEX_ATTR_DEBUGb,1f __ASMNL__ \
715 bt 24+disLktypeb,1f __ASMNL__ \
716 lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
717 cmpwi r10,MUTEX_TAG __ASMNL__ \
718 beq++ 1f __ASMNL__ \
719 PROLOG(0) __ASMNL__ \
720 mr r4,r11 __ASMNL__ \
721 mr r5,r10 __ASMNL__ \
722 lis r3,hi16(not_a_mutex) __ASMNL__ \
723 ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
724 bl EXT(panic) __ASMNL__ \
725 BREAKPOINT_TRAP __ASMNL__ \
726 1:
727
728 .data
729 not_a_mutex:
730 STRINGD "mutex (0x%08X) not a mutex type (0x%08X)\n\000"
731 .text
732
733 /*
734 * Verifies return to the correct thread in "unlock" situations.
735 */
736 #define CHECK_THREAD(thread_offset) \
737 bf MUTEX_ATTR_DEBUGb,3f __ASMNL__ \
738 bt 24+disLkThreadb,3f __ASMNL__ \
739 mfsprg r10,1 __ASMNL__ \
740 lwz r5,MUTEX_DATA(r3) __ASMNL__ \
741 rlwinm. r9,r5,0,0,29 __ASMNL__ \
742 bne++ 1f __ASMNL__ \
743 lis r3,hi16(not_held) __ASMNL__ \
744 ori r3,r3,lo16(not_held) __ASMNL__ \
745 b 2f __ASMNL__ \
746 1: __ASMNL__ \
747 cmpw r9,r10 __ASMNL__ \
748 beq++ 3f __ASMNL__ \
749 mr r5,r10 __ASMNL__ \
750 mr r6,r9 __ASMNL__ \
751 lis r3,hi16(wrong_thread) __ASMNL__ \
752 ori r3,r3,lo16(wrong_thread) __ASMNL__ \
753 2: __ASMNL__ \
754 mr r4,r11 __ASMNL__ \
755 PROLOG(0) __ASMNL__ \
756 bl EXT(panic) __ASMNL__ \
757 BREAKPOINT_TRAP __ASMNL__ \
758 3:
759
760 .data
761 not_held:
762 STRINGD "mutex (0x%08X) not held\n\000"
763 wrong_thread:
764 STRINGD "mutex (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n\000"
765 .text
766
767 #define CHECK_MYLOCK() \
768 bf MUTEX_ATTR_DEBUGb,1f __ASMNL__ \
769 bt 24+disLkMyLckb,1f __ASMNL__ \
770 mfsprg r10,1 __ASMNL__ \
771 lwz r9,MUTEX_DATA(r3) __ASMNL__ \
772 rlwinm r9,r9,0,0,29 __ASMNL__ \
773 cmpw r9,r10 __ASMNL__ \
774 bne++ 1f __ASMNL__ \
775 mr r4,r11 __ASMNL__ \
776 lis r3, hi16(mylock_attempt) __ASMNL__ \
777 ori r3,r3,lo16(mylock_attempt) __ASMNL__ \
778 bl EXT(panic) __ASMNL__ \
779 BREAKPOINT_TRAP __ASMNL__ \
780 1:
781
782 .data
783 mylock_attempt:
784 STRINGD "mutex (0x%08X) recursive lock attempt\n\000"
785 .text
786
787 #define LCK_STACK(lck, stack, lck_stack, frame_cnt, lr_save, tmp) \
788 bf 24+enaLkExtStckb,3f __ASMNL__ \
789 addi lck_stack,lck,MUTEX_STACK __ASMNL__ \
790 li frame_cnt,MUTEX_FRAMES-1 __ASMNL__ \
791 1: __ASMNL__ \
792 mr tmp,stack __ASMNL__ \
793 lwz stack,0(stack) __ASMNL__ \
794 xor tmp,stack,tmp __ASMNL__ \
795 cmplwi tmp,8192 __ASMNL__ \
796 bge-- 2f __ASMNL__ \
797 lwz lr_save,FM_LR_SAVE(stack) __ASMNL__ \
798 stwu lr_save,4(lck_stack) __ASMNL__ \
799 subi frame_cnt,frame_cnt,1 __ASMNL__ \
800 cmpi cr0,frame_cnt,0 __ASMNL__ \
801 bne 1b __ASMNL__ \
802 b 3f __ASMNL__ \
803 2: __ASMNL__ \
804 li tmp,0 __ASMNL__ \
805 stwu tmp,4(lck_stack) __ASMNL__ \
806 subi frame_cnt,frame_cnt,1 __ASMNL__ \
807 cmpi cr0,frame_cnt,0 __ASMNL__ \
808 bne 2b __ASMNL__ \
809 3:
810
811 /*
812 * void mutex_init(mutex_t* l, etap_event_t etap)
813 *
814 */
815 .align 5
816 .globl EXT(mutex_init)
817 LEXT(mutex_init)
818
819 PROLOG(0)
820 li r10,0
821 stw r10,MUTEX_DATA(r3) ; clear lock word
822 sth r10,MUTEX_WAITERS(r3) ; init waiter count
823 sth r10,MUTEX_PROMOTED_PRI(r3)
824 #if MACH_LDEBUG
825 li r11,MUTEX_ATTR_DEBUG
826 stw r10,MUTEX_STACK(r3) ; init caller pc
827 stw r10,MUTEX_THREAD(r3) ; and owning thread
828 li r9, MUTEX_TAG
829 stw r9, MUTEX_TYPE(r3) ; set lock type
830 stw r11,MUTEX_ATTR(r3)
831 addi r8,r3,MUTEX_STACK-4
832 li r9,MUTEX_FRAMES
833 mlistck:
834 stwu r10,4(r8) ; init stack
835 subi r9,r9,1
836 cmpi cr0,r9,0
837 bne mlistck
838 #endif /* MACH_LDEBUG */
839 EPILOG
840 blr
841
842 /*
843 * void lck_mtx_lock_ext(lck_mtx_ext_t*)
844 *
845 */
846 .align 5
847 .globl EXT(lck_mtx_lock_ext)
848 LEXT(lck_mtx_lock_ext)
849 #if MACH_LDEBUG
850 .globl EXT(mutex_lock)
851 LEXT(mutex_lock)
852
853 .globl EXT(_mutex_lock)
854 LEXT(_mutex_lock)
855 #endif
856 mr r11,r3 ; Save lock addr
857 mlckeEnter:
858 lwz r0,MUTEX_ATTR(r3)
859 mtcrf 1,r0 ; Set cr7
860 CHECK_SETUP(r12)
861 CHECK_MUTEX_TYPE()
862
863 bf MUTEX_ATTR_DEBUGb,L_mutex_lock_assert_wait_2
864 PROLOG(0)
865 bl EXT(assert_wait_possible)
866 mr. r3,r3
867 bne L_mutex_lock_assert_wait_1
868 lis r3,hi16(L_mutex_lock_assert_wait_panic_str)
869 ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
870 bl EXT(panic)
871 BREAKPOINT_TRAP ; We die here anyway
872
873 .data
874 L_mutex_lock_assert_wait_panic_str:
875 STRINGD "mutex lock attempt with assert_wait_possible false\n\000"
876 .text
877
878 L_mutex_lock_assert_wait_1:
879 lwz r3,FM_ARG0(r1)
880 lwz r11,FM_ARG0+0x04(r1)
881 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
882 mtcr r2
883 EPILOG
884 L_mutex_lock_assert_wait_2:
885
886 mfsprg r6,1 ; load the current thread
887 bf MUTEX_ATTR_STATb,mlckestatskip ; Branch if no stat
888 lwz r5,MUTEX_GRP(r3) ; Load lock group
889 li r7,GRP_MTX_STAT_UTIL+4 ; Set stat util offset
890 mlckestatloop:
891 lwarx r8,r7,r5 ; Load stat util cnt
892 addi r8,r8,1 ; Increment stat util cnt
893 stwcx. r8,r7,r5 ; Store stat util cnt
894 bne-- mlckestatloop ; Retry if failed
895 mr. r8,r8 ; Test for zero
896 bne++ mlckestatskip ; Did stat util cnt wrap?
897 lwz r8,GRP_MTX_STAT_UTIL(r5) ; Load upper stat util cnt
898 addi r8,r8,1 ; Increment upper stat util cnt
899 stw r8,GRP_MTX_STAT_UTIL(r5) ; Store upper stat util cnt
900 mlckestatskip:
901 lwz r5,MUTEX_DATA(r3) ; Get the lock quickly
902 li r4,0
903 li r8,0
904 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
905 mfmsr r9 ; Get the MSR value
906 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
907 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
908 andc r9,r9,r0 ; Clear FP and VEC
909 andc r7,r9,r7 ; Clear EE as well
910 mtmsr r7 ; Turn off interruptions
911 isync ; May have turned off vec and fp here
912 mr. r5,r5 ; Quick check
913 bne-- mlckespin01 ; Can not get it right now...
914
915 mlcketry:
916 lwarx r5,MUTEX_DATA,r3 ; load the mutex lock
917 mr. r5,r5
918 bne-- mlckespin0 ; Can not get it right now...
919 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
920 bne-- mlcketry ; loop back if failed
921 .globl EXT(mlckePatch_isync)
922 LEXT(mlckePatch_isync)
923 isync ; stop prefetching
924 mflr r12
925 bf MUTEX_ATTR_DEBUGb,mlckedebskip
926 mr r8,r6 ; Get the active thread
927 stw r12,MUTEX_STACK(r3) ; Save our caller
928 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
929 mr r5,r1
930 LCK_STACK(r3,r5,r6,r7,r8,r10)
931 mlckedebskip:
932 mtmsr r9 ; Say, any interrupts pending?
933 blr
934
935 mlckespin0:
936 li r5,lgKillResv ; Killing field
937 stwcx. r5,0,r5 ; Kill reservation
938 mlckespin01:
939 mflr r12
940 mtmsr r9 ; Say, any interrupts pending?
941 bl mlckspin1
942 mtmsr r7 ; Turn off interruptions, vec and fp off already
943 mtlr r12
944 b mlcketry
945
946 /*
947 * void lck_mtx_lock(lck_mtx_t*)
948 *
949 */
950 .align 5
951 .globl EXT(lck_mtx_lock)
952 LEXT(lck_mtx_lock)
953
954 #if !MACH_LDEBUG
955 .globl EXT(mutex_lock)
956 LEXT(mutex_lock)
957
958 .globl EXT(_mutex_lock)
959 LEXT(_mutex_lock)
960 #endif
961
962 mfsprg r6,1 ; load the current thread
963 lwz r5,MUTEX_DATA(r3) ; Get the lock quickly
964 mr r11,r3 ; Save lock addr
965 li r4,0
966 li r8,0
967 li r9,0
968 mr. r5,r5 ; Quick check
969 bne-- mlckspin00 ; Indirect or Can not get it right now...
970
971 mlcktry:
972 lwarx r5,MUTEX_DATA,r3 ; load the mutex lock
973 mr. r5,r5
974 bne-- mlckspin01 ; Can not get it right now...
975 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
976 bne-- mlcktry ; loop back if failed
977 .globl EXT(mlckPatch_isync)
978 LEXT(mlckPatch_isync)
979 isync ; stop prefetching
980 blr
981
982 mlckspin00:
983 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
984 bne-- mlckspin02 ; No, go handle contention
985 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
986 b mlckeEnter
987 mlckspin01:
988 li r5,lgKillResv ; Killing field
989 stwcx. r5,0,r5 ; Kill reservation
990 mlckspin02:
991 mflr r12
992 li r0,0
993 mtcrf 1,r0 ; Set cr7 to zero
994 bl mlckspin1
995 mtlr r12
996 b mlcktry
997
998
999 mlckspin1:
1000 mr. r4,r4 ; Test timeout value
1001 bne++ mlckspin2
1002 lis r4,hi16(EXT(MutexSpin)) ; Get the high part
1003 ori r4,r4,lo16(EXT(MutexSpin) ) ; And the low part
1004 lwz r4,0(r4) ; Get spin timeout value
1005 mr. r4,r4 ; Test spin timeout value
1006 bne++ mlckspin2 ; Is spin timeout requested
1007 crclr mlckmiss ; Clear miss test
1008 b mlckslow1 ; Don't try to spin
1009
1010 mlckspin2: mr. r8,r8 ; Is r8 set to zero
1011 bne++ mlckspin3 ; No, not the first spin attempt...
1012 crclr mlckmiss ; Clear miss test
1013 mr. r9,r9 ; Is r9 set to zero
1014 bne++ mlckspin3 ; No, r9 already holds the msr value
1015 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1016 mfmsr r9 ; Get the MSR value
1017 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1018 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1019 andc r9,r9,r0 ; Clear FP and VEC
1020 andc r7,r9,r7 ; Clear EE as well
1021 mtmsr r7 ; Turn off interruptions
1022 isync ; May have turned off vec and fp here
1023 mftb r8 ; Get timestamp on entry
1024 b mlcksniff
1025
1026 mlckspin3: mtmsr r7 ; Turn off interruptions
1027 mftb r8 ; Get timestamp on entry
1028
1029 mlcksniff: lwz r5,MUTEX_DATA(r3) ; Get that lock in here
1030 mr. r5,r5 ; Is the lock held
1031 beq++ mlckretry ; No, try for it again...
1032 rlwinm. r10,r5,0,0,29 ; Extract the lock owner
1033 beq++ mlckslow0 ; InterLock is held
1034 bf MUTEX_ATTR_STATb,mlStatSkip ; Branch if no stat
1035 andi. r5,r5,ILK_LOCKED ; extract interlocked?
1036 bne mlStatSkip ; yes, skip
1037 bt mlckmiss,mlStatSkip ; miss already counted
1038 crset mlckmiss ; Remember miss recorded
1039 lwz r5,MUTEX_GRP(r3) ; Load lock group
1040 addi r5,r5,GRP_MTX_STAT_MISS+4 ; Add stat miss offset
1041 mlStatLoop:
1042 lwarx r6,0,r5 ; Load stat miss cnt
1043 addi r6,r6,1 ; Increment stat miss cnt
1044 stwcx. r6,0,r5 ; Update stat miss cnt
1045 bne-- mlStatLoop ; Retry if failed
1046 mfsprg r6,1 ; Reload current thread
1047 mlStatSkip:
1048 lwz r2,ACT_MACT_SPF(r10) ; Get the special flags
1049 rlwinm. r2,r2,0,OnProcbit,OnProcbit ; Is OnProcbit set?
1050 beq mlckslow0 ; Lock owner isn't running
1051 lis r2,hi16(TH_OPT_DELAYIDLE) ; Get DelayedIdle Option
1052 ori r2,r2,lo16(TH_OPT_DELAYIDLE) ; Get DelayedIdle Option
1053 lwz r10,THREAD_OPTIONS(r10) ; Get the thread options
1054 and. r10,r10,r2 ; Is DelayedIdle set?
1055 bne mlckslow0 ; Lock owner is in delay idle
1056
1057 mftb r10 ; Time stamp us now
1058 sub r10,r10,r8 ; Get the elapsed time
1059 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
1060 blt++ mlcksniff ; Not yet...
1061
1062 mtmsr r9 ; Say, any interrupts pending?
1063
1064 ; The following instructions force the pipeline to be interlocked so that only one
1065 ; instruction is issued per cycle. This ensures that we stay enabled for a long enough
1066 ; time; if it's too short, pending interruptions will not have a chance to be taken
1067
1068 subi r4,r4,128 ; Back off elapsed time from timeout value
1069 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1070 mr. r4,r4 ; See if we used the whole timeout
1071 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1072
1073 ble-- mlckslow1 ; We failed
1074 b mlckspin3 ; Now that we've opened an enable window, keep trying...
1075 mlckretry:
1076 mtmsr r9 ; Restore interrupt state
1077 li r8,1 ; Show already through once
1078 blr
1079
1080 mlckslow0: ; We couldn't get the lock
1081 mtmsr r9 ; Restore interrupt state
1082
1083 mlckslow1:
1084 mtlr r12
1085
1086 PROLOG(0)
1087 .L_ml_retry:
1088 bl lockDisa ; Go get a lock on the mutex's interlock lock
1089 mr. r4,r3 ; Did we get it?
1090 lwz r3,FM_ARG0(r1) ; Restore the lock address
1091 bne++ mlGotInt ; We got it just fine...
1092 mr r4,r11 ; Saved lock addr
1093 lis r3,hi16(mutex_failed1) ; Get the failed mutex message
1094 ori r3,r3,lo16(mutex_failed1) ; Get the failed mutex message
1095 bl EXT(panic) ; Call panic
1096 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1097
1098 .data
1099 mutex_failed1:
1100 STRINGD "attempt to interlock mutex (0x%08X) failed on mutex lock\n\000"
1101 .text
1102
1103 mlGotInt:
1104
1105 ; Note that there is no reason to do a load and reserve here. We already
1106 ; hold the interlock lock and no one can touch this field unless they
1107 ; have that, so, we're free to play
1108
1109 lwz r4,MUTEX_DATA(r3) ; Get the mutex's lock field
1110 rlwinm. r9,r4,30,2,31 ; So, can we have it?
1111 bne- mlInUse ; Nope, somebody's playing already...
1112
1113 bf++ MUTEX_ATTR_DEBUGb,mlDebSkip
1114 CHECK_SETUP(r5)
1115 mfsprg r9,1 ; Get the current activation
1116 lwz r5,0(r1) ; Get previous save frame
1117 lwz r6,FM_LR_SAVE(r5) ; Get our caller's address
1118 mr r8,r9 ; Get the active thread
1119 stw r6,MUTEX_STACK(r3) ; Save our caller
1120 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
1121 LCK_STACK(r3,r5,r6,r7,r8,r10)
1122 mlDebSkip:
1123 mr r3,r11 ; Get the based lock address
1124 bl EXT(lck_mtx_lock_acquire)
1125 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
1126 mfsprg r5,1
1127 mtcr r2
1128 mr. r4,r3
1129 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1130 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1131 beq mlUnlock
1132 ori r5,r5,WAIT_FLAG
1133
1134 mlUnlock: eieio
1135 stw r5,MUTEX_DATA(r3) ; grab the mutexlock and free the interlock
1136
1137 EPILOG ; Restore all saved registers
1138 b epStart ; Go enable preemption...
1139
1140 ; We come to here when we have a resource conflict. In other words,
1141 ; the mutex is held.
1142
1143 mlInUse:
1144
1145 CHECK_SETUP(r12)
1146 CHECK_MYLOCK() ; Assert we don't own the lock already
1147
1148 ; Note that we come in here with the interlock set. The wait routine
1149 ; will unlock it before waiting.
1150
1151 bf MUTEX_ATTR_STATb,mlStatSkip2 ; Branch if no stat
1152 lwz r5,MUTEX_GRP(r3) ; Load lck group
1153 bt mlckmiss,mlStatSkip1 ; Skip miss already counted
1154 crset mlckmiss ; Remember miss recorded
1155 li r9,GRP_MTX_STAT_MISS+4 ; Get stat miss offset
1156 mlStatLoop1:
1157 lwarx r8,r9,r5 ; Load stat miss cnt
1158 addi r8,r8,1 ; Increment stat miss cnt
1159 stwcx. r8,r9,r5 ; Store stat miss cnt
1160 bne-- mlStatLoop1 ; Retry if failed
1161 mlStatSkip1:
1162 lwz r9,GRP_MTX_STAT_WAIT+4(r5) ; Load wait cnt
1163 addi r9,r9,1 ; Increment wait cnt
1164 stw r9,GRP_MTX_STAT_WAIT+4(r5) ; Update wait cnt
1165 mlStatSkip2:
1166 ori r4,r4,WAIT_FLAG ; Set the wait flag
1167 stw r4,MUTEX_DATA(r3)
1168 rlwinm r4,r4,0,0,29 ; Extract the lock owner
1169 mfcr r2
1170 stw r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
1171 mr r3,r11 ; Get the based lock address
1172 bl EXT(lck_mtx_lock_wait) ; Wait for our turn at the lock
1173
1174 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1175 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1176 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
1177 mtcr r2
1178 b .L_ml_retry ; and try again...
1179
1180
1181 /*
1182 * boolean_t lck_mtx_try_lock_ext(lck_mtx_ext_t*)
1183 *
1184 */
1185 .align 5
1186 .globl EXT(lck_mtx_try_lock_ext)
1187 LEXT(lck_mtx_try_lock_ext)
1188 #if MACH_LDEBUG
1189 .globl EXT(mutex_try)
1190 LEXT(mutex_try)
1191 .globl EXT(_mutex_try)
1192 LEXT(_mutex_try)
1193 #endif
1194 mr r11,r3 ; Save lock addr
1195 mlteEnter:
1196 lwz r0,MUTEX_ATTR(r3)
1197 mtcrf 1,r0 ; Set cr7
1198 CHECK_SETUP(r12)
1199 CHECK_MUTEX_TYPE()
1200
1201 bf MUTEX_ATTR_STATb,mlteStatSkip ; Branch if no stat
1202 lwz r5,MUTEX_GRP(r3) ; Load lock group
1203 li r7,GRP_MTX_STAT_UTIL+4 ; Set stat util offset
1204 mlteStatLoop:
1205 lwarx r8,r7,r5 ; Load stat util cnt
1206 addi r8,r8,1 ; Increment stat util cnt
1207 stwcx. r8,r7,r5 ; Store stat util cnt
1208 bne-- mlteStatLoop ; Retry if failed
1209 mr. r8,r8 ; Test for zero
1210 bne++ mlteStatSkip ; Did stat util cnt wrap?
1211 lwz r8,GRP_MTX_STAT_UTIL(r5) ; Load upper stat util cnt
1212 addi r8,r8,1 ; Increment upper stat util cnt
1213 stw r8,GRP_MTX_STAT_UTIL(r5) ; Store upper stat util cnt
1214 mlteStatSkip:
1215 mfsprg r6,1 ; load the current thread
1216 lwz r5,MUTEX_DATA(r3) ; Get the lock value
1217 mr. r5,r5 ; Quick check
1218 bne-- L_mutex_try_slow ; Can not get it now...
1219 mfmsr r9 ; Get the MSR value
1220 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1221 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1222 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1223 andc r9,r9,r0 ; Clear FP and VEC
1224 andc r7,r9,r7 ; Clear EE as well
1225 mtmsr r7 ; Turn off interruptions
1226 isync ; May have turned off vec and fp here
1227
1228 mlteLoopTry:
1229 lwarx r5,MUTEX_DATA,r3 ; load the lock value
1230 mr. r5,r5
1231 bne-- mlteSlowX ; branch to the slow path
1232 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
1233 bne-- mlteLoopTry ; retry if failed
1234 .globl EXT(mltelckPatch_isync)
1235 LEXT(mltelckPatch_isync)
1236 isync ; stop prefetching
1237 mflr r12
1238 bf MUTEX_ATTR_DEBUGb,mlteDebSkip
1239 mr r8,r6 ; Get the active thread
1240 stw r12,MUTEX_STACK(r3) ; Save our caller
1241 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
1242 mr r5,r1
1243 LCK_STACK(r3,r5,r6,r7,r8,r10)
1244 mlteDebSkip:
1245 li r3, 1
1246 mtmsr r9 ; Say, any interrupts pending?
1247 blr
1248 mlteSlowX:
1249 li r5,lgKillResv ; Killing field
1250 stwcx. r5,0,r5 ; Kill reservation
1251 mtmsr r9 ; Say, any interrupts pending?
1252 b L_mutex_try_slow
1253
1254
1255 /*
1256 * boolean_t lck_mtx_try_lock(lck_mtx_t*)
1257 *
1258 */
1259 .align 5
1260 .globl EXT(lck_mtx_try_lock)
1261 LEXT(lck_mtx_try_lock)
1262 #if !MACH_LDEBUG
1263 .globl EXT(mutex_try)
1264 LEXT(mutex_try)
1265 .globl EXT(_mutex_try)
1266 LEXT(_mutex_try)
1267 #endif
1268
1269 mfsprg r6,1 ; load the current thread
1270 lwz r5,MUTEX_DATA(r3) ; Get the lock value
1271 mr r11,r3 ; Save lock addr
1272 mr. r5,r5 ; Quick check
1273 bne-- mltSlow00 ; Indirect or Can not get it now...
1274
1275 mltLoopTry:
1276 lwarx r5,MUTEX_DATA,r3 ; load the lock value
1277 mr. r5,r5
1278 bne-- mltSlow01 ; branch to the slow path
1279 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
1280 bne-- mltLoopTry ; retry if failed
1281 .globl EXT(mltlckPatch_isync)
1282 LEXT(mltlckPatch_isync)
1283 isync ; stop prefetching
1284 li r3, 1
1285 blr
1286
1287 mltSlow00:
1288 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
1289 bne-- mltSlow02 ; No, go handle contention
1290 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
1291 b mlteEnter
1292 mltSlow01:
1293 li r5,lgKillResv ; Killing field
1294 stwcx. r5,0,r5 ; Kill reservation
1295
1296 mltSlow02:
1297 li r0,0
1298 mtcrf 1,r0 ; Set cr7 to zero
1299
1300 L_mutex_try_slow:
1301 PROLOG(0)
1302
1303 lwz r6,MUTEX_DATA(r3) ; Quick check
1304 rlwinm. r6,r6,30,2,31 ; to see if someone has this lock already
1305 bne- mtFail ; Someone's got it already...
1306
1307 bl lockDisa ; Go get a lock on the mutex's interlock lock
1308 mr. r4,r3 ; Did we get it?
1309 lwz r3,FM_ARG0(r1) ; Restore the lock address
1310 bne++ mtGotInt ; We got it just fine...
1311 mr r4,r11 ; Saved lock addr
1312 lis r3,hi16(mutex_failed2) ; Get the failed mutex message
1313 ori r3,r3,lo16(mutex_failed2) ; Get the failed mutex message
1314 bl EXT(panic) ; Call panic
1315 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1316
1317 .data
1318 mutex_failed2:
1319 STRINGD "attempt to interlock mutex (0x%08X) failed on mutex lock try\n\000"
1320 .text
1321
1322 mtGotInt:
1323
1324 ; Note that there is no reason to do a load and reserve here. We already
1325 ; hold the interlock and no one can touch this field unless they
1326 ; have that, so, we're free to play
1327
1328 lwz r4,MUTEX_DATA(r3) ; Get the mutex's lock field
1329 rlwinm. r9,r4,30,2,31 ; So, can we have it?
1330 bne- mtInUse ; Nope, somebody's playing already...
1331
1332 bf++ MUTEX_ATTR_DEBUGb,mtDebSkip
1333 CHECK_SETUP(r5)
1334 mfsprg r9,1 ; Get the current activation
1335 lwz r5,0(r1) ; Get previous save frame
1336 lwz r6,FM_LR_SAVE(r5) ; Get our caller's address
1337 mr r8,r9 ; Get the active thread
1338 stw r6,MUTEX_STACK(r3) ; Save our caller
1339 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
1340 LCK_STACK(r3,r5,r6,r7,r8,r10)
1341 mtDebSkip:
1342 mr r3,r11 ; Get the based lock address
1343 bl EXT(lck_mtx_lock_acquire)
1344 mfsprg r5,1
1345 mr. r4,r3
1346 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1347 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1348 beq mtUnlock
1349 ori r5,r5,WAIT_FLAG
1350
1351 mtUnlock: eieio
1352 stw r5,MUTEX_DATA(r3) ; grab the mutexlock and free the interlock
1353
1354 bl epStart ; Go enable preemption...
1355
1356 li r3, 1
1357 EPILOG ; Restore all saved registers
1358 blr ; Return...
1359
1360 ; We come to here when we have a resource conflict. In other words,
1361 ; the mutex is held.
1362
1363 mtInUse:
1364 bf++ MUTEX_ATTR_STATb,mtStatSkip ; Branch if no stat
1365 lwz r5,MUTEX_GRP(r3) ; Load lock group
1366 li r9,GRP_MTX_STAT_MISS+4 ; Get stat miss offset
1367 mtStatLoop:
1368 lwarx r8,r9,r5 ; Load stat miss cnt
1369 addi r8,r8,1 ; Increment stat miss cnt
1370 stwcx. r8,r9,r5 ; Store stat miss cnt
1371 bne-- mtStatLoop ; Retry if failed
1372 mtStatSkip:
1373 rlwinm r4,r4,0,0,30 ; Get the unlock value
1374 stw r4,MUTEX_DATA(r3) ; free the interlock
1375 bl epStart ; Go enable preemption...
1376
1377 mtFail: li r3,0 ; Set failure code
1378 EPILOG ; Restore all saved registers
1379 blr ; Return...
1380
1381
1382 /*
1383 * void mutex_unlock(mutex_t* l)
1384 *
1385 */
1386 .align 5
1387 .globl EXT(mutex_unlock)
1388 LEXT(mutex_unlock)
1389
1390 sync
1391 mr r11,r3 ; Save lock addr
1392 #if MACH_LDEBUG
1393 b mlueEnter1
1394 #else
1395 b mluEnter1
1396 #endif
1397
1398 /*
1399 * void lck_mtx_ext_unlock(lck_mtx_ext_t* l)
1400 *
1401 */
1402 .align 5
1403 .globl EXT(lck_mtx_ext_unlock)
1404 LEXT(lck_mtx_ext_unlock)
1405 #if MACH_LDEBUG
1406 .globl EXT(mutex_unlock_rwcmb)
1407 LEXT(mutex_unlock_rwcmb)
1408 #endif
1409 mlueEnter:
1410 .globl EXT(mulckePatch_isync)
1411 LEXT(mulckePatch_isync)
1412 isync
1413 .globl EXT(mulckePatch_eieio)
1414 LEXT(mulckePatch_eieio)
1415 eieio
1416 mr r11,r3 ; Save lock addr
1417 mlueEnter1:
1418 lwz r0,MUTEX_ATTR(r3)
1419 mtcrf 1,r0 ; Set cr7
1420 CHECK_SETUP(r12)
1421 CHECK_MUTEX_TYPE()
1422 CHECK_THREAD(MUTEX_THREAD)
1423
1424 lwz r5,MUTEX_DATA(r3) ; Get the lock
1425 rlwinm. r4,r5,0,30,31 ; Quick check
1426 bne-- L_mutex_unlock_slow ; Can not get it now...
1427 mfmsr r9 ; Get the MSR value
1428 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1429 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1430 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1431 andc r9,r9,r0 ; Clear FP and VEC
1432 andc r7,r9,r7 ; Clear EE as well
1433 mtmsr r7 ; Turn off interruptions
1434 isync ; May have turned off vec and fp here
1435
1436 mlueLoop:
1437 lwarx r5,MUTEX_DATA,r3
1438 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1439 li r5,0 ; Clear the mutexlock
1440 bne-- mlueSlowX
1441 stwcx. r5,MUTEX_DATA,r3
1442 bne-- mlueLoop
1443 mtmsr r9 ; Say, any interrupts pending?
1444 blr
1445
1446 mlueSlowX:
1447 li r5,lgKillResv ; Killing field
1448 stwcx. r5,0,r5 ; Dump reservation
1449 mtmsr r9 ; Say, any interrupts pending?
1450 b L_mutex_unlock_slow ; Join slow path...
1451
1452 /*
1453 * void lck_mtx_unlock(lck_mtx_t* l)
1454 *
1455 */
1456 .align 5
1457 .globl EXT(lck_mtx_unlock)
1458 LEXT(lck_mtx_unlock)
1459 #if !MACH_LDEBUG
1460 .globl EXT(mutex_unlock_rwcmb)
1461 LEXT(mutex_unlock_rwcmb)
1462 #endif
1463 mluEnter:
1464 .globl EXT(mulckPatch_isync)
1465 LEXT(mulckPatch_isync)
1466 isync
1467 .globl EXT(mulckPatch_eieio)
1468 LEXT(mulckPatch_eieio)
1469 eieio
1470 mr r11,r3 ; Save lock addr
1471 mluEnter1:
1472 lwz r5,MUTEX_DATA(r3) ; Get the lock
1473 rlwinm. r4,r5,0,30,31 ; Quick check
1474 bne-- mluSlow0 ; Indirect or Can not get it now...
1475
1476 mluLoop:
1477 lwarx r5,MUTEX_DATA,r3
1478 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1479 li r5,0 ; Clear the mutexlock
1480 bne-- mluSlowX
1481 stwcx. r5,MUTEX_DATA,r3
1482 bne-- mluLoop
1483 blr
1484
1485 mluSlow0:
1486 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
1487 bne-- L_mutex_unlock_slow ; No, go handle contention
1488 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
1489 b mlueEnter1
1490 mluSlowX:
1491 li r5,lgKillResv ; Killing field
1492 stwcx. r5,0,r5 ; Dump reservation
1493
1494 L_mutex_unlock_slow:
1495
1496 PROLOG(0)
1497
1498 bl lockDisa ; Go get a lock on the mutex's interlock lock
1499 mr. r4,r3 ; Did we get it?
1500 lwz r3,FM_ARG0(r1) ; Restore the lock address
1501 bne++ muGotInt ; We got it just fine...
1502 mr r4,r11 ; Saved lock addr
1503 lis r3,hi16(mutex_failed3) ; Get the failed mutex message
1504 ori r3,r3,lo16(mutex_failed3) ; Get the failed mutex message
1505 bl EXT(panic) ; Call panic
1506 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1507
1508 .data
1509 mutex_failed3:
1510 STRINGD "attempt to interlock mutex (0x%08X) failed on mutex unlock\n\000"
1511 .text
1512
1513
1514 muGotInt:
1515 lwz r4,MUTEX_DATA(r3)
1516 andi. r5,r4,WAIT_FLAG ; are there any waiters ?
1517 rlwinm r4,r4,0,0,29
1518 beq+ muUnlock ; Nope, we're done...
1519
1520 mr r3,r11 ; Get the based lock address
1521 bl EXT(lck_mtx_unlock_wakeup) ; yes, wake a thread
1522 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1523 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1524 lwz r5,MUTEX_DATA(r3) ; load the lock
1525
1526 muUnlock:
1527 andi. r5,r5,WAIT_FLAG ; Get the unlock value
1528 eieio
1529 stw r5,MUTEX_DATA(r3) ; unlock the interlock and lock
1530
1531 EPILOG ; Deal with the stack now, enable_preemption doesn't always want one
1532 b epStart ; Go enable preemption...
1533
1534 /*
1535 * void lck_mtx_assert(lck_mtx_t* l, unsigned int)
1536 *
1537 */
1538 .align 5
1539 .globl EXT(lck_mtx_assert)
1540 LEXT(lck_mtx_assert)
1541 .globl EXT(_mutex_assert)
1542 LEXT(_mutex_assert)
1543 mr r11,r3
1544 maEnter:
1545 lwz r5,MUTEX_DATA(r3)
1546 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
1547 bne-- maCheck ; No, go check the assertion
1548 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
1549 b maEnter
1550 maCheck:
1551 mfsprg r6,1 ; load the current thread
1552 rlwinm r5,r5,0,0,29 ; Extract the lock owner
1553 cmpwi r4,MUTEX_ASSERT_OWNED
1554 cmplw cr1,r6,r5 ; Is the lock held by current act
1555 crandc cr0_eq,cr0_eq,cr1_eq ; Check owned assertion
1556 bne-- maNext
1557 mr r4,r11
1558 lis r3,hi16(mutex_assert1) ; Get the failed mutex message
1559 ori r3,r3,lo16(mutex_assert1) ; Get the failed mutex message
1560 b maPanic ; Panic path
1561 maNext:
1562 cmpwi r4,MUTEX_ASSERT_NOTOWNED ; Check not owned assertion
1563 crand cr0_eq,cr0_eq,cr1_eq ;
1564 bnelr++
1565 maPanic:
1566 PROLOG(0)
1567 mr r4,r11
1568 lis r3,hi16(mutex_assert2) ; Get the failed mutex message
1569 ori r3,r3,lo16(mutex_assert2) ; Get the failed mutex message
1570 bl EXT(panic) ; Call panic
1571 BREAKPOINT_TRAP ; We die here anyway
1572
1573 .data
1574 mutex_assert1:
1575 STRINGD "mutex (0x%08X) not owned\n\000"
1576 mutex_assert2:
1577 STRINGD "mutex (0x%08X) owned\n\000"
1578 .text
1579
1580
1581 /*
1582 * void lck_mtx_ilk_unlock(lck_mtx *lock)
1583 */
1584 .globl EXT(lck_mtx_ilk_unlock)
1585 LEXT(lck_mtx_ilk_unlock)
1586
1587 lwz r10,MUTEX_DATA(r3)
1588 rlwinm r10,r10,0,0,30
1589 eieio
1590 stw r10,MUTEX_DATA(r3)
1591
1592 b epStart ; Go enable preemption...
1593
1594 /*
1595 * void _enable_preemption_no_check(void)
1596 *
1597 * This version does not check if we get preempted or not
1598 */
1599 .align 4
1600 .globl EXT(_enable_preemption_no_check)
1601
1602 LEXT(_enable_preemption_no_check)
1603
1604 cmplw cr1,r1,r1 ; Force zero cr so we know not to check if preempted
1605 b epCommn ; Join up with the other enable code...
1606
1607 /*
1608 * void _enable_preemption(void)
1609 *
1610 * This version checks if we get preempted or not
1611 */
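/*
 * What the common enable path below does, sketched in C (illustration only;
 * the helper names are informal stand-ins for the mfsprg/mtmsr/sc sequences
 * that follow):
 *
 *	void _enable_preemption(void)
 *	{
 *		thread_t act = current_activation();
 *		if (--act->preempt_cnt < 0)
 *			panic("enable_preemption: preemption_level %d\n", act->preempt_cnt);
 *		if (act->preempt_cnt == 0 && interrupts_enabled() &&
 *		    (pending_ast(act) & AST_URGENT))
 *			do_preempt_firmware_call();	// the 'sc' to DoPreemptCall
 *	}
 */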
1612 .align 5
1613 .globl EXT(_enable_preemption)
1614
1615 LEXT(_enable_preemption)
1616
1617 ; Here is where we enable preemption.
1618
1619 epStart:
1620 cmplwi cr1,r1,0 ; Force non-zero cr so we know to check if preempted
1621
1622 epCommn:
1623 mfsprg r3,1 ; Get current activation
1624 li r8,-1 ; Get a decrementer
1625 lwz r5,ACT_PREEMPT_CNT(r3) ; Get the preemption level
1626 add. r5,r5,r8 ; Bring down the disable count
1627 blt- epTooFar ; Yeah, we did...
1628 stw r5,ACT_PREEMPT_CNT(r3) ; Save it back
1629 crandc cr0_eq,cr0_eq,cr1_eq
1630 beq+ epCheckPreempt ; Go check if we need to be preempted...
1631 blr ; Leave...
1632 epTooFar:
1633 mr r4,r5
1634 lis r3,hi16(epTooFarStr) ; First half of panic string
1635 ori r3,r3,lo16(epTooFarStr) ; Second half of panic string
1636 PROLOG(0)
1637 bl EXT(panic)
1638 BREAKPOINT_TRAP ; We die here anyway
1639
1640 .data
1641 epTooFarStr:
1642 STRINGD "enable_preemption: preemption_level %d\n\000"
1643
1644 .text
1645 .align 5
1646 epCheckPreempt:
1647 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1648 mfmsr r9 ; Get the MSR value
1649 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1650 andi. r4,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off
1651 beq+ epCPno ; No preemption here...
1652 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1653 andc r9,r9,r0 ; Clear FP and VEC
1654 andc r7,r9,r7 ; Clear EE as well
1655 mtmsr r7 ; Turn off interruptions
1656 isync ; May have turned off vec and fp here
1657 lwz r3,ACT_PER_PROC(r3) ; Get the per_proc block
1658 lwz r7,PP_PENDING_AST(r3) ; Get pending AST mask
1659 li r5,AST_URGENT ; Get the requests we do honor
1660 lis r0,hi16(DoPreemptCall) ; Just in case, get the top of firmware call
1661 and. r7,r7,r5 ; Should we preempt?
1662 ori r0,r0,lo16(DoPreemptCall) ; Merge in bottom part
1663 mtmsr r9 ; Allow interrupts if we can
1664 epCPno:
1665 beqlr+ ; We probably will not preempt...
1666 sc ; Do the preemption
1667 blr ; Now, go away now...
1668
1669 /*
1670 * void disable_preemption(void)
1671 *
1672 * Here is where we disable preemption.
1673 */
1674 .align 5
1675 .globl EXT(_disable_preemption)
1676
1677 LEXT(_disable_preemption)
1678
1679 mfsprg r6,1 ; Get the current activation
1680 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1681 addi r5,r5,1 ; Bring up the disable count
1682 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1683 blr ; Return...
1684
1685 /*
1686 * int get_preemption_level(void)
1687 *
1688 * Return the current preemption level
1689 */
1690 .align 5
1691 .globl EXT(get_preemption_level)
1692
1693 LEXT(get_preemption_level)
1694
1695 mfsprg r6,1 ; Get current activation
1696 lwz r3,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1697 blr ; Return...
1698
1699 /*
1700 * void ppc_usimple_lock_init(simple_lock_t, etap_event_t)
1701 *
1702 * Initialize a simple lock.
1703 */
1704 .align 5
1705 .globl EXT(ppc_usimple_lock_init)
1706
1707 LEXT(ppc_usimple_lock_init)
1708
1709 li r0, 0 ; set lock to free == 0
1710 stw r0, 0(r3) ; Initialize the lock
1711 blr
1712
1713 /*
1714 * void lck_spin_lock(lck_spin_t *)
1715 * void ppc_usimple_lock(simple_lock_t *)
1716 *
1717 */
1718 .align 5
1719 .globl EXT(lck_spin_lock)
1720 LEXT(lck_spin_lock)
1721 .globl EXT(ppc_usimple_lock)
1722 LEXT(ppc_usimple_lock)
1723
1724 mfsprg r6,1 ; Get the current activation
1725 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1726 addi r5,r5,1 ; Bring up the disable count
1727 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1728 mr r5,r3 ; Get the address of the lock
1729 li r8,0 ; Set r8 to zero
1730 li r4,0 ; Set r4 to zero
1731
1732 slcktry: lwarx r11,SLOCK_ILK,r5 ; Grab the lock value
1733 andi. r3,r11,ILK_LOCKED ; Is it locked?
1734 ori r11,r6,ILK_LOCKED ; Set interlock
1735 bne-- slckspin ; Yeah, wait for it to clear...
1736 stwcx. r11,SLOCK_ILK,r5 ; Try to seize that there durn lock
1737 bne-- slcktry ; Couldn't get it...
1738 .globl EXT(slckPatch_isync)
1739 LEXT(slckPatch_isync)
1740 isync ; Make sure we don't use a speculatively loaded value
1741 blr ; Go on home...
1742
1743 slckspin: li r11,lgKillResv ; Killing field
1744 stwcx. r11,0,r11 ; Kill reservation
1745
1746 mr. r4,r4 ; Test timeout value
1747 bne++ slockspin0
1748 lis r4,hi16(EXT(LockTimeOut)) ; Get the high part
1749 ori r4,r4,lo16(EXT(LockTimeOut)) ; And the low part
1750 lwz r4,0(r4) ; Get the timeout value
1751
1752 slockspin0: mr. r8,r8 ; Is r8 set to zero
1753 bne++ slockspin1 ; No, not the first spin attempt...
1754 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1755 mfmsr r9 ; Get the MSR value
1756 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1757 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1758 andc r9,r9,r0 ; Clear FP and VEC
1759 andc r7,r9,r7 ; Clear EE as well
1760 mtmsr r7 ; Turn off interruptions
1761 isync ; May have turned off vec and fp here
1762 mftb r8 ; Get timestamp on entry
1763 b slcksniff
1764
1765 slockspin1: mtmsr r7 ; Turn off interruptions
1766 mftb r8 ; Get timestamp on entry
1767
1768 slcksniff: lwz r3,SLOCK_ILK(r5) ; Get that lock in here
1769 andi. r3,r3,ILK_LOCKED ; Is it free yet?
1770 beq++ slckretry ; Yeah, try for it again...
1771
1772 mftb r10 ; Time stamp us now
1773 sub r10,r10,r8 ; Get the elapsed time
1774 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
1775 blt++ slcksniff ; Not yet...
1776
1777 mtmsr r9 ; Say, any interrupts pending?
1778
1779 ; The following instructions force the pipeline to be interlocked so that only one
1780 ; instruction is issued per cycle. This ensures that we stay enabled for a long enough
1781 ; time; if it's too short, pending interruptions will not have a chance to be taken
1782
1783 subi r4,r4,128 ; Back off elapsed time from timeout value
1784 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1785 mr. r4,r4 ; See if we used the whole timeout
1786 li r3,0 ; Assume a timeout return code
1787 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1788
1789 ble-- slckfail ; We failed
1790 b slockspin1 ; Now that we've opened an enable window, keep trying...
1791 slckretry:
1792 mtmsr r9 ; Restore interrupt state
1793 li r8,1 ; Show already through once
1794 b slcktry
1795 slckfail: ; We couldn't get the lock
1796 lis r3,hi16(slckpanic_str)
1797 ori r3,r3,lo16(slckpanic_str)
1798 mr r4,r5
1799 mflr r5
1800 PROLOG(0)
1801 bl EXT(panic)
1802 BREAKPOINT_TRAP ; We die here anyway
1803
1804 .data
1805 slckpanic_str:
1806 STRINGD "simple lock (0x%08X) deadlock detection, pc=0x%08X\n\000"
1807 .text
1808
1809 /*
1810 * boolean_t lck_spin_try_lock(lck_spin_t *)
1811 * unsigned int ppc_usimple_lock_try(simple_lock_t *)
1812 *
1813 */
1814 .align 5
1815 .globl EXT(lck_spin_try_lock)
1816 LEXT(lck_spin_try_lock)
1817 .globl EXT(ppc_usimple_lock_try)
1818 LEXT(ppc_usimple_lock_try)
1819
1820 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1821 mfmsr r9 ; Get the MSR value
1822 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1823 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1824 andc r9,r9,r0 ; Clear FP and VEC
1825 andc r7,r9,r7 ; Clear EE as well
1826 mtmsr r7 ; Disable interruptions and thus, preemption
1827 mfsprg r6,1 ; Get current activation
1828
1829 lwz r11,SLOCK_ILK(r3) ; Get the lock
1830 andi. r5,r11,ILK_LOCKED ; Check it...
1831 bne-- slcktryfail ; Quickly fail...
1832
1833 slcktryloop:
1834 lwarx r11,SLOCK_ILK,r3 ; Ld from addr of arg and reserve
1835
1836 andi. r5,r11,ILK_LOCKED ; TEST...
1837 ori r5,r6,ILK_LOCKED
1838 bne-- slcktryfailX ; branch if taken. Predict free
1839
1840 stwcx. r5,SLOCK_ILK,r3 ; And SET (if still reserved)
1841 bne-- slcktryloop ; If set failed, loop back
1842
1843 .globl EXT(stlckPatch_isync)
1844 LEXT(stlckPatch_isync)
1845 isync
1846
1847 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1848 addi r5,r5,1 ; Bring up the disable count
1849 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1850
1851 mtmsr r9 ; Allow interruptions now
1852 li r3,1 ; Set that the lock was free
1853 blr
1854
1855 slcktryfailX:
1856 li r5,lgKillResv ; Killing field
1857 stwcx. r5,0,r5 ; Kill reservation
1858
1859 slcktryfail:
1860 mtmsr r9 ; Allow interruptions now
1861 li r3,0 ; FAILURE - lock was taken
1862 blr
1863
1864
1865 /*
1866 * void lck_spin_unlock(lck_spin_t *)
1867 * void ppc_usimple_unlock_rwcmb(simple_lock_t *)
1868 *
1869 */
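/*
 * In C terms (a sketch only) the unlock is a barrier, a plain store of zero,
 * and then re-enabling preemption via epStart; enable_preemption() below is
 * an illustrative stand-in for that tail call.  The _rwmb variant further
 * down uses a full sync instead of the isync/eieio pair.
 *
 *   #include <stdint.h>
 *   extern void enable_preemption(void);
 *
 *   static void spin_unlock(volatile uint32_t *lock)
 *   {
 *       __sync_synchronize();   // models the isync/eieio (or sync) barrier
 *       *lock = 0;              // clear the interlock word
 *       enable_preemption();    // epStart in the assembly
 *   }
 */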
1870 .align 5
1871 .globl EXT(lck_spin_unlock)
1872 LEXT(lck_spin_unlock)
1873 .globl EXT(ppc_usimple_unlock_rwcmb)
1874 LEXT(ppc_usimple_unlock_rwcmb)
1875
1876 li r0,0
1877 .globl EXT(sulckPatch_isync)
1878 LEXT(sulckPatch_isync)
1879 isync
1880 .globl EXT(sulckPatch_eieio)
1881 LEXT(sulckPatch_eieio)
1882 eieio
1883 stw r0, SLOCK_ILK(r3)
1884
1885 b epStart ; Go enable preemption...
1886
1887 /*
1888 * void ppc_usimple_unlock_rwmb(simple_lock_t *)
1889 *
1890 */
1891 .align 5
1892 .globl EXT(ppc_usimple_unlock_rwmb)
1893
1894 LEXT(ppc_usimple_unlock_rwmb)
1895
1896 li r0,0
1897 sync
1898 stw r0, SLOCK_ILK(r3)
1899
1900 b epStart ; Go enable preemption...
1901
1902 /*
1903 * void enter_funnel_section(funnel_t *)
1904 *
1905 */
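/*
 * The uncontended path in C (a sketch only): when kdebug tracing and split
 * funnels are off and the funnel mutex word is free, install the current
 * activation and record funnel ownership; anything else falls back to
 * thread_funnel_set().  The struct layouts, current_act(), fnl_mutex_word
 * and the extern prototypes are illustrative stand-ins (types guessed) for
 * LOCK_FNL_MUTEX and the THREAD_FUNNEL_* offsets.
 *
 *   #include <stdint.h>
 *   #include <stddef.h>
 *
 *   #define TH_FN_OWNED 0x01                           // as above
 *   struct funnel { volatile uintptr_t fnl_mutex_word; };
 *   struct activation { struct funnel *funnel_lock; uint32_t funnel_state; };
 *   extern struct activation *current_act(void);
 *   extern void thread_funnel_set(struct funnel *, int);
 *   extern int kdebug_enable, split_funnel_off;
 *
 *   static void enter_funnel(struct funnel *fnl)
 *   {
 *       struct activation *act = current_act();
 *       if (!kdebug_enable && !split_funnel_off &&
 *           fnl->fnl_mutex_word == 0 &&
 *           __sync_bool_compare_and_swap(&fnl->fnl_mutex_word, 0,
 *                                        (uintptr_t)act)) {
 *           act->funnel_lock  = fnl;                   // THREAD_FUNNEL_LOCK
 *           act->funnel_state = TH_FN_OWNED;           // THREAD_FUNNEL_STATE
 *           return;
 *       }
 *       thread_funnel_set(fnl, 1);                     // TRUE: slow path
 *   }
 */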
1906 .align 5
1907 .globl EXT(enter_funnel_section)
1908
1909 LEXT(enter_funnel_section)
1910
1911 #if !MACH_LDEBUG
1912 lis r10,hi16(EXT(kdebug_enable))
1913 ori r10,r10,lo16(EXT(kdebug_enable))
1914 lwz r10,0(r10)
1915 lis r11,hi16(EXT(split_funnel_off))
1916 ori r11,r11,lo16(EXT(split_funnel_off))
1917 lwz r11,0(r11)
1918 or. r10,r11,r10 ; Check kdebug_enable or split_funnel_off
1919 bne- L_enter_funnel_section_slow ; If set, call the slow path
1920 mfsprg r6,1 ; Get the current activation
1921 lwz r7,LOCK_FNL_MUTEX(r3)
1922
1923 lwz r5,0(r7) ; Get lock quickly
1924 mr. r5,r5 ; Locked?
1925 bne-- L_enter_funnel_section_slow ; Yup...
1926
1927 L_enter_funnel_section_loop:
1928 lwarx r5,0,r7 ; Load the mutex lock
1929 mr. r5,r5
1930 bne-- L_enter_funnel_section_slowX ; Go to the slow path
1931 stwcx. r6,0,r7 ; Grab the lock
1932 bne-- L_enter_funnel_section_loop ; Loop back if failed
1933 .globl EXT(entfsectPatch_isync)
1934 LEXT(entfsectPatch_isync)
1935 isync ; Stop prefetching
1936 li r7,TH_FN_OWNED
1937 stw r3,THREAD_FUNNEL_LOCK(r6) ; Set the funnel lock reference
1938 stw r7,THREAD_FUNNEL_STATE(r6) ; Set the funnel state
1939 blr
1940
1941 L_enter_funnel_section_slowX:
1942 li r4,lgKillResv ; Killing field
1943 stwcx. r4,0,r4 ; Kill reservation
1944
1945 L_enter_funnel_section_slow:
1946 #endif
1947 li r4,TRUE
1948 b EXT(thread_funnel_set)
1949
1950 /*
1951 * void exit_funnel_section(void)
1952 *
1953 */
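/*
 * The mirror image of enter_funnel_section, in C (a sketch only): the inline
 * release is legal only when neither the interlock bit nor the waiters bit
 * is set in the funnel mutex word (the rlwinm. test of the low two bits);
 * otherwise thread_funnel_set(..., FALSE) performs the full mutex unlock.
 * Types and helpers are the same illustrative stand-ins used above.
 *
 *   static void exit_funnel(void)
 *   {
 *       struct activation *act = current_act();
 *       struct funnel *fnl = act->funnel_lock;
 *       if (fnl == NULL)
 *           return;                                    // not funneled
 *       uintptr_t old = fnl->fnl_mutex_word;
 *       if (!kdebug_enable && (old & 0x3) == 0 &&      // no waiter/interlock
 *           __sync_bool_compare_and_swap(&fnl->fnl_mutex_word, old, 0)) {
 *           act->funnel_state = 0;
 *           act->funnel_lock  = NULL;
 *           return;
 *       }
 *       thread_funnel_set(fnl, 0);                     // FALSE: slow path
 *   }
 */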
1954 .align 5
1955 .globl EXT(exit_funnel_section)
1956
1957 LEXT(exit_funnel_section)
1958
1959 mfsprg r6,1 ; Get the current activation
1960 lwz r3,THREAD_FUNNEL_LOCK(r6) ; Get the funnel lock
1961 mr. r3,r3 ; Check on funnel held
1962 beq- L_exit_funnel_section_ret ;
1963 #if !MACH_LDEBUG
1964 lis r10,hi16(EXT(kdebug_enable))
1965 ori r10,r10,lo16(EXT(kdebug_enable))
1966 lwz r10,0(r10)
1967 mr. r10,r10
1968 bne- L_exit_funnel_section_slow ; If set, call the slow path
1969 lwz r7,LOCK_FNL_MUTEX(r3) ; Get the funnel mutex lock
1970 .globl EXT(retfsectPatch_isync)
1971 LEXT(retfsectPatch_isync)
1972 isync
1973 .globl EXT(retfsectPatch_eieio)
1974 LEXT(retfsectPatch_eieio)
1975 eieio
1976
1977 lwz r5,0(r7) ; Get lock
1978 rlwinm. r4,r5,0,30,31 ; Quick check for bail if pending waiter or interlock set
1979 bne-- L_exit_funnel_section_slow ; No can get...
1980
1981 L_exit_funnel_section_loop:
1982 lwarx r5,0,r7
1983 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1984 li r5,0 ; Clear the mutexlock
1985 bne-- L_exit_funnel_section_slowX
1986 stwcx. r5,0,r7 ; Release the funnel mutexlock
1987 bne-- L_exit_funnel_section_loop
1988 li r7,0
1989 stw r7,THREAD_FUNNEL_STATE(r6) ; Clear the funnel state
1990 stw r7,THREAD_FUNNEL_LOCK(r6) ; Clear the funnel lock reference
1991 blr ; Return
1992
1993 L_exit_funnel_section_slowX:
1994 li r4,lgKillResv ; Killing field
1995 stwcx. r4,0,r4 ; Kill it
1996
1997 L_exit_funnel_section_slow:
1998 #endif
1999 li r4,FALSE
2000 b EXT(thread_funnel_set)
2001 L_exit_funnel_section_ret:
2002 blr
2003
2004 /*
2005 * void lck_rw_lock_exclusive(lck_rw_t*)
2006 *
2007 */
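/*
 * The rw lock word keeps a 16-bit reader count in its upper half and the
 * flag bits (ILK_LOCKED, WAIT_FLAG, WANT_UPGRADE, WANT_EXCL, as defined at
 * the top of this file) in its low byte, which is why reader counts are
 * adjusted with addis below.  A C sketch of the exclusive fast path (not the
 * kernel code; rw_lock_exclusive_slow() stands in for the tail calls to
 * lck_rw_lock_exclusive_gen/_ext):
 *
 *   #include <stdint.h>
 *
 *   #define ILK_LOCKED   0x01
 *   #define WAIT_FLAG    0x02
 *   #define WANT_UPGRADE 0x04
 *   #define WANT_EXCL    0x08
 *   extern void rw_lock_exclusive_slow(volatile uint32_t *);
 *
 *   static void rw_lock_exclusive(volatile uint32_t *data)
 *   {
 *       uint32_t old;
 *       do {
 *           old = *data;
 *           // Free only if nothing but (possibly) WAIT_FLAG is set: no
 *           // readers, no interlock, no exclusive or upgrade holder.
 *           if ((old & ~(uint32_t)WAIT_FLAG) != 0) {
 *               rw_lock_exclusive_slow(data);
 *               return;
 *           }
 *       } while (!__sync_bool_compare_and_swap(data, old, old | WANT_EXCL));
 *       // acquired; the isync in the assembly keeps later loads inside
 *   }
 */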
2008 .align 5
2009 .globl EXT(lck_rw_lock_exclusive)
2010 LEXT(lck_rw_lock_exclusive)
2011 #if !MACH_LDEBUG
2012 .globl EXT(lock_write)
2013 LEXT(lock_write)
2014 #endif
2015 rwleloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2016 rlwinm. r7,r5,30,1,31 ; Can we have it?
2017 ori r6,r5,WANT_EXCL ; Mark Exclusive
2018 bne-- rwlespin ; Branch if cannot be held
2019 stwcx. r6,RW_DATA,r3 ; Update lock word
2020 bne-- rwleloop
2021 .globl EXT(rwlePatch_isync)
2022 LEXT(rwlePatch_isync)
2023 isync
2024 blr
2025 rwlespin:
2026 li r4,lgKillResv ; Killing field
2027 stwcx. r4,0,r4 ; Kill it
2028 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2029 bne-- rwlespin1 ; No, go handle contention
2030 mr r4,r3 ; pass lock pointer
2031 lwz r3,RW_PTR(r3) ; load lock ext pointer
2032 b EXT(lck_rw_lock_exclusive_ext)
2033 rwlespin1:
2034 b EXT(lck_rw_lock_exclusive_gen)
2035
2036 /*
2037 * void lck_rw_lock_shared(lck_rw_t*)
2038 *
2039 */
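/*
 * Shared acquisition just bumps the reader count in the top half of the word
 * (addis r6,r5,1 adds 0x00010000) provided no writer, upgrader or interlock
 * is present.  C sketch under the same assumptions and defines as the
 * exclusive sketch above; rw_lock_shared_slow() stands in for the
 * lck_rw_lock_shared_gen/_ext tail calls:
 *
 *   extern void rw_lock_shared_slow(volatile uint32_t *);
 *
 *   static void rw_lock_shared(volatile uint32_t *data)
 *   {
 *       uint32_t old;
 *       do {
 *           old = *data;
 *           if (old & (WANT_EXCL | WANT_UPGRADE | ILK_LOCKED)) {
 *               rw_lock_shared_slow(data);
 *               return;
 *           }
 *       } while (!__sync_bool_compare_and_swap(data, old, old + 0x00010000));
 *   }
 */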
2040 .align 5
2041 .globl EXT(lck_rw_lock_shared)
2042 LEXT(lck_rw_lock_shared)
2043 #if !MACH_LDEBUG
2044 .globl EXT(lock_read)
2045 LEXT(lock_read)
2046 #endif
2047 rwlsloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2048 andi. r7,r5,WANT_EXCL|WANT_UPGRADE|ILK_LOCKED ; Can we have it?
2049 addis r6,r5,1 ; Increment read cnt
2050 bne-- rwlsspin ; Branch if cannot be held
2051 stwcx. r6,RW_DATA,r3 ; Update lock word
2052 bne-- rwlsloop
2053 .globl EXT(rwlsPatch_isync)
2054 LEXT(rwlsPatch_isync)
2055 isync
2056 blr
2057 rwlsspin:
2058 li r4,lgKillResv ; Killing field
2059 stwcx. r4,0,r4 ; Kill it
2060 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2061 bne-- rwlsspin1 ; No, go handle contention
2062 mr r4,r3 ; pass lock pointer
2063 lwz r3,RW_PTR(r3) ; load lock ext pointer
2064 b EXT(lck_rw_lock_shared_ext)
2065 rwlsspin1:
2066 b EXT(lck_rw_lock_shared_gen)
2067
2068 /*
2069 * boolean_t lck_rw_lock_shared_to_exclusive(lck_rw_t*)
2070 *
2071 */
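/*
 * The upgrade fast path drops our read reference (addis r6,r5,0xFFFF
 * subtracts 0x00010000), and if we were the only reader with no interlock or
 * competing upgrade it claims WANT_UPGRADE and returns FALSE, as the comment
 * on the success path below notes.  C sketch under the same assumptions as
 * the sketches above; rw_shared_to_exclusive_slow() stands in for the
 * lck_rw_lock_shared_to_exclusive_gen/_ext tail calls:
 *
 *   extern int rw_shared_to_exclusive_slow(volatile uint32_t *);
 *
 *   static int rw_shared_to_exclusive(volatile uint32_t *data)
 *   {
 *       uint32_t old, dropped;
 *       do {
 *           old     = *data;
 *           dropped = old - 0x00010000;               // give up our read ref
 *           if (dropped & (0xFFFF0000u | WANT_UPGRADE | ILK_LOCKED))
 *               return rw_shared_to_exclusive_slow(data);
 *       } while (!__sync_bool_compare_and_swap(data, old,
 *                                              dropped | WANT_UPGRADE));
 *       return 0;                                     // FALSE: upgraded here
 *   }
 */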
2072 .align 5
2073 .globl EXT(lck_rw_lock_shared_to_exclusive)
2074 LEXT(lck_rw_lock_shared_to_exclusive)
2075 #if !MACH_LDEBUG
2076 .globl EXT(lock_read_to_write)
2077 LEXT(lock_read_to_write)
2078 #endif
2079 rwlseloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2080 addis r6,r5,0xFFFF ; Decrement read cnt
2081 lis r8,0xFFFF ; Get read count mask
2082 ori r8,r8,WANT_UPGRADE|ILK_LOCKED ; Include Interlock and upgrade flags
2083 and. r7,r6,r8 ; Can we have it?
2084 ori r9,r6,WANT_UPGRADE ; Mark Exclusive
2085 bne-- rwlsespin ; Branch if cannot be held
2086 stwcx. r9,RW_DATA,r3 ; Update lock word
2087 bne-- rwlseloop
2088 .globl EXT(rwlsePatch_isync)
2089 LEXT(rwlsePatch_isync)
2090 isync
2091 li r3,0 ; Succeed, return FALSE...
2092 blr
2093 rwlsespin:
2094 li r4,lgKillResv ; Killing field
2095 stwcx. r4,0,r4 ; Kill it
2096 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2097 bne-- rwlsespin1 ; No, go handle contention
2098 mr r4,r3 ; pass lock pointer
2099 lwz r3,RW_PTR(r3) ; load lock ext pointer
2100 b EXT(lck_rw_lock_shared_to_exclusive_ext)
2101 rwlsespin1:
2102 b EXT(lck_rw_lock_shared_to_exclusive_gen)
2103
2104
2105
2106 /*
2107 * void lck_rw_lock_exclusive_to_shared(lck_rw_t*)
2108 *
2109 */
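/*
 * Downgrading clears whichever of WANT_EXCL/WANT_UPGRADE we hold together
 * with WAIT_FLAG, installs a reader count of exactly one, and wakes anyone
 * queued behind the exclusive hold.  C sketch under the same assumptions as
 * above; rw_exclusive_to_shared_slow() and rw_wakeup() stand in for the
 * lck_rw_lock_exclusive_to_shared_gen/_ext and thread_wakeup(RW_EVENT) calls:
 *
 *   extern void rw_exclusive_to_shared_slow(volatile uint32_t *);
 *   extern void rw_wakeup(volatile uint32_t *);
 *
 *   static void rw_exclusive_to_shared(volatile uint32_t *data)
 *   {
 *       uint32_t old, mask, newval;
 *       do {
 *           old = *data;
 *           if (old & ILK_LOCKED) {                   // interlocked: bail
 *               rw_exclusive_to_shared_slow(data);
 *               return;
 *           }
 *           mask   = (old & WANT_UPGRADE) ? (WANT_UPGRADE | WAIT_FLAG)
 *                                         : (WANT_EXCL   | WAIT_FLAG);
 *           newval = ((old & ~mask) & 0xFFFFu) | 0x00010000; // one reader
 *       } while (!__sync_bool_compare_and_swap(data, old, newval));
 *       if (old & WAIT_FLAG)
 *           rw_wakeup(data);                          // wake queued waiters
 *   }
 */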
2110 .align 5
2111 .globl EXT(lck_rw_lock_exclusive_to_shared)
2112 LEXT(lck_rw_lock_exclusive_to_shared)
2113 #if !MACH_LDEBUG
2114 .globl EXT(lock_write_to_read)
2115 LEXT(lock_write_to_read)
2116 #endif
2117 .globl EXT(rwlesPatch_isync)
2118 LEXT(rwlesPatch_isync)
2119 isync
2120 .globl EXT(rwlesPatch_eieio)
2121 LEXT(rwlesPatch_eieio)
2122 eieio
2123 rwlesloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2124 andi. r7,r5,ILK_LOCKED ; Test interlock flag
2125 bne-- rwlesspin ; Branch if interlocked
2126 lis r6,1 ; Get 1 for read count
2127 andi. r10,r5,WANT_UPGRADE ; Is it held with upgrade
2128 li r9,WANT_UPGRADE|WAIT_FLAG ; Get upgrade and wait flags mask
2129 bne rwlesexcl1 ; Skip if held with upgrade
2130 li r9,WANT_EXCL|WAIT_FLAG ; Get exclusive and wait flags mask
2131 rwlesexcl1:
2132 andc r7,r5,r9 ; Mark it free
2133 rlwimi r6,r7,0,16,31 ; Set shared cnt to one
2134 stwcx. r6,RW_DATA,r3 ; Update lock word
2135 bne-- rwlesloop
2136 andi. r7,r5,WAIT_FLAG ; Test wait flag
2137 beqlr++ ; Return if no waiters
2138 addi r3,r3,RW_EVENT ; Get lock event address
2139 b EXT(thread_wakeup) ; wakeup waiters
2140 rwlesspin:
2141 li r4,lgKillResv ; Killing field
2142 stwcx. r4,0,r4 ; Kill it
2143 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2144 bne-- rwlesspin1 ; No, go handle contention
2145 mr r4,r3 ; pass lock pointer
2146 lwz r3,RW_PTR(r3) ; load lock ext pointer
2147 b EXT(lck_rw_lock_exclusive_to_shared_ext)
2148 rwlesspin1:
2149 b EXT(lck_rw_lock_exclusive_to_shared_gen)
2150
2151
2152
2153 /*
2154 * boolean_t lck_rw_try_lock_exclusive(lck_rw_t*)
2155 *
2156 */
2157 .align 5
2158 .globl EXT(lck_rw_try_lock_exclusive)
2159 LEXT(lck_rw_try_lock_exclusive)
2160 lis r10,0xFFFF ; Load read count mask
2161 ori r10,r10,WANT_EXCL|WANT_UPGRADE ; Include exclusive and upgrade flags
2162 rwtleloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2163 andi. r7,r5,ILK_LOCKED ; Test interlock flag
2164 bne-- rwtlespin ; Branch if interlocked
2165 and. r7,r5,r10 ; Can we have it
2166 ori r6,r5,WANT_EXCL ; Mark Exclusive
2167 bne-- rwtlefail ;
2168 stwcx. r6,RW_DATA,r3 ; Update lock word
2169 bne-- rwtleloop
2170 .globl EXT(rwtlePatch_isync)
2171 LEXT(rwtlePatch_isync)
2172 isync
2173 li r3,1 ; Return TRUE
2174 blr
2175 rwtlefail:
2176 li r4,lgKillResv ; Killing field
2177 stwcx. r4,0,r4 ; Kill it
2178 li r3,0 ; Return FALSE
2179 blr
2180 rwtlespin:
2181 li r4,lgKillResv ; Killing field
2182 stwcx. r4,0,r4 ; Kill it
2183 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2184 bne-- rwtlespin1 ; No, go handle contention
2185 mr r4,r3 ; pass lock pointer
2186 lwz r3,RW_PTR(r3) ; load lock ext pointer
2187 b EXT(lck_rw_try_lock_exclusive_ext)
2188 rwtlespin1:
2189 b EXT(lck_rw_try_lock_exclusive_gen)
2190
2191
2192 /*
2193 * boolean_t lck_rw_try_lock_shared(lck_rw_t*)
2194 *
2195 */
2196 .align 5
2197 .globl EXT(lck_rw_try_lock_shared)
2198 LEXT(lck_rw_try_lock_shared)
2199 rwtlsloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2200 andi. r7,r5,ILK_LOCKED ; Test interlock flag
2201 bne-- rwtlsspin ; Branch if interlocked
2202 andi. r7,r5,WANT_EXCL|WANT_UPGRADE ; So, can we have it?
2203 addis r6,r5,1 ; Increment read cnt
2204 bne-- rwtlsfail ; Branch if held exclusive
2205 stwcx. r6,RW_DATA,r3 ; Update lock word
2206 bne-- rwtlsloop
2207 .globl EXT(rwtlsPatch_isync)
2208 LEXT(rwtlsPatch_isync)
2209 isync
2210 li r3,1 ; Return TRUE
2211 blr
2212 rwtlsfail:
2213 li r3,0 ; Return FALSE
2214 blr
2215 rwtlsspin:
2216 li r4,lgKillResv ; Killing field
2217 stwcx. r4,0,r4 ; Kill it
2218 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2219 bne-- rwtlsspin1 ; No, go handle contention
2220 mr r4,r3 ; pass lock pointer
2221 lwz r3,RW_PTR(r3) ; load lock ext pointer
2222 b EXT(lck_rw_try_lock_shared_ext)
2223 rwtlsspin1:
2224 b EXT(lck_rw_try_lock_shared_gen)
2225
2226
2227
2228 /*
2229 * lck_rw_type_t lck_rw_done(lck_rw_t*)
2230 *
2231 */
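/*
 * lck_rw_done works out from the word itself whether the caller held the
 * lock shared or exclusive, releases that hold, and wakes waiters when the
 * last holder leaves.  C sketch under the same assumptions as above;
 * rw_done_slow() and rw_wakeup() stand in for the _gen/_ext and
 * thread_wakeup tail calls, and the return codes are placeholders for the
 * RW_SHARED/RW_EXCL constants the assembly actually returns:
 *
 *   #define RW_SHARED_T 1    // placeholder value for illustration
 *   #define RW_EXCL_T   2    // placeholder value for illustration
 *   extern int rw_done_slow(volatile uint32_t *);
 *
 *   static int rw_done(volatile uint32_t *data)
 *   {
 *       uint32_t old, newval, wake;
 *       int type;
 *       do {
 *           old = *data;
 *           if (old & ILK_LOCKED)
 *               return rw_done_slow(data);            // interlocked: bail
 *           if (old >> 16) {                          // reader count != 0
 *               type   = RW_SHARED_T;
 *               newval = old - 0x00010000;            // drop one reader
 *               wake   = 0;
 *               if ((newval >> 16) == 0) {            // last reader out
 *                   wake    = newval & WAIT_FLAG;
 *                   newval &= ~(uint32_t)WAIT_FLAG;
 *               }
 *           } else {                                  // exclusive or upgrade
 *               uint32_t mask = (old & WANT_UPGRADE)
 *                             ? (WANT_UPGRADE | WAIT_FLAG)
 *                             : (WANT_EXCL   | WAIT_FLAG);
 *               type   = RW_EXCL_T;
 *               newval = old & ~mask;
 *               wake   = old & WAIT_FLAG;
 *           }
 *       } while (!__sync_bool_compare_and_swap(data, old, newval));
 *       if (wake)
 *           rw_wakeup(data);
 *       return type;
 *   }
 */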
2232 .align 5
2233 .globl EXT(lck_rw_done)
2234 LEXT(lck_rw_done)
2235 #if !MACH_LDEBUG
2236 .globl EXT(lock_done)
2237 LEXT(lock_done)
2238 #endif
2239 .globl EXT(rwldPatch_isync)
2240 LEXT(rwldPatch_isync)
2241 isync
2242 .globl EXT(rwldPatch_eieio)
2243 LEXT(rwldPatch_eieio)
2244 eieio
2245 li r10,WAIT_FLAG ; Get wait flag
2246 lis r7,0xFFFF ; Get read cnt mask
2247 mr r12,r3 ; Save lock addr
2248 rwldloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2249 andi. r8,r5,ILK_LOCKED ; Test interlock flag
2250 bne-- rwldspin ; Branch if interlocked
2251 and. r8,r5,r7 ; Is it shared
2252 cmpi cr1,r8,0 ; Remember shared vs. exclusive in cr1 (re-tested after wakeup)
2253 beq cr1,rwldexcl ; No, check exclusive
2254 li r11,RW_SHARED ; Set return value
2255 addis r6,r5,0xFFFF ; Decrement read count
2256 and. r8,r6,r7 ; Is it still shared
2257 li r8,0 ; Assume no wakeup
2258 bne rwldshared1 ; Skip if still held shared
2259 and r8,r6,r10 ; Extract wait flag
2260 andc r6,r6,r10 ; Clear wait flag
2261 rwldshared1:
2262 b rwldstore
2263 rwldexcl:
2264 li r11,RW_EXCL ; Set return value
2265 li r9,WANT_UPGRADE ; Get upgrade flag
2266 and. r6,r5,r9 ; Is it held with upgrade
2267 li r9,WANT_UPGRADE|WAIT_FLAG ; Mask upgrade and wait flags
2268 bne rwldexcl1 ; Skip if held with upgrade
2269 li r9,WANT_EXCL|WAIT_FLAG ; Mask exclusive and wait flags
2270 rwldexcl1:
2271 andc r6,r5,r9 ; Mark it free
2272 and r8,r5,r10 ; Null if no waiter
2273 rwldstore:
2274 stwcx. r6,RW_DATA,r3 ; Update lock word
2275 bne-- rwldloop
2276 mr. r8,r8 ; wakeup needed?
2277 mr r3,r11 ; Return lock held type
2278 beqlr++
2279 mr r3,r12 ; Restore lock address
2280 PROLOG(0)
2281 addi r3,r3,RW_EVENT ; Get lock event address
2282 bl EXT(thread_wakeup) ; wakeup threads
2283 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
2284 mtcr r2
2285 EPILOG
2286 li r3,RW_SHARED ; Assume lock type shared
2287 bne cr1,rwldret ; Branch if it was held shared
2288 li r3,RW_EXCL ; Return lock type exclusive
2289 rwldret:
2290 blr
2291 rwldspin:
2292 li r4,lgKillResv ; Killing field
2293 stwcx. r4,0,r4 ; Kill it
2294 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2295 bne-- rwldspin1 ; No, go handle contention
2296 mr r4,r3 ; pass lock pointer
2297 lwz r3,RW_PTR(r3) ; load lock ext pointer
2298 b EXT(lck_rw_done_ext)
2299 rwldspin1:
2300 b EXT(lck_rw_done_gen)
2301
2302 /*
2303 * void lck_rw_ilk_lock(lck_rw_t *lock)
2304 */
2305 .globl EXT(lck_rw_ilk_lock)
2306 LEXT(lck_rw_ilk_lock)
2307 crclr hwtimeout ; no timeout option
2308 li r4,0 ; request default timeout value
2309 li r12,ILK_LOCKED ; Load bit mask
2310 b lckcomm ; Join on up...
2311
2312 /*
2313 * void lck_rw_ilk_unlock(lck_rw_t *lock)
2314 */
2315 .globl EXT(lck_rw_ilk_unlock)
2316 LEXT(lck_rw_ilk_unlock)
2317 li r4,1
2318 b EXT(hw_unlock_bit)