1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_assert.h>
30 #include <mach_ldebug.h>
31 #include <ppc/asm.h>
32 #include <ppc/proc_reg.h>
33 #include <assym.s>
34
35 #define STRING ascii
36
37 #define ILK_LOCKED 0x01
38 #define WAIT_FLAG 0x02
39 #define WANT_UPGRADE 0x04
40 #define WANT_EXCL 0x08
41
42 #define TH_FN_OWNED 0x01
43
44 # volatile CR bits
45 #define hwtimeout 20
46 #define mlckmiss 21
47
48 #define RW_DATA 0
49
50 #define PROLOG(space) \
51 stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
52 mfcr r2 __ASMNL__ \
53 mflr r0 __ASMNL__ \
54 stw r3,FM_ARG0(r1) __ASMNL__ \
55 stw r11,FM_ARG0+0x04(r1) __ASMNL__ \
56 stw r2,(FM_ALIGN(space)+FM_SIZE+FM_CR_SAVE)(r1) __ASMNL__ \
57 stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__
58
59 #define EPILOG \
60 lwz r1,0(r1) __ASMNL__ \
61 lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
62 mtlr r0 __ASMNL__
63
64 /*
65 * void hw_lock_init(hw_lock_t)
66 *
67 * Initialize a hardware lock.
68 */
69 .align 5
70 .globl EXT(hw_lock_init)
71
72 LEXT(hw_lock_init)
73
74 li r0, 0 ; set lock to free == 0
75 stw r0, 0(r3) ; Initialize the lock
76 blr
77
78 /*
79 * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
80 *
81 * Try to acquire spin-lock. The second parameter is the bit mask to test and set.
82  * Multiple bits may be set. Return success (1) or failure (0).
83 * Attempt will fail after timeout ticks of the timebase.
84 */
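/*
 * Illustration only: a rough C equivalent of the test-and-set semantics below,
 * using a GCC-style atomic builtin in place of the lwarx/stwcx. reservation
 * loop.  This sketch counts iterations rather than timebase ticks, and omits
 * the preemption-count bump done in lckcomm, so it is not a drop-in replacement.
 *
 *	unsigned int hw_lock_bit_sketch(volatile unsigned int *lock,
 *					unsigned int bit, unsigned int timeout)
 *	{
 *		do {
 *			unsigned int old = *lock;
 *			if ((old & bit) == 0 &&
 *			    __sync_bool_compare_and_swap(lock, old, old | bit))
 *				return 1;			// acquired
 *		} while (timeout--);
 *		return 0;				// gave up
 *	}
 */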
85 .align 5
86 .globl EXT(hw_lock_bit)
87
88 LEXT(hw_lock_bit)
89
90 crset hwtimeout ; timeout option
91 mr r12,r4 ; Load bit mask
92 mr r4,r5 ; Load timeout value
93 b lckcomm ; Join on up...
94
95 /*
96 * void hw_lock_lock(hw_lock_t)
97 *
98 * Acquire lock, spinning until it becomes available.
99 * Return with preemption disabled.
100 * We will just set a default timeout and jump into the NORMAL timeout lock.
101 */
102 .align 5
103 .globl EXT(hw_lock_lock)
104
105 LEXT(hw_lock_lock)
106 crclr hwtimeout ; no timeout option
107 li r4,0 ; request default timeout value
108 li r12,ILK_LOCKED ; Load bit mask
109 b lckcomm ; Join on up...
110
111 lockDisa:
112 crset hwtimeout ; timeout option
113 li r4,0 ; request default timeout value
114 li r12,ILK_LOCKED ; Load bit mask
115 b lckcomm ; Join on up...
116
117 /*
118 * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
119 *
120 * Try to acquire spin-lock. Return success (1) or failure (0).
121 * Attempt will fail after timeout ticks of the timebase.
122  * We try fairly hard to get this lock.  We disable interruptions, but
123  * reenable after a "short" timeout (128 ticks, we may want to change this).
124  * After checking to see if the large timeout value (passed in) has expired and a
125  * sufficient number of cycles have gone by (to ensure pending interrupts are taken),
126 * we return either in abject failure, or disable and go back to the lock sniff routine.
127 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
128 */
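/*
 * Illustration only: the overall spin/timeout strategy in hedged C-like
 * pseudocode.  The helper names here are hypothetical; the real code works
 * directly on the MSR and the timebase, and the no-timeout callers panic
 * through lckpanic instead of returning 0.
 *
 *	disable_preemption();
 *	for (;;) {
 *		if (try_to_set_ILK_LOCKED(lock))	// the lwarx/stwcx. loop
 *			return 1;
 *		interrupts_off();
 *		// sniff the lock word for up to 128 timebase ticks
 *		interrupts_on();			// window for pending interrupts
 *		timeout -= 128;
 *		if (timeout <= 0)
 *			return 0;			// abject failure
 *	}
 */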
129 .align 5
130 .globl EXT(hw_lock_to)
131
132 LEXT(hw_lock_to)
133 crset hwtimeout ; timeout option
134 li r12,ILK_LOCKED ; Load bit mask
135 lckcomm:
136 mfsprg r6,1 ; Get the current activation
137 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
138 addi r5,r5,1 ; Bring up the disable count
139 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
140 mr r5,r3 ; Get the address of the lock
141 li r8,0 ; Set r8 to zero
142
143 lcktry: lwarx r6,0,r5 ; Grab the lock value
144 and. r3,r6,r12 ; Is it locked?
145 or r6,r6,r12 ; Set interlock
146 bne-- lckspin ; Yeah, wait for it to clear...
147 stwcx. r6,0,r5 ; Try to seize that there durn lock
148 bne-- lcktry ; Couldn't get it...
149 li r3,1 ; return true
150 .globl EXT(hwllckPatch_isync)
151 LEXT(hwllckPatch_isync)
152 	isync					; Make sure we don't use a speculatively loaded value
153 blr ; Go on home...
154
155 lckspin: li r6,lgKillResv ; Get killing field
156 stwcx. r6,0,r6 ; Kill reservation
157
158 mr. r4,r4 ; Test timeout value
159 bne++ lockspin0
160 lis r4,hi16(EXT(LockTimeOut)) ; Get the high part
161 ori r4,r4,lo16(EXT(LockTimeOut)) ; And the low part
162 lwz r4,0(r4) ; Get the timeout value
163 lockspin0:
164 	mr.	r8,r8				; Is this the first spin attempt (r8 == 0)?
165 	bne++	lockspin1			; No, the interrupt state is already set up...
166 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
167 mfmsr r9 ; Get the MSR value
168 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
169 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
170 andc r9,r9,r0 ; Clear FP and VEC
171 andc r7,r9,r7 ; Clear EE as well
172 mtmsr r7 ; Turn off interruptions
173 isync ; May have turned off vec and fp here
174 mftb r8 ; Get timestamp on entry
175 b lcksniff
176
177 lockspin1: mtmsr r7 ; Turn off interruptions
178 mftb r8 ; Get timestamp on entry
179
180 lcksniff: lwz r3,0(r5) ; Get that lock in here
181 and. r3,r3,r12 ; Is it free yet?
182 beq++ lckretry ; Yeah, try for it again...
183
184 mftb r10 ; Time stamp us now
185 sub r10,r10,r8 ; Get the elapsed time
186 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
187 blt++ lcksniff ; Not yet...
188
189 mtmsr r9 ; Say, any interrupts pending?
190
191 ;			The following instructions force the pipeline to be interlocked so that only one
192 ;			instruction is issued per cycle.  This ensures that we stay enabled for a long enough
193 ; time; if it's too short, pending interruptions will not have a chance to be taken
194
195 subi r4,r4,128 ; Back off elapsed time from timeout value
196 or r4,r4,r4 ; Do nothing here but force a single cycle delay
197 mr. r4,r4 ; See if we used the whole timeout
198 li r3,0 ; Assume a timeout return code
199 or r4,r4,r4 ; Do nothing here but force a single cycle delay
200
201 ble-- lckfail ; We failed
202 b lockspin1 ; Now that we've opened an enable window, keep trying...
203 lckretry:
204 mtmsr r9 ; Restore interrupt state
205 li r8,1 ; Insure that R8 is not 0
206 b lcktry
207 lckfail: ; We couldn't get the lock
208 bf hwtimeout,lckpanic
209 li r3,0 ; Set failure return code
210 blr ; Return, head hanging low...
211 lckpanic:
212 mr r4,r5
213 mr r5,r3
214 lis r3,hi16(lckpanic_str) ; Get the failed lck message
215 ori r3,r3,lo16(lckpanic_str) ; Get the failed lck message
216 bl EXT(panic)
217 BREAKPOINT_TRAP ; We die here anyway
218 .data
219 lckpanic_str:
220 STRINGD "timeout on attempt to acquire lock (0x%08X), value = 0x%08X\n\000"
221 .text
222
223 /*
224 * void hw_lock_unlock(hw_lock_t)
225 *
226 * Unconditionally release lock.
227 * Release preemption level.
228 */
229 .align 5
230 .globl EXT(hw_lock_unlock)
231
232 LEXT(hw_lock_unlock)
233
234 .globl EXT(hwulckPatch_isync)
235 LEXT(hwulckPatch_isync)
236 isync
237 .globl EXT(hwulckPatch_eieio)
238 LEXT(hwulckPatch_eieio)
239 eieio
240 li r0, 0 ; set lock to free
241 stw r0, 0(r3)
242
243 b epStart ; Go enable preemption...
244
245 /*
246 * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
247 *
248 * Release bit based spin-lock. The second parameter is the bit mask to clear.
249 * Multiple bits may be cleared.
250 *
251 */
252 .align 5
253 .globl EXT(hw_unlock_bit)
254
255 LEXT(hw_unlock_bit)
256
257 .globl EXT(hwulckbPatch_isync)
258 LEXT(hwulckbPatch_isync)
259 isync
260 .globl EXT(hwulckbPatch_eieio)
261 LEXT(hwulckbPatch_eieio)
262 eieio
263 ubittry: lwarx r0,0,r3 ; Grab the lock value
264 andc r0,r0,r4 ; Clear the lock bits
265 stwcx. r0,0,r3 ; Try to clear that there durn lock
266 bne- ubittry ; Try again, couldn't save it...
267
268 b epStart ; Go enable preemption...
269
270 /*
271 * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
272 * unsigned int newb, unsigned int timeout)
273 *
274 * Try to acquire spin-lock. The second parameter is the bit mask to check.
275 * The third is the value of those bits and the 4th is what to set them to.
276 * Return success (1) or failure (0).
277 * Attempt will fail after timeout ticks of the timebase.
278  * We try fairly hard to get this lock.  We disable interruptions, but
279  * reenable after a "short" timeout (128 ticks, we may want to shorten this).
280  * After checking to see if the large timeout value (passed in) has expired and a
281  * sufficient number of cycles have gone by (to ensure pending interrupts are taken),
282 * we return either in abject failure, or disable and go back to the lock sniff routine.
283 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
284 */
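/*
 * Illustration only: a rough C sketch of the masked compare-and-set done by
 * the reservation loop below (a builtin CAS stands in for lwarx/stwcx., and
 * the timeout here is a simple counter, not timebase ticks):
 *
 *	unsigned int hw_lock_mbits_sketch(volatile unsigned int *lock,
 *			unsigned int bits, unsigned int value,
 *			unsigned int newb, unsigned int timeout)
 *	{
 *		do {
 *			unsigned int old = *lock;
 *			if ((old & bits) == value &&
 *			    __sync_bool_compare_and_swap(lock, old,
 *							 (old & ~bits) | newb))
 *				return 1;
 *		} while (timeout--);
 *		return 0;
 *	}
 */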
285 .align 5
286 .globl EXT(hw_lock_mbits)
287
288 LEXT(hw_lock_mbits)
289
290 li r10,0
291
292 mbittry: lwarx r12,0,r3 ; Grab the lock value
293 and r0,r12,r4 ; Clear extra bits
294 andc r12,r12,r4 ; Clear all bits in the bit mask
295 or r12,r12,r6 ; Turn on the lock bits
296 cmplw r0,r5 ; Are these the right bits?
297 bne-- mbitspin ; Nope, wait for it to clear...
298 stwcx. r12,0,r3 ; Try to seize that there durn lock
299 beq++ mbitgot ; We got it, yahoo...
300 b mbittry ; Just start up again if the store failed...
301
302 .align 5
303 mbitspin: li r11,lgKillResv ; Point to killing field
304 stwcx. r11,0,r11 ; Kill it
305
306 	mr.	r10,r10				; Is this the first spin attempt (r10 == 0)?
307 	bne++	mbitspin0			; No, the interrupt state is already set up...
308 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
309 mfmsr r9 ; Get the MSR value
310 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
311 ori r8,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
312 andc r9,r9,r0 ; Clear FP and VEC
313 andc r8,r9,r8 ; Clear EE as well
314 mtmsr r8 ; Turn off interruptions
315 isync ; May have turned off vectors or float here
316 mftb r10 ; Get the low part of the time base
317 b mbitsniff
318 mbitspin0:
319 mtmsr r8 ; Turn off interruptions
320 mftb r10 ; Get the low part of the time base
321 mbitsniff:
322 lwz r12,0(r3) ; Get that lock in here
323 and r0,r12,r4 ; Clear extra bits
324 cmplw r0,r5 ; Are these the right bits?
325 beq++ mbitretry ; Yeah, try for it again...
326
327 mftb r11 ; Time stamp us now
328 sub r11,r11,r10 ; Get the elapsed time
329 cmplwi r11,128 ; Have we been spinning for 128 tb ticks?
330 blt++ mbitsniff ; Not yet...
331
332 mtmsr r9 ; Say, any interrupts pending?
333
334 ;			The following instructions force the pipeline to be interlocked so that only one
335 ;			instruction is issued per cycle.  This ensures that we stay enabled for a long enough
336 ; time. If it is too short, pending interruptions will not have a chance to be taken
337
338 subi r7,r7,128 ; Back off elapsed time from timeout value
339 or r7,r7,r7 ; Do nothing here but force a single cycle delay
340 mr. r7,r7 ; See if we used the whole timeout
341 or r7,r7,r7 ; Do nothing here but force a single cycle delay
342
343 ble-- mbitfail ; We failed
344 b mbitspin0 ; Now that we have opened an enable window, keep trying...
345 mbitretry:
346 mtmsr r9 ; Enable for interruptions
347 li r10,1 ; Make sure this is non-zero
348 b mbittry
349
350 .align 5
351 mbitgot:
352 li r3,1 ; Set good return code
353 .globl EXT(hwlmlckPatch_isync)
354 LEXT(hwlmlckPatch_isync)
355 	isync					; Make sure we do not use a speculatively loaded value
356 blr
357
358 mbitfail: li r3,0 ; Set failure return code
359 blr ; Return, head hanging low...
360
361 /*
362 * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
363 *
364 * Spin until word hits 0 or timeout.
365 * Return success (1) or failure (0).
366 * Attempt will fail after timeout ticks of the timebase.
367 *
368 * The theory is that a processor will bump a counter as it signals
369  * other processors.  Then it will spin until the counter hits 0 (or
370  * times out).  The other processors, as they receive the signal, will
371  * decrement the counter.
372  *
373  * The other processors use an interlocked update to decrement; this one
374  * does not need to interlock.
375 */
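/*
 * Illustration only: the loop below in rough C form.  mach_absolute_time()
 * stands in for reading the timebase with mftb; the real code compares raw
 * 32-bit tick counts.  hw_cpu_wcng further down is the same loop with the
 * test inverted: it succeeds when the word *changes* from the value passed in.
 *
 *	unsigned int hw_cpu_sync_sketch(volatile unsigned int *counter,
 *					unsigned int timeout_ticks)
 *	{
 *		uint64_t start = mach_absolute_time();
 *		while (*counter != 0) {
 *			if (mach_absolute_time() - start > timeout_ticks)
 *				return 0;		// timed out
 *		}
 *		return 1;			// everyone has checked in
 *	}
 */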
376 .align 5
377 .globl EXT(hw_cpu_sync)
378
379 LEXT(hw_cpu_sync)
380
381 mftb r10 ; Get the low part of the time base
382 mr r9,r3 ; Save the sync word address
383 li r3,1 ; Assume we work
384
385 csynctry: lwz r11,0(r9) ; Grab the sync value
386 mr. r11,r11 ; Counter hit 0?
387 	beqlr-					; Yes, the count hit 0, return success...
388 mftb r12 ; Time stamp us now
389
390 sub r12,r12,r10 ; Get the elapsed time
391 cmplw r4,r12 ; Have we gone too long?
392 bge+ csynctry ; Not yet...
393
394 li r3,0 ; Set failure...
395 blr ; Return, head hanging low...
396
397 /*
398 * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
399 *
400 * Spin until word changes or timeout.
401 * Return success (1) or failure (0).
402 * Attempt will fail after timeout ticks of the timebase.
403 *
404  * This is used to ensure that a processor passes a certain point.
405  * An example of use is to monitor the last interrupt time in the
406  * per_proc block.  This can be used to ensure that the other processor
407 * has seen at least one interrupt since a specific time.
408 */
409 .align 5
410 .globl EXT(hw_cpu_wcng)
411
412 LEXT(hw_cpu_wcng)
413
414 mftb r10 ; Get the low part of the time base
415 mr r9,r3 ; Save the sync word address
416 li r3,1 ; Assume we work
417
418 wcngtry: lwz r11,0(r9) ; Grab the value
419 cmplw r11,r4 ; Do they still match?
420 bnelr- ; Nope, cool...
421 mftb r12 ; Time stamp us now
422
423 sub r12,r12,r10 ; Get the elapsed time
424 cmplw r5,r12 ; Have we gone too long?
425 bge+ wcngtry ; Not yet...
426
427 li r3,0 ; Set failure...
428 blr ; Return, head hanging low...
429
430
431 /*
432 * unsigned int hw_lock_try(hw_lock_t)
433 *
434 * Try to acquire spin-lock. Return success (1) or failure (0)
435 * Returns with preemption disabled on success.
436 *
437 */
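/*
 * Illustration only: a rough C sketch of the single acquisition attempt below.
 * The real code also disables interruptions around the attempt and bumps
 * ACT_PREEMPT_CNT by hand; disable_preemption() is used here as a stand-in.
 *
 *	unsigned int hw_lock_try_sketch(volatile unsigned int *lock)
 *	{
 *		unsigned int old = *lock;
 *		if ((old & ILK_LOCKED) == 0 &&
 *		    __sync_bool_compare_and_swap(lock, old, old | ILK_LOCKED)) {
 *			disable_preemption();
 *			return 1;		// got it
 *		}
 *		return 0;			// already held
 *	}
 */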
438 .align 5
439 .globl EXT(hw_lock_try)
440
441 LEXT(hw_lock_try)
442
443 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
444 mfmsr r9 ; Get the MSR value
445 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
446 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
447 andc r9,r9,r0 ; Clear FP and VEC
448 andc r7,r9,r7 ; Clear EE as well
449
450 mtmsr r7 ; Disable interruptions and thus, preemption
451
452 lwz r5,0(r3) ; Quick load
453 andi. r6,r5,ILK_LOCKED ; TEST...
454 bne-- .L_lock_try_failed ; No go...
455
456 .L_lock_try_loop:
457 lwarx r5,0,r3 ; Ld from addr of arg and reserve
458
459 andi. r6,r5,ILK_LOCKED ; TEST...
460 ori r5,r5,ILK_LOCKED
461 bne-- .L_lock_try_failedX ; branch if taken. Predict free
462
463 stwcx. r5,0,r3 ; And SET (if still reserved)
464 bne-- .L_lock_try_loop ; If set failed, loop back
465
466 .globl EXT(hwltlckPatch_isync)
467 LEXT(hwltlckPatch_isync)
468 isync
469
470 mfsprg r6,1 ; Get current activation
471 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
472 addi r5,r5,1 ; Bring up the disable count
473 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
474
475 mtmsr r9 ; Allow interruptions now
476 li r3,1 ; Set that the lock was free
477 blr
478
479 .L_lock_try_failedX:
480 li r6,lgKillResv ; Killing field
481 stwcx. r6,0,r6 ; Kill reservation
482
483 .L_lock_try_failed:
484 mtmsr r9 ; Allow interruptions now
485 li r3,0 ; FAILURE - lock was taken
486 blr
487
488 /*
489 * unsigned int hw_lock_held(hw_lock_t)
490 *
491 * Return 1 if lock is held
492 * Doesn't change preemption state.
493 * N.B. Racy, of course.
494 */
495 .align 5
496 .globl EXT(hw_lock_held)
497
498 LEXT(hw_lock_held)
499
500 	isync					; Make sure we don't use a speculatively fetched lock
501 lwz r3, 0(r3) ; Get lock value
502 andi. r6,r3,ILK_LOCKED ; Extract the ILK_LOCKED bit
503 blr
504
505 /*
506 * uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
507 *
508  * Compare oldval to the contents of dest; if they are equal, store newval and return true,
509  * else return false and do not store.
510 * This is an atomic operation
511 */
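/*
 * Illustration only: the equivalent operation expressed with a GCC-style
 * atomic builtin (the assembly below uses a lwarx/stwcx. reservation loop
 * plus an isync on success to hold up prefetch):
 *
 *	uint32_t hw_compare_and_store_sketch(uint32_t oldval, uint32_t newval,
 *					     volatile uint32_t *dest)
 *	{
 *		return __sync_bool_compare_and_swap(dest, oldval, newval) ? 1 : 0;
 *	}
 */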
512 .align 5
513 .globl EXT(hw_compare_and_store)
514
515 LEXT(hw_compare_and_store)
516
517 mr r6,r3 ; Save the old value
518
519 cstry: lwarx r9,0,r5 ; Grab the area value
520 li r3,1 ; Assume it works
521 cmplw cr0,r9,r6 ; Does it match the old value?
522 bne-- csfail ; No, it must have changed...
523 stwcx. r4,0,r5 ; Try to save the new value
524 bne-- cstry ; Didn't get it, try again...
525 .globl EXT(hwcsatomicPatch_isync)
526 LEXT(hwcsatomicPatch_isync)
527 isync ; Just hold up prefetch
528 blr ; Return...
529
530 csfail: li r3,lgKillResv ; Killing field
531 stwcx. r3,0,r3 ; Blow reservation
532
533 li r3,0 ; Set failure
534 blr ; Better luck next time...
535
536
537 /*
538 * uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
539 *
540 * Atomically add the second parameter to the first.
541 * Returns the result.
542 *
543 */
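/*
 * Illustration only: hw_atomic_add and the sub/or/and variants that follow are
 * all the same reservation loop around one ALU op.  In builtin form (note the
 * routines return the *new* value, fetch-op style):
 *
 *	uint32_t hw_atomic_add_sketch(volatile uint32_t *dest, uint32_t delt)
 *	{
 *		return __sync_add_and_fetch(dest, delt);
 *	}
 */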
544 .align 5
545 .globl EXT(hw_atomic_add)
546
547 LEXT(hw_atomic_add)
548
549 mr r6,r3 ; Save the area
550
551 addtry: lwarx r3,0,r6 ; Grab the area value
552 add r3,r3,r4 ; Add the value
553 stwcx. r3,0,r6 ; Try to save the new value
554 bne-- addtry ; Didn't get it, try again...
555 blr ; Return...
556
557
558 /*
559 * uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt)
560 *
561 * Atomically subtract the second parameter from the first.
562 * Returns the result.
563 *
564 */
565 .align 5
566 .globl EXT(hw_atomic_sub)
567
568 LEXT(hw_atomic_sub)
569
570 mr r6,r3 ; Save the area
571
572 subtry: lwarx r3,0,r6 ; Grab the area value
573 sub r3,r3,r4 ; Subtract the value
574 stwcx. r3,0,r6 ; Try to save the new value
575 bne-- subtry ; Didn't get it, try again...
576 blr ; Return...
577
578
579 /*
580 * uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask)
581 *
582 * Atomically ORs the second parameter into the first.
583 * Returns the result.
584 */
585 .align 5
586 .globl EXT(hw_atomic_or)
587
588 LEXT(hw_atomic_or)
589
590 mr r6,r3 ; Save the area
591
592 ortry: lwarx r3,0,r6 ; Grab the area value
593 or r3,r3,r4 ; OR the value
594 stwcx. r3,0,r6 ; Try to save the new value
595 bne-- ortry ; Did not get it, try again...
596 blr ; Return...
597
598
599 /*
600 * uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask)
601 *
602 * Atomically ANDs the second parameter with the first.
603 * Returns the result.
604 *
605 */
606 .align 5
607 .globl EXT(hw_atomic_and)
608
609 LEXT(hw_atomic_and)
610
611 mr r6,r3 ; Save the area
612
613 andtry: lwarx r3,0,r6 ; Grab the area value
614 and r3,r3,r4 ; AND the value
615 stwcx. r3,0,r6 ; Try to save the new value
616 bne-- andtry ; Did not get it, try again...
617 blr ; Return...
618
619
620 /*
621 * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
622 *
623 * Atomically inserts the element at the head of the list
624 * anchor is the pointer to the first element
625 * element is the pointer to the element to insert
626 * disp is the displacement into the element to the chain pointer
627 *
628 * NOTE: OSEnqueueAtomic() is aliased to this, see xnu/libkern/Makefile
629 */
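/*
 * Illustration only: a rough 32-bit C sketch of the lock-free push below.
 * The eieio in the loop is what orders the link store ahead of the anchor
 * update; the builtin CAS stands in for the stwcx. on the anchor.
 *
 *	void hw_queue_atomic_sketch(unsigned int *anchor, unsigned int *elem,
 *				    unsigned int disp)
 *	{
 *		unsigned int *link = (unsigned int *)((char *)elem + disp);
 *		unsigned int old;
 *		do {
 *			old = *anchor;		// current head
 *			*link = old;		// chain old head behind the new element
 *		} while (!__sync_bool_compare_and_swap(anchor, old,
 *						       (unsigned int)elem));
 *	}
 */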
630 .align 5
631 .globl EXT(hw_queue_atomic)
632
633 LEXT(hw_queue_atomic)
634
635 mr r7,r4 ; Make end point the same as start
636 mr r8,r5 ; Copy the displacement also
637 b hw_queue_comm ; Join common code...
638
639 /*
640 * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
641 *
642 * Atomically inserts the list of elements at the head of the list
643 * anchor is the pointer to the first element
644 * first is the pointer to the first element to insert
645 * last is the pointer to the last element to insert
646 * disp is the displacement into the element to the chain pointer
647 */
648 .align 5
649 .globl EXT(hw_queue_atomic_list)
650
651 LEXT(hw_queue_atomic_list)
652
653 mr r7,r5 ; Make end point the same as start
654 mr r8,r6 ; Copy the displacement also
655
656 hw_queue_comm:
657 lwarx r9,0,r3 ; Pick up the anchor
658 stwx r9,r8,r7 ; Chain that to the end of the new stuff
659 eieio ; Make sure this store makes it before the anchor update
660 stwcx. r4,0,r3 ; Try to chain into the front
661 bne-- hw_queue_comm ; Didn't make it, try again...
662
663 blr ; Return...
664
665 /*
666 * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
667 *
668 * Atomically removes the first element in a list and returns it.
669 * anchor is the pointer to the first element
670 * disp is the displacement into the element to the chain pointer
671 * Returns element if found, 0 if empty.
672 *
673 * NOTE: OSDequeueAtomic() is aliased to this, see xnu/libkern/Makefile
674 */
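/*
 * Illustration only: a rough 32-bit C sketch of the lock-free pop below
 * (builtin CAS in place of the lwarx/stwcx. pair on the anchor):
 *
 *	unsigned int *hw_dequeue_atomic_sketch(unsigned int *anchor,
 *					       unsigned int disp)
 *	{
 *		unsigned int head, next;
 *		do {
 *			head = *anchor;
 *			if (head == 0)
 *				return 0;	// list is empty
 *			next = *(unsigned int *)((char *)head + disp);
 *		} while (!__sync_bool_compare_and_swap(anchor, head, next));
 *		return (unsigned int *)head;
 *	}
 */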
675 .align 5
676 .globl EXT(hw_dequeue_atomic)
677
678 LEXT(hw_dequeue_atomic)
679
680 mr r5,r3 ; Save the anchor
681
682 hw_dequeue_comm:
683 lwarx r3,0,r5 ; Pick up the anchor
684 mr. r3,r3 ; Is the list empty?
685 	beq--	hdcFail					; Leave, the list is empty...
686 lwzx r9,r4,r3 ; Get the next in line
687 stwcx. r9,0,r5 ; Try to chain into the front
688 beqlr++ ; Got the thing, go away with it...
689 b hw_dequeue_comm ; Did not make it, try again...
690
691 hdcFail: li r4,lgKillResv ; Killing field
692 stwcx. r4,0,r4 ; Dump reservation
693 blr ; Leave...
694
695
696 /*
697 * Routines for mutex lock debugging.
698 */
699
700 /*
701 * Gets lock check flags in CR6: CR bits 24-27
702 */
703 #define CHECK_SETUP(rg) \
704 lbz rg,lglcksWork(0) __ASMNL__ \
705 mtcrf 2,rg __ASMNL__
706
707
708 /*
709 * Checks for expected lock type.
710 */
711 #define CHECK_MUTEX_TYPE() \
712 bf MUTEX_ATTR_DEBUGb,1f __ASMNL__ \
713 bt 24+disLktypeb,1f __ASMNL__ \
714 lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
715 cmpwi r10,MUTEX_TAG __ASMNL__ \
716 beq++ 1f __ASMNL__ \
717 PROLOG(0) __ASMNL__ \
718 mr r4,r11 __ASMNL__ \
719 mr r5,r10 __ASMNL__ \
720 lis r3,hi16(not_a_mutex) __ASMNL__ \
721 ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
722 bl EXT(panic) __ASMNL__ \
723 BREAKPOINT_TRAP __ASMNL__ \
724 1:
725
726 .data
727 not_a_mutex:
728 STRINGD "mutex (0x%08X) not a mutex type (0x%08X)\n\000"
729 .text
730
731 /*
732 * Verifies return to the correct thread in "unlock" situations.
733 */
734 #define CHECK_THREAD(thread_offset) \
735 bf MUTEX_ATTR_DEBUGb,3f __ASMNL__ \
736 bt 24+disLkThreadb,3f __ASMNL__ \
737 mfsprg r10,1 __ASMNL__ \
738 lwz r5,MUTEX_DATA(r3) __ASMNL__ \
739 rlwinm. r9,r5,0,0,29 __ASMNL__ \
740 bne++ 1f __ASMNL__ \
741 lis r3,hi16(not_held) __ASMNL__ \
742 ori r3,r3,lo16(not_held) __ASMNL__ \
743 b 2f __ASMNL__ \
744 1: __ASMNL__ \
745 cmpw r9,r10 __ASMNL__ \
746 beq++ 3f __ASMNL__ \
747 mr r5,r10 __ASMNL__ \
748 mr r6,r9 __ASMNL__ \
749 lis r3,hi16(wrong_thread) __ASMNL__ \
750 ori r3,r3,lo16(wrong_thread) __ASMNL__ \
751 2: __ASMNL__ \
752 mr r4,r11 __ASMNL__ \
753 PROLOG(0) __ASMNL__ \
754 bl EXT(panic) __ASMNL__ \
755 BREAKPOINT_TRAP __ASMNL__ \
756 3:
757
758 .data
759 not_held:
760 STRINGD "mutex (0x%08X) not held\n\000"
761 wrong_thread:
762 STRINGD "mutex (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n\000"
763 .text
764
765 #define CHECK_MYLOCK() \
766 bf MUTEX_ATTR_DEBUGb,1f __ASMNL__ \
767 bt 24+disLkMyLckb,1f __ASMNL__ \
768 mfsprg r10,1 __ASMNL__ \
769 lwz r9,MUTEX_DATA(r3) __ASMNL__ \
770 rlwinm r9,r9,0,0,29 __ASMNL__ \
771 cmpw r9,r10 __ASMNL__ \
772 bne++ 1f __ASMNL__ \
773 mr r4,r11 __ASMNL__ \
774 lis r3, hi16(mylock_attempt) __ASMNL__ \
775 ori r3,r3,lo16(mylock_attempt) __ASMNL__ \
776 bl EXT(panic) __ASMNL__ \
777 BREAKPOINT_TRAP __ASMNL__ \
778 1:
779
780 .data
781 mylock_attempt:
782 STRINGD "mutex (0x%08X) recursive lock attempt\n\000"
783 .text
784
785 #define LCK_STACK(lck, stack, lck_stack, frame_cnt, lr_save, tmp) \
786 bf 24+enaLkExtStckb,3f __ASMNL__ \
787 addi lck_stack,lck,MUTEX_STACK __ASMNL__ \
788 li frame_cnt,MUTEX_FRAMES-1 __ASMNL__ \
789 1: __ASMNL__ \
790 mr tmp,stack __ASMNL__ \
791 lwz stack,0(stack) __ASMNL__ \
792 xor tmp,stack,tmp __ASMNL__ \
793 cmplwi tmp,8192 __ASMNL__ \
794 bge-- 2f __ASMNL__ \
795 lwz lr_save,FM_LR_SAVE(stack) __ASMNL__ \
796 stwu lr_save,4(lck_stack) __ASMNL__ \
797 subi frame_cnt,frame_cnt,1 __ASMNL__ \
798 cmpi cr0,frame_cnt,0 __ASMNL__ \
799 bne 1b __ASMNL__ \
800 b 3f __ASMNL__ \
801 2: __ASMNL__ \
802 li tmp,0 __ASMNL__ \
803 stwu tmp,4(lck_stack) __ASMNL__ \
804 subi frame_cnt,frame_cnt,1 __ASMNL__ \
805 cmpi cr0,frame_cnt,0 __ASMNL__ \
806 bne 2b __ASMNL__ \
807 3:
808
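/*
 * Illustration only: what LCK_STACK records, in hedged C form.  It walks the
 * stack back chain saving up to MUTEX_FRAMES-1 return addresses into the
 * mutex's debug area, and gives up (zero-filling the rest) if one hop moves
 * 8192 bytes or more, i.e. appears to have left the stack.
 *
 *	for (i = 0; i < MUTEX_FRAMES - 1; i++) {
 *		caller_sp = *(uintptr_t *)sp;			// back chain
 *		if ((caller_sp ^ sp) >= 8192) {
 *			for (; i < MUTEX_FRAMES - 1; i++)
 *				*dst++ = 0;			// zero-fill
 *			break;
 *		}
 *		*dst++ = *(uintptr_t *)(caller_sp + FM_LR_SAVE);	// saved LR
 *		sp = caller_sp;
 *	}
 */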
809 /*
810 * void mutex_init(mutex_t* l, etap_event_t etap)
811 *
812 */
813 .align 5
814 .globl EXT(mutex_init)
815 LEXT(mutex_init)
816
817 PROLOG(0)
818 li r10,0
819 stw r10,MUTEX_DATA(r3) ; clear lock word
820 sth r10,MUTEX_WAITERS(r3) ; init waiter count
821 sth r10,MUTEX_PROMOTED_PRI(r3)
822 #if MACH_LDEBUG
823 li r11,MUTEX_ATTR_DEBUG
824 stw r10,MUTEX_STACK(r3) ; init caller pc
825 stw r10,MUTEX_THREAD(r3) ; and owning thread
826 li r9, MUTEX_TAG
827 stw r9, MUTEX_TYPE(r3) ; set lock type
828 stw r11,MUTEX_ATTR(r3)
829 addi r8,r3,MUTEX_STACK-4
830 li r9,MUTEX_FRAMES
831 mlistck:
832 stwu r10,4(r8) ; init stack
833 subi r9,r9,1
834 cmpi cr0,r9,0
835 bne mlistck
836 #endif /* MACH_LDEBUG */
837 EPILOG
838 blr
839
840 /*
841 * void lck_mtx_lock_ext(lck_mtx_ext_t*)
842 *
843 */
844 .align 5
845 .globl EXT(lck_mtx_lock_ext)
846 LEXT(lck_mtx_lock_ext)
847 #if MACH_LDEBUG
848 .globl EXT(mutex_lock)
849 LEXT(mutex_lock)
850
851 .globl EXT(_mutex_lock)
852 LEXT(_mutex_lock)
853 #endif
854 mr r11,r3 ; Save lock addr
855 mlckeEnter:
856 lwz r0,MUTEX_ATTR(r3)
857 mtcrf 1,r0 ; Set cr7
858 CHECK_SETUP(r12)
859 CHECK_MUTEX_TYPE()
860
861 bf MUTEX_ATTR_DEBUGb,L_mutex_lock_assert_wait_2
862 PROLOG(0)
863 bl EXT(assert_wait_possible)
864 mr. r3,r3
865 bne L_mutex_lock_assert_wait_1
866 lis r3,hi16(L_mutex_lock_assert_wait_panic_str)
867 ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
868 bl EXT(panic)
869 BREAKPOINT_TRAP ; We die here anyway
870
871 .data
872 L_mutex_lock_assert_wait_panic_str:
873 STRINGD "mutex lock attempt with assert_wait_possible false\n\000"
874 .text
875
876 L_mutex_lock_assert_wait_1:
877 lwz r3,FM_ARG0(r1)
878 lwz r11,FM_ARG0+0x04(r1)
879 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
880 mtcr r2
881 EPILOG
882 L_mutex_lock_assert_wait_2:
883
884 mfsprg r6,1 ; load the current thread
885 bf MUTEX_ATTR_STATb,mlckestatskip ; Branch if no stat
886 lwz r5,MUTEX_GRP(r3) ; Load lock group
887 li r7,GRP_MTX_STAT_UTIL+4 ; Set stat util offset
888 mlckestatloop:
889 lwarx r8,r7,r5 ; Load stat util cnt
890 addi r8,r8,1 ; Increment stat util cnt
891 stwcx. r8,r7,r5 ; Store stat util cnt
892 bne-- mlckestatloop ; Retry if failed
893 mr. r8,r8 ; Test for zero
894 	bne++	mlckestatskip			; Branch if stat util cnt did not wrap
895 lwz r8,GRP_MTX_STAT_UTIL(r5) ; Load upper stat util cnt
896 addi r8,r8,1 ; Increment upper stat util cnt
897 stw r8,GRP_MTX_STAT_UTIL(r5) ; Store upper stat util cnt
898 mlckestatskip:
899 lwz r5,MUTEX_DATA(r3) ; Get the lock quickly
900 li r4,0
901 li r8,0
902 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
903 mfmsr r9 ; Get the MSR value
904 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
905 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
906 andc r9,r9,r0 ; Clear FP and VEC
907 andc r7,r9,r7 ; Clear EE as well
908 mtmsr r7 ; Turn off interruptions
909 isync ; May have turned off vec and fp here
910 mr. r5,r5 ; Quick check
911 bne-- mlckespin01 ; Can not get it right now...
912
913 mlcketry:
914 lwarx r5,MUTEX_DATA,r3 ; load the mutex lock
915 mr. r5,r5
916 bne-- mlckespin0 ; Can not get it right now...
917 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
918 bne-- mlcketry ; loop back if failed
919 .globl EXT(mlckePatch_isync)
920 LEXT(mlckePatch_isync)
921 	isync						; stop prefetching
922 mflr r12
923 bf MUTEX_ATTR_DEBUGb,mlckedebskip
924 mr r8,r6 ; Get the active thread
925 stw r12,MUTEX_STACK(r3) ; Save our caller
926 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
927 mr r5,r1
928 LCK_STACK(r3,r5,r6,r7,r8,r10)
929 mlckedebskip:
930 mtmsr r9 ; Say, any interrupts pending?
931 blr
932
933 mlckespin0:
934 li r5,lgKillResv ; Killing field
935 stwcx. r5,0,r5 ; Kill reservation
936 mlckespin01:
937 mflr r12
938 mtmsr r9 ; Say, any interrupts pending?
939 bl mlckspin1
940 mtmsr r7 ; Turn off interruptions, vec and fp off already
941 mtlr r12
942 b mlcketry
943
944 /*
945 * void lck_mtx_lock(lck_mtx_t*)
946 *
947 */
948 .align 5
949 .globl EXT(lck_mtx_lock)
950 LEXT(lck_mtx_lock)
951
952 #if !MACH_LDEBUG
953 .globl EXT(mutex_lock)
954 LEXT(mutex_lock)
955
956 .globl EXT(_mutex_lock)
957 LEXT(_mutex_lock)
958 #endif
959
960 mfsprg r6,1 ; load the current thread
961 lwz r5,MUTEX_DATA(r3) ; Get the lock quickly
962 mr r11,r3 ; Save lock addr
963 li r4,0
964 li r8,0
965 li r9,0
966 mr. r5,r5 ; Quick check
967 bne-- mlckspin00 ; Indirect or Can not get it right now...
968
969 mlcktry:
970 lwarx r5,MUTEX_DATA,r3 ; load the mutex lock
971 mr. r5,r5
972 bne-- mlckspin01 ; Can not get it right now...
973 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
974 bne-- mlcktry ; loop back if failed
975 .globl EXT(mlckPatch_isync)
976 LEXT(mlckPatch_isync)
977 	isync						; stop prefetching
978 blr
979
980 mlckspin00:
981 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
982 bne-- mlckspin02 ; No, go handle contention
983 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
984 b mlckeEnter
985 mlckspin01:
986 li r5,lgKillResv ; Killing field
987 stwcx. r5,0,r5 ; Kill reservation
988 mlckspin02:
989 mflr r12
990 li r0,0
991 mtcrf 1,r0 ; Set cr7 to zero
992 bl mlckspin1
993 mtlr r12
994 b mlcktry
995
996
997 mlckspin1:
998 mr. r4,r4 ; Test timeout value
999 bne++ mlckspin2
1000 lis r4,hi16(EXT(MutexSpin)) ; Get the high part
1001 ori r4,r4,lo16(EXT(MutexSpin) ) ; And the low part
1002 	lwz	r4,0(r4)			; Get the spin timeout value
1003 mr. r4,r4 ; Test spin timeout value
1004 bne++ mlckspin2 ; Is spin timeout requested
1005 crclr mlckmiss ; Clear miss test
1006 b mlckslow1 ; Don't try to spin
1007
1008 mlckspin2:	mr.	r8,r8			; Is this the first spin attempt (r8 == 0)?
1009 	bne++	mlckspin3			; No, the interrupt state is already set up
1010 	crclr	mlckmiss			; Clear miss test
1011 	mr.	r9,r9				; Do we already have the MSR value (r9 != 0)?
1012 	bne++	mlckspin3			; Yes, r9 already holds the MSR value
1013 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1014 mfmsr r9 ; Get the MSR value
1015 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1016 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1017 andc r9,r9,r0 ; Clear FP and VEC
1018 andc r7,r9,r7 ; Clear EE as well
1019 mtmsr r7 ; Turn off interruptions
1020 isync ; May have turned off vec and fp here
1021 mftb r8 ; Get timestamp on entry
1022 b mlcksniff
1023
1024 mlckspin3: mtmsr r7 ; Turn off interruptions
1025 mftb r8 ; Get timestamp on entry
1026
1027 mlcksniff: lwz r5,MUTEX_DATA(r3) ; Get that lock in here
1028 mr. r5,r5 ; Is the lock held
1029 beq++ mlckretry ; No, try for it again...
1030 rlwinm. r10,r5,0,0,29 ; Extract the lock owner
1031 beq++ mlckslow0 ; InterLock is held
1032 bf MUTEX_ATTR_STATb,mlStatSkip ; Branch if no stat
1033 andi. r5,r5,ILK_LOCKED ; extract interlocked?
1034 bne mlStatSkip ; yes, skip
1035 bt mlckmiss,mlStatSkip ; miss already counted
1036 crset mlckmiss ; Remember miss recorded
1037 lwz r5,MUTEX_GRP(r3) ; Load lock group
1038 addi r5,r5,GRP_MTX_STAT_MISS+4 ; Add stat miss offset
1039 mlStatLoop:
1040 lwarx r6,0,r5 ; Load stat miss cnt
1041 addi r6,r6,1 ; Increment stat miss cnt
1042 stwcx. r6,0,r5 ; Update stat miss cnt
1043 bne-- mlStatLoop ; Retry if failed
1044 mfsprg r6,1 ; Reload current thread
1045 mlStatSkip:
1046 lwz r2,ACT_MACT_SPF(r10) ; Get the special flags
1047 rlwinm. r2,r2,0,OnProcbit,OnProcbit ; Is OnProcbit set?
1048 beq mlckslow0 ; Lock owner isn't running
1049 lis r2,hi16(TH_OPT_DELAYIDLE) ; Get DelayedIdle Option
1050 ori r2,r2,lo16(TH_OPT_DELAYIDLE) ; Get DelayedIdle Option
1051 lwz r10,THREAD_OPTIONS(r10) ; Get the thread options
1052 and. r10,r10,r2 ; Is DelayedIdle set?
1053 bne mlckslow0 ; Lock owner is in delay idle
1054
1055 mftb r10 ; Time stamp us now
1056 sub r10,r10,r8 ; Get the elapsed time
1057 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
1058 blt++ mlcksniff ; Not yet...
1059
1060 mtmsr r9 ; Say, any interrupts pending?
1061
1062 ;			The following instructions force the pipeline to be interlocked so that only one
1063 ;			instruction is issued per cycle.  This ensures that we stay enabled for a long enough
1064 ; time; if it's too short, pending interruptions will not have a chance to be taken
1065
1066 subi r4,r4,128 ; Back off elapsed time from timeout value
1067 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1068 mr. r4,r4 ; See if we used the whole timeout
1069 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1070
1071 ble-- mlckslow1 ; We failed
1072 b mlckspin3 ; Now that we've opened an enable window, keep trying...
1073 mlckretry:
1074 mtmsr r9 ; Restore interrupt state
1075 li r8,1 ; Show already through once
1076 blr
1077
1078 mlckslow0: ; We couldn't get the lock
1079 mtmsr r9 ; Restore interrupt state
1080
1081 mlckslow1:
1082 mtlr r12
1083
1084 PROLOG(0)
1085 .L_ml_retry:
1086 bl lockDisa ; Go get a lock on the mutex's interlock lock
1087 mr. r4,r3 ; Did we get it?
1088 lwz r3,FM_ARG0(r1) ; Restore the lock address
1089 bne++ mlGotInt ; We got it just fine...
1090 mr r4,r11 ; Saved lock addr
1091 lis r3,hi16(mutex_failed1) ; Get the failed mutex message
1092 ori r3,r3,lo16(mutex_failed1) ; Get the failed mutex message
1093 bl EXT(panic) ; Call panic
1094 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1095
1096 .data
1097 mutex_failed1:
1098 STRINGD "attempt to interlock mutex (0x%08X) failed on mutex lock\n\000"
1099 .text
1100
1101 mlGotInt:
1102
1103 ; Note that there is no reason to do a load and reserve here. We already
1104 ; hold the interlock lock and no one can touch this field unless they
1105 ; have that, so, we're free to play
1106
1107 lwz r4,MUTEX_DATA(r3) ; Get the mutex's lock field
1108 rlwinm. r9,r4,30,2,31 ; So, can we have it?
1109 	bne-	mlInUse					; Nope, somebody's playing already...
1110
1111 bf++ MUTEX_ATTR_DEBUGb,mlDebSkip
1112 CHECK_SETUP(r5)
1113 mfsprg r9,1 ; Get the current activation
1114 lwz r5,0(r1) ; Get previous save frame
1115 lwz r6,FM_LR_SAVE(r5) ; Get our caller's address
1116 mr r8,r9 ; Get the active thread
1117 stw r6,MUTEX_STACK(r3) ; Save our caller
1118 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
1119 LCK_STACK(r3,r5,r6,r7,r8,r10)
1120 mlDebSkip:
1121 mr r3,r11 ; Get the based lock address
1122 bl EXT(lck_mtx_lock_acquire)
1123 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
1124 mfsprg r5,1
1125 mtcr r2
1126 mr. r4,r3
1127 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1128 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1129 beq mlUnlock
1130 ori r5,r5,WAIT_FLAG
1131
1132 mlUnlock: eieio
1133 stw r5,MUTEX_DATA(r3) ; grab the mutexlock and free the interlock
1134
1135 EPILOG ; Restore all saved registers
1136 b epStart ; Go enable preemption...
1137
1138 ; We come to here when we have a resource conflict. In other words,
1139 ; the mutex is held.
1140
1141 mlInUse:
1142
1143 CHECK_SETUP(r12)
1144 	CHECK_MYLOCK()					; Assert we don't own the lock already
1145
1146 ; Note that we come in here with the interlock set. The wait routine
1147 ; will unlock it before waiting.
1148
1149 bf MUTEX_ATTR_STATb,mlStatSkip2 ; Branch if no stat
1150 lwz r5,MUTEX_GRP(r3) ; Load lck group
1151 bt mlckmiss,mlStatSkip1 ; Skip miss already counted
1152 crset mlckmiss ; Remember miss recorded
1153 li r9,GRP_MTX_STAT_MISS+4 ; Get stat miss offset
1154 mlStatLoop1:
1155 lwarx r8,r9,r5 ; Load stat miss cnt
1156 addi r8,r8,1 ; Increment stat miss cnt
1157 stwcx. r8,r9,r5 ; Store stat miss cnt
1158 bne-- mlStatLoop1 ; Retry if failed
1159 mlStatSkip1:
1160 lwz r9,GRP_MTX_STAT_WAIT+4(r5) ; Load wait cnt
1161 addi r9,r9,1 ; Increment wait cnt
1162 	stw	r9,GRP_MTX_STAT_WAIT+4(r5)	; Update wait cnt
1163 mlStatSkip2:
1164 ori r4,r4,WAIT_FLAG ; Set the wait flag
1165 stw r4,MUTEX_DATA(r3)
1166 rlwinm r4,r4,0,0,29 ; Extract the lock owner
1167 mfcr r2
1168 stw r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
1169 mr r3,r11 ; Get the based lock address
1170 bl EXT(lck_mtx_lock_wait) ; Wait for our turn at the lock
1171
1172 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1173 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1174 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
1175 mtcr r2
1176 b .L_ml_retry ; and try again...
1177
1178
1179 /*
1180  * void lck_mtx_try_lock_ext(lck_mtx_ext_t*)
1181 *
1182 */
1183 .align 5
1184 .globl EXT(lck_mtx_try_lock_ext)
1185 LEXT(lck_mtx_try_lock_ext)
1186 #if MACH_LDEBUG
1187 .globl EXT(mutex_try)
1188 LEXT(mutex_try)
1189 .globl EXT(_mutex_try)
1190 LEXT(_mutex_try)
1191 #endif
1192 mr r11,r3 ; Save lock addr
1193 mlteEnter:
1194 lwz r0,MUTEX_ATTR(r3)
1195 mtcrf 1,r0 ; Set cr7
1196 CHECK_SETUP(r12)
1197 CHECK_MUTEX_TYPE()
1198
1199 bf MUTEX_ATTR_STATb,mlteStatSkip ; Branch if no stat
1200 lwz r5,MUTEX_GRP(r3) ; Load lock group
1201 li r7,GRP_MTX_STAT_UTIL+4 ; Set stat util offset
1202 mlteStatLoop:
1203 lwarx r8,r7,r5 ; Load stat util cnt
1204 addi r8,r8,1 ; Increment stat util cnt
1205 stwcx. r8,r7,r5 ; Store stat util cnt
1206 bne-- mlteStatLoop ; Retry if failed
1207 mr. r8,r8 ; Test for zero
1208 	bne++	mlteStatSkip			; Branch if stat util cnt did not wrap
1209 lwz r8,GRP_MTX_STAT_UTIL(r5) ; Load upper stat util cnt
1210 addi r8,r8,1 ; Increment upper stat util cnt
1211 stw r8,GRP_MTX_STAT_UTIL(r5) ; Store upper stat util cnt
1212 mlteStatSkip:
1213 mfsprg r6,1 ; load the current thread
1214 lwz r5,MUTEX_DATA(r3) ; Get the lock value
1215 mr. r5,r5 ; Quick check
1216 bne-- L_mutex_try_slow ; Can not get it now...
1217 mfmsr r9 ; Get the MSR value
1218 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1219 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1220 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1221 andc r9,r9,r0 ; Clear FP and VEC
1222 andc r7,r9,r7 ; Clear EE as well
1223 mtmsr r7 ; Turn off interruptions
1224 isync ; May have turned off vec and fp here
1225
1226 mlteLoopTry:
1227 lwarx r5,MUTEX_DATA,r3 ; load the lock value
1228 mr. r5,r5
1229 bne-- mlteSlowX ; branch to the slow path
1230 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
1231 bne-- mlteLoopTry ; retry if failed
1232 .globl EXT(mltelckPatch_isync)
1233 LEXT(mltelckPatch_isync)
1234 isync ; stop prefetching
1235 mflr r12
1236 bf MUTEX_ATTR_DEBUGb,mlteDebSkip
1237 mr r8,r6 ; Get the active thread
1238 stw r12,MUTEX_STACK(r3) ; Save our caller
1239 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
1240 mr r5,r1
1241 LCK_STACK(r3,r5,r6,r7,r8,r10)
1242 mlteDebSkip:
1243 li r3, 1
1244 mtmsr r9 ; Say, any interrupts pending?
1245 blr
1246 mlteSlowX:
1247 li r5,lgKillResv ; Killing field
1248 stwcx. r5,0,r5 ; Kill reservation
1249 mtmsr r9 ; Say, any interrupts pending?
1250 b L_mutex_try_slow
1251
1252
1253 /*
1254 * void lck_mtx_try_lock(lck_mtx_t*)
1255 *
1256 */
1257 .align 5
1258 .globl EXT(lck_mtx_try_lock)
1259 LEXT(lck_mtx_try_lock)
1260 #if !MACH_LDEBUG
1261 .globl EXT(mutex_try)
1262 LEXT(mutex_try)
1263 .globl EXT(_mutex_try)
1264 LEXT(_mutex_try)
1265 #endif
1266
1267 mfsprg r6,1 ; load the current thread
1268 lwz r5,MUTEX_DATA(r3) ; Get the lock value
1269 mr r11,r3 ; Save lock addr
1270 mr. r5,r5 ; Quick check
1271 bne-- mltSlow00 ; Indirect or Can not get it now...
1272
1273 mltLoopTry:
1274 lwarx r5,MUTEX_DATA,r3 ; load the lock value
1275 mr. r5,r5
1276 bne-- mltSlow01 ; branch to the slow path
1277 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
1278 bne-- mltLoopTry ; retry if failed
1279 .globl EXT(mltlckPatch_isync)
1280 LEXT(mltlckPatch_isync)
1281 isync ; stop prefetching
1282 li r3, 1
1283 blr
1284
1285 mltSlow00:
1286 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
1287 bne-- mltSlow02 ; No, go handle contention
1288 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
1289 b mlteEnter
1290 mltSlow01:
1291 li r5,lgKillResv ; Killing field
1292 stwcx. r5,0,r5 ; Kill reservation
1293
1294 mltSlow02:
1295 li r0,0
1296 mtcrf 1,r0 ; Set cr7 to zero
1297
1298 L_mutex_try_slow:
1299 PROLOG(0)
1300
1301 lwz r6,MUTEX_DATA(r3) ; Quick check
1302 rlwinm. r6,r6,30,2,31 ; to see if someone has this lock already
1303 bne- mtFail ; Someone's got it already...
1304
1305 bl lockDisa ; Go get a lock on the mutex's interlock lock
1306 mr. r4,r3 ; Did we get it?
1307 lwz r3,FM_ARG0(r1) ; Restore the lock address
1308 bne++ mtGotInt ; We got it just fine...
1309 mr r4,r11 ; Saved lock addr
1310 lis r3,hi16(mutex_failed2) ; Get the failed mutex message
1311 ori r3,r3,lo16(mutex_failed2) ; Get the failed mutex message
1312 bl EXT(panic) ; Call panic
1313 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1314
1315 .data
1316 mutex_failed2:
1317 STRINGD "attempt to interlock mutex (0x%08X) failed on mutex lock try\n\000"
1318 .text
1319
1320 mtGotInt:
1321
1322 ; Note that there is no reason to do a load and reserve here. We already
1323 ;			hold the interlock and no one can touch this field unless they
1324 ; have that, so, we're free to play
1325
1326 lwz r4,MUTEX_DATA(r3) ; Get the mutex's lock field
1327 rlwinm. r9,r4,30,2,31 ; So, can we have it?
1328 	bne-	mtInUse					; Nope, somebody's playing already...
1329
1330 bf++ MUTEX_ATTR_DEBUGb,mtDebSkip
1331 CHECK_SETUP(r5)
1332 mfsprg r9,1 ; Get the current activation
1333 lwz r5,0(r1) ; Get previous save frame
1334 lwz r6,FM_LR_SAVE(r5) ; Get our caller's address
1335 mr r8,r9 ; Get the active thread
1336 stw r6,MUTEX_STACK(r3) ; Save our caller
1337 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
1338 LCK_STACK(r3,r5,r6,r7,r8,r10)
1339 mtDebSkip:
1340 mr r3,r11 ; Get the based lock address
1341 bl EXT(lck_mtx_lock_acquire)
1342 mfsprg r5,1
1343 mr. r4,r3
1344 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1345 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1346 beq mtUnlock
1347 ori r5,r5,WAIT_FLAG
1348
1349 mtUnlock: eieio
1350 stw r5,MUTEX_DATA(r3) ; grab the mutexlock and free the interlock
1351
1352 bl epStart ; Go enable preemption...
1353
1354 li r3, 1
1355 EPILOG ; Restore all saved registers
1356 blr ; Return...
1357
1358 ; We come to here when we have a resource conflict. In other words,
1359 ; the mutex is held.
1360
1361 mtInUse:
1362 bf++ MUTEX_ATTR_STATb,mtStatSkip ; Branch if no stat
1363 lwz r5,MUTEX_GRP(r3) ; Load lock group
1364 li r9,GRP_MTX_STAT_MISS+4 ; Get stat miss offset
1365 mtStatLoop:
1366 lwarx r8,r9,r5 ; Load stat miss cnt
1367 addi r8,r8,1 ; Increment stat miss cnt
1368 stwcx. r8,r9,r5 ; Store stat miss cnt
1369 bne-- mtStatLoop ; Retry if failed
1370 mtStatSkip:
1371 rlwinm r4,r4,0,0,30 ; Get the unlock value
1372 stw r4,MUTEX_DATA(r3) ; free the interlock
1373 bl epStart ; Go enable preemption...
1374
1375 mtFail: li r3,0 ; Set failure code
1376 EPILOG ; Restore all saved registers
1377 blr ; Return...
1378
1379
1380 /*
1381 * void mutex_unlock(mutex_t* l)
1382 *
1383 */
1384 .align 5
1385 .globl EXT(mutex_unlock)
1386 LEXT(mutex_unlock)
1387
1388 sync
1389 mr r11,r3 ; Save lock addr
1390 #if MACH_LDEBUG
1391 b mlueEnter1
1392 #else
1393 b mluEnter1
1394 #endif
1395
1396 /*
1397 * void lck_mtx_ext_unlock(lck_mtx_ext_t* l)
1398 *
1399 */
1400 .align 5
1401 .globl EXT(lck_mtx_ext_unlock)
1402 LEXT(lck_mtx_ext_unlock)
1403 #if MACH_LDEBUG
1404 .globl EXT(mutex_unlock_rwcmb)
1405 LEXT(mutex_unlock_rwcmb)
1406 #endif
1407 mlueEnter:
1408 .globl EXT(mulckePatch_isync)
1409 LEXT(mulckePatch_isync)
1410 isync
1411 .globl EXT(mulckePatch_eieio)
1412 LEXT(mulckePatch_eieio)
1413 eieio
1414 mr r11,r3 ; Save lock addr
1415 mlueEnter1:
1416 lwz r0,MUTEX_ATTR(r3)
1417 mtcrf 1,r0 ; Set cr7
1418 CHECK_SETUP(r12)
1419 CHECK_MUTEX_TYPE()
1420 CHECK_THREAD(MUTEX_THREAD)
1421
1422 lwz r5,MUTEX_DATA(r3) ; Get the lock
1423 rlwinm. r4,r5,0,30,31 ; Quick check
1424 bne-- L_mutex_unlock_slow ; Can not get it now...
1425 mfmsr r9 ; Get the MSR value
1426 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1427 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1428 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1429 andc r9,r9,r0 ; Clear FP and VEC
1430 andc r7,r9,r7 ; Clear EE as well
1431 mtmsr r7 ; Turn off interruptions
1432 isync ; May have turned off vec and fp here
1433
1434 mlueLoop:
1435 lwarx r5,MUTEX_DATA,r3
1436 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1437 li r5,0 ; Clear the mutexlock
1438 bne-- mlueSlowX
1439 stwcx. r5,MUTEX_DATA,r3
1440 bne-- mlueLoop
1441 mtmsr r9 ; Say, any interrupts pending?
1442 blr
1443
1444 mlueSlowX:
1445 li r5,lgKillResv ; Killing field
1446 stwcx. r5,0,r5 ; Dump reservation
1447 mtmsr r9 ; Say, any interrupts pending?
1448 b L_mutex_unlock_slow ; Join slow path...
1449
1450 /*
1451 * void lck_mtx_unlock(lck_mtx_t* l)
1452 *
1453 */
1454 .align 5
1455 .globl EXT(lck_mtx_unlock)
1456 LEXT(lck_mtx_unlock)
1457 #if !MACH_LDEBUG
1458 .globl EXT(mutex_unlock_rwcmb)
1459 LEXT(mutex_unlock_rwcmb)
1460 #endif
1461 mluEnter:
1462 .globl EXT(mulckPatch_isync)
1463 LEXT(mulckPatch_isync)
1464 isync
1465 .globl EXT(mulckPatch_eieio)
1466 LEXT(mulckPatch_eieio)
1467 eieio
1468 mr r11,r3 ; Save lock addr
1469 mluEnter1:
1470 lwz r5,MUTEX_DATA(r3) ; Get the lock
1471 rlwinm. r4,r5,0,30,31 ; Quick check
1472 bne-- mluSlow0 ; Indirect or Can not get it now...
1473
1474 mluLoop:
1475 lwarx r5,MUTEX_DATA,r3
1476 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1477 li r5,0 ; Clear the mutexlock
1478 bne-- mluSlowX
1479 stwcx. r5,MUTEX_DATA,r3
1480 bne-- mluLoop
1481 blr
1482
1483 mluSlow0:
1484 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
1485 bne-- L_mutex_unlock_slow ; No, go handle contention
1486 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
1487 b mlueEnter1
1488 mluSlowX:
1489 li r5,lgKillResv ; Killing field
1490 stwcx. r5,0,r5 ; Dump reservation
1491
1492 L_mutex_unlock_slow:
1493
1494 PROLOG(0)
1495
1496 bl lockDisa ; Go get a lock on the mutex's interlock lock
1497 mr. r4,r3 ; Did we get it?
1498 lwz r3,FM_ARG0(r1) ; Restore the lock address
1499 bne++ muGotInt ; We got it just fine...
1500 mr r4,r11 ; Saved lock addr
1501 lis r3,hi16(mutex_failed3) ; Get the failed mutex message
1502 ori r3,r3,lo16(mutex_failed3) ; Get the failed mutex message
1503 bl EXT(panic) ; Call panic
1504 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1505
1506 .data
1507 mutex_failed3:
1508 STRINGD "attempt to interlock mutex (0x%08X) failed on mutex unlock\n\000"
1509 .text
1510
1511
1512 muGotInt:
1513 lwz r4,MUTEX_DATA(r3)
1514 andi. r5,r4,WAIT_FLAG ; are there any waiters ?
1515 rlwinm r4,r4,0,0,29
1516 beq+ muUnlock ; Nope, we're done...
1517
1518 mr r3,r11 ; Get the based lock address
1519 bl EXT(lck_mtx_unlock_wakeup) ; yes, wake a thread
1520 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1521 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1522 lwz r5,MUTEX_DATA(r3) ; load the lock
1523
1524 muUnlock:
1525 andi. r5,r5,WAIT_FLAG ; Get the unlock value
1526 eieio
1527 stw r5,MUTEX_DATA(r3) ; unlock the interlock and lock
1528
1529 EPILOG ; Deal with the stack now, enable_preemption doesn't always want one
1530 b epStart ; Go enable preemption...
1531
1532 /*
1533 * void lck_mtx_assert(lck_mtx_t* l, unsigned int)
1534 *
1535 */
1536 .align 5
1537 .globl EXT(lck_mtx_assert)
1538 LEXT(lck_mtx_assert)
1539 .globl EXT(_mutex_assert)
1540 LEXT(_mutex_assert)
1541 mr r11,r3
1542 maEnter:
1543 lwz r5,MUTEX_DATA(r3)
1544 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
1545 bne-- maCheck ; No, go check the assertion
1546 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
1547 b maEnter
1548 maCheck:
1549 mfsprg r6,1 ; load the current thread
1550 rlwinm r5,r5,0,0,29 ; Extract the lock owner
1551 cmpwi r4,MUTEX_ASSERT_OWNED
1552 cmplw cr1,r6,r5 ; Is the lock held by current act
1553 crandc cr0_eq,cr0_eq,cr1_eq ; Check owned assertion
1554 bne-- maNext
1555 mr r4,r11
1556 lis r3,hi16(mutex_assert1) ; Get the failed mutex message
1557 ori r3,r3,lo16(mutex_assert1) ; Get the failed mutex message
1558 b maPanic ; Panic path
1559 maNext:
1560 cmpwi r4,MUTEX_ASSERT_NOTOWNED ; Check not owned assertion
1561 crand cr0_eq,cr0_eq,cr1_eq ;
1562 bnelr++
1563 maPanic:
1564 PROLOG(0)
1565 mr r4,r11
1566 lis r3,hi16(mutex_assert2) ; Get the failed mutex message
1567 ori r3,r3,lo16(mutex_assert2) ; Get the failed mutex message
1568 bl EXT(panic) ; Call panic
1569 BREAKPOINT_TRAP ; We die here anyway
1570
1571 .data
1572 mutex_assert1:
1573 STRINGD "mutex (0x%08X) not owned\n\000"
1574 mutex_assert2:
1575 STRINGD "mutex (0x%08X) owned\n\000"
1576 .text
1577
1578
1579 /*
1580 * void lck_mtx_ilk_unlock(lck_mtx *lock)
1581 */
1582 .globl EXT(lck_mtx_ilk_unlock)
1583 LEXT(lck_mtx_ilk_unlock)
1584
1585 lwz r10,MUTEX_DATA(r3)
1586 rlwinm r10,r10,0,0,30
1587 eieio
1588 stw r10,MUTEX_DATA(r3)
1589
1590 b epStart ; Go enable preemption...
1591
1592 /*
1593 * void _enable_preemption_no_check(void)
1594 *
1595 * This version does not check if we get preempted or not
1596 */
1597 .align 4
1598 .globl EXT(_enable_preemption_no_check)
1599
1600 LEXT(_enable_preemption_no_check)
1601
1602 	cmplw	cr1,r1,r1				; Set cr1_eq so we know not to check if preempted
1603 b epCommn ; Join up with the other enable code...
1604
1605 /*
1606 * void _enable_preemption(void)
1607 *
1608 * This version checks if we get preempted or not
1609 */
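/*
 * Illustration only: the epStart/epCommn logic below in hedged C form.  The
 * field and helper names here are illustrative stand-ins, not the real ones.
 *
 *	void _enable_preemption(void)
 *	{
 *		if (--current_act->preempt_cnt < 0)
 *			panic("enable_preemption: preemption_level %d", ...);
 *		if (current_act->preempt_cnt == 0 &&
 *		    interrupts_are_enabled() &&
 *		    (per_proc->pending_ast & AST_URGENT))
 *			take_preemption_trap();		// the 'sc' to DoPreemptCall
 *	}
 */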
1610 .align 5
1611 .globl EXT(_enable_preemption)
1612
1613 LEXT(_enable_preemption)
1614
1615 ; Here is where we enable preemption.
1616
1617 epStart:
1618 	cmplwi	cr1,r1,0				; Clear cr1_eq (r1 is never 0) so we know to check if preempted
1619
1620 epCommn:
1621 mfsprg r3,1 ; Get current activation
1622 li r8,-1 ; Get a decrementer
1623 lwz r5,ACT_PREEMPT_CNT(r3) ; Get the preemption level
1624 add. r5,r5,r8 ; Bring down the disable count
1625 blt- epTooFar ; Yeah, we did...
1626 stw r5,ACT_PREEMPT_CNT(r3) ; Save it back
1627 crandc cr0_eq,cr0_eq,cr1_eq
1628 beq+ epCheckPreempt ; Go check if we need to be preempted...
1629 blr ; Leave...
1630 epTooFar:
1631 mr r4,r5
1632 lis r3,hi16(epTooFarStr) ; First half of panic string
1633 ori r3,r3,lo16(epTooFarStr) ; Second half of panic string
1634 PROLOG(0)
1635 bl EXT(panic)
1636 BREAKPOINT_TRAP ; We die here anyway
1637
1638 .data
1639 epTooFarStr:
1640 STRINGD "enable_preemption: preemption_level %d\n\000"
1641
1642 .text
1643 .align 5
1644 epCheckPreempt:
1645 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1646 mfmsr r9 ; Get the MSR value
1647 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1648 andi. r4,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off
1649 beq+ epCPno ; No preemption here...
1650 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1651 andc r9,r9,r0 ; Clear FP and VEC
1652 andc r7,r9,r7 ; Clear EE as well
1653 mtmsr r7 ; Turn off interruptions
1654 isync ; May have turned off vec and fp here
1655 lwz r3,ACT_PER_PROC(r3) ; Get the per_proc block
1656 lwz r7,PP_PENDING_AST(r3) ; Get pending AST mask
1657 li r5,AST_URGENT ; Get the requests we do honor
1658 lis r0,hi16(DoPreemptCall) ; Just in case, get the top of firmware call
1659 and. r7,r7,r5 ; Should we preempt?
1660 ori r0,r0,lo16(DoPreemptCall) ; Merge in bottom part
1661 mtmsr r9 ; Allow interrupts if we can
1662 epCPno:
1663 beqlr+ ; We probably will not preempt...
1664 sc ; Do the preemption
1665 blr ; Now, go away now...
1666
1667 /*
1668 * void disable_preemption(void)
1669 *
1670 * Here is where we disable preemption.
1671 */
1672 .align 5
1673 .globl EXT(_disable_preemption)
1674
1675 LEXT(_disable_preemption)
1676
1677 mfsprg r6,1 ; Get the current activation
1678 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1679 addi r5,r5,1 ; Bring up the disable count
1680 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1681 blr ; Return...
1682
1683 /*
1684 * int get_preemption_level(void)
1685 *
1686 * Return the current preemption level
1687 */
1688 .align 5
1689 .globl EXT(get_preemption_level)
1690
1691 LEXT(get_preemption_level)
1692
1693 mfsprg r6,1 ; Get current activation
1694 lwz r3,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1695 blr ; Return...
1696
1697 /*
1698 * void ppc_usimple_lock_init(simple_lock_t, etap_event_t)
1699 *
1700 * Initialize a simple lock.
1701 */
1702 .align 5
1703 .globl EXT(ppc_usimple_lock_init)
1704
1705 LEXT(ppc_usimple_lock_init)
1706
1707 li r0, 0 ; set lock to free == 0
1708 stw r0, 0(r3) ; Initialize the lock
1709 blr
1710
1711 /*
1712 * void lck_spin_lock(lck_spin_t *)
1713 * void ppc_usimple_lock(simple_lock_t *)
1714 *
1715 */
1716 .align 5
1717 .globl EXT(lck_spin_lock)
1718 LEXT(lck_spin_lock)
1719 .globl EXT(ppc_usimple_lock)
1720 LEXT(ppc_usimple_lock)
1721
1722 mfsprg r6,1 ; Get the current activation
1723 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1724 addi r5,r5,1 ; Bring up the disable count
1725 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1726 mr r5,r3 ; Get the address of the lock
1727 li r8,0 ; Set r8 to zero
1728 li r4,0 ; Set r4 to zero
1729
1730 slcktry: lwarx r11,SLOCK_ILK,r5 ; Grab the lock value
1731 andi. r3,r11,ILK_LOCKED ; Is it locked?
1732 ori r11,r6,ILK_LOCKED ; Set interlock
1733 bne-- slckspin ; Yeah, wait for it to clear...
1734 stwcx. r11,SLOCK_ILK,r5 ; Try to seize that there durn lock
1735 bne-- slcktry ; Couldn't get it...
1736 .globl EXT(slckPatch_isync)
1737 LEXT(slckPatch_isync)
1738 	isync					; Make sure we don't use a speculatively loaded value
1739 blr ; Go on home...
1740
1741 slckspin: li r11,lgKillResv ; Killing field
1742 stwcx. r11,0,r11 ; Kill reservation
1743
1744 mr. r4,r4 ; Test timeout value
1745 bne++ slockspin0
1746 lis r4,hi16(EXT(LockTimeOut)) ; Get the high part
1747 ori r4,r4,lo16(EXT(LockTimeOut)) ; And the low part
1748 	lwz	r4,0(r4)			; Get the timeout value
1749
1750 slockspin0:	mr.	r8,r8			; Is this the first spin attempt (r8 == 0)?
1751 	bne++	slockspin1			; No, the interrupt state is already set up...
1752 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1753 mfmsr r9 ; Get the MSR value
1754 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1755 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1756 andc r9,r9,r0 ; Clear FP and VEC
1757 andc r7,r9,r7 ; Clear EE as well
1758 mtmsr r7 ; Turn off interruptions
1759 isync ; May have turned off vec and fp here
1760 mftb r8 ; Get timestamp on entry
1761 b slcksniff
1762
1763 slockspin1: mtmsr r7 ; Turn off interruptions
1764 mftb r8 ; Get timestamp on entry
1765
1766 slcksniff: lwz r3,SLOCK_ILK(r5) ; Get that lock in here
1767 andi. r3,r3,ILK_LOCKED ; Is it free yet?
1768 beq++ slckretry ; Yeah, try for it again...
1769
1770 mftb r10 ; Time stamp us now
1771 sub r10,r10,r8 ; Get the elapsed time
1772 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
1773 blt++ slcksniff ; Not yet...
1774
1775 mtmsr r9 ; Say, any interrupts pending?
1776
1777 ;			The following instructions force the pipeline to be interlocked so that only one
1778 ;			instruction is issued per cycle.  This ensures that we stay enabled for a long enough
1779 ; time; if it's too short, pending interruptions will not have a chance to be taken
1780
1781 subi r4,r4,128 ; Back off elapsed time from timeout value
1782 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1783 mr. r4,r4 ; See if we used the whole timeout
1784 li r3,0 ; Assume a timeout return code
1785 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1786
1787 ble-- slckfail ; We failed
1788 b slockspin1 ; Now that we've opened an enable window, keep trying...
1789 slckretry:
1790 mtmsr r9 ; Restore interrupt state
1791 li r8,1 ; Show already through once
1792 b slcktry
1793 slckfail: ; We couldn't get the lock
1794 lis r3,hi16(slckpanic_str)
1795 ori r3,r3,lo16(slckpanic_str)
1796 mr r4,r5
1797 mflr r5
1798 PROLOG(0)
1799 bl EXT(panic)
1800 BREAKPOINT_TRAP ; We die here anyway
1801
1802 .data
1803 slckpanic_str:
1804 STRINGD "simple lock (0x%08X) deadlock detection, pc=0x%08X\n\000"
1805 .text
1806
1807 /*
1808 * boolean_t lck_spin_try_lock(lck_spin_t *)
1809 * unsigned int ppc_usimple_lock_try(simple_lock_t *)
1810 *
1811 */
1812 .align 5
1813 .globl EXT(lck_spin_try_lock)
1814 LEXT(lck_spin_try_lock)
1815 .globl EXT(ppc_usimple_lock_try)
1816 LEXT(ppc_usimple_lock_try)
1817
1818 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1819 mfmsr r9 ; Get the MSR value
1820 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1821 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1822 andc r9,r9,r0 ; Clear FP and VEC
1823 andc r7,r9,r7 ; Clear EE as well
1824 mtmsr r7 ; Disable interruptions and, thus, preemption
1825 mfsprg r6,1 ; Get current activation
1826
1827 lwz r11,SLOCK_ILK(r3) ; Get the lock
1828 andi. r5,r11,ILK_LOCKED ; Check it...
1829 bne-- slcktryfail ; Quickly fail...
1830
1831 slcktryloop:
1832 lwarx r11,SLOCK_ILK,r3 ; Ld from addr of arg and reserve
1833
1834 andi. r5,r11,ILK_LOCKED ; TEST...
1835 ori r5,r6,ILK_LOCKED ; Set interlock
1836 bne-- slcktryfailX ; branch if taken. Predict free
1837
1838 stwcx. r5,SLOCK_ILK,r3 ; And SET (if still reserved)
1839 bne-- slcktryloop ; If set failed, loop back
1840
1841 .globl EXT(stlckPatch_isync)
1842 LEXT(stlckPatch_isync)
1843 isync
1844
1845 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1846 addi r5,r5,1 ; Bring up the disable count
1847 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1848
1849 mtmsr r9 ; Allow interruptions now
1850 li r3,1 ; Set that the lock was free
1851 blr
1852
1853 slcktryfailX:
1854 li r5,lgKillResv ; Killing field
1855 stwcx. r5,0,r5 ; Kill reservation
1856
1857 slcktryfail:
1858 mtmsr r9 ; Allow interruptions now
1859 li r3,0 ; FAILURE - lock was taken
1860 blr
1861
1862
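/*
 * Illustrative sketch only: the try-lock fast path above in rough C, assuming
 * a hypothetical preemption_disable() in place of the ACT_PREEMPT_CNT bump and
 * a GCC-style atomic in place of lwarx/stwcx.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	#define ILK_LOCKED 0x01u
 *
 *	typedef struct { volatile uint32_t ilk; } slock_t;
 *
 *	extern void preemption_disable(void);   // hypothetical stand-in
 *
 *	// Returns true (and leaves preemption disabled) if the interlock was free
 *	// and we set it; returns false at once if it was already held.
 *	static bool slock_try(slock_t *l)
 *	{
 *	    if (l->ilk & ILK_LOCKED)
 *	        return false;                   // quick fail before the atomic
 *	    if (__sync_fetch_and_or(&l->ilk, ILK_LOCKED) & ILK_LOCKED)
 *	        return false;                   // lost the race
 *	    preemption_disable();
 *	    return true;
 *	}
 */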
1863 /*
1864 * void lck_spin_unlock(lck_spin_t *)
1865 * void ppc_usimple_unlock_rwcmb(simple_lock_t *)
1866 *
1867 */
1868 .align 5
1869 .globl EXT(lck_spin_unlock)
1870 LEXT(lck_spin_unlock)
1871 .globl EXT(ppc_usimple_unlock_rwcmb)
1872 LEXT(ppc_usimple_unlock_rwcmb)
1873
1874 li r0,0
1875 .globl EXT(sulckPatch_isync)
1876 LEXT(sulckPatch_isync)
1877 isync
1878 .globl EXT(sulckPatch_eieio)
1879 LEXT(sulckPatch_eieio)
1880 eieio
1881 stw r0, SLOCK_ILK(r3)
1882
1883 b epStart ; Go enable preemption...
1884
1885 /*
1886 * void ppc_usimple_unlock_rwmb(simple_lock_t *)
1887 *
1888 */
1889 .align 5
1890 .globl EXT(ppc_usimple_unlock_rwmb)
1891
1892 LEXT(ppc_usimple_unlock_rwmb)
1893
1894 li r0,0
1895 sync
1896 stw r0, SLOCK_ILK(r3)
1897
1898 b epStart ; Go enable preemption...
1899
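/*
 * Illustrative sketch only: what both unlock variants above express, assuming a
 * hypothetical preemption_enable() for the epStart path. Stores made while the
 * lock was held must be visible before the lock word is cleared; the eieio (or
 * the full sync in the _rwmb variant) provides that ordering, which a C11
 * release store captures.
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	typedef struct { _Atomic uint32_t ilk; } slock_t;
 *
 *	extern void preemption_enable(void);    // hypothetical stand-in for epStart
 *
 *	static void slock_unlock(slock_t *l)
 *	{
 *	    atomic_store_explicit(&l->ilk, 0u, memory_order_release);
 *	    preemption_enable();
 *	}
 */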
1900 /*
1901 * void enter_funnel_section(funnel_t *)
1902 *
1903 */
1904 .align 5
1905 .globl EXT(enter_funnel_section)
1906
1907 LEXT(enter_funnel_section)
1908
1909 #if !MACH_LDEBUG
1910 lis r10,hi16(EXT(kdebug_enable))
1911 ori r10,r10,lo16(EXT(kdebug_enable))
1912 lwz r10,0(r10)
1913 lis r11,hi16(EXT(split_funnel_off))
1914 ori r11,r11,lo16(EXT(split_funnel_off))
1915 lwz r11,0(r11)
1916 or. r10,r11,r10 ; Check kdebug_enable or split_funnel_off
1917 bne- L_enter_funnel_section_slow ; If set, call the slow path
1918 mfsprg r6,1 ; Get the current activation
1919 lwz r7,LOCK_FNL_MUTEX(r3)
1920
1921 lwz r5,0(r7) ; Get lock quickly
1922 mr. r5,r5 ; Locked?
1923 bne-- L_enter_funnel_section_slow ; Yup...
1924
1925 L_enter_funnel_section_loop:
1926 lwarx r5,0,r7 ; Load the mutex lock
1927 mr. r5,r5
1928 bne-- L_enter_funnel_section_slowX ; Go to the slow path
1929 stwcx. r6,0,r7 ; Grab the lock
1930 bne-- L_enter_funnel_section_loop ; Loop back if failed
1931 .globl EXT(entfsectPatch_isync)
1932 LEXT(entfsectPatch_isync)
1933 isync ; Stop prefetching
1934 li r7,TH_FN_OWNED
1935 stw r3,THREAD_FUNNEL_LOCK(r6) ; Set the funnel lock reference
1936 stw r7,THREAD_FUNNEL_STATE(r6) ; Set the funnel state
1937 blr
1938
1939 L_enter_funnel_section_slowX:
1940 li r4,lgKillResv ; Killing field
1941 stwcx. r4,0,r4 ; Kill reservation
1942
1943 L_enter_funnel_section_slow:
1944 #endif
1945 li r4,TRUE
1946 b EXT(thread_funnel_set)
1947
1948 /*
1949 * void exit_funnel_section(void)
1950 *
1951 */
1952 .align 5
1953 .globl EXT(exit_funnel_section)
1954
1955 LEXT(exit_funnel_section)
1956
1957 mfsprg r6,1 ; Get the current activation
1958 lwz r3,THREAD_FUNNEL_LOCK(r6) ; Get the funnel lock
1959 mr. r3,r3 ; Do we hold a funnel?
1960 beq- L_exit_funnel_section_ret ; No, nothing to release; just return
1961 #if !MACH_LDEBUG
1962 lis r10,hi16(EXT(kdebug_enable))
1963 ori r10,r10,lo16(EXT(kdebug_enable))
1964 lwz r10,0(r10)
1965 mr. r10,r10
1966 bne- L_exit_funnel_section_slow ; If set, call the slow path
1967 lwz r7,LOCK_FNL_MUTEX(r3) ; Get the funnel mutex lock
1968 .globl EXT(retfsectPatch_isync)
1969 LEXT(retfsectPatch_isync)
1970 isync
1971 .globl EXT(retfsectPatch_eieio)
1972 LEXT(retfsectPatch_eieio)
1973 eieio
1974
1975 lwz r5,0(r7) ; Get lock
1976 rlwinm. r4,r5,0,30,31 ; Quick check for bail if pending waiter or interlock set
1977 bne-- L_exit_funnel_section_slow ; No can get...
1978
1979 L_exit_funnel_section_loop:
1980 lwarx r5,0,r7
1981 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1982 li r5,0 ; Clear the mutexlock
1983 bne-- L_exit_funnel_section_slowX
1984 stwcx. r5,0,r7 ; Release the funnel mutexlock
1985 bne-- L_exit_funnel_section_loop
1986 li r7,0
1987 stw r7,THREAD_FUNNEL_STATE(r6) ; Clear the funnel state
1988 stw r7,THREAD_FUNNEL_LOCK(r6) ; Clear the funnel lock reference
1989 blr ; Return
1990
1991 L_exit_funnel_section_slowX:
1992 li r4,lgKillResv ; Killing field
1993 stwcx. r4,0,r4 ; Kill it
1994
1995 L_exit_funnel_section_slow:
1996 #endif
1997 li r4,FALSE
1998 b EXT(thread_funnel_set)
1999 L_exit_funnel_section_ret:
2000 blr
2001
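/*
 * Illustrative sketch only: the funnel-enter fast path above in rough C, with
 * hypothetical stand-ins for the thread/funnel structures and current_thread();
 * the real slow path is thread_funnel_set(), as in the code above.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	#define TH_FN_OWNED 0x01
 *
 *	// Hypothetical stand-ins for the fields touched above.
 *	typedef struct { volatile uintptr_t holder; } fnl_mutex_t;
 *	typedef struct { fnl_mutex_t *fnl_mutex; } funnel_t;
 *	typedef struct { funnel_t *funnel_lock; int funnel_state; } thread_t;
 *
 *	extern int       kdebug_enable, split_funnel_off;
 *	extern thread_t *current_thread(void);
 *	extern void      thread_funnel_set(funnel_t *fnl, bool on);   // slow path
 *
 *	static void funnel_enter(funnel_t *fnl)
 *	{
 *	    thread_t    *th = current_thread();
 *	    fnl_mutex_t *m  = fnl->fnl_mutex;
 *
 *	    // Fast path only when tracing is off, funnels are not split, and the
 *	    // funnel's mutex word is free; otherwise fall into the slow path.
 *	    if (kdebug_enable || split_funnel_off ||
 *	        !__sync_bool_compare_and_swap(&m->holder, 0, (uintptr_t)th)) {
 *	        thread_funnel_set(fnl, true);
 *	        return;
 *	    }
 *	    th->funnel_lock  = fnl;         // remember which funnel we own
 *	    th->funnel_state = TH_FN_OWNED;
 *	}
 */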
2002 /*
2003 * void lck_rw_lock_exclusive(lck_rw_t*)
2004 *
2005 */
2006 .align 5
2007 .globl EXT(lck_rw_lock_exclusive)
2008 LEXT(lck_rw_lock_exclusive)
2009 #if !MACH_LDEBUG
2010 .globl EXT(lock_write)
2011 LEXT(lock_write)
2012 #endif
2013 rwleloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2014 rlwinm. r7,r5,30,1,31 ; Can we have it?
2015 ori r6,r5,WANT_EXCL ; Mark Exclusive
2016 bne-- rwlespin ; Branch if cannot be held
2017 stwcx. r6,RW_DATA,r3 ; Update lock word
2018 bne-- rwleloop
2019 .globl EXT(rwlePatch_isync)
2020 LEXT(rwlePatch_isync)
2021 isync
2022 blr
2023 rwlespin:
2024 li r4,lgKillResv ; Killing field
2025 stwcx. r4,0,r4 ; Kill it
2026 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2027 bne-- rwlespin1 ; No, go handle contention
2028 mr r4,r3 ; pass lock pointer
2029 lwz r3,RW_PTR(r3) ; load lock ext pointer
2030 b EXT(lck_rw_lock_exclusive_ext)
2031 rwlespin1:
2032 b EXT(lck_rw_lock_exclusive_gen)
2033
2034 /*
2035 * void lck_rw_lock_shared(lck_rw_t*)
2036 *
2037 */
2038 .align 5
2039 .globl EXT(lck_rw_lock_shared)
2040 LEXT(lck_rw_lock_shared)
2041 #if !MACH_LDEBUG
2042 .globl EXT(lock_read)
2043 LEXT(lock_read)
2044 #endif
2045 rwlsloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2046 andi. r7,r5,WANT_EXCL|WANT_UPGRADE|ILK_LOCKED ; Can we have it?
2047 addis r6,r5,1 ; Increment read cnt
2048 bne-- rwlsspin ; Branch if cannot be held
2049 stwcx. r6,RW_DATA,r3 ; Update lock word
2050 bne-- rwlsloop
2051 .globl EXT(rwlsPatch_isync)
2052 LEXT(rwlsPatch_isync)
2053 isync
2054 blr
2055 rwlsspin:
2056 li r4,lgKillResv ; Killing field
2057 stwcx. r4,0,r4 ; Kill it
2058 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2059 bne-- rwlsspin1 ; No, go handle contention
2060 mr r4,r3 ; pass lock pointer
2061 lwz r3,RW_PTR(r3) ; load lock ext pointer
2062 b EXT(lck_rw_lock_shared_ext)
2063 rwlsspin1:
2064 b EXT(lck_rw_lock_shared_gen)
2065
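/*
 * Illustrative sketch only: the shared-acquire fast path above in rough C,
 * assuming the lock-word layout implied by the addis increments and the 0xFFFF
 * read-count masks used in this file (flag bits in the low byte, reader count
 * in the upper 16 bits); a GCC-style compare-and-swap stands in for
 * lwarx/stwcx.
 *
 *	#include <stdint.h>
 *
 *	#define ILK_LOCKED   0x01u
 *	#define WAIT_FLAG    0x02u
 *	#define WANT_UPGRADE 0x04u
 *	#define WANT_EXCL    0x08u
 *	#define RD_ONE       0x00010000u   // one reader, kept in the high halfword
 *
 *	// Fast path only: contention and indirect (RW_IND) locks go to
 *	// lck_rw_lock_shared_gen/_ext in the code above.
 *	static int rw_shared_fastpath(volatile uint32_t *data)
 *	{
 *	    for (;;) {
 *	        uint32_t oldv = *data;
 *	        if (oldv & (WANT_EXCL | WANT_UPGRADE | ILK_LOCKED))
 *	            return 0;                   // writer/upgrader/interlock: slow path
 *	        if (__sync_bool_compare_and_swap(data, oldv, oldv + RD_ONE))
 *	            return 1;                   // reader count bumped
 *	    }
 *	}
 */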
2066 /*
2067 * boolean_t lck_rw_lock_shared_to_exclusive(lck_rw_t*)
2068 *
2069 */
2070 .align 5
2071 .globl EXT(lck_rw_lock_shared_to_exclusive)
2072 LEXT(lck_rw_lock_shared_to_exclusive)
2073 #if !MACH_LDEBUG
2074 .globl EXT(lock_read_to_write)
2075 LEXT(lock_read_to_write)
2076 #endif
2077 rwlseloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2078 addis r6,r5,0xFFFF ; Decrement read cnt
2079 lis r8,0xFFFF ; Get read count mask
2080 ori r8,r8,WANT_UPGRADE|ILK_LOCKED ; Include Interlock and upgrade flags
2081 and. r7,r6,r8 ; Can we have it?
2082 ori r9,r6,WANT_UPGRADE ; Mark Exclusive
2083 bne-- rwlsespin ; Branch if cannot be held
2084 stwcx. r9,RW_DATA,r3 ; Update lock word
2085 bne-- rwlseloop
2086 .globl EXT(rwlsePatch_isync)
2087 LEXT(rwlsePatch_isync)
2088 isync
2089 li r3,0 ; Succeed, return FALSE...
2090 blr
2091 rwlsespin:
2092 li r4,lgKillResv ; Killing field
2093 stwcx. r4,0,r4 ; Kill it
2094 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2095 bne-- rwlsespin1 ; No, go handle contention
2096 mr r4,r3 ; pass lock pointer
2097 lwz r3,RW_PTR(r3) ; load lock ext pointer
2098 b EXT(lck_rw_lock_shared_to_exclusive_ext)
2099 rwlsespin1:
2100 b EXT(lck_rw_lock_shared_to_exclusive_gen)
2101
2102
2103
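/*
 * Illustrative sketch only: the upgrade fast path above in rough C. The one
 * atomic step drops our single read reference and claims WANT_UPGRADE, and it
 * only succeeds when we were the last reader and nobody else holds the
 * interlock or an upgrade; everything else goes to the _gen/_ext slow path.
 *
 *	#include <stdint.h>
 *
 *	#define ILK_LOCKED   0x01u
 *	#define WANT_UPGRADE 0x04u
 *	#define RD_MASK      0xFFFF0000u   // reader count (upper 16 bits)
 *	#define RD_ONE       0x00010000u
 *
 *	static int rw_upgrade_fastpath(volatile uint32_t *data)
 *	{
 *	    for (;;) {
 *	        uint32_t oldv = *data;
 *	        uint32_t newv = oldv - RD_ONE;          // drop our read reference
 *	        if (newv & (RD_MASK | WANT_UPGRADE | ILK_LOCKED))
 *	            return 0;                           // other readers or upgrader: slow path
 *	        if (__sync_bool_compare_and_swap(data, oldv, newv | WANT_UPGRADE))
 *	            return 1;                           // upgraded
 *	    }
 *	}
 */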
2104 /*
2105 * void lck_rw_lock_exclusive_to_shared(lck_rw_t*)
2106 *
2107 */
2108 .align 5
2109 .globl EXT(lck_rw_lock_exclusive_to_shared)
2110 LEXT(lck_rw_lock_exclusive_to_shared)
2111 #if !MACH_LDEBUG
2112 .globl EXT(lock_write_to_read)
2113 LEXT(lock_write_to_read)
2114 #endif
2115 .globl EXT(rwlesPatch_isync)
2116 LEXT(rwlesPatch_isync)
2117 isync
2118 .globl EXT(rwlesPatch_eieio)
2119 LEXT(rwlesPatch_eieio)
2120 eieio
2121 rwlesloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2122 andi. r7,r5,ILK_LOCKED ; Test interlock flag
2123 bne-- rwlesspin ; Branch if interlocked
2124 lis r6,1 ; Get 1 for read count
2125 andi. r10,r5,WANT_UPGRADE ; Is it held with upgrade
2126 li r9,WANT_UPGRADE|WAIT_FLAG ; Get upgrade and wait flags mask
2127 bne rwlesexcl1 ; Skip if held with upgrade
2128 li r9,WANT_EXCL|WAIT_FLAG ; Get exclusive and wait flags mask
2129 rwlesexcl1:
2130 andc r7,r5,r9 ; Marked free
2131 rlwimi r6,r7,0,16,31 ; Set shared cnt to one
2132 stwcx. r6,RW_DATA,r3 ; Update lock word
2133 bne-- rwlesloop
2134 andi. r7,r5,WAIT_FLAG ; Test wait flag
2135 beqlr++ ; Return if no waiters
2136 addi r3,r3,RW_EVENT ; Get lock event address
2137 b EXT(thread_wakeup) ; wakeup waiters
2138 rwlesspin:
2139 li r4,lgKillResv ; Killing field
2140 stwcx. r4,0,r4 ; Kill it
2141 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2142 bne-- rwlesspin1 ; No, go handle contention
2143 mr r4,r3 ; pass lock pointer
2144 lwz r3,RW_PTR(r3) ; load lock ext pointer
2145 b EXT(lck_rw_lock_exclusive_to_shared_ext)
2146 rwlesspin1:
2147 b EXT(lck_rw_lock_exclusive_to_shared_gen)
2148
2149
2150
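/*
 * Illustrative sketch only: the downgrade fast path above in rough C, with a
 * hypothetical wakeup_waiters() standing in for the thread_wakeup() call on
 * the lock event. One atomic step clears the exclusive (or upgrade) hold and
 * wait flag and installs a reader count of one.
 *
 *	#include <stdint.h>
 *
 *	#define ILK_LOCKED   0x01u
 *	#define WAIT_FLAG    0x02u
 *	#define WANT_UPGRADE 0x04u
 *	#define WANT_EXCL    0x08u
 *	#define RD_ONE       0x00010000u
 *
 *	extern void wakeup_waiters(volatile uint32_t *data);   // hypothetical stand-in
 *
 *	static void rw_downgrade_fastpath(volatile uint32_t *data)
 *	{
 *	    for (;;) {
 *	        uint32_t oldv = *data;
 *	        if (oldv & ILK_LOCKED)
 *	            continue;                           // interlocked: real code goes slow path
 *	        uint32_t hold = (oldv & WANT_UPGRADE) ? WANT_UPGRADE : WANT_EXCL;
 *	        uint32_t newv = (oldv & ~(hold | WAIT_FLAG)) | RD_ONE;   // one reader
 *	        if (__sync_bool_compare_and_swap(data, oldv, newv)) {
 *	            if (oldv & WAIT_FLAG)
 *	                wakeup_waiters(data);
 *	            return;
 *	        }
 *	    }
 *	}
 */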
2151 /*
2152 * boolean_t lck_rw_try_lock_exclusive(lck_rw_t*)
2153 *
2154 */
2155 .align 5
2156 .globl EXT(lck_rw_try_lock_exclusive)
2157 LEXT(lck_rw_try_lock_exclusive)
2158 lis r10,0xFFFF ; Load read count mask
2159 ori r10,r10,WANT_EXCL|WANT_UPGRADE ; Include exclusive and upgrade flags
2160 rwtleloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2161 andi. r7,r5,ILK_LOCKED ; Test interlock flag
2162 bne-- rwtlespin ; Branch if interlocked
2163 and. r7,r5,r10 ; Can we have it
2164 ori r6,r5,WANT_EXCL ; Mark Exclusive
2165 bne-- rwtlefail ;
2166 stwcx. r6,RW_DATA,r3 ; Update lock word
2167 bne-- rwtleloop
2168 .globl EXT(rwtlePatch_isync)
2169 LEXT(rwtlePatch_isync)
2170 isync
2171 li r3,1 ; Return TRUE
2172 blr
2173 rwtlefail:
2174 li r4,lgKillResv ; Killing field
2175 stwcx. r4,0,r4 ; Kill it
2176 li r3,0 ; Return FALSE
2177 blr
2178 rwtlespin:
2179 li r4,lgKillResv ; Killing field
2180 stwcx. r4,0,r4 ; Kill it
2181 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2182 bne-- rwtlespin1 ; No, go handle contention
2183 mr r4,r3 ; pass lock pointer
2184 lwz r3,RW_PTR(r3) ; load lock ext pointer
2185 b EXT(lck_rw_try_lock_exclusive_ext)
2186 rwtlespin1:
2187 b EXT(lck_rw_try_lock_exclusive_gen)
2188
2189
2190 /*
2191 * boolean_t lck_rw_try_lock_shared(lck_rw_t*)
2192 *
2193 */
2194 .align 5
2195 .globl EXT(lck_rw_try_lock_shared)
2196 LEXT(lck_rw_try_lock_shared)
2197 rwtlsloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2198 andi. r7,r5,ILK_LOCKED ; Test interlock flag
2199 bne-- rwtlsspin ; Branch if interlocked
2200 andi. r7,r5,WANT_EXCL|WANT_UPGRADE ; So, can we have it?
2201 addis r6,r5,1 ; Increment read cnt
2202 bne-- rwtlsfail ; Branch if held exclusive
2203 stwcx. r6,RW_DATA,r3 ; Update lock word
2204 bne-- rwtlsloop
2205 .globl EXT(rwtlsPatch_isync)
2206 LEXT(rwtlsPatch_isync)
2207 isync
2208 li r3,1 ; Return TRUE
2209 blr
2210 rwtlsfail:
2211 li r3,0 ; Return FALSE
2212 blr
2213 rwtlsspin:
2214 li r4,lgKillResv ; Killing field
2215 stwcx. r4,0,r4 ; Kill it
2216 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2217 bne-- rwtlsspin1 ; No, go handle contention
2218 mr r4,r3 ; pass lock pointer
2219 lwz r3,RW_PTR(r3) ; load lock ext pointer
2220 b EXT(lck_rw_try_lock_shared_ext)
2221 rwtlsspin1:
2222 b EXT(lck_rw_try_lock_shared_gen)
2223
2224
2225
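/*
 * Caller-side sketch only, with stand-in typedefs in place of the kernel lock
 * headers: the try variants above return immediately instead of spinning, so a
 * caller can decline the work when the lock is busy; either kind of hold is
 * released with lck_rw_done(). The function name is hypothetical.
 *
 *	typedef struct lck_rw  lck_rw_t;        // opaque stand-in
 *	typedef int            boolean_t;
 *	typedef unsigned int   lck_rw_type_t;
 *
 *	extern boolean_t     lck_rw_try_lock_shared(lck_rw_t *lck);
 *	extern lck_rw_type_t lck_rw_done(lck_rw_t *lck);
 *
 *	static int peek_shared_data(lck_rw_t *lck)
 *	{
 *	    if (!lck_rw_try_lock_shared(lck))
 *	        return 0;                       // a writer or upgrader holds it; don't wait
 *	    // ... read the protected data ...
 *	    (void)lck_rw_done(lck);
 *	    return 1;
 *	}
 */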
2226 /*
2227 * lck_rw_type_t lck_rw_done(lck_rw_t*)
2228 *
2229 */
2230 .align 5
2231 .globl EXT(lck_rw_done)
2232 LEXT(lck_rw_done)
2233 #if !MACH_LDEBUG
2234 .globl EXT(lock_done)
2235 LEXT(lock_done)
2236 #endif
2237 .globl EXT(rwldPatch_isync)
2238 LEXT(rwldPatch_isync)
2239 isync
2240 .globl EXT(rwldPatch_eieio)
2241 LEXT(rwldPatch_eieio)
2242 eieio
2243 li r10,WAIT_FLAG ; Get wait flag
2244 lis r7,0xFFFF ; Get read cnt mask
2245 mr r12,r3 ; Save lock addr
2246 rwldloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2247 andi. r8,r5,ILK_LOCKED ; Test interlock flag
2248 bne-- rwldspin ; Branch if interlocked
2249 and. r8,r5,r7 ; Is it shared
2250 cmpi cr1,r8,0 ; Remember shared vs. exclusive in cr1 for the return path
2251 beq cr1,rwldexcl ; No, check exclusive
2252 li r11,RW_SHARED ; Set return value
2253 addis r6,r5,0xFFFF ; Decrement read count
2254 and. r8,r6,r7 ; Is it still shared
2255 li r8,0 ; Assume no wakeup
2256 bne rwldshared1 ; Skip if still held shared
2257 and r8,r6,r10 ; Extract wait flag
2258 andc r6,r6,r10 ; Clear wait flag
2259 rwldshared1:
2260 b rwldstore
2261 rwldexcl:
2262 li r11,RW_EXCL ; Set return value
2263 li r9,WANT_UPGRADE ; Get upgrade flag
2264 and. r6,r5,r9 ; Is it held with upgrade
2265 li r9,WANT_UPGRADE|WAIT_FLAG ; Mask upgrade and wait flags
2266 bne rwldexcl1 ; Skip if held with upgrade
2267 li r9,WANT_EXCL|WAIT_FLAG ; Mask exclusive and wait flags
2268 rwldexcl1:
2269 andc r6,r5,r9 ; Marked free
2270 and r8,r5,r10 ; Null if no waiter
2271 rwldstore:
2272 stwcx. r6,RW_DATA,r3 ; Update lock word
2273 bne-- rwldloop
2274 mr. r8,r8 ; wakeup needed?
2275 mr r3,r11 ; Return lock held type
2276 beqlr++
2277 mr r3,r12 ; Restore lock address
2278 PROLOG(0)
2279 addi r3,r3,RW_EVENT ; Get lock event address
2280 bl EXT(thread_wakeup) ; wakeup threads
2281 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
2282 mtcr r2
2283 EPILOG
2284 li r3,RW_SHARED ; Assume lock type shared
2285 bne cr1,rwldret ; Branch if it was held shared
2286 li r3,RW_EXCL ; Return lock type exclusive
2287 rwldret:
2288 blr
2289 rwldspin:
2290 li r4,lgKillResv ; Killing field
2291 stwcx. r4,0,r4 ; Kill it
2292 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2293 bne-- rwldspin1 ; No, go handle contention
2294 mr r4,r3 ; pass lock pointer
2295 lwz r3,RW_PTR(r3) ; load lock ext pointer
2296 b EXT(lck_rw_done_ext)
2297 rwldspin1:
2298 b EXT(lck_rw_done_gen)
2299
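/*
 * Illustrative sketch only: the release logic above in rough C, assuming the
 * same word layout and a hypothetical wakeup_waiters() in place of
 * thread_wakeup() on the lock event. The lock word says whether the hold being
 * dropped was shared or exclusive, and that is what gets returned.
 *
 *	#include <stdint.h>
 *
 *	#define ILK_LOCKED   0x01u
 *	#define WAIT_FLAG    0x02u
 *	#define WANT_UPGRADE 0x04u
 *	#define WANT_EXCL    0x08u
 *	#define RD_MASK      0xFFFF0000u
 *	#define RD_ONE       0x00010000u
 *
 *	enum rw_held { RW_HELD_SHARED, RW_HELD_EXCL };   // stand-ins for RW_SHARED/RW_EXCL
 *
 *	extern void wakeup_waiters(volatile uint32_t *data);   // hypothetical stand-in
 *
 *	static enum rw_held rw_done_fastpath(volatile uint32_t *data)
 *	{
 *	    for (;;) {
 *	        uint32_t oldv = *data, newv;
 *	        enum rw_held held;
 *	        int wake = 0;
 *
 *	        if (oldv & ILK_LOCKED)
 *	            continue;                           // interlocked: real code goes slow path
 *	        if (oldv & RD_MASK) {                   // held shared
 *	            held = RW_HELD_SHARED;
 *	            newv = oldv - RD_ONE;
 *	            if (!(newv & RD_MASK)) {            // last reader out
 *	                wake = newv & WAIT_FLAG;
 *	                newv &= ~WAIT_FLAG;
 *	            }
 *	        } else {                                // held exclusive or for upgrade
 *	            held = RW_HELD_EXCL;
 *	            wake = oldv & WAIT_FLAG;
 *	            newv = oldv & ~(((oldv & WANT_UPGRADE) ? WANT_UPGRADE : WANT_EXCL) | WAIT_FLAG);
 *	        }
 *	        if (__sync_bool_compare_and_swap(data, oldv, newv)) {
 *	            if (wake)
 *	                wakeup_waiters(data);
 *	            return held;
 *	        }
 *	    }
 *	}
 */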
2300 /*
2301 * void lck_rw_ilk_lock(lck_rw_t *lock)
2302 */
2303 .globl EXT(lck_rw_ilk_lock)
2304 LEXT(lck_rw_ilk_lock)
2305 crclr hwtimeout ; no timeout option
2306 li r4,0 ; request default timeout value
2307 li r12,ILK_LOCKED ; Load bit mask
2308 b lckcomm ; Join on up...
2309
2310 /*
2311 * void lck_rw_ilk_unlock(lck_rw_t *lock)
2312 */
2313 .globl EXT(lck_rw_ilk_unlock)
2314 LEXT(lck_rw_ilk_unlock)
2315 li r4,1
2316 b EXT(hw_unlock_bit)