2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <mach_assert.h>
30 #include <mach_ldebug.h>
32 #include <ppc/proc_reg.h>
37 #define ILK_LOCKED 0x01
38 #define WAIT_FLAG 0x02
39 #define WANT_UPGRADE 0x04
40 #define WANT_EXCL 0x08
42 #define TH_FN_OWNED 0x01
50 #define PROLOG(space) \
51 stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
54 stw r3,FM_ARG0(r1) __ASMNL__ \
55 stw r11,FM_ARG0+0x04(r1) __ASMNL__ \
56 stw r2,(FM_ALIGN(space)+FM_SIZE+FM_CR_SAVE)(r1) __ASMNL__ \
57 stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__
60 lwz r1,0(r1) __ASMNL__ \
61 lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
65 * void hw_lock_init(hw_lock_t)
67 * Initialize a hardware lock.
70 .globl EXT(hw_lock_init)
74 li r0, 0 ; set lock to free == 0
75 stw r0, 0(r3) ; Initialize the lock
79 * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
81 * Try to acquire spin-lock. The second parameter is the bit mask to test and set.
82 * Multiple bits may be set. Return success (1) or failure (0).
83 * Attempt will fail after timeout ticks of the timebase.
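/*
 * Illustrative caller sketch (hypothetical C, not part of this file), showing
 * how a bit-style spin lock might be taken and released. The my_lock word,
 * the MY_BUSY mask, and the use of LockTimeOut as the budget are assumptions
 * made up for the example.
 *
 *	#define MY_BUSY	0x01				// caller-chosen bit(s) within the lock word
 *	hw_lock_data_t	my_lock;			// assumed one-word lock; see hw_lock_init()
 *
 *	hw_lock_init(&my_lock);
 *	if (hw_lock_bit(&my_lock, MY_BUSY, LockTimeOut)) {	// 1 == acquired
 *		// ... critical section ...
 *		hw_unlock_bit(&my_lock, MY_BUSY);	// clear the same mask to release
 *	}
 */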
86 .globl EXT(hw_lock_bit)
90 crset hwtimeout ; timeout option
91 mr r12,r4 ; Load bit mask
92 mr r4,r5 ; Load timeout value
93 b lckcomm ; Join on up...
96 * void hw_lock_lock(hw_lock_t)
98 * Acquire lock, spinning until it becomes available.
99 * Return with preemption disabled.
100 * We will just set a default timeout and jump into the NORMAL timeout lock.
103 .globl EXT(hw_lock_lock)
106 crclr hwtimeout ; no timeout option
107 li r4,0 ; request default timeout value
108 li r12,ILK_LOCKED ; Load bit mask
109 b lckcomm ; Join on up...
112 crset hwtimeout ; timeout option
113 li r4,0 ; request default timeout value
114 li r12,ILK_LOCKED ; Load bit mask
115 b lckcomm ; Join on up...
118 * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
120 * Try to acquire spin-lock. Return success (1) or failure (0).
121 * Attempt will fail after timeout ticks of the timebase.
122 * We try fairly hard to get this lock. We disable for interruptions, but
123 * reenable after a "short" timeout (128 ticks, we may want to change this).
124 * After checking to see if the large timeout value (passed in) has expired and a
125 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
126 * we return either in abject failure, or disable and go back to the lock sniff routine.
127 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
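/*
 * Illustrative caller sketch (hypothetical C, not part of this file): a
 * bounded acquire using the routine below. The my_lock word is the same
 * assumed lock as in the earlier sketch, and the 10000-tick budget is an
 * assumption made up for the example.
 *
 *	if (hw_lock_to(&my_lock, 10000)) {	// 1 == acquired within the budget
 *		// ... critical section (preemption is disabled here) ...
 *		hw_lock_unlock(&my_lock);
 *	} else {
 *		// lock never came free within ~10000 timebase ticks; back off or panic
 *	}
 */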
130 .globl EXT(hw_lock_to)
133 crset hwtimeout ; timeout option
134 li r12,ILK_LOCKED ; Load bit mask
136 mfsprg r6,1 ; Get the current activation
137 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
138 addi r5,r5,1 ; Bring up the disable count
139 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
140 mr r5,r3 ; Get the address of the lock
141 li r8,0 ; Set r8 to zero
143 lcktry: lwarx r6,0,r5 ; Grab the lock value
144 and. r3,r6,r12 ; Is it locked?
145 or r6,r6,r12 ; Set interlock
146 bne-- lckspin ; Yeah, wait for it to clear...
147 stwcx. r6,0,r5 ; Try to seize that there durn lock
148 bne-- lcktry ; Couldn't get it...
149 li r3,1 ; return true
150 .globl EXT(hwllckPatch_isync)
151 LEXT(hwllckPatch_isync)
152 isync ; Make sure we don't use a speculatively loaded value
155 lckspin: li r6,lgKillResv ; Get killing field
156 stwcx. r6,0,r6 ; Kill reservation
158 mr. r4,r4 ; Test timeout value
160 lis r4,hi16(EXT(LockTimeOut)) ; Get the high part
161 ori r4,r4,lo16(EXT(LockTimeOut)) ; And the low part
162 lwz r4,0(r4) ; Get the timeout value
164 mr. r8,r8 ; Is r8 set to zero
165 bne++ lockspin1 ; Branch if not the first spin attempt
166 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
167 mfmsr r9 ; Get the MSR value
168 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
169 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
170 andc r9,r9,r0 ; Clear FP and VEC
171 andc r7,r9,r7 ; Clear EE as well
172 mtmsr r7 ; Turn off interruptions
173 isync ; May have turned off vec and fp here
174 mftb r8 ; Get timestamp on entry
177 lockspin1: mtmsr r7 ; Turn off interruptions
178 mftb r8 ; Get timestamp on entry
180 lcksniff: lwz r3,0(r5) ; Get that lock in here
181 and. r3,r3,r12 ; Is it free yet?
182 beq++ lckretry ; Yeah, try for it again...
184 mftb r10 ; Time stamp us now
185 sub r10,r10,r8 ; Get the elapsed time
186 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
187 blt++ lcksniff ; Not yet...
189 mtmsr r9 ; Say, any interrupts pending?
191 ; The following instructions force the pipeline to be interlocked so that only one
192 ; instruction is issued per cycle. This ensures that we stay enabled for a long enough
193 ; time; if the window is too short, pending interruptions will not have a chance to be taken
195 subi r4,r4,128 ; Back off elapsed time from timeout value
196 or r4,r4,r4 ; Do nothing here but force a single cycle delay
197 mr. r4,r4 ; See if we used the whole timeout
198 li r3,0 ; Assume a timeout return code
199 or r4,r4,r4 ; Do nothing here but force a single cycle delay
201 ble-- lckfail ; We failed
202 b lockspin1 ; Now that we've opened an enable window, keep trying...
204 mtmsr r9 ; Restore interrupt state
205 li r8,1 ; Ensure that R8 is not 0
207 lckfail: ; We couldn't get the lock
208 bf hwtimeout,lckpanic
209 li r3,0 ; Set failure return code
210 blr ; Return, head hanging low...
214 lis r3,hi16(lckpanic_str) ; Get the failed lck message
215 ori r3,r3,lo16(lckpanic_str) ; Get the failed lck message
217 BREAKPOINT_TRAP ; We die here anyway
220 STRINGD "timeout on attempt to acquire lock (0x%08X), value = 0x%08X\n\000"
224 * void hw_lock_unlock(hw_lock_t)
226 * Unconditionally release lock.
227 * Release preemption level.
230 .globl EXT(hw_lock_unlock)
234 .globl EXT(hwulckPatch_isync)
235 LEXT(hwulckPatch_isync)
237 .globl EXT(hwulckPatch_eieio)
238 LEXT(hwulckPatch_eieio)
240 li r0, 0 ; set lock to free
243 b epStart ; Go enable preemption...
246 * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
248 * Release bit based spin-lock. The second parameter is the bit mask to clear.
249 * Multiple bits may be cleared.
253 .globl EXT(hw_unlock_bit)
257 .globl EXT(hwulckbPatch_isync)
258 LEXT(hwulckbPatch_isync)
260 .globl EXT(hwulckbPatch_eieio)
261 LEXT(hwulckbPatch_eieio)
263 ubittry: lwarx r0,0,r3 ; Grab the lock value
264 andc r0,r0,r4 ; Clear the lock bits
265 stwcx. r0,0,r3 ; Try to clear that there durn lock
266 bne- ubittry ; Try again, couldn't save it...
268 b epStart ; Go enable preemption...
271 * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
272 * unsigned int newb, unsigned int timeout)
274 * Try to acquire spin-lock. The second parameter is the bit mask to check.
275 * The third is the value of those bits and the 4th is what to set them to.
276 * Return success (1) or failure (0).
277 * Attempt will fail after timeout ticks of the timebase.
278 * We try fairly hard to get this lock. We disable for interruptions, but
279 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
280 * After checking to see if the large timeout value (passed in) has expired and a
281 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
282 * we return either in abject failure, or disable and go back to the lock sniff routine.
283 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
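/*
 * Illustrative caller sketch (hypothetical C, not part of this file): wait for
 * a two-bit state field to read IDLE and atomically switch it to BUSY, leaving
 * the other bits of the word alone. The STATE_* values and the my_state word
 * are assumptions made up for the example.
 *
 *	#define STATE_MASK	0x03
 *	#define STATE_IDLE	0x00
 *	#define STATE_BUSY	0x01
 *	hw_lock_data_t	my_state;		// assumed one-word state/lock
 *
 *	if (hw_lock_mbits(&my_state, STATE_MASK, STATE_IDLE, STATE_BUSY, LockTimeOut)) {
 *		// field was IDLE and is now BUSY; the remaining bits are untouched
 *	}
 */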
286 .globl EXT(hw_lock_mbits)
292 mbittry: lwarx r12,0,r3 ; Grab the lock value
293 and r0,r12,r4 ; Clear extra bits
294 andc r12,r12,r4 ; Clear all bits in the bit mask
295 or r12,r12,r6 ; Turn on the lock bits
296 cmplw r0,r5 ; Are these the right bits?
297 bne-- mbitspin ; Nope, wait for it to clear...
298 stwcx. r12,0,r3 ; Try to seize that there durn lock
299 beq++ mbitgot ; We got it, yahoo...
300 b mbittry ; Just start up again if the store failed...
303 mbitspin: li r11,lgKillResv ; Point to killing field
304 stwcx. r11,0,r11 ; Kill it
306 mr. r10,r10 ; Is r10 set to zero
307 bne++ mbitspin0 ; Branch if not the first spin attempt
308 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
309 mfmsr r9 ; Get the MSR value
310 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
311 ori r8,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
312 andc r9,r9,r0 ; Clear FP and VEC
313 andc r8,r9,r8 ; Clear EE as well
314 mtmsr r8 ; Turn off interruptions
315 isync ; May have turned off vectors or float here
316 mftb r10 ; Get the low part of the time base
319 mtmsr r8 ; Turn off interruptions
320 mftb r10 ; Get the low part of the time base
322 lwz r12,0(r3) ; Get that lock in here
323 and r0,r12,r4 ; Clear extra bits
324 cmplw r0,r5 ; Are these the right bits?
325 beq++ mbitretry ; Yeah, try for it again...
327 mftb r11 ; Time stamp us now
328 sub r11,r11,r10 ; Get the elapsed time
329 cmplwi r11,128 ; Have we been spinning for 128 tb ticks?
330 blt++ mbitsniff ; Not yet...
332 mtmsr r9 ; Say, any interrupts pending?
334 ; The following instructions force the pipeline to be interlocked so that only one
335 ; instruction is issued per cycle. This ensures that we stay enabled for a long enough
336 ; time. If it is too short, pending interruptions will not have a chance to be taken
338 subi r7,r7,128 ; Back off elapsed time from timeout value
339 or r7,r7,r7 ; Do nothing here but force a single cycle delay
340 mr. r7,r7 ; See if we used the whole timeout
341 or r7,r7,r7 ; Do nothing here but force a single cycle delay
343 ble-- mbitfail ; We failed
344 b mbitspin0 ; Now that we have opened an enable window, keep trying...
346 mtmsr r9 ; Enable for interruptions
347 li r10,1 ; Make sure this is non-zero
352 li r3,1 ; Set good return code
353 .globl EXT(hwlmlckPatch_isync)
354 LEXT(hwlmlckPatch_isync)
355 isync ; Make sure we do not use a speculatively loaded value
358 mbitfail: li r3,0 ; Set failure return code
359 blr ; Return, head hanging low...
362 * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
364 * Spin until word hits 0 or timeout.
365 * Return success (1) or failure (0).
366 * Attempt will fail after timeout ticks of the timebase.
368 * The theory is that a processor will bump a counter as it signals
369 * other processors. Then it will spin until the counter hits 0 (or
370 * times out). The other processors, as they receive the signal, will
371 * decrement the counter.
373 * The other processors use an interlocked update to decrement; this one
374 * does not need to interlock.
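/*
 * Illustrative sketch of the scheme described above (hypothetical C, not part
 * of this file). The outstanding counter, target_cpu_count, and the
 * signal_other_cpus() helper are assumptions; the decrement on the receiving
 * side would use an interlocked update such as hw_atomic_sub().
 *
 *	unsigned int outstanding;
 *
 *	// signalling processor:
 *	outstanding = target_cpu_count;		// one decrement expected per CPU signalled
 *	signal_other_cpus();			// hypothetical notification
 *	if (!hw_cpu_sync(&outstanding, LockTimeOut))
 *		panic("cpu sync timed out");
 *
 *	// each target processor, in its handler:
 *	(void) hw_atomic_sub(&outstanding, 1);
 */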
377 .globl EXT(hw_cpu_sync)
381 mftb r10 ; Get the low part of the time base
382 mr r9,r3 ; Save the sync word address
383 li r3,1 ; Assume we work
385 csynctry: lwz r11,0(r9) ; Grab the sync value
386 mr. r11,r11 ; Counter hit 0?
387 beqlr- ; Yeah, we are done...
388 mftb r12 ; Time stamp us now
390 sub r12,r12,r10 ; Get the elapsed time
391 cmplw r4,r12 ; Have we gone too long?
392 bge+ csynctry ; Not yet...
394 li r3,0 ; Set failure...
395 blr ; Return, head hanging low...
398 * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
400 * Spin until word changes or timeout.
401 * Return success (1) or failure (0).
402 * Attempt will fail after timeout ticks of the timebase.
404 * This is used to ensure that a processor passes a certain point.
405 * An example of use is to monitor the last interrupt time in the
406 * per_proc block. This can be used to ensure that the other processor
407 * has seen at least one interrupt since a specific time.
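/*
 * Illustrative sketch (hypothetical C, not part of this file): wait for another
 * processor's "last interrupt time" word to move past a snapshot, showing it
 * has taken at least one interrupt since we looked. The remote_intr_time word
 * is an assumption standing in for the real per_proc field.
 *
 *	volatile unsigned int remote_intr_time;	// stand-in for the other CPU's per_proc field
 *
 *	unsigned int snapshot = remote_intr_time;
 *	if (!hw_cpu_wcng((unsigned int *)&remote_intr_time, snapshot, LockTimeOut)) {
 *		// the word never changed within the timeout
 *	}
 */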
410 .globl EXT(hw_cpu_wcng)
414 mftb r10 ; Get the low part of the time base
415 mr r9,r3 ; Save the sync word address
416 li r3,1 ; Assume we work
418 wcngtry: lwz r11,0(r9) ; Grab the value
419 cmplw r11,r4 ; Do they still match?
420 bnelr- ; Nope, cool...
421 mftb r12 ; Time stamp us now
423 sub r12,r12,r10 ; Get the elapsed time
424 cmplw r5,r12 ; Have we gone too long?
425 bge+ wcngtry ; Not yet...
427 li r3,0 ; Set failure...
428 blr ; Return, head hanging low...
432 * unsigned int hw_lock_try(hw_lock_t)
434 * Try to acquire spin-lock. Return success (1) or failure (0)
435 * Returns with preemption disabled on success.
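/*
 * Illustrative caller sketch (hypothetical C, not part of this file): an
 * opportunistic acquire that defers work instead of spinning. The my_lock
 * word is the same assumed lock as in the earlier sketches.
 *
 *	if (hw_lock_try(&my_lock)) {		// 1 == got it; preemption is now disabled
 *		// ... critical section ...
 *		hw_lock_unlock(&my_lock);	// also releases the preemption level
 *	} else {
 *		// lock is busy; defer the work rather than spin
 *	}
 */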
439 .globl EXT(hw_lock_try)
443 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
444 mfmsr r9 ; Get the MSR value
445 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
446 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
447 andc r9,r9,r0 ; Clear FP and VEC
448 andc r7,r9,r7 ; Clear EE as well
450 mtmsr r7 ; Disable interruptions and thus, preemption
452 lwz r5,0(r3) ; Quick load
453 andi. r6,r5,ILK_LOCKED ; TEST...
454 bne-- .L_lock_try_failed ; No go...
457 lwarx r5,0,r3 ; Ld from addr of arg and reserve
459 andi. r6,r5,ILK_LOCKED ; TEST...
461 bne-- .L_lock_try_failedX ; branch if taken. Predict free
463 stwcx. r5,0,r3 ; And SET (if still reserved)
464 bne-- .L_lock_try_loop ; If set failed, loop back
466 .globl EXT(hwltlckPatch_isync)
467 LEXT(hwltlckPatch_isync)
470 mfsprg r6,1 ; Get current activation
471 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
472 addi r5,r5,1 ; Bring up the disable count
473 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
475 mtmsr r9 ; Allow interruptions now
476 li r3,1 ; Set that the lock was free
480 li r6,lgKillResv ; Killing field
481 stwcx. r6,0,r6 ; Kill reservation
484 mtmsr r9 ; Allow interruptions now
485 li r3,0 ; FAILURE - lock was taken
489 * unsigned int hw_lock_held(hw_lock_t)
491 * Return 1 if lock is held
492 * Doesn't change preemption state.
493 * N.B. Racy, of course.
496 .globl EXT(hw_lock_held)
500 isync ; Make sure we don't use a speculatively fetched lock
501 lwz r3, 0(r3) ; Get lock value
502 andi. r6,r3,ILK_LOCKED ; Extract the ILK_LOCKED bit
506 * uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
508 * Compare old to the area; if equal, store new and return true,
509 * else return false and do not store.
510 * This is an atomic operation
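/*
 * Illustrative sketch (hypothetical C, not part of this file): the usual
 * read-modify-CAS retry loop. The shared flags word and NEW_FLAG bit are
 * assumptions made up for the example.
 *
 *	#define NEW_FLAG	0x01
 *	uint32_t flags;				// assumed shared word, visible to other CPUs
 *	uint32_t old, new;
 *
 *	do {
 *		old = flags;			// snapshot the current value
 *		new = old | NEW_FLAG;		// compute the desired value
 *	} while (!hw_compare_and_store(old, new, &flags));	// retry if it changed underneath us
 */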
513 .globl EXT(hw_compare_and_store)
515 LEXT(hw_compare_and_store)
517 mr r6,r3 ; Save the old value
519 cstry: lwarx r9,0,r5 ; Grab the area value
520 li r3,1 ; Assume it works
521 cmplw cr0,r9,r6 ; Does it match the old value?
522 bne-- csfail ; No, it must have changed...
523 stwcx. r4,0,r5 ; Try to save the new value
524 bne-- cstry ; Didn't get it, try again...
525 .globl EXT(hwcsatomicPatch_isync)
526 LEXT(hwcsatomicPatch_isync)
527 isync ; Just hold up prefetch
530 csfail: li r3,lgKillResv ; Killing field
531 stwcx. r3,0,r3 ; Blow reservation
533 li r3,0 ; Set failure
534 blr ; Better luck next time...
538 * uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
540 * Atomically add the second parameter to the first.
541 * Returns the result.
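/*
 * Illustrative sketch (hypothetical C, not part of this file): a simple
 * reference count kept with the interlocked arithmetic routines in this file.
 * The refcnt word is an assumption made up for the example.
 *
 *	uint32_t refcnt = 1;
 *
 *	(void) hw_atomic_add(&refcnt, 1);		// take a reference
 *	if (hw_atomic_sub(&refcnt, 1) == 0) {		// drop one; the new value is returned
 *		// last reference just went away
 *	}
 */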
545 .globl EXT(hw_atomic_add)
549 mr r6,r3 ; Save the area
551 addtry: lwarx r3,0,r6 ; Grab the area value
552 add r3,r3,r4 ; Add the value
553 stwcx. r3,0,r6 ; Try to save the new value
554 bne-- addtry ; Didn't get it, try again...
559 * uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt)
561 * Atomically subtract the second parameter from the first.
562 * Returns the result.
566 .globl EXT(hw_atomic_sub)
570 mr r6,r3 ; Save the area
572 subtry: lwarx r3,0,r6 ; Grab the area value
573 sub r3,r3,r4 ; Subtract the value
574 stwcx. r3,0,r6 ; Try to save the new value
575 bne-- subtry ; Didn't get it, try again...
580 * uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask)
582 * Atomically ORs the second parameter into the first.
583 * Returns the result.
586 .globl EXT(hw_atomic_or)
590 mr r6,r3 ; Save the area
592 ortry: lwarx r3,0,r6 ; Grab the area value
593 or r3,r3,r4 ; OR the value
594 stwcx. r3,0,r6 ; Try to save the new value
595 bne-- ortry ; Did not get it, try again...
600 * uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask)
602 * Atomically ANDs the second parameter with the first.
603 * Returns the result.
607 .globl EXT(hw_atomic_and)
611 mr r6,r3 ; Save the area
613 andtry: lwarx r3,0,r6 ; Grab the area value
614 and r3,r3,r4 ; AND the value
615 stwcx. r3,0,r6 ; Try to save the new value
616 bne-- andtry ; Did not get it, try again...
621 * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
623 * Atomically inserts the element at the head of the list
624 * anchor is the pointer to the first element
625 * element is the pointer to the element to insert
626 * disp is the displacement into the element to the chain pointer
628 * NOTE: OSEnqueueAtomic() is aliased to this, see xnu/libkern/Makefile
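/*
 * Illustrative sketch (hypothetical C, not part of this file): pushing an
 * element whose chain pointer does not sit at offset 0. The my_elem_t type
 * and the free_list anchor are assumptions made up for the example
 * (offsetof comes from <stddef.h>).
 *
 *	typedef struct my_elem {
 *		unsigned int	data;
 *		struct my_elem	*next;		// chain pointer; its offset is the disp argument
 *	} my_elem_t;
 *
 *	unsigned int	free_list = 0;		// anchor word holding the head pointer
 *	my_elem_t	elem;
 *
 *	hw_queue_atomic(&free_list, (unsigned int *)&elem,
 *			offsetof(my_elem_t, next));	// elem becomes the new head
 */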
631 .globl EXT(hw_queue_atomic)
633 LEXT(hw_queue_atomic)
635 mr r7,r4 ; Make end point the same as start
636 mr r8,r5 ; Copy the displacement also
637 b hw_queue_comm ; Join common code...
640 * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
642 * Atomically inserts the list of elements at the head of the list
643 * anchor is the pointer to the first element
644 * first is the pointer to the first element to insert
645 * last is the pointer to the last element to insert
646 * disp is the displacement into the element to the chain pointer
649 .globl EXT(hw_queue_atomic_list)
651 LEXT(hw_queue_atomic_list)
653 mr r7,r5 ; Make end point the same as start
654 mr r8,r6 ; Copy the displacement also
657 lwarx r9,0,r3 ; Pick up the anchor
658 stwx r9,r8,r7 ; Chain that to the end of the new stuff
659 eieio ; Make sure this store makes it before the anchor update
660 stwcx. r4,0,r3 ; Try to chain into the front
661 bne-- hw_queue_comm ; Didn't make it, try again...
666 * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
668 * Atomically removes the first element in a list and returns it.
669 * anchor is the pointer to the first element
670 * disp is the displacement into the element to the chain pointer
671 * Returns element if found, 0 if empty.
673 * NOTE: OSDequeueAtomic() is aliased to this, see xnu/libkern/Makefile
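/*
 * Illustrative sketch (hypothetical C, not part of this file), continuing the
 * free_list example above: pop the head element, if there is one.
 *
 *	my_elem_t *e = (my_elem_t *)hw_dequeue_atomic(&free_list, offsetof(my_elem_t, next));
 *	if (e != 0) {
 *		// e was unlinked atomically and now belongs to us
 *	}
 */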
676 .globl EXT(hw_dequeue_atomic)
678 LEXT(hw_dequeue_atomic)
680 mr r5,r3 ; Save the anchor
683 lwarx r3,0,r5 ; Pick up the anchor
684 mr. r3,r3 ; Is the list empty?
685 beq-- hdcFail ; Leave, the list is empty...
686 lwzx r9,r4,r3 ; Get the next in line
687 stwcx. r9,0,r5 ; Try to chain into the front
688 beqlr++ ; Got the thing, go away with it...
689 b hw_dequeue_comm ; Did not make it, try again...
691 hdcFail: li r4,lgKillResv ; Killing field
692 stwcx. r4,0,r4 ; Dump reservation
697 * Routines for mutex lock debugging.
701 * Gets lock check flags in CR6: CR bits 24-27
703 #define CHECK_SETUP(rg) \
704 lbz rg,lglcksWork(0) __ASMNL__ \
709 * Checks for expected lock type.
711 #define CHECK_MUTEX_TYPE() \
712 bf MUTEX_ATTR_DEBUGb,1f __ASMNL__ \
713 bt 24+disLktypeb,1f __ASMNL__ \
714 lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
715 cmpwi r10,MUTEX_TAG __ASMNL__ \
717 PROLOG(0) __ASMNL__ \
718 mr r4,r11 __ASMNL__ \
719 mr r5,r10 __ASMNL__ \
720 lis r3,hi16(not_a_mutex) __ASMNL__ \
721 ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
722 bl EXT(panic) __ASMNL__ \
723 BREAKPOINT_TRAP __ASMNL__ \
728 STRINGD "mutex (0x%08X) not a mutex type (0x%08X)\n\000"
732 * Verifies return to the correct thread in "unlock" situations.
734 #define CHECK_THREAD(thread_offset) \
735 bf MUTEX_ATTR_DEBUGb,3f __ASMNL__ \
736 bt 24+disLkThreadb,3f __ASMNL__ \
737 mfsprg r10,1 __ASMNL__ \
738 lwz r5,MUTEX_DATA(r3) __ASMNL__ \
739 rlwinm. r9,r5,0,0,29 __ASMNL__ \
741 lis r3,hi16(not_held) __ASMNL__ \
742 ori r3,r3,lo16(not_held) __ASMNL__ \
745 cmpw r9,r10 __ASMNL__ \
747 mr r5,r10 __ASMNL__ \
749 lis r3,hi16(wrong_thread) __ASMNL__ \
750 ori r3,r3,lo16(wrong_thread) __ASMNL__ \
752 mr r4,r11 __ASMNL__ \
753 PROLOG(0) __ASMNL__ \
754 bl EXT(panic) __ASMNL__ \
755 BREAKPOINT_TRAP __ASMNL__ \
760 STRINGD "mutex (0x%08X) not held\n\000"
762 STRINGD "mutex (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n\000"
765 #define CHECK_MYLOCK() \
766 bf MUTEX_ATTR_DEBUGb,1f __ASMNL__ \
767 bt 24+disLkMyLckb,1f __ASMNL__ \
768 mfsprg r10,1 __ASMNL__ \
769 lwz r9,MUTEX_DATA(r3) __ASMNL__ \
770 rlwinm r9,r9,0,0,29 __ASMNL__ \
771 cmpw r9,r10 __ASMNL__ \
773 mr r4,r11 __ASMNL__ \
774 lis r3, hi16(mylock_attempt) __ASMNL__ \
775 ori r3,r3,lo16(mylock_attempt) __ASMNL__ \
776 bl EXT(panic) __ASMNL__ \
777 BREAKPOINT_TRAP __ASMNL__ \
782 STRINGD "mutex (0x%08X) recursive lock attempt\n\000"
785 #define LCK_STACK(lck, stack, lck_stack, frame_cnt, lr_save, tmp) \
786 bf 24+enaLkExtStckb,3f __ASMNL__ \
787 addi lck_stack,lck,MUTEX_STACK __ASMNL__ \
788 li frame_cnt,MUTEX_FRAMES-1 __ASMNL__ \
790 mr tmp,stack __ASMNL__ \
791 lwz stack,0(stack) __ASMNL__ \
792 xor tmp,stack,tmp __ASMNL__ \
793 cmplwi tmp,8192 __ASMNL__ \
795 lwz lr_save,FM_LR_SAVE(stack) __ASMNL__ \
796 stwu lr_save,4(lck_stack) __ASMNL__ \
797 subi frame_cnt,frame_cnt,1 __ASMNL__ \
798 cmpi cr0,frame_cnt,0 __ASMNL__ \
803 stwu tmp,4(lck_stack) __ASMNL__ \
804 subi frame_cnt,frame_cnt,1 __ASMNL__ \
805 cmpi cr0,frame_cnt,0 __ASMNL__ \
810 * void mutex_init(mutex_t* l, etap_event_t etap)
814 .globl EXT(mutex_init)
819 stw r10,MUTEX_DATA(r3) ; clear lock word
820 sth r10,MUTEX_WAITERS(r3) ; init waiter count
821 sth r10,MUTEX_PROMOTED_PRI(r3)
823 li r11,MUTEX_ATTR_DEBUG
824 stw r10,MUTEX_STACK(r3) ; init caller pc
825 stw r10,MUTEX_THREAD(r3) ; and owning thread
827 stw r9, MUTEX_TYPE(r3) ; set lock type
828 stw r11,MUTEX_ATTR(r3)
829 addi r8,r3,MUTEX_STACK-4
832 stwu r10,4(r8) ; init stack
836 #endif /* MACH_LDEBUG */
841 * void lck_mtx_lock_ext(lck_mtx_ext_t*)
845 .globl EXT(lck_mtx_lock_ext)
846 LEXT(lck_mtx_lock_ext)
848 .globl EXT(mutex_lock)
851 .globl EXT(_mutex_lock)
854 mr r11,r3 ; Save lock addr
856 lwz r0,MUTEX_ATTR(r3)
861 bf MUTEX_ATTR_DEBUGb,L_mutex_lock_assert_wait_2
863 bl EXT(assert_wait_possible)
865 bne L_mutex_lock_assert_wait_1
866 lis r3,hi16(L_mutex_lock_assert_wait_panic_str)
867 ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
869 BREAKPOINT_TRAP ; We die here anyway
872 L_mutex_lock_assert_wait_panic_str:
873 STRINGD "mutex lock attempt with assert_wait_possible false\n\000"
876 L_mutex_lock_assert_wait_1:
878 lwz r11,FM_ARG0+0x04(r1)
879 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
882 L_mutex_lock_assert_wait_2:
884 mfsprg r6,1 ; load the current thread
885 bf MUTEX_ATTR_STATb,mlckestatskip ; Branch if no stat
886 lwz r5,MUTEX_GRP(r3) ; Load lock group
887 li r7,GRP_MTX_STAT_UTIL+4 ; Set stat util offset
889 lwarx r8,r7,r5 ; Load stat util cnt
890 addi r8,r8,1 ; Increment stat util cnt
891 stwcx. r8,r7,r5 ; Store stat util cnt
892 bne-- mlckestatloop ; Retry if failed
893 mr. r8,r8 ; Test for zero
894 bne++ mlckestatskip ; Skip if stat util cnt did not wrap
895 lwz r8,GRP_MTX_STAT_UTIL(r5) ; Load upper stat util cnt
896 addi r8,r8,1 ; Increment upper stat util cnt
897 stw r8,GRP_MTX_STAT_UTIL(r5) ; Store upper stat util cnt
899 lwz r5,MUTEX_DATA(r3) ; Get the lock quickly
902 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
903 mfmsr r9 ; Get the MSR value
904 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
905 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
906 andc r9,r9,r0 ; Clear FP and VEC
907 andc r7,r9,r7 ; Clear EE as well
908 mtmsr r7 ; Turn off interruptions
909 isync ; May have turned off vec and fp here
910 mr. r5,r5 ; Quick check
911 bne-- mlckespin01 ; Can not get it right now...
914 lwarx r5,MUTEX_DATA,r3 ; load the mutex lock
916 bne-- mlckespin0 ; Can not get it right now...
917 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
918 bne-- mlcketry ; loop back if failed
919 .globl EXT(mlckePatch_isync)
920 LEXT(mlckePatch_isync)
921 isync ; stop prefetching
923 bf MUTEX_ATTR_DEBUGb,mlckedebskip
924 mr r8,r6 ; Get the active thread
925 stw r12,MUTEX_STACK(r3) ; Save our caller
926 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
928 LCK_STACK(r3,r5,r6,r7,r8,r10)
930 mtmsr r9 ; Say, any interrupts pending?
934 li r5,lgKillResv ; Killing field
935 stwcx. r5,0,r5 ; Kill reservation
938 mtmsr r9 ; Say, any interrupts pending?
940 mtmsr r7 ; Turn off interruptions, vec and fp off already
945 * void lck_mtx_lock(lck_mtx_t*)
949 .globl EXT(lck_mtx_lock)
953 .globl EXT(mutex_lock)
956 .globl EXT(_mutex_lock)
960 mfsprg r6,1 ; load the current thread
961 lwz r5,MUTEX_DATA(r3) ; Get the lock quickly
962 mr r11,r3 ; Save lock addr
966 mr. r5,r5 ; Quick check
967 bne-- mlckspin00 ; Indirect or Can not get it right now...
970 lwarx r5,MUTEX_DATA,r3 ; load the mutex lock
972 bne-- mlckspin01 ; Can not get it right now...
973 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
974 bne-- mlcktry ; loop back if failed
975 .globl EXT(mlckPatch_isync)
976 LEXT(mlckPatch_isync)
977 isync ; stop prefetching
981 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
982 bne-- mlckspin02 ; No, go handle contention
983 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
986 li r5,lgKillResv ; Killing field
987 stwcx. r5,0,r5 ; Kill reservation
991 mtcrf 1,r0 ; Set cr7 to zero
998 mr. r4,r4 ; Test timeout value
1000 lis r4,hi16(EXT(MutexSpin)) ; Get the high part
1001 ori r4,r4,lo16(EXT(MutexSpin)) ; And the low part
1002 lwz r4,0(r4) ; Get spin timeout value
1003 mr. r4,r4 ; Test spin timeout value
1004 bne++ mlckspin2 ; Branch if a spin timeout was requested
1005 crclr mlckmiss ; Clear miss test
1006 b mlckslow1 ; Don't try to spin
1008 mlckspin2: mr. r8,r8 ; Is r8 set to zero
1009 bne++ mlckspin3 ; Branch if not the first spin attempt
1010 crclr mlckmiss ; Clear miss test
1011 mr. r9,r9 ; Is r9 set to zero
1012 bne++ mlckspin3 ; Branch if r9 already holds the MSR value
1013 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1014 mfmsr r9 ; Get the MSR value
1015 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1016 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1017 andc r9,r9,r0 ; Clear FP and VEC
1018 andc r7,r9,r7 ; Clear EE as well
1019 mtmsr r7 ; Turn off interruptions
1020 isync ; May have turned off vec and fp here
1021 mftb r8 ; Get timestamp on entry
1024 mlckspin3: mtmsr r7 ; Turn off interruptions
1025 mftb r8 ; Get timestamp on entry
1027 mlcksniff: lwz r5,MUTEX_DATA(r3) ; Get that lock in here
1028 mr. r5,r5 ; Is the lock held
1029 beq++ mlckretry ; No, try for it again...
1030 rlwinm. r10,r5,0,0,29 ; Extract the lock owner
1031 beq++ mlckslow0 ; InterLock is held
1032 bf MUTEX_ATTR_STATb,mlStatSkip ; Branch if no stat
1033 andi. r5,r5,ILK_LOCKED ; extract interlocked?
1034 bne mlStatSkip ; yes, skip
1035 bt mlckmiss,mlStatSkip ; miss already counted
1036 crset mlckmiss ; Remember miss recorded
1037 lwz r5,MUTEX_GRP(r3) ; Load lock group
1038 addi r5,r5,GRP_MTX_STAT_MISS+4 ; Add stat miss offset
1040 lwarx r6,0,r5 ; Load stat miss cnt
1041 addi r6,r6,1 ; Increment stat miss cnt
1042 stwcx. r6,0,r5 ; Update stat miss cnt
1043 bne-- mlStatLoop ; Retry if failed
1044 mfsprg r6,1 ; Reload current thread
1046 lwz r2,ACT_MACT_SPF(r10) ; Get the special flags
1047 rlwinm. r2,r2,0,OnProcbit,OnProcbit ; Is OnProcbit set?
1048 beq mlckslow0 ; Lock owner isn't running
1049 lis r2,hi16(TH_OPT_DELAYIDLE) ; Get DelayedIdle Option
1050 ori r2,r2,lo16(TH_OPT_DELAYIDLE) ; Get DelayedIdle Option
1051 lwz r10,THREAD_OPTIONS(r10) ; Get the thread options
1052 and. r10,r10,r2 ; Is DelayedIdle set?
1053 bne mlckslow0 ; Lock owner is in delay idle
1055 mftb r10 ; Time stamp us now
1056 sub r10,r10,r8 ; Get the elapsed time
1057 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
1058 blt++ mlcksniff ; Not yet...
1060 mtmsr r9 ; Say, any interrupts pending?
1062 ; The following instructions force the pipeline to be interlocked so that only one
1063 ; instruction is issued per cycle. This ensures that we stay enabled for a long enough
1064 ; time; if the window is too short, pending interruptions will not have a chance to be taken
1066 subi r4,r4,128 ; Back off elapsed time from timeout value
1067 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1068 mr. r4,r4 ; See if we used the whole timeout
1069 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1071 ble-- mlckslow1 ; We failed
1072 b mlckspin3 ; Now that we've opened an enable window, keep trying...
1074 mtmsr r9 ; Restore interrupt state
1075 li r8,1 ; Show already through once
1078 mlckslow0: ; We couldn't get the lock
1079 mtmsr r9 ; Restore interrupt state
1086 bl lockDisa ; Go get a lock on the mutex's interlock lock
1087 mr. r4,r3 ; Did we get it?
1088 lwz r3,FM_ARG0(r1) ; Restore the lock address
1089 bne++ mlGotInt ; We got it just fine...
1090 mr r4,r11 ; Saved lock addr
1091 lis r3,hi16(mutex_failed1) ; Get the failed mutex message
1092 ori r3,r3,lo16(mutex_failed1) ; Get the failed mutex message
1093 bl EXT(panic) ; Call panic
1094 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1098 STRINGD "attempt to interlock mutex (0x%08X) failed on mutex lock\n\000"
1103 ; Note that there is no reason to do a load and reserve here. We already
1104 ; hold the interlock lock and no one can touch this field unless they
1105 ; have that, so, we're free to play
1107 lwz r4,MUTEX_DATA(r3) ; Get the mutex's lock field
1108 rlwinm. r9,r4,30,2,31 ; So, can we have it?
1109 bne- mlInUse ; Nope, somebody's playing already...
1111 bf++ MUTEX_ATTR_DEBUGb,mlDebSkip
1113 mfsprg r9,1 ; Get the current activation
1114 lwz r5,0(r1) ; Get previous save frame
1115 lwz r6,FM_LR_SAVE(r5) ; Get our caller's address
1116 mr r8,r9 ; Get the active thread
1117 stw r6,MUTEX_STACK(r3) ; Save our caller
1118 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
1119 LCK_STACK(r3,r5,r6,r7,r8,r10)
1121 mr r3,r11 ; Get the based lock address
1122 bl EXT(lck_mtx_lock_acquire)
1123 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
1127 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1128 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1133 stw r5,MUTEX_DATA(r3) ; grab the mutexlock and free the interlock
1135 EPILOG ; Restore all saved registers
1136 b epStart ; Go enable preemption...
1138 ; We come to here when we have a resource conflict. In other words,
1139 ; the mutex is held.
1144 CHECK_MYLOCK() ; Assert we don't own the lock already
1146 ; Note that we come in here with the interlock set. The wait routine
1147 ; will unlock it before waiting.
1149 bf MUTEX_ATTR_STATb,mlStatSkip2 ; Branch if no stat
1150 lwz r5,MUTEX_GRP(r3) ; Load lck group
1151 bt mlckmiss,mlStatSkip1 ; Skip miss already counted
1152 crset mlckmiss ; Remember miss recorded
1153 li r9,GRP_MTX_STAT_MISS+4 ; Get stat miss offset
1155 lwarx r8,r9,r5 ; Load stat miss cnt
1156 addi r8,r8,1 ; Increment stat miss cnt
1157 stwcx. r8,r9,r5 ; Store stat miss cnt
1158 bne-- mlStatLoop1 ; Retry if failed
1160 lwz r9,GRP_MTX_STAT_WAIT+4(r5) ; Load wait cnt
1161 addi r9,r9,1 ; Increment wait cnt
1162 stw r9,GRP_MTX_STAT_WAIT+4(r5) ; Update wait cnt
1164 ori r4,r4,WAIT_FLAG ; Set the wait flag
1165 stw r4,MUTEX_DATA(r3)
1166 rlwinm r4,r4,0,0,29 ; Extract the lock owner
1168 stw r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
1169 mr r3,r11 ; Get the based lock address
1170 bl EXT(lck_mtx_lock_wait) ; Wait for our turn at the lock
1172 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1173 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1174 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
1176 b .L_ml_retry ; and try again...
1180 * void lck_mtx_try_lock_ext(lck_mtx_ext_t*)
1184 .globl EXT(lck_mtx_try_lock_ext)
1185 LEXT(lck_mtx_try_lock_ext)
1187 .globl EXT(mutex_try)
1189 .globl EXT(_mutex_try)
1192 mr r11,r3 ; Save lock addr
1194 lwz r0,MUTEX_ATTR(r3)
1195 mtcrf 1,r0 ; Set cr7
1199 bf MUTEX_ATTR_STATb,mlteStatSkip ; Branch if no stat
1200 lwz r5,MUTEX_GRP(r3) ; Load lock group
1201 li r7,GRP_MTX_STAT_UTIL+4 ; Set stat util offset
1203 lwarx r8,r7,r5 ; Load stat util cnt
1204 addi r8,r8,1 ; Increment stat util cnt
1205 stwcx. r8,r7,r5 ; Store stat util cnt
1206 bne-- mlteStatLoop ; Retry if failed
1207 mr. r8,r8 ; Test for zero
1208 bne++ mlteStatSkip ; Skip if stat util cnt did not wrap
1209 lwz r8,GRP_MTX_STAT_UTIL(r5) ; Load upper stat util cnt
1210 addi r8,r8,1 ; Increment upper stat util cnt
1211 stw r8,GRP_MTX_STAT_UTIL(r5) ; Store upper stat util cnt
1213 mfsprg r6,1 ; load the current thread
1214 lwz r5,MUTEX_DATA(r3) ; Get the lock value
1215 mr. r5,r5 ; Quick check
1216 bne-- L_mutex_try_slow ; Can not get it now...
1217 mfmsr r9 ; Get the MSR value
1218 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1219 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1220 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1221 andc r9,r9,r0 ; Clear FP and VEC
1222 andc r7,r9,r7 ; Clear EE as well
1223 mtmsr r7 ; Turn off interruptions
1224 isync ; May have turned off vec and fp here
1227 lwarx r5,MUTEX_DATA,r3 ; load the lock value
1229 bne-- mlteSlowX ; branch to the slow path
1230 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
1231 bne-- mlteLoopTry ; retry if failed
1232 .globl EXT(mltelckPatch_isync)
1233 LEXT(mltelckPatch_isync)
1234 isync ; stop prefetching
1236 bf MUTEX_ATTR_DEBUGb,mlteDebSkip
1237 mr r8,r6 ; Get the active thread
1238 stw r12,MUTEX_STACK(r3) ; Save our caller
1239 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
1241 LCK_STACK(r3,r5,r6,r7,r8,r10)
1244 mtmsr r9 ; Say, any interrupts pending?
1247 li r5,lgKillResv ; Killing field
1248 stwcx. r5,0,r5 ; Kill reservation
1249 mtmsr r9 ; Say, any interrupts pending?
1254 * void lck_mtx_try_lock(lck_mtx_t*)
1258 .globl EXT(lck_mtx_try_lock)
1259 LEXT(lck_mtx_try_lock)
1261 .globl EXT(mutex_try)
1263 .globl EXT(_mutex_try)
1267 mfsprg r6,1 ; load the current thread
1268 lwz r5,MUTEX_DATA(r3) ; Get the lock value
1269 mr r11,r3 ; Save lock addr
1270 mr. r5,r5 ; Quick check
1271 bne-- mltSlow00 ; Indirect or Can not get it now...
1274 lwarx r5,MUTEX_DATA,r3 ; load the lock value
1276 bne-- mltSlow01 ; branch to the slow path
1277 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
1278 bne-- mltLoopTry ; retry if failed
1279 .globl EXT(mltlckPatch_isync)
1280 LEXT(mltlckPatch_isync)
1281 isync ; stop prefetching
1286 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
1287 bne-- mltSlow02 ; No, go handle contention
1288 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
1291 li r5,lgKillResv ; Killing field
1292 stwcx. r5,0,r5 ; Kill reservation
1296 mtcrf 1,r0 ; Set cr7 to zero
1301 lwz r6,MUTEX_DATA(r3) ; Quick check
1302 rlwinm. r6,r6,30,2,31 ; to see if someone has this lock already
1303 bne- mtFail ; Someone's got it already...
1305 bl lockDisa ; Go get a lock on the mutex's interlock lock
1306 mr. r4,r3 ; Did we get it?
1307 lwz r3,FM_ARG0(r1) ; Restore the lock address
1308 bne++ mtGotInt ; We got it just fine...
1309 mr r4,r11 ; Saved lock addr
1310 lis r3,hi16(mutex_failed2) ; Get the failed mutex message
1311 ori r3,r3,lo16(mutex_failed2) ; Get the failed mutex message
1312 bl EXT(panic) ; Call panic
1313 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1317 STRINGD "attempt to interlock mutex (0x%08X) failed on mutex lock try\n\000"
1322 ; Note that there is no reason to do a load and reserve here. We already
1323 ; hold the interlock and no one can touch this field unless they
1324 ; have that, so, we're free to play
1326 lwz r4,MUTEX_DATA(r3) ; Get the mutex's lock field
1327 rlwinm. r9,r4,30,2,31 ; So, can we have it?
1328 bne- mtInUse ; Nope, somebody's playing already...
1330 bf++ MUTEX_ATTR_DEBUGb,mtDebSkip
1332 mfsprg r9,1 ; Get the current activation
1333 lwz r5,0(r1) ; Get previous save frame
1334 lwz r6,FM_LR_SAVE(r5) ; Get our caller's address
1335 mr r8,r9 ; Get the active thread
1336 stw r6,MUTEX_STACK(r3) ; Save our caller
1337 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
1338 LCK_STACK(r3,r5,r6,r7,r8,r10)
1340 mr r3,r11 ; Get the based lock address
1341 bl EXT(lck_mtx_lock_acquire)
1344 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1345 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1350 stw r5,MUTEX_DATA(r3) ; grab the mutexlock and free the interlock
1352 bl epStart ; Go enable preemption...
1355 EPILOG ; Restore all saved registers
1358 ; We come to here when we have a resource conflict. In other words,
1359 ; the mutex is held.
1362 bf++ MUTEX_ATTR_STATb,mtStatSkip ; Branch if no stat
1363 lwz r5,MUTEX_GRP(r3) ; Load lock group
1364 li r9,GRP_MTX_STAT_MISS+4 ; Get stat miss offset
1366 lwarx r8,r9,r5 ; Load stat miss cnt
1367 addi r8,r8,1 ; Increment stat miss cnt
1368 stwcx. r8,r9,r5 ; Store stat miss cnt
1369 bne-- mtStatLoop ; Retry if failed
1371 rlwinm r4,r4,0,0,30 ; Get the unlock value
1372 stw r4,MUTEX_DATA(r3) ; free the interlock
1373 bl epStart ; Go enable preemption...
1375 mtFail: li r3,0 ; Set failure code
1376 EPILOG ; Restore all saved registers
1381 * void mutex_unlock(mutex_t* l)
1385 .globl EXT(mutex_unlock)
1389 mr r11,r3 ; Save lock addr
1397 * void lck_mtx_ext_unlock(lck_mtx_ext_t* l)
1401 .globl EXT(lck_mtx_ext_unlock)
1402 LEXT(lck_mtx_ext_unlock)
1404 .globl EXT(mutex_unlock_rwcmb)
1405 LEXT(mutex_unlock_rwcmb)
1408 .globl EXT(mulckePatch_isync)
1409 LEXT(mulckePatch_isync)
1411 .globl EXT(mulckePatch_eieio)
1412 LEXT(mulckePatch_eieio)
1414 mr r11,r3 ; Save lock addr
1416 lwz r0,MUTEX_ATTR(r3)
1417 mtcrf 1,r0 ; Set cr7
1420 CHECK_THREAD(MUTEX_THREAD)
1422 lwz r5,MUTEX_DATA(r3) ; Get the lock
1423 rlwinm. r4,r5,0,30,31 ; Quick check
1424 bne-- L_mutex_unlock_slow ; Can not get it now...
1425 mfmsr r9 ; Get the MSR value
1426 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1427 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1428 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1429 andc r9,r9,r0 ; Clear FP and VEC
1430 andc r7,r9,r7 ; Clear EE as well
1431 mtmsr r7 ; Turn off interruptions
1432 isync ; May have turned off vec and fp here
1435 lwarx r5,MUTEX_DATA,r3
1436 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1437 li r5,0 ; Clear the mutexlock
1439 stwcx. r5,MUTEX_DATA,r3
1441 mtmsr r9 ; Say, any interrupts pending?
1445 li r5,lgKillResv ; Killing field
1446 stwcx. r5,0,r5 ; Dump reservation
1447 mtmsr r9 ; Say, any interrupts pending?
1448 b L_mutex_unlock_slow ; Join slow path...
1451 * void lck_mtx_unlock(lck_mtx_t* l)
1455 .globl EXT(lck_mtx_unlock)
1456 LEXT(lck_mtx_unlock)
1458 .globl EXT(mutex_unlock_rwcmb)
1459 LEXT(mutex_unlock_rwcmb)
1462 .globl EXT(mulckPatch_isync)
1463 LEXT(mulckPatch_isync)
1465 .globl EXT(mulckPatch_eieio)
1466 LEXT(mulckPatch_eieio)
1468 mr r11,r3 ; Save lock addr
1470 lwz r5,MUTEX_DATA(r3) ; Get the lock
1471 rlwinm. r4,r5,0,30,31 ; Quick check
1472 bne-- mluSlow0 ; Indirect or Can not get it now...
1475 lwarx r5,MUTEX_DATA,r3
1476 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1477 li r5,0 ; Clear the mutexlock
1479 stwcx. r5,MUTEX_DATA,r3
1484 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
1485 bne-- L_mutex_unlock_slow ; No, go handle contention
1486 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
1489 li r5,lgKillResv ; Killing field
1490 stwcx. r5,0,r5 ; Dump reservation
1492 L_mutex_unlock_slow:
1496 bl lockDisa ; Go get a lock on the mutex's interlock lock
1497 mr. r4,r3 ; Did we get it?
1498 lwz r3,FM_ARG0(r1) ; Restore the lock address
1499 bne++ muGotInt ; We got it just fine...
1500 mr r4,r11 ; Saved lock addr
1501 lis r3,hi16(mutex_failed3) ; Get the failed mutex message
1502 ori r3,r3,lo16(mutex_failed3) ; Get the failed mutex message
1503 bl EXT(panic) ; Call panic
1504 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1508 STRINGD "attempt to interlock mutex (0x%08X) failed on mutex unlock\n\000"
1513 lwz r4,MUTEX_DATA(r3)
1514 andi. r5,r4,WAIT_FLAG ; are there any waiters ?
1516 beq+ muUnlock ; Nope, we're done...
1518 mr r3,r11 ; Get the based lock address
1519 bl EXT(lck_mtx_unlock_wakeup) ; yes, wake a thread
1520 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1521 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1522 lwz r5,MUTEX_DATA(r3) ; load the lock
1525 andi. r5,r5,WAIT_FLAG ; Get the unlock value
1527 stw r5,MUTEX_DATA(r3) ; unlock the interlock and lock
1529 EPILOG ; Deal with the stack now, enable_preemption doesn't always want one
1530 b epStart ; Go enable preemption...
1533 * void lck_mtx_assert(lck_mtx_t* l, unsigned int)
1537 .globl EXT(lck_mtx_assert)
1538 LEXT(lck_mtx_assert)
1539 .globl EXT(_mutex_assert)
1543 lwz r5,MUTEX_DATA(r3)
1544 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
1545 bne-- maCheck ; No, go check the assertion
1546 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
1549 mfsprg r6,1 ; load the current thread
1550 rlwinm r5,r5,0,0,29 ; Extract the lock owner
1551 cmpwi r4,MUTEX_ASSERT_OWNED
1552 cmplw cr1,r6,r5 ; Is the lock held by current act
1553 crandc cr0_eq,cr0_eq,cr1_eq ; Check owned assertion
1556 lis r3,hi16(mutex_assert1) ; Get the failed mutex message
1557 ori r3,r3,lo16(mutex_assert1) ; Get the failed mutex message
1558 b maPanic ; Panic path
1560 cmpwi r4,MUTEX_ASSERT_NOTOWNED ; Check not owned assertion
1561 crand cr0_eq,cr0_eq,cr1_eq ;
1566 lis r3,hi16(mutex_assert2) ; Get the failed mutex message
1567 ori r3,r3,lo16(mutex_assert2) ; Get the failed mutex message
1568 bl EXT(panic) ; Call panic
1569 BREAKPOINT_TRAP ; We die here anyway
1573 STRINGD "mutex (0x%08X) not owned\n\000"
1575 STRINGD "mutex (0x%08X) owned\n\000"
1580 * void lck_mtx_ilk_unlock(lck_mtx *lock)
1582 .globl EXT(lck_mtx_ilk_unlock)
1583 LEXT(lck_mtx_ilk_unlock)
1585 lwz r10,MUTEX_DATA(r3)
1586 rlwinm r10,r10,0,0,30
1588 stw r10,MUTEX_DATA(r3)
1590 b epStart ; Go enable preemption...
1593 * void _enable_preemption_no_check(void)
1595 * This version does not check if we get preempted or not
1598 .globl EXT(_enable_preemption_no_check)
1600 LEXT(_enable_preemption_no_check)
1602 cmplw cr1,r1,r1 ; Force zero cr so we know not to check if preempted
1603 b epCommn ; Join up with the other enable code...
1606 * void _enable_preemption(void)
1608 * This version checks if we get preempted or not
1611 .globl EXT(_enable_preemption)
1613 LEXT(_enable_preemption)
1615 ; Here is where we enable preemption.
1618 cmplwi cr1,r1,0 ; Force non-zero cr so we know to check if preempted
1621 mfsprg r3,1 ; Get current activation
1622 li r8,-1 ; Get a decrementer
1623 lwz r5,ACT_PREEMPT_CNT(r3) ; Get the preemption level
1624 add. r5,r5,r8 ; Bring down the disable count
1625 blt- epTooFar ; Count went negative; we enabled one time too many...
1626 stw r5,ACT_PREEMPT_CNT(r3) ; Save it back
1627 crandc cr0_eq,cr0_eq,cr1_eq
1628 beq+ epCheckPreempt ; Go check if we need to be preempted...
1632 lis r3,hi16(epTooFarStr) ; First half of panic string
1633 ori r3,r3,lo16(epTooFarStr) ; Second half of panic string
1636 BREAKPOINT_TRAP ; We die here anyway
1640 STRINGD "enable_preemption: preemption_level %d\n\000"
1645 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1646 mfmsr r9 ; Get the MSR value
1647 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1648 andi. r4,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off
1649 beq+ epCPno ; No preemption here...
1650 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1651 andc r9,r9,r0 ; Clear FP and VEC
1652 andc r7,r9,r7 ; Clear EE as well
1653 mtmsr r7 ; Turn off interruptions
1654 isync ; May have turned off vec and fp here
1655 lwz r3,ACT_PER_PROC(r3) ; Get the per_proc block
1656 lwz r7,PP_PENDING_AST(r3) ; Get pending AST mask
1657 li r5,AST_URGENT ; Get the requests we do honor
1658 lis r0,hi16(DoPreemptCall) ; Just in case, get the top of firmware call
1659 and. r7,r7,r5 ; Should we preempt?
1660 ori r0,r0,lo16(DoPreemptCall) ; Merge in bottom part
1661 mtmsr r9 ; Allow interrupts if we can
1663 beqlr+ ; We probably will not preempt...
1664 sc ; Do the preemption
1665 blr ; Now, go away now...
1668 * void disable_preemption(void)
1670 * Here is where we disable preemption.
1673 .globl EXT(_disable_preemption)
1675 LEXT(_disable_preemption)
1677 mfsprg r6,1 ; Get the current activation
1678 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1679 addi r5,r5,1 ; Bring up the disable count
1680 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1684 * int get_preemption_level(void)
1686 * Return the current preemption level
1689 .globl EXT(get_preemption_level)
1691 LEXT(get_preemption_level)
1693 mfsprg r6,1 ; Get current activation
1694 lwz r3,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1698 * void ppc_usimple_lock_init(simple_lock_t, etap_event_t)
1700 * Initialize a simple lock.
1703 .globl EXT(ppc_usimple_lock_init)
1705 LEXT(ppc_usimple_lock_init)
1707 li r0, 0 ; set lock to free == 0
1708 stw r0, 0(r3) ; Initialize the lock
1712 * void lck_spin_lock(lck_spin_t *)
1713 * void ppc_usimple_lock(simple_lock_t *)
1717 .globl EXT(lck_spin_lock)
1719 .globl EXT(ppc_usimple_lock)
1720 LEXT(ppc_usimple_lock)
1722 mfsprg r6,1 ; Get the current activation
1723 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1724 addi r5,r5,1 ; Bring up the disable count
1725 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1726 mr r5,r3 ; Get the address of the lock
1727 li r8,0 ; Set r8 to zero
1728 li r4,0 ; Set r4 to zero
1730 slcktry: lwarx r11,SLOCK_ILK,r5 ; Grab the lock value
1731 andi. r3,r11,ILK_LOCKED ; Is it locked?
1732 ori r11,r6,ILK_LOCKED ; Set interlock
1733 bne-- slckspin ; Yeah, wait for it to clear...
1734 stwcx. r11,SLOCK_ILK,r5 ; Try to seize that there durn lock
1735 bne-- slcktry ; Couldn't get it...
1736 .globl EXT(slckPatch_isync)
1737 LEXT(slckPatch_isync)
1738 isync ; Make sure we don't use a speculatively loaded value
1741 slckspin: li r11,lgKillResv ; Killing field
1742 stwcx. r11,0,r11 ; Kill reservation
1744 mr. r4,r4 ; Test timeout value
1746 lis r4,hi16(EXT(LockTimeOut)) ; Get the high part
1747 ori r4,r4,lo16(EXT(LockTimeOut)) ; And the low part
1748 lwz r4,0(r4) ; Get the timeout value
1750 slockspin0: mr. r8,r8 ; Is r8 set to zero
1751 bne++ slockspin1 ; Branch if not the first spin attempt
1752 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1753 mfmsr r9 ; Get the MSR value
1754 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1755 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1756 andc r9,r9,r0 ; Clear FP and VEC
1757 andc r7,r9,r7 ; Clear EE as well
1758 mtmsr r7 ; Turn off interruptions
1759 isync ; May have turned off vec and fp here
1760 mftb r8 ; Get timestamp on entry
1763 slockspin1: mtmsr r7 ; Turn off interruptions
1764 mftb r8 ; Get timestamp on entry
1766 slcksniff: lwz r3,SLOCK_ILK(r5) ; Get that lock in here
1767 andi. r3,r3,ILK_LOCKED ; Is it free yet?
1768 beq++ slckretry ; Yeah, try for it again...
1770 mftb r10 ; Time stamp us now
1771 sub r10,r10,r8 ; Get the elapsed time
1772 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
1773 blt++ slcksniff ; Not yet...
1775 mtmsr r9 ; Say, any interrupts pending?
1777 ; The following instructions force the pipeline to be interlocked so that only one
1778 ; instruction is issued per cycle. This ensures that we stay enabled for a long enough
1779 ; time; if the window is too short, pending interruptions will not have a chance to be taken
1781 subi r4,r4,128 ; Back off elapsed time from timeout value
1782 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1783 mr. r4,r4 ; See if we used the whole timeout
1784 li r3,0 ; Assume a timeout return code
1785 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1787 ble-- slckfail ; We failed
1788 b slockspin1 ; Now that we've opened an enable window, keep trying...
1790 mtmsr r9 ; Restore interrupt state
1791 li r8,1 ; Show already through once
1793 slckfail: ; We couldn't get the lock
1794 lis r3,hi16(slckpanic_str)
1795 ori r3,r3,lo16(slckpanic_str)
1800 BREAKPOINT_TRAP ; We die here anyway
1804 STRINGD "simple lock (0x%08X) deadlock detection, pc=0x%08X\n\000"
1808 * boolean_t lck_spin_try_lock(lck_spin_t *)
1809 * unsigned int ppc_usimple_lock_try(simple_lock_t *)
1813 .globl EXT(lck_spin_try_lock)
1814 LEXT(lck_spin_try_lock)
1815 .globl EXT(ppc_usimple_lock_try)
1816 LEXT(ppc_usimple_lock_try)
1818 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1819 mfmsr r9 ; Get the MSR value
1820 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1821 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1822 andc r9,r9,r0 ; Clear FP and VEC
1823 andc r7,r9,r7 ; Clear EE as well
1824 mtmsr r7 ; Disable interruptions and thus, preemption
1825 mfsprg r6,1 ; Get current activation
1827 lwz r11,SLOCK_ILK(r3) ; Get the lock
1828 andi. r5,r11,ILK_LOCKED ; Check it...
1829 bne-- slcktryfail ; Quickly fail...
1832 lwarx r11,SLOCK_ILK,r3 ; Ld from addr of arg and reserve
1834 andi. r5,r11,ILK_LOCKED ; TEST...
1835 ori r5,r6,ILK_LOCKED
1836 bne-- slcktryfailX ; branch if taken. Predict free
1838 stwcx. r5,SLOCK_ILK,r3 ; And SET (if still reserved)
1839 bne-- slcktryloop ; If set failed, loop back
1841 .globl EXT(stlckPatch_isync)
1842 LEXT(stlckPatch_isync)
1845 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1846 addi r5,r5,1 ; Bring up the disable count
1847 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1849 mtmsr r9 ; Allow interruptions now
1850 li r3,1 ; Set that the lock was free
1854 li r5,lgKillResv ; Killing field
1855 stwcx. r5,0,r5 ; Kill reservation
1858 mtmsr r9 ; Allow interruptions now
1859 li r3,0 ; FAILURE - lock was taken
1864 * void lck_spin_unlock(lck_spin_t *)
1865 * void ppc_usimple_unlock_rwcmb(simple_lock_t *)
1869 .globl EXT(lck_spin_unlock)
1870 LEXT(lck_spin_unlock)
1871 .globl EXT(ppc_usimple_unlock_rwcmb)
1872 LEXT(ppc_usimple_unlock_rwcmb)
1875 .globl EXT(sulckPatch_isync)
1876 LEXT(sulckPatch_isync)
1878 .globl EXT(sulckPatch_eieio)
1879 LEXT(sulckPatch_eieio)
1881 stw r0, SLOCK_ILK(r3)
1883 b epStart ; Go enable preemption...
1886 * void ppc_usimple_unlock_rwmb(simple_lock_t *)
1890 .globl EXT(ppc_usimple_unlock_rwmb)
1892 LEXT(ppc_usimple_unlock_rwmb)
1896 stw r0, SLOCK_ILK(r3)
1898 b epStart ; Go enable preemption...
1901 * void enter_funnel_section(funnel_t *)
1905 .globl EXT(enter_funnel_section)
1907 LEXT(enter_funnel_section)
1910 lis r10,hi16(EXT(kdebug_enable))
1911 ori r10,r10,lo16(EXT(kdebug_enable))
1913 lis r11,hi16(EXT(split_funnel_off))
1914 ori r11,r11,lo16(EXT(split_funnel_off))
1916 or. r10,r11,r10 ; Check kdebug_enable or split_funnel_off
1917 bne- L_enter_funnel_section_slow ; If set, call the slow path
1918 mfsprg r6,1 ; Get the current activation
1919 lwz r7,LOCK_FNL_MUTEX(r3)
1921 lwz r5,0(r7) ; Get lock quickly
1923 bne-- L_enter_funnel_section_slow ; Yup...
1925 L_enter_funnel_section_loop:
1926 lwarx r5,0,r7 ; Load the mutex lock
1928 bne-- L_enter_funnel_section_slowX ; Go to the slow path
1929 stwcx. r6,0,r7 ; Grab the lock
1930 bne-- L_enter_funnel_section_loop ; Loop back if failed
1931 .globl EXT(entfsectPatch_isync)
1932 LEXT(entfsectPatch_isync)
1933 isync ; Stop prefetching
1935 stw r3,THREAD_FUNNEL_LOCK(r6) ; Set the funnel lock reference
1936 stw r7,THREAD_FUNNEL_STATE(r6) ; Set the funnel state
1939 L_enter_funnel_section_slowX:
1940 li r4,lgKillResv ; Killing field
1941 stwcx. r4,0,r4 ; Kill reservation
1943 L_enter_funnel_section_slow:
1946 b EXT(thread_funnel_set)
1949 * void exit_funnel_section(void)
1953 .globl EXT(exit_funnel_section)
1955 LEXT(exit_funnel_section)
1957 mfsprg r6,1 ; Get the current activation
1958 lwz r3,THREAD_FUNNEL_LOCK(r6) ; Get the funnel lock
1959 mr. r3,r3 ; Check on funnel held
1960 beq- L_exit_funnel_section_ret ;
1962 lis r10,hi16(EXT(kdebug_enable))
1963 ori r10,r10,lo16(EXT(kdebug_enable))
1966 bne- L_exit_funnel_section_slow ; If set, call the slow path
1967 lwz r7,LOCK_FNL_MUTEX(r3) ; Get the funnel mutex lock
1968 .globl EXT(retfsectPatch_isync)
1969 LEXT(retfsectPatch_isync)
1971 .globl EXT(retfsectPatch_eieio)
1972 LEXT(retfsectPatch_eieio)
1975 lwz r5,0(r7) ; Get lock
1976 rlwinm. r4,r5,0,30,31 ; Quick check for bail if pending waiter or interlock set
1977 bne-- L_exit_funnel_section_slow ; No can get...
1979 L_exit_funnel_section_loop:
1981 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1982 li r5,0 ; Clear the mutexlock
1983 bne-- L_exit_funnel_section_slowX
1984 stwcx. r5,0,r7 ; Release the funnel mutexlock
1985 bne-- L_exit_funnel_section_loop
1987 stw r7,THREAD_FUNNEL_STATE(r6) ; Clear the funnel state
1988 stw r7,THREAD_FUNNEL_LOCK(r6) ; Clear the funnel lock reference
1991 L_exit_funnel_section_slowX:
1992 li r4,lgKillResv ; Killing field
1993 stwcx. r4,0,r4 ; Kill it
1995 L_exit_funnel_section_slow:
1998 b EXT(thread_funnel_set)
1999 L_exit_funnel_section_ret:
2003 * void lck_rw_lock_exclusive(lck_rw_t*)
2007 .globl EXT(lck_rw_lock_exclusive)
2008 LEXT(lck_rw_lock_exclusive)
2010 .globl EXT(lock_write)
2013 rwleloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2014 rlwinm. r7,r5,30,1,31 ; Can we have it?
2015 ori r6,r5,WANT_EXCL ; Mark Exclusive
2016 bne-- rwlespin ; Branch if cannot be held
2017 stwcx. r6,RW_DATA,r3 ; Update lock word
2019 .globl EXT(rwlePatch_isync)
2020 LEXT(rwlePatch_isync)
2024 li r4,lgKillResv ; Killing field
2025 stwcx. r4,0,r4 ; Kill it
2026 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2027 bne-- rwlespin1 ; No, go handle contention
2028 mr r4,r3 ; pass lock pointer
2029 lwz r3,RW_PTR(r3) ; load lock ext pointer
2030 b EXT(lck_rw_lock_exclusive_ext)
2032 b EXT(lck_rw_lock_exclusive_gen)
2035 * void lck_rw_lock_shared(lck_rw_t*)
2039 .globl EXT(lck_rw_lock_shared)
2040 LEXT(lck_rw_lock_shared)
2042 .globl EXT(lock_read)
2045 rwlsloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2046 andi. r7,r5,WANT_EXCL|WANT_UPGRADE|ILK_LOCKED ; Can we have it?
2047 addis r6,r5,1 ; Increment read cnt
2048 bne-- rwlsspin ; Branch if cannot be held
2049 stwcx. r6,RW_DATA,r3 ; Update lock word
2051 .globl EXT(rwlsPatch_isync)
2052 LEXT(rwlsPatch_isync)
2056 li r4,lgKillResv ; Killing field
2057 stwcx. r4,0,r4 ; Kill it
2058 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2059 bne-- rwlsspin1 ; No, go handle contention
2060 mr r4,r3 ; pass lock pointer
2061 lwz r3,RW_PTR(r3) ; load lock ext pointer
2062 b EXT(lck_rw_lock_shared_ext)
2064 b EXT(lck_rw_lock_shared_gen)
2067 * boolean_t lck_rw_lock_shared_to_exclusive(lck_rw_t*)
2071 .globl EXT(lck_rw_lock_shared_to_exclusive)
2072 LEXT(lck_rw_lock_shared_to_exclusive)
2074 .globl EXT(lock_read_to_write)
2075 LEXT(lock_read_to_write)
2077 rwlseloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2078 addis r6,r5,0xFFFF ; Decrement read cnt
2079 lis r8,0xFFFF ; Get read count mask
2080 ori r8,r8,WANT_UPGRADE|ILK_LOCKED ; Include Interlock and upgrade flags
2081 and. r7,r6,r8 ; Can we have it?
2082 ori r9,r6,WANT_UPGRADE ; Mark Exclusive
2083 bne-- rwlsespin ; Branch if cannot be held
2084 stwcx. r9,RW_DATA,r3 ; Update lock word
2086 .globl EXT(rwlsePatch_isync)
2087 LEXT(rwlsePatch_isync)
2089 li r3,0 ; Succeed, return FALSE...
2092 li r4,lgKillResv ; Killing field
2093 stwcx. r4,0,r4 ; Kill it
2094 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2095 bne-- rwlsespin1 ; No, go handle contention
2096 mr r4,r3 ; pass lock pointer
2097 lwz r3,RW_PTR(r3) ; load lock ext pointer
2098 b EXT(lck_rw_lock_shared_to_exclusive_ext)
2100 b EXT(lck_rw_lock_shared_to_exclusive_gen)
2105 * void lck_rw_lock_exclusive_to_shared(lck_rw_t*)
2109 .globl EXT(lck_rw_lock_exclusive_to_shared)
2110 LEXT(lck_rw_lock_exclusive_to_shared)
2112 .globl EXT(lock_write_to_read)
2113 LEXT(lock_write_to_read)
2115 .globl EXT(rwlesPatch_isync)
2116 LEXT(rwlesPatch_isync)
2118 .globl EXT(rwlesPatch_eieio)
2119 LEXT(rwlesPatch_eieio)
2121 rwlesloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2122 andi. r7,r5,ILK_LOCKED ; Test interlock flag
2123 bne-- rwlesspin ; Branch if interlocked
2124 lis r6,1 ; Get 1 for read count
2125 andi. r10,r5,WANT_UPGRADE ; Is it held with upgrade
2126 li r9,WANT_UPGRADE|WAIT_FLAG ; Get upgrade and wait flags mask
2127 bne rwlesexcl1 ; Skip if held with upgrade
2128 li r9,WANT_EXCL|WAIT_FLAG ; Get exclusive and wait flags mask
2130 andc r7,r5,r9 ; Marked free
2131 rlwimi r6,r7,0,16,31 ; Set shared cnt to one
2132 stwcx. r6,RW_DATA,r3 ; Update lock word
2134 andi. r7,r5,WAIT_FLAG ; Test wait flag
2135 beqlr++ ; Return if no waiters
2136 addi r3,r3,RW_EVENT ; Get lock event address
2137 b EXT(thread_wakeup) ; wakeup waiters
2139 li r4,lgKillResv ; Killing field
2140 stwcx. r4,0,r4 ; Kill it
2141 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2142 bne-- rwlesspin1 ; No, go handle contention
2143 mr r4,r3 ; pass lock pointer
2144 lwz r3,RW_PTR(r3) ; load lock ext pointer
2145 b EXT(lck_rw_lock_exclusive_to_shared_ext)
2147 b EXT(lck_rw_lock_exclusive_to_shared_gen)
2152 * boolean_t lck_rw_try_lock_exclusive(lck_rw_t*)
2156 .globl EXT(lck_rw_try_lock_exclusive)
2157 LEXT(lck_rw_try_lock_exclusive)
2158 lis r10,0xFFFF ; Load read count mask
2159 ori r10,r10,WANT_EXCL|WANT_UPGRADE ; Include exclusive and upgrade flags
2160 rwtleloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2161 andi. r7,r5,ILK_LOCKED ; Test interlock flag
2162 bne-- rwtlespin ; Branch if interlocked
2163 and. r7,r5,r10 ; Can we have it
2164 ori r6,r5,WANT_EXCL ; Mark Exclusive
2166 stwcx. r6,RW_DATA,r3 ; Update lock word
2168 .globl EXT(rwtlePatch_isync)
2169 LEXT(rwtlePatch_isync)
2171 li r3,1 ; Return TRUE
2174 li r4,lgKillResv ; Killing field
2175 stwcx. r4,0,r4 ; Kill it
2176 li r3,0 ; Return FALSE
2179 li r4,lgKillResv ; Killing field
2180 stwcx. r4,0,r4 ; Kill it
2181 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2182 bne-- rwtlespin1 ; No, go handle contention
2183 mr r4,r3 ; pass lock pointer
2184 lwz r3,RW_PTR(r3) ; load lock ext pointer
2185 b EXT(lck_rw_try_lock_exclusive_ext)
2187 b EXT(lck_rw_try_lock_exclusive_gen)
2191 * boolean_t lck_rw_try_lock_shared(lck_rw_t*)
2195 .globl EXT(lck_rw_try_lock_shared)
2196 LEXT(lck_rw_try_lock_shared)
2197 rwtlsloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2198 andi. r7,r5,ILK_LOCKED ; Test interlock flag
2199 bne-- rwtlsspin ; Branch if interlocked
2200 andi. r7,r5,WANT_EXCL|WANT_UPGRADE ; So, can we have it?
2201 addis r6,r5,1 ; Increment read cnt
2202 bne-- rwtlsfail ; Branch if held exclusive
2203 stwcx. r6,RW_DATA,r3 ; Update lock word
2205 .globl EXT(rwtlsPatch_isync)
2206 LEXT(rwtlsPatch_isync)
2208 li r3,1 ; Return TRUE
2211 li r3,0 ; Return FALSE
2214 li r4,lgKillResv ; Killing field
2215 stwcx. r4,0,r4 ; Kill it
2216 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2217 bne-- rwtlsspin1 ; No, go handle contention
2218 mr r4,r3 ; pass lock pointer
2219 lwz r3,RW_PTR(r3) ; load lock ext pointer
2220 b EXT(lck_rw_try_lock_shared_ext)
2222 b EXT(lck_rw_try_lock_shared_gen)
2227 * lck_rw_type_t lck_rw_done(lck_rw_t*)
2231 .globl EXT(lck_rw_done)
2234 .globl EXT(lock_done)
2237 .globl EXT(rwldPatch_isync)
2238 LEXT(rwldPatch_isync)
2240 .globl EXT(rwldPatch_eieio)
2241 LEXT(rwldPatch_eieio)
2243 li r10,WAIT_FLAG ; Get wait flag
2244 lis r7,0xFFFF ; Get read cnt mask
2245 mr r12,r3 ; Save lock addr
2246 rwldloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2247 andi. r8,r5,ILK_LOCKED ; Test interlock flag
2248 bne-- rwldspin ; Branch if interlocked
2249 and. r8,r5,r7 ; Is it shared
2250 cmpi cr1,r8,0 ; Is it shared
2251 beq cr1,rwldexcl ; No, check exclusive
2252 li r11,RW_SHARED ; Set return value
2253 addis r6,r5,0xFFFF ; Decrement read count
2254 and. r8,r6,r7 ; Is it still shared
2255 li r8,0 ; Assume no wakeup
2256 bne rwldshared1 ; Skip if still held shared
2257 and r8,r6,r10 ; Extract wait flag
2258 andc r6,r6,r10 ; Clear wait flag
2262 li r11,RW_EXCL ; Set return value
2263 li r9,WANT_UPGRADE ; Get upgrade flag
2264 and. r6,r5,r9 ; Is it held with upgrade
2265 li r9,WANT_UPGRADE|WAIT_FLAG ; Mask upgrade and wait flags
2266 bne rwldexcl1 ; Skip if held with upgrade
2267 li r9,WANT_EXCL|WAIT_FLAG ; Mask exclusive and wait flags
2269 andc r6,r5,r9 ; Marked free
2270 and r8,r5,r10 ; Null if no waiter
2272 stwcx. r6,RW_DATA,r3 ; Update lock word
2274 mr. r8,r8 ; wakeup needed?
2275 mr r3,r11 ; Return lock held type
2277 mr r3,r12 ; Restore lock address
2279 addi r3,r3,RW_EVENT ; Get lock event address
2280 bl EXT(thread_wakeup) ; wakeup threads
2281 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
2284 li r3,RW_SHARED ; Assume lock type shared
2285 bne cr1,rwldret ; Branch if was held exclusive
2286 li r3,RW_EXCL ; Return lock type exclusive
2290 li r4,lgKillResv ; Killing field
2291 stwcx. r4,0,r4 ; Kill it
2292 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2293 bne-- rwldspin1 ; No, go handle contention
2294 mr r4,r3 ; pass lock pointer
2295 lwz r3,RW_PTR(r3) ; load lock ext pointer
2296 b EXT(lck_rw_done_ext)
2298 b EXT(lck_rw_done_gen)
2301 * void lck_rw_ilk_lock(lck_rw_t *lock)
2303 .globl EXT(lck_rw_ilk_lock)
2304 LEXT(lck_rw_ilk_lock)
2305 crclr hwtimeout ; no timeout option
2306 li r4,0 ; request default timeout value
2307 li r12,ILK_LOCKED ; Load bit mask
2308 b lckcomm ; Join on up...
2311 * void lck_rw_ilk_unlock(lck_rw_t *lock)
2313 .globl EXT(lck_rw_ilk_unlock)
2314 LEXT(lck_rw_ilk_unlock)
2316 b EXT(hw_unlock_bit)