 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
#include <mach_assert.h>
#include <mach_ldebug.h>
#include <kern/etap_options.h>
#include <ppc/proc_reg.h>

#define SWT_HI  0+FM_SIZE
#define SWT_LO  4+FM_SIZE
#define MISSED  8+FM_SIZE

; NOTE: make sure that PREEMPTSTACK in aligned_data is
; set the same as it is here.  This is the number of
; traceback entries we can handle per processor.
; A value of 0 disables the stack.

#define PREEMPTSTACK 0

#include <ppc/POWERMAC/mp/mp.h>

#define PROLOG(space) \
        stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
        stw r3,FM_ARG0(r1) __ASMNL__ \
        stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__

#define EPILOG \
        lwz r1,0(r1) __ASMNL__ \
        lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
#if MACH_LDEBUG && CHECKLOCKS
/*
 * Routines for general lock debugging.
 */

/* Gets lock check flags in CR6: CR bits 24-27 */

#define CHECK_SETUP(rg) \
        lis rg,hi16(EXT(dgWork)) __ASMNL__ \
        ori rg,rg,lo16(EXT(dgWork)) __ASMNL__ \
        lbz rg,dgFlags(rg) __ASMNL__ \

/*
 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to Mutex functions with
 * type simplelock and vice versa.
 */
#define CHECK_MUTEX_TYPE() \
        bt 24+disLktypeb,1f __ASMNL__ \
        lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
        cmpwi r10,MUTEX_TAG __ASMNL__ \
        lis r3,hi16(not_a_mutex) __ASMNL__ \
        ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
        bl EXT(panic) __ASMNL__ \
        lwz r3,FM_ARG0(r1) __ASMNL__ \

        STRINGD "not a mutex!\n\000"
#define CHECK_SIMPLE_LOCK_TYPE() \
        bt 24+disLktypeb,1f __ASMNL__ \
        lwz r10,SLOCK_TYPE(r3) __ASMNL__ \
        cmpwi r10,USLOCK_TAG __ASMNL__ \
        lis r3,hi16(not_a_slock) __ASMNL__ \
        ori r3,r3,lo16(not_a_slock) __ASMNL__ \
        bl EXT(panic) __ASMNL__ \
        lwz r3,FM_ARG0(r1) __ASMNL__ \

        STRINGD "not a simple lock!\n\000"

#define CHECK_NO_SIMPLELOCKS() \
        bt 24+disLkNmSimpb,2f __ASMNL__ \
        mfmsr r11 __ASMNL__ \
        rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
        mtmsr r10 __ASMNL__ \
        mfsprg r10,0 __ASMNL__ \
        lwz r10,PP_CPU_DATA(r10) __ASMNL__ \
        lwz r10,CPU_SIMPLE_LOCK_COUNT(r10) __ASMNL__ \
        cmpwi r10,0 __ASMNL__ \
        lis r3,hi16(simple_locks_held) __ASMNL__ \
        ori r3,r3,lo16(simple_locks_held) __ASMNL__ \
        bl EXT(panic) __ASMNL__ \
        lwz r3,FM_ARG0(r1) __ASMNL__ \
        mtmsr r11 __ASMNL__ \

        STRINGD "simple locks held!\n\000"
/*
 * Verifies return to the correct thread in "unlock" situations.
 */
#define CHECK_THREAD(thread_offset) \
        bt 24+disLkThreadb,2f __ASMNL__ \
        mfmsr r11 __ASMNL__ \
        rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
        mtmsr r10 __ASMNL__ \
        mfsprg r10,0 __ASMNL__ \
        lwz r10,PP_CPU_DATA(r10) __ASMNL__ \
        lwz r10,CPU_ACTIVE_THREAD(r10) __ASMNL__ \
        cmpwi r10,0 __ASMNL__ \
        lwz r9,thread_offset(r3) __ASMNL__ \
        cmpw r9,r10 __ASMNL__ \
        lis r3,hi16(wrong_thread) __ASMNL__ \
        ori r3,r3,lo16(wrong_thread) __ASMNL__ \
        bl EXT(panic) __ASMNL__ \
        lwz r3,FM_ARG0(r1) __ASMNL__ \
        mtmsr r11 __ASMNL__ \

        STRINGD "wrong thread!\n\000"

#define CHECK_MYLOCK(thread_offset) \
        bt 24+disLkMyLckb,2f __ASMNL__ \
        mfmsr r11 __ASMNL__ \
        rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
        mtmsr r10 __ASMNL__ \
        mfsprg r10,0 __ASMNL__ \
        lwz r10,PP_CPU_DATA(r10) __ASMNL__ \
        lwz r10,CPU_ACTIVE_THREAD(r10) __ASMNL__ \
        cmpwi r10,0 __ASMNL__ \
        lwz r9, thread_offset(r3) __ASMNL__ \
        cmpw r9,r10 __ASMNL__ \
        lis r3, HIGH_ADDR(mylock_attempt) __ASMNL__ \
        ori r3,r3,LOW_ADDR(mylock_attempt) __ASMNL__ \
        bl EXT(panic) __ASMNL__ \
        lwz r3,FM_ARG0(r1) __ASMNL__ \
        mtmsr r11 __ASMNL__ \

        STRINGD "mylock attempt!\n\000"

#else   /* MACH_LDEBUG */

#define CHECK_SETUP(rg)
#define CHECK_MUTEX_TYPE()
#define CHECK_SIMPLE_LOCK_TYPE()
#define CHECK_THREAD(thread_offset)
#define CHECK_NO_SIMPLELOCKS()
#define CHECK_MYLOCK(thread_offset)

#endif  /* MACH_LDEBUG */
/*
 * void hw_lock_init(hw_lock_t)
 *
 * Initialize a hardware lock.  These locks should be cache aligned and a multiple
 */
ENTRY(hw_lock_init, TAG_NO_FRAME_USED)

        li r0, 0 /* set lock to free == 0 */
        stw r0, 0(r3) /* Initialize the lock */

/*
 * void hw_lock_unlock(hw_lock_t)
 *
 * Unconditionally release lock.
 * MACH_RT: release preemption level.
 */
        .globl EXT(hw_lock_unlock)

        lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
        lis r5,0xFFFF /* (TEST/DEBUG) */
        oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
        sc /* (TEST/DEBUG) */

        sync /* Flush writes done under lock */
        li r0, 0 /* set lock to free */

        b epStart /* Go enable preemption... */
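/*
 * Illustrative only (not part of the original source): a minimal C sketch of how
 * a caller might use hw_lock_init/hw_lock_unlock above together with hw_lock_lock
 * below, assuming hw_lock_t addresses a word of lock storage as in the prototypes:
 *
 *	extern void hw_lock_init(hw_lock_t);
 *	extern void hw_lock_lock(hw_lock_t);
 *	extern void hw_lock_unlock(hw_lock_t);
 *
 *	static unsigned int my_lock;			// hypothetical lock word
 *
 *	void example(void) {
 *		hw_lock_init((hw_lock_t)&my_lock);	// word is set to 0 == free
 *		hw_lock_lock((hw_lock_t)&my_lock);	// spins; preemption disabled under MACH_RT
 *		// ... critical section ...
 *		hw_lock_unlock((hw_lock_t)&my_lock);	// sync, store 0, re-enable preemption
 *	}
 */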
/*
 * Special case for internal use.  Uses same lock code, but sets up so
 * that there will be no disabling of preemption after locking.  Generally
 * used for mutex locks when obtaining the interlock, although there is
 * nothing stopping other uses.
 */
lockLock: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */
        ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */
        cmplwi cr1,r1,0 /* Set flag so we will not disable preemption */
        lwz r4,0(r4) /* Get the timeout value */
        b lockComm /* Join on up... */
/*
 * void hw_lock_lock(hw_lock_t)
 *
 * Acquire lock, spinning until it becomes available.
 * MACH_RT: also return with preemption disabled.
 * Apparently not used except by mach_perf.
 * We will just set a default timeout and jump into the NORMAL timeout lock.
 */
        .globl EXT(hw_lock_lock)

lockDisa: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */
        ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */
        cmplw cr1,r1,r1 /* Set flag so we will disable preemption */
        lwz r4,0(r4) /* Get the timeout value */
        b lockComm /* Join on up... */
/*
 * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
 *
 * Try to acquire spin-lock.  Return success (1) or failure (0).
 * Attempt will fail after timeout ticks of the timebase.
 * We try fairly hard to get this lock.  We disable for interruptions, but
 * reenable after a "short" timeout (128 ticks, we may want to change this).
 * After checking to see if the large timeout value (passed in) has expired and a
 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
 * we return either in abject failure, or disable and go back to the lock sniff routine.
 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
 *
 * One programming note: NEVER DO NOTHING IN HERE NO HOW THAT WILL FORCE US TO CALL
 * THIS WITH TRANSLATION OR INTERRUPTIONS EITHER ON OR OFF, GOSH DARN IT!
 */
        .globl EXT(hw_lock_to)

        lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
        lis r5,0xEEEE /* (TEST/DEBUG) */
        oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
        sc /* (TEST/DEBUG) */

        mflr r12 ; (TEST/DEBUG)
        bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
        mtlr r12 ; (TEST/DEBUG)

        cmplw cr1,r1,r1 /* Set flag so we will disable preemption */

lockComm: mfmsr r9 /* Get the MSR value */
        mr r5,r3 /* Get the address of the lock */
        rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */

        mtmsr r7 /* Turn off interruptions */
        mftb r8 /* Get the low part of the time base */

lcktry: lwarx r6,0,r5 /* Grab the lock value */
        li r3,1 /* Use part of the delay time */
        mr. r6,r6 /* Is it locked? */
        bne- lcksniff /* Yeah, wait for it to clear... */
        stwcx. r3,0,r5 /* Try to seize that there durn lock */
        bne- lcktry /* Couldn't get it... */
        beq+ cr1,daPreComm /* We got it, go disable preemption if we're supposed to... */
        mtmsr r9 ; Restore interrupt state
        blr /* Go on home... */
        beq+ lckgot /* We got it, yahoo... */
        b lcktry /* Just start up again if the store failed... */

lcksniff: lwz r3,0(r5) /* Get that lock in here */
        mr. r3,r3 /* Is it free yet? */
        beq+ lcktry /* Yeah, try for it again... */

        mftb r10 /* Time stamp us now */
        sub r10,r10,r8 /* Get the elapsed time */
        cmplwi r10,128 /* Have we been spinning for 128 tb ticks? */
        blt+ lcksniff /* Not yet... */

        mtmsr r9 /* Say, any interrupts pending? */
/* The following instructions force the pipeline to be interlocked so that only one
   instruction is issued per cycle.  This ensures that we stay enabled for a long enough
   time; if the window is too short, pending interruptions will not have a chance to be taken */

        subi r4,r4,128 /* Back off elapsed time from timeout value */
        or r4,r4,r4 /* Do nothing here but force a single cycle delay */
        mr. r4,r4 /* See if we used the whole timeout */
        li r3,0 /* Assume a timeout return code */
        or r4,r4,r4 /* Do nothing here but force a single cycle delay */

        ble- lckfail /* We failed */
        mtmsr r7 /* Disable for interruptions */
        mftb r8 /* Get the low part of the time base */
        b lcksniff /* Now that we've opened an enable window, keep trying... */

lckgot: mtmsr r9 /* Enable for interruptions */
        isync /* Make sure we don't use a speculatively loaded value */
#endif  /* !MACH_RT */

lckfail: /* We couldn't get the lock */
        li r3,0 /* Set failure return code */
        blr /* Return, head hanging low... */
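/*
 * Illustrative only: a hedged sketch of a hw_lock_to caller.  The timeout is in
 * timebase ticks; LockTimeOut (referenced by lockDisa/lockLock above) is one
 * plausible value to pass:
 *
 *	extern unsigned int hw_lock_to(hw_lock_t, unsigned int timeout);
 *	extern unsigned int LockTimeOut;
 *
 *	if (!hw_lock_to(lock, LockTimeOut))
 *		panic("example: spin lock timed out");	// hypothetical caller policy
 */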
/*
 * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
 *
 * Try to acquire spin-lock.  The second parameter is the bit mask to test and set;
 * multiple bits may be set.  Return success (1) or failure (0).
 * Attempt will fail after timeout ticks of the timebase.
 * We try fairly hard to get this lock.  We disable for interruptions, but
 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
 * After checking to see if the large timeout value (passed in) has expired and a
 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
 * we return either in abject failure, or disable and go back to the lock sniff routine.
 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
 *
 * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
 * USES.  THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND
 * RESTORE FROM THE STACK.
 */
        nop ; Force loop alignment to cache line

        .globl EXT(hw_lock_bit)

        mfmsr r9 /* Get the MSR value */
        rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */

        mtmsr r7 /* Turn off interruptions */

        mftb r8 /* Get the low part of the time base */

bittry: lwarx r6,0,r3 /* Grab the lock value */
        and. r0,r6,r4 /* See if any of the lock bits are on */
        or r6,r6,r4 /* Turn on the lock bits */
        bne- bitsniff /* Yeah, wait for it to clear... */
        stwcx. r6,0,r3 /* Try to seize that there durn lock */
        beq+ bitgot /* We got it, yahoo... */
        b bittry /* Just start up again if the store failed... */

bitsniff: lwz r6,0(r3) /* Get that lock in here */
        and. r0,r6,r4 /* See if any of the lock bits are on */
        beq+ bittry /* Yeah, try for it again... */

        mftb r6 /* Time stamp us now */
        sub r6,r6,r8 /* Get the elapsed time */
        cmplwi r6,128 /* Have we been spinning for 128 tb ticks? */
        blt+ bitsniff /* Not yet... */

        mtmsr r9 /* Say, any interrupts pending? */
/* The following instructions force the pipeline to be interlocked so that only one
   instruction is issued per cycle.  This ensures that we stay enabled for a long enough
   time.  If the window is too short, pending interruptions will not have a chance to be taken */

        subi r5,r5,128 /* Back off elapsed time from timeout value */
        or r5,r5,r5 /* Do nothing here but force a single cycle delay */
        mr. r5,r5 /* See if we used the whole timeout */
        or r5,r5,r5 /* Do nothing here but force a single cycle delay */

        ble- bitfail /* We failed */
        mtmsr r7 /* Disable for interruptions */
        mftb r8 /* Get the low part of the time base */
        b bitsniff /* Now that we've opened an enable window, keep trying... */

bitgot: mtmsr r9 /* Enable for interruptions */
        li r3,1 /* Set good return code */
        isync /* Make sure we don't use a speculatively loaded value */
bitfail: li r3,0 /* Set failure return code */
        blr /* Return, head hanging low... */

/*
 * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
 *
 * Release bit based spin-lock.  The second parameter is the bit mask to clear.
 * Multiple bits may be cleared.
 *
 * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
 * USES.  THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND
 * RESTORE FROM THE STACK.
 */
        .globl EXT(hw_unlock_bit)

ubittry: lwarx r0,0,r3 /* Grab the lock value */
        andc r0,r0,r4 /* Clear the lock bits */
        stwcx. r0,0,r3 /* Try to clear that there durn lock */
        bne- ubittry /* Try again, couldn't save it... */
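/*
 * Illustrative only: hw_lock_bit/hw_unlock_bit guard one or more bits within a
 * shared word rather than the whole word.  A hedged C sketch with a hypothetical
 * bit assignment:
 *
 *	#define MY_LOCK_BIT	0x00000001		// hypothetical
 *
 *	if (hw_lock_bit(&shared_word, MY_LOCK_BIT, LockTimeOut)) {
 *		// the bits in MY_LOCK_BIT are now set and owned by this caller
 *		hw_unlock_bit(&shared_word, MY_LOCK_BIT);
 *	}
 */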
/*
 * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
 *	unsigned int newb, unsigned int timeout)
 *
 * Try to acquire spin-lock.  The second parameter is the bit mask to check.
 * The third is the value of those bits and the fourth is what to set them to.
 * Return success (1) or failure (0).
 * Attempt will fail after timeout ticks of the timebase.
 * We try fairly hard to get this lock.  We disable for interruptions, but
 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
 * After checking to see if the large timeout value (passed in) has expired and a
 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
 * we return either in abject failure, or disable and go back to the lock sniff routine.
 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
 */
        nop ; Force loop alignment to cache line

        .globl EXT(hw_lock_mbits)

        mfmsr r9 ; Get the MSR value
        rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Get MSR that is uninterruptible

        mtmsr r8 ; Turn off interruptions

        mftb r10 ; Get the low part of the time base

mbittry: lwarx r12,0,r3 ; Grab the lock value
        and r0,r12,r4 ; Clear extra bits
        or r12,r12,r6 ; Turn on the lock bits
        cmplw r0,r5 ; Are these the right bits?
        bne- mbitsniff ; Nope, wait for it to clear...
        stwcx. r12,0,r3 ; Try to seize that there durn lock
        beq+ mbitgot ; We got it, yahoo...
        b mbittry ; Just start up again if the store failed...

mbitsniff: lwz r12,0(r3) ; Get that lock in here
        and r0,r12,r4 ; Clear extra bits
        or r12,r12,r6 ; Turn on the lock bits
        cmplw r0,r5 ; Are these the right bits?
        beq+ mbittry ; Yeah, try for it again...

        mftb r11 ; Time stamp us now
        sub r11,r11,r10 ; Get the elapsed time
        cmplwi r11,128 ; Have we been spinning for 128 tb ticks?
        blt+ mbitsniff ; Not yet...

        mtmsr r9 ; Say, any interrupts pending?
; The following instructions force the pipeline to be interlocked so that only one
; instruction is issued per cycle.  This ensures that we stay enabled for a long enough
; time.  If the window is too short, pending interruptions will not have a chance to be taken.

        subi r7,r7,128 ; Back off elapsed time from timeout value
        or r7,r7,r7 ; Do nothing here but force a single cycle delay
        mr. r7,r7 ; See if we used the whole timeout
        or r7,r7,r7 ; Do nothing here but force a single cycle delay

        ble- mbitfail ; We failed
        mtmsr r8 ; Disable for interruptions
        mftb r10 ; Get the low part of the time base
        b mbitsniff ; Now that we have opened an enable window, keep trying...

mbitgot: mtmsr r9 ; Enable for interruptions
        li r3,1 ; Set good return code
        isync ; Make sure we do not use a speculatively loaded value

mbitfail: li r3,0 ; Set failure return code
        blr ; Return, head hanging low...
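/*
 * Illustrative only: hw_lock_mbits waits until the bits selected by the mask hold
 * an expected value and then ORs in the new bits, so it can implement a small
 * state field.  A hedged sketch with hypothetical state encodings:
 *
 *	// wait for the low nibble of *word to read STATE_IDLE, then OR in STATE_BUSY
 *	if (!hw_lock_mbits(word, 0x0000000F, STATE_IDLE, STATE_BUSY, LockTimeOut))
 *		panic("example: state never went idle");
 */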
/*
 * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
 *
 * Spin until word hits 0 or timeout.
 * Return success (1) or failure (0).
 * Attempt will fail after timeout ticks of the timebase.
 *
 * The theory is that a processor will bump a counter as it signals
 * other processors.  Then it will spin until the counter hits 0 (or
 * times out).  The other processors, as they receive the signal, will
 * decrement the counter.
 *
 * The other processors use an interlocked update to decrement; this one
 * does not need to interlock.
 */
        .globl EXT(hw_cpu_sync)

        mftb r10 ; Get the low part of the time base
        mr r9,r3 ; Save the sync word address
        li r3,1 ; Assume we work

csynctry: lwz r11,0(r9) ; Grab the sync value
        mr. r11,r11 ; Counter hit 0?
        beqlr- ; Yes, done, return success...
        mftb r12 ; Time stamp us now

        sub r12,r12,r10 ; Get the elapsed time
        cmplw r4,r12 ; Have we gone too long?
        bge+ csynctry ; Not yet...

        li r3,0 ; Set failure...
        blr ; Return, head hanging low...
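/*
 * Illustrative only: the signalling protocol described above, sketched in C.  The
 * initiator accounts for each CPU it signals and then waits for the counter to
 * drain to zero; every signalled CPU decrements it with an interlocked update:
 *
 *	sync_count = cpus_signalled;		// hypothetical bookkeeping
 *	signal_other_cpus();			// hypothetical helper
 *	if (!hw_cpu_sync(&sync_count, LockTimeOut))
 *		panic("example: cpus did not check in");
 *
 *	// ... and on each signalled cpu:
 *	hw_atomic_sub(&sync_count, 1);
 */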
/*
 * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
 *
 * Spin until word changes or timeout.
 * Return success (1) or failure (0).
 * Attempt will fail after timeout ticks of the timebase.
 *
 * This is used to ensure that a processor passes a certain point.
 * An example of use is to monitor the last interrupt time in the
 * per_proc block.  This can be used to ensure that the other processor
 * has seen at least one interrupt since a specific time.
 */
        .globl EXT(hw_cpu_wcng)
        mftb r10 ; Get the low part of the time base
        mr r9,r3 ; Save the sync word address
        li r3,1 ; Assume we work

wcngtry: lwz r11,0(r9) ; Grab the value
        cmplw r11,r4 ; Do they still match?
        bnelr- ; Nope, cool...
        mftb r12 ; Time stamp us now

        sub r12,r12,r10 ; Get the elapsed time
        cmplw r5,r12 ; Have we gone too long?
        bge+ wcngtry ; Not yet...

        li r3,0 ; Set failure...
        blr ; Return, head hanging low...
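/*
 * Illustrative only: a hedged sketch of the usage described above, waiting for
 * another processor's "last interrupt time" word to move off a sampled value:
 *
 *	unsigned int sample = *interrupt_time_word;	// hypothetical pointer
 *	if (!hw_cpu_wcng(interrupt_time_word, sample, LockTimeOut))
 *		panic("example: processor never took another interrupt");
 */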
/*
 * unsigned int hw_lock_try(hw_lock_t)
 *
 * Try to acquire spin-lock.  Return success (1) or failure (0).
 * MACH_RT: returns with preemption disabled on success.
 */
        .globl EXT(hw_lock_try)

        lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
        lis r5,0x9999 /* (TEST/DEBUG) */
        oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
        sc /* (TEST/DEBUG) */

        mfmsr r9 /* Save the MSR value */
        li r4, 1 /* value to be stored... 1==taken */
        rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruption bit */

        lis r5, 0x10 /* roughly 1E6 */
#endif  /* MACH_LDEBUG */

        mtmsr r7 /* Disable interruptions and thus, preemption */

        bdnz+ 0f /* Count attempts */
        mtmsr r9 /* Restore enablement */
        BREAKPOINT_TRAP /* Get to debugger */
        mtmsr r7 /* Disable interruptions and thus, preemption */
#endif  /* MACH_LDEBUG */

        lwarx r5,0,r3 /* Ld from addr of arg and reserve */

        cmpwi r5, 0 /* TEST... */
        bne- .L_lock_try_failed /* branch if taken. Predict free */

        stwcx. r4, 0,r3 /* And SET (if still reserved) */
        mfsprg r6,0 /* Get the per_proc block */
        bne- .L_lock_try_loop /* If set failed, loop back */

        lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */

        lwz r5,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */
        addi r5,r5,1 /* Bring up the disable count */
        stw r5,CPU_PREEMPTION_LEVEL(r6) /* Save it back */

        mtmsr r9 /* Allow interruptions now */
        li r3,1 /* Set that the lock was free */

        mtmsr r9 /* Allow interruptions now */
        li r3,0 /* FAILURE - lock was taken */

/*
 * unsigned int hw_lock_held(hw_lock_t)
 *
 * Return 1 if lock is held
 * MACH_RT: doesn't change preemption state.
 * N.B.  Racy, of course.
 */
        .globl EXT(hw_lock_held)

        lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
        lis r5,0x8888 /* (TEST/DEBUG) */
        oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
        sc /* (TEST/DEBUG) */

        isync /* Make sure we don't use a speculatively fetched lock */
        lwz r3, 0(r3) /* Return value of lock */
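/*
 * Illustrative only: the usual try-lock pattern built on the two routines above.
 * On success the caller holds the lock (with preemption disabled under MACH_RT);
 * on failure it backs off instead of spinning:
 *
 *	if (hw_lock_try(lock)) {
 *		// ... short critical section ...
 *		hw_lock_unlock(lock);
 *	} else {
 *		// do something else; hw_lock_held(lock) is only a racy hint
 *	}
 */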
/*
 * unsigned int hw_compare_and_store(unsigned int old, unsigned int new, unsigned int *area)
 *
 * Compare old to the value at area; if they are equal, store new and return true,
 * else return false and do not store.
 * This is an atomic operation.
 */
        .globl EXT(hw_compare_and_store)

LEXT(hw_compare_and_store)

        mr r6,r3 /* Save the old value */

cstry: lwarx r9,0,r5 /* Grab the area value */
        li r3,1 /* Assume it works */
        cmplw cr0,r9,r6 /* Does it match the old value? */
        bne- csfail /* No, it must have changed... */
        stwcx. r4,0,r5 /* Try to save the new value */
        bne- cstry /* Didn't get it, try again... */
        isync /* Just hold up prefetch */

csfail: li r3,0 /* Set failure */
        blr /* Better luck next time... */
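/*
 * Illustrative only: the classic compare-and-swap retry loop built on the
 * prototype documented above:
 *
 *	unsigned int old, new;
 *	do {
 *		old = *area;				// snapshot the current value
 *		new = old | 0x80000000;			// hypothetical transformation
 *	} while (!hw_compare_and_store(old, new, area));
 */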
/*
 * unsigned int hw_atomic_add(unsigned int *area, int val)
 *
 * Atomically add the second parameter to the first.
 * Returns the result.
 */
        .globl EXT(hw_atomic_add)

        mr r6,r3 /* Save the area */

addtry: lwarx r3,0,r6 /* Grab the area value */
        add r3,r3,r4 /* Add the value */
        stwcx. r3,0,r6 /* Try to save the new value */
        bne- addtry /* Didn't get it, try again... */

/*
 * unsigned int hw_atomic_sub(unsigned int *area, int val)
 *
 * Atomically subtract the second parameter from the first.
 * Returns the result.
 */
        .globl EXT(hw_atomic_sub)

        mr r6,r3 /* Save the area */

subtry: lwarx r3,0,r6 /* Grab the area value */
        sub r3,r3,r4 /* Subtract the value */
        stwcx. r3,0,r6 /* Try to save the new value */
        bne- subtry /* Didn't get it, try again... */
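/*
 * Illustrative only: both routines return the post-update value, so a caller can
 * update and test in one step, as in this hedged sketch:
 *
 *	if (hw_atomic_sub(&outstanding_requests, 1) == 0)
 *		wakeup_waiter();			// hypothetical helper
 */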
/*
 * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
 *
 * Atomically inserts the element at the head of the list
 * anchor is the pointer to the first element
 * element is the pointer to the element to insert
 * disp is the displacement into the element to the chain pointer
 */
        .globl EXT(hw_queue_atomic)

LEXT(hw_queue_atomic)

        mr r7,r4 /* Make end point the same as start */
        mr r8,r5 /* Copy the displacement also */
        b hw_queue_comm /* Join common code... */

/*
 * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
 *
 * Atomically inserts the list of elements at the head of the list
 * anchor is the pointer to the first element
 * first is the pointer to the first element to insert
 * last is the pointer to the last element to insert
 * disp is the displacement into the element to the chain pointer
 */
        .globl EXT(hw_queue_atomic_list)

LEXT(hw_queue_atomic_list)

        mr r7,r5 /* Make end point the same as start */
        mr r8,r6 /* Copy the displacement also */

        lwarx r9,0,r3 /* Pick up the anchor */
        stwx r9,r8,r7 /* Chain that to the end of the new stuff */
        stwcx. r4,0,r3 /* Try to chain into the front */
        bne- hw_queue_comm2 /* Didn't make it, try again... */

/*
 * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
 *
 * Atomically removes the first element in a list and returns it.
 * anchor is the pointer to the first element
 * disp is the displacement into the element to the chain pointer
 * Returns element if found, 0 if empty.
 */
        .globl EXT(hw_dequeue_atomic)

LEXT(hw_dequeue_atomic)

        mr r5,r3 /* Save the anchor */

        lwarx r3,0,r5 /* Pick up the anchor */
        mr. r3,r3 /* Is the list empty? */
        beqlr- /* Leave, the list is empty... */
        lwzx r9,r4,r3 /* Get the next in line */
        stwcx. r9,0,r5 /* Try to chain into the front */
        beqlr+ ; Got the thing, go away with it...
        b hw_dequeue_comm2 ; Did not make it, try again...
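/*
 * Illustrative only: together these form a lock-free LIFO.  The disp argument is
 * the byte offset of the link field within an element, so a C caller might use
 * offsetof (from <stddef.h>), as in this hedged sketch:
 *
 *	struct elem { unsigned int *next; int payload; };	// hypothetical element
 *	unsigned int *list_head = 0;				// hypothetical anchor word
 *
 *	hw_queue_atomic((unsigned int *)&list_head, (unsigned int *)&e,
 *			offsetof(struct elem, next));
 *	struct elem *p = (struct elem *)hw_dequeue_atomic((unsigned int *)&list_head,
 *			offsetof(struct elem, next));
 */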
/*
 * void mutex_init(mutex_t* l, etap_event_t etap)
 */
ENTRY(mutex_init,TAG_NO_FRAME_USED)

        stw r10, MUTEX_ILK(r3) /* clear interlock */
        stw r10, MUTEX_LOCKED(r3) /* clear locked flag */
        sth r10, MUTEX_WAITERS(r3) /* init waiter count */

        stw r10, MUTEX_PC(r3) /* init caller pc */
        stw r10, MUTEX_THREAD(r3) /* and owning thread */
        stw r10, MUTEX_TYPE(r3) /* set lock type */
#endif  /* MACH_LDEBUG */

        bl EXT(etap_mutex_init) /* init ETAP data */
#endif  /* ETAP_LOCK_TRACE */

/*
 * void _mutex_lock(mutex_t*)
 */
        .globl EXT(_mutex_lock)

        mflr r12 ; (TEST/DEBUG)
        bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
        mtlr r12 ; (TEST/DEBUG)

        stw r0,SWT_HI(r1) /* set wait time to 0 (HI) */
        stw r0,SWT_LO(r1) /* set wait time to 0 (LO) */
        stw r0,MISSED(r1) /* clear local miss marker */
#endif  /* ETAP_LOCK_TRACE */

        CHECK_NO_SIMPLELOCKS()

        mfsprg r4,0 /* (TEST/DEBUG) */
        lwz r4,PP_CPU_DATA(r4) /* (TEST/DEBUG) */
        lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
        lwz r4,CPU_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
        lis r5,0xAAAA /* (TEST/DEBUG) */
        oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
        sc /* (TEST/DEBUG) */

        bl lockDisa /* Go get a lock on the mutex's interlock lock */
        mr. r4,r3 /* Did we get it? */
        lwz r3,FM_ARG0(r1) /* Restore the lock address */
        bne+ mlGotInt /* We got it just fine... */

        lis r3,HIGH_ADDR(mutex_failed1) ; Get the failed mutex message
        ori r3,r3,LOW_ADDR(mutex_failed1) ; Get the failed mutex message
        bl EXT(panic) ; Call panic
        BREAKPOINT_TRAP ; We die here anyway, can not get the lock

        STRINGD "We can't get a mutex interlock lock on mutex_lock\n\000"

/* Note that there is no reason to do a load and reserve here.  We already
   hold the interlock lock and no one can touch this field unless they
   have that, so, we're free to play */

        lwz r4,MUTEX_LOCKED(r3) /* Get the mutex's lock field */

        li r10,1 /* Set the lock value */

        mr. r4,r4 /* So, can we have it? */
        bne- mlInUse /* Nope, somebody's playing already... */
        stw r10,MUTEX_LOCKED(r3) /* Take it unto ourselves */

        rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1

        mfsprg r9,0 /* Get the per_proc block */
        lwz r10,0(r1) /* Get previous save frame */
        lwz r9,PP_CPU_DATA(r9) /* Point to the cpu data area */
        lwz r10,FM_LR_SAVE(r10) /* Get our caller's address */
        lwz r8, CPU_ACTIVE_THREAD(r9) /* Get the active thread */
        stw r10,MUTEX_PC(r3) /* Save our caller */
        mr. r8,r8 /* Is there any thread? */
        stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */
        beq- .L_ml_no_active_thread /* No owning thread... */
        lwz r9,THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
        addi r9,r9,1 /* Bump it up */
        stw r9,THREAD_MUTEX_COUNT(r8) /* Stash it back */
.L_ml_no_active_thread:
#endif  /* MACH_LDEBUG */

        li r10,0 /* Get the unlock value */
        sync /* Push it all out */
        stw r10,MUTEX_ILK(r3) /* free the interlock */

        bl EXT(etap_mutex_hold) /* collect hold timestamp */
#endif  /* ETAP_LOCK_TRACE */

        EPILOG /* Restore all saved registers */

        b epStart /* Go enable preemption... */

/*
 * We come to here when we have a resource conflict.  In other words,
 * the mutex is held.
 */

        cmpwi r7,0 /* did we already take a wait timestamp ? */
        bne .L_ml_block /* yup. carry-on */
        bl EXT(etap_mutex_miss) /* get wait timestamp */
        stw r3,SWT_HI(r1) /* store timestamp */

        li r7, 1 /* mark wait timestamp as taken */

        lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
#endif  /* ETAP_LOCK_TRACE */

        CHECK_MYLOCK(MUTEX_THREAD) /* Assert we don't own the lock already */

/* Note that we come in here with the interlock set.  The wait routine
 * will unlock it before waiting.
 */
        bl EXT(mutex_lock_wait) /* Wait for our turn at the lock */

        lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
        b .L_ml_retry /* and try again... */

/*
 * void _mutex_try(mutex_t*)
 */
        .globl EXT(_mutex_try)

        PROLOG(8) /* reserve space for SWT_HI and SWT_LO */
        stw r5, SWT_HI(r1) /* set wait time to 0 (HI) */
        stw r5, SWT_LO(r1) /* set wait time to 0 (LO) */
#endif  /* ETAP_LOCK_TRACE */

        lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
        lis r5,0xBBBB /* (TEST/DEBUG) */
        oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
        sc /* (TEST/DEBUG) */

        CHECK_NO_SIMPLELOCKS()

        lwz r6,MUTEX_LOCKED(r3) /* Quick check */
        mr. r6,r6 /* to see if someone has this lock already */
        bne- mtFail /* Someone's got it already... */

        bl lockDisa /* Go get a lock on the mutex's interlock lock */
        mr. r4,r3 /* Did we get it? */
        lwz r3,FM_ARG0(r1) /* Restore the lock address */
        bne+ mtGotInt /* We got it just fine... */

        lis r3,HIGH_ADDR(mutex_failed2) ; Get the failed mutex message
        ori r3,r3,LOW_ADDR(mutex_failed2) ; Get the failed mutex message
        bl EXT(panic) ; Call panic
        BREAKPOINT_TRAP ; We die here anyway, can not get the lock

        STRINGD "We can't get a mutex interlock lock on mutex_try\n\000"

/* Note that there is no reason to do a load and reserve here.  We already
   hold the interlock and no one can touch this field unless they
   have that, so, we're free to play */

        lwz r4,MUTEX_LOCKED(r3) /* Get the mutex's lock field */

        li r10,1 /* Set the lock value */

        mr. r4,r4 /* So, can we have it? */
        bne- mtInUse /* Nope, somebody's playing already... */
        stw r10,MUTEX_LOCKED(r3) /* Take it unto ourselves */

        rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1

        mfsprg r9,0 /* Get the per_proc block */
        lwz r10,0(r1) /* Get previous save frame */
        lwz r9,PP_CPU_DATA(r9) /* Point to the cpu data area */
        lwz r10,FM_LR_SAVE(r10) /* Get our caller's address */
        lwz r8, CPU_ACTIVE_THREAD(r9) /* Get the active thread */
        stw r10,MUTEX_PC(r3) /* Save our caller */
        mr. r8,r8 /* Is there any thread? */
        stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */
        beq- .L_mt_no_active_thread /* No owning thread... */
        lwz r9, THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
        addi r9, r9, 1 /* Bump it up */
        stw r9, THREAD_MUTEX_COUNT(r8) /* Stash it back */
.L_mt_no_active_thread:
#endif  /* MACH_LDEBUG */

        li r10,0 /* Get the unlock value */
        sync /* Push it all out */
        stw r10,MUTEX_ILK(r3) /* free the interlock */

        lwz r4,0(r1) /* Back chain the stack */

        lwz r4,FM_LR_SAVE(r4) /* Get our caller's address */

        bl EXT(etap_mutex_hold) /* collect hold timestamp */
#endif  /* ETAP_LOCK_TRACE */

        bl epStart /* Go enable preemption... */

        EPILOG /* Restore all saved registers */

/*
 * We come to here when we have a resource conflict.  In other words,
 * the mutex is held.
 */

mtInUse: li r10,0 /* Get the unlock value */
        sync /* Push it all out */
        stw r10,MUTEX_ILK(r3) /* free the interlock */

        bl epStart /* Go enable preemption... */

mtFail: li r3,0 /* Set failure code */

        EPILOG /* Restore all saved registers */

/*
 * void mutex_unlock(mutex_t* l)
 */
        .globl EXT(mutex_unlock)

        bl EXT(etap_mutex_unlock) /* collect ETAP data */
        lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
#endif  /* ETAP_LOCK_TRACE */

        CHECK_THREAD(MUTEX_THREAD)

        mfsprg r4,0 /* (TEST/DEBUG) */
        lwz r4,PP_CPU_DATA(r4) /* (TEST/DEBUG) */
        lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
        lwz r4,CPU_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
        lis r5,0xCCCC /* (TEST/DEBUG) */
        oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
        sc /* (TEST/DEBUG) */

        bl lockDisa /* Go get a lock on the mutex's interlock lock */
        mr. r4,r3 /* Did we get it? */
        lwz r3,FM_ARG0(r1) /* Restore the lock address */
        bne+ muGotInt /* We got it just fine... */

        lis r3,HIGH_ADDR(mutex_failed3) ; Get the failed mutex message
        ori r3,r3,LOW_ADDR(mutex_failed3) ; Get the failed mutex message
        bl EXT(panic) ; Call panic
        BREAKPOINT_TRAP ; We die here anyway, can not get the lock

        STRINGD "We can't get a mutex interlock lock on mutex_unlock\n\000"

        lhz r10,MUTEX_WAITERS(r3) /* are there any waiters ? */

        beq+ muUnlock /* Nope, we're done... */

        bl EXT(mutex_unlock_wakeup) /* yes, wake a thread */
        lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
        li r10,0 /* Get unlock value */

        rlwinm r9,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1

        lwz r9,PP_CPU_DATA(r9)
        lwz r9,CPU_ACTIVE_THREAD(r9)
        stw r10,MUTEX_THREAD(r3) /* disown thread */

        beq- .L_mu_no_active_thread
        lwz r8,THREAD_MUTEX_COUNT(r9)

        stw r8,THREAD_MUTEX_COUNT(r9)
.L_mu_no_active_thread:
#endif  /* MACH_LDEBUG */

        stw r10,MUTEX_LOCKED(r3) /* release the mutex */
        sync /* Make sure it's all there before we release */
        stw r10,MUTEX_ILK(r3) /* unlock the interlock */

        EPILOG /* Deal with the stack now, enable_preemption doesn't always want one */

        b epStart /* Go enable preemption... */
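/*
 * Illustrative only: the mutex entry points above pair up in the usual way.  A
 * hedged C sketch of a caller (the second argument to mutex_init names an ETAP
 * event; 0 is used here purely for illustration):
 *
 *	mutex_t m;
 *
 *	mutex_init(&m, 0);
 *	_mutex_lock(&m);			// may block in mutex_lock_wait()
 *	// ... critical section ...
 *	mutex_unlock(&m);
 *
 *	if (_mutex_try(&m)) {			// non-blocking attempt
 *		mutex_unlock(&m);
 *	}
 */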
/*
 * void interlock_unlock(hw_lock_t lock)
 */
        .globl EXT(interlock_unlock)

LEXT(interlock_unlock)

        lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
        lis r5,0xDDDD /* (TEST/DEBUG) */
        oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
        sc /* (TEST/DEBUG) */

        b epStart /* Go enable preemption... */
/*
 * Here is where we enable preemption.  We need to be protected
 * against ourselves: we can't chance getting interrupted and modifying
 * our processor-wide preemption count after we've loaded it up.  So,
 * we need to disable all 'rupts.  Actually, we could use a compare
 * and swap to do this, but, since there are no MP considerations
 * (we are dealing with a CPU local field) it is much, much faster
 * to disable.
 *
 * Note that if we are not genned MP, the calls here will be no-opped via
 * a #define and since the _mp forms are the same, likewise a #define
 * will be used to route to the other forms
 */
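/*
 * Illustrative only: the per-CPU preemption count described above nests, so
 * disables and enables must balance, as in this hedged sketch:
 *
 *	_disable_preemption();		// level 0 -> 1
 *	_disable_preemption();		// level 1 -> 2
 *	_enable_preemption();		// level 2 -> 1, no preemption check yet
 *	_enable_preemption();		// level 1 -> 0, pending urgent ASTs may now preempt
 */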
/* This version does not check if we get preempted or not */

        .globl EXT(_enable_preemption_no_check)

LEXT(_enable_preemption_no_check)

        cmplw cr1,r1,r1 /* Force zero cr so we know not to check if preempted */
        b epCommn /* Join up with the other enable code... */

/* This version checks if we get preempted or not */

        .globl EXT(_enable_preemption)

LEXT(_enable_preemption)

epStart: cmplwi cr1,r1,0 /* Force non-zero cr so we know to check if preempted */

/*
 * Common enable preemption code
 */
epCommn: mfmsr r9 /* Save the old MSR */
        rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
        mtmsr r8 /* Interrupts off */

        mfsprg r3,0 /* Get the per_proc block */
        lwz r6,PP_CPU_DATA(r3) /* Get the pointer to the CPU data from per proc */
        li r8,-1 /* Get a decrementer */
        lwz r5,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */
        add. r5,r5,r8 /* Bring down the disable count */
        mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep from interrupting too early
        mr. r4,r4 ; (TEST/DEBUG)
        beq- epskptrc0 ; (TEST/DEBUG)
        lis r0,hi16(CutTrace) ; (TEST/DEBUG)
        lis r4,0xBBBB ; (TEST/DEBUG)
        oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)

epskptrc0: mr. r5,r5 ; (TEST/DEBUG)

        blt- epTooFar /* Yeah, we did... */
#endif  /* MACH_LDEBUG */
        stw r5,CPU_PREEMPTION_LEVEL(r6) /* Save it back */

        beq+ epCheckPreempt /* Go check if we need to be preempted... */

epNoCheck: mtmsr r9 /* Restore the interrupt level */

        lis r6,HIGH_ADDR(EXT(panic)) /* First half of panic call */
        lis r3,HIGH_ADDR(epTooFarStr) /* First half of panic string */
        ori r6,r6,LOW_ADDR(EXT(panic)) /* Second half of panic call */
        ori r3,r3,LOW_ADDR(epTooFarStr) /* Second half of panic string */
        mtlr r6 /* Get the address of the panic routine */
        mtmsr r9 /* Restore interruptions */

        STRINGD "_enable_preemption: preemption_level <= 0!\000"
#endif  /* MACH_LDEBUG */

        lwz r7,PP_NEED_AST(r3) /* Get the AST request address */
        li r5,AST_URGENT /* Get the requests we do honor */
        lwz r7,0(r7) /* Get the actual, real live, extra special AST word */
        lis r0,HIGH_ADDR(DoPreemptCall) /* Just in case, get the top of firmware call */
        and. r7,r7,r5 ; Should we preempt?
        ori r0,r0,LOW_ADDR(DoPreemptCall) /* Merge in bottom part */
        beq+ epCPno ; No preemption here...

        andi. r3,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off

epCPno: mtmsr r9 /* Allow interrupts if we can */
        beqlr+ ; We probably will not preempt...
        sc /* Do the preemption */
        blr /* Now, go away now... */

/*
 * Here is where we disable preemption.  Since preemption is on a
 * per processor basis (a thread runs on one CPU at a time) we don't
 * need any cross-processor synchronization.  We do, however, need to
 * be interrupt safe, so we don't preempt while in the process of
 * disabling it.  We could use SPLs, but since we always want complete
 * disablement, and this is platform specific code, we'll just kick the
 * MSR.  We'll save a couple of orders of magnitude over using SPLs.
 */
        nop ; Use these 5 nops to force daPreComm
        nop ; to a line boundary.

        .globl EXT(_disable_preemption)

LEXT(_disable_preemption)

daPreAll: mfmsr r9 /* Save the old MSR */
        rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
        mtmsr r8 /* Interrupts off */

daPreComm: mfsprg r6,0 /* Get the per_proc block */
        lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
        lwz r5,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */
        addi r5,r5,1 /* Bring up the disable count */
        stw r5,CPU_PREEMPTION_LEVEL(r6) /* Save it back */
        mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep from interrupting too early
        mr. r4,r4 ; (TEST/DEBUG)
        beq- epskptrc1 ; (TEST/DEBUG)
        lis r0,hi16(CutTrace) ; (TEST/DEBUG)
        lis r4,0xAAAA ; (TEST/DEBUG)
        oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)

epskptrc1: ; (TEST/DEBUG)

; Set PREEMPTSTACK above to enable a preemption traceback stack.
;
; NOTE: make sure that PREEMPTSTACK in aligned_data is
; set the same as it is here.  This is the number of
; traceback entries we can handle per processor.
; A value of 0 disables the stack.

        cmplwi r5,PREEMPTSTACK ; Maximum depth
        lwz r6,CPU_ACTIVE_THREAD(r6) ; Get the pointer to the currently active thread
        bgt- nopredeb ; Too many to stack...
        mr. r6,r6 ; During boot?
        beq- nopredeb ; Yes, do not do backtrace...
        lwz r6,THREAD_TOP_ACT(r6) ; Point to the active activation
        lwz r6,ACT_MACT_PCB(r6) ; Get the last savearea used
        mr. r0,r6 ; Any saved context?
        beq- nosaveds ; No...
        lwz r0,saver1(r6) ; Get end of savearea chain

nosaveds: li r11,0 ; Clear callers callers callers return
        li r10,0 ; Clear callers callers callers callers return
        li r8,0 ; Clear callers callers callers callers callers return
        lwz r2,0(r1) ; Get callers callers stack frame
        lwz r12,8(r2) ; Get our callers return
        lwz r4,0(r2) ; Back chain

        xor r2,r4,r2 ; Form difference
        cmplwi r2,8192 ; Within a couple of pages?
        mr r2,r4 ; Move register
        bge- nosaveher2 ; No, no back chain then...
        lwz r11,8(r2) ; Get our callers return
        lwz r4,0(r2) ; Back chain

        xor r2,r4,r2 ; Form difference
        cmplwi r2,8192 ; Within a couple of pages?
        mr r2,r4 ; Move register
        bge- nosaveher2 ; No, no back chain then...
        lwz r10,8(r2) ; Get our callers return
        lwz r4,0(r2) ; Back chain

        xor r2,r4,r2 ; Form difference
        cmplwi r2,8192 ; Within a couple of pages?
        mr r2,r4 ; Move register
        bge- nosaveher2 ; No, no back chain then...
        lwz r8,8(r2) ; Get our callers return

        addi r5,r5,-1 ; Get index to slot
        mfspr r6,pir ; Get our processor
        mflr r4 ; Get our return
        rlwinm r6,r6,8,0,23 ; Index to processor slot
        lis r2,hi16(EXT(DBGpreempt)) ; Stack high order
        rlwinm r5,r5,4,0,27 ; Index to stack slot
        ori r2,r2,lo16(EXT(DBGpreempt)) ; Stack low order
        add r2,r2,r5 ; Point to slot
        add r2,r2,r6 ; Move to processor
        stw r4,0(r2) ; Save our return
        stw r11,4(r2) ; Save callers caller
        stw r10,8(r2) ; Save callers callers caller
        stw r8,12(r2) ; Save callers callers callers caller

        mtmsr r9 /* Allow interruptions now */
/*
 * Return the active thread for both inside and outside osfmk consumption
 */
        .globl EXT(current_thread)

LEXT(current_thread)

        mfmsr r9 /* Save the old MSR */
        rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
        mtmsr r8 /* Interrupts off */
        mfsprg r6,0 /* Get the per_proc */
        lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
        lwz r3,CPU_ACTIVE_THREAD(r6) /* Get the active thread */
        mtmsr r9 /* Restore interruptions to entry */

/*
 * Return the current preemption level
 */
        .globl EXT(get_preemption_level)

LEXT(get_preemption_level)

        mfmsr r9 /* Save the old MSR */
        rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
        mtmsr r8 /* Interrupts off */
        mfsprg r6,0 /* Get the per_proc */
        lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
        lwz r3,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */
        mtmsr r9 /* Restore interruptions to entry */

/*
 * Return the simple lock count
 */
        .globl EXT(get_simple_lock_count)

LEXT(get_simple_lock_count)

        mfmsr r9 /* Save the old MSR */
        rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
        mtmsr r8 /* Interrupts off */
        mfsprg r6,0 /* Get the per_proc */
        lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
        lwz r3,CPU_SIMPLE_LOCK_COUNT(r6) /* Get the simple lock count */
        mtmsr r9 /* Restore interruptions to entry */

#endif  /* MACH_RT */