2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
24 #include <mach_assert.h>
25 #include <mach_ldebug.h>
28 #include <kern/etap_options.h>
31 #include <ppc/proc_reg.h>
36 #define SWT_HI 0+FM_SIZE
37 #define SWT_LO 4+FM_SIZE
38 #define MISSED 8+FM_SIZE
40 #define ILK_LOCKED 0x01
41 #define MUTEX_LOCKED 0x02
42 #define SLOCK_FAST 0x02
45 ; NOTE: make sure that PREEMPTSTACK in aligned_data is
46 ; set the same as it is here. This is the number of
47 ; traceback entries we can handle per processor
49 ; A value of 0 disables the stack.
51 #define PREEMPTSTACK 0
55 #include <ppc/POWERMAC/mp/mp.h>
57 #define PROLOG(space) \
58 stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
60 stw r3,FM_ARG0(r1) __ASMNL__ \
61 stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__
64 lwz r1,0(r1) __ASMNL__ \
65 lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
68 #if MACH_LDEBUG && CHECKLOCKS
70 * Routines for general lock debugging.
73 /* Gets lock check flags in CR6: CR bits 24-27 */
75 #define CHECK_SETUP(rg) \
76 lis rg,hi16(EXT(dgWork)) __ASMNL__ \
77 ori rg,rg,lo16(EXT(dgWork)) __ASMNL__ \
78 lbz rg,dgFlags(rg) __ASMNL__ \
83 * Checks for expected lock types and calls "panic" on
84 * mismatch. Detects calls to Mutex functions with
85 * type simplelock and vice versa.
87 #define CHECK_MUTEX_TYPE() \
88 bt 24+disLktypeb,1f __ASMNL__ \
89 lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
90 cmpwi r10,MUTEX_TAG __ASMNL__ \
92 lis r3,hi16(not_a_mutex) __ASMNL__ \
93 ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
94 bl EXT(panic) __ASMNL__ \
95 lwz r3,FM_ARG0(r1) __ASMNL__ \
100 STRINGD "not a mutex!\n\000"
103 #define CHECK_SIMPLE_LOCK_TYPE() \
104 bt 24+disLktypeb,1f __ASMNL__ \
105 lwz r10,SLOCK_TYPE(r3) __ASMNL__ \
106 cmpwi r10,USLOCK_TAG __ASMNL__ \
108 lis r3,hi16(not_a_slock) __ASMNL__ \
109 ori r3,r3,lo16(not_a_slock) __ASMNL__ \
110 bl EXT(panic) __ASMNL__ \
111 lwz r3,FM_ARG0(r1) __ASMNL__ \
116 STRINGD "not a simple lock!\n\000"
119 #define CHECK_NO_SIMPLELOCKS() \
120 bt 24+disLkNmSimpb,2f __ASMNL__ \
121 mfmsr r11 __ASMNL__ \
122 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
123 mtmsr r10 __ASMNL__ \
124 mfsprg r10,0 __ASMNL__ \
125 lwz r10,PP_CPU_DATA(r10) __ASMNL__ \
126 lwz r10,CPU_SIMPLE_LOCK_COUNT(r10) __ASMNL__ \
127 cmpwi r10,0 __ASMNL__ \
129 lis r3,hi16(simple_locks_held) __ASMNL__ \
130 ori r3,r3,lo16(simple_locks_held) __ASMNL__ \
131 bl EXT(panic) __ASMNL__ \
132 lwz r3,FM_ARG0(r1) __ASMNL__ \
134 mtmsr r11 __ASMNL__ \
139 STRINGD "simple locks held!\n\000"
143 * Verifies return to the correct thread in "unlock" situations.
146 #define CHECK_THREAD(thread_offset) \
147 bt 24+disLkThreadb,2f __ASMNL__ \
148 mfmsr r11 __ASMNL__ \
149 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
150 mtmsr r10 __ASMNL__ \
151 mfsprg r10,0 __ASMNL__ \
152 lwz r10,PP_CPU_DATA(r10) __ASMNL__ \
153 lwz r10,CPU_ACTIVE_THREAD(r10) __ASMNL__ \
154 cmpwi r10,0 __ASMNL__ \
156 lwz r9,thread_offset(r3) __ASMNL__ \
157 cmpw r9,r10 __ASMNL__ \
159 lis r3,hi16(wrong_thread) __ASMNL__ \
160 ori r3,r3,lo16(wrong_thread) __ASMNL__ \
161 bl EXT(panic) __ASMNL__ \
162 lwz r3,FM_ARG0(r1) __ASMNL__ \
164 mtmsr r11 __ASMNL__ \
168 STRINGD "wrong thread!\n\000"
171 #define CHECK_MYLOCK(thread_offset) \
172 bt 24+disLkMyLckb,2f __ASMNL__ \
173 mfmsr r11 __ASMNL__ \
174 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
175 mtmsr r10 __ASMNL__ \
176 mfsprg r10,0 __ASMNL__ \
177 lwz r10,PP_CPU_DATA(r10) __ASMNL__ \
178 lwz r10,CPU_ACTIVE_THREAD(r10) __ASMNL__ \
179 cmpwi r10,0 __ASMNL__ \
181 lwz r9, thread_offset(r3) __ASMNL__ \
182 cmpw r9,r10 __ASMNL__ \
184 lis r3, HIGH_ADDR(mylock_attempt) __ASMNL__ \
185 ori r3,r3,LOW_ADDR(mylock_attempt) __ASMNL__ \
186 bl EXT(panic) __ASMNL__ \
187 lwz r3,FM_ARG0(r1) __ASMNL__ \
189 mtmsr r11 __ASMNL__ \
194 STRINGD "mylock attempt!\n\000"
197 #else /* MACH_LDEBUG */
199 #define CHECK_SETUP(rg)
200 #define CHECK_MUTEX_TYPE()
201 #define CHECK_SIMPLE_LOCK_TYPE()
202 #define CHECK_THREAD(thread_offset)
203 #define CHECK_NO_SIMPLELOCKS()
204 #define CHECK_MYLOCK(thread_offset)
206 #endif /* MACH_LDEBUG */
209 * void hw_lock_init(hw_lock_t)
211 * Initialize a hardware lock. These locks should be cache aligned and a multiple
; r3 = lock address.  A zero lock word means "free" throughout this file.
215 ENTRY(hw_lock_init, TAG_NO_FRAME_USED)
217 li r0, 0 /* set lock to free == 0 */
218 stw r0, 0(r3) /* Initialize the lock */
222 * void hw_lock_unlock(hw_lock_t)
224 * Unconditionally release lock.
225 * Release preemption level.
230 .globl EXT(hw_lock_unlock)
; CutTrace syscall below is debug-only tracing (TEST/DEBUG builds).
235 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
236 lis r5,0xFFFF /* (TEST/DEBUG) */
237 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
238 sc /* (TEST/DEBUG) */
; sync publishes all stores performed while holding the lock before the
; lock word is cleared.
; NOTE(review): the stw that writes r0 back to the lock word is not
; visible in this chunk -- confirm against the full file.
240 sync /* Flush writes done under lock */
241 li r0, 0 /* set lock to free */
244 b epStart /* Go enable preemption... */
248 * Special case for internal use. Uses same lock code, but sets up so
249 * that there will be no disabling of preemption after locking. Generally
250 * used for mutex locks when obtaining the interlock although there is
251 * nothing stopping other uses.
; cr1 EQ is the flag tested by lockComm: here it is cleared (r1, the stack
; pointer, is never 0) meaning "do NOT disable preemption after locking".
254 lockLock: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */
255 ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */
256 cmplwi cr1,r1,0 /* Clear cr1 EQ: suppress the disable-preemption step */
257 lwz r4,0(r4) /* Get the timeout value */
258 b lockComm /* Join on up... */
261 * void hw_lock_lock(hw_lock_t)
263 * Acquire lock, spinning until it becomes available.
264 * Return with preemption disabled.
265 * Apparently not used except by mach_perf.
266 * We will just set a default timeout and jump into the NORMAL timeout lock.
270 .globl EXT(hw_lock_lock)
; cr1 EQ is set (r1 == r1 always) meaning "disable preemption on success".
274 lockDisa: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */
275 ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */
276 cmplw cr1,r1,r1 /* Set cr1 EQ: enable the disable-preemption step */
277 lwz r4,0(r4) /* Get the timeout value */
278 b lockComm /* Join on up... */
281 * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
283 * Try to acquire spin-lock. Return success (1) or failure (0).
284 * Attempt will fail after timeout ticks of the timebase.
285 * We try fairly hard to get this lock. We disable for interruptions, but
286 * reenable after a "short" timeout (128 ticks, we may want to change this).
287 * After checking to see if the large timeout value (passed in) has expired and a
288 * sufficient number of cycles have gone by (to insure pending 'rupts are taken),
289 * we return either in abject failure, or disable and go back to the lock sniff routine.
290 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
292 * One programming note: NEVER DO NOTHING IN HERE NO HOW THAT WILL FORCE US TO CALL
293 * THIS WITH TRANSLATION OR INTERRUPTIONS EITHER ON OR OFF, GOSH DARN IT!
297 .globl EXT(hw_lock_to)
302 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
303 lis r5,0xEEEE /* (TEST/DEBUG) */
304 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
305 sc /* (TEST/DEBUG) */
309 mflr r12 ; (TEST/DEBUG)
310 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
311 mtlr r12 ; (TEST/DEBUG)
314 cmplw cr1,r1,r1 /* Set flag to enable disable preemption */
; lockComm: common spin path shared with lockLock/lockDisa.
; r5 = lock address, r4 = remaining timeout (timebase ticks),
; r9 = entry MSR, r7 = MSR with EE off,
; cr1 EQ = "disable preemption on success" (see lockLock/lockDisa).
316 lockComm: mfmsr r9 /* Get the MSR value */
317 mr r5,r3 /* Get the address of the lock */
318 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */
320 mtmsr r7 /* Turn off interruptions */
321 mftb r8 /* Get the low part of the time base */
; Reservation-based acquire attempt; while held, fall into lcksniff.
323 lcktry: lwarx r6,0,r5 /* Grab the lock value */
324 andi. r3,r6,ILK_LOCKED /* Is it locked? */
325 ori r6,r6,ILK_LOCKED /* Set interlock */
326 bne- lcksniff /* Yeah, wait for it to clear... */
327 stwcx. r6,0,r5 /* Try to seize that there durn lock */
328 bne- lcktry /* Couldn't get it... */
329 li r3,1 /* return true */
330 isync /* Make sure we don't use a speculatively loaded value */
331 beq+ cr1,daPreComm /* We got it, go disable preemption if we're supposed to... */
332 mtmsr r9 ; Restore interrupt state
333 blr /* Go on home... */
; Sniff with plain loads (no reservation traffic) until the lock looks
; free or 128 ticks elapse; then open a brief interrupt window and charge
; it against the caller's timeout.
337 lcksniff: lwz r3,0(r5) /* Get that lock in here */
338 andi. r3,r3,ILK_LOCKED /* Is it free yet? */
339 beq+ lcktry /* Yeah, try for it again... */
341 mftb r10 /* Time stamp us now */
342 sub r10,r10,r8 /* Get the elapsed time */
343 cmplwi r10,128 /* Have we been spinning for 128 tb ticks? */
344 blt+ lcksniff /* Not yet... */
346 mtmsr r9 /* Say, any interrupts pending? */
348 /* The following instructions force the pipeline to be interlocked so that only one
349 instruction is issued per cycle. This insures that we stay enabled for a long enough
350 time; if it's too short, pending interruptions will not have a chance to be taken */
352 subi r4,r4,128 /* Back off elapsed time from timeout value */
353 or r4,r4,r4 /* Do nothing here but force a single cycle delay */
354 mr. r4,r4 /* See if we used the whole timeout */
355 li r3,0 /* Assume a timeout return code */
356 or r4,r4,r4 /* Do nothing here but force a single cycle delay */
358 ble- lckfail /* We failed */
359 mtmsr r7 /* Disable for interruptions */
360 mftb r8 /* Get the low part of the time base */
361 b lcksniff /* Now that we've opened an enable window, keep trying... */
363 lckfail: /* We couldn't get the lock */
364 li r3,0 /* Set failure return code */
365 blr /* Return, head hanging low... */
369 * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
371 * Try to acquire spin-lock. The second parameter is the bit mask to test and set.
372 * multiple bits may be set. Return success (1) or failure (0).
373 * Attempt will fail after timeout ticks of the timebase.
374 * We try fairly hard to get this lock. We disable for interruptions, but
375 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
376 * After checking to see if the large timeout value (passed in) has expired and a
377 * sufficient number of cycles have gone by (to insure pending 'rupts are taken),
378 * we return either in abject failure, or disable and go back to the lock sniff routine.
379 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
381 * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
382 * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND
383 * RESTORE FROM THE STACK.
389 nop ; Force loop alignment to cache line
394 .globl EXT(hw_lock_bit)
; r3 = lock address, r4 = bit mask to test-and-set, r5 = timeout (tb ticks).
; r9 = entry MSR, r7 = MSR with EE off, r8 = spin-start timestamp.
398 mfmsr r9 /* Get the MSR value */
399 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */
401 mtmsr r7 /* Turn off interruptions */
403 mftb r8 /* Get the low part of the time base */
405 bittry: lwarx r6,0,r3 /* Grab the lock value */
406 and. r0,r6,r4 /* See if any of the lock bits are on */
407 or r6,r6,r4 /* Turn on the lock bits */
408 bne- bitsniff /* Yeah, wait for it to clear... */
409 stwcx. r6,0,r3 /* Try to seize that there durn lock */
410 beq+ bitgot /* We got it, yahoo... */
411 b bittry /* Just start up again if the store failed... */
; Sniff loop: plain loads until the bits clear or 128 ticks pass, then
; open an interrupt window and charge it against the caller's timeout.
415 bitsniff: lwz r6,0(r3) /* Get that lock in here */
416 and. r0,r6,r4 /* See if any of the lock bits are on */
417 beq+ bittry /* Yeah, try for it again... */
419 mftb r6 /* Time stamp us now */
420 sub r6,r6,r8 /* Get the elapsed time */
421 cmplwi r6,128 /* Have we been spinning for 128 tb ticks? */
422 blt+ bitsniff /* Not yet... */
424 mtmsr r9 /* Say, any interrupts pending? */
426 /* The following instructions force the pipeline to be interlocked so that only one
427 instruction is issued per cycle. This insures that we stay enabled for a long enough
428 time. If it's too short, pending interruptions will not have a chance to be taken
431 subi r5,r5,128 /* Back off elapsed time from timeout value */
432 or r5,r5,r5 /* Do nothing here but force a single cycle delay */
433 mr. r5,r5 /* See if we used the whole timeout */
434 or r5,r5,r5 /* Do nothing here but force a single cycle delay */
436 ble- bitfail /* We failed */
437 mtmsr r7 /* Disable for interruptions */
438 mftb r8 /* Get the low part of the time base */
439 b bitsniff /* Now that we've opened an enable window, keep trying... */
443 bitgot: mtmsr r9 /* Enable for interruptions */
444 li r3,1 /* Set good return code */
445 isync /* Make sure we don't use a speculatively loaded value */
; NOTE(review): the blr ending the bitgot path is not visible in this
; chunk -- confirm against the full file.
448 bitfail: li r3,0 /* Set failure return code */
449 blr /* Return, head hanging low... */
453 * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
455 * Release bit based spin-lock. The second parameter is the bit mask to clear.
456 * Multiple bits may be cleared.
458 * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
459 * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND
460 * RESTORE FROM THE STACK.
464 .globl EXT(hw_unlock_bit)
; r3 = lock address, r4 = bits to clear (atomically, via lwarx/stwcx.).
; NOTE(review): any barrier (sync) before this loop and the trailing blr
; are not visible in this chunk -- confirm against the full file.
470 ubittry: lwarx r0,0,r3 /* Grab the lock value */
471 andc r0,r0,r4 /* Clear the lock bits */
472 stwcx. r0,0,r3 /* Try to clear that there durn lock */
473 bne- ubittry /* Try again, couldn't save it... */
478 * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
479 * unsigned int newb, unsigned int timeout)
481 * Try to acquire spin-lock. The second parameter is the bit mask to check.
482 * The third is the value of those bits and the 4th is what to set them to.
483 * Return success (1) or failure (0).
484 * Attempt will fail after timeout ticks of the timebase.
485 * We try fairly hard to get this lock. We disable for interruptions, but
486 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
487 * After checking to see if the large timeout value (passed in) has expired and a
488 * sufficient number of cycles have gone by (to insure pending 'rupts are taken),
489 * we return either in abject failure, or disable and go back to the lock sniff routine.
490 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
496 nop ; Force loop alignment to cache line
501 .globl EXT(hw_lock_mbits)
; r3 = lock address, r4 = mask, r5 = expected value, r6 = bits to set,
; r7 = timeout (tb ticks).  r9 = entry MSR, r8 = MSR with EE off.
505 mfmsr r9 ; Get the MSR value
506 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Get MSR that is uninterruptible
508 mtmsr r8 ; Turn off interruptions
510 mftb r10 ; Get the low part of the time base
512 mbittry: lwarx r12,0,r3 ; Grab the lock value
513 and r0,r12,r4 ; Clear extra bits
514 or r12,r12,r6 ; Turn on the lock bits
515 cmplw r0,r5 ; Are these the right bits?
516 bne- mbitsniff ; Nope, wait for it to clear...
517 stwcx. r12,0,r3 ; Try to seize that there durn lock
518 beq+ mbitgot ; We got it, yahoo...
519 b mbittry ; Just start up again if the store failed...
; Sniff loop: plain loads until the masked field matches or 128 ticks
; pass, then open an interrupt window and charge it to the timeout.
523 mbitsniff: lwz r12,0(r3) ; Get that lock in here
524 and r0,r12,r4 ; Clear extra bits
525 or r12,r12,r6 ; Turn on the lock bits
526 cmplw r0,r5 ; Are these the right bits?
527 beq+ mbittry ; Yeah, try for it again...
529 mftb r11 ; Time stamp us now
530 sub r11,r11,r10 ; Get the elapsed time
531 cmplwi r11,128 ; Have we been spinning for 128 tb ticks?
532 blt+ mbitsniff ; Not yet...
534 mtmsr r9 ; Say, any interrupts pending?
536 ; The following instructions force the pipeline to be interlocked so that only one
537 ; instruction is issued per cycle. This insures that we stay enabled for a long enough
538 ; time. If it is too short, pending interruptions will not have a chance to be taken
540 subi r7,r7,128 ; Back off elapsed time from timeout value
541 or r7,r7,r7 ; Do nothing here but force a single cycle delay
542 mr. r7,r7 ; See if we used the whole timeout
543 or r7,r7,r7 ; Do nothing here but force a single cycle delay
545 ble- mbitfail ; We failed
546 mtmsr r8 ; Disable for interruptions
547 mftb r10 ; Get the low part of the time base
548 b mbitsniff ; Now that we have opened an enable window, keep trying...
552 mbitgot: mtmsr r9 ; Enable for interruptions
553 li r3,1 ; Set good return code
554 isync ; Make sure we do not use a speculatively loaded value
; NOTE(review): the blr ending the mbitgot path is not visible in this
; chunk -- confirm against the full file.
557 mbitfail: li r3,0 ; Set failure return code
558 blr ; Return, head hanging low...
562 * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
564 * Spin until word hits 0 or timeout.
565 * Return success (1) or failure (0).
566 * Attempt will fail after timeout ticks of the timebase.
568 * The theory is that a processor will bump a counter as it signals
569 * other processors. Then it will spin until the counter hits 0 (or
570 * times out). The other processors, as it receives the signal will
571 * decrement the counter.
573 * The other processors use interlocked update to decrement, this one
574 * does not need to interlock.
580 .globl EXT(hw_cpu_sync)
; r3 = sync word address (moved to r9), r4 = timeout in timebase ticks.
584 mftb r10 ; Get the low part of the time base
585 mr r9,r3 ; Save the sync word address
586 li r3,1 ; Assume we work
588 csynctry: lwz r11,0(r9) ; Grab the sync value
589 mr. r11,r11 ; Counter hit 0?
590 beqlr- ; Counter hit 0 -- return success (r3 already 1)...
591 mftb r12 ; Time stamp us now
593 sub r12,r12,r10 ; Get the elapsed time
594 cmplw r4,r12 ; Have we gone too long?
595 bge+ csynctry ; Not yet...
597 li r3,0 ; Set failure...
598 blr ; Return, head hanging low...
601 * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
603 * Spin until word changes or timeout.
604 * Return success (1) or failure (0).
605 * Attempt will fail after timeout ticks of the timebase.
607 * This is used to insure that a processor passes a certain point.
608 * An example of use is to monitor the last interrupt time in the
609 * per_proc block. This can be used to insure that the other processor
610 * has seen at least one interrupt since a specific time.
616 .globl EXT(hw_cpu_wcng)
; r3 = word address (moved to r9), r4 = value to watch for change,
; r5 = timeout in timebase ticks.
620 mftb r10 ; Get the low part of the time base
621 mr r9,r3 ; Save the sync word address
622 li r3,1 ; Assume we work
624 wcngtry: lwz r11,0(r9) ; Grab the value
625 cmplw r11,r4 ; Do they still match?
626 bnelr- ; Value changed -- return success (r3 already 1)...
627 mftb r12 ; Time stamp us now
629 sub r12,r12,r10 ; Get the elapsed time
630 cmplw r5,r12 ; Have we gone too long?
631 bge+ wcngtry ; Not yet...
633 li r3,0 ; Set failure...
634 blr ; Return, head hanging low...
638 * unsigned int hw_lock_try(hw_lock_t)
640 * Try to acquire spin-lock. Return success (1) or failure (0)
641 * Returns with preemption disabled on success.
645 .globl EXT(hw_lock_try)
650 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
651 lis r5,0x9999 /* (TEST/DEBUG) */
652 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
653 sc /* (TEST/DEBUG) */
655 mfmsr r9 /* Save the MSR value */
656 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruption bit */
; MACH_LDEBUG-only attempt counter (r5/CTR) to trap runaway spins.
659 lis r5, 0x10 /* roughly 1E6 */
661 #endif /* MACH_LDEBUG */
663 mtmsr r7 /* Disable interruptions and thus, preemption */
668 bdnz+ 0f /* Count attempts */
669 mtmsr r9 /* Restore enablement */
670 BREAKPOINT_TRAP /* Get to debugger */
671 mtmsr r7 /* Disable interruptions and thus, preemption */
673 #endif /* MACH_LDEBUG */
; NOTE(review): the .L_lock_try_loop label line and the ori that sets
; ILK_LOCKED in r5 before the stwcx. are not visible in this chunk,
; nor are the blr lines ending each path -- confirm against the full file.
675 lwarx r5,0,r3 /* Ld from addr of arg and reserve */
677 andi. r6,r5,ILK_LOCKED /* TEST... */
679 bne- .L_lock_try_failed /* branch if taken. Predict free */
681 stwcx. r5,0,r3 /* And SET (if still reserved) */
682 mfsprg r6,0 /* Get the per_proc block */
683 bne- .L_lock_try_loop /* If set failed, loop back */
685 lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
; Success path: bump the preemption disable count before reenabling
; interrupts, so we return holding the lock with preemption off.
688 lwz r5,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */
689 addi r5,r5,1 /* Bring up the disable count */
690 stw r5,CPU_PREEMPTION_LEVEL(r6) /* Save it back */
692 mtmsr r9 /* Allow interruptions now */
693 li r3,1 /* Set that the lock was free */
697 mtmsr r9 /* Allow interruptions now */
698 li r3,0 /* FAILURE - lock was taken */
702 * unsigned int hw_lock_held(hw_lock_t)
704 * Return 1 if lock is held
705 * Doesn't change preemption state.
706 * N.B. Racy, of course.
710 .globl EXT(hw_lock_held)
715 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
716 lis r5,0x8888 /* (TEST/DEBUG) */
717 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
718 sc /* (TEST/DEBUG) */
; Returns the raw lock word (nonzero == held); caller sees a snapshot only.
720 isync /* Make sure we don't use a speculatively fetched lock */
721 lwz r3, 0(r3) /* Return value of lock */
725 * unsigned int hw_compare_and_store(unsigned int old, unsigned int new, unsigned int *area)
727 * Compare old to area if equal, store new, and return true
728 * else return false and no store
729 * This is an atomic operation
733 .globl EXT(hw_compare_and_store)
735 LEXT(hw_compare_and_store)
; r3 = expected old value (saved to r6), r4 = new value, r5 = target word.
737 mr r6,r3 /* Save the old value */
739 cstry: lwarx r9,0,r5 /* Grab the area value */
740 li r3,1 /* Assume it works */
741 cmplw cr0,r9,r6 /* Does it match the old value? */
742 bne- csfail /* No, it must have changed... */
743 stwcx. r4,0,r5 /* Try to save the new value */
744 bne- cstry /* Didn't get it, try again... */
745 isync /* Just hold up prefetch */
; NOTE(review): the blr ending the success path is not visible in this
; chunk -- confirm against the full file.
748 csfail: li r3,0 /* Set failure */
749 blr /* Better luck next time... */
753 * unsigned int hw_atomic_add(unsigned int *area, int val)
755 * Atomically add the second parameter to the first.
756 * Returns the result.
760 .globl EXT(hw_atomic_add)
; r3 = target word (saved to r6), r4 = value to add; result left in r3.
764 mr r6,r3 /* Save the area */
766 addtry: lwarx r3,0,r6 /* Grab the area value */
767 add r3,r3,r4 /* Add the value */
768 stwcx. r3,0,r6 /* Try to save the new value */
769 bne- addtry /* Didn't get it, try again... */
774 * unsigned int hw_atomic_sub(unsigned int *area, int val)
776 * Atomically subtract the second parameter from the first.
777 * Returns the result.
781 .globl EXT(hw_atomic_sub)
; r3 = target word (saved to r6), r4 = value to subtract; result in r3.
785 mr r6,r3 /* Save the area */
787 subtry: lwarx r3,0,r6 /* Grab the area value */
788 sub r3,r3,r4 /* Subtract the value */
789 stwcx. r3,0,r6 /* Try to save the new value */
790 bne- subtry /* Didn't get it, try again... */
795 * unsigned int hw_atomic_or(unsigned int *area, int val)
797 * Atomically ORs the second parameter into the first.
798 * Returns the result.
802 .globl EXT(hw_atomic_or)
; r3 = target word (saved to r6), r4 = mask to OR in; result left in r3.
806 mr r6,r3 ; Save the area
808 ortry: lwarx r3,0,r6 ; Grab the area value
809 or r3,r3,r4 ; OR the value
810 stwcx. r3,0,r6 ; Try to save the new value
811 bne- ortry ; Did not get it, try again...
816 * unsigned int hw_atomic_and(unsigned int *area, int val)
818 * Atomically ANDs the second parameter with the first.
819 * Returns the result.
823 .globl EXT(hw_atomic_and)
; r3 = target word (saved to r6), r4 = mask to AND with; result left in r3.
827 mr r6,r3 ; Save the area
829 andtry: lwarx r3,0,r6 ; Grab the area value
830 and r3,r3,r4 ; AND the value
831 stwcx. r3,0,r6 ; Try to save the new value
832 bne- andtry ; Did not get it, try again...
837 * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
839 * Atomically inserts the element at the head of the list
840 * anchor is the pointer to the first element
841 * element is the pointer to the element to insert
842 * disp is the displacement into the element to the chain pointer
846 .globl EXT(hw_queue_atomic)
848 LEXT(hw_queue_atomic)
; Single-element case: first == last == r4, then share hw_queue_comm
; with hw_queue_atomic_list below.
850 mr r7,r4 /* Make end point the same as start */
851 mr r8,r5 /* Copy the displacement also */
852 b hw_queue_comm /* Join common code... */
855 * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
857 * Atomically inserts the list of elements at the head of the list
858 * anchor is the pointer to the first element
859 * first is the pointer to the first element to insert
860 * last is the pointer to the last element to insert
861 * disp is the displacement into the element to the chain pointer
865 .globl EXT(hw_queue_atomic_list)
867 LEXT(hw_queue_atomic_list)
869 mr r7,r5 /* Make end point the same as start */
870 mr r8,r6 /* Copy the displacement also */
; Common push loop: link the old head behind the new tail, then swing
; the anchor to the new head (r4) under lwarx/stwcx. reservation.
; NOTE(review): the hw_queue_comm: label line itself is not visible in
; this chunk -- confirm against the full file.
873 lwarx r9,0,r3 /* Pick up the anchor */
874 stwx r9,r8,r7 /* Chain that to the end of the new stuff */
875 stwcx. r4,0,r3 /* Try to chain into the front */
876 bne- hw_queue_comm /* Didn't make it, try again... */
881 * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
883 * Atomically removes the first element in a list and returns it.
884 * anchor is the pointer to the first element
885 * disp is the displacement into the element to the chain pointer
886 * Returns element if found, 0 if empty.
890 .globl EXT(hw_dequeue_atomic)
892 LEXT(hw_dequeue_atomic)
; NOTE(review): the hw_dequeue_comm: label line is not visible in this
; chunk -- confirm against the full file.
894 mr r5,r3 /* Save the anchor */
897 lwarx r3,0,r5 /* Pick up the anchor */
898 mr. r3,r3 /* Is the list empty? */
899 beqlr- /* Leave, list is empty (return 0)... */
900 lwzx r9,r4,r3 /* Get the next in line */
901 stwcx. r9,0,r5 /* Try to chain into the front */
902 beqlr+ ; Got the thing, go away with it...
903 b hw_dequeue_comm ; Did not make it, try again...
906 * void mutex_init(mutex_t* l, etap_event_t etap)
; Zeroes the mutex fields; debug fields only under MACH_LDEBUG, ETAP init
; only under ETAP_LOCK_TRACE.
; NOTE(review): the instruction that zeroes r10 (li r10,0) and the #if
; guard lines are not visible in this chunk -- confirm against the full file.
909 ENTRY(mutex_init,TAG_NO_FRAME_USED)
913 stw r10, LOCK_DATA(r3) /* clear lock word */
914 sth r10, MUTEX_WAITERS(r3) /* init waiter count */
917 stw r10, MUTEX_PC(r3) /* init caller pc */
918 stw r10, MUTEX_THREAD(r3) /* and owning thread */
920 stw r10, MUTEX_TYPE(r3) /* set lock type */
921 #endif /* MACH_LDEBUG */
924 bl EXT(etap_mutex_init) /* init ETAP data */
925 #endif /* ETAP_LOCK_TRACE */
931 * void mutex_lock(mutex_t*)
935 .globl EXT(mutex_lock)
938 .globl EXT(_mutex_lock)
; Fast path: one lwarx/stwcx. attempt to set MUTEX_LOCKED when neither
; the interlock nor the mutex bit is set; otherwise fall to the slow path.
; NOTE(review): the lwarx and stwcx. lines of this loop, and several #if
; guard lines throughout this routine, are not visible in this chunk --
; confirm against the full file.
944 andi. r4,r5,ILK_LOCKED|MUTEX_LOCKED
945 bne- L_mutex_lock_slow
946 ori r5,r5,MUTEX_LOCKED
948 bne- L_mutex_lock_loop
954 mflr r12 ; (TEST/DEBUG)
955 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
956 mtlr r12 ; (TEST/DEBUG)
; Debug check: panic if blocking is not currently legal.
961 bl EXT(assert_wait_possible)
963 bne L_mutex_lock_assert_wait_1
964 lis r3,hi16(L_mutex_lock_assert_wait_panic_str)
965 ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
969 L_mutex_lock_assert_wait_panic_str:
970 STRINGD "mutex_lock: assert_wait_possible false\n\000"
973 L_mutex_lock_assert_wait_1:
979 stw r0,SWT_HI(r1) /* set wait time to 0 (HI) */
980 stw r0,SWT_LO(r1) /* set wait time to 0 (LO) */
981 stw r0,MISSED(r1) /* clear local miss marker */
982 #endif /* ETAP_LOCK_TRACE */
986 CHECK_NO_SIMPLELOCKS()
990 mfsprg r4,0 /* (TEST/DEBUG) */
991 lwz r4,PP_CPU_DATA(r4) /* (TEST/DEBUG) */
992 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
993 lwz r4,CPU_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
994 lis r5,0xAAAA /* (TEST/DEBUG) */
995 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
996 sc /* (TEST/DEBUG) */
; Take the interlock without disabling preemption (lockLock variant).
999 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1000 mr. r4,r3 /* Did we get it? */
1001 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1002 bne+ mlGotInt /* We got it just fine... */
1004 lis r3,HIGH_ADDR(mutex_failed1) ; Get the failed mutex message
1005 ori r3,r3,LOW_ADDR(mutex_failed1) ; Get the failed mutex message
1006 bl EXT(panic) ; Call panic
1007 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1011 STRINGD "We can't get a mutex interlock lock on mutex_lock\n\000"
1016 /* Note that there is no reason to do a load and reserve here. We already
1017 hold the interlock lock and no one can touch this field unless they
1018 have that, so, we're free to play */
1020 lwz r4,LOCK_DATA(r3) /* Get the mutex's lock field */
1021 andi. r9,r4,MUTEX_LOCKED /* So, can we have it? */
1022 ori r10,r4,MUTEX_LOCKED /* Set the lock value */
1023 bne- mlInUse /* Nope, somebody's playing already... */
1027 rlwinm r5,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
; MACH_LDEBUG bookkeeping: record caller PC, owning thread, and bump the
; per-thread mutex count.
1029 mfsprg r9,0 /* Get the per_proc block */
1030 lwz r5,0(r1) /* Get previous save frame */
1031 lwz r9,PP_CPU_DATA(r9) /* Point to the cpu data area */
1032 lwz r5,FM_LR_SAVE(r5) /* Get our caller's address */
1033 lwz r8, CPU_ACTIVE_THREAD(r9) /* Get the active thread */
1034 stw r5,MUTEX_PC(r3) /* Save our caller */
1035 mr. r8,r8 /* Is there any thread? */
1036 stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */
1037 beq- .L_ml_no_active_thread /* No owning thread... */
1038 lwz r9,THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
1039 addi r9,r9,1 /* Bump it up */
1040 stw r9,THREAD_MUTEX_COUNT(r8) /* Stash it back */
1041 .L_ml_no_active_thread:
1043 #endif /* MACH_LDEBUG */
; Clearing the low bit releases the interlock and leaves MUTEX_LOCKED set
; in one store.
1045 rlwinm r10,r10,0,0,30 /* Get the unlock value */
1046 stw r10,LOCK_DATA(r3) /* grab the mutexlock and free the interlock */
1052 bl EXT(etap_mutex_hold) /* collect hold timestamp */
1053 #endif /* ETAP_LOCK_TRACE */
1055 EPILOG /* Restore all saved registers */
1057 b epStart /* Go enable preemption... */
1060 * We come to here when we have a resource conflict. In other words,
1061 * the mutex is held.
1068 cmpwi r7,0 /* did we already take a wait timestamp ? */
1069 bne .L_ml_block /* yup. carry-on */
1070 bl EXT(etap_mutex_miss) /* get wait timestamp */
1071 stw r3,SWT_HI(r1) /* store timestamp */
1073 li r7, 1 /* mark wait timestamp as taken */
1075 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1077 #endif /* ETAP_LOCK_TRACE */
1080 CHECK_MYLOCK(MUTEX_THREAD) /* Assert we don't own the lock already */
1083 /* Note that we come in here with the interlock set. The wait routine
1084 * will unlock it before waiting.
1086 addis r4,r4,1 /* Bump the wait count */
1087 stw r4,LOCK_DATA(r3)
1088 bl EXT(mutex_lock_wait) /* Wait for our turn at the lock */
1090 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1091 b .L_ml_retry /* and try again... */
1095 * void _mutex_try(mutex_t*)
1100 .globl EXT(mutex_try)
1102 .globl EXT(_mutex_try)
; Fast path: one attempt to set MUTEX_LOCKED when neither the interlock
; nor the mutex bit is set; otherwise take the slow path.
; NOTE(review): the lwarx/stwcx. lines of this loop and several #if guard
; lines are not visible in this chunk -- confirm against the full file.
1107 andi. r4,r5,ILK_LOCKED|MUTEX_LOCKED
1108 bne- L_mutex_try_slow
1109 ori r5,r5,MUTEX_LOCKED
1111 bne- L_mutex_try_loop
1118 PROLOG(8) /* reserve space for SWT_HI and SWT_LO */
; FIX: was "STW_HI(r1)" -- STW_HI is undefined; the frame offset macro is
; SWT_HI (see its definition and the matching SWT_LO store on the next
; line, and mutex_lock's identical pair).  This line could not assemble
; under ETAP_LOCK_TRACE as written.
1122 stw r5, SWT_HI(r1) /* set wait time to 0 (HI) */
1123 stw r5, SWT_LO(r1) /* set wait time to 0 (LO) */
1124 #endif /* ETAP_LOCK_TRACE */
1127 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1128 lis r5,0xBBBB /* (TEST/DEBUG) */
1129 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1130 sc /* (TEST/DEBUG) */
1134 CHECK_NO_SIMPLELOCKS()
; Racy pre-check so we can fail fast without touching the interlock.
1136 lwz r6,LOCK_DATA(r3) /* Quick check */
1137 andi. r6,r6,MUTEX_LOCKED /* to see if someone has this lock already */
1138 bne- mtFail /* Someone's got it already... */
1140 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1141 mr. r4,r3 /* Did we get it? */
1142 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1143 bne+ mtGotInt /* We got it just fine... */
1145 lis r3,HIGH_ADDR(mutex_failed2) ; Get the failed mutex message
1146 ori r3,r3,LOW_ADDR(mutex_failed2) ; Get the failed mutex message
1147 bl EXT(panic) ; Call panic
1148 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1152 STRINGD "We can't get a mutex interlock lock on mutex_try\n\000"
1157 /* Note that there is no reason to do a load and reserve here. We already
1158 hold the interlock and no one can touch at this field unless they
1159 have that, so, we're free to play */
1161 lwz r4,LOCK_DATA(r3) /* Get the mutex's lock field */
1162 andi. r9,r4,MUTEX_LOCKED /* So, can we have it? */
1163 ori r10,r4,MUTEX_LOCKED /* Set the lock value */
1164 bne- mtInUse /* Nope, somebody's playing already... */
1168 rlwinm r5,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
; MACH_LDEBUG bookkeeping: record caller PC, owning thread, and bump the
; per-thread mutex count.
1170 mfsprg r9,0 /* Get the per_proc block */
1171 lwz r5,0(r1) /* Get previous save frame */
1172 lwz r9,PP_CPU_DATA(r9) /* Point to the cpu data area */
1173 lwz r5,FM_LR_SAVE(r5) /* Get our caller's address */
1174 lwz r8, CPU_ACTIVE_THREAD(r9) /* Get the active thread */
1175 stw r5,MUTEX_PC(r3) /* Save our caller */
1176 mr. r8,r8 /* Is there any thread? */
1177 stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */
1178 beq- .L_mt_no_active_thread /* No owning thread... */
1179 lwz r9, THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
1180 addi r9, r9, 1 /* Bump it up */
1181 stw r9, THREAD_MUTEX_COUNT(r8) /* Stash it back */
1182 .L_mt_no_active_thread:
1184 #endif /* MACH_LDEBUG */
; Clearing the low bit releases the interlock and leaves MUTEX_LOCKED set
; in one store; sync/isync fence the acquisition.
1186 rlwinm r10,r10,0,0,30 /* Get the unlock value */
1187 sync /* Push it all out */
1188 stw r10,LOCK_DATA(r3) /* grab the mutexlock and free the interlock */
1189 isync /* stop speculative instructions */
1192 lwz r4,0(r1) /* Back chain the stack */
1194 lwz r4,FM_LR_SAVE(r4) /* Get our caller's address */
1196 bl EXT(etap_mutex_hold) /* collect hold timestamp */
1197 #endif /* ETAP_LOCK_TRACE */
1199 bl epStart /* Go enable preemption... */
1202 EPILOG /* Restore all saved registers */
1206 * We come to here when we have a resource conflict. In other words,
1207 * the mutex is held.
; mtInUse path: just drop the interlock and report failure.
1211 rlwinm r10,r10,0,0,30 /* Get the unlock value */
1212 stw r10,LOCK_DATA(r3) /* free the interlock */
1213 bl epStart /* Go enable preemption... */
1215 mtFail: li r3,0 /* Set failure code */
1216 EPILOG /* Restore all saved registers */
1221 * void mutex_unlock(mutex_t* l)
; ---------------------------------------------------------------------------
; mutex_unlock -- release a held mutex.
; Fast path: if no waiter is pending and the interlock is clear, atomically
; clear the mutexlock bit and return.  Otherwise fall into the slow path:
; take the interlock, wake a waiter if the wait count is non-zero, then
; release both the interlock and the mutexlock with one store and re-enable
; preemption.
; NOTE(review): this listing is elided -- the load-and-reserve/
; store-conditional pair of the fast-path loop and several labels
; (muGotInt, muUnlock, the MACH_LDEBUG guards) are not visible here.
; ---------------------------------------------------------------------------
1225 .globl EXT(mutex_unlock)
1229 L_mutex_unlock_loop:
1231 rlwinm. r4,r5,16,15,31 /* Bail if pending waiter or interlock set */
1232 rlwinm r5,r5,0,0,29 /* Clear the mutexlock */
1233 bne- L_mutex_unlock_slow
1235 bne- L_mutex_unlock_loop ; Reservation lost, retry (store-conditional not visible in this listing)
1238 L_mutex_unlock_slow:
1243 bl EXT(etap_mutex_unlock) /* collect ETAP data */
1244 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1245 #endif /* ETAP_LOCK_TRACE */
1249 CHECK_THREAD(MUTEX_THREAD)
1252 mfsprg r4,0 /* (TEST/DEBUG) */
1253 lwz r4,PP_CPU_DATA(r4) /* (TEST/DEBUG) */
1254 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1255 lwz r4,CPU_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
1256 lis r5,0xCCCC /* (TEST/DEBUG) */
1257 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1258 sc /* (TEST/DEBUG) */
1260 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1261 mr. r4,r3 /* Did we get it? */
1262 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1263 bne+ muGotInt /* We got it just fine... */
1265 lis r3,HIGH_ADDR(mutex_failed3) ; Get the failed mutex message
1266 ori r3,r3,LOW_ADDR(mutex_failed3) ; Get the failed mutex message
1267 bl EXT(panic) ; Call panic
1268 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1272 STRINGD "We can't get a mutex interlock lock on mutex_unlock\n\000"
1277 lhz r5,LOCK_DATA(r3) ; Load the halfword holding the wait count
1278 mr. r5,r5 /* are there any waiters ? */
1279 beq+ muUnlock /* Nope, we're done... */
1281 bl EXT(mutex_unlock_wakeup) /* yes, wake a thread */
1282 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1283 lhz r5,LOCK_DATA(r3) /* load the wait count */
1289 rlwinm r9,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Compute MSR value with EE (external interrupts) cleared
1292 lwz r9,PP_CPU_DATA(r9) ; Point to the cpu data area
1293 lwz r9,CPU_ACTIVE_THREAD(r9) ; Get the active thread
1294 stw r9,MUTEX_THREAD(r3) /* disown thread */
1296 beq- .L_mu_no_active_thread ; No owning thread (test elided in this listing)
1297 lwz r8,THREAD_MUTEX_COUNT(r9) ; Get the held-mutex count
1299 stw r8,THREAD_MUTEX_COUNT(r9) ; Stash it back (decrement presumably elided -- TODO confirm)
1300 .L_mu_no_active_thread:
1302 #endif /* MACH_LDEBUG */
1304 rlwinm r5,r5,16,0,15 /* Shift wait count */
1305 sync /* Make sure it's all there before we release */
1306 stw r5,LOCK_DATA(r3) /* unlock the interlock and lock */
1308 EPILOG /* Deal with the stack now, enable_preemption doesn't always want one */
1309 b epStart /* Go enable preemption... */
1312 * void interlock_unlock(hw_lock_t lock)
; ---------------------------------------------------------------------------
; interlock_unlock -- release a held hardware interlock by clearing the
; low-order lock bit (ILK_LOCKED) of the lock word, then re-enable
; preemption via the epStart tail.
; NOTE(review): a memory barrier between the rlwinm and the store is not
; visible in this elided listing -- confirm against the full source.
; ---------------------------------------------------------------------------
1316 .globl EXT(interlock_unlock)
1318 LEXT(interlock_unlock)
1321 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1322 lis r5,0xDDDD /* (TEST/DEBUG) */
1323 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1324 sc /* (TEST/DEBUG) */
1326 lwz r10,LOCK_DATA(r3) ; Get the lock word
1327 rlwinm r10,r10,0,0,30 ; Keep bits 0-30, i.e. clear bit 31 (ILK_LOCKED)
1329 stw r10,LOCK_DATA(r3) ; Release the interlock
1331 b epStart /* Go enable preemption... */
1334 * Here is where we enable preemption. We need to be protected
1335 * against ourselves, we can't chance getting interrupted and modifying
1336 * our processor wide preemption count after we've loaded it up. So,
1337 * we need to disable all 'rupts. Actually, we could use a compare
1338 * and swap to do this, but, since there are no MP considerations
1339 * (we are dealing with a CPU local field) it is much, much faster
1342 * Note that if we are not genned MP, the calls here will be no-opped via
1343 * a #define and since the _mp forms are the same, likewise a #define
1344 * will be used to route to the other forms
1347 /* This version does not check if we get preempted or not */
; _enable_preemption_no_check -- drop the preemption level without checking
; for a pending preemption request; funnels into the common epCommn code.
1351 .globl EXT(_enable_preemption_no_check)
1353 LEXT(_enable_preemption_no_check)
1354 cmplw cr1,r1,r1 /* Force zero cr so we know not to check if preempted (r1==r1 always, so cr1 records "equal") */
1355 b epCommn /* Join up with the other enable code... */
1358 /* This version checks if we get preempted or not */
; ---------------------------------------------------------------------------
; _enable_preemption / epStart -- decrement the per-CPU preemption level
; with interrupts masked; when the level reaches zero (and cr1 says we
; should check), look for an urgent AST and take the firmware preemption
; call if one is pending and interrupts were enabled on entry.
; NOTE(review): listing elided -- epTooFar label, some branches and the
; final blr of the no-check path are not visible here.
; ---------------------------------------------------------------------------
1361 .globl EXT(_enable_preemption)
1363 LEXT(_enable_preemption)
1365 epStart: cmplwi cr1,r1,0 /* Force non-zero cr so we know to check if preempted */
1368 * Common enable preemption code
1371 epCommn: mfmsr r9 /* Save the old MSR */
1372 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1373 mtmsr r8 /* Interrupts off */
1375 mfsprg r3,0 /* Get the per_proc block */
1376 lwz r6,PP_CPU_DATA(r3) /* Get the pointer to the CPU data from per proc */
1377 li r8,-1 /* Get a decrementer */
1378 lwz r5,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */
1379 add. r5,r5,r8 /* Bring down the disable count */
1381 mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep from interrupting too early
1382 mr. r4,r4 ; (TEST/DEBUG)
1383 beq- epskptrc0 ; (TEST/DEBUG)
1384 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1385 lis r4,0xBBBB ; (TEST/DEBUG)
1386 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1388 epskptrc0: mr. r5,r5 ; (TEST/DEBUG) Re-test the new preemption level
1391 blt- epTooFar /* Yeah, we did... */
1392 #endif /* MACH_LDEBUG */
1393 stw r5,CPU_PREEMPTION_LEVEL(r6) /* Save it back */
1395 beq+ epCheckPreempt /* Go check if we need to be preempted... */
1397 epNoCheck: mtmsr r9 /* Restore the interrupt level */
; --- epTooFar: level went negative, panic (label elided in this listing) ---
1402 lis r6,HIGH_ADDR(EXT(panic)) /* First half of panic call */
1403 lis r3,HIGH_ADDR(epTooFarStr) /* First half of panic string */
1404 ori r6,r6,LOW_ADDR(EXT(panic)) /* Second half of panic call */
1405 ori r3,r3,LOW_ADDR(epTooFarStr) /* Second half of panic string */
1406 mtlr r6 /* Get the address of the panic routine */
1407 mtmsr r9 /* Restore interruptions */
1412 STRINGD "_enable_preemption: preemption_level <= 0!\000"
1414 #endif /* MACH_LDEBUG */
; --- epCheckPreempt: level hit zero, see if a preemption is pending ---
1419 lwz r7,PP_NEED_AST(r3) /* Get the AST request address */
1420 li r5,AST_URGENT /* Get the requests we do honor */
1421 lwz r7,0(r7) /* Get the actual, real live, extra special AST word */
1422 lis r0,HIGH_ADDR(DoPreemptCall) /* Just in case, get the top of firmware call */
1423 and. r7,r7,r5 ; Should we preempt?
1424 ori r0,r0,LOW_ADDR(DoPreemptCall) /* Merge in bottom part */
1425 beq+ epCPno ; No preemption here...
1427 andi. r3,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off
1429 epCPno: mtmsr r9 /* Allow interrupts if we can */
1430 beqlr+ ; We probably will not preempt...
1431 sc /* Do the preemption */
1432 blr /* Now, go away now... */
1435 * Here is where we disable preemption. Since preemption is on a
1436 * per processor basis (a thread runs on one CPU at a time) we don't
1437 * need any cross-processor synchronization. We do, however, need to
1438 * be interrupt safe, so we don't preempt while in the process of
1439 * disabling it. We could use SPLs, but since we always want complete
1440 * disablement, and this is platform specific code, we'll just kick the
1441 * MSR. We'll save a couple of orders of magnitude over using SPLs.
; ---------------------------------------------------------------------------
; _disable_preemption -- bump the per-CPU preemption level with interrupts
; masked, optionally (PREEMPTSTACK != 0) recording a 4-deep caller
; traceback into DBGpreempt, indexed by processor and stack depth.
; NOTE(review): listing elided -- LEXT(_disable_preemption) body entry
; and some debug branches (e.g. the nopredeb/nosaveher2 targets' tails)
; are not fully visible here.
; ---------------------------------------------------------------------------
1446 nop ; Use these 5 nops to force daPreComm
1447 nop ; to a line boundary.
1452 .globl EXT(_disable_preemption)
1454 LEXT(_disable_preemption)
1456 daPreAll: mfmsr r9 /* Save the old MSR */
1457 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1458 mtmsr r8 /* Interrupts off */
1460 daPreComm: mfsprg r6,0 /* Get the per_proc block */
1461 lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
1462 lwz r5,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */
1463 addi r5,r5,1 /* Bring up the disable count */
1464 stw r5,CPU_PREEMPTION_LEVEL(r6) /* Save it back */
1466 mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep from interrupting too early
1467 mr. r4,r4 ; (TEST/DEBUG)
1468 beq- epskptrc1 ; (TEST/DEBUG)
1469 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1470 lis r4,0xAAAA ; (TEST/DEBUG)
1471 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1473 epskptrc1: ; (TEST/DEBUG)
1477 ; Set PREEMPTSTACK above to enable a preemption traceback stack.
1479 ; NOTE: make sure that PREEMPTSTACK in aligned_data is
1480 ; set the same as it is here. This is the number of
1481 ; traceback entries we can handle per processor
1483 ; A value of 0 disables the stack.
1486 cmplwi r5,PREEMPTSTACK ; Maximum depth
1487 lwz r6,CPU_ACTIVE_THREAD(r6) ; Get the pointer to the currently active thread
1488 bgt- nopredeb ; Too many to stack...
1489 mr. r6,r6 ; During boot?
1490 beq- nopredeb ; Yes, do not do backtrace...
1491 lwz r6,THREAD_TOP_ACT(r6) ; Point to the active activation
1492 lwz r6,ACT_MACT_PCB(r6) ; Get the last savearea used
1493 mr. r0,r6 ; Any saved context?
1494 beq- nosaveds ; No...
1495 lwz r0,saver1(r6) ; Get end of savearea chain
1497 nosaveds: li r11,0 ; Clear callers callers callers return
1498 li r10,0 ; Clear callers callers callers callers return
1499 li r8,0 ; Clear callers callers callers callers callers return
1500 lwz r2,0(r1) ; Get callers callers stack frame
1501 lwz r12,8(r2) ; Get our callers return
1502 lwz r4,0(r2) ; Back chain
1504 xor r2,r4,r2 ; Form difference
1505 cmplwi r2,8192 ; Within a couple of pages?
1506 mr r2,r4 ; Move register
1507 bge- nosaveher2 ; No, no back chain then...
1508 lwz r11,8(r2) ; Get our callers return
1509 lwz r4,0(r2) ; Back chain
1511 xor r2,r4,r2 ; Form difference
1512 cmplwi r2,8192 ; Within a couple of pages?
1513 mr r2,r4 ; Move register
1514 bge- nosaveher2 ; No, no back chain then...
1515 lwz r10,8(r2) ; Get our callers return
1516 lwz r4,0(r2) ; Back chain
1518 xor r2,r4,r2 ; Form difference
1519 cmplwi r2,8192 ; Within a couple of pages?
1520 mr r2,r4 ; Move register
1521 bge- nosaveher2 ; No, no back chain then...
1522 lwz r8,8(r2) ; Get our callers return
1525 addi r5,r5,-1 ; Get index to slot
1526 mfspr r6,pir ; Get our processor
1527 mflr r4 ; Get our return
1528 rlwinm r6,r6,8,0,23 ; Index to processor slot
1529 lis r2,hi16(EXT(DBGpreempt)) ; Stack high order
1530 rlwinm r5,r5,4,0,27 ; Index to stack slot
1531 ori r2,r2,lo16(EXT(DBGpreempt)) ; Stack low order
1532 add r2,r2,r5 ; Point to slot
1533 add r2,r2,r6 ; Move to processor
1534 stw r4,0(r2) ; Save our return
1535 stw r11,4(r2) ; Save callers caller
1536 stw r10,8(r2) ; Save callers callers caller
1537 stw r8,12(r2) ; Save callers callers callers caller
1540 mtmsr r9 /* Allow interruptions now */
1545 * Return the active thread for both inside and outside osfmk consumption
; ---------------------------------------------------------------------------
; thread_t current_thread(void)
; Reads the per-CPU active-thread pointer with interrupts masked so the
; read cannot be torn by a context switch on this processor.
; Returns: r3 = active thread.  Clobbers r6, r8, r9 (MSR scratch).
; ---------------------------------------------------------------------------
1549 .globl EXT(current_thread)
1551 LEXT(current_thread)
1553 mfmsr r9 /* Save the old MSR */
1554 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1555 mtmsr r8 /* Interrupts off */
1556 mfsprg r6,0 /* Get the per_proc */
1557 lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
1558 lwz r3,CPU_ACTIVE_THREAD(r6) /* Get the active thread */
1559 mtmsr r9 /* Restore interruptions to entry */
1564 * Return the current preemption level
; ---------------------------------------------------------------------------
; int get_preemption_level(void)
; Reads the per-CPU preemption disable count with interrupts masked.
; Returns: r3 = preemption level.  Clobbers r6, r8, r9 (MSR scratch).
; ---------------------------------------------------------------------------
1568 .globl EXT(get_preemption_level)
1570 LEXT(get_preemption_level)
1572 mfmsr r9 /* Save the old MSR */
1573 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1574 mtmsr r8 /* Interrupts off */
1575 mfsprg r6,0 /* Get the per_proc */
1576 lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
1577 lwz r3,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */
1578 mtmsr r9 /* Restore interruptions to entry */
1583 * Return the simple lock count
; ---------------------------------------------------------------------------
; int get_simple_lock_count(void)
; Reads the per-CPU count of held simple locks with interrupts masked.
; Returns: r3 = simple lock count.  Clobbers r6, r8, r9 (MSR scratch).
; ---------------------------------------------------------------------------
1587 .globl EXT(get_simple_lock_count)
1589 LEXT(get_simple_lock_count)
1591 mfmsr r9 /* Save the old MSR */
1592 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1593 mtmsr r8 /* Interrupts off */
1594 mfsprg r6,0 /* Get the per_proc */
1595 lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
1596 lwz r3,CPU_SIMPLE_LOCK_COUNT(r6) /* Get the simple lock count */
1597 mtmsr r9 /* Restore interruptions to entry */
1601 * fast_usimple_lock():
1603 * If EE is off, get the simple lock without incrementing the preemption count and
1604 * mark the simple lock with SLOCK_FAST.
1605 * If EE is on, call usimple_lock().
; NOTE(review): this listing is elided -- the mfmsr that loads r9, the
; lwarx/stwcx. reservation pair of the loop, the blr, and the
; L_usimple_lock_c tail (branch to usimple_lock) are not visible here.
1608 .globl EXT(fast_usimple_lock)
1610 LEXT(fast_usimple_lock)
1613 andi. r7,r9,lo16(MASK(MSR_EE)) ; Are external interrupts enabled?
1614 bne- L_usimple_lock_c ; Yes -- take the C (slow) path
1615 L_usimple_lock_loop:
1617 li r5,ILK_LOCKED|SLOCK_FAST ; Value to store: locked + fast-marked
1619 bne- L_usimple_lock_c ; Lock word busy -- take the C path
1621 bne- L_usimple_lock_loop ; Reservation lost -- retry
1628 * fast_usimple_lock_try():
1630 * If EE is off, try to get the simple lock. The preemption count doesn't get incremented and
1631 * if successfully held, the simple lock is marked with SLOCK_FAST.
1632 * If EE is on, call usimple_lock_try()
; NOTE(review): this listing is elided -- the mfmsr that loads r9, the
; lwarx/stwcx. reservation pair, the success return (li r3,1 / blr) and
; the failure return (li r3,0 / blr) bodies are not visible here.
1635 .globl EXT(fast_usimple_lock_try)
1637 LEXT(fast_usimple_lock_try)
1640 andi. r7,r9,lo16(MASK(MSR_EE)) ; Are external interrupts enabled?
1641 bne- L_usimple_lock_try_c ; Yes -- take the C (slow) path
1642 L_usimple_lock_try_loop:
1644 li r5,ILK_LOCKED|SLOCK_FAST ; Value to store: locked + fast-marked
1646 bne- L_usimple_lock_try_fail ; Already held -- report failure
1648 bne- L_usimple_lock_try_loop ; Reservation lost -- retry
1652 L_usimple_lock_try_fail:
1655 L_usimple_lock_try_c:
1656 b EXT(usimple_lock_try) ; Tail-call the general C version
1659 * fast_usimple_unlock():
1661 * If the simple lock is marked SLOCK_FAST, release it without decrementing the preemption count.
1662 * Call usimple_unlock() otherwise.
; NOTE(review): this listing is elided -- the mfmsr that loads r9, the
; panic call after loading its message address, the barrier/zero setup
; before the releasing store, the blr, and the L_usimple_unlock_c label
; ahead of the tail branch are not visible here.
1665 .globl EXT(fast_usimple_unlock)
1667 LEXT(fast_usimple_unlock)
1669 lwz r5,LOCK_DATA(r3) ; Get the lock word
1671 cmpi cr0,r5,ILK_LOCKED|SLOCK_FAST ; Exactly the fast-held pattern?
1672 bne- L_usimple_unlock_c ; No -- take the C (slow) path
1676 andi. r7,r9,lo16(MASK(MSR_EE)) ; Sanity check: interrupts must be off
1677 beq L_usimple_unlock_cont ; They are -- proceed with the release
1678 lis r3,hi16(L_usimple_unlock_panic) ; Get the panic message (high half)
1679 ori r3,r3,lo16(L_usimple_unlock_panic) ; Get the panic message (low half)
1683 L_usimple_unlock_panic:
1684 STRINGD "fast_usimple_unlock: interrupts not disabled\n\000"
1686 L_usimple_unlock_cont:
1688 stw r0, LOCK_DATA(r3) ; Release the lock (r0 setup elided in this listing -- TODO confirm it is zero)
1691 b EXT(usimple_unlock) ; Tail-call the general C version