2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
24 #include <mach_assert.h>
25 #include <mach_ldebug.h>
28 #include <kern/etap_options.h>
31 #include <ppc/proc_reg.h>
36 #define SWT_HI 0+FM_SIZE
37 #define SWT_LO 4+FM_SIZE
38 #define MISSED 8+FM_SIZE
40 #define ILK_LOCKED 0x01
41 #define WAIT_FLAG 0x02
42 #define SLOCK_FAST 0x02
43 #define TH_FN_OWNED 0x01
46 ; NOTE: make sure that PREEMPTSTACK in aligned_data is
47 ; set the same as it is here. This is the number of
48 ; traceback entries we can handle per processor
50 ; A value of 0 disables the stack.
52 #define PREEMPTSTACK 0
56 #include <ppc/POWERMAC/mp/mp.h>
58 #define PROLOG(space) \
59 stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
61 stw r3,FM_ARG0(r1) __ASMNL__ \
62 stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__
65 lwz r1,0(r1) __ASMNL__ \
66 lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
69 #if MACH_LDEBUG && CHECKLOCKS
71 * Routines for general lock debugging.
74 /* Gets lock check flags in CR6: CR bits 24-27 */
76 #define CHECK_SETUP(rg) \
77 lis rg,hi16(EXT(dgWork)) __ASMNL__ \
78 ori rg,rg,lo16(EXT(dgWork)) __ASMNL__ \
79 lbz rg,dgFlags(rg) __ASMNL__ \
84 * Checks for expected lock types and calls "panic" on
85 * mismatch. Detects calls to Mutex functions with
86 * type simplelock and vice versa.
88 #define CHECK_MUTEX_TYPE() \
89 bt 24+disLktypeb,1f __ASMNL__ \
90 lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
91 cmpwi r10,MUTEX_TAG __ASMNL__ \
93 lis r3,hi16(not_a_mutex) __ASMNL__ \
94 ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
95 bl EXT(panic) __ASMNL__ \
96 lwz r3,FM_ARG0(r1) __ASMNL__ \
101 STRINGD "not a mutex!\n\000"
104 #define CHECK_SIMPLE_LOCK_TYPE() \
105 bt 24+disLktypeb,1f __ASMNL__ \
106 lwz r10,SLOCK_TYPE(r3) __ASMNL__ \
107 cmpwi r10,USLOCK_TAG __ASMNL__ \
109 lis r3,hi16(not_a_slock) __ASMNL__ \
110 ori r3,r3,lo16(not_a_slock) __ASMNL__ \
111 bl EXT(panic) __ASMNL__ \
112 lwz r3,FM_ARG0(r1) __ASMNL__ \
117 STRINGD "not a simple lock!\n\000"
120 #define CHECK_NO_SIMPLELOCKS() \
121 bt 24+disLkNmSimpb,2f __ASMNL__ \
122 mfmsr r11 __ASMNL__ \
123 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 __ASMNL__ \
124 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 __ASMNL__ \
125 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
126 mtmsr r10 __ASMNL__ \
128 mfsprg r10,0 __ASMNL__ \
129 lwz r10,PP_SIMPLE_LOCK_CNT(r10) __ASMNL__ \
130 cmpwi r10,0 __ASMNL__ \
132 lis r3,hi16(simple_locks_held) __ASMNL__ \
133 ori r3,r3,lo16(simple_locks_held) __ASMNL__ \
134 bl EXT(panic) __ASMNL__ \
135 lwz r3,FM_ARG0(r1) __ASMNL__ \
137 mtmsr r11 __ASMNL__ \
142 STRINGD "simple locks held!\n\000"
146 * Verifies return to the correct thread in "unlock" situations.
149 #define CHECK_THREAD(thread_offset) \
150 bt 24+disLkThreadb,2f __ASMNL__ \
151 mfmsr r11 __ASMNL__ \
152 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 __ASMNL__ \
153 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 __ASMNL__ \
154 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
155 mtmsr r10 __ASMNL__ \
157 mfsprg r10,0 __ASMNL__ \
158 lwz r10,PP_ACTIVE_THREAD(r10) __ASMNL__ \
159 cmpwi r10,0 __ASMNL__ \
161 lwz r9,thread_offset(r3) __ASMNL__ \
162 cmpw r9,r10 __ASMNL__ \
164 lis r3,hi16(wrong_thread) __ASMNL__ \
165 ori r3,r3,lo16(wrong_thread) __ASMNL__ \
166 bl EXT(panic) __ASMNL__ \
167 lwz r3,FM_ARG0(r1) __ASMNL__ \
169 mtmsr r11 __ASMNL__ \
173 STRINGD "wrong thread!\n\000"
176 #define CHECK_MYLOCK(thread_offset) \
177 bt 24+disLkMyLckb,2f __ASMNL__ \
178 mfmsr r11 __ASMNL__ \
179 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 __ASMNL__ \
180 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 __ASMNL__ \
181 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
182 mtmsr r10 __ASMNL__ \
184 mfsprg r10,0 __ASMNL__ \
185 lwz r10,PP_ACTIVE_THREAD(r10) __ASMNL__ \
186 cmpwi r10,0 __ASMNL__ \
188 lwz r9, thread_offset(r3) __ASMNL__ \
189 cmpw r9,r10 __ASMNL__ \
191 lis r3, HIGH_ADDR(mylock_attempt) __ASMNL__ \
192 ori r3,r3,LOW_ADDR(mylock_attempt) __ASMNL__ \
193 bl EXT(panic) __ASMNL__ \
194 lwz r3,FM_ARG0(r1) __ASMNL__ \
196 mtmsr r11 __ASMNL__ \
201 STRINGD "mylock attempt!\n\000"
204 #else /* MACH_LDEBUG */
206 #define CHECK_SETUP(rg)
207 #define CHECK_MUTEX_TYPE()
208 #define CHECK_SIMPLE_LOCK_TYPE()
209 #define CHECK_THREAD(thread_offset)
210 #define CHECK_NO_SIMPLELOCKS()
211 #define CHECK_MYLOCK(thread_offset)
213 #endif /* MACH_LDEBUG */
216 * void hw_lock_init(hw_lock_t)
218 * Initialize a hardware lock. These locks should be cache aligned and a multiple of cache size.
222 ENTRY(hw_lock_init, TAG_NO_FRAME_USED)
224 li r0, 0 /* set lock to free == 0 */
225 stw r0, 0(r3) /* Initialize the lock */
229 * void hw_lock_unlock(hw_lock_t)
231 * Unconditionally release lock.
232 * Release preemption level.
237 .globl EXT(hw_lock_unlock)
242 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
243 lis r5,0xFFFF /* (TEST/DEBUG) */
244 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
245 sc /* (TEST/DEBUG) */
247 sync /* Flush writes done under lock */
248 li r0, 0 /* set lock to free */
251 b epStart /* Go enable preemption... */
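/*
 *	Illustrative sketch (not part of the original source): typical C-level
 *	use of these primitives on a hypothetical lock "mylock":
 *
 *		hw_lock_init(&mylock);		// once, before first use
 *		hw_lock_lock(&mylock);		// spins; returns with preemption disabled
 *		...				// short critical section
 *		hw_lock_unlock(&mylock);	// releases the lock and the preemption level
 */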
255 * Special case for internal use. Uses same lock code, but sets up so
256 * that there will be no disabling of preemption after locking. Generally
257 * used for mutex locks when obtaining the interlock although there is
258 * nothing stopping other uses.
261 lockLock: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */
262 ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */
263 cmplwi cr1,r1,0 /* Set flag so we skip disabling preemption after locking */
264 lwz r4,0(r4) /* Get the timeout value */
265 b lockComm /* Join on up... */
268 * void hw_lock_lock(hw_lock_t)
270 * Acquire lock, spinning until it becomes available.
271 * Return with preemption disabled.
272 * Apparently not used except by mach_perf.
273 * We will just set a default timeout and jump into the NORMAL timeout lock.
277 .globl EXT(hw_lock_lock)
281 lockDisa: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */
282 ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */
283 cmplw cr1,r1,r1 /* Set flag so we disable preemption after locking */
284 lwz r4,0(r4) /* Get the timeout value */
285 b lockComm /* Join on up... */
288 * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
290 * Try to acquire spin-lock. Return success (1) or failure (0).
291 * Attempt will fail after timeout ticks of the timebase.
292 * We try fairly hard to get this lock. We disable for interruptions, but
293 * reenable after a "short" timeout (128 ticks, we may want to change this).
294 * After checking to see if the large timeout value (passed in) has expired and a
295 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
296 * we return either in abject failure, or disable and go back to the lock sniff routine.
297 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
299 * One programming note: NEVER DO NOTHING IN HERE NO HOW THAT WILL FORCE US TO CALL
300 * THIS WITH TRANSLATION OR INTERRUPTIONS EITHER ON OR OFF, GOSH DARN IT!
304 .globl EXT(hw_lock_to)
309 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
310 lis r5,0xEEEE /* (TEST/DEBUG) */
311 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
312 sc /* (TEST/DEBUG) */
316 mflr r12 ; (TEST/DEBUG)
317 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
318 mtlr r12 ; (TEST/DEBUG)
321 cmplw cr1,r1,r1 /* Set flag so we disable preemption after locking */
323 lockComm: mfmsr r9 /* Get the MSR value */
324 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
325 mr r5,r3 /* Get the address of the lock */
326 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
327 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */
329 mtmsr r7 /* Turn off interruptions */
330 isync ; May have turned off vec and fp here
331 mftb r8 /* Get the low part of the time base */
333 lcktry: lwarx r6,0,r5 /* Grab the lock value */
334 andi. r3,r6,ILK_LOCKED /* Is it locked? */
335 ori r6,r6,ILK_LOCKED /* Set interlock */
336 bne- lcksniff /* Yeah, wait for it to clear... */
337 stwcx. r6,0,r5 /* Try to seize that there durn lock */
338 bne- lcktry /* Couldn't get it... */
339 li r3,1 /* return true */
340 isync /* Make sure we don't use a speculatively loaded value */
341 beq+ cr1,daPreComm /* We got it, go disable preemption if we're supposed to... */
342 mtmsr r9 ; Restore interrupt state
343 blr /* Go on home... */
347 lcksniff: lwz r3,0(r5) /* Get that lock in here */
348 andi. r3,r3,ILK_LOCKED /* Is it free yet? */
349 beq+ lcktry /* Yeah, try for it again... */
351 mftb r10 /* Time stamp us now */
352 sub r10,r10,r8 /* Get the elapsed time */
353 cmplwi r10,128 /* Have we been spinning for 128 tb ticks? */
354 blt+ lcksniff /* Not yet... */
356 mtmsr r9 /* Say, any interrupts pending? */
358 /* The following instructions force the pipeline to be interlocked so that only one
359 instruction is issued per cycle. This ensures that we stay enabled for a long enough
360 time; if it's too short, pending interruptions will not have a chance to be taken */
362 subi r4,r4,128 /* Back off elapsed time from timeout value */
363 or r4,r4,r4 /* Do nothing here but force a single cycle delay */
364 mr. r4,r4 /* See if we used the whole timeout */
365 li r3,0 /* Assume a timeout return code */
366 or r4,r4,r4 /* Do nothing here but force a single cycle delay */
368 ble- lckfail /* We failed */
369 mtmsr r7 /* Disable for interruptions */
370 mftb r8 /* Get the low part of the time base */
371 b lcksniff /* Now that we've opened an enable window, keep trying... */
373 lckfail: /* We couldn't get the lock */
374 li r3,0 /* Set failure return code */
375 blr /* Return, head hanging low... */
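/*
 *	Illustrative sketch (not part of the original source): a caller can use
 *	hw_lock_to() to bound the spin on a hypothetical lock and handle the
 *	timeout itself:
 *
 *		if (!hw_lock_to(&mylock, LockTimeOut))	// spin for at most "timeout" tb ticks
 *			panic("spin lock timeout");	// a 0 return means we never got it
 */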
379 * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
381 * Try to acquire spin-lock. The second parameter is the bit mask to test and set.
382 * Multiple bits may be set. Return success (1) or failure (0).
383 * Attempt will fail after timeout ticks of the timebase.
384 * We try fairly hard to get this lock. We disable for interruptions, but
385 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
386 * After checking to see if the large timeout value (passed in) has expired and a
387 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
388 * we return either in abject failure, or disable and go back to the lock sniff routine.
389 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
391 * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
392 * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND
393 * RESTORE FROM THE STACK.
399 nop ; Force loop alignment to cache line
404 .globl EXT(hw_lock_bit)
408 mfmsr r9 /* Get the MSR value */
409 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
410 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
411 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */
413 mtmsr r7 /* Turn off interruptions */
414 isync ; May have turned off vec and fp here
416 mftb r8 /* Get the low part of the time base */
418 bittry: lwarx r6,0,r3 /* Grab the lock value */
419 and. r0,r6,r4 /* See if any of the lock bits are on */
420 or r6,r6,r4 /* Turn on the lock bits */
421 bne- bitsniff /* Yeah, wait for it to clear... */
422 stwcx. r6,0,r3 /* Try to seize that there durn lock */
423 beq+ bitgot /* We got it, yahoo... */
424 b bittry /* Just start up again if the store failed... */
428 bitsniff: lwz r6,0(r3) /* Get that lock in here */
429 and. r0,r6,r4 /* See if any of the lock bits are on */
430 beq+ bittry /* Yeah, try for it again... */
432 mftb r6 /* Time stamp us now */
433 sub r6,r6,r8 /* Get the elapsed time */
434 cmplwi r6,128 /* Have we been spinning for 128 tb ticks? */
435 blt+ bitsniff /* Not yet... */
437 mtmsr r9 /* Say, any interrupts pending? */
439 /* The following instructions force the pipeline to be interlocked so that only one
440 instruction is issued per cycle. This ensures that we stay enabled for a long enough
441 time. If it's too short, pending interruptions will not have a chance to be taken
444 subi r5,r5,128 /* Back off elapsed time from timeout value */
445 or r5,r5,r5 /* Do nothing here but force a single cycle delay */
446 mr. r5,r5 /* See if we used the whole timeout */
447 or r5,r5,r5 /* Do nothing here but force a single cycle delay */
449 ble- bitfail /* We failed */
450 mtmsr r7 /* Disable for interruptions */
451 mftb r8 /* Get the low part of the time base */
452 b bitsniff /* Now that we've opened an enable window, keep trying... */
456 bitgot: mtmsr r9 /* Enable for interruptions */
457 li r3,1 /* Set good return code */
458 isync /* Make sure we don't use a speculatively loaded value */
461 bitfail: li r3,0 /* Set failure return code */
462 blr /* Return, head hanging low... */
466 * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
468 * Release bit based spin-lock. The second parameter is the bit mask to clear.
469 * Multiple bits may be cleared.
471 * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
472 * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND
473 * RESTORE FROM THE STACK.
477 .globl EXT(hw_unlock_bit)
483 ubittry: lwarx r0,0,r3 /* Grab the lock value */
484 andc r0,r0,r4 /* Clear the lock bits */
485 stwcx. r0,0,r3 /* Try to clear that there durn lock */
486 bne- ubittry /* Try again, couldn't save it... */
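/*
 *	Illustrative sketch (not part of the original source): hw_lock_bit() and
 *	hw_unlock_bit() let independent lock bits share one word. For a
 *	hypothetical control word using bit 0x04 as its lock:
 *
 *		if (hw_lock_bit(&ctlword, 0x04, LockTimeOut)) {	// set the bit, or time out
 *			...					// bit held here
 *			hw_unlock_bit(&ctlword, 0x04);		// clear it again
 *		}
 */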
491 * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
492 * unsigned int newb, unsigned int timeout)
494 * Try to acquire spin-lock. The second parameter is the bit mask to check.
495 * The third is the value of those bits and the 4th is what to set them to.
496 * Return success (1) or failure (0).
497 * Attempt will fail after timeout ticks of the timebase.
498 * We try fairly hard to get this lock. We disable for interruptions, but
499 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
500 * After checking to see if the large timeout value (passed in) has expired and a
501 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
502 * we return either in abject failure, or disable and go back to the lock sniff routine.
503 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
509 nop ; Force loop alignment to cache line
514 .globl EXT(hw_lock_mbits)
518 mfmsr r9 ; Get the MSR value
519 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
520 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
521 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Get MSR that is uninterruptible
523 mtmsr r8 ; Turn off interruptions
524 isync ; May have turned off vectors or float here
525 mftb r10 ; Get the low part of the time base
527 mbittry: lwarx r12,0,r3 ; Grab the lock value
528 and r0,r12,r4 ; Clear extra bits
529 andc r12,r12,r4 ; Clear all bits in the bit mask
530 or r12,r12,r6 ; Turn on the lock bits
531 cmplw r0,r5 ; Are these the right bits?
532 bne- mbitsniff ; Nope, wait for it to clear...
533 stwcx. r12,0,r3 ; Try to seize that there durn lock
534 beq+ mbitgot ; We got it, yahoo...
535 b mbittry ; Just start up again if the store failed...
539 mbitsniff: lwz r12,0(r3) ; Get that lock in here
540 and r0,r12,r4 ; Clear extra bits
541 cmplw r0,r5 ; Are these the right bits?
542 beq+ mbittry ; Yeah, try for it again...
544 mftb r11 ; Time stamp us now
545 sub r11,r11,r10 ; Get the elapsed time
546 cmplwi r11,128 ; Have we been spinning for 128 tb ticks?
547 blt+ mbitsniff ; Not yet...
549 mtmsr r9 ; Say, any interrupts pending?
551 ; The following instructions force the pipeline to be interlocked so that only one
552 ; instruction is issued per cycle. This ensures that we stay enabled for a long enough
553 ; time. If it is too short, pending interruptions will not have a chance to be taken
555 subi r7,r7,128 ; Back off elapsed time from timeout value
556 or r7,r7,r7 ; Do nothing here but force a single cycle delay
557 mr. r7,r7 ; See if we used the whole timeout
558 or r7,r7,r7 ; Do nothing here but force a single cycle delay
560 ble- mbitfail ; We failed
561 mtmsr r8 ; Disable for interruptions
562 mftb r10 ; Get the low part of the time base
563 b mbitsniff ; Now that we have opened an enable window, keep trying...
567 mbitgot: mtmsr r9 ; Enable for interruptions
568 li r3,1 ; Set good return code
569 isync ; Make sure we do not use a speculatively loaded value
572 mbitfail: li r3,0 ; Set failure return code
573 blr ; Return, head hanging low...
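/*
 *	Illustrative sketch (not part of the original source): hw_lock_mbits()
 *	waits for a masked field to hold an expected value and then rewrites it,
 *	e.g. moving a hypothetical two-bit state field (mask 0x03) from 0 to 1:
 *
 *		if (!hw_lock_mbits(&state, 0x03, 0, 1, LockTimeOut))
 *			...		// the field never read 0 within the timeout
 */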
577 * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
579 * Spin until word hits 0 or timeout.
580 * Return success (1) or failure (0).
581 * Attempt will fail after timeout ticks of the timebase.
583 * The theory is that a processor will bump a counter as it signals
584 * other processors. Then it will spin until the counter hits 0 (or
585 * times out). The other processors, as they receive the signal, will
586 * decrement the counter.
588 * The other processors use an interlocked update to decrement; this one
589 * does not need to interlock.
595 .globl EXT(hw_cpu_sync)
599 mftb r10 ; Get the low part of the time base
600 mr r9,r3 ; Save the sync word address
601 li r3,1 ; Assume we work
603 csynctry: lwz r11,0(r9) ; Grab the sync value
604 mr. r11,r11 ; Counter hit 0?
605 beqlr- ; Yes, it hit 0; return success (r3 is already 1)...
606 mftb r12 ; Time stamp us now
608 sub r12,r12,r10 ; Get the elapsed time
609 cmplw r4,r12 ; Have we gone too long?
610 bge+ csynctry ; Not yet...
612 li r3,0 ; Set failure...
613 blr ; Return, head hanging low...
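/*
 *	Illustrative sketch (not part of the original source): the signalling
 *	processor might drive the counter described above like this, with
 *	hypothetical names:
 *
 *		sync_count = ncpus - 1;			// one count per target processor
 *		... signal the other processors ...	// each decrements as it responds
 *		if (!hw_cpu_sync(&sync_count, LockTimeOut))
 *			panic("cpu sync timeout");	// somebody never answered
 */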
616 * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
618 * Spin until word changes or timeout.
619 * Return success (1) or failure (0).
620 * Attempt will fail after timeout ticks of the timebase.
622 * This is used to ensure that a processor passes a certain point.
623 * An example of use is to monitor the last interrupt time in the
624 * per_proc block. This can be used to ensure that the other processor
625 * has seen at least one interrupt since a specific time.
631 .globl EXT(hw_cpu_wcng)
635 mftb r10 ; Get the low part of the time base
636 mr r9,r3 ; Save the sync word address
637 li r3,1 ; Assume we work
639 wcngtry: lwz r11,0(r9) ; Grab the value
640 cmplw r11,r4 ; Do they still match?
641 bnelr- ; Nope, cool...
642 mftb r12 ; Time stamp us now
644 sub r12,r12,r10 ; Get the elapsed time
645 cmplw r5,r12 ; Have we gone too long?
646 bge+ wcngtry ; Not yet...
648 li r3,0 ; Set failure...
649 blr ; Return, head hanging low...
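/*
 *	Illustrative sketch (not part of the original source), following the
 *	example in the comment above (names are hypothetical):
 *
 *		old = pp->last_interrupt_time;		// snapshot the other CPU's value
 *		... cause an interrupt on that CPU ...
 *		if (!hw_cpu_wcng(&pp->last_interrupt_time, old, LockTimeOut))
 *			...				// it never took another interrupt
 */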
653 * unsigned int hw_lock_try(hw_lock_t)
655 * Try to acquire spin-lock. Return success (1) or failure (0)
656 * Returns with preemption disabled on success.
660 .globl EXT(hw_lock_try)
665 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
666 lis r5,0x9999 /* (TEST/DEBUG) */
667 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
668 sc /* (TEST/DEBUG) */
670 mfmsr r9 /* Save the MSR value */
671 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
672 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
673 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruption bit */
676 lis r5, 0x10 /* roughly 1E6 */
678 #endif /* MACH_LDEBUG */
680 mtmsr r7 /* Disable interruptions and thus, preemption */
681 isync ; May have turned off fp/vec here
685 bdnz+ 0f /* Count attempts */
686 mtmsr r9 /* Restore enablement */
687 BREAKPOINT_TRAP /* Get to debugger */
688 mtmsr r7 /* Disable interruptions and thus, preemption */
690 #endif /* MACH_LDEBUG */
692 lwarx r5,0,r3 /* Ld from addr of arg and reserve */
694 andi. r6,r5,ILK_LOCKED /* TEST... */
696 bne- .L_lock_try_failed /* branch if taken. Predict free */
698 stwcx. r5,0,r3 /* And SET (if still reserved) */
699 mfsprg r6,0 /* Get the per_proc block */
700 bne- .L_lock_try_loop /* If set failed, loop back */
704 lwz r5,PP_PREEMPT_CNT(r6) /* Get the preemption level */
705 addi r5,r5,1 /* Bring up the disable count */
706 stw r5,PP_PREEMPT_CNT(r6) /* Save it back */
708 mtmsr r9 /* Allow interruptions now */
709 li r3,1 /* Set that the lock was free */
713 mtmsr r9 /* Allow interruptions now */
714 li r3,0 /* FAILURE - lock was taken */
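/*
 *	Illustrative sketch (not part of the original source): hw_lock_try() is
 *	the non-blocking form, so a caller can do other work instead of spinning:
 *
 *		if (hw_lock_try(&mylock)) {		// got it; preemption now disabled
 *			...
 *			hw_lock_unlock(&mylock);	// also releases the preemption level
 *		} else {
 *			...				// lock busy; come back later
 *		}
 */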
718 * unsigned int hw_lock_held(hw_lock_t)
720 * Return 1 if lock is held
721 * Doesn't change preemption state.
722 * N.B. Racy, of course.
726 .globl EXT(hw_lock_held)
731 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
732 lis r5,0x8888 /* (TEST/DEBUG) */
733 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
734 sc /* (TEST/DEBUG) */
736 isync /* Make sure we don't use a speculatively fetched lock */
737 lwz r3, 0(r3) /* Return value of lock */
741 * uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
743 * Compare old to area; if equal, store new and return true,
744 * else return false and do not store.
745 * This is an atomic operation
749 .globl EXT(hw_compare_and_store)
751 LEXT(hw_compare_and_store)
753 mr r6,r3 /* Save the old value */
755 cstry: lwarx r9,0,r5 /* Grab the area value */
756 li r3,1 /* Assume it works */
757 cmplw cr0,r9,r6 /* Does it match the old value? */
758 bne- csfail /* No, it must have changed... */
759 stwcx. r4,0,r5 /* Try to save the new value */
760 bne- cstry /* Didn't get it, try again... */
761 isync /* Just hold up prefetch */
764 csfail: li r3,0 /* Set failure */
765 blr /* Better luck next time... */
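/*
 *	Illustrative sketch (not part of the original source): a lock-free
 *	increment built on hw_compare_and_store(), for a hypothetical counter:
 *
 *		uint32_t old;
 *		do {
 *			old = *counter;
 *		} while (!hw_compare_and_store(old, old + 1, counter));
 */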
769 * uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
771 * Atomically add the second parameter to the first.
772 * Returns the result.
776 .globl EXT(hw_atomic_add)
780 mr r6,r3 /* Save the area */
782 addtry: lwarx r3,0,r6 /* Grab the area value */
783 add r3,r3,r4 /* Add the value */
784 stwcx. r3,0,r6 /* Try to save the new value */
785 bne- addtry /* Didn't get it, try again... */
790 * uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt)
792 * Atomically subtract the second parameter from the first.
793 * Returns the result.
797 .globl EXT(hw_atomic_sub)
801 mr r6,r3 /* Save the area */
803 subtry: lwarx r3,0,r6 /* Grab the area value */
804 sub r3,r3,r4 /* Subtract the value */
805 stwcx. r3,0,r6 /* Try to save the new value */
806 bne- subtry /* Didn't get it, try again... */
811 * uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask)
813 * Atomically ORs the second parameter into the first.
814 * Returns the result.
818 .globl EXT(hw_atomic_or)
822 mr r6,r3 ; Save the area
824 ortry: lwarx r3,0,r6 ; Grab the area value
825 or r3,r3,r4 ; OR the value
826 stwcx. r3,0,r6 ; Try to save the new value
827 bne- ortry ; Did not get it, try again...
832 * uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask)
834 * Atomically ANDs the second parameter with the first.
835 * Returns the result.
839 .globl EXT(hw_atomic_and)
843 mr r6,r3 ; Save the area
845 andtry: lwarx r3,0,r6 ; Grab the area value
846 and r3,r3,r4 ; AND the value
847 stwcx. r3,0,r6 ; Try to save the new value
848 bne- andtry ; Did not get it, try again...
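/*
 *	Illustrative sketch (not part of the original source): the hw_atomic_*
 *	routines maintain shared counters and flag words without a lock; the
 *	names below are hypothetical:
 *
 *		(void)hw_atomic_add(&obj_refcount, 1);		// take a reference
 *		(void)hw_atomic_or(&obj_flags, 0x0001);		// set a flag bit
 *		if (hw_atomic_sub(&obj_refcount, 1) == 0)	// drop the reference
 *			...					// that was the last one
 */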
853 * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
855 * Atomically inserts the element at the head of the list
856 * anchor is the pointer to the first element
857 * element is the pointer to the element to insert
858 * disp is the displacement into the element to the chain pointer
862 .globl EXT(hw_queue_atomic)
864 LEXT(hw_queue_atomic)
866 mr r7,r4 /* Make end point the same as start */
867 mr r8,r5 /* Copy the displacement also */
868 b hw_queue_comm /* Join common code... */
871 * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
873 * Atomically inserts the list of elements at the head of the list
874 * anchor is the pointer to the first element
875 * first is the pointer to the first element to insert
876 * last is the pointer to the last element to insert
877 * disp is the displacement into the element to the chain pointer
881 .globl EXT(hw_queue_atomic_list)
883 LEXT(hw_queue_atomic_list)
885 mr r7,r5 /* Make end point the same as start */
886 mr r8,r6 /* Copy the displacement also */
889 lwarx r9,0,r3 /* Pick up the anchor */
890 stwx r9,r8,r7 /* Chain that to the end of the new stuff */
891 eieio ; Make sure this store makes it before the anchor update
892 stwcx. r4,0,r3 /* Try to chain into the front */
893 bne- hw_queue_comm /* Didn't make it, try again... */
898 * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
900 * Atomically removes the first element in a list and returns it.
901 * anchor is the pointer to the first element
902 * disp is the displacement into the element to the chain pointer
903 * Returns element if found, 0 if empty.
907 .globl EXT(hw_dequeue_atomic)
909 LEXT(hw_dequeue_atomic)
911 mr r5,r3 /* Save the anchor */
914 lwarx r3,0,r5 /* Pick up the anchor */
915 mr. r3,r3 /* Is the list empty? */
916 beqlr- /* Leave if the list is empty... */
917 lwzx r9,r4,r3 /* Get the next in line */
918 stwcx. r9,0,r5 /* Try to chain into the front */
919 beqlr+ ; Got the thing, go away with it...
920 b hw_dequeue_comm ; Did not make it, try again...
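/*
 *	Illustrative sketch (not part of the original source): these routines
 *	keep a singly linked LIFO whose link word lives "disp" bytes into each
 *	element. With a hypothetical element whose link is its first word:
 *
 *		hw_queue_atomic(&list_anchor, (unsigned int *)elem, 0);	// push one element
 *		elem = (elem_t *)hw_dequeue_atomic(&list_anchor, 0);	// pop one; 0 if empty
 */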
923 * void mutex_init(mutex_t* l, etap_event_t etap)
926 ENTRY(mutex_init,TAG_NO_FRAME_USED)
930 stw r10, LOCK_DATA(r3) /* clear lock word */
931 sth r10, MUTEX_WAITERS(r3) /* init waiter count */
932 sth r10, MUTEX_PROMOTED_PRI(r3)
934 stw r10, MUTEX_PC(r3) /* init caller pc */
935 stw r10, MUTEX_THREAD(r3) /* and owning thread */
937 stw r10, MUTEX_TYPE(r3) /* set lock type */
938 #endif /* MACH_LDEBUG */
941 bl EXT(etap_mutex_init) /* init ETAP data */
942 #endif /* ETAP_LOCK_TRACE */
948 * void mutex_lock(mutex_t*)
952 .globl EXT(mutex_lock)
955 .globl EXT(_mutex_lock)
959 mfsprg r6,1 /* load the current thread */
961 lwarx r5,0,r3 /* load the mutex lock */
963 bne- L_mutex_lock_slow /* go to the slow path */
964 stwcx. r6,0,r3 /* grab the lock */
965 bne- L_mutex_lock_loop /* loop back if failed */
966 isync /* stop prefetching */
971 mflr r12 ; (TEST/DEBUG)
972 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
973 mtlr r12 ; (TEST/DEBUG)
978 bl EXT(assert_wait_possible)
980 bne L_mutex_lock_assert_wait_1
981 lis r3,hi16(L_mutex_lock_assert_wait_panic_str)
982 ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
986 L_mutex_lock_assert_wait_panic_str:
987 STRINGD "mutex_lock: assert_wait_possible false\n\000"
990 L_mutex_lock_assert_wait_1:
996 stw r0,SWT_HI(r1) /* set wait time to 0 (HI) */
997 stw r0,SWT_LO(r1) /* set wait time to 0 (LO) */
998 stw r0,MISSED(r1) /* clear local miss marker */
999 #endif /* ETAP_LOCK_TRACE */
1003 CHECK_NO_SIMPLELOCKS()
1007 mfsprg r4,0 /* (TEST/DEBUG) */
1008 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1009 lwz r4,PP_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
1010 lis r5,0xAAAA /* (TEST/DEBUG) */
1011 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1012 sc /* (TEST/DEBUG) */
1015 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1016 mr. r4,r3 /* Did we get it? */
1017 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1018 bne+ mlGotInt /* We got it just fine... */
1020 lis r3,HIGH_ADDR(mutex_failed1) ; Get the failed mutex message
1021 ori r3,r3,LOW_ADDR(mutex_failed1) ; Get the failed mutex message
1022 bl EXT(panic) ; Call panic
1023 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1027 STRINGD "We can't get a mutex interlock lock on mutex_lock\n\000"
1032 /* Note that there is no reason to do a load and reserve here. We already
1033 hold the interlock lock and no one can touch this field unless they
1034 have that, so, we're free to play */
1036 lwz r4,LOCK_DATA(r3) /* Get the mutex's lock field */
1037 rlwinm. r9,r4,30,2,31 /* So, can we have it? */
1038 bne- mlInUse /* Nope, somebody's playing already... */
1041 mfmsr r11 ; Note: no need to deal with fp or vec here
1042 rlwinm r5,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1044 mfsprg r9,0 /* Get the per_proc block */
1045 lwz r5,0(r1) /* Get previous save frame */
1046 lwz r5,FM_LR_SAVE(r5) /* Get our caller's address */
1047 lwz r8, PP_ACTIVE_THREAD(r9) /* Get the active thread */
1048 stw r5,MUTEX_PC(r3) /* Save our caller */
1049 mr. r8,r8 /* Is there any thread? */
1050 stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */
1051 beq- .L_ml_no_active_thread /* No owning thread... */
1052 lwz r9,THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
1053 addi r9,r9,1 /* Bump it up */
1054 stw r9,THREAD_MUTEX_COUNT(r8) /* Stash it back */
1055 .L_ml_no_active_thread:
1057 #endif /* MACH_LDEBUG */
1059 bl EXT(mutex_lock_acquire)
1067 stw r5,LOCK_DATA(r3) /* grab the mutexlock and free the interlock */
1073 bl EXT(etap_mutex_hold) /* collect hold timestamp */
1074 #endif /* ETAP_LOCK_TRACE */
1076 EPILOG /* Restore all saved registers */
1078 b epStart /* Go enable preemption... */
1081 * We come to here when we have a resource conflict. In other words,
1082 * the mutex is held.
1089 cmpwi r7,0 /* did we already take a wait timestamp ? */
1090 bne .L_ml_block /* yup. carry-on */
1091 bl EXT(etap_mutex_miss) /* get wait timestamp */
1092 stw r3,SWT_HI(r1) /* store timestamp */
1094 li r7, 1 /* mark wait timestamp as taken */
1096 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1098 #endif /* ETAP_LOCK_TRACE */
1101 CHECK_MYLOCK(MUTEX_THREAD) /* Assert we don't own the lock already */
1104 /* Note that we come in here with the interlock set. The wait routine
1105 * will unlock it before waiting.
1107 ori r4,r4,WAIT_FLAG /* Set the wait flag */
1108 stw r4,LOCK_DATA(r3)
1109 rlwinm r4,r4,0,0,29 /* Extract the lock owner */
1110 bl EXT(mutex_lock_wait) /* Wait for our turn at the lock */
1112 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1113 b .L_ml_retry /* and try again... */
1117 * void _mutex_try(mutex_t*)
1122 .globl EXT(mutex_try)
1124 .globl EXT(_mutex_try)
1127 mfsprg r6,1 /* load the current thread */
1129 lwarx r5,0,r3 /* load the lock value */
1131 bne- L_mutex_try_slow /* branch to the slow path */
1132 stwcx. r6,0,r3 /* grab the lock */
1133 bne- L_mutex_try_loop /* retry if failed */
1134 isync /* stop prefetching */
1140 PROLOG(8) /* reserve space for SWT_HI and SWT_LO */
1144 stw r5, SWT_HI(r1) /* set wait time to 0 (HI) */
1145 stw r5, SWT_LO(r1) /* set wait time to 0 (LO) */
1146 #endif /* ETAP_LOCK_TRACE */
1149 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1150 lis r5,0xBBBB /* (TEST/DEBUG) */
1151 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1152 sc /* (TEST/DEBUG) */
1156 CHECK_NO_SIMPLELOCKS()
1158 lwz r6,LOCK_DATA(r3) /* Quick check */
1159 rlwinm. r6,r6,30,2,31 /* to see if someone has this lock already */
1160 bne- mtFail /* Someone's got it already... */
1162 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1163 mr. r4,r3 /* Did we get it? */
1164 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1165 bne+ mtGotInt /* We got it just fine... */
1167 lis r3,HIGH_ADDR(mutex_failed2) ; Get the failed mutex message
1168 ori r3,r3,LOW_ADDR(mutex_failed2) ; Get the failed mutex message
1169 bl EXT(panic) ; Call panic
1170 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1174 STRINGD "We can't get a mutex interlock lock on mutex_try\n\000"
1179 /* Note that there is no reason to do a load and reserve here. We already
1180 hold the interlock and no one can touch this field unless they
1181 have that, so, we're free to play */
1183 lwz r4,LOCK_DATA(r3) /* Get the mutex's lock field */
1184 rlwinm. r9,r4,30,2,31 /* So, can we have it? */
1185 bne- mtInUse /* Nope, somebody's playing already... */
1189 rlwinm r5,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1191 mfsprg r9,0 /* Get the per_proc block */
1192 lwz r5,0(r1) /* Get previous save frame */
1193 lwz r5,FM_LR_SAVE(r5) /* Get our caller's address */
1194 lwz r8, PP_ACTIVE_THREAD(r9) /* Get the active thread */
1195 stw r5,MUTEX_PC(r3) /* Save our caller */
1196 mr. r8,r8 /* Is there any thread? */
1197 stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */
1198 beq- .L_mt_no_active_thread /* No owning thread... */
1199 lwz r9, THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
1200 addi r9, r9, 1 /* Bump it up */
1201 stw r9, THREAD_MUTEX_COUNT(r8) /* Stash it back */
1202 .L_mt_no_active_thread:
1204 #endif /* MACH_LDEBUG */
1206 bl EXT(mutex_lock_acquire)
1213 sync /* Push it all out */
1214 stw r5,LOCK_DATA(r3) /* grab the mutexlock and free the interlock */
1217 lwz r4,0(r1) /* Back chain the stack */
1219 lwz r4,FM_LR_SAVE(r4) /* Get our caller's address */
1221 bl EXT(etap_mutex_hold) /* collect hold timestamp */
1222 #endif /* ETAP_LOCK_TRACE */
1224 bl epStart /* Go enable preemption... */
1227 EPILOG /* Restore all saved registers */
1231 * We come to here when we have a resource conflict. In other words,
1232 * the mutex is held.
1236 rlwinm r4,r4,0,0,30 /* Get the unlock value */
1237 stw r4,LOCK_DATA(r3) /* free the interlock */
1238 bl epStart /* Go enable preemption... */
1240 mtFail: li r3,0 /* Set failure code */
1241 EPILOG /* Restore all saved registers */
1246 * void mutex_unlock(mutex_t* l)
1250 .globl EXT(mutex_unlock)
1255 L_mutex_unlock_loop:
1257 rlwinm. r4,r5,0,30,31 /* Bail if pending waiter or interlock set */
1258 li r5,0 /* Clear the mutexlock */
1259 bne- L_mutex_unlock_slow
1261 bne- L_mutex_unlock_loop
1263 L_mutex_unlock_slow:
1268 bl EXT(etap_mutex_unlock) /* collect ETAP data */
1269 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1270 #endif /* ETAP_LOCK_TRACE */
1274 CHECK_THREAD(MUTEX_THREAD)
1277 mfsprg r4,0 /* (TEST/DEBUG) */
1278 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1279 lwz r4,PP_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
1280 lis r5,0xCCCC /* (TEST/DEBUG) */
1281 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1282 sc /* (TEST/DEBUG) */
1284 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1285 mr. r4,r3 /* Did we get it? */
1286 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1287 bne+ muGotInt /* We got it just fine... */
1289 lis r3,HIGH_ADDR(mutex_failed3) ; Get the failed mutex message
1290 ori r3,r3,LOW_ADDR(mutex_failed3) ; Get the failed mutex message
1291 bl EXT(panic) ; Call panic
1292 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1296 STRINGD "We can't get a mutex interlock lock on mutex_unlock\n\000"
1301 lwz r4,LOCK_DATA(r3)
1302 andi. r5,r4,WAIT_FLAG /* are there any waiters ? */
1304 beq+ muUnlock /* Nope, we're done... */
1306 bl EXT(mutex_unlock_wakeup) /* yes, wake a thread */
1307 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1308 lwz r5,LOCK_DATA(r3) /* load the lock */
1313 rlwinm r9,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1316 lwz r9,PP_ACTIVE_THREAD(r9)
1317 stw r9,MUTEX_THREAD(r3) /* disown thread */
1319 beq- .L_mu_no_active_thread
1320 lwz r8,THREAD_MUTEX_COUNT(r9)
1322 stw r8,THREAD_MUTEX_COUNT(r9)
1323 .L_mu_no_active_thread:
1325 #endif /* MACH_LDEBUG */
1327 andi. r5,r5,WAIT_FLAG /* Get the unlock value */
1328 sync /* Make sure it's all there before we release */
1329 stw r5,LOCK_DATA(r3) /* unlock the interlock and lock */
1331 EPILOG /* Deal with the stack now, enable_preemption doesn't always want one */
1332 b epStart /* Go enable preemption... */
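/*
 *	Illustrative sketch (not part of the original source): unlike the spin
 *	locks above, a mutex may block the caller, so it is only taken where
 *	sleeping is legal (names are hypothetical):
 *
 *		mutex_init(&m, event);		// "event" is the etap_event_t argument
 *		mutex_lock(&m);			// may wait until the holder unlocks
 *		...
 *		mutex_unlock(&m);		// wakes a waiter if WAIT_FLAG is set
 */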
1335 * void interlock_unlock(hw_lock_t lock)
1339 .globl EXT(interlock_unlock)
1341 LEXT(interlock_unlock)
1344 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1345 lis r5,0xDDDD /* (TEST/DEBUG) */
1346 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1347 sc /* (TEST/DEBUG) */
1349 lwz r10,LOCK_DATA(r3)
1350 rlwinm r10,r10,0,0,30
1352 stw r10,LOCK_DATA(r3)
1354 b epStart /* Go enable preemption... */
1357 * Here is where we enable preemption. We need to be protected
1358 * against ourselves; we can't chance getting interrupted and modifying
1359 * our processor-wide preemption count after we've loaded it up. So,
1360 * we need to disable all 'rupts. Actually, we could use a compare
1361 * and swap to do this, but, since there are no MP considerations
1362 * (we are dealing with a CPU-local field) it is much, much faster to just disable.
1365 * Note that if we are not genned MP, the calls here will be no-opped via
1366 * a #define and since the _mp forms are the same, likewise a #define
1367 * will be used to route to the other forms
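/*
 *	Illustrative sketch (not part of the original source): the per_proc
 *	preemption count nests, so disables and enables must pair up:
 *
 *		_disable_preemption();		// count 0 -> 1, preemption off
 *		_disable_preemption();		// count 1 -> 2, still off
 *		_enable_preemption();		// count 2 -> 1, still off
 *		_enable_preemption();		// count 1 -> 0, pending urgent AST honored
 */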
1370 /* This version does not check if we get preempted or not */
1374 .globl EXT(_enable_preemption_no_check)
1376 LEXT(_enable_preemption_no_check)
1377 cmplw cr1,r1,r1 /* Force zero cr so we know not to check if preempted */
1378 b epCommn /* Join up with the other enable code... */
1381 /* This version checks if we get preempted or not */
1384 .globl EXT(_enable_preemption)
1386 LEXT(_enable_preemption)
1388 epStart: cmplwi cr1,r1,0 /* Force non-zero cr so we know to check if preempted */
1391 * Common enable preemption code
1394 epCommn: mfmsr r9 /* Save the old MSR */
1395 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1396 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1397 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1398 mtmsr r8 /* Interrupts off */
1399 isync ; May have messed with vec/fp here
1401 mfsprg r3,0 /* Get the per_proc block */
1402 li r8,-1 /* Get a decrementer */
1403 lwz r5,PP_PREEMPT_CNT(r3) /* Get the preemption level */
1404 add. r5,r5,r8 /* Bring down the disable count */
1406 mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep from interrupting too early
1407 mr. r4,r4 ; (TEST/DEBUG)
1408 beq- epskptrc0 ; (TEST/DEBUG)
1409 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1410 lis r4,0xBBBB ; (TEST/DEBUG)
1411 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1413 epskptrc0: mr. r5,r5 ; (TEST/DEBUG)
1416 blt- epTooFar /* Yeah, we did... */
1417 #endif /* MACH_LDEBUG */
1418 stw r5,PP_PREEMPT_CNT(r3) /* Save it back */
1420 beq+ epCheckPreempt /* Go check if we need to be preempted... */
1422 epNoCheck: mtmsr r9 /* Restore the interrupt level */
1427 lis r6,HIGH_ADDR(EXT(panic)) /* First half of panic call */
1428 lis r3,HIGH_ADDR(epTooFarStr) /* First half of panic string */
1429 ori r6,r6,LOW_ADDR(EXT(panic)) /* Second half of panic call */
1430 ori r3,r3,LOW_ADDR(epTooFarStr) /* Second half of panic string */
1431 mtlr r6 /* Get the address of the panic routine */
1432 mtmsr r9 /* Restore interruptions */
1437 STRINGD "_enable_preemption: preemption_level <= 0!\000"
1439 #endif /* MACH_LDEBUG */
1444 lwz r7,PP_NEED_AST(r3) /* Get the AST request address */
1445 li r5,AST_URGENT /* Get the requests we do honor */
1446 lwz r7,0(r7) /* Get the actual, real live, extra special AST word */
1447 lis r0,HIGH_ADDR(DoPreemptCall) /* Just in case, get the top of firmware call */
1448 and. r7,r7,r5 ; Should we preempt?
1449 ori r0,r0,LOW_ADDR(DoPreemptCall) /* Merge in bottom part */
1450 beq+ epCPno ; No preemption here...
1452 andi. r3,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off
1454 epCPno: mtmsr r9 /* Allow interrupts if we can */
1455 beqlr+ ; We probably will not preempt...
1456 sc /* Do the preemption */
1457 blr /* Now, go away now... */
1460 * Here is where we disable preemption. Since preemption is on a
1461 * per processor basis (a thread runs on one CPU at a time) we don't
1462 * need any cross-processor synchronization. We do, however, need to
1463 * be interrupt safe, so we don't preempt while in the process of
1464 * disabling it. We could use SPLs, but since we always want complete
1465 * disablement, and this is platform specific code, we'll just kick the
1466 * MSR. We'll save a couple of orders of magnitude over using SPLs.
1471 nop ; Use these 5 nops to force daPreComm
1472 nop ; to a line boundary.
1477 .globl EXT(_disable_preemption)
1479 LEXT(_disable_preemption)
1481 daPreAll: mfmsr r9 /* Save the old MSR */
1482 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1483 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1484 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1485 mtmsr r8 /* Interrupts off */
1486 isync ; May have messed with fp/vec
1488 daPreComm: mfsprg r6,0 /* Get the per_proc block */
1489 lwz r5,PP_PREEMPT_CNT(r6) /* Get the preemption level */
1490 addi r5,r5,1 /* Bring up the disable count */
1491 stw r5,PP_PREEMPT_CNT(r6) /* Save it back */
1493 mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep from interrupting too early
1494 mr. r4,r4 ; (TEST/DEBUG)
1495 beq- epskptrc1 ; (TEST/DEBUG)
1496 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1497 lis r4,0xAAAA ; (TEST/DEBUG)
1498 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1500 epskptrc1: ; (TEST/DEBUG)
1504 ; Set PREEMPTSTACK above to enable a preemption traceback stack.
1506 ; NOTE: make sure that PREEMPTSTACK in aligned_data is
1507 ; set the same as it is here. This is the number of
1508 ; traceback entries we can handle per processor
1510 ; A value of 0 disables the stack.
1513 cmplwi r5,PREEMPTSTACK ; Maximum depth
1514 lwz r6,CPU_ACTIVE_THREAD(r6) ; Get the pointer to the currently active thread
1515 bgt- nopredeb ; Too many to stack...
1516 mr. r6,r6 ; During boot?
1517 beq- nopredeb ; Yes, do not do backtrace...
1518 lwz r6,THREAD_TOP_ACT(r6) ; Point to the active activation
1519 lwz r6,ACT_MACT_PCB(r6) ; Get the last savearea used
1520 mr. r0,r6 ; Any saved context?
1521 beq- nosaveds ; No...
1522 lwz r0,saver1(r6) ; Get end of savearea chain
1524 nosaveds: li r11,0 ; Clear callers callers callers return
1525 li r10,0 ; Clear callers callers callers callers return
1526 li r8,0 ; Clear callers callers callers callers callers return
1527 lwz r2,0(r1) ; Get callers callers stack frame
1528 lwz r12,8(r2) ; Get our callers return
1529 lwz r4,0(r2) ; Back chain
1531 xor r2,r4,r2 ; Form difference
1532 cmplwi r2,8192 ; Within a couple of pages?
1533 mr r2,r4 ; Move register
1534 bge- nosaveher2 ; No, no back chain then...
1535 lwz r11,8(r2) ; Get our callers return
1536 lwz r4,0(r2) ; Back chain
1538 xor r2,r4,r2 ; Form difference
1539 cmplwi r2,8192 ; Within a couple of pages?
1540 mr r2,r4 ; Move register
1541 bge- nosaveher2 ; No, no back chain then...
1542 lwz r10,8(r2) ; Get our callers return
1543 lwz r4,0(r2) ; Back chain
1545 xor r2,r4,r2 ; Form difference
1546 cmplwi r2,8192 ; Within a couple of pages?
1547 mr r2,r4 ; Move register
1548 bge- nosaveher2 ; No, no back chain then...
1549 lwz r8,8(r2) ; Get our callers return
1552 addi r5,r5,-1 ; Get index to slot
1553 mfspr r6,pir ; Get our processor
1554 mflr r4 ; Get our return
1555 rlwinm r6,r6,8,0,23 ; Index to processor slot
1556 lis r2,hi16(EXT(DBGpreempt)) ; Stack high order
1557 rlwinm r5,r5,4,0,27 ; Index to stack slot
1558 ori r2,r2,lo16(EXT(DBGpreempt)) ; Stack low order
1559 add r2,r2,r5 ; Point to slot
1560 add r2,r2,r6 ; Move to processor
1561 stw r4,0(r2) ; Save our return
1562 stw r11,4(r2) ; Save callers caller
1563 stw r10,8(r2) ; Save callers callers caller
1564 stw r8,12(r2) ; Save callers callers callers caller
1567 mtmsr r9 /* Allow interruptions now */
1572 * Return the active thread for both inside and outside osfmk consumption
1576 .globl EXT(current_thread)
1578 LEXT(current_thread)
1582 lwz r3,ACT_THREAD(r3)
1585 mfmsr r9 /* Save the old MSR */
1586 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1587 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1588 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1589 mtmsr r8 /* Interrupts off */
1591 mfsprg r6,0 /* Get the per_proc */
1592 lwz r3,PP_ACTIVE_THREAD(r6) /* Get the active thread */
1594 lwz r4,ACT_THREAD(r4)
1596 beq current_thread_cont
1597 lis r5,hi16(L_current_thread_paniced)
1598 ori r5,r5,lo16(L_current_thread_paniced)
1601 bne current_thread_cont
1605 lis r3,hi16(L_current_thread_panic)
1606 ori r3,r3,lo16(L_current_thread_panic)
1610 L_current_thread_panic:
1611 STRINGD "current_thread: spr1 not sync %x %x %x\n\000"
1612 L_current_thread_paniced:
1615 current_thread_cont:
1617 mtmsr r9 /* Restore interruptions to entry */
1621 * Set the active thread
1624 .globl EXT(set_machine_current_thread)
1625 LEXT(set_machine_current_thread)
1627 mfmsr r9 /* Save the old MSR */
1628 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1629 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1630 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1631 mtmsr r8 /* Interrupts off */
1632 isync ; May have messed with fp/vec
1633 mfsprg r6,0 /* Get the per_proc */
1634 stw r3,PP_ACTIVE_THREAD(r6) /* Set the active thread */
1635 mtmsr r9 /* Restore interruptions to entry */
1639 * Set the current activation
1642 .globl EXT(set_machine_current_act)
1643 LEXT(set_machine_current_act)
1644 mtsprg 1,r3 /* Set spr1 with the active thread */
1648 * Return the current activation
1651 .globl EXT(current_act)
1659 * Return the current preemption level
1663 .globl EXT(get_preemption_level)
1665 LEXT(get_preemption_level)
1667 mfmsr r9 /* Save the old MSR */
1668 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1669 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1670 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1671 mtmsr r8 /* Interrupts off */
1673 mfsprg r6,0 /* Get the per_proc */
1674 lwz r3,PP_PREEMPT_CNT(r6) /* Get the preemption level */
1675 mtmsr r9 /* Restore interruptions to entry */
1680 * Return the cpu_data
1684 .globl EXT(get_cpu_data)
1688 mfsprg r3,0 /* Get the per_proc */
1689 addi r3,r3,PP_ACTIVE_THREAD /* Get the pointer to the CPU data from per proc */
1694 * Return the simple lock count
1698 .globl EXT(get_simple_lock_count)
1700 LEXT(get_simple_lock_count)
1702 mfmsr r9 /* Save the old MSR */
1703 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1704 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1705 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1706 mtmsr r8 /* Interrupts off */
1707 isync ; May have messed with vec/fp
1708 mfsprg r6,0 /* Get the per_proc */
1709 lwz r3,PP_SIMPLE_LOCK_CNT(r6) /* Get the simple lock count */
1710 mtmsr r9 /* Restore interruptions to entry */
1714 * fast_usimple_lock():
1716 * If EE is off, get the simple lock without incrementing the preemption count and
1717 * mark the simple lock with SLOCK_FAST.
1718 * If EE is on, call usimple_lock().
1721 .globl EXT(fast_usimple_lock)
1723 LEXT(fast_usimple_lock)
1726 b EXT(usimple_lock) ; (TEST/DEBUG)
1729 andi. r7,r9,lo16(MASK(MSR_EE))
1730 bne- L_usimple_lock_c
1731 L_usimple_lock_loop:
1733 li r5,ILK_LOCKED|SLOCK_FAST
1735 bne- L_usimple_lock_c
1737 bne- L_usimple_lock_loop
1744 * fast_usimple_lock_try():
1746 * If EE is off, try to get the simple lock. The preemption count doesn't get incremented and
1747 * if successfully held, the simple lock is marked with SLOCK_FAST.
1748 * If EE is on, call usimple_lock_try()
1751 .globl EXT(fast_usimple_lock_try)
1753 LEXT(fast_usimple_lock_try)
1756 b EXT(usimple_lock_try) ; (TEST/DEBUG)
1759 andi. r7,r9,lo16(MASK(MSR_EE))
1760 bne- L_usimple_lock_try_c
1761 L_usimple_lock_try_loop:
1763 li r5,ILK_LOCKED|SLOCK_FAST
1765 bne- L_usimple_lock_try_fail
1767 bne- L_usimple_lock_try_loop
1771 L_usimple_lock_try_fail:
1774 L_usimple_lock_try_c:
1775 b EXT(usimple_lock_try)
1778 * fast_usimple_unlock():
1780 * If the simple lock is marked SLOCK_FAST, release it without decrementing the preemption count.
1781 * Call usimple_unlock() otherwise.
1784 .globl EXT(fast_usimple_unlock)
1786 LEXT(fast_usimple_unlock)
1789 b EXT(usimple_unlock) ; (TEST/DEBUG)
1791 lwz r5,LOCK_DATA(r3)
1793 cmpi cr0,r5,ILK_LOCKED|SLOCK_FAST
1794 bne- L_usimple_unlock_c
1798 andi. r7,r9,lo16(MASK(MSR_EE))
1799 beq L_usimple_unlock_cont
1800 lis r3,hi16(L_usimple_unlock_panic)
1801 ori r3,r3,lo16(L_usimple_unlock_panic)
1805 L_usimple_unlock_panic:
1806 STRINGD "fast_usimple_unlock: interrupts not disabled\n\000"
1808 L_usimple_unlock_cont:
1810 stw r0, LOCK_DATA(r3)
1813 b EXT(usimple_unlock)
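/*
 *	Illustrative sketch (not part of the original source): the fast_usimple_*
 *	entry points only take the cheap path when interruptions are already off;
 *	otherwise they fall through to the normal usimple routines:
 *
 *		s = splhigh();			// EE off, so the fast path applies
 *		fast_usimple_lock(&l);		// marked ILK_LOCKED|SLOCK_FAST,
 *		...				// preemption count left untouched
 *		fast_usimple_unlock(&l);
 *		splx(s);
 */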
1816 * enter_funnel_section():
1820 .globl EXT(enter_funnel_section)
1822 LEXT(enter_funnel_section)
1825 lis r10,hi16(EXT(kdebug_enable))
1826 ori r10,r10,lo16(EXT(kdebug_enable))
1828 lis r11,hi16(EXT(split_funnel_off))
1829 ori r11,r11,lo16(EXT(split_funnel_off))
1831 or. r10,r11,r10 ; Check kdebug_enable or split_funnel_off
1832 bne- L_enter_funnel_section_slow1 ; If set, call the slow path
1833 mfsprg r6,1 ; Get the current activation
1834 lwz r7,LOCK_FNL_MUTEX(r3)
1836 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1837 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1838 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1839 mtmsr r10 ; Turn off EE
1840 isync ; May have messed with vec/fp
1842 L_enter_funnel_section_loop:
1843 lwarx r5,0,r7 ; Load the mutex lock
1845 bne- L_enter_funnel_section_slow ; Go to the slow path
1846 stwcx. r6,0,r7 ; Grab the lock
1847 bne- L_enter_funnel_section_loop ; Loop back if failed
1848 isync ; Stop prefetching
1849 lwz r6,ACT_THREAD(r6) ; Get the current thread
1851 stw r7,THREAD_FUNNEL_STATE(r6) ; Set the funnel state
1852 stw r3,THREAD_FUNNEL_LOCK(r6) ; Set the funnel lock reference
1856 L_enter_funnel_section_slow:
1858 L_enter_funnel_section_slow1:
1861 b EXT(thread_funnel_set)
1864 * exit_funnel_section():
1868 .globl EXT(exit_funnel_section)
1870 LEXT(exit_funnel_section)
1873 mfsprg r6,1 ; Get the current activation
1874 lwz r6,ACT_THREAD(r6) ; Get the current thread
1875 lwz r3,THREAD_FUNNEL_LOCK(r6) ; Get the funnel lock
1876 mr. r3,r3 ; Check on funnel held
1877 beq- L_exit_funnel_section_ret ;
1878 lis r10,hi16(EXT(kdebug_enable))
1879 ori r10,r10,lo16(EXT(kdebug_enable))
1882 bne- L_exit_funnel_section_slow1 ; If set, call the slow path
1883 lwz r7,LOCK_FNL_MUTEX(r3) ; Get the funnel mutex lock
1885 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1886 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1887 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1888 mtmsr r10 ; Turn off EE
1889 isync ; May have messed with fp/vec
1891 L_exit_funnel_section_loop:
1893 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1894 li r5,0 ; Clear the mutexlock
1895 bne- L_exit_funnel_section_slow
1896 stwcx. r5,0,r7 ; Release the funnel mutexlock
1897 bne- L_exit_funnel_section_loop
1899 stw r7,THREAD_FUNNEL_STATE(r6) ; Clear the funnel state
1900 stw r7,THREAD_FUNNEL_LOCK(r6) ; Clear the funnel lock reference
1902 L_exit_funnel_section_ret:
1904 L_exit_funnel_section_slow:
1906 L_exit_funnel_section_slow1:
1909 b EXT(thread_funnel_set)