1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25
26 #include <cpus.h>
27 #include <mach_assert.h>
28 #include <mach_ldebug.h>
29 #include <mach_rt.h>
30
31 #include <kern/etap_options.h>
32
33 #include <ppc/asm.h>
34 #include <ppc/proc_reg.h>
35 #include <assym.s>
36
37 #define STRING ascii
38
39 #define SWT_HI 0+FM_SIZE
40 #define SWT_LO 4+FM_SIZE
41 #define MISSED 8+FM_SIZE
42
43 #define ILK_LOCKED 0x01
44 #define WAIT_FLAG 0x02
45 #define SLOCK_FAST 0x02
46 #define TH_FN_OWNED 0x01
47
48 ;
49 ; NOTE: make sure that PREEMPTSTACK in aligned_data is
50 ; set the same as it is here. This is the number of
51 ; traceback entries we can handle per processor
52 ;
53 ; A value of 0 disables the stack.
54 ;
55 #define PREEMPTSTACK 0
56 #define CHECKNMI 0
57 #define CHECKLOCKS 1
58
59 #include <ppc/POWERMAC/mp/mp.h>
60
61 #define PROLOG(space) \
62 stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
63 mflr r0 __ASMNL__ \
64 stw r3,FM_ARG0(r1) __ASMNL__ \
65 stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__
66
67 #define EPILOG \
68 lwz r1,0(r1) __ASMNL__ \
69 lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
70 mtlr r0 __ASMNL__
71
72 #if MACH_LDEBUG && CHECKLOCKS
73 /*
74 * Routines for general lock debugging.
75 */
76
77 /* Gets lock check flags in CR6: CR bits 24-27 */
78
79 #define CHECK_SETUP(rg) \
80 lis rg,hi16(EXT(dgWork)) __ASMNL__ \
81 ori rg,rg,lo16(EXT(dgWork)) __ASMNL__ \
82 lbz rg,dgFlags(rg) __ASMNL__ \
83 mtcrf 2,rg __ASMNL__
84
85
86 /*
87 * Checks for expected lock types and calls "panic" on
88 * mismatch. Detects calls to Mutex functions with
89 * type simplelock and vice versa.
90 */
91 #define CHECK_MUTEX_TYPE() \
92 bt 24+disLktypeb,1f __ASMNL__ \
93 lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
94 cmpwi r10,MUTEX_TAG __ASMNL__ \
95 beq+ 1f __ASMNL__ \
96 lis r3,hi16(not_a_mutex) __ASMNL__ \
97 ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
98 bl EXT(panic) __ASMNL__ \
99 lwz r3,FM_ARG0(r1) __ASMNL__ \
100 1:
101
102 .data
103 not_a_mutex:
104 STRINGD "not a mutex!\n\000"
105 .text
106
107 #define CHECK_SIMPLE_LOCK_TYPE() \
108 bt 24+disLktypeb,1f __ASMNL__ \
109 lwz r10,SLOCK_TYPE(r3) __ASMNL__ \
110 cmpwi r10,USLOCK_TAG __ASMNL__ \
111 beq+ 1f __ASMNL__ \
112 lis r3,hi16(not_a_slock) __ASMNL__ \
113 ori r3,r3,lo16(not_a_slock) __ASMNL__ \
114 bl EXT(panic) __ASMNL__ \
115 lwz r3,FM_ARG0(r1) __ASMNL__ \
116 1:
117
118 .data
119 not_a_slock:
120 STRINGD "not a simple lock!\n\000"
121 .text
122
123 #define CHECK_NO_SIMPLELOCKS() \
124 bt 24+disLkNmSimpb,2f __ASMNL__ \
125 mfmsr r11 __ASMNL__ \
126 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 __ASMNL__ \
127 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 __ASMNL__ \
128 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
129 mtmsr r10 __ASMNL__ \
130 isync __ASMNL__ \
131 mfsprg r10,0 __ASMNL__ \
132 lwz r10,PP_SIMPLE_LOCK_CNT(r10) __ASMNL__ \
133 cmpwi r10,0 __ASMNL__ \
134 beq+ 1f __ASMNL__ \
135 lis r3,hi16(simple_locks_held) __ASMNL__ \
136 ori r3,r3,lo16(simple_locks_held) __ASMNL__ \
137 bl EXT(panic) __ASMNL__ \
138 lwz r3,FM_ARG0(r1) __ASMNL__ \
139 1: __ASMNL__ \
140 mtmsr r11 __ASMNL__ \
141 2:
142
143 .data
144 simple_locks_held:
145 STRINGD "simple locks held!\n\000"
146 .text
147
148 /*
149 * Verifies return to the correct thread in "unlock" situations.
150 */
151
152 #define CHECK_THREAD(thread_offset) \
153 bt 24+disLkThreadb,2f __ASMNL__ \
154 mfmsr r11 __ASMNL__ \
155 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 __ASMNL__ \
156 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 __ASMNL__ \
157 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
158 mtmsr r10 __ASMNL__ \
159 isync __ASMNL__ \
160 mfsprg r10,0 __ASMNL__ \
161 lwz r10,PP_ACTIVE_THREAD(r10) __ASMNL__ \
162 cmpwi r10,0 __ASMNL__ \
163 beq- 1f __ASMNL__ \
164 lwz r9,thread_offset(r3) __ASMNL__ \
165 cmpw r9,r10 __ASMNL__ \
166 beq+ 1f __ASMNL__ \
167 lis r3,hi16(wrong_thread) __ASMNL__ \
168 ori r3,r3,lo16(wrong_thread) __ASMNL__ \
169 bl EXT(panic) __ASMNL__ \
170 lwz r3,FM_ARG0(r1) __ASMNL__ \
171 1: __ASMNL__ \
172 mtmsr r11 __ASMNL__ \
173 2:
174 .data
175 wrong_thread:
176 STRINGD "wrong thread!\n\000"
177 .text
178
179 #define CHECK_MYLOCK(thread_offset) \
180 bt 24+disLkMyLckb,2f __ASMNL__ \
181 mfmsr r11 __ASMNL__ \
182 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 __ASMNL__ \
183 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 __ASMNL__ \
184 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
185 mtmsr r10 __ASMNL__ \
186 isync __ASMNL__ \
187 mfsprg r10,0 __ASMNL__ \
188 lwz r10,PP_ACTIVE_THREAD(r10) __ASMNL__ \
189 cmpwi r10,0 __ASMNL__ \
190 beq- 1f __ASMNL__ \
191 lwz r9, thread_offset(r3) __ASMNL__ \
192 cmpw r9,r10 __ASMNL__ \
193 bne+ 1f __ASMNL__ \
194 lis r3, HIGH_ADDR(mylock_attempt) __ASMNL__ \
195 ori r3,r3,LOW_ADDR(mylock_attempt) __ASMNL__ \
196 bl EXT(panic) __ASMNL__ \
197 lwz r3,FM_ARG0(r1) __ASMNL__ \
198 1: __ASMNL__ \
199 mtmsr r11 __ASMNL__ \
200 2:
201
202 .data
203 mylock_attempt:
204 STRINGD "mylock attempt!\n\000"
205 .text
206
207 #else /* MACH_LDEBUG */
208
209 #define CHECK_SETUP(rg)
210 #define CHECK_MUTEX_TYPE()
211 #define CHECK_SIMPLE_LOCK_TYPE()
212 #define CHECK_THREAD(thread_offset)
213 #define CHECK_NO_SIMPLELOCKS()
214 #define CHECK_MYLOCK(thread_offset)
215
216 #endif /* MACH_LDEBUG */
217
218 /*
219 * void hw_lock_init(hw_lock_t)
220 *
221  * Initialize a hardware lock. These locks should be cache-line aligned and sized
222  * as a multiple of the cache line size.
223 */
224
225 ENTRY(hw_lock_init, TAG_NO_FRAME_USED)
226
227 li r0, 0 /* set lock to free == 0 */
228 stw r0, 0(r3) /* Initialize the lock */
229 blr
230
231 /*
232 * void hw_lock_unlock(hw_lock_t)
233 *
234 * Unconditionally release lock.
235 * Release preemption level.
236 */
237
238
239 .align 5
240 .globl EXT(hw_lock_unlock)
241
242 LEXT(hw_lock_unlock)
243
244 #if 0
245 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
246 lis r5,0xFFFF /* (TEST/DEBUG) */
247 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
248 sc /* (TEST/DEBUG) */
249 #endif
250 sync /* Flush writes done under lock */
251 li r0, 0 /* set lock to free */
252 stw r0, 0(r3)
253
254 b epStart /* Go enable preemption... */
255
256
257 /*
258 * Special case for internal use. Uses same lock code, but sets up so
259 * that there will be no disabling of preemption after locking. Generally
260 * used for mutex locks when obtaining the interlock although there is
261 * nothing stopping other uses.
262 */
263
264 lockLock: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */
265 ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */
266 cmplwi  cr1,r1,0                        /* Set flag so we will not disable preemption after locking */
267 lwz     r4,0(r4)                        /* Get the timeout value */
268 b lockComm /* Join on up... */
269
270 /*
271 * void hw_lock_lock(hw_lock_t)
272 *
273 * Acquire lock, spinning until it becomes available.
274 * Return with preemption disabled.
275 * Apparently not used except by mach_perf.
276 * We will just set a default timeout and jump into the NORMAL timeout lock.
277 */
278
279 .align 5
280 .globl EXT(hw_lock_lock)
281
282 LEXT(hw_lock_lock)
283
284 lockDisa: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */
285 ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */
286 cmplw   cr1,r1,r1                       /* Set flag so we will disable preemption after locking */
287 lwz     r4,0(r4)                        /* Get the timeout value */
288 b lockComm /* Join on up... */
289
290 /*
291 * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
292 *
293 * Try to acquire spin-lock. Return success (1) or failure (0).
294 * Attempt will fail after timeout ticks of the timebase.
295 * We try fairly hard to get this lock. We disable for interruptions, but
296 * reenable after a "short" timeout (128 ticks, we may want to change this).
297 * After checking to see if the large timeout value (passed in) has expired and a
298  * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
299 * we return either in abject failure, or disable and go back to the lock sniff routine.
300 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
301 *
302  * One programming note: NEVER DO ANYTHING IN HERE THAT WOULD REQUIRE CALLERS TO HAVE
303  * TRANSLATION OR INTERRUPTIONS EITHER ON OR OFF, GOSH DARN IT!
304 *
305 */
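/*
 * Illustrative C-level caller (a minimal sketch, not part of this file;
 * "my_lock" and the work done under it are hypothetical, and hw_lock_data_t
 * is assumed to name the cache-aligned lock-word storage behind hw_lock_t):
 *
 *	hw_lock_data_t	my_lock;
 *
 *	hw_lock_init(&my_lock);			// lock starts out free
 *	if (hw_lock_to(&my_lock, LockTimeOut)) {
 *		// Lock held; we returned with preemption disabled.
 *		do_protected_work();		// hypothetical critical section
 *		hw_lock_unlock(&my_lock);	// releases the lock, re-enables preemption
 *	} else {
 *		// Timed out; the lock was never acquired.
 *	}
 */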
306 .align 5
307 .globl EXT(hw_lock_to)
308
309 LEXT(hw_lock_to)
310
311 #if 0
312 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
313 lis r5,0xEEEE /* (TEST/DEBUG) */
314 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
315 sc /* (TEST/DEBUG) */
316 #endif
317
318 #if CHECKNMI
319 mflr r12 ; (TEST/DEBUG)
320 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
321 mtlr r12 ; (TEST/DEBUG)
322 #endif
323
324 cmplw   cr1,r1,r1                       /* Set flag so we will disable preemption after locking */
325
326 lockComm: mfmsr r9 /* Get the MSR value */
327 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
328 mr r5,r3 /* Get the address of the lock */
329 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
330 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */
331
332 mtmsr r7 /* Turn off interruptions */
333 isync ; May have turned off vec and fp here
334 mftb r8 /* Get the low part of the time base */
335
336 lcktry: lwarx r6,0,r5 /* Grab the lock value */
337 andi. r3,r6,ILK_LOCKED /* Is it locked? */
338 ori r6,r6,ILK_LOCKED /* Set interlock */
339 bne- lcksniff /* Yeah, wait for it to clear... */
340 stwcx. r6,0,r5 /* Try to seize that there durn lock */
341 bne- lcktry /* Couldn't get it... */
342 li r3,1 /* return true */
343 isync                                   /* Make sure we don't use a speculatively loaded value */
344 beq+ cr1,daPreComm /* We got it, go disable preemption if we're supposed to... */
345 mtmsr r9 ; Restore interrupt state
346 blr /* Go on home... */
347
348 .align 5
349
350 lcksniff: lwz r3,0(r5) /* Get that lock in here */
351 andi. r3,r3,ILK_LOCKED /* Is it free yet? */
352 beq+ lcktry /* Yeah, try for it again... */
353
354 mftb r10 /* Time stamp us now */
355 sub r10,r10,r8 /* Get the elapsed time */
356 cmplwi r10,128 /* Have we been spinning for 128 tb ticks? */
357 blt+ lcksniff /* Not yet... */
358
359 mtmsr r9 /* Say, any interrupts pending? */
360
361 /*      The following instructions force the pipeline to be interlocked so that only one
362         instruction is issued per cycle.  This ensures that we stay enabled for a long enough
363         time; if it's too short, pending interruptions will not have a chance to be taken */
364
365 subi r4,r4,128 /* Back off elapsed time from timeout value */
366 or r4,r4,r4 /* Do nothing here but force a single cycle delay */
367 mr. r4,r4 /* See if we used the whole timeout */
368 li r3,0 /* Assume a timeout return code */
369 or r4,r4,r4 /* Do nothing here but force a single cycle delay */
370
371 ble- lckfail /* We failed */
372 mtmsr r7 /* Disable for interruptions */
373 mftb r8 /* Get the low part of the time base */
374 b lcksniff /* Now that we've opened an enable window, keep trying... */
375
376 lckfail: /* We couldn't get the lock */
377 li r3,0 /* Set failure return code */
378 blr /* Return, head hanging low... */
379
380
381 /*
382 * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
383 *
384 * Try to acquire spin-lock. The second parameter is the bit mask to test and set.
385  * Multiple bits may be set. Return success (1) or failure (0).
386 * Attempt will fail after timeout ticks of the timebase.
387 * We try fairly hard to get this lock. We disable for interruptions, but
388 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
389 * After checking to see if the large timeout value (passed in) has expired and a
390  * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
391 * we return either in abject failure, or disable and go back to the lock sniff routine.
392 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
393 *
394 * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
395 * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND
396 * RESTORE FROM THE STACK.
397 *
398 */
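/*
 * Illustrative C-level caller (a minimal sketch, not part of this file;
 * the lock word and bit mask below are hypothetical and types are simplified):
 *
 *	#define MY_BUSY_BIT	0x00000001	// bit(s) to test and set
 *
 *	unsigned int	state_word = 0;
 *
 *	if (hw_lock_bit(&state_word, MY_BUSY_BIT, LockTimeOut)) {
 *		// All masked bits were clear and have now been set atomically.
 *		hw_unlock_bit(&state_word, MY_BUSY_BIT);	// clear just those bits
 *	} else {
 *		// Some masked bit stayed set until the timeout expired.
 *	}
 */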
399
400 .align 5
401
402 nop ; Force loop alignment to cache line
403 nop
404 nop
405 nop
406
407 .globl EXT(hw_lock_bit)
408
409 LEXT(hw_lock_bit)
410
411 mfmsr r9 /* Get the MSR value */
412 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
413 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
414 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */
415
416 mtmsr r7 /* Turn off interruptions */
417 isync ; May have turned off vec and fp here
418
419 mftb r8 /* Get the low part of the time base */
420
421 bittry: lwarx r6,0,r3 /* Grab the lock value */
422 and. r0,r6,r4 /* See if any of the lock bits are on */
423 or r6,r6,r4 /* Turn on the lock bits */
424 bne- bitsniff /* Yeah, wait for it to clear... */
425 stwcx. r6,0,r3 /* Try to seize that there durn lock */
426 beq+ bitgot /* We got it, yahoo... */
427 b bittry /* Just start up again if the store failed... */
428
429 .align 5
430
431 bitsniff: lwz r6,0(r3) /* Get that lock in here */
432 and. r0,r6,r4 /* See if any of the lock bits are on */
433 beq+ bittry /* Yeah, try for it again... */
434
435 mftb r6 /* Time stamp us now */
436 sub r6,r6,r8 /* Get the elapsed time */
437 cmplwi r6,128 /* Have we been spinning for 128 tb ticks? */
438 blt+ bitsniff /* Not yet... */
439
440 mtmsr r9 /* Say, any interrupts pending? */
441
442 /*      The following instructions force the pipeline to be interlocked so that only one
443         instruction is issued per cycle.  This ensures that we stay enabled for a long enough
444 time. If it's too short, pending interruptions will not have a chance to be taken
445 */
446
447 subi r5,r5,128 /* Back off elapsed time from timeout value */
448 or r5,r5,r5 /* Do nothing here but force a single cycle delay */
449 mr. r5,r5 /* See if we used the whole timeout */
450 or r5,r5,r5 /* Do nothing here but force a single cycle delay */
451
452 ble- bitfail /* We failed */
453 mtmsr r7 /* Disable for interruptions */
454 mftb r8 /* Get the low part of the time base */
455 b bitsniff /* Now that we've opened an enable window, keep trying... */
456
457 .align 5
458
459 bitgot: mtmsr r9 /* Enable for interruptions */
460 li r3,1 /* Set good return code */
461 isync                                   /* Make sure we don't use a speculatively loaded value */
462 blr
463
464 bitfail: li r3,0 /* Set failure return code */
465 blr /* Return, head hanging low... */
466
467
468 /*
469 * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
470 *
471 * Release bit based spin-lock. The second parameter is the bit mask to clear.
472 * Multiple bits may be cleared.
473 *
474 * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
475 * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND
476 * RESTORE FROM THE STACK.
477 */
478
479 .align 5
480 .globl EXT(hw_unlock_bit)
481
482 LEXT(hw_unlock_bit)
483
484 sync
485
486 ubittry: lwarx r0,0,r3 /* Grab the lock value */
487 andc r0,r0,r4 /* Clear the lock bits */
488 stwcx. r0,0,r3 /* Try to clear that there durn lock */
489 bne- ubittry /* Try again, couldn't save it... */
490
491 blr /* Leave... */
492
493 /*
494 * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
495 * unsigned int newb, unsigned int timeout)
496 *
497 * Try to acquire spin-lock. The second parameter is the bit mask to check.
498 * The third is the value of those bits and the 4th is what to set them to.
499 * Return success (1) or failure (0).
500 * Attempt will fail after timeout ticks of the timebase.
501 * We try fairly hard to get this lock. We disable for interruptions, but
502 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
503 * After checking to see if the large timeout value (passed in) has expired and a
504  * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
505 * we return either in abject failure, or disable and go back to the lock sniff routine.
506 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
507 *
508 */
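/*
 * Illustrative C-level caller (a minimal sketch, not part of this file; the
 * state field encoding and names below are hypothetical):
 *
 *	#define STATE_MASK	0x00000003	// bits to check (2nd parameter)
 *	#define STATE_IDLE	0x00000000	// value they must have (3rd parameter)
 *	#define STATE_BUSY	0x00000001	// value to set them to (4th parameter)
 *
 *	unsigned int	ctl_word;		// word containing the state field
 *
 *	if (hw_lock_mbits(&ctl_word, STATE_MASK, STATE_IDLE, STATE_BUSY, LockTimeOut)) {
 *		// The masked bits read STATE_IDLE and were atomically rewritten to
 *		// STATE_BUSY; bits outside STATE_MASK are left untouched.
 *	} else {
 *		// The field never showed STATE_IDLE before the timeout expired.
 *	}
 */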
509
510 .align 5
511
512 nop ; Force loop alignment to cache line
513 nop
514 nop
515 nop
516
517 .globl EXT(hw_lock_mbits)
518
519 LEXT(hw_lock_mbits)
520
521 mfmsr r9 ; Get the MSR value
522 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
523 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
524 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Get MSR that is uninterruptible
525
526 mtmsr r8 ; Turn off interruptions
527 isync ; May have turned off vectors or float here
528 mftb r10 ; Get the low part of the time base
529
530 mbittry: lwarx r12,0,r3 ; Grab the lock value
531 and r0,r12,r4 ; Clear extra bits
532 andc r12,r12,r4 ; Clear all bits in the bit mask
533 or r12,r12,r6 ; Turn on the lock bits
534 cmplw r0,r5 ; Are these the right bits?
535 bne- mbitsniff ; Nope, wait for it to clear...
536 stwcx. r12,0,r3 ; Try to seize that there durn lock
537 beq+ mbitgot ; We got it, yahoo...
538 b mbittry ; Just start up again if the store failed...
539
540 .align 5
541
542 mbitsniff: lwz r12,0(r3) ; Get that lock in here
543 and r0,r12,r4 ; Clear extra bits
544 cmplw r0,r5 ; Are these the right bits?
545 beq+ mbittry ; Yeah, try for it again...
546
547 mftb r11 ; Time stamp us now
548 sub r11,r11,r10 ; Get the elapsed time
549 cmplwi r11,128 ; Have we been spinning for 128 tb ticks?
550 blt+ mbitsniff ; Not yet...
551
552 mtmsr r9 ; Say, any interrupts pending?
553
554 ;       The following instructions force the pipeline to be interlocked so that only one
555 ;       instruction is issued per cycle.  This ensures that we stay enabled for a long enough
556 ; time. If it is too short, pending interruptions will not have a chance to be taken
557
558 subi r7,r7,128 ; Back off elapsed time from timeout value
559 or r7,r7,r7 ; Do nothing here but force a single cycle delay
560 mr. r7,r7 ; See if we used the whole timeout
561 or r7,r7,r7 ; Do nothing here but force a single cycle delay
562
563 ble- mbitfail ; We failed
564 mtmsr r8 ; Disable for interruptions
565 mftb r10 ; Get the low part of the time base
566 b mbitsniff ; Now that we have opened an enable window, keep trying...
567
568 .align 5
569
570 mbitgot: mtmsr r9 ; Enable for interruptions
571 li r3,1 ; Set good return code
572 isync                                   ; Make sure we do not use a speculatively loaded value
573 blr
574
575 mbitfail: li r3,0 ; Set failure return code
576 blr ; Return, head hanging low...
577
578
579 /*
580 * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
581 *
582 * Spin until word hits 0 or timeout.
583 * Return success (1) or failure (0).
584 * Attempt will fail after timeout ticks of the timebase.
585 *
586  * The theory is that a processor will bump a counter as it signals
587  * other processors. Then it will spin until the counter hits 0 (or
588  * times out). The other processors, as they receive the signal, will
589  * decrement the counter.
590 *
591  * The other processors use an interlocked update to decrement; this one
592  * does not need to interlock.
593 *
594 */
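/*
 * Illustrative C-level use of the pattern described above (a minimal sketch,
 * not part of this file; "outstanding", "ncpus", and the cross-call routine
 * are hypothetical):
 *
 *	unsigned int	outstanding = 0;	// one tick per signalled processor
 *
 *	// Signalling processor:
 *	(void)hw_atomic_add(&outstanding, ncpus - 1);	// bump for each target
 *	signal_other_processors();			// hypothetical cross-call
 *	if (!hw_cpu_sync(&outstanding, LockTimeOut)) {
 *		// Timed out; some target never acknowledged.
 *	}
 *
 *	// Each target, when it takes the signal:
 *	(void)hw_atomic_sub(&outstanding, 1);		// interlocked decrement
 */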
595
596 .align 5
597
598 .globl EXT(hw_cpu_sync)
599
600 LEXT(hw_cpu_sync)
601
602 mftb r10 ; Get the low part of the time base
603 mr r9,r3 ; Save the sync word address
604 li r3,1 ; Assume we work
605
606 csynctry: lwz r11,0(r9) ; Grab the sync value
607 mr. r11,r11 ; Counter hit 0?
608 beqlr-                                  ; Yeah, we are done...
609 mftb r12 ; Time stamp us now
610
611 sub r12,r12,r10 ; Get the elapsed time
612 cmplw r4,r12 ; Have we gone too long?
613 bge+ csynctry ; Not yet...
614
615 li r3,0 ; Set failure...
616 blr ; Return, head hanging low...
617
618 /*
619 * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
620 *
621 * Spin until word changes or timeout.
622 * Return success (1) or failure (0).
623 * Attempt will fail after timeout ticks of the timebase.
624 *
625  * This is used to ensure that a processor passes a certain point.
626 * An example of use is to monitor the last interrupt time in the
627  * per_proc block. This can be used to ensure that the other processor
628 * has seen at least one interrupt since a specific time.
629 *
630 */
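/*
 * Illustrative C-level caller (a minimal sketch, not part of this file;
 * the sampled word and the way it is located are hypothetical):
 *
 *	unsigned int	*intr_word;		// e.g. the other cpu's last-interrupt word
 *	unsigned int	seen;
 *
 *	seen = *intr_word;			// value we want to see change
 *	if (hw_cpu_wcng(intr_word, seen, LockTimeOut)) {
 *		// Changed: the other processor took at least one interrupt
 *		// after we sampled the word.
 *	} else {
 *		// No change was observed before the timeout.
 *	}
 */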
631
632 .align 5
633
634 .globl EXT(hw_cpu_wcng)
635
636 LEXT(hw_cpu_wcng)
637
638 mftb r10 ; Get the low part of the time base
639 mr r9,r3 ; Save the sync word address
640 li r3,1 ; Assume we work
641
642 wcngtry: lwz r11,0(r9) ; Grab the value
643 cmplw r11,r4 ; Do they still match?
644 bnelr- ; Nope, cool...
645 mftb r12 ; Time stamp us now
646
647 sub r12,r12,r10 ; Get the elapsed time
648 cmplw r5,r12 ; Have we gone too long?
649 bge+ wcngtry ; Not yet...
650
651 li r3,0 ; Set failure...
652 blr ; Return, head hanging low...
653
654
655 /*
656 * unsigned int hw_lock_try(hw_lock_t)
657 *
658 * Try to acquire spin-lock. Return success (1) or failure (0)
659 * Returns with preemption disabled on success.
660 *
661 */
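/*
 * Illustrative C-level caller (a minimal sketch, not part of this file;
 * "my_lock" and the fallback path are hypothetical):
 *
 *	if (hw_lock_try(&my_lock)) {
 *		// Lock held and preemption disabled; do the quick work.
 *		hw_lock_unlock(&my_lock);	// also re-enables preemption
 *	} else {
 *		// Lock busy: back off or take a slower path.
 *	}
 */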
662 .align 5
663 .globl EXT(hw_lock_try)
664
665 LEXT(hw_lock_try)
666
667 #if 0
668 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
669 lis r5,0x9999 /* (TEST/DEBUG) */
670 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
671 sc /* (TEST/DEBUG) */
672 #endif
673 mfmsr r9 /* Save the MSR value */
674 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
675 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
676 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruption bit */
677
678 #if MACH_LDEBUG
679 lis r5, 0x10 /* roughly 1E6 */
680 mtctr r5
681 #endif /* MACH_LDEBUG */
682
683 mtmsr r7 /* Disable interruptions and thus, preemption */
684 isync ; May have turned off fp/vec here
685 .L_lock_try_loop:
686
687 #if MACH_LDEBUG
688 bdnz+ 0f /* Count attempts */
689 mtmsr r9 /* Restore enablement */
690 BREAKPOINT_TRAP /* Get to debugger */
691 mtmsr r7 /* Disable interruptions and thus, preemption */
692 0:
693 #endif /* MACH_LDEBUG */
694
695 lwarx r5,0,r3 /* Ld from addr of arg and reserve */
696
697 andi. r6,r5,ILK_LOCKED /* TEST... */
698 ori r5,r5,ILK_LOCKED
699 bne- .L_lock_try_failed /* branch if taken. Predict free */
700
701 stwcx. r5,0,r3 /* And SET (if still reserved) */
702 mfsprg r6,0 /* Get the per_proc block */
703 bne- .L_lock_try_loop /* If set failed, loop back */
704
705 isync
706
707 lwz r5,PP_PREEMPT_CNT(r6) /* Get the preemption level */
708 addi r5,r5,1 /* Bring up the disable count */
709 stw r5,PP_PREEMPT_CNT(r6) /* Save it back */
710
711 mtmsr r9 /* Allow interruptions now */
712 li r3,1 /* Set that the lock was free */
713 blr
714
715 .L_lock_try_failed:
716 mtmsr r9 /* Allow interruptions now */
717 li r3,0 /* FAILURE - lock was taken */
718 blr
719
720 /*
721 * unsigned int hw_lock_held(hw_lock_t)
722 *
723 * Return 1 if lock is held
724 * Doesn't change preemption state.
725 * N.B. Racy, of course.
726 *
727 */
728 .align 5
729 .globl EXT(hw_lock_held)
730
731 LEXT(hw_lock_held)
732
733 #if 0
734 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
735 lis r5,0x8888 /* (TEST/DEBUG) */
736 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
737 sc /* (TEST/DEBUG) */
738 #endif
739 isync                                   /* Make sure we don't use a speculatively fetched lock */
740 lwz r3, 0(r3) /* Return value of lock */
741 blr
742
743 /*
744 * uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
745 *
746  * Compare oldval with the contents of dest; if equal, store newval and return true,
747  * otherwise return false and do not store.
748  * This is an atomic operation.
749 *
750 */
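/*
 * Illustrative C-level caller (a minimal sketch, not part of this file; the
 * "record a running maximum" use and the names below are hypothetical):
 *
 *	uint32_t	observed, sample;
 *
 *	sample = new_measurement();		// hypothetical candidate value
 *	do {
 *		observed = *max_seen;		// hypothetical pointer to the shared maximum
 *		if (sample <= observed)
 *			break;			// nothing to update
 *	} while (!hw_compare_and_store(observed, sample, max_seen));
 *	// The store only happens if the word still holds "observed" at the stwcx.,
 *	// so *max_seen never moves backwards.
 */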
751 .align 5
752 .globl EXT(hw_compare_and_store)
753
754 LEXT(hw_compare_and_store)
755
756 mr r6,r3 /* Save the old value */
757
758 cstry: lwarx r9,0,r5 /* Grab the area value */
759 li r3,1 /* Assume it works */
760 cmplw cr0,r9,r6 /* Does it match the old value? */
761 bne- csfail /* No, it must have changed... */
762 stwcx. r4,0,r5 /* Try to save the new value */
763 bne- cstry /* Didn't get it, try again... */
764 isync /* Just hold up prefetch */
765 blr /* Return... */
766
767 csfail: li r3,0 /* Set failure */
768 blr /* Better luck next time... */
769
770
771 /*
772 * uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
773 *
774 * Atomically add the second parameter to the first.
775 * Returns the result.
776 *
777 */
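/*
 * Illustrative C-level caller (a minimal sketch, not part of this file;
 * "refcount" is a hypothetical reference count):
 *
 *	uint32_t	refcount = 1;
 *
 *	(void)hw_atomic_add(&refcount, 1);		// take a reference
 *	if (hw_atomic_sub(&refcount, 1) == 0) {		// drop it; the new value is returned
 *		// Last reference just went away.
 *	}
 */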
778 .align 5
779 .globl EXT(hw_atomic_add)
780
781 LEXT(hw_atomic_add)
782
783 mr r6,r3 /* Save the area */
784
785 addtry: lwarx r3,0,r6 /* Grab the area value */
786 add r3,r3,r4 /* Add the value */
787 stwcx. r3,0,r6 /* Try to save the new value */
788 bne- addtry /* Didn't get it, try again... */
789 blr /* Return... */
790
791
792 /*
793 * uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt)
794 *
795 * Atomically subtract the second parameter from the first.
796 * Returns the result.
797 *
798 */
799 .align 5
800 .globl EXT(hw_atomic_sub)
801
802 LEXT(hw_atomic_sub)
803
804 mr r6,r3 /* Save the area */
805
806 subtry: lwarx r3,0,r6 /* Grab the area value */
807 sub r3,r3,r4 /* Subtract the value */
808 stwcx. r3,0,r6 /* Try to save the new value */
809 bne- subtry /* Didn't get it, try again... */
810 blr /* Return... */
811
812
813 /*
814 * uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask)
815 *
816 * Atomically ORs the second parameter into the first.
817 * Returns the result.
818 *
819 */
820 .align 5
821 .globl EXT(hw_atomic_or)
822
823 LEXT(hw_atomic_or)
824
825 mr r6,r3 ; Save the area
826
827 ortry: lwarx r3,0,r6 ; Grab the area value
828 or r3,r3,r4 ; OR the value
829 stwcx. r3,0,r6 ; Try to save the new value
830 bne- ortry ; Did not get it, try again...
831 blr ; Return...
832
833
834 /*
835 * uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask)
836 *
837 * Atomically ANDs the second parameter with the first.
838 * Returns the result.
839 *
840 */
841 .align 5
842 .globl EXT(hw_atomic_and)
843
844 LEXT(hw_atomic_and)
845
846 mr r6,r3 ; Save the area
847
848 andtry: lwarx r3,0,r6 ; Grab the area value
849 and r3,r3,r4 ; AND the value
850 stwcx. r3,0,r6 ; Try to save the new value
851 bne- andtry ; Did not get it, try again...
852 blr ; Return...
853
854
855 /*
856 * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
857 *
858 * Atomically inserts the element at the head of the list
859 * anchor is the pointer to the first element
860 * element is the pointer to the element to insert
861 * disp is the displacement into the element to the chain pointer
862 *
863 */
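/*
 * Illustrative C-level caller (a minimal sketch, not part of this file; the
 * element layout is hypothetical and types are simplified):
 *
 *	struct elem {
 *		unsigned int	data;
 *		unsigned int	next;		// chain pointer lives at this offset
 *	};
 *
 *	unsigned int	anchor = 0;		// empty list
 *	struct elem	e;
 *
 *	// Push e on the front of the list; the last argument is the chain-pointer offset.
 *	hw_queue_atomic(&anchor, (unsigned int *)&e, offsetof(struct elem, next));
 */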
864 .align 5
865 .globl EXT(hw_queue_atomic)
866
867 LEXT(hw_queue_atomic)
868
869 mr r7,r4 /* Make end point the same as start */
870 mr r8,r5 /* Copy the displacement also */
871 b hw_queue_comm /* Join common code... */
872
873 /*
874 * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
875 *
876 * Atomically inserts the list of elements at the head of the list
877 * anchor is the pointer to the first element
878 * first is the pointer to the first element to insert
879 * last is the pointer to the last element to insert
880 * disp is the displacement into the element to the chain pointer
881 *
882 */
883 .align 5
884 .globl EXT(hw_queue_atomic_list)
885
886 LEXT(hw_queue_atomic_list)
887
888 mr r7,r5 /* Make end point the same as start */
889 mr r8,r6 /* Copy the displacement also */
890
891 hw_queue_comm:
892 lwarx r9,0,r3 /* Pick up the anchor */
893 stwx r9,r8,r7 /* Chain that to the end of the new stuff */
894 eieio ; Make sure this store makes it before the anchor update
895 stwcx. r4,0,r3 /* Try to chain into the front */
896 bne- hw_queue_comm /* Didn't make it, try again... */
897
898 blr /* Return... */
899
900 /*
901 * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
902 *
903 * Atomically removes the first element in a list and returns it.
904 * anchor is the pointer to the first element
905 * disp is the displacement into the element to the chain pointer
906 * Returns element if found, 0 if empty.
907 *
908 */
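/*
 * Illustrative C-level caller (a minimal sketch, not part of this file,
 * continuing the hypothetical "struct elem" layout shown for hw_queue_atomic):
 *
 *	struct elem	*e;
 *
 *	e = (struct elem *)hw_dequeue_atomic(&anchor, offsetof(struct elem, next));
 *	if (e == 0) {
 *		// The list was empty.
 *	}
 */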
909 .align 5
910 .globl EXT(hw_dequeue_atomic)
911
912 LEXT(hw_dequeue_atomic)
913
914 mr r5,r3 /* Save the anchor */
915
916 hw_dequeue_comm:
917 lwarx r3,0,r5 /* Pick up the anchor */
918 mr. r3,r3 /* Is the list empty? */
919 beqlr-                                  /* Leave if list empty... */
920 lwzx r9,r4,r3 /* Get the next in line */
921 stwcx. r9,0,r5 /* Try to chain into the front */
922 beqlr+ ; Got the thing, go away with it...
923 b hw_dequeue_comm ; Did not make it, try again...
924
925 /*
926 * void mutex_init(mutex_t* l, etap_event_t etap)
927 */
928
929 ENTRY(mutex_init,TAG_NO_FRAME_USED)
930
931 PROLOG(0)
932 li r10, 0
933 stw r10, LOCK_DATA(r3) /* clear lock word */
934 sth r10, MUTEX_WAITERS(r3) /* init waiter count */
935 sth r10, MUTEX_PROMOTED_PRI(r3)
936 #if MACH_LDEBUG
937 stw r10, MUTEX_PC(r3) /* init caller pc */
938 stw r10, MUTEX_THREAD(r3) /* and owning thread */
939 li r10, MUTEX_TAG
940 stw r10, MUTEX_TYPE(r3) /* set lock type */
941 #endif /* MACH_LDEBUG */
942
943 #if ETAP_LOCK_TRACE
944 bl EXT(etap_mutex_init) /* init ETAP data */
945 #endif /* ETAP_LOCK_TRACE */
946
947 EPILOG
948 blr
949
950 /*
951 * void mutex_lock(mutex_t*)
952 */
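/*
 * Illustrative C-level caller (a minimal sketch, not part of this file;
 * "my_mutex" and the work done under it are hypothetical, and ETAP_NO_TRACE
 * is assumed as a placeholder etap event id):
 *
 *	mutex_t		my_mutex;
 *
 *	mutex_init(&my_mutex, ETAP_NO_TRACE);	// second argument is the etap event
 *	mutex_lock(&my_mutex);			// may block; no simple locks may be held
 *	protected_update();			// hypothetical work done under the mutex
 *	mutex_unlock(&my_mutex);
 *
 *	if (mutex_try(&my_mutex)) {		// non-blocking variant, returns 1 on success
 *		mutex_unlock(&my_mutex);
 *	}
 */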
953
954 .align 5
955 .globl EXT(mutex_lock)
956 LEXT(mutex_lock)
957
958 .globl EXT(_mutex_lock)
959 LEXT(_mutex_lock)
960
961 #if !MACH_LDEBUG
962 mfsprg r6,1 /* load the current thread */
963 L_mutex_lock_loop:
964 lwarx r5,0,r3 /* load the mutex lock */
965 mr. r5,r5
966 bne- L_mutex_lock_slow /* go to the slow path */
967 stwcx. r6,0,r3 /* grab the lock */
968 bne- L_mutex_lock_loop /* loop back if failed */
969 isync                                   /* stop prefetching */
970 blr
971 L_mutex_lock_slow:
972 #endif
973 #if CHECKNMI
974 mflr r12 ; (TEST/DEBUG)
975 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
976 mtlr r12 ; (TEST/DEBUG)
977 #endif
978
979 PROLOG(12)
980 #if MACH_LDEBUG
981 bl EXT(assert_wait_possible)
982 mr. r3,r3
983 bne L_mutex_lock_assert_wait_1
984 lis r3,hi16(L_mutex_lock_assert_wait_panic_str)
985 ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
986 bl EXT(panic)
987
988 .data
989 L_mutex_lock_assert_wait_panic_str:
990 STRINGD "mutex_lock: assert_wait_possible false\n\000"
991 .text
992
993 L_mutex_lock_assert_wait_1:
994 lwz r3,FM_ARG0(r1)
995 #endif
996
997 #if ETAP_LOCK_TRACE
998 li r0, 0
999 stw r0,SWT_HI(r1) /* set wait time to 0 (HI) */
1000 stw r0,SWT_LO(r1) /* set wait time to 0 (LO) */
1001 stw r0,MISSED(r1) /* clear local miss marker */
1002 #endif /* ETAP_LOCK_TRACE */
1003
1004 CHECK_SETUP(r12)
1005 CHECK_MUTEX_TYPE()
1006 CHECK_NO_SIMPLELOCKS()
1007
1008 .L_ml_retry:
1009 #if 0
1010 mfsprg r4,0 /* (TEST/DEBUG) */
1011 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1012 lwz r4,PP_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
1013 lis r5,0xAAAA /* (TEST/DEBUG) */
1014 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1015 sc /* (TEST/DEBUG) */
1016 #endif
1017
1018 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1019 mr. r4,r3 /* Did we get it? */
1020 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1021 bne+ mlGotInt /* We got it just fine... */
1022
1023 lis r3,HIGH_ADDR(mutex_failed1) ; Get the failed mutex message
1024 ori r3,r3,LOW_ADDR(mutex_failed1) ; Get the failed mutex message
1025 bl EXT(panic) ; Call panic
1026 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1027
1028 .data
1029 mutex_failed1:
1030 STRINGD "We can't get a mutex interlock lock on mutex_lock\n\000"
1031 .text
1032
1033 mlGotInt:
1034
1035 /* Note that there is no reason to do a load and reserve here. We already
1036 hold the interlock lock and no one can touch this field unless they
1037 have that, so, we're free to play */
1038
1039 lwz r4,LOCK_DATA(r3) /* Get the mutex's lock field */
1040 rlwinm. r9,r4,30,2,31 /* So, can we have it? */
1041 bne-    mlInUse                         /* Nope, somebody's playing already... */
1042
1043 #if MACH_LDEBUG
1044 mfmsr r11 ; Note: no need to deal with fp or vec here
1045 rlwinm r5,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1046 mtmsr r5
1047 mfsprg r9,0 /* Get the per_proc block */
1048 lwz r5,0(r1) /* Get previous save frame */
1049 lwz r5,FM_LR_SAVE(r5) /* Get our caller's address */
1050 lwz r8, PP_ACTIVE_THREAD(r9) /* Get the active thread */
1051 stw r5,MUTEX_PC(r3) /* Save our caller */
1052 mr. r8,r8 /* Is there any thread? */
1053 stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */
1054 beq- .L_ml_no_active_thread /* No owning thread... */
1055 lwz r9,THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
1056 addi r9,r9,1 /* Bump it up */
1057 stw r9,THREAD_MUTEX_COUNT(r8) /* Stash it back */
1058 .L_ml_no_active_thread:
1059 mtmsr r11
1060 #endif /* MACH_LDEBUG */
1061
1062 bl EXT(mutex_lock_acquire)
1063 mfsprg r5,1
1064 mr. r4,r3
1065 lwz r3,FM_ARG0(r1)
1066 beq mlUnlock
1067 ori r5,r5,WAIT_FLAG
1068 mlUnlock:
1069 sync
1070 stw r5,LOCK_DATA(r3) /* grab the mutexlock and free the interlock */
1071
1072 #if ETAP_LOCK_TRACE
1073 mflr r4
1074 lwz r5,SWT_HI(r1)
1075 lwz r6,SWT_LO(r1)
1076 bl EXT(etap_mutex_hold) /* collect hold timestamp */
1077 #endif /* ETAP_LOCK_TRACE */
1078
1079 EPILOG /* Restore all saved registers */
1080
1081 b epStart /* Go enable preemption... */
1082
1083 /*
1084 * We come to here when we have a resource conflict. In other words,
1085 * the mutex is held.
1086 */
1087
1088 mlInUse:
1089
1090 #if ETAP_LOCK_TRACE
1091 lwz r7,MISSED(r1)
1092 cmpwi r7,0 /* did we already take a wait timestamp ? */
1093 bne .L_ml_block /* yup. carry-on */
1094 bl EXT(etap_mutex_miss) /* get wait timestamp */
1095 stw r3,SWT_HI(r1) /* store timestamp */
1096 stw r4,SWT_LO(r1)
1097 li r7, 1 /* mark wait timestamp as taken */
1098 stw r7,MISSED(r1)
1099 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1100 .L_ml_block:
1101 #endif /* ETAP_LOCK_TRACE */
1102
1103 CHECK_SETUP(r12)
1104 CHECK_MYLOCK(MUTEX_THREAD) /* Assert we don't own the lock already */
1105
1106
1107 /* Note that we come in here with the interlock set. The wait routine
1108 * will unlock it before waiting.
1109 */
1110 ori r4,r4,WAIT_FLAG /* Set the wait flag */
1111 stw r4,LOCK_DATA(r3)
1112 rlwinm r4,r4,0,0,29 /* Extract the lock owner */
1113 bl EXT(mutex_lock_wait) /* Wait for our turn at the lock */
1114
1115 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1116 b .L_ml_retry /* and try again... */
1117
1118
1119 /*
1120 * void _mutex_try(mutex_t*)
1121 *
1122 */
1123
1124 .align 5
1125 .globl EXT(mutex_try)
1126 LEXT(mutex_try)
1127 .globl EXT(_mutex_try)
1128 LEXT(_mutex_try)
1129 #if !MACH_LDEBUG
1130 mfsprg r6,1 /* load the current thread */
1131 L_mutex_try_loop:
1132 lwarx r5,0,r3 /* load the lock value */
1133 mr. r5,r5
1134 bne- L_mutex_try_slow /* branch to the slow path */
1135 stwcx. r6,0,r3 /* grab the lock */
1136 bne- L_mutex_try_loop /* retry if failed */
1137 isync /* stop prefetching */
1138 li r3, 1
1139 blr
1140 L_mutex_try_slow:
1141 #endif
1142
1143 PROLOG(8) /* reserve space for SWT_HI and SWT_LO */
1144
1145 #if ETAP_LOCK_TRACE
1146 li r5, 0
1147 stw     r5, SWT_HI(r1)                  /* set wait time to 0 (HI) */
1148 stw r5, SWT_LO(r1) /* set wait time to 0 (LO) */
1149 #endif /* ETAP_LOCK_TRACE */
1150
1151 #if 0
1152 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1153 lis r5,0xBBBB /* (TEST/DEBUG) */
1154 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1155 sc /* (TEST/DEBUG) */
1156 #endif
1157 CHECK_SETUP(r12)
1158 CHECK_MUTEX_TYPE()
1159 CHECK_NO_SIMPLELOCKS()
1160
1161 lwz r6,LOCK_DATA(r3) /* Quick check */
1162 rlwinm. r6,r6,30,2,31 /* to see if someone has this lock already */
1163 bne- mtFail /* Someone's got it already... */
1164
1165 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1166 mr. r4,r3 /* Did we get it? */
1167 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1168 bne+ mtGotInt /* We got it just fine... */
1169
1170 lis r3,HIGH_ADDR(mutex_failed2) ; Get the failed mutex message
1171 ori r3,r3,LOW_ADDR(mutex_failed2) ; Get the failed mutex message
1172 bl EXT(panic) ; Call panic
1173 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1174
1175 .data
1176 mutex_failed2:
1177 STRINGD "We can't get a mutex interlock lock on mutex_try\n\000"
1178 .text
1179
1180 mtGotInt:
1181
1182 /* Note that there is no reason to do a load and reserve here. We already
1183 hold the interlock and no one can touch this field unless they
1184 have that, so, we're free to play */
1185
1186 lwz r4,LOCK_DATA(r3) /* Get the mutex's lock field */
1187 rlwinm. r9,r4,30,2,31 /* So, can we have it? */
1188 bne-    mtInUse                         /* Nope, somebody's playing already... */
1189
1190 #if MACH_LDEBUG
1191 mfmsr r11
1192 rlwinm r5,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1193 mtmsr r5
1194 mfsprg r9,0 /* Get the per_proc block */
1195 lwz r5,0(r1) /* Get previous save frame */
1196 lwz r5,FM_LR_SAVE(r5) /* Get our caller's address */
1197 lwz r8, PP_ACTIVE_THREAD(r9) /* Get the active thread */
1198 stw r5,MUTEX_PC(r3) /* Save our caller */
1199 mr. r8,r8 /* Is there any thread? */
1200 stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */
1201 beq- .L_mt_no_active_thread /* No owning thread... */
1202 lwz r9, THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
1203 addi r9, r9, 1 /* Bump it up */
1204 stw r9, THREAD_MUTEX_COUNT(r8) /* Stash it back */
1205 .L_mt_no_active_thread:
1206 mtmsr r11
1207 #endif /* MACH_LDEBUG */
1208
1209 bl EXT(mutex_lock_acquire)
1210 mfsprg r5,1
1211 mr. r4,r3
1212 lwz r3,FM_ARG0(r1)
1213 beq mtUnlock
1214 ori r5,r5,WAIT_FLAG
1215 mtUnlock:
1216 sync /* Push it all out */
1217 stw r5,LOCK_DATA(r3) /* grab the mutexlock and free the interlock */
1218
1219 #if ETAP_LOCK_TRACE
1220 lwz r4,0(r1) /* Back chain the stack */
1221 lwz r5,SWT_HI(r1)
1222 lwz r4,FM_LR_SAVE(r4) /* Get our caller's address */
1223 lwz r6,SWT_LO(r1)
1224 bl EXT(etap_mutex_hold) /* collect hold timestamp */
1225 #endif /* ETAP_LOCK_TRACE */
1226
1227 bl epStart /* Go enable preemption... */
1228
1229 li r3, 1
1230 EPILOG /* Restore all saved registers */
1231 blr /* Return... */
1232
1233 /*
1234 * We come to here when we have a resource conflict. In other words,
1235 * the mutex is held.
1236 */
1237
1238 mtInUse:
1239 rlwinm r4,r4,0,0,30 /* Get the unlock value */
1240 stw r4,LOCK_DATA(r3) /* free the interlock */
1241 bl epStart /* Go enable preemption... */
1242
1243 mtFail: li r3,0 /* Set failure code */
1244 EPILOG /* Restore all saved registers */
1245 blr /* Return... */
1246
1247
1248 /*
1249 * void mutex_unlock(mutex_t* l)
1250 */
1251
1252 .align 5
1253 .globl EXT(mutex_unlock)
1254
1255 LEXT(mutex_unlock)
1256 #if !MACH_LDEBUG
1257 sync
1258 L_mutex_unlock_loop:
1259 lwarx r5,0,r3
1260 rlwinm. r4,r5,0,30,31 /* Bail if pending waiter or interlock set */
1261 li r5,0 /* Clear the mutexlock */
1262 bne- L_mutex_unlock_slow
1263 stwcx. r5,0,r3
1264 bne- L_mutex_unlock_loop
1265 blr
1266 L_mutex_unlock_slow:
1267 #endif
1268 PROLOG(0)
1269
1270 #if ETAP_LOCK_TRACE
1271 bl EXT(etap_mutex_unlock) /* collect ETAP data */
1272 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1273 #endif /* ETAP_LOCK_TRACE */
1274
1275 CHECK_SETUP(r12)
1276 CHECK_MUTEX_TYPE()
1277 CHECK_THREAD(MUTEX_THREAD)
1278
1279 #if 0
1280 mfsprg r4,0 /* (TEST/DEBUG) */
1281 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1282 lwz r4,PP_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
1283 lis r5,0xCCCC /* (TEST/DEBUG) */
1284 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1285 sc /* (TEST/DEBUG) */
1286 #endif
1287 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1288 mr. r4,r3 /* Did we get it? */
1289 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1290 bne+ muGotInt /* We got it just fine... */
1291
1292 lis r3,HIGH_ADDR(mutex_failed3) ; Get the failed mutex message
1293 ori r3,r3,LOW_ADDR(mutex_failed3) ; Get the failed mutex message
1294 bl EXT(panic) ; Call panic
1295 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1296
1297 .data
1298 mutex_failed3:
1299 STRINGD "We can't get a mutex interlock lock on mutex_unlock\n\000"
1300 .text
1301
1302
1303 muGotInt:
1304 lwz r4,LOCK_DATA(r3)
1305 andi. r5,r4,WAIT_FLAG /* are there any waiters ? */
1306 rlwinm r4,r4,0,0,29
1307 beq+ muUnlock /* Nope, we're done... */
1308
1309 bl EXT(mutex_unlock_wakeup) /* yes, wake a thread */
1310 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1311 lwz r5,LOCK_DATA(r3) /* load the lock */
1312
1313 muUnlock:
1314 #if MACH_LDEBUG
1315 mfmsr r11
1316 rlwinm r9,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1317 mtmsr r9
1318 mfsprg r9,0
1319 lwz r9,PP_ACTIVE_THREAD(r9)
1320 stw r9,MUTEX_THREAD(r3) /* disown thread */
1321 cmpwi r9,0
1322 beq- .L_mu_no_active_thread
1323 lwz r8,THREAD_MUTEX_COUNT(r9)
1324 subi r8,r8,1
1325 stw r8,THREAD_MUTEX_COUNT(r9)
1326 .L_mu_no_active_thread:
1327 mtmsr r11
1328 #endif /* MACH_LDEBUG */
1329
1330 andi. r5,r5,WAIT_FLAG /* Get the unlock value */
1331 sync /* Make sure it's all there before we release */
1332 stw r5,LOCK_DATA(r3) /* unlock the interlock and lock */
1333
1334 EPILOG /* Deal with the stack now, enable_preemption doesn't always want one */
1335 b epStart /* Go enable preemption... */
1336
1337 /*
1338 * void interlock_unlock(hw_lock_t lock)
1339 */
1340
1341 .align 5
1342 .globl EXT(interlock_unlock)
1343
1344 LEXT(interlock_unlock)
1345
1346 #if 0
1347 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1348 lis r5,0xDDDD /* (TEST/DEBUG) */
1349 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1350 sc /* (TEST/DEBUG) */
1351 #endif
1352 lwz r10,LOCK_DATA(r3)
1353 rlwinm r10,r10,0,0,30
1354 sync
1355 stw r10,LOCK_DATA(r3)
1356
1357 b epStart /* Go enable preemption... */
1358
1359 /*
1360 * Here is where we enable preemption. We need to be protected
1361 * against ourselves, we can't chance getting interrupted and modifying
1362  * our processor-wide preemption count after we've loaded it up. So,
1363 * we need to disable all 'rupts. Actually, we could use a compare
1364 * and swap to do this, but, since there are no MP considerations
1365 * (we are dealing with a CPU local field) it is much, much faster
1366 * to disable.
1367 *
1368  * Note that if we are not genned MP, the calls here will be no-opped via
1369  * a #define; and since the _mp forms are the same, a #define will likewise
1370  * be used to route to the other forms.
1371 */
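/*
 * The enable path below, restated as a C-level sketch (illustrative only;
 * the helper names are hypothetical and the AST handling is simplified):
 *
 *	void _enable_preemption(void)
 *	{
 *		boolean_t was_enabled = interrupts_enabled();	// from the saved MSR
 *		disable_interrupts();				// the mtmsr r8 below
 *		if (--per_proc->preempt_cnt == 0 &&		// CPU-local, no atomic needed
 *		    (*per_proc->need_ast & AST_URGENT) &&	// urgent AST pending?
 *		    was_enabled) {				// cannot preempt with EE off
 *			restore_interrupts();			// back to the caller's MSR
 *			do_preempt_call();			// the DoPreemptCall sc below
 *		} else {
 *			restore_interrupts();
 *		}
 *	}
 */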
1372
1373 /* This version does not check if we get preempted or not */
1374
1375
1376 .align 4
1377 .globl EXT(_enable_preemption_no_check)
1378
1379 LEXT(_enable_preemption_no_check)
1380 cmplw cr1,r1,r1 /* Force zero cr so we know not to check if preempted */
1381 b epCommn /* Join up with the other enable code... */
1382
1383
1384 /* This version checks if we get preempted or not */
1385
1386 .align 5
1387 .globl EXT(_enable_preemption)
1388
1389 LEXT(_enable_preemption)
1390
1391 epStart: cmplwi cr1,r1,0 /* Force non-zero cr so we know to check if preempted */
1392
1393 /*
1394 * Common enable preemption code
1395 */
1396
1397 epCommn: mfmsr r9 /* Save the old MSR */
1398 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1399 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1400 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1401 mtmsr r8 /* Interrupts off */
1402 isync                                   ; May have messed with vec/fp here
1403
1404 mfsprg r3,0 /* Get the per_proc block */
1405 li      r8,-1                           /* Get a decrementer */
1406 lwz r5,PP_PREEMPT_CNT(r3) /* Get the preemption level */
1407 add. r5,r5,r8 /* Bring down the disable count */
1408 #if 0
1409 mfsprg  r4,1                            ; (TEST/DEBUG) Note the next 3 keep from interrupting too early
1410 mr. r4,r4 ; (TEST/DEBUG)
1411 beq- epskptrc0 ; (TEST/DEBUG)
1412 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1413 lis r4,0xBBBB ; (TEST/DEBUG)
1414 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1415 sc ; (TEST/DEBUG)
1416 epskptrc0: mr. r5,r5 ; (TEST/DEBUG)
1417 #endif
1418 #if MACH_LDEBUG
1419 blt- epTooFar /* Yeah, we did... */
1420 #endif /* MACH_LDEBUG */
1421 stw r5,PP_PREEMPT_CNT(r3) /* Save it back */
1422
1423 beq+ epCheckPreempt /* Go check if we need to be preempted... */
1424
1425 epNoCheck: mtmsr r9 /* Restore the interrupt level */
1426 blr /* Leave... */
1427
1428 #if MACH_LDEBUG
1429 epTooFar:
1430 lis r6,HIGH_ADDR(EXT(panic)) /* First half of panic call */
1431 lis r3,HIGH_ADDR(epTooFarStr) /* First half of panic string */
1432 ori r6,r6,LOW_ADDR(EXT(panic)) /* Second half of panic call */
1433 ori r3,r3,LOW_ADDR(epTooFarStr) /* Second half of panic string */
1434 mtlr r6 /* Get the address of the panic routine */
1435 mtmsr r9 /* Restore interruptions */
1436 blrl /* Panic... */
1437
1438 .data
1439 epTooFarStr:
1440 STRINGD "_enable_preemption: preemption_level <= 0!\000"
1441 .text
1442 #endif /* MACH_LDEBUG */
1443
1444 .align 5
1445
1446 epCheckPreempt:
1447 lwz r7,PP_NEED_AST(r3) /* Get the AST request address */
1448 li r5,AST_URGENT /* Get the requests we do honor */
1449 lwz r7,0(r7) /* Get the actual, real live, extra special AST word */
1450 lis r0,HIGH_ADDR(DoPreemptCall) /* Just in case, get the top of firmware call */
1451 and. r7,r7,r5 ; Should we preempt?
1452 ori r0,r0,LOW_ADDR(DoPreemptCall) /* Merge in bottom part */
1453 beq+ epCPno ; No preemption here...
1454
1455 andi. r3,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off
1456
1457 epCPno: mtmsr r9 /* Allow interrupts if we can */
1458 beqlr+ ; We probably will not preempt...
1459 sc /* Do the preemption */
1460 blr /* Now, go away now... */
1461
1462 /*
1463 * Here is where we disable preemption. Since preemption is on a
1464 * per processor basis (a thread runs on one CPU at a time) we don't
1465 * need any cross-processor synchronization. We do, however, need to
1466 * be interrupt safe, so we don't preempt while in the process of
1467 * disabling it. We could use SPLs, but since we always want complete
1468 * disablement, and this is platform specific code, we'll just kick the
1469 * MSR. We'll save a couple of orders of magnitude over using SPLs.
1470 */
1471
1472 .align 5
1473
1474 nop ; Use these 5 nops to force daPreComm
1475 nop ; to a line boundary.
1476 nop
1477 nop
1478 nop
1479
1480 .globl EXT(_disable_preemption)
1481
1482 LEXT(_disable_preemption)
1483
1484 daPreAll: mfmsr r9 /* Save the old MSR */
1485 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1486 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1487 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1488 mtmsr r8 /* Interrupts off */
1489 isync                                   ; May have messed with fp/vec
1490
1491 daPreComm: mfsprg r6,0 /* Get the per_proc block */
1492 lwz r5,PP_PREEMPT_CNT(r6) /* Get the preemption level */
1493 addi r5,r5,1 /* Bring up the disable count */
1494 stw r5,PP_PREEMPT_CNT(r6) /* Save it back */
1495 #if 0
1496 mfsprg  r4,1                            ; (TEST/DEBUG) Note the next 3 keep from interrupting too early
1497 mr. r4,r4 ; (TEST/DEBUG)
1498 beq- epskptrc1 ; (TEST/DEBUG)
1499 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1500 lis r4,0xAAAA ; (TEST/DEBUG)
1501 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1502 sc ; (TEST/DEBUG)
1503 epskptrc1: ; (TEST/DEBUG)
1504 #endif
1505
1506 ;
1507 ; Set PREEMPTSTACK above to enable a preemption traceback stack.
1508 ;
1509 ; NOTE: make sure that PREEMPTSTACK in aligned_data is
1510 ; set the same as it is here. This is the number of
1511 ; traceback entries we can handle per processor
1512 ;
1513 ; A value of 0 disables the stack.
1514 ;
1515 #if PREEMPTSTACK
1516 cmplwi r5,PREEMPTSTACK ; Maximum depth
1517 lwz r6,CPU_ACTIVE_THREAD(r6) ; Get the pointer to the currently active thread
1518 bgt- nopredeb ; Too many to stack...
1519 mr. r6,r6 ; During boot?
1520 beq- nopredeb ; Yes, do not do backtrace...
1521 lwz r6,THREAD_TOP_ACT(r6) ; Point to the active activation
1522 lwz r6,ACT_MACT_PCB(r6) ; Get the last savearea used
1523 mr. r0,r6 ; Any saved context?
1524 beq- nosaveds ; No...
1525 lwz r0,saver1(r6) ; Get end of savearea chain
1526
1527 nosaveds: li r11,0 ; Clear callers callers callers return
1528 li r10,0 ; Clear callers callers callers callers return
1529 li r8,0 ; Clear callers callers callers callers callers return
1530 lwz r2,0(r1) ; Get callers callers stack frame
1531 lwz r12,8(r2) ; Get our callers return
1532 lwz r4,0(r2) ; Back chain
1533
1534 xor r2,r4,r2 ; Form difference
1535 cmplwi r2,8192 ; Within a couple of pages?
1536 mr r2,r4 ; Move register
1537 bge- nosaveher2 ; No, no back chain then...
1538 lwz r11,8(r2) ; Get our callers return
1539 lwz r4,0(r2) ; Back chain
1540
1541 xor r2,r4,r2 ; Form difference
1542 cmplwi r2,8192 ; Within a couple of pages?
1543 mr r2,r4 ; Move register
1544 bge- nosaveher2 ; No, no back chain then...
1545 lwz r10,8(r2) ; Get our callers return
1546 lwz r4,0(r2) ; Back chain
1547
1548 xor r2,r4,r2 ; Form difference
1549 cmplwi r2,8192 ; Within a couple of pages?
1550 mr r2,r4 ; Move register
1551 bge- nosaveher2 ; No, no back chain then...
1552 lwz r8,8(r2) ; Get our callers return
1553
1554 nosaveher2:
1555 addi r5,r5,-1 ; Get index to slot
1556 mfspr r6,pir ; Get our processor
1557 mflr r4 ; Get our return
1558 rlwinm r6,r6,8,0,23 ; Index to processor slot
1559 lis r2,hi16(EXT(DBGpreempt)) ; Stack high order
1560 rlwinm r5,r5,4,0,27 ; Index to stack slot
1561 ori r2,r2,lo16(EXT(DBGpreempt)) ; Stack low order
1562 add r2,r2,r5 ; Point to slot
1563 add r2,r2,r6 ; Move to processor
1564 stw r4,0(r2) ; Save our return
1565 stw r11,4(r2) ; Save callers caller
1566 stw r10,8(r2) ; Save callers callers caller
1567 stw r8,12(r2) ; Save callers callers callers caller
1568 nopredeb:
1569 #endif
1570 mtmsr r9 /* Allow interruptions now */
1571
1572 blr /* Return... */
1573
1574 /*
1575 * Return the active thread for both inside and outside osfmk consumption
1576 */
1577
1578 .align 5
1579 .globl EXT(current_thread)
1580
1581 LEXT(current_thread)
1582
1583 #if 1
1584 mfsprg r3,1
1585 lwz r3,ACT_THREAD(r3)
1586 blr
1587 #else
1588 mfmsr r9 /* Save the old MSR */
1589 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1590 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1591 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1592 mtmsr r8 /* Interrupts off */
1593 isync
1594 mfsprg r6,0 /* Get the per_proc */
1595 lwz r3,PP_ACTIVE_THREAD(r6) /* Get the active thread */
1596 mfsprg r4,1
1597 lwz r4,ACT_THREAD(r4)
1598 cmplw cr0,r4,r3
1599 beq current_thread_cont
1600 lis r5,hi16(L_current_thread_paniced)
1601 ori r5,r5,lo16(L_current_thread_paniced)
1602 lwz r6,0(r5)
1603 mr. r6,r6
1604 bne current_thread_cont
1605 stw r9,0(r5)
1606 mr r5,r4
1607 mr r4,r3
1608 lis r3,hi16(L_current_thread_panic)
1609 ori r3,r3,lo16(L_current_thread_panic)
1610 bl EXT(panic)
1611
1612 .data
1613 L_current_thread_panic:
1614 STRINGD "current_thread: spr1 not sync %x %x %x\n\000"
1615 L_current_thread_paniced:
1616 .long 0
1617 .text
1618 current_thread_cont:
1619 #endif
1620 mtmsr r9 /* Restore interruptions to entry */
1621 blr /* Return... */
1622
1623 /*
1624 * Set the active thread
1625 */
1626 .align 5
1627 .globl EXT(set_machine_current_thread)
1628 LEXT(set_machine_current_thread)
1629
1630 mfmsr r9 /* Save the old MSR */
1631 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1632 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1633 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1634 mtmsr r8 /* Interrupts off */
1635 isync ; May have messed with fp/vec
1636 mfsprg r6,0 /* Get the per_proc */
1637 stw r3,PP_ACTIVE_THREAD(r6) /* Set the active thread */
1638 mtmsr r9 /* Restore interruptions to entry */
1639 blr /* Return... */
1640
1641 /*
1642 * Set the current activation
1643 */
1644 .align 5
1645 .globl EXT(set_machine_current_act)
1646 LEXT(set_machine_current_act)
1647 mtsprg 1,r3 /* Set spr1 with the active thread */
1648 blr /* Return... */
1649
1650 /*
1651 * Return the current activation
1652 */
1653 .align 5
1654 .globl EXT(current_act)
1655 LEXT(current_act)
1656 mfsprg r3,1
1657 blr
1658
1659
1660
1661 /*
1662 * Return the current preemption level
1663 */
1664
1665 .align 5
1666 .globl EXT(get_preemption_level)
1667
1668 LEXT(get_preemption_level)
1669
1670 mfmsr r9 /* Save the old MSR */
1671 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1672 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1673 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1674 mtmsr r8 /* Interrupts off */
1675 isync
1676 mfsprg r6,0 /* Get the per_proc */
1677 lwz r3,PP_PREEMPT_CNT(r6) /* Get the preemption level */
1678 mtmsr r9 /* Restore interruptions to entry */
1679 blr /* Return... */
1680
1681
1682 /*
1683 * Return the cpu_data
1684 */
1685
1686 .align 5
1687 .globl EXT(get_cpu_data)
1688
1689 LEXT(get_cpu_data)
1690
1691 mfsprg r3,0 /* Get the per_proc */
1692 addi r3,r3,PP_ACTIVE_THREAD /* Get the pointer to the CPU data from per proc */
1693 blr /* Return... */
1694
1695
1696 /*
1697 * Return the simple lock count
1698 */
1699
1700 .align 5
1701 .globl EXT(get_simple_lock_count)
1702
1703 LEXT(get_simple_lock_count)
1704
1705 mfmsr r9 /* Save the old MSR */
1706 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1707 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1708 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1709 mtmsr r8 /* Interrupts off */
1710 isync ; May have messed with vec/fp
1711 mfsprg r6,0 /* Get the per_proc */
1712 lwz r3,PP_SIMPLE_LOCK_CNT(r6) /* Get the simple lock count */
1713 mtmsr r9 /* Restore interruptions to entry */
1714 blr /* Return... */
1715
1716 /*
1717 * fast_usimple_lock():
1718 *
1719 * If EE is off, get the simple lock without incrementing the preemption count and
1720  * mark the simple lock with SLOCK_FAST.
1721 * If EE is on, call usimple_lock().
1722 */
1723 .align 5
1724 .globl EXT(fast_usimple_lock)
1725
1726 LEXT(fast_usimple_lock)
1727
1728 #if CHECKNMI
1729 b EXT(usimple_lock) ; (TEST/DEBUG)
1730 #endif
1731 mfmsr r9
1732 andi. r7,r9,lo16(MASK(MSR_EE))
1733 bne- L_usimple_lock_c
1734 L_usimple_lock_loop:
1735 lwarx r4,0,r3
1736 li r5,ILK_LOCKED|SLOCK_FAST
1737 mr. r4,r4
1738 bne- L_usimple_lock_c
1739 stwcx. r5,0,r3
1740 bne- L_usimple_lock_loop
1741 isync
1742 blr
1743 L_usimple_lock_c:
1744 b EXT(usimple_lock)
1745
1746 /*
1747 * fast_usimple_lock_try():
1748 *
1749 * If EE is off, try to get the simple lock. The preemption count doesn't get incremented and
1750 * if successfully held, the simple lock is marked with SLOCK_FAST.
1751 * If EE is on, call usimple_lock_try()
1752 */
1753 .align 5
1754 .globl EXT(fast_usimple_lock_try)
1755
1756 LEXT(fast_usimple_lock_try)
1757
1758 #if CHECKNMI
1759 b EXT(usimple_lock_try) ; (TEST/DEBUG)
1760 #endif
1761 mfmsr r9
1762 andi. r7,r9,lo16(MASK(MSR_EE))
1763 bne- L_usimple_lock_try_c
1764 L_usimple_lock_try_loop:
1765 lwarx r4,0,r3
1766 li r5,ILK_LOCKED|SLOCK_FAST
1767 mr. r4,r4
1768 bne- L_usimple_lock_try_fail
1769 stwcx. r5,0,r3
1770 bne- L_usimple_lock_try_loop
1771 li r3,1
1772 isync
1773 blr
1774 L_usimple_lock_try_fail:
1775 li r3,0
1776 blr
1777 L_usimple_lock_try_c:
1778 b EXT(usimple_lock_try)
1779
1780 /*
1781 * fast_usimple_unlock():
1782 *
1783 * If the simple lock is marked SLOCK_FAST, release it without decrementing the preemption count.
1784 * Call usimple_unlock() otherwise.
1785 */
1786 .align 5
1787 .globl EXT(fast_usimple_unlock)
1788
1789 LEXT(fast_usimple_unlock)
1790
1791 #if CHECKNMI
1792 b EXT(usimple_unlock) ; (TEST/DEBUG)
1793 #endif
1794 lwz r5,LOCK_DATA(r3)
1795 li r0,0
1796 cmpi cr0,r5,ILK_LOCKED|SLOCK_FAST
1797 bne- L_usimple_unlock_c
1798 sync
1799 #if 0
1800 mfmsr r9
1801 andi. r7,r9,lo16(MASK(MSR_EE))
1802 beq L_usimple_unlock_cont
1803 lis r3,hi16(L_usimple_unlock_panic)
1804 ori r3,r3,lo16(L_usimple_unlock_panic)
1805 bl EXT(panic)
1806
1807 .data
1808 L_usimple_unlock_panic:
1809 STRINGD "fast_usimple_unlock: interrupts not disabled\n\000"
1810 .text
1811 L_usimple_unlock_cont:
1812 #endif
1813 stw r0, LOCK_DATA(r3)
1814 blr
1815 L_usimple_unlock_c:
1816 b EXT(usimple_unlock)
1817
1818 /*
1819 * enter_funnel_section():
1820 *
1821 */
1822 .align 5
1823 .globl EXT(enter_funnel_section)
1824
1825 LEXT(enter_funnel_section)
1826
1827 #if !MACH_LDEBUG
1828 lis r10,hi16(EXT(kdebug_enable))
1829 ori r10,r10,lo16(EXT(kdebug_enable))
1830 lwz r10,0(r10)
1831 lis r11,hi16(EXT(split_funnel_off))
1832 ori r11,r11,lo16(EXT(split_funnel_off))
1833 lwz r11,0(r11)
1834 or. r10,r11,r10 ; Check kdebug_enable or split_funnel_off
1835 bne- L_enter_funnel_section_slow1 ; If set, call the slow path
1836 mfsprg r6,1 ; Get the current activation
1837 lwz r7,LOCK_FNL_MUTEX(r3)
1838 mfmsr r11
1839 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1840 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1841 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1842 mtmsr r10 ; Turn off EE
1843 isync ; May have messed with vec/fp
1844 mr r9,r6
1845 L_enter_funnel_section_loop:
1846 lwarx r5,0,r7 ; Load the mutex lock
1847 mr. r5,r5
1848 bne- L_enter_funnel_section_slow ; Go to the slow path
1849 stwcx. r6,0,r7 ; Grab the lock
1850 bne- L_enter_funnel_section_loop ; Loop back if failed
1851 isync                                   ; Stop prefetching
1852 lwz r6,ACT_THREAD(r6) ; Get the current thread
1853 li r7,TH_FN_OWNED
1854 stw r7,THREAD_FUNNEL_STATE(r6) ; Set the funnel state
1855 stw r3,THREAD_FUNNEL_LOCK(r6) ; Set the funnel lock reference
1856 mtmsr r11
1857 blr
1858
1859 L_enter_funnel_section_slow:
1860 mtmsr r11
1861 L_enter_funnel_section_slow1:
1862 #endif
1863 li r4,TRUE
1864 b EXT(thread_funnel_set)
1865
1866 /*
1867 * exit_funnel_section():
1868 *
1869 */
1870 .align 5
1871 .globl EXT(exit_funnel_section)
1872
1873 LEXT(exit_funnel_section)
1874
1875 #if !MACH_LDEBUG
1876 mfsprg r6,1 ; Get the current activation
1877 lwz r6,ACT_THREAD(r6) ; Get the current thread
1878 lwz r3,THREAD_FUNNEL_LOCK(r6) ; Get the funnel lock
1879 mr. r3,r3 ; Check on funnel held
1880 beq- L_exit_funnel_section_ret ;
1881 lis r10,hi16(EXT(kdebug_enable))
1882 ori r10,r10,lo16(EXT(kdebug_enable))
1883 lwz r10,0(r10)
1884 mr. r10,r10
1885 bne- L_exit_funnel_section_slow1 ; If set, call the slow path
1886 lwz r7,LOCK_FNL_MUTEX(r3) ; Get the funnel mutex lock
1887 mfmsr r11
1888 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1889 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1890 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1891 mtmsr r10 ; Turn off EE
1892 isync ; May have messed with fp/vec
1893 sync
1894 L_exit_funnel_section_loop:
1895 lwarx r5,0,r7
1896 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1897 li r5,0 ; Clear the mutexlock
1898 bne- L_exit_funnel_section_slow
1899 stwcx. r5,0,r7 ; Release the funnel mutexlock
1900 bne- L_exit_funnel_section_loop
1901 li r7,0
1902 stw r7,THREAD_FUNNEL_STATE(r6) ; Clear the funnel state
1903 stw r7,THREAD_FUNNEL_LOCK(r6) ; Clear the funnel lock reference
1904 mtmsr r11
1905 L_exit_funnel_section_ret:
1906 blr
1907 L_exit_funnel_section_slow:
1908 mtmsr r11
1909 L_exit_funnel_section_slow1:
1910 #endif
1911 li r4,FALSE
1912 b EXT(thread_funnel_set)
1913