osfmk/ppc/hw_lock.s (from xnu-123.5)
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <cpus.h>
24 #include <mach_assert.h>
25 #include <mach_ldebug.h>
26 #include <mach_rt.h>
27
28 #include <kern/etap_options.h>
29
30 #include <ppc/asm.h>
31 #include <ppc/proc_reg.h>
32 #include <assym.s>
33
34 #define STRING ascii
35
36 #define SWT_HI 0+FM_SIZE
37 #define SWT_LO 4+FM_SIZE
38 #define MISSED 8+FM_SIZE
39
40 ;
41 ; NOTE: make sure that PREEMPTSTACK in aligned_data is
42 ; set the same as it is here. This is the number of
43 ; traceback entries we can handle per processor
44 ;
45 ; A value of 0 disables the stack.
46 ;
47 #define PREEMPTSTACK 0
48 #define CHECKNMI 0
49 #define CHECKLOCKS 1
50
51 #include <ppc/POWERMAC/mp/mp.h>
52
53 #define PROLOG(space) \
54 stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
55 mflr r0 __ASMNL__ \
56 stw r3,FM_ARG0(r1) __ASMNL__ \
57 stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__
58
59 #define EPILOG \
60 lwz r1,0(r1) __ASMNL__ \
61 lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
62 mtlr r0 __ASMNL__
63
64 #if MACH_LDEBUG && CHECKLOCKS
65 /*
66 * Routines for general lock debugging.
67 */
68
69 /* Gets lock check flags in CR6: CR bits 24-27 */
70
71 #define CHECK_SETUP(rg) \
72 lis rg,hi16(EXT(dgWork)) __ASMNL__ \
73 ori rg,rg,lo16(EXT(dgWork)) __ASMNL__ \
74 lbz rg,dgFlags(rg) __ASMNL__ \
75 mtcrf 2,rg __ASMNL__
76
77
78 /*
79 * Checks for expected lock types and calls "panic" on
80 * mismatch. Detects calls to Mutex functions with
81 * type simplelock and vice versa.
82 */
83 #define CHECK_MUTEX_TYPE() \
84 bt 24+disLktypeb,1f __ASMNL__ \
85 lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
86 cmpwi r10,MUTEX_TAG __ASMNL__ \
87 beq+ 1f __ASMNL__ \
88 lis r3,hi16(not_a_mutex) __ASMNL__ \
89 ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
90 bl EXT(panic) __ASMNL__ \
91 lwz r3,FM_ARG0(r1) __ASMNL__ \
92 1:
93
94 .data
95 not_a_mutex:
96 STRINGD "not a mutex!\n\000"
97 .text
98
99 #define CHECK_SIMPLE_LOCK_TYPE() \
100 bt 24+disLktypeb,1f __ASMNL__ \
101 lwz r10,SLOCK_TYPE(r3) __ASMNL__ \
102 cmpwi r10,USLOCK_TAG __ASMNL__ \
103 beq+ 1f __ASMNL__ \
104 lis r3,hi16(not_a_slock) __ASMNL__ \
105 ori r3,r3,lo16(not_a_slock) __ASMNL__ \
106 bl EXT(panic) __ASMNL__ \
107 lwz r3,FM_ARG0(r1) __ASMNL__ \
108 1:
109
110 .data
111 not_a_slock:
112 STRINGD "not a simple lock!\n\000"
113 .text
114
115 #define CHECK_NO_SIMPLELOCKS() \
116 bt 24+disLkNmSimpb,2f __ASMNL__ \
117 mfmsr r11 __ASMNL__ \
118 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
119 mtmsr r10 __ASMNL__ \
120 mfsprg r10,0 __ASMNL__ \
121 lwz r10,PP_CPU_DATA(r10) __ASMNL__ \
122 lwz r10,CPU_SIMPLE_LOCK_COUNT(r10) __ASMNL__ \
123 cmpwi r10,0 __ASMNL__ \
124 beq+ 1f __ASMNL__ \
125 lis r3,hi16(simple_locks_held) __ASMNL__ \
126 ori r3,r3,lo16(simple_locks_held) __ASMNL__ \
127 bl EXT(panic) __ASMNL__ \
128 lwz r3,FM_ARG0(r1) __ASMNL__ \
129 1: __ASMNL__ \
130 mtmsr r11 __ASMNL__ \
131 2:
132
133 .data
134 simple_locks_held:
135 STRINGD "simple locks held!\n\000"
136 .text
137
138 /*
139 * Verifies return to the correct thread in "unlock" situations.
140 */
141
142 #define CHECK_THREAD(thread_offset) \
143 bt 24+disLkThreadb,2f __ASMNL__ \
144 mfmsr r11 __ASMNL__ \
145 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
146 mtmsr r10 __ASMNL__ \
147 mfsprg r10,0 __ASMNL__ \
148 lwz r10,PP_CPU_DATA(r10) __ASMNL__ \
149 lwz r10,CPU_ACTIVE_THREAD(r10) __ASMNL__ \
150 cmpwi r10,0 __ASMNL__ \
151 beq- 1f __ASMNL__ \
152 lwz r9,thread_offset(r3) __ASMNL__ \
153 cmpw r9,r10 __ASMNL__ \
154 beq+ 1f __ASMNL__ \
155 lis r3,hi16(wrong_thread) __ASMNL__ \
156 ori r3,r3,lo16(wrong_thread) __ASMNL__ \
157 bl EXT(panic) __ASMNL__ \
158 lwz r3,FM_ARG0(r1) __ASMNL__ \
159 1: __ASMNL__ \
160 mtmsr r11 __ASMNL__ \
161 2:
162 .data
163 wrong_thread:
164 STRINGD "wrong thread!\n\000"
165 .text
166
167 #define CHECK_MYLOCK(thread_offset) \
168 bt 24+disLkMyLckb,2f __ASMNL__ \
169 mfmsr r11 __ASMNL__ \
170 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
171 mtmsr r10 __ASMNL__ \
172 mfsprg r10,0 __ASMNL__ \
173 lwz r10,PP_CPU_DATA(r10) __ASMNL__ \
174 lwz r10,CPU_ACTIVE_THREAD(r10) __ASMNL__ \
175 cmpwi r10,0 __ASMNL__ \
176 beq- 1f __ASMNL__ \
177 lwz r9, thread_offset(r3) __ASMNL__ \
178 cmpw r9,r10 __ASMNL__ \
179 bne+ 1f __ASMNL__ \
180 lis r3, HIGH_ADDR(mylock_attempt) __ASMNL__ \
181 ori r3,r3,LOW_ADDR(mylock_attempt) __ASMNL__ \
182 bl EXT(panic) __ASMNL__ \
183 lwz r3,FM_ARG0(r1) __ASMNL__ \
184 1: __ASMNL__ \
185 mtmsr r11 __ASMNL__ \
186 2:
187
188 .data
189 mylock_attempt:
190 STRINGD "mylock attempt!\n\000"
191 .text
192
193 #else /* MACH_LDEBUG */
194
195 #define CHECK_SETUP(rg)
196 #define CHECK_MUTEX_TYPE()
197 #define CHECK_SIMPLE_LOCK_TYPE()
198 #define CHECK_THREAD(thread_offset)
199 #define CHECK_NO_SIMPLELOCKS()
200 #define CHECK_MYLOCK(thread_offset)
201
202 #endif /* MACH_LDEBUG */
203
204 /*
205 * void hw_lock_init(hw_lock_t)
206 *
207 * Initialize a hardware lock. These locks should be cache-line aligned and a
208 * multiple of the cache line size.
209 */
210
211 ENTRY(hw_lock_init, TAG_NO_FRAME_USED)
212
213 li r0, 0 /* set lock to free == 0 */
214 stw r0, 0(r3) /* Initialize the lock */
215 blr
216
217 /*
218 * void hw_lock_unlock(hw_lock_t)
219 *
220 * Unconditionally release lock.
221 * MACH_RT: release preemption level.
222 */
223
224
225 .align 5
226 .globl EXT(hw_lock_unlock)
227
228 LEXT(hw_lock_unlock)
229
230 #if 0
231 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
232 lis r5,0xFFFF /* (TEST/DEBUG) */
233 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
234 sc /* (TEST/DEBUG) */
235 #endif
236 sync /* Flush writes done under lock */
237 li r0, 0 /* set lock to free */
238 stw r0, 0(r3)
239
240 #if MACH_RT
241 b epStart /* Go enable preemption... */
242 #else
243 blr
244 #endif
245
246
247 /*
248 * Special case for internal use. Uses same lock code, but sets up so
249 * that there will be no disabling of preemption after locking. Generally
250 * used for mutex locks when obtaining the interlock although there is
251 * nothing stopping other uses.
252 */
253
254 lockLock: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */
255 ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */
256 cmplwi cr1,r1,0 /* Set flag so we will not disable preemption */
257 lwz r4,0(r4) /* Get the timeout value */
258 b lockComm /* Join on up... */
259
260 /*
261 * void hw_lock_lock(hw_lock_t)
262 *
263 * Acquire lock, spinning until it becomes available.
264 * MACH_RT: also return with preemption disabled.
265 * Apparently not used except by mach_perf.
266 * We will just set a default timeout and jump into the NORMAL timeout lock.
267 */
268
269 .align 5
270 .globl EXT(hw_lock_lock)
271
272 LEXT(hw_lock_lock)
273
274 lockDisa: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */
275 ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */
276 cmplw cr1,r1,r1 /* Set flag so we will disable preemption */
277 lwz r4,0(r4) /* Get the timeout value */
278 b lockComm /* Join on up... */
279
280 /*
281 * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
282 *
283 * Try to acquire spin-lock. Return success (1) or failure (0).
284 * Attempt will fail after timeout ticks of the timebase.
285 * We try fairly hard to get this lock. We disable for interruptions, but
286 * reenable after a "short" timeout (128 ticks, we may want to change this).
287 * After checking to see if the large timeout value (passed in) has expired and a
288 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
289 * we return either in abject failure, or disable and go back to the lock sniff routine.
290 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
291 *
292 * One programming note: NEVER DO ANYTHING IN HERE THAT WOULD FORCE US TO CALL
293 * THIS WITH TRANSLATION OR INTERRUPTIONS EITHER ON OR OFF, GOSH DARN IT!
294 *
295 */
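/*
 * For illustration, a rough C-level usage sketch. It assumes only the
 * prototypes documented in this file; the lock, the protected counter,
 * and the tick count are arbitrary example choices.
 *
 *	extern void hw_lock_init(hw_lock_t);
 *	extern void hw_lock_unlock(hw_lock_t);
 *	extern unsigned int hw_lock_to(hw_lock_t, unsigned int timeout);
 *
 *	hw_lock_init(lock);				once, at setup
 *
 *	if (hw_lock_to(lock, 12500)) {			spin up to 12500 tb ticks
 *		protected_count++;			work the lock protects
 *		hw_lock_unlock(lock);
 *	} else {
 *		panic("example: spin lock timeout");	caller decides the policy
 *	}
 */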
296 .align 5
297 .globl EXT(hw_lock_to)
298
299 LEXT(hw_lock_to)
300
301 #if 0
302 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
303 lis r5,0xEEEE /* (TEST/DEBUG) */
304 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
305 sc /* (TEST/DEBUG) */
306 #endif
307
308 #if CHECKNMI
309 mflr r12 ; (TEST/DEBUG)
310 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
311 mtlr r12 ; (TEST/DEBUG)
312 #endif
313
314 cmplw cr1,r1,r1 /* Set flag so we will disable preemption */
315
316 lockComm: mfmsr r9 /* Get the MSR value */
317 mr r5,r3 /* Get the address of the lock */
318 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */
319
320 mtmsr r7 /* Turn off interruptions */
321 mftb r8 /* Get the low part of the time base */
322
323 lwarx r6,0,r5 ; ?
324
325 lcktry: lwarx r6,0,r5 /* Grab the lock value */
326 li r3,1 /* Use part of the delay time */
327 mr. r6,r6 /* Is it locked? */
328 bne- lcksniff /* Yeah, wait for it to clear... */
329 stwcx. r3,0,r5 /* Try to seize that there durn lock */
330 #if MACH_RT
331 bne- lcktry /* Couldn't get it... */
332 beq+ cr1,daPreComm /* We got it, go disable preemption if we're supposed to... */
333 mtmsr r9 ; Restore interrupt state
334 blr /* Go on home... */
335 #else /* MACH_RT */
336 beq+ lckgot /* We got it, yahoo... */
337 b lcktry /* Just start up again if the store failed... */
338 #endif /* MACH_RT */
339
340 .align 5
341
342 lcksniff: lwz r3,0(r5) /* Get that lock in here */
343 mr. r3,r3 /* Is it free yet? */
344 beq+ lcktry /* Yeah, try for it again... */
345
346 mftb r10 /* Time stamp us now */
347 sub r10,r10,r8 /* Get the elapsed time */
348 cmplwi r10,128 /* Have we been spinning for 128 tb ticks? */
349 blt+ lcksniff /* Not yet... */
350
351 mtmsr r9 /* Say, any interrupts pending? */
352
353 /* The following instructions force the pipeline to be interlocked so that only one
354 instruction is issued per cycle. This ensures that we stay enabled for a long enough
355 time; if the window is too short, pending interruptions will not have a chance to be taken */
356
357 subi r4,r4,128 /* Back off elapsed time from timeout value */
358 or r4,r4,r4 /* Do nothing here but force a single cycle delay */
359 mr. r4,r4 /* See if we used the whole timeout */
360 li r3,0 /* Assume a timeout return code */
361 or r4,r4,r4 /* Do nothing here but force a single cycle delay */
362
363 ble- lckfail /* We failed */
364 mtmsr r7 /* Disable for interruptions */
365 mftb r8 /* Get the low part of the time base */
366 b lcksniff /* Now that we've opened an enable window, keep trying... */
367
368 #if !MACH_RT
369 lckgot: mtmsr r9 /* Enable for interruptions */
370 isync /* Make sure we don't use a speculatively loaded value */
371 blr
372 #endif /* !MACH_RT */
373
374 lckfail: /* We couldn't get the lock */
375 li r3,0 /* Set failure return code */
376 blr /* Return, head hanging low... */
377
378
379 /*
380 * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
381 *
382 * Try to acquire spin-lock. The second parameter is the bit mask to test and set.
383 * Multiple bits may be set. Return success (1) or failure (0).
384 * Attempt will fail after timeout ticks of the timebase.
385 * We try fairly hard to get this lock. We disable for interruptions, but
386 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
387 * After checking to see if the large timeout value (passed in) has expired and a
388 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
389 * we return either in abject failure, or disable and go back to the lock sniff routine.
390 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
391 *
392 * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
393 * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND
394 * RESTORE FROM THE STACK.
395 *
396 */
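/*
 * For illustration, a rough sketch of pairing this routine with
 * hw_unlock_bit below. The mask, timeout, and guarded field are
 * hypothetical; only the prototypes documented in this file are assumed.
 *
 *	#define EXAMPLE_LOCK_BIT 0x00000001
 *
 *	if (hw_lock_bit(lock, EXAMPLE_LOCK_BIT, 12500)) {
 *		guarded_field++;			data this bit protects
 *		hw_unlock_bit(lock, EXAMPLE_LOCK_BIT);
 *	} else {
 *		panic("example: bit lock timeout");
 *	}
 */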
397
398 .align 5
399
400 nop ; Force loop alignment to cache line
401 nop
402 nop
403 nop
404
405 .globl EXT(hw_lock_bit)
406
407 LEXT(hw_lock_bit)
408
409 mfmsr r9 /* Get the MSR value */
410 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */
411
412 mtmsr r7 /* Turn off interruptions */
413
414 mftb r8 /* Get the low part of the time base */
415
416 lwarx r0,0,r3 ; ?
417
418 bittry: lwarx r6,0,r3 /* Grab the lock value */
419 and. r0,r6,r4 /* See if any of the lock bits are on */
420 or r6,r6,r4 /* Turn on the lock bits */
421 bne- bitsniff /* Yeah, wait for it to clear... */
422 stwcx. r6,0,r3 /* Try to seize that there durn lock */
423 beq+ bitgot /* We got it, yahoo... */
424 b bittry /* Just start up again if the store failed... */
425
426 .align 5
427
428 bitsniff: lwz r6,0(r3) /* Get that lock in here */
429 and. r0,r6,r4 /* See if any of the lock bits are on */
430 beq+ bittry /* Yeah, try for it again... */
431
432 mftb r6 /* Time stamp us now */
433 sub r6,r6,r8 /* Get the elapsed time */
434 cmplwi r6,128 /* Have we been spinning for 128 tb ticks? */
435 blt+ bitsniff /* Not yet... */
436
437 mtmsr r9 /* Say, any interrupts pending? */
438
439 /* The following instructions force the pipeline to be interlocked so that only one
440 instruction is issued per cycle. This ensures that we stay enabled for a long enough
441 time. If the window is too short, pending interruptions will not have a chance to be taken
442 */
443
444 subi r5,r5,128 /* Back off elapsed time from timeout value */
445 or r5,r5,r5 /* Do nothing here but force a single cycle delay */
446 mr. r5,r5 /* See if we used the whole timeout */
447 or r5,r5,r5 /* Do nothing here but force a single cycle delay */
448
449 ble- bitfail /* We failed */
450 mtmsr r7 /* Disable for interruptions */
451 mftb r8 /* Get the low part of the time base */
452 b bitsniff /* Now that we've opened an enable window, keep trying... */
453
454 .align 5
455
456 bitgot: mtmsr r9 /* Enable for interruptions */
457 li r3,1 /* Set good return code */
458 isync /* Make sure we don't use a speculatively loaded value */
459 blr
460
461 bitfail: li r3,0 /* Set failure return code */
462 blr /* Return, head hanging low... */
463
464
465 /*
466 * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
467 *
468 * Release bit based spin-lock. The second parameter is the bit mask to clear.
469 * Multiple bits may be cleared.
470 *
471 * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
472 * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND
473 * RESTORE FROM THE STACK.
474 */
475
476 .align 5
477 .globl EXT(hw_unlock_bit)
478
479 LEXT(hw_unlock_bit)
480
481 sync
482 lwarx r0,0,r3 ; ?
483
484 ubittry: lwarx r0,0,r3 /* Grab the lock value */
485 andc r0,r0,r4 /* Clear the lock bits */
486 stwcx. r0,0,r3 /* Try to clear that there durn lock */
487 bne- ubittry /* Try again, couldn't save it... */
488
489 blr /* Leave... */
490
491 /*
492 * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
493 * unsigned int newb, unsigned int timeout)
494 *
495 * Try to acquire spin-lock. The second parameter is the bit mask to check.
496 * The third is the value of those bits and the 4th is what to set them to.
497 * Return success (1) or failure (0).
498 * Attempt will fail after timeout ticks of the timebase.
499 * We try fairly hard to get this lock. We disable for interruptions, but
500 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
501 * After checking to see if the large timeout value (passed in) has expired and a
502 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
503 * we return either in abject failure, or disable and go back to the lock sniff routine.
504 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
505 *
506 */
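/*
 * For illustration, the semantics restated as C (a sketch only; the real
 * routine below does the test and update as a single atomic sequence and
 * handles the interrupt window as described above):
 *
 *	unsigned int hw_lock_mbits(unsigned int *lck, unsigned int bits,
 *	    unsigned int value, unsigned int newb, unsigned int timeout)
 *	{
 *		while (not timed out) {
 *			unsigned int old = *lck;
 *			if ((old & bits) == value) {
 *				*lck = old | newb;	done atomically
 *				return 1;
 *			}
 *		}
 *		return 0;
 *	}
 */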
507
508 .align 5
509
510 nop ; Force loop alignment to cache line
511 nop
512 nop
513 nop
514
515 .globl EXT(hw_lock_mbits)
516
517 LEXT(hw_lock_mbits)
518
519 mfmsr r9 ; Get the MSR value
520 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Get MSR that is uninterruptible
521
522 mtmsr r8 ; Turn off interruptions
523
524 mftb r10 ; Get the low part of the time base
525
526 lwarx r0,0,r3 ; ?
527
528 mbittry: lwarx r12,0,r3 ; Grab the lock value
529 and r0,r12,r4 ; Clear extra bits
530 or r12,r12,r6 ; Turn on the lock bits
531 cmplw r0,r5 ; Are these the right bits?
532 bne- mbitsniff ; Nope, wait for it to clear...
533 stwcx. r12,0,r3 ; Try to seize that there durn lock
534 beq+ mbitgot ; We got it, yahoo...
535 b mbittry ; Just start up again if the store failed...
536
537 .align 5
538
539 mbitsniff: lwz r12,0(r3) ; Get that lock in here
540 and r0,r12,r4 ; Clear extra bits
541 or r12,r12,r6 ; Turn on the lock bits
542 cmplw r0,r5 ; Are these the right bits?
543 beq+ mbittry ; Yeah, try for it again...
544
545 mftb r11 ; Time stamp us now
546 sub r11,r11,r10 ; Get the elapsed time
547 cmplwi r11,128 ; Have we been spinning for 128 tb ticks?
548 blt+ mbitsniff ; Not yet...
549
550 mtmsr r9 ; Say, any interrupts pending?
551
552 ; The following instructions force the pipeline to be interlocked so that only one
553 ; instruction is issued per cycle. This ensures that we stay enabled for a long enough
554 ; time. If the window is too short, pending interruptions will not have a chance to be taken
555
556 subi r7,r7,128 ; Back off elapsed time from timeout value
557 or r7,r7,r7 ; Do nothing here but force a single cycle delay
558 mr. r7,r7 ; See if we used the whole timeout
559 or r7,r7,r7 ; Do nothing here but force a single cycle delay
560
561 ble- mbitfail ; We failed
562 mtmsr r8 ; Disable for interruptions
563 mftb r10 ; Get the low part of the time base
564 b mbitsniff ; Now that we have opened an enable window, keep trying...
565
566 .align 5
567
568 mbitgot: mtmsr r9 ; Enable for interruptions
569 li r3,1 ; Set good return code
570 isync ; Make sure we do not use a speculatively loaded value
571 blr
572
573 mbitfail: li r3,0 ; Set failure return code
574 blr ; Return, head hanging low...
575
576
577 /*
578 * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
579 *
580 * Spin until word hits 0 or timeout.
581 * Return success (1) or failure (0).
582 * Attempt will fail after timeout ticks of the timebase.
583 *
584 * The theory is that a processor will bump a counter as it signals
585 * other processors. Then it will spin until the counter hits 0 (or
586 * times out). The other processors, as they receive the signal, will
587 * decrement the counter.
588 *
589 * The other processors use an interlocked update to decrement; this one
590 * does not need to interlock.
591 *
592 */
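/*
 * For illustration, a rough sketch of the counter protocol described above.
 * The signalling mechanism, CPU count, and timeout are hypothetical; the
 * decrement uses hw_atomic_sub from later in this file.
 *
 *   initiator:
 *	sync_count = number_of_other_cpus;
 *	signal each of the other processors;
 *	if (!hw_cpu_sync(&sync_count, timeout_ticks))
 *		panic("example: processors did not check in");
 *
 *   each signalled processor, in its handler:
 *	(void) hw_atomic_sub(&sync_count, 1);
 */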
593
594 .align 5
595
596 .globl EXT(hw_cpu_sync)
597
598 LEXT(hw_cpu_sync)
599
600 mftb r10 ; Get the low part of the time base
601 mr r9,r3 ; Save the sync word address
602 li r3,1 ; Assume we work
603
604 csynctry: lwz r11,0(r9) ; Grab the sync value
605 mr. r11,r11 ; Counter hit 0?
606 beqlr- ; Yeah, we are sunk...
607 mftb r12 ; Time stamp us now
608
609 sub r12,r12,r10 ; Get the elapsed time
610 cmplw r4,r12 ; Have we gone too long?
611 bge+ csynctry ; Not yet...
612
613 li r3,0 ; Set failure...
614 blr ; Return, head hanging low...
615
616 /*
617 * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
618 *
619 * Spin until word changes or timeout.
620 * Return success (1) or failure (0).
621 * Attempt will fail after timeout ticks of the timebase.
622 *
623 * This is used to ensure that a processor passes a certain point.
624 * An example of use is to monitor the last interrupt time in the
625 * per_proc block. This can be used to ensure that the other processor
626 * has seen at least one interrupt since a specific time.
627 *
628 */
629
630 .align 5
631
632 .globl EXT(hw_cpu_wcng)
633
634 LEXT(hw_cpu_wcng)
635
636 mftb r10 ; Get the low part of the time base
637 mr r9,r3 ; Save the sync word address
638 li r3,1 ; Assume we work
639
640 wcngtry: lwz r11,0(r9) ; Grab the value
641 cmplw r11,r4 ; Do they still match?
642 bnelr- ; Nope, cool...
643 mftb r12 ; Time stamp us now
644
645 sub r12,r12,r10 ; Get the elapsed time
646 cmplw r5,r12 ; Have we gone too long?
647 bge+ wcngtry ; Not yet...
648
649 li r3,0 ; Set failure...
650 blr ; Return, head hanging low...
651
652
653 /*
654 * unsigned int hw_lock_try(hw_lock_t)
655 *
656 * Try to acquire spin-lock. Return success (1) or failure (0).
657 * MACH_RT: returns with preemption disabled on success.
658 *
659 */
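/*
 * For illustration, the usual try-lock pattern (a sketch; the counter and
 * the slow path are whatever the caller chooses):
 *
 *	if (hw_lock_try(lock)) {
 *		quick_count++;			brief critical section
 *		hw_lock_unlock(lock);		also drops the preemption disable
 *	} else {
 *		take the slow path without spinning;
 *	}
 */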
660 .align 5
661 .globl EXT(hw_lock_try)
662
663 LEXT(hw_lock_try)
664
665 #if 0
666 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
667 lis r5,0x9999 /* (TEST/DEBUG) */
668 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
669 sc /* (TEST/DEBUG) */
670 #endif
671 mfmsr r9 /* Save the MSR value */
672 li r4, 1 /* value to be stored... 1==taken */
673 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruption bit */
674
675 #if MACH_LDEBUG
676 lis r5, 0x10 /* roughly 1E6 */
677 mtctr r5
678 #endif /* MACH_LDEBUG */
679
680 mtmsr r7 /* Disable interruptions and thus, preemption */
681
682 lwarx r5,0,r3 ; ?
683
684 .L_lock_try_loop:
685
686 #if MACH_LDEBUG
687 bdnz+ 0f /* Count attempts */
688 mtmsr r9 /* Restore enablement */
689 BREAKPOINT_TRAP /* Get to debugger */
690 mtmsr r7 /* Disable interruptions and thus, preemption */
691 0:
692 #endif /* MACH_LDEBUG */
693
694 lwarx r5,0,r3 /* Ld from addr of arg and reserve */
695
696 cmpwi r5, 0 /* TEST... */
697 bne- .L_lock_try_failed /* branch if taken. Predict free */
698
699 stwcx. r4, 0,r3 /* And SET (if still reserved) */
700 mfsprg r6,0 /* Get the per_proc block */
701 bne- .L_lock_try_loop /* If set failed, loop back */
702
703 lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
704 isync
705
706 #if MACH_RT
707 lwz r5,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */
708 addi r5,r5,1 /* Bring up the disable count */
709 stw r5,CPU_PREEMPTION_LEVEL(r6) /* Save it back */
710
711 #endif /* MACH_RT */
712
713 mtmsr r9 /* Allow interruptions now */
714 li r3,1 /* Set that the lock was free */
715 blr
716
717 .L_lock_try_failed:
718 mtmsr r9 /* Allow interruptions now */
719 li r3,0 /* FAILURE - lock was taken */
720 blr
721
722 /*
723 * unsigned int hw_lock_held(hw_lock_t)
724 *
725 * Return 1 if lock is held
726 * MACH_RT: doesn't change preemption state.
727 * N.B. Racy, of course.
728 *
729 */
730 .align 5
731 .globl EXT(hw_lock_held)
732
733 LEXT(hw_lock_held)
734
735 #if 0
736 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
737 lis r5,0x8888 /* (TEST/DEBUG) */
738 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
739 sc /* (TEST/DEBUG) */
740 #endif
741 isync /* Make sure we don't use a speculatively fetched lock */
742 lwz r3, 0(r3) /* Return value of lock */
743 blr
744
745 /*
746 * unsigned int hw_compare_and_store(unsigned int old, unsigned int new, unsigned int *area)
747 *
748 * Compare old to *area; if equal, store new and return true,
749 * else return false and do not store.
750 * This is an atomic operation.
751 *
752 */
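/*
 * For illustration, the usual retry loop built on this primitive (a sketch;
 * shared_word and EXAMPLE_FLAG are hypothetical):
 *
 *	unsigned int old, new;
 *
 *	do {
 *		old = shared_word;
 *		new = old | EXAMPLE_FLAG;
 *	} while (!hw_compare_and_store(old, new, &shared_word));
 */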
753 .align 5
754 .globl EXT(hw_compare_and_store)
755
756 LEXT(hw_compare_and_store)
757
758 mr r6,r3 /* Save the old value */
759
760 lwarx r9,0,r5 ; ?
761
762 cstry: lwarx r9,0,r5 /* Grab the area value */
763 li r3,1 /* Assume it works */
764 cmplw cr0,r9,r6 /* Does it match the old value? */
765 bne- csfail /* No, it must have changed... */
766 stwcx. r4,0,r5 /* Try to save the new value */
767 bne- cstry /* Didn't get it, try again... */
768 isync /* Just hold up prefetch */
769 blr /* Return... */
770
771 csfail: li r3,0 /* Set failure */
772 blr /* Better luck next time... */
773
774
775 /*
776 * unsigned int hw_atomic_add(unsigned int *area, int val)
777 *
778 * Atomically add the second parameter to the first.
779 * Returns the result.
780 *
781 */
782 .align 5
783 .globl EXT(hw_atomic_add)
784
785 LEXT(hw_atomic_add)
786
787 mr r6,r3 /* Save the area */
788
789 lwarx r3,0,r6 ; ?
790
791 addtry: lwarx r3,0,r6 /* Grab the area value */
792 add r3,r3,r4 /* Add the value */
793 stwcx. r3,0,r6 /* Try to save the new value */
794 bne- addtry /* Didn't get it, try again... */
795 blr /* Return... */
796
797
798 /*
799 * unsigned int hw_atomic_sub(unsigned int *area, int val)
800 *
801 * Atomically subtract the second parameter from the first.
802 * Returns the result.
803 *
804 */
805 .align 5
806 .globl EXT(hw_atomic_sub)
807
808 LEXT(hw_atomic_sub)
809
810 mr r6,r3 /* Save the area */
811
812 lwarx r3,0,r6 ; ?
813
814 subtry: lwarx r3,0,r6 /* Grab the area value */
815 sub r3,r3,r4 /* Subtract the value */
816 stwcx. r3,0,r6 /* Try to save the new value */
817 bne- subtry /* Didn't get it, try again... */
818 blr /* Return... */
819
820
821 /*
822 * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
823 *
824 * Atomically inserts the element at the head of the list
825 * anchor is the pointer to the anchor (head) of the list
826 * element is the pointer to the element to insert
827 * disp is the displacement into the element to the chain pointer
828 *
829 */
830 .align 5
831 .globl EXT(hw_queue_atomic)
832
833 LEXT(hw_queue_atomic)
834
835 mr r7,r4 /* Make end point the same as start */
836 mr r8,r5 /* Copy the displacement also */
837 b hw_queue_comm /* Join common code... */
838
839 /*
840 * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
841 *
842 * Atomically inserts the list of elements at the head of the list
843 * anchor is the pointer to the anchor (head) of the list
844 * first is the pointer to the first element to insert
845 * last is the pointer to the last element to insert
846 * disp is the displacement into the element to the chain pointer
847 *
848 */
849 .align 5
850 .globl EXT(hw_queue_atomic_list)
851
852 LEXT(hw_queue_atomic_list)
853
854 mr r7,r5 /* Make end point the same as start */
855 mr r8,r6 /* Copy the displacement also */
856
857 hw_queue_comm:
858 lwarx r9,0,r3 ; ?
859
860 hw_queue_comm2:
861 lwarx r9,0,r3 /* Pick up the anchor */
862 stwx r9,r8,r7 /* Chain that to the end of the new stuff */
863 stwcx. r4,0,r3 /* Try to chain into the front */
864 bne- hw_queue_comm2 /* Didn't make it, try again... */
865
866 blr /* Return... */
867
868 /*
869 * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
870 *
871 * Atomically removes the first element in a list and returns it.
872 * anchor is the pointer to the anchor (head) of the list
873 * disp is the displacement into the element to the chain pointer
874 * Returns element if found, 0 if empty.
875 *
876 */
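/*
 * For illustration, a rough sketch of using hw_queue_atomic and
 * hw_dequeue_atomic as a lock-free LIFO. The element type and allocator
 * are hypothetical; disp is the byte offset of the link field.
 *
 *	#include <stddef.h>				for offsetof
 *
 *	struct elem {
 *		struct elem	*next;			the chain pointer
 *		int		payload;
 *	};
 *
 *	unsigned int	anchor = 0;			empty list
 *	struct elem	*e = get_an_element();		hypothetical allocator
 *
 *	hw_queue_atomic(&anchor, (unsigned int *)e,
 *	    (unsigned int)offsetof(struct elem, next));
 *	e = (struct elem *)hw_dequeue_atomic(&anchor,
 *	    (unsigned int)offsetof(struct elem, next));
 */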
877 .align 5
878 .globl EXT(hw_dequeue_atomic)
879
880 LEXT(hw_dequeue_atomic)
881
882 mr r5,r3 /* Save the anchor */
883
884 hw_dequeue_comm:
885 lwarx r9,0,r3 ; ?
886
887 hw_dequeue_comm2:
888 lwarx r3,0,r5 /* Pick up the anchor */
889 mr. r3,r3 /* Is the list empty? */
890 beqlr- /* Leave, the list is empty... */
891 lwzx r9,r4,r3 /* Get the next in line */
892 stwcx. r9,0,r5 /* Try to chain into the front */
893 beqlr+ ; Got the thing, go away with it...
894 b hw_dequeue_comm2 ; Did not make it, try again...
895
896 /*
897 * void mutex_init(mutex_t* l, etap_event_t etap)
898 */
899
900 ENTRY(mutex_init,TAG_NO_FRAME_USED)
901
902 PROLOG(0)
903 li r10, 0
904 stw r10, MUTEX_ILK(r3) /* clear interlock */
905 stw r10, MUTEX_LOCKED(r3) /* clear locked flag */
906 sth r10, MUTEX_WAITERS(r3) /* init waiter count */
907
908 #if MACH_LDEBUG
909 stw r10, MUTEX_PC(r3) /* init caller pc */
910 stw r10, MUTEX_THREAD(r3) /* and owning thread */
911 li r10, MUTEX_TAG
912 stw r10, MUTEX_TYPE(r3) /* set lock type */
913 #endif /* MACH_LDEBUG */
914
915 #if ETAP_LOCK_TRACE
916 bl EXT(etap_mutex_init) /* init ETAP data */
917 #endif /* ETAP_LOCK_TRACE */
918
919 EPILOG
920 blr
921
922 /*
923 * void _mutex_lock(mutex_t*)
924 */
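/*
 * For orientation, a rough C-level restatement of the path implemented
 * below (a sketch; the names stand in for the MUTEX_ILK, MUTEX_LOCKED and
 * MUTEX_WAITERS fields the assembly actually uses):
 *
 *	for (;;) {
 *		if (!take_interlock_with_timeout(m))	via lockDisa below
 *			panic("example: no interlock");
 *		if (m->locked == 0) {
 *			m->locked = 1;
 *			release_interlock(m);		and re-enable preemption
 *			return;
 *		}
 *		mutex_lock_wait(m);	releases the interlock and blocks
 *	}
 */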
925
926 .align 5
927 .globl EXT(_mutex_lock)
928
929 LEXT(_mutex_lock)
930
931 #if CHECKNMI
932 mflr r12 ; (TEST/DEBUG)
933 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
934 mtlr r12 ; (TEST/DEBUG)
935 #endif
936
937 PROLOG(12)
938
939 #if ETAP_LOCK_TRACE
940 li r0, 0
941 stw r0,SWT_HI(r1) /* set wait time to 0 (HI) */
942 stw r0,SWT_LO(r1) /* set wait time to 0 (LO) */
943 stw r0,MISSED(r1) /* clear local miss marker */
944 #endif /* ETAP_LOCK_TRACE */
945
946 CHECK_SETUP(r12)
947 CHECK_MUTEX_TYPE()
948 CHECK_NO_SIMPLELOCKS()
949
950 .L_ml_retry:
951 #if 0
952 mfsprg r4,0 /* (TEST/DEBUG) */
953 lwz r4,PP_CPU_DATA(r4) /* (TEST/DEBUG) */
954 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
955 lwz r4,CPU_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
956 lis r5,0xAAAA /* (TEST/DEBUG) */
957 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
958 sc /* (TEST/DEBUG) */
959 #endif
960
961 bl lockDisa /* Go get a lock on the mutex's interlock lock */
962 mr. r4,r3 /* Did we get it? */
963 lwz r3,FM_ARG0(r1) /* Restore the lock address */
964 bne+ mlGotInt /* We got it just fine... */
965
966 lis r3,HIGH_ADDR(mutex_failed1) ; Get the failed mutex message
967 ori r3,r3,LOW_ADDR(mutex_failed1) ; Get the failed mutex message
968 bl EXT(panic) ; Call panic
969 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
970
971 .data
972 mutex_failed1:
973 STRINGD "We can't get a mutex interlock lock on mutex_lock\n\000"
974 .text
975
976 mlGotInt:
977
978 /* Note that there is no reason to do a load and reserve here. We already
979 hold the interlock lock and no one can touch this field unless they
980 have that, so, we're free to play */
981
982 lwz r4,MUTEX_LOCKED(r3) /* Get the mutex's lock field */
983
984 li r10,1 /* Set the lock value */
985
986 mr. r4,r4 /* So, can we have it? */
987 bne- mlInUse /* Nope, somebody's playing already... */
988
989 stw r10,MUTEX_LOCKED(r3) /* Take it unto ourselves */
990
991 #if MACH_LDEBUG
992 mfmsr r11
993 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
994 mtmsr r10
995 mfsprg r9,0 /* Get the per_proc block */
996 lwz r10,0(r1) /* Get previous save frame */
997 lwz r9,PP_CPU_DATA(r9) /* Point to the cpu data area */
998 lwz r10,FM_LR_SAVE(r10) /* Get our caller's address */
999 lwz r8, CPU_ACTIVE_THREAD(r9) /* Get the active thread */
1000 stw r10,MUTEX_PC(r3) /* Save our caller */
1001 mr. r8,r8 /* Is there any thread? */
1002 stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */
1003 beq- .L_ml_no_active_thread /* No owning thread... */
1004 lwz r9,THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
1005 addi r9,r9,1 /* Bump it up */
1006 stw r9,THREAD_MUTEX_COUNT(r8) /* Stash it back */
1007 .L_ml_no_active_thread:
1008 mtmsr r11
1009 #endif /* MACH_LDEBUG */
1010
1011 li r10,0 /* Get the unlock value */
1012 sync /* Push it all out */
1013 stw r10,MUTEX_ILK(r3) /* free the interlock */
1014
1015 #if ETAP_LOCK_TRACE
1016 mflr r4
1017 lwz r5,SWT_HI(r1)
1018 lwz r6,SWT_LO(r1)
1019 bl EXT(etap_mutex_hold) /* collect hold timestamp */
1020 #endif /* ETAP_LOCK_TRACE */
1021
1022 EPILOG /* Restore all saved registers */
1023
1024 #if MACH_RT
1025 b epStart /* Go enable preemption... */
1026 #else
1027 blr /* Return... */
1028 #endif
1029
1030 /*
1031 * We come to here when we have a resource conflict. In other words,
1032 * the mutex is held.
1033 */
1034
1035 mlInUse:
1036
1037 #if ETAP_LOCK_TRACE
1038 lwz r7,MISSED(r1)
1039 cmpwi r7,0 /* did we already take a wait timestamp ? */
1040 bne .L_ml_block /* yup. carry-on */
1041 bl EXT(etap_mutex_miss) /* get wait timestamp */
1042 stw r3,SWT_HI(r1) /* store timestamp */
1043 stw r4,SWT_LO(r1)
1044 li r7, 1 /* mark wait timestamp as taken */
1045 stw r7,MISSED(r1)
1046 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1047 .L_ml_block:
1048 #endif /* ETAP_LOCK_TRACE */
1049
1050 CHECK_SETUP(r12)
1051 CHECK_MYLOCK(MUTEX_THREAD) /* Assert we don't own the lock already */
1052
1053
1054 /* Note that we come in here with the interlock set. The wait routine
1055 * will unlock it before waiting.
1056 */
1057 bl EXT(mutex_lock_wait) /* Wait for our turn at the lock */
1058
1059 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1060 b .L_ml_retry /* and try again... */
1061
1062
1063 /*
1064 * void _mutex_try(mutex_t*)
1065 *
1066 */
1067
1068 .align 5
1069 .globl EXT(_mutex_try)
1070
1071 LEXT(_mutex_try)
1072
1073 PROLOG(8) /* reserve space for SWT_HI and SWT_LO */
1074
1075 #if ETAP_LOCK_TRACE
1076 li r5, 0
1077 stw r5, SWT_HI(r1) /* set wait time to 0 (HI) */
1078 stw r5, SWT_LO(r1) /* set wait time to 0 (LO) */
1079 #endif /* ETAP_LOCK_TRACE */
1080
1081 #if 0
1082 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1083 lis r5,0xBBBB /* (TEST/DEBUG) */
1084 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1085 sc /* (TEST/DEBUG) */
1086 #endif
1087 CHECK_SETUP(r12)
1088 CHECK_MUTEX_TYPE()
1089 CHECK_NO_SIMPLELOCKS()
1090
1091 lwz r6,MUTEX_LOCKED(r3) /* Quick check */
1092 mr. r6,r6 /* to see if someone has this lock already */
1093 bne- mtFail /* Someone's got it already... */
1094
1095 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1096 mr. r4,r3 /* Did we get it? */
1097 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1098 bne+ mtGotInt /* We got it just fine... */
1099
1100 lis r3,HIGH_ADDR(mutex_failed2) ; Get the failed mutex message
1101 ori r3,r3,LOW_ADDR(mutex_failed2) ; Get the failed mutex message
1102 bl EXT(panic) ; Call panic
1103 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1104
1105 .data
1106 mutex_failed2:
1107 STRINGD "We can't get a mutex interlock lock on mutex_try\n\000"
1108 .text
1109
1110 mtGotInt:
1111
1112 /* Note that there is no reason to do a load and reserve here. We already
1113 hold the interlock and no one can touch this field unless they
1114 have that, so, we're free to play */
1115
1116 lwz r4,MUTEX_LOCKED(r3) /* Get the mutex's lock field */
1117
1118 li r10,1 /* Set the lock value */
1119
1120 mr. r4,r4 /* So, can we have it? */
1121 bne- mtInUse /* Nope, somebody's playing already... */
1122
1123 stw r10,MUTEX_LOCKED(r3) /* Take it unto ourselves */
1124
1125 #if MACH_LDEBUG
1126 mfmsr r11
1127 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1128 mtmsr r10
1129 mfsprg r9,0 /* Get the per_proc block */
1130 lwz r10,0(r1) /* Get previous save frame */
1131 lwz r9,PP_CPU_DATA(r9) /* Point to the cpu data area */
1132 lwz r10,FM_LR_SAVE(r10) /* Get our caller's address */
1133 lwz r8, CPU_ACTIVE_THREAD(r9) /* Get the active thread */
1134 stw r10,MUTEX_PC(r3) /* Save our caller */
1135 mr. r8,r8 /* Is there any thread? */
1136 stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */
1137 beq- .L_mt_no_active_thread /* No owning thread... */
1138 lwz r9, THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
1139 addi r9, r9, 1 /* Bump it up */
1140 stw r9, THREAD_MUTEX_COUNT(r8) /* Stash it back */
1141 .L_mt_no_active_thread:
1142 mtmsr r11
1143 #endif /* MACH_LDEBUG */
1144
1145 li r10,0 /* Get the unlock value */
1146 sync /* Push it all out */
1147 stw r10,MUTEX_ILK(r3) /* free the interlock */
1148
1149 #if ETAP_LOCK_TRACE
1150 lwz r4,0(r1) /* Back chain the stack */
1151 lwz r5,SWT_HI(r1)
1152 lwz r4,FM_LR_SAVE(r4) /* Get our caller's address */
1153 lwz r6,SWT_LO(r1)
1154 bl EXT(etap_mutex_hold) /* collect hold timestamp */
1155 #endif /* ETAP_LOCK_TRACE */
1156
1157 #if MACH_RT
1158 bl epStart /* Go enable preemption... */
1159 #endif
1160 li r3, 1
1161 EPILOG /* Restore all saved registers */
1162 blr /* Return... */
1163
1164 /*
1165 * We come to here when we have a resource conflict. In other words,
1166 * the mutex is held.
1167 */
1168
1169 mtInUse: li r10,0 /* Get the unlock value */
1170 sync /* Push it all out */
1171 stw r10,MUTEX_ILK(r3) /* free the interlock */
1172 #if MACH_RT
1173 bl epStart /* Go enable preemption... */
1174 #endif
1175
1176 mtFail: li r3,0 /* Set failure code */
1177 EPILOG /* Restore all saved registers */
1178 blr /* Return... */
1179
1180
1181 /*
1182 * void mutex_unlock(mutex_t* l)
1183 */
1184
1185 .align 5
1186 .globl EXT(mutex_unlock)
1187
1188 LEXT(mutex_unlock)
1189
1190 PROLOG(0)
1191
1192 #if ETAP_LOCK_TRACE
1193 bl EXT(etap_mutex_unlock) /* collect ETAP data */
1194 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1195 #endif /* ETAP_LOCK_TRACE */
1196
1197 CHECK_SETUP(r12)
1198 CHECK_MUTEX_TYPE()
1199 CHECK_THREAD(MUTEX_THREAD)
1200
1201 #if 0
1202 mfsprg r4,0 /* (TEST/DEBUG) */
1203 lwz r4,PP_CPU_DATA(r4) /* (TEST/DEBUG) */
1204 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1205 lwz r4,CPU_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
1206 lis r5,0xCCCC /* (TEST/DEBUG) */
1207 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1208 sc /* (TEST/DEBUG) */
1209 #endif
1210 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1211 mr. r4,r3 /* Did we get it? */
1212 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1213 bne+ muGotInt /* We got it just fine... */
1214
1215 lis r3,HIGH_ADDR(mutex_failed3) ; Get the failed mutex message
1216 ori r3,r3,LOW_ADDR(mutex_failed3) ; Get the failed mutex message
1217 bl EXT(panic) ; Call panic
1218 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1219
1220 .data
1221 mutex_failed3:
1222 STRINGD "We can't get a mutex interlock lock on mutex_unlock\n\000"
1223 .text
1224
1225
1226 muGotInt:
1227 lhz r10,MUTEX_WAITERS(r3) /* are there any waiters ? */
1228 cmpwi r10,0
1229 beq+ muUnlock /* Nope, we're done... */
1230
1231 bl EXT(mutex_unlock_wakeup) /* yes, wake a thread */
1232 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1233 li r10,0 /* Get unlock value */
1234
1235 muUnlock:
1236 #if MACH_LDEBUG
1237 mfmsr r11
1238 rlwinm r9,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1239 mtmsr r9
1240 mfsprg r9,0
1241 lwz r9,PP_CPU_DATA(r9)
1242 lwz r9,CPU_ACTIVE_THREAD(r9)
1243 stw r10,MUTEX_THREAD(r3) /* disown thread */
1244 cmpwi r9,0
1245 beq- .L_mu_no_active_thread
1246 lwz r8,THREAD_MUTEX_COUNT(r9)
1247 subi r8,r8,1
1248 stw r8,THREAD_MUTEX_COUNT(r9)
1249 .L_mu_no_active_thread:
1250 mtmsr r11
1251 #endif /* MACH_LDEBUG */
1252
1253 stw r10,MUTEX_LOCKED(r3) /* release the mutex */
1254 sync /* Make sure it's all there before we release */
1255 stw r10,MUTEX_ILK(r3) /* unlock the interlock */
1256
1257 EPILOG /* Deal with the stack now, enable_preemption doesn't always want one */
1258 #if MACH_RT
1259 b epStart /* Go enable preemption... */
1260 #else
1261 blr /* Return... */
1262 #endif
1263
1264 /*
1265 * void interlock_unlock(hw_lock_t lock)
1266 */
1267
1268 .align 5
1269 .globl EXT(interlock_unlock)
1270
1271 LEXT(interlock_unlock)
1272
1273 #if 0
1274 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1275 lis r5,0xDDDD /* (TEST/DEBUG) */
1276 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1277 sc /* (TEST/DEBUG) */
1278 #endif
1279 li r10,0
1280 sync
1281 stw r10,0(r3)
1282
1283 #if MACH_RT
1284 b epStart /* Go enable preemption... */
1285 #else
1286 blr /* Return... */
1287 #endif
1288
1289 #if MACH_RT
1290 /*
1291 * Here is where we enable preemption. We need to be protected
1292 * against ourselves, we can't chance getting interrupted and modifying
1293 * our processor-wide preemption count after we've loaded it up. So,
1294 * we need to disable all 'rupts. Actually, we could use a compare
1295 * and swap to do this, but, since there are no MP considerations
1296 * (we are dealing with a CPU local field) it is much, much faster
1297 * to disable.
1298 *
1299 * Note that if we are not genned for MP, the calls here will be no-opped via
1300 * a #define, and since the _mp forms are the same, a #define is likewise
1301 * used to route to the other forms.
1302 */
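/*
 * For orientation, a rough C-level restatement of the enable path below
 * (a sketch; the names stand in for the CPU_PREEMPTION_LEVEL and
 * PP_NEED_AST fields the assembly actually uses):
 *
 *	disable_interrupts();
 *	if (--cpu_data->preemption_level == 0 &&
 *	    (per_proc->need_ast & AST_URGENT) &&
 *	    caller_had_interrupts_enabled)
 *		take the DoPreemptCall firmware call (sc);
 *	restore_interrupts();
 */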
1303
1304 /* This version does not check if we get preempted or not */
1305
1306
1307 .align 4
1308 .globl EXT(_enable_preemption_no_check)
1309
1310 LEXT(_enable_preemption_no_check)
1311 cmplw cr1,r1,r1 /* Force zero cr so we know not to check if preempted */
1312 b epCommn /* Join up with the other enable code... */
1313
1314
1315 /* This version checks if we get preempted or not */
1316
1317 .align 5
1318 .globl EXT(_enable_preemption)
1319
1320 LEXT(_enable_preemption)
1321
1322 epStart: cmplwi cr1,r1,0 /* Force non-zero cr so we know to check if preempted */
1323
1324 /*
1325 * Common enable preemption code
1326 */
1327
1328 epCommn: mfmsr r9 /* Save the old MSR */
1329 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1330 mtmsr r8 /* Interrupts off */
1331
1332 mfsprg r3,0 /* Get the per_proc block */
1333 lwz r6,PP_CPU_DATA(r3) /* Get the pointer to the CPU data from per proc */
1334 li r8,-1 /* Get a decrementer */
1335 lwz r5,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */
1336 add. r5,r5,r8 /* Bring down the disable count */
1337 #if 0
1338 mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep from interrupting too early
1339 mr. r4,r4 ; (TEST/DEBUG)
1340 beq- epskptrc0 ; (TEST/DEBUG)
1341 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1342 lis r4,0xBBBB ; (TEST/DEBUG)
1343 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1344 sc ; (TEST/DEBUG)
1345 epskptrc0: mr. r5,r5 ; (TEST/DEBUG)
1346 #endif
1347 #if MACH_LDEBUG
1348 blt- epTooFar /* Yeah, we did... */
1349 #endif /* MACH_LDEBUG */
1350 stw r5,CPU_PREEMPTION_LEVEL(r6) /* Save it back */
1351
1352 beq+ epCheckPreempt /* Go check if we need to be preempted... */
1353
1354 epNoCheck: mtmsr r9 /* Restore the interrupt level */
1355 blr /* Leave... */
1356
1357 #if MACH_LDEBUG
1358 epTooFar:
1359 lis r6,HIGH_ADDR(EXT(panic)) /* First half of panic call */
1360 lis r3,HIGH_ADDR(epTooFarStr) /* First half of panic string */
1361 ori r6,r6,LOW_ADDR(EXT(panic)) /* Second half of panic call */
1362 ori r3,r3,LOW_ADDR(epTooFarStr) /* Second half of panic string */
1363 mtlr r6 /* Get the address of the panic routine */
1364 mtmsr r9 /* Restore interruptions */
1365 blrl /* Panic... */
1366
1367 .data
1368 epTooFarStr:
1369 STRINGD "_enable_preemption: preemption_level <= 0!\000"
1370 .text
1371 #endif /* MACH_LDEBUG */
1372
1373 .align 5
1374
1375 epCheckPreempt:
1376 lwz r7,PP_NEED_AST(r3) /* Get the AST request address */
1377 li r5,AST_URGENT /* Get the requests we do honor */
1378 lwz r7,0(r7) /* Get the actual, real live, extra special AST word */
1379 lis r0,HIGH_ADDR(DoPreemptCall) /* Just in case, get the top of firmware call */
1380 and. r7,r7,r5 ; Should we preempt?
1381 ori r0,r0,LOW_ADDR(DoPreemptCall) /* Merge in bottom part */
1382 beq+ epCPno ; No preemption here...
1383
1384 andi. r3,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off
1385
1386 epCPno: mtmsr r9 /* Allow interrupts if we can */
1387 beqlr+ ; We probably will not preempt...
1388 sc /* Do the preemption */
1389 blr /* Now, go away now... */
1390
1391 /*
1392 * Here is where we disable preemption. Since preemption is on a
1393 * per processor basis (a thread runs on one CPU at a time) we don't
1394 * need any cross-processor synchronization. We do, however, need to
1395 * be interrupt safe, so we don't preempt while in the process of
1396 * disabling it. We could use SPLs, but since we always want complete
1397 * disablement, and this is platform specific code, we'll just kick the
1398 * MSR. We'll save a couple of orders of magnitude over using SPLs.
1399 */
1400
1401 .align 5
1402
1403 nop ; Use these 5 nops to force daPreComm
1404 nop ; to a line boundary.
1405 nop
1406 nop
1407 nop
1408
1409 .globl EXT(_disable_preemption)
1410
1411 LEXT(_disable_preemption)
1412
1413 daPreAll: mfmsr r9 /* Save the old MSR */
1414 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1415 mtmsr r8 /* Interrupts off */
1416
1417 daPreComm: mfsprg r6,0 /* Get the per_proc block */
1418 lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
1419 lwz r5,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */
1420 addi r5,r5,1 /* Bring up the disable count */
1421 stw r5,CPU_PREEMPTION_LEVEL(r6) /* Save it back */
1422 #if 0
1423 mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep from interrupting too early
1424 mr. r4,r4 ; (TEST/DEBUG)
1425 beq- epskptrc1 ; (TEST/DEBUG)
1426 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1427 lis r4,0xAAAA ; (TEST/DEBUG)
1428 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1429 sc ; (TEST/DEBUG)
1430 epskptrc1: ; (TEST/DEBUG)
1431 #endif
1432
1433 ;
1434 ; Set PREEMPTSTACK above to enable a preemption traceback stack.
1435 ;
1436 ; NOTE: make sure that PREEMPTSTACK in aligned_data is
1437 ; set the same as it is here. This is the number of
1438 ; traceback entries we can handle per processor
1439 ;
1440 ; A value of 0 disables the stack.
1441 ;
1442 #if PREEMPTSTACK
1443 cmplwi r5,PREEMPTSTACK ; Maximum depth
1444 lwz r6,CPU_ACTIVE_THREAD(r6) ; Get the pointer to the currently active thread
1445 bgt- nopredeb ; Too many to stack...
1446 mr. r6,r6 ; During boot?
1447 beq- nopredeb ; Yes, do not do backtrace...
1448 lwz r6,THREAD_TOP_ACT(r6) ; Point to the active activation
1449 lwz r6,ACT_MACT_PCB(r6) ; Get the last savearea used
1450 mr. r0,r6 ; Any saved context?
1451 beq- nosaveds ; No...
1452 lwz r0,saver1(r6) ; Get end of savearea chain
1453
1454 nosaveds: li r11,0 ; Clear callers callers callers return
1455 li r10,0 ; Clear callers callers callers callers return
1456 li r8,0 ; Clear callers callers callers callers callers return
1457 lwz r2,0(r1) ; Get callers callers stack frame
1458 lwz r12,8(r2) ; Get our callers return
1459 lwz r4,0(r2) ; Back chain
1460
1461 xor r2,r4,r2 ; Form difference
1462 cmplwi r2,8192 ; Within a couple of pages?
1463 mr r2,r4 ; Move register
1464 bge- nosaveher2 ; No, no back chain then...
1465 lwz r11,8(r2) ; Get our callers return
1466 lwz r4,0(r2) ; Back chain
1467
1468 xor r2,r4,r2 ; Form difference
1469 cmplwi r2,8192 ; Within a couple of pages?
1470 mr r2,r4 ; Move register
1471 bge- nosaveher2 ; No, no back chain then...
1472 lwz r10,8(r2) ; Get our callers return
1473 lwz r4,0(r2) ; Back chain
1474
1475 xor r2,r4,r2 ; Form difference
1476 cmplwi r2,8192 ; Within a couple of pages?
1477 mr r2,r4 ; Move register
1478 bge- nosaveher2 ; No, no back chain then...
1479 lwz r8,8(r2) ; Get our callers return
1480
1481 nosaveher2:
1482 addi r5,r5,-1 ; Get index to slot
1483 mfspr r6,pir ; Get our processor
1484 mflr r4 ; Get our return
1485 rlwinm r6,r6,8,0,23 ; Index to processor slot
1486 lis r2,hi16(EXT(DBGpreempt)) ; Stack high order
1487 rlwinm r5,r5,4,0,27 ; Index to stack slot
1488 ori r2,r2,lo16(EXT(DBGpreempt)) ; Stack low order
1489 add r2,r2,r5 ; Point to slot
1490 add r2,r2,r6 ; Move to processor
1491 stw r4,0(r2) ; Save our return
1492 stw r11,4(r2) ; Save callers caller
1493 stw r10,8(r2) ; Save callers callers caller
1494 stw r8,12(r2) ; Save callers callers callers caller
1495 nopredeb:
1496 #endif
1497 mtmsr r9 /* Allow interruptions now */
1498
1499 blr /* Return... */
1500
1501 /*
1502 * Return the active thread for both inside and outside osfmk consumption
1503 */
1504
1505 .align 5
1506 .globl EXT(current_thread)
1507
1508 LEXT(current_thread)
1509
1510 mfmsr r9 /* Save the old MSR */
1511 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1512 mtmsr r8 /* Interrupts off */
1513 mfsprg r6,0 /* Get the per_proc */
1514 lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
1515 lwz r3,CPU_ACTIVE_THREAD(r6) /* Get the active thread */
1516 mtmsr r9 /* Restore interruptions to entry */
1517 blr /* Return... */
1518
1519
1520 /*
1521 * Return the current preemption level
1522 */
1523
1524 .align 5
1525 .globl EXT(get_preemption_level)
1526
1527 LEXT(get_preemption_level)
1528
1529 mfmsr r9 /* Save the old MSR */
1530 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1531 mtmsr r8 /* Interrupts off */
1532 mfsprg r6,0 /* Get the per_proc */
1533 lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
1534 lwz r3,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */
1535 mtmsr r9 /* Restore interruptions to entry */
1536 blr /* Return... */
1537
1538
1539 /*
1540 * Return the simple lock count
1541 */
1542
1543 .align 5
1544 .globl EXT(get_simple_lock_count)
1545
1546 LEXT(get_simple_lock_count)
1547
1548 mfmsr r9 /* Save the old MSR */
1549 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1550 mtmsr r8 /* Interrupts off */
1551 mfsprg r6,0 /* Get the per_proc */
1552 lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */
1553 lwz r3,CPU_SIMPLE_LOCK_COUNT(r6) /* Get the simple lock count */
1554 mtmsr r9 /* Restore interruptions to entry */
1555 blr /* Return... */
1556
1557 #endif /* MACH_RT */