1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25
26#include <cpus.h>
27#include <mach_assert.h>
28#include <mach_ldebug.h>
29#include <mach_rt.h>
30#include <ppc/asm.h>
31#include <ppc/proc_reg.h>
32#include <assym.s>
33
34#define STRING ascii
35
36#define SWT_HI 0+FM_SIZE
37#define SWT_LO 4+FM_SIZE
38#define MISSED 8+FM_SIZE
39
40#define ILK_LOCKED 0x01
41#define WAIT_FLAG 0x02
42#define TH_FN_OWNED 0x01
43
44#define CHECKNMI 0
45#define CHECKLOCKS 1
46
47#define PROLOG(space) \
48 stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
49 mflr r0 __ASMNL__ \
50 stw r3,FM_ARG0(r1) __ASMNL__ \
51 stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__
52
53#define EPILOG \
54 lwz r1,0(r1) __ASMNL__ \
55 lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
56 mtlr r0 __ASMNL__
57
58#if MACH_LDEBUG && CHECKLOCKS
59/*
60 * Routines for general lock debugging.
61 */
62
63/*
64 * Gets lock check flags in CR6: CR bits 24-27
65 */
66
67#define CHECK_SETUP(rg) \
68 lbz rg,dgFlags(0) __ASMNL__ \
69 mtcrf 2,rg __ASMNL__
70
71
72/*
73 * Checks for expected lock types and calls "panic" on
74 * mismatch. Detects calls to Mutex functions with
75 * type simplelock and vice versa.
76 */
77#define CHECK_MUTEX_TYPE() \
78 bt 24+disLktypeb,1f __ASMNL__ \
79 lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
80 cmpwi r10,MUTEX_TAG __ASMNL__ \
81 beq+ 1f __ASMNL__ \
82 lis r3,hi16(not_a_mutex) __ASMNL__ \
83 ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
84 bl EXT(panic) __ASMNL__ \
85 lwz r3,FM_ARG0(r1) __ASMNL__ \
861:
87
88 .data
89not_a_mutex:
90 STRINGD "not a mutex!\n\000"
91 .text
92
93#define CHECK_SIMPLE_LOCK_TYPE() \
94 bt 24+disLktypeb,1f __ASMNL__ \
95 lhz r10,SLOCK_TYPE(r3) __ASMNL__ \
96 cmpwi r10,USLOCK_TAG __ASMNL__ \
97 beq+ 1f __ASMNL__ \
98 lis r3,hi16(not_a_slock) __ASMNL__ \
99 ori r3,r3,lo16(not_a_slock) __ASMNL__ \
100 bl EXT(panic) __ASMNL__ \
101 lwz r3,FM_ARG0(r1) __ASMNL__ \
1021:
103
104 .data
105not_a_slock:
106 STRINGD "not a simple lock!\n\000"
107 .text
108
109#define CHECK_NO_SIMPLELOCKS() \
110 bt 24+disLkNmSimpb,2f __ASMNL__ \
111 lis r10,hi16(MASK(MSR_VEC)) __ASMNL__ \
112 ori r10,r10,lo16(MASK(MSR_FP)) __ASMNL__ \
113 mfmsr r11 __ASMNL__ \
114 andc r11,r11,r10 __ASMNL__ \
115 ori r10,r10,lo16(MASK(MSR_EE)) __ASMNL__ \
116 andc r10,r11,r10 __ASMNL__ \
117 mtmsr r10 __ASMNL__ \
118 isync __ASMNL__ \
119 mfsprg r10,0 __ASMNL__ \
120 lwz r10,PP_SIMPLE_LOCK_CNT(r10) __ASMNL__ \
121 cmpwi r10,0 __ASMNL__ \
122 beq+ 1f __ASMNL__ \
123 lis r3,hi16(simple_locks_held) __ASMNL__ \
124 ori r3,r3,lo16(simple_locks_held) __ASMNL__ \
125 bl EXT(panic) __ASMNL__ \
126 lwz r3,FM_ARG0(r1) __ASMNL__ \
1271: __ASMNL__ \
128 mtmsr r11 __ASMNL__ \
1292:
130
131 .data
132simple_locks_held:
133 STRINGD "simple locks held!\n\000"
134 .text
135
136/*
137 * Verifies return to the correct thread in "unlock" situations.
138 */
139#define CHECK_THREAD(thread_offset) \
140 bt 24+disLkThreadb,2f __ASMNL__ \
141 lis r10,hi16(MASK(MSR_VEC)) __ASMNL__ \
142 ori r10,r10,lo16(MASK(MSR_FP)) __ASMNL__ \
143 mfmsr r11 __ASMNL__ \
144 andc r11,r11,r10 __ASMNL__ \
145 ori r10,r10,lo16(MASK(MSR_EE)) __ASMNL__ \
146 andc r10,r11,r10 __ASMNL__ \
147 mtmsr r10 __ASMNL__ \
148 isync __ASMNL__ \
149 mfsprg r10,1 __ASMNL__ \
150 lwz r10,ACT_THREAD(r10) __ASMNL__ \
151 cmpwi r10,0 __ASMNL__ \
152 beq- 1f __ASMNL__ \
153 lwz r9,thread_offset(r3) __ASMNL__ \
154 cmpw r9,r10 __ASMNL__ \
155 beq+ 1f __ASMNL__ \
156 lis r3,hi16(wrong_thread) __ASMNL__ \
157 ori r3,r3,lo16(wrong_thread) __ASMNL__ \
158 bl EXT(panic) __ASMNL__ \
159 lwz r3,FM_ARG0(r1) __ASMNL__ \
1601: __ASMNL__ \
161 mtmsr r11 __ASMNL__ \
1622:
163 .data
164wrong_thread:
165 STRINGD "wrong thread!\n\000"
166 .text
167
168#define CHECK_MYLOCK(thread_offset) \
169 bt 24+disLkMyLckb,2f __ASMNL__ \
170 lis r10,hi16(MASK(MSR_VEC)) __ASMNL__ \
171 ori r10,r10,lo16(MASK(MSR_FP)) __ASMNL__ \
172 mfmsr r11 __ASMNL__ \
173 andc r11,r11,r10 __ASMNL__ \
174 ori r10,r10,lo16(MASK(MSR_EE)) __ASMNL__ \
175 andc r10,r11,r10 __ASMNL__ \
176 mtmsr r10 __ASMNL__ \
177 isync __ASMNL__ \
178 mfsprg r10,1 __ASMNL__ \
179 lwz r10,ACT_THREAD(r10) __ASMNL__ \
180 cmpwi r10,0 __ASMNL__ \
181 beq- 1f __ASMNL__ \
182 lwz r9, thread_offset(r3) __ASMNL__ \
183 cmpw r9,r10 __ASMNL__ \
184 bne+ 1f __ASMNL__ \
185 lis r3, hi16(mylock_attempt) __ASMNL__ \
186 ori r3,r3,lo16(mylock_attempt) __ASMNL__ \
187 bl EXT(panic) __ASMNL__ \
188 lwz r3,FM_ARG0(r1) __ASMNL__ \
1891: __ASMNL__ \
190 mtmsr r11 __ASMNL__ \
1912:
192
193 .data
194mylock_attempt:
195 STRINGD "mylock attempt!\n\000"
196 .text
197
198#else /* MACH_LDEBUG */
199
200#define CHECK_SETUP(rg)
201#define CHECK_MUTEX_TYPE()
202#define CHECK_SIMPLE_LOCK_TYPE()
203#define CHECK_THREAD(thread_offset)
204#define CHECK_NO_SIMPLELOCKS()
205#define CHECK_MYLOCK(thread_offset)
206
207#endif /* MACH_LDEBUG */
208
209/*
210 * void hw_lock_init(hw_lock_t)
211 *
212 * Initialize a hardware lock.
213 */
214 .align 5
215 .globl EXT(hw_lock_init)
216
217LEXT(hw_lock_init)
218
219 li r0, 0 ; set lock to free == 0
220 stw r0, 0(r3) ; Initialize the lock
221 blr
222
223/*
224 * void hw_lock_unlock(hw_lock_t)
225 *
226 * Unconditionally release lock.
227 * Release preemption level.
228 */
229 .align 5
230 .globl EXT(hw_lock_unlock)
231
232LEXT(hw_lock_unlock)
233
234 .globl EXT(hwulckPatch_isync)
235LEXT(hwulckPatch_isync)
236 isync
237 .globl EXT(hwulckPatch_eieio)
238LEXT(hwulckPatch_eieio)
239 eieio
240 li r0, 0 ; set lock to free
241 stw r0, 0(r3)
242
243 b epStart ; Go enable preemption...
244
245/*
246 * void hw_lock_lock(hw_lock_t)
247 *
248 * Acquire lock, spinning until it becomes available.
249 * Return with preemption disabled.
250 * We will just set a default timeout and jump into the NORMAL timeout lock.
251 */
252 .align 5
253 .globl EXT(hw_lock_lock)
254
255LEXT(hw_lock_lock)
256lockDisa:
257 li r4,0 ; no timeout value
258 b lckcomm ; Join on up...
259
260/*
261 * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
262 *
263 * Try to acquire spin-lock. Return success (1) or failure (0).
264 * Attempt will fail after timeout ticks of the timebase.
265 * We try fairly hard to get this lock. We disable for interruptions, but
266 * reenable after a "short" timeout (128 ticks, we may want to change this).
267 * After checking to see if the large timeout value (passed in) has expired and a
 268 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
269 * we return either in abject failure, or disable and go back to the lock sniff routine.
270 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
271 */
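/*
 * Caller-side usage sketch (illustrative only, not part of the original
 * source; the timeout value and panic message are made up for the example):
 *
 *	extern unsigned int	hw_lock_to(hw_lock_t, unsigned int timeout);
 *	extern void		hw_lock_unlock(hw_lock_t);
 *
 *	static void example_timed_acquire(hw_lock_t lock)
 *	{
 *		if (!hw_lock_to(lock, 1000000))		// spin for at most ~1M timebase ticks
 *			panic("example_timed_acquire: spinlock timeout");
 *		// ... short critical section; preemption is disabled here ...
 *		hw_lock_unlock(lock);			// drops the lock and re-enables preemption
 *	}
 */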
272 .align 5
273 .globl EXT(hw_lock_to)
274
275LEXT(hw_lock_to)
276
277#if CHECKNMI
278 mflr r12 ; (TEST/DEBUG)
279 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
280 mtlr r12 ; (TEST/DEBUG)
281#endif
282
283lckcomm:
284 mfsprg r6,1 ; Get the current activation
285 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
286 addi r5,r5,1 ; Bring up the disable count
287 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
288 mr r5,r3 ; Get the address of the lock
289 li r8,0 ; Set r8 to zero
290
291lcktry: lwarx r6,0,r5 ; Grab the lock value
292 andi. r3,r6,ILK_LOCKED ; Is it locked?
293 ori r6,r6,ILK_LOCKED ; Set interlock
294 bne-- lckspin ; Yeah, wait for it to clear...
295 stwcx. r6,0,r5 ; Try to seize that there durn lock
296 bne-- lcktry ; Couldn't get it...
297 li r3,1 ; return true
 298 isync ; Make sure we don't use a speculatively loaded value
299 blr ; Go on home...
300
301lckspin: li r6,lgKillResv ; Get killing field
302 stwcx. r6,0,r6 ; Kill reservation
303
304 mr. r4,r4 ; Test timeout value
305 bne++ lockspin0
306 lis r4,hi16(EXT(LockTimeOut)) ; Get the high part
307 ori r4,r4,lo16(EXT(LockTimeOut)) ; And the low part
308 lwz r4,0(r4) ; Get the timeout value
309lockspin0:
310 mr. r8,r8 ; Is r8 set to zero
 311 bne++ lockspin1 ; No, not the first spin attempt...
312 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
313 mfmsr r9 ; Get the MSR value
314 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
315 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
316 andc r9,r9,r0 ; Clear FP and VEC
317 andc r7,r9,r7 ; Clear EE as well
318 mtmsr r7 ; Turn off interruptions
319 isync ; May have turned off vec and fp here
320 mftb r8 ; Get timestamp on entry
321 b lcksniff
322
323lockspin1: mtmsr r7 ; Turn off interruptions
324 mftb r8 ; Get timestamp on entry
325
326lcksniff: lwz r3,0(r5) ; Get that lock in here
327 andi. r3,r3,ILK_LOCKED ; Is it free yet?
328 beq++ lckretry ; Yeah, try for it again...
329
330 mftb r10 ; Time stamp us now
331 sub r10,r10,r8 ; Get the elapsed time
332 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
333 blt++ lcksniff ; Not yet...
334
335 mtmsr r9 ; Say, any interrupts pending?
336
 337; The following instructions force the pipeline to be interlocked so that only one
 338; instruction is issued per cycle. This ensures that we stay enabled for a long enough
339; time; if it's too short, pending interruptions will not have a chance to be taken
340
341 subi r4,r4,128 ; Back off elapsed time from timeout value
342 or r4,r4,r4 ; Do nothing here but force a single cycle delay
343 mr. r4,r4 ; See if we used the whole timeout
344 li r3,0 ; Assume a timeout return code
345 or r4,r4,r4 ; Do nothing here but force a single cycle delay
346
347 ble-- lckfail ; We failed
348 b lockspin1 ; Now that we've opened an enable window, keep trying...
349lckretry:
350 mtmsr r9 ; Restore interrupt state
 351 li r8,1 ; Ensure that r8 is not 0
352 b lcktry
353lckfail: ; We couldn't get the lock
354 li r3,0 ; Set failure return code
355 blr ; Return, head hanging low...
356
357
358/*
359 * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
360 *
361 * Try to acquire spin-lock. The second parameter is the bit mask to test and set.
 362 * Multiple bits may be set. Return success (1) or failure (0).
363 * Attempt will fail after timeout ticks of the timebase.
364 * We try fairly hard to get this lock. We disable for interruptions, but
365 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
366 * After checking to see if the large timeout value (passed in) has expired and a
 367 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
368 * we return either in abject failure, or disable and go back to the lock sniff routine.
369 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
370 */
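/*
 * Usage sketch (illustrative only, not part of the original source; the bit
 * value and timeout are made up). hw_lock_bit sets the masked bits when none
 * of them are currently set; hw_unlock_bit (below) clears them again.
 *
 *	#define EXAMPLE_BUSY	0x00000004	// hypothetical lock bit in a status word
 *
 *	extern unsigned int	hw_lock_bit(hw_lock_t, unsigned int, unsigned int);
 *	extern unsigned int	hw_unlock_bit(hw_lock_t, unsigned int);
 *
 *	if (hw_lock_bit(&status_word, EXAMPLE_BUSY, 100000)) {
 *		// the bit was clear and is now set; we own the "busy" state
 *		hw_unlock_bit(&status_word, EXAMPLE_BUSY);
 *	}
 */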
371 .align 5
372 .globl EXT(hw_lock_bit)
373
374LEXT(hw_lock_bit)
375
376 li r10,0
377
378bittry: lwarx r6,0,r3 ; Grab the lock value
379 and. r0,r6,r4 ; See if any of the lock bits are on
380 or r6,r6,r4 ; Turn on the lock bits
381 bne-- bitspin ; Yeah, wait for it to clear...
382 stwcx. r6,0,r3 ; Try to seize that there durn lock
383 bne-- bittry ; Just start up again if the store failed...
384
385 li r3,1 ; Set good return code
 386 isync ; Make sure we don't use a speculatively loaded value
387 blr
388
389 .align 5
390
391bitspin: li r11,lgKillResv ; Get killing field
392 stwcx. r11,0,r11 ; Kill reservation
393
 394 mr. r10,r10 ; Is r10 set to zero
395 li r10,1 ; Close gate
396 beq-- bit1sttime ; If yes, first spin attempt
397
398bitspin0: mtmsr r7 ; Turn off interruptions
399 mftb r8 ; Get the low part of the time base
400
401bitsniff: lwz r6,0(r3) ; Get that lock in here
402 and. r0,r6,r4 ; See if any of the lock bits are on
403 beq++ bitretry ; Yeah, try for it again...
404
405 mftb r6 ; Time stamp us now
406 sub r6,r6,r8 ; Get the elapsed time
407 cmplwi r6,128 ; Have we been spinning for 128 tb ticks?
408 blt++ bitsniff ; Not yet...
409
410 mtmsr r9 ; Say, any interrupts pending?
411
 412; The following instructions force the pipeline to be interlocked so that only one
 413; instruction is issued per cycle. This ensures that we stay enabled for a long enough
414; time. If it's too short, pending interruptions will not have a chance to be taken
415
416 subi r5,r5,128 ; Back off elapsed time from timeout value
417 or r5,r5,r5 ; Do nothing here but force a single cycle delay
418 mr. r5,r5 ; See if we used the whole timeout
419 or r5,r5,r5 ; Do nothing here but force a single cycle delay
420
421 bgt++ bitspin0 ; Now that we've opened an enable window, keep trying...
422
423 li r3,0 ; Set failure return code
424 blr ; Return, head hanging low...
425
426bitretry: mtmsr r9 ; Enable for interruptions
427 b bittry
428
429bit1sttime: lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
430 mfmsr r9 ; Get the MSR value
431 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
432 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
433 andc r9,r9,r0 ; Clear FP and VEC
434 andc r7,r9,r7 ; Clear EE as well
435 mtmsr r7 ; Turn off interruptions
436 isync ; May have turned off vec and fp here
437 mftb r8 ; Get the low part of the time base
438 b bitsniff
439
440 .align 5
441
442
443/*
444 * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
445 *
446 * Release bit based spin-lock. The second parameter is the bit mask to clear.
447 * Multiple bits may be cleared.
448 *
449 */
450 .align 5
451 .globl EXT(hw_unlock_bit)
452
453LEXT(hw_unlock_bit)
454
455 .globl EXT(hwulckbPatch_isync)
456LEXT(hwulckbPatch_isync)
457 isync
458 .globl EXT(hwulckbPatch_eieio)
459LEXT(hwulckbPatch_eieio)
460 eieio
461ubittry: lwarx r0,0,r3 ; Grab the lock value
462 andc r0,r0,r4 ; Clear the lock bits
463 stwcx. r0,0,r3 ; Try to clear that there durn lock
464 bne- ubittry ; Try again, couldn't save it...
465
466 blr ; Leave...
467
468/*
469 * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
470 * unsigned int newb, unsigned int timeout)
471 *
472 * Try to acquire spin-lock. The second parameter is the bit mask to check.
473 * The third is the value of those bits and the 4th is what to set them to.
474 * Return success (1) or failure (0).
475 * Attempt will fail after timeout ticks of the timebase.
476 * We try fairly hard to get this lock. We disable for interruptions, but
477 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
478 * After checking to see if the large timeout value (passed in) has expired and a
 479 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
480 * we return either in abject failure, or disable and go back to the lock sniff routine.
481 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
482 */
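/*
 * Usage sketch (illustrative only, not part of the original source; the state
 * values are made up). hw_lock_mbits stores the new bits only while the masked
 * field holds the expected value, so it can move a small state field atomically
 * from one specific state to another.
 *
 *	#define EX_STATE_MASK	0x00000003
 *	#define EX_STATE_IDLE	0x00000000
 *	#define EX_STATE_BUSY	0x00000001
 *
 *	extern unsigned int	hw_lock_mbits(hw_lock_t, unsigned int bits,
 *					unsigned int value, unsigned int newb,
 *					unsigned int timeout);
 *
 *	// Atomically go IDLE -> BUSY, or time out trying:
 *	if (!hw_lock_mbits(&state_word, EX_STATE_MASK, EX_STATE_IDLE,
 *			EX_STATE_BUSY, 100000))
 *		panic("example state transition timed out");
 */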
483 .align 5
484 .globl EXT(hw_lock_mbits)
485
486LEXT(hw_lock_mbits)
487
488 li r10,0
489
490mbittry: lwarx r12,0,r3 ; Grab the lock value
491 and r0,r12,r4 ; Clear extra bits
492 andc r12,r12,r4 ; Clear all bits in the bit mask
493 or r12,r12,r6 ; Turn on the lock bits
494 cmplw r0,r5 ; Are these the right bits?
495 bne-- mbitspin ; Nope, wait for it to clear...
496 stwcx. r12,0,r3 ; Try to seize that there durn lock
497 beq++ mbitgot ; We got it, yahoo...
498 b mbittry ; Just start up again if the store failed...
499
500 .align 5
501mbitspin: li r11,lgKillResv ; Point to killing field
502 stwcx. r11,0,r11 ; Kill it
503
504 mr. r10,r10 ; Is r10 set to zero
 505 bne++ mbitspin0 ; No, not the first spin attempt...
506 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
507 mfmsr r9 ; Get the MSR value
508 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
509 ori r8,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
510 andc r9,r9,r0 ; Clear FP and VEC
511 andc r8,r9,r8 ; Clear EE as well
512 mtmsr r8 ; Turn off interruptions
513 isync ; May have turned off vectors or float here
514 mftb r10 ; Get the low part of the time base
515 b mbitsniff
516mbitspin0:
517 mtmsr r8 ; Turn off interruptions
518 mftb r10 ; Get the low part of the time base
519mbitsniff:
520 lwz r12,0(r3) ; Get that lock in here
521 and r0,r12,r4 ; Clear extra bits
522 cmplw r0,r5 ; Are these the right bits?
523 beq++ mbitretry ; Yeah, try for it again...
524
525 mftb r11 ; Time stamp us now
526 sub r11,r11,r10 ; Get the elapsed time
527 cmplwi r11,128 ; Have we been spinning for 128 tb ticks?
528 blt++ mbitsniff ; Not yet...
529
530 mtmsr r9 ; Say, any interrupts pending?
531
 532; The following instructions force the pipeline to be interlocked so that only one
 533; instruction is issued per cycle. This ensures that we stay enabled for a long enough
534; time. If it is too short, pending interruptions will not have a chance to be taken
535
536 subi r7,r7,128 ; Back off elapsed time from timeout value
537 or r7,r7,r7 ; Do nothing here but force a single cycle delay
538 mr. r7,r7 ; See if we used the whole timeout
539 or r7,r7,r7 ; Do nothing here but force a single cycle delay
540
541 ble-- mbitfail ; We failed
542 b mbitspin0 ; Now that we have opened an enable window, keep trying...
543mbitretry:
544 mtmsr r9 ; Enable for interruptions
545 li r10,1 ; Make sure this is non-zero
546 b mbittry
547
548 .align 5
549mbitgot:
550 li r3,1 ; Set good return code
 551 isync ; Make sure we do not use a speculatively loaded value
552 blr
553
554mbitfail: li r3,0 ; Set failure return code
555 blr ; Return, head hanging low...
556
557/*
558 * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
559 *
560 * Spin until word hits 0 or timeout.
561 * Return success (1) or failure (0).
562 * Attempt will fail after timeout ticks of the timebase.
563 *
 564 * The theory is that a processor will bump a counter as it signals
 565 * other processors. Then it will spin until the counter hits 0 (or
 566 * times out). The other processors, as they receive the signal, will
 567 * decrement the counter.
 568 *
 569 * The other processors use an interlocked update to decrement; this one
 570 * does not need to interlock.
571 */
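/*
 * Protocol sketch (illustrative only, not part of the original source; the
 * variable names and counts are made up). The signalling processor bumps the
 * counter once per target, sends the signals, then waits here for the counter
 * to drain to zero; each target decrements it with an interlocked update.
 *
 *	extern unsigned int	hw_cpu_sync(unsigned int *, unsigned int timeout);
 *	extern uint32_t		hw_atomic_sub(uint32_t *, uint32_t);
 *
 *	static unsigned int sync_count;
 *
 *	// signaller:
 *	sync_count = ncpus_signalled;			// one count per target CPU
 *	// ...send the signals...
 *	if (!hw_cpu_sync(&sync_count, 1000000))
 *		panic("example: CPUs did not respond");
 *
 *	// each target, when it takes the signal:
 *	hw_atomic_sub((uint32_t *)&sync_count, 1);
 */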
572 .align 5
573 .globl EXT(hw_cpu_sync)
574
575LEXT(hw_cpu_sync)
576
577 mftb r10 ; Get the low part of the time base
578 mr r9,r3 ; Save the sync word address
579 li r3,1 ; Assume we work
580
581csynctry: lwz r11,0(r9) ; Grab the sync value
582 mr. r11,r11 ; Counter hit 0?
 583 beqlr- ; Yes, it hit 0, we are done...
584 mftb r12 ; Time stamp us now
585
586 sub r12,r12,r10 ; Get the elapsed time
587 cmplw r4,r12 ; Have we gone too long?
588 bge+ csynctry ; Not yet...
589
590 li r3,0 ; Set failure...
591 blr ; Return, head hanging low...
592
593/*
594 * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
595 *
596 * Spin until word changes or timeout.
597 * Return success (1) or failure (0).
598 * Attempt will fail after timeout ticks of the timebase.
599 *
 600 * This is used to ensure that a processor passes a certain point.
601 * An example of use is to monitor the last interrupt time in the
 602 * per_proc block. This can be used to ensure that the other processor
603 * has seen at least one interrupt since a specific time.
604 */
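/*
 * Usage sketch (illustrative only, not part of the original source). As
 * described above, the caller samples a word the other processor updates
 * (for example a per_proc interrupt timestamp) and waits for it to change.
 *
 *	extern unsigned int	hw_cpu_wcng(unsigned int *, unsigned int, unsigned int);
 *
 *	unsigned int seen = *remote_stamp;		// value we expect to change
 *	if (!hw_cpu_wcng(remote_stamp, seen, 1000000))
 *		panic("example: remote processor never advanced");
 */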
605 .align 5
606 .globl EXT(hw_cpu_wcng)
607
608LEXT(hw_cpu_wcng)
609
610 mftb r10 ; Get the low part of the time base
611 mr r9,r3 ; Save the sync word address
612 li r3,1 ; Assume we work
613
614wcngtry: lwz r11,0(r9) ; Grab the value
615 cmplw r11,r4 ; Do they still match?
616 bnelr- ; Nope, cool...
617 mftb r12 ; Time stamp us now
618
619 sub r12,r12,r10 ; Get the elapsed time
620 cmplw r5,r12 ; Have we gone too long?
621 bge+ wcngtry ; Not yet...
622
623 li r3,0 ; Set failure...
624 blr ; Return, head hanging low...
625
626
627/*
628 * unsigned int hw_lock_try(hw_lock_t)
629 *
630 * Try to acquire spin-lock. Return success (1) or failure (0)
631 * Returns with preemption disabled on success.
632 *
633 */
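/*
 * Usage sketch (illustrative only, not part of the original source). A failed
 * try leaves preemption untouched; a successful one must be paired with
 * hw_lock_unlock, which releases the preemption disable taken here.
 *
 *	extern unsigned int	hw_lock_try(hw_lock_t);
 *	extern void		hw_lock_unlock(hw_lock_t);
 *
 *	if (hw_lock_try(lock)) {
 *		// got it; preemption is now disabled
 *		hw_lock_unlock(lock);
 *	} else {
 *		// lock is busy; do something useful instead of spinning
 *	}
 */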
634 .align 5
635 .globl EXT(hw_lock_try)
636
637LEXT(hw_lock_try)
638
639 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
640 mfmsr r9 ; Get the MSR value
641 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
642 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
643 andc r9,r9,r0 ; Clear FP and VEC
644 andc r7,r9,r7 ; Clear EE as well
645
646 mtmsr r7 ; Disable interruptions and thus, preemption
647
648 lwz r5,0(r3) ; Quick load
649 andi. r6,r5,ILK_LOCKED ; TEST...
650 bne-- .L_lock_try_failed ; No go...
651
652.L_lock_try_loop:
653 lwarx r5,0,r3 ; Ld from addr of arg and reserve
654
655 andi. r6,r5,ILK_LOCKED ; TEST...
656 ori r5,r5,ILK_LOCKED
657 bne-- .L_lock_try_failedX ; branch if taken. Predict free
658
659 stwcx. r5,0,r3 ; And SET (if still reserved)
660 bne-- .L_lock_try_loop ; If set failed, loop back
661
662 isync
663
664 mfsprg r6,1 ; Get current activation
665 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
666 addi r5,r5,1 ; Bring up the disable count
667 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
668
669 mtmsr r9 ; Allow interruptions now
670 li r3,1 ; Set that the lock was free
671 blr
672
673.L_lock_try_failedX:
674 li r6,lgKillResv ; Killing field
675 stwcx. r6,0,r6 ; Kill reservation
676
677.L_lock_try_failed:
678 mtmsr r9 ; Allow interruptions now
679 li r3,0 ; FAILURE - lock was taken
680 blr
681
682/*
683 * unsigned int hw_lock_held(hw_lock_t)
684 *
685 * Return 1 if lock is held
686 * Doesn't change preemption state.
687 * N.B. Racy, of course.
688 */
689 .align 5
690 .globl EXT(hw_lock_held)
691
692LEXT(hw_lock_held)
693
 694 isync ; Make sure we don't use a speculatively fetched lock
695 lwz r3, 0(r3) ; Get lock value
696 andi. r6,r3,ILK_LOCKED ; Extract the ILK_LOCKED bit
697 blr
698
699/*
700 * uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
701 *
 702 * Compare oldval with the area at dest; if equal, store newval and return true,
 703 * else return false without storing.
704 * This is an atomic operation
705 */
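/*
 * Usage sketch (illustrative only, not part of the original source): the
 * usual read-modify-CAS retry loop, here building a saturating increment
 * out of the primitive purely as an example.
 *
 *	extern uint32_t	hw_compare_and_store(uint32_t oldval, uint32_t newval,
 *				uint32_t *dest);
 *
 *	uint32_t old, new;
 *	do {
 *		old = *counter;
 *		new = (old == 0xFFFFFFFF) ? old : old + 1;
 *	} while (!hw_compare_and_store(old, new, counter));
 */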
706 .align 5
707 .globl EXT(hw_compare_and_store)
708
709LEXT(hw_compare_and_store)
710
711 mr r6,r3 ; Save the old value
712
713cstry: lwarx r9,0,r5 ; Grab the area value
714 li r3,1 ; Assume it works
715 cmplw cr0,r9,r6 ; Does it match the old value?
716 bne-- csfail ; No, it must have changed...
717 stwcx. r4,0,r5 ; Try to save the new value
718 bne-- cstry ; Didn't get it, try again...
719 isync ; Just hold up prefetch
720 blr ; Return...
721
722csfail: li r3,lgKillResv ; Killing field
723 stwcx. r3,0,r3 ; Blow reservation
724
725 li r3,0 ; Set failure
726 blr ; Better luck next time...
727
728
729/*
730 * uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
731 *
732 * Atomically add the second parameter to the first.
733 * Returns the result.
734 *
735 */
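/*
 * Usage sketch (illustrative only, not part of the original source; the object
 * and its cleanup routine are hypothetical). These routines return the updated
 * value, so a reference-count release can key off the result directly.
 *
 *	extern uint32_t	hw_atomic_add(uint32_t *, uint32_t);
 *	extern uint32_t	hw_atomic_sub(uint32_t *, uint32_t);
 *
 *	hw_atomic_add(&obj->refcnt, 1);			// take a reference
 *	if (hw_atomic_sub(&obj->refcnt, 1) == 0)	// drop it
 *		example_free_object(obj);		// last reference gone
 */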
736 .align 5
737 .globl EXT(hw_atomic_add)
738
739LEXT(hw_atomic_add)
740
741 mr r6,r3 ; Save the area
742
743addtry: lwarx r3,0,r6 ; Grab the area value
744 add r3,r3,r4 ; Add the value
745 stwcx. r3,0,r6 ; Try to save the new value
746 bne-- addtry ; Didn't get it, try again...
747 blr ; Return...
748
749
750/*
751 * uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt)
752 *
753 * Atomically subtract the second parameter from the first.
754 * Returns the result.
755 *
756 */
757 .align 5
758 .globl EXT(hw_atomic_sub)
759
760LEXT(hw_atomic_sub)
761
762 mr r6,r3 ; Save the area
763
764subtry: lwarx r3,0,r6 ; Grab the area value
765 sub r3,r3,r4 ; Subtract the value
766 stwcx. r3,0,r6 ; Try to save the new value
767 bne-- subtry ; Didn't get it, try again...
768 blr ; Return...
769
770
771/*
772 * uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask)
773 *
774 * Atomically ORs the second parameter into the first.
775 * Returns the result.
776 */
777 .align 5
778 .globl EXT(hw_atomic_or)
779
780LEXT(hw_atomic_or)
781
782 mr r6,r3 ; Save the area
783
784ortry: lwarx r3,0,r6 ; Grab the area value
785 or r3,r3,r4 ; OR the value
786 stwcx. r3,0,r6 ; Try to save the new value
787 bne-- ortry ; Did not get it, try again...
788 blr ; Return...
789
790
791/*
792 * uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask)
793 *
794 * Atomically ANDs the second parameter with the first.
795 * Returns the result.
796 *
797 */
798 .align 5
799 .globl EXT(hw_atomic_and)
800
801LEXT(hw_atomic_and)
802
803 mr r6,r3 ; Save the area
804
805andtry: lwarx r3,0,r6 ; Grab the area value
806 and r3,r3,r4 ; AND the value
807 stwcx. r3,0,r6 ; Try to save the new value
808 bne-- andtry ; Did not get it, try again...
809 blr ; Return...
810
811
812/*
813 * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
814 *
815 * Atomically inserts the element at the head of the list
816 * anchor is the pointer to the first element
817 * element is the pointer to the element to insert
818 * disp is the displacement into the element to the chain pointer
819 *
820 */
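/*
 * Usage sketch (illustrative only, not part of the original source; the element
 * layout is made up). The disp argument is the byte offset of the chain pointer
 * within the element, so differently shaped structures can share this code.
 *
 *	struct ex_elem {
 *		unsigned int	payload;
 *		unsigned int	*next;			// chain pointer
 *	};
 *
 *	extern void	hw_queue_atomic(unsigned int *anchor, unsigned int *elem,
 *				unsigned int disp);
 *
 *	hw_queue_atomic(&free_anchor, (unsigned int *)e,
 *			offsetof(struct ex_elem, next));
 */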
821 .align 5
822 .globl EXT(hw_queue_atomic)
823
824LEXT(hw_queue_atomic)
825
826 mr r7,r4 ; Make end point the same as start
827 mr r8,r5 ; Copy the displacement also
828 b hw_queue_comm ; Join common code...
829
830/*
831 * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
832 *
833 * Atomically inserts the list of elements at the head of the list
834 * anchor is the pointer to the first element
835 * first is the pointer to the first element to insert
836 * last is the pointer to the last element to insert
837 * disp is the displacement into the element to the chain pointer
838 */
839 .align 5
840 .globl EXT(hw_queue_atomic_list)
841
842LEXT(hw_queue_atomic_list)
843
844 mr r7,r5 ; Make end point the same as start
845 mr r8,r6 ; Copy the displacement also
846
847hw_queue_comm:
848 lwarx r9,0,r3 ; Pick up the anchor
849 stwx r9,r8,r7 ; Chain that to the end of the new stuff
850 eieio ; Make sure this store makes it before the anchor update
851 stwcx. r4,0,r3 ; Try to chain into the front
852 bne-- hw_queue_comm ; Didn't make it, try again...
853
854 blr ; Return...
855
856/*
857 * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
858 *
859 * Atomically removes the first element in a list and returns it.
860 * anchor is the pointer to the first element
861 * disp is the displacement into the element to the chain pointer
862 * Returns element if found, 0 if empty.
863 */
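/*
 * Usage sketch (illustrative only, not part of the original source), the pop
 * side of the push sketch above, using the same hypothetical element layout:
 *
 *	extern unsigned int	*hw_dequeue_atomic(unsigned int *anchor,
 *					unsigned int disp);
 *
 *	struct ex_elem *e = (struct ex_elem *)
 *		hw_dequeue_atomic(&free_anchor, offsetof(struct ex_elem, next));
 *	if (e == 0) {
 *		// the list was empty
 *	}
 */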
864 .align 5
865 .globl EXT(hw_dequeue_atomic)
866
867LEXT(hw_dequeue_atomic)
868
869 mr r5,r3 ; Save the anchor
870
871hw_dequeue_comm:
872 lwarx r3,0,r5 ; Pick up the anchor
873 mr. r3,r3 ; Is the list empty?
 874 beq-- hdcFail ; List is empty, leave...
875 lwzx r9,r4,r3 ; Get the next in line
876 stwcx. r9,0,r5 ; Try to chain into the front
877 beqlr++ ; Got the thing, go away with it...
878 b hw_dequeue_comm ; Did not make it, try again...
879
880hdcFail: li r4,lgKillResv ; Killing field
881 stwcx. r4,0,r4 ; Dump reservation
882 blr ; Leave...
883
884
885/*
886 * void mutex_init(mutex_t* l, etap_event_t etap)
887 *
888 */
889 .align 5
890 .globl EXT(mutex_init)
891
892LEXT(mutex_init)
893
894 PROLOG(0)
895 li r10, 0
896 stw r10, LOCK_DATA(r3) ; clear lock word
897 sth r10, MUTEX_WAITERS(r3) ; init waiter count
898 sth r10, MUTEX_PROMOTED_PRI(r3)
899#if MACH_LDEBUG
900 stw r10, MUTEX_PC(r3) ; init caller pc
901 stw r10, MUTEX_THREAD(r3) ; and owning thread
902 li r10, MUTEX_TAG
903 stw r10, MUTEX_TYPE(r3) ; set lock type
904#endif /* MACH_LDEBUG */
905 EPILOG
906 blr
907
908/*
909 * void mutex_lock(mutex_t*)
910 *
911 */
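/*
 * Rough C-level sketch of the fast path below (illustrative only, not part of
 * the original source; the field and helper names are approximations of what
 * the assembly actually touches).
 *
 *	void mutex_lock_sketch(mutex_t *m)
 *	{
 *		thread_t self = current_thread();
 *
 *		// fast path: lock word is 0 (free); install ourselves as owner
 *		// with a load-reserve/store-conditional sequence
 *		if (try_store_if_zero(&m->lock_data, (uint32_t)self))
 *			return;
 *
 *		// otherwise spin briefly while the current owner is running on
 *		// a processor, then take the interlock and block in
 *		// mutex_lock_wait() until we are handed the lock.
 *	}
 */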
912 .align 5
913 .globl EXT(mutex_lock)
914LEXT(mutex_lock)
915
916 .globl EXT(_mutex_lock)
917LEXT(_mutex_lock)
918
919#if !MACH_LDEBUG
920 mfsprg r6,1 ; load the current thread
921 lwz r5,0(r3) ; Get the lock quickly
922 li r4,0
923 li r8,0
924 mr. r5,r5 ; Quick check
925 bne-- mlckspin1 ; Can not get it right now...
926
927mlcktry:
928 lwarx r5,0,r3 ; load the mutex lock
929 mr. r5,r5
930 bne-- mlckspin0 ; Can not get it right now...
931 stwcx. r6,0,r3 ; grab the lock
932 bne-- mlcktry ; loop back if failed
 933 isync ; stop prefetching
934 mflr r8
935 stw r8,4(r3)
936 blr
937
938mlckspin0:
939 li r5,lgKillResv ; Killing field
940 stwcx. r5,0,r5 ; Kill reservation
941mlckspin1:
942 mr. r4,r4 ; Test timeout value
943 bne++ mlckspin2
944 lis r4,hi16(EXT(MutexSpin)) ; Get the high part
 945 ori r4,r4,lo16(EXT(MutexSpin)) ; And the low part
 946 lwz r4,0(r4) ; Get spin timeout value
947 mr. r4,r4 ; Test spin timeout value
948 beq mlckslow1 ; Is spin timeout set to zero
949
950mlckspin2: mr. r8,r8 ; Is r8 set to zero
 951 bne++ mlckspin3 ; No, not the first spin attempt...
952 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
953 mfmsr r9 ; Get the MSR value
954 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
955 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
956 andc r9,r9,r0 ; Clear FP and VEC
957 andc r7,r9,r7 ; Clear EE as well
958 mtmsr r7 ; Turn off interruptions
959 isync ; May have turned off vec and fp here
960 mftb r8 ; Get timestamp on entry
961 b mlcksniff
962
963mlckspin3: mtmsr r7 ; Turn off interruptions
964 mftb r8 ; Get timestamp on entry
965
966mlcksniff: lwz r5,0(r3) ; Get that lock in here
967 mr. r5,r5 ; Is the lock held
968 beq++ mlckretry ; No, try for it again...
969 rlwinm r5,r5,0,0,29 ; Extract the lock owner
970 mr. r5,r5 ; Quick check
971 beq++ mlckslow0 ; InterLock is held
972 lwz r10,ACT_MACT_SPF(r5) ; Get the special flags
973 rlwinm. r10,r10,0,OnProcbit,OnProcbit ; Is OnProcbit set?
974 beq mlckslow0 ; Lock owner isn't running
975
976 mftb r10 ; Time stamp us now
977 sub r10,r10,r8 ; Get the elapsed time
978 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
979 blt++ mlcksniff ; Not yet...
980
981 mtmsr r9 ; Say, any interrupts pending?
982
 983; The following instructions force the pipeline to be interlocked so that only one
 984; instruction is issued per cycle. This ensures that we stay enabled for a long enough
985; time; if it's too short, pending interruptions will not have a chance to be taken
986
987 subi r4,r4,128 ; Back off elapsed time from timeout value
988 or r4,r4,r4 ; Do nothing here but force a single cycle delay
989 mr. r4,r4 ; See if we used the whole timeout
990 or r4,r4,r4 ; Do nothing here but force a single cycle delay
991
992 ble-- mlckslow1 ; We failed
993 b mlckspin1 ; Now that we've opened an enable window, keep trying...
994mlckretry:
995 mtmsr r9 ; Restore interrupt state
996 li r8,1 ; Show already through once
997 b mlcktry
998mlckslow0: ; We couldn't get the lock
999 mtmsr r9 ; Restore interrupt state
1000
1001mlckslow1:
1002#endif
1003#if CHECKNMI
1004 mflr r12 ; (TEST/DEBUG)
1005 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
1006 mtlr r12 ; (TEST/DEBUG)
1007#endif
1008
1009 PROLOG(12)
1010#if MACH_LDEBUG
1011 bl EXT(assert_wait_possible)
1012 mr. r3,r3
1013 bne L_mutex_lock_assert_wait_1
1014 lis r3,hi16(L_mutex_lock_assert_wait_panic_str)
1015 ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
1016 PROLOG(0)
1017 bl EXT(panic)
1018 BREAKPOINT_TRAP ; We die here anyway
1019
1020 .data
1021L_mutex_lock_assert_wait_panic_str:
1022 STRINGD "mutex_lock: assert_wait_possible false\n\000"
1023 .text
1024
1025L_mutex_lock_assert_wait_1:
1026 lwz r3,FM_ARG0(r1)
1027#endif
1028 CHECK_SETUP(r12)
1029 CHECK_MUTEX_TYPE()
1030 CHECK_NO_SIMPLELOCKS()
1031.L_ml_retry:
1032 bl lockDisa ; Go get a lock on the mutex's interlock lock
1033 mr. r4,r3 ; Did we get it?
1034 lwz r3,FM_ARG0(r1) ; Restore the lock address
1035 bne+ mlGotInt ; We got it just fine...
1036
1037 lis r3,hi16(mutex_failed1) ; Get the failed mutex message
1038 ori r3,r3,lo16(mutex_failed1) ; Get the failed mutex message
1039 PROLOG(0)
1040 bl EXT(panic) ; Call panic
1041 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1042
1043 .data
1044mutex_failed1:
1045 STRINGD "We can't get a mutex interlock lock on mutex_lock\n\000"
1046 .text
1047
1048mlGotInt:
1049
1050; Note that there is no reason to do a load and reserve here. We already
1051; hold the interlock lock and no one can touch this field unless they
1052; have that, so, we're free to play
1053
1054 lwz r4,LOCK_DATA(r3) ; Get the mutex's lock field
1055 rlwinm. r9,r4,30,2,31 ; So, can we have it?
 1056 bne- mlInUse ; Nope, somebody's playing already...
1057
1058#if MACH_LDEBUG
1059 li r5,lo16(MASK(MSR_EE)) ; Get the EE bit
1060 mfmsr r11 ; Note: no need to deal with fp or vec here
1061 andc r5,r11,r5
1062 mtmsr r5
1063 mfsprg r9,1 ; Get the current activation
1064 lwz r5,0(r1) ; Get previous save frame
1065 lwz r5,FM_LR_SAVE(r5) ; Get our caller's address
1066 lwz r8, ACT_THREAD(r9) ; Get the active thread
1067 stw r5,MUTEX_PC(r3) ; Save our caller
1068 mr. r8,r8 ; Is there any thread?
1069 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
1070 beq- .L_ml_no_active_thread ; No owning thread...
1071 lwz r9,THREAD_MUTEX_COUNT(r8) ; Get the mutex count
1072 addi r9,r9,1 ; Bump it up
1073 stw r9,THREAD_MUTEX_COUNT(r8) ; Stash it back
1074.L_ml_no_active_thread:
1075 mtmsr r11
1076#endif /* MACH_LDEBUG */
1077
1078 bl EXT(mutex_lock_acquire)
1079 mfsprg r5,1
1080 mr. r4,r3
1081 lwz r3,FM_ARG0(r1)
1082 beq mlUnlock
1083 ori r5,r5,WAIT_FLAG
1084
1085mlUnlock: eieio
1086 stw r5,LOCK_DATA(r3) ; grab the mutexlock and free the interlock
1087
1088 EPILOG ; Restore all saved registers
1089 b epStart ; Go enable preemption...
1090
1091; We come to here when we have a resource conflict. In other words,
1092; the mutex is held.
1093
1094mlInUse:
1095
1096 CHECK_SETUP(r12)
 1097 CHECK_MYLOCK(MUTEX_THREAD) ; Assert we don't own the lock already
1098
1099; Note that we come in here with the interlock set. The wait routine
1100; will unlock it before waiting.
1101
1102 ori r4,r4,WAIT_FLAG ; Set the wait flag
1103 stw r4,LOCK_DATA(r3)
1104 rlwinm r4,r4,0,0,29 ; Extract the lock owner
1105 bl EXT(mutex_lock_wait) ; Wait for our turn at the lock
1106
1107 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1108 b .L_ml_retry ; and try again...
1109
1110
1111/*
1112 * void _mutex_try(mutex_t*)
1113 *
1114 */
1115 .align 5
1116 .globl EXT(mutex_try)
1117LEXT(mutex_try)
1118 .globl EXT(_mutex_try)
1119LEXT(_mutex_try)
1120#if !MACH_LDEBUG
1121 mfsprg r6,1 ; load the current thread
1122 lwz r5,0(r3) ; Get the lock value
1123 mr. r5,r5 ; Quick check
1124 bne-- L_mutex_try_slow ; Can not get it now...
1125
1126L_mutex_try_loop:
1127 lwarx r5,0,r3 ; load the lock value
1128 mr. r5,r5
1129 bne-- L_mutex_try_slowX ; branch to the slow path
1130 stwcx. r6,0,r3 ; grab the lock
1131 bne-- L_mutex_try_loop ; retry if failed
1132 isync ; stop prefetching
1133 li r3, 1
1134 blr
1135
1136L_mutex_try_slowX:
1137 li r5,lgKillResv ; Killing field
1138 stwcx. r5,0,r5 ; Kill reservation
1139
1140L_mutex_try_slow:
1141
1142#endif
1143
1144 PROLOG(8) ; reserve space for SWT_HI and SWT_LO
1145
1146 CHECK_SETUP(r12)
1147 CHECK_MUTEX_TYPE()
1148 CHECK_NO_SIMPLELOCKS()
1149
1150 lwz r6,LOCK_DATA(r3) ; Quick check
1151 rlwinm. r6,r6,30,2,31 ; to see if someone has this lock already
1152 bne- mtFail ; Someone's got it already...
1153
1154 bl lockDisa ; Go get a lock on the mutex's interlock lock
 1155 mr. r4,r3 ; Did we get it?
1156 lwz r3,FM_ARG0(r1) ; Restore the lock address
1157 bne+ mtGotInt ; We got it just fine...
1158
1159 lis r3,hi16(mutex_failed2) ; Get the failed mutex message
1160 ori r3,r3,lo16(mutex_failed2) ; Get the failed mutex message
1161 PROLOG(0)
1162 bl EXT(panic) ; Call panic
1163 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1164
1165 .data
1166mutex_failed2:
1167 STRINGD "We can't get a mutex interlock lock on mutex_try\n\000"
1168 .text
1169
1170mtGotInt:
1171
1172; Note that there is no reason to do a load and reserve here. We already
 1173; hold the interlock and no one can touch this field unless they
1174; have that, so, we're free to play
1175
1176 lwz r4,LOCK_DATA(r3) ; Get the mutex's lock field
1177 rlwinm. r9,r4,30,2,31 ; So, can we have it?
 1178 bne- mtInUse ; Nope, somebody's playing already...
1179
1180#if MACH_LDEBUG
1181 lis r9,hi16(MASK(MSR_VEC)) ; Get vector enable
1182 mfmsr r11 ; Get the MSR value
1183 ori r9,r9,lo16(MASK(MSR_FP)) ; Get FP enable
1184 ori r5,r9,lo16(MASK(MSR_EE)) ; Get EE bit on too
1185 andc r11,r11,r9 ; Clear FP and VEC
1186 andc r5,r11,r5 ; Clear EE as well
1187
1188 mtmsr r5
1189 mfsprg r9,1 ; Get the current activation
1190 lwz r5,0(r1) ; Get previous save frame
1191 lwz r5,FM_LR_SAVE(r5) ; Get our caller's address
1192 lwz r8,ACT_THREAD(r9) ; Get the active thread
1193 stw r5,MUTEX_PC(r3) ; Save our caller
1194 mr. r8,r8 ; Is there any thread?
1195 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
1196 beq- .L_mt_no_active_thread ; No owning thread...
1197 lwz r9, THREAD_MUTEX_COUNT(r8) ; Get the mutex count
1198 addi r9, r9, 1 ; Bump it up
1199 stw r9, THREAD_MUTEX_COUNT(r8) ; Stash it back
1200.L_mt_no_active_thread:
1201 mtmsr r11
1202#endif /* MACH_LDEBUG */
1203
1204 bl EXT(mutex_lock_acquire)
1205 mfsprg r5,1
1206 mr. r4,r3
1207 lwz r3,FM_ARG0(r1)
1208 beq mtUnlock
1209 ori r5,r5,WAIT_FLAG
1210
1211mtUnlock: eieio
1212 stw r5,LOCK_DATA(r3) ; grab the mutexlock and free the interlock
1213
1214 bl epStart ; Go enable preemption...
1215
1216 li r3, 1
1217 EPILOG ; Restore all saved registers
1218 blr ; Return...
1219
1220; We come to here when we have a resource conflict. In other words,
1221; the mutex is held.
1222
1223mtInUse:
1224 rlwinm r4,r4,0,0,30 ; Get the unlock value
1225 stw r4,LOCK_DATA(r3) ; free the interlock
1226 bl epStart ; Go enable preemption...
1227
1228mtFail: li r3,0 ; Set failure code
1229 EPILOG ; Restore all saved registers
1230 blr ; Return...
1231
1232
1233/*
1234 * void mutex_unlock_rwcmb(mutex_t* l)
1235 *
1236 */
1237 .align 5
1238 .globl EXT(mutex_unlock_rwcmb)
1239
1240LEXT(mutex_unlock_rwcmb)
1241 .globl EXT(mulckPatch_isync)
1242LEXT(mulckPatch_isync)
1243 isync
1244 .globl EXT(mulckPatch_eieio)
1245LEXT(mulckPatch_eieio)
1246 eieio
1247
1248 lwz r5,0(r3) ; Get the lock
1249 rlwinm. r4,r5,0,30,31 ; Quick check
1250 bne-- L_mutex_unlock_slow ; Can not get it now...
1251
1252L_mutex_unlock_rwcmb_loop:
1253 lwarx r5,0,r3
1254 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1255 li r5,0 ; Clear the mutexlock
1256 bne-- L_mutex_unlock_rwcmb_slowX
1257 stwcx. r5,0,r3
1258 bne-- L_mutex_unlock_rwcmb_loop
1259 blr
1260
1261L_mutex_unlock_rwcmb_slowX:
1262 li r5,lgKillResv ; Killing field
1263 stwcx. r5,0,r5 ; Dump reservation
1264 b L_mutex_unlock_slow ; Join slow path...
1265
1266/*
1267 * void mutex_unlock(mutex_t* l)
1268 *
1269 */
1270 .align 5
1271 .globl EXT(mutex_unlock)
1272
1273LEXT(mutex_unlock)
1274#if !MACH_LDEBUG
1275 sync
1276 lwz r5,0(r3) ; Get the lock
1277 rlwinm. r4,r5,0,30,31 ; Quick check
1278 bne-- L_mutex_unlock_slow ; Can not get it now...
1279
1280L_mutex_unlock_loop:
1281 lwarx r5,0,r3
1282 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1283 li r5,0 ; Clear the mutexlock
1284 bne-- L_mutex_unlock_slowX
1285 stwcx. r5,0,r3
1286 bne-- L_mutex_unlock_loop
1287 blr
1288L_mutex_unlock_slowX:
1289 li r5,lgKillResv ; Killing field
1290 stwcx. r5,0,r5 ; Dump reservation
1291
1292#endif
1293
1294L_mutex_unlock_slow:
1295
1296 PROLOG(0)
1297
1298 CHECK_SETUP(r12)
1299 CHECK_MUTEX_TYPE()
1300 CHECK_THREAD(MUTEX_THREAD)
1301
1302 bl lockDisa ; Go get a lock on the mutex's interlock lock
1303 mr. r4,r3 ; Did we get it?
1304 lwz r3,FM_ARG0(r1) ; Restore the lock address
1305 bne+ muGotInt ; We got it just fine...
1306
1307 lis r3,hi16(mutex_failed3) ; Get the failed mutex message
1308 ori r3,r3,lo16(mutex_failed3) ; Get the failed mutex message
1309 PROLOG(0)
1310 bl EXT(panic) ; Call panic
1311 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1312
1313 .data
1314mutex_failed3:
1315 STRINGD "We can't get a mutex interlock lock on mutex_unlock\n\000"
1316 .text
1317
1318
1319muGotInt:
1320 lwz r4,LOCK_DATA(r3)
1321 andi. r5,r4,WAIT_FLAG ; are there any waiters ?
1322 rlwinm r4,r4,0,0,29
1323 beq+ muUnlock ; Nope, we're done...
1324
1325 bl EXT(mutex_unlock_wakeup) ; yes, wake a thread
1326 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1327 lwz r5,LOCK_DATA(r3) ; load the lock
1328
1329muUnlock:
1330#if MACH_LDEBUG
1331 lis r8,hi16(MASK(MSR_VEC)) ; Get vector enable
1332 mfmsr r11 ; Get the MSR value
1333 ori r8,r8,lo16(MASK(MSR_FP)) ; Get FP enable
1334 ori r9,r8,lo16(MASK(MSR_EE)) ; Get EE bit on too
1335 andc r11,r11,r8 ; Clear FP and VEC
1336 andc r9,r11,r9 ; Clear EE as well
1337
1338 mtmsr r9
1339 mfsprg r9,1
1340 lwz r9,ACT_THREAD(r9)
1341 stw r9,MUTEX_THREAD(r3) ; disown thread
1342 cmpwi r9,0
1343 beq- .L_mu_no_active_thread
1344 lwz r8,THREAD_MUTEX_COUNT(r9)
1345 subi r8,r8,1
1346 stw r8,THREAD_MUTEX_COUNT(r9)
1347.L_mu_no_active_thread:
1348 mtmsr r11
1349#endif /* MACH_LDEBUG */
1350
1351 andi. r5,r5,WAIT_FLAG ; Get the unlock value
1352 eieio
1353 stw r5,LOCK_DATA(r3) ; unlock the interlock and lock
1354
1355 EPILOG ; Deal with the stack now, enable_preemption doesn't always want one
1356 b epStart ; Go enable preemption...
1357
1358/*
1359 * boolean_t mutex_preblock(mutex_t*, thread_t)
1360 */
1361 .align 5
1362 .globl EXT(mutex_preblock)
1363
1364LEXT(mutex_preblock)
1365 mr r6,r3
1366 lwz r5,LOCK_DATA(r3)
1367 mr. r3,r5
1368 beqlr+
1369 mr r3,r6
1370
1371 PROLOG(0)
1372 stw r4,(FM_ARG0-4)(r1)
1373
1374 bl EXT(hw_lock_try)
1375 mr. r4,r3
1376 lwz r3,FM_ARG0(r1)
1377 bne+ mpbGotInt
1378
1379 li r3,0
1380
1381 EPILOG
1382
1383 blr
1384
1385mpbGotInt:
1386 lwz r6,LOCK_DATA(r3)
1387 rlwinm. r5,r6,0,0,30
1388 bne+ mpbInUse
1389
1390 stw r5,LOCK_DATA(r3)
1391
1392 bl epStart
1393
1394 li r3,0
1395
1396 EPILOG
1397
1398 blr
1399
1400mpbInUse:
1401 lwz r4,(FM_ARG0-4)(r1)
1402 rlwinm r5,r6,0,0,29
1403 bl EXT(mutex_preblock_wait)
1404 lwz r4,FM_ARG0(r1)
1405 mr. r3,r3
1406 lwz r5,LOCK_DATA(r4)
1407 rlwinm r5,r5,0,0,30
1408 beq- mpbUnlock0
1409 ori r5,r5,WAIT_FLAG
1410
1411 eieio
1412 stw r5,LOCK_DATA(r4)
1413
1414 bl epStart
1415
1416 li r3,1
1417
1418 EPILOG
1419
1420 blr
1421
1422mpbUnlock0:
1423 eieio
1424 stw r5,LOCK_DATA(r4)
1425
1426 bl epStart
1427
1428 li r3,0
1429
1430 EPILOG
1431
1432 blr
1433
1434/*
1435 * void interlock_unlock(hw_lock_t lock)
1436 */
1437 .align 5
1438 .globl EXT(interlock_unlock)
1439
1440LEXT(interlock_unlock)
1441
1442 lwz r10,LOCK_DATA(r3)
1443 rlwinm r10,r10,0,0,30
1444 eieio
1445 stw r10,LOCK_DATA(r3)
1446
1447 b epStart ; Go enable preemption...
1448
1449/*
1450 * void _enable_preemption_no_check(void)
1451 *
1452 * This version does not check if we get preempted or not
1453 */
1454 .align 4
1455 .globl EXT(_enable_preemption_no_check)
1456
1457LEXT(_enable_preemption_no_check)
1458
1459 cmplw cr1,r1,r1 ; Force zero cr so we know not to check if preempted
1460 b epCommn ; Join up with the other enable code...
1461
1462/*
1463 * void _enable_preemption(void)
1464 *
1465 * This version checks if we get preempted or not
1466 */
1467 .align 5
1468 .globl EXT(_enable_preemption)
1469
1470LEXT(_enable_preemption)
1471
1472; Here is where we enable preemption. We need to be protected
 1473; against ourselves; we can't chance getting interrupted and modifying
 1474; our processor-wide preemption count after we've loaded it up. So,
1475; we need to disable all 'rupts. Actually, we could use a compare
1476; and swap to do this, but, since there are no MP considerations
1477; (we are dealing with a CPU local field) it is much, much faster
1478; to disable.
1479;
 1480; Note that if we are not genned for MP, the calls here will be no-opped via
 1481; a #define, and since the _mp forms are the same, a #define will likewise
 1482; be used to route to the other forms.
1483
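; Rough C-level sketch of epStart/epCommn below (illustrative only; the field
; names approximate the per-activation count and per_proc AST word used here):
;
;	void enable_preemption_sketch(void)
;	{
;		int new_count = current_act()->preempt_cnt - 1;
;		if (new_count < 0)
;			panic("_enable_preemption: preemption_level %d\n", new_count);
;		current_act()->preempt_cnt = new_count;
;		if (new_count == 0) {
;			// with interrupts disabled, look for an urgent AST and,
;			// if one is pending, trap into the DoPreemptCall firmware call
;		}
;	}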
1484epStart:
1485 cmplwi cr1,r1,0 ; Force non-zero cr so we know to check if preempted
1486
1487epCommn:
1488 mfsprg r3,1 ; Get current activation
1489 li r8,-1 ; Get a decrementer
1490 lwz r5,ACT_PREEMPT_CNT(r3) ; Get the preemption level
1491 add. r5,r5,r8 ; Bring down the disable count
 1492 blt- epTooFar ; Yeah, we went too far...
1493 stw r5,ACT_PREEMPT_CNT(r3) ; Save it back
1494 crandc cr0_eq,cr0_eq,cr1_eq
1495 beq+ epCheckPreempt ; Go check if we need to be preempted...
1496 blr ; Leave...
1497epTooFar:
1498 mr r4,r5
1499 lis r3,hi16(epTooFarStr) ; First half of panic string
1500 ori r3,r3,lo16(epTooFarStr) ; Second half of panic string
1501 PROLOG(0)
1502 bl EXT(panic)
1503 BREAKPOINT_TRAP ; We die here anyway
1504
1505 .data
1506epTooFarStr:
1507 STRINGD "_enable_preemption: preemption_level %d\n\000"
1508
1509 .text
1510 .align 5
1511epCheckPreempt:
1512 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1513 mfmsr r9 ; Get the MSR value
1514 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1515 andi. r3,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off
1516 beq+ epCPno ; No preemption here...
1517 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1518 andc r9,r9,r0 ; Clear FP and VEC
1519 andc r7,r9,r7 ; Clear EE as well
1520 mtmsr r7 ; Turn off interruptions
1521 isync ; May have turned off vec and fp here
1522 mfsprg r3,0 ; Get per_proc
1523 lwz r7,PP_NEED_AST(r3) ; Get the AST request address
1524 li r5,AST_URGENT ; Get the requests we do honor
1525 lwz r7,0(r7) ; Get the actual, real live, extra special AST word
1526 lis r0,hi16(DoPreemptCall) ; Just in case, get the top of firmware call
1527 and. r7,r7,r5 ; Should we preempt?
1528 ori r0,r0,lo16(DoPreemptCall) ; Merge in bottom part
1529 mtmsr r9 ; Allow interrupts if we can
1530epCPno:
1531 beqlr+ ; We probably will not preempt...
1532 sc ; Do the preemption
1533 blr ; Now, go away now...
1534
1535/*
1536 * void disable_preemption(void)
1537 *
1538 * Here is where we disable preemption. Since preemption is on a
1539 * per processor basis (a thread runs on one CPU at a time) we don't
1540 * need any cross-processor synchronization. We do, however, need to
1541 * be interrupt safe, so we don't preempt while in the process of
1542 * disabling it. We could use SPLs, but since we always want complete
1543 * disablement, and this is platform specific code, we'll just kick the
1544 * MSR. We'll save a couple of orders of magnitude over using SPLs.
1545 */
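/*
 * Usage sketch (illustrative only, not part of the original source). Disable
 * and enable calls nest via the per-activation count, so they simply bracket
 * any access to per-processor state.
 *
 *	extern void	_disable_preemption(void);
 *	extern void	_enable_preemption(void);
 *
 *	_disable_preemption();
 *	// safe to touch CPU-local data here; this thread cannot migrate
 *	_enable_preemption();	// may preempt right here if an urgent AST is pending
 */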
1546 .align 5
1547 .globl EXT(_disable_preemption)
1548
1549LEXT(_disable_preemption)
1550
1551 mfsprg r6,1 ; Get the current activation
1552 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1553 addi r5,r5,1 ; Bring up the disable count
1554 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1555 blr ; Return...
1556
1557/*
1558 * int get_preemption_level(void)
1559 *
1560 * Return the current preemption level
1561 */
1562 .align 5
1563 .globl EXT(get_preemption_level)
1564
1565LEXT(get_preemption_level)
1566
1567 mfsprg r6,1 ; Get current activation
1568 lwz r3,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1569 blr ; Return...
1570
1571/*
1572 * int get_simple_lock_count(void)
1573 *
1574 * Return the simple lock count
1575 *
1576 */
1577 .align 5
1578 .globl EXT(get_simple_lock_count)
1579
1580LEXT(get_simple_lock_count)
1581
1582#if MACH_LDEBUG
1583 lis r3,hi16(MASK(MSR_VEC)) ; Get vector enable
1584 mfmsr r9 ; Get the MSR value
1585 ori r3,r3,lo16(MASK(MSR_FP)) ; Get FP enable
1586 ori r8,r3,lo16(MASK(MSR_EE)) ; Get EE bit on too
1587 andc r9,r9,r3 ; Clear FP and VEC
1588 andc r8,r9,r8 ; Clear EE as well
1589 mtmsr r8 ; Interrupts off
1590 isync ; May have messed with vec/fp
1591 mfsprg r6,0 ; Get the per_proc
1592 lwz r3,PP_SIMPLE_LOCK_CNT(r6) ; Get the simple lock count
1593 mtmsr r9 ; Restore interruptions to entry
1594#else
1595 li r3,0 ; simple lock count not updated
1596#endif
1597 blr ; Return...
1598
1599/*
1600 * void ppc_usimple_lock_init(simple_lock_t, etap_event_t)
1601 *
1602 * Initialize a simple lock.
1603 */
1604 .align 5
1605 .globl EXT(ppc_usimple_lock_init)
1606
1607LEXT(ppc_usimple_lock_init)
1608
1609 li r0, 0 ; set lock to free == 0
1610 stw r0, 0(r3) ; Initialize the lock
1611 blr
1612
1613/*
1614 * void ppc_usimple_lock(simple_lock_t)
1615 *
1616 */
1617 .align 5
1618 .globl EXT(ppc_usimple_lock)
1619
1620LEXT(ppc_usimple_lock)
1621
1622#if CHECKNMI
1623 mflr r12 ; (TEST/DEBUG)
1624 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
1625 mtlr r12 ; (TEST/DEBUG)
1626#endif
1627
1628 mfsprg r6,1 ; Get the current activation
1629 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1630 addi r5,r5,1 ; Bring up the disable count
1631 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1632 mr r5,r3 ; Get the address of the lock
1633 li r8,0 ; Set r8 to zero
1634 li r4,0 ; Set r4 to zero
1635
1636slcktry: lwarx r11,0,r5 ; Grab the lock value
1637 andi. r3,r11,ILK_LOCKED ; Is it locked?
1638 ori r11,r6,ILK_LOCKED ; Set interlock
1639 bne-- slckspin ; Yeah, wait for it to clear...
1640 stwcx. r11,0,r5 ; Try to seize that there durn lock
1641 bne-- slcktry ; Couldn't get it...
 1642 isync ; Make sure we don't use a speculatively loaded value
1643 blr ; Go on home...
1644
1645slckspin: li r11,lgKillResv ; Killing field
1646 stwcx. r11,0,r11 ; Kill reservation
1647
1648 mr. r4,r4 ; Test timeout value
1649 bne++ slockspin0
1650 lis r4,hi16(EXT(LockTimeOut)) ; Get the high part
1651 ori r4,r4,lo16(EXT(LockTimeOut)) ; And the low part
 1652 lwz r4,0(r4) ; Get the timeout value
1653
1654slockspin0: mr. r8,r8 ; Is r8 set to zero
 1655 bne++ slockspin1 ; No, not the first spin attempt...
1656 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1657 mfmsr r9 ; Get the MSR value
1658 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1659 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1660 andc r9,r9,r0 ; Clear FP and VEC
1661 andc r7,r9,r7 ; Clear EE as well
1662 mtmsr r7 ; Turn off interruptions
1663 isync ; May have turned off vec and fp here
1664 mftb r8 ; Get timestamp on entry
1665 b slcksniff
1666
1667slockspin1: mtmsr r7 ; Turn off interruptions
1668 mftb r8 ; Get timestamp on entry
1669
1670slcksniff: lwz r3,0(r5) ; Get that lock in here
1671 andi. r3,r3,ILK_LOCKED ; Is it free yet?
1672 beq++ slckretry ; Yeah, try for it again...
1673
1674 mftb r10 ; Time stamp us now
1675 sub r10,r10,r8 ; Get the elapsed time
1676 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
1677 blt++ slcksniff ; Not yet...
1678
1679 mtmsr r9 ; Say, any interrupts pending?
1680
 1681; The following instructions force the pipeline to be interlocked so that only one
 1682; instruction is issued per cycle. This ensures that we stay enabled for a long enough
1683; time; if it's too short, pending interruptions will not have a chance to be taken
1684
1685 subi r4,r4,128 ; Back off elapsed time from timeout value
1686 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1687 mr. r4,r4 ; See if we used the whole timeout
1688 li r3,0 ; Assume a timeout return code
1689 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1690
1691 ble-- slckfail ; We failed
1692 b slockspin1 ; Now that we've opened an enable window, keep trying...
1693slckretry:
1694 mtmsr r9 ; Restore interrupt state
1695 li r8,1 ; Show already through once
1696 b slcktry
1697slckfail: ; We couldn't get the lock
1698 lis r3,hi16(slckpanic_str)
1699 ori r3,r3,lo16(slckpanic_str)
1700 mr r4,r5
1701 mflr r5
1702 PROLOG(0)
1703 bl EXT(panic)
1704 BREAKPOINT_TRAP ; We die here anyway
1705
1706 .data
1707slckpanic_str:
1708 STRINGD "ppc_usimple_lock: simple lock deadlock detection l=0x%08X, pc=0x%08X\n\000"
1709 .text
1710
1711/*
1712 * unsigned int ppc_usimple_lock_try(simple_lock_t)
1713 *
1714 */
1715 .align 5
1716 .globl EXT(ppc_usimple_lock_try)
1717
1718LEXT(ppc_usimple_lock_try)
1719
1720#if CHECKNMI
1721 mflr r12 ; (TEST/DEBUG)
1722 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
1723 mtlr r12 ; (TEST/DEBUG)
1724#endif
1725 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1726 mfmsr r9 ; Get the MSR value
1727 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1728 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1729 andc r9,r9,r0 ; Clear FP and VEC
1730 andc r7,r9,r7 ; Clear EE as well
1731 mtmsr r7 ; Disable interruptions and thus, preemption
1732 mfsprg r6,1 ; Get current activation
1733
1734 lwz r11,0(r3) ; Get the lock
1735 andi. r5,r11,ILK_LOCKED ; Check it...
1736 bne-- slcktryfail ; Quickly fail...
1737
1738slcktryloop:
1739 lwarx r11,0,r3 ; Ld from addr of arg and reserve
1740
1741 andi. r5,r11,ILK_LOCKED ; TEST...
1742 ori r5,r6,ILK_LOCKED
1743 bne-- slcktryfailX ; branch if taken. Predict free
1744
1745 stwcx. r5,0,r3 ; And SET (if still reserved)
1746 bne-- slcktryloop ; If set failed, loop back
1747
1748 isync
1749
1750 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1751 addi r5,r5,1 ; Bring up the disable count
1752 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1753
1754 mtmsr r9 ; Allow interruptions now
1755 li r3,1 ; Set that the lock was free
1756 blr
1757
1758slcktryfailX:
1759 li r5,lgKillResv ; Killing field
1760 stwcx. r5,0,r5 ; Kill reservation
1761
1762slcktryfail:
1763 mtmsr r9 ; Allow interruptions now
1764 li r3,0 ; FAILURE - lock was taken
1765 blr
1766
1767
1768/*
1769 * void ppc_usimple_unlock_rwcmb(simple_lock_t)
1770 *
1771 */
1772 .align 5
1773 .globl EXT(ppc_usimple_unlock_rwcmb)
1774
1775LEXT(ppc_usimple_unlock_rwcmb)
1776
1777#if CHECKNMI
1778 mflr r12 ; (TEST/DEBUG)
1779 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
1780 mtlr r12 ; (TEST/DEBUG)
1781#endif
1782 li r0,0
1783 .globl EXT(sulckPatch_isync)
1784LEXT(sulckPatch_isync)
1785 isync
1786 .globl EXT(sulckPatch_eieio)
1787LEXT(sulckPatch_eieio)
1788 eieio
1789 stw r0, LOCK_DATA(r3)
1790
1791 b epStart ; Go enable preemption...
1792
1793/*
1794 * void ppc_usimple_unlock_rwmb(simple_lock_t)
1795 *
1796 */
1797 .align 5
1798 .globl EXT(ppc_usimple_unlock_rwmb)
1799
1800LEXT(ppc_usimple_unlock_rwmb)
1801
1802#if CHECKNMI
1803 mflr r12 ; (TEST/DEBUG)
1804 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
1805 mtlr r12 ; (TEST/DEBUG)
1806#endif
1807 li r0,0
1808 sync
1809 stw r0, LOCK_DATA(r3)
1810
1811 b epStart ; Go enable preemption...
1812
1813/*
1814 * void enter_funnel_section(funnel_t *)
1815 *
1816 */
1817 .align 5
1818 .globl EXT(enter_funnel_section)
1819
1820LEXT(enter_funnel_section)
1821
1822#if !MACH_LDEBUG
1823 lis r10,hi16(EXT(kdebug_enable))
1824 ori r10,r10,lo16(EXT(kdebug_enable))
1825 lwz r10,0(r10)
1826 lis r11,hi16(EXT(split_funnel_off))
1827 ori r11,r11,lo16(EXT(split_funnel_off))
1828 lwz r11,0(r11)
1829 or. r10,r11,r10 ; Check kdebug_enable or split_funnel_off
1830 bne- L_enter_funnel_section_slow ; If set, call the slow path
1831 mfsprg r6,1 ; Get the current activation
1832 lwz r7,LOCK_FNL_MUTEX(r3)
1833
1834 lwz r5,0(r7) ; Get lock quickly
1835 mr. r5,r5 ; Locked?
1836 bne-- L_enter_funnel_section_slow ; Yup...
1837
1838L_enter_funnel_section_loop:
1839 lwarx r5,0,r7 ; Load the mutex lock
1840 mr. r5,r5
1841 bne-- L_enter_funnel_section_slowX ; Go to the slow path
1842 stwcx. r6,0,r7 ; Grab the lock
1843 bne-- L_enter_funnel_section_loop ; Loop back if failed
 1844 isync ; Stop prefetching
1845 lwz r6,ACT_THREAD(r6) ; Get the current thread
1846 li r7,TH_FN_OWNED
1847 stw r3,THREAD_FUNNEL_LOCK(r6) ; Set the funnel lock reference
1848 stw r7,THREAD_FUNNEL_STATE(r6) ; Set the funnel state
1849 blr
1850
1851L_enter_funnel_section_slowX:
1852 li r4,lgKillResv ; Killing field
1853 stwcx. r4,0,r4 ; Kill reservation
1854
1855L_enter_funnel_section_slow:
1856#endif
1857 li r4,TRUE
1858 b EXT(thread_funnel_set)
1859
1860/*
1861 * void exit_funnel_section(void)
1862 *
1863 */
1864 .align 5
1865 .globl EXT(exit_funnel_section)
1866
1867LEXT(exit_funnel_section)
1868
1869 mfsprg r6,1 ; Get the current activation
1870 lwz r6,ACT_THREAD(r6) ; Get the current thread
1871 lwz r3,THREAD_FUNNEL_LOCK(r6) ; Get the funnel lock
1872 mr. r3,r3 ; Check on funnel held
1873 beq- L_exit_funnel_section_ret ;
1874#if !MACH_LDEBUG
1875 lis r10,hi16(EXT(kdebug_enable))
1876 ori r10,r10,lo16(EXT(kdebug_enable))
1877 lwz r10,0(r10)
1878 mr. r10,r10
1879 bne- L_exit_funnel_section_slow ; If set, call the slow path
1880 lwz r7,LOCK_FNL_MUTEX(r3) ; Get the funnel mutex lock
1881 .globl EXT(retfsectPatch_isync)
1882LEXT(retfsectPatch_isync)
1883 isync
1884 .globl EXT(retfsectPatch_eieio)
1885LEXT(retfsectPatch_eieio)
1886 eieio
1887
1888 lwz r5,0(r7) ; Get lock
1889 rlwinm. r4,r5,0,30,31 ; Quick check for bail if pending waiter or interlock set
1890 bne-- L_exit_funnel_section_slow ; No can get...
1891
1892L_exit_funnel_section_loop:
1893 lwarx r5,0,r7
1894 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1895 li r5,0 ; Clear the mutexlock
1896 bne-- L_exit_funnel_section_slowX
1897 stwcx. r5,0,r7 ; Release the funnel mutexlock
1898 bne-- L_exit_funnel_section_loop
1899 li r7,0
1900 stw r7,THREAD_FUNNEL_STATE(r6) ; Clear the funnel state
1901 stw r7,THREAD_FUNNEL_LOCK(r6) ; Clear the funnel lock reference
1902 blr ; Return
1903
1904L_exit_funnel_section_slowX:
1905 li r4,lgKillResv ; Killing field
1906 stwcx. r4,0,r4 ; Kill it
1907
1908L_exit_funnel_section_slow:
1909#endif
1910 li r4,FALSE
1911 b EXT(thread_funnel_set)
1912L_exit_funnel_section_ret:
1913 blr
1914
1915;
 1916; This is bring-up code
1917;
1918 .align 5
1919 .globl EXT(condStop)
1920
1921LEXT(condStop)
1922
1923XcondStop: cmplw r3,r4 ; Check if these are equal
1924 beq-- XcondStop ; Stop here until they are different
1925 blr ; Return.
1926