1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23#include <cpus.h>
24#include <mach_assert.h>
25#include <mach_ldebug.h>
26#include <mach_rt.h>
27
28#include <kern/etap_options.h>
29
30#include <ppc/asm.h>
31#include <ppc/proc_reg.h>
32#include <assym.s>
33
34#define STRING ascii
35
36#define SWT_HI 0+FM_SIZE
37#define SWT_LO 4+FM_SIZE
38#define MISSED 8+FM_SIZE
39
40#define ILK_LOCKED 0x01
41#define WAIT_FLAG 0x02
42#define SLOCK_FAST 0x02
43#define TH_FN_OWNED 0x01
44
45;
46; NOTE: make sure that PREEMPTSTACK in aligned_data is
47; set the same as it is here. This is the number of
48; traceback entries we can handle per processor
49;
50; A value of 0 disables the stack.
51;
52#define PREEMPTSTACK 0
53#define CHECKNMI 0
54#define CHECKLOCKS 1
55
56#include <ppc/POWERMAC/mp/mp.h>
57
58#define PROLOG(space) \
59 stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
60 mflr r0 __ASMNL__ \
61 stw r3,FM_ARG0(r1) __ASMNL__ \
62 stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__
63
64#define EPILOG \
65 lwz r1,0(r1) __ASMNL__ \
66 lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
67 mtlr r0 __ASMNL__
68
69#if MACH_LDEBUG && CHECKLOCKS
70/*
71 * Routines for general lock debugging.
72 */
73
74/* Gets lock check flags in CR6: CR bits 24-27 */
75
76#define CHECK_SETUP(rg) \
77 lis rg,hi16(EXT(dgWork)) __ASMNL__ \
78 ori rg,rg,lo16(EXT(dgWork)) __ASMNL__ \
79 lbz rg,dgFlags(rg) __ASMNL__ \
80 mtcrf 2,rg __ASMNL__
81
82
83/*
84 * Checks for expected lock types and calls "panic" on
85 * mismatch. Detects calls to Mutex functions with
86 * type simplelock and vice versa.
87 */
88#define CHECK_MUTEX_TYPE() \
89 bt 24+disLktypeb,1f __ASMNL__ \
90 lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
91 cmpwi r10,MUTEX_TAG __ASMNL__ \
92 beq+ 1f __ASMNL__ \
93 lis r3,hi16(not_a_mutex) __ASMNL__ \
94 ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
95 bl EXT(panic) __ASMNL__ \
96 lwz r3,FM_ARG0(r1) __ASMNL__ \
971:
98
99 .data
100not_a_mutex:
101 STRINGD "not a mutex!\n\000"
102 .text
103
104#define CHECK_SIMPLE_LOCK_TYPE() \
105 bt 24+disLktypeb,1f __ASMNL__ \
106 lwz r10,SLOCK_TYPE(r3) __ASMNL__ \
107 cmpwi r10,USLOCK_TAG __ASMNL__ \
108 beq+ 1f __ASMNL__ \
109 lis r3,hi16(not_a_slock) __ASMNL__ \
110 ori r3,r3,lo16(not_a_slock) __ASMNL__ \
111 bl EXT(panic) __ASMNL__ \
112 lwz r3,FM_ARG0(r1) __ASMNL__ \
1131:
114
115 .data
116not_a_slock:
117 STRINGD "not a simple lock!\n\000"
118 .text
119
120#define CHECK_NO_SIMPLELOCKS() \
121 bt 24+disLkNmSimpb,2f __ASMNL__ \
122 mfmsr r11 __ASMNL__ \
123 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 __ASMNL__ \
124 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 __ASMNL__ \
125 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
126 mtmsr r10 __ASMNL__ \
127 isync __ASMNL__ \
128 mfsprg r10,0 __ASMNL__ \
129 lwz r10,PP_SIMPLE_LOCK_CNT(r10) __ASMNL__ \
130 cmpwi r10,0 __ASMNL__ \
131 beq+ 1f __ASMNL__ \
132 lis r3,hi16(simple_locks_held) __ASMNL__ \
133 ori r3,r3,lo16(simple_locks_held) __ASMNL__ \
134 bl EXT(panic) __ASMNL__ \
135 lwz r3,FM_ARG0(r1) __ASMNL__ \
1361: __ASMNL__ \
137 mtmsr r11 __ASMNL__ \
1382:
139
140 .data
141simple_locks_held:
142 STRINGD "simple locks held!\n\000"
143 .text
144
145/*
146 * Verifies return to the correct thread in "unlock" situations.
147 */
148
149#define CHECK_THREAD(thread_offset) \
150 bt 24+disLkThreadb,2f __ASMNL__ \
151 mfmsr r11 __ASMNL__ \
152 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 __ASMNL__ \
153 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 __ASMNL__ \
154 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
155 mtmsr r10 __ASMNL__ \
156 isync __ASMNL__ \
157 mfsprg r10,0 __ASMNL__ \
158 lwz r10,PP_ACTIVE_THREAD(r10) __ASMNL__ \
159 cmpwi r10,0 __ASMNL__ \
160 beq- 1f __ASMNL__ \
161 lwz r9,thread_offset(r3) __ASMNL__ \
162 cmpw r9,r10 __ASMNL__ \
163 beq+ 1f __ASMNL__ \
164 lis r3,hi16(wrong_thread) __ASMNL__ \
165 ori r3,r3,lo16(wrong_thread) __ASMNL__ \
166 bl EXT(panic) __ASMNL__ \
167 lwz r3,FM_ARG0(r1) __ASMNL__ \
1681: __ASMNL__ \
169 mtmsr r11 __ASMNL__ \
1702:
171 .data
172wrong_thread:
173 STRINGD "wrong thread!\n\000"
174 .text
175
176#define CHECK_MYLOCK(thread_offset) \
177 bt 24+disLkMyLckb,2f __ASMNL__ \
178 mfmsr r11 __ASMNL__ \
179 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 __ASMNL__ \
180 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 __ASMNL__ \
181 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
182 mtmsr r10 __ASMNL__ \
183 isync __ASMNL__ \
184 mfsprg r10,0 __ASMNL__ \
185 lwz r10,PP_ACTIVE_THREAD(r10) __ASMNL__ \
186 cmpwi r10,0 __ASMNL__ \
187 beq- 1f __ASMNL__ \
188 lwz r9, thread_offset(r3) __ASMNL__ \
189 cmpw r9,r10 __ASMNL__ \
190 bne+ 1f __ASMNL__ \
191 lis r3, HIGH_ADDR(mylock_attempt) __ASMNL__ \
192 ori r3,r3,LOW_ADDR(mylock_attempt) __ASMNL__ \
193 bl EXT(panic) __ASMNL__ \
194 lwz r3,FM_ARG0(r1) __ASMNL__ \
1951: __ASMNL__ \
196 mtmsr r11 __ASMNL__ \
1972:
198
199 .data
200mylock_attempt:
201 STRINGD "mylock attempt!\n\000"
202 .text
203
204#else /* MACH_LDEBUG */
205
206#define CHECK_SETUP(rg)
207#define CHECK_MUTEX_TYPE()
208#define CHECK_SIMPLE_LOCK_TYPE()
209#define CHECK_THREAD(thread_offset)
210#define CHECK_NO_SIMPLELOCKS()
211#define CHECK_MYLOCK(thread_offset)
212
213#endif /* MACH_LDEBUG */
214
215/*
216 * void hw_lock_init(hw_lock_t)
217 *
218 * Initialize a hardware lock. These locks should be cache-line aligned and
219 * occupy a multiple of the cache-line size.
220 */
221
222ENTRY(hw_lock_init, TAG_NO_FRAME_USED)
223
224 li r0, 0 /* set lock to free == 0 */
225 stw r0, 0(r3) /* Initialize the lock */
226 blr
227
228/*
229 * void hw_lock_unlock(hw_lock_t)
230 *
231 * Unconditionally release lock.
0b4e3aa0 232 * Release preemption level.
233 */
234
235
236 .align 5
237 .globl EXT(hw_lock_unlock)
238
239LEXT(hw_lock_unlock)
240
241#if 0
242 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
243 lis r5,0xFFFF /* (TEST/DEBUG) */
244 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
245 sc /* (TEST/DEBUG) */
246#endif
247 sync /* Flush writes done under lock */
248 li r0, 0 /* set lock to free */
249 stw r0, 0(r3)
250
1c79356b 251 b epStart /* Go enable preemption... */
252
253
254/*
255 * Special case for internal use. Uses same lock code, but sets up so
256 * that there will be no disabling of preemption after locking. Generally
257 * used for mutex locks when obtaining the interlock although there is
258 * nothing stopping other uses.
259 */
260
261lockLock: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */
262 ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */
263 cmplwi cr1,r1,0 /* Clear cr1_eq so we will NOT disable preemption after locking */
264 lwz r4,0(r4) /* Get the timeout value */
265 b lockComm /* Join on up... */
266
267/*
268 * void hw_lock_lock(hw_lock_t)
269 *
270 * Acquire lock, spinning until it becomes available.
0b4e3aa0 271 * Return with preemption disabled.
272 * Apparently not used except by mach_perf.
273 * We will just set a default timeout and jump into the NORMAL timeout lock.
274 */
275
276 .align 5
277 .globl EXT(hw_lock_lock)
278
279LEXT(hw_lock_lock)
280
281lockDisa: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */
282 ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */
283 cmplw cr1,r1,r1 /* Set cr1_eq so we will disable preemption after locking */
284 lwz r4,0(r4) /* Get the timeout value */
285 b lockComm /* Join on up... */
286
287/*
288 * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
289 *
290 * Try to acquire spin-lock. Return success (1) or failure (0).
291 * Attempt will fail after timeout ticks of the timebase.
292 * We try fairly hard to get this lock. We disable for interruptions, but
293 * reenable after a "short" timeout (128 ticks, we may want to change this).
294 * After checking to see if the large timeout value (passed in) has expired and a
295 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
296 * we return either in abject failure, or disable and go back to the lock sniff routine.
297 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
298 *
299 * One programming note: NEVER put anything in here that would require this routine
300 * to be called with translation or interruptions in any particular state (on or off)!
301 *
302 */
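
/*
 * Illustrative C-level sketch of the spin/timeout protocol above (not part of this
 * file; read_timebase(), msr_*() and atomic_test_and_set() are hypothetical stand-ins
 * for the mftb/mtmsr and lwarx/stwcx. sequences):
 *
 *	unsigned int hw_lock_to_sketch(hw_lock_t lock, unsigned int timeout)
 *	{
 *		unsigned int start = read_timebase();
 *		msr_disable_interrupts();
 *		for (;;) {
 *			if (atomic_test_and_set(lock))		// got it, still disabled
 *				return 1;
 *			if (read_timebase() - start < 128)	// keep sniffing a while
 *				continue;
 *			msr_enable_interrupts();		// open a window for 'rupts
 *			timeout -= 128;
 *			if ((int)timeout <= 0)
 *				return 0;			// abject failure
 *			msr_disable_interrupts();		// back to the sniffer
 *			start = read_timebase();
 *		}
 *	}
 */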
303 .align 5
304 .globl EXT(hw_lock_to)
305
306LEXT(hw_lock_to)
307
308#if 0
309 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
310 lis r5,0xEEEE /* (TEST/DEBUG) */
311 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
312 sc /* (TEST/DEBUG) */
313#endif
314
315#if CHECKNMI
316 mflr r12 ; (TEST/DEBUG)
317 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
318 mtlr r12 ; (TEST/DEBUG)
319#endif
320
321 cmplw cr1,r1,r1 /* Set cr1_eq so we will disable preemption after locking */
322
323lockComm: mfmsr r9 /* Get the MSR value */
9bccf70c 324 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1c79356b 325 mr r5,r3 /* Get the address of the lock */
9bccf70c 326 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
327 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */
328
329 mtmsr r7 /* Turn off interruptions */
9bccf70c 330 isync ; May have turned off vec and fp here
331 mftb r8 /* Get the low part of the time base */
332
333lcktry: lwarx r6,0,r5 /* Grab the lock value */
334 andi. r3,r6,ILK_LOCKED /* Is it locked? */
335 ori r6,r6,ILK_LOCKED /* Set interlock */
336 bne- lcksniff /* Yeah, wait for it to clear... */
337 stwcx. r6,0,r5 /* Try to seize that there durn lock */
338 bne- lcktry /* Couldn't get it... */
339 li r3,1 /* return true */
340 isync /* Make sure we don't use a speculatively loaded value */
341 beq+ cr1,daPreComm /* We got it, go disable preemption if we're supposed to... */
342 mtmsr r9 ; Restore interrupt state
343 blr /* Go on home... */
344
345 .align 5
346
347lcksniff: lwz r3,0(r5) /* Get that lock in here */
0b4e3aa0 348 andi. r3,r3,ILK_LOCKED /* Is it free yet? */
349 beq+ lcktry /* Yeah, try for it again... */
350
351 mftb r10 /* Time stamp us now */
352 sub r10,r10,r8 /* Get the elapsed time */
353 cmplwi r10,128 /* Have we been spinning for 128 tb ticks? */
354 blt+ lcksniff /* Not yet... */
355
356 mtmsr r9 /* Say, any interrupts pending? */
357
358/* The following instructions force the pipeline to be interlocked so that only one
359 instruction is issued per cycle. This ensures that we stay enabled for a long enough
360 time; if it's too short, pending interruptions will not have a chance to be taken */
361
362 subi r4,r4,128 /* Back off elapsed time from timeout value */
363 or r4,r4,r4 /* Do nothing here but force a single cycle delay */
364 mr. r4,r4 /* See if we used the whole timeout */
365 li r3,0 /* Assume a timeout return code */
366 or r4,r4,r4 /* Do nothing here but force a single cycle delay */
367
368 ble- lckfail /* We failed */
369 mtmsr r7 /* Disable for interruptions */
370 mftb r8 /* Get the low part of the time base */
371 b lcksniff /* Now that we've opened an enable window, keep trying... */
372
373lckfail: /* We couldn't get the lock */
374 li r3,0 /* Set failure return code */
375 blr /* Return, head hanging low... */
376
377
378/*
379 * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
380 *
381 * Try to acquire spin-lock. The second parameter is the bit mask to test and set.
382 * multiple bits may be set. Return success (1) or failure (0).
383 * Attempt will fail after timeout ticks of the timebase.
384 * We try fairly hard to get this lock. We disable for interruptions, but
385 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
386 * After checking to see if the large timeout value (passed in) has expired and a
387 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
388 * we return either in abject failure, or disable and go back to the lock sniff routine.
389 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
390 *
391 * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
392 * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND
393 * RESTORE FROM THE STACK.
394 *
395 */
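
/*
 * Hedged caller-side sketch (illustration only, not part of this file; the lock
 * word, bit mask and timeout shown are made-up examples):
 *
 *	extern unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout);
 *	extern unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit);
 *
 *	#define MY_BUSY_BIT 0x00000004			// hypothetical bit in the word
 *
 *	if (hw_lock_bit(&some_lock_word, MY_BUSY_BIT, LockTimeOut)) {
 *		// ... critical section ...
 *		hw_unlock_bit(&some_lock_word, MY_BUSY_BIT);
 *	} else {
 *		panic("timed out waiting for busy bit");
 *	}
 */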
396
397 .align 5
398
399 nop ; Force loop alignment to cache line
400 nop
401 nop
402 nop
403
404 .globl EXT(hw_lock_bit)
405
406LEXT(hw_lock_bit)
407
408 mfmsr r9 /* Get the MSR value */
409 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
410 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
411 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */
412
413 mtmsr r7 /* Turn off interruptions */
9bccf70c 414 isync ; May have turned off vec and fp here
415
416 mftb r8 /* Get the low part of the time base */
417
418bittry: lwarx r6,0,r3 /* Grab the lock value */
419 and. r0,r6,r4 /* See if any of the lock bits are on */
420 or r6,r6,r4 /* Turn on the lock bits */
421 bne- bitsniff /* Yeah, wait for it to clear... */
422 stwcx. r6,0,r3 /* Try to seize that there durn lock */
423 beq+ bitgot /* We got it, yahoo... */
424 b bittry /* Just start up again if the store failed... */
425
426 .align 5
427
428bitsniff: lwz r6,0(r3) /* Get that lock in here */
429 and. r0,r6,r4 /* See if any of the lock bits are on */
430 beq+ bittry /* Yeah, try for it again... */
431
432 mftb r6 /* Time stamp us now */
433 sub r6,r6,r8 /* Get the elapsed time */
434 cmplwi r6,128 /* Have we been spinning for 128 tb ticks? */
435 blt+ bitsniff /* Not yet... */
436
437 mtmsr r9 /* Say, any interrupts pending? */
438
439/* The following instructions force the pipeline to be interlocked so that only one
440 instruction is issued per cycle. This ensures that we stay enabled for a long enough
441 time. If it's too short, pending interruptions will not have a chance to be taken
442*/
443
444 subi r5,r5,128 /* Back off elapsed time from timeout value */
445 or r5,r5,r5 /* Do nothing here but force a single cycle delay */
446 mr. r5,r5 /* See if we used the whole timeout */
447 or r5,r5,r5 /* Do nothing here but force a single cycle delay */
448
449 ble- bitfail /* We failed */
450 mtmsr r7 /* Disable for interruptions */
451 mftb r8 /* Get the low part of the time base */
452 b bitsniff /* Now that we've opened an enable window, keep trying... */
453
454 .align 5
455
456bitgot: mtmsr r9 /* Enable for interruptions */
457 li r3,1 /* Set good return code */
458 isync /* Make sure we don't use a speculatively loaded value */
459 blr
460
461bitfail: li r3,0 /* Set failure return code */
462 blr /* Return, head hanging low... */
463
464
465/*
466 * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
467 *
468 * Release bit based spin-lock. The second parameter is the bit mask to clear.
469 * Multiple bits may be cleared.
470 *
471 * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
472 * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND
473 * RESTORE FROM THE STACK.
474 */
475
476 .align 5
477 .globl EXT(hw_unlock_bit)
478
479LEXT(hw_unlock_bit)
480
481 sync
482
483ubittry: lwarx r0,0,r3 /* Grab the lock value */
484 andc r0,r0,r4 /* Clear the lock bits */
485 stwcx. r0,0,r3 /* Try to clear that there durn lock */
486 bne- ubittry /* Try again, couldn't save it... */
487
488 blr /* Leave... */
489
490/*
491 * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
492 * unsigned int newb, unsigned int timeout)
493 *
494 * Try to acquire spin-lock. The second parameter is the bit mask to check.
495 * The third is the value of those bits and the 4th is what to set them to.
496 * Return success (1) or failure (0).
497 * Attempt will fail after timeout ticks of the timebase.
498 * We try fairly hard to get this lock. We disable for interruptions, but
499 * reenable after a "short" timeout (128 ticks, we may want to shorten this).
500 * After checking to see if the large timeout value (passed in) has expired and a
501 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
502 * we return either in abject failure, or disable and go back to the lock sniff routine.
503 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
504 *
505 */
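
/*
 * Hedged pseudo-C of the mbits protocol (illustration only; load_reserved() and
 * store_conditional() are hypothetical stand-ins for lwarx/stwcx.): the masked
 * field must match the expected value before the mask bits are replaced:
 *
 *	// success = hw_lock_mbits(&word, mask, expected, newbits, timeout);
 *	do {
 *		old = load_reserved(&word);
 *		if ((old & mask) != expected)
 *			goto sniff;			// wrong state: wait or time out
 *		new = (old & ~mask) | newbits;
 *	} while (!store_conditional(&word, new));
 */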
506
507 .align 5
508
509 nop ; Force loop alignment to cache line
510 nop
511 nop
512 nop
513
514 .globl EXT(hw_lock_mbits)
515
516LEXT(hw_lock_mbits)
517
518 mfmsr r9 ; Get the MSR value
519 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
520 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
521 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Get MSR that is uninterruptible
522
523 mtmsr r8 ; Turn off interruptions
9bccf70c 524 isync ; May have turned off vectors or float here
525 mftb r10 ; Get the low part of the time base
526
527mbittry: lwarx r12,0,r3 ; Grab the lock value
528 and r0,r12,r4 ; Clear extra bits
9bccf70c 529 andc r12,r12,r4 ; Clear all bits in the bit mask
530 or r12,r12,r6 ; Turn on the lock bits
531 cmplw r0,r5 ; Are these the right bits?
532 bne- mbitsniff ; Nope, wait for it to clear...
533 stwcx. r12,0,r3 ; Try to seize that there durn lock
534 beq+ mbitgot ; We got it, yahoo...
535 b mbittry ; Just start up again if the store failed...
536
537 .align 5
538
539mbitsniff: lwz r12,0(r3) ; Get that lock in here
540 and r0,r12,r4 ; Clear extra bits
541 cmplw r0,r5 ; Are these the right bits?
542 beq+ mbittry ; Yeah, try for it again...
543
544 mftb r11 ; Time stamp us now
545 sub r11,r11,r10 ; Get the elapsed time
546 cmplwi r11,128 ; Have we been spinning for 128 tb ticks?
547 blt+ mbitsniff ; Not yet...
548
549 mtmsr r9 ; Say, any interrupts pending?
550
551; The following instructions force the pipeline to be interlocked so that only one
552; instruction is issued per cycle. This ensures that we stay enabled for a long enough
553; time. If it is too short, pending interruptions will not have a chance to be taken
554
555 subi r7,r7,128 ; Back off elapsed time from timeout value
556 or r7,r7,r7 ; Do nothing here but force a single cycle delay
557 mr. r7,r7 ; See if we used the whole timeout
558 or r7,r7,r7 ; Do nothing here but force a single cycle delay
559
560 ble- mbitfail ; We failed
561 mtmsr r8 ; Disable for interruptions
562 mftb r10 ; Get the low part of the time base
563 b mbitsniff ; Now that we have opened an enable window, keep trying...
564
565 .align 5
566
567mbitgot: mtmsr r9 ; Enable for interruptions
568 li r3,1 ; Set good return code
569 isync ; Make sure we do not use a speculatively loaded value
570 blr
571
572mbitfail: li r3,0 ; Set failure return code
573 blr ; Return, head hanging low...
574
575
576/*
577 * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
578 *
579 * Spin until word hits 0 or timeout.
580 * Return success (1) or failure (0).
581 * Attempt will fail after timeout ticks of the timebase.
582 *
583 * The theory is that a processor bumps a counter as it signals
584 * the other processors, then spins until the counter hits 0 (or
585 * times out). Each of the other processors, as it receives the
586 * signal, decrements the counter.
587 *
588 * The other processors use an interlocked update to decrement; this
589 * one does not need to interlock.
590 *
591 */
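
/*
 * Hedged C sketch of the intended rendezvous pattern (illustration only;
 * signal_cpu() and the handler below are hypothetical):
 *
 *	unsigned int outstanding = ncpus - 1;		// shared counter
 *	for (each other cpu) signal_cpu(cpu);		// one signal per target
 *	if (!hw_cpu_sync(&outstanding, LockTimeOut))	// spin until it drains to 0
 *		panic("cpus did not check in");
 *
 *	// in the signal handler on each target cpu:
 *	hw_atomic_sub(&outstanding, 1);			// interlocked decrement
 */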
592
593 .align 5
594
595 .globl EXT(hw_cpu_sync)
596
597LEXT(hw_cpu_sync)
598
599 mftb r10 ; Get the low part of the time base
600 mr r9,r3 ; Save the sync word address
601 li r3,1 ; Assume we work
602
603csynctry: lwz r11,0(r9) ; Grab the sync value
604 mr. r11,r11 ; Counter hit 0?
605 beqlr- ; Yes, all done, return success...
606 mftb r12 ; Time stamp us now
607
608 sub r12,r12,r10 ; Get the elapsed time
609 cmplw r4,r12 ; Have we gone too long?
610 bge+ csynctry ; Not yet...
611
612 li r3,0 ; Set failure...
613 blr ; Return, head hanging low...
614
615/*
616 * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
617 *
618 * Spin until word changes or timeout.
619 * Return success (1) or failure (0).
620 * Attempt will fail after timeout ticks of the timebase.
621 *
622 * This is used to ensure that a processor passes a certain point.
623 * An example of use is to monitor the last interrupt time in the
624 * per_proc block. This can be used to ensure that the other processor
625 * has seen at least one interrupt since a specific time.
626 *
627 */
628
629 .align 5
630
0b4e3aa0 631 .globl EXT(hw_cpu_wcng)
632
633LEXT(hw_cpu_wcng)
634
635 mftb r10 ; Get the low part of the time base
636 mr r9,r3 ; Save the sync word address
637 li r3,1 ; Assume we work
638
639wcngtry: lwz r11,0(r9) ; Grab the value
640 cmplw r11,r4 ; Do they still match?
641 bnelr- ; Nope, cool...
642 mftb r12 ; Time stamp us now
643
644 sub r12,r12,r10 ; Get the elapsed time
645 cmplw r5,r12 ; Have we gone too long?
646 bge+ wcngtry ; Not yet...
647
648 li r3,0 ; Set failure...
649 blr ; Return, head hanging low...
650
651
652/*
653 * unsigned int hw_lock_try(hw_lock_t)
654 *
655 * Try to acquire spin-lock. Return success (1) or failure (0)
656 * Returns with preemption disabled on success.
657 *
658 */
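
/*
 * Hedged caller-side sketch (illustration only): on success the lock is held
 * and preemption is disabled, so the matching hw_lock_unlock() re-enables it:
 *
 *	if (hw_lock_try(&lock)) {
 *		// ... short critical section, preemption disabled ...
 *		hw_lock_unlock(&lock);		// releases lock, enables preemption
 *	} else {
 *		// ... take the contended/slow path ...
 *	}
 */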
659 .align 5
660 .globl EXT(hw_lock_try)
661
662LEXT(hw_lock_try)
663
664#if 0
665 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
666 lis r5,0x9999 /* (TEST/DEBUG) */
667 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
668 sc /* (TEST/DEBUG) */
669#endif
670 mfmsr r9 /* Save the MSR value */
671 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
672 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
673 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruption bit */
674
675#if MACH_LDEBUG
676 lis r5, 0x10 /* roughly 1E6 */
677 mtctr r5
678#endif /* MACH_LDEBUG */
679
680 mtmsr r7 /* Disable interruptions and thus, preemption */
9bccf70c 681 isync ; May have turned off fp/vec here
682.L_lock_try_loop:
683
684#if MACH_LDEBUG
685 bdnz+ 0f /* Count attempts */
686 mtmsr r9 /* Restore enablement */
687 BREAKPOINT_TRAP /* Get to debugger */
688 mtmsr r7 /* Disable interruptions and thus, preemption */
6890:
690#endif /* MACH_LDEBUG */
691
692 lwarx r5,0,r3 /* Ld from addr of arg and reserve */
693
694 andi. r6,r5,ILK_LOCKED /* TEST... */
695 ori r5,r5,ILK_LOCKED
696 bne- .L_lock_try_failed /* branch if taken. Predict free */
697
0b4e3aa0 698 stwcx. r5,0,r3 /* And SET (if still reserved) */
699 mfsprg r6,0 /* Get the per_proc block */
700 bne- .L_lock_try_loop /* If set failed, loop back */
701
702 isync
703
9bccf70c 704 lwz r5,PP_PREEMPT_CNT(r6) /* Get the preemption level */
1c79356b 705 addi r5,r5,1 /* Bring up the disable count */
9bccf70c 706 stw r5,PP_PREEMPT_CNT(r6) /* Save it back */
707
708 mtmsr r9 /* Allow interruptions now */
709 li r3,1 /* Set that the lock was free */
710 blr
711
712.L_lock_try_failed:
713 mtmsr r9 /* Allow interruptions now */
714 li r3,0 /* FAILURE - lock was taken */
715 blr
716
717/*
718 * unsigned int hw_lock_held(hw_lock_t)
719 *
720 * Return 1 if lock is held
0b4e3aa0 721 * Doesn't change preemption state.
722 * N.B. Racy, of course.
723 *
724 */
725 .align 5
726 .globl EXT(hw_lock_held)
727
728LEXT(hw_lock_held)
729
730#if 0
731 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
732 lis r5,0x8888 /* (TEST/DEBUG) */
733 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
734 sc /* (TEST/DEBUG) */
735#endif
736 isync /* Make sure we don't use a speculatively fetched lock */
737 lwz r3, 0(r3) /* Return value of lock */
738 blr
739
740/*
9bccf70c 741 * uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
742 *
743 * Compare old value to the area; if equal, store new value and return true,
744 * else return false and do not store
9bccf70c 745 * This is an atomic operation
746 *
747 */
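
/*
 * Hedged usage sketch (illustration only): the classic compare-and-swap update
 * loop built on this primitive, shown here for a made-up flags word:
 *
 *	uint32_t old, new;
 *	do {
 *		old = flags;				// snapshot
 *		new = old | SOME_FLAG;			// compute the update
 *	} while (!hw_compare_and_store(old, new, &flags));
 */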
748 .align 5
749 .globl EXT(hw_compare_and_store)
750
751LEXT(hw_compare_and_store)
752
753 mr r6,r3 /* Save the old value */
754
755cstry: lwarx r9,0,r5 /* Grab the area value */
756 li r3,1 /* Assume it works */
757 cmplw cr0,r9,r6 /* Does it match the old value? */
758 bne- csfail /* No, it must have changed... */
759 stwcx. r4,0,r5 /* Try to save the new value */
760 bne- cstry /* Didn't get it, try again... */
761 isync /* Just hold up prefetch */
762 blr /* Return... */
763
764csfail: li r3,0 /* Set failure */
765 blr /* Better luck next time... */
766
767
768/*
9bccf70c 769 * uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
770 *
771 * Atomically add the second parameter to the first.
772 * Returns the result.
773 *
774 */
775 .align 5
776 .globl EXT(hw_atomic_add)
777
778LEXT(hw_atomic_add)
779
780 mr r6,r3 /* Save the area */
781
782addtry: lwarx r3,0,r6 /* Grab the area value */
783 add r3,r3,r4 /* Add the value */
784 stwcx. r3,0,r6 /* Try to save the new value */
785 bne- addtry /* Didn't get it, try again... */
786 blr /* Return... */
787
788
789/*
9bccf70c 790 * uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt)
791 *
792 * Atomically subtract the second parameter from the first.
793 * Returns the result.
794 *
795 */
796 .align 5
797 .globl EXT(hw_atomic_sub)
798
799LEXT(hw_atomic_sub)
800
801 mr r6,r3 /* Save the area */
802
803subtry: lwarx r3,0,r6 /* Grab the area value */
804 sub r3,r3,r4 /* Subtract the value */
805 stwcx. r3,0,r6 /* Try to save the new value */
806 bne- subtry /* Didn't get it, try again... */
807 blr /* Return... */
808
809
0b4e3aa0 810/*
9bccf70c 811 * uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask)
812 *
813 * Atomically ORs the second parameter into the first.
814 * Returns the result.
815 *
816 */
817 .align 5
818 .globl EXT(hw_atomic_or)
819
820LEXT(hw_atomic_or)
821
822 mr r6,r3 ; Save the area
823
824ortry: lwarx r3,0,r6 ; Grab the area value
825 or r3,r3,r4 ; OR the value
826 stwcx. r3,0,r6 ; Try to save the new value
827 bne- ortry ; Did not get it, try again...
828 blr ; Return...
829
830
831/*
9bccf70c 832 * uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask)
833 *
834 * Atomically ANDs the second parameter with the first.
835 * Returns the result.
836 *
837 */
838 .align 5
839 .globl EXT(hw_atomic_and)
840
841LEXT(hw_atomic_and)
842
843 mr r6,r3 ; Save the area
844
845andtry: lwarx r3,0,r6 ; Grab the area value
846 and r3,r3,r4 ; AND the value
847 stwcx. r3,0,r6 ; Try to save the new value
848 bne- andtry ; Did not get it, try again...
849 blr ; Return...
850
851
852/*
853 * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
854 *
855 * Atomically inserts the element at the head of the list
856 * anchor is the pointer to the first element
857 * element is the pointer to the element to insert
858 * disp is the displacement into the element to the chain pointer
859 *
860 */
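
/*
 * Hedged usage sketch (illustration only): pushing an element whose chain pointer
 * lives at a known offset. The element type shown is made up:
 *
 *	struct elem { struct elem *next; int payload; };
 *	struct elem *anchor = NULL;			// list head word
 *
 *	hw_queue_atomic((unsigned int *)&anchor,
 *			(unsigned int *)&e,
 *			offsetof(struct elem, next));	// disp to the chain pointer
 */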
861 .align 5
862 .globl EXT(hw_queue_atomic)
863
864LEXT(hw_queue_atomic)
865
866 mr r7,r4 /* Make end point the same as start */
867 mr r8,r5 /* Copy the displacement also */
868 b hw_queue_comm /* Join common code... */
869
870/*
871 * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
872 *
873 * Atomically inserts the list of elements at the head of the list
874 * anchor is the pointer to the first element
875 * first is the pointer to the first element to insert
876 * last is the pointer to the last element to insert
877 * disp is the displacement into the element to the chain pointer
878 *
879 */
880 .align 5
881 .globl EXT(hw_queue_atomic_list)
882
883LEXT(hw_queue_atomic_list)
884
885 mr r7,r5 /* Make end point the same as start */
886 mr r8,r6 /* Copy the displacement also */
887
888hw_queue_comm:
889 lwarx r9,0,r3 /* Pick up the anchor */
890 stwx r9,r8,r7 /* Chain that to the end of the new stuff */
9bccf70c 891 eieio ; Make sure this store makes it before the anchor update
1c79356b 892 stwcx. r4,0,r3 /* Try to chain into the front */
0b4e3aa0 893 bne- hw_queue_comm /* Didn't make it, try again... */
894
895 blr /* Return... */
896
897/*
898 * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
899 *
900 * Atomically removes the first element in a list and returns it.
901 * anchor is the pointer to the first element
902 * disp is the displacement into the element to the chain pointer
903 * Returns element if found, 0 if empty.
904 *
905 */
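
/*
 * Hedged usage sketch (illustration only), continuing the struct elem example
 * above: pop the head element, getting 0 (NULL) when the list is empty:
 *
 *	struct elem *e = (struct elem *)
 *		hw_dequeue_atomic((unsigned int *)&anchor, offsetof(struct elem, next));
 *	if (e != NULL) {
 *		// ... use e->payload ...
 *	}
 */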
906 .align 5
907 .globl EXT(hw_dequeue_atomic)
908
909LEXT(hw_dequeue_atomic)
910
911 mr r5,r3 /* Save the anchor */
912
913hw_dequeue_comm:
914 lwarx r3,0,r5 /* Pick up the anchor */
915 mr. r3,r3 /* Is the list empty? */
916 beqlr- /* Leave if the list is empty... */
917 lwzx r9,r4,r3 /* Get the next in line */
918 stwcx. r9,0,r5 /* Try to chain into the front */
919 beqlr+ ; Got the thing, go away with it...
0b4e3aa0 920 b hw_dequeue_comm ; Did not make it, try again...
921
922/*
923 * void mutex_init(mutex_t* l, etap_event_t etap)
924 */
925
926ENTRY(mutex_init,TAG_NO_FRAME_USED)
927
928 PROLOG(0)
929 li r10, 0
930 stw r10, LOCK_DATA(r3) /* clear lock word */
931 sth r10, MUTEX_WAITERS(r3) /* init waiter count */
9bccf70c 932 sth r10, MUTEX_PROMOTED_PRI(r3)
1c79356b 933#if MACH_LDEBUG
934 stw r10, MUTEX_PC(r3) /* init caller pc */
935 stw r10, MUTEX_THREAD(r3) /* and owning thread */
936 li r10, MUTEX_TAG
937 stw r10, MUTEX_TYPE(r3) /* set lock type */
938#endif /* MACH_LDEBUG */
939
940#if ETAP_LOCK_TRACE
0b4e3aa0 941 bl EXT(etap_mutex_init) /* init ETAP data */
942#endif /* ETAP_LOCK_TRACE */
943
944 EPILOG
945 blr
946
947/*
0b4e3aa0 948 * void mutex_lock(mutex_t*)
949 */
950
951 .align 5
952 .globl EXT(mutex_lock)
953LEXT(mutex_lock)
954
0b4e3aa0 955 .globl EXT(_mutex_lock)
956LEXT(_mutex_lock)
957
0b4e3aa0 958#if !MACH_LDEBUG
9bccf70c 959 mfsprg r6,1 /* load the current thread */
0b4e3aa0 960L_mutex_lock_loop:
961 lwarx r5,0,r3 /* load the mutex lock */
962 mr. r5,r5
963 bne- L_mutex_lock_slow /* go to the slow path */
964 stwcx. r6,0,r3 /* grab the lock */
965 bne- L_mutex_lock_loop /* loop back if failed */
966 isync /* stop prefetching */
967 blr
968L_mutex_lock_slow:
969#endif
970#if CHECKNMI
971 mflr r12 ; (TEST/DEBUG)
972 bl EXT(ml_sense_nmi) ; (TEST/DEBUG)
973 mtlr r12 ; (TEST/DEBUG)
974#endif
975
976 PROLOG(12)
977#if MACH_LDEBUG
978 bl EXT(assert_wait_possible)
979 mr. r3,r3
980 bne L_mutex_lock_assert_wait_1
981 lis r3,hi16(L_mutex_lock_assert_wait_panic_str)
982 ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
983 bl EXT(panic)
984
985 .data
986L_mutex_lock_assert_wait_panic_str:
987 STRINGD "mutex_lock: assert_wait_possible false\n\000"
988 .text
989
990L_mutex_lock_assert_wait_1:
991 lwz r3,FM_ARG0(r1)
992#endif
993
994#if ETAP_LOCK_TRACE
995 li r0, 0
996 stw r0,SWT_HI(r1) /* set wait time to 0 (HI) */
997 stw r0,SWT_LO(r1) /* set wait time to 0 (LO) */
998 stw r0,MISSED(r1) /* clear local miss marker */
999#endif /* ETAP_LOCK_TRACE */
1000
1001 CHECK_SETUP(r12)
1002 CHECK_MUTEX_TYPE()
1003 CHECK_NO_SIMPLELOCKS()
1004
1005.L_ml_retry:
1006#if 0
1007 mfsprg r4,0 /* (TEST/DEBUG) */
1c79356b 1008 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
9bccf70c 1009 lwz r4,PP_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
1010 lis r5,0xAAAA /* (TEST/DEBUG) */
1011 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1012 sc /* (TEST/DEBUG) */
1013#endif
1014
1015 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1016 mr. r4,r3 /* Did we get it? */
1017 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1018 bne+ mlGotInt /* We got it just fine... */
1019
1020 lis r3,HIGH_ADDR(mutex_failed1) ; Get the failed mutex message
1021 ori r3,r3,LOW_ADDR(mutex_failed1) ; Get the failed mutex message
1022 bl EXT(panic) ; Call panic
1023 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1024
1025 .data
1026mutex_failed1:
1027 STRINGD "We can't get a mutex interlock lock on mutex_lock\n\000"
1028 .text
1029
1030mlGotInt:
1031
1032/* Note that there is no reason to do a load and reserve here. We already
1033 hold the interlock lock and no one can touch this field unless they
1034 have that, so, we're free to play */
1035
0b4e3aa0 1036 lwz r4,LOCK_DATA(r3) /* Get the mutex's lock field */
9bccf70c 1037 rlwinm. r9,r4,30,2,31 /* So, can we have it? */
1038 bne- mlInUse /* Nope, somebody's playing already... */
1039
1040#if MACH_LDEBUG
9bccf70c 1041 mfmsr r11 ; Note: no need to deal with fp or vec here
1042 rlwinm r5,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1043 mtmsr r5
1c79356b 1044 mfsprg r9,0 /* Get the per_proc block */
0b4e3aa0 1045 lwz r5,0(r1) /* Get previous save frame */
0b4e3aa0 1046 lwz r5,FM_LR_SAVE(r5) /* Get our caller's address */
9bccf70c 1047 lwz r8, PP_ACTIVE_THREAD(r9) /* Get the active thread */
0b4e3aa0 1048 stw r5,MUTEX_PC(r3) /* Save our caller */
1049 mr. r8,r8 /* Is there any thread? */
1050 stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */
1051 beq- .L_ml_no_active_thread /* No owning thread... */
1052 lwz r9,THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
1053 addi r9,r9,1 /* Bump it up */
1054 stw r9,THREAD_MUTEX_COUNT(r8) /* Stash it back */
1055.L_ml_no_active_thread:
1056 mtmsr r11
1057#endif /* MACH_LDEBUG */
1058
1059 bl EXT(mutex_lock_acquire)
1060 mfsprg r5,1
1061 mr. r4,r3
1062 lwz r3,FM_ARG0(r1)
1063 beq mlUnlock
1064 ori r5,r5,WAIT_FLAG
1065mlUnlock:
1066 sync
1067 stw r5,LOCK_DATA(r3) /* grab the mutexlock and free the interlock */
1068
1069#if ETAP_LOCK_TRACE
1070 mflr r4
1071 lwz r5,SWT_HI(r1)
1072 lwz r6,SWT_LO(r1)
1073 bl EXT(etap_mutex_hold) /* collect hold timestamp */
1074#endif /* ETAP_LOCK_TRACE */
1075
1076 EPILOG /* Restore all saved registers */
1077
1c79356b 1078 b epStart /* Go enable preemption... */
1079
1080/*
1081 * We come to here when we have a resource conflict. In other words,
1082 * the mutex is held.
1083 */
1084
1085mlInUse:
1086
1087#if ETAP_LOCK_TRACE
1088 lwz r7,MISSED(r1)
1089 cmpwi r7,0 /* did we already take a wait timestamp ? */
1090 bne .L_ml_block /* yup. carry-on */
1091 bl EXT(etap_mutex_miss) /* get wait timestamp */
1092 stw r3,SWT_HI(r1) /* store timestamp */
1093 stw r4,SWT_LO(r1)
1094 li r7, 1 /* mark wait timestamp as taken */
1095 stw r7,MISSED(r1)
1096 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1097.L_ml_block:
1098#endif /* ETAP_LOCK_TRACE */
1099
1100 CHECK_SETUP(r12)
1101 CHECK_MYLOCK(MUTEX_THREAD) /* Assert we don't own the lock already */
1102
1103
1104/* Note that we come in here with the interlock set. The wait routine
1105 * will unlock it before waiting.
1106 */
9bccf70c 1107 ori r4,r4,WAIT_FLAG /* Set the wait flag */
0b4e3aa0 1108 stw r4,LOCK_DATA(r3)
9bccf70c 1109 rlwinm r4,r4,0,0,29 /* Extract the lock owner */
1110 bl EXT(mutex_lock_wait) /* Wait for our turn at the lock */
1111
1112 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1113 b .L_ml_retry /* and try again... */
1114
1115
1116/*
1117 * void _mutex_try(mutex_t*)
1118 *
1119 */
1120
1121 .align 5
1122 .globl EXT(mutex_try)
1123LEXT(mutex_try)
1c79356b 1124 .globl EXT(_mutex_try)
1c79356b 1125LEXT(_mutex_try)
0b4e3aa0 1126#if !MACH_LDEBUG
9bccf70c 1127 mfsprg r6,1 /* load the current thread */
0b4e3aa0 1128L_mutex_try_loop:
1129 lwarx r5,0,r3 /* load the lock value */
1130 mr. r5,r5
1131 bne- L_mutex_try_slow /* branch to the slow path */
1132 stwcx. r6,0,r3 /* grab the lock */
1133 bne- L_mutex_try_loop /* retry if failed */
1134 isync /* stop prefetching */
1135 li r3, 1
1136 blr
1137L_mutex_try_slow:
1138#endif
1139
1140 PROLOG(8) /* reserve space for SWT_HI and SWT_LO */
1141
1142#if ETAP_LOCK_TRACE
1143 li r5, 0
1144 stw r5, SWT_HI(r1) /* set wait time to 0 (HI) */
1145 stw r5, SWT_LO(r1) /* set wait time to 0 (LO) */
1146#endif /* ETAP_LOCK_TRACE */
1147
1148#if 0
1149 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1150 lis r5,0xBBBB /* (TEST/DEBUG) */
1151 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1152 sc /* (TEST/DEBUG) */
1153#endif
1154 CHECK_SETUP(r12)
1155 CHECK_MUTEX_TYPE()
1156 CHECK_NO_SIMPLELOCKS()
1157
0b4e3aa0 1158 lwz r6,LOCK_DATA(r3) /* Quick check */
9bccf70c 1159 rlwinm. r6,r6,30,2,31 /* to see if someone has this lock already */
1160 bne- mtFail /* Someone's got it already... */
1161
1162 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1163 mr. r4,r3 /* Did we get it? */
1164 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1165 bne+ mtGotInt /* We got it just fine... */
1166
1167 lis r3,HIGH_ADDR(mutex_failed2) ; Get the failed mutex message
1168 ori r3,r3,LOW_ADDR(mutex_failed2) ; Get the failed mutex message
1169 bl EXT(panic) ; Call panic
1170 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1171
1172 .data
1173mutex_failed2:
1174 STRINGD "We can't get a mutex interlock lock on mutex_try\n\000"
1175 .text
1176
1177mtGotInt:
1178
1179/* Note that there is no reason to do a load and reserve here. We already
1180 hold the interlock and no one can touch this field unless they
1181 have that, so, we're free to play */
1182
0b4e3aa0 1183 lwz r4,LOCK_DATA(r3) /* Get the mutex's lock field */
9bccf70c 1184 rlwinm. r9,r4,30,2,31 /* So, can we have it? */
1185 bne- mtInUse /* Nope, somebody's playing already... */
1186
1187#if MACH_LDEBUG
1188 mfmsr r11
1189 rlwinm r5,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1190 mtmsr r5
1c79356b 1191 mfsprg r9,0 /* Get the per_proc block */
0b4e3aa0 1192 lwz r5,0(r1) /* Get previous save frame */
0b4e3aa0 1193 lwz r5,FM_LR_SAVE(r5) /* Get our caller's address */
9bccf70c 1194 lwz r8, PP_ACTIVE_THREAD(r9) /* Get the active thread */
0b4e3aa0 1195 stw r5,MUTEX_PC(r3) /* Save our caller */
1196 mr. r8,r8 /* Is there any thread? */
1197 stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */
1198 beq- .L_mt_no_active_thread /* No owning thread... */
1199 lwz r9, THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
1200 addi r9, r9, 1 /* Bump it up */
1201 stw r9, THREAD_MUTEX_COUNT(r8) /* Stash it back */
1202.L_mt_no_active_thread:
1203 mtmsr r11
1204#endif /* MACH_LDEBUG */
1205
1206 bl EXT(mutex_lock_acquire)
1207 mfsprg r5,1
1208 mr. r4,r3
1209 lwz r3,FM_ARG0(r1)
1210 beq mtUnlock
1211 ori r5,r5,WAIT_FLAG
1212mtUnlock:
1c79356b 1213 sync /* Push it all out */
9bccf70c 1214 stw r5,LOCK_DATA(r3) /* grab the mutexlock and free the interlock */
1215
1216#if ETAP_LOCK_TRACE
1217 lwz r4,0(r1) /* Back chain the stack */
1218 lwz r5,SWT_HI(r1)
1219 lwz r4,FM_LR_SAVE(r4) /* Get our caller's address */
1220 lwz r6,SWT_LO(r1)
1221 bl EXT(etap_mutex_hold) /* collect hold timestamp */
1222#endif /* ETAP_LOCK_TRACE */
1223
1c79356b 1224 bl epStart /* Go enable preemption... */
1225
1226 li r3, 1
1227 EPILOG /* Restore all saved registers */
1228 blr /* Return... */
1229
1230/*
1231 * We come to here when we have a resource conflict. In other words,
1232 * the mutex is held.
1233 */
1234
0b4e3aa0 1235mtInUse:
1236 rlwinm r4,r4,0,0,30 /* Get the unlock value */
1237 stw r4,LOCK_DATA(r3) /* free the interlock */
1c79356b 1238 bl epStart /* Go enable preemption... */
1239
1240mtFail: li r3,0 /* Set failure code */
1241 EPILOG /* Restore all saved registers */
1242 blr /* Return... */
1243
1244
1245/*
1246 * void mutex_unlock(mutex_t* l)
1247 */
1248
1249 .align 5
1250 .globl EXT(mutex_unlock)
1251
1252LEXT(mutex_unlock)
0b4e3aa0 1253#if !MACH_LDEBUG
9bccf70c 1254 sync
1255L_mutex_unlock_loop:
1256 lwarx r5,0,r3
1257 rlwinm. r4,r5,0,30,31 /* Bail if pending waiter or interlock set */
1258 li r5,0 /* Clear the mutexlock */
1259 bne- L_mutex_unlock_slow
1260 stwcx. r5,0,r3
1261 bne- L_mutex_unlock_loop
1262 blr
1263L_mutex_unlock_slow:
1264#endif
1265 PROLOG(0)
1266
1267#if ETAP_LOCK_TRACE
1268 bl EXT(etap_mutex_unlock) /* collect ETAP data */
1269 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
1270#endif /* ETAP_LOCK_TRACE */
1271
1272 CHECK_SETUP(r12)
1273 CHECK_MUTEX_TYPE()
1274 CHECK_THREAD(MUTEX_THREAD)
1275
1276#if 0
1277 mfsprg r4,0 /* (TEST/DEBUG) */
1c79356b 1278 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
9bccf70c 1279 lwz r4,PP_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
1280 lis r5,0xCCCC /* (TEST/DEBUG) */
1281 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1282 sc /* (TEST/DEBUG) */
1283#endif
1284 bl lockDisa /* Go get a lock on the mutex's interlock lock */
1285 mr. r4,r3 /* Did we get it? */
1286 lwz r3,FM_ARG0(r1) /* Restore the lock address */
1287 bne+ muGotInt /* We got it just fine... */
1288
1289 lis r3,HIGH_ADDR(mutex_failed3) ; Get the failed mutex message
1290 ori r3,r3,LOW_ADDR(mutex_failed3) ; Get the failed mutex message
1291 bl EXT(panic) ; Call panic
1292 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1293
1294 .data
1295mutex_failed3:
1296 STRINGD "We can't get a mutex interlock lock on mutex_unlock\n\000"
1297 .text
1298
1299
1300muGotInt:
1301 lwz r4,LOCK_DATA(r3)
1302 andi. r5,r4,WAIT_FLAG /* are there any waiters ? */
1303 rlwinm r4,r4,0,0,29
1304 beq+ muUnlock /* Nope, we're done... */
1305
1306 bl EXT(mutex_unlock_wakeup) /* yes, wake a thread */
1307 lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
9bccf70c 1308 lwz r5,LOCK_DATA(r3) /* load the lock */
1309
1310muUnlock:
1311#if MACH_LDEBUG
1312 mfmsr r11
1313 rlwinm r9,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1314 mtmsr r9
1315 mfsprg r9,0
9bccf70c 1316 lwz r9,PP_ACTIVE_THREAD(r9)
0b4e3aa0 1317 stw r9,MUTEX_THREAD(r3) /* disown thread */
1318 cmpwi r9,0
1319 beq- .L_mu_no_active_thread
1320 lwz r8,THREAD_MUTEX_COUNT(r9)
1321 subi r8,r8,1
1322 stw r8,THREAD_MUTEX_COUNT(r9)
1323.L_mu_no_active_thread:
1324 mtmsr r11
1325#endif /* MACH_LDEBUG */
1326
9bccf70c 1327 andi. r5,r5,WAIT_FLAG /* Get the unlock value */
1c79356b 1328 sync /* Make sure it's all there before we release */
0b4e3aa0 1329 stw r5,LOCK_DATA(r3) /* unlock the interlock and lock */
1330
1331 EPILOG /* Deal with the stack now, enable_preemption doesn't always want one */
1c79356b 1332 b epStart /* Go enable preemption... */
1333
1334/*
1335 * void interlock_unlock(hw_lock_t lock)
1336 */
1337
1338 .align 5
1339 .globl EXT(interlock_unlock)
1340
1341LEXT(interlock_unlock)
1342
1343#if 0
1344 lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
1345 lis r5,0xDDDD /* (TEST/DEBUG) */
1346 oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
1347 sc /* (TEST/DEBUG) */
1348#endif
1349 lwz r10,LOCK_DATA(r3)
1350 rlwinm r10,r10,0,0,30
1c79356b 1351 sync
0b4e3aa0 1352 stw r10,LOCK_DATA(r3)
1353
1c79356b 1354 b epStart /* Go enable preemption... */
1355
1356/*
1357 * Here is where we enable preemption. We need to be protected
1358 * against ourselves; we can't chance getting interrupted and modifying
1359 * our processor-wide preemption count after we've loaded it up. So,
1360 * we need to disable all 'rupts. Actually, we could use a compare
1361 * and swap to do this, but, since there are no MP considerations
1362 * (we are dealing with a CPU local field) it is much, much faster
1363 * to disable.
1364 *
1365 * Note that if we are not configured (genned) for MP, the calls here will be
1366 * no-opped via a #define, and since the _mp forms are the same, a #define
1367 * will likewise be used to route to the other forms.
1368 */
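
/*
 * Hedged C-level sketch of the count handling below (illustration only; the real
 * code works on the per_proc block with interruptions disabled, and the helper
 * names per_proc(), need_ast(), interrupts_enabled() and do_preempt_call() are
 * hypothetical):
 *
 *	void _disable_preemption_sketch(void)  { per_proc()->preempt_cnt++; }
 *
 *	void _enable_preemption_sketch(void)
 *	{
 *		if (--per_proc()->preempt_cnt == 0 &&
 *		    (need_ast() & AST_URGENT) && interrupts_enabled())
 *			do_preempt_call();	// firmware DoPreemptCall trap
 *	}
 */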
1369
1370/* This version does not check if we get preempted or not */
1371
1372
1373 .align 4
1374 .globl EXT(_enable_preemption_no_check)
1375
1376LEXT(_enable_preemption_no_check)
1377 cmplw cr1,r1,r1 /* Force zero cr so we know not to check if preempted */
1378 b epCommn /* Join up with the other enable code... */
1379
1380
1381/* This version checks if we get preempted or not */
1382
1383 .align 5
1384 .globl EXT(_enable_preemption)
1385
1386LEXT(_enable_preemption)
1387
1388epStart: cmplwi cr1,r1,0 /* Force non-zero cr so we know to check if preempted */
1389
1390/*
1391 * Common enable preemption code
1392 */
1393
1394epCommn: mfmsr r9 /* Save the old MSR */
1395 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1396 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1397 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1398 mtmsr r8 /* Interrupts off */
1399 isync ; May have messed with vec/fp here
1400
1c79356b 1401 mfsprg r3,0 /* Get the per_proc block */
1402 li r8,-1 /* Get a decrementer */
9bccf70c 1403 lwz r5,PP_PREEMPT_CNT(r3) /* Get the preemption level */
1404 add. r5,r5,r8 /* Bring down the disable count */
1405#if 0
1406 mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep us from interrupting too early
1407 mr. r4,r4 ; (TEST/DEBUG)
1408 beq- epskptrc0 ; (TEST/DEBUG)
1409 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1410 lis r4,0xBBBB ; (TEST/DEBUG)
1411 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1412 sc ; (TEST/DEBUG)
1413epskptrc0: mr. r5,r5 ; (TEST/DEBUG)
1414#endif
1415#if MACH_LDEBUG
1416 blt- epTooFar /* Yeah, we did... */
1417#endif /* MACH_LDEBUG */
9bccf70c 1418 stw r5,PP_PREEMPT_CNT(r3) /* Save it back */
1419
1420 beq+ epCheckPreempt /* Go check if we need to be preempted... */
1421
1422epNoCheck: mtmsr r9 /* Restore the interrupt level */
1423 blr /* Leave... */
1424
1425#if MACH_LDEBUG
1426epTooFar:
1427 lis r6,HIGH_ADDR(EXT(panic)) /* First half of panic call */
1428 lis r3,HIGH_ADDR(epTooFarStr) /* First half of panic string */
1429 ori r6,r6,LOW_ADDR(EXT(panic)) /* Second half of panic call */
1430 ori r3,r3,LOW_ADDR(epTooFarStr) /* Second half of panic string */
1431 mtlr r6 /* Get the address of the panic routine */
1432 mtmsr r9 /* Restore interruptions */
1433 blrl /* Panic... */
1434
1435 .data
1436epTooFarStr:
1437 STRINGD "_enable_preemption: preemption_level <= 0!\000"
1438 .text
1439#endif /* MACH_LDEBUG */
1440
1441 .align 5
1442
1443epCheckPreempt:
1444 lwz r7,PP_NEED_AST(r3) /* Get the AST request address */
1445 li r5,AST_URGENT /* Get the requests we do honor */
1446 lwz r7,0(r7) /* Get the actual, real live, extra special AST word */
1447 lis r0,HIGH_ADDR(DoPreemptCall) /* Just in case, get the top of firmware call */
1448 and. r7,r7,r5 ; Should we preempt?
1449 ori r0,r0,LOW_ADDR(DoPreemptCall) /* Merge in bottom part */
1450 beq+ epCPno ; No preemption here...
1451
1452 andi. r3,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off
1453
1454epCPno: mtmsr r9 /* Allow interrupts if we can */
1455 beqlr+ ; We probably will not preempt...
1456 sc /* Do the preemption */
1457 blr /* Now, go away now... */
1458
1459/*
1460 * Here is where we disable preemption. Since preemption is on a
1461 * per processor basis (a thread runs on one CPU at a time) we don't
1462 * need any cross-processor synchronization. We do, however, need to
1463 * be interrupt safe, so we don't preempt while in the process of
1464 * disabling it. We could use SPLs, but since we always want complete
1465 * disablement, and this is platform specific code, we'll just kick the
1466 * MSR. We'll save a couple of orders of magnitude over using SPLs.
1467 */
1468
1469 .align 5
1470
1471 nop ; Use these 5 nops to force daPreComm
1472 nop ; to a line boundary.
1473 nop
1474 nop
1475 nop
1476
1477 .globl EXT(_disable_preemption)
1478
1479LEXT(_disable_preemption)
1480
1481daPreAll: mfmsr r9 /* Save the old MSR */
1482 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1483 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1484 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1485 mtmsr r8 /* Interrupts off */
1486 isync ; May have messed with fp/vec
1487
1488daPreComm: mfsprg r6,0 /* Get the per_proc block */
9bccf70c 1489 lwz r5,PP_PREEMPT_CNT(r6) /* Get the preemption level */
1c79356b 1490 addi r5,r5,1 /* Bring up the disable count */
9bccf70c 1491 stw r5,PP_PREEMPT_CNT(r6) /* Save it back */
1492#if 0
1493 mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep us from interrupting too early
1494 mr. r4,r4 ; (TEST/DEBUG)
1495 beq- epskptrc1 ; (TEST/DEBUG)
1496 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1497 lis r4,0xAAAA ; (TEST/DEBUG)
1498 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1499 sc ; (TEST/DEBUG)
1500epskptrc1: ; (TEST/DEBUG)
1501#endif
1502
1503;
1504; Set PREEMPTSTACK above to enable a preemption traceback stack.
1505;
1506; NOTE: make sure that PREEMPTSTACK in aligned_data is
1507; set the same as it is here. This is the number of
1508; traceback entries we can handle per processor
1509;
1510; A value of 0 disables the stack.
1511;
1512#if PREEMPTSTACK
1513 cmplwi r5,PREEMPTSTACK ; Maximum depth
1514 lwz r6,CPU_ACTIVE_THREAD(r6) ; Get the pointer to the currently active thread
1515 bgt- nopredeb ; Too many to stack...
1516 mr. r6,r6 ; During boot?
1517 beq- nopredeb ; Yes, do not do backtrace...
1518 lwz r6,THREAD_TOP_ACT(r6) ; Point to the active activation
1519 lwz r6,ACT_MACT_PCB(r6) ; Get the last savearea used
1520 mr. r0,r6 ; Any saved context?
1521 beq- nosaveds ; No...
1522 lwz r0,saver1(r6) ; Get end of savearea chain
1523
1524nosaveds: li r11,0 ; Clear callers callers callers return
1525 li r10,0 ; Clear callers callers callers callers return
1526 li r8,0 ; Clear callers callers callers callers callers return
1527 lwz r2,0(r1) ; Get callers callers stack frame
1528 lwz r12,8(r2) ; Get our callers return
1529 lwz r4,0(r2) ; Back chain
1530
1531 xor r2,r4,r2 ; Form difference
1532 cmplwi r2,8192 ; Within a couple of pages?
1533 mr r2,r4 ; Move register
1534 bge- nosaveher2 ; No, no back chain then...
1535 lwz r11,8(r2) ; Get our callers return
1536 lwz r4,0(r2) ; Back chain
1537
1538 xor r2,r4,r2 ; Form difference
1539 cmplwi r2,8192 ; Within a couple of pages?
1540 mr r2,r4 ; Move register
1541 bge- nosaveher2 ; No, no back chain then...
1542 lwz r10,8(r2) ; Get our callers return
1543 lwz r4,0(r2) ; Back chain
1544
1545 xor r2,r4,r2 ; Form difference
1546 cmplwi r2,8192 ; Within a couple of pages?
1547 mr r2,r4 ; Move register
1548 bge- nosaveher2 ; No, no back chain then...
1549 lwz r8,8(r2) ; Get our callers return
1550
1551nosaveher2:
1552 addi r5,r5,-1 ; Get index to slot
1553 mfspr r6,pir ; Get our processor
1554 mflr r4 ; Get our return
1555 rlwinm r6,r6,8,0,23 ; Index to processor slot
1556 lis r2,hi16(EXT(DBGpreempt)) ; Stack high order
1557 rlwinm r5,r5,4,0,27 ; Index to stack slot
1558 ori r2,r2,lo16(EXT(DBGpreempt)) ; Stack low order
1559 add r2,r2,r5 ; Point to slot
1560 add r2,r2,r6 ; Move to processor
1561 stw r4,0(r2) ; Save our return
1562 stw r11,4(r2) ; Save callers caller
1563 stw r10,8(r2) ; Save callers callers caller
1564 stw r8,12(r2) ; Save callers callers callers caller
1565nopredeb:
1566#endif
1567 mtmsr r9 /* Allow interruptions now */
1568
1569 blr /* Return... */
1570
1571/*
1572 * Return the active thread for both inside and outside osfmk consumption
1573 */
1574
1575 .align 5
1576 .globl EXT(current_thread)
1577
1578LEXT(current_thread)
1579
1580#if 1
1581 mfsprg r3,1
1582 lwz r3,ACT_THREAD(r3)
1583 blr
1584#else
1585 mfmsr r9 /* Save the old MSR */
1586 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1587 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1588 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1589 mtmsr r8 /* Interrupts off */
1590 isync
1591 mfsprg r6,0 /* Get the per_proc */
1592 lwz r3,PP_ACTIVE_THREAD(r6) /* Get the active thread */
1593 mfsprg r4,1
1594 lwz r4,ACT_THREAD(r4)
1595 cmplw cr0,r4,r3
1596 beq current_thread_cont
1597 lis r5,hi16(L_current_thread_paniced)
1598 ori r5,r5,lo16(L_current_thread_paniced)
1599 lwz r6,0(r5)
1600 mr. r6,r6
1601 bne current_thread_cont
1602 stw r9,0(r5)
1603 mr r5,r4
1604 mr r4,r3
1605 lis r3,hi16(L_current_thread_panic)
1606 ori r3,r3,lo16(L_current_thread_panic)
1607 bl EXT(panic)
1608
1609 .data
1610L_current_thread_panic:
1611 STRINGD "current_thread: spr1 not sync %x %x %x\n\000"
1612L_current_thread_paniced:
1613 .long 0
1614 .text
1615current_thread_cont:
1616#endif
1617 mtmsr r9 /* Restore interruptions to entry */
1618 blr /* Return... */
1619
1620/*
1621 * Set the active thread
1622 */
1623 .align 5
1624 .globl EXT(set_machine_current_thread)
1625LEXT(set_machine_current_thread)
1626
1c79356b 1627 mfmsr r9 /* Save the old MSR */
1628 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1629 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1630 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1631 mtmsr r8 /* Interrupts off */
9bccf70c 1632 isync ; May have messed with fp/vec
1c79356b 1633 mfsprg r6,0 /* Get the per_proc */
9bccf70c 1634 stw r3,PP_ACTIVE_THREAD(r6) /* Set the active thread */
1635 mtmsr r9 /* Restore interruptions to entry */
1636 blr /* Return... */
1637
1638/*
1639 * Set the current activation
1640 */
1641 .align 5
1642 .globl EXT(set_machine_current_act)
1643LEXT(set_machine_current_act)
1644 mtsprg 1,r3 /* Set spr1 with the active thread */
1645 blr /* Return... */
1646
1647/*
1648 * Return the current activation
1649 */
1650 .align 5
1651 .globl EXT(current_act)
1652LEXT(current_act)
1653 mfsprg r3,1
1654 blr
1655
1656
1657
1658/*
1659 * Return the current preemption level
1660 */
1661
1662 .align 5
1663 .globl EXT(get_preemption_level)
1664
1665LEXT(get_preemption_level)
1666
1667 mfmsr r9 /* Save the old MSR */
1668 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1669 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1670 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1671 mtmsr r8 /* Interrupts off */
9bccf70c 1672 isync
1c79356b 1673 mfsprg r6,0 /* Get the per_proc */
9bccf70c 1674 lwz r3,PP_PREEMPT_CNT(r6) /* Get the preemption level */
1675 mtmsr r9 /* Restore interruptions to entry */
1676 blr /* Return... */
1677
1678
1679/*
1680 * Return the cpu_data
1681 */
1682
1683 .align 5
1684 .globl EXT(get_cpu_data)
1685
1686LEXT(get_cpu_data)
1687
1688 mfsprg r3,0 /* Get the per_proc */
1689 addi r3,r3,PP_ACTIVE_THREAD /* Get the pointer to the CPU data from per proc */
1690 blr /* Return... */
1691
1692
1693/*
1694 * Return the simple lock count
1695 */
1696
1697 .align 5
1698 .globl EXT(get_simple_lock_count)
1699
1700LEXT(get_simple_lock_count)
1701
1702 mfmsr r9 /* Save the old MSR */
1703 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1704 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1705 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1706 mtmsr r8 /* Interrupts off */
9bccf70c 1707 isync ; May have messed with vec/fp
1c79356b 1708 mfsprg r6,0 /* Get the per_proc */
9bccf70c 1709 lwz r3,PP_SIMPLE_LOCK_CNT(r6) /* Get the simple lock count */
1710 mtmsr r9 /* Restore interruptions to entry */
1711 blr /* Return... */
1712
1713/*
1714 * fast_usimple_lock():
1715 *
1716 * If EE is off, get the simple lock without incrementing the preemption count and
1717 * mark the simple lock with SLOCK_FAST.
1718 * If EE is on, call usimple_lock().
1719 */
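
/*
 * Hedged C-level sketch of the fast/slow split (illustration only; the helper
 * name interrupts_enabled() is hypothetical, and hw_compare_and_store() stands
 * in for the inline lwarx/stwcx. sequence):
 *
 *	void fast_usimple_lock_sketch(usimple_lock_t l)
 *	{
 *		if (interrupts_enabled()) {
 *			usimple_lock(l);		// EE on: normal path
 *			return;
 *		}
 *		// EE off: grab the lock directly, tagged SLOCK_FAST, skipping the
 *		// preemption count; fall back to usimple_lock() if it is held
 *		if (!hw_compare_and_store(0, ILK_LOCKED | SLOCK_FAST, (uint32_t *)l))
 *			usimple_lock(l);
 *	}
 */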
1720 .align 5
1721 .globl EXT(fast_usimple_lock)
1722
1723LEXT(fast_usimple_lock)
1724
1725#if CHECKNMI
1726 b EXT(usimple_lock) ; (TEST/DEBUG)
1727#endif
1728 mfmsr r9
1729 andi. r7,r9,lo16(MASK(MSR_EE))
1730 bne- L_usimple_lock_c
1731L_usimple_lock_loop:
1732 lwarx r4,0,r3
1733 li r5,ILK_LOCKED|SLOCK_FAST
1734 mr. r4,r4
1735 bne- L_usimple_lock_c
1736 stwcx. r5,0,r3
1737 bne- L_usimple_lock_loop
1738 isync
1739 blr
1740L_usimple_lock_c:
1741 b EXT(usimple_lock)
1742
1743/*
1744 * fast_usimple_lock_try():
1745 *
1746 * If EE is off, try to get the simple lock. The preemption count doesn't get incremented and
1747 * if successfully held, the simple lock is marked with SLOCK_FAST.
1748 * If EE is on, call usimple_lock_try()
1749 */
1750 .align 5
1751 .globl EXT(fast_usimple_lock_try)
1752
1753LEXT(fast_usimple_lock_try)
1754
1755#if CHECKNMI
1756 b EXT(usimple_lock_try) ; (TEST/DEBUG)
1757#endif
1758 mfmsr r9
1759 andi. r7,r9,lo16(MASK(MSR_EE))
1760 bne- L_usimple_lock_try_c
1761L_usimple_lock_try_loop:
1762 lwarx r4,0,r3
1763 li r5,ILK_LOCKED|SLOCK_FAST
1764 mr. r4,r4
1765 bne- L_usimple_lock_try_fail
1766 stwcx. r5,0,r3
1767 bne- L_usimple_lock_try_loop
1768 li r3,1
1769 isync
1770 blr
1771L_usimple_lock_try_fail:
1772 li r3,0
1773 blr
1774L_usimple_lock_try_c:
1775 b EXT(usimple_lock_try)
1776
1777/*
1778 * fast_usimple_unlock():
1779 *
1780 * If the simple lock is marked SLOCK_FAST, release it without decrementing the preemption count.
1781 * Call usimple_unlock() otherwise.
1782 */
1783 .align 5
1784 .globl EXT(fast_usimple_unlock)
1785
1786LEXT(fast_usimple_unlock)
1787
1788#if CHECKNMI
1789 b EXT(usimple_unlock) ; (TEST/DEBUG)
1790#endif
1791 lwz r5,LOCK_DATA(r3)
1792 li r0,0
1793 cmpi cr0,r5,ILK_LOCKED|SLOCK_FAST
1794 bne- L_usimple_unlock_c
1795 sync
1796#if 0
1797 mfmsr r9
1798 andi. r7,r9,lo16(MASK(MSR_EE))
1799 beq L_usimple_unlock_cont
1800 lis r3,hi16(L_usimple_unlock_panic)
1801 ori r3,r3,lo16(L_usimple_unlock_panic)
1802 bl EXT(panic)
1803
1804 .data
1805L_usimple_unlock_panic:
1806 STRINGD "fast_usimple_unlock: interrupts not disabled\n\000"
1807 .text
1808L_usimple_unlock_cont:
1809#endif
1810 stw r0, LOCK_DATA(r3)
1811 blr
1812L_usimple_unlock_c:
1813 b EXT(usimple_unlock)
1814
1815/*
1816 * enter_funnel_section():
1817 *
1818 */
1819 .align 5
1820 .globl EXT(enter_funnel_section)
1821
1822LEXT(enter_funnel_section)
1823
1824#if !MACH_LDEBUG
1825 lis r10,hi16(EXT(kdebug_enable))
1826 ori r10,r10,lo16(EXT(kdebug_enable))
1827 lwz r10,0(r10)
1828 lis r11,hi16(EXT(split_funnel_off))
1829 ori r11,r11,lo16(EXT(split_funnel_off))
1830 lwz r11,0(r11)
1831 or. r10,r11,r10 ; Check kdebug_enable or split_funnel_off
1832 bne- L_enter_funnel_section_slow1 ; If set, call the slow path
1833 mfsprg r6,1 ; Get the current activation
1834 lwz r7,LOCK_FNL_MUTEX(r3)
1835 mfmsr r11
1836 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1837 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1838 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1839 mtmsr r10 ; Turn off EE
1840 isync ; May have messed with vec/fp
1841 mr r9,r6
1842L_enter_funnel_section_loop:
1843 lwarx r5,0,r7 ; Load the mutex lock
1844 mr. r5,r5
1845 bne- L_enter_funnel_section_slow ; Go to the slow path
1846 stwcx. r6,0,r7 ; Grab the lock
1847 bne- L_enter_funnel_section_loop ; Loop back if failed
1848 isync ; Stop prefetching
1849 lwz r6,ACT_THREAD(r6) ; Get the current thread
1850 li r7,TH_FN_OWNED
1851 stw r7,THREAD_FUNNEL_STATE(r6) ; Set the funnel state
1852 stw r3,THREAD_FUNNEL_LOCK(r6) ; Set the funnel lock reference
1853 mtmsr r11
1854 blr
1855
1856L_enter_funnel_section_slow:
1857 mtmsr r11
1858L_enter_funnel_section_slow1:
1859#endif
1860 li r4,TRUE
1861 b EXT(thread_funnel_set)
1862
1863/*
1864 * exit_funnel_section():
1865 *
1866 */
1867 .align 5
1868 .globl EXT(exit_funnel_section)
1869
1870LEXT(exit_funnel_section)
1871
1872#if !MACH_LDEBUG
1873 mfsprg r6,1 ; Get the current activation
1874 lwz r6,ACT_THREAD(r6) ; Get the current thread
1875 lwz r3,THREAD_FUNNEL_LOCK(r6) ; Get the funnel lock
1876 mr. r3,r3 ; Check on funnel held
1877 beq- L_exit_funnel_section_ret ;
1878 lis r10,hi16(EXT(kdebug_enable))
1879 ori r10,r10,lo16(EXT(kdebug_enable))
1880 lwz r10,0(r10)
1881 mr. r10,r10
1882 bne- L_exit_funnel_section_slow1 ; If set, call the slow path
1883 lwz r7,LOCK_FNL_MUTEX(r3) ; Get the funnel mutex lock
1884 mfmsr r11
1885 rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1886 rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1887 rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
1888 mtmsr r10 ; Turn off EE
1889 isync ; May have messed with fp/vec
1890 sync
1891L_exit_funnel_section_loop:
1892 lwarx r5,0,r7
1893 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1894 li r5,0 ; Clear the mutexlock
1895 bne- L_exit_funnel_section_slow
1896 stwcx. r5,0,r7 ; Release the funnel mutexlock
1897 bne- L_exit_funnel_section_loop
1898 li r7,0
1899 stw r7,THREAD_FUNNEL_STATE(r6) ; Clear the funnel state
1900 stw r7,THREAD_FUNNEL_LOCK(r6) ; Clear the funnel lock reference
1901 mtmsr r11
1902L_exit_funnel_section_ret:
1903 blr
1904L_exit_funnel_section_slow:
1905 mtmsr r11
1906L_exit_funnel_section_slow1:
1907#endif
1908 li r4,FALSE
1909 b EXT(thread_funnel_set)
1910