1c79356b 1/*
3a60a9f5 2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
1c79356b 3 *
 4 * @APPLE_LICENSE_HEADER_START@
 5 *
ff6e181a 6 * This file contains Original Code and/or Modifications of Original Code
 7 * as defined in and that are subject to the Apple Public Source License
 8 * Version 2.0 (the 'License'). You may not use this file except in
 9 * compliance with the License. Please obtain a copy of the License at
 10 * http://www.opensource.apple.com/apsl/ and read it before using this
 11 * file.
1c79356b 12 *
ff6e181a 13 * The Original Code and all software distributed under the License are
 14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b 15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
ff6e181a 17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 18 * Please see the License for the specific language governing rights and
 19 * limitations under the License.
1c79356b 20 *
 21 * @APPLE_LICENSE_HEADER_END@
 22 */
23
1c79356b 24#include <mach_assert.h>
 25#include <mach_ldebug.h>
26#include <ppc/asm.h>
27#include <ppc/proc_reg.h>
28#include <assym.s>
29
30#define STRING ascii
31
0b4e3aa0 32#define ILK_LOCKED 0x01
9bccf70c 33#define WAIT_FLAG 0x02
91447636 34#define WANT_UPGRADE 0x04
35#define WANT_EXCL 0x08
1c79356b 36
91447636 37#define TH_FN_OWNED 0x01
1c79356b 38
91447636 39# volatile CR bits
40#define hwtimeout 20
41#define mlckmiss 21
1c79356b 42
91447636 43#define RW_DATA 0
1c79356b 44
91447636 45#define PROLOG(space) \
46 stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
47 mfcr r2 __ASMNL__ \
48 mflr r0 __ASMNL__ \
49 stw r3,FM_ARG0(r1) __ASMNL__ \
50 stw r11,FM_ARG0+0x04(r1) __ASMNL__ \
51 stw r2,(FM_ALIGN(space)+FM_SIZE+FM_CR_SAVE)(r1) __ASMNL__ \
52 stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__
1c79356b 53
91447636 54#define EPILOG \
55 lwz r1,0(r1) __ASMNL__ \
56 lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
57 mtlr r0 __ASMNL__
1c79356b 58
1c79356b 59/*
55e303ae 60 * void hw_lock_init(hw_lock_t)
1c79356b 61 *
55e303ae 62 * Initialize a hardware lock.
1c79356b 63 */
55e303ae 64 .align 5
65 .globl EXT(hw_lock_init)
1c79356b 66
55e303ae 67LEXT(hw_lock_init)
1c79356b 68
55e303ae 69 li r0, 0 ; set lock to free == 0
70 stw r0, 0(r3) ; Initialize the lock
1c79356b 71 blr
72
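/*
 * Illustrative C-level sketch (not part of this file), hedged: how a caller
 * might declare and initialize one of these locks. The hw_lock_data_t /
 * hw_lock_t types are assumed to come from the PPC hw_lock_types header;
 * my_init is hypothetical.
 *
 * #include <ppc/hw_lock_types.h>          // assumed location of hw_lock_t
 *
 * static hw_lock_data_t my_lock;
 *
 * void my_init(void)
 * {
 *     hw_lock_init(&my_lock);             // lock word set to 0 == free
 * }
 */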
73/*
91447636 74 * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
1c79356b 75 *
91447636 76 * Try to acquire spin-lock. The second parameter is the bit mask to test and set.
 77 * Multiple bits may be set. Return success (1) or failure (0).
 78 * Attempt will fail after timeout ticks of the timebase.
1c79356b 79 */
1c79356b 80 .align 5
91447636 81 .globl EXT(hw_lock_bit)
1c79356b 82
91447636 83LEXT(hw_lock_bit)
1c79356b 84
91447636 85 crset hwtimeout ; timeout option
86 mr r12,r4 ; Load bit mask
87 mr r4,r5 ; Load timeout value
88 b lckcomm ; Join on up...
1c79356b 89
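/*
 * Illustrative C-level sketch (not part of this file), hedged: hw_lock_bit
 * is a test-and-set on an arbitrary bit mask with a timebase-tick timeout.
 * The 0x01 mask mirrors this file's ILK_LOCKED; `timeout' is hypothetical.
 *
 * static hw_lock_data_t w;
 * unsigned int timeout = 10000;               // timebase ticks, hypothetical
 *
 * if (hw_lock_bit(&w, 0x01, timeout)) {       // bits were clear, now set
 *     // we own the bit(s); preemption was disabled on entry (see lckcomm)
 *     hw_unlock_bit(&w, 0x01);                // clear and reenable preemption
 * } else {
 *     // the bits stayed set for `timeout' ticks: give up or panic
 * }
 */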
1c79356b
A
90/*
91 * void hw_lock_lock(hw_lock_t)
92 *
55e303ae
A
93 * Acquire lock, spinning until it becomes available.
94 * Return with preemption disabled.
95 * We will just set a default timeout and jump into the NORMAL timeout lock.
1c79356b 96 */
1c79356b
A
97 .align 5
98 .globl EXT(hw_lock_lock)
99
100LEXT(hw_lock_lock)
91447636
A
101 crclr hwtimeout ; no timeout option
102 li r4,0 ; request default timeout value
103 li r12,ILK_LOCKED ; Load bit mask
104 b lckcomm ; Join on up...
105
55e303ae 106lockDisa:
91447636
A
107 crset hwtimeout ; timeout option
108 li r4,0 ; request default timeout value
109 li r12,ILK_LOCKED ; Load bit mask
55e303ae 110 b lckcomm ; Join on up...
1c79356b
A
111
112/*
55e303ae 113 * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
1c79356b 114 *
55e303ae 115 * Try to acquire spin-lock. Return success (1) or failure (0).
 116 * Attempt will fail after timeout ticks of the timebase.
 117 * We try fairly hard to get this lock. We disable for interruptions, but
 118 * reenable after a "short" timeout (128 ticks; we may want to change this).
 119 * After checking to see if the large timeout value (passed in) has expired and a
 120 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
 121 * we return either in abject failure, or disable and go back to the lock sniff routine.
 122 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
1c79356b
A
123 */
124 .align 5
125 .globl EXT(hw_lock_to)
126
127LEXT(hw_lock_to)
91447636
A
128 crset hwtimeout ; timeout option
129 li r12,ILK_LOCKED ; Load bit mask
55e303ae
A
130lckcomm:
131 mfsprg r6,1 ; Get the current activation
132 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
133 addi r5,r5,1 ; Bring up the disable count
134 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
135 mr r5,r3 ; Get the address of the lock
136 li r8,0 ; Set r8 to zero
137
138lcktry: lwarx r6,0,r5 ; Grab the lock value
91447636
A
139 and. r3,r6,r12 ; Is it locked?
140 or r6,r6,r12 ; Set interlock
55e303ae
A
141 bne-- lckspin ; Yeah, wait for it to clear...
142 stwcx. r6,0,r5 ; Try to seize that there durn lock
143 bne-- lcktry ; Couldn't get it...
144 li r3,1 ; return true
91447636
A
145 .globl EXT(hwllckPatch_isync)
146LEXT(hwllckPatch_isync)
55e303ae 147 isync ; Make sure we don't use a speculatively loaded value
148 blr ; Go on home...
149
150lckspin: li r6,lgKillResv ; Get killing field
151 stwcx. r6,0,r6 ; Kill reservation
1c79356b 152
55e303ae
A
153 mr. r4,r4 ; Test timeout value
154 bne++ lockspin0
155 lis r4,hi16(EXT(LockTimeOut)) ; Get the high part
156 ori r4,r4,lo16(EXT(LockTimeOut)) ; And the low part
157 lwz r4,0(r4) ; Get the timeout value
158lockspin0:
 159 mr. r8,r8 ; Is r8 zero (i.e. first spin attempt)?
 160 bne++ lockspin1 ; No, MSR already saved, skip the setup...
161 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
162 mfmsr r9 ; Get the MSR value
163 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
164 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
165 andc r9,r9,r0 ; Clear FP and VEC
166 andc r7,r9,r7 ; Clear EE as well
167 mtmsr r7 ; Turn off interruptions
168 isync ; May have turned off vec and fp here
169 mftb r8 ; Get timestamp on entry
170 b lcksniff
171
172lockspin1: mtmsr r7 ; Turn off interruptions
173 mftb r8 ; Get timestamp on entry
174
175lcksniff: lwz r3,0(r5) ; Get that lock in here
91447636 176 and. r3,r3,r12 ; Is it free yet?
55e303ae 177 beq++ lckretry ; Yeah, try for it again...
1c79356b 178
55e303ae
A
179 mftb r10 ; Time stamp us now
180 sub r10,r10,r8 ; Get the elapsed time
181 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
182 blt++ lcksniff ; Not yet...
de355530 183
55e303ae 184 mtmsr r9 ; Say, any interrupts pending?
1c79356b 185
55e303ae 186; The following instructions force the pipeline to be interlocked so that only one
 187; instruction is issued per cycle. This ensures that we stay enabled for a long enough
 188; time; if it's too short, pending interruptions will not have a chance to be taken
de355530 189
55e303ae
A
190 subi r4,r4,128 ; Back off elapsed time from timeout value
191 or r4,r4,r4 ; Do nothing here but force a single cycle delay
192 mr. r4,r4 ; See if we used the whole timeout
193 li r3,0 ; Assume a timeout return code
194 or r4,r4,r4 ; Do nothing here but force a single cycle delay
195
196 ble-- lckfail ; We failed
197 b lockspin1 ; Now that we've opened an enable window, keep trying...
198lckretry:
199 mtmsr r9 ; Restore interrupt state
 200 li r8,1 ; Ensure that R8 is not 0
201 b lcktry
202lckfail: ; We couldn't get the lock
91447636 203 bf hwtimeout,lckpanic
55e303ae
A
204 li r3,0 ; Set failure return code
205 blr ; Return, head hanging low...
91447636
A
206lckpanic:
207 mr r4,r5
208 mr r5,r3
209 lis r3,hi16(lckpanic_str) ; Get the failed lck message
210 ori r3,r3,lo16(lckpanic_str) ; Get the failed lck message
211 bl EXT(panic)
212 BREAKPOINT_TRAP ; We die here anyway
213 .data
214lckpanic_str:
215 STRINGD "timeout on attempt to acquire lock (0x%08X), value = 0x%08X\n\000"
216 .text
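/*
 * Illustrative C-level shape (not part of this file), hedged, of the
 * spin/sniff structure above: spin with interrupts off, but reopen an
 * enable window every 128 timebase ticks so pending interrupts get taken.
 * mftb() and try_grab() stand in for the lwarx/stwcx. sequences; both are
 * hypothetical names.
 *
 * unsigned int spin_to(volatile unsigned int *lock, uint64_t timeout)
 * {
 *     uint64_t start = mftb();                // timestamp on entry
 *     while (!try_grab(lock)) {               // lcktry equivalent
 *         if (mftb() - start >= timeout)
 *             return 0;                       // lckfail: abject failure
 *         // every ~128 ticks: briefly restore the saved MSR (interrupts
 *         // on), then disable again and resume sniffing (lcksniff)
 *     }
 *     return 1;                               // isync, then success
 * }
 */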
1c79356b
A
217
218/*
91447636 219 * void hw_lock_unlock(hw_lock_t)
1c79356b 220 *
91447636
A
221 * Unconditionally release lock.
222 * Release preemption level.
1c79356b 223 */
1c79356b 224 .align 5
91447636 225 .globl EXT(hw_lock_unlock)
55e303ae 226
91447636 227LEXT(hw_lock_unlock)
1c79356b 228
91447636
A
229 .globl EXT(hwulckPatch_isync)
230LEXT(hwulckPatch_isync)
231 isync
232 .globl EXT(hwulckPatch_eieio)
233LEXT(hwulckPatch_eieio)
234 eieio
235 li r0, 0 ; set lock to free
236 stw r0, 0(r3)
1c79356b 237
91447636 238 b epStart ; Go enable preemption...
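/*
 * Illustrative C-level pairing (not part of this file), hedged: the acquire
 * side disables preemption, so each acquire must be matched with an unlock,
 * which stores 0 behind the isync/eieio barriers and reenables preemption.
 *
 * hw_lock_lock(&my_lock);      // spins; returns with preemption disabled
 * // ... critical section ...
 * hw_lock_unlock(&my_lock);    // isync/eieio, store 0, enable preemption
 */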
1c79356b
A
239
240/*
55e303ae 241 * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
1c79356b 242 *
55e303ae
A
243 * Release bit based spin-lock. The second parameter is the bit mask to clear.
244 * Multiple bits may be cleared.
1c79356b 245 *
1c79356b 246 */
1c79356b
A
247 .align 5
248 .globl EXT(hw_unlock_bit)
249
250LEXT(hw_unlock_bit)
251
55e303ae
A
252 .globl EXT(hwulckbPatch_isync)
253LEXT(hwulckbPatch_isync)
254 isync
255 .globl EXT(hwulckbPatch_eieio)
256LEXT(hwulckbPatch_eieio)
257 eieio
258ubittry: lwarx r0,0,r3 ; Grab the lock value
259 andc r0,r0,r4 ; Clear the lock bits
260 stwcx. r0,0,r3 ; Try to clear that there durn lock
261 bne- ubittry ; Try again, couldn't save it...
1c79356b 262
91447636 263 b epStart ; Go enable preemption...
1c79356b
A
264
265/*
55e303ae 266 * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
1c79356b
A
267 * unsigned int newb, unsigned int timeout)
268 *
55e303ae 269 * Try to acquire spin-lock. The second parameter is the bit mask to check.
 270 * The third is the value of those bits and the 4th is what to set them to.
 271 * Return success (1) or failure (0).
 272 * Attempt will fail after timeout ticks of the timebase.
 273 * We try fairly hard to get this lock. We disable for interruptions, but
 274 * reenable after a "short" timeout (128 ticks; we may want to shorten this).
 275 * After checking to see if the large timeout value (passed in) has expired and a
 276 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
 277 * we return either in abject failure, or disable and go back to the lock sniff routine.
 278 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
1c79356b 279 */
1c79356b 280 .align 5
1c79356b
A
281 .globl EXT(hw_lock_mbits)
282
283LEXT(hw_lock_mbits)
284
55e303ae 285 li r10,0
1c79356b 286
55e303ae
A
287mbittry: lwarx r12,0,r3 ; Grab the lock value
288 and r0,r12,r4 ; Clear extra bits
289 andc r12,r12,r4 ; Clear all bits in the bit mask
290 or r12,r12,r6 ; Turn on the lock bits
291 cmplw r0,r5 ; Are these the right bits?
292 bne-- mbitspin ; Nope, wait for it to clear...
293 stwcx. r12,0,r3 ; Try to seize that there durn lock
294 beq++ mbitgot ; We got it, yahoo...
295 b mbittry ; Just start up again if the store failed...
1c79356b
A
296
297 .align 5
55e303ae
A
298mbitspin: li r11,lgKillResv ; Point to killing field
299 stwcx. r11,0,r11 ; Kill it
1c79356b 300
55e303ae
A
 301 mr. r10,r10 ; Is r10 zero (i.e. first spin attempt)?
 302 bne++ mbitspin0 ; No, MSR already saved, skip the setup...
303 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
304 mfmsr r9 ; Get the MSR value
305 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
306 ori r8,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
307 andc r9,r9,r0 ; Clear FP and VEC
308 andc r8,r9,r8 ; Clear EE as well
309 mtmsr r8 ; Turn off interruptions
310 isync ; May have turned off vectors or float here
311 mftb r10 ; Get the low part of the time base
312 b mbitsniff
313mbitspin0:
314 mtmsr r8 ; Turn off interruptions
315 mftb r10 ; Get the low part of the time base
316mbitsniff:
317 lwz r12,0(r3) ; Get that lock in here
318 and r0,r12,r4 ; Clear extra bits
319 cmplw r0,r5 ; Are these the right bits?
320 beq++ mbitretry ; Yeah, try for it again...
1c79356b 321
55e303ae
A
322 mftb r11 ; Time stamp us now
323 sub r11,r11,r10 ; Get the elapsed time
324 cmplwi r11,128 ; Have we been spinning for 128 tb ticks?
325 blt++ mbitsniff ; Not yet...
1c79356b 326
55e303ae 327 mtmsr r9 ; Say, any interrupts pending?
1c79356b
A
328
 329; The following instructions force the pipeline to be interlocked so that only one
 330; instruction is issued per cycle. This ensures that we stay enabled for a long enough
 331; time. If it is too short, pending interruptions will not have a chance to be taken
332
55e303ae
A
333 subi r7,r7,128 ; Back off elapsed time from timeout value
334 or r7,r7,r7 ; Do nothing here but force a single cycle delay
335 mr. r7,r7 ; See if we used the whole timeout
336 or r7,r7,r7 ; Do nothing here but force a single cycle delay
1c79356b 337
55e303ae
A
338 ble-- mbitfail ; We failed
339 b mbitspin0 ; Now that we have opened an enable window, keep trying...
340mbitretry:
341 mtmsr r9 ; Enable for interruptions
342 li r10,1 ; Make sure this is non-zero
343 b mbittry
1c79356b
A
344
345 .align 5
55e303ae
A
346mbitgot:
347 li r3,1 ; Set good return code
91447636
A
348 .globl EXT(hwlmlckPatch_isync)
349LEXT(hwlmlckPatch_isync)
55e303ae 350 isync ; Make sure we do not use a speculativily loaded value
1c79356b
A
351 blr
352
55e303ae
A
353mbitfail: li r3,0 ; Set failure return code
354 blr ; Return, head hanging low...
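/*
 * Illustrative C-level equivalent (not part of this file), hedged, of the
 * hw_lock_mbits loop above; atomic_cas() is a hypothetical stand-in for the
 * lwarx/stwcx. pair.
 *
 * unsigned int old, new;
 * do {
 *     old = *lock;                            // lwarx
 *     if ((old & bits) != value)
 *         break;                              // wrong bits: mbitspin path
 *     new = (old & ~bits) | newb;             // clear mask, set new bits
 * } while (!atomic_cas(lock, old, new));      // stwcx.; retry if lost
 */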
1c79356b
A
355
356/*
357 * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
358 *
55e303ae
A
359 * Spin until word hits 0 or timeout.
360 * Return success (1) or failure (0).
361 * Attempt will fail after timeout ticks of the timebase.
de355530 362 *
55e303ae 363 * The theory is that a processor will bump a counter as it signals
 364 * other processors. Then it will spin until the counter hits 0 (or
 365 * times out). The other processors, as they receive the signal, will
 366 * decrement the counter.
1c79356b 367 *
55e303ae 368 * The other processors use interlocked update to decrement; this one
 369 * does not need to interlock.
1c79356b 370 */
1c79356b 371 .align 5
1c79356b
A
372 .globl EXT(hw_cpu_sync)
373
374LEXT(hw_cpu_sync)
375
55e303ae
A
376 mftb r10 ; Get the low part of the time base
377 mr r9,r3 ; Save the sync word address
378 li r3,1 ; Assume we work
1c79356b 379
55e303ae
A
380csynctry: lwz r11,0(r9) ; Grab the sync value
381 mr. r11,r11 ; Counter hit 0?
382 beqlr- ; Yeah, we are sunk...
383 mftb r12 ; Time stamp us now
1c79356b 384
55e303ae
A
385 sub r12,r12,r10 ; Get the elapsed time
386 cmplw r4,r12 ; Have we gone too long?
387 bge+ csynctry ; Not yet...
1c79356b 388
55e303ae
A
389 li r3,0 ; Set failure...
390 blr ; Return, head hanging low...
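/*
 * Illustrative C-level rendezvous (not part of this file), hedged, matching
 * the theory above; ncpus and signal_other_cpus() are hypothetical. Each
 * signalled processor decrements the word with an interlocked update, e.g.
 * (void)hw_atomic_sub(&outstanding, 1).
 *
 * static unsigned int outstanding;
 *
 * outstanding = ncpus - 1;                    // bump before signalling
 * signal_other_cpus();                        // targets decrement it
 * if (!hw_cpu_sync(&outstanding, timeout))    // spin until it hits 0
 *     panic("cpu sync timed out");
 */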
1c79356b
A
391
392/*
393 * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
394 *
55e303ae
A
395 * Spin until word changes or timeout.
396 * Return success (1) or failure (0).
397 * Attempt will fail after timeout ticks of the timebase.
1c79356b 398 *
55e303ae 399 * This is used to ensure that a processor passes a certain point.
 400 * An example of use is to monitor the last interrupt time in the
 401 * per_proc block. This can be used to ensure that the other processor
 402 * has seen at least one interrupt since a specific time.
1c79356b 403 */
1c79356b 404 .align 5
0b4e3aa0 405 .globl EXT(hw_cpu_wcng)
1c79356b
A
406
407LEXT(hw_cpu_wcng)
408
55e303ae
A
409 mftb r10 ; Get the low part of the time base
410 mr r9,r3 ; Save the sync word address
411 li r3,1 ; Assume we work
1c79356b 412
55e303ae
A
413wcngtry: lwz r11,0(r9) ; Grab the value
414 cmplw r11,r4 ; Do they still match?
415 bnelr- ; Nope, cool...
416 mftb r12 ; Time stamp us now
1c79356b 417
55e303ae
A
418 sub r12,r12,r10 ; Get the elapsed time
419 cmplw r5,r12 ; Have we gone too long?
420 bge+ wcngtry ; Not yet...
1c79356b 421
55e303ae
A
422 li r3,0 ; Set failure...
423 blr ; Return, head hanging low...
1c79356b
A
424
425
426/*
55e303ae 427 * unsigned int hw_lock_try(hw_lock_t)
1c79356b 428 *
55e303ae
A
429 * Try to acquire spin-lock. Return success (1) or failure (0)
430 * Returns with preemption disabled on success.
1c79356b
A
431 *
432 */
433 .align 5
434 .globl EXT(hw_lock_try)
435
436LEXT(hw_lock_try)
437
55e303ae
A
438 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
439 mfmsr r9 ; Get the MSR value
440 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
441 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
442 andc r9,r9,r0 ; Clear FP and VEC
443 andc r7,r9,r7 ; Clear EE as well
1c79356b 444
55e303ae 445 mtmsr r7 ; Disable interruptions and thus, preemption
1c79356b 446
55e303ae
A
447 lwz r5,0(r3) ; Quick load
448 andi. r6,r5,ILK_LOCKED ; TEST...
449 bne-- .L_lock_try_failed ; No go...
1c79356b 450
55e303ae
A
451.L_lock_try_loop:
452 lwarx r5,0,r3 ; Ld from addr of arg and reserve
1c79356b 453
55e303ae 454 andi. r6,r5,ILK_LOCKED ; TEST...
0b4e3aa0 455 ori r5,r5,ILK_LOCKED
55e303ae 456 bne-- .L_lock_try_failedX ; branch if taken. Predict free
1c79356b 457
55e303ae
A
458 stwcx. r5,0,r3 ; And SET (if still reserved)
459 bne-- .L_lock_try_loop ; If set failed, loop back
1c79356b 460
91447636
A
461 .globl EXT(hwltlckPatch_isync)
462LEXT(hwltlckPatch_isync)
1c79356b
A
463 isync
464
55e303ae
A
465 mfsprg r6,1 ; Get current activation
466 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
467 addi r5,r5,1 ; Bring up the disable count
468 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1c79356b 469
55e303ae
A
470 mtmsr r9 ; Allow interruptions now
471 li r3,1 ; Set that the lock was free
1c79356b
A
472 blr
473
55e303ae
A
474.L_lock_try_failedX:
475 li r6,lgKillResv ; Killing field
476 stwcx. r6,0,r6 ; Kill reservation
477
1c79356b 478.L_lock_try_failed:
55e303ae
A
479 mtmsr r9 ; Allow interruptions now
480 li r3,0 ; FAILURE - lock was taken
1c79356b
A
481 blr
482
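/*
 * Illustrative C-level usage (not part of this file), hedged: hw_lock_try
 * never spins, and only the success path leaves preemption disabled.
 *
 * if (hw_lock_try(&my_lock)) {
 *     // ... short critical section, preemption disabled ...
 *     hw_lock_unlock(&my_lock);    // also reenables preemption
 * } else {
 *     // lock was busy: fall back to a slow path instead of spinning
 * }
 */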
483/*
55e303ae 484 * unsigned int hw_lock_held(hw_lock_t)
1c79356b 485 *
55e303ae
A
486 * Return 1 if lock is held
487 * Doesn't change preemption state.
488 * N.B. Racy, of course.
1c79356b
A
489 */
490 .align 5
491 .globl EXT(hw_lock_held)
492
493LEXT(hw_lock_held)
494
55e303ae 495 isync ; Make sure we don't use a speculatively fetched lock
496 lwz r3, 0(r3) ; Get lock value
497 andi. r6,r3,ILK_LOCKED ; Extract the ILK_LOCKED bit
1c79356b
A
498 blr
499
500/*
9bccf70c 501 * uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
1c79356b 502 *
55e303ae 503 * Compare the old value to the area; if equal, store the new value and return true,
 504 * else return false with no store.
 505 * This is an atomic operation.
1c79356b
A
506 */
507 .align 5
508 .globl EXT(hw_compare_and_store)
509
510LEXT(hw_compare_and_store)
511
55e303ae 512 mr r6,r3 ; Save the old value
1c79356b 513
55e303ae
A
514cstry: lwarx r9,0,r5 ; Grab the area value
515 li r3,1 ; Assume it works
516 cmplw cr0,r9,r6 ; Does it match the old value?
517 bne-- csfail ; No, it must have changed...
518 stwcx. r4,0,r5 ; Try to save the new value
519 bne-- cstry ; Didn't get it, try again...
91447636
A
520 .globl EXT(hwcsatomicPatch_isync)
521LEXT(hwcsatomicPatch_isync)
55e303ae
A
522 isync ; Just hold up prefetch
523 blr ; Return...
1c79356b 524
55e303ae
A
525csfail: li r3,lgKillResv ; Killing field
526 stwcx. r3,0,r3 ; Blow reservation
527
528 li r3,0 ; Set failure
529 blr ; Better luck next time...
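/*
 * Illustrative C-level CAS loop (not part of this file), hedged: the usual
 * way callers build a read-modify-write on top of hw_compare_and_store.
 *
 * static uint32_t counter;             // shared word, hypothetical
 * uint32_t old, new;
 *
 * do {
 *     old = counter;                   // snapshot
 *     new = old + 1;                   // compute update
 * } while (!hw_compare_and_store(old, new, &counter));
 */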
1c79356b
A
530
531
532/*
9bccf70c 533 * uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
1c79356b 534 *
55e303ae
A
535 * Atomically add the second parameter to the first.
536 * Returns the result.
1c79356b
A
537 *
538 */
539 .align 5
540 .globl EXT(hw_atomic_add)
541
542LEXT(hw_atomic_add)
543
55e303ae 544 mr r6,r3 ; Save the area
1c79356b 545
55e303ae
A
546addtry: lwarx r3,0,r6 ; Grab the area value
547 add r3,r3,r4 ; Add the value
548 stwcx. r3,0,r6 ; Try to save the new value
549 bne-- addtry ; Didn't get it, try again...
550 blr ; Return...
1c79356b
A
551
552
553/*
9bccf70c 554 * uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt)
1c79356b 555 *
55e303ae
A
556 * Atomically subtract the second parameter from the first.
557 * Returns the result.
1c79356b
A
558 *
559 */
560 .align 5
561 .globl EXT(hw_atomic_sub)
562
563LEXT(hw_atomic_sub)
564
55e303ae 565 mr r6,r3 ; Save the area
1c79356b 566
55e303ae
A
567subtry: lwarx r3,0,r6 ; Grab the area value
568 sub r3,r3,r4 ; Subtract the value
569 stwcx. r3,0,r6 ; Try to save the new value
570 bne-- subtry ; Didn't get it, try again...
571 blr ; Return...
1c79356b
A
572
573
0b4e3aa0 574/*
9bccf70c 575 * uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask)
0b4e3aa0 576 *
55e303ae
A
577 * Atomically ORs the second parameter into the first.
578 * Returns the result.
0b4e3aa0
A
579 */
580 .align 5
581 .globl EXT(hw_atomic_or)
582
583LEXT(hw_atomic_or)
584
55e303ae 585 mr r6,r3 ; Save the area
0b4e3aa0 586
55e303ae
A
587ortry: lwarx r3,0,r6 ; Grab the area value
588 or r3,r3,r4 ; OR the value
589 stwcx. r3,0,r6 ; Try to save the new value
590 bne-- ortry ; Did not get it, try again...
591 blr ; Return...
0b4e3aa0
A
592
593
594/*
9bccf70c 595 * uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask)
0b4e3aa0 596 *
55e303ae
A
597 * Atomically ANDs the second parameter with the first.
598 * Returns the result.
0b4e3aa0
A
599 *
600 */
601 .align 5
602 .globl EXT(hw_atomic_and)
603
604LEXT(hw_atomic_and)
605
55e303ae 606 mr r6,r3 ; Save the area
0b4e3aa0 607
55e303ae
A
608andtry: lwarx r3,0,r6 ; Grab the area value
609 and r3,r3,r4 ; AND the value
610 stwcx. r3,0,r6 ; Try to save the new value
611 bne-- andtry ; Did not get it, try again...
612 blr ; Return...
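/*
 * Illustrative C-level uses (not part of this file), hedged, of the four
 * interlocked read-modify-write routines above; refcount, flags and
 * FLAG_DIRTY are hypothetical.
 *
 * static uint32_t refcount, flags;
 *
 * (void)hw_atomic_add(&refcount, 1);          // returns the new value
 * if (hw_atomic_sub(&refcount, 1) == 0)
 *     ;                                       // last reference dropped
 * (void)hw_atomic_or(&flags, FLAG_DIRTY);     // set bits atomically
 * (void)hw_atomic_and(&flags, ~FLAG_DIRTY);   // clear them again
 */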
0b4e3aa0
A
613
614
1c79356b
A
615/*
616 * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
617 *
55e303ae
A
618 * Atomically inserts the element at the head of the list
619 * anchor is the pointer to the first element
620 * element is the pointer to the element to insert
621 * disp is the displacement into the element to the chain pointer
1c79356b 622 *
91447636 623 * NOTE: OSEnqueueAtomic() is aliased to this, see xnu/libkern/Makefile
1c79356b
A
624 */
625 .align 5
626 .globl EXT(hw_queue_atomic)
627
628LEXT(hw_queue_atomic)
629
55e303ae
A
630 mr r7,r4 ; Make end point the same as start
631 mr r8,r5 ; Copy the displacement also
632 b hw_queue_comm ; Join common code...
1c79356b
A
633
634/*
635 * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
636 *
55e303ae
A
637 * Atomically inserts the list of elements at the head of the list
638 * anchor is the pointer to the first element
639 * first is the pointer to the first element to insert
640 * last is the pointer to the last element to insert
641 * disp is the displacement into the element to the chain pointer
1c79356b
A
642 */
643 .align 5
644 .globl EXT(hw_queue_atomic_list)
645
646LEXT(hw_queue_atomic_list)
647
55e303ae
A
648 mr r7,r5 ; Make end point the same as start
649 mr r8,r6 ; Copy the displacement also
1c79356b
A
650
651hw_queue_comm:
55e303ae
A
652 lwarx r9,0,r3 ; Pick up the anchor
653 stwx r9,r8,r7 ; Chain that to the end of the new stuff
654 eieio ; Make sure this store makes it before the anchor update
655 stwcx. r4,0,r3 ; Try to chain into the front
656 bne-- hw_queue_comm ; Didn't make it, try again...
657
658 blr ; Return...
1c79356b
A
659
660/*
661 * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
662 *
55e303ae
A
663 * Atomically removes the first element in a list and returns it.
664 * anchor is the pointer to the first element
665 * disp is the displacement into the element to the chain pointer
666 * Returns element if found, 0 if empty.
91447636
A
667 *
668 * NOTE: OSDequeueAtomic() is aliased to this, see xnu/libkern/Makefile
1c79356b
A
669 */
670 .align 5
671 .globl EXT(hw_dequeue_atomic)
672
673LEXT(hw_dequeue_atomic)
674
55e303ae 675 mr r5,r3 ; Save the anchor
1c79356b
A
676
677hw_dequeue_comm:
55e303ae
A
678 lwarx r3,0,r5 ; Pick up the anchor
679 mr. r3,r3 ; Is the list empty?
 680 beq-- hdcFail ; Leave if the list is empty...
681 lwzx r9,r4,r3 ; Get the next in line
682 stwcx. r9,0,r5 ; Try to chain into the front
683 beqlr++ ; Got the thing, go away with it...
684 b hw_dequeue_comm ; Did not make it, try again...
685
686hdcFail: li r4,lgKillResv ; Killing field
687 stwcx. r4,0,r4 ; Dump reservation
688 blr ; Leave...
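/*
 * Illustrative C-level LIFO usage (not part of this file), hedged: the pair
 * above implements an atomic push/pop stack, with `disp' giving the offset
 * of the link field inside each element. The elem type is hypothetical.
 *
 * #include <stddef.h>                          // offsetof
 *
 * struct elem { struct elem *next; int payload; };
 * static unsigned int anchor;                  // 0 == empty list
 * static struct elem e1;
 *
 * hw_queue_atomic(&anchor, (unsigned int *)&e1,
 *                 offsetof(struct elem, next));                 // push e1
 * struct elem *top = (struct elem *)
 *     hw_dequeue_atomic(&anchor, offsetof(struct elem, next)); // pop, 0 if empty
 */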
689
1c79356b 690
91447636
A
691/*
692 * Routines for mutex lock debugging.
693 */
694
695/*
696 * Gets lock check flags in CR6: CR bits 24-27
697 */
698#define CHECK_SETUP(rg) \
699 lbz rg,lglcksWork(0) __ASMNL__ \
700 mtcrf 2,rg __ASMNL__
701
702
703/*
704 * Checks for expected lock type.
705 */
706#define CHECK_MUTEX_TYPE() \
707 bf MUTEX_ATTR_DEBUGb,1f __ASMNL__ \
708 bt 24+disLktypeb,1f __ASMNL__ \
709 lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
710 cmpwi r10,MUTEX_TAG __ASMNL__ \
711 beq++ 1f __ASMNL__ \
712 PROLOG(0) __ASMNL__ \
713 mr r4,r11 __ASMNL__ \
714 mr r5,r10 __ASMNL__ \
715 lis r3,hi16(not_a_mutex) __ASMNL__ \
716 ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
717 bl EXT(panic) __ASMNL__ \
718 BREAKPOINT_TRAP __ASMNL__ \
7191:
720
721 .data
722not_a_mutex:
723 STRINGD "mutex (0x%08X) not a mutex type (0x%08X)\n\000"
724 .text
725
726/*
727 * Verifies return to the correct thread in "unlock" situations.
728 */
729#define CHECK_THREAD(thread_offset) \
730 bf MUTEX_ATTR_DEBUGb,3f __ASMNL__ \
731 bt 24+disLkThreadb,3f __ASMNL__ \
732 mfsprg r10,1 __ASMNL__ \
733 lwz r5,MUTEX_DATA(r3) __ASMNL__ \
734 rlwinm. r9,r5,0,0,29 __ASMNL__ \
735 bne++ 1f __ASMNL__ \
736 lis r3,hi16(not_held) __ASMNL__ \
737 ori r3,r3,lo16(not_held) __ASMNL__ \
738 b 2f __ASMNL__ \
7391: __ASMNL__ \
740 cmpw r9,r10 __ASMNL__ \
741 beq++ 3f __ASMNL__ \
742 mr r5,r10 __ASMNL__ \
743 mr r6,r9 __ASMNL__ \
744 lis r3,hi16(wrong_thread) __ASMNL__ \
745 ori r3,r3,lo16(wrong_thread) __ASMNL__ \
7462: __ASMNL__ \
747 mr r4,r11 __ASMNL__ \
748 PROLOG(0) __ASMNL__ \
749 bl EXT(panic) __ASMNL__ \
750 BREAKPOINT_TRAP __ASMNL__ \
7513:
752
753 .data
754not_held:
755 STRINGD "mutex (0x%08X) not held\n\000"
756wrong_thread:
757 STRINGD "mutex (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n\000"
758 .text
759
760#define CHECK_MYLOCK() \
761 bf MUTEX_ATTR_DEBUGb,1f __ASMNL__ \
762 bt 24+disLkMyLckb,1f __ASMNL__ \
763 mfsprg r10,1 __ASMNL__ \
764 lwz r9,MUTEX_DATA(r3) __ASMNL__ \
765 rlwinm r9,r9,0,0,29 __ASMNL__ \
766 cmpw r9,r10 __ASMNL__ \
767 bne++ 1f __ASMNL__ \
768 mr r4,r11 __ASMNL__ \
769 lis r3, hi16(mylock_attempt) __ASMNL__ \
770 ori r3,r3,lo16(mylock_attempt) __ASMNL__ \
771 bl EXT(panic) __ASMNL__ \
772 BREAKPOINT_TRAP __ASMNL__ \
7731:
774
775 .data
776mylock_attempt:
777 STRINGD "mutex (0x%08X) recursive lock attempt\n\000"
778 .text
779
780#define LCK_STACK(lck, stack, lck_stack, frame_cnt, lr_save, tmp) \
781 bf 24+enaLkExtStckb,3f __ASMNL__ \
782 addi lck_stack,lck,MUTEX_STACK __ASMNL__ \
783 li frame_cnt,MUTEX_FRAMES-1 __ASMNL__ \
7841: __ASMNL__ \
785 mr tmp,stack __ASMNL__ \
786 lwz stack,0(stack) __ASMNL__ \
787 xor tmp,stack,tmp __ASMNL__ \
788 cmplwi tmp,8192 __ASMNL__ \
789 bge-- 2f __ASMNL__ \
790 lwz lr_save,FM_LR_SAVE(stack) __ASMNL__ \
791 stwu lr_save,4(lck_stack) __ASMNL__ \
792 subi frame_cnt,frame_cnt,1 __ASMNL__ \
793 cmpi cr0,frame_cnt,0 __ASMNL__ \
794 bne 1b __ASMNL__ \
795 b 3f __ASMNL__ \
7962: __ASMNL__ \
797 li tmp,0 __ASMNL__ \
798 stwu tmp,4(lck_stack) __ASMNL__ \
799 subi frame_cnt,frame_cnt,1 __ASMNL__ \
800 cmpi cr0,frame_cnt,0 __ASMNL__ \
801 bne 2b __ASMNL__ \
8023:
803
1c79356b 804/*
55e303ae
A
805 * void mutex_init(mutex_t* l, etap_event_t etap)
806 *
1c79356b 807 */
55e303ae
A
808 .align 5
809 .globl EXT(mutex_init)
55e303ae 810LEXT(mutex_init)
1c79356b 811
0b4e3aa0 812 PROLOG(0)
91447636
A
813 li r10,0
814 stw r10,MUTEX_DATA(r3) ; clear lock word
815 sth r10,MUTEX_WAITERS(r3) ; init waiter count
816 sth r10,MUTEX_PROMOTED_PRI(r3)
1c79356b 817#if MACH_LDEBUG
91447636
A
818 li r11,MUTEX_ATTR_DEBUG
819 stw r10,MUTEX_STACK(r3) ; init caller pc
820 stw r10,MUTEX_THREAD(r3) ; and owning thread
821 li r9, MUTEX_TAG
822 stw r9, MUTEX_TYPE(r3) ; set lock type
823 stw r11,MUTEX_ATTR(r3)
824 addi r8,r3,MUTEX_STACK-4
825 li r9,MUTEX_FRAMES
826mlistck:
827 stwu r10,4(r8) ; init stack
828 subi r9,r9,1
829 cmpi cr0,r9,0
830 bne mlistck
1c79356b 831#endif /* MACH_LDEBUG */
0b4e3aa0
A
832 EPILOG
833 blr
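/*
 * Illustrative C-level sketch (not part of this file), hedged: with
 * MACH_LDEBUG the initializer above also stamps MUTEX_TAG and the debug
 * attribute that the CHECK_* macros below verify on every operation.
 *
 * static mutex_t m;
 *
 * mutex_init(&m, 0);           // second argument is the legacy etap event
 * mutex_lock(&m);              // may block; not for interrupt context
 * // ... critical section ...
 * mutex_unlock(&m);
 */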
1c79356b
A
834
835/*
91447636 836 * void lck_mtx_lock_ext(lck_mtx_ext_t*)
55e303ae 837 *
1c79356b 838 */
1c79356b 839 .align 5
91447636
A
840 .globl EXT(lck_mtx_lock_ext)
841LEXT(lck_mtx_lock_ext)
842#if MACH_LDEBUG
0b4e3aa0
A
843 .globl EXT(mutex_lock)
844LEXT(mutex_lock)
1c79356b 845
0b4e3aa0 846 .globl EXT(_mutex_lock)
1c79356b 847LEXT(_mutex_lock)
91447636
A
848#endif
849 mr r11,r3 ; Save lock addr
850mlckeEnter:
851 lwz r0,MUTEX_ATTR(r3)
852 mtcrf 1,r0 ; Set cr7
853 CHECK_SETUP(r12)
854 CHECK_MUTEX_TYPE()
855
856 bf MUTEX_ATTR_DEBUGb,L_mutex_lock_assert_wait_2
857 PROLOG(0)
858 bl EXT(assert_wait_possible)
859 mr. r3,r3
860 bne L_mutex_lock_assert_wait_1
861 lis r3,hi16(L_mutex_lock_assert_wait_panic_str)
862 ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
863 bl EXT(panic)
864 BREAKPOINT_TRAP ; We die here anyway
865
866 .data
867L_mutex_lock_assert_wait_panic_str:
868 STRINGD "mutex lock attempt with assert_wait_possible false\n\000"
869 .text
870
871L_mutex_lock_assert_wait_1:
872 lwz r3,FM_ARG0(r1)
873 lwz r11,FM_ARG0+0x04(r1)
874 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
875 mtcr r2
876 EPILOG
877L_mutex_lock_assert_wait_2:
1c79356b 878
55e303ae 879 mfsprg r6,1 ; load the current thread
91447636
A
880 bf MUTEX_ATTR_STATb,mlckestatskip ; Branch if no stat
881 lwz r5,MUTEX_GRP(r3) ; Load lock group
882 li r7,GRP_MTX_STAT_UTIL+4 ; Set stat util offset
883mlckestatloop:
884 lwarx r8,r7,r5 ; Load stat util cnt
885 addi r8,r8,1 ; Increment stat util cnt
886 stwcx. r8,r7,r5 ; Store stat util cnt
887 bne-- mlckestatloop ; Retry if failed
 888 mr. r8,r8 ; Test for zero
 889 bne++ mlckestatskip ; Skip unless the stat util cnt wrapped
890 lwz r8,GRP_MTX_STAT_UTIL(r5) ; Load upper stat util cnt
891 addi r8,r8,1 ; Increment upper stat util cnt
892 stw r8,GRP_MTX_STAT_UTIL(r5) ; Store upper stat util cnt
893mlckestatskip:
894 lwz r5,MUTEX_DATA(r3) ; Get the lock quickly
ab86ba33
A
895 li r4,0
896 li r8,0
91447636
A
897 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
898 mfmsr r9 ; Get the MSR value
899 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
900 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
901 andc r9,r9,r0 ; Clear FP and VEC
902 andc r7,r9,r7 ; Clear EE as well
903 mtmsr r7 ; Turn off interruptions
904 isync ; May have turned off vec and fp here
55e303ae 905 mr. r5,r5 ; Quick check
91447636 906 bne-- mlckespin01 ; Can not get it right now...
55e303ae 907
91447636
A
908mlcketry:
909 lwarx r5,MUTEX_DATA,r3 ; load the mutex lock
9bccf70c 910 mr. r5,r5
91447636
A
911 bne-- mlckespin0 ; Can not get it right now...
912 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
913 bne-- mlcketry ; loop back if failed
914 .globl EXT(mlckePatch_isync)
915LEXT(mlckePatch_isync)
55e303ae 916 isync ; stop prefeteching
91447636
A
917 mflr r12
918 bf MUTEX_ATTR_DEBUGb,mlckedebskip
919 mr r8,r6 ; Get the active thread
920 stw r12,MUTEX_STACK(r3) ; Save our caller
921 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
922 mr r5,r1
923 LCK_STACK(r3,r5,r6,r7,r8,r10)
924mlckedebskip:
925 mtmsr r9 ; Say, any interrupts pending?
0b4e3aa0 926 blr
55e303ae 927
91447636 928mlckespin0:
55e303ae
A
929 li r5,lgKillResv ; Killing field
930 stwcx. r5,0,r5 ; Kill reservation
91447636
A
931mlckespin01:
932 mflr r12
933 mtmsr r9 ; Say, any interrupts pending?
934 bl mlckspin1
935 mtmsr r7 ; Turn off interruptions, vec and fp off already
936 mtlr r12
937 b mlcketry
938
939/*
940 * void lck_mtx_lock(lck_mtx_t*)
941 *
942 */
943 .align 5
944 .globl EXT(lck_mtx_lock)
945LEXT(lck_mtx_lock)
946
947#if !MACH_LDEBUG
948 .globl EXT(mutex_lock)
949LEXT(mutex_lock)
950
951 .globl EXT(_mutex_lock)
952LEXT(_mutex_lock)
953#endif
954
955 mfsprg r6,1 ; load the current thread
956 lwz r5,MUTEX_DATA(r3) ; Get the lock quickly
957 mr r11,r3 ; Save lock addr
958 li r4,0
959 li r8,0
960 li r9,0
961 mr. r5,r5 ; Quick check
962 bne-- mlckspin00 ; Indirect or Can not get it right now...
963
964mlcktry:
965 lwarx r5,MUTEX_DATA,r3 ; load the mutex lock
966 mr. r5,r5
967 bne-- mlckspin01 ; Can not get it right now...
968 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
969 bne-- mlcktry ; loop back if failed
970 .globl EXT(mlckPatch_isync)
971LEXT(mlckPatch_isync)
 972 isync ; stop prefetching
973 blr
974
975mlckspin00:
976 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
977 bne-- mlckspin02 ; No, go handle contention
978 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
979 b mlckeEnter
980mlckspin01:
981 li r5,lgKillResv ; Killing field
982 stwcx. r5,0,r5 ; Kill reservation
983mlckspin02:
984 mflr r12
985 li r0,0
986 mtcrf 1,r0 ; Set cr7 to zero
987 bl mlckspin1
988 mtlr r12
989 b mlcktry
990
991
992mlckspin1:
993 mr. r4,r4 ; Test timeout value
994 bne++ mlckspin2
995 lis r4,hi16(EXT(MutexSpin)) ; Get the high part
ab86ba33
A
 996 ori r4,r4,lo16(EXT(MutexSpin)) ; And the low part
 997 lwz r4,0(r4) ; Get spin timeout value
998 mr. r4,r4 ; Test spin timeout value
91447636
A
 999 bne++ mlckspin2 ; Branch if spin timeout requested
1000 crclr mlckmiss ; Clear miss test
1001 b mlckslow1 ; Don't try to spin
ab86ba33
A
1002
 1003mlckspin2: mr. r8,r8 ; Is r8 zero (i.e. first spin attempt)?
 1004 bne++ mlckspin3 ; No, MSR already saved, skip the setup...
91447636
A
1005 crclr mlckmiss ; Clear miss test
 1006 mr. r9,r9 ; Is r9 zero?
 1007 bne++ mlckspin3 ; No, r9 already holds the saved MSR value...
ab86ba33
A
1008 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1009 mfmsr r9 ; Get the MSR value
1010 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1011 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1012 andc r9,r9,r0 ; Clear FP and VEC
1013 andc r7,r9,r7 ; Clear EE as well
1014 mtmsr r7 ; Turn off interruptions
1015 isync ; May have turned off vec and fp here
1016 mftb r8 ; Get timestamp on entry
1017 b mlcksniff
1018
1019mlckspin3: mtmsr r7 ; Turn off interruptions
1020 mftb r8 ; Get timestamp on entry
1021
91447636 1022mlcksniff: lwz r5,MUTEX_DATA(r3) ; Get that lock in here
ab86ba33
A
1023 mr. r5,r5 ; Is the lock held
1024 beq++ mlckretry ; No, try for it again...
91447636 1025 rlwinm. r10,r5,0,0,29 ; Extract the lock owner
ab86ba33 1026 beq++ mlckslow0 ; InterLock is held
91447636
A
1027 bf MUTEX_ATTR_STATb,mlStatSkip ; Branch if no stat
1028 andi. r5,r5,ILK_LOCKED ; extract interlocked?
1029 bne mlStatSkip ; yes, skip
1030 bt mlckmiss,mlStatSkip ; miss already counted
1031 crset mlckmiss ; Remember miss recorded
1032 lwz r5,MUTEX_GRP(r3) ; Load lock group
1033 addi r5,r5,GRP_MTX_STAT_MISS+4 ; Add stat miss offset
1034mlStatLoop:
1035 lwarx r6,0,r5 ; Load stat miss cnt
1036 addi r6,r6,1 ; Increment stat miss cnt
1037 stwcx. r6,0,r5 ; Update stat miss cnt
1038 bne-- mlStatLoop ; Retry if failed
1039 mfsprg r6,1 ; Reload current thread
1040mlStatSkip:
1041 lwz r2,ACT_MACT_SPF(r10) ; Get the special flags
1042 rlwinm. r2,r2,0,OnProcbit,OnProcbit ; Is OnProcbit set?
ab86ba33 1043 beq mlckslow0 ; Lock owner isn't running
91447636
A
1044 lis r2,hi16(TH_OPT_DELAYIDLE) ; Get DelayedIdle Option
1045 ori r2,r2,lo16(TH_OPT_DELAYIDLE) ; Get DelayedIdle Option
1046 lwz r10,THREAD_OPTIONS(r10) ; Get the thread options
1047 and. r10,r10,r2 ; Is DelayedIdle set?
1048 bne mlckslow0 ; Lock owner is in delay idle
ab86ba33
A
1049
1050 mftb r10 ; Time stamp us now
1051 sub r10,r10,r8 ; Get the elapsed time
1052 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
1053 blt++ mlcksniff ; Not yet...
1054
1055 mtmsr r9 ; Say, any interrupts pending?
1056
 1057; The following instructions force the pipeline to be interlocked so that only one
 1058; instruction is issued per cycle. This ensures that we stay enabled for a long enough
 1059; time; if it's too short, pending interruptions will not have a chance to be taken
1060
1061 subi r4,r4,128 ; Back off elapsed time from timeout value
1062 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1063 mr. r4,r4 ; See if we used the whole timeout
1064 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1065
1066 ble-- mlckslow1 ; We failed
91447636 1067 b mlckspin3 ; Now that we've opened an enable window, keep trying...
ab86ba33
A
1068mlckretry:
1069 mtmsr r9 ; Restore interrupt state
1070 li r8,1 ; Show already through once
91447636
A
1071 blr
1072
ab86ba33
A
1073mlckslow0: ; We couldn't get the lock
1074 mtmsr r9 ; Restore interrupt state
55e303ae 1075
ab86ba33 1076mlckslow1:
91447636 1077 mtlr r12
1c79356b 1078
55e303ae 1079 PROLOG(0)
1c79356b 1080.L_ml_retry:
55e303ae
A
1081 bl lockDisa ; Go get a lock on the mutex's interlock lock
1082 mr. r4,r3 ; Did we get it?
1083 lwz r3,FM_ARG0(r1) ; Restore the lock address
3a60a9f5 1084 bne++ mlGotInt ; We got it just fine...
91447636 1085 mr r4,r11 ; Saved lock addr
55e303ae
A
1086 lis r3,hi16(mutex_failed1) ; Get the failed mutex message
1087 ori r3,r3,lo16(mutex_failed1) ; Get the failed mutex message
55e303ae
A
1088 bl EXT(panic) ; Call panic
1089 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1c79356b
A
1090
1091 .data
1092mutex_failed1:
91447636 1093 STRINGD "attempt to interlock mutex (0x%08X) failed on mutex lock\n\000"
1c79356b
A
1094 .text
1095
1096mlGotInt:
1097
55e303ae
A
1098; Note that there is no reason to do a load and reserve here. We already
1099; hold the interlock lock and no one can touch this field unless they
1100; have that, so, we're free to play
1101
91447636 1102 lwz r4,MUTEX_DATA(r3) ; Get the mutex's lock field
55e303ae
A
1103 rlwinm. r9,r4,30,2,31 ; So, can we have it?
 1104 bne- mlInUse ; Nope, somebody's playing already...
1c79356b 1105
91447636
A
1106 bf++ MUTEX_ATTR_DEBUGb,mlDebSkip
1107 CHECK_SETUP(r5)
55e303ae
A
1108 mfsprg r9,1 ; Get the current activation
1109 lwz r5,0(r1) ; Get previous save frame
91447636
A
1110 lwz r6,FM_LR_SAVE(r5) ; Get our caller's address
1111 mr r8,r9 ; Get the active thread
1112 stw r6,MUTEX_STACK(r3) ; Save our caller
55e303ae 1113 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
91447636
A
1114 LCK_STACK(r3,r5,r6,r7,r8,r10)
1115mlDebSkip:
1116 mr r3,r11 ; Get the based lock address
1117 bl EXT(lck_mtx_lock_acquire)
1118 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
9bccf70c 1119 mfsprg r5,1
91447636 1120 mtcr r2
9bccf70c 1121 mr. r4,r3
91447636
A
1122 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1123 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
9bccf70c
A
1124 beq mlUnlock
1125 ori r5,r5,WAIT_FLAG
1c79356b 1126
55e303ae 1127mlUnlock: eieio
91447636 1128 stw r5,MUTEX_DATA(r3) ; grab the mutexlock and free the interlock
1c79356b 1129
55e303ae
A
1130 EPILOG ; Restore all saved registers
1131 b epStart ; Go enable preemption...
de355530 1132
55e303ae
A
1133; We come to here when we have a resource conflict. In other words,
1134; the mutex is held.
1c79356b
A
1135
1136mlInUse:
1137
de355530 1138 CHECK_SETUP(r12)
 1139 CHECK_MYLOCK() ; Assert we don't own the lock already
d7e50217 1140
55e303ae
A
1141; Note that we come in here with the interlock set. The wait routine
1142; will unlock it before waiting.
1143
91447636
A
1144 bf MUTEX_ATTR_STATb,mlStatSkip2 ; Branch if no stat
1145 lwz r5,MUTEX_GRP(r3) ; Load lck group
1146 bt mlckmiss,mlStatSkip1 ; Skip miss already counted
1147 crset mlckmiss ; Remember miss recorded
1148 li r9,GRP_MTX_STAT_MISS+4 ; Get stat miss offset
1149mlStatLoop1:
1150 lwarx r8,r9,r5 ; Load stat miss cnt
1151 addi r8,r8,1 ; Increment stat miss cnt
1152 stwcx. r8,r9,r5 ; Store stat miss cnt
1153 bne-- mlStatLoop1 ; Retry if failed
1154mlStatSkip1:
1155 lwz r9,GRP_MTX_STAT_WAIT+4(r5) ; Load wait cnt
1156 addi r9,r9,1 ; Increment wait cnt
 1157 stw r9,GRP_MTX_STAT_WAIT+4(r5) ; Update wait cnt
1158mlStatSkip2:
55e303ae 1159 ori r4,r4,WAIT_FLAG ; Set the wait flag
91447636 1160 stw r4,MUTEX_DATA(r3)
55e303ae 1161 rlwinm r4,r4,0,0,29 ; Extract the lock owner
91447636
A
1162 mfcr r2
1163 stw r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
1164 mr r3,r11 ; Get the based lock address
1165 bl EXT(lck_mtx_lock_wait) ; Wait for our turn at the lock
1c79356b 1166
55e303ae 1167 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
91447636
A
1168 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1169 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
1170 mtcr r2
55e303ae 1171 b .L_ml_retry ; and try again...
1c79356b
A
1172
1173
1174/*
 1175 * void lck_mtx_try_lock_ext(lck_mtx_ext_t*)
1c79356b
A
1176 *
1177 */
1c79356b 1178 .align 5
91447636
A
1179 .globl EXT(lck_mtx_try_lock_ext)
1180LEXT(lck_mtx_try_lock_ext)
1181#if MACH_LDEBUG
0b4e3aa0
A
1182 .globl EXT(mutex_try)
1183LEXT(mutex_try)
1c79356b 1184 .globl EXT(_mutex_try)
1c79356b 1185LEXT(_mutex_try)
91447636
A
1186#endif
1187 mr r11,r3 ; Save lock addr
1188mlteEnter:
1189 lwz r0,MUTEX_ATTR(r3)
1190 mtcrf 1,r0 ; Set cr7
1191 CHECK_SETUP(r12)
1192 CHECK_MUTEX_TYPE()
1193
1194 bf MUTEX_ATTR_STATb,mlteStatSkip ; Branch if no stat
1195 lwz r5,MUTEX_GRP(r3) ; Load lock group
1196 li r7,GRP_MTX_STAT_UTIL+4 ; Set stat util offset
1197mlteStatLoop:
1198 lwarx r8,r7,r5 ; Load stat util cnt
1199 addi r8,r8,1 ; Increment stat util cnt
1200 stwcx. r8,r7,r5 ; Store stat util cnt
1201 bne-- mlteStatLoop ; Retry if failed
 1202 mr. r8,r8 ; Test for zero
 1203 bne++ mlteStatSkip ; Skip unless the stat util cnt wrapped
1204 lwz r8,GRP_MTX_STAT_UTIL(r5) ; Load upper stat util cnt
1205 addi r8,r8,1 ; Increment upper stat util cnt
1206 stw r8,GRP_MTX_STAT_UTIL(r5) ; Store upper stat util cnt
1207mlteStatSkip:
55e303ae 1208 mfsprg r6,1 ; load the current thread
91447636 1209 lwz r5,MUTEX_DATA(r3) ; Get the lock value
55e303ae
A
1210 mr. r5,r5 ; Quick check
1211 bne-- L_mutex_try_slow ; Can not get it now...
91447636
A
1212 mfmsr r9 ; Get the MSR value
1213 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1214 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1215 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1216 andc r9,r9,r0 ; Clear FP and VEC
1217 andc r7,r9,r7 ; Clear EE as well
1218 mtmsr r7 ; Turn off interruptions
1219 isync ; May have turned off vec and fp here
55e303ae 1220
91447636
A
1221mlteLoopTry:
1222 lwarx r5,MUTEX_DATA,r3 ; load the lock value
9bccf70c 1223 mr. r5,r5
91447636
A
1224 bne-- mlteSlowX ; branch to the slow path
1225 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
1226 bne-- mlteLoopTry ; retry if failed
1227 .globl EXT(mltelckPatch_isync)
1228LEXT(mltelckPatch_isync)
55e303ae 1229 isync ; stop prefetching
91447636
A
1230 mflr r12
1231 bf MUTEX_ATTR_DEBUGb,mlteDebSkip
1232 mr r8,r6 ; Get the active thread
1233 stw r12,MUTEX_STACK(r3) ; Save our caller
1234 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
1235 mr r5,r1
1236 LCK_STACK(r3,r5,r6,r7,r8,r10)
1237mlteDebSkip:
0b4e3aa0 1238 li r3, 1
91447636 1239 mtmsr r9 ; Say, any interrupts pending?
0b4e3aa0 1240 blr
91447636 1241mlteSlowX:
55e303ae
A
1242 li r5,lgKillResv ; Killing field
1243 stwcx. r5,0,r5 ; Kill reservation
91447636
A
1244 mtmsr r9 ; Say, any interrupts pending?
1245 b L_mutex_try_slow
55e303ae 1246
55e303ae 1247
91447636
A
1248/*
1249 * void lck_mtx_try_lock(lck_mtx_t*)
1250 *
1251 */
1252 .align 5
1253 .globl EXT(lck_mtx_try_lock)
1254LEXT(lck_mtx_try_lock)
1255#if !MACH_LDEBUG
1256 .globl EXT(mutex_try)
1257LEXT(mutex_try)
1258 .globl EXT(_mutex_try)
1259LEXT(_mutex_try)
0b4e3aa0 1260#endif
1c79356b 1261
91447636
A
1262 mfsprg r6,1 ; load the current thread
1263 lwz r5,MUTEX_DATA(r3) ; Get the lock value
1264 mr r11,r3 ; Save lock addr
1265 mr. r5,r5 ; Quick check
1266 bne-- mltSlow00 ; Indirect or Can not get it now...
1267
1268mltLoopTry:
1269 lwarx r5,MUTEX_DATA,r3 ; load the lock value
1270 mr. r5,r5
1271 bne-- mltSlow01 ; branch to the slow path
1272 stwcx. r6,MUTEX_DATA,r3 ; grab the lock
1273 bne-- mltLoopTry ; retry if failed
1274 .globl EXT(mltlckPatch_isync)
1275LEXT(mltlckPatch_isync)
1276 isync ; stop prefetching
1277 li r3, 1
1278 blr
1279
1280mltSlow00:
1281 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
1282 bne-- mltSlow02 ; No, go handle contention
1283 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
1284 b mlteEnter
1285mltSlow01:
1286 li r5,lgKillResv ; Killing field
1287 stwcx. r5,0,r5 ; Kill reservation
1288
1289mltSlow02:
1290 li r0,0
1291 mtcrf 1,r0 ; Set cr7 to zero
1292
1293L_mutex_try_slow:
1294 PROLOG(0)
1c79356b 1295
91447636 1296 lwz r6,MUTEX_DATA(r3) ; Quick check
55e303ae
A
1297 rlwinm. r6,r6,30,2,31 ; to see if someone has this lock already
1298 bne- mtFail ; Someone's got it already...
1299
1300 bl lockDisa ; Go get a lock on the mutex's interlock lock
3a60a9f5 1301 mr. r4,r3 ; Did we get it?
55e303ae 1302 lwz r3,FM_ARG0(r1) ; Restore the lock address
3a60a9f5 1303 bne++ mtGotInt ; We got it just fine...
91447636 1304 mr r4,r11 ; Saved lock addr
55e303ae
A
1305 lis r3,hi16(mutex_failed2) ; Get the failed mutex message
1306 ori r3,r3,lo16(mutex_failed2) ; Get the failed mutex message
55e303ae
A
1307 bl EXT(panic) ; Call panic
1308 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1c79356b
A
1309
1310 .data
1311mutex_failed2:
91447636 1312 STRINGD "attempt to interlock mutex (0x%08X) failed on mutex lock try\n\000"
1c79356b
A
1313 .text
1314
1315mtGotInt:
1316
55e303ae
A
1317; Note that there is no reason to do a load and reserve here. We already
 1318; hold the interlock and no one can touch this field unless they
1319; have that, so, we're free to play
1c79356b 1320
91447636 1321 lwz r4,MUTEX_DATA(r3) ; Get the mutex's lock field
55e303ae
A
1322 rlwinm. r9,r4,30,2,31 ; So, can we have it?
 1323 bne- mtInUse ; Nope, somebody's playing already...
1c79356b 1324
91447636
A
1325 bf++ MUTEX_ATTR_DEBUGb,mtDebSkip
1326 CHECK_SETUP(r5)
55e303ae
A
1327 mfsprg r9,1 ; Get the current activation
1328 lwz r5,0(r1) ; Get previous save frame
91447636
A
1329 lwz r6,FM_LR_SAVE(r5) ; Get our caller's address
1330 mr r8,r9 ; Get the active thread
1331 stw r6,MUTEX_STACK(r3) ; Save our caller
55e303ae 1332 stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
91447636
A
1333 LCK_STACK(r3,r5,r6,r7,r8,r10)
1334mtDebSkip:
1335 mr r3,r11 ; Get the based lock address
1336 bl EXT(lck_mtx_lock_acquire)
9bccf70c
A
1337 mfsprg r5,1
1338 mr. r4,r3
91447636
A
1339 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
1340 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
9bccf70c
A
1341 beq mtUnlock
1342 ori r5,r5,WAIT_FLAG
1c79356b 1343
55e303ae 1344mtUnlock: eieio
91447636 1345 stw r5,MUTEX_DATA(r3) ; grab the mutexlock and free the interlock
1c79356b 1346
55e303ae 1347 bl epStart ; Go enable preemption...
0b4e3aa0 1348
1c79356b 1349 li r3, 1
55e303ae
A
1350 EPILOG ; Restore all saved registers
1351 blr ; Return...
1c79356b 1352
55e303ae
A
1353; We come to here when we have a resource conflict. In other words,
1354; the mutex is held.
1c79356b 1355
0b4e3aa0 1356mtInUse:
91447636
A
1357 bf++ MUTEX_ATTR_STATb,mtStatSkip ; Branch if no stat
1358 lwz r5,MUTEX_GRP(r3) ; Load lock group
1359 li r9,GRP_MTX_STAT_MISS+4 ; Get stat miss offset
1360mtStatLoop:
1361 lwarx r8,r9,r5 ; Load stat miss cnt
1362 addi r8,r8,1 ; Increment stat miss cnt
1363 stwcx. r8,r9,r5 ; Store stat miss cnt
1364 bne-- mtStatLoop ; Retry if failed
1365mtStatSkip:
55e303ae 1366 rlwinm r4,r4,0,0,30 ; Get the unlock value
91447636 1367 stw r4,MUTEX_DATA(r3) ; free the interlock
55e303ae 1368 bl epStart ; Go enable preemption...
1c79356b 1369
55e303ae
A
1370mtFail: li r3,0 ; Set failure code
1371 EPILOG ; Restore all saved registers
1372 blr ; Return...
1c79356b
A
1373
1374
1375/*
91447636 1376 * void mutex_unlock(mutex_t* l)
55e303ae 1377 *
1c79356b 1378 */
55e303ae 1379 .align 5
91447636
A
1380 .globl EXT(mutex_unlock)
1381LEXT(mutex_unlock)
1382
1383 sync
1384 mr r11,r3 ; Save lock addr
1385#if MACH_LDEBUG
1386 b mlueEnter1
1387#else
1388 b mluEnter1
1389#endif
55e303ae 1390
91447636
A
1391/*
1392 * void lck_mtx_ext_unlock(lck_mtx_ext_t* l)
1393 *
1394 */
1395 .align 5
1396 .globl EXT(lck_mtx_ext_unlock)
1397LEXT(lck_mtx_ext_unlock)
1398#if MACH_LDEBUG
1399 .globl EXT(mutex_unlock_rwcmb)
55e303ae 1400LEXT(mutex_unlock_rwcmb)
91447636
A
1401#endif
1402mlueEnter:
1403 .globl EXT(mulckePatch_isync)
1404LEXT(mulckePatch_isync)
55e303ae 1405 isync
91447636
A
1406 .globl EXT(mulckePatch_eieio)
1407LEXT(mulckePatch_eieio)
55e303ae 1408 eieio
91447636
A
1409 mr r11,r3 ; Save lock addr
1410mlueEnter1:
1411 lwz r0,MUTEX_ATTR(r3)
1412 mtcrf 1,r0 ; Set cr7
1413 CHECK_SETUP(r12)
1414 CHECK_MUTEX_TYPE()
1415 CHECK_THREAD(MUTEX_THREAD)
1c79356b 1416
91447636 1417 lwz r5,MUTEX_DATA(r3) ; Get the lock
55e303ae
A
1418 rlwinm. r4,r5,0,30,31 ; Quick check
1419 bne-- L_mutex_unlock_slow ; Can not get it now...
91447636
A
1420 mfmsr r9 ; Get the MSR value
1421 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1422 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1423 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1424 andc r9,r9,r0 ; Clear FP and VEC
1425 andc r7,r9,r7 ; Clear EE as well
1426 mtmsr r7 ; Turn off interruptions
1427 isync ; May have turned off vec and fp here
55e303ae 1428
91447636
A
1429mlueLoop:
1430 lwarx r5,MUTEX_DATA,r3
55e303ae
A
1431 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1432 li r5,0 ; Clear the mutexlock
91447636
A
1433 bne-- mlueSlowX
1434 stwcx. r5,MUTEX_DATA,r3
1435 bne-- mlueLoop
1436 mtmsr r9 ; Say, any interrupts pending?
55e303ae
A
1437 blr
1438
91447636 1439mlueSlowX:
55e303ae
A
1440 li r5,lgKillResv ; Killing field
1441 stwcx. r5,0,r5 ; Dump reservation
91447636 1442 mtmsr r9 ; Say, any interrupts pending?
55e303ae
A
1443 b L_mutex_unlock_slow ; Join slow path...
1444
1445/*
91447636 1446 * void lck_mtx_unlock(lck_mtx_t* l)
55e303ae
A
1447 *
1448 */
1c79356b 1449 .align 5
91447636
A
1450 .globl EXT(lck_mtx_unlock)
1451LEXT(lck_mtx_unlock)
0b4e3aa0 1452#if !MACH_LDEBUG
91447636
A
1453 .globl EXT(mutex_unlock_rwcmb)
1454LEXT(mutex_unlock_rwcmb)
1455#endif
1456mluEnter:
1457 .globl EXT(mulckPatch_isync)
1458LEXT(mulckPatch_isync)
1459 isync
1460 .globl EXT(mulckPatch_eieio)
1461LEXT(mulckPatch_eieio)
1462 eieio
1463 mr r11,r3 ; Save lock addr
1464mluEnter1:
1465 lwz r5,MUTEX_DATA(r3) ; Get the lock
55e303ae 1466 rlwinm. r4,r5,0,30,31 ; Quick check
91447636 1467 bne-- mluSlow0 ; Indirect or Can not get it now...
55e303ae 1468
91447636
A
1469mluLoop:
1470 lwarx r5,MUTEX_DATA,r3
55e303ae
A
1471 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1472 li r5,0 ; Clear the mutexlock
91447636
A
1473 bne-- mluSlowX
1474 stwcx. r5,MUTEX_DATA,r3
1475 bne-- mluLoop
0b4e3aa0 1476 blr
91447636
A
1477
1478mluSlow0:
1479 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
1480 bne-- L_mutex_unlock_slow ; No, go handle contention
1481 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
1482 b mlueEnter1
1483mluSlowX:
55e303ae
A
1484 li r5,lgKillResv ; Killing field
1485 stwcx. r5,0,r5 ; Dump reservation
1486
55e303ae
A
1487L_mutex_unlock_slow:
1488
1c79356b
A
1489 PROLOG(0)
1490
55e303ae
A
1491 bl lockDisa ; Go get a lock on the mutex's interlock lock
1492 mr. r4,r3 ; Did we get it?
1493 lwz r3,FM_ARG0(r1) ; Restore the lock address
3a60a9f5 1494 bne++ muGotInt ; We got it just fine...
91447636 1495 mr r4,r11 ; Saved lock addr
55e303ae
A
1496 lis r3,hi16(mutex_failed3) ; Get the failed mutex message
1497 ori r3,r3,lo16(mutex_failed3) ; Get the failed mutex message
55e303ae
A
1498 bl EXT(panic) ; Call panic
1499 BREAKPOINT_TRAP ; We die here anyway, can not get the lock
1c79356b
A
1500
1501 .data
1502mutex_failed3:
91447636 1503 STRINGD "attempt to interlock mutex (0x%08X) failed on mutex unlock\n\000"
1c79356b
A
1504 .text
1505
1506
1507muGotInt:
91447636 1508 lwz r4,MUTEX_DATA(r3)
55e303ae 1509 andi. r5,r4,WAIT_FLAG ; are there any waiters ?
9bccf70c 1510 rlwinm r4,r4,0,0,29
55e303ae 1511 beq+ muUnlock ; Nope, we're done...
1c79356b 1512
91447636
A
1513 mr r3,r11 ; Get the based lock address
1514 bl EXT(lck_mtx_unlock_wakeup) ; yes, wake a thread
55e303ae 1515 lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog)
91447636
A
1516 lwz r11,FM_ARG0+0x04(r1) ; restore r11 (saved in prolog)
1517 lwz r5,MUTEX_DATA(r3) ; load the lock
1c79356b
A
1518
1519muUnlock:
55e303ae
A
1520 andi. r5,r5,WAIT_FLAG ; Get the unlock value
1521 eieio
91447636 1522 stw r5,MUTEX_DATA(r3) ; unlock the interlock and lock
55e303ae
A
1523
1524 EPILOG ; Deal with the stack now, enable_preemption doesn't always want one
1525 b epStart ; Go enable preemption...
1c79356b
A
1526
1527/*
91447636
A
1528 * void lck_mtx_assert(lck_mtx_t* l, unsigned int)
1529 *
1c79356b 1530 */
91447636
A
1531 .align 5
1532 .globl EXT(lck_mtx_assert)
1533LEXT(lck_mtx_assert)
1534 .globl EXT(_mutex_assert)
1535LEXT(_mutex_assert)
1536 mr r11,r3
1537maEnter:
1538 lwz r5,MUTEX_DATA(r3)
1539 cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
1540 bne-- maCheck ; No, go check the assertion
1541 lwz r3,MUTEX_PTR(r3) ; load mutex ext pointer
1542 b maEnter
1543maCheck:
1544 mfsprg r6,1 ; load the current thread
1545 rlwinm r5,r5,0,0,29 ; Extract the lock owner
1546 cmpwi r4,MUTEX_ASSERT_OWNED
1547 cmplw cr1,r6,r5 ; Is the lock held by current act
1548 crandc cr0_eq,cr0_eq,cr1_eq ; Check owned assertion
1549 bne-- maNext
1550 mr r4,r11
1551 lis r3,hi16(mutex_assert1) ; Get the failed mutex message
1552 ori r3,r3,lo16(mutex_assert1) ; Get the failed mutex message
1553 b maPanic ; Panic path
1554maNext:
1555 cmpwi r4,MUTEX_ASSERT_NOTOWNED ; Check not owned assertion
1556 crand cr0_eq,cr0_eq,cr1_eq ;
1557 bnelr++
1558maPanic:
55e303ae 1559 PROLOG(0)
91447636
A
1560 mr r4,r11
1561 lis r3,hi16(mutex_assert2) ; Get the failed mutex message
1562 ori r3,r3,lo16(mutex_assert2) ; Get the failed mutex message
1563 bl EXT(panic) ; Call panic
1564 BREAKPOINT_TRAP ; We die here anyway
55e303ae 1565
91447636
A
1566 .data
1567mutex_assert1:
1568 STRINGD "mutex (0x%08X) not owned\n\000"
1569mutex_assert2:
1570 STRINGD "mutex (0x%08X) owned\n\000"
1571 .text
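/*
 * Illustrative C-level usage (not part of this file), hedged; the assertion
 * constants are named after this file's MUTEX_ASSERT_* values, and m is a
 * hypothetical lck_mtx_t.
 *
 * lck_mtx_assert(&m, MUTEX_ASSERT_OWNED);     // panics "not owned" if we
 *                                             // don't hold it
 * lck_mtx_assert(&m, MUTEX_ASSERT_NOTOWNED);  // panics "owned" if we do
 */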
1572
1573
55e303ae 1574/*
91447636 1575 * void lck_mtx_ilk_unlock(lck_mtx *lock)
55e303ae 1576 */
91447636
A
1577 .globl EXT(lck_mtx_ilk_unlock)
1578LEXT(lck_mtx_ilk_unlock)
1c79356b 1579
91447636 1580 lwz r10,MUTEX_DATA(r3)
0b4e3aa0 1581 rlwinm r10,r10,0,0,30
55e303ae 1582 eieio
91447636 1583 stw r10,MUTEX_DATA(r3)
1c79356b 1584
55e303ae 1585 b epStart ; Go enable preemption...
1c79356b 1586
55e303ae
A
1587/*
1588 * void _enable_preemption_no_check(void)
1c79356b 1589 *
55e303ae 1590 * This version does not check if we get preempted or not
1c79356b 1591 */
1c79356b
A
1592 .align 4
1593 .globl EXT(_enable_preemption_no_check)
1594
1595LEXT(_enable_preemption_no_check)
1c79356b 1596
55e303ae
A
 1597 cmplw cr1,r1,r1 ; Set cr1_eq so we know not to check if preempted
1598 b epCommn ; Join up with the other enable code...
1c79356b 1599
55e303ae
A
1600/*
1601 * void _enable_preemption(void)
1602 *
1603 * This version checks if we get preempted or not
1604 */
1c79356b
A
1605 .align 5
1606 .globl EXT(_enable_preemption)
1607
1608LEXT(_enable_preemption)
1609
91447636 1610; Here is where we enable preemption.
55e303ae
A
1611
1612epStart:
 1613 cmplwi cr1,r1,0 ; Clear cr1_eq so we know to check if preempted
1614
1615epCommn:
1616 mfsprg r3,1 ; Get current activation
1617 li r8,-1 ; Get a decrementer
1618 lwz r5,ACT_PREEMPT_CNT(r3) ; Get the preemption level
1619 add. r5,r5,r8 ; Bring down the disable count
1620 blt- epTooFar ; Yeah, we did...
1621 stw r5,ACT_PREEMPT_CNT(r3) ; Save it back
1622 crandc cr0_eq,cr0_eq,cr1_eq
1623 beq+ epCheckPreempt ; Go check if we need to be preempted...
1624 blr ; Leave...
1c79356b 1625epTooFar:
55e303ae
A
1626 mr r4,r5
1627 lis r3,hi16(epTooFarStr) ; First half of panic string
1628 ori r3,r3,lo16(epTooFarStr) ; Second half of panic string
1629 PROLOG(0)
1630 bl EXT(panic)
1631 BREAKPOINT_TRAP ; We die here anyway
1c79356b
A
1632
1633 .data
1634epTooFarStr:
91447636 1635 STRINGD "enable_preemption: preemption_level %d\n\000"
de355530 1636
55e303ae 1637 .text
1c79356b 1638 .align 5
1c79356b 1639epCheckPreempt:
55e303ae
A
1640 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1641 mfmsr r9 ; Get the MSR value
1642 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
91447636 1643 andi. r4,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off
55e303ae
A
1644 beq+ epCPno ; No preemption here...
1645 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1646 andc r9,r9,r0 ; Clear FP and VEC
1647 andc r7,r9,r7 ; Clear EE as well
1648 mtmsr r7 ; Turn off interruptions
1649 isync ; May have turned off vec and fp here
91447636
A
1650 lwz r3,ACT_PER_PROC(r3) ; Get the per_proc block
1651 lwz r7,PP_PENDING_AST(r3) ; Get pending AST mask
55e303ae 1652 li r5,AST_URGENT ; Get the requests we do honor
55e303ae
A
1653 lis r0,hi16(DoPreemptCall) ; Just in case, get the top of firmware call
1654 and. r7,r7,r5 ; Should we preempt?
1655 ori r0,r0,lo16(DoPreemptCall) ; Merge in bottom part
1656 mtmsr r9 ; Allow interrupts if we can
1657epCPno:
1658 beqlr+ ; We probably will not preempt...
1659 sc ; Do the preemption
 1660 blr ; Now go away...
1c79356b
A
1661
1662/*
55e303ae
A
1663 * void disable_preemption(void)
1664 *
91447636 1665 * Here is where we disable preemption.
1c79356b 1666 */
1c79356b 1667 .align 5
1c79356b
A
1668 .globl EXT(_disable_preemption)
1669
1670LEXT(_disable_preemption)
1671
55e303ae
A
1672 mfsprg r6,1 ; Get the current activation
1673 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1674 addi r5,r5,1 ; Bring up the disable count
1675 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1676 blr ; Return...
1c79356b
A
1677
1678/*
55e303ae
A
1679 * int get_preemption_level(void)
1680 *
1681 * Return the current preemption level
1c79356b 1682 */
1c79356b 1683 .align 5
55e303ae 1684 .globl EXT(get_preemption_level)
de355530 1685
55e303ae
A
1686LEXT(get_preemption_level)
1687
1688 mfsprg r6,1 ; Get current activation
1689 lwz r3,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1690 blr ; Return...
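/*
 * Continuing the sketch above: the disable side and the level query are just
 * an increment and a read. No atomics are needed because the count lives in
 * the current activation, which cannot migrate while preemption is disabled.
 * A hypothetical pairing:
 *
 *	void disable_preemption(void)   { ++preempt_count; }
 *	int  get_preemption_level(void) { return preempt_count; }
 *
 *	void touch_per_proc_state(void)
 *	{
 *		disable_preemption();           // pin to this processor
 *		// ... use per_proc data safely ...
 *		enable_preemption();            // may take a pending AST here
 *	}
 */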
9bccf70c 1691
9bccf70c 1692/*
55e303ae
A
1693 * void ppc_usimple_lock_init(simple_lock_t, etap_event_t)
1694 *
1695 * Initialize a simple lock.
9bccf70c
A
1696 */
1697 .align 5
55e303ae 1698 .globl EXT(ppc_usimple_lock_init)
9bccf70c 1699
55e303ae 1700LEXT(ppc_usimple_lock_init)
1c79356b 1701
55e303ae
A
1702 li r0, 0 ; set lock to free == 0
1703 stw r0, 0(r3) ; Initialize the lock
1704 blr
1705
1c79356b 1706/*
91447636
A
1707 * void lck_spin_lock(lck_spin_t *)
1708 * void ppc_usimple_lock(simple_lock_t *)
55e303ae 1709 *
1c79356b 1710 */
1c79356b 1711 .align 5
91447636
A
1712 .globl EXT(lck_spin_lock)
1713LEXT(lck_spin_lock)
55e303ae 1714 .globl EXT(ppc_usimple_lock)
55e303ae 1715LEXT(ppc_usimple_lock)
1c79356b 1716
55e303ae
A
1717 mfsprg r6,1 ; Get the current activation
1718 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1719 addi r5,r5,1 ; Bring up the disable count
1720 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1721 mr r5,r3 ; Get the address of the lock
1722 li r8,0 ; Set r8 to zero
1723 li r4,0 ; Set r4 to zero
1724
91447636 1725slcktry: lwarx r11,SLOCK_ILK,r5 ; Grab the lock value
55e303ae
A
1726 andi. r3,r11,ILK_LOCKED ; Is it locked?
1727 ori r11,r6,ILK_LOCKED ; Set interlock
1728 bne-- slckspin ; Yeah, wait for it to clear...
91447636 1729 stwcx. r11,SLOCK_ILK,r5 ; Try to seize that there durn lock
55e303ae 1730 bne-- slcktry ; Couldn't get it...
91447636
A
1731 .globl EXT(slckPatch_isync)
1732LEXT(slckPatch_isync)
55e303ae
A
1733 isync ; Make sure we don't use a speculatively loaded value
1734 blr ; Go on home...
1735
1736slckspin: li r11,lgKillResv ; Killing field
1737 stwcx. r11,0,r11 ; Kill reservation
1738
1739 mr. r4,r4 ; Test timeout value
1740 bne++ slockspin0
1741 lis r4,hi16(EXT(LockTimeOut)) ; Get the high part
1742 ori r4,r4,lo16(EXT(LockTimeOut)) ; And the low part
1743 lwz r4,0(r4) ; Get the timeout value
1744
1745slockspin0: mr. r8,r8 ; Is this the first spin attempt (r8 == 0)?
1746 bne++ slockspin1 ; No, MSR already set up; skip the setup...
1747 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1748 mfmsr r9 ; Get the MSR value
1749 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1750 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1751 andc r9,r9,r0 ; Clear FP and VEC
1752 andc r7,r9,r7 ; Clear EE as well
1753 mtmsr r7 ; Turn off interruptions
1754 isync ; May have turned off vec and fp here
1755 mftb r8 ; Get timestamp on entry
1756 b slcksniff
1757
1758slockspin1: mtmsr r7 ; Turn off interruptions
1759 mftb r8 ; Get timestamp on entry
1760
91447636 1761slcksniff: lwz r3,SLOCK_ILK(r5) ; Get that lock in here
55e303ae
A
1762 andi. r3,r3,ILK_LOCKED ; Is it free yet?
1763 beq++ slckretry ; Yeah, try for it again...
1764
1765 mftb r10 ; Time stamp us now
1766 sub r10,r10,r8 ; Get the elapsed time
1767 cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
1768 blt++ slcksniff ; Not yet...
1769
1770 mtmsr r9 ; Say, any interrupts pending?
9bccf70c 1771
55e303ae
A
1772; The following instructions force the pipeline to be interlocked so that only one
1773; instruction is issued per cycle. This ensures that we stay enabled for a long enough
1774; time; if it's too short, pending interruptions will not have a chance to be taken
1c79356b 1775
55e303ae
A
1776 subi r4,r4,128 ; Back off elapsed time from timeout value
1777 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1778 mr. r4,r4 ; See if we used the whole timeout
1779 li r3,0 ; Assume a timeout return code
1780 or r4,r4,r4 ; Do nothing here but force a single cycle delay
1781
1782 ble-- slckfail ; We failed
1783 b slockspin1 ; Now that we've opened an enable window, keep trying...
1784slckretry:
1785 mtmsr r9 ; Restore interrupt state
1786 li r8,1 ; Show already through once
1787 b slcktry
1788slckfail: ; We couldn't get the lock
1789 lis r3,hi16(slckpanic_str)
1790 ori r3,r3,lo16(slckpanic_str)
1791 mr r4,r5
1792 mflr r5
1793 PROLOG(0)
1794 bl EXT(panic)
1795 BREAKPOINT_TRAP ; We die here anyway
de355530 1796
55e303ae
A
1797 .data
1798slckpanic_str:
91447636 1799 STRINGD "simple lock (0x%08X) deadlock detection, pc=0x%08X\n\000"
55e303ae 1800 .text
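/*
 * Hedged C11 sketch of the timed spin above. timebase() and
 * LOCK_TIMEOUT_TICKS are stand-ins for mftb and EXT(LockTimeOut); the real
 * code also spins with interrupts disabled and reopens an enable window
 * every 128 timebase ticks so pending interrupts can be taken.
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	extern uint64_t timebase(void);            // assumed: reads TB like mftb
 *	extern void panic(const char *fmt, ...);
 *	#define LOCK_TIMEOUT_TICKS 0x01000000u     // assumed default
 *
 *	void spin_lock_timed(atomic_uint *lock)
 *	{
 *		uint64_t start = timebase();
 *		unsigned expected = 0;
 *		while (!atomic_compare_exchange_weak_explicit(lock, &expected,
 *		    1u, memory_order_acquire,          // acquire plays the isync role
 *		    memory_order_relaxed)) {
 *			expected = 0;                  // CAS failure rewrote it
 *			if (timebase() - start > LOCK_TIMEOUT_TICKS)
 *				panic("simple lock (%p) deadlock detection",
 *				    (void *)lock);
 *		}
 *	}
 */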
1c79356b 1801
0b4e3aa0 1802/*
91447636
A
1803 * boolean_t lck_spin_try_lock(lck_spin_t *)
1804 * unsigned int ppc_usimple_lock_try(simple_lock_t *)
0b4e3aa0 1805 *
0b4e3aa0
A
1806 */
1807 .align 5
91447636
A
1808 .globl EXT(lck_spin_try_lock)
1809LEXT(lck_spin_try_lock)
55e303ae 1810 .globl EXT(ppc_usimple_lock_try)
55e303ae 1811LEXT(ppc_usimple_lock_try)
0b4e3aa0 1812
55e303ae
A
1813 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
1814 mfmsr r9 ; Get the MSR value
1815 ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
1816 ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
1817 andc r9,r9,r0 ; Clear FP and VEC
1818 andc r7,r9,r7 ; Clear EE as well
1819 mtmsr r7 ; Disable interruptions and thus, preemption
1820 mfsprg r6,1 ; Get current activation
1821
91447636 1822 lwz r11,SLOCK_ILK(r3) ; Get the lock
55e303ae
A
1823 andi. r5,r11,ILK_LOCKED ; Check it...
1824 bne-- slcktryfail ; Quickly fail...
1825
1826slcktryloop:
91447636 1827 lwarx r11,SLOCK_ILK,r3 ; Ld from addr of arg and reserve
55e303ae
A
1828
1829 andi. r5,r11,ILK_LOCKED ; TEST...
1830 ori r5,r6,ILK_LOCKED
1831 bne-- slcktryfailX ; branch if taken. Predict free
1832
91447636 1833 stwcx. r5,SLOCK_ILK,r3 ; And SET (if still reserved)
55e303ae
A
1834 bne-- slcktryloop ; If set failed, loop back
1835
91447636
A
1836 .globl EXT(stlckPatch_isync)
1837LEXT(stlckPatch_isync)
55e303ae
A
1838 isync
1839
1840 lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level
1841 addi r5,r5,1 ; Bring up the disable count
1842 stw r5,ACT_PREEMPT_CNT(r6) ; Save it back
1843
1844 mtmsr r9 ; Allow interruptions now
1845 li r3,1 ; Set that the lock was free
1846 blr
1847
1848slcktryfailX:
1849 li r5,lgKillResv ; Killing field
1850 stwcx. r5,0,r5 ; Kill reservation
1851
1852slcktryfail:
1853 mtmsr r9 ; Allow interruptions now
1854 li r3,0 ; FAILURE - lock was taken
1855 blr
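/*
 * Sketch of the try path above: one reserve/store attempt, no spinning, and
 * preemption stays disabled only on success. Hypothetical C11 equivalent:
 *
 *	_Bool spin_try_lock(atomic_uint *lock)
 *	{
 *		unsigned expected = 0;
 *		if (atomic_compare_exchange_strong_explicit(lock, &expected, 1u,
 *		    memory_order_acquire, memory_order_relaxed)) {
 *			// the assembly bumps ACT_PREEMPT_CNT here
 *			return 1;                      // lock was free
 *		}
 *		return 0;                              // lock was taken
 *	}
 */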
1856
0b4e3aa0
A
1857
1858/*
91447636
A
1859 * void lck_spin_unlock(lck_spin_t *)
1860 * void ppc_usimple_unlock_rwcmb(simple_lock_t *)
0b4e3aa0 1861 *
0b4e3aa0
A
1862 */
1863 .align 5
91447636
A
1864 .globl EXT(lck_spin_unlock)
1865LEXT(lck_spin_unlock)
55e303ae 1866 .globl EXT(ppc_usimple_unlock_rwcmb)
55e303ae 1867LEXT(ppc_usimple_unlock_rwcmb)
0b4e3aa0 1868
55e303ae
A
1869 li r0,0
1870 .globl EXT(sulckPatch_isync)
1871LEXT(sulckPatch_isync)
1872 isync
1873 .globl EXT(sulckPatch_eieio)
1874LEXT(sulckPatch_eieio)
1875 eieio
91447636 1876 stw r0, SLOCK_ILK(r3)
55e303ae
A
1877
1878 b epStart ; Go enable preemption...
0b4e3aa0
A
1879
1880/*
91447636 1881 * void ppc_usimple_unlock_rwmb(simple_lock_t *)
0b4e3aa0 1882 *
0b4e3aa0
A
1883 */
1884 .align 5
55e303ae 1885 .globl EXT(ppc_usimple_unlock_rwmb)
0b4e3aa0 1886
55e303ae 1887LEXT(ppc_usimple_unlock_rwmb)
0b4e3aa0 1888
55e303ae
A
1889 li r0,0
1890 sync
91447636 1891 stw r0, SLOCK_ILK(r3)
0b4e3aa0 1892
55e303ae 1893 b epStart ; Go enable preemption...
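/*
 * Both unlock variants above are a barrier followed by a plain store of
 * zero: the rwcmb flavor uses isync/eieio (patched at boot for the CPU
 * type), the rwmb flavor uses the heavier sync. In C11 terms the pair
 * collapses to one release store; a sketch, not the kernel API:
 *
 *	void spin_unlock(atomic_uint *lock)
 *	{
 *		atomic_store_explicit(lock, 0u, memory_order_release);
 *		// control then falls into epStart to re-enable preemption
 *	}
 */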
0b4e3aa0 1894
9bccf70c 1895/*
55e303ae 1896 * void enter_funnel_section(funnel_t *)
9bccf70c
A
1897 *
1898 */
1899 .align 5
1900 .globl EXT(enter_funnel_section)
1901
1902LEXT(enter_funnel_section)
1903
1904#if !MACH_LDEBUG
55e303ae
A
1905 lis r10,hi16(EXT(kdebug_enable))
1906 ori r10,r10,lo16(EXT(kdebug_enable))
1907 lwz r10,0(r10)
1908 lis r11,hi16(EXT(split_funnel_off))
1909 ori r11,r11,lo16(EXT(split_funnel_off))
1910 lwz r11,0(r11)
1911 or. r10,r11,r10 ; Check kdebug_enable or split_funnel_off
1912 bne- L_enter_funnel_section_slow ; If set, call the slow path
1913 mfsprg r6,1 ; Get the current activation
1914 lwz r7,LOCK_FNL_MUTEX(r3)
1915
1916 lwz r5,0(r7) ; Get lock quickly
1917 mr. r5,r5 ; Locked?
1918 bne-- L_enter_funnel_section_slow ; Yup...
1919
9bccf70c 1920L_enter_funnel_section_loop:
55e303ae
A
1921 lwarx r5,0,r7 ; Load the mutex lock
1922 mr. r5,r5
1923 bne-- L_enter_funnel_section_slowX ; Go to the slow path
1924 stwcx. r6,0,r7 ; Grab the lock
1925 bne-- L_enter_funnel_section_loop ; Loop back if failed
91447636
A
1926 .globl EXT(entfsectPatch_isync)
1927LEXT(entfsectPatch_isync)
55e303ae 1928 isync ; Stop prefetching
55e303ae
A
1929 li r7,TH_FN_OWNED
1930 stw r3,THREAD_FUNNEL_LOCK(r6) ; Set the funnel lock reference
1931 stw r7,THREAD_FUNNEL_STATE(r6) ; Set the funnel state
1932 blr
1933
1934L_enter_funnel_section_slowX:
1935 li r4,lgKillResv ; Killing field
1936 stwcx. r4,0,r4 ; Kill reservation
9bccf70c
A
1937
1938L_enter_funnel_section_slow:
9bccf70c 1939#endif
55e303ae
A
1940 li r4,TRUE
1941 b EXT(thread_funnel_set)
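/*
 * Sketch of the enter fast path above, with assumed names: CAS the funnel's
 * mutex word from 0 to the current activation and record ownership; any
 * contention (or kdebug/split-funnel mode) falls back to the C slow path,
 * thread_funnel_set().
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	struct funnel { atomic_uintptr_t fnl_mutex; };
 *	extern uintptr_t current_act(void);        // assumed: like mfsprg r6,1
 *	extern void thread_funnel_set_slow(struct funnel *f, _Bool on);
 *
 *	void enter_funnel(struct funnel *f, struct funnel **thread_fnl)
 *	{
 *		uintptr_t expected = 0;
 *		if (atomic_compare_exchange_strong_explicit(&f->fnl_mutex,
 *		    &expected, current_act(),
 *		    memory_order_acquire, memory_order_relaxed)) {
 *			*thread_fnl = f;           // THREAD_FUNNEL_LOCK/_STATE
 *			return;
 *		}
 *		thread_funnel_set_slow(f, 1);      // contended: block in C
 *	}
 */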
9bccf70c
A
1942
1943/*
55e303ae 1944 * void exit_funnel_section(void)
9bccf70c
A
1945 *
1946 */
1947 .align 5
1948 .globl EXT(exit_funnel_section)
1949
1950LEXT(exit_funnel_section)
1951
55e303ae 1952 mfsprg r6,1 ; Get the current activation
55e303ae
A
1953 lwz r3,THREAD_FUNNEL_LOCK(r6) ; Get the funnel lock
1954 mr. r3,r3 ; Do we hold a funnel?
1955 beq- L_exit_funnel_section_ret ; No, just return...
9bccf70c 1956#if !MACH_LDEBUG
55e303ae
A
1957 lis r10,hi16(EXT(kdebug_enable))
1958 ori r10,r10,lo16(EXT(kdebug_enable))
1959 lwz r10,0(r10)
1960 mr. r10,r10
1961 bne- L_exit_funnel_section_slow ; If set, call the slow path
1962 lwz r7,LOCK_FNL_MUTEX(r3) ; Get the funnel mutex lock
1963 .globl EXT(retfsectPatch_isync)
1964LEXT(retfsectPatch_isync)
1965 isync
1966 .globl EXT(retfsectPatch_eieio)
1967LEXT(retfsectPatch_eieio)
1968 eieio
1969
1970 lwz r5,0(r7) ; Get lock
1971 rlwinm. r4,r5,0,30,31 ; Quick check for bail if pending waiter or interlock set
1972 bne-- L_exit_funnel_section_slow ; No can get...
1973
9bccf70c 1974L_exit_funnel_section_loop:
55e303ae
A
1975 lwarx r5,0,r7
1976 rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
1977 li r5,0 ; Clear the mutexlock
1978 bne-- L_exit_funnel_section_slowX
1979 stwcx. r5,0,r7 ; Release the funnel mutexlock
1980 bne-- L_exit_funnel_section_loop
1981 li r7,0
1982 stw r7,THREAD_FUNNEL_STATE(r6) ; Clear the funnel state
1983 stw r7,THREAD_FUNNEL_LOCK(r6) ; Clear the funnel lock reference
1984 blr ; Return
1985
1986L_exit_funnel_section_slowX:
1987 li r4,lgKillResv ; Killing field
1988 stwcx. r4,0,r4 ; Kill it
1989
9bccf70c 1990L_exit_funnel_section_slow:
9bccf70c 1991#endif
55e303ae
A
1992 li r4,FALSE
1993 b EXT(thread_funnel_set)
1994L_exit_funnel_section_ret:
1995 blr
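/*
 * The exit side mirrors the sketch above: bail to the slow path if the
 * waiter or interlock bit is set in the mutex word, otherwise store zero
 * and clear the thread's funnel state. Hypothetical usage bracket:
 *
 *	extern void exit_funnel(struct funnel **thread_fnl);  // assumed wrapper
 *
 *	void funneled_operation(struct funnel *f, struct funnel **thread_fnl)
 *	{
 *		enter_funnel(f, thread_fnl);
 *		// ... code serialized by the funnel ...
 *		exit_funnel(thread_fnl);
 *	}
 */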
1996
91447636
A
1997/*
1998 * void lck_rw_lock_exclusive(lck_rw_t*)
1999 *
2000 */
2001 .align 5
2002 .globl EXT(lck_rw_lock_exclusive)
2003LEXT(lck_rw_lock_exclusive)
2004#if !MACH_LDEBUG
2005 .globl EXT(lock_write)
2006LEXT(lock_write)
2007#endif
2008rwleloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2009 rlwinm. r7,r5,30,1,31 ; Can we have it?
2010 ori r6,r5,WANT_EXCL ; Mark Exclusive
2011 bne-- rwlespin ; Branch if cannot be held
2012 stwcx. r6,RW_DATA,r3 ; Update lock word
2013 bne-- rwleloop
2014 .globl EXT(rwlePatch_isync)
2015LEXT(rwlePatch_isync)
2016 isync
2017 blr
2018rwlespin:
2019 li r4,lgKillResv ; Killing field
2020 stwcx. r4,0,r4 ; Kill it
2021 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2022 bne-- rwlespin1 ; No, go handle contention
2023 mr r4,r3 ; pass lock pointer
2024 lwz r3,RW_PTR(r3) ; load lock ext pointer
2025 b EXT(lck_rw_lock_exclusive_ext)
2026rwlespin1:
2027 b EXT(lck_rw_lock_exclusive_gen)
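/*
 * The rw lock word packs a 16-bit reader count in the upper halfword and
 * the ILK_LOCKED/WAIT_FLAG/WANT_UPGRADE/WANT_EXCL bits at the bottom.
 * Illustrative C11 fast path; note the rlwinm above masks out WAIT_FLAG,
 * so exclusive can be taken even with waiters recorded:
 *
 *	#include <stdatomic.h>
 *
 *	#define ILK_LOCKED	0x01u
 *	#define WAIT_FLAG	0x02u
 *	#define WANT_UPGRADE	0x04u
 *	#define WANT_EXCL	0x08u
 *	#define READ_MASK	0xFFFF0000u        // shared-holder count
 *
 *	extern void rw_lock_exclusive_slow(atomic_uint *rw);  // _gen/_ext
 *
 *	void rw_lock_exclusive(atomic_uint *rw)
 *	{
 *		unsigned old = atomic_load_explicit(rw, memory_order_relaxed);
 *		while ((old & ~WAIT_FLAG) == 0) {  // free apart from waiters?
 *			if (atomic_compare_exchange_weak_explicit(rw, &old,
 *			    old | WANT_EXCL,
 *			    memory_order_acquire, memory_order_relaxed))
 *				return;
 *		}
 *		rw_lock_exclusive_slow(rw);        // contended or indirect
 *	}
 */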
2028
2029/*
2030 * void lck_rw_lock_shared(lck_rw_t*)
2031 *
2032 */
2033 .align 5
2034 .globl EXT(lck_rw_lock_shared)
2035LEXT(lck_rw_lock_shared)
2036#if !MACH_LDEBUG
2037 .globl EXT(lock_read)
2038LEXT(lock_read)
2039#endif
2040rwlsloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2041 andi. r7,r5,WANT_EXCL|WANT_UPGRADE|ILK_LOCKED ; Can we have it?
2042 addis r6,r5,1 ; Increment read cnt
2043 bne-- rwlsspin ; Branch if cannot be held
2044 stwcx. r6,RW_DATA,r3 ; Update lock word
2045 bne-- rwlsloop
2046 .globl EXT(rwlsPatch_isync)
2047LEXT(rwlsPatch_isync)
2048 isync
2049 blr
2050rwlsspin:
2051 li r4,lgKillResv ; Killing field
2052 stwcx. r4,0,r4 ; Kill it
2053 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2054 bne-- rwlsspin1 ; No, go handle contention
2055 mr r4,r3 ; pass lock pointer
2056 lwz r3,RW_PTR(r3) ; load lock ext pointer
2057 b EXT(lck_rw_lock_shared_ext)
2058rwlsspin1:
2059 b EXT(lck_rw_lock_shared_gen)
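/*
 * Shared acquire in the same sketch style: free means no exclusive owner,
 * no pending upgrade and no interlock; the addis above adds 1 to the upper
 * halfword, i.e. bumps the reader count by 0x10000.
 *
 *	extern void rw_lock_shared_slow(atomic_uint *rw);     // _gen/_ext
 *
 *	void rw_lock_shared(atomic_uint *rw)
 *	{
 *		unsigned old = atomic_load_explicit(rw, memory_order_relaxed);
 *		while ((old & (WANT_EXCL|WANT_UPGRADE|ILK_LOCKED)) == 0) {
 *			if (atomic_compare_exchange_weak_explicit(rw, &old,
 *			    old + 0x10000u,            // one more reader
 *			    memory_order_acquire, memory_order_relaxed))
 *				return;
 *		}
 *		rw_lock_shared_slow(rw);
 *	}
 */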
2060
2061/*
2062 * boolean_t lck_rw_lock_shared_to_exclusive(lck_rw_t*)
2063 *
2064 */
2065 .align 5
2066 .globl EXT(lck_rw_lock_shared_to_exclusive)
2067LEXT(lck_rw_lock_shared_to_exclusive)
2068#if !MACH_LDEBUG
2069 .globl EXT(lock_read_to_write)
2070LEXT(lock_read_to_write)
2071#endif
2072rwlseloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2073 addis r6,r5,0xFFFF ; Decrement read cnt
2074 lis r8,0xFFFF ; Get read count mask
2075 ori r8,r8,WANT_UPGRADE|ILK_LOCKED ; Include Interlock and upgrade flags
2076 and. r7,r6,r8 ; Can we have it?
2077 ori r9,r6,WANT_UPGRADE ; Mark Exclusive
2078 bne-- rwlsespin ; Branch if cannot be held
2079 stwcx. r9,RW_DATA,r3 ; Update lock word
2080 bne-- rwlseloop
2081 .globl EXT(rwlsePatch_isync)
2082LEXT(rwlsePatch_isync)
2083 isync
2084 li r3,0 ; Succeed, return FALSE...
2085 blr
2086rwlsespin:
2087 li r4,lgKillResv ; Killing field
2088 stwcx. r4,0,r4 ; Kill it
2089 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2090 bne-- rwlsespin1 ; No, go handle contention
2091 mr r4,r3 ; pass lock pointer
2092 lwz r3,RW_PTR(r3) ; load lock ext pointer
2093 b EXT(lck_rw_lock_shared_to_exclusive_ext)
2094rwlsespin1:
2095 b EXT(lck_rw_lock_shared_to_exclusive_gen)
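/*
 * Upgrade fast path, sketched: drop our own reader (addis 0xFFFF is
 * -0x10000) and, only if no other readers, no competing upgrade and no
 * interlock remain, set WANT_UPGRADE. The fast path returns FALSE,
 * matching the "Succeed, return FALSE" comment above; the slow path
 * decides what to return when it must block.
 *
 *	extern _Bool rw_shared_to_exclusive_slow(atomic_uint *rw);
 *
 *	_Bool rw_shared_to_exclusive(atomic_uint *rw)
 *	{
 *		unsigned old = atomic_load_explicit(rw, memory_order_relaxed);
 *		while (((old - 0x10000u) &
 *		    (READ_MASK|WANT_UPGRADE|ILK_LOCKED)) == 0) {
 *			if (atomic_compare_exchange_weak_explicit(rw, &old,
 *			    (old - 0x10000u) | WANT_UPGRADE,
 *			    memory_order_acquire, memory_order_relaxed))
 *				return 0;              // upgraded in place
 *		}
 *		return rw_shared_to_exclusive_slow(rw);
 *	}
 */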
2096
2097
2098
2099/*
2100 * void lck_rw_lock_exclusive_to_shared(lck_rw_t*)
2101 *
2102 */
2103 .align 5
2104 .globl EXT(lck_rw_lock_exclusive_to_shared)
2105LEXT(lck_rw_lock_exclusive_to_shared)
2106#if !MACH_LDEBUG
2107 .globl EXT(lock_write_to_read)
2108LEXT(lock_write_to_read)
2109#endif
2110 .globl EXT(rwlesPatch_isync)
2111LEXT(rwlesPatch_isync)
2112 isync
2113 .globl EXT(rwlesPatch_eieio)
2114LEXT(rwlesPatch_eieio)
2115 eieio
2116rwlesloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2117 andi. r7,r5,ILK_LOCKED ; Test interlock flag
2118 bne-- rwlesspin ; Branch if interlocked
2119 lis r6,1 ; Get 1 for read count
2120 andi. r10,r5,WANT_UPGRADE ; Is it held with upgrade
2121 li r9,WANT_UPGRADE|WAIT_FLAG ; Get upgrade and wait flags mask
2122 bne rwlesexcl1 ; Skip if held with upgrade
2123 li r9,WANT_EXCL|WAIT_FLAG ; Get exclusive and wait flags mask
2124rwlesexcl1:
2125 andc r7,r5,r9 ; Mark free
2126 rlwimi r6,r7,0,16,31 ; Set shared cnt to one
2127 stwcx. r6,RW_DATA,r3 ; Update lock word
2128 bne-- rwlesloop
2129 andi. r7,r5,WAIT_FLAG ; Test wait flag
2130 beqlr++ ; Return if no waiters
2131 addi r3,r3,RW_EVENT ; Get lock event address
2132 b EXT(thread_wakeup) ; wakeup waiters
2133rwlesspin:
2134 li r4,lgKillResv ; Killing field
2135 stwcx. r4,0,r4 ; Kill it
2136 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2137 bne-- rwlesspin1 ; No, go handle contention
2138 mr r4,r3 ; pass lock pointer
2139 lwz r3,RW_PTR(r3) ; load lock ext pointer
2140 b EXT(lck_rw_lock_exclusive_to_shared_ext)
2141rwlesspin1:
2142 b EXT(lck_rw_lock_exclusive_to_shared_gen)
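/*
 * Downgrade, sketched: clear whichever of WANT_UPGRADE/WANT_EXCL was held
 * plus WAIT_FLAG, install a reader count of one, and wake waiters if any
 * were flagged. wakeup_waiters() is an assumed stand-in for thread_wakeup()
 * on the lock's event address; the interlocked case goes to the slow path.
 *
 *	extern void wakeup_waiters(atomic_uint *rw);
 *	extern void rw_exclusive_to_shared_slow(atomic_uint *rw);
 *
 *	void rw_exclusive_to_shared(atomic_uint *rw)
 *	{
 *		unsigned old = atomic_load_explicit(rw, memory_order_relaxed);
 *		for (;;) {
 *			if (old & ILK_LOCKED) {
 *				rw_exclusive_to_shared_slow(rw);
 *				return;
 *			}
 *			unsigned drop = (old & WANT_UPGRADE)
 *			    ? (WANT_UPGRADE|WAIT_FLAG)
 *			    : (WANT_EXCL|WAIT_FLAG);
 *			unsigned new = ((old & ~drop) & ~READ_MASK) | 0x10000u;
 *			if (atomic_compare_exchange_weak_explicit(rw, &old, new,
 *			    memory_order_release, memory_order_relaxed)) {
 *				if (old & WAIT_FLAG)
 *					wakeup_waiters(rw);
 *				return;
 *			}
 *		}
 *	}
 */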
2143
2144
2145
2146/*
2147 * boolean_t lck_rw_try_lock_exclusive(lck_rw_t*)
2148 *
2149 */
2150 .align 5
2151 .globl EXT(lck_rw_try_lock_exclusive)
2152LEXT(lck_rw_try_lock_exclusive)
2153 lis r10,0xFFFF ; Load read count mask
2154 ori r10,r10,WANT_EXCL|WANT_UPGRADE ; Include exclusive and upgrade flags
2155rwtleloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2156 andi. r7,r5,ILK_LOCKED ; Test interlock flag
2157 bne-- rwtlespin ; Branch if interlocked
2158 and. r7,r5,r10 ; Can we have it
2159 ori r6,r5,WANT_EXCL ; Mark Exclusive
2160 bne-- rwtlefail ; Branch if already held...
2161 stwcx. r6,RW_DATA,r3 ; Update lock word
2162 bne-- rwtleloop
2163 .globl EXT(rwtlePatch_isync)
2164LEXT(rwtlePatch_isync)
2165 isync
2166 li r3,1 ; Return TRUE
2167 blr
2168rwtlefail:
2169 li r4,lgKillResv ; Killing field
2170 stwcx. r4,0,r4 ; Kill it
2171 li r3,0 ; Return FALSE
2172 blr
2173rwtlespin:
2174 li r4,lgKillResv ; Killing field
2175 stwcx. r4,0,r4 ; Kill it
2176 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2177 bne-- rwtlespin1 ; No, go handle contention
2178 mr r4,r3 ; pass lock pointer
2179 lwz r3,RW_PTR(r3) ; load lock ext pointer
2180 b EXT(lck_rw_try_lock_exclusive_ext)
2181rwtlespin1:
2182 b EXT(lck_rw_try_lock_exclusive_gen)
2183
2184
2185/*
2186 * boolean_t lck_rw_try_lock_shared(lck_rw_t*)
2187 *
2188 */
2189 .align 5
2190 .globl EXT(lck_rw_try_lock_shared)
2191LEXT(lck_rw_try_lock_shared)
2192rwtlsloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2193 andi. r7,r5,ILK_LOCKED ; Test interlock flag
2194 bne-- rwtlsspin ; Branch if interlocked
2195 andi. r7,r5,WANT_EXCL|WANT_UPGRADE ; So, can we have it?
2196 addis r6,r5,1 ; Increment read cnt
2197 bne-- rwtlsfail ; Branch if held exclusive
2198 stwcx. r6,RW_DATA,r3 ; Update lock word
2199 bne-- rwtlsloop
2200 .globl EXT(rwtlsPatch_isync)
2201LEXT(rwtlsPatch_isync)
2202 isync
2203 li r3,1 ; Return TRUE
2204 blr
2205rwtlsfail:
2206 li r3,0 ; Return FALSE
2207 blr
2208rwtlsspin:
2209 li r4,lgKillResv ; Killing field
2210 stwcx. r4,0,r4 ; Kill it
2211 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2212 bne-- rwtlsspin1 ; No, go handle contention
2213 mr r4,r3 ; pass lock pointer
2214 lwz r3,RW_PTR(r3) ; load lock ext pointer
2215 b EXT(lck_rw_try_lock_shared_ext)
2216rwtlsspin1:
2217 b EXT(lck_rw_try_lock_shared_gen)
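/*
 * The two try variants above reuse the fast paths of the blocking forms,
 * but a failed attempt returns FALSE instead of entering the spin/sleep
 * code. Hypothetical usage:
 *
 *	extern _Bool rw_try_lock_shared(atomic_uint *rw);  // as sketched above
 *
 *	_Bool peek_table(atomic_uint *rw)
 *	{
 *		if (!rw_try_lock_shared(rw))
 *			return 0;              // busy: caller retries later
 *		// ... read-only access, then release via lck_rw_done() ...
 *		return 1;
 *	}
 */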
55e303ae 2218
55e303ae 2219
9bccf70c 2220
91447636
A
2221/*
2222 * lck_rw_type_t lck_rw_done(lck_rw_t*)
2223 *
2224 */
2225 .align 5
2226 .globl EXT(lck_rw_done)
2227LEXT(lck_rw_done)
2228#if !MACH_LDEBUG
2229 .globl EXT(lock_done)
2230LEXT(lock_done)
2231#endif
2232 .globl EXT(rwldPatch_isync)
2233LEXT(rwldPatch_isync)
2234 isync
2235 .globl EXT(rwldPatch_eieio)
2236LEXT(rwldPatch_eieio)
2237 eieio
2238 li r10,WAIT_FLAG ; Get wait flag
2239 lis r7,0xFFFF ; Get read cnt mask
2240 mr r12,r3 ; Save lock addr
2241rwldloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
2242 andi. r8,r5,ILK_LOCKED ; Test interlock flag
2243 bne-- rwldspin ; Branch if interlocked
2244 and. r8,r5,r7 ; Isolate the read count
2245 cmpi cr1,r8,0 ; Remember in cr1 whether it is held shared
2246 beq cr1,rwldexcl ; No readers, go release the exclusive hold
2247 li r11,RW_SHARED ; Set return value
2248 addis r6,r5,0xFFFF ; Decrement read count
2249 and. r8,r6,r7 ; Is it still shared
2250 li r8,0 ; Assume no wakeup
2251 bne rwldshared1 ; Skip if still held shared
2252 and r8,r6,r10 ; Extract wait flag
2253 andc r6,r6,r10 ; Clear wait flag
2254rwldshared1:
2255 b rwldstore
2256rwldexcl:
2257 li r11,RW_EXCL ; Set return value
2258 li r9,WANT_UPGRADE ; Get upgrade flag
2259 and. r6,r5,r9 ; Is it held with upgrade
2260 li r9,WANT_UPGRADE|WAIT_FLAG ; Mask upgrade and wait flags
2261 bne rwldexcl1 ; Skip if held with upgrade
2262 li r9,WANT_EXCL|WAIT_FLAG ; Mask exclusive and wait flags
2263rwldexcl1:
2264 andc r6,r5,r9 ; Mark free
2265 and r8,r5,r10 ; Null if no waiter
2266rwldstore:
2267 stwcx. r6,RW_DATA,r3 ; Update lock word
2268 bne-- rwldloop
2269 mr. r8,r8 ; wakeup needed?
2270 mr r3,r11 ; Return lock held type
2271 beqlr++
2272 mr r3,r12 ; Restore lock address
2273 PROLOG(0)
2274 addi r3,r3,RW_EVENT ; Get lock event address
2275 bl EXT(thread_wakeup) ; wakeup threads
2276 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
2277 mtcr r2
2278 EPILOG
2279 li r3,RW_SHARED ; Assume lock type shared
2280 bne cr1,rwldret ; Branch if it was held shared
2281 li r3,RW_EXCL ; Return lock type exclusive
2282rwldret:
2283 blr
2284rwldspin:
2285 li r4,lgKillResv ; Killing field
2286 stwcx. r4,0,r4 ; Kill it
2287 cmpli cr0,r5,RW_IND ; Is it a lock indirect
2288 bne-- rwldspin1 ; No, go handle contention
2289 mr r4,r3 ; pass lock pointer
2290 lwz r3,RW_PTR(r3) ; load lock ext pointer
2291 b EXT(lck_rw_done_ext)
2292rwldspin1:
2293 b EXT(lck_rw_done_gen)
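/*
 * lck_rw_done, sketched: decide from the lock word whether the caller held
 * it shared or exclusive, release accordingly, and wake waiters only when
 * the last holder clears WAIT_FLAG. The interlocked/indirect cases fall to
 * the _gen/_ext slow paths and are omitted; wakeup_waiters() is the same
 * assumed helper as above.
 *
 *	typedef enum { RW_SHARED_T, RW_EXCL_T } rw_type_t;  // assumed names
 *
 *	rw_type_t rw_done(atomic_uint *rw)
 *	{
 *		unsigned old = atomic_load_explicit(rw, memory_order_relaxed);
 *		for (;;) {
 *			unsigned new, wake = 0;
 *			rw_type_t t;
 *			if (old & READ_MASK) {             // shared hold
 *				t = RW_SHARED_T;
 *				new = old - 0x10000u;      // drop one reader
 *				if ((new & READ_MASK) == 0) {
 *					wake = new & WAIT_FLAG;
 *					new &= ~WAIT_FLAG;
 *				}
 *			} else {                           // exclusive hold
 *				t = RW_EXCL_T;
 *				wake = old & WAIT_FLAG;
 *				new = old & ~((old & WANT_UPGRADE)
 *				    ? (WANT_UPGRADE|WAIT_FLAG)
 *				    : (WANT_EXCL|WAIT_FLAG));
 *			}
 *			if (atomic_compare_exchange_weak_explicit(rw, &old, new,
 *			    memory_order_release, memory_order_relaxed)) {
 *				if (wake)
 *					wakeup_waiters(rw);
 *				return t;
 *			}
 *		}
 *	}
 */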
2294
2295/*
2296 * void lck_rw_ilk_lock(lck_rw_t *lock)
2297 */
2298 .globl EXT(lck_rw_ilk_lock)
2299LEXT(lck_rw_ilk_lock)
2300 crclr hwtimeout ; no timeout option
2301 li r4,0 ; request default timeout value
2302 li r12,ILK_LOCKED ; Load bit mask
2303 b lckcomm ; Join on up...
2304
2305/*
2306 * void lck_rw_ilk_unlock(lck_rw_t *lock)
2307 */
2308 .globl EXT(lck_rw_ilk_unlock)
2309LEXT(lck_rw_ilk_unlock)
2310 li r4,1
2311 b EXT(hw_unlock_bit)