/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_ldebug.h>
#include <i386/asm.h>

#include "assym.s"

#define	PAUSE		rep; nop

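/*
 * Note: "rep; nop" assembles to F3 90, the PAUSE instruction: a spin-loop
 * hint that reduces power and pipeline-flush penalties on hyper-threaded
 * processors, and executes as a plain NOP on older CPUs.
 */
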
/*
 * When performance isn't the only concern, it's
 * nice to build stack frames...
 */
#define	BUILD_STACK_FRAMES (GPROF || \
		((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB))

#if	BUILD_STACK_FRAMES

/* Stack-frame-relative: */
#define	L_PC		B_PC
#define	L_ARG0		B_ARG0
#define	L_ARG1		B_ARG1

#define LEAF_ENTRY(name)	\
	Entry(name);		\
	FRAME;			\
	MCOUNT

#define LEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2);		\
	FRAME;			\
	MCOUNT

#define LEAF_RET		\
	EMARF;			\
	ret

#else	/* BUILD_STACK_FRAMES */

/* Stack-pointer-relative: */
#define	L_PC		S_PC
#define	L_ARG0		S_ARG0
#define	L_ARG1		S_ARG1

#define LEAF_ENTRY(name)	\
	Entry(name)

#define LEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2)

#define LEAF_RET		\
	ret

#endif	/* BUILD_STACK_FRAMES */


/* Non-leaf routines always have a stack frame: */

#define NONLEAF_ENTRY(name)	\
	Entry(name);		\
	FRAME;			\
	MCOUNT

#define NONLEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2);		\
	FRAME;			\
	MCOUNT

#define NONLEAF_RET		\
	EMARF;			\
	ret


#define	M_ILK		(%edx)
#define	M_LOCKED	MUTEX_LOCKED(%edx)
#define	M_WAITERS	MUTEX_WAITERS(%edx)
#define	M_PROMOTED_PRI	MUTEX_PROMOTED_PRI(%edx)
#define	M_ITAG		MUTEX_ITAG(%edx)
#define	M_PTR		MUTEX_PTR(%edx)
#if	MACH_LDEBUG
#define	M_TYPE		MUTEX_TYPE(%edx)
#define	M_PC		MUTEX_PC(%edx)
#define	M_THREAD	MUTEX_THREAD(%edx)
#endif	/* MACH_LDEBUG */
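
/*
 * For orientation, a C sketch of the mutex layout these offsets assume
 * (field names and widths are illustrative; the real offsets come from
 * assym.s, generated from the kernel's lck_mtx definitions):
 *
 *	struct mutex_sketch {
 *		uint32_t ilk;          // M_ILK: interlock dword, 0 when free
 *		uint32_t locked;       // M_LOCKED: owning thread, 0 if free
 *		uint16_t waiters;      // M_WAITERS: count of blocked threads
 *		uint16_t promoted_pri; // M_PROMOTED_PRI: inherited priority
 *	};
 *
 * With %edx holding the mutex pointer, M_ILK is simply (%edx) and the
 * other macros are fixed displacements from it.
 */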

#include <i386/mp.h>
#define	CX(addr,reg)	addr(,reg,4)

#if	MACH_LDEBUG
/*
 * Routines for general lock debugging.
 */

/*
 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to Mutex functions with
 * type simplelock and vice versa.
 */
#define	CHECK_MUTEX_TYPE()				\
	cmpl	$ MUTEX_TAG,M_TYPE		;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"not a mutex!"			;	\
	.text					;	\
1:

/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */
#if	MACH_RT
#define	CHECK_PREEMPTION_LEVEL()			\
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"preemption_level != 0!"	;	\
	.text					;	\
1:
#else	/* MACH_RT */
#define	CHECK_PREEMPTION_LEVEL()
#endif	/* MACH_RT */

#define	CHECK_NO_SIMPLELOCKS()				\
	cmpl	$0,%gs:CPU_SIMPLE_LOCK_COUNT	;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"simple_locks_held!"		;	\
	.text					;	\
1:

/*
 * Verifies return to the correct thread in "unlock" situations.
 */
#define	CHECK_THREAD(thd)				\
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"wrong thread!"			;	\
	.text					;	\
1:

#define	CHECK_MYLOCK(thd)				\
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	jne	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"mylock attempt!"		;	\
	.text					;	\
1:

#define	METER_SIMPLE_LOCK_LOCK(reg)			\
	pushl	reg				;	\
	call	EXT(meter_simple_lock)		;	\
	popl	reg

#define	METER_SIMPLE_LOCK_UNLOCK(reg)			\
	pushl	reg				;	\
	call	EXT(meter_simple_unlock)	;	\
	popl	reg

#else	/* MACH_LDEBUG */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_SIMPLE_LOCK_TYPE()
#define	CHECK_THREAD(thd)
#define	CHECK_PREEMPTION_LEVEL()
#define	CHECK_NO_SIMPLELOCKS()
#define	CHECK_MYLOCK(thd)
#define	METER_SIMPLE_LOCK_LOCK(reg)
#define	METER_SIMPLE_LOCK_UNLOCK(reg)
#endif	/* MACH_LDEBUG */


/*
 *	void hw_lock_init(hw_lock_t)
 *
 *	Initialize a hardware lock.
 */
LEAF_ENTRY(hw_lock_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	$0,0(%edx)		/* clear the lock */
	LEAF_RET

/*
 *	void hw_lock_lock(hw_lock_t)
 *
 *	Acquire lock, spinning until it becomes available.
 *	MACH_RT: also return with preemption disabled.
 */
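/*
 * Roughly, the acquire/release pair here implements the following C
 * sketch (GCC-style atomics for illustration; the assembly uses cmpxchg
 * directly, tags the lock with the owning thread pointer, and the
 * preemption helpers stand for the DISABLE/ENABLE_PREEMPTION macros):
 *
 *	void hw_lock_lock_sketch(volatile uintptr_t *lock, uintptr_t self)
 *	{
 *		disable_preemption();
 *		for (;;) {
 *			// read-only spin keeps the line shared until free
 *			if (*lock == 0 &&
 *			    __sync_bool_compare_and_swap(lock, 0, self))
 *				return;
 *			// PAUSE hint between probes
 *		}
 *	}
 *
 *	void hw_lock_unlock_sketch(volatile uintptr_t *lock)
 *	{
 *		*lock = 0;		// a plain store suffices on x86
 *		enable_preemption();
 *	}
 */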
LEAF_ENTRY(hw_lock_lock)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	DISABLE_PREEMPTION
1:
	movl	0(%edx), %eax
	testl	%eax,%eax		/* lock locked? */
	jne	3f			/* branch if so */
	lock; cmpxchgl	%ecx,0(%edx)	/* try to acquire the HW lock */
	jne	3f
	movl	$1,%eax			/* in case this was a timeout call */
	LEAF_RET			/* if yes, then nothing left to do */
3:
	PAUSE				/* pause for hyper-threading */
	jmp	1b			/* try again */

/*
 *	unsigned int hw_lock_to(hw_lock_t, unsigned int)
 *
 *	Acquire lock, spinning until it becomes available or timeout.
 *	MACH_RT: also return with preemption disabled.
 */
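/*
 * The control flow below, sketched roughly in C (rdtsc() stands for the
 * 64-bit cycle-counter read; helper names are illustrative):
 *
 *	unsigned int hw_lock_to_sketch(volatile uintptr_t *lock,
 *				       uintptr_t self, uint32_t timeout)
 *	{
 *		disable_preemption();
 *		if (*lock == 0 &&
 *		    __sync_bool_compare_and_swap(lock, 0, self))
 *			return 1;			// fastpath
 *		uint64_t deadline = rdtsc() + timeout;
 *		do {
 *			// INNER_LOOP_COUNT probes between timeout checks
 *			for (int i = 0; i < 1000; i++) {
 *				// PAUSE, then read-only probe
 *				if (*lock == 0 &&
 *				    __sync_bool_compare_and_swap(lock, 0, self))
 *					return 1;
 *			}
 *		} while (rdtsc() < deadline);
 *		return 0;			// timed out
 *	}
 */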
LEAF_ENTRY(hw_lock_to)
1:
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	/*
	 * Attempt to grab the lock immediately
	 * - fastpath without timeout nonsense.
	 */
	DISABLE_PREEMPTION
	movl	0(%edx), %eax
	testl	%eax,%eax		/* lock locked? */
	jne	2f			/* branch if so */
	lock; cmpxchgl	%ecx,0(%edx)	/* try to acquire the HW lock */
	jne	2f			/* branch on failure */
	movl	$1,%eax
	LEAF_RET

2:
#define	INNER_LOOP_COUNT	1000
	/*
	 * Failed to get the lock, so set the timeout
	 * and then spin re-checking the lock, pausing
	 * every so many (INNER_LOOP_COUNT) spins to check for timeout.
	 */
	movl	L_ARG1,%ecx		/* fetch timeout */
	push	%edi
	push	%ebx
	mov	%edx,%edi

	rdtsc				/* read cyclecount into %edx:%eax */
	addl	%ecx,%eax		/* add timeout to cyclecount */
	adcl	$0,%edx			/* add carry */
	mov	%edx,%ecx
	mov	%eax,%ebx		/* %ecx:%ebx is the timeout expiry */
4:
	/*
	 * The inner-loop spin to look for the lock being freed.
	 */
	mov	$(INNER_LOOP_COUNT),%edx
5:
	PAUSE				/* pause for hyper-threading */
	movl	0(%edi),%eax		/* spin checking lock value in cache */
	testl	%eax,%eax
	je	6f			/* zero => unlocked, try to grab it */
	decl	%edx			/* decrement inner loop count */
	jnz	5b			/* time to check for timeout? */

	/*
	 * Here after spinning INNER_LOOP_COUNT times, check for timeout
	 */
	rdtsc				/* cyclecount into %edx:%eax */
	cmpl	%ecx,%edx		/* compare high-order 32-bits */
	jb	4b			/* continue spinning if less, or */
	cmpl	%ebx,%eax		/* compare low-order 32-bits */
	jb	4b			/* continue if less, else bail */
	xor	%eax,%eax		/* with 0 return value */
	pop	%ebx
	pop	%edi
	LEAF_RET

6:
	/*
	 * Here to try to grab the lock that now appears to be free
	 * after contention.
	 */
	movl	%gs:CPU_ACTIVE_THREAD,%edx
	lock; cmpxchgl	%edx,0(%edi)	/* try to acquire the HW lock */
	jne	4b			/* no - spin again */
	movl	$1,%eax			/* yes */
	pop	%ebx
	pop	%edi
	LEAF_RET

/*
 *	void hw_lock_unlock(hw_lock_t)
 *
 *	Unconditionally release lock.
 *	MACH_RT: release preemption level.
 */
LEAF_ENTRY(hw_lock_unlock)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	$0,0(%edx)		/* clear the lock */
	ENABLE_PREEMPTION
	LEAF_RET

/*
 *	void i386_lock_unlock_with_flush(hw_lock_t)
 *
 *	Unconditionally release lock, followed by a cacheline flush of
 *	the line corresponding to the lock dword.  This routine is currently
 *	used with certain locks which are susceptible to lock starvation,
 *	minimizing cache affinity for lock acquisitions.  A queued spinlock
 *	or other mechanism that ensures fairness would obviate the need
 *	for this routine, but ideally few or no spinlocks should exhibit
 *	enough contention to require such measures.
 *	MACH_RT: release preemption level.
 */
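/*
 * A minimal C sketch of the same sequence, assuming GCC's SSE2 builtins
 * are available (the assembly issues mfence/clflush directly):
 *
 *	void unlock_with_flush_sketch(volatile uint32_t *lock)
 *	{
 *		*lock = 0;			// release the lock
 *		__builtin_ia32_mfence();	// order the store first
 *		__builtin_ia32_clflush((const void *)lock);  // evict line
 *		enable_preemption();		// illustrative helper
 *	}
 *
 * Evicting the line means no CPU retains it exclusively, so the next
 * acquirer is decided by memory arbitration rather than cache affinity.
 */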
LEAF_ENTRY(i386_lock_unlock_with_flush)
	movl	L_ARG0,%edx		/* Fetch lock pointer */
	movl	$0,0(%edx)		/* Clear the lock */
	mfence				/* Serialize prior stores */
	clflush	0(%edx)			/* Write back and invalidate line */
	ENABLE_PREEMPTION
	LEAF_RET

/*
 *	unsigned int hw_lock_try(hw_lock_t)
 *	MACH_RT: returns with preemption disabled on success.
 */
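/*
 * In C terms (a sketch; names illustrative), the try and held operations
 * below amount to:
 *
 *	unsigned int hw_lock_try_sketch(volatile uintptr_t *lock,
 *					uintptr_t self)
 *	{
 *		disable_preemption();
 *		if (*lock == 0 &&
 *		    __sync_bool_compare_and_swap(lock, 0, self))
 *			return 1;	// success: preemption stays off
 *		enable_preemption();	// failure: restore preemption
 *		return 0;
 *	}
 *
 *	unsigned int hw_lock_held_sketch(volatile uintptr_t *lock)
 *	{
 *		return *lock != 0;	// racy snapshot, by design
 *	}
 */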
LEAF_ENTRY(hw_lock_try)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	DISABLE_PREEMPTION
	movl	0(%edx),%eax
	testl	%eax,%eax
	jne	1f
	lock; cmpxchgl	%ecx,0(%edx)	/* try to acquire the HW lock */
	jne	1f

	movl	$1,%eax			/* success */
	LEAF_RET

1:
	ENABLE_PREEMPTION		/* failure: release preemption... */
	xorl	%eax,%eax		/* ...and return failure */
	LEAF_RET

/*
 *	unsigned int hw_lock_held(hw_lock_t)
 *	MACH_RT: doesn't change preemption state.
 *	N.B.  Racy, of course.
 */
LEAF_ENTRY(hw_lock_held)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	0(%edx),%eax		/* check lock value */
	testl	%eax,%eax
	movl	$1,%ecx
	cmovne	%ecx,%eax		/* 0 => unlocked, 1 => locked */
	LEAF_RET

LEAF_ENTRY(mutex_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* clear interlock */
	movl	%eax,M_LOCKED		/* clear locked flag */
	movw	%ax,M_WAITERS		/* init waiter count */
	movw	%ax,M_PROMOTED_PRI

#if	MACH_LDEBUG
	movl	$ MUTEX_TAG,M_TYPE	/* set lock type */
	movl	%eax,M_PC		/* init caller pc */
	movl	%eax,M_THREAD		/* and owning thread */
#endif

	LEAF_RET

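/*
 * The mutex slow-lock paths below all follow the same shape: take the
 * interlock (M_ILK) with a cmpxchg under cli, examine M_LOCKED, and
 * either acquire or call out to C to spin/block.  A rough C sketch of
 * mutex_lock's flow (helper names like self() and the interrupt-state
 * functions are illustrative, not the real C API):
 *
 *	void mutex_lock_sketch(struct mutex_sketch *m)
 *	{
 *		intr_state_t s = save_and_disable_interrupts();
 *	retry:
 *		while (!__sync_bool_compare_and_swap(&m->ilk, 0, self()))
 *			lck_mtx_interlock_spin(m);	// contended interlock
 *		if (m->locked != 0) {			// owned: spin/block
 *			...				// see Lml_fail below
 *			goto retry;
 *		}
 *		m->locked = self();			// grab ownership
 *		if (m->waiters)
 *			lck_mtx_lock_acquire(m);	// waiter accounting
 *		m->ilk = 0;				// drop interlock
 *		restore_interrupts(s);
 *	}
 */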
NONLEAF_ENTRY2(mutex_lock,_mutex_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	pushf				/* save interrupt state */
	cli				/* disable interrupts */
Lml_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Lml_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Lml_ilk_fail		/* no - take the slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lml_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Lml_fail		/* yes, we lose */
Lml_acquire:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

#if	MACH_LDEBUG
	movl	%ecx,M_THREAD
	movl	B_PC,%ecx
	movl	%ecx,M_PC
#endif

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Lml_waiters		/* yes, more work to do */
Lml_return:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	NONLEAF_RET

Lml_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Lml_return

Lml_ilk_fail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Lml_retry		/* try again */

Lml_fail:
	/*
	 * Check if the owner is on another processor and therefore
	 * we should try to spin before blocking.
	 */
	testl	$(OnProc),ACT_SPF(%ecx)
	jz	Lml_block

	/*
	 * Here if owner is on another processor:
	 * - release the interlock
	 * - spin on the holder until release or timeout
	 * - in either case re-acquire the interlock
	 * - if released, acquire it
	 * - otherwise drop thru to block.
	 */
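	/*
	 * Sketch of this adaptive-spin step in C (illustrative names):
	 *
	 *	m->ilk = 0;			// release the interlock
	 *	restore_interrupt_state();	// popf/pushf below
	 *	lck_mtx_lock_spin(m);		// spin while the owner runs
	 *	cli();				// then retake the interlock
	 *	while (!__sync_bool_compare_and_swap(&m->ilk, 0, self()))
	 *		lck_mtx_interlock_spin(m);
	 *	if (m->locked == 0)
	 *		goto acquire;		// holder released it
	 *	// else fall through and block in lck_mtx_lock_wait()
	 */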
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* zero interlock */
	popf
	pushf				/* restore interrupt state */

	push	%edx			/* lock address */
	call	EXT(lck_mtx_lock_spin)	/* call out to do spinning */
	addl	$4,%esp
	movl	B_ARG0,%edx		/* refetch mutex address */

	/* Re-acquire interlock */
	cli				/* disable interrupts */
Lml_reget_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Lml_reget_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Lml_ilk_refail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lml_reget_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex free? */
	je	Lml_acquire		/* yes, acquire */

Lml_block:
	CHECK_MYLOCK(M_THREAD)
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
	addl	$8,%esp
	movl	B_ARG0,%edx		/* refetch mutex address */
	cli				/* ensure interrupts disabled */
	jmp	Lml_retry		/* and try again */

Lml_ilk_refail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Lml_reget_retry		/* try again */

NONLEAF_ENTRY2(mutex_try,_mutex_try)

	movl	B_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()

	pushf				/* save interrupt state */
	cli				/* disable interrupts */
Lmt_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Lmt_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Lmt_ilk_fail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lmt_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Lmt_fail		/* yes, we lose */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

#if	MACH_LDEBUG
	movl	%ecx,M_THREAD
	movl	B_PC,%ecx
	movl	%ecx,M_PC
#endif

	cmpl	$0,M_WAITERS		/* are there any waiters? */
	jne	Lmt_waiters		/* yes, more work to do */
Lmt_return:
	xorl	%eax,%eax
	movl	%eax,M_ILK
	popf				/* restore interrupt state */

	movl	$1,%eax

	NONLEAF_RET

Lmt_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Lmt_return

Lmt_ilk_fail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Lmt_retry		/* try again */

Lmt_fail:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	xorl	%eax,%eax

	NONLEAF_RET

NONLEAF_ENTRY(mutex_unlock)
	movl	B_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_THREAD(M_THREAD)

	pushf				/* save interrupt state */
	cli				/* disable interrupts */
Lmu_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Lmu_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Lmu_ilk_fail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lmu_get_hw		/* branch on failure to retry */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Lmu_wakeup		/* yes, more work to do */

Lmu_doit:

#if	MACH_LDEBUG
	movl	$0,M_THREAD		/* disown thread */
#endif

	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */

	movl	%ecx,M_ILK

	popf				/* restore interrupt state */

	NONLEAF_RET

Lmu_ilk_fail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Lmu_retry		/* try again */

Lmu_wakeup:
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
	addl	$8,%esp
	movl	B_ARG0,%edx		/* restore lock pointer */
	jmp	Lmu_doit

/*
 * lck_mtx_lock()
 * lck_mtx_try_lock()
 * lck_mtx_unlock()
 *
 * These are variants of mutex_lock(), mutex_try() and mutex_unlock() without
 * DEBUG checks (which require fields not present in lck_mtx_t's).
 */
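/*
 * An lck_mtx_t may also be "indirect": its first word holds MUTEX_IND
 * and a pointer to the real (extended) mutex follows.  Each entry point
 * below therefore begins with, in C terms (a sketch):
 *
 *	if (m->itag == MUTEX_IND)	// extended/statistics variant?
 *		m = m->ptr;		// operate on the real mutex
 *
 * which the assembly does branchlessly with cmpl/cmove.
 */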
NONLEAF_ENTRY(lck_mtx_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	cmpl	$(MUTEX_IND),M_ITAG	/* is this indirect? */
	cmove	M_PTR,%edx		/* yes - take indirection */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	pushf				/* save interrupt state */
	cli				/* disable interrupts */
Llml_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Llml_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llml_ilk_fail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llml_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Llml_fail		/* yes, we lose */
Llml_acquire:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

	cmpl	$0,M_WAITERS		/* are there any waiters? */
	jne	Llml_waiters		/* yes, more work to do */
Llml_return:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	NONLEAF_RET

Llml_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Llml_return

Llml_ilk_fail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Llml_retry		/* try again */

Llml_fail:
	/*
	 * Check if the owner is on another processor and therefore
	 * we should try to spin before blocking.
	 */
	testl	$(OnProc),ACT_SPF(%ecx)
	jz	Llml_block

	/*
	 * Here if owner is on another processor:
	 * - release the interlock
	 * - spin on the holder until release or timeout
	 * - in either case re-acquire the interlock
	 * - if released, acquire it
	 * - otherwise drop thru to block.
	 */
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* zero interlock */
	popf
	pushf				/* restore interrupt state */

	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_spin)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */

	/* Re-acquire interlock */
	cli				/* disable interrupts */
Llml_reget_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Llml_reget_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llml_ilk_refail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llml_reget_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex free? */
	je	Llml_acquire		/* yes, acquire */

Llml_block:
	CHECK_MYLOCK(M_THREAD)
	pushl	%edx			/* save mutex address */
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
	addl	$8,%esp
	popl	%edx			/* restore mutex address */
	cli				/* ensure interrupts disabled */
	jmp	Llml_retry		/* and try again */

Llml_ilk_refail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Llml_reget_retry	/* try again */

NONLEAF_ENTRY(lck_mtx_try_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	cmpl	$(MUTEX_IND),M_ITAG	/* is this indirect? */
	cmove	M_PTR,%edx		/* yes - take indirection */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	pushf				/* save interrupt state */
	cli				/* disable interrupts */
Llmt_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Llmt_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llmt_ilk_fail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llmt_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Llmt_fail		/* yes, we lose */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

	cmpl	$0,M_WAITERS		/* are there any waiters? */
	jne	Llmt_waiters		/* yes, more work to do */
Llmt_return:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	movl	$1,%eax			/* return success */
	NONLEAF_RET

Llmt_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Llmt_return

Llmt_ilk_fail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Llmt_retry		/* try again */

Llmt_fail:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	xorl	%eax,%eax		/* return failure */
	NONLEAF_RET

NONLEAF_ENTRY(lck_mtx_unlock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	cmpl	$(MUTEX_IND),M_ITAG	/* is this indirect? */
	cmove	M_PTR,%edx		/* yes - take indirection */

	pushf				/* save interrupt state */
	cli				/* disable interrupts */
Llmu_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Llmu_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llmu_ilk_fail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llmu_get_hw		/* branch on failure to retry */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Llmu_wakeup		/* yes, more work to do */

Llmu_doit:
	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */

	movl	%ecx,M_ILK

	popf				/* restore interrupt state */

	NONLEAF_RET

Llmu_ilk_fail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Llmu_retry		/* try again */

Llmu_wakeup:
	pushl	%edx			/* save mutex address */
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
	addl	$8,%esp
	popl	%edx			/* restore mutex pointer */
	jmp	Llmu_doit

LEAF_ENTRY(lck_mtx_ilk_unlock)
	movl	L_ARG0,%edx		/* no indirection here */

	xorl	%eax,%eax
	movl	%eax,M_ILK

	LEAF_RET

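/*
 * Preemption control.  CPU_PREEMPTION_LEVEL is a per-CPU nesting count:
 * disable increments it, enable decrements it and, at zero, allows a
 * pending preemption to be taken.  In C terms (a sketch; helper names
 * are illustrative, the macros below expand to %gs-relative inc/dec):
 *
 *	void disable_preemption_sketch(void) { cpu->preemption_level++; }
 *	void enable_preemption_sketch(void)
 *	{
 *		if (--cpu->preemption_level == 0)
 *			kernel_preempt_check();	// take pending preemption
 *	}
 */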
LEAF_ENTRY(_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	%gs:CPU_PREEMPTION_LEVEL
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption: preemption_level(%d) < 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
	LEAF_RET


LEAF_ENTRY(_mp_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_mp_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	%gs:CPU_PREEMPTION_LEVEL
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption: preemption_level (%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_mp_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
	LEAF_RET


LEAF_ENTRY(i_bit_set)
	movl	L_ARG0,%edx
	movl	L_ARG1,%eax
	lock
	bts	%edx,(%eax)
	LEAF_RET

LEAF_ENTRY(i_bit_clear)
	movl	L_ARG0,%edx
	movl	L_ARG1,%eax
	lock
	btr	%edx,(%eax)
	LEAF_RET

LEAF_ENTRY(bit_lock)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
1:
	lock
	bts	%ecx,(%eax)
	jb	1b
	LEAF_RET
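
/*
 * The bit operations above and below map to GCC atomics roughly as
 * follows (a sketch; bts/btr with a register operand index past the
 * addressed dword, which the pointer arithmetic mimics):
 *
 *	void bit_lock_sketch(int bit, volatile uint32_t *addr)
 *	{
 *		uint32_t mask = 1u << (bit & 31);
 *		addr += bit >> 5;	// scale bit offset like bts/btr
 *		// spin until we are the one who set the bit
 *		while (__sync_fetch_and_or(addr, mask) & mask)
 *			;
 *	}
 *
 *	void bit_unlock_sketch(int bit, volatile uint32_t *addr)
 *	{
 *		uint32_t mask = 1u << (bit & 31);
 *		addr += bit >> 5;
 *		__sync_fetch_and_and(addr, ~mask);	// clear the bit
 *	}
 */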

LEAF_ENTRY(bit_lock_try)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
	lock
	bts	%ecx,(%eax)
	jb	bit_lock_failed
	LEAF_RET		/* %eax better not be null ! */
bit_lock_failed:
	xorl	%eax,%eax
	LEAF_RET

LEAF_ENTRY(bit_unlock)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
	lock
	btr	%ecx,(%eax)
	LEAF_RET