/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <platforms.h>
#include <mach_ldebug.h>
/* "rep; nop" is the encoding of the PAUSE spin-loop hint */
#define	PAUSE		rep; nop
/*
 * When performance isn't the only concern, it's
 * nice to build stack frames...
 */
#define	BUILD_STACK_FRAMES (GPROF || \
	((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB))
#if	BUILD_STACK_FRAMES

/* Stack-frame-relative: */

#define	LEAF_ENTRY(name)	\
	Entry(name); FRAME; MCOUNT

#define	LEAF_ENTRY2(n1,n2)	\
	Entry(n1); Entry(n2); FRAME; MCOUNT
#else	/* BUILD_STACK_FRAMES */

/* Stack-pointer-relative: */

#define	LEAF_ENTRY(name)	\
	Entry(name)

#define	LEAF_ENTRY2(n1,n2)	\
	Entry(n1); Entry(n2)

#endif	/* BUILD_STACK_FRAMES */
/* Non-leaf routines always have a stack frame: */

#define	NONLEAF_ENTRY(name)	\
	Entry(name); FRAME; MCOUNT

#define	NONLEAF_ENTRY2(n1,n2)	\
	Entry(n1); Entry(n2); FRAME; MCOUNT

#define	NONLEAF_RET	\
	EMARF; ret
#define	M_ILK		(%edx)
#define	M_LOCKED	MUTEX_LOCKED(%edx)
#define	M_WAITERS	MUTEX_WAITERS(%edx)
#define	M_PROMOTED_PRI	MUTEX_PROMOTED_PRI(%edx)
#define	M_ITAG		MUTEX_ITAG(%edx)
#define	M_PTR		MUTEX_PTR(%edx)
#if	MACH_LDEBUG
#define	M_TYPE		MUTEX_TYPE(%edx)
#define	M_PC		MUTEX_PC(%edx)
#define	M_THREAD	MUTEX_THREAD(%edx)
#endif	/* MACH_LDEBUG */
#define	CX(addr,reg)	addr(,reg,4)
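/*
 * Illustrative note (a sketch, not part of the build): CX(addr,reg)
 * is scaled indexing -- "addr(,reg,4)" addresses the reg'th 32-bit
 * element of a table at addr.  In C, assuming a hypothetical
 * uint32_t table:
 *
 *	#include <stdint.h>
 *
 *	static inline uint32_t
 *	cx_load(const uint32_t *addr, uint32_t reg)
 *	{
 *		return addr[reg];	// addr + reg*4, as in addr(,reg,4)
 *	}
 */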
#if	MACH_LDEBUG
/*
 *  Routines for general lock debugging.
 */
#define	S_TYPE		SLOCK_TYPE(%edx)
#define	S_PC		SLOCK_PC(%edx)
#define	S_THREAD	SLOCK_THREAD(%edx)
#define	S_DURATIONH	SLOCK_DURATIONH(%edx)
#define	S_DURATIONL	SLOCK_DURATIONL(%edx)
/*
 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to Mutex functions with
 * type simplelock and vice versa.
 */
#define	CHECK_MUTEX_TYPE()				\
	cmpl	$ MUTEX_TAG,M_TYPE		;	\
	je	1f				;	\
	pushl	$2f; call EXT(panic); hlt	;	\
	.data; 2: String "not a mutex!"; .text	;	\
1:
#define	CHECK_SIMPLE_LOCK_TYPE()			\
	cmpl	$ USLOCK_TAG,S_TYPE		;	\
	je	1f				;	\
	pushl	$2f; call EXT(panic); hlt	;	\
	.data; 2: String "not a simple lock!"; .text ;	\
1:
/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */
#if	MACH_RT
#define	CHECK_PREEMPTION_LEVEL()			\
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	;	\
	je	1f				;	\
	pushl	$2f; call EXT(panic); hlt	;	\
	.data; 2: String "preemption_level != 0!"; .text ; \
1:
#else	/* MACH_RT */
#define	CHECK_PREEMPTION_LEVEL()
#endif	/* MACH_RT */
#define	CHECK_NO_SIMPLELOCKS()				\
	cmpl	$0,%gs:CPU_SIMPLE_LOCK_COUNT	;	\
	je	1f				;	\
	pushl	$2f; call EXT(panic); hlt	;	\
	.data; 2: String "simple_locks_held!"; .text ;	\
1:
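/*
 * Illustrative C sketch (a paraphrase under assumed names, not kernel
 * source): what CHECK_PREEMPTION_LEVEL() and CHECK_NO_SIMPLELOCKS()
 * verify before a mutex operation may legitimately block.
 *
 *	// hypothetical accessors for the %gs:CPU_... per-cpu fields
 *	extern int cpu_preemption_level(void);
 *	extern int cpu_simple_lock_count(void);
 *	extern void panic(const char *str, ...);
 *
 *	static inline void
 *	check_blocking_preconditions(void)
 *	{
 *		if (cpu_preemption_level() != 0)
 *			panic("preemption_level != 0!");
 *		if (cpu_simple_lock_count() != 0)
 *			panic("simple_locks_held!");
 *	}
 */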
/*
 * Verifies return to the correct thread in "unlock" situations.
 */
#define	CHECK_THREAD(thd)				\
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	je	1f				;	\
	pushl	$2f; call EXT(panic); hlt	;	\
	.data; 2: String "wrong thread!"; .text	;	\
1:
#define	CHECK_MYLOCK(thd)				\
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	jne	1f				;	\
	pushl	$2f; call EXT(panic); hlt	;	\
	.data; 2: String "mylock attempt!"; .text ;	\
1:
#define	METER_SIMPLE_LOCK_LOCK(reg)		\
	pushl	reg			;	\
	call	EXT(meter_simple_lock)	;	\
	popl	reg

#define	METER_SIMPLE_LOCK_UNLOCK(reg)		\
	pushl	reg			;	\
	call	EXT(meter_simple_unlock) ;	\
	popl	reg
#else	/* MACH_LDEBUG */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_SIMPLE_LOCK_TYPE()
#define	CHECK_THREAD(thd)
#define	CHECK_PREEMPTION_LEVEL()
#define	CHECK_NO_SIMPLELOCKS()
#define	CHECK_MYLOCK(thd)
#define	METER_SIMPLE_LOCK_LOCK(reg)
#define	METER_SIMPLE_LOCK_UNLOCK(reg)
#endif	/* MACH_LDEBUG */
/*
 *	void hw_lock_init(hw_lock_t)
 *
 *	Initialize a hardware lock.
 */
LEAF_ENTRY(hw_lock_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	$0,0(%edx)		/* clear the lock */
	LEAF_RET
/*
 *	void hw_lock_lock(hw_lock_t)
 *
 *	Acquire lock, spinning until it becomes available.
 *	MACH_RT: also return with preemption disabled.
 */
LEAF_ENTRY(hw_lock_lock)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	L_PC,%ecx		/* caller's pc is the lock value */
1:	DISABLE_PREEMPTION
	movl	0(%edx),%eax		/* read lock value */
	testl	%eax,%eax		/* lock locked? */
	jne	3f			/* branch if so */
	lock; cmpxchgl	%ecx,0(%edx)	/* try to acquire the HW lock */
	jne	3f			/* branch on failure */
	movl	$1,%eax			/* In case this was a timeout call */
	LEAF_RET			/* if yes, then nothing left to do */

3:	ENABLE_PREEMPTION		/* no reason we can't be preemptable */
	PAUSE				/* pause for hyper-threading */
	jmp	1b			/* try again */
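/*
 * Illustrative C sketch of the loop above (assumptions: the lock is a
 * single word that is 0 when free; disable_preemption() and
 * enable_preemption() are hypothetical stand-ins for the MACH_RT
 * macros; the GCC builtins stand in for lock; cmpxchgl and PAUSE).
 *
 *	static void
 *	hw_lock_lock_sketch(volatile unsigned int *lock, unsigned int tag)
 *	{
 *		for (;;) {
 *			disable_preemption();
 *			if (*lock == 0 &&
 *			    __sync_bool_compare_and_swap(lock, 0u, tag))
 *				return;		// acquired; preemption stays off
 *			enable_preemption();	// don't spin non-preemptibly
 *			__builtin_ia32_pause();	// PAUSE, for hyper-threading
 *		}
 *	}
 */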
/*
 *	unsigned int hw_lock_to(hw_lock_t, unsigned int)
 *
 *	Acquire lock, spinning until it becomes available or timeout.
 *	MACH_RT: also return with preemption disabled.
 */
LEAF_ENTRY(hw_lock_to)
1:
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	L_PC,%ecx
	/*
	 * Attempt to grab the lock immediately
	 * - fastpath without timeout nonsense.
	 */
	DISABLE_PREEMPTION
	movl	0(%edx),%eax		/* read lock value */
	testl	%eax,%eax		/* lock locked? */
	jne	2f			/* branch if so */
	lock; cmpxchgl	%ecx,0(%edx)	/* try to acquire the HW lock */
	jne	2f			/* branch on failure */
	movl	$1,%eax
	LEAF_RET
2:
#define	INNER_LOOP_COUNT	1000
	/*
	 * Failed to get the lock so set the timeout
	 * and then spin re-checking the lock but pausing
	 * every so many (INNER_LOOP_COUNT) spins to check for timeout.
	 */
	movl	L_ARG1,%ecx		/* fetch timeout */
	push	%edi			/* save %edi and %ebx... */
	push	%ebx
	mov	%edx,%edi		/* ...and keep lock pointer in %edi */

	rdtsc				/* read cyclecount into %edx:%eax */
	addl	%ecx,%eax		/* fetch and timeout */
	adcl	$0,%edx			/* add carry */
	mov	%edx,%ecx
	mov	%eax,%ebx		/* %ecx:%ebx is the timeout expiry */
3:
	ENABLE_PREEMPTION		/* no reason not to be preempted now */
4:
	/*
	 * The inner-loop spin to look for the lock being freed.
	 */
	mov	$(INNER_LOOP_COUNT),%edx
5:
	PAUSE				/* pause for hyper-threading */
	movl	0(%edi),%eax		/* spin checking lock value in cache */
	testl	%eax,%eax
	je	6f			/* zero => unlocked, try to grab it */
	decl	%edx			/* decrement inner loop count */
	jnz	5b			/* time to check for timeout? */
	/*
	 * Here after spinning INNER_LOOP_COUNT times, check for timeout
	 */
	rdtsc				/* cyclecount into %edx:%eax */
	cmpl	%ecx,%edx		/* compare high-order 32-bits */
	jb	4b			/* continue spinning if less, or */
	cmpl	%ebx,%eax		/* compare low-order 32-bits */
	jb	5b			/* continue if less, else bail */
	xor	%eax,%eax		/* with 0 return value */
	pop	%ebx
	pop	%edi
	LEAF_RET
6:
	/*
	 * Here to try to grab the lock that now appears to be free
	 * after contention.
	 */
	movl	8+L_PC,%edx		/* calling pc (8+ for pushed regs) */
	DISABLE_PREEMPTION
	lock; cmpxchgl	%edx,0(%edi)	/* try to acquire the HW lock */
	jne	3b			/* no - spin again */
	movl	$1,%eax			/* yes */
	pop	%ebx
	pop	%edi
	LEAF_RET
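/*
 * Illustrative C sketch of the timeout spin above, simplified
 * (assumptions: __builtin_ia32_rdtsc() stands in for rdtsc; the
 * preemption enable/disable around the contended spin is omitted;
 * batching INNER_LOOP_COUNT PAUSEs per rdtsc keeps the cycle-counter
 * read off the hot path, as in the assembly).
 *
 *	static unsigned int
 *	hw_lock_to_sketch(volatile unsigned int *lock,
 *			  unsigned int timeout, unsigned int tag)
 *	{
 *		unsigned long long expiry = __builtin_ia32_rdtsc() + timeout;
 *
 *		do {
 *			for (int i = 0; i < 1000; i++) {   // INNER_LOOP_COUNT
 *				__builtin_ia32_pause();
 *				if (*lock == 0 &&
 *				    __sync_bool_compare_and_swap(lock, 0u, tag))
 *					return 1;	   // acquired
 *			}
 *		} while (__builtin_ia32_rdtsc() < expiry);
 *		return 0;				   // timed out
 *	}
 */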
/*
 *	void hw_lock_unlock(hw_lock_t)
 *
 *	Unconditionally release lock.
 *	MACH_RT: release preemption level.
 */
LEAF_ENTRY(hw_lock_unlock)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	$0,0(%edx)		/* clear the lock */
	ENABLE_PREEMPTION
	LEAF_RET
/*
 *	unsigned int hw_lock_try(hw_lock_t)
 *	MACH_RT: returns with preemption disabled on success.
 */
LEAF_ENTRY(hw_lock_try)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	L_PC,%ecx
	DISABLE_PREEMPTION
	movl	0(%edx),%eax		/* lock locked? */
	testl	%eax,%eax
	jne	1f			/* branch if so */
	lock; cmpxchgl	%ecx,0(%edx)	/* try to acquire the HW lock */
	jne	1f			/* branch on failure */
	movl	$1,%eax			/* success */
	LEAF_RET

1:	ENABLE_PREEMPTION		/* failure: release preemption... */
	xorl	%eax,%eax		/* ...and return failure */
	LEAF_RET
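/*
 * Illustrative C sketch: hw_lock_try() is the same CAS attempt with no
 * spin; on failure it re-enables preemption and reports 0.  The
 * preemption calls are hypothetical stand-ins for the MACH_RT macros.
 *
 *	static unsigned int
 *	hw_lock_try_sketch(volatile unsigned int *lock, unsigned int tag)
 *	{
 *		disable_preemption();
 *		if (*lock == 0 && __sync_bool_compare_and_swap(lock, 0u, tag))
 *			return 1;	// success: preemption stays disabled
 *		enable_preemption();
 *		return 0;		// failure
 *	}
 */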
/*
 *	unsigned int hw_lock_held(hw_lock_t)
 *	MACH_RT: doesn't change preemption state.
 *	N.B.  Racy, of course.
 */
LEAF_ENTRY(hw_lock_held)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	0(%edx),%eax		/* check lock value */
	testl	%eax,%eax
	movl	$1,%ecx
	cmovne	%ecx,%eax		/* 0 => unlocked, 1 => locked */
	LEAF_RET
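/*
 * Illustrative C sketch: hw_lock_held() is a bare (racy) read; the
 * cmovne above just normalizes any non-zero lock word to 1.
 *
 *	static unsigned int
 *	hw_lock_held_sketch(volatile unsigned int *lock)
 *	{
 *		return *lock != 0;	// 0 => unlocked, 1 => locked
 *	}
 */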
LEAF_ENTRY(mutex_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* clear interlock */
	movl	%eax,M_LOCKED		/* clear locked flag */
	movw	%ax,M_WAITERS		/* init waiter count */
	movw	%ax,M_PROMOTED_PRI

#if	MACH_LDEBUG
	movl	$ MUTEX_TAG,M_TYPE	/* set lock type */
	movl	%eax,M_PC		/* init caller pc */
	movl	%eax,M_THREAD		/* and owning thread */
#endif

	LEAF_RET
NONLEAF_ENTRY2(mutex_lock,_mutex_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	pushf				/* save interrupt state */
	cli				/* disable interrupts */

ml_retry:
	movl	B_PC,%ecx		/* caller's pc tags the interlock */

ml_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	1f			/* yes - attempt to lock it */
	PAUSE				/* no - pause */
	jmp	ml_get_hw		/* try again */
1:
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	ml_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	ml_fail			/* yes, we lose */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED		/* take ownership */

#if	MACH_LDEBUG
	movl	%ecx,M_THREAD
	movl	B_PC,%ecx
	movl	%ecx,M_PC
#endif

	pushl	%edx			/* save mutex address */
	pushl	%edx			/* push mutex address (argument) */
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */

	xorl	%eax,%eax
	movl	%eax,M_ILK		/* release interlock */

	popf				/* restore interrupt state */

	NONLEAF_RET

ml_fail:
	CHECK_MYLOCK(M_THREAD)
	pushl	M_LOCKED		/* push current holder */
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
	addl	$8,%esp
	movl	B_ARG0,%edx		/* refetch mutex address */
	jmp	ml_retry		/* and try again */
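/*
 * Illustrative C sketch of the interlock protocol above, heavily
 * simplified (assumptions: struct fields named after the M_* macros;
 * ilk_lock()/ilk_unlock() stand in for the PAUSE + lock; cmpxchgl spin
 * on M_ILK; the pushf/cli/popf interrupt handling and the hand-off of
 * the interlock into lck_mtx_lock_wait() are glossed over).
 *
 *	static void
 *	mutex_lock_sketch(struct mutex_sketch *m)
 *	{
 *	retry:
 *		ilk_lock(m);				// grab interlock
 *		if (m->locked != NULL) {		// mutex owned?
 *			lck_mtx_lock_wait(m, m->locked);// sleep until woken
 *			goto retry;			// then start over
 *		}
 *		m->locked = current_thread();		// take ownership
 *		lck_mtx_lock_acquire(m);		// priority bookkeeping
 *		ilk_unlock(m);				// drop interlock
 *	}
 */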
NONLEAF_ENTRY2(mutex_try,_mutex_try)

	movl	B_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()

	movl	B_PC,%ecx

	pushf				/* save interrupt state */
	cli				/* disable interrupts */

mt_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	1f			/* yes - attempt to lock it */
	PAUSE				/* no - pause */
	jmp	mt_get_hw		/* try again */
1:
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	mt_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	mt_fail			/* yes, we lose */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED		/* take ownership */

#if	MACH_LDEBUG
	movl	%ecx,M_THREAD
	movl	B_PC,%ecx
	movl	%ecx,M_PC
#endif

	pushl	%edx			/* save mutex address */
	pushl	%edx			/* push mutex address (argument) */
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */

	xorl	%eax,%eax
	movl	%eax,M_ILK		/* release interlock */

	popf				/* restore interrupt state */

	movl	$1,%eax			/* return success */
	NONLEAF_RET

mt_fail:
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* release interlock */

	popf				/* restore interrupt state */

	xorl	%eax,%eax		/* return failure */
	NONLEAF_RET
NONLEAF_ENTRY(mutex_unlock)
	movl	B_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_THREAD(M_THREAD)

	movl	B_PC,%ecx

	pushf				/* save interrupt state */
	cli				/* disable interrupts */

mu_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	1f			/* yes - attempt to lock it */
	PAUSE				/* no - pause */
	jmp	mu_get_hw		/* try again */
1:
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	mu_get_hw		/* branch on failure to retry */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	mu_wakeup		/* yes, more work to do */

mu_doit:
#if	MACH_LDEBUG
	movl	$0,M_THREAD		/* disown thread */
#endif

	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */

	movl	%ecx,M_ILK		/* release interlock */

	popf				/* restore interrupt state */

	NONLEAF_RET

mu_wakeup:
	pushl	M_LOCKED		/* push current holder */
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
	addl	$8,%esp
	movl	B_ARG0,%edx		/* restore lock pointer */
	jmp	mu_doit
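/*
 * Illustrative C sketch of the unlock path, with the same caveats as
 * the mutex_lock sketch: take the interlock, wake a waiter first if
 * the waiter count is non-zero, then clear ownership and release.
 *
 *	static void
 *	mutex_unlock_sketch(struct mutex_sketch *m)
 *	{
 *		ilk_lock(m);				// grab interlock
 *		if (m->waiters != 0)			// cmpw $0,M_WAITERS
 *			lck_mtx_unlock_wakeup(m, m->locked);
 *		m->locked = NULL;			// unlock the mutex
 *		ilk_unlock(m);				// drop interlock
 *	}
 */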
/*
 * These are variants of mutex_lock(), mutex_try() and mutex_unlock() without
 * DEBUG checks (which require fields not present in lck_mtx_t's).
 */
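/*
 * Illustrative C sketch of the MUTEX_IND indirection used by the
 * lck_mtx_* entry points below (the cmpl $(MUTEX_IND),M_ITAG / cmove
 * pair): an lck_mtx_t is either the mutex itself or a tagged pointer
 * to the real, extended mutex.  Field names paraphrase the macros.
 *
 *	static struct mutex_sketch *
 *	resolve_indirection(struct mutex_sketch *m)
 *	{
 *		if (m->itag == MUTEX_IND)	// indirect lock?
 *			m = m->ptr;		// follow M_PTR
 *		return m;
 *	}
 */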
NONLEAF_ENTRY(lck_mtx_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	cmpl	$(MUTEX_IND),M_ITAG	/* is this indirect? */
	cmove	M_PTR,%edx		/* yes - take indirection */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	pushf				/* save interrupt state */
	cli				/* disable interrupts */

lml_retry:
	movl	B_PC,%ecx

lml_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	1f			/* yes - attempt to lock it */
	PAUSE				/* no - pause */
	jmp	lml_get_hw		/* try again */
1:
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	lml_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	lml_fail		/* yes, we lose */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED		/* take ownership */

	pushl	%edx			/* save mutex address */
	pushl	%edx			/* push mutex address (argument) */
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */

	xorl	%eax,%eax
	movl	%eax,M_ILK		/* release interlock */

	popf				/* restore interrupt state */

	NONLEAF_RET

lml_fail:
	CHECK_MYLOCK(M_THREAD)
	pushl	%edx			/* save mutex address */
	pushl	M_LOCKED		/* push current holder */
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
	addl	$8,%esp
	popl	%edx			/* restore mutex address */
	jmp	lml_retry		/* and try again */
NONLEAF_ENTRY(lck_mtx_try_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	cmpl	$(MUTEX_IND),M_ITAG	/* is this indirect? */
	cmove	M_PTR,%edx		/* yes - take indirection */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	movl	B_PC,%ecx

	pushf				/* save interrupt state */
	cli				/* disable interrupts */

lmt_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	1f			/* yes - attempt to lock it */
	PAUSE				/* no - pause */
	jmp	lmt_get_hw		/* try again */
1:
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	lmt_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	lmt_fail		/* yes, we lose */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED		/* take ownership */

	pushl	%edx			/* save mutex address */
	pushl	%edx			/* push mutex address (argument) */
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */

	xorl	%eax,%eax
	movl	%eax,M_ILK		/* release interlock */

	popf				/* restore interrupt state */

	movl	$1,%eax			/* return success */
	NONLEAF_RET

lmt_fail:
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* release interlock */

	popf				/* restore interrupt state */

	xorl	%eax,%eax		/* return failure */
	NONLEAF_RET
NONLEAF_ENTRY(lck_mtx_unlock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	cmpl	$(MUTEX_IND),M_ITAG	/* is this indirect? */
	cmove	M_PTR,%edx		/* yes - take indirection */

	movl	B_PC,%ecx

	pushf				/* save interrupt state */
	cli				/* disable interrupts */

lmu_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	1f			/* yes - attempt to lock it */
	PAUSE				/* no - pause */
	jmp	lmu_get_hw		/* try again */
1:
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	lmu_get_hw		/* branch on failure to retry */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	lmu_wakeup		/* yes, more work to do */

lmu_doit:
	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */

	movl	%ecx,M_ILK		/* release interlock */

	popf				/* restore interrupt state */

	NONLEAF_RET

lmu_wakeup:
	pushl	%edx			/* save mutex address */
	pushl	M_LOCKED		/* push current holder */
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
	addl	$8,%esp
	popl	%edx			/* restore mutex pointer */
	jmp	lmu_doit
LEAF_ENTRY(lck_mtx_ilk_unlock)
	movl	L_ARG0,%edx		/* no indirection here */

	xorl	%eax,%eax
	movl	%eax,M_ILK		/* release interlock */

	LEAF_RET
LEAF_ENTRY(_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET
LEAF_ENTRY(_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	%gs:CPU_PREEMPTION_LEVEL
	pushl	$2f; call EXT(panic); hlt
	.data; 2: String "_enable_preemption: preemption_level(%d) < 0!"; .text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET
LEAF_ENTRY(_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	$2f; call EXT(panic); hlt
	.data; 2: String "_enable_preemption_no_check: preemption_level <= 0!"; .text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
	LEAF_RET
LEAF_ENTRY(_mp_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET
LEAF_ENTRY(_mp_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	%gs:CPU_PREEMPTION_LEVEL
	pushl	$2f; call EXT(panic); hlt
	.data; 2: String "_mp_enable_preemption: preemption_level (%d) <= 0!"; .text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET
LEAF_ENTRY(_mp_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	$2f; call EXT(panic); hlt
	.data; 2: String "_mp_enable_preemption_no_check: preemption_level <= 0!"; .text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
	LEAF_RET
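/*
 * Illustrative C sketch of the discipline the MACH_ASSERT checks above
 * enforce (assumption: a per-cpu counter behind
 * %gs:CPU_PREEMPTION_LEVEL, with hypothetical accessors).
 *
 *	static void
 *	enable_preemption_sketch(void)
 *	{
 *		int level = cpu_preemption_level();
 *
 *		if (level <= 0)		// enable without matching disable
 *			panic("_enable_preemption: preemption_level(%d) < 0!",
 *			    level);
 *		set_cpu_preemption_level(level - 1);
 *	}
 */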
LEAF_ENTRY(i_bit_set)
	movl	L_ARG0,%edx		/* bit number */
	movl	L_ARG1,%eax		/* word address */
	lock; bts %edx,(%eax)		/* set the bit, atomically */
	LEAF_RET

LEAF_ENTRY(i_bit_clear)
	movl	L_ARG0,%edx		/* bit number */
	movl	L_ARG1,%eax		/* word address */
	lock; btr %edx,(%eax)		/* clear the bit, atomically */
	LEAF_RET
LEAF_ENTRY(bit_lock_try)
	movl	L_ARG0,%ecx		/* bit number */
	movl	L_ARG1,%eax		/* word address */
	lock; bts %ecx,(%eax)		/* set the bit, test old value */
	jb	bit_lock_failed		/* already set - lose */
	LEAF_RET			/* %eax better not be null ! */
bit_lock_failed:
	xorl	%eax,%eax		/* return failure */
	LEAF_RET
LEAF_ENTRY(bit_unlock)
	movl	L_ARG0,%ecx		/* bit number */
	movl	L_ARG1,%eax		/* word address */
	lock; btr %ecx,(%eax)		/* clear the bit, atomically */
	LEAF_RET
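/*
 * Illustrative C sketch of the bit-lock primitives above (the GCC
 * atomic builtins stand in for lock; bts and lock; btr).
 *
 *	static int
 *	bit_lock_try_sketch(int bit, volatile unsigned int *word)
 *	{
 *		unsigned int mask = 1u << bit;
 *		// lock; bts: set the bit, test its previous value
 *		return (__sync_fetch_and_or(word, mask) & mask) == 0;
 *	}
 *
 *	static void
 *	bit_unlock_sketch(int bit, volatile unsigned int *word)
 *	{
 *		__sync_fetch_and_and(word, ~(1u << bit));   // lock; btr
 *	}
 */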