2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
23 * @APPLE_LICENSE_HEADER_END@
29 * Mach Operating System
30 * Copyright (c) 1989 Carnegie-Mellon University
31 * All rights reserved. The CMU software License Agreement specifies
32 * the terms and conditions for use and redistribution.
37 #include <platforms.h>
38 #include <mach_ldebug.h>
40 #include <kern/etap_options.h>
/*
 * NOTE(review): extraction appears to have dropped lines throughout this
 * region (the #if MACH_LDEBUG / #if (NCPUS > 1) openers are missing);
 * comments below describe the visible definitions only.
 */
45 * When performance isn't the only concern, it's
46 * nice to build stack frames...
48 #define BUILD_STACK_FRAMES ((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB)
/*
 * Argument/local accessors. With stack frames built (%ebp-based):
 * L_ARG0/L_ARG1 are the first two caller arguments above the saved
 * frame pointer; SWT_HI/SWT_LO/MISSED are ETAP locals below it.
 */
50 #if BUILD_STACK_FRAMES
53 #define L_ARG0 8(%ebp)
54 #define L_ARG1 12(%ebp)
56 #define SWT_HI -4(%ebp)
57 #define SWT_LO -8(%ebp)
58 #define MISSED -12(%ebp)
60 #else /* BUILD_STACK_FRAMES */
/* Frameless variant: args are addressed relative to %esp (return addr at 0). */
67 #define L_ARG0 4(%esp)
68 #define L_ARG1 8(%esp)
70 #endif /* BUILD_STACK_FRAMES */
/*
 * Mutex field accessors: each expands to the named field offset applied
 * to the mutex pointer held in %edx (the convention used by all mutex
 * routines below).
 */
74 #define M_LOCKED MUTEX_LOCKED(%edx)
75 #define M_WAITERS MUTEX_WAITERS(%edx)
76 #define M_PROMOTED_PRI MUTEX_PROMOTED_PRI(%edx)
/* Debug-only mutex fields (guarded by MACH_LDEBUG; the #if line was lost). */
78 #define M_TYPE MUTEX_TYPE(%edx)
79 #define M_PC MUTEX_PC(%edx)
80 #define M_THREAD MUTEX_THREAD(%edx)
81 #endif /* MACH_LDEBUG */
/*
 * CX(addr,reg): index a per-CPU array of 32-bit words by CPU number.
 * Multi-CPU form scales the CPU number by 4; the uniprocessor form
 * (below, under the lost #else) ignores the register entirely.
 */
85 #define CX(addr,reg) addr(,reg,4)
87 #define CPU_NUMBER(reg)
88 #define CX(addr,reg) addr
89 #endif /* (NCPUS > 1) */
93 * Routines for general lock debugging.
/*
 * Simple-lock (usimple) debug field accessors, again relative to the
 * lock pointer in %edx.
 */
95 #define S_TYPE SLOCK_TYPE(%edx)
96 #define S_PC SLOCK_PC(%edx)
97 #define S_THREAD SLOCK_THREAD(%edx)
98 #define S_DURATIONH SLOCK_DURATIONH(%edx)
99 #define S_DURATIONL SLOCK_DURATIONL(%edx)
/*
 * MACH_LDEBUG sanity-check macros. Each expands to an inline compare
 * plus a panic path (the panic call lines were lost in extraction; the
 * visible "2: String ..." lines are the panic-message literals).
 */
102 * Checks for expected lock types and calls "panic" on
103 * mismatch. Detects calls to Mutex functions with
104 * type simplelock and vice versa.
/* Panic unless M_TYPE carries the mutex tag. */
106 #define CHECK_MUTEX_TYPE() \
107 cmpl $ MUTEX_TAG,M_TYPE ; \
113 2: String "not a mutex!" ; \
117 #define CHECK_SIMPLE_LOCK_TYPE() \
118 cmpl $ USLOCK_TAG,S_TYPE ; \
124 2: String "not a simple lock!" ; \
129 * If one or more simplelocks are currently held by a thread,
130 * an attempt to acquire a mutex will cause this check to fail
131 * (since a mutex lock may context switch, holding a simplelock
132 * is not a good thing).
/* Check deliberately disabled since 1999 — see the #if 0 guard below. */
134 #if 0 /*MACH_RT - 11/12/99 - lion@apple.com disable check for now*/
135 #define CHECK_PREEMPTION_LEVEL() \
136 movl $ CPD_PREEMPTION_LEVEL,%eax ; \
137 cmpl $0,%gs:(%eax) ; \
143 2: String "preemption_level != 0!" ; \
/* Active (empty) definition while the check above stays disabled. */
147 #define CHECK_PREEMPTION_LEVEL()
/* Panic if this CPU's simple-lock count (via %gs cpu-data) is non-zero. */
150 #define CHECK_NO_SIMPLELOCKS() \
151 movl $ CPD_SIMPLE_LOCK_COUNT,%eax ; \
152 cmpl $0,%gs:(%eax) ; \
158 2: String "simple_locks_held!" ; \
163 * Verifies return to the correct thread in "unlock" situations.
/* Panic if the lock's recorded owner (thd) is not the active thread. */
165 #define CHECK_THREAD(thd) \
166 movl $ CPD_ACTIVE_THREAD,%eax ; \
167 movl %gs:(%eax),%ecx ; \
176 2: String "wrong thread!" ; \
180 #define CHECK_MYLOCK(thd) \
181 movl $ CPD_ACTIVE_THREAD,%eax ; \
182 movl %gs:(%eax),%ecx ; \
191 2: String "mylock attempt!" ; \
195 #define METER_SIMPLE_LOCK_LOCK(reg) \
197 call EXT(meter_simple_lock) ; \
200 #define METER_SIMPLE_LOCK_UNLOCK(reg) \
202 call EXT(meter_simple_unlock) ; \
205 #else /* MACH_LDEBUG */
/*
 * Non-debug build: every lock-debug check and metering hook compiles
 * away to nothing.
 * FIX(review): CHECK_SIMPLE_LOCK_TYPE was defined object-like (no
 * parentheses) while every call site invokes it as
 * CHECK_SIMPLE_LOCK_TYPE(); that expansion would leave a stray "()"
 * token in the assembly. Made it function-like to match the
 * MACH_LDEBUG definition and its siblings.
 */
206 #define CHECK_MUTEX_TYPE()
207 #define CHECK_SIMPLE_LOCK_TYPE()
208 #define CHECK_THREAD(thd)
209 #define CHECK_PREEMPTION_LEVEL()
210 #define CHECK_NO_SIMPLELOCKS()
211 #define CHECK_MYLOCK(thd)
212 #define METER_SIMPLE_LOCK_LOCK(reg)
213 #define METER_SIMPLE_LOCK_UNLOCK(reg)
214 #endif /* MACH_LDEBUG */
218 * void hw_lock_init(hw_lock_t)
220 * Initialize a hardware lock.
/* NOTE(review): the ENTRY(hw_lock_init) label and the %eax-zeroing line
 * were lost in extraction; visible code stores a (presumably zeroed)
 * %eax into the lock word. */
224 movl L_ARG0,%edx /* fetch lock pointer */
226 movl %eax,0(%edx) /* clear the lock */
231 * void hw_lock_lock(hw_lock_t)
233 * Acquire lock, spinning until it becomes available.
234 * MACH_RT: also return with preemption disabled.
/* NOTE(review): ENTRY label, the $1->%ecx load, the success branch and
 * the ret were dropped by extraction; structure inferred from the
 * surviving lines and local labels 1/2/3. */
238 movl L_ARG0,%edx /* fetch lock pointer */
/* Attempt path: take the lock with an atomic xchg while non-preemptible. */
240 1: DISABLE_PREEMPTION(%eax)
242 xchgl 0(%edx),%ecx /* try to acquire the HW lock */
243 testl %ecx,%ecx /* success? */
245 movl $1,%eax /* In case this was a timeout call */
246 EMARF /* if yes, then nothing left to do */
/* Failure path: re-enable preemption and spin read-only on the cached
 * lock word (no bus-locked writes) until it looks free, then retry. */
249 3: ENABLE_PREEMPTION(%eax) /* no reason we can't be preemptable now */
253 rep; nop /* pause for hyper-threading */
254 testl %ecx,0(%edx) /* spin checking lock value in cache */
255 jne 2b /* non-zero means locked, keep spinning */
256 jmp 1b /* zero means unlocked, try to grab it */
259 * unsigned int hw_lock_to(hw_lock_t, unsigned int)
261 * Acquire lock, spinning until it becomes available or timeout.
262 * MACH_RT: also return with preemption disabled.
/* NOTE(review): ENTRY label, register saves (%ebx/%edi), the
 * %edx->%edi copy, numeric labels 3/4/5/6 and the ret/EMARF lines were
 * dropped by extraction. The spin loop reads 0(%edi), so the lock
 * pointer is presumably copied into %edi before the slow path — TODO
 * confirm against the full source. */
266 movl L_ARG0,%edx /* fetch lock pointer */
269 * Attempt to grab the lock immediately
270 * - fastpath without timeout nonsense.
272 DISABLE_PREEMPTION(%eax)
274 xchgl 0(%edx),%eax /* try to acquire the HW lock */
275 testl %eax,%eax /* success? */
277 movl $1,%eax /* yes, return true */
282 #define INNER_LOOP_COUNT 1000
284 * Failed to get the lock so set the timeout
285 * and then spin re-checking the lock but pausing
286 * every so many (INNER_LOOP_COUNT) spins to check for timeout.
288 movl L_ARG1,%ecx /* fetch timeout */
/* Compute absolute expiry time in cycles: expiry = rdtsc + timeout. */
293 rdtsc /* read cyclecount into %edx:%eax */
294 addl %ecx,%eax /* expiry low = now_lo + timeout */
295 adcl $0,%edx /* add carry */
297 mov %eax,%ebx /* %ecx:%ebx is the timeout expiry */
299 ENABLE_PREEMPTION(%eax) /* no reason not to be preempted now */
302 * The inner-loop spin to look for the lock being freed.
305 mov $(INNER_LOOP_COUNT),%edx
307 rep; nop /* pause for hyper-threading */
308 testl %eax,0(%edi) /* spin checking lock value in cache */
309 je 6f /* zero => unlocked, try to grab it */
310 decl %edx /* decrement inner loop count */
311 jnz 5b /* time to check for timeout? */
314 * Here after spinning INNER_LOOP_COUNT times, check for timeout
/* 64-bit compare of current TSC against the %ecx:%ebx expiry. */
316 rdtsc /* cyclecount into %edx:%eax */
317 cmpl %ecx,%edx /* compare high-order 32-bits */
318 jb 4b /* continue spinning if less, or */
319 cmpl %ebx,%eax /* compare low-order 32-bits */
320 jb 5b /* continue if less, else bail */
321 xor %eax,%eax /* with 0 return value */
329 * Here to try to grab the lock that now appears to be free
/* Race for the lock; on failure fall back to the outer spin loop. */
332 DISABLE_PREEMPTION(%eax)
334 xchgl 0(%edi),%eax /* try to acquire the HW lock */
335 testl %eax,%eax /* success? */
336 jne 3b /* no - spin again */
337 movl $1,%eax /* yes */
344 * void hw_lock_unlock(hw_lock_t)
346 * Unconditionally release lock.
347 * MACH_RT: release preemption level.
349 ENTRY(hw_lock_unlock)
/* NOTE(review): the %eax-zeroing line before the xchg and the trailing
 * EMARF/ret were dropped by extraction. */
351 movl L_ARG0,%edx /* fetch lock pointer */
353 xchgl 0(%edx),%eax /* clear the lock... a mov instruction */
354 /* ...might be cheaper and less paranoid */
355 ENABLE_PREEMPTION(%eax)
360 * unsigned int hw_lock_try(hw_lock_t)
361 * MACH_RT: returns with preemption disabled on success.
/* One-shot acquire: returns 1 on success (preemption stays disabled),
 * 0 on failure (preemption re-enabled). ENTRY label, the $1->%ecx load
 * and the rets were lost in extraction. */
365 movl L_ARG0,%edx /* fetch lock pointer */
367 DISABLE_PREEMPTION(%eax)
369 xchgl 0(%edx),%ecx /* try to acquire the HW lock */
370 testl %ecx,%ecx /* success? */
371 jne 1f /* nonzero: already held, take failure path */
373 movl $1,%eax /* success */
377 1: ENABLE_PREEMPTION(%eax) /* failure: release preemption... */
378 xorl %eax,%eax /* ...and return failure */
383 * unsigned int hw_lock_held(hw_lock_t)
384 * MACH_RT: doesn't change preemption state.
385 * N.B. Racy, of course.
/* Non-atomic read of the lock word: 1 if it looks locked, 0 otherwise.
 * ENTRY label and the %ecx setup line were lost in extraction. */
389 movl L_ARG0,%edx /* fetch lock pointer */
392 testl %ecx,0(%edx) /* check lock value */
393 jne 1f /* non-zero means locked */
394 xorl %eax,%eax /* tell caller: lock wasn't locked */
398 1: movl $1,%eax /* tell caller: lock was locked */
/* Initialize a usimple lock: release its embedded HW interlock.
 * (The %eax-zeroing line appears lost in extraction.) */
407 ENTRY(_usimple_lock_init)
409 movl L_ARG0,%edx /* fetch lock pointer */
411 movl %eax,USL_INTERLOCK(%edx) /* unlock the HW lock */
/* _simple_lock: spin until the usimple lock's HW interlock is taken,
 * then (MACH_LDEBUG) record owner/pc, bump the per-CPU lock count and
 * push onto the lock stack. NOTE(review): the ENTRY label, sl_get_hw
 * label, retry loop structure and MACH_LDEBUG #if openers were dropped
 * by extraction; only fragments remain. */
417 movl L_ARG0,%edx /* fetch lock pointer */
419 CHECK_SIMPLE_LOCK_TYPE()
421 DISABLE_PREEMPTION(%eax)
425 xchgl USL_INTERLOCK(%edx),%ecx/* try to acquire the HW lock */
426 testl %ecx,%ecx /* did we succeed? */
/* Debug build: a self-deadlock attempt panics instead of spinning. */
430 CHECK_MYLOCK(S_THREAD)
433 #else /* MACH_LDEBUG */
434 jne sl_get_hw /* no, try again */
435 #endif /* MACH_LDEBUG */
/* Debug bookkeeping below (owner thread, per-CPU count, metering). */
440 movl $ CPD_ACTIVE_THREAD,%eax
443 incl CX(EXT(simple_lock_count),%eax)
445 METER_SIMPLE_LOCK_LOCK(%edx)
451 call EXT(lock_stack_push)
454 #endif /* NCPUS == 1 */
455 #endif /* MACH_LDEBUG */
/* _simple_lock_try: one-shot acquire of a usimple lock. Returns 1 on
 * success (preemption left disabled), 0 on failure. NOTE(review): the
 * $1->%ecx load, MACH_LDEBUG #if openers and rets were lost in
 * extraction. */
460 ENTRY(_simple_lock_try)
462 movl L_ARG0,%edx /* fetch lock pointer */
464 CHECK_SIMPLE_LOCK_TYPE()
466 DISABLE_PREEMPTION(%eax)
469 xchgl USL_INTERLOCK(%edx),%ecx/* try to acquire the HW lock */
470 testl %ecx,%ecx /* did we succeed? */
471 jne 1f /* no, return failure */
/* Debug bookkeeping (owner, per-CPU count, metering, lock stack). */
476 movl $ CPD_ACTIVE_THREAD,%eax
479 incl CX(EXT(simple_lock_count),%eax)
481 METER_SIMPLE_LOCK_LOCK(%edx)
487 call EXT(lock_stack_push)
490 #endif /* NCPUS == 1 */
491 #endif /* MACH_LDEBUG */
493 movl $1,%eax /* return success */
/* Failure path: drop the preemption hold taken above. */
499 ENABLE_PREEMPTION(%eax)
501 xorl %eax,%eax /* and return failure */
/* _simple_unlock: release a usimple lock — undo debug bookkeeping,
 * release the HW interlock, re-enable preemption. NOTE(review): the
 * MACH_LDEBUG #if openers, the %ecx zeroing before the xchg and the
 * ret were lost in extraction. */
506 ENTRY(_simple_unlock)
508 movl L_ARG0,%edx /* fetch lock pointer */
510 CHECK_SIMPLE_LOCK_TYPE()
511 CHECK_THREAD(S_THREAD)
512 movl %eax,S_THREAD /* disown thread */
516 MP_DISABLE_PREEMPTION(%eax)
518 decl CX(EXT(simple_lock_count),%eax)
519 MP_ENABLE_PREEMPTION(%eax)
521 METER_SIMPLE_LOCK_UNLOCK(%edx)
527 call EXT(lock_stack_pop)
530 #endif /* NCPUS == 1 */
531 #endif /* MACH_LDEBUG */
534 xchgl USL_INTERLOCK(%edx),%ecx /* unlock the HW lock */
536 ENABLE_PREEMPTION(%eax)
/* mutex_init: zero all mutex fields, set the debug type tag, and
 * (ETAP builds) initialize tracing state. NOTE(review): the ENTRY
 * label, %eax zeroing, #if MACH_LDEBUG / #if ETAP_LOCK_TRACE openers,
 * the post-call stack cleanup and ret were lost in extraction. */
546 movl L_ARG0,%edx /* fetch lock pointer */
548 movl %eax,M_ILK /* clear interlock */
549 movl %eax,M_LOCKED /* clear locked flag */
550 movw %ax,M_WAITERS /* init waiter count */
551 movw %ax,M_PROMOTED_PRI
/* MACH_LDEBUG fields: tag so CHECK_MUTEX_TYPE can validate later. */
554 movl $ MUTEX_TAG,M_TYPE /* set lock type */
555 movl %eax,M_PC /* init caller pc */
556 movl %eax,M_THREAD /* and owning thread */
/* ETAP_LOCK_TRACE: hand the mutex and its event type to the tracer. */
559 movl L_ARG1,%ecx /* fetch event type */
560 pushl %ecx /* push event type */
561 pushl %edx /* push mutex address */
562 call EXT(etap_mutex_init) /* init ETAP data */
564 #endif /* ETAP_LOCK_TRACE */
/* mutex_lock / _mutex_lock: blocking mutex acquire.
 * Shape (from the surviving fragments): take the interlock, try to set
 * M_LOCKED; on contention record an ETAP miss timestamp and call
 * mutex_lock_wait, then retry. NOTE(review): the FRAME, ml_retry /
 * ml_get_hw / ml_fail / ml_block labels, interlock xchg, several #if
 * openers and the EMARF/ret were lost in extraction. */
569 ENTRY2(mutex_lock,_mutex_lock)
/* ETAP locals: zeroed wait-time accumulator and a miss marker. */
573 subl $12,%esp /* make room for locals */
574 movl $0,SWT_HI /* set wait time to zero (HI) */
575 movl $0,SWT_LO /* set wait time to zero (LO) */
576 movl $0,MISSED /* clear local miss marker */
577 #endif /* ETAP_LOCK_TRACE */
579 movl L_ARG0,%edx /* fetch lock pointer */
582 CHECK_NO_SIMPLELOCKS()
583 CHECK_PREEMPTION_LEVEL()
/* Grab the interlock (spin via ml_get_hw on contention). */
586 DISABLE_PREEMPTION(%eax)
591 testl %ecx,%ecx /* did we succeed? */
592 jne ml_get_hw /* no, try again */
595 xchgl %ecx,M_LOCKED /* try to set locked flag */
596 testl %ecx,%ecx /* is the mutex locked? */
597 jne ml_fail /* yes, we lose */
/* Acquired: let the scheduler apply any pending priority promotion. */
600 call EXT(mutex_lock_acquire)
/* MACH_LDEBUG: record owner and bump its held-mutex count. */
607 movl $ CPD_ACTIVE_THREAD,%eax
612 incl TH_MUTEX_COUNT(%ecx)
619 ENABLE_PREEMPTION(%eax)
/* ETAP: log hold-start timestamp with accumulated wait time and pc. */
622 movl L_PC,%eax /* fetch pc */
623 pushl SWT_LO /* push wait time (low) */
624 pushl SWT_HI /* push wait time (high) */
625 pushl %eax /* push pc */
626 pushl %edx /* push mutex address */
627 call EXT(etap_mutex_hold) /* collect hold timestamp */
628 addl $16+12,%esp /* clean up stack, adjusting for locals */
629 #endif /* ETAP_LOCK_TRACE */
/* Contention path (ml_fail): take the wait timestamp only once. */
636 cmp $0,MISSED /* did we already take a wait timestamp? */
637 jne ml_block /* yup. carry-on */
638 pushl %edx /* push mutex address */
639 call EXT(etap_mutex_miss) /* get wait timestamp */
640 movl %eax,SWT_HI /* set wait time (high word) */
641 movl %edx,SWT_LO /* set wait time (low word) */
642 popl %edx /* clean up stack */
643 movl $1,MISSED /* mark wait timestamp as taken */
644 #endif /* ETAP_LOCK_TRACE */
/* Block on the mutex, then loop back and retry the whole acquire. */
647 CHECK_MYLOCK(M_THREAD)
649 pushl %eax /* no promotion here yet */
650 pushl %edx /* push mutex address */
651 call EXT(mutex_lock_wait) /* wait for the lock */
653 movl L_ARG0,%edx /* refetch lock pointer */
654 jmp ml_retry /* and try again */
/* mutex_try / _mutex_try: non-blocking mutex acquire; returns 1 on
 * success, 0 if the mutex (or its interlock) is busy. NOTE(review):
 * the interlock/locked-flag xchg sequence, mt_fail label, the $1/$0
 * return-value loads, several #if openers and the rets were lost in
 * extraction; two similar ETAP epilogues survive (success and failure
 * paths). */
656 ENTRY2(mutex_try,_mutex_try)
/* ETAP locals: zeroed wait-time accumulator (no miss marker here). */
660 subl $8,%esp /* make room for locals */
661 movl $0,SWT_HI /* set wait time to zero (HI) */
662 movl $0,SWT_LO /* set wait time to zero (LO) */
663 #endif /* ETAP_LOCK_TRACE */
665 movl L_ARG0,%edx /* fetch lock pointer */
668 CHECK_NO_SIMPLELOCKS()
670 DISABLE_PREEMPTION(%eax)
/* Acquired: apply pending promotion, record owner (MACH_LDEBUG). */
684 call EXT(mutex_lock_acquire)
691 movl $ CPD_ACTIVE_THREAD,%ecx
696 incl TH_MUTEX_COUNT(%ecx)
703 ENABLE_PREEMPTION(%eax)
/* ETAP epilogue (success path). */
706 movl L_PC,%eax /* fetch pc */
707 pushl SWT_LO /* push wait time (low) */
708 pushl SWT_HI /* push wait time (high) */
709 pushl %eax /* push pc */
710 pushl %edx /* push mutex address */
711 call EXT(etap_mutex_hold) /* get start hold timestamp */
712 addl $16,%esp /* clean up stack, adjusting for locals */
713 #endif /* ETAP_LOCK_TRACE */
717 #if MACH_LDEBUG || ETAP_LOCK_TRACE
719 addl $8,%esp /* pop stack claimed on entry */
/* Failure path epilogue below (presumably mt_fail) — mirrors the
 * success epilogue before returning 0. */
730 ENABLE_PREEMPTION(%eax)
733 movl L_PC,%eax /* fetch pc */
734 pushl SWT_LO /* push wait time (low) */
735 pushl SWT_HI /* push wait time (high) */
736 pushl %eax /* push pc */
737 pushl %edx /* push mutex address */
738 call EXT(etap_mutex_hold) /* get start hold timestamp */
739 addl $16,%esp /* clean up stack, adjusting for locals */
740 #endif /* ETAP_LOCK_TRACE */
744 #if MACH_LDEBUG || ETAP_LOCK_TRACE
746 addl $8,%esp /* pop stack claimed on entry */
/* mutex_unlock / _mutex_unlock: release a mutex; if waiters exist,
 * branch to mu_wakeup to wake one via mutex_unlock_wakeup and retry.
 * NOTE(review): the ENTRY2 line, mu_get_hw / mu_wakeup labels, the
 * interlock xchg, interlock release and ret were lost in extraction. */
755 movl L_ARG0,%edx /* fetch lock pointer */
/* ETAP: close out the hold interval for this mutex. */
758 pushl %edx /* push mutex address */
759 call EXT(etap_mutex_unlock) /* collect ETAP data */
760 popl %edx /* restore mutex address */
761 #endif /* ETAP_LOCK_TRACE */
764 CHECK_THREAD(M_THREAD)
/* Take the interlock before touching M_WAITERS / M_LOCKED. */
766 DISABLE_PREEMPTION(%eax)
771 testl %ecx,%ecx /* did we succeed? */
772 jne mu_get_hw /* no, try again */
774 cmpw $0,M_WAITERS /* are there any waiters? */
775 jne mu_wakeup /* yes, more work to do */
/* MACH_LDEBUG: clear owner and drop the thread's held-mutex count. */
780 movl %eax,M_THREAD /* disown thread */
781 movl $ CPD_ACTIVE_THREAD,%eax
785 decl TH_MUTEX_COUNT(%ecx)
790 xchgl %ecx,M_LOCKED /* unlock the mutex */
795 ENABLE_PREEMPTION(%eax)
/* mu_wakeup: hand the mutex to mutex_unlock_wakeup, then retry. */
802 pushl %eax /* no promotion here yet */
803 pushl %edx /* push mutex address */
804 call EXT(mutex_unlock_wakeup)/* yes, wake a thread */
806 movl L_ARG0,%edx /* refetch lock pointer */
/* interlock_unlock: release a raw HW interlock and drop the matching
 * preemption hold. (Lock-clearing lines and ret lost in extraction.) */
809 ENTRY(interlock_unlock)
816 ENABLE_PREEMPTION(%eax)
/* _disable_preemption: bump this CPU's preemption level (MACH_RT);
 * the #if MACH_RT guard and ret were lost in extraction. */
822 ENTRY(_disable_preemption)
824 _DISABLE_PREEMPTION(%eax)
/* _enable_preemption: drop the preemption level, preempting if it
 * reaches zero. MACH_ASSERT builds first panic if the level is already
 * <= 0 (the compare/branch/panic lines were lost in extraction; the
 * String line is the panic message). */
828 ENTRY(_enable_preemption)
831 movl $ CPD_PREEMPTION_LEVEL,%eax
839 2: String "_enable_preemption: preemption_level(%d) < 0!"
842 #endif /* MACH_ASSERT */
843 _ENABLE_PREEMPTION(%eax)
/* _enable_preemption_no_check: drop the preemption level WITHOUT
 * checking for a pending preemption. MACH_ASSERT sanity check as in
 * _enable_preemption (panic lines lost in extraction). */
847 ENTRY(_enable_preemption_no_check)
850 movl $ CPD_PREEMPTION_LEVEL,%eax
857 2: String "_enable_preemption_no_check: preemption_level <= 0!"
860 #endif /* MACH_ASSERT */
861 _ENABLE_PREEMPTION_NO_CHECK(%eax)
/* _mp_disable_preemption: multiprocessor-only preemption disable;
 * compiles to nothing unless MACH_RT && NCPUS > 1. (ret lost.) */
866 ENTRY(_mp_disable_preemption)
867 #if MACH_RT && NCPUS > 1
868 _DISABLE_PREEMPTION(%eax)
869 #endif /* MACH_RT && NCPUS > 1*/
/* _mp_enable_preemption: multiprocessor-only counterpart of
 * _enable_preemption, with the same MACH_ASSERT level check (the
 * compare/branch/panic lines were lost in extraction). */
872 ENTRY(_mp_enable_preemption)
873 #if MACH_RT && NCPUS > 1
875 movl $ CPD_PREEMPTION_LEVEL,%eax
883 2: String "_mp_enable_preemption: preemption_level (%d) <= 0!"
886 #endif /* MACH_ASSERT */
887 _ENABLE_PREEMPTION(%eax)
888 #endif /* MACH_RT && NCPUS > 1 */
/* _mp_enable_preemption_no_check: multiprocessor-only counterpart of
 * _enable_preemption_no_check; same MACH_ASSERT sanity check (panic
 * lines lost in extraction). */
891 ENTRY(_mp_enable_preemption_no_check)
892 #if MACH_RT && NCPUS > 1
894 movl $ CPD_PREEMPTION_LEVEL,%eax
901 2: String "_mp_enable_preemption_no_check: preemption_level <= 0!"
904 #endif /* MACH_ASSERT */
905 _ENABLE_PREEMPTION_NO_CHECK(%eax)
906 #endif /* MACH_RT && NCPUS > 1 */
939 ret /* %eax better not be null ! */