/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
34 #include <platforms.h>
35 #include <mach_ldebug.h>
37 #include <kern/etap_options.h>
/*
 * When performance isn't the only concern, it's
 * nice to build stack frames...
 */
#define	BUILD_STACK_FRAMES ((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB)

#if	BUILD_STACK_FRAMES

/* With a frame, arguments and ETAP locals are %ebp-relative. */
#define	L_ARG0		8(%ebp)			/* first caller argument */
#define	L_ARG1		12(%ebp)		/* second caller argument */

#define	SWT_HI		-4(%ebp)		/* accumulated wait time, high word */
#define	SWT_LO		-8(%ebp)		/* accumulated wait time, low word */
#define	MISSED		-12(%ebp)		/* "lock miss already recorded" flag */

#else	/* BUILD_STACK_FRAMES */

/* Frameless: arguments sit just above the return address on the stack. */
#define	L_ARG0		4(%esp)			/* first caller argument */
#define	L_ARG1		8(%esp)			/* second caller argument */

#endif	/* BUILD_STACK_FRAMES */

/* Mutex field accessors -- %edx must hold the mutex pointer. */
#define	M_LOCKED	MUTEX_LOCKED(%edx)
#define	M_WAITERS	MUTEX_WAITERS(%edx)
#define	M_PROMOTED_PRI	MUTEX_PROMOTED_PRI(%edx)
/*
 * NOTE(review): the fields below are debug-only; the opening
 * #if MACH_LDEBUG line is not visible in this chunk, only the
 * closing #endif below -- confirm against the full source.
 */
#define	M_TYPE		MUTEX_TYPE(%edx)
#define	M_PC		MUTEX_PC(%edx)
#define	M_THREAD	MUTEX_THREAD(%edx)
#endif	/* MACH_LDEBUG */

/* Multiprocessor: index a per-CPU array of 4-byte elements. */
#define	CX(addr,reg)	addr(,reg,4)
/*
 * Uniprocessor: CPU number is implicitly 0 and CX degenerates to a
 * plain address.  NOTE(review): the #if (NCPUS > 1)/#else lines
 * separating the two CX definitions are not visible in this chunk.
 */
#define	CPU_NUMBER(reg)
#define	CX(addr,reg)	addr
#endif	/* (NCPUS > 1) */
/*
 * Routines for general lock debugging.
 */

/* Simple-lock field accessors -- %edx must hold the lock pointer. */
#define	S_TYPE		SLOCK_TYPE(%edx)
#define	S_PC		SLOCK_PC(%edx)
#define	S_THREAD	SLOCK_THREAD(%edx)
#define	S_DURATIONH	SLOCK_DURATIONH(%edx)
#define	S_DURATIONL	SLOCK_DURATIONL(%edx)

/*
 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to Mutex functions with
 * type simplelock and vice versa.
 * NOTE(review): the branch/panic lines of these macros are not
 * visible in this chunk; only the compare and the message string
 * remain.  The trailing "; \" continuations therefore dangle here.
 */
#define	CHECK_MUTEX_TYPE()				\
	cmpl	$ MUTEX_TAG,M_TYPE		; \
2:	String	"not a mutex!"			; \

#define	CHECK_SIMPLE_LOCK_TYPE()			\
	cmpl	$ USLOCK_TAG,S_TYPE		; \
2:	String	"not a simple lock!"		; \

/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */

#if	0	/*MACH_RT - 11/12/99 - lion@apple.com disable check for now*/
#define	CHECK_PREEMPTION_LEVEL()			\
	movl	$ CPD_PREEMPTION_LEVEL,%eax	; \
	cmpl	$0,%gs:(%eax)			; \
2:	String	"preemption_level != 0!"	; \

/* NOTE(review): the #else for the disabled check above is on a
 * missing line; this empty definition is the non-checking variant. */
#define	CHECK_PREEMPTION_LEVEL()

#define	CHECK_NO_SIMPLELOCKS()				\
	movl	$ CPD_SIMPLE_LOCK_COUNT,%eax	; \
	cmpl	$0,%gs:(%eax)			; \
2:	String	"simple_locks_held!"		; \

/*
 * Verifies return to the correct thread in "unlock" situations.
 */
#define	CHECK_THREAD(thd)				\
	movl	$ CPD_ACTIVE_THREAD,%eax	; \
	movl	%gs:(%eax),%ecx			; \
2:	String	"wrong thread!"			; \

/* Panics if the current thread already owns the lock it is taking. */
#define	CHECK_MYLOCK(thd)				\
	movl	$ CPD_ACTIVE_THREAD,%eax	; \
	movl	%gs:(%eax),%ecx			; \
2:	String	"mylock attempt!"		; \
/*
 * Simple-lock usage metering hooks (debug builds).
 * NOTE(review): the register save/restore surrounding these calls is
 * on lines not visible in this chunk.
 */
#define	METER_SIMPLE_LOCK_LOCK(reg)			\
	call	EXT(meter_simple_lock)		; \

#define	METER_SIMPLE_LOCK_UNLOCK(reg)			\
	call	EXT(meter_simple_unlock)	; \

#else	/* MACH_LDEBUG */
/*
 * Non-debug build: every check and metering hook compiles away.
 * CHECK_SIMPLE_LOCK_TYPE is defined function-like (with "()") to match
 * its MACH_LDEBUG definition and its call sites, which invoke it as
 * CHECK_SIMPLE_LOCK_TYPE(); an object-like stub would leave a stray
 * "()" token pair in the assembly output.
 */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_SIMPLE_LOCK_TYPE()
#define	CHECK_THREAD(thd)
#define	CHECK_PREEMPTION_LEVEL()
#define	CHECK_NO_SIMPLELOCKS()
#define	CHECK_MYLOCK(thd)
#define	METER_SIMPLE_LOCK_LOCK(reg)
#define	METER_SIMPLE_LOCK_UNLOCK(reg)
#endif	/* MACH_LDEBUG */
/*
 * void hw_lock_init(hw_lock_t)
 *
 * Initialize a hardware lock.
 * NOTE(review): the ENTRY label, frame setup, the zeroing of %eax,
 * and the return sequence are on lines not visible in this chunk --
 * confirm against the full source.
 */
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	%eax,0(%edx)		/* clear the lock */
/*
 * void hw_lock_lock(hw_lock_t)
 *
 * Acquire lock, spinning until it becomes available.
 * MACH_RT: also return with preemption disabled.
 * NOTE(review): the ENTRY label, the load of $1 into %ecx before the
 * xchgl, the conditional branch after the testl, the ret, and the
 * "2:" spin label targeted by "jne 2b" are all on lines not visible
 * in this chunk.
 */
	movl	L_ARG0,%edx		/* fetch lock pointer */

1:	DISABLE_PREEMPTION(%eax)
	xchgl	0(%edx),%ecx		/* try to acquire the HW lock */
	testl	%ecx,%ecx		/* success? */
	movl	$1,%eax			/* In case this was a timeout call */
	EMARF				/* if yes, then nothing left to do */

3:	ENABLE_PREEMPTION(%eax)		/* no reason we can't be preemptable now */

	rep; nop			/* pause for hyper-threading */
	testl	%ecx,0(%edx)		/* spin checking lock value in cache */
	jne	2b			/* non-zero means locked, keep spinning */
	jmp	1b			/* zero means unlocked, try to grab it */
/*
 * unsigned int hw_lock_to(hw_lock_t, unsigned int)
 *
 * Acquire lock, spinning until it becomes available or timeout.
 * MACH_RT: also return with preemption disabled.
 * NOTE(review): the ENTRY label, the local labels 1:, 3:, 4:, 5:, 6:,
 * the move of the lock pointer into %edi, the move of the timeout
 * high word into %ecx, and the ret paths are on lines not visible in
 * this chunk -- the branch targets below (6f, 5b, 4b, 3b) refer to
 * those missing labels.
 */
	movl	L_ARG0,%edx		/* fetch lock pointer */

	/*
	 * Attempt to grab the lock immediately
	 * - fastpath without timeout nonsense.
	 */
	DISABLE_PREEMPTION(%eax)
	xchgl	0(%edx),%eax		/* try to acquire the HW lock */
	testl	%eax,%eax		/* success? */
	movl	$1,%eax			/* yes, return true */

#define	INNER_LOOP_COUNT	1000
	/*
	 * Failed to get the lock so set the timeout
	 * and then spin re-checking the lock but pausing
	 * every so many (INNER_LOOP_COUNT) spins to check for timeout.
	 */
	movl	L_ARG1,%ecx		/* fetch timeout */

	rdtsc				/* read cyclecount into %edx:%eax */
	addl	%ecx,%eax		/* fetch and timeout */
	adcl	$0,%edx			/* add carry */
	mov	%eax,%ebx		/* %ecx:%ebx is the timeout expiry */
	ENABLE_PREEMPTION(%eax)		/* no reason not to be preempted now */

	/*
	 * The inner-loop spin to look for the lock being freed.
	 */
	mov	$(INNER_LOOP_COUNT),%edx
	rep; nop			/* pause for hyper-threading */
	testl	%eax,0(%edi)		/* spin checking lock value in cache */
	je	6f			/* zero => unlocked, try to grab it */
	decl	%edx			/* decrement inner loop count */
	jnz	5b			/* time to check for timeout? */

	/*
	 * Here after spinning INNER_LOOP_COUNT times, check for timeout
	 */
	rdtsc				/* cyclecount into %edx:%eax */
	cmpl	%ecx,%edx		/* compare high-order 32-bits */
	jb	4b			/* continue spinning if less, or */
	cmpl	%ebx,%eax		/* compare low-order 32-bits */
	jb	5b			/* continue is less, else bail */
	xor	%eax,%eax		/* with 0 return value */

	/*
	 * Here to try to grab the lock that now appears to be free
	 */
	DISABLE_PREEMPTION(%eax)
	xchgl	0(%edi),%eax		/* try to acquire the HW lock */
	testl	%eax,%eax		/* success? */
	jne	3b			/* no - spin again */
	movl	$1,%eax			/* yes */
/*
 * void hw_lock_unlock(hw_lock_t)
 *
 * Unconditionally release lock.
 * MACH_RT: release preemption level.
 * NOTE(review): the zeroing of %eax before the xchgl and the return
 * sequence are on lines not visible in this chunk.
 */
ENTRY(hw_lock_unlock)

	movl	L_ARG0,%edx		/* fetch lock pointer */

	xchgl	0(%edx),%eax		/* clear the lock... a mov instruction */
					/* ...might be cheaper and less paranoid */
	ENABLE_PREEMPTION(%eax)
/*
 * unsigned int hw_lock_try(hw_lock_t)
 * MACH_RT: returns with preemption disabled on success.
 * NOTE(review): the ENTRY label, the load of $1 into %ecx, and the
 * ret instructions on both exit paths are on lines not visible in
 * this chunk.  Note the "jne 1f" comment reads inverted: a non-zero
 * old value means the lock was already held, i.e. failure.
 */
	movl	L_ARG0,%edx		/* fetch lock pointer */

	DISABLE_PREEMPTION(%eax)

	xchgl	0(%edx),%ecx		/* try to acquire the HW lock */
	testl	%ecx,%ecx		/* success? */
	jne	1f			/* if yes, let the caller know */

	movl	$1,%eax			/* success */

1:	ENABLE_PREEMPTION(%eax)		/* failure:  release preemption... */
	xorl	%eax,%eax		/* ...and return failure */
/*
 * unsigned int hw_lock_held(hw_lock_t)
 * MACH_RT: doesn't change preemption state.
 * N.B.  Racy, of course.
 * NOTE(review): the ENTRY label, the load of $1 into %ecx tested
 * against the lock word, and the ret instructions are on lines not
 * visible in this chunk.
 */
	movl	L_ARG0,%edx		/* fetch lock pointer */

	testl	%ecx,0(%edx)		/* check lock value */
	jne	1f			/* non-zero means locked */
	xorl	%eax,%eax		/* tell caller:  lock wasn't locked */

1:	movl	$1,%eax			/* tell caller:  lock was locked */
/*
 * Initialize a usimple lock by clearing its HW interlock word.
 * NOTE(review): the zeroing of %eax and the return sequence are on
 * lines not visible in this chunk.
 */
ENTRY(_usimple_lock_init)

	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	%eax,USL_INTERLOCK(%edx)	/* unlock the HW lock */
/*
 * _simple_lock body: spin on the usimple lock's HW interlock, then
 * (debug builds) record ownership, bump the per-CPU simple-lock
 * count, meter the acquisition, and push the lock on the lock stack.
 * NOTE(review): the ENTRY line, the sl_get_hw spin label, the load of
 * $1 into %ecx, several MACH_LDEBUG/#if openers matched by the
 * #endifs below, and the return sequence are on lines not visible in
 * this chunk.
 */
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()

	DISABLE_PREEMPTION(%eax)

	xchgl	USL_INTERLOCK(%edx),%ecx/* try to acquire the HW lock */
	testl	%ecx,%ecx		/* did we succeed? */

	CHECK_MYLOCK(S_THREAD)
#else	/* MACH_LDEBUG */
	jne	sl_get_hw		/* no, try again */
#endif	/* MACH_LDEBUG */

	movl	$ CPD_ACTIVE_THREAD,%eax
	incl	CX(EXT(simple_lock_count),%eax)

	METER_SIMPLE_LOCK_LOCK(%edx)

	call	EXT(lock_stack_push)
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */
/*
 * _simple_lock_try: one-shot attempt on the usimple lock's HW
 * interlock; returns 1 on success (with preemption disabled), 0 on
 * failure.
 * NOTE(review): the load of $1 into %ecx, the "1:" failure label
 * targeted by "jne 1f", the #if openers matched by the #endifs
 * below, and the ret on each path are on lines not visible in this
 * chunk.
 */
ENTRY(_simple_lock_try)

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()

	DISABLE_PREEMPTION(%eax)

	xchgl	USL_INTERLOCK(%edx),%ecx/* try to acquire the HW lock */
	testl	%ecx,%ecx		/* did we succeed? */
	jne	1f			/* no, return failure */

	movl	$ CPD_ACTIVE_THREAD,%eax
	incl	CX(EXT(simple_lock_count),%eax)

	METER_SIMPLE_LOCK_LOCK(%edx)

	call	EXT(lock_stack_push)
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	movl	$1,%eax			/* return success */

	ENABLE_PREEMPTION(%eax)
	xorl	%eax,%eax		/* and return failure */
/*
 * _simple_unlock: release a usimple lock -- (debug builds) disown the
 * thread, decrement the per-CPU simple-lock count, meter and pop the
 * lock stack, then clear the HW interlock and re-enable preemption.
 * NOTE(review): the zeroing of %eax/%ecx before the stores, the #if
 * openers matched by the #endifs below, and the return sequence are
 * on lines not visible in this chunk.
 */
ENTRY(_simple_unlock)

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()
	CHECK_THREAD(S_THREAD)

	movl	%eax,S_THREAD		/* disown thread */
	MP_DISABLE_PREEMPTION(%eax)
	decl	CX(EXT(simple_lock_count),%eax)
	MP_ENABLE_PREEMPTION(%eax)

	METER_SIMPLE_LOCK_UNLOCK(%edx)

	call	EXT(lock_stack_pop)
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	xchgl	USL_INTERLOCK(%edx),%ecx	/* unlock the HW lock */

	ENABLE_PREEMPTION(%eax)
/*
 * mutex_init body: zero the interlock, locked flag, waiter count and
 * promoted priority; (debug builds) tag the type and clear pc/owner;
 * (ETAP builds) register the mutex with ETAP.
 * NOTE(review): the ENTRY line, the zeroing of %eax before these
 * stores, the #if openers matched by the #endif below, and the
 * return sequence are on lines not visible in this chunk.
 */
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	%eax,M_ILK		/* clear interlock */
	movl	%eax,M_LOCKED		/* clear locked flag */
	movw	%ax,M_WAITERS		/* init waiter count */
	movw	%ax,M_PROMOTED_PRI

	movl	$ MUTEX_TAG,M_TYPE	/* set lock type */
	movl	%eax,M_PC		/* init caller pc */
	movl	%eax,M_THREAD		/* and owning thread */

	movl	L_ARG1,%ecx		/* fetch event type */
	pushl	%ecx			/* push event type */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_init)	/* init ETAP data */
#endif	/* ETAP_LOCK_TRACE */
/*
 * mutex_lock / _mutex_lock
 *
 * Acquire a mutex: take the interlock, try to set the locked flag,
 * and on contention record an ETAP miss timestamp and block in
 * mutex_lock_wait before retrying.
 * NOTE(review): many lines of this routine (FRAME setup, the
 * interlock xchgl, the labels ml_retry/ml_get_hw/ml_fail/ml_block,
 * the interlock release, #if openers matched by the #endifs below,
 * and the ret path) are not visible in this chunk; the comments
 * below hedge accordingly.
 */
ENTRY2(mutex_lock,_mutex_lock)

	subl	$12,%esp		/* make room for locals */
	movl	$0,SWT_HI		/* set wait time to zero (HI) */
	movl	$0,SWT_LO		/* set wait time to zero (LO) */
	movl	$0,MISSED		/* clear local miss marker */
#endif	/* ETAP_LOCK_TRACE */

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	DISABLE_PREEMPTION(%eax)

	testl	%ecx,%ecx		/* did we succeed? */
	jne	ml_get_hw		/* no, try again */

	xchgl	%ecx,M_LOCKED		/* try to set locked flag */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	ml_fail			/* yes, we lose */

	call	EXT(mutex_lock_acquire)

	movl	$ CPD_ACTIVE_THREAD,%eax
	incl	TH_MUTEX_COUNT(%ecx)

	ENABLE_PREEMPTION(%eax)

	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* collect hold timestamp */
	addl	$16+12,%esp		/* clean up stack, adjusting for locals */
#endif	/* ETAP_LOCK_TRACE */

	/* Contention path: take the ETAP wait timestamp at most once. */
	cmp	$0,MISSED		/* did we already take a wait timestamp? */
	jne	ml_block		/* yup. carry-on */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_miss)	/* get wait timestamp */
	movl	%eax,SWT_HI		/* set wait time (high word) */
	movl	%edx,SWT_LO		/* set wait time (low word) */
	popl	%edx			/* clean up stack */
	movl	$1,MISSED		/* mark wait timestamp as taken */
#endif	/* ETAP_LOCK_TRACE */

	CHECK_MYLOCK(M_THREAD)
	pushl	%eax			/* no promotion here yet */
	pushl	%edx			/* push mutex address */
	call	EXT(mutex_lock_wait)	/* wait for the lock */
	movl	L_ARG0,%edx		/* refetch lock pointer */
	jmp	ml_retry		/* and try again */
/*
 * mutex_try / _mutex_try
 *
 * One-shot mutex acquisition attempt; does not block.
 * NOTE(review): the interlock xchgl/locked-flag attempt, the
 * success/failure branches and labels, the zeroing stores, the #if
 * openers matched by the #endifs below, and the ret paths are on
 * lines not visible in this chunk.  The two similar
 * ENABLE_PREEMPTION/etap_mutex_hold sequences below appear to be the
 * success and failure exit paths respectively -- confirm against the
 * full source.
 */
ENTRY2(mutex_try,_mutex_try)

	subl	$8,%esp			/* make room for locals */
	movl	$0,SWT_HI		/* set wait time to zero (HI) */
	movl	$0,SWT_LO		/* set wait time to zero (LO) */
#endif	/* ETAP_LOCK_TRACE */

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_NO_SIMPLELOCKS()

	DISABLE_PREEMPTION(%eax)

	call	EXT(mutex_lock_acquire)

	movl	$ CPD_ACTIVE_THREAD,%ecx
	incl	TH_MUTEX_COUNT(%ecx)

	ENABLE_PREEMPTION(%eax)

	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* get start hold timestamp */
	addl	$16,%esp		/* clean up stack, adjusting for locals */
#endif	/* ETAP_LOCK_TRACE */

#if	MACH_LDEBUG || ETAP_LOCK_TRACE
	addl	$8,%esp			/* pop stack claimed on entry */

	ENABLE_PREEMPTION(%eax)

	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* get start hold timestamp */
	addl	$16,%esp		/* clean up stack, adjusting for locals */
#endif	/* ETAP_LOCK_TRACE */

#if	MACH_LDEBUG || ETAP_LOCK_TRACE
	addl	$8,%esp			/* pop stack claimed on entry */
/*
 * mutex_unlock body: collect ETAP data, take the interlock, and if
 * there are waiters hand off via mutex_unlock_wakeup; otherwise
 * disown the thread, drop the mutex count, clear the locked flag and
 * release.
 * NOTE(review): the ENTRY line, the interlock xchgl whose result the
 * first testl examines, the mu_get_hw/mu_wakeup label definitions,
 * the zeroing of %eax/%ecx before the disown/unlock stores, the #if
 * openers, and the ret/jmp back from the wakeup path are on lines
 * not visible in this chunk.
 */
	movl	L_ARG0,%edx		/* fetch lock pointer */

	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_unlock)	/* collect ETAP data */
	popl	%edx			/* restore mutex address */
#endif	/* ETAP_LOCK_TRACE */

	CHECK_THREAD(M_THREAD)

	DISABLE_PREEMPTION(%eax)

	testl	%ecx,%ecx		/* did we succeed? */
	jne	mu_get_hw		/* no, try again */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	mu_wakeup		/* yes, more work to do */

	movl	%eax,M_THREAD		/* disown thread */
	movl	$ CPD_ACTIVE_THREAD,%eax
	decl	TH_MUTEX_COUNT(%ecx)

	xchgl	%ecx,M_LOCKED		/* unlock the mutex */

	ENABLE_PREEMPTION(%eax)

	/* Waiter hand-off path. */
	pushl	%eax			/* no promotion here yet */
	pushl	%edx			/* push mutex address */
	call	EXT(mutex_unlock_wakeup)/* yes, wake a thread */
	movl	L_ARG0,%edx		/* refetch lock pointer */
/*
 * Interlock release and preemption-level entry points.
 * NOTE(review): each routine below is missing lines in this chunk --
 * the interlock-clearing store in interlock_unlock, the ret
 * instructions, the #if MACH_ASSERT openers matched by the #endif
 * lines, and the compare/branch/panic sequences around each String
 * message.  Code is left byte-for-byte as found.
 */
ENTRY(interlock_unlock)

	ENABLE_PREEMPTION(%eax)

/* Bump the per-CPU preemption level. */
ENTRY(_disable_preemption)
	_DISABLE_PREEMPTION(%eax)

/* Drop the preemption level, asserting (MACH_ASSERT) it stays >= 0. */
ENTRY(_enable_preemption)
	movl	$ CPD_PREEMPTION_LEVEL,%eax
2:	String	"_enable_preemption: preemption_level(%d) < 0!"
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION(%eax)

/* As above but without triggering a preemption check. */
ENTRY(_enable_preemption_no_check)
	movl	$ CPD_PREEMPTION_LEVEL,%eax
2:	String	"_enable_preemption_no_check: preemption_level <= 0!"
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK(%eax)

/* MP-only variants: no-ops unless MACH_RT on a multiprocessor. */
ENTRY(_mp_disable_preemption)
#if	MACH_RT && NCPUS > 1
	_DISABLE_PREEMPTION(%eax)
#endif	/* MACH_RT && NCPUS > 1*/

ENTRY(_mp_enable_preemption)
#if	MACH_RT && NCPUS > 1
	movl	$ CPD_PREEMPTION_LEVEL,%eax
2:	String	"_mp_enable_preemption: preemption_level (%d) <= 0!"
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION(%eax)
#endif	/* MACH_RT && NCPUS > 1 */

ENTRY(_mp_enable_preemption_no_check)
#if	MACH_RT && NCPUS > 1
	movl	$ CPD_PREEMPTION_LEVEL,%eax
2:	String	"_mp_enable_preemption_no_check: preemption_level <= 0!"
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK(%eax)
#endif	/* MACH_RT && NCPUS > 1 */

	/* NOTE(review): orphaned return -- the routine it belongs to
	 * starts on lines not visible in this chunk. */
	ret				/* %eax better not be null ! */