/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <platforms.h>
#include <mach_ldebug.h>
#include <kern/etap_options.h>
/*
 *	When performance isn't the only concern, it's
 *	nice to build stack frames...
 */
#define	BUILD_STACK_FRAMES	((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB)
#if	BUILD_STACK_FRAMES

#define	L_ARG0		8(%ebp)
#define	L_ARG1		12(%ebp)

#define	SWT_HI		-4(%ebp)
#define	SWT_LO		-8(%ebp)
#define	MISSED		-12(%ebp)

#else	/* BUILD_STACK_FRAMES */

#define	L_ARG0		4(%esp)
#define	L_ARG1		8(%esp)

#endif	/* BUILD_STACK_FRAMES */
#define	M_LOCKED	1(%edx)
#define	M_WAITERS	2(%edx)
#if	MACH_LDEBUG
#define	M_TYPE		4(%edx)
#define	M_THREAD	12(%edx)
#endif	/* MACH_LDEBUG */

#if	(NCPUS > 1)
#include <i386/AT386/mp/mp.h>
#define	CX(addr,reg)	addr(,reg,4)
#else	/* (NCPUS == 1) */
#define	CPU_NUMBER(reg)
#define	CX(addr,reg)	addr
#endif	/* (NCPUS > 1) */
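
/*
 * Clarifying note (illustration only, not from the original source):
 * CX(addr,reg) indexes a per-CPU array of 32-bit cells, scaling the CPU
 * number in `reg' by 4 on multiprocessor builds and collapsing to a plain
 * global on uniprocessor builds.  Roughly, in C:
 *
 *	extern int simple_lock_count[];		// one counter per CPU (declaration assumed)
 *
 *	// incl CX(EXT(simple_lock_count),%eax), with %eax = cpu number, is roughly:
 *	void bump_lock_count(int cpu) { simple_lock_count[cpu]++; }
 */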
#if	MACH_LDEBUG
/*
 *  Routines for general lock debugging.
 */
#define	S_TYPE		4(%edx)
#define	S_THREAD	12(%edx)
#define	S_DURATIONH	16(%edx)
#define	S_DURATIONL	20(%edx)
/*
 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to Mutex functions with
 * type simplelock and vice versa.
 */
#define	CHECK_MUTEX_TYPE()				\
	cmpl	$MUTEX_TAG,M_TYPE		;	\
2:	String	"not a mutex!"			;	\

#define	CHECK_SIMPLE_LOCK_TYPE()			\
	cmpl	$SIMPLE_LOCK_TAG,S_TYPE		;	\
2:	String	"not a simple lock!"		;	\
/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */
#if	0	/* MACH_RT - 11/12/99 - lion@apple.com disable check for now */
#define	CHECK_PREEMPTION_LEVEL()			\
	movl	$CPD_PREEMPTION_LEVEL,%eax	;	\
	cmpl	$0,%gs:(%eax)			;	\
2:	String	"preemption_level != 0!"	;	\

#else
#define	CHECK_PREEMPTION_LEVEL()
#endif
#define	CHECK_NO_SIMPLELOCKS()				\
	movl	$CPD_SIMPLE_LOCK_COUNT,%eax	;	\
	cmpl	$0,%gs:(%eax)			;	\
2:	String	"simple_locks_held!"		;	\
/*
 * Verifies return to the correct thread in "unlock" situations.
 */
#define	CHECK_THREAD(thd)				\
	movl	$CPD_ACTIVE_THREAD,%eax		;	\
	movl	%gs:(%eax),%ecx			;	\
2:	String	"wrong thread!"			;	\

#define	CHECK_MYLOCK(thd)				\
	movl	$CPD_ACTIVE_THREAD,%eax		;	\
	movl	%gs:(%eax),%ecx			;	\
2:	String	"mylock attempt!"		;	\

#define	METER_SIMPLE_LOCK_LOCK(reg)			\
	call	EXT(meter_simple_lock)		;	\

#define	METER_SIMPLE_LOCK_UNLOCK(reg)			\
	call	EXT(meter_simple_unlock)	;	\
#else	/* MACH_LDEBUG */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_SIMPLE_LOCK_TYPE()
#define	CHECK_THREAD(thd)
#define	CHECK_PREEMPTION_LEVEL()
#define	CHECK_NO_SIMPLELOCKS()
#define	CHECK_MYLOCK(thd)
#define	METER_SIMPLE_LOCK_LOCK(reg)
#define	METER_SIMPLE_LOCK_UNLOCK(reg)
#endif	/* MACH_LDEBUG */
/*
 *	void hw_lock_init(hw_lock_t)
 *
 *	Initialize a hardware lock.
 */
ENTRY(hw_lock_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movb	%al,0(%edx)		/* clear the lock */
/*
 *	void hw_lock_lock(hw_lock_t)
 *
 *	Acquire lock, spinning until it becomes available.
 *	MACH_RT: also return with preemption disabled.
 */
ENTRY(hw_lock_lock)
	movl	L_ARG0,%edx		/* fetch lock pointer */

1:	DISABLE_PREEMPTION(%eax)
	xchgb	0(%edx),%cl		/* try to acquire the HW lock */
	testb	%cl,%cl			/* success? */
	EMARF				/* if yes, then nothing left to do */

3:	ENABLE_PREEMPTION(%eax)		/* no reason we can't be preemptable now */

2:	testb	%cl,0(%edx)		/* spin checking lock value in cache */
	jne	2b			/* non-zero means locked, keep spinning */
	jmp	1b			/* zero means unlocked, try to grab it */
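
/*
 * Illustrative C sketch of the loop above (an assumption, not the build
 * source): a test-and-set acquire with an inner read-only spin so the
 * lock's cache line isn't written while it is busy.  disable_preemption()
 * and enable_preemption() stand in for the DISABLE/ENABLE_PREEMPTION
 * macros; the type name is invented for the sketch.
 *
 *	typedef struct { volatile unsigned char lock_data; } hw_lock_sketch_t;
 *
 *	void hw_lock_lock_sketch(hw_lock_sketch_t *l)
 *	{
 *		for (;;) {
 *			disable_preemption();
 *			if (__sync_lock_test_and_set(&l->lock_data, 1) == 0)
 *				return;			// acquired; preemption stays disabled
 *			enable_preemption();		// 3: be preemptable while we wait
 *			while (l->lock_data != 0)
 *				;			// 2: spin on the cached value
 *		}
 *	}
 */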
/*
 *	void hw_lock_unlock(hw_lock_t)
 *
 *	Unconditionally release lock.
 *	MACH_RT: release preemption level.
 */
ENTRY(hw_lock_unlock)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xchgb	0(%edx),%al		/* clear the lock... a mov instruction */
					/* ...might be cheaper and less paranoid */
	ENABLE_PREEMPTION(%eax)
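
/*
 * Illustrative C sketch (assumption), using hw_lock_sketch_t from the
 * sketch above: release is a store of zero (the xchgb above is the
 * deliberately paranoid variant) followed by dropping the preemption
 * level taken at lock time.
 *
 *	void hw_lock_unlock_sketch(hw_lock_sketch_t *l)
 *	{
 *		__sync_lock_release(&l->lock_data);	// store 0 with release semantics
 *		enable_preemption();
 *	}
 */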
/*
 *	unsigned int hw_lock_try(hw_lock_t)
 *	MACH_RT: returns with preemption disabled on success.
 */
ENTRY(hw_lock_try)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	DISABLE_PREEMPTION(%eax)
	xchgb	0(%edx),%cl		/* try to acquire the HW lock */
	testb	%cl,%cl			/* success? */
	jne	1f			/* if yes, let the caller know */

	movl	$1,%eax			/* success */

1:	ENABLE_PREEMPTION(%eax)		/* failure: release preemption... */
	xorl	%eax,%eax		/* ...and return failure */
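
/*
 * Illustrative C sketch (assumption), reusing hw_lock_sketch_t: a single
 * test-and-set attempt; on success return 1 with preemption still
 * disabled, on failure re-enable preemption and return 0.
 *
 *	unsigned int hw_lock_try_sketch(hw_lock_sketch_t *l)
 *	{
 *		disable_preemption();
 *		if (__sync_lock_test_and_set(&l->lock_data, 1) == 0)
 *			return 1;		// got it; stay preemption-disabled
 *		enable_preemption();		// 1: failure path
 *		return 0;
 *	}
 */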
/*
 *	unsigned int hw_lock_held(hw_lock_t)
 *	MACH_RT: doesn't change preemption state.
 *	N.B.  Racy, of course.
 */
ENTRY(hw_lock_held)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	testb	%cl,0(%edx)		/* check lock value */
	jne	1f			/* non-zero means locked */
	xorl	%eax,%eax		/* tell caller: lock wasn't locked */

1:	movl	$1,%eax			/* tell caller: lock was locked */
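
/*
 * Illustrative C sketch (assumption), reusing hw_lock_sketch_t: a plain,
 * admittedly racy read of the lock byte; no atomics and no preemption
 * change are needed.
 *
 *	unsigned int hw_lock_held_sketch(hw_lock_sketch_t *l)
 *	{
 *		return l->lock_data != 0;	// may be stale by the time the caller looks
 *	}
 */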
ENTRY(_usimple_lock_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movb	%al,USL_INTERLOCK(%edx)	/* unlock the HW lock */
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()

	DISABLE_PREEMPTION(%eax)

	xchgb	USL_INTERLOCK(%edx),%cl	/* try to acquire the HW lock */
	testb	%cl,%cl			/* did we succeed? */
	CHECK_MYLOCK(S_THREAD)
#else	/* MACH_LDEBUG */
	jne	sl_get_hw		/* no, try again */
#endif	/* MACH_LDEBUG */

	movl	$CPD_ACTIVE_THREAD,%eax
	incl	CX(EXT(simple_lock_count),%eax)

	METER_SIMPLE_LOCK_LOCK(%edx)

	call	EXT(lock_stack_push)
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */
ENTRY(_simple_lock_try)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()

	DISABLE_PREEMPTION(%eax)

	xchgb	USL_INTERLOCK(%edx),%cl	/* try to acquire the HW lock */
	testb	%cl,%cl			/* did we succeed? */
	jne	1f			/* no, return failure */

	movl	$CPD_ACTIVE_THREAD,%eax
	incl	CX(EXT(simple_lock_count),%eax)

	METER_SIMPLE_LOCK_LOCK(%edx)

	call	EXT(lock_stack_push)
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	movl	$1,%eax			/* return success */

	ENABLE_PREEMPTION(%eax)
	xorl	%eax,%eax		/* and return failure */
ENTRY(_simple_unlock)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()
	CHECK_THREAD(S_THREAD)

	movl	%eax,S_THREAD		/* disown thread */
	MP_DISABLE_PREEMPTION(%eax)
	decl	CX(EXT(simple_lock_count),%eax)
	MP_ENABLE_PREEMPTION(%eax)

	METER_SIMPLE_LOCK_UNLOCK(%edx)

	call	EXT(lock_stack_pop)
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	xchgb	USL_INTERLOCK(%edx),%cl	/* unlock the HW lock */

	ENABLE_PREEMPTION(%eax)
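
/*
 * Illustrative C sketch (assumption) of the usimple_lock routines above:
 * they are the HW interlock plus, on MACH_LDEBUG builds, owner and
 * per-CPU bookkeeping.  The struct and helper names below are
 * placeholders for illustration.
 *
 *	typedef struct {
 *		volatile unsigned char	interlock;	// USL_INTERLOCK byte
 *		void			*thread;	// MACH_LDEBUG owner (assumed field)
 *	} usimple_lock_sketch_t;
 *
 *	void usimple_lock_sketch(usimple_lock_sketch_t *l)
 *	{
 *		disable_preemption();
 *		while (__sync_lock_test_and_set(&l->interlock, 1) != 0)
 *			;				// sl_get_hw: spin (debug builds panic on self-deadlock)
 *	#if MACH_LDEBUG
 *		l->thread = current_thread();		// record owner
 *		simple_lock_count[cpu_number()]++;	// per-CPU held count
 *	#endif
 *	}
 *
 *	void usimple_unlock_sketch(usimple_lock_sketch_t *l)
 *	{
 *	#if MACH_LDEBUG
 *		l->thread = 0;				// disown
 *		simple_lock_count[cpu_number()]--;
 *	#endif
 *		__sync_lock_release(&l->interlock);
 *		enable_preemption();
 *	}
 */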
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movb	%al,M_ILK		/* clear interlock */
	movb	%al,M_LOCKED		/* clear locked flag */
	movw	%ax,M_WAITERS		/* init waiter count */

	movl	$MUTEX_TAG,M_TYPE	/* set lock type */
	movl	%eax,M_PC		/* init caller pc */
	movl	%eax,M_THREAD		/* and owning thread */

	movl	L_ARG1,%ecx		/* fetch event type */
	pushl	%ecx			/* push event type */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_init)	/* init ETAP data */
#endif	/* ETAP_LOCK_TRACE */
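
/*
 * Layout implied by the M_* offsets used above (an inference for
 * illustration, not a definitive declaration): the interlock byte sits
 * at offset 0, the locked flag at 1, a 16-bit waiter count at 2, and the
 * MACH_LDEBUG fields (type tag, caller pc, owning thread) follow.
 *
 *	struct mutex_sketch {
 *		unsigned char	ilk;		// M_ILK      (%edx)
 *		unsigned char	locked;		// M_LOCKED  1(%edx)
 *		unsigned short	waiters;	// M_WAITERS 2(%edx)
 *	#if MACH_LDEBUG
 *		unsigned int	type;		// M_TYPE    4(%edx), set to MUTEX_TAG
 *		unsigned int	pc;		// M_PC      8(%edx) (offset inferred)
 *		void		*thread;	// M_THREAD 12(%edx)
 *	#endif
 *	};
 *
 * mutex_init zeroes all of these and, on ETAP_LOCK_TRACE builds, hands
 * the lock and its event type to etap_mutex_init.
 */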
	subl	$12,%esp		/* make room for locals */
	movl	$0,SWT_HI		/* set wait time to zero (HI) */
	movl	$0,SWT_LO		/* set wait time to zero (LO) */
	movl	$0,MISSED		/* clear local miss marker */
#endif	/* ETAP_LOCK_TRACE */

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	DISABLE_PREEMPTION(%eax)

	testb	%cl,%cl			/* did we succeed? */
	jne	ml_get_hw		/* no, try again */
/*
 * Beware of a race between this code path and the inline ASM fast-path
 * locking sequence which attempts to lock a mutex by directly setting
 * the locked flag.
 */

	xchgb	%cl,M_LOCKED		/* try to set locked flag */
	testb	%cl,%cl			/* is the mutex locked? */
	jne	ml_fail			/* yes, we lose */
	movl	$CPD_ACTIVE_THREAD,%eax
	incl	TH_MUTEX_COUNT(%ecx)

	ENABLE_PREEMPTION(%eax)

	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* collect hold timestamp */
	addl	$16+12,%esp		/* clean up stack, adjusting for locals */
#endif	/* ETAP_LOCK_TRACE */
	cmpl	$0,MISSED		/* did we already take a wait timestamp? */
	jne	ml_block		/* yup, carry on */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_miss)	/* get wait timestamp */
	movl	%eax,SWT_HI		/* set wait time (high word) */
	movl	%edx,SWT_LO		/* set wait time (low word) */
	popl	%edx			/* clean up stack */
	movl	$1,MISSED		/* mark wait timestamp as taken */
#endif	/* ETAP_LOCK_TRACE */
	CHECK_MYLOCK(M_THREAD)
	pushl	%edx			/* push mutex address */
	call	EXT(mutex_lock_wait)	/* wait for the lock */
	movl	L_ARG0,%edx		/* refetch lock pointer */
	jmp	ml_retry		/* and try again */
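
/*
 * Illustrative C sketch (assumption) of the mutex_lock path above, using
 * struct mutex_sketch from the layout sketch earlier: take the interlock,
 * try the locked flag, and on contention block in mutex_lock_wait and
 * retry.  The real code coordinates the interlock hand-off (and the ETAP
 * wait timestamp) with mutex_lock_wait; this sketch releases the
 * interlock explicitly for simplicity.  Helper names other than
 * mutex_lock_wait are placeholders.
 *
 *	void mutex_lock_sketch(struct mutex_sketch *m)
 *	{
 *	ml_retry:
 *		disable_preemption();
 *		while (__sync_lock_test_and_set(&m->ilk, 1) != 0)
 *			;				// ml_get_hw: spin for the interlock
 *		if (__sync_lock_test_and_set(&m->locked, 1) != 0) {
 *			// ml_fail/ml_block: already held; give up the interlock and wait
 *			__sync_lock_release(&m->ilk);
 *			enable_preemption();
 *			mutex_lock_wait(m);
 *			goto ml_retry;
 *		}
 *		__sync_lock_release(&m->ilk);		// got it
 *		enable_preemption();
 *	}
 */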
	subl	$8,%esp			/* make room for locals */
	movl	$0,SWT_HI		/* set wait time to zero (HI) */
	movl	$0,SWT_LO		/* set wait time to zero (LO) */
#endif	/* ETAP_LOCK_TRACE */

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_NO_SIMPLELOCKS()

	movb	$1,%al			/* locked value for mutex */
	xchgb	%al,M_LOCKED		/* swap locked values */
	xorb	$1,%al			/* generate return value */

#if	MACH_LDEBUG || ETAP_LOCK_TRACE
	testl	%eax,%eax		/* did we succeed? */

	movl	$CPD_ACTIVE_THREAD,%ecx
	incl	TH_MUTEX_COUNT(%ecx)

	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* get start hold timestamp */
	addl	$16,%esp		/* clean up stack, adjusting for locals */
	movl	$1,%eax			/* put back successful return value */
#endif	/* ETAP_LOCK_TRACE */

#if	MACH_LDEBUG || ETAP_LOCK_TRACE
	addl	$8,%esp			/* pop stack claimed on entry */
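
/*
 * Illustrative C sketch (assumption): mutex_try swaps 1 into the locked
 * flag and inverts the old value to form the return code, mirroring the
 * xchgb/xorb pair above; debug/ETAP builds then record ownership and a
 * hold timestamp only on success.
 *
 *	int mutex_try_sketch(struct mutex_sketch *m)
 *	{
 *		unsigned char was_locked = __sync_lock_test_and_set(&m->locked, 1);
 *		return was_locked ^ 1;		// 1 = acquired, 0 = already held
 *	}
 */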
	movl	L_ARG0,%edx		/* fetch lock pointer */

	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_unlock)	/* collect ETAP data */
	popl	%edx			/* restore mutex address */
#endif	/* ETAP_LOCK_TRACE */

	CHECK_THREAD(M_THREAD)

	DISABLE_PREEMPTION(%eax)

	testb	%cl,%cl			/* did we succeed? */
	jne	mu_get_hw		/* no, try again */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	mu_wakeup		/* yes, more work to do */

	movl	%eax,M_THREAD		/* disown thread */
	movl	$CPD_ACTIVE_THREAD,%eax
	decl	TH_MUTEX_COUNT(%ecx)

	xchgb	%cl,M_LOCKED		/* unlock the mutex */

	ENABLE_PREEMPTION(%eax)

	pushl	%edx			/* push mutex address */
	call	EXT(mutex_unlock_wakeup) /* yes, wake a thread */
	movl	L_ARG0,%edx		/* refetch lock pointer */
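
/*
 * Illustrative C sketch (assumption) of mutex_unlock above: grab the
 * interlock, disown the mutex, wake a waiter when the waiter count is
 * non-zero (mutex_unlock_wakeup is the helper the assembly calls), then
 * clear the locked flag, drop the interlock, and re-enable preemption.
 * The exact ordering of the wakeup relative to clearing the flag is
 * simplified here.
 *
 *	void mutex_unlock_sketch(struct mutex_sketch *m)
 *	{
 *		disable_preemption();
 *		while (__sync_lock_test_and_set(&m->ilk, 1) != 0)
 *			;				// mu_get_hw: spin for the interlock
 *		if (m->waiters != 0)
 *			mutex_unlock_wakeup(m);		// mu_wakeup path
 *		__sync_lock_release(&m->locked);
 *		__sync_lock_release(&m->ilk);
 *		enable_preemption();
 *	}
 */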
ENTRY(interlock_unlock)

	ENABLE_PREEMPTION(%eax)


ENTRY(_disable_preemption)
	_DISABLE_PREEMPTION(%eax)

ENTRY(_enable_preemption)
	movl	$CPD_PREEMPTION_LEVEL,%eax
2:	String	"_enable_preemption: preemption_level(%d) < 0!"
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION(%eax)

ENTRY(_enable_preemption_no_check)
	movl	$CPD_PREEMPTION_LEVEL,%eax
2:	String	"_enable_preemption_no_check: preemption_level <= 0!"
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK(%eax)

ENTRY(_mp_disable_preemption)
#if	MACH_RT && NCPUS > 1
	_DISABLE_PREEMPTION(%eax)
#endif	/* MACH_RT && NCPUS > 1 */

ENTRY(_mp_enable_preemption)
#if	MACH_RT && NCPUS > 1
	movl	$CPD_PREEMPTION_LEVEL,%eax
2:	String	"_mp_enable_preemption: preemption_level (%d) <= 0!"
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION(%eax)
#endif	/* MACH_RT && NCPUS > 1 */

ENTRY(_mp_enable_preemption_no_check)
#if	MACH_RT && NCPUS > 1
	movl	$CPD_PREEMPTION_LEVEL,%eax
2:	String	"_mp_enable_preemption_no_check: preemption_level <= 0!"
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK(%eax)
#endif	/* MACH_RT && NCPUS > 1 */
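
/*
 * Illustrative C sketch (assumption): these entry points adjust a
 * per-CPU preemption level reached through %gs (CPD_PREEMPTION_LEVEL);
 * the checked enable paths panic if the level would go negative, which
 * is what the String messages above report.  Names are placeholders.
 *
 *	void disable_preemption_sketch(void)
 *	{
 *		current_cpu_datap()->preemption_level++;
 *	}
 *
 *	void enable_preemption_sketch(void)
 *	{
 *		assert(current_cpu_datap()->preemption_level > 0);
 *		if (--current_cpu_datap()->preemption_level == 0)
 *			maybe_preempt();		// placeholder for the real re-check
 *	}
 */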
	ret				/* %eax better not be null ! */