/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <platforms.h>
#include <mach_ldebug.h>
#include <kern/etap_options.h>
/*
 *	When performance isn't the only concern, it's
 *	nice to build stack frames...
 */
#define	BUILD_STACK_FRAMES	((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB)
#if	BUILD_STACK_FRAMES

#define	L_PC		4(%ebp)		/* caller's return address (frame built) */
#define	L_ARG0		8(%ebp)
#define	L_ARG1		12(%ebp)

#define	SWT_HI		-4(%ebp)
#define	SWT_LO		-8(%ebp)
#define	MISSED		-12(%ebp)

#else	/* BUILD_STACK_FRAMES */

#define	L_PC		(%esp)		/* caller's return address (no frame) */
#define	L_ARG0		4(%esp)
#define	L_ARG1		8(%esp)

#endif	/* BUILD_STACK_FRAMES */
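/*
 * Roughly, the two layouts above come from the standard i386 calling
 * convention: when BUILD_STACK_FRAMES is set, each routine is assumed to
 * open with the usual "pushl %ebp; movl %esp,%ebp" prologue, so the
 * caller's return address sits at 4(%ebp), the first two arguments at
 * 8(%ebp) and 12(%ebp), and the ETAP locals (SWT_HI/SWT_LO/MISSED) live
 * just below the frame pointer.  With no frame, nothing has been pushed
 * on entry, so the return address is at (%esp) and the arguments start
 * at 4(%esp).
 */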
#define	M_ILK		(%edx)
#define	M_LOCKED	1(%edx)
#define	M_WAITERS	2(%edx)
#define	M_PROMOTED_PRI	4(%edx)
#if	MACH_LDEBUG
#define	M_TYPE		6(%edx)
#define	M_PC		10(%edx)
#define	M_THREAD	14(%edx)
#endif	/* MACH_LDEBUG */
#include <i386/AT386/mp/mp.h>

#if	(NCPUS > 1)
#define	CX(addr,reg)	addr(,reg,4)
#else
#define	CPU_NUMBER(reg)
#define	CX(addr,reg)	addr
#endif	/* (NCPUS > 1) */
#if	MACH_LDEBUG
/*
 *  Routines for general lock debugging.
 */
#define	S_TYPE		4(%edx)
#define	S_THREAD	12(%edx)
#define	S_DURATIONH	16(%edx)
#define	S_DURATIONL	20(%edx)
/*
 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to Mutex functions with
 * type simplelock and vice versa.
 */
#define	CHECK_MUTEX_TYPE()					\
	cmpl	$ MUTEX_TAG,M_TYPE			;	\
2:	String	"not a mutex!"				;	\

#define	CHECK_SIMPLE_LOCK_TYPE()				\
	cmpl	$ SIMPLE_LOCK_TAG,S_TYPE		;	\
2:	String	"not a simple lock!"			;	\
/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */
#if	0	/*MACH_RT - 11/12/99 - lion@apple.com disable check for now*/
#define	CHECK_PREEMPTION_LEVEL()				\
	movl	$ CPD_PREEMPTION_LEVEL,%eax		;	\
	cmpl	$0,%gs:(%eax)				;	\
2:	String	"preemption_level != 0!"		;	\

#else	/* MACH_RT */
#define	CHECK_PREEMPTION_LEVEL()
#endif	/* MACH_RT */
#define	CHECK_NO_SIMPLELOCKS()					\
	movl	$ CPD_SIMPLE_LOCK_COUNT,%eax		;	\
	cmpl	$0,%gs:(%eax)				;	\
2:	String	"simple_locks_held!"			;	\
/*
 * Verifies return to the correct thread in "unlock" situations.
 */
#define	CHECK_THREAD(thd)					\
	movl	$ CPD_ACTIVE_THREAD,%eax		;	\
	movl	%gs:(%eax),%ecx				;	\
2:	String	"wrong thread!"				;	\

#define	CHECK_MYLOCK(thd)					\
	movl	$ CPD_ACTIVE_THREAD,%eax		;	\
	movl	%gs:(%eax),%ecx				;	\
2:	String	"mylock attempt!"			;	\
#define	METER_SIMPLE_LOCK_LOCK(reg)				\
	call	EXT(meter_simple_lock)			;	\

#define	METER_SIMPLE_LOCK_UNLOCK(reg)				\
	call	EXT(meter_simple_unlock)		;	\
#else	/* MACH_LDEBUG */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_SIMPLE_LOCK_TYPE()
#define	CHECK_THREAD(thd)
#define	CHECK_PREEMPTION_LEVEL()
#define	CHECK_NO_SIMPLELOCKS()
#define	CHECK_MYLOCK(thd)
#define	METER_SIMPLE_LOCK_LOCK(reg)
#define	METER_SIMPLE_LOCK_UNLOCK(reg)
#endif	/* MACH_LDEBUG */
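/*
 * Taken together, the MACH_LDEBUG checks above behave roughly like the
 * following C sketch.  It is illustrative only: the struct and helper
 * names (dbg_mutex, owner, current_cpu_simple_lock_count) are assumptions
 * made for the sketch, not the kernel's real declarations; only the tags,
 * the panic strings and panic()/current_thread() come from this file or
 * the surrounding kernel.
 *
 *	struct dbg_mutex { int lock_type; void *owner; };
 *
 *	void check_mutex_type(struct dbg_mutex *m) {
 *		if (m->lock_type != MUTEX_TAG)
 *			panic("not a mutex!");
 *	}
 *	void check_no_simplelocks(void) {
 *		if (current_cpu_simple_lock_count() != 0)
 *			panic("simple_locks_held!");
 *	}
 *	void check_thread(struct dbg_mutex *m) {
 *		if (m->owner != (void *)current_thread())
 *			panic("wrong thread!");
 *	}
 */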
/*
 *	void hw_lock_init(hw_lock_t)
 *
 *	Initialize a hardware lock.
 */
ENTRY(hw_lock_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movb	%al,0(%edx)		/* clear the lock */
	ret
/*
 *	void hw_lock_lock(hw_lock_t)
 *	unsigned int hw_lock_to(hw_lock_t, unsigned int)
 *
 *	Acquire lock, spinning until it becomes available.
 *	XXX: For now, we don't actually implement the timeout.
 *	MACH_RT: also return with preemption disabled.
 */
ENTRY2(hw_lock_lock,hw_lock_to)
	movl	L_ARG0,%edx		/* fetch lock pointer */

1:	DISABLE_PREEMPTION(%eax)
	movl	$1,%ecx
	xchgb	0(%edx),%cl		/* try to acquire the HW lock */
	testb	%cl,%cl			/* success? */
	jne	3f			/* no, spin until it looks free */
	movl	$1,%eax			/* In case this was a timeout call */
	EMARF				/* if yes, then nothing left to do */
	ret

3:	ENABLE_PREEMPTION(%eax)		/* no reason we can't be preemptable now */

2:	testb	%cl,0(%edx)		/* spin checking lock value in cache */
	jne	2b			/* non-zero means locked, keep spinning */
	jmp	1b			/* zero means unlocked, try to grab it */
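/*
 * The loop above is a classic test-and-set spin lock with a read-only
 * inner spin: preemption is disabled, an atomic xchg tries to set the
 * lock byte, and on failure preemption is re-enabled while the code
 * spins on plain loads (keeping the line cached) until the byte reads
 * zero, then retries the xchg.  The same idea in C (hypothetical helper
 * names, not the kernel's actual implementation):
 *
 *	void hw_lock_lock_sketch(volatile unsigned char *lock) {
 *		for (;;) {
 *			disable_preemption();
 *			if (atomic_xchg_byte(lock, 1) == 0)
 *				return;		// acquired; preemption stays off
 *			enable_preemption();
 *			while (*lock != 0)
 *				;		// read-only spin
 *		}
 *	}
 */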
/*
 *	void hw_lock_unlock(hw_lock_t)
 *
 *	Unconditionally release lock.
 *	MACH_RT: release preemption level.
 */
ENTRY(hw_lock_unlock)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	xchgb	0(%edx),%al		/* clear the lock... a mov instruction */
					/* ...might be cheaper and less paranoid */
	ENABLE_PREEMPTION(%eax)
	ret
/*
 *	unsigned int hw_lock_try(hw_lock_t)
 *	MACH_RT: returns with preemption disabled on success.
 */
ENTRY(hw_lock_try)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	DISABLE_PREEMPTION(%eax)
	movl	$1,%ecx
	xchgb	0(%edx),%cl		/* try to acquire the HW lock */
	testb	%cl,%cl			/* success? */
	jne	1f			/* no, return failure */

	movl	$1,%eax			/* success */
	ret

1:	ENABLE_PREEMPTION(%eax)		/* failure: release preemption... */
	xorl	%eax,%eax		/* ...and return failure */
	ret
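/*
 * hw_lock_try() is the non-blocking variant: it returns non-zero and
 * leaves preemption disabled if it grabbed the lock byte, and returns
 * zero (with the preemption level unchanged on net) if the lock was
 * already held.  A typical caller might look like this (illustrative C
 * only; "lock" is assumed to be an initialized hw_lock_t):
 *
 *	if (hw_lock_try(lock)) {
 *		// ... short critical section, preemption disabled ...
 *		hw_lock_unlock(lock);	// releases lock and preemption level
 *	} else {
 *		// lock busy: retry later or take a blocking path
 *	}
 */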
/*
 *	unsigned int hw_lock_held(hw_lock_t)
 *	MACH_RT: doesn't change preemption state.
 *	N.B.  Racy, of course.
 */
ENTRY(hw_lock_held)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movb	$1,%cl
	testb	%cl,0(%edx)		/* check lock value */
	jne	1f			/* non-zero means locked */
	xorl	%eax,%eax		/* tell caller: lock wasn't locked */
	ret

1:	movl	$1,%eax			/* tell caller: lock was locked */
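/*
 * Because the check is a plain load with no interlocking, the answer can
 * be stale by the time the caller examines it; it is really only useful
 * for assertions and debugging, e.g. (illustrative C):
 *
 *	assert(hw_lock_held(lock));	// in code that believes it owns lock
 */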
ENTRY(_usimple_lock_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movb	%al,USL_INTERLOCK(%edx)	/* unlock the HW lock */
	ret
ENTRY(_simple_lock)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()

	DISABLE_PREEMPTION(%eax)

sl_get_hw:
	movl	$1,%ecx
	xchgb	USL_INTERLOCK(%edx),%cl	/* try to acquire the HW lock */
	testb	%cl,%cl			/* did we succeed? */
#if	MACH_LDEBUG
	je	5f			/* yes, all done */
	CHECK_MYLOCK(S_THREAD)
	jmp	sl_get_hw		/* otherwise keep trying */
5:
#else	/* MACH_LDEBUG */
	jne	sl_get_hw		/* no, try again */
#endif	/* MACH_LDEBUG */

#if	MACH_LDEBUG
	movl	$ CPD_ACTIVE_THREAD,%eax
	incl	CX(EXT(simple_lock_count),%eax)
	METER_SIMPLE_LOCK_LOCK(%edx)
#if	NCPUS == 1
	call	EXT(lock_stack_push)
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */
	ret
ENTRY(_simple_lock_try)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()

	DISABLE_PREEMPTION(%eax)

	movl	$1,%ecx
	xchgb	USL_INTERLOCK(%edx),%cl	/* try to acquire the HW lock */
	testb	%cl,%cl			/* did we succeed? */
	jne	1f			/* no, return failure */

#if	MACH_LDEBUG
	movl	$ CPD_ACTIVE_THREAD,%eax
	incl	CX(EXT(simple_lock_count),%eax)
	METER_SIMPLE_LOCK_LOCK(%edx)
#if	NCPUS == 1
	call	EXT(lock_stack_push)
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	movl	$1,%eax			/* return success */
	ret

1:	ENABLE_PREEMPTION(%eax)
	xorl	%eax,%eax		/* and return failure */
	ret
ENTRY(_simple_unlock)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()
	CHECK_THREAD(S_THREAD)

#if	MACH_LDEBUG
	xorl	%eax,%eax
	movl	%eax,S_THREAD		/* disown thread */
	MP_DISABLE_PREEMPTION(%eax)
	decl	CX(EXT(simple_lock_count),%eax)
	MP_ENABLE_PREEMPTION(%eax)
	METER_SIMPLE_LOCK_UNLOCK(%edx)
#if	NCPUS == 1
	call	EXT(lock_stack_pop)
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	xorb	%cl,%cl
	xchgb	USL_INTERLOCK(%edx),%cl	/* unlock the HW lock */

	ENABLE_PREEMPTION(%eax)
	ret
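/*
 * Together, the three routines above form the debug-checked spin lock
 * ("usimple lock") interface: _simple_lock spins on the per-lock
 * interlock byte with preemption disabled, _simple_lock_try is the
 * non-blocking variant, and _simple_unlock clears the interlock and
 * drops the preemption level.  A hypothetical C caller (the variable
 * type and initialization call are assumptions based on the entry
 * points above):
 *
 *	usimple_lock_data_t lk;
 *
 *	_usimple_lock_init(&lk);
 *	_simple_lock(&lk);	// spins; returns with preemption disabled
 *	// ... critical section; must not block or context switch ...
 *	_simple_unlock(&lk);	// releases lock and preemption level
 */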
ENTRY(mutex_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movb	%al,M_ILK		/* clear interlock */
	movb	%al,M_LOCKED		/* clear locked flag */
	movw	%ax,M_WAITERS		/* init waiter count */
	movw	%ax,M_PROMOTED_PRI

#if	MACH_LDEBUG
	movl	$ MUTEX_TAG,M_TYPE	/* set lock type */
	movl	%eax,M_PC		/* init caller pc */
	movl	%eax,M_THREAD		/* and owning thread */
#endif	/* MACH_LDEBUG */

#if	ETAP_LOCK_TRACE
	movl	L_ARG1,%ecx		/* fetch event type */
	pushl	%ecx			/* push event type */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_init)	/* init ETAP data */
	addl	$8,%esp			/* pop the two arguments */
#endif	/* ETAP_LOCK_TRACE */

	ret
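/*
 * mutex_init() simply zeroes the state that the code above addresses
 * through %edx.  The layout implied by the M_* offsets near the top of
 * this file (a sketch inferred from those offsets, not the kernel's real
 * declaration) is:
 *
 *	offset 0   byte  ilk            interlock
 *	offset 1   byte  locked         locked flag
 *	offset 2   word  waiters        waiter count
 *	offset 4   word  promoted_pri   promoted priority
 *	offset 6   long  type           MUTEX_TAG        (MACH_LDEBUG only)
 *	offset 10  long  pc             caller pc        (MACH_LDEBUG only)
 *	offset 14  long  thread         owning thread    (MACH_LDEBUG only)
 */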
ENTRY2(mutex_lock,_mutex_lock)

#if	ETAP_LOCK_TRACE
	subl	$12,%esp		/* make room for locals */
	movl	$0,SWT_HI		/* set wait time to zero (HI) */
	movl	$0,SWT_LO		/* set wait time to zero (LO) */
	movl	$0,MISSED		/* clear local miss marker */
#endif	/* ETAP_LOCK_TRACE */

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

ml_retry:
	DISABLE_PREEMPTION(%eax)

ml_get_hw:
	movb	$1,%cl
	xchgb	%cl,M_ILK		/* try to acquire the interlock */
	testb	%cl,%cl			/* did we succeed? */
	jne	ml_get_hw		/* no, try again */

	movb	$1,%cl
	xchgb	%cl,M_LOCKED		/* try to set locked flag */
	testb	%cl,%cl			/* is the mutex locked? */
	jne	ml_fail			/* yes, we lose */

	call	EXT(mutex_lock_acquire)

	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	incl	TH_MUTEX_COUNT(%ecx)

	ENABLE_PREEMPTION(%eax)

#if	ETAP_LOCK_TRACE
	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* collect hold timestamp */
	addl	$16+12,%esp		/* clean up stack, adjusting for locals */
#endif	/* ETAP_LOCK_TRACE */

	ret
ml_fail:
#if	ETAP_LOCK_TRACE
	cmp	$0,MISSED		/* did we already take a wait timestamp? */
	jne	ml_block		/* yup. carry-on */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_miss)	/* get wait timestamp */
	movl	%eax,SWT_HI		/* set wait time (high word) */
	movl	%edx,SWT_LO		/* set wait time (low word) */
	popl	%edx			/* clean up stack */
	movl	$1,MISSED		/* mark wait timestamp as taken */
#endif	/* ETAP_LOCK_TRACE */

ml_block:
	CHECK_MYLOCK(M_THREAD)
	pushl	%eax			/* no promotion here yet */
	pushl	%edx			/* push mutex address */
	call	EXT(mutex_lock_wait)	/* wait for the lock */
	addl	$8,%esp			/* clean up the stack */
	movl	L_ARG0,%edx		/* refetch lock pointer */
	jmp	ml_retry		/* and try again */
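/*
 * The fast/slow structure of mutex_lock above, in C-like pseudocode
 * (labels follow the assembly; the helper names are illustrative, not
 * the kernel's):
 *
 *	retry:
 *		disable_preemption();
 *		while (xchg(&m->ilk, 1) != 0)	// ml_get_hw: take interlock
 *			;
 *		if (xchg(&m->locked, 1) != 0) {	// already locked -> ml_fail
 *			record_miss_timestamp_once();	// ETAP only
 *			mutex_lock_wait(m, 0);		// ml_block: go to sleep
 *			goto retry;
 *		}
 *		// acquired: record ownership, bump the thread's mutex count,
 *		// release the interlock, re-enable preemption, return
 */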
ENTRY2(mutex_try,_mutex_try)

#if	ETAP_LOCK_TRACE
	subl	$8,%esp			/* make room for locals */
	movl	$0,SWT_HI		/* set wait time to zero (HI) */
	movl	$0,SWT_LO		/* set wait time to zero (LO) */
#endif	/* ETAP_LOCK_TRACE */

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_NO_SIMPLELOCKS()

	DISABLE_PREEMPTION(%eax)

	call	EXT(mutex_lock_acquire)

	movl	$ CPD_ACTIVE_THREAD,%ecx
	movl	%gs:(%ecx),%ecx
	incl	TH_MUTEX_COUNT(%ecx)

	ENABLE_PREEMPTION(%eax)

#if	ETAP_LOCK_TRACE
	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* get start hold timestamp */
	addl	$16,%esp		/* clean up stack, adjusting for locals */
#endif	/* ETAP_LOCK_TRACE */

#if	MACH_LDEBUG || ETAP_LOCK_TRACE
	addl	$8,%esp			/* pop stack claimed on entry */
#endif	/* MACH_LDEBUG || ETAP_LOCK_TRACE */

	movl	$ CPD_ACTIVE_THREAD,%ecx
	movl	%gs:(%ecx),%ecx
	incl	TH_MUTEX_COUNT(%ecx)

	ENABLE_PREEMPTION(%eax)

#if	ETAP_LOCK_TRACE
	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI		/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* get start hold timestamp */
	addl	$16,%esp		/* clean up stack, adjusting for locals */
#endif	/* ETAP_LOCK_TRACE */

#if	MACH_LDEBUG || ETAP_LOCK_TRACE
	addl	$8,%esp			/* pop stack claimed on entry */
#endif	/* MACH_LDEBUG || ETAP_LOCK_TRACE */
	movl	L_ARG0,%edx		/* fetch lock pointer */

#if	ETAP_LOCK_TRACE
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_unlock)	/* collect ETAP data */
	popl	%edx			/* restore mutex address */
#endif	/* ETAP_LOCK_TRACE */

	CHECK_THREAD(M_THREAD)

	DISABLE_PREEMPTION(%eax)

mu_get_hw:
	movb	$1,%cl
	xchgb	%cl,M_ILK		/* try to acquire the interlock */
	testb	%cl,%cl			/* did we succeed? */
	jne	mu_get_hw		/* no, try again */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	mu_wakeup		/* yes, more work to do */

	xorl	%eax,%eax
	movl	%eax,M_THREAD		/* disown thread */
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	decl	TH_MUTEX_COUNT(%ecx)

	xorb	%cl,%cl
	xchgb	%cl,M_LOCKED		/* unlock the mutex */

	ENABLE_PREEMPTION(%eax)
	ret

mu_wakeup:
	pushl	%eax			/* no promotion here yet */
	pushl	%edx			/* push mutex address */
	call	EXT(mutex_unlock_wakeup)/* yes, wake a thread */
	addl	$8,%esp			/* clean up the stack */
	movl	L_ARG0,%edx		/* refetch lock pointer */
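/*
 * The unlock path mirrors the lock path: take the interlock, and if the
 * waiter count is non-zero, let mutex_unlock_wakeup() pick a waiter to
 * run before the lock is actually dropped.  Roughly (illustrative C
 * only; helper names are assumptions):
 *
 *	disable_preemption();
 *	while (xchg(&m->ilk, 1) != 0)		// mu_get_hw
 *		;
 *	if (m->waiters != 0)
 *		mutex_unlock_wakeup(m, 0);	// mu_wakeup: wake a waiter
 *	m->locked = 0;				// then drop the lock
 *	// release interlock, re-enable preemption
 */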
ENTRY(interlock_unlock)
	ENABLE_PREEMPTION(%eax)
	ret
ENTRY(_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION(%eax)
#endif	/* MACH_RT */
	ret

ENTRY(_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
2:	String	"_enable_preemption: preemption_level(%d) < 0!"
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION(%eax)
#endif	/* MACH_RT */
	ret

ENTRY(_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
2:	String	"_enable_preemption_no_check: preemption_level <= 0!"
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK(%eax)
#endif	/* MACH_RT */
	ret
ENTRY(_mp_disable_preemption)
#if	MACH_RT && NCPUS > 1
	_DISABLE_PREEMPTION(%eax)
#endif	/* MACH_RT && NCPUS > 1 */
	ret

ENTRY(_mp_enable_preemption)
#if	MACH_RT && NCPUS > 1
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
2:	String	"_mp_enable_preemption: preemption_level (%d) <= 0!"
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION(%eax)
#endif	/* MACH_RT && NCPUS > 1 */
	ret

ENTRY(_mp_enable_preemption_no_check)
#if	MACH_RT && NCPUS > 1
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
2:	String	"_mp_enable_preemption_no_check: preemption_level <= 0!"
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK(%eax)
#endif	/* MACH_RT && NCPUS > 1 */
	ret
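/*
 * The preemption entry points above maintain a per-cpu nesting count:
 * _disable_preemption raises it, and _enable_preemption lowers it,
 * asserting (under MACH_ASSERT) that it never goes negative; the
 * "no_check" variants skip the deferred-preemption check when the count
 * returns to zero.  Usage is strictly paired (illustrative C):
 *
 *	_disable_preemption();
 *	// ... touch per-cpu state without fear of being rescheduled ...
 *	_enable_preemption();
 */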
	ret				/* %eax better not be null ! */