/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Locking primitives implementation
 */
#include <mach_ldebug.h>

#include <kern/locks.h>
#include <kern/kalloc.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <i386/machine_routines.h>	/* machine_timeout_suspended() */
#include <machine/machine_cpu.h>

#include <sys/kdebug.h>
#include <mach/branch_predicates.h>

/*
 * We need only enough declarations from the BSD-side to be able to
 * test if our probe is active, and to call __dtrace_probe().  Setting
 * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in.
 */
#define NEED_DTRACE_DEFS
#include <../bsd/sys/lockstat.h>
#define	LCK_RW_LCK_EXCLUSIVE_CODE	0x100
#define	LCK_RW_LCK_EXCLUSIVE1_CODE	0x101
#define	LCK_RW_LCK_SHARED_CODE		0x102
#define	LCK_RW_LCK_SH_TO_EX_CODE	0x103
#define	LCK_RW_LCK_SH_TO_EX1_CODE	0x104
#define	LCK_RW_LCK_EX_TO_SH_CODE	0x105

#define	LCK_RW_LCK_EX_WRITER_SPIN_CODE	0x106
#define	LCK_RW_LCK_EX_WRITER_WAIT_CODE	0x107
#define	LCK_RW_LCK_EX_READER_SPIN_CODE	0x108
#define	LCK_RW_LCK_EX_READER_WAIT_CODE	0x109
#define	LCK_RW_LCK_SHARED_SPIN_CODE	0x110
#define	LCK_RW_LCK_SHARED_WAIT_CODE	0x111
#define	LCK_RW_LCK_SH_TO_EX_SPIN_CODE	0x112
#define	LCK_RW_LCK_SH_TO_EX_WAIT_CODE	0x113

#define	ANY_LOCK_DEBUG	(USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)
unsigned int LcksOpts = 0;

#if	USLOCK_DEBUG
/*
 *	Perform simple lock checks.
 */
int	uslock_check = 1;
int	max_lock_loops = 100000000;
decl_simple_lock_data(extern , printf_lock)
decl_simple_lock_data(extern , panic_lock)
#endif	/* USLOCK_DEBUG */

extern unsigned int not_in_kdp;

/*
 *	We often want to know the addresses of the callers
 *	of the various lock routines.  However, this information
 *	is only used for debugging and statistics.
 */
#define	INVALID_PC	((void *) VM_MAX_KERNEL_ADDRESS)
#define	INVALID_THREAD	((void *) VM_MAX_KERNEL_ADDRESS)
#if	ANY_LOCK_DEBUG
#define	OBTAIN_PC(pc)	((pc) = GET_RETURN_PC())
#define	DECL_PC(pc)	pc_t pc;
#else	/* ANY_LOCK_DEBUG */
#define	DECL_PC(pc)
#ifdef	lint
/*
 *	Eliminate lint complaints about unused local pc variables.
 */
#define	OBTAIN_PC(pc)	++pc
#else	/* lint */
#define	OBTAIN_PC(pc)
#endif	/* lint */
#endif	/* USLOCK_DEBUG */
/*
 *	Portable lock package implementation of usimple_locks.
 */

#if	USLOCK_DEBUG
#define	USLDBG(stmt)	stmt
void		usld_lock_init(usimple_lock_t, unsigned short);
void		usld_lock_pre(usimple_lock_t, pc_t);
void		usld_lock_post(usimple_lock_t, pc_t);
void		usld_unlock(usimple_lock_t, pc_t);
void		usld_lock_try_pre(usimple_lock_t, pc_t);
void		usld_lock_try_post(usimple_lock_t, pc_t);
int		usld_lock_common_checks(usimple_lock_t, char *);
#else	/* USLOCK_DEBUG */
#define	USLDBG(stmt)
#endif	/* USLOCK_DEBUG */


extern int lck_rw_grab_want(lck_rw_t *lck);
extern int lck_rw_grab_shared(lck_rw_t *lck);
extern int lck_rw_held_read_or_upgrade(lck_rw_t *lck);
/*
 * Forward definitions
 */

void lck_rw_lock_shared_gen(
	lck_rw_t	*lck);

void lck_rw_lock_exclusive_gen(
	lck_rw_t	*lck);

boolean_t lck_rw_lock_shared_to_exclusive_success(
	lck_rw_t	*lck);

boolean_t lck_rw_lock_shared_to_exclusive_failure(
	lck_rw_t	*lck,
	int		prior_lock_state);

void lck_rw_lock_exclusive_to_shared_gen(
	lck_rw_t	*lck,
	int		prior_lock_state);

lck_rw_type_t lck_rw_done_gen(
	lck_rw_t	*lck,
	int		prior_lock_state);

void lck_rw_clear_promotions_x86(thread_t thread);
/*
 *      Routine:        lck_spin_alloc_init
 */
lck_spin_t *
lck_spin_alloc_init(
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_spin_t	*lck;

	if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
		lck_spin_init(lck, grp, attr);

	return(lck);
}

/*
 *      Routine:        lck_spin_free
 */
void
lck_spin_free(
	lck_spin_t	*lck,
	lck_grp_t	*grp)
{
	lck_spin_destroy(lck, grp);
	kfree(lck, sizeof(lck_spin_t));
}
/*
 *      Routine:        lck_spin_init
 */
void
lck_spin_init(
	lck_spin_t	*lck,
	lck_grp_t	*grp,
	__unused lck_attr_t	*attr)
{
	usimple_lock_init((usimple_lock_t) lck, 0);
	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_SPIN);
}

/*
 *      Routine:        lck_spin_destroy
 */
void
lck_spin_destroy(
	lck_spin_t	*lck,
	lck_grp_t	*grp)
{
	if (lck->interlock == LCK_SPIN_TAG_DESTROYED)
		return;
	lck->interlock = LCK_SPIN_TAG_DESTROYED;
	lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN);
	lck_grp_deallocate(grp);
}
/*
 *      Routine:        lck_spin_lock
 */
void
lck_spin_lock(
	lck_spin_t	*lck)
{
	usimple_lock((usimple_lock_t) lck);
}

/*
 *      Routine:        lck_spin_unlock
 */
void
lck_spin_unlock(
	lck_spin_t	*lck)
{
	usimple_unlock((usimple_lock_t) lck);
}

/*
 *      Routine:        lck_spin_try_lock
 */
boolean_t
lck_spin_try_lock(
	lck_spin_t	*lck)
{
	return((boolean_t)usimple_lock_try((usimple_lock_t) lck));
}

/*
 *	Routine:	kdp_lck_spin_is_acquired
 *	NOT SAFE: To be used only by kernel debugger to avoid deadlock.
 *	Returns: TRUE if lock is acquired.
 */
boolean_t
kdp_lck_spin_is_acquired(lck_spin_t *lck) {
	if (not_in_kdp) {
		panic("panic: spinlock acquired check done outside of kernel debugger");
	}
	return (lck->interlock != 0)? TRUE : FALSE;
}
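/*
 * Illustrative usage sketch (not part of the original file): a typical
 * client pairs the allocation and locking calls above as follows; the
 * group and attribute names are placeholders.
 *
 *	lck_spin_t *slock = lck_spin_alloc_init(my_grp, LCK_ATTR_NULL);
 *	lck_spin_lock(slock);
 *	... short critical section, preemption disabled ...
 *	lck_spin_unlock(slock);
 *	lck_spin_free(slock, my_grp);
 */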
/*
 *	Initialize a usimple_lock.
 *
 *	No change in preemption state.
 */
void
usimple_lock_init(
	usimple_lock_t	l,
	__unused unsigned short	tag)
{
#ifndef	MACHINE_SIMPLE_LOCK
	USLDBG(usld_lock_init(l, tag));
	hw_lock_init(&l->interlock);
#else
	simple_lock_init((simple_lock_t)l, tag);
#endif
}

volatile uint32_t spinlock_owner_cpu = ~0;
volatile usimple_lock_t spinlock_timed_out;
uint32_t spinlock_timeout_NMI(uintptr_t thread_addr) {
	uint64_t deadline;
	uint32_t i;

	for (i = 0; i < real_ncpus; i++) {
		if ((uintptr_t)cpu_data_ptr[i]->cpu_active_thread == thread_addr) {
			spinlock_owner_cpu = i;
			if ((uint32_t) cpu_number() == i)
				break;
			cpu_datap(i)->cpu_NMI_acknowledged = FALSE;
			cpu_NMI_interrupt(i);
			deadline = mach_absolute_time() + (LockTimeOut * 2);
			while (mach_absolute_time() < deadline && cpu_datap(i)->cpu_NMI_acknowledged == FALSE)
				cpu_pause();
			break;
		}
	}

	return spinlock_owner_cpu;
}
/*
 *	Acquire a usimple_lock.
 *
 *	Returns with preemption disabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	DECL_PC(pc);

	OBTAIN_PC(pc);
	USLDBG(usld_lock_pre(l, pc));

	if (__improbable(hw_lock_to(&l->interlock, LockTimeOutTSC) == 0)) {
		boolean_t uslock_acquired = FALSE;
		while (machine_timeout_suspended()) {
			if ((uslock_acquired = hw_lock_to(&l->interlock, LockTimeOutTSC)))
				break;
		}

		if (uslock_acquired == FALSE) {
			uint32_t lock_cpu;
			uintptr_t lowner = (uintptr_t)l->interlock.lock_data;

			spinlock_timed_out = l;
			lock_cpu = spinlock_timeout_NMI(lowner);
			panic("Spinlock acquisition timed out: lock=%p, lock owner thread=0x%lx, current_thread: %p, lock owner active on CPU 0x%x, current owner: 0x%lx", l, lowner, current_thread(), lock_cpu, (uintptr_t)l->interlock.lock_data);
		}
	}
	USLDBG(usld_lock_post(l, pc));
#else
	simple_lock((simple_lock_t)l);
#endif
}
/*
 *	Release a usimple_lock.
 *
 *	Returns with preemption enabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_unlock(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	DECL_PC(pc);

	OBTAIN_PC(pc);
	USLDBG(usld_unlock(l, pc));
	hw_lock_unlock(&l->interlock);
#else
	simple_unlock_rwmb((simple_lock_t)l);
#endif
}
/*
 *	Conditionally acquire a usimple_lock.
 *
 *	On success, returns with preemption disabled.
 *	On failure, returns with preemption in the same state
 *	as when first invoked.  Note that the hw_lock routines
 *	are responsible for maintaining preemption state.
 *
 *	XXX No stats are gathered on a miss; I preserved this
 *	behavior from the original assembly-language code, but
 *	doesn't it make sense to log misses?  XXX
 */
unsigned int
usimple_lock_try(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	unsigned int	success;
	DECL_PC(pc);

	OBTAIN_PC(pc);
	USLDBG(usld_lock_try_pre(l, pc));
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
	}
	return success;
#else
	return(simple_lock_try((simple_lock_t)l));
#endif
}
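/*
 * Illustrative sketch (not part of the original file): usimple_lock_try()
 * is the non-blocking variant, so a caller must be prepared for failure:
 *
 *	if (usimple_lock_try(&some_lock)) {
 *		... brief critical section ...
 *		usimple_unlock(&some_lock);
 *	} else {
 *		... fall back, retry, or defer the work ...
 *	}
 */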
#if	USLOCK_DEBUG
/*
 *	States of a usimple_lock.  The default when initializing
 *	a usimple_lock is setting it up for debug checking.
 */
#define	USLOCK_CHECKED		0x0001		/* lock is being checked */
#define	USLOCK_TAKEN		0x0002		/* lock has been taken */
#define	USLOCK_INIT		0xBAA0		/* lock has been initialized */
#define	USLOCK_INITIALIZED	(USLOCK_INIT|USLOCK_CHECKED)
#define	USLOCK_CHECKING(l)	(uslock_check &&			\
				 ((l)->debug.state & USLOCK_CHECKED))

/*
 *	Trace activities of a particularly interesting lock.
 */
void	usl_trace(usimple_lock_t, int, pc_t, const char *);
/*
 *	Initialize the debugging information contained
 *	in a usimple_lock.
 */
void
usld_lock_init(
	usimple_lock_t	l,
	__unused unsigned short	tag)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("lock initialization: null lock pointer");
	l->lock_type = USLOCK_TAG;
	l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
	l->debug.lock_cpu = l->debug.unlock_cpu = 0;
	l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
	l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
	l->debug.duration[0] = l->debug.duration[1] = 0;
	l->debug.unlock_cpu = l->debug.unlock_cpu = 0;
	l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC;
	l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD;
}
/*
 *	These checks apply to all usimple_locks, not just
 *	those with USLOCK_CHECKED turned on.
 */
int
usld_lock_common_checks(
	usimple_lock_t	l,
	char		*caller)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("%s:  null lock pointer", caller);
	if (l->lock_type != USLOCK_TAG)
		panic("%s:  %p is not a usimple lock, 0x%x", caller, l, l->lock_type);
	if (!(l->debug.state & USLOCK_INIT))
		panic("%s:  %p is not an initialized lock, 0x%x", caller, l, l->debug.state);
	return USLOCK_CHECKING(l);
}
/*
 *	Debug checks on a usimple_lock just before attempting
 *	to acquire it.
 */
void
usld_lock_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	caller[] = "usimple_lock";

	if (!usld_lock_common_checks(l, caller))
		return;

/*
 *	Note that we have a weird case where we are getting a lock when we are
 *	in the process of putting the system to sleep. We are running with no
 *	current threads, therefore we can't tell if we are trying to retake a lock
 *	we have or someone on the other processor has it.  Therefore we just
 *	ignore this test if the locking thread is 0.
 */

	if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
	    l->debug.lock_thread == (void *) current_thread()) {
		printf("%s:  lock %p already locked (at %p) by",
		      caller, l, l->debug.lock_pc);
		printf(" current thread %p (new attempt at pc %p)\n",
		       l->debug.lock_thread, pc);
	}
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
/*
 *	Debug checks on a usimple_lock just after acquiring it.
 *
 *	Pre-emption has been disabled at this point,
 *	so we are safe in using cpu_number.
 */
void
usld_lock_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	int	mycpu;
	char	caller[] = "successful usimple_lock";

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s:  lock %p became uninitialized",
		      caller, l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%p became TAKEN by someone else",
		      caller, l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *)current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_trace(l, mycpu, pc, caller);
}
/*
 *	Debug checks on a usimple_lock just before
 *	releasing it.  Note that the caller has not
 *	yet released the hardware lock.
 *
 *	Preemption is still disabled, so there's
 *	no problem using cpu_number.
 */
void
usld_unlock(
	usimple_lock_t	l,
	pc_t		pc)
{
	int	mycpu;
	char	caller[] = "usimple_unlock";

	if (!usld_lock_common_checks(l, caller))
		return;

	mycpu = cpu_number();

	if (!(l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%p hasn't been taken",
		      caller, l);
	if (l->debug.lock_thread != (void *) current_thread())
		panic("%s:  unlocking lock 0x%p, owned by thread %p",
		      caller, l, l->debug.lock_thread);
	if (l->debug.lock_cpu != mycpu) {
		printf("%s:  unlocking lock 0x%p on cpu 0x%x",
		       caller, l, mycpu);
		printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
	}
	usl_trace(l, mycpu, pc, caller);

	l->debug.unlock_thread = l->debug.lock_thread;
	l->debug.lock_thread = INVALID_PC;
	l->debug.state &= ~USLOCK_TAKEN;
	l->debug.unlock_pc = pc;
	l->debug.unlock_cpu = mycpu;
}
/*
 *	Debug checks on a usimple_lock just before
 *	attempting to acquire it.
 *
 *	Preemption isn't guaranteed to be disabled.
 */
void
usld_lock_try_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	caller[] = "usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
/*
 *	Debug checks on a usimple_lock just after
 *	successfully attempting to acquire it.
 *
 *	Preemption has been disabled by the
 *	lock acquisition attempt, so it's safe
 *	to use cpu_number.
 */
void
usld_lock_try_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	int	mycpu;
	char	caller[] = "successful usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s:  lock 0x%p became uninitialized",
		      caller, l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%p became TAKEN by someone else",
		      caller, l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *) current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_trace(l, mycpu, pc, caller);
}
/*
 *	For very special cases, set traced_lock to point to a
 *	specific lock of interest.  The result is a series of
 *	XPRs showing lock operations on that lock.  The lock_seq
 *	value is used to show the order of those operations.
 */
usimple_lock_t		traced_lock;
unsigned int		lock_seq;

void
usl_trace(
	usimple_lock_t	l,
	int		mycpu,
	pc_t		pc,
	const char *	op_name)
{
	if (traced_lock == l) {
		XPR(XPR_SLOCK,
		    "seq %d, cpu %d, %s @ %x\n",
		    (uintptr_t) lock_seq, (uintptr_t) mycpu,
		    (uintptr_t) op_name, (uintptr_t) pc, 0);
		lock_seq++;
	}
}

#endif	/* USLOCK_DEBUG */
/*
 *      Routine:        lck_rw_alloc_init
 */
lck_rw_t *
lck_rw_alloc_init(
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_rw_t	*lck;

	if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0) {
		bzero(lck, sizeof(lck_rw_t));
		lck_rw_init(lck, grp, attr);
	}

	return(lck);
}

/*
 *      Routine:        lck_rw_free
 */
void
lck_rw_free(
	lck_rw_t	*lck,
	lck_grp_t	*grp)
{
	lck_rw_destroy(lck, grp);
	kfree(lck, sizeof(lck_rw_t));
}
/*
 *      Routine:        lck_rw_init
 */
void
lck_rw_init(
	lck_rw_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_attr_t	*lck_attr = (attr != LCK_ATTR_NULL) ?
					attr : &LockDefaultLckAttr;

	hw_lock_byte_init(&lck->lck_rw_interlock);
	lck->lck_rw_want_write = FALSE;
	lck->lck_rw_want_upgrade = FALSE;
	lck->lck_rw_shared_count = 0;
	lck->lck_rw_can_sleep = TRUE;
	lck->lck_r_waiting = lck->lck_w_waiting = 0;
	lck->lck_rw_priv_excl = ((lck_attr->lck_attr_val &
				LCK_ATTR_RW_SHARED_PRIORITY) == 0);

	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_RW);
}
/*
 *      Routine:        lck_rw_destroy
 */
void
lck_rw_destroy(
	lck_rw_t	*lck,
	lck_grp_t	*grp)
{
	if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
		return;
#if MACH_LDEBUG
	lck_rw_assert(lck, LCK_RW_ASSERT_NOTHELD);
#endif
	lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
	lck_grp_lckcnt_decr(grp, LCK_TYPE_RW);
	lck_grp_deallocate(grp);
}
/*
 *	Sleep locks.  These use the same data structure and algorithm
 *	as the spin locks, but the process sleeps while it is waiting
 *	for the lock.  These work on uniprocessor systems.
 */

#define DECREMENTER_TIMEOUT 1000000

#define RW_LOCK_READER_EVENT(x)		\
		((event_t) (((unsigned char*) (x)) + (offsetof(lck_rw_t, lck_rw_tag))))

#define RW_LOCK_WRITER_EVENT(x)		\
		((event_t) (((unsigned char*) (x)) + (offsetof(lck_rw_t, lck_rw_pad8))))
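/*
 * Note (illustrative, not from the original comments): the two macros above
 * derive distinct wait-event addresses from the same lck_rw_t, so readers
 * and writers block on separate channels.  For example, a
 * thread_wakeup(RW_LOCK_WRITER_EVENT(lck)) wakes only threads that slept
 * via assert_wait(RW_LOCK_WRITER_EVENT(lck), ...), leaving blocked readers
 * undisturbed.
 */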
/*
 * We disable interrupts while holding the RW interlock to prevent an
 * interrupt from exacerbating hold time.
 * Hence, local helper functions lck_interlock_lock()/lck_interlock_unlock().
 */
static boolean_t
lck_interlock_lock(lck_rw_t *lck)
{
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);
	hw_lock_byte_lock(&lck->lck_rw_interlock);

	return istate;
}

static void
lck_interlock_unlock(lck_rw_t *lck, boolean_t istate)
{
	hw_lock_byte_unlock(&lck->lck_rw_interlock);
	ml_set_interrupts_enabled(istate);
}

/*
 * This inline is used when busy-waiting for an rw lock.
 * If interrupts were disabled when the lock primitive was called,
 * we poll the IPI handler for pending tlb flushes.
 * XXX This is a hack to avoid deadlocking on the pmap_system_lock.
 */
static inline void
lck_rw_lock_pause(boolean_t interrupts_enabled)
{
	if (!interrupts_enabled)
		handle_pending_TLB_flushes();
	cpu_pause();
}
/*
 * compute the deadline to spin against when
 * waiting for a change of state on a lck_rw_t
 */
static inline uint64_t
lck_rw_deadline_for_spin(lck_rw_t *lck)
{
	if (lck->lck_rw_can_sleep) {
		if (lck->lck_r_waiting || lck->lck_w_waiting || lck->lck_rw_shared_count > machine_info.max_cpus) {
			/*
			 * there are already threads waiting on this lock... this
			 * implies that they have spun beyond their deadlines waiting for
			 * the desired state to show up so we will not bother spinning at this time...
			 * or
			 * the current number of threads sharing this lock exceeds our capacity to run them
			 * concurrently and since all states we're going to spin for require the rw_shared_count
			 * to be at 0, we'll not bother spinning since the latency for this to happen is
			 * unpredictable...
			 */
			return (mach_absolute_time());
		}
		return (mach_absolute_time() + MutexSpin);
	} else
		return (mach_absolute_time() + (100000LL * 1000000000LL));
}
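/*
 * Illustrative example (not from the original source): for a sleepable lock
 * with no waiters and a small reader count, the deadline is simply
 * "now + MutexSpin" absolute-time units, so the callers below spin for
 * roughly the configured mutex-spin window:
 *
 *	uint64_t deadline = lck_rw_deadline_for_spin(lck);
 *	while (!lck_rw_grab_want(lck) && mach_absolute_time() < deadline)
 *		lck_rw_lock_pause(istate);
 */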
/*
 *	Routine:	lck_rw_lock_exclusive
 */
void
lck_rw_lock_exclusive_gen(
	lck_rw_t	*lck)
{
	__kdebug_only uintptr_t	trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
	uint64_t	deadline = 0;
	int		slept = 0;
	int		gotlock = 0;
	int		lockheld = 0;
	wait_result_t	res = 0;
	boolean_t	istate = -1;

	boolean_t dtrace_ls_initialized = FALSE;
	boolean_t dtrace_rwl_excl_spin, dtrace_rwl_excl_block, dtrace_ls_enabled = FALSE;
	uint64_t wait_interval = 0;
	int readers_at_sleep = 0;

	/*
	 *	Try to acquire the lck_rw_want_write bit.
	 */
	while ( !lck_rw_grab_want(lck)) {

		if (dtrace_ls_initialized == FALSE) {
			dtrace_ls_initialized = TRUE;
			dtrace_rwl_excl_spin = (lockstat_probemap[LS_LCK_RW_LOCK_EXCL_SPIN] != 0);
			dtrace_rwl_excl_block = (lockstat_probemap[LS_LCK_RW_LOCK_EXCL_BLOCK] != 0);
			dtrace_ls_enabled = dtrace_rwl_excl_spin || dtrace_rwl_excl_block;
			if (dtrace_ls_enabled) {
				/*
				 * Either sleeping or spinning is happening,
				 * start a timing of our delay interval now.
				 */
				readers_at_sleep = lck->lck_rw_shared_count;
				wait_interval = mach_absolute_time();
			}
		}
		if (istate == -1)
			istate = ml_get_interrupts_enabled();

		deadline = lck_rw_deadline_for_spin(lck);

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_SPIN_CODE) | DBG_FUNC_START, trace_lck, 0, 0, 0, 0);

		while (((gotlock = lck_rw_grab_want(lck)) == 0) && mach_absolute_time() < deadline)
			lck_rw_lock_pause(istate);

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_SPIN_CODE) | DBG_FUNC_END, trace_lck, 0, 0, gotlock, 0);

		if (gotlock)
			break;
		/*
		 * if we get here, the deadline has expired w/o us
		 * being able to grab the lock exclusively
		 * check to see if we're allowed to do a thread_block
		 */
		if (lck->lck_rw_can_sleep) {

			istate = lck_interlock_lock(lck);

			if (lck->lck_rw_want_write) {

				KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_WAIT_CODE) | DBG_FUNC_START, trace_lck, 0, 0, 0, 0);

				lck->lck_w_waiting = TRUE;

				res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
				lck_interlock_unlock(lck, istate);

				if (res == THREAD_WAITING) {
					res = thread_block(THREAD_CONTINUE_NULL);
					slept++;
				}
				KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_WAIT_CODE) | DBG_FUNC_END, trace_lck, res, slept, 0, 0);
			} else {
				lck->lck_rw_want_write = TRUE;
				lck_interlock_unlock(lck, istate);
				break;
			}
		}
	}
	/*
	 * Wait for readers (and upgrades) to finish...
	 * the test for these conditions must be done simultaneously with
	 * a check of the interlock not being held since
	 * the rw_shared_count will drop to 0 first and then want_upgrade
	 * will be set to 1 in the shared_to_exclusive scenario... those
	 * adjustments are done behind the interlock and represent an
	 * atomic change in state and must be considered as such
	 * however, once we see the read count at 0, the want_upgrade not set
	 * and the interlock not held, we are safe to proceed
	 */
	while (lck_rw_held_read_or_upgrade(lck)) {

		/*
		 * Either sleeping or spinning is happening, start
		 * a timing of our delay interval now.  If we set it
		 * to -1 we don't have accurate data so we cannot later
		 * decide to record a dtrace spin or sleep event.
		 */
		if (dtrace_ls_initialized == FALSE) {
			dtrace_ls_initialized = TRUE;
			dtrace_rwl_excl_spin = (lockstat_probemap[LS_LCK_RW_LOCK_EXCL_SPIN] != 0);
			dtrace_rwl_excl_block = (lockstat_probemap[LS_LCK_RW_LOCK_EXCL_BLOCK] != 0);
			dtrace_ls_enabled = dtrace_rwl_excl_spin || dtrace_rwl_excl_block;
			if (dtrace_ls_enabled) {
				/*
				 * Either sleeping or spinning is happening,
				 * start a timing of our delay interval now.
				 */
				readers_at_sleep = lck->lck_rw_shared_count;
				wait_interval = mach_absolute_time();
			}
		}
		if (istate == -1)
			istate = ml_get_interrupts_enabled();

		deadline = lck_rw_deadline_for_spin(lck);

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_SPIN_CODE) | DBG_FUNC_START, trace_lck, 0, 0, 0, 0);

		while ((lockheld = lck_rw_held_read_or_upgrade(lck)) && mach_absolute_time() < deadline)
			lck_rw_lock_pause(istate);

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_SPIN_CODE) | DBG_FUNC_END, trace_lck, 0, 0, lockheld, 0);

		if ( !lockheld)
			break;
		/*
		 * if we get here, the deadline has expired w/o us
		 * being able to grab the lock exclusively
		 * check to see if we're allowed to do a thread_block
		 */
		if (lck->lck_rw_can_sleep) {

			istate = lck_interlock_lock(lck);

			if (lck->lck_rw_shared_count != 0 || lck->lck_rw_want_upgrade) {
				KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_WAIT_CODE) | DBG_FUNC_START, trace_lck, 0, 0, 0, 0);

				lck->lck_w_waiting = TRUE;

				res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
				lck_interlock_unlock(lck, istate);

				if (res == THREAD_WAITING) {
					res = thread_block(THREAD_CONTINUE_NULL);
					slept++;
				}
				KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_WAIT_CODE) | DBG_FUNC_END, trace_lck, res, slept, 0, 0);
			} else {
				lck_interlock_unlock(lck, istate);
				/*
				 * must own the lock now, since we checked for
				 * readers or upgrade owner behind the interlock
				 * no need for a call to 'lck_rw_held_read_or_upgrade'
				 */
				break;
			}
		}
	}

	/*
	 * Decide what latencies we suffered that are Dtrace events.
	 * If we have set wait_interval, then we either spun or slept.
	 * At least we get out from under the interlock before we record
	 * which is the best we can do here to minimize the impact.
	 * If we have set wait_interval to -1, then dtrace was not enabled when we
	 * started sleeping/spinning so we don't record this event.
	 */
	if (dtrace_ls_enabled == TRUE) {
		if (slept == 0) {
			LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_EXCL_SPIN, lck,
			    mach_absolute_time() - wait_interval, 1);
		} else {
			/*
			 * For the blocking case, we also record if when we blocked
			 * it was held for read or write, and how many readers.
			 * Notice that above we recorded this before we dropped
			 * the interlock so the count is accurate.
			 */
			LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_EXCL_BLOCK, lck,
			    mach_absolute_time() - wait_interval, 1,
			    (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep);
		}
	}
	LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, lck, 1);
}
/*
 *      Routine:        lck_rw_done_gen
 *
 *	called from the assembly language wrapper...
 *	prior_lock_state is the value in the 1st
 *	word of the lock at the time of a successful
 *	atomic compare and exchange with the new value...
 *	it represents the state of the lock before we
 *	decremented the rw_shared_count or cleared either
 *	rw_want_upgrade or rw_want_write and
 *	the lck_x_waiting bits... since the wrapper
 *	routine has already changed the state atomically,
 *	we just need to decide if we should
 *	wake up anyone and what value to return... we do
 *	this by examining the state of the lock before
 *	we changed it
 */
lck_rw_type_t
lck_rw_done_gen(
	lck_rw_t	*lck,
	int		prior_lock_state)
{
	lck_rw_t	*fake_lck;
	lck_rw_type_t	lock_type;
	thread_t	thread;
	uint32_t	rwlock_count;

	/*
	 * prior_lock state is a snapshot of the 1st word of the
	 * lock in question... we'll fake up a pointer to it
	 * and carefully not access anything beyond what's defined
	 * in the first word of a lck_rw_t
	 */
	fake_lck = (lck_rw_t *)&prior_lock_state;

	if (fake_lck->lck_rw_shared_count <= 1) {
		if (fake_lck->lck_w_waiting)
			thread_wakeup(RW_LOCK_WRITER_EVENT(lck));

		if (!(fake_lck->lck_rw_priv_excl && fake_lck->lck_w_waiting) && fake_lck->lck_r_waiting)
			thread_wakeup(RW_LOCK_READER_EVENT(lck));
	}
	if (fake_lck->lck_rw_shared_count)
		lock_type = LCK_RW_TYPE_SHARED;
	else
		lock_type = LCK_RW_TYPE_EXCLUSIVE;

	/* Check if dropping the lock means that we need to unpromote */
	thread = current_thread();
	rwlock_count = thread->rwlock_count--;

	if (rwlock_count == 0) {
		panic("rw lock count underflow for thread %p", thread);
	}
	if ((rwlock_count == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
		/* sched_flags checked without lock, but will be rechecked while clearing */
		lck_rw_clear_promotion(thread);
	}

	LOCKSTAT_RECORD(LS_LCK_RW_DONE_RELEASE, lck, lock_type == LCK_RW_TYPE_SHARED ? 0 : 1);

	return(lock_type);
}
/*
 *	Routine:	lck_rw_unlock
 */
void
lck_rw_unlock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		lck_rw_unlock_shared(lck);
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		lck_rw_unlock_exclusive(lck);
	else
		panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
}


/*
 *	Routine:	lck_rw_unlock_shared
 */
void
lck_rw_unlock_shared(
	lck_rw_t	*lck)
{
	lck_rw_type_t	ret;

	ret = lck_rw_done(lck);

	if (ret != LCK_RW_TYPE_SHARED)
		panic("lck_rw_unlock_shared(): lock held in mode: %d\n", ret);
}


/*
 *	Routine:	lck_rw_unlock_exclusive
 */
void
lck_rw_unlock_exclusive(
	lck_rw_t	*lck)
{
	lck_rw_type_t	ret;

	ret = lck_rw_done(lck);

	if (ret != LCK_RW_TYPE_EXCLUSIVE)
		panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret);
}


/*
 *	Routine:	lck_rw_lock
 */
void
lck_rw_lock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		lck_rw_lock_shared(lck);
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		lck_rw_lock_exclusive(lck);
	else
		panic("lck_rw_lock(): Invalid RW lock type: %x\n", lck_rw_type);
}
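/*
 * Illustrative usage sketch (not part of the original file): callers pair
 * lck_rw_lock()/lck_rw_unlock() with matching types, e.g.
 *
 *	lck_rw_lock(lck, LCK_RW_TYPE_SHARED);
 *	... read-only access to the protected data ...
 *	lck_rw_unlock(lck, LCK_RW_TYPE_SHARED);
 *
 * or call lck_rw_done(lck), which infers the held mode and returns it.
 */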
/*
 *	Routine:	lck_rw_lock_shared_gen
 *	Function:
 *		assembly fast path code has determined that this lock
 *		is held exclusively... this is where we spin/block
 *		until we can acquire the lock in the shared mode
 */
void
lck_rw_lock_shared_gen(
	lck_rw_t	*lck)
{
	__kdebug_only uintptr_t	trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
	uint64_t	deadline = 0;
	int		gotlock = 0;
	int		slept = 0;
	wait_result_t	res = 0;
	boolean_t	istate = -1;

	uint64_t wait_interval = 0;
	int readers_at_sleep = 0;
	boolean_t dtrace_ls_initialized = FALSE;
	boolean_t dtrace_rwl_shared_spin, dtrace_rwl_shared_block, dtrace_ls_enabled = FALSE;

	while ( !lck_rw_grab_shared(lck)) {

		if (dtrace_ls_initialized == FALSE) {
			dtrace_ls_initialized = TRUE;
			dtrace_rwl_shared_spin = (lockstat_probemap[LS_LCK_RW_LOCK_SHARED_SPIN] != 0);
			dtrace_rwl_shared_block = (lockstat_probemap[LS_LCK_RW_LOCK_SHARED_BLOCK] != 0);
			dtrace_ls_enabled = dtrace_rwl_shared_spin || dtrace_rwl_shared_block;
			if (dtrace_ls_enabled) {
				/*
				 * Either sleeping or spinning is happening,
				 * start a timing of our delay interval now.
				 */
				readers_at_sleep = lck->lck_rw_shared_count;
				wait_interval = mach_absolute_time();
			}
		}
		if (istate == -1)
			istate = ml_get_interrupts_enabled();

		deadline = lck_rw_deadline_for_spin(lck);

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_SPIN_CODE) | DBG_FUNC_START,
			     trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0);

		while (((gotlock = lck_rw_grab_shared(lck)) == 0) && mach_absolute_time() < deadline)
			lck_rw_lock_pause(istate);

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_SPIN_CODE) | DBG_FUNC_END,
			     trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, gotlock, 0);

		if (gotlock)
			break;
		/*
		 * if we get here, the deadline has expired w/o us
		 * being able to grab the lock for read
		 * check to see if we're allowed to do a thread_block
		 */
		if (lck->lck_rw_can_sleep) {

			istate = lck_interlock_lock(lck);

			if ((lck->lck_rw_want_write || lck->lck_rw_want_upgrade) &&
			    ((lck->lck_rw_shared_count == 0) || lck->lck_rw_priv_excl)) {

				KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_WAIT_CODE) | DBG_FUNC_START,
					     trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0);

				lck->lck_r_waiting = TRUE;

				res = assert_wait(RW_LOCK_READER_EVENT(lck), THREAD_UNINT);
				lck_interlock_unlock(lck, istate);

				if (res == THREAD_WAITING) {
					res = thread_block(THREAD_CONTINUE_NULL);
					slept++;
				}
				KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_WAIT_CODE) | DBG_FUNC_END,
					     trace_lck, res, slept, 0, 0);
			} else {
				lck->lck_rw_shared_count++;
				lck_interlock_unlock(lck, istate);
				break;
			}
		}
	}

	if (dtrace_ls_enabled == TRUE) {
		if (slept == 0) {
			LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_SPIN, lck, mach_absolute_time() - wait_interval, 0);
		} else {
			LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_BLOCK, lck,
			    mach_absolute_time() - wait_interval, 0,
			    (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep);
		}
	}
	LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, lck, 0);
}
/*
 *	Routine:	lck_rw_lock_shared_to_exclusive_failure
 *	Function:
 *		assembly fast path code has already dropped our read
 *		count and determined that someone else owns 'lck_rw_want_upgrade'
 *		if 'lck_rw_shared_count' == 0, it's also already dropped 'lck_w_waiting'
 *		all we need to do here is determine if a wakeup is needed
 */
boolean_t
lck_rw_lock_shared_to_exclusive_failure(
	lck_rw_t	*lck,
	int		prior_lock_state)
{
	lck_rw_t	*fake_lck;
	thread_t	thread = current_thread();
	uint32_t	rwlock_count;

	/* Check if dropping the lock means that we need to unpromote */
	rwlock_count = thread->rwlock_count--;
	if (rwlock_count == 0) {
		panic("rw lock count underflow for thread %p", thread);
	}
	if ((rwlock_count == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
		/* sched_flags checked without lock, but will be rechecked while clearing */
		lck_rw_clear_promotion(thread);
	}

	/*
	 * prior_lock state is a snapshot of the 1st word of the
	 * lock in question... we'll fake up a pointer to it
	 * and carefully not access anything beyond what's defined
	 * in the first word of a lck_rw_t
	 */
	fake_lck = (lck_rw_t *)&prior_lock_state;

	if (fake_lck->lck_w_waiting && fake_lck->lck_rw_shared_count == 1) {
		/*
		 *	Someone else has requested upgrade.
		 *	Since we've released the read lock, wake
		 *	him up if he's blocked waiting
		 */
		thread_wakeup(RW_LOCK_WRITER_EVENT(lck));
	}
	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_NONE,
		     VM_KERNEL_UNSLIDE_OR_PERM(lck), lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, 0, 0);

	return (FALSE);
}
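/*
 * Illustrative caller pattern (not part of the original file): a failed
 * upgrade has already dropped the read hold, so the caller must reacquire
 * from scratch rather than unlock:
 *
 *	if (lck_rw_lock_shared_to_exclusive(lck) == FALSE)
 *		lck_rw_lock_exclusive(lck);	... the read hold is already gone ...
 */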
/*
 *	Routine:	lck_rw_lock_shared_to_exclusive_success
 *	Function:
 *		assembly fast path code has already dropped our read
 *		count and successfully acquired 'lck_rw_want_upgrade'
 *		we just need to wait for the rest of the readers to drain
 *		and then we can return as the exclusive holder of this lock
 */
boolean_t
lck_rw_lock_shared_to_exclusive_success(
	lck_rw_t	*lck)
{
	__kdebug_only uintptr_t	trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
	uint64_t	deadline = 0;
	int		slept = 0;
	int		still_shared = 0;
	wait_result_t	res;
	boolean_t	istate = -1;

	uint64_t wait_interval = 0;
	int readers_at_sleep = 0;
	boolean_t dtrace_ls_initialized = FALSE;
	boolean_t dtrace_rwl_shared_to_excl_spin, dtrace_rwl_shared_to_excl_block, dtrace_ls_enabled = FALSE;

	while (lck->lck_rw_shared_count != 0) {

		if (dtrace_ls_initialized == FALSE) {
			dtrace_ls_initialized = TRUE;
			dtrace_rwl_shared_to_excl_spin = (lockstat_probemap[LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN] != 0);
			dtrace_rwl_shared_to_excl_block = (lockstat_probemap[LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK] != 0);
			dtrace_ls_enabled = dtrace_rwl_shared_to_excl_spin || dtrace_rwl_shared_to_excl_block;
			if (dtrace_ls_enabled) {
				/*
				 * Either sleeping or spinning is happening,
				 * start a timing of our delay interval now.
				 */
				readers_at_sleep = lck->lck_rw_shared_count;
				wait_interval = mach_absolute_time();
			}
		}
		if (istate == -1)
			istate = ml_get_interrupts_enabled();

		deadline = lck_rw_deadline_for_spin(lck);

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_SPIN_CODE) | DBG_FUNC_START,
			     trace_lck, lck->lck_rw_shared_count, 0, 0, 0);

		while ((still_shared = lck->lck_rw_shared_count) && mach_absolute_time() < deadline)
			lck_rw_lock_pause(istate);

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_SPIN_CODE) | DBG_FUNC_END,
			     trace_lck, lck->lck_rw_shared_count, 0, 0, 0);

		if ( !still_shared)
			break;
		/*
		 * if we get here, the deadline has expired w/o
		 * the rw_shared_count having drained to 0
		 * check to see if we're allowed to do a thread_block
		 */
		if (lck->lck_rw_can_sleep) {

			istate = lck_interlock_lock(lck);

			if (lck->lck_rw_shared_count != 0) {
				KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_WAIT_CODE) | DBG_FUNC_START,
					     trace_lck, lck->lck_rw_shared_count, 0, 0, 0);

				lck->lck_w_waiting = TRUE;

				res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
				lck_interlock_unlock(lck, istate);

				if (res == THREAD_WAITING) {
					res = thread_block(THREAD_CONTINUE_NULL);
					slept++;
				}
				KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_WAIT_CODE) | DBG_FUNC_END,
					     trace_lck, res, slept, 0, 0);
			} else {
				lck_interlock_unlock(lck, istate);
				break;
			}
		}
	}
	/*
	 * We infer whether we took the sleep/spin path above by checking readers_at_sleep.
	 */
	if (dtrace_ls_enabled == TRUE) {
		if (slept == 0) {
			LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, lck, mach_absolute_time() - wait_interval, 0);
		} else {
			LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, lck,
			    mach_absolute_time() - wait_interval, 1,
			    (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep);
		}
	}
	LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, lck, 1);

	return (TRUE);
}
/*
 *	Routine:	lck_rw_lock_exclusive_to_shared
 *	Function:
 *		assembly fast path has already dropped
 *		our exclusive state and bumped lck_rw_shared_count
 *		all we need to do here is determine if anyone
 *		needs to be awakened.
 */
void
lck_rw_lock_exclusive_to_shared_gen(
	lck_rw_t	*lck,
	int		prior_lock_state)
{
	__kdebug_only uintptr_t	trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
	lck_rw_t		*fake_lck;

	/*
	 * prior_lock state is a snapshot of the 1st word of the
	 * lock in question... we'll fake up a pointer to it
	 * and carefully not access anything beyond what's defined
	 * in the first word of a lck_rw_t
	 */
	fake_lck = (lck_rw_t *)&prior_lock_state;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
		     trace_lck, fake_lck->lck_rw_want_write, fake_lck->lck_rw_want_upgrade, 0, 0);

	/*
	 * don't wake up anyone waiting to take the lock exclusively
	 * since we hold a read count... when the read count drops to 0,
	 * the writers will be woken.
	 *
	 * wake up any waiting readers if we don't have any writers waiting,
	 * or the lock is NOT marked as rw_priv_excl (writers have privilege)
	 */
	if (!(fake_lck->lck_rw_priv_excl && fake_lck->lck_w_waiting) && fake_lck->lck_r_waiting)
		thread_wakeup(RW_LOCK_READER_EVENT(lck));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
		     trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, lck->lck_rw_shared_count, 0);

	LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, lck, 0);
}
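/*
 * Illustrative usage sketch (not part of the original file): a writer that
 * wants to keep reading after publishing its update downgrades instead of
 * unlocking, so no other writer can slip in between:
 *
 *	lck_rw_lock_exclusive(lck);
 *	... modify the protected data ...
 *	lck_rw_lock_exclusive_to_shared(lck);
 *	... continue reading under the shared hold ...
 *	lck_rw_unlock_shared(lck);
 */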
/*
 *      Routine:        lck_rw_try_lock
 */
boolean_t
lck_rw_try_lock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		return(lck_rw_try_lock_shared(lck));
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		return(lck_rw_try_lock_exclusive(lck));
	else
		panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type);
	return(FALSE);
}
void
lck_rw_assert(
	lck_rw_t	*lck,
	unsigned int	type)
{
	switch (type) {
	case LCK_RW_ASSERT_SHARED:
		if (lck->lck_rw_shared_count != 0) {
			return;
		}
		break;
	case LCK_RW_ASSERT_EXCLUSIVE:
		if ((lck->lck_rw_want_write ||
		     lck->lck_rw_want_upgrade) &&
		    lck->lck_rw_shared_count == 0) {
			return;
		}
		break;
	case LCK_RW_ASSERT_HELD:
		if (lck->lck_rw_want_write ||
		    lck->lck_rw_want_upgrade ||
		    lck->lck_rw_shared_count != 0) {
			return;
		}
		break;
	case LCK_RW_ASSERT_NOTHELD:
		if (!(lck->lck_rw_want_write ||
		      lck->lck_rw_want_upgrade ||
		      lck->lck_rw_shared_count != 0)) {
			return;
		}
		break;
	default:
		break;
	}

	panic("rw lock (%p)%s held (mode=%u), first word %08x\n", lck, (type == LCK_RW_ASSERT_NOTHELD ? "" : " not"), type, *(uint32_t *)lck);
}
/* On return to userspace, this routine is called if the rwlock_count is somehow imbalanced */
void
lck_rw_clear_promotions_x86(thread_t thread)
{
#if MACH_LDEBUG
	/* It's fatal to leave a RW lock locked and return to userspace */
	panic("%u rw lock(s) held on return to userspace for thread %p", thread->rwlock_count, thread);
#else
	/* Paper over the issue */
	thread->rwlock_count = 0;
	lck_rw_clear_promotion(thread);
#endif
}
/*
 * Routine: kdp_lck_rw_lock_is_acquired_exclusive
 * NOT SAFE: To be used only by kernel debugger to avoid deadlock.
 */
boolean_t
kdp_lck_rw_lock_is_acquired_exclusive(lck_rw_t *lck) {
	if (not_in_kdp) {
		panic("panic: rw lock exclusive check done outside of kernel debugger");
	}
	return ((lck->lck_rw_want_upgrade || lck->lck_rw_want_write) && (lck->lck_rw_shared_count == 0)) ? TRUE : FALSE;
}
#ifdef	MUTEX_ZONE
extern zone_t lck_mtx_zone;
#endif

/*
 *      Routine:        lck_mtx_alloc_init
 */
lck_mtx_t *
lck_mtx_alloc_init(
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_mtx_t	*lck;
#ifdef	MUTEX_ZONE
	if ((lck = (lck_mtx_t *)zalloc(lck_mtx_zone)) != 0)
		lck_mtx_init(lck, grp, attr);
#else
	if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
		lck_mtx_init(lck, grp, attr);
#endif
	return(lck);
}

/*
 *      Routine:        lck_mtx_free
 */
void
lck_mtx_free(
	lck_mtx_t	*lck,
	lck_grp_t	*grp)
{
	lck_mtx_destroy(lck, grp);
#ifdef	MUTEX_ZONE
	zfree(lck_mtx_zone, lck);
#else
	kfree(lck, sizeof(lck_mtx_t));
#endif
}
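/*
 * Illustrative usage sketch (not part of the original file): the mutex
 * lifecycle mirrors the spin and rw variants above; group/attr names are
 * placeholders.
 *
 *	lck_mtx_t *m = lck_mtx_alloc_init(my_grp, LCK_ATTR_NULL);
 *	lck_mtx_lock(m);
 *	... critical section, may block ...
 *	lck_mtx_unlock(m);
 *	lck_mtx_free(m, my_grp);
 */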
/*
 *      Routine:        lck_mtx_ext_init
 */
void
lck_mtx_ext_init(
	lck_mtx_ext_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	bzero((void *)lck, sizeof(lck_mtx_ext_t));

	if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
		lck->lck_mtx_deb.type = MUTEX_TAG;
		lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
	}

	lck->lck_mtx_grp = grp;

	if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
		lck->lck_mtx_attr |= LCK_MTX_ATTR_STAT;

	lck->lck_mtx.lck_mtx_is_ext = 1;
	lck->lck_mtx.lck_mtx_sw.lck_mtxd.lck_mtxd_pad32 = 0xFFFFFFFF;
}
/*
 *      Routine:        lck_mtx_init
 */
void
lck_mtx_init(
	lck_mtx_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_mtx_ext_t	*lck_ext;
	lck_attr_t	*lck_attr;

	if (attr != LCK_ATTR_NULL)
		lck_attr = attr;
	else
		lck_attr = &LockDefaultLckAttr;

	if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
		if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) {
			lck_mtx_ext_init(lck_ext, grp, lck_attr);
			lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
			lck->lck_mtx_ptr = lck_ext;
		}
	} else {
		lck->lck_mtx_owner = 0;
		lck->lck_mtx_state = 0;
	}
	lck->lck_mtx_sw.lck_mtxd.lck_mtxd_pad32 = 0xFFFFFFFF;
	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
}
/*
 *      Routine:        lck_mtx_init_ext
 */
void
lck_mtx_init_ext(
	lck_mtx_t	*lck,
	lck_mtx_ext_t	*lck_ext,
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_attr_t	*lck_attr;

	if (attr != LCK_ATTR_NULL)
		lck_attr = attr;
	else
		lck_attr = &LockDefaultLckAttr;

	if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
		lck_mtx_ext_init(lck_ext, grp, lck_attr);
		lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
		lck->lck_mtx_ptr = lck_ext;
	} else {
		lck->lck_mtx_owner = 0;
		lck->lck_mtx_state = 0;
	}
	lck->lck_mtx_sw.lck_mtxd.lck_mtxd_pad32 = 0xFFFFFFFF;

	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
}
/*
 *      Routine:        lck_mtx_destroy
 */
void
lck_mtx_destroy(
	lck_mtx_t	*lck,
	lck_grp_t	*grp)
{
	boolean_t lck_is_indirect;

	if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
		return;
#if MACH_LDEBUG
	lck_mtx_assert(lck, LCK_MTX_ASSERT_NOTOWNED);
#endif
	lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);

	lck_mtx_lock_mark_destroyed(lck);

	if (lck_is_indirect)
		kfree(lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));
	lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX);
	lck_grp_deallocate(grp);
}


#define	LCK_MTX_LCK_WAIT_CODE		0x20
#define	LCK_MTX_LCK_WAKEUP_CODE		0x21
#define	LCK_MTX_LCK_SPIN_CODE		0x22
#define	LCK_MTX_LCK_ACQUIRE_CODE	0x23
#define	LCK_MTX_LCK_DEMOTE_CODE	0x24
/*
 * Routine:	lck_mtx_unlock_wakeup_x86
 *
 * Invoked on unlock when there is
 * contention (i.e. the assembly routine sees that
 * mutex->lck_mtx_waiters != 0 or
 * mutex->lck_mtx_promoted != 0)...
 *
 * neither the mutex or interlock is held
 */
void
lck_mtx_unlock_wakeup_x86 (
	lck_mtx_t	*mutex,
	int		prior_lock_state)
{
	__kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(mutex);
	lck_mtx_t	fake_lck;

	/*
	 * prior_lock state is a snapshot of the 2nd word of the
	 * lock in question... we'll fake up a lock with the bits
	 * copied into place and carefully not access anything
	 * beyond what's defined in the second word of a lck_mtx_t
	 */
	fake_lck.lck_mtx_state = prior_lock_state;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAKEUP_CODE) | DBG_FUNC_START,
		     trace_lck, fake_lck.lck_mtx_promoted, fake_lck.lck_mtx_waiters, fake_lck.lck_mtx_pri, 0);

	if (__probable(fake_lck.lck_mtx_waiters)) {
		if (fake_lck.lck_mtx_waiters > 1)
			thread_wakeup_one_with_pri(LCK_MTX_EVENT(mutex), fake_lck.lck_mtx_pri);
		else
			thread_wakeup_one(LCK_MTX_EVENT(mutex));
	}

	if (__improbable(fake_lck.lck_mtx_promoted)) {
		thread_t	thread = current_thread();

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_DEMOTE_CODE) | DBG_FUNC_NONE,
			     thread_tid(thread), thread->promotions, thread->sched_flags & TH_SFLAG_PROMOTED, 0, 0);

		if (thread->promotions > 0) {
			spl_t	s = splsched();

			thread_lock(thread);

			if (--thread->promotions == 0 && (thread->sched_flags & TH_SFLAG_PROMOTED)) {

				thread->sched_flags &= ~TH_SFLAG_PROMOTED;

				if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
					/* Thread still has a RW lock promotion */
				} else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
					KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEMOTE) | DBG_FUNC_NONE,
						      thread->sched_pri, DEPRESSPRI, 0, trace_lck, 0);

					set_sched_pri(thread, DEPRESSPRI);
				} else {
					if (thread->base_pri < thread->sched_pri) {
						KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEMOTE) | DBG_FUNC_NONE,
							      thread->sched_pri, thread->base_pri, 0, trace_lck, 0);

						thread_recompute_sched_pri(thread, FALSE);
					}
				}
			}
			thread_unlock(thread);
			splx(s);
		}
	}
	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAKEUP_CODE) | DBG_FUNC_END,
		     trace_lck, 0, mutex->lck_mtx_waiters, 0, 0);
}
/*
 * Routine:	lck_mtx_lock_acquire_x86
 *
 * Invoked on acquiring the mutex when there is
 * contention (i.e. the assembly routine sees that
 * mutex->lck_mtx_waiters != 0 or
 * thread->was_promoted_on_wakeup != 0)...
 *
 * mutex is owned...  interlock is held... preemption is disabled
 */
void
lck_mtx_lock_acquire_x86(
	lck_mtx_t	*mutex)
{
	__kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(mutex);
	thread_t	thread;
	integer_t	priority;
	spl_t		s;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_ACQUIRE_CODE) | DBG_FUNC_START,
		     trace_lck, thread->was_promoted_on_wakeup, mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0);

	if (mutex->lck_mtx_waiters)
		priority = mutex->lck_mtx_pri;
	else
		priority = 0;

	thread = (thread_t)mutex->lck_mtx_owner;	/* faster than current_thread() */

	if (thread->sched_pri < priority || thread->was_promoted_on_wakeup) {

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTE) | DBG_FUNC_NONE,
			      thread->sched_pri, priority, thread->was_promoted_on_wakeup, trace_lck, 0);

		s = splsched();
		thread_lock(thread);

		if (thread->sched_pri < priority) {
			/* Do not promote past promotion ceiling */
			assert(priority <= MAXPRI_PROMOTE);
			set_sched_pri(thread, priority);
		}
		if (mutex->lck_mtx_promoted == 0) {
			mutex->lck_mtx_promoted = 1;

			thread->promotions++;
			thread->sched_flags |= TH_SFLAG_PROMOTED;
		}
		thread->was_promoted_on_wakeup = 0;

		thread_unlock(thread);
		splx(s);
	}
	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_ACQUIRE_CODE) | DBG_FUNC_END,
		     trace_lck, 0, mutex->lck_mtx_waiters, 0, 0);
}
static boolean_t
lck_mtx_interlock_try_lock(lck_mtx_t *mutex, boolean_t *istate)
{
	boolean_t	retval;

	*istate = ml_set_interrupts_enabled(FALSE);
	retval = lck_mtx_ilk_try_lock(mutex);

	if (retval == 0)
		ml_set_interrupts_enabled(*istate);

	return retval;
}

static void
lck_mtx_interlock_unlock(lck_mtx_t *mutex, boolean_t istate)
{
	lck_mtx_ilk_unlock(mutex);
	ml_set_interrupts_enabled(istate);
}
/*
 * Routine:	lck_mtx_lock_spinwait_x86
 *
 * Invoked trying to acquire a mutex when there is contention but
 * the holder is running on another processor. We spin for up to a maximum
 * time waiting for the lock to be released.
 *
 * Called with the interlock unlocked.
 * returns 0 if mutex acquired
 * returns 1 if we spun
 * returns 2 if we didn't spin due to the holder not running
 */
int
lck_mtx_lock_spinwait_x86(
	lck_mtx_t	*mutex)
{
	__kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(mutex);
	thread_t	holder;
	uint64_t	overall_deadline;
	uint64_t	check_owner_deadline;
	uint64_t	cur_time;
	int		retval = 1;
	int		loopcount = 0;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_START,
		     trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, 0, 0);

	cur_time = mach_absolute_time();
	overall_deadline = cur_time + MutexSpin;
	check_owner_deadline = cur_time;

	/*
	 * Spin while:
	 *   - mutex is locked, and
	 *   - its locked as a spin lock, and
	 *   - owner is running on another processor, and
	 *   - owner (processor) is not idling, and
	 *   - we haven't spun for long enough.
	 */
	do {
		if (__probable(lck_mtx_lock_grab_mutex(mutex))) {
			retval = 0;
			break;
		}
		cur_time = mach_absolute_time();

		if (cur_time >= overall_deadline)
			break;

		if (cur_time >= check_owner_deadline && mutex->lck_mtx_owner) {
			boolean_t	istate;

			if (lck_mtx_interlock_try_lock(mutex, &istate)) {

				if ((holder = (thread_t) mutex->lck_mtx_owner) != NULL) {

					if ( !(holder->machine.specFlags & OnProc) ||
					     (holder->state & TH_IDLE)) {

						lck_mtx_interlock_unlock(mutex, istate);

						if (loopcount == 0)
							retval = 2;
						break;
					}
				}
				lck_mtx_interlock_unlock(mutex, istate);

				check_owner_deadline = cur_time + (MutexSpin / 4);
			}
		}
		cpu_pause();

		loopcount++;

	} while (TRUE);

	/*
	 * We've already kept a count via overall_deadline of how long we spun.
	 * If dtrace is active, then we compute backwards to decide how
	 * long we spun.
	 *
	 * Note that we record a different probe id depending on whether
	 * this is a direct or indirect mutex.  This allows us to
	 * penalize only lock groups that have debug/stats enabled
	 * with dtrace processing if desired.
	 */
	if (__probable(mutex->lck_mtx_is_ext == 0)) {
		LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN, mutex,
			mach_absolute_time() - (overall_deadline - MutexSpin));
	} else {
		LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_SPIN, mutex,
			mach_absolute_time() - (overall_deadline - MutexSpin));
	}
	/* The lockstat acquire event is recorded by the assembly code beneath us. */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_END,
		     trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, retval, 0);

	return retval;
}
/*
 * Routine:	lck_mtx_lock_wait_x86
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * preemption disabled...
 * returns it unlocked and with preemption enabled
 */
void
lck_mtx_lock_wait_x86 (
	lck_mtx_t	*mutex)
{
	__kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(mutex);
	thread_t	self = current_thread();
	thread_t	holder;
	integer_t	priority;
	spl_t		s;
	uint64_t	sleep_start = 0;

	if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
		sleep_start = mach_absolute_time();
	}
	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START,
		     trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0);

	priority = self->sched_pri;

	if (priority < self->base_pri)
		priority = self->base_pri;
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	/* Do not promote past promotion ceiling */
	priority = MIN(priority, MAXPRI_PROMOTE);

	if (mutex->lck_mtx_waiters == 0 || priority > mutex->lck_mtx_pri)
		mutex->lck_mtx_pri = priority;
	mutex->lck_mtx_waiters++;

	if ( (holder = (thread_t)mutex->lck_mtx_owner) &&
	     holder->sched_pri < mutex->lck_mtx_pri ) {
		s = splsched();
		thread_lock(holder);

		/* holder priority may have been bumped by another thread
		 * before thread_lock was taken
		 */
		if (holder->sched_pri < mutex->lck_mtx_pri) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTE) | DBG_FUNC_NONE,
				holder->sched_pri, priority, thread_tid(holder), trace_lck, 0);
			/* Assert that we're not altering the priority of a
			 * thread above the MAXPRI_PROMOTE band
			 */
			assert(holder->sched_pri < MAXPRI_PROMOTE);
			set_sched_pri(holder, priority);

			if (mutex->lck_mtx_promoted == 0) {
				holder->promotions++;
				holder->sched_flags |= TH_SFLAG_PROMOTED;

				mutex->lck_mtx_promoted = 1;
			}
		}
		thread_unlock(holder);
		splx(s);
	}
	assert_wait(LCK_MTX_EVENT(mutex), THREAD_UNINT);

	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END,
		     trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0);

	/*
	 * Record the Dtrace lockstat probe for blocking, block time
	 * measured from when we were entered.
	 */
	if (sleep_start) {
		if (mutex->lck_mtx_is_ext == 0) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, mutex,
			    mach_absolute_time() - sleep_start);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, mutex,
			    mach_absolute_time() - sleep_start);
		}
	}
}
/*
 * Routine: kdp_lck_mtx_lock_spin_is_acquired
 * NOT SAFE: To be used only by kernel debugger to avoid deadlock.
 * Returns: TRUE if lock is acquired.
 */
boolean_t
kdp_lck_mtx_lock_spin_is_acquired(lck_mtx_t *lck)
{
	if (not_in_kdp) {
		panic("panic: kdp_lck_mtx_lock_spin_is_acquired called outside of kernel debugger");
	}

	if (lck->lck_mtx_sw.lck_mtxd.lck_mtxd_ilocked || lck->lck_mtx_sw.lck_mtxd.lck_mtxd_mlocked) {
		return TRUE;
	}

	return FALSE;
}