/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/atomic.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/file_internal.h>
#include <sys/proc_internal.h>
#include <sys/kernel.h>
#include <sys/guarded.h>

#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/pthread_shims.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>
#include <kern/task.h>
#include <kern/telemetry.h>
#include <kern/waitq.h>
#include <kern/sched_prim.h>
#include <kern/turnstile.h>
#include <kern/zalloc.h>
#include <kern/debug.h>

#include <pexpert/pexpert.h>

#define XNU_TEST_BITMAP
#include <kern/bits.h>

#include <sys/ulock.h>
/*
 * How ulock promotion works:
 *
 * There's a requested policy field on every thread called 'promotions', which
 * expresses which ulock promotions are happening to this thread.
 * The promotion priority saturates until the promotion count goes to 0.
 *
 * We also track effective promotion qos, which is the qos before clamping.
 * This value is used for promoting a thread that another thread is waiting on,
 * so that the lock owner reinflates to the right priority after unclamping.
 *
 * This also works for non-QoS threads, which can donate base priority to QoS
 * and non-QoS threads alike.
 *
 * ulock wait applies a promotion to the owner communicated through
 * UL_UNFAIR_LOCK as waiters block, and that promotion is saturated as long as
 * there is still an owner. In ulock wake, if the waker is still the owner,
 * then it clears its ownership and drops the boost. It does NOT transfer
 * ownership/priority boost to the new thread. Instead, it selects the
 * waiting thread with the highest base priority to be woken next, and
 * relies on that thread to carry the torch for the other waiting threads.
 */
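/*
 * Illustrative sketch (not code from this file): roughly how a userspace
 * unfair lock is expected to drive the wait/wake protocol described above.
 * The __ulock_wait()/__ulock_wake() wrappers, the self_owner_value encoding,
 * and the ULOCK_WAITERS_BIT flag are assumptions made for the example; only
 * UL_UNFAIR_LOCK and ULF_NO_ERRNO come from this file.
 *
 *	// Contended acquire: publish the owner, then block, passing the value
 *	// we observed so the kernel can promote the current owner.
 *	uint32_t expected = 0;
 *	while (!atomic_compare_exchange_strong(&lock->value, &expected, self_owner_value)) {
 *		__ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO, &lock->value, expected, 0);
 *		expected = 0;
 *	}
 *
 *	// Contended release: drop ownership in userspace first, then ask the
 *	// kernel to wake the highest base-priority waiter; that thread carries
 *	// the promotion forward rather than inheriting ours.
 *	if (atomic_exchange(&lock->value, 0) & ULOCK_WAITERS_BIT) {
 *		__ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, &lock->value, 0);
 *	}
 */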
static lck_grp_t *ull_lck_grp;

typedef lck_spin_t ull_lock_t;
#define ull_lock_init(ull)      lck_spin_init(&ull->ull_lock, ull_lck_grp, NULL)
#define ull_lock_destroy(ull)   lck_spin_destroy(&ull->ull_lock, ull_lck_grp)
#define ull_lock(ull)           lck_spin_lock_grp(&ull->ull_lock, ull_lck_grp)
#define ull_unlock(ull)         lck_spin_unlock(&ull->ull_lock)
#define ull_assert_owned(ull)   LCK_SPIN_ASSERT(&ull->ull_lock, LCK_ASSERT_OWNED)
#define ull_assert_notwned(ull) LCK_SPIN_ASSERT(&ull->ull_lock, LCK_ASSERT_NOTOWNED)

#define ULOCK_TO_EVENT(ull)     ((event_t)ull)
#define EVENT_TO_ULOCK(event)   ((ull_t *)event)
typedef struct {
    union {
        struct __attribute__((packed)) {
            user_addr_t     ulk_addr;
            pid_t           ulk_pid;
        };
        struct __attribute__((packed)) {
            uint64_t        ulk_object;
            uint64_t        ulk_offset;
        };
    };
    ulk_type        ulk_key_type;
} ulk_t;

#define ULK_UADDR_LEN   (sizeof(user_addr_t) + sizeof(pid_t))
#define ULK_XPROC_LEN   (sizeof(uint64_t) + sizeof(uint64_t))
static bool
ull_key_match(ulk_t *a, ulk_t *b)
{
    if (a->ulk_key_type != b->ulk_key_type) {
        return false;
    }

    if (a->ulk_key_type == ULK_UADDR) {
        return (a->ulk_pid == b->ulk_pid) &&
               (a->ulk_addr == b->ulk_addr);
    }

    assert(a->ulk_key_type == ULK_XPROC);
    return (a->ulk_object == b->ulk_object) &&
           (a->ulk_offset == b->ulk_offset);
}
typedef struct ull {
    /*
     * ull_owner is the most recent known value for the owner of this ulock
     * i.e. it may be out of date WRT the real value in userspace.
     */
    thread_t        ull_owner; /* holds +1 thread reference */
    ulk_t           ull_key;
    ull_lock_t      ull_lock;
    uint            ull_bucket_index;
    int32_t         ull_nwaiters;
    int32_t         ull_refcount;
    uint8_t         ull_opcode;
    struct turnstile *ull_turnstile;
    queue_chain_t   ull_hash_link;
} ull_t;
extern void ulock_initialize(void);

#define ULL_MUST_EXIST  0x0001
static void ull_put(ull_t *);

static uint32_t ulock_adaptive_spin_usecs = 20;

SYSCTL_INT(_kern, OID_AUTO, ulock_adaptive_spin_usecs, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ulock_adaptive_spin_usecs, 0, "ulock adaptive spin duration");
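/*
 * Illustrative usage (an assumption about standard sysctl tooling, not part
 * of this file): because the knob above is registered CTLFLAG_RW under _kern,
 * the adaptive-spin window can be inspected or tuned from userspace, e.g.:
 *
 *	sysctl kern.ulock_adaptive_spin_usecs        # read the current value
 *	sysctl kern.ulock_adaptive_spin_usecs=50     # widen the spin window to 50us
 */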
#if DEVELOPMENT || DEBUG
static int ull_simulate_copyin_fault = 0;

static void
ull_dump(ull_t *ull)
{
    kprintf("ull\t%p\n", ull);
    switch (ull->ull_key.ulk_key_type) {
    case ULK_UADDR:
        kprintf("ull_key.ulk_key_type\tULK_UADDR\n");
        kprintf("ull_key.ulk_pid\t%d\n", ull->ull_key.ulk_pid);
        kprintf("ull_key.ulk_addr\t%p\n", (void *)(ull->ull_key.ulk_addr));
        break;
    case ULK_XPROC:
        kprintf("ull_key.ulk_key_type\tULK_XPROC\n");
        kprintf("ull_key.ulk_object\t%p\n", (void *)(ull->ull_key.ulk_object));
        kprintf("ull_key.ulk_offset\t%p\n", (void *)(ull->ull_key.ulk_offset));
        break;
    default:
        kprintf("ull_key.ulk_key_type\tUNKNOWN %d\n", ull->ull_key.ulk_key_type);
        break;
    }
    kprintf("ull_nwaiters\t%d\n", ull->ull_nwaiters);
    kprintf("ull_refcount\t%d\n", ull->ull_refcount);
    kprintf("ull_opcode\t%d\n\n", ull->ull_opcode);
    kprintf("ull_owner\t0x%llx\n\n", thread_tid(ull->ull_owner));
    kprintf("ull_turnstile\t%p\n\n", ull->ull_turnstile);
}
#endif
typedef struct ull_bucket {
    queue_head_t ulb_head;
    lck_spin_t   ulb_lock;
} ull_bucket_t;

static int          ull_hash_buckets;
static ull_bucket_t *ull_bucket;
static uint32_t     ull_nzalloc = 0;
static zone_t       ull_zone;

#define ull_bucket_lock(i)   lck_spin_lock_grp(&ull_bucket[i].ulb_lock, ull_lck_grp)
#define ull_bucket_unlock(i) lck_spin_unlock(&ull_bucket[i].ulb_lock)
static __inline__ uint32_t
ull_hash_index(const void *key, size_t length)
{
    uint32_t hash = os_hash_jenkins(key, length);

    hash &= (ull_hash_buckets - 1);

    return hash;
}

#define ULL_INDEX(keyp) ull_hash_index(keyp, keyp->ulk_key_type == ULK_UADDR ? ULK_UADDR_LEN : ULK_XPROC_LEN)
void
ulock_initialize(void)
{
    ull_lck_grp = lck_grp_alloc_init("ulocks", NULL);

    assert(thread_max > 16);
    /*
     * Size ull_hash_buckets based on thread_max.
     * Round up to nearest power of 2, then divide by 4.
     */
    ull_hash_buckets = (1 << (bit_ceiling(thread_max) - 2));
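    /*
     * Worked example (illustrative numbers only, assuming bit_ceiling()
     * returns the bit width of the next power of two, i.e. ceil(log2(n))):
     * for thread_max == 2560 the next power of two is 4096
     * (bit_ceiling(2560) == 12), and dividing by 4 gives
     * ull_hash_buckets == 1 << 10 == 1024, which satisfies the
     * ull_hash_buckets >= thread_max / 4 assertion below.
     */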
    kprintf("%s>thread_max=%d, ull_hash_buckets=%d\n", __FUNCTION__, thread_max, ull_hash_buckets);
    assert(ull_hash_buckets >= thread_max / 4);

    ull_bucket = (ull_bucket_t *)kalloc(sizeof(ull_bucket_t) * ull_hash_buckets);
    assert(ull_bucket != NULL);

    for (int i = 0; i < ull_hash_buckets; i++) {
        queue_init(&ull_bucket[i].ulb_head);
        lck_spin_init(&ull_bucket[i].ulb_lock, ull_lck_grp, NULL);
    }

    ull_zone = zinit(sizeof(ull_t),
        thread_max * sizeof(ull_t),
        0, "ulocks");

    zone_change(ull_zone, Z_NOENCRYPT, TRUE);
    zone_change(ull_zone, Z_CACHING_ENABLED, TRUE);
}
#if DEVELOPMENT || DEBUG
/* Count the number of hash entries for a given pid.
 * if pid==0, dump the whole table.
 */
static int
ull_hash_dump(pid_t pid)
{
    int count = 0;
    if (pid == 0) {
        kprintf("%s>total number of ull_t allocated %d\n", __FUNCTION__, ull_nzalloc);
        kprintf("%s>BEGIN\n", __FUNCTION__);
    }
    for (int i = 0; i < ull_hash_buckets; i++) {
        ull_bucket_lock(i);
        if (!queue_empty(&ull_bucket[i].ulb_head)) {
            ull_t *elem;
            if (pid == 0) {
                kprintf("%s>index %d:\n", __FUNCTION__, i);
            }
            qe_foreach_element(elem, &ull_bucket[i].ulb_head, ull_hash_link) {
                if ((pid == 0) || ((elem->ull_key.ulk_key_type == ULK_UADDR) && (pid == elem->ull_key.ulk_pid))) {
                    ull_dump(elem);
                    count++;
                }
            }
        }
        ull_bucket_unlock(i);
    }
    if (pid == 0) {
        kprintf("%s>END\n", __FUNCTION__);
    }
    return count;
}
#endif
static ull_t *
ull_alloc(ulk_t *key)
{
    ull_t *ull = (ull_t *)zalloc(ull_zone);
    assert(ull != NULL);

    ull->ull_refcount = 1;
    ull->ull_key = *key;
    ull->ull_bucket_index = ULL_INDEX(key);
    ull->ull_nwaiters = 0;
    ull->ull_opcode = 0;

    ull->ull_owner = THREAD_NULL;
    ull->ull_turnstile = TURNSTILE_NULL;

    ull_lock_init(ull);

    ull_nzalloc++;
    return ull;
}

static void
ull_free(ull_t *ull)
{
    assert(ull->ull_owner == THREAD_NULL);
    assert(ull->ull_turnstile == TURNSTILE_NULL);

    ull_assert_notwned(ull);

    ull_lock_destroy(ull);

    zfree(ull_zone, ull);
}
/* Finds an existing ulock structure (ull_t), or creates a new one.
 * If MUST_EXIST flag is set, returns NULL instead of creating a new one.
 * The ulock structure is returned with ull_lock locked
 */
static ull_t *
ull_get(ulk_t *key, uint32_t flags, ull_t **unused_ull)
{
    ull_t *ull = NULL;
    uint i = ULL_INDEX(key);
    ull_t *new_ull = (flags & ULL_MUST_EXIST) ? NULL : ull_alloc(key);
    ull_t *elem;

    ull_bucket_lock(i);
    qe_foreach_element(elem, &ull_bucket[i].ulb_head, ull_hash_link) {
        ull_lock(elem);
        if (ull_key_match(&elem->ull_key, key)) {
            ull = elem;
            break;
        } else {
            ull_unlock(elem);
        }
    }
    if (ull == NULL) {
        if (flags & ULL_MUST_EXIST) {
            /* Must already exist (called from wake) */
            ull_bucket_unlock(i);
            assert(new_ull == NULL);
            assert(unused_ull == NULL);
            return NULL;
        }

        if (new_ull == NULL) {
            /* Alloc above failed */
            ull_bucket_unlock(i);
            return NULL;
        }

        ull = new_ull;
        ull_lock(ull);
        enqueue(&ull_bucket[i].ulb_head, &ull->ull_hash_link);
    } else if (!(flags & ULL_MUST_EXIST)) {
        assert(*unused_ull == NULL);
        *unused_ull = new_ull;
    }

    ull->ull_refcount++;

    ull_bucket_unlock(i);

    return ull; /* still locked */
}
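/*
 * Illustrative call pattern (a sketch, not code copied from this file):
 * callers below pair ull_get() with ull_put().  ull_get() returns the ull_t
 * with ull_lock held and a reference taken; ull_put() drops that reference
 * (and the lock) and is therefore called while still holding ull_lock.  The
 * unused_ull out-parameter hands back a speculatively allocated ull_t that
 * lost the race to an existing hash entry, so the caller can ull_free() it
 * once the spinlock is no longer held.
 *
 *	ull_t *unused_ull = NULL;
 *	ull_t *ull = ull_get(&key, 0, &unused_ull);
 *	...                          // operate on ull under ull_lock
 *	ull_put(ull);                // drops the +1 ref and ull_lock
 *	if (unused_ull != NULL) {
 *		ull_free(unused_ull);
 *	}
 */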
/*
 * Must be called with ull_lock held
 */
static void
ull_put(ull_t *ull)
{
    ull_assert_owned(ull);
    int refcount = --ull->ull_refcount;
    assert(refcount == 0 ? (ull->ull_key.ulk_key_type == ULK_INVALID) : 1);
    ull_unlock(ull);

    if (refcount > 0) {
        return;
    }

    ull_bucket_lock(ull->ull_bucket_index);
    remqueue(&ull->ull_hash_link);
    ull_bucket_unlock(ull->ull_bucket_index);

    ull_free(ull);
}
extern kern_return_t vm_map_page_info(vm_map_t map, vm_map_offset_t offset, vm_page_info_flavor_t flavor, vm_page_info_t info, mach_msg_type_number_t *count);
extern vm_map_t current_map(void);
extern boolean_t machine_thread_on_core(thread_t thread);

static int
uaddr_findobj(user_addr_t uaddr, uint64_t *objectp, uint64_t *offsetp)
{
    kern_return_t ret;
    vm_page_info_basic_data_t info;
    mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
    ret = vm_map_page_info(current_map(), uaddr, VM_PAGE_INFO_BASIC, (vm_page_info_t)&info, &count);
    if (ret != KERN_SUCCESS) {
        return EINVAL;
    }

    if (objectp != NULL) {
        *objectp = (uint64_t)info.object_id;
    }
    if (offsetp != NULL) {
        *offsetp = (uint64_t)info.offset;
    }

    return 0;
}
static void ulock_wait_continue(void *, wait_result_t);
static void ulock_wait_cleanup(ull_t *, thread_t, thread_t, int32_t *);

inline static int
wait_result_to_return_code(wait_result_t wr)
{
    int ret = 0;

    switch (wr) {
    case THREAD_AWAKENED:
        break;
    case THREAD_TIMED_OUT:
        ret = ETIMEDOUT;
        break;
    case THREAD_INTERRUPTED:
    default:
        ret = EINTR;
        break;
    }

    return ret;
}
static int
ulock_resolve_owner(uint32_t value, thread_t *owner)
{
    mach_port_name_t owner_name = ulock_owner_value_to_port_name(value);

    *owner = port_name_to_thread(owner_name,
        PORT_TO_THREAD_IN_CURRENT_TASK |
        PORT_TO_THREAD_NOT_CURRENT_THREAD);
    if (*owner == THREAD_NULL) {
        /*
         * Translation failed - even though the lock value is up to date,
         * whatever was stored in the lock wasn't actually a thread port.
         */
        return owner_name == MACH_PORT_DEAD ? ESRCH : EOWNERDEAD;
    }
    return 0;
}
int
ulock_wait(struct proc *p, struct ulock_wait_args *args, int32_t *retval)
{
    uint opcode = args->operation & UL_OPCODE_MASK;
    uint flags = args->operation & UL_FLAGS_MASK;

    if (flags & ULF_WAIT_CANCEL_POINT) {
        __pthread_testcancel(1);
    }

    thread_t self = current_thread();

    /* involved threads - each variable holds +1 ref if not null */
    thread_t owner_thread = THREAD_NULL;
    thread_t old_owner = THREAD_NULL;

    ull_t *unused_ull = NULL;

    if ((flags & ULF_WAIT_MASK) != flags) {

    bool set_owner = false;

    size_t lock_size = sizeof(uint32_t);

    case UL_COMPARE_AND_WAIT:
    case UL_COMPARE_AND_WAIT64:
        lock_size = sizeof(uint64_t);
    case UL_COMPARE_AND_WAIT_SHARED:
    case UL_COMPARE_AND_WAIT64_SHARED:
        lock_size = sizeof(uint64_t);

    if ((args->addr == 0) || (args->addr & (lock_size - 1))) {
    ret = uaddr_findobj(args->addr, &object, &offset);

    key.ulk_key_type = ULK_XPROC;
    key.ulk_object = object;
    key.ulk_offset = offset;

    key.ulk_key_type = ULK_UADDR;
    key.ulk_pid = p->p_pid;
    key.ulk_addr = args->addr;

    if ((flags & ULF_WAIT_ADAPTIVE_SPIN) && set_owner) {
        /*
         * Attempt the copyin outside of the lock once,
         *
         * If it doesn't match (which is common), return right away.
         *
         * If it matches, resolve the current owner, and if it is on core,
         * spin a bit waiting for the value to change. If the owner isn't on
         * core, or if the value stays stable, then go on with the regular
         * blocking code.
         */
        ret = copyin_atomic32(args->addr, &u32);
        if (ret || u32 != args->value) {

        if (owner_thread == NULL && ulock_resolve_owner(u32, &owner_thread) != 0) {

        /* owner_thread may have a +1 starting here */

        if (!machine_thread_on_core(owner_thread)) {

        clock_interval_to_deadline(ulock_adaptive_spin_usecs,
            NSEC_PER_USEC, &end);
        } else if (mach_absolute_time() > end) {

        if (copyin_atomic32_wait_if_equals(args->addr, u32) != 0) {
    ull_t *ull = ull_get(&key, 0, &unused_ull);

    if (ull->ull_opcode == 0) {
        ull->ull_opcode = opcode;
    } else if (ull->ull_opcode != opcode) {

    /*
     * We don't want this copyin to get wedged behind VM operations,
     * but we have to read the userspace value under the ull lock for correctness.
     *
     * Until <rdar://problem/24999882> exists,
     * holding the ull spinlock across copyin forces any
     * vm_fault we encounter to fail.
     */

    /* copyin_atomicXX always checks alignment */

    if (lock_size == 4) {
        copy_ret = copyin_atomic32(args->addr, &u32);
    } else {
        copy_ret = copyin_atomic64(args->addr, &value);
    }

#if DEVELOPMENT || DEBUG
    /* Occasionally simulate copyin finding the user address paged out */
    if (((ull_simulate_copyin_fault == p->p_pid) || (ull_simulate_copyin_fault == 1)) && (copy_ret == 0)) {
        static _Atomic int fault_inject = 0;
        if (os_atomic_inc_orig(&fault_inject, relaxed) % 73 == 0) {

    /* copyin() will return an error if the access to the user addr would have faulted,
     * so just return and let the user level code fault it in.
     */

    if (value != args->value) {
        /* Lock value has changed from expected so bail out */

    if (owner_thread == THREAD_NULL) {
        ret = ulock_resolve_owner(args->value, &owner_thread);
        if (ret == EOWNERDEAD) {
            /*
             * Translation failed - even though the lock value is up to date,
             * whatever was stored in the lock wasn't actually a thread port.
             */

        /* HACK: don't bail on MACH_PORT_DEAD, to avoid blowing up the no-tsd pthread lock */

    /* owner_thread has a +1 reference */

    /*
     * At this point, I know:
     * a) owner_thread is definitely the current owner, because I just read the value
     * b) owner_thread is either:
     *    i) holding the user lock or
     *    ii) has just unlocked the user lock after I looked
     *        and is heading toward the kernel to call ull_wake.
     *        If so, it's going to have to wait for the ull mutex.
     *
     * Therefore, I can ask the turnstile to promote its priority, and I can rely
     * on it to come by later to issue the wakeup and lose its promotion.
     */

    /* Return the +1 ref from the ull_owner field */
    old_owner = ull->ull_owner;
    ull->ull_owner = THREAD_NULL;

    if (owner_thread != THREAD_NULL) {
        /* The ull_owner field now owns a +1 ref on owner_thread */
        thread_reference(owner_thread);
        ull->ull_owner = owner_thread;
    }
    uint32_t timeout = args->timeout;
    uint64_t deadline = TIMEOUT_WAIT_FOREVER;
    wait_interrupt_t interruptible = THREAD_ABORTSAFE;
    struct turnstile *ts;

    ts = turnstile_prepare((uintptr_t)ull, &ull->ull_turnstile,
        TURNSTILE_NULL, TURNSTILE_ULOCK);
    thread_set_pending_block_hint(self, kThreadWaitUserLock);

    if (flags & ULF_WAIT_WORKQ_DATA_CONTENTION) {
        interruptible |= THREAD_WAIT_NOREPORT;
    }

    clock_interval_to_deadline(timeout, NSEC_PER_USEC, &deadline);

    turnstile_update_inheritor(ts, owner_thread,
        (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

    wr = waitq_assert_wait64(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)),
        interruptible, deadline);

    ull_free(unused_ull);

    turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
    if (wr == THREAD_WAITING) {
        uthread_t uthread = (uthread_t)get_bsdthread_info(self);
        uthread->uu_save.uus_ulock_wait_data.retval = retval;
        uthread->uu_save.uus_ulock_wait_data.flags = flags;
        uthread->uu_save.uus_ulock_wait_data.owner_thread = owner_thread;
        uthread->uu_save.uus_ulock_wait_data.old_owner = old_owner;
        if (set_owner && owner_thread != THREAD_NULL) {
            thread_handoff_parameter(owner_thread, ulock_wait_continue, ull);
        } else {
            assert(owner_thread == THREAD_NULL);
            thread_block_parameter(ulock_wait_continue, ull);
        }
    }

    ret = wait_result_to_return_code(wr);

    turnstile_complete((uintptr_t)ull, &ull->ull_turnstile, NULL, TURNSTILE_ULOCK);

    ulock_wait_cleanup(ull, owner_thread, old_owner, retval);

    ull_free(unused_ull);

    assert(*retval >= 0);

    thread_deallocate(owner_thread);

    if ((flags & ULF_NO_ERRNO) && (ret != 0)) {
        *retval = -ret;
        ret = 0;
    }
    return ret;
}
/*
 * Must be called with ull_lock held
 */
static void
ulock_wait_cleanup(ull_t *ull, thread_t owner_thread, thread_t old_owner, int32_t *retval)
{
    ull_assert_owned(ull);

    thread_t old_lingering_owner = THREAD_NULL;

    *retval = --ull->ull_nwaiters;
    if (ull->ull_nwaiters == 0) {
        /*
         * If the wait was canceled early, we might need to
         * clear out the lingering owner reference before
         * freeing the ull.
         */
        old_lingering_owner = ull->ull_owner;
        ull->ull_owner = THREAD_NULL;

        memset(&ull->ull_key, 0, sizeof ull->ull_key);
        assert(ull->ull_refcount > 0);
    }
    ull_put(ull);

    /* Need to be called after dropping the interlock */
    turnstile_cleanup();

    if (owner_thread != THREAD_NULL) {
        thread_deallocate(owner_thread);
    }

    if (old_owner != THREAD_NULL) {
        thread_deallocate(old_owner);
    }

    if (old_lingering_owner != THREAD_NULL) {
        thread_deallocate(old_lingering_owner);
    }

    assert(*retval >= 0);
}
__attribute__((noreturn))
static void
ulock_wait_continue(void * parameter, wait_result_t wr)
{
    thread_t self = current_thread();
    uthread_t uthread = (uthread_t)get_bsdthread_info(self);
    int ret = 0;

    ull_t *ull = (ull_t *)parameter;
    int32_t *retval = uthread->uu_save.uus_ulock_wait_data.retval;
    uint flags = uthread->uu_save.uus_ulock_wait_data.flags;
    thread_t owner_thread = uthread->uu_save.uus_ulock_wait_data.owner_thread;
    thread_t old_owner = uthread->uu_save.uus_ulock_wait_data.old_owner;

    ret = wait_result_to_return_code(wr);

    ull_lock(ull);
    turnstile_complete((uintptr_t)ull, &ull->ull_turnstile, NULL, TURNSTILE_ULOCK);

    ulock_wait_cleanup(ull, owner_thread, old_owner, retval);

    if ((flags & ULF_NO_ERRNO) && (ret != 0)) {
        *retval = -ret;
        ret = 0;
    }

    unix_syscall_return(ret);
}
int
ulock_wake(struct proc *p, struct ulock_wake_args *args, __unused int32_t *retval)
{
    uint opcode = args->operation & UL_OPCODE_MASK;
    uint flags = args->operation & UL_FLAGS_MASK;

    /* involved threads - each variable holds +1 ref if not null */
    thread_t wake_thread = THREAD_NULL;

#if DEVELOPMENT || DEBUG
    if (opcode == UL_DEBUG_HASH_DUMP_PID) {
        *retval = ull_hash_dump(p->p_pid);
    } else if (opcode == UL_DEBUG_HASH_DUMP_ALL) {
        *retval = ull_hash_dump(0);
    } else if (opcode == UL_DEBUG_SIMULATE_COPYIN_FAULT) {
        ull_simulate_copyin_fault = (int)(args->wake_value);
    }
#endif

    bool set_owner = false;

    case UL_COMPARE_AND_WAIT:
    case UL_COMPARE_AND_WAIT64:
    case UL_COMPARE_AND_WAIT_SHARED:
    case UL_COMPARE_AND_WAIT64_SHARED:

    if ((flags & ULF_WAKE_MASK) != flags) {

    if ((flags & ULF_WAKE_THREAD) && ((flags & ULF_WAKE_ALL) || set_owner)) {

    if (args->addr == 0) {

    ret = uaddr_findobj(args->addr, &object, &offset);

    key.ulk_key_type = ULK_XPROC;
    key.ulk_object = object;
    key.ulk_offset = offset;

    key.ulk_key_type = ULK_UADDR;
    key.ulk_pid = p->p_pid;
    key.ulk_addr = args->addr;
    if (flags & ULF_WAKE_THREAD) {
        mach_port_name_t wake_thread_name = (mach_port_name_t)(args->wake_value);
        wake_thread = port_name_to_thread(wake_thread_name,
            PORT_TO_THREAD_IN_CURRENT_TASK |
            PORT_TO_THREAD_NOT_CURRENT_THREAD);
        if (wake_thread == THREAD_NULL) {

    ull_t *ull = ull_get(&key, ULL_MUST_EXIST, NULL);
    thread_t new_owner = THREAD_NULL;
    struct turnstile *ts = TURNSTILE_NULL;
    thread_t cleanup_thread = THREAD_NULL;

    if (opcode != ull->ull_opcode) {

    if (ull->ull_owner != current_thread()) {
        /*
         * If the current thread isn't the known owner,
         * then this wake call was late to the party,
         * and the kernel already knows who owns the lock.
         *
         * This current owner already knows the lock is contended
         * and will redrive wakes, just bail out.
         */

    assert(ull->ull_owner == THREAD_NULL);

    ts = turnstile_prepare((uintptr_t)ull, &ull->ull_turnstile,
        TURNSTILE_NULL, TURNSTILE_ULOCK);
    assert(ts != TURNSTILE_NULL);
    if (flags & ULF_WAKE_THREAD) {
        kern_return_t kr = waitq_wakeup64_thread(&ts->ts_waitq,
            CAST_EVENT64_T(ULOCK_TO_EVENT(ull)),
            wake_thread, THREAD_AWAKENED);
        if (kr != KERN_SUCCESS) {
            assert(kr == KERN_NOT_WAITING);
        }
    } else if (flags & ULF_WAKE_ALL) {
        turnstile_update_inheritor(ts, THREAD_NULL,
            TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
        waitq_wakeup64_all(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)),
            THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
    } else if (set_owner) {
        /*
         * The turnstile waitq is priority ordered,
         * and will wake up the highest priority waiter
         * and set it as the inheritor for us.
         */
        new_owner = waitq_wakeup64_identify(&ts->ts_waitq,
            CAST_EVENT64_T(ULOCK_TO_EVENT(ull)),
            THREAD_AWAKENED, WAITQ_PROMOTE_ON_WAKE);
    } else {
        waitq_wakeup64_one(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)),
            THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
    }

    turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
    cleanup_thread = ull->ull_owner;
    ull->ull_owner = new_owner;

    turnstile_complete((uintptr_t)ull, &ull->ull_turnstile, NULL, TURNSTILE_ULOCK);

    if (ts != TURNSTILE_NULL) {
        /* Need to be called after dropping the interlock */
        turnstile_cleanup();
    }

    if (cleanup_thread != THREAD_NULL) {
        thread_deallocate(cleanup_thread);
    }

    if (wake_thread != THREAD_NULL) {
        thread_deallocate(wake_thread);
    }

    if ((flags & ULF_NO_ERRNO) && (ret != 0)) {
        *retval = -ret;
        ret = 0;
    }
    return ret;
}
void
kdp_ulock_find_owner(__unused struct waitq * waitq, event64_t event, thread_waitinfo_t * waitinfo)
{
    ull_t *ull = EVENT_TO_ULOCK(event);
    assert(kdp_is_in_zone(ull, "ulocks"));

    switch (ull->ull_opcode) {
    case UL_UNFAIR_LOCK:
    case UL_UNFAIR_LOCK64_SHARED:
        waitinfo->owner = thread_tid(ull->ull_owner);
        waitinfo->context = ull->ull_key.ulk_addr;
        break;
    case UL_COMPARE_AND_WAIT:
    case UL_COMPARE_AND_WAIT64:
    case UL_COMPARE_AND_WAIT_SHARED:
    case UL_COMPARE_AND_WAIT64_SHARED:
        waitinfo->owner = 0;
        waitinfo->context = ull->ull_key.ulk_addr;
        break;
    default:
        panic("%s: Invalid ulock opcode %d addr %p", __FUNCTION__, ull->ull_opcode, (void*)ull);
        break;
    }
}