/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/file_internal.h>
#include <sys/proc_internal.h>
#include <sys/kernel.h>
#include <sys/guarded.h>

#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/pthread_shims.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>
#include <kern/task.h>
#include <kern/telemetry.h>
#include <kern/waitq.h>
#include <kern/sched_prim.h>
#include <kern/turnstile.h>
#include <kern/zalloc.h>
#include <kern/debug.h>

#include <pexpert/pexpert.h>

#define XNU_TEST_BITMAP
#include <kern/bits.h>

#include <sys/ulock.h>
/*
 * How ulock promotion works:
 *
 * There's a requested policy field on every thread called 'promotions', which
 * expresses which ulock promotions are happening to this thread.
 * The promotion priority saturates until the promotion count goes to 0.
 *
 * We also track effective promotion qos, which is the qos before clamping.
 * This value is used for promoting a thread that another thread is waiting on,
 * so that the lock owner reinflates to the right priority after unclamping.
 *
 * This also works for non-QoS threads, which can donate base priority to QoS
 * and non-QoS threads alike.
 *
 * ulock wait applies a promotion to the owner communicated through
 * UL_UNFAIR_LOCK as waiters block, and that promotion is saturated as long as
 * there is still an owner.  In ulock wake, if the waker is still the owner,
 * then it clears its ownership and drops the boost.  It does NOT transfer
 * ownership/priority boost to the new thread.  Instead, it selects the
 * waiting thread with the highest base priority to be woken next, and
 * relies on that thread to carry the torch for the other waiting threads.
 */
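/*
 * Illustrative userspace usage (a minimal sketch, not part of this file):
 * the wait/wake paths below back the private __ulock_wait()/__ulock_wake()
 * syscalls, whose arguments mirror ulock_wait_args/ulock_wake_args.  Assuming
 * the operation flags from <sys/ulock.h>, a UL_COMPARE_AND_WAIT-style gate
 * could be driven like this:
 *
 *	#include <stdatomic.h>
 *	#include <sys/ulock.h>
 *
 *	extern int __ulock_wait(uint32_t operation, void *addr,
 *	    uint64_t value, uint32_t timeout);
 *	extern int __ulock_wake(uint32_t operation, void *addr,
 *	    uint64_t wake_value);
 *
 *	static _Atomic uint32_t gate = 1;	// hypothetical 32-bit lock word
 *
 *	void
 *	gate_wait(void)
 *	{
 *		// Block only while the word still holds the expected value 1;
 *		// a timeout of 0 means wait forever.
 *		while (atomic_load(&gate) == 1) {
 *			__ulock_wait(UL_COMPARE_AND_WAIT, &gate, 1, 0);
 *		}
 *	}
 *
 *	void
 *	gate_open(void)
 *	{
 *		atomic_store(&gate, 0);
 *		// Wake every thread parked on the word.
 *		__ulock_wake(UL_COMPARE_AND_WAIT | ULF_WAKE_ALL, &gate, 0);
 *	}
 */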
static lck_grp_t *ull_lck_grp;

typedef lck_spin_t ull_lock_t;
#define ull_lock_init(ull)         lck_spin_init(&ull->ull_lock, ull_lck_grp, NULL)
#define ull_lock_destroy(ull)      lck_spin_destroy(&ull->ull_lock, ull_lck_grp)
#define ull_lock(ull)              lck_spin_lock_grp(&ull->ull_lock, ull_lck_grp)
#define ull_unlock(ull)            lck_spin_unlock(&ull->ull_lock)
#define ull_assert_owned(ull)      LCK_SPIN_ASSERT(&ull->ull_lock, LCK_ASSERT_OWNED)
#define ull_assert_notowned(ull)   LCK_SPIN_ASSERT(&ull->ull_lock, LCK_ASSERT_NOTOWNED)

#define ULOCK_TO_EVENT(ull)   ((event_t)ull)
#define EVENT_TO_ULOCK(event) ((ull_t *)event)
typedef struct __attribute__((packed)) {
	user_addr_t	ulk_addr;
	pid_t		ulk_pid;
} ulk_t;

static bool
ull_key_match(ulk_t *a, ulk_t *b)
{
	return (a->ulk_pid == b->ulk_pid) &&
	       (a->ulk_addr == b->ulk_addr);
}
typedef struct ull {
	/*
	 * ull_owner is the most recent known value for the owner of this ulock
	 * i.e. it may be out of date WRT the real value in userspace.
	 */
	thread_t	ull_owner; /* holds +1 thread reference */
	ulk_t		ull_key;
	ulk_t		ull_saved_key;
	ull_lock_t	ull_lock;
	uint		ull_bucket_index;
	int32_t		ull_nwaiters;
	int32_t		ull_max_nwaiters;
	int32_t		ull_refcount;
	uint8_t		ull_opcode;
	struct turnstile *ull_turnstile;
	queue_chain_t	ull_hash_link;
} ull_t;
extern void ulock_initialize(void);

#define ULL_MUST_EXIST	0x0001
static ull_t *ull_get(ulk_t *, uint32_t, ull_t **);
static void ull_put(ull_t *);
#if DEVELOPMENT || DEBUG
static int ull_simulate_copyin_fault = 0;
static void
ull_dump(ull_t *ull)
{
	kprintf("ull\t%p\n", ull);
	kprintf("ull_key.ulk_pid\t%d\n", ull->ull_key.ulk_pid);
	kprintf("ull_key.ulk_addr\t%p\n", (void *)(ull->ull_key.ulk_addr));
	kprintf("ull_saved_key.ulk_pid\t%d\n", ull->ull_saved_key.ulk_pid);
	kprintf("ull_saved_key.ulk_addr\t%p\n", (void *)(ull->ull_saved_key.ulk_addr));
	kprintf("ull_nwaiters\t%d\n", ull->ull_nwaiters);
	kprintf("ull_max_nwaiters\t%d\n", ull->ull_max_nwaiters);
	kprintf("ull_refcount\t%d\n", ull->ull_refcount);
	kprintf("ull_opcode\t%d\n\n", ull->ull_opcode);
	kprintf("ull_owner\t0x%llx\n\n", thread_tid(ull->ull_owner));
	kprintf("ull_turnstile\t%p\n\n", ull->ull_turnstile);
}
#endif
typedef struct ull_bucket {
	queue_head_t ulb_head;
	lck_spin_t   ulb_lock;
} ull_bucket_t;

static int ull_hash_buckets;
static ull_bucket_t *ull_bucket;
static uint32_t ull_nzalloc = 0;
static zone_t ull_zone;
#define ull_bucket_lock(i)   lck_spin_lock_grp(&ull_bucket[i].ulb_lock, ull_lck_grp)
#define ull_bucket_unlock(i) lck_spin_unlock(&ull_bucket[i].ulb_lock)
static __inline__ uint32_t
ull_hash_index(const void *key, size_t length)
{
	uint32_t hash = os_hash_jenkins(key, length);

	hash &= (ull_hash_buckets - 1);

	return hash;
}
/*
 * Ensure that the key structure is packed,
 * so that no undefined memory is passed to
 * ull_hash_index().
 */
static_assert(sizeof(ulk_t) == sizeof(user_addr_t) + sizeof(pid_t));
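/*
 * For example: without the packed attribute, an LP64 build would pad ulk_t
 * to 16 bytes (8-byte ulk_addr, 4-byte ulk_pid, 4 bytes of tail padding),
 * and os_hash_jenkins() would then mix uninitialized padding bytes into the
 * hash, so two equal keys could land in different buckets.
 */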
#define ULL_INDEX(keyp)	ull_hash_index(keyp, sizeof *keyp)
void
ulock_initialize(void)
{
	ull_lck_grp = lck_grp_alloc_init("ulocks", NULL);

	assert(thread_max > 16);
	/*
	 * Size ull_hash_buckets based on thread_max.
	 * Round up to nearest power of 2, then divide by 4
	 */
	ull_hash_buckets = (1 << (bit_ceiling(thread_max) - 2));

	kprintf("%s>thread_max=%d, ull_hash_buckets=%d\n", __FUNCTION__, thread_max, ull_hash_buckets);
	assert(ull_hash_buckets >= thread_max / 4);
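	/*
	 * Worked example (hypothetical value): with thread_max = 2560, rounding
	 * up to the nearest power of 2 gives 4096 = 2^12, so
	 * ull_hash_buckets = 1 << (12 - 2) = 1024, which satisfies
	 * 1024 >= 2560 / 4 = 640.
	 */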
	ull_bucket = (ull_bucket_t *)kalloc(sizeof(ull_bucket_t) * ull_hash_buckets);
	assert(ull_bucket != NULL);

	for (int i = 0; i < ull_hash_buckets; i++) {
		queue_init(&ull_bucket[i].ulb_head);
		lck_spin_init(&ull_bucket[i].ulb_lock, ull_lck_grp, NULL);
	}

	ull_zone = zinit(sizeof(ull_t),
	    thread_max * sizeof(ull_t),
	    0, "ulocks");

	zone_change(ull_zone, Z_NOENCRYPT, TRUE);
}
#if DEVELOPMENT || DEBUG
/*
 * Count the number of hash entries for a given pid.
 * if pid==0, dump the whole table.
 */
static int
ull_hash_dump(pid_t pid)
{
	int count = 0;

	kprintf("%s>total number of ull_t allocated %d\n", __FUNCTION__, ull_nzalloc);
	kprintf("%s>BEGIN\n", __FUNCTION__);
	for (int i = 0; i < ull_hash_buckets; i++) {
		ull_bucket_lock(i);
		if (!queue_empty(&ull_bucket[i].ulb_head)) {
			ull_t *elem;
			kprintf("%s>index %d:\n", __FUNCTION__, i);
			qe_foreach_element(elem, &ull_bucket[i].ulb_head, ull_hash_link) {
				if ((pid == 0) || (pid == elem->ull_key.ulk_pid)) {
					ull_dump(elem);
					count++;
				}
			}
		}
		ull_bucket_unlock(i);
	}
	kprintf("%s>END\n", __FUNCTION__);
	return count;
}
#endif
static ull_t *
ull_alloc(ulk_t *key)
{
	ull_t *ull = (ull_t *)zalloc(ull_zone);
	assert(ull != NULL);

	ull->ull_refcount = 1;
	ull->ull_key = *key;
	ull->ull_saved_key = *key;
	ull->ull_bucket_index = ULL_INDEX(key);
	ull->ull_nwaiters = 0;
	ull->ull_max_nwaiters = 0;
	ull->ull_opcode = 0;

	ull->ull_owner = THREAD_NULL;
	ull->ull_turnstile = TURNSTILE_NULL;

	ull_lock_init(ull);

	ull_nzalloc++;
	return ull;
}

static void
ull_free(ull_t *ull)
{
	assert(ull->ull_owner == THREAD_NULL);
	assert(ull->ull_turnstile == TURNSTILE_NULL);

	ull_assert_notowned(ull);

	ull_lock_destroy(ull);

	zfree(ull_zone, ull);
}
/*
 * Finds an existing ulock structure (ull_t), or creates a new one.
 * If MUST_EXIST flag is set, returns NULL instead of creating a new one.
 * The ulock structure is returned with ull_lock locked.
 */
static ull_t *
ull_get(ulk_t *key, uint32_t flags, ull_t **unused_ull)
{
	ull_t *ull = NULL;
	uint i = ULL_INDEX(key);
	ull_t *elem;
	ull_t *new_ull = (flags & ULL_MUST_EXIST) ? NULL : ull_alloc(key);

	ull_bucket_lock(i);
	qe_foreach_element(elem, &ull_bucket[i].ulb_head, ull_hash_link) {
		ull_lock(elem);
		if (ull_key_match(&elem->ull_key, key)) {
			ull = elem;
			break;
		} else {
			ull_unlock(elem);
		}
	}
	if (ull == NULL) {
		if (flags & ULL_MUST_EXIST) {
			/* Must already exist (called from wake) */
			ull_bucket_unlock(i);
			assert(new_ull == NULL);
			assert(unused_ull == NULL);
			return NULL;
		}
		if (new_ull == NULL) {
			/* Alloc above failed */
			ull_bucket_unlock(i);
			return NULL;
		}
		ull = new_ull;
		ull_lock(ull);
		enqueue(&ull_bucket[i].ulb_head, &ull->ull_hash_link);
	} else if (!(flags & ULL_MUST_EXIST)) {
		/* Found an existing ull; hand the unused preallocation back to the caller */
		assert(new_ull != NULL);
		assert(unused_ull != NULL);
		assert(*unused_ull == NULL);
		*unused_ull = new_ull;
	}

	ull->ull_refcount++;

	ull_bucket_unlock(i);

	return ull; /* still locked */
}
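/*
 * Typical caller pattern (sketch): ull_get() returns with ull_lock held and
 * the refcount bumped; ull_put() drops both.  A caller that raced to create
 * the ull frees the losing preallocation afterwards:
 *
 *	ull_t *unused_ull = NULL;
 *	ull_t *ull = ull_get(&key, 0, &unused_ull);
 *	if (ull != NULL) {
 *		... operate on the ulock under ull_lock ...
 *		ull_put(ull);
 *	}
 *	if (unused_ull != NULL) {
 *		ull_free(unused_ull);
 *	}
 */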
/*
 * Must be called with ull_lock held
 */
static void
ull_put(ull_t *ull)
{
	ull_assert_owned(ull);
	int refcount = --ull->ull_refcount;
	assert(refcount == 0 ? (ull->ull_key.ulk_pid == 0 && ull->ull_key.ulk_addr == 0) : 1);

	ull_unlock(ull);

	if (refcount > 0) {
		return;
	}

	ull_bucket_lock(ull->ull_bucket_index);
	remqueue(&ull->ull_hash_link);
	ull_bucket_unlock(ull->ull_bucket_index);

	ull_free(ull);
}
static void ulock_wait_continue(void *, wait_result_t);
static void ulock_wait_cleanup(ull_t *, thread_t, thread_t, int32_t *);

static int
wait_result_to_return_code(wait_result_t wr)
{
	int ret = 0;

	switch (wr) {
	case THREAD_AWAKENED:
		break;
	case THREAD_TIMED_OUT:
		ret = ETIMEDOUT;
		break;
	case THREAD_INTERRUPTED:
	default:
		ret = EINTR;
		break;
	}

	return ret;
}
int
ulock_wait(struct proc *p, struct ulock_wait_args *args, int32_t *retval)
{
	uint opcode = args->operation & UL_OPCODE_MASK;
	uint flags = args->operation & UL_FLAGS_MASK;
	int ret = 0;

	if (flags & ULF_WAIT_CANCEL_POINT) {
		__pthread_testcancel(1);
	}

	thread_t self = current_thread();
	ulk_t key;

	/* involved threads - each variable holds +1 ref if not null */
	thread_t owner_thread = THREAD_NULL;
	thread_t old_owner = THREAD_NULL;

	ull_t *unused_ull = NULL;

	if ((flags & ULF_WAIT_MASK) != flags) {
		ret = EINVAL;
		goto munge_retval;
	}

	boolean_t set_owner = FALSE;

	switch (opcode) {
	case UL_UNFAIR_LOCK:
		set_owner = TRUE;
		break;
	case UL_COMPARE_AND_WAIT:
		break;
	default:
		ret = EINVAL;
		goto munge_retval;
	}
	/* 32-bit lock type for UL_COMPARE_AND_WAIT and UL_UNFAIR_LOCK */
	uint32_t value = 0;

	if ((args->addr == 0) || (args->addr % _Alignof(_Atomic(typeof(value))))) {
		ret = EINVAL;
		goto munge_retval;
	}

	key.ulk_pid = p->p_pid;
	key.ulk_addr = args->addr;

	ull_t *ull = ull_get(&key, 0, &unused_ull);
	if (ull == NULL) {
		ret = ENOMEM;
		goto munge_retval;
	}
	/* ull is locked */

	ull->ull_nwaiters++;

	if (ull->ull_nwaiters > ull->ull_max_nwaiters) {
		ull->ull_max_nwaiters = ull->ull_nwaiters;
	}

	if (ull->ull_opcode == 0) {
		ull->ull_opcode = opcode;
	} else if (ull->ull_opcode != opcode) {
		ret = EDOM;
		goto out_locked;
	}
	/*
	 * We don't want this copyin to get wedged behind VM operations,
	 * but we have to read the userspace value under the ull lock for correctness.
	 *
	 * Until <rdar://problem/24999882> exists,
	 * holding the ull spinlock across copyin forces any
	 * vm_fault we encounter to fail.
	 */
	uint64_t val64; /* copyin_word always zero-extends to 64-bits */

	int copy_ret = copyin_word(args->addr, &val64, sizeof(value));

	value = (uint32_t)val64;

#if DEVELOPMENT || DEBUG
	/* Occasionally simulate copyin finding the user address paged out */
	if (((ull_simulate_copyin_fault == p->p_pid) || (ull_simulate_copyin_fault == 1)) && (copy_ret == 0)) {
		static _Atomic int fault_inject = 0;
		if (__c11_atomic_fetch_add(&fault_inject, 1, __ATOMIC_RELAXED) % 73 == 0) {
			copy_ret = EFAULT;
		}
	}
#endif
	if (copy_ret != 0) {
		ret = copy_ret;

		/* copyin() will return an error if the access to the user addr would have faulted,
		 * so just return and let the user level code fault it in.
		 */
		goto out_locked;
	}

	if (value != args->value) {
		/* Lock value has changed from expected so bail out */
		goto out_locked;
	}
	if (set_owner) {
		mach_port_name_t owner_name = ulock_owner_value_to_port_name(args->value);
		owner_thread = port_name_to_thread_for_ulock(owner_name);

		/* HACK: don't bail on MACH_PORT_DEAD, to avoid blowing up the no-tsd pthread lock */
		if (owner_name != MACH_PORT_DEAD && owner_thread == THREAD_NULL) {
			/*
			 * Translation failed - even though the lock value is up to date,
			 * whatever was stored in the lock wasn't actually a thread port.
			 */
			ret = EOWNERDEAD;
			goto out_locked;
		}
		/* owner_thread has a +1 reference */

		/*
		 * At this point, I know:
		 * a) owner_thread is definitely the current owner, because I just read the value
		 * b) owner_thread is either:
		 *    i) holding the user lock or
		 *    ii) has just unlocked the user lock after I looked
		 *        and is heading toward the kernel to call ull_wake.
		 *        If so, it's going to have to wait for the ull mutex.
		 *
		 * Therefore, I can ask the turnstile to promote its priority, and I can rely
		 * on it to come by later to issue the wakeup and lose its promotion.
		 */

		/* Return the +1 ref from the ull_owner field */
		old_owner = ull->ull_owner;
		ull->ull_owner = THREAD_NULL;

		if (owner_thread != THREAD_NULL) {
			/* The ull_owner field now owns a +1 ref on owner_thread */
			thread_reference(owner_thread);
			ull->ull_owner = owner_thread;
		}
	}
	wait_result_t wr;
	uint32_t timeout = args->timeout;
	uint64_t deadline = TIMEOUT_WAIT_FOREVER;
	wait_interrupt_t interruptible = THREAD_ABORTSAFE;
	struct turnstile *ts;

	ts = turnstile_prepare((uintptr_t)ull, &ull->ull_turnstile,
	    TURNSTILE_NULL, TURNSTILE_ULOCK);
	thread_set_pending_block_hint(self, kThreadWaitUserLock);

	if (flags & ULF_WAIT_WORKQ_DATA_CONTENTION) {
		interruptible |= THREAD_WAIT_NOREPORT;
	}

	if (timeout) {
		clock_interval_to_deadline(timeout, NSEC_PER_USEC, &deadline);
	}

	turnstile_update_inheritor(ts, owner_thread,
	    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

	wr = waitq_assert_wait64(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)),
	    interruptible, deadline);

	ull_unlock(ull);

	if (unused_ull) {
		ull_free(unused_ull);
		unused_ull = NULL;
	}

	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
	if (wr == THREAD_WAITING) {
		uthread_t uthread = (uthread_t)get_bsdthread_info(self);
		uthread->uu_save.uus_ulock_wait_data.retval = retval;
		uthread->uu_save.uus_ulock_wait_data.flags = flags;
		uthread->uu_save.uus_ulock_wait_data.owner_thread = owner_thread;
		uthread->uu_save.uus_ulock_wait_data.old_owner = old_owner;
		if (set_owner && owner_thread != THREAD_NULL) {
			thread_handoff_parameter(owner_thread, ulock_wait_continue, ull);
		} else {
			assert(owner_thread == THREAD_NULL);
			thread_block_parameter(ulock_wait_continue, ull);
		}
		/* NOT REACHED */
	}

	ret = wait_result_to_return_code(wr);

	ull_lock(ull);
	turnstile_complete((uintptr_t)ull, &ull->ull_turnstile, NULL);

out_locked:
	ulock_wait_cleanup(ull, owner_thread, old_owner, retval);

	if (unused_ull) {
		ull_free(unused_ull);
		unused_ull = NULL;
	}

	assert(*retval >= 0);

munge_retval:
	if ((flags & ULF_NO_ERRNO) && (ret != 0)) {
		*retval = -ret;
		ret = 0;
	}
	return ret;
}
/*
 * Must be called with ull_lock held
 */
static void
ulock_wait_cleanup(ull_t *ull, thread_t owner_thread, thread_t old_owner, int32_t *retval)
{
	ull_assert_owned(ull);

	thread_t old_lingering_owner = THREAD_NULL;

	*retval = --ull->ull_nwaiters;
	if (ull->ull_nwaiters == 0) {
		/*
		 * If the wait was canceled early, we might need to
		 * clear out the lingering owner reference before
		 * freeing the ull.
		 */
		old_lingering_owner = ull->ull_owner;
		ull->ull_owner = THREAD_NULL;

		ull->ull_key.ulk_pid = 0;
		ull->ull_key.ulk_addr = 0;
		ull->ull_refcount--;
		assert(ull->ull_refcount > 0);
	}
	ull_put(ull);

	/* Need to be called after dropping the interlock */
	turnstile_cleanup();

	if (owner_thread != THREAD_NULL) {
		thread_deallocate(owner_thread);
	}
	if (old_owner != THREAD_NULL) {
		thread_deallocate(old_owner);
	}
	if (old_lingering_owner != THREAD_NULL) {
		thread_deallocate(old_lingering_owner);
	}

	assert(*retval >= 0);
}
__attribute__((noreturn))
static void
ulock_wait_continue(void * parameter, wait_result_t wr)
{
	thread_t self = current_thread();
	uthread_t uthread = (uthread_t)get_bsdthread_info(self);
	int ret = 0;

	ull_t *ull = (ull_t *)parameter;
	int32_t *retval = uthread->uu_save.uus_ulock_wait_data.retval;
	uint flags = uthread->uu_save.uus_ulock_wait_data.flags;
	thread_t owner_thread = uthread->uu_save.uus_ulock_wait_data.owner_thread;
	thread_t old_owner = uthread->uu_save.uus_ulock_wait_data.old_owner;

	ret = wait_result_to_return_code(wr);

	ull_lock(ull);
	turnstile_complete((uintptr_t)ull, &ull->ull_turnstile, NULL);

	ulock_wait_cleanup(ull, owner_thread, old_owner, retval);

	if ((flags & ULF_NO_ERRNO) && (ret != 0)) {
		*retval = -ret;
		ret = 0;
	}

	unix_syscall_return(ret);
}
int
ulock_wake(struct proc *p, struct ulock_wake_args *args, __unused int32_t *retval)
{
	uint opcode = args->operation & UL_OPCODE_MASK;
	uint flags = args->operation & UL_FLAGS_MASK;
	int ret = 0;
	ulk_t key;

	/* involved threads - each variable holds +1 ref if not null */
	thread_t wake_thread = THREAD_NULL;
	thread_t old_owner = THREAD_NULL;

	if ((flags & ULF_WAKE_MASK) != flags) {
		ret = EINVAL;
		goto munge_retval;
	}

#if DEVELOPMENT || DEBUG
	if (opcode == UL_DEBUG_HASH_DUMP_PID) {
		*retval = ull_hash_dump(p->p_pid);
		return ret;
	} else if (opcode == UL_DEBUG_HASH_DUMP_ALL) {
		*retval = ull_hash_dump(0);
		return ret;
	} else if (opcode == UL_DEBUG_SIMULATE_COPYIN_FAULT) {
		ull_simulate_copyin_fault = (int)(args->wake_value);
		return ret;
	}
#endif

	if (args->addr == 0) {
		ret = EINVAL;
		goto munge_retval;
	}
	if (flags & ULF_WAKE_THREAD) {
		if (flags & ULF_WAKE_ALL) {
			ret = EINVAL;
			goto munge_retval;
		}
		mach_port_name_t wake_thread_name = (mach_port_name_t)(args->wake_value);
		wake_thread = port_name_to_thread_for_ulock(wake_thread_name);
		if (wake_thread == THREAD_NULL) {
			ret = ESRCH;
			goto munge_retval;
		}
	}

	key.ulk_pid = p->p_pid;
	key.ulk_addr = args->addr;

	ull_t *ull = ull_get(&key, ULL_MUST_EXIST, NULL);
	if (ull == NULL) {
		if (wake_thread != THREAD_NULL) {
			thread_deallocate(wake_thread);
		}
		ret = ENOENT;
		goto munge_retval;
	}
	/* ull is locked */

	boolean_t clear_owner = FALSE; /* need to reset owner */
	switch (opcode) {
	case UL_UNFAIR_LOCK:
		clear_owner = TRUE;
		break;
	case UL_COMPARE_AND_WAIT:
		break;
	default:
		ret = EINVAL;
		goto out_locked;
	}

	if (opcode != ull->ull_opcode) {
		ret = EDOM;
		goto out_locked;
	}

	if (!clear_owner) {
		assert(ull->ull_owner == THREAD_NULL);
	}

	struct turnstile *ts;
	ts = turnstile_prepare((uintptr_t)ull, &ull->ull_turnstile,
	    TURNSTILE_NULL, TURNSTILE_ULOCK);

	if (flags & ULF_WAKE_ALL) {
		waitq_wakeup64_all(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)),
		    THREAD_AWAKENED, 0);
	} else if (flags & ULF_WAKE_THREAD) {
		kern_return_t kr = waitq_wakeup64_thread(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)),
		    wake_thread, THREAD_AWAKENED);
		if (kr != KERN_SUCCESS) {
			assert(kr == KERN_NOT_WAITING);
			ret = EALREADY;
		}
	} else {
		/*
		 * TODO: WAITQ_SELECT_MAX_PRI forces a linear scan of the (hashed) global waitq.
		 * Move to a ulock-private, priority sorted waitq (i.e. SYNC_POLICY_FIXED_PRIORITY) to avoid that.
		 *
		 * TODO: 'owner is not current_thread (or null)' likely means we can avoid this wakeup
		 * <rdar://problem/25487001>
		 */
		waitq_wakeup64_one(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)),
		    THREAD_AWAKENED, WAITQ_SELECT_MAX_PRI);
	}

	/*
	 * Reaching this point means I previously moved the lock to 'unowned' state in userspace.
	 * Therefore I need to relinquish my promotion.
	 *
	 * However, someone else could have locked it after I unlocked, and then had a third thread
	 * block on the lock, causing a promotion of some other owner.
	 *
	 * I don't want to stomp over that, so only remove the promotion if I'm the current owner.
	 */

	if (ull->ull_owner == current_thread()) {
		turnstile_update_inheritor(ts, THREAD_NULL,
		    (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));
		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
		old_owner = ull->ull_owner;
		ull->ull_owner = THREAD_NULL;
	}

	turnstile_complete((uintptr_t)ull, &ull->ull_turnstile, NULL);

out_locked:
	ull_put(ull);

	/* Need to be called after dropping the interlock */
	turnstile_cleanup();

	if (wake_thread != THREAD_NULL) {
		thread_deallocate(wake_thread);
	}

	if (old_owner != THREAD_NULL) {
		thread_deallocate(old_owner);
	}

munge_retval:
	if ((flags & ULF_NO_ERRNO) && (ret != 0)) {
		*retval = -ret;
		ret = 0;
	}
	return ret;
}
void
kdp_ulock_find_owner(__unused struct waitq * waitq, event64_t event, thread_waitinfo_t * waitinfo)
{
	ull_t *ull = EVENT_TO_ULOCK(event);
	assert(kdp_is_in_zone(ull, "ulocks"));

	if (ull->ull_opcode == UL_UNFAIR_LOCK) { // owner is only set if it's an os_unfair_lock
		waitinfo->owner = thread_tid(ull->ull_owner);
		waitinfo->context = ull->ull_key.ulk_addr;
	} else if (ull->ull_opcode == UL_COMPARE_AND_WAIT) { // otherwise, this is a spinlock
		waitinfo->owner = 0;
		waitinfo->context = ull->ull_key.ulk_addr;
	} else {
		panic("%s: Invalid ulock opcode %d addr %p", __FUNCTION__, ull->ull_opcode, (void*)ull);
	}
	return;
}