/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/file_internal.h>
#include <sys/proc_internal.h>
#include <sys/kernel.h>
#include <sys/guarded.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/pthread_shims.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>
#include <kern/task.h>
#include <kern/telemetry.h>
#include <kern/waitq.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <kern/debug.h>

#include <pexpert/pexpert.h>

#define XNU_TEST_BITMAP
#include <kern/bits.h>

#include <sys/ulock.h>
/*
 * How ulock promotion works:
 *
 * There's a requested policy field on every thread called 'promotions', which
 * expresses which ulock promotions are happening to this thread.
 * The promotion priority saturates until the promotion count goes to 0.
 *
 * We also track effective promotion qos, which is the qos before clamping.
 * This value is used for promoting a thread that another thread is waiting on,
 * so that the lock owner reinflates to the right priority after unclamping.
 *
 * This also works for non-QoS threads, which can donate base priority to QoS
 * and non-QoS threads alike.
 *
 * ulock wait applies a promotion to the owner communicated through
 * UL_UNFAIR_LOCK as waiters block, and that promotion is saturated as long as
 * there is still an owner. In ulock wake, if the waker is still the owner,
 * then it clears its ownership and drops the boost. It does NOT transfer
 * ownership/priority boost to the new thread. Instead, it selects the
 * waiting thread with the highest base priority to be woken next, and
 * relies on that thread to carry the torch for the other waiting threads.
 */
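
/*
 * Illustrative sketch (not part of this file): the userspace side of an
 * unfair lock backed by these syscalls. The lock word holds the owner's
 * thread port name so waiters can tell the kernel whom to promote. The
 * __ulock_wait/__ulock_wake wrapper names, their argument order, and the
 * pseudo-atomics below are assumptions standing in for the real libsyscall
 * and libplatform code.
 *
 *	// contended acquire: publish the owner, then block in the kernel
 *	while (!atomic_compare_exchange(&lock->value, 0, self_owner_value)) {
 *		__ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
 *		             &lock->value, lock->value, 0);
 *	}
 *
 *	// release: clear ownership, then let the kernel pick the
 *	// highest base-priority waiter to run next
 *	atomic_store(&lock->value, 0);
 *	__ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, &lock->value, 0);
 */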
static lck_grp_t *ull_lck_grp;
static lck_mtx_t ull_table_lock;

#define ull_global_lock()	lck_mtx_lock(&ull_table_lock)
#define ull_global_unlock()	lck_mtx_unlock(&ull_table_lock)

#define ull_lock(ull)		lck_mtx_lock(&ull->ull_lock)
#define ull_unlock(ull)		lck_mtx_unlock(&ull->ull_lock)
#define ull_assert_owned(ull)	LCK_MTX_ASSERT(&ull->ull_lock, LCK_MTX_ASSERT_OWNED)

#define ULOCK_TO_EVENT(ull)	((event_t)ull)
#define EVENT_TO_ULOCK(event)	((ull_t *)event)
typedef struct __attribute__((packed)) {
	user_addr_t	ulk_addr;
	pid_t		ulk_pid;
} ulk_t;

static __inline__ boolean_t
ull_key_match(ulk_t *a, ulk_t *b)
{
	return ((a->ulk_pid == b->ulk_pid) &&
	        (a->ulk_addr == b->ulk_addr));
}
typedef struct ull {
	/*
	 * ull_owner is the most recent known value for the owner of this ulock
	 * i.e. it may be out of date WRT the real value in userspace.
	 */
	thread_t	ull_owner; /* holds +1 thread reference */
	ulk_t		ull_key;
	ulk_t		ull_saved_key;
	lck_mtx_t	ull_lock;
	int32_t		ull_nwaiters;
	int32_t		ull_max_nwaiters;
	int32_t		ull_refcount;
	struct promote_token ull_promote_token;
	queue_chain_t	ull_hash_link;
	uint8_t		ull_opcode;
} ull_t;
static const bool ull_debug = false;

extern void ulock_initialize(void);
extern void kdp_ulock_find_owner(struct waitq *waitq, event64_t event, thread_waitinfo_t *waitinfo);

#define ULL_MUST_EXIST	0x0001
static ull_t *ull_get(ulk_t *, uint32_t);
static void ull_put(ull_t *);

static thread_t ull_promote_owner_locked(ull_t *ull, thread_t thread);
#if DEVELOPMENT || DEBUG
static int ull_simulate_copyin_fault = 0;
static int ull_panic_on_corruption = 0;

static void
ull_dump(ull_t *ull)
{
	kprintf("ull\t%p\n", ull);
	kprintf("ull_key.ulk_pid\t%d\n", ull->ull_key.ulk_pid);
	kprintf("ull_key.ulk_addr\t%p\n", (void *)(ull->ull_key.ulk_addr));
	kprintf("ull_saved_key.ulk_pid\t%d\n", ull->ull_saved_key.ulk_pid);
	kprintf("ull_saved_key.ulk_addr\t%p\n", (void *)(ull->ull_saved_key.ulk_addr));
	kprintf("ull_nwaiters\t%d\n", ull->ull_nwaiters);
	kprintf("ull_max_nwaiters\t%d\n", ull->ull_max_nwaiters);
	kprintf("ull_refcount\t%d\n", ull->ull_refcount);
	kprintf("ull_opcode\t%d\n\n", ull->ull_opcode);
	kprintf("ull_owner\t0x%llx\n\n", thread_tid(ull->ull_owner));
	kprintf("ull_promote_token\t%d, %d\n\n", ull->ull_promote_token.pt_basepri, ull->ull_promote_token.pt_qos);
}
#endif
static int ull_hash_buckets;
static queue_head_t *ull_bucket;
static uint32_t ull_nzalloc = 0;
static zone_t ull_zone;

static __inline__ uint32_t
ull_hash_index(char *key, size_t length)
{
	uint32_t hash = jenkins_hash(key, length);

	hash &= (ull_hash_buckets - 1);

	return hash;
}
/* Ensure that the key structure is packed,
 * so that no undefined memory is passed to
 * ull_hash_index().
 */
static_assert(sizeof(ulk_t) == sizeof(user_addr_t) + sizeof(pid_t));

#define ULL_INDEX(keyp)	ull_hash_index((char *)keyp, sizeof *keyp)
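
/*
 * Illustrative note (added): without the packed attribute ulk_t would be
 * padded out to 16 bytes on LP64, and ULL_INDEX() would feed those undefined
 * padding bytes to jenkins_hash(), so two equal keys could hash to different
 * buckets. Packed, the key is exactly sizeof(user_addr_t) + sizeof(pid_t)
 * == 12 bytes of fully-defined data.
 */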
void
ulock_initialize(void)
{
	ull_lck_grp = lck_grp_alloc_init("ulocks", NULL);
	lck_mtx_init(&ull_table_lock, ull_lck_grp, NULL);

	assert(thread_max > 16);
	/* Size ull_hash_buckets based on thread_max.
	 * Round up to nearest power of 2, then divide by 4
	 */
	ull_hash_buckets = (1 << (bit_ceiling(thread_max) - 2));

	kprintf("%s>thread_max=%d, ull_hash_buckets=%d\n", __FUNCTION__, thread_max, ull_hash_buckets);
	assert(ull_hash_buckets >= thread_max/4);

	ull_bucket = (queue_head_t *)kalloc(sizeof(queue_head_t) * ull_hash_buckets);
	assert(ull_bucket != NULL);

	for (int i = 0; i < ull_hash_buckets; i++) {
		queue_init(&ull_bucket[i]);
	}

	ull_zone = zinit(sizeof(ull_t),
	                 thread_max * sizeof(ull_t),
	                 0, "ulocks");

	zone_change(ull_zone, Z_NOENCRYPT, TRUE);

#if DEVELOPMENT || DEBUG
	if (!PE_parse_boot_argn("ulock_panic_on_corruption",
	                        &ull_panic_on_corruption, sizeof(ull_panic_on_corruption))) {
		ull_panic_on_corruption = 0;
	}
#endif
}
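
/*
 * Worked sizing example (illustrative, assumed thread_max value; reads
 * bit_ceiling() as ceil(log2(n)) per the comment above): with
 * thread_max = 2560, bit_ceiling(2560) == 12, so
 * ull_hash_buckets = 1 << (12 - 2) = 1024, which satisfies
 * assert(1024 >= 2560/4 == 640). The bucket table then costs
 * 1024 * sizeof(queue_head_t) bytes of kalloc'd memory.
 */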
#if DEVELOPMENT || DEBUG
/* Count the number of hash entries for a given pid.
 * if pid==0, dump the whole table.
 */
static int
ull_hash_dump(pid_t pid)
{
	int count = 0;
	ull_t *elem;

	if (pid == 0) {
		kprintf("%s>total number of ull_t allocated %d\n", __FUNCTION__, ull_nzalloc);
		kprintf("%s>BEGIN\n", __FUNCTION__);
	}
	ull_global_lock();
	for (int i = 0; i < ull_hash_buckets; i++) {
		if (!queue_empty(&ull_bucket[i])) {
			if (pid == 0) {
				kprintf("%s>index %d:\n", __FUNCTION__, i);
			}
			qe_foreach_element(elem, &ull_bucket[i], ull_hash_link) {
				if ((pid == 0) || (pid == elem->ull_key.ulk_pid)) {
					ull_dump(elem);
					count++;
				}
			}
		}
	}
	ull_global_unlock();
	if (pid == 0) {
		kprintf("%s>END\n", __FUNCTION__);
	}
	return count;
}
#endif
static ull_t *
ull_alloc(ulk_t *key)
{
	ull_t *ull = (ull_t *)zalloc(ull_zone);
	assert(ull != NULL);

	ull->ull_refcount = 1;
	ull->ull_key = *key;
	ull->ull_saved_key = *key;
	ull->ull_nwaiters = 0;
	ull->ull_max_nwaiters = 0;
	ull->ull_opcode = 0;

	ull->ull_owner = THREAD_NULL;
	ull->ull_promote_token = PROMOTE_TOKEN_INIT;

	lck_mtx_init(&ull->ull_lock, ull_lck_grp, NULL);

	ull_nzalloc++;
	return ull;
}

static void
ull_free(ull_t *ull)
{
	assert(ull->ull_owner == THREAD_NULL);

	lck_mtx_assert(&ull->ull_lock, LCK_ASSERT_NOTOWNED);

	lck_mtx_destroy(&ull->ull_lock, ull_lck_grp);

	zfree(ull_zone, ull);
}
/* Finds an existing ulock structure (ull_t), or creates a new one.
 * If MUST_EXIST flag is set, returns NULL instead of creating a new one.
 * The ulock structure is returned with ull_lock locked
 *
 * TODO: Per-bucket lock to reduce contention on global lock
 */
static ull_t *
ull_get(ulk_t *key, uint32_t flags)
{
	ull_t *ull = NULL;
	uint i = ULL_INDEX(key);
	ull_t *elem;

	ull_global_lock();
	qe_foreach_element(elem, &ull_bucket[i], ull_hash_link) {
		ull_lock(elem);
		if (ull_key_match(&elem->ull_key, key)) {
			ull = elem;
			break;
		}
		ull_unlock(elem);
	}
	if (ull == NULL) {
		if (flags & ULL_MUST_EXIST) {
			/* Must already exist (called from wake) */
			ull_global_unlock();
			return NULL;
		}

		/* NRG maybe drop the ull_global_lock before the kalloc,
		 * then take the lock and check again for a key match
		 * and either use the new ull_t or free it.
		 */

		ull = ull_alloc(key);

		if (ull == NULL) {
			ull_global_unlock();
			return NULL;
		}

		ull_lock(ull);

		enqueue(&ull_bucket[i], &ull->ull_hash_link);
	}
	ull->ull_refcount++;

	ull_global_unlock();

	return ull; /* still locked */
}
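
/*
 * Illustrative usage of the get/put pair (mirrors the callers below): every
 * successful ull_get() returns the ull_t locked and with an extra reference,
 * so it must be balanced by ull_put() once the caller is done:
 *
 *	ull_t *ull = ull_get(&key, 0);
 *	if (ull != NULL) {
 *		... inspect or modify fields under ull_lock ...
 *		ull_put(ull);	// drops the ref, unlocks, frees on last ref
 *	}
 */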
/*
 * Must be called with ull_lock held
 */
static void
ull_put(ull_t *ull)
{
	ull_assert_owned(ull);
	int refcount = --ull->ull_refcount;
	assert(refcount == 0 ? (ull->ull_key.ulk_pid == 0 && ull->ull_key.ulk_addr == 0) : 1);
	ull_unlock(ull);

	if (refcount > 0) {
		return;
	}

	ull_global_lock();
	remqueue(&ull->ull_hash_link);
	ull_global_unlock();

#if DEVELOPMENT || DEBUG
	if (ull_debug) {
		kprintf("%s>", __FUNCTION__);
		ull_dump(ull);
	}
#endif

	ull_free(ull);
}
int
ulock_wait(struct proc *p, struct ulock_wait_args *args, int32_t *retval)
{
	uint opcode = args->operation & UL_OPCODE_MASK;
	uint flags = args->operation & UL_FLAGS_MASK;
	int ret = 0;
	thread_t self = current_thread();
	int id = thread_tid(self);
	ulk_t key;

	/* involved threads - each variable holds +1 ref if not null */
	thread_t owner_thread = THREAD_NULL;
	thread_t old_owner = THREAD_NULL;
	thread_t old_lingering_owner = THREAD_NULL;
	sched_call_t workq_callback = NULL;

	if (ull_debug) {
		kprintf("[%d]%s>ENTER opcode %d addr %llx value %llx timeout %d flags %x\n",
			id, __FUNCTION__, opcode, (unsigned long long)(args->addr),
			args->value, args->timeout, flags);
	}
	if ((flags & ULF_WAIT_MASK) != flags) {
		ret = EINVAL;
		goto munge_retval;
	}

	boolean_t set_owner = FALSE;

	switch (opcode) {
	case UL_UNFAIR_LOCK:
		set_owner = TRUE;
		break;
	case UL_COMPARE_AND_WAIT:
		break;
	default:
		if (ull_debug) {
			kprintf("[%d]%s>EINVAL opcode %d addr 0x%llx flags 0x%x\n",
				id, __FUNCTION__, opcode,
				(unsigned long long)(args->addr), flags);
		}
		ret = EINVAL;
		goto munge_retval;
	}
	/* 32-bit lock type for UL_COMPARE_AND_WAIT and UL_UNFAIR_LOCK */
	uint32_t value = 0;

	if ((args->addr == 0) || (args->addr % _Alignof(_Atomic(typeof(value))))) {
		ret = EINVAL;
		goto munge_retval;
	}

	key.ulk_pid = p->p_pid;
	key.ulk_addr = args->addr;

	if (flags & ULF_WAIT_WORKQ_DATA_CONTENTION) {
		workq_callback = workqueue_get_sched_callback();
		workq_callback = thread_disable_sched_call(self, workq_callback);
	}

	ull_t *ull = ull_get(&key, 0);
	if (ull == NULL) {
		ret = ENOMEM;
		goto munge_retval;
	}
	/* ull is locked */

	ull->ull_nwaiters++;
	if (ull->ull_nwaiters > ull->ull_max_nwaiters) {
		ull->ull_max_nwaiters = ull->ull_nwaiters;
	}

	if (ull->ull_opcode == 0) {
		ull->ull_opcode = opcode;
	} else if (ull->ull_opcode != opcode) {
		ull_unlock(ull);
		ret = EDOM;
		goto out;
	}

	/*
	 * We don't want this copyin to get wedged behind VM operations,
	 * but we have to read the userspace value under the ull lock for correctness.
	 *
	 * Until <rdar://problem/24999882> exists,
	 * fake it by disabling preemption across copyin, which forces any
	 * vm_fault we encounter to fail.
	 */
	uint64_t val64; /* copyin_word always zero-extends to 64-bits */

	disable_preemption();
	int copy_ret = copyin_word(args->addr, &val64, sizeof(value));
	enable_preemption();

	value = (uint32_t)val64;
#if DEVELOPMENT || DEBUG
	/* Occasionally simulate copyin finding the user address paged out */
	if (((ull_simulate_copyin_fault == p->p_pid) || (ull_simulate_copyin_fault == 1)) && (copy_ret == 0)) {
		static _Atomic int fault_inject = 0;
		if (__c11_atomic_fetch_add(&fault_inject, 1, __ATOMIC_RELAXED) % 73 == 0) {
			copy_ret = EFAULT;
		}
	}
#endif
	if (copy_ret != 0) {
		ull_unlock(ull);

		/* copyin() will return an error if the access to the user addr would have faulted,
		 * so just return and let the user level code fault it in.
		 */
		ret = copy_ret;
		goto out;
	}

	if (value != args->value) {
		/* Lock value has changed from expected so bail out */
		ull_unlock(ull);
		if (ull_debug) {
			kprintf("[%d]%s>Lock value %d has changed from expected %d so bail out\n",
				id, __FUNCTION__, value, (uint32_t)(args->value));
		}
		goto out;
	}
	if (set_owner) {
		mach_port_name_t owner_name = ulock_owner_value_to_port_name(args->value);
		owner_thread = port_name_to_thread_for_ulock(owner_name);

		/* HACK: don't bail on MACH_PORT_DEAD, to avoid blowing up the no-tsd pthread lock */
		if (owner_name != MACH_PORT_DEAD && owner_thread == THREAD_NULL) {
#if DEBUG || DEVELOPMENT
			if (ull_panic_on_corruption) {
				if (flags & ULF_NO_ERRNO) {
					// ULF_NO_ERRNO is used by libplatform ulocks, but not libdispatch ones.
					// Don't panic on libdispatch ulock corruptions; the userspace likely
					// mismanaged a dispatch queue.
					panic("ulock_wait: ulock is corrupted; value=0x%x, ull=%p",
						(uint32_t)(args->value), ull);
				}
			}
#endif
			/*
			 * Translation failed - even though the lock value is up to date,
			 * whatever was stored in the lock wasn't actually a thread port.
			 */
			ull_unlock(ull);
			ret = EOWNERDEAD;
			goto out;
		}
		/* owner_thread has a +1 reference */

		/*
		 * At this point, I know:
		 * a) owner_thread is definitely the current owner, because I just read the value
		 * b) owner_thread is either:
		 *    i) holding the user lock or
		 *    ii) has just unlocked the user lock after I looked
		 *        and is heading toward the kernel to call ull_wake.
		 *        If so, it's going to have to wait for the ull mutex.
		 *
		 * Therefore, I can promote its priority to match mine, and I can rely on it to
		 * come by later to issue the wakeup and lose its promotion.
		 */

		old_owner = ull_promote_owner_locked(ull, owner_thread);
	}
	wait_result_t wr;
	uint32_t timeout = args->timeout;
	thread_set_pending_block_hint(self, kThreadWaitUserLock);
	if (timeout) {
		wr = assert_wait_timeout(ULOCK_TO_EVENT(ull), THREAD_ABORTSAFE, timeout, NSEC_PER_USEC);
	} else {
		wr = assert_wait(ULOCK_TO_EVENT(ull), THREAD_ABORTSAFE);
	}

	ull_unlock(ull);

	if (ull_debug) {
		kprintf("[%d]%s>after assert_wait() returned %d\n", id, __FUNCTION__, wr);
	}

	if (set_owner && owner_thread != THREAD_NULL && wr == THREAD_WAITING) {
		wr = thread_handoff(owner_thread);
		/* owner_thread ref is consumed */
		owner_thread = THREAD_NULL;
	} else {
		/* NRG At some point this should be a continuation based block, so that we can avoid saving the full kernel context. */
		wr = thread_block(NULL);
	}

	if (ull_debug) {
		kprintf("[%d]%s>thread_block() returned %d\n", id, __FUNCTION__, wr);
	}
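
	/*
	 * Descriptive note (added): thread_handoff() context-switches directly
	 * to the owner instead of going through the run queue, consuming the
	 * +1 owner_thread reference; the plain thread_block() path is used when
	 * there is no known owner to hand off to.
	 */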
	switch (wr) {
	case THREAD_AWAKENED:
		break;
	case THREAD_TIMED_OUT:
		ret = ETIMEDOUT;
		break;
	case THREAD_INTERRUPTED:
	default:
		ret = EINTR;
		break;
	}
out:
	ull_lock(ull);
	*retval = --ull->ull_nwaiters;
	if (ull->ull_nwaiters == 0) {
		/*
		 * If the wait was canceled early, we might need to
		 * clear out the lingering owner reference before
		 * freeing the ull.
		 */
		if (ull->ull_owner != THREAD_NULL) {
			old_lingering_owner = ull_promote_owner_locked(ull, THREAD_NULL);
		}

		assert(ull->ull_owner == THREAD_NULL);

		ull->ull_key.ulk_pid = 0;
		ull->ull_key.ulk_addr = 0;
		ull->ull_refcount--;
		assert(ull->ull_refcount > 0);
	}
	ull_put(ull);

	if (owner_thread != THREAD_NULL) {
		thread_deallocate(owner_thread);
	}

	if (old_owner != THREAD_NULL) {
		thread_deallocate(old_owner);
	}

	if (old_lingering_owner != THREAD_NULL) {
		thread_deallocate(old_lingering_owner);
	}

	assert(*retval >= 0);

munge_retval:
	if (workq_callback) {
		thread_reenable_sched_call(self, workq_callback);
	}

	if ((flags & ULF_NO_ERRNO) && (ret != 0)) {
		*retval = -ret;
		ret = 0;
	}
	return ret;
}
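
/*
 * Illustrative note on the return convention (assumption; the __ulock_wait
 * wrapper name stands in for the libsyscall wrapper): with ULF_NO_ERRNO an
 * error comes back as a negated value in the syscall return register rather
 * than via errno, while success returns the number of waiters still blocked
 * on the ulock:
 *
 *	int rc = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO,
 *	                      &word, expected, timeout_us);
 *	if (rc < 0) {
 *		// -rc is the error, e.g. ETIMEDOUT or EINTR
 *	} else {
 *		// rc waiters remain (0 means this thread was the last one)
 *	}
 */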
int
ulock_wake(struct proc *p, struct ulock_wake_args *args, __unused int32_t *retval)
{
	uint opcode = args->operation & UL_OPCODE_MASK;
	uint flags = args->operation & UL_FLAGS_MASK;
	int ret = 0;
	int id = thread_tid(current_thread());
	ulk_t key;

	/* involved threads - each variable holds +1 ref if not null */
	thread_t wake_thread = THREAD_NULL;
	thread_t old_owner = THREAD_NULL;

	if (ull_debug) {
		kprintf("[%d]%s>ENTER opcode %d addr %llx flags %x\n",
			id, __FUNCTION__, opcode, (unsigned long long)(args->addr), flags);
	}

	if ((flags & ULF_WAKE_MASK) != flags) {
		ret = EINVAL;
		goto munge_retval;
	}
#if DEVELOPMENT || DEBUG
	if (opcode == UL_DEBUG_HASH_DUMP_PID) {
		*retval = ull_hash_dump(p->p_pid);
		return ret;
	} else if (opcode == UL_DEBUG_HASH_DUMP_ALL) {
		*retval = ull_hash_dump(0);
		return ret;
	} else if (opcode == UL_DEBUG_SIMULATE_COPYIN_FAULT) {
		ull_simulate_copyin_fault = (int)(args->wake_value);
		return ret;
	}
#endif

	if (args->addr == 0) {
		ret = EINVAL;
		goto munge_retval;
	}
	if (flags & ULF_WAKE_THREAD) {
		if (flags & ULF_WAKE_ALL) {
			ret = EINVAL;
			goto munge_retval;
		}
		mach_port_name_t wake_thread_name = (mach_port_name_t)(args->wake_value);
		wake_thread = port_name_to_thread_for_ulock(wake_thread_name);
		if (wake_thread == THREAD_NULL) {
			ret = ESRCH;
			goto munge_retval;
		}
	}

	key.ulk_pid = p->p_pid;
	key.ulk_addr = args->addr;

	ull_t *ull = ull_get(&key, ULL_MUST_EXIST);

	if (ull == NULL) {
		if (wake_thread != THREAD_NULL) {
			thread_deallocate(wake_thread);
		}
		ret = ENOENT;
		goto munge_retval;
	}
	/* ull is locked */
	boolean_t clear_owner = FALSE; /* need to reset owner */

	switch (opcode) {
	case UL_UNFAIR_LOCK:
		clear_owner = TRUE;
		break;
	case UL_COMPARE_AND_WAIT:
		break;
	default:
		if (ull_debug) {
			kprintf("[%d]%s>EINVAL opcode %d addr 0x%llx flags 0x%x\n",
				id, __FUNCTION__, opcode, (unsigned long long)(args->addr), flags);
		}
		ret = EINVAL;
		goto out_locked;
	}

	if (opcode != ull->ull_opcode) {
		if (ull_debug) {
			kprintf("[%d]%s>EDOM - opcode mismatch - opcode %d addr 0x%llx flags 0x%x\n",
				id, __FUNCTION__, opcode, (unsigned long long)(args->addr), flags);
		}
		ret = EDOM;
		goto out_locked;
	}

	if (!clear_owner) {
		assert(ull->ull_owner == THREAD_NULL);
	}
	if (flags & ULF_WAKE_ALL) {
		thread_wakeup(ULOCK_TO_EVENT(ull));
	} else if (flags & ULF_WAKE_THREAD) {
		kern_return_t kr = thread_wakeup_thread(ULOCK_TO_EVENT(ull), wake_thread);
		if (kr != KERN_SUCCESS) {
			assert(kr == KERN_NOT_WAITING);
			ret = EALREADY;
		}
	} else {
		/*
		 * TODO: WAITQ_SELECT_MAX_PRI forces a linear scan of the (hashed) global waitq.
		 * Move to a ulock-private, priority sorted waitq to avoid that.
		 *
		 * TODO: 'owner is not current_thread (or null)' likely means we can avoid this wakeup
		 * <rdar://problem/25487001>
		 */
		thread_wakeup_one_with_pri(ULOCK_TO_EVENT(ull), WAITQ_SELECT_MAX_PRI);
	}
	/*
	 * Reaching this point means I previously moved the lock to 'unowned' state in userspace.
	 * Therefore I need to relinquish my promotion.
	 *
	 * However, someone else could have locked it after I unlocked, and then had a third thread
	 * block on the lock, causing a promotion of some other owner.
	 *
	 * I don't want to stomp over that, so only remove the promotion if I'm the current owner.
	 */

	if (ull->ull_owner == current_thread()) {
		old_owner = ull_promote_owner_locked(ull, THREAD_NULL);
	}

out_locked:
	ull_put(ull);

	if (wake_thread != THREAD_NULL) {
		thread_deallocate(wake_thread);
	}

	if (old_owner != THREAD_NULL) {
		thread_deallocate(old_owner);
	}

munge_retval:
	if ((flags & ULF_NO_ERRNO) && (ret != 0)) {
		*retval = -ret;
		ret = 0;
	}
	return ret;
}
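
/*
 * Illustrative note (assumption; the __ulock_wake wrapper name stands in for
 * the libsyscall wrapper): the three wake modes map to different unlock
 * strategies in userspace:
 *
 *	// default: wake one waiter, chosen by highest base priority
 *	__ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, &lock->value, 0);
 *
 *	// ULF_WAKE_ALL: wake every waiter, e.g. when tearing the lock down
 *	__ulock_wake(UL_COMPARE_AND_WAIT | ULF_WAKE_ALL, &word, 0);
 *
 *	// ULF_WAKE_THREAD: directed wakeup of one specific thread,
 *	// named by its port in wake_value
 *	__ulock_wake(UL_COMPARE_AND_WAIT | ULF_WAKE_THREAD, &word, target_port);
 */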
/*
 * Change ull_owner to be new_owner, and update it with the properties
 * of the current thread.
 *
 * Records the highest current promotion value in ull_promote_token, and applies that
 * to new_owner.
 *
 * Returns +1 ref to the old ull_owner if it is going away.
 */
static thread_t
ull_promote_owner_locked(ull_t    *ull,
                         thread_t new_owner)
{
	if (new_owner != THREAD_NULL && ull->ull_owner == new_owner) {
		thread_user_promotion_update(new_owner, current_thread(), &ull->ull_promote_token);
		return THREAD_NULL;
	}

	thread_t old_owner = ull->ull_owner;
	ull->ull_owner = THREAD_NULL;

	if (new_owner != THREAD_NULL) {
		/* The ull_owner field now owns a +1 ref on thread */
		thread_reference(new_owner);
		ull->ull_owner = new_owner;

		thread_user_promotion_add(new_owner, current_thread(), &ull->ull_promote_token);
	} else {
		/* No new owner - clear the saturated promotion value */
		ull->ull_promote_token = PROMOTE_TOKEN_INIT;
	}

	if (old_owner != THREAD_NULL) {
		thread_user_promotion_drop(old_owner);
	}

	/* Return the +1 ref from the ull_owner field */
	return old_owner;
}
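
/*
 * Descriptive note (added): callers hold the ull lock across this call and
 * pass any returned old owner to thread_deallocate() only after the ull lock
 * is dropped, since the +1 reference returned here is the one that used to
 * live in the ull_owner field.
 */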
void
kdp_ulock_find_owner(__unused struct waitq *waitq, event64_t event, thread_waitinfo_t *waitinfo)
{
	ull_t *ull = EVENT_TO_ULOCK(event);
	assert(kdp_is_in_zone(ull, "ulocks"));

	if (ull->ull_opcode == UL_UNFAIR_LOCK) { // owner is only set if it's an os_unfair_lock
		waitinfo->owner = thread_tid(ull->ull_owner);
		waitinfo->context = ull->ull_key.ulk_addr;
	} else if (ull->ull_opcode == UL_COMPARE_AND_WAIT) { // otherwise, this is a spinlock
		waitinfo->owner = 0;
		waitinfo->context = ull->ull_key.ulk_addr;
	} else {
		panic("%s: Invalid ulock opcode %d addr %p", __FUNCTION__, ull->ull_opcode, (void*)ull);
	}
	return;
}