+ if (xproc) {
+ uint64_t object = 0;
+ uint64_t offset = 0;
+
+ ret = uaddr_findobj(args->addr, &object, &offset);
+ if (ret) {
+ ret = EINVAL;
+ goto munge_retval;
+ }
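+ /* key shared locks by backing object/offset so all mappings of the same memory resolve to the same lock */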
+ key.ulk_key_type = ULK_XPROC;
+ key.ulk_object = object;
+ key.ulk_offset = offset;
+ } else {
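+ /* process-private locks are keyed by owning pid and user address */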
+ key.ulk_key_type = ULK_UADDR;
+ key.ulk_pid = p->p_pid;
+ key.ulk_addr = args->addr;
+ }
+
+ if ((flags & ULF_WAIT_ADAPTIVE_SPIN) && set_owner) {
+ /*
+ * Attempt the copyin outside of the lock once.
+ *
+ * If it doesn't match (which is common), return right away.
+ *
+ * If it matches, resolve the current owner, and if it is on core,
+ * spin a bit waiting for the value to change. If the owner isn't on
+ * core, or if the value stays stable, then go on with the regular
+ * blocking code.
+ */
+ uint64_t end = 0;
+ uint32_t u32;
+
+ ret = copyin_atomic32(args->addr, &u32);
+ if (ret || u32 != args->value) {
+ goto munge_retval;
+ }
+ for (;;) {
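+ /* resolve the owner thread from the observed lock value once; stop spinning if that fails */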
+ if (owner_thread == NULL && ulock_resolve_owner(u32, &owner_thread) != 0) {
+ break;
+ }
+
+ /* owner_thread may have a +1 starting here */
+
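+ /* spinning only pays off while the owner is actually running on a core */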
+ if (!machine_thread_on_core(owner_thread)) {
+ break;
+ }
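+ /* arm the spin deadline on the first pass, give up once it expires */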
+ if (end == 0) {
+ clock_interval_to_deadline(ulock_adaptive_spin_usecs,
+ NSEC_PER_USEC, &end);
+ } else if (mach_absolute_time() > end) {
+ break;
+ }
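+ /* spin-wait while the word still matches; if it changed (or the copyin failed), return right away */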
+ if (copyin_atomic32_wait_if_equals(args->addr, u32) != 0) {
+ goto munge_retval;
+ }
+ }
+ }