+/*
+ * Routine: hw_lock_init
+ *
+ * Initialize a hardware lock.
+ */
+void
+hw_lock_init(hw_lock_t lock)
+{
+	ordered_store_hw(lock, 0);
+}
+
+/*
+ * Routine: hw_lock_lock_contended
+ *
+ * Spin until lock is acquired or timeout expires.
+ * timeout is in mach_absolute_time ticks. Called with
+ * preemption disabled.
+ */
+
+#if __SMP__
+static unsigned int NOINLINE
+hw_lock_lock_contended(hw_lock_t lock, uintptr_t data, uint64_t timeout, boolean_t do_panic)
+{
+	uint64_t end = 0;
+	uintptr_t holder = lock->lock_data;
+	int i;
+
+	if (timeout == 0)
+		timeout = LOCK_PANIC_TIMEOUT;
+#if CONFIG_DTRACE
+	uint64_t begin;
+	boolean_t dtrace_enabled = lockstat_probemap[LS_LCK_SPIN_LOCK_SPIN] != 0;
+	if (__improbable(dtrace_enabled))
+		begin = mach_absolute_time();
+#endif
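+	/*
+	 * Two-level spin: the inner loop snoops the lock word up to
+	 * LOCK_SNOOP_SPINS times between deadline checks, so the
+	 * timebase is not read on every iteration.
+	 */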
+	for ( ; ; ) {
+		for (i = 0; i < LOCK_SNOOP_SPINS; i++) {
+			cpu_pause();
+#if (!__ARM_ENABLE_WFE_) || (LOCK_PRETEST)
+			holder = ordered_load_hw(lock);
+			if (holder != 0)
+				continue;
+#endif
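+			/*
+			 * When the pretest load above is compiled in, the
+			 * compare-exchange is only attempted once the lock
+			 * word has been observed as free, which avoids
+			 * repeatedly pulling the cache line exclusive with
+			 * failing atomics.
+			 */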
+			if (atomic_compare_exchange(&lock->lock_data, 0, data,
+				memory_order_acquire_smp, TRUE)) {
+#if CONFIG_DTRACE
+				if (__improbable(dtrace_enabled)) {
+					uint64_t spintime = mach_absolute_time() - begin;
+					if (spintime > dtrace_spin_threshold)
+						LOCKSTAT_RECORD2(LS_LCK_SPIN_LOCK_SPIN, lock, spintime, dtrace_spin_threshold);
+				}
+#endif
+				return 1;
+			}
+		}
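+		/*
+		 * Arm the deadline lazily, after the first round of snoop
+		 * spins; ml_get_timebase() is never read if the lock is
+		 * acquired during that first round.
+		 */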
+		if (end == 0) {
+			end = ml_get_timebase() + timeout;
+		}
+		else if (ml_get_timebase() >= end)
+			break;
+	}
+	if (do_panic) {
+		// Capture the actual time spent blocked, which may be higher than the timeout
+		// if a misbehaving interrupt stole this thread's CPU time.
+		panic("Spinlock timeout after %llu ticks, %p = %lx",
+			(ml_get_timebase() - end + timeout), lock, holder);
+	}
+	return 0;
+}
+#endif // __SMP__
+
+static inline void
+hw_lock_lock_internal(hw_lock_t lock, thread_t thread)
+{
+	uintptr_t state;
+
+	state = LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK;
+#if __SMP__
+
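+	/*
+	 * With LOCK_PRETEST, a plain load screens out an already-held
+	 * lock before the compare-exchange, so a contended acquire does
+	 * not immediately take the cache line exclusive.
+	 */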
+#if LOCK_PRETEST
+	if (ordered_load_hw(lock))
+		goto contended;
+#endif // LOCK_PRETEST
+	if (atomic_compare_exchange(&lock->lock_data, 0, state,
+		memory_order_acquire_smp, TRUE)) {
+		goto end;
+	}
+#if LOCK_PRETEST
+contended:
+#endif // LOCK_PRETEST
+	hw_lock_lock_contended(lock, state, 0, spinlock_timeout_panic);
+end:
+#else // __SMP__
+	if (lock->lock_data)
+		panic("Spinlock held %p", lock);
+	lock->lock_data = state;
+#endif // __SMP__
+#if CONFIG_DTRACE
+	LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, 0);
+#endif
+	return;
+}
+
+/*
+ * Routine: hw_lock_lock
+ *
+ * Acquire lock, spinning until it becomes available;
+ * returns with preemption disabled.
+ */
+void
+hw_lock_lock(hw_lock_t lock)
+{
+	thread_t thread = current_thread();
+	disable_preemption_for_thread(thread);
+	hw_lock_lock_internal(lock, thread);
+}
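+
+/*
+ * Illustrative (hypothetical) caller, not part of this change: assuming
+ * the usual hw_lock_data_t storage type, a short critical section
+ * looks like
+ *
+ *	hw_lock_data_t ilk;
+ *
+ *	hw_lock_init(&ilk);
+ *	...
+ *	hw_lock_lock(&ilk);	// spins; returns with preemption disabled
+ *	// short, non-blocking critical section
+ *	hw_lock_unlock(&ilk);	// releases the lock, re-enables preemption
+ */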
+
+/*
+ * Routine: hw_lock_lock_nopreempt
+ *
+ * Acquire lock, spinning until it becomes available.
+ * The caller must already have preemption disabled.
+ */
+void
+hw_lock_lock_nopreempt(hw_lock_t lock)
+{
+	thread_t thread = current_thread();
+	if (__improbable(!preemption_disabled_for_thread(thread)))
+		panic("Attempt to take no-preempt spinlock %p in preemptible context", lock);
+	hw_lock_lock_internal(lock, thread);
+}
+
+/*
+ * Routine: hw_lock_to
+ *
+ * Acquire lock, spinning until it becomes available or the
+ * timeout expires. Timeout is in mach_absolute_time ticks.
+ * Returns 1 on success, 0 on timeout; preemption is left
+ * disabled either way.
+ */
+unsigned int
+hw_lock_to(hw_lock_t lock, uint64_t timeout)
+{
+	thread_t thread;
+	uintptr_t state;
+	unsigned int success = 0;
+
+	thread = current_thread();
+	disable_preemption_for_thread(thread);
+	state = LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK;
+#if __SMP__
+
+#if LOCK_PRETEST
+	if (ordered_load_hw(lock))
+		goto contended;
+#endif // LOCK_PRETEST
+	if (atomic_compare_exchange(&lock->lock_data, 0, state,
+		memory_order_acquire_smp, TRUE)) {
+		success = 1;
+		goto end;
+	}
+#if LOCK_PRETEST
+contended:
+#endif // LOCK_PRETEST
+	success = hw_lock_lock_contended(lock, state, timeout, FALSE);
+end:
+#else // __SMP__
+	(void)timeout;
+	if (ordered_load_hw(lock) == 0) {
+		ordered_store_hw(lock, state);
+		success = 1;
+	}
+#endif // __SMP__
+#if CONFIG_DTRACE
+	if (success)
+		LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, 0);
+#endif
+	return success;
+}
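+
+/*
+ * Illustrative (hypothetical) use: callers that cannot spin forever
+ * check the return value; timeout_ticks below is an assumed value.
+ *
+ *	if (!hw_lock_to(&ilk, timeout_ticks)) {
+ *		// timed out: lock not acquired, preemption still disabled
+ *		...
+ *	}
+ */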
+
+/*
+ * Routine: hw_lock_try
+ *
+ * Attempt to acquire the lock without spinning;
+ * returns with preemption disabled on success.
+ */
+static inline unsigned int
+hw_lock_try_internal(hw_lock_t lock, thread_t thread)
+{
+	int success = 0;
+
+#if __SMP__
+#if LOCK_PRETEST
+	if (ordered_load_hw(lock))
+		goto failed;
+#endif // LOCK_PRETEST
+	success = atomic_compare_exchange(&lock->lock_data, 0, LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK,
+		memory_order_acquire_smp, FALSE);
+#else
+	if (lock->lock_data == 0) {
+		lock->lock_data = LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK;
+		success = 1;
+	}
+#endif // __SMP__
+
+#if LOCK_PRETEST
+failed:
+#endif // LOCK_PRETEST
+#if CONFIG_DTRACE
+	if (success)
+		LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, 0);
+#endif
+	return success;
+}
+
+unsigned int
+hw_lock_try(hw_lock_t lock)
+{
+	thread_t thread = current_thread();
+	disable_preemption_for_thread(thread);
+	unsigned int success = hw_lock_try_internal(lock, thread);
+	if (!success)
+		enable_preemption();
+	return success;
+}
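+
+/*
+ * Illustrative (hypothetical) use of the try variant:
+ *
+ *	if (hw_lock_try(&ilk)) {
+ *		// lock held, preemption disabled
+ *		...
+ *		hw_lock_unlock(&ilk);
+ *	} else {
+ *		// lock not taken; preemption level is unchanged
+ *	}
+ */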
+
+unsigned int
+hw_lock_try_nopreempt(hw_lock_t lock)
+{
+	thread_t thread = current_thread();
+	if (__improbable(!preemption_disabled_for_thread(thread)))
+		panic("Attempt to test no-preempt spinlock %p in preemptible context", lock);
+	return hw_lock_try_internal(lock, thread);
+}
+
+/*
+ * Routine: hw_lock_unlock
+ *
+ * Unconditionally release lock, release preemption level.
+ */
+static inline void
+hw_lock_unlock_internal(hw_lock_t lock)
+{
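+	/*
+	 * The release store pairs with the acquire on the lock side:
+	 * everything written inside the critical section is visible
+	 * before the lock word reads as free.
+	 */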
+	__c11_atomic_store((_Atomic uintptr_t *)&lock->lock_data, 0, memory_order_release_smp);
+#if __arm__ || __arm64__
+	// ARM tests are only for open-source exclusion
+	set_event();
+#endif // __arm__ || __arm64__
+#if CONFIG_DTRACE
+	LOCKSTAT_RECORD(LS_LCK_SPIN_UNLOCK_RELEASE, lock, 0);
+#endif /* CONFIG_DTRACE */
+}
+
+void
+hw_lock_unlock(hw_lock_t lock)
+{
+	hw_lock_unlock_internal(lock);
+	enable_preemption();
+}
+
+void
+hw_lock_unlock_nopreempt(hw_lock_t lock)
+{
+	if (__improbable(!preemption_disabled_for_thread(current_thread())))
+		panic("Attempt to release no-preempt spinlock %p in preemptible context", lock);
+	hw_lock_unlock_internal(lock);
+}
+
+/*
+ * Routine: hw_lock_held
+ *
+ * Returns non-zero if the lock is currently held.
+ * Does not change the preemption state. N.B. Racy, of course.
+ */
+unsigned int
+hw_lock_held(hw_lock_t lock)
+{
+	return (ordered_load_hw(lock) != 0);
+}