-#define simple_lock_init(l,t) usimple_lock_init(l,t)
-#define simple_lock(l) usimple_lock(l)
-#define simple_unlock(l) usimple_unlock(l)
-#define simple_lock_try(l) usimple_lock_try(l)
-#define simple_lock_addr(l) (&(l))
-#define __slock_held_func__(l) usimple_lock_held(l)
-#endif /* !defined(simple_lock_init) */
-
-#if USLOCK_DEBUG
-/*
- * Debug-time only:
- * + verify that usimple_lock is already held by caller
- * + verify that usimple_lock is NOT held by caller
- * + verify that current processor owns no usimple_locks
- *
- * We do not provide a simple_lock_NOT_held function because
- * it's impossible to verify when only MACH_RT is turned on.
- * In that situation, only preemption is enabled/disabled
- * around lock use, and it's impossible to tell which lock
- * acquisition caused preemption to be disabled. However,
- * note that it's still valid to use check_simple_locks
- * when only MACH_RT is turned on -- no locks should be
- * held, hence preemption should be enabled.
- * Actually, the above isn't strictly true, as explicit calls
- * to disable_preemption() need to be accounted for.
- */
-#define simple_lock_held(l) __slock_held_func__(l)
-#define check_simple_locks() usimple_lock_none_held()
-#else /* USLOCK_DEBUG */
-#define simple_lock_held(l)
-#define check_simple_locks()
-#endif /* USLOCK_DEBUG */
-
-#endif /*!_SIMPLE_LOCK_H_*/
+#define simple_lock_init(l, t) usimple_lock_init(l,t)
+#define simple_lock(l, grp) usimple_lock(l, grp)
+#define simple_unlock(l) usimple_unlock(l)
+#define simple_lock_try(l, grp) usimple_lock_try(l, grp)
+#define simple_lock_try_lock_loop(l, grp) usimple_lock_try_lock_loop(l, grp)
+#define simple_lock_try_lock_mp_signal_safe_loop_deadline(l, ddl, grp) \
+ usimple_lock_try_lock_mp_signal_safe_loop_deadline(l, ddl, grp)
+#define simple_lock_try_lock_mp_signal_safe_loop_duration(l, dur, grp) \
+ usimple_lock_try_lock_mp_signal_safe_loop_duration(l, dur, grp)
+#define simple_lock_addr(l) (&(l))
+#endif /* !defined(simple_lock_init) */
+
+#ifdef MACH_KERNEL_PRIVATE
+
+typedef uint32_t hw_lock_bit_t;
+
+#if LOCK_STATS
+extern void hw_lock_bit(
+ hw_lock_bit_t *,
+ unsigned int,
+ lck_grp_t*);
+
+extern void hw_lock_bit_nopreempt(
+ hw_lock_bit_t *,
+ unsigned int,
+ lck_grp_t*);
+
+extern unsigned int hw_lock_bit_try(
+ hw_lock_bit_t *,
+ unsigned int,
+ lck_grp_t*);
+
+extern unsigned int hw_lock_bit_to(
+ hw_lock_bit_t *,
+ unsigned int,
+ uint32_t,
+ lck_grp_t*);
+
+#else
+extern void hw_lock_bit(
+ hw_lock_bit_t *,
+ unsigned int);
+#define hw_lock_bit(lck, bit, grp) \
+ hw_lock_bit(lck, bit)
+
+extern void hw_lock_bit_nopreempt(
+ hw_lock_bit_t *,
+ unsigned int);
+#define hw_lock_bit_nopreempt(lck, bit, grp) \
+ hw_lock_bit_nopreempt(lck, bit)
+
+extern unsigned int hw_lock_bit_try(
+ hw_lock_bit_t *,
+ unsigned int);
+#define hw_lock_bit_try(lck, bit, grp) \
+ hw_lock_bit_try(lck, bit)
+
+extern unsigned int hw_lock_bit_to(
+ hw_lock_bit_t *,
+ unsigned int,
+ uint32_t);
+#define hw_lock_bit_to(lck, bit, timeout, grp) \
+ hw_lock_bit_to(lck, bit, timeout)
+
+#endif /* LOCK_STATS */
+
+extern void hw_unlock_bit(
+ hw_lock_bit_t *,
+ unsigned int);
+
+extern void hw_unlock_bit_nopreempt(
+ hw_lock_bit_t *,
+ unsigned int);
+
+#define hw_lock_bit_held(l, b) \
+ (((*(l)) & (1 << (b))) != 0)
+
+#endif /* MACH_KERNEL_PRIVATE */
+
+__END_DECLS
+
+#pragma GCC visibility pop
+
+#endif /* XNU_KERNEL_PRIVATE */
+#endif /*!_KERN_SIMPLE_LOCK_H_*/
+
+#endif /* KERNEL_PRIVATE */