+void
+#if DIAGNOSTIC
+_audit_mtx_init(struct mtx *mp, const char *lckname)
+#else
+_audit_mtx_init(struct mtx *mp, __unused const char *lckname)
+#endif
+{
+ mp->mtx_lock = lck_mtx_alloc_init(&audit_lck_grp, LCK_ATTR_NULL);
+ KASSERT(mp->mtx_lock != NULL,
+ ("_audit_mtx_init: Could not allocate a mutex."));
+#if DIAGNOSTIC
+ strlcpy(mp->mtx_name, lckname, AU_MAX_LCK_NAME);
+#endif
+}
+
+void
+_audit_mtx_destroy(struct mtx *mp)
+{
+ if (mp->mtx_lock) {
+ lck_mtx_free(mp->mtx_lock, &audit_lck_grp);
+ mp->mtx_lock = NULL;
+ }
+}
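+
+/*
+ * Illustrative usage of the mutex shims (a sketch only; the lock name and
+ * the mtx_lock()/mtx_unlock() wrappers are assumed, not part of this change):
+ *
+ *	struct mtx audit_example_mtx;
+ *
+ *	_audit_mtx_init(&audit_example_mtx, "audit_example_mtx");
+ *	mtx_lock(&audit_example_mtx);
+ *	... critical section ...
+ *	mtx_unlock(&audit_example_mtx);
+ *	_audit_mtx_destroy(&audit_example_mtx);
+ */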
+
+/*
+ * BSD rw locks.
+ */
+void
+#if DIAGNOSTIC
+_audit_rw_init(struct rwlock *lp, const char *lckname)
+#else
+_audit_rw_init(struct rwlock *lp, __unused const char *lckname)
+#endif
+{
+ lp->rw_lock = lck_rw_alloc_init(&audit_lck_grp, LCK_ATTR_NULL);
+ KASSERT(lp->rw_lock != NULL,
+ ("_audit_rw_init: Could not allocate a rw lock."));
+#if DIAGNOSTIC
+ strlcpy(lp->rw_name, lckname, AU_MAX_LCK_NAME);
+#endif
+}
+
+void
+_audit_rw_destroy(struct rwlock *lp)
+{
+ if (lp->rw_lock) {
+ lck_rw_free(lp->rw_lock, &audit_lck_grp);
+ lp->rw_lock = NULL;
+ }
+}
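+
+/*
+ * Illustrative usage of the rw-lock shims (a sketch only; the lock name and
+ * the rw_rlock()/rw_runlock() wrappers are assumed, not part of this change):
+ *
+ *	struct rwlock audit_example_rw;
+ *
+ *	_audit_rw_init(&audit_example_rw, "audit_example_rw");
+ *	rw_rlock(&audit_example_rw);
+ *	... read-side critical section ...
+ *	rw_runlock(&audit_example_rw);
+ *	_audit_rw_destroy(&audit_example_rw);
+ */
+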
+/*
+ * Wait on a condition variable in a continuation (i.e. yield kernel stack).
+ * A cv_signal or cv_broadcast on the same condition variable will cause
+ * the thread to be scheduled.
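+ *
+ * Illustrative waiter (a sketch only; all names below are assumed): the
+ * kernel stack is discarded while blocked, so state must reach the
+ * continuation through its argument or file-scope data, e.g.
+ *
+ *	static void
+ *	audit_example_continue(void *arg, wait_result_t wr)
+ *	{
+ *		-- execution resumes here after cv_signal/cv_broadcast --
+ *	}
+ *
+ *	lck_mtx_lock(audit_example_mtx);
+ *	(void)_audit_cv_wait_continuation(&audit_example_cv,
+ *	    audit_example_mtx, audit_example_continue);
+ *
+ * The call normally does not return to its caller; the rescheduled thread
+ * restarts in audit_example_continue() instead.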
+ */
+int
+_audit_cv_wait_continuation(struct cv *cvp, lck_mtx_t *mp,
+    thread_continue_t function)