+ if (mp->mtx_lock) {
+ lck_mtx_free(mp->mtx_lock, audit_lck_grp);
+ mp->mtx_lock = NULL;
+ }
+}
+
+/*
+ * BSD rw locks.
+ */
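+/*
+ * Note: the lock-name argument is consumed only when DIAGNOSTIC is set (it
+ * is copied into the lock for debugging), so the non-DIAGNOSTIC prototype
+ * marks it __unused to avoid an unused-parameter warning.
+ */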
+void
+#if DIAGNOSTIC
+_audit_rw_init(struct rwlock *lp, const char *lckname)
+#else
+_audit_rw_init(struct rwlock *lp, __unused const char *lckname)
+#endif
+{
+ lp->rw_lock = lck_rw_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
+ KASSERT(lp->rw_lock != NULL,
+ ("_audit_rw_init: Could not allocate a rw lock."));
+#if DIAGNOSTIC
+ strlcpy(lp->rw_name, lckname, AU_MAX_LCK_NAME);
+#endif
+}
+
+void
+_audit_rw_destroy(struct rwlock *lp)
+{
+
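+ /* Clear rw_lock after freeing so a repeated destroy is a harmless no-op. */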
+ if (lp->rw_lock) {
+ lck_rw_free(lp->rw_lock, audit_lck_grp);
+ lp->rw_lock = NULL;
+ }
+}
+
+/*
+ * Wait on a condition variable in a continuation (i.e., yield the kernel
+ * stack).  A cv_signal or cv_broadcast on the same condition variable will
+ * cause the thread to be scheduled.
+ */
+int
+_audit_cv_wait_continuation(struct cv *cvp, lck_mtx_t *mp, thread_continue_t function)
+{
+ int status = KERN_SUCCESS;
+
+ cvp->cv_waiters++;
+ assert_wait(cvp, THREAD_UNINT);
+ lck_mtx_unlock(mp);
+
+ status = thread_block(function);
+
+ /* thread_block() with a continuation should not return; re-lock just in case it does */
+ lck_mtx_lock(mp);
+
+ return status;
+}
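+
+/*
+ * Illustrative (hypothetical) caller: a worker thread that parks itself
+ * until it is signalled might do
+ *
+ *	_audit_cv_wait_continuation(&worker_cv, worker_mtx,
+ *	    (thread_continue_t)worker_continue);
+ *
+ * where worker_cv, worker_mtx and worker_continue are placeholder names.
+ * Because thread_block() with a continuation discards the current kernel
+ * stack, worker_continue() runs from its own entry point when the thread is
+ * next woken; it cannot resume in the middle of the original caller.
+ */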
+
+/*
+ * Simple recursive lock.
+ */
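+/*
+ * A recursive lock may be re-acquired by the thread that already holds it;
+ * the lock is not released to other threads until every acquisition has been
+ * matched by a release.
+ */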
+void
+#if DIAGNOSTIC
+_audit_rlck_init(struct rlck *lp, const char *lckname)
+#else
+_audit_rlck_init(struct rlck *lp, __unused const char *lckname)
+#endif
+{
+
+ lp->rl_mtx = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
+ KASSERT(lp->rl_mtx != NULL,
+ ("_audit_rlck_init: Could not allocate a recursive lock."));
+#if DIAGNOSTIC
+ strlcpy(lp->rl_name, lckname, AU_MAX_LCK_NAME);
+#endif