+/*
+ * kqueue/knote lock attributes and implementations
+ *
+ * kqueues have locks, while knotes have use counts.
+ * Most of the knote state is guarded by the object lock;
+ * the knote "inuse" count and status use the kqueue lock.
+ */
+lck_grp_attr_t * kq_lck_grp_attr;
+lck_grp_t * kq_lck_grp;
+lck_attr_t * kq_lck_attr;
+
+static inline void
+kqlock(struct kqueue *kq)
+{
+	lck_spin_lock(&kq->kq_lock);
+}
+
+static inline void
+kqunlock(struct kqueue *kq)
+{
+	lck_spin_unlock(&kq->kq_lock);
+}
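+
+/*
+ * Usage sketch (illustrative; KN_QUEUED stands in for any of the
+ * status bits defined elsewhere): per the model above, the kn_status
+ * bits and the kn_inuse count are only manipulated with the kqueue
+ * lock held:
+ *
+ *	kqlock(kq);
+ *	kn->kn_status |= KN_QUEUED;
+ *	kqunlock(kq);
+ */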
+
+/*
+ * Convert a kq lock to a knote use reference.
+ *
+ * If the knote is being dropped, we can't get
+ * a use reference, so just return with it
+ * still locked.
+ *
+ * - kq locked at entry
+ * - unlock on exit if we get the use reference
+ */
+static int
+kqlock2knoteuse(struct kqueue *kq, struct knote *kn)
+{
+	if (kn->kn_status & KN_DROPPING)
+		return 0;
+	kn->kn_inuse++;
+	kqunlock(kq);
+	return 1;
+}
+
+/*
+ * Convert a kq lock to a knote use reference,
+ * but wait for any pending drop to complete.
+ *
+ * If the knote is being dropped, block until
+ * the drop has completed, then return 0 - no
+ * use reference is obtained in that case.
+ *
+ * - kq locked at entry
+ * - kq always unlocked on exit
+ */
+static int
+kqlock2knoteusewait(struct kqueue *kq, struct knote *kn)
+{
+	if (!kqlock2knoteuse(kq, kn)) {
+		kn->kn_status |= KN_DROPWAIT;
+		assert_wait(&kn->kn_status, THREAD_UNINT);
+		kqunlock(kq);
+		thread_block(THREAD_CONTINUE_NULL);
+		return 0;
+	}
+	return 1;
+}
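+
+/*
+ * Usage sketch (illustrative; knote_put() is defined below): for a
+ * caller that must either obtain a use reference or learn that the
+ * knote is gone.  On a 0 return the drop has completed, the kq is
+ * unlocked, and kn must no longer be touched:
+ *
+ *	kqlock(kq);
+ *	if (kqlock2knoteusewait(kq, kn)) {
+ *		... operate on the knote with the kq unlocked ...
+ *		knote_put(kn);
+ *	}
+ */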
+
+/*
+ * Convert a knote use reference back to a kq lock.
+ *
+ * Drop a use reference and wake any waiters if
+ * this is the last one.
+ *
+ * The return value indicates whether the knote
+ * is still alive (not being dropped) - but the
+ * kqueue lock is retaken unconditionally.
+ */
+static int
+knoteuse2kqlock(struct kqueue *kq, struct knote *kn)
+{
+	kqlock(kq);
+	if ((--kn->kn_inuse == 0) &&
+	    (kn->kn_status & KN_USEWAIT)) {
+		kn->kn_status &= ~KN_USEWAIT;
+		thread_wakeup(&kn->kn_inuse);
+	}
+	return ((kn->kn_status & KN_DROPPING) == 0);
+}
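+
+/*
+ * Usage sketch (illustrative; the filter's f_event hook, the "hint"
+ * argument, and knote_activate() are assumed from the surrounding
+ * file, not defined here): the typical pattern for calling out to a
+ * filter routine without holding the kq lock:
+ *
+ *	kqlock(kq);
+ *	if (kqlock2knoteuse(kq, kn)) {
+ *		int result = kn->kn_fop->f_event(kn, hint);
+ *		if (knoteuse2kqlock(kq, kn) && result)
+ *			knote_activate(kn);
+ *	}
+ *	kqunlock(kq);
+ */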
+
+/*
+ * Convert a kq lock to a knote drop reference.
+ *
+ * If the knote is in use, wait for the use count
+ * to subside.  We first mark our intention to drop
+ * it - keeping other users from "piling on."
+ * If we are too late, we have to wait for the
+ * other drop to complete.
+ *
+ * - kq locked at entry
+ * - always unlocked on exit.
+ * - returns 1 if the caller now owns the drop,
+ *   0 if another thread was already dropping it.
+ * - caller can't hold any locks that would prevent
+ *   the other dropper from completing.
+ */
+static int
+kqlock2knotedrop(struct kqueue *kq, struct knote *kn)
+{
+	if ((kn->kn_status & KN_DROPPING) == 0) {
+		kn->kn_status |= KN_DROPPING;
+		if (kn->kn_inuse > 0) {
+			kn->kn_status |= KN_USEWAIT;
+			assert_wait(&kn->kn_inuse, THREAD_UNINT);
+			kqunlock(kq);
+			thread_block(THREAD_CONTINUE_NULL);
+		} else
+			kqunlock(kq);
+		return 1;
+	} else {
+		kn->kn_status |= KN_DROPWAIT;
+		assert_wait(&kn->kn_status, THREAD_UNINT);
+		kqunlock(kq);
+		thread_block(THREAD_CONTINUE_NULL);
+		return 0;
+	}
+}
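+
+/*
+ * Usage sketch (illustrative; the filter's f_detach hook, a
+ * knote_drop() routine, and the owning proc pointer "p" are assumed
+ * from the surrounding file): a 1 return means the caller owns the
+ * drop and performs the detach and free; a 0 return means another
+ * thread did - the kq is unlocked either way:
+ *
+ *	kqlock(kq);
+ *	if (kqlock2knotedrop(kq, kn)) {
+ *		kn->kn_fop->f_detach(kn);
+ *		knote_drop(kn, p);
+ *	}
+ */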
+
+/*
+ * Release a knote use count reference.
+ */
+static void
+knote_put(struct knote *kn)
+{
+	struct kqueue *kq = kn->kn_kq;
+
+	kqlock(kq);
+	if ((--kn->kn_inuse == 0) &&
+	    (kn->kn_status & KN_USEWAIT)) {
+		kn->kn_status &= ~KN_USEWAIT;
+		thread_wakeup(&kn->kn_inuse);
+	}
+	kqunlock(kq);
+}
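+
+/*
+ * Usage sketch (illustrative): knote_put() pairs with a successful
+ * kqlock2knoteuse() when the caller does not need the kq lock back;
+ * on failure the kq is still locked and must be unlocked by hand:
+ *
+ *	kqlock(kq);
+ *	if (kqlock2knoteuse(kq, kn)) {
+ *		... use the knote with the kq unlocked ...
+ *		knote_put(kn);
+ *	} else {
+ *		kqunlock(kq);
+ *	}
+ */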
+