+#define KQEXTENT 256 /* linear growth by this amount */
+
+/*
+ * Lock context for a single knote.  One context is typically allocated on
+ * the stack (see KNOTE_LOCK_CTX below) and chained into a struct knote_locks
+ * list via knlc_link.  DEBUG/DEVELOPMENT kernels additionally track the
+ * lock state in knlc_state so forgotten unlocks can be asserted on.
+ */
+struct knote_lock_ctx {
+ struct knote *knlc_knote; /* knote this context locks */
+ thread_t knlc_thread; /* thread that owns this context */
+ uintptr_t knlc_waiters; /* NOTE(review): presumably a waiter count or turnstile word -- confirm at use sites */
+ LIST_ENTRY(knote_lock_ctx) knlc_link; /* linkage into a struct knote_locks list */
+#if DEBUG || DEVELOPMENT
+#define KNOTE_LOCK_CTX_UNLOCKED 0
+#define KNOTE_LOCK_CTX_LOCKED 1
+#define KNOTE_LOCK_CTX_WAITING 2
+ int knlc_state; /* one of KNOTE_LOCK_CTX_{UNLOCKED,LOCKED,WAITING}; debug-only */
+#endif
+};
+LIST_HEAD(knote_locks, knote_lock_ctx); /* list-head type for chains of knote_lock_ctx */
+
+#if DEBUG || DEVELOPMENT
+/*
+ * KNOTE_LOCK_CTX(name) is a convenience macro to define a knote lock context on
+ * the stack named `name`. In development kernels, it uses tricks to make sure
+ * no lock is still held when exiting the C-scope that contains this context.
+ */
+static inline void
+knote_lock_ctx_chk(struct knote_lock_ctx *knlc)
+{
+ /* evil hackery to make sure no one forgets to unlock */
+ assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED);
+}
+/*
+ * The cleanup attribute makes the compiler call knote_lock_ctx_chk(&n)
+ * automatically when `n` goes out of scope, so a context that is still
+ * LOCKED or WAITING at scope exit trips the assert above.
+ */
+#define KNOTE_LOCK_CTX(n) \
+ struct knote_lock_ctx n __attribute__((cleanup(knote_lock_ctx_chk))); \
+ n.knlc_state = KNOTE_LOCK_CTX_UNLOCKED
+#else
+/* Release kernels: no state tracking, no scope-exit check. */
+#define KNOTE_LOCK_CTX(n) \
+ struct knote_lock_ctx n
+#endif
+
+
+/*
+ * kqueue state flags, stored in a uint16_t.  Declared via __options_decl
+ * so the flags form a distinct bitmask type rather than bare #defines.
+ */
+__options_decl(kq_state_t, uint16_t, {
+ KQ_SEL = 0x0001, /* select was recorded for kq */
+ KQ_SLEEP = 0x0002, /* thread is waiting for events */
+ KQ_PROCWAIT = 0x0004, /* thread waiting for processing */
+ KQ_KEV32 = 0x0008, /* kq is used with 32-bit events */
+ KQ_KEV64 = 0x0010, /* kq is used with 64-bit events */
+ KQ_KEV_QOS = 0x0020, /* kq events carry QoS info */
+ KQ_WORKQ = 0x0040, /* KQ is bound to process workq */
+ KQ_WORKLOOP = 0x0080, /* KQ is part of a workloop */
+ KQ_PROCESSING = 0x0100, /* KQ is being processed */
+ KQ_DRAIN = 0x0200, /* kq is draining */
+ KQ_WAKEUP = 0x0400, /* kq awakened while processing */
+ KQ_DYNAMIC = 0x0800, /* kqueue is dynamically managed */
+ KQ_R2K_ARMED = 0x1000, /* ast notification armed */
+ KQ_HAS_TURNSTILE = 0x2000, /* this kqueue has a turnstile */
+});