[apple/xnu.git] bsd/kern/kern_event.c (xnu-3789.70.16)
diff --git a/bsd/kern/kern_event.c b/bsd/kern/kern_event.c
index 6bc84137c321faccf14151711f9b4989cff444c8..66cd6e2a534a5e599537e45bd36b08d30dd040c1 100644
--- a/bsd/kern/kern_event.c
+++ b/bsd/kern/kern_event.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -86,6 +86,7 @@
 
 #include <kern/locks.h>
 #include <kern/clock.h>
+#include <kern/policy_internal.h>
 #include <kern/thread_call.h>
 #include <kern/sched_prim.h>
 #include <kern/waitq.h>
 
 #include <mach/task.h>
 
-#if VM_PRESSURE_EVENTS
-#include <kern/vm_pressure.h>
-#endif
-
 #if CONFIG_MEMORYSTATUS
 #include <sys/kern_memorystatus.h>
 #endif
 
+/*
+ * JMM - this typedef needs to be unified with pthread_priority_t
+ *       and mach_msg_priority_t. It also needs to be the same type
+ *       everywhere.
+ */
+typedef int32_t qos_t;
+
 MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
 
 #define        KQ_EVENT        NO_EVENT64
@@ -114,11 +118,10 @@ static inline void kqlock(struct kqueue *kq);
 static inline void kqunlock(struct kqueue *kq);
 
 static int kqlock2knoteuse(struct kqueue *kq, struct knote *kn);
-static int kqlock2knoteusewait(struct kqueue *kq, struct knote *kn);
 static int kqlock2knotedrop(struct kqueue *kq, struct knote *kn);
-static int knoteuse2kqlock(struct kqueue *kq, struct knote *kn);
+static int kqlock2knotedetach(struct kqueue *kq, struct knote *kn);
+static int knoteuse2kqlock(struct kqueue *kq, struct knote *kn, int defer_drop);
 
-static void kqueue_wakeup(struct kqueue *kq, int closed);
 static int kqueue_read(struct fileproc *fp, struct uio *uio,
     int flags, vfs_context_t ctx);
 static int kqueue_write(struct fileproc *fp, struct uio *uio,
@@ -146,7 +149,7 @@ static const struct fileops kqueueops = {
 static int kevent_internal(struct proc *p, int fd, 
                           user_addr_t changelist, int nchanges,
                           user_addr_t eventlist, int nevents, 
-                          user_addr_t data_out, user_size_t *data_available,
+                          user_addr_t data_out, uint64_t data_available,
                           unsigned int flags, user_addr_t utimeout,
                           kqueue_continue_t continuation,
                           int32_t *retval);
@@ -156,27 +159,66 @@ static int kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp,
                          struct proc *p, unsigned int flags);
 char * kevent_description(struct kevent_internal_s *kevp, char *s, size_t n);
 
+static void kqueue_interrupt(struct kqueue *kq);
 static int kevent_callback(struct kqueue *kq, struct kevent_internal_s *kevp,
                           void *data);
 static void kevent_continue(struct kqueue *kq, void *data, int error);
 static void kqueue_scan_continue(void *contp, wait_result_t wait_result);
-static int kqueue_process(struct kqueue *kq, kevent_callback_t callback,
-                         void *data, int *countp, struct proc *p);
-static int kqueue_begin_processing(struct kqueue *kq);
-static void kqueue_end_processing(struct kqueue *kq);
-static int knote_process(struct knote *kn, kevent_callback_t callback,
-                        void *data, struct kqtailq *inprocessp, struct proc *p);
+static int kqueue_process(struct kqueue *kq, kevent_callback_t callback, void *callback_data,
+                          struct filt_process_s *process_data, kq_index_t servicer_qos_index,
+                          int *countp, struct proc *p);
+static int kqueue_begin_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags);
+static void kqueue_end_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags);
+static struct kqtailq *kqueue_get_base_queue(struct kqueue *kq, kq_index_t qos_index);
+static struct kqtailq *kqueue_get_high_queue(struct kqueue *kq, kq_index_t qos_index);
+static int kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index);
+
+static struct kqtailq *kqueue_get_suppressed_queue(struct kqueue *kq, kq_index_t qos_index);
+
+static void kqworkq_request_thread(struct kqworkq *kqwq, kq_index_t qos_index);
+static void kqworkq_request_help(struct kqworkq *kqwq, kq_index_t qos_index, uint32_t type);
+static void kqworkq_update_override(struct kqworkq *kqwq, kq_index_t qos_index, kq_index_t override_index);
+static void kqworkq_bind_thread(struct kqworkq *kqwq, kq_index_t qos_index, thread_t thread, unsigned int flags);
+static void kqworkq_unbind_thread(struct kqworkq *kqwq, kq_index_t qos_index, thread_t thread, unsigned int flags);
+static struct kqrequest *kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index);
+
+
+static int knote_process(struct knote *kn, kevent_callback_t callback, void *callback_data,
+                        struct filt_process_s *process_data, struct proc *p);
+#if 0
 static void knote_put(struct knote *kn);
-static int knote_fdpattach(struct knote *kn, struct filedesc *fdp,
-                          struct proc *p);
+#endif
+
+static int knote_fdadd(struct knote *kn, struct proc *p);
+static void knote_fdremove(struct knote *kn, struct proc *p);
+static struct knote *knote_fdfind(struct kqueue *kq, struct kevent_internal_s *kev, struct proc *p);
+
 static void knote_drop(struct knote *kn, struct proc *p);
-static void knote_activate(struct knote *kn, int);
-static void knote_deactivate(struct knote *kn);
-static void knote_enqueue(struct knote *kn);
-static void knote_dequeue(struct knote *kn);
 static struct knote *knote_alloc(void);
 static void knote_free(struct knote *kn);
 
+static void knote_activate(struct knote *kn);
+static void knote_deactivate(struct knote *kn);
+
+static void knote_enable(struct knote *kn);
+static void knote_disable(struct knote *kn);
+
+static int knote_enqueue(struct knote *kn);
+static void knote_dequeue(struct knote *kn);
+
+static void knote_suppress(struct knote *kn);
+static void knote_unsuppress(struct knote *kn);
+static void knote_wakeup(struct knote *kn);
+
+static kq_index_t knote_get_queue_index(struct knote *kn);
+static struct kqtailq *knote_get_queue(struct knote *kn);
+static struct kqtailq *knote_get_suppressed_queue(struct knote *kn);
+static kq_index_t knote_get_req_index(struct knote *kn);
+static kq_index_t knote_get_qos_index(struct knote *kn);
+static void knote_set_qos_index(struct knote *kn, kq_index_t qos_index);
+static kq_index_t knote_get_qos_override_index(struct knote *kn);
+static void knote_set_qos_override_index(struct knote *kn, kq_index_t qos_index);
+
 static int filt_fileattach(struct knote *kn);
 static struct filterops file_filtops = {
        .f_isfd = 1,
@@ -185,10 +227,14 @@ static struct filterops file_filtops = {
 
 static void filt_kqdetach(struct knote *kn);
 static int filt_kqueue(struct knote *kn, long hint);
+static int filt_kqtouch(struct knote *kn, struct kevent_internal_s *kev);
+static int filt_kqprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
 static struct filterops kqread_filtops = {
        .f_isfd = 1,
        .f_detach = filt_kqdetach,
        .f_event = filt_kqueue,
+       .f_touch = filt_kqtouch,
+       .f_process = filt_kqprocess,
 };
 
 /* placeholder for not-yet-implemented filters */
@@ -200,23 +246,16 @@ static struct filterops bad_filtops = {
 static int filt_procattach(struct knote *kn);
 static void filt_procdetach(struct knote *kn);
 static int filt_proc(struct knote *kn, long hint);
+static int filt_proctouch(struct knote *kn, struct kevent_internal_s *kev);
+static int filt_procprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
 static struct filterops proc_filtops = {
        .f_attach = filt_procattach,
        .f_detach = filt_procdetach,
        .f_event = filt_proc,
+       .f_touch = filt_proctouch,
+       .f_process = filt_procprocess,
 };
 
-#if VM_PRESSURE_EVENTS
-static int filt_vmattach(struct knote *kn);
-static void filt_vmdetach(struct knote *kn);
-static int filt_vm(struct knote *kn, long hint);
-static struct filterops vm_filtops = {
-       .f_attach = filt_vmattach,
-       .f_detach = filt_vmdetach,
-       .f_event = filt_vm,
-};
-#endif /* VM_PRESSURE_EVENTS */
-
 #if CONFIG_MEMORYSTATUS
 extern struct filterops memorystatus_filtops;
 #endif /* CONFIG_MEMORYSTATUS */
@@ -229,19 +268,20 @@ extern struct filterops sig_filtops;
 static int filt_timerattach(struct knote *kn);
 static void filt_timerdetach(struct knote *kn);
 static int filt_timer(struct knote *kn, long hint);
-static void filt_timertouch(struct knote *kn, struct kevent_internal_s *kev,
-    long type);
+static int filt_timertouch(struct knote *kn, struct kevent_internal_s *kev);
+static int filt_timerprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
 static struct filterops timer_filtops = {
        .f_attach = filt_timerattach,
        .f_detach = filt_timerdetach,
        .f_event = filt_timer,
        .f_touch = filt_timertouch,
+       .f_process = filt_timerprocess,
 };
 
 /* Helpers */
 static void filt_timerexpire(void *knx, void *param1);
 static int filt_timervalidate(struct knote *kn);
-static void filt_timerupdate(struct knote *kn);
+static void filt_timerupdate(struct knote *kn, int num_fired);
 static void filt_timercancel(struct knote *kn);
 
 #define        TIMER_RUNNING           0x1
@@ -252,6 +292,8 @@ static void filt_timerlock(void);
 static void filt_timerunlock(void);
 
 static zone_t knote_zone;
+static zone_t kqfile_zone;
+static zone_t kqworkq_zone;
 
 #define        KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))
 
@@ -266,53 +308,186 @@ extern struct filterops machport_filtops;
 static int filt_userattach(struct knote *kn);
 static void filt_userdetach(struct knote *kn);
 static int filt_user(struct knote *kn, long hint);
-static void filt_usertouch(struct knote *kn, struct kevent_internal_s *kev,
-    long type);
+static int filt_usertouch(struct knote *kn, struct kevent_internal_s *kev);
+static int filt_userprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
 static struct filterops user_filtops = {
        .f_attach = filt_userattach,
        .f_detach = filt_userdetach,
        .f_event = filt_user,
        .f_touch = filt_usertouch,
+       .f_process = filt_userprocess,
 };
 
+static lck_spin_t _filt_userlock;
+static void filt_userlock(void);
+static void filt_userunlock(void);
+
+extern struct filterops pipe_rfiltops;
+extern struct filterops pipe_wfiltops;
+extern struct filterops ptsd_kqops;
+extern struct filterops soread_filtops;
+extern struct filterops sowrite_filtops;
+extern struct filterops sock_filtops;
+extern struct filterops soexcept_filtops;
+extern struct filterops spec_filtops;
+extern struct filterops bpfread_filtops;
+extern struct filterops necp_fd_rfiltops;
+extern struct filterops skywalk_channel_rfiltops;
+extern struct filterops skywalk_channel_wfiltops;
+extern struct filterops fsevent_filtops;
+extern struct filterops vnode_filtops;
+
 /*
- * Table for all system-defined filters.
+ *
+ * Rules for adding new filters to the system:
+ * Public filters:
+ * - Add a new "EVFILT_" option value to bsd/sys/event.h (typically a negative value)
+ *   in the exported section of the header
+ * - Update the EVFILT_SYSCOUNT value to reflect the new addition
+ * - Add a filterops to the sysfilt_ops array. Public filters should be added at
+ *   the end of the Public Filters section in the array.
+ * Private filters:
+ * - Add a new "EVFILT_" value to bsd/sys/event.h (typically a positive value)
+ *   in the XNU_KERNEL_PRIVATE section of the header
+ * - Update the EVFILTID_MAX value to reflect the new addition
+ * - Add a filterops to the sysfilt_ops array. Private filters should be added at
+ *   the end of the Private filters section of the array (see the sketch after the
+ *   array below).
  */
-static struct filterops *sysfilt_ops[] = {
-       &file_filtops,                  /* EVFILT_READ */
-       &file_filtops,                  /* EVFILT_WRITE */
-#if 0
-       &aio_filtops,                   /* EVFILT_AIO */
-#else
-       &bad_filtops,                   /* EVFILT_AIO */
-#endif
-       &file_filtops,                  /* EVFILT_VNODE */
-       &proc_filtops,                  /* EVFILT_PROC */
-       &sig_filtops,                   /* EVFILT_SIGNAL */
-       &timer_filtops,                 /* EVFILT_TIMER */
-       &machport_filtops,              /* EVFILT_MACHPORT */
-       &fs_filtops,                    /* EVFILT_FS */
-       &user_filtops,                  /* EVFILT_USER */
-       &bad_filtops,                   /* unused */
-#if VM_PRESSURE_EVENTS
-       &vm_filtops,                    /* EVFILT_VM */
-#else
-       &bad_filtops,                   /* EVFILT_VM */
-#endif
-       &file_filtops,                  /* EVFILT_SOCK */
+static struct filterops *sysfilt_ops[EVFILTID_MAX] = {
+       /* Public Filters */
+       [~EVFILT_READ]                                  = &file_filtops,
+       [~EVFILT_WRITE]                                 = &file_filtops,
+       [~EVFILT_AIO]                                   = &bad_filtops,
+       [~EVFILT_VNODE]                                 = &file_filtops,
+       [~EVFILT_PROC]                                  = &proc_filtops,
+       [~EVFILT_SIGNAL]                                = &sig_filtops,
+       [~EVFILT_TIMER]                                 = &timer_filtops,
+       [~EVFILT_MACHPORT]                              = &machport_filtops,
+       [~EVFILT_FS]                                    = &fs_filtops,
+       [~EVFILT_USER]                                  = &user_filtops,
+                                                                         &bad_filtops,
+                                                                         &bad_filtops,
+       [~EVFILT_SOCK]                                  = &file_filtops,
 #if CONFIG_MEMORYSTATUS
-       &memorystatus_filtops,  /* EVFILT_MEMORYSTATUS */
+       [~EVFILT_MEMORYSTATUS]                  = &memorystatus_filtops,
 #else
-       &bad_filtops,                   /* EVFILT_MEMORYSTATUS */
+       [~EVFILT_MEMORYSTATUS]                  = &bad_filtops,
 #endif
+       [~EVFILT_EXCEPT]                                = &file_filtops,
+
+       /* Private filters */
+       [EVFILTID_KQREAD]                               = &kqread_filtops,
+       [EVFILTID_PIPE_R]                               = &pipe_rfiltops,
+       [EVFILTID_PIPE_W]                               = &pipe_wfiltops,
+       [EVFILTID_PTSD]                                 = &ptsd_kqops,
+       [EVFILTID_SOREAD]                               = &soread_filtops,
+       [EVFILTID_SOWRITE]                              = &sowrite_filtops,
+       [EVFILTID_SCK]                                  = &sock_filtops,
+       [EVFILTID_SOEXCEPT]                     = &soexcept_filtops,
+       [EVFILTID_SPEC]                                 = &spec_filtops,
+       [EVFILTID_BPFREAD]                              = &bpfread_filtops,
+       [EVFILTID_NECP_FD]                              = &necp_fd_rfiltops,
+       [EVFILTID_FSEVENT]                              = &fsevent_filtops,
+       [EVFILTID_VN]                                   = &vnode_filtops
 };
 
+/* waitq prepost callback */
+void waitq_set__CALLING_PREPOST_HOOK__(void *kq_hook, void *knote_hook, int qos);
+
+#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
+#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 /* pthread event manager bit */
+#endif
+#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
+#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG    0x80000000 /* request overcommit threads */
+#endif
+#ifndef _PTHREAD_PRIORITY_QOS_CLASS_MASK
+#define _PTHREAD_PRIORITY_QOS_CLASS_MASK    0x003fff00  /* QoS class mask */
+#endif
+#ifndef _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32
+#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32 8
+#endif
+
+static inline
+qos_t canonicalize_kevent_qos(qos_t qos)
+{
+       unsigned long canonical;
+
+       /* preserve manager and overcommit flags in this case */
+       canonical = pthread_priority_canonicalize(qos, FALSE);
+       return (qos_t)canonical;
+}
+
+static inline
+kq_index_t qos_index_from_qos(qos_t qos, boolean_t propagation)
+{
+       kq_index_t qos_index;
+       unsigned long flags = 0;
+
+       qos_index = (kq_index_t)thread_qos_from_pthread_priority(
+                               (unsigned long)qos, &flags);
+       
+       if (!propagation && (flags & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG))
+               return KQWQ_QOS_MANAGER;
+
+       return qos_index;
+}
+
+static inline
+qos_t qos_from_qos_index(kq_index_t qos_index)
+{
+       if (qos_index == KQWQ_QOS_MANAGER)
+               return  _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+
+       if (qos_index == 0)
+               return 0; /* Unspecified */
+
+       /* Should have support from the pthread kext */
+       return (1 << (qos_index - 1 + 
+                     _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32));
+}
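
As a worked example of the bit layout above (a sketch; it assumes the kernel's
THREAD_QOS_* numbering, where THREAD_QOS_LEGACY == 4):

    #include <assert.h>
    #include <stdint.h>

    #define QOS_CLASS_SHIFT 8               /* _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32 */
    #define QOS_CLASS_MASK  0x003fff00u     /* _PTHREAD_PRIORITY_QOS_CLASS_MASK */

    int main(void)
    {
        uint32_t qos_index = 4;             /* THREAD_QOS_LEGACY (default) */

        /* qos_from_qos_index(): one bit per QoS class, starting at bit 8 */
        uint32_t qos = 1u << (qos_index - 1 + QOS_CLASS_SHIFT);

        assert(qos == 0x800);                   /* 1 << 11 */
        assert((qos & QOS_CLASS_MASK) == qos);  /* lands inside the class mask */
        return 0;
    }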
+
+static inline
+kq_index_t qos_index_for_servicer(int qos_class, thread_t thread, int flags)
+{
+       kq_index_t qos_index;
+
+       if (flags & KEVENT_FLAG_WORKQ_MANAGER)
+               return KQWQ_QOS_MANAGER;
+
+       /* 
+        * If the caller didn't pass in a class (legacy pthread kext)
+        * then we use the thread policy QoS of the current thread.
+        */
+       assert(qos_class != -1);
+       if (qos_class == -1)
+               qos_index = proc_get_thread_policy(thread,
+                                                  TASK_POLICY_ATTRIBUTE,
+                                                  TASK_POLICY_QOS);
+       else
+               qos_index = (kq_index_t)qos_class;
+
+       assert(qos_index > 0 && qos_index < KQWQ_NQOS);
+
+       return qos_index;
+}
+
 /*
- * kqueue/note lock attributes and implementations
+ * kqueue/note lock implementations
+ *
+ *     The kqueue lock guards the kq state, the state of its queues,
+ *     and the kqueue-aware status and use counts of individual knotes.
+ *
+ *     The kqueue workq lock is used to protect state guarding the
+ *     interaction of the kqueue with the workq.  This state cannot
+ *     be guarded by the kq lock - as it needs to be taken when we
+ *     already have the waitq set lock held (during the waitq hook
+ *     callback).  It might be better to use the waitq lock itself
+ *     for this, but the IRQ requirements make that difficult.
  *
- *     kqueues have locks, while knotes have use counts
- *     Most of the knote state is guarded by the object lock.
- *     the knote "inuse" count and status use the kqueue lock.
+ *     Knote flags, filter flags, and associated data are protected
+ *     by the underlying object lock - and are only ever looked at
+ *     by calling the filter to get a [consistent] snapshot of that
+ *     data.
  */
 lck_grp_attr_t * kq_lck_grp_attr;
 lck_grp_t * kq_lck_grp;
@@ -330,79 +505,143 @@ kqunlock(struct kqueue *kq)
        lck_spin_unlock(&kq->kq_lock);
 }
 
+
 /*
 * Convert a kq lock to a knote use reference.
  *
- *     If the knote is being dropped, we can't get
- *     a use reference, so just return with it
- *     still locked.
+ *     If the knote is being dropped, or has
+ *  vanished, we can't get a use reference.
+ *  Just return with it still locked.
+ *
  *     - kq locked at entry
  *     - unlock on exit if we get the use reference
  */
 static int
 kqlock2knoteuse(struct kqueue *kq, struct knote *kn)
 {
-       if (kn->kn_status & KN_DROPPING)
+       if (kn->kn_status & (KN_DROPPING | KN_VANISHED))
                return (0);
-       kn->kn_inuse++;
-       kqunlock(kq);
-       return (1);
-}
 
-/*
- * Convert a kq lock to a knote use referece,
- * but wait for attach and drop events to complete.
- *
- *     If the knote is being dropped, we can't get
- *     a use reference, so just return with it
- *     still locked.
- *     - kq locked at entry
- *     - kq always unlocked on exit
- */
-static int
-kqlock2knoteusewait(struct kqueue *kq, struct knote *kn)
-{
-       if ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) != 0) {
-               kn->kn_status |= KN_USEWAIT;
-               waitq_assert_wait64((struct waitq *)kq->kq_wqs,
-                                   CAST_EVENT64_T(&kn->kn_status),
-                                   THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
-               kqunlock(kq);
-               thread_block(THREAD_CONTINUE_NULL);
-               return (0);
-       }
+       assert(kn->kn_status & KN_ATTACHED);
        kn->kn_inuse++;
        kqunlock(kq);
        return (1);
 }
 
+
 /*
  * Convert from a knote use reference back to kq lock.
  *
  *     Drop a use reference and wake any waiters if
  *     this is the last one.
  *
- *     The exit return indicates if the knote is
- *     still alive - but the kqueue lock is taken
- *     unconditionally.
+ *  If someone is trying to drop the knote, but the
+ *  caller has events they must deliver, take 
+ *  responsibility for the drop later - and wake the
+ *  other attempted dropper in a manner that informs
+ *  it of the transfer of responsibility.
+ *
+ *     The exit return indicates if the knote is still alive
+ *  (or if not, the other dropper has been given the green
+ *  light to drop it).
+ *
+ *  The kqueue lock is re-taken unconditionally.
  */
 static int
-knoteuse2kqlock(struct kqueue *kq, struct knote *kn)
+knoteuse2kqlock(struct kqueue *kq, struct knote *kn, int steal_drop)
 {
+       int dropped = 0;
+
        kqlock(kq);
        if (--kn->kn_inuse == 0) {
+
                if ((kn->kn_status & KN_ATTACHING) != 0) {
                        kn->kn_status &= ~KN_ATTACHING;
                }
+
                if ((kn->kn_status & KN_USEWAIT) != 0) {
+                       wait_result_t result;
+
+                       /* If we need to, try and steal the drop */
+                       if (kn->kn_status & KN_DROPPING) {
+                               if (steal_drop && !(kn->kn_status & KN_STOLENDROP)) {
+                                       kn->kn_status |= KN_STOLENDROP;
+                               } else {
+                                       dropped = 1;
+                               }
+                       }
+
+                       /* wakeup indicating if ANY USE stole the drop */
+                       result = (kn->kn_status & KN_STOLENDROP) ?
+                                THREAD_RESTART : THREAD_AWAKENED;
+
                        kn->kn_status &= ~KN_USEWAIT;
-                       waitq_wakeup64_all((struct waitq *)kq->kq_wqs,
+                       waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
                                           CAST_EVENT64_T(&kn->kn_status),
-                                          THREAD_AWAKENED,
+                                          result,
                                           WAITQ_ALL_PRIORITIES);
+               } else {
+                       /* should have seen use-wait if dropping with use refs */
+                       assert((kn->kn_status & (KN_DROPPING|KN_STOLENDROP)) == 0);
+               }
+
+       } else if (kn->kn_status & KN_DROPPING) {
+               /* not the last ref but want to steal a drop if present */
+               if (steal_drop && ((kn->kn_status & KN_STOLENDROP) == 0)) {
+                       kn->kn_status |= KN_STOLENDROP;
+
+                       /* but we now have to wait to be the last ref */
+                       kn->kn_status |= KN_USEWAIT;
+                       waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
+                                           CAST_EVENT64_T(&kn->kn_status),
+                                           THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
+                       kqunlock(kq);
+                       thread_block(THREAD_CONTINUE_NULL);
+                       kqlock(kq);
+               } else {
+                       dropped = 1;
                }
        }
-       return ((kn->kn_status & KN_DROPPING) == 0);
+
+       return (!dropped);
+}
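
A sketch of how callers are expected to pair these conversions (illustrative
only; error paths and the real call sites are elided):

    /* Illustrative caller-side pattern for the use-reference dance. */
    kqlock(kq);
    if (kqlock2knoteuse(kq, kn)) {
        /* kq lock dropped; the use reference keeps the knote from being
         * detached/freed while the filter runs unlocked */
        int activate = knote_fops(kn)->f_event(kn, 0);  /* hypothetical call site */

        /* reacquire; 0 means the knote died behind our back (or we
         * declined to steal a pending drop) */
        if (knoteuse2kqlock(kq, kn, 0) && activate)
            knote_activate(kn);
    }
    kqunlock(kq);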
+
+/*
+ * Convert a kq lock to a knote use reference
+ * (for the purpose of detaching AND vanishing it).
+ *
+ *     If the knote is being dropped, we can't get
+ *     a detach reference, so wait for the knote to
+ *  finish dropping before returning.
+ *
+ *  If the knote is being used for other purposes,
+ *  we cannot detach it until those uses are done
+ *  as well. Again, just wait for them to finish
+ *  (caller will start over at lookup).
+ *
+ *     - kq locked at entry
+ *     - unlocked on exit 
+ */
+static int
+kqlock2knotedetach(struct kqueue *kq, struct knote *kn)
+{
+       if ((kn->kn_status & KN_DROPPING) || kn->kn_inuse) {
+               /* have to wait for dropper or current uses to go away */
+               kn->kn_status |= KN_USEWAIT;
+               waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
+                                   CAST_EVENT64_T(&kn->kn_status),
+                                   THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
+               kqunlock(kq);
+               thread_block(THREAD_CONTINUE_NULL);
+               return (0);
+       }
+       assert((kn->kn_status & KN_VANISHED) == 0);
+       assert(kn->kn_status & KN_ATTACHED);
+       kn->kn_status &= ~KN_ATTACHED;
+       kn->kn_status |= KN_VANISHED;
+       kn->kn_inuse++;
+       kqunlock(kq);
+       return (1);
 }
 
 /*
@@ -423,10 +662,13 @@ static int
 kqlock2knotedrop(struct kqueue *kq, struct knote *kn)
 {
        int oktodrop;
+       wait_result_t result;
 
        oktodrop = ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) == 0);
-       kn->kn_status &= ~KN_STAYQUEUED;
+       /* if another thread is attaching, they will become the dropping thread */
        kn->kn_status |= KN_DROPPING;
+       knote_unsuppress(kn);
+       knote_dequeue(kn);
        if (oktodrop) {
                if (kn->kn_inuse == 0) {
                        kqunlock(kq);
@@ -434,27 +676,29 @@ kqlock2knotedrop(struct kqueue *kq, struct knote *kn)
                }
        }
        kn->kn_status |= KN_USEWAIT;
-       waitq_assert_wait64((struct waitq *)kq->kq_wqs,
+       waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
                            CAST_EVENT64_T(&kn->kn_status),
                            THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
        kqunlock(kq);
-       thread_block(THREAD_CONTINUE_NULL);
-       return (oktodrop);
+       result = thread_block(THREAD_CONTINUE_NULL);
+       /* THREAD_RESTART == another thread stole the knote drop */
+       return (result == THREAD_AWAKENED);
 }
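
The dropper side pairs with the above roughly as follows (a sketch of the
pattern used elsewhere in this file):

    kqlock(kq);
    if (kqlock2knotedrop(kq, kn)) {
        /* we won the drop: kq is unlocked and no users remain */
        knote_drop(kn, p);
    } else {
        /* woken with THREAD_RESTART: a concurrent use-reference holder
         * stole the drop and will finish it on our behalf */
    }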
 
+#if 0
 /*
  * Release a knote use count reference.
  */
 static void
 knote_put(struct knote *kn)
 {
-       struct kqueue *kq = kn->kn_kq;
+       struct kqueue *kq = knote_get_kq(kn);
 
        kqlock(kq);
        if (--kn->kn_inuse == 0) {
                if ((kn->kn_status & KN_USEWAIT) != 0) {
                        kn->kn_status &= ~KN_USEWAIT;
-                       waitq_wakeup64_all((struct waitq *)kq->kq_wqs,
+                       waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
                                           CAST_EVENT64_T(&kn->kn_status),
                                           THREAD_AWAKENED,
                                           WAITQ_ALL_PRIORITIES);
@@ -462,6 +706,7 @@ knote_put(struct knote *kn)
        }
        kqunlock(kq);
 }
+#endif
 
 static int
 filt_fileattach(struct knote *kn)
@@ -479,10 +724,11 @@ filt_fileattach(struct knote *kn)
 static void
 filt_kqdetach(struct knote *kn)
 {
-       struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
+       struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
+       struct kqueue *kq = &kqf->kqf_kqueue;
 
        kqlock(kq);
-       KNOTE_DETACH(&kq->kq_sel.si_note, kn);
+       KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
        kqunlock(kq);
 }
 
@@ -491,9 +737,48 @@ static int
 filt_kqueue(struct knote *kn, __unused long hint)
 {
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
+       int count;
+
+       count = kq->kq_count;
+       return (count > 0);
+}
+
+static int
+filt_kqtouch(struct knote *kn, struct kevent_internal_s *kev)
+{
+#pragma unused(kev)
+       struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
+       int res;
+
+       kqlock(kq);
+       kn->kn_data = kq->kq_count;
+       if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
+               kn->kn_udata = kev->udata;
+       res = (kn->kn_data > 0);
+
+       kqunlock(kq);
+
+       return res;
+}
+
+static int
+filt_kqprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
+{
+#pragma unused(data)
+       struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
+       int res;
 
+       kqlock(kq);
        kn->kn_data = kq->kq_count;
-       return (kn->kn_data > 0);
+       res = (kn->kn_data > 0);
+       if (res) {
+               *kev = kn->kn_kevent;
+               if (kn->kn_flags & EV_CLEAR)
+                       kn->kn_data = 0;
+       }
+       kqunlock(kq);
+
+       return res;
 }
 
 static int
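
The filt_kqueue/filt_kqtouch/filt_kqprocess trio backs the ability to monitor
one kqueue from another: EVFILT_READ on a kqueue fd reports kq_count in the
event's data field. A minimal user-space sketch (standard kevent API; error
handling elided):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int inner = kqueue(), outer = kqueue();
        int fds[2];
        struct kevent kev;

        /* watch the inner kqueue for readability from the outer one */
        EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
        kevent(outer, &kev, 1, NULL, 0, NULL);

        /* give the inner kqueue a pending event: a readable pipe */
        pipe(fds);
        EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
        kevent(inner, &kev, 1, NULL, 0, NULL);
        write(fds[1], "x", 1);

        /* the outer kqueue reports the inner one readable; data == kq_count */
        if (kevent(outer, NULL, 0, &kev, 1, NULL) == 1)
            printf("inner kqueue: %ld pending event(s)\n", (long)kev.data);
        return 0;
    }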
@@ -503,12 +788,17 @@ filt_procattach(struct knote *kn)
 
        assert(PID_MAX < NOTE_PDATAMASK);
 
-       if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0)
-               return (ENOTSUP);
+       if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) {
+               kn->kn_flags = EV_ERROR;
+               kn->kn_data = ENOTSUP;
+               return 0;
+       }
 
        p = proc_find(kn->kn_id);
        if (p == NULL) {
-               return (ESRCH);
+               kn->kn_flags = EV_ERROR;
+               kn->kn_data = ESRCH;
+               return 0;
        }
 
        const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
@@ -525,12 +815,13 @@ filt_procattach(struct knote *kn)
                                break;  /* parent-in-waiting => ok */
 
                        proc_rele(p);
-                       return (EACCES);
+                       kn->kn_flags = EV_ERROR;
+                       kn->kn_data = EACCES;
+                       return 0;
                } while (0);
 
        proc_klist_lock();
 
-       kn->kn_flags |= EV_CLEAR;       /* automatically set */
        kn->kn_ptr.p_proc = p;          /* store the proc handle */
 
        KNOTE_ATTACH(&p->p_klist, kn);
@@ -539,9 +830,14 @@ filt_procattach(struct knote *kn)
 
        proc_rele(p);
 
+       /*
+        * The knote only captures edge-triggered events after this
+        * point, so it can't already be fired.
+        */
        return (0);
 }
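
Note the error-reporting change: a rejected attach now comes back to user space
as an EV_ERROR kevent (via kn_flags/kn_data) instead of a kevent() errno. A
minimal sketch of the corresponding user-space registration (standard API;
error handling elided):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();
        if (pid == 0) {
            sleep(1);
            _exit(42);                      /* child exits shortly */
        }

        int kq = kqueue();
        struct kevent kev;

        /* EV_RECEIPT returns the attach status in-band: EV_ERROR is set
         * and data carries the errno (e.g. EACCES from the checks above) */
        EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_RECEIPT,
               NOTE_EXIT | NOTE_EXITSTATUS, 0, NULL);
        kevent(kq, &kev, 1, &kev, 1, NULL);
        if ((kev.flags & EV_ERROR) && kev.data != 0)
            return 1;

        if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1 && (kev.fflags & NOTE_EXIT))
            printf("pid %d exited, data 0x%lx\n", (int)kev.ident, (long)kev.data);
        return 0;
    }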
 
+
 /*
  * The knote may be attached to a different process, which may exit,
  * leaving nothing for the knote to be attached to.  In that case,
@@ -566,151 +862,150 @@ filt_procdetach(struct knote *kn)
 static int
 filt_proc(struct knote *kn, long hint)
 {
+       u_int event;
+
+       /* ALWAYS CALLED WITH proc_klist_lock */
+
        /*
         * Note: a lot of bits in hint may be obtained from the knote
         * To free some of those bits, see <rdar://problem/12592988> Freeing up
         * bits in hint for filt_proc
+        *
+        * mask off extra data
         */
-       /* hint is 0 when called from above */
-       if (hint != 0) {
-               u_int event;
-
-               /* ALWAYS CALLED WITH proc_klist_lock when (hint != 0) */
-
-               /*
-                * mask off extra data
-                */
-               event = (u_int)hint & NOTE_PCTRLMASK;
+       event = (u_int)hint & NOTE_PCTRLMASK;
 
-               /*
-                * termination lifecycle events can happen while a debugger
-                * has reparented a process, in which case notifications
-                * should be quashed except to the tracing parent. When
-                * the debugger reaps the child (either via wait4(2) or
-                * process exit), the child will be reparented to the original
-                * parent and these knotes re-fired.
-                */
-               if (event & NOTE_EXIT) {
-                       if ((kn->kn_ptr.p_proc->p_oppid != 0)
-                               && (kn->kn_kq->kq_p->p_pid != kn->kn_ptr.p_proc->p_ppid)) {
-                               /*
-                                * This knote is not for the current ptrace(2) parent, ignore.
-                                */
-                               return 0;
-                       }
-               }                                       
+       /*
+        * termination lifecycle events can happen while a debugger
+        * has reparented a process, in which case notifications
+        * should be quashed except to the tracing parent. When
+        * the debugger reaps the child (either via wait4(2) or
+        * process exit), the child will be reparented to the original
+        * parent and these knotes re-fired.
+        */
+       if (event & NOTE_EXIT) {
+               if ((kn->kn_ptr.p_proc->p_oppid != 0)
+                   && (knote_get_kq(kn)->kq_p->p_pid != kn->kn_ptr.p_proc->p_ppid)) {
+                       /*
+                        * This knote is not for the current ptrace(2) parent, ignore.
+                        */
+                       return 0;
+               }
+       }                                       
 
-               /*
-                * if the user is interested in this event, record it.
-                */
-               if (kn->kn_sfflags & event)
-                       kn->kn_fflags |= event;
+       /*
+        * if the user is interested in this event, record it.
+        */
+       if (kn->kn_sfflags & event)
+               kn->kn_fflags |= event;
 
 #pragma clang diagnostic push
 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
-               if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
-                       kn->kn_flags |= (EV_EOF | EV_ONESHOT);
-               }
+       if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
+               kn->kn_flags |= (EV_EOF | EV_ONESHOT);
+       }
 #pragma clang diagnostic pop
 
 
-               /*
-                * The kernel has a wrapper in place that returns the same data
-                * as is collected here, in kn_data.  Any changes to how 
-                * NOTE_EXITSTATUS and NOTE_EXIT_DETAIL are collected
-                * should also be reflected in the proc_pidnoteexit() wrapper.
-                */
-               if (event == NOTE_EXIT) {
-                       kn->kn_data = 0;
-                       if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
-                               kn->kn_fflags |= NOTE_EXITSTATUS;
-                               kn->kn_data |= (hint & NOTE_PDATAMASK);
+       /*
+        * The kernel has a wrapper in place that returns the same data
+        * as is collected here, in kn_data.  Any changes to how 
+        * NOTE_EXITSTATUS and NOTE_EXIT_DETAIL are collected
+        * should also be reflected in the proc_pidnoteexit() wrapper.
+        */
+       if (event == NOTE_EXIT) {
+               kn->kn_data = 0;
+               if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
+                       kn->kn_fflags |= NOTE_EXITSTATUS;
+                       kn->kn_data |= (hint & NOTE_PDATAMASK);
+               }
+               if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
+                       kn->kn_fflags |= NOTE_EXIT_DETAIL;
+                       if ((kn->kn_ptr.p_proc->p_lflag &
+                            P_LTERM_DECRYPTFAIL) != 0) {
+                               kn->kn_data |= NOTE_EXIT_DECRYPTFAIL; 
                        }
-                       if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
-                               kn->kn_fflags |= NOTE_EXIT_DETAIL;
-                               if ((kn->kn_ptr.p_proc->p_lflag &
-                                   P_LTERM_DECRYPTFAIL) != 0) {
-                                       kn->kn_data |= NOTE_EXIT_DECRYPTFAIL; 
-                               }
-                               if ((kn->kn_ptr.p_proc->p_lflag &
-                                   P_LTERM_JETSAM) != 0) {
-                                       kn->kn_data |= NOTE_EXIT_MEMORY;
-                                       switch (kn->kn_ptr.p_proc->p_lflag &
-                                           P_JETSAM_MASK) {
-                                               case P_JETSAM_VMPAGESHORTAGE:
-                                                       kn->kn_data |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
-                                                       break;
-                                               case P_JETSAM_VMTHRASHING:
-                                                       kn->kn_data |= NOTE_EXIT_MEMORY_VMTHRASHING;
-                                                       break;
-                                               case P_JETSAM_FCTHRASHING:
-                                                       kn->kn_data |= NOTE_EXIT_MEMORY_FCTHRASHING;
-                                                       break;
-                                               case P_JETSAM_VNODE:
-                                                       kn->kn_data |= NOTE_EXIT_MEMORY_VNODE;
-                                                       break;
-                                               case P_JETSAM_HIWAT:
-                                                       kn->kn_data |= NOTE_EXIT_MEMORY_HIWAT;
-                                                       break;
-                                               case P_JETSAM_PID:
-                                                       kn->kn_data |= NOTE_EXIT_MEMORY_PID;
-                                                       break;
-                                               case P_JETSAM_IDLEEXIT:
-                                                       kn->kn_data |= NOTE_EXIT_MEMORY_IDLE;
-                                                       break;
-                                       }
-                               }
-                               if ((kn->kn_ptr.p_proc->p_csflags &
-                                   CS_KILLED) != 0) {
-                                       kn->kn_data |= NOTE_EXIT_CSERROR;
+                       if ((kn->kn_ptr.p_proc->p_lflag &
+                            P_LTERM_JETSAM) != 0) {
+                               kn->kn_data |= NOTE_EXIT_MEMORY;
+                               switch (kn->kn_ptr.p_proc->p_lflag & P_JETSAM_MASK) {
+                               case P_JETSAM_VMPAGESHORTAGE:
+                                       kn->kn_data |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
+                                       break;
+                               case P_JETSAM_VMTHRASHING:
+                                       kn->kn_data |= NOTE_EXIT_MEMORY_VMTHRASHING;
+                                       break;
+                               case P_JETSAM_FCTHRASHING:
+                                       kn->kn_data |= NOTE_EXIT_MEMORY_FCTHRASHING;
+                                       break;
+                               case P_JETSAM_VNODE:
+                                       kn->kn_data |= NOTE_EXIT_MEMORY_VNODE;
+                                       break;
+                               case P_JETSAM_HIWAT:
+                                       kn->kn_data |= NOTE_EXIT_MEMORY_HIWAT;
+                                       break;
+                               case P_JETSAM_PID:
+                                       kn->kn_data |= NOTE_EXIT_MEMORY_PID;
+                                       break;
+                               case P_JETSAM_IDLEEXIT:
+                                       kn->kn_data |= NOTE_EXIT_MEMORY_IDLE;
+                                       break;
                                }
                        }
+                       if ((kn->kn_ptr.p_proc->p_csflags &
+                            CS_KILLED) != 0) {
+                               kn->kn_data |= NOTE_EXIT_CSERROR;
+                       }
                }
        }
 
-       /* atomic check, no locking need when called from above */
+       /* if we have any matching state, activate the knote */
        return (kn->kn_fflags != 0);
 }
 
-#if VM_PRESSURE_EVENTS
-/*
- * Virtual memory kevents
- *
- * author: Matt Jacobson [matthew_jacobson@apple.com]
- */
-
 static int
-filt_vmattach(struct knote *kn)
+filt_proctouch(struct knote *kn, struct kevent_internal_s *kev)
 {
+       int res;
+
+       proc_klist_lock();
+
+       /* accept new filter flags and mask off output events no longer interesting */
+       kn->kn_sfflags = kev->fflags;
+       if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
+               kn->kn_udata = kev->udata;
+
+       /* restrict the current results to the (smaller?) set of new interest */
        /*
-        * The note will be cleared once the information has been flushed to
-        * the client. If there is still pressure, we will be re-alerted.
+        * For compatibility with previous implementations, we leave kn_fflags
+        * as they were before.
         */
-       kn->kn_flags |= EV_CLEAR;
-       return (vm_knote_register(kn));
-}
+       //kn->kn_fflags &= kn->kn_sfflags;
 
-static void
-filt_vmdetach(struct knote *kn)
-{
-       vm_knote_unregister(kn);
+       res = (kn->kn_fflags != 0);
+
+       proc_klist_unlock();
+
+       return res;
 }
 
 static int
-filt_vm(struct knote *kn, long hint)
+filt_procprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
 {
-       /* hint == 0 means this is just an alive? check (always true) */
-       if (hint != 0) {
-               const pid_t pid = (pid_t)hint;
-               if ((kn->kn_sfflags & NOTE_VM_PRESSURE) &&
-                   (kn->kn_kq->kq_p->p_pid == pid)) {
-                       kn->kn_fflags |= NOTE_VM_PRESSURE;
-               }
-       }
+#pragma unused(data)
+       int res;
 
-       return (kn->kn_fflags != 0);
+       proc_klist_lock();
+       res = (kn->kn_fflags != 0);
+       if (res) {
+               *kev = kn->kn_kevent;
+               kn->kn_flags |= EV_CLEAR;       /* automatically set */
+               kn->kn_fflags = 0;
+               kn->kn_data = 0;
+       }
+       proc_klist_unlock();
+       return res;
 }
-#endif /* VM_PRESSURE_EVENTS */
 
 /*
  * filt_timervalidate - process data from user
@@ -774,13 +1069,17 @@ filt_timervalidate(struct knote *kn)
                nanoseconds_to_absolutetime((uint64_t)seconds * NSEC_PER_SEC +
                    nanoseconds, &now);
 
-               if (raw < now) {
-                       /* time has already passed */
-                       kn->kn_ext[0] = 0;
-               } else {
+               /* if time is in the future */
+               if (now < raw) {
                        raw -= now;
-                       clock_absolutetime_interval_to_deadline(raw,
-                           &kn->kn_ext[0]);
+
+                       if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
+                               clock_continuoustime_interval_to_deadline(raw,
+                                   &kn->kn_ext[0]);
+                       } else {
+                               clock_absolutetime_interval_to_deadline(raw,
+                                   &kn->kn_ext[0]);
+                       }
                }
        } else {
                kn->kn_sdata = raw;
@@ -801,16 +1100,24 @@ filt_timervalidate(struct knote *kn)
  *     Timer filter lock is held.
  */
 static void
-filt_timerupdate(struct knote *kn)
+filt_timerupdate(struct knote *kn, int num_fired)
 {
+       assert(num_fired > 0);
+
        /* if there's no interval, deadline is just in kn_ext[0] */
        if (kn->kn_sdata == 0)
                return;
 
        /* if timer hasn't fired before, fire in interval nsecs */
        if (kn->kn_ext[0] == 0) {
-               clock_absolutetime_interval_to_deadline(kn->kn_sdata,
-                   &kn->kn_ext[0]);
+               assert(num_fired == 1);
+               if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
+                       clock_continuoustime_interval_to_deadline(kn->kn_sdata,
+                           &kn->kn_ext[0]);
+               } else {
+                       clock_absolutetime_interval_to_deadline(kn->kn_sdata,
+                           &kn->kn_ext[0]);
+               }
        } else {
                /*
                 * If timer has fired before, schedule the next pop
@@ -818,8 +1125,11 @@ filt_timerupdate(struct knote *kn)
                 *
                 * We could check for whether the deadline has expired,
                 * but the thread call layer can handle that.
+                * 
+                * Go forward an additional number of periods, in the case the
+                * timer fired multiple times while the system was asleep.
                 */
-               kn->kn_ext[0] += kn->kn_sdata;
+               kn->kn_ext[0] += (kn->kn_sdata * num_fired);
        }
 }
 
@@ -849,8 +1159,8 @@ filt_timerexpire(void *knx, __unused void *spare)
 
        /* if someone is waiting for timer to pop */
        if (kn->kn_hookid & TIMER_CANCELWAIT) {
-               struct kqueue *kq = kn->kn_kq;
-               waitq_wakeup64_all((struct waitq *)kq->kq_wqs,
+               struct kqueue *kq = knote_get_kq(kn);
+               waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
                                   CAST_EVENT64_T(&kn->kn_hook),
                                   THREAD_AWAKENED,
                                   WAITQ_ALL_PRIORITIES);
@@ -866,7 +1176,7 @@ filt_timerexpire(void *knx, __unused void *spare)
 static void
 filt_timercancel(struct knote *kn)
 {
-       struct kqueue *kq = kn->kn_kq;
+       struct kqueue *kq = knote_get_kq(kn);
        thread_call_t callout = kn->kn_hook;
        boolean_t cancelled;
 
@@ -878,7 +1188,7 @@ filt_timercancel(struct knote *kn)
                } else {
                        /* we have to wait for the expire routine.  */
                        kn->kn_hookid |= TIMER_CANCELWAIT;
-                       waitq_assert_wait64((struct waitq *)kq->kq_wqs,
+                       waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
                                            CAST_EVENT64_T(&kn->kn_hook),
                                            THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
                        filt_timerunlock();
@@ -897,17 +1207,23 @@ filt_timerattach(struct knote *kn)
 {
        thread_call_t callout;
        int error;
+       int res;
 
        callout = thread_call_allocate(filt_timerexpire, kn);
-       if (NULL == callout)
-               return (ENOMEM);
+       if (NULL == callout) {
+               kn->kn_flags = EV_ERROR;
+               kn->kn_data = ENOMEM;
+               return 0;
+       }
 
        filt_timerlock();
        error = filt_timervalidate(kn);
        if (error != 0) {
                filt_timerunlock();
                thread_call_free(callout);
-               return (error);
+               kn->kn_flags = EV_ERROR;
+               kn->kn_data = error;
+               return 0;
        }
 
        kn->kn_hook = (void*)callout;
@@ -917,7 +1233,7 @@ filt_timerattach(struct knote *kn)
        if (kn->kn_sfflags & NOTE_ABSOLUTE)
                kn->kn_flags |= EV_ONESHOT;
 
-       filt_timerupdate(kn);
+       filt_timerupdate(kn, 1);
        if (kn->kn_ext[0]) {
                kn->kn_flags |= EV_CLEAR;
                unsigned int timer_flags = 0;
@@ -930,6 +1246,8 @@ filt_timerattach(struct knote *kn)
 
                if (kn->kn_sfflags & NOTE_LEEWAY)
                        timer_flags |= THREAD_CALL_DELAY_LEEWAY;
+               if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME)
+                       timer_flags |= THREAD_CALL_CONTINUOUS;
 
                thread_call_enter_delayed_with_leeway(callout, NULL,
                                kn->kn_ext[0], kn->kn_ext[1], timer_flags);
@@ -940,8 +1258,11 @@ filt_timerattach(struct knote *kn)
                kn->kn_data = 1;
        }
 
+       res = (kn->kn_data > 0);
+
        filt_timerunlock();
-       return (0);
+
+       return res;
 }
 
 /*
@@ -963,94 +1284,68 @@ filt_timerdetach(struct knote *kn)
 }
 
 
-
-static int
-filt_timer(struct knote *kn, long hint)
+static int filt_timer_num_fired(struct knote *kn)
 {
-       int result;
-
-       if (hint) {
-               /* real timer pop -- timer lock held by filt_timerexpire */
-               kn->kn_data++;
-
-               if (((kn->kn_hookid & TIMER_CANCELWAIT) == 0) &&
-                               ((kn->kn_flags & EV_ONESHOT) == 0)) {
-
-                       /* evaluate next time to fire */
-                       filt_timerupdate(kn);
-
-                       if (kn->kn_ext[0]) {
-                               unsigned int timer_flags = 0;
+       /* by default we fire a timer once */
+       int num_fired = 1;
 
-                               /* keep the callout and re-arm */
-                               if (kn->kn_sfflags & NOTE_CRITICAL)
-                                       timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
-                               else if (kn->kn_sfflags & NOTE_BACKGROUND)
-                                       timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
-                               else
-                                       timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
-
-                               if (kn->kn_sfflags & NOTE_LEEWAY)
-                                       timer_flags |= THREAD_CALL_DELAY_LEEWAY;
-
-                               thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL,
-                                               kn->kn_ext[0], kn->kn_ext[1], timer_flags);
-
-                               kn->kn_hookid |= TIMER_RUNNING;
-                       }
+       /*
+        * When the time base is mach_continuous_time, we have to calculate
+        * the number of times the timer fired while we were asleep.
+        */
+       if ((kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) &&
+           (kn->kn_sdata  != 0) &&
+           (kn->kn_ext[0] != 0))
+       {
+               const uint64_t now = mach_continuous_time();
+               // time for timer to fire (right now) is kn_ext[0]
+               // kn_sdata is period for timer to fire
+               assert(now >= kn->kn_ext[0]);
+               assert(kn->kn_sdata > 0);
+
+               const uint64_t overrun_ticks = now - kn->kn_ext[0];
+               const uint64_t kn_sdata = kn->kn_sdata;
+
+               if (overrun_ticks < kn_sdata) {
+                       num_fired = 1;
+               } else if (overrun_ticks < (kn_sdata << 1)) {
+                       num_fired = 2;
+               } else {
+                       num_fired = (overrun_ticks / kn_sdata) + 1;
                }
-
-               return (1);
        }
 
-       /* user-query */
-       filt_timerlock();
-
-       result = (kn->kn_data != 0);
-
-       filt_timerunlock();
-
-       return (result);
+       return num_fired;
 }
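
A worked example of the catch-up arithmetic above, with illustrative numbers: a
5-tick period that is 12 ticks past its deadline owes 12/5 + 1 = 3 pops.

    /* Illustrative numbers only; mirrors filt_timer_num_fired(). */
    uint64_t period = 5, overrun_ticks = 12;
    int num_fired;

    if (overrun_ticks < period)
        num_fired = 1;
    else if (overrun_ticks < (period << 1))
        num_fired = 2;
    else
        num_fired = (int)((overrun_ticks / period) + 1);

    /* num_fired == 3; filt_timerupdate() then advances the deadline by
     * period * num_fired = 15 ticks, landing it in the future again */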
 
-
 /*
- * filt_timertouch - update knote with new user input
+ * filt_timer - post events to a timer knote
  *
- * Cancel and restart the timer based on new user data. When
- * the user picks up a knote, clear the count of how many timer
- * pops have gone off (in kn_data).
+ * Count the timer fire and re-arm as requested.
+ * This always crosses the threshold of interest,
+ * so always return an indication that the knote
+ * should be activated (if not already).
  */
-static void
-filt_timertouch(struct knote *kn, struct kevent_internal_s *kev, long type)
+static int
+filt_timer(
+       struct knote *kn, 
+       long hint)
 {
-       int error;
-       filt_timerlock();
+#pragma unused(hint)
 
-       switch (type) {
-       case EVENT_REGISTER:
-               /* cancel current call */
-               filt_timercancel(kn);
+       /* real timer pop -- timer lock held by filt_timerexpire */
+       int num_fired = filt_timer_num_fired(kn);
+       kn->kn_data += num_fired;
 
-               /* recalculate deadline */
-               kn->kn_sdata = kev->data;
-               kn->kn_sfflags = kev->fflags;
-               kn->kn_ext[0] = kev->ext[0];
-               kn->kn_ext[1] = kev->ext[1];
-
-               error = filt_timervalidate(kn);
-               if (error) {
-                       /* no way to report error, so mark it in the knote */
-                       kn->kn_flags |= EV_ERROR;
-                       kn->kn_data = error;
-                       break;
-               }
-
-               /* start timer if necessary */
-               filt_timerupdate(kn);
+       if (((kn->kn_hookid & TIMER_CANCELWAIT) == 0) &&
+           ((kn->kn_flags & EV_ONESHOT) == 0)) {
+               /* evaluate next time to fire */
+               filt_timerupdate(kn, num_fired);
 
                if (kn->kn_ext[0]) {
                        unsigned int timer_flags = 0;
+
+                       /* keep the callout and re-arm */
                        if (kn->kn_sfflags & NOTE_CRITICAL)
                                timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
                        else if (kn->kn_sfflags & NOTE_BACKGROUND)
@@ -1065,27 +1360,123 @@ filt_timertouch(struct knote *kn, struct kevent_internal_s *kev, long type)
                                        kn->kn_ext[0], kn->kn_ext[1], timer_flags);
 
                        kn->kn_hookid |= TIMER_RUNNING;
-               } else {
-                       /* pretend the timer has fired */
-                       kn->kn_data = 1;
                }
+       }
+       return (1);
+}
 
-               break;
 
-       case EVENT_PROCESS:
-               /* reset the timer pop count in kn_data */
-               *kev = kn->kn_kevent;
-               kev->ext[0] = 0;
-               kn->kn_data = 0;
-               if (kn->kn_flags & EV_CLEAR)
-                       kn->kn_fflags = 0;
-               break;
-       default:
-               panic("%s: - invalid type (%ld)", __func__, type);
-               break;
+
+/*
+ * filt_timertouch - update timer knote with new user input
+ *
+ * Cancel and restart the timer based on new user data. When
+ * the user picks up a knote, clear the count of how many timer
+ * pops have gone off (in kn_data).
+ */
+static int
+filt_timertouch(
+       struct knote *kn,
+       struct kevent_internal_s *kev)
+{
+       int error;
+       int res;
+
+       filt_timerlock();
+
+       /* cancel current call */
+       filt_timercancel(kn);
+
+       /* capture the new values used to compute deadline */
+       kn->kn_sdata = kev->data;
+       kn->kn_sfflags = kev->fflags;
+       kn->kn_ext[0] = kev->ext[0];
+       kn->kn_ext[1] = kev->ext[1];
+
+       if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
+               kn->kn_udata = kev->udata;
+
+       /* recalculate deadline */
+       error = filt_timervalidate(kn);
+       if (error) {
+               /* no way to report error, so mark it in the knote */
+               filt_timerunlock();
+               kn->kn_flags |= EV_ERROR;
+               kn->kn_data = error;
+               return 1;
+       }
+
+       /* start timer if necessary */
+       filt_timerupdate(kn, 1);
+
+       if (kn->kn_ext[0]) {
+               unsigned int timer_flags = 0;
+               if (kn->kn_sfflags & NOTE_CRITICAL)
+                       timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
+               else if (kn->kn_sfflags & NOTE_BACKGROUND)
+                       timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
+               else
+                       timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
+
+               if (kn->kn_sfflags & NOTE_LEEWAY)
+                       timer_flags |= THREAD_CALL_DELAY_LEEWAY;
+
+               thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL,
+                               kn->kn_ext[0], kn->kn_ext[1], timer_flags);
+
+               kn->kn_hookid |= TIMER_RUNNING;
+       } else {
+               /* pretend the timer has fired */
+               kn->kn_data = 1;
+       }
+
+       /* capture if already fired */
+       res = (kn->kn_data > 0);
+
+       filt_timerunlock();
+
+       return res;
+}
+
+/*
+ * filt_timerprocess - query state of knote and snapshot event data
+ *
+ * Determine if the timer has fired in the past, snapshot the state
+ * of the kevent for returning to user-space, and clear pending event
+ * counters for the next time.
+ */
+static int
+filt_timerprocess(
+       struct knote *kn,
+       __unused struct filt_process_s *data,
+       struct kevent_internal_s *kev)
+{
+       filt_timerlock();
+
+       /* user-query */
+       if (kn->kn_data == 0) {
+               filt_timerunlock();
+               return 0;
        }
 
+       /*
+        * Copy out the interesting kevent state,
+        * but don't leak out the raw time calculations.
+        */
+       *kev = kn->kn_kevent;
+       kev->ext[0] = 0;
+       /* kev->ext[1] = 0;  JMM - shouldn't we hide this too? */
+
+       /*
+        * reset the timer pop count in kn_data
+        * and (optionally) clear the fflags.
+        */
+       kn->kn_data = 0;
+       if (kn->kn_flags & EV_CLEAR)
+               kn->kn_fflags = 0;
+
        filt_timerunlock();
+       return 1;
 }
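
A companion sketch for the process routine: the accumulated pop count is returned in data and then reset, so each collection reports only the fires since the previous one. The 50ms/220ms values below are arbitrary illustration.

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct kevent kev, out;
        int kq = kqueue();

        /* 50ms periodic timer */
        EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 50, NULL);
        kevent(kq, &kev, 1, NULL, 0, NULL);

        usleep(220000);     /* let several periods elapse */

        /* the process routine returns the accumulated pop count in
         * data and zeroes it for the next collection */
        kevent(kq, NULL, 0, &out, 1, NULL);
        printf("%ld pop(s) since last read\n", (long)out.data);
        close(kq);
        return 0;
    }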
 
 static void
@@ -1100,17 +1491,30 @@ filt_timerunlock(void)
        lck_mtx_unlock(&_filt_timerlock);
 }
 
+static void
+filt_userlock(void)
+{
+       lck_spin_lock(&_filt_userlock);
+}
+
+static void
+filt_userunlock(void)
+{
+       lck_spin_unlock(&_filt_userlock);
+}
+
 static int
 filt_userattach(struct knote *kn)
 {
        /* EVFILT_USER knotes are not attached to anything in the kernel */
+       /* Can't discover this knote until after attach - so no lock needed */
        kn->kn_hook = NULL;
        if (kn->kn_fflags & NOTE_TRIGGER) {
                kn->kn_hookid = 1;
        } else {
                kn->kn_hookid = 0;
        }
-       return (0);
+       return (kn->kn_hookid);
 }
 
 static void
@@ -1120,52 +1524,79 @@ filt_userdetach(__unused struct knote *kn)
 }
 
 static int
-filt_user(struct knote *kn, __unused long hint)
+filt_user(
+       __unused struct knote *kn,
+       __unused long hint)
 {
-       return (kn->kn_hookid);
+       panic("filt_user");
+       return 0;
 }
 
-static void
-filt_usertouch(struct knote *kn, struct kevent_internal_s *kev, long type)
+static int
+filt_usertouch(
+       struct knote *kn,
+       struct kevent_internal_s *kev)
 {
        uint32_t ffctrl;
-       switch (type) {
-       case EVENT_REGISTER:
-               if (kev->fflags & NOTE_TRIGGER) {
-                       kn->kn_hookid = 1;
-               }
+       int fflags;
+       int active;
 
-               ffctrl = kev->fflags & NOTE_FFCTRLMASK;
-               kev->fflags &= NOTE_FFLAGSMASK;
-               switch (ffctrl) {
-               case NOTE_FFNOP:
-                       break;
-               case NOTE_FFAND:
-                       OSBitAndAtomic(kev->fflags, &kn->kn_sfflags);
-                       break;
-               case NOTE_FFOR:
-                       OSBitOrAtomic(kev->fflags, &kn->kn_sfflags);
-                       break;
-               case NOTE_FFCOPY:
-                       kn->kn_sfflags = kev->fflags;
-                       break;
-               }
-               kn->kn_sdata = kev->data;
+       filt_userlock();
+
+       ffctrl = kev->fflags & NOTE_FFCTRLMASK;
+       fflags = kev->fflags & NOTE_FFLAGSMASK;
+       switch (ffctrl) {
+       case NOTE_FFNOP:
                break;
-       case EVENT_PROCESS:
-               *kev = kn->kn_kevent;
-               kev->fflags = (volatile UInt32)kn->kn_sfflags;
-               kev->data = kn->kn_sdata;
-               if (kn->kn_flags & EV_CLEAR) {
-                       kn->kn_hookid = 0;
-                       kn->kn_data = 0;
-                       kn->kn_fflags = 0;
-               }
+       case NOTE_FFAND:
+               kn->kn_sfflags &= fflags;
                break;
-       default:
-               panic("%s: - invalid type (%ld)", __func__, type);
+       case NOTE_FFOR:
+               kn->kn_sfflags |= fflags;
                break;
+       case NOTE_FFCOPY:
+               kn->kn_sfflags = fflags;
+               break;
+       }
+       kn->kn_sdata = kev->data;
+
+       if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
+               kn->kn_udata = kev->udata;
+
+       if (kev->fflags & NOTE_TRIGGER) {
+               kn->kn_hookid = 1;
+       }
+       active = kn->kn_hookid;
+
+       filt_userunlock();
+
+       return (active);
+}
+
+static int
+filt_userprocess(
+       struct knote *kn,
+       __unused struct filt_process_s *data,
+       struct kevent_internal_s *kev)
+{
+       filt_userlock();
+
+       if (kn->kn_hookid == 0) {
+               filt_userunlock();
+               return 0;
+       }
+
+       *kev = kn->kn_kevent;
+       kev->fflags = (volatile UInt32)kn->kn_sfflags;
+       kev->data = kn->kn_sdata;
+       if (kn->kn_flags & EV_CLEAR) {
+               kn->kn_hookid = 0;
+               kn->kn_data = 0;
+               kn->kn_fflags = 0;
        }
+       filt_userunlock();
+
+       return 1;
 }
 
 /*
@@ -1174,33 +1605,65 @@ filt_usertouch(struct knote *kn, struct kevent_internal_s *kev, long type)
 static int
 filt_badattach(__unused struct knote *kn)
 {
-       return (ENOTSUP);
+       kn->kn_flags |= EV_ERROR;
+       kn->kn_data = ENOTSUP;
+       return 0;
 }
 
 struct kqueue *
-kqueue_alloc(struct proc *p)
+kqueue_alloc(struct proc *p, unsigned int flags)
 {
        struct filedesc *fdp = p->p_fd;
-       struct kqueue *kq;
+       struct kqueue *kq = NULL;
+       int policy;
+       void *hook;
+       uint64_t kq_addr_offset;
 
-       MALLOC_ZONE(kq, struct kqueue *, sizeof (struct kqueue), M_KQUEUE,
-           M_WAITOK);
-       if (kq != NULL) {
-               struct waitq_set *wqs;
-
-               wqs = waitq_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST | SYNC_POLICY_DISABLE_IRQ);
-               if (wqs != NULL) {
-                       bzero(kq, sizeof (struct kqueue));
-                       lck_spin_init(&kq->kq_lock, kq_lck_grp, kq_lck_attr);
-                       TAILQ_INIT(&kq->kq_head);
-                       kq->kq_wqs = wqs;
-                       kq->kq_p = p;
-               } else {
-                       FREE_ZONE(kq, sizeof (struct kqueue), M_KQUEUE);
-                       kq = NULL;
+       if (flags & KEVENT_FLAG_WORKQ) {
+               struct kqworkq *kqwq;
+               int i;
+
+               kqwq = (struct kqworkq *)zalloc(kqworkq_zone);
+               if (kqwq == NULL)
+                       return NULL;
+
+               kq = &kqwq->kqwq_kqueue;
+               bzero(kqwq, sizeof (struct kqworkq));
+
+               kqwq->kqwq_state = KQ_WORKQ;
+
+               for (i = 0; i < KQWQ_NBUCKETS; i++) {
+                       TAILQ_INIT(&kq->kq_queue[i]);
+               }
+               for (i = 0; i < KQWQ_NQOS; i++) {
+                       TAILQ_INIT(&kqwq->kqwq_request[i].kqr_suppressed);
                }
+
+               lck_spin_init(&kqwq->kqwq_reqlock, kq_lck_grp, kq_lck_attr);
+               policy = SYNC_POLICY_FIFO;
+               hook = (void *)kqwq;
+               
+       } else {
+               struct kqfile *kqf;
+               
+               kqf = (struct kqfile *)zalloc(kqfile_zone);
+               if (kqf == NULL)
+                       return NULL;
+
+               kq = &kqf->kqf_kqueue;
+               bzero(kqf, sizeof (struct kqfile));
+               TAILQ_INIT(&kq->kq_queue[0]);
+               TAILQ_INIT(&kqf->kqf_suppressed);
+               
+               policy = SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST;
+               hook = NULL;
+
        }
 
+       waitq_set_init(&kq->kq_wqs, policy, NULL, hook);
+       lck_spin_init(&kq->kq_lock, kq_lck_grp, kq_lck_attr);
+       kq->kq_p = p;
+
        if (fdp->fd_knlistsize < 0) {
                proc_fdlock(p);
                if (fdp->fd_knlistsize < 0)
@@ -1208,6 +1671,9 @@ kqueue_alloc(struct proc *p)
                proc_fdunlock(p);
        }
 
+       kq_addr_offset = ((uintptr_t)kq - (uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS);
+       /* Assert that the address can be pointer compacted for use with knote */
+       assert(kq_addr_offset < (uint64_t)(1ull << KNOTE_KQ_BITSIZE));
        return (kq);
 }
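
The allocation above relies on the shared struct kqueue being embedded as the first member of each variant, so the kq pointer can be downcast once a state bit identifies the container. A reduced sketch of that pattern follows; the names here are illustrative, not the real xnu definitions.

    #include <stdio.h>

    struct kq_base   { unsigned state; };
    struct kq_filev  { struct kq_base base; int per_file_data; };
    struct kq_workqv { struct kq_base base; int per_workq_data; };

    #define KQV_WORKQ 0x1

    /* mirror of the dealloc-side downcast: first-member embedding makes
     * the (struct kq_base *) and container addresses identical */
    static const char *kq_variant_name(struct kq_base *kq)
    {
        if (kq->state & KQV_WORKQ) {
            struct kq_workqv *kqwq = (struct kq_workqv *)kq;
            (void)kqwq->per_workq_data;   /* variant fields now reachable */
            return "workq kqueue";
        }
        return "file-backed kqueue";
    }

    int main(void)
    {
        struct kq_workqv wq = { .base = { .state = KQV_WORKQ } };
        printf("%s\n", kq_variant_name(&wq.base));
        return 0;
    }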
 
@@ -1243,12 +1709,11 @@ kqueue_dealloc(struct kqueue *kq)
        for (i = 0; i < fdp->fd_knlistsize; i++) {
                kn = SLIST_FIRST(&fdp->fd_knlist[i]);
                while (kn != NULL) {
-                       if (kq == kn->kn_kq) {
+                       if (kq == knote_get_kq(kn)) {
                                kqlock(kq);
                                proc_fdunlock(p);
                                /* drop it ourselves or wait */
                                if (kqlock2knotedrop(kq, kn)) {
-                                       kn->kn_fop->f_detach(kn);
                                        knote_drop(kn, p);
                                }
                                proc_fdlock(p);
@@ -1263,12 +1728,11 @@ kqueue_dealloc(struct kqueue *kq)
                for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
                        kn = SLIST_FIRST(&fdp->fd_knhash[i]);
                        while (kn != NULL) {
-                               if (kq == kn->kn_kq) {
+                               if (kq == knote_get_kq(kn)) {
                                        kqlock(kq);
                                        proc_fdunlock(p);
                                        /* drop it ourselves or wait */
                                        if (kqlock2knotedrop(kq, kn)) {
-                                               kn->kn_fop->f_detach(kn);
                                                knote_drop(kn, p);
                                        }
                                        proc_fdlock(p);
@@ -1283,13 +1747,22 @@ kqueue_dealloc(struct kqueue *kq)
        proc_fdunlock(p);
 
        /*
-        * waitq_set_free() clears all preposts and also remove the KQ's
-        * waitq set from any select sets to which it may belong.
+        * waitq_set_deinit() removes the KQ's waitq set from
+        * any select sets to which it may belong.
         */
-       waitq_set_free(kq->kq_wqs);
-       kq->kq_wqs = NULL;
+       waitq_set_deinit(&kq->kq_wqs);
        lck_spin_destroy(&kq->kq_lock, kq_lck_grp);
-       FREE_ZONE(kq, sizeof (struct kqueue), M_KQUEUE);
+
+       if (kq->kq_state & KQ_WORKQ) {
+               struct kqworkq *kqwq = (struct kqworkq *)kq;
+
+               lck_spin_destroy(&kqwq->kqwq_reqlock, kq_lck_grp);
+               zfree(kqworkq_zone, kqwq);
+       } else {
+               struct kqfile *kqf = (struct kqfile *)kq;
+
+               zfree(kqfile_zone, kqf);
+       }
 }
 
 int
@@ -1305,7 +1778,7 @@ kqueue_body(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
                return (error);
        }
 
-       kq = kqueue_alloc(p);
+       kq = kqueue_alloc(p, 0);
        if (kq == NULL) {
                fp_free(p, fd, fp);
                return (ENOMEM);
@@ -1398,11 +1871,15 @@ kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp, struct proc *p
                kevp->ident = kevqos.ident;
                kevp->filter = kevqos.filter;
                kevp->flags = kevqos.flags;
+               kevp->qos = kevqos.qos;
+//             kevp->xflags = kevqos.xflags;
                kevp->udata = kevqos.udata;
                kevp->fflags = kevqos.fflags;
                kevp->data = kevqos.data;
                kevp->ext[0] = kevqos.ext[0];
                kevp->ext[1] = kevqos.ext[1];
+               kevp->ext[2] = kevqos.ext[2];
+               kevp->ext[3] = kevqos.ext[3];
        }
        if (!error)
                *addrp += advance;
@@ -1417,12 +1894,21 @@ kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc *
        int advance;
        int error;
 
+       /* 
+        * fully initialize the different output event structure
+        * types from the internal kevent (and some universal
+        * defaults for fields not represented in the internal
+        * form).
+        */
        if (flags & KEVENT_FLAG_LEGACY32) {
                assert((flags & KEVENT_FLAG_STACK_EVENTS) == 0);
 
                if (IS_64BIT_PROCESS(p)) {
                        struct user64_kevent kev64;
 
+                       advance = sizeof (kev64);
+                       bzero(&kev64, advance);
+                       
                        /*
                         * deal with the special case of a user-supplied
                         * value of (uintptr_t)-1.
@@ -1435,18 +1921,18 @@ kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc *
                        kev64.fflags = kevp->fflags;
                        kev64.data = (int64_t) kevp->data;
                        kev64.udata = kevp->udata;
-                       advance = sizeof (kev64);
                        error = copyout((caddr_t)&kev64, addr, advance);
                } else {
                        struct user32_kevent kev32;
 
+                       advance = sizeof (kev32);
+                       bzero(&kev32, advance);
                        kev32.ident = (uint32_t)kevp->ident;
                        kev32.filter = kevp->filter;
                        kev32.flags = kevp->flags;
                        kev32.fflags = kevp->fflags;
                        kev32.data = (int32_t)kevp->data;
                        kev32.udata = kevp->udata;
-                       advance = sizeof (kev32);
                        error = copyout((caddr_t)&kev32, addr, advance);
                }
        } else if (flags & KEVENT_FLAG_LEGACY64) {
@@ -1456,6 +1942,7 @@ kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc *
                if (flags & KEVENT_FLAG_STACK_EVENTS) {
                        addr -= advance;
                }
+               bzero(&kev64, advance);
                kev64.ident = kevp->ident;
                kev64.filter = kevp->filter;
                kev64.flags = kevp->flags;
@@ -1467,20 +1954,24 @@ kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc *
                error = copyout((caddr_t)&kev64, addr, advance);
        } else {
                struct kevent_qos_s kevqos;
-       
-               bzero(&kevqos, sizeof (struct kevent_qos_s));
+          
                advance = sizeof (struct kevent_qos_s);
                if (flags & KEVENT_FLAG_STACK_EVENTS) {
                        addr -= advance;
                }
+               bzero(&kevqos, advance);
                kevqos.ident = kevp->ident;
                kevqos.filter = kevp->filter;
                kevqos.flags = kevp->flags;
+               kevqos.qos = kevp->qos;
+               kevqos.udata = kevp->udata;
                kevqos.fflags = kevp->fflags;
+               kevqos.xflags = 0;
                kevqos.data = (int64_t) kevp->data;
-               kevqos.udata = kevp->udata;
                kevqos.ext[0] = kevp->ext[0];
                kevqos.ext[1] = kevp->ext[1];
+               kevqos.ext[2] = kevp->ext[2];
+               kevqos.ext[3] = kevp->ext[3];
                error = copyout((caddr_t)&kevqos, addr, advance);
        }
        if (!error) {
@@ -1492,23 +1983,84 @@ kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc *
        return (error);
 }
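
A side note on the bzero() calls added above: each output struct is now zeroed at full width before its fields are assigned, so compiler padding can no longer carry stale kernel stack bytes out to user space. A small sketch of the pattern, with a hypothetical record type (in the kernel the final copy would be copyout() rather than memcpy()):

    #include <sys/types.h>
    #include <string.h>

    /* hypothetical output record: 4 bytes of padding follow 'ident' on LP64 */
    struct example_out {
        uint32_t ident;
        uint64_t data;
    };

    static int
    copy_out_safely(void *udst, uint32_t ident, uint64_t data)
    {
        struct example_out out;

        bzero(&out, sizeof(out));   /* clear the padding hole first */
        out.ident = ident;
        out.data = data;
        memcpy(udst, &out, sizeof(out));   /* copyout(&out, uaddr, ...) in-kernel */
        return 0;
    }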
 
+static int
+kevent_get_data_size(struct proc *p, 
+                     uint64_t data_available,
+                     unsigned int flags,
+                     user_size_t *residp)
+{
+       user_size_t resid;
+       int error = 0;
+
+       if (data_available != USER_ADDR_NULL) {
+               if (flags & KEVENT_FLAG_KERNEL) {
+                       resid = *(user_size_t *)(uintptr_t)data_available;
+               } else if (IS_64BIT_PROCESS(p)) {
+                       user64_size_t usize;
+                       error = copyin((user_addr_t)data_available, &usize, sizeof(usize));
+                       resid = (user_size_t)usize;
+               } else {
+                       user32_size_t usize;
+                       error = copyin((user_addr_t)data_available, &usize, sizeof(usize));
+                       resid = (user_size_t)usize;
+               }
+               if (error)
+                       return(error);
+       } else {
+               resid = 0;
+       }
+       *residp = resid;
+       return 0;
+}
+
+static int
+kevent_put_data_size(struct proc *p, 
+                     uint64_t data_available,
+                     unsigned int flags,
+                     user_size_t resid)
+{
+       int error = 0;
+
+       if (data_available) {
+               if (flags & KEVENT_FLAG_KERNEL) {
+                       *(user_size_t *)(uintptr_t)data_available = resid;
+               } else if (IS_64BIT_PROCESS(p)) {
+                       user64_size_t usize = (user64_size_t)resid;
+                       error = copyout(&usize, (user_addr_t)data_available, sizeof(usize));
+               } else {
+                       user32_size_t usize = (user32_size_t)resid;
+                       error = copyout(&usize, (user_addr_t)data_available, sizeof(usize));
+               }
+       }
+       return error;
+}
+
 /*
  * kevent_continue - continue a kevent syscall after blocking
  *
  *     assume we inherit a use count on the kq fileglob.
  */
 
+__attribute__((noreturn))
 static void
 kevent_continue(__unused struct kqueue *kq, void *data, int error)
 {
        struct _kevent *cont_args;
        struct fileproc *fp;
+       uint64_t data_available;
+       user_size_t data_size;
+       user_size_t data_resid;
+       unsigned int flags;
        int32_t *retval;
        int noutputs;
        int fd;
        struct proc *p = current_proc();
 
        cont_args = (struct _kevent *)data;
+       data_available = cont_args->data_available;
+       flags = cont_args->process_data.fp_flags;
+       data_size = cont_args->process_data.fp_data_size;
+       data_resid = cont_args->process_data.fp_data_resid;
        noutputs = cont_args->eventout;
        retval = cont_args->retval;
        fd = cont_args->fd;
@@ -1517,6 +2069,11 @@ kevent_continue(__unused struct kqueue *kq, void *data, int error)
        if (fp != NULL)
                fp_drop(p, fd, fp, 0);
 
+       /* don't abandon other output just because of residual copyout failures */
+       if (error == 0 && data_available && data_resid != data_size) {
+               (void)kevent_put_data_size(p, data_available, flags, data_resid);
+       }
+
        /* don't restart after signals... */
        if (error == ERESTART)
                error = EINTR;
@@ -1537,14 +2094,14 @@ kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
        unsigned int flags = KEVENT_FLAG_LEGACY32;
 
        return kevent_internal(p,
-                              uap->fd,
-                              uap->changelist, uap->nchanges,
-                              uap->eventlist, uap->nevents,
-                              0ULL, 0ULL,
-                              flags,
-                              uap->timeout,
-                              kevent_continue,
-                              retval);
+                              uap->fd,
+                              uap->changelist, uap->nchanges,
+                              uap->eventlist, uap->nevents,
+                              0ULL, 0ULL,
+                              flags,
+                              uap->timeout,
+                              kevent_continue,
+                              retval);
 }
 
 int
@@ -1557,64 +2114,31 @@ kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
        flags |= KEVENT_FLAG_LEGACY64;
 
        return kevent_internal(p,
-                              uap->fd,
-                              uap->changelist, uap->nchanges,
-                              uap->eventlist, uap->nevents,
-                              0ULL, 0ULL,
-                              flags,
-                              uap->timeout,
-                              kevent_continue,
-                              retval);
+                              uap->fd,
+                              uap->changelist, uap->nchanges,
+                              uap->eventlist, uap->nevents,
+                              0ULL, 0ULL,
+                              flags,
+                              uap->timeout,
+                              kevent_continue,
+                              retval);
 }
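
For reference, the user-space shape of this entry point (kevent64() is in the public <sys/event.h>): struct kevent64_s carries the two ext slots the legacy kevent() ABI lacks. A minimal sketch:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct kevent64_s kev, out;
        struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        int kq = kqueue();

        /* EV_SET64 takes ext[0]/ext[1] in addition to the legacy fields;
         * a 100ms timer, for example: */
        EV_SET64(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 100, 0, 0, 0);
        kevent64(kq, &kev, 1, NULL, 0, 0, NULL);

        int n = kevent64(kq, NULL, 0, &out, 1, 0, &ts);
        if (n > 0)
            printf("ident %llu fired, data=%lld\n", out.ident, out.data);
        close(kq);
        return 0;
    }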
 
 int
 kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
 {
-       user_size_t usize = 0;
-       user_size_t ssize;
-       int error;
-
        /* restrict to user flags */
        uap->flags &= KEVENT_FLAG_USER;
 
-       if (uap->data_available) {
-               if (!IS_64BIT_PROCESS(p)) {
-                       uint32_t csize;
-
-                       error = copyin(uap->data_available, (caddr_t)&csize, sizeof(csize));
-                       if (error)
-                               return error;
-                       usize = csize;
-               } else {
-                       uint64_t csize;
-                       error = copyin(uap->data_available, (caddr_t)&csize, sizeof(csize));
-                       if (error)
-                               return error;
-                       usize = csize;
-               }
-       }
-       ssize = usize;
-
-       error = kevent_internal(p,
-                               uap->fd,
-                               uap->changelist, uap->nchanges,
-                               uap->eventlist, uap->nevents,
-                               uap->data_out, &usize,
-                               uap->flags,
-                               0ULL,
-                               kevent_continue,
-                               retval);
-
-       if (error == 0 && uap->data_available && usize != ssize) {
-               if (!IS_64BIT_PROCESS(p)) {
-                       uint32_t csize = (uint32_t)usize;
-
-                       error = copyout((caddr_t)&csize, uap->data_available, sizeof(csize));
-               } else {
-                       error = copyout((caddr_t)&usize, uap->data_available, sizeof(usize));
-               }
-       }
-       return error;
+       return kevent_internal(p,
+                              uap->fd,
+                              uap->changelist, uap->nchanges,
+                              uap->eventlist,  uap->nevents,
+                              uap->data_out, (uint64_t)uap->data_available,
+                              uap->flags,
+                              0ULL,
+                              kevent_continue,
+                              retval);
 }
 
 int 
@@ -1626,57 +2150,33 @@ kevent_qos_internal(struct proc *p, int fd,
                    int32_t *retval) 
 {
        return kevent_internal(p,
-                              fd,
-                              changelist, nchanges,
-                              eventlist, nevents,
-                              data_out, data_available,
-                              flags,
-                              0ULL,
-                              NULL,
-                              retval);
+                              fd,
+                              changelist, nchanges,
+                              eventlist, nevents,
+                              data_out, (uint64_t)data_available,
+                              (flags | KEVENT_FLAG_KERNEL),
+                              0ULL,
+                              NULL,
+                              retval);
 }
  
 static int
-kevent_internal(struct proc *p, 
-               int fd,
-               user_addr_t changelist, int nchanges,
-               user_addr_t ueventlist, int nevents,
-               user_addr_t data_out, user_size_t *data_available,
-               unsigned int flags, 
-               user_addr_t utimeout,
-               kqueue_continue_t continuation,
-               int32_t *retval)
+kevent_get_timeout(struct proc *p,
+                  user_addr_t utimeout,
+                  unsigned int flags,
+                  struct timeval *atvp)
 {
-       struct _kevent *cont_args;
-       uthread_t ut;
-       struct kqueue *kq;
-       struct fileproc *fp = NULL;
-       struct kevent_internal_s kev;
-       int error = 0, noutputs;
        struct timeval atv;
+       int error = 0;
 
-#if 1
-       /* temporarily ignore these fields */
-       (void)data_out;
-       (void)data_available;
-#endif
-
-       /* prepare to deal with stack-wise allocation of out events */
-       if (flags & KEVENT_FLAG_STACK_EVENTS) {
-               int scale = ((flags & KEVENT_FLAG_LEGACY32) ? 
-                            (IS_64BIT_PROCESS(p) ? sizeof(struct user64_kevent) :
-                                                   sizeof(struct user32_kevent)) :
-                            ((flags & KEVENT_FLAG_LEGACY64) ? sizeof(struct kevent64_s) :
-                                                              sizeof(struct kevent_qos_s)));
-               ueventlist += nevents * scale;
-       }
-
-       /* convert timeout to absolute - if we have one (and not immediate) */
        if (flags & KEVENT_FLAG_IMMEDIATE) {
                getmicrouptime(&atv);
        } else if (utimeout != USER_ADDR_NULL) {
                struct timeval rtv;
-               if (IS_64BIT_PROCESS(p)) {
+               if (flags & KEVENT_FLAG_KERNEL) {
+                       struct timespec *tsp = (struct timespec *)utimeout;
+                       TIMESPEC_TO_TIMEVAL(&rtv, tsp);
+               } else if (IS_64BIT_PROCESS(p)) {
                        struct user64_timespec ts;
                        error = copyin(utimeout, &ts, sizeof(ts));
                        if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0)
@@ -1699,6 +2199,41 @@ kevent_internal(struct proc *p,
                atv.tv_sec = 0;
                atv.tv_usec = 0;
        }
+       *atvp = atv;
+       return 0;
+}
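
The timeout contract as seen from user space, for a quick illustration: a NULL timeout blocks indefinitely, while a zeroed timespec degenerates to an immediate poll (the same effect the kernel-internal KEVENT_FLAG_IMMEDIATE path produces).

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct kevent out;
        struct timespec poll_now = { 0, 0 };
        int kq = kqueue();

        /* zero timeout: check for pending events and return at once */
        int n = kevent(kq, NULL, 0, &out, 1, &poll_now);
        printf("immediate poll returned %d event(s)\n", n);
        close(kq);
        return 0;
    }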
+
+static int
+kevent_set_kq_mode(struct kqueue *kq, unsigned int flags)
+{
+       /* each kq should only be used for events of one type */
+       kqlock(kq);
+       if (kq->kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) {
+               if (flags & KEVENT_FLAG_LEGACY32) {
+                       if ((kq->kq_state & KQ_KEV32) == 0) {
+                               kqunlock(kq);
+                               return EINVAL;
+                       }
+               } else if (kq->kq_state & KQ_KEV32) {
+                       kqunlock(kq);
+                       return EINVAL;
+               }
+       } else if (flags & KEVENT_FLAG_LEGACY32) {
+               kq->kq_state |= KQ_KEV32;
+       } else {
+               /* JMM - set KQ_KEVQOS when we are ready for exclusive */
+               kq->kq_state |= KQ_KEV64;
+       }
+       kqunlock(kq);
+       return 0;
+}
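
A small sketch of the rule kevent_set_kq_mode enforces: the first call stamps the kq with an event-format width, and a later call through a different-width ABI fails with EINVAL.

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct kevent kev32;
        struct kevent64_s kev64;
        int kq = kqueue();

        /* first use marks the kq as legacy (KQ_KEV32) ... */
        EV_SET(&kev32, 1, EVFILT_TIMER, EV_ADD, 0, 1000, NULL);
        kevent(kq, &kev32, 1, NULL, 0, NULL);

        /* ... so a wider-ABI call on the same kq is rejected */
        EV_SET64(&kev64, 2, EVFILT_TIMER, EV_ADD, 0, 1000, 0, 0, 0);
        if (kevent64(kq, &kev64, 1, NULL, 0, 0, NULL) == -1 && errno == EINVAL)
            printf("mixing event ABIs on one kq fails with EINVAL\n");
        close(kq);
        return 0;
    }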
+
+static int
+kevent_get_kq(struct proc *p, int fd, unsigned int flags, struct fileproc **fpp, struct kqueue **kqp)
+{
+       struct fileproc *fp = NULL;
+       struct kqueue *kq;
+       int error;
 
        if (flags & KEVENT_FLAG_WORKQ) {
                /*
@@ -1709,18 +2244,12 @@ kevent_internal(struct proc *p,
                 */
                kq = p->p_wqkqueue;
                if (kq == NULL) {
-                       struct kqueue *alloc_kq = kqueue_alloc(p);
+                       struct kqueue *alloc_kq = kqueue_alloc(p, KEVENT_FLAG_WORKQ);
                        if (alloc_kq == NULL)
                                return ENOMEM;
 
                        proc_fdlock(p);
                        if (p->p_wqkqueue == NULL) {
-                               /*
-                                * The kq is marked as special -
-                                * with unique interactions with
-                                * the workq for this process.
-                                */
-                               alloc_kq->kq_state |= KQ_WORKQ;
                                kq = p->p_wqkqueue = alloc_kq;
                                proc_fdunlock(p);
                        } else {
@@ -1734,28 +2263,69 @@ kevent_internal(struct proc *p,
                if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0)
                        return (error);
        }
+       if ((error = kevent_set_kq_mode(kq, flags)) != 0) {
+               /* drop the usecount */
+               if (fp != NULL)
+                       fp_drop(p, fd, fp, 0);
+               return error;
+       } 
+
+       *fpp = fp;
+       *kqp = kq;
+       return 0;
+}
 
-       /* each kq should only be used for events of one type */
-       kqlock(kq);
-       if (kq->kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) {
-               if (flags & KEVENT_FLAG_LEGACY32) {
-                       if ((kq->kq_state & KQ_KEV32) == 0) {
-                               error = EINVAL;
-                               kqunlock(kq);
-                               goto errorout;
-                       }
-               } else if (kq->kq_state & KQ_KEV32) {
-                       error = EINVAL;
-                       kqunlock(kq);
-                       goto errorout;
-               }
-       } else if (flags & KEVENT_FLAG_LEGACY32) {
-               kq->kq_state |= KQ_KEV32;
-       } else {
-               /* JMM - set KQ_KEVQOS when we are ready for exclusive */
-               kq->kq_state |= KQ_KEV64;
+
+static int
+kevent_internal(struct proc *p, 
+               int fd,
+               user_addr_t changelist, int nchanges,
+               user_addr_t ueventlist, int nevents,
+               user_addr_t data_out, uint64_t data_available,
+               unsigned int flags, 
+               user_addr_t utimeout,
+               kqueue_continue_t continuation,
+               int32_t *retval)
+{
+       struct _kevent *cont_args;
+       uthread_t ut;
+       struct kqueue *kq;
+       struct fileproc *fp = NULL;
+       struct kevent_internal_s kev;
+       int error, noutputs;
+       struct timeval atv;
+       user_size_t data_size;
+       user_size_t data_resid;
+
+       /* Don't allow user-space threads to process output events from the workq kq */
+       if ((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL)) == KEVENT_FLAG_WORKQ &&
+           !(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0)
+               return EINVAL;
+
+       /* prepare to deal with stack-wise allocation of out events */
+       if (flags & KEVENT_FLAG_STACK_EVENTS) {
+               int scale = ((flags & KEVENT_FLAG_LEGACY32) ? 
+                            (IS_64BIT_PROCESS(p) ? sizeof(struct user64_kevent) :
+                                                   sizeof(struct user32_kevent)) :
+                            ((flags & KEVENT_FLAG_LEGACY64) ? sizeof(struct kevent64_s) :
+                                                              sizeof(struct kevent_qos_s)));
+               ueventlist += nevents * scale;
        }
-       kqunlock(kq);
+
+       /* convert timeout to absolute - if we have one (and not immediate) */
+       error = kevent_get_timeout(p, utimeout, flags, &atv);
+       if (error)
+               return error;
+       
+       /* copyin initial value of data residual from data_available */
+       error = kevent_get_data_size(p, data_available, flags, &data_size);
+       if (error)
+               return error;
+
+       /* get the kq we are going to be working on */
+       error = kevent_get_kq(p, fd, flags, &fp, &kq);
+       if (error)
+               return error;
 
        /* register all the change requests the user provided... */
        noutputs = 0;
@@ -1764,24 +2334,33 @@ kevent_internal(struct proc *p,
                if (error)
                        break;
 
+               /* Make sure user doesn't pass in any system flags */
                kev.flags &= ~EV_SYSFLAGS;
-               error = kevent_register(kq, &kev, p);
-               if ((error || (kev.flags & EV_RECEIPT)) && nevents > 0) {
-                       kev.flags = EV_ERROR;
-                       kev.data = error;
+
+               kevent_register(kq, &kev, p);
+
+               if (nevents > 0 &&
+                   ((kev.flags & EV_ERROR) || (kev.flags & EV_RECEIPT))) {
+                       if (kev.flags & EV_RECEIPT) {
+                               kev.flags |= EV_ERROR;
+                               kev.data = 0;
+                       }
                        error = kevent_copyout(&kev, &ueventlist, p, flags);
                        if (error == 0) {
                                nevents--;
                                noutputs++;
                        }
+               } else if (kev.flags & EV_ERROR) {
+                       error = kev.data;
                }
                nchanges--;
        }
 
        /* short-circuit the scan if we only want error events */
-       if (flags & KEVENT_FLAG_ERROR_EVENTS) 
+       if (flags & KEVENT_FLAG_ERROR_EVENTS)
                nevents = 0;
 
+       /* process pending events */
        if (nevents > 0 && noutputs == 0 && error == 0) {
 
                /* store the continuation/completion data in the uthread */
@@ -1793,13 +2372,27 @@ kevent_internal(struct proc *p,
                cont_args->eventlist = ueventlist;
                cont_args->eventcount = nevents;
                cont_args->eventout = noutputs;
-               cont_args->eventflags = flags;
+               cont_args->data_available = data_available;
+               cont_args->process_data.fp_fd = fd;
+               cont_args->process_data.fp_flags = flags;
+               cont_args->process_data.fp_data_out = data_out;
+               cont_args->process_data.fp_data_size = data_size;
+               cont_args->process_data.fp_data_resid = data_size;
 
                error = kqueue_scan(kq, kevent_callback,
                                    continuation, cont_args,
+                                   &cont_args->process_data,
                                    &atv, p);
 
+               /* process remaining outputs */
                noutputs = cont_args->eventout;
+               data_resid = cont_args->process_data.fp_data_resid;
+
+               /* copyout residual data size value (if it needs to be copied out) */
+               /* don't abandon other output just because of residual copyout failures */
+               if (error == 0 && data_available && data_resid != data_size) {
+                       (void)kevent_put_data_size(p, data_available, flags, data_resid);
+               }
        }
 
        /* don't restart after signals... */
@@ -1809,7 +2402,6 @@ kevent_internal(struct proc *p,
                error = 0;
        if (error == 0)
                *retval = noutputs;
-errorout:
        if (fp != NULL)
                fp_drop(p, fd, fp, 0);
        return (error);
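
One visible consequence of the registration loop above: with EV_RECEIPT, every change produces one output kevent carrying EV_ERROR, with data holding 0 on success or an errno on failure, and (since the outputs are consumed by receipts) no pending events are drained. A minimal sketch; fd 999 is deliberately bogus to force EBADF:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct kevent changes[2], results[2];
        int kq = kqueue();

        EV_SET(&changes[0], 1, EVFILT_TIMER, EV_ADD | EV_RECEIPT, 0, 100, NULL);
        EV_SET(&changes[1], 999, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);

        /* one receipt per change: data == 0 or an errno (EBADF here) */
        int n = kevent(kq, changes, 2, results, 2, NULL);
        for (int i = 0; i < n; i++)
            printf("ident %lu: status %ld\n",
                   (unsigned long)results[i].ident, (long)results[i].data);
        close(kq);
        return 0;
    }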
@@ -1836,7 +2428,7 @@ kevent_callback(__unused struct kqueue *kq, struct kevent_internal_s *kevp,
         * Copy out the appropriate amount of event data for this user.
         */
        error = kevent_copyout(kevp, &cont_args->eventlist, current_proc(),
-                              cont_args->eventflags);
+                              cont_args->process_data.fp_flags);
 
        /*
         * If there isn't space for additional events, return
@@ -1885,250 +2477,287 @@ kevent_description(struct kevent_internal_s *kevp, char *s, size_t n)
  *     caller holds a reference on the kqueue
  */
 
-int
+void
 kevent_register(struct kqueue *kq, struct kevent_internal_s *kev,
     __unused struct proc *ctxp)
 {
        struct proc *p = kq->kq_p;
-       struct filedesc *fdp = p->p_fd;
        struct filterops *fops;
-       struct fileproc *fp = NULL;
        struct knote *kn = NULL;
-       struct klist *list;
+       int result = 0;
        int error = 0;
 
        if (kev->filter < 0) {
-               if (kev->filter + EVFILT_SYSCOUNT < 0)
-                       return (EINVAL);
+               if (kev->filter + EVFILT_SYSCOUNT < 0) {
+                       error = EINVAL;
+                       goto out;
+               }
                fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
        } else {
-               return (EINVAL);
+               error = EINVAL;
+               goto out;
        }
 
+       /* restrict EV_VANISHED to adding udata-specific dispatch kevents */
+       if ((kev->flags & EV_VANISHED) &&
+           (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2)) {
+               error = EINVAL;
+               goto out;
+       }
+
+       /* Simplify the flags - delete and disable overrule */
+       if (kev->flags & EV_DELETE)
+               kev->flags &= ~EV_ADD;
+       if (kev->flags & EV_DISABLE)
+               kev->flags &= ~EV_ENABLE;
+
 restart:
-       /* this iocount needs to be dropped if it is not registered */
-       list = NULL;
+
        proc_fdlock(p);
 
-       /* 
-        * determine where to look for the knote
-        */
-       if (fops->f_isfd) {
-               if ((error = fp_lookup(p, kev->ident, &fp, 1)) != 0) {
-                       proc_fdunlock(p);
-                       return (error);
-               }
-               /* fd-based knotes are linked off the fd table */
-               if (kev->ident < (u_int)fdp->fd_knlistsize) {
-                       list = &fdp->fd_knlist[kev->ident];
-               }
-       } else if (fdp->fd_knhashmask != 0) {
-               /* hash non-fd knotes here too */
-               list = &fdp->fd_knhash[KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
-       }
+       /* find the matching knote from the fd tables/hashes */
+       kn = knote_fdfind(kq, kev, p);
 
-       /*
-        * scan the selected list looking for a match
-        */
-       if (list != NULL) {
-               SLIST_FOREACH(kn, list, kn_link) {
-                       if (kq == kn->kn_kq &&
-                           kev->ident == kn->kn_id && 
-                           kev->filter == kn->kn_filter) {
-                               if (kev->flags & EV_UDATA_SPECIFIC) {
-                                       if ((kn->kn_flags & EV_UDATA_SPECIFIC) &&
-                                           kev->udata == kn->kn_udata) {
-                                               break; /* matching udata-specific knote */
-                                       }
-                               } else if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0) {
-                                       break; /* matching non-udata-specific knote */
+       if (kn == NULL) {
+               if (kev->flags & EV_ADD) {
+                       struct fileproc *fp = NULL;
+
+                       /* grab a file reference for the new knote */
+                       if (fops->f_isfd) {
+                               if ((error = fp_lookup(p, kev->ident, &fp, 1)) != 0) {
+                                       proc_fdunlock(p);
+                                       goto out;
                                }
                        }
-               }
-       }
 
-       /*
-        * kn now contains the matching knote, or NULL if no match
-        */
-       if (kn == NULL) {
-               if ((kev->flags & (EV_ADD|EV_DELETE)) == EV_ADD) {
                        kn = knote_alloc();
                        if (kn == NULL) {
                                proc_fdunlock(p);
                                error = ENOMEM;
-                               goto done;
+                               if (fp != NULL)
+                                       fp_drop(p, kev->ident, fp, 0);
+                               goto out;
                        }
+
                        kn->kn_fp = fp;
-                       kn->kn_kq = kq;
-                       kn->kn_tq = &kq->kq_head;
-                       kn->kn_fop = fops;
+                       knote_set_kq(kn, kq);
+                       kn->kn_filtid = ~kev->filter;
+                       kn->kn_inuse = 1;  /* for f_attach() */
+                       kn->kn_status = KN_ATTACHING | KN_ATTACHED;
+
+                       /* was vanish support requested */
+                       if (kev->flags & EV_VANISHED) {
+                               kev->flags &= ~EV_VANISHED;
+                               kn->kn_status |= KN_REQVANISH;
+                       }
+
+                       /* snapshot matching/dispatching protocol flags into knote */
+                       if (kev->flags & EV_DISPATCH)
+                               kn->kn_status |= KN_DISPATCH;
+                       if (kev->flags & EV_UDATA_SPECIFIC)
+                               kn->kn_status |= KN_UDATA_SPECIFIC;
+
+                       /*
+                        * copy the kevent state into knote
+                        * protocol is that fflags and data
+                        * are saved off, and cleared before
+                        * calling the attach routine.
+                        */
+                       kn->kn_kevent = *kev;
                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
-                       kev->fflags = 0;
-                       kev->data = 0;
-                       kn->kn_kevent = *kev;
-                       kn->kn_inuse = 1;  /* for f_attach() */
-                       kn->kn_status = KN_ATTACHING;
+                       kn->kn_fflags = 0;
+                       kn->kn_data = 0;
+
+                       /* invoke pthread kext to convert kevent qos to thread qos */
+                       if (kq->kq_state & KQ_WORKQ) {
+                               kn->kn_qos = canonicalize_kevent_qos(kn->kn_qos);
+                               knote_set_qos_index(kn, qos_index_from_qos(kn->kn_qos, FALSE));
+                               knote_set_qos_override_index(kn, QOS_INDEX_KQFILE);
+                               assert(knote_get_qos_index(kn) < KQWQ_NQOS);
+                       } else {
+                               knote_set_qos_index(kn, QOS_INDEX_KQFILE);
+                               knote_set_qos_override_index(kn, QOS_INDEX_KQFILE);
+                       }
 
                        /* before anyone can find it */
                        if (kev->flags & EV_DISABLE)
-                               kn->kn_status |= KN_DISABLED;
+                               knote_disable(kn);
 
-                       error = knote_fdpattach(kn, fdp, p);
+                       /* Add the knote for lookup thru the fd table */
+                       error = knote_fdadd(kn, p);
                        proc_fdunlock(p);
 
                        if (error) {
                                knote_free(kn);
-                               goto done;
+                               if (fp != NULL)
+                                       fp_drop(p, kev->ident, fp, 0);
+                               goto out;
                        }
 
-                       /*
-                        * apply reference count to knote structure, and
-                        * do not release it at the end of this routine.
-                        */
-                       fp = NULL;
+                       /* fp reference count now applies to knote */
 
-                       error = fops->f_attach(kn);
+                       /* call filter attach routine */
+                       result = fops->f_attach(kn);
 
-                       kqlock(kq);
+                       /*
+                        * Trade knote use count for kq lock.
+                        * Cannot be dropped because we held
+                        * KN_ATTACHING throughout.
+                        */
+                       knoteuse2kqlock(kq, kn, 1);
 
-                       if (error != 0) {
+                       if (kn->kn_flags & EV_ERROR) {
                                /*
                                 * Failed to attach correctly, so drop.
                                 * All other possible users/droppers
-                                * have deferred to us.
+                                * have deferred to us.  Save the error
+                                * to return to our caller.
                                 */
+                               kn->kn_status &= ~KN_ATTACHED;
                                kn->kn_status |= KN_DROPPING;
+                               error = kn->kn_data;
                                kqunlock(kq);
                                knote_drop(kn, p);
-                               goto done;
-                       } else if (kn->kn_status & KN_DROPPING) {
+                               goto out;
+                       }
+
+                       /* end "attaching" phase - now just attached */
+                       kn->kn_status &= ~KN_ATTACHING;
+
+                       if (kn->kn_status & KN_DROPPING) {
                                /*
                                 * Attach succeeded, but someone else
                                 * deferred their drop - now we have
-                                * to do it for them (after detaching).
+                                * to do it for them.
                                 */
                                kqunlock(kq);
-                               kn->kn_fop->f_detach(kn);
                                knote_drop(kn, p);
-                               goto done;
+                               goto out;
                        }
-                       kn->kn_status &= ~KN_ATTACHING;
-                       kqunlock(kq);
+
+                       /*
+                        * If the attach routine indicated that an
+                        * event is already fired, activate the knote.
+                        */
+                       if (result)
+                               knote_activate(kn);
+
                } else {
                        proc_fdunlock(p);
                        error = ENOENT;
-                       goto done;
+                       goto out;
                }
+
        } else {
                /* existing knote - get kqueue lock */
                kqlock(kq);
                proc_fdunlock(p);
 
+               if ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) != 0) {
+                       /*
+                        * The knote is not in a stable state, wait for that
+                        * transition to complete and then redrive the lookup.
+                        */
+                       kn->kn_status |= KN_USEWAIT;
+                       waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
+                                           CAST_EVENT64_T(&kn->kn_status),
+                                           THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
+                       kqunlock(kq);
+                       thread_block(THREAD_CONTINUE_NULL);
+                       goto restart;
+               }
+
                if (kev->flags & EV_DELETE) {
+
+                       /*
+                        * If attempting to delete a disabled dispatch2 knote,
+                        * we must wait for the knote to be re-enabled (unless
+                        * it is being re-enabled atomically here).
+                        */
                        if ((kev->flags & EV_ENABLE) == 0 &&
-                           (kev->flags & EV_DISPATCH2) == EV_DISPATCH2 &&
-                           (kn->kn_status & KN_DISABLED) == KN_DISABLED) {
-                               /* mark for deferred drop */
-                               kn->kn_status |= KN_DEFERDROP;
+                           (kn->kn_status & (KN_DISPATCH2 | KN_DISABLED)) ==
+                                            (KN_DISPATCH2 | KN_DISABLED)) {
+                               kn->kn_status |= KN_DEFERDELETE;
                                kqunlock(kq);
                                error = EINPROGRESS;
+                       } else if (kqlock2knotedrop(kq, kn)) {
+                               knote_drop(kn, p);
                        } else {
-                               knote_dequeue(kn);
-                               kn->kn_status |= KN_DISABLED;
-                               if (kqlock2knotedrop(kq, kn)) {
-                                       kn->kn_fop->f_detach(kn);
-                                       knote_drop(kn, p);
-                               } else {
-                                       /* pretend we didn't find it */
-                                       error = ENOENT;
-                               }
-                       }
-                       goto done;
-               }
-
-               /* update status flags for existing knote */
-               if (kev->flags & EV_DISABLE) {
-                       knote_dequeue(kn);
-                       kn->kn_status |= KN_DISABLED;
-
-               } else if ((kev->flags & EV_ENABLE) &&
-                          (kn->kn_status & KN_DISABLED)) {
-                       kn->kn_status &= ~KN_DISABLED;
-
-                       /* handle deferred drop */
-                       if (kn->kn_status & KN_DEFERDROP) {
-                               kn->kn_status &= ~KN_DEFERDROP;
-                               kn->kn_flags |= (EV_DELETE | EV_ONESHOT);
-                               knote_activate(kn, 0);
-                               kqunlock(kq);
-                               goto done;
-                       }
-
-                       if (kn->kn_status & KN_ACTIVE) {
-                               /* force re-activate if previously active */
-                               knote_activate(kn, 1);
+                               /*
+                                * The kqueue is unlocked, it's not being
+                                * dropped, and kqlock2knotedrop returned 0:
+                                * this means that someone stole the drop of
+                                * the knote from us.
+                                */
+                               error = EINPROGRESS;
                        }
+                       goto out;
                }
 
                /*
-                * The user may change some filter values after the
-                * initial EV_ADD, but doing so will not reset any
-                * filter which have already been triggered.
+                * If we are re-enabling a deferred-delete knote,
+                * just enable it now and avoid calling the
+                * filter touch routine (it has delivered its
+                * last event already).
                 */
-               kn->kn_kevent.udata = kev->udata;
-               if (fops->f_isfd || fops->f_touch == NULL) {
-                       kn->kn_sfflags = kev->fflags;
-                       kn->kn_sdata = kev->data;
+               if ((kev->flags & EV_ENABLE) &&
+                   (kn->kn_status & KN_DEFERDELETE)) {
+                       assert(kn->kn_status & KN_DISABLED);
+                       knote_activate(kn);
+                       knote_enable(kn);
+                       kqunlock(kq);
+                       goto out;
                }
 
                /*
-                * If somebody is in the middle of dropping this
-                * knote - go find/insert a new one.  But we have
-                * wait for this one to go away first. Attaches
-                * running in parallel may also drop/modify the
-                * knote.  Wait for those to complete as well and
-                * then start over if we encounter one.
+                * If we are disabling, do it before unlocking and
+                * calling the touch routine (so no processing can
+                * see the new kevent state before the disable is
+                * applied).
                 */
-               if (!kqlock2knoteusewait(kq, kn)) {
-                       /* kqueue, proc_fdlock both unlocked */
-                       goto restart;
-               }
+               if (kev->flags & EV_DISABLE)
+                       knote_disable(kn);
 
                /*
-                * Call touch routine to notify filter of changes
-                * in filter values.
+                * Convert the kqlock to a use reference on the
+                * knote so we can call the filter touch routine.
                 */
-               if (!fops->f_isfd && fops->f_touch != NULL)
-                       fops->f_touch(kn, kev, EVENT_REGISTER);
-       }
-       /* still have use ref on knote */
+               if (kqlock2knoteuse(kq, kn)) {
+
+                       /*
+                        * Call touch routine to notify filter of changes
+                        * in filter values (and to re-determine if any
+                        * events are fired).
+                        */
+                       result = knote_fops(kn)->f_touch(kn, kev);
+
+                       /* Get the kq lock back (don't defer droppers). */
+                       if (!knoteuse2kqlock(kq, kn, 0)) {
+                               kqunlock(kq);
+                               goto out;
+                       }
+
+                       /* Activate it if the touch routine said to */
+                       if (result)
+                               knote_activate(kn);
+               }
+
+               /* Enable the knote if called for */
+               if (kev->flags & EV_ENABLE)
+                       knote_enable(kn);
 
-       /*
-        * Invoke the filter routine to see if it should be enqueued now.
-        */
-#if 0
-       if (kn->kn_fop->f_event(kn, 0)) {
-#else
-       /*
-        * JMM - temporary workaround until rdar://problem/19986199 
-        * This potentially results in extra wakeups for KN_STAYQUEUED event types,
-        * but waking up only truly active ones (yet trying below to determine
-        * active status, by invoking the filter routine, is having side-effects).
-        */
-       if ((kn->kn_status & KN_STAYQUEUED) || kn->kn_fop->f_event(kn, 0)) {
-#endif
-               if (knoteuse2kqlock(kq, kn))
-                       knote_activate(kn, (kn->kn_status & KN_STAYQUEUED));
-               kqunlock(kq);
-       } else {
-               knote_put(kn);
        }
 
-done:
-       if (fp != NULL)
-               fp_drop(p, kev->ident, fp, 0);
-       return (error);
+       /* still have kqlock held and knote is valid */
+       kqunlock(kq);
+
+ out:
+       /* output local errors through the kevent */
+       if (error) {
+               kev->flags |= EV_ERROR;
+               kev->data = error;
+       }
 }
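
To tie the enable/disable plumbing above to observable behavior, a sketch of EV_DISPATCH: delivery auto-disables the knote (the knote_disable() path), and a later EV_ENABLE through this same register path re-arms it.

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <unistd.h>

    int main(void)
    {
        struct kevent kev, out;
        int fds[2], kq = kqueue();
        pipe(fds);

        /* EV_DISPATCH: the knote is disabled as the event is delivered */
        EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
        kevent(kq, &kev, 1, NULL, 0, NULL);

        write(fds[1], "x", 1);
        kevent(kq, NULL, 0, &out, 1, NULL);     /* delivered once, then disabled */

        /* re-enable via the register path (EV_ENABLE -> knote_enable) */
        EV_SET(&kev, fds[0], EVFILT_READ, EV_ENABLE, 0, 0, NULL);
        kevent(kq, &kev, 1, NULL, 0, NULL);
        close(kq);
        return 0;
    }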
 
 
@@ -2138,168 +2767,217 @@ done:
  *     Validate that it is really still a triggered event
  *     by calling the filter routines (if necessary).  Hold
  *     a use reference on the knote to avoid it being detached.
- *     If it is still considered triggered, invoke the callback
- *     routine provided and move it to the provided inprocess
- *     queue.
+ *
+ *     If it is still considered triggered, we will have taken
+ *     a copy of the state under the filter lock.  We use that
+ *     snapshot to dispatch the knote for future processing (or
+ *     not, if this was a lost event).
+ *
+ *     Our caller assures us that nobody else can be processing
+ *     events from this knote during the whole operation. But
+ *     others can be touching or posting events to the knote
+ *     while we are processing it.
  *
  *     caller holds a reference on the kqueue.
  *     kqueue locked on entry and exit - but may be dropped
  */
 static int
-knote_process(struct knote *kn,
-    kevent_callback_t callback,
-    void *data,
-    struct kqtailq *inprocessp,
-    struct proc *p)
+knote_process(struct knote *kn,        
+       kevent_callback_t callback,
+       void *callback_data,
+       struct filt_process_s *process_data,
+       struct proc *p)
 {
-       struct kqueue *kq = kn->kn_kq;
        struct kevent_internal_s kev;
-       int touch;
-       int result;
-       int error;
+       struct kqueue *kq = knote_get_kq(kn);
+       int result = 0;
+       int error = 0;
+
+       bzero(&kev, sizeof(kev));
 
        /*
-        * Determine the kevent state we want to return.
-        *
-        * Some event states need to be revalidated before returning
-        * them, others we take the snapshot at the time the event
-        * was enqueued.
-        *
-        * Events with non-NULL f_touch operations must be touched.
-        * Triggered events must fill in kev for the callback.
-        *
-        * Convert our lock to a use-count and call the event's
-        * filter routine(s) to update.
+        * Must be active or stayactive
+        * Must be queued and not disabled/suppressed
         */
-       if ((kn->kn_status & KN_DISABLED) != 0) {
-               result = 0;
-               touch = 0;
-       } else {
-               int revalidate;
+       assert(kn->kn_status & KN_QUEUED);
+       assert(kn->kn_status & (KN_ACTIVE|KN_STAYACTIVE));
+       assert(!(kn->kn_status & (KN_DISABLED|KN_SUPPRESSED|KN_DROPPING)));
 
+       /*
+        * For deferred-drop or vanished events, we just create a fake
+        * event to acknowledge end-of-life.  Otherwise, we call the
+        * filter's process routine to snapshot the kevent state under
+        * the filter's locking protocol.
+        */
+       if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
+               /* create fake event */
+               kev.filter = kn->kn_filter;
+               kev.ident = kn->kn_id;
+               kev.qos = kn->kn_qos;
+               kev.flags = (kn->kn_status & KN_DEFERDELETE) ? 
+                           EV_DELETE : EV_VANISHED;
+               kev.flags |= (EV_DISPATCH2 | EV_ONESHOT);
+               kev.udata = kn->kn_udata;
                result = 1;
-               revalidate = ((kn->kn_status & KN_STAYQUEUED) != 0 ||
-                   (kn->kn_flags & EV_ONESHOT) == 0);
-               touch = (!kn->kn_fop->f_isfd && kn->kn_fop->f_touch != NULL);
 
-               if (revalidate || touch) {
-                       if (revalidate)
-                               knote_deactivate(kn);
+               knote_suppress(kn);
+       } else {
 
-                       /* call the filter/touch routines with just a ref */
-                       if (kqlock2knoteuse(kq, kn)) {
-                               /* if we have to revalidate, call the filter */
-                               if (revalidate) {
-                                       result = kn->kn_fop->f_event(kn, 0);
-                               }
+               /* deactivate - so new activations indicate a wakeup */
+               knote_deactivate(kn);
 
-                               /*
-                                * capture the kevent data - using touch if
-                                * specified
-                                */
-                               if (result && touch) {
-                                       kn->kn_fop->f_touch(kn, &kev,
-                                           EVENT_PROCESS);
-                               }
-                               if (result && (kn->kn_status & KN_TOUCH))
-                                       kn->kn_fop->f_touch(kn, &kev,
-                                           EVENT_PROCESS);
+               /* suppress knotes to avoid returning the same event multiple times in a single call. */
+               knote_suppress(kn);
 
-                               /*
-                                * convert back to a kqlock - bail if the knote
-                                * went away
-                                */
-                               if (!knoteuse2kqlock(kq, kn)) {
-                                       return (EJUSTRETURN);
-                               } else if (result) {
-                                       /*
-                                        * if revalidated as alive, make sure
-                                        * it's active
-                                        */
-                                       knote_activate(kn, 0);
+               /* convert lock to a knote use reference */
+               if (!kqlock2knoteuse(kq, kn))
+                       panic("dropping knote found on queue\n");
 
-                                       /*
-                                        * capture all events that occurred
-                                        * during filter
-                                        */
-                                       if (!touch) {
-                                               kev = kn->kn_kevent;
-                                       }
+               /* call out to the filter to process with just a ref */
+               result = knote_fops(kn)->f_process(kn, process_data, &kev);
+
+               /*
+                * convert our reference back to a lock. accept drop
+                * responsibility from others if we've committed to
+                * delivering event data.
+                */
+               if (!knoteuse2kqlock(kq, kn, result)) {
+                       /* knote dropped */
+                       kn = NULL;
+               }
+       }
 
-                               } else if ((kn->kn_status & KN_STAYQUEUED) == 0) {
+       if (kn != NULL) {
+               /*
+                * Determine how to dispatch the knote for future event handling.
+                * Not-fired:  just return (do not call out; leave deactivated).
+                * One-shot:   if dispatch2, enter deferred-delete mode (unless
+                *             this is the deferred-delete event delivery itself).
+                *             Otherwise, drop it.
+                * Stolen drop: we took responsibility for someone else's drop
+                *             attempt; treat this just like one-shot and prepare
+                *             to turn it back into a deferred delete if required.
+                * Dispatch:   don't clear state, just mark it disabled.
+                * Cleared:    just leave it deactivated.
+                * Others:     re-activate, as there may be more events to handle.
+                *             This will not wake up more handlers right now, but
+                *             at the completion of handling events it may trigger
+                *             more handler threads (TODO: optimize based on more
+                *             than just this one event being detected by the filter).
+                */
+
+               if (result == 0)
+                       return (EJUSTRETURN);
+
+               if ((kev.flags & EV_ONESHOT) || (kn->kn_status & KN_STOLENDROP)) {
+                       if ((kn->kn_status & (KN_DISPATCH2 | KN_DEFERDELETE)) == KN_DISPATCH2) {
+                               /* defer dropping non-delete oneshot dispatch2 events */
+                               kn->kn_status |= KN_DEFERDELETE;
+                               knote_disable(kn);
+
+                               /* if we took over another's drop clear those flags here */
+                               if (kn->kn_status & KN_STOLENDROP) {
+                                       assert(kn->kn_status & KN_DROPPING);
                                        /*
-                                        * was already dequeued, so just bail on
-                                        * this one
+                                        * the knote will be dropped when the
+                                        * deferred deletion occurs
                                         */
-                                       return (EJUSTRETURN);
+                                       kn->kn_status &= ~(KN_DROPPING|KN_STOLENDROP);
                                }
-                       } else {
-                               return (EJUSTRETURN);
+                       } else if (kn->kn_status & KN_STOLENDROP) {
+                               /* We now own the drop of the knote. */
+                               assert(kn->kn_status & KN_DROPPING);
+                               knote_unsuppress(kn);
+                               kqunlock(kq);
+                               knote_drop(kn, p);
+                               kqlock(kq);
+                       } else if (kqlock2knotedrop(kq, kn)) {
+                               /* just EV_ONESHOT, _not_ DISPATCH2 */
+                               knote_drop(kn, p);
+                               kqlock(kq);
                        }
-               } else {
-                       kev = kn->kn_kevent;
+               } else if (kn->kn_status & KN_DISPATCH) {
+                       /* disable all dispatch knotes */
+                       knote_disable(kn);
+               } else if ((kev.flags & EV_CLEAR) == 0) {
+                       /* re-activate in case there are more events */
+                       knote_activate(kn);
                }
        }
 
-       /* move knote onto inprocess queue */
-       assert(kn->kn_tq == &kq->kq_head);
-       TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
-       kn->kn_tq = inprocessp;
-       TAILQ_INSERT_TAIL(inprocessp, kn, kn_tqe);
-
        /*
-        * Determine how to dispatch the knote for future event handling.
-        * not-fired: just return (do not callout).
-        * One-shot: If dispatch2, enter deferred-delete mode (unless this is
-        *           is the deferred delete event delivery itself).  Otherwise,
-        *           deactivate and drop it.
-        * Clear: deactivate and clear the state.
-        * Dispatch: don't clear state, just deactivate it and mark it disabled.
-        * All others: just leave where they are.
+        * callback to handle each event as we find it.
+        * If we have to detach and drop the knote, do
+        * it while we have the kq unlocked.
         */
+       if (result) {
+               kqunlock(kq);
+               error = (callback)(kq, &kev, callback_data);
+               kqlock(kq);
+       }
+       return (error);
+}
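
The disposition rules above reduce to a small decision function. The following is a user-space sketch of that table only; the flag values are illustrative stand-ins, not the kernel's definitions.

/* Hedged sketch of the knote disposition decision in knote_process().
 * Flag values are illustrative, not the kernel's. */
#include <stdio.h>

#define EV_ONESHOT     0x0010
#define EV_CLEAR       0x0020
#define KN_DISPATCH    0x0100
#define KN_DISPATCH2   0x0300   /* includes KN_DISPATCH */
#define KN_DEFERDELETE 0x0400

enum disposition { DEFER_DELETE, DROP, DISABLE, LEAVE, REACTIVATE };

static enum disposition
dispose(unsigned kev_flags, unsigned kn_status)
{
	if (kev_flags & EV_ONESHOT) {
		/* defer dropping non-delete oneshot dispatch2 events */
		if ((kn_status & (KN_DISPATCH2 | KN_DEFERDELETE)) == KN_DISPATCH2)
			return DEFER_DELETE;
		return DROP;
	}
	if (kn_status & KN_DISPATCH)
		return DISABLE;         /* disable all dispatch knotes */
	if (kev_flags & EV_CLEAR)
		return LEAVE;           /* leave deactivated */
	return REACTIVATE;              /* may be more events to handle */
}

int main(void)
{
	printf("%d\n", dispose(EV_ONESHOT, KN_DISPATCH2)); /* DEFER_DELETE */
	printf("%d\n", dispose(0, KN_DISPATCH));           /* DISABLE */
	printf("%d\n", dispose(EV_CLEAR, 0));              /* LEAVE */
	return 0;
}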
 
-       if (result == 0) {
-               return (EJUSTRETURN);
-       } else if ((kn->kn_flags & EV_ONESHOT) != 0) {
-               knote_deactivate(kn);
-               if ((kn->kn_flags & (EV_DISPATCH2|EV_DELETE)) == EV_DISPATCH2) {
-                       /* defer dropping non-delete oneshot dispatch2 events */
-                       kn->kn_status |= (KN_DISABLED | KN_DEFERDROP);
-                       kqunlock(kq);
-               } else if (kqlock2knotedrop(kq, kn)) {
-                       kn->kn_fop->f_detach(kn);
-                       knote_drop(kn, p);
-               }
-       } else if ((kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) != 0) {
-               if ((kn->kn_flags & EV_DISPATCH) != 0) {
-                       /* deactivate and disable all dispatch knotes */
-                       knote_deactivate(kn);
-                       kn->kn_status |= KN_DISABLED;
-               } else if (!touch || kn->kn_fflags == 0) {
-                       /* only deactivate if nothing since the touch */
-                       knote_deactivate(kn);
-               }
-               if (!touch && (kn->kn_flags & EV_CLEAR) != 0) {
-                       /* manually clear non-touch knotes */
-                       kn->kn_data = 0;
-                       kn->kn_fflags = 0;
+
+/*
+ * Return 0 to indicate that processing should proceed,
+ * -1 if there is nothing to process.
+ *
+ * Called with kqueue locked and returns the same way,
+ * but may drop lock temporarily.
+ */
+static int
+kqworkq_begin_processing(struct kqworkq *kqwq, kq_index_t qos_index, int flags)
+{
+       struct kqrequest *kqr;
+       thread_t self = current_thread();
+       __assert_only struct uthread *ut = get_bsdthread_info(self);
+       thread_t thread;
+
+       assert(kqwq->kqwq_state & KQ_WORKQ);
+       assert(qos_index < KQWQ_NQOS);
+
+       kqwq_req_lock(kqwq);
+       kqr = kqworkq_get_request(kqwq, qos_index);
+
+       thread = kqr->kqr_thread;
+
+       /* manager skips buckets that haven't asked for its help */
+       if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
+
+               /* If nothing for manager to do, just return */
+               if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
+                       assert(kqr->kqr_thread != self);
+                       kqwq_req_unlock(kqwq);
+                       return -1;
                }
-               kqunlock(kq);
+
+               /* bind manager thread from this time on */
+               kqworkq_bind_thread(kqwq, qos_index, self, flags);
+
        } else {
-               /*
-                * leave on inprocess queue.  We'll
-                * move all the remaining ones back
-                * the kq queue and wakeup any
-                * waiters when we are done.
-                */
-               kqunlock(kq);
+               /* must have been bound by now */
+               assert(thread == self);
+               assert(ut->uu_kqueue_bound == qos_index);
+               assert((ut->uu_kqueue_flags & flags) == ut->uu_kqueue_flags);
        }
 
-       /* callback to handle each event as we find it */
-       error = (callback)(kq, &kev, data);
+       /* nobody else should still be processing */
+       assert(kqr->kqr_state & KQWQ_THREQUESTED);
+       assert((kqr->kqr_state & KQWQ_PROCESSING) == 0);
+                  
+       /* anything left to process? */
+       if (kqueue_queue_empty(&kqwq->kqwq_kqueue, qos_index)) {
+               kqwq_req_unlock(kqwq);
+               return -1;
+       }
 
-       kqlock(kq);
-       return (error);
+       /* convert to processing mode */
+       /* reset workq triggers and thread requests - maybe processing */
+       kqr->kqr_state &= ~(KQWQ_HOOKCALLED | KQWQ_WAKEUP);
+       kqr->kqr_state |= KQWQ_PROCESSING;
+       kqwq_req_unlock(kqwq);
+       return 0;
 }
 
 /*
@@ -2308,45 +2986,378 @@ knote_process(struct knote *kn,
  *
  * Called with kqueue locked and returns the same way,
  * but may drop lock temporarily.
+ * May block.
  */
 static int
-kqueue_begin_processing(struct kqueue *kq)
+kqueue_begin_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags)
 {
+       struct kqtailq *suppressq;
+
+       if (kq->kq_state & KQ_WORKQ)
+               return kqworkq_begin_processing((struct kqworkq *)kq, qos_index, flags);
+
+       assert(qos_index == QOS_INDEX_KQFILE);
+
+       /* wait to become the exclusive processing thread */
        for (;;) {
-               if (kq->kq_count == 0) {
-                       return (-1);
-               }
+               if (kq->kq_state & KQ_DRAIN)
+                       return -1;
+
+               if ((kq->kq_state & KQ_PROCESSING) == 0)
+                       break;
 
                /* if someone else is processing the queue, wait */
-               if (kq->kq_nprocess != 0) {
-                       waitq_assert_wait64((struct waitq *)kq->kq_wqs,
-                                           CAST_EVENT64_T(&kq->kq_nprocess),
-                                           THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
-                       kq->kq_state |= KQ_PROCWAIT;
-                       kqunlock(kq);
-                       thread_block(THREAD_CONTINUE_NULL);
-                       kqlock(kq);
+               kq->kq_state |= KQ_PROCWAIT;
+               suppressq = kqueue_get_suppressed_queue(kq, qos_index);
+               waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
+                                   CAST_EVENT64_T(suppressq),
+                                   THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
+               
+               kqunlock(kq);
+               thread_block(THREAD_CONTINUE_NULL);
+               kqlock(kq);
+       }
+
+       /* Nobody else processing */
+
+       /* clear pre-posts and KQ_WAKEUP now, in case we bail early */
+       waitq_set_clear_preposts(&kq->kq_wqs);
+       kq->kq_state &= ~KQ_WAKEUP;
+                  
+       /* anything left to process? */
+       if (kqueue_queue_empty(kq, qos_index))
+               return -1;
+
+       /* convert to processing mode */
+       kq->kq_state |= KQ_PROCESSING;
+
+       return 0;
+}
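
The KQ_PROCESSING/KQ_PROCWAIT handshake above is a single-owner gate: one thread claims the queue, and late arrivals sleep on the suppressed-queue address until the owner finishes. A minimal user-space analog with pthreads (the kernel uses its waitq primitives, not condition variables; all names here are hypothetical):

/* Sketch of the exclusive-processing gate in kqueue_begin/end_processing(). */
#include <pthread.h>
#include <stdio.h>

struct kq_gate {
	pthread_mutex_t lock;   /* analog of the kqlock */
	pthread_cond_t  cv;     /* analog of the suppressed-queue waitq */
	int processing;         /* analog of KQ_PROCESSING */
};

static void gate_begin(struct kq_gate *g)
{
	pthread_mutex_lock(&g->lock);
	while (g->processing)                   /* KQ_PROCWAIT + thread_block */
		pthread_cond_wait(&g->cv, &g->lock);
	g->processing = 1;                      /* convert to processing mode */
	pthread_mutex_unlock(&g->lock);
}

static void gate_end(struct kq_gate *g)
{
	pthread_mutex_lock(&g->lock);
	g->processing = 0;
	pthread_cond_broadcast(&g->cv);         /* like waitq_wakeup64_all */
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	struct kq_gate g = { PTHREAD_MUTEX_INITIALIZER,
	                     PTHREAD_COND_INITIALIZER, 0 };
	gate_begin(&g);
	printf("exclusive processing\n");
	gate_end(&g);
	return 0;
}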
+
+/*
+ *     kqworkq_end_processing - Complete the processing of a workq kqueue
+ *
+ *     We may have to request new threads.
+ *     This can happen when there are no waiting processing threads and:
+ *     - there were active events we never got to (count > 0)
+ *     - we pended waitq hook callouts during processing
+ *     - we pended wakeups while processing (or unsuppressing)
+ *
+ *     Called with kqueue lock held.
+ */
+static void
+kqworkq_end_processing(struct kqworkq *kqwq, kq_index_t qos_index, int flags)
+{
+#pragma unused(flags)
+
+       struct kqueue *kq = &kqwq->kqwq_kqueue;
+       struct kqtailq *suppressq = kqueue_get_suppressed_queue(kq, qos_index);
+
+       thread_t self = current_thread();
+       __assert_only struct uthread *ut = get_bsdthread_info(self);
+       struct knote *kn;
+       struct kqrequest *kqr;
+       int queued_events;
+       uint16_t pended;
+       thread_t thread;
+
+       assert(kqwq->kqwq_state & KQ_WORKQ);
+       assert(qos_index < KQWQ_NQOS);
+
+       /* leave early if we are not even processing */
+       kqwq_req_lock(kqwq);
+       kqr = kqworkq_get_request(kqwq, qos_index);
+       thread = kqr->kqr_thread;
+
+       if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
+               assert(ut->uu_kqueue_bound == KQWQ_QOS_MANAGER);
+               assert(ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER);
+
+               /* if this bucket didn't need manager help, bail */
+               if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
+                       assert(thread != self);
+                       kqwq_req_unlock(kqwq);
+                       return;
+               }
+
+               assert(kqr->kqr_state & KQWQ_THREQUESTED);
+
+               /* unbound bucket - see if still needs servicing */
+               if (thread == THREAD_NULL) {
+                       assert((kqr->kqr_state & KQWQ_PROCESSING) == 0);
+                       assert(TAILQ_EMPTY(suppressq));
                } else {
-                       kq->kq_nprocess = 1;
-                       return (0);
+                       assert(thread == self);
                }
+
+       } else {
+               assert(thread == self);
+               assert(ut->uu_kqueue_bound == qos_index);
+               assert((ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER) == 0);
        }
+
+       kqwq_req_unlock(kqwq);
+
+       /* Any events queued before we put suppressed ones back? */
+       queued_events = !kqueue_queue_empty(kq, qos_index);
+
+       /*
+        * Return suppressed knotes to their original state.
+        * For workq kqueues, suppressed ones that are still
+        * truly active (not just forced into the queue) will
+        * set flags we check below to see if anything got
+        * woken up.
+        */
+       while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
+               assert(kn->kn_status & KN_SUPPRESSED);
+               knote_unsuppress(kn);
+       }
+
+       kqwq_req_lock(kqwq);
+
+       /* Determine if wakeup-type events were pended during servicing */
+       pended = (kqr->kqr_state & (KQWQ_HOOKCALLED | KQWQ_WAKEUP));
+
+       /* unbind the thread */
+       kqworkq_unbind_thread(kqwq, qos_index, self, flags);
+
+       /* Indicate that we are done processing */
+       kqr->kqr_state &= ~(KQWQ_PROCESSING | \
+                           KQWQ_THREQUESTED | KQWQ_THMANAGER);
+
+       /*
+        * request a new thread if events have happened
+        * (not just putting stay-active events back).
+        */
+       if ((queued_events || pended) &&
+           !kqueue_queue_empty(kq, qos_index)) {
+               kqworkq_request_thread(kqwq, qos_index);
+       }
+
+       kqwq_req_unlock(kqwq);
 }
 
 /*
  * Called with kqueue lock held.
  */
 static void
-kqueue_end_processing(struct kqueue *kq)
+kqueue_end_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags)
+{
+       struct knote *kn;
+       struct kqtailq *suppressq;
+       int procwait;
+
+       if (kq->kq_state & KQ_WORKQ) {
+               kqworkq_end_processing((struct kqworkq *)kq, qos_index, flags);
+               return;
+       }
+
+       assert(qos_index == QOS_INDEX_KQFILE);
+
+       /*
+        * Return suppressed knotes to their original state.
+        * This path handles only file kqueues; the workq
+        * variant lives in kqworkq_end_processing above.
+        */
+       suppressq = kqueue_get_suppressed_queue(kq, qos_index);
+       while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
+               assert(kn->kn_status & KN_SUPPRESSED);
+               knote_unsuppress(kn);
+       }
+
+       procwait = (kq->kq_state & KQ_PROCWAIT);
+       kq->kq_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);
+
+       if (procwait) {
+               /* first wake up any thread already waiting to process */
+               waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
+                                  CAST_EVENT64_T(suppressq),
+                                  THREAD_AWAKENED,
+                                  WAITQ_ALL_PRIORITIES);
+       }               
+}
+
+/*
+ *     kevent_qos_internal_bind - bind thread to processing kqueue
+ *
+ *     Indicates that the provided thread will be responsible for
+ *     servicing the particular QoS class index specified in the
+ *     parameters. Once the binding is done, any overrides that may
+ *     be associated with the corresponding events can be applied.
+ *
+ *     This should be called as soon as the thread identity is known,
+ *     preferably while still at high priority during creation.
+ *
+ *     - caller holds a reference on the kqueue.
+ *     - the thread MUST call kevent_qos_internal after being bound
+ *       or the bucket of events may never be delivered.
+ *     - nothing locked (may take mutex or block).
+ */
+
+int
+kevent_qos_internal_bind(
+       struct proc *p,
+       int qos_class,
+       thread_t thread,
+       unsigned int flags)
+{
+       struct fileproc *fp = NULL;
+       struct kqueue *kq = NULL;
+       struct kqworkq *kqwq;
+       struct kqrequest *kqr;
+       struct uthread *ut;
+       kq_index_t qos_index;
+       int res = 0;
+
+       assert(thread != THREAD_NULL);
+       assert(flags & KEVENT_FLAG_WORKQ);
+
+       if (thread == THREAD_NULL ||
+           (flags & KEVENT_FLAG_WORKQ) == 0) {
+               return EINVAL;
+       }
+
+       ut = get_bsdthread_info(thread);
+
+       /* find the kqueue */
+       res = kevent_get_kq(p, -1, flags, &fp, &kq);
+       assert(fp == NULL);
+       if (res)
+               return res;
+
+       /* get the qos index we're going to service */
+       qos_index = qos_index_for_servicer(qos_class, thread, flags);
+       
+       /* No need to bind the manager thread to any bucket */
+       if (qos_index == KQWQ_QOS_MANAGER) {
+               assert(ut->uu_kqueue_bound == 0);
+               ut->uu_kqueue_bound = qos_index;
+               ut->uu_kqueue_flags = flags;
+               return 0;
+       }
+
+       kqlock(kq);
+       assert(kq->kq_state & KQ_WORKQ);
+       
+       kqwq = (struct kqworkq *)kq;
+       kqr = kqworkq_get_request(kqwq, qos_index);
+
+       kqwq_req_lock(kqwq);
+
+       /* 
+        * A (non-emergency) request should have been made
+        * and nobody should already be servicing this bucket.
+        */
+       assert(kqr->kqr_state & KQWQ_THREQUESTED);
+       assert((kqr->kqr_state & KQWQ_THMANAGER) == 0);
+       assert((kqr->kqr_state & KQWQ_PROCESSING) == 0);
+
+       /* Is this an extraneous bind? */
+       if (thread == kqr->kqr_thread) {
+               assert(ut->uu_kqueue_bound == qos_index);
+               goto out;
+       }
+
+       /* nobody else bound and we're not bound elsewhere */
+       assert(ut->uu_kqueue_bound == 0);
+       assert(ut->uu_kqueue_flags == 0);
+       assert(kqr->kqr_thread == THREAD_NULL);
+
+       /* Don't bind if there is a conflict */
+       if (kqr->kqr_thread != THREAD_NULL ||
+           (kqr->kqr_state & KQWQ_THMANAGER)) {
+               res = EINPROGRESS;
+               goto out;
+       }
+
+       /* finally bind the thread */
+       kqr->kqr_thread = thread;
+       ut->uu_kqueue_bound = qos_index;
+       ut->uu_kqueue_flags = flags;
+
+       /* add any pending overrides to the thread */
+       if (kqr->kqr_override_delta) {
+               thread_add_ipc_override(thread, qos_index + kqr->kqr_override_delta);
+       }
+
+out:
+       kqwq_req_unlock(kqwq);
+       kqunlock(kq);
+
+       return res;
+}
+
+/*
+ *     kevent_qos_internal_unbind - unbind thread from processing kqueue
+ *
+ *     End processing the per-QoS bucket of events and allow other threads
+ *     to be requested for future servicing.  
+ *
+ *     caller holds a reference on the kqueue.
+ *     thread is the current thread.
+ */
+
+int
+kevent_qos_internal_unbind(
+       struct proc *p,
+       int qos_class,
+       thread_t thread,
+       unsigned int flags)
 {
-       kq->kq_nprocess = 0;
-       if (kq->kq_state & KQ_PROCWAIT) {
-               kq->kq_state &= ~KQ_PROCWAIT;
-               waitq_wakeup64_all((struct waitq *)kq->kq_wqs,
-                                  CAST_EVENT64_T(&kq->kq_nprocess),
-                                  THREAD_AWAKENED,
-                                  WAITQ_ALL_PRIORITIES);
+       struct kqueue *kq;
+       struct uthread *ut;
+       struct fileproc *fp = NULL;
+       kq_index_t qos_index;
+       kq_index_t end_index;
+       int res;
+
+       assert(flags & KEVENT_FLAG_WORKQ);
+       assert(thread == current_thread());
+
+       if (thread == THREAD_NULL ||
+           (flags & KEVENT_FLAG_WORKQ) == 0)
+               return EINVAL;
+           
+       /* get the kq */
+       res = kevent_get_kq(p, -1, flags, &fp, &kq);
+       assert(fp == NULL);
+       if (res)
+               return res;
+
+       assert(kq->kq_state & KQ_WORKQ);
+
+       /* get the index we have been servicing */
+       qos_index = qos_index_for_servicer(qos_class, thread, flags);
+
+       ut = get_bsdthread_info(thread);
+
+       /* early out if we were already unbound - or never bound */
+       if (ut->uu_kqueue_bound != qos_index) {
+               __assert_only struct kqworkq *kqwq = (struct kqworkq *)kq;
+               __assert_only struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
+
+               assert(ut->uu_kqueue_bound == 0);
+               assert(ut->uu_kqueue_flags == 0);
+               assert(kqr->kqr_thread != thread);
+               return EALREADY;
        }
+
+       /* unbind from all the buckets we might own */
+       end_index = (qos_index == KQWQ_QOS_MANAGER) ? 
+                   0 : qos_index;
+       kqlock(kq);
+       do {
+               kqueue_end_processing(kq, qos_index, flags);
+       } while (qos_index-- > end_index);
+       kqunlock(kq);
+
+       /* indicate that we are done processing in the uthread */
+       ut->uu_kqueue_bound = 0;
+       ut->uu_kqueue_flags = 0;
+
+       return 0;
 }
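
Taken together with kevent_qos_internal_bind() above, the expected servicer lifecycle is bind, then service, then unbind. The sketch below compiles in user space with stub stand-ins for the kernel entry points; the real argument lists are elided and the flag/QoS values are illustrative only.

/* User-space sketch of a workq servicer thread's lifecycle; every
 * function here is a stub stand-in for the kernel entry point of the
 * same name. */
#include <stdio.h>

typedef int thread_t;
static thread_t current_thread(void) { return 1; }

static int kevent_qos_internal_bind(int qos, thread_t t, unsigned fl)
{ printf("bind qos=%d thread=%d flags=%#x\n", qos, t, fl); return 0; }
static int kevent_qos_internal(void)
{ printf("drain the bound bucket of events\n"); return 0; }
static int kevent_qos_internal_unbind(int qos, thread_t t, unsigned fl)
{ printf("unbind qos=%d\n", qos); return 0; }

int main(void)
{
	unsigned flags = 0x20;  /* illustrative stand-in for KEVENT_FLAG_WORKQ */
	int qos = 3;            /* illustrative QoS class */

	/* bind early, while still at creation priority, so pending
	 * overrides can be applied to this thread */
	kevent_qos_internal_bind(qos, current_thread(), flags);

	/* MUST follow the bind, or the bucket may never be drained */
	kevent_qos_internal();

	/* allow new thread requests for this bucket */
	kevent_qos_internal_unbind(qos, current_thread(), flags);
	return 0;
}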
 
 /*
@@ -2367,65 +3378,74 @@ kqueue_end_processing(struct kqueue *kq)
 static int
 kqueue_process(struct kqueue *kq,
     kevent_callback_t callback,
-    void *data,
+    void *callback_data,
+    struct filt_process_s *process_data,
+    kq_index_t servicer_qos_index,
     int *countp,
     struct proc *p)
 {
-       struct kqtailq inprocess;
+       unsigned int flags = process_data ? process_data->fp_flags : 0;
+       kq_index_t start_index, end_index, i;
        struct knote *kn;
-       int nevents;
-       int error;
-
-       TAILQ_INIT(&inprocess);
-
-       if (kqueue_begin_processing(kq) == -1) {
-               *countp = 0;
-               /* Nothing to process */
-               return (0);
-       }
+       int nevents = 0;
+       int error = 0;
 
        /*
-        * Clear any pre-posted status from previous runs, so we
-        * only detect events that occur during this run.
+        * Based on the native QoS of the servicer,
+        * determine the range of QoSes that need checking
         */
-       waitq_set_clear_preposts(kq->kq_wqs);
+       start_index = servicer_qos_index;
+       end_index = (start_index == KQWQ_QOS_MANAGER) ? 0 : start_index;
+       
+       i = start_index;
 
-       /*
-        * loop through the enqueued knotes, processing each one and
-        * revalidating those that need it. As they are processed,
-        * they get moved to the inprocess queue (so the loop can end).
-        */
-       error = 0;
-       nevents = 0;
+       do {
+               if (kqueue_begin_processing(kq, i, flags) == -1) {
+                       *countp = 0;
+                       /* Nothing to process */
+                       continue;
+               }
 
-       while (error == 0 &&
-           (kn = TAILQ_FIRST(&kq->kq_head)) != NULL) {
-               error = knote_process(kn, callback, data, &inprocess, p);
-               if (error == EJUSTRETURN)
-                       error = 0;
-               else
-                       nevents++;
-       }
+               /*
+                * loop through the enqueued knotes, processing each one and
+                * revalidating those that need it. As they are processed,
+                * they get moved to the inprocess queue (so the loop can end).
+                */
+               error = 0;
 
-       /*
-        * With the kqueue still locked, move any knotes
-        * remaining on the inprocess queue back to the
-        * kq's queue and wake up any waiters.
-        */
-       while ((kn = TAILQ_FIRST(&inprocess)) != NULL) {
-               assert(kn->kn_tq == &inprocess);
-               TAILQ_REMOVE(&inprocess, kn, kn_tqe);
-               kn->kn_tq = &kq->kq_head;
-               TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
-       }
+               struct kqtailq *base_queue = kqueue_get_base_queue(kq, i);
+               struct kqtailq *queue = kqueue_get_high_queue(kq, i);
+               do {
+                       while (error == 0 &&
+                              (kn = TAILQ_FIRST(queue)) != NULL) {
+                               /* Process the knote */
+                               error = knote_process(kn, callback, callback_data, process_data, p);
+                               if (error == EJUSTRETURN)
+                                       error = 0;
+                               else
+                                       nevents++;
+
+                               /* break out if no more space for additional events */
+                               if (error == EWOULDBLOCK) {
+                                       if ((kq->kq_state & KQ_WORKQ) == 0)
+                                               kqueue_end_processing(kq, i, flags);
+                                       error = 0;
+                                       goto out;
+                               }
+                       }
+               } while (error == 0 && queue-- > base_queue);
+
+               /* let somebody else process events if we're not in workq mode */
+               if ((kq->kq_state & KQ_WORKQ) == 0)
+                       kqueue_end_processing(kq, i, flags);
 
-       kqueue_end_processing(kq);
+       } while (i-- > end_index);
 
+out:
        *countp = nevents;
        return (error);
 }
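
kqueue_process() therefore runs a two-level scan: an outer walk over QoS buckets (a single bucket for an ordinary servicer, all buckets down to zero for the manager) and an inner walk from each bucket's highest-override queue down to its base queue. A runnable sketch of just the iteration order, using the index tables defined later in this file (occupancy checks are omitted):

/* Sketch of the two-level scan order in kqueue_process(). */
#include <stdio.h>

#define NQOS 8  /* stand-in for KQWQ_NQOS */
static int base_index[NQOS] = {0, 0, 6, 11, 15, 18, 20, 21};
static int high_index[NQOS] = {0, 5, 10, 14, 17, 19, 20, 21};

int main(void)
{
	int start = 3;          /* servicer's QoS bucket */
	int end = start;        /* the manager would use end = 0 */
	int i = start;

	do {
		/* like: queue = high; ... while (queue-- > base_queue) */
		for (int q = high_index[i]; q >= base_index[i]; q--)
			printf("bucket %d: scan queue %d\n", i, q);
	} while (i-- > end);    /* like: while (i-- > end_index) */
	return 0;
}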
 
-
 static void
 kqueue_scan_continue(void *data, wait_result_t wait_result)
 {
@@ -2433,17 +3453,22 @@ kqueue_scan_continue(void *data, wait_result_t wait_result)
        uthread_t ut = (uthread_t)get_bsdthread_info(self);
        struct _kqueue_scan * cont_args = &ut->uu_kevent.ss_kqueue_scan;
        struct kqueue *kq = (struct kqueue *)data;
+       struct filt_process_s *process_data = cont_args->process_data;
        int error;
        int count;
 
        /* convert the (previous) wait_result to a proper error */
        switch (wait_result) {
-       case THREAD_AWAKENED:
+       case THREAD_AWAKENED: {
                kqlock(kq);
-               error = kqueue_process(kq, cont_args->call, cont_args, &count,
-                   current_proc());
+       retry:
+               error = kqueue_process(kq, cont_args->call, cont_args->data, 
+                                      process_data, cont_args->servicer_qos_index,
+                                      &count, current_proc());
                if (error == 0 && count == 0) {
-                       waitq_assert_wait64((struct waitq *)kq->kq_wqs,
+                       if (kq->kq_state & KQ_WAKEUP)
+                               goto retry;
+                       waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
                                            KQ_EVENT, THREAD_ABORTSAFE,
                                            cont_args->deadline);
                        kq->kq_state |= KQ_SLEEP;
@@ -2452,13 +3477,16 @@ kqueue_scan_continue(void *data, wait_result_t wait_result)
                        /* NOTREACHED */
                }
                kqunlock(kq);
-               break;
+               break;
        case THREAD_TIMED_OUT:
                error = EWOULDBLOCK;
                break;
        case THREAD_INTERRUPTED:
                error = EINTR;
                break;
+       case THREAD_RESTART:
+               error = EBADF;
+               break;
        default:
                panic("%s: - invalid wait_result (%d)", __func__,
                    wait_result);
@@ -2489,17 +3517,30 @@ int
 kqueue_scan(struct kqueue *kq,
            kevent_callback_t callback,
            kqueue_continue_t continuation,
-           void *data,
+           void *callback_data,
+           struct filt_process_s *process_data,
            struct timeval *atvp,
            struct proc *p)
 {
        thread_continue_t cont = THREAD_CONTINUE_NULL;
+       kq_index_t servicer_qos_index;
+       unsigned int flags;
        uint64_t deadline;
        int error;
        int first;
+       int fd;
 
        assert(callback != NULL);
 
+       /*
+        * Determine which QoS index we are servicing
+        */
+       flags = (process_data) ? process_data->fp_flags : 0;
+       fd = (process_data) ? process_data->fp_fd : -1;
+       servicer_qos_index = (kq->kq_state & KQ_WORKQ) ?
+           qos_index_for_servicer(fd, current_thread(), flags) :
+           QOS_INDEX_KQFILE;
+
        first = 1;
        for (;;) {
                wait_result_t wait_result;
@@ -2510,7 +3551,9 @@ kqueue_scan(struct kqueue *kq,
                 * triggered.
                 */
                kqlock(kq);
-               error = kqueue_process(kq, callback, data, &count, p);
+               error = kqueue_process(kq, callback, callback_data,
+                                      process_data, servicer_qos_index,
+                                      &count, p);
                if (error || count)
                        break; /* lock still held */
 
@@ -2543,13 +3586,21 @@ kqueue_scan(struct kqueue *kq,
                                cont_args->call = callback;
                                cont_args->cont = continuation;
                                cont_args->deadline = deadline;
-                               cont_args->data = data;
+                               cont_args->data = callback_data;
+                               cont_args->process_data = process_data;
+                               cont_args->servicer_qos_index = servicer_qos_index;
                                cont = kqueue_scan_continue;
                        }
                }
 
+               /* If awakened during processing, try again */
+               if (kq->kq_state & KQ_WAKEUP) {
+                       kqunlock(kq);
+                       continue;
+               }
+
                /* go ahead and wait */
-               waitq_assert_wait64_leeway((struct waitq *)kq->kq_wqs,
+               waitq_assert_wait64_leeway((struct waitq *)&kq->kq_wqs,
                                           KQ_EVENT, THREAD_ABORTSAFE,
                                           TIMEOUT_URGENCY_USER_NORMAL,
                                           deadline, TIMEOUT_NO_LEEWAY);
@@ -2562,9 +3613,11 @@ kqueue_scan(struct kqueue *kq,
                case THREAD_AWAKENED:
                        continue;
                case THREAD_TIMED_OUT:
-                       return (EWOULDBLOCK);
+                       return EWOULDBLOCK;
                case THREAD_INTERRUPTED:
-                       return (EINTR);
+                       return EINTR;
+               case THREAD_RESTART:
+                       return EBADF;
                default:
                        panic("%s: - bad wait_result (%d)", __func__,
                            wait_result);
@@ -2616,16 +3669,18 @@ kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
     __unused vfs_context_t ctx)
 {
        struct kqueue *kq = (struct kqueue *)fp->f_data;
+       struct kqtailq *queue;
+       struct kqtailq *suppressq;
        struct knote *kn;
-       struct kqtailq inprocessq;
        int retnum = 0;
 
        if (which != FREAD)
                return (0);
 
-       TAILQ_INIT(&inprocessq);
-
        kqlock(kq);
+
+       assert((kq->kq_state & KQ_WORKQ) == 0);
+
        /*
         * If this is the first pass, link the wait queue associated with the
         * the kqueue onto the wait queue set for the select().  Normally we
@@ -2639,7 +3694,7 @@ kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
                struct uthread * ut = get_bsdthread_info(cur_act);
 
                kq->kq_state |= KQ_SEL;
-               waitq_link((struct waitq *)kq->kq_wqs, ut->uu_wqset,
+               waitq_link((struct waitq *)&kq->kq_wqs, ut->uu_wqset,
                           WAITQ_SHOULD_LOCK, (uint64_t *)wq_link_id);
 
                /* always consume the reserved link object */
@@ -2655,56 +3710,64 @@ kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
                 * memcpy here because the pointer may not be properly aligned
                 * on 32-bit systems.
                 */
-               memcpy(wq_link_id, (void *)&(kq->kq_wqs), sizeof(void *));
+               void *wqptr = &kq->kq_wqs;
+               memcpy(wq_link_id, (void *)&wqptr, sizeof(void *));
        }
 
-       if (kqueue_begin_processing(kq) == -1) {
+       if (kqueue_begin_processing(kq, QOS_INDEX_KQFILE, 0) == -1) {
                kqunlock(kq);
                return (0);
        }
 
-       if (kq->kq_count != 0) {
+       queue = kqueue_get_base_queue(kq, QOS_INDEX_KQFILE);
+       if (!TAILQ_EMPTY(queue)) {
                /*
                 * there is something queued - but it might be a
-                * KN_STAYQUEUED knote, which may or may not have
-                * any events pending.  So, we have to walk the
-                * list of knotes to see, and peek at the stay-
-                * queued ones to be really sure.
+                * KN_STAYACTIVE knote, which may or may not have
+                * any events pending.  So we have to walk the list
+                * of knotes to check, and peek at the (non-vanished)
+                * stay-active ones to be really sure.
                 */
-               while ((kn = (struct knote *)TAILQ_FIRST(&kq->kq_head)) != NULL) {
-                       if ((kn->kn_status & KN_STAYQUEUED) == 0) {
+               while ((kn = (struct knote *)TAILQ_FIRST(queue)) != NULL) {
+                       if (kn->kn_status & KN_ACTIVE) {
                                retnum = 1;
                                goto out;
                        }
+                       assert(kn->kn_status & KN_STAYACTIVE);
+                       knote_suppress(kn);
+               }
 
-                       TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
-                       TAILQ_INSERT_TAIL(&inprocessq, kn, kn_tqe);
+               /*
+                * There were no regular events on the queue, so take
+                * a deeper look at the stay-queued ones we suppressed.
+                */
+               suppressq = kqueue_get_suppressed_queue(kq, QOS_INDEX_KQFILE);
+               while ((kn = (struct knote *)TAILQ_FIRST(suppressq)) != NULL) {
+                       unsigned peek = 1;
 
+                       /* if it didn't vanish while suppressed, peek at it */
                        if (kqlock2knoteuse(kq, kn)) {
-                               unsigned peek;
 
-                               peek = kn->kn_fop->f_peek(kn);
-                               if (knoteuse2kqlock(kq, kn)) {
-                                       if (peek > 0) {
-                                               retnum = 1;
-                                               goto out;
-                                       }
-                               } else {
-                                       retnum = 0;
-                               }
+                               peek = knote_fops(kn)->f_peek(kn);
+
+                               /* if it dropped while getting lock - move on */
+                               if (!knoteuse2kqlock(kq, kn, 0))
+                                       continue;
+                       }
+
+                       /* unsuppress it */
+                       knote_unsuppress(kn);
+
+                       /* has data or it has to report a vanish */
+                       if (peek > 0) {
+                               retnum = 1;
+                               goto out;
                        }
                }
        }
 
 out:
-       /* Return knotes to active queue */
-       while ((kn = TAILQ_FIRST(&inprocessq)) != NULL) {
-               TAILQ_REMOVE(&inprocessq, kn, kn_tqe);
-               kn->kn_tq = &kq->kq_head;
-               TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
-       }
-
-       kqueue_end_processing(kq);
+       kqueue_end_processing(kq, QOS_INDEX_KQFILE, 0);
        kqunlock(kq);
        return (retnum);
 }
@@ -2716,9 +3779,10 @@ out:
 static int
 kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
 {
-       struct kqueue *kq = (struct kqueue *)fg->fg_data;
+       struct kqfile *kqf = (struct kqfile *)fg->fg_data;
 
-       kqueue_dealloc(kq);
+       assert((kqf->kqf_state & KQ_WORKQ) == 0);
+       kqueue_dealloc(&kqf->kqf_kqueue);
        fg->fg_data = NULL;
        return (0);
 }
@@ -2732,12 +3796,18 @@ kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
 static int
 kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, __unused vfs_context_t ctx)
 {
-       struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
-       struct kqueue *parentkq = kn->kn_kq;
+       struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
+       struct kqueue *kq = &kqf->kqf_kqueue;
+       struct kqueue *parentkq = knote_get_kq(kn);
+
+       assert((kqf->kqf_state & KQ_WORKQ) == 0);
 
        if (parentkq == kq ||
-           kn->kn_filter != EVFILT_READ)
-               return (1);
+           kn->kn_filter != EVFILT_READ) {
+               kn->kn_flags = EV_ERROR;
+               kn->kn_data = EINVAL;
+               return 0;
+       }
 
        /*
         * We have to avoid creating a cycle when nesting kqueues
@@ -2755,7 +3825,9 @@ kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, __unused vfs_con
            parentkq->kq_level < kq->kq_level)
        {
                kqunlock(parentkq);
-               return (1);
+               kn->kn_flags = EV_ERROR;
+               kn->kn_data = EINVAL;
+               return 0;
        } else {
                /* set parent level appropriately */
                if (parentkq->kq_level == 0)
@@ -2764,14 +3836,16 @@ kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, __unused vfs_con
                        parentkq->kq_level = kq->kq_level + 1;
                kqunlock(parentkq);
 
-               kn->kn_fop = &kqread_filtops;
+               kn->kn_filtid = EVFILTID_KQREAD;
                kqlock(kq);
-               KNOTE_ATTACH(&kq->kq_sel.si_note, kn);
+               KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
                /* indicate nesting in child, if needed */
                if (kq->kq_level == 0)
                        kq->kq_level = 1;
+
+               int count = kq->kq_count;
                kqunlock(kq);
-               return (0);
+               return (count > 0);
        }
 }
 
@@ -2783,8 +3857,12 @@ static int
 kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
 {
        struct kqueue *kq = (struct kqueue *)fp->f_fglob->fg_data;
+
+       assert((kq->kq_state & KQ_WORKQ) == 0);
+
        kqlock(kq);
-       kqueue_wakeup(kq, 1);
+       kq->kq_state |= KQ_DRAIN;
+       kqueue_interrupt(kq);
        kqunlock(kq);
        return (0);
 }
@@ -2793,6 +3871,8 @@ kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
 int
 kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
 {
+       assert((kq->kq_state & KQ_WORKQ) == 0);
+
        kqlock(kq);
        if (isstat64 != 0) {
                struct stat64 *sb64 = (struct stat64 *)ub;
@@ -2809,58 +3889,549 @@ kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
                        sb64->st_blksize = sizeof(struct user32_kevent);
                sb64->st_mode = S_IFIFO;
        } else {
-               struct stat *sb = (struct stat *)ub;
+               struct stat *sb = (struct stat *)ub;
+
+               bzero((void *)sb, sizeof(*sb));
+               sb->st_size = kq->kq_count;
+               if (kq->kq_state & KQ_KEV_QOS)
+                       sb->st_blksize = sizeof(struct kevent_qos_s);
+               else if (kq->kq_state & KQ_KEV64)
+                       sb->st_blksize = sizeof(struct kevent64_s);
+               else if (IS_64BIT_PROCESS(p))
+                       sb->st_blksize = sizeof(struct user64_kevent);
+               else
+                       sb->st_blksize = sizeof(struct user32_kevent);
+               sb->st_mode = S_IFIFO;
+       }
+       kqunlock(kq);
+       return (0);
+}
+
+
+/*
+ * Interact with the pthread kext to request a servicing thread there.
+ * Eventually, this will request threads at specific QoS levels.
+ * For now, it only requests a dispatch-manager-QoS thread, and
+ * only one-at-a-time.
+ *
+ * - Caller holds the workq request lock
+ *
+ * - May be called with the kqueue's wait queue set locked,
+ *   so cannot do anything that could recurse on that.
+ */
+static void
+kqworkq_request_thread(
+       struct kqworkq *kqwq, 
+       kq_index_t qos_index)
+{
+       struct kqrequest *kqr;
+
+       assert(kqwq->kqwq_state & KQ_WORKQ);
+       assert(qos_index < KQWQ_NQOS);
+
+       kqr = kqworkq_get_request(kqwq, qos_index);
+
+       /* 
+        * If we have already requested a thread, and it hasn't
+        * started processing yet, there's no use hammering away
+        * on the pthread kext.
+        */
+       if (kqr->kqr_state & KQWQ_THREQUESTED)
+               return;
+
+       assert(kqr->kqr_thread == THREAD_NULL);
+
+       /* request additional workq threads if appropriate */
+       if (pthread_functions != NULL &&
+           pthread_functions->workq_reqthreads != NULL) {
+               unsigned int flags = KEVENT_FLAG_WORKQ;
+
+               /* Compute a priority based on qos_index. */
+               struct workq_reqthreads_req_s request = {
+                       .priority = qos_from_qos_index(qos_index),
+                       .count = 1
+               };
+
+               thread_t wqthread;
+               wqthread = (*pthread_functions->workq_reqthreads)(kqwq->kqwq_p, 1, &request);
+               kqr->kqr_state |= KQWQ_THREQUESTED;
+
+               /* Have we been switched to the emergency/manager thread? */
+               if (wqthread == (thread_t)-1) {
+                       flags |= KEVENT_FLAG_WORKQ_MANAGER;
+                       wqthread = THREAD_NULL;
+               } else if (qos_index == KQWQ_QOS_MANAGER)
+                       flags |= KEVENT_FLAG_WORKQ_MANAGER;
+
+               /* bind the thread */
+               kqworkq_bind_thread(kqwq, qos_index, wqthread, flags);
+       }
+}
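
Two details above are easy to miss: KQWQ_THREQUESTED gates the call so each bucket has at most one outstanding request, and a (thread_t)-1 return from the pthread kext means the request was redirected to the emergency/manager thread. A small user-space sketch of just that control flow (workq_reqthreads() is a stub; flag values are illustrative):

/* Sketch of the request gating in kqworkq_request_thread(). */
#include <stdio.h>

#define THREQUESTED  0x1
#define FLAG_MANAGER 0x8000             /* stand-in for KEVENT_FLAG_WORKQ_MANAGER */
#define WQ_MANAGER_SENTINEL ((long)-1)  /* the (thread_t)-1 case above */

static long workq_reqthreads(void) { return WQ_MANAGER_SENTINEL; }  /* stub */

static void request_thread(int *state, unsigned *flags)
{
	if (*state & THREQUESTED)  /* already asked; don't hammer the kext */
		return;

	long wqthread = workq_reqthreads();
	*state |= THREQUESTED;

	if (wqthread == WQ_MANAGER_SENTINEL) {
		/* switched to the emergency/manager thread */
		*flags |= FLAG_MANAGER;
		wqthread = 0;           /* THREAD_NULL: bind identity later */
	}
	printf("state=%#x flags=%#x thread=%ld\n", *state, *flags, wqthread);
}

int main(void)
{
	int state = 0;
	unsigned flags = 0;
	request_thread(&state, &flags);
	request_thread(&state, &flags);  /* no-op: request already pending */
	return 0;
}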
+
+/*
+ * If we aren't already busy processing events [for this QoS],
+ * request workq thread support as appropriate.
+ *
+ * TBD - for now, we don't segregate out processing by QoS.
+ *
+ * - May be called with the kqueue's wait queue set locked,
+ *   so cannot do anything that could recurse on that.
+ */
+static void
+kqworkq_request_help(
+       struct kqworkq *kqwq, 
+       kq_index_t qos_index,
+       uint32_t type)
+{
+       struct kqrequest *kqr;
+
+       /* convert to thread qos value */
+       assert(qos_index < KQWQ_NQOS);
+       
+       kqwq_req_lock(kqwq);
+       kqr = kqworkq_get_request(kqwq, qos_index);
+
+       /*
+        * If someone is processing the queue, just mark what type
+        * of attempt this was (from a kq wakeup or from a waitq hook).
+        * They'll be noticed at the end of servicing and a new thread
+        * will be requested at that point.
+        */
+       if (kqr->kqr_state & KQWQ_PROCESSING) {
+               kqr->kqr_state |= type;
+               kqwq_req_unlock(kqwq);
+               return;
+       }
+
+       kqworkq_request_thread(kqwq, qos_index);
+       kqwq_req_unlock(kqwq);
+}
+
+/*
+ * These arrays describe the low and high qindexes for a given qos_index.
+ * The values come from the chart in <sys/eventvar.h> (must stay in sync).
+ */
+static kq_index_t _kq_base_index[KQWQ_NQOS] = {0, 0, 6, 11, 15, 18, 20, 21};
+static kq_index_t _kq_high_index[KQWQ_NQOS] = {0, 5, 10, 14, 17, 19, 20, 21};
+
+static struct kqtailq *
+kqueue_get_base_queue(struct kqueue *kq, kq_index_t qos_index)
+{
+       assert(qos_index < KQWQ_NQOS);
+       return &kq->kq_queue[_kq_base_index[qos_index]];
+}
+
+static struct kqtailq *
+kqueue_get_high_queue(struct kqueue *kq, kq_index_t qos_index)
+{
+       assert(qos_index < KQWQ_NQOS);
+       return &kq->kq_queue[_kq_high_index[qos_index]];
+}
+
+static int
+kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index)
+{
+       struct kqtailq *base_queue = kqueue_get_base_queue(kq, qos_index);
+       struct kqtailq *queue = kqueue_get_high_queue(kq, qos_index);
+
+       do {
+               if (!TAILQ_EMPTY(queue))
+                       return 0;
+       } while (queue-- > base_queue);
+       return 1;
+}
+
+static struct kqtailq *
+kqueue_get_suppressed_queue(struct kqueue *kq, kq_index_t qos_index)
+{
+       if (kq->kq_state & KQ_WORKQ) {
+               struct kqworkq *kqwq = (struct kqworkq *)kq;
+               struct kqrequest *kqr;
+
+               kqr = kqworkq_get_request(kqwq, qos_index);
+               return &kqr->kqr_suppressed;
+       } else {
+               struct kqfile *kqf = (struct kqfile *)kq;
+               return &kqf->kqf_suppressed;
+       }
+}
+
+static kq_index_t
+knote_get_queue_index(struct knote *kn)
+{
+       kq_index_t override_index = knote_get_qos_override_index(kn);
+       kq_index_t qos_index = knote_get_qos_index(kn);
+       struct kqueue *kq = knote_get_kq(kn);
+       kq_index_t res;
+
+       if ((kq->kq_state & KQ_WORKQ) == 0) {
+               assert(qos_index == 0);
+               assert(override_index == 0);
+       }
+       res = _kq_base_index[qos_index];
+       if (override_index > qos_index)
+               res += override_index - qos_index;
+
+       assert(res <= _kq_high_index[qos_index]);
+       return res;
+}
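
In other words, an override places the knote above its bucket's base queue by the size of the override, and the assert guarantees the result never escapes the bucket. A quick user-space check of that arithmetic against the tables above:

/* Mirror of the index math in knote_get_queue_index(). */
#include <assert.h>
#include <stdio.h>

#define NQOS 8
static int base_index[NQOS] = {0, 0, 6, 11, 15, 18, 20, 21};
static int high_index[NQOS] = {0, 5, 10, 14, 17, 19, 20, 21};

static int queue_index(int qos_index, int override_index)
{
	int res = base_index[qos_index];
	if (override_index > qos_index)
		res += override_index - qos_index;
	assert(res <= high_index[qos_index]);   /* stays inside the bucket */
	return res;
}

int main(void)
{
	printf("%d\n", queue_index(2, 2));  /* no override: base queue 6 */
	printf("%d\n", queue_index(2, 5));  /* override by 3: queue 9 */
	return 0;
}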
+
+static struct kqtailq *
+knote_get_queue(struct knote *kn)
+{
+       kq_index_t qindex = knote_get_queue_index(kn);
+
+       return &(knote_get_kq(kn))->kq_queue[qindex];
+}
+
+static struct kqtailq *
+knote_get_suppressed_queue(struct knote *kn)
+{
+       kq_index_t qos_index = knote_get_qos_index(kn);
+       struct kqueue *kq = knote_get_kq(kn);
+
+       return kqueue_get_suppressed_queue(kq, qos_index);
+}
+
+static kq_index_t
+knote_get_req_index(struct knote *kn)
+{
+       return kn->kn_req_index;
+}
+
+static kq_index_t
+knote_get_qos_index(struct knote *kn)
+{
+       return kn->kn_qos_index;
+}
+
+static void
+knote_set_qos_index(struct knote *kn, kq_index_t qos_index)
+{
+       struct kqueue *kq = knote_get_kq(kn);
+
+       assert(qos_index < KQWQ_NQOS);
+       assert((kn->kn_status & KN_QUEUED) == 0);
+
+       if (kq->kq_state & KQ_WORKQ)
+               assert(qos_index > QOS_INDEX_KQFILE);
+       else
+               assert(qos_index == QOS_INDEX_KQFILE);
+
+       /* always set requested */
+       kn->kn_req_index = qos_index;
+
+       /* only adjust in-use qos index when not suppressed */
+       if ((kn->kn_status & KN_SUPPRESSED) == 0)
+               kn->kn_qos_index = qos_index;
+}
+
+static kq_index_t
+knote_get_qos_override_index(struct knote *kn)
+{
+       return kn->kn_qos_override;
+}
+
+static void
+knote_set_qos_override_index(struct knote *kn, kq_index_t override_index)
+{
+       struct kqueue *kq = knote_get_kq(kn);
+       kq_index_t qos_index = knote_get_qos_index(kn);
+
+       assert((kn->kn_status & KN_QUEUED) == 0);
+
+       if (override_index == KQWQ_QOS_MANAGER)
+               assert(qos_index == KQWQ_QOS_MANAGER);
+       else 
+               assert(override_index < KQWQ_QOS_MANAGER);
+
+       kn->kn_qos_override = override_index;
+
+       /* 
+        * If this is a workq kqueue, apply the override to the 
+        * workq servicing thread.  
+        */
+       if (kq->kq_state & KQ_WORKQ)  {
+               struct kqworkq *kqwq = (struct kqworkq *)kq;
+
+               assert(qos_index > QOS_INDEX_KQFILE);
+               kqworkq_update_override(kqwq, qos_index, override_index);
+       }
+}
+
+static void
+kqworkq_update_override(struct kqworkq *kqwq, kq_index_t qos_index, kq_index_t override_index)
+{
+       struct kqrequest *kqr;
+       kq_index_t new_delta;
+       kq_index_t old_delta;
+
+       new_delta = (override_index > qos_index) ?
+                   override_index - qos_index : 0;
+
+       kqr = kqworkq_get_request(kqwq, qos_index);
+
+       kqwq_req_lock(kqwq);
+       old_delta = kqr->kqr_override_delta;
+
+       if (new_delta > old_delta) {
+               thread_t wqthread = kqr->kqr_thread;
+
+               /* store the new override delta */
+               kqr->kqr_override_delta = new_delta;
+
+               /* apply the override to [incoming?] servicing thread */
+               if (wqthread) {
+                       /* only apply if non-manager */
+                       if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
+                               if (old_delta)
+                                       thread_update_ipc_override(wqthread, override_index);
+                               else
+                                       thread_add_ipc_override(wqthread, override_index);
+                       }
+               }
+       }
+       kqwq_req_unlock(kqwq);
+}
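
The escalation rule above is monotonic while a request is live: the stored delta only grows, and the servicer gets a fresh IPC override the first time and an update thereafter. A user-space sketch of exactly that rule (the two thread_*_ipc_override calls are stubs standing in for the kernel interfaces of the same names):

/* Sketch of the escalation rule in kqworkq_update_override(). */
#include <stdio.h>

static void thread_add_ipc_override(int t, int qos)
{ printf("add override: thread %d -> qos %d\n", t, qos); }
static void thread_update_ipc_override(int t, int qos)
{ printf("update override: thread %d -> qos %d\n", t, qos); }

static void update_override(int *delta, int thread, int qos_index, int override_index)
{
	int new_delta = (override_index > qos_index) ?
	                override_index - qos_index : 0;

	if (new_delta > *delta) {       /* only ever escalate */
		int old_delta = *delta;
		*delta = new_delta;     /* store the new override delta */
		if (thread) {
			if (old_delta)
				thread_update_ipc_override(thread, override_index);
			else
				thread_add_ipc_override(thread, override_index);
		}
	}
}

int main(void)
{
	int delta = 0;
	update_override(&delta, 1, 2, 4);  /* first override: add */
	update_override(&delta, 1, 2, 5);  /* higher: update in place */
	update_override(&delta, 1, 2, 3);  /* lower: ignored */
	return 0;
}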
+
+/* called with the kqworkq lock held */
+static void
+kqworkq_bind_thread(
+       struct kqworkq *kqwq,
+       kq_index_t qos_index,
+       thread_t thread,
+       unsigned int flags)
+{
+       struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
+       thread_t old_thread = kqr->kqr_thread;
+       struct uthread *ut;
+
+       assert(kqr->kqr_state & KQWQ_THREQUESTED);
+
+       /* If no identity yet, just set flags as needed */
+       if (thread == THREAD_NULL) {
+               assert(old_thread == THREAD_NULL);
+
+               /* emergency or unidentified */
+               if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
+                       assert((kqr->kqr_state & KQWQ_THMANAGER) == 0);
+                       kqr->kqr_state |= KQWQ_THMANAGER;
+               }
+               return;
+       }
+
+       /* Known thread identity */
+       ut = get_bsdthread_info(thread);
+
+       /* 
+        * If this is a manager, and the manager request bit is
+        * not set, ensure no other thread is bound.  If the bit
+        * is set, make sure the old thread is us (or not set).
+        */
+       if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
+               if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
+                       assert(old_thread == THREAD_NULL);
+                       kqr->kqr_state |= KQWQ_THMANAGER;
+               } else if (old_thread == THREAD_NULL) {
+                       kqr->kqr_thread = thread;
+                       ut->uu_kqueue_bound = KQWQ_QOS_MANAGER;
+                       ut->uu_kqueue_flags = (KEVENT_FLAG_WORKQ | 
+                                              KEVENT_FLAG_WORKQ_MANAGER);
+               } else {
+                       assert(thread == old_thread);
+                       assert(ut->uu_kqueue_bound == KQWQ_QOS_MANAGER);
+                       assert(ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER);
+               }
+               return;
+       }
+
+       /* Just a normal one-queue servicing thread */
+       assert(old_thread == THREAD_NULL);
+       assert((kqr->kqr_state & KQWQ_THMANAGER) == 0);
+
+       kqr->kqr_thread = thread;
+       
+       /* apply an ipc QoS override if one is needed */
+       if (kqr->kqr_override_delta)
+               thread_add_ipc_override(thread, qos_index + kqr->kqr_override_delta);
+
+       /* indicate that we are processing in the uthread */
+       ut->uu_kqueue_bound = qos_index;
+       ut->uu_kqueue_flags = flags;
+}
+
+/* called with the kqworkq lock held */
+static void
+kqworkq_unbind_thread(
+       struct kqworkq *kqwq,
+       kq_index_t qos_index,
+       thread_t thread, 
+       __unused unsigned int flags)
+{
+       struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
+       kq_index_t override = 0;
+
+       assert(thread == current_thread());
+
+       /* 
+        * If there is an override, drop it from the current thread
+        * and then we are free to recompute (a potentially lower)
+        * minimum override to apply to the next thread request.
+        */
+       if (kqr->kqr_override_delta) {
+               struct kqtailq *base_queue = kqueue_get_base_queue(&kqwq->kqwq_kqueue, qos_index);
+               struct kqtailq *queue = kqueue_get_high_queue(&kqwq->kqwq_kqueue, qos_index);
+
+               /* if not bound to a manager thread, drop the current ipc override */
+               if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
+                       assert(thread == kqr->kqr_thread);
+                       thread_drop_ipc_override(thread);
+               }
+
+               /* recompute the new override */
+               do {
+                       if (!TAILQ_EMPTY(queue)) {
+                               override = queue - base_queue;
+                               break;
+                       }
+               } while (queue-- > base_queue);
+       }
+
+       /* unbind the thread and apply the new override */
+       kqr->kqr_thread = THREAD_NULL;
+       kqr->kqr_override_delta = override;
+}
+
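A note on the recompute loop above: it walks the per-QoS queues from the highest down to the base queue for this request, and the distance of the first non-empty queue from the base becomes the new override delta. A minimal userland model of that scan, with hypothetical queue types (the kernel's kqtailq pointer arithmetic works the same way):

    #include <sys/queue.h>
    #include <stddef.h>

    struct item { TAILQ_ENTRY(item) link; };
    TAILQ_HEAD(itemq, item);

    /*
     * Scan from the highest queue down to the base queue; return the
     * distance of the highest non-empty queue from the base, or 0 if
     * every queue is empty (mirrors the do/while above).
     */
    static size_t
    recompute_override(struct itemq *base_queue, struct itemq *high_queue)
    {
            struct itemq *queue = high_queue;

            do {
                    if (!TAILQ_EMPTY(queue))
                            return (size_t)(queue - base_queue);
            } while (queue-- > base_queue);

            return 0;
    }
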
+struct kqrequest *
+kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
+{
+       assert(qos_index < KQWQ_NQOS);
+       return &kqwq->kqwq_request[qos_index];
+}
+
+void
+knote_adjust_qos(struct knote *kn, qos_t new_qos, qos_t new_override)
+{
+       if (knote_get_kq(kn)->kq_state & KQ_WORKQ) {
+               kq_index_t new_qos_index;
+               kq_index_t new_override_index;
+               kq_index_t servicer_qos_index;
+
+               new_qos_index = qos_index_from_qos(new_qos, FALSE);
+               new_override_index = qos_index_from_qos(new_override, TRUE);
+
+               /* make sure the servicer qos acts as a floor */
+               servicer_qos_index = qos_index_from_qos(kn->kn_qos, FALSE);
+               if (servicer_qos_index > new_qos_index)
+                       new_qos_index = servicer_qos_index;
+               if (servicer_qos_index > new_override_index)
+                       new_override_index = servicer_qos_index;
+
+               kqlock(knote_get_kq(kn));
+               if (new_qos_index != knote_get_req_index(kn) ||
+                   new_override_index != knote_get_qos_override_index(kn)) {
+                       if (kn->kn_status & KN_QUEUED) {
+                               knote_dequeue(kn);
+                               knote_set_qos_index(kn, new_qos_index);
+                               knote_set_qos_override_index(kn, new_override_index);
+                               knote_enqueue(kn);
+                               knote_wakeup(kn);
+                       } else {
+                               knote_set_qos_index(kn, new_qos_index);
+                               knote_set_qos_override_index(kn, new_override_index);
+                       }
+               }
+               kqunlock(knote_get_kq(kn));
+       }
+}
+
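The floor test above reduces to a clamp: neither the new QoS index nor the new override index may fall below the index derived from the knote's own QoS. As a sketch (the helper name and index width are assumptions, not from the source):

    #include <stdint.h>

    typedef uint8_t kq_index_t;     /* assumed width for illustration */

    /* The servicer's QoS index acts as a floor for an adjusted index. */
    static inline kq_index_t
    qos_index_floor(kq_index_t requested, kq_index_t servicer)
    {
            return (requested < servicer) ? servicer : requested;
    }

Both new_qos_index and new_override_index pass through the same clamp before the knote is requeued.
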
+static void
+knote_wakeup(struct knote *kn)
+{
+       struct kqueue *kq = knote_get_kq(kn);
+
+       if (kq->kq_state & KQ_WORKQ) {
+               /* request a servicing thread */
+               struct kqworkq *kqwq = (struct kqworkq *)kq;
+               kq_index_t qos_index = knote_get_qos_index(kn);
+
+               kqworkq_request_help(kqwq, qos_index, KQWQ_WAKEUP);
+
+       } else {
+               struct kqfile *kqf = (struct kqfile *)kq;
+
+               /* flag wakeups during processing */
+               if (kq->kq_state & KQ_PROCESSING)
+                       kq->kq_state |= KQ_WAKEUP;
+
+               /* wakeup a thread waiting on this queue */
+               if (kq->kq_state & (KQ_SLEEP | KQ_SEL)) {
+                       kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
+                       waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
+                                          KQ_EVENT,
+                                          THREAD_AWAKENED,
+                                          WAITQ_ALL_PRIORITIES);
+               }
 
-               bzero((void *)sb, sizeof(*sb));
-               sb->st_size = kq->kq_count;
-               if (kq->kq_state & KQ_KEV_QOS)
-                       sb->st_blksize = sizeof(struct kevent_qos_s);
-               else if (kq->kq_state & KQ_KEV64)
-                       sb->st_blksize = sizeof(struct kevent64_s);
-               else if (IS_64BIT_PROCESS(p))
-                       sb->st_blksize = sizeof(struct user64_kevent);
-               else
-                       sb->st_blksize = sizeof(struct user32_kevent);
-               sb->st_mode = S_IFIFO;
+               /* wakeup other kqueues/select sets we're inside */
+               KNOTE(&kqf->kqf_sel.si_note, 0);
        }
-       kqunlock(kq);
-       return (0);
 }
-
+
 /*
  * Called with the kqueue locked
  */
 static void
-kqueue_wakeup(struct kqueue *kq, int closed)
+kqueue_interrupt(struct kqueue *kq)
 {
-       wait_result_t res = THREAD_NOT_WAITING;
+       assert((kq->kq_state & KQ_WORKQ) == 0);
 
-       if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0 || kq->kq_nprocess > 0) {
+       /* wakeup sleeping threads */
+       if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0) {
                kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
-               res = waitq_wakeup64_all((struct waitq *)kq->kq_wqs, KQ_EVENT,
-                                        (closed) ? THREAD_INTERRUPTED : THREAD_AWAKENED,
-                                        WAITQ_ALL_PRIORITIES);
+               (void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
+                                        KQ_EVENT,
+                                        THREAD_RESTART,
+                                        WAITQ_ALL_PRIORITIES);
        }
 
-       /* request additional workq threads if appropriate */
-       if (res == THREAD_NOT_WAITING && (kq->kq_state & KQ_WORKQ) &&
-           pthread_functions != NULL && pthread_functions->workq_reqthreads != NULL) {
-               /*
-                * The special workq kq should be accumulating the counts of
-                * queued sources on a pthread_priority_t basis and we should
-                * be providing that here.  For now, just hard-code a single
-                * entry request at a fixed (default) QOS.
-                */
-               struct workq_reqthreads_req_s request = { 
-                                     .priority = 0x020004ff,  /* legacy event manager */
-                                                         .count = kq->kq_count };
-               thread_t wqthread;
+       /* wakeup threads waiting their turn to process */
+       if (kq->kq_state & KQ_PROCWAIT) {
+               struct kqtailq *suppressq;
 
-               wqthread = (*pthread_functions->workq_reqthreads)(kq->kq_p, 1, &request);
-               assert(wqthread == THREAD_NULL);
+               assert(kq->kq_state & KQ_PROCESSING);
+
+               kq->kq_state &= ~KQ_PROCWAIT;
+               suppressq = kqueue_get_suppressed_queue(kq, QOS_INDEX_KQFILE);
+               (void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, 
+                                        CAST_EVENT64_T(suppressq),
+                                        THREAD_RESTART,
+                                        WAITQ_ALL_PRIORITIES);
        }
 }
 
+/*
+ * Called back from waitq code when no threads waiting and the hook was set.
+ *
+ * Interrupts are likely disabled and spin locks are held - minimal work
+ * can be done in this context!!!
+ *
+ * JMM - in the future, this will try to determine which knotes match the
+ * wait queue wakeup and apply these wakeups against those knotes themselves.
+ * For now, all the events dispatched this way are dispatch-manager handled,
+ * so hard-code that for now.
+ */
+void
+waitq_set__CALLING_PREPOST_HOOK__(void *kq_hook, void *knote_hook, int qos)
+{
+#pragma unused(knote_hook, qos)
+
+       struct kqworkq *kqwq = (struct kqworkq *)kq_hook;
+
+       assert(kqwq->kqwq_state & KQ_WORKQ);
+       kqworkq_request_help(kqwq, KQWQ_QOS_MANAGER, KQWQ_HOOKCALLED);
+}
+
 void
 klist_init(struct klist *list)
 {
@@ -2878,7 +4449,7 @@ klist_init(struct klist *list)
  *
  *     The object lock should also hold off pending
  *     detach/drop operations.  But we'll prevent it here
- *     too - just in case.
+ *     too (by taking a use reference) - just in case.
  */
 void
 knote(struct klist *list, long hint)
@@ -2886,19 +4457,21 @@ knote(struct klist *list, long hint)
        struct knote *kn;
 
        SLIST_FOREACH(kn, list, kn_selnext) {
-               struct kqueue *kq = kn->kn_kq;
+               struct kqueue *kq = knote_get_kq(kn);
 
                kqlock(kq);
+
+               /* If we can get a use reference - deliver event */
                if (kqlock2knoteuse(kq, kn)) {
                        int result;
 
                        /* call the event with only a use count */
-                       result = kn->kn_fop->f_event(kn, hint);
+                       result = knote_fops(kn)->f_event(kn, hint);
 
                /* if it's not going away and triggered */
-                       if (knoteuse2kqlock(kq, kn) && result)
-                               knote_activate(kn, 0);
-                       /* lock held again */
+                       if (knoteuse2kqlock(kq, kn, 0) && result)
+                               knote_activate(kn);
+                       /* kq lock held */
                }
                kqunlock(kq);
        }
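The body of knote() shows the use-reference dance this file relies on: convert the kq lock into a use count, call f_event without the lock held, then trade the use count back for the lock before acting on the result. A simplified userland model of the pattern (one mutex and a counter; the real code also parks waiters and handles drops in flight):

    #include <pthread.h>
    #include <stdbool.h>

    struct obj {
            pthread_mutex_t lock;
            int             usecount;
            bool            dropping;
    };

    /* lock -> use: take a use reference, drop the lock (fails if dropping) */
    static bool
    lock_to_use(struct obj *o)
    {
            if (o->dropping)
                    return false;
            o->usecount++;
            pthread_mutex_unlock(&o->lock);
            return true;
    }

    /* use -> lock: retake the lock, release the use reference */
    static bool
    use_to_lock(struct obj *o)
    {
            pthread_mutex_lock(&o->lock);
            o->usecount--;
            return !o->dropping;    /* false: object is going away */
    }

    static void
    deliver(struct obj *o, int (*event)(struct obj *, long), long hint)
    {
            pthread_mutex_lock(&o->lock);
            if (lock_to_use(o)) {
                    int result = event(o, hint);    /* unlocked callout */
                    if (use_to_lock(o) && result)
                            ;       /* activate the object here */
            }
            pthread_mutex_unlock(&o->lock);
    }
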
@@ -2927,6 +4500,53 @@ knote_detach(struct klist *list, struct knote *kn)
        return (SLIST_EMPTY(list));
 }
 
+/*
+ * knote_vanish - Indicate that the source has vanished
+ *
+ * If the knote has requested EV_VANISHED delivery,
+ * arrange for that. Otherwise, deliver a NOTE_REVOKE
+ * event for backward compatibility.
+ *
+ * The knote is marked as having vanished, but is not
+ * actually detached from the source in this instance.
+ * The actual detach is deferred until the knote drop.
+ *
+ * Our caller already has the object lock held. Calling
+ * the detach routine would try to take that lock
+ * recursively - which is likely not supported.
+ */
+void
+knote_vanish(struct klist *list)
+{
+       struct knote *kn;
+       struct knote *kn_next;
+
+       SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
+               struct kqueue *kq = knote_get_kq(kn);
+               int result;
+
+               kqlock(kq);
+               if ((kn->kn_status & KN_DROPPING) == 0) {
+
+                       /* If EV_VANISHED delivery was requested - prepare one */
+                       if (kn->kn_status & KN_REQVANISH) {
+                               kn->kn_status |= KN_VANISHED;
+                               knote_activate(kn);
+
+                       } else if (kqlock2knoteuse(kq, kn)) {
+                               /* call the event with only a use count */
+                               result = knote_fops(kn)->f_event(kn, NOTE_REVOKE);
+
+                               /* if it's not going away and triggered */
+                               if (knoteuse2kqlock(kq, kn, 0) && result)
+                                       knote_activate(kn);
+                               /* lock held again */
+                       }
+               }
+               kqunlock(kq);
+       }
+}
+
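For watchers that did not request EV_VANISHED, the compatibility path above injects a NOTE_REVOKE event. That legacy behavior is visible from userspace with the public vnode filter; a minimal macOS example (error handling omitted, and the watched path is only a placeholder):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            int kq = kqueue();
            int fd = open("/Volumes/SomeDisk/file", O_EVTONLY);
            struct kevent kev, out;

            /* ask for revocation events on the vnode */
            EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
                NOTE_REVOKE, 0, NULL);
            kevent(kq, &kev, 1, NULL, 0, NULL);

            /* blocks until the event fires, e.g. on forced unmount */
            if (kevent(kq, NULL, 0, &out, 1, NULL) == 1 &&
                (out.fflags & NOTE_REVOKE))
                    printf("fd %d: access revoked\n", (int)out.ident);

            close(fd);
            close(kq);
            return 0;
    }
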
 /*
  * For a given knote, link a provided wait queue directly with the kqueue.
  * Wakeups will happen via recursive wait queue support.  But nothing will move
@@ -2934,18 +4554,19 @@ knote_detach(struct klist *list, struct knote *kn)
  * we permanently enqueue them here.
  *
  * kqueue and knote references are held by caller.
+ * waitq locked by caller.
  *
  * caller provides the wait queue link structure.
  */
 int
 knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link)
 {
-       struct kqueue *kq = kn->kn_kq;
+       struct kqueue *kq = knote_get_kq(kn);
        kern_return_t kr;
 
-       kr = waitq_link(wq, kq->kq_wqs, WAITQ_SHOULD_LOCK, reserved_link);
+       kr = waitq_link(wq, &kq->kq_wqs, WAITQ_ALREADY_LOCKED, reserved_link);
        if (kr == KERN_SUCCESS) {
-               knote_markstayqueued(kn);
+               knote_markstayactive(kn);
                return (0);
        } else {
                return (EINVAL);
@@ -2964,11 +4585,11 @@ knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link)
 int
 knote_unlink_waitq(struct knote *kn, struct waitq *wq)
 {
-       struct kqueue *kq = kn->kn_kq;
+       struct kqueue *kq = knote_get_kq(kn);
        kern_return_t kr;
 
-       kr = waitq_unlink(wq, kq->kq_wqs);
-       knote_clearstayqueued(kn);
+       kr = waitq_unlink(wq, &kq->kq_wqs);
+       knote_clearstayactive(kn);
        return ((kr != KERN_SUCCESS) ? EINVAL : 0);
 }
 
@@ -2982,49 +4603,90 @@ knote_unlink_waitq(struct knote *kn, struct waitq *wq)
  * It returns the same way, but may drop it temporarily.
  */
 void
-knote_fdclose(struct proc *p, int fd)
+knote_fdclose(struct proc *p, int fd, int force)
 {
-       struct filedesc *fdp = p->p_fd;
        struct klist *list;
        struct knote *kn;
 
-       list = &fdp->fd_knlist[fd];
-       while ((kn = SLIST_FIRST(list)) != NULL) {
-               struct kqueue *kq = kn->kn_kq;
+restart:
+       list = &p->p_fd->fd_knlist[fd];
+       SLIST_FOREACH(kn, list, kn_link) {
+               struct kqueue *kq = knote_get_kq(kn);
+
+               kqlock(kq);
 
                if (kq->kq_p != p)
                        panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
                            __func__, kq->kq_p, p);
 
-               kqlock(kq);
+               /*
+                * If the knote supports EV_VANISHED delivery,
+                * transition it to vanished mode (or skip over
+                * it if already vanished).
+                */
+               if (!force && (kn->kn_status & KN_REQVANISH)) {
+
+                       if ((kn->kn_status & KN_VANISHED) == 0) {
+                               proc_fdunlock(p);
+
+                               /* get detach reference (also marks vanished) */
+                               if (kqlock2knotedetach(kq, kn)) {
+
+                                       /* detach knote and drop fp use reference */
+                                       knote_fops(kn)->f_detach(kn);
+                                       if (knote_fops(kn)->f_isfd)
+                                               fp_drop(p, kn->kn_id, kn->kn_fp, 0);
+
+                                       /* activate it if it's still in existence */
+                                       if (knoteuse2kqlock(kq, kn, 0)) {
+                                               knote_activate(kn);
+                                       }
+                                       kqunlock(kq);
+                               }
+                               proc_fdlock(p);
+                               goto restart;
+                       } else {
+                               kqunlock(kq);
+                               continue;
+                       }
+               }
+
                proc_fdunlock(p);
 
                /*
-                * Convert the lock to a drop ref.
+                * Convert the kq lock to a drop ref.
                 * If we get it, go ahead and drop it.
-                * Otherwise, we waited for it to
-                * be dropped by the other guy, so
-                * it is safe to move on in the list.
+                * Otherwise, we waited for the blocking
+                * condition to complete. Either way,
+                * we dropped the fdlock so start over.
                 */
                if (kqlock2knotedrop(kq, kn)) {
-                       kn->kn_fop->f_detach(kn);
                        knote_drop(kn, p);
                }
 
                proc_fdlock(p);
-
-               /* the fd tables may have changed - start over */
-               list = &fdp->fd_knlist[fd];
+               goto restart;
        }
 }
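Note that both branches in the loop above drop the fdlock to do blocking work and then `goto restart`, because the knote list may have been edited while the lock was down. The same idiom, reduced to a userland sketch (hypothetical list and lock names):

    #include <pthread.h>
    #include <sys/queue.h>

    struct node { SLIST_ENTRY(node) link; int needs_work; };
    SLIST_HEAD(nodelist, node);

    static struct nodelist  list = SLIST_HEAD_INITIALIZER(list);
    static pthread_mutex_t  list_lock = PTHREAD_MUTEX_INITIALIZER;

    void blocking_work(struct node *n);     /* must be called unlocked */

    static void
    drain(void)
    {
            struct node *n;
    restart:
            pthread_mutex_lock(&list_lock);
            SLIST_FOREACH(n, &list, link) {
                    if (!n->needs_work)
                            continue;
                    n->needs_work = 0;
                    pthread_mutex_unlock(&list_lock);
                    blocking_work(n);       /* list may change here... */
                    goto restart;           /* ...so rescan from the head */
            }
            pthread_mutex_unlock(&list_lock);
    }
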
 
-/* proc_fdlock held on entry (and exit) */
+/* 
+ * knote_fdadd - Add knote to the fd table for process
+ *
+ * All file-based filters associate a list of knotes by file
+ * descriptor index. All other filters hash the knote by ident.
+ *
+ * May have to grow the table of knote lists to cover the
+ * file descriptor index presented.
+ *
+ * proc_fdlock held on entry (and exit) 
+ */
 static int
-knote_fdpattach(struct knote *kn, struct filedesc *fdp, struct proc *p)
+knote_fdadd(struct knote *kn, struct proc *p)
 {
+       struct filedesc *fdp = p->p_fd;
        struct klist *list = NULL;
 
-       if (! kn->kn_fop->f_isfd) {
+       if (! knote_fops(kn)->f_isfd) {
                if (fdp->fd_knhashmask == 0)
                        fdp->fd_knhash = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE,
                            &fdp->fd_knhashmask);
@@ -3065,41 +4727,139 @@ knote_fdpattach(struct knote *kn, struct filedesc *fdp, struct proc *p)
        return (0);
 }
 
+/* 
+ * knote_fdremove - remove a knote from the fd table for process
+ *
+ * If the filter is file-based, remove based on fd index.
+ * Otherwise remove from the hash based on the ident.
+ *
+ * proc_fdlock held on entry (and exit)
+ */
+static void
+knote_fdremove(struct knote *kn, struct proc *p)
+{
+       struct filedesc *fdp = p->p_fd;
+       struct klist *list = NULL;
+
+       if (knote_fops(kn)->f_isfd) {
+               assert((u_int)fdp->fd_knlistsize > kn->kn_id);
+               list = &fdp->fd_knlist[kn->kn_id];
+       } else {
+               list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
+       }
+       SLIST_REMOVE(list, kn, knote, kn_link);
+}
+
+/* 
+ * knote_fdfind - lookup a knote in the fd table for process
+ *
+ * If the filter is file-based, lookup based on fd index.
+ * Otherwise use a hash based on the ident.
+ *
+ * Matching is based on kq, filter, and ident. Optionally,
+ * it may also be based on the udata field in the kevent -
+ * allowing multiple event registration for the file object
+ * per kqueue.
+ *
+ * proc_fdlock held on entry (and exit)
+ */
+static struct knote *
+knote_fdfind(struct kqueue *kq,
+             struct kevent_internal_s *kev,
+             struct proc *p)
+{
+       struct filedesc *fdp = p->p_fd;
+       struct klist *list = NULL;
+       struct knote *kn = NULL;
+       struct filterops *fops;
+       
+       fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
+
+       /* 
+        * determine where to look for the knote
+        */
+       if (fops->f_isfd) {
+               /* fd-based knotes are linked off the fd table */
+               if (kev->ident < (u_int)fdp->fd_knlistsize) {
+                       list = &fdp->fd_knlist[kev->ident];
+               }
+       } else if (fdp->fd_knhashmask != 0) {
+               /* hash non-fd knotes here too */
+               list = &fdp->fd_knhash[KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
+       }
 
+       /*
+        * scan the selected list looking for a match
+        */
+       if (list != NULL) {
+               SLIST_FOREACH(kn, list, kn_link) {
+                       if (kq == knote_get_kq(kn) &&
+                           kev->ident == kn->kn_id && 
+                           kev->filter == kn->kn_filter) {
+                               if (kev->flags & EV_UDATA_SPECIFIC) {
+                                       if ((kn->kn_status & KN_UDATA_SPECIFIC) &&
+                                           kev->udata == kn->kn_udata) {
+                                               break; /* matching udata-specific knote */
+                                       }
+                               } else if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) {
+                                       break; /* matching non-udata-specific knote */
+                               }
+                       }
+               }
+       }
+       return kn;
+}
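The match rules embedded in the scan above are: kq, ident, and filter must agree, and the udata field participates only when the registration is udata-specific on both sides. Restated as a standalone predicate (field names are illustrative; the kq pointer comparison is omitted):

    #include <stdbool.h>
    #include <stdint.h>

    struct kn_key {
            uint64_t ident;
            int16_t  filter;
            bool     udata_specific;
            uint64_t udata;
    };

    /* true when an incoming kevent resolves to this existing knote */
    static bool
    knote_key_matches(const struct kn_key *kn, const struct kn_key *kev)
    {
            if (kn->ident != kev->ident || kn->filter != kev->filter)
                    return false;
            if (kev->udata_specific)
                    return kn->udata_specific && kn->udata == kev->udata;
            return !kn->udata_specific;
    }
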
 
 /*
- * should be called at spl == 0, since we don't want to hold spl
- * while calling fdrop and free.
+ * knote_drop - disconnect and drop the knote
+ *
+ * Called with the kqueue unlocked and holding a
+ * "drop reference" on the knote in question.
+ * This reference is most often acquired through a call
+ * to kqlock2knotedrop(). But it can also be acquired
+ * through stealing a drop reference via a call to
+ * knoteuse2knotedrop() or during the initial attach
+ * of the knote.
+ *
+ * The knote may have already been detached from
+ * (or not yet attached to) its source object.
  */
 static void
 knote_drop(struct knote *kn, __unused struct proc *ctxp)
 {
-       struct kqueue *kq = kn->kn_kq;
+       struct kqueue *kq = knote_get_kq(kn);
        struct proc *p = kq->kq_p;
-       struct filedesc *fdp = p->p_fd;
-       struct klist *list;
        int needswakeup;
 
+       /* We must be holding a drop reference on the knote */
+       assert(kn->kn_status & KN_DROPPING);
+
+       /* If we are attached, disconnect from the source first */
+       if (kn->kn_status & KN_ATTACHED) {
+               knote_fops(kn)->f_detach(kn);
+       }
+
        proc_fdlock(p);
-       if (kn->kn_fop->f_isfd)
-               list = &fdp->fd_knlist[kn->kn_id];
-       else
-               list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
 
-       SLIST_REMOVE(list, kn, knote, kn_link);
+       /* Remove the knote from the fd table (list or hash) */
+       knote_fdremove(kn, p);
+
+       /* trade fdlock for kq lock */
        kqlock(kq);
-       knote_dequeue(kn);
+       proc_fdunlock(p);
+
+       /* determine if anyone needs to know about the drop */
+       assert((kn->kn_status & (KN_SUPPRESSED | KN_QUEUED)) == 0);
        needswakeup = (kn->kn_status & KN_USEWAIT);
        kqunlock(kq);
-       proc_fdunlock(p);
 
        if (needswakeup)
-               waitq_wakeup64_all((struct waitq *)kq->kq_wqs,
+               waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
                                   CAST_EVENT64_T(&kn->kn_status),
-                                  THREAD_AWAKENED,
+                                  THREAD_RESTART,
                                   WAITQ_ALL_PRIORITIES);
 
-       if (kn->kn_fop->f_isfd)
+       if (knote_fops(kn)->f_isfd && ((kn->kn_status & KN_VANISHED) == 0))
                fp_drop(p, kn->kn_id, kn->kn_fp, 0);
 
        knote_free(kn);
@@ -3107,19 +4867,14 @@ knote_drop(struct knote *kn, __unused struct proc *ctxp)
 
 /* called with kqueue lock held */
 static void
-knote_activate(struct knote *kn, int force)
+knote_activate(struct knote *kn)
 {
-       struct kqueue *kq = kn->kn_kq;
-
-       if (!force && (kn->kn_status & KN_ACTIVE))
+       if (kn->kn_status & KN_ACTIVE)
                return;
 
        kn->kn_status |= KN_ACTIVE;
-       knote_enqueue(kn);
-       kqueue_wakeup(kq, 0);
-
-       /* wake up the parent kq, too */
-       KNOTE(&kq->kq_sel.si_note, 0);
+       if (knote_enqueue(kn))
+               knote_wakeup(kn);
 }
 
 /* called with kqueue lock held */
@@ -3127,45 +4882,118 @@ static void
 knote_deactivate(struct knote *kn)
 {
        kn->kn_status &= ~KN_ACTIVE;
+       if ((kn->kn_status & KN_STAYACTIVE) == 0)
+               knote_dequeue(kn);
+}
+
+/* called with kqueue lock held */
+static void
+knote_enable(struct knote *kn)
+{
+       if ((kn->kn_status & KN_DISABLED) == 0)
+               return;
+
+       kn->kn_status &= ~KN_DISABLED;
+       if (knote_enqueue(kn))
+               knote_wakeup(kn);
+}
+
+/* called with kqueue lock held */
+static void
+knote_disable(struct knote *kn)
+{
+       if (kn->kn_status & KN_DISABLED)
+               return;
+
+       kn->kn_status |= KN_DISABLED;
+       knote_dequeue(kn);
+}
+
+/* called with kqueue lock held */
+static void
+knote_suppress(struct knote *kn)
+{
+       struct kqtailq *suppressq;
+
+       if (kn->kn_status & KN_SUPPRESSED)
+               return;
+
        knote_dequeue(kn);
+       kn->kn_status |= KN_SUPPRESSED;
+       suppressq = knote_get_suppressed_queue(kn);
+       TAILQ_INSERT_TAIL(suppressq, kn, kn_tqe);
 }
 
 /* called with kqueue lock held */
 static void
+knote_unsuppress(struct knote *kn)
+{
+       struct kqtailq *suppressq;
+
+       if ((kn->kn_status & KN_SUPPRESSED) == 0)
+               return;
+
+       kn->kn_status &= ~KN_SUPPRESSED;
+       suppressq = knote_get_suppressed_queue(kn);
+       TAILQ_REMOVE(suppressq, kn, kn_tqe);
+
+       /* update the in-use qos to equal the requested qos */
+       kn->kn_qos_index = kn->kn_req_index;
+
+       /* don't wakeup if unsuppressing just a stay-active knote */
+       if (knote_enqueue(kn) &&
+           (kn->kn_status & KN_ACTIVE))
+               knote_wakeup(kn);
+}
+
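Suppression parks a delivered knote on a side queue so a scan cannot pick it up twice; unsuppression restores the in-use QoS to the requested value and requeues it. A compact lifecycle model (hypothetical types; the kernel versions also honor the disabled/dropping gates shown in knote_enqueue below):

    #include <sys/queue.h>
    #include <stdbool.h>

    struct ev { TAILQ_ENTRY(ev) tqe; bool suppressed; int qos, req_qos; };
    TAILQ_HEAD(evq, ev);

    static void
    suppress(struct evq *active, struct evq *suppressq, struct ev *e)
    {
            if (e->suppressed)
                    return;
            TAILQ_REMOVE(active, e, tqe);           /* off the active queue */
            e->suppressed = true;
            TAILQ_INSERT_TAIL(suppressq, e, tqe);
    }

    static void
    unsuppress(struct evq *active, struct evq *suppressq, struct ev *e)
    {
            if (!e->suppressed)
                    return;
            e->suppressed = false;
            TAILQ_REMOVE(suppressq, e, tqe);
            e->qos = e->req_qos;                    /* shed any override */
            TAILQ_INSERT_TAIL(active, e, tqe);      /* deliverable again */
    }
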
+/* called with kqueue lock held */
+static int
 knote_enqueue(struct knote *kn)
 {
-       if ((kn->kn_status & (KN_QUEUED | KN_STAYQUEUED)) == KN_STAYQUEUED ||
-           (kn->kn_status & (KN_QUEUED | KN_STAYQUEUED | KN_DISABLED)) == 0) {
-               struct kqtailq *tq = kn->kn_tq;
-               struct kqueue *kq = kn->kn_kq;
+       if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0 ||
+           (kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)))
+               return 0;
+
+       if ((kn->kn_status & KN_QUEUED) == 0) {
+               struct kqtailq *queue = knote_get_queue(kn);
+               struct kqueue *kq = knote_get_kq(kn);
 
-               TAILQ_INSERT_TAIL(tq, kn, kn_tqe);
+               TAILQ_INSERT_TAIL(queue, kn, kn_tqe);
                kn->kn_status |= KN_QUEUED;
                kq->kq_count++;
+               return 1;
        }
+       return ((kn->kn_status & KN_STAYACTIVE) != 0);
 }
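The early-outs above encode the queueing gate: a knote may sit on a queue only when it is active or stay-active and none of the disabled, suppressed, or dropping bits are set; the return value tells the caller whether a wakeup may be needed. The same predicate, isolated (flag values are illustrative, not the kernel's):

    #include <stdbool.h>

    enum {
            F_ACTIVE     = 0x01,
            F_STAYACTIVE = 0x02,
            F_DISABLED   = 0x04,
            F_SUPPRESSED = 0x08,
            F_DROPPING   = 0x10,
    };

    /* mirrors the early-out tests in knote_enqueue */
    static bool
    can_enqueue(unsigned status)
    {
            if ((status & (F_ACTIVE | F_STAYACTIVE)) == 0)
                    return false;
            return (status & (F_DISABLED | F_SUPPRESSED | F_DROPPING)) == 0;
    }
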
 
+
 /* called with kqueue lock held */
 static void
 knote_dequeue(struct knote *kn)
 {
-       struct kqueue *kq = kn->kn_kq;
+       struct kqueue *kq = knote_get_kq(kn);
+       struct kqtailq *queue;
 
-       if ((kn->kn_status & (KN_QUEUED | KN_STAYQUEUED)) == KN_QUEUED) {
-               struct kqtailq *tq = kn->kn_tq;
+       if ((kn->kn_status & KN_QUEUED) == 0)
+               return;
 
-               TAILQ_REMOVE(tq, kn, kn_tqe);
-               kn->kn_tq = &kq->kq_head;
-               kn->kn_status &= ~KN_QUEUED;
-               kq->kq_count--;
-       }
+       queue = knote_get_queue(kn);
+       TAILQ_REMOVE(queue, kn, kn_tqe);
+       kn->kn_status &= ~KN_QUEUED;
+       kq->kq_count--;
 }
 
 void
 knote_init(void)
 {
        knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote),
-           8192, "knote zone");
+                          8192, "knote zone");
+
+       kqfile_zone = zinit(sizeof(struct kqfile), 8192*sizeof(struct kqfile),
+                           8192, "kqueue file zone");
+
+       kqworkq_zone = zinit(sizeof(struct kqworkq), 8192*sizeof(struct kqworkq),
+                           8192, "kqueue workq zone");
 
        /* allocate kq lock group attribute and group */
        kq_lck_grp_attr = lck_grp_attr_alloc_init();
@@ -3178,10 +5006,8 @@ knote_init(void)
        /* Initialize the timer filter lock */
        lck_mtx_init(&_filt_timerlock, kq_lck_grp, kq_lck_attr);
 
-#if VM_PRESSURE_EVENTS
-       /* Initialize the vm pressure list lock */
-       vm_pressure_init(kq_lck_grp, kq_lck_attr);
-#endif
+       /* Initialize the user filter lock */
+       lck_spin_init(&_filt_userlock, kq_lck_grp, kq_lck_attr);
 
 #if CONFIG_MEMORYSTATUS
        /* Initialize the memorystatus list lock */
@@ -3190,6 +5016,12 @@ knote_init(void)
 }
 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
 
+struct filterops *
+knote_fops(struct knote *kn)
+{
+       return sysfilt_ops[kn->kn_filtid];
+}
+
 static struct knote *
 knote_alloc(void)
 {
@@ -3335,9 +5167,10 @@ event_unlock(struct socket *so, int refcount, void *lr)
        else
                lr_saved = lr;
 
-       if (refcount)
+       if (refcount) {
+               VERIFY(so->so_usecount > 0);
                so->so_usecount--;
-
+       }
        if (so->so_usecount < 0) {
                panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
                    so, so->so_usecount, solockhistory_nr(so));
@@ -3640,7 +5473,7 @@ kev_post_msg(struct kev_msg *event_msg)
                         * unsafe to use "m2"
                         */
                        so_inc_recv_data_stat(ev_pcb->evp_socket,
-                           1, m->m_len, SO_TC_BE);
+                           1, m->m_len, MBUF_TC_BE);
 
                        sorwakeup(ev_pcb->evp_socket);
                        OSIncrementAtomic64((SInt64 *)&kevtstat.kes_posted);
@@ -3857,38 +5690,43 @@ fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)
 
 
 void
-knote_markstayqueued(struct knote *kn)
+knote_markstayactive(struct knote *kn)
 {
-       kqlock(kn->kn_kq);
-       kn->kn_status |= KN_STAYQUEUED;
-       knote_enqueue(kn);
-       kqunlock(kn->kn_kq);
+       kqlock(knote_get_kq(kn));
+       kn->kn_status |= KN_STAYACTIVE;
+
+       /* handle all stayactive knotes on the manager */
+       if (knote_get_kq(kn)->kq_state & KQ_WORKQ)
+               knote_set_qos_index(kn, KQWQ_QOS_MANAGER);
+
+       knote_activate(kn);
+       kqunlock(knote_get_kq(kn));
 }
 
 void
-knote_clearstayqueued(struct knote *kn)
+knote_clearstayactive(struct knote *kn)
 {
-       kqlock(kn->kn_kq);
-       kn->kn_status &= ~KN_STAYQUEUED;
-       knote_dequeue(kn);
-       kqunlock(kn->kn_kq);
+       kqlock(knote_get_kq(kn));
+       kn->kn_status &= ~KN_STAYACTIVE;
+       knote_deactivate(kn);
+       kqunlock(knote_get_kq(kn));
 }
 
 static unsigned long
 kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *buf,
                unsigned long buflen, unsigned long nknotes)
 {
-       struct kevent_qos_s kevqos;
        struct kevent_internal_s *kevp;
        for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
-               if (kq == kn->kn_kq) {
+               if (kq == knote_get_kq(kn)) {
                        if (nknotes < buflen) {
                                struct kevent_extinfo *info = &buf[nknotes];
+                               struct kevent_qos_s kevqos;
 
                                kqlock(kq);
-                               bzero(&kevqos, sizeof(kevqos));
                                kevp = &(kn->kn_kevent);
 
+                               bzero(&kevqos, sizeof(kevqos));
                                kevqos.ident = kevp->ident;
                                kevqos.filter = kevp->filter;
                                kevqos.flags = kevp->flags;
@@ -3900,10 +5738,7 @@ kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *
 
                                memcpy(&info->kqext_kev, &kevqos, sizeof(info->kqext_kev));
                                info->kqext_sdata = kn->kn_sdata;
-
-                               /* status flags exported to userspace/libproc */
-#define KQEXT_STATUS_MASK (KN_ACTIVE|KN_QUEUED|KN_DISABLED|KN_STAYQUEUED)
-                               info->kqext_status = kn->kn_status & KQEXT_STATUS_MASK;
+                               info->kqext_status = kn->kn_status;
                                info->kqext_sfflags = kn->kn_sfflags;
 
                                kqunlock(kq);
@@ -3929,6 +5764,9 @@ pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf,
        unsigned long buflen = bufsize / sizeof(struct kevent_extinfo);
        struct kevent_extinfo *kqext = NULL;
 
+       /* arbitrary upper limit to cap kernel memory usage, copyout size, etc. */
+       buflen = min(buflen, PROC_PIDFDKQUEUE_KNOTES_MAX);
+
        kqext = kalloc(buflen * sizeof(struct kevent_extinfo));
        if (kqext == NULL) {
                err = ENOMEM;
@@ -3961,7 +5799,59 @@ pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf,
                kqext = NULL;
        }
 
-       if (!err)
-               *retval = nknotes;
+       if (!err) {
+               *retval = min(nknotes, PROC_PIDFDKQUEUE_KNOTES_MAX);
+       }
        return err;
 }
+
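pid_kqueue_extinfo() is the kernel half of the PROC_PIDFDKQUEUE_EXTINFO flavor of proc_pidfdinfo(); note that the count returned is the total number of knotes (capped at PROC_PIDFDKQUEUE_KNOTES_MAX), which can exceed what fit in the caller's buffer. A userspace sketch, assuming the struct layout published in <sys/proc_info.h>:

    #include <sys/types.h>
    #include <sys/proc_info.h>
    #include <libproc.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* list the knotes attached to kqueue fd `kqfd` of process `pid` */
    static void
    dump_kq_extinfo(pid_t pid, int kqfd)
    {
            const int max = 64;             /* example cap, not the kernel's */
            struct kevent_extinfo *buf = calloc(max, sizeof(*buf));
            int n, i;

            if (buf == NULL)
                    return;

            n = proc_pidfdinfo(pid, kqfd, PROC_PIDFDKQUEUE_EXTINFO,
                buf, max * (int)sizeof(*buf));

            for (i = 0; i < n && i < max; i++)
                    printf("ident %llu filter %d udata 0x%llx\n",
                        (unsigned long long)buf[i].kqext_kev.ident,
                        (int)buf[i].kqext_kev.filter,
                        (unsigned long long)buf[i].kqext_kev.udata);

            free(buf);
    }
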
+static unsigned long
+kevent_udatainfo_emit(struct kqueue *kq, struct knote *kn, uint64_t *buf,
+               unsigned long buflen, unsigned long nknotes)
+{
+       struct kevent_internal_s *kevp;
+       for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
+               if (kq == knote_get_kq(kn)) {
+                       if (nknotes < buflen) {
+                               kqlock(kq);
+                               kevp = &(kn->kn_kevent);
+                               buf[nknotes] = kevp->udata;
+                               kqunlock(kq);
+                       }
+
+                       /* we return the total number of knotes, which may exceed what fit in the buffer */
+                       nknotes++;
+               }
+       }
+
+       return nknotes;
+}
+
+int
+pid_kqueue_udatainfo(proc_t p, struct kqueue *kq, uint64_t *buf,
+               uint32_t bufsize)
+{
+       struct knote *kn;
+       int i;
+       struct filedesc *fdp = p->p_fd;
+       unsigned long nknotes = 0;
+       unsigned long buflen = bufsize / sizeof(uint64_t);
+
+       proc_fdlock(p);
+
+       for (i = 0; i < fdp->fd_knlistsize; i++) {
+               kn = SLIST_FIRST(&fdp->fd_knlist[i]);
+               nknotes = kevent_udatainfo_emit(kq, kn, buf, buflen, nknotes);
+       }
+
+       if (fdp->fd_knhashmask != 0) {
+               for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
+                       kn = SLIST_FIRST(&fdp->fd_knhash[i]);
+                       nknotes = kevent_udatainfo_emit(kq, kn, buf, buflen, nknotes);
+               }
+       }
+
+       proc_fdunlock(p);
+       return (int)nknotes;
+}
+