+ kqwq = (struct kqworkq *)zalloc(kqworkq_zone);
+ if (kqwq == NULL)
+ return NULL;
+
+ kq = &kqwq->kqwq_kqueue;
+ bzero(kqwq, sizeof (struct kqworkq));
+
+ kqwq->kqwq_state = KQ_WORKQ;
+
+ for (i = 0; i < KQWQ_NBUCKETS; i++) {
+ TAILQ_INIT(&kq->kq_queue[i]);
+ }
+ for (i = 0; i < KQWQ_NQOS; i++) {
+ kqwq->kqwq_request[i].kqr_qos_index = i;
+ }
+
+ lck_spin_init(&kqwq->kqwq_reqlock, kq_lck_grp, kq_lck_attr);
+ policy = SYNC_POLICY_FIFO;
+ hook = (void *)kqwq;
+
+ } else if (flags & KEVENT_FLAG_WORKLOOP) {
+ struct kqworkloop *kqwl;
+ int i;
+
+ kqwl = (struct kqworkloop *)zalloc(kqworkloop_zone);
+ if (kqwl == NULL)
+ return NULL;
+
+ bzero(kqwl, sizeof (struct kqworkloop));
+
+ kqwl->kqwl_state = KQ_WORKLOOP | KQ_DYNAMIC;
+ kqwl->kqwl_retains = 1; /* donate a retain to creator */
+
+ kq = &kqwl->kqwl_kqueue;
+ for (i = 0; i < KQWL_NBUCKETS; i++) {
+ TAILQ_INIT(&kq->kq_queue[i]);
+ }
+ TAILQ_INIT(&kqwl->kqwl_request.kqr_suppressed);
+
+ lck_spin_init(&kqwl->kqwl_reqlock, kq_lck_grp, kq_lck_attr);
+ lck_mtx_init(&kqwl->kqwl_statelock, kq_lck_grp, kq_lck_attr);
+
+ policy = SYNC_POLICY_FIFO;
+ if (flags & KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD) {
+ policy |= SYNC_POLICY_PREPOST;
+ kq->kq_state |= KQ_NO_WQ_THREAD;
+ } else {
+ hook = (void *)kqwl;
+ }
+
+ } else {
+ struct kqfile *kqf;
+
+ kqf = (struct kqfile *)zalloc(kqfile_zone);
+ if (kqf == NULL)
+ return NULL;
+
+ kq = &kqf->kqf_kqueue;
+ bzero(kqf, sizeof (struct kqfile));
+ TAILQ_INIT(&kq->kq_queue[0]);
+ TAILQ_INIT(&kqf->kqf_suppressed);
+
+ policy = SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST;
+ }
+
+ waitq_set_init(&kq->kq_wqs, policy, NULL, hook);
+ lck_spin_init(&kq->kq_lock, kq_lck_grp, kq_lck_attr);
+ kq->kq_p = p;
+
+ if (fdp->fd_knlistsize < 0) {
+ proc_fdlock(p);
+ if (fdp->fd_knlistsize < 0)
+ fdp->fd_knlistsize = 0; /* this process has had a kq */
+ proc_fdunlock(p);
+ }
+
+ kq_addr_offset = ((uintptr_t)kq - (uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS);
+ /* Assert that the address can be pointer compacted for use with knote */
+ assert(kq_addr_offset < (uint64_t)(1ull << KNOTE_KQ_BITSIZE));
+ return (kq);
+}
+
+/*
+ * knotes_dealloc - detach all knotes for the process and drop them
+ *
+ * Called with proc_fdlock held.
+ * Returns with it locked.
+ * May drop it temporarily.
+ * The process is in a state where it will not try to allocate
+ * any more knotes during this call (it is stopped for exit or exec).
+ */
+void
+knotes_dealloc(proc_t p)
+{
+ struct filedesc *fdp = p->p_fd;
+ struct kqueue *kq;
+ struct knote *kn;
+ struct klist *kn_hash = NULL;
+ int i;
+
+ /* Close all the fd-indexed knotes up front */
+ if (fdp->fd_knlistsize > 0) {
+ for (i = 0; i < fdp->fd_knlistsize; i++) {
+ while ((kn = SLIST_FIRST(&fdp->fd_knlist[i])) != NULL) {
+ kq = knote_get_kq(kn);
+ kqlock(kq);
+ proc_fdunlock(p);
+ /* drop it ourselves or wait */
+ if (kqlock2knotedrop(kq, kn)) {
+ knote_drop(kn, p);
+ }
+ proc_fdlock(p);
+ }
+ }
+ /* free the table */
+ FREE(fdp->fd_knlist, M_KQUEUE);
+ fdp->fd_knlist = NULL;
+ }
+ fdp->fd_knlistsize = -1;
+
+ knhash_lock(p);
+ proc_fdunlock(p);
+
+ /* Clean out all the hashed knotes as well */
+ if (fdp->fd_knhashmask != 0) {
+ for (i = 0; i <= (int)fdp->fd_knhashmask; i++) {
+ while ((kn = SLIST_FIRST(&fdp->fd_knhash[i])) != NULL) {
+ kq = knote_get_kq(kn);
+ kqlock(kq);
+ knhash_unlock(p);
+ /* drop it ourselves or wait */
+ if (kqlock2knotedrop(kq, kn)) {
+ knote_drop(kn, p);
+ }
+ knhash_lock(p);
+ }
+ }
+ kn_hash = fdp->fd_knhash;
+ fdp->fd_knhashmask = 0;
+ fdp->fd_knhash = NULL;
+ }
+
+ knhash_unlock(p);
+
+ /* free the kn_hash table */
+ if (kn_hash)
+ FREE(kn_hash, M_KQUEUE);
+
+ proc_fdlock(p);
+}
+
+
+/*
+ * kqueue_dealloc - detach all knotes from a kqueue and free it
+ *
+ * We walk each list looking for knotes referencing this
+ * kqueue. If we find one, we try to drop it. If we fail
+ * to get a drop reference, the drop is already in progress
+ * and we wait for it to complete. Either way we can restart
+ * the walk, safe in the assumption that the list will
+ * eventually contain no more references to this kqueue
+ * (either we dropped them all, or someone else did).
+ *
+ * Assumes no new events are being added to the kqueue.
+ * Nothing locked on entry or exit.
+ *
+ * Workloop kqueues can't get here unless all the knotes
+ * are already gone and all requested threads have come
+ * and gone (cancelled or arrived).
+ */
+void
+kqueue_dealloc(struct kqueue *kq)
+{
+ struct proc *p;
+ struct filedesc *fdp;
+ struct knote *kn;
+ int i;
+
+ if (kq == NULL)
+ return;
+
+ p = kq->kq_p;
+ fdp = p->p_fd;
+
+ proc_fdlock(p);
+ for (i = 0; i < fdp->fd_knlistsize; i++) {
+ kn = SLIST_FIRST(&fdp->fd_knlist[i]);
+ while (kn != NULL) {
+ if (kq == knote_get_kq(kn)) {
+ assert((kq->kq_state & KQ_WORKLOOP) == 0);
+ kqlock(kq);
+ proc_fdunlock(p);
+ /* drop it ourselves or wait */
+ if (kqlock2knotedrop(kq, kn)) {
+ knote_drop(kn, p);
+ }
+ proc_fdlock(p);
+ /* start over at beginning of list */
+ kn = SLIST_FIRST(&fdp->fd_knlist[i]);
+ continue;
+ }
+ kn = SLIST_NEXT(kn, kn_link);
+ }
+ }
+ knhash_lock(p);
+ proc_fdunlock(p);
+
+ if (fdp->fd_knhashmask != 0) {
+ for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
+ kn = SLIST_FIRST(&fdp->fd_knhash[i]);
+ while (kn != NULL) {
+ if (kq == knote_get_kq(kn)) {
+ assert((kq->kq_state & KQ_WORKLOOP) == 0);
+ kqlock(kq);
+ knhash_unlock(p);
+ /* drop it ourselves or wait */
+ if (kqlock2knotedrop(kq, kn)) {
+ knote_drop(kn, p);
+ }
+ knhash_lock(p);
+ /* start over at beginning of list */
+ kn = SLIST_FIRST(&fdp->fd_knhash[i]);
+ continue;
+ }
+ kn = SLIST_NEXT(kn, kn_link);
+ }
+ }
+ }
+ knhash_unlock(p);
+
+ if (kq->kq_state & KQ_WORKLOOP) {
+ struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+ thread_t cur_owner = kqwl->kqwl_owner;
+
+ assert(TAILQ_EMPTY(&kqwl->kqwl_request.kqr_suppressed));
+ if (filt_wlowner_is_valid(cur_owner)) {
+ /*
+ * If the kqueue had an owner that prevented the thread request from
+ * going through, then no unbind happened, and we may have lingering
+ * overrides to drop.
+ */
+ if (kqr->kqr_dsync_owner_qos != THREAD_QOS_UNSPECIFIED) {
+ thread_drop_ipc_override(cur_owner);
+ kqr->kqr_dsync_owner_qos = THREAD_QOS_UNSPECIFIED;
+ }
+
+ if (kqr->kqr_owner_override_is_sync) {
+ thread_drop_sync_ipc_override(cur_owner);
+ kqr->kqr_owner_override_is_sync = 0;
+ }
+ thread_ends_owning_workloop(cur_owner);
+ thread_deallocate(cur_owner);
+ kqwl->kqwl_owner = THREAD_NULL;
+ }
+ }
+
+ /*
+ * waitq_set_deinit() removes the KQ's waitq set from
+ * any select sets to which it may belong.
+ */
+ waitq_set_deinit(&kq->kq_wqs);
+ lck_spin_destroy(&kq->kq_lock, kq_lck_grp);
+
+ if (kq->kq_state & KQ_WORKQ) {
+ struct kqworkq *kqwq = (struct kqworkq *)kq;
+
+ lck_spin_destroy(&kqwq->kqwq_reqlock, kq_lck_grp);
+ zfree(kqworkq_zone, kqwq);
+ } else if (kq->kq_state & KQ_WORKLOOP) {
+ struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+
+ assert(kqwl->kqwl_retains == 0);
+ lck_spin_destroy(&kqwl->kqwl_reqlock, kq_lck_grp);
+ lck_mtx_destroy(&kqwl->kqwl_statelock, kq_lck_grp);
+ zfree(kqworkloop_zone, kqwl);
+ } else {
+ struct kqfile *kqf = (struct kqfile *)kq;
+
+ zfree(kqfile_zone, kqf);
+ }
+}
+
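+/*
+ * kqueue_retain - take an additional reference on a kqueue
+ *
+ * Only dynamically allocated (workloop) kqueues are reference counted;
+ * file-backed and workq kqueues are left untouched.
+ */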
+static inline void
+kqueue_retain(struct kqueue *kq)
+{
+ struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+ uint32_t previous;
+
+ if ((kq->kq_state & KQ_DYNAMIC) == 0)
+ return;
+
+ previous = OSIncrementAtomic(&kqwl->kqwl_retains);
+ if (previous == KQ_WORKLOOP_RETAINS_MAX)
+ panic("kq(%p) retain overflow", kq);
+
+ if (previous == 0)
+ panic("kq(%p) resurrection", kq);
+}
+
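+/*
+ * Arguments to kqueue_release(): callers that may end up holding the
+ * final reference (and will deallocate the kqueue if so) pass
+ * KQUEUE_MIGHT_BE_LAST_REF; KQUEUE_CANT_BE_LAST_REF is asserted otherwise.
+ */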
+#define KQUEUE_CANT_BE_LAST_REF 0
+#define KQUEUE_MIGHT_BE_LAST_REF 1
+
+static inline int
+kqueue_release(struct kqueue *kq, __assert_only int possibly_last)
+{
+ struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+
+ if ((kq->kq_state & KQ_DYNAMIC) == 0) {
+ return 0;
+ }
+
+ assert(kq->kq_state & KQ_WORKLOOP); /* for now */
+ uint32_t refs = OSDecrementAtomic(&kqwl->kqwl_retains);
+ if (__improbable(refs == 0)) {
+ panic("kq(%p) over-release", kq);
+ }
+ if (refs == 1) {
+ assert(possibly_last);
+ }
+ return refs == 1;
+}
+
+int
+kqueue_body(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
+{
+ struct kqueue *kq;
+ struct fileproc *fp;
+ int fd, error;
+
+ error = falloc_withalloc(p,
+ &fp, &fd, vfs_context_current(), fp_zalloc, cra);
+ if (error) {
+ return (error);
+ }
+
+ kq = kqueue_alloc(p, 0);
+ if (kq == NULL) {
+ fp_free(p, fd, fp);
+ return (ENOMEM);
+ }
+
+ fp->f_flag = FREAD | FWRITE;
+ fp->f_ops = &kqueueops;
+ fp->f_data = kq;
+
+ proc_fdlock(p);
+ *fdflags(p, fd) |= UF_EXCLOSE;
+ procfdtbl_releasefd(p, fd, NULL);
+ fp_drop(p, fd, fp, 1);
+ proc_fdunlock(p);
+
+ *retval = fd;
+ return (error);
+}
+
+int
+kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
+{
+ return (kqueue_body(p, fileproc_alloc_init, NULL, retval));
+}
+
+static int
+kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp, struct proc *p,
+ unsigned int flags)
+{
+ int advance;
+ int error;
+
+ if (flags & KEVENT_FLAG_LEGACY32) {
+ bzero(kevp, sizeof (*kevp));
+
+ if (IS_64BIT_PROCESS(p)) {
+ struct user64_kevent kev64;
+
+ advance = sizeof (kev64);
+ error = copyin(*addrp, (caddr_t)&kev64, advance);
+ if (error)
+ return (error);
+ kevp->ident = kev64.ident;
+ kevp->filter = kev64.filter;
+ kevp->flags = kev64.flags;
+ kevp->udata = kev64.udata;
+ kevp->fflags = kev64.fflags;
+ kevp->data = kev64.data;
+ } else {
+ struct user32_kevent kev32;
+
+ advance = sizeof (kev32);
+ error = copyin(*addrp, (caddr_t)&kev32, advance);
+ if (error)
+ return (error);
+ kevp->ident = (uintptr_t)kev32.ident;
+ kevp->filter = kev32.filter;
+ kevp->flags = kev32.flags;
+ kevp->udata = CAST_USER_ADDR_T(kev32.udata);
+ kevp->fflags = kev32.fflags;
+ kevp->data = (intptr_t)kev32.data;
+ }
+ } else if (flags & KEVENT_FLAG_LEGACY64) {
+ struct kevent64_s kev64;
+
+ bzero(kevp, sizeof (*kevp));
+
+ advance = sizeof (struct kevent64_s);
+ error = copyin(*addrp, (caddr_t)&kev64, advance);
+ if (error)
+ return(error);
+ kevp->ident = kev64.ident;
+ kevp->filter = kev64.filter;
+ kevp->flags = kev64.flags;
+ kevp->udata = kev64.udata;
+ kevp->fflags = kev64.fflags;
+ kevp->data = kev64.data;
+ kevp->ext[0] = kev64.ext[0];
+ kevp->ext[1] = kev64.ext[1];
+
+ } else {
+ struct kevent_qos_s kevqos;
+
+ bzero(kevp, sizeof (*kevp));
+
+ advance = sizeof (struct kevent_qos_s);
+ error = copyin(*addrp, (caddr_t)&kevqos, advance);
+ if (error)
+ return error;
+ kevp->ident = kevqos.ident;
+ kevp->filter = kevqos.filter;
+ kevp->flags = kevqos.flags;
+ kevp->qos = kevqos.qos;
+// kevp->xflags = kevqos.xflags;
+ kevp->udata = kevqos.udata;
+ kevp->fflags = kevqos.fflags;
+ kevp->data = kevqos.data;
+ kevp->ext[0] = kevqos.ext[0];
+ kevp->ext[1] = kevqos.ext[1];
+ kevp->ext[2] = kevqos.ext[2];
+ kevp->ext[3] = kevqos.ext[3];
+ }
+ if (!error)
+ *addrp += advance;
+ return (error);
+}
+
+static int
+kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc *p,
+ unsigned int flags)
+{
+ user_addr_t addr = *addrp;
+ int advance;
+ int error;
+
+ /*
+ * fully initialize the different output event structure
+ * types from the internal kevent (and some universal
+ * defaults for fields not represented in the internal
+ * form).
+ */
+ if (flags & KEVENT_FLAG_LEGACY32) {
+ assert((flags & KEVENT_FLAG_STACK_EVENTS) == 0);
+
+ if (IS_64BIT_PROCESS(p)) {
+ struct user64_kevent kev64;
+
+ advance = sizeof (kev64);
+ bzero(&kev64, advance);
+
+ /*
+ * deal with the special case of a user-supplied
+ * value of (uintptr_t)-1.
+ */
+ kev64.ident = (kevp->ident == (uintptr_t)-1) ?
+ (uint64_t)-1LL : (uint64_t)kevp->ident;
+
+ kev64.filter = kevp->filter;
+ kev64.flags = kevp->flags;
+ kev64.fflags = kevp->fflags;
+ kev64.data = (int64_t) kevp->data;
+ kev64.udata = kevp->udata;
+ error = copyout((caddr_t)&kev64, addr, advance);
+ } else {
+ struct user32_kevent kev32;
+
+ advance = sizeof (kev32);
+ bzero(&kev32, advance);
+ kev32.ident = (uint32_t)kevp->ident;
+ kev32.filter = kevp->filter;
+ kev32.flags = kevp->flags;
+ kev32.fflags = kevp->fflags;
+ kev32.data = (int32_t)kevp->data;
+ kev32.udata = kevp->udata;
+ error = copyout((caddr_t)&kev32, addr, advance);
+ }
+ } else if (flags & KEVENT_FLAG_LEGACY64) {
+ struct kevent64_s kev64;
+
+ advance = sizeof (struct kevent64_s);
+ if (flags & KEVENT_FLAG_STACK_EVENTS) {
+ addr -= advance;
+ }
+ bzero(&kev64, advance);
+ kev64.ident = kevp->ident;
+ kev64.filter = kevp->filter;
+ kev64.flags = kevp->flags;
+ kev64.fflags = kevp->fflags;
+ kev64.data = (int64_t) kevp->data;
+ kev64.udata = kevp->udata;
+ kev64.ext[0] = kevp->ext[0];
+ kev64.ext[1] = kevp->ext[1];
+ error = copyout((caddr_t)&kev64, addr, advance);
+ } else {
+ struct kevent_qos_s kevqos;
+
+ advance = sizeof (struct kevent_qos_s);
+ if (flags & KEVENT_FLAG_STACK_EVENTS) {
+ addr -= advance;
+ }
+ bzero(&kevqos, advance);
+ kevqos.ident = kevp->ident;
+ kevqos.filter = kevp->filter;
+ kevqos.flags = kevp->flags;
+ kevqos.qos = kevp->qos;
+ kevqos.udata = kevp->udata;
+ kevqos.fflags = kevp->fflags;
+ kevqos.xflags = 0;
+ kevqos.data = (int64_t) kevp->data;
+ kevqos.ext[0] = kevp->ext[0];
+ kevqos.ext[1] = kevp->ext[1];
+ kevqos.ext[2] = kevp->ext[2];
+ kevqos.ext[3] = kevp->ext[3];
+ error = copyout((caddr_t)&kevqos, addr, advance);
+ }
+ if (!error) {
+ if (flags & KEVENT_FLAG_STACK_EVENTS)
+ *addrp = addr;
+ else
+ *addrp = addr + advance;
+ }
+ return (error);
+}
+
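+/*
+ * kevent_get_data_size - copy in the size of the caller's extra data buffer
+ *
+ * data_available is a kernel or user pointer to a size value; read it
+ * into *residp so the event scan can track residual buffer space.
+ */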
+static int
+kevent_get_data_size(struct proc *p,
+ uint64_t data_available,
+ unsigned int flags,
+ user_size_t *residp)
+{
+ user_size_t resid;
+ int error = 0;
+
+ if (data_available != USER_ADDR_NULL) {
+ if (flags & KEVENT_FLAG_KERNEL) {
+ resid = *(user_size_t *)(uintptr_t)data_available;
+ } else if (IS_64BIT_PROCESS(p)) {
+ user64_size_t usize;
+ error = copyin((user_addr_t)data_available, &usize, sizeof(usize));
+ resid = (user_size_t)usize;
+ } else {
+ user32_size_t usize;
+ error = copyin((user_addr_t)data_available, &usize, sizeof(usize));
+ resid = (user_size_t)usize;
+ }
+ if (error)
+ return(error);
+ } else {
+ resid = 0;
+ }
+ *residp = resid;
+ return 0;
+}
+
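+/*
+ * kevent_put_data_size - copy the residual data buffer size back out
+ * to the location named by data_available (kernel or user pointer).
+ */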
+static int
+kevent_put_data_size(struct proc *p,
+ uint64_t data_available,
+ unsigned int flags,
+ user_size_t resid)
+{
+ int error = 0;
+
+ if (data_available) {
+ if (flags & KEVENT_FLAG_KERNEL) {
+ *(user_size_t *)(uintptr_t)data_available = resid;
+ } else if (IS_64BIT_PROCESS(p)) {
+ user64_size_t usize = (user64_size_t)resid;
+ error = copyout(&usize, (user_addr_t)data_available, sizeof(usize));
+ } else {
+ user32_size_t usize = (user32_size_t)resid;
+ error = copyout(&usize, (user_addr_t)data_available, sizeof(usize));
+ }
+ }
+ return error;
+}
+
+/*
+ * kevent_continue - continue a kevent syscall after blocking
+ *
+ * assume we inherit a use count on the kq fileglob.
+ */
+
+__attribute__((noreturn))
+static void
+kevent_continue(__unused struct kqueue *kq, void *data, int error)
+{
+ struct _kevent *cont_args;
+ struct fileproc *fp;
+ uint64_t data_available;
+ user_size_t data_size;
+ user_size_t data_resid;
+ unsigned int flags;
+ int32_t *retval;
+ int noutputs;
+ int fd;
+ struct proc *p = current_proc();
+
+ cont_args = (struct _kevent *)data;
+ data_available = cont_args->data_available;
+ flags = cont_args->process_data.fp_flags;
+ data_size = cont_args->process_data.fp_data_size;
+ data_resid = cont_args->process_data.fp_data_resid;
+ noutputs = cont_args->eventout;
+ retval = cont_args->retval;
+ fd = cont_args->fd;
+ fp = cont_args->fp;
+
+ kevent_put_kq(p, fd, fp, kq);
+
+ /* don't abandon other output just because of residual copyout failures */
+ if (error == 0 && data_available && data_resid != data_size) {
+ (void)kevent_put_data_size(p, data_available, flags, data_resid);
+ }
+
+ /* don't restart after signals... */
+ if (error == ERESTART)
+ error = EINTR;
+ else if (error == EWOULDBLOCK)
+ error = 0;
+ if (error == 0)
+ *retval = noutputs;
+ unix_syscall_return(error);
+}
+
+/*
+ * kevent - [syscall] register and wait for kernel events
+ *
+ */
+int
+kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
+{
+ unsigned int flags = KEVENT_FLAG_LEGACY32;
+
+ return kevent_internal(p,
+ (kqueue_id_t)uap->fd, NULL,
+ uap->changelist, uap->nchanges,
+ uap->eventlist, uap->nevents,
+ 0ULL, 0ULL,
+ flags,
+ uap->timeout,
+ kevent_continue,
+ retval);
+}
+
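+/*
+ * kevent64 - [syscall] register and wait for kernel events, using the
+ * kevent64_s structure layout
+ */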
+int
+kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
+{
+ unsigned int flags;
+
+ /* restrict to user flags and set legacy64 */
+ flags = uap->flags & KEVENT_FLAG_USER;
+ flags |= KEVENT_FLAG_LEGACY64;
+
+ return kevent_internal(p,
+ (kqueue_id_t)uap->fd, NULL,
+ uap->changelist, uap->nchanges,
+ uap->eventlist, uap->nevents,
+ 0ULL, 0ULL,
+ flags,
+ uap->timeout,
+ kevent_continue,
+ retval);
+}
+
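+/*
+ * kevent_qos - [syscall] register and wait for kernel events, using the
+ * kevent_qos_s structure layout
+ */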
+int
+kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
+{
+ /* restrict to user flags */
+ uap->flags &= KEVENT_FLAG_USER;
+
+ return kevent_internal(p,
+ (kqueue_id_t)uap->fd, NULL,
+ uap->changelist, uap->nchanges,
+ uap->eventlist, uap->nevents,
+ uap->data_out, (uint64_t)uap->data_available,
+ uap->flags,
+ 0ULL,
+ kevent_continue,
+ retval);
+}
+
+int
+kevent_qos_internal(struct proc *p, int fd,
+ user_addr_t changelist, int nchanges,
+ user_addr_t eventlist, int nevents,
+ user_addr_t data_out, user_size_t *data_available,
+ unsigned int flags,
+ int32_t *retval)
+{
+ return kevent_internal(p,
+ (kqueue_id_t)fd, NULL,
+ changelist, nchanges,
+ eventlist, nevents,
+ data_out, (uint64_t)data_available,
+ (flags | KEVENT_FLAG_KERNEL),
+ 0ULL,
+ NULL,
+ retval);
+}
+
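+/*
+ * kevent_id - [syscall] kevent against a dynamically allocated (workloop)
+ * kqueue identified by a 64-bit id rather than a file descriptor
+ */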
+int
+kevent_id(struct proc *p, struct kevent_id_args *uap, int32_t *retval)
+{
+ /* restrict to user flags */
+ uap->flags &= KEVENT_FLAG_USER;
+
+ return kevent_internal(p,
+ (kqueue_id_t)uap->id, NULL,
+ uap->changelist, uap->nchanges,
+ uap->eventlist, uap->nevents,
+ uap->data_out, (uint64_t)uap->data_available,
+ (uap->flags | KEVENT_FLAG_DYNAMIC_KQUEUE),
+ 0ULL,
+ kevent_continue,
+ retval);
+}
+
+int
+kevent_id_internal(struct proc *p, kqueue_id_t *id,
+ user_addr_t changelist, int nchanges,
+ user_addr_t eventlist, int nevents,
+ user_addr_t data_out, user_size_t *data_available,
+ unsigned int flags,
+ int32_t *retval)
+{
+ return kevent_internal(p,
+ *id, id,
+ changelist, nchanges,
+ eventlist, nevents,
+ data_out, (uint64_t)data_available,
+ (flags | KEVENT_FLAG_KERNEL | KEVENT_FLAG_DYNAMIC_KQUEUE),
+ 0ULL,
+ NULL,
+ retval);
+}
+
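+/*
+ * kevent_get_timeout - convert a user-supplied timeout into an absolute timeval
+ *
+ * An immediate request yields the current uptime (already expired), a NULL
+ * timeout yields a zero timeval (wait forever), otherwise the relative
+ * timespec is copied in, validated, and added to the current uptime.
+ */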
+static int
+kevent_get_timeout(struct proc *p,
+ user_addr_t utimeout,
+ unsigned int flags,
+ struct timeval *atvp)
+{
+ struct timeval atv;
+ int error = 0;
+
+ if (flags & KEVENT_FLAG_IMMEDIATE) {
+ getmicrouptime(&atv);
+ } else if (utimeout != USER_ADDR_NULL) {
+ struct timeval rtv;
+ if (flags & KEVENT_FLAG_KERNEL) {
+ struct timespec *tsp = (struct timespec *)utimeout;
+ TIMESPEC_TO_TIMEVAL(&rtv, tsp);
+ } else if (IS_64BIT_PROCESS(p)) {
+ struct user64_timespec ts;
+ error = copyin(utimeout, &ts, sizeof(ts));
+ if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0)
+ error = EINVAL;
+ else
+ TIMESPEC_TO_TIMEVAL(&rtv, &ts);
+ } else {
+ struct user32_timespec ts;
+ error = copyin(utimeout, &ts, sizeof(ts));
+ TIMESPEC_TO_TIMEVAL(&rtv, &ts);
+ }
+ if (error)
+ return (error);
+ if (itimerfix(&rtv))
+ return (EINVAL);
+ getmicrouptime(&atv);
+ timevaladd(&atv, &rtv);
+ } else {
+ /* wait forever value */
+ atv.tv_sec = 0;
+ atv.tv_usec = 0;
+ }
+ *atvp = atv;
+ return 0;
+}
+
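+/*
+ * kevent_set_kq_mode - record which kevent structure layout a kqueue uses
+ *
+ * Legacy 32-bit events cannot be mixed with the 64-bit or qos layouts on
+ * the same kqueue; mismatches fail with EINVAL.
+ */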
+static int
+kevent_set_kq_mode(struct kqueue *kq, unsigned int flags)
+{
+ /* each kq should only be used for events of one type */
+ kqlock(kq);
+ if (kq->kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) {
+ if (flags & KEVENT_FLAG_LEGACY32) {
+ if ((kq->kq_state & KQ_KEV32) == 0) {
+ kqunlock(kq);
+ return EINVAL;
+ }
+ } else if (kq->kq_state & KQ_KEV32) {
+ kqunlock(kq);
+ return EINVAL;
+ }
+ } else if (flags & KEVENT_FLAG_LEGACY32) {
+ kq->kq_state |= KQ_KEV32;
+ } else if (flags & KEVENT_FLAG_LEGACY64) {
+ kq->kq_state |= KQ_KEV64;
+ } else {
+ kq->kq_state |= KQ_KEV_QOS;
+ }
+ kqunlock(kq);
+ return 0;
+}
+
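+/* fold the upper bits of a dynamic kqueue id into the low bits before masking */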
+#define KQ_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
+#define CONFIG_KQ_HASHSIZE CONFIG_KN_HASHSIZE
+
+static inline void
+kqhash_lock(proc_t p)
+{
+ lck_mtx_lock_spin_always(&p->p_fd->fd_kqhashlock);
+}
+
+static inline void
+kqhash_lock_held(__assert_only proc_t p)
+{
+ LCK_MTX_ASSERT(&p->p_fd->fd_kqhashlock, LCK_MTX_ASSERT_OWNED);
+}
+
+static inline void
+kqhash_unlock(proc_t p)
+{
+ lck_mtx_unlock(&p->p_fd->fd_kqhashlock);
+}
+
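+/*
+ * Called with the kqhash_lock() held; the lock may be dropped and retaken
+ * while the hash table is being allocated.
+ */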
+static void
+kqueue_hash_init_if_needed(proc_t p)
+{
+ struct filedesc *fdp = p->p_fd;
+
+ kqhash_lock_held(p);
+
+ if (__improbable(fdp->fd_kqhash == NULL)) {
+ struct kqlist *alloc_hash;
+ u_long alloc_mask;
+
+ kqhash_unlock(p);
+ alloc_hash = hashinit(CONFIG_KQ_HASHSIZE, M_KQUEUE, &alloc_mask);
+ kqhash_lock(p);
+
+ /* See if we won the race */
+ if (fdp->fd_kqhashmask == 0) {
+ fdp->fd_kqhash = alloc_hash;
+ fdp->fd_kqhashmask = alloc_mask;
+ } else {
+ kqhash_unlock(p);
+ FREE(alloc_hash, M_KQUEUE);
+ kqhash_lock(p);
+ }
+ }
+}
+
+/*
+ * Called with the kqhash_lock() held
+ */
+static void
+kqueue_hash_insert(
+ struct proc *p,
+ kqueue_id_t id,
+ struct kqueue *kq)
+{
+ struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+ struct filedesc *fdp = p->p_fd;
+ struct kqlist *list;
+
+ /* should hold the kq hash lock */
+ kqhash_lock_held(p);
+
+ if ((kq->kq_state & KQ_DYNAMIC) == 0) {
+ assert(kq->kq_state & KQ_DYNAMIC);
+ return;
+ }
+
+ /* only dynamically allocate workloop kqs for now */
+ assert(kq->kq_state & KQ_WORKLOOP);
+ assert(fdp->fd_kqhash);
+
+ kqwl->kqwl_dynamicid = id;
+
+ list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
+ SLIST_INSERT_HEAD(list, kqwl, kqwl_hashlink);
+}
+
+/* Called with kqhash_lock held */
+static void
+kqueue_hash_remove(
+ struct proc *p,
+ struct kqueue *kq)
+{
+ struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+ struct filedesc *fdp = p->p_fd;
+ struct kqlist *list;
+
+ /* should hold the kq hash lock */
+ kqhash_lock_held(p);
+
+ if ((kq->kq_state & KQ_DYNAMIC) == 0) {
+ assert(kq->kq_state & KQ_DYNAMIC);
+ return;
+ }
+ assert(kq->kq_state & KQ_WORKLOOP); /* for now */
+ list = &fdp->fd_kqhash[KQ_HASH(kqwl->kqwl_dynamicid, fdp->fd_kqhashmask)];
+ SLIST_REMOVE(list, kqwl, kqworkloop, kqwl_hashlink);
+}
+
+/* Called with kqhash_lock held */
+static struct kqueue *
+kqueue_hash_lookup(struct proc *p, kqueue_id_t id)
+{
+ struct filedesc *fdp = p->p_fd;
+ struct kqlist *list;
+ struct kqworkloop *kqwl;
+
+ /* should hold the kq hash lock */
+ kqhash_lock_held(p);
+
+ if (fdp->fd_kqhashmask == 0) return NULL;
+
+ list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
+ SLIST_FOREACH(kqwl, list, kqwl_hashlink) {
+ if (kqwl->kqwl_dynamicid == id) {
+ struct kqueue *kq = (struct kqueue *)kqwl;
+
+ assert(kq->kq_state & KQ_DYNAMIC);
+ assert(kq->kq_state & KQ_WORKLOOP); /* for now */
+ return kq;
+ }
+ }
+ return NULL;
+}
+
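+/*
+ * Drop the caller's reference on a dynamic kqueue; if it was the last
+ * one, remove the kqueue from the hash and deallocate it.
+ */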
+static inline void
+kqueue_release_last(struct proc *p, struct kqueue *kq)
+{
+ if (kq->kq_state & KQ_DYNAMIC) {
+ kqhash_lock(p);
+ if (kqueue_release(kq, KQUEUE_MIGHT_BE_LAST_REF)) {
+ kqueue_hash_remove(p, kq);
+ kqhash_unlock(p);
+ kqueue_dealloc(kq);
+ } else {
+ kqhash_unlock(p);
+ }
+ }
+}
+
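+/*
+ * kevent_get_bound_kq - return the kqueue the given thread is bound to,
+ * provided its binding flags and the kqueue state match the caller's
+ * expectations; NULL otherwise.
+ */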
+static struct kqueue *
+kevent_get_bound_kq(__assert_only struct proc *p, thread_t thread,
+ unsigned int kev_flags, unsigned int kq_flags)
+{
+ struct kqueue *kq;
+ struct uthread *ut = get_bsdthread_info(thread);
+
+ assert(p == get_bsdthreadtask_info(thread));
+
+ if (!(ut->uu_kqueue_flags & kev_flags))
+ return NULL;
+
+ kq = ut->uu_kqueue_bound;
+ if (!kq)
+ return NULL;
+
+ if (!(kq->kq_state & kq_flags))
+ return NULL;
+
+ return kq;
+}
+
+static int
+kevent_get_kq(struct proc *p, kqueue_id_t id, unsigned int flags, struct fileproc **fpp, int *fdp, struct kqueue **kqp)
+{
+ struct filedesc *descp = p->p_fd;
+ struct fileproc *fp = NULL;
+ struct kqueue *kq;
+ int fd = 0;
+ int error = 0;
+
+ /* If the workloop flag was passed, this request can only target a workloop */
+ if (flags & KEVENT_FLAG_DYNAMIC_KQUEUE) {
+ assert(flags & KEVENT_FLAG_WORKLOOP);
+ if (id == (kqueue_id_t)-1 &&
+ (flags & KEVENT_FLAG_KERNEL) &&
+ (flags & KEVENT_FLAG_WORKLOOP)) {
+
+ assert(is_workqueue_thread(current_thread()));
+
+ /*
+ * When kevent_id_internal is called from within the
+ * kernel and the passed 'id' value is -1, we look for
+ * the currently bound workloop kq.
+ *
+ * Until the pthread kext avoids calling into kevent_id_internal
+ * for threads whose fulfill is canceled, calling in unbound
+ * cannot be treated as fatal.
+ */
+ kq = kevent_get_bound_kq(p, current_thread(),
+ KEVENT_FLAG_WORKLOOP, KQ_WORKLOOP);
+ if (kq) {
+ kqueue_retain(kq);
+ } else {
+ struct uthread *ut = get_bsdthread_info(current_thread());
+
+ /* If thread is unbound due to cancel, just return an error */
+ if (ut->uu_kqueue_flags == KEVENT_FLAG_WORKLOOP_CANCELED) {
+ ut->uu_kqueue_flags = 0;
+ error = ECANCELED;
+ } else {
+ panic("Unbound thread called kevent_internal with id=-1"
+ " uu_kqueue_flags:0x%x, uu_kqueue_bound:%p",
+ ut->uu_kqueue_flags, ut->uu_kqueue_bound);
+ }
+ }
+
+ *fpp = NULL;
+ *fdp = 0;
+ *kqp = kq;
+ return error;
+ }
+
+ /* try shortcut on kq lookup for bound threads */
+ kq = kevent_get_bound_kq(p, current_thread(), KEVENT_FLAG_WORKLOOP, KQ_WORKLOOP);
+ if (kq != NULL && ((struct kqworkloop *)kq)->kqwl_dynamicid == id) {
+
+ if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) {
+ error = EEXIST;
+ kq = NULL;
+ goto out;
+ }
+
+ /* retain a reference while working with this kq. */
+ assert(kq->kq_state & KQ_DYNAMIC);
+ kqueue_retain(kq);
+ error = 0;
+ goto out;
+ }
+
+ /* look for the kq on the hash table */
+ kqhash_lock(p);
+ kq = kqueue_hash_lookup(p, id);
+ if (kq == NULL) {
+ kqhash_unlock(p);
+
+ if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST) {
+ error = ENOENT;
+ goto out;
+ }
+
+ struct kqueue *alloc_kq;
+ alloc_kq = kqueue_alloc(p, flags);
+ if (alloc_kq) {
+ kqhash_lock(p);
+ kqueue_hash_init_if_needed(p);
+ kq = kqueue_hash_lookup(p, id);
+ if (kq == NULL) {
+ /* insert our new one */
+ kq = alloc_kq;
+ kqueue_hash_insert(p, id, kq);
+ kqhash_unlock(p);
+ } else {
+ /* lost race, retain existing workloop */
+ kqueue_retain(kq);
+ kqhash_unlock(p);
+ kqueue_release(alloc_kq, KQUEUE_MIGHT_BE_LAST_REF);
+ kqueue_dealloc(alloc_kq);
+ }
+ } else {
+ error = ENOMEM;
+ goto out;
+ }
+ } else {
+
+ if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) {
+ kqhash_unlock(p);
+ kq = NULL;
+ error = EEXIST;
+ goto out;
+ }
+
+ /* retain a reference while working with this kq. */
+ assert(kq->kq_state & KQ_DYNAMIC);
+ kqueue_retain(kq);
+ kqhash_unlock(p);
+ }
+
+ } else if (flags & KEVENT_FLAG_WORKQ) {
+ /* must already exist for bound threads. */
+ if (flags & KEVENT_FLAG_KERNEL) {
+ assert(descp->fd_wqkqueue != NULL);
+ }
+
+ /*
+ * use the private kq associated with the proc workq.
+ * Just being a thread within the process (and not
+ * being the exit/exec thread) is enough to hold a
+ * reference on this special kq.
+ */
+ kq = descp->fd_wqkqueue;
+ if (kq == NULL) {
+ struct kqueue *alloc_kq = kqueue_alloc(p, KEVENT_FLAG_WORKQ);
+ if (alloc_kq == NULL)
+ return ENOMEM;
+
+ knhash_lock(p);
+ if (descp->fd_wqkqueue == NULL) {
+ kq = descp->fd_wqkqueue = alloc_kq;
+ knhash_unlock(p);
+ } else {
+ knhash_unlock(p);
+ kq = descp->fd_wqkqueue;
+ kqueue_dealloc(alloc_kq);
+ }
+ }
+ } else {
+ /* get a usecount for the kq itself */
+ fd = (int)id;
+ if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0)
+ return (error);
+ }
+ if ((error = kevent_set_kq_mode(kq, flags)) != 0) {
+ /* drop the usecount */
+ if (fp != NULL)
+ fp_drop(p, fd, fp, 0);
+ return error;
+ }
+
+out:
+ *fpp = fp;
+ *fdp = fd;
+ *kqp = kq;
+
+ return error;
+}
+
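+/* undo the kqueue and fileproc references taken by kevent_get_kq() */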
+static void
+kevent_put_kq(
+ struct proc *p,
+ kqueue_id_t id,
+ struct fileproc *fp,
+ struct kqueue *kq)
+{
+ kqueue_release_last(p, kq);
+ if (fp != NULL) {
+ assert((kq->kq_state & KQ_WORKQ) == 0);
+ fp_drop(p, (int)id, fp, 0);
+ }
+}
+
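+/*
+ * Read the dispatch queue serial number stored at a fixed offset from the
+ * workloop id in user memory; returns 0 if it cannot be read.
+ */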
+static uint64_t
+kevent_workloop_serial_no_copyin(proc_t p, uint64_t workloop_id)
+{
+ uint64_t serial_no = 0;
+ user_addr_t addr;
+ int rc;
+
+ if (workloop_id == 0 || p->p_dispatchqueue_serialno_offset == 0) {
+ return 0;
+ }
+ addr = (user_addr_t)(workloop_id + p->p_dispatchqueue_serialno_offset);
+
+ if (proc_is64bit(p)) {
+ rc = copyin(addr, (caddr_t)&serial_no, sizeof(serial_no));
+ } else {
+ uint32_t serial_no32 = 0;
+ rc = copyin(addr, (caddr_t)&serial_no32, sizeof(serial_no32));
+ serial_no = serial_no32;
+ }
+ return rc == 0 ? serial_no : 0;
+}
+
+int
+kevent_exit_on_workloop_ownership_leak(thread_t thread)
+{
+ proc_t p = current_proc();
+ struct filedesc *fdp = p->p_fd;
+ kqueue_id_t workloop_id = 0;
+ os_reason_t reason;
+ mach_vm_address_t addr;
+ uint32_t reason_size;
+
+ kqhash_lock(p);
+ if (fdp->fd_kqhashmask > 0) {
+ for (uint32_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
+ struct kqworkloop *kqwl;
+
+ SLIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
+ struct kqueue *kq = &kqwl->kqwl_kqueue;
+ if ((kq->kq_state & KQ_DYNAMIC) && kqwl->kqwl_owner == thread) {
+ workloop_id = kqwl->kqwl_dynamicid;
+ break;
+ }
+ }
+ }
+ }
+ kqhash_unlock(p);
+ assert(workloop_id);
+
+ reason = os_reason_create(OS_REASON_LIBSYSTEM,
+ OS_REASON_LIBSYSTEM_CODE_WORKLOOP_OWNERSHIP_LEAK);
+ if (reason == OS_REASON_NULL) {
+ goto out;
+ }
+
+ reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
+ reason_size = 2 * sizeof(uint64_t);
+ reason_size = kcdata_estimate_required_buffer_size(2, reason_size);
+ if (os_reason_alloc_buffer(reason, reason_size) != 0) {
+ goto out;
+ }
+
+ struct kcdata_descriptor *kcd = &reason->osr_kcd_descriptor;
+
+ if (kcdata_get_memory_addr(kcd, EXIT_REASON_WORKLOOP_ID,
+ sizeof(workloop_id), &addr) == KERN_SUCCESS) {
+ kcdata_memcpy(kcd, addr, &workloop_id, sizeof(workloop_id));
+ }
+
+ uint64_t serial_no = kevent_workloop_serial_no_copyin(p, workloop_id);
+ if (serial_no && kcdata_get_memory_addr(kcd, EXIT_REASON_DISPATCH_QUEUE_NO,
+ sizeof(serial_no), &addr) == KERN_SUCCESS) {
+ kcdata_memcpy(kcd, addr, &serial_no, sizeof(serial_no));
+ }
+
+out:
+#if DEVELOPMENT || DEBUG
+ psignal_try_thread_with_reason(p, thread, SIGABRT, reason);
+ return 0;
+#else
+ return exit_with_reason(p, W_EXITCODE(0, SIGKILL), (int *)NULL,
+ FALSE, FALSE, 0, reason);
+#endif
+}
+
+
+static int
+kevent_servicer_detach_preflight(thread_t thread, unsigned int flags, struct kqueue *kq)
+{
+ int error = 0;
+ struct kqworkloop *kqwl;
+ struct uthread *ut;
+ struct kqrequest *kqr;
+
+ if (!(flags & KEVENT_FLAG_WORKLOOP) || !(kq->kq_state & KQ_WORKLOOP))
+ return EINVAL;
+
+ /* only kq created with KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD from userspace can have attached threads */
+ if (!(kq->kq_state & KQ_NO_WQ_THREAD))
+ return EINVAL;
+
+ /* only allow detach for non-workqueue threads */
+ if (is_workqueue_thread(thread))
+ return EINVAL;
+
+ /* check that the current thread is bound to the requested wq */
+ ut = get_bsdthread_info(thread);
+ if (ut->uu_kqueue_bound != kq)
+ return EINVAL;
+
+ kqwl = (struct kqworkloop *)kq;
+ kqwl_req_lock(kqwl);
+ kqr = &kqwl->kqwl_request;
+
+ /* check that the wq is bound to the thread */
+ if ((kqr->kqr_state & KQR_BOUND) == 0 || (kqr->kqr_thread != thread))
+ error = EINVAL;
+
+ kqwl_req_unlock(kqwl);
+
+ return error;
+}
+
+static void
+kevent_servicer_detach_thread(struct proc *p, kqueue_id_t id, thread_t thread,
+ unsigned int flags, struct kqueue *kq)
+{
+ struct kqworkloop *kqwl;
+ struct uthread *ut;
+
+ assert((flags & KEVENT_FLAG_WORKLOOP) && (kq->kq_state & KQ_WORKLOOP));
+
+ /* only allow detach for non-workqueue threads */
+ assert(!is_workqueue_thread(thread));
+
+ /* only kq created with KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD from userspace can have attached threads */
+ assert(kq->kq_state & KQ_NO_WQ_THREAD);
+
+ /* check that the current thread is bound to the requested kq */
+ ut = get_bsdthread_info(thread);
+ assert(ut->uu_kqueue_bound == kq);
+
+ kqwl = (struct kqworkloop *)kq;
+
+ kqlock(kq);
+
+ /*
+ * Unbind the thread. The unbind itself checks whether the
+ * thread is still processing and, if so, ends the processing.
+ */
+ kqworkloop_unbind_thread(kqwl, thread, flags);
+
+ kqunlock(kq);
+
+ kevent_put_kq(p, id, NULL, kq);
+
+ return;
+}
+
+static int
+kevent_servicer_attach_thread(thread_t thread, unsigned int flags, struct kqueue *kq)
+{
+ int error = 0;
+ struct kqworkloop *kqwl;
+ struct uthread *ut;
+ struct kqrequest *kqr;
+
+ if (!(flags & KEVENT_FLAG_WORKLOOP) || !(kq->kq_state & KQ_WORKLOOP))
+ return EINVAL;
+
+ /* only kq created with KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD from userspace can have attached threads */
+ if (!(kq->kq_state & KQ_NO_WQ_THREAD))
+ return EINVAL;
+
+ /* only allow attach for non-workqueue threads */
+ if (is_workqueue_thread(thread))
+ return EINVAL;
+
+ /* check that the thread is not already bound */
+ ut = get_bsdthread_info(thread);
+ if (ut->uu_kqueue_bound != NULL)
+ return EINVAL;
+
+ assert(ut->uu_kqueue_flags == 0);
+
+ kqlock(kq);
+ kqwl = (struct kqworkloop *)kq;
+ kqwl_req_lock(kqwl);
+ kqr = &kqwl->kqwl_request;
+
+ /* check that the kqueue is not already bound */
+ if (kqr->kqr_state & (KQR_BOUND | KQR_THREQUESTED | KQR_DRAIN)) {
+ error = EINVAL;
+ goto out;
+ }
+
+ assert(kqr->kqr_thread == NULL);
+ assert((kqr->kqr_state & KQR_PROCESSING) == 0);
+
+ kqr->kqr_state |= KQR_THREQUESTED;
+ kqr->kqr_qos_index = THREAD_QOS_UNSPECIFIED;
+ kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED;
+ kqr->kqr_dsync_owner_qos = THREAD_QOS_UNSPECIFIED;
+ kqr->kqr_owner_override_is_sync = 0;
+
+ kqworkloop_bind_thread_impl(kqwl, thread, KEVENT_FLAG_WORKLOOP);
+
+ /* get a ref on the wlkq on behalf of the attached thread */
+ kqueue_retain(kq);
+
+out:
+ kqwl_req_unlock(kqwl);
+ kqunlock(kq);
+
+ return error;
+}
+
+static inline
+boolean_t kevent_args_requesting_events(unsigned int flags, int nevents)
+{
+ return (!(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0);
+}
+
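+/*
+ * kevent_internal - common implementation behind the kevent syscall
+ * variants and the in-kernel entry points
+ *
+ * Resolves the target kqueue (fd, workq, or dynamic workloop), registers
+ * the supplied changes, then optionally scans for and copies out events.
+ */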
+static int
+kevent_internal(struct proc *p,
+ kqueue_id_t id, kqueue_id_t *id_out,
+ user_addr_t changelist, int nchanges,
+ user_addr_t ueventlist, int nevents,
+ user_addr_t data_out, uint64_t data_available,
+ unsigned int flags,
+ user_addr_t utimeout,
+ kqueue_continue_t continuation,
+ int32_t *retval)
+{
+ struct _kevent *cont_args;
+ uthread_t ut;
+ struct kqueue *kq;
+ struct fileproc *fp = NULL;
+ int fd = 0;
+ struct kevent_internal_s kev;
+ int error, noutputs;
+ struct timeval atv;
+ user_size_t data_size;
+ user_size_t data_resid;
+ thread_t thread = current_thread();
+
+ /* Don't allow user-space threads to process output events from the workq kqs */
+ if (((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL)) == KEVENT_FLAG_WORKQ) &&
+ kevent_args_requesting_events(flags, nevents))
+ return EINVAL;
+
+ /* restrict dynamic kqueue allocation to workloops (for now) */
+ if ((flags & (KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP)) == KEVENT_FLAG_DYNAMIC_KQUEUE)
+ return EINVAL;
+
+ if (flags & (KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH | KEVENT_FLAG_WORKLOOP_SERVICER_DETACH |
+ KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST | KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD)) {
+
+ /* allowed only on workloops when calling kevent_id from user-space */
+ if (!(flags & KEVENT_FLAG_WORKLOOP) || (flags & KEVENT_FLAG_KERNEL) || !(flags & KEVENT_FLAG_DYNAMIC_KQUEUE))
+ return EINVAL;
+
+ /* cannot attach and detach simultaneously */
+ if ((flags & KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH) && (flags & KEVENT_FLAG_WORKLOOP_SERVICER_DETACH))
+ return EINVAL;
+
+ /* cannot ask for events and detach */
+ if ((flags & KEVENT_FLAG_WORKLOOP_SERVICER_DETACH) && kevent_args_requesting_events(flags, nevents))
+ return EINVAL;
+
+ }
+
+ /* prepare to deal with stack-wise allocation of out events */
+ if (flags & KEVENT_FLAG_STACK_EVENTS) {
+ int scale = ((flags & KEVENT_FLAG_LEGACY32) ?
+ (IS_64BIT_PROCESS(p) ? sizeof(struct user64_kevent) :
+ sizeof(struct user32_kevent)) :
+ ((flags & KEVENT_FLAG_LEGACY64) ? sizeof(struct kevent64_s) :
+ sizeof(struct kevent_qos_s)));
+ ueventlist += nevents * scale;
+ }
+
+ /* convert timeout to absolute - if we have one (and not immediate) */
+ error = kevent_get_timeout(p, utimeout, flags, &atv);
+ if (error)
+ return error;
+
+ /* copyin initial value of data residual from data_available */
+ error = kevent_get_data_size(p, data_available, flags, &data_size);
+ if (error)
+ return error;
+
+ /* get the kq we are going to be working on */
+ error = kevent_get_kq(p, id, flags, &fp, &fd, &kq);
+ if (error)
+ return error;
+
+ /* only bound threads can receive events on workloops */
+ if ((flags & KEVENT_FLAG_WORKLOOP) && kevent_args_requesting_events(flags, nevents)) {
+ ut = (uthread_t)get_bsdthread_info(thread);
+ if (ut->uu_kqueue_bound != kq) {
+ error = EXDEV;
+ goto out;
+ }
+
+ }
+
+ /* attach the current thread if necessary */
+ if (flags & KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH) {
+ error = kevent_servicer_attach_thread(thread, flags, kq);
+ if (error)
+ goto out;
+ }
+ else {
+ /*
+ * Before processing events and committing to the system call,
+ * return an error if the thread cannot be detached when requested.
+ */
+ if (flags & KEVENT_FLAG_WORKLOOP_SERVICER_DETACH) {
+ error = kevent_servicer_detach_preflight(thread, flags, kq);
+ if (error)
+ goto out;
+ }
+ }
+
+ if (id_out && kq && (flags & KEVENT_FLAG_WORKLOOP)) {
+ assert(kq->kq_state & KQ_WORKLOOP);
+ struct kqworkloop *kqwl;
+ kqwl = (struct kqworkloop *)kq;
+ *id_out = kqwl->kqwl_dynamicid;
+ }
+
+ /* register all the change requests the user provided... */
+ noutputs = 0;
+ while (nchanges > 0 && error == 0) {
+ error = kevent_copyin(&changelist, &kev, p, flags);
+ if (error)
+ break;
+
+ /* Make sure user doesn't pass in any system flags */
+ kev.flags &= ~EV_SYSFLAGS;
+
+ kevent_register(kq, &kev, p);
+
+ if (nevents > 0 &&
+ ((kev.flags & EV_ERROR) || (kev.flags & EV_RECEIPT))) {
+ if (kev.flags & EV_RECEIPT) {
+ kev.flags |= EV_ERROR;
+ kev.data = 0;
+ }
+ error = kevent_copyout(&kev, &ueventlist, p, flags);
+ if (error == 0) {
+ nevents--;
+ noutputs++;
+ }
+ } else if (kev.flags & EV_ERROR) {
+ error = kev.data;
+ }
+ nchanges--;
+ }
+
+ /* short-circuit the scan if we only want error events */
+ if (flags & KEVENT_FLAG_ERROR_EVENTS)
+ nevents = 0;
+
+ /* process pending events */
+ if (nevents > 0 && noutputs == 0 && error == 0) {
+ /* store the continuation/completion data in the uthread */
+ ut = (uthread_t)get_bsdthread_info(thread);
+ cont_args = &ut->uu_kevent.ss_kevent;
+ cont_args->fp = fp;
+ cont_args->fd = fd;
+ cont_args->retval = retval;
+ cont_args->eventlist = ueventlist;
+ cont_args->eventcount = nevents;
+ cont_args->eventout = noutputs;
+ cont_args->data_available = data_available;
+ cont_args->process_data.fp_fd = (int)id;
+ cont_args->process_data.fp_flags = flags;
+ cont_args->process_data.fp_data_out = data_out;
+ cont_args->process_data.fp_data_size = data_size;
+ cont_args->process_data.fp_data_resid = data_size;
+
+ error = kqueue_scan(kq, kevent_callback,
+ continuation, cont_args,
+ &cont_args->process_data,
+ &atv, p);
+
+ /* process remaining outputs */
+ noutputs = cont_args->eventout;
+ data_resid = cont_args->process_data.fp_data_resid;
+
+ /* copyout residual data size value (if it needs to be copied out) */
+ /* don't abandon other output just because of residual copyout failures */
+ if (error == 0 && data_available && data_resid != data_size) {
+ (void)kevent_put_data_size(p, data_available, flags, data_resid);
+ }
+ }
+
+ /* detach the current thread if necessary */
+ if (flags & KEVENT_FLAG_WORKLOOP_SERVICER_DETACH) {
+ assert(fp == NULL);
+ kevent_servicer_detach_thread(p, id, thread, flags, kq);
+ }
+
+out:
+ kevent_put_kq(p, id, fp, kq);
+
+ /* don't restart after signals... */
+ if (error == ERESTART)
+ error = EINTR;
+ else if (error == EWOULDBLOCK)
+ error = 0;
+ if (error == 0)
+ *retval = noutputs;
+ return (error);
+}
+
+
+/*
+ * kevent_callback - callback for each individual event
+ *
+ * called with nothing locked
+ * caller holds a reference on the kqueue
+ */
+static int
+kevent_callback(__unused struct kqueue *kq, struct kevent_internal_s *kevp,
+ void *data)
+{
+ struct _kevent *cont_args;
+ int error;
+
+ cont_args = (struct _kevent *)data;
+ assert(cont_args->eventout < cont_args->eventcount);
+
+ /*
+ * Copy out the appropriate amount of event data for this user.
+ */
+ error = kevent_copyout(kevp, &cont_args->eventlist, current_proc(),
+ cont_args->process_data.fp_flags);
+
+ /*
+ * If there isn't space for additional events, return
+ * a harmless error to stop the processing here
+ */
+ if (error == 0 && ++cont_args->eventout == cont_args->eventcount)
+ error = EWOULDBLOCK;
+ return (error);
+}
+
+/*
+ * kevent_description - format a description of a kevent for diagnostic output
+ *
+ * called with a 256-byte string buffer
+ */
+
+char *
+kevent_description(struct kevent_internal_s *kevp, char *s, size_t n)
+{
+ snprintf(s, n,
+ "kevent="
+ "{.ident=%#llx, .filter=%d, .flags=%#x, .udata=%#llx, .fflags=%#x, .data=%#llx, .ext[0]=%#llx, .ext[1]=%#llx}",
+ kevp->ident,
+ kevp->filter,
+ kevp->flags,
+ kevp->udata,
+ kevp->fflags,
+ kevp->data,
+ kevp->ext[0],
+ kevp->ext[1] );
+
+ return (s);
+}
+
+/*
+ * kevent_register - add a new event to a kqueue
+ *
+ * Creates a mapping between the event source and
+ * the kqueue via a knote data structure.
+ *
+ * Because many/most of the event sources are file
+ * descriptor related, the knote is linked off
+ * the file descriptor table for quick access.
+ *
+ * called with nothing locked
+ * caller holds a reference on the kqueue
+ */
+
+void
+kevent_register(struct kqueue *kq, struct kevent_internal_s *kev,
+ __unused struct proc *ctxp)
+{
+ struct proc *p = kq->kq_p;
+ const struct filterops *fops;
+ struct knote *kn = NULL;
+ int result = 0;
+ int error = 0;
+ unsigned short kev_flags = kev->flags;
+ int knoteuse_flags = KNUSE_NONE;
+
+ if (kev->filter < 0) {
+ if (kev->filter + EVFILT_SYSCOUNT < 0) {
+ error = EINVAL;
+ goto out;
+ }
+ fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
+ } else {
+ error = EINVAL;
+ goto out;
+ }
+
+ /* restrict EV_VANISHED to adding udata-specific dispatch kevents */
+ if ((kev->flags & EV_VANISHED) &&
+ (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2)) {
+ error = EINVAL;
+ goto out;
+ }
+
+ /* Simplify the flags - delete and disable overrule */
+ if (kev->flags & EV_DELETE)
+ kev->flags &= ~EV_ADD;
+ if (kev->flags & EV_DISABLE)
+ kev->flags &= ~EV_ENABLE;
+
+ if (kq->kq_state & KQ_WORKLOOP) {
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_REGISTER),
+ ((struct kqworkloop *)kq)->kqwl_dynamicid,
+ kev->udata, kev->flags, kev->filter);
+ } else if (kq->kq_state & KQ_WORKQ) {
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_REGISTER),
+ 0, kev->udata, kev->flags, kev->filter);
+ } else {
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_REGISTER),
+ VM_KERNEL_UNSLIDE_OR_PERM(kq),
+ kev->udata, kev->flags, kev->filter);
+ }
+
+restart:
+
+ /* find the matching knote from the fd tables/hashes */
+ kn = kq_find_knote_and_kq_lock(kq, kev, fops->f_isfd, p);
+
+ if (kn == NULL) {
+ if (kev->flags & EV_ADD) {
+ struct fileproc *knote_fp = NULL;
+
+ /* grab a file reference for the new knote */
+ if (fops->f_isfd) {
+ if ((error = fp_lookup(p, kev->ident, &knote_fp, 0)) != 0) {
+ goto out;
+ }
+ }
+
+ kn = knote_alloc();
+ if (kn == NULL) {
+ error = ENOMEM;
+ if (knote_fp != NULL)
+ fp_drop(p, kev->ident, knote_fp, 0);
+ goto out;
+ }
+
+ kn->kn_fp = knote_fp;
+ knote_set_kq(kn, kq);
+ kqueue_retain(kq); /* retain a kq ref */
+ kn->kn_filtid = ~kev->filter;
+ kn->kn_inuse = 1; /* for f_attach() */
+ kn->kn_status = KN_ATTACHING | KN_ATTACHED;
+
+ /* was vanish support requested */
+ if (kev->flags & EV_VANISHED) {
+ kev->flags &= ~EV_VANISHED;
+ kn->kn_status |= KN_REQVANISH;
+ }
+
+ /* snapshot matching/dispatching protocol flags into knote */
+ if (kev->flags & EV_DISPATCH)
+ kn->kn_status |= KN_DISPATCH;
+ if (kev->flags & EV_UDATA_SPECIFIC)
+ kn->kn_status |= KN_UDATA_SPECIFIC;
+
+ /*
+ * copy the kevent state into the knote.
+ * The protocol is that fflags and data
+ * are saved off, and cleared before
+ * calling the attach routine.
+ */
+ kn->kn_kevent = *kev;
+ kn->kn_sfflags = kev->fflags;
+ kn->kn_sdata = kev->data;
+ kn->kn_fflags = 0;
+ kn->kn_data = 0;
+
+ /* invoke pthread kext to convert kevent qos to thread qos */
+ knote_canonicalize_kevent_qos(kn);
+ knote_set_qos_index(kn, qos_index_from_qos(kn, kn->kn_qos, FALSE));
+
+ if (kev->flags & EV_DISABLE) {
+ /*
+ * Mark the knote disabled before anyone can find it;
+ * this can't call knote_disable() because that expects
+ * the kqlock to be held.
+ */
+ kn->kn_status |= KN_DISABLED;
+ }
+
+ /* Add the knote for lookup thru the fd table */
+ error = kq_add_knote(kq, kn, kev, p, &knoteuse_flags);
+ if (error) {
+ (void)kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF);
+ knote_free(kn);
+ if (knote_fp != NULL)
+ fp_drop(p, kev->ident, knote_fp, 0);
+
+ if (error == ERESTART) {
+ error = 0;
+ goto restart;
+ }
+ goto out;
+ }
+
+ /* fp reference count now applies to knote */
+ /* rwlock boost is now held */
+
+ /* call filter attach routine */
+ result = fops->f_attach(kn, kev);
+
+ /*
+ * Trade knote use count for kq lock.
+ * Cannot be dropped because we held
+ * KN_ATTACHING throughout.
+ */
+ knoteuse2kqlock(kq, kn, KNUSE_STEAL_DROP | knoteuse_flags);
+
+ if (kn->kn_flags & EV_ERROR) {
+ /*
+ * Failed to attach correctly, so drop.
+ * All other possible users/droppers
+ * have deferred to us. Save the error
+ * to return to our caller.
+ */
+ kn->kn_status &= ~KN_ATTACHED;
+ kn->kn_status |= KN_DROPPING;
+ error = kn->kn_data;
+ kqunlock(kq);
+ knote_drop(kn, p);
+ goto out;
+ }
+
+ /* end "attaching" phase - now just attached */
+ kn->kn_status &= ~KN_ATTACHING;
+
+ if (kn->kn_status & KN_DROPPING) {
+ /*
+ * Attach succeeded, but someone else
+ * deferred their drop - now we have
+ * to do it for them.
+ */
+ kqunlock(kq);
+ knote_drop(kn, p);
+ goto out;
+ }
+
+ /* Mark the thread request overcommit - where appropriate */
+ knote_set_qos_overcommit(kn);
+
+ /*
+ * If the attach routine indicated that an
+ * event is already fired, activate the knote.
+ */
+ if (result)
+ knote_activate(kn);
+
+ if (knote_fops(kn)->f_post_attach) {
+ error = knote_fops(kn)->f_post_attach(kn, kev);
+ if (error) {
+ kqunlock(kq);
+ goto out;
+ }
+ }
+
+ } else {
+ if ((kev_flags & (EV_ADD | EV_DELETE)) == (EV_ADD | EV_DELETE) &&
+ (kq->kq_state & KQ_WORKLOOP)) {
+ /*
+ * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete
+ * that doesn't care about ENOENT, so just pretend the deletion
+ * happened.
+ */
+ } else {
+ error = ENOENT;
+ }
+ goto out;
+ }
+
+ } else {
+ /* existing knote: kqueue lock already taken by kq_find_knote_and_kq_lock */
+
+ if ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) != 0) {
+ /*
+ * The knote is not in a stable state, wait for that
+ * transition to complete and then redrive the lookup.
+ */
+ knoteusewait(kq, kn);
+ goto restart;
+ }
+
+ if (kev->flags & EV_DELETE) {
+
+ /*
+ * If attempting to delete a disabled dispatch2 knote,
+ * we must wait for the knote to be re-enabled (unless
+ * it is being re-enabled atomically here).
+ */
+ if ((kev->flags & EV_ENABLE) == 0 &&
+ (kn->kn_status & (KN_DISPATCH2 | KN_DISABLED)) ==
+ (KN_DISPATCH2 | KN_DISABLED)) {
+ kn->kn_status |= KN_DEFERDELETE;
+ kqunlock(kq);
+ error = EINPROGRESS;
+ } else if (knote_fops(kn)->f_drop_and_unlock) {
+ /*
+ * The filter has requested to handle EV_DELETE events
+ *
+ * ERESTART means the kevent has to be re-evaluated
+ */
+ error = knote_fops(kn)->f_drop_and_unlock(kn, kev);
+ if (error == ERESTART) {
+ error = 0;
+ goto restart;
+ }
+ } else if (kqlock2knotedrop(kq, kn)) {
+ /* standard/default EV_DELETE path */
+ knote_drop(kn, p);
+ } else {
+ /*
+ * The kqueue is unlocked, it's not being
+ * dropped, and kqlock2knotedrop returned 0:
+ * this means that someone stole the drop of
+ * the knote from us.
+ */
+ error = EINPROGRESS;
+ }
+ goto out;
+ }
+
+ /*
+ * If we are re-enabling a deferred-delete knote,
+ * just enable it now and avoid calling the
+ * filter touch routine (it has delivered its
+ * last event already).
+ */
+ if ((kev->flags & EV_ENABLE) &&
+ (kn->kn_status & KN_DEFERDELETE)) {
+ assert(kn->kn_status & KN_DISABLED);
+ knote_activate(kn);
+ knote_enable(kn);
+ kqunlock(kq);
+ goto out;
+ }
+
+ /*
+ * If we are disabling, do it before unlocking and
+ * calling the touch routine (so no processing can
+ * see the new kevent state before the disable is
+ * applied).
+ */
+ if (kev->flags & EV_DISABLE)
+ knote_disable(kn);
+
+ /*
+ * Convert the kqlock to a use reference on the
+ * knote so we can call the filter touch routine.
+ */
+ if (knoteuse_needs_boost(kn, kev)) {
+ knoteuse_flags |= KNUSE_BOOST;
+ }
+ if (kqlock2knoteuse(kq, kn, knoteuse_flags)) {
+ /*
+ * Call touch routine to notify filter of changes
+ * in filter values (and to re-determine if any
+ * events are fired).
+ */
+ result = knote_fops(kn)->f_touch(kn, kev);
+
+ /* Get the kq lock back (don't defer droppers). */
+ if (!knoteuse2kqlock(kq, kn, knoteuse_flags)) {
+ kqunlock(kq);
+ goto out;
+ }
+
+ /* Handle errors during touch routine */
+ if (kev->flags & EV_ERROR) {
+ error = kev->data;
+ kqunlock(kq);
+ goto out;
+ }
+
+ /* Activate it if the touch routine said to */
+ if (result)
+ knote_activate(kn);
+ }
+
+ /* Enable the knote if called for */
+ if (kev->flags & EV_ENABLE)
+ knote_enable(kn);
+
+ }
+
+ /* still have kqlock held and knote is valid */
+ kqunlock(kq);
+
+out:
+ /* output local errors through the kevent */
+ if (error) {
+ kev->flags |= EV_ERROR;
+ kev->data = error;
+ }
+}
+
+
+/*
+ * knote_process - process a triggered event
+ *
+ * Validate that it is really still a triggered event
+ * by calling the filter routines (if necessary). Hold
+ * a use reference on the knote to avoid it being detached.
+ *
+ * If it is still considered triggered, we will have taken
+ * a copy of the state under the filter lock. We use that
+ * snapshot to dispatch the knote for future processing (or
+ * not, if this was a lost event).
+ *
+ * Our caller assures us that nobody else can be processing
+ * events from this knote during the whole operation. But
+ * others can be touching or posting events to the knote
+ * interspersed with our processing it.
+ *
+ * caller holds a reference on the kqueue.
+ * kqueue locked on entry and exit - but may be dropped
+ */
+static int
+knote_process(struct knote *kn,
+ kevent_callback_t callback,
+ void *callback_data,
+ struct filt_process_s *process_data,
+ struct proc *p)
+{
+ struct kevent_internal_s kev;
+ struct kqueue *kq = knote_get_kq(kn);
+ int result = 0;
+ int error = 0;
+
+ bzero(&kev, sizeof(kev));
+
+ /*
+ * Must be active or stayactive
+ * Must be queued and not disabled/suppressed
+ */
+ assert(kn->kn_status & KN_QUEUED);
+ assert(kn->kn_status & (KN_ACTIVE|KN_STAYACTIVE));
+ assert(!(kn->kn_status & (KN_DISABLED|KN_SUPPRESSED|KN_DROPPING)));
+
+ if (kq->kq_state & KQ_WORKLOOP) {
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS),
+ ((struct kqworkloop *)kq)->kqwl_dynamicid,
+ kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
+ kn->kn_filtid);
+ } else if (kq->kq_state & KQ_WORKQ) {
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS),
+ 0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
+ kn->kn_filtid);
+ } else {
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS),
+ VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata,
+ kn->kn_status | (kn->kn_id << 32), kn->kn_filtid);
+ }
+
+ /*
+ * For deferred-drop or vanished events, we just create a fake
+ * event to acknowledge end-of-life. Otherwise, we call the
+ * filter's process routine to snapshot the kevent state under
+ * the filter's locking protocol.
+ */
+ if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
+ /* create fake event */
+ kev.filter = kn->kn_filter;
+ kev.ident = kn->kn_id;
+ kev.qos = kn->kn_qos;
+ kev.flags = (kn->kn_status & KN_DEFERDELETE) ?
+ EV_DELETE : EV_VANISHED;
+ kev.flags |= (EV_DISPATCH2 | EV_ONESHOT);
+ kev.udata = kn->kn_udata;
+ result = 1;
+
+ knote_suppress(kn);
+ } else {
+ int flags = KNUSE_NONE;
+ /* deactivate - so new activations indicate a wakeup */
+ knote_deactivate(kn);
+
+ /* suppress knotes to avoid returning the same event multiple times in a single call. */
+ knote_suppress(kn);
+
+ if (knoteuse_needs_boost(kn, NULL)) {
+ flags |= KNUSE_BOOST;
+ }
+ /* convert lock to a knote use reference */
+ if (!kqlock2knoteuse(kq, kn, flags))
+ panic("dropping knote found on queue\n");
+
+ /* call out to the filter to process with just a ref */
+ result = knote_fops(kn)->f_process(kn, process_data, &kev);
+ if (result) flags |= KNUSE_STEAL_DROP;
+
+ /*
+ * convert our reference back to a lock. accept drop
+ * responsibility from others if we've committed to
+ * delivering event data.
+ */
+ if (!knoteuse2kqlock(kq, kn, flags)) {
+ /* knote dropped */
+ kn = NULL;
+ }
+ }
+
+ if (kn != NULL) {
+ /*
+ * Determine how to dispatch the knote for future event handling.
+ * not-fired: just return (do not callout, leave deactivated).
+ * One-shot: If dispatch2, enter deferred-delete mode (unless this
+ * is the deferred delete event delivery itself). Otherwise,
+ * drop it.
+ * stolendrop: We took responsibility for someone else's drop attempt.
+ * treat this just like one-shot and prepare to turn it back
+ * into a deferred delete if required.
+ * Dispatch: don't clear state, just mark it disabled.
+ * Cleared: just leave it deactivated.
+ * Others: re-activate as there may be more events to handle.
+ * This will not wake up more handlers right now, but
+ * at the completion of handling events it may trigger
+ * more handler threads (TODO: optimize based on more than
+ * just this one event being detected by the filter).
+ */
+
+ if (result == 0)
+ return (EJUSTRETURN);
+
+ if ((kev.flags & EV_ONESHOT) || (kn->kn_status & KN_STOLENDROP)) {
+ if ((kn->kn_status & (KN_DISPATCH2 | KN_DEFERDELETE)) == KN_DISPATCH2) {
+ /* defer dropping non-delete oneshot dispatch2 events */
+ kn->kn_status |= KN_DEFERDELETE;
+ knote_disable(kn);
+
+ /* if we took over another's drop clear those flags here */
+ if (kn->kn_status & KN_STOLENDROP) {
+ assert(kn->kn_status & KN_DROPPING);
+ /*
+ * the knote will be dropped when the
+ * deferred deletion occurs
+ */
+ kn->kn_status &= ~(KN_DROPPING|KN_STOLENDROP);
+ }
+ } else if (kn->kn_status & KN_STOLENDROP) {
+ /* We now own the drop of the knote. */
+ assert(kn->kn_status & KN_DROPPING);
+ knote_unsuppress(kn);
+ kqunlock(kq);
+ knote_drop(kn, p);
+ kqlock(kq);
+ } else if (kqlock2knotedrop(kq, kn)) {
+ /* just EV_ONESHOT, _not_ DISPATCH2 */
+ knote_drop(kn, p);
+ kqlock(kq);
+ }
+ } else if (kn->kn_status & KN_DISPATCH) {
+ /* disable all dispatch knotes */
+ knote_disable(kn);
+ } else if ((kev.flags & EV_CLEAR) == 0) {
+ /* re-activate in case there are more events */
+ knote_activate(kn);
+ }
+ }
+
+ /*
+ * callback to handle each event as we find it.
+ * If we have to detach and drop the knote, do
+ * it while we have the kq unlocked.
+ */
+ if (result) {
+ kqunlock(kq);
+ error = (callback)(kq, &kev, callback_data);
+ kqlock(kq);
+ }
+ return (error);
+}
+
+
+/*
+ * Return 0 to indicate that processing should proceed,
+ * -1 if there is nothing to process.
+ *
+ * Called with kqueue locked and returns the same way,
+ * but may drop lock temporarily.
+ */
+static int
+kqworkq_begin_processing(struct kqworkq *kqwq, kq_index_t qos_index, int flags)
+{
+ struct kqrequest *kqr;
+ thread_t self = current_thread();
+ __assert_only struct uthread *ut = get_bsdthread_info(self);
+
+ assert(kqwq->kqwq_state & KQ_WORKQ);
+ assert(qos_index < KQWQ_NQOS);
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START,
+ flags, qos_index);
+
+ kqwq_req_lock(kqwq);
+
+ kqr = kqworkq_get_request(kqwq, qos_index);
+
+ /* manager skips buckets that haven't asked for its help */
+ if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
+
+ /* If nothing for manager to do, just return */
+ if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END,
+ 0, kqr->kqr_state);
+ kqwq_req_unlock(kqwq);
+ return -1;
+ }
+ /* bind manager thread from this time on */
+ kqworkq_bind_thread_impl(kqwq, qos_index, self, flags);
+
+ } else {
+ /* We should already be bound to this kqueue */
+ assert(kqr->kqr_state & KQR_BOUND);
+ assert(kqr->kqr_thread == self);
+ assert(ut->uu_kqueue_bound == (struct kqueue *)kqwq);
+ assert(ut->uu_kqueue_qos_index == qos_index);
+ assert((ut->uu_kqueue_flags & flags) == ut->uu_kqueue_flags);
+ }
+
+ /*
+ * we should have been requested to be here
+ * and nobody else should still be processing
+ */
+ assert(kqr->kqr_state & KQR_WAKEUP);
+ assert(kqr->kqr_state & KQR_THREQUESTED);
+ assert((kqr->kqr_state & KQR_PROCESSING) == 0);
+
+ /* reset wakeup trigger to catch new events after we start processing */
+ kqr->kqr_state &= ~KQR_WAKEUP;
+
+ /* convert to processing mode */
+ kqr->kqr_state |= KQR_PROCESSING;
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END,
+ kqr_thread_id(kqr), kqr->kqr_state);
+
+ kqwq_req_unlock(kqwq);
+ return 0;
+}
+
+static inline bool
+kqworkloop_is_processing_on_current_thread(struct kqworkloop *kqwl)
+{
+ struct kqueue *kq = &kqwl->kqwl_kqueue;
+
+ kqlock_held(kq);
+
+ if (kq->kq_state & KQ_PROCESSING) {
+ /*
+ * KQ_PROCESSING is unset with the kqlock held, and the kqr thread is
+ * never modified while KQ_PROCESSING is set, meaning that peeking at
+ * its value is safe from this context.
+ */
+ return kqwl->kqwl_request.kqr_thread == current_thread();
+ }
+ return false;
+}
+
+static void
+kqworkloop_acknowledge_events(struct kqworkloop *kqwl, boolean_t clear_ipc_override)
+{
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+ struct knote *kn, *tmp;
+
+ kqlock_held(&kqwl->kqwl_kqueue);
+
+ TAILQ_FOREACH_SAFE(kn, &kqr->kqr_suppressed, kn_tqe, tmp) {
+ /*
+ * If a knote that can adjust QoS is disabled because of the automatic
+ * behavior of EV_DISPATCH, the knotes should stay suppressed so that
+ * further overrides keep pushing.
+ */
+ if (knote_fops(kn)->f_adjusts_qos && (kn->kn_status & KN_DISABLED) &&
+ (kn->kn_status & (KN_STAYACTIVE | KN_DROPPING)) == 0 &&
+ (kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) {
+ /*
+ * When called from unbind, clear the sync ipc override on the knote
+ * for events which are delivered.
+ */
+ if (clear_ipc_override) {
+ knote_adjust_sync_qos(kn, THREAD_QOS_UNSPECIFIED, FALSE);
+ }
+ continue;
+ }
+ knote_unsuppress(kn);
+ }
+}
+
+static int
+kqworkloop_begin_processing(struct kqworkloop *kqwl,
+ __assert_only unsigned int flags)
+{
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+ struct kqueue *kq = &kqwl->kqwl_kqueue;
+
+ kqlock_held(kq);
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_START,
+ kqwl->kqwl_dynamicid, flags, 0);
+
+ kqwl_req_lock(kqwl);
+
+ /* nobody else should still be processing */
+ assert((kqr->kqr_state & KQR_PROCESSING) == 0);
+ assert((kq->kq_state & KQ_PROCESSING) == 0);
+
+ kqr->kqr_state |= KQR_PROCESSING | KQR_R2K_NOTIF_ARMED;
+ kq->kq_state |= KQ_PROCESSING;
+
+ kqwl_req_unlock(kqwl);
+
+ kqworkloop_acknowledge_events(kqwl, FALSE);
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_END,
+ kqwl->kqwl_dynamicid, flags, 0);
+
+ return 0;
+}
+
+/*
+ * Return 0 to indicate that processing should proceed,
+ * -1 if there is nothing to process.
+ *
+ * Called with kqueue locked and returns the same way,
+ * but may drop lock temporarily.
+ * May block.
+ */
+static int
+kqueue_begin_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags)
+{
+ struct kqtailq *suppressq;
+
+ kqlock_held(kq);
+
+ if (kq->kq_state & KQ_WORKQ) {
+ return kqworkq_begin_processing((struct kqworkq *)kq, qos_index, flags);
+ } else if (kq->kq_state & KQ_WORKLOOP) {
+ return kqworkloop_begin_processing((struct kqworkloop*)kq, flags);
+ }
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_START,
+ VM_KERNEL_UNSLIDE_OR_PERM(kq), flags);
+
+ assert(qos_index == QOS_INDEX_KQFILE);
+
+ /* wait to become the exclusive processing thread */
+ for (;;) {
+ if (kq->kq_state & KQ_DRAIN) {
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
+ VM_KERNEL_UNSLIDE_OR_PERM(kq), 2);
+ return -1;
+ }
+
+ if ((kq->kq_state & KQ_PROCESSING) == 0)
+ break;
+
+ /* if someone else is processing the queue, wait */
+ kq->kq_state |= KQ_PROCWAIT;
+ suppressq = kqueue_get_suppressed_queue(kq, qos_index);
+ waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
+ CAST_EVENT64_T(suppressq),
+ THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
+
+ kqunlock(kq);
+ thread_block(THREAD_CONTINUE_NULL);
+ kqlock(kq);
+ }
+
+ /* Nobody else processing */
+
+ /* clear pre-posts and KQ_WAKEUP now, in case we bail early */
+ waitq_set_clear_preposts(&kq->kq_wqs);
+ kq->kq_state &= ~KQ_WAKEUP;
+
+ /* anything left to process? */
+ if (kqueue_queue_empty(kq, qos_index)) {
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
+ VM_KERNEL_UNSLIDE_OR_PERM(kq), 1);
+ return -1;
+ }
+
+ /* convert to processing mode */
+ kq->kq_state |= KQ_PROCESSING;
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
+ VM_KERNEL_UNSLIDE_OR_PERM(kq));
+
+ return 0;
+}
+
+/*
+ * kqworkq_end_processing - Complete the processing of a workq kqueue
+ *
+ * We may have to request new threads.
+ * This can happen when there are no waiting processing threads and:
+ * - there were active events we never got to (count > 0)
+ * - we pended waitq hook callouts during processing
+ * - we pended wakeups while processing (or unsuppressing)
+ *
+ * Called with kqueue lock held.
+ */
+static void
+kqworkq_end_processing(struct kqworkq *kqwq, kq_index_t qos_index, int flags)
+{
+#pragma unused(flags)
+
+ struct kqueue *kq = &kqwq->kqwq_kqueue;
+ struct kqtailq *suppressq = kqueue_get_suppressed_queue(kq, qos_index);
+
+ thread_t self = current_thread();
+ struct uthread *ut = get_bsdthread_info(self);
+ struct knote *kn;
+ struct kqrequest *kqr;
+ thread_t thread;
+
+ assert(kqwq->kqwq_state & KQ_WORKQ);
+ assert(qos_index < KQWQ_NQOS);
+
+ /* Are we really bound to this kqueue? */
+ if (ut->uu_kqueue_bound != kq) {
+ assert(ut->uu_kqueue_bound == kq);
+ return;
+ }
+
+ kqr = kqworkq_get_request(kqwq, qos_index);
+
+ kqwq_req_lock(kqwq);
+
+ /* Do we claim to be manager? */
+ if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
+
+ /* bail if not bound that way */
+ if (ut->uu_kqueue_qos_index != KQWQ_QOS_MANAGER ||
+ (ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER) == 0) {
+ assert(ut->uu_kqueue_qos_index == KQWQ_QOS_MANAGER);
+ assert(ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER);
+ kqwq_req_unlock(kqwq);
+ return;
+ }
+
+ /* bail if this request wasn't already getting manager help */
+ if ((kqr->kqr_state & KQWQ_THMANAGER) == 0 ||
+ (kqr->kqr_state & KQR_PROCESSING) == 0) {
+ kqwq_req_unlock(kqwq);
+ return;
+ }
+ } else {
+ if (ut->uu_kqueue_qos_index != qos_index ||
+ (ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER)) {
+ assert(ut->uu_kqueue_qos_index == qos_index);
+ assert((ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER) == 0);
+ kqwq_req_unlock(kqwq);
+ return;
+ }
+ }
+
+ assert(kqr->kqr_state & KQR_BOUND);
+ thread = kqr->kqr_thread;
+ assert(thread == self);
+
+ assert(kqr->kqr_state & KQR_PROCESSING);
+
+ /* If we didn't drain the whole queue, re-mark that a wakeup is needed */
+ if (!kqueue_queue_empty(kq, qos_index))
+ kqr->kqr_state |= KQR_WAKEUP;
+
+ kqwq_req_unlock(kqwq);
+
+ /*
+ * Return suppressed knotes to their original state.
+ * For workq kqueues, suppressed ones that are still
+ * truly active (not just forced into the queue) will
+ * set flags we check below to see if anything got
+ * woken up.
+ */
+ while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
+ assert(kn->kn_status & KN_SUPPRESSED);
+ knote_unsuppress(kn);
+ }
+
+ kqwq_req_lock(kqwq);
+
+ /* Indicate that we are done processing this request */
+ kqr->kqr_state &= ~KQR_PROCESSING;
+
+ /*
+ * Drop our association with this one request and its
+ * override on us.
+ */
+ kqworkq_unbind_thread(kqwq, qos_index, thread, flags);
+
+ /*
+ * request a new thread if we didn't process the whole
+ * queue or real events have happened (not just putting
+ * stay-active events back).
+ */
+ if (kqr->kqr_state & KQR_WAKEUP) {
+ if (kqueue_queue_empty(kq, qos_index)) {
+ kqr->kqr_state &= ~KQR_WAKEUP;
+ } else {
+ kqworkq_request_thread(kqwq, qos_index);
+ }
+ }
+ kqwq_req_unlock(kqwq);
+}
+
+static void
+kqworkloop_end_processing(struct kqworkloop *kqwl, int nevents,
+ unsigned int flags)
+{
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+ struct kqueue *kq = &kqwl->kqwl_kqueue;
+
+ kqlock_held(kq);
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_START,
+ kqwl->kqwl_dynamicid, flags, 0);
+
+ if ((kq->kq_state & KQ_NO_WQ_THREAD) && nevents == 0 &&
+ (flags & KEVENT_FLAG_IMMEDIATE) == 0) {
+ /*
+ * <rdar://problem/31634014> We may soon block, but have returned no
+ * kevents that need to be kept suppressed for overriding purposes.
+ *
+ * It is hence safe to acknowledge events and unsuppress everything, so
+ * that if we block we can observe all events firing.
+ */
+ kqworkloop_acknowledge_events(kqwl, TRUE);
+ }
+
+ kqwl_req_lock(kqwl);
+
+ assert(kqr->kqr_state & KQR_PROCESSING);
+ assert(kq->kq_state & KQ_PROCESSING);
+
+ kq->kq_state &= ~KQ_PROCESSING;
+ kqr->kqr_state &= ~KQR_PROCESSING;
+ kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0);
+
+ kqwl_req_unlock(kqwl);
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_END,
+ kqwl->kqwl_dynamicid, flags, 0);
+}
+
+/*
+ * Called with kqueue lock held.
+ */
+static void
+kqueue_end_processing(struct kqueue *kq, kq_index_t qos_index,
+ int nevents, unsigned int flags)
+{
+ struct knote *kn;
+ struct kqtailq *suppressq;
+ int procwait;
+
+ kqlock_held(kq);
+
+ assert((kq->kq_state & KQ_WORKQ) == 0);
+
+ if (kq->kq_state & KQ_WORKLOOP) {
+ return kqworkloop_end_processing((struct kqworkloop *)kq, nevents, flags);
+ }
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_END),
+ VM_KERNEL_UNSLIDE_OR_PERM(kq), flags);
+
+ assert(qos_index == QOS_INDEX_KQFILE);
+
+ /*
+ * Return suppressed knotes to their original state.
+ */
+ suppressq = kqueue_get_suppressed_queue(kq, qos_index);
+ while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
+ assert(kn->kn_status & KN_SUPPRESSED);
+ knote_unsuppress(kn);
+ }
+
+ procwait = (kq->kq_state & KQ_PROCWAIT);
+ kq->kq_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);
+
+ if (procwait) {
+ /* first wake up any thread already waiting to process */
+ waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
+ CAST_EVENT64_T(suppressq),
+ THREAD_AWAKENED,
+ WAITQ_ALL_PRIORITIES);
+ }
+}
+
+/*
+ * kqworkq_internal_bind - bind thread to processing workq kqueue
+ *
+ * Determines if the provided thread will be responsible for
+ * servicing the particular QoS class index specified in the
+ * parameters. Once the binding is done, any overrides that may
+ * be associated with the corresponding events can be applied.
+ *
+ * This should be called as soon as the thread identity is known,
+ * preferably while still at high priority during creation.
+ *
+ * - caller holds a reference on the process (and workq kq)
+ * - the thread MUST call kevent_qos_internal after being bound
+ * or the bucket of events may never be delivered.
+ * - Nothing locked
+ * (unless this is a synchronous bind, in which case the request lock is held)
+ */
+static int
+kqworkq_internal_bind(
+ struct proc *p,
+ kq_index_t qos_index,
+ thread_t thread,
+ unsigned int flags)
+{
+ struct kqueue *kq;
+ struct kqworkq *kqwq;
+ struct kqrequest *kqr;
+ struct uthread *ut = get_bsdthread_info(thread);
+
+ /* If no process workq, can't be our thread. */
+ kq = p->p_fd->fd_wqkqueue;
+
+ if (kq == NULL)
+ return 0;
+
+ assert(kq->kq_state & KQ_WORKQ);
+ kqwq = (struct kqworkq *)kq;
+
+ /*
+ * No need to bind the manager thread to any specific
+ * bucket, but still claim the thread.
+ */
+ if (qos_index == KQWQ_QOS_MANAGER) {
+ assert(ut->uu_kqueue_bound == NULL);
+ assert(flags & KEVENT_FLAG_WORKQ_MANAGER);
+ ut->uu_kqueue_bound = kq;
+ ut->uu_kqueue_qos_index = qos_index;
+ ut->uu_kqueue_flags = flags;
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_BIND),
+ thread_tid(thread), flags, qos_index);
+
+ return 1;
+ }
+
+ /*
+ * If this is a synchronous bind callback, the request
+ * lock is already held, so just do the bind.
+ */
+ if (flags & KEVENT_FLAG_SYNCHRONOUS_BIND) {
+ kqwq_req_held(kqwq);
+ /* strip out synchronous bind flag */
+ flags &= ~KEVENT_FLAG_SYNCHRONOUS_BIND;
+ kqworkq_bind_thread_impl(kqwq, qos_index, thread, flags);
+ return 1;
+ }
+
+ /*
+ * check the request that corresponds to our qos_index
+ * to see if there is an outstanding request.
+ */
+ kqr = kqworkq_get_request(kqwq, qos_index);
+ assert(kqr->kqr_qos_index == qos_index);
+ kqwq_req_lock(kqwq);
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_BIND),
+ thread_tid(thread), flags, qos_index, kqr->kqr_state);
+
+ if ((kqr->kqr_state & KQR_THREQUESTED) &&
+ (kqr->kqr_state & KQR_PROCESSING) == 0) {
+
+ if ((kqr->kqr_state & KQR_BOUND) &&
+ thread == kqr->kqr_thread) {
+ /* duplicate bind - claim the thread */
+ assert(ut->uu_kqueue_bound == kq);
+ assert(ut->uu_kqueue_qos_index == qos_index);
+ kqwq_req_unlock(kqwq);
+ return 1;
+ }
+ if ((kqr->kqr_state & (KQR_BOUND | KQWQ_THMANAGER)) == 0) {
+ /* ours to bind to */
+ kqworkq_bind_thread_impl(kqwq, qos_index, thread, flags);
+ kqwq_req_unlock(kqwq);
+ return 1;
+ }
+ }
+ kqwq_req_unlock(kqwq);
+ return 0;
+}
+
+static void
+kqworkloop_bind_thread_impl(struct kqworkloop *kqwl,
+ thread_t thread,
+ __assert_only unsigned int flags)
+{
+ assert(flags & KEVENT_FLAG_WORKLOOP);
+
+ /* the request object must be locked */
+ kqwl_req_held(kqwl);
+
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+ struct uthread *ut = get_bsdthread_info(thread);
+ boolean_t ipc_override_is_sync;
+ kq_index_t qos_index = kqworkloop_combined_qos(kqwl, &ipc_override_is_sync);
+
+ /* nobody else bound so finally bind (as a workloop) */
+ assert(kqr->kqr_state & KQR_THREQUESTED);
+ assert((kqr->kqr_state & (KQR_BOUND | KQR_PROCESSING)) == 0);
+ assert(thread != kqwl->kqwl_owner);
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_BIND),
+ kqwl->kqwl_dynamicid, (uintptr_t)thread_tid(thread),
+ qos_index,
+ (uintptr_t)(((uintptr_t)kqr->kqr_override_index << 16) |
+ (((uintptr_t)kqr->kqr_state) << 8) |
+ ((uintptr_t)ipc_override_is_sync)));
+
+ kqr->kqr_state |= KQR_BOUND | KQR_R2K_NOTIF_ARMED;
+ kqr->kqr_thread = thread;
+
+ /* bind the workloop to the uthread */
+ ut->uu_kqueue_bound = (struct kqueue *)kqwl;
+ ut->uu_kqueue_flags = flags;
+ ut->uu_kqueue_qos_index = qos_index;
+ assert(ut->uu_kqueue_override_is_sync == 0);
+ ut->uu_kqueue_override_is_sync = ipc_override_is_sync;
+ if (qos_index) {
+ thread_add_ipc_override(thread, qos_index);
+ }
+ if (ipc_override_is_sync) {
+ thread_add_sync_ipc_override(thread);
+ }
+}
+
+/*
+ * workloop_fulfill_threadreq - bind thread to processing workloop
+ *
+ * The provided thread will be responsible for delivering events
+ * associated with the given kqrequest. Bind it and get ready for
+ * the thread to eventually arrive.
+ *
+ * If WORKLOOP_FULFILL_THREADREQ_SYNC is specified, the callback is
+ * being made within the context of the pthread_functions->workq_threadreq
+ * callout. In this case, the request structure is already locked.
+ */
+int
+workloop_fulfill_threadreq(struct proc *p,
+ workq_threadreq_t req,
+ thread_t thread,
+ int flags)
+{
+ int sync = (flags & WORKLOOP_FULFILL_THREADREQ_SYNC);
+ int cancel = (flags & WORKLOOP_FULFILL_THREADREQ_CANCEL);
+ struct kqrequest *kqr;
+ struct kqworkloop *kqwl;
+
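+ /* recover the enclosing kqworkloop from its embedded thread request */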
+ kqwl = (struct kqworkloop *)((uintptr_t)req -
+ offsetof(struct kqworkloop, kqwl_request) -
+ offsetof(struct kqrequest, kqr_req));
+ kqr = &kqwl->kqwl_request;
+
+ /* validate we're looking at something valid */
+ if (kqwl->kqwl_p != p ||
+ (kqwl->kqwl_state & KQ_WORKLOOP) == 0) {
+ assert(kqwl->kqwl_p == p);
+ assert(kqwl->kqwl_state & KQ_WORKLOOP);
+ return EINVAL;
+ }
+
+ if (!sync)
+ kqwl_req_lock(kqwl);
+
+ /* Should be a pending request */
+ if ((kqr->kqr_state & KQR_BOUND) ||
+ (kqr->kqr_state & KQR_THREQUESTED) == 0) {
+
+ assert((kqr->kqr_state & KQR_BOUND) == 0);
+ assert(kqr->kqr_state & KQR_THREQUESTED);
+ if (!sync)
+ kqwl_req_unlock(kqwl);
+ return EINPROGRESS;
+ }
+
+ assert((kqr->kqr_state & KQR_DRAIN) == 0);
+
+ /*
+ * Is it a cancel indication from pthread.
+ * If so, we must be exiting/exec'ing. Forget
+ * our pending request.
+ */
+ if (cancel) {
+ kqr->kqr_state &= ~KQR_THREQUESTED;
+ kqr->kqr_state |= KQR_DRAIN;
+ } else {
+ /* do the actual bind? */
+ kqworkloop_bind_thread_impl(kqwl, thread, KEVENT_FLAG_WORKLOOP);
+ }
+
+ if (!sync)
+ kqwl_req_unlock(kqwl);
+
+ if (cancel)
+ kqueue_release_last(p, &kqwl->kqwl_kqueue); /* may dealloc kq */
+
+ return 0;
+}
+
+
+/*
+ * kevent_qos_internal_bind - bind thread to processing kqueue
+ *
+ * Indicates that the provided thread will be responsible for
+ * servicing the particular QoS class index specified in the
+ * parameters. Once the binding is done, any overrides that may
+ * be associated with the corresponding events can be applied.
+ *
+ * This should be called as soon as the thread identity is known,
+ * preferably while still at high priority during creation.
+ *
+ * - caller holds a reference on the kqueue.
+ * - the thread MUST call kevent_qos_internal after being bound
+ * or the bucket of events may never be delivered.
+ * - Nothing locked (may take mutex or block).
+ */
+
+int
+kevent_qos_internal_bind(
+ struct proc *p,
+ int qos_class,
+ thread_t thread,
+ unsigned int flags)
+{
+ kq_index_t qos_index;
+
+ assert(flags & KEVENT_FLAG_WORKQ);
+
+ if (thread == THREAD_NULL || (flags & KEVENT_FLAG_WORKQ) == 0) {
+ return EINVAL;
+ }
+
+ /* get the qos index we're going to service */
+ qos_index = qos_index_for_servicer(qos_class, thread, flags);
+
+ if (kqworkq_internal_bind(p, qos_index, thread, flags))
+ return 0;
+
+ return EINPROGRESS;
+}
+
+
+static void
+kqworkloop_internal_unbind(
+ struct proc *p,
+ thread_t thread,
+ unsigned int flags)
+{
+ struct kqueue *kq;
+ struct kqworkloop *kqwl;
+ struct uthread *ut = get_bsdthread_info(thread);
+
+ assert(ut->uu_kqueue_bound != NULL);
+ kq = ut->uu_kqueue_bound;
+ assert(kq->kq_state & KQ_WORKLOOP);
+ kqwl = (struct kqworkloop *)kq;
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND),
+ kqwl->kqwl_dynamicid, (uintptr_t)thread_tid(thread),
+ flags, 0);
+
+ if (!(kq->kq_state & KQ_NO_WQ_THREAD)) {
+ assert(is_workqueue_thread(thread));
+
+ kqlock(kq);
+ kqworkloop_unbind_thread(kqwl, thread, flags);
+ kqunlock(kq);
+
+ /* If last reference, dealloc the workloop kq */
+ kqueue_release_last(p, kq);
+ } else {
+ assert(!is_workqueue_thread(thread));
+ kevent_servicer_detach_thread(p, kqwl->kqwl_dynamicid, thread, flags, kq);
+ }
+}
+
+static void
+kqworkq_internal_unbind(
+ struct proc *p,
+ kq_index_t qos_index,
+ thread_t thread,
+ unsigned int flags)
+{
+ struct kqueue *kq;
+ struct kqworkq *kqwq;
+ struct uthread *ut;
+ kq_index_t end_index;
+
+ assert(thread == current_thread());
+ ut = get_bsdthread_info(thread);
+
+ kq = p->p_fd->fd_wqkqueue;
+ assert(kq->kq_state & KQ_WORKQ);
+ assert(ut->uu_kqueue_bound == kq);
+
+ kqwq = (struct kqworkq *)kq;
+
+ /* end servicing any requests we might own */
+ end_index = (qos_index == KQWQ_QOS_MANAGER) ?
+ 0 : qos_index;
+ kqlock(kq);
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND),
+ (uintptr_t)thread_tid(thread), flags, qos_index);
+
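+ /*
+ * Walk from qos_index down to end_index (inclusive), ending processing
+ * for each bucket: a manager may have been servicing any bucket, so it
+ * ends processing for all of them; a regular servicer ends only its own.
+ */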
+ do {
+ kqworkq_end_processing(kqwq, qos_index, flags);
+ } while (qos_index-- > end_index);
+
+ ut->uu_kqueue_bound = NULL;
+ ut->uu_kqueue_qos_index = 0;
+ ut->uu_kqueue_flags = 0;
+
+ kqunlock(kq);
+}
+
+/*
+ * kevent_qos_internal_unbind - unbind thread from processing kqueue
+ *
+ * End processing the per-QoS bucket of events and allow other threads
+ * to be requested for future servicing.
+ *
+ * caller holds a reference on the kqueue.
+ * thread is the current thread.
+ */
+
+int
+kevent_qos_internal_unbind(
+ struct proc *p,
+ int qos_class,
+ thread_t thread,
+ unsigned int flags)
+{
+#pragma unused(qos_class)
+
+ struct uthread *ut;
+ struct kqueue *kq;
+ unsigned int bound_flags;
+ bool check_flags;
+
+ ut = get_bsdthread_info(thread);
+ if (ut->uu_kqueue_bound == NULL) {
+ /* early out if we are already unbound */
+ assert(ut->uu_kqueue_flags == 0);
+ assert(ut->uu_kqueue_qos_index == 0);
+ assert(ut->uu_kqueue_override_is_sync == 0);
+ return EALREADY;
+ }
+
+ assert(flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP));
+ assert(thread == current_thread());
+
+ check_flags = flags & KEVENT_FLAG_UNBIND_CHECK_FLAGS;
+
+ /* Get the kqueue we started with */
+ kq = ut->uu_kqueue_bound;
+ assert(kq != NULL);
+ assert(kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));
+
+ /* get flags and QoS parameters we started with */
+ bound_flags = ut->uu_kqueue_flags;
+
+ /* Unbind from the class of workq */
+ if (kq->kq_state & KQ_WORKQ) {
+ if (check_flags && !(flags & KEVENT_FLAG_WORKQ)) {
+ return EINVAL;
+ }
+
+ kqworkq_internal_unbind(p, ut->uu_kqueue_qos_index, thread, bound_flags);
+ } else {
+ if (check_flags && !(flags & KEVENT_FLAG_WORKLOOP)) {
+ return EINVAL;
+ }
+
+ kqworkloop_internal_unbind(p, thread, bound_flags);
+ }
+
+ return 0;
+}
+
+/*
+ * kqueue_process - process the triggered events in a kqueue
+ *
+ * Walk the queued knotes and validate that they are
+ * really still triggered events by calling the filter
+ * routines (if necessary). Hold a use reference on
+ * the knote to avoid it being detached. For each event
+ * that is still considered triggered, invoke the
+ * callback routine provided.
+ *
+ * caller holds a reference on the kqueue.
+ * kqueue locked on entry and exit - but may be dropped
+ * kqueue list locked (held for duration of call)
+ */
+
+static int
+kqueue_process(struct kqueue *kq,
+ kevent_callback_t callback,
+ void *callback_data,
+ struct filt_process_s *process_data,
+ int *countp,
+ struct proc *p)
+{
+ unsigned int flags = process_data ? process_data->fp_flags : 0;
+ struct uthread *ut = get_bsdthread_info(current_thread());
+ kq_index_t start_index, end_index, i;
+ struct knote *kn;
+ int nevents = 0;
+ int error = 0;
+
+ /*
+ * Based on the mode of the kqueue and the bound QoS of the servicer,
+ * determine the range of thread requests that need checking
+ */
+ if (kq->kq_state & KQ_WORKQ) {
+ if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
+ start_index = KQWQ_QOS_MANAGER;
+ } else if (ut->uu_kqueue_bound != kq) {
+ return EJUSTRETURN;
+ } else {
+ start_index = ut->uu_kqueue_qos_index;
+ }
+
+ /* manager services every request in a workq kqueue */
+ assert(start_index > 0 && start_index <= KQWQ_QOS_MANAGER);
+ end_index = (start_index == KQWQ_QOS_MANAGER) ? 0 : start_index;
+
+ } else if (kq->kq_state & KQ_WORKLOOP) {
+ if (ut->uu_kqueue_bound != kq)
+ return EJUSTRETURN;
+
+ /*
+ * Single request servicing
+ * we want to deliver all events, regardless of the QOS
+ */
+ start_index = end_index = THREAD_QOS_UNSPECIFIED;
+ } else {
+ start_index = end_index = QOS_INDEX_KQFILE;
+ }
+
+ i = start_index;
+
+ do {
+ if (kqueue_begin_processing(kq, i, flags) == -1) {
+ *countp = 0;
+ /* Nothing to process */
+ continue;
+ }
+
+ /*
+ * loop through the enqueued knotes associated with this request,
+ * processing each one. Each request may have several queues
+ * of knotes to process (depending on the type of kqueue) so we
+ * have to loop through all the queues as long as we have additional
+ * space.
+ */
+ error = 0;
+
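+ /* walk from the highest (most overridden) queue down to the base queue */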
+ struct kqtailq *base_queue = kqueue_get_base_queue(kq, i);
+ struct kqtailq *queue = kqueue_get_high_queue(kq, i);
+ do {
+ while (error == 0 && (kn = TAILQ_FIRST(queue)) != NULL) {
+ error = knote_process(kn, callback, callback_data, process_data, p);
+ if (error == EJUSTRETURN) {
+ error = 0;
+ } else {
+ nevents++;
+ }
+ /* error is EWOULDBLOCK when the out event array is full */
+ }
+ } while (error == 0 && queue-- > base_queue);
+
+ if ((kq->kq_state & KQ_WORKQ) == 0) {
+ kqueue_end_processing(kq, i, nevents, flags);
+ }
+
+ if (error == EWOULDBLOCK) {
+ /* break out if no more space for additional events */
+ error = 0;
+ break;
+ }
+ } while (i-- > end_index);
+
+ *countp = nevents;
+ return (error);
+}
+
+static void
+kqueue_scan_continue(void *data, wait_result_t wait_result)
+{
+ thread_t self = current_thread();
+ uthread_t ut = (uthread_t)get_bsdthread_info(self);
+ struct _kqueue_scan * cont_args = &ut->uu_kevent.ss_kqueue_scan;
+ struct kqueue *kq = (struct kqueue *)data;
+ struct filt_process_s *process_data = cont_args->process_data;
+ int error;
+ int count;
+
+ /* convert the (previous) wait_result to a proper error */
+ switch (wait_result) {
+ case THREAD_AWAKENED: {
+ kqlock(kq);
+ retry:
+ error = kqueue_process(kq, cont_args->call, cont_args->data,
+ process_data, &count, current_proc());
+ if (error == 0 && count == 0) {
+ if (kq->kq_state & KQ_DRAIN) {
+ kqunlock(kq);
+ goto drain;
+ }
+
+ if (kq->kq_state & KQ_WAKEUP)
+ goto retry;
+
+ waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
+ KQ_EVENT, THREAD_ABORTSAFE,
+ cont_args->deadline);
+ kq->kq_state |= KQ_SLEEP;
+ kqunlock(kq);
+ thread_block_parameter(kqueue_scan_continue, kq);
+ /* NOTREACHED */
+ }
+ kqunlock(kq);
+ } break;
+ case THREAD_TIMED_OUT:
+ error = EWOULDBLOCK;
+ break;
+ case THREAD_INTERRUPTED:
+ error = EINTR;
+ break;
+ case THREAD_RESTART:
+ drain:
+ error = EBADF;
+ break;
+ default:
+ panic("%s: - invalid wait_result (%d)", __func__,
+ wait_result);
+ error = 0;
+ }
+
+ /* call the continuation with the results */
+ assert(cont_args->cont != NULL);
+ (cont_args->cont)(kq, cont_args->data, error);
+}
+
+
+/*
+ * kqueue_scan - scan and wait for events in a kqueue
+ *
+ * Process the triggered events in a kqueue.
+ *
+ * If there are no events triggered, arrange to
+ * wait for them. If the caller provided a
+ * continuation routine, then kqueue_scan will
+ * not return directly; the continuation is
+ * invoked with the results once events arrive
+ * or the wait is interrupted.
+ *
+ * The callback routine must be valid.
+ * The caller must hold a use-count reference on the kq.
+ */
+
+int
+kqueue_scan(struct kqueue *kq,
+ kevent_callback_t callback,
+ kqueue_continue_t continuation,
+ void *callback_data,
+ struct filt_process_s *process_data,
+ struct timeval *atvp,
+ struct proc *p)
+{
+ thread_continue_t cont = THREAD_CONTINUE_NULL;
+ unsigned int flags;
+ uint64_t deadline;
+ int error;
+ int first;
+ int fd;
+
+ assert(callback != NULL);
+
+ /*
+ * Determine which QoS index we are servicing
+ */
+ flags = (process_data) ? process_data->fp_flags : 0;
+ fd = (process_data) ? process_data->fp_fd : -1;
+
+ first = 1;
+ for (;;) {
+ wait_result_t wait_result;
+ int count;
+
+ /*
+ * Make a pass through the kq to find events already
+ * triggered.
+ */
+ kqlock(kq);
+ error = kqueue_process(kq, callback, callback_data,
+ process_data, &count, p);
+ if (error || count)
+ break; /* lock still held */
+
+ /* looks like we have to consider blocking */
+ if (first) {
+ first = 0;
+ /* convert the timeout to a deadline once */
+ if (atvp->tv_sec || atvp->tv_usec) {
+ uint64_t now;
+
+ clock_get_uptime(&now);
+ nanoseconds_to_absolutetime((uint64_t)atvp->tv_sec * NSEC_PER_SEC +
+ atvp->tv_usec * (long)NSEC_PER_USEC,
+ &deadline);
+ if (now >= deadline) {
+ /* non-blocking call */
+ error = EWOULDBLOCK;
+ break; /* lock still held */
+ }
+ deadline -= now;
+ clock_absolutetime_interval_to_deadline(deadline, &deadline);
+ } else {
+ deadline = 0; /* block forever */
+ }
+
+ if (continuation) {
+ uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
+ struct _kqueue_scan *cont_args = &ut->uu_kevent.ss_kqueue_scan;
+
+ cont_args->call = callback;
+ cont_args->cont = continuation;
+ cont_args->deadline = deadline;
+ cont_args->data = callback_data;
+ cont_args->process_data = process_data;
+ cont = kqueue_scan_continue;
+ }
+ }
+
+ if (kq->kq_state & KQ_DRAIN) {
+ kqunlock(kq);
+ return EBADF;
+ }
+
+ /* If awakened during processing, try again */
+ if (kq->kq_state & KQ_WAKEUP) {
+ kqunlock(kq);
+ continue;
+ }
+
+ /* go ahead and wait */
+ waitq_assert_wait64_leeway((struct waitq *)&kq->kq_wqs,
+ KQ_EVENT, THREAD_ABORTSAFE,
+ TIMEOUT_URGENCY_USER_NORMAL,
+ deadline, TIMEOUT_NO_LEEWAY);
+ kq->kq_state |= KQ_SLEEP;
+ kqunlock(kq);
+ wait_result = thread_block_parameter(cont, kq);
+ /* NOTREACHED if (continuation != NULL) */
+
+ switch (wait_result) {
+ case THREAD_AWAKENED:
+ continue;
+ case THREAD_TIMED_OUT:
+ return EWOULDBLOCK;
+ case THREAD_INTERRUPTED:
+ return EINTR;
+ case THREAD_RESTART:
+ return EBADF;
+ default:
+ panic("%s: - bad wait_result (%d)", __func__,
+ wait_result);
+ error = 0;
+ }
+ }
+ kqunlock(kq);
+ return (error);
+}
+
+
+/*
+ * XXX
+ * This could be expanded to call kqueue_scan, if desired.
+ */
+/*ARGSUSED*/
+static int
+kqueue_read(__unused struct fileproc *fp,
+ __unused struct uio *uio,
+ __unused int flags,
+ __unused vfs_context_t ctx)
+{
+ return (ENXIO);
+}
+
+/*ARGSUSED*/
+static int
+kqueue_write(__unused struct fileproc *fp,
+ __unused struct uio *uio,
+ __unused int flags,
+ __unused vfs_context_t ctx)
+{
+ return (ENXIO);
+}
+
+/*ARGSUSED*/
+static int
+kqueue_ioctl(__unused struct fileproc *fp,
+ __unused u_long com,
+ __unused caddr_t data,
+ __unused vfs_context_t ctx)
+{
+ return (ENOTTY);
+}
+
+/*ARGSUSED*/
+static int
+kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
+ __unused vfs_context_t ctx)
+{
+ struct kqueue *kq = (struct kqueue *)fp->f_data;
+ struct kqtailq *queue;
+ struct kqtailq *suppressq;
+ struct knote *kn;
+ int retnum = 0;
+
+ if (which != FREAD)
+ return (0);
+
+ kqlock(kq);
+
+ assert((kq->kq_state & KQ_WORKQ) == 0);
+
+ /*
+ * If this is the first pass, link the wait queue associated with
+ * the kqueue onto the wait queue set for the select(). Normally we
+ * use selrecord() for this, but it uses the wait queue within the
+ * selinfo structure and we need to use the main one for the kqueue to
+ * catch events from KN_STAYQUEUED sources. So we do the linkage manually.
+ * (The select() call will unlink them when it ends).
+ */
+ if (wq_link_id != NULL) {
+ thread_t cur_act = current_thread();
+ struct uthread * ut = get_bsdthread_info(cur_act);
+
+ kq->kq_state |= KQ_SEL;
+ waitq_link((struct waitq *)&kq->kq_wqs, ut->uu_wqset,
+ WAITQ_SHOULD_LOCK, (uint64_t *)wq_link_id);
+
+ /* always consume the reserved link object */
+ waitq_link_release(*(uint64_t *)wq_link_id);
+ *(uint64_t *)wq_link_id = 0;
+
+ /*
+ * selprocess() is expecting that we send it back the waitq
+ * that was just added to the thread's waitq set. In order
+ * to not change the selrecord() API (which is exported to
+ * kexts), we pass this value back through the
+ * void *wq_link_id pointer we were passed. We need to use
+ * memcpy here because the pointer may not be properly aligned
+ * on 32-bit systems.
+ */
+ void *wqptr = &kq->kq_wqs;
+ memcpy(wq_link_id, (void *)&wqptr, sizeof(void *));
+ }
+
+ if (kqueue_begin_processing(kq, QOS_INDEX_KQFILE, 0) == -1) {
+ kqunlock(kq);
+ return (0);
+ }
+
+ queue = kqueue_get_base_queue(kq, QOS_INDEX_KQFILE);
+ if (!TAILQ_EMPTY(queue)) {
+ /*
+ * there is something queued - but it might be a
+ * KN_STAYACTIVE knote, which may or may not have
+ * any events pending. Otherwise, we have to walk
+ * the list of knotes to see, and peek at the
+ * (non-vanished) stay-active ones to be really sure.
+ */
+ while ((kn = (struct knote *)TAILQ_FIRST(queue)) != NULL) {
+ if (kn->kn_status & KN_ACTIVE) {
+ retnum = 1;
+ goto out;
+ }
+ assert(kn->kn_status & KN_STAYACTIVE);
+ knote_suppress(kn);
+ }
+
+ /*
+ * There were no regular events on the queue, so take
+ * a deeper look at the stay-queued ones we suppressed.
+ */
+ suppressq = kqueue_get_suppressed_queue(kq, QOS_INDEX_KQFILE);
+ while ((kn = (struct knote *)TAILQ_FIRST(suppressq)) != NULL) {
+ unsigned peek = 1;
+
+ assert(!knoteuse_needs_boost(kn, NULL));
+
+ /* If didn't vanish while suppressed - peek at it */
+ if (kqlock2knoteuse(kq, kn, KNUSE_NONE)) {
+ peek = knote_fops(kn)->f_peek(kn);
+
+ /* if it dropped while getting lock - move on */
+ if (!knoteuse2kqlock(kq, kn, KNUSE_NONE))
+ continue;
+ }
+
+ /* unsuppress it */
+ knote_unsuppress(kn);
+
+ /* has data or it has to report a vanish */
+ if (peek > 0) {
+ retnum = 1;
+ goto out;
+ }
+ }
+ }
+
+out:
+ kqueue_end_processing(kq, QOS_INDEX_KQFILE, retnum, 0);
+ kqunlock(kq);
+ return (retnum);
+}
+
+/*
+ * kqueue_close -
+ */
+/*ARGSUSED*/
+static int
+kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
+{
+ struct kqfile *kqf = (struct kqfile *)fg->fg_data;
+
+ assert((kqf->kqf_state & KQ_WORKQ) == 0);
+ kqueue_dealloc(&kqf->kqf_kqueue);
+ fg->fg_data = NULL;
+ return (0);
+}
+
+/*ARGSUSED*/
+/*
+ * The caller has taken a use-count reference on this kqueue and will donate it
+ * to the kqueue we are being added to. This keeps the kqueue from closing until
+ * that relationship is torn down.
+ */
+static int
+kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn,
+ __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx)
+{
+ struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
+ struct kqueue *kq = &kqf->kqf_kqueue;
+ struct kqueue *parentkq = knote_get_kq(kn);
+
+ assert((kqf->kqf_state & KQ_WORKQ) == 0);
+
+ if (parentkq == kq ||
+ kn->kn_filter != EVFILT_READ) {
+ kn->kn_flags = EV_ERROR;
+ kn->kn_data = EINVAL;
+ return 0;
+ }
+
+ /*
+ * We have to avoid creating a cycle when nesting kqueues
+ * inside another. Rather than trying to walk the whole
+ * potential DAG of nested kqueues, we just use a simple
+ * ceiling protocol. When a kqueue is inserted into another,
+ * we check that the (future) parent is not already nested
+ * into another kqueue at a lower level than the potential
+ * child (because it could indicate a cycle). If that test
+ * passes, we just mark the nesting levels accordingly.
+ */
+
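+ /*
+ * Example: inserting a child kqueue already marked level 3 into a
+ * parent marked level 2 is rejected (possible cycle); otherwise a
+ * fresh parent (level 0) is raised to max(2, child level + 1).
+ */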
+ kqlock(parentkq);
+ if (parentkq->kq_level > 0 &&
+ parentkq->kq_level < kq->kq_level)
+ {
+ kqunlock(parentkq);
+ kn->kn_flags = EV_ERROR;
+ kn->kn_data = EINVAL;
+ return 0;
+ } else {
+ /* set parent level appropriately */
+ if (parentkq->kq_level == 0)
+ parentkq->kq_level = 2;
+ if (parentkq->kq_level < kq->kq_level + 1)
+ parentkq->kq_level = kq->kq_level + 1;
+ kqunlock(parentkq);
+
+ kn->kn_filtid = EVFILTID_KQREAD;
+ kqlock(kq);
+ KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
+ /* indicate nesting in child, if needed */
+ if (kq->kq_level == 0)
+ kq->kq_level = 1;
+
+ int count = kq->kq_count;
+ kqunlock(kq);
+ return (count > 0);
+ }
+}
+
+/*
+ * kqueue_drain - called when kq is closed
+ */
+/*ARGSUSED*/
+static int
+kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
+{
+ struct kqueue *kq = (struct kqueue *)fp->f_fglob->fg_data;
+
+ assert((kq->kq_state & KQ_WORKQ) == 0);
+
+ kqlock(kq);
+ kq->kq_state |= KQ_DRAIN;
+ kqueue_interrupt(kq);
+ kqunlock(kq);
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
+{
+ assert((kq->kq_state & KQ_WORKQ) == 0);
+
+ kqlock(kq);
+ if (isstat64 != 0) {
+ struct stat64 *sb64 = (struct stat64 *)ub;
+
+ bzero((void *)sb64, sizeof(*sb64));
+ sb64->st_size = kq->kq_count;
+ if (kq->kq_state & KQ_KEV_QOS)
+ sb64->st_blksize = sizeof(struct kevent_qos_s);
+ else if (kq->kq_state & KQ_KEV64)
+ sb64->st_blksize = sizeof(struct kevent64_s);
+ else if (IS_64BIT_PROCESS(p))
+ sb64->st_blksize = sizeof(struct user64_kevent);
+ else
+ sb64->st_blksize = sizeof(struct user32_kevent);
+ sb64->st_mode = S_IFIFO;
+ } else {
+ struct stat *sb = (struct stat *)ub;
+
+ bzero((void *)sb, sizeof(*sb));
+ sb->st_size = kq->kq_count;
+ if (kq->kq_state & KQ_KEV_QOS)
+ sb->st_blksize = sizeof(struct kevent_qos_s);
+ else if (kq->kq_state & KQ_KEV64)
+ sb->st_blksize = sizeof(struct kevent64_s);
+ else if (IS_64BIT_PROCESS(p))
+ sb->st_blksize = sizeof(struct user64_kevent);
+ else
+ sb->st_blksize = sizeof(struct user32_kevent);
+ sb->st_mode = S_IFIFO;
+ }
+ kqunlock(kq);
+ return (0);
+}
+
+/*
+ * Interact with the pthread kext to request a servicing there.
+ * Eventually, this will request threads at specific QoS levels.
+ * For now, it only requests a dispatch-manager-QoS thread, and
+ * only one-at-a-time.
+ *
+ * - Caller holds the workq request lock
+ *
+ * - May be called with the kqueue's wait queue set locked,
+ * so cannot do anything that could recurse on that.
+ */
+static void
+kqworkq_request_thread(
+ struct kqworkq *kqwq,
+ kq_index_t qos_index)
+{
+ struct kqrequest *kqr;
+
+ assert(kqwq->kqwq_state & KQ_WORKQ);
+ assert(qos_index < KQWQ_NQOS);
+
+ kqr = kqworkq_get_request(kqwq, qos_index);
+
+ assert(kqr->kqr_state & KQR_WAKEUP);
+
+ /*
+ * If we have already requested a thread, and it hasn't
+ * started processing yet, there's no use hammering away
+ * on the pthread kext.
+ */
+ if (kqr->kqr_state & KQR_THREQUESTED)
+ return;
+
+ assert((kqr->kqr_state & KQR_BOUND) == 0);
+
+ /* request additional workq threads if appropriate */
+ if (pthread_functions != NULL &&
+ pthread_functions->workq_reqthreads != NULL) {
+ unsigned int flags = KEVENT_FLAG_WORKQ;
+ unsigned long priority;
+ thread_t wqthread;
+
+ /* Compute the appropriate pthread priority */
+ priority = qos_from_qos_index(qos_index);
+
+#if 0
+ /* JMM - for now remain compatible with old invocations */
+ /* set the over-commit flag on the request if needed */
+ if (kqr->kqr_state & KQR_THOVERCOMMIT)
+ priority |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
+#endif /* 0 */
+
+ /* Compute a priority based on qos_index. */
+ struct workq_reqthreads_req_s request = {
+ .priority = priority,
+ .count = 1
+ };
+
+ /* mark that we are making a request */
+ kqr->kqr_state |= KQR_THREQUESTED;
+ if (qos_index == KQWQ_QOS_MANAGER)
+ kqr->kqr_state |= KQWQ_THMANAGER;
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST),
+ 0, qos_index,
+ (((uintptr_t)kqr->kqr_override_index << 8) |
+ (uintptr_t)kqr->kqr_state));
+ wqthread = (*pthread_functions->workq_reqthreads)(kqwq->kqwq_p, 1, &request);
+
+ /* We've been switched to the emergency/manager thread */
+ if (wqthread == (thread_t)-1) {
+ assert(qos_index != KQWQ_QOS_MANAGER);
+ kqr->kqr_state |= KQWQ_THMANAGER;
+ return;
+ }
+
+ /*
+ * bind the returned thread identity
+ * This goes away when we switch to synchronous callback
+ * binding from the pthread kext.
+ */
+ if (wqthread != NULL) {
+ kqworkq_bind_thread_impl(kqwq, qos_index, wqthread, flags);
+ }
+ }
+}
+
+/*
+ * If we aren't already busy processing events [for this QoS],
+ * request workq thread support as appropriate.
+ *
+ * TBD - for now, we don't segregate out processing by QoS.
+ *
+ * - May be called with the kqueue's wait queue set locked,
+ * so cannot do anything that could recurse on that.
+ */
+static void
+kqworkq_request_help(
+ struct kqworkq *kqwq,
+ kq_index_t qos_index)
+{
+ struct kqrequest *kqr;
+
+ /* convert to thread qos value */
+ assert(qos_index < KQWQ_NQOS);
+
+ kqwq_req_lock(kqwq);
+ kqr = kqworkq_get_request(kqwq, qos_index);
+
+ if ((kqr->kqr_state & KQR_WAKEUP) == 0) {
+ /* Indicate that we needed help from this request */
+ kqr->kqr_state |= KQR_WAKEUP;
+
+ /* Go assure a thread request has been made */
+ kqworkq_request_thread(kqwq, qos_index);
+ }
+ kqwq_req_unlock(kqwq);
+}
+
+static void
+kqworkloop_threadreq_impl(struct kqworkloop *kqwl, kq_index_t qos_index)
+{
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+ unsigned long pri = pthread_priority_for_kqrequest(kqr, qos_index);
+ int op, ret;
+
+ assert((kqr->kqr_state & (KQR_THREQUESTED | KQR_BOUND)) == KQR_THREQUESTED);
+
+ /*
+ * New-style thread request supported. Provide
+ * the pthread kext a pointer to a workq_threadreq_s
+ * structure for its use until a corresponding
+ * workloop_fulfill_threadreq callback.
+ */
+ if (current_proc() == kqwl->kqwl_kqueue.kq_p) {
+ op = WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL;
+ } else {
+ op = WORKQ_THREADREQ_WORKLOOP;
+ }
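+ /*
+ * Prefer the no-thread-call variant when requesting from the target
+ * process's own context; if the pthread kext doesn't support it
+ * (ENOTSUP below), fall back to the plain workloop request and retry.
+ */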
+again:
+ ret = (*pthread_functions->workq_threadreq)(kqwl->kqwl_p, &kqr->kqr_req,
+ op, pri, 0);
+ switch (ret) {
+ case ENOTSUP:
+ assert(op == WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL);
+ op = WORKQ_THREADREQ_WORKLOOP;
+ goto again;
+
+ case ECANCELED:
+ case EINVAL:
+ /*
+ * Process is shutting down or exec'ing.
+ * All the kqueues are going to be cleaned up
+ * soon. Forget we even asked for a thread -
+ * and make sure we don't ask for more.
+ */
+ kqueue_release((struct kqueue *)kqwl, KQUEUE_CANT_BE_LAST_REF);
+ kqr->kqr_state &= ~KQR_THREQUESTED;
+ kqr->kqr_state |= KQR_DRAIN;
+ break;
+
+ case EAGAIN:
+ assert(op == WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL);
+ act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ);
+ break;
+
+ default:
+ assert(ret == 0);
+ }
+}
+
+static void
+kqworkloop_threadreq_modify(struct kqworkloop *kqwl, kq_index_t qos_index)
+{
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+ unsigned long pri = pthread_priority_for_kqrequest(kqr, qos_index);
+ int ret, op = WORKQ_THREADREQ_CHANGE_PRI_NO_THREAD_CALL;
+
+ assert((kqr->kqr_state & (KQR_THREQUESTED | KQR_BOUND)) == KQR_THREQUESTED);
+
+ if (current_proc() == kqwl->kqwl_kqueue.kq_p) {
+ op = WORKQ_THREADREQ_CHANGE_PRI_NO_THREAD_CALL;
+ } else {
+ op = WORKQ_THREADREQ_CHANGE_PRI;
+ }
+again:
+ ret = (*pthread_functions->workq_threadreq_modify)(kqwl->kqwl_p,
+ &kqr->kqr_req, op, pri, 0);
+ switch (ret) {
+ case ENOTSUP:
+ assert(op == WORKQ_THREADREQ_CHANGE_PRI_NO_THREAD_CALL);
+ op = WORKQ_THREADREQ_CHANGE_PRI;
+ goto again;
+
+ case EAGAIN:
+ assert(op == WORKQ_THREADREQ_CHANGE_PRI_NO_THREAD_CALL);
+ act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ);
+ break;
+
+ case ECANCELED:
+ case EINVAL:
+ case 0:
+ break;
+
+ default:
+ assert(ret == 0);
+ }
+}
+
+/*
+ * Interact with the pthread kext to request a servicing thread.
+ * This will request a single thread at the highest QoS level
+ * for which there is work (whether that was the requested QoS
+ * for an event or an override applied to a lower-QoS request).
+ *
+ * - Caller holds the workloop request lock
+ *
+ * - May be called with the kqueue's wait queue set locked,
+ * so cannot do anything that could recurse on that.
+ */
+static void
+kqworkloop_request_thread(struct kqworkloop *kqwl, kq_index_t qos_index)
+{
+ struct kqrequest *kqr;
+
+ assert(kqwl->kqwl_state & KQ_WORKLOOP);
+
+ kqr = &kqwl->kqwl_request;
+
+ assert(kqwl->kqwl_owner == THREAD_NULL);
+ assert((kqr->kqr_state & KQR_BOUND) == 0);
+ assert((kqr->kqr_state & KQR_THREQUESTED) == 0);
+ assert(!(kqwl->kqwl_kqueue.kq_state & KQ_NO_WQ_THREAD));
+
+ /* If we're draining thread requests, just bail */
+ if (kqr->kqr_state & KQR_DRAIN)
+ return;
+
+ if (pthread_functions != NULL &&
+ pthread_functions->workq_threadreq != NULL) {
+ /*
+ * set request state flags, etc... before calling pthread
+ * This assures they are set before a possible synchronous
+ * callback to workloop_fulfill_threadreq().
+ */
+ kqr->kqr_state |= KQR_THREQUESTED;
+
+ /* Add a thread request reference on the kqueue. */
+ kqueue_retain((struct kqueue *)kqwl);
+
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST),
+ kqwl->kqwl_dynamicid,
+ 0, qos_index, kqr->kqr_state);
+ kqworkloop_threadreq_impl(kqwl, qos_index);
+ } else {
+ panic("kqworkloop_request_thread");
+ return;
+ }
+}
+
+static void
+kqworkloop_update_sync_override_state(struct kqworkloop *kqwl, boolean_t sync_ipc_override)
+{
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+ kqwl_req_lock(kqwl);
+ kqr->kqr_has_sync_override = sync_ipc_override;
+ kqwl_req_unlock(kqwl);
+
+}
+
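+/*
+ * kqworkloop_combined_qos - compute the effective QoS for a workloop
+ *
+ * Returns the maximum of the requested async QoS, the async override, and
+ * the dispatch sync waiters QoS. If a sync IPC override is in effect,
+ * the result is promoted to user-interactive and *ipc_override_is_sync
+ * is set.
+ */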
+static inline kq_index_t
+kqworkloop_combined_qos(struct kqworkloop *kqwl, boolean_t *ipc_override_is_sync)
+{
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+ kq_index_t override;
+
+ *ipc_override_is_sync = FALSE;
+ override = MAX(MAX(kqr->kqr_qos_index, kqr->kqr_override_index),
+ kqr->kqr_dsync_waiters_qos);
+
+ if (kqr->kqr_sync_suppress_count > 0 || kqr->kqr_has_sync_override) {
+ *ipc_override_is_sync = TRUE;
+ override = THREAD_QOS_USER_INTERACTIVE;
+ }
+ return override;
+}
+
+static inline void
+kqworkloop_request_fire_r2k_notification(struct kqworkloop *kqwl)
+{
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+
+ kqwl_req_held(kqwl);
+
+ if (kqr->kqr_state & KQR_R2K_NOTIF_ARMED) {
+ assert(kqr->kqr_state & KQR_BOUND);
+ assert(kqr->kqr_thread);
+
+ kqr->kqr_state &= ~KQR_R2K_NOTIF_ARMED;
+ act_set_astkevent(kqr->kqr_thread, AST_KEVENT_RETURN_TO_KERNEL);
+ }
+}
+
+static void
+kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos)
+{
+ const uint8_t KQWL_STAYACTIVE_FIRED_BIT = (1 << 0);
+
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+ boolean_t old_ipc_override_is_sync = FALSE;
+ kq_index_t old_qos = kqworkloop_combined_qos(kqwl, &old_ipc_override_is_sync);
+ struct kqueue *kq = &kqwl->kqwl_kqueue;
+ bool static_thread = (kq->kq_state & KQ_NO_WQ_THREAD);
+ kq_index_t i;
+
+ /* must hold the kqr lock */
+ kqwl_req_held(kqwl);
+
+ switch (op) {
+ case KQWL_UTQ_UPDATE_WAKEUP_QOS:
+ if (qos == KQWL_BUCKET_STAYACTIVE) {
+ /*
+ * KQWL_BUCKET_STAYACTIVE is not a QoS bucket; we only remember
+ * a high watermark (kqr_stayactive_qos) of any stay active knote
+ * that was ever registered with this workloop.
+ *
+ * When waitq_set__CALLING_PREPOST_HOOK__() wakes up any stay active
+ * knote, we use this high-watermark as a wakeup-index, and also set
+ * the magic KQWL_BUCKET_STAYACTIVE bit to make sure we remember
+ * there is at least one stay active knote fired until the next full
+ * processing of this bucket.
+ */
+ kqr->kqr_wakeup_indexes |= KQWL_STAYACTIVE_FIRED_BIT;
+ qos = kqr->kqr_stayactive_qos;
+ assert(qos);
+ assert(!static_thread);
+ }
+ if (kqr->kqr_wakeup_indexes & (1 << qos)) {
+ assert(kqr->kqr_state & KQR_WAKEUP);
+ break;
+ }
+
+ kqr->kqr_wakeup_indexes |= (1 << qos);
+ kqr->kqr_state |= KQR_WAKEUP;
+ kqworkloop_request_fire_r2k_notification(kqwl);
+ goto recompute_async;
+
+ case KQWL_UTQ_UPDATE_STAYACTIVE_QOS:
+ assert(qos);
+ if (kqr->kqr_stayactive_qos < qos) {
+ kqr->kqr_stayactive_qos = qos;
+ if (kqr->kqr_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT) {
+ assert(kqr->kqr_state & KQR_WAKEUP);
+ kqr->kqr_wakeup_indexes |= (1 << qos);
+ goto recompute_async;
+ }
+ }
+ break;
+
+ case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS:
+ kqlock_held(kq); // to look at kq_queues
+ kqr->kqr_has_sync_override = FALSE;
+ i = KQWL_BUCKET_STAYACTIVE;
+ if (TAILQ_EMPTY(&kqr->kqr_suppressed)) {
+ kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED;
+ }
+ if (!TAILQ_EMPTY(&kq->kq_queue[i]) &&
+ (kqr->kqr_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT)) {
+ /*
+ * If the KQWL_STAYACTIVE_FIRED_BIT is set, it means a stay active
+ * knote may have fired, so we need to merge in kqr_stayactive_qos.
+ *
+ * Unlike other buckets, this one is never empty but could be idle.
+ */
+ kqr->kqr_wakeup_indexes &= KQWL_STAYACTIVE_FIRED_BIT;
+ kqr->kqr_wakeup_indexes |= (1 << kqr->kqr_stayactive_qos);
+ } else {
+ kqr->kqr_wakeup_indexes = 0;
+ }
+ for (i = THREAD_QOS_UNSPECIFIED + 1; i < KQWL_BUCKET_STAYACTIVE; i++) {
+ if (!TAILQ_EMPTY(&kq->kq_queue[i])) {
+ kqr->kqr_wakeup_indexes |= (1 << i);
+ struct knote *kn = TAILQ_FIRST(&kqwl->kqwl_kqueue.kq_queue[i]);
+ if (i == THREAD_QOS_USER_INTERACTIVE &&
+ kn->kn_qos_override_is_sync) {
+ kqr->kqr_has_sync_override = TRUE;
+ }
+ }
+ }
+ if (kqr->kqr_wakeup_indexes) {
+ kqr->kqr_state |= KQR_WAKEUP;
+ kqworkloop_request_fire_r2k_notification(kqwl);
+ } else {
+ kqr->kqr_state &= ~KQR_WAKEUP;
+ }
+ assert(qos == THREAD_QOS_UNSPECIFIED);
+ goto recompute_async;
+
+ case KQWL_UTQ_RESET_WAKEUP_OVERRIDE:
+ kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED;
+ assert(qos == THREAD_QOS_UNSPECIFIED);
+ goto recompute_async;
+
+ case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE:
+ recompute_async:
+ /*
+ * When modifying the wakeup QoS or the async override QoS, we always
+ * need to maintain our invariant that kqr_override_index is at least as
+ * large as the highest QoS for which an event is fired.
+ *
+ * However, this override index can be larger when there is an overridden
+ * suppressed knote pushing on the kqueue.
+ */
+ if (kqr->kqr_wakeup_indexes > (1 << qos)) {
+ qos = fls(kqr->kqr_wakeup_indexes) - 1; /* fls is 1-based */
+ }
+ if (kqr->kqr_override_index < qos) {
+ kqr->kqr_override_index = qos;
+ }
+ break;
+
+ case KQWL_UTQ_REDRIVE_EVENTS:
+ break;
+
+ case KQWL_UTQ_SET_ASYNC_QOS:
+ filt_wlheld(kqwl);
+ kqr->kqr_qos_index = qos;
+ break;
+
+ case KQWL_UTQ_SET_SYNC_WAITERS_QOS:
+ filt_wlheld(kqwl);
+ kqr->kqr_dsync_waiters_qos = qos;
+ break;
+
+ default:
+ panic("unknown kqwl thread qos update operation: %d", op);
+ }
+
+ boolean_t new_ipc_override_is_sync = FALSE;
+ kq_index_t new_qos = kqworkloop_combined_qos(kqwl, &new_ipc_override_is_sync);
+ thread_t kqwl_owner = kqwl->kqwl_owner;
+ thread_t servicer = kqr->kqr_thread;
+ __assert_only int ret;
+
+ /*
+ * Apply the diffs to the owner if applicable
+ */
+ if (filt_wlowner_is_valid(kqwl_owner)) {
+#if 0
+ /* JMM - need new trace hooks for owner overrides */
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST),
+ kqwl->kqwl_dynamicid,
+ (kqr->kqr_state & KQR_BOUND) ? thread_tid(kqwl_owner) : 0,
+ (kqr->kqr_qos_index << 8) | new_qos,
+ (kqr->kqr_override_index << 8) | kqr->kqr_state);
+#endif
+ if (new_qos == kqr->kqr_dsync_owner_qos) {
+ // nothing to do
+ } else if (kqr->kqr_dsync_owner_qos == THREAD_QOS_UNSPECIFIED) {
+ thread_add_ipc_override(kqwl_owner, new_qos);
+ } else if (new_qos == THREAD_QOS_UNSPECIFIED) {
+ thread_drop_ipc_override(kqwl_owner);
+ } else /* kqr->kqr_dsync_owner_qos != new_qos */ {
+ thread_update_ipc_override(kqwl_owner, new_qos);
+ }
+ kqr->kqr_dsync_owner_qos = new_qos;
+
+ if (new_ipc_override_is_sync &&
+ !kqr->kqr_owner_override_is_sync) {
+ thread_add_sync_ipc_override(kqwl_owner);
+ } else if (!new_ipc_override_is_sync &&
+ kqr->kqr_owner_override_is_sync) {
+ thread_drop_sync_ipc_override(kqwl_owner);
+ }
+ kqr->kqr_owner_override_is_sync = new_ipc_override_is_sync;
+ }
+
+ /*
+ * apply the diffs to the servicer
+ */
+ if (static_thread) {
+ /*
+ * Statically bound thread
+ *
+ * These threads don't participate in QoS overrides today; just wake up
+ * the thread blocked on this kqueue if a new event arrived.
+ */
+
+ switch (op) {
+ case KQWL_UTQ_UPDATE_WAKEUP_QOS:
+ case KQWL_UTQ_UPDATE_STAYACTIVE_QOS:
+ case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS:
+ break;
+
+ case KQWL_UTQ_RESET_WAKEUP_OVERRIDE:
+ case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE:
+ case KQWL_UTQ_REDRIVE_EVENTS:
+ case KQWL_UTQ_SET_ASYNC_QOS:
+ case KQWL_UTQ_SET_SYNC_WAITERS_QOS:
+ panic("should never be called");
+ break;
+ }
+
+ kqlock_held(kq);
+
+ if ((kqr->kqr_state & KQR_BOUND) && (kqr->kqr_state & KQR_WAKEUP)) {
+ assert(servicer && !is_workqueue_thread(servicer));
+ if (kq->kq_state & (KQ_SLEEP | KQ_SEL)) {
+ kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
+ waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, KQ_EVENT,
+ THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
+ }
+ }
+ } else if ((kqr->kqr_state & KQR_THREQUESTED) == 0) {
+ /*
+ * No servicer, nor thread-request
+ *
+ * Make a new thread request, unless there is an owner (or the workloop
+ * is suspended in userland) or there is no asynchronous work in the
+ * first place.
+ */
+
+ if (kqwl_owner == THREAD_NULL && (kqr->kqr_state & KQR_WAKEUP)) {
+ kqworkloop_request_thread(kqwl, new_qos);
+ }
+ } else if ((kqr->kqr_state & KQR_BOUND) == 0 &&
+ (kqwl_owner || (kqr->kqr_state & KQR_WAKEUP) == 0)) {
+ /*
+ * No servicer, thread request in flight we want to cancel
+ *
+ * We just got rid of the last knote of the kqueue or noticed an owner
+ * with a thread request still in flight, take it back.
+ */
+ ret = (*pthread_functions->workq_threadreq_modify)(kqwl->kqwl_p,
+ &kqr->kqr_req, WORKQ_THREADREQ_CANCEL, 0, 0);
+ if (ret == 0) {
+ kqr->kqr_state &= ~KQR_THREQUESTED;
+ kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF);
+ }
+ } else {
+ boolean_t qos_changed = FALSE;
+
+ /*
+ * Servicer or request is in flight
+ *
+ * Just apply the diff to the servicer or the thread request
+ */
+ if (kqr->kqr_state & KQR_BOUND) {
+ servicer = kqr->kqr_thread;
+ struct uthread *ut = get_bsdthread_info(servicer);
+ if (ut->uu_kqueue_qos_index != new_qos) {
+ if (ut->uu_kqueue_qos_index == THREAD_QOS_UNSPECIFIED) {
+ thread_add_ipc_override(servicer, new_qos);
+ } else if (new_qos == THREAD_QOS_UNSPECIFIED) {
+ thread_drop_ipc_override(servicer);
+ } else /* ut->uu_kqueue_qos_index != new_qos */ {
+ thread_update_ipc_override(servicer, new_qos);
+ }
+ ut->uu_kqueue_qos_index = new_qos;
+ qos_changed = TRUE;
+ }
+
+ if (new_ipc_override_is_sync != ut->uu_kqueue_override_is_sync) {
+ if (new_ipc_override_is_sync &&
+ !ut->uu_kqueue_override_is_sync) {
+ thread_add_sync_ipc_override(servicer);
+ } else if (!new_ipc_override_is_sync &&
+ ut->uu_kqueue_override_is_sync) {
+ thread_drop_sync_ipc_override(servicer);
+ }
+ ut->uu_kqueue_override_is_sync = new_ipc_override_is_sync;
+ qos_changed = TRUE;
+ }
+ } else if (old_qos != new_qos) {
+ assert(new_qos);
+ kqworkloop_threadreq_modify(kqwl, new_qos);
+ qos_changed = TRUE;
+ }
+ if (qos_changed) {
+ servicer = kqr->kqr_thread;
+ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST),
+ kqwl->kqwl_dynamicid,
+ (kqr->kqr_state & KQR_BOUND) ? thread_tid(servicer) : 0,
+ (kqr->kqr_qos_index << 16) | (new_qos << 8) | new_ipc_override_is_sync,
+ (kqr->kqr_override_index << 8) | kqr->kqr_state);
+ }
+ }
+}
+
+static void
+kqworkloop_request_help(struct kqworkloop *kqwl, kq_index_t qos_index)
+{
+ /* convert to thread qos value */
+ assert(qos_index < KQWL_NBUCKETS);
+
+ kqwl_req_lock(kqwl);
+ kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS, qos_index);
+ kqwl_req_unlock(kqwl);
+}
+
+/*
+ * These arrays described the low and high qindexes for a given qos_index.
+ * The values come from the chart in <sys/eventvar.h> (must stay in sync).
+ */
+static kq_index_t _kqwq_base_index[KQWQ_NQOS] = {0, 0, 6, 11, 15, 18, 20, 21};
+static kq_index_t _kqwq_high_index[KQWQ_NQOS] = {0, 5, 10, 14, 17, 19, 20, 21};
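+/*
+ * Each workq qos_index owns the band of queues
+ * [_kqwq_base_index[qos_index], _kqwq_high_index[qos_index]]; a knote with
+ * a QoS override is queued above the base index within that band (see
+ * knote_get_queue_index() below).
+ */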
+
+static struct kqtailq *
+kqueue_get_base_queue(struct kqueue *kq, kq_index_t qos_index)
+{
+ if (kq->kq_state & KQ_WORKQ) {
+ assert(qos_index < KQWQ_NQOS);
+ return &kq->kq_queue[_kqwq_base_index[qos_index]];
+ } else if (kq->kq_state & KQ_WORKLOOP) {
+ assert(qos_index < KQWL_NBUCKETS);
+ return &kq->kq_queue[qos_index];
+ } else {
+ assert(qos_index == QOS_INDEX_KQFILE);
+ return &kq->kq_queue[QOS_INDEX_KQFILE];
+ }
+}
+
+static struct kqtailq *
+kqueue_get_high_queue(struct kqueue *kq, kq_index_t qos_index)
+{
+ if (kq->kq_state & KQ_WORKQ) {
+ assert(qos_index < KQWQ_NQOS);
+ return &kq->kq_queue[_kqwq_high_index[qos_index]];
+ } else if (kq->kq_state & KQ_WORKLOOP) {
+ assert(qos_index < KQWL_NBUCKETS);
+ return &kq->kq_queue[KQWL_BUCKET_STAYACTIVE];
+ } else {
+ assert(qos_index == QOS_INDEX_KQFILE);
+ return &kq->kq_queue[QOS_INDEX_KQFILE];
+ }
+}
+
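+/* returns non-zero if every queue in the request's band is empty */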
+static int
+kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index)
+{
+ struct kqtailq *base_queue = kqueue_get_base_queue(kq, qos_index);
+ struct kqtailq *queue = kqueue_get_high_queue(kq, qos_index);
+
+ do {
+ if (!TAILQ_EMPTY(queue))
+ return 0;
+ } while (queue-- > base_queue);
+ return 1;
+}
+
+static struct kqtailq *
+kqueue_get_suppressed_queue(struct kqueue *kq, kq_index_t qos_index)
+{
+ struct kqtailq *res;
+ struct kqrequest *kqr;
+
+ if (kq->kq_state & KQ_WORKQ) {
+ struct kqworkq *kqwq = (struct kqworkq *)kq;
+
+ kqr = kqworkq_get_request(kqwq, qos_index);
+ res = &kqr->kqr_suppressed;
+ } else if (kq->kq_state & KQ_WORKLOOP) {
+ struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+
+ kqr = &kqwl->kqwl_request;
+ res = &kqr->kqr_suppressed;
+ } else {
+ struct kqfile *kqf = (struct kqfile *)kq;
+ res = &kqf->kqf_suppressed;
+ }
+ return res;
+}
+
+static kq_index_t
+knote_get_queue_index(struct knote *kn)
+{
+ kq_index_t override_index = knote_get_qos_override_index(kn);
+ kq_index_t qos_index = knote_get_qos_index(kn);
+ struct kqueue *kq = knote_get_kq(kn);
+ kq_index_t res;
+
+ if (kq->kq_state & KQ_WORKQ) {
+ res = _kqwq_base_index[qos_index];
+ if (override_index > qos_index)
+ res += override_index - qos_index;
+ assert(res <= _kqwq_high_index[qos_index]);
+ } else if (kq->kq_state & KQ_WORKLOOP) {
+ res = MAX(override_index, qos_index);
+ assert(res < KQWL_NBUCKETS);
+ } else {
+ assert(qos_index == QOS_INDEX_KQFILE);
+ assert(override_index == QOS_INDEX_KQFILE);
+ res = QOS_INDEX_KQFILE;
+ }
+ return res;
+}
+
+static struct kqtailq *
+knote_get_queue(struct knote *kn)
+{
+ kq_index_t qindex = knote_get_queue_index(kn);
+
+ return &(knote_get_kq(kn))->kq_queue[qindex];
+}
+
+static kq_index_t
+knote_get_req_index(struct knote *kn)
+{
+ return kn->kn_req_index;
+}
+
+static kq_index_t
+knote_get_qos_index(struct knote *kn)
+{
+ return kn->kn_qos_index;
+}
+
+static void
+knote_set_qos_index(struct knote *kn, kq_index_t qos_index)
+{
+ struct kqueue *kq = knote_get_kq(kn);
+
+ assert(qos_index < KQWQ_NQOS);
+ assert((kn->kn_status & KN_QUEUED) == 0);
+
+ if (kq->kq_state & KQ_WORKQ) {
+ assert(qos_index > THREAD_QOS_UNSPECIFIED);
+ } else if (kq->kq_state & KQ_WORKLOOP) {
+ /* XXX this policy decision shouldn't be here */
+ if (qos_index == THREAD_QOS_UNSPECIFIED)
+ qos_index = THREAD_QOS_LEGACY;
+ } else
+ qos_index = QOS_INDEX_KQFILE;
+
+ /* always set requested */
+ kn->kn_req_index = qos_index;
+
+ /* only adjust in-use qos index when not suppressed */
+ if ((kn->kn_status & KN_SUPPRESSED) == 0)
+ kn->kn_qos_index = qos_index;
+}
+
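+/*
+ * knote_set_qos_overcommit - propagate the overcommit hint
+ *
+ * If the knote's registered pthread priority carries the overcommit flag,
+ * mark the corresponding workq or workloop thread request as overcommit.
+ */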
+static void
+knote_set_qos_overcommit(struct knote *kn)
+{
+ struct kqueue *kq = knote_get_kq(kn);
+ struct kqrequest *kqr;
+
+ /* turn overcommit on for the appropriate thread request? */
+ if (kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
+ if (kq->kq_state & KQ_WORKQ) {
+ kq_index_t qos_index = knote_get_qos_index(kn);
+ struct kqworkq *kqwq = (struct kqworkq *)kq;
+
+ kqr = kqworkq_get_request(kqwq, qos_index);
+
+ kqwq_req_lock(kqwq);
+ kqr->kqr_state |= KQR_THOVERCOMMIT;
+ kqwq_req_unlock(kqwq);
+ } else if (kq->kq_state & KQ_WORKLOOP) {
+ struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+
+ kqr = &kqwl->kqwl_request;
+
+ kqwl_req_lock(kqwl);
+ kqr->kqr_state |= KQR_THOVERCOMMIT;
+ kqwl_req_unlock(kqwl);
+ }
+ }
+}
+
+static kq_index_t
+knote_get_qos_override_index(struct knote *kn)
+{
+ return kn->kn_qos_override;
+}
+
+static void
+knote_set_qos_override_index(struct knote *kn, kq_index_t override_index,
+ boolean_t override_is_sync)
+{
+ struct kqueue *kq = knote_get_kq(kn);
+ kq_index_t qos_index = knote_get_qos_index(kn);
+ kq_index_t old_override_index = knote_get_qos_override_index(kn);
+ boolean_t old_override_is_sync = kn->kn_qos_override_is_sync;
+ uint32_t flags = 0;
+
+ assert((kn->kn_status & KN_QUEUED) == 0);
+
+ if (override_index == KQWQ_QOS_MANAGER) {
+ assert(qos_index == KQWQ_QOS_MANAGER);
+ } else {
+ assert(override_index < KQWQ_QOS_MANAGER);
+ }
+
+ kn->kn_qos_override = override_index;
+ kn->kn_qos_override_is_sync = override_is_sync;
+
+ /*
+ * If this is a workq/workloop kqueue, apply the override to the
+ * servicing thread.
+ */
+ if (kq->kq_state & KQ_WORKQ) {
+ struct kqworkq *kqwq = (struct kqworkq *)kq;
+
+ assert(qos_index > THREAD_QOS_UNSPECIFIED);
+ kqworkq_update_override(kqwq, qos_index, override_index);
+ } else if (kq->kq_state & KQ_WORKLOOP) {
+ struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+
+ if ((kn->kn_status & KN_SUPPRESSED) == KN_SUPPRESSED) {
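+ /* suppressed knotes keep a sync-UI override count on the request; capture the old and new sync-UI state so the count can be adjusted */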
+ flags = flags | KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS;
+
+ if (override_index == THREAD_QOS_USER_INTERACTIVE
+ && override_is_sync) {
+ flags = flags | KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI;
+ }
+
+ if (old_override_index == THREAD_QOS_USER_INTERACTIVE
+ && old_override_is_sync) {
+ flags = flags | KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI;
+ }
+ }
+
+ assert(qos_index > THREAD_QOS_UNSPECIFIED);
+ kqworkloop_update_override(kqwl, qos_index, override_index, flags);
+ }
+}
+
+static kq_index_t
+knote_get_sync_qos_override_index(struct knote *kn)
+{
+ return kn->kn_qos_sync_override;
+}
+
+static void
+kqworkq_update_override(struct kqworkq *kqwq, kq_index_t qos_index, kq_index_t override_index)
+{
+ struct kqrequest *kqr;
+ kq_index_t old_override_index;
+
+ if (override_index <= qos_index) {
+ return;
+ }
+
+ kqr = kqworkq_get_request(kqwq, qos_index);
+
+ kqwq_req_lock(kqwq);
+ old_override_index = kqr->kqr_override_index;
+ if (override_index > MAX(kqr->kqr_qos_index, old_override_index)) {
+ kqr->kqr_override_index = override_index;
+
+ /* apply the override to the bound servicing thread */
+ if (kqr->kqr_state & KQR_BOUND) {
+ thread_t wqthread = kqr->kqr_thread;
+
+ /* only apply if non-manager */
+ assert(wqthread);
+ if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
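+ /* update an ipc override that was already applied, otherwise add a fresh one */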
+ if (old_override_index)
+ thread_update_ipc_override(wqthread, override_index);
+ else
+ thread_add_ipc_override(wqthread, override_index);
+ }
+ }
+ }
+ kqwq_req_unlock(kqwq);
+}
+
+/* called with the kqworkq lock held */
+static void
+kqworkq_bind_thread_impl(
+ struct kqworkq *kqwq,
+ kq_index_t qos_index,
+ thread_t thread,
+ unsigned int flags)
+{
+ /* request lock must be held */
+ kqwq_req_held(kqwq);
+
+ struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
+ assert(kqr->kqr_state & KQR_THREQUESTED);
+
+ if (qos_index == KQWQ_QOS_MANAGER)
+ flags |= KEVENT_FLAG_WORKQ_MANAGER;
+
+ struct uthread *ut = get_bsdthread_info(thread);
+
+ /*
+ * If this is a manager, and the manager request bit is
+ * not set, ensure no other thread is bound. If the bit
+ * is set, make sure the previously bound thread is us.
+ */
+ if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
+ if ((kqr->kqr_state & KQR_BOUND) == 0) {
+ kqr->kqr_state |= (KQR_BOUND | KQWQ_THMANAGER);
+ TAILQ_INIT(&kqr->kqr_suppressed);
+ kqr->kqr_thread = thread;
+ ut->uu_kqueue_bound = (struct kqueue *)kqwq;
+ ut->uu_kqueue_qos_index = KQWQ_QOS_MANAGER;
+ ut->uu_kqueue_flags = (KEVENT_FLAG_WORKQ |
+ KEVENT_FLAG_WORKQ_MANAGER);
+ } else {
+ assert(kqr->kqr_state & KQR_BOUND);
+ assert(thread == kqr->kqr_thread);
+ assert(ut->uu_kqueue_bound == (struct kqueue *)kqwq);
+ assert(ut->uu_kqueue_qos_index == KQWQ_QOS_MANAGER);
+ assert(ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER);
+ }
+ return;
+ }
+
+ /* Just a normal one-queue servicing thread */
+ assert(kqr->kqr_state & KQR_THREQUESTED);
+ assert(kqr->kqr_qos_index == qos_index);
+
+ if ((kqr->kqr_state & KQR_BOUND) == 0) {
+ kqr->kqr_state |= KQR_BOUND;
+ TAILQ_INIT(&kqr->kqr_suppressed);
+ kqr->kqr_thread = thread;
+
+ /* apply an ipc QoS override if one is needed */
+ if (kqr->kqr_override_index) {
+ assert(kqr->kqr_qos_index);
+ assert(kqr->kqr_override_index > kqr->kqr_qos_index);
+ assert(thread_get_ipc_override(thread) == THREAD_QOS_UNSPECIFIED);
+ thread_add_ipc_override(thread, kqr->kqr_override_index);
+ }
+
+ /* indicate that we are processing in the uthread */
+ ut->uu_kqueue_bound = (struct kqueue *)kqwq;
+ ut->uu_kqueue_qos_index = qos_index;
+ ut->uu_kqueue_flags = flags;
+ } else {
+ /*
+ * Probably synchronously bound AND post-request bound;
+ * this logic can go away when we get rid of post-request bind.
+ */
+ assert(kqr->kqr_state & KQR_BOUND);
+ assert(thread == kqr->kqr_thread);
+ assert(ut->uu_kqueue_bound == (struct kqueue *)kqwq);
+ assert(ut->uu_kqueue_qos_index == qos_index);
+ assert((ut->uu_kqueue_flags & flags) == flags);
+ }
+}
+
+static void
+kqworkloop_update_override(
+ struct kqworkloop *kqwl,
+ kq_index_t qos_index,
+ kq_index_t override_index,
+ uint32_t flags)
+{
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+
+ kqwl_req_lock(kqwl);
+
+ /* Do not apply overrides to threads directly attached to a KQ_NO_WQ_THREAD workloop */
+ if (kqr->kqr_state & KQR_BOUND) {
+ assert(kqr->kqr_thread);
+
+ if (kqwl->kqwl_kqueue.kq_state & KQ_NO_WQ_THREAD) {
+ kqwl_req_unlock(kqwl);
+ assert(!is_workqueue_thread(kqr->kqr_thread));
+ return;
+ }
+ }
+
+ /* Update sync ipc counts on kqr for suppressed knotes */
+ if (flags & KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS) {
+ kqworkloop_update_suppress_sync_count(kqr, flags);
+ }
+
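+ /* unless the caller asked for a lazy update, recompute the wakeup override for the workloop now */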
+ if ((flags & KQWL_UO_UPDATE_OVERRIDE_LAZY) == 0) {
+ kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
+ MAX(qos_index, override_index));
+ }
+ kqwl_req_unlock(kqwl);
+}
+
+static void
+kqworkloop_update_suppress_sync_count(
+ struct kqrequest *kqr,
+ uint32_t flags)
+{
+ if (flags & KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI) {
+ kqr->kqr_sync_suppress_count++;
+ }
+
+ if (flags & KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI) {
+ assert(kqr->kqr_sync_suppress_count > 0);
+ kqr->kqr_sync_suppress_count--;
+ }
+}
+
+/*
+ * kqworkloop_unbind_thread - Unbind the servicer thread of a workloop kqueue
+ *
+ * It ends the processing phase if the thread was still processing.
+ *
+ * For workloops that are not KQ_NO_WQ_THREAD, we may have to request a
+ * new thread. This can happen if:
+ * - there were active events at or above our QoS we never got to (count > 0)
+ * - we pended waitq hook callouts during processing
+ * - we pended wakeups while processing (or unsuppressing)
+ *
+ * Called with kqueue lock held.
+ */
+
+static void
+kqworkloop_unbind_thread(
+ struct kqworkloop *kqwl,
+ thread_t thread,
+ __unused unsigned int flags)
+{
+ struct kqueue *kq = &kqwl->kqwl_kqueue;
+ struct kqrequest *kqr = &kqwl->kqwl_request;
+
+ kqlock_held(kq);
+
+ assert((kq->kq_state & KQ_PROCESSING) == 0);
+ if (kq->kq_state & KQ_PROCESSING) {
+ return;
+ }
+
+ /*
+ * Forcing the KQ_PROCESSING flag allows for QoS updates because of
+ * unsuppressing knotes not to be applied until the eventual call to
+ * kqworkloop_update_threads_qos() below.
+ */
+ kq->kq_state |= KQ_PROCESSING;
+ kqworkloop_acknowledge_events(kqwl, TRUE);
+ kq->kq_state &= ~KQ_PROCESSING;
+
+ kqwl_req_lock(kqwl);
+
+ /* deal with extraneous unbinds in release kernels */
+ assert((kqr->kqr_state & (KQR_BOUND | KQR_PROCESSING)) == KQR_BOUND);
+ if ((kqr->kqr_state & (KQR_BOUND | KQR_PROCESSING)) != KQR_BOUND) {
+ kqwl_req_unlock(kqwl);
+ return;
+ }
+
+ assert(thread == current_thread());
+ assert(kqr->kqr_thread == thread);
+ if (kqr->kqr_thread != thread) {
+ kqwl_req_unlock(kqwl);
+ return;
+ }
+
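+ /* snapshot and clear the uthread's kqueue binding state; the ipc overrides are dropped below, after the QoS recomputation */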
+ struct uthread *ut = get_bsdthread_info(thread);
+ kq_index_t old_qos_index = ut->uu_kqueue_qos_index;
+ boolean_t ipc_override_is_sync = ut->uu_kqueue_override_is_sync;
+ ut->uu_kqueue_bound = NULL;
+ ut->uu_kqueue_qos_index = 0;
+ ut->uu_kqueue_override_is_sync = 0;
+ ut->uu_kqueue_flags = 0;
+
+ /* unbind the servicer thread, drop overrides */
+ kqr->kqr_thread = NULL;
+ kqr->kqr_state &= ~(KQR_BOUND | KQR_THREQUESTED | KQR_R2K_NOTIF_ARMED);
+ kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0);
+
+ kqwl_req_unlock(kqwl);
+
+ /*
+ * Drop the override on the current thread last, after the call to
+ * kqworkloop_update_threads_qos above.
+ */
+ if (old_qos_index) {
+ thread_drop_ipc_override(thread);
+ }
+ if (ipc_override_is_sync) {
+ thread_drop_sync_ipc_override(thread);
+ }
+}
+
+/* called with the kqworkq lock held */
+static void
+kqworkq_unbind_thread(
+ struct kqworkq *kqwq,
+ kq_index_t qos_index,
+ thread_t thread,
+ __unused unsigned int flags)
+{
+ struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
+ kq_index_t override_index = 0;
+
+ /* request lock must be held */
+ kqwq_req_held(kqwq);
+
+ assert(thread == current_thread());
+
+ if ((kqr->kqr_state & KQR_BOUND) == 0) {
+ assert(kqr->kqr_state & KQR_BOUND);
+ return;
+ }
+
+ assert(kqr->kqr_thread == thread);
+ assert(TAILQ_EMPTY(&kqr->kqr_suppressed));
+
+ /*
+ * If there is an override, drop it from the current thread
+ * and then we are free to recompute (a potentially lower)
+ * minimum override to apply to the next thread request.
+ */
+ if (kqr->kqr_override_index) {
+ struct kqtailq *base_queue = kqueue_get_base_queue(&kqwq->kqwq_kqueue, qos_index);
+ struct kqtailq *queue = kqueue_get_high_queue(&kqwq->kqwq_kqueue, qos_index);
+
+ /* if not bound to a manager thread, drop the current ipc override */
+ if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
+ thread_drop_ipc_override(thread);
+ }
+
+ /* recompute the new override */
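+ /* walk from the highest bucket down to the base; the offset of the first non-empty bucket converts back to a QoS index */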
+ do {
+ if (!TAILQ_EMPTY(queue)) {
+ override_index = queue - base_queue + qos_index;
+ break;
+ }
+ } while (queue-- > base_queue);
+ }
+
+ /* Mark it unbound */
+ kqr->kqr_thread = NULL;
+ kqr->kqr_state &= ~(KQR_BOUND | KQR_THREQUESTED | KQWQ_THMANAGER);
+
+ /* apply the new override */
+ if (override_index > kqr->kqr_qos_index) {
+ kqr->kqr_override_index = override_index;
+ } else {
+ kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED;
+ }
+}
+
+struct kqrequest *
+kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
+{
+ assert(qos_index < KQWQ_NQOS);
+ return &kqwq->kqwq_request[qos_index];
+}
+
+void
+knote_adjust_qos(struct knote *kn, qos_t new_qos, qos_t new_override, kq_index_t sync_override_index)
+{
+ struct kqueue *kq = knote_get_kq(kn);
+ boolean_t override_is_sync = FALSE;
+
+ if (kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) {
+ kq_index_t new_qos_index;
+ kq_index_t new_override_index;
+ kq_index_t servicer_qos_index;
+
+ new_qos_index = qos_index_from_qos(kn, new_qos, FALSE);
+ new_override_index = qos_index_from_qos(kn, new_override, TRUE);
+
+ /* make sure the servicer qos acts as a floor */
+ servicer_qos_index = qos_index_from_qos(kn, kn->kn_qos, FALSE);
+ if (servicer_qos_index > new_qos_index)
+ new_qos_index = servicer_qos_index;
+ if (servicer_qos_index > new_override_index)
+ new_override_index = servicer_qos_index;
+ if (sync_override_index >= new_override_index) {
+ new_override_index = sync_override_index;
+ override_is_sync = TRUE;
+ }
+
+ kqlock(kq);
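+ /* apply the change only if the requested index, override, or sync state actually differs; requeue and wake up if the knote was queued */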
+ if (new_qos_index != knote_get_req_index(kn) ||
+ new_override_index != knote_get_qos_override_index(kn) ||
+ override_is_sync != kn->kn_qos_override_is_sync) {
+ if (kn->kn_status & KN_QUEUED) {
+ knote_dequeue(kn);
+ knote_set_qos_index(kn, new_qos_index);
+ knote_set_qos_override_index(kn, new_override_index, override_is_sync);
+ knote_enqueue(kn);
+ knote_wakeup(kn);
+ } else {
+ knote_set_qos_index(kn, new_qos_index);
+ knote_set_qos_override_index(kn, new_override_index, override_is_sync);
+ }
+ }
+ kqunlock(kq);
+ }
+}
+
+void
+knote_adjust_sync_qos(struct knote *kn, kq_index_t sync_qos, boolean_t lock_kq)
+{
+ struct kqueue *kq = knote_get_kq(kn);
+ kq_index_t old_sync_override;
+ kq_index_t qos_index = knote_get_qos_index(kn);
+ uint32_t flags = 0;
+
+ /* Tracking only happens for UI QoS (or for clearing it via UNSPECIFIED) */
+ if (sync_qos != THREAD_QOS_USER_INTERACTIVE &&
+ sync_qos != THREAD_QOS_UNSPECIFIED) {
+ return;
+ }
+
+ if (lock_kq)
+ kqlock(kq);
+
+ if (kq->kq_state & KQ_WORKLOOP) {
+ struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+
+ old_sync_override = knote_get_sync_qos_override_index(kn);
+ if (old_sync_override != sync_qos) {
+ kn->kn_qos_sync_override = sync_qos;
+
+ /* update sync ipc counters for suppressed knotes */
+ if ((kn->kn_status & KN_SUPPRESSED) == KN_SUPPRESSED) {
+ flags = flags | KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS;
+
+ /* Do not recalculate the kqwl override; it will be done later */
+ flags = flags | KQWL_UO_UPDATE_OVERRIDE_LAZY;
+
+ if (sync_qos == THREAD_QOS_USER_INTERACTIVE) {
+ flags = flags | KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI;
+ }
+
+ if (old_sync_override == THREAD_QOS_USER_INTERACTIVE) {
+ flags = flags | KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI;
+ }
+
+ kqworkloop_update_override(kqwl, qos_index, sync_qos,
+ flags);
+ }
+
+ }
+ }
+ if (lock_kq)
+ kqunlock(kq);
+}
+
+static void
+knote_wakeup(struct knote *kn)
+{
+ struct kqueue *kq = knote_get_kq(kn);
+ kq_index_t qos_index = knote_get_qos_index(kn);
+
+ kqlock_held(kq);
+
+ if (kq->kq_state & KQ_WORKQ) {
+ /* request a servicing thread */
+ struct kqworkq *kqwq = (struct kqworkq *)kq;
+
+ kqworkq_request_help(kqwq, qos_index);
+
+ } else if (kq->kq_state & KQ_WORKLOOP) {
+ /* request a servicing thread */
+ struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+
+ if (kqworkloop_is_processing_on_current_thread(kqwl)) {
+ /*
+ * kqworkloop_end_processing() will perform the required QoS
+ * computations when it unsets the processing mode.
+ */
+ return;
+ }
+ kqworkloop_request_help(kqwl, qos_index);
+ } else {
+ struct kqfile *kqf = (struct kqfile *)kq;
+
+ /* flag wakeups during processing */
+ if (kq->kq_state & KQ_PROCESSING)
+ kq->kq_state |= KQ_WAKEUP;
+
+ /* wakeup a thread waiting on this queue */
+ if (kq->kq_state & (KQ_SLEEP | KQ_SEL)) {
+ kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
+ waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
+ KQ_EVENT,
+ THREAD_AWAKENED,
+ WAITQ_ALL_PRIORITIES);
+ }
+
+ /* wakeup other kqueues/select sets we're inside */
+ KNOTE(&kqf->kqf_sel.si_note, 0);
+ }
+}
+
+/*
+ * Called with the kqueue locked
+ */
+static void
+kqueue_interrupt(struct kqueue *kq)
+{
+ assert((kq->kq_state & KQ_WORKQ) == 0);
+
+ /* wakeup sleeping threads */
+ if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0) {
+ kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
+ (void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
+ KQ_EVENT,
+ THREAD_RESTART,
+ WAITQ_ALL_PRIORITIES);
+ }
+
+ /* wakeup threads waiting for their turn to process */
+ if (kq->kq_state & KQ_PROCWAIT) {
+ struct kqtailq *suppressq;
+
+ assert(kq->kq_state & KQ_PROCESSING);
+
+ kq->kq_state &= ~KQ_PROCWAIT;
+ suppressq = kqueue_get_suppressed_queue(kq, QOS_INDEX_KQFILE);
+ (void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
+ CAST_EVENT64_T(suppressq),
+ THREAD_RESTART,
+ WAITQ_ALL_PRIORITIES);
+ }
+}
+
+/*
+ * Called back from waitq code when no threads waiting and the hook was set.
+ *
+ * Interrupts are likely disabled and spin locks are held - minimal work
+ * can be done in this context!!!
+ *
+ * JMM - in the future, this will try to determine which knotes match the
+ * wait queue wakeup and apply these wakeups against those knotes themselves.
+ * For now, all the events dispatched this way are dispatch-manager handled,
+ * so hard-code that for now.
+ */
+void
+waitq_set__CALLING_PREPOST_HOOK__(void *kq_hook, void *knote_hook, int qos)
+{
+#pragma unused(knote_hook, qos)
+
+ struct kqueue *kq = (struct kqueue *)kq_hook;
+
+ if (kq->kq_state & KQ_WORKQ) {
+ struct kqworkq *kqwq = (struct kqworkq *)kq;
+
+ kqworkq_request_help(kqwq, KQWQ_QOS_MANAGER);
+
+ } else if (kq->kq_state & KQ_WORKLOOP) {
+ struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+
+ kqworkloop_request_help(kqwl, KQWL_BUCKET_STAYACTIVE);
+ }
+}
+
+void
+klist_init(struct klist *list)
+{
+ SLIST_INIT(list);
+}
+
+
+/*
+ * Query/Post each knote in the object's list
+ *
+ * The object lock protects the list. It is assumed
+ * that the filter/event routine for the object can
+ * determine that the object is already locked (via
+ * the hint) and not deadlock itself.
+ *
+ * The object lock should also hold off pending
+ * detach/drop operations. But we'll prevent it here
+ * too (by taking a use reference) - just in case.
+ */
+void
+knote(struct klist *list, long hint)
+{
+ struct knote *kn;
+
+ SLIST_FOREACH(kn, list, kn_selnext) {
+ struct kqueue *kq = knote_get_kq(kn);
+
+ kqlock(kq);
+
+ assert(!knoteuse_needs_boost(kn, NULL));
+
+ /* If we can get a use reference - deliver event */
+ if (kqlock2knoteuse(kq, kn, KNUSE_NONE)) {
+ int result;
+
+ /* call the event with only a use count */
+ result = knote_fops(kn)->f_event(kn, hint);
+
+ /* if it's not going away and it triggered */
+ if (knoteuse2kqlock(kq, kn, KNUSE_NONE) && result)
+ knote_activate(kn);
+ /* kq lock held */
+ }
+ kqunlock(kq);
+ }
+}
+
+/*
+ * attach a knote to the specified list. Return true if this is the first entry.
+ * The list is protected by whatever lock the object it is associated with uses.
+ */
+int
+knote_attach(struct klist *list, struct knote *kn)
+{
+ int ret = SLIST_EMPTY(list);
+ SLIST_INSERT_HEAD(list, kn, kn_selnext);
+ return (ret);
+}
+
+/*
+ * detach a knote from the specified list. Return true if that was the last entry.
+ * The list is protected by whatever lock the object it is associated with uses.
+ */
+int
+knote_detach(struct klist *list, struct knote *kn)
+{
+ SLIST_REMOVE(list, kn, knote, kn_selnext);
+ return (SLIST_EMPTY(list));
+}
+
+/*
+ * knote_vanish - Indicate that the source has vanished
+ *
+ * If the knote has requested EV_VANISHED delivery,
+ * arrange for that. Otherwise, deliver a NOTE_REVOKE
+ * event for backward compatibility.
+ *
+ * The knote is marked as having vanished, but is not
+ * actually detached from the source in this instance.
+ * The actual detach is deferred until the knote drop.
+ *
+ * Our caller already has the object lock held. Calling
+ * the detach routine would try to take that lock
+ * recursively - which likely is not supported.
+ */
+void
+knote_vanish(struct klist *list)
+{
+ struct knote *kn;
+ struct knote *kn_next;
+
+ SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
+ struct kqueue *kq = knote_get_kq(kn);
+ int result;
+
+ kqlock(kq);
+
+ assert(!knoteuse_needs_boost(kn, NULL));
+
+ if ((kn->kn_status & KN_DROPPING) == 0) {
+ /* If EV_VANISH supported - prepare to deliver one */
+ if (kn->kn_status & KN_REQVANISH) {
+ kn->kn_status |= KN_VANISHED;
+ knote_activate(kn);
+
+ } else if (kqlock2knoteuse(kq, kn, KNUSE_NONE)) {
+ /* call the event with only a use count */
+ result = knote_fops(kn)->f_event(kn, NOTE_REVOKE);
+
+ /* if it's not going away and it triggered */
+ if (knoteuse2kqlock(kq, kn, KNUSE_NONE) && result)
+ knote_activate(kn);
+ /* lock held again */
+ }
+ }
+ kqunlock(kq);
+ }
+}
+
+/*
+ * For a given knote, link a provided wait queue directly with the kqueue.
+ * Wakeups will happen via recursive wait queue support. But nothing will move
+ * the knote to the active list at wakeup (nothing calls knote()). Instead,
+ * the knote is marked stay-active and left permanently enqueued here.
+ *
+ * kqueue and knote references are held by caller.
+ * waitq locked by caller.
+ *
+ * caller provides the wait queue link structure.
+ */
+int
+knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link)
+{
+ struct kqueue *kq = knote_get_kq(kn);
+ kern_return_t kr;
+
+ kr = waitq_link(wq, &kq->kq_wqs, WAITQ_ALREADY_LOCKED, reserved_link);
+ if (kr == KERN_SUCCESS) {
+ knote_markstayactive(kn);
+ return (0);
+ } else {
+ return (EINVAL);
+ }
+}
+
+/*
+ * Unlink the provided wait queue from the kqueue associated with a knote.
+ * Also remove it from the magic list of directly attached knotes.
+ *
+ * Note that the unlink may have already happened from the other side, so
+ * ignore any failures to unlink and just remove it from the kqueue list.
+ *
+ * On success, caller is responsible for the link structure
+ */
+int
+knote_unlink_waitq(struct knote *kn, struct waitq *wq)
+{
+ struct kqueue *kq = knote_get_kq(kn);
+ kern_return_t kr;
+
+ kr = waitq_unlink(wq, &kq->kq_wqs);
+ knote_clearstayactive(kn);
+ return ((kr != KERN_SUCCESS) ? EINVAL : 0);
+}
+
+/*
+ * remove all knotes referencing a specified fd
+ *
+ * Essentially an inlined knote_remove & knote_drop
+ * when we know for sure that the thing is a file
+ *
+ * Entered with the proc_fd lock already held.
+ * It returns the same way, but may drop it temporarily.
+ */
+void
+knote_fdclose(struct proc *p, int fd, int force)
+{
+ struct klist *list;
+ struct knote *kn;
+
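+ /* the proc_fdlock may be dropped while each knote is dropped, so restart the scan from the head of the list each time */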
+restart:
+ list = &p->p_fd->fd_knlist[fd];
+ SLIST_FOREACH(kn, list, kn_link) {
+ struct kqueue *kq = knote_get_kq(kn);
+
+ kqlock(kq);
+
+ if (kq->kq_p != p)
+ panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
+ __func__, kq->kq_p, p);
+
+ /*
+ * If the knote supports EV_VANISHED delivery,
+ * transition it to vanished mode (or skip over
+ * it if already vanished).
+ */
+ if (!force && (kn->kn_status & KN_REQVANISH)) {
+
+ if ((kn->kn_status & KN_VANISHED) == 0) {
+ proc_fdunlock(p);
+
+ assert(!knoteuse_needs_boost(kn, NULL));
+
+ /* get detach reference (also marks vanished) */
+ if (kqlock2knotedetach(kq, kn, KNUSE_NONE)) {
+ /* detach knote and drop fp use reference */
+ knote_fops(kn)->f_detach(kn);
+ if (knote_fops(kn)->f_isfd)
+ fp_drop(p, kn->kn_id, kn->kn_fp, 0);
+
+ /* activate it if it's still in existence */
+ if (knoteuse2kqlock(kq, kn, KNUSE_NONE)) {
+ knote_activate(kn);
+ }
+ kqunlock(kq);
+ }
+ proc_fdlock(p);
+ goto restart;
+ } else {
+ kqunlock(kq);
+ continue;
+ }
+ }
+
+ proc_fdunlock(p);
+
+ /*
+ * Convert the kq lock to a drop ref.
+ * If we get it, go ahead and drop it.
+ * Otherwise, we waited for the blocking
+ * condition to complete. Either way,
+ * we dropped the fdlock so start over.
+ */
+ if (kqlock2knotedrop(kq, kn)) {
+ knote_drop(kn, p);
+ }
+
+ proc_fdlock(p);
+ goto restart;
+ }
+}
+
+/*
+ * knote_fdfind - lookup a knote in the fd table for a process
+ *
+ * If the filter is file-based, lookup based on fd index.
+ * Otherwise use a hash based on the ident.
+ *
+ * Matching is based on kq, filter, and ident. Optionally,
+ * it may also be based on the udata field in the kevent -
+ * allowing multiple event registration for the file object
+ * per kqueue.
+ *
+ * fd_knhashlock or fdlock held on entry (and exit)
+ */
+static struct knote *
+knote_fdfind(struct kqueue *kq,
+ struct kevent_internal_s *kev,
+ bool is_fd,
+ struct proc *p)
+{
+ struct filedesc *fdp = p->p_fd;
+ struct klist *list = NULL;
+ struct knote *kn = NULL;
+
+ /*
+ * determine where to look for the knote
+ */
+ if (is_fd) {
+ /* fd-based knotes are linked off the fd table */
+ if (kev->ident < (u_int)fdp->fd_knlistsize) {
+ list = &fdp->fd_knlist[kev->ident];
+ }
+ } else if (fdp->fd_knhashmask != 0) {
+ /* hash non-fd knotes here too */
+ list = &fdp->fd_knhash[KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
+ }
+
+ /*
+ * scan the selected list looking for a match
+ */
+ if (list != NULL) {
+ SLIST_FOREACH(kn, list, kn_link) {
+ if (kq == knote_get_kq(kn) &&
+ kev->ident == kn->kn_id &&
+ kev->filter == kn->kn_filter) {
+ if (kev->flags & EV_UDATA_SPECIFIC) {
+ if ((kn->kn_status & KN_UDATA_SPECIFIC) &&
+ kev->udata == kn->kn_udata) {
+ break; /* matching udata-specific knote */
+ }
+ } else if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) {
+ break; /* matching non-udata-specific knote */
+ }
+ }
+ }
+ }
+ return kn;
+}
+
+/*
+ * kq_add_knote - Add a knote to the fd table for a process
+ * while checking for duplicates.
+ *
+ * All file-based filters associate a list of knotes by file
+ * descriptor index. All other filters hash the knote by ident.
+ *
+ * May have to grow the table of knote lists to cover the
+ * file descriptor index presented.
+ *
+ * fd_knhashlock and fdlock unheld on entry (and exit).
+ *
+ * Takes a rwlock boost if inserting the knote is successful.
+ */
+static int
+kq_add_knote(struct kqueue *kq, struct knote *kn,
+ struct kevent_internal_s *kev,
+ struct proc *p, int *knoteuse_flags)
+{
+ struct filedesc *fdp = p->p_fd;
+ struct klist *list = NULL;
+ int ret = 0;
+ bool is_fd = knote_fops(kn)->f_isfd;
+
+ if (is_fd)
+ proc_fdlock(p);
+ else
+ knhash_lock(p);
+
+ if (knote_fdfind(kq, kev, is_fd, p) != NULL) {
+ /* found an existing knote: we can't add this one */
+ ret = ERESTART;
+ goto out_locked;
+ }
+
+ /* knote was not found: add it now */
+ if (!is_fd) {
+ if (fdp->fd_knhashmask == 0) {
+ u_long size = 0;
+
+ list = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE,
+ &size);
+ if (list == NULL) {
+ ret = ENOMEM;
+ goto out_locked;
+ }
+
+ fdp->fd_knhash = list;
+ fdp->fd_knhashmask = size;
+ }
+
+ list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
+ SLIST_INSERT_HEAD(list, kn, kn_link);
+ ret = 0;
+ goto out_locked;
+
+ } else {
+ /* knote is fd based */
+
+ if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
+ u_int size = 0;
+
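+ /* refuse idents beyond the process open-file limit or the global maxfiles */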
+ if (kn->kn_id >= (uint64_t)p->p_rlimit[RLIMIT_NOFILE].rlim_cur
+ || kn->kn_id >= (uint64_t)maxfiles) {
+ ret = EINVAL;
+ goto out_locked;
+ }
+ /* have to grow the fd_knlist */
+ size = fdp->fd_knlistsize;
+ while (size <= kn->kn_id)
+ size += KQEXTENT;
+
+ if (size >= (UINT_MAX/sizeof(struct klist *))) {
+ ret = EINVAL;
+ goto out_locked;
+ }
+
+ MALLOC(list, struct klist *,
+ size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
+ if (list == NULL) {
+ ret = ENOMEM;
+ goto out_locked;
+ }
+
+ bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
+ fdp->fd_knlistsize * sizeof(struct klist *));
+ bzero((caddr_t)list +
+ fdp->fd_knlistsize * sizeof(struct klist *),
+ (size - fdp->fd_knlistsize) * sizeof(struct klist *));
+ FREE(fdp->fd_knlist, M_KQUEUE);
+ fdp->fd_knlist = list;
+ fdp->fd_knlistsize = size;
+ }
+
+ list = &fdp->fd_knlist[kn->kn_id];
+ SLIST_INSERT_HEAD(list, kn, kn_link);
+ ret = 0;
+ goto out_locked;
+
+ }
+
+out_locked:
+ if (ret == 0 && knoteuse_needs_boost(kn, kev)) {
+ set_thread_rwlock_boost();
+ *knoteuse_flags = KNUSE_BOOST;
+ } else {
+ *knoteuse_flags = KNUSE_NONE;
+ }
+ if (is_fd)
+ proc_fdunlock(p);
+ else
+ knhash_unlock(p);
+
+ return ret;
+}
+
+/*
+ * kq_remove_knote - remove a knote from the fd table for a process
+ * and copy kn_status and kq_state while holding the kqlock and
+ * fd table locks.
+ *
+ * If the filter is file-based, remove based on fd index.
+ * Otherwise remove from the hash based on the ident.
+ *
+ * fd_knhashlock and fdlock unheld on entry (and exit).
+ */
+static void
+kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p,
+ kn_status_t *kn_status, uint16_t *kq_state)
+{
+ struct filedesc *fdp = p->p_fd;
+ struct klist *list = NULL;
+ bool is_fd;
+
+ is_fd = knote_fops(kn)->f_isfd;
+
+ if (is_fd)
+ proc_fdlock(p);
+ else
+ knhash_lock(p);
+
+ if (is_fd) {
+ assert((u_int)fdp->fd_knlistsize > kn->kn_id);
+ list = &fdp->fd_knlist[kn->kn_id];
+ } else {
+ list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
+ }
+ SLIST_REMOVE(list, kn, knote, kn_link);
+
+ kqlock(kq);
+ *kn_status = kn->kn_status;
+ *kq_state = kq->kq_state;
+ kqunlock(kq);
+
+ if (is_fd)
+ proc_fdunlock(p);
+ else
+ knhash_unlock(p);
+}
+
+/*
+ * kq_find_knote_and_kq_lock - lookup a knote in the fd table for a process
+ * and, if the knote is found, acquires the kqlock while holding the fd table lock/spinlock.
+ *
+ * fd_knhashlock or fdlock unheld on entry (and exit)
+ */
+
+static struct knote *
+kq_find_knote_and_kq_lock(struct kqueue *kq,
+ struct kevent_internal_s *kev,
+ bool is_fd,
+ struct proc *p)
+{
+ struct knote * ret;
+
+ if (is_fd)
+ proc_fdlock(p);
+ else
+ knhash_lock(p);
+
+ ret = knote_fdfind(kq, kev, is_fd, p);
+
+ if (ret) {