+/*
+ * kevent_copyin - fetch one change entry from user space
+ *
+ * Reads the next kevent from *addrp into kevp, widening the legacy
+ * 32-bit or 64-bit-process layouts into the common kevent64_s form,
+ * and advances *addrp past the consumed entry on success.
+ *
+ * Returns 0 or the error from copyin().
+ */
+static int
+kevent_copyin(user_addr_t *addrp, struct kevent64_s *kevp, struct proc *p, int iskev64)
+{
+	int nbytes;
+	int err;
+
+	if (iskev64) {
+		/* native kevent64_s layout: copy straight into the caller's buffer */
+		nbytes = sizeof(struct kevent64_s);
+		err = copyin(*addrp, (caddr_t)kevp, nbytes);
+	} else if (IS_64BIT_PROCESS(p)) {
+		struct user64_kevent kev64;
+
+		/* clear ext[] and any padding the legacy layout doesn't carry */
+		bzero(kevp, sizeof(struct kevent64_s));
+		nbytes = sizeof(kev64);
+		err = copyin(*addrp, (caddr_t)&kev64, nbytes);
+		if (err)
+			return err;
+		kevp->ident = kev64.ident;
+		kevp->filter = kev64.filter;
+		kevp->flags = kev64.flags;
+		kevp->fflags = kev64.fflags;
+		kevp->data = kev64.data;
+		kevp->udata = kev64.udata;
+	} else {
+		struct user32_kevent kev32;
+
+		bzero(kevp, sizeof(struct kevent64_s));
+		nbytes = sizeof(kev32);
+		err = copyin(*addrp, (caddr_t)&kev32, nbytes);
+		if (err)
+			return err;
+		/* widen the 32-bit fields; data is sign-extended, udata mapped to user_addr_t */
+		kevp->ident = (uintptr_t)kev32.ident;
+		kevp->filter = kev32.filter;
+		kevp->flags = kev32.flags;
+		kevp->fflags = kev32.fflags;
+		kevp->data = (intptr_t)kev32.data;
+		kevp->udata = CAST_USER_ADDR_T(kev32.udata);
+	}
+	if (err == 0)
+		*addrp += nbytes;
+	return err;
+}
+
+/*
+ * kevent_copyout - copy one kevent64_s out to user space
+ *
+ * Writes *kevp to the user address *addrp in the layout the caller
+ * expects (native kevent64_s, 64-bit legacy, or 32-bit legacy) and,
+ * on success, advances *addrp past the bytes written.
+ *
+ * Returns 0 or the error from copyout().
+ */
+static int
+kevent_copyout(struct kevent64_s *kevp, user_addr_t *addrp, struct proc *p, int iskev64)
+{
+ int advance;
+ int error;
+
+ if (iskev64) {
+ /* native layout: write the structure out unchanged */
+ advance = sizeof(struct kevent64_s);
+ error = copyout((caddr_t)kevp, *addrp, advance);
+ } else if (IS_64BIT_PROCESS(p)) {
+ struct user64_kevent kev64;
+
+ /*
+ * deal with the special case of a user-supplied
+ * value of (uintptr_t)-1.
+ */
+ kev64.ident = (kevp->ident == (uintptr_t)-1) ?
+ (uint64_t)-1LL : (uint64_t)kevp->ident;
+
+ kev64.filter = kevp->filter;
+ kev64.flags = kevp->flags;
+ kev64.fflags = kevp->fflags;
+ kev64.data = (int64_t) kevp->data;
+ kev64.udata = kevp->udata;
+ advance = sizeof(kev64);
+ error = copyout((caddr_t)&kev64, *addrp, advance);
+ } else {
+ struct user32_kevent kev32;
+
+ /*
+ * narrow each field to the 32-bit layout; udata is
+ * implicitly truncated here -- NOTE(review): assumes a
+ * 32-bit process only ever supplied a 32-bit udata value.
+ */
+ kev32.ident = (uint32_t)kevp->ident;
+ kev32.filter = kevp->filter;
+ kev32.flags = kevp->flags;
+ kev32.fflags = kevp->fflags;
+ kev32.data = (int32_t)kevp->data;
+ kev32.udata = kevp->udata;
+ advance = sizeof(kev32);
+ error = copyout((caddr_t)&kev32, *addrp, advance);
+ }
+ if (!error)
+ *addrp += advance;
+ return error;
+}
+
+/*
+ * kevent_continue - continue a kevent syscall after blocking
+ *
+ * assume we inherit a use count on the kq fileglob.
+ */
+
+static void
+kevent_continue(__unused struct kqueue *kq, void *data, int error)
+{
+	struct _kevent *cont_args = (struct _kevent *)data;
+	struct proc *p = current_proc();
+	int32_t *retval = cont_args->retval;
+	int noutputs = cont_args->eventout;
+
+	/* release the fd reference taken when the syscall started */
+	fp_drop(p, cont_args->fd, cont_args->fp, 0);
+
+	switch (error) {
+	case ERESTART:
+		/* don't restart after signals... */
+		error = EINTR;
+		break;
+	case EWOULDBLOCK:
+		/* running out of events/timeout is a normal return */
+		error = 0;
+		break;
+	default:
+		break;
+	}
+	if (error == 0)
+		*retval = noutputs;
+	unix_syscall_return(error);
+}
+
+/*
+ * kevent - [syscall] register and wait for kernel events
+ *
+ */
+int
+kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
+{
+	/* legacy entry point: 32-bit-sized events, no flags argument */
+	return kevent_internal(p, 0, uap->changelist, uap->nchanges,
+	    uap->eventlist, uap->nevents, uap->fd, uap->timeout,
+	    0, retval);
+}
+
+/*
+ * kevent64 - [syscall] register and wait for kernel events,
+ * using the kevent64_s event layout
+ */
+int
+kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
+{
+	return kevent_internal(p, 1, uap->changelist, uap->nchanges,
+	    uap->eventlist, uap->nevents, uap->fd, uap->timeout,
+	    uap->flags, retval);
+}
+
+/*
+ * kevent_internal - common implementation for kevent() and kevent64()
+ *
+ * Converts the optional user timeout to an absolute deadline, takes a
+ * usecount on the kqueue named by fd, registers each entry of the
+ * user's change list, and then (if there is room and nothing has been
+ * reported yet) scans for pending events, copying them out to
+ * ueventlist.
+ *
+ * Completion of the registration/scan path goes through
+ * kevent_continue(), which drops the fd reference and calls
+ * unix_syscall_return() -- control does not come back here; the
+ * errorout: path is reached only by the explicit goto taken before
+ * any scan is started.
+ *
+ * Fix over the previous revision: the copyin() result for the 64-bit
+ * timeout is now checked BEFORE ts is inspected.  Previously a faulting
+ * copyin left ts as uninitialized stack memory whose tv_sec was then
+ * read, and the fault error could be masked by EINVAL.
+ */
+static int
+kevent_internal(struct proc *p, int iskev64, user_addr_t changelist,
+	int nchanges, user_addr_t ueventlist, int nevents, int fd,
+	user_addr_t utimeout, __unused unsigned int flags,
+	int32_t *retval)
+{
+	struct _kevent *cont_args;
+	uthread_t ut;
+	struct kqueue *kq;
+	struct fileproc *fp;
+	struct kevent64_s kev;
+	int error, noutputs;
+	struct timeval atv;
+
+	/* convert timeout to absolute - if we have one */
+	if (utimeout != USER_ADDR_NULL) {
+		struct timeval rtv;
+		if (IS_64BIT_PROCESS(p)) {
+			struct user64_timespec ts;
+			error = copyin(utimeout, &ts, sizeof(ts));
+			/* check copyin before touching ts: on a fault it is uninitialized */
+			if (error)
+				return error;
+			/* reject tv_sec values that don't fit in 32 bits */
+			if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0)
+				return EINVAL;
+			TIMESPEC_TO_TIMEVAL(&rtv, &ts);
+		} else {
+			struct user32_timespec ts;
+			error = copyin(utimeout, &ts, sizeof(ts));
+			if (error)
+				return error;
+			TIMESPEC_TO_TIMEVAL(&rtv, &ts);
+		}
+		if (itimerfix(&rtv))
+			return EINVAL;
+		getmicrouptime(&atv);
+		timevaladd(&atv, &rtv);
+	} else {
+		/* no timeout: a zero deadline means poll without blocking */
+		atv.tv_sec = 0;
+		atv.tv_usec = 0;
+	}
+
+	/* get a usecount for the kq itself */
+	if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0)
+		return(error);
+
+	/* each kq should only be used for events of one type */
+	kqlock(kq);
+	if (kq->kq_state & (KQ_KEV32 | KQ_KEV64)) {
+		if (((iskev64 && (kq->kq_state & KQ_KEV32)) ||
+		    (!iskev64 && (kq->kq_state & KQ_KEV64)))) {
+			error = EINVAL;
+			kqunlock(kq);
+			goto errorout;
+		}
+	} else {
+		kq->kq_state |= (iskev64 ? KQ_KEV64 : KQ_KEV32);
+	}
+	kqunlock(kq);
+
+	/* register all the change requests the user provided... */
+	noutputs = 0;
+	while (nchanges > 0 && error == 0) {
+		error = kevent_copyin(&changelist, &kev, p, iskev64);
+		if (error)
+			break;
+
+		/* callers may not set kernel-internal flag bits */
+		kev.flags &= ~EV_SYSFLAGS;
+		error = kevent_register(kq, &kev, p);
+		if ((error || (kev.flags & EV_RECEIPT)) && nevents > 0) {
+			/* report this change's result as an EV_ERROR event */
+			kev.flags = EV_ERROR;
+			kev.data = error;
+			error = kevent_copyout(&kev, &ueventlist, p, iskev64);
+			if (error == 0) {
+				nevents--;
+				noutputs++;
+			}
+		}
+		nchanges--;
+	}
+
+	/* store the continuation/completion data in the uthread */
+	ut = (uthread_t)get_bsdthread_info(current_thread());
+	cont_args = &ut->uu_kevent.ss_kevent;
+	cont_args->fp = fp;
+	cont_args->fd = fd;
+	cont_args->retval = retval;
+	cont_args->eventlist = ueventlist;
+	cont_args->eventcount = nevents;
+	cont_args->eventout = noutputs;
+	cont_args->eventsize = iskev64;
+
+	/* scan only if there is room and nothing was reported yet */
+	if (nevents > 0 && noutputs == 0 && error == 0)
+		error = kqueue_scan(kq, kevent_callback,
+				    kevent_continue, cont_args,
+				    &atv, p);
+	/* does not return: drops the fp reference and exits the syscall */
+	kevent_continue(kq, cont_args, error);
+
+errorout:
+	fp_drop(p, fd, fp, 0);
+	return error;
+}
+
+
+/*
+ * kevent_callback - callback for each individual event
+ *
+ * called with nothing locked
+ * caller holds a reference on the kqueue
+ */
+
+static int
+kevent_callback(__unused struct kqueue *kq, struct kevent64_s *kevp,
+	void *data)
+{
+	struct _kevent *cont_args = (struct _kevent *)data;
+	int iskev64 = cont_args->eventsize;
+	int error;
+
+	assert(cont_args->eventout < cont_args->eventcount);
+
+	/*
+	 * Copy out the appropriate amount of event data for this user.
+	 */
+	error = kevent_copyout(kevp, &cont_args->eventlist, current_proc(), iskev64);
+
+	/*
+	 * If there isn't space for additional events, return
+	 * a harmless error to stop the processing here
+	 */
+	if (error == 0) {
+		cont_args->eventout++;
+		if (cont_args->eventout == cont_args->eventcount)
+			error = EWOULDBLOCK;
+	}
+	return error;
+}
+
+/*
+ * kevent_description - format a description of a kevent for diagnostic output
+ *
+ * called with a 128-byte string buffer
+ */
+
+char *
+kevent_description(struct kevent64_s *kevp, char *s, size_t n)
+{
+ /* snprintf NUL-terminates and truncates if the buffer is too short */
+ snprintf(s, n,
+ "kevent="
+ "{.ident=%#llx, .filter=%d, .flags=%#x, .fflags=%#x, .data=%#llx, .udata=%#llx, .ext[0]=%#llx, .ext[1]=%#llx}",
+ kevp->ident,
+ kevp->filter,
+ kevp->flags,
+ kevp->fflags,
+ kevp->data,
+ kevp->udata,
+ kevp->ext[0],
+ kevp->ext[1]);
+ /* return the caller's buffer so the call can nest inside printf-style uses */
+ return s;
+}
+
+/*
+ * kevent_register - add a new event to a kqueue
+ *
+ * Creates a mapping between the event source and
+ * the kqueue via a knote data structure.
+ *
+ * Because many/most of the event sources are file
+ * descriptor related, the knote is linked off
+ * the file descriptor table for quick access.
+ *
+ * called with nothing locked
+ * caller holds a reference on the kqueue
+ */
+