+ ibits = sel->ibits;
+ obits = sel->obits;
+ wql = sel->wql;
+
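+ /*
+ * Each fd_set occupies nw 32-bit words; ibits/obits hold the read,
+ * write, and except sets back to back (hence the msk loop below).
+ */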
+ nw = howmany(nfd, NFDBITS);
+
+ count = sel->count;
+
+ nc = 0;
+ if (count) {
+ proc_fdlock(p);
+ for (msk = 0; msk < 3; msk++) {
+ iptr = (u_int32_t *)&ibits[msk * nw];
+ optr = (u_int32_t *)&obits[msk * nw];
+
+ for (i = 0; i < nfd; i += NFDBITS) {
+ bits = iptr[i/NFDBITS];
+
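+ /*
+ * ffs() returns the 1-based index of the lowest set bit (0 when no
+ * bits remain), so this visits each selected fd in ascending order.
+ */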
+ while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
+ bits &= ~(1U << j);
+ fp = fdp->fd_ofiles[fd];
+
+ if (fp == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
+ /*
+ * If we abort because of a bad
+ * fd, let the caller unwind...
+ */
+ proc_fdunlock(p);
+ return (EBADF);
+ }
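+ /*
+ * First pass: hand fo_select() a wait queue link and mark the fp as
+ * in-select (or record a conflict if another select already owns it).
+ * Second pass, after wakeup: pass no link and clear the marker if
+ * this wait queue set is the owner.
+ */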
+ if (sel_pass == SEL_SECONDPASS) {
+ wql_ptr = (char *)0;
+ if ((fp->f_flags & FP_INSELECT) && (fp->f_waddr == (void *)wqsub)) {
+ fp->f_flags &= ~FP_INSELECT;
+ fp->f_waddr = (void *)0;
+ }
+ } else {
+ wql_ptr = (wql + nc * SIZEOF_WAITQUEUE_LINK);
+ if (fp->f_flags & FP_INSELECT) {
+ /* someone is already in select on this fp */
+ fp->f_flags |= FP_SELCONFLICT;
+ wait_queue_link(&select_conflict_queue, (wait_queue_set_t)wqsub);
+ } else {
+ fp->f_flags |= FP_INSELECT;
+ fp->f_waddr = (void *)wqsub;
+ }
+ }
+
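+ /* The check below runs with the credential the file was opened with. */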
+ context.vc_ucred = fp->f_cred;
+
+ /* Do the select check; set the output bit if the fd is ready */
+ if (fp->f_ops
+ && fo_select(fp, flag[msk], wql_ptr, &context)) {
+ optr[fd/NFDBITS] |= (1U << (fd % NFDBITS));
+ n++;
+ }
+ nc++;
+ }
+ }
+ }
+ proc_fdunlock(p);
+ }
+ *retval = n;
+ return (0);
+}
+
+int poll_callback(struct kqueue *, struct kevent64_s *, void *);
+
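+ /*
+ * Continuation state handed to poll_callback() via kqueue_scan():
+ * the user pollfd array, its length, and the running ready count.
+ */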
+struct poll_continue_args {
+ user_addr_t pca_fds;
+ u_int pca_nfds;
+ u_int pca_rfds;
+};
+
+int
+poll(struct proc *p, struct poll_args *uap, int32_t *retval)
+{
+ __pthread_testcancel(1);
+ return (poll_nocancel(p, (struct poll_nocancel_args *)uap, retval));
+}
+
+
+int
+poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval)
+{
+ struct poll_continue_args *cont;
+ struct pollfd *fds;
+ struct kqueue *kq;
+ struct timeval atv;
+ int ncoll, error = 0;
+ u_int nfds = uap->nfds;
+ u_int rfds = 0;
+ u_int i;
+ size_t ni;
+
+ /*
+ * This is kinda bogus. We have fd limits, but that is not
+ * really related to the size of the pollfd array. Make sure
+ * we let the process use at least FD_SETSIZE entries and at
+ * least enough for the current limits. We want to be reasonably
+ * safe, but not overly restrictive.
+ */
+ if (nfds > OPEN_MAX ||
+ (nfds > p->p_rlimit[RLIMIT_NOFILE].rlim_cur && (proc_suser(p) || nfds > FD_SETSIZE)))
+ return (EINVAL);
+
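+ /*
+ * poll() is built on kqueue: one kevent is registered per requested
+ * event below, then the queue is scanned (and possibly slept on).
+ */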
+ kq = kqueue_alloc(p);
+ if (kq == NULL)
+ return (EAGAIN);
+
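+ /*
+ * A single allocation carries both the continuation args and the
+ * kernel copy of the pollfd array; fds points just past cont.
+ */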
+ ni = nfds * sizeof(struct pollfd) + sizeof(struct poll_continue_args);
+ MALLOC(cont, struct poll_continue_args *, ni, M_TEMP, M_WAITOK);
+ if (NULL == cont) {
+ error = EAGAIN;
+ goto out;
+ }
+
+ fds = (struct pollfd *)&cont[1];
+ error = copyin(uap->fds, fds, nfds * sizeof(struct pollfd));
+ if (error)
+ goto out;
+
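+ /*
+ * The timeout is in milliseconds; turn it into an absolute deadline
+ * against the current uptime. -1 leaves atv zeroed, which
+ * kqueue_scan() takes as "no deadline" (wait indefinitely).
+ */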
+ if (uap->timeout != -1) {
+ struct timeval rtv;
+
+ atv.tv_sec = uap->timeout / 1000;
+ atv.tv_usec = (uap->timeout % 1000) * 1000;
+ if (itimerfix(&atv)) {
+ error = EINVAL;
+ goto out;
+ }
+ getmicrouptime(&rtv);
+ timevaladd(&atv, &rtv);
+ } else {
+ atv.tv_sec = 0;
+ atv.tv_usec = 0;
+ }
+
+ /* JMM - all this P_SELECT stuff is bogus */
+ ncoll = nselcoll;
+ OSBitOrAtomic(P_SELECT, &p->p_flag);
+ for (i = 0; i < nfds; i++) {
+ short events = fds[i].events;
+ struct kevent64_s kev;
+ int kerror = 0;
+
+ /* per spec, ignore fd values below zero */
+ if (fds[i].fd < 0) {
+ fds[i].revents = 0;
+ continue;
+ }
+
+ /* convert the poll event into a kqueue kevent */
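+ /*
+ * Requested poll events map onto kqueue filters as follows:
+ *   POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP -> EVFILT_READ
+ *   POLLOUT | POLLWRNORM | POLLWRBAND                    -> EVFILT_WRITE
+ *   POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE      -> EVFILT_VNODE
+ */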
+ kev.ident = fds[i].fd;
+ kev.flags = EV_ADD | EV_ONESHOT | EV_POLL;
+ kev.udata = CAST_USER_ADDR_T(&fds[i]);
+ kev.fflags = 0;
+ kev.data = 0;
+ kev.ext[0] = 0;
+ kev.ext[1] = 0;
+
+ /* Handle input events */
+ if (events & ( POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP )) {
+ kev.filter = EVFILT_READ;
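+ /* Only POLLPRI/POLLRDBAND were requested: restrict the filter to out-of-band data. */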
+ if (!(events & ( POLLIN | POLLRDNORM )))
+ kev.flags |= EV_OOBAND;
+ kerror = kevent_register(kq, &kev, p);
+ }
+
+ /* Handle output events */
+ if (kerror == 0 &&
+ events & ( POLLOUT | POLLWRNORM | POLLWRBAND )) {
+ kev.filter = EVFILT_WRITE;
+ kerror = kevent_register(kq, &kev, p);
+ }
+
+ /* Handle BSD extension vnode events */
+ if (kerror == 0 &&
+ events & ( POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE )) {
+ kev.filter = EVFILT_VNODE;
+ kev.fflags = 0;
+ if (events & POLLEXTEND)
+ kev.fflags |= NOTE_EXTEND;
+ if (events & POLLATTRIB)
+ kev.fflags |= NOTE_ATTRIB;
+ if (events & POLLNLINK)
+ kev.fflags |= NOTE_LINK;
+ if (events & POLLWRITE)
+ kev.fflags |= NOTE_WRITE;
+ kerror = kevent_register(kq, &kev, p);
+ }
+
+ if (kerror != 0) {
+ fds[i].revents = POLLNVAL;
+ rfds++;
+ } else
+ fds[i].revents = 0;
+ }
+
+ /* Did we have any trouble registering? */
+ if (rfds > 0)
+ goto done;
+
+ /* scan for, and possibly wait for, the kevents to trigger */
+ cont->pca_fds = uap->fds;
+ cont->pca_nfds = nfds;
+ cont->pca_rfds = rfds;
+ error = kqueue_scan(kq, poll_callback, NULL, cont, &atv, p);
+ rfds = cont->pca_rfds;
+
+ done:
+ OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
+ /* poll is not restarted after signals... */
+ if (error == ERESTART)
+ error = EINTR;
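+ /* ...and a timeout is not an error; it is reported as zero ready fds */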
+ if (error == EWOULDBLOCK)
+ error = 0;
+ if (error == 0) {
+ error = copyout(fds, uap->fds, nfds * sizeof(struct pollfd));
+ *retval = rfds;
+ }
+
+ out:
+ if (NULL != cont)
+ FREE(cont, M_TEMP);
+
+ kqueue_dealloc(kq);
+ return (error);
+}
+
+int
+poll_callback(__unused struct kqueue *kq, struct kevent64_s *kevp, void *data)
+{
+ struct poll_continue_args *cont = (struct poll_continue_args *)data;
+ struct pollfd *fds = CAST_DOWN(struct pollfd *, kevp->udata);
+ short mask;
+
+ /* convert the results back into revents */
+ if (kevp->flags & EV_EOF)
+ fds->revents |= POLLHUP;
+ if (kevp->flags & EV_ERROR)
+ fds->revents |= POLLERR;
+
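+ /*
+ * POLLHUP/POLLERR above are reported unconditionally; the
+ * filter-specific bits below are masked by what the caller
+ * actually requested in events.
+ */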
+ switch (kevp->filter) {
+ case EVFILT_READ:
+ if (fds->revents & POLLHUP)
+ mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND );
+ else {
+ mask = 0;
+ if (kevp->data != 0)
+ mask |= (POLLIN | POLLRDNORM );
+ if (kevp->flags & EV_OOBAND)
+ mask |= ( POLLPRI | POLLRDBAND );
+ }
+ fds->revents |= (fds->events & mask);
+ break;
+
+ case EVFILT_WRITE:
+ if (!(fds->revents & POLLHUP))
+ fds->revents |= (fds->events & ( POLLOUT | POLLWRNORM | POLLWRBAND ));
+ break;
+
+ case EVFILT_VNODE:
+ if (kevp->fflags & NOTE_EXTEND)
+ fds->revents |= (fds->events & POLLEXTEND);
+ if (kevp->fflags & NOTE_ATTRIB)
+ fds->revents |= (fds->events & POLLATTRIB);
+ if (kevp->fflags & NOTE_LINK)
+ fds->revents |= (fds->events & POLLNLINK);
+ if (kevp->fflags & NOTE_WRITE)
+ fds->revents |= (fds->events & POLLWRITE);
+ break;
+ }
+
+ if (fds->revents)
+ cont->pca_rfds++;
+
+ return 0;
+}
+
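+ /*
+ * Generic select routine for drivers whose devices are always
+ * ready; never blocks.
+ */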
+int
+seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p)
+{
+ return (1);
+}
+
+/*
+ * selcount
+ *
+ * Count the number of bits set in the input bit vector, and establish an
+ * outstanding fp->f_iocount for each of the descriptors which will be in
+ * use in the select operation.
+ *
+ * Parameters: p The process doing the select
+ * ibits The input bit vector
+ * nfd The number of fd's in the vector
+ * countp Pointer to where to store the bit count
+ *
+ * Returns: 0 Success
+ * EIO Bad per process open file table
+ * EBADF One of the bits in the input bit vector
+ * references an invalid fd
+ *
+ * Implicit: *countp (modified) Count of fd's
+ *
+ * Notes: This function is the first pass under the proc_fdlock() that
+ * permits us to recognize invalid descriptors in the bit vector;
+ * they may, however, not remain valid through the drop and
+ * later reacquisition of the proc_fdlock().
+ */
+static int
+selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp)
+{
+ struct filedesc *fdp = p->p_fd;
+ int msk, i, j, fd;
+ u_int32_t bits;
+ struct fileproc *fp;
+ int n = 0;
+ u_int32_t *iptr;
+ u_int nw;
+ int error=0;
+ int dropcount;
+ int need_wakeup = 0;