+
/*
 * knote_process - process a triggered event
 *
 * Validate that it is really still a triggered event
 * by calling the filter routines (if necessary). Hold
 * a use reference on the knote to avoid it being detached.
 * If it is still considered triggered, invoke the callback
 * routine provided and move it to the provided inprocess
 * queue.
 *
 * Returns the callback's error code once the event has been
 * delivered, or EJUSTRETURN when the event turned out not to
 * be deliverable (the callback was not invoked).
 *
 * caller holds a reference on the kqueue.
 * kqueue locked on entry and exit - but may be dropped
 * (around the filter/touch calls and the callback itself).
 */
static int
knote_process(struct knote *kn,
	kevent_callback_t callback,
	void *data,
	struct kqtailq *inprocessp,
	struct proc *p)
{
	struct kqueue *kq = kn->kn_kq;
	struct kevent64_s kev;	/* snapshot of the event handed to the callback */
	int touch;		/* non-zero: filter supplies an f_touch routine */
	int result;		/* non-zero: event is still considered triggered */
	int error;

	/*
	 * Determine the kevent state we want to return.
	 *
	 * Some event states need to be revalidated before returning
	 * them, others we take the snapshot at the time the event
	 * was enqueued.
	 *
	 * Events with non-NULL f_touch operations must be touched.
	 * Triggered events must fill in kev for the callback.
	 *
	 * Convert our lock to a use-count and call the event's
	 * filter routine(s) to update.
	 */
	if ((kn->kn_status & KN_DISABLED) != 0) {
		/* disabled knotes are never delivered; fall through to requeue */
		result = 0;
		touch = 0;
	} else {
		int revalidate;

		result = 1;
		/*
		 * Stay-queued knotes and level-triggered (non-EV_ONESHOT)
		 * knotes must be re-asked by the filter whether they are
		 * still triggered before delivery.
		 */
		revalidate = ((kn->kn_status & KN_STAYQUEUED) != 0 ||
			(kn->kn_flags & EV_ONESHOT) == 0);
		/* f_touch only exists on non-fd (f_isfd == 0) filters */
		touch = (!kn->kn_fop->f_isfd && kn->kn_fop->f_touch != NULL);

		if (revalidate || touch) {
			if (revalidate)
				knote_deactivate(kn);

			/* call the filter/touch routines with just a ref */
			if (kqlock2knoteuse(kq, kn)) {

				/* if we have to revalidate, call the filter */
				if (revalidate) {
					result = kn->kn_fop->f_event(kn, 0);
				}

				/* capture the kevent data - using touch if specified */
				if (result && touch) {
					kn->kn_fop->f_touch(kn, &kev, EVENT_PROCESS);
				}

				/* convert back to a kqlock - bail if the knote went away */
				if (!knoteuse2kqlock(kq, kn)) {
					return EJUSTRETURN;
				} else if (result) {
					/* if revalidated as alive, make sure it's active */
					if (!(kn->kn_status & KN_ACTIVE)) {
						knote_activate(kn, 0);
					}

					/* capture all events that occurred during filter */
					if (!touch) {
						kev = kn->kn_kevent;
					}

				} else if ((kn->kn_status & KN_STAYQUEUED) == 0) {
					/* was already dequeued, so just bail on this one */
					return EJUSTRETURN;
				}
			} else {
				/*
				 * could not obtain a use ref - presumably the
				 * knote is being dropped; nothing to deliver
				 */
				return EJUSTRETURN;
			}
		} else {
			/* no revalidation/touch needed - use the enqueue-time snapshot */
			kev = kn->kn_kevent;
		}
	}

	/* move knote onto inprocess queue */
	assert(kn->kn_tq == &kq->kq_head);
	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_tq = inprocessp;
	TAILQ_INSERT_TAIL(inprocessp, kn, kn_tqe);

	/*
	 * Determine how to dispatch the knote for future event handling.
	 * not-fired: just return (do not callout).
	 * One-shot: deactivate it.
	 * Clear: deactivate and clear the state.
	 * Dispatch: don't clear state, just deactivate it and mark it disabled.
	 * All others: just leave where they are.
	 */

	if (result == 0) {
		return EJUSTRETURN;
	} else if ((kn->kn_flags & EV_ONESHOT) != 0) {
		knote_deactivate(kn);
		/* one-shot delivered: take the drop ref, then detach and free */
		if (kqlock2knotedrop(kq, kn)) {
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p);
		}
	} else if ((kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) != 0) {
		if ((kn->kn_flags & EV_DISPATCH) != 0) {
			/* deactivate and disable all dispatch knotes */
			knote_deactivate(kn);
			kn->kn_status |= KN_DISABLED;
		} else if (!touch || kn->kn_fflags == 0) {
			/* only deactivate if nothing since the touch */
			knote_deactivate(kn);
		}
		if (!touch && (kn->kn_flags & EV_CLEAR) != 0) {
			/* manually clear non-touch knotes */
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		kqunlock(kq);
	} else {
		/*
		 * leave on inprocess queue. We'll
		 * move all the remaining ones back
		 * the kq queue and wakeup any
		 * waiters when we are done.
		 */
		kqunlock(kq);
	}

	/* callback to handle each event as we find it (kqueue unlocked here) */
	error = (callback)(kq, &kev, data);

	kqlock(kq);
	return error;
}
+
+/*
+ * Return 0 to indicate that processing should proceed,
+ * -1 if there is nothing to process.
+ *
+ * Called with kqueue locked and returns the same way,
+ * but may drop lock temporarily.
+ */
+static int
+kqueue_begin_processing(struct kqueue *kq)
+{
+ for (;;) {
+ if (kq->kq_count == 0) {
+ return -1;
+ }
+
+ /* if someone else is processing the queue, wait */
+ if (kq->kq_nprocess != 0) {
+ wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, &kq->kq_nprocess, THREAD_UNINT, 0);
+ kq->kq_state |= KQ_PROCWAIT;
+ kqunlock(kq);
+ thread_block(THREAD_CONTINUE_NULL);
+ kqlock(kq);
+ } else {
+ kq->kq_nprocess = 1;
+ return 0;
+ }
+ }
+}
+
+/*
+ * Called with kqueue lock held.
+ */
+static void
+kqueue_end_processing(struct kqueue *kq)
+{
+ kq->kq_nprocess = 0;
+ if (kq->kq_state & KQ_PROCWAIT) {
+ kq->kq_state &= ~KQ_PROCWAIT;
+ wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kq->kq_nprocess, THREAD_AWAKENED);
+ }
+}
+