+ struct pipe *wpipe;
+ int retval;
+
+ /*
+ * the caller holds the pipe mutex: either we're being called back via the
+ * KNOTE post made in pipeselwakeup (which already holds it), or the
+ * touch/process routines below took it before calling us.  report the
+ * bytes available to read in kn_data and decide whether the knote fires.
+ */
+
+ wpipe = rpipe->pipe_peer;
+ kn->kn_data = rpipe->pipe_buffer.cnt;
+ if ((rpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) ||
+ (wpipe == NULL) || (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF))) {
+ kn->kn_flags |= EV_EOF;
+ retval = 1;
+ } else {
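+ /* default low-water mark is one byte; honor NOTE_LOWAT but clamp it to the pipe's capacity */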
+ int64_t lowwat = 1;
+ if (kn->kn_sfflags & NOTE_LOWAT) {
+ if (rpipe->pipe_buffer.size && kn->kn_sdata > MAX_PIPESIZE(rpipe))
+ lowwat = MAX_PIPESIZE(rpipe);
+ else if (kn->kn_sdata > lowwat)
+ lowwat = kn->kn_sdata;
+ }
+ retval = kn->kn_data >= lowwat;
+ }
+ return (retval);
+}
+
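+/*
+ * knote event callback for EVFILT_READ on a pipe: called via the KNOTE
+ * post in pipeselwakeup with the pipe mutex already held.
+ */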
+static int
+filt_piperead(struct knote *kn, long hint)
+{
+#pragma unused(hint)
+ struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
+
+ return filt_piperead_common(kn, rpipe);
+}
+
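+/*
+ * accept updated kevent parameters from userspace (notably the NOTE_LOWAT
+ * threshold) and re-evaluate, under the pipe lock, whether the knote fires.
+ */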
+static int
+filt_pipereadtouch(struct knote *kn, struct kevent_internal_s *kev)
+{
+ struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
+ int retval;
+
+ PIPE_LOCK(rpipe);
+
+ /* accept new inputs (and save the low water threshold and flag) */
+ kn->kn_sdata = kev->data;
+ kn->kn_sfflags = kev->fflags;
+ if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
+ kn->kn_udata = kev->udata;
+
+ /* determine whether the event should now fire */
+ retval = filt_piperead_common(kn, rpipe);
+
+ PIPE_UNLOCK(rpipe);
+
+ return retval;
+}
+
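+/*
+ * deliver the event: re-check readiness under the pipe lock, copy the
+ * triggering kevent out to the caller, and clear kn_fflags/kn_data for
+ * EV_CLEAR knotes.
+ */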
+static int
+filt_pipereadprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
+{
+#pragma unused(data)
+ struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
+ int retval;
+
+ PIPE_LOCK(rpipe);
+ retval = filt_piperead_common(kn, rpipe);
+ if (retval) {
+ *kev = kn->kn_kevent;
+ if (kn->kn_flags & EV_CLEAR) {
+ kn->kn_fflags = 0;
+ kn->kn_data = 0;
+ }
+ }
+ PIPE_UNLOCK(rpipe);
+
+ return (retval);
+}
+
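+/*
+ * common EVFILT_WRITE check: report the space remaining in the peer's
+ * buffer in kn_data, set EV_EOF if the other end is gone or draining, and
+ * fire once at least PIPE_BUF bytes (or the NOTE_LOWAT threshold) are free.
+ * the caller must hold the pipe mutex.
+ */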
+/*ARGSUSED*/
+static int
+filt_pipewrite_common(struct knote *kn, struct pipe *rpipe)
+{
+ struct pipe *wpipe;
+
+ /*
+ * the caller holds the pipe mutex: either we're being called back via the
+ * KNOTE post made in pipeselwakeup (which already holds it), or the
+ * touch/process routines below took it before calling us
+ */
+ wpipe = rpipe->pipe_peer;
+
+ if ((wpipe == NULL) || (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF))) {
+ kn->kn_data = 0;
+ kn->kn_flags |= EV_EOF;
+ return (1);
+ }
+ kn->kn_data = MAX_PIPESIZE(wpipe) - wpipe->pipe_buffer.cnt;
+
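+ /* default low-water mark is PIPE_BUF bytes of free space; honor NOTE_LOWAT but clamp it to the pipe's capacity */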
+ int64_t lowwat = PIPE_BUF;
+ if (kn->kn_sfflags & NOTE_LOWAT) {
+ if (wpipe->pipe_buffer.size && kn->kn_sdata > MAX_PIPESIZE(wpipe))
+ lowwat = MAX_PIPESIZE(wpipe);
+ else if (kn->kn_sdata > lowwat)
+ lowwat = kn->kn_sdata;
+ }
+
+ return (kn->kn_data >= lowwat);
+}
+
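+/*
+ * knote event callback for EVFILT_WRITE on a pipe: called via the KNOTE
+ * post in pipeselwakeup with the pipe mutex already held.
+ */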
+/*ARGSUSED*/
+static int
+filt_pipewrite(struct knote *kn, long hint)
+{
+#pragma unused(hint)
+ struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
+
+ return filt_pipewrite_common(kn, rpipe);
+}
+
+
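+/*
+ * accept updated kevent parameters from userspace (notably the NOTE_LOWAT
+ * threshold) and re-evaluate, under the pipe lock, whether the knote fires.
+ */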
+static int
+filt_pipewritetouch(struct knote *kn, struct kevent_internal_s *kev)
+{
+ struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
+ int res;
+
+ PIPE_LOCK(rpipe);
+
+ /* accept new kevent data (and save off lowat threshold and flag) */
+ kn->kn_sfflags = kev->fflags;
+ kn->kn_sdata = kev->data;
+ if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
+ kn->kn_udata = kev->udata;
+
+ /* determine whether the event should now fire */
+ res = filt_pipewrite_common(kn, rpipe);
+
+ PIPE_UNLOCK(rpipe);
+
+ return res;
+}
+
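+/*
+ * deliver the event: re-check readiness under the pipe lock, copy the
+ * triggering kevent out to the caller, and clear kn_fflags/kn_data for
+ * EV_CLEAR knotes.
+ */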
+static int
+filt_pipewriteprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
+{
+#pragma unused(data)
+ struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
+ int res;
+
+ PIPE_LOCK(rpipe);
+ res = filt_pipewrite_common(kn, rpipe);
+ if (res) {
+ *kev = kn->kn_kevent;
+ if (kn->kn_flags & EV_CLEAR) {
+ kn->kn_fflags = 0;
+ kn->kn_data = 0;
+ }
+ }
+ PIPE_UNLOCK(rpipe);
+
+ return res;
+}