+ return (kn->kn_data >= lowwat);
+}
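+
+/*
+ * Illustrative only, not part of this change: the low-water test above
+ * is what a userspace NOTE_LOWAT request ultimately drives. A minimal
+ * sketch, assuming `fd` is the write end of a pipe and `kq` an existing
+ * kqueue:
+ */
+#if 0
+#include <sys/types.h>
+#include <sys/event.h>
+
+static int
+wait_for_space(int kq, int fd, int64_t lowat)
+{
+	struct kevent kev;
+
+	/* deliver the event only once at least `lowat` bytes are writable */
+	EV_SET(&kev, fd, EVFILT_WRITE, EV_ADD, NOTE_LOWAT, lowat, NULL);
+	return kevent(kq, &kev, 1, &kev, 1, NULL);
+}
+#endif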
+
+int
+fill_pipeinfo(struct pipe * cpipe, struct pipe_info * pinfo)
+{
+#if CONFIG_MACF
+ int error;
+#endif
+ struct timeval now;
+ struct vinfo_stat * ub;
+ int pipe_size = 0;
+ int pipe_count;
+
+ if (cpipe == NULL)
+ return (EBADF);
+ PIPE_LOCK(cpipe);
+
+#if CONFIG_MACF
+ error = mac_pipe_check_stat(kauth_cred_get(), cpipe);
+ if (error) {
+ PIPE_UNLOCK(cpipe);
+ return (error);
+ }
+#endif
+ if (cpipe->pipe_buffer.buffer == 0) {
+ /*
+ * must be stat'ing the write fd
+ */
+ if (cpipe->pipe_peer) {
+ /*
+ * the peer still exists, use its info
+ */
+ pipe_size = MAX_PIPESIZE(cpipe->pipe_peer);
+ pipe_count = cpipe->pipe_peer->pipe_buffer.cnt;
+ } else {
+ pipe_count = 0;
+ }
+ } else {
+ pipe_size = MAX_PIPESIZE(cpipe);
+ pipe_count = cpipe->pipe_buffer.cnt;
+ }
+ /*
+ * since the peer's buffer is set up outside of the lock,
+ * we might catch it in a transient state
+ */
+ if (pipe_size == 0)
+ pipe_size = PIPE_SIZE;
+
+ ub = &pinfo->pipe_stat;
+
+ bzero(ub, sizeof(*ub));
+ ub->vst_mode = S_IFIFO | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
+ ub->vst_blksize = pipe_size;
+ ub->vst_size = pipe_count;
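+ /* report size rounded up to whole vst_blksize-sized "blocks" */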
+ if (ub->vst_blksize != 0)
+ ub->vst_blocks = (ub->vst_size + ub->vst_blksize - 1) / ub->vst_blksize;
+ ub->vst_nlink = 1;
+
+ ub->vst_uid = kauth_getuid();
+ ub->vst_gid = kauth_getgid();
+
+ microtime(&now);
+ ub->vst_atime = now.tv_sec;
+ ub->vst_atimensec = now.tv_usec * 1000;
+
+ ub->vst_mtime = now.tv_sec;
+ ub->vst_mtimensec = now.tv_usec * 1000;
+
+ ub->vst_ctime = now.tv_sec;
+ ub->vst_ctimensec = now.tv_usec * 1000;
+
+ /*
+ * Left as 0: st_dev, st_ino, st_rdev, st_flags, st_gen.
+ * XXX (st_dev, st_ino) should be unique.
+ */
+
+ pinfo->pipe_handle = (uint64_t)VM_KERNEL_ADDRPERM((uintptr_t)cpipe);
+ pinfo->pipe_peerhandle = (uint64_t)VM_KERNEL_ADDRPERM((uintptr_t)(cpipe->pipe_peer));
+ pinfo->pipe_status = cpipe->pipe_state;
+
+ PIPE_UNLOCK(cpipe);
+
+ return (0);
+}
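+
+/*
+ * Illustrative only, not part of this change: fill_pipeinfo() is what
+ * backs the PROC_PIDFDPIPEINFO flavor of proc_pidfdinfo(). A minimal
+ * userspace sketch of reading the result back, assuming <libproc.h>:
+ */
+#if 0
+#include <libproc.h>
+#include <stdio.h>
+
+static void
+print_pipe_info(pid_t pid, int fd)
+{
+	struct pipe_info pi;
+
+	/* returns the number of bytes copied out, or <= 0 on failure */
+	if (proc_pidfdinfo(pid, fd, PROC_PIDFDPIPEINFO,
+	    &pi, sizeof(pi)) <= 0)
+		return;
+
+	printf("handle 0x%llx peer 0x%llx buffered %lld of %d bytes\n",
+	    (unsigned long long)pi.pipe_handle,
+	    (unsigned long long)pi.pipe_peerhandle,
+	    (long long)pi.pipe_stat.vst_size,
+	    pi.pipe_stat.vst_blksize);
+}
+#endif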
+
+
+static int
+pipe_drain(struct fileproc *fp, __unused vfs_context_t ctx)
+{
+
+ /* Note: fdlock already held */
+ struct pipe *ppipe, *cpipe = (struct pipe *)(fp->f_fglob->fg_data);
+
+ if (cpipe) {
+ PIPE_LOCK(cpipe);
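+ /* flag this end so a thread blocked in pipe_read/pipe_write gives up once woken */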
+ cpipe->pipe_state |= PIPE_DRAIN;
+ cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
+ wakeup(cpipe);
+
+ /* Must wake up peer: a writer sleeps on the read side */
+ if ((ppipe = cpipe->pipe_peer)) {
+ ppipe->pipe_state |= PIPE_DRAIN;
+ ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
+ wakeup(ppipe);
+ }
+
+ PIPE_UNLOCK(cpipe);
+ return 0;
+ }
+
+ return 1;
+}
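+
+/*
+ * Context, as a sketch only: pipe_drain() is reached through the pipe's
+ * fileops table (defined elsewhere in this file), along the lines of
+ *
+ *	static const struct fileops pipeops = {
+ *		...
+ *		.fo_drain = pipe_drain,
+ *	};
+ *
+ * so it runs while an fd is being torn down, to flush out any threads
+ * still blocked on the pipe before it can be closed.
+ */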
+
+
+ /*
+ * When a thread sets a write-select on a pipe, it creates an implicit,
+ * untracked dependency between that thread and the peer of the pipe
+ * on which the select is set. If the peer pipe is closed and freed
+ * before the select()ing thread wakes up, the system will panic as
+ * it attempts to unwind the dangling select(). To avoid that panic,
+ * we notice whenever a dangerous select() is set on a pipe, and
+ * defer the final deletion of the pipe until those select()s are all
+ * resolved. Since we can't currently detect exactly when that
+ * resolution happens, we use a simple garbage collection queue to
+ * reap the at-risk pipes 'later'.
+ */
+static void
+pipe_garbage_collect(struct pipe *cpipe)
+{
+ uint64_t old, now;
+ struct pipe_garbage *pgp;
+
+ /* Convert msecs to nsecs and then to abstime */
+ old = pipe_garbage_age_limit * 1000000;
+ nanoseconds_to_absolutetime(old, &old);
+
+ lck_mtx_lock(pipe_garbage_lock);
+
+ /* Free anything that's been queued longer than pipe_garbage_age_limit msecs */
+ now = mach_absolute_time();
+ old = now - old;
+ while ((pgp = pipe_garbage_head) && pgp->pg_timestamp < old) {
+ pipe_garbage_head = pgp->pg_next;
+ if (pipe_garbage_head == NULL)
+ pipe_garbage_tail = NULL;
+ pipe_garbage_count--;
+ zfree(pipe_zone, pgp->pg_pipe);
+ zfree(pipe_garbage_zone, pgp);
+ }
+
+ /* Add the new pipe (if any) to the tail of the garbage queue */
+ if (cpipe) {
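+ /* note: plain assignment, not |=, so all other state bits are cleared */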
+ cpipe->pipe_state = PIPE_DEAD;
+ pgp = (struct pipe_garbage *)zalloc(pipe_garbage_zone);
+ if (pgp == NULL) {
+ /*
+ * We're too low on memory to garbage collect the
+ * pipe. Freeing it runs the risk of panicking the
+ * system. All we can do is leak it and leave
+ * a breadcrumb behind. The good news, such as it
+ * is, is that this will probably never happen.
+ * We will probably hit the panic below first.
+ */
+ printf("Leaking pipe %p - no room left in the queue",
+ cpipe);
+ lck_mtx_unlock(pipe_garbage_lock);
+ return;
+ }
+
+ pgp->pg_pipe = cpipe;
+ pgp->pg_timestamp = now;
+ pgp->pg_next = NULL;
+
+ if (pipe_garbage_tail)
+ pipe_garbage_tail->pg_next = pgp;
+ pipe_garbage_tail = pgp;
+ if (pipe_garbage_head == NULL)
+ pipe_garbage_head = pipe_garbage_tail;
+
+ if (pipe_garbage_count++ >= PIPE_GARBAGE_QUEUE_LIMIT)
+ panic("Length of pipe garbage queue exceeded %d",
+ PIPE_GARBAGE_QUEUE_LIMIT);
+ }
+ lck_mtx_unlock(pipe_garbage_lock);