-/*
- * set up the control block
- */
- wpipe->pipe_map.npages = i;
- wpipe->pipe_map.pos =
- ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
- wpipe->pipe_map.cnt = size;
-
-/*
- * and map the buffer
- */
- if (wpipe->pipe_map.kva == 0) {
- /*
- * We need to allocate space for an extra page because the
- * address range might (will) span pages at times.
- */
- wpipe->pipe_map.kva = kmem_alloc_nofault(kernel_map,
- wpipe->pipe_buffer.size + PAGE_SIZE);
- atomic_add_int(&amountpipekvawired,
- wpipe->pipe_buffer.size + PAGE_SIZE);
- }
- pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
- wpipe->pipe_map.npages);
-
-/*
- * and update the uio data
- */
-
- uio->uio_iov->iov_len -= size;
- uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
- if (uio->uio_iov->iov_len == 0)
- uio->uio_iov++;
- uio_setresid(uio, (uio_resid(uio) - size));
- uio->uio_offset += size;
- return (0);
-}
-
/*
 * Unmap and unwire the process buffer used for a direct (page-flipping)
 * write: remove the kernel virtual mapping of the writer's pages, possibly
 * release the cached KVA range, and drop the per-page holds taken when the
 * buffer was built.
 *
 * wpipe: the pipe whose direct-write mapping is being torn down.
 *
 * NOTE(review): callers drop the pipe mutex (PIPE_UNLOCK) before calling
 * in while holding the long-term pipelock — confirm against all call sites.
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	if (wpipe->pipe_map.kva) {
		/* Invalidate the kernel mappings of the writer's pages. */
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekvawired > maxpipekvawired / 2) {
			/*
			 * Conserve address space: when over half the global
			 * wired-KVA budget, give the range back to the kernel
			 * map instead of caching it for the next direct write.
			 */
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			/* The extra page matches the slack added at allocation. */
			kmem_free(kernel_map, kva,
			    wpipe->pipe_buffer.size + PAGE_SIZE);
			atomic_subtract_int(&amountpipekvawired,
			    wpipe->pipe_buffer.size + PAGE_SIZE);
		}
	}
	/* Drop the hold on each source page so it may be paged or freed. */
	vm_page_lock_queues();
	for (i = 0; i < wpipe->pipe_map.npages; i++) {
		vm_page_unhold(wpipe->pipe_map.ms[i]);
	}
	vm_page_unlock_queues();
	wpipe->pipe_map.npages = 0;
}
-
/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 *
 * wpipe: pipe whose pending direct write is converted into a buffered one.
 *
 * Called with the pipe mutex held; the mutex is dropped around the copy
 * and the mapping teardown, then re-taken before returning.
 *
 * NOTE(review): assumes pipe_map.cnt fits in pipe_buffer (enforced by the
 * build path, not visible here) — TODO confirm.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;	/* bytes still pending in the direct-write mapping */
	int pos;	/* offset of the data within the mapped range */

	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;

	/*
	 * Rewrite the circular-buffer indices so the cloned data occupies
	 * [0, size), and clear PIPE_DIRECTW so readers consume from the
	 * ordinary buffer instead of the mapped pages.
	 */
	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	PIPE_UNLOCK(wpipe);
	/* Copy out of the KVA mapping before the source pages are released. */
	bcopy((caddr_t) wpipe->pipe_map.kva + pos,
	    wpipe->pipe_buffer.buffer, size);
	pipe_destroy_write_buffer(wpipe);
	PIPE_LOCK(wpipe);
}
-
/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 *
 * wpipe: write-side pipe.
 * uio:   the writer's I/O request, whose pages get mapped directly.
 *
 * Returns 0 on success, EPIPE if the reader goes away, or the error from
 * msleep (e.g. on signal delivery, since PCATCH is set).
 *
 * Called with the pipe mutex (PIPE_MTX) held; the mutex is dropped and
 * re-taken around the VM operations that build/destroy the page mapping.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	/* Wait for any previous direct write to be fully consumed. */
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * Buffered data must drain completely before the direct
		 * write can start: wake the reader, sleep, and re-check
		 * everything from the top.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	/* Claim the direct-write slot before dropping the mutex. */
	wpipe->pipe_state |= PIPE_DIRECTW;

	pipelock(wpipe, 0);
	PIPE_UNLOCK(wpipe);
	/* Wire the user pages and map them into kernel VA (may sleep). */
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_LOCK(wpipe);
	pipeunlock(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	/* Sleep until the reader clears PIPE_DIRECTW (mapping consumed). */
	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			/* Reader went away: tear down the mapping and fail. */
			pipelock(wpipe, 0);
			PIPE_UNLOCK(wpipe);
			pipe_destroy_write_buffer(wpipe);
			PIPE_LOCK(wpipe);
			pipeselwakeup(wpipe, wpipe);
			pipeunlock(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe, wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
	}

	pipelock(wpipe,0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 * (We were interrupted before the reader finished; save the
		 * unread data by copying it into the circular buffer.)
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		/* Reader consumed everything; just release the mapping. */
		PIPE_UNLOCK(wpipe);
		pipe_destroy_write_buffer(wpipe);
		PIPE_LOCK(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);	/* let any waiting reader re-evaluate pipe state */
	return (error);
}
-#endif
-
-
-
-static int
-pipe_write(struct fileproc *fp, struct uio *uio, __unused kauth_cred_t active_cred, __unused int flags, __unused struct proc *p)
-{
- int error = 0;
- int orig_resid;
- int pipe_size;
- struct pipe *wpipe, *rpipe;
-
- rpipe = (struct pipe *)fp->f_data;
-
- PIPE_LOCK(rpipe);
- wpipe = rpipe->pipe_peer;
-
- /*
- * detect loss of pipe read side, issue SIGPIPE if lost.
- */
- if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF)) {
- PIPE_UNLOCK(rpipe);
- return (EPIPE);
- }
-#ifdef MAC
- error = mac_check_pipe_write(active_cred, wpipe);
- if (error) {
- PIPE_UNLOCK(rpipe);
- return (error);
- }
-#endif
- ++wpipe->pipe_busy;
-
- pipe_size = 0;
-
- if (wpipe->pipe_buffer.buffer == 0) {
- /*
- * need to allocate some storage... we delay the allocation
- * until the first write on fd[0] to avoid allocating storage for both
- * 'pipe ends'... most pipes are half-duplex with the writes targeting
- * fd[1], so allocating space for both ends is a waste...
- *
- * Reduce to 1/4th pipe size if we're over our global max.
- */
- if (amountpipekva > maxpipekva / 2)
- pipe_size = SMALL_PIPE_SIZE;
- else
- pipe_size = PIPE_SIZE;
- }