-static int
-pipe_build_write_buffer(wpipe, uio)
- struct pipe *wpipe;
- struct uio *uio;
-{
- pmap_t pmap;
- u_int size;
- int i, j;
- vm_offset_t addr, endaddr;
-
- size = (u_int) uio->uio_iov->iov_len;
- if (size > wpipe->pipe_buffer.size)
- size = wpipe->pipe_buffer.size;
-
- pmap = vmspace_pmap(curproc->p_vmspace);
- endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
- addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
- for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
- /*
- * vm_fault_quick() can sleep. Consequently,
- * vm_page_lock_queues() and vm_page_unlock_queues()
- * should not be performed outside of this loop.
- */
- race:
- if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0) {
- vm_page_lock_queues();
- for (j = 0; j < i; j++)
- vm_page_unhold(wpipe->pipe_map.ms[j]);
- vm_page_unlock_queues();
- return (EFAULT);
- }
- wpipe->pipe_map.ms[i] = pmap_extract_and_hold(pmap, addr,
- VM_PROT_READ);
- if (wpipe->pipe_map.ms[i] == NULL)
- goto race;
- }
-
-/*
- * set up the control block
- */
- wpipe->pipe_map.npages = i;
- wpipe->pipe_map.pos =
- ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
- wpipe->pipe_map.cnt = size;
-
-/*
- * and map the buffer
- */
- if (wpipe->pipe_map.kva == 0) {
- /*
- * We need an extra page of space because the user's address
- * range is rarely page aligned, so the bytes to be mapped can
- * span one more page than pipe_buffer.size alone would cover.
- */
- wpipe->pipe_map.kva = kmem_alloc_nofault(kernel_map,
- wpipe->pipe_buffer.size + PAGE_SIZE);
- atomic_add_int(&amountpipekvawired,
- wpipe->pipe_buffer.size + PAGE_SIZE);
- }
- pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
- wpipe->pipe_map.npages);
-
-/*
- * and update the uio data
- */
-
- uio->uio_iov->iov_len -= size;
- uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
- if (uio->uio_iov->iov_len == 0)
- uio->uio_iov++;
- uio_setresid(uio, (uio_resid(uio) - size));
- uio->uio_offset += size;
- return (0);
-}
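
The loop above faults in and holds each user page, and the "control block" fields then record how many pages were held (npages) and where the data starts within the first one (pos); that is also why the KVA window is sized at pipe_buffer.size plus one extra page. The arithmetic is easy to check outside the kernel: the sketch below uses hypothetical ex_* names and an assumed 4 KB page in place of the kernel's PAGE_SIZE/PAGE_MASK and trunc_page()/round_page().

/*
 * Illustrative userspace sketch of the page-span arithmetic above.  The
 * ex_* names and the 4 KB page size are assumptions for the example only;
 * the kernel uses its own PAGE_SIZE/PAGE_MASK and trunc_page()/round_page().
 */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SIZE    4096UL
#define EX_PAGE_MASK    (EX_PAGE_SIZE - 1)

#define ex_trunc_page(x)        ((x) & ~EX_PAGE_MASK)
#define ex_round_page(x)        (((x) + EX_PAGE_MASK) & ~EX_PAGE_MASK)

int
main(void)
{
        uintptr_t base = 0x1000f00;     /* user buffer, not page aligned */
        size_t size = 8192;             /* bytes the writer hands us */

        uintptr_t addr = ex_trunc_page(base);
        uintptr_t endaddr = ex_round_page(base + size);
        size_t npages = (endaddr - addr) / EX_PAGE_SIZE;
        size_t pos = base & EX_PAGE_MASK;       /* offset into first page */

        /* 8 KB starting 0xf00 into a page touches three pages. */
        printf("npages=%zu pos=%#zx\n", npages, pos);
        return (0);
}
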
-
-/*
- * unmap and unwire the process buffer
- */
-static void
-pipe_destroy_write_buffer(wpipe)
- struct pipe *wpipe;
-{
- int i;
-
- if (wpipe->pipe_map.kva) {
- pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);
-
- if (amountpipekvawired > maxpipekvawired / 2) {
- /* Conserve address space */
- vm_offset_t kva = wpipe->pipe_map.kva;
- wpipe->pipe_map.kva = 0;
- kmem_free(kernel_map, kva,
- wpipe->pipe_buffer.size + PAGE_SIZE);
- atomic_subtract_int(&amountpipekvawired,
- wpipe->pipe_buffer.size + PAGE_SIZE);
- }
- }
- vm_page_lock_queues();
- for (i = 0; i < wpipe->pipe_map.npages; i++) {
- vm_page_unhold(wpipe->pipe_map.ms[i]);
- }
- vm_page_unlock_queues();
- wpipe->pipe_map.npages = 0;
-}
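
The teardown above always unholds the pages, but it only returns the KVA window when wired pipe KVA has climbed past half of maxpipekvawired; otherwise the mapping is kept cached for the next direct write. A rough userspace analogue of that free-only-under-pressure policy follows, with made-up cache_* names and an assumed limit standing in for the pipe fields and counters.

/*
 * Rough userspace analogue of the free-only-under-pressure policy above.
 * The cache_* names and the 1 MB limit are assumptions for the example;
 * in the kernel the counters are amountpipekvawired and maxpipekvawired.
 */
#include <stdlib.h>

static size_t cache_bytes_used;                         /* cf. amountpipekvawired */
static const size_t cache_bytes_limit = 1 << 20;        /* cf. maxpipekvawired */

struct cached_buf {
        void    *ptr;
        size_t   len;
};

static void
cached_buf_release(struct cached_buf *cb)
{
        /* Only give the memory back when usage is past half the limit. */
        if (cb->ptr != NULL && cache_bytes_used > cache_bytes_limit / 2) {
                free(cb->ptr);
                cache_bytes_used -= cb->len;
                cb->ptr = NULL;
                cb->len = 0;
        }
}
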
-
-/*
- * In the case of a signal, the writing process might go away. This
- * code copies the data into the circular buffer so that the source
- * pages can be freed without loss of data.
- */
-static void
-pipe_clone_write_buffer(wpipe)
- struct pipe *wpipe;
-{
- int size;
- int pos;
-
- size = wpipe->pipe_map.cnt;
- pos = wpipe->pipe_map.pos;
-
- wpipe->pipe_buffer.in = size;
- wpipe->pipe_buffer.out = 0;
- wpipe->pipe_buffer.cnt = size;
- wpipe->pipe_state &= ~PIPE_DIRECTW;
-
- PIPE_UNLOCK(wpipe);
- bcopy((caddr_t) wpipe->pipe_map.kva + pos,
- wpipe->pipe_buffer.buffer, size);
- pipe_destroy_write_buffer(wpipe);
- PIPE_LOCK(wpipe);
-}
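
pipe_clone_write_buffer() falls back from "the reader pulls straight from the writer's pages" to an ordinary copy into the pipe's own ring buffer, resetting in, out and cnt so the reader sees the cloned bytes at offset zero. Below is a small userspace analogue of that hand-off; the ring_* names are invented for the example and stand in for pipe_buffer.buffer/in/out/cnt.

/*
 * Userspace analogue of the clone path above: copy data out of a borrowed
 * buffer into a ring buffer we own before the borrowed memory goes away.
 * The ring_* names are invented for the example.
 */
#include <string.h>
#include <stddef.h>

struct ring {
        char    *buf;           /* owned storage, large enough for 'len' */
        size_t   in, out, cnt;  /* producer index, consumer index, bytes queued */
};

/* Adopt 'len' bytes from a borrowed source into an empty ring. */
static void
ring_adopt(struct ring *r, const char *borrowed, size_t len)
{
        memcpy(r->buf, borrowed, len);
        r->in = len;            /* the next write lands after the cloned data */
        r->out = 0;             /* the reader starts from the beginning */
        r->cnt = len;           /* bytes now visible to the reader */
}
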
-
-/*
- * This implements the pipe buffer write mechanism. Note that only
- * a direct write OR a normal pipe write can be pending at any given time.
- * If there are any characters in the pipe buffer, the direct write will
- * be deferred until the receiving process grabs all of the bytes from
- * the pipe buffer. Then the direct mapping write is set up.
- */
-static int
-pipe_direct_write(wpipe, uio)
- struct pipe *wpipe;
- struct uio *uio;
-{
- int error;
-
-retry:
- while (wpipe->pipe_state & PIPE_DIRECTW) {
- if (wpipe->pipe_state & PIPE_WANTR) {
- wpipe->pipe_state &= ~PIPE_WANTR;
- wakeup(wpipe);
- }
- wpipe->pipe_state |= PIPE_WANTW;
- error = msleep(wpipe, PIPE_MTX(wpipe),
- PRIBIO | PCATCH, "pipdww", 0);
- if (error)
- goto error1;
- if (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) {
- error = EPIPE;
- goto error1;
- }
- }
- wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
- if (wpipe->pipe_buffer.cnt > 0) {
- if (wpipe->pipe_state & PIPE_WANTR) {
- wpipe->pipe_state &= ~PIPE_WANTR;
- wakeup(wpipe);
- }
-
- wpipe->pipe_state |= PIPE_WANTW;
- error = msleep(wpipe, PIPE_MTX(wpipe),
- PRIBIO | PCATCH, "pipdwc", 0);
- if (error)
- goto error1;
- if (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) {
- error = EPIPE;
- goto error1;
- }
- goto retry;
- }
-
- wpipe->pipe_state |= PIPE_DIRECTW;
-
- pipelock(wpipe, 0);
- PIPE_UNLOCK(wpipe);
- error = pipe_build_write_buffer(wpipe, uio);
- PIPE_LOCK(wpipe);
- pipeunlock(wpipe);
- if (error) {
- wpipe->pipe_state &= ~PIPE_DIRECTW;
- goto error1;
- }
-
- error = 0;
- while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
- if (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) {
- pipelock(wpipe, 0);
- PIPE_UNLOCK(wpipe);
- pipe_destroy_write_buffer(wpipe);
- PIPE_LOCK(wpipe);
- pipeselwakeup(wpipe, wpipe);
- pipeunlock(wpipe);
- error = EPIPE;
- goto error1;
- }
- if (wpipe->pipe_state & PIPE_WANTR) {
- wpipe->pipe_state &= ~PIPE_WANTR;
- wakeup(wpipe);
- }
- pipeselwakeup(wpipe, wpipe);
- error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
- "pipdwt", 0);
- }
-
- pipelock(wpipe, 0);
- if (wpipe->pipe_state & PIPE_DIRECTW) {
- /*
- * This bit of trickery substitutes the pipe's own kernel
- * buffer for the user pages of a process that might be
- * going away.
- */
- pipe_clone_write_buffer(wpipe);
- } else {
- PIPE_UNLOCK(wpipe);
- pipe_destroy_write_buffer(wpipe);
- PIPE_LOCK(wpipe);
- }
- pipeunlock(wpipe);
- return (error);
-
-error1:
- wakeup(wpipe);
- return (error);
-}
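
The retry loop in pipe_direct_write() is a conventional sleep/wakeup handshake: each side sets a PIPE_WANT* flag for itself, wakes the other side if its flag is set, and msleep()s on the pipe until the condition it needs (an empty buffer, or completion of the direct write) holds. A compact pthread sketch of the writer's half of that protocol is given below, with invented xpipe/XPIPE_* names in place of the kernel primitives.

/*
 * Pthread sketch of the writer's half of the sleep/wakeup handshake above.
 * The xpipe/XPIPE_* names are invented for the example; in the kernel the
 * flags are PIPE_WANTR/PIPE_WANTW and the sleep is msleep() on the pipe.
 */
#include <pthread.h>
#include <stddef.h>

#define XPIPE_WANTR     0x01            /* reader wants a wakeup */
#define XPIPE_WANTW     0x02            /* writer wants a wakeup */

struct xpipe {
        pthread_mutex_t lock;
        pthread_cond_t  wait;           /* both sides sleep here, cf. wakeup(wpipe) */
        unsigned        state;
        size_t          cnt;            /* bytes queued in the buffer */
};

/* Writer side: wake a waiting reader, then sleep until the buffer drains. */
static void
xpipe_wait_for_drain(struct xpipe *p)
{
        pthread_mutex_lock(&p->lock);
        while (p->cnt > 0) {
                if (p->state & XPIPE_WANTR) {
                        p->state &= ~XPIPE_WANTR;
                        pthread_cond_broadcast(&p->wait);
                }
                p->state |= XPIPE_WANTW;
                pthread_cond_wait(&p->wait, &p->lock);
        }
        pthread_mutex_unlock(&p->lock);
}
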
-#endif
-
-
-