[apple/xnu.git] / bsd / kern / sys_pipe.c (xnu-6153.81.5)
1/*
2 * Copyright (c) 1996 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Absolutely no warranty of function or purpose is made by the author
15 * John S. Dyson.
16 * 4. Modifications may be freely made to this file if the above conditions
17 * are met.
18 */
19/*
fe8ab488 20 * Copyright (c) 2003-2014 Apple Inc. All rights reserved.
91447636 21 *
2d21ac55 22 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 23 *
2d21ac55
A
24 * This file contains Original Code and/or Modifications of Original Code
25 * as defined in and that are subject to the Apple Public Source License
26 * Version 2.0 (the 'License'). You may not use this file except in
27 * compliance with the License. The rights granted to you under the License
28 * may not be used to create, or enable the creation or redistribution of,
29 * unlawful or unlicensed copies of an Apple operating system, or to
30 * circumvent, violate, or enable the circumvention or violation of, any
31 * terms of an Apple operating system software license agreement.
0a7de745 32 *
2d21ac55
A
33 * Please obtain a copy of the License at
34 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 35 *
2d21ac55
A
36 * The Original Code and all software distributed under the License are
37 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
38 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
39 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
40 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
41 * Please see the License for the specific language governing rights and
42 * limitations under the License.
0a7de745 43 *
2d21ac55
A
44 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
45 */
46/*
47 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
48 * support for mandatory and extensible security protections. This notice
49 * is included in support of clause 2.2 (b) of the Apple Public License,
50 * Version 2.0.
91447636
A
51 */
52
53/*
54 * This file contains a high-performance replacement for the socket-based
55 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
56 * all features of sockets, but does do everything that pipes normally
57 * do.
316670eb
A
58 *
59 * Pipes are implemented as circular buffers. The following are the valid states in pipe operations:
0a7de745 60 *
316670eb
A
61 * _________________________________
62 * 1. |_________________________________| r=w, c=0
0a7de745 63 *
316670eb
A
64 * _________________________________
65 * 2. |__r:::::wc_______________________| r <= w , c > 0
66 *
67 * _________________________________
68 * 3. |::::wc_____r:::::::::::::::::::::| r>w , c > 0
69 *
70 * _________________________________
71 * 4. |:::::::wrc:::::::::::::::::::::::| w=r, c = Max size
72 *
73 *
74 * Nomenclature:
75 * a-z define the steps in a program flow
76 * 1-4 are the states as defined above
77 * Action: is the file operation done on the pipe
0a7de745 78 *
316670eb
A
79 * Current: None Action: initialize with size M=200
80 * a. State 1 ( r=0, w=0, c=0)
0a7de745 81 *
316670eb
A
82 * Current: a Action: write(100) (w < M)
83 * b. State 2 (r=0, w=100, c=100)
0a7de745 84 *
316670eb
A
85 * Current: b Action: write(100) (w = M-w)
86 * c. State 4 (r=0,w=0,c=200)
0a7de745 87 *
316670eb
A
88 * Current: b Action: read(70) ( r < c )
89 * d. State 2(r=70,w=100,c=30)
0a7de745 90 *
316670eb
A
91 * Current: d Action: write(75) ( w < (m-w))
92 * e. State 2 (r=70,w=175,c=105)
0a7de745 93 *
316670eb
A
94 * Current: d Action: write(110) ( w > (m-w))
95 * f. State 3 (r=70,w=10,c=140)
0a7de745 96 *
316670eb
A
97 * Current: d Action: read(30) (r >= c )
98 * g. State 1 (r=100,w=100,c=0)
0a7de745 99 *
91447636
A
100 */
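/*
 * A minimal user-space sketch (not part of the kernel build) of the
 * bookkeeping described above, assuming a buffer of size M; the names
 * model_write()/model_read() are hypothetical and only illustrate how
 * 'w' (in), 'r' (out) and 'c' (cnt) move between states 1-4:
 *
 *	#define M 200
 *	struct model { unsigned in, out, cnt; };
 *
 *	static unsigned
 *	model_write(struct model *p, unsigned n)	// returns bytes accepted
 *	{
 *		if (n > M - p->cnt)
 *			n = M - p->cnt;		// no room once state 4 is reached
 *		p->in = (p->in + n) % M;	// wrapping past the end gives state 3
 *		p->cnt += n;
 *		return n;
 *	}
 *
 *	static unsigned
 *	model_read(struct model *p, unsigned n)		// returns bytes consumed
 *	{
 *		if (n > p->cnt)
 *			n = p->cnt;
 *		p->out = (p->out + n) % M;	// cnt back to 0 is state 1 (r == w)
 *		p->cnt -= n;
 *		return n;
 *	}
 *
 * Replaying the a-g sequence above through these two helpers reproduces the
 * (r,w,c) triples listed for each step.
 */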
101
102/*
316670eb
A
103 * This code creates half-duplex pipe buffers to facilitate file-like
104 * operations on pipes. The initial buffer is very small, but this can
105 * dynamically change to larger sizes based on usage. The buffer size is never
106 * reduced. The total amount of kernel memory used is governed by maxpipekva.
107 * If the dynamic expansion limit is reached, the output thread is blocked
0a7de745 108 * until the pipe buffer empties enough to continue.
91447636
A
109 *
110 * In order to limit the resource use of pipes, two sysctls exist:
111 *
112 * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
0a7de745 113 * address space available to us in pipe_map.
91447636
A
114 *
115 * Memory usage may be monitored through the sysctls
316670eb 116 * kern.ipc.pipes, kern.ipc.pipekva.
91447636
A
117 *
118 */
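/*
 * A hedged user-space sketch (not part of the kernel build) of monitoring
 * those counters with sysctlbyname(3).  Note the SYSCTL_INT declarations
 * below are only compiled in when PIPE_SYSCTLS is defined, so the names may
 * not be present on every build:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int pipes = 0, kva = 0;
 *		size_t len = sizeof(pipes);
 *
 *		if (sysctlbyname("kern.ipc.pipes", &pipes, &len, NULL, 0) != 0)
 *			return 1;
 *		len = sizeof(kva);
 *		if (sysctlbyname("kern.ipc.pipekva", &kva, &len, NULL, 0) != 0)
 *			return 1;
 *		printf("%d pipes, %d bytes of pipe KVA\n", pipes, kva);
 *		return 0;
 *	}
 */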
119
120#include <sys/param.h>
121#include <sys/systm.h>
122#include <sys/filedesc.h>
123#include <sys/kernel.h>
124#include <sys/vnode.h>
125#include <sys/proc_internal.h>
126#include <sys/kauth.h>
127#include <sys/file_internal.h>
128#include <sys/stat.h>
129#include <sys/ioctl.h>
130#include <sys/fcntl.h>
131#include <sys/malloc.h>
132#include <sys/syslog.h>
133#include <sys/unistd.h>
134#include <sys/resourcevar.h>
135#include <sys/aio_kern.h>
136#include <sys/signalvar.h>
137#include <sys/pipe.h>
138#include <sys/sysproto.h>
0c530ab8 139#include <sys/proc_info.h>
91447636 140
b0d623f7 141#include <security/audit/audit.h>
91447636
A
142
143#include <sys/kdebug.h>
144
145#include <kern/zalloc.h>
316670eb 146#include <kern/kalloc.h>
91447636
A
147#include <vm/vm_kern.h>
148#include <libkern/OSAtomic.h>
5ba3f43e
A
149#include <libkern/section_keywords.h>
150
151#if CONFIG_MACF
152#include <security/mac_framework.h>
153#endif
91447636
A
154
155#define f_flag f_fglob->fg_flag
91447636
A
156#define f_msgcount f_fglob->fg_msgcount
157#define f_cred f_fglob->fg_cred
158#define f_ops f_fglob->fg_ops
159#define f_offset f_fglob->fg_offset
160#define f_data f_fglob->fg_data
91447636 161
cb323159
A
162struct pipepair {
163 lck_mtx_t pp_mtx;
164 struct pipe pp_rpipe;
165 struct pipe pp_wpipe;
166};
167
168#define PIPE_PAIR(pipe) \
169 __container_of(PIPE_MTX(pipe), struct pipepair, pp_mtx)
170
91447636 171/*
0a7de745 172 * interfaces to the outside world exported through file operations
91447636
A
173 */
174static int pipe_read(struct fileproc *fp, struct uio *uio,
0a7de745 175 int flags, vfs_context_t ctx);
91447636 176static int pipe_write(struct fileproc *fp, struct uio *uio,
0a7de745 177 int flags, vfs_context_t ctx);
2d21ac55 178static int pipe_close(struct fileglob *fg, vfs_context_t ctx);
2d21ac55 179static int pipe_select(struct fileproc *fp, int which, void * wql,
0a7de745 180 vfs_context_t ctx);
2d21ac55 181static int pipe_kqfilter(struct fileproc *fp, struct knote *kn,
cb323159 182 struct kevent_qos_s *kev);
2d21ac55 183static int pipe_ioctl(struct fileproc *fp, u_long cmd, caddr_t data,
0a7de745
A
184 vfs_context_t ctx);
185static int pipe_drain(struct fileproc *fp, vfs_context_t ctx);
b0d623f7 186
39236c6e 187static const struct fileops pipeops = {
cb323159
A
188 .fo_type = DTYPE_PIPE,
189 .fo_read = pipe_read,
190 .fo_write = pipe_write,
191 .fo_ioctl = pipe_ioctl,
192 .fo_select = pipe_select,
193 .fo_close = pipe_close,
194 .fo_drain = pipe_drain,
39037602 195 .fo_kqfilter = pipe_kqfilter,
39236c6e 196};
91447636 197
39037602 198static void filt_pipedetach(struct knote *kn);
91447636 199
cb323159
A
200static int filt_pipenotsup(struct knote *kn, long hint);
201static int filt_pipenotsuptouch(struct knote *kn, struct kevent_qos_s *kev);
202static int filt_pipenotsupprocess(struct knote *kn, struct kevent_qos_s *kev);
203
39037602 204static int filt_piperead(struct knote *kn, long hint);
cb323159
A
205static int filt_pipereadtouch(struct knote *kn, struct kevent_qos_s *kev);
206static int filt_pipereadprocess(struct knote *kn, struct kevent_qos_s *kev);
39037602
A
207
208static int filt_pipewrite(struct knote *kn, long hint);
cb323159
A
209static int filt_pipewritetouch(struct knote *kn, struct kevent_qos_s *kev);
210static int filt_pipewriteprocess(struct knote *kn, struct kevent_qos_s *kev);
211
212SECURITY_READ_ONLY_EARLY(struct filterops) pipe_nfiltops = {
213 .f_isfd = 1,
214 .f_detach = filt_pipedetach,
215 .f_event = filt_pipenotsup,
216 .f_touch = filt_pipenotsuptouch,
217 .f_process = filt_pipenotsupprocess,
218};
39037602 219
5ba3f43e 220SECURITY_READ_ONLY_EARLY(struct filterops) pipe_rfiltops = {
cb323159
A
221 .f_isfd = 1,
222 .f_detach = filt_pipedetach,
223 .f_event = filt_piperead,
224 .f_touch = filt_pipereadtouch,
39037602 225 .f_process = filt_pipereadprocess,
b0d623f7 226};
316670eb 227
5ba3f43e 228SECURITY_READ_ONLY_EARLY(struct filterops) pipe_wfiltops = {
cb323159
A
229 .f_isfd = 1,
230 .f_detach = filt_pipedetach,
231 .f_event = filt_pipewrite,
232 .f_touch = filt_pipewritetouch,
39037602 233 .f_process = filt_pipewriteprocess,
b0d623f7 234};
91447636 235
316670eb
A
236static int nbigpipe; /* for compatibility's sake. no longer used */
237static int amountpipes; /* total number of pipes in system */
238static int amountpipekva; /* total memory used by pipes */
91447636 239
39236c6e 240int maxpipekva __attribute__((used)) = PIPE_KVAMAX; /* allowing 16MB max. */
91447636
A
241
242#if PIPE_SYSCTLS
243SYSCTL_DECL(_kern_ipc);
244
0a7de745
A
245SYSCTL_INT(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RD | CTLFLAG_LOCKED,
246 &maxpipekva, 0, "Pipe KVA limit");
247SYSCTL_INT(_kern_ipc, OID_AUTO, maxpipekvawired, CTLFLAG_RW | CTLFLAG_LOCKED,
248 &maxpipekvawired, 0, "Pipe KVA wired limit");
249SYSCTL_INT(_kern_ipc, OID_AUTO, pipes, CTLFLAG_RD | CTLFLAG_LOCKED,
250 &amountpipes, 0, "Current # of pipes");
251SYSCTL_INT(_kern_ipc, OID_AUTO, bigpipes, CTLFLAG_RD | CTLFLAG_LOCKED,
252 &nbigpipe, 0, "Current # of big pipes");
253SYSCTL_INT(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD | CTLFLAG_LOCKED,
254 &amountpipekva, 0, "Pipe KVA usage");
255SYSCTL_INT(_kern_ipc, OID_AUTO, pipekvawired, CTLFLAG_RD | CTLFLAG_LOCKED,
256 &amountpipekvawired, 0, "Pipe wired KVA usage");
91447636
A
257#endif
258
cb323159 259static int pipepair_alloc(struct pipe **rpipe, struct pipe **wpipe);
91447636
A
260static void pipeclose(struct pipe *cpipe);
261static void pipe_free_kmem(struct pipe *cpipe);
316670eb
A
262static int pipespace(struct pipe *cpipe, int size);
263static int choose_pipespace(unsigned long current, unsigned long expected);
264static int expand_pipespace(struct pipe *p, int target_size);
91447636 265static void pipeselwakeup(struct pipe *cpipe, struct pipe *spipe);
316670eb
A
266static __inline int pipeio_lock(struct pipe *cpipe, int catch);
267static __inline void pipeio_unlock(struct pipe *cpipe);
91447636
A
268
269extern int postpipeevent(struct pipe *, int);
270extern void evpipefree(struct pipe *cpipe);
271
0a7de745
A
272static lck_grp_t *pipe_mtx_grp;
273static lck_attr_t *pipe_mtx_attr;
274static lck_grp_attr_t *pipe_mtx_grp_attr;
91447636
A
275
276static zone_t pipe_zone;
277
0a7de745 278#define MAX_PIPESIZE(pipe) ( MAX(PIPE_SIZE, (pipe)->pipe_buffer.size) )
316670eb 279
91447636
A
280SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
281
316670eb 282/* initial setup done at time of sysinit */
91447636 283void
2d21ac55 284pipeinit(void)
91447636 285{
0a7de745 286 nbigpipe = 0;
ebb1b9f4 287 vm_size_t zone_size;
0a7de745 288
cb323159
A
289 zone_size = 8192 * sizeof(struct pipepair);
290 pipe_zone = zinit(sizeof(struct pipepair), zone_size, 4096, "pipe zone");
91447636 291
316670eb
A
292
293 /* allocate lock group attribute and group for pipe mutexes */
91447636 294 pipe_mtx_grp_attr = lck_grp_attr_alloc_init();
91447636
A
295 pipe_mtx_grp = lck_grp_alloc_init("pipe", pipe_mtx_grp_attr);
296
316670eb 297 /* allocate the lock attribute for pipe mutexes */
91447636 298 pipe_mtx_attr = lck_attr_alloc_init();
91447636
A
299}
300
0a7de745 301#ifndef CONFIG_EMBEDDED
2d21ac55 302/* Bitmap for things to touch in pipe_touch() */
0a7de745
A
303#define PIPE_ATIME 0x00000001 /* time of last access */
304#define PIPE_MTIME 0x00000002 /* time of last modification */
305#define PIPE_CTIME 0x00000004 /* time of last status change */
2d21ac55
A
306
307static void
308pipe_touch(struct pipe *tpipe, int touch)
309{
5ba3f43e 310 struct timespec now;
2d21ac55 311
5ba3f43e 312 nanotime(&now);
2d21ac55
A
313
314 if (touch & PIPE_ATIME) {
315 tpipe->st_atimespec.tv_sec = now.tv_sec;
5ba3f43e 316 tpipe->st_atimespec.tv_nsec = now.tv_nsec;
2d21ac55
A
317 }
318
319 if (touch & PIPE_MTIME) {
320 tpipe->st_mtimespec.tv_sec = now.tv_sec;
5ba3f43e 321 tpipe->st_mtimespec.tv_nsec = now.tv_nsec;
2d21ac55
A
322 }
323
324 if (touch & PIPE_CTIME) {
325 tpipe->st_ctimespec.tv_sec = now.tv_sec;
5ba3f43e 326 tpipe->st_ctimespec.tv_nsec = now.tv_nsec;
2d21ac55
A
327 }
328}
5ba3f43e 329#endif
2d21ac55 330
0a7de745 331static const unsigned int pipesize_blocks[] = {512, 1024, 2048, 4096, 4096 * 2, PIPE_SIZE, PIPE_SIZE * 4 };
316670eb 332
0a7de745
A
333/*
334 * finds the right size from possible sizes in pipesize_blocks
335 * returns the size which matches max(current,expected)
316670eb 336 */
0a7de745 337static int
316670eb
A
338choose_pipespace(unsigned long current, unsigned long expected)
339{
0a7de745 340 int i = sizeof(pipesize_blocks) / sizeof(unsigned int) - 1;
316670eb
A
341 unsigned long target;
342
3e170ce0
A
343 /*
344 * assert that we always get an atomic transaction sized pipe buffer,
345 * even if the system pipe buffer high-water mark has been crossed.
346 */
347 assert(PIPE_BUF == pipesize_blocks[0]);
348
0a7de745 349 if (expected > current) {
316670eb 350 target = expected;
0a7de745 351 } else {
316670eb 352 target = current;
0a7de745 353 }
316670eb 354
0a7de745
A
355 while (i > 0 && pipesize_blocks[i - 1] > target) {
356 i = i - 1;
316670eb 357 }
0a7de745 358
316670eb
A
359 return pipesize_blocks[i];
360}
91447636
A
361
362
316670eb
A
363/*
364 * expand the size of the pipe while there is data to be read,
365 * and then free the old buffer once the current buffered
366 * data has been transferred to new storage.
367 * Required: PIPE_LOCK and io lock to be held by caller.
368 * returns 0 on success or if no expansion is possible
369 */
0a7de745 370static int
316670eb
A
371expand_pipespace(struct pipe *p, int target_size)
372{
373 struct pipe tmp, oldpipe;
374 int error;
375 tmp.pipe_buffer.buffer = 0;
0a7de745 376
316670eb
A
377 if (p->pipe_buffer.size >= (unsigned) target_size) {
378 return 0; /* the existing buffer is max size possible */
379 }
0a7de745 380
316670eb
A
381 /* create enough space in the target */
382 error = pipespace(&tmp, target_size);
0a7de745
A
383 if (error != 0) {
384 return error;
385 }
316670eb
A
386
387 oldpipe.pipe_buffer.buffer = p->pipe_buffer.buffer;
388 oldpipe.pipe_buffer.size = p->pipe_buffer.size;
0a7de745 389
316670eb 390 memcpy(tmp.pipe_buffer.buffer, p->pipe_buffer.buffer, p->pipe_buffer.size);
0a7de745 391 if (p->pipe_buffer.cnt > 0 && p->pipe_buffer.in <= p->pipe_buffer.out) {
316670eb
A
392 /* we are in State 3 and need extra copying for read to be consistent */
393 memcpy(&tmp.pipe_buffer.buffer[p->pipe_buffer.size], p->pipe_buffer.buffer, p->pipe_buffer.size);
394 p->pipe_buffer.in += p->pipe_buffer.size;
395 }
396
397 p->pipe_buffer.buffer = tmp.pipe_buffer.buffer;
398 p->pipe_buffer.size = tmp.pipe_buffer.size;
399
400
401 pipe_free_kmem(&oldpipe);
402 return 0;
403}
404
91447636
A
405/*
406 * The pipe system call for the DTYPE_PIPE type of pipes
0a7de745 407 *
316670eb 408 * returns:
0a7de745 409 * FREAD | fd0 | -->[struct rpipe] --> |~~buffer~~| \
316670eb 410 * (pipe_mutex)
0a7de745 411 * FWRITE | fd1 | -->[struct wpipe] --X /
91447636
A
412 */
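/*
 * A hedged user-space sketch (not part of the kernel build) of the usual
 * way the pair pictured above is consumed -- fd[0] is the FREAD end,
 * fd[1] the FWRITE end; pipe_example() is a hypothetical helper name:
 *
 *	#include <unistd.h>
 *
 *	int
 *	pipe_example(void)
 *	{
 *		int fd[2];
 *		char buf[5];
 *
 *		if (pipe(fd) == -1)
 *			return -1;
 *		(void)write(fd[1], "hello", 5);	// buffered in the rpipe side of the pair
 *		(void)read(fd[0], buf, 5);	// drains it again
 *		close(fd[1]);			// reader now sees EOF
 *		close(fd[0]);			// last close tears the pair down
 *		return 0;
 *	}
 */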
413
414/* ARGSUSED */
415int
b0d623f7 416pipe(proc_t p, __unused struct pipe_args *uap, int32_t *retval)
91447636
A
417{
418 struct fileproc *rf, *wf;
419 struct pipe *rpipe, *wpipe;
cb323159 420 int error;
91447636 421
cb323159
A
422 error = pipepair_alloc(&rpipe, &wpipe);
423 if (error) {
424 return error;
0a7de745
A
425 }
426
0a7de745 427 /*
cb323159
A
428 * for now we'll create half-duplex pipes (see the "returns" section above).
429 * this is what we've always supported.
0a7de745 430 */
91447636 431
cb323159 432 error = falloc(p, &rf, &retval[0], vfs_context_current());
91447636 433 if (error) {
0a7de745 434 goto freepipes;
91447636 435 }
91447636 436 rf->f_flag = FREAD;
91447636
A
437 rf->f_data = (caddr_t)rpipe;
438 rf->f_ops = &pipeops;
439
cb323159 440 error = falloc(p, &wf, &retval[1], vfs_context_current());
91447636
A
441 if (error) {
442 fp_free(p, retval[0], rf);
0a7de745 443 goto freepipes;
91447636
A
444 }
445 wf->f_flag = FWRITE;
91447636
A
446 wf->f_data = (caddr_t)wpipe;
447 wf->f_ops = &pipeops;
448
6601e61a
A
449 rpipe->pipe_peer = wpipe;
450 wpipe->pipe_peer = rpipe;
2d21ac55 451
2d21ac55 452#if CONFIG_MACF
91447636
A
453 /*
454 * XXXXXXXX SHOULD NOT HOLD FILE_LOCK() XXXXXXXXXXXX
455 *
456 * struct pipe represents a pipe endpoint. The MAC label is shared
2d21ac55
A
457 * between the connected endpoints. As a result mac_pipe_label_init() and
458 * mac_pipe_label_associate() should only be called on one of the endpoints
91447636
A
459 * after they have been connected.
460 */
2d21ac55
A
461 mac_pipe_label_init(rpipe);
462 mac_pipe_label_associate(kauth_cred_get(), rpipe);
463 wpipe->pipe_label = rpipe->pipe_label;
91447636 464#endif
2d21ac55 465 proc_fdlock_spin(p);
6601e61a
A
466 procfdtbl_releasefd(p, retval[0], NULL);
467 procfdtbl_releasefd(p, retval[1], NULL);
91447636
A
468 fp_drop(p, retval[0], rf, 1);
469 fp_drop(p, retval[1], wf, 1);
470 proc_fdunlock(p);
0a7de745 471 return 0;
91447636
A
472
473freepipes:
0a7de745
A
474 pipeclose(rpipe);
475 pipeclose(wpipe);
0a7de745 476 return error;
91447636
A
477}
478
91447636 479int
2d21ac55 480pipe_stat(struct pipe *cpipe, void *ub, int isstat64)
91447636 481{
2d21ac55 482#if CONFIG_MACF
0a7de745 483 int error;
91447636 484#endif
0a7de745
A
485 int pipe_size = 0;
486 int pipe_count;
487 struct stat *sb = (struct stat *)0; /* warning avoidance ; protected by isstat64 */
2d21ac55 488 struct stat64 * sb64 = (struct stat64 *)0; /* warning avoidance ; protected by isstat64 */
91447636 489
0a7de745
A
490 if (cpipe == NULL) {
491 return EBADF;
492 }
91447636 493 PIPE_LOCK(cpipe);
2d21ac55
A
494
495#if CONFIG_MACF
496 error = mac_pipe_check_stat(kauth_cred_get(), cpipe);
497 if (error) {
498 PIPE_UNLOCK(cpipe);
0a7de745 499 return error;
2d21ac55 500 }
91447636
A
501#endif
502 if (cpipe->pipe_buffer.buffer == 0) {
0a7de745
A
503 /* must be stat'ing the write fd */
504 if (cpipe->pipe_peer) {
505 /* the peer still exists, use its info */
506 pipe_size = MAX_PIPESIZE(cpipe->pipe_peer);
2d21ac55
A
507 pipe_count = cpipe->pipe_peer->pipe_buffer.cnt;
508 } else {
509 pipe_count = 0;
510 }
511 } else {
0a7de745 512 pipe_size = MAX_PIPESIZE(cpipe);
2d21ac55 513 pipe_count = cpipe->pipe_buffer.cnt;
91447636 514 }
2d21ac55
A
515 /*
516 * since the peer's buffer is set up outside of the lock
517 * we might catch it in a transient state
518 */
0a7de745 519 if (pipe_size == 0) {
316670eb 520 pipe_size = MAX(PIPE_SIZE, pipesize_blocks[0]);
0a7de745 521 }
91447636 522
2d21ac55 523 if (isstat64 != 0) {
0a7de745 524 sb64 = (struct stat64 *)ub;
91447636 525
2d21ac55
A
526 bzero(sb64, sizeof(*sb64));
527 sb64->st_mode = S_IFIFO | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
528 sb64->st_blksize = pipe_size;
529 sb64->st_size = pipe_count;
530 sb64->st_blocks = (sb64->st_size + sb64->st_blksize - 1) / sb64->st_blksize;
0a7de745 531
2d21ac55
A
532 sb64->st_uid = kauth_getuid();
533 sb64->st_gid = kauth_getgid();
0a7de745 534
2d21ac55
A
535 sb64->st_atimespec.tv_sec = cpipe->st_atimespec.tv_sec;
536 sb64->st_atimespec.tv_nsec = cpipe->st_atimespec.tv_nsec;
0a7de745 537
2d21ac55
A
538 sb64->st_mtimespec.tv_sec = cpipe->st_mtimespec.tv_sec;
539 sb64->st_mtimespec.tv_nsec = cpipe->st_mtimespec.tv_nsec;
91447636 540
2d21ac55
A
541 sb64->st_ctimespec.tv_sec = cpipe->st_ctimespec.tv_sec;
542 sb64->st_ctimespec.tv_nsec = cpipe->st_ctimespec.tv_nsec;
91447636 543
2d21ac55 544 /*
0a7de745
A
545 * Return a relatively unique inode number based on the current
546 * address of this pipe's struct pipe. This number may be recycled
547 * relatively quickly.
548 */
cb323159 549 sb64->st_ino = (ino64_t)VM_KERNEL_ADDRHASH((uintptr_t)cpipe);
2d21ac55 550 } else {
0a7de745 551 sb = (struct stat *)ub;
2d21ac55
A
552
553 bzero(sb, sizeof(*sb));
554 sb->st_mode = S_IFIFO | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
555 sb->st_blksize = pipe_size;
556 sb->st_size = pipe_count;
557 sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize;
0a7de745 558
2d21ac55
A
559 sb->st_uid = kauth_getuid();
560 sb->st_gid = kauth_getgid();
0a7de745 561
2d21ac55
A
562 sb->st_atimespec.tv_sec = cpipe->st_atimespec.tv_sec;
563 sb->st_atimespec.tv_nsec = cpipe->st_atimespec.tv_nsec;
0a7de745 564
2d21ac55
A
565 sb->st_mtimespec.tv_sec = cpipe->st_mtimespec.tv_sec;
566 sb->st_mtimespec.tv_nsec = cpipe->st_mtimespec.tv_nsec;
567
568 sb->st_ctimespec.tv_sec = cpipe->st_ctimespec.tv_sec;
569 sb->st_ctimespec.tv_nsec = cpipe->st_ctimespec.tv_nsec;
570
571 /*
0a7de745
A
572 * Return a relatively unique inode number based on the current
573 * address of this pipe's struct pipe. This number may be recycled
574 * relatively quickly.
575 */
cb323159 576 sb->st_ino = (ino_t)VM_KERNEL_ADDRHASH((uintptr_t)cpipe);
2d21ac55
A
577 }
578 PIPE_UNLOCK(cpipe);
91447636
A
579
580 /*
2d21ac55
A
581 * POSIX: Left as 0: st_dev, st_nlink, st_rdev, st_flags, st_gen,
582 * st_uid, st_gid.
583 *
584 * XXX (st_dev) should be unique, but there is no device driver that
585 * XXX is associated with pipes, since they are implemented via a
586 * XXX struct fileops indirection rather than as FS objects.
91447636 587 */
0a7de745 588 return 0;
91447636
A
589}
590
591
592/*
593 * Allocate kva for the pipe circular buffer; the space is pageable.
594 * This routine will 'realloc' the size of a pipe safely; if it fails
595 * it will retain the old buffer.
596 * If it fails it will return ENOMEM.
597 */
598static int
599pipespace(struct pipe *cpipe, int size)
600{
601 vm_offset_t buffer;
602
0a7de745
A
603 if (size <= 0) {
604 return EINVAL;
605 }
91447636 606
0a7de745
A
607 if ((buffer = (vm_offset_t)kalloc(size)) == 0) {
608 return ENOMEM;
609 }
91447636
A
610
611 /* free old resources if we're resizing */
612 pipe_free_kmem(cpipe);
613 cpipe->pipe_buffer.buffer = (caddr_t)buffer;
614 cpipe->pipe_buffer.size = size;
615 cpipe->pipe_buffer.in = 0;
616 cpipe->pipe_buffer.out = 0;
617 cpipe->pipe_buffer.cnt = 0;
618
b0d623f7
A
619 OSAddAtomic(1, &amountpipes);
620 OSAddAtomic(cpipe->pipe_buffer.size, &amountpipekva);
91447636 621
0a7de745 622 return 0;
91447636
A
623}
624
625/*
626 * initialize and allocate VM and memory for pipe
627 */
628static int
cb323159 629pipepair_alloc(struct pipe **rp_out, struct pipe **wp_out)
91447636 630{
cb323159
A
631 struct pipepair *pp = zalloc(pipe_zone);
632 struct pipe *rpipe = &pp->pp_rpipe;
633 struct pipe *wpipe = &pp->pp_wpipe;
91447636 634
cb323159 635 if (pp == NULL) {
0a7de745
A
636 return ENOMEM;
637 }
91447636
A
638
639 /*
640 * protect so pipespace or pipeclose don't follow a junk pointer
641 * if pipespace() fails.
642 */
cb323159
A
643 bzero(pp, sizeof(struct pipepair));
644 lck_mtx_init(&pp->pp_mtx, pipe_mtx_grp, pipe_mtx_attr);
645
646 rpipe->pipe_mtxp = &pp->pp_mtx;
647 wpipe->pipe_mtxp = &pp->pp_mtx;
648
649 TAILQ_INIT(&rpipe->pipe_evlist);
650 TAILQ_INIT(&wpipe->pipe_evlist);
91447636 651
0a7de745 652#ifndef CONFIG_EMBEDDED
2d21ac55 653 /* Initial times are all the time of creation of the pipe */
cb323159
A
654 pipe_touch(rpipe, PIPE_ATIME | PIPE_MTIME | PIPE_CTIME);
655 pipe_touch(wpipe, PIPE_ATIME | PIPE_MTIME | PIPE_CTIME);
5ba3f43e 656#endif
cb323159
A
657
658 /*
659 * allocate the space for the normal I/O direction up
660 * front... we'll delay the allocation for the other
661 * direction until a write actually occurs (most likely it won't)...
662 */
663 int error = pipespace(rpipe, choose_pipespace(rpipe->pipe_buffer.size, 0));
664 if (__improbable(error)) {
665 lck_mtx_destroy(&pp->pp_mtx, pipe_mtx_grp);
666 zfree(pipe_zone, pp);
667 return error;
668 }
669
670 *rp_out = rpipe;
671 *wp_out = wpipe;
0a7de745 672 return 0;
91447636
A
673}
674
cb323159
A
675static void
676pipepair_destroy_pipe(struct pipepair *pp, struct pipe *cpipe)
677{
678 bool can_free;
679
680 pipe_free_kmem(cpipe);
681
682 lck_mtx_lock(&pp->pp_mtx);
683 if (__improbable(cpipe->pipe_state & PIPE_DEAD)) {
684 panic("double free of pipe %p in pair %p", cpipe, pp);
685 }
686
687 cpipe->pipe_state |= PIPE_DEAD;
688
689 can_free = (pp->pp_rpipe.pipe_state & PIPE_DEAD) &&
690 (pp->pp_wpipe.pipe_state & PIPE_DEAD);
691 lck_mtx_unlock(&pp->pp_mtx);
692
693 if (can_free) {
694 lck_mtx_destroy(&pp->pp_mtx, pipe_mtx_grp);
695 zfree(pipe_zone, pp);
696 }
697}
91447636
A
698
699/*
700 * lock a pipe for I/O, blocking other access
701 */
2d21ac55 702static inline int
316670eb 703pipeio_lock(struct pipe *cpipe, int catch)
91447636
A
704{
705 int error;
91447636
A
706 while (cpipe->pipe_state & PIPE_LOCKFL) {
707 cpipe->pipe_state |= PIPE_LWANT;
91447636 708 error = msleep(cpipe, PIPE_MTX(cpipe), catch ? (PRIBIO | PCATCH) : PRIBIO,
0a7de745
A
709 "pipelk", 0);
710 if (error != 0) {
711 return error;
712 }
91447636
A
713 }
714 cpipe->pipe_state |= PIPE_LOCKFL;
0a7de745 715 return 0;
91447636
A
716}
717
718/*
719 * unlock a pipe I/O lock
720 */
2d21ac55 721static inline void
316670eb 722pipeio_unlock(struct pipe *cpipe)
91447636 723{
91447636 724 cpipe->pipe_state &= ~PIPE_LOCKFL;
91447636
A
725 if (cpipe->pipe_state & PIPE_LWANT) {
726 cpipe->pipe_state &= ~PIPE_LWANT;
727 wakeup(cpipe);
728 }
729}
730
316670eb
A
731/*
732 * wake up anyone who is blocked in select
733 */
91447636 734static void
2d21ac55 735pipeselwakeup(struct pipe *cpipe, struct pipe *spipe)
91447636 736{
91447636
A
737 if (cpipe->pipe_state & PIPE_SEL) {
738 cpipe->pipe_state &= ~PIPE_SEL;
739 selwakeup(&cpipe->pipe_sel);
740 }
cb323159
A
741
742 KNOTE(&cpipe->pipe_sel.si_note, 1);
91447636
A
743
744 postpipeevent(cpipe, EV_RWBYTES);
745
746 if (spipe && (spipe->pipe_state & PIPE_ASYNC) && spipe->pipe_pgid) {
0a7de745
A
747 if (spipe->pipe_pgid < 0) {
748 gsignal(-spipe->pipe_pgid, SIGIO);
749 } else {
750 proc_signal(spipe->pipe_pgid, SIGIO);
751 }
752 }
91447636
A
753}
754
316670eb
A
755/*
756 * Read n bytes from the buffer. Semantics are similar to file read.
757 * returns: 0 on success or an errno; the bytes read are reflected in the uio
758 */
91447636
A
759/* ARGSUSED */
760static int
2d21ac55 761pipe_read(struct fileproc *fp, struct uio *uio, __unused int flags,
0a7de745 762 __unused vfs_context_t ctx)
91447636
A
763{
764 struct pipe *rpipe = (struct pipe *)fp->f_data;
765 int error;
766 int nread = 0;
767 u_int size;
768
769 PIPE_LOCK(rpipe);
770 ++rpipe->pipe_busy;
771
316670eb 772 error = pipeio_lock(rpipe, 1);
0a7de745 773 if (error) {
91447636 774 goto unlocked_error;
0a7de745 775 }
91447636 776
2d21ac55
A
777#if CONFIG_MACF
778 error = mac_pipe_check_read(kauth_cred_get(), rpipe);
0a7de745 779 if (error) {
91447636 780 goto locked_error;
0a7de745 781 }
91447636
A
782#endif
783
316670eb 784
91447636
A
785 while (uio_resid(uio)) {
786 /*
787 * normal pipe buffer receive
788 */
789 if (rpipe->pipe_buffer.cnt > 0) {
316670eb
A
790 /*
791 * # bytes to read is min( bytes from read pointer until end of buffer,
0a7de745 792 * total unread bytes,
316670eb
A
793 * user requested byte count)
794 */
91447636 795 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
0a7de745 796 if (size > rpipe->pipe_buffer.cnt) {
91447636 797 size = rpipe->pipe_buffer.cnt;
0a7de745 798 }
91447636 799 // LP64todo - fix this!
0a7de745 800 if (size > (u_int) uio_resid(uio)) {
91447636 801 size = (u_int) uio_resid(uio);
0a7de745 802 }
91447636 803
316670eb 804 PIPE_UNLOCK(rpipe); /* we still hold io lock.*/
91447636 805 error = uiomove(
0a7de745
A
806 &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
807 size, uio);
91447636 808 PIPE_LOCK(rpipe);
0a7de745 809 if (error) {
91447636 810 break;
0a7de745 811 }
91447636
A
812
813 rpipe->pipe_buffer.out += size;
0a7de745 814 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size) {
91447636 815 rpipe->pipe_buffer.out = 0;
0a7de745 816 }
91447636
A
817
818 rpipe->pipe_buffer.cnt -= size;
0a7de745 819
91447636
A
820 /*
821 * If there is no more to read in the pipe, reset
822 * its pointers to the beginning. This improves
823 * cache hit stats.
824 */
825 if (rpipe->pipe_buffer.cnt == 0) {
826 rpipe->pipe_buffer.in = 0;
827 rpipe->pipe_buffer.out = 0;
828 }
829 nread += size;
91447636
A
830 } else {
831 /*
832 * detect EOF condition
833 * read returns 0 on EOF, no need to set error
834 */
cb323159
A
835 if ((rpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) ||
836 (fileproc_get_vflags(fp) & FPV_DRAIN)) {
91447636 837 break;
b0d623f7 838 }
91447636
A
839
840 /*
841 * If the "write-side" has been blocked, wake it up now.
842 */
843 if (rpipe->pipe_state & PIPE_WANTW) {
844 rpipe->pipe_state &= ~PIPE_WANTW;
845 wakeup(rpipe);
846 }
847
848 /*
316670eb 849 * Break if some data was read in previous iteration.
91447636 850 */
0a7de745 851 if (nread > 0) {
91447636 852 break;
0a7de745 853 }
91447636
A
854
855 /*
0a7de745 856 * Unlock the pipe buffer for our remaining processing.
91447636
A
857 * We will either break out with an error or we will
858 * sleep and relock to loop.
859 */
316670eb 860 pipeio_unlock(rpipe);
91447636
A
861
862 /*
863 * Handle non-blocking mode operation or
864 * wait for more data.
865 */
866 if (fp->f_flag & FNONBLOCK) {
867 error = EAGAIN;
868 } else {
869 rpipe->pipe_state |= PIPE_WANTR;
91447636 870 error = msleep(rpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH, "piperd", 0);
0a7de745
A
871 if (error == 0) {
872 error = pipeio_lock(rpipe, 1);
873 }
91447636 874 }
0a7de745 875 if (error) {
91447636 876 goto unlocked_error;
0a7de745 877 }
91447636
A
878 }
879 }
2d21ac55 880#if CONFIG_MACF
91447636
A
881locked_error:
882#endif
316670eb 883 pipeio_unlock(rpipe);
91447636
A
884
885unlocked_error:
886 --rpipe->pipe_busy;
887
888 /*
889 * PIPE_WANT processing only makes sense if pipe_busy is 0.
890 */
891 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
0a7de745 892 rpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTW);
91447636 893 wakeup(rpipe);
316670eb 894 } else if (rpipe->pipe_buffer.cnt < rpipe->pipe_buffer.size) {
91447636
A
895 /*
896 * Handle write blocking hysteresis.
897 */
898 if (rpipe->pipe_state & PIPE_WANTW) {
899 rpipe->pipe_state &= ~PIPE_WANTW;
900 wakeup(rpipe);
901 }
902 }
903
0a7de745 904 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) > 0) {
91447636 905 pipeselwakeup(rpipe, rpipe->pipe_peer);
0a7de745 906 }
91447636 907
0a7de745 908#ifndef CONFIG_EMBEDDED
2d21ac55
A
909 /* update last read time */
910 pipe_touch(rpipe, PIPE_ATIME);
5ba3f43e 911#endif
2d21ac55 912
91447636
A
913 PIPE_UNLOCK(rpipe);
914
0a7de745 915 return error;
91447636
A
916}
917
91447636 918/*
0a7de745 919 * perform a write of n bytes into the read side of the buffer. Since
316670eb 920 * pipes are unidirectional, a write is meant to be read by the other side only.
91447636 921 */
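/*
 * A hedged user-space note (not part of the kernel build): POSIX requires
 * -- and the space check further below enforces -- that writes of at most
 * PIPE_BUF bytes are atomic, so fixed-size records from concurrent writers
 * never interleave; emit_record() is a hypothetical helper name:
 *
 *	#include <limits.h>
 *	#include <unistd.h>
 *
 *	struct rec { char payload[64]; };	// 64 <= PIPE_BUF
 *
 *	void
 *	emit_record(int wfd, const struct rec *r)
 *	{
 *		// transferred entirely or not at all relative to other writers
 *		(void)write(wfd, r, sizeof(*r));
 *	}
 */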
91447636 922static int
2d21ac55 923pipe_write(struct fileproc *fp, struct uio *uio, __unused int flags,
0a7de745 924 __unused vfs_context_t ctx)
91447636
A
925{
926 int error = 0;
927 int orig_resid;
928 int pipe_size;
929 struct pipe *wpipe, *rpipe;
316670eb
A
930 // LP64todo - fix this!
931 orig_resid = uio_resid(uio);
932 int space;
91447636
A
933
934 rpipe = (struct pipe *)fp->f_data;
935
936 PIPE_LOCK(rpipe);
937 wpipe = rpipe->pipe_peer;
938
939 /*
940 * detect loss of pipe read side, issue SIGPIPE if lost.
941 */
cb323159
A
942 if (wpipe == NULL || (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) ||
943 (fileproc_get_vflags(fp) & FPV_DRAIN)) {
91447636 944 PIPE_UNLOCK(rpipe);
0a7de745 945 return EPIPE;
91447636 946 }
2d21ac55
A
947#if CONFIG_MACF
948 error = mac_pipe_check_write(kauth_cred_get(), wpipe);
91447636
A
949 if (error) {
950 PIPE_UNLOCK(rpipe);
0a7de745 951 return error;
91447636
A
952 }
953#endif
954 ++wpipe->pipe_busy;
955
956 pipe_size = 0;
957
91447636 958 /*
316670eb
A
959 * need to allocate some storage... we delay the allocation
960 * until the first write on fd[0] to avoid allocating storage for both
961 * 'pipe ends'... most pipes are half-duplex with the writes targeting
962 * fd[1], so allocating space for both ends is a waste...
91447636 963 */
91447636 964
0a7de745
A
965 if (wpipe->pipe_buffer.buffer == 0 || (
966 (unsigned)orig_resid > wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt &&
967 amountpipekva < maxpipekva)) {
968 pipe_size = choose_pipespace(wpipe->pipe_buffer.size, wpipe->pipe_buffer.cnt + orig_resid);
91447636
A
969 }
970 if (pipe_size) {
0a7de745 971 /*
91447636 972 * need to do initial allocation or resizing of pipe
0a7de745 973 * holding both structure and io locks.
91447636 974 */
316670eb 975 if ((error = pipeio_lock(wpipe, 1)) == 0) {
0a7de745 976 if (wpipe->pipe_buffer.cnt == 0) {
316670eb 977 error = pipespace(wpipe, pipe_size);
0a7de745 978 } else {
316670eb 979 error = expand_pipespace(wpipe, pipe_size);
0a7de745
A
980 }
981
316670eb 982 pipeio_unlock(wpipe);
0a7de745 983
316670eb 984 /* allocation failed */
0a7de745
A
985 if (wpipe->pipe_buffer.buffer == 0) {
986 error = ENOMEM;
987 }
91447636
A
988 }
989 if (error) {
0a7de745 990 /*
91447636
A
991 * If an error occurred unbusy and return, waking up any pending
992 * readers.
993 */
0a7de745
A
994 --wpipe->pipe_busy;
995 if ((wpipe->pipe_busy == 0) &&
91447636 996 (wpipe->pipe_state & PIPE_WANT)) {
0a7de745 997 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
91447636
A
998 wakeup(wpipe);
999 }
1000 PIPE_UNLOCK(rpipe);
0a7de745 1001 return error;
91447636
A
1002 }
1003 }
91447636
A
1004
1005 while (uio_resid(uio)) {
0a7de745 1006retrywrite:
91447636
A
1007 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1008
316670eb 1009 /* Writes of size <= PIPE_BUF must be atomic. */
0a7de745 1010 if ((space < uio_resid(uio)) && (orig_resid <= PIPE_BUF)) {
91447636 1011 space = 0;
0a7de745 1012 }
91447636
A
1013
1014 if (space > 0) {
0a7de745
A
1015 if ((error = pipeio_lock(wpipe, 1)) == 0) {
1016 int size; /* Transfer size */
1017 int segsize; /* first segment to transfer */
91447636 1018
cb323159
A
1019 if ((wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) ||
1020 (fileproc_get_vflags(fp) & FPV_DRAIN)) {
316670eb 1021 pipeio_unlock(wpipe);
0a7de745 1022 error = EPIPE;
91447636
A
1023 break;
1024 }
0a7de745 1025 /*
316670eb 1026 * If a process blocked in pipeio_lock, our
91447636
A
1027 * value for space might be bad... the mutex
1028 * is dropped while we're blocked
1029 */
0a7de745 1030 if (space > (int)(wpipe->pipe_buffer.size -
91447636 1031 wpipe->pipe_buffer.cnt)) {
316670eb 1032 pipeio_unlock(wpipe);
91447636
A
1033 goto retrywrite;
1034 }
1035
1036 /*
1037 * Transfer size is minimum of uio transfer
1038 * and free space in pipe buffer.
1039 */
1040 // LP64todo - fix this!
0a7de745 1041 if (space > uio_resid(uio)) {
91447636 1042 size = uio_resid(uio);
0a7de745 1043 } else {
91447636 1044 size = space;
0a7de745 1045 }
91447636 1046 /*
0a7de745 1047 * First segment to transfer is minimum of
91447636
A
1048 * transfer size and contiguous space in
1049 * pipe buffer. If first segment to transfer
1050 * is less than the transfer size, we've got
1051 * a wraparound in the buffer.
1052 */
0a7de745
A
1053 segsize = wpipe->pipe_buffer.size -
1054 wpipe->pipe_buffer.in;
1055 if (segsize > size) {
91447636 1056 segsize = size;
0a7de745
A
1057 }
1058
91447636
A
1059 /* Transfer first segment */
1060
1061 PIPE_UNLOCK(rpipe);
0a7de745
A
1062 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1063 segsize, uio);
91447636 1064 PIPE_LOCK(rpipe);
0a7de745 1065
91447636 1066 if (error == 0 && segsize < size) {
0a7de745 1067 /*
91447636
A
1068 * Transfer remaining part now, to
1069 * support atomic writes. Wraparound
316670eb 1070 * happened. (State 3)
91447636 1071 */
0a7de745
A
1072 if (wpipe->pipe_buffer.in + segsize !=
1073 wpipe->pipe_buffer.size) {
91447636
A
1074 panic("Expected pipe buffer "
1075 "wraparound disappeared");
0a7de745
A
1076 }
1077
91447636
A
1078 PIPE_UNLOCK(rpipe);
1079 error = uiomove(
0a7de745
A
1080 &wpipe->pipe_buffer.buffer[0],
1081 size - segsize, uio);
91447636
A
1082 PIPE_LOCK(rpipe);
1083 }
0a7de745 1084 /*
316670eb
A
1085 * readers never know to read until count is updated.
1086 */
91447636
A
1087 if (error == 0) {
1088 wpipe->pipe_buffer.in += size;
316670eb 1089 if (wpipe->pipe_buffer.in >
91447636
A
1090 wpipe->pipe_buffer.size) {
1091 if (wpipe->pipe_buffer.in !=
1092 size - segsize +
0a7de745 1093 wpipe->pipe_buffer.size) {
91447636
A
1094 panic("Expected "
1095 "wraparound bad");
0a7de745 1096 }
91447636
A
1097 wpipe->pipe_buffer.in = size -
1098 segsize;
1099 }
0a7de745 1100
91447636
A
1101 wpipe->pipe_buffer.cnt += size;
1102 if (wpipe->pipe_buffer.cnt >
0a7de745 1103 wpipe->pipe_buffer.size) {
91447636 1104 panic("Pipe buffer overflow");
0a7de745 1105 }
91447636 1106 }
316670eb 1107 pipeio_unlock(wpipe);
91447636 1108 }
0a7de745 1109 if (error) {
91447636 1110 break;
0a7de745 1111 }
91447636
A
1112 } else {
1113 /*
1114 * If the "read-side" has been blocked, wake it up now.
1115 */
1116 if (wpipe->pipe_state & PIPE_WANTR) {
1117 wpipe->pipe_state &= ~PIPE_WANTR;
1118 wakeup(wpipe);
1119 }
cb323159 1120
91447636 1121 /*
cb323159
A
1122 * If read side wants to go away, we just issue a signal
1123 * to ourselves.
91447636 1124 */
cb323159
A
1125 if ((wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) ||
1126 (fileproc_get_vflags(fp) & FPV_DRAIN)) {
1127 error = EPIPE;
91447636
A
1128 break;
1129 }
6d2010ae
A
1130
1131 /*
cb323159
A
1132 * don't block on non-blocking I/O
1133 * we'll do the pipeselwakeup on the way out
6d2010ae 1134 */
cb323159
A
1135 if (fp->f_flag & FNONBLOCK) {
1136 error = EAGAIN;
6d2010ae 1137 break;
0a7de745 1138 }
6d2010ae 1139
91447636
A
1140 /*
1141 * We have no more space and have something to offer,
1142 * wake up select/poll.
1143 */
1144 pipeselwakeup(wpipe, wpipe);
1145
1146 wpipe->pipe_state |= PIPE_WANTW;
1147
1148 error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH, "pipewr", 0);
1149
0a7de745 1150 if (error != 0) {
91447636 1151 break;
0a7de745 1152 }
91447636
A
1153 }
1154 }
1155 --wpipe->pipe_busy;
1156
1157 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
1158 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1159 wakeup(wpipe);
1160 }
1161 if (wpipe->pipe_buffer.cnt > 0) {
1162 /*
1163 * If there are any characters in the buffer, we wake up
1164 * the reader if it was blocked waiting for data.
1165 */
1166 if (wpipe->pipe_state & PIPE_WANTR) {
1167 wpipe->pipe_state &= ~PIPE_WANTR;
1168 wakeup(wpipe);
1169 }
1170 /*
1171 * wake up thread blocked in select/poll or post the notification
1172 */
1173 pipeselwakeup(wpipe, wpipe);
1174 }
2d21ac55 1175
0a7de745 1176#ifndef CONFIG_EMBEDDED
2d21ac55
A
1177 /* Update modification, status change (# of bytes in pipe) times */
1178 pipe_touch(rpipe, PIPE_MTIME | PIPE_CTIME);
1179 pipe_touch(wpipe, PIPE_MTIME | PIPE_CTIME);
5ba3f43e 1180#endif
91447636
A
1181 PIPE_UNLOCK(rpipe);
1182
0a7de745 1183 return error;
91447636
A
1184}
1185
1186/*
1187 * we implement a very minimal set of ioctls for compatibility with sockets.
1188 */
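/*
 * A hedged user-space sketch (not part of the kernel build) of the ioctl
 * most often used on pipes from the minimal set handled below, FIONREAD,
 * which reports how many bytes are currently buffered; pipe_pending_bytes()
 * is a hypothetical helper name:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/filio.h>
 *
 *	int
 *	pipe_pending_bytes(int rfd)
 *	{
 *		int n = 0;
 *
 *		if (ioctl(rfd, FIONREAD, &n) == -1)
 *			return -1;
 *		return n;	// mirrors pipe_buffer.cnt at the time of the call
 *	}
 */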
1189/* ARGSUSED 3 */
1190static int
2d21ac55 1191pipe_ioctl(struct fileproc *fp, u_long cmd, caddr_t data,
0a7de745 1192 __unused vfs_context_t ctx)
91447636
A
1193{
1194 struct pipe *mpipe = (struct pipe *)fp->f_data;
2d21ac55 1195#if CONFIG_MACF
91447636
A
1196 int error;
1197#endif
1198
1199 PIPE_LOCK(mpipe);
1200
2d21ac55
A
1201#if CONFIG_MACF
1202 error = mac_pipe_check_ioctl(kauth_cred_get(), mpipe, cmd);
91447636
A
1203 if (error) {
1204 PIPE_UNLOCK(mpipe);
1205
0a7de745 1206 return error;
91447636
A
1207 }
1208#endif
1209
1210 switch (cmd) {
91447636
A
1211 case FIONBIO:
1212 PIPE_UNLOCK(mpipe);
0a7de745 1213 return 0;
91447636
A
1214
1215 case FIOASYNC:
1216 if (*(int *)data) {
1217 mpipe->pipe_state |= PIPE_ASYNC;
1218 } else {
1219 mpipe->pipe_state &= ~PIPE_ASYNC;
1220 }
1221 PIPE_UNLOCK(mpipe);
0a7de745 1222 return 0;
91447636
A
1223
1224 case FIONREAD:
316670eb 1225 *(int *)data = mpipe->pipe_buffer.cnt;
91447636 1226 PIPE_UNLOCK(mpipe);
0a7de745 1227 return 0;
91447636
A
1228
1229 case TIOCSPGRP:
1230 mpipe->pipe_pgid = *(int *)data;
1231
1232 PIPE_UNLOCK(mpipe);
0a7de745 1233 return 0;
91447636
A
1234
1235 case TIOCGPGRP:
1236 *(int *)data = mpipe->pipe_pgid;
1237
1238 PIPE_UNLOCK(mpipe);
0a7de745 1239 return 0;
91447636
A
1240 }
1241 PIPE_UNLOCK(mpipe);
0a7de745 1242 return ENOTTY;
91447636
A
1243}
1244
1245
1246static int
2d21ac55 1247pipe_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
91447636
A
1248{
1249 struct pipe *rpipe = (struct pipe *)fp->f_data;
1250 struct pipe *wpipe;
1251 int retnum = 0;
1252
0a7de745
A
1253 if (rpipe == NULL || rpipe == (struct pipe *)-1) {
1254 return retnum;
1255 }
91447636
A
1256
1257 PIPE_LOCK(rpipe);
1258
1259 wpipe = rpipe->pipe_peer;
0a7de745 1260
91447636 1261
2d21ac55
A
1262#if CONFIG_MACF
1263 /*
1264 * XXX We should use a per thread credential here; minimally, the
1265 * XXX process credential should have a persistent reference on it
1266 * XXX before being passed in here.
1267 */
1268 if (mac_pipe_check_select(vfs_context_ucred(ctx), rpipe, which)) {
1269 PIPE_UNLOCK(rpipe);
0a7de745 1270 return 0;
2d21ac55
A
1271 }
1272#endif
0a7de745
A
1273 switch (which) {
1274 case FREAD:
91447636
A
1275 if ((rpipe->pipe_state & PIPE_DIRECTW) ||
1276 (rpipe->pipe_buffer.cnt > 0) ||
cb323159
A
1277 (rpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) ||
1278 (fileproc_get_vflags(fp) & FPV_DRAIN)) {
0a7de745 1279 retnum = 1;
91447636 1280 } else {
0a7de745
A
1281 rpipe->pipe_state |= PIPE_SEL;
1282 selrecord(vfs_context_proc(ctx), &rpipe->pipe_sel, wql);
91447636
A
1283 }
1284 break;
1285
0a7de745
A
1286 case FWRITE:
1287 if (wpipe) {
ebb1b9f4 1288 wpipe->pipe_state |= PIPE_WSELECT;
0a7de745 1289 }
b0d623f7 1290 if (wpipe == NULL || (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) ||
cb323159 1291 (fileproc_get_vflags(fp) & FPV_DRAIN) ||
91447636 1292 (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
0a7de745
A
1293 (MAX_PIPESIZE(wpipe) - wpipe->pipe_buffer.cnt) >= PIPE_BUF)) {
1294 retnum = 1;
91447636 1295 } else {
0a7de745 1296 wpipe->pipe_state |= PIPE_SEL;
2d21ac55 1297 selrecord(vfs_context_proc(ctx), &wpipe->pipe_sel, wql);
91447636
A
1298 }
1299 break;
0a7de745
A
1300 case 0:
1301 rpipe->pipe_state |= PIPE_SEL;
2d21ac55 1302 selrecord(vfs_context_proc(ctx), &rpipe->pipe_sel, wql);
91447636 1303 break;
0a7de745 1304 }
91447636
A
1305 PIPE_UNLOCK(rpipe);
1306
0a7de745 1307 return retnum;
91447636
A
1308}
1309
1310
1311/* ARGSUSED 1 */
1312static int
2d21ac55 1313pipe_close(struct fileglob *fg, __unused vfs_context_t ctx)
91447636 1314{
0a7de745 1315 struct pipe *cpipe;
91447636 1316
2d21ac55 1317 proc_fdlock_spin(vfs_context_proc(ctx));
91447636
A
1318 cpipe = (struct pipe *)fg->fg_data;
1319 fg->fg_data = NULL;
2d21ac55 1320 proc_fdunlock(vfs_context_proc(ctx));
0a7de745
A
1321 if (cpipe) {
1322 pipeclose(cpipe);
1323 }
91447636 1324
0a7de745 1325 return 0;
91447636
A
1326}
1327
1328static void
1329pipe_free_kmem(struct pipe *cpipe)
1330{
91447636 1331 if (cpipe->pipe_buffer.buffer != NULL) {
b0d623f7
A
1332 OSAddAtomic(-(cpipe->pipe_buffer.size), &amountpipekva);
1333 OSAddAtomic(-1, &amountpipes);
0a7de745
A
1334 kfree(cpipe->pipe_buffer.buffer,
1335 cpipe->pipe_buffer.size);
91447636 1336 cpipe->pipe_buffer.buffer = NULL;
316670eb 1337 cpipe->pipe_buffer.size = 0;
91447636 1338 }
ebb1b9f4
A
1339}
1340
91447636
A
1341/*
1342 * shutdown the pipe
1343 */
1344static void
1345pipeclose(struct pipe *cpipe)
1346{
1347 struct pipe *ppipe;
1348
cb323159 1349 PIPE_LOCK(cpipe);
91447636
A
1350
1351 /*
1352 * If the other side is blocked, wake it up saying that
1353 * we want to close it down.
1354 */
b0d623f7 1355 cpipe->pipe_state &= ~PIPE_DRAIN;
2d21ac55
A
1356 cpipe->pipe_state |= PIPE_EOF;
1357 pipeselwakeup(cpipe, cpipe);
0a7de745 1358
91447636 1359 while (cpipe->pipe_busy) {
2d21ac55 1360 cpipe->pipe_state |= PIPE_WANT;
91447636
A
1361
1362 wakeup(cpipe);
0a7de745 1363 msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
91447636
A
1364 }
1365
2d21ac55
A
1366#if CONFIG_MACF
1367 /*
1368 * Free the shared pipe label only after the two ends are disconnected.
1369 */
0a7de745 1370 if (cpipe->pipe_label != NULL && cpipe->pipe_peer == NULL) {
2d21ac55 1371 mac_pipe_label_destroy(cpipe);
0a7de745 1372 }
91447636
A
1373#endif
1374
1375 /*
1376 * Disconnect from peer
1377 */
1378 if ((ppipe = cpipe->pipe_peer) != NULL) {
b0d623f7 1379 ppipe->pipe_state &= ~(PIPE_DRAIN);
91447636
A
1380 ppipe->pipe_state |= PIPE_EOF;
1381
1382 pipeselwakeup(ppipe, ppipe);
1383 wakeup(ppipe);
1384
cb323159 1385 KNOTE(&ppipe->pipe_sel.si_note, 1);
91447636
A
1386
1387 postpipeevent(ppipe, EV_RCLOSED);
1388
1389 ppipe->pipe_peer = NULL;
1390 }
1391 evpipefree(cpipe);
1392
1393 /*
1394 * free resources
1395 */
cb323159
A
1396
1397 PIPE_UNLOCK(cpipe);
1398
1399 pipepair_destroy_pipe(PIPE_PAIR(cpipe), cpipe);
1400}
1401
1402static int64_t
1403filt_pipelowwat(struct knote *kn, struct pipe *rpipe, int64_t def_lowwat)
1404{
1405 if ((kn->kn_sfflags & NOTE_LOWAT) == 0) {
1406 return def_lowwat;
91447636 1407 }
cb323159
A
1408 if (rpipe->pipe_buffer.size && kn->kn_sdata > MAX_PIPESIZE(rpipe)) {
1409 return MAX_PIPESIZE(rpipe);
ebb1b9f4 1410 }
cb323159 1411 return MAX(kn->kn_sdata, def_lowwat);
91447636
A
1412}
1413
91447636 1414static int
cb323159 1415filt_pipe_draincommon(struct knote *kn, struct pipe *rpipe)
91447636 1416{
cb323159 1417 struct pipe *wpipe = rpipe->pipe_peer;
39037602 1418
39037602
A
1419 if ((rpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) ||
1420 (wpipe == NULL) || (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF))) {
1421 kn->kn_flags |= EV_EOF;
cb323159
A
1422 return 1;
1423 }
1424
1425 return 0;
1426}
1427
1428static int
1429filt_pipenotsup(struct knote *kn, long hint)
1430{
1431#pragma unused(hint)
1432 struct pipe *rpipe = kn->kn_hook;
1433
1434 return filt_pipe_draincommon(kn, rpipe);
1435}
1436
1437static int
1438filt_pipenotsuptouch(struct knote *kn, struct kevent_qos_s *kev)
1439{
1440 struct pipe *rpipe = kn->kn_hook;
1441 int res;
1442
1443 PIPE_LOCK(rpipe);
1444
1445 /* accept new kevent data (and save off lowat threshold and flag) */
1446 kn->kn_sfflags = kev->fflags;
1447 kn->kn_sdata = kev->data;
1448
1449 /* determine if any event is now deemed fired */
1450 res = filt_pipe_draincommon(kn, rpipe);
1451
1452 PIPE_UNLOCK(rpipe);
1453
1454 return res;
1455}
1456
1457static int
1458filt_pipenotsupprocess(struct knote *kn, struct kevent_qos_s *kev)
1459{
1460 struct pipe *rpipe = kn->kn_hook;
1461 int res;
1462
1463 PIPE_LOCK(rpipe);
1464 res = filt_pipe_draincommon(kn, rpipe);
1465 if (res) {
1466 knote_fill_kevent(kn, kev, 0);
1467 }
1468 PIPE_UNLOCK(rpipe);
1469
1470 return res;
1471}
1472
1473/*ARGSUSED*/
1474static int
1475filt_piperead_common(struct knote *kn, struct kevent_qos_s *kev, struct pipe *rpipe)
1476{
1477 int64_t data = rpipe->pipe_buffer.cnt;
1478 int res = 0;
1479
1480 if (filt_pipe_draincommon(kn, rpipe)) {
1481 res = 1;
39037602 1482 } else {
cb323159 1483 res = data >= filt_pipelowwat(kn, rpipe, 1);
39037602 1484 }
cb323159
A
1485 if (res && kev) {
1486 knote_fill_kevent(kn, kev, data);
1487 }
1488 return res;
39037602 1489}
91447636 1490
39037602
A
1491static int
1492filt_piperead(struct knote *kn, long hint)
1493{
1494#pragma unused(hint)
cb323159 1495 struct pipe *rpipe = kn->kn_hook;
39037602 1496
cb323159 1497 return filt_piperead_common(kn, NULL, rpipe);
39037602 1498}
d9a64523 1499
39037602 1500static int
cb323159 1501filt_pipereadtouch(struct knote *kn, struct kevent_qos_s *kev)
39037602 1502{
cb323159 1503 struct pipe *rpipe = kn->kn_hook;
39037602
A
1504 int retval;
1505
1506 PIPE_LOCK(rpipe);
1507
1508 /* accept new inputs (and save the low water threshold and flag) */
1509 kn->kn_sdata = kev->data;
1510 kn->kn_sfflags = kev->fflags;
39037602
A
1511
1512 /* identify if any events are now fired */
cb323159 1513 retval = filt_piperead_common(kn, NULL, rpipe);
39037602
A
1514
1515 PIPE_UNLOCK(rpipe);
1516
1517 return retval;
1518}
1519
1520static int
cb323159 1521filt_pipereadprocess(struct knote *kn, struct kevent_qos_s *kev)
39037602 1522{
cb323159 1523 struct pipe *rpipe = kn->kn_hook;
39037602
A
1524 int retval;
1525
1526 PIPE_LOCK(rpipe);
cb323159 1527 retval = filt_piperead_common(kn, kev, rpipe);
39037602
A
1528 PIPE_UNLOCK(rpipe);
1529
0a7de745 1530 return retval;
39037602
A
1531}
1532
1533/*ARGSUSED*/
1534static int
cb323159 1535filt_pipewrite_common(struct knote *kn, struct kevent_qos_s *kev, struct pipe *rpipe)
39037602 1536{
cb323159
A
1537 int64_t data = 0;
1538 int res = 0;
39037602 1539
cb323159
A
1540 if (filt_pipe_draincommon(kn, rpipe)) {
1541 res = 1;
1542 } else {
1543 data = MAX_PIPESIZE(rpipe) - rpipe->pipe_buffer.cnt;
1544 res = data >= filt_pipelowwat(kn, rpipe, PIPE_BUF);
39037602 1545 }
cb323159
A
1546 if (res && kev) {
1547 knote_fill_kevent(kn, kev, data);
39037602 1548 }
cb323159 1549 return res;
39037602
A
1550}
1551
1552/*ARGSUSED*/
1553static int
1554filt_pipewrite(struct knote *kn, long hint)
1555{
1556#pragma unused(hint)
cb323159 1557 struct pipe *rpipe = kn->kn_hook;
39037602 1558
cb323159 1559 return filt_pipewrite_common(kn, NULL, rpipe);
39037602
A
1560}
1561
1562
1563static int
cb323159 1564filt_pipewritetouch(struct knote *kn, struct kevent_qos_s *kev)
39037602 1565{
cb323159 1566 struct pipe *rpipe = kn->kn_hook;
39037602
A
1567 int res;
1568
1569 PIPE_LOCK(rpipe);
1570
1571 /* accept new kevent data (and save off lowat threshold and flag) */
1572 kn->kn_sfflags = kev->fflags;
1573 kn->kn_sdata = kev->data;
39037602
A
1574
1575 /* determine if any event is now deemed fired */
cb323159 1576 res = filt_pipewrite_common(kn, NULL, rpipe);
39037602
A
1577
1578 PIPE_UNLOCK(rpipe);
1579
1580 return res;
1581}
1582
1583static int
cb323159 1584filt_pipewriteprocess(struct knote *kn, struct kevent_qos_s *kev)
39037602 1585{
cb323159 1586 struct pipe *rpipe = kn->kn_hook;
39037602
A
1587 int res;
1588
1589 PIPE_LOCK(rpipe);
cb323159 1590 res = filt_pipewrite_common(kn, kev, rpipe);
39037602
A
1591 PIPE_UNLOCK(rpipe);
1592
1593 return res;
1594}
1595
1596/*ARGSUSED*/
1597static int
cb323159
A
1598pipe_kqfilter(struct fileproc *fp, struct knote *kn,
1599 __unused struct kevent_qos_s *kev)
39037602 1600{
cb323159
A
1601 struct pipe *cpipe = (struct pipe *)fp->f_data;
1602 struct pipe *rpipe = &PIPE_PAIR(cpipe)->pp_rpipe;
39037602 1603 int res;
91447636
A
1604
1605 PIPE_LOCK(cpipe);
2d21ac55
A
1606#if CONFIG_MACF
1607 /*
1608 * XXX We should use a per thread credential here; minimally, the
1609 * XXX process credential should have a persistent reference on it
1610 * XXX before being passed in here.
1611 */
cb323159
A
1612 kauth_cred_t cred = vfs_context_ucred(vfs_context_current());
1613 if (mac_pipe_check_kqfilter(cred, kn, cpipe) != 0) {
2d21ac55 1614 PIPE_UNLOCK(cpipe);
cb323159 1615 knote_set_error(kn, EPERM);
39037602 1616 return 0;
2d21ac55
A
1617 }
1618#endif
91447636 1619
cb323159
A
1620 /*
1621 * FreeBSD will fail the attach with EPIPE if the peer pipe is detached,
1622 * however, this isn't a programming error as the other side closing
1623 * could race with the kevent registration.
1624 *
1625 * Attach should only fail for programming mistakes else it will break
1626 * libdispatch.
1627 *
1628 * Like FreeBSD, have a "Neutered" filter that will not fire until
1629 * the pipe dies if the wrong filter is attached to the wrong end.
1630 *
1631 * Knotes are always attached to the "rpipe".
1632 */
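	/*
	 * A hedged user-space sketch (not part of the kernel build) of a
	 * registration that arrives here: EVFILT_READ attached to the read
	 * end, which fires once at least one byte (or NOTE_LOWAT bytes) is
	 * buffered:
	 *
	 *	#include <sys/event.h>
	 *	#include <unistd.h>
	 *
	 *	int fd[2], kq;
	 *	struct kevent ev;
	 *
	 *	pipe(fd);
	 *	kq = kqueue();
	 *	EV_SET(&ev, fd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	 *	kevent(kq, &ev, 1, NULL, 0, NULL);	// attach via this function
	 *	// a later kevent() wait returns ev.data == bytes readable
	 */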
91447636
A
1633 switch (kn->kn_filter) {
1634 case EVFILT_READ:
cb323159
A
1635 if (fp->f_flag & FREAD) {
1636 kn->kn_filtid = EVFILTID_PIPE_R;
1637 res = filt_piperead_common(kn, NULL, rpipe);
1638 } else {
1639 kn->kn_filtid = EVFILTID_PIPE_N;
1640 res = filt_pipe_draincommon(kn, rpipe);
1641 }
91447636 1642 break;
39037602 1643
91447636 1644 case EVFILT_WRITE:
cb323159
A
1645 if (fp->f_flag & FWRITE) {
1646 kn->kn_filtid = EVFILTID_PIPE_W;
1647 res = filt_pipewrite_common(kn, NULL, rpipe);
1648 } else {
1649 kn->kn_filtid = EVFILTID_PIPE_N;
1650 res = filt_pipe_draincommon(kn, rpipe);
0a7de745 1651 }
91447636 1652 break;
cb323159 1653
91447636 1654 default:
0a7de745 1655 PIPE_UNLOCK(cpipe);
cb323159 1656 knote_set_error(kn, EINVAL);
39037602 1657 return 0;
91447636
A
1658 }
1659
cb323159
A
1660 kn->kn_hook = rpipe;
1661 KNOTE_ATTACH(&rpipe->pipe_sel.si_note, kn);
91447636
A
1662
1663 PIPE_UNLOCK(cpipe);
39037602 1664 return res;
91447636
A
1665}
1666
1667static void
1668filt_pipedetach(struct knote *kn)
1669{
1670 struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
cb323159 1671 struct pipe *rpipe = &PIPE_PAIR(cpipe)->pp_rpipe;
91447636
A
1672
1673 PIPE_LOCK(cpipe);
cb323159 1674 KNOTE_DETACH(&rpipe->pipe_sel.si_note, kn);
91447636
A
1675 PIPE_UNLOCK(cpipe);
1676}
1677
0c530ab8
A
1678int
1679fill_pipeinfo(struct pipe * cpipe, struct pipe_info * pinfo)
1680{
2d21ac55 1681#if CONFIG_MACF
0a7de745 1682 int error;
0c530ab8 1683#endif
5ba3f43e 1684 struct timespec now;
2d21ac55
A
1685 struct vinfo_stat * ub;
1686 int pipe_size = 0;
1687 int pipe_count;
0c530ab8 1688
0a7de745
A
1689 if (cpipe == NULL) {
1690 return EBADF;
1691 }
0c530ab8 1692 PIPE_LOCK(cpipe);
2d21ac55
A
1693
1694#if CONFIG_MACF
1695 error = mac_pipe_check_stat(kauth_cred_get(), cpipe);
1696 if (error) {
1697 PIPE_UNLOCK(cpipe);
0a7de745 1698 return error;
2d21ac55 1699 }
0c530ab8
A
1700#endif
1701 if (cpipe->pipe_buffer.buffer == 0) {
0a7de745 1702 /*
0c530ab8
A
1703 * must be stat'ing the write fd
1704 */
0a7de745
A
1705 if (cpipe->pipe_peer) {
1706 /*
2d21ac55
A
1707 * the peer still exists, use its info
1708 */
0a7de745 1709 pipe_size = MAX_PIPESIZE(cpipe->pipe_peer);
2d21ac55
A
1710 pipe_count = cpipe->pipe_peer->pipe_buffer.cnt;
1711 } else {
1712 pipe_count = 0;
1713 }
1714 } else {
0a7de745 1715 pipe_size = MAX_PIPESIZE(cpipe);
2d21ac55 1716 pipe_count = cpipe->pipe_buffer.cnt;
0c530ab8 1717 }
2d21ac55
A
1718 /*
1719 * since the peer's buffer is set up outside of the lock
1720 * we might catch it in a transient state
1721 */
0a7de745 1722 if (pipe_size == 0) {
2d21ac55 1723 pipe_size = PIPE_SIZE;
0a7de745 1724 }
0c530ab8
A
1725
1726 ub = &pinfo->pipe_stat;
1727
1728 bzero(ub, sizeof(*ub));
2d21ac55
A
1729 ub->vst_mode = S_IFIFO | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
1730 ub->vst_blksize = pipe_size;
1731 ub->vst_size = pipe_count;
0a7de745 1732 if (ub->vst_blksize != 0) {
2d21ac55 1733 ub->vst_blocks = (ub->vst_size + ub->vst_blksize - 1) / ub->vst_blksize;
0a7de745 1734 }
2d21ac55 1735 ub->vst_nlink = 1;
0c530ab8 1736
2d21ac55
A
1737 ub->vst_uid = kauth_getuid();
1738 ub->vst_gid = kauth_getgid();
0c530ab8 1739
5ba3f43e 1740 nanotime(&now);
2d21ac55 1741 ub->vst_atime = now.tv_sec;
5ba3f43e 1742 ub->vst_atimensec = now.tv_nsec;
0c530ab8 1743
2d21ac55 1744 ub->vst_mtime = now.tv_sec;
5ba3f43e 1745 ub->vst_mtimensec = now.tv_nsec;
0c530ab8 1746
2d21ac55 1747 ub->vst_ctime = now.tv_sec;
5ba3f43e 1748 ub->vst_ctimensec = now.tv_nsec;
0c530ab8
A
1749
1750 /*
1751 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen, st_uid, st_gid.
1752 * XXX (st_dev, st_ino) should be unique.
1753 */
1754
cb323159
A
1755 pinfo->pipe_handle = (uint64_t)VM_KERNEL_ADDRHASH((uintptr_t)cpipe);
1756 pinfo->pipe_peerhandle = (uint64_t)VM_KERNEL_ADDRHASH((uintptr_t)(cpipe->pipe_peer));
0c530ab8 1757 pinfo->pipe_status = cpipe->pipe_state;
2d21ac55
A
1758
1759 PIPE_UNLOCK(cpipe);
1760
0a7de745 1761 return 0;
0c530ab8 1762}
b0d623f7
A
1763
1764
0a7de745 1765static int
b0d623f7
A
1766pipe_drain(struct fileproc *fp, __unused vfs_context_t ctx)
1767{
b0d623f7
A
1768 /* Note: fdlock already held */
1769 struct pipe *ppipe, *cpipe = (struct pipe *)(fp->f_fglob->fg_data);
cb323159
A
1770 boolean_t drain_pipe = FALSE;
1771
1772 /* Check if the pipe is going away */
1773 lck_mtx_lock_spin(&fp->f_fglob->fg_lock);
1774 if (fp->f_fglob->fg_count == 1) {
1775 drain_pipe = TRUE;
1776 }
1777 lck_mtx_unlock(&fp->f_fglob->fg_lock);
b0d623f7
A
1778
1779 if (cpipe) {
1780 PIPE_LOCK(cpipe);
cb323159
A
1781
1782 if (drain_pipe) {
1783 cpipe->pipe_state |= PIPE_DRAIN;
1784 cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
1785 }
b0d623f7 1786 wakeup(cpipe);
0a7de745 1787
b0d623f7
A
1788 /* Must wake up peer: a writer sleeps on the read side */
1789 if ((ppipe = cpipe->pipe_peer)) {
cb323159
A
1790 if (drain_pipe) {
1791 ppipe->pipe_state |= PIPE_DRAIN;
1792 ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
1793 }
b0d623f7
A
1794 wakeup(ppipe);
1795 }
0a7de745 1796
b0d623f7
A
1797 PIPE_UNLOCK(cpipe);
1798 return 0;
1799 }
1800
1801 return 1;
1802}