1 /*
2 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)sys_generic.c 8.9 (Berkeley) 2/14/95
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/ioctl.h>
79 #include <sys/file_internal.h>
80 #include <sys/proc_internal.h>
81 #include <sys/socketvar.h>
82 #include <sys/uio_internal.h>
83 #include <sys/kernel.h>
84 #include <sys/guarded.h>
85 #include <sys/stat.h>
86 #include <sys/malloc.h>
87 #include <sys/sysproto.h>
88
89 #include <sys/mount_internal.h>
90 #include <sys/protosw.h>
91 #include <sys/ev.h>
92 #include <sys/user.h>
93 #include <sys/kdebug.h>
94 #include <sys/poll.h>
95 #include <sys/event.h>
96 #include <sys/eventvar.h>
97 #include <sys/proc.h>
98 #include <sys/kauth.h>
99
100 #include <mach/mach_types.h>
101 #include <kern/kern_types.h>
102 #include <kern/assert.h>
103 #include <kern/kalloc.h>
104 #include <kern/thread.h>
105 #include <kern/clock.h>
106 #include <kern/ledger.h>
107 #include <kern/task.h>
108 #include <kern/telemetry.h>
109
110 #include <sys/mbuf.h>
111 #include <sys/domain.h>
112 #include <sys/socket.h>
113 #include <sys/socketvar.h>
114 #include <sys/errno.h>
115 #include <sys/syscall.h>
116 #include <sys/pipe.h>
117
118 #include <security/audit/audit.h>
119
120 #include <net/if.h>
121 #include <net/route.h>
122
123 #include <netinet/in.h>
124 #include <netinet/in_systm.h>
125 #include <netinet/ip.h>
126 #include <netinet/in_pcb.h>
127 #include <netinet/ip_var.h>
128 #include <netinet/ip6.h>
129 #include <netinet/tcp.h>
130 #include <netinet/tcp_fsm.h>
131 #include <netinet/tcp_seq.h>
132 #include <netinet/tcp_timer.h>
133 #include <netinet/tcp_var.h>
134 #include <netinet/tcpip.h>
135 #include <netinet/tcp_debug.h>
136 /* for wait queue based select */
137 #include <kern/wait_queue.h>
138 #include <kern/kalloc.h>
139 #include <sys/vnode_internal.h>
140
141 /* XXX should be in a header file somewhere */
142 void evsofree(struct socket *);
143 void evpipefree(struct pipe *);
144 void postpipeevent(struct pipe *, int);
145 void postevent(struct socket *, struct sockbuf *, int);
146 extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t timeoutp);
147
148 int rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval);
149 int wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval);
150
151 __private_extern__ int dofileread(vfs_context_t ctx, struct fileproc *fp,
152 user_addr_t bufp, user_size_t nbyte,
153 off_t offset, int flags, user_ssize_t *retval);
154 __private_extern__ int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
155 user_addr_t bufp, user_size_t nbyte,
156 off_t offset, int flags, user_ssize_t *retval);
157 __private_extern__ int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode);
158 __private_extern__ void donefileread(struct proc *p, struct fileproc *fp_ret, int fd);
159
160
161 /* Conflict wait queue for when selects collide (opaque type) */
162 struct wait_queue select_conflict_queue;
163
164 /*
165 * Init routine called from bsd_init.c
166 */
167 void select_wait_queue_init(void);
168 void
169 select_wait_queue_init(void)
170 {
171 wait_queue_init(&select_conflict_queue, SYNC_POLICY_FIFO);
172 }
173
174 #define f_flag f_fglob->fg_flag
175 #define f_type f_fglob->fg_ops->fo_type
176 #define f_msgcount f_fglob->fg_msgcount
177 #define f_cred f_fglob->fg_cred
178 #define f_ops f_fglob->fg_ops
179 #define f_offset f_fglob->fg_offset
180 #define f_data f_fglob->fg_data
181
182 /*
183 * Read system call.
184 *
185 * Returns: 0 Success
186 * preparefileread:EBADF
187 * preparefileread:ESPIPE
188 * preparefileread:ENXIO
189 * preparefileread:EBADF
190 * dofileread:???
191 */
192 int
193 read(struct proc *p, struct read_args *uap, user_ssize_t *retval)
194 {
195 __pthread_testcancel(1);
196 return(read_nocancel(p, (struct read_nocancel_args *)uap, retval));
197 }
198
199 int
200 read_nocancel(struct proc *p, struct read_nocancel_args *uap, user_ssize_t *retval)
201 {
202 struct fileproc *fp;
203 int error;
204 int fd = uap->fd;
205 struct vfs_context context;
206
207 if ( (error = preparefileread(p, &fp, fd, 0)) )
208 return (error);
209
210 context = *(vfs_context_current());
211 context.vc_ucred = fp->f_fglob->fg_cred;
212
213 error = dofileread(&context, fp, uap->cbuf, uap->nbyte,
214 (off_t)-1, 0, retval);
215
216 donefileread(p, fp, fd);
217
218 return (error);
219 }
220
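/*
 * Illustrative userspace sketch (not part of the original source): the path
 * above services an ordinary read(2) call, e.g.
 *
 *	char buf[512];
 *	ssize_t n = read(fd, buf, sizeof(buf));	// fd assumed open for reading
 *	if (n < 0)
 *		perror("read");			// errno maps to the Returns list above
 */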
221 /*
222 * Pread system call
223 *
224 * Returns: 0 Success
225 * preparefileread:EBADF
226 * preparefileread:ESPIPE
227 * preparefileread:ENXIO
228 * preparefileread:EBADF
229 * dofileread:???
230 */
231 int
232 pread(struct proc *p, struct pread_args *uap, user_ssize_t *retval)
233 {
234 __pthread_testcancel(1);
235 return(pread_nocancel(p, (struct pread_nocancel_args *)uap, retval));
236 }
237
238 int
239 pread_nocancel(struct proc *p, struct pread_nocancel_args *uap, user_ssize_t *retval)
240 {
241 struct fileproc *fp = NULL; /* fp set by preparefileread() */
242 int fd = uap->fd;
243 int error;
244 struct vfs_context context;
245
246 if ( (error = preparefileread(p, &fp, fd, 1)) )
247 goto out;
248
249 context = *(vfs_context_current());
250 context.vc_ucred = fp->f_fglob->fg_cred;
251
252 error = dofileread(&context, fp, uap->buf, uap->nbyte,
253 uap->offset, FOF_OFFSET, retval);
254
255 donefileread(p, fp, fd);
256
257 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE),
258 uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
259
260 out:
261 return (error);
262 }
263
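/*
 * Hedged usage sketch (not from the original source): unlike read(2), the
 * pread(2) path above takes an explicit offset and does not advance the
 * descriptor's file offset, e.g.
 *
 *	char hdr[64];
 *	ssize_t n = pread(fd, hdr, sizeof(hdr), (off_t)0);	// read from offset 0
 *	// a following read(fd, ...) still starts at the old f_offset
 */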
264 /*
265 * Code common for read and pread
266 */
267
268 void
269 donefileread(struct proc *p, struct fileproc *fp, int fd)
270 {
271 proc_fdlock_spin(p);
272 fp_drop(p, fd, fp, 1);
273 proc_fdunlock(p);
274 }
275
276 /*
277 * Returns: 0 Success
278 * EBADF
279 * ESPIPE
280 * ENXIO
281 * fp_lookup:EBADF
282 * fo_read:???
283 */
284 int
285 preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pread)
286 {
287 vnode_t vp;
288 int error;
289 struct fileproc *fp;
290
291 AUDIT_ARG(fd, fd);
292
293 proc_fdlock_spin(p);
294
295 error = fp_lookup(p, fd, &fp, 1);
296
297 if (error) {
298 proc_fdunlock(p);
299 return (error);
300 }
301 if ((fp->f_flag & FREAD) == 0) {
302 error = EBADF;
303 goto out;
304 }
305 if (check_for_pread && (fp->f_type != DTYPE_VNODE)) {
306 error = ESPIPE;
307 goto out;
308 }
309 if (fp->f_type == DTYPE_VNODE) {
310 vp = (struct vnode *)fp->f_fglob->fg_data;
311
312 if (check_for_pread && (vnode_isfifo(vp))) {
313 error = ESPIPE;
314 goto out;
315 }
316 if (check_for_pread && (vp->v_flag & VISTTY)) {
317 error = ENXIO;
318 goto out;
319 }
320 }
321
322 *fp_ret = fp;
323
324 proc_fdunlock(p);
325 return (0);
326
327 out:
328 fp_drop(p, fd, fp, 1);
329 proc_fdunlock(p);
330 return (error);
331 }
332
333
334 /*
335 * Returns: 0 Success
336 * EINVAL
337 * fo_read:???
338 */
339 __private_extern__ int
340 dofileread(vfs_context_t ctx, struct fileproc *fp,
341 user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
342 user_ssize_t *retval)
343 {
344 uio_t auio;
345 user_ssize_t bytecnt;
346 long error = 0;
347 char uio_buf[ UIO_SIZEOF(1) ];
348
349 if (nbyte > INT_MAX)
350 return (EINVAL);
351
352 if (IS_64BIT_PROCESS(vfs_context_proc(ctx))) {
353 auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_READ,
354 &uio_buf[0], sizeof(uio_buf));
355 } else {
356 auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_READ,
357 &uio_buf[0], sizeof(uio_buf));
358 }
359 uio_addiov(auio, bufp, nbyte);
360
361 bytecnt = nbyte;
362
363 if ((error = fo_read(fp, auio, flags, ctx))) {
364 if (uio_resid(auio) != bytecnt && (error == ERESTART ||
365 error == EINTR || error == EWOULDBLOCK))
366 error = 0;
367 }
368 bytecnt -= uio_resid(auio);
369
370 *retval = bytecnt;
371
372 return (error);
373 }
374
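/*
 * The function above builds a single-iovec uio in a stack buffer instead of
 * heap-allocating one.  A minimal sketch of the same idiom, using only the
 * calls already seen here:
 *
 *	char uio_buf[UIO_SIZEOF(1)];
 *	uio_t auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_READ,
 *	    &uio_buf[0], sizeof(uio_buf));
 *	uio_addiov(auio, bufp, nbyte);
 *	// after fo_read(), bytes transferred == nbyte - uio_resid(auio)
 */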
375 /*
376 * Scatter read system call.
377 *
378 * Returns: 0 Success
379 * EINVAL
380 * ENOMEM
381 * copyin:EFAULT
382 * rd_uio:???
383 */
384 int
385 readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval)
386 {
387 __pthread_testcancel(1);
388 return(readv_nocancel(p, (struct readv_nocancel_args *)uap, retval));
389 }
390
391 int
392 readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *retval)
393 {
394 uio_t auio = NULL;
395 int error;
396 struct user_iovec *iovp;
397
398 	/* Verify range before calling uio_create() */
399 if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV)
400 return (EINVAL);
401
402 /* allocate a uio large enough to hold the number of iovecs passed */
403 auio = uio_create(uap->iovcnt, 0,
404 (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
405 UIO_READ);
406
407 /* get location of iovecs within the uio. then copyin the iovecs from
408 * user space.
409 */
410 iovp = uio_iovsaddr(auio);
411 if (iovp == NULL) {
412 error = ENOMEM;
413 goto ExitThisRoutine;
414 }
415 error = copyin_user_iovec_array(uap->iovp,
416 IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
417 uap->iovcnt, iovp);
418 if (error) {
419 goto ExitThisRoutine;
420 }
421
422 /* finalize uio_t for use and do the IO
423 */
424 error = uio_calculateresid(auio);
425 if (error) {
426 goto ExitThisRoutine;
427 }
428 error = rd_uio(p, uap->fd, auio, retval);
429
430 ExitThisRoutine:
431 if (auio != NULL) {
432 uio_free(auio);
433 }
434 return (error);
435 }
436
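/*
 * Illustrative userspace sketch (not part of the original source): readv(2)
 * scatters one transfer into several buffers, which is what the iovec copyin
 * above supports, e.g.
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = hdr,  .iov_len = sizeof(hdr)  },
 *		{ .iov_base = body, .iov_len = sizeof(body) },
 *	};
 *	ssize_t n = readv(fd, iov, 2);	// at most UIO_MAXIOV entries
 */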
437 /*
438 * Write system call
439 *
440 * Returns: 0 Success
441 * EBADF
442 * fp_lookup:EBADF
443 * dofilewrite:???
444 */
445 int
446 write(struct proc *p, struct write_args *uap, user_ssize_t *retval)
447 {
448 __pthread_testcancel(1);
449 return(write_nocancel(p, (struct write_nocancel_args *)uap, retval));
450
451 }
452
453 int
454 write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *retval)
455 {
456 struct fileproc *fp;
457 int error;
458 int fd = uap->fd;
459 bool wrote_some = false;
460
461 AUDIT_ARG(fd, fd);
462
463 error = fp_lookup(p,fd,&fp,0);
464 if (error)
465 return(error);
466 if ((fp->f_flag & FWRITE) == 0) {
467 error = EBADF;
468 } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
469 proc_fdlock(p);
470 error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
471 proc_fdunlock(p);
472 } else {
473 struct vfs_context context = *(vfs_context_current());
474 context.vc_ucred = fp->f_fglob->fg_cred;
475
476 error = dofilewrite(&context, fp, uap->cbuf, uap->nbyte,
477 (off_t)-1, 0, retval);
478
479 wrote_some = *retval > 0;
480 }
481 if (wrote_some)
482 fp_drop_written(p, fd, fp);
483 else
484 fp_drop(p, fd, fp, 0);
485 return(error);
486 }
487
488 /*
489 * pwrite system call
490 *
491 * Returns: 0 Success
492 * EBADF
493 * ESPIPE
494 * ENXIO
495 * EINVAL
496 * fp_lookup:EBADF
497 * dofilewrite:???
498 */
499 int
500 pwrite(struct proc *p, struct pwrite_args *uap, user_ssize_t *retval)
501 {
502 __pthread_testcancel(1);
503 return(pwrite_nocancel(p, (struct pwrite_nocancel_args *)uap, retval));
504 }
505
506 int
507 pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t *retval)
508 {
509 struct fileproc *fp;
510 int error;
511 int fd = uap->fd;
512 vnode_t vp = (vnode_t)0;
513 bool wrote_some = false;
514
515 AUDIT_ARG(fd, fd);
516
517 error = fp_lookup(p,fd,&fp,0);
518 if (error)
519 return(error);
520
521 if ((fp->f_flag & FWRITE) == 0) {
522 error = EBADF;
523 } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
524 proc_fdlock(p);
525 error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
526 proc_fdunlock(p);
527 } else {
528 struct vfs_context context = *vfs_context_current();
529 context.vc_ucred = fp->f_fglob->fg_cred;
530
531 if (fp->f_type != DTYPE_VNODE) {
532 error = ESPIPE;
533 goto errout;
534 }
535 vp = (vnode_t)fp->f_fglob->fg_data;
536 if (vnode_isfifo(vp)) {
537 error = ESPIPE;
538 goto errout;
539 }
540 if ((vp->v_flag & VISTTY)) {
541 error = ENXIO;
542 goto errout;
543 }
544 if (uap->offset == (off_t)-1) {
545 error = EINVAL;
546 goto errout;
547 }
548
549 error = dofilewrite(&context, fp, uap->buf, uap->nbyte,
550 uap->offset, FOF_OFFSET, retval);
551 wrote_some = *retval > 0;
552 }
553 errout:
554 if (wrote_some)
555 fp_drop_written(p, fd, fp);
556 else
557 fp_drop(p, fd, fp, 0);
558
559 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE),
560 uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
561
562 return(error);
563 }
564
565 /*
566 * Returns: 0 Success
567 * EINVAL
568 * <fo_write>:EPIPE
569 * <fo_write>:??? [indirect through struct fileops]
570 */
571 __private_extern__ int
572 dofilewrite(vfs_context_t ctx, struct fileproc *fp,
573 user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
574 user_ssize_t *retval)
575 {
576 uio_t auio;
577 long error = 0;
578 user_ssize_t bytecnt;
579 char uio_buf[ UIO_SIZEOF(1) ];
580
581 if (nbyte > INT_MAX) {
582 *retval = 0;
583 return (EINVAL);
584 }
585
586 if (IS_64BIT_PROCESS(vfs_context_proc(ctx))) {
587 auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_WRITE,
588 &uio_buf[0], sizeof(uio_buf));
589 } else {
590 auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_WRITE,
591 &uio_buf[0], sizeof(uio_buf));
592 }
593 uio_addiov(auio, bufp, nbyte);
594
595 bytecnt = nbyte;
596 if ((error = fo_write(fp, auio, flags, ctx))) {
597 if (uio_resid(auio) != bytecnt && (error == ERESTART ||
598 error == EINTR || error == EWOULDBLOCK))
599 error = 0;
600 /* The socket layer handles SIGPIPE */
601 if (error == EPIPE && fp->f_type != DTYPE_SOCKET &&
602 (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0) {
603 /* XXX Raise the signal on the thread? */
604 psignal(vfs_context_proc(ctx), SIGPIPE);
605 }
606 }
607 bytecnt -= uio_resid(auio);
608 *retval = bytecnt;
609
610 return (error);
611 }
612
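/*
 * Hedged userspace sketch (not from the original source) of the EPIPE/SIGPIPE
 * behavior implemented above: writing to a pipe whose read end is closed
 * normally delivers SIGPIPE as well as returning EPIPE, e.g.
 *
 *	signal(SIGPIPE, SIG_IGN);		// opt out of the signal...
 *	if (write(pipe_wr_fd, buf, len) < 0 && errno == EPIPE)
 *		handle_closed_reader();		// ...and handle EPIPE directly
 */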
613 /*
614 * Gather write system call
615 */
616 int
617 writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval)
618 {
619 __pthread_testcancel(1);
620 return(writev_nocancel(p, (struct writev_nocancel_args *)uap, retval));
621 }
622
623 int
624 writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t *retval)
625 {
626 uio_t auio = NULL;
627 int error;
628 struct fileproc *fp;
629 struct user_iovec *iovp;
630 bool wrote_some = false;
631
632 AUDIT_ARG(fd, uap->fd);
633
634 	/* Verify range before calling uio_create() */
635 if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV)
636 return (EINVAL);
637
638 /* allocate a uio large enough to hold the number of iovecs passed */
639 auio = uio_create(uap->iovcnt, 0,
640 (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
641 UIO_WRITE);
642
643 /* get location of iovecs within the uio. then copyin the iovecs from
644 * user space.
645 */
646 iovp = uio_iovsaddr(auio);
647 if (iovp == NULL) {
648 error = ENOMEM;
649 goto ExitThisRoutine;
650 }
651 error = copyin_user_iovec_array(uap->iovp,
652 IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
653 uap->iovcnt, iovp);
654 if (error) {
655 goto ExitThisRoutine;
656 }
657
658 /* finalize uio_t for use and do the IO
659 */
660 error = uio_calculateresid(auio);
661 if (error) {
662 goto ExitThisRoutine;
663 }
664
665 error = fp_lookup(p, uap->fd, &fp, 0);
666 if (error)
667 goto ExitThisRoutine;
668
669 if ((fp->f_flag & FWRITE) == 0) {
670 error = EBADF;
671 } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
672 proc_fdlock(p);
673 error = fp_guard_exception(p, uap->fd, fp, kGUARD_EXC_WRITE);
674 proc_fdunlock(p);
675 } else {
676 error = wr_uio(p, fp, auio, retval);
677 wrote_some = *retval > 0;
678 }
679
680 if (wrote_some)
681 fp_drop_written(p, uap->fd, fp);
682 else
683 fp_drop(p, uap->fd, fp, 0);
684
685 ExitThisRoutine:
686 if (auio != NULL) {
687 uio_free(auio);
688 }
689 return (error);
690 }
691
692
693 int
694 wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval)
695 {
696 int error;
697 user_ssize_t count;
698 struct vfs_context context = *vfs_context_current();
699
700 count = uio_resid(uio);
701
702 context.vc_ucred = fp->f_cred;
703 error = fo_write(fp, uio, 0, &context);
704 if (error) {
705 if (uio_resid(uio) != count && (error == ERESTART ||
706 error == EINTR || error == EWOULDBLOCK))
707 error = 0;
708 /* The socket layer handles SIGPIPE */
709 if (error == EPIPE && fp->f_type != DTYPE_SOCKET &&
710 (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0)
711 psignal(p, SIGPIPE);
712 }
713 *retval = count - uio_resid(uio);
714
715 return(error);
716 }
717
718
719 int
720 rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval)
721 {
722 struct fileproc *fp;
723 int error;
724 user_ssize_t count;
725 struct vfs_context context = *vfs_context_current();
726
727 if ( (error = preparefileread(p, &fp, fdes, 0)) )
728 return (error);
729
730 count = uio_resid(uio);
731
732 context.vc_ucred = fp->f_cred;
733
734 error = fo_read(fp, uio, 0, &context);
735
736 if (error) {
737 if (uio_resid(uio) != count && (error == ERESTART ||
738 error == EINTR || error == EWOULDBLOCK))
739 error = 0;
740 }
741 *retval = count - uio_resid(uio);
742
743 donefileread(p, fp, fdes);
744
745 return (error);
746 }
747
748 /*
749 * Ioctl system call
750 *
751 * Returns: 0 Success
752 * EBADF
753 * ENOTTY
754 * ENOMEM
755 * ESRCH
756 * copyin:EFAULT
757  * copyout:EFAULT
758 * fp_lookup:EBADF Bad file descriptor
759 * fo_ioctl:???
760 */
761 int
762 ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval)
763 {
764 struct fileproc *fp = NULL;
765 int error = 0;
766 u_int size = 0;
767 caddr_t datap = NULL, memp = NULL;
768 boolean_t is64bit = FALSE;
769 int tmp = 0;
770 #define STK_PARAMS 128
771 char stkbuf[STK_PARAMS];
772 int fd = uap->fd;
773 u_long com = uap->com;
774 struct vfs_context context = *vfs_context_current();
775
776 AUDIT_ARG(fd, uap->fd);
777 AUDIT_ARG(addr, uap->data);
778
779 is64bit = proc_is64bit(p);
780 #if CONFIG_AUDIT
781 if (is64bit)
782 AUDIT_ARG(value64, com);
783 else
784 AUDIT_ARG(cmd, CAST_DOWN_EXPLICIT(int, com));
785 #endif /* CONFIG_AUDIT */
786
787 /*
788 * Interpret high order word to find amount of data to be
789 * copied to/from the user's address space.
790 */
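	/*
	 * Sketch of the assumed command-word layout (see <sys/ioccom.h>): the
	 * high bits of 'com' carry the direction (IOC_IN/IOC_OUT/IOC_VOID) and
	 * a parameter length that IOCPARM_LEN() extracts; the low bits carry
	 * the group and command number.  For example, a caller issuing
	 *
	 *	int on = 1;
	 *	ioctl(fd, FIONBIO, &on);	// FIONBIO is _IOW('f', 126, int) in <sys/filio.h>
	 *
	 * arrives here with IOC_IN set and size == sizeof(int), so the int is
	 * copied in before the FIONBIO case below runs.
	 */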
791 size = IOCPARM_LEN(com);
792 if (size > IOCPARM_MAX)
793 return ENOTTY;
794 if (size > sizeof (stkbuf)) {
795 if ((memp = (caddr_t)kalloc(size)) == 0)
796 return ENOMEM;
797 datap = memp;
798 } else
799 datap = &stkbuf[0];
800 if (com & IOC_IN) {
801 if (size) {
802 error = copyin(uap->data, datap, size);
803 if (error)
804 goto out_nofp;
805 } else {
806 			/* XXX - IOC_IN and no size? we should probably return an error here!! */
807 if (is64bit) {
808 *(user_addr_t *)datap = uap->data;
809 }
810 else {
811 *(uint32_t *)datap = (uint32_t)uap->data;
812 }
813 }
814 } else if ((com & IOC_OUT) && size)
815 /*
816 * Zero the buffer so the user always
817 * gets back something deterministic.
818 */
819 bzero(datap, size);
820 else if (com & IOC_VOID) {
821 /* XXX - this is odd since IOC_VOID means no parameters */
822 if (is64bit) {
823 *(user_addr_t *)datap = uap->data;
824 }
825 else {
826 *(uint32_t *)datap = (uint32_t)uap->data;
827 }
828 }
829
830 proc_fdlock(p);
831 error = fp_lookup(p,fd,&fp,1);
832 if (error) {
833 proc_fdunlock(p);
834 goto out_nofp;
835 }
836
837 AUDIT_ARG(file, p, fp);
838
839 if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
840 error = EBADF;
841 goto out;
842 }
843
844 context.vc_ucred = fp->f_fglob->fg_cred;
845
846 #if CONFIG_MACF
847 error = mac_file_check_ioctl(context.vc_ucred, fp->f_fglob, com);
848 if (error)
849 goto out;
850 #endif
851
852 switch (com) {
853 case FIONCLEX:
854 *fdflags(p, fd) &= ~UF_EXCLOSE;
855 break;
856
857 case FIOCLEX:
858 *fdflags(p, fd) |= UF_EXCLOSE;
859 break;
860
861 case FIONBIO:
862 if ( (tmp = *(int *)datap) )
863 fp->f_flag |= FNONBLOCK;
864 else
865 fp->f_flag &= ~FNONBLOCK;
866 error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
867 break;
868
869 case FIOASYNC:
870 if ( (tmp = *(int *)datap) )
871 fp->f_flag |= FASYNC;
872 else
873 fp->f_flag &= ~FASYNC;
874 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
875 break;
876
877 case FIOSETOWN:
878 tmp = *(int *)datap;
879 if (fp->f_type == DTYPE_SOCKET) {
880 ((struct socket *)fp->f_data)->so_pgid = tmp;
881 break;
882 }
883 if (fp->f_type == DTYPE_PIPE) {
884 error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context);
885 break;
886 }
887 if (tmp <= 0) {
888 tmp = -tmp;
889 } else {
890 struct proc *p1 = proc_find(tmp);
891 if (p1 == 0) {
892 error = ESRCH;
893 break;
894 }
895 tmp = p1->p_pgrpid;
896 proc_rele(p1);
897 }
898 error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context);
899 break;
900
901 case FIOGETOWN:
902 if (fp->f_type == DTYPE_SOCKET) {
903 *(int *)datap = ((struct socket *)fp->f_data)->so_pgid;
904 break;
905 }
906 error = fo_ioctl(fp, TIOCGPGRP, datap, &context);
907 *(int *)datap = -*(int *)datap;
908 break;
909
910 default:
911 error = fo_ioctl(fp, com, datap, &context);
912 /*
913 * Copy any data to user, size was
914 * already set and checked above.
915 */
916 if (error == 0 && (com & IOC_OUT) && size)
917 error = copyout(datap, uap->data, (u_int)size);
918 break;
919 }
920 out:
921 fp_drop(p, fd, fp, 1);
922 proc_fdunlock(p);
923
924 out_nofp:
925 if (memp)
926 kfree(memp, size);
927 return(error);
928 }
929
930 int selwait, nselcoll;
931 #define SEL_FIRSTPASS 1
932 #define SEL_SECONDPASS 2
933 extern int selcontinue(int error);
934 extern int selprocess(int error, int sel_pass);
935 static int selscan(struct proc *p, struct _select * sel, struct _select_data * seldata,
936 int nfd, int32_t *retval, int sel_pass, wait_queue_sub_t wqsub);
937 static int selcount(struct proc *p, u_int32_t *ibits, int nfd, int *count);
938 static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup, int fromselcount);
939 static int seldrop(struct proc *p, u_int32_t *ibits, int nfd);
940
941 /*
942 * Select system call.
943 *
944 * Returns: 0 Success
945 * EINVAL Invalid argument
946 * EAGAIN Nonconformant error if allocation fails
947 * selprocess:???
948 */
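/*
 * Illustrative userspace sketch (not part of the original source) of the
 * call being serviced here:
 *
 *	fd_set rfds;
 *	struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
 *	FD_ZERO(&rfds);
 *	FD_SET(fd, &rfds);
 *	int n = select(fd + 1, &rfds, NULL, NULL, &tv);	// nd == fd + 1
 *	if (n > 0 && FD_ISSET(fd, &rfds))
 *		;	// fd is readable
 */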
949 int
950 select(struct proc *p, struct select_args *uap, int32_t *retval)
951 {
952 __pthread_testcancel(1);
953 return(select_nocancel(p, (struct select_nocancel_args *)uap, retval));
954 }
955
956 int
957 select_nocancel(struct proc *p, struct select_nocancel_args *uap, int32_t *retval)
958 {
959 int error = 0;
960 u_int ni, nw, size;
961 thread_t th_act;
962 struct uthread *uth;
963 struct _select *sel;
964 struct _select_data *seldata;
965 int needzerofill = 1;
966 int count = 0;
967
968 th_act = current_thread();
969 uth = get_bsdthread_info(th_act);
970 sel = &uth->uu_select;
971 seldata = &uth->uu_kevent.ss_select_data;
972 *retval = 0;
973
974 seldata->args = uap;
975 seldata->retval = retval;
976
977 if (uap->nd < 0) {
978 return (EINVAL);
979 }
980
981 /* select on thread of process that already called proc_exit() */
982 if (p->p_fd == NULL) {
983 return (EBADF);
984 }
985
986 if (uap->nd > p->p_fd->fd_nfiles)
987 uap->nd = p->p_fd->fd_nfiles; /* forgiving; slightly wrong */
988
989 nw = howmany(uap->nd, NFDBITS);
990 ni = nw * sizeof(fd_mask);
991
992 /*
993 * if the previously allocated space for the bits is smaller than
994 * what is requested or no space has yet been allocated for this
995 * thread, allocate enough space now.
996 *
997 	 * Note: If this allocation fails, select() will return EAGAIN; this
998 	 * is the same thing poll() returns in a no-memory situation, but
999 	 * it is not a POSIX-compliant error code for select().
1000 */
1001 if (sel->nbytes < (3 * ni)) {
1002 int nbytes = 3 * ni;
1003
1004 /* Free previous allocation, if any */
1005 if (sel->ibits != NULL)
1006 FREE(sel->ibits, M_TEMP);
1007 if (sel->obits != NULL) {
1008 FREE(sel->obits, M_TEMP);
1009 /* NULL out; subsequent ibits allocation may fail */
1010 sel->obits = NULL;
1011 }
1012
1013 MALLOC(sel->ibits, u_int32_t *, nbytes, M_TEMP, M_WAITOK | M_ZERO);
1014 if (sel->ibits == NULL)
1015 return (EAGAIN);
1016 MALLOC(sel->obits, u_int32_t *, nbytes, M_TEMP, M_WAITOK | M_ZERO);
1017 if (sel->obits == NULL) {
1018 FREE(sel->ibits, M_TEMP);
1019 sel->ibits = NULL;
1020 return (EAGAIN);
1021 }
1022 sel->nbytes = nbytes;
1023 needzerofill = 0;
1024 }
1025
1026 if (needzerofill) {
1027 bzero((caddr_t)sel->ibits, sel->nbytes);
1028 bzero((caddr_t)sel->obits, sel->nbytes);
1029 }
1030
1031 /*
1032 * get the bits from the user address space
1033 */
1034 #define getbits(name, x) \
1035 do { \
1036 if (uap->name && (error = copyin(uap->name, \
1037 (caddr_t)&sel->ibits[(x) * nw], ni))) \
1038 goto continuation; \
1039 } while (0)
1040
1041 getbits(in, 0);
1042 getbits(ou, 1);
1043 getbits(ex, 2);
1044 #undef getbits
1045
1046 if (uap->tv) {
1047 struct timeval atv;
1048 if (IS_64BIT_PROCESS(p)) {
1049 struct user64_timeval atv64;
1050 error = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
1051 /* Loses resolution - assume timeout < 68 years */
1052 atv.tv_sec = atv64.tv_sec;
1053 atv.tv_usec = atv64.tv_usec;
1054 } else {
1055 struct user32_timeval atv32;
1056 error = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
1057 atv.tv_sec = atv32.tv_sec;
1058 atv.tv_usec = atv32.tv_usec;
1059 }
1060 if (error)
1061 goto continuation;
1062 if (itimerfix(&atv)) {
1063 error = EINVAL;
1064 goto continuation;
1065 }
1066
1067 clock_absolutetime_interval_to_deadline(
1068 tvtoabstime(&atv), &seldata->abstime);
1069 }
1070 else
1071 seldata->abstime = 0;
1072
1073 if ( (error = selcount(p, sel->ibits, uap->nd, &count)) ) {
1074 goto continuation;
1075 }
1076
1077 seldata->count = count;
1078 size = SIZEOF_WAITQUEUE_SET + (count * SIZEOF_WAITQUEUE_LINK);
1079 if (uth->uu_allocsize) {
1080 if (uth->uu_wqset == 0)
1081 panic("select: wql memory smashed");
1082 /* needed for the select now */
1083 if (size > uth->uu_allocsize) {
1084 kfree(uth->uu_wqset, uth->uu_allocsize);
1085 uth->uu_allocsize = size;
1086 uth->uu_wqset = (wait_queue_set_t)kalloc(size);
1087 if (uth->uu_wqset == (wait_queue_set_t)NULL)
1088 panic("failed to allocate memory for waitqueue\n");
1089 }
1090 } else {
1091 uth->uu_allocsize = size;
1092 uth->uu_wqset = (wait_queue_set_t)kalloc(uth->uu_allocsize);
1093 if (uth->uu_wqset == (wait_queue_set_t)NULL)
1094 panic("failed to allocate memory for waitqueue\n");
1095 }
1096 bzero(uth->uu_wqset, size);
1097 seldata->wql = (char *)uth->uu_wqset + SIZEOF_WAITQUEUE_SET;
1098 wait_queue_set_init(uth->uu_wqset, (SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST));
1099
1100 continuation:
1101
1102 if (error) {
1103 /*
1104 * We have already cleaned up any state we established,
1105 * either locally or as a result of selcount(). We don't
1106 * need to wait_subqueue_unlink_all(), since we haven't set
1107 * anything at this point.
1108 */
1109 return (error);
1110 }
1111
1112 return selprocess(0, SEL_FIRSTPASS);
1113 }
1114
1115 int
1116 selcontinue(int error)
1117 {
1118 return selprocess(error, SEL_SECONDPASS);
1119 }
1120
1121
1122 /*
1123 * selprocess
1124 *
1125 * Parameters: error The error code from our caller
1126 * sel_pass The pass we are on
1127 */
1128 int
1129 selprocess(int error, int sel_pass)
1130 {
1131 int ncoll;
1132 u_int ni, nw;
1133 thread_t th_act;
1134 struct uthread *uth;
1135 struct proc *p;
1136 struct select_nocancel_args *uap;
1137 int *retval;
1138 struct _select *sel;
1139 struct _select_data *seldata;
1140 int unwind = 1;
1141 int prepost = 0;
1142 int somewakeup = 0;
1143 int doretry = 0;
1144 wait_result_t wait_result;
1145
1146 p = current_proc();
1147 th_act = current_thread();
1148 uth = get_bsdthread_info(th_act);
1149 sel = &uth->uu_select;
1150 seldata = &uth->uu_kevent.ss_select_data;
1151 uap = seldata->args;
1152 retval = seldata->retval;
1153
1154 if ((error != 0) && (sel_pass == SEL_FIRSTPASS))
1155 unwind = 0;
1156 if (seldata->count == 0)
1157 unwind = 0;
1158 retry:
1159 if (error != 0) {
1160 sel_pass = SEL_FIRSTPASS; /* Reset for seldrop */
1161 goto done;
1162 }
1163
1164 ncoll = nselcoll;
1165 OSBitOrAtomic(P_SELECT, &p->p_flag);
1166 /* skip scans if the select is just for timeouts */
1167 if (seldata->count) {
1168 /*
1169 * Clear out any dangling refs from prior calls; technically
1170 * there should not be any.
1171 */
1172 if (sel_pass == SEL_FIRSTPASS)
1173 wait_queue_sub_clearrefs(uth->uu_wqset);
1174
1175 error = selscan(p, sel, seldata, uap->nd, retval, sel_pass, (wait_queue_sub_t)uth->uu_wqset);
1176 if (error || *retval) {
1177 goto done;
1178 }
1179 if (prepost) {
1180 		/* if the select was preposted, then we can wake up and discover that someone
1181 		 * else already read the data; go to select again if time permits
1182 */
1183 prepost = 0;
1184 doretry = 1;
1185 }
1186 if (somewakeup) {
1187 somewakeup = 0;
1188 doretry = 1;
1189 }
1190 }
1191
1192 if (uap->tv) {
1193 uint64_t now;
1194
1195 clock_get_uptime(&now);
1196 if (now >= seldata->abstime)
1197 goto done;
1198 }
1199
1200 if (doretry) {
1201 /* cleanup obits and try again */
1202 doretry = 0;
1203 sel_pass = SEL_FIRSTPASS;
1204 goto retry;
1205 }
1206
1207 /*
1208 * To effect a poll, the timeout argument should be
1209 * non-nil, pointing to a zero-valued timeval structure.
1210 */
1211 if (uap->tv && seldata->abstime == 0) {
1212 goto done;
1213 }
1214
1215 	/* No spurious wakeups due to collisions, no need to check for them */
1216 if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) {
1217 sel_pass = SEL_FIRSTPASS;
1218 goto retry;
1219 }
1220
1221 OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1222
1223 /* if the select is just for timeout skip check */
1224 if (seldata->count &&(sel_pass == SEL_SECONDPASS))
1225 panic("selprocess: 2nd pass assertwaiting");
1226
1227 /* Wait Queue Subordinate has waitqueue as first element */
1228 wait_result = wait_queue_assert_wait_with_leeway((wait_queue_t)uth->uu_wqset,
1229 NULL, THREAD_ABORTSAFE,
1230 TIMEOUT_URGENCY_USER_NORMAL, seldata->abstime, 0);
1231 if (wait_result != THREAD_AWAKENED) {
1232 /* there are no preposted events */
1233 error = tsleep1(NULL, PSOCK | PCATCH,
1234 "select", 0, selcontinue);
1235 } else {
1236 prepost = 1;
1237 error = 0;
1238 }
1239
1240 if (error == 0) {
1241 sel_pass = SEL_SECONDPASS;
1242 if (!prepost)
1243 somewakeup = 1;
1244 goto retry;
1245 }
1246 done:
1247 if (unwind) {
1248 wait_subqueue_unlink_all(uth->uu_wqset);
1249 seldrop(p, sel->ibits, uap->nd);
1250 }
1251 OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1252 /* select is not restarted after signals... */
1253 if (error == ERESTART)
1254 error = EINTR;
1255 if (error == EWOULDBLOCK)
1256 error = 0;
1257 nw = howmany(uap->nd, NFDBITS);
1258 ni = nw * sizeof(fd_mask);
1259
1260 #define putbits(name, x) \
1261 do { \
1262 if (uap->name && (error2 = \
1263 copyout((caddr_t)&sel->obits[(x) * nw], uap->name, ni))) \
1264 error = error2; \
1265 } while (0)
1266
1267 if (error == 0) {
1268 int error2;
1269
1270 putbits(in, 0);
1271 putbits(ou, 1);
1272 putbits(ex, 2);
1273 #undef putbits
1274 }
1275 return(error);
1276 }
1277
1278
1279 /*
1280 * selscan
1281 *
1282 * Parameters: p Process performing the select
1283 * sel The per-thread select context structure
1284 * nfd The number of file descriptors to scan
1285 * retval The per thread system call return area
1286 * sel_pass Which pass this is; allowed values are
1287 * SEL_FIRSTPASS and SEL_SECONDPASS
1288 * wqsub The per thread wait queue set
1289 *
1290 * Returns: 0 Success
1291 * EIO Invalid p->p_fd field XXX Obsolete?
1292 * EBADF One of the files in the bit vector is
1293 * invalid.
1294 */
1295 static int
1296 selscan(struct proc *p, struct _select *sel, struct _select_data * seldata, int nfd, int32_t *retval,
1297 int sel_pass, wait_queue_sub_t wqsub)
1298 {
1299 struct filedesc *fdp = p->p_fd;
1300 int msk, i, j, fd;
1301 u_int32_t bits;
1302 struct fileproc *fp;
1303 int n = 0; /* count of bits */
1304 int nc = 0; /* bit vector offset (nc'th bit) */
1305 static int flag[3] = { FREAD, FWRITE, 0 };
1306 u_int32_t *iptr, *optr;
1307 u_int nw;
1308 u_int32_t *ibits, *obits;
1309 char * wql;
1310 char * wql_ptr;
1311 int count;
1312 struct vfs_context context = *vfs_context_current();
1313
1314 /*
1315 	 * Problems can arise at reboot due to Mac OS X signal problems
1316 	 * seen in Beaker1C; verify that the p->p_fd is valid
1317 */
1318 if (fdp == NULL) {
1319 *retval=0;
1320 return(EIO);
1321 }
1322 ibits = sel->ibits;
1323 obits = sel->obits;
1324 wql = seldata->wql;
1325
1326 nw = howmany(nfd, NFDBITS);
1327
1328 count = seldata->count;
1329
1330 nc = 0;
1331 if (count) {
1332 proc_fdlock(p);
1333 for (msk = 0; msk < 3; msk++) {
1334 iptr = (u_int32_t *)&ibits[msk * nw];
1335 optr = (u_int32_t *)&obits[msk * nw];
1336
1337 for (i = 0; i < nfd; i += NFDBITS) {
1338 bits = iptr[i/NFDBITS];
1339
1340 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1341 bits &= ~(1 << j);
1342
1343 if (fd < fdp->fd_nfiles)
1344 fp = fdp->fd_ofiles[fd];
1345 else
1346 fp = NULL;
1347
1348 if (fp == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1349 /*
1350 * If we abort because of a bad
1351 * fd, let the caller unwind...
1352 */
1353 proc_fdunlock(p);
1354 return(EBADF);
1355 }
1356 if (sel_pass == SEL_SECONDPASS) {
1357 wql_ptr = (char *)0;
1358 if ((fp->f_flags & FP_INSELECT) && (fp->f_waddr == (void *)wqsub)) {
1359 fp->f_flags &= ~FP_INSELECT;
1360 fp->f_waddr = (void *)0;
1361 }
1362 } else {
1363 wql_ptr = (wql + nc * SIZEOF_WAITQUEUE_LINK);
1364 if (fp->f_flags & FP_INSELECT) {
1365 /* someone is already in select on this fp */
1366 fp->f_flags |= FP_SELCONFLICT;
1367 wait_queue_link(&select_conflict_queue, (wait_queue_set_t)wqsub);
1368 } else {
1369 fp->f_flags |= FP_INSELECT;
1370 fp->f_waddr = (void *)wqsub;
1371 }
1372 }
1373
1374 context.vc_ucred = fp->f_cred;
1375
1376 /* The select; set the bit, if true */
1377 if (fp->f_ops && fp->f_type
1378 && fo_select(fp, flag[msk], wql_ptr, &context)) {
1379 optr[fd/NFDBITS] |= (1 << (fd % NFDBITS));
1380 n++;
1381 }
1382 nc++;
1383 }
1384 }
1385 }
1386 proc_fdunlock(p);
1387 }
1388 *retval = n;
1389 return (0);
1390 }
1391
1392 int poll_callback(struct kqueue *, struct kevent64_s *, void *);
1393
1394 struct poll_continue_args {
1395 user_addr_t pca_fds;
1396 u_int pca_nfds;
1397 u_int pca_rfds;
1398 };
1399
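/*
 * Illustrative userspace sketch (not part of the original source): each
 * pollfd entry is translated below into one or more kevents on a private
 * kqueue, e.g. for
 *
 *	struct pollfd pfd = { .fd = sock, .events = POLLIN };
 *	int n = poll(&pfd, 1, 1000);		// 1000 ms timeout
 *	if (n > 0 && (pfd.revents & POLLIN))
 *		;	// readable; POLLIN maps to an EVFILT_READ kevent
 */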
1400 int
1401 poll(struct proc *p, struct poll_args *uap, int32_t *retval)
1402 {
1403 __pthread_testcancel(1);
1404 return(poll_nocancel(p, (struct poll_nocancel_args *)uap, retval));
1405 }
1406
1407
1408 int
1409 poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval)
1410 {
1411 struct poll_continue_args *cont;
1412 struct pollfd *fds;
1413 struct kqueue *kq;
1414 struct timeval atv;
1415 int ncoll, error = 0;
1416 u_int nfds = uap->nfds;
1417 u_int rfds = 0;
1418 u_int i;
1419 size_t ni;
1420
1421 /*
1422 * This is kinda bogus. We have fd limits, but that is not
1423 * really related to the size of the pollfd array. Make sure
1424 * we let the process use at least FD_SETSIZE entries and at
1425 * least enough for the current limits. We want to be reasonably
1426 * safe, but not overly restrictive.
1427 */
1428 if (nfds > OPEN_MAX ||
1429 (nfds > p->p_rlimit[RLIMIT_NOFILE].rlim_cur && (proc_suser(p) || nfds > FD_SETSIZE)))
1430 return (EINVAL);
1431
1432 kq = kqueue_alloc(p);
1433 if (kq == NULL)
1434 return (EAGAIN);
1435
1436 ni = nfds * sizeof(struct pollfd) + sizeof(struct poll_continue_args);
1437 MALLOC(cont, struct poll_continue_args *, ni, M_TEMP, M_WAITOK);
1438 if (NULL == cont) {
1439 error = EAGAIN;
1440 goto out;
1441 }
1442
1443 fds = (struct pollfd *)&cont[1];
1444 error = copyin(uap->fds, fds, nfds * sizeof(struct pollfd));
1445 if (error)
1446 goto out;
1447
1448 if (uap->timeout != -1) {
1449 struct timeval rtv;
1450
1451 atv.tv_sec = uap->timeout / 1000;
1452 atv.tv_usec = (uap->timeout % 1000) * 1000;
1453 if (itimerfix(&atv)) {
1454 error = EINVAL;
1455 goto out;
1456 }
1457 getmicrouptime(&rtv);
1458 timevaladd(&atv, &rtv);
1459 } else {
1460 atv.tv_sec = 0;
1461 atv.tv_usec = 0;
1462 }
1463
1464 /* JMM - all this P_SELECT stuff is bogus */
1465 ncoll = nselcoll;
1466 OSBitOrAtomic(P_SELECT, &p->p_flag);
1467 for (i = 0; i < nfds; i++) {
1468 short events = fds[i].events;
1469 struct kevent64_s kev;
1470 int kerror = 0;
1471
1472 /* per spec, ignore fd values below zero */
1473 if (fds[i].fd < 0) {
1474 fds[i].revents = 0;
1475 continue;
1476 }
1477
1478 /* convert the poll event into a kqueue kevent */
1479 kev.ident = fds[i].fd;
1480 kev.flags = EV_ADD | EV_ONESHOT | EV_POLL;
1481 kev.udata = CAST_USER_ADDR_T(&fds[i]);
1482 kev.fflags = 0;
1483 kev.data = 0;
1484 kev.ext[0] = 0;
1485 kev.ext[1] = 0;
1486
1487 /* Handle input events */
1488 if (events & ( POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP )) {
1489 kev.filter = EVFILT_READ;
1490 if (events & ( POLLPRI | POLLRDBAND ))
1491 kev.flags |= EV_OOBAND;
1492 kerror = kevent_register(kq, &kev, p);
1493 }
1494
1495 /* Handle output events */
1496 if (kerror == 0 &&
1497 events & ( POLLOUT | POLLWRNORM | POLLWRBAND )) {
1498 kev.filter = EVFILT_WRITE;
1499 kerror = kevent_register(kq, &kev, p);
1500 }
1501
1502 /* Handle BSD extension vnode events */
1503 if (kerror == 0 &&
1504 events & ( POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE )) {
1505 kev.filter = EVFILT_VNODE;
1506 kev.fflags = 0;
1507 if (events & POLLEXTEND)
1508 kev.fflags |= NOTE_EXTEND;
1509 if (events & POLLATTRIB)
1510 kev.fflags |= NOTE_ATTRIB;
1511 if (events & POLLNLINK)
1512 kev.fflags |= NOTE_LINK;
1513 if (events & POLLWRITE)
1514 kev.fflags |= NOTE_WRITE;
1515 kerror = kevent_register(kq, &kev, p);
1516 }
1517
1518 if (kerror != 0) {
1519 fds[i].revents = POLLNVAL;
1520 rfds++;
1521 } else
1522 fds[i].revents = 0;
1523 }
1524
1525 /* Did we have any trouble registering? */
1526 if (rfds > 0)
1527 goto done;
1528
1529 /* scan for, and possibly wait for, the kevents to trigger */
1530 cont->pca_fds = uap->fds;
1531 cont->pca_nfds = nfds;
1532 cont->pca_rfds = rfds;
1533 error = kqueue_scan(kq, poll_callback, NULL, cont, &atv, p);
1534 rfds = cont->pca_rfds;
1535
1536 done:
1537 OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1538 /* poll is not restarted after signals... */
1539 if (error == ERESTART)
1540 error = EINTR;
1541 if (error == EWOULDBLOCK)
1542 error = 0;
1543 if (error == 0) {
1544 error = copyout(fds, uap->fds, nfds * sizeof(struct pollfd));
1545 *retval = rfds;
1546 }
1547
1548 out:
1549 if (NULL != cont)
1550 FREE(cont, M_TEMP);
1551
1552 kqueue_dealloc(kq);
1553 return (error);
1554 }
1555
1556 int
1557 poll_callback(__unused struct kqueue *kq, struct kevent64_s *kevp, void *data)
1558 {
1559 struct poll_continue_args *cont = (struct poll_continue_args *)data;
1560 struct pollfd *fds = CAST_DOWN(struct pollfd *, kevp->udata);
1561 short prev_revents = fds->revents;
1562 short mask = 0;
1563
1564 /* convert the results back into revents */
1565 if (kevp->flags & EV_EOF)
1566 fds->revents |= POLLHUP;
1567 if (kevp->flags & EV_ERROR)
1568 fds->revents |= POLLERR;
1569
1570 switch (kevp->filter) {
1571 case EVFILT_READ:
1572 if (fds->revents & POLLHUP)
1573 mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND );
1574 else {
1575 if ((kevp->flags & EV_ERROR) == 0 && kevp->data != 0)
1576 mask = (POLLIN | POLLRDNORM );
1577 if (kevp->flags & EV_OOBAND)
1578 mask |= ( POLLPRI | POLLRDBAND );
1579 }
1580 fds->revents |= (fds->events & mask);
1581 break;
1582
1583 case EVFILT_WRITE:
1584 if (!(fds->revents & POLLHUP))
1585 fds->revents |= (fds->events & ( POLLOUT | POLLWRNORM | POLLWRBAND ));
1586 break;
1587
1588 case EVFILT_VNODE:
1589 if (kevp->fflags & NOTE_EXTEND)
1590 fds->revents |= (fds->events & POLLEXTEND);
1591 if (kevp->fflags & NOTE_ATTRIB)
1592 fds->revents |= (fds->events & POLLATTRIB);
1593 if (kevp->fflags & NOTE_LINK)
1594 fds->revents |= (fds->events & POLLNLINK);
1595 if (kevp->fflags & NOTE_WRITE)
1596 fds->revents |= (fds->events & POLLWRITE);
1597 break;
1598 }
1599
1600 if (fds->revents != 0 && prev_revents == 0)
1601 cont->pca_rfds++;
1602
1603 return 0;
1604 }
1605
1606 int
1607 seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p)
1608 {
1609
1610 return (1);
1611 }
1612
1613 /*
1614 * selcount
1615 *
1616 * Count the number of bits set in the input bit vector, and establish an
1617 * outstanding fp->f_iocount for each of the descriptors which will be in
1618 * use in the select operation.
1619 *
1620 * Parameters: p The process doing the select
1621 * ibits The input bit vector
1622 * nfd The number of fd's in the vector
1623 * countp Pointer to where to store the bit count
1624 *
1625 * Returns: 0 Success
1626 * EIO Bad per process open file table
1627 * EBADF One of the bits in the input bit vector
1628 * references an invalid fd
1629 *
1630 * Implicit: *countp (modified) Count of fd's
1631 *
1632 * Notes: This function is the first pass under the proc_fdlock() that
1633 * permits us to recognize invalid descriptors in the bit vector;
1634  *		they may, however, not remain valid through the drop and
1635 * later reacquisition of the proc_fdlock().
1636 */
1637 static int
1638 selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp)
1639 {
1640 struct filedesc *fdp = p->p_fd;
1641 int msk, i, j, fd;
1642 u_int32_t bits;
1643 struct fileproc *fp;
1644 int n = 0;
1645 u_int32_t *iptr;
1646 u_int nw;
1647 int error=0;
1648 int dropcount;
1649 int need_wakeup = 0;
1650
1651 /*
1652 	 * Problems can arise at reboot due to Mac OS X signal problems
1653 	 * seen in Beaker1C; verify that the p->p_fd is valid
1654 */
1655 if (fdp == NULL) {
1656 *countp = 0;
1657 return(EIO);
1658 }
1659 nw = howmany(nfd, NFDBITS);
1660
1661 proc_fdlock(p);
1662 for (msk = 0; msk < 3; msk++) {
1663 iptr = (u_int32_t *)&ibits[msk * nw];
1664 for (i = 0; i < nfd; i += NFDBITS) {
1665 bits = iptr[i/NFDBITS];
1666 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1667 bits &= ~(1 << j);
1668
1669 if (fd < fdp->fd_nfiles)
1670 fp = fdp->fd_ofiles[fd];
1671 else
1672 fp = NULL;
1673
1674 if (fp == NULL ||
1675 (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1676 *countp = 0;
1677 error = EBADF;
1678 goto bad;
1679 }
1680 fp->f_iocount++;
1681 n++;
1682 }
1683 }
1684 }
1685 proc_fdunlock(p);
1686
1687 *countp = n;
1688 return (0);
1689
1690 bad:
1691 dropcount = 0;
1692
1693 if (n== 0)
1694 goto out;
1695 /* Ignore error return; it's already EBADF */
1696 (void)seldrop_locked(p, ibits, nfd, n, &need_wakeup, 1);
1697
1698 out:
1699 proc_fdunlock(p);
1700 if (need_wakeup) {
1701 wakeup(&p->p_fpdrainwait);
1702 }
1703 return(error);
1704 }
1705
1706
1707 /*
1708 * seldrop_locked
1709 *
1710 * Drop outstanding wait queue references set up during selscan(); drop the
1711 * outstanding per fileproc f_iocount() picked up during the selcount().
1712 *
1713 * Parameters: p Process performing the select
1714  *		ibits			Input bit vector of fd's
1715 * nfd Number of fd's
1716 * lim Limit to number of vector entries to
1717 * consider, or -1 for "all"
1718  *		fromselcount		Nonzero if called from selcount(); 'lim' then caps the drops
1719  *		need_wakeup		Pointer to flag to set to do a wakeup
1720  *					if f_iocount on any descriptor goes to 0
1721 *
1722 * Returns: 0 Success
1723 * EBADF One or more fds in the bit vector
1724 * were invalid, but the rest
1725 * were successfully dropped
1726 *
1727  * Notes:	An fd may become bad while the proc_fdlock() is not held,
1728 * if a multithreaded application closes the fd out from under
1729 * the in progress select. In this case, we still have to
1730 * clean up after the set up on the remaining fds.
1731 */
1732 static int
1733 seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup, int fromselcount)
1734 {
1735 struct filedesc *fdp = p->p_fd;
1736 int msk, i, j, fd;
1737 u_int32_t bits;
1738 struct fileproc *fp;
1739 u_int32_t *iptr;
1740 u_int nw;
1741 int error = 0;
1742 int dropcount = 0;
1743 uthread_t uth = get_bsdthread_info(current_thread());
1744
1745 *need_wakeup = 0;
1746
1747 /*
1748 	 * Problems can arise at reboot due to Mac OS X signal problems
1749 	 * seen in Beaker1C; verify that the p->p_fd is valid
1750 */
1751 if (fdp == NULL) {
1752 return(EIO);
1753 }
1754
1755 nw = howmany(nfd, NFDBITS);
1756
1757 for (msk = 0; msk < 3; msk++) {
1758 iptr = (u_int32_t *)&ibits[msk * nw];
1759 for (i = 0; i < nfd; i += NFDBITS) {
1760 bits = iptr[i/NFDBITS];
1761 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1762 bits &= ~(1 << j);
1763 fp = fdp->fd_ofiles[fd];
1764 /*
1765 * If we've already dropped as many as were
1766 * counted/scanned, then we are done.
1767 */
1768 if ((fromselcount != 0) && (++dropcount > lim))
1769 goto done;
1770
1771 if (fp == NULL) {
1772 /* skip (now) bad fds */
1773 error = EBADF;
1774 continue;
1775 }
1776 /*
1777 * Only clear the flag if we set it. We'll
1778 * only find that we set it if we had made
1779 * at least one [partial] pass through selscan().
1780 */
1781 if ((fp->f_flags & FP_INSELECT) && (fp->f_waddr == (void *)uth->uu_wqset)) {
1782 fp->f_flags &= ~FP_INSELECT;
1783 fp->f_waddr = (void *)0;
1784 }
1785
1786 fp->f_iocount--;
1787 if (fp->f_iocount < 0)
1788 panic("f_iocount overdecrement!");
1789
1790 if (fp->f_iocount == 0) {
1791 /*
1792 * The last iocount is responsible for clearing
1793 					 * selconflict flag - even if we didn't set it -
1794 * and is also responsible for waking up anyone
1795 * waiting on iocounts to drain.
1796 */
1797 if (fp->f_flags & FP_SELCONFLICT)
1798 fp->f_flags &= ~FP_SELCONFLICT;
1799 if (p->p_fpdrainwait) {
1800 p->p_fpdrainwait = 0;
1801 *need_wakeup = 1;
1802 }
1803 }
1804 }
1805 }
1806 }
1807 done:
1808 return (error);
1809 }
1810
1811
1812 static int
1813 seldrop(struct proc *p, u_int32_t *ibits, int nfd)
1814 {
1815 int error;
1816 int need_wakeup = 0;
1817
1818 proc_fdlock(p);
1819 error = seldrop_locked(p, ibits, nfd, nfd, &need_wakeup, 0);
1820 proc_fdunlock(p);
1821 if (need_wakeup) {
1822 wakeup(&p->p_fpdrainwait);
1823 }
1824 return (error);
1825 }
1826
1827 /*
1828 * Record a select request.
1829 */
1830 void
1831 selrecord(__unused struct proc *selector, struct selinfo *sip, void * p_wql)
1832 {
1833 thread_t cur_act = current_thread();
1834 struct uthread * ut = get_bsdthread_info(cur_act);
1835
1836 /* need to look at collisions */
1837
1838 /*do not record if this is second pass of select */
1839 if(p_wql == (void *)0) {
1840 return;
1841 }
1842
1843 if ((sip->si_flags & SI_INITED) == 0) {
1844 wait_queue_init(&sip->si_wait_queue, SYNC_POLICY_FIFO);
1845 sip->si_flags |= SI_INITED;
1846 sip->si_flags &= ~SI_CLEAR;
1847 }
1848
1849 if (sip->si_flags & SI_RECORDED) {
1850 sip->si_flags |= SI_COLL;
1851 } else
1852 sip->si_flags &= ~SI_COLL;
1853
1854 sip->si_flags |= SI_RECORDED;
1855 if (!wait_queue_member(&sip->si_wait_queue, ut->uu_wqset))
1856 wait_queue_link_noalloc(&sip->si_wait_queue, ut->uu_wqset,
1857 (wait_queue_link_t)p_wql);
1858
1859 return;
1860 }
1861
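/*
 * Minimal sketch (an assumption about typical use, not taken from this file)
 * of how a driver's select handler pairs with selrecord()/selwakeup(); 'sc'
 * and 'sc_rsel' are hypothetical driver state:
 *
 *	// in the driver's select/fo_select routine, when no data is ready:
 *	selrecord(p, &sc->sc_rsel, wql);	// sc_rsel is a struct selinfo
 *	return (0);
 *
 *	// later, when data becomes available:
 *	selwakeup(&sc->sc_rsel);		// wake threads blocked in select()
 */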
1862 void
1863 selwakeup(struct selinfo *sip)
1864 {
1865
1866 if ((sip->si_flags & SI_INITED) == 0) {
1867 return;
1868 }
1869
1870 if (sip->si_flags & SI_COLL) {
1871 nselcoll++;
1872 sip->si_flags &= ~SI_COLL;
1873 #if 0
1874 /* will not support */
1875 //wakeup((caddr_t)&selwait);
1876 #endif
1877 }
1878
1879 if (sip->si_flags & SI_RECORDED) {
1880 wait_queue_wakeup_all(&sip->si_wait_queue, NULL, THREAD_AWAKENED);
1881 sip->si_flags &= ~SI_RECORDED;
1882 }
1883
1884 }
1885
1886 void
1887 selthreadclear(struct selinfo *sip)
1888 {
1889
1890 if ((sip->si_flags & SI_INITED) == 0) {
1891 return;
1892 }
1893 if (sip->si_flags & SI_RECORDED) {
1894 selwakeup(sip);
1895 sip->si_flags &= ~(SI_RECORDED | SI_COLL);
1896 }
1897 sip->si_flags |= SI_CLEAR;
1898 wait_queue_unlink_all(&sip->si_wait_queue);
1899 }
1900
1901
1902
1903
1904 #define DBG_POST 0x10
1905 #define DBG_WATCH 0x11
1906 #define DBG_WAIT 0x12
1907 #define DBG_MOD 0x13
1908 #define DBG_EWAKEUP 0x14
1909 #define DBG_ENQUEUE 0x15
1910 #define DBG_DEQUEUE 0x16
1911
1912 #define DBG_MISC_POST MISCDBG_CODE(DBG_EVENT,DBG_POST)
1913 #define DBG_MISC_WATCH MISCDBG_CODE(DBG_EVENT,DBG_WATCH)
1914 #define DBG_MISC_WAIT MISCDBG_CODE(DBG_EVENT,DBG_WAIT)
1915 #define DBG_MISC_MOD MISCDBG_CODE(DBG_EVENT,DBG_MOD)
1916 #define DBG_MISC_EWAKEUP MISCDBG_CODE(DBG_EVENT,DBG_EWAKEUP)
1917 #define DBG_MISC_ENQUEUE MISCDBG_CODE(DBG_EVENT,DBG_ENQUEUE)
1918 #define DBG_MISC_DEQUEUE MISCDBG_CODE(DBG_EVENT,DBG_DEQUEUE)
1919
1920
1921 #define EVPROCDEQUE(p, evq) do { \
1922 proc_lock(p); \
1923 if (evq->ee_flags & EV_QUEUED) { \
1924 TAILQ_REMOVE(&p->p_evlist, evq, ee_plist); \
1925 evq->ee_flags &= ~EV_QUEUED; \
1926 } \
1927 proc_unlock(p); \
1928 } while (0);
1929
1930
1931 /*
1932  * called upon socket close. dequeue and free all events for
1933  * the socket... socket must be locked by caller.
1934 */
1935 void
1936 evsofree(struct socket *sp)
1937 {
1938 struct eventqelt *evq, *next;
1939 proc_t p;
1940
1941 if (sp == NULL)
1942 return;
1943
1944 for (evq = sp->so_evlist.tqh_first; evq != NULL; evq = next) {
1945 next = evq->ee_slist.tqe_next;
1946 p = evq->ee_proc;
1947
1948 if (evq->ee_flags & EV_QUEUED) {
1949 EVPROCDEQUE(p, evq);
1950 }
1951 TAILQ_REMOVE(&sp->so_evlist, evq, ee_slist); // remove from socket q
1952 FREE(evq, M_TEMP);
1953 }
1954 }
1955
1956
1957 /*
1958  * called upon pipe close. dequeue and free all events for
1959  * the pipe... pipe must be locked by caller
1960 */
1961 void
1962 evpipefree(struct pipe *cpipe)
1963 {
1964 struct eventqelt *evq, *next;
1965 proc_t p;
1966
1967 for (evq = cpipe->pipe_evlist.tqh_first; evq != NULL; evq = next) {
1968 next = evq->ee_slist.tqe_next;
1969 p = evq->ee_proc;
1970
1971 EVPROCDEQUE(p, evq);
1972
1973 TAILQ_REMOVE(&cpipe->pipe_evlist, evq, ee_slist); // remove from pipe q
1974 FREE(evq, M_TEMP);
1975 }
1976 }
1977
1978
1979 /*
1980 * enqueue this event if it's not already queued. wakeup
1981 * the proc if we do queue this event to it...
1982 * entered with proc lock held... we drop it before
1983 * doing the wakeup and return in that state
1984 */
1985 static void
1986 evprocenque(struct eventqelt *evq)
1987 {
1988 proc_t p;
1989
1990 assert(evq);
1991 p = evq->ee_proc;
1992
1993 KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_START, (uint32_t)evq, evq->ee_flags, evq->ee_eventmask,0,0);
1994
1995 proc_lock(p);
1996
1997 if (evq->ee_flags & EV_QUEUED) {
1998 proc_unlock(p);
1999
2000 KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0);
2001 return;
2002 }
2003 evq->ee_flags |= EV_QUEUED;
2004
2005 TAILQ_INSERT_TAIL(&p->p_evlist, evq, ee_plist);
2006
2007 proc_unlock(p);
2008
2009 wakeup(&p->p_evlist);
2010
2011 KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0);
2012 }
2013
2014
2015 /*
2016 * pipe lock must be taken by the caller
2017 */
2018 void
2019 postpipeevent(struct pipe *pipep, int event)
2020 {
2021 int mask;
2022 struct eventqelt *evq;
2023
2024 if (pipep == NULL)
2025 return;
2026 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, event,0,0,1,0);
2027
2028 for (evq = pipep->pipe_evlist.tqh_first;
2029 evq != NULL; evq = evq->ee_slist.tqe_next) {
2030
2031 if (evq->ee_eventmask == 0)
2032 continue;
2033 mask = 0;
2034
2035 switch (event & (EV_RWBYTES | EV_RCLOSED | EV_WCLOSED)) {
2036
2037 case EV_RWBYTES:
2038 if ((evq->ee_eventmask & EV_RE) && pipep->pipe_buffer.cnt) {
2039 mask |= EV_RE;
2040 evq->ee_req.er_rcnt = pipep->pipe_buffer.cnt;
2041 }
2042 if ((evq->ee_eventmask & EV_WR) &&
2043 (MAX(pipep->pipe_buffer.size,PIPE_SIZE) - pipep->pipe_buffer.cnt) >= PIPE_BUF) {
2044
2045 if (pipep->pipe_state & PIPE_EOF) {
2046 mask |= EV_WR|EV_RESET;
2047 break;
2048 }
2049 mask |= EV_WR;
2050 evq->ee_req.er_wcnt = MAX(pipep->pipe_buffer.size, PIPE_SIZE) - pipep->pipe_buffer.cnt;
2051 }
2052 break;
2053
2054 case EV_WCLOSED:
2055 case EV_RCLOSED:
2056 if ((evq->ee_eventmask & EV_RE)) {
2057 mask |= EV_RE|EV_RCLOSED;
2058 }
2059 if ((evq->ee_eventmask & EV_WR)) {
2060 mask |= EV_WR|EV_WCLOSED;
2061 }
2062 break;
2063
2064 default:
2065 return;
2066 }
2067 if (mask) {
2068 /*
2069 * disarm... postevents are nops until this event is 'read' via
2070 * waitevent and then re-armed via modwatch
2071 */
2072 evq->ee_eventmask = 0;
2073
2074 /*
2075 * since events are disarmed until after the waitevent
2076 * the ee_req.er_xxxx fields can't change once we've
2077 * inserted this event into the proc queue...
2078 * therefore, the waitevent will see a 'consistent'
2079 * snapshot of the event, even though it won't hold
2080 * the pipe lock, and we're updating the event outside
2081 * of the proc lock, which it will hold
2082 */
2083 evq->ee_req.er_eventbits |= mask;
2084
2085 KERNEL_DEBUG(DBG_MISC_POST, (uint32_t)evq, evq->ee_req.er_eventbits, mask, 1,0);
2086
2087 evprocenque(evq);
2088 }
2089 }
2090 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, 0,0,0,1,0);
2091 }
2092
2093 #if SOCKETS
2094 /*
2095 * given either a sockbuf or a socket run down the
2096 * event list and queue ready events found...
2097 * the socket must be locked by the caller
2098 */
2099 void
2100 postevent(struct socket *sp, struct sockbuf *sb, int event)
2101 {
2102 int mask;
2103 struct eventqelt *evq;
2104 struct tcpcb *tp;
2105
2106 if (sb)
2107 sp = sb->sb_so;
2108 if (sp == NULL)
2109 return;
2110
2111 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, (int)sp, event, 0, 0, 0);
2112
2113 for (evq = sp->so_evlist.tqh_first;
2114 evq != NULL; evq = evq->ee_slist.tqe_next) {
2115
2116 if (evq->ee_eventmask == 0)
2117 continue;
2118 mask = 0;
2119
2120 /* ready for reading:
2121 - byte cnt >= receive low water mark
2122 - read-half of conn closed
2123 - conn pending for listening sock
2124 - socket error pending
2125
2126 ready for writing
2127 - byte cnt avail >= send low water mark
2128 - write half of conn closed
2129 - socket error pending
2130 - non-blocking conn completed successfully
2131
2132 exception pending
2133 - out of band data
2134 - sock at out of band mark
2135 */
2136
2137 switch (event & EV_DMASK) {
2138
2139 case EV_OOB:
2140 if ((evq->ee_eventmask & EV_EX)) {
2141 if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK)))
2142 mask |= EV_EX|EV_OOB;
2143 }
2144 break;
2145
2146 case EV_RWBYTES|EV_OOB:
2147 if ((evq->ee_eventmask & EV_EX)) {
2148 if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK)))
2149 mask |= EV_EX|EV_OOB;
2150 }
2151 /*
2152 * fall into the next case
2153 */
2154 case EV_RWBYTES:
2155 if ((evq->ee_eventmask & EV_RE) && soreadable(sp)) {
2156 /* for AFP/OT purposes; may go away in future */
2157 if ((SOCK_DOM(sp) == PF_INET ||
2158 SOCK_DOM(sp) == PF_INET6) &&
2159 SOCK_PROTO(sp) == IPPROTO_TCP &&
2160 (sp->so_error == ECONNREFUSED ||
2161 sp->so_error == ECONNRESET)) {
2162 if (sp->so_pcb == NULL ||
2163 sotoinpcb(sp)->inp_state ==
2164 INPCB_STATE_DEAD ||
2165 (tp = sototcpcb(sp)) == NULL ||
2166 tp->t_state == TCPS_CLOSED) {
2167 mask |= EV_RE|EV_RESET;
2168 break;
2169 }
2170 }
2171 mask |= EV_RE;
2172 evq->ee_req.er_rcnt = sp->so_rcv.sb_cc;
2173
2174 if (sp->so_state & SS_CANTRCVMORE) {
2175 mask |= EV_FIN;
2176 break;
2177 }
2178 }
2179 if ((evq->ee_eventmask & EV_WR) && sowriteable(sp)) {
2180 /* for AFP/OT purposes; may go away in future */
2181 if ((SOCK_DOM(sp) == PF_INET ||
2182 SOCK_DOM(sp) == PF_INET6) &&
2183 SOCK_PROTO(sp) == IPPROTO_TCP &&
2184 (sp->so_error == ECONNREFUSED ||
2185 sp->so_error == ECONNRESET)) {
2186 if (sp->so_pcb == NULL ||
2187 sotoinpcb(sp)->inp_state ==
2188 INPCB_STATE_DEAD ||
2189 (tp = sototcpcb(sp)) == NULL ||
2190 tp->t_state == TCPS_CLOSED) {
2191 mask |= EV_WR|EV_RESET;
2192 break;
2193 }
2194 }
2195 mask |= EV_WR;
2196 evq->ee_req.er_wcnt = sbspace(&sp->so_snd);
2197 }
2198 break;
2199
2200 case EV_RCONN:
2201 if ((evq->ee_eventmask & EV_RE)) {
2202 mask |= EV_RE|EV_RCONN;
2203 evq->ee_req.er_rcnt = sp->so_qlen + 1; // incl this one
2204 }
2205 break;
2206
2207 case EV_WCONN:
2208 if ((evq->ee_eventmask & EV_WR)) {
2209 mask |= EV_WR|EV_WCONN;
2210 }
2211 break;
2212
2213 case EV_RCLOSED:
2214 if ((evq->ee_eventmask & EV_RE)) {
2215 mask |= EV_RE|EV_RCLOSED;
2216 }
2217 break;
2218
2219 case EV_WCLOSED:
2220 if ((evq->ee_eventmask & EV_WR)) {
2221 mask |= EV_WR|EV_WCLOSED;
2222 }
2223 break;
2224
2225 case EV_FIN:
2226 if (evq->ee_eventmask & EV_RE) {
2227 mask |= EV_RE|EV_FIN;
2228 }
2229 break;
2230
2231 case EV_RESET:
2232 case EV_TIMEOUT:
2233 if (evq->ee_eventmask & EV_RE) {
2234 mask |= EV_RE | event;
2235 }
2236 if (evq->ee_eventmask & EV_WR) {
2237 mask |= EV_WR | event;
2238 }
2239 break;
2240
2241 default:
2242 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, (int)sp, -1, 0, 0, 0);
2243 return;
2244 } /* switch */
2245
2246 KERNEL_DEBUG(DBG_MISC_POST, (int)evq, evq->ee_eventmask, evq->ee_req.er_eventbits, mask, 0);
2247
2248 if (mask) {
2249 /*
2250 * disarm... postevents are nops until this event is 'read' via
2251 * waitevent and then re-armed via modwatch
2252 */
2253 evq->ee_eventmask = 0;
2254
2255 /*
2256 * since events are disarmed until after the waitevent
2257 * the ee_req.er_xxxx fields can't change once we've
2258 * inserted this event into the proc queue...
2259 * since waitevent can't see this event until we
2260 * enqueue it, waitevent will see a 'consistent'
2261 * snapshot of the event, even though it won't hold
2262 * the socket lock, and we're updating the event outside
2263 * of the proc lock, which it will hold
2264 */
2265 evq->ee_req.er_eventbits |= mask;
2266
2267 evprocenque(evq);
2268 }
2269 }
2270 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, (int)sp, 0, 0, 0, 0);
2271 }
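/*
 * Illustrative sketch (not original code, excluded from the build): how a
 * caller is expected to drive postevent().  The socket must be locked, and
 * either the socket or one of its sockbufs may be supplied; the helper name
 * example_socket_post is hypothetical.
 */
#if 0
static void
example_socket_post(struct socket *so)
{
	socket_lock(so, 1);
	/* readable/writable state changed; scan the armed watchers */
	postevent(so, NULL, EV_RWBYTES);
	socket_unlock(so, 1);
}
#endif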
2272 #endif /* SOCKETS */
2273
2274
2275 /*
2276 * watchevent system call. user passes us an event to watch
2277 * for. we malloc an event object, initialize it, and queue
2278 * it to the open socket or pipe. when the event occurs, postevent()
2279 * will enqueue it back to our proc where we can retrieve it
2280 * via waitevent().
2281 *
2282 * should this prevent duplicate events on same socket?
2283 *
2284 * Returns:
2285 * ENOMEM No memory for operation
2286 * copyin:EFAULT
2287 */
2288 int
2289 watchevent(proc_t p, struct watchevent_args *uap, __unused int *retval)
2290 {
2291 struct eventqelt *evq = (struct eventqelt *)0;
2292 struct eventqelt *np = NULL;
2293 struct eventreq64 *erp;
2294 struct fileproc *fp = NULL;
2295 int error;
2296
2297 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_START, 0,0,0,0,0);
2298
2299 // get a qelt and fill it with the user's req
2300 MALLOC(evq, struct eventqelt *, sizeof(struct eventqelt), M_TEMP, M_WAITOK);
2301
2302 if (evq == NULL)
2303 return (ENOMEM);
2304 erp = &evq->ee_req;
2305
2306 // get the user's request pkt
2307
2308 if (IS_64BIT_PROCESS(p)) {
2309 error = copyin(uap->u_req, (caddr_t)erp, sizeof(struct eventreq64));
2310 } else {
2311 struct eventreq32 er32;
2312
2313 error = copyin(uap->u_req, (caddr_t)&er32, sizeof(struct eventreq32));
2314 if (error == 0) {
2315 /*
2316 * the user only passes in the
2317 * er_type, er_handle and er_data...
2318 * the other fields are initialized
2319 * below, so don't bother to copy
2320 */
2321 erp->er_type = er32.er_type;
2322 erp->er_handle = er32.er_handle;
2323 erp->er_data = (user_addr_t)er32.er_data;
2324 }
2325 }
2326 if (error) {
2327 FREE(evq, M_TEMP);
2328 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0);
2329
2330 return(error);
2331 }
2332 KERNEL_DEBUG(DBG_MISC_WATCH, erp->er_handle,uap->u_eventmask,(uint32_t)evq,0,0);
2333
2334 // validate, freeing qelt on error
2335 error = 0;
2336 proc_fdlock(p);
2337
2338 if (erp->er_type != EV_FD) {
2339 error = EINVAL;
2340 } else if ((error = fp_lookup(p, erp->er_handle, &fp, 1)) != 0) {
2341 error = EBADF;
2342 #if SOCKETS
2343 } else if (fp->f_type == DTYPE_SOCKET) {
2344 socket_lock((struct socket *)fp->f_data, 1);
2345 np = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
2346 #endif /* SOCKETS */
2347 } else if (fp->f_type == DTYPE_PIPE) {
2348 PIPE_LOCK((struct pipe *)fp->f_data);
2349 np = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
2350 } else {
2351 fp_drop(p, erp->er_handle, fp, 1);
2352 error = EINVAL;
2353 }
2354 proc_fdunlock(p);
2355
2356 if (error) {
2357 FREE(evq, M_TEMP);
2358
2359 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0);
2360 return(error);
2361 }
2362
2363 /*
2364 * only allow one watch per file per proc
2365 */
2366 for ( ; np != NULL; np = np->ee_slist.tqe_next) {
2367 if (np->ee_proc == p) {
2368 #if SOCKETS
2369 if (fp->f_type == DTYPE_SOCKET)
2370 socket_unlock((struct socket *)fp->f_data, 1);
2371 else
2372 #endif /* SOCKETS */
2373 PIPE_UNLOCK((struct pipe *)fp->f_data);
2374 fp_drop(p, erp->er_handle, fp, 0);
2375 FREE(evq, M_TEMP);
2376
2377 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, EINVAL,0,0,0,0);
2378 return(EINVAL);
2379 }
2380 }
2381 erp->er_ecnt = erp->er_rcnt = erp->er_wcnt = erp->er_eventbits = 0;
2382 evq->ee_proc = p;
2383 evq->ee_eventmask = uap->u_eventmask & EV_MASK;
2384 evq->ee_flags = 0;
2385
2386 #if SOCKETS
2387 if (fp->f_type == DTYPE_SOCKET) {
2388 TAILQ_INSERT_TAIL(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
2389 postevent((struct socket *)fp->f_data, 0, EV_RWBYTES); // catch existing events
2390
2391 socket_unlock((struct socket *)fp->f_data, 1);
2392 } else
2393 #endif /* SOCKETS */
2394 {
2395 TAILQ_INSERT_TAIL(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
2396 postpipeevent((struct pipe *)fp->f_data, EV_RWBYTES);
2397
2398 PIPE_UNLOCK((struct pipe *)fp->f_data);
2399 }
2400 fp_drop_event(p, erp->er_handle, fp);
2401
2402 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, 0,0,0,0,0);
2403 return(0);
2404 }
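/*
 * Illustrative userspace sketch (not kernel code, excluded from the build):
 * arming a watch on a descriptor.  The watchevent() wrapper and the struct
 * eventreq layout are assumed to follow the legacy <sys/ev.h> interface this
 * syscall serves; treat this as a hedged example, not a supported API.
 */
#if 0
static int
example_arm_watch(int fd)
{
	struct eventreq req;

	bzero(&req, sizeof (req));
	req.er_type = EV_FD;		/* only EV_FD is accepted above */
	req.er_handle = fd;		/* must be a socket or a pipe */

	return (watchevent(&req, EV_RE | EV_WR));
}
#endif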
2405
2406
2407
2408 /*
2409 * waitevent system call.
2410 * grabs the next waiting event for this proc and returns
2411 * it. if no events are queued, the user can sleep (with or
2412 * without a timeout) or poll; poll mode is selected when
2413 * ((tv != NULL && interval == 0) || tv == -1)
2414 */
2415 int
2416 waitevent(proc_t p, struct waitevent_args *uap, int *retval)
2417 {
2418 int error = 0;
2419 struct eventqelt *evq;
2420 struct eventreq64 *erp;
2421 uint64_t abstime, interval;
2422 boolean_t fast_poll = FALSE;
2423 union {
2424 struct eventreq64 er64;
2425 struct eventreq32 er32;
2426 } uer;
2427
2428 interval = 0;
2429
2430 if (uap->tv) {
2431 struct timeval atv;
2432 /*
2433 * check for fast poll method
2434 */
2435 if (IS_64BIT_PROCESS(p)) {
2436 if (uap->tv == (user_addr_t)-1)
2437 fast_poll = TRUE;
2438 } else if (uap->tv == (user_addr_t)((uint32_t)-1))
2439 fast_poll = TRUE;
2440
2441 if (fast_poll == TRUE) {
2442 if (p->p_evlist.tqh_first == NULL) {
2443 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_NONE, -1,0,0,0,0);
2444 /*
2445 * poll failed
2446 */
2447 *retval = 1;
2448 return (0);
2449 }
2450 proc_lock(p);
2451 goto retry;
2452 }
2453 if (IS_64BIT_PROCESS(p)) {
2454 struct user64_timeval atv64;
2455 error = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
2456 /* Loses resolution - assume timeout < 68 years */
2457 atv.tv_sec = atv64.tv_sec;
2458 atv.tv_usec = atv64.tv_usec;
2459 } else {
2460 struct user32_timeval atv32;
2461 error = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
2462 atv.tv_sec = atv32.tv_sec;
2463 atv.tv_usec = atv32.tv_usec;
2464 }
2465
2466 if (error)
2467 return(error);
2468 if (itimerfix(&atv)) {
2469 error = EINVAL;
2470 return(error);
2471 }
2472 interval = tvtoabstime(&atv);
2473 }
2474 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_START, 0,0,0,0,0);
2475
2476 proc_lock(p);
2477 retry:
2478 if ((evq = p->p_evlist.tqh_first) != NULL) {
2479 /*
2480 * found one... make a local copy while it's still on the queue
2481 * to prevent it from changing while in the midst of copying
2482 * don't want to hold the proc lock across a copyout because
2483 * it might block on a page fault at the target in user space
2484 */
2485 erp = &evq->ee_req;
2486
2487 if (IS_64BIT_PROCESS(p))
2488 bcopy((caddr_t)erp, (caddr_t)&uer.er64, sizeof (struct eventreq64));
2489 else {
2490 uer.er32.er_type = erp->er_type;
2491 uer.er32.er_handle = erp->er_handle;
2492 uer.er32.er_data = (uint32_t)erp->er_data;
2493 uer.er32.er_ecnt = erp->er_ecnt;
2494 uer.er32.er_rcnt = erp->er_rcnt;
2495 uer.er32.er_wcnt = erp->er_wcnt;
2496 uer.er32.er_eventbits = erp->er_eventbits;
2497 }
2498 TAILQ_REMOVE(&p->p_evlist, evq, ee_plist);
2499
2500 evq->ee_flags &= ~EV_QUEUED;
2501
2502 proc_unlock(p);
2503
2504 if (IS_64BIT_PROCESS(p))
2505 error = copyout((caddr_t)&uer.er64, uap->u_req, sizeof(struct eventreq64));
2506 else
2507 error = copyout((caddr_t)&uer.er32, uap->u_req, sizeof(struct eventreq32));
2508
2509 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,
2510 evq->ee_req.er_handle,evq->ee_req.er_eventbits,(uint32_t)evq,0);
2511 return (error);
2512 }
2513 else {
2514 if (uap->tv && interval == 0) {
2515 proc_unlock(p);
2516 *retval = 1; // poll failed
2517
2518 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,0,0,0,0);
2519 return (error);
2520 }
2521 if (interval != 0)
2522 clock_absolutetime_interval_to_deadline(interval, &abstime);
2523 else
2524 abstime = 0;
2525
2526 KERNEL_DEBUG(DBG_MISC_WAIT, 1,(uint32_t)&p->p_evlist,0,0,0);
2527
2528 error = msleep1(&p->p_evlist, &p->p_mlock, (PSOCK | PCATCH), "waitevent", abstime);
2529
2530 KERNEL_DEBUG(DBG_MISC_WAIT, 2,(uint32_t)&p->p_evlist,0,0,0);
2531
2532 if (error == 0)
2533 goto retry;
2534 if (error == ERESTART)
2535 error = EINTR;
2536 if (error == EWOULDBLOCK) {
2537 *retval = 1;
2538 error = 0;
2539 }
2540 }
2541 proc_unlock(p);
2542
2543 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, 0,0,0,0,0);
2544 return (error);
2545 }
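/*
 * Illustrative userspace sketch (not kernel code, excluded from the build):
 * harvesting an event.  A zero timeout, or tv == (struct timeval *)-1,
 * selects the poll path described above; the waitevent() wrapper name and
 * its 0 / 1 / -1+errno return convention are assumptions.
 */
#if 0
static int
example_wait_for_event(struct eventreq *req)
{
	struct timeval tv = { 1, 0 };	/* block for at most one second */

	/* 0 == *req holds a fired event, 1 == nothing queued (poll/timeout) */
	return (waitevent(req, &tv));
}
#endif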
2546
2547
2548 /*
2549 * modwatch system call. user passes in event to modify.
2550 * if we find it we reset the event bits and queue/dequeue the
2551 * event as needed.
2552 */
2553 int
2554 modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval)
2555 {
2556 struct eventreq64 er;
2557 struct eventreq64 *erp = &er;
2558 struct eventqelt *evq = NULL; /* protected by error return */
2559 int error;
2560 struct fileproc *fp;
2561 int flag;
2562
2563 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_START, 0,0,0,0,0);
2564
2565 /*
2566 * get user's request pkt
2567 * just need the er_type and er_handle which sit above the
2568 * problematic er_data (32/64 issue)... so only copy in
2569 * those 2 fields
2570 */
2571 if ((error = copyin(uap->u_req, (caddr_t)erp, sizeof(er.er_type) + sizeof(er.er_handle)))) {
2572 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, error,0,0,0,0);
2573 return(error);
2574 }
2575 proc_fdlock(p);
2576
2577 if (erp->er_type != EV_FD) {
2578 error = EINVAL;
2579 } else if ((error = fp_lookup(p, erp->er_handle, &fp, 1)) != 0) {
2580 error = EBADF;
2581 #if SOCKETS
2582 } else if (fp->f_type == DTYPE_SOCKET) {
2583 socket_lock((struct socket *)fp->f_data, 1);
2584 evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
2585 #endif /* SOCKETS */
2586 } else if (fp->f_type == DTYPE_PIPE) {
2587 PIPE_LOCK((struct pipe *)fp->f_data);
2588 evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
2589 } else {
2590 fp_drop(p, erp->er_handle, fp, 1);
2591 error = EINVAL;
2592 }
2593
2594 if (error) {
2595 proc_fdunlock(p);
2596 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, error,0,0,0,0);
2597 return(error);
2598 }
2599
2600 if ((uap->u_eventmask == EV_RM) && (fp->f_flags & FP_WAITEVENT)) {
2601 fp->f_flags &= ~FP_WAITEVENT;
2602 }
2603 proc_fdunlock(p);
2604
2605 // locate event if possible
2606 for ( ; evq != NULL; evq = evq->ee_slist.tqe_next) {
2607 if (evq->ee_proc == p)
2608 break;
2609 }
2610 if (evq == NULL) {
2611 #if SOCKETS
2612 if (fp->f_type == DTYPE_SOCKET)
2613 socket_unlock((struct socket *)fp->f_data, 1);
2614 else
2615 #endif /* SOCKETS */
2616 PIPE_UNLOCK((struct pipe *)fp->f_data);
2617 fp_drop(p, erp->er_handle, fp, 0);
2618 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, EINVAL,0,0,0,0);
2619 return(EINVAL);
2620 }
2621 KERNEL_DEBUG(DBG_MISC_MOD, erp->er_handle,uap->u_eventmask,(uint32_t)evq,0,0);
2622
2623 if (uap->u_eventmask == EV_RM) {
2624 EVPROCDEQUE(p, evq);
2625
2626 #if SOCKETS
2627 if (fp->f_type == DTYPE_SOCKET) {
2628 TAILQ_REMOVE(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
2629 socket_unlock((struct socket *)fp->f_data, 1);
2630 } else
2631 #endif /* SOCKETS */
2632 {
2633 TAILQ_REMOVE(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
2634 PIPE_UNLOCK((struct pipe *)fp->f_data);
2635 }
2636 fp_drop(p, erp->er_handle, fp, 0);
2637 FREE(evq, M_TEMP);
2638 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, 0,0,0,0,0);
2639 return(0);
2640 }
2641 switch (uap->u_eventmask & EV_MASK) {
2642
2643 case 0:
2644 flag = 0;
2645 break;
2646
2647 case EV_RE:
2648 case EV_WR:
2649 case EV_RE|EV_WR:
2650 flag = EV_RWBYTES;
2651 break;
2652
2653 case EV_EX:
2654 flag = EV_OOB;
2655 break;
2656
2657 case EV_EX|EV_RE:
2658 case EV_EX|EV_WR:
2659 case EV_EX|EV_RE|EV_WR:
2660 flag = EV_OOB|EV_RWBYTES;
2661 break;
2662
2663 default:
2664 #if SOCKETS
2665 if (fp->f_type == DTYPE_SOCKET)
2666 socket_unlock((struct socket *)fp->f_data, 1);
2667 else
2668 #endif /* SOCKETS */
2669 PIPE_UNLOCK((struct pipe *)fp->f_data);
2670 fp_drop(p, erp->er_handle, fp, 0);
2671 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, EINVAL,0,0,0,0);
2672 return(EINVAL);
2673 }
2674 /*
2675 * since we're holding the socket/pipe lock, the event
2676 * cannot go from the unqueued state to the queued state
2677 * however, it can go from the queued state to the unqueued state
2678 * since that direction is protected by the proc_lock...
2679 * so do a quick check for EV_QUEUED w/o holding the proc lock
2680 * since by far the common case will be NOT EV_QUEUED, this saves
2681 * us taking the proc_lock the majority of the time
2682 */
2683 if (evq->ee_flags & EV_QUEUED) {
2684 /*
2685 * EVPROCDEQUE will recheck the state after it grabs the proc_lock
2686 */
2687 EVPROCDEQUE(p, evq);
2688 }
2689 /*
2690 * while the event is off the proc queue and
2691 * we're holding the socket/pipe lock
2692 * it's safe to update these fields...
2693 */
2694 evq->ee_req.er_eventbits = 0;
2695 evq->ee_eventmask = uap->u_eventmask & EV_MASK;
2696
2697 #if SOCKETS
2698 if (fp->f_type == DTYPE_SOCKET) {
2699 postevent((struct socket *)fp->f_data, 0, flag);
2700 socket_unlock((struct socket *)fp->f_data, 1);
2701 } else
2702 #endif /* SOCKETS */
2703 {
2704 postpipeevent((struct pipe *)fp->f_data, flag);
2705 PIPE_UNLOCK((struct pipe *)fp->f_data);
2706 }
2707 fp_drop(p, erp->er_handle, fp, 0);
2708 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, evq->ee_req.er_handle,evq->ee_eventmask,(uint32_t)fp->f_data,flag,0);
2709 return(0);
2710 }
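/*
 * Illustrative userspace sketch (not kernel code, excluded from the build):
 * because posted events disarm themselves, a client re-arms with modwatch()
 * after each waitevent(), and removes the watch with EV_RM when done.  The
 * modwatch() wrapper name is an assumption.
 */
#if 0
	/* re-arm for read/write after consuming the previous event */
	(void) modwatch(&req, EV_RE | EV_WR);

	/* tear the watch down entirely */
	(void) modwatch(&req, EV_RM);
#endif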
2711
2712 /* this routine is called when an fd is closed, with the proc_fdlock held */
2713 int
2714 waitevent_close(struct proc *p, struct fileproc *fp)
2715 {
2716 struct eventqelt *evq;
2717
2718
2719 fp->f_flags &= ~FP_WAITEVENT;
2720
2721 #if SOCKETS
2722 if (fp->f_type == DTYPE_SOCKET) {
2723 socket_lock((struct socket *)fp->f_data, 1);
2724 evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
2725 } else
2726 #endif /* SOCKETS */
2727 if (fp->f_type == DTYPE_PIPE) {
2728 PIPE_LOCK((struct pipe *)fp->f_data);
2729 evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
2730 }
2731 else {
2732 return(EINVAL);
2733 }
2734 proc_fdunlock(p);
2735
2736
2737 // locate event if possible
2738 for ( ; evq != NULL; evq = evq->ee_slist.tqe_next) {
2739 if (evq->ee_proc == p)
2740 break;
2741 }
2742 if (evq == NULL) {
2743 #if SOCKETS
2744 if (fp->f_type == DTYPE_SOCKET)
2745 socket_unlock((struct socket *)fp->f_data, 1);
2746 else
2747 #endif /* SOCKETS */
2748 PIPE_UNLOCK((struct pipe *)fp->f_data);
2749
2750 proc_fdlock(p);
2751
2752 return(EINVAL);
2753 }
2754 EVPROCDEQUE(p, evq);
2755
2756 #if SOCKETS
2757 if (fp->f_type == DTYPE_SOCKET) {
2758 TAILQ_REMOVE(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
2759 socket_unlock((struct socket *)fp->f_data, 1);
2760 } else
2761 #endif /* SOCKETS */
2762 {
2763 TAILQ_REMOVE(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
2764 PIPE_UNLOCK((struct pipe *)fp->f_data);
2765 }
2766 FREE(evq, M_TEMP);
2767
2768 proc_fdlock(p);
2769
2770 return(0);
2771 }
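/*
 * Illustrative sketch (not original code, excluded from the build) of the
 * expected call site: the descriptor close path invokes waitevent_close()
 * with the proc_fdlock held whenever the fileproc was used with watchevent().
 */
#if 0
	proc_fdlock(p);
	if (fp->f_flags & FP_WAITEVENT)
		(void) waitevent_close(p, fp);
	/* ... continue closing the descriptor; proc_fdlock is still held ... */
	proc_fdunlock(p);
#endif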
2772
2773
2774 /*
2775 * gethostuuid
2776 *
2777 * Description: Get the host UUID from IOKit and return it to user space.
2778 *
2779 * Parameters: uuid_buf Pointer to buffer to receive UUID
2780 * timeout Timespec for timeout
2781 * spi SPI, skip sandbox check (temporary)
2782 *
2783 * Returns: 0 Success
2784 * EWOULDBLOCK Timeout is too short
2785 * copyout:EFAULT Bad user buffer
2786 * mac_system_check_info:EPERM Client not allowed to perform this operation
2787 *
2788 * Notes: A timeout seems redundant, since if it's tolerable to not
2789 * have a system UUID in hand, then why ask for one?
2790 */
2791 int
2792 gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval)
2793 {
2794 kern_return_t kret;
2795 int error;
2796 mach_timespec_t mach_ts; /* for IOKit call */
2797 __darwin_uuid_t uuid_kern; /* for IOKit call */
2798
2799 if (!uap->spi) { /* XXX the sandbox/MAC check described above is not present in this source drop */
2800 }
2801
2802 /* Convert the 32/64 bit timespec into a mach_timespec_t */
2803 if ( proc_is64bit(p) ) {
2804 struct user64_timespec ts;
2805 error = copyin(uap->timeoutp, &ts, sizeof(ts));
2806 if (error)
2807 return (error);
2808 mach_ts.tv_sec = ts.tv_sec;
2809 mach_ts.tv_nsec = ts.tv_nsec;
2810 } else {
2811 struct user32_timespec ts;
2812 error = copyin(uap->timeoutp, &ts, sizeof(ts) );
2813 if (error)
2814 return (error);
2815 mach_ts.tv_sec = ts.tv_sec;
2816 mach_ts.tv_nsec = ts.tv_nsec;
2817 }
2818
2819 /* Call IOKit with the stack buffer to get the UUID */
2820 kret = IOBSDGetPlatformUUID(uuid_kern, mach_ts);
2821
2822 /*
2823 * If we get it, copy out the data to the user buffer; note that a
2824 * uuid_t is an array of characters, so this is size invariant for
2825 * 32 vs. 64 bit.
2826 */
2827 if (kret == KERN_SUCCESS) {
2828 error = copyout(uuid_kern, uap->uuid_buf, sizeof(uuid_kern));
2829 } else {
2830 error = EWOULDBLOCK;
2831 }
2832
2833 return (error);
2834 }
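/*
 * Illustrative userspace sketch (not kernel code, excluded from the build)
 * using the libc gethostuuid() wrapper; see gethostuuid(3).
 */
#if 0
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <uuid/uuid.h>

int
main(void)
{
	uuid_t uuid;
	uuid_string_t str;
	struct timespec wait = { 5, 0 };	/* give IOKit up to five seconds */

	if (gethostuuid(uuid, &wait) != 0)
		return (1);
	uuid_unparse_upper(uuid, str);
	printf("host UUID: %s\n", str);
	return (0);
}
#endif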
2835
2836 /*
2837 * ledger
2838 *
2839 * Description: Omnibus system call for ledger operations
2840 */
2841 int
2842 ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval)
2843 {
2844 #if !CONFIG_MACF
2845 #pragma unused(p)
2846 #endif
2847 int rval, pid, len, error;
2848 #ifdef LEDGER_DEBUG
2849 struct ledger_limit_args lla;
2850 #endif
2851 task_t task;
2852 proc_t proc;
2853
2854 /* Finish copying in the necessary args before taking the proc lock */
2855 error = 0;
2856 len = 0;
2857 if (args->cmd == LEDGER_ENTRY_INFO)
2858 error = copyin(args->arg3, (char *)&len, sizeof (len));
2859 else if (args->cmd == LEDGER_TEMPLATE_INFO)
2860 error = copyin(args->arg2, (char *)&len, sizeof (len));
2861 #ifdef LEDGER_DEBUG
2862 else if (args->cmd == LEDGER_LIMIT)
2863 error = copyin(args->arg2, (char *)&lla, sizeof (lla));
2864 #endif
2865 if (error)
2866 return (error);
2867 if (len < 0)
2868 return (EINVAL);
2869
2870 rval = 0;
2871 if (args->cmd != LEDGER_TEMPLATE_INFO) {
2872 pid = args->arg1;
2873 proc = proc_find(pid);
2874 if (proc == NULL)
2875 return (ESRCH);
2876
2877 #if CONFIG_MACF
2878 error = mac_proc_check_ledger(p, proc, args->cmd);
2879 if (error) {
2880 proc_rele(proc);
2881 return (error);
2882 }
2883 #endif
2884
2885 task = proc->task;
2886 }
2887
2888 switch (args->cmd) {
2889 #ifdef LEDGER_DEBUG
2890 case LEDGER_LIMIT: {
2891 if (!kauth_cred_issuser(kauth_cred_get()))
2892 rval = EPERM; /* superuser only */
2893 else rval = ledger_limit(task, &lla);
2894 proc_rele(proc);
2895 break;
2896 }
2897 #endif
2898 case LEDGER_INFO: {
2899 struct ledger_info info;
2900
2901 rval = ledger_info(task, &info);
2902 proc_rele(proc);
2903 if (rval == 0)
2904 rval = copyout(&info, args->arg2,
2905 sizeof (info));
2906 break;
2907 }
2908
2909 case LEDGER_ENTRY_INFO: {
2910 void *buf;
2911 int sz;
2912
2913 rval = ledger_get_task_entry_info_multiple(task, &buf, &len);
2914 proc_rele(proc);
2915 if ((rval == 0) && (len > 0)) {
2916 sz = len * sizeof (struct ledger_entry_info);
2917 rval = copyout(buf, args->arg2, sz);
2918 kfree(buf, sz);
2919 }
2920 if (rval == 0)
2921 rval = copyout(&len, args->arg3, sizeof (len));
2922 break;
2923 }
2924
2925 case LEDGER_TEMPLATE_INFO: {
2926 void *buf;
2927 int sz;
2928
2929 rval = ledger_template_info(&buf, &len);
2930 if ((rval == 0) && (len > 0)) {
2931 sz = len * sizeof (struct ledger_template_info);
2932 rval = copyout(buf, args->arg1, sz);
2933 kfree(buf, sz);
2934 }
2935 if (rval == 0)
2936 rval = copyout(&len, args->arg2, sizeof (len));
2937 break;
2938 }
2939
2940 default:
2941 rval = EINVAL;
2942 }
2943
2944 return (rval);
2945 }
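/*
 * Illustrative userspace sketch (not kernel code, excluded from the build):
 * fetching the ledger summary for a pid with LEDGER_INFO.  The ledger()
 * wrapper, the struct ledger_info layout and the command constants are
 * private interfaces; the argument usage below only mirrors what this
 * handler expects (arg1 = pid, arg2 = user buffer).
 */
#if 0
	struct ledger_info li;

	if (ledger(LEDGER_INFO, (caddr_t)(intptr_t)pid, (caddr_t)&li, NULL) < 0)
		perror("ledger");
#endif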
2946
2947 int
2948 telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t *retval)
2949 {
2950 int error = 0;
2951
2952 switch (args->cmd) {
2953 #if CONFIG_TELEMETRY
2954 case TELEMETRY_CMD_TIMER_EVENT:
2955 error = telemetry_timer_event(args->deadline, args->interval, args->leeway);
2956 break;
2957 #endif /* CONFIG_TELEMETRY */
2958 case TELEMETRY_CMD_VOUCHER_NAME:
2959 if (thread_set_voucher_name((mach_port_name_t)args->deadline))
2960 error = EINVAL;
2961 break;
2962
2963 default:
2964 error = EINVAL;
2965 break;
2966 }
2967
2968 return (error);
2969 }