1 /*
2 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)sys_generic.c 8.9 (Berkeley) 2/14/95
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/ioctl.h>
79 #include <sys/file_internal.h>
80 #include <sys/proc_internal.h>
81 #include <sys/socketvar.h>
82 #include <sys/uio_internal.h>
83 #include <sys/kernel.h>
84 #include <sys/guarded.h>
85 #include <sys/stat.h>
86 #include <sys/malloc.h>
87 #include <sys/sysproto.h>
88
89 #include <sys/mount_internal.h>
90 #include <sys/protosw.h>
91 #include <sys/ev.h>
92 #include <sys/user.h>
93 #include <sys/kdebug.h>
94 #include <sys/poll.h>
95 #include <sys/event.h>
96 #include <sys/eventvar.h>
97 #include <sys/proc.h>
98 #include <sys/kauth.h>
99
100 #include <mach/mach_types.h>
101 #include <kern/kern_types.h>
102 #include <kern/assert.h>
103 #include <kern/kalloc.h>
104 #include <kern/thread.h>
105 #include <kern/clock.h>
106 #include <kern/ledger.h>
107 #include <kern/task.h>
108 #include <kern/telemetry.h>
109 #include <kern/waitq.h>
110 #include <kern/sched_prim.h>
111
112 #include <sys/mbuf.h>
113 #include <sys/domain.h>
114 #include <sys/socket.h>
115 #include <sys/socketvar.h>
116 #include <sys/errno.h>
117 #include <sys/syscall.h>
118 #include <sys/pipe.h>
119
120 #include <security/audit/audit.h>
121
122 #include <net/if.h>
123 #include <net/route.h>
124
125 #include <netinet/in.h>
126 #include <netinet/in_systm.h>
127 #include <netinet/ip.h>
128 #include <netinet/in_pcb.h>
129 #include <netinet/ip_var.h>
130 #include <netinet/ip6.h>
131 #include <netinet/tcp.h>
132 #include <netinet/tcp_fsm.h>
133 #include <netinet/tcp_seq.h>
134 #include <netinet/tcp_timer.h>
135 #include <netinet/tcp_var.h>
136 #include <netinet/tcpip.h>
137 #include <netinet/tcp_debug.h>
138 /* for wait queue based select */
139 #include <kern/waitq.h>
140 #include <kern/kalloc.h>
141 #include <sys/vnode_internal.h>
142
143 /* XXX should be in a header file somewhere */
144 void evsofree(struct socket *);
145 void evpipefree(struct pipe *);
146 void postpipeevent(struct pipe *, int);
147 void postevent(struct socket *, struct sockbuf *, int);
148 extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t timeoutp);
149 extern void delay(int);
150
151 int rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval);
152 int wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval);
153
154 __private_extern__ int dofileread(vfs_context_t ctx, struct fileproc *fp,
155 user_addr_t bufp, user_size_t nbyte,
156 off_t offset, int flags, user_ssize_t *retval);
157 __private_extern__ int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
158 user_addr_t bufp, user_size_t nbyte,
159 off_t offset, int flags, user_ssize_t *retval);
160 __private_extern__ int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode);
161 __private_extern__ void donefileread(struct proc *p, struct fileproc *fp_ret, int fd);
162
163
164 /* Conflict wait queue for when selects collide (opaque type) */
165 struct waitq select_conflict_queue;
166
167 /*
168 * Init routine called from bsd_init.c
169 */
170 void select_waitq_init(void);
171 void
172 select_waitq_init(void)
173 {
174 waitq_init(&select_conflict_queue, SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
175 }
176
177 #define f_flag f_fglob->fg_flag
178 #define f_type f_fglob->fg_ops->fo_type
179 #define f_msgcount f_fglob->fg_msgcount
180 #define f_cred f_fglob->fg_cred
181 #define f_ops f_fglob->fg_ops
182 #define f_offset f_fglob->fg_offset
183 #define f_data f_fglob->fg_data
184
185 /*
186 * Read system call.
187 *
188 * Returns: 0 Success
189 * preparefileread:EBADF
190 * preparefileread:ESPIPE
191 * preparefileread:ENXIO
192 * preparefileread:EBADF
193 * dofileread:???
194 */
195 int
196 read(struct proc *p, struct read_args *uap, user_ssize_t *retval)
197 {
198 __pthread_testcancel(1);
199 return(read_nocancel(p, (struct read_nocancel_args *)uap, retval));
200 }
201
202 int
203 read_nocancel(struct proc *p, struct read_nocancel_args *uap, user_ssize_t *retval)
204 {
205 struct fileproc *fp;
206 int error;
207 int fd = uap->fd;
208 struct vfs_context context;
209
210 if ( (error = preparefileread(p, &fp, fd, 0)) )
211 return (error);
212
213 context = *(vfs_context_current());
214 context.vc_ucred = fp->f_fglob->fg_cred;
215
216 error = dofileread(&context, fp, uap->cbuf, uap->nbyte,
217 (off_t)-1, 0, retval);
218
219 donefileread(p, fp, fd);
220
221 return (error);
222 }
223
224 /*
225 * Pread system call
226 *
227 * Returns: 0 Success
228 * preparefileread:EBADF
229 * preparefileread:ESPIPE
230 * preparefileread:ENXIO
231 * preparefileread:EBADF
232 * dofileread:???
233 */
234 int
235 pread(struct proc *p, struct pread_args *uap, user_ssize_t *retval)
236 {
237 __pthread_testcancel(1);
238 return(pread_nocancel(p, (struct pread_nocancel_args *)uap, retval));
239 }
240
241 int
242 pread_nocancel(struct proc *p, struct pread_nocancel_args *uap, user_ssize_t *retval)
243 {
244 struct fileproc *fp = NULL; /* fp set by preparefileread() */
245 int fd = uap->fd;
246 int error;
247 struct vfs_context context;
248
249 if ( (error = preparefileread(p, &fp, fd, 1)) )
250 goto out;
251
252 context = *(vfs_context_current());
253 context.vc_ucred = fp->f_fglob->fg_cred;
254
255 error = dofileread(&context, fp, uap->buf, uap->nbyte,
256 uap->offset, FOF_OFFSET, retval);
257
258 donefileread(p, fp, fd);
259
260 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE),
261 uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
262
263 out:
264 return (error);
265 }
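/*
 * Illustrative userspace sketch (not part of the original file; the path is
 * hypothetical): pread(2) reads from an explicit offset without moving the
 * descriptor's current file offset, which is why the path above passes
 * FOF_OFFSET to dofileread() instead of (off_t)-1.
 *
 *	char buf[512];
 *	int fd = open("/tmp/example.dat", O_RDONLY);
 *	ssize_t n = pread(fd, buf, sizeof(buf), 4096);
 *	// a following read(fd, ...) still starts at the unmodified offset
 */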
266
267 /*
268 * Code common for read and pread
269 */
270
271 void
272 donefileread(struct proc *p, struct fileproc *fp, int fd)
273 {
274 proc_fdlock_spin(p);
275 fp_drop(p, fd, fp, 1);
276 proc_fdunlock(p);
277 }
278
279 /*
280 * Returns: 0 Success
281 * EBADF
282 * ESPIPE
283 * ENXIO
284 * fp_lookup:EBADF
285 * fo_read:???
286 */
287 int
288 preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pread)
289 {
290 vnode_t vp;
291 int error;
292 struct fileproc *fp;
293
294 AUDIT_ARG(fd, fd);
295
296 proc_fdlock_spin(p);
297
298 error = fp_lookup(p, fd, &fp, 1);
299
300 if (error) {
301 proc_fdunlock(p);
302 return (error);
303 }
304 if ((fp->f_flag & FREAD) == 0) {
305 error = EBADF;
306 goto out;
307 }
308 if (check_for_pread && (fp->f_type != DTYPE_VNODE)) {
309 error = ESPIPE;
310 goto out;
311 }
312 if (fp->f_type == DTYPE_VNODE) {
313 vp = (struct vnode *)fp->f_fglob->fg_data;
314
315 if (check_for_pread && (vnode_isfifo(vp))) {
316 error = ESPIPE;
317 goto out;
318 }
319 if (check_for_pread && (vp->v_flag & VISTTY)) {
320 error = ENXIO;
321 goto out;
322 }
323 }
324
325 *fp_ret = fp;
326
327 proc_fdunlock(p);
328 return (0);
329
330 out:
331 fp_drop(p, fd, fp, 1);
332 proc_fdunlock(p);
333 return (error);
334 }
335
336
337 /*
338 * Returns: 0 Success
339 * EINVAL
340 * fo_read:???
341 */
342 __private_extern__ int
343 dofileread(vfs_context_t ctx, struct fileproc *fp,
344 user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
345 user_ssize_t *retval)
346 {
347 uio_t auio;
348 user_ssize_t bytecnt;
349 long error = 0;
350 char uio_buf[ UIO_SIZEOF(1) ];
351
352 if (nbyte > INT_MAX)
353 return (EINVAL);
354
355 if (IS_64BIT_PROCESS(vfs_context_proc(ctx))) {
356 auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_READ,
357 &uio_buf[0], sizeof(uio_buf));
358 } else {
359 auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_READ,
360 &uio_buf[0], sizeof(uio_buf));
361 }
362 uio_addiov(auio, bufp, nbyte);
363
364 bytecnt = nbyte;
365
366 if ((error = fo_read(fp, auio, flags, ctx))) {
367 if (uio_resid(auio) != bytecnt && (error == ERESTART ||
368 error == EINTR || error == EWOULDBLOCK))
369 error = 0;
370 }
371 bytecnt -= uio_resid(auio);
372
373 *retval = bytecnt;
374
375 return (error);
376 }
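/*
 * Minimal sketch of the single-iovec uio pattern used above, for an
 * in-kernel caller reading into a user buffer without a heap allocation
 * (assumes a valid fileproc, context, and a 64-bit caller; error handling
 * elided):
 *
 *	char uio_buf[UIO_SIZEOF(1)];
 *	uio_t auio = uio_createwithbuffer(1, 0, UIO_USERSPACE64, UIO_READ,
 *	    &uio_buf[0], sizeof(uio_buf));
 *	uio_addiov(auio, user_bufp, len);
 *	error = fo_read(fp, auio, 0, ctx);
 *	bytes_read = len - uio_resid(auio);	// residual gives the shortfall
 */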
377
378 /*
379 * Scatter read system call.
380 *
381 * Returns: 0 Success
382 * EINVAL
383 * ENOMEM
384 * copyin:EFAULT
385 * rd_uio:???
386 */
387 int
388 readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval)
389 {
390 __pthread_testcancel(1);
391 return(readv_nocancel(p, (struct readv_nocancel_args *)uap, retval));
392 }
393
394 int
395 readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *retval)
396 {
397 uio_t auio = NULL;
398 int error;
399 struct user_iovec *iovp;
400
 401 	/* Verify range before calling uio_create() */
402 if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV)
403 return (EINVAL);
404
405 /* allocate a uio large enough to hold the number of iovecs passed */
406 auio = uio_create(uap->iovcnt, 0,
407 (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
408 UIO_READ);
409
410 /* get location of iovecs within the uio. then copyin the iovecs from
411 * user space.
412 */
413 iovp = uio_iovsaddr(auio);
414 if (iovp == NULL) {
415 error = ENOMEM;
416 goto ExitThisRoutine;
417 }
418 error = copyin_user_iovec_array(uap->iovp,
419 IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
420 uap->iovcnt, iovp);
421 if (error) {
422 goto ExitThisRoutine;
423 }
424
425 /* finalize uio_t for use and do the IO
426 */
427 error = uio_calculateresid(auio);
428 if (error) {
429 goto ExitThisRoutine;
430 }
431 error = rd_uio(p, uap->fd, auio, retval);
432
433 ExitThisRoutine:
434 if (auio != NULL) {
435 uio_free(auio);
436 }
437 return (error);
438 }
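/*
 * Illustrative userspace sketch (buffers are hypothetical): readv(2) supplies
 * the iovec array that the code above copies in with copyin_user_iovec_array()
 * and validates against UIO_MAXIOV.
 *
 *	char hdr[16], body[4096];
 *	struct iovec iov[2] = {
 *		{ .iov_base = hdr,  .iov_len = sizeof(hdr)  },
 *		{ .iov_base = body, .iov_len = sizeof(body) },
 *	};
 *	ssize_t n = readv(fd, iov, 2);	// fills hdr first, then body
 */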
439
440 /*
441 * Write system call
442 *
443 * Returns: 0 Success
444 * EBADF
445 * fp_lookup:EBADF
446 * dofilewrite:???
447 */
448 int
449 write(struct proc *p, struct write_args *uap, user_ssize_t *retval)
450 {
451 __pthread_testcancel(1);
452 return(write_nocancel(p, (struct write_nocancel_args *)uap, retval));
453
454 }
455
456 int
457 write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *retval)
458 {
459 struct fileproc *fp;
460 int error;
461 int fd = uap->fd;
462 bool wrote_some = false;
463
464 AUDIT_ARG(fd, fd);
465
466 error = fp_lookup(p,fd,&fp,0);
467 if (error)
468 return(error);
469 if ((fp->f_flag & FWRITE) == 0) {
470 error = EBADF;
471 } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
472 proc_fdlock(p);
473 error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
474 proc_fdunlock(p);
475 } else {
476 struct vfs_context context = *(vfs_context_current());
477 context.vc_ucred = fp->f_fglob->fg_cred;
478
479 error = dofilewrite(&context, fp, uap->cbuf, uap->nbyte,
480 (off_t)-1, 0, retval);
481
482 wrote_some = *retval > 0;
483 }
484 if (wrote_some)
485 fp_drop_written(p, fd, fp);
486 else
487 fp_drop(p, fd, fp, 0);
488 return(error);
489 }
490
491 /*
492 * pwrite system call
493 *
494 * Returns: 0 Success
495 * EBADF
496 * ESPIPE
497 * ENXIO
498 * EINVAL
499 * fp_lookup:EBADF
500 * dofilewrite:???
501 */
502 int
503 pwrite(struct proc *p, struct pwrite_args *uap, user_ssize_t *retval)
504 {
505 __pthread_testcancel(1);
506 return(pwrite_nocancel(p, (struct pwrite_nocancel_args *)uap, retval));
507 }
508
509 int
510 pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t *retval)
511 {
512 struct fileproc *fp;
513 int error;
514 int fd = uap->fd;
515 vnode_t vp = (vnode_t)0;
516 bool wrote_some = false;
517
518 AUDIT_ARG(fd, fd);
519
520 error = fp_lookup(p,fd,&fp,0);
521 if (error)
522 return(error);
523
524 if ((fp->f_flag & FWRITE) == 0) {
525 error = EBADF;
526 } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
527 proc_fdlock(p);
528 error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
529 proc_fdunlock(p);
530 } else {
531 struct vfs_context context = *vfs_context_current();
532 context.vc_ucred = fp->f_fglob->fg_cred;
533
534 if (fp->f_type != DTYPE_VNODE) {
535 error = ESPIPE;
536 goto errout;
537 }
538 vp = (vnode_t)fp->f_fglob->fg_data;
539 if (vnode_isfifo(vp)) {
540 error = ESPIPE;
541 goto errout;
542 }
543 if ((vp->v_flag & VISTTY)) {
544 error = ENXIO;
545 goto errout;
546 }
547 if (uap->offset == (off_t)-1) {
548 error = EINVAL;
549 goto errout;
550 }
551
552 error = dofilewrite(&context, fp, uap->buf, uap->nbyte,
553 uap->offset, FOF_OFFSET, retval);
554 wrote_some = *retval > 0;
555 }
556 errout:
557 if (wrote_some)
558 fp_drop_written(p, fd, fp);
559 else
560 fp_drop(p, fd, fp, 0);
561
562 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE),
563 uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
564
565 return(error);
566 }
567
568 /*
569 * Returns: 0 Success
570 * EINVAL
571 * <fo_write>:EPIPE
572 * <fo_write>:??? [indirect through struct fileops]
573 */
574 __private_extern__ int
575 dofilewrite(vfs_context_t ctx, struct fileproc *fp,
576 user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
577 user_ssize_t *retval)
578 {
579 uio_t auio;
580 long error = 0;
581 user_ssize_t bytecnt;
582 char uio_buf[ UIO_SIZEOF(1) ];
583
584 if (nbyte > INT_MAX) {
585 *retval = 0;
586 return (EINVAL);
587 }
588
589 if (IS_64BIT_PROCESS(vfs_context_proc(ctx))) {
590 auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_WRITE,
591 &uio_buf[0], sizeof(uio_buf));
592 } else {
593 auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_WRITE,
594 &uio_buf[0], sizeof(uio_buf));
595 }
596 uio_addiov(auio, bufp, nbyte);
597
598 bytecnt = nbyte;
599 if ((error = fo_write(fp, auio, flags, ctx))) {
600 if (uio_resid(auio) != bytecnt && (error == ERESTART ||
601 error == EINTR || error == EWOULDBLOCK))
602 error = 0;
603 /* The socket layer handles SIGPIPE */
604 if (error == EPIPE && fp->f_type != DTYPE_SOCKET &&
605 (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0) {
606 /* XXX Raise the signal on the thread? */
607 psignal(vfs_context_proc(ctx), SIGPIPE);
608 }
609 }
610 bytecnt -= uio_resid(auio);
611 *retval = bytecnt;
612
613 return (error);
614 }
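/*
 * Note on the SIGPIPE path above, with an illustrative userspace sketch
 * (handle_broken_pipe is hypothetical): a write to a broken pipe normally
 * raises SIGPIPE in addition to returning EPIPE, unless the descriptor opted
 * out via fcntl(F_SETNOSIGPIPE) - which sets FG_NOSIGPIPE on the fileglob -
 * or the descriptor is a socket, in which case the socket layer handles the
 * signal itself.
 *
 *	fcntl(fd, F_SETNOSIGPIPE, 1);		// suppress SIGPIPE for this fd
 *	if (write(fd, buf, len) == -1 && errno == EPIPE)
 *		handle_broken_pipe();
 */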
615
616 /*
617 * Gather write system call
618 */
619 int
620 writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval)
621 {
622 __pthread_testcancel(1);
623 return(writev_nocancel(p, (struct writev_nocancel_args *)uap, retval));
624 }
625
626 int
627 writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t *retval)
628 {
629 uio_t auio = NULL;
630 int error;
631 struct fileproc *fp;
632 struct user_iovec *iovp;
633 bool wrote_some = false;
634
635 AUDIT_ARG(fd, uap->fd);
636
 637 	/* Verify range before calling uio_create() */
638 if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV)
639 return (EINVAL);
640
641 /* allocate a uio large enough to hold the number of iovecs passed */
642 auio = uio_create(uap->iovcnt, 0,
643 (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
644 UIO_WRITE);
645
646 /* get location of iovecs within the uio. then copyin the iovecs from
647 * user space.
648 */
649 iovp = uio_iovsaddr(auio);
650 if (iovp == NULL) {
651 error = ENOMEM;
652 goto ExitThisRoutine;
653 }
654 error = copyin_user_iovec_array(uap->iovp,
655 IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
656 uap->iovcnt, iovp);
657 if (error) {
658 goto ExitThisRoutine;
659 }
660
661 /* finalize uio_t for use and do the IO
662 */
663 error = uio_calculateresid(auio);
664 if (error) {
665 goto ExitThisRoutine;
666 }
667
668 error = fp_lookup(p, uap->fd, &fp, 0);
669 if (error)
670 goto ExitThisRoutine;
671
672 if ((fp->f_flag & FWRITE) == 0) {
673 error = EBADF;
674 } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
675 proc_fdlock(p);
676 error = fp_guard_exception(p, uap->fd, fp, kGUARD_EXC_WRITE);
677 proc_fdunlock(p);
678 } else {
679 error = wr_uio(p, fp, auio, retval);
680 wrote_some = *retval > 0;
681 }
682
683 if (wrote_some)
684 fp_drop_written(p, uap->fd, fp);
685 else
686 fp_drop(p, uap->fd, fp, 0);
687
688 ExitThisRoutine:
689 if (auio != NULL) {
690 uio_free(auio);
691 }
692 return (error);
693 }
694
695
696 int
697 wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval)
698 {
699 int error;
700 user_ssize_t count;
701 struct vfs_context context = *vfs_context_current();
702
703 count = uio_resid(uio);
704
705 context.vc_ucred = fp->f_cred;
706 error = fo_write(fp, uio, 0, &context);
707 if (error) {
708 if (uio_resid(uio) != count && (error == ERESTART ||
709 error == EINTR || error == EWOULDBLOCK))
710 error = 0;
711 /* The socket layer handles SIGPIPE */
712 if (error == EPIPE && fp->f_type != DTYPE_SOCKET &&
713 (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0)
714 psignal(p, SIGPIPE);
715 }
716 *retval = count - uio_resid(uio);
717
718 return(error);
719 }
720
721
722 int
723 rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval)
724 {
725 struct fileproc *fp;
726 int error;
727 user_ssize_t count;
728 struct vfs_context context = *vfs_context_current();
729
730 if ( (error = preparefileread(p, &fp, fdes, 0)) )
731 return (error);
732
733 count = uio_resid(uio);
734
735 context.vc_ucred = fp->f_cred;
736
737 error = fo_read(fp, uio, 0, &context);
738
739 if (error) {
740 if (uio_resid(uio) != count && (error == ERESTART ||
741 error == EINTR || error == EWOULDBLOCK))
742 error = 0;
743 }
744 *retval = count - uio_resid(uio);
745
746 donefileread(p, fp, fdes);
747
748 return (error);
749 }
750
751 /*
752 * Ioctl system call
753 *
754 * Returns: 0 Success
755 * EBADF
756 * ENOTTY
757 * ENOMEM
758 * ESRCH
759 * copyin:EFAULT
 760  * copyout:EFAULT
761 * fp_lookup:EBADF Bad file descriptor
762 * fo_ioctl:???
763 */
764 int
765 ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval)
766 {
767 struct fileproc *fp = NULL;
768 int error = 0;
769 u_int size = 0;
770 caddr_t datap = NULL, memp = NULL;
771 boolean_t is64bit = FALSE;
772 int tmp = 0;
773 #define STK_PARAMS 128
774 char stkbuf[STK_PARAMS];
775 int fd = uap->fd;
776 u_long com = uap->com;
777 struct vfs_context context = *vfs_context_current();
778
779 AUDIT_ARG(fd, uap->fd);
780 AUDIT_ARG(addr, uap->data);
781
782 is64bit = proc_is64bit(p);
783 #if CONFIG_AUDIT
784 if (is64bit)
785 AUDIT_ARG(value64, com);
786 else
787 AUDIT_ARG(cmd, CAST_DOWN_EXPLICIT(int, com));
788 #endif /* CONFIG_AUDIT */
789
790 /*
791 * Interpret high order word to find amount of data to be
792 * copied to/from the user's address space.
793 */
794 size = IOCPARM_LEN(com);
795 if (size > IOCPARM_MAX)
796 return ENOTTY;
797 if (size > sizeof (stkbuf)) {
798 if ((memp = (caddr_t)kalloc(size)) == 0)
799 return ENOMEM;
800 datap = memp;
801 } else
802 datap = &stkbuf[0];
803 if (com & IOC_IN) {
804 if (size) {
805 error = copyin(uap->data, datap, size);
806 if (error)
807 goto out_nofp;
808 } else {
 809 /* XXX - IOC_IN and no size? we should probably return an error here!! */
810 if (is64bit) {
811 *(user_addr_t *)datap = uap->data;
812 }
813 else {
814 *(uint32_t *)datap = (uint32_t)uap->data;
815 }
816 }
817 } else if ((com & IOC_OUT) && size)
818 /*
819 * Zero the buffer so the user always
820 * gets back something deterministic.
821 */
822 bzero(datap, size);
823 else if (com & IOC_VOID) {
824 /* XXX - this is odd since IOC_VOID means no parameters */
825 if (is64bit) {
826 *(user_addr_t *)datap = uap->data;
827 }
828 else {
829 *(uint32_t *)datap = (uint32_t)uap->data;
830 }
831 }
832
833 proc_fdlock(p);
834 error = fp_lookup(p,fd,&fp,1);
835 if (error) {
836 proc_fdunlock(p);
837 goto out_nofp;
838 }
839
840 AUDIT_ARG(file, p, fp);
841
842 if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
843 error = EBADF;
844 goto out;
845 }
846
847 context.vc_ucred = fp->f_fglob->fg_cred;
848
849 #if CONFIG_MACF
850 error = mac_file_check_ioctl(context.vc_ucred, fp->f_fglob, com);
851 if (error)
852 goto out;
853 #endif
854
855 switch (com) {
856 case FIONCLEX:
857 *fdflags(p, fd) &= ~UF_EXCLOSE;
858 break;
859
860 case FIOCLEX:
861 *fdflags(p, fd) |= UF_EXCLOSE;
862 break;
863
864 case FIONBIO:
865 if ( (tmp = *(int *)datap) )
866 fp->f_flag |= FNONBLOCK;
867 else
868 fp->f_flag &= ~FNONBLOCK;
869 error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
870 break;
871
872 case FIOASYNC:
873 if ( (tmp = *(int *)datap) )
874 fp->f_flag |= FASYNC;
875 else
876 fp->f_flag &= ~FASYNC;
877 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
878 break;
879
880 case FIOSETOWN:
881 tmp = *(int *)datap;
882 if (fp->f_type == DTYPE_SOCKET) {
883 ((struct socket *)fp->f_data)->so_pgid = tmp;
884 break;
885 }
886 if (fp->f_type == DTYPE_PIPE) {
887 error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context);
888 break;
889 }
890 if (tmp <= 0) {
891 tmp = -tmp;
892 } else {
893 struct proc *p1 = proc_find(tmp);
894 if (p1 == 0) {
895 error = ESRCH;
896 break;
897 }
898 tmp = p1->p_pgrpid;
899 proc_rele(p1);
900 }
901 error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context);
902 break;
903
904 case FIOGETOWN:
905 if (fp->f_type == DTYPE_SOCKET) {
906 *(int *)datap = ((struct socket *)fp->f_data)->so_pgid;
907 break;
908 }
909 error = fo_ioctl(fp, TIOCGPGRP, datap, &context);
910 *(int *)datap = -*(int *)datap;
911 break;
912
913 default:
914 error = fo_ioctl(fp, com, datap, &context);
915 /*
916 * Copy any data to user, size was
917 * already set and checked above.
918 */
919 if (error == 0 && (com & IOC_OUT) && size)
920 error = copyout(datap, uap->data, (u_int)size);
921 break;
922 }
923 out:
924 fp_drop(p, fd, fp, 1);
925 proc_fdunlock(p);
926
927 out_nofp:
928 if (memp)
929 kfree(memp, size);
930 return(error);
931 }
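/*
 * Sketch of the command encoding that the handler above decodes with
 * IOCPARM_LEN(), IOC_IN and IOC_OUT (standard <sys/ioccom.h> macros); the
 * FIONBIO example matches the case handled in the switch above, and sock_fd
 * is hypothetical:
 *
 *	// FIONBIO is _IOW('f', 126, int), i.e.
 *	//   IOC_IN | (sizeof(int) << 16) | ('f' << 8) | 126
 *	int on = 1;
 *	ioctl(sock_fd, FIONBIO, &on);	// kernel copies in sizeof(int) bytes,
 *					// then sets/clears FNONBLOCK
 */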
932
933 int selwait, nselcoll;
934 #define SEL_FIRSTPASS 1
935 #define SEL_SECONDPASS 2
936 extern int selcontinue(int error);
937 extern int selprocess(int error, int sel_pass);
938 static int selscan(struct proc *p, struct _select * sel, struct _select_data * seldata,
939 int nfd, int32_t *retval, int sel_pass, struct waitq_set *wqset);
940 static int selcount(struct proc *p, u_int32_t *ibits, int nfd, int *count);
941 static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup, int fromselcount);
942 static int seldrop(struct proc *p, u_int32_t *ibits, int nfd);
943
944 /*
945 * Select system call.
946 *
947 * Returns: 0 Success
948 * EINVAL Invalid argument
949 * EAGAIN Nonconformant error if allocation fails
950 * selprocess:???
951 */
952 int
953 select(struct proc *p, struct select_args *uap, int32_t *retval)
954 {
955 __pthread_testcancel(1);
956 return(select_nocancel(p, (struct select_nocancel_args *)uap, retval));
957 }
958
959 int
960 select_nocancel(struct proc *p, struct select_nocancel_args *uap, int32_t *retval)
961 {
962 int error = 0;
963 u_int ni, nw;
964 thread_t th_act;
965 struct uthread *uth;
966 struct _select *sel;
967 struct _select_data *seldata;
968 int needzerofill = 1;
969 int count = 0;
970 size_t sz = 0;
971
972 th_act = current_thread();
973 uth = get_bsdthread_info(th_act);
974 sel = &uth->uu_select;
975 seldata = &uth->uu_kevent.ss_select_data;
976 *retval = 0;
977
978 seldata->args = uap;
979 seldata->retval = retval;
980 seldata->wqp = NULL;
981 seldata->count = 0;
982
983 if (uap->nd < 0) {
984 return (EINVAL);
985 }
986
987 /* select on thread of process that already called proc_exit() */
988 if (p->p_fd == NULL) {
989 return (EBADF);
990 }
991
992 if (uap->nd > p->p_fd->fd_nfiles)
993 uap->nd = p->p_fd->fd_nfiles; /* forgiving; slightly wrong */
994
995 nw = howmany(uap->nd, NFDBITS);
996 ni = nw * sizeof(fd_mask);
997
998 /*
999 * if the previously allocated space for the bits is smaller than
1000 * what is requested or no space has yet been allocated for this
1001 * thread, allocate enough space now.
1002 *
 1003  * Note: If this allocation fails, select() will return EAGAIN; this
 1004  * is the same thing poll() returns in a no-memory situation, but
1005 * it is not a POSIX compliant error code for select().
1006 */
1007 if (sel->nbytes < (3 * ni)) {
1008 int nbytes = 3 * ni;
1009
1010 /* Free previous allocation, if any */
1011 if (sel->ibits != NULL)
1012 FREE(sel->ibits, M_TEMP);
1013 if (sel->obits != NULL) {
1014 FREE(sel->obits, M_TEMP);
1015 /* NULL out; subsequent ibits allocation may fail */
1016 sel->obits = NULL;
1017 }
1018
1019 MALLOC(sel->ibits, u_int32_t *, nbytes, M_TEMP, M_WAITOK | M_ZERO);
1020 if (sel->ibits == NULL)
1021 return (EAGAIN);
1022 MALLOC(sel->obits, u_int32_t *, nbytes, M_TEMP, M_WAITOK | M_ZERO);
1023 if (sel->obits == NULL) {
1024 FREE(sel->ibits, M_TEMP);
1025 sel->ibits = NULL;
1026 return (EAGAIN);
1027 }
1028 sel->nbytes = nbytes;
1029 needzerofill = 0;
1030 }
1031
1032 if (needzerofill) {
1033 bzero((caddr_t)sel->ibits, sel->nbytes);
1034 bzero((caddr_t)sel->obits, sel->nbytes);
1035 }
1036
1037 /*
1038 * get the bits from the user address space
1039 */
1040 #define getbits(name, x) \
1041 do { \
1042 if (uap->name && (error = copyin(uap->name, \
1043 (caddr_t)&sel->ibits[(x) * nw], ni))) \
1044 goto continuation; \
1045 } while (0)
1046
1047 getbits(in, 0);
1048 getbits(ou, 1);
1049 getbits(ex, 2);
1050 #undef getbits
1051
1052 if (uap->tv) {
1053 struct timeval atv;
1054 if (IS_64BIT_PROCESS(p)) {
1055 struct user64_timeval atv64;
1056 error = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
1057 /* Loses resolution - assume timeout < 68 years */
1058 atv.tv_sec = atv64.tv_sec;
1059 atv.tv_usec = atv64.tv_usec;
1060 } else {
1061 struct user32_timeval atv32;
1062 error = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
1063 atv.tv_sec = atv32.tv_sec;
1064 atv.tv_usec = atv32.tv_usec;
1065 }
1066 if (error)
1067 goto continuation;
1068 if (itimerfix(&atv)) {
1069 error = EINVAL;
1070 goto continuation;
1071 }
1072
1073 clock_absolutetime_interval_to_deadline(
1074 tvtoabstime(&atv), &seldata->abstime);
1075 }
1076 else
1077 seldata->abstime = 0;
1078
1079 if ( (error = selcount(p, sel->ibits, uap->nd, &count)) ) {
1080 goto continuation;
1081 }
1082
1083 /*
1084 * We need an array of waitq pointers. This is due to the new way
1085 * in which waitqs are linked to sets. When a thread selects on a
1086 * file descriptor, a waitq (embedded in a selinfo structure) is
1087 * added to the thread's local waitq set. There is no longer any
1088 * way to directly iterate over all members of a given waitq set.
1089 * The process of linking a waitq into a set may allocate a link
1090 * table object. Because we can't iterate over all the waitqs to
1091 * which our thread waitq set belongs, we need a way of removing
1092 * this link object!
1093 *
1094 * Thus we need a buffer which will hold one waitq pointer
1095 * per FD being selected. During the tear-down phase we can use
1096 * these pointers to dis-associate the underlying selinfo's waitq
1097 * from our thread's waitq set.
1098 *
1099 * Because we also need to allocate a waitq set for this thread,
1100 * we use a bare buffer pointer to hold all the memory. Note that
1101 * this memory is cached in the thread pointer and not reaped until
 1102  * the thread exits. This is generally OK because threads that
1103 * call select tend to keep calling select repeatedly.
1104 */
1105 sz = ALIGN(sizeof(struct waitq_set)) + (count * sizeof(uint64_t));
1106 if (sz > uth->uu_wqstate_sz) {
1107 /* (re)allocate a buffer to hold waitq pointers */
1108 if (uth->uu_wqset) {
1109 if (waitq_set_is_valid(uth->uu_wqset))
1110 waitq_set_deinit(uth->uu_wqset);
1111 FREE(uth->uu_wqset, M_SELECT);
1112 } else if (uth->uu_wqstate_sz && !uth->uu_wqset)
1113 panic("select: thread structure corrupt! "
1114 "uu_wqstate_sz:%ld, wqstate_buf == NULL",
1115 uth->uu_wqstate_sz);
1116 uth->uu_wqstate_sz = sz;
1117 MALLOC(uth->uu_wqset, struct waitq_set *, sz, M_SELECT, M_WAITOK);
1118 if (!uth->uu_wqset)
1119 panic("can't allocate %ld bytes for wqstate buffer",
1120 uth->uu_wqstate_sz);
1121 waitq_set_init(uth->uu_wqset,
1122 SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST|SYNC_POLICY_DISABLE_IRQ, NULL);
1123 }
1124
1125 if (!waitq_set_is_valid(uth->uu_wqset))
1126 waitq_set_init(uth->uu_wqset,
1127 SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST|SYNC_POLICY_DISABLE_IRQ, NULL);
1128
1129 /* the last chunk of our buffer is an array of waitq pointers */
1130 seldata->wqp = (uint64_t *)((char *)(uth->uu_wqset) + ALIGN(sizeof(struct waitq_set)));
1131 bzero(seldata->wqp, sz - ALIGN(sizeof(struct waitq_set)));
1132
1133 seldata->count = count;
1134
1135 continuation:
1136
1137 if (error) {
1138 /*
1139 * We have already cleaned up any state we established,
1140 * either locally or as a result of selcount(). We don't
1141 * need to wait_subqueue_unlink_all(), since we haven't set
1142 * anything at this point.
1143 */
1144 return (error);
1145 }
1146
1147 return selprocess(0, SEL_FIRSTPASS);
1148 }
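/*
 * Illustrative userspace sketch (sock_fd is hypothetical): the nd argument
 * handled above is the highest descriptor plus one, which is why the kernel
 * sizes each bit vector as howmany(nd, NFDBITS) words.
 *
 *	fd_set rfds;
 *	struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
 *	FD_ZERO(&rfds);
 *	FD_SET(sock_fd, &rfds);
 *	int n = select(sock_fd + 1, &rfds, NULL, NULL, &tv);
 *	if (n > 0 && FD_ISSET(sock_fd, &rfds))
 *		;	// sock_fd is readable
 */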
1149
1150 int
1151 selcontinue(int error)
1152 {
1153 return selprocess(error, SEL_SECONDPASS);
1154 }
1155
1156
1157 /*
1158 * selprocess
1159 *
1160 * Parameters: error The error code from our caller
1161 * sel_pass The pass we are on
1162 */
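/*
 * Summary of the control flow below: on SEL_FIRSTPASS, selscan() links each
 * fd's selinfo waitq into the thread's waitq set (sellinkfp) and records any
 * descriptors that are already ready. If nothing is ready and the timeout
 * has not expired, the thread asserts a wait on its waitq set and blocks in
 * tsleep1() with selcontinue as the continuation. When woken (or preposted),
 * it re-enters as SEL_SECONDPASS, which rescans the descriptors and unlinks
 * the per-fd waitqs again (selunlinkfp); the done: path then drops the
 * f_iocount references via seldrop() and copies the output bits back out.
 */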
1163 int
1164 selprocess(int error, int sel_pass)
1165 {
1166 int ncoll;
1167 u_int ni, nw;
1168 thread_t th_act;
1169 struct uthread *uth;
1170 struct proc *p;
1171 struct select_nocancel_args *uap;
1172 int *retval;
1173 struct _select *sel;
1174 struct _select_data *seldata;
1175 int unwind = 1;
1176 int prepost = 0;
1177 int somewakeup = 0;
1178 int doretry = 0;
1179 wait_result_t wait_result;
1180
1181 p = current_proc();
1182 th_act = current_thread();
1183 uth = get_bsdthread_info(th_act);
1184 sel = &uth->uu_select;
1185 seldata = &uth->uu_kevent.ss_select_data;
1186 uap = seldata->args;
1187 retval = seldata->retval;
1188
1189 if ((error != 0) && (sel_pass == SEL_FIRSTPASS))
1190 unwind = 0;
1191 if (seldata->count == 0)
1192 unwind = 0;
1193 retry:
1194 if (error != 0)
1195 goto done;
1196
1197 ncoll = nselcoll;
1198 OSBitOrAtomic(P_SELECT, &p->p_flag);
1199
1200 /* skip scans if the select is just for timeouts */
1201 if (seldata->count) {
1202 error = selscan(p, sel, seldata, uap->nd, retval, sel_pass, uth->uu_wqset);
1203 if (error || *retval) {
1204 goto done;
1205 }
1206 if (prepost || somewakeup) {
1207 /*
 1208  * if we were preposted or partially woken up, someone else
 1209  * may have already read the data we were notified about;
 1210  * go around and select again if time permits
1211 */
1212 prepost = 0;
1213 somewakeup = 0;
1214 doretry = 1;
1215 }
1216 }
1217
1218 if (uap->tv) {
1219 uint64_t now;
1220
1221 clock_get_uptime(&now);
1222 if (now >= seldata->abstime)
1223 goto done;
1224 }
1225
1226 if (doretry) {
1227 /* cleanup obits and try again */
1228 doretry = 0;
1229 sel_pass = SEL_FIRSTPASS;
1230 goto retry;
1231 }
1232
1233 /*
1234 * To effect a poll, the timeout argument should be
1235 * non-nil, pointing to a zero-valued timeval structure.
1236 */
1237 if (uap->tv && seldata->abstime == 0) {
1238 goto done;
1239 }
1240
 1241 /* No spurious wakeups due to collisions; no need to check for them */
1242 if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) {
1243 sel_pass = SEL_FIRSTPASS;
1244 goto retry;
1245 }
1246
1247 OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1248
 1249 /* if the select is just for a timeout, skip the check */
1250 if (seldata->count && (sel_pass == SEL_SECONDPASS))
1251 panic("selprocess: 2nd pass assertwaiting");
1252
1253 /* waitq_set has waitqueue as first element */
1254 wait_result = waitq_assert_wait64_leeway((struct waitq *)uth->uu_wqset,
1255 NO_EVENT64, THREAD_ABORTSAFE,
1256 TIMEOUT_URGENCY_USER_NORMAL,
1257 seldata->abstime,
1258 TIMEOUT_NO_LEEWAY);
1259 if (wait_result != THREAD_AWAKENED) {
1260 /* there are no preposted events */
1261 error = tsleep1(NULL, PSOCK | PCATCH,
1262 "select", 0, selcontinue);
1263 } else {
1264 prepost = 1;
1265 error = 0;
1266 }
1267
1268 if (error == 0) {
1269 sel_pass = SEL_SECONDPASS;
1270 if (!prepost)
1271 somewakeup = 1;
1272 goto retry;
1273 }
1274 done:
1275 if (unwind) {
1276 seldrop(p, sel->ibits, uap->nd);
1277 waitq_set_deinit(uth->uu_wqset);
1278 /*
1279 * zero out the waitq pointer array to avoid use-after free
1280 * errors in the selcount error path (seldrop_locked) if/when
1281 * the thread re-calls select().
1282 */
1283 bzero((void *)uth->uu_wqset, uth->uu_wqstate_sz);
1284 }
1285 OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1286 /* select is not restarted after signals... */
1287 if (error == ERESTART)
1288 error = EINTR;
1289 if (error == EWOULDBLOCK)
1290 error = 0;
1291 nw = howmany(uap->nd, NFDBITS);
1292 ni = nw * sizeof(fd_mask);
1293
1294 #define putbits(name, x) \
1295 do { \
1296 if (uap->name && (error2 = \
1297 copyout((caddr_t)&sel->obits[(x) * nw], uap->name, ni))) \
1298 error = error2; \
1299 } while (0)
1300
1301 if (error == 0) {
1302 int error2;
1303
1304 putbits(in, 0);
1305 putbits(ou, 1);
1306 putbits(ex, 2);
1307 #undef putbits
1308 }
1309 return(error);
1310 }
1311
1312
1313 /**
1314 * remove the fileproc's underlying waitq from the supplied waitq set;
1315 * clear FP_INSELECT when appropriate
1316 *
1317 * Parameters:
1318 * fp File proc that is potentially currently in select
1319 * wqset Waitq set to which the fileproc may belong
1320 * (usually this is the thread's private waitq set)
1321 * Conditions:
1322 * proc_fdlock is held
1323 */
1324 static void selunlinkfp(struct fileproc *fp, uint64_t wqp_id, struct waitq_set *wqset)
1325 {
1326 int valid_set = waitq_set_is_valid(wqset);
1327 int valid_q = !!wqp_id;
1328
1329 /*
1330 * This could be called (from selcount error path) before we setup
1331 * the thread's wqset. Check the wqset passed in, and only unlink if
1332 * the set is valid.
1333 */
1334
1335 /* unlink the underlying waitq from the input set (thread waitq set) */
1336 if (valid_q && valid_set)
1337 waitq_unlink_by_prepost_id(wqp_id, wqset);
1338
1339 /* allow passing a NULL/invalid fp for seldrop unwind */
1340 if (!fp || !(fp->f_flags & (FP_INSELECT|FP_SELCONFLICT)))
1341 return;
1342
1343 /*
1344 * We can always remove the conflict queue from our thread's set: this
1345 * will not affect other threads that potentially need to be awoken on
1346 * the conflict queue during a fileproc_drain - those sets will still
1347 * be linked with the global conflict queue, and the last waiter
1348 * on the fp clears the CONFLICT marker.
1349 */
1350 if (valid_set && (fp->f_flags & FP_SELCONFLICT))
1351 waitq_unlink(&select_conflict_queue, wqset);
1352
1353 /* jca: TODO:
1354 * This isn't quite right - we don't actually know if this
1355 * fileproc is in another select or not! Here we just assume
1356 * that if we were the first thread to select on the FD, then
1357 * we'll be the one to clear this flag...
1358 */
1359 if (valid_set && fp->f_wset == (void *)wqset) {
1360 fp->f_flags &= ~FP_INSELECT;
1361 fp->f_wset = NULL;
1362 }
1363 }
1364
1365 /**
1366 * connect a fileproc to the given wqset, potentially bridging to a waitq
1367 * pointed to indirectly by wq_data
1368 *
1369 * Parameters:
1370 * fp File proc potentially currently in select
1371 * wq_data Pointer to a pointer to a waitq (could be NULL)
1372 * wqset Waitq set to which the fileproc should now belong
1373 * (usually this is the thread's private waitq set)
1374 *
1375 * Conditions:
1376 * proc_fdlock is held
1377 */
1378 static uint64_t sellinkfp(struct fileproc *fp, void **wq_data, struct waitq_set *wqset)
1379 {
1380 struct waitq *f_wq = NULL;
1381
1382 if ((fp->f_flags & FP_INSELECT) != FP_INSELECT) {
1383 if (wq_data)
1384 panic("non-null data:%p on fp:%p not in select?!"
1385 "(wqset:%p)", wq_data, fp, wqset);
1386 return 0;
1387 }
1388
1389 if ((fp->f_flags & FP_SELCONFLICT) == FP_SELCONFLICT) {
1390 /*
1391 * The conflict queue requires disabling interrupts, so we
1392 * need to explicitly reserve a link object to avoid a
1393 * panic/assert in the waitq code. Hopefully this extra step
1394 * can be avoided if we can split the waitq structure into
1395 * blocking and linkage sub-structures.
1396 */
1397 uint64_t reserved_link = waitq_link_reserve(&select_conflict_queue);
1398 waitq_link(&select_conflict_queue, wqset, WAITQ_SHOULD_LOCK, &reserved_link);
1399 waitq_link_release(reserved_link);
1400 }
1401
1402 /*
1403 * The wq_data parameter has potentially been set by selrecord called
1404 * from a subsystems fo_select() function. If the subsystem does not
1405 * call selrecord, then wq_data will be NULL
1406 *
1407 * Use memcpy to get the value into a proper pointer because
1408 * wq_data most likely points to a stack variable that could be
1409 * unaligned on 32-bit systems.
1410 */
1411 if (wq_data) {
1412 memcpy(&f_wq, wq_data, sizeof(f_wq));
1413 if (!waitq_is_valid(f_wq))
1414 f_wq = NULL;
1415 }
1416
1417 /* record the first thread's wqset in the fileproc structure */
1418 if (!fp->f_wset)
1419 fp->f_wset = (void *)wqset;
1420
1421 /* handles NULL f_wq */
1422 return waitq_get_prepost_id(f_wq);
1423 }
1424
1425
1426 /*
1427 * selscan
1428 *
1429 * Parameters: p Process performing the select
1430 * sel The per-thread select context structure
1431 * nfd The number of file descriptors to scan
1432 * retval The per thread system call return area
1433 * sel_pass Which pass this is; allowed values are
1434 * SEL_FIRSTPASS and SEL_SECONDPASS
1435 * wqset The per thread wait queue set
1436 *
1437 * Returns: 0 Success
1438 * EIO Invalid p->p_fd field XXX Obsolete?
1439 * EBADF One of the files in the bit vector is
1440 * invalid.
1441 */
1442 static int
1443 selscan(struct proc *p, struct _select *sel, struct _select_data * seldata,
1444 int nfd, int32_t *retval, int sel_pass, struct waitq_set *wqset)
1445 {
1446 struct filedesc *fdp = p->p_fd;
1447 int msk, i, j, fd;
1448 u_int32_t bits;
1449 struct fileproc *fp;
1450 int n = 0; /* count of bits */
1451 int nc = 0; /* bit vector offset (nc'th bit) */
1452 static int flag[3] = { FREAD, FWRITE, 0 };
1453 u_int32_t *iptr, *optr;
1454 u_int nw;
1455 u_int32_t *ibits, *obits;
1456 uint64_t reserved_link, *rl_ptr = NULL;
1457 int count;
1458 struct vfs_context context = *vfs_context_current();
1459
1460 /*
 1461  * Problems seen when rebooting, due to Mac OS X signal handling
 1462  * issues in Beaker1C; verify that p->p_fd is valid
1463 */
1464 if (fdp == NULL) {
1465 *retval=0;
1466 return(EIO);
1467 }
1468 ibits = sel->ibits;
1469 obits = sel->obits;
1470
1471 nw = howmany(nfd, NFDBITS);
1472
1473 count = seldata->count;
1474
1475 nc = 0;
1476 if (!count) {
1477 *retval = 0;
1478 return 0;
1479 }
1480
1481 proc_fdlock(p);
1482 for (msk = 0; msk < 3; msk++) {
1483 iptr = (u_int32_t *)&ibits[msk * nw];
1484 optr = (u_int32_t *)&obits[msk * nw];
1485
1486 for (i = 0; i < nfd; i += NFDBITS) {
1487 bits = iptr[i/NFDBITS];
1488
1489 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1490 bits &= ~(1 << j);
1491
1492 if (fd < fdp->fd_nfiles)
1493 fp = fdp->fd_ofiles[fd];
1494 else
1495 fp = NULL;
1496
1497 if (fp == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1498 /*
1499 * If we abort because of a bad
1500 * fd, let the caller unwind...
1501 */
1502 proc_fdunlock(p);
1503 return(EBADF);
1504 }
1505 if (sel_pass == SEL_SECONDPASS) {
1506 reserved_link = 0;
1507 rl_ptr = NULL;
1508 selunlinkfp(fp, seldata->wqp[nc], wqset);
1509 } else {
1510 reserved_link = waitq_link_reserve((struct waitq *)wqset);
1511 rl_ptr = &reserved_link;
1512 if (fp->f_flags & FP_INSELECT)
1513 /* someone is already in select on this fp */
1514 fp->f_flags |= FP_SELCONFLICT;
1515 else
1516 fp->f_flags |= FP_INSELECT;
1517 }
1518
1519 context.vc_ucred = fp->f_cred;
1520
1521 /*
1522 * stash this value b/c fo_select may replace
1523 * reserved_link with a pointer to a waitq object
1524 */
1525 uint64_t rsvd = reserved_link;
1526
1527 /* The select; set the bit, if true */
1528 if (fp->f_ops && fp->f_type
1529 && fo_select(fp, flag[msk], rl_ptr, &context)) {
1530 optr[fd/NFDBITS] |= (1 << (fd % NFDBITS));
1531 n++;
1532 }
1533 if (sel_pass == SEL_FIRSTPASS) {
1534 waitq_link_release(rsvd);
1535 /*
1536 * If the fp's supporting selinfo structure was linked
1537 * to this thread's waitq set, then 'reserved_link'
1538 * will have been updated by selrecord to be a pointer
1539 * to the selinfo's waitq.
1540 */
1541 if (reserved_link == rsvd)
1542 rl_ptr = NULL; /* fo_select never called selrecord() */
1543 /*
1544 * Hook up the thread's waitq set either to
1545 * the fileproc structure, or to the global
1546 * conflict queue: but only on the first
1547 * select pass.
1548 */
1549 seldata->wqp[nc] = sellinkfp(fp, (void **)rl_ptr, wqset);
1550 }
1551 nc++;
1552 }
1553 }
1554 }
1555 proc_fdunlock(p);
1556
1557 *retval = n;
1558 return (0);
1559 }
1560
1561 int poll_callback(struct kqueue *, struct kevent_internal_s *, void *);
1562
1563 struct poll_continue_args {
1564 user_addr_t pca_fds;
1565 u_int pca_nfds;
1566 u_int pca_rfds;
1567 };
1568
1569 int
1570 poll(struct proc *p, struct poll_args *uap, int32_t *retval)
1571 {
1572 __pthread_testcancel(1);
1573 return(poll_nocancel(p, (struct poll_nocancel_args *)uap, retval));
1574 }
1575
1576
1577 int
1578 poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval)
1579 {
1580 struct poll_continue_args *cont;
1581 struct pollfd *fds;
1582 struct kqueue *kq;
1583 struct timeval atv;
1584 int ncoll, error = 0;
1585 u_int nfds = uap->nfds;
1586 u_int rfds = 0;
1587 u_int i;
1588 size_t ni;
1589
1590 /*
1591 * This is kinda bogus. We have fd limits, but that is not
1592 * really related to the size of the pollfd array. Make sure
1593 * we let the process use at least FD_SETSIZE entries and at
1594 * least enough for the current limits. We want to be reasonably
1595 * safe, but not overly restrictive.
1596 */
1597 if (nfds > OPEN_MAX ||
1598 (nfds > p->p_rlimit[RLIMIT_NOFILE].rlim_cur && (proc_suser(p) || nfds > FD_SETSIZE)))
1599 return (EINVAL);
1600
1601 kq = kqueue_alloc(p);
1602 if (kq == NULL)
1603 return (EAGAIN);
1604
1605 ni = nfds * sizeof(struct pollfd) + sizeof(struct poll_continue_args);
1606 MALLOC(cont, struct poll_continue_args *, ni, M_TEMP, M_WAITOK);
1607 if (NULL == cont) {
1608 error = EAGAIN;
1609 goto out;
1610 }
1611
1612 fds = (struct pollfd *)&cont[1];
1613 error = copyin(uap->fds, fds, nfds * sizeof(struct pollfd));
1614 if (error)
1615 goto out;
1616
1617 if (uap->timeout != -1) {
1618 struct timeval rtv;
1619
1620 atv.tv_sec = uap->timeout / 1000;
1621 atv.tv_usec = (uap->timeout % 1000) * 1000;
1622 if (itimerfix(&atv)) {
1623 error = EINVAL;
1624 goto out;
1625 }
1626 getmicrouptime(&rtv);
1627 timevaladd(&atv, &rtv);
1628 } else {
1629 atv.tv_sec = 0;
1630 atv.tv_usec = 0;
1631 }
1632
1633 /* JMM - all this P_SELECT stuff is bogus */
1634 ncoll = nselcoll;
1635 OSBitOrAtomic(P_SELECT, &p->p_flag);
1636 for (i = 0; i < nfds; i++) {
1637 short events = fds[i].events;
1638 int kerror = 0;
1639
1640 /* per spec, ignore fd values below zero */
1641 if (fds[i].fd < 0) {
1642 fds[i].revents = 0;
1643 continue;
1644 }
1645
1646 /* convert the poll event into a kqueue kevent */
1647 struct kevent_internal_s kev = {
1648 .ident = fds[i].fd,
1649 .flags = EV_ADD | EV_ONESHOT | EV_POLL,
1650 .udata = CAST_USER_ADDR_T(&fds[i]) };
1651
1652 /* Handle input events */
1653 if (events & ( POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP )) {
1654 kev.filter = EVFILT_READ;
1655 if (events & ( POLLPRI | POLLRDBAND ))
1656 kev.flags |= EV_OOBAND;
1657 kerror = kevent_register(kq, &kev, p);
1658 }
1659
1660 /* Handle output events */
1661 if (kerror == 0 &&
1662 events & ( POLLOUT | POLLWRNORM | POLLWRBAND )) {
1663 kev.filter = EVFILT_WRITE;
1664 kerror = kevent_register(kq, &kev, p);
1665 }
1666
1667 /* Handle BSD extension vnode events */
1668 if (kerror == 0 &&
1669 events & ( POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE )) {
1670 kev.filter = EVFILT_VNODE;
1671 kev.fflags = 0;
1672 if (events & POLLEXTEND)
1673 kev.fflags |= NOTE_EXTEND;
1674 if (events & POLLATTRIB)
1675 kev.fflags |= NOTE_ATTRIB;
1676 if (events & POLLNLINK)
1677 kev.fflags |= NOTE_LINK;
1678 if (events & POLLWRITE)
1679 kev.fflags |= NOTE_WRITE;
1680 kerror = kevent_register(kq, &kev, p);
1681 }
1682
1683 if (kerror != 0) {
1684 fds[i].revents = POLLNVAL;
1685 rfds++;
1686 } else
1687 fds[i].revents = 0;
1688 }
1689
1690 /* Did we have any trouble registering? */
1691 if (rfds > 0)
1692 goto done;
1693
1694 /* scan for, and possibly wait for, the kevents to trigger */
1695 cont->pca_fds = uap->fds;
1696 cont->pca_nfds = nfds;
1697 cont->pca_rfds = rfds;
1698 error = kqueue_scan(kq, poll_callback, NULL, cont, &atv, p);
1699 rfds = cont->pca_rfds;
1700
1701 done:
1702 OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1703 /* poll is not restarted after signals... */
1704 if (error == ERESTART)
1705 error = EINTR;
1706 if (error == EWOULDBLOCK)
1707 error = 0;
1708 if (error == 0) {
1709 error = copyout(fds, uap->fds, nfds * sizeof(struct pollfd));
1710 *retval = rfds;
1711 }
1712
1713 out:
1714 if (NULL != cont)
1715 FREE(cont, M_TEMP);
1716
1717 kqueue_dealloc(kq);
1718 return (error);
1719 }
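/*
 * Illustrative userspace sketch (sock_fd is hypothetical): the events
 * requested here are translated by the registration loop above into kqueue
 * filters (POLLIN/POLLRDNORM/POLLPRI/POLLRDBAND -> EVFILT_READ, POLLOUT and
 * friends -> EVFILT_WRITE, the BSD POLLEXTEND/POLLATTRIB/POLLNLINK/POLLWRITE
 * extensions -> EVFILT_VNODE), and the results are folded back into revents
 * by poll_callback() below.
 *
 *	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN };
 *	int n = poll(&pfd, 1, 1000);		// 1000 ms timeout
 *	if (n > 0 && (pfd.revents & POLLIN))
 *		;	// readable
 */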
1720
1721 int
1722 poll_callback(__unused struct kqueue *kq, struct kevent_internal_s *kevp, void *data)
1723 {
1724 struct poll_continue_args *cont = (struct poll_continue_args *)data;
1725 struct pollfd *fds = CAST_DOWN(struct pollfd *, kevp->udata);
1726 short prev_revents = fds->revents;
1727 short mask = 0;
1728
1729 /* convert the results back into revents */
1730 if (kevp->flags & EV_EOF)
1731 fds->revents |= POLLHUP;
1732 if (kevp->flags & EV_ERROR)
1733 fds->revents |= POLLERR;
1734
1735 switch (kevp->filter) {
1736 case EVFILT_READ:
1737 if (fds->revents & POLLHUP)
1738 mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND );
1739 else {
1740 mask = (POLLIN | POLLRDNORM);
1741 if (kevp->flags & EV_OOBAND)
1742 mask |= (POLLPRI | POLLRDBAND);
1743 }
1744 fds->revents |= (fds->events & mask);
1745 break;
1746
1747 case EVFILT_WRITE:
1748 if (!(fds->revents & POLLHUP))
1749 fds->revents |= (fds->events & ( POLLOUT | POLLWRNORM | POLLWRBAND ));
1750 break;
1751
1752 case EVFILT_VNODE:
1753 if (kevp->fflags & NOTE_EXTEND)
1754 fds->revents |= (fds->events & POLLEXTEND);
1755 if (kevp->fflags & NOTE_ATTRIB)
1756 fds->revents |= (fds->events & POLLATTRIB);
1757 if (kevp->fflags & NOTE_LINK)
1758 fds->revents |= (fds->events & POLLNLINK);
1759 if (kevp->fflags & NOTE_WRITE)
1760 fds->revents |= (fds->events & POLLWRITE);
1761 break;
1762 }
1763
1764 if (fds->revents != 0 && prev_revents == 0)
1765 cont->pca_rfds++;
1766
1767 return 0;
1768 }
1769
1770 int
1771 seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p)
1772 {
1773
1774 return (1);
1775 }
1776
1777 /*
1778 * selcount
1779 *
1780 * Count the number of bits set in the input bit vector, and establish an
1781 * outstanding fp->f_iocount for each of the descriptors which will be in
1782 * use in the select operation.
1783 *
1784 * Parameters: p The process doing the select
1785 * ibits The input bit vector
1786 * nfd The number of fd's in the vector
1787 * countp Pointer to where to store the bit count
1788 *
1789 * Returns: 0 Success
1790 * EIO Bad per process open file table
1791 * EBADF One of the bits in the input bit vector
1792 * references an invalid fd
1793 *
1794 * Implicit: *countp (modified) Count of fd's
1795 *
1796 * Notes: This function is the first pass under the proc_fdlock() that
1797 * permits us to recognize invalid descriptors in the bit vector;
 1798  * they may, however, not remain valid through the drop and
1799 * later reacquisition of the proc_fdlock().
1800 */
1801 static int
1802 selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp)
1803 {
1804 struct filedesc *fdp = p->p_fd;
1805 int msk, i, j, fd;
1806 u_int32_t bits;
1807 struct fileproc *fp;
1808 int n = 0;
1809 u_int32_t *iptr;
1810 u_int nw;
1811 int error=0;
1812 int dropcount;
1813 int need_wakeup = 0;
1814
1815 /*
 1816  * Problems seen when rebooting, due to Mac OS X signal handling
 1817  * issues in Beaker1C; verify that p->p_fd is valid
1818 */
1819 if (fdp == NULL) {
1820 *countp = 0;
1821 return(EIO);
1822 }
1823 nw = howmany(nfd, NFDBITS);
1824
1825 proc_fdlock(p);
1826 for (msk = 0; msk < 3; msk++) {
1827 iptr = (u_int32_t *)&ibits[msk * nw];
1828 for (i = 0; i < nfd; i += NFDBITS) {
1829 bits = iptr[i/NFDBITS];
1830 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1831 bits &= ~(1 << j);
1832
1833 if (fd < fdp->fd_nfiles)
1834 fp = fdp->fd_ofiles[fd];
1835 else
1836 fp = NULL;
1837
1838 if (fp == NULL ||
1839 (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1840 *countp = 0;
1841 error = EBADF;
1842 goto bad;
1843 }
1844 fp->f_iocount++;
1845 n++;
1846 }
1847 }
1848 }
1849 proc_fdunlock(p);
1850
1851 *countp = n;
1852 return (0);
1853
1854 bad:
1855 dropcount = 0;
1856
1857 if (n == 0)
1858 goto out;
1859 /* Ignore error return; it's already EBADF */
1860 (void)seldrop_locked(p, ibits, nfd, n, &need_wakeup, 1);
1861
1862 out:
1863 proc_fdunlock(p);
1864 if (need_wakeup) {
1865 wakeup(&p->p_fpdrainwait);
1866 }
1867 return(error);
1868 }
1869
1870
1871 /*
1872 * seldrop_locked
1873 *
1874 * Drop outstanding wait queue references set up during selscan(); drop the
1875 * outstanding per fileproc f_iocount() picked up during the selcount().
1876 *
1877 * Parameters: p Process performing the select
 1878  * ibits Input bit vector of fd's
1879 * nfd Number of fd's
1880 * lim Limit to number of vector entries to
1881 * consider, or -1 for "all"
 1882  * fromselcount True if called from selcount()
 1883  * need_wakeup Pointer to flag to set to do a wakeup
 1884  * if f_iocount on any descriptor goes to 0
1885 *
1886 * Returns: 0 Success
1887 * EBADF One or more fds in the bit vector
1888 * were invalid, but the rest
1889 * were successfully dropped
1890 *
 1891  * Notes: An fd may become bad while the proc_fdlock() is not held,
1892 * if a multithreaded application closes the fd out from under
1893 * the in progress select. In this case, we still have to
1894 * clean up after the set up on the remaining fds.
1895 */
1896 static int
1897 seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup, int fromselcount)
1898 {
1899 struct filedesc *fdp = p->p_fd;
1900 int msk, i, j, nc, fd;
1901 u_int32_t bits;
1902 struct fileproc *fp;
1903 u_int32_t *iptr;
1904 u_int nw;
1905 int error = 0;
1906 int dropcount = 0;
1907 uthread_t uth = get_bsdthread_info(current_thread());
1908 struct _select_data *seldata;
1909
1910 *need_wakeup = 0;
1911
1912 /*
 1913  * Problems seen when rebooting, due to Mac OS X signal handling
 1914  * issues in Beaker1C; verify that p->p_fd is valid
1915 */
1916 if (fdp == NULL) {
1917 return(EIO);
1918 }
1919
1920 nw = howmany(nfd, NFDBITS);
1921 seldata = &uth->uu_kevent.ss_select_data;
1922
1923 nc = 0;
1924 for (msk = 0; msk < 3; msk++) {
1925 iptr = (u_int32_t *)&ibits[msk * nw];
1926 for (i = 0; i < nfd; i += NFDBITS) {
1927 bits = iptr[i/NFDBITS];
1928 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1929 bits &= ~(1 << j);
1930 fp = fdp->fd_ofiles[fd];
1931 /*
1932 * If we've already dropped as many as were
1933 * counted/scanned, then we are done.
1934 */
1935 if ((fromselcount != 0) && (++dropcount > lim))
1936 goto done;
1937
1938 /*
1939 * unlink even potentially NULL fileprocs.
1940 * If the FD was closed from under us, we
1941 * still need to cleanup the waitq links!
1942 */
1943 selunlinkfp(fp,
1944 seldata->wqp ? seldata->wqp[nc] : 0,
1945 uth->uu_wqset);
1946
1947 nc++;
1948
1949 if (fp == NULL) {
1950 /* skip (now) bad fds */
1951 error = EBADF;
1952 continue;
1953 }
1954
1955 fp->f_iocount--;
1956 if (fp->f_iocount < 0)
1957 panic("f_iocount overdecrement!");
1958
1959 if (fp->f_iocount == 0) {
1960 /*
1961 * The last iocount is responsible for clearing
 1962  * selconflict flag - even if we didn't set it -
1963 * and is also responsible for waking up anyone
1964 * waiting on iocounts to drain.
1965 */
1966 if (fp->f_flags & FP_SELCONFLICT)
1967 fp->f_flags &= ~FP_SELCONFLICT;
1968 if (p->p_fpdrainwait) {
1969 p->p_fpdrainwait = 0;
1970 *need_wakeup = 1;
1971 }
1972 }
1973 }
1974 }
1975 }
1976 done:
1977 return (error);
1978 }
1979
1980
1981 static int
1982 seldrop(struct proc *p, u_int32_t *ibits, int nfd)
1983 {
1984 int error;
1985 int need_wakeup = 0;
1986
1987 proc_fdlock(p);
1988 error = seldrop_locked(p, ibits, nfd, nfd, &need_wakeup, 0);
1989 proc_fdunlock(p);
1990 if (need_wakeup) {
1991 wakeup(&p->p_fpdrainwait);
1992 }
1993 return (error);
1994 }
1995
1996 /*
1997 * Record a select request.
1998 */
1999 void
2000 selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data)
2001 {
2002 thread_t cur_act = current_thread();
2003 struct uthread * ut = get_bsdthread_info(cur_act);
2004 /* on input, s_data points to the 64-bit ID of a reserved link object */
2005 uint64_t *reserved_link = (uint64_t *)s_data;
2006
2007 /* need to look at collisions */
2008
 2009 	/* do not record if this is the second pass of select */
2010 if (!s_data)
2011 return;
2012
2013 if ((sip->si_flags & SI_INITED) == 0) {
2014 waitq_init(&sip->si_waitq, SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
2015 sip->si_flags |= SI_INITED;
2016 sip->si_flags &= ~SI_CLEAR;
2017 }
2018
2019 if (sip->si_flags & SI_RECORDED)
2020 sip->si_flags |= SI_COLL;
2021 else
2022 sip->si_flags &= ~SI_COLL;
2023
2024 sip->si_flags |= SI_RECORDED;
2025 /* note: this checks for pre-existing linkage */
2026 waitq_link(&sip->si_waitq, ut->uu_wqset,
2027 WAITQ_SHOULD_LOCK, reserved_link);
2028
2029 /*
2030 * Always consume the reserved link.
2031 * We can always call waitq_link_release() safely because if
2032 * waitq_link is successful, it consumes the link and resets the
2033 * value to 0, in which case our call to release becomes a no-op.
2034 * If waitq_link fails, then the following release call will actually
2035 * release the reserved link object.
2036 */
2037 waitq_link_release(*reserved_link);
2038 *reserved_link = 0;
2039
2040 /*
2041 * Use the s_data pointer as an output parameter as well.
2042 * This avoids changing the prototype of this function, which is
2043 * used by many kexts. We need to surface the waitq object
2044 * associated with the selinfo we just added to the thread's select
2045 * set. New waitq sets do not have back-pointers to set members, so
2046 * the only way to clear out set linkage objects is to go from the
2047 * waitq to the set. We use a memcpy because s_data could be
2048 * pointing to an unaligned value on the stack
2049 * (especially on 32-bit systems)
2050 */
2051 void *wqptr = (void *)&sip->si_waitq;
2052 memcpy((void *)s_data, (void *)&wqptr, sizeof(void *));
2053
2054 return;
2055 }
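/*
 * Illustrative sketch (not part of this file): how a driver-side select
 * hook typically uses selrecord().  This is a minimal example following
 * the cdevsw d_select shape; "mydev_softc", "mydev_lookup" and the
 * sc_ready/sc_rsel fields are hypothetical names used only here.  The
 * hook receives the same s_data/wql token that selrecord() consumes above.
 */
#if 0
static int
mydev_select(dev_t dev, int which, void *wql, struct proc *p)
{
	struct mydev_softc *sc = mydev_lookup(dev);	/* hypothetical per-device state */

	if (which != FREAD)
		return (0);
	if (sc->sc_ready)
		return (1);		/* data is ready now; nothing to record */

	/* not ready: park this thread's select on our selinfo */
	selrecord(p, &sc->sc_rsel, wql);
	return (0);
}
#endif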
2056
2057 void
2058 selwakeup(struct selinfo *sip)
2059 {
2060
2061 if ((sip->si_flags & SI_INITED) == 0) {
2062 return;
2063 }
2064
2065 if (sip->si_flags & SI_COLL) {
2066 nselcoll++;
2067 sip->si_flags &= ~SI_COLL;
2068 #if 0
2069 /* will not support */
2070 //wakeup((caddr_t)&selwait);
2071 #endif
2072 }
2073
2074 if (sip->si_flags & SI_RECORDED) {
2075 waitq_wakeup64_all(&sip->si_waitq, NO_EVENT64,
2076 THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
2077 sip->si_flags &= ~SI_RECORDED;
2078 }
2079
2080 }
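/*
 * Illustrative sketch (not part of this file): the wakeup side of the
 * selinfo protocol, continuing the hypothetical "mydev_softc" example
 * above.  When data arrives, the driver posts to any recorded waiters
 * with selwakeup(); at detach time it calls selthreadclear() (below) so
 * no stale waitq linkage survives the selinfo.
 */
#if 0
static void
mydev_data_ready(struct mydev_softc *sc)
{
	sc->sc_ready = 1;
	selwakeup(&sc->sc_rsel);	/* wake any recorded select()/poll() waiters */
}

static void
mydev_detach(struct mydev_softc *sc)
{
	selthreadclear(&sc->sc_rsel);	/* tear down any remaining waitq linkage */
}
#endif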
2081
2082 void
2083 selthreadclear(struct selinfo *sip)
2084 {
2085 struct waitq *wq;
2086
2087 if ((sip->si_flags & SI_INITED) == 0) {
2088 return;
2089 }
2090 if (sip->si_flags & SI_RECORDED) {
2091 selwakeup(sip);
2092 sip->si_flags &= ~(SI_RECORDED | SI_COLL);
2093 }
2094 sip->si_flags |= SI_CLEAR;
2095 sip->si_flags &= ~SI_INITED;
2096
2097 wq = &sip->si_waitq;
2098
2099 /*
2100 * Higher level logic may have a handle on this waitq's prepost ID,
2101 * but that's OK because the waitq_deinit will remove/invalidate the
2102 * prepost object (as well as mark the waitq invalid). This de-couples
2103 * us from any callers that may have a handle to this waitq via the
2104 * prepost ID.
2105 */
2106 waitq_deinit(wq);
2107 }
2108
2109
2110
2111
2112 #define DBG_POST 0x10
2113 #define DBG_WATCH 0x11
2114 #define DBG_WAIT 0x12
2115 #define DBG_MOD 0x13
2116 #define DBG_EWAKEUP 0x14
2117 #define DBG_ENQUEUE 0x15
2118 #define DBG_DEQUEUE 0x16
2119
2120 #define DBG_MISC_POST MISCDBG_CODE(DBG_EVENT,DBG_POST)
2121 #define DBG_MISC_WATCH MISCDBG_CODE(DBG_EVENT,DBG_WATCH)
2122 #define DBG_MISC_WAIT MISCDBG_CODE(DBG_EVENT,DBG_WAIT)
2123 #define DBG_MISC_MOD MISCDBG_CODE(DBG_EVENT,DBG_MOD)
2124 #define DBG_MISC_EWAKEUP MISCDBG_CODE(DBG_EVENT,DBG_EWAKEUP)
2125 #define DBG_MISC_ENQUEUE MISCDBG_CODE(DBG_EVENT,DBG_ENQUEUE)
2126 #define DBG_MISC_DEQUEUE MISCDBG_CODE(DBG_EVENT,DBG_DEQUEUE)
2127
2128
2129 #define EVPROCDEQUE(p, evq) do { \
2130 proc_lock(p); \
2131 if (evq->ee_flags & EV_QUEUED) { \
2132 TAILQ_REMOVE(&p->p_evlist, evq, ee_plist); \
2133 evq->ee_flags &= ~EV_QUEUED; \
2134 } \
2135 proc_unlock(p); \
2136 } while (0);
2137
2138
2139 /*
2140 * called upon socket close. dequeue and free all events for
2141 * the socket... socket must be locked by caller.
2142 */
2143 void
2144 evsofree(struct socket *sp)
2145 {
2146 struct eventqelt *evq, *next;
2147 proc_t p;
2148
2149 if (sp == NULL)
2150 return;
2151
2152 for (evq = sp->so_evlist.tqh_first; evq != NULL; evq = next) {
2153 next = evq->ee_slist.tqe_next;
2154 p = evq->ee_proc;
2155
2156 if (evq->ee_flags & EV_QUEUED) {
2157 EVPROCDEQUE(p, evq);
2158 }
2159 TAILQ_REMOVE(&sp->so_evlist, evq, ee_slist); // remove from socket q
2160 FREE(evq, M_TEMP);
2161 }
2162 }
2163
2164
2165 /*
2166 * called upon pipe close. dequeue and free all events for
2167 * the pipe... pipe must be locked by caller
2168 */
2169 void
2170 evpipefree(struct pipe *cpipe)
2171 {
2172 struct eventqelt *evq, *next;
2173 proc_t p;
2174
2175 for (evq = cpipe->pipe_evlist.tqh_first; evq != NULL; evq = next) {
2176 next = evq->ee_slist.tqe_next;
2177 p = evq->ee_proc;
2178
2179 EVPROCDEQUE(p, evq);
2180
2181 TAILQ_REMOVE(&cpipe->pipe_evlist, evq, ee_slist); // remove from pipe q
2182 FREE(evq, M_TEMP);
2183 }
2184 }
2185
2186
2187 /*
2188 * enqueue this event if it's not already queued. wake up
2189 * the proc if we do queue this event to it...
2190 * we take the proc lock internally and drop it before
2191 * doing the wakeup, returning without it held
2192 */
2193 static void
2194 evprocenque(struct eventqelt *evq)
2195 {
2196 proc_t p;
2197
2198 assert(evq);
2199 p = evq->ee_proc;
2200
2201 KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_START, (uint32_t)evq, evq->ee_flags, evq->ee_eventmask,0,0);
2202
2203 proc_lock(p);
2204
2205 if (evq->ee_flags & EV_QUEUED) {
2206 proc_unlock(p);
2207
2208 KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0);
2209 return;
2210 }
2211 evq->ee_flags |= EV_QUEUED;
2212
2213 TAILQ_INSERT_TAIL(&p->p_evlist, evq, ee_plist);
2214
2215 proc_unlock(p);
2216
2217 wakeup(&p->p_evlist);
2218
2219 KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0);
2220 }
2221
2222
2223 /*
2224 * pipe lock must be taken by the caller
2225 */
2226 void
2227 postpipeevent(struct pipe *pipep, int event)
2228 {
2229 int mask;
2230 struct eventqelt *evq;
2231
2232 if (pipep == NULL)
2233 return;
2234 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, event,0,0,1,0);
2235
2236 for (evq = pipep->pipe_evlist.tqh_first;
2237 evq != NULL; evq = evq->ee_slist.tqe_next) {
2238
2239 if (evq->ee_eventmask == 0)
2240 continue;
2241 mask = 0;
2242
2243 switch (event & (EV_RWBYTES | EV_RCLOSED | EV_WCLOSED)) {
2244
2245 case EV_RWBYTES:
2246 if ((evq->ee_eventmask & EV_RE) && pipep->pipe_buffer.cnt) {
2247 mask |= EV_RE;
2248 evq->ee_req.er_rcnt = pipep->pipe_buffer.cnt;
2249 }
2250 if ((evq->ee_eventmask & EV_WR) &&
2251 (MAX(pipep->pipe_buffer.size,PIPE_SIZE) - pipep->pipe_buffer.cnt) >= PIPE_BUF) {
2252
2253 if (pipep->pipe_state & PIPE_EOF) {
2254 mask |= EV_WR|EV_RESET;
2255 break;
2256 }
2257 mask |= EV_WR;
2258 evq->ee_req.er_wcnt = MAX(pipep->pipe_buffer.size, PIPE_SIZE) - pipep->pipe_buffer.cnt;
2259 }
2260 break;
2261
2262 case EV_WCLOSED:
2263 case EV_RCLOSED:
2264 if ((evq->ee_eventmask & EV_RE)) {
2265 mask |= EV_RE|EV_RCLOSED;
2266 }
2267 if ((evq->ee_eventmask & EV_WR)) {
2268 mask |= EV_WR|EV_WCLOSED;
2269 }
2270 break;
2271
2272 default:
2273 return;
2274 }
2275 if (mask) {
2276 /*
2277 * disarm... postevents are nops until this event is 'read' via
2278 * waitevent and then re-armed via modwatch
2279 */
2280 evq->ee_eventmask = 0;
2281
2282 /*
2283 * since events are disarmed until after the waitevent
2284 * the ee_req.er_xxxx fields can't change once we've
2285 * inserted this event into the proc queue...
2286 * therefore, the waitevent will see a 'consistent'
2287 * snapshot of the event, even though it won't hold
2288 * the pipe lock, and we're updating the event outside
2289 * of the proc lock, which it will hold
2290 */
2291 evq->ee_req.er_eventbits |= mask;
2292
2293 KERNEL_DEBUG(DBG_MISC_POST, (uint32_t)evq, evq->ee_req.er_eventbits, mask, 1,0);
2294
2295 evprocenque(evq);
2296 }
2297 }
2298 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, 0,0,0,1,0);
2299 }
2300
2301 #if SOCKETS
2302 /*
2303 * given either a sockbuf or a socket run down the
2304 * event list and queue ready events found...
2305 * the socket must be locked by the caller
2306 */
2307 void
2308 postevent(struct socket *sp, struct sockbuf *sb, int event)
2309 {
2310 int mask;
2311 struct eventqelt *evq;
2312 struct tcpcb *tp;
2313
2314 if (sb)
2315 sp = sb->sb_so;
2316 if (sp == NULL)
2317 return;
2318
2319 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, (int)sp, event, 0, 0, 0);
2320
2321 for (evq = sp->so_evlist.tqh_first;
2322 evq != NULL; evq = evq->ee_slist.tqe_next) {
2323
2324 if (evq->ee_eventmask == 0)
2325 continue;
2326 mask = 0;
2327
2328 /* ready for reading:
2329 - byte cnt >= receive low water mark
2330 - read-half of conn closed
2331 - conn pending for listening sock
2332 - socket error pending
2333
2334 ready for writing
2335 - byte cnt avail >= send low water mark
2336 - write half of conn closed
2337 - socket error pending
2338 - non-blocking conn completed successfully
2339
2340 exception pending
2341 - out of band data
2342 - sock at out of band mark
2343 */
2344
2345 switch (event & EV_DMASK) {
2346
2347 case EV_OOB:
2348 if ((evq->ee_eventmask & EV_EX)) {
2349 if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK)))
2350 mask |= EV_EX|EV_OOB;
2351 }
2352 break;
2353
2354 case EV_RWBYTES|EV_OOB:
2355 if ((evq->ee_eventmask & EV_EX)) {
2356 if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK)))
2357 mask |= EV_EX|EV_OOB;
2358 }
2359 /*
2360 * fall into the next case
2361 */
2362 case EV_RWBYTES:
2363 if ((evq->ee_eventmask & EV_RE) && soreadable(sp)) {
2364 /* for AFP/OT purposes; may go away in future */
2365 if ((SOCK_DOM(sp) == PF_INET ||
2366 SOCK_DOM(sp) == PF_INET6) &&
2367 SOCK_PROTO(sp) == IPPROTO_TCP &&
2368 (sp->so_error == ECONNREFUSED ||
2369 sp->so_error == ECONNRESET)) {
2370 if (sp->so_pcb == NULL ||
2371 sotoinpcb(sp)->inp_state ==
2372 INPCB_STATE_DEAD ||
2373 (tp = sototcpcb(sp)) == NULL ||
2374 tp->t_state == TCPS_CLOSED) {
2375 mask |= EV_RE|EV_RESET;
2376 break;
2377 }
2378 }
2379 mask |= EV_RE;
2380 evq->ee_req.er_rcnt = sp->so_rcv.sb_cc;
2381
2382 if (sp->so_state & SS_CANTRCVMORE) {
2383 mask |= EV_FIN;
2384 break;
2385 }
2386 }
2387 if ((evq->ee_eventmask & EV_WR) && sowriteable(sp)) {
2388 /* for AFP/OT purposes; may go away in future */
2389 if ((SOCK_DOM(sp) == PF_INET ||
2390 SOCK_DOM(sp) == PF_INET6) &&
2391 SOCK_PROTO(sp) == IPPROTO_TCP &&
2392 (sp->so_error == ECONNREFUSED ||
2393 sp->so_error == ECONNRESET)) {
2394 if (sp->so_pcb == NULL ||
2395 sotoinpcb(sp)->inp_state ==
2396 INPCB_STATE_DEAD ||
2397 (tp = sototcpcb(sp)) == NULL ||
2398 tp->t_state == TCPS_CLOSED) {
2399 mask |= EV_WR|EV_RESET;
2400 break;
2401 }
2402 }
2403 mask |= EV_WR;
2404 evq->ee_req.er_wcnt = sbspace(&sp->so_snd);
2405 }
2406 break;
2407
2408 case EV_RCONN:
2409 if ((evq->ee_eventmask & EV_RE)) {
2410 mask |= EV_RE|EV_RCONN;
2411 evq->ee_req.er_rcnt = sp->so_qlen + 1; // incl this one
2412 }
2413 break;
2414
2415 case EV_WCONN:
2416 if ((evq->ee_eventmask & EV_WR)) {
2417 mask |= EV_WR|EV_WCONN;
2418 }
2419 break;
2420
2421 case EV_RCLOSED:
2422 if ((evq->ee_eventmask & EV_RE)) {
2423 mask |= EV_RE|EV_RCLOSED;
2424 }
2425 break;
2426
2427 case EV_WCLOSED:
2428 if ((evq->ee_eventmask & EV_WR)) {
2429 mask |= EV_WR|EV_WCLOSED;
2430 }
2431 break;
2432
2433 case EV_FIN:
2434 if (evq->ee_eventmask & EV_RE) {
2435 mask |= EV_RE|EV_FIN;
2436 }
2437 break;
2438
2439 case EV_RESET:
2440 case EV_TIMEOUT:
2441 if (evq->ee_eventmask & EV_RE) {
2442 mask |= EV_RE | event;
2443 }
2444 if (evq->ee_eventmask & EV_WR) {
2445 mask |= EV_WR | event;
2446 }
2447 break;
2448
2449 default:
2450 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, (int)sp, -1, 0, 0, 0);
2451 return;
2452 } /* switch */
2453
2454 KERNEL_DEBUG(DBG_MISC_POST, (int)evq, evq->ee_eventmask, evq->ee_req.er_eventbits, mask, 0);
2455
2456 if (mask) {
2457 /*
2458 * disarm... postevents are nops until this event is 'read' via
2459 * waitevent and then re-armed via modwatch
2460 */
2461 evq->ee_eventmask = 0;
2462
2463 /*
2464 * since events are disarmed until after the waitevent
2465 * the ee_req.er_xxxx fields can't change once we've
2466 * inserted this event into the proc queue...
2467 * since waitevent can't see this event until we
2468 * enqueue it, waitevent will see a 'consistent'
2469 * snapshot of the event, even though it won't hold
2470 * the socket lock, and we're updating the event outside
2471 * of the proc lock, which it will hold
2472 */
2473 evq->ee_req.er_eventbits |= mask;
2474
2475 evprocenque(evq);
2476 }
2477 }
2478 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, (int)sp, 0, 0, 0, 0);
2479 }
2480 #endif /* SOCKETS */
2481
2482
2483 /*
2484 * watchevent system call. user passes us an event to watch
2485 * for. we malloc an event object, initialize it, and queue
2486 * it to the open socket. when the event occurs, postevent()
2487 * will enqueue it back to our proc where we can retrieve it
2488 * via waitevent().
2489 *
2490 * should this prevent duplicate events on same socket?
2491 *
2492 * Returns:
2493 * ENOMEM No memory for operation
2494 * copyin:EFAULT
2495 */
2496 int
2497 watchevent(proc_t p, struct watchevent_args *uap, __unused int *retval)
2498 {
2499 struct eventqelt *evq = (struct eventqelt *)0;
2500 struct eventqelt *np = NULL;
2501 struct eventreq64 *erp;
2502 struct fileproc *fp = NULL;
2503 int error;
2504
2505 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_START, 0,0,0,0,0);
2506
2507 // get a qelt and fill with users req
2508 MALLOC(evq, struct eventqelt *, sizeof(struct eventqelt), M_TEMP, M_WAITOK);
2509
2510 if (evq == NULL)
2511 return (ENOMEM);
2512 erp = &evq->ee_req;
2513
2514 // get users request pkt
2515
2516 if (IS_64BIT_PROCESS(p)) {
2517 error = copyin(uap->u_req, (caddr_t)erp, sizeof(struct eventreq64));
2518 } else {
2519 struct eventreq32 er32;
2520
2521 error = copyin(uap->u_req, (caddr_t)&er32, sizeof(struct eventreq32));
2522 if (error == 0) {
2523 /*
2524 * the user only passes in the
2525 * er_type, er_handle and er_data...
2526 * the other fields are initialized
2527 * below, so don't bother to copy
2528 */
2529 erp->er_type = er32.er_type;
2530 erp->er_handle = er32.er_handle;
2531 erp->er_data = (user_addr_t)er32.er_data;
2532 }
2533 }
2534 if (error) {
2535 FREE(evq, M_TEMP);
2536 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0);
2537
2538 return(error);
2539 }
2540 KERNEL_DEBUG(DBG_MISC_WATCH, erp->er_handle,uap->u_eventmask,(uint32_t)evq,0,0);
2541
2542 // validate, freeing qelt if errors
2543 error = 0;
2544 proc_fdlock(p);
2545
2546 if (erp->er_type != EV_FD) {
2547 error = EINVAL;
2548 } else if ((error = fp_lookup(p, erp->er_handle, &fp, 1)) != 0) {
2549 error = EBADF;
2550 #if SOCKETS
2551 } else if (fp->f_type == DTYPE_SOCKET) {
2552 socket_lock((struct socket *)fp->f_data, 1);
2553 np = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
2554 #endif /* SOCKETS */
2555 } else if (fp->f_type == DTYPE_PIPE) {
2556 PIPE_LOCK((struct pipe *)fp->f_data);
2557 np = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
2558 } else {
2559 fp_drop(p, erp->er_handle, fp, 1);
2560 error = EINVAL;
2561 }
2562 proc_fdunlock(p);
2563
2564 if (error) {
2565 FREE(evq, M_TEMP);
2566
2567 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0);
2568 return(error);
2569 }
2570
2571 /*
2572 * only allow one watch per file per proc
2573 */
2574 for ( ; np != NULL; np = np->ee_slist.tqe_next) {
2575 if (np->ee_proc == p) {
2576 #if SOCKETS
2577 if (fp->f_type == DTYPE_SOCKET)
2578 socket_unlock((struct socket *)fp->f_data, 1);
2579 else
2580 #endif /* SOCKETS */
2581 PIPE_UNLOCK((struct pipe *)fp->f_data);
2582 fp_drop(p, erp->er_handle, fp, 0);
2583 FREE(evq, M_TEMP);
2584
2585 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, EINVAL,0,0,0,0);
2586 return(EINVAL);
2587 }
2588 }
2589 erp->er_ecnt = erp->er_rcnt = erp->er_wcnt = erp->er_eventbits = 0;
2590 evq->ee_proc = p;
2591 evq->ee_eventmask = uap->u_eventmask & EV_MASK;
2592 evq->ee_flags = 0;
2593
2594 #if SOCKETS
2595 if (fp->f_type == DTYPE_SOCKET) {
2596 TAILQ_INSERT_TAIL(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
2597 postevent((struct socket *)fp->f_data, 0, EV_RWBYTES); // catch existing events
2598
2599 socket_unlock((struct socket *)fp->f_data, 1);
2600 } else
2601 #endif /* SOCKETS */
2602 {
2603 TAILQ_INSERT_TAIL(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
2604 postpipeevent((struct pipe *)fp->f_data, EV_RWBYTES);
2605
2606 PIPE_UNLOCK((struct pipe *)fp->f_data);
2607 }
2608 fp_drop_event(p, erp->er_handle, fp);
2609
2610 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, 0,0,0,0,0);
2611 return(0);
2612 }
2613
2614
2615
2616 /*
2617 * waitevent system call.
2618 * grabs the next waiting event for this proc and returns
2619 * it. if there are no events, the user can request to sleep
2620 * with or without a timeout, or select poll mode
2621 * ((tv != NULL && interval == 0) || tv == -1)
2622 */
2623 int
2624 waitevent(proc_t p, struct waitevent_args *uap, int *retval)
2625 {
2626 int error = 0;
2627 struct eventqelt *evq;
2628 struct eventreq64 *erp;
2629 uint64_t abstime, interval;
2630 boolean_t fast_poll = FALSE;
2631 union {
2632 struct eventreq64 er64;
2633 struct eventreq32 er32;
2634 } uer;
2635
2636 interval = 0;
2637
2638 if (uap->tv) {
2639 struct timeval atv;
2640 /*
2641 * check for fast poll method
2642 */
2643 if (IS_64BIT_PROCESS(p)) {
2644 if (uap->tv == (user_addr_t)-1)
2645 fast_poll = TRUE;
2646 } else if (uap->tv == (user_addr_t)((uint32_t)-1))
2647 fast_poll = TRUE;
2648
2649 if (fast_poll == TRUE) {
2650 if (p->p_evlist.tqh_first == NULL) {
2651 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_NONE, -1,0,0,0,0);
2652 /*
2653 * poll failed
2654 */
2655 *retval = 1;
2656 return (0);
2657 }
2658 proc_lock(p);
2659 goto retry;
2660 }
2661 if (IS_64BIT_PROCESS(p)) {
2662 struct user64_timeval atv64;
2663 error = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
2664 /* Loses resolution - assume timeout < 68 years */
2665 atv.tv_sec = atv64.tv_sec;
2666 atv.tv_usec = atv64.tv_usec;
2667 } else {
2668 struct user32_timeval atv32;
2669 error = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
2670 atv.tv_sec = atv32.tv_sec;
2671 atv.tv_usec = atv32.tv_usec;
2672 }
2673
2674 if (error)
2675 return(error);
2676 if (itimerfix(&atv)) {
2677 error = EINVAL;
2678 return(error);
2679 }
2680 interval = tvtoabstime(&atv);
2681 }
2682 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_START, 0,0,0,0,0);
2683
2684 proc_lock(p);
2685 retry:
2686 if ((evq = p->p_evlist.tqh_first) != NULL) {
2687 /*
2688 * found one... make a local copy while it's still on the queue
2689 * to prevent it from changing while in the midst of copying;
2690 * we don't want to hold the proc lock across a copyout because
2691 * it might block on a page fault at the target in user space
2692 */
2693 erp = &evq->ee_req;
2694
2695 if (IS_64BIT_PROCESS(p))
2696 bcopy((caddr_t)erp, (caddr_t)&uer.er64, sizeof (struct eventreq64));
2697 else {
2698 uer.er32.er_type = erp->er_type;
2699 uer.er32.er_handle = erp->er_handle;
2700 uer.er32.er_data = (uint32_t)erp->er_data;
2701 uer.er32.er_ecnt = erp->er_ecnt;
2702 uer.er32.er_rcnt = erp->er_rcnt;
2703 uer.er32.er_wcnt = erp->er_wcnt;
2704 uer.er32.er_eventbits = erp->er_eventbits;
2705 }
2706 TAILQ_REMOVE(&p->p_evlist, evq, ee_plist);
2707
2708 evq->ee_flags &= ~EV_QUEUED;
2709
2710 proc_unlock(p);
2711
2712 if (IS_64BIT_PROCESS(p))
2713 error = copyout((caddr_t)&uer.er64, uap->u_req, sizeof(struct eventreq64));
2714 else
2715 error = copyout((caddr_t)&uer.er32, uap->u_req, sizeof(struct eventreq32));
2716
2717 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,
2718 evq->ee_req.er_handle,evq->ee_req.er_eventbits,(uint32_t)evq,0);
2719 return (error);
2720 }
2721 else {
2722 if (uap->tv && interval == 0) {
2723 proc_unlock(p);
2724 *retval = 1; // poll failed
2725
2726 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,0,0,0,0);
2727 return (error);
2728 }
2729 if (interval != 0)
2730 clock_absolutetime_interval_to_deadline(interval, &abstime);
2731 else
2732 abstime = 0;
2733
2734 KERNEL_DEBUG(DBG_MISC_WAIT, 1,(uint32_t)&p->p_evlist,0,0,0);
2735
2736 error = msleep1(&p->p_evlist, &p->p_mlock, (PSOCK | PCATCH), "waitevent", abstime);
2737
2738 KERNEL_DEBUG(DBG_MISC_WAIT, 2,(uint32_t)&p->p_evlist,0,0,0);
2739
2740 if (error == 0)
2741 goto retry;
2742 if (error == ERESTART)
2743 error = EINTR;
2744 if (error == EWOULDBLOCK) {
2745 *retval = 1;
2746 error = 0;
2747 }
2748 }
2749 proc_unlock(p);
2750
2751 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, 0,0,0,0,0);
2752 return (error);
2753 }
2754
2755
2756 /*
2757 * modwatch system call. user passes in event to modify.
2758 * if we find it we reset the event bits and queue/dequeue the
2759 * event as needed.
2760 */
2761 int
2762 modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval)
2763 {
2764 struct eventreq64 er;
2765 struct eventreq64 *erp = &er;
2766 struct eventqelt *evq = NULL; /* protected by error return */
2767 int error;
2768 struct fileproc *fp;
2769 int flag;
2770
2771 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_START, 0,0,0,0,0);
2772
2773 /*
2774 * get the user's request pkt;
2775 * we just need er_type and er_handle, which sit above the
2776 * problematic er_data (32/64 issue)... so only copy in
2777 * those 2 fields
2778 */
2779 if ((error = copyin(uap->u_req, (caddr_t)erp, sizeof(er.er_type) + sizeof(er.er_handle)))) {
2780 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, error,0,0,0,0);
2781 return(error);
2782 }
2783 proc_fdlock(p);
2784
2785 if (erp->er_type != EV_FD) {
2786 error = EINVAL;
2787 } else if ((error = fp_lookup(p, erp->er_handle, &fp, 1)) != 0) {
2788 error = EBADF;
2789 #if SOCKETS
2790 } else if (fp->f_type == DTYPE_SOCKET) {
2791 socket_lock((struct socket *)fp->f_data, 1);
2792 evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
2793 #endif /* SOCKETS */
2794 } else if (fp->f_type == DTYPE_PIPE) {
2795 PIPE_LOCK((struct pipe *)fp->f_data);
2796 evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
2797 } else {
2798 fp_drop(p, erp->er_handle, fp, 1);
2799 error = EINVAL;
2800 }
2801
2802 if (error) {
2803 proc_fdunlock(p);
2804 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, error,0,0,0,0);
2805 return(error);
2806 }
2807
2808 if ((uap->u_eventmask == EV_RM) && (fp->f_flags & FP_WAITEVENT)) {
2809 fp->f_flags &= ~FP_WAITEVENT;
2810 }
2811 proc_fdunlock(p);
2812
2813 // locate event if possible
2814 for ( ; evq != NULL; evq = evq->ee_slist.tqe_next) {
2815 if (evq->ee_proc == p)
2816 break;
2817 }
2818 if (evq == NULL) {
2819 #if SOCKETS
2820 if (fp->f_type == DTYPE_SOCKET)
2821 socket_unlock((struct socket *)fp->f_data, 1);
2822 else
2823 #endif /* SOCKETS */
2824 PIPE_UNLOCK((struct pipe *)fp->f_data);
2825 fp_drop(p, erp->er_handle, fp, 0);
2826 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, EINVAL,0,0,0,0);
2827 return(EINVAL);
2828 }
2829 KERNEL_DEBUG(DBG_MISC_MOD, erp->er_handle,uap->u_eventmask,(uint32_t)evq,0,0);
2830
2831 if (uap->u_eventmask == EV_RM) {
2832 EVPROCDEQUE(p, evq);
2833
2834 #if SOCKETS
2835 if (fp->f_type == DTYPE_SOCKET) {
2836 TAILQ_REMOVE(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
2837 socket_unlock((struct socket *)fp->f_data, 1);
2838 } else
2839 #endif /* SOCKETS */
2840 {
2841 TAILQ_REMOVE(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
2842 PIPE_UNLOCK((struct pipe *)fp->f_data);
2843 }
2844 fp_drop(p, erp->er_handle, fp, 0);
2845 FREE(evq, M_TEMP);
2846 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, 0,0,0,0,0);
2847 return(0);
2848 }
2849 switch (uap->u_eventmask & EV_MASK) {
2850
2851 case 0:
2852 flag = 0;
2853 break;
2854
2855 case EV_RE:
2856 case EV_WR:
2857 case EV_RE|EV_WR:
2858 flag = EV_RWBYTES;
2859 break;
2860
2861 case EV_EX:
2862 flag = EV_OOB;
2863 break;
2864
2865 case EV_EX|EV_RE:
2866 case EV_EX|EV_WR:
2867 case EV_EX|EV_RE|EV_WR:
2868 flag = EV_OOB|EV_RWBYTES;
2869 break;
2870
2871 default:
2872 #if SOCKETS
2873 if (fp->f_type == DTYPE_SOCKET)
2874 socket_unlock((struct socket *)fp->f_data, 1);
2875 else
2876 #endif /* SOCKETS */
2877 PIPE_UNLOCK((struct pipe *)fp->f_data);
2878 fp_drop(p, erp->er_handle, fp, 0);
2879 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, EINVAL,0,0,0,0);
2880 return(EINVAL);
2881 }
2882 /*
2883 * since we're holding the socket/pipe lock, the event
2884 * cannot go from the unqueued state to the queued state
2885 * however, it can go from the queued state to the unqueued state
2886 * since that direction is protected by the proc_lock...
2887 * so do a quick check for EV_QUEUED w/o holding the proc lock
2888 * since by far the common case will be NOT EV_QUEUED, this saves
2889 * us taking the proc_lock the majority of the time
2890 */
2891 if (evq->ee_flags & EV_QUEUED) {
2892 /*
2893 * EVPROCDEQUE will recheck the state after it grabs the proc_lock
2894 */
2895 EVPROCDEQUE(p, evq);
2896 }
2897 /*
2898 * while the event is off the proc queue and
2899 * we're holding the socket/pipe lock
2900 * it's safe to update these fields...
2901 */
2902 evq->ee_req.er_eventbits = 0;
2903 evq->ee_eventmask = uap->u_eventmask & EV_MASK;
2904
2905 #if SOCKETS
2906 if (fp->f_type == DTYPE_SOCKET) {
2907 postevent((struct socket *)fp->f_data, 0, flag);
2908 socket_unlock((struct socket *)fp->f_data, 1);
2909 } else
2910 #endif /* SOCKETS */
2911 {
2912 postpipeevent((struct pipe *)fp->f_data, flag);
2913 PIPE_UNLOCK((struct pipe *)fp->f_data);
2914 }
2915 fp_drop(p, erp->er_handle, fp, 0);
2916 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, evq->ee_req.er_handle,evq->ee_eventmask,(uint32_t)fp->f_data,flag,0);
2917 return(0);
2918 }
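/*
 * Illustrative user-space sketch (not part of this file) of the legacy
 * watchevent()/waitevent()/modwatch() flow implemented above: watch a
 * descriptor, wait for a posted event, then drop the watch.  It assumes
 * the historical <sys/ev.h> declarations (struct eventreq, EV_FD,
 * EV_RE/EV_WR, EV_RM) are still visible to user space; this interface
 * predates and has been superseded by kqueue/kevent.
 */
#if 0
#include <sys/types.h>
#include <sys/time.h>
#include <sys/ev.h>
#include <stdio.h>

static int
watch_fd_once(int fd)
{
	struct eventreq er = { 0 };
	struct timeval tv = { 5, 0 };		/* block for at most 5 seconds */

	er.er_type = EV_FD;			/* only EV_FD is accepted */
	er.er_handle = fd;

	if (watchevent(&er, EV_RE | EV_WR) < 0)	/* arm read/write events */
		return (-1);
	if (waitevent(&er, &tv) != 0)		/* 0 means an event was returned */
		return (-1);
	printf("fd %d eventbits 0x%x rcnt %d wcnt %d\n",
	    er.er_handle, er.er_eventbits, er.er_rcnt, er.er_wcnt);

	return (modwatch(&er, EV_RM));		/* remove the watch */
}
#endif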
2919
2920 /* this routine is called from the close of fd with proc_fdlock held */
2921 int
2922 waitevent_close(struct proc *p, struct fileproc *fp)
2923 {
2924 struct eventqelt *evq;
2925
2926
2927 fp->f_flags &= ~FP_WAITEVENT;
2928
2929 #if SOCKETS
2930 if (fp->f_type == DTYPE_SOCKET) {
2931 socket_lock((struct socket *)fp->f_data, 1);
2932 evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
2933 } else
2934 #endif /* SOCKETS */
2935 if (fp->f_type == DTYPE_PIPE) {
2936 PIPE_LOCK((struct pipe *)fp->f_data);
2937 evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
2938 }
2939 else {
2940 return(EINVAL);
2941 }
2942 proc_fdunlock(p);
2943
2944
2945 // locate event if possible
2946 for ( ; evq != NULL; evq = evq->ee_slist.tqe_next) {
2947 if (evq->ee_proc == p)
2948 break;
2949 }
2950 if (evq == NULL) {
2951 #if SOCKETS
2952 if (fp->f_type == DTYPE_SOCKET)
2953 socket_unlock((struct socket *)fp->f_data, 1);
2954 else
2955 #endif /* SOCKETS */
2956 PIPE_UNLOCK((struct pipe *)fp->f_data);
2957
2958 proc_fdlock(p);
2959
2960 return(EINVAL);
2961 }
2962 EVPROCDEQUE(p, evq);
2963
2964 #if SOCKETS
2965 if (fp->f_type == DTYPE_SOCKET) {
2966 TAILQ_REMOVE(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
2967 socket_unlock((struct socket *)fp->f_data, 1);
2968 } else
2969 #endif /* SOCKETS */
2970 {
2971 TAILQ_REMOVE(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
2972 PIPE_UNLOCK((struct pipe *)fp->f_data);
2973 }
2974 FREE(evq, M_TEMP);
2975
2976 proc_fdlock(p);
2977
2978 return(0);
2979 }
2980
2981
2982 /*
2983 * gethostuuid
2984 *
2985 * Description: Get the host UUID from IOKit and return it to user space.
2986 *
2987 * Parameters: uuid_buf Pointer to buffer to receive UUID
2988 * timeout Timespec for timeout
2989 * spi SPI, skip sandbox check (temporary)
2990 *
2991 * Returns: 0 Success
2992 * EWOULDBLOCK Timeout is too short
2993 * copyout:EFAULT Bad user buffer
2994 * mac_system_check_info:EPERM Client not allowed to perform this operation
2995 *
2996 * Notes: A timeout seems redundant, since if it's tolerable to not
2997 * have a system UUID in hand, then why ask for one?
2998 */
2999 int
3000 gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval)
3001 {
3002 kern_return_t kret;
3003 int error;
3004 mach_timespec_t mach_ts; /* for IOKit call */
3005 __darwin_uuid_t uuid_kern; /* for IOKit call */
3006
3007 if (!uap->spi) {
3008 }
3009
3010 /* Convert the 32/64 bit timespec into a mach_timespec_t */
3011 if ( proc_is64bit(p) ) {
3012 struct user64_timespec ts;
3013 error = copyin(uap->timeoutp, &ts, sizeof(ts));
3014 if (error)
3015 return (error);
3016 mach_ts.tv_sec = ts.tv_sec;
3017 mach_ts.tv_nsec = ts.tv_nsec;
3018 } else {
3019 struct user32_timespec ts;
3020 error = copyin(uap->timeoutp, &ts, sizeof(ts) );
3021 if (error)
3022 return (error);
3023 mach_ts.tv_sec = ts.tv_sec;
3024 mach_ts.tv_nsec = ts.tv_nsec;
3025 }
3026
3027 /* Call IOKit with the stack buffer to get the UUID */
3028 kret = IOBSDGetPlatformUUID(uuid_kern, mach_ts);
3029
3030 /*
3031 * If we get it, copy out the data to the user buffer; note that a
3032 * uuid_t is an array of characters, so this is size invariant for
3033 * 32 vs. 64 bit.
3034 */
3035 if (kret == KERN_SUCCESS) {
3036 error = copyout(uuid_kern, uap->uuid_buf, sizeof(uuid_kern));
3037 } else {
3038 error = EWOULDBLOCK;
3039 }
3040
3041 return (error);
3042 }
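/*
 * Illustrative user-space sketch (not part of this file): calling the
 * public gethostuuid(2) wrapper that fronts the syscall above.  It
 * assumes the <unistd.h> declaration
 * int gethostuuid(uuid_t, const struct timespec *).
 */
#if 0
#include <unistd.h>
#include <uuid/uuid.h>
#include <time.h>
#include <stdio.h>

int
main(void)
{
	uuid_t uu;
	uuid_string_t str;
	struct timespec timeout = { 5, 0 };	/* give IOKit up to 5 seconds */

	if (gethostuuid(uu, &timeout) != 0) {
		perror("gethostuuid");
		return (1);
	}
	uuid_unparse(uu, str);
	printf("host UUID: %s\n", str);
	return (0);
}
#endif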
3043
3044 /*
3045 * ledger
3046 *
3047 * Description: Omnibus system call for ledger operations
3048 */
3049 int
3050 ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval)
3051 {
3052 #if !CONFIG_MACF
3053 #pragma unused(p)
3054 #endif
3055 int rval, pid, len, error;
3056 #ifdef LEDGER_DEBUG
3057 struct ledger_limit_args lla;
3058 #endif
3059 task_t task;
3060 proc_t proc;
3061
3062 /* Finish copying in the necessary args before taking the proc lock */
3063 error = 0;
3064 len = 0;
3065 if (args->cmd == LEDGER_ENTRY_INFO)
3066 error = copyin(args->arg3, (char *)&len, sizeof (len));
3067 else if (args->cmd == LEDGER_TEMPLATE_INFO)
3068 error = copyin(args->arg2, (char *)&len, sizeof (len));
3069 #ifdef LEDGER_DEBUG
3070 else if (args->cmd == LEDGER_LIMIT)
3071 error = copyin(args->arg2, (char *)&lla, sizeof (lla));
3072 #endif
3073 if (error)
3074 return (error);
3075 if (len < 0)
3076 return (EINVAL);
3077
3078 rval = 0;
3079 if (args->cmd != LEDGER_TEMPLATE_INFO) {
3080 pid = args->arg1;
3081 proc = proc_find(pid);
3082 if (proc == NULL)
3083 return (ESRCH);
3084
3085 #if CONFIG_MACF
3086 error = mac_proc_check_ledger(p, proc, args->cmd);
3087 if (error) {
3088 proc_rele(proc);
3089 return (error);
3090 }
3091 #endif
3092
3093 task = proc->task;
3094 }
3095
3096 switch (args->cmd) {
3097 #ifdef LEDGER_DEBUG
3098 case LEDGER_LIMIT: {
3099 if (!kauth_cred_issuser(kauth_cred_get()))
3100 rval = EPERM;
3101 else rval = ledger_limit(task, &lla);
3102 proc_rele(proc);
3103 break;
3104 }
3105 #endif
3106 case LEDGER_INFO: {
3107 struct ledger_info info;
3108
3109 rval = ledger_info(task, &info);
3110 proc_rele(proc);
3111 if (rval == 0)
3112 rval = copyout(&info, args->arg2,
3113 sizeof (info));
3114 break;
3115 }
3116
3117 case LEDGER_ENTRY_INFO: {
3118 void *buf;
3119 int sz;
3120
3121 rval = ledger_get_task_entry_info_multiple(task, &buf, &len);
3122 proc_rele(proc);
3123 if ((rval == 0) && (len > 0)) {
3124 sz = len * sizeof (struct ledger_entry_info);
3125 rval = copyout(buf, args->arg2, sz);
3126 kfree(buf, sz);
3127 }
3128 if (rval == 0)
3129 rval = copyout(&len, args->arg3, sizeof (len));
3130 break;
3131 }
3132
3133 case LEDGER_TEMPLATE_INFO: {
3134 void *buf;
3135 int sz;
3136
3137 rval = ledger_template_info(&buf, &len);
3138 if ((rval == 0) && (len > 0)) {
3139 sz = len * sizeof (struct ledger_template_info);
3140 rval = copyout(buf, args->arg1, sz);
3141 kfree(buf, sz);
3142 }
3143 if (rval == 0)
3144 rval = copyout(&len, args->arg2, sizeof (len));
3145 break;
3146 }
3147
3148 default:
3149 rval = EINVAL;
3150 }
3151
3152 return (rval);
3153 }
3154
3155 int
3156 telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t *retval)
3157 {
3158 int error = 0;
3159
3160 switch (args->cmd) {
3161 #if CONFIG_TELEMETRY
3162 case TELEMETRY_CMD_TIMER_EVENT:
3163 error = telemetry_timer_event(args->deadline, args->interval, args->leeway);
3164 break;
3165 #endif /* CONFIG_TELEMETRY */
3166 case TELEMETRY_CMD_VOUCHER_NAME:
3167 if (thread_set_voucher_name((mach_port_name_t)args->deadline))
3168 error = EINVAL;
3169 break;
3170
3171 default:
3172 error = EINVAL;
3173 break;
3174 }
3175
3176 return (error);
3177 }
3178
3179 #if defined(DEVELOPMENT) || defined(DEBUG)
3180 #if CONFIG_WAITQ_DEBUG
3181 static uint64_t g_wqset_num = 0;
3182 struct g_wqset {
3183 queue_chain_t link;
3184 struct waitq_set *wqset;
3185 };
3186
3187 static queue_head_t g_wqset_list;
3188 static struct waitq_set *g_waitq_set = NULL;
3189
3190 static inline struct waitq_set *sysctl_get_wqset(int idx)
3191 {
3192 struct g_wqset *gwqs;
3193
3194 if (!g_wqset_num)
3195 queue_init(&g_wqset_list);
3196
3197 /* don't bother with locks: this is test-only code! */
3198 qe_foreach_element(gwqs, &g_wqset_list, link) {
3199 if ((int)(wqset_id(gwqs->wqset) & 0xffffffff) == idx)
3200 return gwqs->wqset;
3201 }
3202
3203 /* allocate a new one */
3204 ++g_wqset_num;
3205 gwqs = (struct g_wqset *)kalloc(sizeof(*gwqs));
3206 assert(gwqs != NULL);
3207
3208 gwqs->wqset = waitq_set_alloc(SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST|SYNC_POLICY_DISABLE_IRQ);
3209 enqueue_tail(&g_wqset_list, &gwqs->link);
3210 printf("[WQ]: created new waitq set 0x%llx\n", wqset_id(gwqs->wqset));
3211
3212 return gwqs->wqset;
3213 }
3214
3215 #define MAX_GLOBAL_TEST_QUEUES 64
3216 static int g_wq_init = 0;
3217 static struct waitq g_wq[MAX_GLOBAL_TEST_QUEUES];
3218
3219 static inline struct waitq *global_test_waitq(int idx)
3220 {
3221 if (idx < 0)
3222 return NULL;
3223
3224 if (!g_wq_init) {
3225 g_wq_init = 1;
3226 for (int i = 0; i < MAX_GLOBAL_TEST_QUEUES; i++)
3227 waitq_init(&g_wq[i], SYNC_POLICY_FIFO|SYNC_POLICY_DISABLE_IRQ);
3228 }
3229
3230 return &g_wq[idx % MAX_GLOBAL_TEST_QUEUES];
3231 }
3232
3233 static int sysctl_waitq_wakeup_one SYSCTL_HANDLER_ARGS
3234 {
3235 #pragma unused(oidp, arg1, arg2)
3236 int error;
3237 int index;
3238 struct waitq *waitq;
3239 kern_return_t kr;
3240 int64_t event64 = 0;
3241
3242 error = SYSCTL_IN(req, &event64, sizeof(event64));
3243 if (error)
3244 return error;
3245
3246 if (!req->newptr)
3247 return SYSCTL_OUT(req, &event64, sizeof(event64));
3248
3249 if (event64 < 0) {
3250 index = (int)((-event64) & 0xffffffff);
3251 waitq = wqset_waitq(sysctl_get_wqset(index));
3252 index = -index;
3253 } else {
3254 index = (int)event64;
3255 waitq = global_test_waitq(index);
3256 }
3257
3258 event64 = 0;
3259
3260 printf("[WQ]: Waking one thread on waitq [%d] event:0x%llx\n",
3261 index, event64);
3262 kr = waitq_wakeup64_one(waitq, (event64_t)event64, THREAD_AWAKENED,
3263 WAITQ_ALL_PRIORITIES);
3264 printf("[WQ]: \tkr=%d\n", kr);
3265
3266 return SYSCTL_OUT(req, &kr, sizeof(kr));
3267 }
3268 SYSCTL_PROC(_kern, OID_AUTO, waitq_wakeup_one, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3269 0, 0, sysctl_waitq_wakeup_one, "Q", "wakeup one thread waiting on given event");
3270
3271
3272 static int sysctl_waitq_wakeup_all SYSCTL_HANDLER_ARGS
3273 {
3274 #pragma unused(oidp, arg1, arg2)
3275 int error;
3276 int index;
3277 struct waitq *waitq;
3278 kern_return_t kr;
3279 int64_t event64 = 0;
3280
3281 error = SYSCTL_IN(req, &event64, sizeof(event64));
3282 if (error)
3283 return error;
3284
3285 if (!req->newptr)
3286 return SYSCTL_OUT(req, &event64, sizeof(event64));
3287
3288 if (event64 < 0) {
3289 index = (int)((-event64) & 0xffffffff);
3290 waitq = wqset_waitq(sysctl_get_wqset(index));
3291 index = -index;
3292 } else {
3293 index = (int)event64;
3294 waitq = global_test_waitq(index);
3295 }
3296
3297 event64 = 0;
3298
3299 printf("[WQ]: Waking all threads on waitq [%d] event:0x%llx\n",
3300 index, event64);
3301 kr = waitq_wakeup64_all(waitq, (event64_t)event64,
3302 THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
3303 printf("[WQ]: \tkr=%d\n", kr);
3304
3305 return SYSCTL_OUT(req, &kr, sizeof(kr));
3306 }
3307 SYSCTL_PROC(_kern, OID_AUTO, waitq_wakeup_all, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3308 0, 0, sysctl_waitq_wakeup_all, "Q", "wakeup all threads waiting on given event");
3309
3310
3311 static int sysctl_waitq_wait SYSCTL_HANDLER_ARGS
3312 {
3313 #pragma unused(oidp, arg1, arg2)
3314 int error;
3315 int index;
3316 struct waitq *waitq;
3317 kern_return_t kr;
3318 int64_t event64 = 0;
3319
3320 error = SYSCTL_IN(req, &event64, sizeof(event64));
3321 if (error)
3322 return error;
3323
3324 if (!req->newptr)
3325 return SYSCTL_OUT(req, &event64, sizeof(event64));
3326
3327 if (event64 < 0) {
3328 index = (int)((-event64) & 0xffffffff);
3329 waitq = wqset_waitq(sysctl_get_wqset(index));
3330 index = -index;
3331 } else {
3332 index = (int)event64;
3333 waitq = global_test_waitq(index);
3334 }
3335
3336 event64 = 0;
3337
3338 printf("[WQ]: Current thread waiting on waitq [%d] event:0x%llx\n",
3339 index, event64);
3340 kr = waitq_assert_wait64(waitq, (event64_t)event64, THREAD_INTERRUPTIBLE, 0);
3341 if (kr == THREAD_WAITING)
3342 thread_block(THREAD_CONTINUE_NULL);
3343 printf("[WQ]: \tWoke Up: kr=%d\n", kr);
3344
3345 return SYSCTL_OUT(req, &kr, sizeof(kr));
3346 }
3347 SYSCTL_PROC(_kern, OID_AUTO, waitq_wait, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3348 0, 0, sysctl_waitq_wait, "Q", "start waiting on given event");
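/*
 * Illustrative user-space sketch (not part of this file): exercising the
 * kern.waitq_wait / kern.waitq_wakeup_one test sysctls above on a
 * DEVELOPMENT/DEBUG kernel built with CONFIG_WAITQ_DEBUG.  Writing the
 * index selects a global test waitq; the handlers return a kern_return_t.
 * On RELEASE kernels these nodes do not exist and sysctlbyname() fails
 * with ENOENT.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

static int
waitq_sysctl(const char *name, int64_t index)
{
	int kr = 0;			/* receives the handler's kern_return_t */
	size_t len = sizeof(kr);

	if (sysctlbyname(name, &kr, &len, &index, sizeof(index)) < 0) {
		perror(name);
		return (-1);
	}
	printf("%s [%lld] -> kr=%d\n", name, (long long)index, kr);
	return (kr);
}

/*
 * e.g.: in one thread:     waitq_sysctl("kern.waitq_wait", 2);
 *       in another thread: waitq_sysctl("kern.waitq_wakeup_one", 2);
 */
#endif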
3349
3350
3351 static int sysctl_wqset_select SYSCTL_HANDLER_ARGS
3352 {
3353 #pragma unused(oidp, arg1, arg2)
3354 int error;
3355 struct waitq_set *wqset;
3356 uint64_t event64 = 0;
3357
3358 error = SYSCTL_IN(req, &event64, sizeof(event64));
3359 if (error)
3360 return error;
3361
3362 if (!req->newptr)
3363 goto out;
3364
3365 wqset = sysctl_get_wqset((int)(event64 & 0xffffffff));
3366 g_waitq_set = wqset;
3367
3368 event64 = wqset_id(wqset);
3369 printf("[WQ]: selected wqset 0x%llx\n", event64);
3370
3371 out:
3372 if (g_waitq_set)
3373 event64 = wqset_id(g_waitq_set);
3374 else
3375 event64 = (uint64_t)(-1);
3376
3377 return SYSCTL_OUT(req, &event64, sizeof(event64));
3378 }
3379 SYSCTL_PROC(_kern, OID_AUTO, wqset_select, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3380 0, 0, sysctl_wqset_select, "Q", "select/create a global waitq set");
3381
3382
3383 static int sysctl_waitq_link SYSCTL_HANDLER_ARGS
3384 {
3385 #pragma unused(oidp, arg1, arg2)
3386 int error;
3387 int index;
3388 struct waitq *waitq;
3389 struct waitq_set *wqset;
3390 kern_return_t kr;
3391 uint64_t reserved_link = 0;
3392 int64_t event64 = 0;
3393
3394 error = SYSCTL_IN(req, &event64, sizeof(event64));
3395 if (error)
3396 return error;
3397
3398 if (!req->newptr)
3399 return SYSCTL_OUT(req, &event64, sizeof(event64));
3400
3401 if (!g_waitq_set)
3402 g_waitq_set = sysctl_get_wqset(1);
3403 wqset = g_waitq_set;
3404
3405 if (event64 < 0) {
3406 struct waitq_set *tmp;
3407 index = (int)((-event64) & 0xffffffff);
3408 tmp = sysctl_get_wqset(index);
3409 if (tmp == wqset)
3410 goto out;
3411 waitq = wqset_waitq(tmp);
3412 index = -index;
3413 } else {
3414 index = (int)event64;
3415 waitq = global_test_waitq(index);
3416 }
3417
3418 printf("[WQ]: linking waitq [%d] to global wqset (0x%llx)\n",
3419 index, wqset_id(wqset));
3420 reserved_link = waitq_link_reserve(waitq);
3421 kr = waitq_link(waitq, wqset, WAITQ_SHOULD_LOCK, &reserved_link);
3422 waitq_link_release(reserved_link);
3423
3424 printf("[WQ]: \tkr=%d\n", kr);
3425
3426 out:
3427 return SYSCTL_OUT(req, &kr, sizeof(kr));
3428 }
3429 SYSCTL_PROC(_kern, OID_AUTO, waitq_link, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3430 0, 0, sysctl_waitq_link, "Q", "link global waitq to test waitq set");
3431
3432
3433 static int sysctl_waitq_unlink SYSCTL_HANDLER_ARGS
3434 {
3435 #pragma unused(oidp, arg1, arg2)
3436 int error;
3437 int index;
3438 struct waitq *waitq;
3439 struct waitq_set *wqset;
3440 kern_return_t kr;
3441 uint64_t event64 = 0;
3442
3443 error = SYSCTL_IN(req, &event64, sizeof(event64));
3444 if (error)
3445 return error;
3446
3447 if (!req->newptr)
3448 return SYSCTL_OUT(req, &event64, sizeof(event64));
3449
3450 if (!g_waitq_set)
3451 g_waitq_set = sysctl_get_wqset(1);
3452 wqset = g_waitq_set;
3453
3454 index = (int)event64;
3455 waitq = global_test_waitq(index);
3456
3457 printf("[WQ]: unlinking waitq [%d] from global wqset (0x%llx)\n",
3458 index, wqset_id(wqset));
3459
3460 kr = waitq_unlink(waitq, wqset);
3461 printf("[WQ]: \tkr=%d\n", kr);
3462
3463 return SYSCTL_OUT(req, &kr, sizeof(kr));
3464 }
3465 SYSCTL_PROC(_kern, OID_AUTO, waitq_unlink, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3466 0, 0, sysctl_waitq_unlink, "Q", "unlink global waitq from test waitq set");
3467
3468
3469 static int sysctl_waitq_clear_prepost SYSCTL_HANDLER_ARGS
3470 {
3471 #pragma unused(oidp, arg1, arg2)
3472 struct waitq *waitq;
3473 uint64_t event64 = 0;
3474 int error, index;
3475
3476 error = SYSCTL_IN(req, &event64, sizeof(event64));
3477 if (error)
3478 return error;
3479
3480 if (!req->newptr)
3481 return SYSCTL_OUT(req, &event64, sizeof(event64));
3482
3483 index = (int)event64;
3484 waitq = global_test_waitq(index);
3485
3486 printf("[WQ]: clearing prepost on waitq [%d]\n", index);
3487 waitq_clear_prepost(waitq);
3488
3489 return SYSCTL_OUT(req, &event64, sizeof(event64));
3490 }
3491 SYSCTL_PROC(_kern, OID_AUTO, waitq_clear_prepost, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3492 0, 0, sysctl_waitq_clear_prepost, "Q", "clear prepost on given waitq");
3493
3494
3495 static int sysctl_wqset_unlink_all SYSCTL_HANDLER_ARGS
3496 {
3497 #pragma unused(oidp, arg1, arg2)
3498 int error;
3499 struct waitq_set *wqset;
3500 kern_return_t kr;
3501 uint64_t event64 = 0;
3502
3503 error = SYSCTL_IN(req, &event64, sizeof(event64));
3504 if (error)
3505 return error;
3506
3507 if (!req->newptr)
3508 return SYSCTL_OUT(req, &event64, sizeof(event64));
3509
3510 if (!g_waitq_set)
3511 g_waitq_set = sysctl_get_wqset(1);
3512 wqset = g_waitq_set;
3513
3514 printf("[WQ]: unlinking all queues from global wqset (0x%llx)\n",
3515 wqset_id(wqset));
3516
3517 kr = waitq_set_unlink_all(wqset);
3518 printf("[WQ]: \tkr=%d\n", kr);
3519
3520 return SYSCTL_OUT(req, &kr, sizeof(kr));
3521 }
3522 SYSCTL_PROC(_kern, OID_AUTO, wqset_unlink_all, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3523 0, 0, sysctl_wqset_unlink_all, "Q", "unlink all queues from test waitq set");
3524
3525
3526 static int sysctl_wqset_clear_preposts SYSCTL_HANDLER_ARGS
3527 {
3528 #pragma unused(oidp, arg1, arg2)
3529 struct waitq_set *wqset = NULL;
3530 uint64_t event64 = 0;
3531 int error, index;
3532
3533 error = SYSCTL_IN(req, &event64, sizeof(event64));
3534 if (error)
3535 return error;
3536
3537 if (!req->newptr)
3538 goto out;
3539
3540 index = (int)((event64) & 0xffffffff);
3541 wqset = sysctl_get_wqset(index);
3542 assert(wqset != NULL);
3543
3544 printf("[WQ]: clearing preposts on wqset 0x%llx\n", wqset_id(wqset));
3545 waitq_set_clear_preposts(wqset);
3546
3547 out:
3548 if (wqset)
3549 event64 = wqset_id(wqset);
3550 else
3551 event64 = (uint64_t)(-1);
3552
3553 return SYSCTL_OUT(req, &event64, sizeof(event64));
3554 }
3555 SYSCTL_PROC(_kern, OID_AUTO, wqset_clear_preposts, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3556 0, 0, sysctl_wqset_clear_preposts, "Q", "clear preposts on given waitq set");
3557
3558 #endif /* CONFIG_WAITQ_DEBUG */
3559 #endif /* defined(DEVELOPMENT) || defined(DEBUG) */