1c79356b 1/*
3e170ce0 2 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)sys_generic.c 8.9 (Berkeley) 2/14/95
67 */
68/*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/filedesc.h>
78#include <sys/ioctl.h>
79#include <sys/file_internal.h>
80#include <sys/proc_internal.h>
1c79356b 81#include <sys/socketvar.h>
91447636 82#include <sys/uio_internal.h>
1c79356b 83#include <sys/kernel.h>
fe8ab488 84#include <sys/guarded.h>
85#include <sys/stat.h>
86#include <sys/malloc.h>
91447636 87#include <sys/sysproto.h>
1c79356b 88
91447636 89#include <sys/mount_internal.h>
90#include <sys/protosw.h>
91#include <sys/ev.h>
92#include <sys/user.h>
93#include <sys/kdebug.h>
94#include <sys/poll.h>
95#include <sys/event.h>
96#include <sys/eventvar.h>
316670eb 97#include <sys/proc.h>
39236c6e 98#include <sys/kauth.h>
99
100#include <mach/mach_types.h>
101#include <kern/kern_types.h>
1c79356b 102#include <kern/assert.h>
103#include <kern/kalloc.h>
104#include <kern/thread.h>
105#include <kern/clock.h>
106#include <kern/ledger.h>
107#include <kern/task.h>
39236c6e 108#include <kern/telemetry.h>
109#include <kern/waitq.h>
110#include <kern/sched_prim.h>
111
112#include <sys/mbuf.h>
39236c6e 113#include <sys/domain.h>
114#include <sys/socket.h>
115#include <sys/socketvar.h>
116#include <sys/errno.h>
55e303ae 117#include <sys/syscall.h>
91447636 118#include <sys/pipe.h>
1c79356b 119
b0d623f7 120#include <security/audit/audit.h>
e5568f75 121
122#include <net/if.h>
123#include <net/route.h>
124
125#include <netinet/in.h>
126#include <netinet/in_systm.h>
127#include <netinet/ip.h>
128#include <netinet/in_pcb.h>
129#include <netinet/ip_var.h>
130#include <netinet/ip6.h>
131#include <netinet/tcp.h>
132#include <netinet/tcp_fsm.h>
133#include <netinet/tcp_seq.h>
134#include <netinet/tcp_timer.h>
135#include <netinet/tcp_var.h>
136#include <netinet/tcpip.h>
137#include <netinet/tcp_debug.h>
0b4e3aa0 138/* for wait queue based select */
3e170ce0 139#include <kern/waitq.h>
91447636 140#include <kern/kalloc.h>
141#include <sys/vnode_internal.h>
142
143/* XXX should be in a header file somewhere */
144void evsofree(struct socket *);
145void evpipefree(struct pipe *);
146void postpipeevent(struct pipe *, int);
147void postevent(struct socket *, struct sockbuf *, int);
148extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t timeoutp);
3e170ce0 149extern void delay(int);
2d21ac55 150
91447636 151int rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval);
fe8ab488 152int wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval);
91447636 153
2d21ac55 154__private_extern__ int dofileread(vfs_context_t ctx, struct fileproc *fp,
155 user_addr_t bufp, user_size_t nbyte,
156 off_t offset, int flags, user_ssize_t *retval);
2d21ac55 157__private_extern__ int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
158 user_addr_t bufp, user_size_t nbyte,
159 off_t offset, int flags, user_ssize_t *retval);
160__private_extern__ int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode);
161__private_extern__ void donefileread(struct proc *p, struct fileproc *fp_ret, int fd);
9bccf70c 162
163
164/* Conflict wait queue for when selects collide (opaque type) */
3e170ce0 165struct waitq select_conflict_queue;
166
167/*
168 * Init routine called from bsd_init.c
169 */
3e170ce0 170void select_waitq_init(void);
6d2010ae 171void
3e170ce0 172select_waitq_init(void)
6d2010ae 173{
3e170ce0 174 waitq_init(&select_conflict_queue, SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
175}
176
91447636 177#define f_flag f_fglob->fg_flag
39236c6e 178#define f_type f_fglob->fg_ops->fo_type
179#define f_msgcount f_fglob->fg_msgcount
180#define f_cred f_fglob->fg_cred
181#define f_ops f_fglob->fg_ops
182#define f_offset f_fglob->fg_offset
183#define f_data f_fglob->fg_data
2d21ac55 184
185/*
186 * Read system call.
187 *
188 * Returns: 0 Success
189 * preparefileread:EBADF
190 * preparefileread:ESPIPE
191 * preparefileread:ENXIO
192 * preparefileread:EBADF
193 * dofileread:???
1c79356b 194 */
9bccf70c 195int
196read(struct proc *p, struct read_args *uap, user_ssize_t *retval)
197{
198 __pthread_testcancel(1);
199 return(read_nocancel(p, (struct read_nocancel_args *)uap, retval));
200}
201
202int
203read_nocancel(struct proc *p, struct read_nocancel_args *uap, user_ssize_t *retval)
9bccf70c 204{
91447636 205 struct fileproc *fp;
9bccf70c 206 int error;
91447636 207 int fd = uap->fd;
b0d623f7 208 struct vfs_context context;
209
210 if ( (error = preparefileread(p, &fp, fd, 0)) )
211 return (error);
9bccf70c 212
213 context = *(vfs_context_current());
214 context.vc_ucred = fp->f_fglob->fg_cred;
215
216 error = dofileread(&context, fp, uap->cbuf, uap->nbyte,
217 (off_t)-1, 0, retval);
218
219 donefileread(p, fp, fd);
220
221 return (error);
222}
223
224/*
225 * Pread system call
226 *
227 * Returns: 0 Success
228 * preparefileread:EBADF
229 * preparefileread:ESPIPE
230 * preparefileread:ENXIO
231 * preparefileread:EBADF
232 * dofileread:???
9bccf70c 233 */
9bccf70c 234int
2d21ac55 235pread(struct proc *p, struct pread_args *uap, user_ssize_t *retval)
9bccf70c 236{
237 __pthread_testcancel(1);
238 return(pread_nocancel(p, (struct pread_nocancel_args *)uap, retval));
239}
240
241int
242pread_nocancel(struct proc *p, struct pread_nocancel_args *uap, user_ssize_t *retval)
243{
244 struct fileproc *fp = NULL; /* fp set by preparefileread() */
91447636 245 int fd = uap->fd;
9bccf70c 246 int error;
b0d623f7 247 struct vfs_context context;
9bccf70c 248
91447636 249 if ( (error = preparefileread(p, &fp, fd, 1)) )
4a3eedf9 250 goto out;
91447636 251
252 context = *(vfs_context_current());
253 context.vc_ucred = fp->f_fglob->fg_cred;
254
255 error = dofileread(&context, fp, uap->buf, uap->nbyte,
91447636 256 uap->offset, FOF_OFFSET, retval);
55e303ae 257
258 donefileread(p, fp, fd);
259
b7266188 260 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE),
55e303ae 261 uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
262
263out:
91447636 264 return (error);
265}
266
267/*
268 * Code common for read and pread
269 */
270
271void
272donefileread(struct proc *p, struct fileproc *fp, int fd)
273{
2d21ac55 274 proc_fdlock_spin(p);
275 fp_drop(p, fd, fp, 1);
276 proc_fdunlock(p);
277}
278
279/*
280 * Returns: 0 Success
281 * EBADF
282 * ESPIPE
283 * ENXIO
284 * fp_lookup:EBADF
285 * fo_read:???
286 */
287int
288preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pread)
289{
290 vnode_t vp;
291 int error;
292 struct fileproc *fp;
293
294 AUDIT_ARG(fd, fd);
295
2d21ac55 296 proc_fdlock_spin(p);
297
298 error = fp_lookup(p, fd, &fp, 1);
299
300 if (error) {
301 proc_fdunlock(p);
302 return (error);
303 }
304 if ((fp->f_flag & FREAD) == 0) {
305 error = EBADF;
306 goto out;
307 }
308 if (check_for_pread && (fp->f_type != DTYPE_VNODE)) {
309 error = ESPIPE;
310 goto out;
311 }
312 if (fp->f_type == DTYPE_VNODE) {
313 vp = (struct vnode *)fp->f_fglob->fg_data;
314
315 if (check_for_pread && (vnode_isfifo(vp))) {
316 error = ESPIPE;
317 goto out;
318 }
319 if (check_for_pread && (vp->v_flag & VISTTY)) {
320 error = ENXIO;
321 goto out;
322 }
323 }
324
325 *fp_ret = fp;
326
327 proc_fdunlock(p);
328 return (0);
329
330out:
331 fp_drop(p, fd, fp, 1);
332 proc_fdunlock(p);
333 return (error);
334}
335
336
337/*
338 * Returns: 0 Success
339 * EINVAL
340 * fo_read:???
341 */
55e303ae 342__private_extern__ int
343dofileread(vfs_context_t ctx, struct fileproc *fp,
344 user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
345 user_ssize_t *retval)
1c79356b 346{
347 uio_t auio;
348 user_ssize_t bytecnt;
349 long error = 0;
350 char uio_buf[ UIO_SIZEOF(1) ];
1c79356b 351
352 if (nbyte > INT_MAX)
353 return (EINVAL);
91447636 354
2d21ac55 355 if (IS_64BIT_PROCESS(vfs_context_proc(ctx))) {
356 auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_READ,
357 &uio_buf[0], sizeof(uio_buf));
358 } else {
359 auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_READ,
360 &uio_buf[0], sizeof(uio_buf));
361 }
362 uio_addiov(auio, bufp, nbyte);
363
91447636 364 bytecnt = nbyte;
9bccf70c 365
2d21ac55 366 if ((error = fo_read(fp, auio, flags, ctx))) {
91447636 367 if (uio_resid(auio) != bytecnt && (error == ERESTART ||
368 error == EINTR || error == EWOULDBLOCK))
369 error = 0;
370 }
91447636 371 bytecnt -= uio_resid(auio);
372
373 *retval = bytecnt;
374
9bccf70c 375 return (error);
376}
377
378/*
379 * Scatter read system call.
380 *
381 * Returns: 0 Success
382 * EINVAL
383 * ENOMEM
384 * copyin:EFAULT
385 * rd_uio:???
9bccf70c 386 */
9bccf70c 387int
388readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval)
389{
390 __pthread_testcancel(1);
391 return(readv_nocancel(p, (struct readv_nocancel_args *)uap, retval));
392}
393
394int
395readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *retval)
1c79356b 396{
91447636 397 uio_t auio = NULL;
1c79356b 398 int error;
399 struct user_iovec *iovp;
400
401 /* Verify range before calling uio_create() */
402 if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV)
403 return (EINVAL);
404
405 /* allocate a uio large enough to hold the number of iovecs passed */
406 auio = uio_create(uap->iovcnt, 0,
407 (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
408 UIO_READ);
409
410 /* get location of iovecs within the uio. then copyin the iovecs from
411 * user space.
412 */
413 iovp = uio_iovsaddr(auio);
414 if (iovp == NULL) {
415 error = ENOMEM;
416 goto ExitThisRoutine;
417 }
418 error = copyin_user_iovec_array(uap->iovp,
419 IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
420 uap->iovcnt, iovp);
421 if (error) {
422 goto ExitThisRoutine;
423 }
424
425 /* finalize uio_t for use and do the IO
426 */
427 error = uio_calculateresid(auio);
428 if (error) {
429 goto ExitThisRoutine;
430 }
431 error = rd_uio(p, uap->fd, auio, retval);
432
433ExitThisRoutine:
434 if (auio != NULL) {
435 uio_free(auio);
436 }
437 return (error);
438}
439
440/*
441 * Write system call
2d21ac55
A
442 *
443 * Returns: 0 Success
444 * EBADF
445 * fp_lookup:EBADF
446 * dofilewrite:???
1c79356b 447 */
9bccf70c 448int
2d21ac55
A
449write(struct proc *p, struct write_args *uap, user_ssize_t *retval)
450{
451 __pthread_testcancel(1);
452 return(write_nocancel(p, (struct write_nocancel_args *)uap, retval));
453
454}
455
456int
457write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *retval)
1c79356b 458{
91447636 459 struct fileproc *fp;
9bccf70c 460 int error;
91447636 461 int fd = uap->fd;
fe8ab488 462 bool wrote_some = false;
9bccf70c 463
b0d623f7
A
464 AUDIT_ARG(fd, fd);
465
91447636
A
466 error = fp_lookup(p,fd,&fp,0);
467 if (error)
468 return(error);
469 if ((fp->f_flag & FWRITE) == 0) {
470 error = EBADF;
fe8ab488
A
471 } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
472 proc_fdlock(p);
473 error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
474 proc_fdunlock(p);
91447636 475 } else {
2d21ac55
A
476 struct vfs_context context = *(vfs_context_current());
477 context.vc_ucred = fp->f_fglob->fg_cred;
478
479 error = dofilewrite(&context, fp, uap->cbuf, uap->nbyte,
9bccf70c 480 (off_t)-1, 0, retval);
fe8ab488
A
481
482 wrote_some = *retval > 0;
91447636 483 }
fe8ab488 484 if (wrote_some)
91447636
A
485 fp_drop_written(p, fd, fp);
486 else
487 fp_drop(p, fd, fp, 0);
9bccf70c
A
488 return(error);
489}
490
491/*
91447636 492 * pwrite system call
2d21ac55
A
493 *
494 * Returns: 0 Success
495 * EBADF
496 * ESPIPE
497 * ENXIO
498 * EINVAL
499 * fp_lookup:EBADF
500 * dofilewrite:???
9bccf70c 501 */
9bccf70c 502int
2d21ac55
A
503pwrite(struct proc *p, struct pwrite_args *uap, user_ssize_t *retval)
504{
505 __pthread_testcancel(1);
506 return(pwrite_nocancel(p, (struct pwrite_nocancel_args *)uap, retval));
507}
508
509int
510pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t *retval)
9bccf70c 511{
91447636 512 struct fileproc *fp;
9bccf70c 513 int error;
91447636 514 int fd = uap->fd;
2d21ac55 515 vnode_t vp = (vnode_t)0;
fe8ab488 516 bool wrote_some = false;
91447636 517
b0d623f7
A
518 AUDIT_ARG(fd, fd);
519
91447636
A
520 error = fp_lookup(p,fd,&fp,0);
521 if (error)
522 return(error);
9bccf70c 523
91447636
A
524 if ((fp->f_flag & FWRITE) == 0) {
525 error = EBADF;
fe8ab488
A
526 } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
527 proc_fdlock(p);
528 error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
529 proc_fdunlock(p);
91447636 530 } else {
2d21ac55
A
531 struct vfs_context context = *vfs_context_current();
532 context.vc_ucred = fp->f_fglob->fg_cred;
533
91447636
A
534 if (fp->f_type != DTYPE_VNODE) {
535 error = ESPIPE;
2d21ac55
A
536 goto errout;
537 }
538 vp = (vnode_t)fp->f_fglob->fg_data;
539 if (vnode_isfifo(vp)) {
540 error = ESPIPE;
541 goto errout;
542 }
543 if ((vp->v_flag & VISTTY)) {
544 error = ENXIO;
545 goto errout;
91447636 546 }
2d21ac55
A
547 if (uap->offset == (off_t)-1) {
548 error = EINVAL;
549 goto errout;
550 }
551
552 error = dofilewrite(&context, fp, uap->buf, uap->nbyte,
553 uap->offset, FOF_OFFSET, retval);
fe8ab488 554 wrote_some = *retval > 0;
9bccf70c 555 }
2d21ac55 556errout:
fe8ab488 557 if (wrote_some)
91447636
A
558 fp_drop_written(p, fd, fp);
559 else
560 fp_drop(p, fd, fp, 0);
55e303ae 561
b7266188 562 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE),
55e303ae
A
563 uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
564
9bccf70c
A
565 return(error);
566}
567
2d21ac55
A
568/*
569 * Returns: 0 Success
570 * EINVAL
571 * <fo_write>:EPIPE
572 * <fo_write>:??? [indirect through struct fileops]
573 */
55e303ae 574__private_extern__ int
2d21ac55
A
575dofilewrite(vfs_context_t ctx, struct fileproc *fp,
576 user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
577 user_ssize_t *retval)
9bccf70c 578{
91447636
A
579 uio_t auio;
580 long error = 0;
581 user_ssize_t bytecnt;
582 char uio_buf[ UIO_SIZEOF(1) ];
91447636 583
fe8ab488
A
584 if (nbyte > INT_MAX) {
585 *retval = 0;
9bccf70c 586 return (EINVAL);
fe8ab488 587 }
91447636 588
2d21ac55 589 if (IS_64BIT_PROCESS(vfs_context_proc(ctx))) {
91447636
A
590 auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_WRITE,
591 &uio_buf[0], sizeof(uio_buf));
592 } else {
593 auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_WRITE,
594 &uio_buf[0], sizeof(uio_buf));
595 }
596 uio_addiov(auio, bufp, nbyte);
597
91447636 598 bytecnt = nbyte;
2d21ac55 599 if ((error = fo_write(fp, auio, flags, ctx))) {
91447636 600 if (uio_resid(auio) != bytecnt && (error == ERESTART ||
9bccf70c
A
601 error == EINTR || error == EWOULDBLOCK))
602 error = 0;
55e303ae 603 /* The socket layer handles SIGPIPE */
6d2010ae
A
604 if (error == EPIPE && fp->f_type != DTYPE_SOCKET &&
605 (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0) {
2d21ac55
A
606 /* XXX Raise the signal on the thread? */
607 psignal(vfs_context_proc(ctx), SIGPIPE);
608 }
9bccf70c 609 }
91447636 610 bytecnt -= uio_resid(auio);
91447636
A
611 *retval = bytecnt;
612
9bccf70c 613 return (error);
1c79356b 614}
9bccf70c
A
615
616/*
617 * Gather write system call
618 */
9bccf70c 619int
2d21ac55
A
620writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval)
621{
622 __pthread_testcancel(1);
623 return(writev_nocancel(p, (struct writev_nocancel_args *)uap, retval));
624}
625
626int
627writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t *retval)
1c79356b 628{
91447636 629 uio_t auio = NULL;
1c79356b 630 int error;
fe8ab488 631 struct fileproc *fp;
91447636 632 struct user_iovec *iovp;
fe8ab488 633 bool wrote_some = false;
91447636 634
b0d623f7
A
635 AUDIT_ARG(fd, uap->fd);
636
91447636
A
637 /* Verify range before calling uio_create() */
638 if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV)
639 return (EINVAL);
640
641 /* allocate a uio large enough to hold the number of iovecs passed */
642 auio = uio_create(uap->iovcnt, 0,
643 (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
644 UIO_WRITE);
645
646 /* get location of iovecs within the uio. then copyin the iovecs from
647 * user space.
648 */
649 iovp = uio_iovsaddr(auio);
650 if (iovp == NULL) {
651 error = ENOMEM;
652 goto ExitThisRoutine;
653 }
b0d623f7
A
654 error = copyin_user_iovec_array(uap->iovp,
655 IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
656 uap->iovcnt, iovp);
91447636
A
657 if (error) {
658 goto ExitThisRoutine;
659 }
660
661 /* finalize uio_t for use and do the IO
662 */
39236c6e
A
663 error = uio_calculateresid(auio);
664 if (error) {
665 goto ExitThisRoutine;
666 }
fe8ab488
A
667
668 error = fp_lookup(p, uap->fd, &fp, 0);
669 if (error)
670 goto ExitThisRoutine;
671
672 if ((fp->f_flag & FWRITE) == 0) {
673 error = EBADF;
674 } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
675 proc_fdlock(p);
676 error = fp_guard_exception(p, uap->fd, fp, kGUARD_EXC_WRITE);
677 proc_fdunlock(p);
678 } else {
679 error = wr_uio(p, fp, auio, retval);
680 wrote_some = *retval > 0;
681 }
682
683 if (wrote_some)
684 fp_drop_written(p, uap->fd, fp);
685 else
686 fp_drop(p, uap->fd, fp, 0);
91447636
A
687
688ExitThisRoutine:
689 if (auio != NULL) {
690 uio_free(auio);
691 }
1c79356b
A
692 return (error);
693}
694
91447636 695
9bccf70c 696int
fe8ab488 697wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval)
1c79356b 698{
91447636
A
699 int error;
700 user_ssize_t count;
2d21ac55 701 struct vfs_context context = *vfs_context_current();
1c79356b 702
91447636 703 count = uio_resid(uio);
2d21ac55
A
704
705 context.vc_ucred = fp->f_cred;
706 error = fo_write(fp, uio, 0, &context);
91447636
A
707 if (error) {
708 if (uio_resid(uio) != count && (error == ERESTART ||
709 error == EINTR || error == EWOULDBLOCK))
710 error = 0;
711 /* The socket layer handles SIGPIPE */
6d2010ae
A
712 if (error == EPIPE && fp->f_type != DTYPE_SOCKET &&
713 (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0)
91447636
A
714 psignal(p, SIGPIPE);
715 }
716 *retval = count - uio_resid(uio);
717
91447636
A
718 return(error);
719}
720
721
722int
2d21ac55 723rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval)
91447636
A
724{
725 struct fileproc *fp;
726 int error;
727 user_ssize_t count;
2d21ac55 728 struct vfs_context context = *vfs_context_current();
91447636
A
729
730 if ( (error = preparefileread(p, &fp, fdes, 0)) )
731 return (error);
732
733 count = uio_resid(uio);
2d21ac55
A
734
735 context.vc_ucred = fp->f_cred;
736
737 error = fo_read(fp, uio, 0, &context);
9bccf70c 738
91447636
A
739 if (error) {
740 if (uio_resid(uio) != count && (error == ERESTART ||
741 error == EINTR || error == EWOULDBLOCK))
742 error = 0;
1c79356b 743 }
91447636 744 *retval = count - uio_resid(uio);
9bccf70c 745
91447636 746 donefileread(p, fp, fdes);
9bccf70c 747
91447636 748 return (error);
1c79356b
A
749}
750
751/*
752 * Ioctl system call
91447636 753 *
2d21ac55
A
754 * Returns: 0 Success
755 * EBADF
756 * ENOTTY
757 * ENOMEM
758 * ESRCH
759 * copyin:EFAULT
760 * copyout:EFAULT
761 * fp_lookup:EBADF Bad file descriptor
762 * fo_ioctl:???
1c79356b 763 */
9bccf70c 764int
b0d623f7 765ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval)
1c79356b 766{
39236c6e 767 struct fileproc *fp = NULL;
91447636 768 int error = 0;
39236c6e
A
769 u_int size = 0;
770 caddr_t datap = NULL, memp = NULL;
771 boolean_t is64bit = FALSE;
772 int tmp = 0;
1c79356b
A
773#define STK_PARAMS 128
774 char stkbuf[STK_PARAMS];
91447636 775 int fd = uap->fd;
39236c6e 776 u_long com = uap->com;
2d21ac55 777 struct vfs_context context = *vfs_context_current();
1c79356b 778
e5568f75 779 AUDIT_ARG(fd, uap->fd);
e5568f75 780 AUDIT_ARG(addr, uap->data);
91447636
A
781
782 is64bit = proc_is64bit(p);
b0d623f7
A
783#if CONFIG_AUDIT
784 if (is64bit)
39236c6e 785 AUDIT_ARG(value64, com);
b0d623f7 786 else
39236c6e 787 AUDIT_ARG(cmd, CAST_DOWN_EXPLICIT(int, com));
b0d623f7 788#endif /* CONFIG_AUDIT */
91447636 789
1c79356b
A
790 /*
791 * Interpret high order word to find amount of data to be
792 * copied to/from the user's address space.
793 */
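	/*
	 * Illustrative sketch (not from the original source): BSD ioctl
	 * command words pack the parameter length into their high-order
	 * bits via the _IO/_IOR/_IOW macros from <sys/ioccom.h>, e.g.
	 *
	 *	FIONBIO == _IOW('f', 126, int)
	 *	IOCPARM_LEN(FIONBIO) == sizeof(int)
	 *
	 * so 'size' below is the number of bytes to copy in and/or out,
	 * bounded by the IOCPARM_MAX check that follows.
	 */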
794 size = IOCPARM_LEN(com);
39236c6e
A
795 if (size > IOCPARM_MAX)
796 return ENOTTY;
1c79356b 797 if (size > sizeof (stkbuf)) {
39236c6e
A
798 if ((memp = (caddr_t)kalloc(size)) == 0)
799 return ENOMEM;
91447636 800 datap = memp;
1c79356b 801 } else
91447636 802 datap = &stkbuf[0];
39236c6e 803 if (com & IOC_IN) {
1c79356b 804 if (size) {
91447636 805 error = copyin(uap->data, datap, size);
39236c6e
A
806 if (error)
807 goto out_nofp;
91447636
A
808 } else {
 809 /* XXX - IOC_IN and no size? we should probably return an error here!! */
810 if (is64bit) {
811 *(user_addr_t *)datap = uap->data;
812 }
813 else {
814 *(uint32_t *)datap = (uint32_t)uap->data;
815 }
816 }
39236c6e 817 } else if ((com & IOC_OUT) && size)
1c79356b
A
818 /*
819 * Zero the buffer so the user always
820 * gets back something deterministic.
821 */
91447636 822 bzero(datap, size);
39236c6e 823 else if (com & IOC_VOID) {
91447636
A
824 /* XXX - this is odd since IOC_VOID means no parameters */
825 if (is64bit) {
826 *(user_addr_t *)datap = uap->data;
827 }
828 else {
829 *(uint32_t *)datap = (uint32_t)uap->data;
830 }
831 }
1c79356b 832
39236c6e
A
833 proc_fdlock(p);
834 error = fp_lookup(p,fd,&fp,1);
835 if (error) {
836 proc_fdunlock(p);
837 goto out_nofp;
838 }
839
840 AUDIT_ARG(file, p, fp);
841
842 if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
843 error = EBADF;
844 goto out;
845 }
846
847 context.vc_ucred = fp->f_fglob->fg_cred;
848
849#if CONFIG_MACF
850 error = mac_file_check_ioctl(context.vc_ucred, fp->f_fglob, com);
851 if (error)
852 goto out;
853#endif
854
1c79356b 855 switch (com) {
39236c6e
A
856 case FIONCLEX:
857 *fdflags(p, fd) &= ~UF_EXCLOSE;
858 break;
859
860 case FIOCLEX:
861 *fdflags(p, fd) |= UF_EXCLOSE;
862 break;
1c79356b
A
863
864 case FIONBIO:
91447636 865 if ( (tmp = *(int *)datap) )
1c79356b
A
866 fp->f_flag |= FNONBLOCK;
867 else
868 fp->f_flag &= ~FNONBLOCK;
2d21ac55 869 error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
1c79356b
A
870 break;
871
872 case FIOASYNC:
91447636 873 if ( (tmp = *(int *)datap) )
1c79356b
A
874 fp->f_flag |= FASYNC;
875 else
876 fp->f_flag &= ~FASYNC;
2d21ac55 877 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
1c79356b
A
878 break;
879
880 case FIOSETOWN:
91447636 881 tmp = *(int *)datap;
1c79356b
A
882 if (fp->f_type == DTYPE_SOCKET) {
883 ((struct socket *)fp->f_data)->so_pgid = tmp;
1c79356b
A
884 break;
885 }
91447636 886 if (fp->f_type == DTYPE_PIPE) {
2d21ac55 887 error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context);
91447636
A
888 break;
889 }
1c79356b
A
890 if (tmp <= 0) {
891 tmp = -tmp;
892 } else {
2d21ac55 893 struct proc *p1 = proc_find(tmp);
1c79356b
A
894 if (p1 == 0) {
895 error = ESRCH;
896 break;
897 }
2d21ac55
A
898 tmp = p1->p_pgrpid;
899 proc_rele(p1);
1c79356b 900 }
2d21ac55 901 error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context);
1c79356b
A
902 break;
903
904 case FIOGETOWN:
905 if (fp->f_type == DTYPE_SOCKET) {
91447636 906 *(int *)datap = ((struct socket *)fp->f_data)->so_pgid;
1c79356b
A
907 break;
908 }
2d21ac55 909 error = fo_ioctl(fp, TIOCGPGRP, datap, &context);
91447636 910 *(int *)datap = -*(int *)datap;
1c79356b
A
911 break;
912
913 default:
2d21ac55 914 error = fo_ioctl(fp, com, datap, &context);
1c79356b
A
915 /*
916 * Copy any data to user, size was
917 * already set and checked above.
918 */
39236c6e 919 if (error == 0 && (com & IOC_OUT) && size)
91447636 920 error = copyout(datap, uap->data, (u_int)size);
1c79356b
A
921 break;
922 }
91447636
A
923out:
924 fp_drop(p, fd, fp, 1);
925 proc_fdunlock(p);
39236c6e
A
926
927out_nofp:
928 if (memp)
929 kfree(memp, size);
91447636 930 return(error);
1c79356b
A
931}
932
1c79356b 933int selwait, nselcoll;
0b4e3aa0
A
934#define SEL_FIRSTPASS 1
935#define SEL_SECONDPASS 2
9bccf70c
A
936extern int selcontinue(int error);
937extern int selprocess(int error, int sel_pass);
fe8ab488 938static int selscan(struct proc *p, struct _select * sel, struct _select_data * seldata,
3e170ce0 939 int nfd, int32_t *retval, int sel_pass, struct waitq_set *wqset);
6d2010ae
A
940static int selcount(struct proc *p, u_int32_t *ibits, int nfd, int *count);
941static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup, int fromselcount);
91447636 942static int seldrop(struct proc *p, u_int32_t *ibits, int nfd);
4bd07ac2 943static int select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval);
1c79356b
A
944
945/*
946 * Select system call.
2d21ac55
A
947 *
948 * Returns: 0 Success
949 * EINVAL Invalid argument
950 * EAGAIN Nonconformant error if allocation fails
1c79356b 951 */
9bccf70c 952int
b0d623f7 953select(struct proc *p, struct select_args *uap, int32_t *retval)
2d21ac55
A
954{
955 __pthread_testcancel(1);
4bd07ac2 956 return select_nocancel(p, (struct select_nocancel_args *)uap, retval);
2d21ac55
A
957}
958
959int
b0d623f7 960select_nocancel(struct proc *p, struct select_nocancel_args *uap, int32_t *retval)
4bd07ac2
A
961{
962 uint64_t timeout = 0;
963
964 if (uap->tv) {
965 int err;
966 struct timeval atv;
967 if (IS_64BIT_PROCESS(p)) {
968 struct user64_timeval atv64;
969 err = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
970 /* Loses resolution - assume timeout < 68 years */
971 atv.tv_sec = atv64.tv_sec;
972 atv.tv_usec = atv64.tv_usec;
973 } else {
974 struct user32_timeval atv32;
975 err = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
976 atv.tv_sec = atv32.tv_sec;
977 atv.tv_usec = atv32.tv_usec;
978 }
979 if (err)
980 return err;
981
982 if (itimerfix(&atv)) {
983 err = EINVAL;
984 return err;
985 }
986
987 clock_absolutetime_interval_to_deadline(tvtoabstime(&atv), &timeout);
988 }
989
990 return select_internal(p, uap, timeout, retval);
991}
992
993int
994pselect(struct proc *p, struct pselect_args *uap, int32_t *retval)
995{
996 __pthread_testcancel(1);
997 return pselect_nocancel(p, (struct pselect_nocancel_args *)uap, retval);
998}
999
1000int
1001pselect_nocancel(struct proc *p, struct pselect_nocancel_args *uap, int32_t *retval)
1002{
1003 int err;
1004 struct uthread *ut;
1005 uint64_t timeout = 0;
1006
1007 if (uap->ts) {
1008 struct timespec ts;
1009
1010 if (IS_64BIT_PROCESS(p)) {
1011 struct user64_timespec ts64;
1012 err = copyin(uap->ts, (caddr_t)&ts64, sizeof(ts64));
1013 ts.tv_sec = ts64.tv_sec;
1014 ts.tv_nsec = ts64.tv_nsec;
1015 } else {
1016 struct user32_timespec ts32;
1017 err = copyin(uap->ts, (caddr_t)&ts32, sizeof(ts32));
1018 ts.tv_sec = ts32.tv_sec;
1019 ts.tv_nsec = ts32.tv_nsec;
1020 }
1021 if (err) {
1022 return err;
1023 }
1024
1025 if (!timespec_is_valid(&ts)) {
1026 return EINVAL;
1027 }
1028 clock_absolutetime_interval_to_deadline(tstoabstime(&ts), &timeout);
1029 }
1030
1031 ut = get_bsdthread_info(current_thread());
1032
1033 if (uap->mask != USER_ADDR_NULL) {
1034 /* save current mask, then copyin and set new mask */
1035 sigset_t newset;
1036 err = copyin(uap->mask, &newset, sizeof(sigset_t));
1037 if (err) {
1038 return err;
1039 }
1040 ut->uu_oldmask = ut->uu_sigmask;
1041 ut->uu_flag |= UT_SAS_OLDMASK;
1042 ut->uu_sigmask = (newset & ~sigcantmask);
1043 }
1044
1045 err = select_internal(p, (struct select_nocancel_args *)uap, timeout, retval);
1046
1047 if (err != EINTR && ut->uu_flag & UT_SAS_OLDMASK) {
1048 /*
1049 * Restore old mask (direct return case). NOTE: EINTR can also be returned
1050 * if the thread is cancelled. In that case, we don't reset the signal
1051 * mask to its original value (which usually happens in the signal
1052 * delivery path). This behavior is permitted by POSIX.
1053 */
1054 ut->uu_sigmask = ut->uu_oldmask;
1055 ut->uu_oldmask = 0;
1056 ut->uu_flag &= ~UT_SAS_OLDMASK;
1057 }
1058
1059 return err;
1060}
1061
1062/*
1063 * Generic implementation of {,p}select. Care: we type-pun uap across the two
1064 * syscalls, which differ slightly. The first 4 arguments (nfds and the fd sets)
1065 * are identical. The 5th (timeout) argument points to different types, so we
1066 * unpack in the syscall-specific code, but the generic code still does a null
1067 * check on this argument to determine if a timeout was specified.
1068 */
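/*
 * A sketch of the argument layout this relies on (field names as they are
 * used elsewhere in this file; the arg structs are declared in <sys/sysproto.h>,
 * included above):
 *
 *	select:		{ nd, in, ou, ex, tv }
 *	pselect:	{ nd, in, ou, ex, ts, mask }
 *
 * Only the 5th member differs, which is why the timeout is unpacked by the
 * callers above and uap can be passed here as a select_nocancel_args pointer.
 */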
1069static int
1070select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval)
1c79356b 1071{
9bccf70c 1072 int error = 0;
3e170ce0 1073 u_int ni, nw;
91447636 1074 thread_t th_act;
1c79356b
A
1075 struct uthread *uth;
1076 struct _select *sel;
fe8ab488 1077 struct _select_data *seldata;
1c79356b 1078 int needzerofill = 1;
0b4e3aa0 1079 int count = 0;
3e170ce0 1080 size_t sz = 0;
1c79356b 1081
91447636 1082 th_act = current_thread();
1c79356b 1083 uth = get_bsdthread_info(th_act);
91447636 1084 sel = &uth->uu_select;
fe8ab488 1085 seldata = &uth->uu_kevent.ss_select_data;
1c79356b
A
1086 *retval = 0;
1087
fe8ab488
A
1088 seldata->args = uap;
1089 seldata->retval = retval;
3e170ce0
A
1090 seldata->wqp = NULL;
1091 seldata->count = 0;
fe8ab488 1092
0b4e3aa0 1093 if (uap->nd < 0) {
1c79356b 1094 return (EINVAL);
0b4e3aa0 1095 }
1c79356b 1096
2d21ac55
A
1097 /* select on thread of process that already called proc_exit() */
1098 if (p->p_fd == NULL) {
1099 return (EBADF);
1100 }
1101
1c79356b
A
1102 if (uap->nd > p->p_fd->fd_nfiles)
1103 uap->nd = p->p_fd->fd_nfiles; /* forgiving; slightly wrong */
1104
1105 nw = howmany(uap->nd, NFDBITS);
1106 ni = nw * sizeof(fd_mask);
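	/*
	 * Worked example (illustrative numbers only): with uap->nd == 100
	 * and NFDBITS == 32, nw = howmany(100, 32) = 4 words per fd_set
	 * and ni = 4 * sizeof(fd_mask) = 16 bytes, so the three descriptor
	 * sets (in/ou/ex) need 3 * ni = 48 bytes of ibits/obits below.
	 */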
1107
1108 /*
2d21ac55
A
1109 * if the previously allocated space for the bits is smaller than
1110 * what is requested or no space has yet been allocated for this
1111 * thread, allocate enough space now.
1112 *
1113 * Note: If this allocation fails, select() will return EAGAIN; this
1114 * is the same thing poll() returns in a no-memory situation, but
1115 * it is not a POSIX compliant error code for select().
1c79356b
A
1116 */
1117 if (sel->nbytes < (3 * ni)) {
2d21ac55
A
1118 int nbytes = 3 * ni;
1119
1120 /* Free previous allocation, if any */
1121 if (sel->ibits != NULL)
1122 FREE(sel->ibits, M_TEMP);
1123 if (sel->obits != NULL) {
1124 FREE(sel->obits, M_TEMP);
1125 /* NULL out; subsequent ibits allocation may fail */
1126 sel->obits = NULL;
1127 }
1128
1129 MALLOC(sel->ibits, u_int32_t *, nbytes, M_TEMP, M_WAITOK | M_ZERO);
1130 if (sel->ibits == NULL)
1131 return (EAGAIN);
1132 MALLOC(sel->obits, u_int32_t *, nbytes, M_TEMP, M_WAITOK | M_ZERO);
1133 if (sel->obits == NULL) {
1134 FREE(sel->ibits, M_TEMP);
1135 sel->ibits = NULL;
1136 return (EAGAIN);
1137 }
1138 sel->nbytes = nbytes;
1c79356b 1139 needzerofill = 0;
2d21ac55 1140 }
1c79356b
A
1141
1142 if (needzerofill) {
1143 bzero((caddr_t)sel->ibits, sel->nbytes);
1144 bzero((caddr_t)sel->obits, sel->nbytes);
1145 }
1146
1147 /*
1148 * get the bits from the user address space
1149 */
1150#define getbits(name, x) \
1151 do { \
91447636 1152 if (uap->name && (error = copyin(uap->name, \
1c79356b
A
1153 (caddr_t)&sel->ibits[(x) * nw], ni))) \
1154 goto continuation; \
1155 } while (0)
1156
1157 getbits(in, 0);
1158 getbits(ou, 1);
1159 getbits(ex, 2);
1160#undef getbits
1161
4bd07ac2 1162 seldata->abstime = timeout;
9bccf70c 1163
6d2010ae 1164 if ( (error = selcount(p, sel->ibits, uap->nd, &count)) ) {
0b4e3aa0
A
1165 goto continuation;
1166 }
b0d623f7 1167
3e170ce0
A
1168 /*
1169 * We need an array of waitq pointers. This is due to the new way
1170 * in which waitqs are linked to sets. When a thread selects on a
1171 * file descriptor, a waitq (embedded in a selinfo structure) is
1172 * added to the thread's local waitq set. There is no longer any
1173 * way to directly iterate over all members of a given waitq set.
1174 * The process of linking a waitq into a set may allocate a link
1175 * table object. Because we can't iterate over all the waitqs to
1176 * which our thread waitq set belongs, we need a way of removing
1177 * this link object!
1178 *
1179 * Thus we need a buffer which will hold one waitq pointer
1180 * per FD being selected. During the tear-down phase we can use
1181 * these pointers to dis-associate the underlying selinfo's waitq
1182 * from our thread's waitq set.
1183 *
1184 * Because we also need to allocate a waitq set for this thread,
1185 * we use a bare buffer pointer to hold all the memory. Note that
1186 * this memory is cached in the thread pointer and not reaped until
1187 * the thread exits. This is generally OK because threads that
1188 * call select tend to keep calling select repeatedly.
1189 */
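	/*
	 * Rough picture of that single allocation (a sketch, not a
	 * declaration from this file):
	 *
	 *	uth->uu_wqset --> | struct waitq_set | wqp[0] .. wqp[count-1] |
	 *	                   \_ ALIGN(sizeof(struct waitq_set)) _/
	 *
	 * seldata->wqp is pointed at the trailing uint64_t array just
	 * below, one slot per descriptor being selected.
	 */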
1190 sz = ALIGN(sizeof(struct waitq_set)) + (count * sizeof(uint64_t));
1191 if (sz > uth->uu_wqstate_sz) {
1192 /* (re)allocate a buffer to hold waitq pointers */
1193 if (uth->uu_wqset) {
1194 if (waitq_set_is_valid(uth->uu_wqset))
1195 waitq_set_deinit(uth->uu_wqset);
1196 FREE(uth->uu_wqset, M_SELECT);
1197 } else if (uth->uu_wqstate_sz && !uth->uu_wqset)
1198 panic("select: thread structure corrupt! "
1199 "uu_wqstate_sz:%ld, wqstate_buf == NULL",
1200 uth->uu_wqstate_sz);
1201 uth->uu_wqstate_sz = sz;
1202 MALLOC(uth->uu_wqset, struct waitq_set *, sz, M_SELECT, M_WAITOK);
1203 if (!uth->uu_wqset)
1204 panic("can't allocate %ld bytes for wqstate buffer",
1205 uth->uu_wqstate_sz);
1206 waitq_set_init(uth->uu_wqset,
1207 SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST|SYNC_POLICY_DISABLE_IRQ, NULL);
1208 }
1209
1210 if (!waitq_set_is_valid(uth->uu_wqset))
1211 waitq_set_init(uth->uu_wqset,
1212 SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST|SYNC_POLICY_DISABLE_IRQ, NULL);
1213
1214 /* the last chunk of our buffer is an array of waitq pointers */
1215 seldata->wqp = (uint64_t *)((char *)(uth->uu_wqset) + ALIGN(sizeof(struct waitq_set)));
1216 bzero(seldata->wqp, sz - ALIGN(sizeof(struct waitq_set)));
1217
fe8ab488 1218 seldata->count = count;
0b4e3aa0 1219
1c79356b 1220continuation:
6d2010ae
A
1221
1222 if (error) {
1223 /*
1224 * We have already cleaned up any state we established,
1225 * either locally or as a result of selcount(). We don't
1226 * need to wait_subqueue_unlink_all(), since we haven't set
1227 * anything at this point.
1228 */
1229 return (error);
1230 }
1231
1232 return selprocess(0, SEL_FIRSTPASS);
0b4e3aa0
A
1233}
1234
1235int
1236selcontinue(int error)
1237{
9bccf70c 1238 return selprocess(error, SEL_SECONDPASS);
1c79356b
A
1239}
1240
6d2010ae
A
1241
1242/*
1243 * selprocess
1244 *
1245 * Parameters: error The error code from our caller
1246 * sel_pass The pass we are on
1247 */
1c79356b 1248int
91447636 1249selprocess(int error, int sel_pass)
1c79356b 1250{
9bccf70c 1251 int ncoll;
1c79356b 1252 u_int ni, nw;
91447636 1253 thread_t th_act;
1c79356b
A
1254 struct uthread *uth;
1255 struct proc *p;
fe8ab488 1256 struct select_nocancel_args *uap;
1c79356b
A
1257 int *retval;
1258 struct _select *sel;
fe8ab488 1259 struct _select_data *seldata;
0b4e3aa0 1260 int unwind = 1;
9bccf70c 1261 int prepost = 0;
0b4e3aa0
A
1262 int somewakeup = 0;
1263 int doretry = 0;
9bccf70c 1264 wait_result_t wait_result;
1c79356b
A
1265
1266 p = current_proc();
91447636 1267 th_act = current_thread();
1c79356b 1268 uth = get_bsdthread_info(th_act);
91447636 1269 sel = &uth->uu_select;
fe8ab488
A
1270 seldata = &uth->uu_kevent.ss_select_data;
1271 uap = seldata->args;
1272 retval = seldata->retval;
1c79356b 1273
0b4e3aa0 1274 if ((error != 0) && (sel_pass == SEL_FIRSTPASS))
3e170ce0 1275 unwind = 0;
fe8ab488 1276 if (seldata->count == 0)
3e170ce0 1277 unwind = 0;
1c79356b 1278retry:
3e170ce0 1279 if (error != 0)
6d2010ae 1280 goto done;
0b4e3aa0 1281
1c79356b 1282 ncoll = nselcoll;
b0d623f7 1283 OSBitOrAtomic(P_SELECT, &p->p_flag);
3e170ce0 1284
0b4e3aa0 1285 /* skip scans if the select is just for timeouts */
fe8ab488 1286 if (seldata->count) {
3e170ce0 1287 error = selscan(p, sel, seldata, uap->nd, retval, sel_pass, uth->uu_wqset);
0b4e3aa0
A
1288 if (error || *retval) {
1289 goto done;
1290 }
3e170ce0
A
1291 if (prepost || somewakeup) {
1292 /*
1294 * even though we were woken up, someone else may
1295 * already have read the data we were waiting for;
1296 * go around and scan again if time permits
1296 */
1297 prepost = 0;
1298 somewakeup = 0;
1299 doretry = 1;
0b4e3aa0
A
1300 }
1301 }
1302
9bccf70c
A
1303 if (uap->tv) {
1304 uint64_t now;
1305
1306 clock_get_uptime(&now);
fe8ab488 1307 if (now >= seldata->abstime)
9bccf70c 1308 goto done;
1c79356b 1309 }
0b4e3aa0
A
1310
1311 if (doretry) {
1312 /* cleanup obits and try again */
1313 doretry = 0;
1314 sel_pass = SEL_FIRSTPASS;
1315 goto retry;
1316 }
1317
1c79356b
A
1318 /*
1319 * To effect a poll, the timeout argument should be
1320 * non-nil, pointing to a zero-valued timeval structure.
1321 */
fe8ab488 1322 if (uap->tv && seldata->abstime == 0) {
1c79356b
A
1323 goto done;
1324 }
0b4e3aa0
A
1325
 1326 /* No spurious wakeups due to collisions, no need to check for them */
1327 if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) {
1328 sel_pass = SEL_FIRSTPASS;
1c79356b
A
1329 goto retry;
1330 }
0b4e3aa0 1331
b0d623f7 1332 OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1c79356b 1333
0b4e3aa0 1334 /* if the select is just for timeout skip check */
3e170ce0 1335 if (seldata->count && (sel_pass == SEL_SECONDPASS))
0b4e3aa0
A
1336 panic("selprocess: 2nd pass assertwaiting");
1337
3e170ce0
A
1338 /* waitq_set has waitqueue as first element */
1339 wait_result = waitq_assert_wait64_leeway((struct waitq *)uth->uu_wqset,
1340 NO_EVENT64, THREAD_ABORTSAFE,
1341 TIMEOUT_URGENCY_USER_NORMAL,
1342 seldata->abstime,
1343 TIMEOUT_NO_LEEWAY);
9bccf70c
A
1344 if (wait_result != THREAD_AWAKENED) {
1345 /* there are no preposted events */
91447636
A
1346 error = tsleep1(NULL, PSOCK | PCATCH,
1347 "select", 0, selcontinue);
0b4e3aa0
A
1348 } else {
1349 prepost = 1;
1350 error = 0;
1351 }
1352
0b4e3aa0 1353 if (error == 0) {
6d2010ae 1354 sel_pass = SEL_SECONDPASS;
0b4e3aa0 1355 if (!prepost)
6d2010ae 1356 somewakeup = 1;
1c79356b 1357 goto retry;
0b4e3aa0 1358 }
1c79356b 1359done:
91447636 1360 if (unwind) {
91447636 1361 seldrop(p, sel->ibits, uap->nd);
3e170ce0
A
1362 waitq_set_deinit(uth->uu_wqset);
1363 /*
1364 * zero out the waitq pointer array to avoid use-after free
1365 * errors in the selcount error path (seldrop_locked) if/when
1366 * the thread re-calls select().
1367 */
1368 bzero((void *)uth->uu_wqset, uth->uu_wqstate_sz);
91447636 1369 }
b0d623f7 1370 OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1c79356b
A
1371 /* select is not restarted after signals... */
1372 if (error == ERESTART)
1373 error = EINTR;
1374 if (error == EWOULDBLOCK)
1375 error = 0;
1c79356b
A
1376 nw = howmany(uap->nd, NFDBITS);
1377 ni = nw * sizeof(fd_mask);
1378
1379#define putbits(name, x) \
1380 do { \
91447636
A
1381 if (uap->name && (error2 = \
1382 copyout((caddr_t)&sel->obits[(x) * nw], uap->name, ni))) \
1c79356b
A
1383 error = error2; \
1384 } while (0)
1385
1386 if (error == 0) {
1387 int error2;
1388
1389 putbits(in, 0);
1390 putbits(ou, 1);
1391 putbits(ex, 2);
1392#undef putbits
1393 }
4bd07ac2
A
1394
1395 if (error != EINTR && sel_pass == SEL_SECONDPASS && uth->uu_flag & UT_SAS_OLDMASK) {
1396 /* restore signal mask - continuation case */
1397 uth->uu_sigmask = uth->uu_oldmask;
1398 uth->uu_oldmask = 0;
1399 uth->uu_flag &= ~UT_SAS_OLDMASK;
1400 }
1401
1c79356b 1402 return(error);
1c79356b
A
1403}
1404
6d2010ae 1405
3e170ce0
A
1406/**
1407 * remove the fileproc's underlying waitq from the supplied waitq set;
1408 * clear FP_INSELECT when appropriate
1409 *
1410 * Parameters:
1411 * fp File proc that is potentially currently in select
1412 * wqset Waitq set to which the fileproc may belong
1413 * (usually this is the thread's private waitq set)
1414 * Conditions:
1415 * proc_fdlock is held
1416 */
1417static void selunlinkfp(struct fileproc *fp, uint64_t wqp_id, struct waitq_set *wqset)
1418{
1419 int valid_set = waitq_set_is_valid(wqset);
1420 int valid_q = !!wqp_id;
1421
1422 /*
1423 * This could be called (from selcount error path) before we setup
1424 * the thread's wqset. Check the wqset passed in, and only unlink if
1425 * the set is valid.
1426 */
1427
1428 /* unlink the underlying waitq from the input set (thread waitq set) */
1429 if (valid_q && valid_set)
1430 waitq_unlink_by_prepost_id(wqp_id, wqset);
1431
1432 /* allow passing a NULL/invalid fp for seldrop unwind */
1433 if (!fp || !(fp->f_flags & (FP_INSELECT|FP_SELCONFLICT)))
1434 return;
1435
1436 /*
1437 * We can always remove the conflict queue from our thread's set: this
1438 * will not affect other threads that potentially need to be awoken on
1439 * the conflict queue during a fileproc_drain - those sets will still
1440 * be linked with the global conflict queue, and the last waiter
1441 * on the fp clears the CONFLICT marker.
1442 */
1443 if (valid_set && (fp->f_flags & FP_SELCONFLICT))
1444 waitq_unlink(&select_conflict_queue, wqset);
1445
1446 /* jca: TODO:
1447 * This isn't quite right - we don't actually know if this
1448 * fileproc is in another select or not! Here we just assume
1449 * that if we were the first thread to select on the FD, then
1450 * we'll be the one to clear this flag...
1451 */
1452 if (valid_set && fp->f_wset == (void *)wqset) {
1453 fp->f_flags &= ~FP_INSELECT;
1454 fp->f_wset = NULL;
1455 }
1456}
1457
1458/**
1459 * connect a fileproc to the given wqset, potentially bridging to a waitq
1460 * pointed to indirectly by wq_data
1461 *
1462 * Parameters:
1463 * fp File proc potentially currently in select
1464 * wq_data Pointer to a pointer to a waitq (could be NULL)
1465 * wqset Waitq set to which the fileproc should now belong
1466 * (usually this is the thread's private waitq set)
1467 *
1468 * Conditions:
1469 * proc_fdlock is held
1470 */
1471static uint64_t sellinkfp(struct fileproc *fp, void **wq_data, struct waitq_set *wqset)
1472{
1473 struct waitq *f_wq = NULL;
1474
1475 if ((fp->f_flags & FP_INSELECT) != FP_INSELECT) {
1476 if (wq_data)
1477 panic("non-null data:%p on fp:%p not in select?!"
1478 "(wqset:%p)", wq_data, fp, wqset);
1479 return 0;
1480 }
1481
1482 if ((fp->f_flags & FP_SELCONFLICT) == FP_SELCONFLICT) {
1483 /*
1484 * The conflict queue requires disabling interrupts, so we
1485 * need to explicitly reserve a link object to avoid a
1486 * panic/assert in the waitq code. Hopefully this extra step
1487 * can be avoided if we can split the waitq structure into
1488 * blocking and linkage sub-structures.
1489 */
1490 uint64_t reserved_link = waitq_link_reserve(&select_conflict_queue);
1491 waitq_link(&select_conflict_queue, wqset, WAITQ_SHOULD_LOCK, &reserved_link);
1492 waitq_link_release(reserved_link);
1493 }
1494
1495 /*
1496 * The wq_data parameter has potentially been set by selrecord called
1497 * from a subsystems fo_select() function. If the subsystem does not
1498 * call selrecord, then wq_data will be NULL
1499 *
1500 * Use memcpy to get the value into a proper pointer because
1501 * wq_data most likely points to a stack variable that could be
1502 * unaligned on 32-bit systems.
1503 */
1504 if (wq_data) {
1505 memcpy(&f_wq, wq_data, sizeof(f_wq));
1506 if (!waitq_is_valid(f_wq))
1507 f_wq = NULL;
1508 }
1509
1510 /* record the first thread's wqset in the fileproc structure */
1511 if (!fp->f_wset)
1512 fp->f_wset = (void *)wqset;
1513
1514 /* handles NULL f_wq */
1515 return waitq_get_prepost_id(f_wq);
1516}
1517
1518
6d2010ae
A
1519/*
1520 * selscan
1521 *
1522 * Parameters: p Process performing the select
1523 * sel The per-thread select context structure
1524 * nfd The number of file descriptors to scan
1525 * retval The per thread system call return area
1526 * sel_pass Which pass this is; allowed values are
1527 * SEL_FIRSTPASS and SEL_SECONDPASS
3e170ce0 1528 * wqset The per thread wait queue set
6d2010ae
A
1529 *
1530 * Returns: 0 Success
1531 * EIO Invalid p->p_fd field XXX Obsolete?
1532 * EBADF One of the files in the bit vector is
1533 * invalid.
1534 */
1c79356b 1535static int
3e170ce0
A
1536selscan(struct proc *p, struct _select *sel, struct _select_data * seldata,
1537 int nfd, int32_t *retval, int sel_pass, struct waitq_set *wqset)
1c79356b 1538{
2d21ac55
A
1539 struct filedesc *fdp = p->p_fd;
1540 int msk, i, j, fd;
1541 u_int32_t bits;
91447636 1542 struct fileproc *fp;
6d2010ae
A
1543 int n = 0; /* count of bits */
1544 int nc = 0; /* bit vector offset (nc'th bit) */
1c79356b
A
1545 static int flag[3] = { FREAD, FWRITE, 0 };
1546 u_int32_t *iptr, *optr;
1547 u_int nw;
0b4e3aa0 1548 u_int32_t *ibits, *obits;
3e170ce0 1549 uint64_t reserved_link, *rl_ptr = NULL;
6d2010ae 1550 int count;
2d21ac55 1551 struct vfs_context context = *vfs_context_current();
1c79356b
A
1552
1553 /*
1554 * Problems can occur at reboot due to Mac OS X signal handling
1555 * (seen in Beaker1C); verify that p->p_fd is valid
1556 */
1557 if (fdp == NULL) {
1558 *retval=0;
1559 return(EIO);
1560 }
0b4e3aa0
A
1561 ibits = sel->ibits;
1562 obits = sel->obits;
0b4e3aa0 1563
1c79356b
A
1564 nw = howmany(nfd, NFDBITS);
1565
fe8ab488 1566 count = seldata->count;
2d21ac55
A
1567
1568 nc = 0;
3e170ce0
A
1569 if (!count) {
1570 *retval = 0;
1571 return 0;
1572 }
1573
1574 proc_fdlock(p);
1575 for (msk = 0; msk < 3; msk++) {
1576 iptr = (u_int32_t *)&ibits[msk * nw];
1577 optr = (u_int32_t *)&obits[msk * nw];
1578
1579 for (i = 0; i < nfd; i += NFDBITS) {
1580 bits = iptr[i/NFDBITS];
2d21ac55 1581
3e170ce0
A
1582 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1583 bits &= ~(1 << j);
2d21ac55 1584
3e170ce0
A
1585 if (fd < fdp->fd_nfiles)
1586 fp = fdp->fd_ofiles[fd];
1587 else
1588 fp = NULL;
fe8ab488 1589
3e170ce0
A
1590 if (fp == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1591 /*
1592 * If we abort because of a bad
1593 * fd, let the caller unwind...
1594 */
1595 proc_fdunlock(p);
1596 return(EBADF);
1597 }
1598 if (sel_pass == SEL_SECONDPASS) {
1599 reserved_link = 0;
1600 rl_ptr = NULL;
1601 selunlinkfp(fp, seldata->wqp[nc], wqset);
1602 } else {
1603 reserved_link = waitq_link_reserve((struct waitq *)wqset);
1604 rl_ptr = &reserved_link;
1605 if (fp->f_flags & FP_INSELECT)
1606 /* someone is already in select on this fp */
1607 fp->f_flags |= FP_SELCONFLICT;
fe8ab488 1608 else
3e170ce0
A
1609 fp->f_flags |= FP_INSELECT;
1610 }
2d21ac55 1611
3e170ce0 1612 context.vc_ucred = fp->f_cred;
2d21ac55 1613
3e170ce0
A
1614 /*
1615 * stash this value b/c fo_select may replace
1616 * reserved_link with a pointer to a waitq object
1617 */
1618 uint64_t rsvd = reserved_link;
1619
1620 /* The select; set the bit, if true */
1621 if (fp->f_ops && fp->f_type
1622 && fo_select(fp, flag[msk], rl_ptr, &context)) {
1623 optr[fd/NFDBITS] |= (1 << (fd % NFDBITS));
1624 n++;
1625 }
1626 if (sel_pass == SEL_FIRSTPASS) {
1627 waitq_link_release(rsvd);
1628 /*
1629 * If the fp's supporting selinfo structure was linked
1630 * to this thread's waitq set, then 'reserved_link'
1631 * will have been updated by selrecord to be a pointer
1632 * to the selinfo's waitq.
1633 */
1634 if (reserved_link == rsvd)
1635 rl_ptr = NULL; /* fo_select never called selrecord() */
1636 /*
1637 * Hook up the thread's waitq set either to
1638 * the fileproc structure, or to the global
1639 * conflict queue: but only on the first
1640 * select pass.
1641 */
1642 seldata->wqp[nc] = sellinkfp(fp, (void **)rl_ptr, wqset);
2d21ac55 1643 }
3e170ce0 1644 nc++;
2d21ac55
A
1645 }
1646 }
0b4e3aa0 1647 }
3e170ce0
A
1648 proc_fdunlock(p);
1649
1c79356b
A
1650 *retval = n;
1651 return (0);
1652}
1653
3e170ce0 1654int poll_callback(struct kqueue *, struct kevent_internal_s *, void *);
91447636
A
1655
1656struct poll_continue_args {
1657 user_addr_t pca_fds;
1658 u_int pca_nfds;
1659 u_int pca_rfds;
1660};
1661
9bccf70c 1662int
b0d623f7 1663poll(struct proc *p, struct poll_args *uap, int32_t *retval)
2d21ac55
A
1664{
1665 __pthread_testcancel(1);
1666 return(poll_nocancel(p, (struct poll_nocancel_args *)uap, retval));
1667}
1668
1669
1670int
b0d623f7 1671poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval)
1c79356b 1672{
91447636
A
1673 struct poll_continue_args *cont;
1674 struct pollfd *fds;
1675 struct kqueue *kq;
1676 struct timeval atv;
1677 int ncoll, error = 0;
1678 u_int nfds = uap->nfds;
1679 u_int rfds = 0;
1680 u_int i;
1681 size_t ni;
1c79356b 1682
91447636
A
1683 /*
1684 * This is kinda bogus. We have fd limits, but that is not
1685 * really related to the size of the pollfd array. Make sure
1686 * we let the process use at least FD_SETSIZE entries and at
1687 * least enough for the current limits. We want to be reasonably
1688 * safe, but not overly restrictive.
1689 */
1690 if (nfds > OPEN_MAX ||
2d21ac55 1691 (nfds > p->p_rlimit[RLIMIT_NOFILE].rlim_cur && (proc_suser(p) || nfds > FD_SETSIZE)))
91447636 1692 return (EINVAL);
1c79356b 1693
91447636
A
1694 kq = kqueue_alloc(p);
1695 if (kq == NULL)
1696 return (EAGAIN);
1697
1698 ni = nfds * sizeof(struct pollfd) + sizeof(struct poll_continue_args);
1699 MALLOC(cont, struct poll_continue_args *, ni, M_TEMP, M_WAITOK);
1700 if (NULL == cont) {
1701 error = EAGAIN;
1702 goto out;
1703 }
1704
1705 fds = (struct pollfd *)&cont[1];
1706 error = copyin(uap->fds, fds, nfds * sizeof(struct pollfd));
1707 if (error)
1708 goto out;
1709
1710 if (uap->timeout != -1) {
1711 struct timeval rtv;
1712
1713 atv.tv_sec = uap->timeout / 1000;
1714 atv.tv_usec = (uap->timeout % 1000) * 1000;
1715 if (itimerfix(&atv)) {
1716 error = EINVAL;
1717 goto out;
1718 }
1719 getmicrouptime(&rtv);
1720 timevaladd(&atv, &rtv);
1721 } else {
1722 atv.tv_sec = 0;
1723 atv.tv_usec = 0;
1724 }
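	/*
	 * Example of the conversion above (illustrative numbers): a
	 * uap->timeout of 2500 ms yields atv = { 2, 500000 }, which
	 * getmicrouptime()/timevaladd() then turn into an absolute
	 * deadline; timeout == -1 leaves atv zeroed, i.e. poll blocks
	 * until an event arrives.
	 */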
1725
1726 /* JMM - all this P_SELECT stuff is bogus */
1727 ncoll = nselcoll;
b0d623f7 1728 OSBitOrAtomic(P_SELECT, &p->p_flag);
91447636
A
1729 for (i = 0; i < nfds; i++) {
1730 short events = fds[i].events;
91447636
A
1731 int kerror = 0;
1732
1733 /* per spec, ignore fd values below zero */
1734 if (fds[i].fd < 0) {
1735 fds[i].revents = 0;
1736 continue;
1737 }
1738
1739 /* convert the poll event into a kqueue kevent */
3e170ce0
A
1740 struct kevent_internal_s kev = {
1741 .ident = fds[i].fd,
1742 .flags = EV_ADD | EV_ONESHOT | EV_POLL,
1743 .udata = CAST_USER_ADDR_T(&fds[i]) };
91447636
A
1744
1745 /* Handle input events */
2d21ac55 1746 if (events & ( POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP )) {
91447636 1747 kev.filter = EVFILT_READ;
04b8595b 1748 if (events & ( POLLPRI | POLLRDBAND ))
91447636
A
1749 kev.flags |= EV_OOBAND;
1750 kerror = kevent_register(kq, &kev, p);
1751 }
1752
1753 /* Handle output events */
1754 if (kerror == 0 &&
1755 events & ( POLLOUT | POLLWRNORM | POLLWRBAND )) {
1756 kev.filter = EVFILT_WRITE;
1757 kerror = kevent_register(kq, &kev, p);
1758 }
1759
1760 /* Handle BSD extension vnode events */
1761 if (kerror == 0 &&
1762 events & ( POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE )) {
1763 kev.filter = EVFILT_VNODE;
1764 kev.fflags = 0;
1765 if (events & POLLEXTEND)
1766 kev.fflags |= NOTE_EXTEND;
1767 if (events & POLLATTRIB)
1768 kev.fflags |= NOTE_ATTRIB;
1769 if (events & POLLNLINK)
1770 kev.fflags |= NOTE_LINK;
1771 if (events & POLLWRITE)
1772 kev.fflags |= NOTE_WRITE;
1773 kerror = kevent_register(kq, &kev, p);
1774 }
1775
1776 if (kerror != 0) {
1777 fds[i].revents = POLLNVAL;
1778 rfds++;
1779 } else
1780 fds[i].revents = 0;
1781 }
1782
1783 /* Did we have any trouble registering? */
1784 if (rfds > 0)
1785 goto done;
1786
1787 /* scan for, and possibly wait for, the kevents to trigger */
1788 cont->pca_fds = uap->fds;
1789 cont->pca_nfds = nfds;
1790 cont->pca_rfds = rfds;
b0d623f7 1791 error = kqueue_scan(kq, poll_callback, NULL, cont, &atv, p);
91447636
A
1792 rfds = cont->pca_rfds;
1793
1794 done:
b0d623f7 1795 OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
91447636
A
1796 /* poll is not restarted after signals... */
1797 if (error == ERESTART)
1798 error = EINTR;
1799 if (error == EWOULDBLOCK)
1800 error = 0;
1801 if (error == 0) {
1802 error = copyout(fds, uap->fds, nfds * sizeof(struct pollfd));
1803 *retval = rfds;
1804 }
1805
1806 out:
1807 if (NULL != cont)
1808 FREE(cont, M_TEMP);
1809
2d21ac55 1810 kqueue_dealloc(kq);
91447636
A
1811 return (error);
1812}
1813
2d21ac55 1814int
3e170ce0 1815poll_callback(__unused struct kqueue *kq, struct kevent_internal_s *kevp, void *data)
91447636
A
1816{
1817 struct poll_continue_args *cont = (struct poll_continue_args *)data;
1818 struct pollfd *fds = CAST_DOWN(struct pollfd *, kevp->udata);
316670eb 1819 short prev_revents = fds->revents;
04b8595b 1820 short mask = 0;
ff6e181a 1821
91447636
A
1822 /* convert the results back into revents */
1823 if (kevp->flags & EV_EOF)
1824 fds->revents |= POLLHUP;
1825 if (kevp->flags & EV_ERROR)
1826 fds->revents |= POLLERR;
91447636
A
1827
1828 switch (kevp->filter) {
1829 case EVFILT_READ:
ff6e181a
A
1830 if (fds->revents & POLLHUP)
1831 mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND );
1832 else {
3e170ce0 1833 mask = (POLLIN | POLLRDNORM);
ff6e181a 1834 if (kevp->flags & EV_OOBAND)
3e170ce0 1835 mask |= (POLLPRI | POLLRDBAND);
ff6e181a
A
1836 }
1837 fds->revents |= (fds->events & mask);
91447636
A
1838 break;
1839
1840 case EVFILT_WRITE:
1841 if (!(fds->revents & POLLHUP))
1842 fds->revents |= (fds->events & ( POLLOUT | POLLWRNORM | POLLWRBAND ));
1843 break;
1844
2d21ac55 1845 case EVFILT_VNODE:
91447636
A
1846 if (kevp->fflags & NOTE_EXTEND)
1847 fds->revents |= (fds->events & POLLEXTEND);
1848 if (kevp->fflags & NOTE_ATTRIB)
1849 fds->revents |= (fds->events & POLLATTRIB);
1850 if (kevp->fflags & NOTE_LINK)
1851 fds->revents |= (fds->events & POLLNLINK);
1852 if (kevp->fflags & NOTE_WRITE)
1853 fds->revents |= (fds->events & POLLWRITE);
1854 break;
1855 }
2d21ac55 1856
316670eb 1857 if (fds->revents != 0 && prev_revents == 0)
2d21ac55
A
1858 cont->pca_rfds++;
1859
91447636
A
1860 return 0;
1861}
1862
1863int
1864seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p)
1865{
1866
1867 return (1);
1868}
1869
6d2010ae
A
1870/*
1871 * selcount
1872 *
1873 * Count the number of bits set in the input bit vector, and establish an
1874 * outstanding fp->f_iocount for each of the descriptors which will be in
1875 * use in the select operation.
1876 *
1877 * Parameters: p The process doing the select
1878 * ibits The input bit vector
1879 * nfd The number of fd's in the vector
1880 * countp Pointer to where to store the bit count
1881 *
1882 * Returns: 0 Success
1883 * EIO Bad per process open file table
1884 * EBADF One of the bits in the input bit vector
1885 * references an invalid fd
1886 *
1887 * Implicit: *countp (modified) Count of fd's
1888 *
1889 * Notes: This function is the first pass under the proc_fdlock() that
1890 * permits us to recognize invalid descriptors in the bit vector;
1891 * the may, however, not remain valid through the drop and
1892 * later reacquisition of the proc_fdlock().
1893 */
91447636 1894static int
6d2010ae 1895selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp)
91447636 1896{
2d21ac55
A
1897 struct filedesc *fdp = p->p_fd;
1898 int msk, i, j, fd;
1899 u_int32_t bits;
91447636 1900 struct fileproc *fp;
0b4e3aa0 1901 int n = 0;
91447636 1902 u_int32_t *iptr;
0b4e3aa0 1903 u_int nw;
91447636
A
1904 int error=0;
1905 int dropcount;
6d2010ae 1906 int need_wakeup = 0;
0b4e3aa0
A
1907
1908 /*
 1909 * Problems seen at reboot, due to MacOSX signal problems
 1910 * in Beaker1C; verify that p->p_fd is valid
1911 */
1912 if (fdp == NULL) {
2d21ac55 1913 *countp = 0;
0b4e3aa0
A
1914 return(EIO);
1915 }
0b4e3aa0
A
1916 nw = howmany(nfd, NFDBITS);
1917
91447636 1918 proc_fdlock(p);
0b4e3aa0
A
1919 for (msk = 0; msk < 3; msk++) {
1920 iptr = (u_int32_t *)&ibits[msk * nw];
1921 for (i = 0; i < nfd; i += NFDBITS) {
1922 bits = iptr[i/NFDBITS];
1923 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1924 bits &= ~(1 << j);
fe8ab488
A
1925
1926 if (fd < fdp->fd_nfiles)
1927 fp = fdp->fd_ofiles[fd];
1928 else
1929 fp = NULL;
1930
0b4e3aa0
A
1931 if (fp == NULL ||
1932 (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
2d21ac55 1933 *countp = 0;
91447636
A
1934 error = EBADF;
1935 goto bad;
0b4e3aa0 1936 }
91447636 1937 fp->f_iocount++;
0b4e3aa0
A
1938 n++;
1939 }
1940 }
1941 }
91447636
A
1942 proc_fdunlock(p);
1943
2d21ac55 1944 *countp = n;
91447636 1945 return (0);
6d2010ae 1946
91447636
A
1947bad:
1948 dropcount = 0;
1949
3e170ce0 1950 if (n == 0)
91447636 1951 goto out;
6d2010ae
A
1952 /* Ignore error return; it's already EBADF */
1953 (void)seldrop_locked(p, ibits, nfd, n, &need_wakeup, 1);
91447636 1954
91447636
A
1955out:
1956 proc_fdunlock(p);
6d2010ae
A
1957 if (need_wakeup) {
1958 wakeup(&p->p_fpdrainwait);
1959 }
91447636
A
1960 return(error);
1961}
1962
6d2010ae
A
1963
1964/*
1965 * seldrop_locked
1966 *
1967 * Drop outstanding wait queue references set up during selscan(); drop the
1968 * outstanding per fileproc f_iocount() picked up during the selcount().
1969 *
1970 * Parameters: p Process performing the select
3e170ce0 1971 * ibits Input bit vector of fd's
6d2010ae
A
1972 * nfd Number of fd's
1973 * lim Limit to number of vector entries to
1974 * consider, or -1 for "all"
 1975 * fromselcount True if called from selcount()
 1976 * need_wakeup Pointer to flag to set to do a wakeup
 1977 * if f_iocount on any descriptor goes to 0
1978 *
1979 * Returns: 0 Success
1980 * EBADF One or more fds in the bit vector
1981 * were invalid, but the rest
1982 * were successfully dropped
1983 *
 1984 * Notes: An fd may become bad while the proc_fdlock() is not held,
 1985 * if a multithreaded application closes the fd out from under
 1986 * the in-progress select. In this case, we still have to
 1987 * clean up after the setup on the remaining fds.
1988 */
91447636 1989static int
6d2010ae 1990seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup, int fromselcount)
91447636 1991{
2d21ac55 1992 struct filedesc *fdp = p->p_fd;
3e170ce0 1993 int msk, i, j, nc, fd;
2d21ac55 1994 u_int32_t bits;
91447636 1995 struct fileproc *fp;
91447636
A
1996 u_int32_t *iptr;
1997 u_int nw;
6d2010ae
A
1998 int error = 0;
1999 int dropcount = 0;
2000 uthread_t uth = get_bsdthread_info(current_thread());
3e170ce0 2001 struct _select_data *seldata;
6d2010ae
A
2002
2003 *need_wakeup = 0;
91447636
A
2004
2005 /*
 2006 * Problems seen at reboot, due to MacOSX signal problems
 2007 * in Beaker1C; verify that p->p_fd is valid
2008 */
2009 if (fdp == NULL) {
2010 return(EIO);
2011 }
2012
2013 nw = howmany(nfd, NFDBITS);
3e170ce0 2014 seldata = &uth->uu_kevent.ss_select_data;
91447636 2015
3e170ce0 2016 nc = 0;
91447636
A
2017 for (msk = 0; msk < 3; msk++) {
2018 iptr = (u_int32_t *)&ibits[msk * nw];
2019 for (i = 0; i < nfd; i += NFDBITS) {
2020 bits = iptr[i/NFDBITS];
2021 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
2022 bits &= ~(1 << j);
2023 fp = fdp->fd_ofiles[fd];
6d2010ae
A
2024 /*
2025 * If we've already dropped as many as were
2026 * counted/scanned, then we are done.
2027 */
2028 if ((fromselcount != 0) && (++dropcount > lim))
2029 goto done;
2030
3e170ce0
A
2031 /*
2032 * unlink even potentially NULL fileprocs.
2033 * If the FD was closed from under us, we
2034 * still need to cleanup the waitq links!
2035 */
2036 selunlinkfp(fp,
2037 seldata->wqp ? seldata->wqp[nc] : 0,
2038 uth->uu_wqset);
2039
2040 nc++;
2041
6d2010ae
A
2042 if (fp == NULL) {
2043 /* skip (now) bad fds */
2044 error = EBADF;
2045 continue;
2046 }
91447636 2047
6d2010ae
A
2048 fp->f_iocount--;
2049 if (fp->f_iocount < 0)
2050 panic("f_iocount overdecrement!");
2051
2052 if (fp->f_iocount == 0) {
2053 /*
2054 * The last iocount is responsible for clearing
 2055 * selconflict flag - even if we didn't set it -
2056 * and is also responsible for waking up anyone
2057 * waiting on iocounts to drain.
2058 */
2059 if (fp->f_flags & FP_SELCONFLICT)
2060 fp->f_flags &= ~FP_SELCONFLICT;
2061 if (p->p_fpdrainwait) {
2062 p->p_fpdrainwait = 0;
2063 *need_wakeup = 1;
2064 }
91447636
A
2065 }
2066 }
2067 }
2068 }
6d2010ae
A
2069done:
2070 return (error);
2071}
2072
2073
2074static int
2075seldrop(struct proc *p, u_int32_t *ibits, int nfd)
2076{
2077 int error;
2078 int need_wakeup = 0;
2079
2080 proc_fdlock(p);
2081 error = seldrop_locked(p, ibits, nfd, nfd, &need_wakeup, 0);
91447636 2082 proc_fdunlock(p);
6d2010ae
A
2083 if (need_wakeup) {
2084 wakeup(&p->p_fpdrainwait);
2085 }
2086 return (error);
0b4e3aa0
A
2087}
2088
1c79356b
A
2089/*
2090 * Record a select request.
2091 */
2092void
3e170ce0 2093selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data)
1c79356b 2094{
91447636 2095 thread_t cur_act = current_thread();
0b4e3aa0 2096 struct uthread * ut = get_bsdthread_info(cur_act);
3e170ce0
A
2097 /* on input, s_data points to the 64-bit ID of a reserved link object */
2098 uint64_t *reserved_link = (uint64_t *)s_data;
1c79356b 2099
0b4e3aa0
A
2100 /* need to look at collisions */
2101
0b4e3aa0 2102 /* do not record if this is second pass of select */
3e170ce0 2103 if (!s_data)
0b4e3aa0 2104 return;
1c79356b 2105
0b4e3aa0 2106 if ((sip->si_flags & SI_INITED) == 0) {
3e170ce0 2107 waitq_init(&sip->si_waitq, SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
0b4e3aa0
A
2108 sip->si_flags |= SI_INITED;
2109 sip->si_flags &= ~SI_CLEAR;
2110 }
2111
3e170ce0 2112 if (sip->si_flags & SI_RECORDED)
0b4e3aa0 2113 sip->si_flags |= SI_COLL;
3e170ce0 2114 else
0b4e3aa0
A
2115 sip->si_flags &= ~SI_COLL;
2116
2117 sip->si_flags |= SI_RECORDED;
3e170ce0
A
2118 /* note: this checks for pre-existing linkage */
2119 waitq_link(&sip->si_waitq, ut->uu_wqset,
2120 WAITQ_SHOULD_LOCK, reserved_link);
2121
2122 /*
2123 * Always consume the reserved link.
2124 * We can always call waitq_link_release() safely because if
2125 * waitq_link is successful, it consumes the link and resets the
2126 * value to 0, in which case our call to release becomes a no-op.
2127 * If waitq_link fails, then the following release call will actually
2128 * release the reserved link object.
2129 */
2130 waitq_link_release(*reserved_link);
2131 *reserved_link = 0;
2132
2133 /*
2134 * Use the s_data pointer as an output parameter as well
2135 * This avoids changing the prototype for this function which is
2136 * used by many kexts. We need to surface the waitq object
2137 * associated with the selinfo we just added to the thread's select
2138 * set. New waitq sets do not have back-pointers to set members, so
2139 * the only way to clear out set linkage objects is to go from the
2140 * waitq to the set. We use a memcpy because s_data could be
2141 * pointing to an unaligned value on the stack
2142 * (especially on 32-bit systems)
2143 */
2144 void *wqptr = (void *)&sip->si_waitq;
2145 memcpy((void *)s_data, (void *)&wqptr, sizeof(void *));
0b4e3aa0 2146
1c79356b
A
2147 return;
2148}
2149
2150void
2d21ac55 2151selwakeup(struct selinfo *sip)
1c79356b 2152{
1c79356b 2153
0b4e3aa0 2154 if ((sip->si_flags & SI_INITED) == 0) {
1c79356b 2155 return;
0b4e3aa0 2156 }
1c79356b
A
2157
2158 if (sip->si_flags & SI_COLL) {
2159 nselcoll++;
2160 sip->si_flags &= ~SI_COLL;
0b4e3aa0
A
2161#if 0
2162 /* will not support */
2163 //wakeup((caddr_t)&selwait);
2164#endif
1c79356b 2165 }
1c79356b 2166
0b4e3aa0 2167 if (sip->si_flags & SI_RECORDED) {
3e170ce0
A
2168 waitq_wakeup64_all(&sip->si_waitq, NO_EVENT64,
2169 THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
0b4e3aa0 2170 sip->si_flags &= ~SI_RECORDED;
1c79356b 2171 }
1c79356b 2172
1c79356b
A
2173}
2174
2175void
2d21ac55 2176selthreadclear(struct selinfo *sip)
1c79356b 2177{
3e170ce0 2178 struct waitq *wq;
1c79356b 2179
0b4e3aa0
A
2180 if ((sip->si_flags & SI_INITED) == 0) {
2181 return;
2182 }
2183 if (sip->si_flags & SI_RECORDED) {
2184 selwakeup(sip);
2185 sip->si_flags &= ~(SI_RECORDED | SI_COLL);
1c79356b 2186 }
0b4e3aa0 2187 sip->si_flags |= SI_CLEAR;
3e170ce0
A
2188 sip->si_flags &= ~SI_INITED;
2189
2190 wq = &sip->si_waitq;
2191
2192 /*
2193 * Higher level logic may have a handle on this waitq's prepost ID,
2194 * but that's OK because the waitq_deinit will remove/invalidate the
2195 * prepost object (as well as mark the waitq invalid). This de-couples
2196 * us from any callers that may have a handle to this waitq via the
2197 * prepost ID.
2198 */
2199 waitq_deinit(wq);
1c79356b
A
2200}
2201
2202
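/*
 * Illustrative sketch (not part of this file): the conventional way a
 * character driver uses selrecord()/selwakeup()/selthreadclear().  The
 * driver state below ("example_softc", "ex_data_ready") is hypothetical;
 * the select entry point follows the select_fcn_t shape used by in-tree
 * drivers, and 'wql' is the reserved-link cookie passed through to
 * selrecord() as s_data.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/proc.h>

struct example_softc {
	struct selinfo	ex_rsel;	/* readers parked in select/poll */
	int		ex_data_ready;	/* set by the data-arrival path */
};

static struct example_softc ex_sc;

/* select entry point: record the waiter when nothing is ready yet */
static int
example_select(__unused dev_t dev, int which, void *wql, struct proc *p)
{
	if (which != FREAD)
		return (0);
	if (ex_sc.ex_data_ready)
		return (1);			/* ready right now */
	selrecord(p, &ex_sc.ex_rsel, wql);	/* park until selwakeup() */
	return (0);
}

/* data-arrival path: mark ready and wake any recorded selectors */
static void
example_data_arrived(void)
{
	ex_sc.ex_data_ready = 1;
	selwakeup(&ex_sc.ex_rsel);
}

/* detach path: tear down any residual select linkage */
static void
example_detach(void)
{
	selthreadclear(&ex_sc.ex_rsel);
}
#endif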
91447636
A
2203
2204
91447636
A
2205#define DBG_POST 0x10
2206#define DBG_WATCH 0x11
2207#define DBG_WAIT 0x12
2208#define DBG_MOD 0x13
2209#define DBG_EWAKEUP 0x14
2210#define DBG_ENQUEUE 0x15
2211#define DBG_DEQUEUE 0x16
2212
2213#define DBG_MISC_POST MISCDBG_CODE(DBG_EVENT,DBG_POST)
2214#define DBG_MISC_WATCH MISCDBG_CODE(DBG_EVENT,DBG_WATCH)
2215#define DBG_MISC_WAIT MISCDBG_CODE(DBG_EVENT,DBG_WAIT)
2216#define DBG_MISC_MOD MISCDBG_CODE(DBG_EVENT,DBG_MOD)
2217#define DBG_MISC_EWAKEUP MISCDBG_CODE(DBG_EVENT,DBG_EWAKEUP)
2218#define DBG_MISC_ENQUEUE MISCDBG_CODE(DBG_EVENT,DBG_ENQUEUE)
2219#define DBG_MISC_DEQUEUE MISCDBG_CODE(DBG_EVENT,DBG_DEQUEUE)
2220
2221
2222#define EVPROCDEQUE(p, evq) do { \
2223 proc_lock(p); \
2224 if (evq->ee_flags & EV_QUEUED) { \
2225 TAILQ_REMOVE(&p->p_evlist, evq, ee_plist); \
2226 evq->ee_flags &= ~EV_QUEUED; \
2227 } \
2228 proc_unlock(p); \
2229} while (0);
2230
1c79356b
A
2231
2232/*
 2233 * called upon socket close. dequeue and free all events for
91447636 2234 * the socket... socket must be locked by caller.
1c79356b 2235 */
9bccf70c 2236void
1c79356b
A
2237evsofree(struct socket *sp)
2238{
91447636
A
2239 struct eventqelt *evq, *next;
2240 proc_t p;
2241
2242 if (sp == NULL)
2243 return;
1c79356b 2244
91447636
A
2245 for (evq = sp->so_evlist.tqh_first; evq != NULL; evq = next) {
2246 next = evq->ee_slist.tqe_next;
2247 p = evq->ee_proc;
1c79356b 2248
91447636
A
2249 if (evq->ee_flags & EV_QUEUED) {
2250 EVPROCDEQUE(p, evq);
2251 }
2252 TAILQ_REMOVE(&sp->so_evlist, evq, ee_slist); // remove from socket q
2253 FREE(evq, M_TEMP);
2254 }
1c79356b
A
2255}
2256
2257
91447636
A
2258/*
 2259 * called upon pipe close. dequeue and free all events for
2260 * the pipe... pipe must be locked by caller
2261 */
2262void
2263evpipefree(struct pipe *cpipe)
2264{
2265 struct eventqelt *evq, *next;
2266 proc_t p;
1c79356b 2267
91447636
A
2268 for (evq = cpipe->pipe_evlist.tqh_first; evq != NULL; evq = next) {
2269 next = evq->ee_slist.tqe_next;
2270 p = evq->ee_proc;
1c79356b 2271
91447636
A
2272 EVPROCDEQUE(p, evq);
2273
2274 TAILQ_REMOVE(&cpipe->pipe_evlist, evq, ee_slist); // remove from pipe q
2275 FREE(evq, M_TEMP);
2276 }
2277}
1c79356b
A
2278
2279
2280/*
91447636
A
2281 * enqueue this event if it's not already queued. wakeup
2282 * the proc if we do queue this event to it...
2283 * entered with proc lock held... we drop it before
2284 * doing the wakeup and return in that state
1c79356b 2285 */
91447636
A
2286static void
2287evprocenque(struct eventqelt *evq)
1c79356b 2288{
91447636
A
2289 proc_t p;
2290
2291 assert(evq);
2292 p = evq->ee_proc;
2293
2d21ac55 2294 KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_START, (uint32_t)evq, evq->ee_flags, evq->ee_eventmask,0,0);
91447636
A
2295
2296 proc_lock(p);
2297
2298 if (evq->ee_flags & EV_QUEUED) {
2299 proc_unlock(p);
2300
2301 KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0);
2302 return;
2303 }
2304 evq->ee_flags |= EV_QUEUED;
2305
2306 TAILQ_INSERT_TAIL(&p->p_evlist, evq, ee_plist);
2307
2308 proc_unlock(p);
2309
2310 wakeup(&p->p_evlist);
2311
2312 KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0);
1c79356b
A
2313}
2314
91447636 2315
1c79356b 2316/*
91447636 2317 * pipe lock must be taken by the caller
1c79356b 2318 */
9bccf70c 2319void
91447636 2320postpipeevent(struct pipe *pipep, int event)
1c79356b 2321{
91447636
A
2322 int mask;
2323 struct eventqelt *evq;
2324
2325 if (pipep == NULL)
2326 return;
2327 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, event,0,0,1,0);
2328
2329 for (evq = pipep->pipe_evlist.tqh_first;
2330 evq != NULL; evq = evq->ee_slist.tqe_next) {
2331
2332 if (evq->ee_eventmask == 0)
2333 continue;
2334 mask = 0;
2335
2336 switch (event & (EV_RWBYTES | EV_RCLOSED | EV_WCLOSED)) {
2337
2338 case EV_RWBYTES:
2339 if ((evq->ee_eventmask & EV_RE) && pipep->pipe_buffer.cnt) {
2340 mask |= EV_RE;
2341 evq->ee_req.er_rcnt = pipep->pipe_buffer.cnt;
2342 }
2343 if ((evq->ee_eventmask & EV_WR) &&
316670eb 2344 (MAX(pipep->pipe_buffer.size,PIPE_SIZE) - pipep->pipe_buffer.cnt) >= PIPE_BUF) {
91447636
A
2345
2346 if (pipep->pipe_state & PIPE_EOF) {
2347 mask |= EV_WR|EV_RESET;
2348 break;
2349 }
2350 mask |= EV_WR;
316670eb 2351 evq->ee_req.er_wcnt = MAX(pipep->pipe_buffer.size, PIPE_SIZE) - pipep->pipe_buffer.cnt;
91447636
A
2352 }
2353 break;
2354
2355 case EV_WCLOSED:
2356 case EV_RCLOSED:
2357 if ((evq->ee_eventmask & EV_RE)) {
2358 mask |= EV_RE|EV_RCLOSED;
2359 }
2360 if ((evq->ee_eventmask & EV_WR)) {
2361 mask |= EV_WR|EV_WCLOSED;
2362 }
2363 break;
2364
2365 default:
2366 return;
2367 }
2368 if (mask) {
2369 /*
2370 * disarm... postevents are nops until this event is 'read' via
2371 * waitevent and then re-armed via modwatch
2372 */
2373 evq->ee_eventmask = 0;
2374
2375 /*
2376 * since events are disarmed until after the waitevent
2377 * the ee_req.er_xxxx fields can't change once we've
2378 * inserted this event into the proc queue...
2379 * therefore, the waitevent will see a 'consistent'
2380 * snapshot of the event, even though it won't hold
2381 * the pipe lock, and we're updating the event outside
2382 * of the proc lock, which it will hold
2383 */
2384 evq->ee_req.er_eventbits |= mask;
2385
2d21ac55 2386 KERNEL_DEBUG(DBG_MISC_POST, (uint32_t)evq, evq->ee_req.er_eventbits, mask, 1,0);
91447636
A
2387
2388 evprocenque(evq);
2389 }
2390 }
2391 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, 0,0,0,1,0);
1c79356b
A
2392}
2393
2d21ac55 2394#if SOCKETS
1c79356b 2395/*
91447636
A
2396 * given either a sockbuf or a socket run down the
2397 * event list and queue ready events found...
2398 * the socket must be locked by the caller
1c79356b 2399 */
91447636
A
2400void
2401postevent(struct socket *sp, struct sockbuf *sb, int event)
1c79356b 2402{
91447636
A
2403 int mask;
2404 struct eventqelt *evq;
2405 struct tcpcb *tp;
2406
2407 if (sb)
2408 sp = sb->sb_so;
2409 if (sp == NULL)
2410 return;
2411
2412 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, (int)sp, event, 0, 0, 0);
2413
2414 for (evq = sp->so_evlist.tqh_first;
2415 evq != NULL; evq = evq->ee_slist.tqe_next) {
2416
2417 if (evq->ee_eventmask == 0)
2418 continue;
2419 mask = 0;
2420
2421 /* ready for reading:
2422 - byte cnt >= receive low water mark
2423 - read-half of conn closed
2424 - conn pending for listening sock
2425 - socket error pending
2426
2427 ready for writing
2428 - byte cnt avail >= send low water mark
2429 - write half of conn closed
2430 - socket error pending
2431 - non-blocking conn completed successfully
2432
2433 exception pending
2434 - out of band data
2435 - sock at out of band mark
2436 */
2437
2438 switch (event & EV_DMASK) {
2439
2440 case EV_OOB:
2441 if ((evq->ee_eventmask & EV_EX)) {
2442 if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK)))
2443 mask |= EV_EX|EV_OOB;
2444 }
2445 break;
2446
2447 case EV_RWBYTES|EV_OOB:
2448 if ((evq->ee_eventmask & EV_EX)) {
2449 if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK)))
2450 mask |= EV_EX|EV_OOB;
2451 }
2452 /*
2453 * fall into the next case
2454 */
2455 case EV_RWBYTES:
2456 if ((evq->ee_eventmask & EV_RE) && soreadable(sp)) {
39236c6e
A
2457 /* for AFP/OT purposes; may go away in future */
2458 if ((SOCK_DOM(sp) == PF_INET ||
2459 SOCK_DOM(sp) == PF_INET6) &&
2460 SOCK_PROTO(sp) == IPPROTO_TCP &&
2461 (sp->so_error == ECONNREFUSED ||
2462 sp->so_error == ECONNRESET)) {
2463 if (sp->so_pcb == NULL ||
2464 sotoinpcb(sp)->inp_state ==
2465 INPCB_STATE_DEAD ||
2466 (tp = sototcpcb(sp)) == NULL ||
2467 tp->t_state == TCPS_CLOSED) {
2468 mask |= EV_RE|EV_RESET;
2469 break;
91447636
A
2470 }
2471 }
2472 mask |= EV_RE;
2473 evq->ee_req.er_rcnt = sp->so_rcv.sb_cc;
2474
2475 if (sp->so_state & SS_CANTRCVMORE) {
2476 mask |= EV_FIN;
2477 break;
2478 }
2479 }
2480 if ((evq->ee_eventmask & EV_WR) && sowriteable(sp)) {
39236c6e
A
2481 /* for AFP/OT purposes; may go away in future */
2482 if ((SOCK_DOM(sp) == PF_INET ||
2483 SOCK_DOM(sp) == PF_INET6) &&
2484 SOCK_PROTO(sp) == IPPROTO_TCP &&
2485 (sp->so_error == ECONNREFUSED ||
2486 sp->so_error == ECONNRESET)) {
2487 if (sp->so_pcb == NULL ||
2488 sotoinpcb(sp)->inp_state ==
2489 INPCB_STATE_DEAD ||
2490 (tp = sototcpcb(sp)) == NULL ||
2491 tp->t_state == TCPS_CLOSED) {
2492 mask |= EV_WR|EV_RESET;
2493 break;
91447636
A
2494 }
2495 }
2496 mask |= EV_WR;
2497 evq->ee_req.er_wcnt = sbspace(&sp->so_snd);
2498 }
2499 break;
2500
2501 case EV_RCONN:
2502 if ((evq->ee_eventmask & EV_RE)) {
2503 mask |= EV_RE|EV_RCONN;
2504 evq->ee_req.er_rcnt = sp->so_qlen + 1; // incl this one
2505 }
2506 break;
2507
2508 case EV_WCONN:
2509 if ((evq->ee_eventmask & EV_WR)) {
2510 mask |= EV_WR|EV_WCONN;
2511 }
2512 break;
2513
2514 case EV_RCLOSED:
2515 if ((evq->ee_eventmask & EV_RE)) {
2516 mask |= EV_RE|EV_RCLOSED;
2517 }
2518 break;
2519
2520 case EV_WCLOSED:
2521 if ((evq->ee_eventmask & EV_WR)) {
2522 mask |= EV_WR|EV_WCLOSED;
2523 }
2524 break;
2525
2526 case EV_FIN:
2527 if (evq->ee_eventmask & EV_RE) {
2528 mask |= EV_RE|EV_FIN;
2529 }
2530 break;
2531
2532 case EV_RESET:
2533 case EV_TIMEOUT:
2534 if (evq->ee_eventmask & EV_RE) {
2535 mask |= EV_RE | event;
2536 }
2537 if (evq->ee_eventmask & EV_WR) {
2538 mask |= EV_WR | event;
2539 }
2540 break;
2541
2542 default:
2543 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, (int)sp, -1, 0, 0, 0);
2544 return;
2545 } /* switch */
2546
2547 KERNEL_DEBUG(DBG_MISC_POST, (int)evq, evq->ee_eventmask, evq->ee_req.er_eventbits, mask, 0);
2548
2549 if (mask) {
2550 /*
2551 * disarm... postevents are nops until this event is 'read' via
2552 * waitevent and then re-armed via modwatch
2553 */
2554 evq->ee_eventmask = 0;
2555
2556 /*
2557 * since events are disarmed until after the waitevent
2558 * the ee_req.er_xxxx fields can't change once we've
2559 * inserted this event into the proc queue...
2560 * since waitevent can't see this event until we
2561 * enqueue it, waitevent will see a 'consistent'
2562 * snapshot of the event, even though it won't hold
2563 * the socket lock, and we're updating the event outside
2564 * of the proc lock, which it will hold
2565 */
2566 evq->ee_req.er_eventbits |= mask;
2567
2568 evprocenque(evq);
2569 }
2570 }
2571 KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, (int)sp, 0, 0, 0, 0);
1c79356b 2572}
2d21ac55 2573#endif /* SOCKETS */
1c79356b 2574
1c79356b
A
2575
2576/*
2577 * watchevent system call. user passes us an event to watch
2578 * for. we malloc an event object, initialize it, and queue
2579 * it to the open socket. when the event occurs, postevent()
2580 * will enque it back to our proc where we can retrieve it
2581 * via waitevent().
2582 *
2583 * should this prevent duplicate events on same socket?
2d21ac55
A
2584 *
2585 * Returns:
2586 * ENOMEM No memory for operation
2587 * copyin:EFAULT
1c79356b
A
2588 */
2589int
91447636 2590watchevent(proc_t p, struct watchevent_args *uap, __unused int *retval)
1c79356b 2591{
91447636
A
2592 struct eventqelt *evq = (struct eventqelt *)0;
2593 struct eventqelt *np = NULL;
2d21ac55 2594 struct eventreq64 *erp;
91447636
A
2595 struct fileproc *fp = NULL;
2596 int error;
2597
2598 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_START, 0,0,0,0,0);
2599
2600 // get a qelt and fill with users req
2601 MALLOC(evq, struct eventqelt *, sizeof(struct eventqelt), M_TEMP, M_WAITOK);
2602
2603 if (evq == NULL)
2d21ac55 2604 return (ENOMEM);
91447636
A
2605 erp = &evq->ee_req;
2606
2607 // get users request pkt
91447636 2608
2d21ac55
A
2609 if (IS_64BIT_PROCESS(p)) {
2610 error = copyin(uap->u_req, (caddr_t)erp, sizeof(struct eventreq64));
2611 } else {
2612 struct eventreq32 er32;
2613
2614 error = copyin(uap->u_req, (caddr_t)&er32, sizeof(struct eventreq32));
2615 if (error == 0) {
2616 /*
2617 * the user only passes in the
2618 * er_type, er_handle and er_data...
2619 * the other fields are initialized
2620 * below, so don't bother to copy
2621 */
2622 erp->er_type = er32.er_type;
2623 erp->er_handle = er32.er_handle;
2624 erp->er_data = (user_addr_t)er32.er_data;
2625 }
2626 }
2627 if (error) {
2628 FREE(evq, M_TEMP);
91447636 2629 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0);
2d21ac55
A
2630
2631 return(error);
91447636 2632 }
2d21ac55 2633 KERNEL_DEBUG(DBG_MISC_WATCH, erp->er_handle,uap->u_eventmask,(uint32_t)evq,0,0);
91447636
A
2634
2635 // validate, freeing qelt if errors
2636 error = 0;
2637 proc_fdlock(p);
2638
2639 if (erp->er_type != EV_FD) {
2640 error = EINVAL;
2641 } else if ((error = fp_lookup(p, erp->er_handle, &fp, 1)) != 0) {
2642 error = EBADF;
2d21ac55 2643#if SOCKETS
91447636
A
2644 } else if (fp->f_type == DTYPE_SOCKET) {
2645 socket_lock((struct socket *)fp->f_data, 1);
2646 np = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
2d21ac55 2647#endif /* SOCKETS */
91447636
A
2648 } else if (fp->f_type == DTYPE_PIPE) {
2649 PIPE_LOCK((struct pipe *)fp->f_data);
2650 np = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
2651 } else {
2652 fp_drop(p, erp->er_handle, fp, 1);
2653 error = EINVAL;
2654 }
2655 proc_fdunlock(p);
2656
2657 if (error) {
2658 FREE(evq, M_TEMP);
2659
2660 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0);
2661 return(error);
2662 }
2663
2664 /*
2665 * only allow one watch per file per proc
2666 */
2667 for ( ; np != NULL; np = np->ee_slist.tqe_next) {
2668 if (np->ee_proc == p) {
2d21ac55 2669#if SOCKETS
91447636
A
2670 if (fp->f_type == DTYPE_SOCKET)
2671 socket_unlock((struct socket *)fp->f_data, 1);
2672 else
2d21ac55 2673#endif /* SOCKETS */
91447636
A
2674 PIPE_UNLOCK((struct pipe *)fp->f_data);
2675 fp_drop(p, erp->er_handle, fp, 0);
2676 FREE(evq, M_TEMP);
2677
2678 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, EINVAL,0,0,0,0);
2679 return(EINVAL);
2680 }
2681 }
2682 erp->er_ecnt = erp->er_rcnt = erp->er_wcnt = erp->er_eventbits = 0;
2683 evq->ee_proc = p;
2684 evq->ee_eventmask = uap->u_eventmask & EV_MASK;
2685 evq->ee_flags = 0;
2686
2d21ac55 2687#if SOCKETS
91447636
A
2688 if (fp->f_type == DTYPE_SOCKET) {
2689 TAILQ_INSERT_TAIL(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
2690 postevent((struct socket *)fp->f_data, 0, EV_RWBYTES); // catch existing events
2691
2692 socket_unlock((struct socket *)fp->f_data, 1);
2d21ac55
A
2693 } else
2694#endif /* SOCKETS */
2695 {
91447636
A
2696 TAILQ_INSERT_TAIL(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
2697 postpipeevent((struct pipe *)fp->f_data, EV_RWBYTES);
2698
2699 PIPE_UNLOCK((struct pipe *)fp->f_data);
2700 }
2701 fp_drop_event(p, erp->er_handle, fp);
2702
2703 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, 0,0,0,0,0);
2704 return(0);
1c79356b
A
2705}
2706
91447636 2707
1c79356b
A
2708
2709/*
2710 * waitevent system call.
2711 * grabs the next waiting event for this proc and returns
 2712 * it. if there are no events, the user can request to sleep with a timeout
2d21ac55
A
 2713 * or without one, or to use poll mode
2714 * ((tv != NULL && interval == 0) || tv == -1)
1c79356b
A
2715 */
2716int
91447636 2717waitevent(proc_t p, struct waitevent_args *uap, int *retval)
1c79356b 2718{
91447636
A
2719 int error = 0;
2720 struct eventqelt *evq;
2d21ac55 2721 struct eventreq64 *erp;
9bccf70c 2722 uint64_t abstime, interval;
2d21ac55
A
2723 boolean_t fast_poll = FALSE;
2724 union {
2725 struct eventreq64 er64;
2726 struct eventreq32 er32;
2727 } uer;
2728
2729 interval = 0;
1c79356b
A
2730
2731 if (uap->tv) {
9bccf70c 2732 struct timeval atv;
2d21ac55
A
2733 /*
2734 * check for fast poll method
2735 */
2736 if (IS_64BIT_PROCESS(p)) {
2737 if (uap->tv == (user_addr_t)-1)
2738 fast_poll = TRUE;
2739 } else if (uap->tv == (user_addr_t)((uint32_t)-1))
2740 fast_poll = TRUE;
2741
2742 if (fast_poll == TRUE) {
2743 if (p->p_evlist.tqh_first == NULL) {
2744 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_NONE, -1,0,0,0,0);
2745 /*
2746 * poll failed
2747 */
2748 *retval = 1;
2749 return (0);
2750 }
2751 proc_lock(p);
2752 goto retry;
2753 }
b0d623f7
A
2754 if (IS_64BIT_PROCESS(p)) {
2755 struct user64_timeval atv64;
2756 error = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
2757 /* Loses resolution - assume timeout < 68 years */
2758 atv.tv_sec = atv64.tv_sec;
2759 atv.tv_usec = atv64.tv_usec;
2760 } else {
2761 struct user32_timeval atv32;
2762 error = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
2763 atv.tv_sec = atv32.tv_sec;
2764 atv.tv_usec = atv32.tv_usec;
2765 }
9bccf70c 2766
1c79356b 2767 if (error)
9bccf70c 2768 return(error);
1c79356b
A
2769 if (itimerfix(&atv)) {
2770 error = EINVAL;
2771 return(error);
2772 }
9bccf70c 2773 interval = tvtoabstime(&atv);
2d21ac55 2774 }
9bccf70c 2775 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_START, 0,0,0,0,0);
1c79356b 2776
91447636 2777 proc_lock(p);
1c79356b 2778retry:
91447636
A
2779 if ((evq = p->p_evlist.tqh_first) != NULL) {
2780 /*
2781 * found one... make a local copy while it's still on the queue
2782 * to prevent it from changing while in the midst of copying
2783 * don't want to hold the proc lock across a copyout because
2784 * it might block on a page fault at the target in user space
2785 */
2d21ac55 2786 erp = &evq->ee_req;
91447636 2787
2d21ac55
A
2788 if (IS_64BIT_PROCESS(p))
2789 bcopy((caddr_t)erp, (caddr_t)&uer.er64, sizeof (struct eventreq64));
2790 else {
2791 uer.er32.er_type = erp->er_type;
2792 uer.er32.er_handle = erp->er_handle;
2793 uer.er32.er_data = (uint32_t)erp->er_data;
2794 uer.er32.er_ecnt = erp->er_ecnt;
2795 uer.er32.er_rcnt = erp->er_rcnt;
2796 uer.er32.er_wcnt = erp->er_wcnt;
2797 uer.er32.er_eventbits = erp->er_eventbits;
2798 }
91447636
A
2799 TAILQ_REMOVE(&p->p_evlist, evq, ee_plist);
2800
2801 evq->ee_flags &= ~EV_QUEUED;
1c79356b 2802
91447636
A
2803 proc_unlock(p);
2804
2d21ac55
A
2805 if (IS_64BIT_PROCESS(p))
2806 error = copyout((caddr_t)&uer.er64, uap->u_req, sizeof(struct eventreq64));
2807 else
2808 error = copyout((caddr_t)&uer.er32, uap->u_req, sizeof(struct eventreq32));
91447636
A
2809
2810 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,
2d21ac55 2811 evq->ee_req.er_handle,evq->ee_req.er_eventbits,(uint32_t)evq,0);
9bccf70c
A
2812 return (error);
2813 }
2814 else {
2815 if (uap->tv && interval == 0) {
91447636 2816 proc_unlock(p);
9bccf70c 2817 *retval = 1; // poll failed
9bccf70c 2818
91447636 2819 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,0,0,0,0);
9bccf70c
A
2820 return (error);
2821 }
9bccf70c 2822 if (interval != 0)
55e303ae 2823 clock_absolutetime_interval_to_deadline(interval, &abstime);
91447636
A
2824 else
2825 abstime = 0;
9bccf70c 2826
2d21ac55 2827 KERNEL_DEBUG(DBG_MISC_WAIT, 1,(uint32_t)&p->p_evlist,0,0,0);
91447636
A
2828
2829 error = msleep1(&p->p_evlist, &p->p_mlock, (PSOCK | PCATCH), "waitevent", abstime);
2830
2d21ac55 2831 KERNEL_DEBUG(DBG_MISC_WAIT, 2,(uint32_t)&p->p_evlist,0,0,0);
91447636 2832
9bccf70c
A
2833 if (error == 0)
2834 goto retry;
2835 if (error == ERESTART)
2836 error = EINTR;
2837 if (error == EWOULDBLOCK) {
2838 *retval = 1;
2839 error = 0;
2840 }
2841 }
91447636 2842 proc_unlock(p);
9bccf70c
A
2843
2844 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, 0,0,0,0,0);
9bccf70c 2845 return (error);
1c79356b
A
2846}
2847
1c79356b
A
2848
2849/*
2850 * modwatch system call. user passes in event to modify.
 2851 * if we find it we reset the event bits and queue/dequeue the event
 2852 * as needed.
2853 */
2854int
91447636 2855modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval)
1c79356b 2856{
2d21ac55
A
2857 struct eventreq64 er;
2858 struct eventreq64 *erp = &er;
2859 struct eventqelt *evq = NULL; /* protected by error return */
91447636
A
2860 int error;
2861 struct fileproc *fp;
2862 int flag;
2863
2864 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_START, 0,0,0,0,0);
2865
2866 /*
2867 * get user's request pkt
2d21ac55
A
2868 * just need the er_type and er_handle which sit above the
2869 * problematic er_data (32/64 issue)... so only copy in
2870 * those 2 fields
91447636 2871 */
2d21ac55
A
2872 if ((error = copyin(uap->u_req, (caddr_t)erp, sizeof(er.er_type) + sizeof(er.er_handle)))) {
2873 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, error,0,0,0,0);
91447636
A
2874 return(error);
2875 }
2876 proc_fdlock(p);
2877
2878 if (erp->er_type != EV_FD) {
2879 error = EINVAL;
2880 } else if ((error = fp_lookup(p, erp->er_handle, &fp, 1)) != 0) {
2881 error = EBADF;
2d21ac55 2882#if SOCKETS
91447636
A
2883 } else if (fp->f_type == DTYPE_SOCKET) {
2884 socket_lock((struct socket *)fp->f_data, 1);
2885 evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
2d21ac55 2886#endif /* SOCKETS */
91447636
A
2887 } else if (fp->f_type == DTYPE_PIPE) {
2888 PIPE_LOCK((struct pipe *)fp->f_data);
2889 evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
2890 } else {
2891 fp_drop(p, erp->er_handle, fp, 1);
2892 error = EINVAL;
2893 }
2894
2895 if (error) {
2896 proc_fdunlock(p);
2897 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, error,0,0,0,0);
2898 return(error);
2899 }
2900
2901 if ((uap->u_eventmask == EV_RM) && (fp->f_flags & FP_WAITEVENT)) {
2902 fp->f_flags &= ~FP_WAITEVENT;
2903 }
2904 proc_fdunlock(p);
2905
2906 // locate event if possible
2907 for ( ; evq != NULL; evq = evq->ee_slist.tqe_next) {
2908 if (evq->ee_proc == p)
2909 break;
2910 }
2911 if (evq == NULL) {
2d21ac55 2912#if SOCKETS
91447636
A
2913 if (fp->f_type == DTYPE_SOCKET)
2914 socket_unlock((struct socket *)fp->f_data, 1);
2d21ac55
A
2915 else
2916#endif /* SOCKETS */
91447636
A
2917 PIPE_UNLOCK((struct pipe *)fp->f_data);
2918 fp_drop(p, erp->er_handle, fp, 0);
2919 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, EINVAL,0,0,0,0);
2920 return(EINVAL);
2921 }
2d21ac55 2922 KERNEL_DEBUG(DBG_MISC_MOD, erp->er_handle,uap->u_eventmask,(uint32_t)evq,0,0);
91447636
A
2923
2924 if (uap->u_eventmask == EV_RM) {
2925 EVPROCDEQUE(p, evq);
2926
2d21ac55 2927#if SOCKETS
91447636
A
2928 if (fp->f_type == DTYPE_SOCKET) {
2929 TAILQ_REMOVE(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
2930 socket_unlock((struct socket *)fp->f_data, 1);
2d21ac55
A
2931 } else
2932#endif /* SOCKETS */
2933 {
91447636
A
2934 TAILQ_REMOVE(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
2935 PIPE_UNLOCK((struct pipe *)fp->f_data);
2936 }
2937 fp_drop(p, erp->er_handle, fp, 0);
2938 FREE(evq, M_TEMP);
2939 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, 0,0,0,0,0);
2940 return(0);
2941 }
2942 switch (uap->u_eventmask & EV_MASK) {
1c79356b 2943
91447636
A
2944 case 0:
2945 flag = 0;
2946 break;
2947
2948 case EV_RE:
2949 case EV_WR:
2950 case EV_RE|EV_WR:
2951 flag = EV_RWBYTES;
2952 break;
2953
2954 case EV_EX:
2955 flag = EV_OOB;
2956 break;
2957
2958 case EV_EX|EV_RE:
2959 case EV_EX|EV_WR:
2960 case EV_EX|EV_RE|EV_WR:
2961 flag = EV_OOB|EV_RWBYTES;
2962 break;
2963
2964 default:
2d21ac55 2965#if SOCKETS
91447636
A
2966 if (fp->f_type == DTYPE_SOCKET)
2967 socket_unlock((struct socket *)fp->f_data, 1);
2968 else
2d21ac55 2969#endif /* SOCKETS */
91447636
A
2970 PIPE_UNLOCK((struct pipe *)fp->f_data);
2971 fp_drop(p, erp->er_handle, fp, 0);
2972 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, EINVAL,0,0,0,0);
2973 return(EINVAL);
2974 }
2975 /*
2976 * since we're holding the socket/pipe lock, the event
2977 * cannot go from the unqueued state to the queued state
2978 * however, it can go from the queued state to the unqueued state
2979 * since that direction is protected by the proc_lock...
2980 * so do a quick check for EV_QUEUED w/o holding the proc lock
2981 * since by far the common case will be NOT EV_QUEUED, this saves
2982 * us taking the proc_lock the majority of the time
2983 */
2984 if (evq->ee_flags & EV_QUEUED) {
2985 /*
2986 * EVPROCDEQUE will recheck the state after it grabs the proc_lock
2987 */
2988 EVPROCDEQUE(p, evq);
2989 }
2990 /*
2991 * while the event is off the proc queue and
2992 * we're holding the socket/pipe lock
2993 * it's safe to update these fields...
2994 */
2995 evq->ee_req.er_eventbits = 0;
2996 evq->ee_eventmask = uap->u_eventmask & EV_MASK;
2997
2d21ac55 2998#if SOCKETS
91447636
A
2999 if (fp->f_type == DTYPE_SOCKET) {
3000 postevent((struct socket *)fp->f_data, 0, flag);
3001 socket_unlock((struct socket *)fp->f_data, 1);
2d21ac55
A
3002 } else
3003#endif /* SOCKETS */
3004 {
91447636
A
3005 postpipeevent((struct pipe *)fp->f_data, flag);
3006 PIPE_UNLOCK((struct pipe *)fp->f_data);
3007 }
3008 fp_drop(p, erp->er_handle, fp, 0);
2d21ac55 3009 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, evq->ee_req.er_handle,evq->ee_eventmask,(uint32_t)fp->f_data,flag,0);
91447636 3010 return(0);
1c79356b 3011}
91447636
A
3012
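/*
 * Illustrative user-space sketch (not part of this file): one pass through
 * the watchevent()/waitevent()/modwatch() cycle implemented above.  This is
 * a legacy, Apple-private interface; the sketch assumes the non-KERNEL
 * wrappers and struct eventreq declared in <sys/ev.h> are visible to the
 * caller, which may not hold on current SDKs.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/ev.h>
#include <stdio.h>

static void
watch_socket(int sock_fd)
{
	struct eventreq er = { 0 };
	struct timeval tv = { 5, 0 };		/* sleep at most 5 seconds */

	er.er_type = EV_FD;			/* only EV_FD is accepted */
	er.er_handle = sock_fd;

	/* register interest in read/write readiness (EV_RWBYTES posts) */
	if (watchevent(&er, EV_RE | EV_WR) != 0)
		return;

	/* pick up the next posted event; a return of 1 means the wait timed out */
	if (waitevent(&er, &tv) == 0)
		printf("fd %d eventbits 0x%x\n", sock_fd, (unsigned)er.er_eventbits);

	/* events disarm once read; re-arm them, or pass EV_RM to remove the watch */
	(void)modwatch(&er, EV_RE | EV_WR);
}
#endif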
3013/* this routine is called from the close of fd with proc_fdlock held */
3014int
3015waitevent_close(struct proc *p, struct fileproc *fp)
3016{
3017 struct eventqelt *evq;
3018
3019
3020 fp->f_flags &= ~FP_WAITEVENT;
3021
2d21ac55 3022#if SOCKETS
91447636
A
3023 if (fp->f_type == DTYPE_SOCKET) {
3024 socket_lock((struct socket *)fp->f_data, 1);
3025 evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
2d21ac55
A
3026 } else
3027#endif /* SOCKETS */
3028 if (fp->f_type == DTYPE_PIPE) {
91447636
A
3029 PIPE_LOCK((struct pipe *)fp->f_data);
3030 evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
3031 }
3032 else {
3033 return(EINVAL);
3034 }
3035 proc_fdunlock(p);
3036
3037
3038 // locate event if possible
3039 for ( ; evq != NULL; evq = evq->ee_slist.tqe_next) {
3040 if (evq->ee_proc == p)
3041 break;
3042 }
3043 if (evq == NULL) {
2d21ac55 3044#if SOCKETS
91447636
A
3045 if (fp->f_type == DTYPE_SOCKET)
3046 socket_unlock((struct socket *)fp->f_data, 1);
3047 else
2d21ac55 3048#endif /* SOCKETS */
91447636
A
3049 PIPE_UNLOCK((struct pipe *)fp->f_data);
3050
3051 proc_fdlock(p);
3052
3053 return(EINVAL);
3054 }
3055 EVPROCDEQUE(p, evq);
3056
2d21ac55 3057#if SOCKETS
91447636
A
3058 if (fp->f_type == DTYPE_SOCKET) {
3059 TAILQ_REMOVE(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
3060 socket_unlock((struct socket *)fp->f_data, 1);
2d21ac55
A
3061 } else
3062#endif /* SOCKETS */
3063 {
91447636
A
3064 TAILQ_REMOVE(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
3065 PIPE_UNLOCK((struct pipe *)fp->f_data);
3066 }
3067 FREE(evq, M_TEMP);
3068
3069 proc_fdlock(p);
3070
3071 return(0);
3072}
3073
2d21ac55
A
3074
3075/*
3076 * gethostuuid
3077 *
3078 * Description: Get the host UUID from IOKit and return it to user space.
3079 *
3080 * Parameters: uuid_buf Pointer to buffer to receive UUID
 3081 * timeout Timespec for timeout
39236c6e 3082 * spi SPI, skip sandbox check (temporary)
2d21ac55
A
3083 *
3084 * Returns: 0 Success
3085 * EWOULDBLOCK Timeout is too short
3086 * copyout:EFAULT Bad user buffer
fe8ab488 3087 * mac_system_check_info:EPERM Client not allowed to perform this operation
2d21ac55
A
3088 *
3089 * Notes: A timeout seems redundant, since if it's tolerable to not
3090 * have a system UUID in hand, then why ask for one?
3091 */
3092int
b0d623f7 3093gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval)
2d21ac55
A
3094{
3095 kern_return_t kret;
3096 int error;
3097 mach_timespec_t mach_ts; /* for IOKit call */
3098 __darwin_uuid_t uuid_kern; /* for IOKit call */
3099
39236c6e 3100 if (!uap->spi) {
39236c6e
A
3101 }
3102
2d21ac55
A
3103 /* Convert the 32/64 bit timespec into a mach_timespec_t */
3104 if ( proc_is64bit(p) ) {
b0d623f7 3105 struct user64_timespec ts;
2d21ac55
A
3106 error = copyin(uap->timeoutp, &ts, sizeof(ts));
3107 if (error)
3108 return (error);
3109 mach_ts.tv_sec = ts.tv_sec;
3110 mach_ts.tv_nsec = ts.tv_nsec;
3111 } else {
b0d623f7 3112 struct user32_timespec ts;
2d21ac55
A
3113 error = copyin(uap->timeoutp, &ts, sizeof(ts) );
3114 if (error)
3115 return (error);
3116 mach_ts.tv_sec = ts.tv_sec;
3117 mach_ts.tv_nsec = ts.tv_nsec;
3118 }
3119
3120 /* Call IOKit with the stack buffer to get the UUID */
3121 kret = IOBSDGetPlatformUUID(uuid_kern, mach_ts);
3122
3123 /*
3124 * If we get it, copy out the data to the user buffer; note that a
3125 * uuid_t is an array of characters, so this is size invariant for
3126 * 32 vs. 64 bit.
3127 */
3128 if (kret == KERN_SUCCESS) {
3129 error = copyout(uuid_kern, uap->uuid_buf, sizeof(uuid_kern));
3130 } else {
3131 error = EWOULDBLOCK;
3132 }
3133
3134 return (error);
3135}
316670eb
A
3136
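/*
 * Illustrative user-space sketch (not part of this file): calling the public
 * gethostuuid(2) wrapper, which lands in the handler above.  The prototype
 * (a uuid_t buffer plus a timespec timeout) is taken from the macOS
 * <unistd.h> declaration; uuid_unparse_upper() comes from <uuid/uuid.h>.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <unistd.h>
#include <uuid/uuid.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	uuid_t uu;
	uuid_string_t str;
	struct timespec ts = { 5, 0 };		/* wait at most 5 seconds */

	if (gethostuuid(uu, &ts) != 0) {
		perror("gethostuuid");
		return (1);
	}
	uuid_unparse_upper(uu, str);
	printf("host UUID: %s\n", str);
	return (0);
}
#endif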
3137/*
3138 * ledger
3139 *
3140 * Description: Omnibus system call for ledger operations
3141 */
3142int
3143ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval)
3144{
39236c6e
A
3145#if !CONFIG_MACF
3146#pragma unused(p)
3147#endif
316670eb
A
3148 int rval, pid, len, error;
3149#ifdef LEDGER_DEBUG
3150 struct ledger_limit_args lla;
3151#endif
3152 task_t task;
3153 proc_t proc;
3154
3155 /* Finish copying in the necessary args before taking the proc lock */
3156 error = 0;
3157 len = 0;
3158 if (args->cmd == LEDGER_ENTRY_INFO)
3159 error = copyin(args->arg3, (char *)&len, sizeof (len));
3160 else if (args->cmd == LEDGER_TEMPLATE_INFO)
3161 error = copyin(args->arg2, (char *)&len, sizeof (len));
3162#ifdef LEDGER_DEBUG
3163 else if (args->cmd == LEDGER_LIMIT)
3164 error = copyin(args->arg2, (char *)&lla, sizeof (lla));
3165#endif
ecc0ceb4
A
3166 else if ((args->cmd < 0) || (args->cmd > LEDGER_MAX_CMD))
3167 return (EINVAL);
3168
316670eb
A
3169 if (error)
3170 return (error);
3171 if (len < 0)
3172 return (EINVAL);
3173
3174 rval = 0;
3175 if (args->cmd != LEDGER_TEMPLATE_INFO) {
3176 pid = args->arg1;
3177 proc = proc_find(pid);
3178 if (proc == NULL)
3179 return (ESRCH);
3180
3181#if CONFIG_MACF
3182 error = mac_proc_check_ledger(p, proc, args->cmd);
3183 if (error) {
3184 proc_rele(proc);
3185 return (error);
3186 }
3187#endif
3188
3189 task = proc->task;
3190 }
3191
3192 switch (args->cmd) {
3193#ifdef LEDGER_DEBUG
3194 case LEDGER_LIMIT: {
39236c6e 3195 if (!kauth_cred_issuser(kauth_cred_get()))
316670eb
A
3196 rval = EPERM;
3197 rval = ledger_limit(task, &lla);
3198 proc_rele(proc);
3199 break;
3200 }
3201#endif
3202 case LEDGER_INFO: {
3203 struct ledger_info info;
3204
3205 rval = ledger_info(task, &info);
3206 proc_rele(proc);
3207 if (rval == 0)
3208 rval = copyout(&info, args->arg2,
3209 sizeof (info));
3210 break;
3211 }
3212
3213 case LEDGER_ENTRY_INFO: {
3214 void *buf;
3215 int sz;
3216
39236c6e 3217 rval = ledger_get_task_entry_info_multiple(task, &buf, &len);
316670eb
A
3218 proc_rele(proc);
3219 if ((rval == 0) && (len > 0)) {
3220 sz = len * sizeof (struct ledger_entry_info);
3221 rval = copyout(buf, args->arg2, sz);
3222 kfree(buf, sz);
3223 }
3224 if (rval == 0)
3225 rval = copyout(&len, args->arg3, sizeof (len));
3226 break;
3227 }
3228
3229 case LEDGER_TEMPLATE_INFO: {
3230 void *buf;
3231 int sz;
3232
3233 rval = ledger_template_info(&buf, &len);
3234 if ((rval == 0) && (len > 0)) {
3235 sz = len * sizeof (struct ledger_template_info);
3236 rval = copyout(buf, args->arg1, sz);
3237 kfree(buf, sz);
3238 }
3239 if (rval == 0)
3240 rval = copyout(&len, args->arg2, sizeof (len));
3241 break;
3242 }
3243
3244 default:
ecc0ceb4
A
3245 panic("ledger syscall logic error -- command type %d", args->cmd);
3246 proc_rele(proc);
316670eb
A
3247 rval = EINVAL;
3248 }
3249
3250 return (rval);
3251}
39236c6e 3252
39236c6e
A
3253int
3254telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t *retval)
3255{
3256 int error = 0;
3257
3258 switch (args->cmd) {
fe8ab488 3259#if CONFIG_TELEMETRY
39236c6e
A
3260 case TELEMETRY_CMD_TIMER_EVENT:
3261 error = telemetry_timer_event(args->deadline, args->interval, args->leeway);
3262 break;
fe8ab488
A
3263#endif /* CONFIG_TELEMETRY */
3264 case TELEMETRY_CMD_VOUCHER_NAME:
3265 if (thread_set_voucher_name((mach_port_name_t)args->deadline))
3266 error = EINVAL;
3267 break;
3268
39236c6e
A
3269 default:
3270 error = EINVAL;
3271 break;
3272 }
3273
3274 return (error);
3275}
3e170ce0
A
3276
3277#if defined(DEVELOPMENT) || defined(DEBUG)
3278#if CONFIG_WAITQ_DEBUG
3279static uint64_t g_wqset_num = 0;
3280struct g_wqset {
3281 queue_chain_t link;
3282 struct waitq_set *wqset;
3283};
3284
3285static queue_head_t g_wqset_list;
3286static struct waitq_set *g_waitq_set = NULL;
3287
3288static inline struct waitq_set *sysctl_get_wqset(int idx)
3289{
3290 struct g_wqset *gwqs;
3291
3292 if (!g_wqset_num)
3293 queue_init(&g_wqset_list);
3294
3295 /* don't bother with locks: this is test-only code! */
3296 qe_foreach_element(gwqs, &g_wqset_list, link) {
3297 if ((int)(wqset_id(gwqs->wqset) & 0xffffffff) == idx)
3298 return gwqs->wqset;
3299 }
3300
3301 /* allocate a new one */
3302 ++g_wqset_num;
3303 gwqs = (struct g_wqset *)kalloc(sizeof(*gwqs));
3304 assert(gwqs != NULL);
3305
3306 gwqs->wqset = waitq_set_alloc(SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST|SYNC_POLICY_DISABLE_IRQ);
3307 enqueue_tail(&g_wqset_list, &gwqs->link);
3308 printf("[WQ]: created new waitq set 0x%llx\n", wqset_id(gwqs->wqset));
3309
3310 return gwqs->wqset;
3311}
3312
3313#define MAX_GLOBAL_TEST_QUEUES 64
3314static int g_wq_init = 0;
3315static struct waitq g_wq[MAX_GLOBAL_TEST_QUEUES];
3316
3317static inline struct waitq *global_test_waitq(int idx)
3318{
3319 if (idx < 0)
3320 return NULL;
3321
3322 if (!g_wq_init) {
3323 g_wq_init = 1;
3324 for (int i = 0; i < MAX_GLOBAL_TEST_QUEUES; i++)
3325 waitq_init(&g_wq[i], SYNC_POLICY_FIFO|SYNC_POLICY_DISABLE_IRQ);
3326 }
3327
3328 return &g_wq[idx % MAX_GLOBAL_TEST_QUEUES];
3329}
3330
3331static int sysctl_waitq_wakeup_one SYSCTL_HANDLER_ARGS
3332{
3333#pragma unused(oidp, arg1, arg2)
3334 int error;
3335 int index;
3336 struct waitq *waitq;
3337 kern_return_t kr;
3338 int64_t event64 = 0;
3339
3340 error = SYSCTL_IN(req, &event64, sizeof(event64));
3341 if (error)
3342 return error;
3343
3344 if (!req->newptr)
3345 return SYSCTL_OUT(req, &event64, sizeof(event64));
3346
3347 if (event64 < 0) {
3348 index = (int)((-event64) & 0xffffffff);
3349 waitq = wqset_waitq(sysctl_get_wqset(index));
3350 index = -index;
3351 } else {
3352 index = (int)event64;
3353 waitq = global_test_waitq(index);
3354 }
3355
3356 event64 = 0;
3357
3358 printf("[WQ]: Waking one thread on waitq [%d] event:0x%llx\n",
3359 index, event64);
3360 kr = waitq_wakeup64_one(waitq, (event64_t)event64, THREAD_AWAKENED,
3361 WAITQ_ALL_PRIORITIES);
3362 printf("[WQ]: \tkr=%d\n", kr);
3363
3364 return SYSCTL_OUT(req, &kr, sizeof(kr));
3365}
3366SYSCTL_PROC(_kern, OID_AUTO, waitq_wakeup_one, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3367 0, 0, sysctl_waitq_wakeup_one, "Q", "wakeup one thread waiting on given event");
3368
3369
3370static int sysctl_waitq_wakeup_all SYSCTL_HANDLER_ARGS
3371{
3372#pragma unused(oidp, arg1, arg2)
3373 int error;
3374 int index;
3375 struct waitq *waitq;
3376 kern_return_t kr;
3377 int64_t event64 = 0;
3378
3379 error = SYSCTL_IN(req, &event64, sizeof(event64));
3380 if (error)
3381 return error;
3382
3383 if (!req->newptr)
3384 return SYSCTL_OUT(req, &event64, sizeof(event64));
3385
3386 if (event64 < 0) {
3387 index = (int)((-event64) & 0xffffffff);
3388 waitq = wqset_waitq(sysctl_get_wqset(index));
3389 index = -index;
3390 } else {
3391 index = (int)event64;
3392 waitq = global_test_waitq(index);
3393 }
3394
3395 event64 = 0;
3396
3397 printf("[WQ]: Waking all threads on waitq [%d] event:0x%llx\n",
3398 index, event64);
3399 kr = waitq_wakeup64_all(waitq, (event64_t)event64,
3400 THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
3401 printf("[WQ]: \tkr=%d\n", kr);
3402
3403 return SYSCTL_OUT(req, &kr, sizeof(kr));
3404}
3405SYSCTL_PROC(_kern, OID_AUTO, waitq_wakeup_all, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3406 0, 0, sysctl_waitq_wakeup_all, "Q", "wakeup all threads waiting on given event");
3407
3408
3409static int sysctl_waitq_wait SYSCTL_HANDLER_ARGS
3410{
3411#pragma unused(oidp, arg1, arg2)
3412 int error;
3413 int index;
3414 struct waitq *waitq;
3415 kern_return_t kr;
3416 int64_t event64 = 0;
3417
3418 error = SYSCTL_IN(req, &event64, sizeof(event64));
3419 if (error)
3420 return error;
3421
3422 if (!req->newptr)
3423 return SYSCTL_OUT(req, &event64, sizeof(event64));
3424
3425 if (event64 < 0) {
3426 index = (int)((-event64) & 0xffffffff);
3427 waitq = wqset_waitq(sysctl_get_wqset(index));
3428 index = -index;
3429 } else {
3430 index = (int)event64;
3431 waitq = global_test_waitq(index);
3432 }
3433
3434 event64 = 0;
3435
3436 printf("[WQ]: Current thread waiting on waitq [%d] event:0x%llx\n",
3437 index, event64);
3438 kr = waitq_assert_wait64(waitq, (event64_t)event64, THREAD_INTERRUPTIBLE, 0);
3439 if (kr == THREAD_WAITING)
3440 thread_block(THREAD_CONTINUE_NULL);
3441 printf("[WQ]: \tWoke Up: kr=%d\n", kr);
3442
3443 return SYSCTL_OUT(req, &kr, sizeof(kr));
3444}
3445SYSCTL_PROC(_kern, OID_AUTO, waitq_wait, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3446 0, 0, sysctl_waitq_wait, "Q", "start waiting on given event");
3447
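/*
 * Illustrative user-space sketch (not part of this file): poking the
 * DEVELOPMENT/DEBUG-only test knobs above through sysctlbyname(3).  The
 * "kern.waitq_wait" / "kern.waitq_wakeup_one" names follow from the
 * SYSCTL_PROC declarations; they exist only on kernels built with
 * CONFIG_WAITQ_DEBUG.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

/* write the waitq index (event64), read back the kern_return_t */
static int
wq_sysctl(const char *name, int64_t event64)
{
	int kr = 0;
	size_t len = sizeof(kr);

	if (sysctlbyname(name, &kr, &len, &event64, sizeof(event64)) != 0)
		return (-1);
	return (kr);
}

int
main(void)
{
	/* some other thread/process blocks first via "kern.waitq_wait" */
	int kr = wq_sysctl("kern.waitq_wakeup_one", 7 /* global test waitq #7 */);
	printf("waitq_wakeup_one kr=%d\n", kr);
	return (0);
}
#endif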
3448
3449static int sysctl_wqset_select SYSCTL_HANDLER_ARGS
3450{
3451#pragma unused(oidp, arg1, arg2)
3452 int error;
3453 struct waitq_set *wqset;
3454 uint64_t event64 = 0;
3455
3456 error = SYSCTL_IN(req, &event64, sizeof(event64));
3457 if (error)
3458 return error;
3459
3460 if (!req->newptr)
3461 goto out;
3462
3463 wqset = sysctl_get_wqset((int)(event64 & 0xffffffff));
3464 g_waitq_set = wqset;
3465
3466 event64 = wqset_id(wqset);
3467 printf("[WQ]: selected wqset 0x%llx\n", event64);
3468
3469out:
3470 if (g_waitq_set)
3471 event64 = wqset_id(g_waitq_set);
3472 else
3473 event64 = (uint64_t)(-1);
3474
3475 return SYSCTL_OUT(req, &event64, sizeof(event64));
3476}
3477SYSCTL_PROC(_kern, OID_AUTO, wqset_select, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3478 0, 0, sysctl_wqset_select, "Q", "select/create a global waitq set");
3479
3480
3481static int sysctl_waitq_link SYSCTL_HANDLER_ARGS
3482{
3483#pragma unused(oidp, arg1, arg2)
3484 int error;
3485 int index;
3486 struct waitq *waitq;
3487 struct waitq_set *wqset;
3488 kern_return_t kr;
3489 uint64_t reserved_link = 0;
3490 int64_t event64 = 0;
3491
3492 error = SYSCTL_IN(req, &event64, sizeof(event64));
3493 if (error)
3494 return error;
3495
3496 if (!req->newptr)
3497 return SYSCTL_OUT(req, &event64, sizeof(event64));
3498
3499 if (!g_waitq_set)
3500 g_waitq_set = sysctl_get_wqset(1);
3501 wqset = g_waitq_set;
3502
3503 if (event64 < 0) {
3504 struct waitq_set *tmp;
3505 index = (int)((-event64) & 0xffffffff);
3506 tmp = sysctl_get_wqset(index);
3507 if (tmp == wqset)
3508 goto out;
3509 waitq = wqset_waitq(tmp);
3510 index = -index;
3511 } else {
3512 index = (int)event64;
3513 waitq = global_test_waitq(index);
3514 }
3515
3516 printf("[WQ]: linking waitq [%d] to global wqset (0x%llx)\n",
3517 index, wqset_id(wqset));
3518 reserved_link = waitq_link_reserve(waitq);
3519 kr = waitq_link(waitq, wqset, WAITQ_SHOULD_LOCK, &reserved_link);
3520 waitq_link_release(reserved_link);
3521
3522 printf("[WQ]: \tkr=%d\n", kr);
3523
3524out:
3525 return SYSCTL_OUT(req, &kr, sizeof(kr));
3526}
3527SYSCTL_PROC(_kern, OID_AUTO, waitq_link, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3528 0, 0, sysctl_waitq_link, "Q", "link global waitq to test waitq set");
3529
3530
3531static int sysctl_waitq_unlink SYSCTL_HANDLER_ARGS
3532{
3533#pragma unused(oidp, arg1, arg2)
3534 int error;
3535 int index;
3536 struct waitq *waitq;
3537 struct waitq_set *wqset;
3538 kern_return_t kr;
3539 uint64_t event64 = 0;
3540
3541 error = SYSCTL_IN(req, &event64, sizeof(event64));
3542 if (error)
3543 return error;
3544
3545 if (!req->newptr)
3546 return SYSCTL_OUT(req, &event64, sizeof(event64));
3547
3548 if (!g_waitq_set)
3549 g_waitq_set = sysctl_get_wqset(1);
3550 wqset = g_waitq_set;
3551
3552 index = (int)event64;
3553 waitq = global_test_waitq(index);
3554
3555 printf("[WQ]: unlinking waitq [%d] from global wqset (0x%llx)\n",
3556 index, wqset_id(wqset));
3557
3558 kr = waitq_unlink(waitq, wqset);
3559 printf("[WQ]: \tkr=%d\n", kr);
3560
3561 return SYSCTL_OUT(req, &kr, sizeof(kr));
3562}
3563SYSCTL_PROC(_kern, OID_AUTO, waitq_unlink, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3564 0, 0, sysctl_waitq_unlink, "Q", "unlink global waitq from test waitq set");
3565
3566
3567static int sysctl_waitq_clear_prepost SYSCTL_HANDLER_ARGS
3568{
3569#pragma unused(oidp, arg1, arg2)
3570 struct waitq *waitq;
3571 uint64_t event64 = 0;
3572 int error, index;
3573
3574 error = SYSCTL_IN(req, &event64, sizeof(event64));
3575 if (error)
3576 return error;
3577
3578 if (!req->newptr)
3579 return SYSCTL_OUT(req, &event64, sizeof(event64));
3580
3581 index = (int)event64;
3582 waitq = global_test_waitq(index);
3583
3584 printf("[WQ]: clearing prepost on waitq [%d]\n", index);
3585 waitq_clear_prepost(waitq);
3586
3587 return SYSCTL_OUT(req, &event64, sizeof(event64));
3588}
3589SYSCTL_PROC(_kern, OID_AUTO, waitq_clear_prepost, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3590 0, 0, sysctl_waitq_clear_prepost, "Q", "clear prepost on given waitq");
3591
3592
3593static int sysctl_wqset_unlink_all SYSCTL_HANDLER_ARGS
3594{
3595#pragma unused(oidp, arg1, arg2)
3596 int error;
3597 struct waitq_set *wqset;
3598 kern_return_t kr;
3599 uint64_t event64 = 0;
3600
3601 error = SYSCTL_IN(req, &event64, sizeof(event64));
3602 if (error)
3603 return error;
3604
3605 if (!req->newptr)
3606 return SYSCTL_OUT(req, &event64, sizeof(event64));
3607
3608 if (!g_waitq_set)
3609 g_waitq_set = sysctl_get_wqset(1);
3610 wqset = g_waitq_set;
3611
3612 printf("[WQ]: unlinking all queues from global wqset (0x%llx)\n",
3613 wqset_id(wqset));
3614
3615 kr = waitq_set_unlink_all(wqset);
3616 printf("[WQ]: \tkr=%d\n", kr);
3617
3618 return SYSCTL_OUT(req, &kr, sizeof(kr));
3619}
3620SYSCTL_PROC(_kern, OID_AUTO, wqset_unlink_all, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3621 0, 0, sysctl_wqset_unlink_all, "Q", "unlink all queues from test waitq set");
3622
3623
3624static int sysctl_wqset_clear_preposts SYSCTL_HANDLER_ARGS
3625{
3626#pragma unused(oidp, arg1, arg2)
3627 struct waitq_set *wqset = NULL;
3628 uint64_t event64 = 0;
3629 int error, index;
3630
3631 error = SYSCTL_IN(req, &event64, sizeof(event64));
3632 if (error)
3633 return error;
3634
3635 if (!req->newptr)
3636 goto out;
3637
3638 index = (int)((event64) & 0xffffffff);
3639 wqset = sysctl_get_wqset(index);
3640 assert(wqset != NULL);
3641
3642 printf("[WQ]: clearing preposts on wqset 0x%llx\n", wqset_id(wqset));
3643 waitq_set_clear_preposts(wqset);
3644
3645out:
3646 if (wqset)
3647 event64 = wqset_id(wqset);
3648 else
3649 event64 = (uint64_t)(-1);
3650
3651 return SYSCTL_OUT(req, &event64, sizeof(event64));
3652}
3653SYSCTL_PROC(_kern, OID_AUTO, wqset_clear_preposts, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3654 0, 0, sysctl_wqset_clear_preposts, "Q", "clear preposts on given waitq set");
3655
3656#endif /* CONFIG_WAITQ_DEBUG */
3657#endif /* defined(DEVELOPMENT) || defined(DEBUG) */