/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>
#include <sys/file_internal.h>
#include <sys/proc_internal.h>
#include <sys/socketvar.h>
#include <sys/uio_internal.h>
#include <sys/kernel.h>
#include <sys/guarded.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>

#include <sys/mount_internal.h>
#include <sys/protosw.h>

#include <sys/kdebug.h>

#include <sys/event.h>
#include <sys/eventvar.h>

#include <sys/kauth.h>

#include <mach/mach_types.h>
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/ledger.h>
#include <kern/task.h>
#include <kern/telemetry.h>
#include <kern/waitq.h>
#include <kern/sched_prim.h>

#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/syscall.h>
#include <sys/pipe.h>

#include <security/audit/audit.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_debug.h>
/* for wait queue based select */
#include <kern/waitq.h>
#include <kern/kalloc.h>
#include <sys/vnode_internal.h>
/* XXX should be in a header file somewhere */
void evsofree(struct socket *);
void evpipefree(struct pipe *);
void postpipeevent(struct pipe *, int);
void postevent(struct socket *, struct sockbuf *, int);
extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t timeoutp);
extern void delay(int);
int rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval);
int wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval);

__private_extern__ int dofileread(vfs_context_t ctx, struct fileproc *fp,
    user_addr_t bufp, user_size_t nbyte,
    off_t offset, int flags, user_ssize_t *retval);
__private_extern__ int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
    user_addr_t bufp, user_size_t nbyte,
    off_t offset, int flags, user_ssize_t *retval);
__private_extern__ int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode);
__private_extern__ void donefileread(struct proc *p, struct fileproc *fp_ret, int fd);
/* Conflict wait queue for when selects collide (opaque type) */
struct waitq select_conflict_queue;

/*
 * Init routine called from bsd_init.c
 */
void select_waitq_init(void);
void
select_waitq_init(void)
{
    waitq_init(&select_conflict_queue, SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
}
#define f_flag f_fglob->fg_flag
#define f_type f_fglob->fg_ops->fo_type
#define f_msgcount f_fglob->fg_msgcount
#define f_cred f_fglob->fg_cred
#define f_ops f_fglob->fg_ops
#define f_offset f_fglob->fg_offset
#define f_data f_fglob->fg_data
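/*
 * Illustrative note (not from the original source): with the shorthand
 * macros above, an expression such as
 *
 *	fp->f_flag & FREAD
 *
 * expands to
 *
 *	fp->f_fglob->fg_flag & FREAD
 *
 * i.e. the per-descriptor fileproc forwards to the shared fileglob.
 */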
/*
 * Returns:	0			Success
 *	preparefileread:EBADF
 *	preparefileread:ESPIPE
 *	preparefileread:ENXIO
 *	preparefileread:EBADF
 */
int
read(struct proc *p, struct read_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return(read_nocancel(p, (struct read_nocancel_args *)uap, retval));
}
int
read_nocancel(struct proc *p, struct read_nocancel_args *uap, user_ssize_t *retval)
{
    struct fileproc *fp;
    int error;
    int fd = uap->fd;
    struct vfs_context context;

    if ( (error = preparefileread(p, &fp, fd, 0)) )
        return (error);

    context = *(vfs_context_current());
    context.vc_ucred = fp->f_fglob->fg_cred;

    error = dofileread(&context, fp, uap->cbuf, uap->nbyte,
        (off_t)-1, 0, retval);

    donefileread(p, fp, fd);

    return (error);
}
/*
 * Returns:	0			Success
 *	preparefileread:EBADF
 *	preparefileread:ESPIPE
 *	preparefileread:ENXIO
 *	preparefileread:EBADF
 */
int
pread(struct proc *p, struct pread_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return(pread_nocancel(p, (struct pread_nocancel_args *)uap, retval));
}
int
pread_nocancel(struct proc *p, struct pread_nocancel_args *uap, user_ssize_t *retval)
{
    struct fileproc *fp = NULL;	/* fp set by preparefileread() */
    int fd = uap->fd;
    int error;
    struct vfs_context context;

    if ( (error = preparefileread(p, &fp, fd, 1)) )
        goto out;

    context = *(vfs_context_current());
    context.vc_ucred = fp->f_fglob->fg_cred;

    error = dofileread(&context, fp, uap->buf, uap->nbyte,
        uap->offset, FOF_OFFSET, retval);

    donefileread(p, fp, fd);

    KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE),
        uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

out:
    return (error);
}
/*
 * Code common for read and pread
 */
void
donefileread(struct proc *p, struct fileproc *fp, int fd)
{
    proc_fdlock_spin(p);
    fp_drop(p, fd, fp, 1);
    proc_fdunlock(p);
}
int
preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pread)
{
    vnode_t vp;
    int error;
    struct fileproc *fp;

    AUDIT_ARG(fd, fd);

    proc_fdlock_spin(p);

    error = fp_lookup(p, fd, &fp, 1);
    if (error) {
        proc_fdunlock(p);
        return (error);
    }
    if ((fp->f_flag & FREAD) == 0) {
        error = EBADF;
        goto out;
    }
    if (check_for_pread && (fp->f_type != DTYPE_VNODE)) {
        error = ESPIPE;
        goto out;
    }
    if (fp->f_type == DTYPE_VNODE) {
        vp = (struct vnode *)fp->f_fglob->fg_data;

        if (check_for_pread && (vnode_isfifo(vp))) {
            error = ESPIPE;
            goto out;
        }
        if (check_for_pread && (vp->v_flag & VISTTY)) {
            error = ENXIO;
            goto out;
        }
    }

    *fp_ret = fp;

    proc_fdunlock(p);
    return (0);

out:
    fp_drop(p, fd, fp, 1);
    proc_fdunlock(p);
    return (error);
}
__private_extern__ int
dofileread(vfs_context_t ctx, struct fileproc *fp,
    user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
    user_ssize_t *retval)
{
    uio_t auio;
    user_ssize_t bytecnt;
    int error = 0;
    char uio_buf[ UIO_SIZEOF(1) ];

    if (nbyte > INT_MAX)
        return (EINVAL);

    if (IS_64BIT_PROCESS(vfs_context_proc(ctx))) {
        auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_READ,
            &uio_buf[0], sizeof(uio_buf));
    } else {
        auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_READ,
            &uio_buf[0], sizeof(uio_buf));
    }
    uio_addiov(auio, bufp, nbyte);

    bytecnt = nbyte;

    if ((error = fo_read(fp, auio, flags, ctx))) {
        if (uio_resid(auio) != bytecnt && (error == ERESTART ||
            error == EINTR || error == EWOULDBLOCK))
            error = 0;
    }
    bytecnt -= uio_resid(auio);

    *retval = bytecnt;

    return (error);
}
/*
 * Scatter read system call.
 */
int
readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return(readv_nocancel(p, (struct readv_nocancel_args *)uap, retval));
}
int
readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *retval)
{
    uio_t auio = NULL;
    int error;
    struct user_iovec *iovp;

    /* Verify range before calling uio_create() */
    if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV)
        return (EINVAL);

    /* allocate a uio large enough to hold the number of iovecs passed */
    auio = uio_create(uap->iovcnt, 0,
        (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
        UIO_READ);

    /* get location of iovecs within the uio.  then copyin the iovecs from
     * user space.
     */
    iovp = uio_iovsaddr(auio);
    if (iovp == NULL) {
        error = ENOMEM;
        goto ExitThisRoutine;
    }
    error = copyin_user_iovec_array(uap->iovp,
        IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
        uap->iovcnt, iovp);
    if (error) {
        goto ExitThisRoutine;
    }

    /* finalize uio_t for use and do the IO
     */
    error = uio_calculateresid(auio);
    if (error) {
        goto ExitThisRoutine;
    }
    error = rd_uio(p, uap->fd, auio, retval);

ExitThisRoutine:
    if (auio != NULL) {
        uio_free(auio);
    }
    return (error);
}
int
write(struct proc *p, struct write_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return(write_nocancel(p, (struct write_nocancel_args *)uap, retval));
}
int
write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *retval)
{
    struct fileproc *fp;
    int error;
    int fd = uap->fd;
    bool wrote_some = false;

    AUDIT_ARG(fd, fd);

    error = fp_lookup(p, fd, &fp, 0);
    if (error)
        return (error);
    if ((fp->f_flag & FWRITE) == 0) {
        error = EBADF;
    } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
        proc_fdlock(p);
        error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
        proc_fdunlock(p);
    } else {
        struct vfs_context context = *(vfs_context_current());
        context.vc_ucred = fp->f_fglob->fg_cred;

        error = dofilewrite(&context, fp, uap->cbuf, uap->nbyte,
            (off_t)-1, 0, retval);

        wrote_some = *retval > 0;
    }
    if (wrote_some)
        fp_drop_written(p, fd, fp);
    else
        fp_drop(p, fd, fp, 0);
    return (error);
}
int
pwrite(struct proc *p, struct pwrite_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return(pwrite_nocancel(p, (struct pwrite_nocancel_args *)uap, retval));
}
int
pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t *retval)
{
    struct fileproc *fp;
    int error;
    int fd = uap->fd;
    vnode_t vp = (vnode_t)0;
    bool wrote_some = false;

    AUDIT_ARG(fd, fd);

    error = fp_lookup(p, fd, &fp, 0);
    if (error)
        return (error);

    if ((fp->f_flag & FWRITE) == 0) {
        error = EBADF;
    } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
        proc_fdlock(p);
        error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
        proc_fdunlock(p);
    } else {
        struct vfs_context context = *vfs_context_current();
        context.vc_ucred = fp->f_fglob->fg_cred;

        if (fp->f_type != DTYPE_VNODE) {
            error = ESPIPE;
            goto errout;
        }
        vp = (vnode_t)fp->f_fglob->fg_data;
        if (vnode_isfifo(vp)) {
            error = ESPIPE;
            goto errout;
        }
        if ((vp->v_flag & VISTTY)) {
            error = ENXIO;
            goto errout;
        }
        if (uap->offset == (off_t)-1) {
            error = EINVAL;
            goto errout;
        }

        error = dofilewrite(&context, fp, uap->buf, uap->nbyte,
            uap->offset, FOF_OFFSET, retval);
        wrote_some = *retval > 0;
    }
errout:
    if (wrote_some)
        fp_drop_written(p, fd, fp);
    else
        fp_drop(p, fd, fp, 0);

    KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE),
        uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

    return (error);
}
/*
 * Returns:	0			Success
 *	<fo_write>:???			[indirect through struct fileops]
 */
__private_extern__ int
dofilewrite(vfs_context_t ctx, struct fileproc *fp,
    user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
    user_ssize_t *retval)
{
    uio_t auio;
    int error = 0;
    user_ssize_t bytecnt;
    char uio_buf[ UIO_SIZEOF(1) ];

    if (nbyte > INT_MAX) {
        *retval = 0;
        return (EINVAL);
    }

    if (IS_64BIT_PROCESS(vfs_context_proc(ctx))) {
        auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_WRITE,
            &uio_buf[0], sizeof(uio_buf));
    } else {
        auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_WRITE,
            &uio_buf[0], sizeof(uio_buf));
    }
    uio_addiov(auio, bufp, nbyte);

    bytecnt = nbyte;
    if ((error = fo_write(fp, auio, flags, ctx))) {
        if (uio_resid(auio) != bytecnt && (error == ERESTART ||
            error == EINTR || error == EWOULDBLOCK))
            error = 0;
        /* The socket layer handles SIGPIPE */
        if (error == EPIPE && fp->f_type != DTYPE_SOCKET &&
            (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0) {
            /* XXX Raise the signal on the thread? */
            psignal(vfs_context_proc(ctx), SIGPIPE);
        }
    }
    bytecnt -= uio_resid(auio);
    *retval = bytecnt;

    return (error);
}
/*
 * Gather write system call
 */
int
writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return(writev_nocancel(p, (struct writev_nocancel_args *)uap, retval));
}
int
writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t *retval)
{
    uio_t auio = NULL;
    int error;
    struct fileproc *fp;
    struct user_iovec *iovp;
    bool wrote_some = false;

    AUDIT_ARG(fd, uap->fd);

    /* Verify range before calling uio_create() */
    if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV)
        return (EINVAL);

    /* allocate a uio large enough to hold the number of iovecs passed */
    auio = uio_create(uap->iovcnt, 0,
        (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
        UIO_WRITE);

    /* get location of iovecs within the uio.  then copyin the iovecs from
     * user space.
     */
    iovp = uio_iovsaddr(auio);
    if (iovp == NULL) {
        error = ENOMEM;
        goto ExitThisRoutine;
    }
    error = copyin_user_iovec_array(uap->iovp,
        IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
        uap->iovcnt, iovp);
    if (error) {
        goto ExitThisRoutine;
    }

    /* finalize uio_t for use and do the IO
     */
    error = uio_calculateresid(auio);
    if (error) {
        goto ExitThisRoutine;
    }

    error = fp_lookup(p, uap->fd, &fp, 0);
    if (error)
        goto ExitThisRoutine;

    if ((fp->f_flag & FWRITE) == 0) {
        error = EBADF;
    } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
        proc_fdlock(p);
        error = fp_guard_exception(p, uap->fd, fp, kGUARD_EXC_WRITE);
        proc_fdunlock(p);
    } else {
        error = wr_uio(p, fp, auio, retval);
        wrote_some = *retval > 0;
    }

    if (wrote_some)
        fp_drop_written(p, uap->fd, fp);
    else
        fp_drop(p, uap->fd, fp, 0);

ExitThisRoutine:
    if (auio != NULL) {
        uio_free(auio);
    }
    return (error);
}
int
wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval)
{
    int error;
    user_ssize_t count;
    struct vfs_context context = *vfs_context_current();

    count = uio_resid(uio);

    context.vc_ucred = fp->f_cred;
    error = fo_write(fp, uio, 0, &context);
    if (error) {
        if (uio_resid(uio) != count && (error == ERESTART ||
            error == EINTR || error == EWOULDBLOCK))
            error = 0;
        /* The socket layer handles SIGPIPE */
        if (error == EPIPE && fp->f_type != DTYPE_SOCKET &&
            (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0)
            psignal(p, SIGPIPE);
    }
    *retval = count - uio_resid(uio);

    return (error);
}
int
rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval)
{
    struct fileproc *fp;
    int error;
    user_ssize_t count;
    struct vfs_context context = *vfs_context_current();

    if ( (error = preparefileread(p, &fp, fdes, 0)) )
        return (error);

    count = uio_resid(uio);

    context.vc_ucred = fp->f_cred;

    error = fo_read(fp, uio, 0, &context);

    if (error) {
        if (uio_resid(uio) != count && (error == ERESTART ||
            error == EINTR || error == EWOULDBLOCK))
            error = 0;
    }
    *retval = count - uio_resid(uio);

    donefileread(p, fp, fdes);

    return (error);
}
/*
 * Returns:	0			Success
 *	fp_lookup:EBADF			Bad file descriptor
 */
int
ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval)
{
    struct fileproc *fp = NULL;
    int error = 0;
    u_int size = 0;
    caddr_t datap = NULL, memp = NULL;
    boolean_t is64bit = FALSE;
    int tmp = 0;
#define STK_PARAMS 128
    char stkbuf[STK_PARAMS];
    int fd = uap->fd;
    u_long com = uap->com;
    struct vfs_context context = *vfs_context_current();

    AUDIT_ARG(fd, uap->fd);
    AUDIT_ARG(addr, uap->data);

    is64bit = proc_is64bit(p);
#if CONFIG_AUDIT
    if (is64bit)
        AUDIT_ARG(value64, com);
    else
        AUDIT_ARG(cmd, CAST_DOWN_EXPLICIT(int, com));
#endif /* CONFIG_AUDIT */

    /*
     * Interpret high order word to find amount of data to be
     * copied to/from the user's address space.
     */
    size = IOCPARM_LEN(com);
    if (size > IOCPARM_MAX)
        return ENOTTY;
    if (size > sizeof (stkbuf)) {
        if ((memp = (caddr_t)kalloc(size)) == 0)
            return ENOMEM;
        datap = memp;
    } else
        datap = &stkbuf[0];
    if (com & IOC_IN) {
        if (size) {
            error = copyin(uap->data, datap, size);
            if (error)
                goto out_nofp;
        } else {
            /* XXX - IOC_IN and no size? we should probably return an error here!! */
            if (is64bit)
                *(user_addr_t *)datap = uap->data;
            else
                *(uint32_t *)datap = (uint32_t)uap->data;
        }
    } else if ((com & IOC_OUT) && size)
        /*
         * Zero the buffer so the user always
         * gets back something deterministic.
         */
        bzero(datap, size);
    else if (com & IOC_VOID) {
        /* XXX - this is odd since IOC_VOID means no parameters */
        if (is64bit)
            *(user_addr_t *)datap = uap->data;
        else
            *(uint32_t *)datap = (uint32_t)uap->data;
    }

    proc_fdlock(p);
    error = fp_lookup(p, fd, &fp, 1);
    if (error) {
        proc_fdunlock(p);
        goto out_nofp;
    }

    AUDIT_ARG(file, p, fp);

    if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
        error = EBADF;
        goto out;
    }

    context.vc_ucred = fp->f_fglob->fg_cred;

#if CONFIG_MACF
    error = mac_file_check_ioctl(context.vc_ucred, fp->f_fglob, com);
    if (error)
        goto out;
#endif

    switch (com) {
    case FIONCLEX:
        *fdflags(p, fd) &= ~UF_EXCLOSE;
        break;

    case FIOCLEX:
        *fdflags(p, fd) |= UF_EXCLOSE;
        break;

    case FIONBIO:
        if ( (tmp = *(int *)datap) )
            fp->f_flag |= FNONBLOCK;
        else
            fp->f_flag &= ~FNONBLOCK;
        error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
        break;

    case FIOASYNC:
        if ( (tmp = *(int *)datap) )
            fp->f_flag |= FASYNC;
        else
            fp->f_flag &= ~FASYNC;
        error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
        break;

    case FIOSETOWN:
        tmp = *(int *)datap;
        if (fp->f_type == DTYPE_SOCKET) {
            ((struct socket *)fp->f_data)->so_pgid = tmp;
            break;
        }
        if (fp->f_type == DTYPE_PIPE) {
            error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context);
            break;
        }
        if (tmp <= 0) {
            tmp = -tmp;
        } else {
            struct proc *p1 = proc_find(tmp);
            if (p1 == 0) {
                error = ESRCH;
                break;
            }
            tmp = p1->p_pgrpid;
            proc_rele(p1);
        }
        error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context);
        break;

    case FIOGETOWN:
        if (fp->f_type == DTYPE_SOCKET) {
            *(int *)datap = ((struct socket *)fp->f_data)->so_pgid;
            break;
        }
        error = fo_ioctl(fp, TIOCGPGRP, datap, &context);
        *(int *)datap = -*(int *)datap;
        break;

    default:
        error = fo_ioctl(fp, com, datap, &context);
        /*
         * Copy any data to user, size was
         * already set and checked above.
         */
        if (error == 0 && (com & IOC_OUT) && size)
            error = copyout(datap, uap->data, (u_int)size);
        break;
    }
out:
    fp_drop(p, fd, fp, 1);
    proc_fdunlock(p);

out_nofp:
    if (memp)
        kfree(memp, size);
    return (error);
}
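/*
 * Illustrative note (not from the original source): the IOC_IN/IOC_OUT/
 * IOC_VOID and IOCPARM_LEN() decoding above works because BSD ioctl
 * commands encode their direction and parameter size in the high bits
 * of the command word. For example, FIONBIO is defined in <sys/filio.h>
 * as _IOW('f', 126, int), so for that command IOCPARM_LEN(com) equals
 * sizeof(int) and (com & IOC_IN) is set, which is why an int is copied
 * in from user space before dispatching to fo_ioctl().
 */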
int selwait, nselcoll;
#define SEL_FIRSTPASS 1
#define SEL_SECONDPASS 2
extern int selcontinue(int error);
extern int selprocess(int error, int sel_pass);
static int selscan(struct proc *p, struct _select * sel, struct _select_data * seldata,
    int nfd, int32_t *retval, int sel_pass, struct waitq_set *wqset);
static int selcount(struct proc *p, u_int32_t *ibits, int nfd, int *count);
static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup, int fromselcount);
static int seldrop(struct proc *p, u_int32_t *ibits, int nfd);
/*
 * Select system call.
 *
 * Returns:	0			Success
 *		EINVAL			Invalid argument
 *		EAGAIN			Nonconformant error if allocation fails
 */
int
select(struct proc *p, struct select_args *uap, int32_t *retval)
{
    __pthread_testcancel(1);
    return(select_nocancel(p, (struct select_nocancel_args *)uap, retval));
}
int
select_nocancel(struct proc *p, struct select_nocancel_args *uap, int32_t *retval)
{
    int error = 0;
    u_int ni, nw;
    thread_t th_act;
    struct uthread *uth;
    struct _select *sel;
    struct _select_data *seldata;
    int needzerofill = 1;
    int count = 0;
    size_t sz = 0;

    th_act = current_thread();
    uth = get_bsdthread_info(th_act);
    sel = &uth->uu_select;
    seldata = &uth->uu_kevent.ss_select_data;
    *retval = 0;

    seldata->args = uap;
    seldata->retval = retval;
    seldata->wqp = NULL;
    seldata->count = 0;

    if (uap->nd < 0) {
        return (EINVAL);
    }

    /* select on thread of process that already called proc_exit() */
    if (p->p_fd == NULL) {
        return (EBADF);
    }

    if (uap->nd > p->p_fd->fd_nfiles)
        uap->nd = p->p_fd->fd_nfiles; /* forgiving; slightly wrong */

    nw = howmany(uap->nd, NFDBITS);
    ni = nw * sizeof(fd_mask);

    /*
     * if the previously allocated space for the bits is smaller than
     * what is requested or no space has yet been allocated for this
     * thread, allocate enough space now.
     *
     * Note: If this process fails, select() will return EAGAIN; this
     * is the same thing poll() returns in a no-memory situation, but
     * it is not a POSIX compliant error code for select().
     */
    if (sel->nbytes < (3 * ni)) {
        int nbytes = 3 * ni;

        /* Free previous allocation, if any */
        if (sel->ibits != NULL)
            FREE(sel->ibits, M_TEMP);
        if (sel->obits != NULL) {
            FREE(sel->obits, M_TEMP);
            /* NULL out; subsequent ibits allocation may fail */
            sel->obits = NULL;
        }

        MALLOC(sel->ibits, u_int32_t *, nbytes, M_TEMP, M_WAITOK | M_ZERO);
        if (sel->ibits == NULL)
            return (EAGAIN);
        MALLOC(sel->obits, u_int32_t *, nbytes, M_TEMP, M_WAITOK | M_ZERO);
        if (sel->obits == NULL) {
            FREE(sel->ibits, M_TEMP);
            sel->ibits = NULL;
            return (EAGAIN);
        }
        sel->nbytes = nbytes;
        needzerofill = 0;
    }

    if (needzerofill) {
        bzero((caddr_t)sel->ibits, sel->nbytes);
        bzero((caddr_t)sel->obits, sel->nbytes);
    }

    /*
     * get the bits from the user address space
     */
#define getbits(name, x) \
    do { \
        if (uap->name && (error = copyin(uap->name, \
            (caddr_t)&sel->ibits[(x) * nw], ni))) \
            goto continuation; \
    } while (0)

    getbits(in, 0);
    getbits(ou, 1);
    getbits(ex, 2);
#undef getbits
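    /*
     * Illustrative note (not from the original source): getbits(in, 0)
     * expands to roughly
     *
     *	if (uap->in && (error = copyin(uap->in,
     *		(caddr_t)&sel->ibits[0 * nw], ni)))
     *		goto continuation;
     *
     * i.e. each of the three fd_set vectors (read/write/except) is copied
     * into its own nw-word slice of the ibits array.
     */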
    if (uap->tv) {
        struct timeval atv;
        if (IS_64BIT_PROCESS(p)) {
            struct user64_timeval atv64;
            error = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
            /* Loses resolution - assume timeout < 68 years */
            atv.tv_sec = atv64.tv_sec;
            atv.tv_usec = atv64.tv_usec;
        } else {
            struct user32_timeval atv32;
            error = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
            atv.tv_sec = atv32.tv_sec;
            atv.tv_usec = atv32.tv_usec;
        }
        if (error)
            goto continuation;

        if (itimerfix(&atv)) {
            error = EINVAL;
            goto continuation;
        }

        clock_absolutetime_interval_to_deadline(
            tvtoabstime(&atv), &seldata->abstime);
    }
    else
        seldata->abstime = 0;

    if ( (error = selcount(p, sel->ibits, uap->nd, &count)) ) {
        goto continuation;
    }

    /*
     * We need an array of waitq pointers. This is due to the new way
     * in which waitqs are linked to sets. When a thread selects on a
     * file descriptor, a waitq (embedded in a selinfo structure) is
     * added to the thread's local waitq set. There is no longer any
     * way to directly iterate over all members of a given waitq set.
     * The process of linking a waitq into a set may allocate a link
     * table object. Because we can't iterate over all the waitqs to
     * which our thread waitq set belongs, we need a way of removing
     * this link object.
     *
     * Thus we need a buffer which will hold one waitq pointer
     * per FD being selected. During the tear-down phase we can use
     * these pointers to dis-associate the underlying selinfo's waitq
     * from our thread's waitq set.
     *
     * Because we also need to allocate a waitq set for this thread,
     * we use a bare buffer pointer to hold all the memory. Note that
     * this memory is cached in the thread pointer and not reaped until
     * the thread exits. This is generally OK because threads that
     * call select tend to keep calling select repeatedly.
     */
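    /*
     * Illustrative layout sketch (not from the original source) of the
     * single allocation described above, assuming ALIGN() rounds to the
     * platform alignment:
     *
     *	uth->uu_wqset
     *	|
     *	v
     *	+------------------+------------------------------+
     *	| struct waitq_set | uint64_t wqp[count]          |
     *	+------------------+------------------------------+
     *	                   ^
     *	                   seldata->wqp
     */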
    sz = ALIGN(sizeof(struct waitq_set)) + (count * sizeof(uint64_t));
    if (sz > uth->uu_wqstate_sz) {
        /* (re)allocate a buffer to hold waitq pointers */
        if (uth->uu_wqset) {
            if (waitq_set_is_valid(uth->uu_wqset))
                waitq_set_deinit(uth->uu_wqset);
            FREE(uth->uu_wqset, M_SELECT);
        } else if (uth->uu_wqstate_sz && !uth->uu_wqset)
            panic("select: thread structure corrupt! "
                "uu_wqstate_sz:%ld, wqstate_buf == NULL",
                uth->uu_wqstate_sz);
        uth->uu_wqstate_sz = sz;
        MALLOC(uth->uu_wqset, struct waitq_set *, sz, M_SELECT, M_WAITOK);
        if (!uth->uu_wqset)
            panic("can't allocate %ld bytes for wqstate buffer",
                uth->uu_wqstate_sz);
        waitq_set_init(uth->uu_wqset,
            SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST|SYNC_POLICY_DISABLE_IRQ, NULL);
    }

    if (!waitq_set_is_valid(uth->uu_wqset))
        waitq_set_init(uth->uu_wqset,
            SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST|SYNC_POLICY_DISABLE_IRQ, NULL);

    /* the last chunk of our buffer is an array of waitq pointers */
    seldata->wqp = (uint64_t *)((char *)(uth->uu_wqset) + ALIGN(sizeof(struct waitq_set)));
    bzero(seldata->wqp, sz - ALIGN(sizeof(struct waitq_set)));

    seldata->count = count;

continuation:

    if (error) {
        /*
         * We have already cleaned up any state we established,
         * either locally or as a result of selcount(). We don't
         * need to wait_subqueue_unlink_all(), since we haven't set
         * anything at this point.
         */
        return (error);
    }

    return selprocess(0, SEL_FIRSTPASS);
}
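/*
 * Illustrative worked example (not from the original source) for the
 * bit-vector sizing in select_nocancel() above: with uap->nd == 100 and
 * 32-bit fd_mask words (NFDBITS == 32),
 *
 *	nw = howmany(100, 32) = 4 words per fd_set
 *	ni = 4 * sizeof(fd_mask) = 16 bytes per fd_set
 *
 * so the read/write/except vectors together need 3 * ni = 48 bytes in
 * each of sel->ibits and sel->obits.
 */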
int
selcontinue(int error)
{
    return selprocess(error, SEL_SECONDPASS);
}
/*
 * selprocess
 *
 * Parameters:	error			The error code from our caller
 *		sel_pass		The pass we are on
 */
int
selprocess(int error, int sel_pass)
{
    int ncoll;
    u_int ni, nw;
    thread_t th_act;
    struct uthread *uth;
    struct proc *p;
    struct select_nocancel_args *uap;
    int32_t *retval;
    struct _select *sel;
    struct _select_data *seldata;
    int unwind = 1;
    int prepost = 0;
    int somewakeup = 0;
    int doretry = 0;
    wait_result_t wait_result;

    p = current_proc();
    th_act = current_thread();
    uth = get_bsdthread_info(th_act);
    sel = &uth->uu_select;
    seldata = &uth->uu_kevent.ss_select_data;
    uap = seldata->args;
    retval = seldata->retval;

    if ((error != 0) && (sel_pass == SEL_FIRSTPASS))
        unwind = 0;
    if (seldata->count == 0)
        unwind = 0;
retry:
    if (error != 0)
        goto done;

    ncoll = nselcoll;
    OSBitOrAtomic(P_SELECT, &p->p_flag);

    /* skip scans if the select is just for timeouts */
    if (seldata->count) {
        error = selscan(p, sel, seldata, uap->nd, retval, sel_pass, uth->uu_wqset);
        if (error || *retval) {
            goto done;
        }
        if (prepost || somewakeup) {
            /*
             * if the select was woken up, we may discover that
             * someone else already read the data; go around
             * and select again if time permits
             */
            prepost = 0;
            somewakeup = 0;
            doretry = 1;
        }
    }

    if (uap->tv) {
        uint64_t now;

        clock_get_uptime(&now);
        if (now >= seldata->abstime)
            goto done;
    }

    if (doretry) {
        /* cleanup obits and try again */
        doretry = 0;
        sel_pass = SEL_FIRSTPASS;
        goto retry;
    }

    /*
     * To effect a poll, the timeout argument should be
     * non-nil, pointing to a zero-valued timeval structure.
     */
    if (uap->tv && seldata->abstime == 0) {
        goto done;
    }

    /* No spurious wakeups due to colls, no need to check for them */
    if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) {
        sel_pass = SEL_FIRSTPASS;
        goto retry;
    }

    OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);

    /* if the select is just for timeout skip check */
    if (seldata->count && (sel_pass == SEL_SECONDPASS))
        panic("selprocess: 2nd pass assertwaiting");

    /* waitq_set has waitqueue as first element */
    wait_result = waitq_assert_wait64_leeway((struct waitq *)uth->uu_wqset,
        NO_EVENT64, THREAD_ABORTSAFE,
        TIMEOUT_URGENCY_USER_NORMAL,
        seldata->abstime, 0);
    if (wait_result != THREAD_AWAKENED) {
        /* there are no preposted events */
        error = tsleep1(NULL, PSOCK | PCATCH,
            "select", 0, selcontinue);
    } else {
        prepost = 1;
        error = 0;
    }

    if (error == 0) {
        sel_pass = SEL_SECONDPASS;
        if (!prepost)
            somewakeup = 1;
        goto retry;
    }
done:
    if (unwind) {
        seldrop(p, sel->ibits, uap->nd);
        waitq_set_deinit(uth->uu_wqset);
        /*
         * zero out the waitq pointer array to avoid use-after free
         * errors in the selcount error path (seldrop_locked) if/when
         * the thread re-calls select().
         */
        bzero((void *)uth->uu_wqset, uth->uu_wqstate_sz);
    }
    OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
    /* select is not restarted after signals... */
    if (error == ERESTART)
        error = EINTR;
    if (error == EWOULDBLOCK)
        error = 0;
    nw = howmany(uap->nd, NFDBITS);
    ni = nw * sizeof(fd_mask);

#define putbits(name, x) \
    do { \
        if (uap->name && (error2 = \
            copyout((caddr_t)&sel->obits[(x) * nw], uap->name, ni))) \
            error = error2; \
    } while (0)

    if (error == 0) {
        int error2;

        putbits(in, 0);
        putbits(ou, 1);
        putbits(ex, 2);
#undef putbits
    }
    return (error);
}
/*
 * remove the fileproc's underlying waitq from the supplied waitq set;
 * clear FP_INSELECT when appropriate
 *
 * Parameters:
 *		fp	File proc that is potentially currently in select
 *		wqset	Waitq set to which the fileproc may belong
 *			(usually this is the thread's private waitq set)
 * Conditions:
 *		proc_fdlock is held
 */
static void selunlinkfp(struct fileproc *fp, uint64_t wqp_id, struct waitq_set *wqset)
{
    int valid_set = waitq_set_is_valid(wqset);
    int valid_q = !!wqp_id;

    /*
     * This could be called (from selcount error path) before we setup
     * the thread's wqset. Check the wqset passed in, and only unlink if
     * the set is valid.
     */

    /* unlink the underlying waitq from the input set (thread waitq set) */
    if (valid_q && valid_set)
        waitq_unlink_by_prepost_id(wqp_id, wqset);

    /* allow passing a NULL/invalid fp for seldrop unwind */
    if (!fp || !(fp->f_flags & (FP_INSELECT|FP_SELCONFLICT)))
        return;

    /*
     * We can always remove the conflict queue from our thread's set: this
     * will not affect other threads that potentially need to be awoken on
     * the conflict queue during a fileproc_drain - those sets will still
     * be linked with the global conflict queue, and the last waiter
     * on the fp clears the CONFLICT marker.
     */
    if (valid_set && (fp->f_flags & FP_SELCONFLICT))
        waitq_unlink(&select_conflict_queue, wqset);

    /*
     * This isn't quite right - we don't actually know if this
     * fileproc is in another select or not! Here we just assume
     * that if we were the first thread to select on the FD, then
     * we'll be the one to clear this flag...
     */
    if (valid_set && fp->f_wset == (void *)wqset) {
        fp->f_flags &= ~FP_INSELECT;
        fp->f_wset = NULL;
    }
}
/*
 * connect a fileproc to the given wqset, potentially bridging to a waitq
 * pointed to indirectly by wq_data
 *
 * Parameters:
 *		fp	File proc potentially currently in select
 *		wq_data	Pointer to a pointer to a waitq (could be NULL)
 *		wqset	Waitq set to which the fileproc should now belong
 *			(usually this is the thread's private waitq set)
 *
 * Conditions:
 *		proc_fdlock is held
 */
static uint64_t sellinkfp(struct fileproc *fp, void **wq_data, struct waitq_set *wqset)
{
    struct waitq *f_wq = NULL;

    if ((fp->f_flags & FP_INSELECT) != FP_INSELECT) {
        if (wq_data)
            panic("non-null data:%p on fp:%p not in select?!"
                "(wqset:%p)", wq_data, fp, wqset);
        return 0;
    }

    if ((fp->f_flags & FP_SELCONFLICT) == FP_SELCONFLICT) {
        /*
         * The conflict queue requires disabling interrupts, so we
         * need to explicitly reserve a link object to avoid a
         * panic/assert in the waitq code. Hopefully this extra step
         * can be avoided if we can split the waitq structure into
         * blocking and linkage sub-structures.
         */
        uint64_t reserved_link = waitq_link_reserve(&select_conflict_queue);
        waitq_link(&select_conflict_queue, wqset, WAITQ_SHOULD_LOCK, &reserved_link);
        waitq_link_release(reserved_link);
    }

    /*
     * The wq_data parameter has potentially been set by selrecord called
     * from a subsystem's fo_select() function. If the subsystem does not
     * call selrecord, then wq_data will be NULL.
     *
     * Use memcpy to get the value into a proper pointer because
     * wq_data most likely points to a stack variable that could be
     * unaligned on 32-bit systems.
     */
    if (wq_data) {
        memcpy(&f_wq, wq_data, sizeof(f_wq));
        if (!waitq_is_valid(f_wq))
            f_wq = NULL;
    }

    /* record the first thread's wqset in the fileproc structure */
    if (!fp->f_wset)
        fp->f_wset = (void *)wqset;

    /* handles NULL f_wq */
    return waitq_get_prepost_id(f_wq);
}
/*
 * selscan
 *
 * Parameters:	p			Process performing the select
 *		sel			The per-thread select context structure
 *		nfd			The number of file descriptors to scan
 *		retval			The per thread system call return area
 *		sel_pass		Which pass this is; allowed values are
 *						SEL_FIRSTPASS and SEL_SECONDPASS
 *		wqset			The per thread wait queue set
 *
 * Returns:	0			Success
 *		EIO			Invalid p->p_fd field XXX Obsolete?
 *		EBADF			One of the files in the bit vector is
 *						invalid.
 */
static int
selscan(struct proc *p, struct _select *sel, struct _select_data * seldata,
    int nfd, int32_t *retval, int sel_pass, struct waitq_set *wqset)
{
    struct filedesc *fdp = p->p_fd;
    int msk, i, j, fd;
    u_int32_t bits;
    struct fileproc *fp;
    int n = 0;		/* count of bits */
    int nc = 0;		/* bit vector offset (nc'th bit) */
    static int flag[3] = { FREAD, FWRITE, 0 };
    u_int32_t *iptr, *optr;
    u_int nw;
    u_int32_t *ibits, *obits;
    uint64_t reserved_link, *rl_ptr = NULL;
    int count;
    struct vfs_context context = *vfs_context_current();

    /*
     * Problems when reboot; due to MacOSX signal probs
     * in Beaker1C ; verify that the p->p_fd is valid
     */
    if (fdp == NULL) {
        *retval = 0;
        return (EIO);
    }
    ibits = sel->ibits;
    obits = sel->obits;

    nw = howmany(nfd, NFDBITS);

    count = seldata->count;

    nc = 0;
    if (!count) {
        *retval = 0;
        return (0);
    }

    proc_fdlock(p);
    for (msk = 0; msk < 3; msk++) {
        iptr = (u_int32_t *)&ibits[msk * nw];
        optr = (u_int32_t *)&obits[msk * nw];

        for (i = 0; i < nfd; i += NFDBITS) {
            bits = iptr[i/NFDBITS];

            while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
                bits &= ~(1 << j);

                if (fd < fdp->fd_nfiles)
                    fp = fdp->fd_ofiles[fd];
                else
                    fp = NULL;

                if (fp == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
                    /*
                     * If we abort because of a bad
                     * fd, let the caller unwind...
                     */
                    proc_fdunlock(p);
                    return (EBADF);
                }
                if (sel_pass == SEL_SECONDPASS) {
                    reserved_link = 0;
                    rl_ptr = NULL;
                    selunlinkfp(fp, seldata->wqp[nc], wqset);
                } else {
                    reserved_link = waitq_link_reserve((struct waitq *)wqset);
                    rl_ptr = &reserved_link;
                    if (fp->f_flags & FP_INSELECT)
                        /* someone is already in select on this fp */
                        fp->f_flags |= FP_SELCONFLICT;
                    else
                        fp->f_flags |= FP_INSELECT;
                }

                context.vc_ucred = fp->f_cred;

                /*
                 * stash this value b/c fo_select may replace
                 * reserved_link with a pointer to a waitq object
                 */
                uint64_t rsvd = reserved_link;

                /* The select; set the bit, if true */
                if (fp->f_ops && fp->f_type
                    && fo_select(fp, flag[msk], rl_ptr, &context)) {
                    optr[fd/NFDBITS] |= (1 << (fd % NFDBITS));
                    n++;
                }
                if (sel_pass == SEL_FIRSTPASS) {
                    waitq_link_release(rsvd);
                    /*
                     * If the fp's supporting selinfo structure was linked
                     * to this thread's waitq set, then 'reserved_link'
                     * will have been updated by selrecord to be a pointer
                     * to the selinfo's waitq.
                     */
                    if (reserved_link == rsvd)
                        rl_ptr = NULL; /* fo_select never called selrecord() */
                    /*
                     * Hook up the thread's waitq set either to
                     * the fileproc structure, or to the global
                     * conflict queue: but only on the first
                     * select pass.
                     */
                    seldata->wqp[nc] = sellinkfp(fp, (void **)rl_ptr, wqset);
                }
                nc++;
            }
        }
    }
    proc_fdunlock(p);

    *retval = n;
    return (0);
}
int poll_callback(struct kqueue *, struct kevent_internal_s *, void *);

struct poll_continue_args {
    user_addr_t pca_fds;
    u_int pca_nfds;
    u_int pca_rfds;
};
int
poll(struct proc *p, struct poll_args *uap, int32_t *retval)
{
    __pthread_testcancel(1);
    return(poll_nocancel(p, (struct poll_nocancel_args *)uap, retval));
}
int
poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval)
{
    struct poll_continue_args *cont;
    struct pollfd *fds;
    struct kqueue *kq;
    struct timeval atv;
    int ncoll, error = 0;
    u_int nfds = uap->nfds;
    u_int rfds = 0;
    u_int i;
    size_t ni;

    /*
     * This is kinda bogus.  We have fd limits, but that is not
     * really related to the size of the pollfd array.  Make sure
     * we let the process use at least FD_SETSIZE entries and at
     * least enough for the current limits.  We want to be reasonably
     * safe, but not overly restrictive.
     */
    if (nfds > OPEN_MAX ||
        (nfds > p->p_rlimit[RLIMIT_NOFILE].rlim_cur && (proc_suser(p) || nfds > FD_SETSIZE)))
        return (EINVAL);

    kq = kqueue_alloc(p);
    if (kq == NULL)
        return (EAGAIN);

    ni = nfds * sizeof(struct pollfd) + sizeof(struct poll_continue_args);
    MALLOC(cont, struct poll_continue_args *, ni, M_TEMP, M_WAITOK);
    if (NULL == cont) {
        error = EAGAIN;
        goto out;
    }

    fds = (struct pollfd *)&cont[1];
    error = copyin(uap->fds, fds, nfds * sizeof(struct pollfd));
    if (error)
        goto out;

    if (uap->timeout != -1) {
        struct timeval rtv;

        atv.tv_sec = uap->timeout / 1000;
        atv.tv_usec = (uap->timeout % 1000) * 1000;
        if (itimerfix(&atv)) {
            error = EINVAL;
            goto out;
        }
        getmicrouptime(&rtv);
        timevaladd(&atv, &rtv);
    } else {
        atv.tv_sec = 0;
        atv.tv_usec = 0;
    }

    /* JMM - all this P_SELECT stuff is bogus */
    ncoll = nselcoll;
    OSBitOrAtomic(P_SELECT, &p->p_flag);
    for (i = 0; i < nfds; i++) {
        short events = fds[i].events;
        int kerror = 0;

        /* per spec, ignore fd values below zero */
        if (fds[i].fd < 0) {
            fds[i].revents = 0;
            continue;
        }

        /* convert the poll event into a kqueue kevent */
        struct kevent_internal_s kev = {
            .ident = fds[i].fd,
            .flags = EV_ADD | EV_ONESHOT | EV_POLL,
            .udata = CAST_USER_ADDR_T(&fds[i]) };

        /* Handle input events */
        if (events & ( POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP )) {
            kev.filter = EVFILT_READ;
            if (events & ( POLLPRI | POLLRDBAND ))
                kev.flags |= EV_OOBAND;
            kerror = kevent_register(kq, &kev, p);
        }

        /* Handle output events */
        if (kerror == 0 &&
            events & ( POLLOUT | POLLWRNORM | POLLWRBAND )) {
            kev.filter = EVFILT_WRITE;
            kerror = kevent_register(kq, &kev, p);
        }

        /* Handle BSD extension vnode events */
        if (kerror == 0 &&
            events & ( POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE )) {
            kev.filter = EVFILT_VNODE;
            kev.fflags = 0;
            if (events & POLLEXTEND)
                kev.fflags |= NOTE_EXTEND;
            if (events & POLLATTRIB)
                kev.fflags |= NOTE_ATTRIB;
            if (events & POLLNLINK)
                kev.fflags |= NOTE_LINK;
            if (events & POLLWRITE)
                kev.fflags |= NOTE_WRITE;
            kerror = kevent_register(kq, &kev, p);
        }

        if (kerror != 0) {
            fds[i].revents = POLLNVAL;
            rfds++;
        } else
            fds[i].revents = 0;
    }

    /* Did we have any trouble registering? */
    if (rfds > 0)
        goto done;

    /* scan for, and possibly wait for, the kevents to trigger */
    cont->pca_fds = uap->fds;
    cont->pca_nfds = nfds;
    cont->pca_rfds = rfds;
    error = kqueue_scan(kq, poll_callback, NULL, cont, &atv, p);
    rfds = cont->pca_rfds;

done:
    OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
    /* poll is not restarted after signals... */
    if (error == ERESTART)
        error = EINTR;
    if (error == EWOULDBLOCK)
        error = 0;
    if (error == 0) {
        error = copyout(fds, uap->fds, nfds * sizeof(struct pollfd));
        *retval = rfds;
    }

out:
    if (NULL != cont)
        FREE(cont, M_TEMP);

    kqueue_dealloc(kq);
    return (error);
}
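/*
 * Illustrative note (not from the original source): the translation loop
 * above means a userspace call such as
 *
 *	struct pollfd pfd = { .fd = sock, .events = POLLIN | POLLPRI };
 *	poll(&pfd, 1, 1000);
 *
 * is serviced by registering one EVFILT_READ kevent (with EV_OOBAND set
 * because of POLLPRI) on a temporary kqueue, then scanning that kqueue
 * with a one second deadline; poll_callback() below converts any fired
 * kevent back into pfd.revents bits.
 */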
int
poll_callback(__unused struct kqueue *kq, struct kevent_internal_s *kevp, void *data)
{
    struct poll_continue_args *cont = (struct poll_continue_args *)data;
    struct pollfd *fds = CAST_DOWN(struct pollfd *, kevp->udata);
    short prev_revents = fds->revents;
    short mask = 0;

    /* convert the results back into revents */
    if (kevp->flags & EV_EOF)
        fds->revents |= POLLHUP;
    if (kevp->flags & EV_ERROR)
        fds->revents |= POLLERR;

    switch (kevp->filter) {
    case EVFILT_READ:
        if (fds->revents & POLLHUP)
            mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND );
        else {
            mask = (POLLIN | POLLRDNORM);
            if (kevp->flags & EV_OOBAND)
                mask |= (POLLPRI | POLLRDBAND);
        }
        fds->revents |= (fds->events & mask);
        break;

    case EVFILT_WRITE:
        if (!(fds->revents & POLLHUP))
            fds->revents |= (fds->events & ( POLLOUT | POLLWRNORM | POLLWRBAND ));
        break;

    case EVFILT_VNODE:
        if (kevp->fflags & NOTE_EXTEND)
            fds->revents |= (fds->events & POLLEXTEND);
        if (kevp->fflags & NOTE_ATTRIB)
            fds->revents |= (fds->events & POLLATTRIB);
        if (kevp->fflags & NOTE_LINK)
            fds->revents |= (fds->events & POLLNLINK);
        if (kevp->fflags & NOTE_WRITE)
            fds->revents |= (fds->events & POLLWRITE);
        break;
    }

    if (fds->revents != 0 && prev_revents == 0)
        cont->pca_rfds++;

    return (0);
}
int
seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p)
{
    return (1);
}
/*
 * selcount
 *
 * Count the number of bits set in the input bit vector, and establish an
 * outstanding fp->f_iocount for each of the descriptors which will be in
 * use in the select operation.
 *
 * Parameters:	p			The process doing the select
 *		ibits			The input bit vector
 *		nfd			The number of fd's in the vector
 *		countp			Pointer to where to store the bit count
 *
 * Returns:	0			Success
 *		EIO			Bad per process open file table
 *		EBADF			One of the bits in the input bit vector
 *						references an invalid fd
 *
 * Implicit:	*countp (modified)	Count of fd's
 *
 * Notes:	This function is the first pass under the proc_fdlock() that
 *		permits us to recognize invalid descriptors in the bit vector;
 *		they may, however, not remain valid through the drop and
 *		later reacquisition of the proc_fdlock().
 */
static int
selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp)
{
    struct filedesc *fdp = p->p_fd;
    int msk, i, j, fd;
    u_int32_t bits;
    struct fileproc *fp;
    int n = 0;
    u_int32_t *iptr;
    u_int nw;
    int error = 0;
    int need_wakeup = 0;

    /*
     * Problems when reboot; due to MacOSX signal probs
     * in Beaker1C ; verify that the p->p_fd is valid
     */
    if (fdp == NULL) {
        *countp = 0;
        return (EIO);
    }
    nw = howmany(nfd, NFDBITS);

    proc_fdlock(p);
    for (msk = 0; msk < 3; msk++) {
        iptr = (u_int32_t *)&ibits[msk * nw];
        for (i = 0; i < nfd; i += NFDBITS) {
            bits = iptr[i/NFDBITS];
            while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
                bits &= ~(1 << j);

                if (fd < fdp->fd_nfiles)
                    fp = fdp->fd_ofiles[fd];
                else
                    fp = NULL;

                if (fp == NULL ||
                    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
                    *countp = 0;
                    error = EBADF;
                    goto bad;
                }
                fp->f_iocount++;
                n++;
            }
        }
    }
    proc_fdunlock(p);

    *countp = n;
    return (0);

bad:
    if (n == 0)
        goto out;
    /* Ignore error return; it's already EBADF */
    (void)seldrop_locked(p, ibits, nfd, n, &need_wakeup, 1);

out:
    proc_fdunlock(p);
    if (need_wakeup) {
        wakeup(&p->p_fpdrainwait);
    }
    return (error);
}
/*
 * seldrop_locked
 *
 * Drop outstanding wait queue references set up during selscan(); drop the
 * outstanding per fileproc f_iocount() picked up during the selcount().
 *
 * Parameters:	p			Process performing the select
 *		ibits			Input bit vector of fd's
 *		nfd			Number of fd's
 *		lim			Limit to number of vector entries to
 *						consider, or -1 for "all"
 *		need_wakeup		Pointer to flag to set to do a wakeup
 *					if f_iocount on any descriptor goes to 0
 *
 * Returns:	0			Success
 *		EBADF			One or more fds in the bit vector
 *						were invalid, but the rest
 *						were successfully dropped
 *
 * Notes:	An fd may become bad while the proc_fdlock() is not held,
 *		if a multithreaded application closes the fd out from under
 *		the in progress select.  In this case, we still have to
 *		clean up after the set up on the remaining fds.
 */
static int
seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup, int fromselcount)
{
    struct filedesc *fdp = p->p_fd;
    int msk, i, j, nc, fd;
    u_int32_t bits;
    struct fileproc *fp;
    u_int32_t *iptr;
    u_int nw;
    int error = 0;
    int dropcount = 0;
    uthread_t uth = get_bsdthread_info(current_thread());
    struct _select_data *seldata;

    *need_wakeup = 0;

    /*
     * Problems when reboot; due to MacOSX signal probs
     * in Beaker1C ; verify that the p->p_fd is valid
     */
    if (fdp == NULL) {
        return (EIO);
    }

    nw = howmany(nfd, NFDBITS);
    seldata = &uth->uu_kevent.ss_select_data;

    nc = 0;
    for (msk = 0; msk < 3; msk++) {
        iptr = (u_int32_t *)&ibits[msk * nw];
        for (i = 0; i < nfd; i += NFDBITS) {
            bits = iptr[i/NFDBITS];
            while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
                bits &= ~(1 << j);
                fp = fdp->fd_ofiles[fd];
                /*
                 * If we've already dropped as many as were
                 * counted/scanned, then we are done.
                 */
                if ((fromselcount != 0) && (++dropcount > lim))
                    goto done;

                /*
                 * unlink even potentially NULL fileprocs.
                 * If the FD was closed from under us, we
                 * still need to cleanup the waitq links!
                 */
                selunlinkfp(fp,
                    seldata->wqp ? seldata->wqp[nc] : 0,
                    uth->uu_wqset);

                nc++;

                if (fp == NULL) {
                    /* skip (now) bad fds */
                    error = EBADF;
                    continue;
                }

                fp->f_iocount--;
                if (fp->f_iocount < 0)
                    panic("f_iocount overdecrement!");

                if (fp->f_iocount == 0) {
                    /*
                     * The last iocount is responsible for clearing
                     * selconflict flag - even if we didn't set it -
                     * and is also responsible for waking up anyone
                     * waiting on iocounts to drain.
                     */
                    if (fp->f_flags & FP_SELCONFLICT)
                        fp->f_flags &= ~FP_SELCONFLICT;
                    if (p->p_fpdrainwait) {
                        p->p_fpdrainwait = 0;
                        *need_wakeup = 1;
                    }
                }
            }
        }
    }
done:
    return (error);
}
static int
seldrop(struct proc *p, u_int32_t *ibits, int nfd)
{
    int error;
    int need_wakeup = 0;

    proc_fdlock(p);
    error = seldrop_locked(p, ibits, nfd, nfd, &need_wakeup, 0);
    proc_fdunlock(p);
    if (need_wakeup) {
        wakeup(&p->p_fpdrainwait);
    }
    return (error);
}
/*
 * Record a select request.
 */
void
selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data)
{
    thread_t cur_act = current_thread();
    struct uthread * ut = get_bsdthread_info(cur_act);
    /* on input, s_data points to the 64-bit ID of a reserved link object */
    uint64_t *reserved_link = (uint64_t *)s_data;

    /* need to look at collisions */

    /* do not record if this is second pass of select */
    if (!s_data)
        return;

    if ((sip->si_flags & SI_INITED) == 0) {
        waitq_init(&sip->si_waitq, SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
        sip->si_flags |= SI_INITED;
        sip->si_flags &= ~SI_CLEAR;
    }

    if (sip->si_flags & SI_RECORDED)
        sip->si_flags |= SI_COLL;
    else
        sip->si_flags &= ~SI_COLL;

    sip->si_flags |= SI_RECORDED;
    /* note: this checks for pre-existing linkage */
    waitq_link(&sip->si_waitq, ut->uu_wqset,
        WAITQ_SHOULD_LOCK, reserved_link);

    /*
     * Always consume the reserved link.
     * We can always call waitq_link_release() safely because if
     * waitq_link is successful, it consumes the link and resets the
     * value to 0, in which case our call to release becomes a no-op.
     * If waitq_link fails, then the following release call will actually
     * release the reserved link object.
     */
    waitq_link_release(*reserved_link);

    /*
     * Use the s_data pointer as an output parameter as well.
     * This avoids changing the prototype for this function which is
     * used by many kexts. We need to surface the waitq object
     * associated with the selinfo we just added to the thread's select
     * set. New waitq sets do not have back-pointers to set members, so
     * the only way to clear out set linkage objects is to go from the
     * waitq to the set. We use a memcpy because s_data could be
     * pointing to an unaligned value on the stack
     * (especially on 32-bit systems)
     */
    void *wqptr = (void *)&sip->si_waitq;
    memcpy((void *)s_data, (void *)&wqptr, sizeof(void *));

    return;
}
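/*
 * Illustrative sketch (not from the original source): a character driver's
 * select handler would typically pair selrecord() with a selwakeup() from
 * its I/O path roughly like this; 'mydev_softc', 'sc_selinfo' and
 * 'sc_readable' are hypothetical names.
 *
 *	int
 *	mydev_select(dev_t dev, int which, void *wql, struct proc *p)
 *	{
 *		struct mydev_softc *sc = mydev_softc_for(dev);
 *
 *		if (which == FREAD) {
 *			if (sc->sc_readable)
 *				return (1);	// report ready immediately
 *			selrecord(p, &sc->sc_selinfo, wql);
 *		}
 *		return (0);
 *	}
 *
 *	// later, when data arrives:
 *	sc->sc_readable = 1;
 *	selwakeup(&sc->sc_selinfo);
 */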
void
selwakeup(struct selinfo *sip)
{
    if ((sip->si_flags & SI_INITED) == 0) {
        return;
    }

    if (sip->si_flags & SI_COLL) {
        nselcoll++;
        sip->si_flags &= ~SI_COLL;
#if 0
        /* will not support */
        //wakeup((caddr_t)&selwait);
#endif
    }

    if (sip->si_flags & SI_RECORDED) {
        waitq_wakeup64_all(&sip->si_waitq, NO_EVENT64,
            THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
        sip->si_flags &= ~SI_RECORDED;
    }
}
void
selthreadclear(struct selinfo *sip)
{
    struct waitq *wq;

    if ((sip->si_flags & SI_INITED) == 0) {
        return;
    }
    if (sip->si_flags & SI_RECORDED) {
        selwakeup(sip);
        sip->si_flags &= ~(SI_RECORDED | SI_COLL);
    }
    sip->si_flags |= SI_CLEAR;
    sip->si_flags &= ~SI_INITED;

    wq = &sip->si_waitq;

    /*
     * Higher level logic may have a handle on this waitq's prepost ID,
     * but that's OK because the waitq_deinit will remove/invalidate the
     * prepost object (as well as mark the waitq invalid). This de-couples
     * us from any callers that may have a handle to this waitq via the
     * prepost ID.
     */
    waitq_deinit(wq);
}
#define DBG_POST	0x10
#define DBG_WATCH	0x11
#define DBG_WAIT	0x12
#define DBG_MOD		0x13
#define DBG_EWAKEUP	0x14
#define DBG_ENQUEUE	0x15
#define DBG_DEQUEUE	0x16

#define DBG_MISC_POST	 MISCDBG_CODE(DBG_EVENT,DBG_POST)
#define DBG_MISC_WATCH	 MISCDBG_CODE(DBG_EVENT,DBG_WATCH)
#define DBG_MISC_WAIT	 MISCDBG_CODE(DBG_EVENT,DBG_WAIT)
#define DBG_MISC_MOD	 MISCDBG_CODE(DBG_EVENT,DBG_MOD)
#define DBG_MISC_EWAKEUP MISCDBG_CODE(DBG_EVENT,DBG_EWAKEUP)
#define DBG_MISC_ENQUEUE MISCDBG_CODE(DBG_EVENT,DBG_ENQUEUE)
#define DBG_MISC_DEQUEUE MISCDBG_CODE(DBG_EVENT,DBG_DEQUEUE)

#define EVPROCDEQUE(p, evq)	do {				\
    proc_lock(p);						\
    if (evq->ee_flags & EV_QUEUED) {				\
        TAILQ_REMOVE(&p->p_evlist, evq, ee_plist);		\
        evq->ee_flags &= ~EV_QUEUED;				\
    }								\
    proc_unlock(p);						\
} while (0);
#if SOCKETS
/*
 * called upon socket close. dequeue and free all events for
 * the socket... socket must be locked by caller.
 */
void
evsofree(struct socket *sp)
{
    struct eventqelt *evq, *next;
    proc_t p;

    if (sp == NULL)
        return;

    for (evq = sp->so_evlist.tqh_first; evq != NULL; evq = next) {
        next = evq->ee_slist.tqe_next;
        p = evq->ee_proc;

        if (evq->ee_flags & EV_QUEUED) {
            EVPROCDEQUE(p, evq);
        }
        TAILQ_REMOVE(&sp->so_evlist, evq, ee_slist); // remove from socket q
        FREE(evq, M_TEMP);
    }
}
#endif /* SOCKETS */
/*
 * called upon pipe close. dequeue and free all events for
 * the pipe... pipe must be locked by caller
 */
void
evpipefree(struct pipe *cpipe)
{
    struct eventqelt *evq, *next;
    proc_t p;

    for (evq = cpipe->pipe_evlist.tqh_first; evq != NULL; evq = next) {
        next = evq->ee_slist.tqe_next;
        p = evq->ee_proc;

        EVPROCDEQUE(p, evq);

        TAILQ_REMOVE(&cpipe->pipe_evlist, evq, ee_slist); // remove from pipe q
        FREE(evq, M_TEMP);
    }
}
/*
 * enqueue this event if it's not already queued. wakeup
 * the proc if we do queue this event to it...
 * entered with proc lock held... we drop it before
 * doing the wakeup and return in that state
 */
static void
evprocenque(struct eventqelt *evq)
{
    proc_t p;

    assert(evq);
    p = evq->ee_proc;

    KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_START, (uint32_t)evq, evq->ee_flags, evq->ee_eventmask, 0, 0);

    if (evq->ee_flags & EV_QUEUED) {
        proc_unlock(p);

        KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0);
        return;
    }
    evq->ee_flags |= EV_QUEUED;

    TAILQ_INSERT_TAIL(&p->p_evlist, evq, ee_plist);

    proc_unlock(p);

    wakeup(&p->p_evlist);

    KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0);
}
/*
 * pipe lock must be taken by the caller
 */
void
postpipeevent(struct pipe *pipep, int event)
{
    int mask;
    struct eventqelt *evq;

    if (pipep == NULL)
        return;
    KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, event, 0, 0, 1, 0);

    for (evq = pipep->pipe_evlist.tqh_first;
        evq != NULL; evq = evq->ee_slist.tqe_next) {

        if (evq->ee_eventmask == 0)
            continue;
        mask = 0;

        switch (event & (EV_RWBYTES | EV_RCLOSED | EV_WCLOSED)) {

        case EV_RWBYTES:
            if ((evq->ee_eventmask & EV_RE) && pipep->pipe_buffer.cnt) {
                mask |= EV_RE;
                evq->ee_req.er_rcnt = pipep->pipe_buffer.cnt;
            }
            if ((evq->ee_eventmask & EV_WR) &&
                (MAX(pipep->pipe_buffer.size,PIPE_SIZE) - pipep->pipe_buffer.cnt) >= PIPE_BUF) {

                if (pipep->pipe_state & PIPE_EOF) {
                    mask |= EV_WR|EV_RESET;
                    break;
                }
                mask |= EV_WR;
                evq->ee_req.er_wcnt = MAX(pipep->pipe_buffer.size, PIPE_SIZE) - pipep->pipe_buffer.cnt;
            }
            break;

        case EV_WCLOSED:
        case EV_RCLOSED:
            if ((evq->ee_eventmask & EV_RE)) {
                mask |= EV_RE|EV_RCLOSED;
            }
            if ((evq->ee_eventmask & EV_WR)) {
                mask |= EV_WR|EV_WCLOSED;
            }
            break;

        default:
            return;
        }
        if (mask) {
            /*
             * disarm... postevents are nops until this event is 'read' via
             * waitevent and then re-armed via modwatch
             */
            evq->ee_eventmask = 0;

            /*
             * since events are disarmed until after the waitevent
             * the ee_req.er_xxxx fields can't change once we've
             * inserted this event into the proc queue...
             * therefore, the waitevent will see a 'consistent'
             * snapshot of the event, even though it won't hold
             * the pipe lock, and we're updating the event outside
             * of the proc lock, which it will hold
             */
            evq->ee_req.er_eventbits |= mask;

            KERNEL_DEBUG(DBG_MISC_POST, (uint32_t)evq, evq->ee_req.er_eventbits, mask, 1, 0);

            evprocenque(evq);
        }
    }
    KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, 0, 0, 0, 1, 0);
}
#if SOCKETS
/*
 * given either a sockbuf or a socket run down the
 * event list and queue ready events found...
 * the socket must be locked by the caller
 */
void
postevent(struct socket *sp, struct sockbuf *sb, int event)
{
    int mask;
    struct eventqelt *evq;
    struct tcpcb *tp;

    if (sb)
        sp = sb->sb_so;
    if (sp == NULL)
        return;

    KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, (int)sp, event, 0, 0, 0);

    for (evq = sp->so_evlist.tqh_first;
        evq != NULL; evq = evq->ee_slist.tqe_next) {

        if (evq->ee_eventmask == 0)
            continue;
        mask = 0;

        /* ready for reading:
           - byte cnt >= receive low water mark
           - read-half of conn closed
           - conn pending for listening sock
           - socket error pending

           ready for writing:
           - byte cnt avail >= send low water mark
           - write half of conn closed
           - socket error pending
           - non-blocking conn completed successfully

           exception pending:
           - out of band data
           - sock at out of band mark
        */

        switch (event & EV_DMASK) {

        case EV_OOB:
            if ((evq->ee_eventmask & EV_EX)) {
                if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK)))
                    mask |= EV_EX|EV_OOB;
            }
            break;

        case EV_RWBYTES|EV_OOB:
            if ((evq->ee_eventmask & EV_EX)) {
                if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK)))
                    mask |= EV_EX|EV_OOB;
            }
            /*
             * fall into the next case
             */
        case EV_RWBYTES:
            if ((evq->ee_eventmask & EV_RE) && soreadable(sp)) {
                /* for AFP/OT purposes; may go away in future */
                if ((SOCK_DOM(sp) == PF_INET ||
                    SOCK_DOM(sp) == PF_INET6) &&
                    SOCK_PROTO(sp) == IPPROTO_TCP &&
                    (sp->so_error == ECONNREFUSED ||
                    sp->so_error == ECONNRESET)) {
                    if (sp->so_pcb == NULL ||
                        sotoinpcb(sp)->inp_state ==
                        INPCB_STATE_DEAD ||
                        (tp = sototcpcb(sp)) == NULL ||
                        tp->t_state == TCPS_CLOSED) {
                        mask |= EV_RE|EV_RESET;
                        break;
                    }
                }
                mask |= EV_RE;
                evq->ee_req.er_rcnt = sp->so_rcv.sb_cc;

                if (sp->so_state & SS_CANTRCVMORE) {
                    mask |= EV_FIN;
                    break;
                }
            }
            if ((evq->ee_eventmask & EV_WR) && sowriteable(sp)) {
                /* for AFP/OT purposes; may go away in future */
                if ((SOCK_DOM(sp) == PF_INET ||
                    SOCK_DOM(sp) == PF_INET6) &&
                    SOCK_PROTO(sp) == IPPROTO_TCP &&
                    (sp->so_error == ECONNREFUSED ||
                    sp->so_error == ECONNRESET)) {
                    if (sp->so_pcb == NULL ||
                        sotoinpcb(sp)->inp_state ==
                        INPCB_STATE_DEAD ||
                        (tp = sototcpcb(sp)) == NULL ||
                        tp->t_state == TCPS_CLOSED) {
                        mask |= EV_WR|EV_RESET;
                        break;
                    }
                }
                mask |= EV_WR;
                evq->ee_req.er_wcnt = sbspace(&sp->so_snd);
            }
            break;

        case EV_RCONN:
            if ((evq->ee_eventmask & EV_RE)) {
                mask |= EV_RE|EV_RCONN;
                evq->ee_req.er_rcnt = sp->so_qlen + 1; // incl this one
            }
            break;

        case EV_WCONN:
            if ((evq->ee_eventmask & EV_WR)) {
                mask |= EV_WR|EV_WCONN;
            }
            break;

        case EV_RCLOSED:
            if ((evq->ee_eventmask & EV_RE)) {
                mask |= EV_RE|EV_RCLOSED;
            }
            break;

        case EV_WCLOSED:
            if ((evq->ee_eventmask & EV_WR)) {
                mask |= EV_WR|EV_WCLOSED;
            }
            break;

        case EV_FIN:
            if (evq->ee_eventmask & EV_RE) {
                mask |= EV_RE|EV_FIN;
            }
            break;

        case EV_RESET:
        case EV_TIMEOUT:
            if (evq->ee_eventmask & EV_RE) {
                mask |= EV_RE | event;
            }
            if (evq->ee_eventmask & EV_WR) {
                mask |= EV_WR | event;
            }
            break;

        default:
            KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, (int)sp, -1, 0, 0, 0);
            return;
        } /* switch */

        KERNEL_DEBUG(DBG_MISC_POST, (int)evq, evq->ee_eventmask, evq->ee_req.er_eventbits, mask, 0);

        if (mask) {
            /*
             * disarm... postevents are nops until this event is 'read' via
             * waitevent and then re-armed via modwatch
             */
            evq->ee_eventmask = 0;

            /*
             * since events are disarmed until after the waitevent
             * the ee_req.er_xxxx fields can't change once we've
             * inserted this event into the proc queue...
             * since waitevent can't see this event until we
             * enqueue it, waitevent will see a 'consistent'
             * snapshot of the event, even though it won't hold
             * the socket lock, and we're updating the event outside
             * of the proc lock, which it will hold
             */
            evq->ee_req.er_eventbits |= mask;

            evprocenque(evq);
        }
    }
    KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, (int)sp, 0, 0, 0, 0);
}
#endif /* SOCKETS */
/*
 * watchevent system call. user passes us an event to watch
 * for. we malloc an event object, initialize it, and queue
 * it to the open socket. when the event occurs, postevent()
 * will enque it back to our proc where we can retrieve it
 * via waitevent().
 *
 * should this prevent duplicate events on same socket?
 *
 * Returns:	0			Success
 *		EINVAL
 *		ENOMEM			No memory for operation
 *					copyin:EFAULT
 */
int
watchevent(proc_t p, struct watchevent_args *uap, __unused int *retval)
{
	struct eventqelt *evq = (struct eventqelt *)0;
	struct eventqelt *np = NULL;
	struct eventreq64 *erp;
	struct fileproc *fp = NULL;
	int error;

	KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_START, 0,0,0,0,0);

	// get a qelt and fill with users req
	MALLOC(evq, struct eventqelt *, sizeof(struct eventqelt), M_TEMP, M_WAITOK);

	if (evq == NULL)
		return (ENOMEM);
	erp = &evq->ee_req;

	// get users request pkt

	if (IS_64BIT_PROCESS(p)) {
		error = copyin(uap->u_req, (caddr_t)erp, sizeof(struct eventreq64));
	} else {
		struct eventreq32 er32;

		error = copyin(uap->u_req, (caddr_t)&er32, sizeof(struct eventreq32));
		if (error == 0) {
			/*
			 * the user only passes in the
			 * er_type, er_handle and er_data...
			 * the other fields are initialized
			 * below, so don't bother to copy
			 */
			erp->er_type = er32.er_type;
			erp->er_handle = er32.er_handle;
			erp->er_data = (user_addr_t)er32.er_data;
		}
	}
	if (error) {
		FREE(evq, M_TEMP);

		KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0);
		return (error);
	}
	KERNEL_DEBUG(DBG_MISC_WATCH, erp->er_handle,uap->u_eventmask,(uint32_t)evq,0,0);

	// validate, freeing qelt if errors

	error = 0;
	proc_fdlock(p);

	if (erp->er_type != EV_FD) {
		error = EINVAL;
	} else if ((error = fp_lookup(p, erp->er_handle, &fp, 1)) != 0) {
		error = EBADF;
#if SOCKETS
	} else if (fp->f_type == DTYPE_SOCKET) {
		socket_lock((struct socket *)fp->f_data, 1);
		np = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
#endif /* SOCKETS */
	} else if (fp->f_type == DTYPE_PIPE) {
		PIPE_LOCK((struct pipe *)fp->f_data);
		np = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
	} else {
		fp_drop(p, erp->er_handle, fp, 1);
		error = EINVAL;
	}
	proc_fdunlock(p);

	if (error) {
		FREE(evq, M_TEMP);

		KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0);
		return (error);
	}

	/*
	 * only allow one watch per file per proc
	 */
	for ( ; np != NULL; np = np->ee_slist.tqe_next) {
		if (np->ee_proc == p) {
#if SOCKETS
			if (fp->f_type == DTYPE_SOCKET)
				socket_unlock((struct socket *)fp->f_data, 1);
			else
#endif /* SOCKETS */
				PIPE_UNLOCK((struct pipe *)fp->f_data);

			fp_drop(p, erp->er_handle, fp, 0);
			FREE(evq, M_TEMP);

			KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, EINVAL,0,0,0,0);
			return (EINVAL);
		}
	}
	erp->er_ecnt = erp->er_rcnt = erp->er_wcnt = erp->er_eventbits = 0;
	evq->ee_proc = p;
	evq->ee_eventmask = uap->u_eventmask & EV_MASK;
	evq->ee_flags = 0;

#if SOCKETS
	if (fp->f_type == DTYPE_SOCKET) {
		TAILQ_INSERT_TAIL(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
		postevent((struct socket *)fp->f_data, 0, EV_RWBYTES); // catch existing events

		socket_unlock((struct socket *)fp->f_data, 1);
	} else
#endif /* SOCKETS */
	{
		TAILQ_INSERT_TAIL(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
		postpipeevent((struct pipe *)fp->f_data, EV_RWBYTES);

		PIPE_UNLOCK((struct pipe *)fp->f_data);
	}
	fp_drop_event(p, erp->er_handle, fp);

	KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, 0,0,0,0,0);
	return (0);
}
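
/*
 * Illustrative user-space sketch (assumes the legacy prototypes in
 * <sys/ev.h>; not part of this file).  Only EV_FD watches on sockets or
 * pipes are accepted, and only one watch per file per process:
 *
 *	struct eventreq er;
 *
 *	memset(&er, 0, sizeof(er));
 *	er.er_type = EV_FD;
 *	er.er_handle = sock_fd;			// socket or pipe descriptor
 *	er.er_data = (void *)my_cookie;		// echoed back by waitevent()
 *
 *	if (watchevent(&er, EV_RE | EV_WR) == -1)
 *		err(1, "watchevent");		// EINVAL, EBADF, or ENOMEM
 */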
/*
 * waitevent system call.
 * grabs the next waiting event for this proc and returns
 * it. if no events, user can request to sleep with timeout
 * or without or poll mode
 *    ((tv != NULL && interval == 0) || tv == -1)
 */
int
waitevent(proc_t p, struct waitevent_args *uap, int *retval)
{
	int error = 0;
	struct eventqelt *evq;
	struct eventreq64 *erp;
	uint64_t abstime, interval;
	boolean_t fast_poll = FALSE;
	union {
		struct eventreq64 er64;
		struct eventreq32 er32;
	} uer;

	interval = 0;

	if (uap->tv) {
		struct timeval atv;
		/*
		 * check for fast poll method
		 */
		if (IS_64BIT_PROCESS(p)) {
			if (uap->tv == (user_addr_t)-1)
				fast_poll = TRUE;
		} else if (uap->tv == (user_addr_t)((uint32_t)-1))
			fast_poll = TRUE;

		if (fast_poll == TRUE) {
			if (p->p_evlist.tqh_first == NULL) {
				KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_NONE, -1,0,0,0,0);
				/*
				 * poll failed
				 */
				*retval = 1;
				return (0);
			}
			proc_lock(p);
			goto retry;
		}
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timeval atv64;

			error = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
			/* Loses resolution - assume timeout < 68 years */
			atv.tv_sec = atv64.tv_sec;
			atv.tv_usec = atv64.tv_usec;
		} else {
			struct user32_timeval atv32;

			error = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
			atv.tv_sec = atv32.tv_sec;
			atv.tv_usec = atv32.tv_usec;
		}
		if (error)
			return (error);
		if (itimerfix(&atv)) {
			error = EINVAL;
			return (error);
		}
		interval = tvtoabstime(&atv);
	}
	KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_START, 0,0,0,0,0);

	proc_lock(p);
retry:
	if ((evq = p->p_evlist.tqh_first) != NULL) {
		/*
		 * found one... make a local copy while it's still on the queue
		 * to prevent it from changing while in the midst of copying
		 * don't want to hold the proc lock across a copyout because
		 * it might block on a page fault at the target in user space
		 */
		erp = &evq->ee_req;

		if (IS_64BIT_PROCESS(p))
			bcopy((caddr_t)erp, (caddr_t)&uer.er64, sizeof (struct eventreq64));
		else {
			uer.er32.er_type = erp->er_type;
			uer.er32.er_handle = erp->er_handle;
			uer.er32.er_data = (uint32_t)erp->er_data;
			uer.er32.er_ecnt = erp->er_ecnt;
			uer.er32.er_rcnt = erp->er_rcnt;
			uer.er32.er_wcnt = erp->er_wcnt;
			uer.er32.er_eventbits = erp->er_eventbits;
		}
		TAILQ_REMOVE(&p->p_evlist, evq, ee_plist);

		evq->ee_flags &= ~EV_QUEUED;

		proc_unlock(p);

		if (IS_64BIT_PROCESS(p))
			error = copyout((caddr_t)&uer.er64, uap->u_req, sizeof(struct eventreq64));
		else
			error = copyout((caddr_t)&uer.er32, uap->u_req, sizeof(struct eventreq32));

		KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,
			     evq->ee_req.er_handle,evq->ee_req.er_eventbits,(uint32_t)evq,0);
		return (error);
	} else {
		if (uap->tv && interval == 0) {
			*retval = 1; // poll failed

			proc_unlock(p);
			KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,0,0,0,0);
			return (error);
		}
		if (interval != 0)
			clock_absolutetime_interval_to_deadline(interval, &abstime);
		else
			abstime = 0;

		KERNEL_DEBUG(DBG_MISC_WAIT, 1,(uint32_t)&p->p_evlist,0,0,0);

		error = msleep1(&p->p_evlist, &p->p_mlock, (PSOCK | PCATCH), "waitevent", abstime);

		KERNEL_DEBUG(DBG_MISC_WAIT, 2,(uint32_t)&p->p_evlist,0,0,0);

		if (error == 0)
			goto retry;
		if (error == ERESTART)
			error = EINTR;
		if (error == EWOULDBLOCK) {
			*retval = 1;
			error = 0;
		}
	}
	proc_unlock(p);

	KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, 0,0,0,0,0);
	return (error);
}
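
/*
 * The three timeout modes decoded above, sketched from user space
 * (assumes the <sys/ev.h> wrapper; illustrative only):
 *
 *	struct eventreq er;
 *	struct timeval tv = { 0, 0 };
 *
 *	waitevent(&er, NULL);			// block until an event is posted
 *	waitevent(&er, &tv);			// poll; returns 1 if nothing queued
 *	waitevent(&er, (struct timeval *)-1);	// fast poll; skips the timeval copyin
 */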
/*
 * modwatch system call. user passes in event to modify.
 * if we find it we reset the event bits and que/deque event
 * as needed.
 */
int
modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval)
{
	struct eventreq64 er;
	struct eventreq64 *erp = &er;
	struct eventqelt *evq = NULL;	/* protected by error return */
	int error;
	struct fileproc *fp;
	int flag;

	KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_START, 0,0,0,0,0);

	/*
	 * get user's request pkt
	 * just need the er_type and er_handle which sit above the
	 * problematic er_data (32/64 issue)... so only copy in
	 * those 2 fields
	 */
	if ((error = copyin(uap->u_req, (caddr_t)erp, sizeof(er.er_type) + sizeof(er.er_handle)))) {
		KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, error,0,0,0,0);
		return (error);
	}
	proc_fdlock(p);

	if (erp->er_type != EV_FD) {
		error = EINVAL;
	} else if ((error = fp_lookup(p, erp->er_handle, &fp, 1)) != 0) {
		error = EBADF;
#if SOCKETS
	} else if (fp->f_type == DTYPE_SOCKET) {
		socket_lock((struct socket *)fp->f_data, 1);
		evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
#endif /* SOCKETS */
	} else if (fp->f_type == DTYPE_PIPE) {
		PIPE_LOCK((struct pipe *)fp->f_data);
		evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
	} else {
		fp_drop(p, erp->er_handle, fp, 1);
		error = EINVAL;
	}
	proc_fdunlock(p);

	if (error) {
		KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, error,0,0,0,0);
		return (error);
	}

	if ((uap->u_eventmask == EV_RM) && (fp->f_flags & FP_WAITEVENT)) {
		fp->f_flags &= ~FP_WAITEVENT;
	}

	// locate event if possible
	for ( ; evq != NULL; evq = evq->ee_slist.tqe_next) {
		if (evq->ee_proc == p)
			break;
	}
	if (evq == NULL) {
#if SOCKETS
		if (fp->f_type == DTYPE_SOCKET)
			socket_unlock((struct socket *)fp->f_data, 1);
		else
#endif /* SOCKETS */
			PIPE_UNLOCK((struct pipe *)fp->f_data);
		fp_drop(p, erp->er_handle, fp, 0);
		KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, EINVAL,0,0,0,0);
		return (EINVAL);
	}
	KERNEL_DEBUG(DBG_MISC_MOD, erp->er_handle,uap->u_eventmask,(uint32_t)evq,0,0);

	if (uap->u_eventmask == EV_RM) {
		EVPROCDEQUE(p, evq);

#if SOCKETS
		if (fp->f_type == DTYPE_SOCKET) {
			TAILQ_REMOVE(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
			socket_unlock((struct socket *)fp->f_data, 1);
		} else
#endif /* SOCKETS */
		{
			TAILQ_REMOVE(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
			PIPE_UNLOCK((struct pipe *)fp->f_data);
		}
		fp_drop(p, erp->er_handle, fp, 0);
		FREE(evq, M_TEMP);
		KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, 0,0,0,0,0);
		return (0);
	}
	switch (uap->u_eventmask & EV_MASK) {

	case 0:
		flag = 0;
		break;

	case EV_RE:
	case EV_WR:
	case EV_RE|EV_WR:
		flag = EV_RWBYTES;
		break;

	case EV_EX:
		flag = EV_OOB;
		break;

	case EV_EX|EV_RE:
	case EV_EX|EV_WR:
	case EV_EX|EV_RE|EV_WR:
		flag = EV_OOB|EV_RWBYTES;
		break;

	default:
#if SOCKETS
		if (fp->f_type == DTYPE_SOCKET)
			socket_unlock((struct socket *)fp->f_data, 1);
		else
#endif /* SOCKETS */
			PIPE_UNLOCK((struct pipe *)fp->f_data);
		fp_drop(p, erp->er_handle, fp, 0);
		KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, EINVAL,0,0,0,0);
		return (EINVAL);
	}
	/*
	 * since we're holding the socket/pipe lock, the event
	 * cannot go from the unqueued state to the queued state
	 * however, it can go from the queued state to the unqueued state
	 * since that direction is protected by the proc_lock...
	 * so do a quick check for EV_QUEUED w/o holding the proc lock
	 * since by far the common case will be NOT EV_QUEUED, this saves
	 * us taking the proc_lock the majority of the time
	 */
	if (evq->ee_flags & EV_QUEUED) {
		/*
		 * EVPROCDEQUE will recheck the state after it grabs the proc_lock
		 */
		EVPROCDEQUE(p, evq);
	}
	/*
	 * while the event is off the proc queue and
	 * we're holding the socket/pipe lock
	 * it's safe to update these fields...
	 */
	evq->ee_req.er_eventbits = 0;
	evq->ee_eventmask = uap->u_eventmask & EV_MASK;

#if SOCKETS
	if (fp->f_type == DTYPE_SOCKET) {
		postevent((struct socket *)fp->f_data, 0, flag);
		socket_unlock((struct socket *)fp->f_data, 1);
	} else
#endif /* SOCKETS */
	{
		postpipeevent((struct pipe *)fp->f_data, flag);
		PIPE_UNLOCK((struct pipe *)fp->f_data);
	}
	fp_drop(p, erp->er_handle, fp, 0);
	KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, evq->ee_req.er_handle,evq->ee_eventmask,(uint32_t)fp->f_data,flag,0);
	return (0);
}
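
/*
 * Illustrative sketch (assumes the <sys/ev.h> wrapper; not part of this
 * file).  modwatch() is both the re-arm and the removal path:
 *
 *	modwatch(&er, EV_RE);	// re-arm for read events only
 *	modwatch(&er, EV_RM);	// remove the watch and free the kernel qelt
 *
 * After EV_RM the descriptor can be watched again with a fresh
 * watchevent() call.
 */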
/* this routine is called from the close of fd with proc_fdlock held */
int
waitevent_close(struct proc *p, struct fileproc *fp)
{
	struct eventqelt *evq;

	fp->f_flags &= ~FP_WAITEVENT;

#if SOCKETS
	if (fp->f_type == DTYPE_SOCKET) {
		socket_lock((struct socket *)fp->f_data, 1);
		evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
	} else
#endif /* SOCKETS */
	if (fp->f_type == DTYPE_PIPE) {
		PIPE_LOCK((struct pipe *)fp->f_data);
		evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
	} else {
		return (EINVAL);
	}
	proc_fdunlock(p);

	// locate event if possible
	for ( ; evq != NULL; evq = evq->ee_slist.tqe_next) {
		if (evq->ee_proc == p)
			break;
	}
	if (evq == NULL) {
#if SOCKETS
		if (fp->f_type == DTYPE_SOCKET)
			socket_unlock((struct socket *)fp->f_data, 1);
		else
#endif /* SOCKETS */
			PIPE_UNLOCK((struct pipe *)fp->f_data);

		proc_fdlock(p);

		return (EINVAL);
	}
	EVPROCDEQUE(p, evq);

#if SOCKETS
	if (fp->f_type == DTYPE_SOCKET) {
		TAILQ_REMOVE(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
		socket_unlock((struct socket *)fp->f_data, 1);
	} else
#endif /* SOCKETS */
	{
		TAILQ_REMOVE(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
		PIPE_UNLOCK((struct pipe *)fp->f_data);
	}
	FREE(evq, M_TEMP);

	proc_fdlock(p);

	return (0);
}
/*
 * gethostuuid
 *
 * Description:	Get the host UUID from IOKit and return it to user space.
 *
 * Parameters:	uuid_buf		Pointer to buffer to receive UUID
 *		timeout			Timespec for timeout
 *		spi			SPI, skip sandbox check (temporary)
 *
 * Returns:	0			Success
 *		EWOULDBLOCK		Timeout is too short
 *		copyout:EFAULT		Bad user buffer
 *		mac_system_check_info:EPERM
 *					Client not allowed to perform this operation
 *
 * Notes:	A timeout seems redundant, since if it's tolerable to not
 *		have a system UUID in hand, then why ask for one?
 */
int
gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval)
{
	kern_return_t kret;
	int error;
	mach_timespec_t mach_ts;	/* for IOKit call */
	__darwin_uuid_t uuid_kern;	/* for IOKit call */

	/* Convert the 32/64 bit timespec into a mach_timespec_t */
	if (proc_is64bit(p)) {
		struct user64_timespec ts;

		error = copyin(uap->timeoutp, &ts, sizeof(ts));
		if (error)
			return (error);
		mach_ts.tv_sec = ts.tv_sec;
		mach_ts.tv_nsec = ts.tv_nsec;
	} else {
		struct user32_timespec ts;

		error = copyin(uap->timeoutp, &ts, sizeof(ts));
		if (error)
			return (error);
		mach_ts.tv_sec = ts.tv_sec;
		mach_ts.tv_nsec = ts.tv_nsec;
	}

	/* Call IOKit with the stack buffer to get the UUID */
	kret = IOBSDGetPlatformUUID(uuid_kern, mach_ts);

	/*
	 * If we get it, copy out the data to the user buffer; note that a
	 * uuid_t is an array of characters, so this is size invariant for
	 * 32 vs. 64 bit.
	 */
	if (kret == KERN_SUCCESS) {
		error = copyout(uuid_kern, uap->uuid_buf, sizeof(uuid_kern));
	} else {
		error = EWOULDBLOCK;
	}

	return (error);
}
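
/*
 * User-space counterpart, per the public gethostuuid(2) wrapper
 * (illustrative only; not part of this file):
 *
 *	#include <unistd.h>
 *	#include <uuid/uuid.h>
 *
 *	uuid_t uu;
 *	struct timespec wait = { 5, 0 };	// allow IOKit up to 5 seconds
 *	char str[37];
 *
 *	if (gethostuuid(uu, &wait) == 0) {
 *		uuid_unparse(uu, str);
 *		printf("host UUID: %s\n", str);
 *	}
 */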
/*
 * ledger
 *
 * Description:	Omnibus system call for ledger operations
 */
int
ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval)
{
#if !CONFIG_MACF
#pragma unused(p)
#endif
	int rval, pid, len, error;
#ifdef LEDGER_DEBUG
	struct ledger_limit_args lla;
#endif
	task_t task;
	proc_t proc;

	/* Finish copying in the necessary args before taking the proc lock */
	error = 0;
	len = 0;
	if (args->cmd == LEDGER_ENTRY_INFO)
		error = copyin(args->arg3, (char *)&len, sizeof (len));
	else if (args->cmd == LEDGER_TEMPLATE_INFO)
		error = copyin(args->arg2, (char *)&len, sizeof (len));
#ifdef LEDGER_DEBUG
	else if (args->cmd == LEDGER_LIMIT)
		error = copyin(args->arg2, (char *)&lla, sizeof (lla));
#endif
	if (error)
		return (error);
	if (len < 0)
		return (EINVAL);

	rval = 0;
	if (args->cmd != LEDGER_TEMPLATE_INFO) {
		pid = args->arg1;
		proc = proc_find(pid);
		if (proc == NULL)
			return (ESRCH);

#if CONFIG_MACF
		error = mac_proc_check_ledger(p, proc, args->cmd);
		if (error) {
			proc_rele(proc);
			return (error);
		}
#endif
		task = proc->task;
	}

	switch (args->cmd) {
#ifdef LEDGER_DEBUG
	case LEDGER_LIMIT: {
		if (!kauth_cred_issuser(kauth_cred_get()))
			rval = EPERM;
		rval = ledger_limit(task, &lla);
		proc_rele(proc);
		break;
	}
#endif
	case LEDGER_INFO: {
		struct ledger_info info;

		rval = ledger_info(task, &info);
		proc_rele(proc);
		if (rval == 0)
			rval = copyout(&info, args->arg2,
			    sizeof (info));
		break;
	}

	case LEDGER_ENTRY_INFO: {
		void *buf;
		int sz;

		rval = ledger_get_task_entry_info_multiple(task, &buf, &len);
		proc_rele(proc);
		if ((rval == 0) && (len > 0)) {
			sz = len * sizeof (struct ledger_entry_info);
			rval = copyout(buf, args->arg2, sz);
			kfree(buf, sz);
		}
		if (rval == 0)
			rval = copyout(&len, args->arg3, sizeof (len));
		break;
	}

	case LEDGER_TEMPLATE_INFO: {
		void *buf;
		int sz;

		rval = ledger_template_info(&buf, &len);
		if ((rval == 0) && (len > 0)) {
			sz = len * sizeof (struct ledger_template_info);
			rval = copyout(buf, args->arg1, sz);
			kfree(buf, sz);
		}
		if (rval == 0)
			rval = copyout(&len, args->arg2, sizeof (len));
		break;
	}

	default:
		rval = EINVAL;
	}

	return (rval);
}
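
/*
 * The *_INFO commands use a two-phase size negotiation: the caller copies
 * in the number of entries its buffer can hold, and the kernel copies back
 * the number actually returned.  A hedged sketch via the raw (private)
 * syscall -- the SYS_ledger number and argument order are assumptions here:
 *
 *	int len = 64;					// capacity of buf
 *	struct ledger_template_info *buf = calloc(len, sizeof(*buf));
 *
 *	syscall(SYS_ledger, LEDGER_TEMPLATE_INFO,
 *	        (caddr_t)buf, (caddr_t)&len, (caddr_t)0);
 *	// on success, len now holds the entry count written to buf
 */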
int
telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t *retval)
{
	int error = 0;

	switch (args->cmd) {
#if CONFIG_TELEMETRY
	case TELEMETRY_CMD_TIMER_EVENT:
		error = telemetry_timer_event(args->deadline, args->interval, args->leeway);
		break;
#endif /* CONFIG_TELEMETRY */
	case TELEMETRY_CMD_VOUCHER_NAME:
		if (thread_set_voucher_name((mach_port_name_t)args->deadline))
			error = EINVAL;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
#if defined(DEVELOPMENT) || defined(DEBUG)
#if CONFIG_WAITQ_DEBUG
static uint64_t g_wqset_num = 0;
struct g_wqset {
	queue_chain_t link;
	struct waitq_set *wqset;
};

static queue_head_t g_wqset_list;
static struct waitq_set *g_waitq_set = NULL;

static inline struct waitq_set *sysctl_get_wqset(int idx)
{
	struct g_wqset *gwqs;

	if (!g_wqset_num)
		queue_init(&g_wqset_list);

	/* don't bother with locks: this is test-only code! */
	qe_foreach_element(gwqs, &g_wqset_list, link) {
		if ((int)(wqset_id(gwqs->wqset) & 0xffffffff) == idx)
			return gwqs->wqset;
	}

	/* allocate a new one */
	++g_wqset_num;
	gwqs = (struct g_wqset *)kalloc(sizeof(*gwqs));
	assert(gwqs != NULL);

	gwqs->wqset = waitq_set_alloc(SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST|SYNC_POLICY_DISABLE_IRQ);
	enqueue_tail(&g_wqset_list, &gwqs->link);
	printf("[WQ]: created new waitq set 0x%llx\n", wqset_id(gwqs->wqset));

	return gwqs->wqset;
}

#define MAX_GLOBAL_TEST_QUEUES 64
static int g_wq_init = 0;
static struct waitq g_wq[MAX_GLOBAL_TEST_QUEUES];

static inline struct waitq *global_test_waitq(int idx)
{
	if (idx < 0)
		return NULL;

	if (!g_wq_init) {
		g_wq_init = 1;
		for (int i = 0; i < MAX_GLOBAL_TEST_QUEUES; i++)
			waitq_init(&g_wq[i], SYNC_POLICY_FIFO|SYNC_POLICY_DISABLE_IRQ);
	}

	return &g_wq[idx % MAX_GLOBAL_TEST_QUEUES];
}

static int sysctl_waitq_wakeup_one SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	int index;
	struct waitq *waitq;
	kern_return_t kr;
	int64_t event64 = 0;

	error = SYSCTL_IN(req, &event64, sizeof(event64));
	if (error)
		return error;

	if (!req->newptr)
		return SYSCTL_OUT(req, &event64, sizeof(event64));

	if (event64 < 0) {
		index = (int)((-event64) & 0xffffffff);
		waitq = wqset_waitq(sysctl_get_wqset(index));
		index = -index;
	} else {
		index = (int)event64;
		waitq = global_test_waitq(index);
	}

	event64 = 0;

	printf("[WQ]: Waking one thread on waitq [%d] event:0x%llx\n",
	       index, event64);
	kr = waitq_wakeup64_one(waitq, (event64_t)event64, THREAD_AWAKENED,
				WAITQ_ALL_PRIORITIES);
	printf("[WQ]: \tkr=%d\n", kr);

	return SYSCTL_OUT(req, &kr, sizeof(kr));
}
SYSCTL_PROC(_kern, OID_AUTO, waitq_wakeup_one, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	    0, 0, sysctl_waitq_wakeup_one, "Q", "wakeup one thread waiting on given event");
static int sysctl_waitq_wakeup_all SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	int index;
	struct waitq *waitq;
	kern_return_t kr;
	int64_t event64 = 0;

	error = SYSCTL_IN(req, &event64, sizeof(event64));
	if (error)
		return error;

	if (!req->newptr)
		return SYSCTL_OUT(req, &event64, sizeof(event64));

	if (event64 < 0) {
		index = (int)((-event64) & 0xffffffff);
		waitq = wqset_waitq(sysctl_get_wqset(index));
		index = -index;
	} else {
		index = (int)event64;
		waitq = global_test_waitq(index);
	}

	event64 = 0;

	printf("[WQ]: Waking all threads on waitq [%d] event:0x%llx\n",
	       index, event64);
	kr = waitq_wakeup64_all(waitq, (event64_t)event64,
				THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
	printf("[WQ]: \tkr=%d\n", kr);

	return SYSCTL_OUT(req, &kr, sizeof(kr));
}
SYSCTL_PROC(_kern, OID_AUTO, waitq_wakeup_all, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	    0, 0, sysctl_waitq_wakeup_all, "Q", "wakeup all threads waiting on given event");

static int sysctl_waitq_wait SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	int index;
	struct waitq *waitq;
	kern_return_t kr;
	int64_t event64 = 0;

	error = SYSCTL_IN(req, &event64, sizeof(event64));
	if (error)
		return error;

	if (!req->newptr)
		return SYSCTL_OUT(req, &event64, sizeof(event64));

	if (event64 < 0) {
		index = (int)((-event64) & 0xffffffff);
		waitq = wqset_waitq(sysctl_get_wqset(index));
		index = -index;
	} else {
		index = (int)event64;
		waitq = global_test_waitq(index);
	}

	event64 = 0;

	printf("[WQ]: Current thread waiting on waitq [%d] event:0x%llx\n",
	       index, event64);
	kr = waitq_assert_wait64(waitq, (event64_t)event64, THREAD_INTERRUPTIBLE, 0);
	if (kr == THREAD_WAITING)
		thread_block(THREAD_CONTINUE_NULL);
	printf("[WQ]: \tWoke Up: kr=%d\n", kr);

	return SYSCTL_OUT(req, &kr, sizeof(kr));
}
SYSCTL_PROC(_kern, OID_AUTO, waitq_wait, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	    0, 0, sysctl_waitq_wait, "Q", "start waiting on given event");
static int sysctl_wqset_select SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	struct waitq_set *wqset;
	uint64_t event64 = 0;

	error = SYSCTL_IN(req, &event64, sizeof(event64));
	if (error)
		return error;

	if (!req->newptr)
		goto out;

	wqset = sysctl_get_wqset((int)(event64 & 0xffffffff));
	g_waitq_set = wqset;

	event64 = wqset_id(wqset);
	printf("[WQ]: selected wqset 0x%llx\n", event64);

out:
	if (g_waitq_set)
		event64 = wqset_id(g_waitq_set);
	else
		event64 = (uint64_t)(-1);

	return SYSCTL_OUT(req, &event64, sizeof(event64));
}
SYSCTL_PROC(_kern, OID_AUTO, wqset_select, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	    0, 0, sysctl_wqset_select, "Q", "select/create a global waitq set");

static int sysctl_waitq_link SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	int index;
	struct waitq *waitq;
	struct waitq_set *wqset;
	kern_return_t kr = KERN_SUCCESS;
	uint64_t reserved_link = 0;
	int64_t event64 = 0;

	error = SYSCTL_IN(req, &event64, sizeof(event64));
	if (error)
		return error;

	if (!req->newptr)
		return SYSCTL_OUT(req, &event64, sizeof(event64));

	if (!g_waitq_set)
		g_waitq_set = sysctl_get_wqset(1);
	wqset = g_waitq_set;

	if (event64 < 0) {
		struct waitq_set *tmp;

		index = (int)((-event64) & 0xffffffff);
		tmp = sysctl_get_wqset(index);
		if (tmp == wqset)
			goto out;
		waitq = wqset_waitq(tmp);
		index = -index;
	} else {
		index = (int)event64;
		waitq = global_test_waitq(index);
	}

	printf("[WQ]: linking waitq [%d] to global wqset (0x%llx)\n",
	       index, wqset_id(wqset));
	reserved_link = waitq_link_reserve(waitq);
	kr = waitq_link(waitq, wqset, WAITQ_SHOULD_LOCK, &reserved_link);
	waitq_link_release(reserved_link);

	printf("[WQ]: \tkr=%d\n", kr);

out:
	return SYSCTL_OUT(req, &kr, sizeof(kr));
}
SYSCTL_PROC(_kern, OID_AUTO, waitq_link, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	    0, 0, sysctl_waitq_link, "Q", "link global waitq to test waitq set");

static int sysctl_waitq_unlink SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	int index;
	struct waitq *waitq;
	struct waitq_set *wqset;
	kern_return_t kr;
	uint64_t event64 = 0;

	error = SYSCTL_IN(req, &event64, sizeof(event64));
	if (error)
		return error;

	if (!req->newptr)
		return SYSCTL_OUT(req, &event64, sizeof(event64));

	if (!g_waitq_set)
		g_waitq_set = sysctl_get_wqset(1);
	wqset = g_waitq_set;

	index = (int)event64;
	waitq = global_test_waitq(index);

	printf("[WQ]: unlinking waitq [%d] from global wqset (0x%llx)\n",
	       index, wqset_id(wqset));

	kr = waitq_unlink(waitq, wqset);
	printf("[WQ]: \tkr=%d\n", kr);

	return SYSCTL_OUT(req, &kr, sizeof(kr));
}
SYSCTL_PROC(_kern, OID_AUTO, waitq_unlink, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	    0, 0, sysctl_waitq_unlink, "Q", "unlink global waitq from test waitq set");

static int sysctl_waitq_clear_prepost SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	struct waitq *waitq;
	uint64_t event64 = 0;
	int error, index;

	error = SYSCTL_IN(req, &event64, sizeof(event64));
	if (error)
		return error;

	if (!req->newptr)
		return SYSCTL_OUT(req, &event64, sizeof(event64));

	index = (int)event64;
	waitq = global_test_waitq(index);

	printf("[WQ]: clearing prepost on waitq [%d]\n", index);
	waitq_clear_prepost(waitq);

	return SYSCTL_OUT(req, &event64, sizeof(event64));
}
SYSCTL_PROC(_kern, OID_AUTO, waitq_clear_prepost, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	    0, 0, sysctl_waitq_clear_prepost, "Q", "clear prepost on given waitq");

static int sysctl_wqset_unlink_all SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	struct waitq_set *wqset;
	kern_return_t kr;
	uint64_t event64 = 0;

	error = SYSCTL_IN(req, &event64, sizeof(event64));
	if (error)
		return error;

	if (!req->newptr)
		return SYSCTL_OUT(req, &event64, sizeof(event64));

	if (!g_waitq_set)
		g_waitq_set = sysctl_get_wqset(1);
	wqset = g_waitq_set;

	printf("[WQ]: unlinking all queues from global wqset (0x%llx)\n",
	       wqset_id(wqset));

	kr = waitq_set_unlink_all(wqset);
	printf("[WQ]: \tkr=%d\n", kr);

	return SYSCTL_OUT(req, &kr, sizeof(kr));
}
SYSCTL_PROC(_kern, OID_AUTO, wqset_unlink_all, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	    0, 0, sysctl_wqset_unlink_all, "Q", "unlink all queues from test waitq set");

static int sysctl_wqset_clear_preposts SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	struct waitq_set *wqset = NULL;
	uint64_t event64 = 0;
	int error, index;

	error = SYSCTL_IN(req, &event64, sizeof(event64));
	if (error)
		return error;

	if (!req->newptr)
		goto out;

	index = (int)((event64) & 0xffffffff);
	wqset = sysctl_get_wqset(index);
	assert(wqset != NULL);

	printf("[WQ]: clearing preposts on wqset 0x%llx\n", wqset_id(wqset));
	waitq_set_clear_preposts(wqset);

out:
	if (wqset)
		event64 = wqset_id(wqset);
	else
		event64 = (uint64_t)(-1);

	return SYSCTL_OUT(req, &event64, sizeof(event64));
}
SYSCTL_PROC(_kern, OID_AUTO, wqset_clear_preposts, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	    0, 0, sysctl_wqset_clear_preposts, "Q", "clear preposts on given waitq set");

#endif /* CONFIG_WAITQ_DEBUG */
#endif /* defined(DEVELOPMENT) || defined(DEBUG) */