/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/ktrace.h>
#include <sys/mount.h>
#include <sys/protosw.h>
#include <sys/kdebug.h>
#include <kern/assert.h>
#include <kern/thread_act.h>

#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>

#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_debug.h>
/* for wait queue based select */
#include <kern/wait_queue.h>
/*
 * Read system call.
 */
read(p, uap, retval)
    struct proc *p;
    register struct read_args *uap;
    register_t *retval;
{
    struct uio auio;
    struct iovec aiov;

    aiov.iov_base = (caddr_t)uap->cbuf;
    aiov.iov_len = uap->nbyte;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_rw = UIO_READ;
    return (rwuio(p, uap->fd, &auio, UIO_READ, retval));
}
/*
 * Scatter read system call.
 */
readv(p, uap, retval)
    struct proc *p;
    register struct readv_args *uap;
    register_t *retval;
{
    struct uio auio;
    register struct iovec *iov;
    int error;
    struct iovec aiov[UIO_SMALLIOV];

    if (uap->iovcnt > UIO_SMALLIOV) {
        if (uap->iovcnt > UIO_MAXIOV)
            return (EINVAL);
        if ((iov = (struct iovec *)
                kalloc(sizeof(struct iovec) * (uap->iovcnt))) == 0)
            return (ENOMEM);
    } else
        iov = aiov;
    auio.uio_iov = iov;
    auio.uio_iovcnt = uap->iovcnt;
    auio.uio_rw = UIO_READ;
    error = copyin((caddr_t)uap->iovp, (caddr_t)iov,
        uap->iovcnt * sizeof (struct iovec));
    if (!error)
        error = rwuio(p, uap->fd, &auio, UIO_READ, retval);
    if (uap->iovcnt > UIO_SMALLIOV)
        kfree(iov, sizeof(struct iovec)*uap->iovcnt);
    return (error);
}
/*
 * Write system call.
 */
write(p, uap, retval)
    struct proc *p;
    register struct write_args *uap;
    register_t *retval;
{
    struct uio auio;
    struct iovec aiov;

    aiov.iov_base = uap->cbuf;
    aiov.iov_len = uap->nbyte;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_rw = UIO_WRITE;
    return (rwuio(p, uap->fd, &auio, UIO_WRITE, retval));
}
/*
 * Gather write system call.
 */
writev(p, uap, retval)
    struct proc *p;
    register struct writev_args *uap;
    register_t *retval;
{
    struct uio auio;
    register struct iovec *iov;
    int error;
    struct iovec aiov[UIO_SMALLIOV];

    if (uap->iovcnt > UIO_SMALLIOV) {
        if (uap->iovcnt > UIO_MAXIOV)
            return (EINVAL);
        if ((iov = (struct iovec *)
                kalloc(sizeof(struct iovec) * (uap->iovcnt))) == 0)
            return (ENOMEM);
    } else
        iov = aiov;
    auio.uio_iov = iov;
    auio.uio_iovcnt = uap->iovcnt;
    auio.uio_rw = UIO_WRITE;
    error = copyin((caddr_t)uap->iovp, (caddr_t)iov,
        uap->iovcnt * sizeof (struct iovec));
    if (!error)
        error = rwuio(p, uap->fd, &auio, UIO_WRITE, retval);
    if (uap->iovcnt > UIO_SMALLIOV)
        kfree(iov, sizeof(struct iovec)*uap->iovcnt);
    return (error);
}
/*
 * Common code for read/readv/write/writev: validate the iovecs,
 * total up the transfer length, and call the file's read/write op.
 */
rwuio(p, fdes, uio, rw, retval)
    struct proc *p;
    int fdes;
    register struct uio *uio;
    enum uio_rw rw;
    register_t *retval;
{
    struct file *fp;
    register struct iovec *iov;
    int i, count, flag, error;

    if (error = fdgetf(p, fdes, &fp))
        return (error);

    if ((fp->f_flag & (rw == UIO_READ ? FREAD : FWRITE)) == 0) {
        return (EBADF);
    }

    uio->uio_resid = 0;
    uio->uio_segflg = UIO_USERSPACE;
    uio->uio_procp = p;
    iov = uio->uio_iov;
    for (i = 0; i < uio->uio_iovcnt; i++) {
        if (iov->iov_len < 0) {
            return (EINVAL);
        }
        uio->uio_resid += iov->iov_len;
        if (uio->uio_resid < 0) {
            return (EINVAL);
        }
        iov++;
    }
    count = uio->uio_resid;
    if (rw == UIO_READ) {
        if (error = (*fp->f_ops->fo_read)(fp, uio, fp->f_cred))
            if (uio->uio_resid != count && (error == ERESTART ||
                error == EINTR || error == EWOULDBLOCK))
                error = 0;
    } else {
        if (error = (*fp->f_ops->fo_write)(fp, uio, fp->f_cred)) {
            if (uio->uio_resid != count && (error == ERESTART ||
                error == EINTR || error == EWOULDBLOCK))
                error = 0;
        }
    }
    *retval = count - uio->uio_resid;
    return (error);
}
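/*
 * The readv()/writev() paths above copy the caller's iovec array into the
 * kernel with copyin() and hand it to rwuio(), which rejects negative
 * lengths and sums the lengths into uio_resid. A minimal user-space sketch
 * of the scatter read that drives this path (standard POSIX readv(2); fd
 * is assumed to be an already-open descriptor):
 *
 *	#include <sys/uio.h>
 *	#include <unistd.h>
 *
 *	char hdr[16], body[4096];
 *	struct iovec iov[2];
 *	ssize_t n;
 *
 *	iov[0].iov_base = hdr;  iov[0].iov_len = sizeof(hdr);
 *	iov[1].iov_base = body; iov[1].iov_len = sizeof(body);
 *	n = readv(fd, iov, 2);	// kernel fills hdr first, then body
 */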
/*
 * Ioctl system call.
 */
ioctl(p, uap, retval)
    struct proc *p;
    register struct ioctl_args *uap;
    register_t *retval;
{
    struct file *fp;
    register u_long com;
    register int error;
    register u_int size;
    caddr_t data, memp;
    int tmp;
#define STK_PARAMS 128
    char stkbuf[STK_PARAMS];

    if (error = fdgetf(p, uap->fd, &fp))
        return (error);

    if ((fp->f_flag & (FREAD | FWRITE)) == 0)
        return (EBADF);

    /*
     * ### LD 6/11/97 Hack Alert: this is to get AppleTalk to work
     * while implementing an ATioctl system call.
     */
    {
        extern int appletalk_inited;

        if (appletalk_inited && ((uap->com & 0x0000FFFF) == 0xff99)) {
#ifdef APPLETALK_DEBUG
            kprintf("ioctl: special AppleTalk\n");
#endif
            error = (*fp->f_ops->fo_ioctl)(fp, uap->com, uap->data, p);
            return (error);
        }
    }

    switch (com = uap->com) {
    case FIONCLEX:
        *fdflags(p, uap->fd) &= ~UF_EXCLOSE;
        return (0);
    case FIOCLEX:
        *fdflags(p, uap->fd) |= UF_EXCLOSE;
        return (0);
    }

    /*
     * Interpret high order word to find amount of data to be
     * copied to/from the user's address space.
     */
    size = IOCPARM_LEN(com);
    if (size > IOCPARM_MAX)
        return (ENOTTY);
    memp = NULL;
    if (size > sizeof (stkbuf)) {
        if ((memp = (caddr_t)kalloc(size)) == 0)
            return (ENOMEM);
        data = memp;
    } else
        data = stkbuf;
    if (com&IOC_IN) {
        if (size) {
            error = copyin(uap->data, data, (u_int)size);
            if (error) {
                if (memp)
                    kfree(memp, size);
                return (error);
            }
        } else
            *(caddr_t *)data = uap->data;
    } else if ((com&IOC_OUT) && size)
        /*
         * Zero the buffer so the user always
         * gets back something deterministic.
         */
        bzero(data, size);
    else if (com&IOC_VOID)
        *(caddr_t *)data = uap->data;

    switch (com) {

    case FIONBIO:
        if (tmp = *(int *)data)
            fp->f_flag |= FNONBLOCK;
        else
            fp->f_flag &= ~FNONBLOCK;
        error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, (caddr_t)&tmp, p);
        break;

    case FIOASYNC:
        if (tmp = *(int *)data)
            fp->f_flag |= FASYNC;
        else
            fp->f_flag &= ~FASYNC;
        error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, (caddr_t)&tmp, p);
        break;

    case FIOSETOWN:
        tmp = *(int *)data;
        if (fp->f_type == DTYPE_SOCKET) {
            ((struct socket *)fp->f_data)->so_pgid = tmp;
            error = 0;
            break;
        }
        if (tmp <= 0) {
            tmp = -tmp;
        } else {
            struct proc *p1 = pfind(tmp);
            if (p1 == 0) {
                error = ESRCH;
                break;
            }
            tmp = p1->p_pgrp->pg_id;
        }
        error = (*fp->f_ops->fo_ioctl)
            (fp, (int)TIOCSPGRP, (caddr_t)&tmp, p);
        break;

    case FIOGETOWN:
        if (fp->f_type == DTYPE_SOCKET) {
            error = 0;
            *(int *)data = ((struct socket *)fp->f_data)->so_pgid;
            break;
        }
        error = (*fp->f_ops->fo_ioctl)(fp, TIOCGPGRP, data, p);
        *(int *)data = -*(int *)data;
        break;

    default:
        error = (*fp->f_ops->fo_ioctl)(fp, com, data, p);
        /*
         * Copy any data to user, size was
         * already set and checked above.
         */
        if (error == 0 && (com&IOC_OUT) && size)
            error = copyout(data, uap->data, (u_int)size);
        break;
    }
    if (memp)
        kfree(memp, size);
    return (error);
}
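/*
 * The command word decoded above packs the transfer direction and the
 * argument size into its high bits: IOCPARM_LEN() extracts the size, and
 * IOC_IN/IOC_OUT/IOC_VOID select copyin, copyout, or by-value handling.
 * A minimal sketch of how a request built with the standard _IOR() macro
 * is taken apart (FIONREAD, from <sys/filio.h>, is the classic "how many
 * bytes are waiting to be read" request):
 *
 *	u_long com = FIONREAD;            // _IOR('f', 127, int)
 *	u_int  size = IOCPARM_LEN(com);   // sizeof(int): bytes to transfer
 *	if (com & IOC_OUT)                // result flows kernel -> user,
 *		;                         // so ioctl() ends with copyout()
 */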
int selwait, nselcoll;
#define SEL_FIRSTPASS 1
#define SEL_SECONDPASS 2

/*
 * Select system call.
 */
extern int selcontinue(int error);
extern int selprocess(int error, int sel_pass);
static int selscan(struct proc *p, struct _select *sel,
    int nfd, register_t *retval, int sel_pass);
static int selcount(struct proc *p, u_int32_t *ibits, u_int32_t *obits,
    int nfd, int *count, int *nfcount);
select(p, uap, retval)
    register struct proc *p;
    register struct select_args *uap;
    register_t *retval;
{
    int s, error = 0, timo;
    u_int ni, nw, size;
    thread_act_t th_act;
    struct uthread *uth;
    struct _select *sel;
    int needzerofill = 1;
    int count = 0;
    int nfcount = 0;

    th_act = current_act();
    uth = get_bsdthread_info(th_act);
    sel = &uth->uu_state.ss_select;
    retval = (int *)get_bsduthreadrval(th_act);

    if (uap->nd > p->p_fd->fd_nfiles)
        uap->nd = p->p_fd->fd_nfiles; /* forgiving; slightly wrong */

    nw = howmany(uap->nd, NFDBITS);
    ni = nw * sizeof(fd_mask);

    /*
     * if this is the first select by the thread,
     * allocate the space for bits.
     */
    if (sel->nbytes == 0) {
        sel->nbytes = 3 * ni;
        MALLOC(sel->ibits, u_int32_t *, sel->nbytes, M_TEMP, M_WAITOK);
        MALLOC(sel->obits, u_int32_t *, sel->nbytes, M_TEMP, M_WAITOK);
        bzero((caddr_t)sel->ibits, sel->nbytes);
        bzero((caddr_t)sel->obits, sel->nbytes);
        needzerofill = 0;
    }

    /*
     * if the previously allocated space for the bits is smaller
     * than what is requested, reallocate.
     */
    if (sel->nbytes < (3 * ni)) {
        sel->nbytes = (3 * ni);
        FREE(sel->ibits, M_TEMP);
        FREE(sel->obits, M_TEMP);
        MALLOC(sel->ibits, u_int32_t *, sel->nbytes, M_TEMP, M_WAITOK);
        MALLOC(sel->obits, u_int32_t *, sel->nbytes, M_TEMP, M_WAITOK);
        bzero((caddr_t)sel->ibits, sel->nbytes);
        bzero((caddr_t)sel->obits, sel->nbytes);
        needzerofill = 0;
    }

    if (needzerofill) {
        bzero((caddr_t)sel->ibits, sel->nbytes);
        bzero((caddr_t)sel->obits, sel->nbytes);
    }

    /*
     * get the bits from the user address space
     */
#define getbits(name, x) \
    do { \
        if (uap->name && (error = copyin((caddr_t)uap->name, \
            (caddr_t)&sel->ibits[(x) * nw], ni))) \
            goto continuation; \
    } while (0)

    getbits(in, 0);
    getbits(ou, 1);
    getbits(ex, 2);
#undef getbits

    if (uap->tv) {
        error = copyin((caddr_t)uap->tv, (caddr_t)&sel->atv,
            sizeof (sel->atv));
        if (error)
            goto continuation;
        if (itimerfix(&sel->atv)) {
            error = EINVAL;
            goto continuation;
        }
        s = splhigh();
        timeradd(&sel->atv, &time, &sel->atv);
        timo = hzto(&sel->atv);
        splx(s);
    } else
        timo = 0;

    if (error = selcount(p, sel->ibits, sel->obits, uap->nd, &count, &nfcount)) {
        goto continuation;
    }

    sel->count = count;
    sel->nfcount = nfcount;

    size = SIZEOF_WAITQUEUE_SUB + (count * SIZEOF_WAITQUEUE_LINK);
    if (sel->allocsize) {
        if (uth->uu_wqsub == 0)
            panic("select: wql memory smashed");
        /* needed for the select now */
        if (size > sel->allocsize) {
            kfree(uth->uu_wqsub, sel->allocsize);
            sel->allocsize = size;
            uth->uu_wqsub = (wait_queue_sub_t)kalloc(sel->allocsize);
            if (uth->uu_wqsub == (wait_queue_sub_t)NULL)
                panic("failed to allocate memory for waitqueue\n");
            sel->wql = (char *)uth->uu_wqsub + SIZEOF_WAITQUEUE_SUB;
        }
    } else {
        sel->allocsize = size;
        uth->uu_wqsub = (wait_queue_sub_t)kalloc(sel->allocsize);
        if (uth->uu_wqsub == (wait_queue_sub_t)NULL)
            panic("failed to allocate memory for waitqueue\n");
        sel->wql = (char *)uth->uu_wqsub + SIZEOF_WAITQUEUE_SUB;
    }
    bzero(uth->uu_wqsub, size);
    wait_queue_sub_init(uth->uu_wqsub, (SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST));

continuation:
    selprocess(error, SEL_FIRSTPASS);
}
int
selcontinue(int error)
{
    selprocess(error, SEL_SECONDPASS);
}
int
selprocess(error, sel_pass)
    int error;
    int sel_pass;
{
    u_int ni, nw;
    thread_act_t th_act;
    struct uthread *uth;
    struct proc *p;
    struct select_args *uap;
    int *retval;
    struct _select *sel;
    int timo;
    int error2 = 0;

    p = current_proc();
    th_act = current_act();
    uap = (struct select_args *)get_bsduthreadarg(th_act);
    retval = (int *)get_bsduthreadrval(th_act);
    uth = get_bsdthread_info(th_act);
    sel = &uth->uu_state.ss_select;

    /* if it is the first pass, the wait queue is not set up yet */
    if ((error != 0) && (sel_pass == SEL_FIRSTPASS))
        goto done;

    if (uap->tv)
        timo = hzto(&sel->atv);
    else
        timo = 0;
retry:
    p->p_flag |= P_SELECT;

    /* skip scans if the select is just for timeouts */
    if (sel->count) {
        if (sel_pass == SEL_FIRSTPASS)
            wait_queue_sub_clearrefs(uth->uu_wqsub);

        error = selscan(p, sel, uap->nd, retval, sel_pass);
        if (error || *retval) {
            goto done;
        }

        /*
         * if the select was woken up, someone else may already have
         * read the data; go to select again if time permits
         */
        if (sel_pass == SEL_SECONDPASS) {
            /* this should be timercmp(&time, &atv, >=) */
            if (uap->tv && (time.tv_sec > sel->atv.tv_sec ||
                (time.tv_sec == sel->atv.tv_sec &&
                 time.tv_usec >= sel->atv.tv_usec))) {
                /* timed out */
                goto done;
            }
            /* cleanup obits and try again */
            bzero((caddr_t)sel->obits, sel->nbytes);
            sel_pass = SEL_FIRSTPASS;
        }
    }

    /*
     * To effect a poll, the timeout argument should be
     * non-nil, pointing to a zero-valued timeval structure.
     */
    if (uap->tv && (timo == 0)) {
        goto done;
    }

    /* No spurious wakeups due to collisions, no need to check for them */
    if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) {
        sel_pass = SEL_FIRSTPASS;
        goto retry;
    }

    p->p_flag &= ~P_SELECT;

    /* if the select is just for timeout, skip the check */
    if (sel->count && (sel_pass == SEL_SECONDPASS))
        panic("selprocess: 2nd pass assertwaiting");

    /* Wait Queue Subordinate has waitqueue as first element */
    if (wait_queue_assert_wait(uth->uu_wqsub, &selwait, THREAD_ABORTSAFE)) {
        /* if it is true then there are no preposted events */
        error = tsleep1((caddr_t)&selwait, PSOCK | PCATCH,
            "select", timo, selcontinue);
    }
    sel_pass = SEL_SECONDPASS;
    goto retry;

done:
    wait_subqueue_unlink_all(uth->uu_wqsub);
    p->p_flag &= ~P_SELECT;
    /* select is not restarted after signals... */
    if (error == ERESTART)
        error = EINTR;
    if (error == EWOULDBLOCK)
        error = 0;

    nw = howmany(uap->nd, NFDBITS);
    ni = nw * sizeof(fd_mask);

#define putbits(name, x) \
    do { \
        if (uap->name && (error2 = copyout((caddr_t)&sel->obits[(x) * nw], \
            (caddr_t)uap->name, ni))) \
            error = error2; \
    } while (0)

    if (error == 0) {
        putbits(in, 0);
        putbits(ou, 1);
        putbits(ex, 2);
    }
#undef putbits

#if defined (__i386__)
    return (error);
#else
    unix_syscall_return(error);
#endif
}
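/*
 * As the comment in selprocess() notes, a pure poll is requested by passing
 * a non-nil timeout pointing to a zero-valued timeval. A minimal user-space
 * sketch (standard select(2); fd is assumed to be an open descriptor):
 *
 *	#include <sys/types.h>
 *	#include <sys/time.h>
 *	#include <unistd.h>
 *
 *	fd_set rfds;
 *	struct timeval tv;
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(fd, &rfds);
 *	tv.tv_sec = 0;		// zero-valued timeval => poll only,
 *	tv.tv_usec = 0;		// selprocess() returns without sleeping
 *	if (select(fd + 1, &rfds, 0, 0, &tv) > 0 && FD_ISSET(fd, &rfds))
 *		;		// fd is readable right now
 */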
static int
selscan(p, sel, nfd, retval, sel_pass)
    struct proc *p;
    struct _select *sel;
    int nfd;
    register_t *retval;
    int sel_pass;
{
    register struct filedesc *fdp = p->p_fd;
    register int msk, i, j, fd;
    register u_int32_t bits;
    struct file *fp;
    int n = 0;
    int nc = 0;
    static int flag[3] = { FREAD, FWRITE, 0 };
    u_int32_t *iptr, *optr;
    u_int nw;
    u_int32_t *ibits, *obits;
    char *wql;
    char *wql_ptr;
    int count, nfcount;

    /*
     * Problems when reboot; due to MacOSX signal probs
     * in Beaker1C; verify that the p->p_fd is valid.
     */
    if (fdp == NULL) {
        *retval = 0;
        return (EIO);
    }

    ibits = sel->ibits;
    obits = sel->obits;
    wql = sel->wql;

    count = sel->count;
    nfcount = sel->nfcount;

    if (nfcount > count)
        panic("selcount count<nfcount");

    nw = howmany(nfd, NFDBITS);

    if (nfcount < count) {
        /* some or all in kernel funnel */
        for (msk = 0; msk < 3; msk++) {
            iptr = (u_int32_t *)&ibits[msk * nw];
            optr = (u_int32_t *)&obits[msk * nw];
            for (i = 0; i < nfd; i += NFDBITS) {
                bits = iptr[i/NFDBITS];
                while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
                    bits &= ~(1 << j);
                    fp = fdp->fd_ofiles[fd];
                    if (fp == NULL ||
                        (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
                        return (EBADF);
                    }
                    if (sel_pass == SEL_SECONDPASS)
                        wql_ptr = (char *)0;
                    else
                        wql_ptr = (wql + nc * SIZEOF_WAITQUEUE_LINK);
                    if (fp->f_ops && (fp->f_type != DTYPE_SOCKET)
                        && (*fp->f_ops->fo_select)(fp, flag[msk], wql_ptr, p)) {
                        optr[fd/NFDBITS] |= (1 << (fd % NFDBITS));
                        n++;
                    }
                    nc++;
                }
            }
        }
    }

    if (nfcount) {
        /* socket file descriptors for scan */
        thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

        for (msk = 0; msk < 3; msk++) {
            iptr = (u_int32_t *)&ibits[msk * nw];
            optr = (u_int32_t *)&obits[msk * nw];
            for (i = 0; i < nfd; i += NFDBITS) {
                bits = iptr[i/NFDBITS];
                while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
                    bits &= ~(1 << j);
                    fp = fdp->fd_ofiles[fd];
                    if (fp == NULL ||
                        (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
                        thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
                        return (EBADF);
                    }
                    if (sel_pass == SEL_SECONDPASS)
                        wql_ptr = (char *)0;
                    else
                        wql_ptr = (wql + nc * SIZEOF_WAITQUEUE_LINK);
                    if (fp->f_ops && (fp->f_type == DTYPE_SOCKET) &&
                        (*fp->f_ops->fo_select)(fp, flag[msk], wql_ptr, p)) {
                        optr[fd/NFDBITS] |= (1 << (fd % NFDBITS));
                        n++;
                    }
                    nc++;
                }
            }
        }
        thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
    }

    *retval = n;
    return (0);
}
/*ARGSUSED*/
seltrue(dev, flag, p)
    dev_t dev;
    int flag;
    struct proc *p;
{
    return (1);
}
static int
selcount(p, ibits, obits, nfd, count, nfcount)
    struct proc *p;
    u_int32_t *ibits, *obits;
    int nfd;
    int *count;
    int *nfcount;
{
    register struct filedesc *fdp = p->p_fd;
    register int msk, i, j, fd;
    register u_int32_t bits;
    struct file *fp;
    int n = 0;
    int nfc = 0;
    static int flag[3] = { FREAD, FWRITE, 0 };
    u_int32_t *iptr, *fptr, *fbits;
    u_int nw;

    /*
     * Problems when reboot; due to MacOSX signal probs
     * in Beaker1C; verify that the p->p_fd is valid.
     */
    if (fdp == NULL) {
        *count = 0;
        *nfcount = 0;
        return (EIO);
    }

    nw = howmany(nfd, NFDBITS);

    for (msk = 0; msk < 3; msk++) {
        iptr = (u_int32_t *)&ibits[msk * nw];
        for (i = 0; i < nfd; i += NFDBITS) {
            bits = iptr[i/NFDBITS];
            while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
                bits &= ~(1 << j);
                fp = fdp->fd_ofiles[fd];
                if (fp == NULL ||
                    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
                    *count = 0;
                    *nfcount = 0;
                    return (EBADF);
                }
                if (fp->f_type == DTYPE_SOCKET)
                    nfc++;
                n++;
            }
        }
    }
    *count = n;
    *nfcount = nfc;
    return (0);
}
/*
 * Record a select request.
 */
void
selrecord(selector, sip, p_wql)
    struct proc *selector;
    struct selinfo *sip;
    void *p_wql;
{
    thread_act_t cur_act = current_act();
    struct uthread *ut = get_bsdthread_info(cur_act);

    /* need to look at collisions */

    if ((p_wql == (void *)0) && ((sip->si_flags & SI_INITED) == 0)) {
        return;
    }

    /* do not record if this is the second pass of select */
    if ((p_wql == (void *)0)) {
        return;
    }

    if ((sip->si_flags & SI_INITED) == 0) {
        wait_queue_init(&sip->wait_queue, SYNC_POLICY_FIFO);
        sip->si_flags |= SI_INITED;
        sip->si_flags &= ~SI_CLEAR;
    }

    if (sip->si_flags & SI_RECORDED) {
        sip->si_flags |= SI_COLL;
    } else
        sip->si_flags &= ~SI_COLL;

    sip->si_flags |= SI_RECORDED;
    if (!wait_queue_member(&sip->wait_queue, ut->uu_wqsub))
        wait_queue_link_noalloc(&sip->wait_queue, ut->uu_wqsub,
            (wait_queue_link_t)p_wql);

    return;
}
void
selwakeup(sip)
    register struct selinfo *sip;
{
    if ((sip->si_flags & SI_INITED) == 0) {
        return;
    }

    if (sip->si_flags & SI_COLL) {
        nselcoll++;
        sip->si_flags &= ~SI_COLL;
#if 0
        /* will not support */
        //wakeup((caddr_t)&selwait);
#endif
    }

    if (sip->si_flags & SI_RECORDED) {
        wait_queue_wakeup_all(&sip->wait_queue, &selwait, THREAD_AWAKENED);
        sip->si_flags &= ~SI_RECORDED;
    }
}
void
selthreadclear(sip)
    register struct selinfo *sip;
{
    if ((sip->si_flags & SI_INITED) == 0) {
        return;
    }
    if (sip->si_flags & SI_RECORDED) {
        selwakeup(sip);
        sip->si_flags &= ~(SI_RECORDED | SI_COLL);
    }
    sip->si_flags |= SI_CLEAR;
    wait_queue_unlinkall_nofree(&sip->wait_queue);
}
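/*
 * selrecord()/selwakeup() are the hooks a driver's select entry point uses
 * with the machinery above: selscan() passes a wait-queue link (wql) down
 * through fo_select, the driver hands it to selrecord() when the descriptor
 * is not ready, and calls selwakeup() later when data arrives. A minimal
 * sketch under those assumptions (fooselect, foo_readable(), and
 * foo_selinfo are hypothetical driver state, not part of this file):
 *
 *	int
 *	fooselect(dev, which, wql, p)
 *		dev_t dev; int which; void *wql; struct proc *p;
 *	{
 *		if (foo_readable(dev))
 *			return (1);               // ready: report it at once
 *		selrecord(p, &foo_selinfo, wql);  // remember the selecting thread
 *		return (0);
 *	}
 *
 *	// completion/interrupt path, when new data shows up:
 *	//	selwakeup(&foo_selinfo);
 */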
extern struct eventqelt *evprocdeque(struct proc *p, struct eventqelt *eqp);

/*
 * called upon socket close. deque and free all events for
 * the socket.
 */
void
evsofree(struct socket *sp)
{
    struct eventqelt *eqp, *next;

    if (sp == NULL)
        return;

    for (eqp = sp->so_evlist.tqh_first; eqp != NULL; eqp = next) {
        next = eqp->ee_slist.tqe_next;
        evprocdeque(eqp->ee_proc, eqp);		// remove from proc q if there
        TAILQ_REMOVE(&sp->so_evlist, eqp, ee_slist);	// remove from socket q
        FREE(eqp, M_TEMP);
    }
}


#define DBG_EVENT	0x10

#define DBG_POST	0x10
#define DBG_WATCH	0x11
#define DBG_WAIT	0x12
#define DBG_MOD		0x13
#define DBG_EWAKEUP	0x14
#define DBG_ENQUEUE	0x15
#define DBG_DEQUEUE	0x16

#define DBG_MISC_POST		MISCDBG_CODE(DBG_EVENT,DBG_POST)
#define DBG_MISC_WATCH		MISCDBG_CODE(DBG_EVENT,DBG_WATCH)
#define DBG_MISC_WAIT		MISCDBG_CODE(DBG_EVENT,DBG_WAIT)
#define DBG_MISC_MOD		MISCDBG_CODE(DBG_EVENT,DBG_MOD)
#define DBG_MISC_EWAKEUP	MISCDBG_CODE(DBG_EVENT,DBG_EWAKEUP)
#define DBG_MISC_ENQUEUE	MISCDBG_CODE(DBG_EVENT,DBG_ENQUEUE)
#define DBG_MISC_DEQUEUE	MISCDBG_CODE(DBG_EVENT,DBG_DEQUEUE)
/*
 * enqueue this event if it's not already queued. wakeup
 * the proc if we do queue this event to it.
 */
void
evprocenque(struct eventqelt *eqp)
{
    struct proc *p;

    KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_START, eqp, eqp->ee_flags, eqp->ee_eventmask,0,0);
    if (eqp->ee_flags & EV_QUEUED) {
        KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0);
        return;
    }
    eqp->ee_flags |= EV_QUEUED;
    eqp->ee_eventmask = 0;		// disarm
    p = eqp->ee_proc;
    TAILQ_INSERT_TAIL(&p->p_evlist, eqp, ee_plist);
    KERNEL_DEBUG(DBG_MISC_EWAKEUP, 0,0,0,eqp,0);
    wakeup(&p->p_evlist);
    KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0);
}
/*
 * given either a sockbuf or a socket, run down the
 * event list and queue ready events found.
 */
void
postevent(struct socket *sp, struct sockbuf *sb, int event)
{
    int mask;
    struct eventqelt *evq;
    register struct tcpcb *tp;

    if (sb)
        sp = sb->sb_so;
    if (!sp || sp->so_evlist.tqh_first == NULL)
        return;

    KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, event,0,0,0,0);

    for (evq = sp->so_evlist.tqh_first;
         evq != NULL; evq = evq->ee_slist.tqe_next) {

        mask = 0;

        /* ready for reading:
             - byte cnt >= receive low water mark
             - read-half of conn closed
             - conn pending for listening sock
             - socket error pending

           ready for writing:
             - byte cnt avail >= send low water mark
             - write half of conn closed
             - socket error pending
             - non-blocking conn completed successfully

           exception pending:
             - out of band data
             - sock at out of band mark
         */
        switch (event & EV_DMASK) {

        case EV_RWBYTES:
        case EV_OOB:
        case EV_RWBYTES|EV_OOB:
            if (event & EV_OOB) {
                if ((evq->ee_eventmask & EV_EX)) {
                    if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK))) {
                        mask |= EV_EX|EV_OOB;
                    }
                }
            }
            if (event & EV_RWBYTES) {
                if ((evq->ee_eventmask & EV_RE) && soreadable(sp)) {
                    if ((sp->so_type == SOCK_STREAM) &&
                        ((sp->so_error == ECONNREFUSED) ||
                         (sp->so_error == ECONNRESET))) {
                        if ((sp->so_pcb == 0) ||
                            !(tp = sototcpcb(sp)) ||
                            (tp->t_state == TCPS_CLOSED)) {
                            mask |= EV_RE|EV_RESET;
                            break;
                        }
                    }
                    if (sp->so_state & SS_CANTRCVMORE) {
                        mask |= EV_RE|EV_FIN;
                        evq->ee_req.er_rcnt = sp->so_rcv.sb_cc;
                        break;
                    }
                    mask |= EV_RE;
                    evq->ee_req.er_rcnt = sp->so_rcv.sb_cc;
                }

                if ((evq->ee_eventmask & EV_WR) && sowriteable(sp)) {
                    if ((sp->so_type == SOCK_STREAM) &&
                        ((sp->so_error == ECONNREFUSED) ||
                         (sp->so_error == ECONNRESET))) {
                        if ((sp->so_pcb == 0) ||
                            !(tp = sototcpcb(sp)) ||
                            (tp->t_state == TCPS_CLOSED)) {
                            mask |= EV_WR|EV_RESET;
                            break;
                        }
                    }
                    mask |= EV_WR;
                    evq->ee_req.er_wcnt = sbspace(&sp->so_snd);
                }
            }
            break;

        case EV_RCONN:
            if ((evq->ee_eventmask & EV_RE)) {
                evq->ee_req.er_rcnt = sp->so_qlen + 1;	// incl this one
                mask |= EV_RE|EV_RCONN;
            }
            break;

        case EV_WCONN:
            if ((evq->ee_eventmask & EV_WR)) {
                mask |= EV_WR|EV_WCONN;
            }
            break;

        case EV_RCLOSED:
            if ((evq->ee_eventmask & EV_RE)) {
                mask |= EV_RE|EV_RCLOSED;
            }
            break;

        case EV_WCLOSED:
            if ((evq->ee_eventmask & EV_WR)) {
                mask |= EV_WR|EV_WCLOSED;
            }
            break;

        case EV_FIN:
            if (evq->ee_eventmask & EV_RE) {
                mask |= EV_RE|EV_FIN;
            }
            break;

        case EV_RESET:
        case EV_TIMEOUT:
            if (evq->ee_eventmask & EV_RE) {
                mask |= EV_RE | event;
            }
            if (evq->ee_eventmask & EV_WR) {
                mask |= EV_WR | event;
            }
            break;
        }

        if (mask) {
            evq->ee_req.er_eventbits |= mask;
            KERNEL_DEBUG(DBG_MISC_POST, evq, evq->ee_req.er_eventbits, mask,0,0);
            evprocenque(evq);
        }
    }
    KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, 0,0,0,0,0);
}
/*
 * remove and return the first event (eqp=NULL) or a specific
 * event, or return NULL if no events found.
 */
struct eventqelt *
evprocdeque(struct proc *p, struct eventqelt *eqp)
{
    KERNEL_DEBUG(DBG_MISC_DEQUEUE|DBG_FUNC_START, p, eqp, 0,0,0);

    if (eqp && ((eqp->ee_flags & EV_QUEUED) == NULL)) {
        KERNEL_DEBUG(DBG_MISC_DEQUEUE|DBG_FUNC_END, 0,0,0,0,0);
        return (NULL);
    }
    if (p->p_evlist.tqh_first == NULL) {
        KERNEL_DEBUG(DBG_MISC_DEQUEUE|DBG_FUNC_END, 0,0,0,0,0);
        return (NULL);
    }
    if (eqp == NULL) {	// remove first
        eqp = p->p_evlist.tqh_first;
    }
    TAILQ_REMOVE(&p->p_evlist, eqp, ee_plist);
    eqp->ee_flags &= ~EV_QUEUED;
    KERNEL_DEBUG(DBG_MISC_DEQUEUE|DBG_FUNC_END, eqp, 0,0,0,0);
    return (eqp);
}
{
1224 struct eventreq
*u_req
;
1230 * watchevent system call. user passes us an event to watch
1231 * for. we malloc an event object, initialize it, and queue
1232 * it to the open socket. when the event occurs, postevent()
1233 * will enque it back to our proc where we can retrieve it
1236 * should this prevent duplicate events on same socket?
1239 watchevent(p
, uap
, retval
)
1241 struct evwatch_args
*uap
;
1244 struct eventqelt
*eqp
= (struct eventqelt
*)0;
1245 struct eventqelt
*np
;
1246 struct eventreq
*erp
;
1251 KERNEL_DEBUG(DBG_MISC_WATCH
|DBG_FUNC_START
, 0,0,0,0,0);
1253 // get a qelt and fill with users req
1254 MALLOC(eqp
, struct eventqelt
*, sizeof(struct eventqelt
), M_TEMP
, M_WAITOK
);
1255 if (!eqp
) panic("can't MALLOC eqp");
1257 // get users request pkt
1258 if (error
= copyin((caddr_t
)uap
->u_req
, (caddr_t
)erp
,
1259 sizeof(struct eventreq
))) {
1261 KERNEL_DEBUG(DBG_MISC_WATCH
|DBG_FUNC_END
, error
,0,0,0,0);
1264 KERNEL_DEBUG(DBG_MISC_WATCH
, erp
->er_handle
,uap
->u_eventmask
,eqp
,0,0);
1265 // validate, freeing qelt if errors
1267 if (erp
->er_type
!= EV_FD
) {
1269 } else if (erp
->er_handle
< 0) {
1271 } else if (erp
->er_handle
> p
->p_fd
->fd_nfiles
) {
1273 } else if ((fp
= *fdfile(p
, erp
->er_handle
)) == NULL
) {
1275 } else if (fp
->f_type
!= DTYPE_SOCKET
) {
1280 KERNEL_DEBUG(DBG_MISC_WATCH
|DBG_FUNC_END
, error
,0,0,0,0);
1284 erp
->er_rcnt
= erp
->er_wcnt
= erp
->er_eventbits
= 0;
1286 eqp
->ee_eventmask
= uap
->u_eventmask
& EV_MASK
;
1289 sp
= (struct socket
*)fp
->f_data
;
1292 // only allow one watch per file per proc
1293 for (np
= sp
->so_evlist
.tqh_first
; np
!= NULL
; np
= np
->ee_slist
.tqe_next
) {
1294 if (np
->ee_proc
== p
) {
1296 KERNEL_DEBUG(DBG_MISC_WATCH
|DBG_FUNC_END
, EINVAL
,0,0,0,0);
1301 TAILQ_INSERT_TAIL(&sp
->so_evlist
, eqp
, ee_slist
);
1302 postevent(sp
, 0, EV_RWBYTES
); // catch existing events
1303 KERNEL_DEBUG(DBG_MISC_WATCH
|DBG_FUNC_END
, 0,0,0,0,0);
struct evwait_args {
    struct eventreq *u_req;
    struct timeval *tv;
};

/*
 * waitevent system call.
 * grabs the next waiting event for this proc and returns
 * it. if no events, user can request to sleep with timeout
 * or poll mode (tv=NULL);
 */
int
waitevent(p, uap, retval)
    struct proc *p;
    struct evwait_args *uap;
    register_t *retval;
{
    int error = 0;
    struct eventqelt *eqp;
    struct timeval atv;
    int timo;

    if (uap->tv) {
        error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
            sizeof (atv));
        if (error)
            return (error);
        if (itimerfix(&atv)) {
            error = EINVAL;
            return (error);
        }
        timeradd(&atv, &time, &atv);
        timo = hzto(&atv);
    } else
        timo = 0;

    KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_START, 0,0,0,0,0);

retry:
    if ((eqp = evprocdeque(p, NULL)) != NULL) {
        error = copyout((caddr_t)&eqp->ee_req, (caddr_t)uap->u_req,
            sizeof(struct eventreq));
        KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,
            eqp->ee_req.er_handle, eqp->ee_req.er_eventbits, eqp, 0);
        return (error);
    } else {
        if (uap->tv && (timo == 0)) {
            *retval = 1;	// poll failed
            KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,0,0,0,0);
            return (error);
        }

        KERNEL_DEBUG(DBG_MISC_WAIT, 1, &p->p_evlist, 0,0,0);
        error = tsleep(&p->p_evlist, PSOCK | PCATCH, "waitevent", timo);
        KERNEL_DEBUG(DBG_MISC_WAIT, 2, &p->p_evlist, 0,0,0);
        if (error == 0)
            goto retry;
        if (error == ERESTART)
            error = EINTR;
        if (error == EWOULDBLOCK) {
            *retval = 1;
            error = 0;
        }
    }
    KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, 0,0,0,0,0);
    return (error);
}
struct modwatch_args {
    struct eventreq *u_req;
    int u_eventmask;
};

/*
 * modwatch system call. user passes in event to modify.
 * if we find it, we reset the event bits and queue/dequeue
 * the event as needed.
 */
int
modwatch(p, uap, retval)
    struct proc *p;
    struct modwatch_args *uap;
    register_t *retval;
{
    struct eventreq er;
    struct eventreq *erp = &er;
    struct eventqelt *evq;
    int error;
    struct file *fp;
    struct socket *sp;
    int flag;

    KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_START, 0,0,0,0,0);

    // get users request pkt
    if (error = copyin((caddr_t)uap->u_req, (caddr_t)erp,
        sizeof(struct eventreq)))
        return (error);

    if (erp->er_type != EV_FD)
        return (EINVAL);
    if (erp->er_handle < 0)
        return (EBADF);
    if (erp->er_handle > p->p_fd->fd_nfiles)
        return (EBADF);
    if ((fp = *fdfile(p, erp->er_handle)) == NULL)
        return (EBADF);
    if (fp->f_type != DTYPE_SOCKET)
        return (EINVAL);	// for now must be sock
    sp = (struct socket *)fp->f_data;

    // locate event if possible
    for (evq = sp->so_evlist.tqh_first;
         evq != NULL; evq = evq->ee_slist.tqe_next) {
        if (evq->ee_proc == p)
            break;
    }

    if (evq == NULL) {
        KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, EINVAL,0,0,0,0);
        return (EINVAL);
    }
    KERNEL_DEBUG(DBG_MISC_MOD, erp->er_handle, uap->u_eventmask, evq, 0,0);

    if (uap->u_eventmask == EV_RM) {
        evprocdeque(p, evq);
        TAILQ_REMOVE(&sp->so_evlist, evq, ee_slist);
        FREE(evq, M_TEMP);
        KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, 0,0,0,0,0);
        return (0);
    }

    switch (uap->u_eventmask & EV_MASK) {

    case 0:
        flag = 0;
        break;

    case EV_RE:
    case EV_WR:
    case EV_RE|EV_WR:
        flag = EV_RWBYTES;
        break;

    case EV_EX:
        flag = EV_OOB;
        break;

    case EV_EX|EV_RE:
    case EV_EX|EV_WR:
    case EV_EX|EV_RE|EV_WR:
        flag = EV_OOB|EV_RWBYTES;
        break;

    default:
        return (EINVAL);
    }

    evq->ee_eventmask = uap->u_eventmask & EV_MASK;
    evprocdeque(p, evq);
    evq->ee_req.er_eventbits = 0;
    postevent(sp, 0, flag);
    KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END,
        evq->ee_req.er_handle, evq->ee_eventmask, sp, flag, 0);
    return (0);
}
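/*
 * Taken together, watchevent()/waitevent()/modwatch() give a process an
 * event queue keyed on sockets. A minimal user-space sketch of the intended
 * flow, assuming libc stubs that mirror the argument order of the handlers
 * above (the struct eventreq fields and EV_* masks are the ones used in
 * this file):
 *
 *	struct eventreq req;
 *	struct timeval tv = { 5, 0 };
 *
 *	req.er_type = EV_FD;                    // only fd (socket) events for now
 *	req.er_handle = sock;                   // socket descriptor to watch
 *	watchevent(&req, EV_RE | EV_WR);        // arm read + write events
 *
 *	if (waitevent(&req, &tv) == 0) {        // block up to 5s for a posting
 *		if (req.er_eventbits & EV_RE)
 *			;                       // er_rcnt bytes ready to read
 *	}
 *
 *	modwatch(&req, EV_RM);                  // drop the watch when done
 */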