]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/sys_generic.c
xnu-344.34.tar.gz
[apple/xnu.git] / bsd / kern / sys_generic.c
1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23 /*
24 * Copyright (c) 1982, 1986, 1989, 1993
25 * The Regents of the University of California. All rights reserved.
26 * (c) UNIX System Laboratories, Inc.
27 * All or some portions of this file are derived from material licensed
28 * to the University of California by American Telephone and Telegraph
29 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
30 * the permission of UNIX System Laboratories, Inc.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)sys_generic.c 8.9 (Berkeley) 2/14/95
61 */
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/filedesc.h>
66 #include <sys/ioctl.h>
67 #include <sys/file.h>
68 #include <sys/proc.h>
69 #include <sys/socketvar.h>
70 #include <sys/uio.h>
71 #include <sys/kernel.h>
72 #include <sys/stat.h>
73 #include <sys/malloc.h>
74
75 #include <sys/mount.h>
76 #include <sys/protosw.h>
77 #include <sys/ev.h>
78 #include <sys/user.h>
79 #include <sys/kdebug.h>
80 #include <kern/assert.h>
81 #include <kern/thread_act.h>
82
83 #include <sys/mbuf.h>
84 #include <sys/socket.h>
85 #include <sys/socketvar.h>
86 #include <sys/errno.h>
87
88 #include <net/if.h>
89 #include <net/route.h>
90
91 #include <netinet/in.h>
92 #include <netinet/in_systm.h>
93 #include <netinet/ip.h>
94 #include <netinet/in_pcb.h>
95 #include <netinet/ip_var.h>
96 #include <netinet/ip6.h>
97 #include <netinet/tcp.h>
98 #include <netinet/tcp_fsm.h>
99 #include <netinet/tcp_seq.h>
100 #include <netinet/tcp_timer.h>
101 #include <netinet/tcp_var.h>
102 #include <netinet/tcpip.h>
103 #include <netinet/tcp_debug.h>
104 /* for wait queue based select */
105 #include <kern/wait_queue.h>
106 #if KTRACE
107 #include <sys/ktrace.h>
108 #endif
109
110 static int dofileread __P((struct proc *, struct file *, int, void *,
111 size_t, off_t, int, int*));
112 static int dofilewrite __P((struct proc *, struct file *, int,
113 const void *, size_t, off_t, int, int*));
114
115 static struct file*
116 holdfp(fdp, fd, flag)
117 struct filedesc* fdp;
118 int fd, flag;
119 {
120 struct file* fp;
121
122 if (((u_int)fd) >= fdp->fd_nfiles ||
123 (fp = fdp->fd_ofiles[fd]) == NULL ||
124 (fp->f_flag & flag) == 0) {
125 return (NULL);
126 }
127 if (fref(fp) == -1)
128 return (NULL);
129 return (fp);
130 }
131
132 /*
133 * Read system call.
134 */
135 #ifndef _SYS_SYSPROTO_H_
136 struct read_args {
137 int fd;
138 char *cbuf;
139 u_int nbyte;
140 };
141 #endif
142 int
143 read(p, uap, retval)
144 struct proc *p;
145 register struct read_args *uap;
146 register_t *retval;
147 {
148 register struct file *fp;
149 int error;
150
151 if ((fp = holdfp(p->p_fd, uap->fd, FREAD)) == NULL)
152 return (EBADF);
153 error = dofileread(p, fp, uap->fd, uap->cbuf, uap->nbyte,
154 (off_t)-1, 0, retval);
155 frele(fp);
156 return(error);
157 }
158
159 /*
160 * Pread system call
161 */
162 #ifndef _SYS_SYSPROTO_H_
163 struct pread_args {
164 int fd;
165 void *buf;
166 size_t nbyte;
167 #ifdef DOUBLE_ALIGN_PARAMS
168 int pad;
169 #endif
170 off_t offset;
171 };
172 #endif
173 int
174 pread(p, uap, retval)
175 struct proc *p;
176 register struct pread_args *uap;
177 int *retval;
178 {
179 register struct file *fp;
180 int error;
181
182 if ((fp = holdfp(p->p_fd, uap->fd, FREAD)) == NULL)
183 return (EBADF);
184 if (fp->f_type != DTYPE_VNODE) {
185 error = ESPIPE;
186 } else {
187 error = dofileread(p, fp, uap->fd, uap->buf, uap->nbyte,
188 uap->offset, FOF_OFFSET, retval);
189 }
190 frele(fp);
191 return(error);
192 }
193
194 /*
195 * Code common for read and pread
196 */
197 int
198 dofileread(p, fp, fd, buf, nbyte, offset, flags, retval)
199 struct proc *p;
200 struct file *fp;
201 int fd, flags;
202 void *buf;
203 size_t nbyte;
204 off_t offset;
205 int *retval;
206 {
207 struct uio auio;
208 struct iovec aiov;
209 long cnt, error = 0;
210 #if KTRACE
211 struct iovec ktriov;
212 struct uio ktruio;
213 int didktr = 0;
214 #endif
215
216 aiov.iov_base = (caddr_t)buf;
217 aiov.iov_len = nbyte;
218 auio.uio_iov = &aiov;
219 auio.uio_iovcnt = 1;
220 auio.uio_offset = offset;
221 if (nbyte > INT_MAX)
222 return (EINVAL);
223 auio.uio_resid = nbyte;
224 auio.uio_rw = UIO_READ;
225 auio.uio_segflg = UIO_USERSPACE;
226 auio.uio_procp = p;
227 #if KTRACE
228 /*
229 * if tracing, save a copy of iovec
230 */
231 if (KTRPOINT(p, KTR_GENIO)) {
232 ktriov = aiov;
233 ktruio = auio;
234 didktr = 1;
235 }
236 #endif
237 cnt = nbyte;
238
239 if ((error = fo_read(fp, &auio, fp->f_cred, flags, p))) {
240 if (auio.uio_resid != cnt && (error == ERESTART ||
241 error == EINTR || error == EWOULDBLOCK))
242 error = 0;
243 }
244 cnt -= auio.uio_resid;
245 #if KTRACE
246 if (didktr && error == 0) {
247 ktruio.uio_iov = &ktriov;
248 ktruio.uio_resid = cnt;
249 ktrgenio(p->p_tracep, fd, UIO_READ, &ktruio, error,
250 KERNEL_FUNNEL);
251 }
252 #endif
253 *retval = cnt;
254 return (error);
255 }
256
257 /*
258 * Scatter read system call.
259 */
260 #ifndef _SYS_SYSPROTO_H_
261 struct readv_args {
262 int fd;
263 struct iovec *iovp;
264 u_int iovcnt;
265 };
266 #endif
267 int
268 readv(p, uap, retval)
269 struct proc *p;
270 register struct readv_args *uap;
271 int *retval;
272 {
273 struct uio auio;
274 register struct iovec *iov;
275 int error;
276 struct iovec aiov[UIO_SMALLIOV];
277
278 if (uap->iovcnt > UIO_SMALLIOV) {
279 if (uap->iovcnt > UIO_MAXIOV)
280 return (EINVAL);
281 if ((iov = (struct iovec *)
282 kalloc(sizeof(struct iovec) * (uap->iovcnt))) == 0)
283 return (ENOMEM);
284 } else
285 iov = aiov;
286 auio.uio_iov = iov;
287 auio.uio_iovcnt = uap->iovcnt;
288 auio.uio_rw = UIO_READ;
289 error = copyin((caddr_t)uap->iovp, (caddr_t)iov,
290 uap->iovcnt * sizeof (struct iovec));
291 if (!error)
292 error = rwuio(p, uap->fd, &auio, UIO_READ, retval);
293 if (uap->iovcnt > UIO_SMALLIOV)
294 kfree(iov, sizeof(struct iovec)*uap->iovcnt);
295 return (error);
296 }
297
298 /*
299 * Write system call
300 */
301 #ifndef _SYS_SYSPROTO_H_
302 struct write_args {
303 int fd;
304 char *cbuf;
305 u_int nbyte;
306 };
307 #endif
308 int
309 write(p, uap, retval)
310 struct proc *p;
311 register struct write_args *uap;
312 int *retval;
313 {
314 register struct file *fp;
315 int error;
316
317 if ((fp = holdfp(p->p_fd, uap->fd, FWRITE)) == NULL)
318 return (EBADF);
319 error = dofilewrite(p, fp, uap->fd, uap->cbuf, uap->nbyte,
320 (off_t)-1, 0, retval);
321 frele(fp);
322 return(error);
323 }
324
325 /*
326 * Pwrite system call
327 */
328 #ifndef _SYS_SYSPROTO_H_
329 struct pwrite_args {
330 int fd;
331 const void *buf;
332 size_t nbyte;
333 #ifdef DOUBLE_ALIGN_PARAMS
334 int pad;
335 #endif
336 off_t offset;
337 };
338 #endif
339 int
340 pwrite(p, uap, retval)
341 struct proc *p;
342 register struct pwrite_args *uap;
343 int *retval;
344 {
345 register struct file *fp;
346 int error;
347
348 if ((fp = holdfp(p->p_fd, uap->fd, FWRITE)) == NULL)
349 return (EBADF);
350 if (fp->f_type != DTYPE_VNODE) {
351 error = ESPIPE;
352 } else {
353 error = dofilewrite(p, fp, uap->fd, uap->buf, uap->nbyte,
354 uap->offset, FOF_OFFSET, retval);
355 }
356 frele(fp);
357 return(error);
358 }
359
/*
 * Code common for write and pwrite.
 *
 * Builds a single-entry uio over the user buffer and calls the file's
 * fo_write method.  With FOF_OFFSET in 'flags' the write happens at
 * 'offset', otherwise at the file's current offset.  Bytes actually
 * written are returned through 'retval'.
 */
static int
dofilewrite(p, fp, fd, buf, nbyte, offset, flags, retval)
	struct proc *p;
	struct file *fp;
	int fd, flags;
	const void *buf;
	size_t nbyte;
	off_t offset;
	int *retval;
{
	struct uio auio;
	struct iovec aiov;
	long cnt, error = 0;
#if KTRACE
	struct iovec ktriov;
	struct uio ktruio;
	int didktr = 0;
#endif

	/* cast away const: the uio machinery takes a non-const base */
	aiov.iov_base = (void *)(uintptr_t)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = offset;
	/* uio_resid is signed; refuse transfers it cannot represent */
	if (nbyte > INT_MAX)
		return (EINVAL);
	auio.uio_resid = nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;
#if KTRACE
	/*
	 * if tracing, save a copy of iovec and uio
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		ktriov = aiov;
		ktruio = auio;
		didktr = 1;
	}
#endif
	cnt = nbyte;
	if (fp->f_type == DTYPE_VNODE)
		bwillwrite();	/* throttle ahead of filesystem writes */
	if ((error = fo_write(fp, &auio, fp->f_cred, flags, p))) {
		/* a partial transfer interrupted by a signal still succeeds */
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/*
		 * NOTE(review): unlike rwuio(), EPIPE raises SIGPIPE here
		 * even for sockets -- confirm whether that is intended.
		 */
		if (error == EPIPE)
			psignal(p, SIGPIPE);
	}
	cnt -= auio.uio_resid;	/* bytes actually written */
#if KTRACE
	if (didktr && error == 0) {
		ktruio.uio_iov = &ktriov;
		ktruio.uio_resid = cnt;
		ktrgenio(p->p_tracep, fd, UIO_WRITE, &ktruio, error,
		    KERNEL_FUNNEL);
	}
#endif
	*retval = cnt;
	return (error);
}
422
423 /*
424 * Gather write system call
425 */
426 #ifndef _SYS_SYSPROTO_H_
427 struct writev_args {
428 int fd;
429 struct iovec *iovp;
430 u_int iovcnt;
431 };
432 #endif
433 int
434 writev(p, uap, retval)
435 struct proc *p;
436 register struct writev_args *uap;
437 int *retval;
438 {
439 struct uio auio;
440 register struct iovec *iov;
441 int error;
442 struct iovec aiov[UIO_SMALLIOV];
443
444 if (uap->iovcnt > UIO_SMALLIOV) {
445 if (uap->iovcnt > UIO_MAXIOV)
446 return (EINVAL);
447 if ((iov = (struct iovec *)
448 kalloc(sizeof(struct iovec) * (uap->iovcnt))) == 0)
449 return (ENOMEM);
450 } else
451 iov = aiov;
452 auio.uio_iov = iov;
453 auio.uio_iovcnt = uap->iovcnt;
454 auio.uio_rw = UIO_WRITE;
455 error = copyin((caddr_t)uap->iovp, (caddr_t)iov,
456 uap->iovcnt * sizeof (struct iovec));
457 if (!error)
458 error = rwuio(p, uap->fd, &auio, UIO_WRITE, retval);
459 if (uap->iovcnt > UIO_SMALLIOV)
460 kfree(iov, sizeof(struct iovec)*uap->iovcnt);
461 return (error);
462 }
463
/*
 * Common back end for readv/writev: validate the iovec list, total the
 * lengths into uio_resid, and dispatch to fo_read or fo_write.  The
 * caller has already copied the iovec array into uio->uio_iov.
 * Bytes transferred are returned through 'retval'.
 */
int
rwuio(p, fdes, uio, rw, retval)
	struct proc *p;
	int fdes;
	register struct uio *uio;
	enum uio_rw rw;
	int *retval;
{
	struct file *fp;
	register struct iovec *iov;
	int i, count, flag, error;
#if KTRACE
	struct iovec *ktriov;
	struct uio ktruio;
	int didktr = 0;
	u_int iovlen;
#endif

	if (error = fdgetf(p, fdes, &fp))
		return (error);

	/* descriptor must be open with the matching access mode */
	if ((fp->f_flag&(rw==UIO_READ ? FREAD : FWRITE)) == 0) {
		return(EBADF);
	}
	uio->uio_resid = 0;
	uio->uio_segflg = UIO_USERSPACE;
	uio->uio_procp = p;
	iov = uio->uio_iov;
	for (i = 0; i < uio->uio_iovcnt; i++) {
		/*
		 * NOTE(review): these two checks only catch anything if
		 * iov_len / uio_resid are signed types -- confirm.
		 */
		if (iov->iov_len < 0) {
			return(EINVAL);
		}
		uio->uio_resid += iov->iov_len;
		if (uio->uio_resid < 0) {	/* total overflowed */
			return(EINVAL);
		}
		iov++;
	}
	count = uio->uio_resid;	/* remember requested total */
#if KTRACE
	/*
	 * if tracing, save a copy of iovec (fo_read/fo_write consume it)
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		iovlen = uio->uio_iovcnt * sizeof (struct iovec);
		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)uio->uio_iov, (caddr_t)ktriov, iovlen);
		ktruio = *uio;
		didktr = 1;
	}
#endif

	if (rw == UIO_READ) {
		/* a partial transfer interrupted by a signal still succeeds */
		if (error = fo_read(fp, uio, fp->f_cred, 0, p))
			if (uio->uio_resid != count && (error == ERESTART ||
			    error == EINTR || error == EWOULDBLOCK))
				error = 0;
	} else {
		if (fp->f_type == DTYPE_VNODE)
			bwillwrite();	/* throttle ahead of filesystem writes */
		if (error = fo_write(fp, uio, fp->f_cred, 0, p)) {
			if (uio->uio_resid != count && (error == ERESTART ||
			    error == EINTR || error == EWOULDBLOCK))
				error = 0;
			/* The socket layer handles SIGPIPE */
			if (error == EPIPE && fp->f_type != DTYPE_SOCKET)
				psignal(p, SIGPIPE);
		}
	}

	*retval = count - uio->uio_resid;	/* bytes transferred */

#if KTRACE
	if (didktr) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = *retval;
			ktrgenio(p->p_tracep, fdes, rw, &ktruio, error,
			    KERNEL_FUNNEL);
		}
		FREE(ktriov, M_TEMP);
	}
#endif

	return(error);
}
550
/*
 * Ioctl system call.
 *
 * Decodes the command word: IOCPARM_LEN(com) gives the size of the
 * in/out parameter block, which is staged in an on-stack buffer (or a
 * kalloc'd one when larger than STK_PARAMS) around the fo_ioctl call.
 * FIONCLEX/FIOCLEX are handled entirely here; FIONBIO, FIOASYNC and
 * FIO[SG]ETOWN get special pre/post processing before/after fo_ioctl.
 */
#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
	int fd;
	u_long com;
	caddr_t data;
};
#endif
int
ioctl(p, uap, retval)
	struct proc *p;
	register struct ioctl_args *uap;
	register_t *retval;
{
	struct file *fp;
	register u_long com;
	register int error;
	register u_int size;
	caddr_t data, memp;
	int tmp;
#define STK_PARAMS	128
	char stkbuf[STK_PARAMS];

	if (error = fdgetf(p, uap->fd, &fp))
		return (error);

	/* descriptor must be open for reading or writing */
	if ((fp->f_flag & (FREAD | FWRITE)) == 0)
		return (EBADF);

#if NETAT
	/*
	 * ### LD 6/11/97 Hack Alert: this is to get AppleTalk to work
	 * while implementing an ATioctl system call
	 */
	{
		extern int appletalk_inited;

		if (appletalk_inited && ((uap->com & 0x0000FFFF) == 0xff99)) {
#ifdef APPLETALK_DEBUG
			kprintf("ioctl: special AppleTalk \n");
#endif
			error = fo_ioctl(fp, uap->com, uap->data, p);
			return(error);
		}
	}

#endif /* NETAT */


	/* close-on-exec flags live in the fd table, not the file */
	switch (com = uap->com) {
	case FIONCLEX:
		*fdflags(p, uap->fd) &= ~UF_EXCLOSE;
		return (0);
	case FIOCLEX:
		*fdflags(p, uap->fd) |= UF_EXCLOSE;
		return (0);
	}

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX)
		return (ENOTTY);
	memp = NULL;
	if (size > sizeof (stkbuf)) {
		/* parameter block too big for the stack buffer */
		if ((memp = (caddr_t)kalloc(size)) == 0)
			return(ENOMEM);
		data = memp;
	} else
		data = stkbuf;
	if (com&IOC_IN) {
		if (size) {
			error = copyin(uap->data, data, (u_int)size);
			if (error) {
				if (memp)
					kfree(memp, size);
				return (error);
			}
		} else
			/* zero size: the pointer itself is the argument */
			*(caddr_t *)data = uap->data;
	} else if ((com&IOC_OUT) && size)
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	else if (com&IOC_VOID)
		*(caddr_t *)data = uap->data;

	switch (com) {

	case FIONBIO:
		/* mirror the non-blocking state in f_flag, then tell the file */
		if (tmp = *(int *)data)
			fp->f_flag |= FNONBLOCK;
		else
			fp->f_flag &= ~FNONBLOCK;
		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, p);
		break;

	case FIOASYNC:
		/* mirror the async state in f_flag, then tell the file */
		if (tmp = *(int *)data)
			fp->f_flag |= FASYNC;
		else
			fp->f_flag &= ~FASYNC;
		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, p);
		break;

	case FIOSETOWN:
		tmp = *(int *)data;
		if (fp->f_type == DTYPE_SOCKET) {
			/* sockets store the pgid directly */
			((struct socket *)fp->f_data)->so_pgid = tmp;
			error = 0;
			break;
		}
		if (tmp <= 0) {
			tmp = -tmp;	/* negative value names a process group */
		} else {
			/* positive value names a process; use its pgrp */
			struct proc *p1 = pfind(tmp);
			if (p1 == 0) {
				error = ESRCH;
				break;
			}
			tmp = p1->p_pgrp->pg_id;
		}
		error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, p);
		break;

	case FIOGETOWN:
		if (fp->f_type == DTYPE_SOCKET) {
			error = 0;
			*(int *)data = ((struct socket *)fp->f_data)->so_pgid;
			break;
		}
		error = fo_ioctl(fp, TIOCGPGRP, data, p);
		*(int *)data = -*(int *)data;	/* TIOCGPGRP reports negated */
		break;

	default:
		error = fo_ioctl(fp, com, data, p);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com&IOC_OUT) && size)
			error = copyout(data, uap->data, (u_int)size);
		break;
	}
	if (memp)
		kfree(memp, size);
	return (error);
}
706
707 int selwait, nselcoll;
708 #define SEL_FIRSTPASS 1
709 #define SEL_SECONDPASS 2
710 extern int selcontinue(int error);
711 extern int selprocess(int error, int sel_pass);
712 static int selscan(struct proc *p, struct _select * sel,
713 int nfd, register_t *retval, int sel_pass);
714 static int selcount(struct proc *p, u_int32_t *ibits, u_int32_t *obits,
715 int nfd, int * count, int * nfcount);
716 extern uint64_t tvtoabstime(struct timeval *tvp);
717
/*
 * Select system call.
 *
 * Copies the caller's fd bit vectors into per-uthread storage (sized
 * and reallocated here as needed), converts the optional timeout to an
 * absolute deadline, counts the descriptors to be scanned, sizes the
 * wait-queue-link memory, and then enters selprocess() for the first
 * pass.  'retval' is redirected to the uthread's return-value slot so
 * the continuation path (selcontinue) writes to the same location.
 */
#ifndef _SYS_SYSPROTO_H_
struct select_args {
	int nd;
	u_int32_t *in;
	u_int32_t *ou;
	u_int32_t *ex;
	struct timeval *tv;
};
#endif
int
select(p, uap, retval)
	register struct proc *p;
	register struct select_args *uap;
	register_t *retval;
{
	int error = 0;
	u_int ni, nw, size;
	thread_act_t th_act;
	struct uthread *uth;
	struct _select *sel;
	int needzerofill = 1;
	int kfcount =0;		/* NOTE(review): never used below */
	int nfcount = 0;
	int count = 0;

	th_act = current_act();
	uth = get_bsdthread_info(th_act);
	sel = &uth->uu_state.ss_select;
	retval = (int *)get_bsduthreadrval(th_act);
	*retval = 0;

	if (uap->nd < 0) {
		return (EINVAL);
	}

	if (uap->nd > p->p_fd->fd_nfiles)
		uap->nd = p->p_fd->fd_nfiles; /* forgiving; slightly wrong */

	nw = howmany(uap->nd, NFDBITS);
	ni = nw * sizeof(fd_mask);	/* bytes per bit vector */

	/*
	 * if this is the first select by the thread
	 * allocate the space for bits.
	 */
	if (sel->nbytes == 0) {
		sel->nbytes = 3 * ni;
		MALLOC(sel->ibits, u_int32_t *, sel->nbytes, M_TEMP, M_WAITOK);
		MALLOC(sel->obits, u_int32_t *, sel->nbytes, M_TEMP, M_WAITOK);
		bzero((caddr_t)sel->ibits, sel->nbytes);
		bzero((caddr_t)sel->obits, sel->nbytes);
		needzerofill = 0;
	}

	/*
	 * if the previously allocated space for the bits
	 * is smaller than what is requested. Reallocate.
	 */
	if (sel->nbytes < (3 * ni)) {
		sel->nbytes = (3 * ni);
		FREE(sel->ibits, M_TEMP);
		FREE(sel->obits, M_TEMP);
		MALLOC(sel->ibits, u_int32_t *, sel->nbytes, M_TEMP, M_WAITOK);
		MALLOC(sel->obits, u_int32_t *, sel->nbytes, M_TEMP, M_WAITOK);
		bzero((caddr_t)sel->ibits, sel->nbytes);
		bzero((caddr_t)sel->obits, sel->nbytes);
		needzerofill = 0;
	}

	/* reused buffers must be cleared of the previous call's bits */
	if (needzerofill) {
		bzero((caddr_t)sel->ibits, sel->nbytes);
		bzero((caddr_t)sel->obits, sel->nbytes);
	}

	/*
	 * get the bits from the user address space
	 */
#define	getbits(name, x) \
	do { \
		if (uap->name && (error = copyin((caddr_t)uap->name, \
			(caddr_t)&sel->ibits[(x) * nw], ni))) \
				goto continuation; \
	} while (0)

	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	if (uap->tv) {
		struct timeval atv;

		error = copyin((caddr_t)uap->tv, (caddr_t)&atv, sizeof (atv));
		if (error)
			goto continuation;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto continuation;
		}

		/* deadline = now + requested interval, in absolute time */
		clock_absolutetime_interval_to_deadline(
						tvtoabstime(&atv), &sel->abstime);
	}
	else
		sel->abstime = 0;	/* 0: no timeout, block indefinitely */

	sel->nfcount = 0;
	if (error = selcount(p, sel->ibits, sel->obits, uap->nd, &count, &nfcount)) {
		goto continuation;
	}

	sel->nfcount = nfcount;
	sel->count = count;
	/* one wait queue link per descriptor, behind the sub structure */
	size = SIZEOF_WAITQUEUE_SUB + (count * SIZEOF_WAITQUEUE_LINK);
	if (sel->allocsize) {
		if (uth->uu_wqsub == 0)
			panic("select: wql memory smashed");
		/* needed for the select now */
		if (size > sel->allocsize) {
			/* grow the wait queue memory for this larger select */
			kfree(uth->uu_wqsub, sel->allocsize);
			sel->allocsize = size;
			uth->uu_wqsub = (wait_queue_sub_t)kalloc(sel->allocsize);
			if (uth->uu_wqsub == (wait_queue_sub_t)NULL)
				panic("failed to allocate memory for waitqueue\n");
			sel->wql = (char *)uth->uu_wqsub + SIZEOF_WAITQUEUE_SUB;
		}
	} else {
		/* first select on this thread: allocate wait queue memory */
		sel->count = count;
		sel->allocsize = size;
		uth->uu_wqsub = (wait_queue_sub_t)kalloc(sel->allocsize);
		if (uth->uu_wqsub == (wait_queue_sub_t)NULL)
			panic("failed to allocate memory for waitqueue\n");
		sel->wql = (char *)uth->uu_wqsub + SIZEOF_WAITQUEUE_SUB;
	}
	bzero(uth->uu_wqsub, size);
	wait_queue_sub_init(uth->uu_wqsub, (SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST));

continuation:
	return selprocess(error, SEL_FIRSTPASS);
}
861
/*
 * Continuation entry point for select(): invoked by tsleep1() when the
 * thread resumes after blocking, so the scan is re-run as a second pass.
 */
int
selcontinue(int error)
{
	return selprocess(error, SEL_SECONDPASS);
}
867
/*
 * Core select() engine, shared by the initial call (SEL_FIRSTPASS, from
 * select()) and the continuation after blocking (SEL_SECONDPASS, from
 * selcontinue()).  Scans the descriptors, blocks on the per-thread wait
 * queue when nothing is ready, and on completion copies the output bit
 * vectors back to user space.
 *
 * K&R definition with no parameter declarations: both 'error' and
 * 'sel_pass' default to int.
 */
int
selprocess(error, sel_pass)
{
	int ncoll;
	u_int ni, nw;
	thread_act_t th_act;
	struct uthread *uth;
	struct proc *p;
	struct select_args *uap;
	int *retval;
	struct _select *sel;
	int unwind = 1;
	int prepost = 0;
	int somewakeup = 0;
	int doretry = 0;
	wait_result_t wait_result;

	/* recover the syscall arguments and per-thread select state */
	p = current_proc();
	th_act = current_act();
	uap = (struct select_args *)get_bsduthreadarg(th_act);
	retval = (int *)get_bsduthreadrval(th_act);
	uth = get_bsdthread_info(th_act);
	sel = &uth->uu_state.ss_select;

	/* if it is first pass wait queue is not setup yet */
	if ((error != 0) && (sel_pass == SEL_FIRSTPASS))
		unwind = 0;
	if (sel->count == 0)
		unwind = 0;
retry:
	if (error != 0) {
		goto done;
	}

	ncoll = nselcoll;
	p->p_flag |= P_SELECT;
	/* skip scans if the select is just for timeouts */
	if (sel->count) {
		/* first pass starts with a clean set of wait queue refs */
		if (sel_pass == SEL_FIRSTPASS)
			wait_queue_sub_clearrefs(uth->uu_wqsub);

		error = selscan(p, sel, uap->nd, retval, sel_pass);
		if (error || *retval) {
			goto done;	/* error, or something is ready */
		}
		if (prepost) {
			/*
			 * A preposted event woke us but someone else may
			 * already have consumed the data; rescan if time
			 * permits.
			 */
			prepost = 0;
			doretry = 1;
		}
		if (somewakeup) {
			/* woken without a prepost: rescan as well */
			somewakeup = 0;
			doretry = 1;
		}
	}

	/* timed select: give up once the deadline has passed */
	if (uap->tv) {
		uint64_t now;

		clock_get_uptime(&now);
		if (now >= sel->abstime)
			goto done;
	}

	if (doretry) {
		/* cleanup obits and try again */
		doretry = 0;
		sel_pass = SEL_FIRSTPASS;
		goto retry;
	}

	/*
	 * To effect a poll, the timeout argument should be
	 * non-nil, pointing to a zero-valued timeval structure.
	 */
	if (uap->tv && sel->abstime == 0) {
		goto done;
	}

	/* No spurious wakeups due to colls,no need to check for them */
	if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) {
		sel_pass = SEL_FIRSTPASS;
		goto retry;
	}

	p->p_flag &= ~P_SELECT;

	/* if the select is just for timeout skip check */
	if (sel->count &&(sel_pass == SEL_SECONDPASS))
		panic("selprocess: 2nd pass assertwaiting");

	/* Wait Queue Subordinate has waitqueue as first element */
	wait_result = wait_queue_assert_wait((wait_queue_t)uth->uu_wqsub,
					&selwait, THREAD_ABORTSAFE);
	if (wait_result != THREAD_AWAKENED) {
		/* there are no preposted events */
		error = tsleep1(NULL, PSOCK | PCATCH,
				"select", sel->abstime, selcontinue);
	} else {
		/* an event was preposted while we were scanning */
		prepost = 1;
		error = 0;
	}

	sel_pass = SEL_SECONDPASS;
	if (error == 0) {
		if (!prepost)
			somewakeup =1;
		goto retry;
	}
done:
	if (unwind)
		wait_subqueue_unlink_all(uth->uu_wqsub);
	p->p_flag &= ~P_SELECT;
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;	/* timeout expiry is a normal return */
	nw = howmany(uap->nd, NFDBITS);
	ni = nw * sizeof(fd_mask);

	/* copy the result bit vectors back to user space */
#define	putbits(name, x) \
	do { \
		if (uap->name && (error2 = copyout((caddr_t)&sel->obits[(x) * nw], \
			(caddr_t)uap->name, ni))) \
				error = error2; \
	} while (0)

	if (error == 0) {
		int error2;

		putbits(in, 0);
		putbits(ou, 1);
		putbits(ex, 2);
#undef putbits
	}
	return(error);
}
1008
/*
 * Scan the descriptors named in sel->ibits for readiness, setting the
 * corresponding bits in sel->obits and counting ready descriptors into
 * *retval.
 *
 * Non-socket descriptors are polled under the kernel funnel; sockets
 * are polled under the network funnel in a second sweep.  On the first
 * pass each fo_select() is handed a wait queue link carved from
 * sel->wql so the thread can later be woken; on SEL_SECONDPASS a NULL
 * link is passed so no new registrations occur.
 */
static int
selscan(p, sel, nfd, retval, sel_pass)
	struct proc *p;
	struct _select *sel;
	int nfd;
	register_t *retval;
	int sel_pass;
{
	register struct filedesc *fdp = p->p_fd;
	register int msk, i, j, fd;
	register u_int32_t bits;
	struct file *fp;
	int n = 0;		/* number of ready descriptors */
	int nc = 0;		/* index of the next wait queue link */
	static int flag[3] = { FREAD, FWRITE, 0 };
	u_int32_t *iptr, *optr;
	u_int nw;
	u_int32_t *ibits, *obits;
	char * wql;
	int nfunnel = 0;
	int count, nfcount;
	char * wql_ptr;

	/*
	 * Problems when reboot; due to MacOSX signal probs
	 * in Beaker1C ; verify that the p->p_fd is valid
	 */
	if (fdp == NULL) {
		*retval=0;
		return(EIO);
	}

	ibits = sel->ibits;
	obits = sel->obits;
	wql = sel->wql;

	count = sel->count;
	nfcount = sel->nfcount;

	if (nfcount > count)
		panic("selcount count<nfcount");

	nw = howmany(nfd, NFDBITS);

	nc = 0;
	if ( nfcount < count) {
		/* some or all in kernel funnel */
		for (msk = 0; msk < 3; msk++) {
			iptr = (u_int32_t *)&ibits[msk * nw];
			optr = (u_int32_t *)&obits[msk * nw];
			for (i = 0; i < nfd; i += NFDBITS) {
				bits = iptr[i/NFDBITS];
				/* visit each set bit, lowest first */
				while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
					bits &= ~(1 << j);
					fp = fdp->fd_ofiles[fd];
					if (fp == NULL ||
						(fdp->fd_ofileflags[fd] & UF_RESERVED)) {
						return(EBADF);
					}
					if (sel_pass == SEL_SECONDPASS)
						wql_ptr = (char *)0;
					else
						wql_ptr = (wql+ nc * SIZEOF_WAITQUEUE_LINK);
					if (fp->f_ops && (fp->f_type != DTYPE_SOCKET)
						&& fo_select(fp, flag[msk], wql_ptr, p)) {
						optr[fd/NFDBITS] |= (1 << (fd % NFDBITS));
						n++;
					}
					nc++;
				}
			}
		}
	}

	if (nfcount) {
		/* socket file descriptors for scan */
		thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

		nc = 0;
		for (msk = 0; msk < 3; msk++) {
			iptr = (u_int32_t *)&ibits[msk * nw];
			optr = (u_int32_t *)&obits[msk * nw];
			for (i = 0; i < nfd; i += NFDBITS) {
				bits = iptr[i/NFDBITS];
				while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
					bits &= ~(1 << j);
					fp = fdp->fd_ofiles[fd];
					if (fp == NULL ||
						(fdp->fd_ofileflags[fd] & UF_RESERVED)) {
						/* restore the funnel before bailing */
						thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
						return(EBADF);
					}
					if (sel_pass == SEL_SECONDPASS)
						wql_ptr = (char *)0;
					else
						wql_ptr = (wql+ nc * SIZEOF_WAITQUEUE_LINK);
					if (fp->f_ops && (fp->f_type == DTYPE_SOCKET) &&
						fo_select(fp, flag[msk], wql_ptr, p)) {
						optr[fd/NFDBITS] |= (1 << (fd % NFDBITS));
						n++;
					}
					nc++;
				}
			}
		}
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	}

	*retval = n;
	return (0);
}
1120
/*ARGSUSED*/
/*
 * Generic select routine for devices that are always ready.
 */
int
seltrue(dev, flag, p)
	dev_t dev;
	int flag;
	struct proc *p;
{

	return (1);
}
1131
/*
 * Count the descriptors set in the ibits vectors: total into *count and
 * the number that are sockets into *nfcount.  'obits' is accepted for
 * symmetry with selscan() but is not used.  Returns EBADF if any named
 * descriptor is closed or reserved.
 */
static int
selcount(p, ibits, obits, nfd, count, nfcount)
	struct proc *p;
	u_int32_t *ibits, *obits;
	int nfd;
	int *count;
	int *nfcount;
{
	register struct filedesc *fdp = p->p_fd;
	register int msk, i, j, fd;
	register u_int32_t bits;
	struct file *fp;
	int n = 0;		/* total descriptors counted */
	int nc = 0;		/* NOTE(review): unused */
	int nfc = 0;		/* socket descriptors counted */
	static int flag[3] = { FREAD, FWRITE, 0 };
	u_int32_t *iptr, *fptr, *fbits;	/* NOTE(review): fptr/fbits unused */
	u_int nw;

	/*
	 * Problems when reboot; due to MacOSX signal probs
	 * in Beaker1C ; verify that the p->p_fd is valid
	 */
	if (fdp == NULL) {
		*count=0;
		*nfcount=0;
		return(EIO);
	}

	nw = howmany(nfd, NFDBITS);


	for (msk = 0; msk < 3; msk++) {
		iptr = (u_int32_t *)&ibits[msk * nw];
		for (i = 0; i < nfd; i += NFDBITS) {
			bits = iptr[i/NFDBITS];
			/* visit each set bit, lowest first */
			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
				bits &= ~(1 << j);
				fp = fdp->fd_ofiles[fd];
				if (fp == NULL ||
					(fdp->fd_ofileflags[fd] & UF_RESERVED)) {
					*count=0;
					*nfcount=0;
					return(EBADF);
				}
				if (fp->f_type == DTYPE_SOCKET)
					nfc++;
				n++;
			}
		}
	}
	*count = n;
	*nfcount = nfc;
	return (0);
}
1187
/*
 * Record a select request.
 *
 * Called from a file/driver fo_select routine with 'p_wql' pointing at
 * the wait queue link that selscan() allocated for this descriptor.
 * Links the calling thread's wait queue sub to the selinfo's wait
 * queue so a later selwakeup() can find the thread.  A NULL p_wql
 * marks the second select pass, in which nothing is recorded.
 */
void
selrecord(selector, sip, p_wql)
	struct proc *selector;
	struct selinfo *sip;
	void * p_wql;
{
	thread_act_t	cur_act = current_act();
	struct uthread * ut = get_bsdthread_info(cur_act);

	/* need to look at collisions */

	/* NOTE(review): redundant -- subsumed by the p_wql == 0 test below */
	if ((p_wql == (void *)0) && ((sip->si_flags & SI_INITED) == 0)) {
		return;
	}

	/*do not record if this is second pass of select */
	if((p_wql == (void *)0)) {
		return;
	}

	/* lazily initialize the selinfo's wait queue on first use */
	if ((sip->si_flags & SI_INITED) == 0) {
		wait_queue_init(&sip->wait_queue, SYNC_POLICY_FIFO);
		sip->si_flags |= SI_INITED;
		sip->si_flags &= ~SI_CLEAR;
	}

	/* more than one selector on this object: mark a collision */
	if (sip->si_flags & SI_RECORDED) {
		sip->si_flags |= SI_COLL;
	} else
		sip->si_flags &= ~SI_COLL;

	sip->si_flags |= SI_RECORDED;
	if (!wait_queue_member(&sip->wait_queue, ut->uu_wqsub))
		wait_queue_link_noalloc(&sip->wait_queue, ut->uu_wqsub, (wait_queue_link_t)p_wql);

	return;
}
1228
/*
 * Wake every thread that has recorded a select on this selinfo.
 */
void
selwakeup(sip)
	register struct selinfo *sip;
{

	if ((sip->si_flags & SI_INITED) == 0) {
		return;		/* never selected on; nothing to wake */
	}

	if (sip->si_flags & SI_COLL) {
		/* multiple selectors had collided; count it and clear */
		nselcoll++;
		sip->si_flags &= ~SI_COLL;
#if 0
		/* will not support */
		//wakeup((caddr_t)&selwait);
#endif
	}

	if (sip->si_flags & SI_RECORDED) {
		wait_queue_wakeup_all(&sip->wait_queue, &selwait, THREAD_AWAKENED);
		sip->si_flags &= ~SI_RECORDED;
	}

}
1253
/*
 * Tear down a selinfo (e.g. when the underlying object goes away):
 * wake any recorded selectors, then unlink every thread still attached
 * to the wait queue.  The links themselves are not freed here -- they
 * belong to the selecting threads.
 */
void
selthreadclear(sip)
	register struct selinfo *sip;
{

	if ((sip->si_flags & SI_INITED) == 0) {
		return;
	}
	if (sip->si_flags & SI_RECORDED) {
		selwakeup(sip);
		sip->si_flags &= ~(SI_RECORDED | SI_COLL);
	}
	sip->si_flags |= SI_CLEAR;
	wait_queue_unlinkall_nofree(&sip->wait_queue);
}
1269
1270
1271 extern struct eventqelt *evprocdeque(struct proc *p, struct eventqelt *eqp);
1272
1273 /*
1274 * called upon socket close. deque and free all events for
1275 * the socket
1276 */
1277 void
1278 evsofree(struct socket *sp)
1279 {
1280 struct eventqelt *eqp, *next;
1281
1282 if (sp == NULL) return;
1283
1284 for (eqp = sp->so_evlist.tqh_first; eqp != NULL; eqp = next) {
1285 next = eqp->ee_slist.tqe_next;
1286 evprocdeque(eqp->ee_proc, eqp); // remove from proc q if there
1287 TAILQ_REMOVE(&sp->so_evlist, eqp, ee_slist); // remove from socket q
1288 FREE(eqp, M_TEMP);
1289 }
1290 }
1291
1292
1293 #define DBG_EVENT 0x10
1294
1295 #define DBG_POST 0x10
1296 #define DBG_WATCH 0x11
1297 #define DBG_WAIT 0x12
1298 #define DBG_MOD 0x13
1299 #define DBG_EWAKEUP 0x14
1300 #define DBG_ENQUEUE 0x15
1301 #define DBG_DEQUEUE 0x16
1302
1303 #define DBG_MISC_POST MISCDBG_CODE(DBG_EVENT,DBG_POST)
1304 #define DBG_MISC_WATCH MISCDBG_CODE(DBG_EVENT,DBG_WATCH)
1305 #define DBG_MISC_WAIT MISCDBG_CODE(DBG_EVENT,DBG_WAIT)
1306 #define DBG_MISC_MOD MISCDBG_CODE(DBG_EVENT,DBG_MOD)
1307 #define DBG_MISC_EWAKEUP MISCDBG_CODE(DBG_EVENT,DBG_EWAKEUP)
1308 #define DBG_MISC_ENQUEUE MISCDBG_CODE(DBG_EVENT,DBG_ENQUEUE)
1309 #define DBG_MISC_DEQUEUE MISCDBG_CODE(DBG_EVENT,DBG_DEQUEUE)
1310
1311
/*
 * enque this event if it's not already queued. wakeup
 * the proc if we do queue this event to it.
 */
1316 void
1317 evprocenque(struct eventqelt *eqp)
1318 {
1319 struct proc *p;
1320
1321 assert(eqp);
1322 KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_START, eqp, eqp->ee_flags, eqp->ee_eventmask,0,0);
1323 if (eqp->ee_flags & EV_QUEUED) {
1324 KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0);
1325 return;
1326 }
1327 eqp->ee_flags |= EV_QUEUED;
1328 eqp->ee_eventmask = 0; // disarm
1329 p = eqp->ee_proc;
1330 TAILQ_INSERT_TAIL(&p->p_evlist, eqp, ee_plist);
1331 KERNEL_DEBUG(DBG_MISC_EWAKEUP,0,0,0,eqp,0);
1332 wakeup(&p->p_evlist);
1333 KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0);
1334 }
1335
1336 /*
1337 * given either a sockbuf or a socket run down the
1338 * event list and queue ready events found
1339 */
void
postevent(struct socket *sp, struct sockbuf *sb, int event)
{
	int mask;
	struct eventqelt *evq;
	register struct tcpcb *tp;

	if (sb)
		sp = sb->sb_so;
	if (!sp || sp->so_evlist.tqh_first == NULL)
		return;

	KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, event,0,0,0,0);

	for (evq = sp->so_evlist.tqh_first;
	     evq != NULL; evq = evq->ee_slist.tqe_next) {

		mask = 0;

		/* ready for reading:
		   - byte cnt >= receive low water mark
		   - read-half of conn closed
		   - conn pending for listening sock
		   - socket error pending

		   ready for writing
		   - byte cnt avail >= send low water mark
		   - write half of conn closed
		   - socket error pending
		   - non-blocking conn completed successfully

		   exception pending
		   - out of band data
		   - sock at out of band mark

		*/
		switch (event & EV_DMASK) {

		case EV_RWBYTES:
		case EV_OOB:
		case EV_RWBYTES|EV_OOB:
			if (event & EV_OOB) {
				if ((evq->ee_eventmask & EV_EX)) {
					if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK))) {
						mask |= EV_EX|EV_OOB;
					}
				}
			}
			if (event & EV_RWBYTES) {
				if ((evq->ee_eventmask & EV_RE) && soreadable(sp)) {
					/*
					 * Precedence fix: '&&' binds tighter than '||',
					 * so the old expression treated ANY socket with
					 * so_error == ECONNRESET as if it were a stream
					 * socket.  The refused/reset check is meant to
					 * apply only to SOCK_STREAM sockets.
					 */
					if ((sp->so_type == SOCK_STREAM) &&
					    ((sp->so_error == ECONNREFUSED) ||
					     (sp->so_error == ECONNRESET))) {
						if ((sp->so_pcb == 0) ||
						    !(tp = sototcpcb(sp)) ||
						    (tp->t_state == TCPS_CLOSED)) {
							mask |= EV_RE|EV_RESET;
							break;
						}
					}
					if (sp->so_state & SS_CANTRCVMORE) {
						mask |= EV_RE|EV_FIN;
						evq->ee_req.er_rcnt = sp->so_rcv.sb_cc;
						break;
					}
					mask |= EV_RE;
					evq->ee_req.er_rcnt = sp->so_rcv.sb_cc;
				}

				if ((evq->ee_eventmask & EV_WR) && sowriteable(sp)) {
					/* same precedence fix as the read side above */
					if ((sp->so_type == SOCK_STREAM) &&
					    ((sp->so_error == ECONNREFUSED) ||
					     (sp->so_error == ECONNRESET))) {
						if ((sp->so_pcb == 0) ||
						    !(tp = sototcpcb(sp)) ||
						    (tp->t_state == TCPS_CLOSED)) {
							mask |= EV_WR|EV_RESET;
							break;
						}
					}
					mask |= EV_WR;
					evq->ee_req.er_wcnt = sbspace(&sp->so_snd);
				}
			}
			break;

		case EV_RCONN:
			if ((evq->ee_eventmask & EV_RE)) {
				evq->ee_req.er_rcnt = sp->so_qlen + 1;	// incl this one
				mask |= EV_RE|EV_RCONN;
			}
			break;

		case EV_WCONN:
			if ((evq->ee_eventmask & EV_WR)) {
				mask |= EV_WR|EV_WCONN;
			}
			break;

		case EV_RCLOSED:
			if ((evq->ee_eventmask & EV_RE)) {
				mask |= EV_RE|EV_RCLOSED;
			}
			break;

		case EV_WCLOSED:
			if ((evq->ee_eventmask & EV_WR)) {
				mask |= EV_WR|EV_WCLOSED;
			}
			break;

		case EV_FIN:
			if (evq->ee_eventmask & EV_RE) {
				mask |= EV_RE|EV_FIN;
			}
			break;

		case EV_RESET:
		case EV_TIMEOUT:
			if (evq->ee_eventmask & EV_RE) {
				mask |= EV_RE | event;
			}
			if (evq->ee_eventmask & EV_WR) {
				mask |= EV_WR | event;
			}
			break;

		default:
			return;
		} /* switch */

		if (mask) {
			/* record the ready bits and queue the event to its proc */
			evq->ee_req.er_eventbits |= mask;
			KERNEL_DEBUG(DBG_MISC_POST, evq, evq->ee_req.er_eventbits, mask,0,0);
			evprocenque(evq);
		}
	}
	KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, 0,0,0,0,0);
}
1475
1476 /*
1477 * remove and return the first event (eqp=NULL) or a specific
1478 * event, or return NULL if no events found
1479 */
1480 struct eventqelt *
1481 evprocdeque(struct proc *p, struct eventqelt *eqp)
1482 {
1483
1484 KERNEL_DEBUG(DBG_MISC_DEQUEUE|DBG_FUNC_START,p,eqp,0,0,0);
1485
1486 if (eqp && ((eqp->ee_flags & EV_QUEUED) == NULL)) {
1487 KERNEL_DEBUG(DBG_MISC_DEQUEUE|DBG_FUNC_END,0,0,0,0,0);
1488 return(NULL);
1489 }
1490 if (p->p_evlist.tqh_first == NULL) {
1491 KERNEL_DEBUG(DBG_MISC_DEQUEUE|DBG_FUNC_END,0,0,0,0,0);
1492 return(NULL);
1493 }
1494 if (eqp == NULL) { // remove first
1495 eqp = p->p_evlist.tqh_first;
1496 }
1497 TAILQ_REMOVE(&p->p_evlist, eqp, ee_plist);
1498 eqp->ee_flags &= ~EV_QUEUED;
1499 KERNEL_DEBUG(DBG_MISC_DEQUEUE|DBG_FUNC_END,eqp,0,0,0,0);
1500 return(eqp);
1501 }
1502
/* user-space argument block for the watchevent() system call */
struct evwatch_args {
	struct eventreq *u_req;		/* user pointer: event request to watch */
	int u_eventmask;		/* EV_* bits the caller wants monitored */
};
1507
1508
1509 /*
1510 * watchevent system call. user passes us an event to watch
1511 * for. we malloc an event object, initialize it, and queue
1512 * it to the open socket. when the event occurs, postevent()
1513 * will enque it back to our proc where we can retrieve it
1514 * via waitevent().
1515 *
1516 * should this prevent duplicate events on same socket?
1517 */
1518 int
1519 watchevent(p, uap, retval)
1520 struct proc *p;
1521 struct evwatch_args *uap;
1522 register_t *retval;
1523 {
1524 struct eventqelt *eqp = (struct eventqelt *)0;
1525 struct eventqelt *np;
1526 struct eventreq *erp;
1527 struct file *fp;
1528 struct socket *sp;
1529 int error;
1530
1531 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_START, 0,0,0,0,0);
1532
1533 // get a qelt and fill with users req
1534 MALLOC(eqp, struct eventqelt *, sizeof(struct eventqelt), M_TEMP, M_WAITOK);
1535 if (!eqp) panic("can't MALLOC eqp");
1536 erp = &eqp->ee_req;
1537 // get users request pkt
1538 if (error = copyin((caddr_t)uap->u_req, (caddr_t)erp,
1539 sizeof(struct eventreq))) {
1540 FREE(eqp, M_TEMP);
1541 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0);
1542 return(error);
1543 }
1544 KERNEL_DEBUG(DBG_MISC_WATCH, erp->er_handle,uap->u_eventmask,eqp,0,0);
1545 // validate, freeing qelt if errors
1546 error = 0;
1547 if (erp->er_type != EV_FD) {
1548 error = EINVAL;
1549 } else if (erp->er_handle < 0) {
1550 error = EBADF;
1551 } else if (erp->er_handle > p->p_fd->fd_nfiles) {
1552 error = EBADF;
1553 } else if ((fp = *fdfile(p, erp->er_handle)) == NULL) {
1554 error = EBADF;
1555 } else if (fp->f_type != DTYPE_SOCKET) {
1556 error = EINVAL;
1557 }
1558 if (error) {
1559 FREE(eqp,M_TEMP);
1560 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0);
1561 return(error);
1562 }
1563
1564 erp->er_rcnt = erp->er_wcnt = erp->er_eventbits = 0;
1565 eqp->ee_proc = p;
1566 eqp->ee_eventmask = uap->u_eventmask & EV_MASK;
1567 eqp->ee_flags = 0;
1568
1569 sp = (struct socket *)fp->f_data;
1570 assert(sp != NULL);
1571
1572 // only allow one watch per file per proc
1573 for (np = sp->so_evlist.tqh_first; np != NULL; np = np->ee_slist.tqe_next) {
1574 if (np->ee_proc == p) {
1575 FREE(eqp,M_TEMP);
1576 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, EINVAL,0,0,0,0);
1577 return(EINVAL);
1578 }
1579 }
1580
1581 TAILQ_INSERT_TAIL(&sp->so_evlist, eqp, ee_slist);
1582 postevent(sp, 0, EV_RWBYTES); // catch existing events
1583 KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, 0,0,0,0,0);
1584 return(0);
1585 }
1586
/* user-space argument block for the waitevent() system call */
struct evwait_args {
	struct eventreq *u_req;		/* user pointer: where the event is copied out */
	struct timeval *tv;		/* timeout; NULL blocks, zeroed timeval polls */
};
1591
1592 /*
1593 * waitevent system call.
1594 * grabs the next waiting event for this proc and returns
 * it. if no events are queued, the caller may block with a
 * timeout, block indefinitely (tv == NULL), or poll by
 * passing a zeroed timeval.
1597 */
1598 int
1599 waitevent(p, uap, retval)
1600 struct proc *p;
1601 struct evwait_args *uap;
1602 register_t *retval;
1603 {
1604 int error = 0;
1605 struct eventqelt *eqp;
1606 uint64_t abstime, interval;
1607
1608 if (uap->tv) {
1609 struct timeval atv;
1610
1611 error = copyin((caddr_t)uap->tv, (caddr_t)&atv, sizeof (atv));
1612 if (error)
1613 return(error);
1614 if (itimerfix(&atv)) {
1615 error = EINVAL;
1616 return(error);
1617 }
1618
1619 interval = tvtoabstime(&atv);
1620 }
1621 else
1622 abstime = interval = 0;
1623
1624 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_START, 0,0,0,0,0);
1625
1626 retry:
1627 if ((eqp = evprocdeque(p,NULL)) != NULL) {
1628 error = copyout((caddr_t)&eqp->ee_req,
1629 (caddr_t)uap->u_req, sizeof(struct eventreq));
1630 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,
1631 eqp->ee_req.er_handle,eqp->ee_req.er_eventbits,eqp,0);
1632
1633 return (error);
1634 }
1635 else {
1636 if (uap->tv && interval == 0) {
1637 *retval = 1; // poll failed
1638 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,0,0,0,0);
1639
1640 return (error);
1641 }
1642
1643 if (interval != 0)
1644 clock_absolutetime_interval_to_deadline(interval, &abstime)
1645
1646 KERNEL_DEBUG(DBG_MISC_WAIT, 1,&p->p_evlist,0,0,0);
1647 error = tsleep1(&p->p_evlist, PSOCK | PCATCH,
1648 "waitevent", abstime, (int (*)(int))0);
1649 KERNEL_DEBUG(DBG_MISC_WAIT, 2,&p->p_evlist,0,0,0);
1650 if (error == 0)
1651 goto retry;
1652 if (error == ERESTART)
1653 error = EINTR;
1654 if (error == EWOULDBLOCK) {
1655 *retval = 1;
1656 error = 0;
1657 }
1658 }
1659
1660 KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, 0,0,0,0,0);
1661
1662 return (error);
1663 }
1664
/* user-space argument block for the modwatch() system call */
struct modwatch_args {
	struct eventreq *u_req;		/* user pointer: event request to modify */
	int u_eventmask;		/* new EV_* mask, or EV_RM to remove the watch */
};
1669
1670 /*
1671 * modwatch system call. user passes in event to modify.
1672 * if we find it we reset the event bits and que/deque event
 * as needed.
1674 */
1675 int
1676 modwatch(p, uap, retval)
1677 struct proc *p;
1678 struct modwatch_args *uap;
1679 register_t *retval;
1680 {
1681 struct eventreq er;
1682 struct eventreq *erp = &er;
1683 struct eventqelt *evq;
1684 int error;
1685 struct file *fp;
1686 struct socket *sp;
1687 int flag;
1688
1689 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_START, 0,0,0,0,0);
1690
1691 // get users request pkt
1692 if (error = copyin((caddr_t)uap->u_req, (caddr_t)erp,
1693 sizeof(struct eventreq))) return(error);
1694
1695 if (erp->er_type != EV_FD) return(EINVAL);
1696 if (erp->er_handle < 0) return(EBADF);
1697 if (erp->er_handle > p->p_fd->fd_nfiles) return(EBADF);
1698 if ((fp = *fdfile(p, erp->er_handle)) == NULL)
1699 return(EBADF);
1700 if (fp->f_type != DTYPE_SOCKET) return(EINVAL); // for now must be sock
1701 sp = (struct socket *)fp->f_data;
1702 assert(sp != NULL);
1703
1704
1705 // locate event if possible
1706 for (evq = sp->so_evlist.tqh_first;
1707 evq != NULL; evq = evq->ee_slist.tqe_next) {
1708 if (evq->ee_proc == p) break;
1709 }
1710
1711 if (evq == NULL) {
1712 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, EINVAL,0,0,0,0);
1713 return(EINVAL);
1714 }
1715 KERNEL_DEBUG(DBG_MISC_MOD, erp->er_handle,uap->u_eventmask,evq,0,0);
1716
1717 if (uap->u_eventmask == EV_RM) {
1718 evprocdeque(p, evq);
1719 TAILQ_REMOVE(&sp->so_evlist, evq, ee_slist);
1720 FREE(evq, M_TEMP);
1721 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, 0,0,0,0,0);
1722 return(0);
1723 }
1724
1725 switch (uap->u_eventmask & EV_MASK) {
1726
1727 case 0:
1728 flag = 0;
1729 break;
1730
1731 case EV_RE:
1732 case EV_WR:
1733 case EV_RE|EV_WR:
1734 flag = EV_RWBYTES;
1735 break;
1736
1737 case EV_EX:
1738 flag = EV_OOB;
1739 break;
1740
1741 case EV_EX|EV_RE:
1742 case EV_EX|EV_WR:
1743 case EV_EX|EV_RE|EV_WR:
1744 flag = EV_OOB|EV_RWBYTES;
1745 break;
1746
1747 default:
1748 return(EINVAL);
1749 }
1750
1751 evq->ee_eventmask = uap->u_eventmask & EV_MASK;
1752 evprocdeque(p, evq);
1753 evq->ee_req.er_eventbits = 0;
1754 postevent(sp, 0, flag);
1755 KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, evq->ee_req.er_handle,evq->ee_eventmask,sp,flag,0);
1756 return(0);
1757 }