/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_event.c	1.0 (3/31/2000)
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <kern/zalloc.h>
MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
static int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    register_t *retval, struct proc *p);
static void	kqueue_wakeup(struct kqueue *kq);
static int	kqueue_read __P((struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p));
static int	kqueue_write __P((struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p));
static int	kqueue_ioctl __P((struct file *fp, u_long com, caddr_t data,
		    struct proc *p));
static int	kqueue_select __P((struct file *fp, int which, void *wql,
		    struct proc *p));
static int	kqueue_close __P((struct file *fp, struct proc *p));
static int	kqueue_kqfilter __P((struct file *fp, struct knote *kn, struct proc *p));
static struct fileops kqueueops = {
	kqueue_read,
	kqueue_write,
	kqueue_ioctl,
	kqueue_select,
	kqueue_close,
	kqueue_kqfilter
};
static void	knote_fdpattach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct proc *p);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);
static int	filt_fileattach(struct knote *kn);
static struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
/*
 * JMM - placeholder for not-yet-implemented filters
 */
static int	filt_badattach(struct knote *kn);
static struct filterops bad_filtops =
	{ 0, filt_badattach, 0, 0 };
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);

static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };

extern struct filterops fs_filtops;

extern struct filterops sig_filtops;
/* JMM - We don't implement these now */
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);

SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

static zone_t	knote_zone;
#define KNOTE_ACTIVATE(kn) do { 				\
	kn->kn_status |= KN_ACTIVE;				\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)	\
		knote_enqueue(kn);				\
} while(0)
#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
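/*
 * Worked example (illustrative, not from the original source): with the
 * default KN_HASHSIZE of 64 the mask is 63 (0x3f), so for a non-fd ident
 * of 0x1234 the hash folds the second byte into the first before masking:
 *
 *	KN_HASH(0x1234, 63) == ((0x1234 ^ 0x12) & 0x3f) == 0x26
 *
 * which keeps idents that share their low six bits from all landing in
 * the same bucket.
 */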
extern struct filterops aio_filtops;
/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
#if 0
	&aio_filtops,			/* EVFILT_AIO */
#else
	&bad_filtops,			/* EVFILT_AIO */
#endif
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
#if 0
	&timer_filtops,			/* EVFILT_TIMER */
#else
	&bad_filtops,			/* EVFILT_TIMER */
#endif
	&bad_filtops,			/* EVFILT_MACHPORT */
	&fs_filtops			/* EVFILT_FS */
};
static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn, current_proc()));
}
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kq->kq_state & KQ_SEL)
		return;

	KNOTE_DETACH(&kq->kq_sel.si_note, kn);
}
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;

	p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	if (! PRISON_CHECK(current_proc(), p))
		return (EACCES);

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXX lock the proc here while adding to the list? */
	KNOTE_ATTACH(&p->p_klist, kn);

	return (0);
}
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	/* XXX locking?  this might modify another process. */
	KNOTE_DETACH(&p->p_klist, kn);
}
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
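/*
 * Illustrative userspace sketch (not part of this file): arming the
 * EVFILT_PROC filter above with NOTE_TRACK.  "kq" and "pid" are
 * hypothetical; error handling is elided.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static void
watch_process(int kq, pid_t pid)
{
	struct kevent kev;

	/*
	 * On fork, filt_proc() registers a knote on the child via the
	 * EV_FLAG1 path, which then reports NOTE_CHILD carrying the
	 * parent pid in the event's data field.
	 */
	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
	(void) kevent(kq, &kev, 1, NULL, 0, NULL);
}
#endif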
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tv.tv_sec = kn->kn_sdata / 1000;
		tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
		tticks = tvtohz(&tv);
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset(calloutp, tticks, filt_timerexpire, kn);
	}
}
/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp);
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
	kn->kn_hook = (caddr_t)calloutp;

	return (0);
}
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}
static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}
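/*
 * Illustrative userspace sketch (not part of this file): arming the
 * millisecond timer filter implemented above.  Because filt_timerexpire()
 * re-arms the callout unless EV_ONESHOT is set, this fires periodically.
 * "kq" is a hypothetical kqueue descriptor.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static void
arm_periodic_timer(int kq)
{
	struct kevent kev;

	/* ident 1 is an arbitrary timer id; data is the period in ms */
	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, NULL);
	(void) kevent(kq, &kev, 1, NULL, 0, NULL);
}
#endif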
/*
 * JMM - placeholder for not-yet-implemented filters
 */
static int
filt_badattach(struct knote *kn)
{
	return (EOPNOTSUPP);
}
#ifndef _SYS_SYSPROTO_H_
struct kqueue_args {
	int	dummy;
};
#endif /* _SYS_SYSPROTO_H_ */

int
kqueue(struct proc *p, struct kqueue_args *uap, register_t *retval)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = (struct kqueue *)_MALLOC(sizeof(struct kqueue),
	    M_KQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;
	*retval = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;		/* this process has a kq */
	kq->kq_fdp = fdp;
	return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct kqueue_portset_np_args {
	int	fd;
};
#endif /* _SYS_SYSPROTO_H_ */

int
kqueue_portset_np(struct proc *p, struct kqueue_portset_np_args *uap, register_t *retval)
{
	/* JMM - Placeholder for now */
	return (ENOTSUP);
}
#ifndef _SYS_SYSPROTO_H_
struct kqueue_from_portset_np_args {
	int	fd;
};
#endif /* _SYS_SYSPROTO_H_ */

int
kqueue_from_portset_np(struct proc *p, struct kqueue_from_portset_np_args *uap, register_t *retval)
{
	/* JMM - Placeholder for now */
	return (ENOTSUP);
}
/* JMM - We don't implement this yet */
#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif /* _SYS_SYSPROTO_H_ */
int
kevent(struct proc *p, struct kevent_args *uap, register_t *retval)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct timespec ts;
	int i, nerrors, error;

	if (uap->timeout != NULL) {
		error = copyin((caddr_t)uap->timeout, (caddr_t)&ts, sizeof(ts));
		if (error)
			return (error);
		uap->timeout = &ts;
	}

	if (((u_int)uap->fd) >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL ||
	    (fp->f_type != DTYPE_KQUEUE))
		return (EBADF);
	fhold(fp);

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	while (uap->nchanges > 0) {
		int n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
		struct kevent kq_kev[n];

		error = copyin((caddr_t)uap->changelist, (caddr_t)kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			struct kevent *kevp = &kq_kev[i];

			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, p);
			if (error) {
				if (uap->nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout((caddr_t)kevp,
					    (caddr_t)uap->eventlist,
					    sizeof(*kevp));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		*retval = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout,
	    retval, p);
done:
	if (fp != NULL)
		fdrop(fp, p);
	return (error);
}
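/*
 * Illustrative userspace sketch (not part of this file): the syscall pair
 * implemented above as seen from user code, registering a read filter and
 * waiting for one event.  "watch_fd" is a hypothetical descriptor.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int
wait_readable(int watch_fd)
{
	struct kevent change, event;
	int kq, n;

	if ((kq = kqueue()) == -1)
		return (-1);
	EV_SET(&change, watch_fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
	/* one call pushes the changelist and blocks for one event */
	n = kevent(kq, &change, 1, &event, 1, NULL);
	return (n == 1 ? (int)event.data : -1);	/* bytes readable */
}
#endif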
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for insuring that
		 * the identifier can be attached to it.
		 */
		printf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor */
		if ((u_int)kev->ident >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[kev->ident]) == NULL)
			return (EBADF);
		fhold(fp);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_fdpattach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, p);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters that have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);
	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

done:
	if (fp != NULL)
		fdrop(fp, p);
	return (error);
}
static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, register_t *retval, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct timeval atv, rtv, ttv;
	struct knote marker;
	int s, count, timeout, error = 0;

	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			timeout = -1;
		else
			timeout = atv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&ttv);
	}

start:
	s = splhigh();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		}
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	/* JMM - This marker trick doesn't work with multiple threads */
	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		int maxkev = (count > KQ_NEVENTS) ? KQ_NEVENTS : count;
		struct kevent kq_kev[maxkev];
		struct kevent *kevp = kq_kev;
		struct knote *kn = NULL;
		int nkev = 0;

		while (nkev < maxkev) {
			kn = TAILQ_FIRST(&kq->kq_head);
			TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
			if (kn == &marker) {
				if (count == maxevents) {
					splx(s);
					goto retry;
				}
				break;
			} else if (kn->kn_status & KN_DISABLED) {
				kn->kn_status &= ~KN_QUEUED;
				kq->kq_count--;
				continue;
			} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			    kn->kn_fop->f_event(kn, 0) == 0) {
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
				continue;
			}

			*kevp = kn->kn_kevent;
			kevp++;
			nkev++;
			count--;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kq->kq_count--;
				splx(s);
				kn->kn_fop->f_detach(kn);
				knote_drop(kn, p);
				s = splhigh();
			} else if (kn->kn_flags & EV_CLEAR) {
				kn->kn_data = 0;
				kn->kn_fflags = 0;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else {
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
			}
		}
		splx(s);
		error = copyout((caddr_t)kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
		if (kn == &marker)
			goto done;
		ulistp += nkev;
		s = splhigh();
		if (error)
			break;
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
done:
	*retval = maxevents - count;
	return (error);
}
/*
 * This could be expanded to call kqueue_scan, if desired.
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct proc *p)
{
	return (ENXIO);
}

static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct proc *p)
{
	return (ENXIO);
}
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}
static int
kqueue_select(struct file *fp, int which, void *wql, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int retnum = 0;
	int s = splnet();

	if (which == FREAD) {
		if (kq->kq_count) {
			retnum = 1;
		} else {
			selrecord(p, &kq->kq_sel, wql);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (retnum);
}
static int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct filedesc *fdp = p->p_fd;
	struct knote **knp, *kn, *kn0;
	int i;

	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				fdrop(kn->kn_fp, p);
				knote_free(kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					/* XXX non-fd release of kn->kn_ptr */
					knote_free(kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	_FREE(kq, M_KQUEUE);
	fp->f_data = NULL;

	return (0);
}
static int
kqueue_kqfilter(struct file *fp, struct knote *kn, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ || (kq->kq_state & KQ_SEL))
		return (1);

	kn->kn_fop = &kqread_filtops;
	KNOTE_ATTACH(&kq->kq_sel.si_note, kn);
	return (0);
}
static int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}
static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		// kq->kq_state &= ~KQ_SEL;	/* remove for now */
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}
void
klist_init(struct klist *list)
{
	SLIST_INIT(list);
}
/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}
/*
 * attach a knote to the specified list.  Return true if this is the first entry.
 */
int
knote_attach(struct klist *list, struct knote *kn)
{
	int ret = SLIST_EMPTY(list);
	SLIST_INSERT_HEAD(list, kn, kn_selnext);
	return (ret);
}
/*
 * detach a knote from the specified list.  Return true if that was the last entry.
 */
int
knote_detach(struct klist *list, struct knote *kn)
{
	SLIST_REMOVE(list, kn, knote, kn_selnext);
	return (SLIST_EMPTY(list));
}
/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct proc *p, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
	}
}
/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = &fdp->fd_knlist[fd];

	knote_remove(p, list);
}
static void
knote_fdpattach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		MALLOC(list, struct klist *,
		    size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
		bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		if (fdp->fd_knlist != NULL)
			FREE(fdp->fd_knlist, M_KQUEUE);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
	list = &fdp->fd_knlist[kn->kn_id];
done:
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}
/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop(kn->kn_fp, p);
	knote_free(kn);
}
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}
void
knote_init(void)
{
	knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote),
	    8192, "knote zone");
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, (vm_offset_t)kn);
}
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/kern_event.h>
#include <sys/malloc.h>
#include <sys/sys_domain.h>
#include <sys/syslog.h>
struct pr_usrreqs event_usrreqs;

struct protosw eventsw[] = {
	{
		SOCK_RAW,	&systemdomain,	SYSPROTO_EVENT,	PR_ATOMIC,
	}
};

struct kern_event_head kern_event_head;

static u_long static_event_id = 0;
/*
 * Install the protosw's for the NKE manager.  Invoked at
 * extension load time.
 */
int
kern_event_init(void)
{
	int retval;

	if ((retval = net_add_proto(eventsw, &systemdomain)) == 0)
		return (KERN_SUCCESS);

	log(LOG_WARNING, "Can't install kernel events protocol (%d)\n", retval);
	return (KERN_FAILURE);
}
int kev_attach(struct socket *so, int proto, struct proc *p)
{
	int error;
	struct kern_event_pcb *ev_pcb;

	error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
	if (error)
		return (error);

	ev_pcb = _MALLOC(sizeof(struct kern_event_pcb), M_PCB, M_WAITOK);
	if (ev_pcb == 0)
		return (ENOBUFS);

	ev_pcb->ev_socket = so;
	ev_pcb->vendor_code_filter = 0xffffffff;

	so->so_pcb = (caddr_t) ev_pcb;
	LIST_INSERT_HEAD(&kern_event_head, ev_pcb, ev_link);

	return (0);
}
int kev_detach(struct socket *so)
{
	struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;

	if (ev_pcb) {
		LIST_REMOVE(ev_pcb, ev_link);
		FREE(ev_pcb, M_PCB);
		so->so_pcb = 0;
	}

	return (0);
}
int kev_post_msg(struct kev_msg *event_msg)
{
	struct mbuf *m, *m2;
	struct kern_event_pcb *ev_pcb;
	struct kern_event_msg *ev;
	char *tmp;
	int total_size;
	int i;

	m = m_get(M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);

	ev = mtod(m, struct kern_event_msg *);
	total_size = KEV_MSG_HEADER_SIZE;

	tmp = (char *) &ev->event_data[0];
	for (i = 0; i < 5; i++) {
		if (event_msg->dv[i].data_length == 0)
			break;

		total_size += event_msg->dv[i].data_length;
		bcopy(event_msg->dv[i].data_ptr, tmp,
		    event_msg->dv[i].data_length);
		tmp += event_msg->dv[i].data_length;
	}

	ev->id = ++static_event_id;
	ev->total_size = total_size;
	ev->vendor_code = event_msg->vendor_code;
	ev->kev_class = event_msg->kev_class;
	ev->kev_subclass = event_msg->kev_subclass;
	ev->event_code = event_msg->event_code;

	m->m_len = total_size;
	for (ev_pcb = LIST_FIRST(&kern_event_head);
	     ev_pcb;
	     ev_pcb = LIST_NEXT(ev_pcb, ev_link)) {

		if (ev_pcb->vendor_code_filter != KEV_ANY_VENDOR) {
			if (ev_pcb->vendor_code_filter != ev->vendor_code)
				continue;

			if (ev_pcb->class_filter != KEV_ANY_CLASS) {
				if (ev_pcb->class_filter != ev->kev_class)
					continue;

				if ((ev_pcb->subclass_filter != KEV_ANY_SUBCLASS) &&
				    (ev_pcb->subclass_filter != ev->kev_subclass))
					continue;
			}
		}

		m2 = m_copym(m, 0, m->m_len, M_NOWAIT);
		if (m2 == 0) {
			m_free(m);
			return (ENOBUFS);
		}

		sbappendrecord(&ev_pcb->ev_socket->so_rcv, m2);
		sorwakeup(ev_pcb->ev_socket);
	}

	m_free(m);
	return (0);
}
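/*
 * Illustrative kernel-side sketch (not part of this file): how a client
 * of kev_post_msg() above would publish an event.  The class/subclass,
 * event code, and payload here are hypothetical.
 */
#if 0
static int
post_example_event(void)
{
	struct kev_msg ev_msg;
	u_long payload = 42;			/* hypothetical payload */

	bzero(&ev_msg, sizeof(ev_msg));
	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_DL_SUBCLASS;
	ev_msg.event_code = 1;			/* hypothetical code */
	ev_msg.dv[0].data_ptr = &payload;
	ev_msg.dv[0].data_length = sizeof(payload);
	ev_msg.dv[1].data_length = 0;		/* terminates the vector list */

	return (kev_post_msg(&ev_msg));
}
#endif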
int kev_control(so, cmd, data, ifp, p)
	struct socket *so;
	u_long cmd;
	caddr_t data;
	register struct ifnet *ifp;
	struct proc *p;
{
	struct kev_request *kev_req = (struct kev_request *) data;
	struct kern_event_pcb *ev_pcb;
	u_long *id_value = (u_long *) data;

	switch (cmd) {
	case SIOCGKEVID:
		*id_value = static_event_id;
		break;

	case SIOCSKEVFILT:
		ev_pcb = (struct kern_event_pcb *) so->so_pcb;
		ev_pcb->vendor_code_filter = kev_req->vendor_code;
		ev_pcb->class_filter = kev_req->kev_class;
		ev_pcb->subclass_filter = kev_req->kev_subclass;
		break;

	case SIOCGKEVFILT:
		ev_pcb = (struct kern_event_pcb *) so->so_pcb;
		kev_req->vendor_code = ev_pcb->vendor_code_filter;
		kev_req->kev_class = ev_pcb->class_filter;
		kev_req->kev_subclass = ev_pcb->subclass_filter;
		break;

	default:
		return (EOPNOTSUPP);
	}

	return (0);
}
struct pr_usrreqs event_usrreqs = {
	pru_abort_notsupp, pru_accept_notsupp, kev_attach, pru_bind_notsupp, pru_connect_notsupp,
	pru_connect2_notsupp, kev_control, kev_detach, pru_disconnect_notsupp,
	pru_listen_notsupp, pru_peeraddr_notsupp, pru_rcvd_notsupp, pru_rcvoob_notsupp,
	pru_send_notsupp, pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
	pru_sosend_notsupp, soreceive, sopoll
};