/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 *	@(#)kern_event.c       1.0 (3/31/2000)
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file_internal.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode_internal.h>
#include <sys/proc_info.h>
#include <sys/codesign.h>

#include <kern/lock.h>
#include <kern/clock.h>
#include <kern/thread_call.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <kern/assert.h>

#include <libkern/libkern.h>
#include "net/net_str_id.h"

#include <mach/task.h>

#if VM_PRESSURE_EVENTS
#include <kern/vm_pressure.h>
#endif

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif
MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

#define	KQ_EVENT	NULL
static inline void kqlock(struct kqueue *kq);
static inline void kqunlock(struct kqueue *kq);

static int kqlock2knoteuse(struct kqueue *kq, struct knote *kn);
static int kqlock2knoteusewait(struct kqueue *kq, struct knote *kn);
static int kqlock2knotedrop(struct kqueue *kq, struct knote *kn);
static int knoteuse2kqlock(struct kqueue *kq, struct knote *kn);

static void kqueue_wakeup(struct kqueue *kq, int closed);
static int kqueue_read(struct fileproc *fp, struct uio *uio,
	int flags, vfs_context_t ctx);
static int kqueue_write(struct fileproc *fp, struct uio *uio,
	int flags, vfs_context_t ctx);
static int kqueue_ioctl(struct fileproc *fp, u_long com, caddr_t data,
	vfs_context_t ctx);
static int kqueue_select(struct fileproc *fp, int which, void *wql,
	vfs_context_t ctx);
static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
	vfs_context_t ctx);
static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);
extern int kqueue_stat(struct fileproc *fp, void *ub, int isstat64,
	vfs_context_t ctx);
static const struct fileops kqueueops = {
	.fo_type = DTYPE_KQUEUE,
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_select = kqueue_select,
	.fo_close = kqueue_close,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_drain = kqueue_drain,
};
static int kevent_internal(struct proc *p, int iskev64, user_addr_t changelist,
	int nchanges, user_addr_t eventlist, int nevents, int fd,
	user_addr_t utimeout, unsigned int flags, int32_t *retval);
static int kevent_copyin(user_addr_t *addrp, struct kevent64_s *kevp,
	struct proc *p, int iskev64);
static int kevent_copyout(struct kevent64_s *kevp, user_addr_t *addrp,
	struct proc *p, int iskev64);
char *kevent_description(struct kevent64_s *kevp, char *s, size_t n);

static int kevent_callback(struct kqueue *kq, struct kevent64_s *kevp,
	void *data);
static void kevent_continue(struct kqueue *kq, void *data, int error);
static void kqueue_scan_continue(void *contp, wait_result_t wait_result);
static int kqueue_process(struct kqueue *kq, kevent_callback_t callback,
	void *data, int *countp, struct proc *p);
static int kqueue_begin_processing(struct kqueue *kq);
static void kqueue_end_processing(struct kqueue *kq);
static int knote_process(struct knote *kn, kevent_callback_t callback,
	void *data, struct kqtailq *inprocessp, struct proc *p);
static void knote_put(struct knote *kn);
static int knote_fdpattach(struct knote *kn, struct filedesc *fdp,
	struct proc *p);
static void knote_drop(struct knote *kn, struct proc *p);
static void knote_activate(struct knote *kn, int);
static void knote_deactivate(struct knote *kn);
static void knote_enqueue(struct knote *kn);
static void knote_dequeue(struct knote *kn);
static struct knote *knote_alloc(void);
static void knote_free(struct knote *kn);
static int filt_fileattach(struct knote *kn);
static struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
};

static void filt_kqdetach(struct knote *kn);
static int filt_kqueue(struct knote *kn, long hint);
static struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};

/* placeholder for not-yet-implemented filters */
static int filt_badattach(struct knote *kn);
static struct filterops bad_filtops = {
	.f_attach = filt_badattach,
};
static int filt_procattach(struct knote *kn);
static void filt_procdetach(struct knote *kn);
static int filt_proc(struct knote *kn, long hint);
static struct filterops proc_filtops = {
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};
#if VM_PRESSURE_EVENTS
static int filt_vmattach(struct knote *kn);
static void filt_vmdetach(struct knote *kn);
static int filt_vm(struct knote *kn, long hint);
static struct filterops vm_filtops = {
	.f_attach = filt_vmattach,
	.f_detach = filt_vmdetach,
	.f_event = filt_vm,
};
#endif /* VM_PRESSURE_EVENTS */

#if CONFIG_MEMORYSTATUS
extern struct filterops memorystatus_filtops;
#endif /* CONFIG_MEMORYSTATUS */

extern struct filterops fs_filtops;

extern struct filterops sig_filtops;
/* Timer filter */
static int filt_timerattach(struct knote *kn);
static void filt_timerdetach(struct knote *kn);
static int filt_timer(struct knote *kn, long hint);
static void filt_timertouch(struct knote *kn, struct kevent64_s *kev,
	long type);
static struct filterops timer_filtops = {
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
	.f_touch = filt_timertouch,
};

/* Helpers */
static void filt_timerexpire(void *knx, void *param1);
static int filt_timervalidate(struct knote *kn);
static void filt_timerupdate(struct knote *kn);
static void filt_timercancel(struct knote *kn);

/* Timer state flags kept in kn_hookid */
#define	TIMER_RUNNING		0x1
#define	TIMER_CANCELWAIT	0x2

static lck_mtx_t _filt_timerlock;
static void filt_timerlock(void);
static void filt_timerunlock(void);
static zone_t knote_zone;

#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

#if 0
extern struct filterops aio_filtops;
#endif

/* Mach portset filter */
extern struct filterops machport_filtops;
/* User filter */
static int filt_userattach(struct knote *kn);
static void filt_userdetach(struct knote *kn);
static int filt_user(struct knote *kn, long hint);
static void filt_usertouch(struct knote *kn, struct kevent64_s *kev,
	long type);
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};
/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
#if 0
	&aio_filtops,			/* EVFILT_AIO */
#else
	&bad_filtops,			/* EVFILT_AIO */
#endif
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&machport_filtops,		/* EVFILT_MACHPORT */
	&fs_filtops,			/* EVFILT_FS */
	&user_filtops,			/* EVFILT_USER */
	&bad_filtops,			/* unused */
#if VM_PRESSURE_EVENTS
	&vm_filtops,			/* EVFILT_VM */
#else
	&bad_filtops,			/* EVFILT_VM */
#endif
	&file_filtops,			/* EVFILT_SOCK */
#if CONFIG_MEMORYSTATUS
	&memorystatus_filtops,		/* EVFILT_MEMORYSTATUS */
#else
	&bad_filtops,			/* EVFILT_MEMORYSTATUS */
#endif
};
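
/*
 * Illustrative note (not in the original source): user-visible filter
 * identifiers are small negative numbers, and kevent_register() below maps
 * them into this table with a bitwise complement.  Assuming the usual
 * <sys/event.h> values:
 *
 *	EVFILT_READ == -1   ->  ~(-1) == 0   ->  sysfilt_ops[0] == &file_filtops
 *	EVFILT_PROC == -5   ->  ~(-5) == 4   ->  sysfilt_ops[4] == &proc_filtops
 *	EVFILT_USER == -10  ->  ~(-10) == 9  ->  sysfilt_ops[9] == &user_filtops
 */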
/*
 * kqueue/note lock attributes and implementations
 *
 *	kqueues have locks, while knotes have use counts.
 *	Most of the knote state is guarded by the object lock;
 *	the knote "inuse" count and status use the kqueue lock.
 */
lck_grp_attr_t * kq_lck_grp_attr;
lck_grp_t * kq_lck_grp;
lck_attr_t * kq_lck_attr;

static inline void
kqlock(struct kqueue *kq)
{
	lck_spin_lock(&kq->kq_lock);
}

static inline void
kqunlock(struct kqueue *kq)
{
	lck_spin_unlock(&kq->kq_lock);
}
/*
 * Convert a kq lock to a knote use reference.
 *
 *	If the knote is being dropped, we can't get
 *	a use reference, so just return with it
 *	still locked.
 *
 *	- kq locked at entry
 *	- unlock on exit if we get the use reference
 */
static int
kqlock2knoteuse(struct kqueue *kq, struct knote *kn)
{
	if (kn->kn_status & KN_DROPPING)
		return (0);
	kn->kn_inuse++;
	kqunlock(kq);
	return (1);
}
/*
 * Convert a kq lock to a knote use reference,
 * but wait for attach and drop events to complete.
 *
 *	If the knote is being dropped, we can't get
 *	a use reference, so just return with it
 *	still locked.
 *
 *	- kq locked at entry
 *	- kq always unlocked on exit
 */
static int
kqlock2knoteusewait(struct kqueue *kq, struct knote *kn)
{
	if ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) != 0) {
		kn->kn_status |= KN_USEWAIT;
		wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
		    &kn->kn_status, THREAD_UNINT, 0);
		kqunlock(kq);
		thread_block(THREAD_CONTINUE_NULL);
		return (0);
	}
	kn->kn_inuse++;
	kqunlock(kq);
	return (1);
}
/*
 * Convert from a knote use reference back to kq lock.
 *
 *	Drop a use reference and wake any waiters if
 *	this is the last one.
 *
 *	The exit return indicates if the knote is
 *	still alive - but the kqueue lock is taken
 *	unconditionally.
 */
static int
knoteuse2kqlock(struct kqueue *kq, struct knote *kn)
{
	kqlock(kq);
	if (--kn->kn_inuse == 0) {
		if ((kn->kn_status & KN_ATTACHING) != 0) {
			kn->kn_status &= ~KN_ATTACHING;
		}
		if ((kn->kn_status & KN_USEWAIT) != 0) {
			kn->kn_status &= ~KN_USEWAIT;
			wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs,
			    &kn->kn_status, THREAD_AWAKENED);
		}
	}
	return ((kn->kn_status & KN_DROPPING) == 0);
}
/*
 * Convert a kq lock to a knote drop reference.
 *
 *	If the knote is in use, wait for the use count
 *	to subside.  We first mark our intention to drop
 *	it - keeping other users from "piling on."
 *	If we are too late, we have to wait for the
 *	other drop to complete.
 *
 *	- kq locked at entry
 *	- always unlocked on exit.
 *	- caller can't hold any locks that would prevent
 *	  the other dropper from completing.
 */
static int
kqlock2knotedrop(struct kqueue *kq, struct knote *kn)
{
	int oktodrop;

	oktodrop = ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) == 0);
	kn->kn_status |= KN_DROPPING;
	if (oktodrop) {
		if (kn->kn_inuse == 0) {
			kqunlock(kq);
			return (oktodrop);
		}
	}
	kn->kn_status |= KN_USEWAIT;
	wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, &kn->kn_status,
	    THREAD_UNINT, 0);
	kqunlock(kq);
	thread_block(THREAD_CONTINUE_NULL);
	return (oktodrop);
}
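
/*
 * Illustrative sketch (not in the original source): how the lock/use-count
 * conversions above are meant to compose.  A caller that wants to invoke a
 * knote's filter without holding the kqueue spinlock across the call does
 * roughly:
 *
 *	kqlock(kq);
 *	if (kqlock2knoteuse(kq, kn)) {
 *		// kq unlocked here; kn pinned by kn_inuse
 *		result = kn->kn_fop->f_event(kn, hint);
 *		if (knoteuse2kqlock(kq, kn) && result)
 *			knote_activate(kn, 1);
 *		// kq locked again; a zero return above means the
 *		// knote is being dropped and must not be touched
 *	}
 *	kqunlock(kq);
 *
 * knote() and knote_process() below follow exactly this shape.
 */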
/*
 * Release a knote use count reference.
 */
static void
knote_put(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	kqlock(kq);
	if (--kn->kn_inuse == 0) {
		if ((kn->kn_status & KN_USEWAIT) != 0) {
			kn->kn_status &= ~KN_USEWAIT;
			wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs,
			    &kn->kn_status, THREAD_AWAKENED);
		}
	}
	kqunlock(kq);
}
static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn, vfs_context_current()));
}

#define	f_flag f_fglob->fg_flag
#define	f_msgcount f_fglob->fg_msgcount
#define	f_cred f_fglob->fg_cred
#define	f_ops f_fglob->fg_ops
#define	f_offset f_fglob->fg_offset
#define	f_data f_fglob->fg_data
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kqlock(kq);
	KNOTE_DETACH(&kq->kq_sel.si_note, kn);
	kqunlock(kq);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, __unused long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;

	assert(PID_MAX < NOTE_PDATAMASK);

	if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0)
		return (ENOTSUP);

	p = proc_find(kn->kn_id);
	if (p == NULL) {
		return (ESRCH);
	}

	const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;

	if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits)
		do {
			pid_t selfpid = proc_selfpid();

			if (p->p_ppid == selfpid)
				break;	/* parent => ok */

			if ((p->p_lflag & P_LTRACED) != 0 &&
			    (p->p_oppid == selfpid))
				break;	/* parent-in-waiting => ok */

			proc_rele(p);
			return (EACCES);
		} while (0);

	proc_klist_lock();

	kn->kn_flags |= EV_CLEAR;	/* automatically set */
	kn->kn_ptr.p_proc = p;		/* store the proc handle */

	KNOTE_ATTACH(&p->p_klist, kn);

	proc_klist_unlock();

	proc_rele(p);

	return (0);
}
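
/*
 * Hypothetical user-space sketch (not part of this file): watching another
 * process for exit via the attach path above.  Note that NOTE_EXITSTATUS is
 * only permitted for a child of the caller, per the check in
 * filt_procattach().
 *
 *	int kq = kqueue();
 *	struct kevent ke;
 *	EV_SET(&ke, child_pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
 *	kevent(kq, &ke, 1, NULL, 0, NULL);     // register
 *	kevent(kq, NULL, 0, &ke, 1, NULL);     // blocks until the exit
 *	// ke.fflags now has NOTE_EXIT set
 */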
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  In that case,
 * the pointer to the process will have already been nulled out.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	proc_klist_lock();

	p = kn->kn_ptr.p_proc;
	if (p != PROC_NULL) {
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}

	proc_klist_unlock();
}
static int
filt_proc(struct knote *kn, long hint)
{
	/*
	 * Note: a lot of bits in hint may be obtained from the knote
	 * To free some of those bits, see <rdar://problem/12592988> Freeing up
	 * bits in hint for filt_proc
	 */
	/* hint is 0 when called from above */
	if (hint != 0) {
		u_int event;

		/* ALWAYS CALLED WITH proc_klist_lock when (hint != 0) */

		/*
		 * mask off extra data
		 */
		event = (u_int)hint & NOTE_PCTRLMASK;

		/*
		 * termination lifecycle events can happen while a debugger
		 * has reparented a process, in which case notifications
		 * should be quashed except to the tracing parent. When
		 * the debugger reaps the child (either via wait4(2) or
		 * process exit), the child will be reparented to the original
		 * parent and these knotes re-fired.
		 */
		if (event & NOTE_EXIT) {
			if ((kn->kn_ptr.p_proc->p_oppid != 0)
			    && (kn->kn_kq->kq_p->p_pid != kn->kn_ptr.p_proc->p_ppid)) {
				/*
				 * This knote is not for the current ptrace(2) parent, ignore.
				 */
				return (0);
			}
		}

		/*
		 * if the user is interested in this event, record it.
		 */
		if (kn->kn_sfflags & event)
			kn->kn_fflags |= event;

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		}
#pragma clang diagnostic pop

		if (event == NOTE_EXIT) {
			kn->kn_data = 0;
			if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
				kn->kn_fflags |= NOTE_EXITSTATUS;
				kn->kn_data |= (hint & NOTE_PDATAMASK);
			}
			if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
				kn->kn_fflags |= NOTE_EXIT_DETAIL;
				if ((kn->kn_ptr.p_proc->p_lflag &
				    P_LTERM_DECRYPTFAIL) != 0) {
					kn->kn_data |= NOTE_EXIT_DECRYPTFAIL;
				}
				if ((kn->kn_ptr.p_proc->p_lflag &
				    P_LTERM_JETSAM) != 0) {
					kn->kn_data |= NOTE_EXIT_MEMORY;
					switch (kn->kn_ptr.p_proc->p_lflag &
					    P_JETSAM_MASK) {
					case P_JETSAM_VMPAGESHORTAGE:
						kn->kn_data |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
						break;
					case P_JETSAM_VMTHRASHING:
						kn->kn_data |= NOTE_EXIT_MEMORY_VMTHRASHING;
						break;
					case P_JETSAM_VNODE:
						kn->kn_data |= NOTE_EXIT_MEMORY_VNODE;
						break;
					case P_JETSAM_HIWAT:
						kn->kn_data |= NOTE_EXIT_MEMORY_HIWAT;
						break;
					case P_JETSAM_PID:
						kn->kn_data |= NOTE_EXIT_MEMORY_PID;
						break;
					case P_JETSAM_IDLEEXIT:
						kn->kn_data |= NOTE_EXIT_MEMORY_IDLE;
						break;
					}
				}
				if ((kn->kn_ptr.p_proc->p_csflags &
				    CS_KILLED) != 0) {
					kn->kn_data |= NOTE_EXIT_CSERROR;
				}
			}
		}
	}

	/* atomic check, no locking need when called from above */
	return (kn->kn_fflags != 0);
}
#if VM_PRESSURE_EVENTS
/*
 * Virtual memory kevents
 *
 * author: Matt Jacobson [matthew_jacobson@apple.com]
 */

static int
filt_vmattach(struct knote *kn)
{
	/*
	 * The note will be cleared once the information has been flushed to
	 * the client. If there is still pressure, we will be re-alerted.
	 */
	kn->kn_flags |= EV_CLEAR;
	return (vm_knote_register(kn));
}

static void
filt_vmdetach(struct knote *kn)
{
	vm_knote_unregister(kn);
}

static int
filt_vm(struct knote *kn, long hint)
{
	/* hint == 0 means this is just an alive? check (always true) */
	if (hint != 0) {
		const pid_t pid = (pid_t)hint;
		if ((kn->kn_sfflags & NOTE_VM_PRESSURE) &&
		    (kn->kn_kq->kq_p->p_pid == pid)) {
			kn->kn_fflags |= NOTE_VM_PRESSURE;
		}
	}

	return (kn->kn_fflags != 0);
}
#endif /* VM_PRESSURE_EVENTS */
/*
 * filt_timervalidate - process data from user
 *
 *	Converts to either interval or deadline format.
 *
 *	The saved-data field in the knote contains the
 *	time value.  The saved filter-flags indicates
 *	the unit of measurement.
 *
 *	After validation, either the saved-data field
 *	contains the interval in absolute time, or ext[0]
 *	contains the expected deadline. If that deadline
 *	is in the past, ext[0] is 0.
 *
 *	Returns EINVAL for unrecognized units of time.
 *
 *	Timer filter lock is held.
 */
static int
filt_timervalidate(struct knote *kn)
{
	uint64_t multiplier;
	uint64_t raw = 0;

	switch (kn->kn_sfflags & (NOTE_SECONDS|NOTE_USECONDS|NOTE_NSECONDS)) {
	case NOTE_SECONDS:
		multiplier = NSEC_PER_SEC;
		break;
	case NOTE_USECONDS:
		multiplier = NSEC_PER_USEC;
		break;
	case NOTE_NSECONDS:
		multiplier = 1;
		break;
	case 0: /* milliseconds (default) */
		multiplier = NSEC_PER_SEC / 1000;
		break;
	default:
		return (EINVAL);
	}

	/* transform the slop delta (leeway) in kn_ext[1], if passed, to the same time scale */
	if (kn->kn_sfflags & NOTE_LEEWAY) {
		nanoseconds_to_absolutetime((uint64_t)kn->kn_ext[1] * multiplier, &raw);
		kn->kn_ext[1] = raw;
	}

	nanoseconds_to_absolutetime((uint64_t)kn->kn_sdata * multiplier, &raw);

	kn->kn_ext[0] = 0;
	kn->kn_sdata = 0;

	if (kn->kn_sfflags & NOTE_ABSOLUTE) {
		clock_sec_t seconds;
		clock_nsec_t nanoseconds;
		uint64_t now;

		clock_get_calendar_nanotime(&seconds, &nanoseconds);
		nanoseconds_to_absolutetime((uint64_t)seconds * NSEC_PER_SEC +
		    nanoseconds, &now);

		if (raw < now) {
			/* time has already passed */
			kn->kn_ext[0] = 0;
		} else {
			raw -= now;
			clock_absolutetime_interval_to_deadline(raw,
			    &kn->kn_ext[0]);
		}
	} else {
		kn->kn_sdata = raw;
	}

	return (0);
}
/*
 * filt_timerupdate - compute the next deadline
 *
 *	Repeating timers store their interval in kn_sdata. Absolute
 *	timers have already calculated the deadline, stored in ext[0].
 *
 *	On return, the next deadline (or zero if no deadline is needed)
 *	is stored in kn_ext[0].
 *
 *	Timer filter lock is held.
 */
static void
filt_timerupdate(struct knote *kn)
{
	/* if there's no interval, deadline is just in kn_ext[0] */
	if (kn->kn_sdata == 0)
		return;

	/* if timer hasn't fired before, fire in interval nsecs */
	if (kn->kn_ext[0] == 0) {
		clock_absolutetime_interval_to_deadline(kn->kn_sdata,
		    &kn->kn_ext[0]);
	} else {
		/*
		 * If timer has fired before, schedule the next pop
		 * relative to the last intended deadline.
		 *
		 * We could check for whether the deadline has expired,
		 * but the thread call layer can handle that.
		 */
		kn->kn_ext[0] += kn->kn_sdata;
	}
}
/*
 * filt_timerexpire - the timer callout routine
 *
 *	Just propagate the timer event into the knote
 *	filter routine (by going through the knote
 *	synchronization point).  Pass a hint to
 *	indicate this is a real event, not just a
 *	query from above.
 */
static void
filt_timerexpire(void *knx, __unused void *spare)
{
	struct klist timer_list;
	struct knote *kn = knx;

	filt_timerlock();

	kn->kn_hookid &= ~TIMER_RUNNING;

	/* no "object" for timers, so fake a list */
	SLIST_INIT(&timer_list);
	SLIST_INSERT_HEAD(&timer_list, kn, kn_selnext);
	KNOTE(&timer_list, 1);

	/* if someone is waiting for timer to pop */
	if (kn->kn_hookid & TIMER_CANCELWAIT) {
		struct kqueue *kq = kn->kn_kq;
		wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kn->kn_hook,
		    THREAD_AWAKENED);
	}

	filt_timerunlock();
}
/*
 * Cancel a running timer (or wait for the pop).
 * Timer filter lock is held.
 */
static void
filt_timercancel(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	thread_call_t callout = kn->kn_hook;
	boolean_t cancelled;

	if (kn->kn_hookid & TIMER_RUNNING) {
		/* cancel the callout if we can */
		cancelled = thread_call_cancel(callout);
		if (cancelled) {
			kn->kn_hookid &= ~TIMER_RUNNING;
		} else {
			/* we have to wait for the expire routine.  */
			kn->kn_hookid |= TIMER_CANCELWAIT;
			wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
			    &kn->kn_hook, THREAD_UNINT, 0);
			filt_timerunlock();
			thread_block(THREAD_CONTINUE_NULL);
			filt_timerlock();
			assert((kn->kn_hookid & TIMER_RUNNING) == 0);
		}
	}
}
/*
 * Allocate a thread call for the knote's lifetime, and kick off the timer.
 */
static int
filt_timerattach(struct knote *kn)
{
	thread_call_t callout;
	int error;

	callout = thread_call_allocate(filt_timerexpire, kn);
	if (NULL == callout)
		return (ENOMEM);

	filt_timerlock();
	error = filt_timervalidate(kn);
	if (error != 0) {
		filt_timerunlock();
		return (error);
	}

	kn->kn_hook = (void*)callout;
	kn->kn_hookid = 0;

	/* absolute=EV_ONESHOT */
	if (kn->kn_sfflags & NOTE_ABSOLUTE)
		kn->kn_flags |= EV_ONESHOT;

	filt_timerupdate(kn);
	if (kn->kn_ext[0]) {
		kn->kn_flags |= EV_CLEAR;
		unsigned int timer_flags = 0;
		if (kn->kn_sfflags & NOTE_CRITICAL)
			timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
		else if (kn->kn_sfflags & NOTE_BACKGROUND)
			timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
		else
			timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;

		if (kn->kn_sfflags & NOTE_LEEWAY)
			timer_flags |= THREAD_CALL_DELAY_LEEWAY;

		thread_call_enter_delayed_with_leeway(callout, NULL,
		    kn->kn_ext[0], kn->kn_ext[1], timer_flags);

		kn->kn_hookid |= TIMER_RUNNING;
	} else {
		/* fake immediate */
		kn->kn_data = 1;
	}

	filt_timerunlock();
	return (0);
}
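
/*
 * Hypothetical user-space sketch (not part of this file): a repeating
 * 500 ms timer serviced through the attach path above.
 *
 *	struct kevent ke;
 *	EV_SET(&ke, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);  // ms by default
 *	kevent(kq, &ke, 1, NULL, 0, NULL);
 *	for (;;) {
 *		kevent(kq, NULL, 0, &ke, 1, NULL);
 *		// ke.data counts pops since last pickup (EV_CLEAR is
 *		// set automatically by filt_timerattach above)
 *	}
 */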
/*
 * Shut down the timer if it's running, and free the callout.
 */
static void
filt_timerdetach(struct knote *kn)
{
	thread_call_t callout;

	filt_timerlock();

	callout = (thread_call_t)kn->kn_hook;
	filt_timercancel(kn);

	filt_timerunlock();

	thread_call_free(callout);
}
static int
filt_timer(struct knote *kn, long hint)
{
	int result;

	if (hint) {
		/* real timer pop -- timer lock held by filt_timerexpire */
		kn->kn_data++;

		if (((kn->kn_hookid & TIMER_CANCELWAIT) == 0) &&
		    ((kn->kn_flags & EV_ONESHOT) == 0)) {

			/* evaluate next time to fire */
			filt_timerupdate(kn);

			if (kn->kn_ext[0]) {
				unsigned int timer_flags = 0;

				/* keep the callout and re-arm */
				if (kn->kn_sfflags & NOTE_CRITICAL)
					timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
				else if (kn->kn_sfflags & NOTE_BACKGROUND)
					timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
				else
					timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;

				if (kn->kn_sfflags & NOTE_LEEWAY)
					timer_flags |= THREAD_CALL_DELAY_LEEWAY;

				thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL,
				    kn->kn_ext[0], kn->kn_ext[1], timer_flags);

				kn->kn_hookid |= TIMER_RUNNING;
			}
		}
		return (1);
	}

	/* user-query */
	filt_timerlock();
	result = (kn->kn_data != 0);
	filt_timerunlock();

	return (result);
}
/*
 * filt_timertouch - update knote with new user input
 *
 *	Cancel and restart the timer based on new user data. When
 *	the user picks up a knote, clear the count of how many timer
 *	pops have gone off (in kn_data).
 */
static void
filt_timertouch(struct knote *kn, struct kevent64_s *kev, long type)
{
	int error;
	filt_timerlock();

	switch (type) {
	case EVENT_REGISTER:
		/* cancel current call */
		filt_timercancel(kn);

		/* recalculate deadline */
		kn->kn_sdata = kev->data;
		kn->kn_sfflags = kev->fflags;
		kn->kn_ext[0] = kev->ext[0];
		kn->kn_ext[1] = kev->ext[1];

		error = filt_timervalidate(kn);
		if (error) {
			/* no way to report error, so mark it in the knote */
			kn->kn_flags |= EV_ERROR;
			kn->kn_data = error;
			break;
		}

		/* start timer if necessary */
		filt_timerupdate(kn);

		if (kn->kn_ext[0]) {
			unsigned int timer_flags = 0;
			if (kn->kn_sfflags & NOTE_CRITICAL)
				timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
			else if (kn->kn_sfflags & NOTE_BACKGROUND)
				timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
			else
				timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;

			if (kn->kn_sfflags & NOTE_LEEWAY)
				timer_flags |= THREAD_CALL_DELAY_LEEWAY;

			thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL,
			    kn->kn_ext[0], kn->kn_ext[1], timer_flags);

			kn->kn_hookid |= TIMER_RUNNING;
		} else {
			/* pretend the timer has fired */
			kn->kn_data = 1;
		}
		break;

	case EVENT_PROCESS:
		/* reset the timer pop count in kn_data */
		*kev = kn->kn_kevent;
		kev->ext[0] = 0;
		kn->kn_data = 0;
		if (kn->kn_flags & EV_CLEAR)
			kn->kn_fflags = 0;
		break;
	default:
		panic("%s: - invalid type (%ld)", __func__, type);
		break;
	}

	filt_timerunlock();
}
static void
filt_timerlock(void)
{
	lck_mtx_lock(&_filt_timerlock);
}

static void
filt_timerunlock(void)
{
	lck_mtx_unlock(&_filt_timerlock);
}
static int
filt_userattach(struct knote *kn)
{
	/* EVFILT_USER knotes are not attached to anything in the kernel */
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER) {
		kn->kn_hookid = 1;
	} else {
		kn->kn_hookid = 0;
	}
	return (0);
}

static void
filt_userdetach(__unused struct knote *kn)
{
	/* EVFILT_USER knotes are not attached to anything in the kernel */
}

static int
filt_user(struct knote *kn, __unused long hint)
{
	return (kn->kn_hookid);
}
static void
filt_usertouch(struct knote *kn, struct kevent64_s *kev, long type)
{
	uint32_t ffctrl;
	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER) {
			kn->kn_hookid = 1;
		}

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;
		case NOTE_FFAND:
			OSBitAndAtomic(kev->fflags, &kn->kn_sfflags);
			break;
		case NOTE_FFOR:
			OSBitOrAtomic(kev->fflags, &kn->kn_sfflags);
			break;
		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;
		}
		kn->kn_sdata = kev->data;
		break;
	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = (volatile UInt32)kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;
	default:
		panic("%s: - invalid type (%ld)", __func__, type);
		break;
	}
}
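
/*
 * Hypothetical user-space sketch (not part of this file): EVFILT_USER as a
 * self-wakeup channel, exercising the register/process paths above.
 *
 *	struct kevent ke;
 *	EV_SET(&ke, 42, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &ke, 1, NULL, 0, NULL);
 *
 *	// ... later, from another thread:
 *	EV_SET(&ke, 42, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &ke, 1, NULL, 0, NULL);   // sets kn_hookid = 1
 *
 *	// the waiter's kevent() now returns ident 42; EV_CLEAR makes the
 *	// EVENT_PROCESS case reset kn_hookid/kn_data/kn_fflags for reuse.
 */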
/*
 * JMM - placeholder for not-yet-implemented filters
 */
static int
filt_badattach(__unused struct knote *kn)
{
	return (ENOTSUP);
}
struct kqueue *
kqueue_alloc(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;

	MALLOC_ZONE(kq, struct kqueue *, sizeof (struct kqueue), M_KQUEUE,
	    M_WAITOK);
	if (kq != NULL) {
		wait_queue_set_t wqs;

		wqs = wait_queue_set_alloc(SYNC_POLICY_FIFO |
		    SYNC_POLICY_PREPOST);
		if (wqs != NULL) {
			bzero(kq, sizeof (struct kqueue));
			lck_spin_init(&kq->kq_lock, kq_lck_grp, kq_lck_attr);
			TAILQ_INIT(&kq->kq_head);
			kq->kq_wqs = wqs;
			kq->kq_p = p;
		} else {
			FREE_ZONE(kq, sizeof (struct kqueue), M_KQUEUE);
			kq = NULL;
		}
	}

	if (fdp->fd_knlistsize < 0) {
		proc_fdlock(p);
		if (fdp->fd_knlistsize < 0)
			fdp->fd_knlistsize = 0;	/* this process has had a kq */
		proc_fdunlock(p);
	}

	return (kq);
}
/*
 * kqueue_dealloc - detach all knotes from a kqueue and free it
 *
 *	We walk each list looking for knotes referencing this
 *	kqueue.  If we find one, we try to drop it.  But
 *	if we fail to get a drop reference, that will wait
 *	until it is dropped.  So, we can just restart again
 *	safe in the assumption that the list will eventually
 *	not contain any more references to this kqueue (either
 *	we dropped them all, or someone else did).
 *
 *	Assumes no new events are being added to the kqueue.
 *	Nothing locked on entry or exit.
 */
void
kqueue_dealloc(struct kqueue *kq)
{
	struct proc *p = kq->kq_p;
	struct filedesc *fdp = p->p_fd;
	struct knote *kn;
	int i;

	proc_fdlock(p);
	for (i = 0; i < fdp->fd_knlistsize; i++) {
		kn = SLIST_FIRST(&fdp->fd_knlist[i]);
		while (kn != NULL) {
			if (kq == kn->kn_kq) {
				kqlock(kq);
				proc_fdunlock(p);
				/* drop it ourselves or wait */
				if (kqlock2knotedrop(kq, kn)) {
					kn->kn_fop->f_detach(kn);
					knote_drop(kn, p);
				}
				proc_fdlock(p);
				/* start over at beginning of list */
				kn = SLIST_FIRST(&fdp->fd_knlist[i]);
				continue;
			}
			kn = SLIST_NEXT(kn, kn_link);
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
			kn = SLIST_FIRST(&fdp->fd_knhash[i]);
			while (kn != NULL) {
				if (kq == kn->kn_kq) {
					kqlock(kq);
					proc_fdunlock(p);
					/* drop it ourselves or wait */
					if (kqlock2knotedrop(kq, kn)) {
						kn->kn_fop->f_detach(kn);
						knote_drop(kn, p);
					}
					proc_fdlock(p);
					/* start over at beginning of list */
					kn = SLIST_FIRST(&fdp->fd_knhash[i]);
					continue;
				}
				kn = SLIST_NEXT(kn, kn_link);
			}
		}
	}
	proc_fdunlock(p);

	/*
	 * before freeing the wait queue set for this kqueue,
	 * make sure it is unlinked from all its containing (select) sets.
	 */
	wait_queue_unlink_all((wait_queue_t)kq->kq_wqs);
	wait_queue_set_free(kq->kq_wqs);
	lck_spin_destroy(&kq->kq_lock, kq_lck_grp);
	FREE_ZONE(kq, sizeof (struct kqueue), M_KQUEUE);
}
int
kqueue_body(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
{
	struct kqueue *kq;
	struct fileproc *fp;
	int fd, error;

	error = falloc_withalloc(p,
	    &fp, &fd, vfs_context_current(), fp_zalloc, cra);
	if (error) {
		return (error);
	}

	kq = kqueue_alloc(p);
	if (kq == NULL) {
		fp_free(p, fd, fp);
		return (ENOMEM);
	}

	fp->f_flag = FREAD | FWRITE;
	fp->f_ops = &kqueueops;
	fp->f_data = (caddr_t)kq;

	proc_fdlock(p);
	procfdtbl_releasefd(p, fd, NULL);
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);

	*retval = fd;
	return (error);
}

int
kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
{
	return (kqueue_body(p, fileproc_alloc_init, NULL, retval));
}
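
/*
 * Hypothetical user-space sketch (not part of this file): the minimal
 * lifecycle of the descriptor created by the syscall above.
 *
 *	int kq = kqueue();                   // -> kqueue_body()
 *	struct kevent ke;
 *	EV_SET(&ke, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ke, 1, NULL, 0, NULL);   // register
 *	kevent(kq, NULL, 0, &ke, 1, NULL);   // wait; ke.data = bytes ready
 *	close(kq);                           // -> kqueue_close()/kqueue_dealloc()
 */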
static int
kevent_copyin(user_addr_t *addrp, struct kevent64_s *kevp, struct proc *p,
    int iskev64)
{
	int advance;
	int error;

	if (iskev64) {
		advance = sizeof (struct kevent64_s);
		error = copyin(*addrp, (caddr_t)kevp, advance);
	} else if (IS_64BIT_PROCESS(p)) {
		struct user64_kevent kev64;
		bzero(kevp, sizeof (struct kevent64_s));

		advance = sizeof (kev64);
		error = copyin(*addrp, (caddr_t)&kev64, advance);
		if (error)
			return (error);
		kevp->ident = kev64.ident;
		kevp->filter = kev64.filter;
		kevp->flags = kev64.flags;
		kevp->fflags = kev64.fflags;
		kevp->data = kev64.data;
		kevp->udata = kev64.udata;
	} else {
		struct user32_kevent kev32;
		bzero(kevp, sizeof (struct kevent64_s));

		advance = sizeof (kev32);
		error = copyin(*addrp, (caddr_t)&kev32, advance);
		if (error)
			return (error);
		kevp->ident = (uintptr_t)kev32.ident;
		kevp->filter = kev32.filter;
		kevp->flags = kev32.flags;
		kevp->fflags = kev32.fflags;
		kevp->data = (intptr_t)kev32.data;
		kevp->udata = CAST_USER_ADDR_T(kev32.udata);
	}
	if (!error)
		*addrp += advance;
	return (error);
}
static int
kevent_copyout(struct kevent64_s *kevp, user_addr_t *addrp, struct proc *p,
    int iskev64)
{
	int advance;
	int error;

	if (iskev64) {
		advance = sizeof (struct kevent64_s);
		error = copyout((caddr_t)kevp, *addrp, advance);
	} else if (IS_64BIT_PROCESS(p)) {
		struct user64_kevent kev64;

		/*
		 * deal with the special case of a user-supplied
		 * value of (uintptr_t)-1.
		 */
		kev64.ident = (kevp->ident == (uintptr_t)-1) ?
		    (uint64_t)-1LL : (uint64_t)kevp->ident;

		kev64.filter = kevp->filter;
		kev64.flags = kevp->flags;
		kev64.fflags = kevp->fflags;
		kev64.data = (int64_t) kevp->data;
		kev64.udata = kevp->udata;
		advance = sizeof (kev64);
		error = copyout((caddr_t)&kev64, *addrp, advance);
	} else {
		struct user32_kevent kev32;

		kev32.ident = (uint32_t)kevp->ident;
		kev32.filter = kevp->filter;
		kev32.flags = kevp->flags;
		kev32.fflags = kevp->fflags;
		kev32.data = (int32_t)kevp->data;
		kev32.udata = kevp->udata;
		advance = sizeof (kev32);
		error = copyout((caddr_t)&kev32, *addrp, advance);
	}
	if (!error)
		*addrp += advance;
	return (error);
}
/*
 * kevent_continue - continue a kevent syscall after blocking
 *
 *	assume we inherit a use count on the kq fileglob.
 */

static void
kevent_continue(__unused struct kqueue *kq, void *data, int error)
{
	struct _kevent *cont_args;
	struct fileproc *fp;
	int32_t *retval;
	int noutputs;
	int fd;
	struct proc *p = current_proc();

	cont_args = (struct _kevent *)data;
	noutputs = cont_args->eventout;
	retval = cont_args->retval;
	fd = cont_args->fd;
	fp = cont_args->fp;

	fp_drop(p, fd, fp, 0);

	/* don't restart after signals... */
	if (error == ERESTART)
		error = EINTR;
	else if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0)
		*retval = noutputs;
	unix_syscall_return(error);
}
/*
 * kevent - [syscall] register and wait for kernel events
 *
 */
int
kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
{
	return (kevent_internal(p,
	    0,
	    uap->changelist,
	    uap->nchanges,
	    uap->eventlist,
	    uap->nevents,
	    uap->fd,
	    uap->timeout,
	    0, /* no flags from old kevent() call */
	    retval));
}

int
kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
{
	return (kevent_internal(p,
	    1,
	    uap->changelist,
	    uap->nchanges,
	    uap->eventlist,
	    uap->nevents,
	    uap->fd,
	    uap->timeout,
	    uap->flags,
	    retval));
}
static int
kevent_internal(struct proc *p, int iskev64, user_addr_t changelist,
    int nchanges, user_addr_t ueventlist, int nevents, int fd,
    user_addr_t utimeout, __unused unsigned int flags,
    int32_t *retval)
{
	struct _kevent *cont_args;
	uthread_t ut;
	struct kqueue *kq;
	struct fileproc *fp;
	struct kevent64_s kev;
	int error, noutputs;
	struct timeval atv;

	/* convert timeout to absolute - if we have one */
	if (utimeout != USER_ADDR_NULL) {
		struct timeval rtv;
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timespec ts;
			error = copyin(utimeout, &ts, sizeof(ts));
			if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0)
				error = EINVAL;
			else
				TIMESPEC_TO_TIMEVAL(&rtv, &ts);
		} else {
			struct user32_timespec ts;
			error = copyin(utimeout, &ts, sizeof(ts));
			TIMESPEC_TO_TIMEVAL(&rtv, &ts);
		}
		if (error)
			return (error);
		if (itimerfix(&rtv))
			return (EINVAL);
		getmicrouptime(&atv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
	}

	/* get a usecount for the kq itself */
	if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0)
		return (error);

	/* each kq should only be used for events of one type */
	kqlock(kq);
	if (kq->kq_state & (KQ_KEV32 | KQ_KEV64)) {
		if (((iskev64 && (kq->kq_state & KQ_KEV32)) ||
		    (!iskev64 && (kq->kq_state & KQ_KEV64)))) {
			error = EINVAL;
			kqunlock(kq);
			goto errorout;
		}
	} else {
		kq->kq_state |= (iskev64 ? KQ_KEV64 : KQ_KEV32);
	}
	kqunlock(kq);

	/* register all the change requests the user provided... */
	noutputs = 0;
	while (nchanges > 0 && error == 0) {
		error = kevent_copyin(&changelist, &kev, p, iskev64);
		if (error)
			break;

		kev.flags &= ~EV_SYSFLAGS;
		error = kevent_register(kq, &kev, p);
		if ((error || (kev.flags & EV_RECEIPT)) && nevents > 0) {
			kev.flags = EV_ERROR;
			kev.data = error;
			error = kevent_copyout(&kev, &ueventlist, p, iskev64);
			if (error == 0) {
				nevents--;
				noutputs++;
			}
		}
		nchanges--;
	}

	/* store the continuation/completion data in the uthread */
	ut = (uthread_t)get_bsdthread_info(current_thread());
	cont_args = &ut->uu_kevent.ss_kevent;
	cont_args->fp = fp;
	cont_args->fd = fd;
	cont_args->retval = retval;
	cont_args->eventlist = ueventlist;
	cont_args->eventcount = nevents;
	cont_args->eventout = noutputs;
	cont_args->eventsize = iskev64;

	if (nevents > 0 && noutputs == 0 && error == 0)
		error = kqueue_scan(kq, kevent_callback,
		    kevent_continue, cont_args,
		    &atv, p);
	kevent_continue(kq, cont_args, error);

errorout:
	fp_drop(p, fd, fp, 0);
	return (error);
}
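
/*
 * Illustrative note (not in the original source): per the registration loop
 * above, EV_RECEIPT forces a per-change status entry into the output list
 * even on success, which lets user space batch registrations and still see
 * individual errors:
 *
 *	struct kevent changes[2], results[2];
 *	// ... EV_SET both entries with EV_ADD | EV_RECEIPT ...
 *	int n = kevent(kq, changes, 2, results, 2, NULL);
 *	// each results[i] comes back with EV_ERROR set; results[i].data
 *	// is 0 on success or the errno for that individual change.
 */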
/*
 * kevent_callback - callback for each individual event
 *
 *	called with nothing locked
 *	caller holds a reference on the kqueue
 */
static int
kevent_callback(__unused struct kqueue *kq, struct kevent64_s *kevp,
    void *data)
{
	struct _kevent *cont_args;
	int error;
	int iskev64;

	cont_args = (struct _kevent *)data;
	assert(cont_args->eventout < cont_args->eventcount);

	iskev64 = cont_args->eventsize;

	/*
	 * Copy out the appropriate amount of event data for this user.
	 */
	error = kevent_copyout(kevp, &cont_args->eventlist, current_proc(),
	    iskev64);

	/*
	 * If there isn't space for additional events, return
	 * a harmless error to stop the processing here
	 */
	if (error == 0 && ++cont_args->eventout == cont_args->eventcount)
		error = EWOULDBLOCK;
	return (error);
}
/*
 * kevent_description - format a description of a kevent for diagnostic output
 *
 *	called with a 128-byte string buffer
 */

char *
kevent_description(struct kevent64_s *kevp, char *s, size_t n)
{
	snprintf(s, n,
	    "kevent="
	    "{.ident=%#llx, .filter=%d, .flags=%#x, .fflags=%#x, .data=%#llx, .udata=%#llx, .ext[0]=%#llx, .ext[1]=%#llx}",
	    kevp->ident,
	    kevp->filter,
	    kevp->flags,
	    kevp->fflags,
	    kevp->data,
	    kevp->udata,
	    kevp->ext[0],
	    kevp->ext[1]);

	return (s);
}
/*
 * kevent_register - add a new event to a kqueue
 *
 *	Creates a mapping between the event source and
 *	the kqueue via a knote data structure.
 *
 *	Because many/most of the event sources are file
 *	descriptor related, the knote is linked off
 *	the filedescriptor table for quick access.
 *
 *	called with nothing locked
 *	caller holds a reference on the kqueue
 */

int
kevent_register(struct kqueue *kq, struct kevent64_s *kev,
    __unused struct proc *ctxp)
{
	struct proc *p = kq->kq_p;
	struct filedesc *fdp = p->p_fd;
	struct filterops *fops;
	struct fileproc *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		printf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

restart:
	/* this iocount needs to be dropped if it is not registered */
	proc_fdlock(p);
	if (fops->f_isfd && (error = fp_lookup(p, kev->ident, &fp, 1)) != 0) {
		proc_fdunlock(p);
		return (error);
	}

	if (fops->f_isfd) {
		/* fd-based knotes are linked off the fd table */
		if (kev->ident < (u_int)fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		/* hash non-fd knotes here too */
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kn == NULL) {
		if ((kev->flags & (EV_ADD|EV_DELETE)) == EV_ADD) {
			kn = knote_alloc();
			if (kn == NULL) {
				proc_fdunlock(p);
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_tq = &kq->kq_head;
			kn->kn_fop = fops;
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;
			kn->kn_inuse = 1;	/* for f_attach() */
			kn->kn_status = KN_ATTACHING;

			/* before anyone can find it */
			if (kev->flags & EV_DISABLE)
				kn->kn_status |= KN_DISABLED;

			error = knote_fdpattach(kn, fdp, p);
			proc_fdunlock(p);

			if (error) {
				knote_free(kn);
				goto done;
			}

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			error = fops->f_attach(kn);

			kqlock(kq);

			if (error != 0) {
				/*
				 * Failed to attach correctly, so drop.
				 * All other possible users/droppers
				 * have deferred to us.
				 */
				kn->kn_status |= KN_DROPPING;
				kqunlock(kq);
				knote_drop(kn, p);
				goto done;
			} else if (kn->kn_status & KN_DROPPING) {
				/*
				 * Attach succeeded, but someone else
				 * deferred their drop - now we have
				 * to do it for them (after detaching).
				 */
				kqunlock(kq);
				kn->kn_fop->f_detach(kn);
				knote_drop(kn, p);
				goto done;
			}
			kn->kn_status &= ~KN_ATTACHING;
			kqunlock(kq);
		} else {
			proc_fdunlock(p);
			error = ENOENT;
			goto done;
		}
	} else {
		/* existing knote - get kqueue lock */
		kqlock(kq);
		proc_fdunlock(p);

		if (kev->flags & EV_DELETE) {
			knote_dequeue(kn);
			kn->kn_status |= KN_DISABLED;
			if (kqlock2knotedrop(kq, kn)) {
				kn->kn_fop->f_detach(kn);
				knote_drop(kn, p);
			}
			goto done;
		}

		/* update status flags for existing knote */
		if (kev->flags & EV_DISABLE) {
			knote_dequeue(kn);
			kn->kn_status |= KN_DISABLED;
		} else if (kev->flags & EV_ENABLE) {
			kn->kn_status &= ~KN_DISABLED;
			if (kn->kn_status & KN_ACTIVE)
				knote_enqueue(kn);
		}

		/*
		 * The user may change some filter values after the
		 * initial EV_ADD, but doing so will not reset any
		 * filter which have already been triggered.
		 */
		kn->kn_kevent.udata = kev->udata;
		if (fops->f_isfd || fops->f_touch == NULL) {
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
		}

		/*
		 * If somebody is in the middle of dropping this
		 * knote - go find/insert a new one.  But we have
		 * to wait for this one to go away first. Attaches
		 * running in parallel may also drop/modify the
		 * knote.  Wait for those to complete as well and
		 * then start over if we encounter one.
		 */
		if (!kqlock2knoteusewait(kq, kn)) {
			/* kqueue, proc_fdlock both unlocked */
			goto restart;
		}

		/*
		 * Call touch routine to notify filter of changes
		 * in filter values.
		 */
		if (!fops->f_isfd && fops->f_touch != NULL)
			fops->f_touch(kn, kev, EVENT_REGISTER);
	}
	/* still have use ref on knote */

	/*
	 * If the knote is not marked to always stay enqueued,
	 * invoke the filter routine to see if it should be
	 * enqueued now.
	 */
	if ((kn->kn_status & KN_STAYQUEUED) == 0 && kn->kn_fop->f_event(kn, 0)) {
		if (knoteuse2kqlock(kq, kn))
			knote_activate(kn, 1);
		kqunlock(kq);
	} else {
		knote_put(kn);
	}

done:
	if (fp != NULL)
		fp_drop(p, kev->ident, fp, 0);
	return (error);
}
/*
 * knote_process - process a triggered event
 *
 *	Validate that it is really still a triggered event
 *	by calling the filter routines (if necessary).  Hold
 *	a use reference on the knote to avoid it being detached.
 *	If it is still considered triggered, invoke the callback
 *	routine provided and move it to the provided inprocess
 *	queue.
 *
 *	caller holds a reference on the kqueue.
 *	kqueue locked on entry and exit - but may be dropped
 */
static int
knote_process(struct knote *kn,
    kevent_callback_t callback,
    void *data,
    struct kqtailq *inprocessp,
    struct proc *p)
{
	struct kqueue *kq = kn->kn_kq;
	struct kevent64_s kev;
	int touch;
	int result;
	int error;

	/*
	 * Determine the kevent state we want to return.
	 *
	 * Some event states need to be revalidated before returning
	 * them, others we take the snapshot at the time the event
	 * was enqueued.
	 *
	 * Events with non-NULL f_touch operations must be touched.
	 * Triggered events must fill in kev for the callback.
	 *
	 * Convert our lock to a use-count and call the event's
	 * filter routine(s) to update.
	 */
	if ((kn->kn_status & KN_DISABLED) != 0) {
		result = 0;
		touch = 0;
	} else {
		int revalidate;

		result = 1;
		revalidate = ((kn->kn_status & KN_STAYQUEUED) != 0 ||
		    (kn->kn_flags & EV_ONESHOT) == 0);
		touch = (!kn->kn_fop->f_isfd && kn->kn_fop->f_touch != NULL);

		if (revalidate || touch) {
			if (revalidate)
				knote_deactivate(kn);

			/* call the filter/touch routines with just a ref */
			if (kqlock2knoteuse(kq, kn)) {
				/* if we have to revalidate, call the filter */
				if (revalidate) {
					result = kn->kn_fop->f_event(kn, 0);
				}

				/*
				 * capture the kevent data - using touch if
				 * specified
				 */
				if (result && touch) {
					kn->kn_fop->f_touch(kn, &kev,
					    EVENT_PROCESS);
				}

				/*
				 * convert back to a kqlock - bail if the knote
				 * went away
				 */
				if (!knoteuse2kqlock(kq, kn)) {
					return (EJUSTRETURN);
				} else if (result) {
					/*
					 * if revalidated as alive, make sure
					 * it's active
					 */
					if (!(kn->kn_status & KN_ACTIVE)) {
						knote_activate(kn, 0);
					}

					/*
					 * capture all events that occurred
					 * during filter
					 */
					if (!touch) {
						kev = kn->kn_kevent;
					}
				} else if ((kn->kn_status & KN_STAYQUEUED) == 0) {
					/*
					 * was already dequeued, so just bail on
					 * this one
					 */
					return (EJUSTRETURN);
				}
			} else {
				return (EJUSTRETURN);
			}
		} else {
			kev = kn->kn_kevent;
		}
	}

	/* move knote onto inprocess queue */
	assert(kn->kn_tq == &kq->kq_head);
	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_tq = inprocessp;
	TAILQ_INSERT_TAIL(inprocessp, kn, kn_tqe);

	/*
	 * Determine how to dispatch the knote for future event handling.
	 * not-fired: just return (do not callout).
	 * One-shot: deactivate it.
	 * Clear: deactivate and clear the state.
	 * Dispatch: don't clear state, just deactivate it and mark it disabled.
	 * All others: just leave where they are.
	 */

	if (result == 0) {
		return (EJUSTRETURN);
	} else if ((kn->kn_flags & EV_ONESHOT) != 0) {
		knote_deactivate(kn);
		if (kqlock2knotedrop(kq, kn)) {
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p);
		}
	} else if ((kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) != 0) {
		if ((kn->kn_flags & EV_DISPATCH) != 0) {
			/* deactivate and disable all dispatch knotes */
			knote_deactivate(kn);
			kn->kn_status |= KN_DISABLED;
		} else if (!touch || kn->kn_fflags == 0) {
			/* only deactivate if nothing since the touch */
			knote_deactivate(kn);
		}
		if (!touch && (kn->kn_flags & EV_CLEAR) != 0) {
			/* manually clear non-touch knotes */
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		kqunlock(kq);
	} else {
		/*
		 * leave on inprocess queue.  We'll
		 * move all the remaining ones back
		 * the kq queue and wakeup any
		 * waiters when we are done.
		 */
		kqunlock(kq);
	}

	/* callback to handle each event as we find it */
	error = (callback)(kq, &kev, data);

	kqlock(kq);
	return (error);
}
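
/*
 * Hypothetical user-space sketch (not part of this file): the EV_DISPATCH
 * branch above deactivates and disables a knote after delivery, so an event
 * fires once and stays quiet until explicitly re-enabled - useful when a
 * worker thread wants exactly one wakeup per arming:
 *
 *	EV_SET(&ke, fd, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
 *	kevent(kq, &ke, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &ke, 1, NULL);   // delivered, then disabled
 *	// ... handle the data, then re-arm:
 *	EV_SET(&ke, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
 *	kevent(kq, &ke, 1, NULL, 0, NULL);
 */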
/*
 * Return 0 to indicate that processing should proceed,
 * -1 if there is nothing to process.
 *
 * Called with kqueue locked and returns the same way,
 * but may drop lock temporarily.
 */
static int
kqueue_begin_processing(struct kqueue *kq)
{
	for (;;) {
		if (kq->kq_count == 0) {
			/* no stuff to process */
			return (-1);
		}

		/* if someone else is processing the queue, wait */
		if (kq->kq_nprocess != 0) {
			wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
			    &kq->kq_nprocess, THREAD_UNINT, 0);
			kq->kq_state |= KQ_PROCWAIT;
			kqunlock(kq);
			thread_block(THREAD_CONTINUE_NULL);
			kqlock(kq);
		} else {
			kq->kq_nprocess = 1;
			return (0);
		}
	}
}

/*
 * Called with kqueue lock held.
 */
static void
kqueue_end_processing(struct kqueue *kq)
{
	kq->kq_nprocess = 0;
	if (kq->kq_state & KQ_PROCWAIT) {
		kq->kq_state &= ~KQ_PROCWAIT;
		wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs,
		    &kq->kq_nprocess, THREAD_AWAKENED);
	}
}
/*
 * kqueue_process - process the triggered events in a kqueue
 *
 *	Walk the queued knotes and validate that they are
 *	really still triggered events by calling the filter
 *	routines (if necessary).  Hold a use reference on
 *	the knote to avoid it being detached. For each event
 *	that is still considered triggered, invoke the
 *	callback routine provided.
 *
 *	caller holds a reference on the kqueue.
 *	kqueue locked on entry and exit - but may be dropped
 *	kqueue list locked (held for duration of call)
 */

static int
kqueue_process(struct kqueue *kq,
    kevent_callback_t callback,
    void *data,
    int *countp,
    struct proc *p)
{
	struct kqtailq inprocess;
	struct knote *kn;
	int nevents;
	int error;

	TAILQ_INIT(&inprocess);

	if (kqueue_begin_processing(kq) == -1) {
		*countp = 0;
		/* Nothing to process */
		return (0);
	}

	/*
	 * Clear any pre-posted status from previous runs, so we
	 * only detect events that occur during this run.
	 */
	wait_queue_sub_clearrefs(kq->kq_wqs);

	/*
	 * loop through the enqueued knotes, processing each one and
	 * revalidating those that need it. As they are processed,
	 * they get moved to the inprocess queue (so the loop can end).
	 */
	error = 0;
	nevents = 0;

	while (error == 0 &&
	    (kn = TAILQ_FIRST(&kq->kq_head)) != NULL) {
		error = knote_process(kn, callback, data, &inprocess, p);
		if (error == EJUSTRETURN)
			error = 0;
		else
			nevents++;
	}

	/*
	 * With the kqueue still locked, move any knotes
	 * remaining on the inprocess queue back to the
	 * kq's queue and wake up any waiters.
	 */
	while ((kn = TAILQ_FIRST(&inprocess)) != NULL) {
		assert(kn->kn_tq == &inprocess);
		TAILQ_REMOVE(&inprocess, kn, kn_tqe);
		kn->kn_tq = &kq->kq_head;
		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	}

	kqueue_end_processing(kq);

	*countp = nevents;
	return (error);
}
static void
kqueue_scan_continue(void *data, wait_result_t wait_result)
{
	thread_t self = current_thread();
	uthread_t ut = (uthread_t)get_bsdthread_info(self);
	struct _kqueue_scan * cont_args = &ut->uu_kevent.ss_kqueue_scan;
	struct kqueue *kq = (struct kqueue *)data;
	int error;
	int count;

	/* convert the (previous) wait_result to a proper error */
	switch (wait_result) {
	case THREAD_AWAKENED:
		kqlock(kq);
		error = kqueue_process(kq, cont_args->call, cont_args, &count,
		    current_proc());
		if (error == 0 && count == 0) {
			wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
			    KQ_EVENT, THREAD_ABORTSAFE, cont_args->deadline);
			kq->kq_state |= KQ_SLEEP;
			kqunlock(kq);
			thread_block_parameter(kqueue_scan_continue, kq);
			/* NOTREACHED */
		}
		kqunlock(kq);
		break;
	case THREAD_TIMED_OUT:
		error = EWOULDBLOCK;
		break;
	case THREAD_INTERRUPTED:
		error = EINTR;
		break;
	default:
		panic("%s: - invalid wait_result (%d)", __func__,
		    wait_result);
		error = 0;
	}

	/* call the continuation with the results */
	assert(cont_args->cont != NULL);
	(cont_args->cont)(kq, cont_args->data, error);
}
/*
 * kqueue_scan - scan and wait for events in a kqueue
 *
 *	Process the triggered events in a kqueue.
 *
 *	If there are no events triggered arrange to
 *	wait for them. If the caller provided a
 *	continuation routine, then kevent_scan will
 *	also.
 *
 *	The callback routine must be valid.
 *	The caller must hold a use-count reference on the kq.
 */

int
kqueue_scan(struct kqueue *kq,
	    kevent_callback_t callback,
	    kqueue_continue_t continuation,
	    void *data,
	    struct timeval *atvp,
	    struct proc *p)
{
	thread_continue_t cont = THREAD_CONTINUE_NULL;
	uint64_t deadline;
	int error;
	int first;

	assert(callback != NULL);

	first = 1;
	for (;;) {
		wait_result_t wait_result;
		int count;

		/*
		 * Make a pass through the kq to find events already
		 * triggered.
		 */
		kqlock(kq);
		error = kqueue_process(kq, callback, data, &count, p);
		if (error || count)
			break; /* lock still held */

		/* looks like we have to consider blocking */
		if (first) {
			first = 0;
			/* convert the timeout to a deadline once */
			if (atvp->tv_sec || atvp->tv_usec) {
				uint64_t now;

				clock_get_uptime(&now);
				nanoseconds_to_absolutetime((uint64_t)atvp->tv_sec * NSEC_PER_SEC +
				    atvp->tv_usec * (long)NSEC_PER_USEC,
				    &deadline);
				if (now >= deadline) {
					/* non-blocking call */
					error = EWOULDBLOCK;
					break; /* lock still held */
				}
				deadline -= now;
				clock_absolutetime_interval_to_deadline(deadline, &deadline);
			} else {
				deadline = 0;	/* block forever */
			}

			if (continuation) {
				uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
				struct _kqueue_scan *cont_args = &ut->uu_kevent.ss_kqueue_scan;

				cont_args->call = callback;
				cont_args->cont = continuation;
				cont_args->deadline = deadline;
				cont_args->data = data;
				cont = kqueue_scan_continue;
			}
		}

		/* go ahead and wait */
		wait_queue_assert_wait_with_leeway((wait_queue_t)kq->kq_wqs,
		    KQ_EVENT, THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL,
		    deadline, 0);
		kq->kq_state |= KQ_SLEEP;
		kqunlock(kq);
		wait_result = thread_block_parameter(cont, kq);
		/* NOTREACHED if (continuation != NULL) */

		switch (wait_result) {
		case THREAD_AWAKENED:
			continue;
		case THREAD_TIMED_OUT:
			return (EWOULDBLOCK);
		case THREAD_INTERRUPTED:
			return (EINTR);
		default:
			panic("%s: - bad wait_result (%d)", __func__,
			    wait_result);
			error = 0;
		}
	}
	kqunlock(kq);
	return (error);
}
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(__unused struct fileproc *fp,
    __unused struct uio *uio,
    __unused int flags,
    __unused vfs_context_t ctx)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(__unused struct fileproc *fp,
    __unused struct uio *uio,
    __unused int flags,
    __unused vfs_context_t ctx)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_ioctl(__unused struct fileproc *fp,
    __unused u_long com,
    __unused caddr_t data,
    __unused vfs_context_t ctx)
{
	return (ENOTTY);
}
/*ARGSUSED*/
static int
kqueue_select(struct fileproc *fp, int which, void *wql,
    __unused vfs_context_t ctx)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct knote *kn;
	struct kqtailq inprocessq;
	int retnum = 0;

	if (which != FREAD)
		return (0);

	TAILQ_INIT(&inprocessq);

	kqlock(kq);
	/*
	 * If this is the first pass, link the wait queue associated with the
	 * the kqueue onto the wait queue set for the select().  Normally we
	 * use selrecord() for this, but it uses the wait queue within the
	 * selinfo structure and we need to use the main one for the kqueue to
	 * catch events from KN_STAYQUEUED sources. So we do the linkage manually.
	 * (The select() call will unlink them when it ends).
	 */
	if (wql != NULL) {
		thread_t cur_act = current_thread();
		struct uthread * ut = get_bsdthread_info(cur_act);

		kq->kq_state |= KQ_SEL;
		wait_queue_link_noalloc((wait_queue_t)kq->kq_wqs, ut->uu_wqset,
		    (wait_queue_link_t)wql);
	}

	if (kqueue_begin_processing(kq) == -1) {
		kqunlock(kq);
		return (0);
	}

	if (kq->kq_count != 0) {
		/*
		 * there is something queued - but it might be a
		 * KN_STAYQUEUED knote, which may or may not have
		 * any events pending.  So, we have to walk the
		 * list of knotes to see, and peek at the stay-
		 * queued ones to be really sure.
		 */
		while ((kn = (struct knote *)TAILQ_FIRST(&kq->kq_head)) != NULL) {
			if ((kn->kn_status & KN_STAYQUEUED) == 0) {
				retnum = 1;
				goto out;
			}

			/* move the stay-queued knote aside while we peek */
			TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
			TAILQ_INSERT_TAIL(&inprocessq, kn, kn_tqe);

			if (kqlock2knoteuse(kq, kn)) {
				unsigned peek;

				peek = kn->kn_fop->f_peek(kn);
				if (knoteuse2kqlock(kq, kn)) {
					if (peek > 0) {
						retnum = 1;
						goto out;
					}
				} else {
					retnum = 0;
				}
			}
		}
	}

out:
	/* Return knotes to active queue */
	while ((kn = TAILQ_FIRST(&inprocessq)) != NULL) {
		TAILQ_REMOVE(&inprocessq, kn, kn_tqe);
		kn->kn_tq = &kq->kq_head;
		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	}

	kqueue_end_processing(kq);
	kqunlock(kq);
	return (retnum);
}
/*ARGSUSED*/
static int
kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
{
	struct kqueue *kq = (struct kqueue *)fg->fg_data;

	kqueue_dealloc(kq);
	fg->fg_data = NULL;
	return (0);
}
/*ARGSUSED*/
/*
 * The caller has taken a use-count reference on this kqueue and will donate it
 * to the kqueue we are being added to.  This keeps the kqueue from closing until
 * that relationship is torn down.
 */
static int
kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, __unused vfs_context_t ctx)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
	struct kqueue *parentkq = kn->kn_kq;

	if (parentkq == kq ||
	    kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * We have to avoid creating a cycle when nesting kqueues
	 * inside another.  Rather than trying to walk the whole
	 * potential DAG of nested kqueues, we just use a simple
	 * ceiling protocol.  When a kqueue is inserted into another,
	 * we check that the (future) parent is not already nested
	 * into another kqueue at a lower level than the potential
	 * child (because it could indicate a cycle).  If that test
	 * passes, we just mark the nesting levels accordingly.
	 */

	kqlock(parentkq);
	if (parentkq->kq_level > 0 &&
	    parentkq->kq_level < kq->kq_level) {
		kqunlock(parentkq);
		return (1);
	} else {
		/* set parent level appropriately */
		if (parentkq->kq_level == 0)
			parentkq->kq_level = 2;
		if (parentkq->kq_level < kq->kq_level + 1)
			parentkq->kq_level = kq->kq_level + 1;
		kqunlock(parentkq);

		kn->kn_fop = &kqread_filtops;
		kqlock(kq);
		KNOTE_ATTACH(&kq->kq_sel.si_note, kn);
		/* indicate nesting in child, if needed */
		if (kq->kq_level == 0)
			kq->kq_level = 1;
		kqunlock(kq);
		return (0);
	}
}
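
/*
 * Hypothetical user-space sketch (not part of this file): the nesting path
 * above runs when one kqueue is monitored from another with EVFILT_READ; the
 * child's kq_count then feeds filt_kqueue().
 *
 *	int inner = kqueue();
 *	int outer = kqueue();
 *	struct kevent ke;
 *	EV_SET(&ke, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(outer, &ke, 1, NULL, 0, NULL);
 *	// waiting on 'outer' now wakes when 'inner' has pending events;
 *	// ke.data reports the inner queue's event count.
 */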
/*
 * kqueue_drain - called when kq is closed
 */
/*ARGSUSED*/
static int
kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
{
	struct kqueue *kq = (struct kqueue *)fp->f_fglob->fg_data;

	kqlock(kq);
	kqueue_wakeup(kq, 1);
	kqunlock(kq);
	return (0);
}
/*ARGSUSED*/
int
kqueue_stat(struct fileproc *fp, void *ub, int isstat64, __unused vfs_context_t ctx)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	if (isstat64 != 0) {
		struct stat64 *sb64 = (struct stat64 *)ub;

		bzero((void *)sb64, sizeof(*sb64));
		sb64->st_size = kq->kq_count;
		if (kq->kq_state & KQ_KEV64)
			sb64->st_blksize = sizeof(struct kevent64_s);
		else
			sb64->st_blksize = sizeof(struct kevent);
		sb64->st_mode = S_IFIFO;
	} else {
		struct stat *sb = (struct stat *)ub;

		bzero((void *)sb, sizeof(*sb));
		sb->st_size = kq->kq_count;
		if (kq->kq_state & KQ_KEV64)
			sb->st_blksize = sizeof(struct kevent64_s);
		else
			sb->st_blksize = sizeof(struct kevent);
		sb->st_mode = S_IFIFO;
	}

	return (0);
}
/*
 * Called with the kqueue locked
 */
static void
kqueue_wakeup(struct kqueue *kq, int closed)
{
	if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0 || kq->kq_nprocess > 0) {
		kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
		wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, KQ_EVENT,
		    (closed) ? THREAD_INTERRUPTED : THREAD_AWAKENED);
	}
}

void
klist_init(struct klist *list)
{
	SLIST_INIT(list);
}
/*
 * Query/Post each knote in the object's list
 *
 *	The object lock protects the list. It is assumed
 *	that the filter/event routine for the object can
 *	determine that the object is already locked (via
 *	the hint) and not deadlock itself.
 *
 *	The object lock should also hold off pending
 *	detach/drop operations.  But we'll prevent it here
 *	too - just in case.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext) {
		struct kqueue *kq = kn->kn_kq;

		kqlock(kq);
		if (kqlock2knoteuse(kq, kn)) {
			int result;

			/* call the event with only a use count */
			result = kn->kn_fop->f_event(kn, hint);

			/* if it's not going away and triggered */
			if (knoteuse2kqlock(kq, kn) && result)
				knote_activate(kn, 1);
			/* lock held again */
		}
		kqunlock(kq);
	}
}
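
/*
 * Illustrative kernel-side sketch (not in the original source): a subsystem
 * that owns a klist posts events through the routine above.  The names here
 * are hypothetical; the pattern matches how KNOTE() is used elsewhere in
 * this file.
 *
 *	struct mydev {
 *		lck_mtx_t	md_lock;	// object lock guards the list
 *		struct klist	md_note;	// knotes attached to this object
 *	};
 *
 *	void
 *	mydev_data_arrived(struct mydev *dev, long bytes)
 *	{
 *		lck_mtx_lock(&dev->md_lock);
 *		KNOTE(&dev->md_note, bytes);	// -> knote(&dev->md_note, bytes)
 *		lck_mtx_unlock(&dev->md_lock);
 *	}
 */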
/*
 * attach a knote to the specified list.  Return true if this is the first entry.
 * The list is protected by whatever lock the object it is associated with uses.
 */
int
knote_attach(struct klist *list, struct knote *kn)
{
	int ret = SLIST_EMPTY(list);
	SLIST_INSERT_HEAD(list, kn, kn_selnext);
	return (ret);
}

/*
 * detach a knote from the specified list.  Return true if that was the last entry.
 * The list is protected by whatever lock the object it is associated with uses.
 */
int
knote_detach(struct klist *list, struct knote *kn)
{
	SLIST_REMOVE(list, kn, knote, kn_selnext);
	return (SLIST_EMPTY(list));
}
/*
 * For a given knote, link a provided wait queue directly with the kqueue.
 * Wakeups will happen via recursive wait queue support.  But nothing will move
 * the knote to the active list at wakeup (nothing calls knote()).  Instead,
 * we permanently enqueue them here.
 *
 * kqueue and knote references are held by caller.
 *
 * caller provides the wait queue link structure.
 */
int
knote_link_wait_queue(struct knote *kn, struct wait_queue *wq, wait_queue_link_t wql)
{
	struct kqueue *kq = kn->kn_kq;
	kern_return_t kr;

	kr = wait_queue_link_noalloc(wq, kq->kq_wqs, wql);
	if (kr == KERN_SUCCESS) {
		knote_markstayqueued(kn);
		return (0);
	} else {
		return (EINVAL);
	}
}
/*
 * Unlink the provided wait queue from the kqueue associated with a knote.
 * Also remove it from the magic list of directly attached knotes.
 *
 * Note that the unlink may have already happened from the other side, so
 * ignore any failures to unlink and just remove it from the kqueue list.
 *
 * On success, caller is responsible for the link structure
 */
int
knote_unlink_wait_queue(struct knote *kn, struct wait_queue *wq, wait_queue_link_t *wqlp)
{
	struct kqueue *kq = kn->kn_kq;
	kern_return_t kr;

	kr = wait_queue_unlink_nofree(wq, kq->kq_wqs, wqlp);
	kqlock(kq);
	kn->kn_status &= ~KN_STAYQUEUED;
	kqunlock(kq);
	return ((kr != KERN_SUCCESS) ? EINVAL : 0);
}
/*
 * remove all knotes referencing a specified fd
 *
 * Essentially an inlined knote_remove & knote_drop
 * when we know for sure that the thing is a file
 *
 * Entered with the proc_fd lock already held.
 * It returns the same way, but may drop it temporarily.
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list;
	struct knote *kn;

	list = &fdp->fd_knlist[fd];
	while ((kn = SLIST_FIRST(list)) != NULL) {
		struct kqueue *kq = kn->kn_kq;

		if (kq->kq_p != p)
			panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
			    __func__, kq->kq_p, p);

		kqlock(kq);
		proc_fdunlock(p);

		/*
		 * Convert the lock to a drop ref.
		 * If we get it, go ahead and drop it.
		 * Otherwise, we waited for it to
		 * be dropped by the other guy, so
		 * it is safe to move on in the list.
		 */
		if (kqlock2knotedrop(kq, kn)) {
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p);
		}

		proc_fdlock(p);

		/* the fd tables may have changed - start over */
		list = &fdp->fd_knlist[fd];
	}
}
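/*
 * Illustrative userspace sketch (not part of this file): knote_fdclose()
 * is why no explicit EV_DELETE is needed when the watched descriptor is
 * closed; the close itself detaches and drops the knotes:
 *
 *	#include <sys/event.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	pipe(fds);
 *	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	close(fds[0]);	// knotes referencing fds[0] are dropped here
 */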
/* proc_fdlock held on entry (and exit) */
static int
knote_fdpattach(struct knote *kn, struct filedesc *fdp, struct proc *p)
{
	struct klist *list = NULL;

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE,
			    &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
	} else {
		if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
			u_int size = 0;

			if (kn->kn_id >= (uint64_t)p->p_rlimit[RLIMIT_NOFILE].rlim_cur
			    || kn->kn_id >= (uint64_t)maxfiles)
				return (EINVAL);

			/* have to grow the fd_knlist */
			size = fdp->fd_knlistsize;
			while (size <= kn->kn_id)
				size += KQEXTENT;

			if (size >= (UINT_MAX/sizeof(struct klist *)))
				return (EINVAL);

			MALLOC(list, struct klist *,
			    size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
			if (list == NULL)
				return (ENOMEM);

			bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
			    fdp->fd_knlistsize * sizeof(struct klist *));
			bzero((caddr_t)list +
			    fdp->fd_knlistsize * sizeof(struct klist *),
			    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
			FREE(fdp->fd_knlist, M_KQUEUE);
			fdp->fd_knlist = list;
			fdp->fd_knlistsize = size;
		}
		list = &fdp->fd_knlist[kn->kn_id];
	}
	SLIST_INSERT_HEAD(list, kn, kn_link);
	return (0);
}
/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, __unused struct proc *ctxp)
{
	struct kqueue *kq = kn->kn_kq;
	struct proc *p = kq->kq_p;
	struct filedesc *fdp = p->p_fd;
	struct klist *list;
	int needswakeup;

	proc_fdlock(p);
	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	kqlock(kq);
	knote_dequeue(kn);
	needswakeup = (kn->kn_status & KN_USEWAIT);
	kqunlock(kq);
	proc_fdunlock(p);

	if (needswakeup)
		wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kn->kn_status,
		    THREAD_AWAKENED);

	if (kn->kn_fop->f_isfd)
		fp_drop(p, kn->kn_id, kn->kn_fp, 0);

	knote_free(kn);
}
/* called with kqueue lock held */
static void
knote_activate(struct knote *kn, int propagate)
{
	struct kqueue *kq = kn->kn_kq;

	kn->kn_status |= KN_ACTIVE;
	knote_enqueue(kn);
	kqueue_wakeup(kq, 0);

	/* this is a real event: wake up the parent kq, too */
	if (propagate)
		KNOTE(&kq->kq_sel.si_note, 0);
}
/* called with kqueue lock held */
static void
knote_deactivate(struct knote *kn)
{
	kn->kn_status &= ~KN_ACTIVE;
	knote_dequeue(kn);
}
/* called with kqueue lock held */
static void
knote_enqueue(struct knote *kn)
{
	if ((kn->kn_status & (KN_QUEUED | KN_STAYQUEUED)) == KN_STAYQUEUED ||
	    (kn->kn_status & (KN_QUEUED | KN_STAYQUEUED | KN_DISABLED)) == 0) {
		struct kqtailq *tq = kn->kn_tq;
		struct kqueue *kq = kn->kn_kq;

		TAILQ_INSERT_TAIL(tq, kn, kn_tqe);
		kn->kn_status |= KN_QUEUED;
		kq->kq_count++;
	}
}
/* called with kqueue lock held */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	if ((kn->kn_status & (KN_QUEUED | KN_STAYQUEUED)) == KN_QUEUED) {
		struct kqtailq *tq = kn->kn_tq;

		TAILQ_REMOVE(tq, kn, kn_tqe);
		kn->kn_tq = &kq->kq_head;
		kn->kn_status &= ~KN_QUEUED;
		kq->kq_count--;
	}
}
static void
knote_init(void)
{
	knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote),
	    8192, "knote zone");

	/* allocate kq lock group attribute and group */
	kq_lck_grp_attr = lck_grp_attr_alloc_init();

	kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr);

	/* Allocate kq lock attribute */
	kq_lck_attr = lck_attr_alloc_init();

	/* Initialize the timer filter lock */
	lck_mtx_init(&_filt_timerlock, kq_lck_grp, kq_lck_attr);

#if VM_PRESSURE_EVENTS
	/* Initialize the vm pressure list lock */
	vm_pressure_init(kq_lck_grp, kq_lck_attr);
#endif

#if CONFIG_MEMORYSTATUS
	/* Initialize the memorystatus list lock */
	memorystatus_kevent_init(kq_lck_grp, kq_lck_attr);
#endif
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}
#if SOCKETS
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/kern_event.h>
#include <sys/malloc.h>
#include <sys/sys_domain.h>
#include <sys/syslog.h>
static lck_grp_attr_t *kev_lck_grp_attr;
static lck_attr_t *kev_lck_attr;
static lck_grp_t *kev_lck_grp;
static decl_lck_rw_data(,kev_lck_data);
static lck_rw_t *kev_rwlock = &kev_lck_data;
static int kev_attach(struct socket *so, int proto, struct proc *p);
static int kev_detach(struct socket *so);
static int kev_control(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p);
static lck_mtx_t * event_getlock(struct socket *, int);
static int event_lock(struct socket *, int, void *);
static int event_unlock(struct socket *, int, void *);

static int event_sofreelastref(struct socket *);
static void kev_delete(struct kern_event_pcb *);
static struct pr_usrreqs event_usrreqs = {
	.pru_attach =		kev_attach,
	.pru_control =		kev_control,
	.pru_detach =		kev_detach,
	.pru_soreceive =	soreceive,
};
static struct protosw eventsw[] = {
{
	.pr_type =		SOCK_RAW,
	.pr_protocol =		SYSPROTO_EVENT,
	.pr_flags =		PR_ATOMIC,
	.pr_usrreqs =		&event_usrreqs,
	.pr_lock =		event_lock,
	.pr_unlock =		event_unlock,
	.pr_getlock =		event_getlock,
}
};
static lck_mtx_t *
event_getlock(struct socket *so, int locktype)
{
#pragma unused(locktype)
	struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;

	if (so->so_pcb != NULL) {
		if (so->so_usecount < 0)
			panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
			    so, so->so_usecount, solockhistory_nr(so));
	} else {
		panic("%s: so=%p NULL NO so_pcb %s\n", __func__,
		    so, solockhistory_nr(so));
	}
	return (&ev_pcb->evp_mtx);
}
static int
event_lock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;

	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

	if (so->so_pcb != NULL) {
		lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
	} else {
		panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
		    so, lr_saved, solockhistory_nr(so));
	}

	if (so->so_usecount < 0) {
		panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s\n", __func__,
		    so, so->so_pcb, lr_saved, so->so_usecount,
		    solockhistory_nr(so));
	}

	if (refcount)
		so->so_usecount++;

	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
	return (0);
}
static int
event_unlock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;
	lck_mtx_t *mutex_held;

	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

	if (refcount)
		so->so_usecount--;

	if (so->so_usecount < 0) {
		panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
		    so, so->so_usecount, solockhistory_nr(so));
	}
	if (so->so_pcb == NULL) {
		panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s\n", __func__,
		    so, so->so_usecount, (void *)lr_saved,
		    solockhistory_nr(so));
	}
	mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);

	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
	so->unlock_lr[so->next_unlock_lr] = lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;

	if (so->so_usecount == 0) {
		VERIFY(so->so_flags & SOF_PCBCLEARING);
		event_sofreelastref(so);
	} else {
		lck_mtx_unlock(mutex_held);
	}

	return (0);
}
static int
event_sofreelastref(struct socket *so)
{
	struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;

	lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);
	so->so_pcb = NULL;

	/*
	 * Disable upcall in the event another thread is in kev_post_msg()
	 * appending record to the receive socket buffer, since sbwakeup()
	 * may release the socket lock otherwise.
	 */
	so->so_rcv.sb_flags &= ~SB_UPCALL;
	so->so_snd.sb_flags &= ~SB_UPCALL;
	so->so_event = NULL;
	lck_mtx_unlock(&(ev_pcb->evp_mtx));

	lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
	lck_rw_lock_exclusive(kev_rwlock);
	LIST_REMOVE(ev_pcb, evp_link);
	lck_rw_done(kev_rwlock);
	kev_delete(ev_pcb);

	sofreelastref(so, 1);
	return (0);
}
static int event_proto_count = (sizeof (eventsw) / sizeof (struct protosw));

struct kern_event_head kern_event_head;

static u_int32_t static_event_id = 0;

#define EVPCB_ZONE_MAX		65536
#define EVPCB_ZONE_NAME		"kerneventpcb"
static struct zone *ev_pcb_zone;
/*
 * Install the protosw's for the NKE manager. Invoked at extension load time
 */
void
kern_event_init(struct domain *dp)
{
	struct protosw *pr;
	int i;

	VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
	VERIFY(dp == systemdomain);

	kev_lck_grp_attr = lck_grp_attr_alloc_init();
	if (kev_lck_grp_attr == NULL) {
		panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
	}

	kev_lck_grp = lck_grp_alloc_init("Kernel Event Protocol",
	    kev_lck_grp_attr);
	if (kev_lck_grp == NULL) {
		panic("%s: lck_grp_alloc_init failed\n", __func__);
	}

	kev_lck_attr = lck_attr_alloc_init();
	if (kev_lck_attr == NULL) {
		panic("%s: lck_attr_alloc_init failed\n", __func__);
	}

	lck_rw_init(kev_rwlock, kev_lck_grp, kev_lck_attr);
	if (kev_rwlock == NULL) {
		panic("%s: lck_mtx_alloc_init failed\n", __func__);
	}

	for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++)
		net_add_proto(pr, dp, 1);

	ev_pcb_zone = zinit(sizeof(struct kern_event_pcb),
	    EVPCB_ZONE_MAX * sizeof(struct kern_event_pcb), 0, EVPCB_ZONE_NAME);
	if (ev_pcb_zone == NULL) {
		panic("%s: failed allocating ev_pcb_zone", __func__);
	}
	zone_change(ev_pcb_zone, Z_EXPAND, TRUE);
	zone_change(ev_pcb_zone, Z_CALLERACCT, TRUE);
}
static int
kev_attach(struct socket *so, __unused int proto, __unused struct proc *p)
{
	int error = 0;
	struct kern_event_pcb *ev_pcb;

	error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
	if (error != 0)
		return (error);

	if ((ev_pcb = (struct kern_event_pcb *)zalloc(ev_pcb_zone)) == NULL) {
		return (ENOBUFS);
	}
	bzero(ev_pcb, sizeof(struct kern_event_pcb));
	lck_mtx_init(&ev_pcb->evp_mtx, kev_lck_grp, kev_lck_attr);

	ev_pcb->evp_socket = so;
	ev_pcb->evp_vendor_code_filter = 0xffffffff;

	so->so_pcb = (caddr_t) ev_pcb;
	lck_rw_lock_exclusive(kev_rwlock);
	LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link);
	lck_rw_done(kev_rwlock);

	return (error);
}
static void
kev_delete(struct kern_event_pcb *ev_pcb)
{
	VERIFY(ev_pcb != NULL);
	lck_mtx_destroy(&ev_pcb->evp_mtx, kev_lck_grp);
	zfree(ev_pcb_zone, ev_pcb);
}
static int
kev_detach(struct socket *so)
{
	struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;

	if (ev_pcb != NULL) {
		soisdisconnected(so);
		so->so_flags |= SOF_PCBCLEARING;
	}

	return (0);
}
/*
 * For now, kev_vendor_code and mbuf_tags use the same
 * mechanism.
 */
errno_t
kev_vendor_code_find(
	const char	*string,
	u_int32_t	*out_vendor_code)
{
	if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
		return (EINVAL);
	}
	return (net_str_id_find_internal(string, out_vendor_code,
	    NSI_VENDOR_CODE, 1));
}
errno_t
kev_msg_post(struct kev_msg *event_msg)
{
	mbuf_tag_id_t min_vendor, max_vendor;

	net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);

	if (event_msg == NULL)
		return (EINVAL);

	/*
	 * Limit third parties to posting events for registered vendor codes
	 * only
	 */
	if (event_msg->vendor_code < min_vendor ||
	    event_msg->vendor_code > max_vendor)
		return (EINVAL);

	return (kev_post_msg(event_msg));
}
int
kev_post_msg(struct kev_msg *event_msg)
{
	struct mbuf *m, *m2;
	struct kern_event_pcb *ev_pcb;
	struct kern_event_msg *ev;
	char *tmp;
	u_int32_t total_size;
	int i;

	/* Verify the message is small enough to fit in one mbuf w/o cluster */
	total_size = KEV_MSG_HEADER_SIZE;

	for (i = 0; i < 5; i++) {
		if (event_msg->dv[i].data_length == 0)
			break;
		total_size += event_msg->dv[i].data_length;
	}

	if (total_size > MLEN) {
		return (EMSGSIZE);
	}

	m = m_get(M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);

	ev = mtod(m, struct kern_event_msg *);
	total_size = KEV_MSG_HEADER_SIZE;

	tmp = (char *) &ev->event_data[0];
	for (i = 0; i < 5; i++) {
		if (event_msg->dv[i].data_length == 0)
			break;

		total_size += event_msg->dv[i].data_length;
		bcopy(event_msg->dv[i].data_ptr, tmp,
		    event_msg->dv[i].data_length);
		tmp += event_msg->dv[i].data_length;
	}

	ev->id = ++static_event_id;
	ev->total_size = total_size;
	ev->vendor_code = event_msg->vendor_code;
	ev->kev_class = event_msg->kev_class;
	ev->kev_subclass = event_msg->kev_subclass;
	ev->event_code = event_msg->event_code;

	m->m_len = total_size;
	lck_rw_lock_shared(kev_rwlock);
	for (ev_pcb = LIST_FIRST(&kern_event_head);
	    ev_pcb;
	    ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
		lck_mtx_lock(&ev_pcb->evp_mtx);
		if (ev_pcb->evp_socket->so_pcb == NULL) {
			lck_mtx_unlock(&ev_pcb->evp_mtx);
			continue;
		}
		if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) {
			if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) {
				lck_mtx_unlock(&ev_pcb->evp_mtx);
				continue;
			}
		}
		if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) {
			if (ev_pcb->evp_class_filter != ev->kev_class) {
				lck_mtx_unlock(&ev_pcb->evp_mtx);
				continue;
			}
		}
		if ((ev_pcb->evp_subclass_filter != KEV_ANY_SUBCLASS) &&
		    (ev_pcb->evp_subclass_filter != ev->kev_subclass)) {
			lck_mtx_unlock(&ev_pcb->evp_mtx);
			continue;
		}

		m2 = m_copym(m, 0, m->m_len, M_NOWAIT);
		if (m2 == 0) {
			m_free(m);
			lck_mtx_unlock(&ev_pcb->evp_mtx);
			lck_rw_done(kev_rwlock);
			return (ENOBUFS);
		}
		if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2))
			sorwakeup(ev_pcb->evp_socket);
		lck_mtx_unlock(&ev_pcb->evp_mtx);
	}
	m_free(m);
	lck_rw_done(kev_rwlock);

	return (0);
}
static int
kev_control(struct socket *so,
    u_long cmd,
    caddr_t data,
    __unused struct ifnet *ifp,
    __unused struct proc *p)
{
	struct kev_request *kev_req = (struct kev_request *) data;
	struct kern_event_pcb *ev_pcb;
	struct kev_vendor_code *kev_vendor;
	u_int32_t *id_value = (u_int32_t *) data;

	switch (cmd) {
	case SIOCGKEVID:
		*id_value = static_event_id;
		break;
	case SIOCSKEVFILTER:
		ev_pcb = (struct kern_event_pcb *) so->so_pcb;
		ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
		ev_pcb->evp_class_filter = kev_req->kev_class;
		ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
		break;
	case SIOCGKEVFILTER:
		ev_pcb = (struct kern_event_pcb *) so->so_pcb;
		kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
		kev_req->kev_class = ev_pcb->evp_class_filter;
		kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
		break;
	case SIOCGKEVVENDOR:
		kev_vendor = (struct kev_vendor_code *)data;
		/* Make sure string is NULL terminated */
		kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN-1] = 0;
		return (net_str_id_find_internal(kev_vendor->vendor_string,
		    &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0));
	default:
		return (ENOTSUP);
	}

	return (0);
}
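/*
 * Illustrative userspace sketch (not part of this file, error handling
 * omitted): a client receives kernel events by opening a PF_SYSTEM event
 * socket, installing a filter with SIOCSKEVFILTER as handled above, and
 * reading kern_event_msg records:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <sys/kern_event.h>
 *
 *	int fd = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
 *	struct kev_request req = {
 *		.vendor_code = KEV_VENDOR_APPLE,
 *		.kev_class = KEV_NETWORK_CLASS,
 *		.kev_subclass = KEV_ANY_SUBCLASS,
 *	};
 *	char buf[1024];
 *
 *	ioctl(fd, SIOCSKEVFILTER, &req);
 *	while (recv(fd, buf, sizeof (buf), 0) > 0) {
 *		struct kern_event_msg *ev = (struct kern_event_msg *)buf;
 *		// ev->event_code and ev->event_data[] describe the event
 *	}
 */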
#endif /* SOCKETS */
int
fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)
{
	struct vinfo_stat * st;

	/* No need for the funnel as fd is kept alive */
	st = &kinfo->kq_stat;

	st->vst_size = kq->kq_count;
	if (kq->kq_state & KQ_KEV64)
		st->vst_blksize = sizeof(struct kevent64_s);
	else
		st->vst_blksize = sizeof(struct kevent);
	st->vst_mode = S_IFIFO;
	if (kq->kq_state & KQ_SEL)
		kinfo->kq_state |= PROC_KQUEUE_SELECT;
	if (kq->kq_state & KQ_SLEEP)
		kinfo->kq_state |= PROC_KQUEUE_SLEEP;

	return (0);
}
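/*
 * Illustrative userspace sketch (not part of this file, struct and flavor
 * names assumed from <sys/proc_info.h> and libproc): the structure filled
 * in above is what proc_pidfdinfo() returns for a kqueue descriptor:
 *
 *	#include <libproc.h>
 *
 *	struct kqueue_fdinfo kqi;
 *
 *	if (proc_pidfdinfo(pid, kq_fd, PROC_PIDFDKQUEUEINFO,
 *	    &kqi, sizeof (kqi)) > 0) {
 *		// kqi.kqueueinfo.kq_stat.vst_size == pending events
 *		// kqi.kqueueinfo.kq_state & PROC_KQUEUE_SLEEP, etc.
 *	}
 */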
void
knote_markstayqueued(struct knote *kn)
{
	kqlock(kn->kn_kq);
	kn->kn_status |= KN_STAYQUEUED;
	knote_enqueue(kn);
	kqunlock(kn->kn_kq);
}