/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 *
 */
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * @(#)kern_event.c	1.0 (3/31/2000)
 */
#include <stdint.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file_internal.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/sysproto.h>
#include <sys/user.h>
#include <sys/vnode_internal.h>
#include <string.h>
#include <sys/proc_info.h>
#include <sys/codesign.h>

#include <kern/lock.h>
#include <kern/clock.h>
#include <kern/thread_call.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <kern/assert.h>

#include <libkern/libkern.h>
#include "net/net_str_id.h"

#include <mach/task.h>

#if VM_PRESSURE_EVENTS
#include <kern/vm_pressure.h>
#endif

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

#define	KQ_EVENT	NULL

static inline void kqlock(struct kqueue *kq);
static inline void kqunlock(struct kqueue *kq);

static int kqlock2knoteuse(struct kqueue *kq, struct knote *kn);
static int kqlock2knoteusewait(struct kqueue *kq, struct knote *kn);
static int kqlock2knotedrop(struct kqueue *kq, struct knote *kn);
static int knoteuse2kqlock(struct kqueue *kq, struct knote *kn);

static void kqueue_wakeup(struct kqueue *kq, int closed);
static int kqueue_read(struct fileproc *fp, struct uio *uio,
    int flags, vfs_context_t ctx);
static int kqueue_write(struct fileproc *fp, struct uio *uio,
    int flags, vfs_context_t ctx);
static int kqueue_ioctl(struct fileproc *fp, u_long com, caddr_t data,
    vfs_context_t ctx);
static int kqueue_select(struct fileproc *fp, int which, void *wql,
    vfs_context_t ctx);
static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
    vfs_context_t ctx);
static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);
extern int kqueue_stat(struct fileproc *fp, void *ub, int isstat64,
    vfs_context_t ctx);

static const struct fileops kqueueops = {
	.fo_type = DTYPE_KQUEUE,
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_select = kqueue_select,
	.fo_close = kqueue_close,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_drain = kqueue_drain,
};

static int kevent_internal(struct proc *p, int iskev64, user_addr_t changelist,
    int nchanges, user_addr_t eventlist, int nevents, int fd,
    user_addr_t utimeout, unsigned int flags, int32_t *retval);
static int kevent_copyin(user_addr_t *addrp, struct kevent64_s *kevp,
    struct proc *p, int iskev64);
static int kevent_copyout(struct kevent64_s *kevp, user_addr_t *addrp,
    struct proc *p, int iskev64);
char * kevent_description(struct kevent64_s *kevp, char *s, size_t n);

static int kevent_callback(struct kqueue *kq, struct kevent64_s *kevp,
    void *data);
static void kevent_continue(struct kqueue *kq, void *data, int error);
static void kqueue_scan_continue(void *contp, wait_result_t wait_result);
static int kqueue_process(struct kqueue *kq, kevent_callback_t callback,
    void *data, int *countp, struct proc *p);
static int kqueue_begin_processing(struct kqueue *kq);
static void kqueue_end_processing(struct kqueue *kq);
static int knote_process(struct knote *kn, kevent_callback_t callback,
    void *data, struct kqtailq *inprocessp, struct proc *p);
static void knote_put(struct knote *kn);
static int knote_fdpattach(struct knote *kn, struct filedesc *fdp,
    struct proc *p);
static void knote_drop(struct knote *kn, struct proc *p);
static void knote_activate(struct knote *kn, int);
static void knote_deactivate(struct knote *kn);
static void knote_enqueue(struct knote *kn);
static void knote_dequeue(struct knote *kn);
static struct knote *knote_alloc(void);
static void knote_free(struct knote *kn);

static int filt_fileattach(struct knote *kn);
static struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
};

static void filt_kqdetach(struct knote *kn);
static int filt_kqueue(struct knote *kn, long hint);
static struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};

/* placeholder for not-yet-implemented filters */
static int filt_badattach(struct knote *kn);
static struct filterops bad_filtops = {
	.f_attach = filt_badattach,
};

static int filt_procattach(struct knote *kn);
static void filt_procdetach(struct knote *kn);
static int filt_proc(struct knote *kn, long hint);
static struct filterops proc_filtops = {
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};

#if VM_PRESSURE_EVENTS
static int filt_vmattach(struct knote *kn);
static void filt_vmdetach(struct knote *kn);
static int filt_vm(struct knote *kn, long hint);
static struct filterops vm_filtops = {
	.f_attach = filt_vmattach,
	.f_detach = filt_vmdetach,
	.f_event = filt_vm,
};
#endif /* VM_PRESSURE_EVENTS */

#if CONFIG_MEMORYSTATUS
extern struct filterops memorystatus_filtops;
#endif /* CONFIG_MEMORYSTATUS */

extern struct filterops fs_filtops;

extern struct filterops sig_filtops;

/* Timer filter */
static int filt_timerattach(struct knote *kn);
static void filt_timerdetach(struct knote *kn);
static int filt_timer(struct knote *kn, long hint);
static void filt_timertouch(struct knote *kn, struct kevent64_s *kev,
    long type);
static struct filterops timer_filtops = {
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
	.f_touch = filt_timertouch,
};

/* Helpers */
static void filt_timerexpire(void *knx, void *param1);
static int filt_timervalidate(struct knote *kn);
static void filt_timerupdate(struct knote *kn);
static void filt_timercancel(struct knote *kn);

#define	TIMER_RUNNING		0x1
#define	TIMER_CANCELWAIT	0x2

static lck_mtx_t _filt_timerlock;
static void filt_timerlock(void);
static void filt_timerunlock(void);

static zone_t knote_zone;

#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

#if 0
extern struct filterops aio_filtops;
#endif

/* Mach portset filter */
extern struct filterops machport_filtops;

/* User filter */
static int filt_userattach(struct knote *kn);
static void filt_userdetach(struct knote *kn);
static int filt_user(struct knote *kn, long hint);
static void filt_usertouch(struct knote *kn, struct kevent64_s *kev,
    long type);
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
#if 0
	&aio_filtops,			/* EVFILT_AIO */
#else
	&bad_filtops,			/* EVFILT_AIO */
#endif
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&machport_filtops,		/* EVFILT_MACHPORT */
	&fs_filtops,			/* EVFILT_FS */
	&user_filtops,			/* EVFILT_USER */
	&bad_filtops,			/* unused */
#if VM_PRESSURE_EVENTS
	&vm_filtops,			/* EVFILT_VM */
#else
	&bad_filtops,			/* EVFILT_VM */
#endif
	&file_filtops,			/* EVFILT_SOCK */
#if CONFIG_MEMORYSTATUS
	&memorystatus_filtops,		/* EVFILT_MEMORYSTATUS */
#else
	&bad_filtops,			/* EVFILT_MEMORYSTATUS */
#endif
};
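
/*
 * Illustrative note (not original to this file): user-visible filter
 * values are negative (EVFILT_READ is -1, EVFILT_WRITE is -2, and so
 * on), so kevent_register() below converts them to 0-based indexes
 * into this table with ~kev->filter.  For example, ~(-1) == 0 selects
 * &file_filtops for EVFILT_READ.
 */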

/*
 * kqueue/note lock attributes and implementations
 *
 *	kqueues have locks, while knotes have use counts.
 *	Most of the knote state is guarded by the object lock;
 *	the knote "inuse" count and status use the kqueue lock.
 */
lck_grp_attr_t * kq_lck_grp_attr;
lck_grp_t * kq_lck_grp;
lck_attr_t * kq_lck_attr;

static inline void
kqlock(struct kqueue *kq)
{
	lck_spin_lock(&kq->kq_lock);
}

static inline void
kqunlock(struct kqueue *kq)
{
	lck_spin_unlock(&kq->kq_lock);
}

/*
 * Convert a kq lock to a knote use reference.
 *
 *	If the knote is being dropped, we can't get
 *	a use reference, so just return with it
 *	still locked.
 *	- kq locked at entry
 *	- unlock on exit if we get the use reference
 */
static int
kqlock2knoteuse(struct kqueue *kq, struct knote *kn)
{
	if (kn->kn_status & KN_DROPPING)
		return (0);
	kn->kn_inuse++;
	kqunlock(kq);
	return (1);
}

/*
 * Convert a kq lock to a knote use reference,
 * but wait for attach and drop events to complete.
 *
 *	If the knote is being dropped, we can't get
 *	a use reference, so just return with it
 *	still locked.
 *	- kq locked at entry
 *	- kq always unlocked on exit
 */
static int
kqlock2knoteusewait(struct kqueue *kq, struct knote *kn)
{
	if ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) != 0) {
		kn->kn_status |= KN_USEWAIT;
		wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
		    &kn->kn_status, THREAD_UNINT, 0);
		kqunlock(kq);
		thread_block(THREAD_CONTINUE_NULL);
		return (0);
	}
	kn->kn_inuse++;
	kqunlock(kq);
	return (1);
}

/*
 * Convert from a knote use reference back to kq lock.
 *
 *	Drop a use reference and wake any waiters if
 *	this is the last one.
 *
 *	The exit return indicates if the knote is
 *	still alive - but the kqueue lock is taken
 *	unconditionally.
 */
static int
knoteuse2kqlock(struct kqueue *kq, struct knote *kn)
{
	kqlock(kq);
	if (--kn->kn_inuse == 0) {
		if ((kn->kn_status & KN_ATTACHING) != 0) {
			kn->kn_status &= ~KN_ATTACHING;
		}
		if ((kn->kn_status & KN_USEWAIT) != 0) {
			kn->kn_status &= ~KN_USEWAIT;
			wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs,
			    &kn->kn_status, THREAD_AWAKENED);
		}
	}
	return ((kn->kn_status & KN_DROPPING) == 0);
}

/*
 * Convert a kq lock to a knote drop reference.
 *
 *	If the knote is in use, wait for the use count
 *	to subside.  We first mark our intention to drop
 *	it - keeping other users from "piling on."
 *	If we are too late, we have to wait for the
 *	other drop to complete.
 *
 *	- kq locked at entry
 *	- always unlocked on exit.
 *	- caller can't hold any locks that would prevent
 *	  the other dropper from completing.
 */
static int
kqlock2knotedrop(struct kqueue *kq, struct knote *kn)
{
	int oktodrop;

	oktodrop = ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) == 0);
	kn->kn_status |= KN_DROPPING;
	if (oktodrop) {
		if (kn->kn_inuse == 0) {
			kqunlock(kq);
			return (oktodrop);
		}
	}
	kn->kn_status |= KN_USEWAIT;
	wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, &kn->kn_status,
	    THREAD_UNINT, 0);
	kqunlock(kq);
	thread_block(THREAD_CONTINUE_NULL);
	return (oktodrop);
}

/*
 * Release a knote use count reference.
 */
static void
knote_put(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	kqlock(kq);
	if (--kn->kn_inuse == 0) {
		if ((kn->kn_status & KN_USEWAIT) != 0) {
			kn->kn_status &= ~KN_USEWAIT;
			wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs,
			    &kn->kn_status, THREAD_AWAKENED);
		}
	}
	kqunlock(kq);
}
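
/*
 * Illustrative sketch (not a new API) of the lock/use-count
 * conversion pattern the callers below follow, e.g. in
 * kevent_register() and knote_process():
 *
 *	kqlock(kq);
 *	if (kqlock2knoteuse(kq, kn)) {
 *		... kq now unlocked; knote pinned via kn_inuse ...
 *		result = kn->kn_fop->f_event(kn, 0);
 *		if (knoteuse2kqlock(kq, kn)) {
 *			... kq locked again; knote still alive ...
 *			if (result)
 *				knote_activate(kn, 0);
 *		}
 *		kqunlock(kq);
 *	} else {
 *		... knote is being dropped; kq is still locked ...
 *		kqunlock(kq);
 *	}
 */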

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn, vfs_context_current()));
}

#define	f_flag f_fglob->fg_flag
#define	f_msgcount f_fglob->fg_msgcount
#define	f_cred f_fglob->fg_cred
#define	f_ops f_fglob->fg_ops
#define	f_offset f_fglob->fg_offset
#define	f_data f_fglob->fg_data

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kqlock(kq);
	KNOTE_DETACH(&kq->kq_sel.si_note, kn);
	kqunlock(kq);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, __unused long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;

	assert(PID_MAX < NOTE_PDATAMASK);

	if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0)
		return (ENOTSUP);

	p = proc_find(kn->kn_id);
	if (p == NULL) {
		return (ESRCH);
	}

	const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;

	if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits)
		do {
			pid_t selfpid = proc_selfpid();

			if (p->p_ppid == selfpid)
				break;	/* parent => ok */

			if ((p->p_lflag & P_LTRACED) != 0 &&
			    (p->p_oppid == selfpid))
				break;	/* parent-in-waiting => ok */

			proc_rele(p);
			return (EACCES);
		} while (0);

	proc_klist_lock();

	kn->kn_flags |= EV_CLEAR;	/* automatically set */
	kn->kn_ptr.p_proc = p;		/* store the proc handle */

	KNOTE_ATTACH(&p->p_klist, kn);

	proc_klist_unlock();

	proc_rele(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  In that case,
 * the pointer to the process will have already been nulled out.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	proc_klist_lock();

	p = kn->kn_ptr.p_proc;
	if (p != PROC_NULL) {
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}

	proc_klist_unlock();
}

static int
filt_proc(struct knote *kn, long hint)
{
	/*
	 * Note: a lot of bits in hint may be obtained from the knote.
	 * To free some of those bits, see <rdar://problem/12592988>
	 * "Freeing up bits in hint for filt_proc".
	 */
	/* hint is 0 when called from above */
	if (hint != 0) {
		u_int event;

		/* ALWAYS CALLED WITH proc_klist_lock when (hint != 0) */

		/*
		 * mask off extra data
		 */
		event = (u_int)hint & NOTE_PCTRLMASK;

		/*
		 * termination lifecycle events can happen while a debugger
		 * has reparented a process, in which case notifications
		 * should be quashed except to the tracing parent.  When
		 * the debugger reaps the child (either via wait4(2) or
		 * process exit), the child will be reparented to the original
		 * parent and these knotes re-fired.
		 */
		if (event & NOTE_EXIT) {
			if ((kn->kn_ptr.p_proc->p_oppid != 0)
			    && (kn->kn_kq->kq_p->p_pid != kn->kn_ptr.p_proc->p_ppid)) {
				/*
				 * This knote is not for the current ptrace(2) parent, ignore.
				 */
				return 0;
			}
		}

		/*
		 * if the user is interested in this event, record it.
		 */
		if (kn->kn_sfflags & event)
			kn->kn_fflags |= event;

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		}
#pragma clang diagnostic pop

		if (event == NOTE_EXIT) {
			kn->kn_data = 0;
			if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
				kn->kn_fflags |= NOTE_EXITSTATUS;
				kn->kn_data |= (hint & NOTE_PDATAMASK);
			}
			if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
				kn->kn_fflags |= NOTE_EXIT_DETAIL;
				if ((kn->kn_ptr.p_proc->p_lflag &
				    P_LTERM_DECRYPTFAIL) != 0) {
					kn->kn_data |= NOTE_EXIT_DECRYPTFAIL;
				}
				if ((kn->kn_ptr.p_proc->p_lflag &
				    P_LTERM_JETSAM) != 0) {
					kn->kn_data |= NOTE_EXIT_MEMORY;
					switch (kn->kn_ptr.p_proc->p_lflag &
					    P_JETSAM_MASK) {
					case P_JETSAM_VMPAGESHORTAGE:
						kn->kn_data |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
						break;
					case P_JETSAM_VMTHRASHING:
						kn->kn_data |= NOTE_EXIT_MEMORY_VMTHRASHING;
						break;
					case P_JETSAM_VNODE:
						kn->kn_data |= NOTE_EXIT_MEMORY_VNODE;
						break;
					case P_JETSAM_HIWAT:
						kn->kn_data |= NOTE_EXIT_MEMORY_HIWAT;
						break;
					case P_JETSAM_PID:
						kn->kn_data |= NOTE_EXIT_MEMORY_PID;
						break;
					case P_JETSAM_IDLEEXIT:
						kn->kn_data |= NOTE_EXIT_MEMORY_IDLE;
						break;
					}
				}
				if ((kn->kn_ptr.p_proc->p_csflags &
				    CS_KILLED) != 0) {
					kn->kn_data |= NOTE_EXIT_CSERROR;
				}
			}
		}

	}

	/* atomic check, no locking needed when called from above */
	return (kn->kn_fflags != 0);
}

#if VM_PRESSURE_EVENTS
/*
 * Virtual memory kevents
 *
 * author: Matt Jacobson [matthew_jacobson@apple.com]
 */

static int
filt_vmattach(struct knote *kn)
{
	/*
	 * The note will be cleared once the information has been flushed to
	 * the client.  If there is still pressure, we will be re-alerted.
	 */
	kn->kn_flags |= EV_CLEAR;
	return (vm_knote_register(kn));
}

static void
filt_vmdetach(struct knote *kn)
{
	vm_knote_unregister(kn);
}

static int
filt_vm(struct knote *kn, long hint)
{
	/* hint == 0 means this is just an alive? check (always true) */
	if (hint != 0) {
		const pid_t pid = (pid_t)hint;
		if ((kn->kn_sfflags & NOTE_VM_PRESSURE) &&
		    (kn->kn_kq->kq_p->p_pid == pid)) {
			kn->kn_fflags |= NOTE_VM_PRESSURE;
		}
	}

	return (kn->kn_fflags != 0);
}
#endif /* VM_PRESSURE_EVENTS */

/*
 * filt_timervalidate - process data from user
 *
 *	Converts to either interval or deadline format.
 *
 *	The saved-data field in the knote contains the
 *	time value.  The saved filter-flags indicate
 *	the unit of measurement.
 *
 *	After validation, either the saved-data field
 *	contains the interval in absolute time, or ext[0]
 *	contains the expected deadline.  If that deadline
 *	is in the past, ext[0] is 0.
 *
 *	Returns EINVAL for unrecognized units of time.
 *
 *	Timer filter lock is held.
 */
static int
filt_timervalidate(struct knote *kn)
{
	uint64_t multiplier;
	uint64_t raw = 0;

	switch (kn->kn_sfflags & (NOTE_SECONDS|NOTE_USECONDS|NOTE_NSECONDS)) {
	case NOTE_SECONDS:
		multiplier = NSEC_PER_SEC;
		break;
	case NOTE_USECONDS:
		multiplier = NSEC_PER_USEC;
		break;
	case NOTE_NSECONDS:
		multiplier = 1;
		break;
	case 0: /* milliseconds (default) */
		multiplier = NSEC_PER_SEC / 1000;
		break;
	default:
		return (EINVAL);
	}

	/* transform the leeway (slop delta) in kn_ext[1], if passed, to the same time scale */
	if (kn->kn_sfflags & NOTE_LEEWAY) {
		nanoseconds_to_absolutetime((uint64_t)kn->kn_ext[1] * multiplier, &raw);
		kn->kn_ext[1] = raw;
	}

	nanoseconds_to_absolutetime((uint64_t)kn->kn_sdata * multiplier, &raw);

	kn->kn_ext[0] = 0;
	kn->kn_sdata = 0;

	if (kn->kn_sfflags & NOTE_ABSOLUTE) {
		clock_sec_t seconds;
		clock_nsec_t nanoseconds;
		uint64_t now;

		clock_get_calendar_nanotime(&seconds, &nanoseconds);
		nanoseconds_to_absolutetime((uint64_t)seconds * NSEC_PER_SEC +
		    nanoseconds, &now);

		if (raw < now) {
			/* time has already passed */
			kn->kn_ext[0] = 0;
		} else {
			raw -= now;
			clock_absolutetime_interval_to_deadline(raw,
			    &kn->kn_ext[0]);
		}
	} else {
		kn->kn_sdata = raw;
	}

	return (0);
}
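
/*
 * Worked example (illustrative, not original to this file): a user
 * kevent with fflags NOTE_SECONDS and data == 5 yields raw = 5 *
 * NSEC_PER_SEC converted to absolute-time units and stored as a
 * repeating interval in kn_sdata.  With NOTE_ABSOLUTE | NOTE_SECONDS,
 * data instead names a calendar deadline and the result lands in
 * kn_ext[0] (or 0 if that deadline has already passed).
 */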

/*
 * filt_timerupdate - compute the next deadline
 *
 *	Repeating timers store their interval in kn_sdata.  Absolute
 *	timers have already calculated the deadline, stored in ext[0].
 *
 *	On return, the next deadline (or zero if no deadline is needed)
 *	is stored in kn_ext[0].
 *
 *	Timer filter lock is held.
 */
static void
filt_timerupdate(struct knote *kn)
{
	/* if there's no interval, deadline is just in kn_ext[0] */
	if (kn->kn_sdata == 0)
		return;

	/* if timer hasn't fired before, fire in interval nsecs */
	if (kn->kn_ext[0] == 0) {
		clock_absolutetime_interval_to_deadline(kn->kn_sdata,
		    &kn->kn_ext[0]);
	} else {
		/*
		 * If timer has fired before, schedule the next pop
		 * relative to the last intended deadline.
		 *
		 * We could check for whether the deadline has expired,
		 * but the thread call layer can handle that.
		 */
		kn->kn_ext[0] += kn->kn_sdata;
	}
}

/*
 * filt_timerexpire - the timer callout routine
 *
 *	Just propagate the timer event into the knote
 *	filter routine (by going through the knote
 *	synchronization point).  Pass a hint to
 *	indicate this is a real event, not just a
 *	query from above.
 */
static void
filt_timerexpire(void *knx, __unused void *spare)
{
	struct klist timer_list;
	struct knote *kn = knx;

	filt_timerlock();

	kn->kn_hookid &= ~TIMER_RUNNING;

	/* no "object" for timers, so fake a list */
	SLIST_INIT(&timer_list);
	SLIST_INSERT_HEAD(&timer_list, kn, kn_selnext);
	KNOTE(&timer_list, 1);

	/* if someone is waiting for timer to pop */
	if (kn->kn_hookid & TIMER_CANCELWAIT) {
		struct kqueue *kq = kn->kn_kq;
		wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kn->kn_hook,
		    THREAD_AWAKENED);
	}

	filt_timerunlock();
}

/*
 * Cancel a running timer (or wait for the pop).
 * Timer filter lock is held.
 */
static void
filt_timercancel(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	thread_call_t callout = kn->kn_hook;
	boolean_t cancelled;

	if (kn->kn_hookid & TIMER_RUNNING) {
		/* cancel the callout if we can */
		cancelled = thread_call_cancel(callout);
		if (cancelled) {
			kn->kn_hookid &= ~TIMER_RUNNING;
		} else {
			/* we have to wait for the expire routine.  */
			kn->kn_hookid |= TIMER_CANCELWAIT;
			wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
			    &kn->kn_hook, THREAD_UNINT, 0);
			filt_timerunlock();
			thread_block(THREAD_CONTINUE_NULL);
			filt_timerlock();
			assert((kn->kn_hookid & TIMER_RUNNING) == 0);
		}
	}
}

/*
 * Allocate a thread call for the knote's lifetime, and kick off the timer.
 */
static int
filt_timerattach(struct knote *kn)
{
	thread_call_t callout;
	int error;

	callout = thread_call_allocate(filt_timerexpire, kn);
	if (NULL == callout)
		return (ENOMEM);

	filt_timerlock();
	error = filt_timervalidate(kn);
	if (error != 0) {
		filt_timerunlock();
		return (error);
	}

	kn->kn_hook = (void*)callout;
	kn->kn_hookid = 0;

	/* absolute=EV_ONESHOT */
	if (kn->kn_sfflags & NOTE_ABSOLUTE)
		kn->kn_flags |= EV_ONESHOT;

	filt_timerupdate(kn);
	if (kn->kn_ext[0]) {
		kn->kn_flags |= EV_CLEAR;
		unsigned int timer_flags = 0;
		if (kn->kn_sfflags & NOTE_CRITICAL)
			timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
		else if (kn->kn_sfflags & NOTE_BACKGROUND)
			timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
		else
			timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;

		if (kn->kn_sfflags & NOTE_LEEWAY)
			timer_flags |= THREAD_CALL_DELAY_LEEWAY;

		thread_call_enter_delayed_with_leeway(callout, NULL,
		    kn->kn_ext[0], kn->kn_ext[1], timer_flags);

		kn->kn_hookid |= TIMER_RUNNING;
	} else {
		/* fake immediate */
		kn->kn_data = 1;
	}

	filt_timerunlock();
	return (0);
}

/*
 * Shut down the timer if it's running, and free the callout.
 */
static void
filt_timerdetach(struct knote *kn)
{
	thread_call_t callout;

	filt_timerlock();

	callout = (thread_call_t)kn->kn_hook;
	filt_timercancel(kn);

	filt_timerunlock();

	thread_call_free(callout);
}



static int
filt_timer(struct knote *kn, long hint)
{
	int result;

	if (hint) {
		/* real timer pop -- timer lock held by filt_timerexpire */
		kn->kn_data++;

		if (((kn->kn_hookid & TIMER_CANCELWAIT) == 0) &&
		    ((kn->kn_flags & EV_ONESHOT) == 0)) {

			/* evaluate next time to fire */
			filt_timerupdate(kn);

			if (kn->kn_ext[0]) {
				unsigned int timer_flags = 0;

				/* keep the callout and re-arm */
				if (kn->kn_sfflags & NOTE_CRITICAL)
					timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
				else if (kn->kn_sfflags & NOTE_BACKGROUND)
					timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
				else
					timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;

				if (kn->kn_sfflags & NOTE_LEEWAY)
					timer_flags |= THREAD_CALL_DELAY_LEEWAY;

				thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL,
				    kn->kn_ext[0], kn->kn_ext[1], timer_flags);

				kn->kn_hookid |= TIMER_RUNNING;
			}
		}

		return (1);
	}

	/* user-query */
	filt_timerlock();

	result = (kn->kn_data != 0);

	filt_timerunlock();

	return (result);
}


/*
 * filt_timertouch - update knote with new user input
 *
 *	Cancel and restart the timer based on new user data.  When
 *	the user picks up a knote, clear the count of how many timer
 *	pops have gone off (in kn_data).
 */
static void
filt_timertouch(struct knote *kn, struct kevent64_s *kev, long type)
{
	int error;
	filt_timerlock();

	switch (type) {
	case EVENT_REGISTER:
		/* cancel current call */
		filt_timercancel(kn);

		/* recalculate deadline */
		kn->kn_sdata = kev->data;
		kn->kn_sfflags = kev->fflags;
		kn->kn_ext[0] = kev->ext[0];
		kn->kn_ext[1] = kev->ext[1];

		error = filt_timervalidate(kn);
		if (error) {
			/* no way to report error, so mark it in the knote */
			kn->kn_flags |= EV_ERROR;
			kn->kn_data = error;
			break;
		}

		/* start timer if necessary */
		filt_timerupdate(kn);

		if (kn->kn_ext[0]) {
			unsigned int timer_flags = 0;
			if (kn->kn_sfflags & NOTE_CRITICAL)
				timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
			else if (kn->kn_sfflags & NOTE_BACKGROUND)
				timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
			else
				timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;

			if (kn->kn_sfflags & NOTE_LEEWAY)
				timer_flags |= THREAD_CALL_DELAY_LEEWAY;

			thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL,
			    kn->kn_ext[0], kn->kn_ext[1], timer_flags);

			kn->kn_hookid |= TIMER_RUNNING;
		} else {
			/* pretend the timer has fired */
			kn->kn_data = 1;
		}

		break;

	case EVENT_PROCESS:
		/* reset the timer pop count in kn_data */
		*kev = kn->kn_kevent;
		kev->ext[0] = 0;
		kn->kn_data = 0;
		if (kn->kn_flags & EV_CLEAR)
			kn->kn_fflags = 0;
		break;
	default:
		panic("%s: - invalid type (%ld)", __func__, type);
		break;
	}

	filt_timerunlock();
}

static void
filt_timerlock(void)
{
	lck_mtx_lock(&_filt_timerlock);
}

static void
filt_timerunlock(void)
{
	lck_mtx_unlock(&_filt_timerlock);
}

static int
filt_userattach(struct knote *kn)
{
	/* EVFILT_USER knotes are not attached to anything in the kernel */
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER) {
		kn->kn_hookid = 1;
	} else {
		kn->kn_hookid = 0;
	}
	return (0);
}

static void
filt_userdetach(__unused struct knote *kn)
{
	/* EVFILT_USER knotes are not attached to anything in the kernel */
}

static int
filt_user(struct knote *kn, __unused long hint)
{
	return (kn->kn_hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent64_s *kev, long type)
{
	uint32_t ffctrl;
	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER) {
			kn->kn_hookid = 1;
		}

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;
		case NOTE_FFAND:
			OSBitAndAtomic(kev->fflags, &kn->kn_sfflags);
			break;
		case NOTE_FFOR:
			OSBitOrAtomic(kev->fflags, &kn->kn_sfflags);
			break;
		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;
		}
		kn->kn_sdata = kev->data;
		break;
	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = (volatile UInt32)kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;
	default:
		panic("%s: - invalid type (%ld)", __func__, type);
		break;
	}
}
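
/*
 * Illustrative user-space sequence for EVFILT_USER (assumed example,
 * not part of this file), showing how filt_usertouch() above gets
 * driven.  NOTE_TRIGGER on a later registration sets kn_hookid = 1,
 * which filt_user() reports as the event being fired:
 *
 *	struct kevent64_s kev;
 *	EV_SET64(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0, 0, 0);
 *	kevent64(kq, &kev, 1, NULL, 0, 0, NULL);	... register ...
 *	EV_SET64(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0, 0, 0);
 *	kevent64(kq, &kev, 1, NULL, 0, 0, NULL);	... fire ...
 */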

/*
 * JMM - placeholder for not-yet-implemented filters
 */
static int
filt_badattach(__unused struct knote *kn)
{
	return (ENOTSUP);
}

struct kqueue *
kqueue_alloc(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;

	MALLOC_ZONE(kq, struct kqueue *, sizeof (struct kqueue), M_KQUEUE,
	    M_WAITOK);
	if (kq != NULL) {
		wait_queue_set_t wqs;

		wqs = wait_queue_set_alloc(SYNC_POLICY_FIFO |
		    SYNC_POLICY_PREPOST);
		if (wqs != NULL) {
			bzero(kq, sizeof (struct kqueue));
			lck_spin_init(&kq->kq_lock, kq_lck_grp, kq_lck_attr);
			TAILQ_INIT(&kq->kq_head);
			kq->kq_wqs = wqs;
			kq->kq_p = p;
		} else {
			FREE_ZONE(kq, sizeof (struct kqueue), M_KQUEUE);
		}
	}

	if (fdp->fd_knlistsize < 0) {
		proc_fdlock(p);
		if (fdp->fd_knlistsize < 0)
			fdp->fd_knlistsize = 0;	/* this process has had a kq */
		proc_fdunlock(p);
	}

	return (kq);
}

/*
 * kqueue_dealloc - detach all knotes from a kqueue and free it
 *
 *	We walk each list looking for knotes referencing this
 *	kqueue.  If we find one, we try to drop it.  But if we
 *	fail to get a drop reference, that will wait until it
 *	is dropped.  So, we can just restart again safe in the
 *	assumption that the list will eventually not contain
 *	any more references to this kqueue (either we dropped
 *	them all, or someone else did).
 *
 *	Assumes no new events are being added to the kqueue.
 *	Nothing locked on entry or exit.
 */
void
kqueue_dealloc(struct kqueue *kq)
{
	struct proc *p = kq->kq_p;
	struct filedesc *fdp = p->p_fd;
	struct knote *kn;
	int i;

	proc_fdlock(p);
	for (i = 0; i < fdp->fd_knlistsize; i++) {
		kn = SLIST_FIRST(&fdp->fd_knlist[i]);
		while (kn != NULL) {
			if (kq == kn->kn_kq) {
				kqlock(kq);
				proc_fdunlock(p);
				/* drop it ourselves or wait */
				if (kqlock2knotedrop(kq, kn)) {
					kn->kn_fop->f_detach(kn);
					knote_drop(kn, p);
				}
				proc_fdlock(p);
				/* start over at beginning of list */
				kn = SLIST_FIRST(&fdp->fd_knlist[i]);
				continue;
			}
			kn = SLIST_NEXT(kn, kn_link);
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
			kn = SLIST_FIRST(&fdp->fd_knhash[i]);
			while (kn != NULL) {
				if (kq == kn->kn_kq) {
					kqlock(kq);
					proc_fdunlock(p);
					/* drop it ourselves or wait */
					if (kqlock2knotedrop(kq, kn)) {
						kn->kn_fop->f_detach(kn);
						knote_drop(kn, p);
					}
					proc_fdlock(p);
					/* start over at beginning of list */
					kn = SLIST_FIRST(&fdp->fd_knhash[i]);
					continue;
				}
				kn = SLIST_NEXT(kn, kn_link);
			}
		}
	}
	proc_fdunlock(p);

	/*
	 * before freeing the wait queue set for this kqueue,
	 * make sure it is unlinked from all its containing (select) sets.
	 */
	wait_queue_unlink_all((wait_queue_t)kq->kq_wqs);
	wait_queue_set_free(kq->kq_wqs);
	lck_spin_destroy(&kq->kq_lock, kq_lck_grp);
	FREE_ZONE(kq, sizeof (struct kqueue), M_KQUEUE);
}

int
kqueue_body(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
{
	struct kqueue *kq;
	struct fileproc *fp;
	int fd, error;

	error = falloc_withalloc(p,
	    &fp, &fd, vfs_context_current(), fp_zalloc, cra);
	if (error) {
		return (error);
	}

	kq = kqueue_alloc(p);
	if (kq == NULL) {
		fp_free(p, fd, fp);
		return (ENOMEM);
	}

	fp->f_flag = FREAD | FWRITE;
	fp->f_ops = &kqueueops;
	fp->f_data = kq;

	proc_fdlock(p);
	procfdtbl_releasefd(p, fd, NULL);
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);

	*retval = fd;
	return (error);
}

int
kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
{
	return (kqueue_body(p, fileproc_alloc_init, NULL, retval));
}
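
/*
 * Illustrative user-space usage of the syscalls above (assumed
 * example, not part of this file): create a kqueue, register a read
 * filter on a descriptor, and block until it fires:
 *
 *	int kq = kqueue();
 *	struct kevent64_s kev;
 *	EV_SET64(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, 0, 0, 0);
 *	int n = kevent64(kq, &kev, 1, &kev, 1, 0, NULL);
 */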

static int
kevent_copyin(user_addr_t *addrp, struct kevent64_s *kevp, struct proc *p,
    int iskev64)
{
	int advance;
	int error;

	if (iskev64) {
		advance = sizeof (struct kevent64_s);
		error = copyin(*addrp, (caddr_t)kevp, advance);
	} else if (IS_64BIT_PROCESS(p)) {
		struct user64_kevent kev64;
		bzero(kevp, sizeof (struct kevent64_s));

		advance = sizeof (kev64);
		error = copyin(*addrp, (caddr_t)&kev64, advance);
		if (error)
			return (error);
		kevp->ident = kev64.ident;
		kevp->filter = kev64.filter;
		kevp->flags = kev64.flags;
		kevp->fflags = kev64.fflags;
		kevp->data = kev64.data;
		kevp->udata = kev64.udata;
	} else {
		struct user32_kevent kev32;
		bzero(kevp, sizeof (struct kevent64_s));

		advance = sizeof (kev32);
		error = copyin(*addrp, (caddr_t)&kev32, advance);
		if (error)
			return (error);
		kevp->ident = (uintptr_t)kev32.ident;
		kevp->filter = kev32.filter;
		kevp->flags = kev32.flags;
		kevp->fflags = kev32.fflags;
		kevp->data = (intptr_t)kev32.data;
		kevp->udata = CAST_USER_ADDR_T(kev32.udata);
	}
	if (!error)
		*addrp += advance;
	return (error);
}

static int
kevent_copyout(struct kevent64_s *kevp, user_addr_t *addrp, struct proc *p,
    int iskev64)
{
	int advance;
	int error;

	if (iskev64) {
		advance = sizeof (struct kevent64_s);
		error = copyout((caddr_t)kevp, *addrp, advance);
	} else if (IS_64BIT_PROCESS(p)) {
		struct user64_kevent kev64;

		/*
		 * deal with the special case of a user-supplied
		 * value of (uintptr_t)-1.
		 */
		kev64.ident = (kevp->ident == (uintptr_t)-1) ?
		    (uint64_t)-1LL : (uint64_t)kevp->ident;

		kev64.filter = kevp->filter;
		kev64.flags = kevp->flags;
		kev64.fflags = kevp->fflags;
		kev64.data = (int64_t) kevp->data;
		kev64.udata = kevp->udata;
		advance = sizeof (kev64);
		error = copyout((caddr_t)&kev64, *addrp, advance);
	} else {
		struct user32_kevent kev32;

		kev32.ident = (uint32_t)kevp->ident;
		kev32.filter = kevp->filter;
		kev32.flags = kevp->flags;
		kev32.fflags = kevp->fflags;
		kev32.data = (int32_t)kevp->data;
		kev32.udata = kevp->udata;
		advance = sizeof (kev32);
		error = copyout((caddr_t)&kev32, *addrp, advance);
	}
	if (!error)
		*addrp += advance;
	return (error);
}
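
/*
 * Summary (illustrative note): the copyin/copyout pair above handles
 * three wire formats -- the explicit kevent64_s layout used by
 * kevent64(2) regardless of process width, plus the user64_kevent and
 * user32_kevent legacy layouts used by the older kevent(2), selected
 * by iskev64 and IS_64BIT_PROCESS().
 */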

/*
 * kevent_continue - continue a kevent syscall after blocking
 *
 *	assume we inherit a use count on the kq fileglob.
 */

static void
kevent_continue(__unused struct kqueue *kq, void *data, int error)
{
	struct _kevent *cont_args;
	struct fileproc *fp;
	int32_t *retval;
	int noutputs;
	int fd;
	struct proc *p = current_proc();

	cont_args = (struct _kevent *)data;
	noutputs = cont_args->eventout;
	retval = cont_args->retval;
	fd = cont_args->fd;
	fp = cont_args->fp;

	fp_drop(p, fd, fp, 0);

	/* don't restart after signals... */
	if (error == ERESTART)
		error = EINTR;
	else if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0)
		*retval = noutputs;
	unix_syscall_return(error);
}

/*
 * kevent - [syscall] register and wait for kernel events
 *
 */
int
kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
{
	return (kevent_internal(p,
	    0,
	    uap->changelist,
	    uap->nchanges,
	    uap->eventlist,
	    uap->nevents,
	    uap->fd,
	    uap->timeout,
	    0, /* no flags from old kevent() call */
	    retval));
}

int
kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
{
	return (kevent_internal(p,
	    1,
	    uap->changelist,
	    uap->nchanges,
	    uap->eventlist,
	    uap->nevents,
	    uap->fd,
	    uap->timeout,
	    uap->flags,
	    retval));
}

static int
kevent_internal(struct proc *p, int iskev64, user_addr_t changelist,
    int nchanges, user_addr_t ueventlist, int nevents, int fd,
    user_addr_t utimeout, __unused unsigned int flags,
    int32_t *retval)
{
	struct _kevent *cont_args;
	uthread_t ut;
	struct kqueue *kq;
	struct fileproc *fp;
	struct kevent64_s kev;
	int error, noutputs;
	struct timeval atv;

	/* convert timeout to absolute - if we have one */
	if (utimeout != USER_ADDR_NULL) {
		struct timeval rtv;
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timespec ts;
			error = copyin(utimeout, &ts, sizeof(ts));
			if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0)
				error = EINVAL;
			else
				TIMESPEC_TO_TIMEVAL(&rtv, &ts);
		} else {
			struct user32_timespec ts;
			error = copyin(utimeout, &ts, sizeof(ts));
			TIMESPEC_TO_TIMEVAL(&rtv, &ts);
		}
		if (error)
			return (error);
		if (itimerfix(&rtv))
			return (EINVAL);
		getmicrouptime(&atv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
	}

	/* get a usecount for the kq itself */
	if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0)
		return (error);

	/* each kq should only be used for events of one type */
	kqlock(kq);
	if (kq->kq_state & (KQ_KEV32 | KQ_KEV64)) {
		if (((iskev64 && (kq->kq_state & KQ_KEV32)) ||
		    (!iskev64 && (kq->kq_state & KQ_KEV64)))) {
			error = EINVAL;
			kqunlock(kq);
			goto errorout;
		}
	} else {
		kq->kq_state |= (iskev64 ? KQ_KEV64 : KQ_KEV32);
	}
	kqunlock(kq);

	/* register all the change requests the user provided... */
	noutputs = 0;
	while (nchanges > 0 && error == 0) {
		error = kevent_copyin(&changelist, &kev, p, iskev64);
		if (error)
			break;

		kev.flags &= ~EV_SYSFLAGS;
		error = kevent_register(kq, &kev, p);
		if ((error || (kev.flags & EV_RECEIPT)) && nevents > 0) {
			kev.flags = EV_ERROR;
			kev.data = error;
			error = kevent_copyout(&kev, &ueventlist, p, iskev64);
			if (error == 0) {
				nevents--;
				noutputs++;
			}
		}
		nchanges--;
	}

	/* store the continuation/completion data in the uthread */
	ut = (uthread_t)get_bsdthread_info(current_thread());
	cont_args = &ut->uu_kevent.ss_kevent;
	cont_args->fp = fp;
	cont_args->fd = fd;
	cont_args->retval = retval;
	cont_args->eventlist = ueventlist;
	cont_args->eventcount = nevents;
	cont_args->eventout = noutputs;
	cont_args->eventsize = iskev64;

	if (nevents > 0 && noutputs == 0 && error == 0)
		error = kqueue_scan(kq, kevent_callback,
		    kevent_continue, cont_args,
		    &atv, p);
	kevent_continue(kq, cont_args, error);

errorout:
	fp_drop(p, fd, fp, 0);
	return (error);
}


/*
 * kevent_callback - callback for each individual event
 *
 *	called with nothing locked
 *	caller holds a reference on the kqueue
 */
static int
kevent_callback(__unused struct kqueue *kq, struct kevent64_s *kevp,
    void *data)
{
	struct _kevent *cont_args;
	int error;
	int iskev64;

	cont_args = (struct _kevent *)data;
	assert(cont_args->eventout < cont_args->eventcount);

	iskev64 = cont_args->eventsize;

	/*
	 * Copy out the appropriate amount of event data for this user.
	 */
	error = kevent_copyout(kevp, &cont_args->eventlist, current_proc(),
	    iskev64);

	/*
	 * If there isn't space for additional events, return
	 * a harmless error to stop the processing here
	 */
	if (error == 0 && ++cont_args->eventout == cont_args->eventcount)
		error = EWOULDBLOCK;
	return (error);
}

/*
 * kevent_description - format a description of a kevent for diagnostic output
 *
 *	called with a 128-byte string buffer
 */

char *
kevent_description(struct kevent64_s *kevp, char *s, size_t n)
{
	snprintf(s, n,
	    "kevent="
	    "{.ident=%#llx, .filter=%d, .flags=%#x, .fflags=%#x, .data=%#llx, .udata=%#llx, .ext[0]=%#llx, .ext[1]=%#llx}",
	    kevp->ident,
	    kevp->filter,
	    kevp->flags,
	    kevp->fflags,
	    kevp->data,
	    kevp->udata,
	    kevp->ext[0],
	    kevp->ext[1]);

	return (s);
}
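
/*
 * Example output (illustrative values; EV_ADD == 0x1 and
 * EVFILT_READ == -1):
 *
 *	kevent={.ident=0x3, .filter=-1, .flags=0x1, .fflags=0,
 *	    .data=0, .udata=0, .ext[0]=0, .ext[1]=0}
 */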
1617
91447636
A
1618/*
1619 * kevent_register - add a new event to a kqueue
1620 *
1621 * Creates a mapping between the event source and
1622 * the kqueue via a knote data structure.
1623 *
1624 * Because many/most the event sources are file
1625 * descriptor related, the knote is linked off
1626 * the filedescriptor table for quick access.
1627 *
1628 * called with nothing locked
1629 * caller holds a reference on the kqueue
1630 */
1631
55e303ae 1632int
39236c6e
A
1633kevent_register(struct kqueue *kq, struct kevent64_s *kev,
1634 __unused struct proc *ctxp)
55e303ae 1635{
2d21ac55
A
1636 struct proc *p = kq->kq_p;
1637 struct filedesc *fdp = p->p_fd;
55e303ae 1638 struct filterops *fops;
91447636 1639 struct fileproc *fp = NULL;
55e303ae 1640 struct knote *kn = NULL;
91447636 1641 int error = 0;
55e303ae
A
1642
1643 if (kev->filter < 0) {
1644 if (kev->filter + EVFILT_SYSCOUNT < 0)
1645 return (EINVAL);
1646 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
1647 } else {
1648 /*
1649 * XXX
1650 * filter attach routine is responsible for insuring that
1651 * the identifier can be attached to it.
1652 */
1653 printf("unknown filter: %d\n", kev->filter);
1654 return (EINVAL);
1655 }
1656
39236c6e 1657restart:
91447636 1658 /* this iocount needs to be dropped if it is not registered */
b0d623f7
A
1659 proc_fdlock(p);
1660 if (fops->f_isfd && (error = fp_lookup(p, kev->ident, &fp, 1)) != 0) {
1661 proc_fdunlock(p);
39236c6e 1662 return (error);
b0d623f7 1663 }
55e303ae 1664
91447636
A
1665 if (fops->f_isfd) {
1666 /* fd-based knotes are linked off the fd table */
1667 if (kev->ident < (u_int)fdp->fd_knlistsize) {
55e303ae
A
1668 SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
1669 if (kq == kn->kn_kq &&
1670 kev->filter == kn->kn_filter)
1671 break;
1672 }
1673 } else {
91447636 1674 /* hash non-fd knotes here too */
55e303ae
A
1675 if (fdp->fd_knhashmask != 0) {
1676 struct klist *list;
39236c6e 1677
55e303ae
A
1678 list = &fdp->fd_knhash[
1679 KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
1680 SLIST_FOREACH(kn, list, kn_link)
1681 if (kev->ident == kn->kn_id &&
1682 kq == kn->kn_kq &&
1683 kev->filter == kn->kn_filter)
1684 break;
1685 }
1686 }
1687
91447636
A
1688 /*
1689 * kn now contains the matching knote, or NULL if no match
1690 */
1691 if (kn == NULL) {
1692 if ((kev->flags & (EV_ADD|EV_DELETE)) == EV_ADD) {
1693 kn = knote_alloc();
1694 if (kn == NULL) {
1695 proc_fdunlock(p);
1696 error = ENOMEM;
1697 goto done;
1698 }
1699 kn->kn_fp = fp;
1700 kn->kn_kq = kq;
1701 kn->kn_tq = &kq->kq_head;
1702 kn->kn_fop = fops;
1703 kn->kn_sfflags = kev->fflags;
1704 kn->kn_sdata = kev->data;
1705 kev->fflags = 0;
1706 kev->data = 0;
1707 kn->kn_kevent = *kev;
1708 kn->kn_inuse = 1; /* for f_attach() */
b0d623f7 1709 kn->kn_status = KN_ATTACHING;
91447636
A
1710
1711 /* before anyone can find it */
1712 if (kev->flags & EV_DISABLE)
1713 kn->kn_status |= KN_DISABLED;
1714
1715 error = knote_fdpattach(kn, fdp, p);
1716 proc_fdunlock(p);
1717
1718 if (error) {
1719 knote_free(kn);
1720 goto done;
1721 }
1722
1723 /*
1724 * apply reference count to knote structure, and
1725 * do not release it at the end of this routine.
1726 */
1727 fp = NULL;
1728
b0d623f7
A
1729 error = fops->f_attach(kn);
1730
b0d623f7 1731 kqlock(kq);
6d2010ae 1732
7e4a7d39
A
1733 if (error != 0) {
1734 /*
1735 * Failed to attach correctly, so drop.
1736 * All other possible users/droppers
1737 * have deferred to us.
1738 */
b0d623f7
A
1739 kn->kn_status |= KN_DROPPING;
1740 kqunlock(kq);
91447636
A
1741 knote_drop(kn, p);
1742 goto done;
7e4a7d39
A
1743 } else if (kn->kn_status & KN_DROPPING) {
1744 /*
1745 * Attach succeeded, but someone else
1746 * deferred their drop - now we have
1747 * to do it for them (after detaching).
1748 */
1749 kqunlock(kq);
1750 kn->kn_fop->f_detach(kn);
1751 knote_drop(kn, p);
1752 goto done;
91447636 1753 }
b0d623f7
A
1754 kn->kn_status &= ~KN_ATTACHING;
1755 kqunlock(kq);
91447636
A
1756 } else {
1757 proc_fdunlock(p);
1758 error = ENOENT;
1759 goto done;
1760 }
1761 } else {
1762 /* existing knote - get kqueue lock */
1763 kqlock(kq);
1764 proc_fdunlock(p);
39236c6e 1765
91447636
A
1766 if (kev->flags & EV_DELETE) {
1767 knote_dequeue(kn);
1768 kn->kn_status |= KN_DISABLED;
1769 if (kqlock2knotedrop(kq, kn)) {
1770 kn->kn_fop->f_detach(kn);
1771 knote_drop(kn, p);
1772 }
1773 goto done;
1774 }
39236c6e 1775
91447636
A
1776 /* update status flags for existing knote */
1777 if (kev->flags & EV_DISABLE) {
1778 knote_dequeue(kn);
1779 kn->kn_status |= KN_DISABLED;
1780 } else if (kev->flags & EV_ENABLE) {
1781 kn->kn_status &= ~KN_DISABLED;
1782 if (kn->kn_status & KN_ACTIVE)
1783 knote_enqueue(kn);
1784 }
1785
b7266188
A
1786 /*
1787 * The user may change some filter values after the
39236c6e 1788 * initial EV_ADD, but doing so will not reset any
b7266188
A
1789 * filter which have already been triggered.
1790 */
1791 kn->kn_kevent.udata = kev->udata;
1792 if (fops->f_isfd || fops->f_touch == NULL) {
39236c6e
A
1793 kn->kn_sfflags = kev->fflags;
1794 kn->kn_sdata = kev->data;
b7266188
A
1795 }
1796
91447636
A
1797 /*
1798 * If somebody is in the middle of dropping this
1799 * knote - go find/insert a new one. But we have
b0d623f7
A
1800 * wait for this one to go away first. Attaches
1801 * running in parallel may also drop/modify the
1802 * knote. Wait for those to complete as well and
1803 * then start over if we encounter one.
91447636 1804 */
b0d623f7
A
1805 if (!kqlock2knoteusewait(kq, kn)) {
1806 /* kqueue, proc_fdlock both unlocked */
91447636 1807 goto restart;
b0d623f7 1808 }
91447636
A
1809
1810 /*
b7266188
A
1811 * Call touch routine to notify filter of changes
1812 * in filter values.
91447636 1813 */
b0d623f7 1814 if (!fops->f_isfd && fops->f_touch != NULL)
39236c6e 1815 fops->f_touch(kn, kev, EVENT_REGISTER);
91447636 1816 }
91447636 1817 /* still have use ref on knote */
b0d623f7
A
1818
1819 /*
1820 * If the knote is not marked to always stay enqueued,
1821 * invoke the filter routine to see if it should be
1822 * enqueued now.
1823 */
1824 if ((kn->kn_status & KN_STAYQUEUED) == 0 && kn->kn_fop->f_event(kn, 0)) {
91447636 1825 if (knoteuse2kqlock(kq, kn))
b0d623f7 1826 knote_activate(kn, 1);
91447636
A
1827 kqunlock(kq);
1828 } else {
1829 knote_put(kn);
1830 }
1831
1832done:
1833 if (fp != NULL)
1834 fp_drop(p, kev->ident, fp, 0);
1835 return (error);
1836}
1837
b0d623f7
A
1838
1839/*
1840 * knote_process - process a triggered event
1841 *
1842 * Validate that it is really still a triggered event
1843 * by calling the filter routines (if necessary). Hold
1844 * a use reference on the knote to avoid it being detached.
1845 * If it is still considered triggered, invoke the callback
1846 * routine provided and move it to the provided inprocess
1847 * queue.
1848 *
1849 * caller holds a reference on the kqueue.
1850 * kqueue locked on entry and exit - but may be dropped
1851 */
1852static int
39236c6e
A
1853knote_process(struct knote *kn,
1854 kevent_callback_t callback,
1855 void *data,
1856 struct kqtailq *inprocessp,
1857 struct proc *p)
b0d623f7
A
1858{
1859 struct kqueue *kq = kn->kn_kq;
1860 struct kevent64_s kev;
1861 int touch;
1862 int result;
1863 int error;
1864
1865 /*
1866 * Determine the kevent state we want to return.
1867 *
1868 * Some event states need to be revalidated before returning
1869 * them; for others we use the snapshot taken at the time
1870 * the event was enqueued.
1871 *
1872 * Events with non-NULL f_touch operations must be touched.
1873 * Triggered events must fill in kev for the callback.
1874 *
1875 * Convert our lock to a use-count and call the event's
1876 * filter routine(s) to update.
1877 */
1878 if ((kn->kn_status & KN_DISABLED) != 0) {
1879 result = 0;
1880 touch = 0;
1881 } else {
1882 int revalidate;
1883
1884 result = 1;
1885 revalidate = ((kn->kn_status & KN_STAYQUEUED) != 0 ||
39236c6e
A
1886 (kn->kn_flags & EV_ONESHOT) == 0);
1887 touch = (!kn->kn_fop->f_isfd && kn->kn_fop->f_touch != NULL);
b0d623f7
A
1888
1889 if (revalidate || touch) {
1890 if (revalidate)
1891 knote_deactivate(kn);
39236c6e 1892
b0d623f7
A
1893 /* call the filter/touch routines with just a ref */
1894 if (kqlock2knoteuse(kq, kn)) {
b0d623f7
A
1895 /* if we have to revalidate, call the filter */
1896 if (revalidate) {
1897 result = kn->kn_fop->f_event(kn, 0);
1898 }
1899
39236c6e
A
1900 /*
1901 * capture the kevent data - using touch if
1902 * specified
1903 */
b7266188 1904 if (result && touch) {
39236c6e
A
1905 kn->kn_fop->f_touch(kn, &kev,
1906 EVENT_PROCESS);
b0d623f7 1907 }
b7266188 1908
39236c6e
A
1909 /*
1910 * convert back to a kqlock - bail if the knote
1911 * went away
1912 */
b0d623f7 1913 if (!knoteuse2kqlock(kq, kn)) {
39236c6e 1914 return (EJUSTRETURN);
b0d623f7 1915 } else if (result) {
39236c6e
A
1916 /*
1917 * if revalidated as alive, make sure
1918 * it's active
1919 */
b0d623f7
A
1920 if (!(kn->kn_status & KN_ACTIVE)) {
1921 knote_activate(kn, 0);
1922 }
b7266188 1923
39236c6e
A
1924 /*
1925 * capture all events that occurred
1926 * during filter
1927 */
b7266188
A
1928 if (!touch) {
1929 kev = kn->kn_kevent;
1930 }
1931
b0d623f7 1932 } else if ((kn->kn_status & KN_STAYQUEUED) == 0) {
39236c6e
A
1933 /*
1934 * was already dequeued, so just bail on
1935 * this one
1936 */
1937 return (EJUSTRETURN);
b0d623f7
A
1938 }
1939 } else {
39236c6e 1940 return (EJUSTRETURN);
b0d623f7
A
1941 }
1942 } else {
1943 kev = kn->kn_kevent;
1944 }
1945 }
39236c6e 1946
b0d623f7
A
1947 /* move knote onto inprocess queue */
1948 assert(kn->kn_tq == &kq->kq_head);
1949 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1950 kn->kn_tq = inprocessp;
1951 TAILQ_INSERT_TAIL(inprocessp, kn, kn_tqe);
1952
1953 /*
1954 * Determine how to dispatch the knote for future event handling.
1955 * Not-fired: just return (do not invoke the callback).
1956 * One-shot: deactivate it.
1957 * Clear: deactivate and clear the state.
1958 * Dispatch: don't clear state, just deactivate it and mark it disabled.
1959 * All others: just leave them where they are.
1960 */
1961
1962 if (result == 0) {
39236c6e 1963 return (EJUSTRETURN);
b7266188 1964 } else if ((kn->kn_flags & EV_ONESHOT) != 0) {
b0d623f7
A
1965 knote_deactivate(kn);
1966 if (kqlock2knotedrop(kq, kn)) {
1967 kn->kn_fop->f_detach(kn);
1968 knote_drop(kn, p);
1969 }
b7266188
A
1970 } else if ((kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) != 0) {
1971 if ((kn->kn_flags & EV_DISPATCH) != 0) {
1972 /* deactivate and disable all dispatch knotes */
1973 knote_deactivate(kn);
1974 kn->kn_status |= KN_DISABLED;
1975 } else if (!touch || kn->kn_fflags == 0) {
1976 /* only deactivate if nothing since the touch */
1977 knote_deactivate(kn);
1978 }
1979 if (!touch && (kn->kn_flags & EV_CLEAR) != 0) {
1980 /* manually clear non-touch knotes */
b0d623f7
A
1981 kn->kn_data = 0;
1982 kn->kn_fflags = 0;
1983 }
b0d623f7
A
1984 kqunlock(kq);
1985 } else {
1986 /*
1987 * leave on inprocess queue. We'll
1988 * move all the remaining ones back
1989 * to the kq queue and wake up any
1990 * waiters when we are done.
1991 */
1992 kqunlock(kq);
1993 }
1994
1995 /* callback to handle each event as we find it */
1996 error = (callback)(kq, &kev, data);
39236c6e 1997
b0d623f7 1998 kqlock(kq);
39236c6e 1999 return (error);
b0d623f7
A
2000}
2001
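/*
 * A minimal userland sketch of the dispatch rules above, assuming a
 * hypothetical descriptor `fd' already open for reading: with
 * EV_DISPATCH, each delivery disables the knote until the caller
 * re-enables it with EV_ENABLE.
 */
#include <sys/types.h>
#include <sys/event.h>

static void
example_dispatch_loop(int kq, int fd)
{
	struct kevent kev;

	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
	(void) kevent(kq, &kev, 1, NULL, 0, NULL);

	for (;;) {
		if (kevent(kq, NULL, 0, &kev, 1, NULL) != 1)
			break;
		/* consume the event; the knote is disabled right now */
		EV_SET(&kev, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
		(void) kevent(kq, &kev, 1, NULL, 0, NULL);
	}
}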
6d2010ae
A
2002/*
2003 * Return 0 to indicate that processing should proceed,
2004 * -1 if there is nothing to process.
2005 *
2006 * Called with kqueue locked and returns the same way,
2007 * but may drop lock temporarily.
2008 */
2009static int
2010kqueue_begin_processing(struct kqueue *kq)
2011{
2012 for (;;) {
2013 if (kq->kq_count == 0) {
39236c6e 2014 return (-1);
6d2010ae
A
2015 }
2016
2017 /* if someone else is processing the queue, wait */
2018 if (kq->kq_nprocess != 0) {
39236c6e
A
2019 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
2020 &kq->kq_nprocess, THREAD_UNINT, 0);
6d2010ae
A
2021 kq->kq_state |= KQ_PROCWAIT;
2022 kqunlock(kq);
2023 thread_block(THREAD_CONTINUE_NULL);
2024 kqlock(kq);
2025 } else {
2026 kq->kq_nprocess = 1;
39236c6e 2027 return (0);
6d2010ae
A
2028 }
2029 }
2030}
2031
2032/*
2033 * Called with kqueue lock held.
2034 */
2035static void
2036kqueue_end_processing(struct kqueue *kq)
2037{
2038 kq->kq_nprocess = 0;
2039 if (kq->kq_state & KQ_PROCWAIT) {
2040 kq->kq_state &= ~KQ_PROCWAIT;
39236c6e
A
2041 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs,
2042 &kq->kq_nprocess, THREAD_AWAKENED);
6d2010ae
A
2043 }
2044}
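/*
 * A minimal sketch of the expected pairing, mirroring the pattern
 * kqueue_process() and kqueue_select() use below; the actual queue
 * walk is elided.
 */
static void
example_processing_pairing(struct kqueue *kq)
{
	kqlock(kq);
	if (kqueue_begin_processing(kq) == 0) {
		/* sole processor now: walk kq->kq_head here */
		kqueue_end_processing(kq);	/* wakes KQ_PROCWAIT waiters */
	}
	kqunlock(kq);
}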
b0d623f7 2045
91447636 2046/*
b0d623f7 2047 * kqueue_process - process the triggered events in a kqueue
91447636
A
2048 *
2049 * Walk the queued knotes and validate that they are
2050 * really still triggered events by calling the filter
2051 * routines (if necessary). Hold a use reference on
2052 * the knote to avoid it being detached. For each event
2053 * that is still considered triggered, invoke the
2054 * callback routine provided.
2055 *
2056 * caller holds a reference on the kqueue.
2057 * kqueue locked on entry and exit - but may be dropped
b0d623f7 2058 * kqueue list locked (held for duration of call)
91447636
A
2059 */
2060
2061static int
b0d623f7 2062kqueue_process(struct kqueue *kq,
39236c6e
A
2063 kevent_callback_t callback,
2064 void *data,
2065 int *countp,
2066 struct proc *p)
91447636 2067{
39236c6e 2068 struct kqtailq inprocess;
91447636 2069 struct knote *kn;
91447636
A
2070 int nevents;
2071 int error;
2072
39236c6e 2073 TAILQ_INIT(&inprocess);
6d2010ae
A
2074
2075 if (kqueue_begin_processing(kq) == -1) {
91447636 2076 *countp = 0;
6d2010ae 2077 /* Nothing to process */
39236c6e 2078 return (0);
91447636
A
2079 }
2080
b0d623f7 2081 /*
39236c6e
A
2082 * Clear any pre-posted status from previous runs, so we
2083 * only detect events that occur during this run.
b0d623f7
A
2084 */
2085 wait_queue_sub_clearrefs(kq->kq_wqs);
2086
2087 /*
2088 * loop through the enqueued knotes, processing each one and
2089 * revalidating those that need it. As they are processed,
2090 * they get moved to the inprocess queue (so the loop can end).
2091 */
91447636
A
2092 error = 0;
2093 nevents = 0;
b0d623f7 2094
91447636 2095 while (error == 0 &&
39236c6e 2096 (kn = TAILQ_FIRST(&kq->kq_head)) != NULL) {
b0d623f7
A
2097 error = knote_process(kn, callback, data, &inprocess, p);
2098 if (error == EJUSTRETURN)
2099 error = 0;
2100 else
2101 nevents++;
55e303ae
A
2102 }
2103
91447636
A
2104 /*
2105 * With the kqueue still locked, move any knotes
b0d623f7 2106 * remaining on the inprocess queue back to the
91447636
A
2107 * kq's queue and wake up any waiters.
2108 */
b0d623f7
A
2109 while ((kn = TAILQ_FIRST(&inprocess)) != NULL) {
2110 assert(kn->kn_tq == &inprocess);
2111 TAILQ_REMOVE(&inprocess, kn, kn_tqe);
91447636
A
2112 kn->kn_tq = &kq->kq_head;
2113 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
55e303ae 2114 }
6d2010ae
A
2115
2116 kqueue_end_processing(kq);
55e303ae 2117
91447636 2118 *countp = nevents;
39236c6e 2119 return (error);
55e303ae
A
2120}
2121
91447636
A
2122
2123static void
b0d623f7 2124kqueue_scan_continue(void *data, wait_result_t wait_result)
55e303ae 2125{
b0d623f7
A
2126 thread_t self = current_thread();
2127 uthread_t ut = (uthread_t)get_bsdthread_info(self);
2128 struct _kqueue_scan * cont_args = &ut->uu_kevent.ss_kqueue_scan;
91447636
A
2129 struct kqueue *kq = (struct kqueue *)data;
2130 int error;
2131 int count;
2132
2133 /* convert the (previous) wait_result to a proper error */
2134 switch (wait_result) {
2135 case THREAD_AWAKENED:
2136 kqlock(kq);
39236c6e
A
2137 error = kqueue_process(kq, cont_args->call, cont_args, &count,
2138 current_proc());
91447636 2139 if (error == 0 && count == 0) {
39236c6e
A
2140 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
2141 KQ_EVENT, THREAD_ABORTSAFE, cont_args->deadline);
91447636
A
2142 kq->kq_state |= KQ_SLEEP;
2143 kqunlock(kq);
b0d623f7 2144 thread_block_parameter(kqueue_scan_continue, kq);
91447636 2145 /* NOTREACHED */
55e303ae 2146 }
91447636
A
2147 kqunlock(kq);
2148 break;
2149 case THREAD_TIMED_OUT:
39236c6e 2150 error = EWOULDBLOCK;
91447636
A
2151 break;
2152 case THREAD_INTERRUPTED:
2153 error = EINTR;
2154 break;
2155 default:
39236c6e
A
2156 panic("%s: invalid wait_result (%d)", __func__,
2157 wait_result);
91447636 2158 error = 0;
55e303ae 2159 }
39236c6e 2160
91447636
A
2161 /* call the continuation with the results */
2162 assert(cont_args->cont != NULL);
2163 (cont_args->cont)(kq, cont_args->data, error);
2164}
55e303ae 2165
55e303ae 2166
91447636 2167/*
b0d623f7 2168 * kqueue_scan - scan and wait for events in a kqueue
91447636
A
2169 *
2170 * Process the triggered events in a kqueue.
2171 *
2172 * If there are no events triggered, arrange to
2173 * wait for them. If the caller provided a
2174 * continuation routine, the wait blocks with that
2175 * continuation and this routine does not return directly.
2176 *
2177 * The callback routine must be valid.
2178 * The caller must hold a use-count reference on the kq.
2179 */
55e303ae 2180
91447636 2181int
39236c6e 2182kqueue_scan(struct kqueue *kq,
91447636 2183 kevent_callback_t callback,
b0d623f7 2184 kqueue_continue_t continuation,
91447636
A
2185 void *data,
2186 struct timeval *atvp,
2187 struct proc *p)
2188{
2189 thread_continue_t cont = THREAD_CONTINUE_NULL;
2190 uint64_t deadline;
2191 int error;
2192 int first;
55e303ae 2193
91447636 2194 assert(callback != NULL);
55e303ae 2195
91447636
A
2196 first = 1;
2197 for (;;) {
2198 wait_result_t wait_result;
2199 int count;
2200
2201 /*
2202 * Make a pass through the kq to find events already
39236c6e 2203 * triggered.
91447636
A
2204 */
2205 kqlock(kq);
b0d623f7 2206 error = kqueue_process(kq, callback, data, &count, p);
91447636
A
2207 if (error || count)
2208 break; /* lock still held */
2209
2210 /* looks like we have to consider blocking */
2211 if (first) {
2212 first = 0;
2213 /* convert the timeout to a deadline once */
2214 if (atvp->tv_sec || atvp->tv_usec) {
91447636 2215 uint64_t now;
39236c6e 2216
91447636
A
2217 clock_get_uptime(&now);
2218 nanoseconds_to_absolutetime((uint64_t)atvp->tv_sec * NSEC_PER_SEC +
39236c6e 2219 atvp->tv_usec * (long)NSEC_PER_USEC,
91447636
A
2220 &deadline);
2221 if (now >= deadline) {
2222 /* non-blocking call */
2223 error = EWOULDBLOCK;
2224 break; /* lock still held */
2225 }
2226 deadline -= now;
2227 clock_absolutetime_interval_to_deadline(deadline, &deadline);
55e303ae 2228 } else {
91447636
A
2229 deadline = 0; /* block forever */
2230 }
2231
2232 if (continuation) {
2233 uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
b0d623f7 2234 struct _kqueue_scan *cont_args = &ut->uu_kevent.ss_kqueue_scan;
39236c6e 2235
91447636
A
2236 cont_args->call = callback;
2237 cont_args->cont = continuation;
2238 cont_args->deadline = deadline;
2239 cont_args->data = data;
b0d623f7 2240 cont = kqueue_scan_continue;
55e303ae
A
2241 }
2242 }
91447636
A
2243
2244 /* go ahead and wait */
39236c6e
A
2245 wait_queue_assert_wait_with_leeway((wait_queue_t)kq->kq_wqs,
2246 KQ_EVENT, THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL,
2247 deadline, 0);
91447636
A
2248 kq->kq_state |= KQ_SLEEP;
2249 kqunlock(kq);
2250 wait_result = thread_block_parameter(cont, kq);
2251 /* NOTREACHED if (continuation != NULL) */
2252
2253 switch (wait_result) {
2254 case THREAD_AWAKENED:
2255 continue;
2256 case THREAD_TIMED_OUT:
39236c6e 2257 return (EWOULDBLOCK);
91447636 2258 case THREAD_INTERRUPTED:
39236c6e 2259 return (EINTR);
91447636 2260 default:
39236c6e
A
2261 panic("%s: bad wait_result (%d)", __func__,
2262 wait_result);
91447636
A
2263 error = 0;
2264 }
55e303ae 2265 }
91447636 2266 kqunlock(kq);
39236c6e 2267 return (error);
55e303ae
A
2268}
2269
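/*
 * A minimal userland sketch against the timeout handling above: a
 * zero timespec makes kevent(2) poll and return immediately, while a
 * NULL timeout blocks indefinitely (the deadline == 0 case above).
 * `kq' is a hypothetical kqueue descriptor.
 */
#include <sys/types.h>
#include <sys/event.h>

static int
example_poll(int kq, struct kevent *out)
{
	struct timespec zero = { 0, 0 };

	/* returns immediately; 0 means nothing was triggered */
	return (kevent(kq, NULL, 0, out, 1, &zero));
}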
91447636 2270
55e303ae
A
2271/*
2272 * XXX
2273 * This could be expanded to call kqueue_scan, if desired.
2274 */
2275/*ARGSUSED*/
2276static int
39236c6e
A
2277kqueue_read(__unused struct fileproc *fp,
2278 __unused struct uio *uio,
2279 __unused int flags,
2280 __unused vfs_context_t ctx)
55e303ae
A
2281{
2282 return (ENXIO);
2283}
2284
2285/*ARGSUSED*/
2286static int
39236c6e
A
2287kqueue_write(__unused struct fileproc *fp,
2288 __unused struct uio *uio,
2289 __unused int flags,
2290 __unused vfs_context_t ctx)
55e303ae
A
2291{
2292 return (ENXIO);
2293}
2294
2295/*ARGSUSED*/
2296static int
39236c6e
A
2297kqueue_ioctl(__unused struct fileproc *fp,
2298 __unused u_long com,
2299 __unused caddr_t data,
2300 __unused vfs_context_t ctx)
55e303ae
A
2301{
2302 return (ENOTTY);
2303}
2304
2305/*ARGSUSED*/
2306static int
39236c6e
A
2307kqueue_select(struct fileproc *fp, int which, void *wql,
2308 __unused vfs_context_t ctx)
55e303ae
A
2309{
2310 struct kqueue *kq = (struct kqueue *)fp->f_data;
6d2010ae
A
2311 struct knote *kn;
2312 struct kqtailq inprocessq;
2313 int retnum = 0;
39236c6e 2314
b0d623f7 2315 if (which != FREAD)
39236c6e 2316 return (0);
b0d623f7 2317
6d2010ae
A
2318 TAILQ_INIT(&inprocessq);
2319
b0d623f7 2320 kqlock(kq);
39236c6e 2321 /*
b0d623f7
A
2322 * If this is the first pass, link the wait queue associated with the
2323 * kqueue onto the wait queue set for the select(). Normally we
2324 * use selrecord() for this, but it uses the wait queue within the
2325 * selinfo structure and we need to use the main one for the kqueue to
2326 * catch events from KN_STAYQUEUED sources. So we do the linkage manually.
2327 * (The select() call will unlink them when it ends).
2328 */
2329 if (wql != NULL) {
39236c6e 2330 thread_t cur_act = current_thread();
b0d623f7
A
2331 struct uthread * ut = get_bsdthread_info(cur_act);
2332
2333 kq->kq_state |= KQ_SEL;
2334 wait_queue_link_noalloc((wait_queue_t)kq->kq_wqs, ut->uu_wqset,
39236c6e 2335 (wait_queue_link_t)wql);
b0d623f7
A
2336 }
2337
6d2010ae
A
2338 if (kqueue_begin_processing(kq) == -1) {
2339 kqunlock(kq);
39236c6e 2340 return (0);
6d2010ae 2341 }
b0d623f7 2342
6d2010ae 2343 if (kq->kq_count != 0) {
b0d623f7
A
2344 /*
2345 * there is something queued - but it might be a
2346 * KN_STAYQUEUED knote, which may or may not have
2347 * any events pending. So, we have to walk the
2348 * list of knotes to see, and peek at the stay-
2349 * queued ones to be really sure.
2350 */
39236c6e 2351 while ((kn = (struct knote *)TAILQ_FIRST(&kq->kq_head)) != NULL) {
6d2010ae
A
2352 if ((kn->kn_status & KN_STAYQUEUED) == 0) {
2353 retnum = 1;
2354 goto out;
b0d623f7 2355 }
6d2010ae
A
2356
2357 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2358 TAILQ_INSERT_TAIL(&inprocessq, kn, kn_tqe);
2359
2360 if (kqlock2knoteuse(kq, kn)) {
2361 unsigned peek;
2362
2363 peek = kn->kn_fop->f_peek(kn);
2364 if (knoteuse2kqlock(kq, kn)) {
2365 if (peek > 0) {
2366 retnum = 1;
2367 goto out;
2368 }
2369 } else {
2370 retnum = 0;
2371 }
39236c6e 2372 }
55e303ae 2373 }
b0d623f7
A
2374 }
2375
6d2010ae
A
2376out:
2377 /* Return knotes to active queue */
2378 while ((kn = TAILQ_FIRST(&inprocessq)) != NULL) {
2379 TAILQ_REMOVE(&inprocessq, kn, kn_tqe);
2380 kn->kn_tq = &kq->kq_head;
2381 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
55e303ae 2382 }
b0d623f7 2383
6d2010ae 2384 kqueue_end_processing(kq);
b0d623f7 2385 kqunlock(kq);
39236c6e 2386 return (retnum);
55e303ae
A
2387}
2388
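/*
 * A minimal userland sketch of what this select() hook provides: a
 * kqueue descriptor becomes readable once an event is pending. `kq'
 * is a hypothetical descriptor.
 */
#include <sys/select.h>

static int
example_select_on_kqueue(int kq)
{
	fd_set rfds;

	FD_ZERO(&rfds);
	FD_SET(kq, &rfds);
	/* > 0 once a pending event exists (stay-queued ones are peeked) */
	return (select(kq + 1, &rfds, NULL, NULL, NULL));
}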
91447636
A
2389/*
2390 * kqueue_close -
2391 */
55e303ae
A
2392/*ARGSUSED*/
2393static int
2d21ac55 2394kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
55e303ae 2395{
91447636 2396 struct kqueue *kq = (struct kqueue *)fg->fg_data;
55e303ae 2397
2d21ac55 2398 kqueue_dealloc(kq);
91447636 2399 fg->fg_data = NULL;
55e303ae
A
2400 return (0);
2401}
2402
2403/*ARGSUSED*/
91447636
A
2404/*
2405 * The caller has taken a use-count reference on this kqueue and will donate it
2406 * to the kqueue we are being added to. This keeps the kqueue from closing until
2407 * that relationship is torn down.
2408 */
55e303ae 2409static int
2d21ac55 2410kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, __unused vfs_context_t ctx)
55e303ae
A
2411{
2412 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
2d21ac55 2413 struct kqueue *parentkq = kn->kn_kq;
55e303ae 2414
2d21ac55
A
2415 if (parentkq == kq ||
2416 kn->kn_filter != EVFILT_READ)
55e303ae
A
2417 return (1);
2418
2d21ac55
A
2419 /*
2420 * We have to avoid creating a cycle when nesting kqueues
2421 * inside another. Rather than trying to walk the whole
2422 * potential DAG of nested kqueues, we just use a simple
2423 * ceiling protocol. When a kqueue is inserted into another,
2424 * we check that the (future) parent is not already nested
2425 * into another kqueue at a lower level than the potential
2426 * child (because it could indicate a cycle). If that test
2427 * passes, we just mark the nesting levels accordingly.
2428 */
2429
2430 kqlock(parentkq);
39236c6e 2431 if (parentkq->kq_level > 0 &&
2d21ac55
A
2432 parentkq->kq_level < kq->kq_level)
2433 {
2434 kqunlock(parentkq);
2435 return (1);
2436 } else {
2437 /* set parent level appropriately */
2438 if (parentkq->kq_level == 0)
2439 parentkq->kq_level = 2;
2440 if (parentkq->kq_level < kq->kq_level + 1)
2441 parentkq->kq_level = kq->kq_level + 1;
2442 kqunlock(parentkq);
2443
2444 kn->kn_fop = &kqread_filtops;
2445 kqlock(kq);
2446 KNOTE_ATTACH(&kq->kq_sel.si_note, kn);
2447 /* indicate nesting in child, if needed */
2448 if (kq->kq_level == 0)
2449 kq->kq_level = 1;
2450 kqunlock(kq);
2451 return (0);
2452 }
55e303ae
A
2453}
2454
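/*
 * A minimal userland sketch of the nesting this filter supports:
 * registering one kqueue inside another via EVFILT_READ. `outer' and
 * `inner' are hypothetical descriptors; the ceiling check above may
 * reject the registration to avoid cycles.
 */
#include <sys/types.h>
#include <sys/event.h>

static int
example_nest(int outer, int inner)
{
	struct kevent kev;

	EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
	return (kevent(outer, &kev, 1, NULL, 0, NULL));
}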
b0d623f7
A
2455/*
2456 * kqueue_drain - called when kq is closed
2457 */
2458/*ARGSUSED*/
2459static int
2460kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
2461{
2462 struct kqueue *kq = (struct kqueue *)fp->f_fglob->fg_data;
2463 kqlock(kq);
2464 kqueue_wakeup(kq, 1);
2465 kqunlock(kq);
39236c6e 2466 return (0);
b0d623f7
A
2467}
2468
55e303ae
A
2469/*ARGSUSED*/
2470int
2d21ac55 2471kqueue_stat(struct fileproc *fp, void *ub, int isstat64, __unused vfs_context_t ctx)
55e303ae 2472{
2d21ac55 2473
55e303ae 2474 struct kqueue *kq = (struct kqueue *)fp->f_data;
2d21ac55 2475 if (isstat64 != 0) {
b0d623f7
A
2476 struct stat64 *sb64 = (struct stat64 *)ub;
2477
2d21ac55
A
2478 bzero((void *)sb64, sizeof(*sb64));
2479 sb64->st_size = kq->kq_count;
b0d623f7
A
2480 if (kq->kq_state & KQ_KEV64)
2481 sb64->st_blksize = sizeof(struct kevent64_s);
2482 else
2483 sb64->st_blksize = sizeof(struct kevent);
2d21ac55
A
2484 sb64->st_mode = S_IFIFO;
2485 } else {
b0d623f7
A
2486 struct stat *sb = (struct stat *)ub;
2487
2d21ac55
A
2488 bzero((void *)sb, sizeof(*sb));
2489 sb->st_size = kq->kq_count;
b0d623f7
A
2490 if (kq->kq_state & KQ_KEV64)
2491 sb->st_blksize = sizeof(struct kevent64_s);
2492 else
2493 sb->st_blksize = sizeof(struct kevent);
2d21ac55
A
2494 sb->st_mode = S_IFIFO;
2495 }
55e303ae 2496
55e303ae
A
2497 return (0);
2498}
2499
91447636
A
2500/*
2501 * Called with the kqueue locked
2502 */
55e303ae 2503static void
b0d623f7 2504kqueue_wakeup(struct kqueue *kq, int closed)
55e303ae 2505{
b0d623f7
A
2506 if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0 || kq->kq_nprocess > 0) {
2507 kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
39236c6e
A
2508 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, KQ_EVENT,
2509 (closed) ? THREAD_INTERRUPTED : THREAD_AWAKENED);
91447636 2510 }
55e303ae
A
2511}
2512
2513void
2514klist_init(struct klist *list)
2515{
2516 SLIST_INIT(list);
2517}
2518
91447636 2519
55e303ae 2520/*
91447636
A
2521 * Query/Post each knote in the object's list
2522 *
2523 * The object lock protects the list. It is assumed
2524 * that the filter/event routine for the object can
2525 * determine that the object is already locked (via
b0d623f7 2526 * the hint) and avoid deadlocking itself.
91447636
A
2527 *
2528 * The object lock should also hold off pending
2529 * detach/drop operations. But we'll prevent it here
2530 * too - just in case.
55e303ae
A
2531 */
2532void
2533knote(struct klist *list, long hint)
2534{
2535 struct knote *kn;
2536
91447636
A
2537 SLIST_FOREACH(kn, list, kn_selnext) {
2538 struct kqueue *kq = kn->kn_kq;
2539
2540 kqlock(kq);
2541 if (kqlock2knoteuse(kq, kn)) {
2542 int result;
2543
2544 /* call the event with only a use count */
2545 result = kn->kn_fop->f_event(kn, hint);
2546
2547 /* if it's not going away and is triggered */
2548 if (knoteuse2kqlock(kq, kn) && result)
b0d623f7 2549 knote_activate(kn, 1);
91447636
A
2550 /* lock held again */
2551 }
2552 kqunlock(kq);
2553 }
55e303ae
A
2554}
2555
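/*
 * A minimal kernel-side sketch of posting through a klist; the object
 * (`struct mydev') and its lock are hypothetical, but the pattern -
 * post with the object lock held, passing a hint - is the one assumed
 * above.
 */
struct mydev {
	lck_mtx_t	md_lock;	/* the "object lock" above */
	struct klist	md_note;	/* knotes added by f_attach */
};

static void
mydev_event(struct mydev *md, long hint)
{
	lck_mtx_lock(&md->md_lock);
	KNOTE(&md->md_note, hint);	/* expands to knote(&md->md_note, hint) */
	lck_mtx_unlock(&md->md_lock);
}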
2556/*
2557 * attach a knote to the specified list. Return true if this is the first entry.
91447636 2558 * The list is protected by whatever lock the object it is associated with uses.
55e303ae
A
2559 */
2560int
2561knote_attach(struct klist *list, struct knote *kn)
2562{
2563 int ret = SLIST_EMPTY(list);
2564 SLIST_INSERT_HEAD(list, kn, kn_selnext);
39236c6e 2565 return (ret);
55e303ae
A
2566}
2567
2568/*
2569 * detach a knote from the specified list. Return true if that was the last entry.
91447636 2570 * The list is protected by whatever lock the object it is associated with uses.
55e303ae
A
2571 */
2572int
2573knote_detach(struct klist *list, struct knote *kn)
2574{
2575 SLIST_REMOVE(list, kn, knote, kn_selnext);
39236c6e 2576 return (SLIST_EMPTY(list));
55e303ae
A
2577}
2578
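/*
 * A minimal sketch of a filter using the return values above to arm
 * its source only while knotes are attached; `struct mydev', its
 * members and mydev_from_knote() are hypothetical.
 */
static int
filt_mydevattach(struct knote *kn)
{
	struct mydev *md = mydev_from_knote(kn);

	kn->kn_hook = md;
	if (knote_attach(&md->md_note, kn)) {
		/* first entry - enable the notification source */
	}
	return (0);
}

static void
filt_mydevdetach(struct knote *kn)
{
	struct mydev *md = (struct mydev *)kn->kn_hook;

	if (knote_detach(&md->md_note, kn)) {
		/* last entry - disable the notification source */
	}
}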
b0d623f7
A
2579/*
2580 * For a given knote, link a provided wait queue directly with the kqueue.
39236c6e 2581 * Wakeups will happen via recursive wait queue support. But nothing will move
b0d623f7
A
2582 * the knote to the active list at wakeup (nothing calls knote()). Instead,
2583 * the knote is permanently enqueued here.
2584 *
2585 * kqueue and knote references are held by caller.
316670eb
A
2586 *
2587 * caller provides the wait queue link structure.
b0d623f7
A
2588 */
2589int
316670eb 2590knote_link_wait_queue(struct knote *kn, struct wait_queue *wq, wait_queue_link_t wql)
b0d623f7
A
2591{
2592 struct kqueue *kq = kn->kn_kq;
2593 kern_return_t kr;
2594
316670eb 2595 kr = wait_queue_link_noalloc(wq, kq->kq_wqs, wql);
b0d623f7 2596 if (kr == KERN_SUCCESS) {
6d2010ae 2597 knote_markstayqueued(kn);
39236c6e 2598 return (0);
b0d623f7 2599 } else {
39236c6e 2600 return (EINVAL);
b0d623f7
A
2601 }
2602}
2603
2604/*
2605 * Unlink the provided wait queue from the kqueue associated with a knote.
2606 * Also remove it from the magic list of directly attached knotes.
2607 *
2608 * Note that the unlink may have already happened from the other side, so
2609 * ignore any failures to unlink and just remove it from the kqueue list.
316670eb
A
2610 *
2611 * On success, caller is responsible for the link structure
b0d623f7 2612 */
316670eb
A
2613int
2614knote_unlink_wait_queue(struct knote *kn, struct wait_queue *wq, wait_queue_link_t *wqlp)
b0d623f7
A
2615{
2616 struct kqueue *kq = kn->kn_kq;
316670eb 2617 kern_return_t kr;
b0d623f7 2618
316670eb 2619 kr = wait_queue_unlink_nofree(wq, kq->kq_wqs, wqlp);
b0d623f7
A
2620 kqlock(kq);
2621 kn->kn_status &= ~KN_STAYQUEUED;
2622 knote_dequeue(kn);
2623 kqunlock(kq);
39236c6e 2624 return ((kr != KERN_SUCCESS) ? EINVAL : 0);
b0d623f7
A
2625}
2626
55e303ae 2627/*
91447636
A
2628 * remove all knotes referencing a specified fd
2629 *
2630 * Essentially an inlined knote_remove & knote_drop
2631 * when we know for sure that the thing is a file
39236c6e 2632 *
91447636
A
2633 * Entered with the proc_fd lock already held.
2634 * It returns the same way, but may drop it temporarily.
55e303ae
A
2635 */
2636void
91447636 2637knote_fdclose(struct proc *p, int fd)
55e303ae 2638{
91447636
A
2639 struct filedesc *fdp = p->p_fd;
2640 struct klist *list;
55e303ae
A
2641 struct knote *kn;
2642
91447636 2643 list = &fdp->fd_knlist[fd];
55e303ae 2644 while ((kn = SLIST_FIRST(list)) != NULL) {
91447636 2645 struct kqueue *kq = kn->kn_kq;
55e303ae 2646
2d21ac55 2647 if (kq->kq_p != p)
39236c6e
A
2648 panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
2649 __func__, kq->kq_p, p);
2d21ac55 2650
91447636
A
2651 kqlock(kq);
2652 proc_fdunlock(p);
2653
2654 /*
2655 * Convert the lock to a drop ref.
2656 * If we get it, go ahead and drop it.
2657 * Otherwise, we waited for it to
2658 * be dropped by the other guy, so
2659 * it is safe to move on in the list.
2660 */
2661 if (kqlock2knotedrop(kq, kn)) {
2662 kn->kn_fop->f_detach(kn);
2663 knote_drop(kn, p);
2664 }
39236c6e 2665
91447636 2666 proc_fdlock(p);
55e303ae 2667
91447636
A
2668 /* the fd tables may have changed - start over */
2669 list = &fdp->fd_knlist[fd];
2670 }
55e303ae
A
2671}
2672
91447636
A
2673/* proc_fdlock held on entry (and exit) */
2674static int
316670eb 2675knote_fdpattach(struct knote *kn, struct filedesc *fdp, struct proc *p)
55e303ae 2676{
91447636 2677 struct klist *list = NULL;
55e303ae
A
2678
2679 if (! kn->kn_fop->f_isfd) {
2680 if (fdp->fd_knhashmask == 0)
2d21ac55 2681 fdp->fd_knhash = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE,
55e303ae
A
2682 &fdp->fd_knhashmask);
2683 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
91447636
A
2684 } else {
2685 if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
2686 u_int size = 0;
2687
39236c6e 2688 if (kn->kn_id >= (uint64_t)p->p_rlimit[RLIMIT_NOFILE].rlim_cur
316670eb
A
2689 || kn->kn_id >= (uint64_t)maxfiles)
2690 return (EINVAL);
39236c6e 2691
91447636
A
2692 /* have to grow the fd_knlist */
2693 size = fdp->fd_knlistsize;
2694 while (size <= kn->kn_id)
2695 size += KQEXTENT;
316670eb
A
2696
2697 if (size >= (UINT_MAX/sizeof(struct klist *)))
2698 return (EINVAL);
2699
91447636 2700 MALLOC(list, struct klist *,
39236c6e 2701 size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
91447636
A
2702 if (list == NULL)
2703 return (ENOMEM);
39236c6e 2704
91447636 2705 bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
39236c6e 2706 fdp->fd_knlistsize * sizeof(struct klist *));
91447636 2707 bzero((caddr_t)list +
39236c6e
A
2708 fdp->fd_knlistsize * sizeof(struct klist *),
2709 (size - fdp->fd_knlistsize) * sizeof(struct klist *));
55e303ae 2710 FREE(fdp->fd_knlist, M_KQUEUE);
91447636
A
2711 fdp->fd_knlist = list;
2712 fdp->fd_knlistsize = size;
2713 }
2714 list = &fdp->fd_knlist[kn->kn_id];
55e303ae 2715 }
55e303ae 2716 SLIST_INSERT_HEAD(list, kn, kn_link);
91447636 2717 return (0);
55e303ae
A
2718}
2719
91447636
A
2720
2721
55e303ae
A
2722/*
2723 * should be called at spl == 0, since we don't want to hold spl
2724 * while calling fdrop and free.
2725 */
2726static void
2d21ac55 2727knote_drop(struct knote *kn, __unused struct proc *ctxp)
55e303ae 2728{
91447636 2729 struct kqueue *kq = kn->kn_kq;
2d21ac55 2730 struct proc *p = kq->kq_p;
39236c6e 2731 struct filedesc *fdp = p->p_fd;
55e303ae 2732 struct klist *list;
b0d623f7 2733 int needswakeup;
55e303ae 2734
91447636 2735 proc_fdlock(p);
55e303ae
A
2736 if (kn->kn_fop->f_isfd)
2737 list = &fdp->fd_knlist[kn->kn_id];
2738 else
2739 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
2740
2741 SLIST_REMOVE(list, kn, knote, kn_link);
91447636
A
2742 kqlock(kq);
2743 knote_dequeue(kn);
b0d623f7 2744 needswakeup = (kn->kn_status & KN_USEWAIT);
91447636
A
2745 kqunlock(kq);
2746 proc_fdunlock(p);
2747
b0d623f7 2748 if (needswakeup)
39236c6e
A
2749 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kn->kn_status,
2750 THREAD_AWAKENED);
b0d623f7 2751
55e303ae 2752 if (kn->kn_fop->f_isfd)
91447636
A
2753 fp_drop(p, kn->kn_id, kn->kn_fp, 0);
2754
55e303ae
A
2755 knote_free(kn);
2756}
2757
91447636
A
2758/* called with kqueue lock held */
2759static void
b0d623f7 2760knote_activate(struct knote *kn, int propagate)
91447636
A
2761{
2762 struct kqueue *kq = kn->kn_kq;
2763
2764 kn->kn_status |= KN_ACTIVE;
2765 knote_enqueue(kn);
b0d623f7
A
2766 kqueue_wakeup(kq, 0);
2767
2768 /* this is a real event: wake up the parent kq, too */
2769 if (propagate)
2770 KNOTE(&kq->kq_sel.si_note, 0);
2771}
91447636
A
2772
2773/* called with kqueue lock held */
2774static void
2775knote_deactivate(struct knote *kn)
39236c6e 2776{
91447636
A
2777 kn->kn_status &= ~KN_ACTIVE;
2778 knote_dequeue(kn);
2779}
55e303ae 2780
91447636 2781/* called with kqueue lock held */
55e303ae
A
2782static void
2783knote_enqueue(struct knote *kn)
2784{
b0d623f7
A
2785 if ((kn->kn_status & (KN_QUEUED | KN_STAYQUEUED)) == KN_STAYQUEUED ||
2786 (kn->kn_status & (KN_QUEUED | KN_STAYQUEUED | KN_DISABLED)) == 0) {
91447636 2787 struct kqtailq *tq = kn->kn_tq;
b0d623f7 2788 struct kqueue *kq = kn->kn_kq;
55e303ae 2789
39236c6e 2790 TAILQ_INSERT_TAIL(tq, kn, kn_tqe);
91447636
A
2791 kn->kn_status |= KN_QUEUED;
2792 kq->kq_count++;
2793 }
55e303ae
A
2794}
2795
91447636 2796/* called with kqueue lock held */
55e303ae
A
2797static void
2798knote_dequeue(struct knote *kn)
2799{
2800 struct kqueue *kq = kn->kn_kq;
55e303ae 2801
b0d623f7 2802 if ((kn->kn_status & (KN_QUEUED | KN_STAYQUEUED)) == KN_QUEUED) {
91447636 2803 struct kqtailq *tq = kn->kn_tq;
55e303ae 2804
39236c6e 2805 TAILQ_REMOVE(tq, kn, kn_tqe);
91447636
A
2806 kn->kn_tq = &kq->kq_head;
2807 kn->kn_status &= ~KN_QUEUED;
2808 kq->kq_count--;
2809 }
55e303ae
A
2810}
2811
2812void
2813knote_init(void)
2814{
39236c6e
A
2815 knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote),
2816 8192, "knote zone");
91447636
A
2817
2818 /* allocate kq lock group attribute and group */
39236c6e 2819 kq_lck_grp_attr = lck_grp_attr_alloc_init();
91447636
A
2820
2821 kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr);
2822
2823 /* Allocate kq lock attribute */
2824 kq_lck_attr = lck_attr_alloc_init();
91447636
A
2825
2826 /* Initialize the timer filter lock */
2827 lck_mtx_init(&_filt_timerlock, kq_lck_grp, kq_lck_attr);
39236c6e 2828
316670eb
A
2829#if VM_PRESSURE_EVENTS
2830 /* Initialize the vm pressure list lock */
2831 vm_pressure_init(kq_lck_grp, kq_lck_attr);
2832#endif
39236c6e
A
2833
2834#if CONFIG_MEMORYSTATUS
2835 /* Initialize the memorystatus list lock */
2836 memorystatus_kevent_init(kq_lck_grp, kq_lck_attr);
2837#endif
55e303ae
A
2838}
2839SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
2840
2841static struct knote *
2842knote_alloc(void)
2843{
2844 return ((struct knote *)zalloc(knote_zone));
2845}
2846
2847static void
2848knote_free(struct knote *kn)
2849{
91447636 2850 zfree(knote_zone, kn);
55e303ae
A
2851}
2852
2d21ac55 2853#if SOCKETS
1c79356b
A
2854#include <sys/param.h>
2855#include <sys/socket.h>
2856#include <sys/protosw.h>
2857#include <sys/domain.h>
2858#include <sys/mbuf.h>
2859#include <sys/kern_event.h>
2860#include <sys/malloc.h>
9bccf70c
A
2861#include <sys/sys_domain.h>
2862#include <sys/syslog.h>
1c79356b 2863
39236c6e
A
2864static lck_grp_attr_t *kev_lck_grp_attr;
2865static lck_attr_t *kev_lck_attr;
2866static lck_grp_t *kev_lck_grp;
2867static decl_lck_rw_data(,kev_lck_data);
2868static lck_rw_t *kev_rwlock = &kev_lck_data;
1c79356b 2869
91447636
A
2870static int kev_attach(struct socket *so, int proto, struct proc *p);
2871static int kev_detach(struct socket *so);
39236c6e
A
2872static int kev_control(struct socket *so, u_long cmd, caddr_t data,
2873 struct ifnet *ifp, struct proc *p);
2874static lck_mtx_t * event_getlock(struct socket *, int);
2875static int event_lock(struct socket *, int, void *);
2876static int event_unlock(struct socket *, int, void *);
2877
2878static int event_sofreelastref(struct socket *);
2879static void kev_delete(struct kern_event_pcb *);
2880
2881static struct pr_usrreqs event_usrreqs = {
2882 .pru_attach = kev_attach,
2883 .pru_control = kev_control,
2884 .pru_detach = kev_detach,
2885 .pru_soreceive = soreceive,
91447636 2886};
1c79356b 2887
39236c6e
A
2888static struct protosw eventsw[] = {
2889{
2890 .pr_type = SOCK_RAW,
2891 .pr_protocol = SYSPROTO_EVENT,
2892 .pr_flags = PR_ATOMIC,
2893 .pr_usrreqs = &event_usrreqs,
2894 .pr_lock = event_lock,
2895 .pr_unlock = event_unlock,
2896 .pr_getlock = event_getlock,
2897}
1c79356b
A
2898};
2899
39236c6e
A
2900static lck_mtx_t *
2901event_getlock(struct socket *so, int locktype)
2902{
2903#pragma unused(locktype)
2904 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
2905
2906 if (so->so_pcb != NULL) {
2907 if (so->so_usecount < 0)
2908 panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
2909 so, so->so_usecount, solockhistory_nr(so));
2910 /* NOTREACHED */
2911 } else {
2912 panic("%s: so=%p NULL NO so_pcb %s\n", __func__,
2913 so, solockhistory_nr(so));
2914 /* NOTREACHED */
2915 }
2916 return (&ev_pcb->evp_mtx);
2917}
2918
2919static int
2920event_lock(struct socket *so, int refcount, void *lr)
2921{
2922 void *lr_saved;
2923
2924 if (lr == NULL)
2925 lr_saved = __builtin_return_address(0);
2926 else
2927 lr_saved = lr;
2928
2929 if (so->so_pcb != NULL) {
2930 lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
2931 } else {
2932 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
2933 so, lr_saved, solockhistory_nr(so));
2934 /* NOTREACHED */
2935 }
2936
2937 if (so->so_usecount < 0) {
2938 panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s\n", __func__,
2939 so, so->so_pcb, lr_saved, so->so_usecount,
2940 solockhistory_nr(so));
2941 /* NOTREACHED */
2942 }
2943
2944 if (refcount)
2945 so->so_usecount++;
2946
2947 so->lock_lr[so->next_lock_lr] = lr_saved;
2948 so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
2949 return (0);
2950}
2951
2952static int
2953event_unlock(struct socket *so, int refcount, void *lr)
2954{
2955 void *lr_saved;
2956 lck_mtx_t *mutex_held;
2957
2958 if (lr == NULL)
2959 lr_saved = __builtin_return_address(0);
2960 else
2961 lr_saved = lr;
2962
2963 if (refcount)
2964 so->so_usecount--;
2965
2966 if (so->so_usecount < 0) {
2967 panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
2968 so, so->so_usecount, solockhistory_nr(so));
2969 /* NOTREACHED */
2970 }
2971 if (so->so_pcb == NULL) {
2972 panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s\n", __func__,
2973 so, so->so_usecount, (void *)lr_saved,
2974 solockhistory_nr(so));
2975 /* NOTREACHED */
2976 }
2977 mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
2978
2979 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2980 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2981 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
2982
2983 if (so->so_usecount == 0) {
2984 VERIFY(so->so_flags & SOF_PCBCLEARING);
2985 event_sofreelastref(so);
2986 } else {
2987 lck_mtx_unlock(mutex_held);
2988 }
2989
2990 return (0);
2991}
2992
2993static int
2994event_sofreelastref(struct socket *so)
2995{
2996 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
2997
2998 lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);
2999
3000 so->so_pcb = NULL;
3001
3002 /*
3003 * Disable upcall in the event another thread is in kev_post_msg()
3004 * appending record to the receive socket buffer, since sbwakeup()
3005 * may release the socket lock otherwise.
3006 */
3007 so->so_rcv.sb_flags &= ~SB_UPCALL;
3008 so->so_snd.sb_flags &= ~SB_UPCALL;
3009 so->so_event = NULL;
3010 lck_mtx_unlock(&(ev_pcb->evp_mtx));
3011
3012 lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
3013 lck_rw_lock_exclusive(kev_rwlock);
3014 LIST_REMOVE(ev_pcb, evp_link);
3015 lck_rw_done(kev_rwlock);
3016 kev_delete(ev_pcb);
3017
3018 sofreelastref(so, 1);
3019 return (0);
3020}
3021
3022static int event_proto_count = (sizeof (eventsw) / sizeof (struct protosw));
3023
1c79356b
A
3024static
3025struct kern_event_head kern_event_head;
3026
b0d623f7 3027static u_int32_t static_event_id = 0;
39236c6e
A
3028
3029#define EVPCB_ZONE_MAX 65536
3030#define EVPCB_ZONE_NAME "kerneventpcb"
3031static struct zone *ev_pcb_zone;
1c79356b 3032
9bccf70c 3033/*
39236c6e 3034 * Install the protosw entries for the NKE manager. Invoked at extension load time.
9bccf70c 3035 */
39236c6e
A
3036void
3037kern_event_init(struct domain *dp)
9bccf70c 3038{
39236c6e
A
3039 struct protosw *pr;
3040 int i;
3041
3042 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
3043 VERIFY(dp == systemdomain);
3044
3045 kev_lck_grp_attr = lck_grp_attr_alloc_init();
3046 if (kev_lck_grp_attr == NULL) {
3047 panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
3048 /* NOTREACHED */
3049 }
3050
3051 kev_lck_grp = lck_grp_alloc_init("Kernel Event Protocol",
3052 kev_lck_grp_attr);
3053 if (kev_lck_grp == NULL) {
3054 panic("%s: lck_grp_alloc_init failed\n", __func__);
3055 /* NOTREACHED */
3056 }
3057
3058 kev_lck_attr = lck_attr_alloc_init();
3059 if (kev_lck_attr == NULL) {
3060 panic("%s: lck_attr_alloc_init failed\n", __func__);
3061 /* NOTREACHED */
3062 }
9bccf70c 3063
39236c6e
A
3064 lck_rw_init(kev_rwlock, kev_lck_grp, kev_lck_attr);
3065 if (kev_rwlock == NULL) {
3066 panic("%s: lck_rw_init failed\n", __func__);
3067 /* NOTREACHED */
91447636 3068 }
39236c6e
A
3069
3070 for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++)
3071 net_add_proto(pr, dp, 1);
3072
3073 ev_pcb_zone = zinit(sizeof(struct kern_event_pcb),
3074 EVPCB_ZONE_MAX * sizeof(struct kern_event_pcb), 0, EVPCB_ZONE_NAME);
3075 if (ev_pcb_zone == NULL) {
3076 panic("%s: failed allocating ev_pcb_zone", __func__);
3077 /* NOTREACHED */
3078 }
3079 zone_change(ev_pcb_zone, Z_EXPAND, TRUE);
3080 zone_change(ev_pcb_zone, Z_CALLERACCT, TRUE);
9bccf70c
A
3081}
3082
91447636
A
3083static int
3084kev_attach(struct socket *so, __unused int proto, __unused struct proc *p)
1c79356b 3085{
39236c6e
A
3086 int error = 0;
3087 struct kern_event_pcb *ev_pcb;
1c79356b 3088
39236c6e
A
3089 error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
3090 if (error != 0)
3091 return (error);
55e303ae 3092
39236c6e
A
3093 if ((ev_pcb = (struct kern_event_pcb *)zalloc(ev_pcb_zone)) == NULL) {
3094 return (ENOBUFS);
3095 }
3096 bzero(ev_pcb, sizeof(struct kern_event_pcb));
3097 lck_mtx_init(&ev_pcb->evp_mtx, kev_lck_grp, kev_lck_attr);
1c79356b 3098
39236c6e
A
3099 ev_pcb->evp_socket = so;
3100 ev_pcb->evp_vendor_code_filter = 0xffffffff;
1c79356b 3101
39236c6e
A
3102 so->so_pcb = (caddr_t) ev_pcb;
3103 lck_rw_lock_exclusive(kev_rwlock);
3104 LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link);
3105 lck_rw_done(kev_rwlock);
1c79356b 3106
39236c6e 3107 return (error);
1c79356b
A
3108}
3109
39236c6e
A
3110static void
3111kev_delete(struct kern_event_pcb *ev_pcb)
3112{
3113 VERIFY(ev_pcb != NULL);
3114 lck_mtx_destroy(&ev_pcb->evp_mtx, kev_lck_grp);
3115 zfree(ev_pcb_zone, ev_pcb);
3116}
1c79356b 3117
91447636
A
3118static int
3119kev_detach(struct socket *so)
1c79356b 3120{
39236c6e 3121 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;
1c79356b 3122
39236c6e
A
3123 if (ev_pcb != NULL) {
3124 soisdisconnected(so);
91447636 3125 so->so_flags |= SOF_PCBCLEARING;
39236c6e 3126 }
1c79356b 3127
39236c6e 3128 return (0);
1c79356b
A
3129}
3130
91447636 3131/*
2d21ac55 3132 * For now, kev_vendor_code and mbuf_tags use the same
91447636
A
3133 * mechanism.
3134 */
91447636
A
3135errno_t kev_vendor_code_find(
3136 const char *string,
2d21ac55 3137 u_int32_t *out_vendor_code)
91447636
A
3138{
3139 if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
39236c6e 3140 return (EINVAL);
91447636 3141 }
39236c6e
A
3142 return (net_str_id_find_internal(string, out_vendor_code,
3143 NSI_VENDOR_CODE, 1));
91447636
A
3144}
3145
39236c6e
A
3146errno_t
3147kev_msg_post(struct kev_msg *event_msg)
91447636 3148{
39236c6e
A
3149 mbuf_tag_id_t min_vendor, max_vendor;
3150
b0d623f7 3151 net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);
39236c6e 3152
91447636 3153 if (event_msg == NULL)
39236c6e
A
3154 return (EINVAL);
3155
3156 /*
3157 * Limit third parties to posting events for registered vendor codes
3158 * only
3159 */
91447636 3160 if (event_msg->vendor_code < min_vendor ||
39236c6e
A
3161 event_msg->vendor_code > max_vendor)
3162 return (EINVAL);
3163
3164 return (kev_post_msg(event_msg));
91447636 3165}
1c79356b 3166
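/*
 * A minimal kernel-side sketch of posting through this interface; the
 * vendor string and the class/subclass/event values are hypothetical,
 * and the vendor code must have been registered.
 */
static errno_t
example_post_event(void *payload, u_int32_t len)
{
	struct kev_msg ev;

	bzero(&ev, sizeof (ev));
	if (kev_vendor_code_find("com.example.driver", &ev.vendor_code) != 0)
		return (EINVAL);
	ev.kev_class = 1;		/* hypothetical class */
	ev.kev_subclass = 1;		/* hypothetical subclass */
	ev.event_code = 1;		/* hypothetical event */
	ev.dv[0].data_length = len;
	ev.dv[0].data_ptr = payload;
	return (kev_msg_post(&ev));
}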
39236c6e
A
3167int
3168kev_post_msg(struct kev_msg *event_msg)
1c79356b 3169{
39236c6e
A
3170 struct mbuf *m, *m2;
3171 struct kern_event_pcb *ev_pcb;
3172 struct kern_event_msg *ev;
3173 char *tmp;
3174 u_int32_t total_size;
3175 int i;
1c79356b 3176
91447636
A
3177 /* Verify the message is small enough to fit in one mbuf w/o cluster */
3178 total_size = KEV_MSG_HEADER_SIZE;
39236c6e 3179
91447636
A
3180 for (i = 0; i < 5; i++) {
3181 if (event_msg->dv[i].data_length == 0)
3182 break;
3183 total_size += event_msg->dv[i].data_length;
3184 }
39236c6e 3185
91447636 3186 if (total_size > MLEN) {
39236c6e
A
3187 return (EMSGSIZE);
3188 }
3189
3190 m = m_get(M_DONTWAIT, MT_DATA);
3191 if (m == 0)
3192 return (ENOBUFS);
3193
3194 ev = mtod(m, struct kern_event_msg *);
3195 total_size = KEV_MSG_HEADER_SIZE;
3196
3197 tmp = (char *) &ev->event_data[0];
3198 for (i = 0; i < 5; i++) {
3199 if (event_msg->dv[i].data_length == 0)
3200 break;
3201
3202 total_size += event_msg->dv[i].data_length;
3203 bcopy(event_msg->dv[i].data_ptr, tmp,
3204 event_msg->dv[i].data_length);
3205 tmp += event_msg->dv[i].data_length;
3206 }
3207
3208 ev->id = ++static_event_id;
3209 ev->total_size = total_size;
3210 ev->vendor_code = event_msg->vendor_code;
3211 ev->kev_class = event_msg->kev_class;
3212 ev->kev_subclass = event_msg->kev_subclass;
3213 ev->event_code = event_msg->event_code;
3214
3215 m->m_len = total_size;
3216 lck_rw_lock_shared(kev_rwlock);
3217 for (ev_pcb = LIST_FIRST(&kern_event_head);
3218 ev_pcb;
3219 ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
3220 lck_mtx_lock(&ev_pcb->evp_mtx);
3221 if (ev_pcb->evp_socket->so_pcb == NULL) {
3222 lck_mtx_unlock(&ev_pcb->evp_mtx);
3223 continue;
3224 }
3225 if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) {
3226 if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) {
3227 lck_mtx_unlock(&ev_pcb->evp_mtx);
3228 continue;
3229 }
3230
3231 if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) {
3232 if (ev_pcb->evp_class_filter != ev->kev_class) {
3233 lck_mtx_unlock(&ev_pcb->evp_mtx);
3234 continue;
3235 }
3236
3237 if ((ev_pcb->evp_subclass_filter != KEV_ANY_SUBCLASS) &&
3238 (ev_pcb->evp_subclass_filter != ev->kev_subclass)) {
3239 lck_mtx_unlock(&ev_pcb->evp_mtx);
3240 continue;
3241 }
3242 }
3243 }
3244
3245 m2 = m_copym(m, 0, m->m_len, M_NOWAIT);
3246 if (m2 == 0) {
3247 m_free(m);
3248 lck_mtx_unlock(&ev_pcb->evp_mtx);
3249 lck_rw_done(kev_rwlock);
3250 return (ENOBUFS);
3251 }
3252 if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2))
3253 sorwakeup(ev_pcb->evp_socket);
3254 lck_mtx_unlock(&ev_pcb->evp_mtx);
3255 }
3256 m_free(m);
3257 lck_rw_done(kev_rwlock);
3258
3259 return (0);
1c79356b
A
3260}
3261
91447636 3262static int
39236c6e
A
3263kev_control(struct socket *so,
3264 u_long cmd,
3265 caddr_t data,
3266 __unused struct ifnet *ifp,
3267 __unused struct proc *p)
1c79356b 3268{
91447636
A
3269 struct kev_request *kev_req = (struct kev_request *) data;
3270 struct kern_event_pcb *ev_pcb;
3271 struct kev_vendor_code *kev_vendor;
b0d623f7 3272 u_int32_t *id_value = (u_int32_t *) data;
39236c6e 3273
91447636 3274 switch (cmd) {
91447636
A
3275 case SIOCGKEVID:
3276 *id_value = static_event_id;
3277 break;
91447636
A
3278 case SIOCSKEVFILT:
3279 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
39236c6e
A
3280 ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
3281 ev_pcb->evp_class_filter = kev_req->kev_class;
3282 ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
91447636 3283 break;
91447636
A
3284 case SIOCGKEVFILT:
3285 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
39236c6e
A
3286 kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
3287 kev_req->kev_class = ev_pcb->evp_class_filter;
3288 kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
91447636 3289 break;
91447636 3290 case SIOCGKEVVENDOR:
39236c6e 3291 kev_vendor = (struct kev_vendor_code *)data;
91447636
A
3292 /* Make sure string is NULL terminated */
3293 kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN-1] = 0;
39236c6e
A
3294 return (net_str_id_find_internal(kev_vendor->vendor_string,
3295 &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0));
91447636 3296 default:
39236c6e 3297 return (ENOTSUP);
91447636 3298 }
39236c6e
A
3299
3300 return (0);
1c79356b
A
3301}
3302
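/*
 * A minimal userland sketch of this control interface: open a
 * PF_SYSTEM event socket and install a filter with SIOCSKEVFILT. The
 * wildcard filter values shown are the KEV_ANY_* constants from
 * <sys/kern_event.h>.
 */
#include <sys/ioctl.h>
#include <sys/kern_event.h>
#include <sys/socket.h>

static int
example_event_socket(void)
{
	struct kev_request req;
	int s;

	if ((s = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT)) < 0)
		return (-1);
	req.vendor_code = KEV_ANY_VENDOR;
	req.kev_class = KEV_ANY_CLASS;
	req.kev_subclass = KEV_ANY_SUBCLASS;
	if (ioctl(s, SIOCSKEVFILT, &req) < 0)
		return (-1);
	/* recv() now yields struct kern_event_msg records */
	return (s);
}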
2d21ac55 3303#endif /* SOCKETS */
1c79356b 3304
1c79356b 3305
0c530ab8
A
3306int
3307fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)
3308{
2d21ac55 3309 struct vinfo_stat * st;
0c530ab8
A
3310
3311 /* No need for the funnel as fd is kept alive */
0c530ab8
A
3312 st = &kinfo->kq_stat;
3313
2d21ac55 3314 st->vst_size = kq->kq_count;
b0d623f7
A
3315 if (kq->kq_state & KQ_KEV64)
3316 st->vst_blksize = sizeof(struct kevent64_s);
3317 else
3318 st->vst_blksize = sizeof(struct kevent);
2d21ac55 3319 st->vst_mode = S_IFIFO;
0c530ab8
A
3320 if (kq->kq_state & KQ_SEL)
3321 kinfo->kq_state |= PROC_KQUEUE_SELECT;
3322 if (kq->kq_state & KQ_SLEEP)
3323 kinfo->kq_state |= PROC_KQUEUE_SLEEP;
3324
39236c6e 3325 return (0);
0c530ab8 3326}
1c79356b 3327
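/*
 * A minimal userland sketch of where this lands: the proc_info
 * interface exposes it through libproc. PROC_PIDFDKQUEUEINFO and
 * struct kqueue_fdinfo are from <sys/proc_info.h>; `pid' and `fd'
 * are hypothetical.
 */
#include <libproc.h>
#include <sys/proc_info.h>

static int
example_kqueue_info(pid_t pid, int fd, struct kqueue_fdinfo *kqi)
{
	return (proc_pidfdinfo(pid, fd, PROC_PIDFDKQUEUEINFO,
	    kqi, sizeof (*kqi)));
}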
6d2010ae
A
3328
3329void
3330knote_markstayqueued(struct knote *kn)
3331{
3332 kqlock(kn->kn_kq);
3333 kn->kn_status |= KN_STAYQUEUED;
3334 knote_enqueue(kn);
3335 kqunlock(kn->kn_kq);
3336}