/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 *
 */
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 *	@(#)kern_event.c	1.0 (3/31/2000)
 */
#include <stdint.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file_internal.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/sysproto.h>
#include <sys/user.h>
#include <sys/vnode_internal.h>
#include <string.h>
#include <sys/proc_info.h>
#include <sys/codesign.h>

#include <kern/locks.h>
#include <kern/clock.h>
#include <kern/thread_call.h>
#include <kern/sched_prim.h>
#include <kern/wait_queue.h>
#include <kern/zalloc.h>
#include <kern/assert.h>

#include <libkern/libkern.h>
#include "net/net_str_id.h"

#include <mach/task.h>

#if VM_PRESSURE_EVENTS
#include <kern/vm_pressure.h>
#endif

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

#define	KQ_EVENT	NULL

static inline void kqlock(struct kqueue *kq);
static inline void kqunlock(struct kqueue *kq);

static int kqlock2knoteuse(struct kqueue *kq, struct knote *kn);
static int kqlock2knoteusewait(struct kqueue *kq, struct knote *kn);
static int kqlock2knotedrop(struct kqueue *kq, struct knote *kn);
static int knoteuse2kqlock(struct kqueue *kq, struct knote *kn);

static void kqueue_wakeup(struct kqueue *kq, int closed);
static int kqueue_read(struct fileproc *fp, struct uio *uio,
    int flags, vfs_context_t ctx);
static int kqueue_write(struct fileproc *fp, struct uio *uio,
    int flags, vfs_context_t ctx);
static int kqueue_ioctl(struct fileproc *fp, u_long com, caddr_t data,
    vfs_context_t ctx);
static int kqueue_select(struct fileproc *fp, int which, void *wql,
    vfs_context_t ctx);
static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
    vfs_context_t ctx);
static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);

static const struct fileops kqueueops = {
	.fo_type = DTYPE_KQUEUE,
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_select = kqueue_select,
	.fo_close = kqueue_close,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_drain = kqueue_drain,
};

static int kevent_internal(struct proc *p, int iskev64, user_addr_t changelist,
    int nchanges, user_addr_t eventlist, int nevents, int fd,
    user_addr_t utimeout, unsigned int flags, int32_t *retval);
static int kevent_copyin(user_addr_t *addrp, struct kevent64_s *kevp,
    struct proc *p, int iskev64);
static int kevent_copyout(struct kevent64_s *kevp, user_addr_t *addrp,
    struct proc *p, int iskev64);
char * kevent_description(struct kevent64_s *kevp, char *s, size_t n);

static int kevent_callback(struct kqueue *kq, struct kevent64_s *kevp,
    void *data);
static void kevent_continue(struct kqueue *kq, void *data, int error);
static void kqueue_scan_continue(void *contp, wait_result_t wait_result);
static int kqueue_process(struct kqueue *kq, kevent_callback_t callback,
    void *data, int *countp, struct proc *p);
static int kqueue_begin_processing(struct kqueue *kq);
static void kqueue_end_processing(struct kqueue *kq);
static int knote_process(struct knote *kn, kevent_callback_t callback,
    void *data, struct kqtailq *inprocessp, struct proc *p);
static void knote_put(struct knote *kn);
static int knote_fdpattach(struct knote *kn, struct filedesc *fdp,
    struct proc *p);
static void knote_drop(struct knote *kn, struct proc *p);
static void knote_activate(struct knote *kn, int);
static void knote_deactivate(struct knote *kn);
static void knote_enqueue(struct knote *kn);
static void knote_dequeue(struct knote *kn);
static struct knote *knote_alloc(void);
static void knote_free(struct knote *kn);

static int filt_fileattach(struct knote *kn);
static struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
};

static void filt_kqdetach(struct knote *kn);
static int filt_kqueue(struct knote *kn, long hint);
static struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};

/* placeholder for not-yet-implemented filters */
static int filt_badattach(struct knote *kn);
static struct filterops bad_filtops = {
	.f_attach = filt_badattach,
};

static int filt_procattach(struct knote *kn);
static void filt_procdetach(struct knote *kn);
static int filt_proc(struct knote *kn, long hint);
static struct filterops proc_filtops = {
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};

#if VM_PRESSURE_EVENTS
static int filt_vmattach(struct knote *kn);
static void filt_vmdetach(struct knote *kn);
static int filt_vm(struct knote *kn, long hint);
static struct filterops vm_filtops = {
	.f_attach = filt_vmattach,
	.f_detach = filt_vmdetach,
	.f_event = filt_vm,
};
#endif /* VM_PRESSURE_EVENTS */

#if CONFIG_MEMORYSTATUS
extern struct filterops memorystatus_filtops;
#endif /* CONFIG_MEMORYSTATUS */

extern struct filterops fs_filtops;

extern struct filterops sig_filtops;

/* Timer filter */
static int filt_timerattach(struct knote *kn);
static void filt_timerdetach(struct knote *kn);
static int filt_timer(struct knote *kn, long hint);
static void filt_timertouch(struct knote *kn, struct kevent64_s *kev,
    long type);
static struct filterops timer_filtops = {
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
	.f_touch = filt_timertouch,
};

/* Helpers */
static void filt_timerexpire(void *knx, void *param1);
static int filt_timervalidate(struct knote *kn);
static void filt_timerupdate(struct knote *kn);
static void filt_timercancel(struct knote *kn);

#define	TIMER_RUNNING		0x1
#define	TIMER_CANCELWAIT	0x2

static lck_mtx_t _filt_timerlock;
static void filt_timerlock(void);
static void filt_timerunlock(void);

static zone_t knote_zone;

#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
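/* for illustration: KN_HASH(0x1234, 0xff) == ((0x1234 ^ 0x12) & 0xff) == 0x26 */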

#if 0
extern struct filterops aio_filtops;
#endif

/* Mach portset filter */
extern struct filterops machport_filtops;

/* User filter */
static int filt_userattach(struct knote *kn);
static void filt_userdetach(struct knote *kn);
static int filt_user(struct knote *kn, long hint);
static void filt_usertouch(struct knote *kn, struct kevent64_s *kev,
    long type);
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
#if 0
	&aio_filtops,			/* EVFILT_AIO */
#else
	&bad_filtops,			/* EVFILT_AIO */
#endif
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&machport_filtops,		/* EVFILT_MACHPORT */
	&fs_filtops,			/* EVFILT_FS */
	&user_filtops,			/* EVFILT_USER */
	&bad_filtops,			/* unused */
#if VM_PRESSURE_EVENTS
	&vm_filtops,			/* EVFILT_VM */
#else
	&bad_filtops,			/* EVFILT_VM */
#endif
	&file_filtops,			/* EVFILT_SOCK */
#if CONFIG_MEMORYSTATUS
	&memorystatus_filtops,		/* EVFILT_MEMORYSTATUS */
#else
	&bad_filtops,			/* EVFILT_MEMORYSTATUS */
#endif
};
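
/*
 * Illustrative sketch (not compiled): system filter numbers are small
 * negative integers, so kevent_register() below indexes this table with
 * ~filter.  For example, EVFILT_READ is -1, and ~(-1) == 0, the first slot.
 */
#if 0
	struct filterops *fops = sysfilt_ops[~EVFILT_READ];	/* &file_filtops */
#endif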

/*
 * kqueue/note lock attributes and implementations
 *
 *	kqueues have locks, while knotes have use counts.
 *	Most of the knote state is guarded by the object lock;
 *	the knote "inuse" count and status use the kqueue lock.
 */
lck_grp_attr_t * kq_lck_grp_attr;
lck_grp_t * kq_lck_grp;
lck_attr_t * kq_lck_attr;

static inline void
kqlock(struct kqueue *kq)
{
	lck_spin_lock(&kq->kq_lock);
}

static inline void
kqunlock(struct kqueue *kq)
{
	lck_spin_unlock(&kq->kq_lock);
}

/*
 * Convert a kq lock to a knote use reference.
 *
 *	If the knote is being dropped, we can't get
 *	a use reference, so just return with it
 *	still locked.
 *	- kq locked at entry
 *	- unlock on exit if we get the use reference
 */
static int
kqlock2knoteuse(struct kqueue *kq, struct knote *kn)
{
	if (kn->kn_status & KN_DROPPING)
		return (0);
	kn->kn_inuse++;
	kqunlock(kq);
	return (1);
}

/*
 * Convert a kq lock to a knote use reference,
 * but wait for attach and drop events to complete.
 *
 *	If the knote is being dropped, we can't get
 *	a use reference, so just return with it
 *	still locked.
 *	- kq locked at entry
 *	- kq always unlocked on exit
 */
static int
kqlock2knoteusewait(struct kqueue *kq, struct knote *kn)
{
	if ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) != 0) {
		kn->kn_status |= KN_USEWAIT;
		wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
		    &kn->kn_status, THREAD_UNINT, 0);
		kqunlock(kq);
		thread_block(THREAD_CONTINUE_NULL);
		return (0);
	}
	kn->kn_inuse++;
	kqunlock(kq);
	return (1);
}

/*
 * Convert from a knote use reference back to kq lock.
 *
 *	Drop a use reference and wake any waiters if
 *	this is the last one.
 *
 *	The exit return indicates if the knote is
 *	still alive - but the kqueue lock is taken
 *	unconditionally.
 */
static int
knoteuse2kqlock(struct kqueue *kq, struct knote *kn)
{
	kqlock(kq);
	if (--kn->kn_inuse == 0) {
		if ((kn->kn_status & KN_ATTACHING) != 0) {
			kn->kn_status &= ~KN_ATTACHING;
		}
		if ((kn->kn_status & KN_USEWAIT) != 0) {
			kn->kn_status &= ~KN_USEWAIT;
			wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs,
			    &kn->kn_status, THREAD_AWAKENED);
		}
	}
	return ((kn->kn_status & KN_DROPPING) == 0);
}

/*
 * Convert a kq lock to a knote drop reference.
 *
 *	If the knote is in use, wait for the use count
 *	to subside.  We first mark our intention to drop
 *	it - keeping other users from "piling on."
 *	If we are too late, we have to wait for the
 *	other drop to complete.
 *
 *	- kq locked at entry
 *	- always unlocked on exit.
 *	- caller can't hold any locks that would prevent
 *	  the other dropper from completing.
 */
static int
kqlock2knotedrop(struct kqueue *kq, struct knote *kn)
{
	int oktodrop;

	oktodrop = ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) == 0);
	kn->kn_status &= ~KN_STAYQUEUED;
	kn->kn_status |= KN_DROPPING;
	if (oktodrop) {
		if (kn->kn_inuse == 0) {
			kqunlock(kq);
			return (oktodrop);
		}
	}
	kn->kn_status |= KN_USEWAIT;
	wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, &kn->kn_status,
	    THREAD_UNINT, 0);
	kqunlock(kq);
	thread_block(THREAD_CONTINUE_NULL);
	return (oktodrop);
}
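
/*
 * Typical caller pattern for the lock conversions above (illustrative
 * sketch, not compiled; it mirrors the uses in kqueue_dealloc() and
 * kevent_register() below): win the drop race under the kq lock, then
 * detach and drop with the lock released.
 */
#if 0
	kqlock(kq);
	if (kqlock2knotedrop(kq, kn)) {		/* returns with kq unlocked */
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
	}
#endif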

/*
 * Release a knote use count reference.
 */
static void
knote_put(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	kqlock(kq);
	if (--kn->kn_inuse == 0) {
		if ((kn->kn_status & KN_USEWAIT) != 0) {
			kn->kn_status &= ~KN_USEWAIT;
			wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs,
			    &kn->kn_status, THREAD_AWAKENED);
		}
	}
	kqunlock(kq);
}

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn, vfs_context_current()));
}

#define	f_flag f_fglob->fg_flag
#define	f_msgcount f_fglob->fg_msgcount
#define	f_cred f_fglob->fg_cred
#define	f_ops f_fglob->fg_ops
#define	f_offset f_fglob->fg_offset
#define	f_data f_fglob->fg_data

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kqlock(kq);
	KNOTE_DETACH(&kq->kq_sel.si_note, kn);
	kqunlock(kq);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, __unused long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;

	assert(PID_MAX < NOTE_PDATAMASK);

	if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0)
		return (ENOTSUP);

	p = proc_find(kn->kn_id);
	if (p == NULL) {
		return (ESRCH);
	}

	const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;

	if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits)
		do {
			pid_t selfpid = proc_selfpid();

			if (p->p_ppid == selfpid)
				break;	/* parent => ok */

			if ((p->p_lflag & P_LTRACED) != 0 &&
			    (p->p_oppid == selfpid))
				break;	/* parent-in-waiting => ok */

			proc_rele(p);
			return (EACCES);
		} while (0);

	proc_klist_lock();

	kn->kn_flags |= EV_CLEAR;	/* automatically set */
	kn->kn_ptr.p_proc = p;		/* store the proc handle */

	KNOTE_ATTACH(&p->p_klist, kn);

	proc_klist_unlock();

	proc_rele(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  In that case,
 * the pointer to the process will have already been nulled out.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	proc_klist_lock();

	p = kn->kn_ptr.p_proc;
	if (p != PROC_NULL) {
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}

	proc_klist_unlock();
}

static int
filt_proc(struct knote *kn, long hint)
{
	/*
	 * Note: a lot of bits in hint may be obtained from the knote.
	 * To free some of those bits, see <rdar://problem/12592988>
	 * Freeing up bits in hint for filt_proc
	 */
	/* hint is 0 when called from above */
	if (hint != 0) {
		u_int event;

		/* ALWAYS CALLED WITH proc_klist_lock when (hint != 0) */

		/*
		 * mask off extra data
		 */
		event = (u_int)hint & NOTE_PCTRLMASK;

		/*
		 * termination lifecycle events can happen while a debugger
		 * has reparented a process, in which case notifications
		 * should be quashed except to the tracing parent.  When
		 * the debugger reaps the child (either via wait4(2) or
		 * process exit), the child will be reparented to the original
		 * parent and these knotes re-fired.
		 */
		if (event & NOTE_EXIT) {
			if ((kn->kn_ptr.p_proc->p_oppid != 0)
			    && (kn->kn_kq->kq_p->p_pid != kn->kn_ptr.p_proc->p_ppid)) {
				/*
				 * This knote is not for the current ptrace(2) parent, ignore.
				 */
				return 0;
			}
		}

		/*
		 * if the user is interested in this event, record it.
		 */
		if (kn->kn_sfflags & event)
			kn->kn_fflags |= event;

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		}
#pragma clang diagnostic pop

		/*
		 * The kernel has a wrapper in place that returns the same data
		 * as is collected here, in kn_data.  Any changes to how
		 * NOTE_EXITSTATUS and NOTE_EXIT_DETAIL are collected
		 * should also be reflected in the proc_pidnoteexit() wrapper.
		 */
		if (event == NOTE_EXIT) {
			kn->kn_data = 0;
			if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
				kn->kn_fflags |= NOTE_EXITSTATUS;
				kn->kn_data |= (hint & NOTE_PDATAMASK);
			}
			if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
				kn->kn_fflags |= NOTE_EXIT_DETAIL;
				if ((kn->kn_ptr.p_proc->p_lflag &
				    P_LTERM_DECRYPTFAIL) != 0) {
					kn->kn_data |= NOTE_EXIT_DECRYPTFAIL;
				}
				if ((kn->kn_ptr.p_proc->p_lflag &
				    P_LTERM_JETSAM) != 0) {
					kn->kn_data |= NOTE_EXIT_MEMORY;
					switch (kn->kn_ptr.p_proc->p_lflag &
					    P_JETSAM_MASK) {
					case P_JETSAM_VMPAGESHORTAGE:
						kn->kn_data |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
						break;
					case P_JETSAM_VMTHRASHING:
						kn->kn_data |= NOTE_EXIT_MEMORY_VMTHRASHING;
						break;
					case P_JETSAM_FCTHRASHING:
						kn->kn_data |= NOTE_EXIT_MEMORY_FCTHRASHING;
						break;
					case P_JETSAM_VNODE:
						kn->kn_data |= NOTE_EXIT_MEMORY_VNODE;
						break;
					case P_JETSAM_HIWAT:
						kn->kn_data |= NOTE_EXIT_MEMORY_HIWAT;
						break;
					case P_JETSAM_PID:
						kn->kn_data |= NOTE_EXIT_MEMORY_PID;
						break;
					case P_JETSAM_IDLEEXIT:
						kn->kn_data |= NOTE_EXIT_MEMORY_IDLE;
						break;
					}
				}
				if ((kn->kn_ptr.p_proc->p_csflags &
				    CS_KILLED) != 0) {
					kn->kn_data |= NOTE_EXIT_CSERROR;
				}
			}
		}
	}

	/* atomic check, no locking needed when called from above */
	return (kn->kn_fflags != 0);
}

#if VM_PRESSURE_EVENTS
/*
 * Virtual memory kevents
 *
 * author: Matt Jacobson [matthew_jacobson@apple.com]
 */

static int
filt_vmattach(struct knote *kn)
{
	/*
	 * The note will be cleared once the information has been flushed to
	 * the client.  If there is still pressure, we will be re-alerted.
	 */
	kn->kn_flags |= EV_CLEAR;
	return (vm_knote_register(kn));
}

static void
filt_vmdetach(struct knote *kn)
{
	vm_knote_unregister(kn);
}

static int
filt_vm(struct knote *kn, long hint)
{
	/* hint == 0 means this is just an alive? check (always true) */
	if (hint != 0) {
		const pid_t pid = (pid_t)hint;
		if ((kn->kn_sfflags & NOTE_VM_PRESSURE) &&
		    (kn->kn_kq->kq_p->p_pid == pid)) {
			kn->kn_fflags |= NOTE_VM_PRESSURE;
		}
	}

	return (kn->kn_fflags != 0);
}
#endif /* VM_PRESSURE_EVENTS */

/*
 * filt_timervalidate - process data from user
 *
 *	Converts to either interval or deadline format.
 *
 *	The saved-data field in the knote contains the
 *	time value.  The saved filter-flags indicate
 *	the unit of measurement.
 *
 *	After validation, either the saved-data field
 *	contains the interval in absolute time, or ext[0]
 *	contains the expected deadline.  If that deadline
 *	is in the past, ext[0] is 0.
 *
 *	Returns EINVAL for unrecognized units of time.
 *
 *	Timer filter lock is held.
 *
 */
static int
filt_timervalidate(struct knote *kn)
{
	uint64_t multiplier;
	uint64_t raw = 0;

	switch (kn->kn_sfflags & (NOTE_SECONDS|NOTE_USECONDS|NOTE_NSECONDS)) {
	case NOTE_SECONDS:
		multiplier = NSEC_PER_SEC;
		break;
	case NOTE_USECONDS:
		multiplier = NSEC_PER_USEC;
		break;
	case NOTE_NSECONDS:
		multiplier = 1;
		break;
	case 0: /* milliseconds (default) */
		multiplier = NSEC_PER_SEC / 1000;
		break;
	default:
		return (EINVAL);
	}

	/* transform the leeway (slop delta) in kn_ext[1], if given, to the same time scale */
	if (kn->kn_sfflags & NOTE_LEEWAY) {
		nanoseconds_to_absolutetime((uint64_t)kn->kn_ext[1] * multiplier, &raw);
		kn->kn_ext[1] = raw;
	}

	nanoseconds_to_absolutetime((uint64_t)kn->kn_sdata * multiplier, &raw);

	kn->kn_ext[0] = 0;
	kn->kn_sdata = 0;

	if (kn->kn_sfflags & NOTE_ABSOLUTE) {
		clock_sec_t seconds;
		clock_nsec_t nanoseconds;
		uint64_t now;

		clock_get_calendar_nanotime(&seconds, &nanoseconds);
		nanoseconds_to_absolutetime((uint64_t)seconds * NSEC_PER_SEC +
		    nanoseconds, &now);

		if (raw < now) {
			/* time has already passed */
			kn->kn_ext[0] = 0;
		} else {
			raw -= now;
			clock_absolutetime_interval_to_deadline(raw,
			    &kn->kn_ext[0]);
		}
	} else {
		kn->kn_sdata = raw;
	}

	return (0);
}
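
/*
 * Worked example for filt_timervalidate() (illustrative only): a knote
 * registered with NOTE_SECONDS and kn_sdata == 5 selects a multiplier of
 * NSEC_PER_SEC, i.e. 5 * 1000000000 ns; with no unit flag the default is
 * milliseconds (NSEC_PER_SEC / 1000).  The nanosecond product is then
 * converted to mach absolute-time units before being stored in the knote.
 */
#if 0
	uint64_t raw;
	nanoseconds_to_absolutetime(5ULL * NSEC_PER_SEC, &raw);
	kn->kn_sdata = raw;	/* non-absolute case: a repeating interval */
#endif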

/*
 * filt_timerupdate - compute the next deadline
 *
 *	Repeating timers store their interval in kn_sdata.  Absolute
 *	timers have already calculated the deadline, stored in ext[0].
 *
 *	On return, the next deadline (or zero if no deadline is needed)
 *	is stored in kn_ext[0].
 *
 *	Timer filter lock is held.
 */
static void
filt_timerupdate(struct knote *kn)
{
	/* if there's no interval, deadline is just in kn_ext[0] */
	if (kn->kn_sdata == 0)
		return;

	/* if timer hasn't fired before, fire in interval nsecs */
	if (kn->kn_ext[0] == 0) {
		clock_absolutetime_interval_to_deadline(kn->kn_sdata,
		    &kn->kn_ext[0]);
	} else {
		/*
		 * If timer has fired before, schedule the next pop
		 * relative to the last intended deadline.
		 *
		 * We could check for whether the deadline has expired,
		 * but the thread call layer can handle that.
		 */
		kn->kn_ext[0] += kn->kn_sdata;
	}
}
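
/*
 * Example of the cadence this preserves (illustrative): with interval I
 * and first deadline D, successive pops are scheduled at D, D + I,
 * D + 2I, ... measured from the *intended* deadlines, so a pop that is
 * delivered late does not shift every later pop with it.
 */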

/*
 * filt_timerexpire - the timer callout routine
 *
 *	Just propagate the timer event into the knote
 *	filter routine (by going through the knote
 *	synchronization point).  Pass a hint to
 *	indicate this is a real event, not just a
 *	query from above.
 */
static void
filt_timerexpire(void *knx, __unused void *spare)
{
	struct klist timer_list;
	struct knote *kn = knx;

	filt_timerlock();

	kn->kn_hookid &= ~TIMER_RUNNING;

	/* no "object" for timers, so fake a list */
	SLIST_INIT(&timer_list);
	SLIST_INSERT_HEAD(&timer_list, kn, kn_selnext);
	KNOTE(&timer_list, 1);

	/* if someone is waiting for timer to pop */
	if (kn->kn_hookid & TIMER_CANCELWAIT) {
		struct kqueue *kq = kn->kn_kq;
		wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kn->kn_hook,
		    THREAD_AWAKENED);
	}

	filt_timerunlock();
}

/*
 * Cancel a running timer (or wait for the pop).
 * Timer filter lock is held.
 */
static void
filt_timercancel(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	thread_call_t callout = kn->kn_hook;
	boolean_t cancelled;

	if (kn->kn_hookid & TIMER_RUNNING) {
		/* cancel the callout if we can */
		cancelled = thread_call_cancel(callout);
		if (cancelled) {
			kn->kn_hookid &= ~TIMER_RUNNING;
		} else {
			/* we have to wait for the expire routine. */
			kn->kn_hookid |= TIMER_CANCELWAIT;
			wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
			    &kn->kn_hook, THREAD_UNINT, 0);
			filt_timerunlock();
			thread_block(THREAD_CONTINUE_NULL);
			filt_timerlock();
			assert((kn->kn_hookid & TIMER_RUNNING) == 0);
		}
	}
}

/*
 * Allocate a thread call for the knote's lifetime, and kick off the timer.
 */
static int
filt_timerattach(struct knote *kn)
{
	thread_call_t callout;
	int error;

	callout = thread_call_allocate(filt_timerexpire, kn);
	if (NULL == callout)
		return (ENOMEM);

	filt_timerlock();
	error = filt_timervalidate(kn);
	if (error != 0) {
		filt_timerunlock();
		return (error);
	}

	kn->kn_hook = (void*)callout;
	kn->kn_hookid = 0;

	/* absolute=EV_ONESHOT */
	if (kn->kn_sfflags & NOTE_ABSOLUTE)
		kn->kn_flags |= EV_ONESHOT;

	filt_timerupdate(kn);
	if (kn->kn_ext[0]) {
		kn->kn_flags |= EV_CLEAR;
		unsigned int timer_flags = 0;
		if (kn->kn_sfflags & NOTE_CRITICAL)
			timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
		else if (kn->kn_sfflags & NOTE_BACKGROUND)
			timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
		else
			timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;

		if (kn->kn_sfflags & NOTE_LEEWAY)
			timer_flags |= THREAD_CALL_DELAY_LEEWAY;

		thread_call_enter_delayed_with_leeway(callout, NULL,
		    kn->kn_ext[0], kn->kn_ext[1], timer_flags);

		kn->kn_hookid |= TIMER_RUNNING;
	} else {
		/* fake immediate */
		kn->kn_data = 1;
	}

	filt_timerunlock();
	return (0);
}

/*
 * Shut down the timer if it's running, and free the callout.
 */
static void
filt_timerdetach(struct knote *kn)
{
	thread_call_t callout;

	filt_timerlock();

	callout = (thread_call_t)kn->kn_hook;
	filt_timercancel(kn);

	filt_timerunlock();

	thread_call_free(callout);
}

static int
filt_timer(struct knote *kn, long hint)
{
	int result;

	if (hint) {
		/* real timer pop -- timer lock held by filt_timerexpire */
		kn->kn_data++;

		if (((kn->kn_hookid & TIMER_CANCELWAIT) == 0) &&
		    ((kn->kn_flags & EV_ONESHOT) == 0)) {

			/* evaluate next time to fire */
			filt_timerupdate(kn);

			if (kn->kn_ext[0]) {
				unsigned int timer_flags = 0;

				/* keep the callout and re-arm */
				if (kn->kn_sfflags & NOTE_CRITICAL)
					timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
				else if (kn->kn_sfflags & NOTE_BACKGROUND)
					timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
				else
					timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;

				if (kn->kn_sfflags & NOTE_LEEWAY)
					timer_flags |= THREAD_CALL_DELAY_LEEWAY;

				thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL,
				    kn->kn_ext[0], kn->kn_ext[1], timer_flags);

				kn->kn_hookid |= TIMER_RUNNING;
			}
		}

		return (1);
	}

	/* user-query */
	filt_timerlock();

	result = (kn->kn_data != 0);

	filt_timerunlock();

	return (result);
}

/*
 * filt_timertouch - update knote with new user input
 *
 *	Cancel and restart the timer based on new user data.  When
 *	the user picks up a knote, clear the count of how many timer
 *	pops have gone off (in kn_data).
 */
static void
filt_timertouch(struct knote *kn, struct kevent64_s *kev, long type)
{
	int error;
	filt_timerlock();

	switch (type) {
	case EVENT_REGISTER:
		/* cancel current call */
		filt_timercancel(kn);

		/* recalculate deadline */
		kn->kn_sdata = kev->data;
		kn->kn_sfflags = kev->fflags;
		kn->kn_ext[0] = kev->ext[0];
		kn->kn_ext[1] = kev->ext[1];

		error = filt_timervalidate(kn);
		if (error) {
			/* no way to report error, so mark it in the knote */
			kn->kn_flags |= EV_ERROR;
			kn->kn_data = error;
			break;
		}

		/* start timer if necessary */
		filt_timerupdate(kn);

		if (kn->kn_ext[0]) {
			unsigned int timer_flags = 0;
			if (kn->kn_sfflags & NOTE_CRITICAL)
				timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
			else if (kn->kn_sfflags & NOTE_BACKGROUND)
				timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
			else
				timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;

			if (kn->kn_sfflags & NOTE_LEEWAY)
				timer_flags |= THREAD_CALL_DELAY_LEEWAY;

			thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL,
			    kn->kn_ext[0], kn->kn_ext[1], timer_flags);

			kn->kn_hookid |= TIMER_RUNNING;
		} else {
			/* pretend the timer has fired */
			kn->kn_data = 1;
		}

		break;

	case EVENT_PROCESS:
		/* reset the timer pop count in kn_data */
		*kev = kn->kn_kevent;
		kev->ext[0] = 0;
		kn->kn_data = 0;
		if (kn->kn_flags & EV_CLEAR)
			kn->kn_fflags = 0;
		break;
	default:
		panic("%s: - invalid type (%ld)", __func__, type);
		break;
	}

	filt_timerunlock();
}

static void
filt_timerlock(void)
{
	lck_mtx_lock(&_filt_timerlock);
}

static void
filt_timerunlock(void)
{
	lck_mtx_unlock(&_filt_timerlock);
}

static int
filt_userattach(struct knote *kn)
{
	/* EVFILT_USER knotes are not attached to anything in the kernel */
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER) {
		kn->kn_hookid = 1;
	} else {
		kn->kn_hookid = 0;
	}
	return (0);
}

static void
filt_userdetach(__unused struct knote *kn)
{
	/* EVFILT_USER knotes are not attached to anything in the kernel */
}

static int
filt_user(struct knote *kn, __unused long hint)
{
	return (kn->kn_hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent64_s *kev, long type)
{
	uint32_t ffctrl;
	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER) {
			kn->kn_hookid = 1;
		}

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;
		case NOTE_FFAND:
			OSBitAndAtomic(kev->fflags, &kn->kn_sfflags);
			break;
		case NOTE_FFOR:
			OSBitOrAtomic(kev->fflags, &kn->kn_sfflags);
			break;
		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;
		}
		kn->kn_sdata = kev->data;
		break;
	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = (volatile UInt32)kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;
	default:
		panic("%s: - invalid type (%ld)", __func__, type);
		break;
	}
}
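
/*
 * User-space view of EVFILT_USER (illustrative sketch, not part of the
 * kernel build): one kevent64() call adds the knote, and a later
 * NOTE_TRIGGER causes filt_usertouch() above to mark it fired.
 */
#if 0
	struct kevent64_s kev;
	EV_SET64(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0, 0, 0);
	kevent64(kq, &kev, 1, NULL, 0, 0, NULL);	/* register */
	EV_SET64(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0, 0, 0);
	kevent64(kq, &kev, 1, NULL, 0, 0, NULL);	/* fire it */
#endif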

/*
 * JMM - placeholder for not-yet-implemented filters
 */
static int
filt_badattach(__unused struct knote *kn)
{
	return (ENOTSUP);
}

struct kqueue *
kqueue_alloc(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;

	MALLOC_ZONE(kq, struct kqueue *, sizeof (struct kqueue), M_KQUEUE,
	    M_WAITOK);
	if (kq != NULL) {
		wait_queue_set_t wqs;

		wqs = wait_queue_set_alloc(SYNC_POLICY_FIFO |
		    SYNC_POLICY_PREPOST);
		if (wqs != NULL) {
			bzero(kq, sizeof (struct kqueue));
			lck_spin_init(&kq->kq_lock, kq_lck_grp, kq_lck_attr);
			TAILQ_INIT(&kq->kq_head);
			kq->kq_wqs = wqs;
			kq->kq_p = p;
		} else {
			FREE_ZONE(kq, sizeof (struct kqueue), M_KQUEUE);
			kq = NULL;
		}
	}

	if (fdp->fd_knlistsize < 0) {
		proc_fdlock(p);
		if (fdp->fd_knlistsize < 0)
			fdp->fd_knlistsize = 0;	/* this process has had a kq */
		proc_fdunlock(p);
	}

	return (kq);
}

/*
 * kqueue_dealloc - detach all knotes from a kqueue and free it
 *
 *	We walk each list looking for knotes referencing this
 *	kqueue.  If we find one, we try to drop it.  But if we
 *	fail to get a drop reference, the drop in progress will
 *	block until the knote is gone.  So, we can just restart
 *	again safe in the assumption that the list will eventually
 *	not contain any more references to this kqueue (either
 *	we dropped them all, or someone else did).
 *
 *	Assumes no new events are being added to the kqueue.
 *	Nothing locked on entry or exit.
 */
void
kqueue_dealloc(struct kqueue *kq)
{
	struct proc *p = kq->kq_p;
	struct filedesc *fdp = p->p_fd;
	struct knote *kn;
	int i;

	proc_fdlock(p);
	for (i = 0; i < fdp->fd_knlistsize; i++) {
		kn = SLIST_FIRST(&fdp->fd_knlist[i]);
		while (kn != NULL) {
			if (kq == kn->kn_kq) {
				kqlock(kq);
				proc_fdunlock(p);
				/* drop it ourselves or wait */
				if (kqlock2knotedrop(kq, kn)) {
					kn->kn_fop->f_detach(kn);
					knote_drop(kn, p);
				}
				proc_fdlock(p);
				/* start over at beginning of list */
				kn = SLIST_FIRST(&fdp->fd_knlist[i]);
				continue;
			}
			kn = SLIST_NEXT(kn, kn_link);
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
			kn = SLIST_FIRST(&fdp->fd_knhash[i]);
			while (kn != NULL) {
				if (kq == kn->kn_kq) {
					kqlock(kq);
					proc_fdunlock(p);
					/* drop it ourselves or wait */
					if (kqlock2knotedrop(kq, kn)) {
						kn->kn_fop->f_detach(kn);
						knote_drop(kn, p);
					}
					proc_fdlock(p);
					/* start over at beginning of list */
					kn = SLIST_FIRST(&fdp->fd_knhash[i]);
					continue;
				}
				kn = SLIST_NEXT(kn, kn_link);
			}
		}
	}
	proc_fdunlock(p);

	/*
	 * before freeing the wait queue set for this kqueue,
	 * make sure it is unlinked from all its containing (select) sets.
	 */
	wait_queue_unlink_all((wait_queue_t)kq->kq_wqs);
	wait_queue_set_free(kq->kq_wqs);
	lck_spin_destroy(&kq->kq_lock, kq_lck_grp);
	FREE_ZONE(kq, sizeof (struct kqueue), M_KQUEUE);
}

int
kqueue_body(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
{
	struct kqueue *kq;
	struct fileproc *fp;
	int fd, error;

	error = falloc_withalloc(p,
	    &fp, &fd, vfs_context_current(), fp_zalloc, cra);
	if (error) {
		return (error);
	}

	kq = kqueue_alloc(p);
	if (kq == NULL) {
		fp_free(p, fd, fp);
		return (ENOMEM);
	}

	fp->f_flag = FREAD | FWRITE;
	fp->f_ops = &kqueueops;
	fp->f_data = kq;

	proc_fdlock(p);
	*fdflags(p, fd) |= UF_EXCLOSE;
	procfdtbl_releasefd(p, fd, NULL);
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);

	*retval = fd;
	return (error);
}

int
kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
{
	return (kqueue_body(p, fileproc_alloc_init, NULL, retval));
}
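
/*
 * User-space counterpart (sketch, not part of this file's build): the
 * descriptor returned here is what kevent()/kevent64() operate on, and
 * it is created close-on-exec (UF_EXCLOSE above).
 */
#if 0
	int kq = kqueue();
	if (kq == -1)
		err(1, "kqueue");	/* err() from <err.h> in user space */
#endif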

static int
kevent_copyin(user_addr_t *addrp, struct kevent64_s *kevp, struct proc *p,
    int iskev64)
{
	int advance;
	int error;

	if (iskev64) {
		advance = sizeof (struct kevent64_s);
		error = copyin(*addrp, (caddr_t)kevp, advance);
	} else if (IS_64BIT_PROCESS(p)) {
		struct user64_kevent kev64;
		bzero(kevp, sizeof (struct kevent64_s));

		advance = sizeof (kev64);
		error = copyin(*addrp, (caddr_t)&kev64, advance);
		if (error)
			return (error);
		kevp->ident = kev64.ident;
		kevp->filter = kev64.filter;
		kevp->flags = kev64.flags;
		kevp->fflags = kev64.fflags;
		kevp->data = kev64.data;
		kevp->udata = kev64.udata;
	} else {
		struct user32_kevent kev32;
		bzero(kevp, sizeof (struct kevent64_s));

		advance = sizeof (kev32);
		error = copyin(*addrp, (caddr_t)&kev32, advance);
		if (error)
			return (error);
		kevp->ident = (uintptr_t)kev32.ident;
		kevp->filter = kev32.filter;
		kevp->flags = kev32.flags;
		kevp->fflags = kev32.fflags;
		kevp->data = (intptr_t)kev32.data;
		kevp->udata = CAST_USER_ADDR_T(kev32.udata);
	}
	if (!error)
		*addrp += advance;
	return (error);
}

static int
kevent_copyout(struct kevent64_s *kevp, user_addr_t *addrp, struct proc *p,
    int iskev64)
{
	int advance;
	int error;

	if (iskev64) {
		advance = sizeof (struct kevent64_s);
		error = copyout((caddr_t)kevp, *addrp, advance);
	} else if (IS_64BIT_PROCESS(p)) {
		struct user64_kevent kev64;

		/*
		 * deal with the special case of a user-supplied
		 * value of (uintptr_t)-1.
		 */
		kev64.ident = (kevp->ident == (uintptr_t)-1) ?
		    (uint64_t)-1LL : (uint64_t)kevp->ident;

		kev64.filter = kevp->filter;
		kev64.flags = kevp->flags;
		kev64.fflags = kevp->fflags;
		kev64.data = (int64_t) kevp->data;
		kev64.udata = kevp->udata;
		advance = sizeof (kev64);
		error = copyout((caddr_t)&kev64, *addrp, advance);
	} else {
		struct user32_kevent kev32;

		kev32.ident = (uint32_t)kevp->ident;
		kev32.filter = kevp->filter;
		kev32.flags = kevp->flags;
		kev32.fflags = kevp->fflags;
		kev32.data = (int32_t)kevp->data;
		kev32.udata = kevp->udata;
		advance = sizeof (kev32);
		error = copyout((caddr_t)&kev32, *addrp, advance);
	}
	if (!error)
		*addrp += advance;
	return (error);
}

/*
 * kevent_continue - continue a kevent syscall after blocking
 *
 *	assume we inherit a use count on the kq fileglob.
 */

static void
kevent_continue(__unused struct kqueue *kq, void *data, int error)
{
	struct _kevent *cont_args;
	struct fileproc *fp;
	int32_t *retval;
	int noutputs;
	int fd;
	struct proc *p = current_proc();

	cont_args = (struct _kevent *)data;
	noutputs = cont_args->eventout;
	retval = cont_args->retval;
	fd = cont_args->fd;
	fp = cont_args->fp;

	fp_drop(p, fd, fp, 0);

	/* don't restart after signals... */
	if (error == ERESTART)
		error = EINTR;
	else if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0)
		*retval = noutputs;
	unix_syscall_return(error);
}

/*
 * kevent - [syscall] register and wait for kernel events
 *
 */
int
kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
{
	return (kevent_internal(p,
	    0,
	    uap->changelist,
	    uap->nchanges,
	    uap->eventlist,
	    uap->nevents,
	    uap->fd,
	    uap->timeout,
	    0, /* no flags from old kevent() call */
	    retval));
}
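
/*
 * Example registration through this entry point, as issued from user
 * space (illustrative sketch, not compiled here): a repeating one-second
 * EVFILT_TIMER.  Without NOTE_SECONDS the data field would default to
 * milliseconds, per filt_timervalidate() above.
 */
#if 0
	struct kevent kev, out;
	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 1, NULL);
	kevent(kq, &kev, 1, &out, 1, NULL);	/* blocks until the timer pops */
#endif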

int
kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
{
	return (kevent_internal(p,
	    1,
	    uap->changelist,
	    uap->nchanges,
	    uap->eventlist,
	    uap->nevents,
	    uap->fd,
	    uap->timeout,
	    uap->flags,
	    retval));
}

static int
kevent_internal(struct proc *p, int iskev64, user_addr_t changelist,
    int nchanges, user_addr_t ueventlist, int nevents, int fd,
    user_addr_t utimeout, __unused unsigned int flags,
    int32_t *retval)
{
	struct _kevent *cont_args;
	uthread_t ut;
	struct kqueue *kq;
	struct fileproc *fp;
	struct kevent64_s kev;
	int error, noutputs;
	struct timeval atv;

	/* convert timeout to absolute - if we have one */
	if (utimeout != USER_ADDR_NULL) {
		struct timeval rtv;
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timespec ts;
			error = copyin(utimeout, &ts, sizeof(ts));
			if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0)
				error = EINVAL;
			else
				TIMESPEC_TO_TIMEVAL(&rtv, &ts);
		} else {
			struct user32_timespec ts;
			error = copyin(utimeout, &ts, sizeof(ts));
			TIMESPEC_TO_TIMEVAL(&rtv, &ts);
		}
		if (error)
			return (error);
		if (itimerfix(&rtv))
			return (EINVAL);
		getmicrouptime(&atv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
	}

	/* get a usecount for the kq itself */
	if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0)
		return (error);

	/* each kq should only be used for events of one type */
	kqlock(kq);
	if (kq->kq_state & (KQ_KEV32 | KQ_KEV64)) {
		if (((iskev64 && (kq->kq_state & KQ_KEV32)) ||
		    (!iskev64 && (kq->kq_state & KQ_KEV64)))) {
			error = EINVAL;
			kqunlock(kq);
			goto errorout;
		}
	} else {
		kq->kq_state |= (iskev64 ? KQ_KEV64 : KQ_KEV32);
	}
	kqunlock(kq);

	/* register all the change requests the user provided... */
	noutputs = 0;
	while (nchanges > 0 && error == 0) {
		error = kevent_copyin(&changelist, &kev, p, iskev64);
		if (error)
			break;

		kev.flags &= ~EV_SYSFLAGS;
		error = kevent_register(kq, &kev, p);
		if ((error || (kev.flags & EV_RECEIPT)) && nevents > 0) {
			kev.flags = EV_ERROR;
			kev.data = error;
			error = kevent_copyout(&kev, &ueventlist, p, iskev64);
			if (error == 0) {
				nevents--;
				noutputs++;
			}
		}
		nchanges--;
	}

	/* store the continuation/completion data in the uthread */
	ut = (uthread_t)get_bsdthread_info(current_thread());
	cont_args = &ut->uu_kevent.ss_kevent;
	cont_args->fp = fp;
	cont_args->fd = fd;
	cont_args->retval = retval;
	cont_args->eventlist = ueventlist;
	cont_args->eventcount = nevents;
	cont_args->eventout = noutputs;
	cont_args->eventsize = iskev64;

	if (nevents > 0 && noutputs == 0 && error == 0)
		error = kqueue_scan(kq, kevent_callback,
		    kevent_continue, cont_args,
		    &atv, p);
	kevent_continue(kq, cont_args, error);

errorout:
	fp_drop(p, fd, fp, 0);
	return (error);
}


/*
 * kevent_callback - callback for each individual event
 *
 *	called with nothing locked
 *	caller holds a reference on the kqueue
 */
static int
kevent_callback(__unused struct kqueue *kq, struct kevent64_s *kevp,
    void *data)
{
	struct _kevent *cont_args;
	int error;
	int iskev64;

	cont_args = (struct _kevent *)data;
	assert(cont_args->eventout < cont_args->eventcount);

	iskev64 = cont_args->eventsize;

	/*
	 * Copy out the appropriate amount of event data for this user.
	 */
	error = kevent_copyout(kevp, &cont_args->eventlist, current_proc(),
	    iskev64);

	/*
	 * If there isn't space for additional events, return
	 * a harmless error to stop the processing here
	 */
	if (error == 0 && ++cont_args->eventout == cont_args->eventcount)
		error = EWOULDBLOCK;
	return (error);
}

/*
 * kevent_description - format a description of a kevent for diagnostic output
 *
 *	called with a 128-byte string buffer
 */

char *
kevent_description(struct kevent64_s *kevp, char *s, size_t n)
{
	snprintf(s, n,
	    "kevent="
	    "{.ident=%#llx, .filter=%d, .flags=%#x, .fflags=%#x, .data=%#llx, .udata=%#llx, .ext[0]=%#llx, .ext[1]=%#llx}",
	    kevp->ident,
	    kevp->filter,
	    kevp->flags,
	    kevp->fflags,
	    kevp->data,
	    kevp->udata,
	    kevp->ext[0],
	    kevp->ext[1]);

	return (s);
}

/*
 * kevent_register - add a new event to a kqueue
 *
 *	Creates a mapping between the event source and
 *	the kqueue via a knote data structure.
 *
 *	Because many/most of the event sources are file
 *	descriptor related, the knote is linked off
 *	the file descriptor table for quick access.
 *
 *	called with nothing locked
 *	caller holds a reference on the kqueue
 */

int
kevent_register(struct kqueue *kq, struct kevent64_s *kev,
    __unused struct proc *ctxp)
{
	struct proc *p = kq->kq_p;
	struct filedesc *fdp = p->p_fd;
	struct filterops *fops;
	struct fileproc *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		printf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

restart:
	/* this iocount needs to be dropped if it is not registered */
	proc_fdlock(p);
	if (fops->f_isfd && (error = fp_lookup(p, kev->ident, &fp, 1)) != 0) {
		proc_fdunlock(p);
		return (error);
	}

	if (fops->f_isfd) {
		/* fd-based knotes are linked off the fd table */
		if (kev->ident < (u_int)fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		/* hash non-fd knotes here too */
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kn == NULL) {
		if ((kev->flags & (EV_ADD|EV_DELETE)) == EV_ADD) {
			kn = knote_alloc();
			if (kn == NULL) {
				proc_fdunlock(p);
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_tq = &kq->kq_head;
			kn->kn_fop = fops;
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;
			kn->kn_inuse = 1;	/* for f_attach() */
			kn->kn_status = KN_ATTACHING;

			/* before anyone can find it */
			if (kev->flags & EV_DISABLE)
				kn->kn_status |= KN_DISABLED;

			error = knote_fdpattach(kn, fdp, p);
			proc_fdunlock(p);

			if (error) {
				knote_free(kn);
				goto done;
			}

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			error = fops->f_attach(kn);

			kqlock(kq);

			if (error != 0) {
				/*
				 * Failed to attach correctly, so drop.
				 * All other possible users/droppers
				 * have deferred to us.
				 */
				kn->kn_status |= KN_DROPPING;
				kqunlock(kq);
				knote_drop(kn, p);
				goto done;
			} else if (kn->kn_status & KN_DROPPING) {
				/*
				 * Attach succeeded, but someone else
				 * deferred their drop - now we have
				 * to do it for them (after detaching).
				 */
				kqunlock(kq);
				kn->kn_fop->f_detach(kn);
				knote_drop(kn, p);
				goto done;
			}
			kn->kn_status &= ~KN_ATTACHING;
			kqunlock(kq);
		} else {
			proc_fdunlock(p);
			error = ENOENT;
			goto done;
		}
	} else {
		/* existing knote - get kqueue lock */
		kqlock(kq);
		proc_fdunlock(p);

		if (kev->flags & EV_DELETE) {
			knote_dequeue(kn);
			kn->kn_status |= KN_DISABLED;
			if (kqlock2knotedrop(kq, kn)) {
				kn->kn_fop->f_detach(kn);
				knote_drop(kn, p);
			}
			goto done;
		}

		/* update status flags for existing knote */
		if (kev->flags & EV_DISABLE) {
			knote_dequeue(kn);
			kn->kn_status |= KN_DISABLED;
		} else if (kev->flags & EV_ENABLE) {
			kn->kn_status &= ~KN_DISABLED;
			if (kn->kn_status & KN_ACTIVE)
				knote_enqueue(kn);
		}

		/*
		 * The user may change some filter values after the
		 * initial EV_ADD, but doing so will not reset any
		 * filters that have already been triggered.
		 */
		kn->kn_kevent.udata = kev->udata;
		if (fops->f_isfd || fops->f_touch == NULL) {
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
		}

		/*
		 * If somebody is in the middle of dropping this
		 * knote - go find/insert a new one.  But we have
		 * to wait for this one to go away first.  Attaches
		 * running in parallel may also drop/modify the
		 * knote.  Wait for those to complete as well and
		 * then start over if we encounter one.
		 */
		if (!kqlock2knoteusewait(kq, kn)) {
			/* kqueue, proc_fdlock both unlocked */
			goto restart;
		}

		/*
		 * Call touch routine to notify filter of changes
		 * in filter values.
		 */
		if (!fops->f_isfd && fops->f_touch != NULL)
			fops->f_touch(kn, kev, EVENT_REGISTER);
	}
	/* still have use ref on knote */

	/*
	 * If the knote is not marked to always stay enqueued,
	 * invoke the filter routine to see if it should be
	 * enqueued now.
	 */
	if ((kn->kn_status & KN_STAYQUEUED) == 0 && kn->kn_fop->f_event(kn, 0)) {
		if (knoteuse2kqlock(kq, kn))
			knote_activate(kn, 1);
		kqunlock(kq);
	} else {
		knote_put(kn);
	}

done:
	if (fp != NULL)
		fp_drop(p, kev->ident, fp, 0);
	return (error);
}

/*
 * knote_process - process a triggered event
 *
 *	Validate that it is really still a triggered event
 *	by calling the filter routines (if necessary).  Hold
 *	a use reference on the knote to avoid it being detached.
 *	If it is still considered triggered, invoke the callback
 *	routine provided and move it to the provided inprocess
 *	queue.
 *
 *	caller holds a reference on the kqueue.
 *	kqueue locked on entry and exit - but may be dropped
 */
static int
knote_process(struct knote *kn,
    kevent_callback_t callback,
    void *data,
    struct kqtailq *inprocessp,
    struct proc *p)
{
	struct kqueue *kq = kn->kn_kq;
	struct kevent64_s kev;
	int touch;
	int result;
	int error;

	/*
	 * Determine the kevent state we want to return.
	 *
	 * Some event states need to be revalidated before returning
	 * them, others we take the snapshot at the time the event
	 * was enqueued.
	 *
	 * Events with non-NULL f_touch operations must be touched.
	 * Triggered events must fill in kev for the callback.
	 *
	 * Convert our lock to a use-count and call the event's
	 * filter routine(s) to update.
	 */
	if ((kn->kn_status & KN_DISABLED) != 0) {
		result = 0;
		touch = 0;
	} else {
		int revalidate;

		result = 1;
		revalidate = ((kn->kn_status & KN_STAYQUEUED) != 0 ||
		    (kn->kn_flags & EV_ONESHOT) == 0);
		touch = (!kn->kn_fop->f_isfd && kn->kn_fop->f_touch != NULL);

		if (revalidate || touch) {
			if (revalidate)
				knote_deactivate(kn);

			/* call the filter/touch routines with just a ref */
			if (kqlock2knoteuse(kq, kn)) {
				/* if we have to revalidate, call the filter */
				if (revalidate) {
					result = kn->kn_fop->f_event(kn, 0);
				}

				/*
				 * capture the kevent data - using touch if
				 * specified
				 */
				if (result && touch) {
					kn->kn_fop->f_touch(kn, &kev,
					    EVENT_PROCESS);
				}

				/*
				 * convert back to a kqlock - bail if the knote
				 * went away
				 */
				if (!knoteuse2kqlock(kq, kn)) {
					return (EJUSTRETURN);
				} else if (result) {
					/*
					 * if revalidated as alive, make sure
					 * it's active
					 */
					if (!(kn->kn_status & KN_ACTIVE)) {
						knote_activate(kn, 0);
					}

					/*
					 * capture all events that occurred
					 * during filter
					 */
					if (!touch) {
						kev = kn->kn_kevent;
					}

				} else if ((kn->kn_status & KN_STAYQUEUED) == 0) {
					/*
					 * was already dequeued, so just bail on
					 * this one
					 */
					return (EJUSTRETURN);
				}
			} else {
				return (EJUSTRETURN);
			}
		} else {
			kev = kn->kn_kevent;
		}
	}

	/* move knote onto inprocess queue */
	assert(kn->kn_tq == &kq->kq_head);
	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_tq = inprocessp;
	TAILQ_INSERT_TAIL(inprocessp, kn, kn_tqe);

	/*
	 * Determine how to dispatch the knote for future event handling.
	 * not-fired: just return (do not invoke the callback).
	 * One-shot: deactivate it.
	 * Clear: deactivate and clear the state.
	 * Dispatch: don't clear state, just deactivate it and mark it disabled.
	 * All others: just leave where they are.
	 */

	if (result == 0) {
		return (EJUSTRETURN);
	} else if ((kn->kn_flags & EV_ONESHOT) != 0) {
		knote_deactivate(kn);
		if (kqlock2knotedrop(kq, kn)) {
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p);
		}
	} else if ((kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) != 0) {
		if ((kn->kn_flags & EV_DISPATCH) != 0) {
			/* deactivate and disable all dispatch knotes */
			knote_deactivate(kn);
			kn->kn_status |= KN_DISABLED;
		} else if (!touch || kn->kn_fflags == 0) {
			/* only deactivate if nothing since the touch */
			knote_deactivate(kn);
		}
		if (!touch && (kn->kn_flags & EV_CLEAR) != 0) {
			/* manually clear non-touch knotes */
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		kqunlock(kq);
	} else {
		/*
		 * leave on inprocess queue.  We'll
		 * move all the remaining ones back
		 * to the kq queue and wakeup any
		 * waiters when we are done.
		 */
		kqunlock(kq);
	}

	/* callback to handle each event as we find it */
	error = (callback)(kq, &kev, data);

	kqlock(kq);
	return (error);
}
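
/*
 * How the dispatch case above looks from user space (illustrative
 * sketch, not compiled here): an EV_DISPATCH knote is disabled as it is
 * delivered, and the consumer re-enables it when the work is done.
 */
#if 0
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);
	/* ... event delivered and handled; re-arm: */
	EV_SET(&kev, fd, EVFILT_READ, EV_ENABLE | EV_DISPATCH, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);
#endif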

/*
 * Return 0 to indicate that processing should proceed,
 * -1 if there is nothing to process.
 *
 * Called with kqueue locked and returns the same way,
 * but may drop lock temporarily.
 */
static int
kqueue_begin_processing(struct kqueue *kq)
{
	for (;;) {
		if (kq->kq_count == 0) {
			return (-1);
		}

		/* if someone else is processing the queue, wait */
		if (kq->kq_nprocess != 0) {
			wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
			    &kq->kq_nprocess, THREAD_UNINT, 0);
			kq->kq_state |= KQ_PROCWAIT;
			kqunlock(kq);
			thread_block(THREAD_CONTINUE_NULL);
			kqlock(kq);
		} else {
			kq->kq_nprocess = 1;
			return (0);
		}
	}
}

/*
 * Called with kqueue lock held.
 */
static void
kqueue_end_processing(struct kqueue *kq)
{
	kq->kq_nprocess = 0;
	if (kq->kq_state & KQ_PROCWAIT) {
		kq->kq_state &= ~KQ_PROCWAIT;
		wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs,
		    &kq->kq_nprocess, THREAD_AWAKENED);
	}
}

/*
 * kqueue_process - process the triggered events in a kqueue
 *
 *	Walk the queued knotes and validate that they are
 *	really still triggered events by calling the filter
 *	routines (if necessary).  Hold a use reference on
 *	the knote to avoid it being detached.  For each event
 *	that is still considered triggered, invoke the
 *	callback routine provided.
 *
 *	caller holds a reference on the kqueue.
 *	kqueue locked on entry and exit - but may be dropped
 *	kqueue list locked (held for duration of call)
 */

static int
kqueue_process(struct kqueue *kq,
    kevent_callback_t callback,
    void *data,
    int *countp,
    struct proc *p)
{
	struct kqtailq inprocess;
	struct knote *kn;
	int nevents;
	int error;

	TAILQ_INIT(&inprocess);

	if (kqueue_begin_processing(kq) == -1) {
		*countp = 0;
		/* Nothing to process */
		return (0);
	}

	/*
	 * Clear any pre-posted status from previous runs, so we
	 * only detect events that occur during this run.
	 */
	wait_queue_sub_clearrefs(kq->kq_wqs);

	/*
	 * loop through the enqueued knotes, processing each one and
	 * revalidating those that need it.  As they are processed,
	 * they get moved to the inprocess queue (so the loop can end).
	 */
	error = 0;
	nevents = 0;

	while (error == 0 &&
	    (kn = TAILQ_FIRST(&kq->kq_head)) != NULL) {
		error = knote_process(kn, callback, data, &inprocess, p);
		if (error == EJUSTRETURN)
			error = 0;
		else
			nevents++;
	}

	/*
	 * With the kqueue still locked, move any knotes
	 * remaining on the inprocess queue back to the
	 * kq's queue and wake up any waiters.
	 */
	while ((kn = TAILQ_FIRST(&inprocess)) != NULL) {
		assert(kn->kn_tq == &inprocess);
		TAILQ_REMOVE(&inprocess, kn, kn_tqe);
		kn->kn_tq = &kq->kq_head;
		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	}

	kqueue_end_processing(kq);

	*countp = nevents;
	return (error);
}
2132
2133
2134 static void
2135 kqueue_scan_continue(void *data, wait_result_t wait_result)
2136 {
2137 thread_t self = current_thread();
2138 uthread_t ut = (uthread_t)get_bsdthread_info(self);
2139 struct _kqueue_scan * cont_args = &ut->uu_kevent.ss_kqueue_scan;
2140 struct kqueue *kq = (struct kqueue *)data;
2141 int error;
2142 int count;
2143
2144 /* convert the (previous) wait_result to a proper error */
2145 switch (wait_result) {
2146 case THREAD_AWAKENED:
2147 kqlock(kq);
2148 error = kqueue_process(kq, cont_args->call, cont_args, &count,
2149 current_proc());
2150 if (error == 0 && count == 0) {
2151 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
2152 KQ_EVENT, THREAD_ABORTSAFE, cont_args->deadline);
2153 kq->kq_state |= KQ_SLEEP;
2154 kqunlock(kq);
2155 thread_block_parameter(kqueue_scan_continue, kq);
2156 /* NOTREACHED */
2157 }
2158 kqunlock(kq);
2159 break;
2160 case THREAD_TIMED_OUT:
2161 error = EWOULDBLOCK;
2162 break;
2163 case THREAD_INTERRUPTED:
2164 error = EINTR;
2165 break;
2166 default:
2167 panic("%s: - invalid wait_result (%d)", __func__,
2168 wait_result);
2169 error = 0;
2170 }
2171
2172 /* call the continuation with the results */
2173 assert(cont_args->cont != NULL);
2174 (cont_args->cont)(kq, cont_args->data, error);
2175 }
2176
2177
2178 /*
2179 * kqueue_scan - scan and wait for events in a kqueue
2180 *
2181 * Process the triggered events in a kqueue.
2182 *
2183 * If there are no triggered events, arrange to
2184 * wait for them. If the caller provided a
2185 * continuation routine, the scan does not return
2186 * to the caller; the continuation is invoked with the results.
2187 *
2188 * The callback routine must be valid.
2189 * The caller must hold a use-count reference on the kq.
2190 */
2191
2192 int
2193 kqueue_scan(struct kqueue *kq,
2194 kevent_callback_t callback,
2195 kqueue_continue_t continuation,
2196 void *data,
2197 struct timeval *atvp,
2198 struct proc *p)
2199 {
2200 thread_continue_t cont = THREAD_CONTINUE_NULL;
2201 uint64_t deadline;
2202 int error;
2203 int first;
2204
2205 assert(callback != NULL);
2206
2207 first = 1;
2208 for (;;) {
2209 wait_result_t wait_result;
2210 int count;
2211
2212 /*
2213 * Make a pass through the kq to find events already
2214 * triggered.
2215 */
2216 kqlock(kq);
2217 error = kqueue_process(kq, callback, data, &count, p);
2218 if (error || count)
2219 break; /* lock still held */
2220
2221 /* looks like we have to consider blocking */
2222 if (first) {
2223 first = 0;
2224 /* convert the timeout to a deadline once */
2225 if (atvp->tv_sec || atvp->tv_usec) {
2226 uint64_t now;
2227
2228 clock_get_uptime(&now);
2229 nanoseconds_to_absolutetime((uint64_t)atvp->tv_sec * NSEC_PER_SEC +
2230 atvp->tv_usec * (long)NSEC_PER_USEC,
2231 &deadline);
2232 if (now >= deadline) {
2233 /* non-blocking call */
2234 error = EWOULDBLOCK;
2235 break; /* lock still held */
2236 }
2237 deadline -= now;
2238 clock_absolutetime_interval_to_deadline(deadline, &deadline);
2239 } else {
2240 deadline = 0; /* block forever */
2241 }
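
/*
 * Note that atvp is interpreted as an absolute, uptime-based timeval:
 * it is rescaled to absolute time units and compared against
 * clock_get_uptime(), so an instant already in the past degenerates
 * into a poll (EWOULDBLOCK), while only the remaining interval is
 * converted into the machine deadline used for the wait below.
 */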
2242
2243 if (continuation) {
2244 uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
2245 struct _kqueue_scan *cont_args = &ut->uu_kevent.ss_kqueue_scan;
2246
2247 cont_args->call = callback;
2248 cont_args->cont = continuation;
2249 cont_args->deadline = deadline;
2250 cont_args->data = data;
2251 cont = kqueue_scan_continue;
2252 }
2253 }
2254
2255 /* go ahead and wait */
2256 wait_queue_assert_wait_with_leeway((wait_queue_t)kq->kq_wqs,
2257 KQ_EVENT, THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL,
2258 deadline, 0);
2259 kq->kq_state |= KQ_SLEEP;
2260 kqunlock(kq);
2261 wait_result = thread_block_parameter(cont, kq);
2262 /* NOTREACHED if (continuation != NULL) */
2263
2264 switch (wait_result) {
2265 case THREAD_AWAKENED:
2266 continue;
2267 case THREAD_TIMED_OUT:
2268 return (EWOULDBLOCK);
2269 case THREAD_INTERRUPTED:
2270 return (EINTR);
2271 default:
2272 panic("%s: - bad wait_result (%d)", __func__,
2273 wait_result);
2274 error = 0;
2275 }
2276 }
2277 kqunlock(kq);
2278 return (error);
2279 }
2280
2281
2282 /*
2283 * XXX
2284 * This could be expanded to call kqueue_scan, if desired.
2285 */
2286 /*ARGSUSED*/
2287 static int
2288 kqueue_read(__unused struct fileproc *fp,
2289 __unused struct uio *uio,
2290 __unused int flags,
2291 __unused vfs_context_t ctx)
2292 {
2293 return (ENXIO);
2294 }
2295
2296 /*ARGSUSED*/
2297 static int
2298 kqueue_write(__unused struct fileproc *fp,
2299 __unused struct uio *uio,
2300 __unused int flags,
2301 __unused vfs_context_t ctx)
2302 {
2303 return (ENXIO);
2304 }
2305
2306 /*ARGSUSED*/
2307 static int
2308 kqueue_ioctl(__unused struct fileproc *fp,
2309 __unused u_long com,
2310 __unused caddr_t data,
2311 __unused vfs_context_t ctx)
2312 {
2313 return (ENOTTY);
2314 }
2315
2316 /*ARGSUSED*/
2317 static int
2318 kqueue_select(struct fileproc *fp, int which, void *wql,
2319 __unused vfs_context_t ctx)
2320 {
2321 struct kqueue *kq = (struct kqueue *)fp->f_data;
2322 struct knote *kn;
2323 struct kqtailq inprocessq;
2324 int retnum = 0;
2325
2326 if (which != FREAD)
2327 return (0);
2328
2329 TAILQ_INIT(&inprocessq);
2330
2331 kqlock(kq);
2332 /*
2333 * If this is the first pass, link the wait queue associated with
2334 * the kqueue onto the wait queue set for the select(). Normally we
2335 * use selrecord() for this, but it uses the wait queue within the
2336 * selinfo structure and we need to use the main one for the kqueue to
2337 * catch events from KN_STAYQUEUED sources. So we do the linkage manually.
2338 * (The select() call will unlink them when it ends).
2339 */
2340 if (wql != NULL) {
2341 thread_t cur_act = current_thread();
2342 struct uthread * ut = get_bsdthread_info(cur_act);
2343
2344 kq->kq_state |= KQ_SEL;
2345 wait_queue_link_noalloc((wait_queue_t)kq->kq_wqs, ut->uu_wqset,
2346 (wait_queue_link_t)wql);
2347 }
2348
2349 if (kqueue_begin_processing(kq) == -1) {
2350 kqunlock(kq);
2351 return (0);
2352 }
2353
2354 if (kq->kq_count != 0) {
2355 /*
2356 * there is something queued - but it might be a
2357 * KN_STAYQUEUED knote, which may or may not have
2358 * any events pending. So, we have to walk the
2359 * list of knotes to see, and peek at the stay-
2360 * queued ones to be really sure.
2361 */
2362 while ((kn = (struct knote *)TAILQ_FIRST(&kq->kq_head)) != NULL) {
2363 if ((kn->kn_status & KN_STAYQUEUED) == 0) {
2364 retnum = 1;
2365 goto out;
2366 }
2367
2368 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2369 TAILQ_INSERT_TAIL(&inprocessq, kn, kn_tqe);
2370
2371 if (kqlock2knoteuse(kq, kn)) {
2372 unsigned peek;
2373
2374 peek = kn->kn_fop->f_peek(kn);
2375 if (knoteuse2kqlock(kq, kn)) {
2376 if (peek > 0) {
2377 retnum = 1;
2378 goto out;
2379 }
2380 } else {
2381 retnum = 0;
2382 }
2383 }
2384 }
2385 }
2386
2387 out:
2388 /* Return knotes to active queue */
2389 while ((kn = TAILQ_FIRST(&inprocessq)) != NULL) {
2390 TAILQ_REMOVE(&inprocessq, kn, kn_tqe);
2391 kn->kn_tq = &kq->kq_head;
2392 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2393 }
2394
2395 kqueue_end_processing(kq);
2396 kqunlock(kq);
2397 return (retnum);
2398 }
2399
2400 /*
2401 * kqueue_close -
2402 */
2403 /*ARGSUSED*/
2404 static int
2405 kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
2406 {
2407 struct kqueue *kq = (struct kqueue *)fg->fg_data;
2408
2409 kqueue_dealloc(kq);
2410 fg->fg_data = NULL;
2411 return (0);
2412 }
2413
2414 /*ARGSUSED*/
2415 /*
2416 * The caller has taken a use-count reference on this kqueue and will donate it
2417 * to the kqueue we are being added to. This keeps the kqueue from closing until
2418 * that relationship is torn down.
2419 */
2420 static int
2421 kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, __unused vfs_context_t ctx)
2422 {
2423 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
2424 struct kqueue *parentkq = kn->kn_kq;
2425
2426 if (parentkq == kq ||
2427 kn->kn_filter != EVFILT_READ)
2428 return (1);
2429
2430 /*
2431 * We have to avoid creating a cycle when nesting kqueues
2432 * inside another. Rather than trying to walk the whole
2433 * potential DAG of nested kqueues, we just use a simple
2434 * ceiling protocol. When a kqueue is inserted into another,
2435 * we check that the (future) parent is not already nested
2436 * into another kqueue at a lower level than the potential
2437 * child (because it could indicate a cycle). If that test
2438 * passes, we just mark the nesting levels accordingly.
2439 */
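
/*
 * Worked example (illustrative): attaching a fresh child kq (level 0)
 * to a fresh parent (level 0) marks the child at level 1 and lifts
 * the parent to level 2; a prospective grandparent already nested at
 * level 1 would then refuse that parent, since accepting it could
 * close a cycle.
 */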
2440
2441 kqlock(parentkq);
2442 if (parentkq->kq_level > 0 &&
2443 parentkq->kq_level < kq->kq_level)
2444 {
2445 kqunlock(parentkq);
2446 return (1);
2447 } else {
2448 /* set parent level appropriately */
2449 if (parentkq->kq_level == 0)
2450 parentkq->kq_level = 2;
2451 if (parentkq->kq_level < kq->kq_level + 1)
2452 parentkq->kq_level = kq->kq_level + 1;
2453 kqunlock(parentkq);
2454
2455 kn->kn_fop = &kqread_filtops;
2456 kqlock(kq);
2457 KNOTE_ATTACH(&kq->kq_sel.si_note, kn);
2458 /* indicate nesting in child, if needed */
2459 if (kq->kq_level == 0)
2460 kq->kq_level = 1;
2461 kqunlock(kq);
2462 return (0);
2463 }
2464 }
2465
2466 /*
2467 * kqueue_drain - called when kq is closed
2468 */
2469 /*ARGSUSED*/
2470 static int
2471 kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
2472 {
2473 struct kqueue *kq = (struct kqueue *)fp->f_fglob->fg_data;
2474 kqlock(kq);
2475 kqueue_wakeup(kq, 1);
2476 kqunlock(kq);
2477 return (0);
2478 }
2479
2480 /*ARGSUSED*/
2481 int
2482 kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
2483 {
2484 kqlock(kq);
2485 if (isstat64 != 0) {
2486 struct stat64 *sb64 = (struct stat64 *)ub;
2487
2488 bzero((void *)sb64, sizeof(*sb64));
2489 sb64->st_size = kq->kq_count;
2490 if (kq->kq_state & KQ_KEV64)
2491 sb64->st_blksize = sizeof(struct kevent64_s);
2492 else
2493 sb64->st_blksize = IS_64BIT_PROCESS(p) ? sizeof(struct user64_kevent) : sizeof(struct user32_kevent);
2494 sb64->st_mode = S_IFIFO;
2495 } else {
2496 struct stat *sb = (struct stat *)ub;
2497
2498 bzero((void *)sb, sizeof(*sb));
2499 sb->st_size = kq->kq_count;
2500 if (kq->kq_state & KQ_KEV64)
2501 sb->st_blksize = sizeof(struct kevent64_s);
2502 else
2503 sb->st_blksize = IS_64BIT_PROCESS(p) ? sizeof(struct user64_kevent) : sizeof(struct user32_kevent);
2504 sb->st_mode = S_IFIFO;
2505 }
2506 kqunlock(kq);
2507 return (0);
2508 }
2509
2510 /*
2511 * Called with the kqueue locked
2512 */
2513 static void
2514 kqueue_wakeup(struct kqueue *kq, int closed)
2515 {
2516 if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0 || kq->kq_nprocess > 0) {
2517 kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
2518 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, KQ_EVENT,
2519 (closed) ? THREAD_INTERRUPTED : THREAD_AWAKENED);
2520 }
2521 }
2522
2523 void
2524 klist_init(struct klist *list)
2525 {
2526 SLIST_INIT(list);
2527 }
2528
2529
2530 /*
2531 * Query/Post each knote in the object's list
2532 *
2533 * The object lock protects the list. It is assumed
2534 * that the filter/event routine for the object can
2535 * determine that the object is already locked (via
2536 * the hint) and not deadlock itself.
2537 *
2538 * The object lock should also hold off pending
2539 * detach/drop operations. But we'll prevent it here
2540 * too - just in case.
2541 */
2542 void
2543 knote(struct klist *list, long hint)
2544 {
2545 struct knote *kn;
2546
2547 SLIST_FOREACH(kn, list, kn_selnext) {
2548 struct kqueue *kq = kn->kn_kq;
2549
2550 kqlock(kq);
2551 if (kqlock2knoteuse(kq, kn)) {
2552 int result;
2553
2554 /* call the event with only a use count */
2555 result = kn->kn_fop->f_event(kn, hint);
2556
2557 /* if it's not going away and was triggered */
2558 if (knoteuse2kqlock(kq, kn) && result)
2559 knote_activate(kn, 1);
2560 /* lock held again */
2561 }
2562 kqunlock(kq);
2563 }
2564 }
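
/*
 * A sketch of the posting side, assuming an object that embeds a
 * struct klist guarded by its own lock (the obj_* names are
 * hypothetical); KNOTE() expands to the knote() walk above:
 *
 *	lck_mtx_lock(&obj->obj_lock);
 *	obj->obj_state = new_state;
 *	KNOTE(&obj->obj_klist, hint);
 *	lck_mtx_unlock(&obj->obj_lock);
 *
 * where hint is handed through to each attached knote's f_event.
 */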
2565
2566 /*
2567 * attach a knote to the specified list. Return true if this is the first entry.
2568 * The list is protected by whatever lock the object it is associated with uses.
2569 */
2570 int
2571 knote_attach(struct klist *list, struct knote *kn)
2572 {
2573 int ret = SLIST_EMPTY(list);
2574 SLIST_INSERT_HEAD(list, kn, kn_selnext);
2575 return (ret);
2576 }
2577
2578 /*
2579 * detach a knote from the specified list. Return true if that was the last entry.
2580 * The list is protected by whatever lock the object it is associated with uses.
2581 */
2582 int
2583 knote_detach(struct klist *list, struct knote *kn)
2584 {
2585 SLIST_REMOVE(list, kn, knote, kn_selnext);
2586 return (SLIST_EMPTY(list));
2587 }
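
/*
 * Typical use of the two helpers above in a filter's attach/detach
 * paths (obj_* names hypothetical): the boolean results let the
 * object arm its event source when the first knote arrives and
 * disarm it when the last one leaves.
 *
 *	if (knote_attach(&obj->obj_klist, kn))
 *		obj_arm_events(obj);
 *	...
 *	if (knote_detach(&obj->obj_klist, kn))
 *		obj_disarm_events(obj);
 */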
2588
2589 /*
2590 * For a given knote, link a provided wait queue directly with the kqueue.
2591 * Wakeups will happen via recursive wait queue support. But nothing will move
2592 * the knote to the active list at wakeup (nothing calls knote()). Instead,
2593 * such knotes are permanently enqueued here.
2594 *
2595 * kqueue and knote references are held by caller.
2596 *
2597 * caller provides the wait queue link structure.
2598 */
2599 int
2600 knote_link_wait_queue(struct knote *kn, struct wait_queue *wq, wait_queue_link_t wql)
2601 {
2602 struct kqueue *kq = kn->kn_kq;
2603 kern_return_t kr;
2604
2605 kr = wait_queue_link_noalloc(wq, kq->kq_wqs, wql);
2606 if (kr == KERN_SUCCESS) {
2607 knote_markstayqueued(kn);
2608 return (0);
2609 } else {
2610 return (EINVAL);
2611 }
2612 }
2613
2614 /*
2615 * Unlink the provided wait queue from the kqueue associated with a knote.
2616 * Also remove it from the magic list of directly attached knotes.
2617 *
2618 * Note that the unlink may have already happened from the other side, so
2619 * ignore any failures to unlink and just remove it from the kqueue list.
2620 *
2621 * On success, caller is responsible for the link structure
2622 */
2623 int
2624 knote_unlink_wait_queue(struct knote *kn, struct wait_queue *wq, wait_queue_link_t *wqlp)
2625 {
2626 struct kqueue *kq = kn->kn_kq;
2627 kern_return_t kr;
2628
2629 kr = wait_queue_unlink_nofree(wq, kq->kq_wqs, wqlp);
2630 knote_clearstayqueued(kn);
2631 return ((kr != KERN_SUCCESS) ? EINVAL : 0);
2632 }
2633
2634 /*
2635 * remove all knotes referencing a specified fd
2636 *
2637 * Essentially an inlined knote_remove & knote_drop
2638 * when we know for sure that the thing is a file
2639 *
2640 * Entered with the proc_fd lock already held.
2641 * It returns the same way, but may drop it temporarily.
2642 */
2643 void
2644 knote_fdclose(struct proc *p, int fd)
2645 {
2646 struct filedesc *fdp = p->p_fd;
2647 struct klist *list;
2648 struct knote *kn;
2649
2650 list = &fdp->fd_knlist[fd];
2651 while ((kn = SLIST_FIRST(list)) != NULL) {
2652 struct kqueue *kq = kn->kn_kq;
2653
2654 if (kq->kq_p != p)
2655 panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
2656 __func__, kq->kq_p, p);
2657
2658 kqlock(kq);
2659 proc_fdunlock(p);
2660
2661 /*
2662 * Convert the lock to a drop ref.
2663 * If we get it, go ahead and drop it.
2664 * Otherwise, we waited for it to
2665 * be dropped by the other guy, so
2666 * it is safe to move on in the list.
2667 */
2668 if (kqlock2knotedrop(kq, kn)) {
2669 kn->kn_fop->f_detach(kn);
2670 knote_drop(kn, p);
2671 }
2672
2673 proc_fdlock(p);
2674
2675 /* the fd tables may have changed - start over */
2676 list = &fdp->fd_knlist[fd];
2677 }
2678 }
2679
2680 /* proc_fdlock held on entry (and exit) */
2681 static int
2682 knote_fdpattach(struct knote *kn, struct filedesc *fdp, struct proc *p)
2683 {
2684 struct klist *list = NULL;
2685
2686 if (! kn->kn_fop->f_isfd) {
2687 if (fdp->fd_knhashmask == 0)
2688 fdp->fd_knhash = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE,
2689 &fdp->fd_knhashmask);
2690 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
2691 } else {
2692 if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
2693 u_int size = 0;
2694
2695 if (kn->kn_id >= (uint64_t)p->p_rlimit[RLIMIT_NOFILE].rlim_cur
2696 || kn->kn_id >= (uint64_t)maxfiles)
2697 return (EINVAL);
2698
2699 /* have to grow the fd_knlist */
2700 size = fdp->fd_knlistsize;
2701 while (size <= kn->kn_id)
2702 size += KQEXTENT;
2703
2704 if (size >= (UINT_MAX/sizeof(struct klist *)))
2705 return (EINVAL);
2706
2707 MALLOC(list, struct klist *,
2708 size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
2709 if (list == NULL)
2710 return (ENOMEM);
2711
2712 bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
2713 fdp->fd_knlistsize * sizeof(struct klist *));
2714 bzero((caddr_t)list +
2715 fdp->fd_knlistsize * sizeof(struct klist *),
2716 (size - fdp->fd_knlistsize) * sizeof(struct klist *));
2717 FREE(fdp->fd_knlist, M_KQUEUE);
2718 fdp->fd_knlist = list;
2719 fdp->fd_knlistsize = size;
2720 }
2721 list = &fdp->fd_knlist[kn->kn_id];
2722 }
2723 SLIST_INSERT_HEAD(list, kn, kn_link);
2724 return (0);
2725 }
2726
2727
2728
2729 /*
2730 * should be called at spl == 0, since we don't want to hold spl
2731 * while calling fdrop and free.
2732 */
2733 static void
2734 knote_drop(struct knote *kn, __unused struct proc *ctxp)
2735 {
2736 struct kqueue *kq = kn->kn_kq;
2737 struct proc *p = kq->kq_p;
2738 struct filedesc *fdp = p->p_fd;
2739 struct klist *list;
2740 int needswakeup;
2741
2742 proc_fdlock(p);
2743 if (kn->kn_fop->f_isfd)
2744 list = &fdp->fd_knlist[kn->kn_id];
2745 else
2746 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
2747
2748 SLIST_REMOVE(list, kn, knote, kn_link);
2749 kqlock(kq);
2750 knote_dequeue(kn);
2751 needswakeup = (kn->kn_status & KN_USEWAIT);
2752 kqunlock(kq);
2753 proc_fdunlock(p);
2754
2755 if (needswakeup)
2756 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kn->kn_status,
2757 THREAD_AWAKENED);
2758
2759 if (kn->kn_fop->f_isfd)
2760 fp_drop(p, kn->kn_id, kn->kn_fp, 0);
2761
2762 knote_free(kn);
2763 }
2764
2765 /* called with kqueue lock held */
2766 static void
2767 knote_activate(struct knote *kn, int propagate)
2768 {
2769 struct kqueue *kq = kn->kn_kq;
2770
2771 kn->kn_status |= KN_ACTIVE;
2772 knote_enqueue(kn);
2773 kqueue_wakeup(kq, 0);
2774
2775 /* this is a real event: wake up the parent kq, too */
2776 if (propagate)
2777 KNOTE(&kq->kq_sel.si_note, 0);
2778 }
2779
2780 /* called with kqueue lock held */
2781 static void
2782 knote_deactivate(struct knote *kn)
2783 {
2784 kn->kn_status &= ~KN_ACTIVE;
2785 knote_dequeue(kn);
2786 }
2787
2788 /* called with kqueue lock held */
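/*
 * A knote is appended in exactly two cases: it is marked stay-queued
 * but not currently queued, or it has none of KN_QUEUED, KN_STAYQUEUED
 * or KN_DISABLED set. Disabled knotes therefore stay off the queue
 * until re-enabled.
 */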
2789 static void
2790 knote_enqueue(struct knote *kn)
2791 {
2792 if ((kn->kn_status & (KN_QUEUED | KN_STAYQUEUED)) == KN_STAYQUEUED ||
2793 (kn->kn_status & (KN_QUEUED | KN_STAYQUEUED | KN_DISABLED)) == 0) {
2794 struct kqtailq *tq = kn->kn_tq;
2795 struct kqueue *kq = kn->kn_kq;
2796
2797 TAILQ_INSERT_TAIL(tq, kn, kn_tqe);
2798 kn->kn_status |= KN_QUEUED;
2799 kq->kq_count++;
2800 }
2801 }
2802
2803 /* called with kqueue lock held */
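/*
 * The inverse guard: only plain queued knotes come off here. Stay-queued
 * knotes are left in place, and kn_tq is reset so a later enqueue lands
 * back on the default kq_head queue.
 */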
2804 static void
2805 knote_dequeue(struct knote *kn)
2806 {
2807 struct kqueue *kq = kn->kn_kq;
2808
2809 if ((kn->kn_status & (KN_QUEUED | KN_STAYQUEUED)) == KN_QUEUED) {
2810 struct kqtailq *tq = kn->kn_tq;
2811
2812 TAILQ_REMOVE(tq, kn, kn_tqe);
2813 kn->kn_tq = &kq->kq_head;
2814 kn->kn_status &= ~KN_QUEUED;
2815 kq->kq_count--;
2816 }
2817 }
2818
2819 void
2820 knote_init(void)
2821 {
2822 knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote),
2823 8192, "knote zone");
2824
2825 /* allocate kq lock group attribute and group */
2826 kq_lck_grp_attr = lck_grp_attr_alloc_init();
2827
2828 kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr);
2829
2830 /* Allocate kq lock attribute */
2831 kq_lck_attr = lck_attr_alloc_init();
2832
2833 /* Initialize the timer filter lock */
2834 lck_mtx_init(&_filt_timerlock, kq_lck_grp, kq_lck_attr);
2835
2836 #if VM_PRESSURE_EVENTS
2837 /* Initialize the vm pressure list lock */
2838 vm_pressure_init(kq_lck_grp, kq_lck_attr);
2839 #endif
2840
2841 #if CONFIG_MEMORYSTATUS
2842 /* Initialize the memorystatus list lock */
2843 memorystatus_kevent_init(kq_lck_grp, kq_lck_attr);
2844 #endif
2845 }
2846 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
2847
2848 static struct knote *
2849 knote_alloc(void)
2850 {
2851 return ((struct knote *)zalloc(knote_zone));
2852 }
2853
2854 static void
2855 knote_free(struct knote *kn)
2856 {
2857 zfree(knote_zone, kn);
2858 }
2859
2860 #if SOCKETS
2861 #include <sys/param.h>
2862 #include <sys/socket.h>
2863 #include <sys/protosw.h>
2864 #include <sys/domain.h>
2865 #include <sys/mbuf.h>
2866 #include <sys/kern_event.h>
2867 #include <sys/malloc.h>
2868 #include <sys/sys_domain.h>
2869 #include <sys/syslog.h>
2870
2871 #ifndef ROUNDUP64
2872 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
2873 #endif
2874
2875 #ifndef ADVANCE64
2876 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
2877 #endif
2878
2879 static lck_grp_attr_t *kev_lck_grp_attr;
2880 static lck_attr_t *kev_lck_attr;
2881 static lck_grp_t *kev_lck_grp;
2882 static decl_lck_rw_data(,kev_lck_data);
2883 static lck_rw_t *kev_rwlock = &kev_lck_data;
2884
2885 static int kev_attach(struct socket *so, int proto, struct proc *p);
2886 static int kev_detach(struct socket *so);
2887 static int kev_control(struct socket *so, u_long cmd, caddr_t data,
2888 struct ifnet *ifp, struct proc *p);
2889 static lck_mtx_t * event_getlock(struct socket *, int);
2890 static int event_lock(struct socket *, int, void *);
2891 static int event_unlock(struct socket *, int, void *);
2892
2893 static int event_sofreelastref(struct socket *);
2894 static void kev_delete(struct kern_event_pcb *);
2895
2896 static struct pr_usrreqs event_usrreqs = {
2897 .pru_attach = kev_attach,
2898 .pru_control = kev_control,
2899 .pru_detach = kev_detach,
2900 .pru_soreceive = soreceive,
2901 };
2902
2903 static struct protosw eventsw[] = {
2904 {
2905 .pr_type = SOCK_RAW,
2906 .pr_protocol = SYSPROTO_EVENT,
2907 .pr_flags = PR_ATOMIC,
2908 .pr_usrreqs = &event_usrreqs,
2909 .pr_lock = event_lock,
2910 .pr_unlock = event_unlock,
2911 .pr_getlock = event_getlock,
2912 }
2913 };
2914
2915 __private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS;
2916 __private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS;
2917
2918 SYSCTL_NODE(_net_systm, OID_AUTO, kevt,
2919 CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel event family");
2920
2921 struct kevtstat kevtstat;
2922 SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats,
2923 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2924 kevt_getstat, "S,kevtstat", "");
2925
2926 SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist,
2927 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2928 kevt_pcblist, "S,xkevtpcb", "");
2929
2930 static lck_mtx_t *
2931 event_getlock(struct socket *so, int locktype)
2932 {
2933 #pragma unused(locktype)
2934 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
2935
2936 if (so->so_pcb != NULL) {
2937 if (so->so_usecount < 0)
2938 panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
2939 so, so->so_usecount, solockhistory_nr(so));
2940 /* NOTREACHED */
2941 } else {
2942 panic("%s: so=%p NULL NO so_pcb %s\n", __func__,
2943 so, solockhistory_nr(so));
2944 /* NOTREACHED */
2945 }
2946 return (&ev_pcb->evp_mtx);
2947 }
2948
2949 static int
2950 event_lock(struct socket *so, int refcount, void *lr)
2951 {
2952 void *lr_saved;
2953
2954 if (lr == NULL)
2955 lr_saved = __builtin_return_address(0);
2956 else
2957 lr_saved = lr;
2958
2959 if (so->so_pcb != NULL) {
2960 lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
2961 } else {
2962 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
2963 so, lr_saved, solockhistory_nr(so));
2964 /* NOTREACHED */
2965 }
2966
2967 if (so->so_usecount < 0) {
2968 panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s\n", __func__,
2969 so, so->so_pcb, lr_saved, so->so_usecount,
2970 solockhistory_nr(so));
2971 /* NOTREACHED */
2972 }
2973
2974 if (refcount)
2975 so->so_usecount++;
2976
2977 so->lock_lr[so->next_lock_lr] = lr_saved;
2978 so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
2979 return (0);
2980 }
2981
2982 static int
2983 event_unlock(struct socket *so, int refcount, void *lr)
2984 {
2985 void *lr_saved;
2986 lck_mtx_t *mutex_held;
2987
2988 if (lr == NULL)
2989 lr_saved = __builtin_return_address(0);
2990 else
2991 lr_saved = lr;
2992
2993 if (refcount)
2994 so->so_usecount--;
2995
2996 if (so->so_usecount < 0) {
2997 panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
2998 so, so->so_usecount, solockhistory_nr(so));
2999 /* NOTREACHED */
3000 }
3001 if (so->so_pcb == NULL) {
3002 panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s\n", __func__,
3003 so, so->so_usecount, (void *)lr_saved,
3004 solockhistory_nr(so));
3005 /* NOTREACHED */
3006 }
3007 mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
3008
3009 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
3010 so->unlock_lr[so->next_unlock_lr] = lr_saved;
3011 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
3012
3013 if (so->so_usecount == 0) {
3014 VERIFY(so->so_flags & SOF_PCBCLEARING);
3015 event_sofreelastref(so);
3016 } else {
3017 lck_mtx_unlock(mutex_held);
3018 }
3019
3020 return (0);
3021 }
3022
3023 static int
3024 event_sofreelastref(struct socket *so)
3025 {
3026 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
3027
3028 lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);
3029
3030 so->so_pcb = NULL;
3031
3032 /*
3033 * Disable upcall in the event another thread is in kev_post_msg()
3034 * appending a record to the receive socket buffer, since sbwakeup()
3035 * may release the socket lock otherwise.
3036 */
3037 so->so_rcv.sb_flags &= ~SB_UPCALL;
3038 so->so_snd.sb_flags &= ~SB_UPCALL;
3039 so->so_event = sonullevent;
3040 lck_mtx_unlock(&(ev_pcb->evp_mtx));
3041
3042 lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
3043 lck_rw_lock_exclusive(kev_rwlock);
3044 LIST_REMOVE(ev_pcb, evp_link);
3045 kevtstat.kes_pcbcount--;
3046 kevtstat.kes_gencnt++;
3047 lck_rw_done(kev_rwlock);
3048 kev_delete(ev_pcb);
3049
3050 sofreelastref(so, 1);
3051 return (0);
3052 }
3053
3054 static int event_proto_count = (sizeof (eventsw) / sizeof (struct protosw));
3055
3056 static
3057 struct kern_event_head kern_event_head;
3058
3059 static u_int32_t static_event_id = 0;
3060
3061 #define EVPCB_ZONE_MAX 65536
3062 #define EVPCB_ZONE_NAME "kerneventpcb"
3063 static struct zone *ev_pcb_zone;
3064
3065 /*
3066 * Install the protosw's for the NKE manager. Invoked at extension load time
3067 */
3068 void
3069 kern_event_init(struct domain *dp)
3070 {
3071 struct protosw *pr;
3072 int i;
3073
3074 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
3075 VERIFY(dp == systemdomain);
3076
3077 kev_lck_grp_attr = lck_grp_attr_alloc_init();
3078 if (kev_lck_grp_attr == NULL) {
3079 panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
3080 /* NOTREACHED */
3081 }
3082
3083 kev_lck_grp = lck_grp_alloc_init("Kernel Event Protocol",
3084 kev_lck_grp_attr);
3085 if (kev_lck_grp == NULL) {
3086 panic("%s: lck_grp_alloc_init failed\n", __func__);
3087 /* NOTREACHED */
3088 }
3089
3090 kev_lck_attr = lck_attr_alloc_init();
3091 if (kev_lck_attr == NULL) {
3092 panic("%s: lck_attr_alloc_init failed\n", __func__);
3093 /* NOTREACHED */
3094 }
3095
3096 lck_rw_init(kev_rwlock, kev_lck_grp, kev_lck_attr);
3097 if (kev_rwlock == NULL) {
3098 panic("%s: lck_mtx_alloc_init failed\n", __func__);
3099 /* NOTREACHED */
3100 }
3101
3102 for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++)
3103 net_add_proto(pr, dp, 1);
3104
3105 ev_pcb_zone = zinit(sizeof(struct kern_event_pcb),
3106 EVPCB_ZONE_MAX * sizeof(struct kern_event_pcb), 0, EVPCB_ZONE_NAME);
3107 if (ev_pcb_zone == NULL) {
3108 panic("%s: failed allocating ev_pcb_zone", __func__);
3109 /* NOTREACHED */
3110 }
3111 zone_change(ev_pcb_zone, Z_EXPAND, TRUE);
3112 zone_change(ev_pcb_zone, Z_CALLERACCT, TRUE);
3113 }
3114
3115 static int
3116 kev_attach(struct socket *so, __unused int proto, __unused struct proc *p)
3117 {
3118 int error = 0;
3119 struct kern_event_pcb *ev_pcb;
3120
3121 error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
3122 if (error != 0)
3123 return (error);
3124
3125 if ((ev_pcb = (struct kern_event_pcb *)zalloc(ev_pcb_zone)) == NULL) {
3126 return (ENOBUFS);
3127 }
3128 bzero(ev_pcb, sizeof(struct kern_event_pcb));
3129 lck_mtx_init(&ev_pcb->evp_mtx, kev_lck_grp, kev_lck_attr);
3130
3131 ev_pcb->evp_socket = so;
3132 ev_pcb->evp_vendor_code_filter = 0xffffffff;
3133
3134 so->so_pcb = (caddr_t) ev_pcb;
3135 lck_rw_lock_exclusive(kev_rwlock);
3136 LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link);
3137 kevtstat.kes_pcbcount++;
3138 kevtstat.kes_gencnt++;
3139 lck_rw_done(kev_rwlock);
3140
3141 return (error);
3142 }
3143
3144 static void
3145 kev_delete(struct kern_event_pcb *ev_pcb)
3146 {
3147 VERIFY(ev_pcb != NULL);
3148 lck_mtx_destroy(&ev_pcb->evp_mtx, kev_lck_grp);
3149 zfree(ev_pcb_zone, ev_pcb);
3150 }
3151
3152 static int
3153 kev_detach(struct socket *so)
3154 {
3155 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;
3156
3157 if (ev_pcb != NULL) {
3158 soisdisconnected(so);
3159 so->so_flags |= SOF_PCBCLEARING;
3160 }
3161
3162 return (0);
3163 }
3164
3165 /*
3166 * For now, kev_vendor_code and mbuf_tags use the same
3167 * mechanism.
3168 */
3169 errno_t kev_vendor_code_find(
3170 const char *string,
3171 u_int32_t *out_vendor_code)
3172 {
3173 if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
3174 return (EINVAL);
3175 }
3176 return (net_str_id_find_internal(string, out_vendor_code,
3177 NSI_VENDOR_CODE, 1));
3178 }
3179
3180 errno_t
3181 kev_msg_post(struct kev_msg *event_msg)
3182 {
3183 mbuf_tag_id_t min_vendor, max_vendor;
3184
3185 net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);
3186
3187 if (event_msg == NULL)
3188 return (EINVAL);
3189
3190 /*
3191 * Limit third parties to posting events for registered vendor codes
3192 * only
3193 */
3194 if (event_msg->vendor_code < min_vendor ||
3195 event_msg->vendor_code > max_vendor) {
3196 OSIncrementAtomic64((SInt64 *)&kevtstat.kes_badvendor);
3197 return (EINVAL);
3198 }
3199 return (kev_post_msg(event_msg));
3200 }
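
/*
 * A minimal posting sketch (field values illustrative; the vendor
 * code would come from kev_vendor_code_find() above):
 *
 *	struct kev_msg ev_msg;
 *
 *	bzero(&ev_msg, sizeof (ev_msg));
 *	ev_msg.vendor_code = vendor_code;
 *	ev_msg.kev_class = KEV_NETWORK_CLASS;
 *	ev_msg.kev_subclass = subclass;
 *	ev_msg.event_code = event_code;
 *	ev_msg.dv[0].data_ptr = &payload;
 *	ev_msg.dv[0].data_length = sizeof (payload);
 *	(void) kev_msg_post(&ev_msg);
 */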
3201
3202 int
3203 kev_post_msg(struct kev_msg *event_msg)
3204 {
3205 struct mbuf *m, *m2;
3206 struct kern_event_pcb *ev_pcb;
3207 struct kern_event_msg *ev;
3208 char *tmp;
3209 u_int32_t total_size;
3210 int i;
3211
3212 /* Verify the message is small enough to fit in one mbuf w/o cluster */
3213 total_size = KEV_MSG_HEADER_SIZE;
3214
3215 for (i = 0; i < 5; i++) {
3216 if (event_msg->dv[i].data_length == 0)
3217 break;
3218 total_size += event_msg->dv[i].data_length;
3219 }
3220
3221 if (total_size > MLEN) {
3222 OSIncrementAtomic64((SInt64 *)&kevtstat.kes_toobig);
3223 return (EMSGSIZE);
3224 }
3225
3226 m = m_get(M_DONTWAIT, MT_DATA);
3227 if (m == NULL) {
3228 OSIncrementAtomic64((SInt64 *)&kevtstat.kes_nomem);
3229 return (ENOMEM);
3230 }
3231 ev = mtod(m, struct kern_event_msg *);
3232 total_size = KEV_MSG_HEADER_SIZE;
3233
3234 tmp = (char *) &ev->event_data[0];
3235 for (i = 0; i < 5; i++) {
3236 if (event_msg->dv[i].data_length == 0)
3237 break;
3238
3239 total_size += event_msg->dv[i].data_length;
3240 bcopy(event_msg->dv[i].data_ptr, tmp,
3241 event_msg->dv[i].data_length);
3242 tmp += event_msg->dv[i].data_length;
3243 }
3244
3245 ev->id = ++static_event_id;
3246 ev->total_size = total_size;
3247 ev->vendor_code = event_msg->vendor_code;
3248 ev->kev_class = event_msg->kev_class;
3249 ev->kev_subclass = event_msg->kev_subclass;
3250 ev->event_code = event_msg->event_code;
3251
3252 m->m_len = total_size;
3253 lck_rw_lock_shared(kev_rwlock);
3254 for (ev_pcb = LIST_FIRST(&kern_event_head);
3255 ev_pcb;
3256 ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
3257 lck_mtx_lock(&ev_pcb->evp_mtx);
3258 if (ev_pcb->evp_socket->so_pcb == NULL) {
3259 lck_mtx_unlock(&ev_pcb->evp_mtx);
3260 continue;
3261 }
3262 if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) {
3263 if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) {
3264 lck_mtx_unlock(&ev_pcb->evp_mtx);
3265 continue;
3266 }
3267
3268 if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) {
3269 if (ev_pcb->evp_class_filter != ev->kev_class) {
3270 lck_mtx_unlock(&ev_pcb->evp_mtx);
3271 continue;
3272 }
3273
3274 if ((ev_pcb->evp_subclass_filter !=
3275 KEV_ANY_SUBCLASS) &&
3276 (ev_pcb->evp_subclass_filter !=
3277 ev->kev_subclass)) {
3278 lck_mtx_unlock(&ev_pcb->evp_mtx);
3279 continue;
3280 }
3281 }
3282 }
3283
3284 m2 = m_copym(m, 0, m->m_len, M_NOWAIT);
3285 if (m2 == NULL) {
3286 OSIncrementAtomic64((SInt64 *)&kevtstat.kes_nomem);
3287 m_free(m);
3288 lck_mtx_unlock(&ev_pcb->evp_mtx);
3289 lck_rw_done(kev_rwlock);
3290 return (ENOMEM);
3291 }
3292 if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) {
3293 /*
3294 * We use "m" for the socket stats as it would be
3295 * unsafe to use "m2"
3296 */
3297 so_inc_recv_data_stat(ev_pcb->evp_socket,
3298 1, m->m_len, SO_TC_BE);
3299
3300 sorwakeup(ev_pcb->evp_socket);
3301 OSIncrementAtomic64((SInt64 *)&kevtstat.kes_posted);
3302 } else {
3303 OSIncrementAtomic64((SInt64 *)&kevtstat.kes_fullsock);
3304 }
3305 lck_mtx_unlock(&ev_pcb->evp_mtx);
3306 }
3307 m_free(m);
3308 lck_rw_done(kev_rwlock);
3309
3310 return (0);
3311 }
3312
3313 static int
3314 kev_control(struct socket *so,
3315 u_long cmd,
3316 caddr_t data,
3317 __unused struct ifnet *ifp,
3318 __unused struct proc *p)
3319 {
3320 struct kev_request *kev_req = (struct kev_request *) data;
3321 struct kern_event_pcb *ev_pcb;
3322 struct kev_vendor_code *kev_vendor;
3323 u_int32_t *id_value = (u_int32_t *) data;
3324
3325 switch (cmd) {
3326 case SIOCGKEVID:
3327 *id_value = static_event_id;
3328 break;
3329 case SIOCSKEVFILT:
3330 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
3331 ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
3332 ev_pcb->evp_class_filter = kev_req->kev_class;
3333 ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
3334 break;
3335 case SIOCGKEVFILT:
3336 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
3337 kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
3338 kev_req->kev_class = ev_pcb->evp_class_filter;
3339 kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
3340 break;
3341 case SIOCGKEVVENDOR:
3342 kev_vendor = (struct kev_vendor_code *)data;
3343 /* Make sure the string is NUL-terminated */
3344 kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN-1] = 0;
3345 return (net_str_id_find_internal(kev_vendor->vendor_string,
3346 &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0));
3347 default:
3348 return (ENOTSUP);
3349 }
3350
3351 return (0);
3352 }
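
/*
 * User space reaches these controls through a PF_SYSTEM socket; a
 * sketch of subscribing to one class of events (error handling
 * omitted, constants from <sys/kern_event.h>):
 *
 *	int fd = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
 *	struct kev_request req = {
 *		.vendor_code = KEV_VENDOR_APPLE,
 *		.kev_class = KEV_NETWORK_CLASS,
 *		.kev_subclass = KEV_ANY_SUBCLASS,
 *	};
 *	ioctl(fd, SIOCSKEVFILT, &req);
 *
 * after which recv(2) returns struct kern_event_msg records.
 */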
3353
3354 int
3355 kevt_getstat SYSCTL_HANDLER_ARGS
3356 {
3357 #pragma unused(oidp, arg1, arg2)
3358 int error = 0;
3359
3360 lck_rw_lock_shared(kev_rwlock);
3361
3362 if (req->newptr != USER_ADDR_NULL) {
3363 error = EPERM;
3364 goto done;
3365 }
3366 if (req->oldptr == USER_ADDR_NULL) {
3367 req->oldidx = sizeof(struct kevtstat);
3368 goto done;
3369 }
3370
3371 error = SYSCTL_OUT(req, &kevtstat,
3372 MIN(sizeof(struct kevtstat), req->oldlen));
3373 done:
3374 lck_rw_done(kev_rwlock);
3375
3376 return (error);
3377 }
3378
3379 __private_extern__ int
3380 kevt_pcblist SYSCTL_HANDLER_ARGS
3381 {
3382 #pragma unused(oidp, arg1, arg2)
3383 int error = 0;
3384 int n, i;
3385 struct xsystmgen xsg;
3386 void *buf = NULL;
3387 size_t item_size = ROUNDUP64(sizeof (struct xkevtpcb)) +
3388 ROUNDUP64(sizeof (struct xsocket_n)) +
3389 2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
3390 ROUNDUP64(sizeof (struct xsockstat_n));
3391 struct kern_event_pcb *ev_pcb;
3392
3393 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
3394 if (buf == NULL)
3395 return (ENOMEM);
3396
3397 lck_rw_lock_shared(kev_rwlock);
3398
3399 n = kevtstat.kes_pcbcount;
3400
3401 if (req->oldptr == USER_ADDR_NULL) {
3402 req->oldidx = (n + n/8) * item_size;
3403 goto done;
3404 }
3405 if (req->newptr != USER_ADDR_NULL) {
3406 error = EPERM;
3407 goto done;
3408 }
3409 bzero(&xsg, sizeof (xsg));
3410 xsg.xg_len = sizeof (xsg);
3411 xsg.xg_count = n;
3412 xsg.xg_gen = kevtstat.kes_gencnt;
3413 xsg.xg_sogen = so_gencnt;
3414 error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
3415 if (error) {
3416 goto done;
3417 }
3418 /*
3419 * We are done if there is no pcb
3420 */
3421 if (n == 0) {
3422 goto done;
3423 }
3424
3425 i = 0;
3426 for (i = 0, ev_pcb = LIST_FIRST(&kern_event_head);
3427 i < n && ev_pcb != NULL;
3428 i++, ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
3429 struct xkevtpcb *xk = (struct xkevtpcb *)buf;
3430 struct xsocket_n *xso = (struct xsocket_n *)
3431 ADVANCE64(xk, sizeof (*xk));
3432 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
3433 ADVANCE64(xso, sizeof (*xso));
3434 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
3435 ADVANCE64(xsbrcv, sizeof (*xsbrcv));
3436 struct xsockstat_n *xsostats = (struct xsockstat_n *)
3437 ADVANCE64(xsbsnd, sizeof (*xsbsnd));
3438
3439 bzero(buf, item_size);
3440
3441 lck_mtx_lock(&ev_pcb->evp_mtx);
3442
3443 xk->kep_len = sizeof(struct xkevtpcb);
3444 xk->kep_kind = XSO_EVT;
3445 xk->kep_evtpcb = (uint64_t)VM_KERNEL_ADDRPERM(ev_pcb);
3446 xk->kep_vendor_code_filter = ev_pcb->evp_vendor_code_filter;
3447 xk->kep_class_filter = ev_pcb->evp_class_filter;
3448 xk->kep_subclass_filter = ev_pcb->evp_subclass_filter;
3449
3450 sotoxsocket_n(ev_pcb->evp_socket, xso);
3451 sbtoxsockbuf_n(ev_pcb->evp_socket ?
3452 &ev_pcb->evp_socket->so_rcv : NULL, xsbrcv);
3453 sbtoxsockbuf_n(ev_pcb->evp_socket ?
3454 &ev_pcb->evp_socket->so_snd : NULL, xsbsnd);
3455 sbtoxsockstat_n(ev_pcb->evp_socket, xsostats);
3456
3457 lck_mtx_unlock(&ev_pcb->evp_mtx);
3458
3459 error = SYSCTL_OUT(req, buf, item_size);
3460 }
3461
3462 if (error == 0) {
3463 /*
3464 * Give the user an updated idea of our state.
3465 * If the generation differs from what we told
3466 * her before, she knows that something happened
3467 * while we were processing this request, and it
3468 * might be necessary to retry.
3469 */
3470 bzero(&xsg, sizeof (xsg));
3471 xsg.xg_len = sizeof (xsg);
3472 xsg.xg_count = n;
3473 xsg.xg_gen = kevtstat.kes_gencnt;
3474 xsg.xg_sogen = so_gencnt;
3475 error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
3476 if (error) {
3477 goto done;
3478 }
3479 }
3480
3481 done:
3482 lck_rw_done(kev_rwlock);
3483
3484 return (error);
3485 }
3486
3487 #endif /* SOCKETS */
3488
3489
3490 int
3491 fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)
3492 {
3493 struct vinfo_stat * st;
3494
3495 st = &kinfo->kq_stat;
3496
3497 st->vst_size = kq->kq_count;
3498 if (kq->kq_state & KQ_KEV64)
3499 st->vst_blksize = sizeof(struct kevent64_s);
3500 else
3501 st->vst_blksize = sizeof(struct kevent);
3502 st->vst_mode = S_IFIFO;
3503 if (kq->kq_state & KQ_SEL)
3504 kinfo->kq_state |= PROC_KQUEUE_SELECT;
3505 if (kq->kq_state & KQ_SLEEP)
3506 kinfo->kq_state |= PROC_KQUEUE_SLEEP;
3507
3508 return (0);
3509 }
3510
3511
3512 void
3513 knote_markstayqueued(struct knote *kn)
3514 {
3515 kqlock(kn->kn_kq);
3516 kn->kn_status |= KN_STAYQUEUED;
3517 knote_enqueue(kn);
3518 kqunlock(kn->kn_kq);
3519 }
3520
3521 void
3522 knote_clearstayqueued(struct knote *kn)
3523 {
3524 kqlock(kn->kn_kq);
3525 kn->kn_status &= ~KN_STAYQUEUED;
3526 knote_dequeue(kn);
3527 kqunlock(kn->kn_kq);
3528 }