bsd/kern/kern_event.c (apple/xnu, xnu-3789.70.16)
1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 *
28 */
29 /*-
30 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54 /*
55 * @(#)kern_event.c 1.0 (3/31/2000)
56 */
57 #include <stdint.h>
58
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/filedesc.h>
62 #include <sys/kernel.h>
63 #include <sys/proc_internal.h>
64 #include <sys/kauth.h>
65 #include <sys/malloc.h>
66 #include <sys/unistd.h>
67 #include <sys/file_internal.h>
68 #include <sys/fcntl.h>
69 #include <sys/select.h>
70 #include <sys/queue.h>
71 #include <sys/event.h>
72 #include <sys/eventvar.h>
73 #include <sys/protosw.h>
74 #include <sys/socket.h>
75 #include <sys/socketvar.h>
76 #include <sys/stat.h>
77 #include <sys/sysctl.h>
78 #include <sys/uio.h>
79 #include <sys/sysproto.h>
80 #include <sys/user.h>
81 #include <sys/vnode_internal.h>
82 #include <string.h>
83 #include <sys/proc_info.h>
84 #include <sys/codesign.h>
85 #include <sys/pthread_shims.h>
86
87 #include <kern/locks.h>
88 #include <kern/clock.h>
89 #include <kern/policy_internal.h>
90 #include <kern/thread_call.h>
91 #include <kern/sched_prim.h>
92 #include <kern/waitq.h>
93 #include <kern/zalloc.h>
94 #include <kern/kalloc.h>
95 #include <kern/assert.h>
96
97 #include <libkern/libkern.h>
98 #include "net/net_str_id.h"
99
100 #include <mach/task.h>
101
102 #if CONFIG_MEMORYSTATUS
103 #include <sys/kern_memorystatus.h>
104 #endif
105
106 /*
107 * JMM - this typedef needs to be unified with pthread_priority_t
108 * and mach_msg_priority_t. It also needs to be the same type
109 * everywhere.
110 */
111 typedef int32_t qos_t;
112
113 MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
114
115 #define KQ_EVENT NO_EVENT64
116
117 static inline void kqlock(struct kqueue *kq);
118 static inline void kqunlock(struct kqueue *kq);
119
120 static int kqlock2knoteuse(struct kqueue *kq, struct knote *kn);
121 static int kqlock2knotedrop(struct kqueue *kq, struct knote *kn);
122 static int kqlock2knotedetach(struct kqueue *kq, struct knote *kn);
123 static int knoteuse2kqlock(struct kqueue *kq, struct knote *kn, int defer_drop);
124
125 static int kqueue_read(struct fileproc *fp, struct uio *uio,
126 int flags, vfs_context_t ctx);
127 static int kqueue_write(struct fileproc *fp, struct uio *uio,
128 int flags, vfs_context_t ctx);
129 static int kqueue_ioctl(struct fileproc *fp, u_long com, caddr_t data,
130 vfs_context_t ctx);
131 static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
132 vfs_context_t ctx);
133 static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
134 static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
135 vfs_context_t ctx);
136 static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);
137
138 static const struct fileops kqueueops = {
139 .fo_type = DTYPE_KQUEUE,
140 .fo_read = kqueue_read,
141 .fo_write = kqueue_write,
142 .fo_ioctl = kqueue_ioctl,
143 .fo_select = kqueue_select,
144 .fo_close = kqueue_close,
145 .fo_kqfilter = kqueue_kqfilter,
146 .fo_drain = kqueue_drain,
147 };
148
149 static int kevent_internal(struct proc *p, int fd,
150 user_addr_t changelist, int nchanges,
151 user_addr_t eventlist, int nevents,
152 user_addr_t data_out, uint64_t data_available,
153 unsigned int flags, user_addr_t utimeout,
154 kqueue_continue_t continuation,
155 int32_t *retval);
156 static int kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp,
157 struct proc *p, unsigned int flags);
158 static int kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp,
159 struct proc *p, unsigned int flags);
160 char * kevent_description(struct kevent_internal_s *kevp, char *s, size_t n);
161
162 static void kqueue_interrupt(struct kqueue *kq);
163 static int kevent_callback(struct kqueue *kq, struct kevent_internal_s *kevp,
164 void *data);
165 static void kevent_continue(struct kqueue *kq, void *data, int error);
166 static void kqueue_scan_continue(void *contp, wait_result_t wait_result);
167 static int kqueue_process(struct kqueue *kq, kevent_callback_t callback, void *callback_data,
168 struct filt_process_s *process_data, kq_index_t servicer_qos_index,
169 int *countp, struct proc *p);
170 static int kqueue_begin_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags);
171 static void kqueue_end_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags);
172 static struct kqtailq *kqueue_get_base_queue(struct kqueue *kq, kq_index_t qos_index);
173 static struct kqtailq *kqueue_get_high_queue(struct kqueue *kq, kq_index_t qos_index);
174 static int kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index);
175
176 static struct kqtailq *kqueue_get_suppressed_queue(struct kqueue *kq, kq_index_t qos_index);
177
178 static void kqworkq_request_thread(struct kqworkq *kqwq, kq_index_t qos_index);
179 static void kqworkq_request_help(struct kqworkq *kqwq, kq_index_t qos_index, uint32_t type);
180 static void kqworkq_update_override(struct kqworkq *kqwq, kq_index_t qos_index, kq_index_t override_index);
181 static void kqworkq_bind_thread(struct kqworkq *kqwq, kq_index_t qos_index, thread_t thread, unsigned int flags);
182 static void kqworkq_unbind_thread(struct kqworkq *kqwq, kq_index_t qos_index, thread_t thread, unsigned int flags);
183 static struct kqrequest *kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index);
184
185
186 static int knote_process(struct knote *kn, kevent_callback_t callback, void *callback_data,
187 struct filt_process_s *process_data, struct proc *p);
188 #if 0
189 static void knote_put(struct knote *kn);
190 #endif
191
192 static int knote_fdadd(struct knote *kn, struct proc *p);
193 static void knote_fdremove(struct knote *kn, struct proc *p);
194 static struct knote *knote_fdfind(struct kqueue *kq, struct kevent_internal_s *kev, struct proc *p);
195
196 static void knote_drop(struct knote *kn, struct proc *p);
197 static struct knote *knote_alloc(void);
198 static void knote_free(struct knote *kn);
199
200 static void knote_activate(struct knote *kn);
201 static void knote_deactivate(struct knote *kn);
202
203 static void knote_enable(struct knote *kn);
204 static void knote_disable(struct knote *kn);
205
206 static int knote_enqueue(struct knote *kn);
207 static void knote_dequeue(struct knote *kn);
208
209 static void knote_suppress(struct knote *kn);
210 static void knote_unsuppress(struct knote *kn);
211 static void knote_wakeup(struct knote *kn);
212
213 static kq_index_t knote_get_queue_index(struct knote *kn);
214 static struct kqtailq *knote_get_queue(struct knote *kn);
215 static struct kqtailq *knote_get_suppressed_queue(struct knote *kn);
216 static kq_index_t knote_get_req_index(struct knote *kn);
217 static kq_index_t knote_get_qos_index(struct knote *kn);
218 static void knote_set_qos_index(struct knote *kn, kq_index_t qos_index);
219 static kq_index_t knote_get_qos_override_index(struct knote *kn);
220 static void knote_set_qos_override_index(struct knote *kn, kq_index_t qos_index);
221
222 static int filt_fileattach(struct knote *kn);
223 static struct filterops file_filtops = {
224 .f_isfd = 1,
225 .f_attach = filt_fileattach,
226 };
227
228 static void filt_kqdetach(struct knote *kn);
229 static int filt_kqueue(struct knote *kn, long hint);
230 static int filt_kqtouch(struct knote *kn, struct kevent_internal_s *kev);
231 static int filt_kqprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
232 static struct filterops kqread_filtops = {
233 .f_isfd = 1,
234 .f_detach = filt_kqdetach,
235 .f_event = filt_kqueue,
236 .f_touch = filt_kqtouch,
237 .f_process = filt_kqprocess,
238 };
239
240 /* placeholder for not-yet-implemented filters */
241 static int filt_badattach(struct knote *kn);
242 static struct filterops bad_filtops = {
243 .f_attach = filt_badattach,
244 };
245
246 static int filt_procattach(struct knote *kn);
247 static void filt_procdetach(struct knote *kn);
248 static int filt_proc(struct knote *kn, long hint);
249 static int filt_proctouch(struct knote *kn, struct kevent_internal_s *kev);
250 static int filt_procprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
251 static struct filterops proc_filtops = {
252 .f_attach = filt_procattach,
253 .f_detach = filt_procdetach,
254 .f_event = filt_proc,
255 .f_touch = filt_proctouch,
256 .f_process = filt_procprocess,
257 };
258
259 #if CONFIG_MEMORYSTATUS
260 extern struct filterops memorystatus_filtops;
261 #endif /* CONFIG_MEMORYSTATUS */
262
263 extern struct filterops fs_filtops;
264
265 extern struct filterops sig_filtops;
266
267 /* Timer filter */
268 static int filt_timerattach(struct knote *kn);
269 static void filt_timerdetach(struct knote *kn);
270 static int filt_timer(struct knote *kn, long hint);
271 static int filt_timertouch(struct knote *kn, struct kevent_internal_s *kev);
272 static int filt_timerprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
273 static struct filterops timer_filtops = {
274 .f_attach = filt_timerattach,
275 .f_detach = filt_timerdetach,
276 .f_event = filt_timer,
277 .f_touch = filt_timertouch,
278 .f_process = filt_timerprocess,
279 };
280
281 /* Helpers */
282 static void filt_timerexpire(void *knx, void *param1);
283 static int filt_timervalidate(struct knote *kn);
284 static void filt_timerupdate(struct knote *kn, int num_fired);
285 static void filt_timercancel(struct knote *kn);
286
287 #define TIMER_RUNNING 0x1
288 #define TIMER_CANCELWAIT 0x2
289
290 static lck_mtx_t _filt_timerlock;
291 static void filt_timerlock(void);
292 static void filt_timerunlock(void);
293
294 static zone_t knote_zone;
295 static zone_t kqfile_zone;
296 static zone_t kqworkq_zone;
297
298 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
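/* e.g. KN_HASH(0x1234, 0xff) == ((0x1234 ^ 0x12) & 0xff) == 0x26 */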
299
300 #if 0
301 extern struct filterops aio_filtops;
302 #endif
303
304 /* Mach portset filter */
305 extern struct filterops machport_filtops;
306
307 /* User filter */
308 static int filt_userattach(struct knote *kn);
309 static void filt_userdetach(struct knote *kn);
310 static int filt_user(struct knote *kn, long hint);
311 static int filt_usertouch(struct knote *kn, struct kevent_internal_s *kev);
312 static int filt_userprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
313 static struct filterops user_filtops = {
314 .f_attach = filt_userattach,
315 .f_detach = filt_userdetach,
316 .f_event = filt_user,
317 .f_touch = filt_usertouch,
318 .f_process = filt_userprocess,
319 };
320
321 static lck_spin_t _filt_userlock;
322 static void filt_userlock(void);
323 static void filt_userunlock(void);
324
325 extern struct filterops pipe_rfiltops;
326 extern struct filterops pipe_wfiltops;
327 extern struct filterops ptsd_kqops;
328 extern struct filterops soread_filtops;
329 extern struct filterops sowrite_filtops;
330 extern struct filterops sock_filtops;
331 extern struct filterops soexcept_filtops;
332 extern struct filterops spec_filtops;
333 extern struct filterops bpfread_filtops;
334 extern struct filterops necp_fd_rfiltops;
335 extern struct filterops skywalk_channel_rfiltops;
336 extern struct filterops skywalk_channel_wfiltops;
337 extern struct filterops fsevent_filtops;
338 extern struct filterops vnode_filtops;
339
340 /*
341 *
342 * Rules for adding new filters to the system:
343 * Public filters:
344 * - Add a new "EVFILT_" option value to bsd/sys/event.h (typically a negative value)
345 * in the exported section of the header
346 * - Update the EVFILT_SYSCOUNT value to reflect the new addition
347 * - Add a filterops to the sysfilt_ops array. Public filters should be added at the end
348 * of the Public Filters section in the array.
349 * Private filters:
350 * - Add a new "EVFILT_" value to bsd/sys/event.h (typically a positive value)
351 * in the XNU_KERNEL_PRIVATE section of the header
352 * - Update the EVFILTID_MAX value to reflect the new addition
353 * - Add a filterops to the sysfilt_ops. Private filters should be added at the end of
354 * the Private filters section of the array.
355 */
356 static struct filterops *sysfilt_ops[EVFILTID_MAX] = {
357 /* Public Filters */
358 [~EVFILT_READ] = &file_filtops,
359 [~EVFILT_WRITE] = &file_filtops,
360 [~EVFILT_AIO] = &bad_filtops,
361 [~EVFILT_VNODE] = &file_filtops,
362 [~EVFILT_PROC] = &proc_filtops,
363 [~EVFILT_SIGNAL] = &sig_filtops,
364 [~EVFILT_TIMER] = &timer_filtops,
365 [~EVFILT_MACHPORT] = &machport_filtops,
366 [~EVFILT_FS] = &fs_filtops,
367 [~EVFILT_USER] = &user_filtops,
368 &bad_filtops,
369 &bad_filtops,
370 [~EVFILT_SOCK] = &file_filtops,
371 #if CONFIG_MEMORYSTATUS
372 [~EVFILT_MEMORYSTATUS] = &memorystatus_filtops,
373 #else
374 [~EVFILT_MEMORYSTATUS] = &bad_filtops,
375 #endif
376 [~EVFILT_EXCEPT] = &file_filtops,
377
378 /* Private filters */
379 [EVFILTID_KQREAD] = &kqread_filtops,
380 [EVFILTID_PIPE_R] = &pipe_rfiltops,
381 [EVFILTID_PIPE_W] = &pipe_wfiltops,
382 [EVFILTID_PTSD] = &ptsd_kqops,
383 [EVFILTID_SOREAD] = &soread_filtops,
384 [EVFILTID_SOWRITE] = &sowrite_filtops,
385 [EVFILTID_SCK] = &sock_filtops,
386 [EVFILTID_SOEXCEPT] = &soexcept_filtops,
387 [EVFILTID_SPEC] = &spec_filtops,
388 [EVFILTID_BPFREAD] = &bpfread_filtops,
389 [EVFILTID_NECP_FD] = &necp_fd_rfiltops,
390 [EVFILTID_FSEVENT] = &fsevent_filtops,
391 [EVFILTID_VN] = &vnode_filtops
392 };
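/*
 * Illustrative sketch only (hypothetical names, not part of this table):
 * following the rules above, a new private filter "example" would get an
 * EVFILTID_EXAMPLE id at the end of the XNU_KERNEL_PRIVATE section of
 * bsd/sys/event.h, a bumped EVFILTID_MAX, and an entry appended here:
 *
 *	static struct filterops example_filtops = {
 *		.f_attach  = filt_exampleattach,
 *		.f_detach  = filt_exampledetach,
 *		.f_event   = filt_example,
 *		.f_touch   = filt_exampletouch,
 *		.f_process = filt_exampleprocess,
 *	};
 *	...
 *	[EVFILTID_EXAMPLE] = &example_filtops,
 */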
393
394 /* waitq prepost callback */
395 void waitq_set__CALLING_PREPOST_HOOK__(void *kq_hook, void *knote_hook, int qos);
396
397 #ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
398 #define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 /* pthread event manager bit */
399 #endif
400 #ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
401 #define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 /* request overcommit threads */
402 #endif
403 #ifndef _PTHREAD_PRIORITY_QOS_CLASS_MASK
404 #define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x003fff00 /* QoS class mask */
405 #endif
406 #ifndef _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32
407 #define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32 8
408 #endif
409
410 static inline
411 qos_t canonicalize_kevent_qos(qos_t qos)
412 {
413 unsigned long canonical;
414
415 /* preserve manager and overcommit flags in this case */
416 canonical = pthread_priority_canonicalize(qos, FALSE);
417 return (qos_t)canonical;
418 }
419
420 static inline
421 kq_index_t qos_index_from_qos(qos_t qos, boolean_t propagation)
422 {
423 kq_index_t qos_index;
424 unsigned long flags = 0;
425
426 qos_index = (kq_index_t)thread_qos_from_pthread_priority(
427 (unsigned long)qos, &flags);
428
429 if (!propagation && (flags & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG))
430 return KQWQ_QOS_MANAGER;
431
432 return qos_index;
433 }
434
435 static inline
436 qos_t qos_from_qos_index(kq_index_t qos_index)
437 {
438 if (qos_index == KQWQ_QOS_MANAGER)
439 return _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
440
441 if (qos_index == 0)
442 return 0; /* Unspecified */
443
444 	/* Requires support from the pthread kext */
445 return (1 << (qos_index - 1 +
446 _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32));
447 }
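/*
 * e.g. with _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32 == 8, qos_index 3 maps to
 * (1 << (3 - 1 + 8)) == 0x400 inside _PTHREAD_PRIORITY_QOS_CLASS_MASK, while
 * KQWQ_QOS_MANAGER maps to the event manager flag bit instead.
 */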
448
449 static inline
450 kq_index_t qos_index_for_servicer(int qos_class, thread_t thread, int flags)
451 {
452 kq_index_t qos_index;
453
454 if (flags & KEVENT_FLAG_WORKQ_MANAGER)
455 return KQWQ_QOS_MANAGER;
456
457 /*
458 * If the caller didn't pass in a class (legacy pthread kext)
459 	 * then we use the thread policy QoS of the current thread.
460 */
461 assert(qos_class != -1);
462 if (qos_class == -1)
463 qos_index = proc_get_thread_policy(thread,
464 TASK_POLICY_ATTRIBUTE,
465 TASK_POLICY_QOS);
466 else
467 qos_index = (kq_index_t)qos_class;
468
469 assert(qos_index > 0 && qos_index < KQWQ_NQOS);
470
471 return qos_index;
472 }
473
474 /*
475 * kqueue/note lock implementations
476 *
477 * The kqueue lock guards the kq state, the state of its queues,
478 * and the kqueue-aware status and use counts of individual knotes.
479 *
480 * The kqueue workq lock is used to protect state guarding the
481 * interaction of the kqueue with the workq. This state cannot
482 * be guarded by the kq lock - as it needs to be taken when we
483 * already have the waitq set lock held (during the waitq hook
484 * callback). It might be better to use the waitq lock itself
485 * for this, but the IRQ requirements make that difficult.
486 *
487 * Knote flags, filter flags, and associated data are protected
488 * by the underlying object lock - and are only ever looked at
489 * by calling the filter to get a [consistent] snapshot of that
490 * data.
491 */
492 lck_grp_attr_t * kq_lck_grp_attr;
493 lck_grp_t * kq_lck_grp;
494 lck_attr_t * kq_lck_attr;
495
496 static inline void
497 kqlock(struct kqueue *kq)
498 {
499 lck_spin_lock(&kq->kq_lock);
500 }
501
502 static inline void
503 kqunlock(struct kqueue *kq)
504 {
505 lck_spin_unlock(&kq->kq_lock);
506 }
507
508
509 /*
510 * Convert a kq lock to a knote use reference.
511 *
512 * If the knote is being dropped, or has
513 * vanished, we can't get a use reference.
514 * Just return with it still locked.
515 *
516 * - kq locked at entry
517 * - unlock on exit if we get the use reference
518 */
519 static int
520 kqlock2knoteuse(struct kqueue *kq, struct knote *kn)
521 {
522 if (kn->kn_status & (KN_DROPPING | KN_VANISHED))
523 return (0);
524
525 assert(kn->kn_status & KN_ATTACHED);
526 kn->kn_inuse++;
527 kqunlock(kq);
528 return (1);
529 }
530
531
532 /*
533 * Convert from a knote use reference back to kq lock.
534 *
535 * Drop a use reference and wake any waiters if
536 * this is the last one.
537 *
538 * If someone is trying to drop the knote, but the
539 * caller has events they must deliver, take
540 * responsibility for the drop later - and wake the
541 * other attempted dropper in a manner that informs
542 * him of the transfer of responsibility.
543 *
544 * The exit return indicates if the knote is still alive
545 * (or if not, the other dropper has been given the green
546 * light to drop it).
547 *
548 * The kqueue lock is re-taken unconditionally.
549 */
550 static int
551 knoteuse2kqlock(struct kqueue *kq, struct knote *kn, int steal_drop)
552 {
553 int dropped = 0;
554
555 kqlock(kq);
556 if (--kn->kn_inuse == 0) {
557
558 if ((kn->kn_status & KN_ATTACHING) != 0) {
559 kn->kn_status &= ~KN_ATTACHING;
560 }
561
562 if ((kn->kn_status & KN_USEWAIT) != 0) {
563 wait_result_t result;
564
565 /* If we need to, try and steal the drop */
566 if (kn->kn_status & KN_DROPPING) {
567 if (steal_drop && !(kn->kn_status & KN_STOLENDROP)) {
568 kn->kn_status |= KN_STOLENDROP;
569 } else {
570 dropped = 1;
571 }
572 }
573
574 /* wakeup indicating if ANY USE stole the drop */
575 result = (kn->kn_status & KN_STOLENDROP) ?
576 THREAD_RESTART : THREAD_AWAKENED;
577
578 kn->kn_status &= ~KN_USEWAIT;
579 waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
580 CAST_EVENT64_T(&kn->kn_status),
581 result,
582 WAITQ_ALL_PRIORITIES);
583 } else {
584 /* should have seen use-wait if dropping with use refs */
585 assert((kn->kn_status & (KN_DROPPING|KN_STOLENDROP)) == 0);
586 }
587
588 } else if (kn->kn_status & KN_DROPPING) {
589 /* not the last ref but want to steal a drop if present */
590 if (steal_drop && ((kn->kn_status & KN_STOLENDROP) == 0)) {
591 kn->kn_status |= KN_STOLENDROP;
592
593 /* but we now have to wait to be the last ref */
594 kn->kn_status |= KN_USEWAIT;
595 waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
596 CAST_EVENT64_T(&kn->kn_status),
597 THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
598 kqunlock(kq);
599 thread_block(THREAD_CONTINUE_NULL);
600 kqlock(kq);
601 } else {
602 dropped = 1;
603 }
604 }
605
606 return (!dropped);
607 }
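/*
 * Typical pairing of the two conversions above (illustrative sketch of the
 * pattern used by callers later in this file; "hint" is caller-supplied):
 *
 *	kqlock(kq);
 *	if (kqlock2knoteuse(kq, kn)) {
 *		// kq now unlocked: safe to call into the filter
 *		result = knote_fops(kn)->f_event(kn, hint);
 *		if (knoteuse2kqlock(kq, kn, 0)) {
 *			// knote still alive, kq lock held again
 *		}
 *	}
 *	kqunlock(kq);
 */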
608
609 /*
610 * Convert a kq lock to a knote use reference
611 * (for the purpose of detaching AND vanishing it).
612 *
613 * If the knote is being dropped, we can't get
614 * a detach reference, so wait for the knote to
615 * finish dropping before returning.
616 *
617 * If the knote is being used for other purposes,
618 * we cannot detach it until those uses are done
619 * as well. Again, just wait for them to finish
620 * (caller will start over at lookup).
621 *
622 * - kq locked at entry
623 * - unlocked on exit
624 */
625 static int
626 kqlock2knotedetach(struct kqueue *kq, struct knote *kn)
627 {
628 if ((kn->kn_status & KN_DROPPING) || kn->kn_inuse) {
629 /* have to wait for dropper or current uses to go away */
630 kn->kn_status |= KN_USEWAIT;
631 waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
632 CAST_EVENT64_T(&kn->kn_status),
633 THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
634 kqunlock(kq);
635 thread_block(THREAD_CONTINUE_NULL);
636 return (0);
637 }
638 assert((kn->kn_status & KN_VANISHED) == 0);
639 assert(kn->kn_status & KN_ATTACHED);
640 kn->kn_status &= ~KN_ATTACHED;
641 kn->kn_status |= KN_VANISHED;
642 kn->kn_inuse++;
643 kqunlock(kq);
644 return (1);
645 }
646
647 /*
648 * Convert a kq lock to a knote drop reference.
649 *
650 * If the knote is in use, wait for the use count
651 * to subside. We first mark our intention to drop
652 * it - keeping other users from "piling on."
653 * If we are too late, we have to wait for the
654 * other drop to complete.
655 *
656 * - kq locked at entry
657 * - always unlocked on exit.
658 * - caller can't hold any locks that would prevent
659 * the other dropper from completing.
660 */
661 static int
662 kqlock2knotedrop(struct kqueue *kq, struct knote *kn)
663 {
664 int oktodrop;
665 wait_result_t result;
666
667 oktodrop = ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) == 0);
668 /* if another thread is attaching, they will become the dropping thread */
669 kn->kn_status |= KN_DROPPING;
670 knote_unsuppress(kn);
671 knote_dequeue(kn);
672 if (oktodrop) {
673 if (kn->kn_inuse == 0) {
674 kqunlock(kq);
675 return (oktodrop);
676 }
677 }
678 kn->kn_status |= KN_USEWAIT;
679 waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
680 CAST_EVENT64_T(&kn->kn_status),
681 THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
682 kqunlock(kq);
683 result = thread_block(THREAD_CONTINUE_NULL);
684 /* THREAD_RESTART == another thread stole the knote drop */
685 return (result == THREAD_AWAKENED);
686 }
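/*
 * Illustrative drop pattern (a sketch of how teardown paths such as
 * kqueue_dealloc() below use this conversion):
 *
 *	kqlock(kq);
 *	if (kqlock2knotedrop(kq, kn)) {
 *		// we own the drop; kq is unlocked
 *		knote_drop(kn, p);
 *	}
 *	// otherwise another thread took (or stole) responsibility for it
 */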
687
688 #if 0
689 /*
690 * Release a knote use count reference.
691 */
692 static void
693 knote_put(struct knote *kn)
694 {
695 struct kqueue *kq = knote_get_kq(kn);
696
697 kqlock(kq);
698 if (--kn->kn_inuse == 0) {
699 if ((kn->kn_status & KN_USEWAIT) != 0) {
700 kn->kn_status &= ~KN_USEWAIT;
701 waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
702 CAST_EVENT64_T(&kn->kn_status),
703 THREAD_AWAKENED,
704 WAITQ_ALL_PRIORITIES);
705 }
706 }
707 kqunlock(kq);
708 }
709 #endif
710
711 static int
712 filt_fileattach(struct knote *kn)
713 {
714 return (fo_kqfilter(kn->kn_fp, kn, vfs_context_current()));
715 }
716
717 #define f_flag f_fglob->fg_flag
718 #define f_msgcount f_fglob->fg_msgcount
719 #define f_cred f_fglob->fg_cred
720 #define f_ops f_fglob->fg_ops
721 #define f_offset f_fglob->fg_offset
722 #define f_data f_fglob->fg_data
723
724 static void
725 filt_kqdetach(struct knote *kn)
726 {
727 struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
728 struct kqueue *kq = &kqf->kqf_kqueue;
729
730 kqlock(kq);
731 KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
732 kqunlock(kq);
733 }
734
735 /*ARGSUSED*/
736 static int
737 filt_kqueue(struct knote *kn, __unused long hint)
738 {
739 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
740 int count;
741
742 count = kq->kq_count;
743 return (count > 0);
744 }
745
746 static int
747 filt_kqtouch(struct knote *kn, struct kevent_internal_s *kev)
748 {
749 #pragma unused(kev)
750 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
751 int res;
752
753 kqlock(kq);
754 kn->kn_data = kq->kq_count;
755 if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
756 kn->kn_udata = kev->udata;
757 res = (kn->kn_data > 0);
758
759 kqunlock(kq);
760
761 return res;
762 }
763
764 static int
765 filt_kqprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
766 {
767 #pragma unused(data)
768 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
769 int res;
770
771 kqlock(kq);
772 kn->kn_data = kq->kq_count;
773 res = (kn->kn_data > 0);
774 if (res) {
775 *kev = kn->kn_kevent;
776 if (kn->kn_flags & EV_CLEAR)
777 kn->kn_data = 0;
778 }
779 kqunlock(kq);
780
781 return res;
782 }
783
784 static int
785 filt_procattach(struct knote *kn)
786 {
787 struct proc *p;
788
789 assert(PID_MAX < NOTE_PDATAMASK);
790
791 if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) {
792 kn->kn_flags = EV_ERROR;
793 kn->kn_data = ENOTSUP;
794 return 0;
795 }
796
797 p = proc_find(kn->kn_id);
798 if (p == NULL) {
799 kn->kn_flags = EV_ERROR;
800 kn->kn_data = ESRCH;
801 return 0;
802 }
803
804 const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
805
806 if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits)
807 do {
808 pid_t selfpid = proc_selfpid();
809
810 if (p->p_ppid == selfpid)
811 break; /* parent => ok */
812
813 if ((p->p_lflag & P_LTRACED) != 0 &&
814 (p->p_oppid == selfpid))
815 break; /* parent-in-waiting => ok */
816
817 proc_rele(p);
818 kn->kn_flags = EV_ERROR;
819 kn->kn_data = EACCES;
820 return 0;
821 } while (0);
822
823 proc_klist_lock();
824
825 kn->kn_ptr.p_proc = p; /* store the proc handle */
826
827 KNOTE_ATTACH(&p->p_klist, kn);
828
829 proc_klist_unlock();
830
831 proc_rele(p);
832
833 /*
834 	 * this filter only captures edge-triggered events after this point,
835 	 * so it can't already have fired.
836 */
837 return (0);
838 }
839
840
841 /*
842 * The knote may be attached to a different process, which may exit,
843 * leaving nothing for the knote to be attached to. In that case,
844 * the pointer to the process will have already been nulled out.
845 */
846 static void
847 filt_procdetach(struct knote *kn)
848 {
849 struct proc *p;
850
851 proc_klist_lock();
852
853 p = kn->kn_ptr.p_proc;
854 if (p != PROC_NULL) {
855 kn->kn_ptr.p_proc = PROC_NULL;
856 KNOTE_DETACH(&p->p_klist, kn);
857 }
858
859 proc_klist_unlock();
860 }
861
862 static int
863 filt_proc(struct knote *kn, long hint)
864 {
865 u_int event;
866
867 /* ALWAYS CALLED WITH proc_klist_lock */
868
869 /*
870 * Note: a lot of bits in hint may be obtained from the knote
871 * To free some of those bits, see <rdar://problem/12592988> Freeing up
872 * bits in hint for filt_proc
873 *
874 * mask off extra data
875 */
876 event = (u_int)hint & NOTE_PCTRLMASK;
877
878 /*
879 * termination lifecycle events can happen while a debugger
880 * has reparented a process, in which case notifications
881 * should be quashed except to the tracing parent. When
882 * the debugger reaps the child (either via wait4(2) or
883 * process exit), the child will be reparented to the original
884 * parent and these knotes re-fired.
885 */
886 if (event & NOTE_EXIT) {
887 if ((kn->kn_ptr.p_proc->p_oppid != 0)
888 && (knote_get_kq(kn)->kq_p->p_pid != kn->kn_ptr.p_proc->p_ppid)) {
889 /*
890 * This knote is not for the current ptrace(2) parent, ignore.
891 */
892 return 0;
893 }
894 }
895
896 /*
897 * if the user is interested in this event, record it.
898 */
899 if (kn->kn_sfflags & event)
900 kn->kn_fflags |= event;
901
902 #pragma clang diagnostic push
903 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
904 if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
905 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
906 }
907 #pragma clang diagnostic pop
908
909
910 /*
911 * The kernel has a wrapper in place that returns the same data
912 * as is collected here, in kn_data. Any changes to how
913 * NOTE_EXITSTATUS and NOTE_EXIT_DETAIL are collected
914 * should also be reflected in the proc_pidnoteexit() wrapper.
915 */
916 if (event == NOTE_EXIT) {
917 kn->kn_data = 0;
918 if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
919 kn->kn_fflags |= NOTE_EXITSTATUS;
920 kn->kn_data |= (hint & NOTE_PDATAMASK);
921 }
922 if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
923 kn->kn_fflags |= NOTE_EXIT_DETAIL;
924 if ((kn->kn_ptr.p_proc->p_lflag &
925 P_LTERM_DECRYPTFAIL) != 0) {
926 kn->kn_data |= NOTE_EXIT_DECRYPTFAIL;
927 }
928 if ((kn->kn_ptr.p_proc->p_lflag &
929 P_LTERM_JETSAM) != 0) {
930 kn->kn_data |= NOTE_EXIT_MEMORY;
931 switch (kn->kn_ptr.p_proc->p_lflag & P_JETSAM_MASK) {
932 case P_JETSAM_VMPAGESHORTAGE:
933 kn->kn_data |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
934 break;
935 case P_JETSAM_VMTHRASHING:
936 kn->kn_data |= NOTE_EXIT_MEMORY_VMTHRASHING;
937 break;
938 case P_JETSAM_FCTHRASHING:
939 kn->kn_data |= NOTE_EXIT_MEMORY_FCTHRASHING;
940 break;
941 case P_JETSAM_VNODE:
942 kn->kn_data |= NOTE_EXIT_MEMORY_VNODE;
943 break;
944 case P_JETSAM_HIWAT:
945 kn->kn_data |= NOTE_EXIT_MEMORY_HIWAT;
946 break;
947 case P_JETSAM_PID:
948 kn->kn_data |= NOTE_EXIT_MEMORY_PID;
949 break;
950 case P_JETSAM_IDLEEXIT:
951 kn->kn_data |= NOTE_EXIT_MEMORY_IDLE;
952 break;
953 }
954 }
955 if ((kn->kn_ptr.p_proc->p_csflags &
956 CS_KILLED) != 0) {
957 kn->kn_data |= NOTE_EXIT_CSERROR;
958 }
959 }
960 }
961
962 /* if we have any matching state, activate the knote */
963 return (kn->kn_fflags != 0);
964 }
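/*
 * Illustrative user-space use of this filter (a minimal sketch, not part of
 * the kernel sources; "pid" is the process being watched): wait for exit and
 * recover the wait(2)-style status from the returned data when
 * NOTE_EXITSTATUS was requested.
 *
 *	#include <sys/event.h>
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT,
 *	    NOTE_EXIT | NOTE_EXITSTATUS, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);        // register
 *	kevent(kq, NULL, 0, &kev, 1, NULL);        // blocks until NOTE_EXIT
 *	int status = (int)(kev.data & NOTE_PDATAMASK);
 */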
965
966 static int
967 filt_proctouch(struct knote *kn, struct kevent_internal_s *kev)
968 {
969 int res;
970
971 proc_klist_lock();
972
973 	/* accept new filter flags and mask off output events no longer interesting */
974 kn->kn_sfflags = kev->fflags;
975 if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
976 kn->kn_udata = kev->udata;
977
978 /* restrict the current results to the (smaller?) set of new interest */
979 /*
980 * For compatibility with previous implementations, we leave kn_fflags
981 * as they were before.
982 */
983 //kn->kn_fflags &= kn->kn_sfflags;
984
985 res = (kn->kn_fflags != 0);
986
987 proc_klist_unlock();
988
989 return res;
990 }
991
992 static int
993 filt_procprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
994 {
995 #pragma unused(data)
996 int res;
997
998 proc_klist_lock();
999 res = (kn->kn_fflags != 0);
1000 if (res) {
1001 *kev = kn->kn_kevent;
1002 kn->kn_flags |= EV_CLEAR; /* automatically set */
1003 kn->kn_fflags = 0;
1004 kn->kn_data = 0;
1005 }
1006 proc_klist_unlock();
1007 return res;
1008 }
1009
1010 /*
1011 * filt_timervalidate - process data from user
1012 *
1013 * Converts to either interval or deadline format.
1014 *
1015 * The saved-data field in the knote contains the
1016 * time value. The saved filter-flags indicates
1017 * the unit of measurement.
1018 *
1019 * After validation, either the saved-data field
1020 * contains the interval in absolute time, or ext[0]
1021 * contains the expected deadline. If that deadline
1022 * is in the past, ext[0] is 0.
1023 *
1024 * Returns EINVAL for unrecognized units of time.
1025 *
1026 * Timer filter lock is held.
1027 *
1028 */
1029 static int
1030 filt_timervalidate(struct knote *kn)
1031 {
1032 uint64_t multiplier;
1033 uint64_t raw = 0;
1034
1035 switch (kn->kn_sfflags & (NOTE_SECONDS|NOTE_USECONDS|NOTE_NSECONDS)) {
1036 case NOTE_SECONDS:
1037 multiplier = NSEC_PER_SEC;
1038 break;
1039 case NOTE_USECONDS:
1040 multiplier = NSEC_PER_USEC;
1041 break;
1042 case NOTE_NSECONDS:
1043 multiplier = 1;
1044 break;
1045 case 0: /* milliseconds (default) */
1046 multiplier = NSEC_PER_SEC / 1000;
1047 break;
1048 default:
1049 return (EINVAL);
1050 }
1051
1052 	/* if a slop delta (leeway) was passed in kn_ext[1], convert it to the same time scale */
1053 	if (kn->kn_sfflags & NOTE_LEEWAY) {
1054 nanoseconds_to_absolutetime((uint64_t)kn->kn_ext[1] * multiplier, &raw);
1055 kn->kn_ext[1] = raw;
1056 }
1057
1058 nanoseconds_to_absolutetime((uint64_t)kn->kn_sdata * multiplier, &raw);
1059
1060 kn->kn_ext[0] = 0;
1061 kn->kn_sdata = 0;
1062
1063 if (kn->kn_sfflags & NOTE_ABSOLUTE) {
1064 clock_sec_t seconds;
1065 clock_nsec_t nanoseconds;
1066 uint64_t now;
1067
1068 clock_get_calendar_nanotime(&seconds, &nanoseconds);
1069 nanoseconds_to_absolutetime((uint64_t)seconds * NSEC_PER_SEC +
1070 nanoseconds, &now);
1071
1072 /* if time is in the future */
1073 if (now < raw) {
1074 raw -= now;
1075
1076 if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
1077 clock_continuoustime_interval_to_deadline(raw,
1078 &kn->kn_ext[0]);
1079 } else {
1080 clock_absolutetime_interval_to_deadline(raw,
1081 &kn->kn_ext[0]);
1082 }
1083 }
1084 } else {
1085 kn->kn_sdata = raw;
1086 }
1087
1088 return (0);
1089 }
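/*
 * e.g. a relative timer registered with fflags == 0 (default milliseconds)
 * and data == 500 validates to kn_sdata = absolutetime(500 * NSEC_PER_SEC /
 * 1000) and kn_ext[0] == 0; a NOTE_ABSOLUTE | NOTE_SECONDS timer ends up
 * with kn_sdata == 0 and kn_ext[0] holding the converted deadline (left 0
 * if that deadline is already in the past).
 */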
1090
1091 /*
1092 * filt_timerupdate - compute the next deadline
1093 *
1094 * Repeating timers store their interval in kn_sdata. Absolute
1095 * timers have already calculated the deadline, stored in ext[0].
1096 *
1097 * On return, the next deadline (or zero if no deadline is needed)
1098 * is stored in kn_ext[0].
1099 *
1100 * Timer filter lock is held.
1101 */
1102 static void
1103 filt_timerupdate(struct knote *kn, int num_fired)
1104 {
1105 assert(num_fired > 0);
1106
1107 /* if there's no interval, deadline is just in kn_ext[0] */
1108 if (kn->kn_sdata == 0)
1109 return;
1110
1111 	/* if timer hasn't fired before, fire one interval from now */
1112 if (kn->kn_ext[0] == 0) {
1113 assert(num_fired == 1);
1114 if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
1115 clock_continuoustime_interval_to_deadline(kn->kn_sdata,
1116 &kn->kn_ext[0]);
1117 } else {
1118 clock_absolutetime_interval_to_deadline(kn->kn_sdata,
1119 &kn->kn_ext[0]);
1120 }
1121 } else {
1122 /*
1123 * If timer has fired before, schedule the next pop
1124 * relative to the last intended deadline.
1125 *
1126 * We could check for whether the deadline has expired,
1127 * but the thread call layer can handle that.
1128 *
1129 * Go forward an additional number of periods, in the case the
1130 * timer fired multiple times while the system was asleep.
1131 */
1132 kn->kn_ext[0] += (kn->kn_sdata * num_fired);
1133 }
1134 }
1135
1136 /*
1137 * filt_timerexpire - the timer callout routine
1138 *
1139 * Just propagate the timer event into the knote
1140 * filter routine (by going through the knote
1141 * synchronization point). Pass a hint to
1142 * indicate this is a real event, not just a
1143 * query from above.
1144 */
1145 static void
1146 filt_timerexpire(void *knx, __unused void *spare)
1147 {
1148 struct klist timer_list;
1149 struct knote *kn = knx;
1150
1151 filt_timerlock();
1152
1153 kn->kn_hookid &= ~TIMER_RUNNING;
1154
1155 /* no "object" for timers, so fake a list */
1156 SLIST_INIT(&timer_list);
1157 SLIST_INSERT_HEAD(&timer_list, kn, kn_selnext);
1158 KNOTE(&timer_list, 1);
1159
1160 /* if someone is waiting for timer to pop */
1161 if (kn->kn_hookid & TIMER_CANCELWAIT) {
1162 struct kqueue *kq = knote_get_kq(kn);
1163 waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
1164 CAST_EVENT64_T(&kn->kn_hook),
1165 THREAD_AWAKENED,
1166 WAITQ_ALL_PRIORITIES);
1167 }
1168
1169 filt_timerunlock();
1170 }
1171
1172 /*
1173 * Cancel a running timer (or wait for the pop).
1174 * Timer filter lock is held.
1175 */
1176 static void
1177 filt_timercancel(struct knote *kn)
1178 {
1179 struct kqueue *kq = knote_get_kq(kn);
1180 thread_call_t callout = kn->kn_hook;
1181 boolean_t cancelled;
1182
1183 if (kn->kn_hookid & TIMER_RUNNING) {
1184 /* cancel the callout if we can */
1185 cancelled = thread_call_cancel(callout);
1186 if (cancelled) {
1187 kn->kn_hookid &= ~TIMER_RUNNING;
1188 } else {
1189 /* we have to wait for the expire routine. */
1190 kn->kn_hookid |= TIMER_CANCELWAIT;
1191 waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
1192 CAST_EVENT64_T(&kn->kn_hook),
1193 THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
1194 filt_timerunlock();
1195 thread_block(THREAD_CONTINUE_NULL);
1196 filt_timerlock();
1197 assert((kn->kn_hookid & TIMER_RUNNING) == 0);
1198 }
1199 }
1200 }
1201
1202 /*
1203 * Allocate a thread call for the knote's lifetime, and kick off the timer.
1204 */
1205 static int
1206 filt_timerattach(struct knote *kn)
1207 {
1208 thread_call_t callout;
1209 int error;
1210 int res;
1211
1212 callout = thread_call_allocate(filt_timerexpire, kn);
1213 if (NULL == callout) {
1214 kn->kn_flags = EV_ERROR;
1215 kn->kn_data = ENOMEM;
1216 return 0;
1217 }
1218
1219 filt_timerlock();
1220 error = filt_timervalidate(kn);
1221 if (error != 0) {
1222 filt_timerunlock();
1223 thread_call_free(callout);
1224 kn->kn_flags = EV_ERROR;
1225 kn->kn_data = error;
1226 return 0;
1227 }
1228
1229 kn->kn_hook = (void*)callout;
1230 kn->kn_hookid = 0;
1231
1232 	/* absolute timers are implicitly EV_ONESHOT */
1233 if (kn->kn_sfflags & NOTE_ABSOLUTE)
1234 kn->kn_flags |= EV_ONESHOT;
1235
1236 filt_timerupdate(kn, 1);
1237 if (kn->kn_ext[0]) {
1238 kn->kn_flags |= EV_CLEAR;
1239 unsigned int timer_flags = 0;
1240 if (kn->kn_sfflags & NOTE_CRITICAL)
1241 timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
1242 else if (kn->kn_sfflags & NOTE_BACKGROUND)
1243 timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
1244 else
1245 timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
1246
1247 if (kn->kn_sfflags & NOTE_LEEWAY)
1248 timer_flags |= THREAD_CALL_DELAY_LEEWAY;
1249 if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME)
1250 timer_flags |= THREAD_CALL_CONTINUOUS;
1251
1252 thread_call_enter_delayed_with_leeway(callout, NULL,
1253 kn->kn_ext[0], kn->kn_ext[1], timer_flags);
1254
1255 kn->kn_hookid |= TIMER_RUNNING;
1256 } else {
1257 /* fake immediate */
1258 kn->kn_data = 1;
1259 }
1260
1261 res = (kn->kn_data > 0);
1262
1263 filt_timerunlock();
1264
1265 return res;
1266 }
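/*
 * Illustrative user-space registration (a minimal sketch, not part of the
 * kernel sources; "kq" is an existing kqueue(2) descriptor): a repeating
 * 500ms timer using the default millisecond units accepted by
 * filt_timervalidate() above.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	// each delivered event carries the accumulated pop count in kev.data
 */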
1267
1268 /*
1269 * Shut down the timer if it's running, and free the callout.
1270 */
1271 static void
1272 filt_timerdetach(struct knote *kn)
1273 {
1274 thread_call_t callout;
1275
1276 filt_timerlock();
1277
1278 callout = (thread_call_t)kn->kn_hook;
1279 filt_timercancel(kn);
1280
1281 filt_timerunlock();
1282
1283 thread_call_free(callout);
1284 }
1285
1286
1287 static int filt_timer_num_fired(struct knote *kn)
1288 {
1289 /* by default we fire a timer once */
1290 int num_fired = 1;
1291
1292 /*
1293 * When the time base is mach_continuous_time, we have to calculate
1294 * the number of times the timer fired while we were asleep.
1295 */
1296 if ((kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) &&
1297 (kn->kn_sdata != 0) &&
1298 (kn->kn_ext[0] != 0))
1299 {
1300 const uint64_t now = mach_continuous_time();
1301 // time for timer to fire (right now) is kn_ext[0]
1302 // kn_sdata is period for timer to fire
1303 assert(now >= kn->kn_ext[0]);
1304 assert(kn->kn_sdata > 0);
1305
1306 const uint64_t overrun_ticks = now - kn->kn_ext[0];
1307 const uint64_t kn_sdata = kn->kn_sdata;
1308
1309 if (overrun_ticks < kn_sdata) {
1310 num_fired = 1;
1311 } else if (overrun_ticks < (kn_sdata << 1)) {
1312 num_fired = 2;
1313 } else {
1314 num_fired = (overrun_ticks / kn_sdata) + 1;
1315 }
1316 }
1317
1318 return num_fired;
1319 }
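/*
 * e.g. with a period (kn_sdata) of 10 ticks and a deadline (kn_ext[0]) that
 * is now 25 ticks in the past, overrun_ticks == 25 and num_fired ==
 * (25 / 10) + 1 == 3: the original pop plus the two periods slept through.
 */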
1320
1321 /*
1322 * filt_timer - post events to a timer knote
1323 *
1324 * Count the timer fire and re-arm as requested.
1325 * This always crosses the threshold of interest,
1326 * so always return an indication that the knote
1327 * should be activated (if not already).
1328 */
1329 static int
1330 filt_timer(
1331 struct knote *kn,
1332 long hint)
1333 {
1334 #pragma unused(hint)
1335
1336 /* real timer pop -- timer lock held by filt_timerexpire */
1337 int num_fired = filt_timer_num_fired(kn);
1338 kn->kn_data += num_fired;
1339
1340 if (((kn->kn_hookid & TIMER_CANCELWAIT) == 0) &&
1341 ((kn->kn_flags & EV_ONESHOT) == 0)) {
1342 /* evaluate next time to fire */
1343 filt_timerupdate(kn, num_fired);
1344
1345 if (kn->kn_ext[0]) {
1346 unsigned int timer_flags = 0;
1347
1348 /* keep the callout and re-arm */
1349 if (kn->kn_sfflags & NOTE_CRITICAL)
1350 timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
1351 else if (kn->kn_sfflags & NOTE_BACKGROUND)
1352 timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
1353 else
1354 timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
1355
1356 if (kn->kn_sfflags & NOTE_LEEWAY)
1357 timer_flags |= THREAD_CALL_DELAY_LEEWAY;
1358
1359 thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL,
1360 kn->kn_ext[0], kn->kn_ext[1], timer_flags);
1361
1362 kn->kn_hookid |= TIMER_RUNNING;
1363 }
1364 }
1365 return (1);
1366 }
1367
1368
1369
1370 /*
1371 * filt_timertouch - update timer knote with new user input
1372 *
1373 * Cancel and restart the timer based on new user data. When
1374 * the user picks up a knote, clear the count of how many timer
1375 * pops have gone off (in kn_data).
1376 */
1377 static int
1378 filt_timertouch(
1379 struct knote *kn,
1380 struct kevent_internal_s *kev)
1381 {
1382 int error;
1383 int res;
1384
1385 filt_timerlock();
1386
1387 /* cancel current call */
1388 filt_timercancel(kn);
1389
1390 /* capture the new values used to compute deadline */
1391 kn->kn_sdata = kev->data;
1392 kn->kn_sfflags = kev->fflags;
1393 kn->kn_ext[0] = kev->ext[0];
1394 kn->kn_ext[1] = kev->ext[1];
1395
1396 if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
1397 kn->kn_udata = kev->udata;
1398
1399 /* recalculate deadline */
1400 error = filt_timervalidate(kn);
1401 if (error) {
1402 /* no way to report error, so mark it in the knote */
1403 filt_timerunlock();
1404 kn->kn_flags |= EV_ERROR;
1405 kn->kn_data = error;
1406 return 1;
1407 }
1408
1409 /* start timer if necessary */
1410 filt_timerupdate(kn, 1);
1411
1412 if (kn->kn_ext[0]) {
1413 unsigned int timer_flags = 0;
1414 if (kn->kn_sfflags & NOTE_CRITICAL)
1415 timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
1416 else if (kn->kn_sfflags & NOTE_BACKGROUND)
1417 timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
1418 else
1419 timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
1420
1421 if (kn->kn_sfflags & NOTE_LEEWAY)
1422 timer_flags |= THREAD_CALL_DELAY_LEEWAY;
1423
1424 thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL,
1425 kn->kn_ext[0], kn->kn_ext[1], timer_flags);
1426
1427 kn->kn_hookid |= TIMER_RUNNING;
1428 } else {
1429 /* pretend the timer has fired */
1430 kn->kn_data = 1;
1431 }
1432
1433 /* capture if already fired */
1434 res = (kn->kn_data > 0);
1435
1436 filt_timerunlock();
1437
1438 return res;
1439 }
1440
1441 /*
1442 * filt_timerprocess - query state of knote and snapshot event data
1443 *
1444 * Determine if the timer has fired in the past, snapshot the state
1445 * of the kevent for returning to user-space, and clear pending event
1446 * counters for the next time.
1447 */
1448 static int
1449 filt_timerprocess(
1450 struct knote *kn,
1451 __unused struct filt_process_s *data,
1452 struct kevent_internal_s *kev)
1453 {
1454 filt_timerlock();
1455
1456 /* user-query */
1457 if (kn->kn_data == 0) {
1458 filt_timerunlock();
1459 return 0;
1460 }
1461
1462 /*
1463 * Copy out the interesting kevent state,
1464 * but don't leak out the raw time calculations.
1465 */
1466 *kev = kn->kn_kevent;
1467 kev->ext[0] = 0;
1468 /* kev->ext[1] = 0; JMM - shouldn't we hide this too? */
1469
1470 /*
1471 * reset the timer pop count in kn_data
1472 * and (optionally) clear the fflags.
1473 */
1474 kn->kn_data = 0;
1475 if (kn->kn_flags & EV_CLEAR)
1476 kn->kn_fflags = 0;
1477
1478 filt_timerunlock();
1479 return 1;
1480 }
1481
1482 static void
1483 filt_timerlock(void)
1484 {
1485 lck_mtx_lock(&_filt_timerlock);
1486 }
1487
1488 static void
1489 filt_timerunlock(void)
1490 {
1491 lck_mtx_unlock(&_filt_timerlock);
1492 }
1493
1494 static void
1495 filt_userlock(void)
1496 {
1497 lck_spin_lock(&_filt_userlock);
1498 }
1499
1500 static void
1501 filt_userunlock(void)
1502 {
1503 lck_spin_unlock(&_filt_userlock);
1504 }
1505
1506 static int
1507 filt_userattach(struct knote *kn)
1508 {
1509 /* EVFILT_USER knotes are not attached to anything in the kernel */
1510 	/* Can't discover this knote until after attach - so no lock needed */
1511 kn->kn_hook = NULL;
1512 if (kn->kn_fflags & NOTE_TRIGGER) {
1513 kn->kn_hookid = 1;
1514 } else {
1515 kn->kn_hookid = 0;
1516 }
1517 return (kn->kn_hookid);
1518 }
1519
1520 static void
1521 filt_userdetach(__unused struct knote *kn)
1522 {
1523 /* EVFILT_USER knotes are not attached to anything in the kernel */
1524 }
1525
1526 static int
1527 filt_user(
1528 __unused struct knote *kn,
1529 __unused long hint)
1530 {
1531 panic("filt_user");
1532 return 0;
1533 }
1534
1535 static int
1536 filt_usertouch(
1537 struct knote *kn,
1538 struct kevent_internal_s *kev)
1539 {
1540 uint32_t ffctrl;
1541 int fflags;
1542 int active;
1543
1544 filt_userlock();
1545
1546 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
1547 fflags = kev->fflags & NOTE_FFLAGSMASK;
1548 switch (ffctrl) {
1549 case NOTE_FFNOP:
1550 break;
1551 case NOTE_FFAND:
1552 kn->kn_sfflags &= fflags;
1553 break;
1554 case NOTE_FFOR:
1555 kn->kn_sfflags |= fflags;
1556 break;
1557 case NOTE_FFCOPY:
1558 kn->kn_sfflags = fflags;
1559 break;
1560 }
1561 kn->kn_sdata = kev->data;
1562
1563 if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
1564 kn->kn_udata = kev->udata;
1565
1566 if (kev->fflags & NOTE_TRIGGER) {
1567 kn->kn_hookid = 1;
1568 }
1569 active = kn->kn_hookid;
1570
1571 filt_userunlock();
1572
1573 return (active);
1574 }
1575
1576 static int
1577 filt_userprocess(
1578 struct knote *kn,
1579 __unused struct filt_process_s *data,
1580 struct kevent_internal_s *kev)
1581 {
1582 filt_userlock();
1583
1584 if (kn->kn_hookid == 0) {
1585 filt_userunlock();
1586 return 0;
1587 }
1588
1589 *kev = kn->kn_kevent;
1590 kev->fflags = (volatile UInt32)kn->kn_sfflags;
1591 kev->data = kn->kn_sdata;
1592 if (kn->kn_flags & EV_CLEAR) {
1593 kn->kn_hookid = 0;
1594 kn->kn_data = 0;
1595 kn->kn_fflags = 0;
1596 }
1597 filt_userunlock();
1598
1599 return 1;
1600 }
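/*
 * Illustrative user-space use of EVFILT_USER (a minimal sketch, not part of
 * the kernel sources; "kq" is an existing kqueue(2) descriptor): one thread
 * registers the knote, another triggers it.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);       // register (starts inactive)
 *
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER | NOTE_FFOR | 0x1, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);       // fire it from elsewhere
 *
 *	kevent(kq, NULL, 0, &kev, 1, NULL);       // wakes up with fflags == 0x1
 */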
1601
1602 /*
1603 * JMM - placeholder for not-yet-implemented filters
1604 */
1605 static int
1606 filt_badattach(__unused struct knote *kn)
1607 {
1608 kn->kn_flags |= EV_ERROR;
1609 kn->kn_data = ENOTSUP;
1610 return 0;
1611 }
1612
1613 struct kqueue *
1614 kqueue_alloc(struct proc *p, unsigned int flags)
1615 {
1616 struct filedesc *fdp = p->p_fd;
1617 struct kqueue *kq = NULL;
1618 int policy;
1619 void *hook;
1620 uint64_t kq_addr_offset;
1621
1622 if (flags & KEVENT_FLAG_WORKQ) {
1623 struct kqworkq *kqwq;
1624 int i;
1625
1626 kqwq = (struct kqworkq *)zalloc(kqworkq_zone);
1627 if (kqwq == NULL)
1628 return NULL;
1629
1630 kq = &kqwq->kqwq_kqueue;
1631 bzero(kqwq, sizeof (struct kqworkq));
1632
1633 kqwq->kqwq_state = KQ_WORKQ;
1634
1635 for (i = 0; i < KQWQ_NBUCKETS; i++) {
1636 TAILQ_INIT(&kq->kq_queue[i]);
1637 }
1638 for (i = 0; i < KQWQ_NQOS; i++) {
1639 TAILQ_INIT(&kqwq->kqwq_request[i].kqr_suppressed);
1640 }
1641
1642 lck_spin_init(&kqwq->kqwq_reqlock, kq_lck_grp, kq_lck_attr);
1643 policy = SYNC_POLICY_FIFO;
1644 hook = (void *)kqwq;
1645
1646 } else {
1647 struct kqfile *kqf;
1648
1649 kqf = (struct kqfile *)zalloc(kqfile_zone);
1650 if (kqf == NULL)
1651 return NULL;
1652
1653 kq = &kqf->kqf_kqueue;
1654 bzero(kqf, sizeof (struct kqfile));
1655 TAILQ_INIT(&kq->kq_queue[0]);
1656 TAILQ_INIT(&kqf->kqf_suppressed);
1657
1658 policy = SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST;
1659 hook = NULL;
1660
1661 }
1662
1663 waitq_set_init(&kq->kq_wqs, policy, NULL, hook);
1664 lck_spin_init(&kq->kq_lock, kq_lck_grp, kq_lck_attr);
1665 kq->kq_p = p;
1666
1667 if (fdp->fd_knlistsize < 0) {
1668 proc_fdlock(p);
1669 if (fdp->fd_knlistsize < 0)
1670 fdp->fd_knlistsize = 0; /* this process has had a kq */
1671 proc_fdunlock(p);
1672 }
1673
1674 kq_addr_offset = ((uintptr_t)kq - (uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS);
1675 /* Assert that the address can be pointer compacted for use with knote */
1676 assert(kq_addr_offset < (uint64_t)(1ull << KNOTE_KQ_BITSIZE));
1677 return (kq);
1678 }
1679
1680 /*
1681 * kqueue_dealloc - detach all knotes from a kqueue and free it
1682 *
1683 * We walk each list looking for knotes referencing this
1684 * kqueue. If we find one, we try to drop it. But
1685 * if we fail to get a drop reference, the attempt will wait
1686 * until it is dropped. So, we can just restart again
1687 * safe in the assumption that the list will eventually
1688 * not contain any more references to this kqueue (either
1689 * we dropped them all, or someone else did).
1690 *
1691 * Assumes no new events are being added to the kqueue.
1692 * Nothing locked on entry or exit.
1693 */
1694 void
1695 kqueue_dealloc(struct kqueue *kq)
1696 {
1697 struct proc *p;
1698 struct filedesc *fdp;
1699 struct knote *kn;
1700 int i;
1701
1702 if (kq == NULL)
1703 return;
1704
1705 p = kq->kq_p;
1706 fdp = p->p_fd;
1707
1708 proc_fdlock(p);
1709 for (i = 0; i < fdp->fd_knlistsize; i++) {
1710 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
1711 while (kn != NULL) {
1712 if (kq == knote_get_kq(kn)) {
1713 kqlock(kq);
1714 proc_fdunlock(p);
1715 /* drop it ourselves or wait */
1716 if (kqlock2knotedrop(kq, kn)) {
1717 knote_drop(kn, p);
1718 }
1719 proc_fdlock(p);
1720 /* start over at beginning of list */
1721 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
1722 continue;
1723 }
1724 kn = SLIST_NEXT(kn, kn_link);
1725 }
1726 }
1727 if (fdp->fd_knhashmask != 0) {
1728 for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
1729 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
1730 while (kn != NULL) {
1731 if (kq == knote_get_kq(kn)) {
1732 kqlock(kq);
1733 proc_fdunlock(p);
1734 /* drop it ourselves or wait */
1735 if (kqlock2knotedrop(kq, kn)) {
1736 knote_drop(kn, p);
1737 }
1738 proc_fdlock(p);
1739 /* start over at beginning of list */
1740 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
1741 continue;
1742 }
1743 kn = SLIST_NEXT(kn, kn_link);
1744 }
1745 }
1746 }
1747 proc_fdunlock(p);
1748
1749 /*
1750 	 * waitq_set_deinit() removes the KQ's waitq set from
1751 * any select sets to which it may belong.
1752 */
1753 waitq_set_deinit(&kq->kq_wqs);
1754 lck_spin_destroy(&kq->kq_lock, kq_lck_grp);
1755
1756 if (kq->kq_state & KQ_WORKQ) {
1757 struct kqworkq *kqwq = (struct kqworkq *)kq;
1758
1759 lck_spin_destroy(&kqwq->kqwq_reqlock, kq_lck_grp);
1760 zfree(kqworkq_zone, kqwq);
1761 } else {
1762 struct kqfile *kqf = (struct kqfile *)kq;
1763
1764 zfree(kqfile_zone, kqf);
1765 }
1766 }
1767
1768 int
1769 kqueue_body(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
1770 {
1771 struct kqueue *kq;
1772 struct fileproc *fp;
1773 int fd, error;
1774
1775 error = falloc_withalloc(p,
1776 &fp, &fd, vfs_context_current(), fp_zalloc, cra);
1777 if (error) {
1778 return (error);
1779 }
1780
1781 kq = kqueue_alloc(p, 0);
1782 if (kq == NULL) {
1783 fp_free(p, fd, fp);
1784 return (ENOMEM);
1785 }
1786
1787 fp->f_flag = FREAD | FWRITE;
1788 fp->f_ops = &kqueueops;
1789 fp->f_data = kq;
1790
1791 proc_fdlock(p);
1792 *fdflags(p, fd) |= UF_EXCLOSE;
1793 procfdtbl_releasefd(p, fd, NULL);
1794 fp_drop(p, fd, fp, 1);
1795 proc_fdunlock(p);
1796
1797 *retval = fd;
1798 return (error);
1799 }
1800
1801 int
1802 kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
1803 {
1804 return (kqueue_body(p, fileproc_alloc_init, NULL, retval));
1805 }
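/*
 * Illustrative user-space lifecycle for the descriptor created here (a
 * minimal sketch, not part of the kernel sources; "fd" is any readable
 * descriptor the caller owns):
 *
 *	#include <sys/event.h>
 *
 *	int kq = kqueue();                       // this syscall
 *	struct kevent change, event;
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &change, 1, NULL, 0, NULL);   // register interest
 *	kevent(kq, NULL, 0, &event, 1, NULL);    // block until fd is readable
 *	close(kq);                               // ends in kqueue_close/dealloc
 */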
1806
1807 static int
1808 kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp, struct proc *p,
1809 unsigned int flags)
1810 {
1811 int advance;
1812 int error;
1813
1814 if (flags & KEVENT_FLAG_LEGACY32) {
1815 bzero(kevp, sizeof (*kevp));
1816
1817 if (IS_64BIT_PROCESS(p)) {
1818 struct user64_kevent kev64;
1819
1820 advance = sizeof (kev64);
1821 error = copyin(*addrp, (caddr_t)&kev64, advance);
1822 if (error)
1823 return (error);
1824 kevp->ident = kev64.ident;
1825 kevp->filter = kev64.filter;
1826 kevp->flags = kev64.flags;
1827 kevp->udata = kev64.udata;
1828 kevp->fflags = kev64.fflags;
1829 kevp->data = kev64.data;
1830 } else {
1831 struct user32_kevent kev32;
1832
1833 advance = sizeof (kev32);
1834 error = copyin(*addrp, (caddr_t)&kev32, advance);
1835 if (error)
1836 return (error);
1837 kevp->ident = (uintptr_t)kev32.ident;
1838 kevp->filter = kev32.filter;
1839 kevp->flags = kev32.flags;
1840 kevp->udata = CAST_USER_ADDR_T(kev32.udata);
1841 kevp->fflags = kev32.fflags;
1842 kevp->data = (intptr_t)kev32.data;
1843 }
1844 } else if (flags & KEVENT_FLAG_LEGACY64) {
1845 struct kevent64_s kev64;
1846
1847 bzero(kevp, sizeof (*kevp));
1848
1849 advance = sizeof (struct kevent64_s);
1850 error = copyin(*addrp, (caddr_t)&kev64, advance);
1851 if (error)
1852 return(error);
1853 kevp->ident = kev64.ident;
1854 kevp->filter = kev64.filter;
1855 kevp->flags = kev64.flags;
1856 kevp->udata = kev64.udata;
1857 kevp->fflags = kev64.fflags;
1858 kevp->data = kev64.data;
1859 kevp->ext[0] = kev64.ext[0];
1860 kevp->ext[1] = kev64.ext[1];
1861
1862 } else {
1863 struct kevent_qos_s kevqos;
1864
1865 bzero(kevp, sizeof (*kevp));
1866
1867 advance = sizeof (struct kevent_qos_s);
1868 error = copyin(*addrp, (caddr_t)&kevqos, advance);
1869 if (error)
1870 return error;
1871 kevp->ident = kevqos.ident;
1872 kevp->filter = kevqos.filter;
1873 kevp->flags = kevqos.flags;
1874 kevp->qos = kevqos.qos;
1875 // kevp->xflags = kevqos.xflags;
1876 kevp->udata = kevqos.udata;
1877 kevp->fflags = kevqos.fflags;
1878 kevp->data = kevqos.data;
1879 kevp->ext[0] = kevqos.ext[0];
1880 kevp->ext[1] = kevqos.ext[1];
1881 kevp->ext[2] = kevqos.ext[2];
1882 kevp->ext[3] = kevqos.ext[3];
1883 }
1884 if (!error)
1885 *addrp += advance;
1886 return (error);
1887 }
1888
1889 static int
1890 kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc *p,
1891 unsigned int flags)
1892 {
1893 user_addr_t addr = *addrp;
1894 int advance;
1895 int error;
1896
1897 /*
1898 	 * fully initialize the different output event structure
1899 * types from the internal kevent (and some universal
1900 * defaults for fields not represented in the internal
1901 * form).
1902 */
1903 if (flags & KEVENT_FLAG_LEGACY32) {
1904 assert((flags & KEVENT_FLAG_STACK_EVENTS) == 0);
1905
1906 if (IS_64BIT_PROCESS(p)) {
1907 struct user64_kevent kev64;
1908
1909 advance = sizeof (kev64);
1910 bzero(&kev64, advance);
1911
1912 /*
1913 * deal with the special case of a user-supplied
1914 * value of (uintptr_t)-1.
1915 */
1916 kev64.ident = (kevp->ident == (uintptr_t)-1) ?
1917 (uint64_t)-1LL : (uint64_t)kevp->ident;
1918
1919 kev64.filter = kevp->filter;
1920 kev64.flags = kevp->flags;
1921 kev64.fflags = kevp->fflags;
1922 kev64.data = (int64_t) kevp->data;
1923 kev64.udata = kevp->udata;
1924 error = copyout((caddr_t)&kev64, addr, advance);
1925 } else {
1926 struct user32_kevent kev32;
1927
1928 advance = sizeof (kev32);
1929 bzero(&kev32, advance);
1930 kev32.ident = (uint32_t)kevp->ident;
1931 kev32.filter = kevp->filter;
1932 kev32.flags = kevp->flags;
1933 kev32.fflags = kevp->fflags;
1934 kev32.data = (int32_t)kevp->data;
1935 kev32.udata = kevp->udata;
1936 error = copyout((caddr_t)&kev32, addr, advance);
1937 }
1938 } else if (flags & KEVENT_FLAG_LEGACY64) {
1939 struct kevent64_s kev64;
1940
1941 advance = sizeof (struct kevent64_s);
1942 if (flags & KEVENT_FLAG_STACK_EVENTS) {
1943 addr -= advance;
1944 }
1945 bzero(&kev64, advance);
1946 kev64.ident = kevp->ident;
1947 kev64.filter = kevp->filter;
1948 kev64.flags = kevp->flags;
1949 kev64.fflags = kevp->fflags;
1950 kev64.data = (int64_t) kevp->data;
1951 kev64.udata = kevp->udata;
1952 kev64.ext[0] = kevp->ext[0];
1953 kev64.ext[1] = kevp->ext[1];
1954 error = copyout((caddr_t)&kev64, addr, advance);
1955 } else {
1956 struct kevent_qos_s kevqos;
1957
1958 advance = sizeof (struct kevent_qos_s);
1959 if (flags & KEVENT_FLAG_STACK_EVENTS) {
1960 addr -= advance;
1961 }
1962 bzero(&kevqos, advance);
1963 kevqos.ident = kevp->ident;
1964 kevqos.filter = kevp->filter;
1965 kevqos.flags = kevp->flags;
1966 kevqos.qos = kevp->qos;
1967 kevqos.udata = kevp->udata;
1968 kevqos.fflags = kevp->fflags;
1969 kevqos.xflags = 0;
1970 kevqos.data = (int64_t) kevp->data;
1971 kevqos.ext[0] = kevp->ext[0];
1972 kevqos.ext[1] = kevp->ext[1];
1973 kevqos.ext[2] = kevp->ext[2];
1974 kevqos.ext[3] = kevp->ext[3];
1975 error = copyout((caddr_t)&kevqos, addr, advance);
1976 }
1977 if (!error) {
1978 if (flags & KEVENT_FLAG_STACK_EVENTS)
1979 *addrp = addr;
1980 else
1981 *addrp = addr + advance;
1982 }
1983 return (error);
1984 }
1985
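/*
 * kevent_get_data_size - read the initial data residual
 *
 * Fetch the size of the caller's extra-data buffer through the
 * user-supplied (or, for kernel callers, direct) data_available
 * pointer; zero if no pointer was provided.
 */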
1986 static int
1987 kevent_get_data_size(struct proc *p,
1988 uint64_t data_available,
1989 unsigned int flags,
1990 user_size_t *residp)
1991 {
1992 user_size_t resid;
1993 int error = 0;
1994
1995 if (data_available != USER_ADDR_NULL) {
1996 if (flags & KEVENT_FLAG_KERNEL) {
1997 resid = *(user_size_t *)(uintptr_t)data_available;
1998 } else if (IS_64BIT_PROCESS(p)) {
1999 user64_size_t usize;
2000 error = copyin((user_addr_t)data_available, &usize, sizeof(usize));
2001 resid = (user_size_t)usize;
2002 } else {
2003 user32_size_t usize;
2004 error = copyin((user_addr_t)data_available, &usize, sizeof(usize));
2005 resid = (user_size_t)usize;
2006 }
2007 if (error)
2008 return (error);
2009 } else {
2010 resid = 0;
2011 }
2012 *residp = resid;
2013 return 0;
2014 }
2015
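/*
 * kevent_put_data_size - write back the remaining data residual
 *
 * Report how much of the caller's extra-data buffer is still unused
 * by storing resid through the data_available pointer.
 */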
2016 static int
2017 kevent_put_data_size(struct proc *p,
2018 uint64_t data_available,
2019 unsigned int flags,
2020 user_size_t resid)
2021 {
2022 int error = 0;
2023
2024 if (data_available) {
2025 if (flags & KEVENT_FLAG_KERNEL) {
2026 *(user_size_t *)(uintptr_t)data_available = resid;
2027 } else if (IS_64BIT_PROCESS(p)) {
2028 user64_size_t usize = (user64_size_t)resid;
2029 error = copyout(&usize, (user_addr_t)data_available, sizeof(usize));
2030 } else {
2031 user32_size_t usize = (user32_size_t)resid;
2032 error = copyout(&usize, (user_addr_t)data_available, sizeof(usize));
2033 }
2034 }
2035 return error;
2036 }
2037
2038 /*
2039 * kevent_continue - continue a kevent syscall after blocking
2040 *
2041 * assume we inherit a use count on the kq fileglob.
2042 */
2043
2044 __attribute__((noreturn))
2045 static void
2046 kevent_continue(__unused struct kqueue *kq, void *data, int error)
2047 {
2048 struct _kevent *cont_args;
2049 struct fileproc *fp;
2050 uint64_t data_available;
2051 user_size_t data_size;
2052 user_size_t data_resid;
2053 unsigned int flags;
2054 int32_t *retval;
2055 int noutputs;
2056 int fd;
2057 struct proc *p = current_proc();
2058
2059 cont_args = (struct _kevent *)data;
2060 data_available = cont_args->data_available;
2061 flags = cont_args->process_data.fp_flags;
2062 data_size = cont_args->process_data.fp_data_size;
2063 data_resid = cont_args->process_data.fp_data_resid;
2064 noutputs = cont_args->eventout;
2065 retval = cont_args->retval;
2066 fd = cont_args->fd;
2067 fp = cont_args->fp;
2068
2069 if (fp != NULL)
2070 fp_drop(p, fd, fp, 0);
2071
2072 /* don't abandon other output just because of residual copyout failures */
2073 if (error == 0 && data_available && data_resid != data_size) {
2074 (void)kevent_put_data_size(p, data_available, flags, data_resid);
2075 }
2076
2077 /* don't restart after signals... */
2078 if (error == ERESTART)
2079 error = EINTR;
2080 else if (error == EWOULDBLOCK)
2081 error = 0;
2082 if (error == 0)
2083 *retval = noutputs;
2084 unix_syscall_return(error);
2085 }
2086
2087 /*
2088 * kevent - [syscall] register and wait for kernel events
2089 *
2090 */
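/*
 * Illustrative user-space usage of this syscall through the public
 * <sys/event.h> wrappers (not part of this file); sock_fd is assumed
 * to be an open socket descriptor:
 *
 *	int kq = kqueue();
 *	struct kevent change, event;
 *	EV_SET(&change, sock_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	int n = kevent(kq, &change, 1, &event, 1, NULL);
 *
 * This registers the read filter and then blocks until one event
 * (or a per-event error) is available.
 */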
2091 int
2092 kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
2093 {
2094 unsigned int flags = KEVENT_FLAG_LEGACY32;
2095
2096 return kevent_internal(p,
2097 uap->fd,
2098 uap->changelist, uap->nchanges,
2099 uap->eventlist, uap->nevents,
2100 0ULL, 0ULL,
2101 flags,
2102 uap->timeout,
2103 kevent_continue,
2104 retval);
2105 }
2106
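/*
 * kevent64 - [syscall] register and wait for kernel events
 *	(extended kevent64_s form, with caller flags and ext values)
 */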
2107 int
2108 kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
2109 {
2110 unsigned int flags;
2111
2112 /* restrict to user flags and set legacy64 */
2113 flags = uap->flags & KEVENT_FLAG_USER;
2114 flags |= KEVENT_FLAG_LEGACY64;
2115
2116 return kevent_internal(p,
2117 uap->fd,
2118 uap->changelist, uap->nchanges,
2119 uap->eventlist, uap->nevents,
2120 0ULL, 0ULL,
2121 flags,
2122 uap->timeout,
2123 kevent_continue,
2124 retval);
2125 }
2126
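/*
 * kevent_qos - [syscall] register and wait for kernel events
 *	(QoS-aware kevent_qos_s form, with support for returning
 *	additional data via data_out/data_available)
 */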
2127 int
2128 kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
2129 {
2130 /* restrict to user flags */
2131 uap->flags &= KEVENT_FLAG_USER;
2132
2133 return kevent_internal(p,
2134 uap->fd,
2135 uap->changelist, uap->nchanges,
2136 uap->eventlist, uap->nevents,
2137 uap->data_out, (uint64_t)uap->data_available,
2138 uap->flags,
2139 0ULL,
2140 kevent_continue,
2141 retval);
2142 }
2143
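/*
 * kevent_qos_internal - in-kernel entry point for kevent_qos
 *
 * Adds KEVENT_FLAG_KERNEL to the flags so that the data_available
 * and timeout values are dereferenced directly rather than copied
 * in from user space.
 */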
2144 int
2145 kevent_qos_internal(struct proc *p, int fd,
2146 user_addr_t changelist, int nchanges,
2147 user_addr_t eventlist, int nevents,
2148 user_addr_t data_out, user_size_t *data_available,
2149 unsigned int flags,
2150 int32_t *retval)
2151 {
2152 return kevent_internal(p,
2153 fd,
2154 changelist, nchanges,
2155 eventlist, nevents,
2156 data_out, (uint64_t)data_available,
2157 (flags | KEVENT_FLAG_KERNEL),
2158 0ULL,
2159 NULL,
2160 retval);
2161 }
2162
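/*
 * kevent_get_timeout - convert the caller's timeout to an absolute timeval
 *
 * KEVENT_FLAG_IMMEDIATE yields "now" (a poll), a supplied timespec is
 * validated and added to the current uptime, and no timeout at all
 * yields a zero timeval meaning wait forever.
 */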
2163 static int
2164 kevent_get_timeout(struct proc *p,
2165 user_addr_t utimeout,
2166 unsigned int flags,
2167 struct timeval *atvp)
2168 {
2169 struct timeval atv;
2170 int error = 0;
2171
2172 if (flags & KEVENT_FLAG_IMMEDIATE) {
2173 getmicrouptime(&atv);
2174 } else if (utimeout != USER_ADDR_NULL) {
2175 struct timeval rtv;
2176 if (flags & KEVENT_FLAG_KERNEL) {
2177 struct timespec *tsp = (struct timespec *)utimeout;
2178 TIMESPEC_TO_TIMEVAL(&rtv, tsp);
2179 } else if (IS_64BIT_PROCESS(p)) {
2180 struct user64_timespec ts;
2181 error = copyin(utimeout, &ts, sizeof(ts));
2182 if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0)
2183 error = EINVAL;
2184 else
2185 TIMESPEC_TO_TIMEVAL(&rtv, &ts);
2186 } else {
2187 struct user32_timespec ts;
2188 error = copyin(utimeout, &ts, sizeof(ts));
2189 TIMESPEC_TO_TIMEVAL(&rtv, &ts);
2190 }
2191 if (error)
2192 return (error);
2193 if (itimerfix(&rtv))
2194 return (EINVAL);
2195 getmicrouptime(&atv);
2196 timevaladd(&atv, &rtv);
2197 } else {
2198 /* wait forever value */
2199 atv.tv_sec = 0;
2200 atv.tv_usec = 0;
2201 }
2202 *atvp = atv;
2203 return 0;
2204 }
2205
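/*
 * kevent_set_kq_mode - latch the event format used with a kqueue
 *
 * A given kqueue may only deliver one event structure type; reject
 * attempts to mix legacy 32-bit use with 64-bit/QoS use.
 */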
2206 static int
2207 kevent_set_kq_mode(struct kqueue *kq, unsigned int flags)
2208 {
2209 /* each kq should only be used for events of one type */
2210 kqlock(kq);
2211 if (kq->kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) {
2212 if (flags & KEVENT_FLAG_LEGACY32) {
2213 if ((kq->kq_state & KQ_KEV32) == 0) {
2214 kqunlock(kq);
2215 return EINVAL;
2216 }
2217 } else if (kq->kq_state & KQ_KEV32) {
2218 kqunlock(kq);
2219 return EINVAL;
2220 }
2221 } else if (flags & KEVENT_FLAG_LEGACY32) {
2222 kq->kq_state |= KQ_KEV32;
2223 } else {
2224 /* JMM - set KQ_KEV_QOS when we are ready for exclusive */
2225 kq->kq_state |= KQ_KEV64;
2226 }
2227 kqunlock(kq);
2228 return 0;
2229 }
2230
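/*
 * kevent_get_kq - resolve the kqueue a kevent call operates on
 *
 * For KEVENT_FLAG_WORKQ, use (and lazily allocate) the process-wide
 * workq kqueue; otherwise take a usecount on the kqueue referenced
 * by fd. In both cases latch the event format via kevent_set_kq_mode.
 */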
2231 static int
2232 kevent_get_kq(struct proc *p, int fd, unsigned int flags, struct fileproc **fpp, struct kqueue **kqp)
2233 {
2234 struct fileproc *fp = NULL;
2235 struct kqueue *kq;
2236 int error;
2237
2238 if (flags & KEVENT_FLAG_WORKQ) {
2239 /*
2240 * use the private kq associated with the proc workq.
2241 * Just being a thread within the process (and not
2242 * being the exit/exec thread) is enough to hold a
2243 * reference on this special kq.
2244 */
2245 kq = p->p_wqkqueue;
2246 if (kq == NULL) {
2247 struct kqueue *alloc_kq = kqueue_alloc(p, KEVENT_FLAG_WORKQ);
2248 if (alloc_kq == NULL)
2249 return ENOMEM;
2250
2251 proc_fdlock(p);
2252 if (p->p_wqkqueue == NULL) {
2253 kq = p->p_wqkqueue = alloc_kq;
2254 proc_fdunlock(p);
2255 } else {
2256 proc_fdunlock(p);
2257 kq = p->p_wqkqueue;
2258 kqueue_dealloc(alloc_kq);
2259 }
2260 }
2261 } else {
2262 /* get a usecount for the kq itself */
2263 if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0)
2264 return (error);
2265 }
2266 if ((error = kevent_set_kq_mode(kq, flags)) != 0) {
2267 /* drop the usecount */
2268 if (fp != NULL)
2269 fp_drop(p, fd, fp, 0);
2270 return error;
2271 }
2272
2273 *fpp = fp;
2274 *kqp = kq;
2275 return 0;
2276 }
2277
2278
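/*
 * kevent_internal - common backend for the kevent* syscalls
 *
 * Copy in and register each change request (optionally copying out
 * receipt/error events), then scan for triggered events, blocking
 * with the supplied continuation if none are ready yet.
 */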
2279 static int
2280 kevent_internal(struct proc *p,
2281 int fd,
2282 user_addr_t changelist, int nchanges,
2283 user_addr_t ueventlist, int nevents,
2284 user_addr_t data_out, uint64_t data_available,
2285 unsigned int flags,
2286 user_addr_t utimeout,
2287 kqueue_continue_t continuation,
2288 int32_t *retval)
2289 {
2290 struct _kevent *cont_args;
2291 uthread_t ut;
2292 struct kqueue *kq;
2293 struct fileproc *fp = NULL;
2294 struct kevent_internal_s kev;
2295 int error, noutputs;
2296 struct timeval atv;
2297 user_size_t data_size;
2298 user_size_t data_resid;
2299
2300 /* Don't allow user-space threads to process output events from the workq kq */
2301 if ((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL)) == KEVENT_FLAG_WORKQ &&
2302 !(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0)
2303 return EINVAL;
2304
2305 /* prepare to deal with stack-wise allocation of out events */
2306 if (flags & KEVENT_FLAG_STACK_EVENTS) {
2307 int scale = ((flags & KEVENT_FLAG_LEGACY32) ?
2308 (IS_64BIT_PROCESS(p) ? sizeof(struct user64_kevent) :
2309 sizeof(struct user32_kevent)) :
2310 ((flags & KEVENT_FLAG_LEGACY64) ? sizeof(struct kevent64_s) :
2311 sizeof(struct kevent_qos_s)));
2312 ueventlist += nevents * scale;
2313 }
2314
2315 /* convert timeout to absolute - if we have one (and not immediate) */
2316 error = kevent_get_timeout(p, utimeout, flags, &atv);
2317 if (error)
2318 return error;
2319
2320 /* copyin initial value of data residual from data_available */
2321 error = kevent_get_data_size(p, data_available, flags, &data_size);
2322 if (error)
2323 return error;
2324
2325 /* get the kq we are going to be working on */
2326 error = kevent_get_kq(p, fd, flags, &fp, &kq);
2327 if (error)
2328 return error;
2329
2330 /* register all the change requests the user provided... */
2331 noutputs = 0;
2332 while (nchanges > 0 && error == 0) {
2333 error = kevent_copyin(&changelist, &kev, p, flags);
2334 if (error)
2335 break;
2336
2337 /* Make sure user doesn't pass in any system flags */
2338 kev.flags &= ~EV_SYSFLAGS;
2339
2340 kevent_register(kq, &kev, p);
2341
2342 if (nevents > 0 &&
2343 ((kev.flags & EV_ERROR) || (kev.flags & EV_RECEIPT))) {
2344 if (kev.flags & EV_RECEIPT) {
2345 kev.flags |= EV_ERROR;
2346 kev.data = 0;
2347 }
2348 error = kevent_copyout(&kev, &ueventlist, p, flags);
2349 if (error == 0) {
2350 nevents--;
2351 noutputs++;
2352 }
2353 } else if (kev.flags & EV_ERROR) {
2354 error = kev.data;
2355 }
2356 nchanges--;
2357 }
2358
2359 /* short-circuit the scan if we only want error events */
2360 if (flags & KEVENT_FLAG_ERROR_EVENTS)
2361 nevents = 0;
2362
2363 /* process pending events */
2364 if (nevents > 0 && noutputs == 0 && error == 0) {
2365
2366 /* store the continuation/completion data in the uthread */
2367 ut = (uthread_t)get_bsdthread_info(current_thread());
2368 cont_args = &ut->uu_kevent.ss_kevent;
2369 cont_args->fp = fp;
2370 cont_args->fd = fd;
2371 cont_args->retval = retval;
2372 cont_args->eventlist = ueventlist;
2373 cont_args->eventcount = nevents;
2374 cont_args->eventout = noutputs;
2375 cont_args->data_available = data_available;
2376 cont_args->process_data.fp_fd = fd;
2377 cont_args->process_data.fp_flags = flags;
2378 cont_args->process_data.fp_data_out = data_out;
2379 cont_args->process_data.fp_data_size = data_size;
2380 cont_args->process_data.fp_data_resid = data_size;
2381
2382 error = kqueue_scan(kq, kevent_callback,
2383 continuation, cont_args,
2384 &cont_args->process_data,
2385 &atv, p);
2386
2387 /* process remaining outputs */
2388 noutputs = cont_args->eventout;
2389 data_resid = cont_args->process_data.fp_data_resid;
2390
2391 /* copyout residual data size value (if it needs to be copied out) */
2392 /* don't abandon other output just because of residual copyout failures */
2393 if (error == 0 && data_available && data_resid != data_size) {
2394 (void)kevent_put_data_size(p, data_available, flags, data_resid);
2395 }
2396 }
2397
2398 /* don't restart after signals... */
2399 if (error == ERESTART)
2400 error = EINTR;
2401 else if (error == EWOULDBLOCK)
2402 error = 0;
2403 if (error == 0)
2404 *retval = noutputs;
2405 if (fp != NULL)
2406 fp_drop(p, fd, fp, 0);
2407 return (error);
2408 }
2409
2410
2411 /*
2412 * kevent_callback - callback for each individual event
2413 *
2414 * called with nothing locked
2415 * caller holds a reference on the kqueue
2416 */
2417 static int
2418 kevent_callback(__unused struct kqueue *kq, struct kevent_internal_s *kevp,
2419 void *data)
2420 {
2421 struct _kevent *cont_args;
2422 int error;
2423
2424 cont_args = (struct _kevent *)data;
2425 assert(cont_args->eventout < cont_args->eventcount);
2426
2427 /*
2428 * Copy out the appropriate amount of event data for this user.
2429 */
2430 error = kevent_copyout(kevp, &cont_args->eventlist, current_proc(),
2431 cont_args->process_data.fp_flags);
2432
2433 /*
2434 * If there isn't space for additional events, return
2435 * a harmless error to stop the processing here
2436 */
2437 if (error == 0 && ++cont_args->eventout == cont_args->eventcount)
2438 error = EWOULDBLOCK;
2439 return (error);
2440 }
2441
2442 /*
2443 * kevent_description - format a description of a kevent for diagnostic output
2444 *
2445 * called with a 256-byte string buffer
2446 */
2447
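/*
 * Example of a formatted description (illustrative values only):
 *	kevent={.ident=0x4, .filter=-1, .flags=0x1, .udata=0, .fflags=0, .data=0x80, .ext[0]=0, .ext[1]=0}
 */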
2448 char *
2449 kevent_description(struct kevent_internal_s *kevp, char *s, size_t n)
2450 {
2451 snprintf(s, n,
2452 "kevent="
2453 "{.ident=%#llx, .filter=%d, .flags=%#x, .udata=%#llx, .fflags=%#x, .data=%#llx, .ext[0]=%#llx, .ext[1]=%#llx}",
2454 kevp->ident,
2455 kevp->filter,
2456 kevp->flags,
2457 kevp->udata,
2458 kevp->fflags,
2459 kevp->data,
2460 kevp->ext[0],
2461 kevp->ext[1] );
2462
2463 return (s);
2464 }
2465
2466 /*
2467 * kevent_register - add a new event to a kqueue
2468 *
2469 * Creates a mapping between the event source and
2470 * the kqueue via a knote data structure.
2471 *
2472 * Because many/most of the event sources are file
2473 * descriptor related, the knote is linked off
2474 * the file descriptor table for quick access.
2475 *
2476 * called with nothing locked
2477 * caller holds a reference on the kqueue
2478 */
2479
2480 void
2481 kevent_register(struct kqueue *kq, struct kevent_internal_s *kev,
2482 __unused struct proc *ctxp)
2483 {
2484 struct proc *p = kq->kq_p;
2485 struct filterops *fops;
2486 struct knote *kn = NULL;
2487 int result = 0;
2488 int error = 0;
2489
2490 if (kev->filter < 0) {
2491 if (kev->filter + EVFILT_SYSCOUNT < 0) {
2492 error = EINVAL;
2493 goto out;
2494 }
2495 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
2496 } else {
2497 error = EINVAL;
2498 goto out;
2499 }
2500
2501 /* restrict EV_VANISHED to adding udata-specific dispatch kevents */
2502 if ((kev->flags & EV_VANISHED) &&
2503 (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2)) {
2504 error = EINVAL;
2505 goto out;
2506 }
2507
2508 /* Simplify the flags - delete and disable overrule */
2509 if (kev->flags & EV_DELETE)
2510 kev->flags &= ~EV_ADD;
2511 if (kev->flags & EV_DISABLE)
2512 kev->flags &= ~EV_ENABLE;
2513
2514 restart:
2515
2516 proc_fdlock(p);
2517
2518 /* find the matching knote from the fd tables/hashes */
2519 kn = knote_fdfind(kq, kev, p);
2520
2521 if (kn == NULL) {
2522 if (kev->flags & EV_ADD) {
2523 struct fileproc *fp = NULL;
2524
2525 /* grab a file reference for the new knote */
2526 if (fops->f_isfd) {
2527 if ((error = fp_lookup(p, kev->ident, &fp, 1)) != 0) {
2528 proc_fdunlock(p);
2529 goto out;
2530 }
2531 }
2532
2533 kn = knote_alloc();
2534 if (kn == NULL) {
2535 proc_fdunlock(p);
2536 error = ENOMEM;
2537 if (fp != NULL)
2538 fp_drop(p, kev->ident, fp, 0);
2539 goto out;
2540 }
2541
2542 kn->kn_fp = fp;
2543 knote_set_kq(kn,kq);
2544 kn->kn_filtid = ~kev->filter;
2545 kn->kn_inuse = 1; /* for f_attach() */
2546 kn->kn_status = KN_ATTACHING | KN_ATTACHED;
2547
2548 /* was vanish support requested */
2549 if (kev->flags & EV_VANISHED) {
2550 kev->flags &= ~EV_VANISHED;
2551 kn->kn_status |= KN_REQVANISH;
2552 }
2553
2554 /* snapshot matching/dispatching protocol flags into knote */
2555 if (kev->flags & EV_DISPATCH)
2556 kn->kn_status |= KN_DISPATCH;
2557 if (kev->flags & EV_UDATA_SPECIFIC)
2558 kn->kn_status |= KN_UDATA_SPECIFIC;
2559
2560 /*
2561 * copy the kevent state into the knote;
2562 * the protocol is that fflags and data
2563 * are saved off, and cleared before
2564 * calling the attach routine.
2565 */
2566 kn->kn_kevent = *kev;
2567 kn->kn_sfflags = kev->fflags;
2568 kn->kn_sdata = kev->data;
2569 kn->kn_fflags = 0;
2570 kn->kn_data = 0;
2571
2572 /* invoke pthread kext to convert kevent qos to thread qos */
2573 if (kq->kq_state & KQ_WORKQ) {
2574 kn->kn_qos = canonicalize_kevent_qos(kn->kn_qos);
2575 knote_set_qos_index(kn, qos_index_from_qos(kn->kn_qos, FALSE));
2576 knote_set_qos_override_index(kn, QOS_INDEX_KQFILE);
2577 assert(knote_get_qos_index(kn) < KQWQ_NQOS);
2578 } else {
2579 knote_set_qos_index(kn, QOS_INDEX_KQFILE);
2580 knote_set_qos_override_index(kn, QOS_INDEX_KQFILE);
2581 }
2582
2583 /* before anyone can find it */
2584 if (kev->flags & EV_DISABLE)
2585 knote_disable(kn);
2586
2587 /* Add the knote for lookup thru the fd table */
2588 error = knote_fdadd(kn, p);
2589 proc_fdunlock(p);
2590
2591 if (error) {
2592 knote_free(kn);
2593 if (fp != NULL)
2594 fp_drop(p, kev->ident, fp, 0);
2595 goto out;
2596 }
2597
2598 /* fp reference count now applies to knote */
2599
2600 /* call filter attach routine */
2601 result = fops->f_attach(kn);
2602
2603 /*
2604 * Trade knote use count for kq lock.
2605 * Cannot be dropped because we held
2606 * KN_ATTACHING throughout.
2607 */
2608 knoteuse2kqlock(kq, kn, 1);
2609
2610 if (kn->kn_flags & EV_ERROR) {
2611 /*
2612 * Failed to attach correctly, so drop.
2613 * All other possible users/droppers
2614 * have deferred to us. Save the error
2615 * to return to our caller.
2616 */
2617 kn->kn_status &= ~KN_ATTACHED;
2618 kn->kn_status |= KN_DROPPING;
2619 error = kn->kn_data;
2620 kqunlock(kq);
2621 knote_drop(kn, p);
2622 goto out;
2623 }
2624
2625 /* end "attaching" phase - now just attached */
2626 kn->kn_status &= ~KN_ATTACHING;
2627
2628 if (kn->kn_status & KN_DROPPING) {
2629 /*
2630 * Attach succeeded, but someone else
2631 * deferred their drop - now we have
2632 * to do it for them.
2633 */
2634 kqunlock(kq);
2635 knote_drop(kn, p);
2636 goto out;
2637 }
2638
2639 /*
2640 * If the attach routine indicated that an
2641 * event is already fired, activate the knote.
2642 */
2643 if (result)
2644 knote_activate(kn);
2645
2646 } else {
2647 proc_fdunlock(p);
2648 error = ENOENT;
2649 goto out;
2650 }
2651
2652 } else {
2653 /* existing knote - get kqueue lock */
2654 kqlock(kq);
2655 proc_fdunlock(p);
2656
2657 if ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) != 0) {
2658 /*
2659 * The knote is not in a stable state, wait for that
2660 * transition to complete and then redrive the lookup.
2661 */
2662 kn->kn_status |= KN_USEWAIT;
2663 waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
2664 CAST_EVENT64_T(&kn->kn_status),
2665 THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
2666 kqunlock(kq);
2667 thread_block(THREAD_CONTINUE_NULL);
2668 goto restart;
2669 }
2670
2671 if (kev->flags & EV_DELETE) {
2672
2673 /*
2674 * If attempting to delete a disabled dispatch2 knote,
2675 * we must wait for the knote to be re-enabled (unless
2676 * it is being re-enabled atomically here).
2677 */
2678 if ((kev->flags & EV_ENABLE) == 0 &&
2679 (kn->kn_status & (KN_DISPATCH2 | KN_DISABLED)) ==
2680 (KN_DISPATCH2 | KN_DISABLED)) {
2681 kn->kn_status |= KN_DEFERDELETE;
2682 kqunlock(kq);
2683 error = EINPROGRESS;
2684 } else if (kqlock2knotedrop(kq, kn)) {
2685 knote_drop(kn, p);
2686 } else {
2687 /*
2688 * The kqueue is unlocked, it's not being
2689 * dropped, and kqlock2knotedrop returned 0:
2690 * this means that someone stole the drop of
2691 * the knote from us.
2692 */
2693 error = EINPROGRESS;
2694 }
2695 goto out;
2696 }
2697
2698 /*
2699 * If we are re-enabling a deferred-delete knote,
2700 * just enable it now and avoid calling the
2701 * filter touch routine (it has delivered its
2702 * last event already).
2703 */
2704 if ((kev->flags & EV_ENABLE) &&
2705 (kn->kn_status & KN_DEFERDELETE)) {
2706 assert(kn->kn_status & KN_DISABLED);
2707 knote_activate(kn);
2708 knote_enable(kn);
2709 kqunlock(kq);
2710 goto out;
2711 }
2712
2713 /*
2714 * If we are disabling, do it before unlocking and
2715 * calling the touch routine (so no processing can
2716 * see the new kevent state before the disable is
2717 * applied).
2718 */
2719 if (kev->flags & EV_DISABLE)
2720 knote_disable(kn);
2721
2722 /*
2723 * Convert the kqlock to a use reference on the
2724 * knote so we can call the filter touch routine.
2725 */
2726 if (kqlock2knoteuse(kq, kn)) {
2727
2728 /*
2729 * Call touch routine to notify filter of changes
2730 * in filter values (and to re-determine if any
2731 * events are fired).
2732 */
2733 result = knote_fops(kn)->f_touch(kn, kev);
2734
2735 /* Get the kq lock back (don't defer droppers). */
2736 if (!knoteuse2kqlock(kq, kn, 0)) {
2737 kqunlock(kq);
2738 goto out;
2739 }
2740
2741 /* Activate it if the touch routine said to */
2742 if (result)
2743 knote_activate(kn);
2744 }
2745
2746 /* Enable the knote if called for */
2747 if (kev->flags & EV_ENABLE)
2748 knote_enable(kn);
2749
2750 }
2751
2752 /* still have kqlock held and knote is valid */
2753 kqunlock(kq);
2754
2755 out:
2756 /* output local errors through the kevent */
2757 if (error) {
2758 kev->flags |= EV_ERROR;
2759 kev->data = error;
2760 }
2761 }
2762
2763
2764 /*
2765 * knote_process - process a triggered event
2766 *
2767 * Validate that it is really still a triggered event
2768 * by calling the filter routines (if necessary). Hold
2769 * a use reference on the knote to avoid it being detached.
2770 *
2771 * If it is still considered triggered, we will have taken
2772 * a copy of the state under the filter lock. We use that
2773 * snapshot to dispatch the knote for future processing (or
2774 * not, if this was a lost event).
2775 *
2776 * Our caller assures us that nobody else can be processing
2777 * events from this knote during the whole operation. But
2778 * others can be touching or posting events to the knote
2779 * interspersed with our processing it.
2780 *
2781 * caller holds a reference on the kqueue.
2782 * kqueue locked on entry and exit - but may be dropped
2783 */
2784 static int
2785 knote_process(struct knote *kn,
2786 kevent_callback_t callback,
2787 void *callback_data,
2788 struct filt_process_s *process_data,
2789 struct proc *p)
2790 {
2791 struct kevent_internal_s kev;
2792 struct kqueue *kq = knote_get_kq(kn);
2793 int result = 0;
2794 int error = 0;
2795
2796 bzero(&kev, sizeof(kev));
2797
2798 /*
2799 * Must be active or stayactive
2800 * Must be queued and not disabled/suppressed
2801 */
2802 assert(kn->kn_status & KN_QUEUED);
2803 assert(kn->kn_status & (KN_ACTIVE|KN_STAYACTIVE));
2804 assert(!(kn->kn_status & (KN_DISABLED|KN_SUPPRESSED|KN_DROPPING)));
2805
2806 /*
2807 * For deferred-drop or vanished events, we just create a fake
2808 * event to acknowledge end-of-life. Otherwise, we call the
2809 * filter's process routine to snapshot the kevent state under
2810 * the filter's locking protocol.
2811 */
2812 if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
2813 /* create fake event */
2814 kev.filter = kn->kn_filter;
2815 kev.ident = kn->kn_id;
2816 kev.qos = kn->kn_qos;
2817 kev.flags = (kn->kn_status & KN_DEFERDELETE) ?
2818 EV_DELETE : EV_VANISHED;
2819 kev.flags |= (EV_DISPATCH2 | EV_ONESHOT);
2820 kev.udata = kn->kn_udata;
2821 result = 1;
2822
2823 knote_suppress(kn);
2824 } else {
2825
2826 /* deactivate - so new activations indicate a wakeup */
2827 knote_deactivate(kn);
2828
2829 /* suppress knotes to avoid returning the same event multiple times in a single call. */
2830 knote_suppress(kn);
2831
2832 /* convert lock to a knote use reference */
2833 if (!kqlock2knoteuse(kq, kn))
2834 panic("dropping knote found on queue\n");
2835
2836 /* call out to the filter to process with just a ref */
2837 result = knote_fops(kn)->f_process(kn, process_data, &kev);
2838
2839 /*
2840 * convert our reference back to a lock. accept drop
2841 * responsibility from others if we've committed to
2842 * delivering event data.
2843 */
2844 if (!knoteuse2kqlock(kq, kn, result)) {
2845 /* knote dropped */
2846 kn = NULL;
2847 }
2848 }
2849
2850 if (kn != NULL) {
2851 /*
2852 * Determine how to dispatch the knote for future event handling.
2853 * not-fired: just return (do not call out, leave deactivated).
2854 * One-shot: If dispatch2, enter deferred-delete mode (unless this
2855 * is the deferred delete event delivery itself). Otherwise,
2856 * drop it.
2857 * stolendrop: We took responsibility for someone else's drop attempt.
2858 * treat this just like one-shot and prepare to turn it back
2859 * into a deferred delete if required.
2860 * Dispatch: don't clear state, just mark it disabled.
2861 * Cleared: just leave it deactivated.
2862 * Others: re-activate as there may be more events to handle.
2863 * This will not wake up more handlers right now, but
2864 * at the completion of handling events it may trigger
2865 * more handler threads (TODO: optimize based on more than
2866 * just this one event being detected by the filter).
2867 */
2868
2869 if (result == 0)
2870 return (EJUSTRETURN);
2871
2872 if ((kev.flags & EV_ONESHOT) || (kn->kn_status & KN_STOLENDROP)) {
2873 if ((kn->kn_status & (KN_DISPATCH2 | KN_DEFERDELETE)) == KN_DISPATCH2) {
2874 /* defer dropping non-delete oneshot dispatch2 events */
2875 kn->kn_status |= KN_DEFERDELETE;
2876 knote_disable(kn);
2877
2878 /* if we took over another's drop clear those flags here */
2879 if (kn->kn_status & KN_STOLENDROP) {
2880 assert(kn->kn_status & KN_DROPPING);
2881 /*
2882 * the knote will be dropped when the
2883 * deferred deletion occurs
2884 */
2885 kn->kn_status &= ~(KN_DROPPING|KN_STOLENDROP);
2886 }
2887 } else if (kn->kn_status & KN_STOLENDROP) {
2888 /* We now own the drop of the knote. */
2889 assert(kn->kn_status & KN_DROPPING);
2890 knote_unsuppress(kn);
2891 kqunlock(kq);
2892 knote_drop(kn, p);
2893 kqlock(kq);
2894 } else if (kqlock2knotedrop(kq, kn)) {
2895 /* just EV_ONESHOT, _not_ DISPATCH2 */
2896 knote_drop(kn, p);
2897 kqlock(kq);
2898 }
2899 } else if (kn->kn_status & KN_DISPATCH) {
2900 /* disable all dispatch knotes */
2901 knote_disable(kn);
2902 } else if ((kev.flags & EV_CLEAR) == 0) {
2903 /* re-activate in case there are more events */
2904 knote_activate(kn);
2905 }
2906 }
2907
2908 /*
2909 * callback to handle each event as we find it.
2910 * If we have to detach and drop the knote, do
2911 * it while we have the kq unlocked.
2912 */
2913 if (result) {
2914 kqunlock(kq);
2915 error = (callback)(kq, &kev, callback_data);
2916 kqlock(kq);
2917 }
2918 return (error);
2919 }
2920
2921
2922 /*
2923 * Return 0 to indicate that processing should proceed,
2924 * -1 if there is nothing to process.
2925 *
2926 * Called with kqueue locked and returns the same way,
2927 * but may drop lock temporarily.
2928 */
2929 static int
2930 kqworkq_begin_processing(struct kqworkq *kqwq, kq_index_t qos_index, int flags)
2931 {
2932 struct kqrequest *kqr;
2933 thread_t self = current_thread();
2934 __assert_only struct uthread *ut = get_bsdthread_info(self);
2935 thread_t thread;
2936
2937 assert(kqwq->kqwq_state & KQ_WORKQ);
2938 assert(qos_index < KQWQ_NQOS);
2939
2940 kqwq_req_lock(kqwq);
2941 kqr = kqworkq_get_request(kqwq, qos_index);
2942
2943 thread = kqr->kqr_thread;
2944
2945 /* manager skips buckets that haven't asked for its help */
2946 if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
2947
2948 /* If nothing for manager to do, just return */
2949 if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
2950 assert(kqr->kqr_thread != self);
2951 kqwq_req_unlock(kqwq);
2952 return -1;
2953 }
2954
2955 /* bind manager thread from this time on */
2956 kqworkq_bind_thread(kqwq, qos_index, self, flags);
2957
2958 } else {
2959 /* must have been bound by now */
2960 assert(thread == self);
2961 assert(ut->uu_kqueue_bound == qos_index);
2962 assert((ut->uu_kqueue_flags & flags) == ut->uu_kqueue_flags);
2963 }
2964
2965 /* nobody else should still be processing */
2966 assert(kqr->kqr_state & KQWQ_THREQUESTED);
2967 assert((kqr->kqr_state & KQWQ_PROCESSING) == 0);
2968
2969 /* anything left to process? */
2970 if (kqueue_queue_empty(&kqwq->kqwq_kqueue, qos_index)) {
2971 kqwq_req_unlock(kqwq);
2972 return -1;
2973 }
2974
2975 /* convert to processing mode */
2976 /* reset workq triggers and thread requests - maybe processing */
2977 kqr->kqr_state &= ~(KQWQ_HOOKCALLED | KQWQ_WAKEUP);
2978 kqr->kqr_state |= KQWQ_PROCESSING;
2979 kqwq_req_unlock(kqwq);
2980 return 0;
2981 }
2982
2983 /*
2984 * Return 0 to indicate that processing should proceed,
2985 * -1 if there is nothing to process.
2986 *
2987 * Called with kqueue locked and returns the same way,
2988 * but may drop lock temporarily.
2989 * May block.
2990 */
2991 static int
2992 kqueue_begin_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags)
2993 {
2994 struct kqtailq *suppressq;
2995
2996 if (kq->kq_state & KQ_WORKQ)
2997 return kqworkq_begin_processing((struct kqworkq *)kq, qos_index, flags);
2998
2999 assert(qos_index == QOS_INDEX_KQFILE);
3000
3001 /* wait to become the exclusive processing thread */
3002 for (;;) {
3003 if (kq->kq_state & KQ_DRAIN)
3004 return -1;
3005
3006 if ((kq->kq_state & KQ_PROCESSING) == 0)
3007 break;
3008
3009 /* if someone else is processing the queue, wait */
3010 kq->kq_state |= KQ_PROCWAIT;
3011 suppressq = kqueue_get_suppressed_queue(kq, qos_index);
3012 waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
3013 CAST_EVENT64_T(suppressq),
3014 THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
3015
3016 kqunlock(kq);
3017 thread_block(THREAD_CONTINUE_NULL);
3018 kqlock(kq);
3019 }
3020
3021 /* Nobody else processing */
3022
3023 /* clear pre-posts and KQ_WAKEUP now, in case we bail early */
3024 waitq_set_clear_preposts(&kq->kq_wqs);
3025 kq->kq_state &= ~KQ_WAKEUP;
3026
3027 /* anything left to process? */
3028 if (kqueue_queue_empty(kq, qos_index))
3029 return -1;
3030
3031 /* convert to processing mode */
3032 kq->kq_state |= KQ_PROCESSING;
3033
3034 return 0;
3035 }
3036
3037 /*
3038 * kqworkq_end_processing - Complete the processing of a workq kqueue
3039 *
3040 * We may have to request new threads.
3041 * This can happen when there are no waiting processing threads and:
3042 * - there were active events we never got to (count > 0)
3043 * - we pended waitq hook callouts during processing
3044 * - we pended wakeups while processing (or unsuppressing)
3045 *
3046 * Called with kqueue lock held.
3047 */
3048 static void
3049 kqworkq_end_processing(struct kqworkq *kqwq, kq_index_t qos_index, int flags)
3050 {
3051 #pragma unused(flags)
3052
3053 struct kqueue *kq = &kqwq->kqwq_kqueue;
3054 struct kqtailq *suppressq = kqueue_get_suppressed_queue(kq, qos_index);
3055
3056 thread_t self = current_thread();
3057 __assert_only struct uthread *ut = get_bsdthread_info(self);
3058 struct knote *kn;
3059 struct kqrequest *kqr;
3060 int queued_events;
3061 uint16_t pended;
3062 thread_t thread;
3063
3064 assert(kqwq->kqwq_state & KQ_WORKQ);
3065 assert(qos_index < KQWQ_NQOS);
3066
3067 /* leave early if we are not even processing */
3068 kqwq_req_lock(kqwq);
3069 kqr = kqworkq_get_request(kqwq, qos_index);
3070 thread = kqr->kqr_thread;
3071
3072 if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
3073 assert(ut->uu_kqueue_bound == KQWQ_QOS_MANAGER);
3074 assert(ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER);
3075
3076 /* if this bucket didn't need manager help, bail */
3077 if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
3078 assert(thread != self);
3079 kqwq_req_unlock(kqwq);
3080 return;
3081 }
3082
3083 assert(kqr->kqr_state & KQWQ_THREQUESTED);
3084
3085 /* unbound bucket - see if it still needs servicing */
3086 if (thread == THREAD_NULL) {
3087 assert((kqr->kqr_state & KQWQ_PROCESSING) == 0);
3088 assert(TAILQ_EMPTY(suppressq));
3089 } else {
3090 assert(thread == self);
3091 }
3092
3093 } else {
3094 assert(thread == self);
3095 assert(ut->uu_kqueue_bound == qos_index);
3096 assert((ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER) == 0);
3097 }
3098
3099 kqwq_req_unlock(kqwq);
3100
3101 /* Any events queued before we put suppressed ones back? */
3102 queued_events = !kqueue_queue_empty(kq, qos_index);
3103
3104 /*
3105 * Return suppressed knotes to their original state.
3106 * For workq kqueues, suppressed ones that are still
3107 * truly active (not just forced into the queue) will
3108 * set flags we check below to see if anything got
3109 * woken up.
3110 */
3111 while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
3112 assert(kn->kn_status & KN_SUPPRESSED);
3113 knote_unsuppress(kn);
3114 }
3115
3116 kqwq_req_lock(kqwq);
3117
3118 /* Determine if wakeup-type events were pended during servicing */
3119 pended = (kqr->kqr_state & (KQWQ_HOOKCALLED | KQWQ_WAKEUP));
3120
3121 /* unbind the thread */
3122 kqworkq_unbind_thread(kqwq, qos_index, self, flags);
3123
3124 /* Indicate that we are done processing */
3125 kqr->kqr_state &= ~(KQWQ_PROCESSING | \
3126 KQWQ_THREQUESTED | KQWQ_THMANAGER);
3127
3128 /*
3129 * request a new thread if events have happened
3130 * (not just putting stay-active events back).
3131 */
3132 if ((queued_events || pended) &&
3133 !kqueue_queue_empty(kq, qos_index)) {
3134 kqworkq_request_thread(kqwq, qos_index);
3135 }
3136
3137 kqwq_req_unlock(kqwq);
3138 }
3139
3140 /*
3141 * Called with kqueue lock held.
3142 */
3143 static void
3144 kqueue_end_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags)
3145 {
3146 struct knote *kn;
3147 struct kqtailq *suppressq;
3148 int procwait;
3149
3150 if (kq->kq_state & KQ_WORKQ) {
3151 kqworkq_end_processing((struct kqworkq *)kq, qos_index, flags);
3152 return;
3153 }
3154
3155 assert(qos_index == QOS_INDEX_KQFILE);
3156
3157 /*
3158 * Return suppressed knotes to their original state.
3159 * For workq kqueues, suppressed ones that are still
3160 * truly active (not just forced into the queue) will
3161 * set flags we check below to see if anything got
3162 * woken up.
3163 */
3164 suppressq = kqueue_get_suppressed_queue(kq, qos_index);
3165 while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
3166 assert(kn->kn_status & KN_SUPPRESSED);
3167 knote_unsuppress(kn);
3168 }
3169
3170 procwait = (kq->kq_state & KQ_PROCWAIT);
3171 kq->kq_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);
3172
3173 if (procwait) {
3174 /* first wake up any thread already waiting to process */
3175 waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
3176 CAST_EVENT64_T(suppressq),
3177 THREAD_AWAKENED,
3178 WAITQ_ALL_PRIORITIES);
3179 }
3180 }
3181
3182 /*
3183 * kevent_qos_internal_bind - bind thread to processing kqueue
3184 *
3185 * Indicates that the provided thread will be responsible for
3186 * servicing the particular QoS class index specified in the
3187 * parameters. Once the binding is done, any overrides that may
3188 * be associated with the corresponding events can be applied.
3189 *
3190 * This should be called as soon as the thread identity is known,
3191 * preferably while still at high priority during creation.
3192 *
3193 * - caller holds a reference on the kqueue.
3194 * - the thread MUST call kevent_qos_internal after being bound
3195 * or the bucket of events may never be delivered.
3196 * - Nothing locked (may take mutex or block).
3197 */
3198
3199 int
3200 kevent_qos_internal_bind(
3201 struct proc *p,
3202 int qos_class,
3203 thread_t thread,
3204 unsigned int flags)
3205 {
3206 struct fileproc *fp = NULL;
3207 struct kqueue *kq = NULL;
3208 struct kqworkq *kqwq;
3209 struct kqrequest *kqr;
3210 struct uthread *ut;
3211 kq_index_t qos_index;
3212 int res = 0;
3213
3214 assert(thread != THREAD_NULL);
3215 assert(flags & KEVENT_FLAG_WORKQ);
3216
3217 if (thread == THREAD_NULL ||
3218 (flags & KEVENT_FLAG_WORKQ) == 0) {
3219 return EINVAL;
3220 }
3221
3222 ut = get_bsdthread_info(thread);
3223
3224 /* find the kqueue */
3225 res = kevent_get_kq(p, -1, flags, &fp, &kq);
3226 assert(fp == NULL);
3227 if (res)
3228 return res;
3229
3230 /* get the qos index we're going to service */
3231 qos_index = qos_index_for_servicer(qos_class, thread, flags);
3232
3233 /* No need to bind the manager thread to any bucket */
3234 if (qos_index == KQWQ_QOS_MANAGER) {
3235 assert(ut->uu_kqueue_bound == 0);
3236 ut->uu_kqueue_bound = qos_index;
3237 ut->uu_kqueue_flags = flags;
3238 return 0;
3239 }
3240
3241 kqlock(kq);
3242 assert(kq->kq_state & KQ_WORKQ);
3243
3244 kqwq = (struct kqworkq *)kq;
3245 kqr = kqworkq_get_request(kqwq, qos_index);
3246
3247 kqwq_req_lock(kqwq);
3248
3249 /*
3250 * A (non-emergency) request should have been made
3251 * and nobody should already be servicing this bucket.
3252 */
3253 assert(kqr->kqr_state & KQWQ_THREQUESTED);
3254 assert((kqr->kqr_state & KQWQ_THMANAGER) == 0);
3255 assert((kqr->kqr_state & KQWQ_PROCESSING) == 0);
3256
3257 /* Is this an extraneous bind? */
3258 if (thread == kqr->kqr_thread) {
3259 assert(ut->uu_kqueue_bound == qos_index);
3260 goto out;
3261 }
3262
3263 /* nobody else bound and we're not bound elsewhere */
3264 assert(ut->uu_kqueue_bound == 0);
3265 assert(ut->uu_kqueue_flags == 0);
3266 assert(kqr->kqr_thread == THREAD_NULL);
3267
3268 /* Don't bind if there is a conflict */
3269 if (kqr->kqr_thread != THREAD_NULL ||
3270 (kqr->kqr_state & KQWQ_THMANAGER)) {
3271 res = EINPROGRESS;
3272 goto out;
3273 }
3274
3275 /* finally bind the thread */
3276 kqr->kqr_thread = thread;
3277 ut->uu_kqueue_bound = qos_index;
3278 ut->uu_kqueue_flags = flags;
3279
3280 /* add any pending overrides to the thread */
3281 if (kqr->kqr_override_delta) {
3282 thread_add_ipc_override(thread, qos_index + kqr->kqr_override_delta);
3283 }
3284
3285 out:
3286 kqwq_req_unlock(kqwq);
3287 kqunlock(kq);
3288
3289 return res;
3290 }
3291
3292 /*
3293 * kevent_qos_internal_unbind - unbind thread from processing kqueue
3294 *
3295 * End processing the per-QoS bucket of events and allow other threads
3296 * to be requested for future servicing.
3297 *
3298 * caller holds a reference on the kqueue.
3299 * thread is the current thread.
3300 */
3301
3302 int
3303 kevent_qos_internal_unbind(
3304 struct proc *p,
3305 int qos_class,
3306 thread_t thread,
3307 unsigned int flags)
3308 {
3309 struct kqueue *kq;
3310 struct uthread *ut;
3311 struct fileproc *fp = NULL;
3312 kq_index_t qos_index;
3313 kq_index_t end_index;
3314 int res;
3315
3316 assert(flags & KEVENT_FLAG_WORKQ);
3317 assert(thread == current_thread());
3318
3319 if (thread == THREAD_NULL ||
3320 (flags & KEVENT_FLAG_WORKQ) == 0)
3321 return EINVAL;
3322
3323 /* get the kq */
3324 res = kevent_get_kq(p, -1, flags, &fp, &kq);
3325 assert(fp == NULL);
3326 if (res)
3327 return res;
3328
3329 assert(kq->kq_state & KQ_WORKQ);
3330
3331 /* get the index we have been servicing */
3332 qos_index = qos_index_for_servicer(qos_class, thread, flags);
3333
3334 ut = get_bsdthread_info(thread);
3335
3336 /* early out if we were already unbound - or never bound */
3337 if (ut->uu_kqueue_bound != qos_index) {
3338 __assert_only struct kqworkq *kqwq = (struct kqworkq *)kq;
3339 __assert_only struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
3340
3341 assert(ut->uu_kqueue_bound == 0);
3342 assert(ut->uu_kqueue_flags == 0);
3343 assert(kqr->kqr_thread != thread);
3344 return EALREADY;
3345 }
3346
3347 /* unbind from all the buckets we might own */
3348 end_index = (qos_index == KQWQ_QOS_MANAGER) ?
3349 0 : qos_index;
3350 kqlock(kq);
3351 do {
3352 kqueue_end_processing(kq, qos_index, flags);
3353 } while (qos_index-- > end_index);
3354 kqunlock(kq);
3355
3356 /* indicate that we are done processing in the uthread */
3357 ut->uu_kqueue_bound = 0;
3358 ut->uu_kqueue_flags = 0;
3359
3360 return 0;
3361 }
3362
3363 /*
3364 * kqueue_process - process the triggered events in a kqueue
3365 *
3366 * Walk the queued knotes and validate that they are
3367 * really still triggered events by calling the filter
3368 * routines (if necessary). Hold a use reference on
3369 * the knote to avoid it being detached. For each event
3370 * that is still considered triggered, invoke the
3371 * callback routine provided.
3372 *
3373 * caller holds a reference on the kqueue.
3374 * kqueue locked on entry and exit - but may be dropped
3375 * kqueue list locked (held for duration of call)
3376 */
3377
3378 static int
3379 kqueue_process(struct kqueue *kq,
3380 kevent_callback_t callback,
3381 void *callback_data,
3382 struct filt_process_s *process_data,
3383 kq_index_t servicer_qos_index,
3384 int *countp,
3385 struct proc *p)
3386 {
3387 unsigned int flags = process_data ? process_data->fp_flags : 0;
3388 kq_index_t start_index, end_index, i;
3389 struct knote *kn;
3390 int nevents = 0;
3391 int error = 0;
3392
3393 /*
3394 * Based on the native QoS of the servicer,
3395 * determine the range of QoSes that need checking
3396 */
3397 start_index = servicer_qos_index;
3398 end_index = (start_index == KQWQ_QOS_MANAGER) ? 0 : start_index;
3399
3400 i = start_index;
3401
3402 do {
3403 if (kqueue_begin_processing(kq, i, flags) == -1) {
3404 *countp = 0;
3405 /* Nothing to process */
3406 continue;
3407 }
3408
3409 /*
3410 * loop through the enqueued knotes, processing each one and
3411 * revalidating those that need it. As they are processed,
3412 * they get moved to the inprocess queue (so the loop can end).
3413 */
3414 error = 0;
3415
3416 struct kqtailq *base_queue = kqueue_get_base_queue(kq, i);
3417 struct kqtailq *queue = kqueue_get_high_queue(kq, i);
3418 do {
3419 while (error == 0 &&
3420 (kn = TAILQ_FIRST(queue)) != NULL) {
3421 /* Process the knote */
3422 error = knote_process(kn, callback, callback_data, process_data, p);
3423 if (error == EJUSTRETURN)
3424 error = 0;
3425 else
3426 nevents++;
3427
3428 /* break out if no more space for additional events */
3429 if (error == EWOULDBLOCK) {
3430 if ((kq->kq_state & KQ_WORKQ) == 0)
3431 kqueue_end_processing(kq, i, flags);
3432 error = 0;
3433 goto out;
3434 }
3435 }
3436 } while (error == 0 && queue-- > base_queue);
3437
3438 /* let somebody else process events if we're not in workq mode */
3439 if ((kq->kq_state & KQ_WORKQ) == 0)
3440 kqueue_end_processing(kq, i, flags);
3441
3442 } while (i-- > end_index);
3443
3444 out:
3445 *countp = nevents;
3446 return (error);
3447 }
3448
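/*
 * kqueue_scan_continue - continuation for a blocked kqueue_scan
 *
 * Runs when the scanning thread is awakened (or times out); processes
 * any now-pending events, re-blocks if there is still nothing to
 * report, and otherwise completes the call through the saved kevent
 * continuation.
 */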
3449 static void
3450 kqueue_scan_continue(void *data, wait_result_t wait_result)
3451 {
3452 thread_t self = current_thread();
3453 uthread_t ut = (uthread_t)get_bsdthread_info(self);
3454 struct _kqueue_scan * cont_args = &ut->uu_kevent.ss_kqueue_scan;
3455 struct kqueue *kq = (struct kqueue *)data;
3456 struct filt_process_s *process_data = cont_args->process_data;
3457 int error;
3458 int count;
3459
3460 /* convert the (previous) wait_result to a proper error */
3461 switch (wait_result) {
3462 case THREAD_AWAKENED: {
3463 kqlock(kq);
3464 retry:
3465 error = kqueue_process(kq, cont_args->call, cont_args->data,
3466 process_data, cont_args->servicer_qos_index,
3467 &count, current_proc());
3468 if (error == 0 && count == 0) {
3469 if (kq->kq_state & KQ_WAKEUP)
3470 goto retry;
3471 waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
3472 KQ_EVENT, THREAD_ABORTSAFE,
3473 cont_args->deadline);
3474 kq->kq_state |= KQ_SLEEP;
3475 kqunlock(kq);
3476 thread_block_parameter(kqueue_scan_continue, kq);
3477 /* NOTREACHED */
3478 }
3479 kqunlock(kq);
3480 } break;
3481 case THREAD_TIMED_OUT:
3482 error = EWOULDBLOCK;
3483 break;
3484 case THREAD_INTERRUPTED:
3485 error = EINTR;
3486 break;
3487 case THREAD_RESTART:
3488 error = EBADF;
3489 break;
3490 default:
3491 panic("%s: - invalid wait_result (%d)", __func__,
3492 wait_result);
3493 error = 0;
3494 }
3495
3496 /* call the continuation with the results */
3497 assert(cont_args->cont != NULL);
3498 (cont_args->cont)(kq, cont_args->data, error);
3499 }
3500
3501
3502 /*
3503 * kqueue_scan - scan and wait for events in a kqueue
3504 *
3505 * Process the triggered events in a kqueue.
3506 *
3507 * If there are no events triggered, arrange to
3508 * wait for them. If the caller provided a
3509 * continuation routine, the scan resumes and
3510 * completes through that continuation after blocking.
3511 *
3512 * The callback routine must be valid.
3513 * The caller must hold a use-count reference on the kq.
3514 */
3515
3516 int
3517 kqueue_scan(struct kqueue *kq,
3518 kevent_callback_t callback,
3519 kqueue_continue_t continuation,
3520 void *callback_data,
3521 struct filt_process_s *process_data,
3522 struct timeval *atvp,
3523 struct proc *p)
3524 {
3525 thread_continue_t cont = THREAD_CONTINUE_NULL;
3526 kq_index_t servicer_qos_index;
3527 unsigned int flags;
3528 uint64_t deadline;
3529 int error;
3530 int first;
3531 int fd;
3532
3533 assert(callback != NULL);
3534
3535 /*
3536 * Determine which QoS index we are servicing
3537 */
3538 flags = (process_data) ? process_data->fp_flags : 0;
3539 fd = (process_data) ? process_data->fp_fd : -1;
3540 servicer_qos_index = (kq->kq_state & KQ_WORKQ) ?
3541 qos_index_for_servicer(fd, current_thread(), flags) :
3542 QOS_INDEX_KQFILE;
3543
3544 first = 1;
3545 for (;;) {
3546 wait_result_t wait_result;
3547 int count;
3548
3549 /*
3550 * Make a pass through the kq to find events already
3551 * triggered.
3552 */
3553 kqlock(kq);
3554 error = kqueue_process(kq, callback, callback_data,
3555 process_data, servicer_qos_index,
3556 &count, p);
3557 if (error || count)
3558 break; /* lock still held */
3559
3560 /* looks like we have to consider blocking */
3561 if (first) {
3562 first = 0;
3563 /* convert the timeout to a deadline once */
3564 if (atvp->tv_sec || atvp->tv_usec) {
3565 uint64_t now;
3566
3567 clock_get_uptime(&now);
3568 nanoseconds_to_absolutetime((uint64_t)atvp->tv_sec * NSEC_PER_SEC +
3569 atvp->tv_usec * (long)NSEC_PER_USEC,
3570 &deadline);
3571 if (now >= deadline) {
3572 /* non-blocking call */
3573 error = EWOULDBLOCK;
3574 break; /* lock still held */
3575 }
3576 deadline -= now;
3577 clock_absolutetime_interval_to_deadline(deadline, &deadline);
3578 } else {
3579 deadline = 0; /* block forever */
3580 }
3581
3582 if (continuation) {
3583 uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
3584 struct _kqueue_scan *cont_args = &ut->uu_kevent.ss_kqueue_scan;
3585
3586 cont_args->call = callback;
3587 cont_args->cont = continuation;
3588 cont_args->deadline = deadline;
3589 cont_args->data = callback_data;
3590 cont_args->process_data = process_data;
3591 cont_args->servicer_qos_index = servicer_qos_index;
3592 cont = kqueue_scan_continue;
3593 }
3594 }
3595
3596 /* If awakened during processing, try again */
3597 if (kq->kq_state & KQ_WAKEUP) {
3598 kqunlock(kq);
3599 continue;
3600 }
3601
3602 /* go ahead and wait */
3603 waitq_assert_wait64_leeway((struct waitq *)&kq->kq_wqs,
3604 KQ_EVENT, THREAD_ABORTSAFE,
3605 TIMEOUT_URGENCY_USER_NORMAL,
3606 deadline, TIMEOUT_NO_LEEWAY);
3607 kq->kq_state |= KQ_SLEEP;
3608 kqunlock(kq);
3609 wait_result = thread_block_parameter(cont, kq);
3610 /* NOTREACHED if (continuation != NULL) */
3611
3612 switch (wait_result) {
3613 case THREAD_AWAKENED:
3614 continue;
3615 case THREAD_TIMED_OUT:
3616 return EWOULDBLOCK;
3617 case THREAD_INTERRUPTED:
3618 return EINTR;
3619 case THREAD_RESTART:
3620 return EBADF;
3621 default:
3622 panic("%s: - bad wait_result (%d)", __func__,
3623 wait_result);
3624 error = 0;
3625 }
3626 }
3627 kqunlock(kq);
3628 return (error);
3629 }
3630
3631
3632 /*
3633 * XXX
3634 * This could be expanded to call kqueue_scan, if desired.
3635 */
3636 /*ARGSUSED*/
3637 static int
3638 kqueue_read(__unused struct fileproc *fp,
3639 __unused struct uio *uio,
3640 __unused int flags,
3641 __unused vfs_context_t ctx)
3642 {
3643 return (ENXIO);
3644 }
3645
3646 /*ARGSUSED*/
3647 static int
3648 kqueue_write(__unused struct fileproc *fp,
3649 __unused struct uio *uio,
3650 __unused int flags,
3651 __unused vfs_context_t ctx)
3652 {
3653 return (ENXIO);
3654 }
3655
3656 /*ARGSUSED*/
3657 static int
3658 kqueue_ioctl(__unused struct fileproc *fp,
3659 __unused u_long com,
3660 __unused caddr_t data,
3661 __unused vfs_context_t ctx)
3662 {
3663 return (ENOTTY);
3664 }
3665
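/*
 * kqueue_select - select()/poll() support for a kqueue descriptor
 *
 * Report the kqueue as readable only if at least one queued knote is
 * (or, for stay-active knotes, peeks as) ready to deliver an event.
 */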
3666 /*ARGSUSED*/
3667 static int
3668 kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
3669 __unused vfs_context_t ctx)
3670 {
3671 struct kqueue *kq = (struct kqueue *)fp->f_data;
3672 struct kqtailq *queue;
3673 struct kqtailq *suppressq;
3674 struct knote *kn;
3675 int retnum = 0;
3676
3677 if (which != FREAD)
3678 return (0);
3679
3680 kqlock(kq);
3681
3682 assert((kq->kq_state & KQ_WORKQ) == 0);
3683
3684 /*
3685 * If this is the first pass, link the wait queue associated with
3686 * the kqueue onto the wait queue set for the select(). Normally we
3687 * use selrecord() for this, but it uses the wait queue within the
3688 * selinfo structure and we need to use the main one for the kqueue to
3689 * catch events from KN_STAYQUEUED sources. So we do the linkage manually.
3690 * (The select() call will unlink them when it ends).
3691 */
3692 if (wq_link_id != NULL) {
3693 thread_t cur_act = current_thread();
3694 struct uthread * ut = get_bsdthread_info(cur_act);
3695
3696 kq->kq_state |= KQ_SEL;
3697 waitq_link((struct waitq *)&kq->kq_wqs, ut->uu_wqset,
3698 WAITQ_SHOULD_LOCK, (uint64_t *)wq_link_id);
3699
3700 /* always consume the reserved link object */
3701 waitq_link_release(*(uint64_t *)wq_link_id);
3702 *(uint64_t *)wq_link_id = 0;
3703
3704 /*
3705 * selprocess() is expecting that we send it back the waitq
3706 * that was just added to the thread's waitq set. In order
3707 * to not change the selrecord() API (which is exported to
3708 * kexts), we pass this value back through the
3709 * void *wq_link_id pointer we were passed. We need to use
3710 * memcpy here because the pointer may not be properly aligned
3711 * on 32-bit systems.
3712 */
3713 void *wqptr = &kq->kq_wqs;
3714 memcpy(wq_link_id, (void *)&wqptr, sizeof(void *));
3715 }
3716
3717 if (kqueue_begin_processing(kq, QOS_INDEX_KQFILE, 0) == -1) {
3718 kqunlock(kq);
3719 return (0);
3720 }
3721
3722 queue = kqueue_get_base_queue(kq, QOS_INDEX_KQFILE);
3723 if (!TAILQ_EMPTY(queue)) {
3724 /*
3725 * there is something queued - but it might be a
3726 * KN_STAYACTIVE knote, which may or may not have
3727 * any events pending. Otherwise, we have to walk
3728 * the list of knotes to see, and peek at the
3729 * (non-vanished) stay-active ones to be really sure.
3730 */
3731 while ((kn = (struct knote *)TAILQ_FIRST(queue)) != NULL) {
3732 if (kn->kn_status & KN_ACTIVE) {
3733 retnum = 1;
3734 goto out;
3735 }
3736 assert(kn->kn_status & KN_STAYACTIVE);
3737 knote_suppress(kn);
3738 }
3739
3740 /*
3741 * There were no regular events on the queue, so take
3742 * a deeper look at the stay-queued ones we suppressed.
3743 */
3744 suppressq = kqueue_get_suppressed_queue(kq, QOS_INDEX_KQFILE);
3745 while ((kn = (struct knote *)TAILQ_FIRST(suppressq)) != NULL) {
3746 unsigned peek = 1;
3747
3748 /* If it didn't vanish while suppressed - peek at it */
3749 if (kqlock2knoteuse(kq, kn)) {
3750
3751 peek = knote_fops(kn)->f_peek(kn);
3752
3753 /* if it dropped while getting lock - move on */
3754 if (!knoteuse2kqlock(kq, kn, 0))
3755 continue;
3756 }
3757
3758 /* unsuppress it */
3759 knote_unsuppress(kn);
3760
3761 /* has data or it has to report a vanish */
3762 if (peek > 0) {
3763 retnum = 1;
3764 goto out;
3765 }
3766 }
3767 }
3768
3769 out:
3770 kqueue_end_processing(kq, QOS_INDEX_KQFILE, 0);
3771 kqunlock(kq);
3772 return (retnum);
3773 }
3774
3775 /*
3776 * kqueue_close -
3777 */
3778 /*ARGSUSED*/
3779 static int
3780 kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
3781 {
3782 struct kqfile *kqf = (struct kqfile *)fg->fg_data;
3783
3784 assert((kqf->kqf_state & KQ_WORKQ) == 0);
3785 kqueue_dealloc(&kqf->kqf_kqueue);
3786 fg->fg_data = NULL;
3787 return (0);
3788 }
3789
3790 /*ARGSUSED*/
3791 /*
3792 * The caller has taken a use-count reference on this kqueue and will donate it
3793 * to the kqueue we are being added to. This keeps the kqueue from closing until
3794 * that relationship is torn down.
3795 */
3796 static int
3797 kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, __unused vfs_context_t ctx)
3798 {
3799 struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
3800 struct kqueue *kq = &kqf->kqf_kqueue;
3801 struct kqueue *parentkq = knote_get_kq(kn);
3802
3803 assert((kqf->kqf_state & KQ_WORKQ) == 0);
3804
3805 if (parentkq == kq ||
3806 kn->kn_filter != EVFILT_READ) {
3807 kn->kn_flags = EV_ERROR;
3808 kn->kn_data = EINVAL;
3809 return 0;
3810 }
3811
3812 /*
3813 * We have to avoid creating a cycle when nesting kqueues
3814 * inside another. Rather than trying to walk the whole
3815 * potential DAG of nested kqueues, we just use a simple
3816 * ceiling protocol. When a kqueue is inserted into another,
3817 * we check that the (future) parent is not already nested
3818 * into another kqueue at a lower level than the potential
3819 * child (because it could indicate a cycle). If that test
3820 * passes, we just mark the nesting levels accordingly.
3821 */
3822
3823 kqlock(parentkq);
3824 if (parentkq->kq_level > 0 &&
3825 parentkq->kq_level < kq->kq_level)
3826 {
3827 kqunlock(parentkq);
3828 kn->kn_flags = EV_ERROR;
3829 kn->kn_data = EINVAL;
3830 return 0;
3831 } else {
3832 /* set parent level appropriately */
3833 if (parentkq->kq_level == 0)
3834 parentkq->kq_level = 2;
3835 if (parentkq->kq_level < kq->kq_level + 1)
3836 parentkq->kq_level = kq->kq_level + 1;
3837 kqunlock(parentkq);
3838
3839 kn->kn_filtid = EVFILTID_KQREAD;
3840 kqlock(kq);
3841 KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
3842 /* indicate nesting in child, if needed */
3843 if (kq->kq_level == 0)
3844 kq->kq_level = 1;
3845
3846 int count = kq->kq_count;
3847 kqunlock(kq);
3848 return (count > 0);
3849 }
3850 }
3851
3852 /*
3853 * kqueue_drain - called when kq is closed
3854 */
3855 /*ARGSUSED*/
3856 static int
3857 kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
3858 {
3859 struct kqueue *kq = (struct kqueue *)fp->f_fglob->fg_data;
3860
3861 assert((kq->kq_state & KQ_WORKQ) == 0);
3862
3863 kqlock(kq);
3864 kq->kq_state |= KQ_DRAIN;
3865 kqueue_interrupt(kq);
3866 kqunlock(kq);
3867 return (0);
3868 }
3869
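/*
 * kqueue_stat - fstat() support for a kqueue descriptor
 *
 * Report the number of pending events in st_size and the size of the
 * event structure this kqueue delivers in st_blksize.
 */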
3870 /*ARGSUSED*/
3871 int
3872 kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
3873 {
3874 assert((kq->kq_state & KQ_WORKQ) == 0);
3875
3876 kqlock(kq);
3877 if (isstat64 != 0) {
3878 struct stat64 *sb64 = (struct stat64 *)ub;
3879
3880 bzero((void *)sb64, sizeof(*sb64));
3881 sb64->st_size = kq->kq_count;
3882 if (kq->kq_state & KQ_KEV_QOS)
3883 sb64->st_blksize = sizeof(struct kevent_qos_s);
3884 else if (kq->kq_state & KQ_KEV64)
3885 sb64->st_blksize = sizeof(struct kevent64_s);
3886 else if (IS_64BIT_PROCESS(p))
3887 sb64->st_blksize = sizeof(struct user64_kevent);
3888 else
3889 sb64->st_blksize = sizeof(struct user32_kevent);
3890 sb64->st_mode = S_IFIFO;
3891 } else {
3892 struct stat *sb = (struct stat *)ub;
3893
3894 bzero((void *)sb, sizeof(*sb));
3895 sb->st_size = kq->kq_count;
3896 if (kq->kq_state & KQ_KEV_QOS)
3897 sb->st_blksize = sizeof(struct kevent_qos_s);
3898 else if (kq->kq_state & KQ_KEV64)
3899 sb->st_blksize = sizeof(struct kevent64_s);
3900 else if (IS_64BIT_PROCESS(p))
3901 sb->st_blksize = sizeof(struct user64_kevent);
3902 else
3903 sb->st_blksize = sizeof(struct user32_kevent);
3904 sb->st_mode = S_IFIFO;
3905 }
3906 kqunlock(kq);
3907 return (0);
3908 }
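/*
 * Illustrative userspace sketch (compiled out), assuming fstat() on a
 * kqueue descriptor is serviced by kqueue_stat() above: st_size then
 * reflects the number of pending events and st_mode reads as S_IFIFO.
 */
#if 0
#include <sys/event.h>
#include <sys/stat.h>
#include <stdio.h>
#include <unistd.h>

static void
kqueue_stat_example(void)
{
	int kq = kqueue();
	struct stat st;

	if (kq >= 0 && fstat(kq, &st) == 0) {
		/* No events registered yet, so the count is expected to be 0. */
		printf("pending events: %lld, fifo-like: %d\n",
		    (long long)st.st_size, S_ISFIFO(st.st_mode));
	}
	close(kq);
}
#endif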
3909
3910
3911 /*
3912 * Interact with the pthread kext to request a servicing there.
3913 * Eventually, this will request threads at specific QoS levels.
3914 * For now, it only requests a dispatch-manager-QoS thread, and
3915 * only one-at-a-time.
3916 *
3917 * - Caller holds the workq request lock
3918 *
3919 * - May be called with the kqueue's wait queue set locked,
3920 * so cannot do anything that could recurse on that.
3921 */
3922 static void
3923 kqworkq_request_thread(
3924 struct kqworkq *kqwq,
3925 kq_index_t qos_index)
3926 {
3927 struct kqrequest *kqr;
3928
3929 assert(kqwq->kqwq_state & KQ_WORKQ);
3930 assert(qos_index < KQWQ_NQOS);
3931
3932 kqr = kqworkq_get_request(kqwq, qos_index);
3933
3934 /*
3935 * If we have already requested a thread, and it hasn't
3936 * started processing yet, there's no use hammering away
3937 * on the pthread kext.
3938 */
3939 if (kqr->kqr_state & KQWQ_THREQUESTED)
3940 return;
3941
3942 assert(kqr->kqr_thread == THREAD_NULL);
3943
3944 /* request additional workq threads if appropriate */
3945 if (pthread_functions != NULL &&
3946 pthread_functions->workq_reqthreads != NULL) {
3947 unsigned int flags = KEVENT_FLAG_WORKQ;
3948
3949 /* Compute a priority based on qos_index. */
3950 struct workq_reqthreads_req_s request = {
3951 .priority = qos_from_qos_index(qos_index),
3952 .count = 1
3953 };
3954
3955 thread_t wqthread;
3956 wqthread = (*pthread_functions->workq_reqthreads)(kqwq->kqwq_p, 1, &request);
3957 kqr->kqr_state |= KQWQ_THREQUESTED;
3958
3959 /* Have we been switched to the emergency/manager thread? */
3960 if (wqthread == (thread_t)-1) {
3961 flags |= KEVENT_FLAG_WORKQ_MANAGER;
3962 wqthread = THREAD_NULL;
3963 } else if (qos_index == KQWQ_QOS_MANAGER)
3964 flags |= KEVENT_FLAG_WORKQ_MANAGER;
3965
3966 /* bind the thread */
3967 kqworkq_bind_thread(kqwq, qos_index, wqthread, flags);
3968 }
3969 }
3970
3971 /*
3972 * If we aren't already busy processing events [for this QoS],
3973 * request workq thread support as appropriate.
3974 *
3975 * TBD - for now, we don't segregate out processing by QoS.
3976 *
3977 * - May be called with the kqueue's wait queue set locked,
3978 * so cannot do anything that could recurse on that.
3979 */
3980 static void
3981 kqworkq_request_help(
3982 struct kqworkq *kqwq,
3983 kq_index_t qos_index,
3984 uint32_t type)
3985 {
3986 struct kqrequest *kqr;
3987
3988 /* convert to thread qos value */
3989 assert(qos_index < KQWQ_NQOS);
3990
3991 kqwq_req_lock(kqwq);
3992 kqr = kqworkq_get_request(kqwq, qos_index);
3993
3994 /*
3995 * If someone is processing the queue, just mark what type
3996 * of attempt this was (from a kq wakeup or from a waitq hook).
3997 * They'll be noticed at the end of servicing and a new thread
3998 * will be requested at that point.
3999 */
4000 if (kqr->kqr_state & KQWQ_PROCESSING) {
4001 kqr->kqr_state |= type;
4002 kqwq_req_unlock(kqwq);
4003 return;
4004 }
4005
4006 kqworkq_request_thread(kqwq, qos_index);
4007 kqwq_req_unlock(kqwq);
4008 }
4009
4010 /*
4011 * These arrays describe the low and high qindexes for a given qos_index.
4012 * The values come from the chart in <sys/eventvar.h> (must stay in sync).
4013 */
4014 static kq_index_t _kq_base_index[KQWQ_NQOS] = {0, 0, 6, 11, 15, 18, 20, 21};
4015 static kq_index_t _kq_high_index[KQWQ_NQOS] = {0, 5, 10, 14, 17, 19, 20, 21};
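/*
 * Illustrative sketch (compiled out): each QoS bucket owns the queue slots
 * [base, high], so high - base is the headroom available for override
 * levels above that QoS. For example, qos_index 2 maps to slots 6..10,
 * leaving room for four override steps.
 */
#if 0
static void
kq_index_layout_example(void)
{
	kq_index_t qos = 2;

	assert(_kq_high_index[qos] - _kq_base_index[qos] == 4);
}
#endif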
4016
4017 static struct kqtailq *
4018 kqueue_get_base_queue(struct kqueue *kq, kq_index_t qos_index)
4019 {
4020 assert(qos_index < KQWQ_NQOS);
4021 return &kq->kq_queue[_kq_base_index[qos_index]];
4022 }
4023
4024 static struct kqtailq *
4025 kqueue_get_high_queue(struct kqueue *kq, kq_index_t qos_index)
4026 {
4027 assert(qos_index < KQWQ_NQOS);
4028 return &kq->kq_queue[_kq_high_index[qos_index]];
4029 }
4030
4031 static int
4032 kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index)
4033 {
4034 struct kqtailq *base_queue = kqueue_get_base_queue(kq, qos_index);
4035 struct kqtailq *queue = kqueue_get_high_queue(kq, qos_index);
4036
4037 do {
4038 if (!TAILQ_EMPTY(queue))
4039 return 0;
4040 } while (queue-- > base_queue);
4041 return 1;
4042 }
4043
4044 static struct kqtailq *
4045 kqueue_get_suppressed_queue(struct kqueue *kq, kq_index_t qos_index)
4046 {
4047 if (kq->kq_state & KQ_WORKQ) {
4048 struct kqworkq *kqwq = (struct kqworkq *)kq;
4049 struct kqrequest *kqr;
4050
4051 kqr = kqworkq_get_request(kqwq, qos_index);
4052 return &kqr->kqr_suppressed;
4053 } else {
4054 struct kqfile *kqf = (struct kqfile *)kq;
4055 return &kqf->kqf_suppressed;
4056 }
4057 }
4058
4059 static kq_index_t
4060 knote_get_queue_index(struct knote *kn)
4061 {
4062 kq_index_t override_index = knote_get_qos_override_index(kn);
4063 kq_index_t qos_index = knote_get_qos_index(kn);
4064 struct kqueue *kq = knote_get_kq(kn);
4065 kq_index_t res;
4066
4067 if ((kq->kq_state & KQ_WORKQ) == 0) {
4068 assert(qos_index == 0);
4069 assert(override_index == 0);
4070 }
4071 res = _kq_base_index[qos_index];
4072 if (override_index > qos_index)
4073 res += override_index - qos_index;
4074
4075 assert(res <= _kq_high_index[qos_index]);
4076 return res;
4077 }
4078
4079 static struct kqtailq *
4080 knote_get_queue(struct knote *kn)
4081 {
4082 kq_index_t qindex = knote_get_queue_index(kn);
4083
4084 return &(knote_get_kq(kn))->kq_queue[qindex];
4085 }
4086
4087 static struct kqtailq *
4088 knote_get_suppressed_queue(struct knote *kn)
4089 {
4090 kq_index_t qos_index = knote_get_qos_index(kn);
4091 struct kqueue *kq = knote_get_kq(kn);
4092
4093 return kqueue_get_suppressed_queue(kq, qos_index);
4094 }
4095
4096 static kq_index_t
4097 knote_get_req_index(struct knote *kn)
4098 {
4099 return kn->kn_req_index;
4100 }
4101
4102 static kq_index_t
4103 knote_get_qos_index(struct knote *kn)
4104 {
4105 return kn->kn_qos_index;
4106 }
4107
4108 static void
4109 knote_set_qos_index(struct knote *kn, kq_index_t qos_index)
4110 {
4111 struct kqueue *kq = knote_get_kq(kn);
4112
4113 assert(qos_index < KQWQ_NQOS);
4114 assert((kn->kn_status & KN_QUEUED) == 0);
4115
4116 if (kq->kq_state & KQ_WORKQ)
4117 assert(qos_index > QOS_INDEX_KQFILE);
4118 else
4119 assert(qos_index == QOS_INDEX_KQFILE);
4120
4121 /* always set requested */
4122 kn->kn_req_index = qos_index;
4123
4124 /* only adjust in-use qos index when not suppressed */
4125 if ((kn->kn_status & KN_SUPPRESSED) == 0)
4126 kn->kn_qos_index = qos_index;
4127 }
4128
4129 static kq_index_t
4130 knote_get_qos_override_index(struct knote *kn)
4131 {
4132 return kn->kn_qos_override;
4133 }
4134
4135 static void
4136 knote_set_qos_override_index(struct knote *kn, kq_index_t override_index)
4137 {
4138 struct kqueue *kq = knote_get_kq(kn);
4139 kq_index_t qos_index = knote_get_qos_index(kn);
4140
4141 assert((kn->kn_status & KN_QUEUED) == 0);
4142
4143 if (override_index == KQWQ_QOS_MANAGER)
4144 assert(qos_index == KQWQ_QOS_MANAGER);
4145 else
4146 assert(override_index < KQWQ_QOS_MANAGER);
4147
4148 kn->kn_qos_override = override_index;
4149
4150 /*
4151 * If this is a workq kqueue, apply the override to the
4152 * workq servicing thread.
4153 */
4154 if (kq->kq_state & KQ_WORKQ) {
4155 struct kqworkq *kqwq = (struct kqworkq *)kq;
4156
4157 assert(qos_index > QOS_INDEX_KQFILE);
4158 kqworkq_update_override(kqwq, qos_index, override_index);
4159 }
4160 }
4161
4162 static void
4163 kqworkq_update_override(struct kqworkq *kqwq, kq_index_t qos_index, kq_index_t override_index)
4164 {
4165 struct kqrequest *kqr;
4166 kq_index_t new_delta;
4167 kq_index_t old_delta;
4168
4169 new_delta = (override_index > qos_index) ?
4170 override_index - qos_index : 0;
4171
4172 kqr = kqworkq_get_request(kqwq, qos_index);
4173
4174 kqwq_req_lock(kqwq);
4175 old_delta = kqr->kqr_override_delta;
4176
4177 if (new_delta > old_delta) {
4178 thread_t wqthread = kqr->kqr_thread;
4179
4180 /* store the new override delta */
4181 kqr->kqr_override_delta = new_delta;
4182
4183 /* apply the override to [incoming?] servicing thread */
4184 if (wqthread) {
4185 /* only apply if non-manager */
4186 if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
4187 if (old_delta)
4188 thread_update_ipc_override(wqthread, override_index);
4189 else
4190 thread_add_ipc_override(wqthread, override_index);
4191 }
4192 }
4193 }
4194 kqwq_req_unlock(kqwq);
4195 }
4196
4197 /* called with the kqworkq lock held */
4198 static void
4199 kqworkq_bind_thread(
4200 struct kqworkq *kqwq,
4201 kq_index_t qos_index,
4202 thread_t thread,
4203 unsigned int flags)
4204 {
4205 struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
4206 thread_t old_thread = kqr->kqr_thread;
4207 struct uthread *ut;
4208
4209 assert(kqr->kqr_state & KQWQ_THREQUESTED);
4210
4211 /* If no identity yet, just set flags as needed */
4212 if (thread == THREAD_NULL) {
4213 assert(old_thread == THREAD_NULL);
4214
4215 /* emergency or unidentified */
4216 if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
4217 assert((kqr->kqr_state & KQWQ_THMANAGER) == 0);
4218 kqr->kqr_state |= KQWQ_THMANAGER;
4219 }
4220 return;
4221 }
4222
4223 /* Known thread identity */
4224 ut = get_bsdthread_info(thread);
4225
4226 /*
4227 * If this is a manager, and the manager request bit is
4228 * not set, assure no other thread is bound. If the bit
4229 * is set, make sure the old thread is us (or not set).
4230 */
4231 if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
4232 if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
4233 assert(old_thread == THREAD_NULL);
4234 kqr->kqr_state |= KQWQ_THMANAGER;
4235 } else if (old_thread == THREAD_NULL) {
4236 kqr->kqr_thread = thread;
4237 ut->uu_kqueue_bound = KQWQ_QOS_MANAGER;
4238 ut->uu_kqueue_flags = (KEVENT_FLAG_WORKQ |
4239 KEVENT_FLAG_WORKQ_MANAGER);
4240 } else {
4241 assert(thread == old_thread);
4242 assert(ut->uu_kqueue_bound == KQWQ_QOS_MANAGER);
4243 assert(ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER);
4244 }
4245 return;
4246 }
4247
4248 /* Just a normal one-queue servicing thread */
4249 assert(old_thread == THREAD_NULL);
4250 assert((kqr->kqr_state & KQWQ_THMANAGER) == 0);
4251
4252 kqr->kqr_thread = thread;
4253
4254 /* apply an ipc QoS override if one is needed */
4255 if (kqr->kqr_override_delta)
4256 thread_add_ipc_override(thread, qos_index + kqr->kqr_override_delta);
4257
4258 /* indicate that we are processing in the uthread */
4259 ut->uu_kqueue_bound = qos_index;
4260 ut->uu_kqueue_flags = flags;
4261 }
4262
4263 /* called with the kqworkq lock held */
4264 static void
4265 kqworkq_unbind_thread(
4266 struct kqworkq *kqwq,
4267 kq_index_t qos_index,
4268 thread_t thread,
4269 __unused unsigned int flags)
4270 {
4271 struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
4272 kq_index_t override = 0;
4273
4274 assert(thread == current_thread());
4275
4276 /*
4277 * If there is an override, drop it from the current thread
4278 * and then we are free to recompute (a potentially lower)
4279 * minimum override to apply to the next thread request.
4280 */
4281 if (kqr->kqr_override_delta) {
4282 struct kqtailq *base_queue = kqueue_get_base_queue(&kqwq->kqwq_kqueue, qos_index);
4283 struct kqtailq *queue = kqueue_get_high_queue(&kqwq->kqwq_kqueue, qos_index);
4284
4285 /* if not bound to a manager thread, drop the current ipc override */
4286 if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
4287 assert(thread == kqr->kqr_thread);
4288 thread_drop_ipc_override(thread);
4289 }
4290
4291 /* recompute the new override */
4292 do {
4293 if (!TAILQ_EMPTY(queue)) {
4294 override = queue - base_queue;
4295 break;
4296 }
4297 } while (queue-- > base_queue);
4298 }
4299
4300 /* unbind the thread and apply the new override */
4301 kqr->kqr_thread = THREAD_NULL;
4302 kqr->kqr_override_delta = override;
4303 }
4304
4305 struct kqrequest *
4306 kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
4307 {
4308 assert(qos_index < KQWQ_NQOS);
4309 return &kqwq->kqwq_request[qos_index];
4310 }
4311
4312 void
4313 knote_adjust_qos(struct knote *kn, qos_t new_qos, qos_t new_override)
4314 {
4315 if (knote_get_kq(kn)->kq_state & KQ_WORKQ) {
4316 kq_index_t new_qos_index;
4317 kq_index_t new_override_index;
4318 kq_index_t servicer_qos_index;
4319
4320 new_qos_index = qos_index_from_qos(new_qos, FALSE);
4321 new_override_index = qos_index_from_qos(new_override, TRUE);
4322
4323 /* make sure the servicer qos acts as a floor */
4324 servicer_qos_index = qos_index_from_qos(kn->kn_qos, FALSE);
4325 if (servicer_qos_index > new_qos_index)
4326 new_qos_index = servicer_qos_index;
4327 if (servicer_qos_index > new_override_index)
4328 new_override_index = servicer_qos_index;
4329
4330 kqlock(knote_get_kq(kn));
4331 if (new_qos_index != knote_get_req_index(kn) ||
4332 new_override_index != knote_get_qos_override_index(kn)) {
4333 if (kn->kn_status & KN_QUEUED) {
4334 knote_dequeue(kn);
4335 knote_set_qos_index(kn, new_qos_index);
4336 knote_set_qos_override_index(kn, new_override_index);
4337 knote_enqueue(kn);
4338 knote_wakeup(kn);
4339 } else {
4340 knote_set_qos_index(kn, new_qos_index);
4341 knote_set_qos_override_index(kn, new_override_index);
4342 }
4343 }
4344 kqunlock(knote_get_kq(kn));
4345 }
4346 }
4347
4348 static void
4349 knote_wakeup(struct knote *kn)
4350 {
4351 struct kqueue *kq = knote_get_kq(kn);
4352
4353 if (kq->kq_state & KQ_WORKQ) {
4354 /* request a servicing thread */
4355 struct kqworkq *kqwq = (struct kqworkq *)kq;
4356 kq_index_t qos_index = knote_get_qos_index(kn);
4357
4358 kqworkq_request_help(kqwq, qos_index, KQWQ_WAKEUP);
4359
4360 } else {
4361 struct kqfile *kqf = (struct kqfile *)kq;
4362
4363 /* flag wakeups during processing */
4364 if (kq->kq_state & KQ_PROCESSING)
4365 kq->kq_state |= KQ_WAKEUP;
4366
4367 /* wakeup a thread waiting on this queue */
4368 if (kq->kq_state & (KQ_SLEEP | KQ_SEL)) {
4369 kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
4370 waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
4371 KQ_EVENT,
4372 THREAD_AWAKENED,
4373 WAITQ_ALL_PRIORITIES);
4374 }
4375
4376 /* wakeup other kqueues/select sets we're inside */
4377 KNOTE(&kqf->kqf_sel.si_note, 0);
4378 }
4379 }
4380
4381 /*
4382 * Called with the kqueue locked
4383 */
4384 static void
4385 kqueue_interrupt(struct kqueue *kq)
4386 {
4387 assert((kq->kq_state & KQ_WORKQ) == 0);
4388
4389 /* wakeup sleeping threads */
4390 if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0) {
4391 kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
4392 (void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
4393 KQ_EVENT,
4394 THREAD_RESTART,
4395 WAITQ_ALL_PRIORITIES);
4396 }
4397
4398 /* wakeup threads waiting their turn to process */
4399 if (kq->kq_state & KQ_PROCWAIT) {
4400 struct kqtailq *suppressq;
4401
4402 assert(kq->kq_state & KQ_PROCESSING);
4403
4404 kq->kq_state &= ~KQ_PROCWAIT;
4405 suppressq = kqueue_get_suppressed_queue(kq, QOS_INDEX_KQFILE);
4406 (void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
4407 CAST_EVENT64_T(suppressq),
4408 THREAD_RESTART,
4409 WAITQ_ALL_PRIORITIES);
4410 }
4411 }
4412
4413 /*
4414 * Called back from waitq code when no threads are waiting and the hook was set.
4415 *
4416 * Interrupts are likely disabled and spin locks are held - minimal work
4417 * can be done in this context!!!
4418 *
4419 * JMM - in the future, this will try to determine which knotes match the
4420 * wait queue wakeup and apply these wakeups against those knotes themselves.
4421 * For now, all the events dispatched this way are dispatch-manager handled,
4422 * so hard-code that for now.
4423 */
4424 void
4425 waitq_set__CALLING_PREPOST_HOOK__(void *kq_hook, void *knote_hook, int qos)
4426 {
4427 #pragma unused(knote_hook, qos)
4428
4429 struct kqworkq *kqwq = (struct kqworkq *)kq_hook;
4430
4431 assert(kqwq->kqwq_state & KQ_WORKQ);
4432 kqworkq_request_help(kqwq, KQWQ_QOS_MANAGER, KQWQ_HOOKCALLED);
4433 }
4434
4435 void
4436 klist_init(struct klist *list)
4437 {
4438 SLIST_INIT(list);
4439 }
4440
4441
4442 /*
4443 * Query/Post each knote in the object's list
4444 *
4445 * The object lock protects the list. It is assumed
4446 * that the filter/event routine for the object can
4447 * determine that the object is already locked (via
4448 * the hint) and not deadlock itself.
4449 *
4450 * The object lock should also hold off pending
4451 * detach/drop operations. But we'll prevent it here
4452 * too (by taking a use reference) - just in case.
4453 */
4454 void
4455 knote(struct klist *list, long hint)
4456 {
4457 struct knote *kn;
4458
4459 SLIST_FOREACH(kn, list, kn_selnext) {
4460 struct kqueue *kq = knote_get_kq(kn);
4461
4462 kqlock(kq);
4463
4464 /* If we can get a use reference - deliver event */
4465 if (kqlock2knoteuse(kq, kn)) {
4466 int result;
4467
4468 /* call the event with only a use count */
4469 result = knote_fops(kn)->f_event(kn, hint);
4470
4471 /* if it's not going away and was triggered */
4472 if (knoteuse2kqlock(kq, kn, 0) && result)
4473 knote_activate(kn);
4474 /* kq lock held */
4475 }
4476 kqunlock(kq);
4477 }
4478 }
4479
4480 /*
4481 * attach a knote to the specified list. Return true if this is the first entry.
4482 * The list is protected by whatever lock the object it is associated with uses.
4483 */
4484 int
4485 knote_attach(struct klist *list, struct knote *kn)
4486 {
4487 int ret = SLIST_EMPTY(list);
4488 SLIST_INSERT_HEAD(list, kn, kn_selnext);
4489 return (ret);
4490 }
4491
4492 /*
4493 * detach a knote from the specified list. Return true if that was the last entry.
4494 * The list is protected by whatever lock the object it is associated with uses.
4495 */
4496 int
4497 knote_detach(struct klist *list, struct knote *kn)
4498 {
4499 SLIST_REMOVE(list, kn, knote, kn_selnext);
4500 return (SLIST_EMPTY(list));
4501 }
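/*
 * Illustrative sketch (compiled out) of how an event source typically
 * drives the klist helpers above: a hypothetical driver object embeds a
 * klist, its filter attach/detach routines use KNOTE_ATTACH()/KNOTE_DETACH()
 * under the object lock, and state changes post through KNOTE(). The
 * "example_source" type and its lock are assumptions for illustration only.
 */
#if 0
struct example_source {
	lck_mtx_t	es_lock;	/* object lock protecting the list */
	struct klist	es_note;	/* knotes interested in this source */
	int		es_ready;	/* hypothetical readable state */
};

static void
example_source_init(struct example_source *es)
{
	klist_init(&es->es_note);
}

static void
example_source_event(struct example_source *es)
{
	lck_mtx_lock(&es->es_lock);
	es->es_ready = 1;
	/* Post to every attached knote; each filter's f_event runs with hint 0. */
	KNOTE(&es->es_note, 0);
	lck_mtx_unlock(&es->es_lock);
}
#endif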
4502
4503 /*
4504 * knote_vanish - Indicate that the source has vanished
4505 *
4506 * If the knote has requested EV_VANISHED delivery,
4507 * arrange for that. Otherwise, deliver a NOTE_REVOKE
4508 * event for backward compatibility.
4509 *
4510 * The knote is marked as having vanished, but is not
4511 * actually detached from the source in this instance.
4512 * The actual detach is deferred until the knote drop.
4513 *
4514 * Our caller already has the object lock held. Calling
4515 * the detach routine would try to take that lock
4516 * recursively - which likely is not supported.
4517 */
4518 void
4519 knote_vanish(struct klist *list)
4520 {
4521 struct knote *kn;
4522 struct knote *kn_next;
4523
4524 SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
4525 struct kqueue *kq = knote_get_kq(kn);
4526 int result;
4527
4528 kqlock(kq);
4529 if ((kn->kn_status & KN_DROPPING) == 0) {
4530
4531 /* If EV_VANISHED delivery was requested - prepare to deliver one */
4532 if (kn->kn_status & KN_REQVANISH) {
4533 kn->kn_status |= KN_VANISHED;
4534 knote_activate(kn);
4535
4536 } else if (kqlock2knoteuse(kq, kn)) {
4537 /* call the event with only a use count */
4538 result = knote_fops(kn)->f_event(kn, NOTE_REVOKE);
4539
4540 /* if it's not going away and was triggered */
4541 if (knoteuse2kqlock(kq, kn, 0) && result)
4542 knote_activate(kn);
4543 /* lock held again */
4544 }
4545 }
4546 kqunlock(kq);
4547 }
4548 }
4549
4550 /*
4551 * For a given knote, link a provided wait queue directly with the kqueue.
4552 * Wakeups will happen via recursive wait queue support. But nothing will move
4553 * the knote to the active list at wakeup (nothing calls knote()). Instead,
4554 * we permanently enqueue them here.
4555 *
4556 * kqueue and knote references are held by caller.
4557 * waitq locked by caller.
4558 *
4559 * caller provides the wait queue link structure.
4560 */
4561 int
4562 knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link)
4563 {
4564 struct kqueue *kq = knote_get_kq(kn);
4565 kern_return_t kr;
4566
4567 kr = waitq_link(wq, &kq->kq_wqs, WAITQ_ALREADY_LOCKED, reserved_link);
4568 if (kr == KERN_SUCCESS) {
4569 knote_markstayactive(kn);
4570 return (0);
4571 } else {
4572 return (EINVAL);
4573 }
4574 }
4575
4576 /*
4577 * Unlink the provided wait queue from the kqueue associated with a knote.
4578 * Also remove it from the magic list of directly attached knotes.
4579 *
4580 * Note that the unlink may have already happened from the other side, so
4581 * ignore any failures to unlink and just remove it from the kqueue list.
4582 *
4583 * On success, caller is responsible for the link structure
4584 */
4585 int
4586 knote_unlink_waitq(struct knote *kn, struct waitq *wq)
4587 {
4588 struct kqueue *kq = knote_get_kq(kn);
4589 kern_return_t kr;
4590
4591 kr = waitq_unlink(wq, &kq->kq_wqs);
4592 knote_clearstayactive(kn);
4593 return ((kr != KERN_SUCCESS) ? EINVAL : 0);
4594 }
4595
4596 /*
4597 * remove all knotes referencing a specified fd
4598 *
4599 * Essentially an inlined knote_remove & knote_drop
4600 * when we know for sure that the thing is a file
4601 *
4602 * Entered with the proc_fd lock already held.
4603 * It returns the same way, but may drop it temporarily.
4604 */
4605 void
4606 knote_fdclose(struct proc *p, int fd, int force)
4607 {
4608 struct klist *list;
4609 struct knote *kn;
4610
4611 restart:
4612 list = &p->p_fd->fd_knlist[fd];
4613 SLIST_FOREACH(kn, list, kn_link) {
4614 struct kqueue *kq = knote_get_kq(kn);
4615
4616 kqlock(kq);
4617
4618 if (kq->kq_p != p)
4619 panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
4620 __func__, kq->kq_p, p);
4621
4622 /*
4623 * If the knote supports EV_VANISHED delivery,
4624 * transition it to vanished mode (or skip over
4625 * it if already vanished).
4626 */
4627 if (!force && (kn->kn_status & KN_REQVANISH)) {
4628
4629 if ((kn->kn_status & KN_VANISHED) == 0) {
4630 proc_fdunlock(p);
4631
4632 /* get detach reference (also marks vanished) */
4633 if (kqlock2knotedetach(kq, kn)) {
4634
4635 /* detach knote and drop fp use reference */
4636 knote_fops(kn)->f_detach(kn);
4637 if (knote_fops(kn)->f_isfd)
4638 fp_drop(p, kn->kn_id, kn->kn_fp, 0);
4639
4640 /* activate it if it's still in existence */
4641 if (knoteuse2kqlock(kq, kn, 0)) {
4642 knote_activate(kn);
4643 }
4644 kqunlock(kq);
4645 }
4646 proc_fdlock(p);
4647 goto restart;
4648 } else {
4649 kqunlock(kq);
4650 continue;
4651 }
4652 }
4653
4654 proc_fdunlock(p);
4655
4656 /*
4657 * Convert the kq lock to a drop ref.
4658 * If we get it, go ahead and drop it.
4659 * Otherwise, we waited for the blocking
4660 * condition to complete. Either way,
4661 * we dropped the fdlock so start over.
4662 */
4663 if (kqlock2knotedrop(kq, kn)) {
4664 knote_drop(kn, p);
4665 }
4666
4667 proc_fdlock(p);
4668 goto restart;
4669 }
4670 }
4671
4672 /*
4673 * knote_fdadd - Add knote to the fd table for process
4674 *
4675 * All file-based filters associate a list of knotes by file
4676 * descriptor index. All other filters hash the knote by ident.
4677 *
4678 * May have to grow the table of knote lists to cover the
4679 * file descriptor index presented.
4680 *
4681 * proc_fdlock held on entry (and exit)
4682 */
4683 static int
4684 knote_fdadd(struct knote *kn, struct proc *p)
4685 {
4686 struct filedesc *fdp = p->p_fd;
4687 struct klist *list = NULL;
4688
4689 if (! knote_fops(kn)->f_isfd) {
4690 if (fdp->fd_knhashmask == 0)
4691 fdp->fd_knhash = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE,
4692 &fdp->fd_knhashmask);
4693 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
4694 } else {
4695 if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
4696 u_int size = 0;
4697
4698 if (kn->kn_id >= (uint64_t)p->p_rlimit[RLIMIT_NOFILE].rlim_cur
4699 || kn->kn_id >= (uint64_t)maxfiles)
4700 return (EINVAL);
4701
4702 /* have to grow the fd_knlist */
4703 size = fdp->fd_knlistsize;
4704 while (size <= kn->kn_id)
4705 size += KQEXTENT;
4706
4707 if (size >= (UINT_MAX/sizeof(struct klist *)))
4708 return (EINVAL);
4709
4710 MALLOC(list, struct klist *,
4711 size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
4712 if (list == NULL)
4713 return (ENOMEM);
4714
4715 bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
4716 fdp->fd_knlistsize * sizeof(struct klist *));
4717 bzero((caddr_t)list +
4718 fdp->fd_knlistsize * sizeof(struct klist *),
4719 (size - fdp->fd_knlistsize) * sizeof(struct klist *));
4720 FREE(fdp->fd_knlist, M_KQUEUE);
4721 fdp->fd_knlist = list;
4722 fdp->fd_knlistsize = size;
4723 }
4724 list = &fdp->fd_knlist[kn->kn_id];
4725 }
4726 SLIST_INSERT_HEAD(list, kn, kn_link);
4727 return (0);
4728 }
4729
4730 /*
4731 * knote_fdremove - remove a knote from the fd table for process
4732 *
4733 * If the filter is file-based, remove based on fd index.
4734 * Otherwise remove from the hash based on the ident.
4735 *
4736 * proc_fdlock held on entry (and exit)
4737 */
4738 static void
4739 knote_fdremove(struct knote *kn, struct proc *p)
4740 {
4741 struct filedesc *fdp = p->p_fd;
4742 struct klist *list = NULL;
4743
4744 if (knote_fops(kn)->f_isfd) {
4745 assert ((u_int)fdp->fd_knlistsize > kn->kn_id);
4746 list = &fdp->fd_knlist[kn->kn_id];
4747 } else {
4748 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
4749 }
4750 SLIST_REMOVE(list, kn, knote, kn_link);
4751 }
4752
4753 /*
4754 * knote_fdfind - lookup a knote in the fd table for process
4755 *
4756 * If the filter is file-based, lookup based on fd index.
4757 * Otherwise use a hash based on the ident.
4758 *
4759 * Matching is based on kq, filter, and ident. Optionally,
4760 * it may also be based on the udata field in the kevent -
4761 * allowing multiple event registrations for the file object
4762 * per kqueue.
4763 *
4764 * proc_fdlock held on entry (and exit)
4765 */
4766 static struct knote *
4767 knote_fdfind(struct kqueue *kq,
4768 struct kevent_internal_s *kev,
4769 struct proc *p)
4770 {
4771 struct filedesc *fdp = p->p_fd;
4772 struct klist *list = NULL;
4773 struct knote *kn = NULL;
4774 struct filterops *fops;
4775
4776 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
4777
4778 /*
4779 * determine where to look for the knote
4780 */
4781 if (fops->f_isfd) {
4782 /* fd-based knotes are linked off the fd table */
4783 if (kev->ident < (u_int)fdp->fd_knlistsize) {
4784 list = &fdp->fd_knlist[kev->ident];
4785 }
4786 } else if (fdp->fd_knhashmask != 0) {
4787 /* hash non-fd knotes here too */
4788 list = &fdp->fd_knhash[KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
4789 }
4790
4791 /*
4792 * scan the selected list looking for a match
4793 */
4794 if (list != NULL) {
4795 SLIST_FOREACH(kn, list, kn_link) {
4796 if (kq == knote_get_kq(kn) &&
4797 kev->ident == kn->kn_id &&
4798 kev->filter == kn->kn_filter) {
4799 if (kev->flags & EV_UDATA_SPECIFIC) {
4800 if ((kn->kn_status & KN_UDATA_SPECIFIC) &&
4801 kev->udata == kn->kn_udata) {
4802 break; /* matching udata-specific knote */
4803 }
4804 } else if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) {
4805 break; /* matching non-udata-specific knote */
4806 }
4807 }
4808 }
4809 }
4810 return kn;
4811 }
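/*
 * Illustrative userspace sketch (compiled out): with EV_UDATA_SPECIFIC,
 * knote_fdfind() above matches on udata as well, so two registrations for
 * the same ident/filter can coexist in one kqueue. A minimal sketch using
 * kevent64(); it assumes EV_UDATA_SPECIFIC is honored on this path.
 */
#if 0
#include <sys/event.h>
#include <unistd.h>

static void
udata_specific_example(int fd)
{
	int kq = kqueue();
	struct kevent64_s kev[2];

	/* Two knotes for the same fd/filter, distinguished only by udata. */
	EV_SET64(&kev[0], fd, EVFILT_READ, EV_ADD | EV_UDATA_SPECIFIC,
	    0, 0, /* udata */ 1, 0, 0);
	EV_SET64(&kev[1], fd, EVFILT_READ, EV_ADD | EV_UDATA_SPECIFIC,
	    0, 0, /* udata */ 2, 0, 0);
	(void)kevent64(kq, kev, 2, NULL, 0, 0, NULL);

	close(kq);
}
#endif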
4812
4813 /*
4814 * knote_drop - disconnect and drop the knote
4815 *
4816 * Called with the kqueue unlocked and holding a
4817 * "drop reference" on the knote in question.
4818 * This reference is most often acquired through a call
4819 * to kqlock2knotedrop(). But it can also be acquired
4820 * through stealing a drop reference via a call to
4821 * knoteuse2knotedrop() or during the initial attach
4822 * of the knote.
4823 *
4824 * The knote may have already been detached from
4825 * (or not yet attached to) its source object.
4826 */
4827 static void
4828 knote_drop(struct knote *kn, __unused struct proc *ctxp)
4829 {
4830 struct kqueue *kq = knote_get_kq(kn);
4831 struct proc *p = kq->kq_p;
4832 int needswakeup;
4833
4834 /* We have to have a dropping reference on the knote */
4835 assert(kn->kn_status & KN_DROPPING);
4836
4837 /* If we are attached, disconnect from the source first */
4838 if (kn->kn_status & KN_ATTACHED) {
4839 knote_fops(kn)->f_detach(kn);
4840 }
4841
4842 proc_fdlock(p);
4843
4844 /* Remove the source from the appropriate hash */
4845 knote_fdremove(kn, p);
4846
4847 /* trade fdlock for kq lock */
4848 kqlock(kq);
4849 proc_fdunlock(p);
4850
4851 /* determine if anyone needs to know about the drop */
4852 assert((kn->kn_status & (KN_SUPPRESSED | KN_QUEUED)) == 0);
4853 needswakeup = (kn->kn_status & KN_USEWAIT);
4854 kqunlock(kq);
4855
4856 if (needswakeup)
4857 waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
4858 CAST_EVENT64_T(&kn->kn_status),
4859 THREAD_RESTART,
4860 WAITQ_ALL_PRIORITIES);
4861
4862 if (knote_fops(kn)->f_isfd && ((kn->kn_status & KN_VANISHED) == 0))
4863 fp_drop(p, kn->kn_id, kn->kn_fp, 0);
4864
4865 knote_free(kn);
4866 }
4867
4868 /* called with kqueue lock held */
4869 static void
4870 knote_activate(struct knote *kn)
4871 {
4872 if (kn->kn_status & KN_ACTIVE)
4873 return;
4874
4875 kn->kn_status |= KN_ACTIVE;
4876 if (knote_enqueue(kn))
4877 knote_wakeup(kn);
4878 }
4879
4880 /* called with kqueue lock held */
4881 static void
4882 knote_deactivate(struct knote *kn)
4883 {
4884 kn->kn_status &= ~KN_ACTIVE;
4885 if ((kn->kn_status & KN_STAYACTIVE) == 0)
4886 knote_dequeue(kn);
4887 }
4888
4889 /* called with kqueue lock held */
4890 static void
4891 knote_enable(struct knote *kn)
4892 {
4893 if ((kn->kn_status & KN_DISABLED) == 0)
4894 return;
4895
4896 kn->kn_status &= ~KN_DISABLED;
4897 if (knote_enqueue(kn))
4898 knote_wakeup(kn);
4899 }
4900
4901 /* called with kqueue lock held */
4902 static void
4903 knote_disable(struct knote *kn)
4904 {
4905 if (kn->kn_status & KN_DISABLED)
4906 return;
4907
4908 kn->kn_status |= KN_DISABLED;
4909 knote_dequeue(kn);
4910 }
4911
4912 /* called with kqueue lock held */
4913 static void
4914 knote_suppress(struct knote *kn)
4915 {
4916 struct kqtailq *suppressq;
4917
4918 if (kn->kn_status & KN_SUPPRESSED)
4919 return;
4920
4921 knote_dequeue(kn);
4922 kn->kn_status |= KN_SUPPRESSED;
4923 suppressq = knote_get_suppressed_queue(kn);
4924 TAILQ_INSERT_TAIL(suppressq, kn, kn_tqe);
4925 }
4926
4927 /* called with kqueue lock held */
4928 static void
4929 knote_unsuppress(struct knote *kn)
4930 {
4931 struct kqtailq *suppressq;
4932
4933 if ((kn->kn_status & KN_SUPPRESSED) == 0)
4934 return;
4935
4936 kn->kn_status &= ~KN_SUPPRESSED;
4937 suppressq = knote_get_suppressed_queue(kn);
4938 TAILQ_REMOVE(suppressq, kn, kn_tqe);
4939
4940 /* update in-use qos to equal requested qos */
4941 kn->kn_qos_index = kn->kn_req_index;
4942
4943 /* don't wakeup if unsuppressing just a stay-active knote */
4944 if (knote_enqueue(kn) &&
4945 (kn->kn_status & KN_ACTIVE))
4946 knote_wakeup(kn);
4947 }
4948
4949 /* called with kqueue lock held */
4950 static int
4951 knote_enqueue(struct knote *kn)
4952 {
4953 if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0 ||
4954 (kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)))
4955 return 0;
4956
4957 if ((kn->kn_status & KN_QUEUED) == 0) {
4958 struct kqtailq *queue = knote_get_queue(kn);
4959 struct kqueue *kq = knote_get_kq(kn);
4960
4961 TAILQ_INSERT_TAIL(queue, kn, kn_tqe);
4962 kn->kn_status |= KN_QUEUED;
4963 kq->kq_count++;
4964 return 1;
4965 }
4966 return ((kn->kn_status & KN_STAYACTIVE) != 0);
4967 }
4968
4969
4970 /* called with kqueue lock held */
4971 static void
4972 knote_dequeue(struct knote *kn)
4973 {
4974 struct kqueue *kq = knote_get_kq(kn);
4975 struct kqtailq *queue;
4976
4977 if ((kn->kn_status & KN_QUEUED) == 0)
4978 return;
4979
4980 queue = knote_get_queue(kn);
4981 TAILQ_REMOVE(queue, kn, kn_tqe);
4982 kn->kn_status &= ~KN_QUEUED;
4983 kq->kq_count--;
4984 }
4985
4986 void
4987 knote_init(void)
4988 {
4989 knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote),
4990 8192, "knote zone");
4991
4992 kqfile_zone = zinit(sizeof(struct kqfile), 8192*sizeof(struct kqfile),
4993 8192, "kqueue file zone");
4994
4995 kqworkq_zone = zinit(sizeof(struct kqworkq), 8192*sizeof(struct kqworkq),
4996 8192, "kqueue workq zone");
4997
4998 /* allocate kq lock group attribute and group */
4999 kq_lck_grp_attr = lck_grp_attr_alloc_init();
5000
5001 kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr);
5002
5003 /* Allocate kq lock attribute */
5004 kq_lck_attr = lck_attr_alloc_init();
5005
5006 /* Initialize the timer filter lock */
5007 lck_mtx_init(&_filt_timerlock, kq_lck_grp, kq_lck_attr);
5008
5009 /* Initialize the user filter lock */
5010 lck_spin_init(&_filt_userlock, kq_lck_grp, kq_lck_attr);
5011
5012 #if CONFIG_MEMORYSTATUS
5013 /* Initialize the memorystatus list lock */
5014 memorystatus_kevent_init(kq_lck_grp, kq_lck_attr);
5015 #endif
5016 }
5017 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
5018
5019 struct filterops *
5020 knote_fops(struct knote *kn)
5021 {
5022 return sysfilt_ops[kn->kn_filtid];
5023 }
5024
5025 static struct knote *
5026 knote_alloc(void)
5027 {
5028 return ((struct knote *)zalloc(knote_zone));
5029 }
5030
5031 static void
5032 knote_free(struct knote *kn)
5033 {
5034 zfree(knote_zone, kn);
5035 }
5036
5037 #if SOCKETS
5038 #include <sys/param.h>
5039 #include <sys/socket.h>
5040 #include <sys/protosw.h>
5041 #include <sys/domain.h>
5042 #include <sys/mbuf.h>
5043 #include <sys/kern_event.h>
5044 #include <sys/malloc.h>
5045 #include <sys/sys_domain.h>
5046 #include <sys/syslog.h>
5047
5048 #ifndef ROUNDUP64
5049 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
5050 #endif
5051
5052 #ifndef ADVANCE64
5053 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
5054 #endif
5055
5056 static lck_grp_attr_t *kev_lck_grp_attr;
5057 static lck_attr_t *kev_lck_attr;
5058 static lck_grp_t *kev_lck_grp;
5059 static decl_lck_rw_data(,kev_lck_data);
5060 static lck_rw_t *kev_rwlock = &kev_lck_data;
5061
5062 static int kev_attach(struct socket *so, int proto, struct proc *p);
5063 static int kev_detach(struct socket *so);
5064 static int kev_control(struct socket *so, u_long cmd, caddr_t data,
5065 struct ifnet *ifp, struct proc *p);
5066 static lck_mtx_t * event_getlock(struct socket *, int);
5067 static int event_lock(struct socket *, int, void *);
5068 static int event_unlock(struct socket *, int, void *);
5069
5070 static int event_sofreelastref(struct socket *);
5071 static void kev_delete(struct kern_event_pcb *);
5072
5073 static struct pr_usrreqs event_usrreqs = {
5074 .pru_attach = kev_attach,
5075 .pru_control = kev_control,
5076 .pru_detach = kev_detach,
5077 .pru_soreceive = soreceive,
5078 };
5079
5080 static struct protosw eventsw[] = {
5081 {
5082 .pr_type = SOCK_RAW,
5083 .pr_protocol = SYSPROTO_EVENT,
5084 .pr_flags = PR_ATOMIC,
5085 .pr_usrreqs = &event_usrreqs,
5086 .pr_lock = event_lock,
5087 .pr_unlock = event_unlock,
5088 .pr_getlock = event_getlock,
5089 }
5090 };
5091
5092 __private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS;
5093 __private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS;
5094
5095 SYSCTL_NODE(_net_systm, OID_AUTO, kevt,
5096 CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel event family");
5097
5098 struct kevtstat kevtstat;
5099 SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats,
5100 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
5101 kevt_getstat, "S,kevtstat", "");
5102
5103 SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist,
5104 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
5105 kevt_pcblist, "S,xkevtpcb", "");
5106
5107 static lck_mtx_t *
5108 event_getlock(struct socket *so, int locktype)
5109 {
5110 #pragma unused(locktype)
5111 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
5112
5113 if (so->so_pcb != NULL) {
5114 if (so->so_usecount < 0)
5115 panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
5116 so, so->so_usecount, solockhistory_nr(so));
5117 /* NOTREACHED */
5118 } else {
5119 panic("%s: so=%p NULL NO so_pcb %s\n", __func__,
5120 so, solockhistory_nr(so));
5121 /* NOTREACHED */
5122 }
5123 return (&ev_pcb->evp_mtx);
5124 }
5125
5126 static int
5127 event_lock(struct socket *so, int refcount, void *lr)
5128 {
5129 void *lr_saved;
5130
5131 if (lr == NULL)
5132 lr_saved = __builtin_return_address(0);
5133 else
5134 lr_saved = lr;
5135
5136 if (so->so_pcb != NULL) {
5137 lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
5138 } else {
5139 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
5140 so, lr_saved, solockhistory_nr(so));
5141 /* NOTREACHED */
5142 }
5143
5144 if (so->so_usecount < 0) {
5145 panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s\n", __func__,
5146 so, so->so_pcb, lr_saved, so->so_usecount,
5147 solockhistory_nr(so));
5148 /* NOTREACHED */
5149 }
5150
5151 if (refcount)
5152 so->so_usecount++;
5153
5154 so->lock_lr[so->next_lock_lr] = lr_saved;
5155 so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
5156 return (0);
5157 }
5158
5159 static int
5160 event_unlock(struct socket *so, int refcount, void *lr)
5161 {
5162 void *lr_saved;
5163 lck_mtx_t *mutex_held;
5164
5165 if (lr == NULL)
5166 lr_saved = __builtin_return_address(0);
5167 else
5168 lr_saved = lr;
5169
5170 if (refcount) {
5171 VERIFY(so->so_usecount > 0);
5172 so->so_usecount--;
5173 }
5174 if (so->so_usecount < 0) {
5175 panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
5176 so, so->so_usecount, solockhistory_nr(so));
5177 /* NOTREACHED */
5178 }
5179 if (so->so_pcb == NULL) {
5180 panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s\n", __func__,
5181 so, so->so_usecount, (void *)lr_saved,
5182 solockhistory_nr(so));
5183 /* NOTREACHED */
5184 }
5185 mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
5186
5187 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
5188 so->unlock_lr[so->next_unlock_lr] = lr_saved;
5189 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
5190
5191 if (so->so_usecount == 0) {
5192 VERIFY(so->so_flags & SOF_PCBCLEARING);
5193 event_sofreelastref(so);
5194 } else {
5195 lck_mtx_unlock(mutex_held);
5196 }
5197
5198 return (0);
5199 }
5200
5201 static int
5202 event_sofreelastref(struct socket *so)
5203 {
5204 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
5205
5206 lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);
5207
5208 so->so_pcb = NULL;
5209
5210 /*
5211 * Disable upcall in the event another thread is in kev_post_msg()
5212 * appending record to the receive socket buffer, since sbwakeup()
5213 * may release the socket lock otherwise.
5214 */
5215 so->so_rcv.sb_flags &= ~SB_UPCALL;
5216 so->so_snd.sb_flags &= ~SB_UPCALL;
5217 so->so_event = sonullevent;
5218 lck_mtx_unlock(&(ev_pcb->evp_mtx));
5219
5220 lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
5221 lck_rw_lock_exclusive(kev_rwlock);
5222 LIST_REMOVE(ev_pcb, evp_link);
5223 kevtstat.kes_pcbcount--;
5224 kevtstat.kes_gencnt++;
5225 lck_rw_done(kev_rwlock);
5226 kev_delete(ev_pcb);
5227
5228 sofreelastref(so, 1);
5229 return (0);
5230 }
5231
5232 static int event_proto_count = (sizeof (eventsw) / sizeof (struct protosw));
5233
5234 static
5235 struct kern_event_head kern_event_head;
5236
5237 static u_int32_t static_event_id = 0;
5238
5239 #define EVPCB_ZONE_MAX 65536
5240 #define EVPCB_ZONE_NAME "kerneventpcb"
5241 static struct zone *ev_pcb_zone;
5242
5243 /*
5244 * Install the protosw's for the NKE manager. Invoked at extension load time
5245 */
5246 void
5247 kern_event_init(struct domain *dp)
5248 {
5249 struct protosw *pr;
5250 int i;
5251
5252 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
5253 VERIFY(dp == systemdomain);
5254
5255 kev_lck_grp_attr = lck_grp_attr_alloc_init();
5256 if (kev_lck_grp_attr == NULL) {
5257 panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
5258 /* NOTREACHED */
5259 }
5260
5261 kev_lck_grp = lck_grp_alloc_init("Kernel Event Protocol",
5262 kev_lck_grp_attr);
5263 if (kev_lck_grp == NULL) {
5264 panic("%s: lck_grp_alloc_init failed\n", __func__);
5265 /* NOTREACHED */
5266 }
5267
5268 kev_lck_attr = lck_attr_alloc_init();
5269 if (kev_lck_attr == NULL) {
5270 panic("%s: lck_attr_alloc_init failed\n", __func__);
5271 /* NOTREACHED */
5272 }
5273
5274 lck_rw_init(kev_rwlock, kev_lck_grp, kev_lck_attr);
5275 if (kev_rwlock == NULL) {
5276 panic("%s: lck_mtx_alloc_init failed\n", __func__);
5277 /* NOTREACHED */
5278 }
5279
5280 for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++)
5281 net_add_proto(pr, dp, 1);
5282
5283 ev_pcb_zone = zinit(sizeof(struct kern_event_pcb),
5284 EVPCB_ZONE_MAX * sizeof(struct kern_event_pcb), 0, EVPCB_ZONE_NAME);
5285 if (ev_pcb_zone == NULL) {
5286 panic("%s: failed allocating ev_pcb_zone", __func__);
5287 /* NOTREACHED */
5288 }
5289 zone_change(ev_pcb_zone, Z_EXPAND, TRUE);
5290 zone_change(ev_pcb_zone, Z_CALLERACCT, TRUE);
5291 }
5292
5293 static int
5294 kev_attach(struct socket *so, __unused int proto, __unused struct proc *p)
5295 {
5296 int error = 0;
5297 struct kern_event_pcb *ev_pcb;
5298
5299 error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
5300 if (error != 0)
5301 return (error);
5302
5303 if ((ev_pcb = (struct kern_event_pcb *)zalloc(ev_pcb_zone)) == NULL) {
5304 return (ENOBUFS);
5305 }
5306 bzero(ev_pcb, sizeof(struct kern_event_pcb));
5307 lck_mtx_init(&ev_pcb->evp_mtx, kev_lck_grp, kev_lck_attr);
5308
5309 ev_pcb->evp_socket = so;
5310 ev_pcb->evp_vendor_code_filter = 0xffffffff;
5311
5312 so->so_pcb = (caddr_t) ev_pcb;
5313 lck_rw_lock_exclusive(kev_rwlock);
5314 LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link);
5315 kevtstat.kes_pcbcount++;
5316 kevtstat.kes_gencnt++;
5317 lck_rw_done(kev_rwlock);
5318
5319 return (error);
5320 }
5321
5322 static void
5323 kev_delete(struct kern_event_pcb *ev_pcb)
5324 {
5325 VERIFY(ev_pcb != NULL);
5326 lck_mtx_destroy(&ev_pcb->evp_mtx, kev_lck_grp);
5327 zfree(ev_pcb_zone, ev_pcb);
5328 }
5329
5330 static int
5331 kev_detach(struct socket *so)
5332 {
5333 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;
5334
5335 if (ev_pcb != NULL) {
5336 soisdisconnected(so);
5337 so->so_flags |= SOF_PCBCLEARING;
5338 }
5339
5340 return (0);
5341 }
5342
5343 /*
5344 * For now, kev_vendor_code and mbuf_tags use the same
5345 * mechanism.
5346 */
5347 errno_t kev_vendor_code_find(
5348 const char *string,
5349 u_int32_t *out_vendor_code)
5350 {
5351 if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
5352 return (EINVAL);
5353 }
5354 return (net_str_id_find_internal(string, out_vendor_code,
5355 NSI_VENDOR_CODE, 1));
5356 }
5357
5358 errno_t
5359 kev_msg_post(struct kev_msg *event_msg)
5360 {
5361 mbuf_tag_id_t min_vendor, max_vendor;
5362
5363 net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);
5364
5365 if (event_msg == NULL)
5366 return (EINVAL);
5367
5368 /*
5369 * Limit third parties to posting events for registered vendor codes
5370 * only
5371 */
5372 if (event_msg->vendor_code < min_vendor ||
5373 event_msg->vendor_code > max_vendor) {
5374 OSIncrementAtomic64((SInt64 *)&kevtstat.kes_badvendor);
5375 return (EINVAL);
5376 }
5377 return (kev_post_msg(event_msg));
5378 }
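/*
 * Illustrative kext-side sketch (compiled out): a third party registers a
 * vendor code with kev_vendor_code_find() and then posts through
 * kev_msg_post() above, which enforces the registered-vendor range. The
 * vendor string, class, subclass, and event values here are hypothetical.
 */
#if 0
static errno_t
example_post_event(void)
{
	static const char payload[] = "hello";
	struct kev_msg msg;
	u_int32_t vendor;
	errno_t err;

	err = kev_vendor_code_find("com.example.driver", &vendor);
	if (err != 0)
		return (err);

	bzero(&msg, sizeof(msg));
	msg.vendor_code = vendor;
	msg.kev_class = 1;			/* hypothetical class */
	msg.kev_subclass = 1;			/* hypothetical subclass */
	msg.event_code = 1;			/* hypothetical event */
	msg.dv[0].data_ptr = (void *)payload;
	msg.dv[0].data_length = sizeof(payload);

	return (kev_msg_post(&msg));
}
#endif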
5379
5380 int
5381 kev_post_msg(struct kev_msg *event_msg)
5382 {
5383 struct mbuf *m, *m2;
5384 struct kern_event_pcb *ev_pcb;
5385 struct kern_event_msg *ev;
5386 char *tmp;
5387 u_int32_t total_size;
5388 int i;
5389
5390 /* Verify the message is small enough to fit in one mbuf w/o cluster */
5391 total_size = KEV_MSG_HEADER_SIZE;
5392
5393 for (i = 0; i < 5; i++) {
5394 if (event_msg->dv[i].data_length == 0)
5395 break;
5396 total_size += event_msg->dv[i].data_length;
5397 }
5398
5399 if (total_size > MLEN) {
5400 OSIncrementAtomic64((SInt64 *)&kevtstat.kes_toobig);
5401 return (EMSGSIZE);
5402 }
5403
5404 m = m_get(M_DONTWAIT, MT_DATA);
5405 if (m == 0) {
5406 OSIncrementAtomic64((SInt64 *)&kevtstat.kes_nomem);
5407 return (ENOMEM);
5408 }
5409 ev = mtod(m, struct kern_event_msg *);
5410 total_size = KEV_MSG_HEADER_SIZE;
5411
5412 tmp = (char *) &ev->event_data[0];
5413 for (i = 0; i < 5; i++) {
5414 if (event_msg->dv[i].data_length == 0)
5415 break;
5416
5417 total_size += event_msg->dv[i].data_length;
5418 bcopy(event_msg->dv[i].data_ptr, tmp,
5419 event_msg->dv[i].data_length);
5420 tmp += event_msg->dv[i].data_length;
5421 }
5422
5423 ev->id = ++static_event_id;
5424 ev->total_size = total_size;
5425 ev->vendor_code = event_msg->vendor_code;
5426 ev->kev_class = event_msg->kev_class;
5427 ev->kev_subclass = event_msg->kev_subclass;
5428 ev->event_code = event_msg->event_code;
5429
5430 m->m_len = total_size;
5431 lck_rw_lock_shared(kev_rwlock);
5432 for (ev_pcb = LIST_FIRST(&kern_event_head);
5433 ev_pcb;
5434 ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
5435 lck_mtx_lock(&ev_pcb->evp_mtx);
5436 if (ev_pcb->evp_socket->so_pcb == NULL) {
5437 lck_mtx_unlock(&ev_pcb->evp_mtx);
5438 continue;
5439 }
5440 if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) {
5441 if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) {
5442 lck_mtx_unlock(&ev_pcb->evp_mtx);
5443 continue;
5444 }
5445
5446 if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) {
5447 if (ev_pcb->evp_class_filter != ev->kev_class) {
5448 lck_mtx_unlock(&ev_pcb->evp_mtx);
5449 continue;
5450 }
5451
5452 if ((ev_pcb->evp_subclass_filter !=
5453 KEV_ANY_SUBCLASS) &&
5454 (ev_pcb->evp_subclass_filter !=
5455 ev->kev_subclass)) {
5456 lck_mtx_unlock(&ev_pcb->evp_mtx);
5457 continue;
5458 }
5459 }
5460 }
5461
5462 m2 = m_copym(m, 0, m->m_len, M_NOWAIT);
5463 if (m2 == 0) {
5464 OSIncrementAtomic64((SInt64 *)&kevtstat.kes_nomem);
5465 m_free(m);
5466 lck_mtx_unlock(&ev_pcb->evp_mtx);
5467 lck_rw_done(kev_rwlock);
5468 return (ENOMEM);
5469 }
5470 if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) {
5471 /*
5472 * We use "m" for the socket stats as it would be
5473 * unsafe to use "m2"
5474 */
5475 so_inc_recv_data_stat(ev_pcb->evp_socket,
5476 1, m->m_len, MBUF_TC_BE);
5477
5478 sorwakeup(ev_pcb->evp_socket);
5479 OSIncrementAtomic64((SInt64 *)&kevtstat.kes_posted);
5480 } else {
5481 OSIncrementAtomic64((SInt64 *)&kevtstat.kes_fullsock);
5482 }
5483 lck_mtx_unlock(&ev_pcb->evp_mtx);
5484 }
5485 m_free(m);
5486 lck_rw_done(kev_rwlock);
5487
5488 return (0);
5489 }
5490
5491 static int
5492 kev_control(struct socket *so,
5493 u_long cmd,
5494 caddr_t data,
5495 __unused struct ifnet *ifp,
5496 __unused struct proc *p)
5497 {
5498 struct kev_request *kev_req = (struct kev_request *) data;
5499 struct kern_event_pcb *ev_pcb;
5500 struct kev_vendor_code *kev_vendor;
5501 u_int32_t *id_value = (u_int32_t *) data;
5502
5503 switch (cmd) {
5504 case SIOCGKEVID:
5505 *id_value = static_event_id;
5506 break;
5507 case SIOCSKEVFILT:
5508 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
5509 ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
5510 ev_pcb->evp_class_filter = kev_req->kev_class;
5511 ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
5512 break;
5513 case SIOCGKEVFILT:
5514 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
5515 kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
5516 kev_req->kev_class = ev_pcb->evp_class_filter;
5517 kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
5518 break;
5519 case SIOCGKEVVENDOR:
5520 kev_vendor = (struct kev_vendor_code *)data;
5521 /* Make sure string is NULL terminated */
5522 kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN-1] = 0;
5523 return (net_str_id_find_internal(kev_vendor->vendor_string,
5524 &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0));
5525 default:
5526 return (ENOTSUP);
5527 }
5528
5529 return (0);
5530 }
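/*
 * Illustrative userspace sketch (compiled out): the receive side of the
 * kernel event protocol handled by kev_control()/kev_post_msg() above. A
 * PF_SYSTEM/SYSPROTO_EVENT socket is filtered with SIOCSKEVFILT and then
 * read one kern_event_msg record per recv(). A minimal sketch, assuming
 * only the public <sys/kern_event.h> definitions.
 */
#if 0
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/kern_event.h>
#include <unistd.h>

static void
kev_receive_example(void)
{
	int s = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
	struct kev_request req = {
		.vendor_code = KEV_ANY_VENDOR,
		.kev_class = KEV_ANY_CLASS,
		.kev_subclass = KEV_ANY_SUBCLASS,
	};
	char buf[1024];

	if (s < 0)
		return;

	if (ioctl(s, SIOCSKEVFILT, &req) == 0 &&
	    recv(s, buf, sizeof(buf), 0) > 0) {
		struct kern_event_msg *ev = (struct kern_event_msg *)buf;

		/* Dispatch on vendor_code/kev_class/kev_subclass/event_code. */
		(void)ev->event_code;
	}
	close(s);
}
#endif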
5531
5532 int
5533 kevt_getstat SYSCTL_HANDLER_ARGS
5534 {
5535 #pragma unused(oidp, arg1, arg2)
5536 int error = 0;
5537
5538 lck_rw_lock_shared(kev_rwlock);
5539
5540 if (req->newptr != USER_ADDR_NULL) {
5541 error = EPERM;
5542 goto done;
5543 }
5544 if (req->oldptr == USER_ADDR_NULL) {
5545 req->oldidx = sizeof(struct kevtstat);
5546 goto done;
5547 }
5548
5549 error = SYSCTL_OUT(req, &kevtstat,
5550 MIN(sizeof(struct kevtstat), req->oldlen));
5551 done:
5552 lck_rw_done(kev_rwlock);
5553
5554 return (error);
5555 }
5556
5557 __private_extern__ int
5558 kevt_pcblist SYSCTL_HANDLER_ARGS
5559 {
5560 #pragma unused(oidp, arg1, arg2)
5561 int error = 0;
5562 int n, i;
5563 struct xsystmgen xsg;
5564 void *buf = NULL;
5565 size_t item_size = ROUNDUP64(sizeof (struct xkevtpcb)) +
5566 ROUNDUP64(sizeof (struct xsocket_n)) +
5567 2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
5568 ROUNDUP64(sizeof (struct xsockstat_n));
5569 struct kern_event_pcb *ev_pcb;
5570
5571 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
5572 if (buf == NULL)
5573 return (ENOMEM);
5574
5575 lck_rw_lock_shared(kev_rwlock);
5576
5577 n = kevtstat.kes_pcbcount;
5578
5579 if (req->oldptr == USER_ADDR_NULL) {
5580 req->oldidx = (n + n/8) * item_size;
5581 goto done;
5582 }
5583 if (req->newptr != USER_ADDR_NULL) {
5584 error = EPERM;
5585 goto done;
5586 }
5587 bzero(&xsg, sizeof (xsg));
5588 xsg.xg_len = sizeof (xsg);
5589 xsg.xg_count = n;
5590 xsg.xg_gen = kevtstat.kes_gencnt;
5591 xsg.xg_sogen = so_gencnt;
5592 error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
5593 if (error) {
5594 goto done;
5595 }
5596 /*
5597 * We are done if there is no pcb
5598 */
5599 if (n == 0) {
5600 goto done;
5601 }
5602
5603 i = 0;
5604 for (i = 0, ev_pcb = LIST_FIRST(&kern_event_head);
5605 i < n && ev_pcb != NULL;
5606 i++, ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
5607 struct xkevtpcb *xk = (struct xkevtpcb *)buf;
5608 struct xsocket_n *xso = (struct xsocket_n *)
5609 ADVANCE64(xk, sizeof (*xk));
5610 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
5611 ADVANCE64(xso, sizeof (*xso));
5612 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
5613 ADVANCE64(xsbrcv, sizeof (*xsbrcv));
5614 struct xsockstat_n *xsostats = (struct xsockstat_n *)
5615 ADVANCE64(xsbsnd, sizeof (*xsbsnd));
5616
5617 bzero(buf, item_size);
5618
5619 lck_mtx_lock(&ev_pcb->evp_mtx);
5620
5621 xk->kep_len = sizeof(struct xkevtpcb);
5622 xk->kep_kind = XSO_EVT;
5623 xk->kep_evtpcb = (uint64_t)VM_KERNEL_ADDRPERM(ev_pcb);
5624 xk->kep_vendor_code_filter = ev_pcb->evp_vendor_code_filter;
5625 xk->kep_class_filter = ev_pcb->evp_class_filter;
5626 xk->kep_subclass_filter = ev_pcb->evp_subclass_filter;
5627
5628 sotoxsocket_n(ev_pcb->evp_socket, xso);
5629 sbtoxsockbuf_n(ev_pcb->evp_socket ?
5630 &ev_pcb->evp_socket->so_rcv : NULL, xsbrcv);
5631 sbtoxsockbuf_n(ev_pcb->evp_socket ?
5632 &ev_pcb->evp_socket->so_snd : NULL, xsbsnd);
5633 sbtoxsockstat_n(ev_pcb->evp_socket, xsostats);
5634
5635 lck_mtx_unlock(&ev_pcb->evp_mtx);
5636
5637 error = SYSCTL_OUT(req, buf, item_size);
5638 }
5639
5640 if (error == 0) {
5641 /*
5642 * Give the user an updated idea of our state.
5643 * If the generation differs from what we told
5644 * her before, she knows that something happened
5645 * while we were processing this request, and it
5646 * might be necessary to retry.
5647 */
5648 bzero(&xsg, sizeof (xsg));
5649 xsg.xg_len = sizeof (xsg);
5650 xsg.xg_count = n;
5651 xsg.xg_gen = kevtstat.kes_gencnt;
5652 xsg.xg_sogen = so_gencnt;
5653 error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
5654 if (error) {
5655 goto done;
5656 }
5657 }
5658
5659 done:
5660 lck_rw_done(kev_rwlock);
5661
5662 return (error);
5663 }
5664
5665 #endif /* SOCKETS */
5666
5667
5668 int
5669 fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)
5670 {
5671 struct vinfo_stat * st;
5672
5673 st = &kinfo->kq_stat;
5674
5675 st->vst_size = kq->kq_count;
5676 if (kq->kq_state & KQ_KEV_QOS)
5677 st->vst_blksize = sizeof(struct kevent_qos_s);
5678 else if (kq->kq_state & KQ_KEV64)
5679 st->vst_blksize = sizeof(struct kevent64_s);
5680 else
5681 st->vst_blksize = sizeof(struct kevent);
5682 st->vst_mode = S_IFIFO;
5683
5684 /* flags exported to libproc as PROC_KQUEUE_* (sys/proc_info.h) */
5685 #define PROC_KQUEUE_MASK (KQ_SEL|KQ_SLEEP|KQ_KEV32|KQ_KEV64|KQ_KEV_QOS)
5686 kinfo->kq_state = kq->kq_state & PROC_KQUEUE_MASK;
5687
5688 return (0);
5689 }
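/*
 * Illustrative userspace sketch (compiled out): the state filled in by
 * fill_kqueueinfo() above is what libproc exposes through proc_pidfdinfo().
 * A minimal sketch, assuming the PROC_PIDFDKQUEUEINFO flavor and the
 * struct kqueue_fdinfo layout from <sys/proc_info.h>.
 */
#if 0
#include <libproc.h>
#include <sys/proc_info.h>
#include <stdio.h>
#include <unistd.h>

static void
kqueue_fdinfo_example(int kq_fd)
{
	struct kqueue_fdinfo kqi;
	int n = proc_pidfdinfo(getpid(), kq_fd, PROC_PIDFDKQUEUEINFO,
	    &kqi, sizeof(kqi));

	if (n >= (int)sizeof(kqi)) {
		printf("pending: %lld state: 0x%x\n",
		    (long long)kqi.kqueueinfo.kq_stat.vst_size,
		    kqi.kqueueinfo.kq_state);
	}
}
#endif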
5690
5691
5692 void
5693 knote_markstayactive(struct knote *kn)
5694 {
5695 kqlock(knote_get_kq(kn));
5696 kn->kn_status |= KN_STAYACTIVE;
5697
5698 /* handle all stayactive knotes on the manager */
5699 if (knote_get_kq(kn)->kq_state & KQ_WORKQ)
5700 knote_set_qos_index(kn, KQWQ_QOS_MANAGER);
5701
5702 knote_activate(kn);
5703 kqunlock(knote_get_kq(kn));
5704 }
5705
5706 void
5707 knote_clearstayactive(struct knote *kn)
5708 {
5709 kqlock(knote_get_kq(kn));
5710 kn->kn_status &= ~KN_STAYACTIVE;
5711 knote_deactivate(kn);
5712 kqunlock(knote_get_kq(kn));
5713 }
5714
5715 static unsigned long
5716 kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *buf,
5717 unsigned long buflen, unsigned long nknotes)
5718 {
5719 struct kevent_internal_s *kevp;
5720 for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
5721 if (kq == knote_get_kq(kn)) {
5722 if (nknotes < buflen) {
5723 struct kevent_extinfo *info = &buf[nknotes];
5724 struct kevent_qos_s kevqos;
5725
5726 kqlock(kq);
5727 kevp = &(kn->kn_kevent);
5728
5729 bzero(&kevqos, sizeof(kevqos));
5730 kevqos.ident = kevp->ident;
5731 kevqos.filter = kevp->filter;
5732 kevqos.flags = kevp->flags;
5733 kevqos.fflags = kevp->fflags;
5734 kevqos.data = (int64_t) kevp->data;
5735 kevqos.udata = kevp->udata;
5736 kevqos.ext[0] = kevp->ext[0];
5737 kevqos.ext[1] = kevp->ext[1];
5738
5739 memcpy(&info->kqext_kev, &kevqos, sizeof(info->kqext_kev));
5740 info->kqext_sdata = kn->kn_sdata;
5741 info->kqext_status = kn->kn_status;
5742 info->kqext_sfflags = kn->kn_sfflags;
5743
5744 kqunlock(kq);
5745 }
5746
5747 /* we return total number of knotes, which may be more than requested */
5748 nknotes++;
5749 }
5750 }
5751
5752 return nknotes;
5753 }
5754
5755 int
5756 pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf,
5757 uint32_t bufsize, int32_t *retval)
5758 {
5759 struct knote *kn;
5760 int i;
5761 int err = 0;
5762 struct filedesc *fdp = p->p_fd;
5763 unsigned long nknotes = 0;
5764 unsigned long buflen = bufsize / sizeof(struct kevent_extinfo);
5765 struct kevent_extinfo *kqext = NULL;
5766
5767 /* arbitrary upper limit to cap kernel memory usage, copyout size, etc. */
5768 buflen = min(buflen, PROC_PIDFDKQUEUE_KNOTES_MAX);
5769
5770 kqext = kalloc(buflen * sizeof(struct kevent_extinfo));
5771 if (kqext == NULL) {
5772 err = ENOMEM;
5773 goto out;
5774 }
5775 bzero(kqext, buflen * sizeof(struct kevent_extinfo));
5776
5777 proc_fdlock(p);
5778
5779 for (i = 0; i < fdp->fd_knlistsize; i++) {
5780 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
5781 nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
5782 }
5783
5784 if (fdp->fd_knhashmask != 0) {
5785 for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
5786 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
5787 nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
5788 }
5789 }
5790
5791 proc_fdunlock(p);
5792
5793 assert(bufsize >= sizeof(struct kevent_extinfo) * min(buflen, nknotes));
5794 err = copyout(kqext, ubuf, sizeof(struct kevent_extinfo) * min(buflen, nknotes));
5795
5796 out:
5797 if (kqext) {
5798 kfree(kqext, buflen * sizeof(struct kevent_extinfo));
5799 kqext = NULL;
5800 }
5801
5802 if (!err) {
5803 *retval = min(nknotes, PROC_PIDFDKQUEUE_KNOTES_MAX);
5804 }
5805 return err;
5806 }
5807
5808 static unsigned long
5809 kevent_udatainfo_emit(struct kqueue *kq, struct knote *kn, uint64_t *buf,
5810 unsigned long buflen, unsigned long nknotes)
5811 {
5812 struct kevent_internal_s *kevp;
5813 for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
5814 if (kq == knote_get_kq(kn)) {
5815 if (nknotes < buflen) {
5816 kqlock(kq);
5817 kevp = &(kn->kn_kevent);
5818 buf[nknotes] = kevp->udata;
5819 kqunlock(kq);
5820 }
5821
5822 /* we return total number of knotes, which may be more than requested */
5823 nknotes++;
5824 }
5825 }
5826
5827 return nknotes;
5828 }
5829
5830 int
5831 pid_kqueue_udatainfo(proc_t p, struct kqueue *kq, uint64_t *buf,
5832 uint32_t bufsize)
5833 {
5834 struct knote *kn;
5835 int i;
5836 struct filedesc *fdp = p->p_fd;
5837 unsigned long nknotes = 0;
5838 unsigned long buflen = bufsize / sizeof(uint64_t);
5839
5840 proc_fdlock(p);
5841
5842 for (i = 0; i < fdp->fd_knlistsize; i++) {
5843 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
5844 nknotes = kevent_udatainfo_emit(kq, kn, buf, buflen, nknotes);
5845 }
5846
5847 if (fdp->fd_knhashmask != 0) {
5848 for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
5849 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
5850 nknotes = kevent_udatainfo_emit(kq, kn, buf, buflen, nknotes);
5851 }
5852 }
5853
5854 proc_fdunlock(p);
5855 return (int)nknotes;
5856 }
5857