1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 *
28 */
29 /*-
30 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54 /*
55 * @(#)kern_event.c 1.0 (3/31/2000)
56 */
57 #include <stdint.h>
58 #include <machine/atomic.h>
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/filedesc.h>
63 #include <sys/kernel.h>
64 #include <sys/proc_internal.h>
65 #include <sys/kauth.h>
66 #include <sys/malloc.h>
67 #include <sys/unistd.h>
68 #include <sys/file_internal.h>
69 #include <sys/fcntl.h>
70 #include <sys/select.h>
71 #include <sys/queue.h>
72 #include <sys/event.h>
73 #include <sys/eventvar.h>
74 #include <sys/protosw.h>
75 #include <sys/socket.h>
76 #include <sys/socketvar.h>
77 #include <sys/stat.h>
78 #include <sys/syscall.h> // SYS_* constants
79 #include <sys/sysctl.h>
80 #include <sys/uio.h>
81 #include <sys/sysproto.h>
82 #include <sys/user.h>
83 #include <sys/vnode_internal.h>
84 #include <string.h>
85 #include <sys/proc_info.h>
86 #include <sys/codesign.h>
87 #include <sys/pthread_shims.h>
88 #include <sys/kdebug.h>
89 #include <os/base.h>
90 #include <pexpert/pexpert.h>
91
92 #include <kern/locks.h>
93 #include <kern/clock.h>
94 #include <kern/cpu_data.h>
95 #include <kern/policy_internal.h>
96 #include <kern/thread_call.h>
97 #include <kern/sched_prim.h>
98 #include <kern/waitq.h>
99 #include <kern/zalloc.h>
100 #include <kern/kalloc.h>
101 #include <kern/assert.h>
102 #include <kern/ast.h>
103 #include <kern/thread.h>
104 #include <kern/kcdata.h>
105
106 #include <pthread/priority_private.h>
107 #include <pthread/workqueue_syscalls.h>
108 #include <pthread/workqueue_internal.h>
109 #include <libkern/libkern.h>
110
111 #include "net/net_str_id.h"
112
113 #include <mach/task.h>
114 #include <libkern/section_keywords.h>
115
116 #if CONFIG_MEMORYSTATUS
117 #include <sys/kern_memorystatus.h>
118 #endif
119
120 #if DEVELOPMENT || DEBUG
121 #define KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK (1U << 0)
122 #define KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS (1U << 1)
123 TUNABLE(uint32_t, kevent_debug_flags, "kevent_debug", 0);
124 #endif
125
126 static LCK_GRP_DECLARE(kq_lck_grp, "kqueue");
127 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) kn_kq_packing_params =
128 VM_PACKING_PARAMS(KNOTE_KQ_PACKED);
129
130 extern mach_port_name_t ipc_entry_name_mask(mach_port_name_t name); /* osfmk/ipc/ipc_entry.h */
131 extern int cansignal(struct proc *, kauth_cred_t, struct proc *, int); /* bsd/kern/kern_sig.c */
132
133 #define KEV_EVTID(code) BSDDBG_CODE(DBG_BSD_KEVENT, (code))
134
135 /*
136 * If you need accounting for KM_KQUEUE consider using
137 * KALLOC_HEAP_DEFINE to define a zone view.
138 */
139 #define KM_KQUEUE KHEAP_DEFAULT
140
141 #define KQ_EVENT NO_EVENT64
142
143 static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
144 vfs_context_t ctx);
145 static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
146 static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
147 struct kevent_qos_s *kev);
148 static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);
149
150 static const struct fileops kqueueops = {
151 .fo_type = DTYPE_KQUEUE,
152 .fo_read = fo_no_read,
153 .fo_write = fo_no_write,
154 .fo_ioctl = fo_no_ioctl,
155 .fo_select = kqueue_select,
156 .fo_close = kqueue_close,
157 .fo_drain = kqueue_drain,
158 .fo_kqfilter = kqueue_kqfilter,
159 };
160
161 static inline int kevent_modern_copyout(struct kevent_qos_s *, user_addr_t *);
162 static int kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int result);
163 static void kevent_register_wait_block(struct turnstile *ts, thread_t handoff_thread,
164 thread_continue_t cont, struct _kevent_register *cont_args) __dead2;
165 static void kevent_register_wait_return(struct _kevent_register *cont_args) __dead2;
166 static void kevent_register_wait_cleanup(struct knote *kn);
167
168 static struct kqtailq *kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn);
169 static void kqueue_threadreq_initiate(struct kqueue *kq, workq_threadreq_t, kq_index_t qos, int flags);
170
171 static void kqworkq_unbind(proc_t p, workq_threadreq_t);
172 static thread_qos_t kqworkq_unbind_locked(struct kqworkq *kqwq, workq_threadreq_t, thread_t thread);
173 static workq_threadreq_t kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index);
174
175 static void kqworkloop_unbind(struct kqworkloop *kwql);
176
177 enum kqwl_unbind_locked_mode {
178 KQWL_OVERRIDE_DROP_IMMEDIATELY,
179 KQWL_OVERRIDE_DROP_DELAYED,
180 };
181 static void kqworkloop_unbind_locked(struct kqworkloop *kwql, thread_t thread,
182 enum kqwl_unbind_locked_mode how);
183 static void kqworkloop_unbind_delayed_override_drop(thread_t thread);
184 static kq_index_t kqworkloop_override(struct kqworkloop *kqwl);
185 static void kqworkloop_set_overcommit(struct kqworkloop *kqwl);
186 enum {
187 KQWL_UTQ_NONE,
188 /*
189 * The wakeup qos is the qos of QUEUED knotes.
190 *
191 * This QoS is accounted for with the events override in the
192 * kqr_override_index field. It is raised each time a new knote is queued at
193 * a given QoS. The kqwl_wakeup_indexes field is a superset of the non-empty
194 * knote buckets and is recomputed after each event delivery.
195 */
196 KQWL_UTQ_UPDATE_WAKEUP_QOS,
197 KQWL_UTQ_UPDATE_STAYACTIVE_QOS,
198 KQWL_UTQ_RECOMPUTE_WAKEUP_QOS,
199 KQWL_UTQ_UNBINDING, /* attempt to rebind */
200 KQWL_UTQ_PARKING,
201 /*
202 * The wakeup override is for suppressed knotes that have fired again at
203 * a higher QoS than the one for which they are suppressed already.
204 * This override is cleared when the knote suppressed list becomes empty.
205 */
206 KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
207 KQWL_UTQ_RESET_WAKEUP_OVERRIDE,
208 /*
209 * The QoS is the maximum QoS of an event enqueued on this workloop in
210 * userland. It is copied from the only EVFILT_WORKLOOP knote with the
211 * NOTE_WL_THREAD_REQUEST bit set that is allowed on this workloop. If there is no
212 * such knote, this QoS is 0.
213 */
214 KQWL_UTQ_SET_QOS_INDEX,
215 KQWL_UTQ_REDRIVE_EVENTS,
216 };
217 static void kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos);
218 static int kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags);
219
220 static struct knote *knote_alloc(void);
221 static void knote_free(struct knote *kn);
222 static int kq_add_knote(struct kqueue *kq, struct knote *kn,
223 struct knote_lock_ctx *knlc, struct proc *p);
224 static struct knote *kq_find_knote_and_kq_lock(struct kqueue *kq,
225 struct kevent_qos_s *kev, bool is_fd, struct proc *p);
226
227 static void knote_activate(kqueue_t kqu, struct knote *kn, int result);
228 static void knote_dequeue(kqueue_t kqu, struct knote *kn);
229
230 static void knote_apply_touch(kqueue_t kqu, struct knote *kn,
231 struct kevent_qos_s *kev, int result);
232 static void knote_suppress(kqueue_t kqu, struct knote *kn);
233 static void knote_unsuppress(kqueue_t kqu, struct knote *kn);
234 static void knote_drop(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc);
235
236 // Both of these functions may dequeue the knote; it is up to the caller
237 // to enqueue the knote back.
238 static void knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result);
239 static void knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp);
240
241 static ZONE_DECLARE(knote_zone, "knote zone",
242 sizeof(struct knote), ZC_CACHING | ZC_ZFREE_CLEARMEM);
243 static ZONE_DECLARE(kqfile_zone, "kqueue file zone",
244 sizeof(struct kqfile), ZC_ZFREE_CLEARMEM);
245 static ZONE_DECLARE(kqworkq_zone, "kqueue workq zone",
246 sizeof(struct kqworkq), ZC_ZFREE_CLEARMEM);
247 static ZONE_DECLARE(kqworkloop_zone, "kqueue workloop zone",
248 sizeof(struct kqworkloop), ZC_CACHING | ZC_ZFREE_CLEARMEM);
249
250 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
251
252 static int filt_no_attach(struct knote *kn, struct kevent_qos_s *kev);
253 static void filt_no_detach(struct knote *kn);
254 static int filt_bad_event(struct knote *kn, long hint);
255 static int filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev);
256 static int filt_bad_process(struct knote *kn, struct kevent_qos_s *kev);
257
258 SECURITY_READ_ONLY_EARLY(static struct filterops) bad_filtops = {
259 .f_attach = filt_no_attach,
260 .f_detach = filt_no_detach,
261 .f_event = filt_bad_event,
262 .f_touch = filt_bad_touch,
263 .f_process = filt_bad_process,
264 };
265
266 #if CONFIG_MEMORYSTATUS
267 extern const struct filterops memorystatus_filtops;
268 #endif /* CONFIG_MEMORYSTATUS */
269 extern const struct filterops fs_filtops;
270 extern const struct filterops sig_filtops;
271 extern const struct filterops machport_filtops;
272 extern const struct filterops pipe_nfiltops;
273 extern const struct filterops pipe_rfiltops;
274 extern const struct filterops pipe_wfiltops;
275 extern const struct filterops ptsd_kqops;
276 extern const struct filterops ptmx_kqops;
277 extern const struct filterops soread_filtops;
278 extern const struct filterops sowrite_filtops;
279 extern const struct filterops sock_filtops;
280 extern const struct filterops soexcept_filtops;
281 extern const struct filterops spec_filtops;
282 extern const struct filterops bpfread_filtops;
283 extern const struct filterops necp_fd_rfiltops;
284 extern const struct filterops fsevent_filtops;
285 extern const struct filterops vnode_filtops;
286 extern const struct filterops tty_filtops;
287
288 const static struct filterops file_filtops;
289 const static struct filterops kqread_filtops;
290 const static struct filterops proc_filtops;
291 const static struct filterops timer_filtops;
292 const static struct filterops user_filtops;
293 const static struct filterops workloop_filtops;
294
295 /*
296 *
297 * Rules for adding new filters to the system:
298 * Public filters:
299 * - Add a new "EVFILT_" option value to bsd/sys/event.h (typically a negative value)
300 * in the exported section of the header
301 * - Update the EVFILT_SYSCOUNT value to reflect the new addition
302 * - Add a filterops to the sysfilt_ops array. Public filters should be added at the end
303 * of the Public Filters section in the array.
304 * Private filters:
305 * - Add a new "EVFILT_" value to bsd/sys/event.h (typically a positive value)
306 * in the XNU_KERNEL_PRIVATE section of the header
307 * - Update the EVFILTID_MAX value to reflect the new addition
308 * - Add a filterops to the sysfilt_ops. Private filters should be added at the end of
309 * the Private filters section of the array.
310 */
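/*
 * Illustrative sketch (hypothetical names, not part of this file): a new
 * private filter EVFILT_FOO would add an EVFILTID_FOO value before
 * EVFILTID_MAX in bsd/sys/event.h and an
 *     [EVFILTID_FOO] = &foo_filtops,
 * entry at the end of the Private filters section of sysfilt_ops below.
 */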
311 static_assert(EVFILTID_MAX < UINT8_MAX, "kn_filtid expects this to be true");
312 static const struct filterops * const sysfilt_ops[EVFILTID_MAX] = {
313 /* Public Filters */
314 [~EVFILT_READ] = &file_filtops,
315 [~EVFILT_WRITE] = &file_filtops,
316 [~EVFILT_AIO] = &bad_filtops,
317 [~EVFILT_VNODE] = &file_filtops,
318 [~EVFILT_PROC] = &proc_filtops,
319 [~EVFILT_SIGNAL] = &sig_filtops,
320 [~EVFILT_TIMER] = &timer_filtops,
321 [~EVFILT_MACHPORT] = &machport_filtops,
322 [~EVFILT_FS] = &fs_filtops,
323 [~EVFILT_USER] = &user_filtops,
324 [~EVFILT_UNUSED_11] = &bad_filtops,
325 [~EVFILT_VM] = &bad_filtops,
326 [~EVFILT_SOCK] = &file_filtops,
327 #if CONFIG_MEMORYSTATUS
328 [~EVFILT_MEMORYSTATUS] = &memorystatus_filtops,
329 #else
330 [~EVFILT_MEMORYSTATUS] = &bad_filtops,
331 #endif
332 [~EVFILT_EXCEPT] = &file_filtops,
333 [~EVFILT_WORKLOOP] = &workloop_filtops,
334
335 /* Private filters */
336 [EVFILTID_KQREAD] = &kqread_filtops,
337 [EVFILTID_PIPE_N] = &pipe_nfiltops,
338 [EVFILTID_PIPE_R] = &pipe_rfiltops,
339 [EVFILTID_PIPE_W] = &pipe_wfiltops,
340 [EVFILTID_PTSD] = &ptsd_kqops,
341 [EVFILTID_SOREAD] = &soread_filtops,
342 [EVFILTID_SOWRITE] = &sowrite_filtops,
343 [EVFILTID_SCK] = &sock_filtops,
344 [EVFILTID_SOEXCEPT] = &soexcept_filtops,
345 [EVFILTID_SPEC] = &spec_filtops,
346 [EVFILTID_BPFREAD] = &bpfread_filtops,
347 [EVFILTID_NECP_FD] = &necp_fd_rfiltops,
348 [EVFILTID_FSEVENT] = &fsevent_filtops,
349 [EVFILTID_VN] = &vnode_filtops,
350 [EVFILTID_TTY] = &tty_filtops,
351 [EVFILTID_PTMX] = &ptmx_kqops,
352
353 /* fake filter for detached knotes, keep last */
354 [EVFILTID_DETACHED] = &bad_filtops,
355 };
356
357 /* waitq prepost callback */
358 void waitq_set__CALLING_PREPOST_HOOK__(waitq_set_prepost_hook_t *kq_hook);
359
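/*
 * Helpers over a workq thread request (workq_threadreq_t):
 * - kqr_thread_bound/kqr_thread_requested/kqr_thread_requested_pending
 *   inspect its binding state,
 * - kqr_thread/kqr_thread_fast return the bound servicer thread,
 * - kqr_kqworkloop/kqr_kqueue map the request back to its owning kqueue.
 */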
360 static inline bool
361 kqr_thread_bound(workq_threadreq_t kqr)
362 {
363 return kqr->tr_state == WORKQ_TR_STATE_BOUND;
364 }
365
366 static inline bool
367 kqr_thread_requested_pending(workq_threadreq_t kqr)
368 {
369 workq_tr_state_t tr_state = kqr->tr_state;
370 return tr_state > WORKQ_TR_STATE_IDLE && tr_state < WORKQ_TR_STATE_BOUND;
371 }
372
373 static inline bool
374 kqr_thread_requested(workq_threadreq_t kqr)
375 {
376 return kqr->tr_state != WORKQ_TR_STATE_IDLE;
377 }
378
379 static inline thread_t
380 kqr_thread_fast(workq_threadreq_t kqr)
381 {
382 assert(kqr_thread_bound(kqr));
383 return kqr->tr_thread;
384 }
385
386 static inline thread_t
387 kqr_thread(workq_threadreq_t kqr)
388 {
389 return kqr_thread_bound(kqr) ? kqr->tr_thread : THREAD_NULL;
390 }
391
392 static inline struct kqworkloop *
393 kqr_kqworkloop(workq_threadreq_t kqr)
394 {
395 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
396 return __container_of(kqr, struct kqworkloop, kqwl_request);
397 }
398 return NULL;
399 }
400
401 static inline kqueue_t
402 kqr_kqueue(proc_t p, workq_threadreq_t kqr)
403 {
404 kqueue_t kqu;
405 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
406 kqu.kqwl = kqr_kqworkloop(kqr);
407 } else {
408 kqu.kqwq = p->p_fd->fd_wqkqueue;
409 assert(kqr >= kqu.kqwq->kqwq_request &&
410 kqr < kqu.kqwq->kqwq_request + KQWQ_NBUCKETS);
411 }
412 return kqu;
413 }
414
415 /*
416 * kqueue/note lock implementations
417 *
418 * The kqueue lock guards the kq state, the state of its queues,
419 * and the kqueue-aware status and locks of individual knotes.
420 *
421 * The kqueue workq lock is used to protect state guarding the
422 * interaction of the kqueue with the workq. This state cannot
423 * be guarded by the kq lock, as it needs to be taken when we
424 * already have the waitq set lock held (during the waitq hook
425 * callback). It might be better to use the waitq lock itself
426 * for this, but the IRQ requirements make that difficult.
427 *
428 * Knote flags, filter flags, and associated data are protected
429 * by the underlying object lock - and are only ever looked at
430 * by calling the filter to get a [consistent] snapshot of that
431 * data.
432 */
433
434 static inline void
435 kqlock(kqueue_t kqu)
436 {
437 lck_spin_lock(&kqu.kq->kq_lock);
438 }
439
440 static inline void
441 kqlock_held(__assert_only kqueue_t kqu)
442 {
443 LCK_SPIN_ASSERT(&kqu.kq->kq_lock, LCK_ASSERT_OWNED);
444 }
445
446 static inline void
447 kqunlock(kqueue_t kqu)
448 {
449 lck_spin_unlock(&kqu.kq->kq_lock);
450 }
451
452 static inline void
453 knhash_lock(struct filedesc *fdp)
454 {
455 lck_mtx_lock(&fdp->fd_knhashlock);
456 }
457
458 static inline void
459 knhash_unlock(struct filedesc *fdp)
460 {
461 lck_mtx_unlock(&fdp->fd_knhashlock);
462 }
463
464 /* wait event for knote locks */
465 static inline event_t
466 knote_lock_wev(struct knote *kn)
467 {
468 return (event_t)(&kn->kn_hook);
469 }
470
471 /* wait event for kevent_register_wait_* */
472 static inline event64_t
473 knote_filt_wev64(struct knote *kn)
474 {
475 /* kdp_workloop_sync_wait_find_owner knows about this */
476 return CAST_EVENT64_T(kn);
477 }
478
479 /* wait event for knote_post/knote_drop */
480 static inline event64_t
481 knote_post_wev64(struct knote *kn)
482 {
483 return CAST_EVENT64_T(&kn->kn_kevent);
484 }
485
486 /*!
487 * @function knote_has_qos
488 *
489 * @brief
490 * Whether the knote has a regular QoS.
491 *
492 * @discussion
493 * kn_qos_override is:
494 * - 0 on kqfiles
495 * - THREAD_QOS_LAST for special buckets (stayactive, manager)
496 *
497 * Other values mean the knote participates in QoS propagation.
498 */
499 static inline bool
500 knote_has_qos(struct knote *kn)
501 {
502 return kn->kn_qos_override > 0 && kn->kn_qos_override < THREAD_QOS_LAST;
503 }
504
505 #pragma mark knote locks
506
507 /*
508 * Enum used by the knote_lock_* functions.
509 *
510 * KNOTE_KQ_LOCK_ALWAYS
511 * The function will always return with the kq lock held.
512 *
513 * KNOTE_KQ_LOCK_ON_SUCCESS
514 * The function will return with the kq lock held if it was successful
515 * (knote_lock() is the only function that can fail).
516 *
517 * KNOTE_KQ_LOCK_ON_FAILURE
518 * The function will return with the kq lock held if it was unsuccessful
519 * (knote_lock() is the only function that can fail).
520 *
521 * KNOTE_KQ_UNLOCK
522 * The function returns with the kq unlocked.
523 */
524 enum kqlocking {
525 KNOTE_KQ_LOCK_ALWAYS,
526 KNOTE_KQ_LOCK_ON_SUCCESS,
527 KNOTE_KQ_LOCK_ON_FAILURE,
528 KNOTE_KQ_UNLOCK,
529 };
530
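/*
 * Returns the lock context of the thread currently holding the knote lock
 * for `kn` (the caller has observed KN_LOCKED); panics if none is found.
 */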
531 static struct knote_lock_ctx *
532 knote_lock_ctx_find(kqueue_t kqu, struct knote *kn)
533 {
534 struct knote_lock_ctx *ctx;
535 LIST_FOREACH(ctx, &kqu.kq->kq_knlocks, knlc_link) {
536 if (ctx->knlc_knote == kn) {
537 return ctx;
538 }
539 }
540 panic("knote lock context not found: %p", kn);
541 __builtin_trap();
542 }
543
544 /* slowpath of knote_lock() */
545 __attribute__((noinline))
546 static bool __result_use_check
547 knote_lock_slow(kqueue_t kqu, struct knote *kn,
548 struct knote_lock_ctx *knlc, int kqlocking)
549 {
550 struct knote_lock_ctx *owner_lc;
551 struct uthread *uth = current_uthread();
552 wait_result_t wr;
553
554 kqlock_held(kqu);
555
556 owner_lc = knote_lock_ctx_find(kqu, kn);
557 #if DEBUG || DEVELOPMENT
558 knlc->knlc_state = KNOTE_LOCK_CTX_WAITING;
559 #endif
560 owner_lc->knlc_waiters++;
561
562 /*
563 * Make our lock context visible to knote_unlock()
564 */
565 uth->uu_knlock = knlc;
566
567 wr = lck_spin_sleep_with_inheritor(&kqu.kq->kq_lock, LCK_SLEEP_UNLOCK,
568 knote_lock_wev(kn), owner_lc->knlc_thread,
569 THREAD_UNINT | THREAD_WAIT_NOREPORT, TIMEOUT_WAIT_FOREVER);
570
571 if (wr == THREAD_RESTART) {
572 /*
573 * We haven't been woken up by knote_unlock() but by knote_unlock_cancel().
574 * We need to clean up the state since no one else did.
575 */
576 uth->uu_knlock = NULL;
577 #if DEBUG || DEVELOPMENT
578 assert(knlc->knlc_state == KNOTE_LOCK_CTX_WAITING);
579 knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
580 #endif
581
582 if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
583 kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
584 kqlock(kqu);
585 }
586 return false;
587 } else {
588 if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
589 kqlocking == KNOTE_KQ_LOCK_ON_SUCCESS) {
590 kqlock(kqu);
591 #if DEBUG || DEVELOPMENT
592 /*
593 * This state is set under the lock so we can't
594 * really assert this unless we hold the lock.
595 */
596 assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
597 #endif
598 }
599 return true;
600 }
601 }
602
603 /*
604 * Attempts to take the "knote" lock.
605 *
606 * Called with the kqueue lock held.
607 *
608 * Returns true if the knote lock is acquired, false if it has been dropped
609 */
610 static bool __result_use_check
611 knote_lock(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc,
612 enum kqlocking kqlocking)
613 {
614 kqlock_held(kqu);
615
616 #if DEBUG || DEVELOPMENT
617 assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED);
618 #endif
619 knlc->knlc_knote = kn;
620 knlc->knlc_thread = current_thread();
621 knlc->knlc_waiters = 0;
622
623 if (__improbable(kn->kn_status & KN_LOCKED)) {
624 return knote_lock_slow(kqu, kn, knlc, kqlocking);
625 }
626
627 /*
628 * When a knote is dropped, the knote lock is taken before
629 * KN_DROPPING is set, and the knote is removed from any
630 * hash table that references it before the lock is canceled.
631 */
632 assert((kn->kn_status & KN_DROPPING) == 0);
633 LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, knlc, knlc_link);
634 kn->kn_status |= KN_LOCKED;
635 #if DEBUG || DEVELOPMENT
636 knlc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
637 #endif
638
639 if (kqlocking == KNOTE_KQ_UNLOCK ||
640 kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
641 kqunlock(kqu);
642 }
643 return true;
644 }
645
646 /*
647 * Unlocks a knote successfully locked with knote_lock().
648 *
649 * Called with the kqueue lock held.
650 *
651 * Returns with the kqueue lock held according to KNOTE_KQ_* mode.
652 */
653 static void
654 knote_unlock(kqueue_t kqu, struct knote *kn,
655 struct knote_lock_ctx *knlc, enum kqlocking kqlocking)
656 {
657 kqlock_held(kqu);
658
659 assert(knlc->knlc_knote == kn);
660 assert(kn->kn_status & KN_LOCKED);
661 #if DEBUG || DEVELOPMENT
662 assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
663 #endif
664
665 LIST_REMOVE(knlc, knlc_link);
666
667 if (knlc->knlc_waiters) {
668 thread_t thread = THREAD_NULL;
669
670 wakeup_one_with_inheritor(knote_lock_wev(kn), THREAD_AWAKENED,
671 LCK_WAKE_DEFAULT, &thread);
672
673 /*
674 * knote_lock_slow() publishes the lock context of waiters
675 * in uthread::uu_knlock.
676 *
677 * Reach out and make this context the new owner.
678 */
679 struct uthread *ut = get_bsdthread_info(thread);
680 struct knote_lock_ctx *next_owner_lc = ut->uu_knlock;
681
682 assert(next_owner_lc->knlc_knote == kn);
683 next_owner_lc->knlc_waiters = knlc->knlc_waiters - 1;
684 LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, next_owner_lc, knlc_link);
685 #if DEBUG || DEVELOPMENT
686 next_owner_lc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
687 #endif
688 ut->uu_knlock = NULL;
689 thread_deallocate_safe(thread);
690 } else {
691 kn->kn_status &= ~KN_LOCKED;
692 }
693
694 if ((kn->kn_status & KN_MERGE_QOS) && !(kn->kn_status & KN_POSTING)) {
695 /*
696 * No f_event() in flight anymore, we can leave QoS "Merge" mode
697 *
698 * See knote_adjust_qos()
699 */
700 kn->kn_status &= ~KN_MERGE_QOS;
701 }
702 if (kqlocking == KNOTE_KQ_UNLOCK) {
703 kqunlock(kqu);
704 }
705 #if DEBUG || DEVELOPMENT
706 knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
707 #endif
708 }
709
710 /*
711 * Aborts all waiters for a knote lock, and unlocks the knote.
712 *
713 * Called with the kqueue lock held.
714 *
715 * Returns with the kqueue unlocked.
716 */
717 static void
718 knote_unlock_cancel(struct kqueue *kq, struct knote *kn,
719 struct knote_lock_ctx *knlc)
720 {
721 kqlock_held(kq);
722
723 assert(knlc->knlc_knote == kn);
724 assert(kn->kn_status & KN_LOCKED);
725 assert(kn->kn_status & KN_DROPPING);
726
727 LIST_REMOVE(knlc, knlc_link);
728 kn->kn_status &= ~KN_LOCKED;
729 kqunlock(kq);
730
731 if (knlc->knlc_waiters) {
732 wakeup_all_with_inheritor(knote_lock_wev(kn), THREAD_RESTART);
733 }
734 #if DEBUG || DEVELOPMENT
735 knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
736 #endif
737 }
738
739 /*
740 * Call the f_event hook of a given filter.
741 *
742 * Takes a use count to protect against concurrent drops.
743 */
744 static void
745 knote_post(struct knote *kn, long hint)
746 {
747 struct kqueue *kq = knote_get_kq(kn);
748 int dropping, result;
749
750 kqlock(kq);
751
752 if (__improbable(kn->kn_status & (KN_DROPPING | KN_VANISHED))) {
753 return kqunlock(kq);
754 }
755
756 if (__improbable(kn->kn_status & KN_POSTING)) {
757 panic("KNOTE() called concurrently on knote %p", kn);
758 }
759
760 kn->kn_status |= KN_POSTING;
761
762 kqunlock(kq);
763 result = filter_call(knote_fops(kn), f_event(kn, hint));
764 kqlock(kq);
765
766 dropping = (kn->kn_status & KN_DROPPING);
767
768 if (!dropping && (result & FILTER_ACTIVE)) {
769 knote_activate(kq, kn, result);
770 }
771
772 if ((kn->kn_status & KN_LOCKED) == 0) {
773 /*
774 * There's no other f_* call in flight, we can leave QoS "Merge" mode.
775 *
776 * See knote_adjust_qos()
777 */
778 kn->kn_status &= ~(KN_POSTING | KN_MERGE_QOS);
779 } else {
780 kn->kn_status &= ~KN_POSTING;
781 }
782
783 if (__improbable(dropping)) {
784 waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, knote_post_wev64(kn),
785 THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
786 }
787
788 kqunlock(kq);
789 }
790
791 /*
792 * Called by knote_drop() to wait for the last f_event() caller to be done.
793 *
794 * - kq locked at entry
795 * - kq unlocked at exit
796 */
797 static void
798 knote_wait_for_post(struct kqueue *kq, struct knote *kn)
799 {
800 wait_result_t wr = THREAD_NOT_WAITING;
801
802 kqlock_held(kq);
803
804 assert(kn->kn_status & KN_DROPPING);
805
806 if (kn->kn_status & KN_POSTING) {
807 wr = waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
808 knote_post_wev64(kn), THREAD_UNINT | THREAD_WAIT_NOREPORT,
809 TIMEOUT_WAIT_FOREVER);
810 }
811 kqunlock(kq);
812 if (wr == THREAD_WAITING) {
813 thread_block(THREAD_CONTINUE_NULL);
814 }
815 }
816
817 #pragma mark knote helpers for filters
818
819 OS_ALWAYS_INLINE
820 void
821 knote_set_error(struct knote *kn, int error)
822 {
823 kn->kn_flags |= EV_ERROR;
824 kn->kn_sdata = error;
825 }
826
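/* Effective low-water mark: kn_sdata when NOTE_LOWAT is set, 1 otherwise. */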
827 OS_ALWAYS_INLINE
828 int64_t
829 knote_low_watermark(const struct knote *kn)
830 {
831 return (kn->kn_sfflags & NOTE_LOWAT) ? kn->kn_sdata : 1;
832 }
833
834 /*!
835 * @function knote_fill_kevent_with_sdata
836 *
837 * @brief
838 * Fills in a kevent from the current content of a knote.
839 *
840 * @discussion
841 * This is meant to be called from filter's f_event hooks.
842 * The kevent data is filled with kn->kn_sdata.
843 *
844 * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
845 *
846 * Using knote_fill_kevent is typically preferred.
847 */
848 OS_ALWAYS_INLINE
849 void
850 knote_fill_kevent_with_sdata(struct knote *kn, struct kevent_qos_s *kev)
851 {
852 #define knote_assert_aliases(name1, offs1, name2) \
853 static_assert(offsetof(struct kevent_qos_s, name1) + offs1 == \
854 offsetof(struct kevent_internal_s, name2), \
855 "kevent_qos_s::" #name1 " and kevent_internal_s::" #name2 " need to alias")
856 /*
857 * All the code makes assumptions about these aliases,
858 * so make sure we fail the build if we ever break them.
859 */
860 knote_assert_aliases(ident, 0, kei_ident);
861 #ifdef __LITTLE_ENDIAN__
862 knote_assert_aliases(filter, 0, kei_filter); // non trivial overlap
863 knote_assert_aliases(filter, 1, kei_filtid); // non trivial overlap
864 #else
865 knote_assert_aliases(filter, 0, kei_filtid); // non trivial overlap
866 knote_assert_aliases(filter, 1, kei_filter); // non trivial overlap
867 #endif
868 knote_assert_aliases(flags, 0, kei_flags);
869 knote_assert_aliases(qos, 0, kei_qos);
870 knote_assert_aliases(udata, 0, kei_udata);
871 knote_assert_aliases(fflags, 0, kei_fflags);
872 knote_assert_aliases(xflags, 0, kei_sfflags); // non trivial overlap
873 knote_assert_aliases(data, 0, kei_sdata); // non trivial overlap
874 knote_assert_aliases(ext, 0, kei_ext);
875 #undef knote_assert_aliases
876
877 /*
878 * Fix the differences between kevent_qos_s and kevent_internal_s:
879 * - xflags is where kn_sfflags lives; we need to zero it
880 * - fix up the high bits of `filter`, where kn_filtid lives
881 */
882 *kev = *(struct kevent_qos_s *)&kn->kn_kevent;
883 kev->xflags = 0;
884 kev->filter |= 0xff00;
885 if (kn->kn_flags & EV_CLEAR) {
886 kn->kn_fflags = 0;
887 }
888 }
889
890 /*!
891 * @function knote_fill_kevent
892 *
893 * @brief
894 * Fills in a kevent from the current content of a knote.
895 *
896 * @discussion
897 * This is meant to be called from filter's f_event hooks.
898 * The kevent data is filled with the passed in data.
899 *
900 * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
901 */
902 OS_ALWAYS_INLINE
903 void
904 knote_fill_kevent(struct knote *kn, struct kevent_qos_s *kev, int64_t data)
905 {
906 knote_fill_kevent_with_sdata(kn, kev);
907 kev->filter = kn->kn_filter;
908 kev->data = data;
909 }
910
911
912 #pragma mark file_filtops
913
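/*
 * Generic attach for fd-backed filters (EVFILT_READ/WRITE/VNODE/SOCK/EXCEPT):
 * hand the knote to the file's own fo_kqfilter, which redirects it to the
 * object-specific private filter.
 */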
914 static int
915 filt_fileattach(struct knote *kn, struct kevent_qos_s *kev)
916 {
917 return fo_kqfilter(kn->kn_fp, kn, kev);
918 }
919
920 SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = {
921 .f_isfd = 1,
922 .f_attach = filt_fileattach,
923 };
924
925 #pragma mark kqread_filtops
926
927 #define f_flag fp_glob->fg_flag
928 #define f_ops fp_glob->fg_ops
929 #define f_data fp_glob->fg_data
930 #define f_lflags fp_glob->fg_lflags
931
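/*
 * EVFILT_READ on a kqueue file: the knote is active while the kqueue
 * has pending events (kq_count > 0).
 */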
932 static void
933 filt_kqdetach(struct knote *kn)
934 {
935 struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
936 struct kqueue *kq = &kqf->kqf_kqueue;
937
938 kqlock(kq);
939 KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
940 kqunlock(kq);
941 }
942
943 static int
944 filt_kqueue(struct knote *kn, __unused long hint)
945 {
946 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
947
948 return kq->kq_count > 0;
949 }
950
951 static int
952 filt_kqtouch(struct knote *kn, struct kevent_qos_s *kev)
953 {
954 #pragma unused(kev)
955 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
956 int res;
957
958 kqlock(kq);
959 res = (kq->kq_count > 0);
960 kqunlock(kq);
961
962 return res;
963 }
964
965 static int
966 filt_kqprocess(struct knote *kn, struct kevent_qos_s *kev)
967 {
968 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
969 int res = 0;
970
971 kqlock(kq);
972 if (kq->kq_count) {
973 knote_fill_kevent(kn, kev, kq->kq_count);
974 res = 1;
975 }
976 kqunlock(kq);
977
978 return res;
979 }
980
981 SECURITY_READ_ONLY_EARLY(static struct filterops) kqread_filtops = {
982 .f_isfd = 1,
983 .f_detach = filt_kqdetach,
984 .f_event = filt_kqueue,
985 .f_touch = filt_kqtouch,
986 .f_process = filt_kqprocess,
987 };
988
989 #pragma mark proc_filtops
990
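/*
 * EVFILT_PROC attach: look up the target pid, restrict NOTE_EXIT|NOTE_EXITSTATUS
 * to the parent (or ptrace parent, or a process allowed to signal the target),
 * and attach the knote to the target proc's klist.
 */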
991 static int
992 filt_procattach(struct knote *kn, __unused struct kevent_qos_s *kev)
993 {
994 struct proc *p;
995
996 assert(PID_MAX < NOTE_PDATAMASK);
997
998 if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) {
999 knote_set_error(kn, ENOTSUP);
1000 return 0;
1001 }
1002
1003 p = proc_find((int)kn->kn_id);
1004 if (p == NULL) {
1005 knote_set_error(kn, ESRCH);
1006 return 0;
1007 }
1008
1009 const uint32_t NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
1010
1011 if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits) {
1012 do {
1013 pid_t selfpid = proc_selfpid();
1014
1015 if (p->p_ppid == selfpid) {
1016 break; /* parent => ok */
1017 }
1018 if ((p->p_lflag & P_LTRACED) != 0 &&
1019 (p->p_oppid == selfpid)) {
1020 break; /* parent-in-waiting => ok */
1021 }
1022 if (cansignal(current_proc(), kauth_cred_get(), p, SIGKILL)) {
1023 break; /* allowed to signal => ok */
1024 }
1025 proc_rele(p);
1026 knote_set_error(kn, EACCES);
1027 return 0;
1028 } while (0);
1029 }
1030
1031 kn->kn_proc = p;
1032 kn->kn_flags |= EV_CLEAR; /* automatically set */
1033 kn->kn_sdata = 0; /* incoming data is ignored */
1034
1035 proc_klist_lock();
1036
1037 KNOTE_ATTACH(&p->p_klist, kn);
1038
1039 proc_klist_unlock();
1040
1041 proc_rele(p);
1042
1043 /*
1044 * only captures edge-triggered events after this point
1045 * so it can't already be fired.
1046 */
1047 return 0;
1048 }
1049
1050
1051 /*
1052 * The knote may be attached to a different process, which may exit,
1053 * leaving nothing for the knote to be attached to. In that case,
1054 * the pointer to the process will have already been nulled out.
1055 */
1056 static void
1057 filt_procdetach(struct knote *kn)
1058 {
1059 struct proc *p;
1060
1061 proc_klist_lock();
1062
1063 p = kn->kn_proc;
1064 if (p != PROC_NULL) {
1065 kn->kn_proc = PROC_NULL;
1066 KNOTE_DETACH(&p->p_klist, kn);
1067 }
1068
1069 proc_klist_unlock();
1070 }
1071
1072 static int
1073 filt_procevent(struct knote *kn, long hint)
1074 {
1075 u_int event;
1076
1077 /* ALWAYS CALLED WITH proc_klist_lock */
1078
1079 /*
1080 * Note: a lot of bits in hint may be obtained from the knote.
1081 * To free some of those bits, see <rdar://problem/12592988> Freeing up
1082 * bits in hint for filt_procevent.
1083 *
1084 * mask off extra data
1085 */
1086 event = (u_int)hint & NOTE_PCTRLMASK;
1087
1088 /*
1089 * termination lifecycle events can happen while a debugger
1090 * has reparented a process, in which case notifications
1091 * should be quashed except to the tracing parent. When
1092 * the debugger reaps the child (either via wait4(2) or
1093 * process exit), the child will be reparented to the original
1094 * parent and these knotes re-fired.
1095 */
1096 if (event & NOTE_EXIT) {
1097 if ((kn->kn_proc->p_oppid != 0)
1098 && (knote_get_kq(kn)->kq_p->p_pid != kn->kn_proc->p_ppid)) {
1099 /*
1100 * This knote is not for the current ptrace(2) parent, ignore.
1101 */
1102 return 0;
1103 }
1104 }
1105
1106 /*
1107 * if the user is interested in this event, record it.
1108 */
1109 if (kn->kn_sfflags & event) {
1110 kn->kn_fflags |= event;
1111 }
1112
1113 #pragma clang diagnostic push
1114 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1115 if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
1116 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
1117 }
1118 #pragma clang diagnostic pop
1119
1120
1121 /*
1122 * The kernel has a wrapper in place that returns the same data
1123 * as is collected here, in kn_hook32. Any changes to how
1124 * NOTE_EXITSTATUS and NOTE_EXIT_DETAIL are collected
1125 * should also be reflected in the proc_pidnoteexit() wrapper.
1126 */
1127 if (event == NOTE_EXIT) {
1128 kn->kn_hook32 = 0;
1129 if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
1130 kn->kn_fflags |= NOTE_EXITSTATUS;
1131 kn->kn_hook32 |= (hint & NOTE_PDATAMASK);
1132 }
1133 if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
1134 kn->kn_fflags |= NOTE_EXIT_DETAIL;
1135 if ((kn->kn_proc->p_lflag &
1136 P_LTERM_DECRYPTFAIL) != 0) {
1137 kn->kn_hook32 |= NOTE_EXIT_DECRYPTFAIL;
1138 }
1139 if ((kn->kn_proc->p_lflag &
1140 P_LTERM_JETSAM) != 0) {
1141 kn->kn_hook32 |= NOTE_EXIT_MEMORY;
1142 switch (kn->kn_proc->p_lflag & P_JETSAM_MASK) {
1143 case P_JETSAM_VMPAGESHORTAGE:
1144 kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
1145 break;
1146 case P_JETSAM_VMTHRASHING:
1147 kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMTHRASHING;
1148 break;
1149 case P_JETSAM_FCTHRASHING:
1150 kn->kn_hook32 |= NOTE_EXIT_MEMORY_FCTHRASHING;
1151 break;
1152 case P_JETSAM_VNODE:
1153 kn->kn_hook32 |= NOTE_EXIT_MEMORY_VNODE;
1154 break;
1155 case P_JETSAM_HIWAT:
1156 kn->kn_hook32 |= NOTE_EXIT_MEMORY_HIWAT;
1157 break;
1158 case P_JETSAM_PID:
1159 kn->kn_hook32 |= NOTE_EXIT_MEMORY_PID;
1160 break;
1161 case P_JETSAM_IDLEEXIT:
1162 kn->kn_hook32 |= NOTE_EXIT_MEMORY_IDLE;
1163 break;
1164 }
1165 }
1166 if ((kn->kn_proc->p_csflags &
1167 CS_KILLED) != 0) {
1168 kn->kn_hook32 |= NOTE_EXIT_CSERROR;
1169 }
1170 }
1171 }
1172
1173 /* if we have any matching state, activate the knote */
1174 return kn->kn_fflags != 0;
1175 }
1176
1177 static int
1178 filt_proctouch(struct knote *kn, struct kevent_qos_s *kev)
1179 {
1180 int res;
1181
1182 proc_klist_lock();
1183
1184 /* accept new filter flags and mask off output events no longer interesting */
1185 kn->kn_sfflags = kev->fflags;
1186
1187 /* restrict the current results to the (smaller?) set of new interest */
1188 /*
1189 * For compatibility with previous implementations, we leave kn_fflags
1190 * as they were before.
1191 */
1192 //kn->kn_fflags &= kn->kn_sfflags;
1193
1194 res = (kn->kn_fflags != 0);
1195
1196 proc_klist_unlock();
1197
1198 return res;
1199 }
1200
1201 static int
1202 filt_procprocess(struct knote *kn, struct kevent_qos_s *kev)
1203 {
1204 int res = 0;
1205
1206 proc_klist_lock();
1207 if (kn->kn_fflags) {
1208 knote_fill_kevent(kn, kev, kn->kn_hook32);
1209 kn->kn_hook32 = 0;
1210 res = 1;
1211 }
1212 proc_klist_unlock();
1213 return res;
1214 }
1215
1216 SECURITY_READ_ONLY_EARLY(static struct filterops) proc_filtops = {
1217 .f_attach = filt_procattach,
1218 .f_detach = filt_procdetach,
1219 .f_event = filt_procevent,
1220 .f_touch = filt_proctouch,
1221 .f_process = filt_procprocess,
1222 };
1223
1224 #pragma mark timer_filtops
1225
1226 struct filt_timer_params {
1227 uint64_t deadline; /* deadline in abs/cont time
1228 * (or 0 if NOTE_ABSOLUTE and deadline is in past) */
1229 uint64_t leeway; /* leeway in abstime, or 0 if none */
1230 uint64_t interval; /* interval in abstime or 0 if non-repeating timer */
1231 };
1232
1233 /*
1234 * Values stored in the knote at rest (using Mach absolute time units)
1235 *
1236 * kn->kn_thcall where the thread_call object is stored
1237 * kn->kn_ext[0] next deadline or 0 if immediate expiration
1238 * kn->kn_ext[1] leeway value
1239 * kn->kn_sdata interval timer: the interval
1240 * absolute/deadline timer: 0
1241 * kn->kn_hook32 timer state (with gencount)
1242 *
1243 * TIMER_IDLE:
1244 * The timer has either never been scheduled or been cancelled.
1245 * It is safe to schedule a new one in this state.
1246 *
1247 * TIMER_ARMED:
1248 * The timer has been scheduled
1249 *
1250 * TIMER_FIRED:
1251 * The timer has fired and an event needs to be delivered.
1252 * When in this state, the callout may still be running.
1253 *
1254 * TIMER_IMMEDIATE:
1255 * The timer has fired at registration time, and the callout was never
1256 * dispatched.
1257 */
1258 #define TIMER_IDLE 0x0
1259 #define TIMER_ARMED 0x1
1260 #define TIMER_FIRED 0x2
1261 #define TIMER_IMMEDIATE 0x3
1262 #define TIMER_STATE_MASK 0x3
1263 #define TIMER_GEN_INC 0x4
1264
1265 static void
1266 filt_timer_set_params(struct knote *kn, struct filt_timer_params *params)
1267 {
1268 kn->kn_ext[0] = params->deadline;
1269 kn->kn_ext[1] = params->leeway;
1270 kn->kn_sdata = params->interval;
1271 }
1272
1273 /*
1274 * filt_timervalidate - process data from user
1275 *
1276 * Sets up the deadline, interval, and leeway from the provided user data
1277 *
1278 * Input:
1279 * kn_sdata timer deadline or interval time
1280 * kn_sfflags style of timer, unit of measurement
1281 *
1282 * Output:
1283 * struct filt_timer_params to apply to the filter with
1284 * filt_timer_set_params when changes are ready to be committed.
1285 *
1286 * Returns:
1287 * EINVAL Invalid user data parameters
1288 * ERANGE Various overflows with the parameters
1289 *
1290 * Called with timer filter lock held.
1291 */
1292 static int
1293 filt_timervalidate(const struct kevent_qos_s *kev,
1294 struct filt_timer_params *params)
1295 {
1296 /*
1297 * There are 5 knobs that need to be chosen for a timer registration:
1298 *
1299 * A) Units of time (what is the time duration of the specified number)
1300 * Absolute and interval take:
1301 * NOTE_SECONDS, NOTE_USECONDS, NOTE_NSECONDS, NOTE_MACHTIME
1302 * Defaults to milliseconds if not specified
1303 *
1304 * B) Clock epoch (what is the zero point of the specified number)
1305 * For interval, there is none
1306 * For absolute, defaults to the gettimeofday/calendar epoch
1307 * With NOTE_MACHTIME, uses mach_absolute_time()
1308 * With NOTE_MACHTIME and NOTE_MACH_CONTINUOUS_TIME, uses mach_continuous_time()
1309 *
1310 * C) The knote's behavior on delivery
1311 * Interval timer causes the knote to arm for the next interval unless one-shot is set
1312 * Absolute is a forced one-shot timer which deletes on delivery
1313 * TODO: Add a way for absolute to be not forced one-shot
1314 *
1315 * D) Whether the time duration is relative to now or absolute
1316 * Interval fires at now + duration when it is set up
1317 * Absolute fires at now + difference between now walltime and passed in walltime
1318 * With NOTE_MACHTIME it fires at an absolute MAT or MCT.
1319 *
1320 * E) Whether the timer continues to tick across sleep
1321 * By default all three do not.
1322 * For interval and absolute, NOTE_MACH_CONTINUOUS_TIME causes them to tick across sleep
1323 * With NOTE_ABSOLUTE | NOTE_MACHTIME | NOTE_MACH_CONTINUOUS_TIME:
1324 * expires when mach_continuous_time() is > the passed in value.
1325 */
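/*
 * Illustrative userspace registrations for the knobs above (a hedged sketch,
 * not exercised by this file; `kev` and `mct_deadline` are hypothetical):
 *
 *     // one-shot, 5 seconds from now (relative interval, NOTE_SECONDS)
 *     EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ONESHOT, NOTE_SECONDS, 5, 0);
 *
 *     // fires once mach_continuous_time() passes `mct_deadline`
 *     EV_SET(&kev, 2, EVFILT_TIMER, EV_ADD,
 *         NOTE_ABSOLUTE | NOTE_MACHTIME | NOTE_MACH_CONTINUOUS_TIME,
 *         mct_deadline, 0);
 */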
1326
1327 uint64_t multiplier;
1328
1329 boolean_t use_abstime = FALSE;
1330
1331 switch (kev->fflags & (NOTE_SECONDS | NOTE_USECONDS | NOTE_NSECONDS | NOTE_MACHTIME)) {
1332 case NOTE_SECONDS:
1333 multiplier = NSEC_PER_SEC;
1334 break;
1335 case NOTE_USECONDS:
1336 multiplier = NSEC_PER_USEC;
1337 break;
1338 case NOTE_NSECONDS:
1339 multiplier = 1;
1340 break;
1341 case NOTE_MACHTIME:
1342 multiplier = 0;
1343 use_abstime = TRUE;
1344 break;
1345 case 0: /* milliseconds (default) */
1346 multiplier = NSEC_PER_SEC / 1000;
1347 break;
1348 default:
1349 return EINVAL;
1350 }
1351
1352 /* transform the leeway in kn_ext[1] to same time scale */
1353 if (kev->fflags & NOTE_LEEWAY) {
1354 uint64_t leeway_abs;
1355
1356 if (use_abstime) {
1357 leeway_abs = (uint64_t)kev->ext[1];
1358 } else {
1359 uint64_t leeway_ns;
1360 if (os_mul_overflow((uint64_t)kev->ext[1], multiplier, &leeway_ns)) {
1361 return ERANGE;
1362 }
1363
1364 nanoseconds_to_absolutetime(leeway_ns, &leeway_abs);
1365 }
1366
1367 params->leeway = leeway_abs;
1368 } else {
1369 params->leeway = 0;
1370 }
1371
1372 if (kev->fflags & NOTE_ABSOLUTE) {
1373 uint64_t deadline_abs;
1374
1375 if (use_abstime) {
1376 deadline_abs = (uint64_t)kev->data;
1377 } else {
1378 uint64_t calendar_deadline_ns;
1379
1380 if (os_mul_overflow((uint64_t)kev->data, multiplier, &calendar_deadline_ns)) {
1381 return ERANGE;
1382 }
1383
1384 /* calendar_deadline_ns is in nanoseconds since the epoch */
1385
1386 clock_sec_t seconds;
1387 clock_nsec_t nanoseconds;
1388
1389 /*
1390 * Note that the conversion through wall-time is only done once.
1391 *
1392 * If the relationship between MAT and gettimeofday changes,
1393 * the underlying timer does not update.
1394 *
1395 * TODO: build a wall-time denominated timer_call queue
1396 * and a flag to request DTRTing with wall-time timers
1397 */
1398 clock_get_calendar_nanotime(&seconds, &nanoseconds);
1399
1400 uint64_t calendar_now_ns = (uint64_t)seconds * NSEC_PER_SEC + nanoseconds;
1401
1402 /* if deadline is in the future */
1403 if (calendar_now_ns < calendar_deadline_ns) {
1404 uint64_t interval_ns = calendar_deadline_ns - calendar_now_ns;
1405 uint64_t interval_abs;
1406
1407 nanoseconds_to_absolutetime(interval_ns, &interval_abs);
1408
1409 /*
1410 * Note that the NOTE_MACH_CONTINUOUS_TIME flag here only
1411 * causes the timer to keep ticking across sleep, but
1412 * it does not change the calendar timebase.
1413 */
1414
1415 if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
1416 clock_continuoustime_interval_to_deadline(interval_abs,
1417 &deadline_abs);
1418 } else {
1419 clock_absolutetime_interval_to_deadline(interval_abs,
1420 &deadline_abs);
1421 }
1422 } else {
1423 deadline_abs = 0; /* cause immediate expiration */
1424 }
1425 }
1426
1427 params->deadline = deadline_abs;
1428 params->interval = 0; /* NOTE_ABSOLUTE is non-repeating */
1429 } else if (kev->data < 0) {
1430 /*
1431 * Negative interval timers fire immediately, once.
1432 *
1433 * Ideally a negative interval would be an error, but certain clients
1434 * pass negative values by accident, and expect an event back.
1435 *
1436 * In the old implementation the timer would repeat with no delay
1437 * N times until mach_absolute_time() + (N * interval) underflowed,
1438 * then it would wait ~forever by accidentally arming a timer for the far future.
1439 *
1440 * We now skip the power-wasting hot spin phase and go straight to the idle phase.
1441 */
1442
1443 params->deadline = 0; /* expire immediately */
1444 params->interval = 0; /* non-repeating */
1445 } else {
1446 uint64_t interval_abs = 0;
1447
1448 if (use_abstime) {
1449 interval_abs = (uint64_t)kev->data;
1450 } else {
1451 uint64_t interval_ns;
1452 if (os_mul_overflow((uint64_t)kev->data, multiplier, &interval_ns)) {
1453 return ERANGE;
1454 }
1455
1456 nanoseconds_to_absolutetime(interval_ns, &interval_abs);
1457 }
1458
1459 uint64_t deadline = 0;
1460
1461 if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
1462 clock_continuoustime_interval_to_deadline(interval_abs, &deadline);
1463 } else {
1464 clock_absolutetime_interval_to_deadline(interval_abs, &deadline);
1465 }
1466
1467 params->deadline = deadline;
1468 params->interval = interval_abs;
1469 }
1470
1471 return 0;
1472 }
1473
1474 /*
1475 * filt_timerexpire - the timer callout routine
1476 */
1477 static void
1478 filt_timerexpire(void *knx, void *state_on_arm)
1479 {
1480 struct knote *kn = knx;
1481
1482 uint32_t state = (uint32_t)(uintptr_t)state_on_arm;
1483 uint32_t fired_state = state ^ TIMER_ARMED ^ TIMER_FIRED;
1484
1485 if (os_atomic_cmpxchg(&kn->kn_hook32, state, fired_state, relaxed)) {
1486 // our f_event always would say FILTER_ACTIVE,
1487 // so be leaner and just do it.
1488 struct kqueue *kq = knote_get_kq(kn);
1489 kqlock(kq);
1490 knote_activate(kq, kn, FILTER_ACTIVE);
1491 kqunlock(kq);
1492 } else {
1493 /*
1494 * The timer has been reprogrammed or canceled since it was armed,
1495 * and this is a late firing for the timer, just ignore it.
1496 */
1497 }
1498 }
1499
1500 /*
1501 * Does this deadline need a timer armed for it, or has it expired?
1502 */
1503 static bool
1504 filt_timer_is_ready(struct knote *kn)
1505 {
1506 uint64_t now, deadline = kn->kn_ext[0];
1507
1508 if (deadline == 0) {
1509 return true;
1510 }
1511
1512 if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
1513 now = mach_continuous_time();
1514 } else {
1515 now = mach_absolute_time();
1516 }
1517 return deadline <= now;
1518 }
1519
1520 /*
1521 * Arm a timer
1522 *
1523 * It is the responsibility of the caller to make sure the timer call
1524 * has completed or been cancelled properly prior to arming it.
1525 */
1526 static void
1527 filt_timerarm(struct knote *kn)
1528 {
1529 uint64_t deadline = kn->kn_ext[0];
1530 uint64_t leeway = kn->kn_ext[1];
1531 uint32_t state;
1532
1533 int filter_flags = kn->kn_sfflags;
1534 unsigned int timer_flags = 0;
1535
1536 if (filter_flags & NOTE_CRITICAL) {
1537 timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
1538 } else if (filter_flags & NOTE_BACKGROUND) {
1539 timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
1540 } else {
1541 timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
1542 }
1543
1544 if (filter_flags & NOTE_LEEWAY) {
1545 timer_flags |= THREAD_CALL_DELAY_LEEWAY;
1546 }
1547
1548 if (filter_flags & NOTE_MACH_CONTINUOUS_TIME) {
1549 timer_flags |= THREAD_CALL_CONTINUOUS;
1550 }
1551
1552 /*
1553 * Move to ARMED.
1554 *
1555 * We increase the gencount, and set up the thread call with this expected
1556 * state. It means that if there was a previous generation of the timer in
1557 * flight that needs to be ignored, then 3 things are possible:
1558 *
1559 * - the timer fires first: filt_timerexpire() sets the state to FIRED,
1560 * but we clobber it with ARMED and a new gencount. The knote will still
1561 * be activated, but filt_timerprocess() which is serialized with this
1562 * call will not see the FIRED bit set and will not deliver an event.
1563 *
1564 * - this code runs first, but filt_timerexpire() comes second. Because it
1565 * knows an old gencount, it will debounce and not activate the knote.
1566 *
1567 * - filt_timerexpire() wasn't in flight yet, and thread_call_enter below
1568 * will just cancel it properly.
1569 *
1570 * This is important, as userspace expects never to be woken up for past
1571 * timers after filt_timertouch has run.
1572 */
1573 state = os_atomic_load(&kn->kn_hook32, relaxed);
1574 state &= ~TIMER_STATE_MASK;
1575 state += TIMER_GEN_INC + TIMER_ARMED;
1576 os_atomic_store(&kn->kn_hook32, state, relaxed);
1577
1578 thread_call_enter_delayed_with_leeway(kn->kn_thcall,
1579 (void *)(uintptr_t)state, deadline, leeway, timer_flags);
1580 }
1581
1582 /*
1583 * Mark a timer as "already fired" when it is being reprogrammed
1584 *
1585 * If there is a timer in flight, this will make a best effort to cancel it,
1586 * but will not wait. If the thread call was in flight, having set the
1587 * TIMER_IMMEDIATE bit will debounce a filt_timerexpire() racing with this
1588 * cancellation.
1589 */
1590 static void
1591 filt_timerfire_immediate(struct knote *kn)
1592 {
1593 uint32_t state;
1594
1595 static_assert(TIMER_IMMEDIATE == TIMER_STATE_MASK,
1596 "validate that this atomic or will transition to IMMEDIATE");
1597 state = os_atomic_or_orig(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
1598
1599 if ((state & TIMER_STATE_MASK) == TIMER_ARMED) {
1600 thread_call_cancel(kn->kn_thcall);
1601 }
1602 }
1603
1604 /*
1605 * Allocate a thread call for the knote's lifetime, and kick off the timer.
1606 */
1607 static int
1608 filt_timerattach(struct knote *kn, struct kevent_qos_s *kev)
1609 {
1610 thread_call_t callout;
1611 struct filt_timer_params params;
1612 int error;
1613
1614 if ((error = filt_timervalidate(kev, &params)) != 0) {
1615 knote_set_error(kn, error);
1616 return 0;
1617 }
1618
1619 callout = thread_call_allocate_with_options(filt_timerexpire,
1620 (thread_call_param_t)kn, THREAD_CALL_PRIORITY_HIGH,
1621 THREAD_CALL_OPTIONS_ONCE);
1622
1623 if (NULL == callout) {
1624 knote_set_error(kn, ENOMEM);
1625 return 0;
1626 }
1627
1628 filt_timer_set_params(kn, &params);
1629 kn->kn_thcall = callout;
1630 kn->kn_flags |= EV_CLEAR;
1631 os_atomic_store(&kn->kn_hook32, TIMER_IDLE, relaxed);
1632
1633 /* NOTE_ABSOLUTE implies EV_ONESHOT */
1634 if (kn->kn_sfflags & NOTE_ABSOLUTE) {
1635 kn->kn_flags |= EV_ONESHOT;
1636 }
1637
1638 if (filt_timer_is_ready(kn)) {
1639 os_atomic_store(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
1640 return FILTER_ACTIVE;
1641 } else {
1642 filt_timerarm(kn);
1643 return 0;
1644 }
1645 }
1646
1647 /*
1648 * Shut down the timer if it's running, and free the callout.
1649 */
1650 static void
1651 filt_timerdetach(struct knote *kn)
1652 {
1653 __assert_only boolean_t freed;
1654
1655 /*
1656 * Unconditionally cancel to make sure there can't be any filt_timerexpire()
1657 * running anymore.
1658 */
1659 thread_call_cancel_wait(kn->kn_thcall);
1660 freed = thread_call_free(kn->kn_thcall);
1661 assert(freed);
1662 }
1663
1664 /*
1665 * filt_timertouch - update timer knote with new user input
1666 *
1667 * Cancel and restart the timer based on new user data. When
1668 * the user picks up a knote, clear the count of how many timer
1669 * pops have gone off (in kn_data).
1670 */
1671 static int
1672 filt_timertouch(struct knote *kn, struct kevent_qos_s *kev)
1673 {
1674 struct filt_timer_params params;
1675 uint32_t changed_flags = (kn->kn_sfflags ^ kev->fflags);
1676 int error;
1677
1678 if (changed_flags & NOTE_ABSOLUTE) {
1679 kev->flags |= EV_ERROR;
1680 kev->data = EINVAL;
1681 return 0;
1682 }
1683
1684 if ((error = filt_timervalidate(kev, &params)) != 0) {
1685 kev->flags |= EV_ERROR;
1686 kev->data = error;
1687 return 0;
1688 }
1689
1690 /* capture the new values used to compute deadline */
1691 filt_timer_set_params(kn, &params);
1692 kn->kn_sfflags = kev->fflags;
1693
1694 if (filt_timer_is_ready(kn)) {
1695 filt_timerfire_immediate(kn);
1696 return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS;
1697 } else {
1698 filt_timerarm(kn);
1699 return FILTER_UPDATE_REQ_QOS;
1700 }
1701 }
1702
1703 /*
1704 * filt_timerprocess - query state of knote and snapshot event data
1705 *
1706 * Determine if the timer has fired in the past, snapshot the state
1707 * of the kevent for returning to user-space, and clear pending event
1708 * counters for the next time.
1709 */
1710 static int
1711 filt_timerprocess(struct knote *kn, struct kevent_qos_s *kev)
1712 {
1713 uint32_t state = os_atomic_load(&kn->kn_hook32, relaxed);
1714
1715 /*
1716 * filt_timerprocess is serialized with any filter routine except for
1717 * filt_timerexpire which atomically does a TIMER_ARMED -> TIMER_FIRED
1718 * transition, and on success, activates the knote.
1719 *
1720 * Hence, we don't need atomic modifications of the state, only to peek at
1721 * whether we see any of the "FIRED" state, and if we do, it is safe to
1722 * do simple state machine transitions.
1723 */
1724 switch (state & TIMER_STATE_MASK) {
1725 case TIMER_IDLE:
1726 case TIMER_ARMED:
1727 /*
1728 * This can happen if a touch resets a timer that had fired
1729 * without being processed
1730 */
1731 return 0;
1732 }
1733
1734 os_atomic_store(&kn->kn_hook32, state & ~TIMER_STATE_MASK, relaxed);
1735
1736 /*
1737 * Copy out the interesting kevent state,
1738 * but don't leak out the raw time calculations.
1739 *
1740 * TODO: potential enhancements - tell the user about:
1741 * - deadline to which this timer thought it was expiring
1742 * - return kn_sfflags in the fflags field so the client can know
1743 * under what flags the timer fired
1744 */
1745 knote_fill_kevent(kn, kev, 1);
1746 kev->ext[0] = 0;
1747 /* kev->ext[1] = 0; JMM - shouldn't we hide this too? */
1748
1749 if (kn->kn_sdata != 0) {
1750 /*
1751 * This is a 'repeating' timer, so we have to emit
1752 * how many intervals expired between the arm
1753 * and the process.
1754 *
1755 * A very strange style of interface, because
1756 * this could easily be done in the client...
1757 */
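/*
 * Worked example (derived from the computation below): a timer armed at T
 * with a 100ms interval has first_deadline = T + 100ms; if it is processed
 * at T + 350ms, then elapsed = 350ms, num_fired = 3, and the next deadline
 * becomes T + 400ms.
 */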
1758
1759 uint64_t now;
1760
1761 if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
1762 now = mach_continuous_time();
1763 } else {
1764 now = mach_absolute_time();
1765 }
1766
1767 uint64_t first_deadline = kn->kn_ext[0];
1768 uint64_t interval_abs = kn->kn_sdata;
1769 uint64_t orig_arm_time = first_deadline - interval_abs;
1770
1771 assert(now > orig_arm_time);
1772 assert(now > first_deadline);
1773
1774 uint64_t elapsed = now - orig_arm_time;
1775
1776 uint64_t num_fired = elapsed / interval_abs;
1777
1778 /*
1779 * To reach this code, we must have seen the timer pop
1780 * and be in repeating mode, so therefore it must have been
1781 * more than 'interval' time since the attach or last
1782 * successful touch.
1783 */
1784 assert(num_fired > 0);
1785
1786 /* report how many intervals have elapsed to the user */
1787 kev->data = (int64_t)num_fired;
1788
1789 /* We only need to re-arm the timer if it's not about to be destroyed */
1790 if ((kn->kn_flags & EV_ONESHOT) == 0) {
1791 /* fire at the end of the next interval */
1792 uint64_t new_deadline = first_deadline + num_fired * interval_abs;
1793
1794 assert(new_deadline > now);
1795
1796 kn->kn_ext[0] = new_deadline;
1797
1798 /*
1799 * This can't shortcut setting up the thread call, because
1800 * knote_process deactivates EV_CLEAR knotes unconditionally.
1801 */
1802 filt_timerarm(kn);
1803 }
1804 }
1805
1806 return FILTER_ACTIVE;
1807 }
1808
1809 SECURITY_READ_ONLY_EARLY(static struct filterops) timer_filtops = {
1810 .f_extended_codes = true,
1811 .f_attach = filt_timerattach,
1812 .f_detach = filt_timerdetach,
1813 .f_event = filt_bad_event,
1814 .f_touch = filt_timertouch,
1815 .f_process = filt_timerprocess,
1816 };
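
The user-visible contract implemented by filt_timerprocess above is that a repeating EVFILT_TIMER reports, in kev->data, how many intervals elapsed since the event was last collected, and then re-arms itself. Below is a minimal user-space sketch of that contract using only the public kqueue API; the 100ms period and ident 1 are arbitrary choices, and error handling is kept to a minimum.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
    int kq = kqueue();
    if (kq < 0) {
        perror("kqueue");
        return EXIT_FAILURE;
    }

    /* Repeating 100ms timer; ident 1 is an arbitrary caller-chosen id. */
    struct kevent kev;
    EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, NOTE_USECONDS, 100000, NULL);
    if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0) {
        perror("kevent(register)");
        return EXIT_FAILURE;
    }

    for (int i = 0; i < 5; i++) {
        struct kevent out;
        if (kevent(kq, NULL, 0, &out, 1, NULL) == 1) {
            /* data counts the intervals that expired since the event
             * was last collected (>= 1 for a repeating timer). */
            printf("timer fired, intervals elapsed: %ld\n", (long)out.data);
        }
    }
    close(kq);
    return 0;
}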
1817
1818 #pragma mark user_filtops
1819
1820 static int
1821 filt_userattach(struct knote *kn, __unused struct kevent_qos_s *kev)
1822 {
1823 if (kn->kn_sfflags & NOTE_TRIGGER) {
1824 kn->kn_hook32 = FILTER_ACTIVE;
1825 } else {
1826 kn->kn_hook32 = 0;
1827 }
1828 return kn->kn_hook32;
1829 }
1830
1831 static int
1832 filt_usertouch(struct knote *kn, struct kevent_qos_s *kev)
1833 {
1834 uint32_t ffctrl;
1835 int fflags;
1836
1837 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
1838 fflags = kev->fflags & NOTE_FFLAGSMASK;
1839 switch (ffctrl) {
1840 case NOTE_FFNOP:
1841 break;
1842 case NOTE_FFAND:
1843 kn->kn_sfflags &= fflags;
1844 break;
1845 case NOTE_FFOR:
1846 kn->kn_sfflags |= fflags;
1847 break;
1848 case NOTE_FFCOPY:
1849 kn->kn_sfflags = fflags;
1850 break;
1851 }
1852 kn->kn_sdata = kev->data;
1853
1854 if (kev->fflags & NOTE_TRIGGER) {
1855 kn->kn_hook32 = FILTER_ACTIVE;
1856 }
1857 return (int)kn->kn_hook32;
1858 }
1859
1860 static int
1861 filt_userprocess(struct knote *kn, struct kevent_qos_s *kev)
1862 {
1863 int result = (int)kn->kn_hook32;
1864
1865 if (result) {
1866 /* EVFILT_USER returns the data that was passed in */
1867 knote_fill_kevent_with_sdata(kn, kev);
1868 kev->fflags = kn->kn_sfflags;
1869 if (kn->kn_flags & EV_CLEAR) {
1870 /* knote_fill_kevent cleared kn_fflags */
1871 kn->kn_hook32 = 0;
1872 }
1873 }
1874
1875 return result;
1876 }
1877
1878 SECURITY_READ_ONLY_EARLY(static struct filterops) user_filtops = {
1879 .f_extended_codes = true,
1880 .f_attach = filt_userattach,
1881 .f_detach = filt_no_detach,
1882 .f_event = filt_bad_event,
1883 .f_touch = filt_usertouch,
1884 .f_process = filt_userprocess,
1885 };
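
filt_usertouch above folds the NOTE_FFCTRLMASK control bits (NOTE_FFAND, NOTE_FFOR, NOTE_FFCOPY) into the saved fflags and latches NOTE_TRIGGER, which filt_userprocess then reports and, for EV_CLEAR knotes, resets. The following is a small user-space sketch of that round trip with the public EVFILT_USER interface; ident 42 and the 0x1 flag are arbitrary, and error checks are omitted for brevity.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    int kq = kqueue();
    struct kevent kev, out;

    /* Register a user event (ident 42). EV_CLEAR makes a delivery
     * reset the triggered state, matching filt_userprocess above. */
    EV_SET(&kev, 42, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
    kevent(kq, &kev, 1, NULL, 0, NULL);

    /* Trigger it, OR-ing an application-defined flag (0x1) into the
     * saved fflags via the NOTE_FFOR control bits. */
    EV_SET(&kev, 42, EVFILT_USER, 0, NOTE_TRIGGER | NOTE_FFOR | 0x1, 0, NULL);
    kevent(kq, &kev, 1, NULL, 0, NULL);

    /* Collect it: fflags carries back the accumulated low 24 bits. */
    if (kevent(kq, NULL, 0, &out, 1, NULL) == 1) {
        printf("user event fired, fflags=0x%x data=%ld\n",
            out.fflags, (long)out.data);
    }
    close(kq);
    return 0;
}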
1886
1887 #pragma mark workloop_filtops
1888
1889 #define EPREEMPTDISABLED (-1)
1890
1891 static inline void
1892 filt_wllock(struct kqworkloop *kqwl)
1893 {
1894 lck_spin_lock(&kqwl->kqwl_statelock);
1895 }
1896
1897 static inline void
1898 filt_wlunlock(struct kqworkloop *kqwl)
1899 {
1900 lck_spin_unlock(&kqwl->kqwl_statelock);
1901 }
1902
1903 /*
1904 * Returns true when the interlock for the turnstile is the workqueue lock
1905 *
1906 * When this is the case, all turnstiles operations are delegated
1907 * to the workqueue subsystem.
1908 *
1909 * This is required because kqueue_threadreq_bind_prepost only holds the
1910 * workqueue lock but needs to move the inheritor from the workloop turnstile
1911 * away from the creator thread, so that this now fulfilled request cannot be
1912 * picked anymore by other threads.
1913 */
1914 static inline bool
1915 filt_wlturnstile_interlock_is_workq(struct kqworkloop *kqwl)
1916 {
1917 return kqr_thread_requested_pending(&kqwl->kqwl_request);
1918 }
1919
1920 static void
1921 filt_wlupdate_inheritor(struct kqworkloop *kqwl, struct turnstile *ts,
1922 turnstile_update_flags_t flags)
1923 {
1924 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
1925 workq_threadreq_t kqr = &kqwl->kqwl_request;
1926
1927 /*
1928 * binding to the workq should always happen through
1929 * workq_kern_threadreq_update_inheritor()
1930 */
1931 assert(!filt_wlturnstile_interlock_is_workq(kqwl));
1932
1933 if ((inheritor = kqwl->kqwl_owner)) {
1934 flags |= TURNSTILE_INHERITOR_THREAD;
1935 } else if ((inheritor = kqr_thread(kqr))) {
1936 flags |= TURNSTILE_INHERITOR_THREAD;
1937 }
1938
1939 turnstile_update_inheritor(ts, inheritor, flags);
1940 }
1941
1942 #define EVFILT_WORKLOOP_EFAULT_RETRY_COUNT 100
1943 #define FILT_WLATTACH 0
1944 #define FILT_WLTOUCH 1
1945 #define FILT_WLDROP 2
1946
1947 __result_use_check
1948 static int
1949 filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn,
1950 struct kevent_qos_s *kev, kq_index_t qos_index, int op)
1951 {
1952 user_addr_t uaddr = CAST_USER_ADDR_T(kev->ext[EV_EXTIDX_WL_ADDR]);
1953 workq_threadreq_t kqr = &kqwl->kqwl_request;
1954 thread_t cur_owner, new_owner, extra_thread_ref = THREAD_NULL;
1955 kq_index_t cur_override = THREAD_QOS_UNSPECIFIED;
1956 int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
1957 int action = KQWL_UTQ_NONE, error = 0;
1958 bool wl_inheritor_updated = false, needs_wake = false;
1959 uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
1960 uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
1961 uint64_t udata = 0;
1962 struct turnstile *ts = TURNSTILE_NULL;
1963
1964 filt_wllock(kqwl);
1965
1966 again:
1967 new_owner = cur_owner = kqwl->kqwl_owner;
1968
1969 /*
1970 * Phase 1:
1971 *
1972 * If asked, load the uint64 value at the user provided address and compare
1973 * it against the passed in mask and expected value.
1974 *
1975 * If NOTE_WL_DISCOVER_OWNER is specified, translate the loaded name as
1976 * a thread reference.
1977 *
1978 * If NOTE_WL_END_OWNERSHIP is specified and the currently known owner is
1979 * the current thread, then end ownership.
1980 *
1981 * Lastly decide whether we need to perform a QoS update.
1982 */
1983 if (uaddr) {
1984 /*
1985 * Until <rdar://problem/24999882> exists,
1986 * a copyin done with preemption disabled forces any
1987 * vm_fault we encounter to fail.
1988 */
1989 error = copyin_atomic64(uaddr, &udata);
1990
1991 /*
1992 * If we get EFAULT, drop locks, and retry.
1993 * If we still get an error, report it;
1994 * else assume the memory has been faulted
1995 * and attempt to copyin under lock again.
1996 */
1997 switch (error) {
1998 case 0:
1999 break;
2000 case EFAULT:
2001 if (efault_retry-- > 0) {
2002 filt_wlunlock(kqwl);
2003 error = copyin_atomic64(uaddr, &udata);
2004 filt_wllock(kqwl);
2005 if (error == 0) {
2006 goto again;
2007 }
2008 }
2009 OS_FALLTHROUGH;
2010 default:
2011 goto out;
2012 }
2013
2014 /* Update state as copied in. */
2015 kev->ext[EV_EXTIDX_WL_VALUE] = udata;
2016
2017 if ((udata & mask) != (kdata & mask)) {
2018 error = ESTALE;
2019 } else if (kev->fflags & NOTE_WL_DISCOVER_OWNER) {
2020 /*
2021 * Decipher the owner port name, and translate accordingly.
2022 * The low 2 bits were borrowed for other flags, so mask them off.
2023 *
2024 * Then attempt translation to a thread reference or fail.
2025 */
2026 mach_port_name_t name = (mach_port_name_t)udata & ~0x3;
2027 if (name != MACH_PORT_NULL) {
2028 name = ipc_entry_name_mask(name);
2029 extra_thread_ref = port_name_to_thread(name,
2030 PORT_TO_THREAD_IN_CURRENT_TASK);
2031 if (extra_thread_ref == THREAD_NULL) {
2032 error = EOWNERDEAD;
2033 goto out;
2034 }
2035 new_owner = extra_thread_ref;
2036 }
2037 }
2038 }
2039
2040 if ((kev->fflags & NOTE_WL_END_OWNERSHIP) && new_owner == current_thread()) {
2041 new_owner = THREAD_NULL;
2042 }
2043
2044 if (error == 0) {
2045 if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && (kev->flags & EV_DELETE)) {
2046 action = KQWL_UTQ_SET_QOS_INDEX;
2047 } else if (qos_index && kqr->tr_kq_qos_index != qos_index) {
2048 action = KQWL_UTQ_SET_QOS_INDEX;
2049 }
2050
2051 if (op == FILT_WLTOUCH) {
2052 /*
2053 * Save off any additional fflags/data we just accepted,
2054 * but only keep the last round of "update" bits we acted on,
2055 * which helps debugging a lot.
2056 */
2057 kn->kn_sfflags &= ~NOTE_WL_UPDATES_MASK;
2058 kn->kn_sfflags |= kev->fflags;
2059 if (kev->fflags & NOTE_WL_SYNC_WAKE) {
2060 needs_wake = (kn->kn_thread != THREAD_NULL);
2061 }
2062 } else if (op == FILT_WLDROP) {
2063 if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) ==
2064 NOTE_WL_SYNC_WAIT) {
2065 /*
2066 * When deleting a SYNC_WAIT knote that hasn't been woken up
2067 * explicitly, issue a wake up.
2068 */
2069 kn->kn_sfflags |= NOTE_WL_SYNC_WAKE;
2070 needs_wake = (kn->kn_thread != THREAD_NULL);
2071 }
2072 }
2073 }
2074
2075 /*
2076 * Phase 2:
2077 *
2078 * Commit ownership and QoS changes if any, possibly wake up waiters
2079 */
2080
2081 if (cur_owner == new_owner && action == KQWL_UTQ_NONE && !needs_wake) {
2082 goto out;
2083 }
2084
2085 kqlock(kqwl);
2086
2087 /* If already tracked as servicer, don't track as owner */
2088 if (new_owner == kqr_thread(kqr)) {
2089 new_owner = THREAD_NULL;
2090 }
2091
2092 if (cur_owner != new_owner) {
2093 kqwl->kqwl_owner = new_owner;
2094 if (new_owner == extra_thread_ref) {
2095 /* we just transferred this ref to kqwl_owner */
2096 extra_thread_ref = THREAD_NULL;
2097 }
2098 cur_override = kqworkloop_override(kqwl);
2099
2100 if (new_owner) {
2101 /* override it before we drop the old */
2102 if (cur_override != THREAD_QOS_UNSPECIFIED) {
2103 thread_add_kevent_override(new_owner, cur_override);
2104 }
2105 if (kqr_thread_requested_pending(kqr)) {
2106 if (action == KQWL_UTQ_NONE) {
2107 action = KQWL_UTQ_REDRIVE_EVENTS;
2108 }
2109 }
2110 } else {
2111 if (!kqr_thread_requested(kqr) && kqr->tr_kq_wakeup) {
2112 if (action == KQWL_UTQ_NONE) {
2113 action = KQWL_UTQ_REDRIVE_EVENTS;
2114 }
2115 }
2116 }
2117 }
2118
2119 if (action != KQWL_UTQ_NONE) {
2120 kqworkloop_update_threads_qos(kqwl, action, qos_index);
2121 }
2122
2123 ts = kqwl->kqwl_turnstile;
2124 if (cur_owner != new_owner && ts) {
2125 if (action == KQWL_UTQ_REDRIVE_EVENTS) {
2126 /*
2127 * Note that when action is KQWL_UTQ_REDRIVE_EVENTS,
2128 * the code went through workq_kern_threadreq_initiate()
2129 * and the workqueue has set the inheritor already
2130 */
2131 assert(filt_wlturnstile_interlock_is_workq(kqwl));
2132 } else if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2133 workq_kern_threadreq_lock(kqwl->kqwl_p);
2134 workq_kern_threadreq_update_inheritor(kqwl->kqwl_p, kqr, new_owner,
2135 ts, TURNSTILE_IMMEDIATE_UPDATE);
2136 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2137 if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
2138 /*
2139 * If the workq is no longer the interlock, then
2140 * workq_kern_threadreq_update_inheritor() has finished a bind
2141 * and we need to fall back to the regular path.
2142 */
2143 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
2144 }
2145 wl_inheritor_updated = true;
2146 } else {
2147 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
2148 wl_inheritor_updated = true;
2149 }
2150
2151 /*
2152 * We need a turnstile reference because we are dropping the interlock
2153 * and the caller has not called turnstile_prepare.
2154 */
2155 if (wl_inheritor_updated) {
2156 turnstile_reference(ts);
2157 }
2158 }
2159
2160 if (needs_wake && ts) {
2161 waitq_wakeup64_thread(&ts->ts_waitq, knote_filt_wev64(kn),
2162 kn->kn_thread, THREAD_AWAKENED);
2163 if (op == FILT_WLATTACH || op == FILT_WLTOUCH) {
2164 disable_preemption();
2165 error = EPREEMPTDISABLED;
2166 }
2167 }
2168
2169 kqunlock(kqwl);
2170
2171 out:
2172 /*
2173 * Phase 3:
2174 *
2175 * Unlock and cleanup various lingering references and things.
2176 */
2177 filt_wlunlock(kqwl);
2178
2179 #if CONFIG_WORKLOOP_DEBUG
2180 KQWL_HISTORY_WRITE_ENTRY(kqwl, {
2181 .updater = current_thread(),
2182 .servicer = kqr_thread(kqr), /* Note: racy */
2183 .old_owner = cur_owner,
2184 .new_owner = new_owner,
2185
2186 .kev_ident = kev->ident,
2187 .error = (int16_t)error,
2188 .kev_flags = kev->flags,
2189 .kev_fflags = kev->fflags,
2190
2191 .kev_mask = mask,
2192 .kev_value = kdata,
2193 .in_value = udata,
2194 });
2195 #endif // CONFIG_WORKLOOP_DEBUG
2196
2197 if (wl_inheritor_updated) {
2198 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
2199 turnstile_deallocate_safe(ts);
2200 }
2201
2202 if (cur_owner && new_owner != cur_owner) {
2203 if (cur_override != THREAD_QOS_UNSPECIFIED) {
2204 thread_drop_kevent_override(cur_owner);
2205 }
2206 thread_deallocate_safe(cur_owner);
2207 }
2208 if (extra_thread_ref) {
2209 thread_deallocate_safe(extra_thread_ref);
2210 }
2211 return error;
2212 }
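
Phase 1 above relies on a recurring shape: copyin_atomic64() under the spin lock cannot take a fault, so on EFAULT the lock is dropped, the copy is retried preemptibly to fault the page in, and the whole sequence restarts because the guarded state may have changed while unlocked. Here is a self-contained, hedged sketch of that retry shape in user space; the pthread lock, the fetch_u64() helper, and the page_resident flag are stand-ins invented for illustration, not kernel APIs.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAULT_RETRY_COUNT 100

/* Stand-in for copyin_atomic64(): pretends the target page faults on the
 * first, non-preemptible attempt and succeeds once it has been touched. */
static bool page_resident = false;

static int
fetch_u64(const uint64_t *addr, uint64_t *out, bool preemptible)
{
    if (!page_resident) {
        if (!preemptible) {
            return EFAULT;        /* cannot take a fault under the lock */
        }
        page_resident = true;     /* the preemptible copy faults it in */
    }
    *out = *addr;
    return 0;
}

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static int
debounced_read(const uint64_t *uaddr, uint64_t *out)
{
    int retries = FAULT_RETRY_COUNT;
    int error;

    pthread_mutex_lock(&state_lock);
again:
    error = fetch_u64(uaddr, out, false);
    if (error == EFAULT && retries-- > 0) {
        /* Drop the lock, fault the memory in preemptibly, then restart
         * the whole sequence: the guarded state may have changed while
         * the lock was not held. */
        pthread_mutex_unlock(&state_lock);
        error = fetch_u64(uaddr, out, true);
        pthread_mutex_lock(&state_lock);
        if (error == 0) {
            goto again;
        }
    }
    pthread_mutex_unlock(&state_lock);
    return error;
}

int
main(void)
{
    uint64_t value = 0x1234, copy = 0;
    int error = debounced_read(&value, &copy);
    printf("error=%d copy=0x%llx\n", error, (unsigned long long)copy);
    return 0;
}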
2213
2214 /*
2215 * Remembers the last update that came in from userspace for debugging reasons.
2216 * - fflags is mirrored from the userspace kevent
2217 * - ext[i, i != VALUE] is mirrored from the userspace kevent
2218 * - ext[VALUE] is set to what the kernel loaded atomically
2219 * - data is set to the error if any
2220 */
2221 static inline void
2222 filt_wlremember_last_update(struct knote *kn, struct kevent_qos_s *kev,
2223 int error)
2224 {
2225 kn->kn_fflags = kev->fflags;
2226 kn->kn_sdata = error;
2227 memcpy(kn->kn_ext, kev->ext, sizeof(kev->ext));
2228 }
2229
2230 static int
2231 filt_wlupdate_sync_ipc(struct kqworkloop *kqwl, struct knote *kn,
2232 struct kevent_qos_s *kev, int op)
2233 {
2234 user_addr_t uaddr = (user_addr_t) kev->ext[EV_EXTIDX_WL_ADDR];
2235 uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
2236 uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
2237 uint64_t udata = 0;
2238 int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
2239 int error = 0;
2240
2241 if (op == FILT_WLATTACH) {
2242 (void)kqueue_alloc_turnstile(&kqwl->kqwl_kqueue);
2243 } else if (uaddr == 0) {
2244 return 0;
2245 }
2246
2247 filt_wllock(kqwl);
2248
2249 again:
2250
2251 /*
2252 * Do the debounce thing, the lock serializing the state is the knote lock.
2253 */
2254 if (uaddr) {
2255 /*
2256 * Until <rdar://problem/24999882> exists,
2257 * a copyin done with preemption disabled forces any
2258 * vm_fault we encounter to fail.
2259 */
2260 error = copyin_atomic64(uaddr, &udata);
2261
2262 /*
2263 * If we get EFAULT, drop locks, and retry.
2264 * If we still get an error, report it;
2265 * else assume the memory has been faulted
2266 * and attempt to copyin under lock again.
2267 */
2268 switch (error) {
2269 case 0:
2270 break;
2271 case EFAULT:
2272 if (efault_retry-- > 0) {
2273 filt_wlunlock(kqwl);
2274 error = copyin_atomic64(uaddr, &udata);
2275 filt_wllock(kqwl);
2276 if (error == 0) {
2277 goto again;
2278 }
2279 }
2280 OS_FALLTHROUGH;
2281 default:
2282 goto out;
2283 }
2284
2285 kev->ext[EV_EXTIDX_WL_VALUE] = udata;
2286 kn->kn_ext[EV_EXTIDX_WL_VALUE] = udata;
2287
2288 if ((udata & mask) != (kdata & mask)) {
2289 error = ESTALE;
2290 goto out;
2291 }
2292 }
2293
2294 if (op == FILT_WLATTACH) {
2295 error = filt_wlattach_sync_ipc(kn);
2296 if (error == 0) {
2297 disable_preemption();
2298 error = EPREEMPTDISABLED;
2299 }
2300 }
2301
2302 out:
2303 filt_wlunlock(kqwl);
2304 return error;
2305 }
2306
2307 static int
2308 filt_wlattach(struct knote *kn, struct kevent_qos_s *kev)
2309 {
2310 struct kqueue *kq = knote_get_kq(kn);
2311 struct kqworkloop *kqwl = (struct kqworkloop *)kq;
2312 int error = 0, result = 0;
2313 kq_index_t qos_index = 0;
2314
2315 if (__improbable((kq->kq_state & KQ_WORKLOOP) == 0)) {
2316 error = ENOTSUP;
2317 goto out;
2318 }
2319
2320 uint32_t command = (kn->kn_sfflags & NOTE_WL_COMMANDS_MASK);
2321 switch (command) {
2322 case NOTE_WL_THREAD_REQUEST:
2323 if (kn->kn_id != kqwl->kqwl_dynamicid) {
2324 error = EINVAL;
2325 goto out;
2326 }
2327 qos_index = _pthread_priority_thread_qos(kn->kn_qos);
2328 if (qos_index == THREAD_QOS_UNSPECIFIED) {
2329 error = ERANGE;
2330 goto out;
2331 }
2332 if (kqwl->kqwl_request.tr_kq_qos_index) {
2333 /*
2334 * There already is a thread request, and well, you're only allowed
2335 * one per workloop, so fail the attach.
2336 */
2337 error = EALREADY;
2338 goto out;
2339 }
2340 break;
2341 case NOTE_WL_SYNC_WAIT:
2342 case NOTE_WL_SYNC_WAKE:
2343 if (kn->kn_id == kqwl->kqwl_dynamicid) {
2344 error = EINVAL;
2345 goto out;
2346 }
2347 if ((kn->kn_flags & EV_DISABLE) == 0) {
2348 error = EINVAL;
2349 goto out;
2350 }
2351 if (kn->kn_sfflags & NOTE_WL_END_OWNERSHIP) {
2352 error = EINVAL;
2353 goto out;
2354 }
2355 break;
2356
2357 case NOTE_WL_SYNC_IPC:
2358 if ((kn->kn_flags & EV_DISABLE) == 0) {
2359 error = EINVAL;
2360 goto out;
2361 }
2362 if (kn->kn_sfflags & (NOTE_WL_UPDATE_QOS | NOTE_WL_DISCOVER_OWNER)) {
2363 error = EINVAL;
2364 goto out;
2365 }
2366 break;
2367 default:
2368 error = EINVAL;
2369 goto out;
2370 }
2371
2372 if (command == NOTE_WL_SYNC_IPC) {
2373 error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLATTACH);
2374 } else {
2375 error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLATTACH);
2376 }
2377
2378 if (error == EPREEMPTDISABLED) {
2379 error = 0;
2380 result = FILTER_THREADREQ_NODEFEER;
2381 }
2382 out:
2383 if (error) {
2384 /* If userland wants ESTALE to be hidden, fail the attach anyway */
2385 if (error == ESTALE && (kn->kn_sfflags & NOTE_WL_IGNORE_ESTALE)) {
2386 error = 0;
2387 }
2388 knote_set_error(kn, error);
2389 return result;
2390 }
2391 if (command == NOTE_WL_SYNC_WAIT) {
2392 return kevent_register_wait_prepare(kn, kev, result);
2393 }
2394 /* Just attaching the thread request successfully will fire it */
2395 if (command == NOTE_WL_THREAD_REQUEST) {
2396 /*
2397 * Thread Request knotes need an explicit touch to be active again,
2398 * so delivering an event needs to also consume it.
2399 */
2400 kn->kn_flags |= EV_CLEAR;
2401 return result | FILTER_ACTIVE;
2402 }
2403 return result;
2404 }
2405
2406 static void __dead2
2407 filt_wlwait_continue(void *parameter, wait_result_t wr)
2408 {
2409 struct _kevent_register *cont_args = parameter;
2410 struct kqworkloop *kqwl = cont_args->kqwl;
2411
2412 kqlock(kqwl);
2413 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2414 workq_kern_threadreq_lock(kqwl->kqwl_p);
2415 turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
2416 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2417 } else {
2418 turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
2419 }
2420 kqunlock(kqwl);
2421
2422 turnstile_cleanup();
2423
2424 if (wr == THREAD_INTERRUPTED) {
2425 cont_args->kev.flags |= EV_ERROR;
2426 cont_args->kev.data = EINTR;
2427 } else if (wr != THREAD_AWAKENED) {
2428 panic("Unexpected wait result: %d", wr);
2429 }
2430
2431 kevent_register_wait_return(cont_args);
2432 }
2433
2434 /*
2435 * Called with the workloop mutex held; most of the time this never returns, as it
2436 * calls filt_wlwait_continue through a continuation.
2437 */
2438 static void __dead2
2439 filt_wlpost_register_wait(struct uthread *uth, struct knote *kn,
2440 struct _kevent_register *cont_args)
2441 {
2442 struct kqworkloop *kqwl = cont_args->kqwl;
2443 workq_threadreq_t kqr = &kqwl->kqwl_request;
2444 struct turnstile *ts;
2445 bool workq_locked = false;
2446
2447 kqlock_held(kqwl);
2448
2449 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2450 workq_kern_threadreq_lock(kqwl->kqwl_p);
2451 workq_locked = true;
2452 }
2453
2454 ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
2455 TURNSTILE_NULL, TURNSTILE_WORKLOOPS);
2456
2457 if (workq_locked) {
2458 workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
2459 &kqwl->kqwl_request, kqwl->kqwl_owner, ts,
2460 TURNSTILE_DELAYED_UPDATE);
2461 if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
2462 /*
2463 * if the interlock is no longer the workqueue lock,
2464 * then we don't need to hold it anymore.
2465 */
2466 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2467 workq_locked = false;
2468 }
2469 }
2470 if (!workq_locked) {
2471 /*
2472 * If the interlock is the workloop's, then it's our responsibility to
2473 * call update_inheritor, so just do it.
2474 */
2475 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_DELAYED_UPDATE);
2476 }
2477
2478 thread_set_pending_block_hint(uth->uu_thread, kThreadWaitWorkloopSyncWait);
2479 waitq_assert_wait64(&ts->ts_waitq, knote_filt_wev64(kn),
2480 THREAD_ABORTSAFE, TIMEOUT_WAIT_FOREVER);
2481
2482 if (workq_locked) {
2483 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2484 }
2485
2486 thread_t thread = kqwl->kqwl_owner ?: kqr_thread(kqr);
2487 if (thread) {
2488 thread_reference(thread);
2489 }
2490
2491 kevent_register_wait_block(ts, thread, filt_wlwait_continue, cont_args);
2492 }
2493
2494 /* called in stackshot context to report the thread responsible for blocking this thread */
2495 void
2496 kdp_workloop_sync_wait_find_owner(__assert_only thread_t thread,
2497 event64_t event, thread_waitinfo_t *waitinfo)
2498 {
2499 extern zone_t thread_zone;
2500 struct knote *kn = (struct knote *)event;
2501
2502 zone_require(knote_zone, kn);
2503
2504 assert(kn->kn_thread == thread);
2505
2506 struct kqueue *kq = knote_get_kq(kn);
2507
2508 zone_require(kqworkloop_zone, kq);
2509 assert(kq->kq_state & KQ_WORKLOOP);
2510
2511 struct kqworkloop *kqwl = (struct kqworkloop *)kq;
2512 workq_threadreq_t kqr = &kqwl->kqwl_request;
2513
2514 thread_t kqwl_owner = kqwl->kqwl_owner;
2515
2516 if (kqwl_owner != THREAD_NULL) {
2517 zone_require(thread_zone, kqwl_owner);
2518 waitinfo->owner = thread_tid(kqwl->kqwl_owner);
2519 } else if (kqr_thread_requested_pending(kqr)) {
2520 waitinfo->owner = STACKSHOT_WAITOWNER_THREQUESTED;
2521 } else if (kqr->tr_state >= WORKQ_TR_STATE_BINDING) {
2522 zone_require(thread_zone, kqr->tr_thread);
2523 waitinfo->owner = thread_tid(kqr->tr_thread);
2524 } else {
2525 waitinfo->owner = 0;
2526 }
2527
2528 waitinfo->context = kqwl->kqwl_dynamicid;
2529 }
2530
2531 static void
2532 filt_wldetach(struct knote *kn)
2533 {
2534 if (kn->kn_sfflags & NOTE_WL_SYNC_IPC) {
2535 filt_wldetach_sync_ipc(kn);
2536 } else if (kn->kn_thread) {
2537 kevent_register_wait_cleanup(kn);
2538 }
2539 }
2540
2541 static int
2542 filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_qos_s *kev,
2543 thread_qos_t *qos_index)
2544 {
2545 uint32_t new_commands = kev->fflags & NOTE_WL_COMMANDS_MASK;
2546 uint32_t sav_commands = kn->kn_sfflags & NOTE_WL_COMMANDS_MASK;
2547
2548 if ((kev->fflags & NOTE_WL_DISCOVER_OWNER) && (kev->flags & EV_DELETE)) {
2549 return EINVAL;
2550 }
2551 if (kev->fflags & NOTE_WL_UPDATE_QOS) {
2552 if (kev->flags & EV_DELETE) {
2553 return EINVAL;
2554 }
2555 if (sav_commands != NOTE_WL_THREAD_REQUEST) {
2556 return EINVAL;
2557 }
2558 if (!(*qos_index = _pthread_priority_thread_qos(kev->qos))) {
2559 return ERANGE;
2560 }
2561 }
2562
2563 switch (new_commands) {
2564 case NOTE_WL_THREAD_REQUEST:
2565 /* thread requests can only update themselves */
2566 if (sav_commands != NOTE_WL_THREAD_REQUEST) {
2567 return EINVAL;
2568 }
2569 break;
2570
2571 case NOTE_WL_SYNC_WAIT:
2572 if (kev->fflags & NOTE_WL_END_OWNERSHIP) {
2573 return EINVAL;
2574 }
2575 goto sync_checks;
2576
2577 case NOTE_WL_SYNC_WAKE:
2578 sync_checks:
2579 if (!(sav_commands & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE))) {
2580 return EINVAL;
2581 }
2582 if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
2583 return EINVAL;
2584 }
2585 break;
2586
2587 case NOTE_WL_SYNC_IPC:
2588 if (sav_commands != NOTE_WL_SYNC_IPC) {
2589 return EINVAL;
2590 }
2591 if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
2592 return EINVAL;
2593 }
2594 break;
2595
2596 default:
2597 return EINVAL;
2598 }
2599 return 0;
2600 }
2601
2602 static int
2603 filt_wltouch(struct knote *kn, struct kevent_qos_s *kev)
2604 {
2605 struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2606 thread_qos_t qos_index = THREAD_QOS_UNSPECIFIED;
2607 int result = 0;
2608
2609 int error = filt_wlvalidate_kev_flags(kn, kev, &qos_index);
2610 if (error) {
2611 goto out;
2612 }
2613
2614 uint32_t command = kev->fflags & NOTE_WL_COMMANDS_MASK;
2615 if (command == NOTE_WL_SYNC_IPC) {
2616 error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLTOUCH);
2617 } else {
2618 error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLTOUCH);
2619 filt_wlremember_last_update(kn, kev, error);
2620 }
2621 if (error == EPREEMPTDISABLED) {
2622 error = 0;
2623 result = FILTER_THREADREQ_NODEFEER;
2624 }
2625
2626 out:
2627 if (error) {
2628 if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
2629 /* If userland wants ESTALE to be hidden, do not activate */
2630 return result;
2631 }
2632 kev->flags |= EV_ERROR;
2633 kev->data = error;
2634 return result;
2635 }
2636 if (command == NOTE_WL_SYNC_WAIT && !(kn->kn_sfflags & NOTE_WL_SYNC_WAKE)) {
2637 return kevent_register_wait_prepare(kn, kev, result);
2638 }
2639 /* Just touching the thread request successfully will fire it */
2640 if (command == NOTE_WL_THREAD_REQUEST) {
2641 if (kev->fflags & NOTE_WL_UPDATE_QOS) {
2642 result |= FILTER_UPDATE_REQ_QOS;
2643 }
2644 result |= FILTER_ACTIVE;
2645 }
2646 return result;
2647 }
2648
2649 static bool
2650 filt_wlallow_drop(struct knote *kn, struct kevent_qos_s *kev)
2651 {
2652 struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2653
2654 int error = filt_wlvalidate_kev_flags(kn, kev, NULL);
2655 if (error) {
2656 goto out;
2657 }
2658
2659 uint32_t command = (kev->fflags & NOTE_WL_COMMANDS_MASK);
2660 if (command == NOTE_WL_SYNC_IPC) {
2661 error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLDROP);
2662 } else {
2663 error = filt_wlupdate(kqwl, kn, kev, 0, FILT_WLDROP);
2664 filt_wlremember_last_update(kn, kev, error);
2665 }
2666 assert(error != EPREEMPTDISABLED);
2667
2668 out:
2669 if (error) {
2670 if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
2671 return false;
2672 }
2673 kev->flags |= EV_ERROR;
2674 kev->data = error;
2675 return false;
2676 }
2677 return true;
2678 }
2679
2680 static int
2681 filt_wlprocess(struct knote *kn, struct kevent_qos_s *kev)
2682 {
2683 struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2684 int rc = 0;
2685
2686 assert(kn->kn_sfflags & NOTE_WL_THREAD_REQUEST);
2687
2688 kqlock(kqwl);
2689
2690 if (kqwl->kqwl_owner) {
2691 /*
2692 * <rdar://problem/33584321> userspace can sometimes cause the thread
2693 * request knote to be processed when events are delivered but do not
2694 * trigger a drain session.
2695 *
2696 * When that happens, the automatic deactivation due to process
2697 * would swallow the event, so we have to activate the knote again.
2698 */
2699 knote_activate(kqwl, kn, FILTER_ACTIVE);
2700 } else {
2701 #if DEBUG || DEVELOPMENT
2702 if (kevent_debug_flags & KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS) {
2703 /*
2704 * see src/queue_internal.h in libdispatch
2705 */
2706 #define DISPATCH_QUEUE_ENQUEUED 0x1ull
2707 user_addr_t addr = CAST_USER_ADDR_T(kn->kn_ext[EV_EXTIDX_WL_ADDR]);
2708 task_t t = current_task();
2709 uint64_t val;
2710 if (addr && task_is_active(t) && !task_is_halting(t) &&
2711 copyin_atomic64(addr, &val) == 0 &&
2712 val && (val & DISPATCH_QUEUE_ENQUEUED) == 0 &&
2713 (val >> 48) != 0xdead && (val >> 48) != 0 && (val >> 48) != 0xffff) {
2714 panic("kevent: workloop %#016llx is not enqueued "
2715 "(kn:%p dq_state:%#016llx kev.dq_state:%#016llx)",
2716 kn->kn_udata, kn, val, kn->kn_ext[EV_EXTIDX_WL_VALUE]);
2717 }
2718 }
2719 #endif
2720 knote_fill_kevent(kn, kev, 0);
2721 kev->fflags = kn->kn_sfflags;
2722 rc |= FILTER_ACTIVE;
2723 }
2724
2725 kqunlock(kqwl);
2726
2727 if (rc & FILTER_ACTIVE) {
2728 workq_thread_set_max_qos(kqwl->kqwl_p, &kqwl->kqwl_request);
2729 }
2730 return rc;
2731 }
2732
2733 SECURITY_READ_ONLY_EARLY(static struct filterops) workloop_filtops = {
2734 .f_extended_codes = true,
2735 .f_attach = filt_wlattach,
2736 .f_detach = filt_wldetach,
2737 .f_event = filt_bad_event,
2738 .f_touch = filt_wltouch,
2739 .f_process = filt_wlprocess,
2740 .f_allow_drop = filt_wlallow_drop,
2741 .f_post_register_wait = filt_wlpost_register_wait,
2742 };
2743
2744 #pragma mark - kqueues allocation and deallocation
2745
2746 /*!
2747 * @enum kqworkloop_dealloc_flags_t
2748 *
2749 * @brief
2750 * Flags that alter kqworkloop_dealloc() behavior.
2751 *
2752 * @const KQWL_DEALLOC_NONE
2753 * Convenient name for "no flags".
2754 *
2755 * @const KQWL_DEALLOC_SKIP_HASH_REMOVE
2756 * Do not remove the workloop from the hash table.
2757 * This is used for process tear-down codepaths as the workloops have been
2758 * removed by the caller already.
2759 */
2760 OS_OPTIONS(kqworkloop_dealloc_flags, unsigned,
2761 KQWL_DEALLOC_NONE = 0x0000,
2762 KQWL_DEALLOC_SKIP_HASH_REMOVE = 0x0001,
2763 );
2764
2765 static void
2766 kqworkloop_dealloc(struct kqworkloop *, kqworkloop_dealloc_flags_t, uint32_t);
2767
2768 OS_NOINLINE OS_COLD OS_NORETURN
2769 static void
2770 kqworkloop_retain_panic(struct kqworkloop *kqwl, uint32_t previous)
2771 {
2772 if (previous == 0) {
2773 panic("kq(%p) resurrection", kqwl);
2774 } else {
2775 panic("kq(%p) retain overflow", kqwl);
2776 }
2777 }
2778
2779 OS_NOINLINE OS_COLD OS_NORETURN
2780 static void
2781 kqworkloop_release_panic(struct kqworkloop *kqwl)
2782 {
2783 panic("kq(%p) over-release", kqwl);
2784 }
2785
2786 OS_ALWAYS_INLINE
2787 static inline bool
2788 kqworkloop_try_retain(struct kqworkloop *kqwl)
2789 {
2790 uint32_t old_ref, new_ref;
2791 os_atomic_rmw_loop(&kqwl->kqwl_retains, old_ref, new_ref, relaxed, {
2792 if (__improbable(old_ref == 0)) {
2793 os_atomic_rmw_loop_give_up(return false);
2794 }
2795 if (__improbable(old_ref >= KQ_WORKLOOP_RETAINS_MAX)) {
2796 kqworkloop_retain_panic(kqwl, old_ref);
2797 }
2798 new_ref = old_ref + 1;
2799 });
2800 return true;
2801 }
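
kqworkloop_try_retain only takes a reference when the count is still nonzero, so a hash lookup racing with the final release can never resurrect a dying workloop, and overflow is treated as fatal. Below is a hedged sketch of the same compare-and-exchange loop in portable C11 atomics; the object type and the RETAINS_MAX limit are illustrative stand-ins, not the kernel's definitions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RETAINS_MAX  UINT32_C(0x7fffffff)   /* illustrative overflow limit */

struct object {
    _Atomic uint32_t retains;
};

/* Take a reference only if the object is still alive (refcount != 0),
 * so a racing lookup can never resurrect an object being freed. */
static bool
object_try_retain(struct object *o)
{
    uint32_t old_ref = atomic_load_explicit(&o->retains, memory_order_relaxed);

    do {
        if (old_ref == 0) {
            return false;            /* deallocation already started */
        }
        if (old_ref >= RETAINS_MAX) {
            abort();                 /* overflow is fatal, like the panic above */
        }
    } while (!atomic_compare_exchange_weak_explicit(&o->retains,
        &old_ref, old_ref + 1,
        memory_order_relaxed, memory_order_relaxed));

    return true;
}

int
main(void)
{
    struct object live = { .retains = 1 };
    struct object dead = { .retains = 0 };
    printf("live retained: %d, dead retained: %d\n",
        object_try_retain(&live), object_try_retain(&dead));
    return 0;
}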
2802
2803 OS_ALWAYS_INLINE
2804 static inline void
2805 kqworkloop_retain(struct kqworkloop *kqwl)
2806 {
2807 uint32_t previous = os_atomic_inc_orig(&kqwl->kqwl_retains, relaxed);
2808 if (__improbable(previous == 0 || previous >= KQ_WORKLOOP_RETAINS_MAX)) {
2809 kqworkloop_retain_panic(kqwl, previous);
2810 }
2811 }
2812
2813 OS_ALWAYS_INLINE
2814 static inline void
2815 kqueue_retain(kqueue_t kqu)
2816 {
2817 if (kqu.kq->kq_state & KQ_DYNAMIC) {
2818 kqworkloop_retain(kqu.kqwl);
2819 }
2820 }
2821
2822 OS_ALWAYS_INLINE
2823 static inline void
2824 kqworkloop_release_live(struct kqworkloop *kqwl)
2825 {
2826 uint32_t refs = os_atomic_dec_orig(&kqwl->kqwl_retains, relaxed);
2827 if (__improbable(refs <= 1)) {
2828 kqworkloop_release_panic(kqwl);
2829 }
2830 }
2831
2832 OS_ALWAYS_INLINE
2833 static inline void
2834 kqueue_release_live(kqueue_t kqu)
2835 {
2836 if (kqu.kq->kq_state & KQ_DYNAMIC) {
2837 kqworkloop_release_live(kqu.kqwl);
2838 }
2839 }
2840
2841 OS_ALWAYS_INLINE
2842 static inline void
2843 kqworkloop_release(struct kqworkloop *kqwl)
2844 {
2845 uint32_t refs = os_atomic_dec_orig(&kqwl->kqwl_retains, relaxed);
2846
2847 if (__improbable(refs <= 1)) {
2848 kqworkloop_dealloc(kqwl, KQWL_DEALLOC_NONE, refs - 1);
2849 }
2850 }
2851
2852 OS_ALWAYS_INLINE
2853 static inline void
2854 kqueue_release(kqueue_t kqu)
2855 {
2856 if (kqu.kq->kq_state & KQ_DYNAMIC) {
2857 kqworkloop_release(kqu.kqwl);
2858 }
2859 }
2860
2861 /*!
2862 * @function kqueue_destroy
2863 *
2864 * @brief
2865 * Common part to all kqueue dealloc functions.
2866 */
2867 OS_NOINLINE
2868 static void
2869 kqueue_destroy(kqueue_t kqu, zone_t zone)
2870 {
2871 /*
2872 * waitq_set_deinit() removes the KQ's waitq set from
2873 * any select sets to which it may belong.
2874 *
2875 * The order of these deinits matters: before waitq_set_deinit() returns,
2876 * waitq_set__CALLING_PREPOST_HOOK__ may be called and it will take the
2877 * kq_lock.
2878 */
2879 waitq_set_deinit(&kqu.kq->kq_wqs);
2880 lck_spin_destroy(&kqu.kq->kq_lock, &kq_lck_grp);
2881
2882 zfree(zone, kqu.kq);
2883 }
2884
2885 /*!
2886 * @function kqueue_init
2887 *
2888 * @brief
2889 * Common part to all kqueue alloc functions.
2890 */
2891 static kqueue_t
2892 kqueue_init(kqueue_t kqu, waitq_set_prepost_hook_t *hook, int policy)
2893 {
2894 waitq_set_init(&kqu.kq->kq_wqs, policy, NULL, hook);
2895 lck_spin_init(&kqu.kq->kq_lock, &kq_lck_grp, LCK_ATTR_NULL);
2896 return kqu;
2897 }
2898
2899 #pragma mark kqfile allocation and deallocation
2900
2901 /*!
2902 * @function kqueue_dealloc
2903 *
2904 * @brief
2905 * Detach all knotes from a kqfile and free it.
2906 *
2907 * @discussion
2908 * We walk each list looking for knotes referencing
2909 * this kqueue. If we find one, we try to drop it. But
2910 * if we fail to get a drop reference, that will wait
2911 * until it is dropped. So, we can just restart again
2912 * safe in the assumption that the list will eventually
2913 * not contain any more references to this kqueue (either
2914 * we dropped them all, or someone else did).
2915 *
2916 * Assumes no new events are being added to the kqueue.
2917 * Nothing locked on entry or exit.
2918 */
2919 void
2920 kqueue_dealloc(struct kqueue *kq)
2921 {
2922 KNOTE_LOCK_CTX(knlc);
2923 struct proc *p = kq->kq_p;
2924 struct filedesc *fdp = p->p_fd;
2925 struct knote *kn;
2926
2927 assert(kq && (kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
2928
2929 proc_fdlock(p);
2930 for (int i = 0; i < fdp->fd_knlistsize; i++) {
2931 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
2932 while (kn != NULL) {
2933 if (kq == knote_get_kq(kn)) {
2934 kqlock(kq);
2935 proc_fdunlock(p);
2936 if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
2937 knote_drop(kq, kn, &knlc);
2938 }
2939 proc_fdlock(p);
2940 /* start over at beginning of list */
2941 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
2942 continue;
2943 }
2944 kn = SLIST_NEXT(kn, kn_link);
2945 }
2946 }
2947
2948 knhash_lock(fdp);
2949 proc_fdunlock(p);
2950
2951 if (fdp->fd_knhashmask != 0) {
2952 for (int i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
2953 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
2954 while (kn != NULL) {
2955 if (kq == knote_get_kq(kn)) {
2956 kqlock(kq);
2957 knhash_unlock(fdp);
2958 if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
2959 knote_drop(kq, kn, &knlc);
2960 }
2961 knhash_lock(fdp);
2962 /* start over at beginning of list */
2963 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
2964 continue;
2965 }
2966 kn = SLIST_NEXT(kn, kn_link);
2967 }
2968 }
2969 }
2970 knhash_unlock(fdp);
2971
2972 kqueue_destroy(kq, kqfile_zone);
2973 }
2974
2975 /*!
2976 * @function kqueue_alloc
2977 *
2978 * @brief
2979 * Allocate a kqfile.
2980 */
2981 struct kqueue *
2982 kqueue_alloc(struct proc *p)
2983 {
2984 struct kqfile *kqf;
2985
2986 /*
2987 * kqfiles are created with kqueue() so we need to wait for
2988 * the first kevent syscall to know which bit among
2989 * KQ_KEV_{32,64,QOS} will be set in kqf_state
2990 */
2991 kqf = zalloc_flags(kqfile_zone, Z_WAITOK | Z_ZERO);
2992 kqf->kqf_p = p;
2993 TAILQ_INIT_AFTER_BZERO(&kqf->kqf_queue);
2994 TAILQ_INIT_AFTER_BZERO(&kqf->kqf_suppressed);
2995
2996 return kqueue_init(kqf, NULL, SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST).kq;
2997 }
2998
2999 /*!
3000 * @function kqueue_internal
3001 *
3002 * @brief
3003 * Core implementation for kqueue and guarded_kqueue_np()
3004 */
3005 int
3006 kqueue_internal(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
3007 {
3008 struct kqueue *kq;
3009 struct fileproc *fp;
3010 int fd, error;
3011
3012 error = falloc_withalloc(p, &fp, &fd, vfs_context_current(), fp_zalloc, cra);
3013 if (error) {
3014 return error;
3015 }
3016
3017 kq = kqueue_alloc(p);
3018 if (kq == NULL) {
3019 fp_free(p, fd, fp);
3020 return ENOMEM;
3021 }
3022
3023 fp->f_flag = FREAD | FWRITE;
3024 fp->f_ops = &kqueueops;
3025 fp->f_data = kq;
3026 fp->f_lflags |= FG_CONFINED;
3027
3028 proc_fdlock(p);
3029 *fdflags(p, fd) |= UF_EXCLOSE | UF_FORKCLOSE;
3030 procfdtbl_releasefd(p, fd, NULL);
3031 fp_drop(p, fd, fp, 1);
3032 proc_fdunlock(p);
3033
3034 *retval = fd;
3035 return error;
3036 }
3037
3038 /*!
3039 * @function kqueue
3040 *
3041 * @brief
3042 * The kqueue syscall.
3043 */
3044 int
3045 kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
3046 {
3047 return kqueue_internal(p, fileproc_alloc_init, NULL, retval);
3048 }
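
From user space, the fd returned by the syscall above is only useful once knotes are registered on it. A minimal round trip with the public API, watching the read end of a pipe with EVFILT_READ and collecting the event once a byte has been written (a sketch, error handling trimmed):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    int fds[2];
    if (pipe(fds) < 0) {
        perror("pipe");
        return 1;
    }

    int kq = kqueue();
    if (kq < 0) {
        perror("kqueue");
        return 1;
    }

    /* Watch the read end of the pipe for readability. */
    struct kevent kev;
    EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
    if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0) {
        perror("kevent(register)");
        return 1;
    }

    /* Make the pipe readable, then collect the event. */
    (void)write(fds[1], "x", 1);

    struct kevent out;
    if (kevent(kq, NULL, 0, &out, 1, NULL) == 1) {
        /* For EVFILT_READ, data is the number of bytes available. */
        printf("fd %d readable, %ld byte(s) pending\n",
            (int)out.ident, (long)out.data);
    }

    close(kq);
    close(fds[0]);
    close(fds[1]);
    return 0;
}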
3049
3050 #pragma mark kqworkq allocation and deallocation
3051
3052 /*!
3053 * @function kqworkq_dealloc
3054 *
3055 * @brief
3056 * Deallocates a workqueue kqueue.
3057 *
3058 * @discussion
3059 * This only happens at process death, or for races with concurrent
3060 * kevent_get_kqwq calls, hence we don't have to care about knotes referencing
3061 * this kqueue: either there are none, or someone else took care of them.
3062 */
3063 void
3064 kqworkq_dealloc(struct kqworkq *kqwq)
3065 {
3066 kqueue_destroy(kqwq, kqworkq_zone);
3067 }
3068
3069 /*!
3070 * @function kqworkq_alloc
3071 *
3072 * @brief
3073 * Allocates a workqueue kqueue.
3074 *
3075 * @discussion
3076 * This is the slow path of kevent_get_kqwq.
3077 * This takes care of making sure procs have a single workq kqueue.
3078 */
3079 OS_NOINLINE
3080 static struct kqworkq *
3081 kqworkq_alloc(struct proc *p, unsigned int flags)
3082 {
3083 struct kqworkq *kqwq, *tmp;
3084
3085 kqwq = zalloc_flags(kqworkq_zone, Z_WAITOK | Z_ZERO);
3086
3087 assert((flags & KEVENT_FLAG_LEGACY32) == 0);
3088 if (flags & KEVENT_FLAG_LEGACY64) {
3089 kqwq->kqwq_state = KQ_WORKQ | KQ_KEV64;
3090 } else {
3091 kqwq->kqwq_state = KQ_WORKQ | KQ_KEV_QOS;
3092 }
3093 kqwq->kqwq_p = p;
3094
3095 for (int i = 0; i < KQWQ_NBUCKETS; i++) {
3096 TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_queue[i]);
3097 TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_suppressed[i]);
3098 }
3099 for (int i = 0; i < KQWQ_NBUCKETS; i++) {
3100 /*
3101 * Because of how the bucketized system works, we mix overcommit
3102 * sources with not overcommit: each time we move a knote from
3103 * one bucket to the next due to overrides, we'd had to track
3104 * overcommitness, and it's really not worth it in the workloop
3105 * enabled world that track this faithfully.
3106 *
3107 * Incidentally, this behaves like the original manager-based
3108 * kqwq where event delivery always happened (hence is
3109 * "overcommit")
3110 */
3111 kqwq->kqwq_request[i].tr_state = WORKQ_TR_STATE_IDLE;
3112 kqwq->kqwq_request[i].tr_flags = WORKQ_TR_FLAG_KEVENT;
3113 if (i != KQWQ_QOS_MANAGER) {
3114 kqwq->kqwq_request[i].tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
3115 }
3116 kqwq->kqwq_request[i].tr_kq_qos_index = (kq_index_t)i;
3117 }
3118
3119 kqueue_init(kqwq, &kqwq->kqwq_waitq_hook, SYNC_POLICY_FIFO);
3120
3121 if (!os_atomic_cmpxchgv(&p->p_fd->fd_wqkqueue, NULL, kqwq, &tmp, release)) {
3122 kqworkq_dealloc(kqwq);
3123 return tmp;
3124 }
3125
3126 return kqwq;
3127 }
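
The publication step at the end of kqworkq_alloc is a classic publish-or-discard idiom: fully build the object, try to install it with a single compare-and-swap, and free the local copy if another thread won the race. Below is a hedged sketch of the idiom with C11 atomics; the workq_kqueue type, the slot variable, and the calloc() allocator are stand-ins chosen for illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct workq_kqueue {
    int state;
};

/* Per-process singleton slot; stands in for the filedesc field. */
static _Atomic(struct workq_kqueue *) wqkqueue_slot;

static struct workq_kqueue *
workq_kqueue_get(void)
{
    struct workq_kqueue *existing =
        atomic_load_explicit(&wqkqueue_slot, memory_order_acquire);
    if (existing != NULL) {
        return existing;             /* fast path: already published */
    }

    /* Slow path: fully build a candidate, then try to publish it.
     * (Allocation failure handling omitted for brevity.) */
    struct workq_kqueue *fresh = calloc(1, sizeof(*fresh));
    struct workq_kqueue *expected = NULL;

    if (!atomic_compare_exchange_strong_explicit(&wqkqueue_slot,
        &expected, fresh,
        memory_order_acq_rel,        /* publish the initialized object */
        memory_order_acquire)) {     /* on failure, observe the winner */
        free(fresh);                 /* somebody else won the race */
        return expected;
    }
    return fresh;
}

int
main(void)
{
    struct workq_kqueue *a = workq_kqueue_get();
    struct workq_kqueue *b = workq_kqueue_get();
    printf("same singleton: %s\n", (a == b) ? "yes" : "no");
    return 0;
}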
3128
3129 #pragma mark kqworkloop allocation and deallocation
3130
3131 #define KQ_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
3132 #define CONFIG_KQ_HASHSIZE CONFIG_KN_HASHSIZE
3133
3134 OS_ALWAYS_INLINE
3135 static inline void
3136 kqhash_lock(struct filedesc *fdp)
3137 {
3138 lck_mtx_lock_spin_always(&fdp->fd_kqhashlock);
3139 }
3140
3141 OS_ALWAYS_INLINE
3142 static inline void
3143 kqhash_unlock(struct filedesc *fdp)
3144 {
3145 lck_mtx_unlock(&fdp->fd_kqhashlock);
3146 }
3147
3148 OS_ALWAYS_INLINE
3149 static inline void
3150 kqworkloop_hash_insert_locked(struct filedesc *fdp, kqueue_id_t id,
3151 struct kqworkloop *kqwl)
3152 {
3153 struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
3154 LIST_INSERT_HEAD(list, kqwl, kqwl_hashlink);
3155 }
3156
3157 OS_ALWAYS_INLINE
3158 static inline struct kqworkloop *
3159 kqworkloop_hash_lookup_locked(struct filedesc *fdp, kqueue_id_t id)
3160 {
3161 struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
3162 struct kqworkloop *kqwl;
3163
3164 LIST_FOREACH(kqwl, list, kqwl_hashlink) {
3165 if (kqwl->kqwl_dynamicid == id) {
3166 return kqwl;
3167 }
3168 }
3169 return NULL;
3170 }
3171
3172 static struct kqworkloop *
3173 kqworkloop_hash_lookup_and_retain(struct filedesc *fdp, kqueue_id_t kq_id)
3174 {
3175 struct kqworkloop *kqwl = NULL;
3176
3177 kqhash_lock(fdp);
3178 if (__probable(fdp->fd_kqhash)) {
3179 kqwl = kqworkloop_hash_lookup_locked(fdp, kq_id);
3180 if (kqwl && !kqworkloop_try_retain(kqwl)) {
3181 kqwl = NULL;
3182 }
3183 }
3184 kqhash_unlock(fdp);
3185 return kqwl;
3186 }
3187
3188 OS_NOINLINE
3189 static void
3190 kqworkloop_hash_init(struct filedesc *fdp)
3191 {
3192 struct kqwllist *alloc_hash;
3193 u_long alloc_mask;
3194
3195 kqhash_unlock(fdp);
3196 alloc_hash = hashinit(CONFIG_KQ_HASHSIZE, M_KQUEUE, &alloc_mask);
3197 kqhash_lock(fdp);
3198
3199 /* See if we won the race */
3200 if (__probable(fdp->fd_kqhashmask == 0)) {
3201 fdp->fd_kqhash = alloc_hash;
3202 fdp->fd_kqhashmask = alloc_mask;
3203 } else {
3204 kqhash_unlock(fdp);
3205 hashdestroy(alloc_hash, M_KQUEUE, alloc_mask);
3206 kqhash_lock(fdp);
3207 }
3208 }
3209
3210 /*!
3211 * @function kqworkloop_dealloc
3212 *
3213 * @brief
3214 * Deallocates a workloop kqueue.
3215 *
3216 * @discussion
3217 * Knotes hold references on the workloop, so we can't really reach this
3218 * function unless all of these are already gone.
3219 *
3220 * Nothing locked on entry or exit.
3221 *
3222 * @param flags
3223 * Unless KQWL_DEALLOC_SKIP_HASH_REMOVE is set, the workloop is removed
3224 * from its hash table.
3225 *
3226 * @param current_ref
3227 * This function is also called to undo a kqworkloop_alloc in case of
3228 * allocation races; current_ref is the refcount that is expected
3229 * on the workloop object, usually 0, and 1 when a dealloc race is resolved.
3230 */
3231 static void
3232 kqworkloop_dealloc(struct kqworkloop *kqwl, kqworkloop_dealloc_flags_t flags,
3233 uint32_t current_ref)
3234 {
3235 thread_t cur_owner;
3236
3237 if (__improbable(current_ref > 1)) {
3238 kqworkloop_release_panic(kqwl);
3239 }
3240 assert(kqwl->kqwl_retains == current_ref);
3241
3242 /* pair with kqunlock() and other kq locks */
3243 os_atomic_thread_fence(acquire);
3244
3245 cur_owner = kqwl->kqwl_owner;
3246 if (cur_owner) {
3247 if (kqworkloop_override(kqwl) != THREAD_QOS_UNSPECIFIED) {
3248 thread_drop_kevent_override(cur_owner);
3249 }
3250 thread_deallocate(cur_owner);
3251 kqwl->kqwl_owner = THREAD_NULL;
3252 }
3253
3254 if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
3255 struct turnstile *ts;
3256 turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
3257 &ts, TURNSTILE_WORKLOOPS);
3258 turnstile_cleanup();
3259 turnstile_deallocate(ts);
3260 }
3261
3262 if ((flags & KQWL_DEALLOC_SKIP_HASH_REMOVE) == 0) {
3263 struct filedesc *fdp = kqwl->kqwl_p->p_fd;
3264
3265 kqhash_lock(fdp);
3266 LIST_REMOVE(kqwl, kqwl_hashlink);
3267 kqhash_unlock(fdp);
3268 }
3269
3270 assert(TAILQ_EMPTY(&kqwl->kqwl_suppressed));
3271 assert(kqwl->kqwl_owner == THREAD_NULL);
3272 assert(kqwl->kqwl_turnstile == TURNSTILE_NULL);
3273
3274 lck_spin_destroy(&kqwl->kqwl_statelock, &kq_lck_grp);
3275 kqueue_destroy(kqwl, kqworkloop_zone);
3276 }
3277
3278 /*!
3279 * @function kqworkloop_alloc
3280 *
3281 * @brief
3282 * Allocates a workloop kqueue.
3283 */
3284 static void
3285 kqworkloop_init(struct kqworkloop *kqwl, proc_t p,
3286 kqueue_id_t id, workq_threadreq_param_t *trp)
3287 {
3288 kqwl->kqwl_state = KQ_WORKLOOP | KQ_DYNAMIC | KQ_KEV_QOS;
3289 kqwl->kqwl_retains = 1; /* donate a retain to creator */
3290 kqwl->kqwl_dynamicid = id;
3291 kqwl->kqwl_p = p;
3292 if (trp) {
3293 kqwl->kqwl_params = trp->trp_value;
3294 }
3295
3296 workq_tr_flags_t tr_flags = WORKQ_TR_FLAG_WORKLOOP;
3297 if (trp) {
3298 if (trp->trp_flags & TRP_PRIORITY) {
3299 tr_flags |= WORKQ_TR_FLAG_WL_OUTSIDE_QOS;
3300 }
3301 if (trp->trp_flags) {
3302 tr_flags |= WORKQ_TR_FLAG_WL_PARAMS;
3303 }
3304 }
3305 kqwl->kqwl_request.tr_state = WORKQ_TR_STATE_IDLE;
3306 kqwl->kqwl_request.tr_flags = tr_flags;
3307
3308 for (int i = 0; i < KQWL_NBUCKETS; i++) {
3309 TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_queue[i]);
3310 }
3311 TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_suppressed);
3312
3313 lck_spin_init(&kqwl->kqwl_statelock, &kq_lck_grp, LCK_ATTR_NULL);
3314
3315 kqueue_init(kqwl, &kqwl->kqwl_waitq_hook, SYNC_POLICY_FIFO);
3316 }
3317
3318 /*!
3319 * @function kqworkloop_get_or_create
3320 *
3321 * @brief
3322 * Wrapper around kqworkloop_alloc that handles the uniquing of workloops.
3323 *
3324 * @returns
3325 * 0: success
3326 * EINVAL: invalid parameters
3327 * EEXIST: KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST is set and a collision exists.
3328 * ENOENT: KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST is set and the entry wasn't found.
3329 * ENOMEM: allocation failed
3330 */
3331 static int
3332 kqworkloop_get_or_create(struct proc *p, kqueue_id_t id,
3333 workq_threadreq_param_t *trp, unsigned int flags, struct kqworkloop **kqwlp)
3334 {
3335 struct filedesc *fdp = p->p_fd;
3336 struct kqworkloop *alloc_kqwl = NULL;
3337 struct kqworkloop *kqwl = NULL;
3338 int error = 0;
3339
3340 assert(!trp || (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST));
3341
3342 if (id == 0 || id == (kqueue_id_t)-1) {
3343 return EINVAL;
3344 }
3345
3346 for (;;) {
3347 kqhash_lock(fdp);
3348 if (__improbable(fdp->fd_kqhash == NULL)) {
3349 kqworkloop_hash_init(fdp);
3350 }
3351
3352 kqwl = kqworkloop_hash_lookup_locked(fdp, id);
3353 if (kqwl) {
3354 if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
3355 /*
3356 * If MUST_NOT_EXIST was passed, even if we would have failed
3357 * the try_retain, it could have gone the other way, and
3358 * userspace can't tell. Let'em fix their race.
3359 */
3360 error = EEXIST;
3361 break;
3362 }
3363
3364 if (__probable(kqworkloop_try_retain(kqwl))) {
3365 /*
3366 * This is a valid live workloop !
3367 */
3368 *kqwlp = kqwl;
3369 error = 0;
3370 break;
3371 }
3372 }
3373
3374 if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST)) {
3375 error = ENOENT;
3376 break;
3377 }
3378
3379 /*
3380 * We didn't find what we were looking for.
3381 *
3382 * If this is the second time we reach this point (alloc_kqwl != NULL),
3383 * then we're done.
3384 *
3385 * If this is the first time we reach this point (alloc_kqwl == NULL),
3386 * then try to allocate one without blocking.
3387 */
3388 if (__probable(alloc_kqwl == NULL)) {
3389 alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_NOWAIT | Z_ZERO);
3390 }
3391 if (__probable(alloc_kqwl)) {
3392 kqworkloop_init(alloc_kqwl, p, id, trp);
3393 kqworkloop_hash_insert_locked(fdp, id, alloc_kqwl);
3394 kqhash_unlock(fdp);
3395 *kqwlp = alloc_kqwl;
3396 return 0;
3397 }
3398
3399 /*
3400 * We have to block to allocate a workloop: drop the lock,
3401 * allocate one, but then retry the lookup, as someone
3402 * else could race with us.
3403 */
3404 kqhash_unlock(fdp);
3405
3406 alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_WAITOK | Z_ZERO);
3407 }
3408
3409 kqhash_unlock(fdp);
3410
3411 if (__improbable(alloc_kqwl)) {
3412 zfree(kqworkloop_zone, alloc_kqwl);
3413 }
3414
3415 return error;
3416 }
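
The loop above resolves lookup/creation races by retrying: if the id is missing and the allocation has to block, the hash lock is dropped, the object is allocated, and the lookup is redone because another thread may have inserted the same id meanwhile; the loser frees its copy. Here is a hedged user-space sketch of that shape with a pthread mutex and a single-bucket list; the non-blocking first attempt is elided, and all names are stand-ins rather than kernel APIs.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct workloop {
    uint64_t id;
    struct workloop *next;
};

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static struct workloop *workloops;      /* single bucket, for brevity */

static struct workloop *
lookup_locked(uint64_t id)
{
    for (struct workloop *wl = workloops; wl; wl = wl->next) {
        if (wl->id == id) {
            return wl;
        }
    }
    return NULL;
}

static struct workloop *
workloop_get_or_create(uint64_t id)
{
    struct workloop *alloc = NULL, *wl;

    pthread_mutex_lock(&hash_lock);
    for (;;) {
        wl = lookup_locked(id);
        if (wl != NULL) {
            break;                      /* found: maybe ours, maybe a racer's */
        }
        if (alloc != NULL) {
            /* Second pass: publish the pre-allocated object under the lock. */
            alloc->id = id;
            alloc->next = workloops;
            workloops = alloc;
            wl = alloc;
            alloc = NULL;
            break;
        }
        /* First pass: drop the lock to allocate (stands in for the blocking
         * zalloc), then retry the lookup, since another thread may have
         * inserted this id meanwhile. Allocation failure handling omitted. */
        pthread_mutex_unlock(&hash_lock);
        alloc = calloc(1, sizeof(*alloc));
        pthread_mutex_lock(&hash_lock);
    }
    pthread_mutex_unlock(&hash_lock);

    if (alloc != NULL) {
        free(alloc);                    /* lost the race, discard our copy */
    }
    return wl;
}

int
main(void)
{
    struct workloop *a = workloop_get_or_create(7);
    struct workloop *b = workloop_get_or_create(7);
    printf("same workloop: %s\n", (a == b) ? "yes" : "no");
    return 0;
}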
3417
3418 #pragma mark - knotes
3419
3420 static int
3421 filt_no_attach(struct knote *kn, __unused struct kevent_qos_s *kev)
3422 {
3423 knote_set_error(kn, ENOTSUP);
3424 return 0;
3425 }
3426
3427 static void
3428 filt_no_detach(__unused struct knote *kn)
3429 {
3430 }
3431
3432 static int __dead2
3433 filt_bad_event(struct knote *kn, long hint)
3434 {
3435 panic("%s[%d](%p, %ld)", __func__, kn->kn_filter, kn, hint);
3436 }
3437
3438 static int __dead2
3439 filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev)
3440 {
3441 panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
3442 }
3443
3444 static int __dead2
3445 filt_bad_process(struct knote *kn, struct kevent_qos_s *kev)
3446 {
3447 panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
3448 }
3449
3450 /*
3451 * knotes_dealloc - detach all knotes for the process and drop them
3452 *
3453 * Called with proc_fdlock held.
3454 * Returns with it locked.
3455 * May drop it temporarily.
3456 * Process is in such a state that it will not try to allocate
3457 * any more knotes during this process (stopped for exit or exec).
3458 */
3459 void
3460 knotes_dealloc(proc_t p)
3461 {
3462 struct filedesc *fdp = p->p_fd;
3463 struct kqueue *kq;
3464 struct knote *kn;
3465 struct klist *kn_hash = NULL;
3466 u_long kn_hashmask;
3467 int i;
3468
3469 /* Close all the fd-indexed knotes up front */
3470 if (fdp->fd_knlistsize > 0) {
3471 for (i = 0; i < fdp->fd_knlistsize; i++) {
3472 while ((kn = SLIST_FIRST(&fdp->fd_knlist[i])) != NULL) {
3473 kq = knote_get_kq(kn);
3474 kqlock(kq);
3475 proc_fdunlock(p);
3476 knote_drop(kq, kn, NULL);
3477 proc_fdlock(p);
3478 }
3479 }
3480 /* free the table */
3481 kheap_free(KM_KQUEUE, fdp->fd_knlist,
3482 fdp->fd_knlistsize * sizeof(struct klist *));
3483 }
3484 fdp->fd_knlistsize = 0;
3485
3486 knhash_lock(fdp);
3487 proc_fdunlock(p);
3488
3489 /* Clean out all the hashed knotes as well */
3490 if (fdp->fd_knhashmask != 0) {
3491 for (i = 0; i <= (int)fdp->fd_knhashmask; i++) {
3492 while ((kn = SLIST_FIRST(&fdp->fd_knhash[i])) != NULL) {
3493 kq = knote_get_kq(kn);
3494 kqlock(kq);
3495 knhash_unlock(fdp);
3496 knote_drop(kq, kn, NULL);
3497 knhash_lock(fdp);
3498 }
3499 }
3500 kn_hash = fdp->fd_knhash;
3501 kn_hashmask = fdp->fd_knhashmask;
3502 fdp->fd_knhashmask = 0;
3503 fdp->fd_knhash = NULL;
3504 }
3505
3506 knhash_unlock(fdp);
3507
3508 if (kn_hash) {
3509 hashdestroy(kn_hash, M_KQUEUE, kn_hashmask);
3510 }
3511
3512 proc_fdlock(p);
3513 }
3514
3515 /*
3516 * kqworkloops_dealloc - rebalance retains on kqworkloops created with
3517 * scheduling parameters
3518 *
3519 * Called with proc_fdlock held.
3520 * Returns with it locked.
3521 * Process is in such a state that it will not try to allocate
3522 * any more knotes during this process (stopped for exit or exec).
3523 */
3524 void
3525 kqworkloops_dealloc(proc_t p)
3526 {
3527 struct filedesc *fdp = p->p_fd;
3528 struct kqworkloop *kqwl, *kqwln;
3529 struct kqwllist tofree;
3530
3531 if (!(fdp->fd_flags & FD_WORKLOOP)) {
3532 return;
3533 }
3534
3535 kqhash_lock(fdp);
3536
3537 if (fdp->fd_kqhashmask == 0) {
3538 kqhash_unlock(fdp);
3539 return;
3540 }
3541
3542 LIST_INIT(&tofree);
3543
3544 for (size_t i = 0; i <= fdp->fd_kqhashmask; i++) {
3545 LIST_FOREACH_SAFE(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink, kqwln) {
3546 /*
3547 * kqworkloops that have scheduling parameters have an
3548 * implicit retain from kqueue_workloop_ctl that needs
3549 * to be balanced on process exit.
3550 */
3551 assert(kqwl->kqwl_params);
3552 LIST_REMOVE(kqwl, kqwl_hashlink);
3553 LIST_INSERT_HEAD(&tofree, kqwl, kqwl_hashlink);
3554 }
3555 }
3556
3557 kqhash_unlock(fdp);
3558
3559 LIST_FOREACH_SAFE(kqwl, &tofree, kqwl_hashlink, kqwln) {
3560 kqworkloop_dealloc(kqwl, KQWL_DEALLOC_SKIP_HASH_REMOVE, 1);
3561 }
3562 }
3563
3564 static int
3565 kevent_register_validate_priority(struct kqueue *kq, struct knote *kn,
3566 struct kevent_qos_s *kev)
3567 {
3568 /* We don't care about the priority of a disabled or deleted knote */
3569 if (kev->flags & (EV_DISABLE | EV_DELETE)) {
3570 return 0;
3571 }
3572
3573 if (kq->kq_state & KQ_WORKLOOP) {
3574 /*
3575 * Workloops need valid priorities with a QOS (excluding manager) for
3576 * any enabled knote.
3577 *
3578 * When it is pre-existing, just make sure it has a valid QoS as
3579 * kevent_register() will not use the incoming priority (filters who do
3580 * have the responsibility to validate it again, see filt_wltouch).
3581 *
3582 * If the knote is being made, validate the incoming priority.
3583 */
3584 if (!_pthread_priority_thread_qos(kn ? kn->kn_qos : kev->qos)) {
3585 return ERANGE;
3586 }
3587 }
3588
3589 return 0;
3590 }
3591
3592 /*
3593 * Prepare a filter for waiting after register.
3594 *
3595 * The f_post_register_wait hook will be called later by kevent_register()
3596 * and should call kevent_register_wait_block()
3597 */
3598 static int
3599 kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int rc)
3600 {
3601 thread_t thread = current_thread();
3602
3603 assert(knote_fops(kn)->f_extended_codes);
3604
3605 if (kn->kn_thread == NULL) {
3606 thread_reference(thread);
3607 kn->kn_thread = thread;
3608 } else if (kn->kn_thread != thread) {
3609 /*
3610 * kn_thread may be set from a previous aborted wait
3611 * However, it has to be from the same thread.
3612 */
3613 kev->flags |= EV_ERROR;
3614 kev->data = EXDEV;
3615 return 0;
3616 }
3617
3618 return FILTER_REGISTER_WAIT | rc;
3619 }
3620
3621 /*
3622 * Cleanup a kevent_register_wait_prepare() effect for threads that have been
3623 * aborted instead of properly woken up with thread_wakeup_thread().
3624 */
3625 static void
3626 kevent_register_wait_cleanup(struct knote *kn)
3627 {
3628 thread_t thread = kn->kn_thread;
3629 kn->kn_thread = NULL;
3630 thread_deallocate(thread);
3631 }
3632
3633 /*
3634 * Must be called at the end of a f_post_register_wait call from a filter.
3635 */
3636 static void
3637 kevent_register_wait_block(struct turnstile *ts, thread_t thread,
3638 thread_continue_t cont, struct _kevent_register *cont_args)
3639 {
3640 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
3641 kqunlock(cont_args->kqwl);
3642 cont_args->handoff_thread = thread;
3643 thread_handoff_parameter(thread, cont, cont_args, THREAD_HANDOFF_NONE);
3644 }
3645
3646 /*
3647 * Called by Filters using a f_post_register_wait to return from their wait.
3648 */
3649 static void
3650 kevent_register_wait_return(struct _kevent_register *cont_args)
3651 {
3652 struct kqworkloop *kqwl = cont_args->kqwl;
3653 struct kevent_qos_s *kev = &cont_args->kev;
3654 int error = 0;
3655
3656 if (cont_args->handoff_thread) {
3657 thread_deallocate(cont_args->handoff_thread);
3658 }
3659
3660 if (kev->flags & (EV_ERROR | EV_RECEIPT)) {
3661 if ((kev->flags & EV_ERROR) == 0) {
3662 kev->flags |= EV_ERROR;
3663 kev->data = 0;
3664 }
3665 error = kevent_modern_copyout(kev, &cont_args->ueventlist);
3666 if (error == 0) {
3667 cont_args->eventout++;
3668 }
3669 }
3670
3671 kqworkloop_release(kqwl);
3672 if (error == 0) {
3673 *(int32_t *)&current_uthread()->uu_rval = cont_args->eventout;
3674 }
3675 unix_syscall_return(error);
3676 }
3677
3678 /*
3679 * kevent_register - add a new event to a kqueue
3680 *
3681 * Creates a mapping between the event source and
3682 * the kqueue via a knote data structure.
3683 *
3684 * Because many/most of the event sources are file
3685 * descriptor related, the knote is linked off
3686 * the file descriptor table for quick access.
3687 *
3688 * called with nothing locked
3689 * caller holds a reference on the kqueue
3690 */
3691
3692 int
3693 kevent_register(struct kqueue *kq, struct kevent_qos_s *kev,
3694 struct knote **kn_out)
3695 {
3696 struct proc *p = kq->kq_p;
3697 const struct filterops *fops;
3698 struct knote *kn = NULL;
3699 int result = 0, error = 0;
3700 unsigned short kev_flags = kev->flags;
3701 KNOTE_LOCK_CTX(knlc);
3702
3703 if (__probable(kev->filter < 0 && kev->filter + EVFILT_SYSCOUNT >= 0)) {
3704 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
3705 } else {
3706 error = EINVAL;
3707 goto out;
3708 }
3709
3710 /* restrict EV_VANISHED to adding udata-specific dispatch kevents */
3711 if (__improbable((kev->flags & EV_VANISHED) &&
3712 (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2))) {
3713 error = EINVAL;
3714 goto out;
3715 }
3716
3717 /* Simplify the flags - delete and disable overrule */
3718 if (kev->flags & EV_DELETE) {
3719 kev->flags &= ~EV_ADD;
3720 }
3721 if (kev->flags & EV_DISABLE) {
3722 kev->flags &= ~EV_ENABLE;
3723 }
3724
3725 if (kq->kq_state & KQ_WORKLOOP) {
3726 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_REGISTER),
3727 ((struct kqworkloop *)kq)->kqwl_dynamicid,
3728 kev->udata, kev->flags, kev->filter);
3729 } else if (kq->kq_state & KQ_WORKQ) {
3730 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_REGISTER),
3731 0, kev->udata, kev->flags, kev->filter);
3732 } else {
3733 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_REGISTER),
3734 VM_KERNEL_UNSLIDE_OR_PERM(kq),
3735 kev->udata, kev->flags, kev->filter);
3736 }
3737
3738 restart:
3739 /* find the matching knote from the fd tables/hashes */
3740 kn = kq_find_knote_and_kq_lock(kq, kev, fops->f_isfd, p);
3741 error = kevent_register_validate_priority(kq, kn, kev);
3742 result = 0;
3743 if (error) {
3744 goto out;
3745 }
3746
3747 if (kn == NULL && (kev->flags & EV_ADD) == 0) {
3748 /*
3749 * No knote found, EV_ADD wasn't specified
3750 */
3751
3752 if ((kev_flags & EV_ADD) && (kev_flags & EV_DELETE) &&
3753 (kq->kq_state & KQ_WORKLOOP)) {
3754 /*
3755 * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete
3756 * that doesn't care about ENOENT, so just pretend the deletion
3757 * happened.
3758 */
3759 } else {
3760 error = ENOENT;
3761 }
3762 goto out;
3763 } else if (kn == NULL) {
3764 /*
3765 * No knote found, need to attach a new one (attach)
3766 */
3767
3768 struct fileproc *knote_fp = NULL;
3769
3770 /* grab a file reference for the new knote */
3771 if (fops->f_isfd) {
3772 if ((error = fp_lookup(p, (int)kev->ident, &knote_fp, 0)) != 0) {
3773 goto out;
3774 }
3775 }
3776
3777 kn = knote_alloc();
3778 if (kn == NULL) {
3779 error = ENOMEM;
3780 if (knote_fp != NULL) {
3781 fp_drop(p, (int)kev->ident, knote_fp, 0);
3782 }
3783 goto out;
3784 }
3785
3786 kn->kn_fp = knote_fp;
3787 kn->kn_is_fd = fops->f_isfd;
3788 kn->kn_kq_packed = VM_PACK_POINTER((vm_offset_t)kq, KNOTE_KQ_PACKED);
3789 kn->kn_status = 0;
3790
3791 /* was vanish support requested */
3792 if (kev->flags & EV_VANISHED) {
3793 kev->flags &= ~EV_VANISHED;
3794 kn->kn_status |= KN_REQVANISH;
3795 }
3796
3797 /* snapshot matching/dispatching protocol flags into knote */
3798 if (kev->flags & EV_DISABLE) {
3799 kn->kn_status |= KN_DISABLED;
3800 }
3801
3802 /*
3803 * copy the kevent state into knote
3804 * protocol is that fflags and data
3805 * are saved off, and cleared before
3806 * calling the attach routine.
3807 *
3808 * - kn->kn_sfflags aliases with kev->xflags
3809 * - kn->kn_sdata aliases with kev->data
3810 * - kn->kn_filter is the top 8 bits of kev->filter
3811 */
3812 kn->kn_kevent = *(struct kevent_internal_s *)kev;
3813 kn->kn_sfflags = kev->fflags;
3814 kn->kn_filtid = (uint8_t)~kev->filter;
3815 kn->kn_fflags = 0;
3816 knote_reset_priority(kq, kn, kev->qos);
3817
3818 /* Add the knote for lookup thru the fd table */
3819 error = kq_add_knote(kq, kn, &knlc, p);
3820 if (error) {
3821 knote_free(kn);
3822 if (knote_fp != NULL) {
3823 fp_drop(p, (int)kev->ident, knote_fp, 0);
3824 }
3825
3826 if (error == ERESTART) {
3827 goto restart;
3828 }
3829 goto out;
3830 }
3831
3832 /* fp reference count now applies to knote */
3833
3834 /*
3835 * we can't use filter_call() because f_attach can change the filter ops
3836 * for a filter that supports f_extended_codes, so we need to reload
3837 * knote_fops() and not use `fops`.
3838 */
3839 result = fops->f_attach(kn, kev);
3840 if (result && !knote_fops(kn)->f_extended_codes) {
3841 result = FILTER_ACTIVE;
3842 }
3843
3844 kqlock(kq);
3845
3846 if (result & FILTER_THREADREQ_NODEFEER) {
3847 enable_preemption();
3848 }
3849
3850 if (kn->kn_flags & EV_ERROR) {
3851 /*
3852 * Failed to attach correctly, so drop.
3853 */
3854 kn->kn_filtid = EVFILTID_DETACHED;
3855 error = (int)kn->kn_sdata;
3856 knote_drop(kq, kn, &knlc);
3857 result = 0;
3858 goto out;
3859 }
3860
3861 /*
3862 * end "attaching" phase - now just attached
3863 *
3864 * Mark the thread request overcommit, if appropriate
3865 *
3866 * If the attach routine indicated that an
3867 * event is already fired, activate the knote.
3868 */
3869 if ((kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) &&
3870 (kq->kq_state & KQ_WORKLOOP)) {
3871 kqworkloop_set_overcommit((struct kqworkloop *)kq);
3872 }
3873 } else if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
3874 /*
3875 * The knote was dropped while we were waiting for the lock,
3876 * we need to re-evaluate entirely
3877 */
3878
3879 goto restart;
3880 } else if (kev->flags & EV_DELETE) {
3881 /*
3882 * Deletion of a knote (drop)
3883 *
3884 * If the filter wants to filter drop events, let it do so.
3885 *
3886 * defer-delete: when trying to delete a disabled EV_DISPATCH2 knote,
3887 * we must wait for the knote to be re-enabled (unless it is being
3888 * re-enabled atomically here).
3889 */
3890
3891 if (knote_fops(kn)->f_allow_drop) {
3892 bool drop;
3893
3894 kqunlock(kq);
3895 drop = knote_fops(kn)->f_allow_drop(kn, kev);
3896 kqlock(kq);
3897
3898 if (!drop) {
3899 goto out_unlock;
3900 }
3901 }
3902
3903 if ((kev->flags & EV_ENABLE) == 0 &&
3904 (kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
3905 (kn->kn_status & KN_DISABLED) != 0) {
3906 kn->kn_status |= KN_DEFERDELETE;
3907 error = EINPROGRESS;
3908 goto out_unlock;
3909 }
3910
3911 knote_drop(kq, kn, &knlc);
3912 goto out;
3913 } else {
3914 /*
3915 * Regular update of a knote (touch)
3916 *
3917 * Call touch routine to notify filter of changes in filter values
3918 * (and to re-determine if any events are fired).
3919 *
3920 * If the knote is in defer-delete, avoid calling the filter touch
3921 * routine (it has delivered its last event already).
3922 *
3923 * If the touch routine had no failure,
3924 * apply the requested side effects to the knote.
3925 */
3926
3927 if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
3928 if (kev->flags & EV_ENABLE) {
3929 result = FILTER_ACTIVE;
3930 }
3931 } else {
3932 kqunlock(kq);
3933 result = filter_call(knote_fops(kn), f_touch(kn, kev));
3934 kqlock(kq);
3935 if (result & FILTER_THREADREQ_NODEFEER) {
3936 enable_preemption();
3937 }
3938 }
3939
3940 if (kev->flags & EV_ERROR) {
3941 result = 0;
3942 goto out_unlock;
3943 }
3944
3945 if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0 &&
3946 kn->kn_udata != kev->udata) {
3947 // this allows klist_copy_udata() not to take locks
3948 os_atomic_store_wide(&kn->kn_udata, kev->udata, relaxed);
3949 }
3950 if ((kev->flags & EV_DISABLE) && !(kn->kn_status & KN_DISABLED)) {
3951 kn->kn_status |= KN_DISABLED;
3952 knote_dequeue(kq, kn);
3953 }
3954 }
3955
3956 /* accept new kevent state */
3957 knote_apply_touch(kq, kn, kev, result);
3958
3959 out_unlock:
3960 /*
3961 * When the filter asked for a post-register wait,
3962 * we leave the kqueue locked for kevent_register()
3963 * to call the filter's f_post_register_wait hook.
3964 */
3965 if (result & FILTER_REGISTER_WAIT) {
3966 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
3967 *kn_out = kn;
3968 } else {
3969 knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
3970 }
3971
3972 out:
3973 /* output local errors through the kevent */
3974 if (error) {
3975 kev->flags |= EV_ERROR;
3976 kev->data = error;
3977 }
3978 return result;
3979 }
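
/*
 * Added note (illustrative, not part of the original file): the EV_ERROR /
 * kev->data convention used in the "out:" path above is what a userspace
 * caller observes when registering with EV_RECEIPT, e.g. roughly:
 *
 *     struct kevent64_s kev;
 *     EV_SET64(&kev, fd, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, udata, 0, 0);
 *     kevent64(kq, &kev, 1, &kev, 1, 0, NULL);
 *     if (kev.flags & EV_ERROR) {
 *             // kev.data carries the errno set by kevent_register()
 *     }
 *
 * fd and udata above are placeholders for illustration only.
 */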
3980
3981 /*
3982 * knote_process - process a triggered event
3983 *
3984 * Validate that it is really still a triggered event
3985 * by calling the filter routines (if necessary). Hold
3986 * a use reference on the knote to avoid it being detached.
3987 *
3988 * If it is still considered triggered, we will have taken
3989 * a copy of the state under the filter lock. We use that
3990 * snapshot to dispatch the knote for future processing (or
3991 * not, if this was a lost event).
3992 *
3993 * Our caller assures us that nobody else can be processing
3994 * events from this knote during the whole operation. But
3995 * others can be touching or posting events to the knote
3996 * interspersed with our processing it.
3997 *
3998 * caller holds a reference on the kqueue.
3999 * kqueue locked on entry and exit - but may be dropped
4000 */
4001 static int
4002 knote_process(struct knote *kn, kevent_ctx_t kectx,
4003 kevent_callback_t callback)
4004 {
4005 struct kevent_qos_s kev;
4006 struct kqueue *kq = knote_get_kq(kn);
4007 KNOTE_LOCK_CTX(knlc);
4008 int result = FILTER_ACTIVE;
4009 int error = 0;
4010 bool drop = false;
4011
4012 /*
4013 * Must be active or stayactive
4014 * Must be queued and not disabled/suppressed or dropping
4015 */
4016 assert(kn->kn_status & KN_QUEUED);
4017 assert(kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE));
4018 assert(!(kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)));
4019
4020 if (kq->kq_state & KQ_WORKLOOP) {
4021 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS),
4022 ((struct kqworkloop *)kq)->kqwl_dynamicid,
4023 kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4024 kn->kn_filtid);
4025 } else if (kq->kq_state & KQ_WORKQ) {
4026 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS),
4027 0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4028 kn->kn_filtid);
4029 } else {
4030 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS),
4031 VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata,
4032 kn->kn_status | (kn->kn_id << 32), kn->kn_filtid);
4033 }
4034
4035 if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) {
4036 /*
4037 * When the knote is dropping or has dropped,
4038 * then there's nothing we want to process.
4039 */
4040 return EJUSTRETURN;
4041 }
4042
4043 /*
4044 * While waiting for the knote lock, we may have dropped the kq lock,
4045 * and a touch may have disabled and dequeued the knote.
4046 */
4047 if (!(kn->kn_status & KN_QUEUED)) {
4048 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4049 return EJUSTRETURN;
4050 }
4051
4052 /*
4053 * For deferred-drop or vanished events, we just create a fake
4054 * event to acknowledge end-of-life. Otherwise, we call the
4055 * filter's process routine to snapshot the kevent state under
4056 * the filter's locking protocol.
4057 *
4058 * suppress knotes to avoid returning the same event multiple times in
4059 * a single call.
4060 */
4061 knote_suppress(kq, kn);
4062
4063 if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
4064 uint16_t kev_flags = EV_DISPATCH2 | EV_ONESHOT;
4065 if (kn->kn_status & KN_DEFERDELETE) {
4066 kev_flags |= EV_DELETE;
4067 } else {
4068 kev_flags |= EV_VANISHED;
4069 }
4070
4071 /* create fake event */
4072 kev = (struct kevent_qos_s){
4073 .filter = kn->kn_filter,
4074 .ident = kn->kn_id,
4075 .flags = kev_flags,
4076 .udata = kn->kn_udata,
4077 };
4078 } else {
4079 kqunlock(kq);
4080 kev = (struct kevent_qos_s) { };
4081 result = filter_call(knote_fops(kn), f_process(kn, &kev));
4082 kqlock(kq);
4083 }
4084
4085 /*
4086 * Determine how to dispatch the knote for future event handling.
4087 * not-fired: just return (do not callout, leave deactivated).
4088 * One-shot: If dispatch2, enter deferred-delete mode (unless this
4089 * is the deferred delete event delivery itself). Otherwise,
4090 * drop it.
4091 * Dispatch: don't clear state, just mark it disabled.
4092 * Cleared: just leave it deactivated.
4093 * Others: re-activate as there may be more events to handle.
4094 * This will not wake up more handlers right now, but
4095 * at the completion of handling events it may trigger
4096 * more handler threads (TODO: optimize based on more than
4097 * just this one event being detected by the filter).
4098 */
4099 if ((result & FILTER_ACTIVE) == 0) {
4100 if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0) {
4101 /*
4102 * Stay active knotes should not be unsuppressed or we'd create an
4103 * infinite loop.
4104 *
4105 * Some knotes (like EVFILT_WORKLOOP) can be reactivated from
4106 * within f_process() but that doesn't necessarily make them
4107 * ready to process, so we should leave them be.
4108 *
4109 * For other knotes, since we will not return an event,
4110 * there's no point keeping the knote suppressed.
4111 */
4112 knote_unsuppress(kq, kn);
4113 }
4114 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4115 return EJUSTRETURN;
4116 }
4117
4118 if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
4119 knote_adjust_qos(kq, kn, result);
4120 }
4121 kev.qos = _pthread_priority_combine(kn->kn_qos, kn->kn_qos_override);
4122
4123 if (kev.flags & EV_ONESHOT) {
4124 if ((kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
4125 (kn->kn_status & KN_DEFERDELETE) == 0) {
4126 /* defer dropping non-delete oneshot dispatch2 events */
4127 kn->kn_status |= KN_DEFERDELETE | KN_DISABLED;
4128 } else {
4129 drop = true;
4130 }
4131 } else if (kn->kn_flags & EV_DISPATCH) {
4132 /* disable all dispatch knotes */
4133 kn->kn_status |= KN_DISABLED;
4134 } else if ((kn->kn_flags & EV_CLEAR) == 0) {
4135 /* re-activate in case there are more events */
4136 knote_activate(kq, kn, FILTER_ACTIVE);
4137 }
4138
4139 /*
4140 * callback to handle each event as we find it.
4141 * If we have to detach and drop the knote, do
4142 * it while we have the kq unlocked.
4143 */
4144 if (drop) {
4145 knote_drop(kq, kn, &knlc);
4146 } else {
4147 knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
4148 }
4149
4150 if (kev.flags & EV_VANISHED) {
4151 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_VANISHED),
4152 kev.ident, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4153 kn->kn_filtid);
4154 }
4155
4156 error = (callback)(&kev, kectx);
4157 kqlock(kq);
4158 return error;
4159 }
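
/*
 * Added note (illustrative, not part of the original file): the EV_DISPATCH
 * branch above marks the knote KN_DISABLED after delivery, so it will not be
 * delivered again until userspace re-arms it, roughly:
 *
 *     struct kevent kev;
 *     EV_SET(&kev, fd, EVFILT_READ, EV_ENABLE | EV_DISPATCH, 0, 0, udata);
 *     kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * fd, udata and the filter choice are placeholders for illustration only.
 */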
4160
4161 /*
4162 * Returns -1 if the kqueue was unbound and processing should not happen
4163 */
4164 #define KQWQAE_BEGIN_PROCESSING 1
4165 #define KQWQAE_END_PROCESSING 2
4166 #define KQWQAE_UNBIND 3
4167 static int
4168 kqworkq_acknowledge_events(struct kqworkq *kqwq, workq_threadreq_t kqr,
4169 int kevent_flags, int kqwqae_op)
4170 {
4171 thread_qos_t old_override = THREAD_QOS_UNSPECIFIED;
4172 thread_t thread = kqr_thread_fast(kqr);
4173 struct knote *kn;
4174 int rc = 0;
4175 bool unbind;
4176 struct kqtailq *suppressq = &kqwq->kqwq_suppressed[kqr->tr_kq_qos_index];
4177
4178 kqlock_held(&kqwq->kqwq_kqueue);
4179
4180 if (!TAILQ_EMPTY(suppressq)) {
4181 /*
4182 * Return suppressed knotes to their original state.
4183 * For workq kqueues, suppressed ones that are still
4184 * truly active (not just forced into the queue) will
4185 * set flags we check below to see if anything got
4186 * woken up.
4187 */
4188 while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
4189 assert(kn->kn_status & KN_SUPPRESSED);
4190 knote_unsuppress(kqwq, kn);
4191 }
4192 }
4193
4194 #if DEBUG || DEVELOPMENT
4195 thread_t self = current_thread();
4196 struct uthread *ut = get_bsdthread_info(self);
4197
4198 assert(thread == self);
4199 assert(ut->uu_kqr_bound == kqr);
4200 #endif // DEBUG || DEVELOPMENT
4201
4202 if (kqwqae_op == KQWQAE_UNBIND) {
4203 unbind = true;
4204 } else if ((kevent_flags & KEVENT_FLAG_PARKING) == 0) {
4205 unbind = false;
4206 } else {
4207 unbind = !kqr->tr_kq_wakeup;
4208 }
4209 if (unbind) {
4210 old_override = kqworkq_unbind_locked(kqwq, kqr, thread);
4211 rc = -1;
4212 /*
4213 * request a new thread if we didn't process the whole queue or real events
4214 * have happened (not just putting stay-active events back).
4215 */
4216 if (kqr->tr_kq_wakeup) {
4217 kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr,
4218 kqr->tr_kq_qos_index, 0);
4219 }
4220 }
4221
4222 if (rc == 0) {
4223 /*
4224 * Reset wakeup bit to notice events firing while we are processing,
4225 * as we cannot rely on the bucket queue emptiness because of stay
4226 * active knotes.
4227 */
4228 kqr->tr_kq_wakeup = false;
4229 }
4230
4231 if (old_override) {
4232 thread_drop_kevent_override(thread);
4233 }
4234
4235 return rc;
4236 }
4237
4238 /*
4239 * Return 0 to indicate that processing should proceed,
4240 * -1 if there is nothing to process.
4241 *
4242 * Called with kqueue locked and returns the same way,
4243 * but may drop lock temporarily.
4244 */
4245 static int
4246 kqworkq_begin_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
4247 int kevent_flags)
4248 {
4249 int rc = 0;
4250
4251 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START,
4252 0, kqr->tr_kq_qos_index);
4253
4254 rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
4255 KQWQAE_BEGIN_PROCESSING);
4256
4257 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END,
4258 thread_tid(kqr_thread(kqr)), kqr->tr_kq_wakeup);
4259
4260 return rc;
4261 }
4262
4263 static thread_qos_t
4264 kqworkloop_acknowledge_events(struct kqworkloop *kqwl)
4265 {
4266 kq_index_t qos = THREAD_QOS_UNSPECIFIED;
4267 struct knote *kn, *tmp;
4268
4269 kqlock_held(kqwl);
4270
4271 TAILQ_FOREACH_SAFE(kn, &kqwl->kqwl_suppressed, kn_tqe, tmp) {
4272 /*
4273 * If a knote that can adjust QoS is disabled because of the automatic
4274 * behavior of EV_DISPATCH, the knotes should stay suppressed so that
4275 * further overrides keep pushing.
4276 */
4277 if (knote_fops(kn)->f_adjusts_qos && (kn->kn_status & KN_DISABLED) &&
4278 (kn->kn_status & (KN_STAYACTIVE | KN_DROPPING)) == 0 &&
4279 (kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) {
4280 qos = MAX(qos, kn->kn_qos_override);
4281 continue;
4282 }
4283 knote_unsuppress(kqwl, kn);
4284 }
4285
4286 return qos;
4287 }
4288
4289 static int
4290 kqworkloop_begin_processing(struct kqworkloop *kqwl, unsigned int kevent_flags)
4291 {
4292 workq_threadreq_t kqr = &kqwl->kqwl_request;
4293 struct kqueue *kq = &kqwl->kqwl_kqueue;
4294 thread_qos_t qos_override;
4295 thread_t thread = kqr_thread_fast(kqr);
4296 int rc = 0, op = KQWL_UTQ_NONE;
4297
4298 kqlock_held(kq);
4299
4300 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_START,
4301 kqwl->kqwl_dynamicid, 0, 0);
4302
4303 /* nobody else should still be processing */
4304 assert((kq->kq_state & KQ_PROCESSING) == 0);
4305
4306 kq->kq_state |= KQ_PROCESSING;
4307
4308 if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
4309 op = KQWL_UTQ_RESET_WAKEUP_OVERRIDE;
4310 }
4311
4312 if (kevent_flags & KEVENT_FLAG_PARKING) {
4313 /*
4314 * When "parking" we want to process events and if no events are found
4315 * unbind.
4316 *
4317 * However, non-overcommit threads sometimes park even when they have
4318 * more work so that the pool can narrow. For these, we need to unbind
4319 * early, so that calling kqworkloop_update_threads_qos() can ask the
4320 * workqueue subsystem whether the thread should park despite having
4321 * pending events.
4322 */
4323 if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
4324 op = KQWL_UTQ_PARKING;
4325 } else {
4326 op = KQWL_UTQ_UNBINDING;
4327 }
4328 }
4329 if (op == KQWL_UTQ_NONE) {
4330 goto done;
4331 }
4332
4333 qos_override = kqworkloop_acknowledge_events(kqwl);
4334
4335 if (op == KQWL_UTQ_UNBINDING) {
4336 kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_IMMEDIATELY);
4337 kqworkloop_release_live(kqwl);
4338 }
4339 kqworkloop_update_threads_qos(kqwl, op, qos_override);
4340 if (op == KQWL_UTQ_PARKING) {
4341 if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) {
4342 /*
4343 * We cannot trust tr_kq_wakeup when looking at stay active knotes.
4344 * We need to process once, and kqworkloop_end_processing will
4345 * handle the unbind.
4346 */
4347 } else if (!kqr->tr_kq_wakeup || kqwl->kqwl_owner) {
4348 kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
4349 kqworkloop_release_live(kqwl);
4350 rc = -1;
4351 }
4352 } else if (op == KQWL_UTQ_UNBINDING) {
4353 if (kqr_thread(kqr) == thread) {
4354 /*
4355 * The thread request fired again, passed the admission check and
4356 * got bound to the current thread again.
4357 */
4358 } else {
4359 rc = -1;
4360 }
4361 }
4362
4363 if (rc == 0) {
4364 /*
4365 * Reset wakeup bit to notice stay active events firing while we are
4366 * processing, as we cannot rely on the stayactive bucket emptiness.
4367 */
4368 kqwl->kqwl_wakeup_indexes &= ~KQWL_STAYACTIVE_FIRED_BIT;
4369 } else {
4370 kq->kq_state &= ~KQ_PROCESSING;
4371 }
4372
4373 if (rc == -1) {
4374 kqworkloop_unbind_delayed_override_drop(thread);
4375 }
4376
4377 done:
4378 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_END,
4379 kqwl->kqwl_dynamicid, 0, 0);
4380
4381 return rc;
4382 }
4383
4384 /*
4385 * Return 0 to indicate that processing should proceed,
4386 * -1 if there is nothing to process.
4387 * EBADF if the kqueue is draining
4388 *
4389 * Called with kqueue locked and returns the same way,
4390 * but may drop lock temporarily.
4391 * May block.
4392 */
4393 static int
4394 kqfile_begin_processing(struct kqfile *kq)
4395 {
4396 struct kqtailq *suppressq;
4397
4398 kqlock_held(kq);
4399
4400 assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
4401 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_START,
4402 VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
4403
4404 /* wait to become the exclusive processing thread */
4405 for (;;) {
4406 if (kq->kqf_state & KQ_DRAIN) {
4407 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
4408 VM_KERNEL_UNSLIDE_OR_PERM(kq), 2);
4409 return EBADF;
4410 }
4411
4412 if ((kq->kqf_state & KQ_PROCESSING) == 0) {
4413 break;
4414 }
4415
4416 /* if someone else is processing the queue, wait */
4417 kq->kqf_state |= KQ_PROCWAIT;
4418 suppressq = &kq->kqf_suppressed;
4419 waitq_assert_wait64((struct waitq *)&kq->kqf_wqs,
4420 CAST_EVENT64_T(suppressq), THREAD_UNINT | THREAD_WAIT_NOREPORT,
4421 TIMEOUT_WAIT_FOREVER);
4422
4423 kqunlock(kq);
4424 thread_block(THREAD_CONTINUE_NULL);
4425 kqlock(kq);
4426 }
4427
4428 /* Nobody else processing */
4429
4430 /* clear pre-posts and KQ_WAKEUP now, in case we bail early */
4431 waitq_set_clear_preposts(&kq->kqf_wqs);
4432 kq->kqf_state &= ~KQ_WAKEUP;
4433
4434 /* anything left to process? */
4435 if (TAILQ_EMPTY(&kq->kqf_queue)) {
4436 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
4437 VM_KERNEL_UNSLIDE_OR_PERM(kq), 1);
4438 return -1;
4439 }
4440
4441 /* convert to processing mode */
4442 kq->kqf_state |= KQ_PROCESSING;
4443
4444 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
4445 VM_KERNEL_UNSLIDE_OR_PERM(kq));
4446
4447 return 0;
4448 }
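
/*
 * Added note: the KQ_PROCWAIT sleep in the loop above is woken by the
 * waitq_wakeup64_all() calls on the suppress queue in kqfile_end_processing()
 * and kqueue_drain() further below; both of them clear KQ_PROCWAIT and only
 * issue the wakeup when the flag was set.
 */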
4449
4450 /*
4451 * Try to end the processing, only called when a workq thread is attempting to
4452 * park (KEVENT_FLAG_PARKING is set).
4453 *
4454 * When returning -1, the kqworkq is set up again so that it is ready to be
4455 * processed.
4456 */
4457 static int
4458 kqworkq_end_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
4459 int kevent_flags)
4460 {
4461 if (!TAILQ_EMPTY(&kqwq->kqwq_queue[kqr->tr_kq_qos_index])) {
4462 /* remember we didn't process everything */
4463 kqr->tr_kq_wakeup = true;
4464 }
4465
4466 if (kevent_flags & KEVENT_FLAG_PARKING) {
4467 /*
4468 * if acknowledge events "succeeds" it means there are events,
4469 * which is a failure condition for end_processing.
4470 */
4471 int rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
4472 KQWQAE_END_PROCESSING);
4473 if (rc == 0) {
4474 return -1;
4475 }
4476 }
4477
4478 return 0;
4479 }
4480
4481 /*
4482 * Try to end the processing, only called when a workq thread is attempting to
4483 * park (KEVENT_FLAG_PARKING is set).
4484 *
4485 * When returning -1, the kqworkloop is set up again so that it is ready to be
4486 * processed (as if kqworkloop_begin_processing had just been called).
4487 *
4488 * If successful and KEVENT_FLAG_PARKING was set in the kevent_flags,
4489 * the kqworkloop is unbound from its servicer as a side effect.
4490 */
4491 static int
4492 kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags)
4493 {
4494 struct kqueue *kq = &kqwl->kqwl_kqueue;
4495 workq_threadreq_t kqr = &kqwl->kqwl_request;
4496 thread_qos_t qos_override;
4497 thread_t thread = kqr_thread_fast(kqr);
4498 int rc = 0;
4499
4500 kqlock_held(kq);
4501
4502 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_START,
4503 kqwl->kqwl_dynamicid, 0, 0);
4504
4505 if (flags & KQ_PROCESSING) {
4506 assert(kq->kq_state & KQ_PROCESSING);
4507
4508 /*
4509 * If we still have queued stayactive knotes, remember we didn't finish
4510 * processing all of them. This should be extremely rare and would
4511 * require to have a lot of them registered and fired.
4512 */
4513 if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) {
4514 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS,
4515 KQWL_BUCKET_STAYACTIVE);
4516 }
4517
4518 /*
4519 * When KEVENT_FLAG_PARKING is set, we need to attempt an unbind while
4520 * still under the lock.
4521 *
4522 * So we do everything kqworkloop_unbind() would do, but because we're
4523 * inside kqueue_process(), if the workloop actually received events
4524 * while our locks were dropped, we have the opportunity to fail the end
4525 * processing and loop again.
4526 *
4527 * This avoids going through the process-wide workqueue lock hence
4528 * scales better.
4529 */
4530 if (kevent_flags & KEVENT_FLAG_PARKING) {
4531 qos_override = kqworkloop_acknowledge_events(kqwl);
4532 }
4533 }
4534
4535 if (kevent_flags & KEVENT_FLAG_PARKING) {
4536 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_PARKING, qos_override);
4537 if (kqr->tr_kq_wakeup && !kqwl->kqwl_owner) {
4538 /*
4539 * Reset wakeup bit to notice stay active events firing while we are
4540 * processing, as we cannot rely on the stayactive bucket emptiness.
4541 */
4542 kqwl->kqwl_wakeup_indexes &= ~KQWL_STAYACTIVE_FIRED_BIT;
4543 rc = -1;
4544 } else {
4545 kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
4546 kqworkloop_release_live(kqwl);
4547 kq->kq_state &= ~flags;
4548 }
4549 } else {
4550 kq->kq_state &= ~flags;
4551 kq->kq_state |= KQ_R2K_ARMED;
4552 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0);
4553 }
4554
4555 if ((kevent_flags & KEVENT_FLAG_PARKING) && rc == 0) {
4556 kqworkloop_unbind_delayed_override_drop(thread);
4557 }
4558
4559 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_END,
4560 kqwl->kqwl_dynamicid, 0, 0);
4561
4562 return rc;
4563 }
4564
4565 /*
4566 * Called with kqueue lock held.
4567 *
4568 * 0: no more events
4569 * -1: has more events
4570 * EBADF: kqueue is in draining mode
4571 */
4572 static int
4573 kqfile_end_processing(struct kqfile *kq)
4574 {
4575 struct kqtailq *suppressq = &kq->kqf_suppressed;
4576 struct knote *kn;
4577 int procwait;
4578
4579 kqlock_held(kq);
4580
4581 assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
4582
4583 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_END),
4584 VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
4585
4586 /*
4587 * Return suppressed knotes to their original state.
4588 */
4589 while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
4590 assert(kn->kn_status & KN_SUPPRESSED);
4591 knote_unsuppress(kq, kn);
4592 }
4593
4594 procwait = (kq->kqf_state & KQ_PROCWAIT);
4595 kq->kqf_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);
4596
4597 if (procwait) {
4598 /* first wake up any thread already waiting to process */
4599 waitq_wakeup64_all((struct waitq *)&kq->kqf_wqs,
4600 CAST_EVENT64_T(suppressq), THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
4601 }
4602
4603 if (kq->kqf_state & KQ_DRAIN) {
4604 return EBADF;
4605 }
4606 return (kq->kqf_state & KQ_WAKEUP) ? -1 : 0;
4607 }
4608
4609 static int
4610 kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options,
4611 struct kqueue_workloop_params *params, int *retval)
4612 {
4613 int error = 0;
4614 struct kqworkloop *kqwl;
4615 struct filedesc *fdp = p->p_fd;
4616 workq_threadreq_param_t trp = { };
4617
4618 switch (cmd) {
4619 case KQ_WORKLOOP_CREATE:
4620 if (!params->kqwlp_flags) {
4621 error = EINVAL;
4622 break;
4623 }
4624
4625 if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) &&
4626 (params->kqwlp_sched_pri < 1 ||
4627 params->kqwlp_sched_pri > 63 /* MAXPRI_USER */)) {
4628 error = EINVAL;
4629 break;
4630 }
4631
4632 if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) &&
4633 invalid_policy(params->kqwlp_sched_pol)) {
4634 error = EINVAL;
4635 break;
4636 }
4637
4638 if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) &&
4639 (params->kqwlp_cpu_percent <= 0 ||
4640 params->kqwlp_cpu_percent > 100 ||
4641 params->kqwlp_cpu_refillms <= 0 ||
4642 params->kqwlp_cpu_refillms > 0x00ffffff)) {
4643 error = EINVAL;
4644 break;
4645 }
4646
4647 if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) {
4648 trp.trp_flags |= TRP_PRIORITY;
4649 trp.trp_pri = (uint8_t)params->kqwlp_sched_pri;
4650 }
4651 if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) {
4652 trp.trp_flags |= TRP_POLICY;
4653 trp.trp_pol = (uint8_t)params->kqwlp_sched_pol;
4654 }
4655 if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) {
4656 trp.trp_flags |= TRP_CPUPERCENT;
4657 trp.trp_cpupercent = (uint8_t)params->kqwlp_cpu_percent;
4658 trp.trp_refillms = params->kqwlp_cpu_refillms;
4659 }
4660
4661 error = kqworkloop_get_or_create(p, params->kqwlp_id, &trp,
4662 KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
4663 KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST, &kqwl);
4664 if (error) {
4665 break;
4666 }
4667
4668 if (!(fdp->fd_flags & FD_WORKLOOP)) {
4669 /* FD_WORKLOOP indicates that a workloop has been created
4670 * via this syscall; it's only ever added to a process, never
4671 * removed.
4672 */
4673 proc_fdlock(p);
4674 fdp->fd_flags |= FD_WORKLOOP;
4675 proc_fdunlock(p);
4676 }
4677 break;
4678 case KQ_WORKLOOP_DESTROY:
4679 error = kqworkloop_get_or_create(p, params->kqwlp_id, NULL,
4680 KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
4681 KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST, &kqwl);
4682 if (error) {
4683 break;
4684 }
4685 kqlock(kqwl);
4686 trp.trp_value = kqwl->kqwl_params;
4687 if (trp.trp_flags && !(trp.trp_flags & TRP_RELEASED)) {
4688 trp.trp_flags |= TRP_RELEASED;
4689 kqwl->kqwl_params = trp.trp_value;
4690 kqworkloop_release_live(kqwl);
4691 } else {
4692 error = EINVAL;
4693 }
4694 kqunlock(kqwl);
4695 kqworkloop_release(kqwl);
4696 break;
4697 }
4698 *retval = 0;
4699 return error;
4700 }
4701
4702 int
4703 kqueue_workloop_ctl(proc_t p, struct kqueue_workloop_ctl_args *uap, int *retval)
4704 {
4705 struct kqueue_workloop_params params = {
4706 .kqwlp_id = 0,
4707 };
4708 if (uap->sz < sizeof(params.kqwlp_version)) {
4709 return EINVAL;
4710 }
4711
4712 size_t copyin_sz = MIN(sizeof(params), uap->sz);
4713 int rv = copyin(uap->addr, &params, copyin_sz);
4714 if (rv) {
4715 return rv;
4716 }
4717
4718 if (params.kqwlp_version != (int)uap->sz) {
4719 return EINVAL;
4720 }
4721
4722 return kqueue_workloop_ctl_internal(p, uap->cmd, uap->options, &params,
4723 retval);
4724 }
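
/*
 * Added usage sketch (illustrative; the userspace wrapper shown is assumed,
 * only the parameter checks above come from this file): the params structure
 * is versioned by its size, so a KQ_WORKLOOP_CREATE call looks roughly like:
 *
 *     struct kqueue_workloop_params p = {
 *         .kqwlp_version   = sizeof(p),
 *         .kqwlp_id        = workloop_id,          // placeholder
 *         .kqwlp_flags     = KQ_WORKLOOP_CREATE_SCHED_PRI,
 *         .kqwlp_sched_pri = 47,                   // 1..63 (MAXPRI_USER)
 *     };
 *     kqueue_workloop_ctl(KQ_WORKLOOP_CREATE, 0, &p, sizeof(p));
 *
 * kqwlp_version must equal the size passed in, and the flag/priority ranges
 * are validated in kqueue_workloop_ctl_internal() above.
 */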
4725
4726 /*ARGSUSED*/
4727 static int
4728 kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
4729 __unused vfs_context_t ctx)
4730 {
4731 struct kqfile *kq = (struct kqfile *)fp->f_data;
4732 struct kqtailq *suppressq = &kq->kqf_suppressed;
4733 struct kqtailq *queue = &kq->kqf_queue;
4734 struct knote *kn;
4735 int retnum = 0;
4736
4737 if (which != FREAD) {
4738 return 0;
4739 }
4740
4741 kqlock(kq);
4742
4743 assert((kq->kqf_state & KQ_WORKQ) == 0);
4744
4745 /*
4746 * If this is the first pass, link the wait queue associated with
4747 * the kqueue onto the wait queue set for the select(). Normally we
4748 * use selrecord() for this, but it uses the wait queue within the
4749 * selinfo structure and we need to use the main one for the kqueue to
4750 * catch events from KN_STAYQUEUED sources. So we do the linkage manually.
4751 * (The select() call will unlink them when it ends).
4752 */
4753 if (wq_link_id != NULL) {
4754 thread_t cur_act = current_thread();
4755 struct uthread * ut = get_bsdthread_info(cur_act);
4756
4757 kq->kqf_state |= KQ_SEL;
4758 waitq_link((struct waitq *)&kq->kqf_wqs, ut->uu_wqset,
4759 WAITQ_SHOULD_LOCK, (uint64_t *)wq_link_id);
4760
4761 /* always consume the reserved link object */
4762 waitq_link_release(*(uint64_t *)wq_link_id);
4763 *(uint64_t *)wq_link_id = 0;
4764
4765 /*
4766 * selprocess() is expecting that we send it back the waitq
4767 * that was just added to the thread's waitq set. In order
4768 * to not change the selrecord() API (which is exported to
4769 * kexts), we pass this value back through the
4770 * void *wq_link_id pointer we were passed. We need to use
4771 * memcpy here because the pointer may not be properly aligned
4772 * on 32-bit systems.
4773 */
4774 void *wqptr = &kq->kqf_wqs;
4775 memcpy(wq_link_id, (void *)&wqptr, sizeof(void *));
4776 }
4777
4778 if (kqfile_begin_processing(kq) == -1) {
4779 kqunlock(kq);
4780 return 0;
4781 }
4782
4783 if (!TAILQ_EMPTY(queue)) {
4784 /*
4785 * there is something queued - but it might be a
4786 * KN_STAYACTIVE knote, which may or may not have
4787 * any events pending. Otherwise, we have to walk
4788 * the list of knotes to see, and peek at the
4789 * (non-vanished) stay-active ones to be really sure.
4790 */
4791 while ((kn = (struct knote *)TAILQ_FIRST(queue)) != NULL) {
4792 if (kn->kn_status & KN_ACTIVE) {
4793 retnum = 1;
4794 goto out;
4795 }
4796 assert(kn->kn_status & KN_STAYACTIVE);
4797 knote_suppress(kq, kn);
4798 }
4799
4800 /*
4801 * There were no regular events on the queue, so take
4802 * a deeper look at the stay-queued ones we suppressed.
4803 */
4804 while ((kn = (struct knote *)TAILQ_FIRST(suppressq)) != NULL) {
4805 KNOTE_LOCK_CTX(knlc);
4806 int result = 0;
4807
4808 /* If it didn't vanish while suppressed, peek at it */
4809 if ((kn->kn_status & KN_DROPPING) || !knote_lock(kq, kn, &knlc,
4810 KNOTE_KQ_LOCK_ON_FAILURE)) {
4811 continue;
4812 }
4813
4814 result = filter_call(knote_fops(kn), f_peek(kn));
4815
4816 kqlock(kq);
4817 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4818
4819 /* unsuppress it */
4820 knote_unsuppress(kq, kn);
4821
4822 /* has data or it has to report a vanish */
4823 if (result & FILTER_ACTIVE) {
4824 retnum = 1;
4825 goto out;
4826 }
4827 }
4828 }
4829
4830 out:
4831 kqfile_end_processing(kq);
4832 kqunlock(kq);
4833 return retnum;
4834 }
4835
4836 /*
4837 * kqueue_close -
4838 */
4839 /*ARGSUSED*/
4840 static int
4841 kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
4842 {
4843 struct kqfile *kqf = (struct kqfile *)fg->fg_data;
4844
4845 assert((kqf->kqf_state & KQ_WORKQ) == 0);
4846 kqueue_dealloc(&kqf->kqf_kqueue);
4847 fg->fg_data = NULL;
4848 return 0;
4849 }
4850
4851 /*
4852 * Max depth of the nested kq path that can be created.
4853 * Note that this has to be less than the size of kq_level
4854 * to avoid wrapping around and mislabeling the level.
4855 */
4856 #define MAX_NESTED_KQ 1000
4857
4858 /*ARGSUSED*/
4859 /*
4860 * The caller has taken a use-count reference on this kqueue and will donate it
4861 * to the kqueue we are being added to. This keeps the kqueue from closing until
4862 * that relationship is torn down.
4863 */
4864 static int
4865 kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
4866 __unused struct kevent_qos_s *kev)
4867 {
4868 struct kqfile *kqf = (struct kqfile *)fp->f_data;
4869 struct kqueue *kq = &kqf->kqf_kqueue;
4870 struct kqueue *parentkq = knote_get_kq(kn);
4871
4872 assert((kqf->kqf_state & KQ_WORKQ) == 0);
4873
4874 if (parentkq == kq || kn->kn_filter != EVFILT_READ) {
4875 knote_set_error(kn, EINVAL);
4876 return 0;
4877 }
4878
4879 /*
4880 * We have to avoid creating a cycle when nesting kqueues
4881 * inside another. Rather than trying to walk the whole
4882 * potential DAG of nested kqueues, we just use a simple
4883 * ceiling protocol. When a kqueue is inserted into another,
4884 * we check that the (future) parent is not already nested
4885 * into another kqueue at a lower level than the potential
4886 * child (because it could indicate a cycle). If that test
4887 * passes, we just mark the nesting levels accordingly.
4888 *
4889 * Only up to MAX_NESTED_KQ can be nested.
4890 *
4891 * Note: kqworkq and kqworkloop cannot be nested and have reused their
4892 * kq_level field, so ignore these as parent.
4893 */
4894
4895 kqlock(parentkq);
4896
4897 if ((parentkq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0) {
4898 if (parentkq->kq_level > 0 &&
4899 parentkq->kq_level < kq->kq_level) {
4900 kqunlock(parentkq);
4901 knote_set_error(kn, EINVAL);
4902 return 0;
4903 }
4904
4905 /* set parent level appropriately */
4906 uint16_t plevel = (parentkq->kq_level == 0)? 2: parentkq->kq_level;
4907 if (plevel < kq->kq_level + 1) {
4908 if (kq->kq_level + 1 > MAX_NESTED_KQ) {
4909 kqunlock(parentkq);
4910 knote_set_error(kn, EINVAL);
4911 return 0;
4912 }
4913 plevel = kq->kq_level + 1;
4914 }
4915
4916 parentkq->kq_level = plevel;
4917 }
4918
4919 kqunlock(parentkq);
4920
4921 kn->kn_filtid = EVFILTID_KQREAD;
4922 kqlock(kq);
4923 KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
4924 /* indicate nesting in child, if needed */
4925 if (kq->kq_level == 0) {
4926 kq->kq_level = 1;
4927 }
4928
4929 int count = kq->kq_count;
4930 kqunlock(kq);
4931 return count > 0;
4932 }
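
/*
 * Added worked example (illustrative): nesting a fresh kqueue A into a fresh
 * kqueue B leaves A at kq_level 1 and B at level 2; nesting B into a third
 * fresh kqueue C raises C to level 3.  Attempting to then nest C into A is
 * rejected, because A's level (1) is lower than C's (3), which is exactly the
 * ceiling check at the top of the function.
 */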
4933
4934 /*
4935 * kqueue_drain - called when kq is closed
4936 */
4937 /*ARGSUSED*/
4938 static int
4939 kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
4940 {
4941 struct kqfile *kqf = (struct kqfile *)fp->fp_glob->fg_data;
4942
4943 assert((kqf->kqf_state & KQ_WORKQ) == 0);
4944
4945 kqlock(kqf);
4946 kqf->kqf_state |= KQ_DRAIN;
4947
4948 /* wakeup sleeping threads */
4949 if ((kqf->kqf_state & (KQ_SLEEP | KQ_SEL)) != 0) {
4950 kqf->kqf_state &= ~(KQ_SLEEP | KQ_SEL);
4951 (void)waitq_wakeup64_all((struct waitq *)&kqf->kqf_wqs,
4952 KQ_EVENT,
4953 THREAD_RESTART,
4954 WAITQ_ALL_PRIORITIES);
4955 }
4956
4957 /* wakeup threads waiting their turn to process */
4958 if (kqf->kqf_state & KQ_PROCWAIT) {
4959 assert(kqf->kqf_state & KQ_PROCESSING);
4960
4961 kqf->kqf_state &= ~KQ_PROCWAIT;
4962 (void)waitq_wakeup64_all((struct waitq *)&kqf->kqf_wqs,
4963 CAST_EVENT64_T(&kqf->kqf_suppressed),
4964 THREAD_RESTART, WAITQ_ALL_PRIORITIES);
4965 }
4966
4967 kqunlock(kqf);
4968 return 0;
4969 }
4970
4971 /*ARGSUSED*/
4972 int
4973 kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
4974 {
4975 assert((kq->kq_state & KQ_WORKQ) == 0);
4976
4977 kqlock(kq);
4978 if (isstat64 != 0) {
4979 struct stat64 *sb64 = (struct stat64 *)ub;
4980
4981 bzero((void *)sb64, sizeof(*sb64));
4982 sb64->st_size = kq->kq_count;
4983 if (kq->kq_state & KQ_KEV_QOS) {
4984 sb64->st_blksize = sizeof(struct kevent_qos_s);
4985 } else if (kq->kq_state & KQ_KEV64) {
4986 sb64->st_blksize = sizeof(struct kevent64_s);
4987 } else if (IS_64BIT_PROCESS(p)) {
4988 sb64->st_blksize = sizeof(struct user64_kevent);
4989 } else {
4990 sb64->st_blksize = sizeof(struct user32_kevent);
4991 }
4992 sb64->st_mode = S_IFIFO;
4993 } else {
4994 struct stat *sb = (struct stat *)ub;
4995
4996 bzero((void *)sb, sizeof(*sb));
4997 sb->st_size = kq->kq_count;
4998 if (kq->kq_state & KQ_KEV_QOS) {
4999 sb->st_blksize = sizeof(struct kevent_qos_s);
5000 } else if (kq->kq_state & KQ_KEV64) {
5001 sb->st_blksize = sizeof(struct kevent64_s);
5002 } else if (IS_64BIT_PROCESS(p)) {
5003 sb->st_blksize = sizeof(struct user64_kevent);
5004 } else {
5005 sb->st_blksize = sizeof(struct user32_kevent);
5006 }
5007 sb->st_mode = S_IFIFO;
5008 }
5009 kqunlock(kq);
5010 return 0;
5011 }
5012
5013 static inline bool
5014 kqueue_threadreq_can_use_ast(struct kqueue *kq)
5015 {
5016 if (current_proc() == kq->kq_p) {
5017 /*
5018 * Setting an AST from a non-BSD syscall is unsafe: mach_msg_trap() can
5019 * do combined send/receive and in the case of self-IPC, the AST may be
5020 * set on a thread that will not return to userspace and needs the
5021 * thread the AST would create to unblock itself.
5022 *
5023 * At this time, we really want to target:
5024 *
5025 * - kevent variants that can cause thread creations, and dispatch
5026 * really only uses kevent_qos and kevent_id,
5027 *
5028 * - workq_kernreturn (directly about thread creations)
5029 *
5030 * - bsdthread_ctl which is used for qos changes and has direct impact
5031 * on the creator thread scheduling decisions.
5032 */
5033 switch (current_uthread()->syscall_code) {
5034 case SYS_kevent_qos:
5035 case SYS_kevent_id:
5036 case SYS_workq_kernreturn:
5037 case SYS_bsdthread_ctl:
5038 return true;
5039 }
5040 }
5041 return false;
5042 }
5043
5044 /*
5045 * Interact with the pthread kext to request a servicing there at a specific QoS
5046 * level.
5047 *
5048 * - Caller holds the workq request lock
5049 *
5050 * - May be called with the kqueue's wait queue set locked,
5051 * so cannot do anything that could recurse on that.
5052 */
5053 static void
5054 kqueue_threadreq_initiate(struct kqueue *kq, workq_threadreq_t kqr,
5055 kq_index_t qos, int flags)
5056 {
5057 assert(kqr->tr_kq_wakeup);
5058 assert(kqr_thread(kqr) == THREAD_NULL);
5059 assert(!kqr_thread_requested(kqr));
5060 struct turnstile *ts = TURNSTILE_NULL;
5061
5062 if (workq_is_exiting(kq->kq_p)) {
5063 return;
5064 }
5065
5066 kqlock_held(kq);
5067
5068 if (kq->kq_state & KQ_WORKLOOP) {
5069 __assert_only struct kqworkloop *kqwl = (struct kqworkloop *)kq;
5070
5071 assert(kqwl->kqwl_owner == THREAD_NULL);
5072 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST),
5073 kqwl->kqwl_dynamicid, 0, qos, kqr->tr_kq_wakeup);
5074 ts = kqwl->kqwl_turnstile;
5075 /* Add a thread request reference on the kqueue. */
5076 kqworkloop_retain(kqwl);
5077 } else {
5078 assert(kq->kq_state & KQ_WORKQ);
5079 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST),
5080 -1, 0, qos, kqr->tr_kq_wakeup);
5081 }
5082
5083 /*
5084 * New-style thread request supported.
5085 * Provide the pthread kext a pointer to a workq_threadreq_s structure for
5086 * its use until a corresponding kqueue_threadreq_bind callback.
5087 */
5088 if (kqueue_threadreq_can_use_ast(kq)) {
5089 flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
5090 }
5091 if (qos == KQWQ_QOS_MANAGER) {
5092 qos = WORKQ_THREAD_QOS_MANAGER;
5093 }
5094 if (!workq_kern_threadreq_initiate(kq->kq_p, kqr, ts, qos, flags)) {
5095 /*
5096 * Process is shutting down or exec'ing.
5097 * All the kqueues are going to be cleaned up
5098 * soon. Forget we even asked for a thread -
5099 * and make sure we don't ask for more.
5100 */
5101 kq->kq_state &= ~KQ_R2K_ARMED;
5102 kqueue_release_live(kq);
5103 }
5104 }
5105
5106 /*
5107 * kqueue_threadreq_bind_prepost - prepost the bind to kevent
5108 *
5109 * This is used when kqueue_threadreq_bind may cause a lock inversion.
5110 */
5111 __attribute__((always_inline))
5112 void
5113 kqueue_threadreq_bind_prepost(struct proc *p __unused, workq_threadreq_t kqr,
5114 struct uthread *ut)
5115 {
5116 ut->uu_kqr_bound = kqr;
5117 kqr->tr_thread = ut->uu_thread;
5118 kqr->tr_state = WORKQ_TR_STATE_BINDING;
5119 }
5120
5121 /*
5122 * kqueue_threadreq_bind_commit - commit a bind prepost
5123 *
5124 * The workq code has to commit any binding prepost before the thread has
5125 * a chance to come back to userspace (and do kevent syscalls) or be aborted.
5126 */
5127 void
5128 kqueue_threadreq_bind_commit(struct proc *p, thread_t thread)
5129 {
5130 struct uthread *ut = get_bsdthread_info(thread);
5131 workq_threadreq_t kqr = ut->uu_kqr_bound;
5132 kqueue_t kqu = kqr_kqueue(p, kqr);
5133
5134 kqlock(kqu);
5135 if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
5136 kqueue_threadreq_bind(p, kqr, thread, 0);
5137 }
5138 kqunlock(kqu);
5139 }
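
/*
 * Added note (illustrative; the calling sequence is an assumption about the
 * workq code, not taken from this file): the two-phase bind is expected to be
 * driven as
 *
 *     kqueue_threadreq_bind_prepost(p, kqr, ut);   // tr_state = BINDING
 *     ...finish setting up / unblocking the thread...
 *     kqueue_threadreq_bind_commit(p, thread);     // tr_state = BOUND
 *
 * where the commit is a no-op if kqueue_threadreq_bind() already ran for this
 * request (tr_state no longer BINDING).
 */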
5140
5141 static void
5142 kqueue_threadreq_modify(kqueue_t kqu, workq_threadreq_t kqr, kq_index_t qos,
5143 workq_kern_threadreq_flags_t flags)
5144 {
5145 assert(kqr_thread_requested_pending(kqr));
5146
5147 kqlock_held(kqu);
5148
5149 if (kqueue_threadreq_can_use_ast(kqu.kq)) {
5150 flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
5151 }
5152 workq_kern_threadreq_modify(kqu.kq->kq_p, kqr, qos, flags);
5153 }
5154
5155 /*
5156 * kqueue_threadreq_bind - bind thread to processing kqrequest
5157 *
5158 * The provided thread will be responsible for delivering events
5159 * associated with the given kqrequest. Bind it and get ready for
5160 * the thread to eventually arrive.
5161 */
5162 void
5163 kqueue_threadreq_bind(struct proc *p, workq_threadreq_t kqr, thread_t thread,
5164 unsigned int flags)
5165 {
5166 kqueue_t kqu = kqr_kqueue(p, kqr);
5167 struct uthread *ut = get_bsdthread_info(thread);
5168
5169 kqlock_held(kqu);
5170
5171 assert(ut->uu_kqueue_override == 0);
5172
5173 if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
5174 assert(ut->uu_kqr_bound == kqr);
5175 assert(kqr->tr_thread == thread);
5176 } else {
5177 assert(kqr_thread_requested_pending(kqr));
5178 assert(kqr->tr_thread == THREAD_NULL);
5179 assert(ut->uu_kqr_bound == NULL);
5180 ut->uu_kqr_bound = kqr;
5181 kqr->tr_thread = thread;
5182 }
5183
5184 kqr->tr_state = WORKQ_TR_STATE_BOUND;
5185
5186 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5187 struct turnstile *ts = kqu.kqwl->kqwl_turnstile;
5188
5189 if (__improbable(thread == kqu.kqwl->kqwl_owner)) {
5190 /*
5191 * <rdar://problem/38626999> shows that asserting here is not ok.
5192 *
5193 * This is not supposed to happen for correct use of the interface,
5194 * but it is sadly possible for userspace (with the help of memory
5195 * corruption, such as over-release of a dispatch queue) to make
5196 * the creator thread the "owner" of a workloop.
5197 *
5198 * Once that happens, and that creator thread picks up the same
5199 * workloop as a servicer, we trip this codepath. We need to fixup
5200 * the state to forget about this thread being the owner, as the
5201 * entire workloop state machine expects servicers to never be
5202 * owners and everything would basically go downhill from here.
5203 */
5204 kqu.kqwl->kqwl_owner = THREAD_NULL;
5205 if (kqworkloop_override(kqu.kqwl)) {
5206 thread_drop_kevent_override(thread);
5207 }
5208 }
5209
5210 if (ts && (flags & KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE) == 0) {
5211 /*
5212 * Past this point, the interlock is the kq req lock again,
5213 * so we can fix the inheritor for good.
5214 */
5215 filt_wlupdate_inheritor(kqu.kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
5216 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
5217 }
5218
5219 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_BIND), kqu.kqwl->kqwl_dynamicid,
5220 thread_tid(thread), kqr->tr_kq_qos_index,
5221 (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
5222
5223 ut->uu_kqueue_override = kqr->tr_kq_override_index;
5224 if (kqr->tr_kq_override_index) {
5225 thread_add_servicer_override(thread, kqr->tr_kq_override_index);
5226 }
5227 } else {
5228 assert(kqr->tr_kq_override_index == 0);
5229
5230 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_BIND), -1,
5231 thread_tid(thread), kqr->tr_kq_qos_index,
5232 (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
5233 }
5234 }
5235
5236 /*
5237 * kqueue_threadreq_cancel - abort a pending thread request
5238 *
5239 * Called when exiting/exec'ing. Forget our pending request.
5240 */
5241 void
5242 kqueue_threadreq_cancel(struct proc *p, workq_threadreq_t kqr)
5243 {
5244 kqueue_release(kqr_kqueue(p, kqr));
5245 }
5246
5247 workq_threadreq_param_t
5248 kqueue_threadreq_workloop_param(workq_threadreq_t kqr)
5249 {
5250 struct kqworkloop *kqwl;
5251 workq_threadreq_param_t trp;
5252
5253 assert(kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
5254 kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
5255 trp.trp_value = kqwl->kqwl_params;
5256 return trp;
5257 }
5258
5259 /*
5260 * kqueue_threadreq_unbind - unbind thread from processing kqueue
5261 *
5262 * End processing the per-QoS bucket of events and allow other threads
5263 * to be requested for future servicing.
5264 *
5265 * caller holds a reference on the kqueue.
5266 */
5267 void
5268 kqueue_threadreq_unbind(struct proc *p, workq_threadreq_t kqr)
5269 {
5270 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
5271 kqworkloop_unbind(kqr_kqworkloop(kqr));
5272 } else {
5273 kqworkq_unbind(p, kqr);
5274 }
5275 }
5276
5277 /*
5278 * If we aren't already busy processing events [for this QoS],
5279 * request workq thread support as appropriate.
5280 *
5281 * TBD - for now, we don't segregate out processing by QoS.
5282 *
5283 * - May be called with the kqueue's wait queue set locked,
5284 * so cannot do anything that could recurse on that.
5285 */
5286 static void
5287 kqworkq_wakeup(struct kqworkq *kqwq, kq_index_t qos_index)
5288 {
5289 workq_threadreq_t kqr = kqworkq_get_request(kqwq, qos_index);
5290
5291 /* convert to thread qos value */
5292 assert(qos_index < KQWQ_NBUCKETS);
5293
5294 if (!kqr->tr_kq_wakeup) {
5295 kqr->tr_kq_wakeup = true;
5296 if (!kqr_thread_requested(kqr)) {
5297 kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr, qos_index, 0);
5298 }
5299 }
5300 }
5301
5302 /*
5303 * This represents the asynchronous QoS a given workloop contributes,
5304 * hence is the max of the current active knotes (override index)
5305 * and the workloop max qos (userspace async qos).
5306 */
5307 static kq_index_t
5308 kqworkloop_override(struct kqworkloop *kqwl)
5309 {
5310 workq_threadreq_t kqr = &kqwl->kqwl_request;
5311 return MAX(kqr->tr_kq_qos_index, kqr->tr_kq_override_index);
5312 }
5313
5314 static inline void
5315 kqworkloop_request_fire_r2k_notification(struct kqworkloop *kqwl)
5316 {
5317 workq_threadreq_t kqr = &kqwl->kqwl_request;
5318
5319 kqlock_held(kqwl);
5320
5321 if (kqwl->kqwl_state & KQ_R2K_ARMED) {
5322 kqwl->kqwl_state &= ~KQ_R2K_ARMED;
5323 act_set_astkevent(kqr_thread_fast(kqr), AST_KEVENT_RETURN_TO_KERNEL);
5324 }
5325 }
5326
5327 static void
5328 kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos)
5329 {
5330 workq_threadreq_t kqr = &kqwl->kqwl_request;
5331 struct kqueue *kq = &kqwl->kqwl_kqueue;
5332 kq_index_t old_override = kqworkloop_override(kqwl);
5333 kq_index_t i;
5334
5335 kqlock_held(kqwl);
5336
5337 switch (op) {
5338 case KQWL_UTQ_UPDATE_WAKEUP_QOS:
5339 if (qos == KQWL_BUCKET_STAYACTIVE) {
5340 /*
5341 * the KQWL_BUCKET_STAYACTIVE is not a QoS bucket; we only remember
5342 * a high watermark (kqwl_stayactive_qos) of any stay active knote
5343 * that was ever registered with this workloop.
5344 *
5345 * When waitq_set__CALLING_PREPOST_HOOK__() wakes up any stay active
5346 * knote, we use this high-watermark as a wakeup-index, and also set
5347 * the magic KQWL_BUCKET_STAYACTIVE bit to make sure we remember
5348 * there is at least one stay active knote fired until the next full
5349 * processing of this bucket.
5350 */
5351 kqwl->kqwl_wakeup_indexes |= KQWL_STAYACTIVE_FIRED_BIT;
5352 qos = kqwl->kqwl_stayactive_qos;
5353 assert(qos);
5354 }
5355 if (kqwl->kqwl_wakeup_indexes & (1 << qos)) {
5356 assert(kqr->tr_kq_wakeup);
5357 break;
5358 }
5359
5360 kqwl->kqwl_wakeup_indexes |= (1 << qos);
5361 kqr->tr_kq_wakeup = true;
5362 kqworkloop_request_fire_r2k_notification(kqwl);
5363 goto recompute;
5364
5365 case KQWL_UTQ_UPDATE_STAYACTIVE_QOS:
5366 assert(qos);
5367 if (kqwl->kqwl_stayactive_qos < qos) {
5368 kqwl->kqwl_stayactive_qos = qos;
5369 if (kqwl->kqwl_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT) {
5370 assert(kqr->tr_kq_wakeup);
5371 kqwl->kqwl_wakeup_indexes |= (1 << qos);
5372 goto recompute;
5373 }
5374 }
5375 break;
5376
5377 case KQWL_UTQ_PARKING:
5378 case KQWL_UTQ_UNBINDING:
5379 kqr->tr_kq_override_index = qos;
5380 OS_FALLTHROUGH;
5381 case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS:
5382 if (op == KQWL_UTQ_RECOMPUTE_WAKEUP_QOS) {
5383 assert(qos == THREAD_QOS_UNSPECIFIED);
5384 }
5385 i = KQWL_BUCKET_STAYACTIVE;
5386 if (TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
5387 kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
5388 }
5389 if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i]) &&
5390 (kqwl->kqwl_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT)) {
5391 /*
5392 * If the KQWL_STAYACTIVE_FIRED_BIT is set, it means a stay active
5393 * knote may have fired, so we need to merge in kqwl_stayactive_qos.
5394 *
5395 * Unlike other buckets, this one is never empty but could be idle.
5396 */
5397 kqwl->kqwl_wakeup_indexes &= KQWL_STAYACTIVE_FIRED_BIT;
5398 kqwl->kqwl_wakeup_indexes |= (1 << kqwl->kqwl_stayactive_qos);
5399 } else {
5400 kqwl->kqwl_wakeup_indexes = 0;
5401 }
5402 for (i = THREAD_QOS_UNSPECIFIED + 1; i < KQWL_BUCKET_STAYACTIVE; i++) {
5403 if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i])) {
5404 kqwl->kqwl_wakeup_indexes |= (1 << i);
5405 }
5406 }
5407 if (kqwl->kqwl_wakeup_indexes) {
5408 kqr->tr_kq_wakeup = true;
5409 kqworkloop_request_fire_r2k_notification(kqwl);
5410 } else {
5411 kqr->tr_kq_wakeup = false;
5412 }
5413 goto recompute;
5414
5415 case KQWL_UTQ_RESET_WAKEUP_OVERRIDE:
5416 kqr->tr_kq_override_index = qos;
5417 goto recompute;
5418
5419 case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE:
5420 recompute:
5421 /*
5422 * When modifying the wakeup QoS or the override QoS, we always need to
5423 * maintain our invariant that kqr_override_index is at least as large
5424 * as the highest QoS for which an event is fired.
5425 *
5426 * However this override index can be larger when there is an overridden
5427 * suppressed knote pushing on the kqueue.
5428 */
5429 if (kqwl->kqwl_wakeup_indexes > (1 << qos)) {
5430 qos = (uint8_t)(fls(kqwl->kqwl_wakeup_indexes) - 1); /* fls is 1-based */
5431 }
5432 if (kqr->tr_kq_override_index < qos) {
5433 kqr->tr_kq_override_index = qos;
5434 }
5435 break;
5436
5437 case KQWL_UTQ_REDRIVE_EVENTS:
5438 break;
5439
5440 case KQWL_UTQ_SET_QOS_INDEX:
5441 kqr->tr_kq_qos_index = qos;
5442 break;
5443
5444 default:
5445 panic("unknown kqwl thread qos update operation: %d", op);
5446 }
5447
5448 thread_t kqwl_owner = kqwl->kqwl_owner;
5449 thread_t servicer = kqr_thread(kqr);
5450 boolean_t qos_changed = FALSE;
5451 kq_index_t new_override = kqworkloop_override(kqwl);
5452
5453 /*
5454 * Apply the diffs to the owner if applicable
5455 */
5456 if (kqwl_owner) {
5457 #if 0
5458 /* JMM - need new trace hooks for owner overrides */
5459 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST),
5460 kqwl->kqwl_dynamicid, thread_tid(kqwl_owner), kqr->tr_kq_qos_index,
5461 (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
5462 #endif
5463 if (new_override == old_override) {
5464 // nothing to do
5465 } else if (old_override == THREAD_QOS_UNSPECIFIED) {
5466 thread_add_kevent_override(kqwl_owner, new_override);
5467 } else if (new_override == THREAD_QOS_UNSPECIFIED) {
5468 thread_drop_kevent_override(kqwl_owner);
5469 } else { /* old_override != new_override */
5470 thread_update_kevent_override(kqwl_owner, new_override);
5471 }
5472 }
5473
5474 /*
5475 * apply the diffs to the servicer
5476 */
5477 if (!kqr_thread_requested(kqr)) {
5478 /*
5479 * No servicer, nor thread-request
5480 *
5481 * Make a new thread request, unless there is an owner (or the workloop
5482 * is suspended in userland) or if there is no asynchronous work in the
5483 * first place.
5484 */
5485
5486 if (kqwl_owner == NULL && kqr->tr_kq_wakeup) {
5487 int initiate_flags = 0;
5488 if (op == KQWL_UTQ_UNBINDING) {
5489 initiate_flags = WORKQ_THREADREQ_ATTEMPT_REBIND;
5490 }
5491 kqueue_threadreq_initiate(kq, kqr, new_override, initiate_flags);
5492 }
5493 } else if (servicer) {
5494 /*
5495 * Servicer in flight
5496 *
5497 * Just apply the diff to the servicer
5498 */
5499 struct uthread *ut = get_bsdthread_info(servicer);
5500 if (ut->uu_kqueue_override != new_override) {
5501 if (ut->uu_kqueue_override == THREAD_QOS_UNSPECIFIED) {
5502 thread_add_servicer_override(servicer, new_override);
5503 } else if (new_override == THREAD_QOS_UNSPECIFIED) {
5504 thread_drop_servicer_override(servicer);
5505 } else { /* ut->uu_kqueue_override != new_override */
5506 thread_update_servicer_override(servicer, new_override);
5507 }
5508 ut->uu_kqueue_override = new_override;
5509 qos_changed = TRUE;
5510 }
5511 } else if (new_override == THREAD_QOS_UNSPECIFIED) {
5512 /*
5513 * No events to deliver anymore.
5514 *
5515 * However canceling with turnstiles is challenging, so the fact that
5516 * the request isn't useful will be discovered by the servicer itself
5517 * later on.
5518 */
5519 } else if (old_override != new_override) {
5520 /*
5521 * Request is in flight
5522 *
5523 * Apply the diff to the thread request
5524 */
5525 kqueue_threadreq_modify(kq, kqr, new_override, WORKQ_THREADREQ_NONE);
5526 qos_changed = TRUE;
5527 }
5528
5529 if (qos_changed) {
5530 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST), kqwl->kqwl_dynamicid,
5531 thread_tid(servicer), kqr->tr_kq_qos_index,
5532 (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
5533 }
5534 }
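
/*
 * Added worked example (illustrative): if events are pending at QoS 1 and
 * QoS 3, kqwl_wakeup_indexes is (1 << 1) | (1 << 3) = 0x0a; fls(0x0a) returns
 * 4, so the "recompute" step raises tr_kq_override_index to at least 3, the
 * highest QoS with a fired event, preserving the invariant documented above.
 */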
5535
5536 static void
5537 kqworkloop_wakeup(struct kqworkloop *kqwl, kq_index_t qos)
5538 {
5539 if ((kqwl->kqwl_state & KQ_PROCESSING) &&
5540 kqr_thread(&kqwl->kqwl_request) == current_thread()) {
5541 /*
5542 * kqworkloop_end_processing() will perform the required QoS
5543 * computations when it unsets the processing mode.
5544 */
5545 return;
5546 }
5547
5548 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS, qos);
5549 }
5550
5551 static struct kqtailq *
5552 kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn)
5553 {
5554 if (kq.kq->kq_state & KQ_WORKLOOP) {
5555 return &kq.kqwl->kqwl_suppressed;
5556 } else if (kq.kq->kq_state & KQ_WORKQ) {
5557 return &kq.kqwq->kqwq_suppressed[kn->kn_qos_index];
5558 } else {
5559 return &kq.kqf->kqf_suppressed;
5560 }
5561 }
5562
5563 struct turnstile *
5564 kqueue_alloc_turnstile(kqueue_t kqu)
5565 {
5566 struct kqworkloop *kqwl = kqu.kqwl;
5567 kq_state_t kq_state;
5568
5569 kq_state = os_atomic_load(&kqu.kq->kq_state, dependency);
5570 if (kq_state & KQ_HAS_TURNSTILE) {
5571 /* force a dependency to pair with the atomic or with release below */
5572 return os_atomic_load_with_dependency_on(&kqwl->kqwl_turnstile,
5573 (uintptr_t)kq_state);
5574 }
5575
5576 if (!(kq_state & KQ_WORKLOOP)) {
5577 return TURNSTILE_NULL;
5578 }
5579
5580 struct turnstile *ts = turnstile_alloc(), *free_ts = TURNSTILE_NULL;
5581 bool workq_locked = false;
5582
5583 kqlock(kqu);
5584
5585 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
5586 workq_locked = true;
5587 workq_kern_threadreq_lock(kqwl->kqwl_p);
5588 }
5589
5590 if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
5591 free_ts = ts;
5592 ts = kqwl->kqwl_turnstile;
5593 } else {
5594 ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
5595 ts, TURNSTILE_WORKLOOPS);
5596
5597 /* release-barrier to pair with the unlocked load of kqwl_turnstile above */
5598 os_atomic_or(&kqwl->kqwl_state, KQ_HAS_TURNSTILE, release);
5599
5600 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
5601 workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
5602 &kqwl->kqwl_request, kqwl->kqwl_owner,
5603 ts, TURNSTILE_IMMEDIATE_UPDATE);
5604 /*
5605 * The workq may no longer be the interlock after this.
5606 * In which case the inheritor wasn't updated.
5607 */
5608 }
5609 if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
5610 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
5611 }
5612 }
5613
5614 if (workq_locked) {
5615 workq_kern_threadreq_unlock(kqwl->kqwl_p);
5616 }
5617
5618 kqunlock(kqu);
5619
5620 if (free_ts) {
5621 turnstile_deallocate(free_ts);
5622 } else {
5623 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
5624 }
5625 return ts;
5626 }
5627
5628 __attribute__((always_inline))
5629 struct turnstile *
5630 kqueue_turnstile(kqueue_t kqu)
5631 {
5632 kq_state_t kq_state = os_atomic_load(&kqu.kq->kq_state, relaxed);
5633 if (kq_state & KQ_WORKLOOP) {
5634 return os_atomic_load(&kqu.kqwl->kqwl_turnstile, relaxed);
5635 }
5636 return TURNSTILE_NULL;
5637 }
5638
5639 __attribute__((always_inline))
5640 struct turnstile *
5641 kqueue_threadreq_get_turnstile(workq_threadreq_t kqr)
5642 {
5643 struct kqworkloop *kqwl = kqr_kqworkloop(kqr);
5644 if (kqwl) {
5645 return os_atomic_load(&kqwl->kqwl_turnstile, relaxed);
5646 }
5647 return TURNSTILE_NULL;
5648 }
5649
5650 static void
5651 kqworkloop_set_overcommit(struct kqworkloop *kqwl)
5652 {
5653 workq_threadreq_t kqr = &kqwl->kqwl_request;
5654
5655 /*
5656 * This test is racy, but since we never remove this bit,
5657 * it allows us to avoid taking a lock.
5658 */
5659 if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
5660 return;
5661 }
5662
5663 kqlock_held(kqwl);
5664
5665 if (kqr_thread_requested_pending(kqr)) {
5666 kqueue_threadreq_modify(kqwl, kqr, kqr->tr_qos,
5667 WORKQ_THREADREQ_MAKE_OVERCOMMIT);
5668 } else {
5669 kqr->tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
5670 }
5671 }
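/*
 * Illustrative sketch of the "sticky bit" pattern used above (hypothetical
 * names, not part of this file): because the flag is set once and never
 * cleared, the unlocked test can only produce a false negative, and the
 * locked path simply sets the bit again, which is harmless.
 *
 *	static void
 *	obj_mark_sticky(struct obj *o)
 *	{
 *		if (o->o_flags & OBJ_STICKY) {
 *			return;                  // already set; no lock needed
 *		}
 *		lck_mtx_lock(&o->o_lock);
 *		o->o_flags |= OBJ_STICKY;        // idempotent under the lock
 *		lck_mtx_unlock(&o->o_lock);
 *	}
 */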
5672
5673 static void
5674 kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn,
5675 kq_index_t override_index)
5676 {
5677 workq_threadreq_t kqr;
5678 kq_index_t old_override_index;
5679 kq_index_t queue_index = kn->kn_qos_index;
5680
5681 if (override_index <= queue_index) {
5682 return;
5683 }
5684
5685 kqr = kqworkq_get_request(kqwq, queue_index);
5686
5687 kqlock_held(kqwq);
5688
5689 old_override_index = kqr->tr_kq_override_index;
5690 if (override_index > MAX(kqr->tr_kq_qos_index, old_override_index)) {
5691 thread_t servicer = kqr_thread(kqr);
5692 kqr->tr_kq_override_index = override_index;
5693
5694 /* apply the override to the (possibly incoming) servicing thread */
5695 if (servicer) {
5696 if (old_override_index) {
5697 thread_update_kevent_override(servicer, override_index);
5698 } else {
5699 thread_add_kevent_override(servicer, override_index);
5700 }
5701 }
5702 }
5703 }
5704
5705 static void
5706 kqueue_update_override(kqueue_t kqu, struct knote *kn, thread_qos_t qos)
5707 {
5708 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5709 kqworkloop_update_threads_qos(kqu.kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
5710 qos);
5711 } else {
5712 kqworkq_update_override(kqu.kqwq, kn, qos);
5713 }
5714 }
5715
5716 static void
5717 kqworkloop_unbind_locked(struct kqworkloop *kqwl, thread_t thread,
5718 enum kqwl_unbind_locked_mode how)
5719 {
5720 struct uthread *ut = get_bsdthread_info(thread);
5721 workq_threadreq_t kqr = &kqwl->kqwl_request;
5722
5723 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND), kqwl->kqwl_dynamicid,
5724 thread_tid(thread), 0, 0);
5725
5726 kqlock_held(kqwl);
5727
5728 assert(ut->uu_kqr_bound == kqr);
5729 ut->uu_kqr_bound = NULL;
5730 if (how == KQWL_OVERRIDE_DROP_IMMEDIATELY &&
5731 ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
5732 thread_drop_servicer_override(thread);
5733 ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
5734 }
5735
5736 if (kqwl->kqwl_owner == NULL && kqwl->kqwl_turnstile) {
5737 turnstile_update_inheritor(kqwl->kqwl_turnstile,
5738 TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
5739 turnstile_update_inheritor_complete(kqwl->kqwl_turnstile,
5740 TURNSTILE_INTERLOCK_HELD);
5741 }
5742
5743 kqr->tr_thread = THREAD_NULL;
5744 kqr->tr_state = WORKQ_TR_STATE_IDLE;
5745 kqwl->kqwl_state &= ~KQ_R2K_ARMED;
5746 }
5747
5748 static void
5749 kqworkloop_unbind_delayed_override_drop(thread_t thread)
5750 {
5751 struct uthread *ut = get_bsdthread_info(thread);
5752 assert(ut->uu_kqr_bound == NULL);
5753 if (ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
5754 thread_drop_servicer_override(thread);
5755 ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
5756 }
5757 }
5758
5759 /*
5760 * kqworkloop_unbind - Unbind the servicer thread of a workloop kqueue
5761 *
5762 * It will acknowledge events, and possibly request a new thread if:
5763 * - there were active events left
5764 * - we pended waitq hook callouts during processing
5765 * - we pended wakeups while processing (or unsuppressing)
5766 *
5767 * Called with kqueue lock held.
5768 */
5769 static void
5770 kqworkloop_unbind(struct kqworkloop *kqwl)
5771 {
5772 struct kqueue *kq = &kqwl->kqwl_kqueue;
5773 workq_threadreq_t kqr = &kqwl->kqwl_request;
5774 thread_t thread = kqr_thread_fast(kqr);
5775 int op = KQWL_UTQ_PARKING;
5776 kq_index_t qos_override = THREAD_QOS_UNSPECIFIED;
5777
5778 assert(thread == current_thread());
5779
5780 kqlock(kqwl);
5781
5782 /*
5783 * Forcing the KQ_PROCESSING flag ensures that QoS updates caused by
5784 * unsuppressing knotes are not applied until the eventual call to
5785 * kqworkloop_update_threads_qos() below.
5786 */
5787 assert((kq->kq_state & KQ_PROCESSING) == 0);
5788 if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
5789 kq->kq_state |= KQ_PROCESSING;
5790 qos_override = kqworkloop_acknowledge_events(kqwl);
5791 kq->kq_state &= ~KQ_PROCESSING;
5792 }
5793
5794 kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
5795 kqworkloop_update_threads_qos(kqwl, op, qos_override);
5796
5797 kqunlock(kqwl);
5798
5799 /*
5800 * Drop the override on the current thread last, after the call to
5801 * kqworkloop_update_threads_qos above.
5802 */
5803 kqworkloop_unbind_delayed_override_drop(thread);
5804
5805 /* If last reference, dealloc the workloop kq */
5806 kqworkloop_release(kqwl);
5807 }
5808
5809 static thread_qos_t
5810 kqworkq_unbind_locked(struct kqworkq *kqwq,
5811 workq_threadreq_t kqr, thread_t thread)
5812 {
5813 struct uthread *ut = get_bsdthread_info(thread);
5814 kq_index_t old_override = kqr->tr_kq_override_index;
5815
5816 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND), -1,
5817 thread_tid(kqr_thread(kqr)), kqr->tr_kq_qos_index, 0);
5818
5819 kqlock_held(kqwq);
5820
5821 assert(ut->uu_kqr_bound == kqr);
5822 ut->uu_kqr_bound = NULL;
5823 kqr->tr_thread = THREAD_NULL;
5824 kqr->tr_state = WORKQ_TR_STATE_IDLE;
5825 kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
5826 kqwq->kqwq_state &= ~KQ_R2K_ARMED;
5827
5828 return old_override;
5829 }
5830
5831 /*
5832 * kqworkq_unbind - unbind of a workq kqueue from a thread
5833 *
5834 * We may have to request new threads.
5835 * This can happen when there are no waiting processing threads and:
5836 * - there were active events we never got to (count > 0)
5837 * - we pended waitq hook callouts during processing
5838 * - we pended wakeups while processing (or unsuppressing)
5839 */
5840 static void
5841 kqworkq_unbind(proc_t p, workq_threadreq_t kqr)
5842 {
5843 struct kqworkq *kqwq = (struct kqworkq *)p->p_fd->fd_wqkqueue;
5844 __assert_only int rc;
5845
5846 kqlock(kqwq);
5847 rc = kqworkq_acknowledge_events(kqwq, kqr, 0, KQWQAE_UNBIND);
5848 assert(rc == -1);
5849 kqunlock(kqwq);
5850 }
5851
5852 workq_threadreq_t
5853 kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
5854 {
5855 assert(qos_index < KQWQ_NBUCKETS);
5856 return &kqwq->kqwq_request[qos_index];
5857 }
5858
5859 static void
5860 knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp)
5861 {
5862 kq_index_t qos = _pthread_priority_thread_qos(pp);
5863
5864 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5865 assert((pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) == 0);
5866 pp = _pthread_priority_normalize(pp);
5867 } else if (kqu.kq->kq_state & KQ_WORKQ) {
5868 if (qos == THREAD_QOS_UNSPECIFIED) {
5869 /* On workqueues, outside of QoS means MANAGER */
5870 qos = KQWQ_QOS_MANAGER;
5871 pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
5872 } else {
5873 pp = _pthread_priority_normalize(pp);
5874 }
5875 } else {
5876 pp = _pthread_unspecified_priority();
5877 qos = THREAD_QOS_UNSPECIFIED;
5878 }
5879
5880 kn->kn_qos = (int32_t)pp;
5881
5882 if ((kn->kn_status & KN_MERGE_QOS) == 0 || qos > kn->kn_qos_override) {
5883 /* Never lower QoS when in "Merge" mode */
5884 kn->kn_qos_override = qos;
5885 }
5886
5887 /* only adjust in-use qos index when not suppressed */
5888 if (kn->kn_status & KN_SUPPRESSED) {
5889 kqueue_update_override(kqu, kn, qos);
5890 } else if (kn->kn_qos_index != qos) {
5891 knote_dequeue(kqu, kn);
5892 kn->kn_qos_index = qos;
5893 }
5894 }
5895
5896 static void
5897 knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result)
5898 {
5899 thread_qos_t qos_index = (result >> FILTER_ADJUST_EVENT_QOS_SHIFT) & 7;
5900
5901 kqlock_held(kq);
5902
5903 assert(result & FILTER_ADJUST_EVENT_QOS_BIT);
5904 assert(qos_index < THREAD_QOS_LAST);
5905
5906 /*
5907 * Early exit for knotes that should not change QoS
5908 */
5909 if (__improbable(!knote_fops(kn)->f_adjusts_qos)) {
5910 panic("filter %d cannot change QoS", kn->kn_filtid);
5911 } else if (__improbable(!knote_has_qos(kn))) {
5912 return;
5913 }
5914
5915 /*
5916 * knotes with the FALLBACK flag will only use their registration QoS if the
5917 * incoming event has no QoS; otherwise the registration QoS acts as a floor (see the worked example after this function).
5918 */
5919 thread_qos_t req_qos = _pthread_priority_thread_qos_fast(kn->kn_qos);
5920 if (kn->kn_qos & _PTHREAD_PRIORITY_FALLBACK_FLAG) {
5921 if (qos_index == THREAD_QOS_UNSPECIFIED) {
5922 qos_index = req_qos;
5923 }
5924 } else {
5925 if (qos_index < req_qos) {
5926 qos_index = req_qos;
5927 }
5928 }
5929 if ((kn->kn_status & KN_MERGE_QOS) && (qos_index < kn->kn_qos_override)) {
5930 /* Never lower QoS when in "Merge" mode */
5931 return;
5932 }
5933
5934 if ((kn->kn_status & KN_LOCKED) && (kn->kn_status & KN_POSTING)) {
5935 /*
5936 * When we're trying to update the QoS override while both an
5937 * f_event() and other f_* calls are running concurrently, any of these
5938 * in flight calls may want to perform overrides that aren't properly
5939 * serialized with each other.
5940 *
5941 * The first update that observes this racy situation enters a "Merge"
5942 * mode which causes subsequent override requests to saturate the
5943 * override instead of replacing its value.
5944 *
5945 * This mode is left when knote_unlock() or knote_post()
5946 * observe that no other f_* routine is in flight.
5947 */
5948 kn->kn_status |= KN_MERGE_QOS;
5949 }
5950
5951 /*
5952 * Now apply the override if it changed.
5953 */
5954
5955 if (kn->kn_qos_override == qos_index) {
5956 return;
5957 }
5958
5959 kn->kn_qos_override = qos_index;
5960
5961 if (kn->kn_status & KN_SUPPRESSED) {
5962 /*
5963 * For suppressed events, the kn_qos_index field cannot be touched as it
5964 * tells us which suppress queue the knote is on for a kqworkq.
5965 *
5966 * Also, there's no natural push applied on the kqueues when this field
5967 * changes anyway. We hence need to apply manual overrides in this case,
5968 * which will be cleared when the events are later acknowledged.
5969 */
5970 kqueue_update_override(kq, kn, qos_index);
5971 } else if (kn->kn_qos_index != qos_index) {
5972 knote_dequeue(kq, kn);
5973 kn->kn_qos_index = qos_index;
5974 }
5975 }
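/*
 * Worked example of the floor/fallback rule above (illustrative values):
 * with a registration QoS of USER_INITIATED and no FALLBACK flag, an event
 * posted at UTILITY is raised to USER_INITIATED (floor), and an event with
 * no QoS is also treated as USER_INITIATED.  With
 * _PTHREAD_PRIORITY_FALLBACK_FLAG set instead, the UTILITY event keeps
 * UTILITY and only the event with no QoS falls back to USER_INITIATED.
 */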
5976
5977 /*
5978 * Called back from waitq code when no threads waiting and the hook was set.
5979 *
5980 * Preemption is disabled - minimal work can be done in this context!!!
5981 */
5982 void
5983 waitq_set__CALLING_PREPOST_HOOK__(waitq_set_prepost_hook_t *kq_hook)
5984 {
5985 kqueue_t kqu;
5986
5987 kqu.kq = __container_of(kq_hook, struct kqueue, kq_waitq_hook);
5988 assert(kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));
5989
5990 kqlock(kqu);
5991
5992 if (kqu.kq->kq_count > 0) {
5993 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5994 kqworkloop_wakeup(kqu.kqwl, KQWL_BUCKET_STAYACTIVE);
5995 } else {
5996 kqworkq_wakeup(kqu.kqwq, KQWQ_QOS_MANAGER);
5997 }
5998 }
5999
6000 kqunlock(kqu);
6001 }
6002
6003 void
6004 klist_init(struct klist *list)
6005 {
6006 SLIST_INIT(list);
6007 }
6008
6009
6010 /*
6011 * Query/Post each knote in the object's list
6012 *
6013 * The object lock protects the list. It is assumed
6014 * that the filter/event routine for the object can
6015 * determine that the object is already locked (via
6016 * the hint) and not deadlock itself.
6017 *
6018 * The object lock should also hold off pending
6019 * detach/drop operations.
6020 */
6021 void
6022 knote(struct klist *list, long hint)
6023 {
6024 struct knote *kn;
6025
6026 SLIST_FOREACH(kn, list, kn_selnext) {
6027 knote_post(kn, hint);
6028 }
6029 }
6030
6031 /*
6032 * attach a knote to the specified list. Return true if this is the first entry.
6033 * The list is protected by whatever lock the object it is associated with uses.
6034 */
6035 int
6036 knote_attach(struct klist *list, struct knote *kn)
6037 {
6038 int ret = SLIST_EMPTY(list);
6039 SLIST_INSERT_HEAD(list, kn, kn_selnext);
6040 return ret;
6041 }
6042
6043 /*
6044 * detach a knote from the specified list. Return true if that was the last entry.
6045 * The list is protected by whatever lock the object it is associated with uses.
6046 */
6047 int
6048 knote_detach(struct klist *list, struct knote *kn)
6049 {
6050 SLIST_REMOVE(list, kn, knote, kn_selnext);
6051 return SLIST_EMPTY(list);
6052 }
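/*
 * Illustrative sketch of how an event source typically uses the klist
 * helpers above (hypothetical source object, not real filter code): the
 * object's own lock protects its klist, knote_attach()/knote_detach() are
 * called from the filter's f_attach/f_detach routines under that lock, and
 * KNOTE() posts a hint to every registered knote when the object changes
 * state.
 *
 *	struct mysrc {
 *		lck_mtx_t     ms_lock;
 *		struct klist  ms_note;          // protected by ms_lock
 *	};
 *
 *	klist_init(&src->ms_note);              // at object creation
 *
 *	knote_attach(&src->ms_note, kn);        // f_attach, ms_lock held
 *
 *	KNOTE(&src->ms_note, hint);             // object state changed, ms_lock held
 *
 *	knote_detach(&src->ms_note, kn);        // f_detach, ms_lock held
 */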
6053
6054 /*
6055 * knote_vanish - Indicate that the source has vanished
6056 *
6057 * If the knote has requested EV_VANISHED delivery,
6058 * arrange for that. Otherwise, deliver a NOTE_REVOKE
6059 * event for backward compatibility.
6060 *
6061 * The knote is marked as having vanished, but is not
6062 * actually detached from the source in this instance.
6063 * The actual detach is deferred until the knote drop.
6064 *
6065 * Our caller already has the object lock held. Calling
6066 * the detach routine would try to take that lock
6067 * recursively - which likely is not supported.
6068 */
6069 void
6070 knote_vanish(struct klist *list, bool make_active)
6071 {
6072 struct knote *kn;
6073 struct knote *kn_next;
6074
6075 SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
6076 struct kqueue *kq = knote_get_kq(kn);
6077
6078 kqlock(kq);
6079 if (__probable(kn->kn_status & KN_REQVANISH)) {
6080 /*
6081 * If EV_VANISHED is supported - prepare to deliver one
6082 */
6083 kn->kn_status |= KN_VANISHED;
6084 } else {
6085 /*
6086 * Handle the legacy way to indicate that the port/portset was
6087 * deallocated or left the current Mach portspace (modern technique
6088 * is with an EV_VANISHED protocol).
6089 *
6090 * Deliver an EV_EOF event for these changes (hopefully it will get
6091 * delivered before the port name recycles to the same generation
6092 * count and someone tries to re-register a kevent for it or the
6093 * events are udata-specific - avoiding a conflict).
6094 */
6095 kn->kn_flags |= EV_EOF | EV_ONESHOT;
6096 }
6097 if (make_active) {
6098 knote_activate(kq, kn, FILTER_ACTIVE);
6099 }
6100 kqunlock(kq);
6101 }
6102 }
6103
6104 /*
6105 * Force a lazy allocation of the waitqset link
6106 * of the kq_wqs associated with the kn
6107 * if it wasn't already allocated.
6108 *
6109 * This allows knote_link_waitq to never block
6110 * if reserved_link is not NULL.
6111 */
6112 void
6113 knote_link_waitqset_lazy_alloc(struct knote *kn)
6114 {
6115 struct kqueue *kq = knote_get_kq(kn);
6116 waitq_set_lazy_init_link(&kq->kq_wqs);
6117 }
6118
6119 /*
6120 * Check if a lazy allocation for the waitqset link
6121 * of the kq_wqs is needed.
6122 */
6123 boolean_t
6124 knote_link_waitqset_should_lazy_alloc(struct knote *kn)
6125 {
6126 struct kqueue *kq = knote_get_kq(kn);
6127 return waitq_set_should_lazy_init_link(&kq->kq_wqs);
6128 }
6129
6130 /*
6131 * For a given knote, link a provided wait queue directly with the kqueue.
6132 * Wakeups will happen via recursive wait queue support. But nothing will move
6133 * the knote to the active list at wakeup (nothing calls knote()). Instead,
6134 * we permanently enqueue it here.
6135 *
6136 * kqueue and knote references are held by caller.
6137 * waitq locked by caller.
6138 *
6139 * caller provides the wait queue link structure and ensures that the kq->kq_wqs
6140 * is linked by previously calling knote_link_waitqset_lazy_alloc.
6141 */
6142 int
6143 knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link)
6144 {
6145 struct kqueue *kq = knote_get_kq(kn);
6146 kern_return_t kr;
6147
6148 kr = waitq_link(wq, &kq->kq_wqs, WAITQ_ALREADY_LOCKED, reserved_link);
6149 if (kr == KERN_SUCCESS) {
6150 knote_markstayactive(kn);
6151 return 0;
6152 } else {
6153 return EINVAL;
6154 }
6155 }
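/*
 * Illustrative sketch of the calling order the two comments above describe
 * (hypothetical caller, simplified): the waitqset link is lazily allocated
 * and a link id reserved before the waitq lock is taken, so that
 * knote_link_waitq() itself never has to block.
 *
 *	if (knote_link_waitqset_should_lazy_alloc(kn)) {
 *		knote_link_waitqset_lazy_alloc(kn);     // may block, no locks held
 *	}
 *	reserved_link = waitq_link_reserve(wq);         // reserve before locking
 *	waitq_lock(wq);
 *	error = knote_link_waitq(kn, wq, &reserved_link);  // never blocks
 *	waitq_unlock(wq);
 *	waitq_link_release(reserved_link);              // returns any unused reservation
 */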
6156
6157 /*
6158 * Unlink the provided wait queue from the kqueue associated with a knote.
6159 * Also remove it from the magic list of directly attached knotes.
6160 *
6161 * Note that the unlink may have already happened from the other side, so
6162 * ignore any failures to unlink and just remove it from the kqueue list.
6163 *
6164 * On success, caller is responsible for the link structure
6165 */
6166 int
6167 knote_unlink_waitq(struct knote *kn, struct waitq *wq)
6168 {
6169 struct kqueue *kq = knote_get_kq(kn);
6170 kern_return_t kr;
6171
6172 kr = waitq_unlink(wq, &kq->kq_wqs);
6173 knote_clearstayactive(kn);
6174 return (kr != KERN_SUCCESS) ? EINVAL : 0;
6175 }
6176
6177 /*
6178 * remove all knotes referencing a specified fd
6179 *
6180 * Entered with the proc_fd lock already held.
6181 * It returns the same way, but may drop it temporarily.
6182 */
6183 void
6184 knote_fdclose(struct proc *p, int fd)
6185 {
6186 struct klist *list;
6187 struct knote *kn;
6188 KNOTE_LOCK_CTX(knlc);
6189
6190 restart:
6191 list = &p->p_fd->fd_knlist[fd];
6192 SLIST_FOREACH(kn, list, kn_link) {
6193 struct kqueue *kq = knote_get_kq(kn);
6194
6195 kqlock(kq);
6196
6197 if (kq->kq_p != p) {
6198 panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
6199 __func__, kq->kq_p, p);
6200 }
6201
6202 /*
6203 * If the knote supports EV_VANISHED delivery,
6204 * transition it to vanished mode (or skip over
6205 * it if already vanished).
6206 */
6207 if (kn->kn_status & KN_VANISHED) {
6208 kqunlock(kq);
6209 continue;
6210 }
6211
6212 proc_fdunlock(p);
6213 if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
6214 /* the knote was dropped by someone, nothing to do */
6215 } else if (kn->kn_status & KN_REQVANISH) {
6216 kn->kn_status |= KN_VANISHED;
6217
6218 kqunlock(kq);
6219 knote_fops(kn)->f_detach(kn);
6220 if (kn->kn_is_fd) {
6221 fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0);
6222 }
6223 kn->kn_filtid = EVFILTID_DETACHED;
6224 kqlock(kq);
6225
6226 knote_activate(kq, kn, FILTER_ACTIVE);
6227 knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
6228 } else {
6229 knote_drop(kq, kn, &knlc);
6230 }
6231
6232 proc_fdlock(p);
6233 goto restart;
6234 }
6235 }
6236
6237 /*
6238 * knote_fdfind - lookup a knote in the fd table for process
6239 *
6240 * If the filter is file-based, lookup based on fd index.
6241 * Otherwise use a hash based on the ident.
6242 *
6243 * Matching is based on kq, filter, and ident. Optionally,
6244 * it may also be based on the udata field in the kevent -
6245 * allowing multiple event registration for the file object
6246 * per kqueue.
6247 *
6248 * fd_knhashlock or fdlock held on entry (and exit)
6249 */
6250 static struct knote *
6251 knote_fdfind(struct kqueue *kq,
6252 const struct kevent_internal_s *kev,
6253 bool is_fd,
6254 struct proc *p)
6255 {
6256 struct filedesc *fdp = p->p_fd;
6257 struct klist *list = NULL;
6258 struct knote *kn = NULL;
6259
6260 /*
6261 * determine where to look for the knote
6262 */
6263 if (is_fd) {
6264 /* fd-based knotes are linked off the fd table */
6265 if (kev->kei_ident < (u_int)fdp->fd_knlistsize) {
6266 list = &fdp->fd_knlist[kev->kei_ident];
6267 }
6268 } else if (fdp->fd_knhashmask != 0) {
6269 /* hash non-fd knotes here too */
6270 list = &fdp->fd_knhash[KN_HASH((u_long)kev->kei_ident, fdp->fd_knhashmask)];
6271 }
6272
6273 /*
6274 * scan the selected list looking for a match
6275 */
6276 if (list != NULL) {
6277 SLIST_FOREACH(kn, list, kn_link) {
6278 if (kq == knote_get_kq(kn) &&
6279 kev->kei_ident == kn->kn_id &&
6280 kev->kei_filter == kn->kn_filter) {
6281 if (kev->kei_flags & EV_UDATA_SPECIFIC) {
6282 if ((kn->kn_flags & EV_UDATA_SPECIFIC) &&
6283 kev->kei_udata == kn->kn_udata) {
6284 break; /* matching udata-specific knote */
6285 }
6286 } else if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0) {
6287 break; /* matching non-udata-specific knote */
6288 }
6289 }
6290 }
6291 }
6292 return kn;
6293 }
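/*
 * Illustrative userspace sketch of the udata-specific matching above (uses
 * the private kevent_qos_s layout; names are hypothetical): with
 * EV_UDATA_SPECIFIC, two registrations for the same (ident, filter) pair
 * are kept as distinct knotes, told apart by their udata values.  Without
 * the flag, the second registration would update the first knote in place.
 *
 *	struct kevent_qos_s kev[2] = {
 *		{ .ident = fd, .filter = EVFILT_READ,
 *		  .flags = EV_ADD | EV_UDATA_SPECIFIC, .udata = ctx_a },
 *		{ .ident = fd, .filter = EVFILT_READ,
 *		  .flags = EV_ADD | EV_UDATA_SPECIFIC, .udata = ctx_b },
 *	};
 */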
6294
6295 /*
6296 * kq_add_knote - Add knote to the fd table for process
6297 * while checking for duplicates.
6298 *
6299 * All file-based filters associate a list of knotes by file
6300 * descriptor index. All other filters hash the knote by ident.
6301 *
6302 * May have to grow the table of knote lists to cover the
6303 * file descriptor index presented.
6304 *
6305 * fd_knhashlock and fdlock unheld on entry (and exit).
6306 *
6307 * Takes a rwlock boost if inserting the knote is successful.
6308 */
6309 static int
6310 kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc,
6311 struct proc *p)
6312 {
6313 struct filedesc *fdp = p->p_fd;
6314 struct klist *list = NULL;
6315 int ret = 0;
6316 bool is_fd = kn->kn_is_fd;
6317 uint64_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE, TRUE);
6318
6319 if (is_fd) {
6320 proc_fdlock(p);
6321 } else {
6322 knhash_lock(fdp);
6323 }
6324
6325 if (knote_fdfind(kq, &kn->kn_kevent, is_fd, p) != NULL) {
6326 /* found an existing knote: we can't add this one */
6327 ret = ERESTART;
6328 goto out_locked;
6329 }
6330
6331 /* knote was not found: add it now */
6332 if (!is_fd) {
6333 if (fdp->fd_knhashmask == 0) {
6334 u_long size = 0;
6335
6336 list = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE, &size);
6337 if (list == NULL) {
6338 ret = ENOMEM;
6339 goto out_locked;
6340 }
6341
6342 fdp->fd_knhash = list;
6343 fdp->fd_knhashmask = size;
6344 }
6345
6346 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
6347 SLIST_INSERT_HEAD(list, kn, kn_link);
6348 ret = 0;
6349 goto out_locked;
6350 } else {
6351 /* knote is fd based */
6352
6353 if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
6354 u_int size = 0;
6355
6356 /* Make sure that fd stays below current process's soft limit AND system allowed per-process limits */
6357 if (kn->kn_id >= (uint64_t) nofile
6358 || kn->kn_id >= (uint64_t)maxfilesperproc) {
6359 ret = EINVAL;
6360 goto out_locked;
6361 }
6362 /* have to grow the fd_knlist */
6363 size = fdp->fd_knlistsize;
6364 while (size <= kn->kn_id) {
6365 size += KQEXTENT;
6366 }
6367
6368 if (size >= (UINT_MAX / sizeof(struct klist *))) {
6369 ret = EINVAL;
6370 goto out_locked;
6371 }
6372
6373 list = kheap_alloc(KM_KQUEUE, size * sizeof(struct klist *),
6374 Z_WAITOK);
6375 if (list == NULL) {
6376 ret = ENOMEM;
6377 goto out_locked;
6378 }
6379
6380 bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
6381 fdp->fd_knlistsize * sizeof(struct klist *));
6382 bzero((caddr_t)list +
6383 fdp->fd_knlistsize * sizeof(struct klist *),
6384 (size - fdp->fd_knlistsize) * sizeof(struct klist *));
6385 kheap_free(KM_KQUEUE, fdp->fd_knlist,
6386 fdp->fd_knlistsize * sizeof(struct klist *));
6387 fdp->fd_knlist = list;
6388 fdp->fd_knlistsize = size;
6389 }
6390
6391 list = &fdp->fd_knlist[kn->kn_id];
6392 SLIST_INSERT_HEAD(list, kn, kn_link);
6393 ret = 0;
6394 goto out_locked;
6395 }
6396
6397 out_locked:
6398 if (ret == 0) {
6399 kqlock(kq);
6400 assert((kn->kn_status & KN_LOCKED) == 0);
6401 (void)knote_lock(kq, kn, knlc, KNOTE_KQ_UNLOCK);
6402 kqueue_retain(kq); /* retain a kq ref */
6403 }
6404 if (is_fd) {
6405 proc_fdunlock(p);
6406 } else {
6407 knhash_unlock(fdp);
6408 }
6409
6410 return ret;
6411 }
6412
6413 /*
6414 * kq_remove_knote - remove a knote from the fd table for process
6415 *
6416 * If the filter is file-based, remove based on fd index.
6417 * Otherwise remove from the hash based on the ident.
6418 *
6419 * fd_knhashlock and fdlock unheld on entry (and exit).
6420 */
6421 static void
6422 kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p,
6423 struct knote_lock_ctx *knlc)
6424 {
6425 struct filedesc *fdp = p->p_fd;
6426 struct klist *list = NULL;
6427 uint16_t kq_state;
6428 bool is_fd = kn->kn_is_fd;
6429
6430 if (is_fd) {
6431 proc_fdlock(p);
6432 } else {
6433 knhash_lock(fdp);
6434 }
6435
6436 if (is_fd) {
6437 assert((u_int)fdp->fd_knlistsize > kn->kn_id);
6438 list = &fdp->fd_knlist[kn->kn_id];
6439 } else {
6440 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
6441 }
6442 SLIST_REMOVE(list, kn, knote, kn_link);
6443
6444 kqlock(kq);
6445 kq_state = kq->kq_state;
6446 if (knlc) {
6447 knote_unlock_cancel(kq, kn, knlc);
6448 } else {
6449 kqunlock(kq);
6450 }
6451 if (is_fd) {
6452 proc_fdunlock(p);
6453 } else {
6454 knhash_unlock(fdp);
6455 }
6456
6457 if (kq_state & KQ_DYNAMIC) {
6458 kqworkloop_release((struct kqworkloop *)kq);
6459 }
6460 }
6461
6462 /*
6463 * kq_find_knote_and_kq_lock - lookup a knote in the fd table for process
6464 * and, if the knote is found, acquire the kqlock while holding the fd table lock/spinlock.
6465 *
6466 * fd_knhashlock or fdlock unheld on entry (and exit)
6467 */
6468
6469 static struct knote *
6470 kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_qos_s *kev,
6471 bool is_fd, struct proc *p)
6472 {
6473 struct filedesc *fdp = p->p_fd;
6474 struct knote *kn;
6475
6476 if (is_fd) {
6477 proc_fdlock(p);
6478 } else {
6479 knhash_lock(fdp);
6480 }
6481
6482 /*
6483 * Temporary horrible hack:
6484 * this cast is gross and will go away in a future change.
6485 * It is OK to do because we don't look at xflags/s_fflags,
6486 * and because when we cast down the kev this way,
6487 * the truncated filter field still works.
6488 */
6489 kn = knote_fdfind(kq, (struct kevent_internal_s *)kev, is_fd, p);
6490
6491 if (kn) {
6492 kqlock(kq);
6493 assert(knote_get_kq(kn) == kq);
6494 }
6495
6496 if (is_fd) {
6497 proc_fdunlock(p);
6498 } else {
6499 knhash_unlock(fdp);
6500 }
6501
6502 return kn;
6503 }
6504
6505 __attribute__((noinline))
6506 static void
6507 kqfile_wakeup(struct kqfile *kqf, __unused kq_index_t qos)
6508 {
6509 /* flag wakeups during processing */
6510 if (kqf->kqf_state & KQ_PROCESSING) {
6511 kqf->kqf_state |= KQ_WAKEUP;
6512 }
6513
6514 /* wakeup a thread waiting on this queue */
6515 if (kqf->kqf_state & (KQ_SLEEP | KQ_SEL)) {
6516 kqf->kqf_state &= ~(KQ_SLEEP | KQ_SEL);
6517 waitq_wakeup64_all((struct waitq *)&kqf->kqf_wqs, KQ_EVENT,
6518 THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
6519 }
6520
6521 /* wakeup other kqueues/select sets we're inside */
6522 KNOTE(&kqf->kqf_sel.si_note, 0);
6523 }
6524
6525 static struct kqtailq *
6526 knote_get_tailq(kqueue_t kqu, struct knote *kn)
6527 {
6528 kq_index_t qos_index = kn->kn_qos_index;
6529
6530 if (kqu.kq->kq_state & KQ_WORKLOOP) {
6531 assert(qos_index < KQWL_NBUCKETS);
6532 } else if (kqu.kq->kq_state & KQ_WORKQ) {
6533 assert(qos_index < KQWQ_NBUCKETS);
6534 } else {
6535 assert(qos_index == QOS_INDEX_KQFILE);
6536 }
6537 static_assert(offsetof(struct kqueue, kq_queue) == sizeof(struct kqueue),
6538 "struct kqueue::kq_queue must be exactly at the end");
6539 return &kqu.kq->kq_queue[qos_index];
6540 }
6541
6542 static void
6543 knote_enqueue(kqueue_t kqu, struct knote *kn, kn_status_t wakeup_mask)
6544 {
6545 kqlock_held(kqu);
6546
6547 if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0) {
6548 return;
6549 }
6550
6551 if (kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)) {
6552 return;
6553 }
6554
6555 if ((kn->kn_status & KN_QUEUED) == 0) {
6556 struct kqtailq *queue = knote_get_tailq(kqu, kn);
6557
6558 TAILQ_INSERT_TAIL(queue, kn, kn_tqe);
6559 kn->kn_status |= KN_QUEUED;
6560 kqu.kq->kq_count++;
6561 } else if ((kn->kn_status & KN_STAYACTIVE) == 0) {
6562 return;
6563 }
6564
6565 if (kn->kn_status & wakeup_mask) {
6566 if (kqu.kq->kq_state & KQ_WORKLOOP) {
6567 kqworkloop_wakeup(kqu.kqwl, kn->kn_qos_index);
6568 } else if (kqu.kq->kq_state & KQ_WORKQ) {
6569 kqworkq_wakeup(kqu.kqwq, kn->kn_qos_index);
6570 } else {
6571 kqfile_wakeup(kqu.kqf, kn->kn_qos_index);
6572 }
6573 }
6574 }
6575
6576 __attribute__((always_inline))
6577 static inline void
6578 knote_dequeue(kqueue_t kqu, struct knote *kn)
6579 {
6580 if (kn->kn_status & KN_QUEUED) {
6581 struct kqtailq *queue = knote_get_tailq(kqu, kn);
6582
6583 // attaching the knote calls knote_reset_priority() without
6584 // the kqlock held, which is fine, but it means we can only
6585 // assert kqlock_held() when the knote is queued.
6586 kqlock_held(kqu);
6587
6588 TAILQ_REMOVE(queue, kn, kn_tqe);
6589 kn->kn_status &= ~KN_QUEUED;
6590 kqu.kq->kq_count--;
6591 }
6592 }
6593
6594 /* called with kqueue lock held */
6595 static void
6596 knote_suppress(kqueue_t kqu, struct knote *kn)
6597 {
6598 struct kqtailq *suppressq;
6599
6600 kqlock_held(kqu);
6601
6602 assert((kn->kn_status & KN_SUPPRESSED) == 0);
6603 assert(kn->kn_status & KN_QUEUED);
6604
6605 knote_dequeue(kqu, kn);
6606 /* deactivate - so new activations indicate a wakeup */
6607 kn->kn_status &= ~KN_ACTIVE;
6608 kn->kn_status |= KN_SUPPRESSED;
6609 suppressq = kqueue_get_suppressed_queue(kqu, kn);
6610 TAILQ_INSERT_TAIL(suppressq, kn, kn_tqe);
6611 }
6612
6613 __attribute__((always_inline))
6614 static inline void
6615 knote_unsuppress_noqueue(kqueue_t kqu, struct knote *kn)
6616 {
6617 struct kqtailq *suppressq;
6618
6619 kqlock_held(kqu);
6620
6621 assert(kn->kn_status & KN_SUPPRESSED);
6622
6623 kn->kn_status &= ~KN_SUPPRESSED;
6624 suppressq = kqueue_get_suppressed_queue(kqu, kn);
6625 TAILQ_REMOVE(suppressq, kn, kn_tqe);
6626
6627 /*
6628 * If the knote is no longer active, reset its push,
6629 * and resynchronize kn_qos_index with kn_qos_override
6630 * for knotes with a real qos.
6631 */
6632 if ((kn->kn_status & KN_ACTIVE) == 0 && knote_has_qos(kn)) {
6633 kn->kn_qos_override = _pthread_priority_thread_qos_fast(kn->kn_qos);
6634 }
6635 kn->kn_qos_index = kn->kn_qos_override;
6636 }
6637
6638 /* called with kqueue lock held */
6639 static void
6640 knote_unsuppress(kqueue_t kqu, struct knote *kn)
6641 {
6642 if (kn->kn_status & KN_SUPPRESSED) {
6643 knote_unsuppress_noqueue(kqu, kn);
6644
6645 /* don't wakeup if unsuppressing just a stay-active knote */
6646 knote_enqueue(kqu, kn, KN_ACTIVE);
6647 }
6648 }
6649
6650 __attribute__((always_inline))
6651 static inline void
6652 knote_mark_active(struct knote *kn)
6653 {
6654 if ((kn->kn_status & KN_ACTIVE) == 0) {
6655 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_ACTIVATE),
6656 kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
6657 kn->kn_filtid);
6658 }
6659
6660 kn->kn_status |= KN_ACTIVE;
6661 }
6662
6663 /* called with kqueue lock held */
6664 static void
6665 knote_activate(kqueue_t kqu, struct knote *kn, int result)
6666 {
6667 assert(result & FILTER_ACTIVE);
6668 if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
6669 // may dequeue the knote
6670 knote_adjust_qos(kqu.kq, kn, result);
6671 }
6672 knote_mark_active(kn);
6673 knote_enqueue(kqu, kn, KN_ACTIVE | KN_STAYACTIVE);
6674 }
6675
6676 /*
6677 * This function applies changes requested by f_attach or f_touch for
6678 * a given filter. It proceeds in a carefully chosen order to help
6679 * every single transition do the minimal amount of work possible.
6680 */
6681 static void
6682 knote_apply_touch(kqueue_t kqu, struct knote *kn, struct kevent_qos_s *kev,
6683 int result)
6684 {
6685 kn_status_t wakeup_mask = KN_ACTIVE;
6686
6687 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
6688 /*
6689 * When a stayactive knote is reenabled, we may have missed wakeups
6690 * while it was disabled, so we need to poll it. To do so, ask
6691 * knote_enqueue() below to reenqueue it.
6692 */
6693 wakeup_mask |= KN_STAYACTIVE;
6694 kn->kn_status &= ~KN_DISABLED;
6695
6696 /*
6697 * It is possible for userland to have knotes registered for a given
6698 * workloop `wl_orig` but really handled on another workloop `wl_new`.
6699 *
6700 * In that case, rearming will happen from the servicer thread of
6701 * `wl_new`, which, if `wl_orig` is no longer being serviced, would cause
6702 * this knote to stay suppressed forever if we only relied on
6703 * kqworkloop_acknowledge_events to be called by `wl_orig`.
6704 *
6705 * However, if we see the KQ_PROCESSING bit set on `wl_orig`, we can't
6706 * unsuppress, because that would mess with the processing phase of
6707 * `wl_orig`; it does mean, however, that kqworkloop_acknowledge_events()
6708 * will be called.
6709 */
6710 if (__improbable(kn->kn_status & KN_SUPPRESSED)) {
6711 if ((kqu.kq->kq_state & KQ_PROCESSING) == 0) {
6712 knote_unsuppress_noqueue(kqu, kn);
6713 }
6714 }
6715 }
6716
6717 if ((result & FILTER_UPDATE_REQ_QOS) && kev->qos && kev->qos != kn->kn_qos) {
6718 // may dequeue the knote
6719 knote_reset_priority(kqu, kn, kev->qos);
6720 }
6721
6722 /*
6723 * When we unsuppress above, or because of knote_reset_priority(),
6724 * the knote may have been dequeued, we need to restore the invariant
6725 * that if the knote is active it needs to be queued now that
6726 * we're done applying changes.
6727 */
6728 if (result & FILTER_ACTIVE) {
6729 knote_activate(kqu, kn, result);
6730 } else {
6731 knote_enqueue(kqu, kn, wakeup_mask);
6732 }
6733
6734 if ((result & FILTER_THREADREQ_NODEFEER) &&
6735 act_clear_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ)) {
6736 workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
6737 }
6738 }
6739
6740 /*
6741 * knote_drop - disconnect and drop the knote
6742 *
6743 * Called with the kqueue locked, returns with the kqueue unlocked.
6744 *
6745 * If a knote locking context is passed, it is canceled.
6746 *
6747 * The knote may have already been detached from
6748 * (or not yet attached to) its source object.
6749 */
6750 static void
6751 knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc)
6752 {
6753 struct proc *p = kq->kq_p;
6754
6755 kqlock_held(kq);
6756
6757 assert((kn->kn_status & KN_DROPPING) == 0);
6758 if (knlc == NULL) {
6759 assert((kn->kn_status & KN_LOCKED) == 0);
6760 }
6761 kn->kn_status |= KN_DROPPING;
6762
6763 if (kn->kn_status & KN_SUPPRESSED) {
6764 knote_unsuppress_noqueue(kq, kn);
6765 } else {
6766 knote_dequeue(kq, kn);
6767 }
6768 knote_wait_for_post(kq, kn);
6769
6770 knote_fops(kn)->f_detach(kn);
6771
6772 /* kq may be freed when kq_remove_knote() returns */
6773 kq_remove_knote(kq, kn, p, knlc);
6774 if (kn->kn_is_fd && ((kn->kn_status & KN_VANISHED) == 0)) {
6775 fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0);
6776 }
6777
6778 knote_free(kn);
6779 }
6780
6781 void
6782 knote_init(void)
6783 {
6784 #if CONFIG_MEMORYSTATUS
6785 /* Initialize the memorystatus list lock */
6786 memorystatus_kevent_init(&kq_lck_grp, LCK_ATTR_NULL);
6787 #endif
6788 }
6789 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
6790
6791 const struct filterops *
6792 knote_fops(struct knote *kn)
6793 {
6794 return sysfilt_ops[kn->kn_filtid];
6795 }
6796
6797 static struct knote *
6798 knote_alloc(void)
6799 {
6800 return zalloc_flags(knote_zone, Z_WAITOK | Z_ZERO);
6801 }
6802
6803 static void
6804 knote_free(struct knote *kn)
6805 {
6806 assert((kn->kn_status & (KN_LOCKED | KN_POSTING)) == 0);
6807 zfree(knote_zone, kn);
6808 }
6809
6810 #pragma mark - syscalls: kevent, kevent64, kevent_qos, kevent_id
6811
6812 kevent_ctx_t
6813 kevent_get_context(thread_t thread)
6814 {
6815 uthread_t ut = get_bsdthread_info(thread);
6816 return &ut->uu_save.uus_kevent;
6817 }
6818
6819 static inline bool
6820 kevent_args_requesting_events(unsigned int flags, int nevents)
6821 {
6822 return !(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0;
6823 }
6824
6825 static inline int
6826 kevent_adjust_flags_for_proc(proc_t p, int flags)
6827 {
6828 __builtin_assume(p);
6829 return flags | (IS_64BIT_PROCESS(p) ? KEVENT_FLAG_PROC64 : 0);
6830 }
6831
6832 /*!
6833 * @function kevent_get_kqfile
6834 *
6835 * @brief
6836 * Lookup a kqfile by fd.
6837 *
6838 * @discussion
6839 * Callers: kevent, kevent64, kevent_qos
6840 *
6841 * This is not assumed to be a fastpath (kqfile interfaces are legacy)
6842 */
6843 OS_NOINLINE
6844 static int
6845 kevent_get_kqfile(struct proc *p, int fd, int flags,
6846 struct fileproc **fpp, struct kqueue **kqp)
6847 {
6848 int error = 0;
6849 struct kqueue *kq;
6850
6851 error = fp_get_ftype(p, fd, DTYPE_KQUEUE, EBADF, fpp);
6852 if (__improbable(error)) {
6853 return error;
6854 }
6855 kq = (struct kqueue *)(*fpp)->f_data;
6856
6857 uint16_t kq_state = os_atomic_load(&kq->kq_state, relaxed);
6858 if (__improbable((kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) == 0)) {
6859 kqlock(kq);
6860 kq_state = kq->kq_state;
6861 if (!(kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS))) {
6862 if (flags & KEVENT_FLAG_LEGACY32) {
6863 kq_state |= KQ_KEV32;
6864 } else if (flags & KEVENT_FLAG_LEGACY64) {
6865 kq_state |= KQ_KEV64;
6866 } else {
6867 kq_state |= KQ_KEV_QOS;
6868 }
6869 kq->kq_state = kq_state;
6870 }
6871 kqunlock(kq);
6872 }
6873
6874 /*
6875 * kqfiles can't be used through the legacy kevent()
6876 * and other interfaces at the same time.
6877 */
6878 if (__improbable((bool)(flags & KEVENT_FLAG_LEGACY32) !=
6879 (bool)(kq_state & KQ_KEV32))) {
6880 fp_drop(p, fd, *fpp, 0);
6881 return EINVAL;
6882 }
6883
6884 *kqp = kq;
6885 return 0;
6886 }
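/*
 * Illustrative userspace sketch of the ABI check above (simplified): the
 * first call on a kqfile records its event layout (KQ_KEV32, KQ_KEV64 or
 * KQ_KEV_QOS), and a later call through the other interface family fails
 * with EINVAL rather than mixing layouts on the same descriptor.
 *
 *	int kq = kqueue();
 *	kevent(kq, &kev, 1, NULL, 0, NULL);        // locks the kqfile into KQ_KEV32
 *	kevent64(kq, &kev64, 1, NULL, 0, 0, NULL); // now fails with EINVAL
 */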
6887
6888 /*!
6889 * @function kevent_get_kqwq
6890 *
6891 * @brief
6892 * Lookup or create the process kqwq (fastpath).
6893 *
6894 * @discussion
6895 * Callers: kevent64, kevent_qos
6896 */
6897 OS_ALWAYS_INLINE
6898 static int
6899 kevent_get_kqwq(proc_t p, int flags, int nevents, struct kqueue **kqp)
6900 {
6901 struct kqworkq *kqwq = p->p_fd->fd_wqkqueue;
6902
6903 if (__improbable(kevent_args_requesting_events(flags, nevents))) {
6904 return EINVAL;
6905 }
6906 if (__improbable(kqwq == NULL)) {
6907 kqwq = kqworkq_alloc(p, flags);
6908 if (__improbable(kqwq == NULL)) {
6909 return ENOMEM;
6910 }
6911 }
6912
6913 *kqp = &kqwq->kqwq_kqueue;
6914 return 0;
6915 }
6916
6917 #pragma mark kevent copyio
6918
6919 /*!
6920 * @function kevent_get_data_size
6921 *
6922 * @brief
6923 * Copies in the extra data size from user-space.
6924 */
6925 static int
6926 kevent_get_data_size(int flags, user_addr_t data_avail, user_addr_t data_out,
6927 kevent_ctx_t kectx)
6928 {
6929 if (!data_avail || !data_out) {
6930 kectx->kec_data_size = 0;
6931 kectx->kec_data_resid = 0;
6932 } else if (flags & KEVENT_FLAG_PROC64) {
6933 user64_size_t usize = 0;
6934 int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
6935 if (__improbable(error)) {
6936 return error;
6937 }
6938 kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
6939 } else {
6940 user32_size_t usize = 0;
6941 int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
6942 if (__improbable(error)) {
6943 return error;
6944 }
6945 kectx->kec_data_avail = data_avail;
6946 kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
6947 }
6948 kectx->kec_data_out = data_out;
6949 kectx->kec_data_avail = data_avail;
6950 return 0;
6951 }
6952
6953 /*!
6954 * @function kevent_put_data_size
6955 *
6956 * @brief
6957 * Copies out the residual data size to user-space if any has been used.
6958 */
6959 static int
6960 kevent_put_data_size(unsigned int flags, kevent_ctx_t kectx)
6961 {
6962 if (kectx->kec_data_resid == kectx->kec_data_size) {
6963 return 0;
6964 }
6965 if (flags & KEVENT_FLAG_KERNEL) {
6966 *(user_size_t *)(uintptr_t)kectx->kec_data_avail = kectx->kec_data_resid;
6967 return 0;
6968 }
6969 if (flags & KEVENT_FLAG_PROC64) {
6970 user64_size_t usize = (user64_size_t)kectx->kec_data_resid;
6971 return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
6972 } else {
6973 user32_size_t usize = (user32_size_t)kectx->kec_data_resid;
6974 return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
6975 }
6976 }
6977
6978 /*!
6979 * @function kevent_legacy_copyin
6980 *
6981 * @brief
6982 * Handles the copyin of a kevent/kevent64 event.
6983 */
6984 static int
6985 kevent_legacy_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp, unsigned int flags)
6986 {
6987 int error;
6988
6989 assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);
6990
6991 if (flags & KEVENT_FLAG_LEGACY64) {
6992 struct kevent64_s kev64;
6993
6994 error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
6995 if (__improbable(error)) {
6996 return error;
6997 }
6998 *addrp += sizeof(kev64);
6999 *kevp = (struct kevent_qos_s){
7000 .ident = kev64.ident,
7001 .filter = kev64.filter,
7002 /* Make sure user doesn't pass in any system flags */
7003 .flags = kev64.flags & ~EV_SYSFLAGS,
7004 .udata = kev64.udata,
7005 .fflags = kev64.fflags,
7006 .data = kev64.data,
7007 .ext[0] = kev64.ext[0],
7008 .ext[1] = kev64.ext[1],
7009 };
7010 } else if (flags & KEVENT_FLAG_PROC64) {
7011 struct user64_kevent kev64;
7012
7013 error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
7014 if (__improbable(error)) {
7015 return error;
7016 }
7017 *addrp += sizeof(kev64);
7018 *kevp = (struct kevent_qos_s){
7019 .ident = kev64.ident,
7020 .filter = kev64.filter,
7021 /* Make sure user doesn't pass in any system flags */
7022 .flags = kev64.flags & ~EV_SYSFLAGS,
7023 .udata = kev64.udata,
7024 .fflags = kev64.fflags,
7025 .data = kev64.data,
7026 };
7027 } else {
7028 struct user32_kevent kev32;
7029
7030 error = copyin(*addrp, (caddr_t)&kev32, sizeof(kev32));
7031 if (__improbable(error)) {
7032 return error;
7033 }
7034 *addrp += sizeof(kev32);
7035 *kevp = (struct kevent_qos_s){
7036 .ident = (uintptr_t)kev32.ident,
7037 .filter = kev32.filter,
7038 /* Make sure user doesn't pass in any system flags */
7039 .flags = kev32.flags & ~EV_SYSFLAGS,
7040 .udata = CAST_USER_ADDR_T(kev32.udata),
7041 .fflags = kev32.fflags,
7042 .data = (intptr_t)kev32.data,
7043 };
7044 }
7045
7046 return 0;
7047 }
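/*
 * Illustrative userspace sketch of the kevent64 input this routine copies in
 * (fd, udata and kq are placeholders); note that any EV_SYSFLAGS bits a
 * caller sets are stripped above before the event is processed.
 *
 *	struct kevent64_s kev;
 *	EV_SET64(&kev, fd, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, udata, 0, 0);
 *	int n = kevent64(kq, &kev, 1, NULL, 0, 0, NULL);
 */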
7048
7049 /*!
7050 * @function kevent_modern_copyin
7051 *
7052 * @brief
7053 * Handles the copyin of a kevent_qos/kevent_id event.
7054 */
7055 static int
7056 kevent_modern_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp)
7057 {
7058 int error = copyin(*addrp, (caddr_t)kevp, sizeof(struct kevent_qos_s));
7059 if (__probable(!error)) {
7060 /* Make sure user doesn't pass in any system flags */
7061 *addrp += sizeof(struct kevent_qos_s);
7062 kevp->flags &= ~EV_SYSFLAGS;
7063 }
7064 return error;
7065 }
7066
7067 /*!
7068 * @function kevent_legacy_copyout
7069 *
7070 * @brief
7071 * Handles the copyout of a kevent/kevent64 event.
7072 */
7073 static int
7074 kevent_legacy_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp, unsigned int flags)
7075 {
7076 int advance;
7077 int error;
7078
7079 assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);
7080
7081 /*
7082 * fully initialize the different output event structure
7083 * types from the internal kevent (and some universal
7084 * defaults for fields not represented in the internal
7085 * form).
7086 *
7087 * Note: these structures have no padding hence the C99
7088 * initializers below do not leak kernel info.
7089 */
7090 if (flags & KEVENT_FLAG_LEGACY64) {
7091 struct kevent64_s kev64 = {
7092 .ident = kevp->ident,
7093 .filter = kevp->filter,
7094 .flags = kevp->flags,
7095 .fflags = kevp->fflags,
7096 .data = (int64_t)kevp->data,
7097 .udata = kevp->udata,
7098 .ext[0] = kevp->ext[0],
7099 .ext[1] = kevp->ext[1],
7100 };
7101 advance = sizeof(struct kevent64_s);
7102 error = copyout((caddr_t)&kev64, *addrp, advance);
7103 } else if (flags & KEVENT_FLAG_PROC64) {
7104 /*
7105 * deal with the special case of a user-supplied
7106 * value of (uintptr_t)-1.
7107 */
7108 uint64_t ident = (kevp->ident == (uintptr_t)-1) ?
7109 (uint64_t)-1LL : (uint64_t)kevp->ident;
7110 struct user64_kevent kev64 = {
7111 .ident = ident,
7112 .filter = kevp->filter,
7113 .flags = kevp->flags,
7114 .fflags = kevp->fflags,
7115 .data = (int64_t) kevp->data,
7116 .udata = (user_addr_t) kevp->udata,
7117 };
7118 advance = sizeof(kev64);
7119 error = copyout((caddr_t)&kev64, *addrp, advance);
7120 } else {
7121 struct user32_kevent kev32 = {
7122 .ident = (uint32_t)kevp->ident,
7123 .filter = kevp->filter,
7124 .flags = kevp->flags,
7125 .fflags = kevp->fflags,
7126 .data = (int32_t)kevp->data,
7127 .udata = (uint32_t)kevp->udata,
7128 };
7129 advance = sizeof(kev32);
7130 error = copyout((caddr_t)&kev32, *addrp, advance);
7131 }
7132 if (__probable(!error)) {
7133 *addrp += advance;
7134 }
7135 return error;
7136 }
7137
7138 /*!
7139 * @function kevent_modern_copyout
7140 *
7141 * @brief
7142 * Handles the copyout of a kevent_qos/kevent_id event.
7143 */
7144 OS_ALWAYS_INLINE
7145 static inline int
7146 kevent_modern_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp)
7147 {
7148 int error = copyout((caddr_t)kevp, *addrp, sizeof(struct kevent_qos_s));
7149 if (__probable(!error)) {
7150 *addrp += sizeof(struct kevent_qos_s);
7151 }
7152 return error;
7153 }
7154
7155 #pragma mark kevent core implementation
7156
7157 /*!
7158 * @function kevent_callback_inline
7159 *
7160 * @brief
7161 * Callback for each individual event
7162 *
7163 * @discussion
7164 * This is meant to be inlined in kevent_modern_callback and
7165 * kevent_legacy_callback.
7166 */
7167 OS_ALWAYS_INLINE
7168 static inline int
7169 kevent_callback_inline(struct kevent_qos_s *kevp, kevent_ctx_t kectx, bool legacy)
7170 {
7171 int error;
7172
7173 assert(kectx->kec_process_noutputs < kectx->kec_process_nevents);
7174
7175 /*
7176 * Copy out the appropriate amount of event data for this user.
7177 */
7178 if (legacy) {
7179 error = kevent_legacy_copyout(kevp, &kectx->kec_process_eventlist,
7180 kectx->kec_process_flags);
7181 } else {
7182 error = kevent_modern_copyout(kevp, &kectx->kec_process_eventlist);
7183 }
7184
7185 /*
7186 * If there isn't space for additional events, return
7187 * a harmless error to stop the processing here
7188 */
7189 if (error == 0 && ++kectx->kec_process_noutputs == kectx->kec_process_nevents) {
7190 error = EWOULDBLOCK;
7191 }
7192 return error;
7193 }
7194
7195 /*!
7196 * @function kevent_modern_callback
7197 *
7198 * @brief
7199 * Callback for each individual modern event.
7200 *
7201 * @discussion
7202 * This callback handles kevent_qos/kevent_id events.
7203 */
7204 static int
7205 kevent_modern_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
7206 {
7207 return kevent_callback_inline(kevp, kectx, /*legacy*/ false);
7208 }
7209
7210 /*!
7211 * @function kevent_legacy_callback
7212 *
7213 * @brief
7214 * Callback for each individual legacy event.
7215 *
7216 * @discussion
7217 * This callback handles kevent/kevent64 events.
7218 */
7219 static int
7220 kevent_legacy_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
7221 {
7222 return kevent_callback_inline(kevp, kectx, /*legacy*/ true);
7223 }
7224
7225 /*!
7226 * @function kevent_cleanup
7227 *
7228 * @brief
7229 * Handles the cleanup returning from a kevent call.
7230 *
7231 * @discussion
7232 * kevent entry points will take a reference on workloops,
7233 * and a usecount on the fileglob of kqfiles.
7234 *
7235 * This function undoes this on the exit paths of kevents.
7236 *
7237 * @returns
7238 * The error to return to userspace.
7239 */
7240 static int
7241 kevent_cleanup(kqueue_t kqu, int flags, int error, kevent_ctx_t kectx)
7242 {
7243 // poll should not call any codepath leading to this
7244 assert((flags & KEVENT_FLAG_POLL) == 0);
7245
7246 if (flags & KEVENT_FLAG_WORKLOOP) {
7247 kqworkloop_release(kqu.kqwl);
7248 } else if (flags & KEVENT_FLAG_WORKQ) {
7249 /* nothing held */
7250 } else {
7251 fp_drop(kqu.kqf->kqf_p, kectx->kec_fd, kectx->kec_fp, 0);
7252 }
7253
7254 /* don't restart after signals... */
7255 if (error == ERESTART) {
7256 error = EINTR;
7257 } else if (error == 0) {
7258 /* don't abandon other output just because of residual copyout failures */
7259 (void)kevent_put_data_size(flags, kectx);
7260 }
7261
7262 if (flags & KEVENT_FLAG_PARKING) {
7263 thread_t th = current_thread();
7264 struct uthread *uth = get_bsdthread_info(th);
7265 if (uth->uu_kqr_bound) {
7266 thread_unfreeze_base_pri(th);
7267 }
7268 }
7269 return error;
7270 }
7271
7272 /*!
7273 * @function kqueue_process
7274 *
7275 * @brief
7276 * Process the triggered events in a kqueue.
7277 *
7278 * @discussion
7279 * Walk the queued knotes and validate that they are really still triggered
7280 * events by calling the filter routines (if necessary).
7281 *
7282 * For each event that is still considered triggered, invoke the callback
7283 * routine provided.
7284 *
7285 * caller holds a reference on the kqueue.
7286 * kqueue locked on entry and exit - but may be dropped
7287 * kqueue list locked (held for duration of call)
7288 *
7289 * This is only called by kqueue_scan() so that the compiler can inline it.
7290 *
7291 * @returns
7292 * - 0: no event was returned, no other error occurred
7293 * - EBADF: the kqueue is being destroyed (KQ_DRAIN is set)
7294 * - EWOULDBLOCK: (not an error) events have been found and we should return
7295 * - EFAULT: copyout failed
7296 * - filter specific errors
7297 */
7298 static int
7299 kqueue_process(kqueue_t kqu, int flags, kevent_ctx_t kectx,
7300 kevent_callback_t callback)
7301 {
7302 workq_threadreq_t kqr = current_uthread()->uu_kqr_bound;
7303 struct knote *kn;
7304 int error = 0, rc = 0;
7305 struct kqtailq *base_queue, *queue;
7306 #if DEBUG || DEVELOPMENT
7307 int retries = 64;
7308 #endif
7309 uint16_t kq_type = (kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));
7310
7311 if (kq_type & KQ_WORKQ) {
7312 rc = kqworkq_begin_processing(kqu.kqwq, kqr, flags);
7313 } else if (kq_type & KQ_WORKLOOP) {
7314 rc = kqworkloop_begin_processing(kqu.kqwl, flags);
7315 } else {
7316 kqfile_retry:
7317 rc = kqfile_begin_processing(kqu.kqf);
7318 if (rc == EBADF) {
7319 return EBADF;
7320 }
7321 }
7322
7323 if (rc == -1) {
7324 /* Nothing to process */
7325 return 0;
7326 }
7327
7328 /*
7329 * loop through the enqueued knotes associated with this request,
7330 * processing each one. Each request may have several queues
7331 * of knotes to process (depending on the type of kqueue) so we
7332 * have to loop through all the queues as long as we have additional
7333 * space.
7334 */
7335
7336 process_again:
7337 if (kq_type & KQ_WORKQ) {
7338 base_queue = queue = &kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index];
7339 } else if (kq_type & KQ_WORKLOOP) {
7340 base_queue = &kqu.kqwl->kqwl_queue[0];
7341 queue = &kqu.kqwl->kqwl_queue[KQWL_NBUCKETS - 1];
7342 } else {
7343 base_queue = queue = &kqu.kqf->kqf_queue;
7344 }
7345
7346 do {
7347 while ((kn = TAILQ_FIRST(queue)) != NULL) {
7348 error = knote_process(kn, kectx, callback);
7349 if (error == EJUSTRETURN) {
7350 error = 0;
7351 } else if (__improbable(error)) {
7352 /* error is EWOULDBLOCK when the out event array is full */
7353 goto stop_processing;
7354 }
7355 }
7356 } while (queue-- > base_queue);
7357
7358 if (kectx->kec_process_noutputs) {
7359 /* callers will transform this into no error */
7360 error = EWOULDBLOCK;
7361 }
7362
7363 stop_processing:
7364 /*
7365 * If KEVENT_FLAG_PARKING is set, and no kevents have been returned,
7366 * we want to unbind the kqrequest from the thread.
7367 *
7368 * However, because the kq locks are dropped several times during processing,
7369 * new knotes may have fired again, in which case, we want to fail the end
7370 * processing and process again, until it converges.
7371 *
7372 * If we have an error or returned events, end processing never fails.
7373 */
7374 if (error) {
7375 flags &= ~KEVENT_FLAG_PARKING;
7376 }
7377 if (kq_type & KQ_WORKQ) {
7378 rc = kqworkq_end_processing(kqu.kqwq, kqr, flags);
7379 } else if (kq_type & KQ_WORKLOOP) {
7380 rc = kqworkloop_end_processing(kqu.kqwl, KQ_PROCESSING, flags);
7381 } else {
7382 rc = kqfile_end_processing(kqu.kqf);
7383 }
7384
7385 if (__probable(error)) {
7386 return error;
7387 }
7388
7389 if (__probable(rc >= 0)) {
7390 assert(rc == 0 || rc == EBADF);
7391 return rc;
7392 }
7393
7394 #if DEBUG || DEVELOPMENT
7395 if (retries-- == 0) {
7396 panic("kevent: way too many knote_process retries, kq: %p (0x%04x)",
7397 kqu.kq, kqu.kq->kq_state);
7398 }
7399 #endif
7400 if (kq_type & (KQ_WORKQ | KQ_WORKLOOP)) {
7401 assert(flags & KEVENT_FLAG_PARKING);
7402 goto process_again;
7403 } else {
7404 goto kqfile_retry;
7405 }
7406 }
7407
7408 /*!
7409 * @function kqueue_scan_continue
7410 *
7411 * @brief
7412 * The continuation used by kqueue_scan for kevent entry points.
7413 *
7414 * @discussion
7415 * Assumes we inherit a use/ref count on the kq or its fileglob.
7416 *
7417 * This is called by kqueue_scan if neither KEVENT_FLAG_POLL nor
7418 * KEVENT_FLAG_KERNEL was set, and the caller had to wait.
7419 */
7420 OS_NORETURN OS_NOINLINE
7421 static void
7422 kqueue_scan_continue(void *data, wait_result_t wait_result)
7423 {
7424 uthread_t ut = current_uthread();
7425 kevent_ctx_t kectx = &ut->uu_save.uus_kevent;
7426 int error = 0, flags = kectx->kec_process_flags;
7427 struct kqueue *kq = data;
7428
7429 /*
7430 * only kevent variants call in here, so we know the callback is
7431 * kevent_legacy_callback or kevent_modern_callback.
7432 */
7433 assert((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0);
7434
7435 switch (wait_result) {
7436 case THREAD_AWAKENED:
7437 if (__improbable(flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64))) {
7438 error = kqueue_scan(kq, flags, kectx, kevent_legacy_callback);
7439 } else {
7440 error = kqueue_scan(kq, flags, kectx, kevent_modern_callback);
7441 }
7442 break;
7443 case THREAD_TIMED_OUT:
7444 error = 0;
7445 break;
7446 case THREAD_INTERRUPTED:
7447 error = EINTR;
7448 break;
7449 case THREAD_RESTART:
7450 error = EBADF;
7451 break;
7452 default:
7453 panic("%s: - invalid wait_result (%d)", __func__, wait_result);
7454 }
7455
7456
7457 error = kevent_cleanup(kq, flags, error, kectx);
7458 *(int32_t *)&ut->uu_rval = kectx->kec_process_noutputs;
7459 unix_syscall_return(error);
7460 }
7461
7462 /*!
7463 * @function kqueue_scan
7464 *
7465 * @brief
7466 * Scan and wait for events in a kqueue (used by poll & kevent).
7467 *
7468 * @discussion
7469 * Process the triggered events in a kqueue.
7470 *
7471 * If there are no events triggered arrange to wait for them:
7472 * - unless KEVENT_FLAG_IMMEDIATE is set in kectx->kec_process_flags
7473 * - possibly until kectx->kec_deadline expires
7474 *
7475 * When it waits, and neither KEVENT_FLAG_POLL nor KEVENT_FLAG_KERNEL
7476 * is set, it will wait in the kqueue_scan_continue continuation.
7477 *
7478 * poll() will block in place, and KEVENT_FLAG_KERNEL calls
7479 * all pass KEVENT_FLAG_IMMEDIATE and will not wait.
7480 *
7481 * @param kq
7482 * The kqueue being scanned.
7483 *
7484 * @param flags
7485 * The KEVENT_FLAG_* flags for this call.
7486 *
7487 * @param kectx
7488 * The context used for this scan.
7489 * The uthread_t::uu_save.uus_kevent storage is used for this purpose.
7490 *
7491 * @param callback
7492 * The callback to be called on events successfully processed.
7493 * (Either kevent_legacy_callback, kevent_modern_callback or poll_callback)
7494 */
7495 int
7496 kqueue_scan(struct kqueue *kq, int flags, kevent_ctx_t kectx,
7497 kevent_callback_t callback)
7498 {
7499 int error;
7500
7501 for (;;) {
7502 kqlock(kq);
7503 error = kqueue_process(kq, flags, kectx, callback);
7504
7505 /*
7506 * If we got an error, events returned (EWOULDBLOCK)
7507 * or blocking was disallowed (KEVENT_FLAG_IMMEDIATE),
7508 * just return.
7509 */
7510 if (__probable(error || (flags & KEVENT_FLAG_IMMEDIATE))) {
7511 kqunlock(kq);
7512 return error == EWOULDBLOCK ? 0 : error;
7513 }
7514
7515 waitq_assert_wait64_leeway((struct waitq *)&kq->kq_wqs,
7516 KQ_EVENT, THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL,
7517 kectx->kec_deadline, TIMEOUT_NO_LEEWAY);
7518 kq->kq_state |= KQ_SLEEP;
7519
7520 kqunlock(kq);
7521
7522 if (__probable((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0)) {
7523 thread_block_parameter(kqueue_scan_continue, kq);
7524 __builtin_unreachable();
7525 }
7526
7527 wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
7528 switch (wr) {
7529 case THREAD_AWAKENED:
7530 break;
7531 case THREAD_TIMED_OUT:
7532 return 0;
7533 case THREAD_INTERRUPTED:
7534 return EINTR;
7535 case THREAD_RESTART:
7536 return EBADF;
7537 default:
7538 panic("%s: - bad wait_result (%d)", __func__, wr);
7539 }
7540 }
7541 }
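/*
 * Illustrative userspace sketch of the two modes described above: a NULL
 * timeout blocks in this scan loop until an event or a signal arrives,
 * while a zero timeout is treated as an immediate (KEVENT_FLAG_IMMEDIATE)
 * scan and returns without waiting.
 *
 *	struct timespec zero = { 0, 0 };
 *	n = kevent(kq, NULL, 0, out, 8, NULL);     // waits for events
 *	n = kevent(kq, NULL, 0, out, 8, &zero);    // polls and returns
 */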
7542
7543 /*!
7544 * @function kevent_internal
7545 *
7546 * @brief
7547 * Common kevent code.
7548 *
7549 * @discussion
7550 * Needs to be inlined to specialize for legacy or modern and
7551 * eliminate dead code.
7552 *
7553 * This is the core logic of kevent entry points, that will:
7554 * - register kevents
7555 * - optionally scan the kqueue for events
7556 *
7557 * The caller is giving kevent_internal a reference on the kqueue
7558 * or its fileproc that needs to be cleaned up by kevent_cleanup().
7559 */
7560 OS_ALWAYS_INLINE
7561 static inline int
7562 kevent_internal(kqueue_t kqu,
7563 user_addr_t changelist, int nchanges,
7564 user_addr_t ueventlist, int nevents,
7565 int flags, kevent_ctx_t kectx, int32_t *retval,
7566 bool legacy)
7567 {
7568 int error = 0, noutputs = 0, register_rc;
7569
7570 /* only bound threads can receive events on workloops */
7571 if (!legacy && (flags & KEVENT_FLAG_WORKLOOP)) {
7572 #if CONFIG_WORKLOOP_DEBUG
7573 UU_KEVENT_HISTORY_WRITE_ENTRY(current_uthread(), {
7574 .uu_kqid = kqu.kqwl->kqwl_dynamicid,
7575 .uu_kq = error ? NULL : kqu.kq,
7576 .uu_error = error,
7577 .uu_nchanges = nchanges,
7578 .uu_nevents = nevents,
7579 .uu_flags = flags,
7580 });
7581 #endif // CONFIG_WORKLOOP_DEBUG
7582
7583 if (flags & KEVENT_FLAG_KERNEL) {
7584 /* see kevent_workq_internal */
7585 error = copyout(&kqu.kqwl->kqwl_dynamicid,
7586 ueventlist - sizeof(kqueue_id_t), sizeof(kqueue_id_t));
7587 kectx->kec_data_resid -= sizeof(kqueue_id_t);
7588 if (__improbable(error)) {
7589 goto out;
7590 }
7591 }
7592
7593 if (kevent_args_requesting_events(flags, nevents)) {
7594 /*
7595 * Disable the R2K notification while doing a register: if the
7596 * caller wants events too, we don't want the AST to be set when
7597 * we will process these events soon anyway.
7598 */
7599 kqlock(kqu);
7600 kqu.kq->kq_state &= ~KQ_R2K_ARMED;
7601 kqunlock(kqu);
7602 flags |= KEVENT_FLAG_NEEDS_END_PROCESSING;
7603 }
7604 }
7605
7606 /* register all the change requests the user provided... */
7607 while (nchanges > 0 && error == 0) {
7608 struct kevent_qos_s kev;
7609 struct knote *kn = NULL;
7610
7611 if (legacy) {
7612 error = kevent_legacy_copyin(&changelist, &kev, flags);
7613 } else {
7614 error = kevent_modern_copyin(&changelist, &kev);
7615 }
7616 if (error) {
7617 break;
7618 }
7619
7620 register_rc = kevent_register(kqu.kq, &kev, &kn);
7621 if (__improbable(!legacy && (register_rc & FILTER_REGISTER_WAIT))) {
7622 thread_t thread = current_thread();
7623
7624 kqlock_held(kqu);
7625
7626 if (act_clear_astkevent(thread, AST_KEVENT_REDRIVE_THREADREQ)) {
7627 workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
7628 }
7629
7630 // f_post_register_wait is meant to call a continuation and not to
7631 // return, which is why we don't support FILTER_REGISTER_WAIT if
7632 // KEVENT_FLAG_ERROR_EVENTS is not passed, or if the event that
7633 // waits isn't the last.
7634 //
7635 // It is implementable, but not used by any userspace code at the
7636 // moment, so for now return ENOTSUP if someone tries to do it.
7637 if (nchanges == 1 && noutputs < nevents &&
7638 (flags & KEVENT_FLAG_KERNEL) == 0 &&
7639 (flags & KEVENT_FLAG_PARKING) == 0 &&
7640 (flags & KEVENT_FLAG_ERROR_EVENTS) &&
7641 (flags & KEVENT_FLAG_WORKLOOP)) {
7642 uthread_t ut = get_bsdthread_info(thread);
7643
7644 /*
7645 * store the continuation/completion data in the uthread
7646 *
7647 * Note: the kectx aliases with this,
7648 * and is destroyed in the process.
7649 */
7650 ut->uu_save.uus_kevent_register = (struct _kevent_register){
7651 .kev = kev,
7652 .kqwl = kqu.kqwl,
7653 .eventout = noutputs,
7654 .ueventlist = ueventlist,
7655 };
7656 knote_fops(kn)->f_post_register_wait(ut, kn,
7657 &ut->uu_save.uus_kevent_register);
7658 __builtin_unreachable();
7659 }
7660 kqunlock(kqu);
7661
7662 kev.flags |= EV_ERROR;
7663 kev.data = ENOTSUP;
7664 } else {
7665 assert((register_rc & FILTER_REGISTER_WAIT) == 0);
7666 }
7667
7668 // keep in sync with kevent_register_wait_return()
7669 if (noutputs < nevents && (kev.flags & (EV_ERROR | EV_RECEIPT))) {
7670 if ((kev.flags & EV_ERROR) == 0) {
7671 kev.flags |= EV_ERROR;
7672 kev.data = 0;
7673 }
7674 if (legacy) {
7675 error = kevent_legacy_copyout(&kev, &ueventlist, flags);
7676 } else {
7677 error = kevent_modern_copyout(&kev, &ueventlist);
7678 }
7679 if (error == 0) {
7680 noutputs++;
7681 }
7682 } else if (kev.flags & EV_ERROR) {
7683 error = (int)kev.data;
7684 }
7685 nchanges--;
7686 }
7687
7688 if ((flags & KEVENT_FLAG_ERROR_EVENTS) == 0 &&
7689 nevents > 0 && noutputs == 0 && error == 0) {
7690 kectx->kec_process_flags = flags;
7691 kectx->kec_process_nevents = nevents;
7692 kectx->kec_process_noutputs = 0;
7693 kectx->kec_process_eventlist = ueventlist;
7694
7695 if (legacy) {
7696 error = kqueue_scan(kqu.kq, flags, kectx, kevent_legacy_callback);
7697 } else {
7698 error = kqueue_scan(kqu.kq, flags, kectx, kevent_modern_callback);
7699 }
7700
7701 noutputs = kectx->kec_process_noutputs;
7702 } else if (!legacy && (flags & KEVENT_FLAG_NEEDS_END_PROCESSING)) {
7703 /*
7704 * If we didn't go through kqworkloop_end_processing(),
7705 * we need to do it here.
7706 *
7707 * kqueue_scan will call kqworkloop_end_processing(),
7708 * so we only need to do it if we didn't scan.
7709 */
7710 kqlock(kqu);
7711 kqworkloop_end_processing(kqu.kqwl, 0, 0);
7712 kqunlock(kqu);
7713 }
7714
7715 *retval = noutputs;
7716 out:
7717 return kevent_cleanup(kqu.kq, flags, error, kectx);
7718 }
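
/*
 * Example (illustrative sketch, userspace): the EV_RECEIPT / EV_ERROR
 * handling above is what a caller observes when asking for per-change
 * receipts.  Only the public <sys/event.h> interface is used; error
 * handling is trimmed.
 *
 *     #include <sys/event.h>
 *     #include <stdio.h>
 *     #include <unistd.h>
 *
 *     int kq = kqueue();
 *     struct kevent change, receipt;
 *
 *     // EV_RECEIPT asks the kernel to report the registration status as an
 *     // EV_ERROR event (data == 0 on success) instead of waiting for the
 *     // filter to fire.
 *     EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);
 *     if (kevent(kq, &change, 1, &receipt, 1, NULL) == 1 &&
 *         (receipt.flags & EV_ERROR) && receipt.data != 0) {
 *             fprintf(stderr, "EV_ADD failed: %ld\n", (long)receipt.data);
 *     }
 *     close(kq);
 */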
7719
7720 #pragma mark modern syscalls: kevent_qos, kevent_id, kevent_workq_internal
7721
7722 /*!
7723 * @function kevent_modern_internal
7724 *
7725 * @brief
7726 * The backend of the kevent_id and kevent_workq_internal entry points.
7727 *
7728 * @discussion
7729 * Needs to be noinline due to the number of arguments.
7730 */
7731 OS_NOINLINE
7732 static int
7733 kevent_modern_internal(kqueue_t kqu,
7734 user_addr_t changelist, int nchanges,
7735 user_addr_t ueventlist, int nevents,
7736 int flags, kevent_ctx_t kectx, int32_t *retval)
7737 {
7738 return kevent_internal(kqu.kq, changelist, nchanges,
7739 ueventlist, nevents, flags, kectx, retval, /*legacy*/ false);
7740 }
7741
7742 /*!
7743 * @function kevent_id
7744 *
7745 * @brief
7746 * The kevent_id() syscall.
7747 */
7748 int
7749 kevent_id(struct proc *p, struct kevent_id_args *uap, int32_t *retval)
7750 {
7751 int error, flags = uap->flags & KEVENT_FLAG_USER;
7752 uthread_t uth = current_uthread();
7753 workq_threadreq_t kqr = uth->uu_kqr_bound;
7754 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7755 kqueue_t kqu;
7756
7757 flags = kevent_adjust_flags_for_proc(p, flags);
7758 flags |= KEVENT_FLAG_DYNAMIC_KQUEUE;
7759
7760 if (__improbable((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP)) !=
7761 KEVENT_FLAG_WORKLOOP)) {
7762 return EINVAL;
7763 }
7764
7765 error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
7766 if (__improbable(error)) {
7767 return error;
7768 }
7769
7770 kectx->kec_deadline = 0;
7771 kectx->kec_fp = NULL;
7772 kectx->kec_fd = -1;
7773 /* the kec_process_* fields are only filled if kqueue_scan is called */
7774
7775 /*
7776 * Get the kq we are going to be working on
7777 * As a fastpath, look at the currently bound workloop.
7778 */
7779 kqu.kqwl = kqr ? kqr_kqworkloop(kqr) : NULL;
7780 if (kqu.kqwl && kqu.kqwl->kqwl_dynamicid == uap->id) {
7781 if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
7782 return EEXIST;
7783 }
7784 kqworkloop_retain(kqu.kqwl);
7785 } else if (__improbable(kevent_args_requesting_events(flags, uap->nevents))) {
7786 return EXDEV;
7787 } else {
7788 error = kqworkloop_get_or_create(p, uap->id, NULL, flags, &kqu.kqwl);
7789 if (__improbable(error)) {
7790 return error;
7791 }
7792 }
7793
7794 return kevent_modern_internal(kqu, uap->changelist, uap->nchanges,
7795 uap->eventlist, uap->nevents, flags, kectx, retval);
7796 }
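
/*
 * Illustrative sketch only: kevent_id() is a private syscall, normally
 * reached through libdispatch rather than called directly.  Assuming the
 * private prototype declared under PRIVATE in <sys/event.h>:
 *
 *     extern int kevent_id(kqueue_id_t id,
 *         const struct kevent_qos_s *changelist, int nchanges,
 *         struct kevent_qos_s *eventlist, int nevents,
 *         void *data_out, size_t *data_available, unsigned int flags);
 *
 * a registration-only call against a dynamic workloop (no scan, errors
 * reported through the return value) exercises the
 * kqworkloop_get_or_create() path above:
 *
 *     struct kevent_qos_s kev = {
 *         .ident  = 1,
 *         .filter = EVFILT_USER,
 *         .flags  = EV_ADD | EV_CLEAR,
 *     };
 *     int rc = kevent_id(0x1234ULL, &kev, 1, NULL, 0, NULL, NULL,
 *         KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS |
 *         KEVENT_FLAG_IMMEDIATE);
 */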
7797
7798 /*!
7799 * @function kevent_workq_internal
7800 *
7801 * @discussion
7802 * This function is exported for the sake of the workqueue subsystem.
7803 *
7804 * It is called in two ways:
7805 * - when a thread is about to go to userspace to ask for pending events
7806 * - when a thread returns from userspace after having processed events
7807 *
7808 * The workqueue subsystem will only use the following flags:
7809 * - KEVENT_FLAG_STACK_DATA (always)
7810 * - KEVENT_FLAG_IMMEDIATE (always)
7811 * - KEVENT_FLAG_PARKING (depending on whether it is going to or returning from
7812 * userspace).
7813 *
7814 * It implicitly acts on the bound kqueue, and for the case of workloops
7815 * will copyout the kqueue ID before anything else.
7816 *
7817 *
7818 * Pthread will have setup the various arguments to fit this stack layout:
7819 *
7820 * +-------....----+--------------+-----------+--------------------+
7821 * | user stack | data avail | nevents | pthread_self() |
7822 * +-------....----+--------------+-----------+--------------------+
7823 * ^ ^
7824 * data_out eventlist
7825 *
7826 * When a workloop is used, the workloop ID is copied out right before
7827 * the eventlist and is taken from the data buffer.
7828 *
7829 * @warning
7830 * This function is carefully tailored not to make any call except the final tail
7831 * call into kevent_modern_internal. (LTO inlines current_uthread()).
7832 *
7833 * This function is performance sensitive due to the workq subsystem.
7834 */
7835 int
7836 kevent_workq_internal(struct proc *p,
7837 user_addr_t changelist, int nchanges,
7838 user_addr_t eventlist, int nevents,
7839 user_addr_t data_out, user_size_t *data_available,
7840 unsigned int flags, int32_t *retval)
7841 {
7842 uthread_t uth = current_uthread();
7843 workq_threadreq_t kqr = uth->uu_kqr_bound;
7844 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7845 kqueue_t kqu;
7846
7847 assert(flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE) ||
7848 flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_PARKING));
7849
7850 kectx->kec_data_out = data_out;
7851 kectx->kec_data_avail = (uint64_t)data_available;
7852 kectx->kec_data_size = *data_available;
7853 kectx->kec_data_resid = *data_available;
7854 kectx->kec_deadline = 0;
7855 kectx->kec_fp = NULL;
7856 kectx->kec_fd = -1;
7857 /* the kec_process_* fields are only filled if kqueue_scan is called */
7858
7859 flags = kevent_adjust_flags_for_proc(p, flags);
7860
7861 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
7862 kqu.kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
7863 kqworkloop_retain(kqu.kqwl);
7864
7865 flags |= KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_DYNAMIC_KQUEUE |
7866 KEVENT_FLAG_KERNEL;
7867 } else {
7868 kqu.kqwq = p->p_fd->fd_wqkqueue;
7869
7870 flags |= KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL;
7871 }
7872
7873 return kevent_modern_internal(kqu, changelist, nchanges,
7874 eventlist, nevents, flags, kectx, retval);
7875 }
7876
7877 /*!
7878 * @function kevent_qos
7879 *
7880 * @brief
7881 * The kevent_qos() syscall.
7882 */
7883 int
7884 kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
7885 {
7886 uthread_t uth = current_uthread();
7887 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7888 int error, flags = uap->flags & KEVENT_FLAG_USER;
7889 struct kqueue *kq;
7890
7891 if (__improbable(flags & KEVENT_ID_FLAG_USER)) {
7892 return EINVAL;
7893 }
7894
7895 flags = kevent_adjust_flags_for_proc(p, flags);
7896
7897 error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
7898 if (__improbable(error)) {
7899 return error;
7900 }
7901
7902 kectx->kec_deadline = 0;
7903 kectx->kec_fp = NULL;
7904 kectx->kec_fd = uap->fd;
7905 /* the kec_process_* fields are only filled if kqueue_scan is called */
7906
7907 /* get the kq we are going to be working on */
7908 if (__probable(flags & KEVENT_FLAG_WORKQ)) {
7909 error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
7910 } else {
7911 error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
7912 }
7913 if (__improbable(error)) {
7914 return error;
7915 }
7916
7917 return kevent_modern_internal(kq, uap->changelist, uap->nchanges,
7918 uap->eventlist, uap->nevents, flags, kectx, retval);
7919 }
7920
7921 #pragma mark legacy syscalls: kevent, kevent64
7922
7923 /*!
7924 * @function kevent_legacy_get_deadline
7925 *
7926 * @brief
7927 * Compute the deadline for the legacy kevent syscalls.
7928 *
7929 * @discussion
7930 * This is not necessary if KEVENT_FLAG_IMMEDIATE is specified,
7931 * as this takes precedence over the deadline.
7932 *
7933 * This function will fail if utimeout is USER_ADDR_NULL
7934 * (the caller should check).
7935 */
7936 static int
7937 kevent_legacy_get_deadline(int flags, user_addr_t utimeout, uint64_t *deadline)
7938 {
7939 struct timespec ts;
7940
7941 if (flags & KEVENT_FLAG_PROC64) {
7942 struct user64_timespec ts64;
7943 int error = copyin(utimeout, &ts64, sizeof(ts64));
7944 if (__improbable(error)) {
7945 return error;
7946 }
7947 ts.tv_sec = (unsigned long)ts64.tv_sec;
7948 ts.tv_nsec = (long)ts64.tv_nsec;
7949 } else {
7950 struct user32_timespec ts32;
7951 int error = copyin(utimeout, &ts32, sizeof(ts32));
7952 if (__improbable(error)) {
7953 return error;
7954 }
7955 ts.tv_sec = ts32.tv_sec;
7956 ts.tv_nsec = ts32.tv_nsec;
7957 }
7958 if (!timespec_is_valid(&ts)) {
7959 return EINVAL;
7960 }
7961
7962 clock_absolutetime_interval_to_deadline(tstoabstime(&ts), deadline);
7963 return 0;
7964 }
7965
7966 /*!
7967 * @function kevent_legacy_internal
7968 *
7969 * @brief
7970 * The core implementation for kevent and kevent64
7971 */
7972 OS_NOINLINE
7973 static int
7974 kevent_legacy_internal(struct proc *p, struct kevent64_args *uap,
7975 int32_t *retval, int flags)
7976 {
7977 uthread_t uth = current_uthread();
7978 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7979 struct kqueue *kq;
7980 int error;
7981
7982 if (__improbable(uap->flags & KEVENT_ID_FLAG_USER)) {
7983 return EINVAL;
7984 }
7985
7986 flags = kevent_adjust_flags_for_proc(p, flags);
7987
7988 kectx->kec_data_out = 0;
7989 kectx->kec_data_avail = 0;
7990 kectx->kec_data_size = 0;
7991 kectx->kec_data_resid = 0;
7992 kectx->kec_deadline = 0;
7993 kectx->kec_fp = NULL;
7994 kectx->kec_fd = uap->fd;
7995 /* the kec_process_* fields are only filled if kqueue_scan is called */
7996
7997 /* convert timeout to absolute - if we have one (and not immediate) */
7998 if (__improbable(uap->timeout && !(flags & KEVENT_FLAG_IMMEDIATE))) {
7999 error = kevent_legacy_get_deadline(flags, uap->timeout,
8000 &kectx->kec_deadline);
8001 if (__improbable(error)) {
8002 return error;
8003 }
8004 }
8005
8006 /* get the kq we are going to be working on */
8007 if (flags & KEVENT_FLAG_WORKQ) {
8008 error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
8009 } else {
8010 error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
8011 }
8012 if (__improbable(error)) {
8013 return error;
8014 }
8015
8016 return kevent_internal(kq, uap->changelist, uap->nchanges,
8017 uap->eventlist, uap->nevents, flags, kectx, retval,
8018 /*legacy*/ true);
8019 }
8020
8021 /*!
8022 * @function kevent
8023 *
8024 * @brief
8025 * The legacy kevent() syscall.
8026 */
8027 int
8028 kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
8029 {
8030 struct kevent64_args args = {
8031 .fd = uap->fd,
8032 .changelist = uap->changelist,
8033 .nchanges = uap->nchanges,
8034 .eventlist = uap->eventlist,
8035 .nevents = uap->nevents,
8036 .timeout = uap->timeout,
8037 };
8038
8039 return kevent_legacy_internal(p, &args, retval, KEVENT_FLAG_LEGACY32);
8040 }
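
/*
 * Example (illustrative sketch, userspace): a minimal consumer of the
 * legacy kevent() interface.  The relative struct timespec passed here is
 * what kevent_legacy_get_deadline() above converts into an absolute
 * deadline.
 *
 *     #include <sys/event.h>
 *     #include <unistd.h>
 *
 *     int kq = kqueue();
 *     struct kevent change, event;
 *     struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *     // Watch stdin for readability.
 *     EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *     (void)kevent(kq, &change, 1, NULL, 0, NULL);
 *
 *     // Wait up to one second for an event; for EVFILT_READ, event.data
 *     // holds the number of bytes available to read.
 *     int n = kevent(kq, NULL, 0, &event, 1, &timeout);
 *     close(kq);
 */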
8041
8042 /*!
8043 * @function kevent64
8044 *
8045 * @brief
8046 * The legacy kevent64() syscall.
8047 */
8048 int
8049 kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
8050 {
8051 int flags = (uap->flags & KEVENT_FLAG_USER) | KEVENT_FLAG_LEGACY64;
8052 return kevent_legacy_internal(p, uap, retval, flags);
8053 }
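
/*
 * Example (illustrative sketch, userspace): kevent64() follows the same
 * flow with struct kevent64_s, whose 64-bit ident/udata and ext[0]/ext[1]
 * fields survive the KEVENT_FLAG_LEGACY64 copyin/copyout above.  kq and fd
 * are assumed to come from earlier kqueue()/open() calls.
 *
 *     #include <sys/event.h>
 *
 *     struct kevent64_s kev;
 *     // The last two arguments of EV_SET64 are ext[0] and ext[1], which
 *     // the 32-bit struct kevent cannot carry.
 *     EV_SET64(&kev, (uint64_t)fd, EVFILT_READ, EV_ADD, 0, 0, 0, 0, 0);
 *     int n = kevent64(kq, &kev, 1, &kev, 1, 0, NULL);
 */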
8054
8055 #pragma mark - socket interface
8056
8057 #if SOCKETS
8058 #include <sys/param.h>
8059 #include <sys/socket.h>
8060 #include <sys/protosw.h>
8061 #include <sys/domain.h>
8062 #include <sys/mbuf.h>
8063 #include <sys/kern_event.h>
8064 #include <sys/malloc.h>
8065 #include <sys/sys_domain.h>
8066 #include <sys/syslog.h>
8067
8068 #ifndef ROUNDUP64
8069 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
8070 #endif
8071
8072 #ifndef ADVANCE64
8073 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
8074 #endif
8075
8076 static LCK_GRP_DECLARE(kev_lck_grp, "Kernel Event Protocol");
8077 static LCK_RW_DECLARE(kev_rwlock, &kev_lck_grp);
8078
8079 static int kev_attach(struct socket *so, int proto, struct proc *p);
8080 static int kev_detach(struct socket *so);
8081 static int kev_control(struct socket *so, u_long cmd, caddr_t data,
8082 struct ifnet *ifp, struct proc *p);
8083 static lck_mtx_t * event_getlock(struct socket *, int);
8084 static int event_lock(struct socket *, int, void *);
8085 static int event_unlock(struct socket *, int, void *);
8086
8087 static int event_sofreelastref(struct socket *);
8088 static void kev_delete(struct kern_event_pcb *);
8089
8090 static struct pr_usrreqs event_usrreqs = {
8091 .pru_attach = kev_attach,
8092 .pru_control = kev_control,
8093 .pru_detach = kev_detach,
8094 .pru_soreceive = soreceive,
8095 };
8096
8097 static struct protosw eventsw[] = {
8098 {
8099 .pr_type = SOCK_RAW,
8100 .pr_protocol = SYSPROTO_EVENT,
8101 .pr_flags = PR_ATOMIC,
8102 .pr_usrreqs = &event_usrreqs,
8103 .pr_lock = event_lock,
8104 .pr_unlock = event_unlock,
8105 .pr_getlock = event_getlock,
8106 }
8107 };
8108
8109 __private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS;
8110 __private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS;
8111
8112 SYSCTL_NODE(_net_systm, OID_AUTO, kevt,
8113 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel event family");
8114
8115 struct kevtstat kevtstat;
8116 SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats,
8117 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
8118 kevt_getstat, "S,kevtstat", "");
8119
8120 SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist,
8121 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
8122 kevt_pcblist, "S,xkevtpcb", "");
8123
8124 static lck_mtx_t *
8125 event_getlock(struct socket *so, int flags)
8126 {
8127 #pragma unused(flags)
8128 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
8129
8130 if (so->so_pcb != NULL) {
8131 if (so->so_usecount < 0) {
8132 panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
8133 so, so->so_usecount, solockhistory_nr(so));
8134 }
8135 /* NOTREACHED */
8136 } else {
8137 panic("%s: so=%p NULL NO so_pcb %s\n", __func__,
8138 so, solockhistory_nr(so));
8139 /* NOTREACHED */
8140 }
8141 return &ev_pcb->evp_mtx;
8142 }
8143
8144 static int
8145 event_lock(struct socket *so, int refcount, void *lr)
8146 {
8147 void *lr_saved;
8148
8149 if (lr == NULL) {
8150 lr_saved = __builtin_return_address(0);
8151 } else {
8152 lr_saved = lr;
8153 }
8154
8155 if (so->so_pcb != NULL) {
8156 lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
8157 } else {
8158 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
8159 so, lr_saved, solockhistory_nr(so));
8160 /* NOTREACHED */
8161 }
8162
8163 if (so->so_usecount < 0) {
8164 panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s\n", __func__,
8165 so, so->so_pcb, lr_saved, so->so_usecount,
8166 solockhistory_nr(so));
8167 /* NOTREACHED */
8168 }
8169
8170 if (refcount) {
8171 so->so_usecount++;
8172 }
8173
8174 so->lock_lr[so->next_lock_lr] = lr_saved;
8175 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
8176 return 0;
8177 }
8178
8179 static int
8180 event_unlock(struct socket *so, int refcount, void *lr)
8181 {
8182 void *lr_saved;
8183 lck_mtx_t *mutex_held;
8184
8185 if (lr == NULL) {
8186 lr_saved = __builtin_return_address(0);
8187 } else {
8188 lr_saved = lr;
8189 }
8190
8191 if (refcount) {
8192 so->so_usecount--;
8193 }
8194 if (so->so_usecount < 0) {
8195 panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
8196 so, so->so_usecount, solockhistory_nr(so));
8197 /* NOTREACHED */
8198 }
8199 if (so->so_pcb == NULL) {
8200 panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s\n", __func__,
8201 so, so->so_usecount, (void *)lr_saved,
8202 solockhistory_nr(so));
8203 /* NOTREACHED */
8204 }
8205 mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
8206
8207 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
8208 so->unlock_lr[so->next_unlock_lr] = lr_saved;
8209 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
8210
8211 if (so->so_usecount == 0) {
8212 VERIFY(so->so_flags & SOF_PCBCLEARING);
8213 event_sofreelastref(so);
8214 } else {
8215 lck_mtx_unlock(mutex_held);
8216 }
8217
8218 return 0;
8219 }
8220
8221 static int
8222 event_sofreelastref(struct socket *so)
8223 {
8224 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
8225
8226 LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);
8227
8228 so->so_pcb = NULL;
8229
8230 /*
8231 * Disable upcall in the event another thread is in kev_post_msg()
8232 * appending record to the receive socket buffer, since sbwakeup()
8233 * may release the socket lock otherwise.
8234 */
8235 so->so_rcv.sb_flags &= ~SB_UPCALL;
8236 so->so_snd.sb_flags &= ~SB_UPCALL;
8237 so->so_event = sonullevent;
8238 lck_mtx_unlock(&(ev_pcb->evp_mtx));
8239
8240 LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
8241 lck_rw_lock_exclusive(&kev_rwlock);
8242 LIST_REMOVE(ev_pcb, evp_link);
8243 kevtstat.kes_pcbcount--;
8244 kevtstat.kes_gencnt++;
8245 lck_rw_done(&kev_rwlock);
8246 kev_delete(ev_pcb);
8247
8248 sofreelastref(so, 1);
8249 return 0;
8250 }
8251
8252 static int event_proto_count = (sizeof(eventsw) / sizeof(struct protosw));
8253
8254 static
8255 struct kern_event_head kern_event_head;
8256
8257 static u_int32_t static_event_id = 0;
8258
8259 static ZONE_DECLARE(ev_pcb_zone, "kerneventpcb",
8260 sizeof(struct kern_event_pcb), ZC_ZFREE_CLEARMEM);
8261
8262 /*
8263 * Install the protosw entries for the NKE manager. Invoked at extension load time
8264 */
8265 void
8266 kern_event_init(struct domain *dp)
8267 {
8268 struct protosw *pr;
8269 int i;
8270
8271 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
8272 VERIFY(dp == systemdomain);
8273
8274 for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++) {
8275 net_add_proto(pr, dp, 1);
8276 }
8277 }
8278
8279 static int
8280 kev_attach(struct socket *so, __unused int proto, __unused struct proc *p)
8281 {
8282 int error = 0;
8283 struct kern_event_pcb *ev_pcb;
8284
8285 error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
8286 if (error != 0) {
8287 return error;
8288 }
8289
8290 ev_pcb = zalloc_flags(ev_pcb_zone, Z_WAITOK | Z_ZERO);
8291 lck_mtx_init(&ev_pcb->evp_mtx, &kev_lck_grp, LCK_ATTR_NULL);
8292
8293 ev_pcb->evp_socket = so;
8294 ev_pcb->evp_vendor_code_filter = 0xffffffff;
8295
8296 so->so_pcb = (caddr_t) ev_pcb;
8297 lck_rw_lock_exclusive(&kev_rwlock);
8298 LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link);
8299 kevtstat.kes_pcbcount++;
8300 kevtstat.kes_gencnt++;
8301 lck_rw_done(&kev_rwlock);
8302
8303 return error;
8304 }
8305
8306 static void
8307 kev_delete(struct kern_event_pcb *ev_pcb)
8308 {
8309 VERIFY(ev_pcb != NULL);
8310 lck_mtx_destroy(&ev_pcb->evp_mtx, &kev_lck_grp);
8311 zfree(ev_pcb_zone, ev_pcb);
8312 }
8313
8314 static int
8315 kev_detach(struct socket *so)
8316 {
8317 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8318
8319 if (ev_pcb != NULL) {
8320 soisdisconnected(so);
8321 so->so_flags |= SOF_PCBCLEARING;
8322 }
8323
8324 return 0;
8325 }
8326
8327 /*
8328 * For now, kev_vendor_code and mbuf_tags use the same
8329 * mechanism.
8330 */
8331 errno_t
8332 kev_vendor_code_find(
8333 const char *string,
8334 u_int32_t *out_vendor_code)
8335 {
8336 if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
8337 return EINVAL;
8338 }
8339 return net_str_id_find_internal(string, out_vendor_code,
8340 NSI_VENDOR_CODE, 1);
8341 }
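
/*
 * Example (illustrative sketch, kext side): a kernel extension looks up its
 * vendor code once and reuses it for every event it posts.  The
 * reverse-DNS string is a placeholder.
 *
 *     #include <sys/kern_event.h>
 *
 *     static u_int32_t example_vendor_code;
 *
 *     errno_t err = kev_vendor_code_find("com.example.driver",
 *         &example_vendor_code);
 *     // strings of KEV_VENDOR_CODE_MAX_STR_LEN characters or more
 *     // are rejected with EINVAL
 */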
8342
8343 errno_t
8344 kev_msg_post(struct kev_msg *event_msg)
8345 {
8346 mbuf_tag_id_t min_vendor, max_vendor;
8347
8348 net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);
8349
8350 if (event_msg == NULL) {
8351 return EINVAL;
8352 }
8353
8354 /*
8355 * Limit third parties to posting events for registered vendor codes
8356 * only
8357 */
8358 if (event_msg->vendor_code < min_vendor ||
8359 event_msg->vendor_code > max_vendor) {
8360 os_atomic_inc(&kevtstat.kes_badvendor, relaxed);
8361 return EINVAL;
8362 }
8363 return kev_post_msg(event_msg);
8364 }
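
/*
 * Example (illustrative sketch, kext side): posting an event through
 * kev_msg_post() with the fields consumed below by kev_post_msg().  The
 * class, subclass, event code and payload are placeholders;
 * example_vendor_code is the value from the kev_vendor_code_find() sketch
 * above.  The dv[] list is terminated by the first zero-length vector.
 *
 *     struct {
 *         u_int32_t state;
 *     } payload = { .state = 1 };
 *
 *     struct kev_msg msg = {
 *         .vendor_code  = example_vendor_code,
 *         .kev_class    = 1,
 *         .kev_subclass = 1,
 *         .event_code   = 1,
 *     };
 *     msg.dv[0].data_ptr    = &payload;
 *     msg.dv[0].data_length = sizeof(payload);
 *
 *     errno_t err = kev_msg_post(&msg);
 */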
8365
8366 int
8367 kev_post_msg(struct kev_msg *event_msg)
8368 {
8369 struct mbuf *m, *m2;
8370 struct kern_event_pcb *ev_pcb;
8371 struct kern_event_msg *ev;
8372 char *tmp;
8373 u_int32_t total_size;
8374 int i;
8375
8376 /* Verify the message is small enough to fit in one mbuf w/o cluster */
8377 total_size = KEV_MSG_HEADER_SIZE;
8378
8379 for (i = 0; i < 5; i++) {
8380 if (event_msg->dv[i].data_length == 0) {
8381 break;
8382 }
8383 total_size += event_msg->dv[i].data_length;
8384 }
8385
8386 if (total_size > MLEN) {
8387 os_atomic_inc(&kevtstat.kes_toobig, relaxed);
8388 return EMSGSIZE;
8389 }
8390
8391 m = m_get(M_WAIT, MT_DATA);
8392 if (m == 0) {
8393 os_atomic_inc(&kevtstat.kes_nomem, relaxed);
8394 return ENOMEM;
8395 }
8396 ev = mtod(m, struct kern_event_msg *);
8397 total_size = KEV_MSG_HEADER_SIZE;
8398
8399 tmp = (char *) &ev->event_data[0];
8400 for (i = 0; i < 5; i++) {
8401 if (event_msg->dv[i].data_length == 0) {
8402 break;
8403 }
8404
8405 total_size += event_msg->dv[i].data_length;
8406 bcopy(event_msg->dv[i].data_ptr, tmp,
8407 event_msg->dv[i].data_length);
8408 tmp += event_msg->dv[i].data_length;
8409 }
8410
8411 ev->id = ++static_event_id;
8412 ev->total_size = total_size;
8413 ev->vendor_code = event_msg->vendor_code;
8414 ev->kev_class = event_msg->kev_class;
8415 ev->kev_subclass = event_msg->kev_subclass;
8416 ev->event_code = event_msg->event_code;
8417
8418 m->m_len = total_size;
8419 lck_rw_lock_shared(&kev_rwlock);
8420 for (ev_pcb = LIST_FIRST(&kern_event_head);
8421 ev_pcb;
8422 ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
8423 lck_mtx_lock(&ev_pcb->evp_mtx);
8424 if (ev_pcb->evp_socket->so_pcb == NULL) {
8425 lck_mtx_unlock(&ev_pcb->evp_mtx);
8426 continue;
8427 }
8428 if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) {
8429 if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) {
8430 lck_mtx_unlock(&ev_pcb->evp_mtx);
8431 continue;
8432 }
8433
8434 if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) {
8435 if (ev_pcb->evp_class_filter != ev->kev_class) {
8436 lck_mtx_unlock(&ev_pcb->evp_mtx);
8437 continue;
8438 }
8439
8440 if ((ev_pcb->evp_subclass_filter !=
8441 KEV_ANY_SUBCLASS) &&
8442 (ev_pcb->evp_subclass_filter !=
8443 ev->kev_subclass)) {
8444 lck_mtx_unlock(&ev_pcb->evp_mtx);
8445 continue;
8446 }
8447 }
8448 }
8449
8450 m2 = m_copym(m, 0, m->m_len, M_WAIT);
8451 if (m2 == 0) {
8452 os_atomic_inc(&kevtstat.kes_nomem, relaxed);
8453 m_free(m);
8454 lck_mtx_unlock(&ev_pcb->evp_mtx);
8455 lck_rw_done(&kev_rwlock);
8456 return ENOMEM;
8457 }
8458 if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) {
8459 /*
8460 * We use "m" for the socket stats as it would be
8461 * unsafe to use "m2"
8462 */
8463 so_inc_recv_data_stat(ev_pcb->evp_socket,
8464 1, m->m_len, MBUF_TC_BE);
8465
8466 sorwakeup(ev_pcb->evp_socket);
8467 os_atomic_inc(&kevtstat.kes_posted, relaxed);
8468 } else {
8469 os_atomic_inc(&kevtstat.kes_fullsock, relaxed);
8470 }
8471 lck_mtx_unlock(&ev_pcb->evp_mtx);
8472 }
8473 m_free(m);
8474 lck_rw_done(&kev_rwlock);
8475
8476 return 0;
8477 }
8478
8479 static int
8480 kev_control(struct socket *so,
8481 u_long cmd,
8482 caddr_t data,
8483 __unused struct ifnet *ifp,
8484 __unused struct proc *p)
8485 {
8486 struct kev_request *kev_req = (struct kev_request *) data;
8487 struct kern_event_pcb *ev_pcb;
8488 struct kev_vendor_code *kev_vendor;
8489 u_int32_t *id_value = (u_int32_t *) data;
8490
8491 switch (cmd) {
8492 case SIOCGKEVID:
8493 *id_value = static_event_id;
8494 break;
8495 case SIOCSKEVFILT:
8496 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8497 ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
8498 ev_pcb->evp_class_filter = kev_req->kev_class;
8499 ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
8500 break;
8501 case SIOCGKEVFILT:
8502 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8503 kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
8504 kev_req->kev_class = ev_pcb->evp_class_filter;
8505 kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
8506 break;
8507 case SIOCGKEVVENDOR:
8508 kev_vendor = (struct kev_vendor_code *)data;
8509 /* Make sure string is NULL terminated */
8510 kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN - 1] = 0;
8511 return net_str_id_find_internal(kev_vendor->vendor_string,
8512 &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0);
8513 default:
8514 return ENOTSUP;
8515 }
8516
8517 return 0;
8518 }
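
/*
 * Example (illustrative sketch, userspace): the ioctls above are reached
 * through a PF_SYSTEM/SYSPROTO_EVENT socket.  Here the filter is set to a
 * single vendor code (vendor is a placeholder, e.g. resolved via
 * SIOCGKEVVENDOR) while accepting any class/subclass, and raw
 * struct kern_event_msg records are then read with recv().
 *
 *     #include <sys/ioctl.h>
 *     #include <sys/kern_event.h>
 *     #include <sys/socket.h>
 *     #include <sys/sys_domain.h>
 *
 *     int s = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
 *
 *     struct kev_request req = {
 *         .vendor_code  = vendor,
 *         .kev_class    = KEV_ANY_CLASS,
 *         .kev_subclass = KEV_ANY_SUBCLASS,
 *     };
 *     ioctl(s, SIOCSKEVFILT, &req);
 *
 *     char buf[1024];
 *     ssize_t len = recv(s, buf, sizeof(buf), 0);
 *     // buf starts with a struct kern_event_msg of msg->total_size bytes
 */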
8519
8520 int
8521 kevt_getstat SYSCTL_HANDLER_ARGS
8522 {
8523 #pragma unused(oidp, arg1, arg2)
8524 int error = 0;
8525
8526 lck_rw_lock_shared(&kev_rwlock);
8527
8528 if (req->newptr != USER_ADDR_NULL) {
8529 error = EPERM;
8530 goto done;
8531 }
8532 if (req->oldptr == USER_ADDR_NULL) {
8533 req->oldidx = sizeof(struct kevtstat);
8534 goto done;
8535 }
8536
8537 error = SYSCTL_OUT(req, &kevtstat,
8538 MIN(sizeof(struct kevtstat), req->oldlen));
8539 done:
8540 lck_rw_done(&kev_rwlock);
8541
8542 return error;
8543 }
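
/*
 * Example (illustrative sketch, userspace): the handler above backs the
 * sysctl declared earlier in this section (SYSCTL_NODE net.systm.kevt plus
 * SYSCTL_PROC stats), so the counters can be read with sysctlbyname();
 * the dotted name below is derived from those declarations.
 *
 *     #include <sys/kern_event.h>
 *     #include <sys/sysctl.h>
 *
 *     struct kevtstat st;
 *     size_t len = sizeof(st);
 *     if (sysctlbyname("net.systm.kevt.stats", &st, &len, NULL, 0) == 0) {
 *         // st.kes_pcbcount, st.kes_posted, st.kes_fullsock, ...
 *     }
 */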
8544
8545 __private_extern__ int
8546 kevt_pcblist SYSCTL_HANDLER_ARGS
8547 {
8548 #pragma unused(oidp, arg1, arg2)
8549 int error = 0;
8550 uint64_t n, i;
8551 struct xsystmgen xsg;
8552 void *buf = NULL;
8553 size_t item_size = ROUNDUP64(sizeof(struct xkevtpcb)) +
8554 ROUNDUP64(sizeof(struct xsocket_n)) +
8555 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
8556 ROUNDUP64(sizeof(struct xsockstat_n));
8557 struct kern_event_pcb *ev_pcb;
8558
8559 buf = kheap_alloc(KHEAP_TEMP, item_size, Z_WAITOK | Z_ZERO);
8560 if (buf == NULL) {
8561 return ENOMEM;
8562 }
8563
8564 lck_rw_lock_shared(&kev_rwlock);
8565
8566 n = kevtstat.kes_pcbcount;
8567
8568 if (req->oldptr == USER_ADDR_NULL) {
8569 req->oldidx = (size_t) ((n + n / 8) * item_size);
8570 goto done;
8571 }
8572 if (req->newptr != USER_ADDR_NULL) {
8573 error = EPERM;
8574 goto done;
8575 }
8576 bzero(&xsg, sizeof(xsg));
8577 xsg.xg_len = sizeof(xsg);
8578 xsg.xg_count = n;
8579 xsg.xg_gen = kevtstat.kes_gencnt;
8580 xsg.xg_sogen = so_gencnt;
8581 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
8582 if (error) {
8583 goto done;
8584 }
8585 /*
8586 * We are done if there is no pcb
8587 */
8588 if (n == 0) {
8589 goto done;
8590 }
8591
8592 i = 0;
8593 for (i = 0, ev_pcb = LIST_FIRST(&kern_event_head);
8594 i < n && ev_pcb != NULL;
8595 i++, ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
8596 struct xkevtpcb *xk = (struct xkevtpcb *)buf;
8597 struct xsocket_n *xso = (struct xsocket_n *)
8598 ADVANCE64(xk, sizeof(*xk));
8599 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
8600 ADVANCE64(xso, sizeof(*xso));
8601 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
8602 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
8603 struct xsockstat_n *xsostats = (struct xsockstat_n *)
8604 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
8605
8606 bzero(buf, item_size);
8607
8608 lck_mtx_lock(&ev_pcb->evp_mtx);
8609
8610 xk->kep_len = sizeof(struct xkevtpcb);
8611 xk->kep_kind = XSO_EVT;
8612 xk->kep_evtpcb = (uint64_t)VM_KERNEL_ADDRPERM(ev_pcb);
8613 xk->kep_vendor_code_filter = ev_pcb->evp_vendor_code_filter;
8614 xk->kep_class_filter = ev_pcb->evp_class_filter;
8615 xk->kep_subclass_filter = ev_pcb->evp_subclass_filter;
8616
8617 sotoxsocket_n(ev_pcb->evp_socket, xso);
8618 sbtoxsockbuf_n(ev_pcb->evp_socket ?
8619 &ev_pcb->evp_socket->so_rcv : NULL, xsbrcv);
8620 sbtoxsockbuf_n(ev_pcb->evp_socket ?
8621 &ev_pcb->evp_socket->so_snd : NULL, xsbsnd);
8622 sbtoxsockstat_n(ev_pcb->evp_socket, xsostats);
8623
8624 lck_mtx_unlock(&ev_pcb->evp_mtx);
8625
8626 error = SYSCTL_OUT(req, buf, item_size);
8627 }
8628
8629 if (error == 0) {
8630 /*
8631 * Give the user an updated idea of our state.
8632 * If the generation differs from what we told
8633 * her before, she knows that something happened
8634 * while we were processing this request, and it
8635 * might be necessary to retry.
8636 */
8637 bzero(&xsg, sizeof(xsg));
8638 xsg.xg_len = sizeof(xsg);
8639 xsg.xg_count = n;
8640 xsg.xg_gen = kevtstat.kes_gencnt;
8641 xsg.xg_sogen = so_gencnt;
8642 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
8643 if (error) {
8644 goto done;
8645 }
8646 }
8647
8648 done:
8649 lck_rw_done(&kev_rwlock);
8650
8651 kheap_free(KHEAP_TEMP, buf, item_size);
8652 return error;
8653 }
8654
8655 #endif /* SOCKETS */
8656
8657
8658 int
8659 fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)
8660 {
8661 struct vinfo_stat * st;
8662
8663 st = &kinfo->kq_stat;
8664
8665 st->vst_size = kq->kq_count;
8666 if (kq->kq_state & KQ_KEV_QOS) {
8667 st->vst_blksize = sizeof(struct kevent_qos_s);
8668 } else if (kq->kq_state & KQ_KEV64) {
8669 st->vst_blksize = sizeof(struct kevent64_s);
8670 } else {
8671 st->vst_blksize = sizeof(struct kevent);
8672 }
8673 st->vst_mode = S_IFIFO;
8674 st->vst_ino = (kq->kq_state & KQ_DYNAMIC) ?
8675 ((struct kqworkloop *)kq)->kqwl_dynamicid : 0;
8676
8677 /* flags exported to libproc as PROC_KQUEUE_* (sys/proc_info.h) */
8678 #define PROC_KQUEUE_MASK (KQ_SEL|KQ_SLEEP|KQ_KEV32|KQ_KEV64|KQ_KEV_QOS|KQ_WORKQ|KQ_WORKLOOP)
8679 kinfo->kq_state = kq->kq_state & PROC_KQUEUE_MASK;
8680
8681 return 0;
8682 }
8683
8684 static int
8685 fill_kqueue_dyninfo(struct kqworkloop *kqwl, struct kqueue_dyninfo *kqdi)
8686 {
8687 workq_threadreq_t kqr = &kqwl->kqwl_request;
8688 workq_threadreq_param_t trp = {};
8689 int err;
8690
8691 if ((kqwl->kqwl_state & KQ_WORKLOOP) == 0) {
8692 return EINVAL;
8693 }
8694
8695 if ((err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi->kqdi_info))) {
8696 return err;
8697 }
8698
8699 kqlock(kqwl);
8700
8701 kqdi->kqdi_servicer = thread_tid(kqr_thread(kqr));
8702 kqdi->kqdi_owner = thread_tid(kqwl->kqwl_owner);
8703 kqdi->kqdi_request_state = kqr->tr_state;
8704 kqdi->kqdi_async_qos = kqr->tr_kq_qos_index;
8705 kqdi->kqdi_events_qos = kqr->tr_kq_override_index;
8706 kqdi->kqdi_sync_waiters = 0;
8707 kqdi->kqdi_sync_waiter_qos = 0;
8708
8709 trp.trp_value = kqwl->kqwl_params;
8710 if (trp.trp_flags & TRP_PRIORITY) {
8711 kqdi->kqdi_pri = trp.trp_pri;
8712 } else {
8713 kqdi->kqdi_pri = 0;
8714 }
8715
8716 if (trp.trp_flags & TRP_POLICY) {
8717 kqdi->kqdi_pol = trp.trp_pol;
8718 } else {
8719 kqdi->kqdi_pol = 0;
8720 }
8721
8722 if (trp.trp_flags & TRP_CPUPERCENT) {
8723 kqdi->kqdi_cpupercent = trp.trp_cpupercent;
8724 } else {
8725 kqdi->kqdi_cpupercent = 0;
8726 }
8727
8728 kqunlock(kqwl);
8729
8730 return 0;
8731 }
8732
8733
8734 void
8735 knote_markstayactive(struct knote *kn)
8736 {
8737 struct kqueue *kq = knote_get_kq(kn);
8738 kq_index_t qos;
8739
8740 kqlock(kq);
8741 kn->kn_status |= KN_STAYACTIVE;
8742
8743 /*
8744 * Making a knote stay active is a property of the knote that must be
8745 * established before it is fully attached.
8746 */
8747 assert((kn->kn_status & (KN_QUEUED | KN_SUPPRESSED)) == 0);
8748
8749 /* handle all stayactive knotes on the (appropriate) manager */
8750 if (kq->kq_state & KQ_WORKLOOP) {
8751 struct kqworkloop *kqwl = (struct kqworkloop *)kq;
8752
8753 qos = _pthread_priority_thread_qos(kn->kn_qos);
8754 assert(qos && qos < THREAD_QOS_LAST);
8755 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_STAYACTIVE_QOS, qos);
8756 qos = KQWL_BUCKET_STAYACTIVE;
8757 } else if (kq->kq_state & KQ_WORKQ) {
8758 qos = KQWQ_QOS_MANAGER;
8759 } else {
8760 qos = THREAD_QOS_UNSPECIFIED;
8761 }
8762
8763 kn->kn_qos_override = qos;
8764 kn->kn_qos_index = qos;
8765
8766 knote_activate(kq, kn, FILTER_ACTIVE);
8767 kqunlock(kq);
8768 }
8769
8770 void
8771 knote_clearstayactive(struct knote *kn)
8772 {
8773 struct kqueue *kq = knote_get_kq(kn);
8774 kqlock(kq);
8775 kn->kn_status &= ~(KN_STAYACTIVE | KN_ACTIVE);
8776 knote_dequeue(kq, kn);
8777 kqunlock(kq);
8778 }
8779
8780 static unsigned long
8781 kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *buf,
8782 unsigned long buflen, unsigned long nknotes)
8783 {
8784 for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
8785 if (kq == knote_get_kq(kn)) {
8786 if (nknotes < buflen) {
8787 struct kevent_extinfo *info = &buf[nknotes];
8788
8789 kqlock(kq);
8790
8791 info->kqext_kev = *(struct kevent_qos_s *)&kn->kn_kevent;
8792 if (knote_has_qos(kn)) {
8793 info->kqext_kev.qos =
8794 _pthread_priority_thread_qos_fast(kn->kn_qos);
8795 } else {
8796 info->kqext_kev.qos = kn->kn_qos_override;
8797 }
8798 info->kqext_kev.filter |= 0xff00; /* sign extend filter */
8799 info->kqext_kev.xflags = 0; /* this is where sfflags lives */
8800 info->kqext_kev.data = 0; /* this is where sdata lives */
8801 info->kqext_sdata = kn->kn_sdata;
8802 info->kqext_status = kn->kn_status;
8803 info->kqext_sfflags = kn->kn_sfflags;
8804
8805 kqunlock(kq);
8806 }
8807
8808 /* we return total number of knotes, which may be more than requested */
8809 nknotes++;
8810 }
8811 }
8812
8813 return nknotes;
8814 }
8815
8816 int
8817 kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize,
8818 int32_t *nkqueues_out)
8819 {
8820 proc_t p = (proc_t)proc;
8821 struct filedesc *fdp = p->p_fd;
8822 unsigned int nkqueues = 0;
8823 unsigned long ubuflen = ubufsize / sizeof(kqueue_id_t);
8824 size_t buflen, bufsize;
8825 kqueue_id_t *kq_ids = NULL;
8826 int err = 0;
8827
8828 assert(p != NULL);
8829
8830 if (ubuf == USER_ADDR_NULL && ubufsize != 0) {
8831 err = EINVAL;
8832 goto out;
8833 }
8834
8835 buflen = MIN(ubuflen, PROC_PIDDYNKQUEUES_MAX);
8836
8837 if (ubuflen != 0) {
8838 if (os_mul_overflow(sizeof(kqueue_id_t), buflen, &bufsize)) {
8839 err = ERANGE;
8840 goto out;
8841 }
8842 kq_ids = kheap_alloc(KHEAP_TEMP, bufsize, Z_WAITOK | Z_ZERO);
8843 if (!kq_ids) {
8844 err = ENOMEM;
8845 goto out;
8846 }
8847 }
8848
8849 kqhash_lock(fdp);
8850
8851 if (fdp->fd_kqhashmask > 0) {
8852 for (uint32_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
8853 struct kqworkloop *kqwl;
8854
8855 LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
8856 /* report the number of kqueues, even if they don't all fit */
8857 if (nkqueues < buflen) {
8858 kq_ids[nkqueues] = kqwl->kqwl_dynamicid;
8859 }
8860 nkqueues++;
8861 }
8862 }
8863 }
8864
8865 kqhash_unlock(fdp);
8866
8867 if (kq_ids) {
8868 size_t copysize;
8869 if (os_mul_overflow(sizeof(kqueue_id_t), MIN(buflen, nkqueues), &copysize)) {
8870 err = ERANGE;
8871 goto out;
8872 }
8873
8874 assert(ubufsize >= copysize);
8875 err = copyout(kq_ids, ubuf, copysize);
8876 }
8877
8878 out:
8879 if (kq_ids) {
8880 kheap_free(KHEAP_TEMP, kq_ids, bufsize);
8881 }
8882
8883 if (!err) {
8884 *nkqueues_out = (int)min(nkqueues, PROC_PIDDYNKQUEUES_MAX);
8885 }
8886 return err;
8887 }
8888
8889 int
8890 kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
8891 uint32_t ubufsize, int32_t *size_out)
8892 {
8893 proc_t p = (proc_t)proc;
8894 struct kqworkloop *kqwl;
8895 int err = 0;
8896 struct kqueue_dyninfo kqdi = { };
8897
8898 assert(p != NULL);
8899
8900 if (ubufsize < sizeof(struct kqueue_info)) {
8901 return ENOBUFS;
8902 }
8903
8904 kqwl = kqworkloop_hash_lookup_and_retain(p->p_fd, kq_id);
8905 if (!kqwl) {
8906 return ESRCH;
8907 }
8908
8909 /*
8910 * backward compatibility: allow the argument to this call to only be
8911 * a struct kqueue_info
8912 */
8913 if (ubufsize >= sizeof(struct kqueue_dyninfo)) {
8914 ubufsize = sizeof(struct kqueue_dyninfo);
8915 err = fill_kqueue_dyninfo(kqwl, &kqdi);
8916 } else {
8917 ubufsize = sizeof(struct kqueue_info);
8918 err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi.kqdi_info);
8919 }
8920 if (err == 0 && (err = copyout(&kqdi, ubuf, ubufsize)) == 0) {
8921 *size_out = ubufsize;
8922 }
8923 kqworkloop_release(kqwl);
8924 return err;
8925 }
8926
8927 int
8928 kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
8929 uint32_t ubufsize, int32_t *nknotes_out)
8930 {
8931 proc_t p = (proc_t)proc;
8932 struct kqworkloop *kqwl;
8933 int err;
8934
8935 kqwl = kqworkloop_hash_lookup_and_retain(p->p_fd, kq_id);
8936 if (!kqwl) {
8937 return ESRCH;
8938 }
8939
8940 err = pid_kqueue_extinfo(p, &kqwl->kqwl_kqueue, ubuf, ubufsize, nknotes_out);
8941 kqworkloop_release(kqwl);
8942 return err;
8943 }
8944
8945 int
8946 pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf,
8947 uint32_t bufsize, int32_t *retval)
8948 {
8949 struct knote *kn;
8950 int i;
8951 int err = 0;
8952 struct filedesc *fdp = p->p_fd;
8953 unsigned long nknotes = 0;
8954 unsigned long buflen = bufsize / sizeof(struct kevent_extinfo);
8955 struct kevent_extinfo *kqext = NULL;
8956
8957 /* arbitrary upper limit to cap kernel memory usage, copyout size, etc. */
8958 buflen = MIN(buflen, PROC_PIDFDKQUEUE_KNOTES_MAX);
8959
8960 kqext = kheap_alloc(KHEAP_TEMP,
8961 buflen * sizeof(struct kevent_extinfo), Z_WAITOK | Z_ZERO);
8962 if (kqext == NULL) {
8963 err = ENOMEM;
8964 goto out;
8965 }
8966
8967 proc_fdlock(p);
8968 for (i = 0; i < fdp->fd_knlistsize; i++) {
8969 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
8970 nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
8971 }
8972 proc_fdunlock(p);
8973
8974 if (fdp->fd_knhashmask != 0) {
8975 for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
8976 knhash_lock(fdp);
8977 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
8978 nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
8979 knhash_unlock(fdp);
8980 }
8981 }
8982
8983 assert(bufsize >= sizeof(struct kevent_extinfo) * MIN(buflen, nknotes));
8984 err = copyout(kqext, ubuf, sizeof(struct kevent_extinfo) * MIN(buflen, nknotes));
8985
8986 out:
8987 kheap_free(KHEAP_TEMP, kqext, buflen * sizeof(struct kevent_extinfo));
8988
8989 if (!err) {
8990 *retval = (int32_t)MIN(nknotes, PROC_PIDFDKQUEUE_KNOTES_MAX);
8991 }
8992 return err;
8993 }
8994
8995 static unsigned int
8996 klist_copy_udata(struct klist *list, uint64_t *buf,
8997 unsigned int buflen, unsigned int nknotes)
8998 {
8999 struct knote *kn;
9000 SLIST_FOREACH(kn, list, kn_link) {
9001 if (nknotes < buflen) {
9002 /*
9003 * kevent_register will always set kn_udata atomically
9004 * so that we don't have to take any kqlock here.
9005 */
9006 buf[nknotes] = os_atomic_load_wide(&kn->kn_udata, relaxed);
9007 }
9008 /* we return total number of knotes, which may be more than requested */
9009 nknotes++;
9010 }
9011
9012 return nknotes;
9013 }
9014
9015 int
9016 kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize)
9017 {
9018 proc_t p = (proc_t)proc;
9019 struct filedesc *fdp = p->p_fd;
9020 unsigned int nuptrs = 0;
9021 unsigned int buflen = bufsize / sizeof(uint64_t);
9022 struct kqworkloop *kqwl;
9023
9024 if (buflen > 0) {
9025 assert(buf != NULL);
9026 }
9027
9028 proc_fdlock(p);
9029 for (int i = 0; i < fdp->fd_knlistsize; i++) {
9030 nuptrs = klist_copy_udata(&fdp->fd_knlist[i], buf, buflen, nuptrs);
9031 }
9032 proc_fdunlock(p);
9033
9034 knhash_lock(fdp);
9035 if (fdp->fd_knhashmask != 0) {
9036 for (size_t i = 0; i < fdp->fd_knhashmask + 1; i++) {
9037 nuptrs = klist_copy_udata(&fdp->fd_knhash[i], buf, buflen, nuptrs);
9038 }
9039 }
9040 knhash_unlock(fdp);
9041
9042 kqhash_lock(fdp);
9043 if (fdp->fd_kqhashmask != 0) {
9044 for (size_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
9045 LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
9046 if (nuptrs < buflen) {
9047 buf[nuptrs] = kqwl->kqwl_dynamicid;
9048 }
9049 nuptrs++;
9050 }
9051 }
9052 }
9053 kqhash_unlock(fdp);
9054
9055 return (int)nuptrs;
9056 }
9057
9058 static void
9059 kevent_set_return_to_kernel_user_tsd(proc_t p, thread_t thread)
9060 {
9061 uint64_t ast_addr;
9062 bool proc_is_64bit = !!(p->p_flag & P_LP64);
9063 size_t user_addr_size = proc_is_64bit ? 8 : 4;
9064 uint32_t ast_flags32 = 0;
9065 uint64_t ast_flags64 = 0;
9066 struct uthread *ut = get_bsdthread_info(thread);
9067
9068 if (ut->uu_kqr_bound != NULL) {
9069 ast_flags64 |= R2K_WORKLOOP_PENDING_EVENTS;
9070 }
9071
9072 if (ast_flags64 == 0) {
9073 return;
9074 }
9075
9076 if (!(p->p_flag & P_LP64)) {
9077 ast_flags32 = (uint32_t)ast_flags64;
9078 assert(ast_flags64 < 0x100000000ull);
9079 }
9080
9081 ast_addr = thread_rettokern_addr(thread);
9082 if (ast_addr == 0) {
9083 return;
9084 }
9085
9086 if (copyout((proc_is_64bit ? (void *)&ast_flags64 : (void *)&ast_flags32),
9087 (user_addr_t)ast_addr,
9088 user_addr_size) != 0) {
9089 printf("pid %d (tid:%llu): copyout of return_to_kernel ast flags failed with "
9090 "ast_addr = %llu\n", p->p_pid, thread_tid(current_thread()), ast_addr);
9091 }
9092 }
9093
9094 void
9095 kevent_ast(thread_t thread, uint16_t bits)
9096 {
9097 proc_t p = current_proc();
9098
9099 if (bits & AST_KEVENT_REDRIVE_THREADREQ) {
9100 workq_kern_threadreq_redrive(p, WORKQ_THREADREQ_CAN_CREATE_THREADS);
9101 }
9102 if (bits & AST_KEVENT_RETURN_TO_KERNEL) {
9103 kevent_set_return_to_kernel_user_tsd(p, thread);
9104 }
9105 }
9106
9107 #if DEVELOPMENT || DEBUG
9108
9109 #define KEVENT_SYSCTL_BOUND_ID 1
9110
9111 static int
9112 kevent_sysctl SYSCTL_HANDLER_ARGS
9113 {
9114 #pragma unused(oidp, arg2)
9115 uintptr_t type = (uintptr_t)arg1;
9116 uint64_t bound_id = 0;
9117
9118 if (type != KEVENT_SYSCTL_BOUND_ID) {
9119 return EINVAL;
9120 }
9121
9122 if (req->newptr) {
9123 return EINVAL;
9124 }
9125
9126 struct uthread *ut = get_bsdthread_info(current_thread());
9127 if (!ut) {
9128 return EFAULT;
9129 }
9130
9131 workq_threadreq_t kqr = ut->uu_kqr_bound;
9132 if (kqr) {
9133 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
9134 bound_id = kqr_kqworkloop(kqr)->kqwl_dynamicid;
9135 } else {
9136 bound_id = -1;
9137 }
9138 }
9139
9140 return sysctl_io_number(req, bound_id, sizeof(bound_id), NULL, NULL);
9141 }
9142
9143 SYSCTL_NODE(_kern, OID_AUTO, kevent, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
9144 "kevent information");
9145
9146 SYSCTL_PROC(_kern_kevent, OID_AUTO, bound_id,
9147 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
9148 (void *)KEVENT_SYSCTL_BOUND_ID,
9149 sizeof(kqueue_id_t), kevent_sysctl, "Q",
9150 "get the ID of the bound kqueue");
9151
9152 #endif /* DEVELOPMENT || DEBUG */