/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 *
 */
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 *	@(#)kern_event.c  1.0 (3/31/2000)
 */
#include <stdint.h>
#include <machine/atomic.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file_internal.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/syscall.h> // SYS_* constants
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/sysproto.h>
#include <sys/user.h>
#include <sys/vnode_internal.h>
#include <string.h>
#include <sys/proc_info.h>
#include <sys/codesign.h>
#include <sys/pthread_shims.h>
#include <sys/kdebug.h>
#include <os/base.h>
#include <pexpert/pexpert.h>

#include <kern/locks.h>
#include <kern/clock.h>
#include <kern/cpu_data.h>
#include <kern/policy_internal.h>
#include <kern/thread_call.h>
#include <kern/sched_prim.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/assert.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/kcdata.h>

#include <pthread/priority_private.h>
#include <pthread/workqueue_syscalls.h>
#include <pthread/workqueue_internal.h>
#include <libkern/libkern.h>

#include "net/net_str_id.h"

#include <mach/task.h>
#include <libkern/section_keywords.h>

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

#if DEVELOPMENT || DEBUG
#define KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK  (1U << 0)
#define KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS     (1U << 1)
TUNABLE(uint32_t, kevent_debug_flags, "kevent_debug", 0);
#endif

static LCK_GRP_DECLARE(kq_lck_grp, "kqueue");
SECURITY_READ_ONLY_EARLY(vm_packing_params_t) kn_kq_packing_params =
    VM_PACKING_PARAMS(KNOTE_KQ_PACKED);

extern mach_port_name_t ipc_entry_name_mask(mach_port_name_t name); /* osfmk/ipc/ipc_entry.h */
extern int cansignal(struct proc *, kauth_cred_t, struct proc *, int); /* bsd/kern/kern_sig.c */

#define KEV_EVTID(code) BSDDBG_CODE(DBG_BSD_KEVENT, (code))

/*
 * If you need accounting for KM_KQUEUE consider using
 * KALLOC_HEAP_DEFINE to define a zone view.
 */
#define KM_KQUEUE KHEAP_DEFAULT

#define KQ_EVENT NO_EVENT64

static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
    vfs_context_t ctx);
static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
    struct kevent_qos_s *kev);
static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);

static const struct fileops kqueueops = {
    .fo_type = DTYPE_KQUEUE,
    .fo_read = fo_no_read,
    .fo_write = fo_no_write,
    .fo_ioctl = fo_no_ioctl,
    .fo_select = kqueue_select,
    .fo_close = kqueue_close,
    .fo_drain = kqueue_drain,
    .fo_kqfilter = kqueue_kqfilter,
};

static inline int kevent_modern_copyout(struct kevent_qos_s *, user_addr_t *);
static int kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int result);
static void kevent_register_wait_block(struct turnstile *ts, thread_t handoff_thread,
    thread_continue_t cont, struct _kevent_register *cont_args) __dead2;
static void kevent_register_wait_return(struct _kevent_register *cont_args) __dead2;
static void kevent_register_wait_cleanup(struct knote *kn);

static struct kqtailq *kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn);
static void kqueue_threadreq_initiate(struct kqueue *kq, workq_threadreq_t, kq_index_t qos, int flags);

static void kqworkq_unbind(proc_t p, workq_threadreq_t);
static thread_qos_t kqworkq_unbind_locked(struct kqworkq *kqwq, workq_threadreq_t, thread_t thread);
static workq_threadreq_t kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index);

static void kqworkloop_unbind(struct kqworkloop *kwql);

enum kqwl_unbind_locked_mode {
    KQWL_OVERRIDE_DROP_IMMEDIATELY,
    KQWL_OVERRIDE_DROP_DELAYED,
};
static void kqworkloop_unbind_locked(struct kqworkloop *kwql, thread_t thread,
    enum kqwl_unbind_locked_mode how);
static void kqworkloop_unbind_delayed_override_drop(thread_t thread);
static kq_index_t kqworkloop_override(struct kqworkloop *kqwl);
static void kqworkloop_set_overcommit(struct kqworkloop *kqwl);
enum {
    KQWL_UTQ_NONE,
    /*
     * The wakeup qos is the qos of QUEUED knotes.
     *
     * This QoS is accounted for with the events override in the
     * kqr_override_index field. It is raised each time a new knote is queued at
     * a given QoS. The kqwl_wakeup_indexes field is a superset of the non empty
     * knote buckets and is recomputed after each event delivery.
     */
    KQWL_UTQ_UPDATE_WAKEUP_QOS,
    KQWL_UTQ_UPDATE_STAYACTIVE_QOS,
    KQWL_UTQ_RECOMPUTE_WAKEUP_QOS,
    KQWL_UTQ_UNBINDING, /* attempt to rebind */
    KQWL_UTQ_PARKING,
    /*
     * The wakeup override is for suppressed knotes that have fired again at
     * a higher QoS than the one for which they are suppressed already.
     * This override is cleared when the knote suppressed list becomes empty.
     */
    KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
    KQWL_UTQ_RESET_WAKEUP_OVERRIDE,
    /*
     * The QoS is the maximum QoS of an event enqueued on this workloop in
     * userland. It is copied from the only EVFILT_WORKLOOP knote with
     * a NOTE_WL_THREAD_REQUEST bit set allowed on this workloop. If there is no
     * such knote, this QoS is 0.
     */
    KQWL_UTQ_SET_QOS_INDEX,
    KQWL_UTQ_REDRIVE_EVENTS,
};
static void kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos);
static int kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags);

static struct knote *knote_alloc(void);
static void knote_free(struct knote *kn);
static int kq_add_knote(struct kqueue *kq, struct knote *kn,
    struct knote_lock_ctx *knlc, struct proc *p);
static struct knote *kq_find_knote_and_kq_lock(struct kqueue *kq,
    struct kevent_qos_s *kev, bool is_fd, struct proc *p);

static void knote_activate(kqueue_t kqu, struct knote *kn, int result);
static void knote_dequeue(kqueue_t kqu, struct knote *kn);

static void knote_apply_touch(kqueue_t kqu, struct knote *kn,
    struct kevent_qos_s *kev, int result);
static void knote_suppress(kqueue_t kqu, struct knote *kn);
static void knote_unsuppress(kqueue_t kqu, struct knote *kn);
static void knote_drop(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc);

// both these functions may dequeue the knote and it is up to the caller
// to enqueue the knote back
static void knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result);
static void knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp);

static ZONE_DECLARE(knote_zone, "knote zone",
    sizeof(struct knote), ZC_CACHING | ZC_ZFREE_CLEARMEM);
static ZONE_DECLARE(kqfile_zone, "kqueue file zone",
    sizeof(struct kqfile), ZC_ZFREE_CLEARMEM);
static ZONE_DECLARE(kqworkq_zone, "kqueue workq zone",
    sizeof(struct kqworkq), ZC_ZFREE_CLEARMEM);
static ZONE_DECLARE(kqworkloop_zone, "kqueue workloop zone",
    sizeof(struct kqworkloop), ZC_CACHING | ZC_ZFREE_CLEARMEM);

#define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))

static int filt_no_attach(struct knote *kn, struct kevent_qos_s *kev);
static void filt_no_detach(struct knote *kn);
static int filt_bad_event(struct knote *kn, long hint);
static int filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev);
static int filt_bad_process(struct knote *kn, struct kevent_qos_s *kev);

SECURITY_READ_ONLY_EARLY(static struct filterops) bad_filtops = {
    .f_attach = filt_no_attach,
    .f_detach = filt_no_detach,
    .f_event = filt_bad_event,
    .f_touch = filt_bad_touch,
    .f_process = filt_bad_process,
};

#if CONFIG_MEMORYSTATUS
extern const struct filterops memorystatus_filtops;
#endif /* CONFIG_MEMORYSTATUS */
extern const struct filterops fs_filtops;
extern const struct filterops sig_filtops;
extern const struct filterops machport_filtops;
extern const struct filterops pipe_nfiltops;
extern const struct filterops pipe_rfiltops;
extern const struct filterops pipe_wfiltops;
extern const struct filterops ptsd_kqops;
extern const struct filterops ptmx_kqops;
extern const struct filterops soread_filtops;
extern const struct filterops sowrite_filtops;
extern const struct filterops sock_filtops;
extern const struct filterops soexcept_filtops;
extern const struct filterops spec_filtops;
extern const struct filterops bpfread_filtops;
extern const struct filterops necp_fd_rfiltops;
extern const struct filterops fsevent_filtops;
extern const struct filterops vnode_filtops;
extern const struct filterops tty_filtops;

const static struct filterops file_filtops;
const static struct filterops kqread_filtops;
const static struct filterops proc_filtops;
const static struct filterops timer_filtops;
const static struct filterops user_filtops;
const static struct filterops workloop_filtops;

/*
 *
 * Rules for adding new filters to the system:
 * Public filters:
 * - Add a new "EVFILT_" option value to bsd/sys/event.h (typically a negative value)
 *   in the exported section of the header
 * - Update the EVFILT_SYSCOUNT value to reflect the new addition
 * - Add a filterops to the sysfilt_ops array. Public filters should be added at the end
 *   of the Public Filters section in the array.
 * Private filters:
 * - Add a new "EVFILT_" value to bsd/sys/event.h (typically a positive value)
 *   in the XNU_KERNEL_PRIVATE section of the header
 * - Update the EVFILTID_MAX value to reflect the new addition
 * - Add a filterops to the sysfilt_ops. Private filters should be added at the end of
 *   the Private filters section of the array.
 */
static_assert(EVFILTID_MAX < UINT8_MAX, "kn_filtid expects this to be true");
static const struct filterops * const sysfilt_ops[EVFILTID_MAX] = {
    /* Public Filters */
    [~EVFILT_READ] = &file_filtops,
    [~EVFILT_WRITE] = &file_filtops,
    [~EVFILT_AIO] = &bad_filtops,
    [~EVFILT_VNODE] = &file_filtops,
    [~EVFILT_PROC] = &proc_filtops,
    [~EVFILT_SIGNAL] = &sig_filtops,
    [~EVFILT_TIMER] = &timer_filtops,
    [~EVFILT_MACHPORT] = &machport_filtops,
    [~EVFILT_FS] = &fs_filtops,
    [~EVFILT_USER] = &user_filtops,
    [~EVFILT_UNUSED_11] = &bad_filtops,
    [~EVFILT_VM] = &bad_filtops,
    [~EVFILT_SOCK] = &file_filtops,
#if CONFIG_MEMORYSTATUS
    [~EVFILT_MEMORYSTATUS] = &memorystatus_filtops,
#else
    [~EVFILT_MEMORYSTATUS] = &bad_filtops,
#endif
    [~EVFILT_EXCEPT] = &file_filtops,
    [~EVFILT_WORKLOOP] = &workloop_filtops,

    /* Private filters */
    [EVFILTID_KQREAD] = &kqread_filtops,
    [EVFILTID_PIPE_N] = &pipe_nfiltops,
    [EVFILTID_PIPE_R] = &pipe_rfiltops,
    [EVFILTID_PIPE_W] = &pipe_wfiltops,
    [EVFILTID_PTSD] = &ptsd_kqops,
    [EVFILTID_SOREAD] = &soread_filtops,
    [EVFILTID_SOWRITE] = &sowrite_filtops,
    [EVFILTID_SCK] = &sock_filtops,
    [EVFILTID_SOEXCEPT] = &soexcept_filtops,
    [EVFILTID_SPEC] = &spec_filtops,
    [EVFILTID_BPFREAD] = &bpfread_filtops,
    [EVFILTID_NECP_FD] = &necp_fd_rfiltops,
    [EVFILTID_FSEVENT] = &fsevent_filtops,
    [EVFILTID_VN] = &vnode_filtops,
    [EVFILTID_TTY] = &tty_filtops,
    [EVFILTID_PTMX] = &ptmx_kqops,

    /* fake filter for detached knotes, keep last */
    [EVFILTID_DETACHED] = &bad_filtops,
};

/* waitq prepost callback */
void waitq_set__CALLING_PREPOST_HOOK__(waitq_set_prepost_hook_t *kq_hook);

static inline bool
kqr_thread_bound(workq_threadreq_t kqr)
{
    return kqr->tr_state == WORKQ_TR_STATE_BOUND;
}

static inline bool
kqr_thread_requested_pending(workq_threadreq_t kqr)
{
    workq_tr_state_t tr_state = kqr->tr_state;
    return tr_state > WORKQ_TR_STATE_IDLE && tr_state < WORKQ_TR_STATE_BOUND;
}

static inline bool
kqr_thread_requested(workq_threadreq_t kqr)
{
    return kqr->tr_state != WORKQ_TR_STATE_IDLE;
}

static inline thread_t
kqr_thread_fast(workq_threadreq_t kqr)
{
    assert(kqr_thread_bound(kqr));
    return kqr->tr_thread;
}

static inline thread_t
kqr_thread(workq_threadreq_t kqr)
{
    return kqr_thread_bound(kqr) ? kqr->tr_thread : THREAD_NULL;
}

static inline struct kqworkloop *
kqr_kqworkloop(workq_threadreq_t kqr)
{
    if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
        return __container_of(kqr, struct kqworkloop, kqwl_request);
    }
    return NULL;
}

static inline kqueue_t
kqr_kqueue(proc_t p, workq_threadreq_t kqr)
{
    kqueue_t kqu;
    if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
        kqu.kqwl = kqr_kqworkloop(kqr);
    } else {
        kqu.kqwq = p->p_fd->fd_wqkqueue;
        assert(kqr >= kqu.kqwq->kqwq_request &&
            kqr < kqu.kqwq->kqwq_request + KQWQ_NBUCKETS);
    }
    return kqu;
}

/*
 * kqueue/note lock implementations
 *
 * The kqueue lock guards the kq state, the state of its queues,
 * and the kqueue-aware status and locks of individual knotes.
 *
 * The kqueue workq lock is used to protect state guarding the
 * interaction of the kqueue with the workq. This state cannot
 * be guarded by the kq lock - as it needs to be taken when we
 * already have the waitq set lock held (during the waitq hook
 * callback). It might be better to use the waitq lock itself
 * for this, but the IRQ requirements make that difficult.
 *
 * Knote flags, filter flags, and associated data are protected
 * by the underlying object lock - and are only ever looked at
 * by calling the filter to get a [consistent] snapshot of that
 * data.
 */

static inline void
kqlock(kqueue_t kqu)
{
    lck_spin_lock(&kqu.kq->kq_lock);
}

static inline void
kqlock_held(__assert_only kqueue_t kqu)
{
    LCK_SPIN_ASSERT(&kqu.kq->kq_lock, LCK_ASSERT_OWNED);
}

static inline void
kqunlock(kqueue_t kqu)
{
    lck_spin_unlock(&kqu.kq->kq_lock);
}

static inline void
knhash_lock(struct filedesc *fdp)
{
    lck_mtx_lock(&fdp->fd_knhashlock);
}

static inline void
knhash_unlock(struct filedesc *fdp)
{
    lck_mtx_unlock(&fdp->fd_knhashlock);
}

/* wait event for knote locks */
static inline event_t
knote_lock_wev(struct knote *kn)
{
    return (event_t)(&kn->kn_hook);
}

/* wait event for kevent_register_wait_* */
static inline event64_t
knote_filt_wev64(struct knote *kn)
{
    /* kdp_workloop_sync_wait_find_owner knows about this */
    return CAST_EVENT64_T(kn);
}

/* wait event for knote_post/knote_drop */
static inline event64_t
knote_post_wev64(struct knote *kn)
{
    return CAST_EVENT64_T(&kn->kn_kevent);
}

/*!
 * @function knote_has_qos
 *
 * @brief
 * Whether the knote has a regular QoS.
 *
 * @discussion
 * kn_qos_override is:
 * - 0 on kqfiles
 * - THREAD_QOS_LAST for special buckets (stayactive, manager)
 *
 * Other values mean the knote participates in QoS propagation.
 */
static inline bool
knote_has_qos(struct knote *kn)
{
    return kn->kn_qos_override > 0 && kn->kn_qos_override < THREAD_QOS_LAST;
}

#pragma mark knote locks

/*
 * Enum used by the knote_lock_* functions.
 *
 * KNOTE_KQ_LOCK_ALWAYS
 *   The function will always return with the kq lock held.
 *
 * KNOTE_KQ_LOCK_ON_SUCCESS
 *   The function will return with the kq lock held if it was successful
 *   (knote_lock() is the only function that can fail).
 *
 * KNOTE_KQ_LOCK_ON_FAILURE
 *   The function will return with the kq lock held if it was unsuccessful
 *   (knote_lock() is the only function that can fail).
 *
 * KNOTE_KQ_UNLOCK:
 *   The function returns with the kq unlocked.
 */
enum kqlocking {
    KNOTE_KQ_LOCK_ALWAYS,
    KNOTE_KQ_LOCK_ON_SUCCESS,
    KNOTE_KQ_LOCK_ON_FAILURE,
    KNOTE_KQ_UNLOCK,
};
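
/*
 * A minimal usage sketch of the lock/unlock pairing (an assumption about
 * typical call sites, using only names defined in this file): a caller that
 * already holds the kq lock does roughly
 *
 *	struct knote_lock_ctx knlc;
 *	if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) {
 *		// knote lock held, kq lock still held
 *		knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
 *	}
 *
 * The kqlocking value only controls whether the kq lock is held on return;
 * the knote lock itself is what is acquired and released.
 */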

static struct knote_lock_ctx *
knote_lock_ctx_find(kqueue_t kqu, struct knote *kn)
{
    struct knote_lock_ctx *ctx;
    LIST_FOREACH(ctx, &kqu.kq->kq_knlocks, knlc_link) {
        if (ctx->knlc_knote == kn) {
            return ctx;
        }
    }
    panic("knote lock context not found: %p", kn);
    __builtin_trap();
}

/* slowpath of knote_lock() */
__attribute__((noinline))
static bool __result_use_check
knote_lock_slow(kqueue_t kqu, struct knote *kn,
    struct knote_lock_ctx *knlc, int kqlocking)
{
    struct knote_lock_ctx *owner_lc;
    struct uthread *uth = current_uthread();
    wait_result_t wr;

    kqlock_held(kqu);

    owner_lc = knote_lock_ctx_find(kqu, kn);
#if DEBUG || DEVELOPMENT
    knlc->knlc_state = KNOTE_LOCK_CTX_WAITING;
#endif
    owner_lc->knlc_waiters++;

    /*
     * Make our lock context visible to knote_unlock()
     */
    uth->uu_knlock = knlc;

    wr = lck_spin_sleep_with_inheritor(&kqu.kq->kq_lock, LCK_SLEEP_UNLOCK,
        knote_lock_wev(kn), owner_lc->knlc_thread,
        THREAD_UNINT | THREAD_WAIT_NOREPORT, TIMEOUT_WAIT_FOREVER);

    if (wr == THREAD_RESTART) {
        /*
         * We haven't been woken up by knote_unlock() but knote_unlock_cancel.
         * We need to cleanup the state since no one did.
         */
        uth->uu_knlock = NULL;
#if DEBUG || DEVELOPMENT
        assert(knlc->knlc_state == KNOTE_LOCK_CTX_WAITING);
        knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
#endif

        if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
            kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
            kqlock(kqu);
        }
        return false;
    } else {
        if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
            kqlocking == KNOTE_KQ_LOCK_ON_SUCCESS) {
            kqlock(kqu);
#if DEBUG || DEVELOPMENT
            /*
             * This state is set under the lock so we can't
             * really assert this unless we hold the lock.
             */
            assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
#endif
        }
        return true;
    }
}

/*
 * Attempts to take the "knote" lock.
 *
 * Called with the kqueue lock held.
 *
 * Returns true if the knote lock is acquired, false if it has been dropped
 */
static bool __result_use_check
knote_lock(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc,
    enum kqlocking kqlocking)
{
    kqlock_held(kqu);

#if DEBUG || DEVELOPMENT
    assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED);
#endif
    knlc->knlc_knote = kn;
    knlc->knlc_thread = current_thread();
    knlc->knlc_waiters = 0;

    if (__improbable(kn->kn_status & KN_LOCKED)) {
        return knote_lock_slow(kqu, kn, knlc, kqlocking);
    }

    /*
     * When the knote will be dropped, the knote lock is taken before
     * KN_DROPPING is set, and then the knote will be removed from any
     * hash table that references it before the lock is canceled.
     */
    assert((kn->kn_status & KN_DROPPING) == 0);
    LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, knlc, knlc_link);
    kn->kn_status |= KN_LOCKED;
#if DEBUG || DEVELOPMENT
    knlc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
#endif

    if (kqlocking == KNOTE_KQ_UNLOCK ||
        kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
        kqunlock(kqu);
    }
    return true;
}

/*
 * Unlocks a knote successfully locked with knote_lock().
 *
 * Called with the kqueue lock held.
 *
 * Returns with the kqueue lock held according to KNOTE_KQ_* mode.
 */
static void
knote_unlock(kqueue_t kqu, struct knote *kn,
    struct knote_lock_ctx *knlc, enum kqlocking kqlocking)
{
    kqlock_held(kqu);

    assert(knlc->knlc_knote == kn);
    assert(kn->kn_status & KN_LOCKED);
#if DEBUG || DEVELOPMENT
    assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
#endif

    LIST_REMOVE(knlc, knlc_link);

    if (knlc->knlc_waiters) {
        thread_t thread = THREAD_NULL;

        wakeup_one_with_inheritor(knote_lock_wev(kn), THREAD_AWAKENED,
            LCK_WAKE_DEFAULT, &thread);

        /*
         * knote_lock_slow() publishes the lock context of waiters
         * in uthread::uu_knlock.
         *
         * Reach out and make this context the new owner.
         */
        struct uthread *ut = get_bsdthread_info(thread);
        struct knote_lock_ctx *next_owner_lc = ut->uu_knlock;

        assert(next_owner_lc->knlc_knote == kn);
        next_owner_lc->knlc_waiters = knlc->knlc_waiters - 1;
        LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, next_owner_lc, knlc_link);
#if DEBUG || DEVELOPMENT
        next_owner_lc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
#endif
        ut->uu_knlock = NULL;
        thread_deallocate_safe(thread);
    } else {
        kn->kn_status &= ~KN_LOCKED;
    }

    if ((kn->kn_status & KN_MERGE_QOS) && !(kn->kn_status & KN_POSTING)) {
        /*
         * No f_event() in flight anymore, we can leave QoS "Merge" mode
         *
         * See knote_adjust_qos()
         */
        kn->kn_status &= ~KN_MERGE_QOS;
    }
    if (kqlocking == KNOTE_KQ_UNLOCK) {
        kqunlock(kqu);
    }
#if DEBUG || DEVELOPMENT
    knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
#endif
}

/*
 * Aborts all waiters for a knote lock, and unlocks the knote.
 *
 * Called with the kqueue lock held.
 *
 * Returns with the kqueue unlocked.
 */
static void
knote_unlock_cancel(struct kqueue *kq, struct knote *kn,
    struct knote_lock_ctx *knlc)
{
    kqlock_held(kq);

    assert(knlc->knlc_knote == kn);
    assert(kn->kn_status & KN_LOCKED);
    assert(kn->kn_status & KN_DROPPING);

    LIST_REMOVE(knlc, knlc_link);
    kn->kn_status &= ~KN_LOCKED;
    kqunlock(kq);

    if (knlc->knlc_waiters) {
        wakeup_all_with_inheritor(knote_lock_wev(kn), THREAD_RESTART);
    }
#if DEBUG || DEVELOPMENT
    knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
#endif
}

/*
 * Call the f_event hook of a given filter.
 *
 * Takes a use count to protect against concurrent drops.
 */
static void
knote_post(struct knote *kn, long hint)
{
    struct kqueue *kq = knote_get_kq(kn);
    int dropping, result;

    kqlock(kq);

    if (__improbable(kn->kn_status & (KN_DROPPING | KN_VANISHED))) {
        return kqunlock(kq);
    }

    if (__improbable(kn->kn_status & KN_POSTING)) {
        panic("KNOTE() called concurrently on knote %p", kn);
    }

    kn->kn_status |= KN_POSTING;

    kqunlock(kq);
    result = filter_call(knote_fops(kn), f_event(kn, hint));
    kqlock(kq);

    dropping = (kn->kn_status & KN_DROPPING);

    if (!dropping && (result & FILTER_ACTIVE)) {
        knote_activate(kq, kn, result);
    }

    if ((kn->kn_status & KN_LOCKED) == 0) {
        /*
         * There's no other f_* call in flight, we can leave QoS "Merge" mode.
         *
         * See knote_adjust_qos()
         */
        kn->kn_status &= ~(KN_POSTING | KN_MERGE_QOS);
    } else {
        kn->kn_status &= ~KN_POSTING;
    }

    if (__improbable(dropping)) {
        waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, knote_post_wev64(kn),
            THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
    }

    kqunlock(kq);
}
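
/*
 * For orientation (a sketch, not a definition from this file): filters reach
 * knote_post() through the KNOTE() macro on their object's klist, e.g. the
 * proc filter below is driven by calls of the form
 *
 *	KNOTE(&p->p_klist, NOTE_EXIT | hint_bits);
 *
 * issued from the owning subsystem while it holds that object's lock
 * (hint_bits is a placeholder for whatever hint data the caller packs in).
 */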

/*
 * Called by knote_drop() to wait for the last f_event() caller to be done.
 *
 * - kq locked at entry
 * - kq unlocked at exit
 */
static void
knote_wait_for_post(struct kqueue *kq, struct knote *kn)
{
    wait_result_t wr = THREAD_NOT_WAITING;

    kqlock_held(kq);

    assert(kn->kn_status & KN_DROPPING);

    if (kn->kn_status & KN_POSTING) {
        wr = waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
            knote_post_wev64(kn), THREAD_UNINT | THREAD_WAIT_NOREPORT,
            TIMEOUT_WAIT_FOREVER);
    }
    kqunlock(kq);
    if (wr == THREAD_WAITING) {
        thread_block(THREAD_CONTINUE_NULL);
    }
}

#pragma mark knote helpers for filters

OS_ALWAYS_INLINE
void
knote_set_error(struct knote *kn, int error)
{
    kn->kn_flags |= EV_ERROR;
    kn->kn_sdata = error;
}

OS_ALWAYS_INLINE
int64_t
knote_low_watermark(const struct knote *kn)
{
    return (kn->kn_sfflags & NOTE_LOWAT) ? kn->kn_sdata : 1;
}
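
/*
 * A short illustration (an assumption about typical use, mirroring how the
 * socket read/write filters apply this helper): a filter compares the amount
 * of data it could deliver against knote_low_watermark(kn), so with
 * NOTE_LOWAT and kn_sdata = 128 a read knote only becomes active once at
 * least 128 bytes are available; without NOTE_LOWAT the threshold is 1.
 */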

/*!
 * @function knote_fill_kevent_with_sdata
 *
 * @brief
 * Fills in a kevent from the current content of a knote.
 *
 * @discussion
 * This is meant to be called from filter's f_event hooks.
 * The kevent data is filled with kn->kn_sdata.
 *
 * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
 *
 * Using knote_fill_kevent is typically preferred.
 */
OS_ALWAYS_INLINE
void
knote_fill_kevent_with_sdata(struct knote *kn, struct kevent_qos_s *kev)
{
#define knote_assert_aliases(name1, offs1, name2) \
    static_assert(offsetof(struct kevent_qos_s, name1) + offs1 == \
        offsetof(struct kevent_internal_s, name2), \
        "kevent_qos_s::" #name1 " and kevent_internal_s::" #name2 "need to alias")
    /*
     * All the code makes assumptions on these aliasing,
     * so make sure we fail the build if we ever ever ever break them.
     */
    knote_assert_aliases(ident, 0, kei_ident);
#ifdef __LITTLE_ENDIAN__
    knote_assert_aliases(filter, 0, kei_filter);  // non trivial overlap
    knote_assert_aliases(filter, 1, kei_filtid);  // non trivial overlap
#else
    knote_assert_aliases(filter, 0, kei_filtid);  // non trivial overlap
    knote_assert_aliases(filter, 1, kei_filter);  // non trivial overlap
#endif
    knote_assert_aliases(flags, 0, kei_flags);
    knote_assert_aliases(qos, 0, kei_qos);
    knote_assert_aliases(udata, 0, kei_udata);
    knote_assert_aliases(fflags, 0, kei_fflags);
    knote_assert_aliases(xflags, 0, kei_sfflags); // non trivial overlap
    knote_assert_aliases(data, 0, kei_sdata);     // non trivial overlap
    knote_assert_aliases(ext, 0, kei_ext);
#undef knote_assert_aliases

    /*
     * Fix the differences between kevent_qos_s and kevent_internal_s:
     * - xflags is where kn_sfflags lives, we need to zero it
     * - fixup the high bits of `filter` where kn_filtid lives
     */
    *kev = *(struct kevent_qos_s *)&kn->kn_kevent;
    kev->xflags = 0;
    kev->filter |= 0xff00;
    if (kn->kn_flags & EV_CLEAR) {
        kn->kn_fflags = 0;
    }
}

/*!
 * @function knote_fill_kevent
 *
 * @brief
 * Fills in a kevent from the current content of a knote.
 *
 * @discussion
 * This is meant to be called from filter's f_event hooks.
 * The kevent data is filled with the passed in data.
 *
 * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
 */
OS_ALWAYS_INLINE
void
knote_fill_kevent(struct knote *kn, struct kevent_qos_s *kev, int64_t data)
{
    knote_fill_kevent_with_sdata(kn, kev);
    kev->filter = kn->kn_filter;
    kev->data = data;
}


#pragma mark file_filtops

static int
filt_fileattach(struct knote *kn, struct kevent_qos_s *kev)
{
    return fo_kqfilter(kn->kn_fp, kn, kev);
}

SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = {
    .f_isfd = 1,
    .f_attach = filt_fileattach,
};

#pragma mark kqread_filtops

#define f_flag fp_glob->fg_flag
#define f_ops fp_glob->fg_ops
#define f_data fp_glob->fg_data
#define f_lflags fp_glob->fg_lflags

static void
filt_kqdetach(struct knote *kn)
{
    struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
    struct kqueue *kq = &kqf->kqf_kqueue;

    kqlock(kq);
    KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
    kqunlock(kq);
}

static int
filt_kqueue(struct knote *kn, __unused long hint)
{
    struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

    return kq->kq_count > 0;
}

static int
filt_kqtouch(struct knote *kn, struct kevent_qos_s *kev)
{
#pragma unused(kev)
    struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
    int res;

    kqlock(kq);
    res = (kq->kq_count > 0);
    kqunlock(kq);

    return res;
}

static int
filt_kqprocess(struct knote *kn, struct kevent_qos_s *kev)
{
    struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
    int res = 0;

    kqlock(kq);
    if (kq->kq_count) {
        knote_fill_kevent(kn, kev, kq->kq_count);
        res = 1;
    }
    kqunlock(kq);

    return res;
}

SECURITY_READ_ONLY_EARLY(static struct filterops) kqread_filtops = {
    .f_isfd = 1,
    .f_detach = filt_kqdetach,
    .f_event = filt_kqueue,
    .f_touch = filt_kqtouch,
    .f_process = filt_kqprocess,
};

#pragma mark proc_filtops

static int
filt_procattach(struct knote *kn, __unused struct kevent_qos_s *kev)
{
    struct proc *p;

    assert(PID_MAX < NOTE_PDATAMASK);

    if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) {
        knote_set_error(kn, ENOTSUP);
        return 0;
    }

    p = proc_find((int)kn->kn_id);
    if (p == NULL) {
        knote_set_error(kn, ESRCH);
        return 0;
    }

    const uint32_t NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;

    if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits) {
        do {
            pid_t selfpid = proc_selfpid();

            if (p->p_ppid == selfpid) {
                break;  /* parent => ok */
            }
            if ((p->p_lflag & P_LTRACED) != 0 &&
                (p->p_oppid == selfpid)) {
                break;  /* parent-in-waiting => ok */
            }
            if (cansignal(current_proc(), kauth_cred_get(), p, SIGKILL)) {
                break;  /* allowed to signal => ok */
            }
            proc_rele(p);
            knote_set_error(kn, EACCES);
            return 0;
        } while (0);
    }

    kn->kn_proc = p;
    kn->kn_flags |= EV_CLEAR;       /* automatically set */
    kn->kn_sdata = 0;               /* incoming data is ignored */

    proc_klist_lock();

    KNOTE_ATTACH(&p->p_klist, kn);

    proc_klist_unlock();

    proc_rele(p);

    /*
     * only captures edge-triggered events after this point
     * so it can't already be fired.
     */
    return 0;
}
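
/*
 * For reference, a sketch of the userspace registration that exercises the
 * permission checks above (illustrative only; pid is an example ident):
 *
 *	struct kevent64_s kev;
 *	EV_SET64(&kev, pid, EVFILT_PROC, EV_ADD | EV_CLEAR,
 *	    NOTE_EXIT | NOTE_EXITSTATUS, 0, 0, 0, 0);
 *
 * Requesting NOTE_EXITSTATUS together with NOTE_EXIT is what triggers the
 * parent / ptrace-parent / cansignal() checks; NOTE_EXIT alone does not.
 */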


/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to. In that case,
 * the pointer to the process will have already been nulled out.
 */
static void
filt_procdetach(struct knote *kn)
{
    struct proc *p;

    proc_klist_lock();

    p = kn->kn_proc;
    if (p != PROC_NULL) {
        kn->kn_proc = PROC_NULL;
        KNOTE_DETACH(&p->p_klist, kn);
    }

    proc_klist_unlock();
}

static int
filt_procevent(struct knote *kn, long hint)
{
    u_int event;

    /* ALWAYS CALLED WITH proc_klist_lock */

    /*
     * Note: a lot of bits in hint may be obtained from the knote
     * To free some of those bits, see <rdar://problem/12592988> Freeing up
     * bits in hint for filt_procevent
     *
     * mask off extra data
     */
    event = (u_int)hint & NOTE_PCTRLMASK;

    /*
     * termination lifecycle events can happen while a debugger
     * has reparented a process, in which case notifications
     * should be quashed except to the tracing parent. When
     * the debugger reaps the child (either via wait4(2) or
     * process exit), the child will be reparented to the original
     * parent and these knotes re-fired.
     */
    if (event & NOTE_EXIT) {
        if ((kn->kn_proc->p_oppid != 0)
            && (knote_get_kq(kn)->kq_p->p_pid != kn->kn_proc->p_ppid)) {
            /*
             * This knote is not for the current ptrace(2) parent, ignore.
             */
            return 0;
        }
    }

    /*
     * if the user is interested in this event, record it.
     */
    if (kn->kn_sfflags & event) {
        kn->kn_fflags |= event;
    }

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
    if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
        kn->kn_flags |= (EV_EOF | EV_ONESHOT);
    }
#pragma clang diagnostic pop

    /*
     * The kernel has a wrapper in place that returns the same data
     * as is collected here, in kn_hook32. Any changes to how
     * NOTE_EXITSTATUS and NOTE_EXIT_DETAIL are collected
     * should also be reflected in the proc_pidnoteexit() wrapper.
     */
    if (event == NOTE_EXIT) {
        kn->kn_hook32 = 0;
        if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
            kn->kn_fflags |= NOTE_EXITSTATUS;
            kn->kn_hook32 |= (hint & NOTE_PDATAMASK);
        }
        if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
            kn->kn_fflags |= NOTE_EXIT_DETAIL;
            if ((kn->kn_proc->p_lflag &
                P_LTERM_DECRYPTFAIL) != 0) {
                kn->kn_hook32 |= NOTE_EXIT_DECRYPTFAIL;
            }
            if ((kn->kn_proc->p_lflag &
                P_LTERM_JETSAM) != 0) {
                kn->kn_hook32 |= NOTE_EXIT_MEMORY;
                switch (kn->kn_proc->p_lflag & P_JETSAM_MASK) {
                case P_JETSAM_VMPAGESHORTAGE:
                    kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
                    break;
                case P_JETSAM_VMTHRASHING:
                    kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMTHRASHING;
                    break;
                case P_JETSAM_FCTHRASHING:
                    kn->kn_hook32 |= NOTE_EXIT_MEMORY_FCTHRASHING;
                    break;
                case P_JETSAM_VNODE:
                    kn->kn_hook32 |= NOTE_EXIT_MEMORY_VNODE;
                    break;
                case P_JETSAM_HIWAT:
                    kn->kn_hook32 |= NOTE_EXIT_MEMORY_HIWAT;
                    break;
                case P_JETSAM_PID:
                    kn->kn_hook32 |= NOTE_EXIT_MEMORY_PID;
                    break;
                case P_JETSAM_IDLEEXIT:
                    kn->kn_hook32 |= NOTE_EXIT_MEMORY_IDLE;
                    break;
                }
            }
            if ((kn->kn_proc->p_csflags &
                CS_KILLED) != 0) {
                kn->kn_hook32 |= NOTE_EXIT_CSERROR;
            }
        }
    }

    /* if we have any matching state, activate the knote */
    return kn->kn_fflags != 0;
}

static int
filt_proctouch(struct knote *kn, struct kevent_qos_s *kev)
{
    int res;

    proc_klist_lock();

    /* accept new filter flags and mask off output events no longer interesting */
    kn->kn_sfflags = kev->fflags;

    /* restrict the current results to the (smaller?) set of new interest */
    /*
     * For compatibility with previous implementations, we leave kn_fflags
     * as they were before.
     */
    //kn->kn_fflags &= kn->kn_sfflags;

    res = (kn->kn_fflags != 0);

    proc_klist_unlock();

    return res;
}

static int
filt_procprocess(struct knote *kn, struct kevent_qos_s *kev)
{
    int res = 0;

    proc_klist_lock();
    if (kn->kn_fflags) {
        knote_fill_kevent(kn, kev, kn->kn_hook32);
        kn->kn_hook32 = 0;
        res = 1;
    }
    proc_klist_unlock();
    return res;
}

SECURITY_READ_ONLY_EARLY(static struct filterops) proc_filtops = {
    .f_attach = filt_procattach,
    .f_detach = filt_procdetach,
    .f_event = filt_procevent,
    .f_touch = filt_proctouch,
    .f_process = filt_procprocess,
};

#pragma mark timer_filtops

struct filt_timer_params {
    uint64_t deadline; /* deadline in abs/cont time
                        * (or 0 if NOTE_ABSOLUTE and deadline is in past) */
    uint64_t leeway;   /* leeway in abstime, or 0 if none */
    uint64_t interval; /* interval in abstime or 0 if non-repeating timer */
};

/*
 * Values stored in the knote at rest (using Mach absolute time units)
 *
 * kn->kn_thcall        where the thread_call object is stored
 * kn->kn_ext[0]        next deadline or 0 if immediate expiration
 * kn->kn_ext[1]        leeway value
 * kn->kn_sdata         interval timer: the interval
 *                      absolute/deadline timer: 0
 * kn->kn_hook32        timer state (with gencount)
 *
 * TIMER_IDLE:
 *   The timer has either never been scheduled or been cancelled.
 *   It is safe to schedule a new one in this state.
 *
 * TIMER_ARMED:
 *   The timer has been scheduled
 *
 * TIMER_FIRED
 *   The timer has fired and an event needs to be delivered.
 *   When in this state, the callout may still be running.
 *
 * TIMER_IMMEDIATE
 *   The timer has fired at registration time, and the callout was never
 *   dispatched.
 */
#define TIMER_IDLE       0x0
#define TIMER_ARMED      0x1
#define TIMER_FIRED      0x2
#define TIMER_IMMEDIATE  0x3
#define TIMER_STATE_MASK 0x3
#define TIMER_GEN_INC    0x4
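
/*
 * A sketch of how kn_hook32 is packed (derived from the masks above, not an
 * additional definition): the low two bits hold the TIMER_* state and the
 * remaining bits hold a generation count that filt_timerarm() bumps by
 * TIMER_GEN_INC, so a stale callout carrying an old (gen | state) value can
 * be recognized and ignored:
 *
 *	state = kn_hook32 & TIMER_STATE_MASK;   // IDLE/ARMED/FIRED/IMMEDIATE
 *	gen   = kn_hook32 & ~TIMER_STATE_MASK;  // changes on every re-arm
 */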

static void
filt_timer_set_params(struct knote *kn, struct filt_timer_params *params)
{
    kn->kn_ext[0] = params->deadline;
    kn->kn_ext[1] = params->leeway;
    kn->kn_sdata = params->interval;
}

/*
 * filt_timervalidate - process data from user
 *
 * Sets up the deadline, interval, and leeway from the provided user data
 *
 * Input:
 *	kn_sdata	timer deadline or interval time
 *	kn_sfflags	style of timer, unit of measurement
 *
 * Output:
 *	struct filt_timer_params to apply to the filter with
 *	filt_timer_set_params when changes are ready to be committed.
 *
 * Returns:
 *	EINVAL		Invalid user data parameters
 *	ERANGE		Various overflows with the parameters
 *
 * Called with timer filter lock held.
 */
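/*
 * For orientation, a sketch of the userspace side that this routine validates
 * (illustrative values; target_mct stands in for a hypothetical
 * mach_continuous_time() deadline):
 *
 *	struct kevent kev;
 *	// relative, repeating: fire every 5 seconds
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, NULL);
 *
 *	// absolute one-shot, Mach time units, ticking across sleep
 *	EV_SET(&kev, 2, EVFILT_TIMER, EV_ADD,
 *	    NOTE_ABSOLUTE | NOTE_MACHTIME | NOTE_MACH_CONTINUOUS_TIME,
 *	    target_mct, NULL);
 *
 * kev.fflags arrives here as kev->fflags and kev.data as kev->data.
 */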
static int
filt_timervalidate(const struct kevent_qos_s *kev,
    struct filt_timer_params *params)
{
    /*
     * There are 5 knobs that need to be chosen for a timer registration:
     *
     * A) Units of time (what is the time duration of the specified number)
     *    Absolute and interval take:
     *       NOTE_SECONDS, NOTE_USECONDS, NOTE_NSECONDS, NOTE_MACHTIME
     *    Defaults to milliseconds if not specified
     *
     * B) Clock epoch (what is the zero point of the specified number)
     *    For interval, there is none
     *    For absolute, defaults to the gettimeofday/calendar epoch
     *    With NOTE_MACHTIME, uses mach_absolute_time()
     *    With NOTE_MACHTIME and NOTE_MACH_CONTINUOUS_TIME, uses mach_continuous_time()
     *
     * C) The knote's behavior on delivery
     *    Interval timer causes the knote to arm for the next interval unless one-shot is set
     *    Absolute is a forced one-shot timer which deletes on delivery
     *    TODO: Add a way for absolute to be not forced one-shot
     *
     * D) Whether the time duration is relative to now or absolute
     *    Interval fires at now + duration when it is set up
     *    Absolute fires at now + difference between now walltime and passed in walltime
     *    With NOTE_MACHTIME it fires at an absolute MAT or MCT.
     *
     * E) Whether the timer continues to tick across sleep
     *    By default all three do not.
     *    For interval and absolute, NOTE_MACH_CONTINUOUS_TIME causes them to tick across sleep
     *    With NOTE_ABSOLUTE | NOTE_MACHTIME | NOTE_MACH_CONTINUOUS_TIME:
     *       expires when mach_continuous_time() is > the passed in value.
     */

    uint64_t multiplier;

    boolean_t use_abstime = FALSE;

    switch (kev->fflags & (NOTE_SECONDS | NOTE_USECONDS | NOTE_NSECONDS | NOTE_MACHTIME)) {
    case NOTE_SECONDS:
        multiplier = NSEC_PER_SEC;
        break;
    case NOTE_USECONDS:
        multiplier = NSEC_PER_USEC;
        break;
    case NOTE_NSECONDS:
        multiplier = 1;
        break;
    case NOTE_MACHTIME:
        multiplier = 0;
        use_abstime = TRUE;
        break;
    case 0: /* milliseconds (default) */
        multiplier = NSEC_PER_SEC / 1000;
        break;
    default:
        return EINVAL;
    }

    /* transform the leeway in kn_ext[1] to same time scale */
    if (kev->fflags & NOTE_LEEWAY) {
        uint64_t leeway_abs;

        if (use_abstime) {
            leeway_abs = (uint64_t)kev->ext[1];
        } else {
            uint64_t leeway_ns;
            if (os_mul_overflow((uint64_t)kev->ext[1], multiplier, &leeway_ns)) {
                return ERANGE;
            }

            nanoseconds_to_absolutetime(leeway_ns, &leeway_abs);
        }

        params->leeway = leeway_abs;
    } else {
        params->leeway = 0;
    }

    if (kev->fflags & NOTE_ABSOLUTE) {
        uint64_t deadline_abs;

        if (use_abstime) {
            deadline_abs = (uint64_t)kev->data;
        } else {
            uint64_t calendar_deadline_ns;

            if (os_mul_overflow((uint64_t)kev->data, multiplier, &calendar_deadline_ns)) {
                return ERANGE;
            }

            /* calendar_deadline_ns is in nanoseconds since the epoch */

            clock_sec_t seconds;
            clock_nsec_t nanoseconds;

            /*
             * Note that the conversion through wall-time is only done once.
             *
             * If the relationship between MAT and gettimeofday changes,
             * the underlying timer does not update.
             *
             * TODO: build a wall-time denominated timer_call queue
             * and a flag to request DTRTing with wall-time timers
             */
            clock_get_calendar_nanotime(&seconds, &nanoseconds);

            uint64_t calendar_now_ns = (uint64_t)seconds * NSEC_PER_SEC + nanoseconds;

            /* if deadline is in the future */
            if (calendar_now_ns < calendar_deadline_ns) {
                uint64_t interval_ns = calendar_deadline_ns - calendar_now_ns;
                uint64_t interval_abs;

                nanoseconds_to_absolutetime(interval_ns, &interval_abs);

                /*
                 * Note that the NOTE_MACH_CONTINUOUS_TIME flag here only
                 * causes the timer to keep ticking across sleep, but
                 * it does not change the calendar timebase.
                 */

                if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
                    clock_continuoustime_interval_to_deadline(interval_abs,
                        &deadline_abs);
                } else {
                    clock_absolutetime_interval_to_deadline(interval_abs,
                        &deadline_abs);
                }
            } else {
                deadline_abs = 0; /* cause immediate expiration */
            }
        }

        params->deadline = deadline_abs;
        params->interval = 0; /* NOTE_ABSOLUTE is non-repeating */
    } else if (kev->data < 0) {
        /*
         * Negative interval timers fire immediately, once.
         *
         * Ideally a negative interval would be an error, but certain clients
         * pass negative values on accident, and expect an event back.
         *
         * In the old implementation the timer would repeat with no delay
         * N times until mach_absolute_time() + (N * interval) underflowed,
         * then it would wait ~forever by accidentally arming a timer for the far future.
         *
         * We now skip the power-wasting hot spin phase and go straight to the idle phase.
         */

        params->deadline = 0; /* expire immediately */
        params->interval = 0; /* non-repeating */
    } else {
        uint64_t interval_abs = 0;

        if (use_abstime) {
            interval_abs = (uint64_t)kev->data;
        } else {
            uint64_t interval_ns;
            if (os_mul_overflow((uint64_t)kev->data, multiplier, &interval_ns)) {
                return ERANGE;
            }

            nanoseconds_to_absolutetime(interval_ns, &interval_abs);
        }

        uint64_t deadline = 0;

        if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
            clock_continuoustime_interval_to_deadline(interval_abs, &deadline);
        } else {
            clock_absolutetime_interval_to_deadline(interval_abs, &deadline);
        }

        params->deadline = deadline;
        params->interval = interval_abs;
    }

    return 0;
}
1473
39236c6e 1474/*
91447636 1475 * filt_timerexpire - the timer callout routine
91447636 1476 */
55e303ae 1477static void
f427ee49 1478filt_timerexpire(void *knx, void *state_on_arm)
55e303ae
A
1479{
1480 struct knote *kn = knx;
91447636 1481
f427ee49
A
1482 uint32_t state = (uint32_t)(uintptr_t)state_on_arm;
1483 uint32_t fired_state = state ^ TIMER_ARMED ^ TIMER_FIRED;
1484
1485 if (os_atomic_cmpxchg(&kn->kn_hook32, state, fired_state, relaxed)) {
d9a64523
A
1486 // our f_event always would say FILTER_ACTIVE,
1487 // so be leaner and just do it.
39037602 1488 struct kqueue *kq = knote_get_kq(kn);
d9a64523 1489 kqlock(kq);
cb323159 1490 knote_activate(kq, kn, FILTER_ACTIVE);
d9a64523
A
1491 kqunlock(kq);
1492 } else {
1493 /*
f427ee49
A
1494 * The timer has been reprogrammed or canceled since it was armed,
1495 * and this is a late firing for the timer, just ignore it.
d9a64523 1496 */
b0d623f7 1497 }
d9a64523 1498}
5ba3f43e 1499
d9a64523
A
1500/*
1501 * Does this deadline needs a timer armed for it, or has it expired?
1502 */
1503static bool
1504filt_timer_is_ready(struct knote *kn)
1505{
1506 uint64_t now, deadline = kn->kn_ext[0];
5ba3f43e 1507
d9a64523
A
1508 if (deadline == 0) {
1509 return true;
1510 }
5ba3f43e 1511
d9a64523
A
1512 if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
1513 now = mach_continuous_time();
1514 } else {
1515 now = mach_absolute_time();
1516 }
1517 return deadline <= now;
5ba3f43e
A
1518}
1519
d9a64523
A
1520/*
1521 * Arm a timer
1522 *
1523 * It is the responsibility of the caller to make sure the timer call
1524 * has completed or been cancelled properly prior to arming it.
1525 */
5ba3f43e
A
1526static void
1527filt_timerarm(struct knote *kn)
1528{
5ba3f43e
A
1529 uint64_t deadline = kn->kn_ext[0];
1530 uint64_t leeway = kn->kn_ext[1];
f427ee49 1531 uint32_t state;
5ba3f43e
A
1532
1533 int filter_flags = kn->kn_sfflags;
1534 unsigned int timer_flags = 0;
1535
0a7de745 1536 if (filter_flags & NOTE_CRITICAL) {
5ba3f43e 1537 timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
0a7de745 1538 } else if (filter_flags & NOTE_BACKGROUND) {
5ba3f43e 1539 timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
0a7de745 1540 } else {
5ba3f43e 1541 timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
0a7de745 1542 }
5ba3f43e 1543
0a7de745 1544 if (filter_flags & NOTE_LEEWAY) {
5ba3f43e 1545 timer_flags |= THREAD_CALL_DELAY_LEEWAY;
0a7de745 1546 }
5ba3f43e 1547
0a7de745 1548 if (filter_flags & NOTE_MACH_CONTINUOUS_TIME) {
5ba3f43e 1549 timer_flags |= THREAD_CALL_CONTINUOUS;
0a7de745 1550 }
5ba3f43e 1551
f427ee49
A
1552 /*
1553 * Move to ARMED.
1554 *
1555 * We increase the gencount, and setup the thread call with this expected
1556 * state. It means that if there was a previous generation of the timer in
1557 * flight that needs to be ignored, then 3 things are possible:
1558 *
1559 * - the timer fires first, filt_timerexpire() and sets the state to FIRED
1560 * but we clobber it with ARMED and a new gencount. The knote will still
1561 * be activated, but filt_timerprocess() which is serialized with this
1562 * call will not see the FIRED bit set and will not deliver an event.
1563 *
1564 * - this code runs first, but filt_timerexpire() comes second. Because it
1565 * knows an old gencount, it will debounce and not activate the knote.
1566 *
1567 * - filt_timerexpire() wasn't in flight yet, and thread_call_enter below
1568 * will just cancel it properly.
1569 *
1570 * This is important as userspace expects to never be woken up for past
1571 * timers after filt_timertouch ran.
1572 */
1573 state = os_atomic_load(&kn->kn_hook32, relaxed);
1574 state &= ~TIMER_STATE_MASK;
1575 state += TIMER_GEN_INC + TIMER_ARMED;
1576 os_atomic_store(&kn->kn_hook32, state, relaxed);
1577
1578 thread_call_enter_delayed_with_leeway(kn->kn_thcall,
1579 (void *)(uintptr_t)state, deadline, leeway, timer_flags);
1580}
1581
1582/*
1583 * Mark a timer as "already fired" when it is being reprogrammed
1584 *
1585 * If there is a timer in flight, this will do a best effort at canceling it,
1586 * but will not wait. If the thread call was in flight, having set the
1587 * TIMER_IMMEDIATE bit will debounce a filt_timerexpire() racing with this
1588 * cancelation.
1589 */
1590static void
1591filt_timerfire_immediate(struct knote *kn)
1592{
1593 uint32_t state;
1594
1595 static_assert(TIMER_IMMEDIATE == TIMER_STATE_MASK,
1596 "validate that this atomic or will transition to IMMEDIATE");
1597 state = os_atomic_or_orig(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
1598
1599 if ((state & TIMER_STATE_MASK) == TIMER_ARMED) {
1600 thread_call_cancel(kn->kn_thcall);
1601 }
55e303ae
A
1602}
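/*
 * [Editor's sketch, not from xnu] A miniature, single-threaded model of the
 * gencount debounce that filt_timerarm() and filt_timerfire_immediate()
 * rely on.  The M_TIMER_* constants and the model_* helpers are assumptions:
 * a 2-bit state field in the low bits with the generation counter stored
 * above it, consistent with the static_assert above
 * (TIMER_IMMEDIATE == TIMER_STATE_MASK); the real constants are defined
 * earlier in this file.
 */
#include <stdint.h>
#include <stdbool.h>

#define M_TIMER_IDLE        0x0u    /* assumed */
#define M_TIMER_ARMED       0x1u    /* assumed */
#define M_TIMER_FIRED       0x2u    /* assumed */
#define M_TIMER_STATE_MASK  0x3u    /* assumed */
#define M_TIMER_GEN_INC     0x4u    /* assumed: gencount lives above the state bits */

/* Re-arm: bump the generation and move to ARMED, like filt_timerarm(). */
static uint32_t
model_arm(uint32_t state)
{
	state &= ~M_TIMER_STATE_MASK;
	return state + M_TIMER_GEN_INC + M_TIMER_ARMED;
}

/*
 * Expiration: only the exact ARMED state (same generation) may transition to
 * FIRED; any other value means the timer was re-armed or canceled in the
 * meantime, so the firing is stale and ignored, like filt_timerexpire().
 */
static bool
model_expire(uint32_t *state, uint32_t state_on_arm)
{
	if (*state != state_on_arm) {
		return false;    /* stale generation: debounced */
	}
	*state = (state_on_arm & ~M_TIMER_STATE_MASK) | M_TIMER_FIRED;
	return true;         /* would activate the knote */
}

/*
 * Usage: arm, then re-arm before the first expiration is delivered; the late
 * expiration still carries the old state_on_arm, so model_expire() rejects it,
 * which is the "late firing, just ignore it" case at the top of this section.
 */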
1603
1604/*
b0d623f7 1605 * Allocate a thread call for the knote's lifetime, and kick off the timer.
39236c6e 1606 */
55e303ae 1607static int
cb323159 1608filt_timerattach(struct knote *kn, struct kevent_qos_s *kev)
55e303ae 1609{
91447636 1610 thread_call_t callout;
d9a64523 1611 struct filt_timer_params params;
91447636 1612 int error;
55e303ae 1613
d9a64523
A
1614 if ((error = filt_timervalidate(kev, &params)) != 0) {
1615 knote_set_error(kn, error);
1616 return 0;
1617 }
1618
5ba3f43e 1619 callout = thread_call_allocate_with_options(filt_timerexpire,
0a7de745
A
1620 (thread_call_param_t)kn, THREAD_CALL_PRIORITY_HIGH,
1621 THREAD_CALL_OPTIONS_ONCE);
5ba3f43e 1622
39037602 1623 if (NULL == callout) {
d9a64523 1624 knote_set_error(kn, ENOMEM);
39037602 1625 return 0;
91447636 1626 }
55e303ae 1627
d9a64523 1628 filt_timer_set_params(kn, &params);
cb323159 1629 kn->kn_thcall = callout;
5ba3f43e 1630 kn->kn_flags |= EV_CLEAR;
cb323159 1631 os_atomic_store(&kn->kn_hook32, TIMER_IDLE, relaxed);
55e303ae 1632
5ba3f43e 1633 /* NOTE_ABSOLUTE implies EV_ONESHOT */
0a7de745 1634 if (kn->kn_sfflags & NOTE_ABSOLUTE) {
39236c6e 1635 kn->kn_flags |= EV_ONESHOT;
0a7de745 1636 }
91447636 1637
d9a64523 1638 if (filt_timer_is_ready(kn)) {
cb323159 1639 os_atomic_store(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
d9a64523 1640 return FILTER_ACTIVE;
5ba3f43e
A
1641 } else {
1642 filt_timerarm(kn);
d9a64523 1643 return 0;
91447636 1644 }
55e303ae
A
1645}
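/*
 * [Editor's sketch, not from xnu] Userspace view of the NOTE_ABSOLUTE path
 * above, using the public <sys/event.h> API: the deadline is expressed in
 * mach_absolute_time() units via NOTE_MACHTIME, and, as filt_timerattach()
 * shows, NOTE_ABSOLUTE implies EV_ONESHOT.  The helper name and the trimmed
 * error handling are illustrative only.
 */
#include <sys/event.h>
#include <mach/mach_time.h>
#include <unistd.h>

static int
oneshot_absolute_timer(uint64_t delta_abs)
{
	int kq = kqueue();
	if (kq < 0) {
		return -1;
	}

	struct kevent kev, out;
	EV_SET(&kev, 1 /* ident */, EVFILT_TIMER, EV_ADD | EV_ENABLE,
	    NOTE_ABSOLUTE | NOTE_MACHTIME,
	    (intptr_t)(mach_absolute_time() + delta_abs), NULL);

	/* registers the one-shot timer and blocks until its deadline passes */
	int n = kevent(kq, &kev, 1, &out, 1, NULL);
	close(kq);
	return n == 1 ? 0 : -1;
}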
1646
b0d623f7
A
1647/*
1648 * Shut down the timer if it's running, and free the callout.
1649 */
55e303ae
A
1650static void
1651filt_timerdetach(struct knote *kn)
1652{
d9a64523 1653 __assert_only boolean_t freed;
b0d623f7 1654
d9a64523
A
1655 /*
1656 * Unconditionally cancel to make sure there can't be any filt_timerexpire()
1657 * running anymore.
1658 */
cb323159
A
1659 thread_call_cancel_wait(kn->kn_thcall);
1660 freed = thread_call_free(kn->kn_thcall);
5ba3f43e 1661 assert(freed);
b0d623f7
A
1662}
1663
39037602
A
1664/*
1665 * filt_timertouch - update timer knote with new user input
1666 *
1667 * Cancel and restart the timer based on new user data. When
1668 * the user picks up a knote, clear the count of how many timer
1669 * pops have gone off (in kn_data).
1670 */
1671static int
cb323159 1672filt_timertouch(struct knote *kn, struct kevent_qos_s *kev)
39037602 1673{
d9a64523
A
1674 struct filt_timer_params params;
1675 uint32_t changed_flags = (kn->kn_sfflags ^ kev->fflags);
39037602 1676 int error;
39037602 1677
d9a64523
A
1678 if (changed_flags & NOTE_ABSOLUTE) {
1679 kev->flags |= EV_ERROR;
1680 kev->data = EINVAL;
1681 return 0;
1682 }
39037602 1683
d9a64523
A
1684 if ((error = filt_timervalidate(kev, &params)) != 0) {
1685 kev->flags |= EV_ERROR;
1686 kev->data = error;
1687 return 0;
1688 }
5ba3f43e 1689
39037602 1690 /* capture the new values used to compute deadline */
d9a64523 1691 filt_timer_set_params(kn, &params);
39037602 1692 kn->kn_sfflags = kev->fflags;
39037602 1693
d9a64523 1694 if (filt_timer_is_ready(kn)) {
f427ee49 1695 filt_timerfire_immediate(kn);
d9a64523 1696 return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS;
5ba3f43e
A
1697 } else {
1698 filt_timerarm(kn);
d9a64523 1699 return FILTER_UPDATE_REQ_QOS;
39037602 1700 }
39037602
A
1701}
1702
1703/*
1704 * filt_timerprocess - query state of knote and snapshot event data
1705 *
1706 * Determine if the timer has fired in the past, snapshot the state
1707 * of the kevent for returning to user-space, and clear pending event
1708 * counters for the next time.
1709 */
1710static int
cb323159 1711filt_timerprocess(struct knote *kn, struct kevent_qos_s *kev)
39037602 1712{
f427ee49
A
1713 uint32_t state = os_atomic_load(&kn->kn_hook32, relaxed);
1714
d9a64523
A
1715 /*
1716 * filt_timerprocess is serialized with any filter routine except for
1717 * filt_timerexpire which atomically does a TIMER_ARMED -> TIMER_FIRED
1718 * transition, and on success, activates the knote.
1719 *
1720 * Hence, we don't need atomic modifications of the state, only to peek at
1721 * whether we see any of the "FIRED" state, and if we do, it is safe to
1722 * do simple state machine transitions.
1723 */
f427ee49 1724 switch (state & TIMER_STATE_MASK) {
d9a64523
A
1725 case TIMER_IDLE:
1726 case TIMER_ARMED:
5ba3f43e 1727 /*
5ba3f43e
A
1728 * This can happen if a touch resets a timer that had fired
1729 * without being processed
1730 */
39037602 1731 return 0;
b0d623f7 1732 }
91447636 1733
f427ee49 1734 os_atomic_store(&kn->kn_hook32, state & ~TIMER_STATE_MASK, relaxed);
d9a64523
A
1735
1736 /*
1737 * Copy out the interesting kevent state,
1738 * but don't leak out the raw time calculations.
1739 *
1740 * TODO: potential enhancements - tell the user about:
1741 * - deadline to which this timer thought it was expiring
1742 * - return kn_sfflags in the fflags field so the client can know
1743 * under what flags the timer fired
1744 */
cb323159 1745 knote_fill_kevent(kn, kev, 1);
d9a64523
A
1746 kev->ext[0] = 0;
1747 /* kev->ext[1] = 0; JMM - shouldn't we hide this too? */
1748
cb323159 1749 if (kn->kn_sdata != 0) {
5ba3f43e
A
1750 /*
1751 * This is a 'repeating' timer, so we have to emit
1752 * how many intervals expired between the arm
1753 * and the process.
1754 *
1755 * A very strange style of interface, because
1756 * this could easily be done in the client...
1757 */
1758
5ba3f43e
A
1759 uint64_t now;
1760
0a7de745 1761 if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
5ba3f43e 1762 now = mach_continuous_time();
0a7de745 1763 } else {
5ba3f43e 1764 now = mach_absolute_time();
0a7de745 1765 }
5ba3f43e
A
1766
1767 uint64_t first_deadline = kn->kn_ext[0];
1768 uint64_t interval_abs = kn->kn_sdata;
1769 uint64_t orig_arm_time = first_deadline - interval_abs;
1770
1771 assert(now > orig_arm_time);
1772 assert(now > first_deadline);
1773
1774 uint64_t elapsed = now - orig_arm_time;
1775
1776 uint64_t num_fired = elapsed / interval_abs;
1777
1778 /*
1779 * To reach this code, we must have seen the timer pop
1780 * and be in repeating mode, so it must have been
1781 * more than 'interval' time since the attach or last
1782 * successful touch.
5ba3f43e
A
1783 */
1784 assert(num_fired > 0);
1785
1786 /* report how many intervals have elapsed to the user */
d9a64523 1787 kev->data = (int64_t)num_fired;
5ba3f43e
A
1788
1789 /* We only need to re-arm the timer if it's not about to be destroyed */
1790 if ((kn->kn_flags & EV_ONESHOT) == 0) {
1791 /* fire at the end of the next interval */
1792 uint64_t new_deadline = first_deadline + num_fired * interval_abs;
1793
1794 assert(new_deadline > now);
1795
1796 kn->kn_ext[0] = new_deadline;
1797
d9a64523
A
1798 /*
1799 * This can't shortcut setting up the thread call, because
1800 * knote_process deactivates EV_CLEAR knotes unconditionally.
1801 */
5ba3f43e
A
1802 filt_timerarm(kn);
1803 }
1804 }
1805
d9a64523 1806 return FILTER_ACTIVE;
91447636
A
1807}
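/*
 * [Editor's note] Worked example of the interval math above, with made-up
 * numbers in abstract time units: suppose interval_abs = 100 and the knote
 * was armed at t = 1000, so first_deadline = 1100.  If the event is only
 * processed at now = 1350, then:
 *
 *     elapsed      = now - orig_arm_time   = 1350 - 1000 = 350
 *     num_fired    = elapsed / interval    = 350 / 100   = 3   (reported in kev->data)
 *     new_deadline = first_deadline + 3 * interval = 1400 > now
 *
 * so userspace sees "3 intervals elapsed" and, unless EV_ONESHOT is set, the
 * timer is re-armed for the end of the interval currently in progress.
 */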
1808
5ba3f43e 1809SECURITY_READ_ONLY_EARLY(static struct filterops) timer_filtops = {
d9a64523 1810 .f_extended_codes = true,
5ba3f43e
A
1811 .f_attach = filt_timerattach,
1812 .f_detach = filt_timerdetach,
cb323159 1813 .f_event = filt_bad_event,
5ba3f43e
A
1814 .f_touch = filt_timertouch,
1815 .f_process = filt_timerprocess,
1816};
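/*
 * [Editor's sketch, not from xnu] Driving the timer filter above from
 * userspace through the public kevent64() API: the interval goes in data
 * (units picked by fflags), and with NOTE_LEEWAY the leeway goes in ext[1],
 * matching the kn_ext[1] read by filt_timerarm().  The helper name is
 * illustrative and error handling is minimal.
 */
#include <sys/event.h>
#include <stdio.h>
#include <unistd.h>

static int
repeating_timer_example(void)
{
	int kq = kqueue();
	if (kq < 0) {
		return -1;
	}

	struct kevent64_s kev, out;
	/* repeating 500ms timer with 50ms of allowed leeway */
	EV_SET64(&kev, 1 /* ident */, EVFILT_TIMER, EV_ADD | EV_ENABLE,
	    NOTE_USECONDS | NOTE_LEEWAY, 500000 /* data: interval */,
	    0 /* udata */, 0 /* ext[0] */, 50000 /* ext[1]: leeway */);

	if (kevent64(kq, &kev, 1, NULL, 0, 0, NULL) < 0) {
		close(kq);
		return -1;
	}

	/* block for the next firing; data reports how many intervals elapsed */
	if (kevent64(kq, NULL, 0, &out, 1, 0, NULL) == 1) {
		printf("timer fired, %lld interval(s) elapsed\n", (long long)out.data);
	}
	close(kq);
	return 0;
}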
1817
d9a64523 1818#pragma mark user_filtops
39037602 1819
b0d623f7 1820static int
cb323159 1821filt_userattach(struct knote *kn, __unused struct kevent_qos_s *kev)
b0d623f7 1822{
5ba3f43e 1823 if (kn->kn_sfflags & NOTE_TRIGGER) {
cb323159 1824 kn->kn_hook32 = FILTER_ACTIVE;
b0d623f7 1825 } else {
cb323159 1826 kn->kn_hook32 = 0;
b0d623f7 1827 }
cb323159 1828 return kn->kn_hook32;
b0d623f7
A
1829}
1830
1831static int
cb323159 1832filt_usertouch(struct knote *kn, struct kevent_qos_s *kev)
b0d623f7 1833{
39236c6e 1834 uint32_t ffctrl;
39037602 1835 int fflags;
39037602
A
1836
1837 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
1838 fflags = kev->fflags & NOTE_FFLAGSMASK;
1839 switch (ffctrl) {
1840 case NOTE_FFNOP:
39236c6e 1841 break;
39037602
A
1842 case NOTE_FFAND:
1843 kn->kn_sfflags &= fflags;
39236c6e 1844 break;
39037602
A
1845 case NOTE_FFOR:
1846 kn->kn_sfflags |= fflags;
1847 break;
1848 case NOTE_FFCOPY:
1849 kn->kn_sfflags = fflags;
39236c6e
A
1850 break;
1851 }
39037602
A
1852 kn->kn_sdata = kev->data;
1853
39037602 1854 if (kev->fflags & NOTE_TRIGGER) {
cb323159 1855 kn->kn_hook32 = FILTER_ACTIVE;
39037602 1856 }
cb323159 1857 return (int)kn->kn_hook32;
39037602
A
1858}
1859
1860static int
cb323159 1861filt_userprocess(struct knote *kn, struct kevent_qos_s *kev)
39037602 1862{
cb323159 1863 int result = (int)kn->kn_hook32;
39037602 1864
d9a64523 1865 if (result) {
cb323159
A
1866 /* EVFILT_USER returns the data that was passed in */
1867 knote_fill_kevent_with_sdata(kn, kev);
d9a64523 1868 kev->fflags = kn->kn_sfflags;
d9a64523 1869 if (kn->kn_flags & EV_CLEAR) {
cb323159
A
1870 /* knote_fill_kevent cleared kn_fflags */
1871 kn->kn_hook32 = 0;
d9a64523 1872 }
39037602 1873 }
39037602 1874
d9a64523 1875 return result;
b0d623f7
A
1876}
1877
d9a64523
A
1878SECURITY_READ_ONLY_EARLY(static struct filterops) user_filtops = {
1879 .f_extended_codes = true,
1880 .f_attach = filt_userattach,
cb323159
A
1881 .f_detach = filt_no_detach,
1882 .f_event = filt_bad_event,
d9a64523
A
1883 .f_touch = filt_usertouch,
1884 .f_process = filt_userprocess,
1885};
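/*
 * [Editor's sketch, not from xnu] EVFILT_USER, as implemented by user_filtops
 * above, seen from userspace: one kevent() call posts NOTE_TRIGGER and ORs
 * user flags in with NOTE_FFOR, and the waiter receives an event whose
 * fflags carry the accumulated bits.  The helper name is illustrative.
 */
#include <sys/event.h>
#include <unistd.h>

static int
user_event_example(void)
{
	int kq = kqueue();
	if (kq < 0) {
		return -1;
	}

	struct kevent kev, out;
	/* register; EV_CLEAR so the trigger state is reset once delivered */
	EV_SET(&kev, 42 /* ident */, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0) {
		close(kq);
		return -1;
	}

	/* later, from any thread: fire it, OR-ing user flag 0x1 into fflags */
	EV_SET(&kev, 42, EVFILT_USER, 0, NOTE_TRIGGER | NOTE_FFOR | 0x1, 0, NULL);
	(void)kevent(kq, &kev, 1, NULL, 0, NULL);

	/* the waiter gets one event whose fflags include the 0x1 ORed above */
	int n = kevent(kq, NULL, 0, &out, 1, NULL);
	close(kq);
	return n == 1 ? 0 : -1;
}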
5ba3f43e 1886
d9a64523 1887#pragma mark workloop_filtops
5ba3f43e 1888
cb323159
A
1889#define EPREEMPTDISABLED (-1)
1890
5ba3f43e
A
1891static inline void
1892filt_wllock(struct kqworkloop *kqwl)
55e303ae 1893{
cb323159 1894 lck_spin_lock(&kqwl->kqwl_statelock);
55e303ae
A
1895}
1896
5ba3f43e
A
1897static inline void
1898filt_wlunlock(struct kqworkloop *kqwl)
91447636 1899{
cb323159 1900 lck_spin_unlock(&kqwl->kqwl_statelock);
5ba3f43e 1901}
91447636 1902
d9a64523
A
1903/*
1904 * Returns true when the interlock for the turnstile is the workqueue lock
1905 *
1906 * When this is the case, all turnstile operations are delegated
1907 * to the workqueue subsystem.
1908 *
1909 * This is required because kqueue_threadreq_bind_prepost only holds the
1910 * workqueue lock but needs to move the inheritor from the workloop turnstile
1911 * away from the creator thread, so that this now-fulfilled request cannot be
1912 * picked up anymore by other threads.
1913 */
5ba3f43e 1914static inline bool
d9a64523 1915filt_wlturnstile_interlock_is_workq(struct kqworkloop *kqwl)
5ba3f43e 1916{
cb323159 1917 return kqr_thread_requested_pending(&kqwl->kqwl_request);
5ba3f43e 1918}
39037602 1919
d9a64523
A
1920static void
1921filt_wlupdate_inheritor(struct kqworkloop *kqwl, struct turnstile *ts,
0a7de745 1922 turnstile_update_flags_t flags)
5ba3f43e 1923{
d9a64523 1924 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
cb323159 1925 workq_threadreq_t kqr = &kqwl->kqwl_request;
39037602 1926
d9a64523
A
1927 /*
1928 * binding to the workq should always happen through
1929 * workq_kern_threadreq_update_inheritor()
1930 */
1931 assert(!filt_wlturnstile_interlock_is_workq(kqwl));
39037602 1932
d9a64523
A
1933 if ((inheritor = kqwl->kqwl_owner)) {
1934 flags |= TURNSTILE_INHERITOR_THREAD;
cb323159 1935 } else if ((inheritor = kqr_thread(kqr))) {
d9a64523 1936 flags |= TURNSTILE_INHERITOR_THREAD;
5ba3f43e 1937 }
d9a64523
A
1938
1939 turnstile_update_inheritor(ts, inheritor, flags);
5ba3f43e 1940}
39037602 1941
cb323159 1942#define EVFILT_WORKLOOP_EFAULT_RETRY_COUNT 100
d9a64523
A
1943#define FILT_WLATTACH 0
1944#define FILT_WLTOUCH 1
1945#define FILT_WLDROP 2
1946
5ba3f43e
A
1947__result_use_check
1948static int
d9a64523 1949filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn,
cb323159 1950 struct kevent_qos_s *kev, kq_index_t qos_index, int op)
5ba3f43e 1951{
d9a64523 1952 user_addr_t uaddr = CAST_USER_ADDR_T(kev->ext[EV_EXTIDX_WL_ADDR]);
cb323159 1953 workq_threadreq_t kqr = &kqwl->kqwl_request;
5ba3f43e 1954 thread_t cur_owner, new_owner, extra_thread_ref = THREAD_NULL;
cb323159
A
1955 kq_index_t cur_override = THREAD_QOS_UNSPECIFIED;
1956 int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
d9a64523 1957 int action = KQWL_UTQ_NONE, error = 0;
cb323159 1958 bool wl_inheritor_updated = false, needs_wake = false;
d9a64523
A
1959 uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
1960 uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
1961 uint64_t udata = 0;
cb323159 1962 struct turnstile *ts = TURNSTILE_NULL;
d9a64523 1963
cb323159 1964 filt_wllock(kqwl);
39037602 1965
cb323159
A
1966again:
1967 new_owner = cur_owner = kqwl->kqwl_owner;
91447636 1968
5ba3f43e 1969 /*
d9a64523
A
1970 * Phase 1:
1971 *
1972 * If asked, load the uint64 value at the user provided address and compare
1973 * it against the passed in mask and expected value.
1974 *
1975 * If NOTE_WL_DISCOVER_OWNER is specified, translate the loaded name as
1976 * a thread reference.
1977 *
1978 * If NOTE_WL_END_OWNERSHIP is specified and the currently known owner is
1979 * the current thread, then end ownership.
1980 *
1981 * Lastly decide whether we need to perform a QoS update.
5ba3f43e 1982 */
d9a64523 1983 if (uaddr) {
cb323159
A
1984 /*
1985 * Until <rdar://problem/24999882> exists,
1986 * a copyin with preemption disabled forces any
1987 * vm_fault we encounter to fail.
1988 */
1989 error = copyin_atomic64(uaddr, &udata);
1990
1991 /*
1992 * If we get EFAULT, drop locks, and retry.
1993 * If we still get an error report it,
1994 * else assume the memory has been faulted
1995 * and attempt to copyin under lock again.
1996 */
1997 switch (error) {
1998 case 0:
1999 break;
2000 case EFAULT:
2001 if (efault_retry-- > 0) {
2002 filt_wlunlock(kqwl);
2003 error = copyin_atomic64(uaddr, &udata);
2004 filt_wllock(kqwl);
2005 if (error == 0) {
2006 goto again;
2007 }
2008 }
f427ee49 2009 OS_FALLTHROUGH;
cb323159 2010 default:
d9a64523
A
2011 goto out;
2012 }
2013
2014 /* Update state as copied in. */
2015 kev->ext[EV_EXTIDX_WL_VALUE] = udata;
2016
2017 if ((udata & mask) != (kdata & mask)) {
2018 error = ESTALE;
2019 } else if (kev->fflags & NOTE_WL_DISCOVER_OWNER) {
5ba3f43e 2020 /*
d9a64523
A
2021 * Decipher the owner port name, and translate accordingly.
2022 * The low 2 bits were borrowed for other flags, so mask them off.
2023 *
2024 * Then attempt translation to a thread reference or fail.
5ba3f43e 2025 */
d9a64523
A
2026 mach_port_name_t name = (mach_port_name_t)udata & ~0x3;
2027 if (name != MACH_PORT_NULL) {
2028 name = ipc_entry_name_mask(name);
cb323159
A
2029 extra_thread_ref = port_name_to_thread(name,
2030 PORT_TO_THREAD_IN_CURRENT_TASK);
d9a64523
A
2031 if (extra_thread_ref == THREAD_NULL) {
2032 error = EOWNERDEAD;
2033 goto out;
2034 }
2035 new_owner = extra_thread_ref;
2036 }
5ba3f43e 2037 }
91447636
A
2038 }
2039
d9a64523
A
2040 if ((kev->fflags & NOTE_WL_END_OWNERSHIP) && new_owner == current_thread()) {
2041 new_owner = THREAD_NULL;
2042 }
2043
2044 if (error == 0) {
2045 if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && (kev->flags & EV_DELETE)) {
2046 action = KQWL_UTQ_SET_QOS_INDEX;
cb323159 2047 } else if (qos_index && kqr->tr_kq_qos_index != qos_index) {
d9a64523
A
2048 action = KQWL_UTQ_SET_QOS_INDEX;
2049 }
2050
2051 if (op == FILT_WLTOUCH) {
2052 /*
2053 * Save off any additional fflags/data we just accepted,
2054 * but only keep the last round of "update" bits we acted on,
2055 * which helps debugging a lot.
2056 */
2057 kn->kn_sfflags &= ~NOTE_WL_UPDATES_MASK;
2058 kn->kn_sfflags |= kev->fflags;
d9a64523 2059 if (kev->fflags & NOTE_WL_SYNC_WAKE) {
cb323159 2060 needs_wake = (kn->kn_thread != THREAD_NULL);
d9a64523
A
2061 }
2062 } else if (op == FILT_WLDROP) {
2063 if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) ==
0a7de745 2064 NOTE_WL_SYNC_WAIT) {
d9a64523
A
2065 /*
2066 * When deleting a SYNC_WAIT knote that hasn't been woken up
2067 * explicitly, issue a wake up.
2068 */
2069 kn->kn_sfflags |= NOTE_WL_SYNC_WAKE;
cb323159 2070 needs_wake = (kn->kn_thread != THREAD_NULL);
d9a64523
A
2071 }
2072 }
5ba3f43e 2073 }
d9a64523
A
2074
2075 /*
2076 * Phase 2:
2077 *
2078 * Commit ownership and QoS changes if any, possibly wake up waiters
2079 */
2080
2081 if (cur_owner == new_owner && action == KQWL_UTQ_NONE && !needs_wake) {
5ba3f43e
A
2082 goto out;
2083 }
91447636 2084
cb323159 2085 kqlock(kqwl);
3e170ce0 2086
5ba3f43e 2087 /* If already tracked as servicer, don't track as owner */
cb323159 2088 if (new_owner == kqr_thread(kqr)) {
d9a64523 2089 new_owner = THREAD_NULL;
5ba3f43e 2090 }
3e170ce0 2091
5ba3f43e
A
2092 if (cur_owner != new_owner) {
2093 kqwl->kqwl_owner = new_owner;
2094 if (new_owner == extra_thread_ref) {
2095 /* we just transferred this ref to kqwl_owner */
2096 extra_thread_ref = THREAD_NULL;
2097 }
cb323159 2098 cur_override = kqworkloop_override(kqwl);
5ba3f43e 2099
d9a64523 2100 if (new_owner) {
5ba3f43e 2101 /* override it before we drop the old */
cb323159
A
2102 if (cur_override != THREAD_QOS_UNSPECIFIED) {
2103 thread_add_kevent_override(new_owner, cur_override);
5ba3f43e 2104 }
cb323159 2105 if (kqr_thread_requested_pending(kqr)) {
5ba3f43e
A
2106 if (action == KQWL_UTQ_NONE) {
2107 action = KQWL_UTQ_REDRIVE_EVENTS;
91447636 2108 }
91447636 2109 }
d9a64523 2110 } else {
cb323159 2111 if (!kqr_thread_requested(kqr) && kqr->tr_kq_wakeup) {
5ba3f43e
A
2112 if (action == KQWL_UTQ_NONE) {
2113 action = KQWL_UTQ_REDRIVE_EVENTS;
91447636 2114 }
91447636
A
2115 }
2116 }
2117 }
b0d623f7 2118
5ba3f43e 2119 if (action != KQWL_UTQ_NONE) {
d9a64523 2120 kqworkloop_update_threads_qos(kqwl, action, qos_index);
5ba3f43e 2121 }
39037602 2122
cb323159 2123 ts = kqwl->kqwl_turnstile;
d9a64523
A
2124 if (cur_owner != new_owner && ts) {
2125 if (action == KQWL_UTQ_REDRIVE_EVENTS) {
2126 /*
2127 * Note that when action is KQWL_UTQ_REDRIVE_EVENTS,
2128 * the code went through workq_kern_threadreq_initiate()
2129 * and the workqueue has set the inheritor already
2130 */
2131 assert(filt_wlturnstile_interlock_is_workq(kqwl));
2132 } else if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2133 workq_kern_threadreq_lock(kqwl->kqwl_p);
2134 workq_kern_threadreq_update_inheritor(kqwl->kqwl_p, kqr, new_owner,
0a7de745 2135 ts, TURNSTILE_IMMEDIATE_UPDATE);
d9a64523
A
2136 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2137 if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
2138 /*
2139 * If the workq is no longer the interlock, then
2140 * workq_kern_threadreq_update_inheritor() has finished a bind
2141 * and we need to fallback to the regular path.
2142 */
2143 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
2144 }
2145 wl_inheritor_updated = true;
2146 } else {
2147 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
2148 wl_inheritor_updated = true;
5ba3f43e 2149 }
d9a64523
A
2150
2151 /*
2152 * We need a turnstile reference because we are dropping the interlock
2153 * and the caller has not called turnstile_prepare.
2154 */
2155 if (wl_inheritor_updated) {
2156 turnstile_reference(ts);
5ba3f43e 2157 }
5ba3f43e 2158 }
39037602 2159
d9a64523 2160 if (needs_wake && ts) {
cb323159
A
2161 waitq_wakeup64_thread(&ts->ts_waitq, knote_filt_wev64(kn),
2162 kn->kn_thread, THREAD_AWAKENED);
2163 if (op == FILT_WLATTACH || op == FILT_WLTOUCH) {
2164 disable_preemption();
2165 error = EPREEMPTDISABLED;
2166 }
39037602 2167 }
91447636 2168
cb323159 2169 kqunlock(kqwl);
91447636 2170
d9a64523
A
2171out:
2172 /*
2173 * Phase 3:
2174 *
2175 * Unlock and cleanup various lingering references and things.
2176 */
cb323159 2177 filt_wlunlock(kqwl);
91447636 2178
d9a64523
A
2179#if CONFIG_WORKLOOP_DEBUG
2180 KQWL_HISTORY_WRITE_ENTRY(kqwl, {
2181 .updater = current_thread(),
cb323159 2182 .servicer = kqr_thread(kqr), /* Note: racy */
d9a64523
A
2183 .old_owner = cur_owner,
2184 .new_owner = new_owner,
91447636 2185
d9a64523
A
2186 .kev_ident = kev->ident,
2187 .error = (int16_t)error,
2188 .kev_flags = kev->flags,
2189 .kev_fflags = kev->fflags,
91447636 2190
d9a64523
A
2191 .kev_mask = mask,
2192 .kev_value = kdata,
2193 .in_value = udata,
2194 });
2195#endif // CONFIG_WORKLOOP_DEBUG
5ba3f43e 2196
cb323159
A
2197 if (wl_inheritor_updated) {
2198 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
2199 turnstile_deallocate_safe(ts);
2200 }
2201
d9a64523 2202 if (cur_owner && new_owner != cur_owner) {
cb323159
A
2203 if (cur_override != THREAD_QOS_UNSPECIFIED) {
2204 thread_drop_kevent_override(cur_owner);
5ba3f43e 2205 }
cb323159 2206 thread_deallocate_safe(cur_owner);
5ba3f43e 2207 }
d9a64523 2208 if (extra_thread_ref) {
cb323159 2209 thread_deallocate_safe(extra_thread_ref);
d9a64523
A
2210 }
2211 return error;
5ba3f43e
A
2212}
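/*
 * [Editor's note] Worked example of the debounce check in Phase 1 above, with
 * made-up values: userspace passes the state word it last observed in
 * ext[EV_EXTIDX_WL_VALUE] (kdata) and the bits it cares about in
 * ext[EV_EXTIDX_WL_MASK] (mask).  With
 *
 *     mask  = 0x00000000ffffffff
 *     kdata = 0x0000000000001203
 *     udata = 0x0000000000001207   (loaded from uaddr by the kernel)
 *
 * (udata & mask) != (kdata & mask), so the word changed underneath the caller
 * and the request fails with ESTALE (which userspace may choose to swallow
 * with NOTE_WL_IGNORE_ESTALE).  The freshly loaded value is still reported
 * back through kev->ext[EV_EXTIDX_WL_VALUE].
 */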
2213
2214/*
2215 * Remembers the last update that came in from userspace for debugging reasons.
2216 * - fflags is mirrored from the userspace kevent
2217 * - ext[i, i != VALUE] is mirrored from the userspace kevent
2218 * - ext[VALUE] is set to what the kernel loaded atomically
2219 * - data is set to the error if any
2220 */
2221static inline void
cb323159 2222filt_wlremember_last_update(struct knote *kn, struct kevent_qos_s *kev,
0a7de745 2223 int error)
5ba3f43e 2224{
5ba3f43e 2225 kn->kn_fflags = kev->fflags;
cb323159 2226 kn->kn_sdata = error;
5ba3f43e
A
2227 memcpy(kn->kn_ext, kev->ext, sizeof(kev->ext));
2228}
2229
5ba3f43e 2230static int
cb323159
A
2231filt_wlupdate_sync_ipc(struct kqworkloop *kqwl, struct knote *kn,
2232 struct kevent_qos_s *kev, int op)
5ba3f43e 2233{
f427ee49 2234 user_addr_t uaddr = (user_addr_t) kev->ext[EV_EXTIDX_WL_ADDR];
cb323159
A
2235 uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
2236 uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
2237 uint64_t udata = 0;
2238 int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
5ba3f43e 2239 int error = 0;
5ba3f43e 2240
cb323159
A
2241 if (op == FILT_WLATTACH) {
2242 (void)kqueue_alloc_turnstile(&kqwl->kqwl_kqueue);
2243 } else if (uaddr == 0) {
2244 return 0;
5ba3f43e
A
2245 }
2246
cb323159 2247 filt_wllock(kqwl);
5ba3f43e 2248
cb323159
A
2249again:
2250
2251 /*
2252 * Do the debounce thing, the lock serializing the state is the knote lock.
2253 */
2254 if (uaddr) {
2255 /*
2256 * Until <rdar://problem/24999882> exists,
2257 * disabling preemption copyin forces any
2258 * vm_fault we encounter to fail.
2259 */
2260 error = copyin_atomic64(uaddr, &udata);
2261
2262 /*
2263 * If we get EFAULT, drop locks, and retry.
2264 * If we still get an error report it,
2265 * else assume the memory has been faulted
2266 * and attempt to copyin under lock again.
2267 */
2268 switch (error) {
2269 case 0:
2270 break;
2271 case EFAULT:
2272 if (efault_retry-- > 0) {
2273 filt_wlunlock(kqwl);
2274 error = copyin_atomic64(uaddr, &udata);
2275 filt_wllock(kqwl);
2276 if (error == 0) {
2277 goto again;
2278 }
2279 }
f427ee49 2280 OS_FALLTHROUGH;
cb323159 2281 default:
5ba3f43e
A
2282 goto out;
2283 }
cb323159
A
2284
2285 kev->ext[EV_EXTIDX_WL_VALUE] = udata;
2286 kn->kn_ext[EV_EXTIDX_WL_VALUE] = udata;
2287
2288 if ((udata & mask) != (kdata & mask)) {
2289 error = ESTALE;
2290 goto out;
2291 }
2292 }
2293
2294 if (op == FILT_WLATTACH) {
2295 error = filt_wlattach_sync_ipc(kn);
2296 if (error == 0) {
2297 disable_preemption();
2298 error = EPREEMPTDISABLED;
2299 }
2300 }
2301
2302out:
2303 filt_wlunlock(kqwl);
2304 return error;
2305}
2306
2307static int
2308filt_wlattach(struct knote *kn, struct kevent_qos_s *kev)
2309{
2310 struct kqueue *kq = knote_get_kq(kn);
2311 struct kqworkloop *kqwl = (struct kqworkloop *)kq;
2312 int error = 0, result = 0;
2313 kq_index_t qos_index = 0;
2314
2315 if (__improbable((kq->kq_state & KQ_WORKLOOP) == 0)) {
2316 error = ENOTSUP;
2317 goto out;
2318 }
2319
2320 uint32_t command = (kn->kn_sfflags & NOTE_WL_COMMANDS_MASK);
2321 switch (command) {
2322 case NOTE_WL_THREAD_REQUEST:
2323 if (kn->kn_id != kqwl->kqwl_dynamicid) {
2324 error = EINVAL;
2325 goto out;
2326 }
2327 qos_index = _pthread_priority_thread_qos(kn->kn_qos);
2328 if (qos_index == THREAD_QOS_UNSPECIFIED) {
2329 error = ERANGE;
2330 goto out;
2331 }
2332 if (kqwl->kqwl_request.tr_kq_qos_index) {
2333 /*
2334 * There already is a thread request, and well, you're only allowed
2335 * one per workloop, so fail the attach.
2336 */
2337 error = EALREADY;
d9a64523
A
2338 goto out;
2339 }
5ba3f43e
A
2340 break;
2341 case NOTE_WL_SYNC_WAIT:
2342 case NOTE_WL_SYNC_WAKE:
5ba3f43e
A
2343 if (kn->kn_id == kqwl->kqwl_dynamicid) {
2344 error = EINVAL;
2345 goto out;
2346 }
2347 if ((kn->kn_flags & EV_DISABLE) == 0) {
2348 error = EINVAL;
2349 goto out;
2350 }
2351 if (kn->kn_sfflags & NOTE_WL_END_OWNERSHIP) {
2352 error = EINVAL;
2353 goto out;
2354 }
2355 break;
cb323159
A
2356
2357 case NOTE_WL_SYNC_IPC:
2358 if ((kn->kn_flags & EV_DISABLE) == 0) {
2359 error = EINVAL;
2360 goto out;
2361 }
2362 if (kn->kn_sfflags & (NOTE_WL_UPDATE_QOS | NOTE_WL_DISCOVER_OWNER)) {
2363 error = EINVAL;
2364 goto out;
2365 }
2366 break;
5ba3f43e
A
2367 default:
2368 error = EINVAL;
2369 goto out;
2370 }
2371
cb323159
A
2372 if (command == NOTE_WL_SYNC_IPC) {
2373 error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLATTACH);
2374 } else {
2375 error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLATTACH);
2376 }
5ba3f43e 2377
cb323159
A
2378 if (error == EPREEMPTDISABLED) {
2379 error = 0;
2380 result = FILTER_THREADREQ_NODEFEER;
2381 }
5ba3f43e
A
2382out:
2383 if (error) {
5ba3f43e
A
2384 /* If userland wants ESTALE to be hidden, fail the attach anyway */
2385 if (error == ESTALE && (kn->kn_sfflags & NOTE_WL_IGNORE_ESTALE)) {
2386 error = 0;
2387 }
d9a64523 2388 knote_set_error(kn, error);
cb323159 2389 return result;
5ba3f43e 2390 }
d9a64523 2391 if (command == NOTE_WL_SYNC_WAIT) {
cb323159 2392 return kevent_register_wait_prepare(kn, kev, result);
d9a64523 2393 }
5ba3f43e 2394 /* Just attaching the thread request successfully will fire it */
d9a64523
A
2395 if (command == NOTE_WL_THREAD_REQUEST) {
2396 /*
2397 * Thread Request knotes need an explicit touch to be active again,
2398 * so delivering an event needs to also consume it.
2399 */
2400 kn->kn_flags |= EV_CLEAR;
cb323159 2401 return result | FILTER_ACTIVE;
d9a64523 2402 }
cb323159 2403 return result;
5ba3f43e
A
2404}
2405
d9a64523
A
2406static void __dead2
2407filt_wlwait_continue(void *parameter, wait_result_t wr)
5ba3f43e 2408{
d9a64523 2409 struct _kevent_register *cont_args = parameter;
cb323159 2410 struct kqworkloop *kqwl = cont_args->kqwl;
5ba3f43e 2411
cb323159 2412 kqlock(kqwl);
d9a64523
A
2413 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2414 workq_kern_threadreq_lock(kqwl->kqwl_p);
cb323159 2415 turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
d9a64523
A
2416 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2417 } else {
cb323159 2418 turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
d9a64523 2419 }
cb323159 2420 kqunlock(kqwl);
5ba3f43e 2421
d9a64523 2422 turnstile_cleanup();
5ba3f43e 2423
d9a64523
A
2424 if (wr == THREAD_INTERRUPTED) {
2425 cont_args->kev.flags |= EV_ERROR;
2426 cont_args->kev.data = EINTR;
2427 } else if (wr != THREAD_AWAKENED) {
2428 panic("Unexpected wait result: %d", wr);
2429 }
5ba3f43e 2430
d9a64523
A
2431 kevent_register_wait_return(cont_args);
2432}
5ba3f43e 2433
d9a64523
A
2434/*
2435 * Called with the workloop mutex held, most of the time never returns as it
2436 * calls filt_wlwait_continue through a continuation.
2437 */
2438static void __dead2
cb323159 2439filt_wlpost_register_wait(struct uthread *uth, struct knote *kn,
0a7de745 2440 struct _kevent_register *cont_args)
d9a64523 2441{
cb323159
A
2442 struct kqworkloop *kqwl = cont_args->kqwl;
2443 workq_threadreq_t kqr = &kqwl->kqwl_request;
d9a64523
A
2444 struct turnstile *ts;
2445 bool workq_locked = false;
5ba3f43e 2446
cb323159 2447 kqlock_held(kqwl);
5ba3f43e 2448
d9a64523
A
2449 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2450 workq_kern_threadreq_lock(kqwl->kqwl_p);
2451 workq_locked = true;
2452 }
5ba3f43e 2453
d9a64523 2454 ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
0a7de745 2455 TURNSTILE_NULL, TURNSTILE_WORKLOOPS);
5ba3f43e 2456
d9a64523
A
2457 if (workq_locked) {
2458 workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
0a7de745
A
2459 &kqwl->kqwl_request, kqwl->kqwl_owner, ts,
2460 TURNSTILE_DELAYED_UPDATE);
d9a64523
A
2461 if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
2462 /*
2463 * if the interlock is no longer the workqueue lock,
2464 * then we don't need to hold it anymore.
2465 */
2466 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2467 workq_locked = false;
5ba3f43e 2468 }
d9a64523
A
2469 }
2470 if (!workq_locked) {
2471 /*
2472 * If the interlock is the workloop's, then it's our responsibility to
2473 * call update_inheritor, so just do it.
2474 */
2475 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_DELAYED_UPDATE);
2476 }
5ba3f43e 2477
d9a64523 2478 thread_set_pending_block_hint(uth->uu_thread, kThreadWaitWorkloopSyncWait);
cb323159 2479 waitq_assert_wait64(&ts->ts_waitq, knote_filt_wev64(kn),
0a7de745 2480 THREAD_ABORTSAFE, TIMEOUT_WAIT_FOREVER);
5ba3f43e 2481
d9a64523
A
2482 if (workq_locked) {
2483 workq_kern_threadreq_unlock(kqwl->kqwl_p);
5ba3f43e
A
2484 }
2485
cb323159 2486 thread_t thread = kqwl->kqwl_owner ?: kqr_thread(kqr);
d9a64523
A
2487 if (thread) {
2488 thread_reference(thread);
5ba3f43e 2489 }
d9a64523 2490
cb323159 2491 kevent_register_wait_block(ts, thread, filt_wlwait_continue, cont_args);
5ba3f43e
A
2492}
2493
2494/* called in stackshot context to report the thread responsible for blocking this thread */
2495void
2496kdp_workloop_sync_wait_find_owner(__assert_only thread_t thread,
0a7de745 2497 event64_t event, thread_waitinfo_t *waitinfo)
5ba3f43e 2498{
f427ee49 2499 extern zone_t thread_zone;
d9a64523 2500 struct knote *kn = (struct knote *)event;
f427ee49
A
2501
2502 zone_require(knote_zone, kn);
5ba3f43e 2503
cb323159 2504 assert(kn->kn_thread == thread);
5ba3f43e
A
2505
2506 struct kqueue *kq = knote_get_kq(kn);
f427ee49
A
2507
2508 zone_require(kqworkloop_zone, kq);
5ba3f43e
A
2509 assert(kq->kq_state & KQ_WORKLOOP);
2510
2511 struct kqworkloop *kqwl = (struct kqworkloop *)kq;
cb323159 2512 workq_threadreq_t kqr = &kqwl->kqwl_request;
5ba3f43e
A
2513
2514 thread_t kqwl_owner = kqwl->kqwl_owner;
5ba3f43e 2515
d9a64523 2516 if (kqwl_owner != THREAD_NULL) {
f427ee49 2517 zone_require(thread_zone, kqwl_owner);
5ba3f43e 2518 waitinfo->owner = thread_tid(kqwl->kqwl_owner);
cb323159 2519 } else if (kqr_thread_requested_pending(kqr)) {
5ba3f43e 2520 waitinfo->owner = STACKSHOT_WAITOWNER_THREQUESTED;
cb323159 2521 } else if (kqr->tr_state >= WORKQ_TR_STATE_BINDING) {
f427ee49 2522 zone_require(thread_zone, kqr->tr_thread);
cb323159 2523 waitinfo->owner = thread_tid(kqr->tr_thread);
5ba3f43e
A
2524 } else {
2525 waitinfo->owner = 0;
2526 }
2527
2528 waitinfo->context = kqwl->kqwl_dynamicid;
5ba3f43e
A
2529}
2530
2531static void
cb323159 2532filt_wldetach(struct knote *kn)
5ba3f43e 2533{
cb323159
A
2534 if (kn->kn_sfflags & NOTE_WL_SYNC_IPC) {
2535 filt_wldetach_sync_ipc(kn);
2536 } else if (kn->kn_thread) {
d9a64523
A
2537 kevent_register_wait_cleanup(kn);
2538 }
5ba3f43e
A
2539}
2540
2541static int
cb323159 2542filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_qos_s *kev,
0a7de745 2543 thread_qos_t *qos_index)
5ba3f43e 2544{
cb323159
A
2545 uint32_t new_commands = kev->fflags & NOTE_WL_COMMANDS_MASK;
2546 uint32_t sav_commands = kn->kn_sfflags & NOTE_WL_COMMANDS_MASK;
d9a64523
A
2547
2548 if ((kev->fflags & NOTE_WL_DISCOVER_OWNER) && (kev->flags & EV_DELETE)) {
2549 return EINVAL;
2550 }
2551 if (kev->fflags & NOTE_WL_UPDATE_QOS) {
2552 if (kev->flags & EV_DELETE) {
2553 return EINVAL;
2554 }
2555 if (sav_commands != NOTE_WL_THREAD_REQUEST) {
2556 return EINVAL;
2557 }
2558 if (!(*qos_index = _pthread_priority_thread_qos(kev->qos))) {
2559 return ERANGE;
2560 }
2561 }
5ba3f43e
A
2562
2563 switch (new_commands) {
2564 case NOTE_WL_THREAD_REQUEST:
2565 /* thread requests can only update themselves */
0a7de745 2566 if (sav_commands != NOTE_WL_THREAD_REQUEST) {
d9a64523 2567 return EINVAL;
0a7de745 2568 }
5ba3f43e
A
2569 break;
2570
2571 case NOTE_WL_SYNC_WAIT:
0a7de745 2572 if (kev->fflags & NOTE_WL_END_OWNERSHIP) {
d9a64523 2573 return EINVAL;
0a7de745 2574 }
d9a64523
A
2575 goto sync_checks;
2576
5ba3f43e 2577 case NOTE_WL_SYNC_WAKE:
0a7de745
A
2578sync_checks:
2579 if (!(sav_commands & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE))) {
d9a64523 2580 return EINVAL;
0a7de745
A
2581 }
2582 if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
d9a64523 2583 return EINVAL;
0a7de745 2584 }
5ba3f43e
A
2585 break;
2586
cb323159
A
2587 case NOTE_WL_SYNC_IPC:
2588 if (sav_commands != NOTE_WL_SYNC_IPC) {
2589 return EINVAL;
2590 }
2591 if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
2592 return EINVAL;
2593 }
2594 break;
2595
5ba3f43e 2596 default:
d9a64523 2597 return EINVAL;
5ba3f43e 2598 }
d9a64523 2599 return 0;
5ba3f43e
A
2600}
2601
2602static int
cb323159 2603filt_wltouch(struct knote *kn, struct kevent_qos_s *kev)
5ba3f43e 2604{
d9a64523
A
2605 struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2606 thread_qos_t qos_index = THREAD_QOS_UNSPECIFIED;
cb323159 2607 int result = 0;
5ba3f43e 2608
d9a64523 2609 int error = filt_wlvalidate_kev_flags(kn, kev, &qos_index);
5ba3f43e
A
2610 if (error) {
2611 goto out;
2612 }
2613
cb323159
A
2614 uint32_t command = kev->fflags & NOTE_WL_COMMANDS_MASK;
2615 if (command == NOTE_WL_SYNC_IPC) {
2616 error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLTOUCH);
2617 } else {
2618 error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLTOUCH);
2619 filt_wlremember_last_update(kn, kev, error);
2620 }
2621 if (error == EPREEMPTDISABLED) {
2622 error = 0;
2623 result = FILTER_THREADREQ_NODEFEER;
5ba3f43e
A
2624 }
2625
5ba3f43e
A
2626out:
2627 if (error) {
2628 if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
2629 /* If userland wants ESTALE to be hidden, do not activate */
cb323159 2630 return result;
5ba3f43e
A
2631 }
2632 kev->flags |= EV_ERROR;
2633 kev->data = error;
cb323159 2634 return result;
5ba3f43e 2635 }
d9a64523 2636 if (command == NOTE_WL_SYNC_WAIT && !(kn->kn_sfflags & NOTE_WL_SYNC_WAKE)) {
cb323159 2637 return kevent_register_wait_prepare(kn, kev, result);
d9a64523 2638 }
5ba3f43e 2639 /* Just touching the thread request successfully will fire it */
d9a64523
A
2640 if (command == NOTE_WL_THREAD_REQUEST) {
2641 if (kev->fflags & NOTE_WL_UPDATE_QOS) {
cb323159 2642 result |= FILTER_UPDATE_REQ_QOS;
d9a64523 2643 }
cb323159 2644 result |= FILTER_ACTIVE;
d9a64523 2645 }
cb323159 2646 return result;
5ba3f43e
A
2647}
2648
d9a64523 2649static bool
cb323159 2650filt_wlallow_drop(struct knote *kn, struct kevent_qos_s *kev)
5ba3f43e 2651{
d9a64523 2652 struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
5ba3f43e 2653
d9a64523 2654 int error = filt_wlvalidate_kev_flags(kn, kev, NULL);
5ba3f43e
A
2655 if (error) {
2656 goto out;
2657 }
2658
cb323159
A
2659 uint32_t command = (kev->fflags & NOTE_WL_COMMANDS_MASK);
2660 if (command == NOTE_WL_SYNC_IPC) {
2661 error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLDROP);
2662 } else {
2663 error = filt_wlupdate(kqwl, kn, kev, 0, FILT_WLDROP);
2664 filt_wlremember_last_update(kn, kev, error);
5ba3f43e 2665 }
cb323159 2666 assert(error != EPREEMPTDISABLED);
5ba3f43e 2667
5ba3f43e 2668out:
d9a64523
A
2669 if (error) {
2670 if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
2671 return false;
5ba3f43e 2672 }
d9a64523
A
2673 kev->flags |= EV_ERROR;
2674 kev->data = error;
2675 return false;
5ba3f43e 2676 }
d9a64523 2677 return true;
5ba3f43e
A
2678}
2679
2680static int
cb323159 2681filt_wlprocess(struct knote *kn, struct kevent_qos_s *kev)
5ba3f43e 2682{
d9a64523 2683 struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
5ba3f43e
A
2684 int rc = 0;
2685
5ba3f43e 2686 assert(kn->kn_sfflags & NOTE_WL_THREAD_REQUEST);
d9a64523 2687
cb323159 2688 kqlock(kqwl);
d9a64523
A
2689
2690 if (kqwl->kqwl_owner) {
2691 /*
2692 * <rdar://problem/33584321> userspace can sometimes, due to events being
2693 * delivered but not triggering a drain session, cause a process
2694 * of the thread request knote.
2695 *
2696 * When that happens, the automatic deactivation due to process
2697 * would swallow the event, so we have to activate the knote again.
2698 */
cb323159 2699 knote_activate(kqwl, kn, FILTER_ACTIVE);
d9a64523
A
2700 } else {
2701#if DEBUG || DEVELOPMENT
f427ee49 2702 if (kevent_debug_flags & KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS) {
5ba3f43e 2703 /*
d9a64523 2704 * see src/queue_internal.h in libdispatch
5ba3f43e 2705 */
d9a64523 2706#define DISPATCH_QUEUE_ENQUEUED 0x1ull
5ba3f43e
A
2707 user_addr_t addr = CAST_USER_ADDR_T(kn->kn_ext[EV_EXTIDX_WL_ADDR]);
2708 task_t t = current_task();
2709 uint64_t val;
2710 if (addr && task_is_active(t) && !task_is_halting(t) &&
cb323159 2711 copyin_atomic64(addr, &val) == 0 &&
0a7de745
A
2712 val && (val & DISPATCH_QUEUE_ENQUEUED) == 0 &&
2713 (val >> 48) != 0xdead && (val >> 48) != 0 && (val >> 48) != 0xffff) {
5ba3f43e 2714 panic("kevent: workloop %#016llx is not enqueued "
0a7de745
A
2715 "(kn:%p dq_state:%#016llx kev.dq_state:%#016llx)",
2716 kn->kn_udata, kn, val, kn->kn_ext[EV_EXTIDX_WL_VALUE]);
5ba3f43e 2717 }
5ba3f43e 2718 }
d9a64523 2719#endif
cb323159 2720 knote_fill_kevent(kn, kev, 0);
d9a64523 2721 kev->fflags = kn->kn_sfflags;
d9a64523
A
2722 rc |= FILTER_ACTIVE;
2723 }
2724
cb323159 2725 kqunlock(kqwl);
d9a64523
A
2726
2727 if (rc & FILTER_ACTIVE) {
2728 workq_thread_set_max_qos(kqwl->kqwl_p, &kqwl->kqwl_request);
5ba3f43e
A
2729 }
2730 return rc;
2731}
2732
d9a64523
A
2733SECURITY_READ_ONLY_EARLY(static struct filterops) workloop_filtops = {
2734 .f_extended_codes = true,
2735 .f_attach = filt_wlattach,
2736 .f_detach = filt_wldetach,
cb323159 2737 .f_event = filt_bad_event,
d9a64523
A
2738 .f_touch = filt_wltouch,
2739 .f_process = filt_wlprocess,
2740 .f_allow_drop = filt_wlallow_drop,
2741 .f_post_register_wait = filt_wlpost_register_wait,
2742};
2743
cb323159 2744#pragma mark - kqueues allocation and deallocation
5ba3f43e 2745
cb323159
A
2746/*!
2747 * @enum kqworkloop_dealloc_flags_t
2748 *
2749 * @brief
2750 * Flags that alter kqworkloop_dealloc() behavior.
2751 *
2752 * @const KQWL_DEALLOC_NONE
2753 * Convenient name for "no flags".
2754 *
2755 * @const KQWL_DEALLOC_SKIP_HASH_REMOVE
2756 * Do not remove the workloop from the hash table.
2757 * This is used for process tear-down codepaths as the workloops have been
2758 * removed by the caller already.
5ba3f43e 2759 */
cb323159
A
2760OS_OPTIONS(kqworkloop_dealloc_flags, unsigned,
2761 KQWL_DEALLOC_NONE = 0x0000,
2762 KQWL_DEALLOC_SKIP_HASH_REMOVE = 0x0001,
2763 );
2764
2765static void
2766kqworkloop_dealloc(struct kqworkloop *, kqworkloop_dealloc_flags_t, uint32_t);
2767
2768OS_NOINLINE OS_COLD OS_NORETURN
2769static void
2770kqworkloop_retain_panic(struct kqworkloop *kqwl, uint32_t previous)
d9a64523 2771{
cb323159
A
2772 if (previous == 0) {
2773 panic("kq(%p) resurrection", kqwl);
2774 } else {
2775 panic("kq(%p) retain overflow", kqwl);
2776 }
d9a64523
A
2777}
2778
cb323159
A
2779OS_NOINLINE OS_COLD OS_NORETURN
2780static void
2781kqworkloop_release_panic(struct kqworkloop *kqwl)
5ba3f43e 2782{
cb323159 2783 panic("kq(%p) over-release", kqwl);
5ba3f43e
A
2784}
2785
cb323159
A
2786OS_ALWAYS_INLINE
2787static inline bool
2788kqworkloop_try_retain(struct kqworkloop *kqwl)
5ba3f43e 2789{
cb323159
A
2790 uint32_t old_ref, new_ref;
2791 os_atomic_rmw_loop(&kqwl->kqwl_retains, old_ref, new_ref, relaxed, {
2792 if (__improbable(old_ref == 0)) {
2793 os_atomic_rmw_loop_give_up(return false);
5ba3f43e 2794 }
cb323159
A
2795 if (__improbable(old_ref >= KQ_WORKLOOP_RETAINS_MAX)) {
2796 kqworkloop_retain_panic(kqwl, old_ref);
0a7de745 2797 }
cb323159
A
2798 new_ref = old_ref + 1;
2799 });
2800 return true;
2801}
5ba3f43e 2802
cb323159
A
2803OS_ALWAYS_INLINE
2804static inline void
2805kqworkloop_retain(struct kqworkloop *kqwl)
2806{
2807 uint32_t previous = os_atomic_inc_orig(&kqwl->kqwl_retains, relaxed);
2808 if (__improbable(previous == 0 || previous >= KQ_WORKLOOP_RETAINS_MAX)) {
2809 kqworkloop_retain_panic(kqwl, previous);
2810 }
2811}
5ba3f43e 2812
cb323159
A
2813OS_ALWAYS_INLINE
2814static inline void
2815kqueue_retain(kqueue_t kqu)
2816{
2817 if (kqu.kq->kq_state & KQ_DYNAMIC) {
2818 kqworkloop_retain(kqu.kqwl);
2819 }
2820}
5ba3f43e 2821
cb323159
A
2822OS_ALWAYS_INLINE
2823static inline void
2824kqworkloop_release_live(struct kqworkloop *kqwl)
2825{
2826 uint32_t refs = os_atomic_dec_orig(&kqwl->kqwl_retains, relaxed);
2827 if (__improbable(refs <= 1)) {
2828 kqworkloop_release_panic(kqwl);
2829 }
2830}
5ba3f43e 2831
cb323159
A
2832OS_ALWAYS_INLINE
2833static inline void
2834kqueue_release_live(kqueue_t kqu)
2835{
2836 if (kqu.kq->kq_state & KQ_DYNAMIC) {
2837 kqworkloop_release_live(kqu.kqwl);
2838 }
2839}
d9a64523 2840
cb323159
A
2841OS_ALWAYS_INLINE
2842static inline void
2843kqworkloop_release(struct kqworkloop *kqwl)
2844{
2845 uint32_t refs = os_atomic_dec_orig(&kqwl->kqwl_retains, relaxed);
5ba3f43e 2846
cb323159
A
2847 if (__improbable(refs <= 1)) {
2848 kqworkloop_dealloc(kqwl, KQWL_DEALLOC_NONE, refs - 1);
2849 }
2850}
d9a64523 2851
cb323159
A
2852OS_ALWAYS_INLINE
2853static inline void
2854kqueue_release(kqueue_t kqu)
2855{
2856 if (kqu.kq->kq_state & KQ_DYNAMIC) {
2857 kqworkloop_release(kqu.kqwl);
5ba3f43e 2858 }
cb323159 2859}
5ba3f43e 2860
cb323159
A
2861/*!
2862 * @function kqueue_destroy
2863 *
2864 * @brief
2865 * Common part to all kqueue dealloc functions.
2866 */
2867OS_NOINLINE
2868static void
2869kqueue_destroy(kqueue_t kqu, zone_t zone)
2870{
2871 /*
2872 * waitq_set_deinit() removes the KQ's waitq set from
2873 * any select sets to which it may belong.
2874 *
2875 * The order of these deinits matter: before waitq_set_deinit() returns,
2876 * waitq_set__CALLING_PREPOST_HOOK__ may be called and it will take the
2877 * kq_lock.
2878 */
2879 waitq_set_deinit(&kqu.kq->kq_wqs);
f427ee49 2880 lck_spin_destroy(&kqu.kq->kq_lock, &kq_lck_grp);
5ba3f43e 2881
cb323159
A
2882 zfree(zone, kqu.kq);
2883}
5ba3f43e 2884
cb323159
A
2885/*!
2886 * @function kqueue_init
2887 *
2888 * @brief
2889 * Common part to all kqueue alloc functions.
2890 */
2891static kqueue_t
2892kqueue_init(kqueue_t kqu, waitq_set_prepost_hook_t *hook, int policy)
2893{
2894 waitq_set_init(&kqu.kq->kq_wqs, policy, NULL, hook);
f427ee49 2895 lck_spin_init(&kqu.kq->kq_lock, &kq_lck_grp, LCK_ATTR_NULL);
cb323159 2896 return kqu;
5ba3f43e
A
2897}
2898
cb323159
A
2899#pragma mark kqfile allocation and deallocation
2900
2901/*!
2902 * @function kqueue_dealloc
5ba3f43e 2903 *
cb323159
A
2904 * @brief
2905 * Detach all knotes from a kqfile and free it.
2906 *
2907 * @discussion
2908 * We walk each list looking for knotes referencing
2909 * this kqueue. If we find one, we try to drop it. But
2910 * if we fail to get a drop reference, that will wait
2911 * until it is dropped. So, we can just restart again
2912 * safe in the assumption that the list will eventually
2913 * not contain any more references to this kqueue (either
2914 * we dropped them all, or someone else did).
2915 *
2916 * Assumes no new events are being added to the kqueue.
2917 * Nothing locked on entry or exit.
5ba3f43e
A
2918 */
2919void
cb323159 2920kqueue_dealloc(struct kqueue *kq)
5ba3f43e 2921{
cb323159
A
2922 KNOTE_LOCK_CTX(knlc);
2923 struct proc *p = kq->kq_p;
5ba3f43e 2924 struct filedesc *fdp = p->p_fd;
5ba3f43e 2925 struct knote *kn;
5ba3f43e 2926
cb323159
A
2927 assert(kq && (kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
2928
2929 proc_fdlock(p);
2930 for (int i = 0; i < fdp->fd_knlistsize; i++) {
2931 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
2932 while (kn != NULL) {
2933 if (kq == knote_get_kq(kn)) {
5ba3f43e
A
2934 kqlock(kq);
2935 proc_fdunlock(p);
cb323159
A
2936 if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
2937 knote_drop(kq, kn, &knlc);
2938 }
5ba3f43e 2939 proc_fdlock(p);
cb323159
A
2940 /* start over at beginning of list */
2941 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
2942 continue;
5ba3f43e 2943 }
cb323159 2944 kn = SLIST_NEXT(kn, kn_link);
5ba3f43e 2945 }
5ba3f43e 2946 }
5ba3f43e 2947
cb323159 2948 knhash_lock(fdp);
5ba3f43e
A
2949 proc_fdunlock(p);
2950
5ba3f43e 2951 if (fdp->fd_knhashmask != 0) {
cb323159
A
2952 for (int i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
2953 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
2954 while (kn != NULL) {
2955 if (kq == knote_get_kq(kn)) {
2956 kqlock(kq);
2957 knhash_unlock(fdp);
2958 if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
2959 knote_drop(kq, kn, &knlc);
2960 }
2961 knhash_lock(fdp);
2962 /* start over at beginning of list */
2963 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
2964 continue;
2965 }
2966 kn = SLIST_NEXT(kn, kn_link);
5ba3f43e
A
2967 }
2968 }
0a7de745 2969 }
cb323159 2970 knhash_unlock(fdp);
5ba3f43e 2971
cb323159 2972 kqueue_destroy(kq, kqfile_zone);
5ba3f43e
A
2973}
2974
cb323159
A
2975/*!
2976 * @function kqueue_alloc
d9a64523 2977 *
cb323159
A
2978 * @brief
2979 * Allocate a kqfile.
d9a64523 2980 */
cb323159
A
2981struct kqueue *
2982kqueue_alloc(struct proc *p)
d9a64523 2983{
cb323159 2984 struct kqfile *kqf;
d9a64523 2985
cb323159
A
2986 /*
2987 * kqfiles are created with kqueue() so we need to wait for
2988 * the first kevent syscall to know which bit among
2989 * KQ_KEV_{32,64,QOS} will be set in kqf_state
2990 */
f427ee49 2991 kqf = zalloc_flags(kqfile_zone, Z_WAITOK | Z_ZERO);
cb323159
A
2992 kqf->kqf_p = p;
2993 TAILQ_INIT_AFTER_BZERO(&kqf->kqf_queue);
2994 TAILQ_INIT_AFTER_BZERO(&kqf->kqf_suppressed);
d9a64523 2995
cb323159 2996 return kqueue_init(kqf, NULL, SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST).kq;
d9a64523 2997}
5ba3f43e 2998
cb323159
A
2999/*!
3000 * @function kqueue_internal
3001 *
3002 * @brief
3003 * Core implementation for kqueue and guarded_kqueue_np()
5ba3f43e 3004 */
cb323159
A
3005int
3006kqueue_internal(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
5ba3f43e 3007{
cb323159
A
3008 struct kqueue *kq;
3009 struct fileproc *fp;
3010 int fd, error;
5ba3f43e 3011
cb323159 3012 error = falloc_withalloc(p, &fp, &fd, vfs_context_current(), fp_zalloc, cra);
5ba3f43e 3013 if (error) {
0a7de745 3014 return error;
5ba3f43e
A
3015 }
3016
cb323159 3017 kq = kqueue_alloc(p);
5ba3f43e
A
3018 if (kq == NULL) {
3019 fp_free(p, fd, fp);
0a7de745 3020 return ENOMEM;
5ba3f43e
A
3021 }
3022
3023 fp->f_flag = FREAD | FWRITE;
3024 fp->f_ops = &kqueueops;
3025 fp->f_data = kq;
0a7de745 3026 fp->f_lflags |= FG_CONFINED;
5ba3f43e
A
3027
3028 proc_fdlock(p);
0a7de745 3029 *fdflags(p, fd) |= UF_EXCLOSE | UF_FORKCLOSE;
5ba3f43e
A
3030 procfdtbl_releasefd(p, fd, NULL);
3031 fp_drop(p, fd, fp, 1);
3032 proc_fdunlock(p);
3033
3034 *retval = fd;
0a7de745 3035 return error;
55e303ae
A
3036}
3037
cb323159
A
3038/*!
3039 * @function kqueue
3040 *
3041 * @brief
3042 * The kqueue syscall.
3043 */
39236c6e
A
3044int
3045kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
3046{
cb323159 3047 return kqueue_internal(p, fileproc_alloc_init, NULL, retval);
39236c6e
A
3048}
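/*
 * [Editor's sketch, not from xnu] What kqueue_internal() above means for the
 * returned descriptor: it is marked UF_EXCLOSE | UF_FORKCLOSE, so it is not
 * inherited across exec (or fork).  The assertion assumes fcntl(F_GETFD)
 * reports UF_EXCLOSE as FD_CLOEXEC, as XNU's fcntl does; the helper name is
 * illustrative.
 */
#include <sys/event.h>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>

static void
kqueue_fd_example(void)
{
	int kq = kqueue();
	assert(kq >= 0);

	/* created close-on-exec by the kernel, no FD_CLOEXEC fcntl needed */
	assert(fcntl(kq, F_GETFD) & FD_CLOEXEC);

	close(kq);
}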
3049
cb323159 3050#pragma mark kqworkq allocation and deallocation
91447636 3051
cb323159
A
3052/*!
3053 * @function kqworkq_dealloc
3054 *
3055 * @brief
3056 * Deallocates a workqueue kqueue.
3057 *
3058 * @discussion
3059 * This only happens at process death, or for races with concurrent
3060 * this kqueue: either there are none, or someone else took care of them.
3061 * this kqueue, either there are none, or someone else took care of them.
3062 */
3063void
3064kqworkq_dealloc(struct kqworkq *kqwq)
3065{
3066 kqueue_destroy(kqwq, kqworkq_zone);
3067}
3e170ce0 3068
cb323159
A
3069/*!
3070 * @function kqworkq_alloc
3071 *
3072 * @brief
3073 * Allocates a workqueue kqueue.
3074 *
3075 * @discussion
3076 * This is the slow path of kevent_get_kqwq.
3077 * This takes care of making sure procs have a single workq kqueue.
3078 */
3079OS_NOINLINE
3080static struct kqworkq *
3081kqworkq_alloc(struct proc *p, unsigned int flags)
3082{
3083 struct kqworkq *kqwq, *tmp;
3e170ce0 3084
f427ee49 3085 kqwq = zalloc_flags(kqworkq_zone, Z_WAITOK | Z_ZERO);
3e170ce0 3086
cb323159
A
3087 assert((flags & KEVENT_FLAG_LEGACY32) == 0);
3088 if (flags & KEVENT_FLAG_LEGACY64) {
3089 kqwq->kqwq_state = KQ_WORKQ | KQ_KEV64;
3090 } else {
3091 kqwq->kqwq_state = KQ_WORKQ | KQ_KEV_QOS;
3092 }
3093 kqwq->kqwq_p = p;
3e170ce0 3094
cb323159
A
3095 for (int i = 0; i < KQWQ_NBUCKETS; i++) {
3096 TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_queue[i]);
3097 TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_suppressed[i]);
3098 }
3099 for (int i = 0; i < KQWQ_NBUCKETS; i++) {
3100 /*
3101 * Because of how the bucketized system works, we mix overcommit
3102 * sources with non-overcommit ones: each time we move a knote from
3103 * one bucket to the next due to overrides, we'd have to track
3104 * overcommitness, and it's really not worth it in the
3105 * workloop-enabled world to track this faithfully.
3106 *
3107 * Incidentally, this behaves like the original manager-based
3108 * kqwq where event delivery always happened (hence is
3109 * "overcommit")
3110 */
3111 kqwq->kqwq_request[i].tr_state = WORKQ_TR_STATE_IDLE;
3112 kqwq->kqwq_request[i].tr_flags = WORKQ_TR_FLAG_KEVENT;
3113 if (i != KQWQ_QOS_MANAGER) {
3114 kqwq->kqwq_request[i].tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
0a7de745 3115 }
f427ee49 3116 kqwq->kqwq_request[i].tr_kq_qos_index = (kq_index_t)i;
cb323159 3117 }
b0d623f7 3118
cb323159 3119 kqueue_init(kqwq, &kqwq->kqwq_waitq_hook, SYNC_POLICY_FIFO);
3e170ce0 3120
cb323159
A
3121 if (!os_atomic_cmpxchgv(&p->p_fd->fd_wqkqueue, NULL, kqwq, &tmp, release)) {
3122 kqworkq_dealloc(kqwq);
3123 return tmp;
0a7de745 3124 }
55e303ae 3125
cb323159
A
3126 return kqwq;
3127}
91447636 3128
cb323159 3129#pragma mark kqworkloop allocation and deallocation
91447636 3130
cb323159
A
3131#define KQ_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
3132#define CONFIG_KQ_HASHSIZE CONFIG_KN_HASHSIZE
3e170ce0 3133
cb323159
A
3134OS_ALWAYS_INLINE
3135static inline void
3136kqhash_lock(struct filedesc *fdp)
3137{
3138 lck_mtx_lock_spin_always(&fdp->fd_kqhashlock);
3139}
d9a64523 3140
cb323159
A
3141OS_ALWAYS_INLINE
3142static inline void
3143kqhash_unlock(struct filedesc *fdp)
3144{
3145 lck_mtx_unlock(&fdp->fd_kqhashlock);
3146}
2d21ac55 3147
cb323159
A
3148OS_ALWAYS_INLINE
3149static inline void
3150kqworkloop_hash_insert_locked(struct filedesc *fdp, kqueue_id_t id,
3151 struct kqworkloop *kqwl)
3152{
3153 struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
3154 LIST_INSERT_HEAD(list, kqwl, kqwl_hashlink);
91447636 3155}
55e303ae 3156
cb323159
A
3157OS_ALWAYS_INLINE
3158static inline struct kqworkloop *
3159kqworkloop_hash_lookup_locked(struct filedesc *fdp, kqueue_id_t id)
39037602 3160{
cb323159
A
3161 struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
3162 struct kqworkloop *kqwl;
39037602 3163
cb323159
A
3164 LIST_FOREACH(kqwl, list, kqwl_hashlink) {
3165 if (kqwl->kqwl_dynamicid == id) {
3166 return kqwl;
0a7de745 3167 }
39037602 3168 }
cb323159 3169 return NULL;
39037602
A
3170}
3171
cb323159
A
3172static struct kqworkloop *
3173kqworkloop_hash_lookup_and_retain(struct filedesc *fdp, kqueue_id_t kq_id)
39037602 3174{
cb323159 3175 struct kqworkloop *kqwl = NULL;
39037602 3176
cb323159
A
3177 kqhash_lock(fdp);
3178 if (__probable(fdp->fd_kqhash)) {
3179 kqwl = kqworkloop_hash_lookup_locked(fdp, kq_id);
3180 if (kqwl && !kqworkloop_try_retain(kqwl)) {
3181 kqwl = NULL;
39037602
A
3182 }
3183 }
cb323159
A
3184 kqhash_unlock(fdp);
3185 return kqwl;
39037602
A
3186}
3187
cb323159 3188OS_NOINLINE
91447636 3189static void
cb323159 3190kqworkloop_hash_init(struct filedesc *fdp)
91447636 3191{
cb323159
A
3192 struct kqwllist *alloc_hash;
3193 u_long alloc_mask;
39037602 3194
cb323159
A
3195 kqhash_unlock(fdp);
3196 alloc_hash = hashinit(CONFIG_KQ_HASHSIZE, M_KQUEUE, &alloc_mask);
3197 kqhash_lock(fdp);
3198
3199 /* See if we won the race */
3200 if (__probable(fdp->fd_kqhashmask == 0)) {
3201 fdp->fd_kqhash = alloc_hash;
3202 fdp->fd_kqhashmask = alloc_mask;
3203 } else {
3204 kqhash_unlock(fdp);
f427ee49 3205 hashdestroy(alloc_hash, M_KQUEUE, alloc_mask);
cb323159 3206 kqhash_lock(fdp);
0a7de745 3207 }
91447636 3208}
55e303ae 3209
cb323159
A
3210/*!
3211 * @function kqworkloop_dealloc
3212 *
3213 * @brief
3214 * Deallocates a workloop kqueue.
3215 *
3216 * @discussion
3217 * Knotes hold references on the workloop, so we can't really reach this
3218 * function unless all of these are already gone.
3219 *
3220 * Nothing locked on entry or exit.
91447636 3221 *
cb323159
A
3222 * @param flags
3223 * Unless KQWL_DEALLOC_SKIP_HASH_REMOVE is set, the workloop is removed
3224 * from its hash table.
3225 *
3226 * @param current_ref
3227 * This function is also called to undo a kqworkloop_alloc in case of
3228 * allocation races; current_ref is the refcount that is expected
3229 * on the workloop object, usually 0, and 1 when a dealloc race is resolved.
91447636 3230 */
cb323159
A
3231static void
3232kqworkloop_dealloc(struct kqworkloop *kqwl, kqworkloop_dealloc_flags_t flags,
3233 uint32_t current_ref)
b0d623f7 3234{
cb323159 3235 thread_t cur_owner;
3e170ce0 3236
cb323159
A
3237 if (__improbable(current_ref > 1)) {
3238 kqworkloop_release_panic(kqwl);
3239 }
3240 assert(kqwl->kqwl_retains == current_ref);
39236c6e 3241
cb323159
A
3242 /* pair with kqunlock() and other kq locks */
3243 os_atomic_thread_fence(acquire);
3e170ce0 3244
cb323159
A
3245 cur_owner = kqwl->kqwl_owner;
3246 if (cur_owner) {
3247 if (kqworkloop_override(kqwl) != THREAD_QOS_UNSPECIFIED) {
3248 thread_drop_kevent_override(cur_owner);
3249 }
3250 thread_deallocate(cur_owner);
3251 kqwl->kqwl_owner = THREAD_NULL;
3252 }
3e170ce0 3253
cb323159
A
3254 if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
3255 struct turnstile *ts;
3256 turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
3257 &ts, TURNSTILE_WORKLOOPS);
3258 turnstile_cleanup();
3259 turnstile_deallocate(ts);
3260 }
5ba3f43e 3261
cb323159
A
3262 if ((flags & KQWL_DEALLOC_SKIP_HASH_REMOVE) == 0) {
3263 struct filedesc *fdp = kqwl->kqwl_p->p_fd;
5ba3f43e 3264
cb323159
A
3265 kqhash_lock(fdp);
3266 LIST_REMOVE(kqwl, kqwl_hashlink);
3267 kqhash_unlock(fdp);
3268 }
91447636 3269
cb323159
A
3270 assert(TAILQ_EMPTY(&kqwl->kqwl_suppressed));
3271 assert(kqwl->kqwl_owner == THREAD_NULL);
3272 assert(kqwl->kqwl_turnstile == TURNSTILE_NULL);
3e170ce0 3273
f427ee49 3274 lck_spin_destroy(&kqwl->kqwl_statelock, &kq_lck_grp);
cb323159 3275 kqueue_destroy(kqwl, kqworkloop_zone);
3e170ce0 3276}
d9a64523 3277
cb323159
A
3278/*!
3279 * @function kqworkloop_init
3280 *
3281 * @brief
3282 * Initializes a freshly allocated workloop kqueue.
3283 */
3284static void
3285kqworkloop_init(struct kqworkloop *kqwl, proc_t p,
3286 kqueue_id_t id, workq_threadreq_param_t *trp)
b0d623f7 3287{
cb323159
A
3288 kqwl->kqwl_state = KQ_WORKLOOP | KQ_DYNAMIC | KQ_KEV_QOS;
3289 kqwl->kqwl_retains = 1; /* donate a retain to creator */
3290 kqwl->kqwl_dynamicid = id;
3291 kqwl->kqwl_p = p;
3292 if (trp) {
3293 kqwl->kqwl_params = trp->trp_value;
3294 }
3295
3296 workq_tr_flags_t tr_flags = WORKQ_TR_FLAG_WORKLOOP;
3297 if (trp) {
3298 if (trp->trp_flags & TRP_PRIORITY) {
3299 tr_flags |= WORKQ_TR_FLAG_WL_OUTSIDE_QOS;
0a7de745 3300 }
cb323159
A
3301 if (trp->trp_flags) {
3302 tr_flags |= WORKQ_TR_FLAG_WL_PARAMS;
0a7de745 3303 }
91447636 3304 }
cb323159
A
3305 kqwl->kqwl_request.tr_state = WORKQ_TR_STATE_IDLE;
3306 kqwl->kqwl_request.tr_flags = tr_flags;
3307
3308 for (int i = 0; i < KQWL_NBUCKETS; i++) {
3309 TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_queue[i]);
3310 }
3311 TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_suppressed);
3312
f427ee49 3313 lck_spin_init(&kqwl->kqwl_statelock, &kq_lck_grp, LCK_ATTR_NULL);
cb323159
A
3314
3315 kqueue_init(kqwl, &kqwl->kqwl_waitq_hook, SYNC_POLICY_FIFO);
39037602
A
3316}
3317
cb323159
A
3318/*!
3319 * @function kqworkloop_get_or_create
3320 *
3321 * @brief
3322 * Wrapper around kqworkloop_alloc that handles the uniquing of workloops.
3323 *
3324 * @returns
3325 * 0: success
3326 * EINVAL: invalid parameters
3327 * EEXIST: KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST is set and a collision exists.
3328 * ENOENT: KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST is set and the entry wasn't found.
3329 * ENOMEM: allocation failed
3330 */
5ba3f43e 3331static int
cb323159
A
3332kqworkloop_get_or_create(struct proc *p, kqueue_id_t id,
3333 workq_threadreq_param_t *trp, unsigned int flags, struct kqworkloop **kqwlp)
5ba3f43e 3334{
cb323159
A
3335 struct filedesc *fdp = p->p_fd;
3336 struct kqworkloop *alloc_kqwl = NULL;
3337 struct kqworkloop *kqwl = NULL;
3338 int error = 0;
5ba3f43e 3339
cb323159 3340 assert(!trp || (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST));
5ba3f43e 3341
cb323159
A
3342 if (id == 0 || id == (kqueue_id_t)-1) {
3343 return EINVAL;
3344 }
5ba3f43e 3345
cb323159
A
3346 for (;;) {
3347 kqhash_lock(fdp);
3348 if (__improbable(fdp->fd_kqhash == NULL)) {
3349 kqworkloop_hash_init(fdp);
3350 }
5ba3f43e 3351
cb323159
A
3352 kqwl = kqworkloop_hash_lookup_locked(fdp, id);
3353 if (kqwl) {
3354 if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
3355 /*
3356 * If MUST_NOT_EXIST was passed, even if we would have failed
3357 * the try_retain, it could have gone the other way, and
3358 * userspace can't tell. Let'em fix their race.
3359 */
3360 error = EEXIST;
3361 break;
3362 }
5ba3f43e 3363
cb323159
A
3364 if (__probable(kqworkloop_try_retain(kqwl))) {
3365 /*
3366 * This is a valid live workloop!
3367 */
3368 *kqwlp = kqwl;
3369 error = 0;
3370 break;
3371 }
3372 }
5ba3f43e 3373
cb323159
A
3374 if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST)) {
3375 error = ENOENT;
3376 break;
3377 }
5ba3f43e 3378
cb323159
A
3379 /*
3380 * We didn't find what we were looking for.
3381 *
3382 * If this is the second time we reach this point (alloc_kqwl != NULL),
3383 * then we're done.
3384 *
3385 * If this is the first time we reach this point (alloc_kqwl == NULL),
3386 * then try to allocate one without blocking.
3387 */
3388 if (__probable(alloc_kqwl == NULL)) {
f427ee49 3389 alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_NOWAIT | Z_ZERO);
cb323159
A
3390 }
3391 if (__probable(alloc_kqwl)) {
3392 kqworkloop_init(alloc_kqwl, p, id, trp);
3393 kqworkloop_hash_insert_locked(fdp, id, alloc_kqwl);
3394 kqhash_unlock(fdp);
3395 *kqwlp = alloc_kqwl;
3396 return 0;
3397 }
5ba3f43e 3398
cb323159
A
3399 /*
3400 * We have to block to allocate a workloop: drop the lock,
3401 * allocate one, and then retry the lookup, since someone
3402 * else could have raced with us.
3403 */
3404 kqhash_unlock(fdp);
5ba3f43e 3405
f427ee49 3406 alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_WAITOK | Z_ZERO);
5ba3f43e 3407 }
5ba3f43e 3408
cb323159 3409 kqhash_unlock(fdp);
5ba3f43e 3410
cb323159
A
3411 if (__improbable(alloc_kqwl)) {
3412 zfree(kqworkloop_zone, alloc_kqwl);
5ba3f43e
A
3413 }
3414
cb323159
A
3415 return error;
3416}
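
/*
 * Editor's note: the function above looks a workloop up by id and, on a
 * miss, allocates one without blocking while the hash lock is held,
 * falling back to a blocking allocation plus a retried lookup.  The
 * sketch below is a hedged userspace illustration of that retry pattern
 * (not xnu code): userspace has no non-blocking zalloc, so the spare is
 * always allocated outside the lock and discarded if the retry finds
 * that another thread won the race.  All names below are invented.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct wl_entry {
	uint64_t         id;
	int              refs;
	struct wl_entry *next;
};

static pthread_mutex_t  wl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct wl_entry *wl_head;

static struct wl_entry *
wl_lookup_locked(uint64_t id)
{
	for (struct wl_entry *e = wl_head; e; e = e->next) {
		if (e->id == id) {
			return e;
		}
	}
	return NULL;
}

/* Look up an entry by id, creating it if needed; returns it retained. */
static struct wl_entry *
wl_get_or_create(uint64_t id)
{
	struct wl_entry *spare = NULL, *e;

	for (;;) {
		pthread_mutex_lock(&wl_lock);
		e = wl_lookup_locked(id);
		if (e) {
			e->refs++;                      /* found: take a ref */
			pthread_mutex_unlock(&wl_lock);
			free(spare);                    /* lost the race */
			return e;
		}
		if (spare) {                            /* second pass: insert */
			spare->id = id;
			spare->refs = 1;
			spare->next = wl_head;
			wl_head = spare;
			pthread_mutex_unlock(&wl_lock);
			return spare;
		}
		pthread_mutex_unlock(&wl_lock);
		/* Allocation may block, so it happens with the lock dropped. */
		spare = calloc(1, sizeof(*spare));
		if (spare == NULL) {
			return NULL;                    /* ENOMEM */
		}
	}
}
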
5ba3f43e 3417
cb323159 3418#pragma mark - knotes
5ba3f43e 3419
cb323159
A
3420static int
3421filt_no_attach(struct knote *kn, __unused struct kevent_qos_s *kev)
3422{
3423 knote_set_error(kn, ENOTSUP);
3424 return 0;
5ba3f43e
A
3425}
3426
5ba3f43e 3427static void
cb323159 3428filt_no_detach(__unused struct knote *kn)
5ba3f43e 3429{
cb323159 3430}
5ba3f43e 3431
cb323159
A
3432static int __dead2
3433filt_bad_event(struct knote *kn, long hint)
3434{
3435 panic("%s[%d](%p, %ld)", __func__, kn->kn_filter, kn, hint);
3436}
5ba3f43e 3437
cb323159
A
3438static int __dead2
3439filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev)
3440{
3441 panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
5ba3f43e
A
3442}
3443
cb323159
A
3444static int __dead2
3445filt_bad_process(struct knote *kn, struct kevent_qos_s *kev)
5ba3f43e 3446{
cb323159
A
3447 panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
3448}
5ba3f43e 3449
cb323159
A
3450/*
3451 * knotes_dealloc - detach all knotes for the process and drop them
3452 *
3453 * Called with proc_fdlock held.
3454 * Returns with it locked.
3455 * May drop it temporarily.
3456 * The process is in a state in which it will not try to allocate
3457 * any more knotes (it is stopped for exit or exec).
3458 */
3459void
3460knotes_dealloc(proc_t p)
3461{
3462 struct filedesc *fdp = p->p_fd;
3463 struct kqueue *kq;
3464 struct knote *kn;
3465 struct klist *kn_hash = NULL;
f427ee49 3466 u_long kn_hashmask;
cb323159 3467 int i;
5ba3f43e 3468
cb323159
A
3469 /* Close all the fd-indexed knotes up front */
3470 if (fdp->fd_knlistsize > 0) {
3471 for (i = 0; i < fdp->fd_knlistsize; i++) {
3472 while ((kn = SLIST_FIRST(&fdp->fd_knlist[i])) != NULL) {
3473 kq = knote_get_kq(kn);
3474 kqlock(kq);
3475 proc_fdunlock(p);
3476 knote_drop(kq, kn, NULL);
3477 proc_fdlock(p);
3478 }
3479 }
3480 /* free the table */
c3c9b80d
A
3481 kheap_free(KM_KQUEUE, fdp->fd_knlist,
3482 fdp->fd_knlistsize * sizeof(struct klist *));
0a7de745 3483 }
cb323159 3484 fdp->fd_knlistsize = 0;
5ba3f43e 3485
cb323159
A
3486 knhash_lock(fdp);
3487 proc_fdunlock(p);
5ba3f43e 3488
cb323159
A
3489 /* Clean out all the hashed knotes as well */
3490 if (fdp->fd_knhashmask != 0) {
3491 for (i = 0; i <= (int)fdp->fd_knhashmask; i++) {
3492 while ((kn = SLIST_FIRST(&fdp->fd_knhash[i])) != NULL) {
3493 kq = knote_get_kq(kn);
3494 kqlock(kq);
3495 knhash_unlock(fdp);
3496 knote_drop(kq, kn, NULL);
3497 knhash_lock(fdp);
3498 }
5ba3f43e 3499 }
cb323159 3500 kn_hash = fdp->fd_knhash;
f427ee49 3501 kn_hashmask = fdp->fd_knhashmask;
cb323159
A
3502 fdp->fd_knhashmask = 0;
3503 fdp->fd_knhash = NULL;
5ba3f43e 3504 }
5ba3f43e 3505
cb323159
A
3506 knhash_unlock(fdp);
3507
cb323159 3508 if (kn_hash) {
f427ee49 3509 hashdestroy(kn_hash, M_KQUEUE, kn_hashmask);
5ba3f43e 3510 }
cb323159
A
3511
3512 proc_fdlock(p);
5ba3f43e
A
3513}
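
/*
 * Editor's note: because knotes_dealloc() drops proc_fdlock around each
 * knote_drop(), the loop above always re-reads SLIST_FIRST() after
 * relocking instead of carrying an iterator across the unlock.  Below is
 * a hedged, simplified userspace sketch of that drain idiom (invented
 * names, not xnu API); here each element is unlinked under the lock and
 * the blocking teardown runs with the lock dropped.
 */
#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>          /* BSD <sys/queue.h> assumed */

struct node {
	SLIST_ENTRY(node) link;
};
SLIST_HEAD(node_list, node);

static void
drain_list(pthread_mutex_t *lock, struct node_list *list)
{
	struct node *n;

	pthread_mutex_lock(lock);
	while ((n = SLIST_FIRST(list)) != NULL) {
		SLIST_REMOVE_HEAD(list, link);
		pthread_mutex_unlock(lock);     /* teardown may block */
		free(n);
		pthread_mutex_lock(lock);       /* relock, restart from head */
	}
	pthread_mutex_unlock(lock);
}
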
3514
d9a64523
A
3515/*
3516 * kqworkloops_dealloc - rebalance retains on kqworkloops created with
3517 * scheduling parameters
3518 *
3519 * Called with proc_fdlock held.
3520 * Returns with it locked.
3521 * The process is in a state in which it will not try to allocate
3522 * any more knotes (it is stopped for exit or exec).
3523 */
3524void
3525kqworkloops_dealloc(proc_t p)
5ba3f43e 3526{
d9a64523 3527 struct filedesc *fdp = p->p_fd;
d9a64523 3528 struct kqworkloop *kqwl, *kqwln;
cb323159 3529 struct kqwllist tofree;
d9a64523
A
3530
3531 if (!(fdp->fd_flags & FD_WORKLOOP)) {
3532 return;
3533 }
3534
cb323159
A
3535 kqhash_lock(fdp);
3536
3537 if (fdp->fd_kqhashmask == 0) {
3538 kqhash_unlock(fdp);
3539 return;
3540 }
d9a64523 3541
cb323159 3542 LIST_INIT(&tofree);
5ba3f43e 3543
cb323159
A
3544 for (size_t i = 0; i <= fdp->fd_kqhashmask; i++) {
3545 LIST_FOREACH_SAFE(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink, kqwln) {
d9a64523
A
3546 /*
3547 * kqworkloops that have scheduling parameters have an
3548 * implicit retain from kqueue_workloop_ctl that needs
3549 * to be balanced on process exit.
3550 */
3551 assert(kqwl->kqwl_params);
cb323159
A
3552 LIST_REMOVE(kqwl, kqwl_hashlink);
3553 LIST_INSERT_HEAD(&tofree, kqwl, kqwl_hashlink);
d9a64523
A
3554 }
3555 }
5ba3f43e 3556
cb323159 3557 kqhash_unlock(fdp);
5ba3f43e 3558
cb323159
A
3559 LIST_FOREACH_SAFE(kqwl, &tofree, kqwl_hashlink, kqwln) {
3560 kqworkloop_dealloc(kqwl, KQWL_DEALLOC_SKIP_HASH_REMOVE, 1);
d9a64523
A
3561 }
3562}
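
/*
 * Editor's note: kqworkloops_dealloc() above unlinks every workloop into
 * a local "tofree" list while the hash lock is held and only then runs
 * the heavyweight kqworkloop_dealloc() with the lock dropped.  A hedged
 * userspace sketch of that collect-then-free pattern (invented names,
 * BSD <sys/queue.h> assumed):
 */
#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>

struct item {
	LIST_ENTRY(item) link;
};
LIST_HEAD(item_list, item);

static pthread_mutex_t  table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item_list table = LIST_HEAD_INITIALIZER(table);

static void
drain_table(void)
{
	struct item_list tofree = LIST_HEAD_INITIALIZER(tofree);
	struct item *it, *tmp;

	pthread_mutex_lock(&table_lock);
	LIST_FOREACH_SAFE(it, &table, link, tmp) {
		LIST_REMOVE(it, link);                  /* unlink under lock */
		LIST_INSERT_HEAD(&tofree, it, link);
	}
	pthread_mutex_unlock(&table_lock);

	LIST_FOREACH_SAFE(it, &tofree, link, tmp) {
		free(it);                               /* teardown unlocked */
	}
}
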
5ba3f43e 3563
5ba3f43e 3564static int
cb323159
A
3565kevent_register_validate_priority(struct kqueue *kq, struct knote *kn,
3566 struct kevent_qos_s *kev)
3567{
3568 /* We don't care about the priority of a disabled or deleted knote */
3569 if (kev->flags & (EV_DISABLE | EV_DELETE)) {
3570 return 0;
3571 }
d9a64523 3572
cb323159 3573 if (kq->kq_state & KQ_WORKLOOP) {
d9a64523 3574 /*
cb323159
A
3575 * Workloops need valid priorities with a QOS (excluding manager) for
3576 * any enabled knote.
3577 *
3578 * When the knote is pre-existing, just make sure it has a valid QoS, as
3579 * kevent_register() will not use the incoming priority (filters that do
3580 * use it have the responsibility to validate it again, see filt_wltouch).
3581 *
3582 * If the knote is being created, validate the incoming priority.
d9a64523 3583 */
cb323159
A
3584 if (!_pthread_priority_thread_qos(kn ? kn->kn_qos : kev->qos)) {
3585 return ERANGE;
5ba3f43e 3586 }
cb323159 3587 }
5ba3f43e 3588
cb323159
A
3589 return 0;
3590}
d9a64523 3591
cb323159
A
3592/*
3593 * Prepare a filter for waiting after register.
3594 *
3595 * The f_post_register_wait hook will be called later by kevent_register()
3596 * and should call kevent_register_wait_block()
3597 */
3598static int
3599kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int rc)
3600{
3601 thread_t thread = current_thread();
5ba3f43e 3602
cb323159 3603 assert(knote_fops(kn)->f_extended_codes);
5ba3f43e 3604
cb323159
A
3605 if (kn->kn_thread == NULL) {
3606 thread_reference(thread);
3607 kn->kn_thread = thread;
3608 } else if (kn->kn_thread != thread) {
5ba3f43e 3609 /*
cb323159
A
3610 * kn_thread may be set from a previous aborted wait.
3611 * However, it has to be from the same thread.
5ba3f43e 3612 */
cb323159
A
3613 kev->flags |= EV_ERROR;
3614 kev->data = EXDEV;
3615 return 0;
d9a64523 3616 }
5ba3f43e 3617
cb323159 3618 return FILTER_REGISTER_WAIT | rc;
5ba3f43e
A
3619}
3620
cb323159
A
3621/*
3622 * Cleanup a kevent_register_wait_prepare() effect for threads that have been
3623 * aborted instead of properly woken up with thread_wakeup_thread().
3624 */
5ba3f43e 3625static void
cb323159 3626kevent_register_wait_cleanup(struct knote *kn)
5ba3f43e 3627{
cb323159
A
3628 thread_t thread = kn->kn_thread;
3629 kn->kn_thread = NULL;
3630 thread_deallocate(thread);
5ba3f43e
A
3631}
3632
cb323159
A
3633/*
3634 * Must be called at the end of an f_post_register_wait call from a filter.
3635 */
3636static void
3637kevent_register_wait_block(struct turnstile *ts, thread_t thread,
3638 thread_continue_t cont, struct _kevent_register *cont_args)
5ba3f43e 3639{
cb323159
A
3640 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
3641 kqunlock(cont_args->kqwl);
3642 cont_args->handoff_thread = thread;
f427ee49 3643 thread_handoff_parameter(thread, cont, cont_args, THREAD_HANDOFF_NONE);
5ba3f43e
A
3644}
3645
cb323159
A
3646/*
3647 * Called by filters using an f_post_register_wait hook to return from their wait.
3648 */
3649static void
3650kevent_register_wait_return(struct _kevent_register *cont_args)
5ba3f43e 3651{
cb323159
A
3652 struct kqworkloop *kqwl = cont_args->kqwl;
3653 struct kevent_qos_s *kev = &cont_args->kev;
3654 int error = 0;
5ba3f43e 3655
cb323159
A
3656 if (cont_args->handoff_thread) {
3657 thread_deallocate(cont_args->handoff_thread);
5ba3f43e
A
3658 }
3659
cb323159
A
3660 if (kev->flags & (EV_ERROR | EV_RECEIPT)) {
3661 if ((kev->flags & EV_ERROR) == 0) {
3662 kev->flags |= EV_ERROR;
3663 kev->data = 0;
d9a64523 3664 }
cb323159
A
3665 error = kevent_modern_copyout(kev, &cont_args->ueventlist);
3666 if (error == 0) {
3667 cont_args->eventout++;
d9a64523 3668 }
5ba3f43e 3669 }
cb323159
A
3670
3671 kqworkloop_release(kqwl);
3672 if (error == 0) {
3673 *(int32_t *)&current_uthread()->uu_rval = cont_args->eventout;
d9a64523 3674 }
cb323159 3675 unix_syscall_return(error);
5ba3f43e
A
3676}
3677
cb323159
A
3678/*
3679 * kevent_register - add a new event to a kqueue
3680 *
3681 * Creates a mapping between the event source and
3682 * the kqueue via a knote data structure.
3683 *
3684 * Because many/most the event sources are file
3685 * descriptor related, the knote is linked off
3686 * the filedescriptor table for quick access.
3687 *
3688 * called with nothing locked
3689 * caller holds a reference on the kqueue
3690 */
39037602 3691
cb323159
A
3692int
3693kevent_register(struct kqueue *kq, struct kevent_qos_s *kev,
3694 struct knote **kn_out)
39037602 3695{
cb323159
A
3696 struct proc *p = kq->kq_p;
3697 const struct filterops *fops;
3698 struct knote *kn = NULL;
3699 int result = 0, error = 0;
3700 unsigned short kev_flags = kev->flags;
d9a64523 3701 KNOTE_LOCK_CTX(knlc);
39037602 3702
cb323159
A
3703 if (__probable(kev->filter < 0 && kev->filter + EVFILT_SYSCOUNT >= 0)) {
3704 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
3705 } else {
3706 error = EINVAL;
3707 goto out;
0a7de745 3708 }
5ba3f43e 3709
cb323159
A
3710 /* restrict EV_VANISHED to adding udata-specific dispatch kevents */
3711 if (__improbable((kev->flags & EV_VANISHED) &&
3712 (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2))) {
3713 error = EINVAL;
3714 goto out;
0a7de745 3715 }
a39ff7e2 3716
cb323159
A
3717 /* Simplify the flags - delete and disable overrule */
3718 if (kev->flags & EV_DELETE) {
3719 kev->flags &= ~EV_ADD;
5ba3f43e 3720 }
cb323159
A
3721 if (kev->flags & EV_DISABLE) {
3722 kev->flags &= ~EV_ENABLE;
b0d623f7 3723 }
39037602 3724
cb323159 3725 if (kq->kq_state & KQ_WORKLOOP) {
94ff46dc 3726 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_REGISTER),
cb323159
A
3727 ((struct kqworkloop *)kq)->kqwl_dynamicid,
3728 kev->udata, kev->flags, kev->filter);
3729 } else if (kq->kq_state & KQ_WORKQ) {
94ff46dc 3730 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_REGISTER),
cb323159
A
3731 0, kev->udata, kev->flags, kev->filter);
3732 } else {
94ff46dc 3733 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_REGISTER),
cb323159
A
3734 VM_KERNEL_UNSLIDE_OR_PERM(kq),
3735 kev->udata, kev->flags, kev->filter);
0a7de745 3736 }
d9a64523 3737
cb323159
A
3738restart:
3739 /* find the matching knote from the fd tables/hashes */
3740 kn = kq_find_knote_and_kq_lock(kq, kev, fops->f_isfd, p);
3741 error = kevent_register_validate_priority(kq, kn, kev);
3742 result = 0;
0a7de745 3743 if (error) {
cb323159 3744 goto out;
0a7de745 3745 }
39037602 3746
cb323159
A
3747 if (kn == NULL && (kev->flags & EV_ADD) == 0) {
3748 /*
3749 * No knote found, EV_ADD wasn't specified
3750 */
91447636 3751
cb323159
A
3752 if ((kev_flags & EV_ADD) && (kev_flags & EV_DELETE) &&
3753 (kq->kq_state & KQ_WORKLOOP)) {
3754 /*
3755 * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete
3756 * that doesn't care about ENOENT, so just pretend the deletion
3757 * happened.
3758 */
3759 } else {
3760 error = ENOENT;
3761 }
3762 goto out;
3763 } else if (kn == NULL) {
3764 /*
3765 * No knote found, need to attach a new one (attach)
3766 */
5ba3f43e 3767
cb323159 3768 struct fileproc *knote_fp = NULL;
5ba3f43e 3769
cb323159
A
3770 /* grab a file reference for the new knote */
3771 if (fops->f_isfd) {
f427ee49 3772 if ((error = fp_lookup(p, (int)kev->ident, &knote_fp, 0)) != 0) {
5ba3f43e 3773 goto out;
d9a64523 3774 }
d9a64523
A
3775 }
3776
cb323159
A
3777 kn = knote_alloc();
3778 if (kn == NULL) {
3779 error = ENOMEM;
3780 if (knote_fp != NULL) {
f427ee49 3781 fp_drop(p, (int)kev->ident, knote_fp, 0);
cb323159
A
3782 }
3783 goto out;
5ba3f43e 3784 }
5ba3f43e 3785
cb323159
A
3786 kn->kn_fp = knote_fp;
3787 kn->kn_is_fd = fops->f_isfd;
f427ee49 3788 kn->kn_kq_packed = VM_PACK_POINTER((vm_offset_t)kq, KNOTE_KQ_PACKED);
cb323159
A
3789 kn->kn_status = 0;
3790
3791 /* was vanish support requested */
3792 if (kev->flags & EV_VANISHED) {
3793 kev->flags &= ~EV_VANISHED;
3794 kn->kn_status |= KN_REQVANISH;
0a7de745 3795 }
39236c6e 3796
f427ee49 3797 /* snapshot matching/dispatching protocol flags into knote */
cb323159
A
3798 if (kev->flags & EV_DISABLE) {
3799 kn->kn_status |= KN_DISABLED;
3800 }
39037602 3801
cb323159
A
3802 /*
3803 * copy the kevent state into knote
3804 * protocol is that fflags and data
3805 * are saved off, and cleared before
3806 * calling the attach routine.
3807 *
3808 * - kn->kn_sfflags aliases with kev->xflags
3809 * - kn->kn_sdata aliases with kev->data
3810 * - kn->kn_filter is the top 8 bits of kev->filter
3811 */
3812 kn->kn_kevent = *(struct kevent_internal_s *)kev;
3813 kn->kn_sfflags = kev->fflags;
3814 kn->kn_filtid = (uint8_t)~kev->filter;
3815 kn->kn_fflags = 0;
3816 knote_reset_priority(kq, kn, kev->qos);
d9a64523 3817
cb323159
A
3818 /* Add the knote for lookup thru the fd table */
3819 error = kq_add_knote(kq, kn, &knlc, p);
3820 if (error) {
3821 knote_free(kn);
3822 if (knote_fp != NULL) {
f427ee49 3823 fp_drop(p, (int)kev->ident, knote_fp, 0);
d9a64523
A
3824 }
3825
cb323159
A
3826 if (error == ERESTART) {
3827 goto restart;
3a60a9f5 3828 }
cb323159 3829 goto out;
55e303ae 3830 }
55e303ae 3831
cb323159 3832 /* fp reference count now applies to knote */
91447636 3833
d9a64523 3834 /*
cb323159
A
3835 * we can't use filter_call() because f_attach can change the filter ops
3836 * for a filter that supports f_extended_codes, so we need to reload
3837 * knote_fops() and not use `fops`.
d9a64523 3838 */
cb323159
A
3839 result = fops->f_attach(kn, kev);
3840 if (result && !knote_fops(kn)->f_extended_codes) {
3841 result = FILTER_ACTIVE;
3842 }
d9a64523 3843
cb323159 3844 kqlock(kq);
3e170ce0 3845
cb323159
A
3846 if (result & FILTER_THREADREQ_NODEFEER) {
3847 enable_preemption();
3848 }
39037602 3849
cb323159
A
3850 if (kn->kn_flags & EV_ERROR) {
3851 /*
3852 * Failed to attach correctly, so drop.
3853 */
3854 kn->kn_filtid = EVFILTID_DETACHED;
f427ee49 3855 error = (int)kn->kn_sdata;
cb323159
A
3856 knote_drop(kq, kn, &knlc);
3857 result = 0;
3858 goto out;
39037602 3859 }
b0d623f7 3860
d9a64523 3861 /*
cb323159
A
3862 * end "attaching" phase - now just attached
3863 *
3864 * Mark the thread request overcommit, if appropriate
3865 *
3866 * If the attach routine indicated that an
3867 * event is already fired, activate the knote.
3868 */
3869 if ((kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) &&
3870 (kq->kq_state & KQ_WORKLOOP)) {
3871 kqworkloop_set_overcommit((struct kqworkloop *)kq);
3872 }
3873 } else if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
3874 /*
3875 * The knote was dropped while we were waiting for the lock,
3876 * we need to re-evaluate entirely
d9a64523 3877 */
91447636 3878
cb323159
A
3879 goto restart;
3880 } else if (kev->flags & EV_DELETE) {
3881 /*
3882 * Deletion of a knote (drop)
3883 *
3884 * If the filter wants to filter drop events, let it do so.
3885 *
3886 * defer-delete: when trying to delete a disabled EV_DISPATCH2 knote,
3887 * we must wait for the knote to be re-enabled (unless it is being
3888 * re-enabled atomically here).
3889 */
55e303ae 3890
cb323159
A
3891 if (knote_fops(kn)->f_allow_drop) {
3892 bool drop;
b0d623f7 3893
cb323159
A
3894 kqunlock(kq);
3895 drop = knote_fops(kn)->f_allow_drop(kn, kev);
3896 kqlock(kq);
39236c6e 3897
cb323159
A
3898 if (!drop) {
3899 goto out_unlock;
3900 }
3901 }
b0d623f7 3902
cb323159
A
3903 if ((kev->flags & EV_ENABLE) == 0 &&
3904 (kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
3905 (kn->kn_status & KN_DISABLED) != 0) {
3906 kn->kn_status |= KN_DEFERDELETE;
3907 error = EINPROGRESS;
3908 goto out_unlock;
3909 }
d9a64523 3910
cb323159
A
3911 knote_drop(kq, kn, &knlc);
3912 goto out;
3913 } else {
d9a64523 3914 /*
cb323159 3915 * Regular update of a knote (touch)
d9a64523 3916 *
cb323159
A
3917 * Call touch routine to notify filter of changes in filter values
3918 * (and to re-determine if any events are fired).
d9a64523 3919 *
cb323159
A
3920 * If the knote is in defer-delete, avoid calling the filter touch
3921 * routine (it has delivered its last event already).
3922 *
3923 * If the touch routine had no failure,
3924 * apply the requested side effects to the knote.
d9a64523 3925 */
cb323159
A
3926
3927 if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
3928 if (kev->flags & EV_ENABLE) {
3929 result = FILTER_ACTIVE;
3930 }
3931 } else {
3932 kqunlock(kq);
3933 result = filter_call(knote_fops(kn), f_touch(kn, kev));
3934 kqlock(kq);
3935 if (result & FILTER_THREADREQ_NODEFEER) {
3936 enable_preemption();
3937 }
3938 }
3939
3940 if (kev->flags & EV_ERROR) {
3941 result = 0;
3942 goto out_unlock;
3943 }
3944
3945 if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0 &&
3946 kn->kn_udata != kev->udata) {
3947 // this allows klist_copy_udata() not to take locks
3948 os_atomic_store_wide(&kn->kn_udata, kev->udata, relaxed);
3949 }
3950 if ((kev->flags & EV_DISABLE) && !(kn->kn_status & KN_DISABLED)) {
3951 kn->kn_status |= KN_DISABLED;
3952 knote_dequeue(kq, kn);
d9a64523
A
3953 }
3954 }
3955
cb323159
A
3956 /* accept new kevent state */
3957 knote_apply_touch(kq, kn, kev, result);
3958
3959out_unlock:
3960 /*
3961 * When the filter asked for a post-register wait,
3962 * we leave the kqueue locked for kevent_register()
3963 * to call the filter's f_post_register_wait hook.
3964 */
3965 if (result & FILTER_REGISTER_WAIT) {
3966 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
3967 *kn_out = kn;
3968 } else {
3969 knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
3970 }
3971
3972out:
3973 /* output local errors through the kevent */
3974 if (error) {
3975 kev->flags |= EV_ERROR;
3976 kev->data = error;
3977 }
3978 return result;
d9a64523
A
3979}
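
/*
 * Editor's note: a hedged userspace illustration of what the register
 * path above services, using the public kqueue(2)/kevent(2) API.
 * EV_ADD goes through kevent_register()/f_attach, EV_DELETE through
 * knote_drop(); errors are reported back via EV_ERROR as described at
 * the "out:" label above.  Illustrative only.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

int
watch_fd_once(int fd)
{
	int kq = kqueue();
	if (kq < 0) {
		return -1;
	}

	struct kevent change, event;

	/* Register a read knote (attach path). */
	EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kevent(kq, &change, 1, NULL, 0, NULL) < 0) {
		close(kq);
		return -1;
	}

	/* Wait for one event (delivered via knote_process()). */
	int n = kevent(kq, NULL, 0, &event, 1, NULL);
	if (n > 0 && (event.flags & EV_ERROR)) {
		fprintf(stderr, "kevent error: %lld\n", (long long)event.data);
	}

	/* Deregister the knote (drop path). */
	EV_SET(&change, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
	(void)kevent(kq, &change, 1, NULL, 0, NULL);
	close(kq);
	return n;
}
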
3980
3981/*
cb323159 3982 * knote_process - process a triggered event
d9a64523 3983 *
cb323159
A
3984 * Validate that it is really still a triggered event
3985 * by calling the filter routines (if necessary). Hold
3986 * a use reference on the knote to avoid it being detached.
3987 *
3988 * If it is still considered triggered, we will have taken
3989 * a copy of the state under the filter lock. We use that
3990 * snapshot to dispatch the knote for future processing (or
3991 * not, if this was a lost event).
3992 *
3993 * Our caller assures us that nobody else can be processing
3994 * events from this knote during the whole operation. But
3995 * others can be touching or posting events to the knote
3996 * interspersed with our processing it.
3997 *
3998 * caller holds a reference on the kqueue.
3999 * kqueue locked on entry and exit - but may be dropped
d9a64523
A
4000 */
4001static int
cb323159
A
4002knote_process(struct knote *kn, kevent_ctx_t kectx,
4003 kevent_callback_t callback)
d9a64523 4004{
cb323159
A
4005 struct kevent_qos_s kev;
4006 struct kqueue *kq = knote_get_kq(kn);
4007 KNOTE_LOCK_CTX(knlc);
4008 int result = FILTER_ACTIVE;
4009 int error = 0;
4010 bool drop = false;
d9a64523 4011
cb323159
A
4012 /*
4013 * Must be active or stayactive
4014 * Must be queued and not disabled/suppressed or dropping
4015 */
4016 assert(kn->kn_status & KN_QUEUED);
4017 assert(kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE));
4018 assert(!(kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)));
d9a64523 4019
cb323159 4020 if (kq->kq_state & KQ_WORKLOOP) {
94ff46dc 4021 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS),
cb323159
A
4022 ((struct kqworkloop *)kq)->kqwl_dynamicid,
4023 kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4024 kn->kn_filtid);
4025 } else if (kq->kq_state & KQ_WORKQ) {
94ff46dc 4026 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS),
cb323159
A
4027 0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4028 kn->kn_filtid);
4029 } else {
94ff46dc 4030 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS),
cb323159
A
4031 VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata,
4032 kn->kn_status | (kn->kn_id << 32), kn->kn_filtid);
4033 }
4034
4035 if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) {
d9a64523 4036 /*
cb323159
A
4037 * When the knote is dropping or has dropped,
4038 * then there's nothing we want to process.
d9a64523 4039 */
cb323159 4040 return EJUSTRETURN;
d9a64523
A
4041 }
4042
cb323159
A
4043 /*
4044 * While waiting for the knote lock, we may have dropped the kq lock.
4045 * and a touch may have disabled and dequeued the knote.
4046 */
4047 if (!(kn->kn_status & KN_QUEUED)) {
4048 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4049 return EJUSTRETURN;
4050 }
d9a64523 4051
cb323159
A
4052 /*
4053 * For deferred-drop or vanished events, we just create a fake
4054 * event to acknowledge end-of-life. Otherwise, we call the
4055 * filter's process routine to snapshot the kevent state under
4056 * the filter's locking protocol.
4057 *
4058 * suppress knotes to avoid returning the same event multiple times in
4059 * a single call.
4060 */
4061 knote_suppress(kq, kn);
d9a64523 4062
cb323159 4063 if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
f427ee49 4064 uint16_t kev_flags = EV_DISPATCH2 | EV_ONESHOT;
cb323159
A
4065 if (kn->kn_status & KN_DEFERDELETE) {
4066 kev_flags |= EV_DELETE;
4067 } else {
4068 kev_flags |= EV_VANISHED;
4069 }
d9a64523 4070
cb323159
A
4071 /* create fake event */
4072 kev = (struct kevent_qos_s){
4073 .filter = kn->kn_filter,
4074 .ident = kn->kn_id,
4075 .flags = kev_flags,
4076 .udata = kn->kn_udata,
4077 };
4078 } else {
4079 kqunlock(kq);
4080 kev = (struct kevent_qos_s) { };
4081 result = filter_call(knote_fops(kn), f_process(kn, &kev));
4082 kqlock(kq);
d9a64523
A
4083 }
4084
cb323159
A
4085 /*
4086 * Determine how to dispatch the knote for future event handling.
4087 * not-fired: just return (do not callout, leave deactivated).
4088 * One-shot: If dispatch2, enter deferred-delete mode (unless this
4089 * is the deferred-delete event delivery itself). Otherwise,
4090 * drop it.
4091 * Dispatch: don't clear state, just mark it disabled.
4092 * Cleared: just leave it deactivated.
4093 * Others: re-activate as there may be more events to handle.
4094 * This will not wake up more handlers right now, but
4095 * at the completion of handling events it may trigger
4096 * more handler threads (TODO: optimize based on more than
4097 * just this one event being detected by the filter).
4098 */
4099 if ((result & FILTER_ACTIVE) == 0) {
4100 if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0) {
4101 /*
4102 * Stay active knotes should not be unsuppressed or we'd create an
4103 * infinite loop.
4104 *
4105 * Some knotes (like EVFILT_WORKLOOP) can be reactivated from
4106 * within f_process() but that doesn't necessarily make them
4107 * ready to process, so we should leave them be.
4108 *
4109 * For other knotes, since we will not return an event,
4110 * there's no point keeping the knote suppressed.
4111 */
4112 knote_unsuppress(kq, kn);
d9a64523 4113 }
cb323159
A
4114 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4115 return EJUSTRETURN;
4116 }
4117
4118 if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
4119 knote_adjust_qos(kq, kn, result);
4120 }
4121 kev.qos = _pthread_priority_combine(kn->kn_qos, kn->kn_qos_override);
4122
4123 if (kev.flags & EV_ONESHOT) {
4124 if ((kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
4125 (kn->kn_status & KN_DEFERDELETE) == 0) {
4126 /* defer dropping non-delete oneshot dispatch2 events */
4127 kn->kn_status |= KN_DEFERDELETE | KN_DISABLED;
4128 } else {
4129 drop = true;
0a7de745 4130 }
cb323159
A
4131 } else if (kn->kn_flags & EV_DISPATCH) {
4132 /* disable all dispatch knotes */
4133 kn->kn_status |= KN_DISABLED;
4134 } else if ((kn->kn_flags & EV_CLEAR) == 0) {
4135 /* re-activate in case there are more events */
4136 knote_activate(kq, kn, FILTER_ACTIVE);
d9a64523
A
4137 }
4138
cb323159
A
4139 /*
4140 * callback to handle each event as we find it.
4141 * If we have to detach and drop the knote, do
4142 * it while we have the kq unlocked.
4143 */
4144 if (drop) {
4145 knote_drop(kq, kn, &knlc);
4146 } else {
4147 knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
d9a64523 4148 }
d9a64523 4149
cb323159 4150 if (kev.flags & EV_VANISHED) {
94ff46dc 4151 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_VANISHED),
cb323159
A
4152 kev.ident, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4153 kn->kn_filtid);
4154 }
91447636 4155
cb323159
A
4156 error = (callback)(&kev, kectx);
4157 kqlock(kq);
4158 return error;
4159}
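
/*
 * Editor's note: a hedged userspace sketch of the EV_DISPATCH protocol
 * that the dispatch decision above implements: after each delivery the
 * knote is left disabled and must be re-armed from userspace with
 * EV_ENABLE once the event has been handled.  Public kevent(2) API,
 * illustrative only.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

void
dispatch_read_loop(int kq, int fd)
{
	struct kevent kev;

	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
	(void)kevent(kq, &kev, 1, NULL, 0, NULL);

	for (;;) {
		if (kevent(kq, NULL, 0, &kev, 1, NULL) != 1) {
			break;
		}
		/* ... handle the event; the knote stays disabled meanwhile ... */

		/* Re-arm: clears the disabled state so new events can fire. */
		EV_SET(&kev, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
		(void)kevent(kq, &kev, 1, NULL, 0, NULL);
	}
}
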
4160
4161/*
4162 * Returns -1 if the kqueue was unbound and processing should not happen
4163 */
4164#define KQWQAE_BEGIN_PROCESSING 1
4165#define KQWQAE_END_PROCESSING 2
4166#define KQWQAE_UNBIND 3
4167static int
4168kqworkq_acknowledge_events(struct kqworkq *kqwq, workq_threadreq_t kqr,
4169 int kevent_flags, int kqwqae_op)
55e303ae 4170{
cb323159
A
4171 thread_qos_t old_override = THREAD_QOS_UNSPECIFIED;
4172 thread_t thread = kqr_thread_fast(kqr);
4173 struct knote *kn;
4174 int rc = 0;
4175 bool unbind;
4176 struct kqtailq *suppressq = &kqwq->kqwq_suppressed[kqr->tr_kq_qos_index];
55e303ae 4177
cb323159
A
4178 kqlock_held(&kqwq->kqwq_kqueue);
4179
4180 if (!TAILQ_EMPTY(suppressq)) {
4181 /*
4182 * Return suppressed knotes to their original state.
4183 * For workq kqueues, suppressed ones that are still
4184 * truly active (not just forced into the queue) will
4185 * set flags we check below to see if anything got
4186 * woken up.
4187 */
4188 while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
4189 assert(kn->kn_status & KN_SUPPRESSED);
4190 knote_unsuppress(kqwq, kn);
39037602 4191 }
55e303ae
A
4192 }
4193
cb323159
A
4194#if DEBUG || DEVELOPMENT
4195 thread_t self = current_thread();
4196 struct uthread *ut = get_bsdthread_info(self);
39037602 4197
cb323159
A
4198 assert(thread == self);
4199 assert(ut->uu_kqr_bound == kqr);
4200#endif // DEBUG || DEVELOPMENT
39037602 4201
cb323159
A
4202 if (kqwqae_op == KQWQAE_UNBIND) {
4203 unbind = true;
4204 } else if ((kevent_flags & KEVENT_FLAG_PARKING) == 0) {
4205 unbind = false;
5ba3f43e 4206 } else {
cb323159 4207 unbind = !kqr->tr_kq_wakeup;
d9a64523 4208 }
cb323159
A
4209 if (unbind) {
4210 old_override = kqworkq_unbind_locked(kqwq, kqr, thread);
4211 rc = -1;
d9a64523 4212 /*
cb323159
A
4213 * request a new thread if we didn't process the whole queue or real events
4214 * have happened (not just putting stay-active events back).
d9a64523 4215 */
cb323159
A
4216 if (kqr->tr_kq_wakeup) {
4217 kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr,
4218 kqr->tr_kq_qos_index, 0);
d9a64523 4219 }
cb323159
A
4220 }
4221
4222 if (rc == 0) {
d9a64523 4223 /*
cb323159
A
4224 * Reset wakeup bit to notice events firing while we are processing,
4225 * as we cannot rely on the bucket queue emptiness because of stay
4226 * active knotes.
d9a64523 4227 */
cb323159
A
4228 kqr->tr_kq_wakeup = false;
4229 }
d9a64523 4230
cb323159
A
4231 if (old_override) {
4232 thread_drop_kevent_override(thread);
4233 }
39037602 4234
cb323159
A
4235 return rc;
4236}
39037602 4237
cb323159
A
4238/*
4239 * Return 0 to indicate that processing should proceed,
4240 * -1 if there is nothing to process.
4241 *
4242 * Called with kqueue locked and returns the same way,
4243 * but may drop lock temporarily.
4244 */
4245static int
4246kqworkq_begin_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
4247 int kevent_flags)
4248{
4249 int rc = 0;
39037602 4250
94ff46dc 4251 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START,
cb323159 4252 0, kqr->tr_kq_qos_index);
39037602 4253
cb323159
A
4254 rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
4255 KQWQAE_BEGIN_PROCESSING);
91447636 4256
94ff46dc 4257 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END,
cb323159 4258 thread_tid(kqr_thread(kqr)), kqr->tr_kq_wakeup);
91447636 4259
cb323159
A
4260 return rc;
4261}
d9a64523 4262
cb323159
A
4263static thread_qos_t
4264kqworkloop_acknowledge_events(struct kqworkloop *kqwl)
4265{
4266 kq_index_t qos = THREAD_QOS_UNSPECIFIED;
4267 struct knote *kn, *tmp;
d9a64523 4268
cb323159 4269 kqlock_held(kqwl);
91447636 4270
cb323159 4271 TAILQ_FOREACH_SAFE(kn, &kqwl->kqwl_suppressed, kn_tqe, tmp) {
d9a64523 4272 /*
cb323159
A
4273 * If a knote that can adjust QoS is disabled because of the automatic
4274 * behavior of EV_DISPATCH, the knotes should stay suppressed so that
4275 * further overrides keep pushing.
d9a64523 4276 */
cb323159
A
4277 if (knote_fops(kn)->f_adjusts_qos && (kn->kn_status & KN_DISABLED) &&
4278 (kn->kn_status & (KN_STAYACTIVE | KN_DROPPING)) == 0 &&
4279 (kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) {
4280 qos = MAX(qos, kn->kn_qos_override);
4281 continue;
d9a64523 4282 }
cb323159
A
4283 knote_unsuppress(kqwl, kn);
4284 }
6d2010ae 4285
cb323159
A
4286 return qos;
4287}
39037602 4288
cb323159
A
4289static int
4290kqworkloop_begin_processing(struct kqworkloop *kqwl, unsigned int kevent_flags)
4291{
4292 workq_threadreq_t kqr = &kqwl->kqwl_request;
4293 struct kqueue *kq = &kqwl->kqwl_kqueue;
4294 thread_qos_t qos_override;
4295 thread_t thread = kqr_thread_fast(kqr);
4296 int rc = 0, op = KQWL_UTQ_NONE;
5ba3f43e 4297
cb323159 4298 kqlock_held(kq);
39236c6e 4299
94ff46dc 4300 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_START,
cb323159 4301 kqwl->kqwl_dynamicid, 0, 0);
39037602 4302
cb323159
A
4303 /* nobody else should still be processing */
4304 assert((kq->kq_state & KQ_PROCESSING) == 0);
39037602 4305
cb323159 4306 kq->kq_state |= KQ_PROCESSING;
91447636 4307
cb323159
A
4308 if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
4309 op = KQWL_UTQ_RESET_WAKEUP_OVERRIDE;
4310 }
b7266188 4311
cb323159 4312 if (kevent_flags & KEVENT_FLAG_PARKING) {
91447636 4313 /*
cb323159
A
4314 * When "parking" we want to process events and if no events are found
4315 * unbind.
d9a64523 4316 *
cb323159
A
4317 * However, non-overcommit threads sometimes park even when they have
4318 * more work so that the pool can narrow. For these, we need to unbind
4319 * early, so that calling kqworkloop_update_threads_qos() can ask the
4320 * workqueue subsystem whether the thread should park despite having
4321 * pending events.
91447636 4322 */
cb323159
A
4323 if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
4324 op = KQWL_UTQ_PARKING;
d9a64523 4325 } else {
cb323159 4326 op = KQWL_UTQ_UNBINDING;
d9a64523 4327 }
cb323159
A
4328 }
4329 if (op == KQWL_UTQ_NONE) {
4330 goto done;
4331 }
5ba3f43e 4332
cb323159
A
4333 qos_override = kqworkloop_acknowledge_events(kqwl);
4334
4335 if (op == KQWL_UTQ_UNBINDING) {
4336 kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_IMMEDIATELY);
4337 kqworkloop_release_live(kqwl);
4338 }
4339 kqworkloop_update_threads_qos(kqwl, op, qos_override);
4340 if (op == KQWL_UTQ_PARKING) {
4341 if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) {
4342 /*
4343 * We cannot trust tr_kq_wakeup when looking at stay active knotes.
4344 * We need to process once, and kqworkloop_end_processing will
4345 * handle the unbind.
4346 */
4347 } else if (!kqr->tr_kq_wakeup || kqwl->kqwl_owner) {
4348 kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
4349 kqworkloop_release_live(kqwl);
4350 rc = -1;
4351 }
4352 } else if (op == KQWL_UTQ_UNBINDING) {
4353 if (kqr_thread(kqr) == thread) {
4354 /*
4355 * The thread request fired again, passed the admission check and
4356 * got bound to the current thread again.
4357 */
d9a64523 4358 } else {
cb323159 4359 rc = -1;
39037602 4360 }
91447636
A
4361 }
4362
cb323159 4363 if (rc == 0) {
d9a64523 4364 /*
cb323159
A
4365 * Reset wakeup bit to notice stay active events firing while we are
4366 * processing, as we cannot rely on the stayactive bucket emptiness.
d9a64523 4367 */
cb323159
A
4368 kqwl->kqwl_wakeup_indexes &= ~KQWL_STAYACTIVE_FIRED_BIT;
4369 } else {
4370 kq->kq_state &= ~KQ_PROCESSING;
d9a64523 4371 }
39037602 4372
cb323159
A
4373 if (rc == -1) {
4374 kqworkloop_unbind_delayed_override_drop(thread);
39037602 4375 }
cb323159
A
4376
4377done:
94ff46dc 4378 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_END,
cb323159
A
4379 kqwl->kqwl_dynamicid, 0, 0);
4380
4381 return rc;
91447636
A
4382}
4383
b0d623f7 4384/*
cb323159
A
4385 * Return 0 to indicate that processing should proceed,
4386 * -1 if there is nothing to process.
4387 * EBADF if the kqueue is draining
b0d623f7 4388 *
cb323159
A
4389 * Called with kqueue locked and returns the same way,
4390 * but may drop lock temporarily.
4391 * May block.
b0d623f7
A
4392 */
4393static int
cb323159 4394kqfile_begin_processing(struct kqfile *kq)
b0d623f7 4395{
cb323159 4396 struct kqtailq *suppressq;
b0d623f7 4397
cb323159 4398 kqlock_held(kq);
b0d623f7 4399
cb323159 4400 assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
94ff46dc 4401 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_START,
cb323159 4402 VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
5ba3f43e 4403
cb323159
A
4404 /* wait to become the exclusive processing thread */
4405 for (;;) {
4406 if (kq->kqf_state & KQ_DRAIN) {
94ff46dc 4407 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
cb323159
A
4408 VM_KERNEL_UNSLIDE_OR_PERM(kq), 2);
4409 return EBADF;
4410 }
d9a64523 4411
cb323159
A
4412 if ((kq->kqf_state & KQ_PROCESSING) == 0) {
4413 break;
4414 }
d9a64523 4415
cb323159
A
4416 /* if someone else is processing the queue, wait */
4417 kq->kqf_state |= KQ_PROCWAIT;
4418 suppressq = &kq->kqf_suppressed;
4419 waitq_assert_wait64((struct waitq *)&kq->kqf_wqs,
4420 CAST_EVENT64_T(suppressq), THREAD_UNINT | THREAD_WAIT_NOREPORT,
4421 TIMEOUT_WAIT_FOREVER);
b0d623f7 4422
d9a64523 4423 kqunlock(kq);
cb323159 4424 thread_block(THREAD_CONTINUE_NULL);
d9a64523
A
4425 kqlock(kq);
4426 }
39037602 4427
cb323159 4428 /* Nobody else processing */
39037602 4429
cb323159
A
4430 /* clear pre-posts and KQ_WAKEUP now, in case we bail early */
4431 waitq_set_clear_preposts(&kq->kqf_wqs);
4432 kq->kqf_state &= ~KQ_WAKEUP;
39037602 4433
cb323159
A
4434 /* anything left to process? */
4435 if (TAILQ_EMPTY(&kq->kqf_queue)) {
94ff46dc 4436 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
cb323159
A
4437 VM_KERNEL_UNSLIDE_OR_PERM(kq), 1);
4438 return -1;
b0d623f7 4439 }
39236c6e 4440
cb323159
A
4441 /* convert to processing mode */
4442 kq->kqf_state |= KQ_PROCESSING;
b0d623f7 4443
94ff46dc 4444 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
cb323159
A
4445 VM_KERNEL_UNSLIDE_OR_PERM(kq));
4446
4447 return 0;
4448}
4449
4450/*
4451 * Try to end the processing, only called when a workq thread is attempting to
4452 * park (KEVENT_FLAG_PARKING is set).
4453 *
4454 * When returning -1, the kqworkq is set up again so that it is ready to be
4455 * processed.
4456 */
4457static int
4458kqworkq_end_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
4459 int kevent_flags)
4460{
4461 if (!TAILQ_EMPTY(&kqwq->kqwq_queue[kqr->tr_kq_qos_index])) {
4462 /* remember we didn't process everything */
4463 kqr->tr_kq_wakeup = true;
4464 }
4465
4466 if (kevent_flags & KEVENT_FLAG_PARKING) {
4467 /*
4468 * if acknowledge events "succeeds" it means there are events,
4469 * which is a failure condition for end_processing.
4470 */
4471 int rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
4472 KQWQAE_END_PROCESSING);
4473 if (rc == 0) {
4474 return -1;
4475 }
d9a64523
A
4476 }
4477
cb323159 4478 return 0;
d9a64523 4479}
39037602
A
4480
4481/*
cb323159
A
4482 * Try to end the processing, only called when a workq thread is attempting to
4483 * park (KEVENT_FLAG_PARKING is set).
4484 *
4485 * When returning -1, the kqworkloop is set up again so that it is ready to be
4486 * processed (as if kqworkloop_begin_processing had just been called).
4487 *
4488 * If successful and KEVENT_FLAG_PARKING was set in the kevent_flags,
4489 * the kqworkloop is unbound from its servicer as a side effect.
39037602
A
4490 */
4491static int
cb323159 4492kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags)
39037602 4493{
cb323159
A
4494 struct kqueue *kq = &kqwl->kqwl_kqueue;
4495 workq_threadreq_t kqr = &kqwl->kqwl_request;
4496 thread_qos_t qos_override;
4497 thread_t thread = kqr_thread_fast(kqr);
d9a64523 4498 int rc = 0;
39037602 4499
cb323159 4500 kqlock_held(kq);
39037602 4501
94ff46dc 4502 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_START,
cb323159 4503 kqwl->kqwl_dynamicid, 0, 0);
39037602 4504
cb323159
A
4505 if (flags & KQ_PROCESSING) {
4506 assert(kq->kq_state & KQ_PROCESSING);
d9a64523 4507
d9a64523 4508 /*
cb323159
A
4509 * If we still have queued stayactive knotes, remember we didn't finish
4510 * processing all of them. This should be extremely rare and would
4511 * require to have a lot of them registered and fired.
d9a64523 4512 */
cb323159
A
4513 if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) {
4514 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS,
4515 KQWL_BUCKET_STAYACTIVE);
b0d623f7 4516 }
39037602 4517
d9a64523 4518 /*
cb323159
A
4519 * When KEVENT_FLAG_PARKING is set, we need to attempt an unbind while
4520 * still under the lock.
4521 *
4522 * So we do everything kqworkloop_unbind() would do, but because we're
4523 * inside kqueue_process(), if the workloop actually received events
4524 * while our locks were dropped, we have the opportunity to fail the end
4525 * processing and loop again.
4526 *
4527 * This avoids going through the process-wide workqueue lock hence
4528 * scales better.
d9a64523 4529 */
cb323159
A
4530 if (kevent_flags & KEVENT_FLAG_PARKING) {
4531 qos_override = kqworkloop_acknowledge_events(kqwl);
4532 }
b0d623f7
A
4533 }
4534
cb323159
A
4535 if (kevent_flags & KEVENT_FLAG_PARKING) {
4536 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_PARKING, qos_override);
4537 if (kqr->tr_kq_wakeup && !kqwl->kqwl_owner) {
4538 /*
4539 * Reset wakeup bit to notice stay active events firing while we are
4540 * processing, as we cannot rely on the stayactive bucket emptiness.
4541 */
4542 kqwl->kqwl_wakeup_indexes &= ~KQWL_STAYACTIVE_FIRED_BIT;
4543 rc = -1;
4544 } else {
4545 kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
4546 kqworkloop_release_live(kqwl);
4547 kq->kq_state &= ~flags;
4548 }
4549 } else {
4550 kq->kq_state &= ~flags;
4551 kq->kq_state |= KQ_R2K_ARMED;
4552 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0);
4553 }
5ba3f43e 4554
cb323159
A
4555 if ((kevent_flags & KEVENT_FLAG_PARKING) && rc == 0) {
4556 kqworkloop_unbind_delayed_override_drop(thread);
d9a64523 4557 }
39236c6e 4558
94ff46dc 4559 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_END,
cb323159
A
4560 kqwl->kqwl_dynamicid, 0, 0);
4561
d9a64523
A
4562 return rc;
4563}
4564
4565/*
cb323159 4566 * Called with kqueue lock held.
d9a64523 4567 *
cb323159
A
4568 * 0: no more events
4569 * -1: has more events
4570 * EBADF: kqueue is in draining mode
d9a64523
A
4571 */
4572static int
cb323159 4573kqfile_end_processing(struct kqfile *kq)
d9a64523 4574{
cb323159
A
4575 struct kqtailq *suppressq = &kq->kqf_suppressed;
4576 struct knote *kn;
4577 int procwait;
d9a64523 4578
cb323159 4579 kqlock_held(kq);
d9a64523 4580
cb323159 4581 assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
5ba3f43e 4582
94ff46dc 4583 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_END),
cb323159 4584 VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
5ba3f43e 4585
cb323159
A
4586 /*
4587 * Return suppressed knotes to their original state.
4588 */
4589 while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
4590 assert(kn->kn_status & KN_SUPPRESSED);
4591 knote_unsuppress(kq, kn);
4592 }
b0d623f7 4593
cb323159
A
4594 procwait = (kq->kqf_state & KQ_PROCWAIT);
4595 kq->kqf_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);
5ba3f43e 4596
cb323159
A
4597 if (procwait) {
4598 /* first wake up any thread already waiting to process */
4599 waitq_wakeup64_all((struct waitq *)&kq->kqf_wqs,
4600 CAST_EVENT64_T(suppressq), THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
4601 }
5ba3f43e 4602
cb323159
A
4603 if (kq->kqf_state & KQ_DRAIN) {
4604 return EBADF;
5ba3f43e 4605 }
cb323159 4606 return (kq->kqf_state & KQ_WAKEUP) ? -1 : 0;
5ba3f43e
A
4607}
4608
cb323159
A
4609static int
4610kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options,
4611 struct kqueue_workloop_params *params, int *retval)
5ba3f43e 4612{
cb323159
A
4613 int error = 0;
4614 struct kqworkloop *kqwl;
4615 struct filedesc *fdp = p->p_fd;
4616 workq_threadreq_param_t trp = { };
4617
4618 switch (cmd) {
4619 case KQ_WORKLOOP_CREATE:
4620 if (!params->kqwlp_flags) {
4621 error = EINVAL;
4622 break;
4623 }
5ba3f43e 4624
cb323159
A
4625 if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) &&
4626 (params->kqwlp_sched_pri < 1 ||
4627 params->kqwlp_sched_pri > 63 /* MAXPRI_USER */)) {
4628 error = EINVAL;
4629 break;
4630 }
5ba3f43e 4631
cb323159
A
4632 if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) &&
4633 invalid_policy(params->kqwlp_sched_pol)) {
4634 error = EINVAL;
4635 break;
4636 }
4637
4638 if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) &&
4639 (params->kqwlp_cpu_percent <= 0 ||
4640 params->kqwlp_cpu_percent > 100 ||
4641 params->kqwlp_cpu_refillms <= 0 ||
4642 params->kqwlp_cpu_refillms > 0x00ffffff)) {
4643 error = EINVAL;
4644 break;
5ba3f43e 4645 }
cb323159
A
4646
4647 if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) {
4648 trp.trp_flags |= TRP_PRIORITY;
f427ee49 4649 trp.trp_pri = (uint8_t)params->kqwlp_sched_pri;
cb323159
A
4650 }
4651 if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) {
4652 trp.trp_flags |= TRP_POLICY;
f427ee49 4653 trp.trp_pol = (uint8_t)params->kqwlp_sched_pol;
cb323159
A
4654 }
4655 if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) {
4656 trp.trp_flags |= TRP_CPUPERCENT;
4657 trp.trp_cpupercent = (uint8_t)params->kqwlp_cpu_percent;
4658 trp.trp_refillms = params->kqwlp_cpu_refillms;
4659 }
4660
4661 error = kqworkloop_get_or_create(p, params->kqwlp_id, &trp,
4662 KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
4663 KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST, &kqwl);
4664 if (error) {
4665 break;
4666 }
4667
4668 if (!(fdp->fd_flags & FD_WORKLOOP)) {
4669 /* FD_WORKLOOP indicates we have ever created a workloop
4670 * via this syscall; it is only ever added to a process, never
4671 * removed.
4672 */
4673 proc_fdlock(p);
4674 fdp->fd_flags |= FD_WORKLOOP;
4675 proc_fdunlock(p);
4676 }
4677 break;
4678 case KQ_WORKLOOP_DESTROY:
4679 error = kqworkloop_get_or_create(p, params->kqwlp_id, NULL,
4680 KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
4681 KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST, &kqwl);
4682 if (error) {
4683 break;
4684 }
4685 kqlock(kqwl);
4686 trp.trp_value = kqwl->kqwl_params;
4687 if (trp.trp_flags && !(trp.trp_flags & TRP_RELEASED)) {
4688 trp.trp_flags |= TRP_RELEASED;
94ff46dc 4689 kqwl->kqwl_params = trp.trp_value;
cb323159
A
4690 kqworkloop_release_live(kqwl);
4691 } else {
4692 error = EINVAL;
4693 }
4694 kqunlock(kqwl);
4695 kqworkloop_release(kqwl);
4696 break;
4697 }
4698 *retval = 0;
4699 return error;
4700}
4701
4702int
4703kqueue_workloop_ctl(proc_t p, struct kqueue_workloop_ctl_args *uap, int *retval)
4704{
4705 struct kqueue_workloop_params params = {
4706 .kqwlp_id = 0,
4707 };
4708 if (uap->sz < sizeof(params.kqwlp_version)) {
4709 return EINVAL;
4710 }
4711
4712 size_t copyin_sz = MIN(sizeof(params), uap->sz);
4713 int rv = copyin(uap->addr, &params, copyin_sz);
4714 if (rv) {
4715 return rv;
4716 }
4717
4718 if (params.kqwlp_version != (int)uap->sz) {
4719 return EINVAL;
5ba3f43e 4720 }
d9a64523 4721
cb323159
A
4722 return kqueue_workloop_ctl_internal(p, uap->cmd, uap->options, &params,
4723 retval);
5ba3f43e
A
4724}
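
/*
 * Editor's note: a hedged sketch of the size-as-version copyin pattern
 * used by kqueue_workloop_ctl() above.  The caller passes the size of
 * the structure it was compiled against; the kernel zero-fills a local
 * copy, copies in the smaller of the two sizes, and then requires the
 * embedded version field to equal the passed size.  memcpy() stands in
 * for copyin() and the structure is invented for illustration.
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct ctl_params_v {
	int      version;       /* caller sets this to sizeof(its struct) */
	uint64_t id;
	/* newer fields are only ever appended, never reordered */
};

static int
copyin_versioned(const void *uaddr, size_t usz, struct ctl_params_v *out)
{
	if (usz < sizeof(out->version)) {
		return EINVAL;
	}
	memset(out, 0, sizeof(*out));           /* absent new fields read as 0 */
	size_t cpsz = usz < sizeof(*out) ? usz : sizeof(*out);
	memcpy(out, uaddr, cpsz);               /* copyin() in the kernel */
	if (out->version != (int)usz) {         /* version must match size */
		return EINVAL;
	}
	return 0;
}
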
4725
cb323159 4726/*ARGSUSED*/
5ba3f43e 4727static int
cb323159
A
4728kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
4729 __unused vfs_context_t ctx)
5ba3f43e 4730{
cb323159
A
4731 struct kqfile *kq = (struct kqfile *)fp->f_data;
4732 struct kqtailq *suppressq = &kq->kqf_suppressed;
4733 struct kqtailq *queue = &kq->kqf_queue;
4734 struct knote *kn;
4735 int retnum = 0;
5ba3f43e 4736
cb323159
A
4737 if (which != FREAD) {
4738 return 0;
4739 }
5ba3f43e 4740
cb323159 4741 kqlock(kq);
5ba3f43e 4742
cb323159 4743 assert((kq->kqf_state & KQ_WORKQ) == 0);
5ba3f43e 4744
cb323159
A
4745 /*
4746 * If this is the first pass, link the wait queue associated with
4747 * the kqueue onto the wait queue set for the select(). Normally we
4748 * use selrecord() for this, but it uses the wait queue within the
4749 * selinfo structure and we need to use the main one for the kqueue to
4750 * catch events from KN_STAYQUEUED sources. So we do the linkage manually.
4751 * (The select() call will unlink them when it ends).
4752 */
4753 if (wq_link_id != NULL) {
4754 thread_t cur_act = current_thread();
4755 struct uthread * ut = get_bsdthread_info(cur_act);
5ba3f43e 4756
cb323159
A
4757 kq->kqf_state |= KQ_SEL;
4758 waitq_link((struct waitq *)&kq->kqf_wqs, ut->uu_wqset,
4759 WAITQ_SHOULD_LOCK, (uint64_t *)wq_link_id);
4760
4761 /* always consume the reserved link object */
4762 waitq_link_release(*(uint64_t *)wq_link_id);
4763 *(uint64_t *)wq_link_id = 0;
d9a64523 4764
d9a64523 4765 /*
cb323159
A
4766 * selprocess() is expecting that we send it back the waitq
4767 * that was just added to the thread's waitq set. In order
4768 * to not change the selrecord() API (which is exported to
4769 * kexts), we pass this value back through the
4770 * void *wq_link_id pointer we were passed. We need to use
4771 * memcpy here because the pointer may not be properly aligned
4772 * on 32-bit systems.
d9a64523 4773 */
cb323159
A
4774 void *wqptr = &kq->kqf_wqs;
4775 memcpy(wq_link_id, (void *)&wqptr, sizeof(void *));
d9a64523
A
4776 }
4777
cb323159
A
4778 if (kqfile_begin_processing(kq) == -1) {
4779 kqunlock(kq);
4780 return 0;
d9a64523 4781 }
cb323159
A
4782
4783 if (!TAILQ_EMPTY(queue)) {
4784 /*
4785 * there is something queued - but it might be a
4786 * KN_STAYACTIVE knote, which may or may not have
4787 * any events pending. Otherwise, we have to walk
4788 * the list of knotes to see, and peek at the
4789 * (non-vanished) stay-active ones to be really sure.
4790 */
4791 while ((kn = (struct knote *)TAILQ_FIRST(queue)) != NULL) {
4792 if (kn->kn_status & KN_ACTIVE) {
4793 retnum = 1;
4794 goto out;
4795 }
4796 assert(kn->kn_status & KN_STAYACTIVE);
4797 knote_suppress(kq, kn);
d9a64523 4798 }
5ba3f43e 4799
d9a64523 4800 /*
cb323159
A
4801 * There were no regular events on the queue, so take
4802 * a deeper look at the stay-queued ones we suppressed.
d9a64523 4803 */
cb323159
A
4804 while ((kn = (struct knote *)TAILQ_FIRST(suppressq)) != NULL) {
4805 KNOTE_LOCK_CTX(knlc);
4806 int result = 0;
d9a64523 4807
cb323159
A
4808 /* If didn't vanish while suppressed - peek at it */
4809 if ((kn->kn_status & KN_DROPPING) || !knote_lock(kq, kn, &knlc,
4810 KNOTE_KQ_LOCK_ON_FAILURE)) {
4811 continue;
4812 }
d9a64523 4813
cb323159 4814 result = filter_call(knote_fops(kn), f_peek(kn));
d9a64523 4815
cb323159
A
4816 kqlock(kq);
4817 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
5ba3f43e 4818
cb323159
A
4819 /* unsuppress it */
4820 knote_unsuppress(kq, kn);
4821
4822 /* has data or it has to report a vanish */
4823 if (result & FILTER_ACTIVE) {
4824 retnum = 1;
4825 goto out;
4826 }
4827 }
4828 }
4829
4830out:
4831 kqfile_end_processing(kq);
4832 kqunlock(kq);
4833 return retnum;
5ba3f43e
A
4834}
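
/*
 * Editor's note: kqueue_select() above is the kernel side of monitoring
 * a kqueue file descriptor with select(); the descriptor reports as
 * readable when the kqueue has pending events.  A hedged userspace
 * illustration:
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/select.h>
#include <sys/time.h>

/* Returns >0 if the kqueue fd has at least one pending event. */
int
kqueue_is_ready(int kq, struct timeval *tv)
{
	fd_set rfds;

	FD_ZERO(&rfds);
	FD_SET(kq, &rfds);
	return select(kq + 1, &rfds, NULL, NULL, tv);
}
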
4835
6d2010ae 4836/*
cb323159 4837 * kqueue_close -
6d2010ae 4838 */
cb323159 4839/*ARGSUSED*/
6d2010ae 4840static int
cb323159 4841kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
6d2010ae 4842{
cb323159 4843 struct kqfile *kqf = (struct kqfile *)fg->fg_data;
5ba3f43e 4844
cb323159
A
4845 assert((kqf->kqf_state & KQ_WORKQ) == 0);
4846 kqueue_dealloc(&kqf->kqf_kqueue);
4847 fg->fg_data = NULL;
39037602
A
4848 return 0;
4849}
4850
4851/*
cb323159
A
4852 * Max depth of the nested kq path that can be created.
4853 * Note that this has to be less than the size of kq_level
4854 * to avoid wrapping around and mislabeling the level.
4855 */
4856#define MAX_NESTED_KQ 1000
4857
4858/*ARGSUSED*/
4859/*
4860 * The caller has taken a use-count reference on this kqueue and will donate it
4861 * to the kqueue we are being added to. This keeps the kqueue from closing until
4862 * that relationship is torn down.
39037602 4863 */
d9a64523 4864static int
cb323159
A
4865kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
4866 __unused struct kevent_qos_s *kev)
39037602 4867{
cb323159
A
4868 struct kqfile *kqf = (struct kqfile *)fp->f_data;
4869 struct kqueue *kq = &kqf->kqf_kqueue;
4870 struct kqueue *parentkq = knote_get_kq(kn);
4871
4872 assert((kqf->kqf_state & KQ_WORKQ) == 0);
4873
4874 if (parentkq == kq || kn->kn_filter != EVFILT_READ) {
4875 knote_set_error(kn, EINVAL);
4876 return 0;
39037602
A
4877 }
4878
cb323159
A
4879 /*
4880 * We have to avoid creating a cycle when nesting kqueues
4881 * inside another. Rather than trying to walk the whole
4882 * potential DAG of nested kqueues, we just use a simple
4883 * ceiling protocol. When a kqueue is inserted into another,
4884 * we check that the (future) parent is not already nested
4885 * into another kqueue at a lower level than the potential
4886 * child (because it could indicate a cycle). If that test
4887 * passes, we just mark the nesting levels accordingly.
4888 *
4889 * Only up to MAX_NESTED_KQ can be nested.
4890 *
4891 * Note: kqworkq and kqworkloop cannot be nested and have reused their
4892 * kq_level field, so ignore these as parent.
4893 */
4894
4895 kqlock(parentkq);
4896
4897 if ((parentkq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0) {
4898 if (parentkq->kq_level > 0 &&
4899 parentkq->kq_level < kq->kq_level) {
4900 kqunlock(parentkq);
4901 knote_set_error(kn, EINVAL);
4902 return 0;
4903 }
4904
4905 /* set parent level appropriately */
4906 uint16_t plevel = (parentkq->kq_level == 0)? 2: parentkq->kq_level;
4907 if (plevel < kq->kq_level + 1) {
4908 if (kq->kq_level + 1 > MAX_NESTED_KQ) {
4909 kqunlock(parentkq);
4910 knote_set_error(kn, EINVAL);
4911 return 0;
4912 }
4913 plevel = kq->kq_level + 1;
d9a64523 4914 }
cb323159
A
4915
4916 parentkq->kq_level = plevel;
39037602
A
4917 }
4918
cb323159
A
4919 kqunlock(parentkq);
4920
4921 kn->kn_filtid = EVFILTID_KQREAD;
4922 kqlock(kq);
4923 KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
4924 /* indicate nesting in child, if needed */
4925 if (kq->kq_level == 0) {
4926 kq->kq_level = 1;
4927 }
4928
4929 int count = kq->kq_count;
4930 kqunlock(kq);
4931 return count > 0;
5ba3f43e
A
4932}
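
/*
 * Editor's note: a hedged, self-contained restatement of the nesting
 * level ceiling protocol implemented above, with the locking and knote
 * plumbing stripped away so the invariant is easier to see.  Invented
 * types; the constants mirror the code above.
 */
#include <stdbool.h>
#include <stdint.h>

#define MAX_NESTED 1000         /* mirrors MAX_NESTED_KQ above */

struct nq {
	uint16_t level;         /* 0 = never nested */
};

/* Returns true if `child` may be inserted into `parent`. */
static bool
nq_nest(struct nq *parent, struct nq *child)
{
	/* A parent already nested below the child could form a cycle. */
	if (parent->level > 0 && parent->level < child->level) {
		return false;
	}

	uint16_t plevel = (parent->level == 0) ? 2 : parent->level;
	if (plevel < child->level + 1) {
		if (child->level + 1 > MAX_NESTED) {
			return false;
		}
		plevel = (uint16_t)(child->level + 1);
	}
	parent->level = plevel;

	if (child->level == 0) {
		child->level = 1;       /* the child is now nested */
	}
	return true;
}
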
4933
4934/*
cb323159 4935 * kqueue_drain - called when kq is closed
5ba3f43e 4936 */
cb323159 4937/*ARGSUSED*/
5ba3f43e 4938static int
cb323159 4939kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
5ba3f43e 4940{
f427ee49 4941 struct kqfile *kqf = (struct kqfile *)fp->fp_glob->fg_data;
5ba3f43e 4942
cb323159 4943 assert((kqf->kqf_state & KQ_WORKQ) == 0);
5ba3f43e 4944
cb323159
A
4945 kqlock(kqf);
4946 kqf->kqf_state |= KQ_DRAIN;
5ba3f43e 4947
cb323159
A
4948 /* wakeup sleeping threads */
4949 if ((kqf->kqf_state & (KQ_SLEEP | KQ_SEL)) != 0) {
4950 kqf->kqf_state &= ~(KQ_SLEEP | KQ_SEL);
4951 (void)waitq_wakeup64_all((struct waitq *)&kqf->kqf_wqs,
4952 KQ_EVENT,
4953 THREAD_RESTART,
4954 WAITQ_ALL_PRIORITIES);
4955 }
5ba3f43e 4956
cb323159
A
4957 /* wakeup threads waiting their turn to process */
4958 if (kqf->kqf_state & KQ_PROCWAIT) {
4959 assert(kqf->kqf_state & KQ_PROCESSING);
5ba3f43e 4960
cb323159
A
4961 kqf->kqf_state &= ~KQ_PROCWAIT;
4962 (void)waitq_wakeup64_all((struct waitq *)&kqf->kqf_wqs,
4963 CAST_EVENT64_T(&kqf->kqf_suppressed),
4964 THREAD_RESTART, WAITQ_ALL_PRIORITIES);
5ba3f43e 4965 }
5ba3f43e 4966
cb323159
A
4967 kqunlock(kqf);
4968 return 0;
4969}
4970
4971/*ARGSUSED*/
4972int
4973kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
4974{
4975 assert((kq->kq_state & KQ_WORKQ) == 0);
5ba3f43e 4976
cb323159
A
4977 kqlock(kq);
4978 if (isstat64 != 0) {
4979 struct stat64 *sb64 = (struct stat64 *)ub;
4980
4981 bzero((void *)sb64, sizeof(*sb64));
4982 sb64->st_size = kq->kq_count;
4983 if (kq->kq_state & KQ_KEV_QOS) {
4984 sb64->st_blksize = sizeof(struct kevent_qos_s);
4985 } else if (kq->kq_state & KQ_KEV64) {
4986 sb64->st_blksize = sizeof(struct kevent64_s);
4987 } else if (IS_64BIT_PROCESS(p)) {
4988 sb64->st_blksize = sizeof(struct user64_kevent);
d9a64523 4989 } else {
cb323159 4990 sb64->st_blksize = sizeof(struct user32_kevent);
d9a64523 4991 }
cb323159 4992 sb64->st_mode = S_IFIFO;
d9a64523 4993 } else {
cb323159 4994 struct stat *sb = (struct stat *)ub;
5ba3f43e 4995
cb323159
A
4996 bzero((void *)sb, sizeof(*sb));
4997 sb->st_size = kq->kq_count;
4998 if (kq->kq_state & KQ_KEV_QOS) {
4999 sb->st_blksize = sizeof(struct kevent_qos_s);
5000 } else if (kq->kq_state & KQ_KEV64) {
5001 sb->st_blksize = sizeof(struct kevent64_s);
5002 } else if (IS_64BIT_PROCESS(p)) {
5003 sb->st_blksize = sizeof(struct user64_kevent);
5004 } else {
5005 sb->st_blksize = sizeof(struct user32_kevent);
5006 }
5007 sb->st_mode = S_IFIFO;
5ba3f43e 5008 }
cb323159
A
5009 kqunlock(kq);
5010 return 0;
5011}
5ba3f43e 5012
cb323159
A
5013static inline bool
5014kqueue_threadreq_can_use_ast(struct kqueue *kq)
5015{
5016 if (current_proc() == kq->kq_p) {
5017 /*
5018 * Setting an AST from a non-BSD syscall is unsafe: mach_msg_trap() can
5019 * do combined send/receive and in the case of self-IPC, the AST may be
5020 * set on a thread that will not return to userspace and needs the
5021 * thread the AST would create to unblock itself.
5022 *
5023 * At this time, we really want to target:
5024 *
5025 * - kevent variants that can cause thread creations, and dispatch
5026 * really only uses kevent_qos and kevent_id,
5027 *
5028 * - workq_kernreturn (directly about thread creations)
5029 *
5030 * - bsdthread_ctl which is used for qos changes and has direct impact
5031 * on the creator thread scheduling decisions.
5032 */
5033 switch (current_uthread()->syscall_code) {
5034 case SYS_kevent_qos:
5035 case SYS_kevent_id:
5036 case SYS_workq_kernreturn:
5037 case SYS_bsdthread_ctl:
5038 return true;
5039 }
5040 }
5041 return false;
39037602
A
5042}
5043
5044/*
cb323159
A
5045 * Interact with the pthread kext to request servicing there at a specific QoS
5046 * level.
5047 *
5048 * - Caller holds the workq request lock
5049 *
5050 * - May be called with the kqueue's wait queue set locked,
5051 * so cannot do anything that could recurse on that.
39037602 5052 */
5ba3f43e 5053static void
cb323159
A
5054kqueue_threadreq_initiate(struct kqueue *kq, workq_threadreq_t kqr,
5055 kq_index_t qos, int flags)
5ba3f43e 5056{
cb323159
A
5057 assert(kqr->tr_kq_wakeup);
5058 assert(kqr_thread(kqr) == THREAD_NULL);
5059 assert(!kqr_thread_requested(kqr));
5060 struct turnstile *ts = TURNSTILE_NULL;
5061
5062 if (workq_is_exiting(kq->kq_p)) {
5063 return;
5064 }
5ba3f43e 5065
d9a64523 5066 kqlock_held(kq);
5ba3f43e 5067
cb323159
A
5068 if (kq->kq_state & KQ_WORKLOOP) {
5069 __assert_only struct kqworkloop *kqwl = (struct kqworkloop *)kq;
5ba3f43e 5070
cb323159 5071 assert(kqwl->kqwl_owner == THREAD_NULL);
94ff46dc 5072 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST),
cb323159
A
5073 kqwl->kqwl_dynamicid, 0, qos, kqr->tr_kq_wakeup);
5074 ts = kqwl->kqwl_turnstile;
5075 /* Add a thread request reference on the kqueue. */
5076 kqworkloop_retain(kqwl);
5077 } else {
5078 assert(kq->kq_state & KQ_WORKQ);
94ff46dc 5079 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST),
cb323159
A
5080 -1, 0, qos, kqr->tr_kq_wakeup);
5081 }
5ba3f43e 5082
d9a64523 5083 /*
cb323159
A
5084 * New-style thread request supported.
5085 * Provide the pthread kext a pointer to a workq_threadreq_s structure for
5086 * its use until a corresponding kqueue_threadreq_bind callback.
d9a64523 5087 */
cb323159
A
5088 if (kqueue_threadreq_can_use_ast(kq)) {
5089 flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
5090 }
5091 if (qos == KQWQ_QOS_MANAGER) {
5092 qos = WORKQ_THREAD_QOS_MANAGER;
5093 }
5094 if (!workq_kern_threadreq_initiate(kq->kq_p, kqr, ts, qos, flags)) {
5095 /*
5096 * Process is shutting down or exec'ing.
5097 * All the kqueues are going to be cleaned up
5098 * soon. Forget we even asked for a thread -
5099 * and make sure we don't ask for more.
5100 */
5101 kq->kq_state &= ~KQ_R2K_ARMED;
5102 kqueue_release_live(kq);
d9a64523 5103 }
cb323159 5104}
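/*
 * Reference lifecycle note: for workloops, the kqworkloop_retain() above pins
 * the kqueue for the duration of the thread request; when
 * workq_kern_threadreq_initiate() refuses the request because the process is
 * exiting or exec'ing, the failure path disarms KQ_R2K_ARMED and drops that
 * reference again via kqueue_release_live().
 */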
5ba3f43e 5105
cb323159
A
5106/*
5107 * kqueue_threadreq_bind_prepost - prepost the bind to kevent
5108 *
5109 * This is used when kqueue_threadreq_bind may cause a lock inversion.
5110 */
5111__attribute__((always_inline))
5112void
5113kqueue_threadreq_bind_prepost(struct proc *p __unused, workq_threadreq_t kqr,
5114 struct uthread *ut)
5115{
5116 ut->uu_kqr_bound = kqr;
5117 kqr->tr_thread = ut->uu_thread;
5118 kqr->tr_state = WORKQ_TR_STATE_BINDING;
5119}
5ba3f43e 5120
cb323159
A
5121/*
5122 * kqueue_threadreq_bind_commit - commit a bind prepost
5123 *
5124 * The workq code has to commit any binding prepost before the thread has
5125 * a chance to come back to userspace (and do kevent syscalls) or be aborted.
5126 */
5127void
5128kqueue_threadreq_bind_commit(struct proc *p, thread_t thread)
5129{
5130 struct uthread *ut = get_bsdthread_info(thread);
5131 workq_threadreq_t kqr = ut->uu_kqr_bound;
5132 kqueue_t kqu = kqr_kqueue(p, kqr);
5133
5134 kqlock(kqu);
5135 if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
5136 kqueue_threadreq_bind(p, kqr, thread, 0);
d9a64523 5137 }
cb323159 5138 kqunlock(kqu);
39037602
A
5139}
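/*
 * The prepost/commit split above exists for callers where a direct
 * kqueue_threadreq_bind() could cause a lock inversion: the prepost step only
 * records the pending binding (WORKQ_TR_STATE_BINDING) on the uthread, and
 * the commit step re-takes the kqlock and completes the bind before the
 * thread can return to userspace or be aborted.
 */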
5140
cb323159
A
5141static void
5142kqueue_threadreq_modify(kqueue_t kqu, workq_threadreq_t kqr, kq_index_t qos,
5143 workq_kern_threadreq_flags_t flags)
6d2010ae 5144{
cb323159 5145 assert(kqr_thread_requested_pending(kqr));
39037602 5146
cb323159 5147 kqlock_held(kqu);
39037602 5148
cb323159
A
5149 if (kqueue_threadreq_can_use_ast(kqu.kq)) {
5150 flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
5151 }
5152 workq_kern_threadreq_modify(kqu.kq->kq_p, kqr, qos, flags);
5153}
5154
5155/*
5156 * kqueue_threadreq_bind - bind thread to processing kqrequest
5157 *
5158 * The provided thread will be responsible for delivering events
5159 * associated with the given kqrequest. Bind it and get ready for
5160 * the thread to eventually arrive.
5161 */
5162void
5163kqueue_threadreq_bind(struct proc *p, workq_threadreq_t kqr, thread_t thread,
5164 unsigned int flags)
5165{
5166 kqueue_t kqu = kqr_kqueue(p, kqr);
5167 struct uthread *ut = get_bsdthread_info(thread);
39037602 5168
cb323159 5169 kqlock_held(kqu);
39037602 5170
cb323159 5171 assert(ut->uu_kqueue_override == 0);
39037602 5172
cb323159
A
5173 if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
5174 assert(ut->uu_kqr_bound == kqr);
5175 assert(kqr->tr_thread == thread);
5176 } else {
5177 assert(kqr_thread_requested_pending(kqr));
5178 assert(kqr->tr_thread == THREAD_NULL);
5179 assert(ut->uu_kqr_bound == NULL);
5180 ut->uu_kqr_bound = kqr;
5181 kqr->tr_thread = thread;
5182 }
39037602 5183
cb323159 5184 kqr->tr_state = WORKQ_TR_STATE_BOUND;
39037602 5185
cb323159
A
5186 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5187 struct turnstile *ts = kqu.kqwl->kqwl_turnstile;
5188
5189 if (__improbable(thread == kqu.kqwl->kqwl_owner)) {
5190 /*
5191 * <rdar://problem/38626999> shows that asserting here is not ok.
5192 *
5193 * This is not supposed to happen for correct use of the interface,
5194 * but it is sadly possible for userspace (with the help of memory
5195 * corruption, such as over-release of a dispatch queue) to make
5196 * the creator thread the "owner" of a workloop.
5197 *
5198 * Once that happens, and that creator thread picks up the same
 5199 * workloop as a servicer, we trip this codepath. We need to fix up
5200 * the state to forget about this thread being the owner, as the
5201 * entire workloop state machine expects servicers to never be
5202 * owners and everything would basically go downhill from here.
d9a64523 5203 */
cb323159
A
5204 kqu.kqwl->kqwl_owner = THREAD_NULL;
5205 if (kqworkloop_override(kqu.kqwl)) {
5206 thread_drop_kevent_override(thread);
5207 }
d9a64523 5208 }
cb323159
A
5209
5210 if (ts && (flags & KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE) == 0) {
5211 /*
5212 * Past this point, the interlock is the kq req lock again,
5213 * so we can fix the inheritor for good.
5214 */
5215 filt_wlupdate_inheritor(kqu.kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
5216 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
d9a64523 5217 }
cb323159 5218
94ff46dc 5219 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_BIND), kqu.kqwl->kqwl_dynamicid,
cb323159
A
5220 thread_tid(thread), kqr->tr_kq_qos_index,
5221 (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
5222
5223 ut->uu_kqueue_override = kqr->tr_kq_override_index;
5224 if (kqr->tr_kq_override_index) {
5225 thread_add_servicer_override(thread, kqr->tr_kq_override_index);
5ba3f43e 5226 }
cb323159
A
5227 } else {
5228 assert(kqr->tr_kq_override_index == 0);
5229
94ff46dc 5230 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_BIND), -1,
cb323159
A
5231 thread_tid(thread), kqr->tr_kq_qos_index,
5232 (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
d9a64523 5233 }
d9a64523
A
5234}
5235
cb323159
A
5236/*
5237 * kqueue_threadreq_cancel - abort a pending thread request
5238 *
5239 * Called when exiting/exec'ing. Forget our pending request.
5240 */
5241void
5242kqueue_threadreq_cancel(struct proc *p, workq_threadreq_t kqr)
d9a64523 5243{
cb323159
A
5244 kqueue_release(kqr_kqueue(p, kqr));
5245}
39037602 5246
cb323159
A
5247workq_threadreq_param_t
5248kqueue_threadreq_workloop_param(workq_threadreq_t kqr)
5249{
5250 struct kqworkloop *kqwl;
5251 workq_threadreq_param_t trp;
d9a64523 5252
cb323159
A
5253 assert(kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
5254 kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
5255 trp.trp_value = kqwl->kqwl_params;
5256 return trp;
6d2010ae 5257}
b0d623f7 5258
91447636 5259/*
cb323159 5260 * kqueue_threadreq_unbind - unbind thread from processing kqueue
d9a64523 5261 *
cb323159
A
5262 * End processing the per-QoS bucket of events and allow other threads
5263 * to be requested for future servicing.
91447636
A
5264 *
5265 * caller holds a reference on the kqueue.
91447636 5266 */
cb323159
A
5267void
5268kqueue_threadreq_unbind(struct proc *p, workq_threadreq_t kqr)
91447636 5269{
cb323159
A
5270 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
5271 kqworkloop_unbind(kqr_kqworkloop(kqr));
5ba3f43e 5272 } else {
cb323159 5273 kqworkq_unbind(p, kqr);
5ba3f43e 5274 }
cb323159 5275}
b0d623f7 5276
cb323159
A
5277/*
5278 * If we aren't already busy processing events [for this QoS],
5279 * request workq thread support as appropriate.
5280 *
5281 * TBD - for now, we don't segregate out processing by QoS.
5282 *
5283 * - May be called with the kqueue's wait queue set locked,
5284 * so cannot do anything that could recurse on that.
5285 */
5286static void
5287kqworkq_wakeup(struct kqworkq *kqwq, kq_index_t qos_index)
5288{
5289 workq_threadreq_t kqr = kqworkq_get_request(kqwq, qos_index);
5290
5291 /* convert to thread qos value */
5292 assert(qos_index < KQWQ_NBUCKETS);
5293
5294 if (!kqr->tr_kq_wakeup) {
5295 kqr->tr_kq_wakeup = true;
5296 if (!kqr_thread_requested(kqr)) {
5297 kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr, qos_index, 0);
5298 }
d9a64523 5299 }
cb323159 5300}
b0d623f7 5301
cb323159
A
5302/*
 5303 * This represents the asynchronous QoS a given workloop contributes:
 5304 * the max of the currently active knotes (override index)
 5305 * and the workloop max QoS (userspace async QoS).
5306 */
5307static kq_index_t
5308kqworkloop_override(struct kqworkloop *kqwl)
5309{
5310 workq_threadreq_t kqr = &kqwl->kqwl_request;
5311 return MAX(kqr->tr_kq_qos_index, kqr->tr_kq_override_index);
5312}
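/*
 * For example, a workloop whose thread request sits at
 * THREAD_QOS_UTILITY (tr_kq_qos_index) but that currently has a
 * THREAD_QOS_USER_INITIATED knote pushing an override
 * (tr_kq_override_index) contributes USER_INITIATED here.
 */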
55e303ae 5313
cb323159
A
5314static inline void
5315kqworkloop_request_fire_r2k_notification(struct kqworkloop *kqwl)
5316{
5317 workq_threadreq_t kqr = &kqwl->kqwl_request;
5318
5319 kqlock_held(kqwl);
5320
5321 if (kqwl->kqwl_state & KQ_R2K_ARMED) {
5322 kqwl->kqwl_state &= ~KQ_R2K_ARMED;
5323 act_set_astkevent(kqr_thread_fast(kqr), AST_KEVENT_RETURN_TO_KERNEL);
d9a64523 5324 }
cb323159 5325}
39037602 5326
cb323159
A
5327static void
5328kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos)
5329{
5330 workq_threadreq_t kqr = &kqwl->kqwl_request;
5331 struct kqueue *kq = &kqwl->kqwl_kqueue;
5332 kq_index_t old_override = kqworkloop_override(kqwl);
5333 kq_index_t i;
6d2010ae 5334
cb323159
A
5335 kqlock_held(kqwl);
5336
5337 switch (op) {
5338 case KQWL_UTQ_UPDATE_WAKEUP_QOS:
5339 if (qos == KQWL_BUCKET_STAYACTIVE) {
5340 /*
 5341 * the KQWL_BUCKET_STAYACTIVE is not a QoS bucket; we only remember
5342 * a high watermark (kqwl_stayactive_qos) of any stay active knote
5343 * that was ever registered with this workloop.
5344 *
5345 * When waitq_set__CALLING_PREPOST_HOOK__() wakes up any stay active
5346 * knote, we use this high-watermark as a wakeup-index, and also set
5347 * the magic KQWL_BUCKET_STAYACTIVE bit to make sure we remember
5348 * there is at least one stay active knote fired until the next full
5349 * processing of this bucket.
5350 */
5351 kqwl->kqwl_wakeup_indexes |= KQWL_STAYACTIVE_FIRED_BIT;
5352 qos = kqwl->kqwl_stayactive_qos;
5353 assert(qos);
5354 }
5355 if (kqwl->kqwl_wakeup_indexes & (1 << qos)) {
5356 assert(kqr->tr_kq_wakeup);
5ba3f43e
A
5357 break;
5358 }
55e303ae 5359
cb323159
A
5360 kqwl->kqwl_wakeup_indexes |= (1 << qos);
5361 kqr->tr_kq_wakeup = true;
5362 kqworkloop_request_fire_r2k_notification(kqwl);
5363 goto recompute;
d9a64523 5364
cb323159
A
5365 case KQWL_UTQ_UPDATE_STAYACTIVE_QOS:
5366 assert(qos);
5367 if (kqwl->kqwl_stayactive_qos < qos) {
5368 kqwl->kqwl_stayactive_qos = qos;
5369 if (kqwl->kqwl_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT) {
5370 assert(kqr->tr_kq_wakeup);
5371 kqwl->kqwl_wakeup_indexes |= (1 << qos);
5372 goto recompute;
5373 }
5374 }
5375 break;
5376
5377 case KQWL_UTQ_PARKING:
5378 case KQWL_UTQ_UNBINDING:
5379 kqr->tr_kq_override_index = qos;
f427ee49 5380 OS_FALLTHROUGH;
cb323159
A
5381 case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS:
5382 if (op == KQWL_UTQ_RECOMPUTE_WAKEUP_QOS) {
5383 assert(qos == THREAD_QOS_UNSPECIFIED);
5384 }
5385 i = KQWL_BUCKET_STAYACTIVE;
5386 if (TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
5387 kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
5388 }
5389 if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i]) &&
5390 (kqwl->kqwl_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT)) {
5391 /*
5392 * If the KQWL_STAYACTIVE_FIRED_BIT is set, it means a stay active
5393 * knote may have fired, so we need to merge in kqwl_stayactive_qos.
5394 *
5395 * Unlike other buckets, this one is never empty but could be idle.
5396 */
5397 kqwl->kqwl_wakeup_indexes &= KQWL_STAYACTIVE_FIRED_BIT;
5398 kqwl->kqwl_wakeup_indexes |= (1 << kqwl->kqwl_stayactive_qos);
5399 } else {
5400 kqwl->kqwl_wakeup_indexes = 0;
d9a64523 5401 }
cb323159
A
5402 for (i = THREAD_QOS_UNSPECIFIED + 1; i < KQWL_BUCKET_STAYACTIVE; i++) {
5403 if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i])) {
5404 kqwl->kqwl_wakeup_indexes |= (1 << i);
5ba3f43e 5405 }
cb323159
A
5406 }
5407 if (kqwl->kqwl_wakeup_indexes) {
5408 kqr->tr_kq_wakeup = true;
5409 kqworkloop_request_fire_r2k_notification(kqwl);
5410 } else {
5411 kqr->tr_kq_wakeup = false;
5412 }
5413 goto recompute;
5ba3f43e 5414
cb323159
A
5415 case KQWL_UTQ_RESET_WAKEUP_OVERRIDE:
5416 kqr->tr_kq_override_index = qos;
5417 goto recompute;
5ba3f43e 5418
cb323159
A
5419 case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE:
5420recompute:
5421 /*
5422 * When modifying the wakeup QoS or the override QoS, we always need to
5423 * maintain our invariant that kqr_override_index is at least as large
5424 * as the highest QoS for which an event is fired.
5425 *
 5426 * However, this override index can be larger when there is an overridden
 5427 * suppressed knote pushing on the kqueue.
5428 */
5429 if (kqwl->kqwl_wakeup_indexes > (1 << qos)) {
f427ee49 5430 qos = (uint8_t)(fls(kqwl->kqwl_wakeup_indexes) - 1); /* fls is 1-based */
cb323159
A
5431 }
5432 if (kqr->tr_kq_override_index < qos) {
5433 kqr->tr_kq_override_index = qos;
55e303ae 5434 }
91447636 5435 break;
cb323159
A
5436
5437 case KQWL_UTQ_REDRIVE_EVENTS:
91447636 5438 break;
cb323159
A
5439
5440 case KQWL_UTQ_SET_QOS_INDEX:
5441 kqr->tr_kq_qos_index = qos;
39037602 5442 break;
cb323159 5443
91447636 5444 default:
cb323159 5445 panic("unknown kqwl thread qos update operation: %d", op);
55e303ae 5446 }
39236c6e 5447
cb323159
A
5448 thread_t kqwl_owner = kqwl->kqwl_owner;
5449 thread_t servicer = kqr_thread(kqr);
5450 boolean_t qos_changed = FALSE;
5451 kq_index_t new_override = kqworkloop_override(kqwl);
55e303ae 5452
39037602 5453 /*
cb323159 5454 * Apply the diffs to the owner if applicable
39037602 5455 */
cb323159
A
5456 if (kqwl_owner) {
5457#if 0
5458 /* JMM - need new trace hooks for owner overrides */
94ff46dc 5459 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST),
cb323159
A
5460 kqwl->kqwl_dynamicid, thread_tid(kqwl_owner), kqr->tr_kq_qos_index,
5461 (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
5462#endif
5463 if (new_override == old_override) {
5464 // nothing to do
5465 } else if (old_override == THREAD_QOS_UNSPECIFIED) {
5466 thread_add_kevent_override(kqwl_owner, new_override);
5467 } else if (new_override == THREAD_QOS_UNSPECIFIED) {
5468 thread_drop_kevent_override(kqwl_owner);
5469 } else { /* old_override != new_override */
5470 thread_update_kevent_override(kqwl_owner, new_override);
5471 }
5472 }
91447636 5473
cb323159
A
5474 /*
5475 * apply the diffs to the servicer
5476 */
5477 if (!kqr_thread_requested(kqr)) {
91447636 5478 /*
cb323159
A
5479 * No servicer, nor thread-request
5480 *
5481 * Make a new thread request, unless there is an owner (or the workloop
5482 * is suspended in userland) or if there is no asynchronous work in the
5483 * first place.
91447636 5484 */
91447636 5485
cb323159
A
5486 if (kqwl_owner == NULL && kqr->tr_kq_wakeup) {
5487 int initiate_flags = 0;
5488 if (op == KQWL_UTQ_UNBINDING) {
5489 initiate_flags = WORKQ_THREADREQ_ATTEMPT_REBIND;
55e303ae 5490 }
cb323159 5491 kqueue_threadreq_initiate(kq, kqr, new_override, initiate_flags);
55e303ae 5492 }
cb323159
A
5493 } else if (servicer) {
5494 /*
5495 * Servicer in flight
5496 *
5497 * Just apply the diff to the servicer
5498 */
5499 struct uthread *ut = get_bsdthread_info(servicer);
5500 if (ut->uu_kqueue_override != new_override) {
5501 if (ut->uu_kqueue_override == THREAD_QOS_UNSPECIFIED) {
5502 thread_add_servicer_override(servicer, new_override);
5503 } else if (new_override == THREAD_QOS_UNSPECIFIED) {
5504 thread_drop_servicer_override(servicer);
5505 } else { /* ut->uu_kqueue_override != new_override */
5506 thread_update_servicer_override(servicer, new_override);
5507 }
5508 ut->uu_kqueue_override = new_override;
5509 qos_changed = TRUE;
39037602 5510 }
cb323159
A
5511 } else if (new_override == THREAD_QOS_UNSPECIFIED) {
5512 /*
5513 * No events to deliver anymore.
5514 *
 5515 * However, canceling with turnstiles is challenging, so the fact that
 5516 * the request isn't useful will be discovered by the servicer itself
 5517 * later on.
5518 */
5519 } else if (old_override != new_override) {
5520 /*
5521 * Request is in flight
5522 *
5523 * Apply the diff to the thread request
5524 */
5525 kqueue_threadreq_modify(kq, kqr, new_override, WORKQ_THREADREQ_NONE);
5526 qos_changed = TRUE;
5527 }
39037602 5528
cb323159 5529 if (qos_changed) {
94ff46dc 5530 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST), kqwl->kqwl_dynamicid,
cb323159
A
5531 thread_tid(servicer), kqr->tr_kq_qos_index,
5532 (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
55e303ae 5533 }
55e303ae
A
5534}
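/*
 * Note on the bookkeeping above: kqwl_wakeup_indexes is a bitmask with
 * (1 << qos) set for every QoS bucket that has fired.  In the recompute step,
 * the highest set bit wins: with events pending at both THREAD_QOS_BACKGROUND
 * and THREAD_QOS_USER_INITIATED, fls() selects the USER_INITIATED bit, so the
 * override floor (tr_kq_override_index) becomes USER_INITIATED.
 */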
5535
cb323159
A
5536static void
5537kqworkloop_wakeup(struct kqworkloop *kqwl, kq_index_t qos)
55e303ae 5538{
cb323159
A
5539 if ((kqwl->kqwl_state & KQ_PROCESSING) &&
5540 kqr_thread(&kqwl->kqwl_request) == current_thread()) {
5541 /*
5542 * kqworkloop_end_processing() will perform the required QoS
5543 * computations when it unsets the processing mode.
5544 */
5545 return;
5546 }
55e303ae 5547
cb323159 5548 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS, qos);
55e303ae
A
5549}
5550
cb323159
A
5551static struct kqtailq *
5552kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn)
55e303ae 5553{
cb323159
A
5554 if (kq.kq->kq_state & KQ_WORKLOOP) {
5555 return &kq.kqwl->kqwl_suppressed;
5556 } else if (kq.kq->kq_state & KQ_WORKQ) {
5557 return &kq.kqwq->kqwq_suppressed[kn->kn_qos_index];
5558 } else {
5559 return &kq.kqf->kqf_suppressed;
5560 }
55e303ae
A
5561}
5562
cb323159
A
5563struct turnstile *
5564kqueue_alloc_turnstile(kqueue_t kqu)
55e303ae 5565{
cb323159
A
5566 struct kqworkloop *kqwl = kqu.kqwl;
5567 kq_state_t kq_state;
39236c6e 5568
cb323159
A
5569 kq_state = os_atomic_load(&kqu.kq->kq_state, dependency);
5570 if (kq_state & KQ_HAS_TURNSTILE) {
5571 /* force a dependency to pair with the atomic or with release below */
5572 return os_atomic_load_with_dependency_on(&kqwl->kqwl_turnstile,
5573 (uintptr_t)kq_state);
0a7de745 5574 }
b0d623f7 5575
cb323159
A
5576 if (!(kq_state & KQ_WORKLOOP)) {
5577 return TURNSTILE_NULL;
5578 }
39037602 5579
cb323159
A
5580 struct turnstile *ts = turnstile_alloc(), *free_ts = TURNSTILE_NULL;
5581 bool workq_locked = false;
39037602 5582
cb323159 5583 kqlock(kqu);
b0d623f7 5584
cb323159
A
5585 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
5586 workq_locked = true;
5587 workq_kern_threadreq_lock(kqwl->kqwl_p);
5588 }
3e170ce0 5589
cb323159
A
5590 if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
5591 free_ts = ts;
5592 ts = kqwl->kqwl_turnstile;
5593 } else {
5594 ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
5595 ts, TURNSTILE_WORKLOOPS);
3e170ce0 5596
cb323159
A
5597 /* release-barrier to pair with the unlocked load of kqwl_turnstile above */
5598 os_atomic_or(&kqwl->kqwl_state, KQ_HAS_TURNSTILE, release);
5599
5600 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
5601 workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
5602 &kqwl->kqwl_request, kqwl->kqwl_owner,
5603 ts, TURNSTILE_IMMEDIATE_UPDATE);
5604 /*
5605 * The workq may no longer be the interlock after this.
5606 * In which case the inheritor wasn't updated.
5607 */
5608 }
5609 if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
5610 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
5611 }
5612 }
5613
5614 if (workq_locked) {
5615 workq_kern_threadreq_unlock(kqwl->kqwl_p);
5616 }
5617
5618 kqunlock(kqu);
5619
5620 if (free_ts) {
5621 turnstile_deallocate(free_ts);
5622 } else {
5623 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
5624 }
5625 return ts;
5626}
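/*
 * The allocation above follows a double-checked pattern: the unlocked load of
 * kq_state pairs with the release barrier that publishes KQ_HAS_TURNSTILE, so
 * a caller that observes the bit can load kqwl_turnstile without taking the
 * kqlock.  When two threads race, the loser frees its spare turnstile
 * (turnstile_deallocate(free_ts)) and both end up returning the published one.
 */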
5627
5628__attribute__((always_inline))
5629struct turnstile *
5630kqueue_turnstile(kqueue_t kqu)
5631{
5632 kq_state_t kq_state = os_atomic_load(&kqu.kq->kq_state, relaxed);
5633 if (kq_state & KQ_WORKLOOP) {
5634 return os_atomic_load(&kqu.kqwl->kqwl_turnstile, relaxed);
5635 }
5636 return TURNSTILE_NULL;
5637}
5638
5639__attribute__((always_inline))
5640struct turnstile *
5641kqueue_threadreq_get_turnstile(workq_threadreq_t kqr)
5642{
5643 struct kqworkloop *kqwl = kqr_kqworkloop(kqr);
5644 if (kqwl) {
5645 return os_atomic_load(&kqwl->kqwl_turnstile, relaxed);
b0d623f7 5646 }
cb323159
A
5647 return TURNSTILE_NULL;
5648}
5649
5650static void
5651kqworkloop_set_overcommit(struct kqworkloop *kqwl)
5652{
5653 workq_threadreq_t kqr = &kqwl->kqwl_request;
b0d623f7 5654
cb323159
A
5655 /*
5656 * This test is racy, but since we never remove this bit,
5657 * it allows us to avoid taking a lock.
5658 */
5659 if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
5660 return;
6d2010ae 5661 }
b0d623f7 5662
cb323159 5663 kqlock_held(kqwl);
6d2010ae 5664
cb323159
A
5665 if (kqr_thread_requested_pending(kqr)) {
5666 kqueue_threadreq_modify(kqwl, kqr, kqr->tr_qos,
5667 WORKQ_THREADREQ_MAKE_OVERCOMMIT);
5668 } else {
5669 kqr->tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
5670 }
5671}
6d2010ae 5672
cb323159
A
5673static void
5674kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn,
5675 kq_index_t override_index)
5676{
5677 workq_threadreq_t kqr;
5678 kq_index_t old_override_index;
5679 kq_index_t queue_index = kn->kn_qos_index;
39037602 5680
cb323159
A
5681 if (override_index <= queue_index) {
5682 return;
5683 }
d9a64523 5684
cb323159 5685 kqr = kqworkq_get_request(kqwq, queue_index);
d9a64523 5686
cb323159 5687 kqlock_held(kqwq);
39037602 5688
cb323159
A
5689 old_override_index = kqr->tr_kq_override_index;
5690 if (override_index > MAX(kqr->tr_kq_qos_index, old_override_index)) {
5691 thread_t servicer = kqr_thread(kqr);
5692 kqr->tr_kq_override_index = override_index;
5693
5694 /* apply the override to [incoming?] servicing thread */
5695 if (servicer) {
5696 if (old_override_index) {
5697 thread_update_kevent_override(servicer, override_index);
5698 } else {
5699 thread_add_kevent_override(servicer, override_index);
39236c6e 5700 }
55e303ae 5701 }
b0d623f7 5702 }
55e303ae
A
5703}
5704
cb323159
A
5705static void
5706kqueue_update_override(kqueue_t kqu, struct knote *kn, thread_qos_t qos)
55e303ae 5707{
cb323159
A
5708 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5709 kqworkloop_update_threads_qos(kqu.kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
5710 qos);
5711 } else {
5712 kqworkq_update_override(kqu.kqwq, kn, qos);
5713 }
55e303ae
A
5714}
5715
cb323159
A
5716static void
5717kqworkloop_unbind_locked(struct kqworkloop *kqwl, thread_t thread,
5718 enum kqwl_unbind_locked_mode how)
55e303ae 5719{
cb323159
A
5720 struct uthread *ut = get_bsdthread_info(thread);
5721 workq_threadreq_t kqr = &kqwl->kqwl_request;
55e303ae 5722
94ff46dc 5723 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND), kqwl->kqwl_dynamicid,
cb323159 5724 thread_tid(thread), 0, 0);
55e303ae 5725
cb323159 5726 kqlock_held(kqwl);
2d21ac55 5727
cb323159
A
5728 assert(ut->uu_kqr_bound == kqr);
5729 ut->uu_kqr_bound = NULL;
5730 if (how == KQWL_OVERRIDE_DROP_IMMEDIATELY &&
5731 ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
5732 thread_drop_servicer_override(thread);
5733 ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
5734 }
d9a64523 5735
cb323159
A
5736 if (kqwl->kqwl_owner == NULL && kqwl->kqwl_turnstile) {
5737 turnstile_update_inheritor(kqwl->kqwl_turnstile,
5738 TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
5739 turnstile_update_inheritor_complete(kqwl->kqwl_turnstile,
5740 TURNSTILE_INTERLOCK_HELD);
5741 }
2d21ac55 5742
cb323159
A
5743 kqr->tr_thread = THREAD_NULL;
5744 kqr->tr_state = WORKQ_TR_STATE_IDLE;
5745 kqwl->kqwl_state &= ~KQ_R2K_ARMED;
5746}
39037602 5747
cb323159
A
5748static void
5749kqworkloop_unbind_delayed_override_drop(thread_t thread)
5750{
5751 struct uthread *ut = get_bsdthread_info(thread);
5752 assert(ut->uu_kqr_bound == NULL);
5753 if (ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
5754 thread_drop_servicer_override(thread);
5755 ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
2d21ac55 5756 }
55e303ae
A
5757}
5758
b0d623f7 5759/*
cb323159
A
5760 * kqworkloop_unbind - Unbind the servicer thread of a workloop kqueue
5761 *
5762 * It will acknowledge events, and possibly request a new thread if:
5763 * - there were active events left
5764 * - we pended waitq hook callouts during processing
5765 * - we pended wakeups while processing (or unsuppressing)
5766 *
5767 * Called with kqueue lock held.
b0d623f7 5768 */
cb323159
A
5769static void
5770kqworkloop_unbind(struct kqworkloop *kqwl)
b0d623f7 5771{
cb323159
A
5772 struct kqueue *kq = &kqwl->kqwl_kqueue;
5773 workq_threadreq_t kqr = &kqwl->kqwl_request;
5774 thread_t thread = kqr_thread_fast(kqr);
5775 int op = KQWL_UTQ_PARKING;
5776 kq_index_t qos_override = THREAD_QOS_UNSPECIFIED;
39037602 5777
cb323159 5778 assert(thread == current_thread());
39037602 5779
cb323159 5780 kqlock(kqwl);
b0d623f7 5781
cb323159
A
5782 /*
 5783 * Forcing the KQ_PROCESSING flag allows QoS updates caused by
 5784 * unsuppressing knotes not to be applied until the eventual call to
5785 * kqworkloop_update_threads_qos() below.
5786 */
5787 assert((kq->kq_state & KQ_PROCESSING) == 0);
5788 if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
5789 kq->kq_state |= KQ_PROCESSING;
5790 qos_override = kqworkloop_acknowledge_events(kqwl);
5791 kq->kq_state &= ~KQ_PROCESSING;
5792 }
39037602 5793
cb323159
A
5794 kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
5795 kqworkloop_update_threads_qos(kqwl, op, qos_override);
b0d623f7 5796
cb323159 5797 kqunlock(kqwl);
5ba3f43e 5798
cb323159
A
5799 /*
5800 * Drop the override on the current thread last, after the call to
5801 * kqworkloop_update_threads_qos above.
5802 */
5803 kqworkloop_unbind_delayed_override_drop(thread);
5804
5805 /* If last reference, dealloc the workloop kq */
5806 kqworkloop_release(kqwl);
0a7de745
A
5807}
5808
cb323159
A
5809static thread_qos_t
5810kqworkq_unbind_locked(struct kqworkq *kqwq,
5811 workq_threadreq_t kqr, thread_t thread)
5812{
5813 struct uthread *ut = get_bsdthread_info(thread);
5814 kq_index_t old_override = kqr->tr_kq_override_index;
5815
94ff46dc 5816 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND), -1,
cb323159
A
5817 thread_tid(kqr_thread(kqr)), kqr->tr_kq_qos_index, 0);
5818
5819 kqlock_held(kqwq);
5820
5821 assert(ut->uu_kqr_bound == kqr);
5822 ut->uu_kqr_bound = NULL;
5823 kqr->tr_thread = THREAD_NULL;
5824 kqr->tr_state = WORKQ_TR_STATE_IDLE;
5825 kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
5826 kqwq->kqwq_state &= ~KQ_R2K_ARMED;
5827
5828 return old_override;
5ba3f43e
A
5829}
5830
5831/*
cb323159 5832 * kqworkq_unbind - unbind of a workq kqueue from a thread
5ba3f43e 5833 *
cb323159
A
5834 * We may have to request new threads.
 5835 * This can happen when there are no waiting processing threads and:
5836 * - there were active events we never got to (count > 0)
5837 * - we pended waitq hook callouts during processing
5838 * - we pended wakeups while processing (or unsuppressing)
5ba3f43e
A
5839 */
5840static void
cb323159 5841kqworkq_unbind(proc_t p, workq_threadreq_t kqr)
5ba3f43e 5842{
cb323159
A
5843 struct kqworkq *kqwq = (struct kqworkq *)p->p_fd->fd_wqkqueue;
5844 __assert_only int rc;
5ba3f43e 5845
cb323159
A
5846 kqlock(kqwq);
5847 rc = kqworkq_acknowledge_events(kqwq, kqr, 0, KQWQAE_UNBIND);
5848 assert(rc == -1);
5849 kqunlock(kqwq);
5850}
5ba3f43e 5851
cb323159
A
5852workq_threadreq_t
5853kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
5854{
5855 assert(qos_index < KQWQ_NBUCKETS);
5856 return &kqwq->kqwq_request[qos_index];
5857}
5ba3f43e 5858
cb323159
A
5859static void
5860knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp)
5861{
5862 kq_index_t qos = _pthread_priority_thread_qos(pp);
5ba3f43e 5863
cb323159
A
5864 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5865 assert((pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) == 0);
5866 pp = _pthread_priority_normalize(pp);
5867 } else if (kqu.kq->kq_state & KQ_WORKQ) {
5868 if (qos == THREAD_QOS_UNSPECIFIED) {
5869 /* On workqueues, outside of QoS means MANAGER */
5870 qos = KQWQ_QOS_MANAGER;
5871 pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
5872 } else {
5873 pp = _pthread_priority_normalize(pp);
5874 }
d9a64523 5875 } else {
cb323159
A
5876 pp = _pthread_unspecified_priority();
5877 qos = THREAD_QOS_UNSPECIFIED;
5878 }
5879
f427ee49 5880 kn->kn_qos = (int32_t)pp;
cb323159
A
5881
5882 if ((kn->kn_status & KN_MERGE_QOS) == 0 || qos > kn->kn_qos_override) {
5883 /* Never lower QoS when in "Merge" mode */
5884 kn->kn_qos_override = qos;
5885 }
5886
5887 /* only adjust in-use qos index when not suppressed */
5888 if (kn->kn_status & KN_SUPPRESSED) {
5889 kqueue_update_override(kqu, kn, qos);
5890 } else if (kn->kn_qos_index != qos) {
5891 knote_dequeue(kqu, kn);
5892 kn->kn_qos_index = qos;
d9a64523 5893 }
cb323159
A
5894}
5895
5896static void
5897knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result)
5898{
5899 thread_qos_t qos_index = (result >> FILTER_ADJUST_EVENT_QOS_SHIFT) & 7;
d9a64523 5900
cb323159
A
5901 kqlock_held(kq);
5902
5903 assert(result & FILTER_ADJUST_EVENT_QOS_BIT);
5904 assert(qos_index < THREAD_QOS_LAST);
5ba3f43e 5905
d9a64523 5906 /*
cb323159 5907 * Early exit for knotes that should not change QoS
d9a64523 5908 */
cb323159
A
5909 if (__improbable(!knote_fops(kn)->f_adjusts_qos)) {
5910 panic("filter %d cannot change QoS", kn->kn_filtid);
5911 } else if (__improbable(!knote_has_qos(kn))) {
5912 return;
d9a64523 5913 }
cb323159
A
5914
5915 /*
 5916 * Knotes with the FALLBACK flag only use their registration QoS if the
 5917 * incoming event has no QoS; otherwise the registration QoS acts as a floor.
5918 */
5919 thread_qos_t req_qos = _pthread_priority_thread_qos_fast(kn->kn_qos);
5920 if (kn->kn_qos & _PTHREAD_PRIORITY_FALLBACK_FLAG) {
5921 if (qos_index == THREAD_QOS_UNSPECIFIED) {
5922 qos_index = req_qos;
5923 }
5924 } else {
5925 if (qos_index < req_qos) {
5926 qos_index = req_qos;
5927 }
d9a64523 5928 }
cb323159
A
5929 if ((kn->kn_status & KN_MERGE_QOS) && (qos_index < kn->kn_qos_override)) {
5930 /* Never lower QoS when in "Merge" mode */
5931 return;
5932 }
5933
5934 if ((kn->kn_status & KN_LOCKED) && (kn->kn_status & KN_POSTING)) {
5ba3f43e 5935 /*
cb323159
A
 5936 * When we're trying to update the QoS override while both an
 5937 * f_event() and other f_* calls are running concurrently, any of these
 5938 * in-flight calls may want to perform overrides that aren't properly
5939 * serialized with each other.
5940 *
5941 * The first update that observes this racy situation enters a "Merge"
5942 * mode which causes subsequent override requests to saturate the
5943 * override instead of replacing its value.
5944 *
5945 * This mode is left when knote_unlock() or knote_post()
5946 * observe that no other f_* routine is in flight.
5ba3f43e 5947 */
cb323159 5948 kn->kn_status |= KN_MERGE_QOS;
5ba3f43e 5949 }
5ba3f43e 5950
cb323159
A
5951 /*
5952 * Now apply the override if it changed.
5953 */
d9a64523 5954
cb323159
A
5955 if (kn->kn_qos_override == qos_index) {
5956 return;
5957 }
d9a64523 5958
cb323159
A
5959 kn->kn_qos_override = qos_index;
5960
5961 if (kn->kn_status & KN_SUPPRESSED) {
d9a64523 5962 /*
cb323159
A
5963 * For suppressed events, the kn_qos_index field cannot be touched as it
 5964 * allows us to know which suppress queue the knote is on for a kqworkq.
5965 *
5966 * Also, there's no natural push applied on the kqueues when this field
5967 * changes anyway. We hence need to apply manual overrides in this case,
5968 * which will be cleared when the events are later acknowledged.
d9a64523 5969 */
cb323159
A
5970 kqueue_update_override(kq, kn, qos_index);
5971 } else if (kn->kn_qos_index != qos_index) {
5972 knote_dequeue(kq, kn);
5973 kn->kn_qos_index = qos_index;
d9a64523
A
5974 }
5975}
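/*
 * Filters that set f_adjusts_qos hand the event QoS back through the
 * f_event()/f_touch() result word: FILTER_ADJUST_EVENT_QOS_BIT flags that a
 * QoS is present, and a 3-bit thread QoS value is packed at
 * FILTER_ADJUST_EVENT_QOS_SHIFT, which is what the
 * (result >> FILTER_ADJUST_EVENT_QOS_SHIFT) & 7 extraction above decodes.
 */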
5976
5977/*
cb323159 5978 * Called back from waitq code when no threads waiting and the hook was set.
5ba3f43e 5979 *
cb323159 5980 * Preemption is disabled - minimal work can be done in this context!!!
5ba3f43e 5981 */
d9a64523 5982void
cb323159 5983waitq_set__CALLING_PREPOST_HOOK__(waitq_set_prepost_hook_t *kq_hook)
d9a64523 5984{
cb323159 5985 kqueue_t kqu;
d9a64523 5986
cb323159
A
5987 kqu.kq = __container_of(kq_hook, struct kqueue, kq_waitq_hook);
5988 assert(kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));
5989
5990 kqlock(kqu);
5991
5992 if (kqu.kq->kq_count > 0) {
5993 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5994 kqworkloop_wakeup(kqu.kqwl, KQWL_BUCKET_STAYACTIVE);
5995 } else {
5996 kqworkq_wakeup(kqu.kqwq, KQWQ_QOS_MANAGER);
5997 }
d9a64523 5998 }
cb323159
A
5999
6000 kqunlock(kqu);
d9a64523
A
6001}
6002
cb323159
A
6003void
6004klist_init(struct klist *list)
5ba3f43e 6005{
cb323159 6006 SLIST_INIT(list);
d9a64523
A
6007}
6008
cb323159 6009
d9a64523 6010/*
cb323159 6011 * Query/Post each knote in the object's list
d9a64523 6012 *
cb323159
A
6013 * The object lock protects the list. It is assumed
6014 * that the filter/event routine for the object can
6015 * determine that the object is already locked (via
6016 * the hint) and not deadlock itself.
6017 *
6018 * The object lock should also hold off pending
6019 * detach/drop operations.
d9a64523
A
6020 */
6021void
cb323159 6022knote(struct klist *list, long hint)
d9a64523 6023{
cb323159 6024 struct knote *kn;
d9a64523 6025
cb323159
A
6026 SLIST_FOREACH(kn, list, kn_selnext) {
6027 knote_post(kn, hint);
d9a64523 6028 }
cb323159 6029}
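/*
 * A hypothetical sketch (the object and function names below are illustrative,
 * not an existing XNU type) of how an event source typically drives knote():
 * it embeds a struct klist protected by its own lock, filters attach to that
 * list from their f_attach routine via knote_attach(), and the source posts
 * events with the KNOTE() macro whenever its state changes.
 */
#if 0
struct my_event_source {
	lck_mtx_t	mes_lock;	/* object lock, protects mes_klist */
	struct klist	mes_klist;	/* knotes attached by f_attach */
};

static void
my_event_source_changed(struct my_event_source *mes, long hint)
{
	lck_mtx_lock(&mes->mes_lock);
	/* posts each attached knote; expands to knote(&mes->mes_klist, hint) */
	KNOTE(&mes->mes_klist, hint);
	lck_mtx_unlock(&mes->mes_lock);
}
#endif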
d9a64523 6030
cb323159
A
6031/*
6032 * attach a knote to the specified list. Return true if this is the first entry.
6033 * The list is protected by whatever lock the object it is associated with uses.
6034 */
6035int
6036knote_attach(struct klist *list, struct knote *kn)
6037{
6038 int ret = SLIST_EMPTY(list);
6039 SLIST_INSERT_HEAD(list, kn, kn_selnext);
6040 return ret;
6041}
d9a64523 6042
cb323159
A
6043/*
6044 * detach a knote from the specified list. Return true if that was the last entry.
6045 * The list is protected by whatever lock the object it is associated with uses.
6046 */
6047int
6048knote_detach(struct klist *list, struct knote *kn)
6049{
6050 SLIST_REMOVE(list, kn, knote, kn_selnext);
6051 return SLIST_EMPTY(list);
6052}
d9a64523 6053
cb323159
A
6054/*
6055 * knote_vanish - Indicate that the source has vanished
6056 *
6057 * If the knote has requested EV_VANISHED delivery,
6058 * arrange for that. Otherwise, deliver a NOTE_REVOKE
6059 * event for backward compatibility.
6060 *
6061 * The knote is marked as having vanished, but is not
6062 * actually detached from the source in this instance.
6063 * The actual detach is deferred until the knote drop.
6064 *
6065 * Our caller already has the object lock held. Calling
6066 * the detach routine would try to take that lock
6067 * recursively - which likely is not supported.
6068 */
6069void
6070knote_vanish(struct klist *list, bool make_active)
6071{
6072 struct knote *kn;
6073 struct knote *kn_next;
5ba3f43e 6074
cb323159
A
6075 SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
6076 struct kqueue *kq = knote_get_kq(kn);
6077
6078 kqlock(kq);
6079 if (__probable(kn->kn_status & KN_REQVANISH)) {
d9a64523 6080 /*
cb323159 6081 * If EV_VANISHED is supported - prepare to deliver one
d9a64523 6082 */
cb323159
A
6083 kn->kn_status |= KN_VANISHED;
6084 } else {
6085 /*
6086 * Handle the legacy way to indicate that the port/portset was
6087 * deallocated or left the current Mach portspace (modern technique
6088 * is with an EV_VANISHED protocol).
6089 *
6090 * Deliver an EV_EOF event for these changes (hopefully it will get
6091 * delivered before the port name recycles to the same generation
6092 * count and someone tries to re-register a kevent for it or the
6093 * events are udata-specific - avoiding a conflict).
6094 */
6095 kn->kn_flags |= EV_EOF | EV_ONESHOT;
d9a64523 6096 }
cb323159
A
6097 if (make_active) {
6098 knote_activate(kq, kn, FILTER_ACTIVE);
d9a64523 6099 }
cb323159 6100 kqunlock(kq);
5ba3f43e 6101 }
5ba3f43e
A
6102}
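/*
 * From userspace, the two branches above differ in what gets delivered: a
 * registration that requested the EV_VANISHED protocol (KN_REQVANISH) is
 * arranged to receive an EV_VANISHED event when its source goes away, while a
 * legacy registration only observes EV_EOF | EV_ONESHOT.
 */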
6103
d9a64523 6104/*
cb323159
A
6105 * Force a lazy allocation of the waitqset link
6106 * of the kq_wqs associated with the kn
6107 * if it wasn't already allocated.
d9a64523 6108 *
cb323159
A
6109 * This allows knote_link_waitq to never block
6110 * if reserved_link is not NULL.
d9a64523
A
6111 */
6112void
cb323159 6113knote_link_waitqset_lazy_alloc(struct knote *kn)
5ba3f43e 6114{
cb323159
A
6115 struct kqueue *kq = knote_get_kq(kn);
6116 waitq_set_lazy_init_link(&kq->kq_wqs);
5ba3f43e
A
6117}
6118
cb323159
A
6119/*
6120 * Check if a lazy allocation for the waitqset link
6121 * of the kq_wqs is needed.
6122 */
6123boolean_t
6124knote_link_waitqset_should_lazy_alloc(struct knote *kn)
5ba3f43e 6125{
cb323159
A
6126 struct kqueue *kq = knote_get_kq(kn);
6127 return waitq_set_should_lazy_init_link(&kq->kq_wqs);
d9a64523 6128}
5ba3f43e 6129
d9a64523 6130/*
cb323159
A
6131 * For a given knote, link a provided wait queue directly with the kqueue.
6132 * Wakeups will happen via recursive wait queue support. But nothing will move
6133 * the knote to the active list at wakeup (nothing calls knote()). Instead,
 6134 * we permanently enqueue such knotes here (via knote_markstayactive()).
d9a64523 6135 *
cb323159
A
6136 * kqueue and knote references are held by caller.
6137 * waitq locked by caller.
d9a64523 6138 *
cb323159
A
 6139 * caller provides the wait queue link structure and ensures that the kq->kq_wqs
6140 * is linked by previously calling knote_link_waitqset_lazy_alloc.
d9a64523 6141 */
cb323159
A
6142int
6143knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link)
d9a64523 6144{
cb323159
A
6145 struct kqueue *kq = knote_get_kq(kn);
6146 kern_return_t kr;
6147
6148 kr = waitq_link(wq, &kq->kq_wqs, WAITQ_ALREADY_LOCKED, reserved_link);
6149 if (kr == KERN_SUCCESS) {
6150 knote_markstayactive(kn);
6151 return 0;
5ba3f43e 6152 } else {
cb323159 6153 return EINVAL;
5ba3f43e
A
6154 }
6155}
6156
6157/*
cb323159
A
6158 * Unlink the provided wait queue from the kqueue associated with a knote.
6159 * Also remove it from the magic list of directly attached knotes.
5ba3f43e 6160 *
cb323159
A
6161 * Note that the unlink may have already happened from the other side, so
6162 * ignore any failures to unlink and just remove it from the kqueue list.
5ba3f43e 6163 *
cb323159 6164 * On success, caller is responsible for the link structure
5ba3f43e 6165 */
cb323159
A
6166int
6167knote_unlink_waitq(struct knote *kn, struct waitq *wq)
5ba3f43e 6168{
cb323159
A
6169 struct kqueue *kq = knote_get_kq(kn);
6170 kern_return_t kr;
5ba3f43e 6171
cb323159
A
6172 kr = waitq_unlink(wq, &kq->kq_wqs);
6173 knote_clearstayactive(kn);
6174 return (kr != KERN_SUCCESS) ? EINVAL : 0;
5ba3f43e
A
6175}
6176
cb323159
A
6177/*
6178 * remove all knotes referencing a specified fd
6179 *
6180 * Entered with the proc_fd lock already held.
6181 * It returns the same way, but may drop it temporarily.
6182 */
6183void
6184knote_fdclose(struct proc *p, int fd)
5ba3f43e 6185{
cb323159
A
6186 struct klist *list;
6187 struct knote *kn;
6188 KNOTE_LOCK_CTX(knlc);
5ba3f43e 6189
cb323159
A
6190restart:
6191 list = &p->p_fd->fd_knlist[fd];
6192 SLIST_FOREACH(kn, list, kn_link) {
6193 struct kqueue *kq = knote_get_kq(kn);
5ba3f43e 6194
cb323159 6195 kqlock(kq);
5ba3f43e 6196
cb323159
A
6197 if (kq->kq_p != p) {
6198 panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
6199 __func__, kq->kq_p, p);
5ba3f43e 6200 }
cb323159
A
6201
6202 /*
6203 * If the knote supports EV_VANISHED delivery,
6204 * transition it to vanished mode (or skip over
6205 * it if already vanished).
6206 */
6207 if (kn->kn_status & KN_VANISHED) {
6208 kqunlock(kq);
6209 continue;
5ba3f43e
A
6210 }
6211
cb323159
A
6212 proc_fdunlock(p);
6213 if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
6214 /* the knote was dropped by someone, nothing to do */
6215 } else if (kn->kn_status & KN_REQVANISH) {
6216 kn->kn_status |= KN_VANISHED;
5ba3f43e 6217
cb323159
A
6218 kqunlock(kq);
6219 knote_fops(kn)->f_detach(kn);
6220 if (kn->kn_is_fd) {
f427ee49 6221 fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0);
5ba3f43e 6222 }
cb323159
A
6223 kn->kn_filtid = EVFILTID_DETACHED;
6224 kqlock(kq);
5ba3f43e 6225
cb323159
A
6226 knote_activate(kq, kn, FILTER_ACTIVE);
6227 knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
5ba3f43e 6228 } else {
cb323159 6229 knote_drop(kq, kn, &knlc);
5ba3f43e 6230 }
39037602 6231
cb323159
A
6232 proc_fdlock(p);
6233 goto restart;
5ba3f43e 6234 }
cb323159 6235}
39037602 6236
cb323159
A
6237/*
6238 * knote_fdfind - lookup a knote in the fd table for process
6239 *
6240 * If the filter is file-based, lookup based on fd index.
6241 * Otherwise use a hash based on the ident.
6242 *
6243 * Matching is based on kq, filter, and ident. Optionally,
6244 * it may also be based on the udata field in the kevent -
 6245 * allowing multiple event registrations for the file object
6246 * per kqueue.
6247 *
6248 * fd_knhashlock or fdlock held on entry (and exit)
6249 */
6250static struct knote *
6251knote_fdfind(struct kqueue *kq,
6252 const struct kevent_internal_s *kev,
6253 bool is_fd,
6254 struct proc *p)
6255{
6256 struct filedesc *fdp = p->p_fd;
6257 struct klist *list = NULL;
6258 struct knote *kn = NULL;
39037602 6259
5ba3f43e 6260 /*
cb323159 6261 * determine where to look for the knote
5ba3f43e 6262 */
cb323159
A
6263 if (is_fd) {
6264 /* fd-based knotes are linked off the fd table */
6265 if (kev->kei_ident < (u_int)fdp->fd_knlistsize) {
6266 list = &fdp->fd_knlist[kev->kei_ident];
5ba3f43e 6267 }
cb323159
A
6268 } else if (fdp->fd_knhashmask != 0) {
6269 /* hash non-fd knotes here too */
6270 list = &fdp->fd_knhash[KN_HASH((u_long)kev->kei_ident, fdp->fd_knhashmask)];
5ba3f43e 6271 }
39037602 6272
5ba3f43e 6273 /*
cb323159 6274 * scan the selected list looking for a match
39037602 6275 */
cb323159
A
6276 if (list != NULL) {
6277 SLIST_FOREACH(kn, list, kn_link) {
6278 if (kq == knote_get_kq(kn) &&
6279 kev->kei_ident == kn->kn_id &&
6280 kev->kei_filter == kn->kn_filter) {
6281 if (kev->kei_flags & EV_UDATA_SPECIFIC) {
6282 if ((kn->kn_flags & EV_UDATA_SPECIFIC) &&
6283 kev->kei_udata == kn->kn_udata) {
6284 break; /* matching udata-specific knote */
6285 }
6286 } else if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0) {
6287 break; /* matching non-udata-specific knote */
6288 }
d9a64523 6289 }
5ba3f43e 6290 }
39037602 6291 }
cb323159 6292 return kn;
39037602
A
6293}
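/*
 * The EV_UDATA_SPECIFIC check above is what permits several knotes for the
 * same (ident, filter) pair on one kqueue: a udata-specific lookup only
 * matches a knote carrying the same udata, so two EV_UDATA_SPECIFIC kevents
 * with different udata values coexist instead of updating each other.
 */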
6294
cb323159
A
6295/*
 6296 * kq_add_knote - Add knote to the fd table for process
6297 * while checking for duplicates.
6298 *
6299 * All file-based filters associate a list of knotes by file
6300 * descriptor index. All other filters hash the knote by ident.
6301 *
6302 * May have to grow the table of knote lists to cover the
6303 * file descriptor index presented.
6304 *
6305 * fd_knhashlock and fdlock unheld on entry (and exit).
6306 *
6307 * Takes a rwlock boost if inserting the knote is successful.
6308 */
6309static int
6310kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc,
6311 struct proc *p)
39037602 6312{
cb323159
A
6313 struct filedesc *fdp = p->p_fd;
6314 struct klist *list = NULL;
6315 int ret = 0;
6316 bool is_fd = kn->kn_is_fd;
f427ee49 6317 uint64_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE, TRUE);
39037602 6318
cb323159
A
6319 if (is_fd) {
6320 proc_fdlock(p);
5ba3f43e 6321 } else {
cb323159 6322 knhash_lock(fdp);
5ba3f43e 6323 }
39037602 6324
cb323159
A
6325 if (knote_fdfind(kq, &kn->kn_kevent, is_fd, p) != NULL) {
6326 /* found an existing knote: we can't add this one */
6327 ret = ERESTART;
6328 goto out_locked;
5ba3f43e 6329 }
39037602 6330
cb323159
A
6331 /* knote was not found: add it now */
6332 if (!is_fd) {
6333 if (fdp->fd_knhashmask == 0) {
6334 u_long size = 0;
39037602 6335
cb323159
A
6336 list = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE, &size);
6337 if (list == NULL) {
6338 ret = ENOMEM;
6339 goto out_locked;
6340 }
39037602 6341
cb323159
A
6342 fdp->fd_knhash = list;
6343 fdp->fd_knhashmask = size;
6344 }
5ba3f43e 6345
cb323159
A
6346 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
6347 SLIST_INSERT_HEAD(list, kn, kn_link);
6348 ret = 0;
6349 goto out_locked;
6350 } else {
6351 /* knote is fd based */
39037602 6352
cb323159
A
6353 if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
6354 u_int size = 0;
d9a64523 6355
f427ee49
A
 6356 /* Make sure that the fd stays below the current process's soft limit AND the system-allowed per-process limit */
6357 if (kn->kn_id >= (uint64_t) nofile
6358 || kn->kn_id >= (uint64_t)maxfilesperproc) {
cb323159
A
6359 ret = EINVAL;
6360 goto out_locked;
6361 }
6362 /* have to grow the fd_knlist */
6363 size = fdp->fd_knlistsize;
6364 while (size <= kn->kn_id) {
6365 size += KQEXTENT;
6366 }
5ba3f43e 6367
cb323159
A
6368 if (size >= (UINT_MAX / sizeof(struct klist *))) {
6369 ret = EINVAL;
6370 goto out_locked;
6371 }
d9a64523 6372
c3c9b80d
A
6373 list = kheap_alloc(KM_KQUEUE, size * sizeof(struct klist *),
6374 Z_WAITOK);
cb323159
A
6375 if (list == NULL) {
6376 ret = ENOMEM;
6377 goto out_locked;
6378 }
39037602 6379
cb323159
A
6380 bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
6381 fdp->fd_knlistsize * sizeof(struct klist *));
6382 bzero((caddr_t)list +
6383 fdp->fd_knlistsize * sizeof(struct klist *),
6384 (size - fdp->fd_knlistsize) * sizeof(struct klist *));
c3c9b80d
A
6385 kheap_free(KM_KQUEUE, fdp->fd_knlist,
6386 fdp->fd_knlistsize * sizeof(struct klist *));
cb323159
A
6387 fdp->fd_knlist = list;
6388 fdp->fd_knlistsize = size;
6389 }
39037602 6390
cb323159
A
6391 list = &fdp->fd_knlist[kn->kn_id];
6392 SLIST_INSERT_HEAD(list, kn, kn_link);
6393 ret = 0;
6394 goto out_locked;
39037602 6395 }
39037602 6396
cb323159
A
6397out_locked:
6398 if (ret == 0) {
6399 kqlock(kq);
6400 assert((kn->kn_status & KN_LOCKED) == 0);
6401 (void)knote_lock(kq, kn, knlc, KNOTE_KQ_UNLOCK);
6402 kqueue_retain(kq); /* retain a kq ref */
6403 }
6404 if (is_fd) {
6405 proc_fdunlock(p);
6406 } else {
6407 knhash_unlock(fdp);
6408 }
39037602 6409
cb323159 6410 return ret;
39037602
A
6411}
6412
cb323159
A
6413/*
6414 * kq_remove_knote - remove a knote from the fd table for process
6415 *
6416 * If the filter is file-based, remove based on fd index.
6417 * Otherwise remove from the hash based on the ident.
6418 *
6419 * fd_knhashlock and fdlock unheld on entry (and exit).
6420 */
39037602 6421static void
cb323159
A
6422kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p,
6423 struct knote_lock_ctx *knlc)
39037602 6424{
cb323159
A
6425 struct filedesc *fdp = p->p_fd;
6426 struct klist *list = NULL;
6427 uint16_t kq_state;
6428 bool is_fd = kn->kn_is_fd;
39037602 6429
cb323159
A
6430 if (is_fd) {
6431 proc_fdlock(p);
d9a64523 6432 } else {
cb323159 6433 knhash_lock(fdp);
d9a64523 6434 }
39037602 6435
cb323159
A
6436 if (is_fd) {
6437 assert((u_int)fdp->fd_knlistsize > kn->kn_id);
6438 list = &fdp->fd_knlist[kn->kn_id];
6439 } else {
6440 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
6441 }
6442 SLIST_REMOVE(list, kn, knote, kn_link);
d9a64523 6443
cb323159
A
6444 kqlock(kq);
6445 kq_state = kq->kq_state;
6446 if (knlc) {
6447 knote_unlock_cancel(kq, kn, knlc);
6448 } else {
6449 kqunlock(kq);
6450 }
6451 if (is_fd) {
6452 proc_fdunlock(p);
6453 } else {
6454 knhash_unlock(fdp);
d9a64523 6455 }
39037602 6456
cb323159
A
6457 if (kq_state & KQ_DYNAMIC) {
6458 kqworkloop_release((struct kqworkloop *)kq);
d9a64523 6459 }
39037602
A
6460}
6461
cb323159
A
6462/*
6463 * kq_find_knote_and_kq_lock - lookup a knote in the fd table for process
6464 * and, if the knote is found, acquires the kqlock while holding the fd table lock/spinlock.
6465 *
6466 * fd_knhashlock or fdlock unheld on entry (and exit)
6467 */
6468
6469static struct knote *
6470kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_qos_s *kev,
6471 bool is_fd, struct proc *p)
5ba3f43e 6472{
cb323159
A
6473 struct filedesc *fdp = p->p_fd;
6474 struct knote *kn;
5ba3f43e 6475
cb323159
A
6476 if (is_fd) {
6477 proc_fdlock(p);
6478 } else {
6479 knhash_lock(fdp);
6480 }
5ba3f43e 6481
cb323159
A
6482 /*
6483 * Temporary horrible hack:
6484 * this cast is gross and will go away in a future change.
 6485 * It is OK to do because we don't look at xflags/s_fflags,
 6486 * and because when we cast down the kev this way,
6487 * the truncated filter field works.
6488 */
6489 kn = knote_fdfind(kq, (struct kevent_internal_s *)kev, is_fd, p);
5ba3f43e 6490
cb323159
A
6491 if (kn) {
6492 kqlock(kq);
6493 assert(knote_get_kq(kn) == kq);
5ba3f43e 6494 }
5ba3f43e 6495
cb323159
A
6496 if (is_fd) {
6497 proc_fdunlock(p);
6498 } else {
6499 knhash_unlock(fdp);
6500 }
6501
6502 return kn;
39037602
A
6503}
6504
cb323159 6505__attribute__((noinline))
39037602 6506static void
cb323159 6507kqfile_wakeup(struct kqfile *kqf, __unused kq_index_t qos)
39037602 6508{
cb323159
A
6509 /* flag wakeups during processing */
6510 if (kqf->kqf_state & KQ_PROCESSING) {
6511 kqf->kqf_state |= KQ_WAKEUP;
5ba3f43e 6512 }
39037602 6513
cb323159
A
6514 /* wakeup a thread waiting on this queue */
6515 if (kqf->kqf_state & (KQ_SLEEP | KQ_SEL)) {
6516 kqf->kqf_state &= ~(KQ_SLEEP | KQ_SEL);
6517 waitq_wakeup64_all((struct waitq *)&kqf->kqf_wqs, KQ_EVENT,
6518 THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
39037602 6519 }
39037602 6520
cb323159
A
6521 /* wakeup other kqueues/select sets we're inside */
6522 KNOTE(&kqf->kqf_sel.si_note, 0);
5ba3f43e
A
6523}
6524
cb323159
A
6525static struct kqtailq *
6526knote_get_tailq(kqueue_t kqu, struct knote *kn)
5ba3f43e 6527{
cb323159 6528 kq_index_t qos_index = kn->kn_qos_index;
5ba3f43e 6529
cb323159
A
6530 if (kqu.kq->kq_state & KQ_WORKLOOP) {
6531 assert(qos_index < KQWL_NBUCKETS);
6532 } else if (kqu.kq->kq_state & KQ_WORKQ) {
6533 assert(qos_index < KQWQ_NBUCKETS);
6534 } else {
6535 assert(qos_index == QOS_INDEX_KQFILE);
5ba3f43e 6536 }
cb323159
A
6537 static_assert(offsetof(struct kqueue, kq_queue) == sizeof(struct kqueue),
6538 "struct kqueue::kq_queue must be exactly at the end");
6539 return &kqu.kq->kq_queue[qos_index];
5ba3f43e
A
6540}
6541
39037602 6542static void
cb323159 6543knote_enqueue(kqueue_t kqu, struct knote *kn, kn_status_t wakeup_mask)
39037602 6544{
cb323159 6545 kqlock_held(kqu);
39037602 6546
cb323159
A
6547 if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0) {
6548 return;
39037602
A
6549 }
6550
cb323159
A
6551 if (kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)) {
6552 return;
6553 }
39037602 6554
cb323159
A
6555 if ((kn->kn_status & KN_QUEUED) == 0) {
6556 struct kqtailq *queue = knote_get_tailq(kqu, kn);
5ba3f43e 6557
cb323159
A
6558 TAILQ_INSERT_TAIL(queue, kn, kn_tqe);
6559 kn->kn_status |= KN_QUEUED;
6560 kqu.kq->kq_count++;
6561 } else if ((kn->kn_status & KN_STAYACTIVE) == 0) {
6562 return;
5ba3f43e 6563 }
d9a64523 6564
cb323159
A
6565 if (kn->kn_status & wakeup_mask) {
6566 if (kqu.kq->kq_state & KQ_WORKLOOP) {
6567 kqworkloop_wakeup(kqu.kqwl, kn->kn_qos_index);
6568 } else if (kqu.kq->kq_state & KQ_WORKQ) {
6569 kqworkq_wakeup(kqu.kqwq, kn->kn_qos_index);
6570 } else {
6571 kqfile_wakeup(kqu.kqf, kn->kn_qos_index);
6572 }
6573 }
39037602
A
6574}
6575
cb323159
A
6576__attribute__((always_inline))
6577static inline void
6578knote_dequeue(kqueue_t kqu, struct knote *kn)
39037602 6579{
cb323159
A
6580 if (kn->kn_status & KN_QUEUED) {
6581 struct kqtailq *queue = knote_get_tailq(kqu, kn);
5ba3f43e 6582
cb323159
A
6583 // attaching the knote calls knote_reset_priority() without
6584 // the kqlock which is fine, so we can't call kqlock_held()
6585 // if we're not queued.
6586 kqlock_held(kqu);
39037602 6587
cb323159
A
6588 TAILQ_REMOVE(queue, kn, kn_tqe);
6589 kn->kn_status &= ~KN_QUEUED;
6590 kqu.kq->kq_count--;
6591 }
d9a64523 6592}
39037602 6593
cb323159 6594/* called with kqueue lock held */
d9a64523 6595static void
cb323159 6596knote_suppress(kqueue_t kqu, struct knote *kn)
39037602 6597{
cb323159 6598 struct kqtailq *suppressq;
39037602 6599
cb323159 6600 kqlock_held(kqu);
39037602 6601
cb323159
A
6602 assert((kn->kn_status & KN_SUPPRESSED) == 0);
6603 assert(kn->kn_status & KN_QUEUED);
39037602 6604
cb323159
A
6605 knote_dequeue(kqu, kn);
6606 /* deactivate - so new activations indicate a wakeup */
6607 kn->kn_status &= ~KN_ACTIVE;
6608 kn->kn_status |= KN_SUPPRESSED;
6609 suppressq = kqueue_get_suppressed_queue(kqu, kn);
6610 TAILQ_INSERT_TAIL(suppressq, kn, kn_tqe);
5ba3f43e
A
6611}
6612
cb323159
A
6613__attribute__((always_inline))
6614static inline void
6615knote_unsuppress_noqueue(kqueue_t kqu, struct knote *kn)
5ba3f43e 6616{
cb323159 6617 struct kqtailq *suppressq;
5ba3f43e 6618
cb323159 6619 kqlock_held(kqu);
5ba3f43e 6620
cb323159 6621 assert(kn->kn_status & KN_SUPPRESSED);
5ba3f43e 6622
cb323159
A
6623 kn->kn_status &= ~KN_SUPPRESSED;
6624 suppressq = kqueue_get_suppressed_queue(kqu, kn);
6625 TAILQ_REMOVE(suppressq, kn, kn_tqe);
5ba3f43e 6626
d9a64523 6627 /*
cb323159
A
6628 * If the knote is no longer active, reset its push,
6629 * and resynchronize kn_qos_index with kn_qos_override
6630 * for knotes with a real qos.
d9a64523 6631 */
cb323159
A
6632 if ((kn->kn_status & KN_ACTIVE) == 0 && knote_has_qos(kn)) {
6633 kn->kn_qos_override = _pthread_priority_thread_qos_fast(kn->kn_qos);
d9a64523 6634 }
cb323159
A
6635 kn->kn_qos_index = kn->kn_qos_override;
6636}
5ba3f43e 6637
cb323159
A
6638/* called with kqueue lock held */
6639static void
6640knote_unsuppress(kqueue_t kqu, struct knote *kn)
6641{
6642 if (kn->kn_status & KN_SUPPRESSED) {
6643 knote_unsuppress_noqueue(kqu, kn);
6644
6645 /* don't wakeup if unsuppressing just a stay-active knote */
6646 knote_enqueue(kqu, kn, KN_ACTIVE);
d9a64523 6647 }
cb323159 6648}
5ba3f43e 6649
cb323159
A
6650__attribute__((always_inline))
6651static inline void
6652knote_mark_active(struct knote *kn)
6653{
6654 if ((kn->kn_status & KN_ACTIVE) == 0) {
94ff46dc 6655 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_ACTIVATE),
cb323159
A
6656 kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
6657 kn->kn_filtid);
d9a64523 6658 }
5ba3f43e 6659
cb323159 6660 kn->kn_status |= KN_ACTIVE;
d9a64523 6661}
5ba3f43e 6662
cb323159 6663/* called with kqueue lock held */
d9a64523 6664static void
cb323159 6665knote_activate(kqueue_t kqu, struct knote *kn, int result)
d9a64523 6666{
cb323159
A
6667 assert(result & FILTER_ACTIVE);
6668 if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
6669 // may dequeue the knote
6670 knote_adjust_qos(kqu.kq, kn, result);
39037602 6671 }
cb323159
A
6672 knote_mark_active(kn);
6673 knote_enqueue(kqu, kn, KN_ACTIVE | KN_STAYACTIVE);
39037602
A
6674}
6675
cb323159
A
6676/*
6677 * This function applies changes requested by f_attach or f_touch for
6678 * a given filter. It proceeds in a carefully chosen order to help
6679 * every single transition do the minimal amount of work possible.
6680 */
39037602 6681static void
cb323159
A
6682knote_apply_touch(kqueue_t kqu, struct knote *kn, struct kevent_qos_s *kev,
6683 int result)
39037602 6684{
cb323159 6685 kn_status_t wakeup_mask = KN_ACTIVE;
39037602 6686
cb323159
A
6687 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
6688 /*
6689 * When a stayactive knote is reenabled, we may have missed wakeups
6690 * while it was disabled, so we need to poll it. To do so, ask
6691 * knote_enqueue() below to reenqueue it.
6692 */
6693 wakeup_mask |= KN_STAYACTIVE;
6694 kn->kn_status &= ~KN_DISABLED;
39037602 6695
d9a64523 6696 /*
cb323159
A
6697 * it is possible for userland to have knotes registered for a given
6698 * workloop `wl_orig` but really handled on another workloop `wl_new`.
6699 *
6700 * In that case, rearming will happen from the servicer thread of
 6701 * `wl_new`, which, if `wl_orig` is no longer being serviced, would cause
6702 * this knote to stay suppressed forever if we only relied on
6703 * kqworkloop_acknowledge_events to be called by `wl_orig`.
6704 *
6705 * However if we see the KQ_PROCESSING bit on `wl_orig` set, we can't
6706 * unsuppress because that would mess with the processing phase of
6707 * `wl_orig`, however it also means kqworkloop_acknowledge_events()
6708 * will be called.
d9a64523 6709 */
cb323159
A
6710 if (__improbable(kn->kn_status & KN_SUPPRESSED)) {
6711 if ((kqu.kq->kq_state & KQ_PROCESSING) == 0) {
6712 knote_unsuppress_noqueue(kqu, kn);
6713 }
5ba3f43e 6714 }
cb323159 6715 }
39037602 6716
cb323159
A
6717 if ((result & FILTER_UPDATE_REQ_QOS) && kev->qos && kev->qos != kn->kn_qos) {
6718 // may dequeue the knote
6719 knote_reset_priority(kqu, kn, kev->qos);
6720 }
39037602 6721
cb323159
A
6722 /*
6723 * When we unsuppress above, or because of knote_reset_priority(),
6724 * the knote may have been dequeued, we need to restore the invariant
6725 * that if the knote is active it needs to be queued now that
6726 * we're done applying changes.
6727 */
6728 if (result & FILTER_ACTIVE) {
6729 knote_activate(kqu, kn, result);
6730 } else {
6731 knote_enqueue(kqu, kn, wakeup_mask);
6732 }
b0d623f7 6733
cb323159
A
6734 if ((result & FILTER_THREADREQ_NODEFEER) &&
6735 act_clear_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ)) {
6736 workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
2d21ac55 6737 }
55e303ae 6738}
5ba3f43e 6739
91447636 6740/*
cb323159
A
6741 * knote_drop - disconnect and drop the knote
6742 *
6743 * Called with the kqueue locked, returns with the kqueue unlocked.
6744 *
6745 * If a knote locking context is passed, it is canceled.
6746 *
6747 * The knote may have already been detached from
6748 * (or not yet attached to) its source object.
91447636 6749 */
55e303ae 6750static void
cb323159 6751knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc)
55e303ae 6752{
cb323159 6753 struct proc *p = kq->kq_p;
3e170ce0 6754
cb323159
A
6755 kqlock_held(kq);
6756
6757 assert((kn->kn_status & KN_DROPPING) == 0);
6758 if (knlc == NULL) {
6759 assert((kn->kn_status & KN_LOCKED) == 0);
3e170ce0 6760 }
cb323159 6761 kn->kn_status |= KN_DROPPING;
3e170ce0 6762
cb323159
A
6763 if (kn->kn_status & KN_SUPPRESSED) {
6764 knote_unsuppress_noqueue(kq, kn);
6765 } else {
6766 knote_dequeue(kq, kn);
6767 }
6768 knote_wait_for_post(kq, kn);
3e170ce0 6769
cb323159 6770 knote_fops(kn)->f_detach(kn);
39037602 6771
cb323159
A
6772 /* kq may be freed when kq_remove_knote() returns */
6773 kq_remove_knote(kq, kn, p, knlc);
6774 if (kn->kn_is_fd && ((kn->kn_status & KN_VANISHED) == 0)) {
f427ee49 6775 fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0);
91447636 6776 }
cb323159
A
6777
6778 knote_free(kn);
55e303ae
A
6779}
6780
39037602 6781void
cb323159 6782knote_init(void)
39037602 6783{
cb323159
A
6784#if CONFIG_MEMORYSTATUS
6785 /* Initialize the memorystatus list lock */
f427ee49 6786 memorystatus_kevent_init(&kq_lck_grp, LCK_ATTR_NULL);
cb323159 6787#endif
39037602 6788}
cb323159 6789SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
39037602 6790
cb323159
A
6791const struct filterops *
6792knote_fops(struct knote *kn)
55e303ae 6793{
cb323159 6794 return sysfilt_ops[kn->kn_filtid];
55e303ae
A
6795}
6796
cb323159
A
6797static struct knote *
6798knote_alloc(void)
6799{
f427ee49 6800 return zalloc_flags(knote_zone, Z_WAITOK | Z_ZERO);
cb323159 6801}
91447636 6802
cb323159
A
6803static void
6804knote_free(struct knote *kn)
55e303ae 6805{
cb323159
A
6806 assert((kn->kn_status & (KN_LOCKED | KN_POSTING)) == 0);
6807 zfree(knote_zone, kn);
6808}
55e303ae 6809
cb323159
A
6810#pragma mark - syscalls: kevent, kevent64, kevent_qos, kevent_id
6811
6812kevent_ctx_t
6813kevent_get_context(thread_t thread)
6814{
6815 uthread_t ut = get_bsdthread_info(thread);
6816 return &ut->uu_save.uus_kevent;
55e303ae
A
6817}
6818
cb323159
A
6819static inline bool
6820kevent_args_requesting_events(unsigned int flags, int nevents)
55e303ae 6821{
cb323159 6822 return !(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0;
55e303ae
A
6823}
6824
cb323159
A
6825static inline int
6826kevent_adjust_flags_for_proc(proc_t p, int flags)
55e303ae 6827{
cb323159
A
6828 __builtin_assume(p);
6829 return flags | (IS_64BIT_PROCESS(p) ? KEVENT_FLAG_PROC64 : 0);
55e303ae
A
6830}
6831
cb323159
A
6832/*!
6833 * @function kevent_get_kqfile
39037602 6834 *
cb323159
A
6835 * @brief
6836 * Lookup a kqfile by fd.
39037602 6837 *
cb323159
A
6838 * @discussion
6839 * Callers: kevent, kevent64, kevent_qos
39037602 6840 *
cb323159 6841 * This is not assumed to be a fastpath (kqfile interfaces are legacy)
39037602 6842 */
cb323159
A
6843OS_NOINLINE
6844static int
6845kevent_get_kqfile(struct proc *p, int fd, int flags,
f427ee49 6846 struct fileproc **fpp, struct kqueue **kqp)
39037602 6847{
cb323159
A
6848 int error = 0;
6849 struct kqueue *kq;
39037602 6850
f427ee49 6851 error = fp_get_ftype(p, fd, DTYPE_KQUEUE, EBADF, fpp);
cb323159
A
6852 if (__improbable(error)) {
6853 return error;
6854 }
f427ee49 6855 kq = (struct kqueue *)(*fpp)->f_data;
39037602 6856
cb323159
A
6857 uint16_t kq_state = os_atomic_load(&kq->kq_state, relaxed);
6858 if (__improbable((kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) == 0)) {
39037602 6859 kqlock(kq);
cb323159
A
6860 kq_state = kq->kq_state;
6861 if (!(kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS))) {
6862 if (flags & KEVENT_FLAG_LEGACY32) {
6863 kq_state |= KQ_KEV32;
6864 } else if (flags & KEVENT_FLAG_LEGACY64) {
6865 kq_state |= KQ_KEV64;
6866 } else {
6867 kq_state |= KQ_KEV_QOS;
6868 }
6869 kq->kq_state = kq_state;
39037602
A
6870 }
6871 kqunlock(kq);
6872 }
cb323159
A
6873
6874 /*
6875 * kqfiles can't be used through the legacy kevent()
6876 * and other interfaces at the same time.
6877 */
6878 if (__improbable((bool)(flags & KEVENT_FLAG_LEGACY32) !=
6879 (bool)(kq_state & KQ_KEV32))) {
f427ee49 6880 fp_drop(p, fd, *fpp, 0);
cb323159
A
6881 return EINVAL;
6882 }
6883
6884 *kqp = kq;
6885 return 0;
39037602
A
6886}
6887
cb323159
A
6888/*!
6889 * @function kevent_get_kqwq
d9a64523 6890 *
cb323159
A
6891 * @brief
6892 * Lookup or create the process kqwq (fastpath).
6893 *
6894 * @discussion
6895 * Callers: kevent64, kevent_qos
d9a64523 6896 */
cb323159
A
6897OS_ALWAYS_INLINE
6898static int
6899kevent_get_kqwq(proc_t p, int flags, int nevents, struct kqueue **kqp)
d9a64523 6900{
cb323159 6901 struct kqworkq *kqwq = p->p_fd->fd_wqkqueue;
d9a64523 6902
cb323159
A
6903 if (__improbable(kevent_args_requesting_events(flags, nevents))) {
6904 return EINVAL;
6905 }
6906 if (__improbable(kqwq == NULL)) {
6907 kqwq = kqworkq_alloc(p, flags);
6908 if (__improbable(kqwq == NULL)) {
6909 return ENOMEM;
6910 }
6911 }
6912
6913 *kqp = &kqwq->kqwq_kqueue;
6914 return 0;
d9a64523
A
6915}
6916
cb323159
A
6917#pragma mark kevent copyio
6918
6919/*!
6920 * @function kevent_get_data_size
316670eb 6921 *
cb323159
A
6922 * @brief
6923 * Copies in the extra data size from user-space.
b0d623f7 6924 */
cb323159
A
6925static int
6926kevent_get_data_size(int flags, user_addr_t data_avail, user_addr_t data_out,
6927 kevent_ctx_t kectx)
6928{
6929 if (!data_avail || !data_out) {
6930 kectx->kec_data_size = 0;
6931 kectx->kec_data_resid = 0;
6932 } else if (flags & KEVENT_FLAG_PROC64) {
6933 user64_size_t usize = 0;
6934 int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
6935 if (__improbable(error)) {
6936 return error;
6937 }
6938 kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
b0d623f7 6939 } else {
cb323159
A
6940 user32_size_t usize = 0;
6941 int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
6942 if (__improbable(error)) {
6943 return error;
6944 }
6945 kectx->kec_data_avail = data_avail;
6946 kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
b0d623f7 6947 }
cb323159
A
6948 kectx->kec_data_out = data_out;
6949 kectx->kec_data_avail = data_avail;
6950 return 0;
b0d623f7
A
6951}
6952
cb323159
A
6953/*!
6954 * @function kevent_put_data_size
316670eb 6955 *
cb323159
A
6956 * @brief
6957 * Copies out the residual data size to user-space if any has been used.
b0d623f7 6958 */
cb323159
A
6959static int
6960kevent_put_data_size(unsigned int flags, kevent_ctx_t kectx)
b0d623f7 6961{
cb323159
A
6962 if (kectx->kec_data_resid == kectx->kec_data_size) {
6963 return 0;
6964 }
6965 if (flags & KEVENT_FLAG_KERNEL) {
6966 *(user_size_t *)(uintptr_t)kectx->kec_data_avail = kectx->kec_data_resid;
6967 return 0;
6968 }
6969 if (flags & KEVENT_FLAG_PROC64) {
6970 user64_size_t usize = (user64_size_t)kectx->kec_data_resid;
6971 return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
6972 } else {
6973 user32_size_t usize = (user32_size_t)kectx->kec_data_resid;
6974 return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
6975 }
b0d623f7
A
6976}
6977
cb323159
A
6978/*!
6979 * @function kevent_legacy_copyin
91447636 6980 *
cb323159
A
6981 * @brief
6982 * Handles the copyin of a kevent/kevent64 event.
55e303ae 6983 */
cb323159
A
6984static int
6985kevent_legacy_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp, unsigned int flags)
55e303ae 6986{
cb323159 6987 int error;
55e303ae 6988
cb323159 6989 assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);
39037602 6990
cb323159
A
6991 if (flags & KEVENT_FLAG_LEGACY64) {
6992 struct kevent64_s kev64;
55e303ae 6993
cb323159
A
6994 error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
6995 if (__improbable(error)) {
6996 return error;
6997 }
6998 *addrp += sizeof(kev64);
6999 *kevp = (struct kevent_qos_s){
7000 .ident = kev64.ident,
7001 .filter = kev64.filter,
7002 /* Make sure user doesn't pass in any system flags */
7003 .flags = kev64.flags & ~EV_SYSFLAGS,
7004 .udata = kev64.udata,
7005 .fflags = kev64.fflags,
7006 .data = kev64.data,
7007 .ext[0] = kev64.ext[0],
7008 .ext[1] = kev64.ext[1],
7009 };
7010 } else if (flags & KEVENT_FLAG_PROC64) {
7011 struct user64_kevent kev64;
7012
7013 error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
7014 if (__improbable(error)) {
7015 return error;
0a7de745 7016 }
cb323159
A
7017 *addrp += sizeof(kev64);
7018 *kevp = (struct kevent_qos_s){
7019 .ident = kev64.ident,
7020 .filter = kev64.filter,
7021 /* Make sure user doesn't pass in any system flags */
7022 .flags = kev64.flags & ~EV_SYSFLAGS,
7023 .udata = kev64.udata,
7024 .fflags = kev64.fflags,
7025 .data = kev64.data,
7026 };
7027 } else {
7028 struct user32_kevent kev32;
2d21ac55 7029
cb323159
A
7030 error = copyin(*addrp, (caddr_t)&kev32, sizeof(kev32));
7031 if (__improbable(error)) {
7032 return error;
39037602 7033 }
cb323159
A
7034 *addrp += sizeof(kev32);
7035 *kevp = (struct kevent_qos_s){
7036 .ident = (uintptr_t)kev32.ident,
7037 .filter = kev32.filter,
7038 /* Make sure user doesn't pass in any system flags */
7039 .flags = kev32.flags & ~EV_SYSFLAGS,
7040 .udata = CAST_USER_ADDR_T(kev32.udata),
7041 .fflags = kev32.fflags,
7042 .data = (intptr_t)kev32.data,
7043 };
7044 }
39037602 7045
cb323159
A
7046 return 0;
7047}
91447636 7048
cb323159
A
7049/*!
7050 * @function kevent_modern_copyin
7051 *
7052 * @brief
7053 * Handles the copyin of a kevent_qos/kevent_id event.
7054 */
7055static int
7056kevent_modern_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp)
7057{
7058 int error = copyin(*addrp, (caddr_t)kevp, sizeof(struct kevent_qos_s));
7059 if (__probable(!error)) {
7060 /* Make sure user doesn't pass in any system flags */
7061 *addrp += sizeof(struct kevent_qos_s);
7062 kevp->flags &= ~EV_SYSFLAGS;
7063 }
7064 return error;
7065}
d9a64523 7066
cb323159
A
7067/*!
7068 * @function kevent_legacy_copyout
7069 *
7070 * @brief
7071 * Handles the copyout of a kevent/kevent64 event.
7072 */
7073static int
7074kevent_legacy_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp, unsigned int flags)
7075{
7076 int advance;
7077 int error;
39236c6e 7078
cb323159
A
7079 assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);
7080
7081 /*
7082	 * fully initialize the different output event structure
7083 * types from the internal kevent (and some universal
7084 * defaults for fields not represented in the internal
7085 * form).
7086 *
7087 * Note: these structures have no padding hence the C99
7088 * initializers below do not leak kernel info.
7089 */
7090 if (flags & KEVENT_FLAG_LEGACY64) {
7091 struct kevent64_s kev64 = {
7092 .ident = kevp->ident,
7093 .filter = kevp->filter,
7094 .flags = kevp->flags,
7095 .fflags = kevp->fflags,
7096 .data = (int64_t)kevp->data,
7097 .udata = kevp->udata,
7098 .ext[0] = kevp->ext[0],
7099 .ext[1] = kevp->ext[1],
7100 };
7101 advance = sizeof(struct kevent64_s);
7102 error = copyout((caddr_t)&kev64, *addrp, advance);
7103 } else if (flags & KEVENT_FLAG_PROC64) {
7104 /*
7105 * deal with the special case of a user-supplied
7106 * value of (uintptr_t)-1.
7107 */
7108 uint64_t ident = (kevp->ident == (uintptr_t)-1) ?
7109 (uint64_t)-1LL : (uint64_t)kevp->ident;
7110 struct user64_kevent kev64 = {
7111 .ident = ident,
7112 .filter = kevp->filter,
7113 .flags = kevp->flags,
7114 .fflags = kevp->fflags,
7115 .data = (int64_t) kevp->data,
f427ee49 7116 .udata = (user_addr_t) kevp->udata,
cb323159
A
7117 };
7118 advance = sizeof(kev64);
7119 error = copyout((caddr_t)&kev64, *addrp, advance);
7120 } else {
7121 struct user32_kevent kev32 = {
7122 .ident = (uint32_t)kevp->ident,
7123 .filter = kevp->filter,
7124 .flags = kevp->flags,
7125 .fflags = kevp->fflags,
7126 .data = (int32_t)kevp->data,
f427ee49 7127 .udata = (uint32_t)kevp->udata,
cb323159
A
7128 };
7129 advance = sizeof(kev32);
7130 error = copyout((caddr_t)&kev32, *addrp, advance);
7131 }
7132 if (__probable(!error)) {
7133 *addrp += advance;
91447636 7134 }
cb323159 7135 return error;
55e303ae
A
7136}
7137
cb323159
A
7138/*!
7139 * @function kevent_modern_copyout
5ba3f43e 7140 *
cb323159
A
7141 * @brief
7142 * Handles the copyout of a kevent_qos/kevent_id event.
7143 */
7144OS_ALWAYS_INLINE
7145static inline int
7146kevent_modern_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp)
7147{
7148 int error = copyout((caddr_t)kevp, *addrp, sizeof(struct kevent_qos_s));
7149 if (__probable(!error)) {
7150 *addrp += sizeof(struct kevent_qos_s);
7151 }
7152 return error;
7153}
7154
7155#pragma mark kevent core implementation
7156
7157/*!
7158 * @function kevent_callback_inline
5ba3f43e 7159 *
cb323159
A
7160 * @brief
7161 * Callback for each individual event
5ba3f43e 7162 *
cb323159
A
7163 * @discussion
7164 * This is meant to be inlined in kevent_modern_callback and
7165 * kevent_legacy_callback.
5ba3f43e 7166 */
cb323159
A
7167OS_ALWAYS_INLINE
7168static inline int
7169kevent_callback_inline(struct kevent_qos_s *kevp, kevent_ctx_t kectx, bool legacy)
5ba3f43e 7170{
cb323159
A
7171 int error;
7172
7173 assert(kectx->kec_process_noutputs < kectx->kec_process_nevents);
5ba3f43e 7174
d9a64523 7175 /*
cb323159 7176 * Copy out the appropriate amount of event data for this user.
5ba3f43e 7177 */
cb323159
A
7178 if (legacy) {
7179 error = kevent_legacy_copyout(kevp, &kectx->kec_process_eventlist,
7180 kectx->kec_process_flags);
7181 } else {
7182 error = kevent_modern_copyout(kevp, &kectx->kec_process_eventlist);
5ba3f43e
A
7183 }
7184
7185 /*
cb323159
A
7186 * If there isn't space for additional events, return
7187 * a harmless error to stop the processing here
5ba3f43e 7188 */
cb323159
A
7189 if (error == 0 && ++kectx->kec_process_noutputs == kectx->kec_process_nevents) {
7190 error = EWOULDBLOCK;
5ba3f43e 7191 }
cb323159 7192 return error;
5ba3f43e
A
7193}
7194
cb323159
A
7195/*!
7196 * @function kevent_modern_callback
39037602 7197 *
cb323159
A
7198 * @brief
7199 * Callback for each individual modern event.
39037602 7200 *
cb323159
A
7201 * @discussion
7202 * This callback handles kevent_qos/kevent_id events.
7203 */
7204static int
7205kevent_modern_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
7206{
7207 return kevent_callback_inline(kevp, kectx, /*legacy*/ false);
7208}
7209
7210/*!
7211 * @function kevent_legacy_callback
39037602 7212 *
cb323159
A
7213 * @brief
7214 * Callback for each individual legacy event.
5ba3f43e 7215 *
cb323159
A
7216 * @discussion
7217 * This callback handles kevent/kevent64 events.
39037602 7218 */
91447636 7219static int
cb323159 7220kevent_legacy_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
55e303ae 7221{
cb323159
A
7222 return kevent_callback_inline(kevp, kectx, /*legacy*/ true);
7223}
5ba3f43e 7224
cb323159
A
7225/*!
7226 * @function kevent_cleanup
7227 *
7228 * @brief
7229 * Handles the cleanup returning from a kevent call.
7230 *
7231 * @discussion
7232 * kevent entry points will take a reference on workloops,
7233 * and a usecount on the fileglob of kqfiles.
7234 *
7235 * This function undoes this on the exit paths of kevents.
7236 *
7237 * @returns
7238 * The error to return to userspace.
7239 */
7240static int
7241kevent_cleanup(kqueue_t kqu, int flags, int error, kevent_ctx_t kectx)
7242{
7243 // poll should not call any codepath leading to this
7244 assert((flags & KEVENT_FLAG_POLL) == 0);
7245
7246 if (flags & KEVENT_FLAG_WORKLOOP) {
7247 kqworkloop_release(kqu.kqwl);
7248 } else if (flags & KEVENT_FLAG_WORKQ) {
7249 /* nothing held */
0a7de745 7250 } else {
cb323159 7251 fp_drop(kqu.kqf->kqf_p, kectx->kec_fd, kectx->kec_fp, 0);
0a7de745 7252 }
5ba3f43e 7253
cb323159
A
7254 /* don't restart after signals... */
7255 if (error == ERESTART) {
7256 error = EINTR;
7257 } else if (error == 0) {
7258 /* don't abandon other output just because of residual copyout failures */
7259 (void)kevent_put_data_size(flags, kectx);
5ba3f43e
A
7260 }
7261
cb323159
A
7262 if (flags & KEVENT_FLAG_PARKING) {
7263 thread_t th = current_thread();
7264 struct uthread *uth = get_bsdthread_info(th);
7265 if (uth->uu_kqr_bound) {
7266 thread_unfreeze_base_pri(th);
5ba3f43e 7267 }
cb323159
A
7268 }
7269 return error;
7270}
55e303ae 7271
cb323159
A
7272/*!
7273 * @function kqueue_process
7274 *
7275 * @brief
7276 * Process the triggered events in a kqueue.
7277 *
7278 * @discussion
7279 * Walk the queued knotes and validate that they are really still triggered
7280 * events by calling the filter routines (if necessary).
7281 *
7282 * For each event that is still considered triggered, invoke the callback
7283 * routine provided.
7284 *
7285 * caller holds a reference on the kqueue.
7286 * kqueue locked on entry and exit - but may be dropped
7287 * kqueue list locked (held for duration of call)
7288 *
7289 * This is only called by kqueue_scan() so that the compiler can inline it.
7290 *
7291 * @returns
7292 * - 0: no event was returned, no other error occurred
7293 * - EBADF: the kqueue is being destroyed (KQ_DRAIN is set)
7294 * - EWOULDBLOCK: (not an error) events have been found and we should return
7295 * - EFAULT: copyout failed
7296 * - filter specific errors
7297 */
7298static int
7299kqueue_process(kqueue_t kqu, int flags, kevent_ctx_t kectx,
7300 kevent_callback_t callback)
7301{
7302 workq_threadreq_t kqr = current_uthread()->uu_kqr_bound;
7303 struct knote *kn;
7304 int error = 0, rc = 0;
7305 struct kqtailq *base_queue, *queue;
7306#if DEBUG || DEVELOPMENT
7307 int retries = 64;
7308#endif
7309 uint16_t kq_type = (kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));
7310
7311 if (kq_type & KQ_WORKQ) {
7312 rc = kqworkq_begin_processing(kqu.kqwq, kqr, flags);
7313 } else if (kq_type & KQ_WORKLOOP) {
7314 rc = kqworkloop_begin_processing(kqu.kqwl, flags);
91447636 7315 } else {
cb323159
A
7316kqfile_retry:
7317 rc = kqfile_begin_processing(kqu.kqf);
7318 if (rc == EBADF) {
7319 return EBADF;
7320 }
7321 }
5ba3f43e 7322
cb323159
A
7323 if (rc == -1) {
7324 /* Nothing to process */
7325 return 0;
7326 }
91447636 7327
cb323159
A
7328 /*
7329 * loop through the enqueued knotes associated with this request,
7330 * processing each one. Each request may have several queues
7331 * of knotes to process (depending on the type of kqueue) so we
7332 * have to loop through all the queues as long as we have additional
7333 * space.
7334 */
316670eb 7335
cb323159
A
7336process_again:
7337 if (kq_type & KQ_WORKQ) {
7338 base_queue = queue = &kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index];
7339 } else if (kq_type & KQ_WORKLOOP) {
7340 base_queue = &kqu.kqwl->kqwl_queue[0];
7341 queue = &kqu.kqwl->kqwl_queue[KQWL_NBUCKETS - 1];
7342 } else {
7343 base_queue = queue = &kqu.kqf->kqf_queue;
7344 }
316670eb 7345
cb323159
A
7346 do {
7347 while ((kn = TAILQ_FIRST(queue)) != NULL) {
7348 error = knote_process(kn, kectx, callback);
7349 if (error == EJUSTRETURN) {
7350 error = 0;
7351 } else if (__improbable(error)) {
7352 /* error is EWOULDBLOCK when the out event array is full */
7353 goto stop_processing;
5ba3f43e 7354 }
91447636 7355 }
cb323159 7356 } while (queue-- > base_queue);
5ba3f43e 7357
cb323159
A
7358 if (kectx->kec_process_noutputs) {
7359 /* callers will transform this into no error */
7360 error = EWOULDBLOCK;
55e303ae 7361 }
5ba3f43e 7362
cb323159
A
7363stop_processing:
7364 /*
7365 * If KEVENT_FLAG_PARKING is set, and no kevents have been returned,
7366 * we want to unbind the kqrequest from the thread.
7367 *
7368 * However, because the kq locks are dropped several times during process,
7369 * new knotes may have fired again, in which case, we want to fail the end
7370 * processing and process again, until it converges.
7371 *
7372 * If we have an error or returned events, end processing never fails.
7373 */
7374 if (error) {
7375 flags &= ~KEVENT_FLAG_PARKING;
5ba3f43e 7376 }
cb323159
A
7377 if (kq_type & KQ_WORKQ) {
7378 rc = kqworkq_end_processing(kqu.kqwq, kqr, flags);
7379 } else if (kq_type & KQ_WORKLOOP) {
7380 rc = kqworkloop_end_processing(kqu.kqwl, KQ_PROCESSING, flags);
0a7de745 7381 } else {
cb323159 7382 rc = kqfile_end_processing(kqu.kqf);
0a7de745 7383 }
5ba3f43e 7384
cb323159
A
7385 if (__probable(error)) {
7386 return error;
7387 }
7388
7389 if (__probable(rc >= 0)) {
7390 assert(rc == 0 || rc == EBADF);
7391 return rc;
7392 }
7393
7394#if DEBUG || DEVELOPMENT
7395 if (retries-- == 0) {
7396 panic("kevent: way too many knote_process retries, kq: %p (0x%04x)",
7397 kqu.kq, kqu.kq->kq_state);
7398 }
7399#endif
7400 if (kq_type & (KQ_WORKQ | KQ_WORKLOOP)) {
7401 assert(flags & KEVENT_FLAG_PARKING);
7402 goto process_again;
7403 } else {
7404 goto kqfile_retry;
7405 }
55e303ae
A
7406}
7407
cb323159
A
7408/*!
7409 * @function kqueue_scan_continue
39037602 7410 *
cb323159
A
7411 * @brief
7412 * The continuation used by kqueue_scan for kevent entry points.
39037602 7413 *
cb323159
A
7414 * @discussion
7415 * Assumes we inherit a use/ref count on the kq or its fileglob.
7416 *
7417 * This is called by kqueue_scan if neither KEVENT_FLAG_POLL nor
7418 * KEVENT_FLAG_KERNEL was set, and the caller had to wait.
39037602 7419 */
cb323159 7420OS_NORETURN OS_NOINLINE
39037602 7421static void
cb323159 7422kqueue_scan_continue(void *data, wait_result_t wait_result)
39037602 7423{
cb323159
A
7424 uthread_t ut = current_uthread();
7425 kevent_ctx_t kectx = &ut->uu_save.uus_kevent;
7426 int error = 0, flags = kectx->kec_process_flags;
7427 struct kqueue *kq = data;
5ba3f43e 7428
cb323159
A
7429 /*
7430 * only kevent variants call in here, so we know the callback is
7431 * kevent_legacy_callback or kevent_modern_callback.
7432 */
7433 assert((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0);
5ba3f43e 7434
cb323159
A
7435 switch (wait_result) {
7436 case THREAD_AWAKENED:
7437 if (__improbable(flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64))) {
7438 error = kqueue_scan(kq, flags, kectx, kevent_legacy_callback);
7439 } else {
7440 error = kqueue_scan(kq, flags, kectx, kevent_modern_callback);
7441 }
7442 break;
7443 case THREAD_TIMED_OUT:
7444 error = 0;
7445 break;
7446 case THREAD_INTERRUPTED:
7447 error = EINTR;
7448 break;
7449 case THREAD_RESTART:
7450 error = EBADF;
7451 break;
7452 default:
7453 panic("%s: - invalid wait_result (%d)", __func__, wait_result);
39037602 7454 }
5ba3f43e 7455
d9a64523 7456
cb323159
A
7457 error = kevent_cleanup(kq, flags, error, kectx);
7458 *(int32_t *)&ut->uu_rval = kectx->kec_process_noutputs;
7459 unix_syscall_return(error);
39037602
A
7460}
7461
cb323159
A
7462/*!
7463 * @function kqueue_scan
39037602 7464 *
cb323159
A
7465 * @brief
7466 * Scan and wait for events in a kqueue (used by poll & kevent).
7467 *
7468 * @discussion
7469 * Process the triggered events in a kqueue.
7470 *
7471 * If there are no events triggered arrange to wait for them:
7472 * - unless KEVENT_FLAG_IMMEDIATE is set in kectx->kec_process_flags
7473 * - possibly until kectx->kec_deadline expires
7474 *
7475 * When it waits, and neither KEVENT_FLAG_POLL nor KEVENT_FLAG_KERNEL
7476 * is set, it will wait in the kqueue_scan_continue continuation.
7477 *
7478 * poll() will block in place, and KEVENT_FLAG_KERNEL calls
7479 * all pass KEVENT_FLAG_IMMEDIATE and will not wait.
7480 *
7481 * @param kq
7482 * The kqueue being scanned.
7483 *
7484 * @param flags
7485 * The KEVENT_FLAG_* flags for this call.
7486 *
7487 * @param kectx
7488 * The context used for this scan.
7489 * The uthread_t::uu_save.uus_kevent storage is used for this purpose.
7490 *
7491 * @param callback
7492 * The callback to be called on events successfully processed.
7493 * (Either kevent_legacy_callback, kevent_modern_callback or poll_callback)
39037602 7494 */
cb323159
A
7495int
7496kqueue_scan(struct kqueue *kq, int flags, kevent_ctx_t kectx,
7497 kevent_callback_t callback)
5ba3f43e 7498{
cb323159 7499 int error;
5ba3f43e 7500
cb323159
A
7501 for (;;) {
7502 kqlock(kq);
7503 error = kqueue_process(kq, flags, kectx, callback);
5ba3f43e 7504
cb323159
A
7505 /*
7506 * If we got an error, events returned (EWOULDBLOCK)
7507 * or blocking was disallowed (KEVENT_FLAG_IMMEDIATE),
7508 * just return.
7509 */
7510 if (__probable(error || (flags & KEVENT_FLAG_IMMEDIATE))) {
7511 kqunlock(kq);
7512 return error == EWOULDBLOCK ? 0 : error;
7513 }
5ba3f43e 7514
cb323159
A
7515 waitq_assert_wait64_leeway((struct waitq *)&kq->kq_wqs,
7516 KQ_EVENT, THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL,
7517 kectx->kec_deadline, TIMEOUT_NO_LEEWAY);
7518 kq->kq_state |= KQ_SLEEP;
91447636 7519
cb323159 7520 kqunlock(kq);
5ba3f43e 7521
cb323159
A
7522 if (__probable((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0)) {
7523 thread_block_parameter(kqueue_scan_continue, kq);
7524 __builtin_unreachable();
7525 }
7526
7527 wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
7528 switch (wr) {
7529 case THREAD_AWAKENED:
7530 break;
7531 case THREAD_TIMED_OUT:
7532 return 0;
7533 case THREAD_INTERRUPTED:
7534 return EINTR;
7535 case THREAD_RESTART:
7536 return EBADF;
7537 default:
7538 panic("%s: - bad wait_result (%d)", __func__, wr);
7539 }
7540 }
5ba3f43e 7541}
cb323159
A
7542
7543/*!
7544 * @function kevent_internal
39037602 7545 *
cb323159
A
7546 * @brief
7547 * Common kevent code.
d9a64523 7548 *
cb323159
A
7549 * @discussion
7550 * Needs to be inlined to specialize for legacy or modern and
7551 * eliminate dead code.
39037602 7552 *
cb323159
A
7553 * This is the core logic of kevent entry points, that will:
7554 * - register kevents
7555 * - optionally scan the kqueue for events
7556 *
7557 * The caller is giving kevent_internal a reference on the kqueue
7558 * or its fileproc that needs to be cleaned up by kevent_cleanup().
55e303ae 7559 */
cb323159
A
7560OS_ALWAYS_INLINE
7561static inline int
7562kevent_internal(kqueue_t kqu,
7563 user_addr_t changelist, int nchanges,
7564 user_addr_t ueventlist, int nevents,
7565 int flags, kevent_ctx_t kectx, int32_t *retval,
7566 bool legacy)
55e303ae 7567{
cb323159 7568 int error = 0, noutputs = 0, register_rc;
d9a64523 7569
cb323159
A
7570 /* only bound threads can receive events on workloops */
7571 if (!legacy && (flags & KEVENT_FLAG_WORKLOOP)) {
7572#if CONFIG_WORKLOOP_DEBUG
7573 UU_KEVENT_HISTORY_WRITE_ENTRY(current_uthread(), {
7574 .uu_kqid = kqu.kqwl->kqwl_dynamicid,
7575 .uu_kq = error ? NULL : kqu.kq,
7576 .uu_error = error,
7577 .uu_nchanges = nchanges,
7578 .uu_nevents = nevents,
7579 .uu_flags = flags,
7580 });
7581#endif // CONFIG_WORKLOOP_DEBUG
d9a64523 7582
cb323159
A
7583 if (flags & KEVENT_FLAG_KERNEL) {
7584 /* see kevent_workq_internal */
7585 error = copyout(&kqu.kqwl->kqwl_dynamicid,
7586 ueventlist - sizeof(kqueue_id_t), sizeof(kqueue_id_t));
7587 kectx->kec_data_resid -= sizeof(kqueue_id_t);
7588 if (__improbable(error)) {
7589 goto out;
7590 }
7591 }
7592
7593 if (kevent_args_requesting_events(flags, nevents)) {
7594 /*
7595 * Disable the R2K notification while doing a register, if the
7596 * caller wants events too, we don't want the AST to be set if we
7597 * will process these events soon.
7598 */
7599 kqlock(kqu);
7600 kqu.kq->kq_state &= ~KQ_R2K_ARMED;
7601 kqunlock(kqu);
7602 flags |= KEVENT_FLAG_NEEDS_END_PROCESSING;
7603 }
d9a64523 7604 }
d9a64523 7605
cb323159
A
7606 /* register all the change requests the user provided... */
7607 while (nchanges > 0 && error == 0) {
7608 struct kevent_qos_s kev;
7609 struct knote *kn = NULL;
7610
7611 if (legacy) {
7612 error = kevent_legacy_copyin(&changelist, &kev, flags);
7613 } else {
7614 error = kevent_modern_copyin(&changelist, &kev);
7615 }
7616 if (error) {
7617 break;
7618 }
7619
7620 register_rc = kevent_register(kqu.kq, &kev, &kn);
7621 if (__improbable(!legacy && (register_rc & FILTER_REGISTER_WAIT))) {
7622 thread_t thread = current_thread();
7623
7624 kqlock_held(kqu);
7625
7626 if (act_clear_astkevent(thread, AST_KEVENT_REDRIVE_THREADREQ)) {
7627 workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
7628 }
7629
7630 // f_post_register_wait is meant to call a continuation and not to
7631 // return, which is why we don't support FILTER_REGISTER_WAIT if
7632 // KEVENT_FLAG_ERROR_EVENTS is not passed, or if the event that
7633 // waits isn't the last.
7634 //
7635 // It is implementable, but not used by any userspace code at the
7636 // moment, so for now return ENOTSUP if someone tries to do it.
7637 if (nchanges == 1 && noutputs < nevents &&
7638 (flags & KEVENT_FLAG_KERNEL) == 0 &&
7639 (flags & KEVENT_FLAG_PARKING) == 0 &&
7640 (flags & KEVENT_FLAG_ERROR_EVENTS) &&
7641 (flags & KEVENT_FLAG_WORKLOOP)) {
7642 uthread_t ut = get_bsdthread_info(thread);
7643
7644 /*
7645 * store the continuation/completion data in the uthread
7646 *
7647 * Note: the kectx aliases with this,
7648 * and is destroyed in the process.
7649 */
7650 ut->uu_save.uus_kevent_register = (struct _kevent_register){
7651 .kev = kev,
7652 .kqwl = kqu.kqwl,
7653 .eventout = noutputs,
7654 .ueventlist = ueventlist,
7655 };
7656 knote_fops(kn)->f_post_register_wait(ut, kn,
7657 &ut->uu_save.uus_kevent_register);
7658 __builtin_unreachable();
7659 }
7660 kqunlock(kqu);
7661
7662 kev.flags |= EV_ERROR;
7663 kev.data = ENOTSUP;
7664 } else {
7665 assert((register_rc & FILTER_REGISTER_WAIT) == 0);
7666 }
39037602 7667
cb323159
A
7668 // keep in sync with kevent_register_wait_return()
7669 if (noutputs < nevents && (kev.flags & (EV_ERROR | EV_RECEIPT))) {
7670 if ((kev.flags & EV_ERROR) == 0) {
7671 kev.flags |= EV_ERROR;
7672 kev.data = 0;
7673 }
7674 if (legacy) {
7675 error = kevent_legacy_copyout(&kev, &ueventlist, flags);
7676 } else {
7677 error = kevent_modern_copyout(&kev, &ueventlist);
7678 }
7679 if (error == 0) {
7680 noutputs++;
7681 }
7682 } else if (kev.flags & EV_ERROR) {
f427ee49 7683 error = (int)kev.data;
cb323159
A
7684 }
7685 nchanges--;
39037602
A
7686 }
7687
cb323159
A
7688 if ((flags & KEVENT_FLAG_ERROR_EVENTS) == 0 &&
7689 nevents > 0 && noutputs == 0 && error == 0) {
7690 kectx->kec_process_flags = flags;
7691 kectx->kec_process_nevents = nevents;
7692 kectx->kec_process_noutputs = 0;
7693 kectx->kec_process_eventlist = ueventlist;
7694
7695 if (legacy) {
7696 error = kqueue_scan(kqu.kq, flags, kectx, kevent_legacy_callback);
7697 } else {
7698 error = kqueue_scan(kqu.kq, flags, kectx, kevent_modern_callback);
7699 }
7700
7701 noutputs = kectx->kec_process_noutputs;
7702 } else if (!legacy && (flags & KEVENT_FLAG_NEEDS_END_PROCESSING)) {
7703 /*
7704		 * If we didn't go through kqworkloop_end_processing(),
7705 * we need to do it here.
7706 *
7707 * kqueue_scan will call kqworkloop_end_processing(),
7708 * so we only need to do it if we didn't scan.
7709 */
7710 kqlock(kqu);
7711 kqworkloop_end_processing(kqu.kqwl, 0, 0);
7712 kqunlock(kqu);
0a7de745 7713 }
91447636 7714
cb323159
A
7715 *retval = noutputs;
7716out:
7717 return kevent_cleanup(kqu.kq, flags, error, kectx);
55e303ae
A
7718}
7719
cb323159 7720#pragma mark modern syscalls: kevent_qos, kevent_id, kevent_workq_internal
5ba3f43e 7721
cb323159
A
7722/*!
7723 * @function kevent_modern_internal
7724 *
7725 * @brief
7726 * The backend of the kevent_id and kevent_workq_internal entry points.
7727 *
7728 * @discussion
7729 * Needs to be inline due to the number of arguments.
7730 */
7731OS_NOINLINE
7732static int
7733kevent_modern_internal(kqueue_t kqu,
7734 user_addr_t changelist, int nchanges,
7735 user_addr_t ueventlist, int nevents,
7736 int flags, kevent_ctx_t kectx, int32_t *retval)
7737{
7738 return kevent_internal(kqu.kq, changelist, nchanges,
7739 ueventlist, nevents, flags, kectx, retval, /*legacy*/ false);
b0d623f7 7740}
91447636 7741
cb323159
A
7742/*!
7743 * @function kevent_id
7744 *
7745 * @brief
7746 * The kevent_id() syscall.
7747 */
7748int
7749kevent_id(struct proc *p, struct kevent_id_args *uap, int32_t *retval)
39236c6e 7750{
cb323159
A
7751 int error, flags = uap->flags & KEVENT_FLAG_USER;
7752 uthread_t uth = current_uthread();
7753 workq_threadreq_t kqr = uth->uu_kqr_bound;
7754 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7755 kqueue_t kqu;
7756
7757 flags = kevent_adjust_flags_for_proc(p, flags);
7758 flags |= KEVENT_FLAG_DYNAMIC_KQUEUE;
7759
7760 if (__improbable((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP)) !=
7761 KEVENT_FLAG_WORKLOOP)) {
7762 return EINVAL;
0a7de745 7763 }
39037602 7764
cb323159
A
7765 error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
7766 if (__improbable(error)) {
7767 return error;
0a7de745 7768 }
39037602 7769
cb323159
A
7770 kectx->kec_deadline = 0;
7771 kectx->kec_fp = NULL;
7772 kectx->kec_fd = -1;
7773	/* the kec_process_* fields are only filled if kqueue_scan is called */
5ba3f43e 7774
cb323159
A
7775 /*
7776 * Get the kq we are going to be working on
7777 * As a fastpath, look at the currently bound workloop.
7778 */
7779 kqu.kqwl = kqr ? kqr_kqworkloop(kqr) : NULL;
7780 if (kqu.kqwl && kqu.kqwl->kqwl_dynamicid == uap->id) {
7781 if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
7782 return EEXIST;
7783 }
7784 kqworkloop_retain(kqu.kqwl);
7785 } else if (__improbable(kevent_args_requesting_events(flags, uap->nevents))) {
7786 return EXDEV;
7787 } else {
7788 error = kqworkloop_get_or_create(p, uap->id, NULL, flags, &kqu.kqwl);
7789 if (__improbable(error)) {
7790 return error;
5ba3f43e 7791 }
5ba3f43e 7792 }
cb323159
A
7793
7794 return kevent_modern_internal(kqu, uap->changelist, uap->nchanges,
7795 uap->eventlist, uap->nevents, flags, kectx, retval);
39037602
A
7796}
7797
cb323159
A
7798/**!
7799 * @function kevent_workq_internal
7800 *
7801 * @discussion
7802 * This function is exported for the sake of the workqueue subsystem.
7803 *
7804 * It is called in two ways:
7805 * - when a thread is about to go to userspace to ask for pending events
7806 * - when a thread is returning from userspace with events back
7807 *
7808 * the workqueue subsystem will only use the following flags:
7809 * - KEVENT_FLAG_STACK_DATA (always)
7810 * - KEVENT_FLAG_IMMEDIATE (always)
7811 * - KEVENT_FLAG_PARKING (depending on whether it is going to or returning from
7812 * userspace).
7813 *
7814 * It implicitly acts on the bound kqueue, and for the case of workloops
7815 * will copyout the kqueue ID before anything else.
7816 *
7817 *
7818 * Pthread will have setup the various arguments to fit this stack layout:
7819 *
7820 * +-------....----+--------------+-----------+--------------------+
7821 * | user stack | data avail | nevents | pthread_self() |
7822 * +-------....----+--------------+-----------+--------------------+
7823 * ^ ^
7824 * data_out eventlist
7825 *
7826 * When a workloop is used, the workloop ID is copied out right before
7827 * the eventlist and is taken from the data buffer.
7828 *
7829 * @warning
7830 * This function is carefully tailored to not make any call except the final tail
7831 * call into kevent_modern_internal. (LTO inlines current_uthread()).
7832 *
7833 * This function is performance sensitive due to the workq subsystem.
7834 */
7835int
7836kevent_workq_internal(struct proc *p,
7837 user_addr_t changelist, int nchanges,
7838 user_addr_t eventlist, int nevents,
7839 user_addr_t data_out, user_size_t *data_available,
7840 unsigned int flags, int32_t *retval)
39037602 7841{
cb323159
A
7842 uthread_t uth = current_uthread();
7843 workq_threadreq_t kqr = uth->uu_kqr_bound;
7844 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7845 kqueue_t kqu;
39037602 7846
cb323159
A
7847 assert(flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE) ||
7848 flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_PARKING));
39037602 7849
cb323159
A
7850 kectx->kec_data_out = data_out;
7851 kectx->kec_data_avail = (uint64_t)data_available;
7852 kectx->kec_data_size = *data_available;
7853 kectx->kec_data_resid = *data_available;
7854 kectx->kec_deadline = 0;
7855 kectx->kec_fp = NULL;
7856 kectx->kec_fd = -1;
7857	/* the kec_process_* fields are only filled if kqueue_scan is called */
5ba3f43e 7858
cb323159 7859 flags = kevent_adjust_flags_for_proc(p, flags);
39037602 7860
cb323159
A
7861 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
7862 kqu.kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
7863 kqworkloop_retain(kqu.kqwl);
7864
7865 flags |= KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_DYNAMIC_KQUEUE |
7866 KEVENT_FLAG_KERNEL;
7867 } else {
7868 kqu.kqwq = p->p_fd->fd_wqkqueue;
7869
7870 flags |= KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL;
0a7de745 7871 }
39037602 7872
cb323159
A
7873 return kevent_modern_internal(kqu, changelist, nchanges,
7874 eventlist, nevents, flags, kectx, retval);
91447636 7875}
55e303ae 7876
cb323159
A
7877/*!
7878 * @function kevent_qos
7879 *
7880 * @brief
7881 * The kevent_qos() syscall.
7882 */
7883int
7884kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
39037602 7885{
cb323159
A
7886 uthread_t uth = current_uthread();
7887 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7888 int error, flags = uap->flags & KEVENT_FLAG_USER;
7889 struct kqueue *kq;
39037602 7890
cb323159
A
7891 if (__improbable(flags & KEVENT_ID_FLAG_USER)) {
7892 return EINVAL;
0a7de745 7893 }
39037602 7894
cb323159 7895 flags = kevent_adjust_flags_for_proc(p, flags);
39037602 7896
cb323159
A
7897 error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
7898 if (__improbable(error)) {
7899 return error;
5ba3f43e
A
7900 }
7901
cb323159
A
7902 kectx->kec_deadline = 0;
7903 kectx->kec_fp = NULL;
7904 kectx->kec_fd = uap->fd;
7905	/* the kec_process_* fields are only filled if kqueue_scan is called */
5ba3f43e 7906
cb323159
A
7907 /* get the kq we are going to be working on */
7908 if (__probable(flags & KEVENT_FLAG_WORKQ)) {
7909 error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
7910 } else {
7911 error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
5ba3f43e 7912 }
cb323159
A
7913 if (__improbable(error)) {
7914 return error;
0a7de745 7915 }
55e303ae 7916
cb323159
A
7917 return kevent_modern_internal(kq, uap->changelist, uap->nchanges,
7918 uap->eventlist, uap->nevents, flags, kectx, retval);
55e303ae
A
7919}
7920
cb323159 7921#pragma mark legacy syscalls: kevent, kevent64
39037602 7922
cb323159
A
7923/*!
7924 * @function kevent_legacy_get_deadline
7925 *
7926 * @brief
7927 * Compute the deadline for the legacy kevent syscalls.
7928 *
7929 * @discussion
7930 * This is not necessary if KEVENT_FLAG_IMMEDIATE is specified,
7931 * as this takes precedence over the deadline.
7932 *
7933 * This function will fail if utimeout is USER_ADDR_NULL
7934 * (the caller should check).
7935 */
7936static int
7937kevent_legacy_get_deadline(int flags, user_addr_t utimeout, uint64_t *deadline)
55e303ae 7938{
cb323159 7939 struct timespec ts;
5ba3f43e 7940
cb323159
A
7941 if (flags & KEVENT_FLAG_PROC64) {
7942 struct user64_timespec ts64;
7943 int error = copyin(utimeout, &ts64, sizeof(ts64));
7944 if (__improbable(error)) {
7945 return error;
7946 }
f427ee49
A
7947 ts.tv_sec = (unsigned long)ts64.tv_sec;
7948 ts.tv_nsec = (long)ts64.tv_nsec;
cb323159
A
7949 } else {
7950 struct user32_timespec ts32;
7951 int error = copyin(utimeout, &ts32, sizeof(ts32));
7952 if (__improbable(error)) {
7953 return error;
7954 }
7955 ts.tv_sec = ts32.tv_sec;
7956 ts.tv_nsec = ts32.tv_nsec;
7957 }
7958 if (!timespec_is_valid(&ts)) {
7959 return EINVAL;
0a7de745 7960 }
55e303ae 7961
cb323159
A
7962 clock_absolutetime_interval_to_deadline(tstoabstime(&ts), deadline);
7963 return 0;
55e303ae
A
7964}
7965
cb323159
A
7966/*!
7967 * @function kevent_legacy_internal
7968 *
7969 * @brief
7970 * The core implementation for kevent and kevent64
7971 */
7972OS_NOINLINE
7973static int
7974kevent_legacy_internal(struct proc *p, struct kevent64_args *uap,
7975 int32_t *retval, int flags)
55e303ae 7976{
cb323159
A
7977 uthread_t uth = current_uthread();
7978 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7979 struct kqueue *kq;
7980 int error;
39037602 7981
cb323159
A
7982 if (__improbable(uap->flags & KEVENT_ID_FLAG_USER)) {
7983 return EINVAL;
7984 }
91447636 7985
cb323159 7986 flags = kevent_adjust_flags_for_proc(p, flags);
5ba3f43e 7987
cb323159
A
7988 kectx->kec_data_out = 0;
7989 kectx->kec_data_avail = 0;
7990 kectx->kec_data_size = 0;
7991 kectx->kec_data_resid = 0;
7992 kectx->kec_deadline = 0;
7993 kectx->kec_fp = NULL;
7994 kectx->kec_fd = uap->fd;
7995	/* the kec_process_* fields are only filled if kqueue_scan is called */
91447636 7996
cb323159
A
7997 /* convert timeout to absolute - if we have one (and not immediate) */
7998 if (__improbable(uap->timeout && !(flags & KEVENT_FLAG_IMMEDIATE))) {
7999 error = kevent_legacy_get_deadline(flags, uap->timeout,
8000 &kectx->kec_deadline);
8001 if (__improbable(error)) {
8002 return error;
8003 }
8004 }
91447636 8005
cb323159
A
8006 /* get the kq we are going to be working on */
8007 if (flags & KEVENT_FLAG_WORKQ) {
8008 error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
8009 } else {
8010 error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
8011 }
8012 if (__improbable(error)) {
8013 return error;
8014 }
91447636 8015
cb323159
A
8016 return kevent_internal(kq, uap->changelist, uap->nchanges,
8017 uap->eventlist, uap->nevents, flags, kectx, retval,
8018 /*legacy*/ true);
55e303ae 8019}
55e303ae 8020
cb323159
A
8021/*!
8022 * @function kevent
8023 *
8024 * @brief
8025 * The legacy kevent() syscall.
8026 */
8027int
8028kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
39037602 8029{
cb323159
A
8030 struct kevent64_args args = {
8031 .fd = uap->fd,
8032 .changelist = uap->changelist,
8033 .nchanges = uap->nchanges,
8034 .eventlist = uap->eventlist,
8035 .nevents = uap->nevents,
8036 .timeout = uap->timeout,
8037 };
39037602 8038
cb323159 8039 return kevent_legacy_internal(p, &args, retval, KEVENT_FLAG_LEGACY32);
55e303ae
A
8040}
8041
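/*
 * Illustrative sketch (not part of this file): the user-space view of the
 * legacy kevent() entry point above -- create a kqueue, register one read
 * knote, and wait for it with a timeout. The fd being watched is a
 * placeholder; only the public <sys/event.h> API is assumed.
 */
#if 0   /* user-space example, hedged sketch only */
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>

int
wait_for_readable(int fd)
{
	int kq = kqueue();
	if (kq < 0) {
		return -1;
	}

	/* EV_SET fills one struct kevent change record */
	struct kevent change;
	EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);

	/*
	 * one changelist entry, one eventlist slot, 500ms timeout;
	 * this timeout is what kevent_legacy_get_deadline() converts above
	 */
	struct timespec timeout = { .tv_sec = 0, .tv_nsec = 500 * 1000 * 1000 };
	struct kevent event;
	int nev = kevent(kq, &change, 1, &event, 1, &timeout);

	close(kq);
	return nev;     /* -1 error, 0 timeout, 1 event delivered */
}
#endif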
cb323159
A
8042/*!
8043 * @function kevent64
8044 *
8045 * @brief
8046 * The legacy kevent64() syscall.
8047 */
8048int
8049kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
55e303ae 8050{
cb323159
A
8051 int flags = (uap->flags & KEVENT_FLAG_USER) | KEVENT_FLAG_LEGACY64;
8052 return kevent_legacy_internal(p, uap, retval, flags);
55e303ae
A
8053}
8054
cb323159
A
8055#pragma mark - socket interface
8056
2d21ac55 8057#if SOCKETS
1c79356b
A
8058#include <sys/param.h>
8059#include <sys/socket.h>
8060#include <sys/protosw.h>
8061#include <sys/domain.h>
8062#include <sys/mbuf.h>
8063#include <sys/kern_event.h>
8064#include <sys/malloc.h>
9bccf70c
A
8065#include <sys/sys_domain.h>
8066#include <sys/syslog.h>
1c79356b 8067
fe8ab488 8068#ifndef ROUNDUP64
0a7de745 8069#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
fe8ab488
A
8070#endif
8071
8072#ifndef ADVANCE64
0a7de745 8073#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
fe8ab488
A
8074#endif
8075
f427ee49
A
8076static LCK_GRP_DECLARE(kev_lck_grp, "Kernel Event Protocol");
8077static LCK_RW_DECLARE(kev_rwlock, &kev_lck_grp);
1c79356b 8078
91447636
A
8079static int kev_attach(struct socket *so, int proto, struct proc *p);
8080static int kev_detach(struct socket *so);
39236c6e 8081static int kev_control(struct socket *so, u_long cmd, caddr_t data,
0a7de745 8082 struct ifnet *ifp, struct proc *p);
39236c6e
A
8083static lck_mtx_t * event_getlock(struct socket *, int);
8084static int event_lock(struct socket *, int, void *);
8085static int event_unlock(struct socket *, int, void *);
8086
8087static int event_sofreelastref(struct socket *);
8088static void kev_delete(struct kern_event_pcb *);
8089
8090static struct pr_usrreqs event_usrreqs = {
0a7de745
A
8091 .pru_attach = kev_attach,
8092 .pru_control = kev_control,
8093 .pru_detach = kev_detach,
8094 .pru_soreceive = soreceive,
91447636 8095};
1c79356b 8096
39236c6e 8097static struct protosw eventsw[] = {
0a7de745
A
8098 {
8099 .pr_type = SOCK_RAW,
8100 .pr_protocol = SYSPROTO_EVENT,
8101 .pr_flags = PR_ATOMIC,
8102 .pr_usrreqs = &event_usrreqs,
8103 .pr_lock = event_lock,
8104 .pr_unlock = event_unlock,
8105 .pr_getlock = event_getlock,
8106 }
1c79356b
A
8107};
8108
fe8ab488
A
8109__private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS;
8110__private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS;
8111
8112SYSCTL_NODE(_net_systm, OID_AUTO, kevt,
0a7de745 8113 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel event family");
fe8ab488
A
8114
8115struct kevtstat kevtstat;
8116SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats,
0a7de745
A
8117 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
8118 kevt_getstat, "S,kevtstat", "");
fe8ab488
A
8119
8120SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist,
0a7de745
A
8121 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
8122 kevt_pcblist, "S,xkevtpcb", "");
fe8ab488 8123
39236c6e 8124static lck_mtx_t *
5ba3f43e 8125event_getlock(struct socket *so, int flags)
39236c6e 8126{
5ba3f43e 8127#pragma unused(flags)
39236c6e
A
8128 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
8129
0a7de745
A
8130 if (so->so_pcb != NULL) {
8131 if (so->so_usecount < 0) {
39236c6e
A
8132 panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
8133 so, so->so_usecount, solockhistory_nr(so));
0a7de745
A
8134 }
8135 /* NOTREACHED */
39236c6e
A
8136 } else {
8137 panic("%s: so=%p NULL NO so_pcb %s\n", __func__,
8138 so, solockhistory_nr(so));
8139 /* NOTREACHED */
8140 }
0a7de745 8141 return &ev_pcb->evp_mtx;
39236c6e
A
8142}
8143
8144static int
8145event_lock(struct socket *so, int refcount, void *lr)
8146{
8147 void *lr_saved;
8148
0a7de745 8149 if (lr == NULL) {
39236c6e 8150 lr_saved = __builtin_return_address(0);
0a7de745 8151 } else {
39236c6e 8152 lr_saved = lr;
0a7de745 8153 }
39236c6e
A
8154
8155 if (so->so_pcb != NULL) {
8156 lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
0a7de745 8157 } else {
39236c6e
A
8158 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
8159 so, lr_saved, solockhistory_nr(so));
8160 /* NOTREACHED */
8161 }
8162
8163 if (so->so_usecount < 0) {
8164 panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s\n", __func__,
8165 so, so->so_pcb, lr_saved, so->so_usecount,
8166 solockhistory_nr(so));
8167 /* NOTREACHED */
8168 }
8169
0a7de745 8170 if (refcount) {
39236c6e 8171 so->so_usecount++;
0a7de745 8172 }
39236c6e
A
8173
8174 so->lock_lr[so->next_lock_lr] = lr_saved;
0a7de745
A
8175 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
8176 return 0;
39236c6e
A
8177}
8178
8179static int
8180event_unlock(struct socket *so, int refcount, void *lr)
8181{
8182 void *lr_saved;
8183 lck_mtx_t *mutex_held;
8184
0a7de745 8185 if (lr == NULL) {
39236c6e 8186 lr_saved = __builtin_return_address(0);
0a7de745 8187 } else {
39236c6e 8188 lr_saved = lr;
0a7de745 8189 }
39236c6e 8190
d190cdc3 8191 if (refcount) {
39236c6e 8192 so->so_usecount--;
d190cdc3 8193 }
39236c6e
A
8194 if (so->so_usecount < 0) {
8195 panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
8196 so, so->so_usecount, solockhistory_nr(so));
8197 /* NOTREACHED */
8198 }
8199 if (so->so_pcb == NULL) {
8200 panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s\n", __func__,
8201 so, so->so_usecount, (void *)lr_saved,
8202 solockhistory_nr(so));
8203 /* NOTREACHED */
8204 }
8205 mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
8206
5ba3f43e 8207 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
39236c6e 8208 so->unlock_lr[so->next_unlock_lr] = lr_saved;
0a7de745 8209 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
39236c6e
A
8210
8211 if (so->so_usecount == 0) {
8212 VERIFY(so->so_flags & SOF_PCBCLEARING);
8213 event_sofreelastref(so);
8214 } else {
8215 lck_mtx_unlock(mutex_held);
8216 }
8217
0a7de745 8218 return 0;
39236c6e
A
8219}
8220
8221static int
8222event_sofreelastref(struct socket *so)
8223{
8224 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
8225
5ba3f43e 8226 LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);
39236c6e
A
8227
8228 so->so_pcb = NULL;
8229
8230 /*
8231 * Disable upcall in the event another thread is in kev_post_msg()
8232 * appending record to the receive socket buffer, since sbwakeup()
8233 * may release the socket lock otherwise.
8234 */
8235 so->so_rcv.sb_flags &= ~SB_UPCALL;
8236 so->so_snd.sb_flags &= ~SB_UPCALL;
fe8ab488 8237 so->so_event = sonullevent;
39236c6e
A
8238 lck_mtx_unlock(&(ev_pcb->evp_mtx));
8239
5ba3f43e 8240 LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
f427ee49 8241 lck_rw_lock_exclusive(&kev_rwlock);
39236c6e 8242 LIST_REMOVE(ev_pcb, evp_link);
fe8ab488
A
8243 kevtstat.kes_pcbcount--;
8244 kevtstat.kes_gencnt++;
f427ee49 8245 lck_rw_done(&kev_rwlock);
39236c6e
A
8246 kev_delete(ev_pcb);
8247
8248 sofreelastref(so, 1);
0a7de745 8249 return 0;
39236c6e
A
8250}
8251
0a7de745 8252static int event_proto_count = (sizeof(eventsw) / sizeof(struct protosw));
39236c6e 8253
1c79356b
A
8254static
8255struct kern_event_head kern_event_head;
8256
b0d623f7 8257static u_int32_t static_event_id = 0;
39236c6e 8258
f427ee49
A
8259static ZONE_DECLARE(ev_pcb_zone, "kerneventpcb",
8260 sizeof(struct kern_event_pcb), ZC_ZFREE_CLEARMEM);
1c79356b 8261
9bccf70c 8262/*
39236c6e 8263 * Install the protosw's for the NKE manager. Invoked at extension load time
9bccf70c 8264 */
39236c6e
A
8265void
8266kern_event_init(struct domain *dp)
9bccf70c 8267{
39236c6e
A
8268 struct protosw *pr;
8269 int i;
8270
8271 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
8272 VERIFY(dp == systemdomain);
8273
0a7de745 8274 for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++) {
39236c6e 8275 net_add_proto(pr, dp, 1);
0a7de745 8276 }
9bccf70c
A
8277}
8278
91447636
A
8279static int
8280kev_attach(struct socket *so, __unused int proto, __unused struct proc *p)
1c79356b 8281{
39236c6e
A
8282 int error = 0;
8283 struct kern_event_pcb *ev_pcb;
1c79356b 8284
39236c6e 8285 error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
0a7de745
A
8286 if (error != 0) {
8287 return error;
8288 }
55e303ae 8289
f427ee49
A
8290 ev_pcb = zalloc_flags(ev_pcb_zone, Z_WAITOK | Z_ZERO);
8291 lck_mtx_init(&ev_pcb->evp_mtx, &kev_lck_grp, LCK_ATTR_NULL);
1c79356b 8292
39236c6e
A
8293 ev_pcb->evp_socket = so;
8294 ev_pcb->evp_vendor_code_filter = 0xffffffff;
1c79356b 8295
39236c6e 8296 so->so_pcb = (caddr_t) ev_pcb;
f427ee49 8297 lck_rw_lock_exclusive(&kev_rwlock);
39236c6e 8298 LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link);
fe8ab488
A
8299 kevtstat.kes_pcbcount++;
8300 kevtstat.kes_gencnt++;
f427ee49 8301 lck_rw_done(&kev_rwlock);
1c79356b 8302
0a7de745 8303 return error;
1c79356b
A
8304}
8305
39236c6e
A
8306static void
8307kev_delete(struct kern_event_pcb *ev_pcb)
8308{
8309 VERIFY(ev_pcb != NULL);
f427ee49 8310 lck_mtx_destroy(&ev_pcb->evp_mtx, &kev_lck_grp);
39236c6e
A
8311 zfree(ev_pcb_zone, ev_pcb);
8312}
1c79356b 8313
91447636
A
8314static int
8315kev_detach(struct socket *so)
1c79356b 8316{
39236c6e 8317 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;
1c79356b 8318
39236c6e
A
8319 if (ev_pcb != NULL) {
8320 soisdisconnected(so);
91447636 8321 so->so_flags |= SOF_PCBCLEARING;
39236c6e 8322 }
1c79356b 8323
0a7de745 8324 return 0;
1c79356b
A
8325}
8326
91447636 8327/*
2d21ac55 8328 * For now, kev_vendor_code and mbuf_tags use the same
91447636
A
8329 * mechanism.
8330 */
0a7de745
A
8331errno_t
8332kev_vendor_code_find(
8333 const char *string,
8334 u_int32_t *out_vendor_code)
91447636
A
8335{
8336 if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
0a7de745 8337 return EINVAL;
91447636 8338 }
0a7de745
A
8339 return net_str_id_find_internal(string, out_vendor_code,
8340 NSI_VENDOR_CODE, 1);
91447636
A
8341}
8342
39236c6e
A
8343errno_t
8344kev_msg_post(struct kev_msg *event_msg)
91447636 8345{
39236c6e
A
8346 mbuf_tag_id_t min_vendor, max_vendor;
8347
b0d623f7 8348 net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);
39236c6e 8349
0a7de745
A
8350 if (event_msg == NULL) {
8351 return EINVAL;
8352 }
39236c6e 8353
d9a64523 8354 /*
39236c6e
A
8355 * Limit third parties to posting events for registered vendor codes
8356 * only
8357 */
91447636 8358 if (event_msg->vendor_code < min_vendor ||
fe8ab488 8359 event_msg->vendor_code > max_vendor) {
cb323159 8360 os_atomic_inc(&kevtstat.kes_badvendor, relaxed);
0a7de745 8361 return EINVAL;
fe8ab488 8362 }
0a7de745 8363 return kev_post_msg(event_msg);
91447636 8364}
1c79356b 8365
39236c6e
A
8366int
8367kev_post_msg(struct kev_msg *event_msg)
1c79356b 8368{
39236c6e
A
8369 struct mbuf *m, *m2;
8370 struct kern_event_pcb *ev_pcb;
8371 struct kern_event_msg *ev;
8372 char *tmp;
8373 u_int32_t total_size;
8374 int i;
1c79356b 8375
91447636
A
8376 /* Verify the message is small enough to fit in one mbuf w/o cluster */
8377 total_size = KEV_MSG_HEADER_SIZE;
39236c6e 8378
91447636 8379 for (i = 0; i < 5; i++) {
0a7de745 8380 if (event_msg->dv[i].data_length == 0) {
91447636 8381 break;
0a7de745 8382 }
91447636
A
8383 total_size += event_msg->dv[i].data_length;
8384 }
39236c6e 8385
91447636 8386 if (total_size > MLEN) {
cb323159 8387 os_atomic_inc(&kevtstat.kes_toobig, relaxed);
0a7de745 8388 return EMSGSIZE;
39236c6e
A
8389 }
8390
5ba3f43e 8391 m = m_get(M_WAIT, MT_DATA);
fe8ab488 8392 if (m == 0) {
cb323159 8393 os_atomic_inc(&kevtstat.kes_nomem, relaxed);
0a7de745 8394 return ENOMEM;
fe8ab488 8395 }
39236c6e
A
8396 ev = mtod(m, struct kern_event_msg *);
8397 total_size = KEV_MSG_HEADER_SIZE;
8398
8399 tmp = (char *) &ev->event_data[0];
8400 for (i = 0; i < 5; i++) {
0a7de745 8401 if (event_msg->dv[i].data_length == 0) {
39236c6e 8402 break;
0a7de745 8403 }
39236c6e
A
8404
8405 total_size += event_msg->dv[i].data_length;
8406 bcopy(event_msg->dv[i].data_ptr, tmp,
8407 event_msg->dv[i].data_length);
8408 tmp += event_msg->dv[i].data_length;
8409 }
8410
8411 ev->id = ++static_event_id;
8412 ev->total_size = total_size;
8413 ev->vendor_code = event_msg->vendor_code;
8414 ev->kev_class = event_msg->kev_class;
8415 ev->kev_subclass = event_msg->kev_subclass;
8416 ev->event_code = event_msg->event_code;
8417
8418 m->m_len = total_size;
f427ee49 8419 lck_rw_lock_shared(&kev_rwlock);
39236c6e
A
8420 for (ev_pcb = LIST_FIRST(&kern_event_head);
8421 ev_pcb;
8422 ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
8423 lck_mtx_lock(&ev_pcb->evp_mtx);
8424 if (ev_pcb->evp_socket->so_pcb == NULL) {
8425 lck_mtx_unlock(&ev_pcb->evp_mtx);
8426 continue;
8427 }
8428 if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) {
8429 if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) {
8430 lck_mtx_unlock(&ev_pcb->evp_mtx);
8431 continue;
8432 }
8433
8434 if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) {
8435 if (ev_pcb->evp_class_filter != ev->kev_class) {
8436 lck_mtx_unlock(&ev_pcb->evp_mtx);
8437 continue;
8438 }
8439
fe8ab488
A
8440 if ((ev_pcb->evp_subclass_filter !=
8441 KEV_ANY_SUBCLASS) &&
8442 (ev_pcb->evp_subclass_filter !=
8443 ev->kev_subclass)) {
39236c6e
A
8444 lck_mtx_unlock(&ev_pcb->evp_mtx);
8445 continue;
8446 }
8447 }
8448 }
8449
5ba3f43e 8450 m2 = m_copym(m, 0, m->m_len, M_WAIT);
39236c6e 8451 if (m2 == 0) {
cb323159 8452 os_atomic_inc(&kevtstat.kes_nomem, relaxed);
39236c6e
A
8453 m_free(m);
8454 lck_mtx_unlock(&ev_pcb->evp_mtx);
f427ee49 8455 lck_rw_done(&kev_rwlock);
0a7de745 8456 return ENOMEM;
39236c6e 8457 }
fe8ab488
A
8458 if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) {
8459 /*
8460 * We use "m" for the socket stats as it would be
8461 * unsafe to use "m2"
8462 */
8463 so_inc_recv_data_stat(ev_pcb->evp_socket,
39037602 8464 1, m->m_len, MBUF_TC_BE);
fe8ab488 8465
39236c6e 8466 sorwakeup(ev_pcb->evp_socket);
cb323159 8467 os_atomic_inc(&kevtstat.kes_posted, relaxed);
fe8ab488 8468 } else {
cb323159 8469 os_atomic_inc(&kevtstat.kes_fullsock, relaxed);
fe8ab488 8470 }
39236c6e
A
8471 lck_mtx_unlock(&ev_pcb->evp_mtx);
8472 }
8473 m_free(m);
f427ee49 8474 lck_rw_done(&kev_rwlock);
39236c6e 8475
0a7de745 8476 return 0;
1c79356b
A
8477}
8478
91447636 8479static int
39236c6e 8480kev_control(struct socket *so,
0a7de745
A
8481 u_long cmd,
8482 caddr_t data,
8483 __unused struct ifnet *ifp,
8484 __unused struct proc *p)
1c79356b 8485{
91447636
A
8486 struct kev_request *kev_req = (struct kev_request *) data;
8487 struct kern_event_pcb *ev_pcb;
8488 struct kev_vendor_code *kev_vendor;
b0d623f7 8489 u_int32_t *id_value = (u_int32_t *) data;
39236c6e 8490
91447636 8491 switch (cmd) {
0a7de745
A
8492 case SIOCGKEVID:
8493 *id_value = static_event_id;
8494 break;
8495 case SIOCSKEVFILT:
8496 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8497 ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
8498 ev_pcb->evp_class_filter = kev_req->kev_class;
8499 ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
8500 break;
8501 case SIOCGKEVFILT:
8502 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8503 kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
8504 kev_req->kev_class = ev_pcb->evp_class_filter;
8505 kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
8506 break;
8507 case SIOCGKEVVENDOR:
8508 kev_vendor = (struct kev_vendor_code *)data;
8509 /* Make sure string is NULL terminated */
8510 kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN - 1] = 0;
8511 return net_str_id_find_internal(kev_vendor->vendor_string,
8512 &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0);
8513 default:
8514 return ENOTSUP;
91447636 8515 }
39236c6e 8516
0a7de745 8517 return 0;
1c79356b
A
8518}
8519
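/*
 * The other ioctls handled above are useful on their own: SIOCGKEVID
 * reads back static_event_id and SIOCGKEVVENDOR resolves a vendor
 * string to its numeric code via net_str_id_find_internal().  A hedged
 * user-space sketch follows; "com.apple" is only an example input, and
 * nothing here is part of the original source.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/kern_event.h>

int
main(void)
{
    struct kev_vendor_code vc;
    u_int32_t last_id = 0;
    int fd = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);

    if (fd < 0) {
        return 1;
    }

    /* SIOCGKEVID reports the id of the most recently posted event. */
    if (ioctl(fd, SIOCGKEVID, &last_id) == 0) {
        printf("last event id: %u\n", last_id);
    }

    /* SIOCGKEVVENDOR maps a vendor string to a vendor code. */
    memset(&vc, 0, sizeof(vc));
    strlcpy(vc.vendor_string, "com.apple", sizeof(vc.vendor_string));
    if (ioctl(fd, SIOCGKEVVENDOR, &vc) == 0) {
        printf("vendor code for %s: %u\n", vc.vendor_string,
            vc.vendor_code);
    }

    close(fd);
    return 0;
}
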
fe8ab488
A
8520int
8521kevt_getstat SYSCTL_HANDLER_ARGS
8522{
8523#pragma unused(oidp, arg1, arg2)
8524 int error = 0;
8525
f427ee49 8526 lck_rw_lock_shared(&kev_rwlock);
fe8ab488
A
8527
8528 if (req->newptr != USER_ADDR_NULL) {
8529 error = EPERM;
8530 goto done;
8531 }
8532 if (req->oldptr == USER_ADDR_NULL) {
8533 req->oldidx = sizeof(struct kevtstat);
8534 goto done;
8535 }
8536
8537 error = SYSCTL_OUT(req, &kevtstat,
8538 MIN(sizeof(struct kevtstat), req->oldlen));
8539done:
f427ee49 8540 lck_rw_done(&kev_rwlock);
fe8ab488 8541
0a7de745 8542 return error;
fe8ab488
A
8543}
8544
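/*
 * kevt_getstat() is a read-only sysctl handler: writes are rejected, a
 * NULL output buffer returns the required size, and otherwise struct
 * kevtstat is copied out under the shared kev_rwlock.  The sketch
 * below shows a user-space read; the OID string ("net.systm.kevt.stats"
 * is assumed here, the registration lives elsewhere) and the visibility
 * of struct kevtstat outside the kernel are assumptions, so treat it
 * purely as illustration.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/kern_event.h>

int
main(void)
{
    struct kevtstat st;
    size_t len = sizeof(st);

    /* A NULL destination would instead return the size in len,
     * matching the req->oldptr == USER_ADDR_NULL branch above. */
    if (sysctlbyname("net.systm.kevt.stats", &st, &len, NULL, 0) != 0) {
        perror("sysctlbyname");
        return 1;
    }
    printf("posted %llu, full-socket drops %llu, pcbs %llu\n",
        (unsigned long long)st.kes_posted,
        (unsigned long long)st.kes_fullsock,
        (unsigned long long)st.kes_pcbcount);
    return 0;
}
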
8545__private_extern__ int
8546kevt_pcblist SYSCTL_HANDLER_ARGS
8547{
8548#pragma unused(oidp, arg1, arg2)
8549 int error = 0;
f427ee49 8550 uint64_t n, i;
fe8ab488
A
8551 struct xsystmgen xsg;
8552 void *buf = NULL;
0a7de745
A
8553 size_t item_size = ROUNDUP64(sizeof(struct xkevtpcb)) +
8554 ROUNDUP64(sizeof(struct xsocket_n)) +
8555 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
8556 ROUNDUP64(sizeof(struct xsockstat_n));
fe8ab488
A
8557 struct kern_event_pcb *ev_pcb;
8558
c3c9b80d 8559 buf = kheap_alloc(KHEAP_TEMP, item_size, Z_WAITOK | Z_ZERO);
0a7de745
A
8560 if (buf == NULL) {
8561 return ENOMEM;
8562 }
fe8ab488 8563
f427ee49 8564 lck_rw_lock_shared(&kev_rwlock);
fe8ab488
A
8565
8566 n = kevtstat.kes_pcbcount;
8567
8568 if (req->oldptr == USER_ADDR_NULL) {
f427ee49 8569 req->oldidx = (size_t) ((n + n / 8) * item_size);
fe8ab488
A
8570 goto done;
8571 }
8572 if (req->newptr != USER_ADDR_NULL) {
8573 error = EPERM;
8574 goto done;
8575 }
0a7de745
A
8576 bzero(&xsg, sizeof(xsg));
8577 xsg.xg_len = sizeof(xsg);
fe8ab488
A
8578 xsg.xg_count = n;
8579 xsg.xg_gen = kevtstat.kes_gencnt;
8580 xsg.xg_sogen = so_gencnt;
0a7de745 8581 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
fe8ab488
A
8582 if (error) {
8583 goto done;
8584 }
8585 /*
8586 * We are done if there is no pcb
8587 */
8588 if (n == 0) {
8589 goto done;
8590 }
8591
8592 i = 0;
8593 for (i = 0, ev_pcb = LIST_FIRST(&kern_event_head);
8594 i < n && ev_pcb != NULL;
8595 i++, ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
8596 struct xkevtpcb *xk = (struct xkevtpcb *)buf;
8597 struct xsocket_n *xso = (struct xsocket_n *)
0a7de745 8598 ADVANCE64(xk, sizeof(*xk));
fe8ab488 8599 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
0a7de745 8600 ADVANCE64(xso, sizeof(*xso));
fe8ab488 8601 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
0a7de745 8602 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
fe8ab488 8603 struct xsockstat_n *xsostats = (struct xsockstat_n *)
0a7de745 8604 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
fe8ab488
A
8605
8606 bzero(buf, item_size);
8607
8608 lck_mtx_lock(&ev_pcb->evp_mtx);
8609
8610 xk->kep_len = sizeof(struct xkevtpcb);
8611 xk->kep_kind = XSO_EVT;
8612 xk->kep_evtpcb = (uint64_t)VM_KERNEL_ADDRPERM(ev_pcb);
8613 xk->kep_vendor_code_filter = ev_pcb->evp_vendor_code_filter;
8614 xk->kep_class_filter = ev_pcb->evp_class_filter;
8615 xk->kep_subclass_filter = ev_pcb->evp_subclass_filter;
8616
8617 sotoxsocket_n(ev_pcb->evp_socket, xso);
8618 sbtoxsockbuf_n(ev_pcb->evp_socket ?
0a7de745 8619 &ev_pcb->evp_socket->so_rcv : NULL, xsbrcv);
fe8ab488 8620 sbtoxsockbuf_n(ev_pcb->evp_socket ?
0a7de745 8621 &ev_pcb->evp_socket->so_snd : NULL, xsbsnd);
fe8ab488
A
8622 sbtoxsockstat_n(ev_pcb->evp_socket, xsostats);
8623
8624 lck_mtx_unlock(&ev_pcb->evp_mtx);
8625
8626 error = SYSCTL_OUT(req, buf, item_size);
8627 }
8628
8629 if (error == 0) {
8630 /*
8631 * Give the user an updated idea of our state.
8632 * If the generation differs from what we told
8633 * her before, she knows that something happened
8634 * while we were processing this request, and it
8635 * might be necessary to retry.
8636 */
0a7de745
A
8637 bzero(&xsg, sizeof(xsg));
8638 xsg.xg_len = sizeof(xsg);
fe8ab488
A
8639 xsg.xg_count = n;
8640 xsg.xg_gen = kevtstat.kes_gencnt;
8641 xsg.xg_sogen = so_gencnt;
0a7de745 8642 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
fe8ab488
A
8643 if (error) {
8644 goto done;
8645 }
8646 }
8647
8648done:
f427ee49
A
8649 lck_rw_done(&kev_rwlock);
8650
c3c9b80d 8651 kheap_free(KHEAP_TEMP, buf, item_size);
0a7de745 8652 return error;
fe8ab488
A
8653}
8654
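/*
 * kevt_pcblist() follows the classic BSD pcblist protocol: a sizing
 * probe when oldptr is NULL (with n + n/8 slack for pcbs created in
 * the meantime), one fixed-size record per pcb, and xsystmgen markers
 * whose generation counts let the caller notice that the list changed
 * while it was being walked.  The two-pass fetch looks roughly like
 * the sketch below; the OID string is an assumption and the buffer is
 * deliberately left unparsed to avoid depending on structure layouts
 * not shown here.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
    size_t len = 0;
    void *buf;

    /* Pass 1: sizing probe, the req->oldptr == USER_ADDR_NULL case. */
    if (sysctlbyname("net.systm.kevt.pcblist", NULL, &len, NULL, 0) != 0) {
        perror("sysctlbyname(size)");
        return 1;
    }
    buf = malloc(len);
    if (buf == NULL) {
        return 1;
    }

    /* Pass 2: fetch.  The buffer holds a leading xsystmgen, then one
     * record per pcb (xkevtpcb, xsocket_n, two xsockbuf_n, and
     * xsockstat_n, each 64-bit aligned), then a closing xsystmgen;
     * differing xg_gen values mean the snapshot may need a retry. */
    if (sysctlbyname("net.systm.kevt.pcblist", buf, &len, NULL, 0) != 0) {
        perror("sysctlbyname(fetch)");
        free(buf);
        return 1;
    }
    printf("pcblist snapshot: %zu bytes\n", len);
    free(buf);
    return 0;
}
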
2d21ac55 8655#endif /* SOCKETS */
1c79356b 8656
1c79356b 8657
0c530ab8
A
8658int
8659fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)
8660{
2d21ac55 8661 struct vinfo_stat * st;
0c530ab8 8662
0c530ab8
A
8663 st = &kinfo->kq_stat;
8664
2d21ac55 8665 st->vst_size = kq->kq_count;
0a7de745 8666 if (kq->kq_state & KQ_KEV_QOS) {
3e170ce0 8667 st->vst_blksize = sizeof(struct kevent_qos_s);
0a7de745 8668 } else if (kq->kq_state & KQ_KEV64) {
b0d623f7 8669 st->vst_blksize = sizeof(struct kevent64_s);
0a7de745 8670 } else {
b0d623f7 8671 st->vst_blksize = sizeof(struct kevent);
0a7de745 8672 }
2d21ac55 8673 st->vst_mode = S_IFIFO;
5ba3f43e 8674 st->vst_ino = (kq->kq_state & KQ_DYNAMIC) ?
0a7de745 8675 ((struct kqworkloop *)kq)->kqwl_dynamicid : 0;
3e170ce0
A
8676
8677 /* flags exported to libproc as PROC_KQUEUE_* (sys/proc_info.h) */
5ba3f43e 8678#define PROC_KQUEUE_MASK (KQ_SEL|KQ_SLEEP|KQ_KEV32|KQ_KEV64|KQ_KEV_QOS|KQ_WORKQ|KQ_WORKLOOP)
3e170ce0 8679 kinfo->kq_state = kq->kq_state & PROC_KQUEUE_MASK;
0c530ab8 8680
0a7de745 8681 return 0;
0c530ab8 8682}
1c79356b 8683
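/*
 * fill_kqueueinfo() produces the per-kqueue statistics that libproc
 * hands out for ordinary kqueue file descriptors (the PROC_KQUEUE_*
 * comment above refers to the same sys/proc_info.h flags).  The sketch
 * below assumes the PROC_PIDFDKQUEUEINFO flavor and struct
 * kqueue_fdinfo names from that header; it illustrates the consumer
 * side only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <libproc.h>
#include <sys/proc_info.h>

int
main(int argc, char *argv[])
{
    struct kqueue_fdinfo kqfdi;
    int pid, fd, n;

    if (argc < 3) {
        fprintf(stderr, "usage: %s pid fd\n", argv[0]);
        return 1;
    }
    pid = atoi(argv[1]);
    fd = atoi(argv[2]);

    n = proc_pidfdinfo(pid, fd, PROC_PIDFDKQUEUEINFO,
        &kqfdi, (int)sizeof(kqfdi));
    if (n <= 0) {
        perror("proc_pidfdinfo");
        return 1;
    }

    /* vst_size carries kq_count (events currently queued) and
     * kq_state carries the PROC_KQUEUE_* flags filled in above. */
    printf("queued events: %lld, state flags: 0x%x\n",
        (long long)kqfdi.kqueueinfo.kq_stat.vst_size,
        kqfdi.kqueueinfo.kq_state);
    return 0;
}
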
5ba3f43e 8684static int
cb323159 8685fill_kqueue_dyninfo(struct kqworkloop *kqwl, struct kqueue_dyninfo *kqdi)
5ba3f43e 8686{
cb323159 8687 workq_threadreq_t kqr = &kqwl->kqwl_request;
d9a64523 8688 workq_threadreq_param_t trp = {};
5ba3f43e
A
8689 int err;
8690
cb323159 8691 if ((kqwl->kqwl_state & KQ_WORKLOOP) == 0) {
5ba3f43e
A
8692 return EINVAL;
8693 }
8694
cb323159 8695 if ((err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi->kqdi_info))) {
5ba3f43e
A
8696 return err;
8697 }
8698
cb323159 8699 kqlock(kqwl);
5ba3f43e 8700
cb323159 8701 kqdi->kqdi_servicer = thread_tid(kqr_thread(kqr));
d9a64523 8702 kqdi->kqdi_owner = thread_tid(kqwl->kqwl_owner);
cb323159
A
8703 kqdi->kqdi_request_state = kqr->tr_state;
8704 kqdi->kqdi_async_qos = kqr->tr_kq_qos_index;
8705 kqdi->kqdi_events_qos = kqr->tr_kq_override_index;
8706 kqdi->kqdi_sync_waiters = 0;
d9a64523
A
8707 kqdi->kqdi_sync_waiter_qos = 0;
8708
8709 trp.trp_value = kqwl->kqwl_params;
0a7de745 8710 if (trp.trp_flags & TRP_PRIORITY) {
d9a64523 8711 kqdi->kqdi_pri = trp.trp_pri;
0a7de745 8712 } else {
d9a64523 8713 kqdi->kqdi_pri = 0;
0a7de745 8714 }
5ba3f43e 8715
0a7de745 8716 if (trp.trp_flags & TRP_POLICY) {
d9a64523 8717 kqdi->kqdi_pol = trp.trp_pol;
0a7de745 8718 } else {
d9a64523 8719 kqdi->kqdi_pol = 0;
0a7de745 8720 }
d9a64523 8721
0a7de745 8722 if (trp.trp_flags & TRP_CPUPERCENT) {
d9a64523 8723 kqdi->kqdi_cpupercent = trp.trp_cpupercent;
0a7de745 8724 } else {
d9a64523 8725 kqdi->kqdi_cpupercent = 0;
0a7de745 8726 }
d9a64523 8727
cb323159 8728 kqunlock(kqwl);
5ba3f43e
A
8729
8730 return 0;
8731}
8732
6d2010ae
A
8733
8734void
39037602 8735knote_markstayactive(struct knote *kn)
6d2010ae 8736{
5ba3f43e 8737 struct kqueue *kq = knote_get_kq(kn);
d9a64523 8738 kq_index_t qos;
5ba3f43e
A
8739
8740 kqlock(kq);
39037602
A
8741 kn->kn_status |= KN_STAYACTIVE;
8742
5ba3f43e
A
8743 /*
8744 * Making a knote stay active is a property of the knote that must be
8745 * established before it is fully attached.
8746 */
d9a64523 8747 assert((kn->kn_status & (KN_QUEUED | KN_SUPPRESSED)) == 0);
5ba3f43e
A
8748
8749 /* handle all stayactive knotes on the (appropriate) manager */
cb323159 8750 if (kq->kq_state & KQ_WORKLOOP) {
5ba3f43e 8751 struct kqworkloop *kqwl = (struct kqworkloop *)kq;
d9a64523
A
8752
8753 qos = _pthread_priority_thread_qos(kn->kn_qos);
8754 assert(qos && qos < THREAD_QOS_LAST);
d9a64523 8755 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_STAYACTIVE_QOS, qos);
d9a64523 8756 qos = KQWL_BUCKET_STAYACTIVE;
cb323159
A
8757 } else if (kq->kq_state & KQ_WORKQ) {
8758 qos = KQWQ_QOS_MANAGER;
d9a64523
A
8759 } else {
8760 qos = THREAD_QOS_UNSPECIFIED;
5ba3f43e 8761 }
39037602 8762
d9a64523
A
8763 kn->kn_qos_override = qos;
8764 kn->kn_qos_index = qos;
8765
cb323159 8766 knote_activate(kq, kn, FILTER_ACTIVE);
5ba3f43e 8767 kqunlock(kq);
6d2010ae 8768}
04b8595b
A
8769
8770void
39037602 8771knote_clearstayactive(struct knote *kn)
04b8595b 8772{
cb323159
A
8773 struct kqueue *kq = knote_get_kq(kn);
8774 kqlock(kq);
8775 kn->kn_status &= ~(KN_STAYACTIVE | KN_ACTIVE);
8776 knote_dequeue(kq, kn);
8777 kqunlock(kq);
04b8595b 8778}
3e170ce0
A
8779
8780static unsigned long
8781kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *buf,
0a7de745 8782 unsigned long buflen, unsigned long nknotes)
3e170ce0 8783{
3e170ce0 8784 for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
39037602 8785 if (kq == knote_get_kq(kn)) {
3e170ce0
A
8786 if (nknotes < buflen) {
8787 struct kevent_extinfo *info = &buf[nknotes];
8788
8789 kqlock(kq);
5ba3f43e 8790
cb323159
A
8791 info->kqext_kev = *(struct kevent_qos_s *)&kn->kn_kevent;
8792 if (knote_has_qos(kn)) {
8793 info->kqext_kev.qos =
8794 _pthread_priority_thread_qos_fast(kn->kn_qos);
8795 } else {
8796 info->kqext_kev.qos = kn->kn_qos_override;
8797 }
8798 info->kqext_kev.filter |= 0xff00; /* sign extend filter */
8799 info->kqext_kev.xflags = 0; /* this is where sfflags lives */
8800 info->kqext_kev.data = 0; /* this is where sdata lives */
8801 info->kqext_sdata = kn->kn_sdata;
8802 info->kqext_status = kn->kn_status;
8803 info->kqext_sfflags = kn->kn_sfflags;
3e170ce0
A
8804
8805 kqunlock(kq);
8806 }
8807
8808 /* we return total number of knotes, which may be more than requested */
8809 nknotes++;
8810 }
8811 }
8812
8813 return nknotes;
8814}
8815
5ba3f43e
A
8816int
8817kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize,
0a7de745 8818 int32_t *nkqueues_out)
5ba3f43e
A
8819{
8820 proc_t p = (proc_t)proc;
8821 struct filedesc *fdp = p->p_fd;
8822 unsigned int nkqueues = 0;
8823 unsigned long ubuflen = ubufsize / sizeof(kqueue_id_t);
8824 size_t buflen, bufsize;
8825 kqueue_id_t *kq_ids = NULL;
8826 int err = 0;
8827
8828 assert(p != NULL);
8829
8830 if (ubuf == USER_ADDR_NULL && ubufsize != 0) {
8831 err = EINVAL;
8832 goto out;
8833 }
8834
f427ee49 8835 buflen = MIN(ubuflen, PROC_PIDDYNKQUEUES_MAX);
5ba3f43e
A
8836
8837 if (ubuflen != 0) {
8838 if (os_mul_overflow(sizeof(kqueue_id_t), buflen, &bufsize)) {
8839 err = ERANGE;
8840 goto out;
8841 }
f427ee49 8842 kq_ids = kheap_alloc(KHEAP_TEMP, bufsize, Z_WAITOK | Z_ZERO);
e8c3f781
A
8843 if (!kq_ids) {
8844 err = ENOMEM;
8845 goto out;
8846 }
5ba3f43e
A
8847 }
8848
cb323159 8849 kqhash_lock(fdp);
5ba3f43e
A
8850
8851 if (fdp->fd_kqhashmask > 0) {
8852 for (uint32_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
8853 struct kqworkloop *kqwl;
8854
cb323159 8855 LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
5ba3f43e
A
8856 /* report the number of kqueues, even if they don't all fit */
8857 if (nkqueues < buflen) {
8858 kq_ids[nkqueues] = kqwl->kqwl_dynamicid;
8859 }
8860 nkqueues++;
8861 }
8862 }
8863 }
8864
cb323159 8865 kqhash_unlock(fdp);
5ba3f43e
A
8866
8867 if (kq_ids) {
8868 size_t copysize;
f427ee49 8869 if (os_mul_overflow(sizeof(kqueue_id_t), MIN(buflen, nkqueues), &copysize)) {
5ba3f43e
A
8870 err = ERANGE;
8871 goto out;
8872 }
8873
8874 assert(ubufsize >= copysize);
8875 err = copyout(kq_ids, ubuf, copysize);
8876 }
8877
8878out:
8879 if (kq_ids) {
f427ee49 8880 kheap_free(KHEAP_TEMP, kq_ids, bufsize);
5ba3f43e
A
8881 }
8882
8883 if (!err) {
8884 *nkqueues_out = (int)min(nkqueues, PROC_PIDDYNKQUEUES_MAX);
8885 }
8886 return err;
8887}
8888
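/*
 * kevent_copyout_proc_dynkqids() walks fd_kqhash and returns the
 * dynamic IDs of a process's workloop kqueues, reporting the full
 * count even when the buffer is too small, so a call-twice pattern is
 * natural.  The sketch assumes a libproc wrapper named
 * proc_list_dynkqueueids() with the obvious signature; that name is an
 * assumption about the user-space SPI, not something defined here.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <libproc.h>

int
main(int argc, char *argv[])
{
    uint64_t *ids;              /* dynamic kqueue IDs are 64-bit values */
    int pid, total, n, i;

    if (argc < 2) {
        fprintf(stderr, "usage: %s pid\n", argv[0]);
        return 1;
    }
    pid = atoi(argv[1]);

    /* Sizing call: with no buffer the kernel still counts the
     * workloops hashed off fd_kqhash and returns that count. */
    total = proc_list_dynkqueueids(pid, NULL, 0);
    if (total <= 0) {
        return 1;
    }
    ids = calloc((size_t)total, sizeof(*ids));
    if (ids == NULL) {
        return 1;
    }
    n = proc_list_dynkqueueids(pid, ids, (uint32_t)(total * sizeof(*ids)));
    for (i = 0; i < n && i < total; i++) {
        printf("dynamic kqueue id: %llu\n", (unsigned long long)ids[i]);
    }
    free(ids);
    return 0;
}
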
8889int
8890kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
0a7de745 8891 uint32_t ubufsize, int32_t *size_out)
5ba3f43e
A
8892{
8893 proc_t p = (proc_t)proc;
cb323159 8894 struct kqworkloop *kqwl;
5ba3f43e
A
8895 int err = 0;
8896 struct kqueue_dyninfo kqdi = { };
8897
8898 assert(p != NULL);
8899
8900 if (ubufsize < sizeof(struct kqueue_info)) {
8901 return ENOBUFS;
8902 }
8903
cb323159
A
8904 kqwl = kqworkloop_hash_lookup_and_retain(p->p_fd, kq_id);
8905 if (!kqwl) {
5ba3f43e
A
8906 return ESRCH;
8907 }
5ba3f43e
A
8908
8909 /*
8910 * backward compatibility: allow the argument to this call to be just
8911 * a struct kqueue_info rather than the larger struct kqueue_dyninfo
8912 */
8913 if (ubufsize >= sizeof(struct kqueue_dyninfo)) {
8914 ubufsize = sizeof(struct kqueue_dyninfo);
cb323159 8915 err = fill_kqueue_dyninfo(kqwl, &kqdi);
5ba3f43e
A
8916 } else {
8917 ubufsize = sizeof(struct kqueue_info);
cb323159 8918 err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi.kqdi_info);
5ba3f43e
A
8919 }
8920 if (err == 0 && (err = copyout(&kqdi, ubuf, ubufsize)) == 0) {
8921 *size_out = ubufsize;
8922 }
cb323159 8923 kqworkloop_release(kqwl);
5ba3f43e
A
8924 return err;
8925}
8926
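/*
 * kevent_copyout_dynkqinfo() sizes its reply by what the caller
 * offers: a buffer large enough for struct kqueue_dyninfo gets the
 * extended fill_kqueue_dyninfo() data, anything smaller falls back to
 * plain struct kqueue_info.  The sketch below assumes the
 * proc_piddynkqueueinfo() wrapper and PROC_PIDDYNKQUEUE_INFO flavor
 * names; both are assumptions about the user-space SPI.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <libproc.h>
#include <sys/proc_info.h>

int
main(int argc, char *argv[])
{
    struct kqueue_dyninfo kqdi;
    uint64_t kq_id;
    int pid, n;

    if (argc < 3) {
        fprintf(stderr, "usage: %s pid kq_id\n", argv[0]);
        return 1;
    }
    pid = atoi(argv[1]);
    kq_id = strtoull(argv[2], NULL, 0);

    /* Offering sizeof(struct kqueue_dyninfo) selects the extended
     * path above; a smaller buffer would return only kqdi_info. */
    n = proc_piddynkqueueinfo(pid, PROC_PIDDYNKQUEUE_INFO, kq_id,
        &kqdi, (int)sizeof(kqdi));
    if (n <= 0) {
        return 1;
    }
    printf("servicer tid %llu, owner tid %llu, request state %u\n",
        (unsigned long long)kqdi.kqdi_servicer,
        (unsigned long long)kqdi.kqdi_owner,
        (unsigned int)kqdi.kqdi_request_state);
    return 0;
}
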
8927int
8928kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
0a7de745 8929 uint32_t ubufsize, int32_t *nknotes_out)
5ba3f43e
A
8930{
8931 proc_t p = (proc_t)proc;
cb323159 8932 struct kqworkloop *kqwl;
5ba3f43e
A
8933 int err;
8934
cb323159
A
8935 kqwl = kqworkloop_hash_lookup_and_retain(p->p_fd, kq_id);
8936 if (!kqwl) {
5ba3f43e
A
8937 return ESRCH;
8938 }
5ba3f43e 8939
cb323159
A
8940 err = pid_kqueue_extinfo(p, &kqwl->kqwl_kqueue, ubuf, ubufsize, nknotes_out);
8941 kqworkloop_release(kqwl);
5ba3f43e
A
8942 return err;
8943}
8944
3e170ce0
A
8945int
8946pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf,
0a7de745 8947 uint32_t bufsize, int32_t *retval)
3e170ce0
A
8948{
8949 struct knote *kn;
8950 int i;
8951 int err = 0;
8952 struct filedesc *fdp = p->p_fd;
8953 unsigned long nknotes = 0;
8954 unsigned long buflen = bufsize / sizeof(struct kevent_extinfo);
8955 struct kevent_extinfo *kqext = NULL;
8956
39037602 8957 /* arbitrary upper limit to cap kernel memory usage, copyout size, etc. */
f427ee49 8958 buflen = MIN(buflen, PROC_PIDFDKQUEUE_KNOTES_MAX);
39037602 8959
f427ee49
A
8960 kqext = kheap_alloc(KHEAP_TEMP,
8961 buflen * sizeof(struct kevent_extinfo), Z_WAITOK | Z_ZERO);
3e170ce0
A
8962 if (kqext == NULL) {
8963 err = ENOMEM;
8964 goto out;
8965 }
3e170ce0
A
8966
8967 proc_fdlock(p);
3e170ce0
A
8968 for (i = 0; i < fdp->fd_knlistsize; i++) {
8969 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
8970 nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
8971 }
5ba3f43e 8972 proc_fdunlock(p);
3e170ce0
A
8973
8974 if (fdp->fd_knhashmask != 0) {
8975 for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
cb323159 8976 knhash_lock(fdp);
3e170ce0
A
8977 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
8978 nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
cb323159 8979 knhash_unlock(fdp);
3e170ce0
A
8980 }
8981 }
8982
f427ee49
A
8983 assert(bufsize >= sizeof(struct kevent_extinfo) * MIN(buflen, nknotes));
8984 err = copyout(kqext, ubuf, sizeof(struct kevent_extinfo) * MIN(buflen, nknotes));
3e170ce0 8985
d9a64523 8986out:
c3c9b80d 8987 kheap_free(KHEAP_TEMP, kqext, buflen * sizeof(struct kevent_extinfo));
3e170ce0 8988
39037602 8989 if (!err) {
f427ee49 8990 *retval = (int32_t)MIN(nknotes, PROC_PIDFDKQUEUE_KNOTES_MAX);
39037602 8991 }
3e170ce0
A
8992 return err;
8993}
39037602 8994
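/*
 * pid_kqueue_extinfo() backs the "extended info" requests used by
 * kevent_copyout_dynkqextinfo() above and by the per-fd proc_info
 * path: one struct kevent_extinfo per knote attached to the target
 * kqueue, capped at PROC_PIDFDKQUEUE_KNOTES_MAX, with the total knote
 * count handed back so the caller can detect truncation.  The sketch
 * assumes the per-fd PROC_PIDFDKQUEUE_EXTINFO flavor name; it is an
 * illustration of the consumer, not an interface defined in this file.
 */
#include <stdio.h>
#include <stdlib.h>
#include <libproc.h>
#include <sys/proc_info.h>

#define SKETCH_MAX_KNOTES 64    /* arbitrary cap for this example */

int
main(int argc, char *argv[])
{
    struct kevent_extinfo info[SKETCH_MAX_KNOTES];
    int pid, fd, i, n;

    if (argc < 3) {
        fprintf(stderr, "usage: %s pid fd\n", argv[0]);
        return 1;
    }
    pid = atoi(argv[1]);
    fd = atoi(argv[2]);

    /* The return value is the total number of knotes (up to the
     * kernel cap), which may exceed what fit in this buffer. */
    n = proc_pidfdinfo(pid, fd, PROC_PIDFDKQUEUE_EXTINFO,
        info, (int)sizeof(info));
    for (i = 0; i < n && i < SKETCH_MAX_KNOTES; i++) {
        printf("filter %d ident %llu status 0x%x\n",
            (int)info[i].kqext_kev.filter,
            (unsigned long long)info[i].kqext_kev.ident,
            (unsigned int)info[i].kqext_status);
    }
    return 0;
}
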
5ba3f43e
A
8995static unsigned int
8996klist_copy_udata(struct klist *list, uint64_t *buf,
0a7de745 8997 unsigned int buflen, unsigned int nknotes)
39037602 8998{
5ba3f43e
A
8999 struct knote *kn;
9000 SLIST_FOREACH(kn, list, kn_link) {
9001 if (nknotes < buflen) {
cb323159
A
9002 /*
9003 * kevent_register will always set kn_udata atomically
9004 * so that we don't have to take any kqlock here.
9005 */
9006 buf[nknotes] = os_atomic_load_wide(&kn->kn_udata, relaxed);
39037602 9007 }
5ba3f43e
A
9008 /* we return total number of knotes, which may be more than requested */
9009 nknotes++;
39037602
A
9010 }
9011
9012 return nknotes;
9013}
9014
9015int
f427ee49 9016kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize)
39037602 9017{
5ba3f43e 9018 proc_t p = (proc_t)proc;
39037602 9019 struct filedesc *fdp = p->p_fd;
5ba3f43e 9020 unsigned int nuptrs = 0;
f427ee49 9021 unsigned int buflen = bufsize / sizeof(uint64_t);
cb323159 9022 struct kqworkloop *kqwl;
39037602 9023
5ba3f43e
A
9024 if (buflen > 0) {
9025 assert(buf != NULL);
9026 }
9027
39037602 9028 proc_fdlock(p);
5ba3f43e
A
9029 for (int i = 0; i < fdp->fd_knlistsize; i++) {
9030 nuptrs = klist_copy_udata(&fdp->fd_knlist[i], buf, buflen, nuptrs);
9031 }
5ba3f43e 9032 proc_fdunlock(p);
cb323159
A
9033
9034 knhash_lock(fdp);
5ba3f43e 9035 if (fdp->fd_knhashmask != 0) {
cb323159 9036 for (size_t i = 0; i < fdp->fd_knhashmask + 1; i++) {
5ba3f43e
A
9037 nuptrs = klist_copy_udata(&fdp->fd_knhash[i], buf, buflen, nuptrs);
9038 }
9039 }
cb323159 9040 knhash_unlock(fdp);
39037602 9041
cb323159 9042 kqhash_lock(fdp);
5ba3f43e 9043 if (fdp->fd_kqhashmask != 0) {
cb323159
A
9044 for (size_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
9045 LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
9046 if (nuptrs < buflen) {
9047 buf[nuptrs] = kqwl->kqwl_dynamicid;
9048 }
9049 nuptrs++;
9050 }
5ba3f43e 9051 }
39037602 9052 }
cb323159 9053 kqhash_unlock(fdp);
39037602 9054
5ba3f43e
A
9055 return (int)nuptrs;
9056}
9057
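/*
 * kevent_proc_copy_uptrs() pools two things into one uint64_t array:
 * the udata of every knote in the process and the ID of every dynamic
 * kqueue, which is how debugging tools locate user pointers held by
 * the kevent subsystem.  The sketch assumes a libproc wrapper named
 * proc_list_uptrs() that, like the routine above, reports the total
 * count even when the buffer is too small; the name is an assumption.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <libproc.h>

int
main(int argc, char *argv[])
{
    uint64_t *uptrs;
    int pid, total, n, i;

    if (argc < 2) {
        fprintf(stderr, "usage: %s pid\n", argv[0]);
        return 1;
    }
    pid = atoi(argv[1]);

    /* Sizing call first, mirroring the buflen == 0 case above. */
    total = proc_list_uptrs(pid, NULL, 0);
    if (total <= 0) {
        return 1;
    }
    uptrs = calloc((size_t)total, sizeof(*uptrs));
    if (uptrs == NULL) {
        return 1;
    }
    n = proc_list_uptrs(pid, uptrs, (uint32_t)(total * sizeof(*uptrs)));
    for (i = 0; i < n && i < total; i++) {
        printf("0x%016llx\n", (unsigned long long)uptrs[i]);
    }
    free(uptrs);
    return 0;
}
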
5ba3f43e
A
9058static void
9059kevent_set_return_to_kernel_user_tsd(proc_t p, thread_t thread)
9060{
9061 uint64_t ast_addr;
9062 bool proc_is_64bit = !!(p->p_flag & P_LP64);
9063 size_t user_addr_size = proc_is_64bit ? 8 : 4;
9064 uint32_t ast_flags32 = 0;
9065 uint64_t ast_flags64 = 0;
9066 struct uthread *ut = get_bsdthread_info(thread);
9067
d9a64523
A
9068 if (ut->uu_kqr_bound != NULL) {
9069 ast_flags64 |= R2K_WORKLOOP_PENDING_EVENTS;
39037602
A
9070 }
9071
5ba3f43e
A
9072 if (ast_flags64 == 0) {
9073 return;
9074 }
9075
9076 if (!(p->p_flag & P_LP64)) {
9077 ast_flags32 = (uint32_t)ast_flags64;
9078 assert(ast_flags64 < 0x100000000ull);
9079 }
9080
9081 ast_addr = thread_rettokern_addr(thread);
9082 if (ast_addr == 0) {
9083 return;
9084 }
9085
9086 if (copyout((proc_is_64bit ? (void *)&ast_flags64 : (void *)&ast_flags32),
0a7de745
A
9087 (user_addr_t)ast_addr,
9088 user_addr_size) != 0) {
5ba3f43e 9089 printf("pid %d (tid:%llu): copyout of return_to_kernel ast flags failed with "
0a7de745 9090 "ast_addr = %llu\n", p->p_pid, thread_tid(current_thread()), ast_addr);
5ba3f43e
A
9091 }
9092}
9093
9094void
9095kevent_ast(thread_t thread, uint16_t bits)
9096{
9097 proc_t p = current_proc();
9098
9099 if (bits & AST_KEVENT_REDRIVE_THREADREQ) {
d9a64523 9100 workq_kern_threadreq_redrive(p, WORKQ_THREADREQ_CAN_CREATE_THREADS);
5ba3f43e
A
9101 }
9102 if (bits & AST_KEVENT_RETURN_TO_KERNEL) {
9103 kevent_set_return_to_kernel_user_tsd(p, thread);
9104 }
9105}
9106
9107#if DEVELOPMENT || DEBUG
9108
9109#define KEVENT_SYSCTL_BOUND_ID 1
9110
9111static int
9112kevent_sysctl SYSCTL_HANDLER_ARGS
9113{
9114#pragma unused(oidp, arg2)
9115 uintptr_t type = (uintptr_t)arg1;
9116 uint64_t bound_id = 0;
5ba3f43e
A
9117
9118 if (type != KEVENT_SYSCTL_BOUND_ID) {
9119 return EINVAL;
9120 }
9121
9122 if (req->newptr) {
9123 return EINVAL;
9124 }
9125
d9a64523 9126 struct uthread *ut = get_bsdthread_info(current_thread());
5ba3f43e
A
9127 if (!ut) {
9128 return EFAULT;
9129 }
9130
cb323159 9131 workq_threadreq_t kqr = ut->uu_kqr_bound;
d9a64523 9132 if (kqr) {
cb323159 9133 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
d9a64523
A
9134 bound_id = kqr_kqworkloop(kqr)->kqwl_dynamicid;
9135 } else {
5ba3f43e
A
9136 bound_id = -1;
9137 }
9138 }
9139
9140 return sysctl_io_number(req, bound_id, sizeof(bound_id), NULL, NULL);
39037602
A
9141}
9142
5ba3f43e 9143SYSCTL_NODE(_kern, OID_AUTO, kevent, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
0a7de745 9144 "kevent information");
5ba3f43e
A
9145
9146SYSCTL_PROC(_kern_kevent, OID_AUTO, bound_id,
0a7de745
A
9147 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
9148 (void *)KEVENT_SYSCTL_BOUND_ID,
9149 sizeof(kqueue_id_t), kevent_sysctl, "Q",
9150 "get the ID of the bound kqueue");
5ba3f43e
A
9151
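/*
 * The registration above yields the OID kern.kevent.bound_id on
 * DEVELOPMENT and DEBUG kernels: it reports the dynamic ID of the
 * workloop the calling thread is bound to, -1 for a workqueue-bound
 * thread, or 0 when unbound.  A minimal reader is sketched below; on
 * an ordinary application thread it will simply report 0.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
    uint64_t bound_id = 0;
    size_t len = sizeof(bound_id);

    /* CTLFLAG_MASKED hides the OID from the default listing, but it
     * can still be queried by name; 0 means "not bound". */
    if (sysctlbyname("kern.kevent.bound_id", &bound_id, &len,
        NULL, 0) != 0) {
        perror("sysctlbyname");
        return 1;
    }
    printf("bound kqueue id: %llu\n", (unsigned long long)bound_id);
    return 0;
}
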
9152#endif /* DEVELOPMENT || DEBUG */