/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_event.c       1.0 (3/31/2000)
 */
#include <machine/atomic.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file_internal.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <sys/syscall.h> // SYS_* constants
#include <sys/sysctl.h>

#include <sys/sysproto.h>

#include <sys/vnode_internal.h>

#include <sys/proc_info.h>
#include <sys/codesign.h>
#include <sys/pthread_shims.h>
#include <sys/kdebug.h>

#include <pexpert/pexpert.h>

#include <kern/locks.h>
#include <kern/clock.h>
#include <kern/cpu_data.h>
#include <kern/policy_internal.h>
#include <kern/thread_call.h>
#include <kern/sched_prim.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/assert.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/kcdata.h>

#include <pthread/priority_private.h>
#include <pthread/workqueue_syscalls.h>
#include <pthread/workqueue_internal.h>
#include <libkern/libkern.h>

#include "net/net_str_id.h"

#include <mach/task.h>
#include <libkern/section_keywords.h>
#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

#if DEVELOPMENT || DEBUG
#define KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK  (1U << 0)
#define KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS     (1U << 1)
TUNABLE(uint32_t, kevent_debug_flags, "kevent_debug", 0);
#endif
static LCK_GRP_DECLARE(kq_lck_grp, "kqueue");
SECURITY_READ_ONLY_EARLY(vm_packing_params_t) kn_kq_packing_params =
    VM_PACKING_PARAMS(KNOTE_KQ_PACKED);

extern mach_port_name_t ipc_entry_name_mask(mach_port_name_t name); /* osfmk/ipc/ipc_entry.h */
extern int cansignal(struct proc *, kauth_cred_t, struct proc *, int); /* bsd/kern/kern_sig.c */

#define KEV_EVTID(code) BSDDBG_CODE(DBG_BSD_KEVENT, (code))

/*
 * If you need accounting for KM_KQUEUE consider using
 * KALLOC_HEAP_DEFINE to define a zone view.
 */
#define KM_KQUEUE KHEAP_DEFAULT

#define KQ_EVENT NO_EVENT64
static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
    vfs_context_t ctx);
static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
    struct kevent_qos_s *kev);
static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);
static const struct fileops kqueueops = {
	.fo_type     = DTYPE_KQUEUE,
	.fo_read     = fo_no_read,
	.fo_write    = fo_no_write,
	.fo_ioctl    = fo_no_ioctl,
	.fo_select   = kqueue_select,
	.fo_close    = kqueue_close,
	.fo_drain    = kqueue_drain,
	.fo_kqfilter = kqueue_kqfilter,
};

static inline int kevent_modern_copyout(struct kevent_qos_s *, user_addr_t *);
static int kevent_register_wait_prepare(struct knote *kn,
    struct kevent_qos_s *kev, int result);
static void kevent_register_wait_block(struct turnstile *ts, thread_t handoff_thread,
    thread_continue_t cont, struct _kevent_register *cont_args) __dead2;
static void kevent_register_wait_return(struct _kevent_register *cont_args) __dead2;
static void kevent_register_wait_cleanup(struct knote *kn);

static struct kqtailq *kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn);
static void kqueue_threadreq_initiate(struct kqueue *kq, workq_threadreq_t,
    kq_index_t qos, int flags);

static void kqworkq_unbind(proc_t p, workq_threadreq_t);
static thread_qos_t kqworkq_unbind_locked(struct kqworkq *kqwq,
    workq_threadreq_t, thread_t thread);
static workq_threadreq_t kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index);

static void kqworkloop_unbind(struct kqworkloop *kwql);

enum kqwl_unbind_locked_mode {
	KQWL_OVERRIDE_DROP_IMMEDIATELY,
	KQWL_OVERRIDE_DROP_DELAYED,
};
static void kqworkloop_unbind_locked(struct kqworkloop *kwql, thread_t thread,
    enum kqwl_unbind_locked_mode how);
static void kqworkloop_unbind_delayed_override_drop(thread_t thread);
static kq_index_t kqworkloop_override(struct kqworkloop *kqwl);
static void kqworkloop_set_overcommit(struct kqworkloop *kqwl);
enum {
	KQWL_UTQ_NONE,
	/*
	 * The wakeup qos is the qos of QUEUED knotes.
	 *
	 * This QoS is accounted for with the events override in the
	 * kqr_override_index field. It is raised each time a new knote is queued at
	 * a given QoS. The kqwl_wakeup_indexes field is a superset of the non empty
	 * knote buckets and is recomputed after each event delivery.
	 */
	KQWL_UTQ_UPDATE_WAKEUP_QOS,
	KQWL_UTQ_UPDATE_STAYACTIVE_QOS,
	KQWL_UTQ_RECOMPUTE_WAKEUP_QOS,
	KQWL_UTQ_UNBINDING, /* attempt to rebind */
	KQWL_UTQ_PARKING,
	/*
	 * The wakeup override is for suppressed knotes that have fired again at
	 * a higher QoS than the one for which they are suppressed already.
	 * This override is cleared when the knote suppressed list becomes empty.
	 */
	KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
	KQWL_UTQ_RESET_WAKEUP_OVERRIDE,
	/*
	 * The QoS is the maximum QoS of an event enqueued on this workloop in
	 * userland. It is copied from the only EVFILT_WORKLOOP knote with
	 * a NOTE_WL_THREAD_REQUEST bit set allowed on this workloop. If there is no
	 * such knote, this QoS is 0.
	 */
	KQWL_UTQ_SET_QOS_INDEX,
	KQWL_UTQ_REDRIVE_EVENTS,
};
static void kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos);
static int kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags);

static struct knote *knote_alloc(void);
static void knote_free(struct knote *kn);
static int kq_add_knote(struct kqueue *kq, struct knote *kn,
    struct knote_lock_ctx *knlc, struct proc *p);
static struct knote *kq_find_knote_and_kq_lock(struct kqueue *kq,
    struct kevent_qos_s *kev, bool is_fd, struct proc *p);

static void knote_activate(kqueue_t kqu, struct knote *kn, int result);
static void knote_dequeue(kqueue_t kqu, struct knote *kn);

static void knote_apply_touch(kqueue_t kqu, struct knote *kn,
    struct kevent_qos_s *kev, int result);
static void knote_suppress(kqueue_t kqu, struct knote *kn);
static void knote_unsuppress(kqueue_t kqu, struct knote *kn);
static void knote_drop(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc);

// both these functions may dequeue the knote and it is up to the caller
// to enqueue the knote back
static void knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result);
static void knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp);

static ZONE_DECLARE(knote_zone, "knote zone",
    sizeof(struct knote), ZC_CACHING | ZC_ZFREE_CLEARMEM);
static ZONE_DECLARE(kqfile_zone, "kqueue file zone",
    sizeof(struct kqfile), ZC_ZFREE_CLEARMEM);
static ZONE_DECLARE(kqworkq_zone, "kqueue workq zone",
    sizeof(struct kqworkq), ZC_ZFREE_CLEARMEM);
static ZONE_DECLARE(kqworkloop_zone, "kqueue workloop zone",
    sizeof(struct kqworkloop), ZC_CACHING | ZC_ZFREE_CLEARMEM);
#define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
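/*
 * Illustrative note (editorial, not from the original source): KN_HASH
 * folds the second byte of an ident into the low bits before masking, so
 * nearby idents spread across buckets. For example, with an 8-bucket
 * table (mask = 7): KN_HASH(0x1234, 7) == ((0x1234 ^ 0x12) & 7) == 6.
 */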
static int filt_no_attach(struct knote *kn, struct kevent_qos_s *kev);
static void filt_no_detach(struct knote *kn);
static int filt_bad_event(struct knote *kn, long hint);
static int filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev);
static int filt_bad_process(struct knote *kn, struct kevent_qos_s *kev);

SECURITY_READ_ONLY_EARLY(static struct filterops) bad_filtops = {
	.f_attach  = filt_no_attach,
	.f_detach  = filt_no_detach,
	.f_event   = filt_bad_event,
	.f_touch   = filt_bad_touch,
	.f_process = filt_bad_process,
};
#if CONFIG_MEMORYSTATUS
extern const struct filterops memorystatus_filtops;
#endif /* CONFIG_MEMORYSTATUS */
extern const struct filterops fs_filtops;
extern const struct filterops sig_filtops;
extern const struct filterops machport_filtops;
extern const struct filterops pipe_nfiltops;
extern const struct filterops pipe_rfiltops;
extern const struct filterops pipe_wfiltops;
extern const struct filterops ptsd_kqops;
extern const struct filterops ptmx_kqops;
extern const struct filterops soread_filtops;
extern const struct filterops sowrite_filtops;
extern const struct filterops sock_filtops;
extern const struct filterops soexcept_filtops;
extern const struct filterops spec_filtops;
extern const struct filterops bpfread_filtops;
extern const struct filterops necp_fd_rfiltops;
extern const struct filterops fsevent_filtops;
extern const struct filterops vnode_filtops;
extern const struct filterops tty_filtops;

const static struct filterops file_filtops;
const static struct filterops kqread_filtops;
const static struct filterops proc_filtops;
const static struct filterops timer_filtops;
const static struct filterops user_filtops;
const static struct filterops workloop_filtops;
/*
 * Rules for adding new filters to the system:
 * Public filters:
 * - Add a new "EVFILT_" option value to bsd/sys/event.h (typically a negative value)
 *   in the exported section of the header
 * - Update the EVFILT_SYSCOUNT value to reflect the new addition
 * - Add a filterops to the sysfilt_ops array. Public filters should be added at the end
 *   of the Public Filters section in the array.
 * Private filters:
 * - Add a new "EVFILT_" value to bsd/sys/event.h (typically a positive value)
 *   in the XNU_KERNEL_PRIVATE section of the header
 * - Update the EVFILTID_MAX value to reflect the new addition
 * - Add a filterops to the sysfilt_ops. Private filters should be added at the end of
 *   the Private filters section of the array.
 */
static_assert(EVFILTID_MAX < UINT8_MAX, "kn_filtid expects this to be true");
static const struct filterops * const sysfilt_ops[EVFILTID_MAX] = {
	/* Public Filters */
	[~EVFILT_READ]                  = &file_filtops,
	[~EVFILT_WRITE]                 = &file_filtops,
	[~EVFILT_AIO]                   = &bad_filtops,
	[~EVFILT_VNODE]                 = &file_filtops,
	[~EVFILT_PROC]                  = &proc_filtops,
	[~EVFILT_SIGNAL]                = &sig_filtops,
	[~EVFILT_TIMER]                 = &timer_filtops,
	[~EVFILT_MACHPORT]              = &machport_filtops,
	[~EVFILT_FS]                    = &fs_filtops,
	[~EVFILT_USER]                  = &user_filtops,
	[~EVFILT_UNUSED_11]             = &bad_filtops,
	[~EVFILT_VM]                    = &bad_filtops,
	[~EVFILT_SOCK]                  = &file_filtops,
#if CONFIG_MEMORYSTATUS
	[~EVFILT_MEMORYSTATUS]          = &memorystatus_filtops,
#else
	[~EVFILT_MEMORYSTATUS]          = &bad_filtops,
#endif
	[~EVFILT_EXCEPT]                = &file_filtops,
	[~EVFILT_WORKLOOP]              = &workloop_filtops,
	/* Private filters */
	[EVFILTID_KQREAD]               = &kqread_filtops,
	[EVFILTID_PIPE_N]               = &pipe_nfiltops,
	[EVFILTID_PIPE_R]               = &pipe_rfiltops,
	[EVFILTID_PIPE_W]               = &pipe_wfiltops,
	[EVFILTID_PTSD]                 = &ptsd_kqops,
	[EVFILTID_SOREAD]               = &soread_filtops,
	[EVFILTID_SOWRITE]              = &sowrite_filtops,
	[EVFILTID_SCK]                  = &sock_filtops,
	[EVFILTID_SOEXCEPT]             = &soexcept_filtops,
	[EVFILTID_SPEC]                 = &spec_filtops,
	[EVFILTID_BPFREAD]              = &bpfread_filtops,
	[EVFILTID_NECP_FD]              = &necp_fd_rfiltops,
	[EVFILTID_FSEVENT]              = &fsevent_filtops,
	[EVFILTID_VN]                   = &vnode_filtops,
	[EVFILTID_TTY]                  = &tty_filtops,
	[EVFILTID_PTMX]                 = &ptmx_kqops,

	/* fake filter for detached knotes, keep last */
	[EVFILTID_DETACHED]             = &bad_filtops,
};
/* waitq prepost callback */
void waitq_set__CALLING_PREPOST_HOOK__(waitq_set_prepost_hook_t *kq_hook);

static inline bool
kqr_thread_bound(workq_threadreq_t kqr)
{
	return kqr->tr_state == WORKQ_TR_STATE_BOUND;
}

static inline bool
kqr_thread_requested_pending(workq_threadreq_t kqr)
{
	workq_tr_state_t tr_state = kqr->tr_state;
	return tr_state > WORKQ_TR_STATE_IDLE && tr_state < WORKQ_TR_STATE_BOUND;
}

static inline bool
kqr_thread_requested(workq_threadreq_t kqr)
{
	return kqr->tr_state != WORKQ_TR_STATE_IDLE;
}

static inline thread_t
kqr_thread_fast(workq_threadreq_t kqr)
{
	assert(kqr_thread_bound(kqr));
	return kqr->tr_thread;
}

static inline thread_t
kqr_thread(workq_threadreq_t kqr)
{
	return kqr_thread_bound(kqr) ? kqr->tr_thread : THREAD_NULL;
}
static inline struct kqworkloop *
kqr_kqworkloop(workq_threadreq_t kqr)
{
	if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
		return __container_of(kqr, struct kqworkloop, kqwl_request);
	}
	return NULL;
}

static inline kqueue_t
kqr_kqueue(proc_t p, workq_threadreq_t kqr)
{
	kqueue_t kqu;
	if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
		kqu.kqwl = kqr_kqworkloop(kqr);
	} else {
		kqu.kqwq = p->p_fd->fd_wqkqueue;
		assert(kqr >= kqu.kqwq->kqwq_request &&
		    kqr < kqu.kqwq->kqwq_request + KQWQ_NBUCKETS);
	}
	return kqu;
}
/*
 * kqueue/note lock implementations
 *
 *	The kqueue lock guards the kq state, the state of its queues,
 *	and the kqueue-aware status and locks of individual knotes.
 *
 *	The kqueue workq lock is used to protect state guarding the
 *	interaction of the kqueue with the workq. This state cannot
 *	be guarded by the kq lock - as it needs to be taken when we
 *	already have the waitq set lock held (during the waitq hook
 *	callback). It might be better to use the waitq lock itself
 *	for this, but the IRQ requirements make that difficult.
 *
 *	Knote flags, filter flags, and associated data are protected
 *	by the underlying object lock - and are only ever looked at
 *	by calling the filter to get a [consistent] snapshot of that
 *	data.
 */
static inline void
kqlock(kqueue_t kqu)
{
	lck_spin_lock(&kqu.kq->kq_lock);
}

static inline void
kqlock_held(__assert_only kqueue_t kqu)
{
	LCK_SPIN_ASSERT(&kqu.kq->kq_lock, LCK_ASSERT_OWNED);
}

static inline void
kqunlock(kqueue_t kqu)
{
	lck_spin_unlock(&kqu.kq->kq_lock);
}

static inline void
knhash_lock(struct filedesc *fdp)
{
	lck_mtx_lock(&fdp->fd_knhashlock);
}

static inline void
knhash_unlock(struct filedesc *fdp)
{
	lck_mtx_unlock(&fdp->fd_knhashlock);
}

/* wait event for knote locks */
static inline event_t
knote_lock_wev(struct knote *kn)
{
	return (event_t)(&kn->kn_hook);
}

/* wait event for kevent_register_wait_* */
static inline event64_t
knote_filt_wev64(struct knote *kn)
{
	/* kdp_workloop_sync_wait_find_owner knows about this */
	return CAST_EVENT64_T(kn);
}

/* wait event for knote_post/knote_drop */
static inline event64_t
knote_post_wev64(struct knote *kn)
{
	return CAST_EVENT64_T(&kn->kn_kevent);
}
/*!
 * @function knote_has_qos
 *
 * @brief
 * Whether the knote has a regular QoS.
 *
 * @discussion
 * kn_qos_override is:
 * - 0 on kqfiles
 * - THREAD_QOS_LAST for special buckets (stayactive, manager)
 *
 * Other values mean the knote participates to QoS propagation.
 */
static inline bool
knote_has_qos(struct knote *kn)
{
	return kn->kn_qos_override > 0 && kn->kn_qos_override < THREAD_QOS_LAST;
}
#pragma mark knote locks

/*
 * Enum used by the knote_lock_* functions.
 *
 * KNOTE_KQ_LOCK_ALWAYS
 *   The function will always return with the kq lock held.
 *
 * KNOTE_KQ_LOCK_ON_SUCCESS
 *   The function will return with the kq lock held if it was successful
 *   (knote_lock() is the only function that can fail).
 *
 * KNOTE_KQ_LOCK_ON_FAILURE
 *   The function will return with the kq lock held if it was unsuccessful
 *   (knote_lock() is the only function that can fail).
 *
 * KNOTE_KQ_UNLOCK
 *   The function returns with the kq unlocked.
 */
enum kqlocking {
	KNOTE_KQ_LOCK_ALWAYS,
	KNOTE_KQ_LOCK_ON_SUCCESS,
	KNOTE_KQ_LOCK_ON_FAILURE,
	KNOTE_KQ_UNLOCK,
};
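/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * caller takes the kq lock, then the knote lock, choosing how the kq
 * lock should be held on return:
 *
 *	kqlock(kq);
 *	if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
 *		// kq lock held here; do work serialized against other
 *		// knote_lock() holders, then release both
 *		knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
 *	}
 *	// on failure, KNOTE_KQ_LOCK_ON_SUCCESS left the kq unlocked
 */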
static struct knote_lock_ctx *
knote_lock_ctx_find(kqueue_t kqu, struct knote *kn)
{
	struct knote_lock_ctx *ctx;
	LIST_FOREACH(ctx, &kqu.kq->kq_knlocks, knlc_link) {
		if (ctx->knlc_knote == kn) {
			return ctx;
		}
	}
	panic("knote lock context not found: %p", kn);
	__builtin_unreachable();
}
/* slowpath of knote_lock() */
__attribute__((noinline))
static bool __result_use_check
knote_lock_slow(kqueue_t kqu, struct knote *kn,
    struct knote_lock_ctx *knlc, int kqlocking)
{
	struct knote_lock_ctx *owner_lc;
	struct uthread *uth = current_uthread();
	wait_result_t wr;

	kqlock_held(kqu);

	owner_lc = knote_lock_ctx_find(kqu, kn);
#if DEBUG || DEVELOPMENT
	knlc->knlc_state = KNOTE_LOCK_CTX_WAITING;
#endif
	owner_lc->knlc_waiters++;

	/*
	 * Make our lock context visible to knote_unlock()
	 */
	uth->uu_knlock = knlc;

	wr = lck_spin_sleep_with_inheritor(&kqu.kq->kq_lock, LCK_SLEEP_UNLOCK,
	    knote_lock_wev(kn), owner_lc->knlc_thread,
	    THREAD_UNINT | THREAD_WAIT_NOREPORT, TIMEOUT_WAIT_FOREVER);

	if (wr == THREAD_RESTART) {
		/*
		 * We haven't been woken up by knote_unlock() but knote_unlock_cancel.
		 * We need to cleanup the state since no one did.
		 */
		uth->uu_knlock = NULL;
#if DEBUG || DEVELOPMENT
		assert(knlc->knlc_state == KNOTE_LOCK_CTX_WAITING);
		knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
#endif
		if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
		    kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
			kqlock(kqu);
		}
		return false;
	} else {
		if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
		    kqlocking == KNOTE_KQ_LOCK_ON_SUCCESS) {
			kqlock(kqu);
#if DEBUG || DEVELOPMENT
			/*
			 * This state is set under the lock so we can't
			 * really assert this unless we hold the lock.
			 */
			assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
#endif
		}
		return true;
	}
}
/*
 * Attempts to take the "knote" lock.
 *
 * Called with the kqueue lock held.
 *
 * Returns true if the knote lock is acquired, false if it has been dropped
 */
static bool __result_use_check
knote_lock(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc,
    enum kqlocking kqlocking)
{
	kqlock_held(kqu);

#if DEBUG || DEVELOPMENT
	assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED);
#endif
	knlc->knlc_knote = kn;
	knlc->knlc_thread = current_thread();
	knlc->knlc_waiters = 0;

	if (__improbable(kn->kn_status & KN_LOCKED)) {
		return knote_lock_slow(kqu, kn, knlc, kqlocking);
	}

	/*
	 * When the knote will be dropped, the knote lock is taken before
	 * KN_DROPPING is set, and then the knote will be removed from any
	 * hash table that references it before the lock is canceled.
	 */
	assert((kn->kn_status & KN_DROPPING) == 0);
	LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, knlc, knlc_link);
	kn->kn_status |= KN_LOCKED;
#if DEBUG || DEVELOPMENT
	knlc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
#endif

	if (kqlocking == KNOTE_KQ_UNLOCK ||
	    kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
		kqunlock(kqu);
	}
	return true;
}
/*
 * Unlocks a knote successfully locked with knote_lock().
 *
 * Called with the kqueue lock held.
 *
 * Returns with the kqueue lock held according to KNOTE_KQ_* mode.
 */
static void
knote_unlock(kqueue_t kqu, struct knote *kn,
    struct knote_lock_ctx *knlc, enum kqlocking kqlocking)
{
	kqlock_held(kqu);

	assert(knlc->knlc_knote == kn);
	assert(kn->kn_status & KN_LOCKED);
#if DEBUG || DEVELOPMENT
	assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
#endif

	LIST_REMOVE(knlc, knlc_link);

	if (knlc->knlc_waiters) {
		thread_t thread = THREAD_NULL;

		wakeup_one_with_inheritor(knote_lock_wev(kn), THREAD_AWAKENED,
		    LCK_WAKE_DEFAULT, &thread);

		/*
		 * knote_lock_slow() publishes the lock context of waiters
		 * in uthread::uu_knlock.
		 *
		 * Reach out and make this context the new owner.
		 */
		struct uthread *ut = get_bsdthread_info(thread);
		struct knote_lock_ctx *next_owner_lc = ut->uu_knlock;

		assert(next_owner_lc->knlc_knote == kn);
		next_owner_lc->knlc_waiters = knlc->knlc_waiters - 1;
		LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, next_owner_lc, knlc_link);
#if DEBUG || DEVELOPMENT
		next_owner_lc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
#endif
		ut->uu_knlock = NULL;
		thread_deallocate_safe(thread);
	} else {
		kn->kn_status &= ~KN_LOCKED;
	}

	if ((kn->kn_status & KN_MERGE_QOS) && !(kn->kn_status & KN_POSTING)) {
		/*
		 * No f_event() in flight anymore, we can leave QoS "Merge" mode
		 *
		 * See knote_adjust_qos()
		 */
		kn->kn_status &= ~KN_MERGE_QOS;
	}
	if (kqlocking == KNOTE_KQ_UNLOCK) {
		kqunlock(kqu);
	}
#if DEBUG || DEVELOPMENT
	knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
#endif
}
/*
 * Aborts all waiters for a knote lock, and unlock the knote.
 *
 * Called with the kqueue lock held.
 *
 * Returns with the kqueue unlocked.
 */
static void
knote_unlock_cancel(struct kqueue *kq, struct knote *kn,
    struct knote_lock_ctx *knlc)
{
	kqlock_held(kq);

	assert(knlc->knlc_knote == kn);
	assert(kn->kn_status & KN_LOCKED);
	assert(kn->kn_status & KN_DROPPING);

	LIST_REMOVE(knlc, knlc_link);
	kn->kn_status &= ~KN_LOCKED;
	kqunlock(kq);

	if (knlc->knlc_waiters) {
		wakeup_all_with_inheritor(knote_lock_wev(kn), THREAD_RESTART);
	}
#if DEBUG || DEVELOPMENT
	knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
#endif
}
/*
 * Call the f_event hook of a given filter.
 *
 * Takes a use count to protect against concurrent drops.
 */
static void
knote_post(struct knote *kn, long hint)
{
	struct kqueue *kq = knote_get_kq(kn);
	int dropping, result;

	kqlock(kq);

	if (__improbable(kn->kn_status & (KN_DROPPING | KN_VANISHED))) {
		return kqunlock(kq);
	}

	if (__improbable(kn->kn_status & KN_POSTING)) {
		panic("KNOTE() called concurrently on knote %p", kn);
	}

	kn->kn_status |= KN_POSTING;

	kqunlock(kq);
	result = filter_call(knote_fops(kn), f_event(kn, hint));
	kqlock(kq);

	dropping = (kn->kn_status & KN_DROPPING);

	if (!dropping && (result & FILTER_ACTIVE)) {
		knote_activate(kq, kn, result);
	}

	if ((kn->kn_status & KN_LOCKED) == 0) {
		/*
		 * There's no other f_* call in flight, we can leave QoS "Merge" mode.
		 *
		 * See knote_adjust_qos()
		 */
		kn->kn_status &= ~(KN_POSTING | KN_MERGE_QOS);
	} else {
		kn->kn_status &= ~KN_POSTING;
	}

	if (__improbable(dropping)) {
		waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, knote_post_wev64(kn),
		    THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
	}

	kqunlock(kq);
}
/*
 * Called by knote_drop() to wait for the last f_event() caller to be done.
 *
 *	- kq locked at entry
 *	- kq unlocked at exit
 */
static void
knote_wait_for_post(struct kqueue *kq, struct knote *kn)
{
	wait_result_t wr = THREAD_NOT_WAITING;

	kqlock_held(kq);

	assert(kn->kn_status & KN_DROPPING);

	if (kn->kn_status & KN_POSTING) {
		wr = waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
		    knote_post_wev64(kn), THREAD_UNINT | THREAD_WAIT_NOREPORT,
		    TIMEOUT_WAIT_FOREVER);
	}
	kqunlock(kq);
	if (wr == THREAD_WAITING) {
		thread_block(THREAD_CONTINUE_NULL);
	}
}
821 knote_set_error(struct knote
*kn
, int error
)
823 kn
->kn_flags
|= EV_ERROR
;
824 kn
->kn_sdata
= error
;
829 knote_low_watermark(const struct knote
*kn
)
831 return (kn
->kn_sfflags
& NOTE_LOWAT
) ? kn
->kn_sdata
: 1;
/*!
 * @function knote_fill_kevent_with_sdata
 *
 * @brief
 * Fills in a kevent from the current content of a knote.
 *
 * @discussion
 * This is meant to be called from filter's f_event hooks.
 * The kevent data is filled with kn->kn_sdata.
 *
 * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
 *
 * Using knote_fill_kevent is typically preferred.
 */
OS_ALWAYS_INLINE
void
knote_fill_kevent_with_sdata(struct knote *kn, struct kevent_qos_s *kev)
{
#define knote_assert_aliases(name1, offs1, name2) \
	static_assert(offsetof(struct kevent_qos_s, name1) + offs1 == \
	    offsetof(struct kevent_internal_s, name2), \
	    "kevent_qos_s::" #name1 " and kevent_internal_s::" #name2 " need to alias")
	/*
	 * All the code makes assumptions on these aliasing,
	 * so make sure we fail the build if we ever ever ever break them.
	 */
	knote_assert_aliases(ident, 0, kei_ident);
#ifdef __LITTLE_ENDIAN__
	knote_assert_aliases(filter, 0, kei_filter);  // non trivial overlap
	knote_assert_aliases(filter, 1, kei_filtid);  // non trivial overlap
#else
	knote_assert_aliases(filter, 0, kei_filtid);  // non trivial overlap
	knote_assert_aliases(filter, 1, kei_filter);  // non trivial overlap
#endif
	knote_assert_aliases(flags, 0, kei_flags);
	knote_assert_aliases(qos, 0, kei_qos);
	knote_assert_aliases(udata, 0, kei_udata);
	knote_assert_aliases(fflags, 0, kei_fflags);
	knote_assert_aliases(xflags, 0, kei_sfflags); // non trivial overlap
	knote_assert_aliases(data, 0, kei_sdata);     // non trivial overlap
	knote_assert_aliases(ext, 0, kei_ext);
#undef knote_assert_aliases

	/*
	 * Fix the differences between kevent_qos_s and kevent_internal_s:
	 * - xflags is where kn_sfflags lives, we need to zero it
	 * - fixup the high bits of `filter` where kn_filtid lives
	 */
	*kev = *(struct kevent_qos_s *)&kn->kn_kevent;
	kev->xflags = 0;
	kev->filter |= 0xff00;
	if (kn->kn_flags & EV_CLEAR) {
		kn->kn_fflags = 0;
	}
}
/*!
 * @function knote_fill_kevent
 *
 * @brief
 * Fills in a kevent from the current content of a knote.
 *
 * @discussion
 * This is meant to be called from filter's f_event hooks.
 * The kevent data is filled with the passed in data.
 *
 * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
 */
OS_ALWAYS_INLINE
void
knote_fill_kevent(struct knote *kn, struct kevent_qos_s *kev, int64_t data)
{
	knote_fill_kevent_with_sdata(kn, kev);
	kev->filter = kn->kn_filter;
	kev->data = data;
}
#pragma mark file_filtops

static int
filt_fileattach(struct knote *kn, struct kevent_qos_s *kev)
{
	return fo_kqfilter(kn->kn_fp, kn, kev);
}

SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = {
	.f_isfd   = 1,
	.f_attach = filt_fileattach,
};
#pragma mark kqread_filtops

#define f_flag fp_glob->fg_flag
#define f_ops fp_glob->fg_ops
#define f_data fp_glob->fg_data
#define f_lflags fp_glob->fg_lflags

static void
filt_kqdetach(struct knote *kn)
{
	struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
	struct kqueue *kq = &kqf->kqf_kqueue;

	kqlock(kq);
	KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
	kqunlock(kq);
}

static int
filt_kqueue(struct knote *kn, __unused long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	return kq->kq_count > 0;
}
static int
filt_kqtouch(struct knote *kn, struct kevent_qos_s *kev)
{
#pragma unused(kev)
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
	int res;

	kqlock(kq);
	res = (kq->kq_count > 0);
	kqunlock(kq);

	return res;
}
static int
filt_kqprocess(struct knote *kn, struct kevent_qos_s *kev)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
	int res = 0;

	kqlock(kq);
	if (kq->kq_count) {
		knote_fill_kevent(kn, kev, kq->kq_count);
		res = 1;
	}
	kqunlock(kq);

	return res;
}
SECURITY_READ_ONLY_EARLY(static struct filterops) kqread_filtops = {
	.f_isfd    = 1,
	.f_detach  = filt_kqdetach,
	.f_event   = filt_kqueue,
	.f_touch   = filt_kqtouch,
	.f_process = filt_kqprocess,
};
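/*
 * Illustrative userspace usage (not part of the original file): a kqueue
 * can itself be watched with EVFILT_READ, which fires while kq_count > 0,
 * i.e. while events are pending on the inner queue:
 *
 *	int inner = kqueue(), outer = kqueue();
 *	struct kevent ev;
 *	EV_SET(&ev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(outer, &ev, 1, NULL, 0, NULL); // fires when inner has events
 */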
#pragma mark proc_filtops

static int
filt_procattach(struct knote *kn, __unused struct kevent_qos_s *kev)
{
	struct proc *p;

	assert(PID_MAX < NOTE_PDATAMASK);

	if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) {
		knote_set_error(kn, ENOTSUP);
		return 0;
	}

	p = proc_find((int)kn->kn_id);
	if (p == NULL) {
		knote_set_error(kn, ESRCH);
		return 0;
	}

	const uint32_t NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;

	if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits) {
		do {
			pid_t selfpid = proc_selfpid();

			if (p->p_ppid == selfpid) {
				break;  /* parent => ok */
			}
			if ((p->p_lflag & P_LTRACED) != 0 &&
			    (p->p_oppid == selfpid)) {
				break;  /* parent-in-waiting => ok */
			}
			if (cansignal(current_proc(), kauth_cred_get(), p, SIGKILL)) {
				break; /* allowed to signal => ok */
			}
			proc_rele(p);
			knote_set_error(kn, EACCES);
			return 0;
		} while (0);
	}

	kn->kn_proc = p;
	kn->kn_flags |= EV_CLEAR;       /* automatically set */
	kn->kn_sdata = 0;               /* incoming data is ignored */

	proc_klist_lock();

	KNOTE_ATTACH(&p->p_klist, kn);

	proc_klist_unlock();

	proc_rele(p);

	/*
	 * only captures edge-triggered events after this point
	 * so it can't already be fired.
	 */
	return 0;
}
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to. In that case,
 * the pointer to the process will have already been nulled out.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	proc_klist_lock();

	p = kn->kn_proc;
	if (p != PROC_NULL) {
		kn->kn_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}

	proc_klist_unlock();
}
static int
filt_procevent(struct knote *kn, long hint)
{
	u_int event;

	/* ALWAYS CALLED WITH proc_klist_lock */

	/*
	 * Note: a lot of bits in hint may be obtained from the knote
	 * To free some of those bits, see <rdar://problem/12592988> Freeing up
	 * bits in hint for filt_procevent
	 *
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * termination lifecycle events can happen while a debugger
	 * has reparented a process, in which case notifications
	 * should be quashed except to the tracing parent. When
	 * the debugger reaps the child (either via wait4(2) or
	 * process exit), the child will be reparented to the original
	 * parent and these knotes re-fired.
	 */
	if (event & NOTE_EXIT) {
		if ((kn->kn_proc->p_oppid != 0)
		    && (knote_get_kq(kn)->kq_p->p_pid != kn->kn_proc->p_ppid)) {
			/*
			 * This knote is not for the current ptrace(2) parent, ignore.
			 */
			return 0;
		}
	}

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event) {
		kn->kn_fflags |= event;
	}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
	}
#pragma clang diagnostic pop

	/*
	 * The kernel has a wrapper in place that returns the same data
	 * as is collected here, in kn_hook32. Any changes to how
	 * NOTE_EXITSTATUS and NOTE_EXIT_DETAIL are collected
	 * should also be reflected in the proc_pidnoteexit() wrapper.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_hook32 = 0;
		if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
			kn->kn_fflags |= NOTE_EXITSTATUS;
			kn->kn_hook32 |= (hint & NOTE_PDATAMASK);
		}
		if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
			kn->kn_fflags |= NOTE_EXIT_DETAIL;
			if ((kn->kn_proc->p_lflag &
			    P_LTERM_DECRYPTFAIL) != 0) {
				kn->kn_hook32 |= NOTE_EXIT_DECRYPTFAIL;
			}
			if ((kn->kn_proc->p_lflag &
			    P_LTERM_JETSAM) != 0) {
				kn->kn_hook32 |= NOTE_EXIT_MEMORY;
				switch (kn->kn_proc->p_lflag & P_JETSAM_MASK) {
				case P_JETSAM_VMPAGESHORTAGE:
					kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
					break;
				case P_JETSAM_VMTHRASHING:
					kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMTHRASHING;
					break;
				case P_JETSAM_FCTHRASHING:
					kn->kn_hook32 |= NOTE_EXIT_MEMORY_FCTHRASHING;
					break;
				case P_JETSAM_VNODE:
					kn->kn_hook32 |= NOTE_EXIT_MEMORY_VNODE;
					break;
				case P_JETSAM_HIWAT:
					kn->kn_hook32 |= NOTE_EXIT_MEMORY_HIWAT;
					break;
				case P_JETSAM_PID:
					kn->kn_hook32 |= NOTE_EXIT_MEMORY_PID;
					break;
				case P_JETSAM_IDLEEXIT:
					kn->kn_hook32 |= NOTE_EXIT_MEMORY_IDLE;
					break;
				}
			}
			if ((kn->kn_proc->p_csflags &
			    CS_KILLED) != 0) {
				kn->kn_hook32 |= NOTE_EXIT_CSERROR;
			}
		}
	}

	/* if we have any matching state, activate the knote */
	return kn->kn_fflags != 0;
}
static int
filt_proctouch(struct knote *kn, struct kevent_qos_s *kev)
{
	int res;

	proc_klist_lock();

	/* accept new filter flags and mask off output events no longer interesting */
	kn->kn_sfflags = kev->fflags;

	/* restrict the current results to the (smaller?) set of new interest */
	/*
	 * For compatibility with previous implementations, we leave kn_fflags
	 * as they were before.
	 */
	//kn->kn_fflags &= kn->kn_sfflags;

	res = (kn->kn_fflags != 0);

	proc_klist_unlock();

	return res;
}
static int
filt_procprocess(struct knote *kn, struct kevent_qos_s *kev)
{
	int res = 0;

	proc_klist_lock();
	if (kn->kn_fflags) {
		knote_fill_kevent(kn, kev, kn->kn_hook32);
		kn->kn_hook32 = 0;
		res = 1;
	}
	proc_klist_unlock();
	return res;
}
) proc_filtops
= {
1217 .f_attach
= filt_procattach
,
1218 .f_detach
= filt_procdetach
,
1219 .f_event
= filt_procevent
,
1220 .f_touch
= filt_proctouch
,
1221 .f_process
= filt_procprocess
,
1224 #pragma mark timer_filtops
1226 struct filt_timer_params
{
1227 uint64_t deadline
; /* deadline in abs/cont time
1228 * (or 0 if NOTE_ABSOLUTE and deadline is in past) */
1229 uint64_t leeway
; /* leeway in abstime, or 0 if none */
1230 uint64_t interval
; /* interval in abstime or 0 if non-repeating timer */
1234 * Values stored in the knote at rest (using Mach absolute time units)
1236 * kn->kn_thcall where the thread_call object is stored
1237 * kn->kn_ext[0] next deadline or 0 if immediate expiration
1238 * kn->kn_ext[1] leeway value
1239 * kn->kn_sdata interval timer: the interval
1240 * absolute/deadline timer: 0
1241 * kn->kn_hook32 timer state (with gencount)
1244 * The timer has either never been scheduled or been cancelled.
1245 * It is safe to schedule a new one in this state.
1248 * The timer has been scheduled
1251 * The timer has fired and an event needs to be delivered.
1252 * When in this state, the callout may still be running.
1255 * The timer has fired at registration time, and the callout was never
1258 #define TIMER_IDLE 0x0
1259 #define TIMER_ARMED 0x1
1260 #define TIMER_FIRED 0x2
1261 #define TIMER_IMMEDIATE 0x3
1262 #define TIMER_STATE_MASK 0x3
1263 #define TIMER_GEN_INC 0x4
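/*
 * Worked example (editorial, not from the original source): the state
 * word keeps a generation count above TIMER_STATE_MASK. Starting from
 * TIMER_IDLE (0x0), filt_timerarm() does `state += TIMER_GEN_INC +
 * TIMER_ARMED`, giving 0x5 (gen 1, ARMED). When that callout fires,
 * filt_timerexpire() flips ARMED->FIRED with `state ^ TIMER_ARMED ^
 * TIMER_FIRED`, giving 0x6 (gen 1, FIRED). A stale callout carrying an
 * old gencount fails its cmpxchg and is ignored.
 */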
static void
filt_timer_set_params(struct knote *kn, struct filt_timer_params *params)
{
	kn->kn_ext[0] = params->deadline;
	kn->kn_ext[1] = params->leeway;
	kn->kn_sdata = params->interval;
}
/*
 * filt_timervalidate - process data from user
 *
 * Sets up the deadline, interval, and leeway from the provided user data
 *
 * Input:
 *	kn_sdata	timer deadline or interval time
 *	kn_sfflags	style of timer, unit of measurement
 *
 * Output:
 *	struct filter_timer_params to apply to the filter with
 *	filt_timer_set_params when changes are ready to be committed.
 *
 * Returns:
 *	EINVAL		Invalid user data parameters
 *	ERANGE		Various overflows with the parameters
 *
 * Called with timer filter lock held.
 */
static int
filt_timervalidate(const struct kevent_qos_s *kev,
    struct filt_timer_params *params)
{
	/*
	 * There are 5 knobs that need to be chosen for a timer registration:
	 *
	 * A) Units of time (what is the time duration of the specified number)
	 *	Absolute and interval take:
	 *		NOTE_SECONDS, NOTE_USECONDS, NOTE_NSECONDS, NOTE_MACHTIME
	 *	Defaults to milliseconds if not specified
	 *
	 * B) Clock epoch (what is the zero point of the specified number)
	 *	For interval, there is none
	 *	For absolute, defaults to the gettimeofday/calendar epoch
	 *	With NOTE_MACHTIME, uses mach_absolute_time()
	 *	With NOTE_MACHTIME and NOTE_MACH_CONTINUOUS_TIME, uses mach_continuous_time()
	 *
	 * C) The knote's behavior on delivery
	 *	Interval timer causes the knote to arm for the next interval unless one-shot is set
	 *	Absolute is a forced one-shot timer which deletes on delivery
	 *	TODO: Add a way for absolute to be not forced one-shot
	 *
	 * D) Whether the time duration is relative to now or absolute
	 *	Interval fires at now + duration when it is set up
	 *	Absolute fires at now + difference between now walltime and passed in walltime
	 *	With NOTE_MACHTIME it fires at an absolute MAT or MCT.
	 *
	 * E) Whether the timer continues to tick across sleep
	 *	By default all three do not.
	 *	For interval and absolute, NOTE_MACH_CONTINUOUS_TIME causes them to tick across sleep
	 *	With NOTE_ABSOLUTE | NOTE_MACHTIME | NOTE_MACH_CONTINUOUS_TIME:
	 *		expires when mach_continuous_time() is > the passed in value.
	 */

	uint64_t multiplier;

	boolean_t use_abstime = FALSE;

	switch (kev->fflags & (NOTE_SECONDS | NOTE_USECONDS | NOTE_NSECONDS | NOTE_MACHTIME)) {
	case NOTE_SECONDS:
		multiplier = NSEC_PER_SEC;
		break;
	case NOTE_USECONDS:
		multiplier = NSEC_PER_USEC;
		break;
	case NOTE_NSECONDS:
		multiplier = 1;
		break;
	case NOTE_MACHTIME:
		multiplier = 0;
		use_abstime = TRUE;
		break;
	case 0: /* milliseconds (default) */
		multiplier = NSEC_PER_SEC / 1000;
		break;
	default:
		return EINVAL;
	}

	/* transform the leeway in kn_ext[1] to same time scale */
	if (kev->fflags & NOTE_LEEWAY) {
		uint64_t leeway_abs;

		if (use_abstime) {
			leeway_abs = (uint64_t)kev->ext[1];
		} else {
			uint64_t leeway_ns;
			if (os_mul_overflow((uint64_t)kev->ext[1], multiplier, &leeway_ns)) {
				return ERANGE;
			}

			nanoseconds_to_absolutetime(leeway_ns, &leeway_abs);
		}

		params->leeway = leeway_abs;
	} else {
		params->leeway = 0;
	}

	if (kev->fflags & NOTE_ABSOLUTE) {
		uint64_t deadline_abs;

		if (use_abstime) {
			deadline_abs = (uint64_t)kev->data;
		} else {
			uint64_t calendar_deadline_ns;

			if (os_mul_overflow((uint64_t)kev->data, multiplier, &calendar_deadline_ns)) {
				return ERANGE;
			}

			/* calendar_deadline_ns is in nanoseconds since the epoch */

			clock_sec_t seconds;
			clock_nsec_t nanoseconds;

			/*
			 * Note that the conversion through wall-time is only done once.
			 *
			 * If the relationship between MAT and gettimeofday changes,
			 * the underlying timer does not update.
			 *
			 * TODO: build a wall-time denominated timer_call queue
			 * and a flag to request DTRTing with wall-time timers
			 */
			clock_get_calendar_nanotime(&seconds, &nanoseconds);

			uint64_t calendar_now_ns = (uint64_t)seconds * NSEC_PER_SEC + nanoseconds;

			/* if deadline is in the future */
			if (calendar_now_ns < calendar_deadline_ns) {
				uint64_t interval_ns = calendar_deadline_ns - calendar_now_ns;
				uint64_t interval_abs;

				nanoseconds_to_absolutetime(interval_ns, &interval_abs);

				/*
				 * Note that the NOTE_MACH_CONTINUOUS_TIME flag here only
				 * causes the timer to keep ticking across sleep, but
				 * it does not change the calendar timebase.
				 */

				if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
					clock_continuoustime_interval_to_deadline(interval_abs,
					    &deadline_abs);
				} else {
					clock_absolutetime_interval_to_deadline(interval_abs,
					    &deadline_abs);
				}
			} else {
				deadline_abs = 0; /* cause immediate expiration */
			}
		}

		params->deadline = deadline_abs;
		params->interval = 0; /* NOTE_ABSOLUTE is non-repeating */
	} else if (kev->data < 0) {
		/*
		 * Negative interval timers fire immediately, once.
		 *
		 * Ideally a negative interval would be an error, but certain clients
		 * pass negative values on accident, and expect an event back.
		 *
		 * In the old implementation the timer would repeat with no delay
		 * N times until mach_absolute_time() + (N * interval) underflowed,
		 * then it would wait ~forever by accidentally arming a timer for the far future.
		 *
		 * We now skip the power-wasting hot spin phase and go straight to the idle phase.
		 */

		params->deadline = 0; /* expire immediately */
		params->interval = 0; /* non-repeating */
	} else {
		uint64_t interval_abs = 0;

		if (use_abstime) {
			interval_abs = (uint64_t)kev->data;
		} else {
			uint64_t interval_ns;
			if (os_mul_overflow((uint64_t)kev->data, multiplier, &interval_ns)) {
				return ERANGE;
			}

			nanoseconds_to_absolutetime(interval_ns, &interval_abs);
		}

		uint64_t deadline = 0;

		if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
			clock_continuoustime_interval_to_deadline(interval_abs, &deadline);
		} else {
			clock_absolutetime_interval_to_deadline(interval_abs, &deadline);
		}

		params->deadline = deadline;
		params->interval = interval_abs;
	}

	return 0;
}
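/*
 * Worked example (editorial, not from the original source): a repeating
 * 100ms timer registered with fflags == 0 (milliseconds by default) and
 * data == 100 takes the final branch above: multiplier = NSEC_PER_SEC /
 * 1000 = 1000000, interval_ns = 100 * 1000000 = 100000000ns, which is
 * converted to abstime and armed at now + interval. With NOTE_ABSOLUTE
 * instead, data is interpreted against the calendar clock, and a deadline
 * already in the past yields deadline == 0, i.e. immediate expiration.
 */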
/*
 * filt_timerexpire - the timer callout routine
 */
static void
filt_timerexpire(void *knx, void *state_on_arm)
{
	struct knote *kn = knx;

	uint32_t state = (uint32_t)(uintptr_t)state_on_arm;
	uint32_t fired_state = state ^ TIMER_ARMED ^ TIMER_FIRED;

	if (os_atomic_cmpxchg(&kn->kn_hook32, state, fired_state, relaxed)) {
		// our f_event always would say FILTER_ACTIVE,
		// so be leaner and just do it.
		struct kqueue *kq = knote_get_kq(kn);
		kqlock(kq);
		knote_activate(kq, kn, FILTER_ACTIVE);
		kqunlock(kq);
	} else {
		/*
		 * The timer has been reprogrammed or canceled since it was armed,
		 * and this is a late firing for the timer, just ignore it.
		 */
	}
}
/*
 * Does this deadline need a timer armed for it, or has it expired?
 */
static bool
filt_timer_is_ready(struct knote *kn)
{
	uint64_t now, deadline = kn->kn_ext[0];

	if (deadline == 0) {
		return true;
	}

	if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
		now = mach_continuous_time();
	} else {
		now = mach_absolute_time();
	}
	return deadline <= now;
}
/*
 * Arm a timer
 *
 * It is the responsibility of the caller to make sure the timer call
 * has completed or been cancelled properly prior to arming it.
 */
static void
filt_timerarm(struct knote *kn)
{
	uint64_t deadline = kn->kn_ext[0];
	uint64_t leeway = kn->kn_ext[1];
	uint32_t state;

	int filter_flags = kn->kn_sfflags;
	unsigned int timer_flags = 0;

	if (filter_flags & NOTE_CRITICAL) {
		timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
	} else if (filter_flags & NOTE_BACKGROUND) {
		timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
	} else {
		timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
	}

	if (filter_flags & NOTE_LEEWAY) {
		timer_flags |= THREAD_CALL_DELAY_LEEWAY;
	}

	if (filter_flags & NOTE_MACH_CONTINUOUS_TIME) {
		timer_flags |= THREAD_CALL_CONTINUOUS;
	}

	/*
	 * Move to ARMED.
	 *
	 * We increase the gencount, and setup the thread call with this expected
	 * state. It means that if there was a previous generation of the timer in
	 * flight that needs to be ignored, then 3 things are possible:
	 *
	 * - the timer fires first, filt_timerexpire() and sets the state to FIRED
	 *   but we clobber it with ARMED and a new gencount. The knote will still
	 *   be activated, but filt_timerprocess() which is serialized with this
	 *   call will not see the FIRED bit set and will not deliver an event.
	 *
	 * - this code runs first, but filt_timerexpire() comes second. Because it
	 *   knows an old gencount, it will debounce and not activate the knote.
	 *
	 * - filt_timerexpire() wasn't in flight yet, and thread_call_enter below
	 *   will just cancel it properly.
	 *
	 * This is important as userspace expects to never be woken up for past
	 * timers after filt_timertouch ran.
	 */
	state = os_atomic_load(&kn->kn_hook32, relaxed);
	state &= ~TIMER_STATE_MASK;
	state += TIMER_GEN_INC + TIMER_ARMED;
	os_atomic_store(&kn->kn_hook32, state, relaxed);

	thread_call_enter_delayed_with_leeway(kn->kn_thcall,
	    (void *)(uintptr_t)state, deadline, leeway, timer_flags);
}
/*
 * Mark a timer as "already fired" when it is being reprogrammed
 *
 * If there is a timer in flight, this will do a best effort at canceling it,
 * but will not wait. If the thread call was in flight, having set the
 * TIMER_IMMEDIATE bit will debounce a filt_timerexpire() racing with this
 * cancelation.
 */
static void
filt_timerfire_immediate(struct knote *kn)
{
	uint32_t state;

	static_assert(TIMER_IMMEDIATE == TIMER_STATE_MASK,
	    "validate that this atomic or will transition to IMMEDIATE");
	state = os_atomic_or_orig(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);

	if ((state & TIMER_STATE_MASK) == TIMER_ARMED) {
		thread_call_cancel(kn->kn_thcall);
	}
}
/*
 * Allocate a thread call for the knote's lifetime, and kick off the timer.
 */
static int
filt_timerattach(struct knote *kn, struct kevent_qos_s *kev)
{
	thread_call_t callout;
	struct filt_timer_params params;
	int error;

	if ((error = filt_timervalidate(kev, &params)) != 0) {
		knote_set_error(kn, error);
		return 0;
	}

	callout = thread_call_allocate_with_options(filt_timerexpire,
	    (thread_call_param_t)kn, THREAD_CALL_PRIORITY_HIGH,
	    THREAD_CALL_OPTIONS_ONCE);

	if (NULL == callout) {
		knote_set_error(kn, ENOMEM);
		return 0;
	}

	filt_timer_set_params(kn, &params);
	kn->kn_thcall = callout;
	kn->kn_flags |= EV_CLEAR;
	os_atomic_store(&kn->kn_hook32, TIMER_IDLE, relaxed);

	/* NOTE_ABSOLUTE implies EV_ONESHOT */
	if (kn->kn_sfflags & NOTE_ABSOLUTE) {
		kn->kn_flags |= EV_ONESHOT;
	}

	if (filt_timer_is_ready(kn)) {
		os_atomic_store(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
		return FILTER_ACTIVE;
	} else {
		filt_timerarm(kn);
		return 0;
	}
}
1651 filt_timerdetach(struct knote
*kn
)
1653 __assert_only boolean_t freed
;
1656 * Unconditionally cancel to make sure there can't be any filt_timerexpire()
1659 thread_call_cancel_wait(kn
->kn_thcall
);
1660 freed
= thread_call_free(kn
->kn_thcall
);
/*
 * filt_timertouch - update timer knote with new user input
 *
 * Cancel and restart the timer based on new user data. When
 * the user picks up a knote, clear the count of how many timer
 * pops have gone off (in kn_data).
 */
static int
filt_timertouch(struct knote *kn, struct kevent_qos_s *kev)
{
	struct filt_timer_params params;
	uint32_t changed_flags = (kn->kn_sfflags ^ kev->fflags);
	int error;

	if (changed_flags & NOTE_ABSOLUTE) {
		kev->flags |= EV_ERROR;
		kev->data = EINVAL;
		return 0;
	}

	if ((error = filt_timervalidate(kev, &params)) != 0) {
		kev->flags |= EV_ERROR;
		kev->data = error;
		return 0;
	}

	/* capture the new values used to compute deadline */
	filt_timer_set_params(kn, &params);
	kn->kn_sfflags = kev->fflags;

	if (filt_timer_is_ready(kn)) {
		filt_timerfire_immediate(kn);
		return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS;
	} else {
		filt_timerarm(kn);
		return FILTER_UPDATE_REQ_QOS;
	}
}
/*
 * filt_timerprocess - query state of knote and snapshot event data
 *
 * Determine if the timer has fired in the past, snapshot the state
 * of the kevent for returning to user-space, and clear pending event
 * counters for the next time.
 */
static int
filt_timerprocess(struct knote *kn, struct kevent_qos_s *kev)
{
	uint32_t state = os_atomic_load(&kn->kn_hook32, relaxed);

	/*
	 * filt_timerprocess is serialized with any filter routine except for
	 * filt_timerexpire which atomically does a TIMER_ARMED -> TIMER_FIRED
	 * transition, and on success, activates the knote.
	 *
	 * Hence, we don't need atomic modifications of the state, only to peek at
	 * whether we see any of the "FIRED" state, and if we do, it is safe to
	 * do simple state machine transitions.
	 */
	switch (state & TIMER_STATE_MASK) {
	case TIMER_IDLE:
	case TIMER_ARMED:
		/*
		 * This can happen if a touch resets a timer that had fired
		 * without being processed
		 */
		return 0;
	}

	os_atomic_store(&kn->kn_hook32, state & ~TIMER_STATE_MASK, relaxed);

	/*
	 * Copy out the interesting kevent state,
	 * but don't leak out the raw time calculations.
	 *
	 * TODO: potential enhancements - tell the user about:
	 *      - deadline to which this timer thought it was expiring
	 *      - return kn_sfflags in the fflags field so the client can know
	 *        under what flags the timer fired
	 */
	knote_fill_kevent(kn, kev, 1);
	kev->ext[0] = 0;
	/* kev->ext[1] = 0;  JMM - shouldn't we hide this too? */

	if (kn->kn_sdata != 0) {
		/*
		 * This is a 'repeating' timer, so we have to emit
		 * how many intervals expired between the arm
		 * and the process.
		 *
		 * A very strange style of interface, because
		 * this could easily be done in the client...
		 */

		uint64_t now;

		if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
			now = mach_continuous_time();
		} else {
			now = mach_absolute_time();
		}

		uint64_t first_deadline = kn->kn_ext[0];
		uint64_t interval_abs = kn->kn_sdata;
		uint64_t orig_arm_time = first_deadline - interval_abs;

		assert(now > orig_arm_time);
		assert(now > first_deadline);

		uint64_t elapsed = now - orig_arm_time;

		uint64_t num_fired = elapsed / interval_abs;

		/*
		 * To reach this code, we must have seen the timer pop
		 * and be in repeating mode, so therefore it must have been
		 * more than 'interval' time since the attach or last
		 * successful touch.
		 */
		assert(num_fired > 0);

		/* report how many intervals have elapsed to the user */
		kev->data = (int64_t)num_fired;

		/* We only need to re-arm the timer if it's not about to be destroyed */
		if ((kn->kn_flags & EV_ONESHOT) == 0) {
			/* fire at the end of the next interval */
			uint64_t new_deadline = first_deadline + num_fired * interval_abs;

			assert(new_deadline > now);

			kn->kn_ext[0] = new_deadline;

			/*
			 * This can't shortcut setting up the thread call, because
			 * knote_process deactivates EV_CLEAR knotes unconditionally.
			 */
			filt_timerarm(kn);
		}
	}

	return FILTER_ACTIVE;
}
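/*
 * Worked example (editorial, not from the original source): suppose a
 * repeating timer was armed at T with interval I, so first_deadline ==
 * T + I, and userspace only processes it at now == T + 3.5*I. Then
 * orig_arm_time == T, elapsed == 3.5*I, num_fired == 3 is reported in
 * kev->data, and the knote re-arms for new_deadline == T + 4*I.
 */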
SECURITY_READ_ONLY_EARLY(static struct filterops) timer_filtops = {
	.f_extended_codes = true,
	.f_attach  = filt_timerattach,
	.f_detach  = filt_timerdetach,
	.f_event   = filt_bad_event,
	.f_touch   = filt_timertouch,
	.f_process = filt_timerprocess,
};
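/*
 * Illustrative userspace usage (not part of the original file): a
 * one-second repeating timer with 50ms of allowed leeway, using the
 * 64-bit interface since the leeway travels in ext[1]:
 *
 *	struct kevent64_s ev;
 *	EV_SET64(&ev, 1, EVFILT_TIMER, EV_ADD,
 *	    NOTE_USECONDS | NOTE_LEEWAY, 1000000, 0, 0, 50000);
 *	kevent64(kq, &ev, 1, NULL, 0, 0, NULL);
 */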
#pragma mark user_filtops

static int
filt_userattach(struct knote *kn, __unused struct kevent_qos_s *kev)
{
	if (kn->kn_sfflags & NOTE_TRIGGER) {
		kn->kn_hook32 = FILTER_ACTIVE;
	} else {
		kn->kn_hook32 = 0;
	}
	return kn->kn_hook32;
}
static int
filt_usertouch(struct knote *kn, struct kevent_qos_s *kev)
{
	uint32_t ffctrl;
	int fflags;

	ffctrl = kev->fflags & NOTE_FFCTRLMASK;
	fflags = kev->fflags & NOTE_FFLAGSMASK;
	switch (ffctrl) {
	case NOTE_FFNOP:
		break;
	case NOTE_FFAND:
		kn->kn_sfflags &= fflags;
		break;
	case NOTE_FFOR:
		kn->kn_sfflags |= fflags;
		break;
	case NOTE_FFCOPY:
		kn->kn_sfflags = fflags;
		break;
	}
	kn->kn_sdata = kev->data;

	if (kev->fflags & NOTE_TRIGGER) {
		kn->kn_hook32 = FILTER_ACTIVE;
	}
	return (int)kn->kn_hook32;
}
static int
filt_userprocess(struct knote *kn, struct kevent_qos_s *kev)
{
	int result = (int)kn->kn_hook32;

	if (result) {
		/* EVFILT_USER returns the data that was passed in */
		knote_fill_kevent_with_sdata(kn, kev);
		kev->fflags = kn->kn_sfflags;
		if (kn->kn_flags & EV_CLEAR) {
			/* knote_fill_kevent cleared kn_fflags */
			kn->kn_hook32 = 0;
		}
	}

	return result;
}
SECURITY_READ_ONLY_EARLY(static struct filterops) user_filtops = {
	.f_extended_codes = true,
	.f_attach  = filt_userattach,
	.f_detach  = filt_no_detach,
	.f_event   = filt_bad_event,
	.f_touch   = filt_usertouch,
	.f_process = filt_userprocess,
};
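/*
 * Illustrative userspace usage (not part of the original file):
 * EVFILT_USER events are posted from userspace with NOTE_TRIGGER rather
 * than by a kernel object:
 *
 *	struct kevent ev;
 *	EV_SET(&ev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);           // register
 *	EV_SET(&ev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);           // fire it
 */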
#pragma mark workloop_filtops

#define EPREEMPTDISABLED (-1)

static inline void
filt_wllock(struct kqworkloop *kqwl)
{
	lck_spin_lock(&kqwl->kqwl_statelock);
}

static inline void
filt_wlunlock(struct kqworkloop *kqwl)
{
	lck_spin_unlock(&kqwl->kqwl_statelock);
}
/*
 * Returns true when the interlock for the turnstile is the workqueue lock
 *
 * When this is the case, all turnstiles operations are delegated
 * to the workqueue subsystem.
 *
 * This is required because kqueue_threadreq_bind_prepost only holds the
 * workqueue lock but needs to move the inheritor from the workloop turnstile
 * away from the creator thread, so that this now fulfilled request cannot be
 * picked anymore by other threads.
 */
static inline bool
filt_wlturnstile_interlock_is_workq(struct kqworkloop *kqwl)
{
	return kqr_thread_requested_pending(&kqwl->kqwl_request);
}
static void
filt_wlupdate_inheritor(struct kqworkloop *kqwl, struct turnstile *ts,
    turnstile_update_flags_t flags)
{
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
	workq_threadreq_t kqr = &kqwl->kqwl_request;

	/*
	 * binding to the workq should always happen through
	 * workq_kern_threadreq_update_inheritor()
	 */
	assert(!filt_wlturnstile_interlock_is_workq(kqwl));

	if ((inheritor = kqwl->kqwl_owner)) {
		flags |= TURNSTILE_INHERITOR_THREAD;
	} else if ((inheritor = kqr_thread(kqr))) {
		flags |= TURNSTILE_INHERITOR_THREAD;
	}

	turnstile_update_inheritor(ts, inheritor, flags);
}
#define EVFILT_WORKLOOP_EFAULT_RETRY_COUNT 100
#define FILT_WLATTACH 0
#define FILT_WLTOUCH  1
#define FILT_WLDROP   2
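/*
 * Editorial sketch (assumption, summarizing the logic below): for
 * operations carrying a userspace address in ext[EV_EXTIDX_WL_ADDR],
 * filt_wlupdate loads the current 64-bit value and debounces it against
 * the caller's snapshot:
 *
 *	copyin_atomic64(uaddr, &udata);
 *	if ((udata & mask) != (kdata & mask))  // kdata == ext[EV_EXTIDX_WL_VALUE]
 *		error = ESTALE;                // userspace raced; it must retry
 *
 * EFAULT is retried up to EVFILT_WORKLOOP_EFAULT_RETRY_COUNT times with
 * the state lock dropped, since the copyin can fault.
 */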
1949 filt_wlupdate(struct kqworkloop
*kqwl
, struct knote
*kn
,
1950 struct kevent_qos_s
*kev
, kq_index_t qos_index
, int op
)
1952 user_addr_t uaddr
= CAST_USER_ADDR_T(kev
->ext
[EV_EXTIDX_WL_ADDR
]);
1953 workq_threadreq_t kqr
= &kqwl
->kqwl_request
;
1954 thread_t cur_owner
, new_owner
, extra_thread_ref
= THREAD_NULL
;
1955 kq_index_t cur_override
= THREAD_QOS_UNSPECIFIED
;
1956 int efault_retry
= EVFILT_WORKLOOP_EFAULT_RETRY_COUNT
;
1957 int action
= KQWL_UTQ_NONE, error = 0;
	bool wl_inheritor_updated = false, needs_wake = false;
	uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
	uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
	uint64_t udata = 0;
	struct turnstile *ts = TURNSTILE_NULL;

	filt_wllock(kqwl);

again:
	new_owner = cur_owner = kqwl->kqwl_owner;

	/*
	 * Phase 1:
	 *
	 * If asked, load the uint64 value at the user provided address and compare
	 * it against the passed in mask and expected value.
	 *
	 * If NOTE_WL_DISCOVER_OWNER is specified, translate the loaded name as
	 * a thread reference.
	 *
	 * If NOTE_WL_END_OWNERSHIP is specified and the currently known owner is
	 * the current thread, then end ownership.
	 *
	 * Lastly decide whether we need to perform a QoS update.
	 */
	if (uaddr) {
		/*
		 * Until <rdar://problem/24999882> exists,
		 * disabling preemption copyin forces any
		 * vm_fault we encounter to fail.
		 */
		error = copyin_atomic64(uaddr, &udata);

		/*
		 * If we get EFAULT, drop locks, and retry.
		 * If we still get an error report it,
		 * else assume the memory has been faulted
		 * and attempt to copyin under lock again.
		 */
		switch (error) {
		case 0:
			break;
		case EFAULT:
			if (efault_retry-- > 0) {
				filt_wlunlock(kqwl);
				error = copyin_atomic64(uaddr, &udata);
				filt_wllock(kqwl);
				if (error == 0) {
					goto again;
				}
			}
			OS_FALLTHROUGH;
		default:
			goto out;
		}

		/* Update state as copied in. */
		kev->ext[EV_EXTIDX_WL_VALUE] = udata;

		if ((udata & mask) != (kdata & mask)) {
			error = ESTALE;
		} else if (kev->fflags & NOTE_WL_DISCOVER_OWNER) {
			/*
			 * Decipher the owner port name, and translate accordingly.
			 * The low 2 bits were borrowed for other flags, so mask them off.
			 *
			 * Then attempt translation to a thread reference or fail.
			 */
			mach_port_name_t name = (mach_port_name_t)udata & ~0x3;
			if (name != MACH_PORT_NULL) {
				name = ipc_entry_name_mask(name);
				extra_thread_ref = port_name_to_thread(name,
				    PORT_TO_THREAD_IN_CURRENT_TASK);
				if (extra_thread_ref == THREAD_NULL) {
					error = EOWNERDEAD;
					goto out;
				}
				new_owner = extra_thread_ref;
			}
		}
	}

	if ((kev->fflags & NOTE_WL_END_OWNERSHIP) && new_owner == current_thread()) {
		new_owner = THREAD_NULL;
	}

	if (error == 0) {
		if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && (kev->flags & EV_DELETE)) {
			action = KQWL_UTQ_SET_QOS_INDEX;
		} else if (qos_index && kqr->tr_kq_qos_index != qos_index) {
			action = KQWL_UTQ_SET_QOS_INDEX;
		}

		if (op == FILT_WLTOUCH) {
			/*
			 * Save off any additional fflags/data we just accepted.
			 * But only keep the last round of "update" bits we acted on,
			 * which helps debugging a lot.
			 */
			kn->kn_sfflags &= ~NOTE_WL_UPDATES_MASK;
			kn->kn_sfflags |= kev->fflags;
			if (kev->fflags & NOTE_WL_SYNC_WAKE) {
				needs_wake = (kn->kn_thread != THREAD_NULL);
			}
		} else if (op == FILT_WLDROP) {
			if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) ==
			    NOTE_WL_SYNC_WAIT) {
				/*
				 * When deleting a SYNC_WAIT knote that hasn't been woken up
				 * explicitly, issue a wake up.
				 */
				kn->kn_sfflags |= NOTE_WL_SYNC_WAKE;
				needs_wake = (kn->kn_thread != THREAD_NULL);
			}
		}
	}

	/*
	 * Phase 2:
	 *
	 * Commit ownership and QoS changes if any, possibly wake up waiters
	 */
	if (cur_owner == new_owner && action == KQWL_UTQ_NONE && !needs_wake) {
		goto out;
	}

	kqlock(kqwl);

	/* If already tracked as servicer, don't track as owner */
	if (new_owner == kqr_thread(kqr)) {
		new_owner = THREAD_NULL;
	}

	if (cur_owner != new_owner) {
		kqwl->kqwl_owner = new_owner;
		if (new_owner == extra_thread_ref) {
			/* we just transfered this ref to kqwl_owner */
			extra_thread_ref = THREAD_NULL;
		}
		cur_override = kqworkloop_override(kqwl);

		if (new_owner) {
			/* override it before we drop the old */
			if (cur_override != THREAD_QOS_UNSPECIFIED) {
				thread_add_kevent_override(new_owner, cur_override);
			}
			if (kqr_thread_requested_pending(kqr)) {
				if (action == KQWL_UTQ_NONE) {
					action = KQWL_UTQ_REDRIVE_EVENTS;
				}
			}
		} else {
			if (!kqr_thread_requested(kqr) && kqr->tr_kq_wakeup) {
				if (action == KQWL_UTQ_NONE) {
					action = KQWL_UTQ_REDRIVE_EVENTS;
				}
			}
		}
	}

	if (action != KQWL_UTQ_NONE) {
		kqworkloop_update_threads_qos(kqwl, action, qos_index);
	}

	ts = kqwl->kqwl_turnstile;
	if (cur_owner != new_owner && ts) {
		if (action == KQWL_UTQ_REDRIVE_EVENTS) {
			/*
			 * Note that when action is KQWL_UTQ_REDRIVE_EVENTS,
			 * the code went through workq_kern_threadreq_initiate()
			 * and the workqueue has set the inheritor already
			 */
			assert(filt_wlturnstile_interlock_is_workq(kqwl));
		} else if (filt_wlturnstile_interlock_is_workq(kqwl)) {
			workq_kern_threadreq_lock(kqwl->kqwl_p);
			workq_kern_threadreq_update_inheritor(kqwl->kqwl_p, kqr, new_owner,
			    ts, TURNSTILE_IMMEDIATE_UPDATE);
			workq_kern_threadreq_unlock(kqwl->kqwl_p);
			if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
				/*
				 * If the workq is no longer the interlock, then
				 * workq_kern_threadreq_update_inheritor() has finished a bind
				 * and we need to fallback to the regular path.
				 */
				filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
			}
			wl_inheritor_updated = true;
		} else {
			filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
			wl_inheritor_updated = true;
		}

		/*
		 * We need a turnstile reference because we are dropping the interlock
		 * and the caller has not called turnstile_prepare.
		 */
		if (wl_inheritor_updated) {
			turnstile_reference(ts);
		}
	}

	if (needs_wake && ts) {
		waitq_wakeup64_thread(&ts->ts_waitq, knote_filt_wev64(kn),
		    kn->kn_thread, THREAD_AWAKENED);
		if (op == FILT_WLATTACH || op == FILT_WLTOUCH) {
			disable_preemption();
			error = EPREEMPTDISABLED;
		}
	}

	kqunlock(kqwl);

out:
	/*
	 * Phase 3:
	 *
	 * Unlock and cleanup various lingering references and things.
	 */
	filt_wlunlock(kqwl);

#if CONFIG_WORKLOOP_DEBUG
	KQWL_HISTORY_WRITE_ENTRY(kqwl, {
		.updater = current_thread(),
		.servicer = kqr_thread(kqr), /* Note: racy */
		.old_owner = cur_owner,
		.new_owner = new_owner,

		.kev_ident  = kev->ident,
		.error      = (int16_t)error,
		.kev_flags  = kev->flags,
		.kev_fflags = kev->fflags,

		.kev_mask   = mask,
		.kev_value  = kdata,
		.in_value   = udata,
	});
#endif // CONFIG_WORKLOOP_DEBUG

	if (wl_inheritor_updated) {
		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
		turnstile_deallocate_safe(ts);
	}

	if (cur_owner && new_owner != cur_owner) {
		if (cur_override != THREAD_QOS_UNSPECIFIED) {
			thread_drop_kevent_override(cur_owner);
		}
		thread_deallocate_safe(cur_owner);
	}
	if (extra_thread_ref) {
		thread_deallocate_safe(extra_thread_ref);
	}
	return error;
}
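
/*
 * Illustrative note (a sketch, not additional kernel logic): the debounce
 * performed by filt_wlupdate() above boils down to a masked compare of the
 * 64-bit word published by userspace against the value the kevent carried:
 *
 *	uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];  // expected value
 *	uint64_t mask  = kev->ext[EV_EXTIDX_WL_MASK];
 *	uint64_t udata;                                 // copyin_atomic64() result
 *
 *	if ((udata & mask) != (kdata & mask)) {
 *		error = ESTALE;                         // userspace state moved on
 *	}
 *
 * Userspace that doesn't care about the staleness can hide it with
 * NOTE_WL_IGNORE_ESTALE (see filt_wlattach() and filt_wltouch() below).
 */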
/*
 * Remembers the last update that came in from userspace for debugging reasons.
 * - fflags is mirrored from the userspace kevent
 * - ext[i, i != VALUE] is mirrored from the userspace kevent
 * - ext[VALUE] is set to what the kernel loaded atomically
 * - data is set to the error if any
 */
static inline void
filt_wlremember_last_update(struct knote *kn, struct kevent_qos_s *kev,
    int error)
{
	kn->kn_fflags = kev->fflags;
	kn->kn_sdata = error;
	memcpy(kn->kn_ext, kev->ext, sizeof(kev->ext));
}
static int
filt_wlupdate_sync_ipc(struct kqworkloop *kqwl, struct knote *kn,
    struct kevent_qos_s *kev, int op)
{
	user_addr_t uaddr = (user_addr_t) kev->ext[EV_EXTIDX_WL_ADDR];
	uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
	uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
	uint64_t udata = 0;
	int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
	int error = 0;

	if (op == FILT_WLATTACH) {
		(void)kqueue_alloc_turnstile(&kqwl->kqwl_kqueue);
	} else if (uaddr == 0) {
		return 0;
	}

	filt_wllock(kqwl);

again:

	/*
	 * Do the debounce thing, the lock serializing the state is the knote lock.
	 */
	if (uaddr) {
		/*
		 * Until <rdar://problem/24999882> exists,
		 * disabling preemption copyin forces any
		 * vm_fault we encounter to fail.
		 */
		error = copyin_atomic64(uaddr, &udata);

		/*
		 * If we get EFAULT, drop locks, and retry.
		 * If we still get an error report it,
		 * else assume the memory has been faulted
		 * and attempt to copyin under lock again.
		 */
		switch (error) {
		case 0:
			break;
		case EFAULT:
			if (efault_retry-- > 0) {
				filt_wlunlock(kqwl);
				error = copyin_atomic64(uaddr, &udata);
				filt_wllock(kqwl);
				if (error == 0) {
					goto again;
				}
			}
			OS_FALLTHROUGH;
		default:
			goto out;
		}

		kev->ext[EV_EXTIDX_WL_VALUE] = udata;
		kn->kn_ext[EV_EXTIDX_WL_VALUE] = udata;

		if ((udata & mask) != (kdata & mask)) {
			error = ESTALE;
			goto out;
		}
	}

	if (op == FILT_WLATTACH) {
		error = filt_wlattach_sync_ipc(kn);
		if (error == 0) {
			disable_preemption();
			error = EPREEMPTDISABLED;
		}
	}

out:
	filt_wlunlock(kqwl);
	return error;
}
static int
filt_wlattach(struct knote *kn, struct kevent_qos_s *kev)
{
	struct kqueue *kq = knote_get_kq(kn);
	struct kqworkloop *kqwl = (struct kqworkloop *)kq;
	int error = 0, result = 0;
	kq_index_t qos_index = 0;

	if (__improbable((kq->kq_state & KQ_WORKLOOP) == 0)) {
		error = ENOTSUP;
		goto out;
	}

	uint32_t command = (kn->kn_sfflags & NOTE_WL_COMMANDS_MASK);
	switch (command) {
	case NOTE_WL_THREAD_REQUEST:
		if (kn->kn_id != kqwl->kqwl_dynamicid) {
			error = EINVAL;
			goto out;
		}
		qos_index = _pthread_priority_thread_qos(kn->kn_qos);
		if (qos_index == THREAD_QOS_UNSPECIFIED) {
			error = ERANGE;
			goto out;
		}
		if (kqwl->kqwl_request.tr_kq_qos_index) {
			/*
			 * There already is a thread request, and well, you're only allowed
			 * one per workloop, so fail the attach.
			 */
			error = EALREADY;
			goto out;
		}
		break;
	case NOTE_WL_SYNC_WAIT:
	case NOTE_WL_SYNC_WAKE:
		if (kn->kn_id == kqwl->kqwl_dynamicid) {
			error = EINVAL;
			goto out;
		}
		if ((kn->kn_flags & EV_DISABLE) == 0) {
			error = EINVAL;
			goto out;
		}
		if (kn->kn_sfflags & NOTE_WL_END_OWNERSHIP) {
			error = EINVAL;
			goto out;
		}
		break;

	case NOTE_WL_SYNC_IPC:
		if ((kn->kn_flags & EV_DISABLE) == 0) {
			error = EINVAL;
			goto out;
		}
		if (kn->kn_sfflags & (NOTE_WL_UPDATE_QOS | NOTE_WL_DISCOVER_OWNER)) {
			error = EINVAL;
			goto out;
		}
		break;
	default:
		error = EINVAL;
		goto out;
	}

	if (command == NOTE_WL_SYNC_IPC) {
		error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLATTACH);
	} else {
		error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLATTACH);
	}

	if (error == EPREEMPTDISABLED) {
		error = 0;
		result = FILTER_THREADREQ_NODEFEER;
	}
out:
	if (error) {
		/* If userland wants ESTALE to be hidden, fail the attach anyway */
		if (error == ESTALE && (kn->kn_sfflags & NOTE_WL_IGNORE_ESTALE)) {
			error = 0;
		}
		knote_set_error(kn, error);
		return result;
	}
	if (command == NOTE_WL_SYNC_WAIT) {
		return kevent_register_wait_prepare(kn, kev, result);
	}
	/* Just attaching the thread request successfully will fire it */
	if (command == NOTE_WL_THREAD_REQUEST) {
		/*
		 * Thread Request knotes need an explicit touch to be active again,
		 * so delivering an event needs to also consume it.
		 */
		kn->kn_flags |= EV_CLEAR;
		return result | FILTER_ACTIVE;
	}
	return result;
}
static void
filt_wlwait_continue(void *parameter, wait_result_t wr)
{
	struct _kevent_register *cont_args = parameter;
	struct kqworkloop *kqwl = cont_args->kqwl;

	kqlock(kqwl);
	if (filt_wlturnstile_interlock_is_workq(kqwl)) {
		workq_kern_threadreq_lock(kqwl->kqwl_p);
		turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
		workq_kern_threadreq_unlock(kqwl->kqwl_p);
	} else {
		turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
	}
	kqunlock(kqwl);

	turnstile_cleanup();

	if (wr == THREAD_INTERRUPTED) {
		cont_args->kev.flags |= EV_ERROR;
		cont_args->kev.data = EINTR;
	} else if (wr != THREAD_AWAKENED) {
		panic("Unexpected wait result: %d", wr);
	}

	kevent_register_wait_return(cont_args);
}
/*
 * Called with the workloop mutex held, most of the time never returns as it
 * calls filt_wlwait_continue through a continuation.
 */
static void
filt_wlpost_register_wait(struct uthread *uth, struct knote *kn,
    struct _kevent_register *cont_args)
{
	struct kqworkloop *kqwl = cont_args->kqwl;
	workq_threadreq_t kqr = &kqwl->kqwl_request;
	struct turnstile *ts;
	bool workq_locked = false;

	kqlock_held(kqwl);

	if (filt_wlturnstile_interlock_is_workq(kqwl)) {
		workq_kern_threadreq_lock(kqwl->kqwl_p);
		workq_locked = true;
	}

	ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
	    TURNSTILE_NULL, TURNSTILE_WORKLOOPS);

	if (workq_locked) {
		workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
		    &kqwl->kqwl_request, kqwl->kqwl_owner, ts,
		    TURNSTILE_DELAYED_UPDATE);
		if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
			/*
			 * if the interlock is no longer the workqueue lock,
			 * then we don't need to hold it anymore.
			 */
			workq_kern_threadreq_unlock(kqwl->kqwl_p);
			workq_locked = false;
		}
	}
	if (!workq_locked) {
		/*
		 * If the interlock is the workloop's, then it's our responsibility to
		 * call update_inheritor, so just do it.
		 */
		filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_DELAYED_UPDATE);
	}

	thread_set_pending_block_hint(uth->uu_thread, kThreadWaitWorkloopSyncWait);
	waitq_assert_wait64(&ts->ts_waitq, knote_filt_wev64(kn),
	    THREAD_ABORTSAFE, TIMEOUT_WAIT_FOREVER);

	if (workq_locked) {
		workq_kern_threadreq_unlock(kqwl->kqwl_p);
	}

	thread_t thread = kqwl->kqwl_owner ?: kqr_thread(kqr);
	if (thread) {
		thread_reference(thread);
	}

	kevent_register_wait_block(ts, thread, filt_wlwait_continue, cont_args);
}
/* called in stackshot context to report the thread responsible for blocking this thread */
void
kdp_workloop_sync_wait_find_owner(__assert_only thread_t thread,
    event64_t event, thread_waitinfo_t *waitinfo)
{
	extern zone_t thread_zone;
	struct knote *kn = (struct knote *)event;

	zone_require(knote_zone, kn);

	assert(kn->kn_thread == thread);

	struct kqueue *kq = knote_get_kq(kn);

	zone_require(kqworkloop_zone, kq);
	assert(kq->kq_state & KQ_WORKLOOP);

	struct kqworkloop *kqwl = (struct kqworkloop *)kq;
	workq_threadreq_t kqr = &kqwl->kqwl_request;

	thread_t kqwl_owner = kqwl->kqwl_owner;

	if (kqwl_owner != THREAD_NULL) {
		zone_require(thread_zone, kqwl_owner);
		waitinfo->owner = thread_tid(kqwl->kqwl_owner);
	} else if (kqr_thread_requested_pending(kqr)) {
		waitinfo->owner = STACKSHOT_WAITOWNER_THREQUESTED;
	} else if (kqr->tr_state >= WORKQ_TR_STATE_BINDING) {
		zone_require(thread_zone, kqr->tr_thread);
		waitinfo->owner = thread_tid(kqr->tr_thread);
	} else {
		waitinfo->owner = 0;
	}

	waitinfo->context = kqwl->kqwl_dynamicid;
}
static void
filt_wldetach(struct knote *kn)
{
	if (kn->kn_sfflags & NOTE_WL_SYNC_IPC) {
		filt_wldetach_sync_ipc(kn);
	} else if (kn->kn_thread) {
		kevent_register_wait_cleanup(kn);
	}
}
static int
filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_qos_s *kev,
    thread_qos_t *qos_index)
{
	uint32_t new_commands = kev->fflags & NOTE_WL_COMMANDS_MASK;
	uint32_t sav_commands = kn->kn_sfflags & NOTE_WL_COMMANDS_MASK;

	if ((kev->fflags & NOTE_WL_DISCOVER_OWNER) && (kev->flags & EV_DELETE)) {
		return EINVAL;
	}
	if (kev->fflags & NOTE_WL_UPDATE_QOS) {
		if (kev->flags & EV_DELETE) {
			return EINVAL;
		}
		if (sav_commands != NOTE_WL_THREAD_REQUEST) {
			return EINVAL;
		}
		if (!(*qos_index = _pthread_priority_thread_qos(kev->qos))) {
			return ERANGE;
		}
	}

	switch (new_commands) {
	case NOTE_WL_THREAD_REQUEST:
		/* thread requests can only update themselves */
		if (sav_commands != NOTE_WL_THREAD_REQUEST) {
			return EINVAL;
		}
		break;

	case NOTE_WL_SYNC_WAIT:
		if (kev->fflags & NOTE_WL_END_OWNERSHIP) {
			return EINVAL;
		}
		goto sync_checks;

	case NOTE_WL_SYNC_WAKE:
sync_checks:
		if (!(sav_commands & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE))) {
			return EINVAL;
		}
		if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
			return EINVAL;
		}
		break;

	case NOTE_WL_SYNC_IPC:
		if (sav_commands != NOTE_WL_SYNC_IPC) {
			return EINVAL;
		}
		if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
			return EINVAL;
		}
		break;

	default:
		return EINVAL;
	}
	return 0;
}
static int
filt_wltouch(struct knote *kn, struct kevent_qos_s *kev)
{
	struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
	thread_qos_t qos_index = THREAD_QOS_UNSPECIFIED;
	int result = 0;

	int error = filt_wlvalidate_kev_flags(kn, kev, &qos_index);
	if (error) {
		goto out;
	}

	uint32_t command = kev->fflags & NOTE_WL_COMMANDS_MASK;
	if (command == NOTE_WL_SYNC_IPC) {
		error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLTOUCH);
	} else {
		error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLTOUCH);
		filt_wlremember_last_update(kn, kev, error);
	}
	if (error == EPREEMPTDISABLED) {
		error = 0;
		result = FILTER_THREADREQ_NODEFEER;
	}

out:
	if (error) {
		if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
			/* If userland wants ESTALE to be hidden, do not activate */
			return result;
		}
		kev->flags |= EV_ERROR;
		kev->data = error;
		return result;
	}
	if (command == NOTE_WL_SYNC_WAIT && !(kn->kn_sfflags & NOTE_WL_SYNC_WAKE)) {
		return kevent_register_wait_prepare(kn, kev, result);
	}
	/* Just touching the thread request successfully will fire it */
	if (command == NOTE_WL_THREAD_REQUEST) {
		if (kev->fflags & NOTE_WL_UPDATE_QOS) {
			result |= FILTER_UPDATE_REQ_QOS;
		}
		result |= FILTER_ACTIVE;
	}
	return result;
}
static bool
filt_wlallow_drop(struct knote *kn, struct kevent_qos_s *kev)
{
	struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);

	int error = filt_wlvalidate_kev_flags(kn, kev, NULL);
	if (error) {
		goto out;
	}

	uint32_t command = (kev->fflags & NOTE_WL_COMMANDS_MASK);
	if (command == NOTE_WL_SYNC_IPC) {
		error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLDROP);
	} else {
		error = filt_wlupdate(kqwl, kn, kev, 0, FILT_WLDROP);
		filt_wlremember_last_update(kn, kev, error);
	}
	assert(error != EPREEMPTDISABLED);

out:
	if (error) {
		if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
			return false;
		}
		kev->flags |= EV_ERROR;
		kev->data = error;
		return false;
	}
	return true;
}
static int
filt_wlprocess(struct knote *kn, struct kevent_qos_s *kev)
{
	struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
	int rc = 0;

	assert(kn->kn_sfflags & NOTE_WL_THREAD_REQUEST);

	kqlock(kqwl);

	if (kqwl->kqwl_owner) {
		/*
		 * <rdar://problem/33584321> userspace can sometimes cause the thread
		 * request knote to be processed when events are delivered without
		 * triggering a drain session.
		 *
		 * When that happens, the automatic deactivation due to process
		 * would swallow the event, so we have to activate the knote again.
		 */
		knote_activate(kqwl, kn, FILTER_ACTIVE);
	} else {
#if DEBUG || DEVELOPMENT
		if (kevent_debug_flags & KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS) {
			/*
			 * see src/queue_internal.h in libdispatch
			 */
#define DISPATCH_QUEUE_ENQUEUED 0x1ull
			user_addr_t addr = CAST_USER_ADDR_T(kn->kn_ext[EV_EXTIDX_WL_ADDR]);
			task_t t = current_task();
			uint64_t val;
			if (addr && task_is_active(t) && !task_is_halting(t) &&
			    copyin_atomic64(addr, &val) == 0 &&
			    val && (val & DISPATCH_QUEUE_ENQUEUED) == 0 &&
			    (val >> 48) != 0xdead && (val >> 48) != 0 && (val >> 48) != 0xffff) {
				panic("kevent: workloop %#016llx is not enqueued "
				    "(kn:%p dq_state:%#016llx kev.dq_state:%#016llx)",
				    kn->kn_udata, kn, val, kn->kn_ext[EV_EXTIDX_WL_VALUE]);
			}
		}
#endif // DEBUG || DEVELOPMENT
		knote_fill_kevent(kn, kev, 0);
		kev->fflags = kn->kn_sfflags;
		rc |= FILTER_ACTIVE;
	}

	kqunlock(kqwl);

	if (rc & FILTER_ACTIVE) {
		workq_thread_set_max_qos(kqwl->kqwl_p, &kqwl->kqwl_request);
	}
	return rc;
}
SECURITY_READ_ONLY_EARLY(static struct filterops) workloop_filtops = {
	.f_extended_codes = true,
	.f_attach = filt_wlattach,
	.f_detach = filt_wldetach,
	.f_event = filt_bad_event,
	.f_touch = filt_wltouch,
	.f_process = filt_wlprocess,
	.f_allow_drop = filt_wlallow_drop,
	.f_post_register_wait = filt_wlpost_register_wait,
};
#pragma mark - kqueues allocation and deallocation

/*!
 * @enum kqworkloop_dealloc_flags_t
 *
 * @brief
 * Flags that alter kqworkloop_dealloc() behavior.
 *
 * @const KQWL_DEALLOC_NONE
 * Convenient name for "no flags".
 *
 * @const KQWL_DEALLOC_SKIP_HASH_REMOVE
 * Do not remove the workloop from the hash table.
 * This is used for process tear-down codepaths as the workloops have been
 * removed by the caller already.
 */
OS_OPTIONS(kqworkloop_dealloc_flags, unsigned,
    KQWL_DEALLOC_NONE               = 0x0000,
    KQWL_DEALLOC_SKIP_HASH_REMOVE   = 0x0001,
    );

static void
kqworkloop_dealloc(struct kqworkloop *, kqworkloop_dealloc_flags_t, uint32_t);
OS_NOINLINE OS_COLD OS_NORETURN
static void
kqworkloop_retain_panic(struct kqworkloop *kqwl, uint32_t previous)
{
	if (previous == 0) {
		panic("kq(%p) resurrection", kqwl);
	} else {
		panic("kq(%p) retain overflow", kqwl);
	}
}

OS_NOINLINE OS_COLD OS_NORETURN
static void
kqworkloop_release_panic(struct kqworkloop *kqwl)
{
	panic("kq(%p) over-release", kqwl);
}
OS_ALWAYS_INLINE
static inline bool
kqworkloop_try_retain(struct kqworkloop *kqwl)
{
	uint32_t old_ref, new_ref;
	os_atomic_rmw_loop(&kqwl->kqwl_retains, old_ref, new_ref, relaxed, {
		if (__improbable(old_ref == 0)) {
		        os_atomic_rmw_loop_give_up(return false);
		}
		if (__improbable(old_ref >= KQ_WORKLOOP_RETAINS_MAX)) {
		        kqworkloop_retain_panic(kqwl, old_ref);
		}
		new_ref = old_ref + 1;
	});
	return true;
}

OS_ALWAYS_INLINE
static inline void
kqworkloop_retain(struct kqworkloop *kqwl)
{
	uint32_t previous = os_atomic_inc_orig(&kqwl->kqwl_retains, relaxed);
	if (__improbable(previous == 0 || previous >= KQ_WORKLOOP_RETAINS_MAX)) {
		kqworkloop_retain_panic(kqwl, previous);
	}
}
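
/*
 * A minimal sketch of the os_atomic_rmw_loop() pattern used by
 * kqworkloop_try_retain() above (see <machine/atomic.h>): the body observes
 * `old_ref`, computes `new_ref`, and the loop retries the compare-exchange
 * until it succeeds, unless it bails out early:
 *
 *	os_atomic_rmw_loop(&refcnt, old_ref, new_ref, relaxed, {
 *		if (old_ref == 0) {
 *			os_atomic_rmw_loop_give_up(return false);
 *		}
 *		new_ref = old_ref + 1;
 *	});
 */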
OS_ALWAYS_INLINE
static inline void
kqueue_retain(kqueue_t kqu)
{
	if (kqu.kq->kq_state & KQ_DYNAMIC) {
		kqworkloop_retain(kqu.kqwl);
	}
}

OS_ALWAYS_INLINE
static inline void
kqworkloop_release_live(struct kqworkloop *kqwl)
{
	uint32_t refs = os_atomic_dec_orig(&kqwl->kqwl_retains, relaxed);
	if (__improbable(refs <= 1)) {
		kqworkloop_release_panic(kqwl);
	}
}

OS_ALWAYS_INLINE
static inline void
kqueue_release_live(kqueue_t kqu)
{
	if (kqu.kq->kq_state & KQ_DYNAMIC) {
		kqworkloop_release_live(kqu.kqwl);
	}
}

OS_ALWAYS_INLINE
static inline void
kqworkloop_release(struct kqworkloop *kqwl)
{
	uint32_t refs = os_atomic_dec_orig(&kqwl->kqwl_retains, relaxed);

	if (__improbable(refs <= 1)) {
		kqworkloop_dealloc(kqwl, KQWL_DEALLOC_NONE, refs - 1);
	}
}

OS_ALWAYS_INLINE
static inline void
kqueue_release(kqueue_t kqu)
{
	if (kqu.kq->kq_state & KQ_DYNAMIC) {
		kqworkloop_release(kqu.kqwl);
	}
}
/*!
 * @function kqueue_destroy
 *
 * @brief
 * Common part to all kqueue dealloc functions.
 */
OS_NOINLINE
static void
kqueue_destroy(kqueue_t kqu, zone_t zone)
{
	/*
	 * waitq_set_deinit() removes the KQ's waitq set from
	 * any select sets to which it may belong.
	 *
	 * The order of these deinits matter: before waitq_set_deinit() returns,
	 * waitq_set__CALLING_PREPOST_HOOK__ may be called and it will take the
	 * kq_lock.
	 */
	waitq_set_deinit(&kqu.kq->kq_wqs);
	lck_spin_destroy(&kqu.kq->kq_lock, &kq_lck_grp);

	zfree(zone, kqu.kq);
}
/*!
 * @function kqueue_init
 *
 * @brief
 * Common part to all kqueue alloc functions.
 */
static kqueue_t
kqueue_init(kqueue_t kqu, waitq_set_prepost_hook_t *hook, int policy)
{
	waitq_set_init(&kqu.kq->kq_wqs, policy, NULL, hook);
	lck_spin_init(&kqu.kq->kq_lock, &kq_lck_grp, LCK_ATTR_NULL);
	return kqu;
}
#pragma mark kqfile allocation and deallocation

/*!
 * @function kqueue_dealloc
 *
 * @brief
 * Detach all knotes from a kqfile and free it.
 *
 * @discussion
 * We walk each list looking for knotes referencing this
 * kqueue. If we find one, we try to drop it. But if we fail
 * to get a drop reference, that will wait until it is dropped.
 * So, we can just restart again safe in the assumption that
 * the list will eventually not contain any more references
 * to this kqueue (either we dropped them all, or someone
 * else did).
 *
 * Assumes no new events are being added to the kqueue.
 * Nothing locked on entry or exit.
 */
static void
kqueue_dealloc(struct kqueue *kq)
{
	KNOTE_LOCK_CTX(knlc);
	struct proc *p = kq->kq_p;
	struct filedesc *fdp = p->p_fd;
	struct knote *kn;

	assert(kq && (kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);

	proc_fdlock(p);
	for (int i = 0; i < fdp->fd_knlistsize; i++) {
		kn = SLIST_FIRST(&fdp->fd_knlist[i]);
		while (kn != NULL) {
			if (kq == knote_get_kq(kn)) {
				kqlock(kq);
				proc_fdunlock(p);
				if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
					knote_drop(kq, kn, &knlc);
				}
				proc_fdlock(p);
				/* start over at beginning of list */
				kn = SLIST_FIRST(&fdp->fd_knlist[i]);
				continue;
			}
			kn = SLIST_NEXT(kn, kn_link);
		}
	}

	knhash_lock(fdp);
	proc_fdunlock(p);

	if (fdp->fd_knhashmask != 0) {
		for (int i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
			kn = SLIST_FIRST(&fdp->fd_knhash[i]);
			while (kn != NULL) {
				if (kq == knote_get_kq(kn)) {
					kqlock(kq);
					knhash_unlock(fdp);
					if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
						knote_drop(kq, kn, &knlc);
					}
					knhash_lock(fdp);
					/* start over at beginning of list */
					kn = SLIST_FIRST(&fdp->fd_knhash[i]);
					continue;
				}
				kn = SLIST_NEXT(kn, kn_link);
			}
		}
	}
	knhash_unlock(fdp);

	kqueue_destroy(kq, kqfile_zone);
}
/*!
 * @function kqueue_alloc
 *
 * @brief
 * Allocate a kqfile.
 */
struct kqueue *
kqueue_alloc(struct proc *p)
{
	struct kqfile *kqf;

	/*
	 * kqfiles are created with kqueue() so we need to wait for
	 * the first kevent syscall to know which bit among
	 * KQ_KEV_{32,64,QOS} will be set in kqf_state
	 */
	kqf = zalloc_flags(kqfile_zone, Z_WAITOK | Z_ZERO);
	kqf->kqf_p = p;
	TAILQ_INIT_AFTER_BZERO(&kqf->kqf_queue);
	TAILQ_INIT_AFTER_BZERO(&kqf->kqf_suppressed);

	return kqueue_init(kqf, NULL, SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST).kq;
}
/*!
 * @function kqueue_internal
 *
 * @brief
 * Core implementation for kqueue and guarded_kqueue_np()
 */
int
kqueue_internal(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
{
	struct kqueue *kq;
	struct fileproc *fp;
	int fd, error;

	error = falloc_withalloc(p, &fp, &fd, vfs_context_current(), fp_zalloc, cra);
	if (error) {
		return error;
	}

	kq = kqueue_alloc(p);
	if (kq == NULL) {
		fp_free(p, fd, fp);
		return ENOMEM;
	}

	fp->f_flag = FREAD | FWRITE;
	fp->f_ops = &kqueueops;
	fp->f_data = kq;
	fp->f_lflags |= FG_CONFINED;

	proc_fdlock(p);
	*fdflags(p, fd) |= UF_EXCLOSE | UF_FORKCLOSE;
	procfdtbl_releasefd(p, fd, NULL);
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);

	*retval = fd;
	return error;
}
/*!
 * @function kqueue
 *
 * @brief
 * The kqueue syscall.
 */
int
kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
{
	return kqueue_internal(p, fileproc_alloc_init, NULL, retval);
}
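
/*
 * For context, a minimal userspace round-trip through this syscall
 * (illustrative only, not kernel code):
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);   // register
 *	kevent(kq, NULL, 0, &kev, 1, NULL);   // wait for one event
 */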
#pragma mark kqworkq allocation and deallocation

/*!
 * @function kqworkq_dealloc
 *
 * @brief
 * Deallocates a workqueue kqueue.
 *
 * @discussion
 * This only happens at process death, or for races with concurrent
 * kevent_get_kqwq calls, hence we don't have to care about knotes referencing
 * this kqueue, either there are none, or someone else took care of them.
 */
void
kqworkq_dealloc(struct kqworkq *kqwq)
{
	kqueue_destroy(kqwq, kqworkq_zone);
}

/*!
 * @function kqworkq_alloc
 *
 * @brief
 * Allocates a workqueue kqueue.
 *
 * @discussion
 * This is the slow path of kevent_get_kqwq.
 * This takes care of making sure procs have a single workq kqueue.
 */
OS_NOINLINE
static struct kqworkq *
kqworkq_alloc(struct proc *p, unsigned int flags)
{
	struct kqworkq *kqwq, *tmp;

	kqwq = zalloc_flags(kqworkq_zone, Z_WAITOK | Z_ZERO);

	assert((flags & KEVENT_FLAG_LEGACY32) == 0);
	if (flags & KEVENT_FLAG_LEGACY64) {
		kqwq->kqwq_state = KQ_WORKQ | KQ_KEV64;
	} else {
		kqwq->kqwq_state = KQ_WORKQ | KQ_KEV_QOS;
	}
	kqwq->kqwq_p = p;

	for (int i = 0; i < KQWQ_NBUCKETS; i++) {
		TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_queue[i]);
		TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_suppressed[i]);
	}
	for (int i = 0; i < KQWQ_NBUCKETS; i++) {
		/*
		 * Because of how the bucketized system works, we mix overcommit
		 * sources with not overcommit: each time we move a knote from
		 * one bucket to the next due to overrides, we'd have to track
		 * overcommitness, and it's really not worth it in the
		 * workloop-enabled world which tracks this faithfully.
		 *
		 * Incidentally, this behaves like the original manager-based
		 * kqwq where event delivery always happened (hence is
		 * "overcommit")
		 */
		kqwq->kqwq_request[i].tr_state = WORKQ_TR_STATE_IDLE;
		kqwq->kqwq_request[i].tr_flags = WORKQ_TR_FLAG_KEVENT;
		if (i != KQWQ_QOS_MANAGER) {
			kqwq->kqwq_request[i].tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
		}
		kqwq->kqwq_request[i].tr_kq_qos_index = (kq_index_t)i;
	}

	kqueue_init(kqwq, &kqwq->kqwq_waitq_hook, SYNC_POLICY_FIFO);

	if (!os_atomic_cmpxchgv(&p->p_fd->fd_wqkqueue, NULL, kqwq, &tmp, release)) {
		kqworkq_dealloc(kqwq);
		return tmp;
	}

	return kqwq;
}
#pragma mark kqworkloop allocation and deallocation

#define KQ_HASH(val, mask)  (((val) ^ (val >> 8)) & (mask))
#define CONFIG_KQ_HASHSIZE  CONFIG_KN_HASHSIZE

OS_ALWAYS_INLINE
static inline void
kqhash_lock(struct filedesc *fdp)
{
	lck_mtx_lock_spin_always(&fdp->fd_kqhashlock);
}

OS_ALWAYS_INLINE
static inline void
kqhash_unlock(struct filedesc *fdp)
{
	lck_mtx_unlock(&fdp->fd_kqhashlock);
}
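
/*
 * Worked example of KQ_HASH with hypothetical numbers: for a mask of 0xff,
 * id 0x1234 hashes to ((0x1234 ^ 0x12) & 0xff) == 0x26.  Folding in the
 * next byte keeps sequentially allocated ids from clustering in adjacent
 * buckets.
 */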
OS_ALWAYS_INLINE
static inline void
kqworkloop_hash_insert_locked(struct filedesc *fdp, kqueue_id_t id,
    struct kqworkloop *kqwl)
{
	struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
	LIST_INSERT_HEAD(list, kqwl, kqwl_hashlink);
}

OS_ALWAYS_INLINE
static inline struct kqworkloop *
kqworkloop_hash_lookup_locked(struct filedesc *fdp, kqueue_id_t id)
{
	struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
	struct kqworkloop *kqwl;

	LIST_FOREACH(kqwl, list, kqwl_hashlink) {
		if (kqwl->kqwl_dynamicid == id) {
			return kqwl;
		}
	}
	return NULL;
}

static struct kqworkloop *
kqworkloop_hash_lookup_and_retain(struct filedesc *fdp, kqueue_id_t kq_id)
{
	struct kqworkloop *kqwl = NULL;

	kqhash_lock(fdp);
	if (__probable(fdp->fd_kqhash)) {
		kqwl = kqworkloop_hash_lookup_locked(fdp, kq_id);
		if (kqwl && !kqworkloop_try_retain(kqwl)) {
			kqwl = NULL;
		}
	}
	kqhash_unlock(fdp);
	return kqwl;
}
OS_NOINLINE
static void
kqworkloop_hash_init(struct filedesc *fdp)
{
	struct kqwllist *alloc_hash;
	u_long alloc_mask;

	kqhash_unlock(fdp);
	alloc_hash = hashinit(CONFIG_KQ_HASHSIZE, M_KQUEUE, &alloc_mask);
	kqhash_lock(fdp);

	/* See if we won the race */
	if (__probable(fdp->fd_kqhashmask == 0)) {
		fdp->fd_kqhash = alloc_hash;
		fdp->fd_kqhashmask = alloc_mask;
	} else {
		kqhash_unlock(fdp);
		hashdestroy(alloc_hash, M_KQUEUE, alloc_mask);
		kqhash_lock(fdp);
	}
}
/*!
 * @function kqworkloop_dealloc
 *
 * @brief
 * Deallocates a workloop kqueue.
 *
 * @discussion
 * Knotes hold references on the workloop, so we can't really reach this
 * function unless all of these are already gone.
 *
 * Nothing locked on entry or exit.
 *
 * @param flags
 * Unless KQWL_DEALLOC_SKIP_HASH_REMOVE is set, the workloop is removed
 * from its hash table.
 *
 * @param current_ref
 * This function is also called to undo a kqworkloop_alloc in case of
 * allocation races; current_ref is the refcount that is expected on the
 * workloop object, usually 0, and 1 when a dealloc race is resolved.
 */
static void
kqworkloop_dealloc(struct kqworkloop *kqwl, kqworkloop_dealloc_flags_t flags,
    uint32_t current_ref)
{
	thread_t cur_owner;

	if (__improbable(current_ref > 1)) {
		kqworkloop_release_panic(kqwl);
	}
	assert(kqwl->kqwl_retains == current_ref);

	/* pair with kqunlock() and other kq locks */
	os_atomic_thread_fence(acquire);

	cur_owner = kqwl->kqwl_owner;
	if (cur_owner) {
		if (kqworkloop_override(kqwl) != THREAD_QOS_UNSPECIFIED) {
			thread_drop_kevent_override(cur_owner);
		}
		thread_deallocate(cur_owner);
		kqwl->kqwl_owner = THREAD_NULL;
	}

	if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
		struct turnstile *ts;
		turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
		    &ts, TURNSTILE_WORKLOOPS);
		turnstile_cleanup();
		turnstile_deallocate(ts);
	}

	if ((flags & KQWL_DEALLOC_SKIP_HASH_REMOVE) == 0) {
		struct filedesc *fdp = kqwl->kqwl_p->p_fd;

		kqhash_lock(fdp);
		LIST_REMOVE(kqwl, kqwl_hashlink);
		kqhash_unlock(fdp);
	}

	assert(TAILQ_EMPTY(&kqwl->kqwl_suppressed));
	assert(kqwl->kqwl_owner == THREAD_NULL);
	assert(kqwl->kqwl_turnstile == TURNSTILE_NULL);

	lck_spin_destroy(&kqwl->kqwl_statelock, &kq_lck_grp);
	kqueue_destroy(kqwl, kqworkloop_zone);
}
/*!
 * @function kqworkloop_alloc
 *
 * @brief
 * Allocates a workloop kqueue.
 */
static void
kqworkloop_init(struct kqworkloop *kqwl, proc_t p,
    kqueue_id_t id, workq_threadreq_param_t *trp)
{
	kqwl->kqwl_state = KQ_WORKLOOP | KQ_DYNAMIC | KQ_KEV_QOS;
	kqwl->kqwl_retains = 1; /* donate a retain to creator */
	kqwl->kqwl_dynamicid = id;
	kqwl->kqwl_p = p;
	if (trp) {
		kqwl->kqwl_params = trp->trp_value;
	}

	workq_tr_flags_t tr_flags = WORKQ_TR_FLAG_WORKLOOP;
	if (trp) {
		if (trp->trp_flags & TRP_PRIORITY) {
			tr_flags |= WORKQ_TR_FLAG_WL_OUTSIDE_QOS;
		}
		if (trp->trp_flags) {
			tr_flags |= WORKQ_TR_FLAG_WL_PARAMS;
		}
	}
	kqwl->kqwl_request.tr_state = WORKQ_TR_STATE_IDLE;
	kqwl->kqwl_request.tr_flags = tr_flags;

	for (int i = 0; i < KQWL_NBUCKETS; i++) {
		TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_queue[i]);
	}
	TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_suppressed);

	lck_spin_init(&kqwl->kqwl_statelock, &kq_lck_grp, LCK_ATTR_NULL);

	kqueue_init(kqwl, &kqwl->kqwl_waitq_hook, SYNC_POLICY_FIFO);
}
/*!
 * @function kqworkloop_get_or_create
 *
 * @brief
 * Wrapper around kqworkloop_alloc that handles the uniquing of workloops.
 *
 * @returns
 * 0:      success
 * EINVAL: invalid parameters
 * EEXIST: KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST is set and a collision exists.
 * ENOENT: KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST is set and the entry wasn't found.
 * ENOMEM: allocation failed
 */
static int
kqworkloop_get_or_create(struct proc *p, kqueue_id_t id,
    workq_threadreq_param_t *trp, unsigned int flags, struct kqworkloop **kqwlp)
{
	struct filedesc *fdp = p->p_fd;
	struct kqworkloop *alloc_kqwl = NULL;
	struct kqworkloop *kqwl = NULL;
	int error = 0;

	assert(!trp || (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST));

	if (id == 0 || id == (kqueue_id_t)-1) {
		return EINVAL;
	}

	for (;;) {
		kqhash_lock(fdp);
		if (__improbable(fdp->fd_kqhash == NULL)) {
			kqworkloop_hash_init(fdp);
		}

		kqwl = kqworkloop_hash_lookup_locked(fdp, id);
		if (kqwl) {
			if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
				/*
				 * If MUST_NOT_EXIST was passed, even if we would have failed
				 * the try_retain, it could have gone the other way, and
				 * userspace can't tell. Let'em fix their race.
				 */
				error = EEXIST;
				break;
			}

			if (__probable(kqworkloop_try_retain(kqwl))) {
				/*
				 * This is a valid live workloop !
				 */
				*kqwlp = kqwl;
				error = 0;
				break;
			}
		}

		if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST)) {
			error = ENOENT;
			break;
		}

		/*
		 * We didn't find what we were looking for.
		 *
		 * If this is the second time we reach this point (alloc_kqwl != NULL),
		 * then we're done.
		 *
		 * If this is the first time we reach this point (alloc_kqwl == NULL),
		 * then try to allocate one without blocking.
		 */
		if (__probable(alloc_kqwl == NULL)) {
			alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_NOWAIT | Z_ZERO);
		}
		if (__probable(alloc_kqwl)) {
			kqworkloop_init(alloc_kqwl, p, id, trp);
			kqworkloop_hash_insert_locked(fdp, id, alloc_kqwl);
			kqhash_unlock(fdp);
			*kqwlp = alloc_kqwl;
			return 0;
		}

		/*
		 * We have to block to allocate a workloop, drop the lock,
		 * allocate one, but then we need to retry lookups as someone
		 * else could race with us.
		 */
		kqhash_unlock(fdp);
		alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_WAITOK | Z_ZERO);
	}

	kqhash_unlock(fdp);

	if (__improbable(alloc_kqwl)) {
		zfree(kqworkloop_zone, alloc_kqwl);
	}

	return error;
}
#pragma mark - knotes

static int
filt_no_attach(struct knote *kn, __unused struct kevent_qos_s *kev)
{
	knote_set_error(kn, ENOTSUP);
	return 0;
}

static void
filt_no_detach(__unused struct knote *kn)
{
}

static int
filt_bad_event(struct knote *kn, long hint)
{
	panic("%s[%d](%p, %ld)", __func__, kn->kn_filter, kn, hint);
}

static int
filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev)
{
	panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
}

static int
filt_bad_process(struct knote *kn, struct kevent_qos_s *kev)
{
	panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
}
/*
 * knotes_dealloc - detach all knotes for the process and drop them
 *
 *	Called with proc_fdlock held.
 *	Returns with it locked.
 *	May drop it temporarily.
 *	Process is in such a state that it will not try to allocate
 *	any more knotes during this process (stopped for exit or exec).
 */
void
knotes_dealloc(proc_t p)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct knote *kn;
	struct klist *kn_hash = NULL;
	u_long kn_hashmask;
	int i;

	/* Close all the fd-indexed knotes up front */
	if (fdp->fd_knlistsize > 0) {
		for (i = 0; i < fdp->fd_knlistsize; i++) {
			while ((kn = SLIST_FIRST(&fdp->fd_knlist[i])) != NULL) {
				kq = knote_get_kq(kn);
				kqlock(kq);
				proc_fdunlock(p);
				knote_drop(kq, kn, NULL);
				proc_fdlock(p);
			}
		}
		/* free the table */
		kheap_free(KM_KQUEUE, fdp->fd_knlist,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		fdp->fd_knlist = NULL;
	}
	fdp->fd_knlistsize = 0;

	knhash_lock(fdp);
	proc_fdunlock(p);

	/* Clean out all the hashed knotes as well */
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i <= (int)fdp->fd_knhashmask; i++) {
			while ((kn = SLIST_FIRST(&fdp->fd_knhash[i])) != NULL) {
				kq = knote_get_kq(kn);
				kqlock(kq);
				knhash_unlock(fdp);
				knote_drop(kq, kn, NULL);
				knhash_lock(fdp);
			}
		}
		kn_hash = fdp->fd_knhash;
		kn_hashmask = fdp->fd_knhashmask;
		fdp->fd_knhashmask = 0;
		fdp->fd_knhash = NULL;
	}

	knhash_unlock(fdp);

	if (kn_hash) {
		hashdestroy(kn_hash, M_KQUEUE, kn_hashmask);
	}

	proc_fdlock(p);
}
/*
 * kqworkloops_dealloc - rebalance retains on kqworkloops created with
 * scheduling parameters
 *
 *	Called with proc_fdlock held.
 *	Returns with it locked.
 *	Process is in such a state that it will not try to allocate
 *	any more knotes during this process (stopped for exit or exec).
 */
void
kqworkloops_dealloc(proc_t p)
{
	struct filedesc *fdp = p->p_fd;
	struct kqworkloop *kqwl, *kqwln;
	struct kqwllist tofree;

	if (!(fdp->fd_flags & FD_WORKLOOP)) {
		return;
	}

	kqhash_lock(fdp);

	if (fdp->fd_kqhashmask == 0) {
		kqhash_unlock(fdp);
		return;
	}

	LIST_INIT(&tofree);

	for (size_t i = 0; i <= fdp->fd_kqhashmask; i++) {
		LIST_FOREACH_SAFE(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink, kqwln) {
			/*
			 * kqworkloops that have scheduling parameters have an
			 * implicit retain from kqueue_workloop_ctl that needs
			 * to be balanced on process exit.
			 */
			assert(kqwl->kqwl_params);
			LIST_REMOVE(kqwl, kqwl_hashlink);
			LIST_INSERT_HEAD(&tofree, kqwl, kqwl_hashlink);
		}
	}

	kqhash_unlock(fdp);

	LIST_FOREACH_SAFE(kqwl, &tofree, kqwl_hashlink, kqwln) {
		kqworkloop_dealloc(kqwl, KQWL_DEALLOC_SKIP_HASH_REMOVE, 1);
	}
}
static int
kevent_register_validate_priority(struct kqueue *kq, struct knote *kn,
    struct kevent_qos_s *kev)
{
	/* We don't care about the priority of a disabled or deleted knote */
	if (kev->flags & (EV_DISABLE | EV_DELETE)) {
		return 0;
	}

	if (kq->kq_state & KQ_WORKLOOP) {
		/*
		 * Workloops need valid priorities with a QOS (excluding manager) for
		 * any enabled knote.
		 *
		 * When it is pre-existing, just make sure it has a valid QoS as
		 * kevent_register() will not use the incoming priority (filters who do
		 * have the responsibility to validate it again, see filt_wltouch).
		 *
		 * If the knote is being made, validate the incoming priority.
		 */
		if (!_pthread_priority_thread_qos(kn ? kn->kn_qos : kev->qos)) {
			return ERANGE;
		}
	}

	return 0;
}
/*
 * Prepare a filter for waiting after register.
 *
 * The f_post_register_wait hook will be called later by kevent_register()
 * and should call kevent_register_wait_block()
 */
static int
kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int rc)
{
	thread_t thread = current_thread();

	assert(knote_fops(kn)->f_extended_codes);

	if (kn->kn_thread == NULL) {
		thread_reference(thread);
		kn->kn_thread = thread;
	} else if (kn->kn_thread != thread) {
		/*
		 * kn_thread may be set from a previous aborted wait
		 * However, it has to be from the same thread.
		 */
		kev->flags |= EV_ERROR;
		kev->data = EXDEV;
		return 0;
	}

	return FILTER_REGISTER_WAIT | rc;
}

/*
 * Cleanup a kevent_register_wait_prepare() effect for threads that have been
 * aborted instead of properly woken up with thread_wakeup_thread().
 */
static void
kevent_register_wait_cleanup(struct knote *kn)
{
	thread_t thread = kn->kn_thread;
	kn->kn_thread = NULL;
	thread_deallocate(thread);
}
/*
 * Must be called at the end of a f_post_register_wait call from a filter.
 */
static void
kevent_register_wait_block(struct turnstile *ts, thread_t thread,
    thread_continue_t cont, struct _kevent_register *cont_args)
{
	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
	kqunlock(cont_args->kqwl);
	cont_args->handoff_thread = thread;
	thread_handoff_parameter(thread, cont, cont_args, THREAD_HANDOFF_NONE);
}
/*
 * Called by Filters using a f_post_register_wait to return from their wait.
 */
static void
kevent_register_wait_return(struct _kevent_register *cont_args)
{
	struct kqworkloop *kqwl = cont_args->kqwl;
	struct kevent_qos_s *kev = &cont_args->kev;
	int error = 0;

	if (cont_args->handoff_thread) {
		thread_deallocate(cont_args->handoff_thread);
	}

	if (kev->flags & (EV_ERROR | EV_RECEIPT)) {
		if ((kev->flags & EV_ERROR) == 0) {
			kev->flags |= EV_ERROR;
			kev->data = 0;
		}
		error = kevent_modern_copyout(kev, &cont_args->ueventlist);
		if (error == 0) {
			cont_args->eventout++;
		}
	}

	kqworkloop_release(kqwl);
	if (error == 0) {
		*(int32_t *)&current_uthread()->uu_rval = cont_args->eventout;
	}
	unix_syscall_return(error);
}
/*
 * kevent_register - add a new event to a kqueue
 *
 *	Creates a mapping between the event source and
 *	the kqueue via a knote data structure.
 *
 *	Because many/most of the event sources are file
 *	descriptor related, the knote is linked off
 *	the filedescriptor table for quick access.
 *
 *	called with nothing locked
 *	caller holds a reference on the kqueue
 */
int
kevent_register(struct kqueue *kq, struct kevent_qos_s *kev,
    struct knote **kn_out)
{
	struct proc *p = kq->kq_p;
	const struct filterops *fops;
	struct knote *kn = NULL;
	int result = 0, error = 0;
	unsigned short kev_flags = kev->flags;
	KNOTE_LOCK_CTX(knlc);

	if (__probable(kev->filter < 0 && kev->filter + EVFILT_SYSCOUNT >= 0)) {
		fops = sysfilt_ops[~kev->filter];        /* to 0-base index */
	} else {
		error = EINVAL;
		goto out;
	}

	/* restrict EV_VANISHED to adding udata-specific dispatch kevents */
	if (__improbable((kev->flags & EV_VANISHED) &&
	    (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2))) {
		error = EINVAL;
		goto out;
	}

	/* Simplify the flags - delete and disable overrule */
	if (kev->flags & EV_DELETE) {
		kev->flags &= ~EV_ADD;
	}
	if (kev->flags & EV_DISABLE) {
		kev->flags &= ~EV_ENABLE;
	}

	if (kq->kq_state & KQ_WORKLOOP) {
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_REGISTER),
		    ((struct kqworkloop *)kq)->kqwl_dynamicid,
		    kev->udata, kev->flags, kev->filter);
	} else if (kq->kq_state & KQ_WORKQ) {
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_REGISTER),
		    0, kev->udata, kev->flags, kev->filter);
	} else {
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_REGISTER),
		    VM_KERNEL_UNSLIDE_OR_PERM(kq),
		    kev->udata, kev->flags, kev->filter);
	}

restart:
	/* find the matching knote from the fd tables/hashes */
	kn = kq_find_knote_and_kq_lock(kq, kev, fops->f_isfd, p);
	error = kevent_register_validate_priority(kq, kn, kev);
	result = 0;
	if (error) {
		goto out;
	}

	if (kn == NULL && (kev->flags & EV_ADD) == 0) {
		/*
		 * No knote found, EV_ADD wasn't specified
		 */
		if ((kev_flags & EV_ADD) && (kev_flags & EV_DELETE) &&
		    (kq->kq_state & KQ_WORKLOOP)) {
			/*
			 * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete
			 * that doesn't care about ENOENT, so just pretend the deletion
			 * happened.
			 */
		} else {
			error = ENOENT;
		}
		goto out;
	} else if (kn == NULL) {
		/*
		 * No knote found, need to attach a new one (attach)
		 */

		struct fileproc *knote_fp = NULL;

		/* grab a file reference for the new knote */
		if (fops->f_isfd) {
			if ((error = fp_lookup(p, (int)kev->ident, &knote_fp, 0)) != 0) {
				goto out;
			}
		}

		kn = knote_alloc();
		if (kn == NULL) {
			error = ENOMEM;
			if (knote_fp != NULL) {
				fp_drop(p, (int)kev->ident, knote_fp, 0);
			}
			goto out;
		}

		kn->kn_fp = knote_fp;
		kn->kn_is_fd = fops->f_isfd;
		kn->kn_kq_packed = VM_PACK_POINTER((vm_offset_t)kq, KNOTE_KQ_PACKED);
		kn->kn_status = 0;

		/* was vanish support requested */
		if (kev->flags & EV_VANISHED) {
			kev->flags &= ~EV_VANISHED;
			kn->kn_status |= KN_REQVANISH;
		}

		/* snapshot matching/dispatching protocol flags into knote */
		if (kev->flags & EV_DISABLE) {
			kn->kn_status |= KN_DISABLED;
		}

		/*
		 * copy the kevent state into knote
		 * protocol is that fflags and data
		 * are saved off, and cleared before
		 * calling the attach routine.
		 *
		 * - kn->kn_sfflags aliases with kev->xflags
		 * - kn->kn_sdata  aliases with kev->data
		 * - kn->kn_filter is the top 8 bits of kev->filter
		 */
		kn->kn_kevent = *(struct kevent_internal_s *)kev;
		kn->kn_sfflags = kev->fflags;
		kn->kn_filtid = (uint8_t)~kev->filter;
		kn->kn_fflags = 0;
		knote_reset_priority(kq, kn, kev->qos);

		/* Add the knote for lookup thru the fd table */
		error = kq_add_knote(kq, kn, &knlc, p);
		if (error) {
			knote_free(kn);
			if (knote_fp != NULL) {
				fp_drop(p, (int)kev->ident, knote_fp, 0);
			}

			if (error == ERESTART) {
				goto restart;
			}
			goto out;
		}

		/* fp reference count now applies to knote */

		/*
		 * we can't use filter_call() because f_attach can change the filter ops
		 * for a filter that supports f_extended_codes, so we need to reload
		 * knote_fops() and not use `fops`.
		 */
		result = fops->f_attach(kn, kev);
		if (result && !knote_fops(kn)->f_extended_codes) {
			result = FILTER_ACTIVE;
		}

		kqlock(kq);

		if (result & FILTER_THREADREQ_NODEFEER) {
			enable_preemption();
		}

		if (kn->kn_flags & EV_ERROR) {
			/*
			 * Failed to attach correctly, so drop.
			 */
			kn->kn_filtid = EVFILTID_DETACHED;
			error = (int)kn->kn_sdata;
			knote_drop(kq, kn, &knlc);
			result = 0;
			goto out;
		}

		/*
		 * end "attaching" phase - now just attached
		 *
		 * Mark the thread request overcommit, if appropos
		 *
		 * If the attach routine indicated that an
		 * event is already fired, activate the knote.
		 */
		if ((kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) &&
		    (kq->kq_state & KQ_WORKLOOP)) {
			kqworkloop_set_overcommit((struct kqworkloop *)kq);
		}
	} else if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
		/*
		 * The knote was dropped while we were waiting for the lock,
		 * we need to re-evaluate entirely
		 */
		goto restart;
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Deletion of a knote (drop)
		 *
		 * If the filter wants to filter drop events, let it do so.
		 *
		 * defer-delete: when trying to delete a disabled EV_DISPATCH2 knote,
		 * we must wait for the knote to be re-enabled (unless it is being
		 * re-enabled atomically here).
		 */
		if (knote_fops(kn)->f_allow_drop) {
			bool drop;

			kqunlock(kq);
			drop = knote_fops(kn)->f_allow_drop(kn, kev);
			kqlock(kq);

			if (!drop) {
				knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
				goto out;
			}
		}

		if ((kev->flags & EV_ENABLE) == 0 &&
		    (kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
		    (kn->kn_status & KN_DISABLED) != 0) {
			kn->kn_status |= KN_DEFERDELETE;
			error = EINPROGRESS;
			knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
			goto out;
		}

		knote_drop(kq, kn, &knlc);
		goto out;
	} else {
		/*
		 * Regular update of a knote (touch)
		 *
		 * Call touch routine to notify filter of changes in filter values
		 * (and to re-determine if any events are fired).
		 *
		 * If the knote is in defer-delete, avoid calling the filter touch
		 * routine (it has delivered its last event already).
		 *
		 * If the touch routine had no failure,
		 * apply the requested side effects to the knote.
		 */
		if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
			if (kev->flags & EV_ENABLE) {
				result = FILTER_ACTIVE;
			}
		} else {
			kqunlock(kq);
			result = filter_call(knote_fops(kn), f_touch(kn, kev));
			kqlock(kq);
			if (result & FILTER_THREADREQ_NODEFEER) {
				enable_preemption();
			}
		}

		if (kev->flags & EV_ERROR) {
			result = 0;
			knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
			goto out;
		}

		if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0 &&
		    kn->kn_udata != kev->udata) {
			// this allows klist_copy_udata() not to take locks
			os_atomic_store_wide(&kn->kn_udata, kev->udata, relaxed);
		}
		if ((kev->flags & EV_DISABLE) && !(kn->kn_status & KN_DISABLED)) {
			kn->kn_status |= KN_DISABLED;
			knote_dequeue(kq, kn);
		}

		/* accept new kevent state */
		knote_apply_touch(kq, kn, kev, result);
	}

	/*
	 * When the filter asked for a post-register wait,
	 * we leave the kqueue locked for kevent_register()
	 * to call the filter's f_post_register_wait hook.
	 */
	if (result & FILTER_REGISTER_WAIT) {
		knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
		*kn_out = kn;
	} else {
		knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
	}

out:
	/* output local errors through the kevent */
	if (error) {
		kev->flags |= EV_ERROR;
		kev->data = error;
	}
	return result;
}
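
/*
 * Defer-delete, from the caller's perspective (an illustrative note, not a
 * normative API description): deleting a disabled EV_DISPATCH2 knote above
 * sets KN_DEFERDELETE and reports EINPROGRESS instead of dropping; the
 * final EV_DELETE event is delivered once the knote is re-enabled.
 */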
/*
 * knote_process - process a triggered event
 *
 *	Validate that it is really still a triggered event
 *	by calling the filter routines (if necessary). Hold
 *	a use reference on the knote to avoid it being detached.
 *
 *	If it is still considered triggered, we will have taken
 *	a copy of the state under the filter lock. We use that
 *	snapshot to dispatch the knote for future processing (or
 *	not, if this was a lost event).
 *
 *	Our caller assures us that nobody else can be processing
 *	events from this knote during the whole operation. But
 *	others can be touching or posting events to the knote
 *	interspersed with our processing it.
 *
 *	caller holds a reference on the kqueue.
 *	kqueue locked on entry and exit - but may be dropped
 */
static int
knote_process(struct knote *kn, kevent_ctx_t kectx,
    kevent_callback_t callback)
{
	struct kevent_qos_s kev;
	struct kqueue *kq = knote_get_kq(kn);
	KNOTE_LOCK_CTX(knlc);
	int result = FILTER_ACTIVE;
	int error = 0;
	bool drop = false;

	/*
	 * Must be active or stayactive
	 * Must be queued and not disabled/suppressed or dropping
	 */
	assert(kn->kn_status & KN_QUEUED);
	assert(kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE));
	assert(!(kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)));

	if (kq->kq_state & KQ_WORKLOOP) {
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS),
		    ((struct kqworkloop *)kq)->kqwl_dynamicid,
		    kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
		    kn->kn_filtid);
	} else if (kq->kq_state & KQ_WORKQ) {
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS),
		    0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
		    kn->kn_filtid);
	} else {
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS),
		    VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata,
		    kn->kn_status | (kn->kn_id << 32), kn->kn_filtid);
	}

	if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) {
		/*
		 * When the knote is dropping or has dropped,
		 * then there's nothing we want to process.
		 */
		return EJUSTRETURN;
	}

	/*
	 * While waiting for the knote lock, we may have dropped the kq lock.
	 * and a touch may have disabled and dequeued the knote.
	 */
	if (!(kn->kn_status & KN_QUEUED)) {
		knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
		return EJUSTRETURN;
	}

	/*
	 * For deferred-drop or vanished events, we just create a fake
	 * event to acknowledge end-of-life. Otherwise, we call the
	 * filter's process routine to snapshot the kevent state under
	 * the filter's locking protocol.
	 *
	 * suppress knotes to avoid returning the same event multiple times in
	 * a single call.
	 */
	knote_suppress(kq, kn);

	if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
		uint16_t kev_flags = EV_DISPATCH2 | EV_ONESHOT;
		if (kn->kn_status & KN_DEFERDELETE) {
			kev_flags |= EV_DELETE;
		} else {
			kev_flags |= EV_VANISHED;
		}

		/* create fake event */
		kev = (struct kevent_qos_s){
			.filter = kn->kn_filter,
			.ident  = kn->kn_id,
			.flags  = kev_flags,
			.udata  = kn->kn_udata,
		};
	} else {
		kqunlock(kq);
		kev = (struct kevent_qos_s) { };
		result = filter_call(knote_fops(kn), f_process(kn, &kev));
		kqlock(kq);
	}

	/*
	 * Determine how to dispatch the knote for future event handling.
	 * not-fired: just return (do not callout, leave deactivated).
	 * One-shot:  If dispatch2, enter deferred-delete mode (unless this
	 *            is the deferred delete event delivery itself).  Otherwise,
	 *            drop it.
	 * Dispatch:  don't clear state, just mark it disabled.
	 * Cleared:   just leave it deactivated.
	 * Others:    re-activate as there may be more events to handle.
	 *            This will not wake up more handlers right now, but
	 *            at the completion of handling events it may trigger
	 *            more handler threads (TODO: optimize based on more than
	 *            just this one event being detected by the filter).
	 */
	if ((result & FILTER_ACTIVE) == 0) {
		if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0) {
			/*
			 * Stay active knotes should not be unsuppressed or we'd create an
			 * infinite loop.
			 *
			 * Some knotes (like EVFILT_WORKLOOP) can be reactivated from
			 * within f_process() but that doesn't necessarily make them
			 * ready to process, so we should leave them be.
			 *
			 * For other knotes, since we will not return an event,
			 * there's no point keeping the knote suppressed.
			 */
			knote_unsuppress(kq, kn);
		}
		knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
		return EJUSTRETURN;
	}

	if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
		knote_adjust_qos(kq, kn, result);
	}
	kev.qos = _pthread_priority_combine(kn->kn_qos, kn->kn_qos_override);

	if (kev.flags & EV_ONESHOT) {
		if ((kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
		    (kn->kn_status & KN_DEFERDELETE) == 0) {
			/* defer dropping non-delete oneshot dispatch2 events */
			kn->kn_status |= KN_DEFERDELETE | KN_DISABLED;
		} else {
			drop = true;
		}
	} else if (kn->kn_flags & EV_DISPATCH) {
		/* disable all dispatch knotes */
		kn->kn_status |= KN_DISABLED;
	} else if ((kn->kn_flags & EV_CLEAR) == 0) {
		/* re-activate in case there are more events */
		knote_activate(kq, kn, FILTER_ACTIVE);
	}

	/*
	 * callback to handle each event as we find it.
	 * If we have to detach and drop the knote, do
	 * it while we have the kq unlocked.
	 */
	if (drop) {
		knote_drop(kq, kn, &knlc);
	} else {
		knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
	}

	if (kev.flags & EV_VANISHED) {
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_VANISHED),
		    kev.ident, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
		    kn->kn_filtid);
	}

	error = (callback)(&kev, kectx);
	kqlock(kq);
	return error;
}
/*
 * Returns -1 if the kqueue was unbound and processing should not happen
 */
#define KQWQAE_BEGIN_PROCESSING 1
#define KQWQAE_END_PROCESSING   2
#define KQWQAE_UNBIND           3
static int
kqworkq_acknowledge_events(struct kqworkq *kqwq, workq_threadreq_t kqr,
    int kevent_flags, int kqwqae_op)
{
	thread_qos_t old_override = THREAD_QOS_UNSPECIFIED;
	thread_t thread = kqr_thread_fast(kqr);
	struct knote *kn;
	int rc = 0;
	bool unbind;
	struct kqtailq *suppressq = &kqwq->kqwq_suppressed[kqr->tr_kq_qos_index];

	kqlock_held(&kqwq->kqwq_kqueue);

	if (!TAILQ_EMPTY(suppressq)) {
		/*
		 * Return suppressed knotes to their original state.
		 * For workq kqueues, suppressed ones that are still
		 * truly active (not just forced into the queue) will
		 * set flags we check below to see if anything got
		 * woken up.
		 */
		while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
			assert(kn->kn_status & KN_SUPPRESSED);
			knote_unsuppress(kqwq, kn);
		}
	}

#if DEBUG || DEVELOPMENT
	thread_t self = current_thread();
	struct uthread *ut = get_bsdthread_info(self);

	assert(thread == self);
	assert(ut->uu_kqr_bound == kqr);
#endif // DEBUG || DEVELOPMENT

	if (kqwqae_op == KQWQAE_UNBIND) {
		unbind = true;
	} else if ((kevent_flags & KEVENT_FLAG_PARKING) == 0) {
		unbind = false;
	} else {
		unbind = !kqr->tr_kq_wakeup;
	}
	if (unbind) {
		old_override = kqworkq_unbind_locked(kqwq, kqr, thread);
		rc = -1;
		/*
		 * request a new thread if we didn't process the whole queue or real events
		 * have happened (not just putting stay-active events back).
		 */
		if (kqr->tr_kq_wakeup) {
			kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr,
			    kqr->tr_kq_qos_index, 0);
		}
	}

	if (rc == 0) {
		/*
		 * Reset wakeup bit to notice events firing while we are processing,
		 * as we cannot rely on the bucket queue emptiness because of stay
		 * active knotes.
		 */
		kqr->tr_kq_wakeup = false;
	}

	if (old_override) {
		thread_drop_kevent_override(thread);
	}

	return rc;
}
/*
 * Return 0 to indicate that processing should proceed,
 * -1 if there is nothing to process.
 *
 * Called with kqueue locked and returns the same way,
 * but may drop lock temporarily.
 */
static int
kqworkq_begin_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
    int kevent_flags)
{
	int rc = 0;

	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START,
	    0, kqr->tr_kq_qos_index);

	rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
	    KQWQAE_BEGIN_PROCESSING);

	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END,
	    thread_tid(kqr_thread(kqr)), kqr->tr_kq_wakeup);

	return rc;
}
static thread_qos_t
kqworkloop_acknowledge_events(struct kqworkloop *kqwl)
{
	kq_index_t qos = THREAD_QOS_UNSPECIFIED;
	struct knote *kn, *tmp;

	kqlock_held(kqwl);

	TAILQ_FOREACH_SAFE(kn, &kqwl->kqwl_suppressed, kn_tqe, tmp) {
		/*
		 * If a knote that can adjust QoS is disabled because of the automatic
		 * behavior of EV_DISPATCH, the knotes should stay suppressed so that
		 * further overrides keep pushing.
		 */
		if (knote_fops(kn)->f_adjusts_qos && (kn->kn_status & KN_DISABLED) &&
		    (kn->kn_status & (KN_STAYACTIVE | KN_DROPPING)) == 0 &&
		    (kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) {
			qos = MAX(qos, kn->kn_qos_override);
			continue;
		}
		knote_unsuppress(kqwl, kn);
	}

	return qos;
}
static int
kqworkloop_begin_processing(struct kqworkloop *kqwl, unsigned int kevent_flags)
{
	workq_threadreq_t kqr = &kqwl->kqwl_request;
	struct kqueue *kq = &kqwl->kqwl_kqueue;
	thread_qos_t qos_override;
	thread_t thread = kqr_thread_fast(kqr);
	int rc = 0, op = KQWL_UTQ_NONE;

	kqlock_held(kq);

	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_START,
	    kqwl->kqwl_dynamicid, 0, 0);

	/* nobody else should still be processing */
	assert((kq->kq_state & KQ_PROCESSING) == 0);

	kq->kq_state |= KQ_PROCESSING;

	if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
		op = KQWL_UTQ_RESET_WAKEUP_OVERRIDE;
	}

	if (kevent_flags & KEVENT_FLAG_PARKING) {
		/*
		 * When "parking" we want to process events and if no events are found
		 * unbind.
		 *
		 * However, non overcommit threads sometimes park even when they have
		 * more work so that the pool can narrow.  For these, we need to unbind
		 * early, so that calling kqworkloop_update_threads_qos() can ask the
		 * workqueue subsystem whether the thread should park despite having
		 * pending events.
		 */
		if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
			op = KQWL_UTQ_PARKING;
		} else {
			op = KQWL_UTQ_UNBINDING;
		}
	}
	if (op == KQWL_UTQ_NONE) {
		goto done;
	}

	qos_override = kqworkloop_acknowledge_events(kqwl);

	if (op == KQWL_UTQ_UNBINDING) {
		kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_IMMEDIATELY);
		kqworkloop_release_live(kqwl);
	}
	kqworkloop_update_threads_qos(kqwl, op, qos_override);
	if (op == KQWL_UTQ_PARKING) {
		if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) {
			/*
			 * We cannot trust tr_kq_wakeup when looking at stay active knotes.
			 * We need to process once, and kqworkloop_end_processing will
			 * handle the unbind.
			 */
		} else if (!kqr->tr_kq_wakeup || kqwl->kqwl_owner) {
			kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
			kqworkloop_release_live(kqwl);
			rc = -1;
		}
	} else if (op == KQWL_UTQ_UNBINDING) {
		if (kqr_thread(kqr) == thread) {
			/*
			 * The thread request fired again, passed the admission check and
			 * got bound to the current thread again.
			 */
		} else {
			rc = -1;
		}
	}

	if (rc == 0) {
		/*
		 * Reset wakeup bit to notice stay active events firing while we are
		 * processing, as we cannot rely on the stayactive bucket emptiness.
		 */
		kqwl->kqwl_wakeup_indexes &= ~KQWL_STAYACTIVE_FIRED_BIT;
	} else {
		kq->kq_state &= ~KQ_PROCESSING;
	}

	if (rc == -1) {
		kqworkloop_unbind_delayed_override_drop(thread);
	}

done:
	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_END,
	    kqwl->kqwl_dynamicid, 0, 0);

	return rc;
}
/*
 * Return 0 to indicate that processing should proceed,
 * -1 if there is nothing to process.
 * EBADF if the kqueue is draining
 *
 * Called with kqueue locked and returns the same way,
 * but may drop lock temporarily.
 */
static int
kqfile_begin_processing(struct kqfile *kq)
{
	struct kqtailq *suppressq;

	kqlock_held(kq);

	assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_START,
	    VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);

	/* wait to become the exclusive processing thread */
	for (;;) {
		if (kq->kqf_state & KQ_DRAIN) {
			KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
			    VM_KERNEL_UNSLIDE_OR_PERM(kq), 2);
			return EBADF;
		}

		if ((kq->kqf_state & KQ_PROCESSING) == 0) {
			break;
		}

		/* if someone else is processing the queue, wait */
		kq->kqf_state |= KQ_PROCWAIT;
		suppressq = &kq->kqf_suppressed;
		waitq_assert_wait64((struct waitq *)&kq->kqf_wqs,
		    CAST_EVENT64_T(suppressq), THREAD_UNINT | THREAD_WAIT_NOREPORT,
		    TIMEOUT_WAIT_FOREVER);

		kqunlock(kq);
		thread_block(THREAD_CONTINUE_NULL);
		kqlock(kq);
	}

	/* Nobody else processing */

	/* clear pre-posts and KQ_WAKEUP now, in case we bail early */
	waitq_set_clear_preposts(&kq->kqf_wqs);
	kq->kqf_state &= ~KQ_WAKEUP;

	/* anything left to process? */
	if (TAILQ_EMPTY(&kq->kqf_queue)) {
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
		    VM_KERNEL_UNSLIDE_OR_PERM(kq), 1);
		return -1;
	}

	/* convert to processing mode */
	kq->kqf_state |= KQ_PROCESSING;

	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
	    VM_KERNEL_UNSLIDE_OR_PERM(kq));

	return 0;
}
/*
 * Try to end the processing, only called when a workq thread is attempting to
 * park (KEVENT_FLAG_PARKING is set).
 *
 * When returning -1, the kqworkq is setup again so that it is ready to be
 * processed.
 */
static int
kqworkq_end_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
    int kevent_flags)
{
	if (!TAILQ_EMPTY(&kqwq->kqwq_queue[kqr->tr_kq_qos_index])) {
		/* remember we didn't process everything */
		kqr->tr_kq_wakeup = true;
	}

	if (kevent_flags & KEVENT_FLAG_PARKING) {
		/*
		 * if acknowledge events "succeeds" it means there are events,
		 * which is a failure condition for end_processing.
		 */
		int rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
		    KQWQAE_END_PROCESSING);
		if (rc == 0) {
			return -1;
		}
	}

	return 0;
}
/*
 * Try to end the processing, only called when a workq thread is attempting to
 * park (KEVENT_FLAG_PARKING is set).
 *
 * When returning -1, the kqworkq is setup again so that it is ready to be
 * processed (as if kqworkloop_begin_processing had just been called).
 *
 * If successful and KEVENT_FLAG_PARKING was set in the kevent_flags,
 * the kqworkloop is unbound from its servicer as a side effect.
 */
static int
kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags)
{
	struct kqueue *kq = &kqwl->kqwl_kqueue;
	workq_threadreq_t kqr = &kqwl->kqwl_request;
	thread_qos_t qos_override;
	thread_t thread = kqr_thread_fast(kqr);
	int rc = 0;

	kqlock_held(kq);

	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_START,
	    kqwl->kqwl_dynamicid, 0, 0);

	if (flags & KQ_PROCESSING) {
		assert(kq->kq_state & KQ_PROCESSING);

		/*
		 * If we still have queued stayactive knotes, remember we didn't finish
		 * processing all of them.  This should be extremely rare and would
		 * require to have a lot of them registered and fired.
		 */
		if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) {
			kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS,
			    KQWL_BUCKET_STAYACTIVE);
		}

		/*
		 * When KEVENT_FLAG_PARKING is set, we need to attempt an unbind while
		 * still under the lock.
		 *
		 * So we do everything kqworkloop_unbind() would do, but because we're
		 * inside kqueue_process(), if the workloop actually received events
		 * while our locks were dropped, we have the opportunity to fail the end
		 * processing and loop again.
		 *
		 * This avoids going through the process-wide workqueue lock hence
		 * scales better.
		 */
		if (kevent_flags & KEVENT_FLAG_PARKING) {
			qos_override = kqworkloop_acknowledge_events(kqwl);
		}
	}

	if (kevent_flags & KEVENT_FLAG_PARKING) {
		kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_PARKING, qos_override);
		if (kqr->tr_kq_wakeup && !kqwl->kqwl_owner) {
			/*
			 * Reset wakeup bit to notice stay active events firing while we are
			 * processing, as we cannot rely on the stayactive bucket emptiness.
			 */
			kqwl->kqwl_wakeup_indexes &= ~KQWL_STAYACTIVE_FIRED_BIT;
			rc = -1;
		} else {
			kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
			kqworkloop_release_live(kqwl);
			kq->kq_state &= ~flags;
		}
	} else {
		kq->kq_state &= ~flags;
		kq->kq_state |= KQ_R2K_ARMED;
		kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0);
	}

	if ((kevent_flags & KEVENT_FLAG_PARKING) && rc == 0) {
		kqworkloop_unbind_delayed_override_drop(thread);
	}

	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_END,
	    kqwl->kqwl_dynamicid, 0, 0);

	return rc;
}
/*
 * Called with kqueue lock held.
 *
 * Returns:
 *  0: no more events
 * -1: has more events
 * EBADF: kqueue is in draining mode
 */
static int
kqfile_end_processing(struct kqfile *kq)
{
	struct kqtailq *suppressq = &kq->kqf_suppressed;
	struct knote *kn;
	int procwait;

	assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);

	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_END),
	    VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);

	/*
	 * Return suppressed knotes to their original state.
	 */
	while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
		assert(kn->kn_status & KN_SUPPRESSED);
		knote_unsuppress(kq, kn);
	}

	procwait = (kq->kqf_state & KQ_PROCWAIT);
	kq->kqf_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);

	if (procwait) {
		/* first wake up any thread already waiting to process */
		waitq_wakeup64_all((struct waitq *)&kq->kqf_wqs,
		    CAST_EVENT64_T(suppressq), THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
	}

	if (kq->kqf_state & KQ_DRAIN) {
		return EBADF;
	}
	return (kq->kqf_state & KQ_WAKEUP) ? -1 : 0;
}
static int
kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options,
    struct kqueue_workloop_params *params, int *retval)
{
	int error = 0;
	struct kqworkloop *kqwl;
	struct filedesc *fdp = p->p_fd;
	workq_threadreq_param_t trp = { };

	switch (cmd) {
	case KQ_WORKLOOP_CREATE:
		if (!params->kqwlp_flags) {
			error = EINVAL;
			break;
		}

		if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) &&
		    (params->kqwlp_sched_pri < 1 ||
		    params->kqwlp_sched_pri > 63 /* MAXPRI_USER */)) {
			error = EINVAL;
			break;
		}

		if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) &&
		    invalid_policy(params->kqwlp_sched_pol)) {
			error = EINVAL;
			break;
		}

		if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) &&
		    (params->kqwlp_cpu_percent <= 0 ||
		    params->kqwlp_cpu_percent > 100 ||
		    params->kqwlp_cpu_refillms <= 0 ||
		    params->kqwlp_cpu_refillms > 0x00ffffff)) {
			error = EINVAL;
			break;
		}

		if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) {
			trp.trp_flags |= TRP_PRIORITY;
			trp.trp_pri = (uint8_t)params->kqwlp_sched_pri;
		}
		if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) {
			trp.trp_flags |= TRP_POLICY;
			trp.trp_pol = (uint8_t)params->kqwlp_sched_pol;
		}
		if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) {
			trp.trp_flags |= TRP_CPUPERCENT;
			trp.trp_cpupercent = (uint8_t)params->kqwlp_cpu_percent;
			trp.trp_refillms = params->kqwlp_cpu_refillms;
		}

		error = kqworkloop_get_or_create(p, params->kqwlp_id, &trp,
		    KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
		    KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST, &kqwl);
		if (error) {
			break;
		}

		if (!(fdp->fd_flags & FD_WORKLOOP)) {
			/* FD_WORKLOOP indicates we've ever created a workloop
			 * via this syscall but it's only ever added to a process, never
			 * removed.
			 */
			proc_fdlock(p);
			fdp->fd_flags |= FD_WORKLOOP;
			proc_fdunlock(p);
		}
		break;
	case KQ_WORKLOOP_DESTROY:
		error = kqworkloop_get_or_create(p, params->kqwlp_id, NULL,
		    KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
		    KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST, &kqwl);
		if (error) {
			break;
		}
		kqlock(kqwl);
		trp.trp_value = kqwl->kqwl_params;
		if (trp.trp_flags && !(trp.trp_flags & TRP_RELEASED)) {
			trp.trp_flags |= TRP_RELEASED;
			kqwl->kqwl_params = trp.trp_value;
			kqworkloop_release_live(kqwl);
		} else {
			error = EINVAL;
		}
		kqunlock(kqwl);
		kqworkloop_release(kqwl);
		break;
	default:
		error = EINVAL;
		break;
	}

	*retval = 0;
	return error;
}
int
kqueue_workloop_ctl(proc_t p, struct kqueue_workloop_ctl_args *uap, int *retval)
{
	struct kqueue_workloop_params params = {
		.kqwlp_id = 0,
	};
	if (uap->sz < sizeof(params.kqwlp_version)) {
		return EINVAL;
	}

	size_t copyin_sz = MIN(sizeof(params), uap->sz);
	int rv = copyin(uap->addr, &params, copyin_sz);
	if (rv) {
		return rv;
	}

	if (params.kqwlp_version != (int)uap->sz) {
		return EINVAL;
	}

	return kqueue_workloop_ctl_internal(p, uap->cmd, uap->options, &params,
	    retval);
}
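/*
 * Illustrative sketch (not part of the original file, kept under #if 0 so it
 * never builds): the copyin above is the size-prefixed, versioned-structure
 * idiom. Userspace passes the structure size as both `sz` and the version
 * field; the kernel copies in at most sizeof(params) bytes, so trailing
 * fields added by a newer userspace are ignored by an older kernel, and the
 * version check rejects mismatched callers. Stand-alone, with hypothetical
 * names:
 */
#if 0
struct versioned_args {
	int      version;        /* userspace sets this to sizeof(struct versioned_args) */
	uint64_t payload;
	/* new fields are only ever appended here */
};

static int
copyin_versioned_args(user_addr_t uaddr, size_t usz, struct versioned_args *args)
{
	bzero(args, sizeof(*args));                 /* unsent tail fields read as zero */
	if (usz < sizeof(args->version)) {
		return EINVAL;                      /* cannot even read the version */
	}
	int error = copyin(uaddr, args, MIN(sizeof(*args), usz));
	if (error == 0 && args->version != (int)usz) {
		error = EINVAL;                     /* declared and passed sizes must agree */
	}
	return error;
}
#endif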
static int
kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
    __unused vfs_context_t ctx)
{
	struct kqfile *kq = (struct kqfile *)fp->f_data;
	struct kqtailq *suppressq = &kq->kqf_suppressed;
	struct kqtailq *queue = &kq->kqf_queue;
	struct knote *kn;
	int retnum = 0;

	if (which != FREAD) {
		return 0;
	}

	kqlock(kq);

	assert((kq->kqf_state & KQ_WORKQ) == 0);

	/*
	 * If this is the first pass, link the wait queue associated with the
	 * the kqueue onto the wait queue set for the select(). Normally we
	 * use selrecord() for this, but it uses the wait queue within the
	 * selinfo structure and we need to use the main one for the kqueue to
	 * catch events from KN_STAYQUEUED sources. So we do the linkage manually.
	 * (The select() call will unlink them when it ends).
	 */
	if (wq_link_id != NULL) {
		thread_t cur_act = current_thread();
		struct uthread * ut = get_bsdthread_info(cur_act);

		kq->kqf_state |= KQ_SEL;
		waitq_link((struct waitq *)&kq->kqf_wqs, ut->uu_wqset,
		    WAITQ_SHOULD_LOCK, (uint64_t *)wq_link_id);

		/* always consume the reserved link object */
		waitq_link_release(*(uint64_t *)wq_link_id);
		*(uint64_t *)wq_link_id = 0;

		/*
		 * selprocess() is expecting that we send it back the waitq
		 * that was just added to the thread's waitq set. In order
		 * to not change the selrecord() API (which is exported to
		 * kexts), we pass this value back through the
		 * void *wq_link_id pointer we were passed. We need to use
		 * memcpy here because the pointer may not be properly aligned
		 * on 32-bit systems.
		 */
		void *wqptr = &kq->kqf_wqs;
		memcpy(wq_link_id, (void *)&wqptr, sizeof(void *));
	}

	if (kqfile_begin_processing(kq) == -1) {
		kqunlock(kq);
		return 0;
	}

	if (!TAILQ_EMPTY(queue)) {
		/*
		 * there is something queued - but it might be a
		 * KN_STAYACTIVE knote, which may or may not have
		 * any events pending. Otherwise, we have to walk
		 * the list of knotes to see, and peek at the
		 * (non-vanished) stay-active ones to be really sure.
		 */
		while ((kn = (struct knote *)TAILQ_FIRST(queue)) != NULL) {
			if (kn->kn_status & KN_ACTIVE) {
				retnum = 1;
				goto out;
			}
			assert(kn->kn_status & KN_STAYACTIVE);
			knote_suppress(kq, kn);
		}

		/*
		 * There were no regular events on the queue, so take
		 * a deeper look at the stay-queued ones we suppressed.
		 */
		while ((kn = (struct knote *)TAILQ_FIRST(suppressq)) != NULL) {
			KNOTE_LOCK_CTX(knlc);
			int result = 0;

			/* If didn't vanish while suppressed - peek at it */
			if ((kn->kn_status & KN_DROPPING) || !knote_lock(kq, kn, &knlc,
			    KNOTE_KQ_LOCK_ON_FAILURE)) {
				continue;
			}

			result = filter_call(knote_fops(kn), f_peek(kn));

			kqlock(kq);
			knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);

			/* unsuppress it */
			knote_unsuppress(kq, kn);

			/* has data or it has to report a vanish */
			if (result & FILTER_ACTIVE) {
				retnum = 1;
				goto out;
			}
		}
	}

out:
	kqfile_end_processing(kq);
	kqunlock(kq);
	return retnum;
}
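/*
 * Illustrative sketch (not part of the original file, kept under #if 0): the
 * path above runs when userspace select()s on a kqueue file descriptor,
 * which is public API. A kqueue fd reports readable once it has pending
 * events:
 */
#if 0
/* userspace demonstration - compile with a normal SDK, not in the kernel */
#include <sys/event.h>
#include <sys/select.h>
#include <unistd.h>

int
kqueue_becomes_readable(void)
{
	int kq = kqueue();
	struct kevent kev;

	/* register a one-second timer so the kqueue eventually has an event */
	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 1000, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);

	fd_set rfds;
	FD_ZERO(&rfds);
	FD_SET(kq, &rfds);

	/* blocks in kqueue_select() above until the timer fires */
	int n = select(kq + 1, &rfds, NULL, NULL, NULL);
	close(kq);
	return n == 1 && FD_ISSET(kq, &rfds);
}
#endif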
static int
kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
{
	struct kqfile *kqf = (struct kqfile *)fg->fg_data;

	assert((kqf->kqf_state & KQ_WORKQ) == 0);
	kqueue_dealloc(&kqf->kqf_kqueue);
	fg->fg_data = NULL;
	return 0;
}
/*
 * Max depth of the nested kq path that can be created.
 * Note that this has to be less than the size of kq_level
 * to avoid wrapping around and mislabeling the level.
 */
#define MAX_NESTED_KQ 1000

/*
 * The caller has taken a use-count reference on this kqueue and will donate it
 * to the kqueue we are being added to. This keeps the kqueue from closing until
 * that relationship is torn down.
 */
static int
kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
    __unused struct kevent_qos_s *kev)
{
	struct kqfile *kqf = (struct kqfile *)fp->f_data;
	struct kqueue *kq = &kqf->kqf_kqueue;
	struct kqueue *parentkq = knote_get_kq(kn);

	assert((kqf->kqf_state & KQ_WORKQ) == 0);

	if (parentkq == kq || kn->kn_filter != EVFILT_READ) {
		knote_set_error(kn, EINVAL);
		return 0;
	}

	/*
	 * We have to avoid creating a cycle when nesting kqueues
	 * inside another. Rather than trying to walk the whole
	 * potential DAG of nested kqueues, we just use a simple
	 * ceiling protocol. When a kqueue is inserted into another,
	 * we check that the (future) parent is not already nested
	 * into another kqueue at a lower level than the potential
	 * child (because it could indicate a cycle). If that test
	 * passes, we just mark the nesting levels accordingly.
	 *
	 * Only up to MAX_NESTED_KQ can be nested.
	 *
	 * Note: kqworkq and kqworkloop cannot be nested and have reused their
	 * kq_level field, so ignore these as parent.
	 */

	kqlock(parentkq);

	if ((parentkq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0) {
		if (parentkq->kq_level > 0 &&
		    parentkq->kq_level < kq->kq_level) {
			kqunlock(parentkq);
			knote_set_error(kn, EINVAL);
			return 0;
		}

		/* set parent level appropriately */
		uint16_t plevel = (parentkq->kq_level == 0)? 2: parentkq->kq_level;
		if (plevel < kq->kq_level + 1) {
			if (kq->kq_level + 1 > MAX_NESTED_KQ) {
				kqunlock(parentkq);
				knote_set_error(kn, EINVAL);
				return 0;
			}
			plevel = kq->kq_level + 1;
		}

		parentkq->kq_level = plevel;
	}

	kqunlock(parentkq);

	kn->kn_filtid = EVFILTID_KQREAD;
	kqlock(kq);
	KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
	/* indicate nesting in child, if needed */
	if (kq->kq_level == 0) {
		kq->kq_level = 1;
	}

	int count = kq->kq_count;
	kqunlock(kq);
	return count > 0;
}
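/*
 * Illustrative sketch (not part of the original file, kept under #if 0): the
 * ceiling protocol above is exercised when userspace nests one kqueue inside
 * another with EVFILT_READ, which is public API. The child's kq_level
 * becomes 1 and the parent's at least 2, so a parent can never sit at a
 * lower level than a prospective child:
 */
#if 0
/* userspace demonstration - compile with a normal SDK, not in the kernel */
#include <sys/event.h>
#include <unistd.h>

int
nest_kqueues(void)
{
	int child = kqueue();
	int parent = kqueue();
	struct kevent kev;

	/* parent now reports an event whenever child has pending events */
	EV_SET(&kev, child, EVFILT_READ, EV_ADD, 0, 0, NULL);
	return kevent(parent, &kev, 1, NULL, 0, NULL); /* 0 on success */
}
#endif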
/*
 * kqueue_drain - called when kq is closed
 */
static int
kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
{
	struct kqfile *kqf = (struct kqfile *)fp->fp_glob->fg_data;

	assert((kqf->kqf_state & KQ_WORKQ) == 0);

	kqlock(kqf);
	kqf->kqf_state |= KQ_DRAIN;

	/* wakeup sleeping threads */
	if ((kqf->kqf_state & (KQ_SLEEP | KQ_SEL)) != 0) {
		kqf->kqf_state &= ~(KQ_SLEEP | KQ_SEL);
		(void)waitq_wakeup64_all((struct waitq *)&kqf->kqf_wqs,
		    KQ_EVENT, THREAD_RESTART,
		    WAITQ_ALL_PRIORITIES);
	}

	/* wakeup threads waiting their turn to process */
	if (kqf->kqf_state & KQ_PROCWAIT) {
		assert(kqf->kqf_state & KQ_PROCESSING);

		kqf->kqf_state &= ~KQ_PROCWAIT;
		(void)waitq_wakeup64_all((struct waitq *)&kqf->kqf_wqs,
		    CAST_EVENT64_T(&kqf->kqf_suppressed),
		    THREAD_RESTART, WAITQ_ALL_PRIORITIES);
	}

	kqunlock(kqf);
	return 0;
}
static int
kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
{
	assert((kq->kq_state & KQ_WORKQ) == 0);

	kqlock(kq);
	if (isstat64 != 0) {
		struct stat64 *sb64 = (struct stat64 *)ub;

		bzero((void *)sb64, sizeof(*sb64));
		sb64->st_size = kq->kq_count;
		if (kq->kq_state & KQ_KEV_QOS) {
			sb64->st_blksize = sizeof(struct kevent_qos_s);
		} else if (kq->kq_state & KQ_KEV64) {
			sb64->st_blksize = sizeof(struct kevent64_s);
		} else if (IS_64BIT_PROCESS(p)) {
			sb64->st_blksize = sizeof(struct user64_kevent);
		} else {
			sb64->st_blksize = sizeof(struct user32_kevent);
		}
		sb64->st_mode = S_IFIFO;
	} else {
		struct stat *sb = (struct stat *)ub;

		bzero((void *)sb, sizeof(*sb));
		sb->st_size = kq->kq_count;
		if (kq->kq_state & KQ_KEV_QOS) {
			sb->st_blksize = sizeof(struct kevent_qos_s);
		} else if (kq->kq_state & KQ_KEV64) {
			sb->st_blksize = sizeof(struct kevent64_s);
		} else if (IS_64BIT_PROCESS(p)) {
			sb->st_blksize = sizeof(struct user64_kevent);
		} else {
			sb->st_blksize = sizeof(struct user32_kevent);
		}
		sb->st_mode = S_IFIFO;
	}
	kqunlock(kq);
	return 0;
}
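/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * kqueue_stat() above is reachable from userspace via fstat(2) on a kqueue
 * fd; st_size reports the number of pending events and st_mode reads as a
 * FIFO:
 */
#if 0
/* userspace demonstration - compile with a normal SDK, not in the kernel */
#include <sys/event.h>
#include <sys/stat.h>

long
pending_kevents(int kq)
{
	struct stat st;

	if (fstat(kq, &st) != 0) {
		return -1;
	}
	/* S_ISFIFO(st.st_mode) is true; st_blksize is the kevent record size */
	return (long)st.st_size;
}
#endif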
static inline bool
kqueue_threadreq_can_use_ast(struct kqueue *kq)
{
	if (current_proc() == kq->kq_p) {
		/*
		 * Setting an AST from a non BSD syscall is unsafe: mach_msg_trap() can
		 * do combined send/receive and in the case of self-IPC, the AST may be
		 * set on a thread that will not return to userspace and needs the
		 * thread the AST would create to unblock itself.
		 *
		 * At this time, we really want to target:
		 *
		 * - kevent variants that can cause thread creations, and dispatch
		 *   really only uses kevent_qos and kevent_id,
		 *
		 * - workq_kernreturn (directly about thread creations)
		 *
		 * - bsdthread_ctl which is used for qos changes and has direct impact
		 *   on the creator thread scheduling decisions.
		 */
		switch (current_uthread()->syscall_code) {
		case SYS_kevent_qos:
		case SYS_kevent_id:
		case SYS_workq_kernreturn:
		case SYS_bsdthread_ctl:
			return true;
		}
	}
	return false;
}
/*
 * Interact with the pthread kext to request a servicing there at a specific QoS
 * level.
 *
 * - Caller holds the workq request lock
 *
 * - May be called with the kqueue's wait queue set locked,
 *   so cannot do anything that could recurse on that.
 */
static void
kqueue_threadreq_initiate(struct kqueue *kq, workq_threadreq_t kqr,
    kq_index_t qos, int flags)
{
	assert(kqr->tr_kq_wakeup);
	assert(kqr_thread(kqr) == THREAD_NULL);
	assert(!kqr_thread_requested(kqr));
	struct turnstile *ts = TURNSTILE_NULL;

	if (workq_is_exiting(kq->kq_p)) {
		return;
	}

	if (kq->kq_state & KQ_WORKLOOP) {
		__assert_only struct kqworkloop *kqwl = (struct kqworkloop *)kq;

		assert(kqwl->kqwl_owner == THREAD_NULL);
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST),
		    kqwl->kqwl_dynamicid, 0, qos, kqr->tr_kq_wakeup);
		ts = kqwl->kqwl_turnstile;
		/* Add a thread request reference on the kqueue. */
		kqworkloop_retain(kqwl);
	} else {
		assert(kq->kq_state & KQ_WORKQ);
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST),
		    -1, 0, qos, kqr->tr_kq_wakeup);
	}

	/*
	 * New-style thread request supported.
	 * Provide the pthread kext a pointer to a workq_threadreq_s structure for
	 * its use until a corresponding kqueue_threadreq_bind callback.
	 */
	if (kqueue_threadreq_can_use_ast(kq)) {
		flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
	}
	if (qos == KQWQ_QOS_MANAGER) {
		qos = WORKQ_THREAD_QOS_MANAGER;
	}
	if (!workq_kern_threadreq_initiate(kq->kq_p, kqr, ts, qos, flags)) {
		/*
		 * Process is shutting down or exec'ing.
		 * All the kqueues are going to be cleaned up
		 * soon. Forget we even asked for a thread -
		 * and make sure we don't ask for more.
		 */
		kq->kq_state &= ~KQ_R2K_ARMED;
		kqueue_release_live(kq);
	}
}
/*
 * kqueue_threadreq_bind_prepost - prepost the bind to kevent
 *
 * This is used when kqueue_threadreq_bind may cause a lock inversion.
 */
__attribute__((always_inline))
void
kqueue_threadreq_bind_prepost(struct proc *p __unused, workq_threadreq_t kqr,
    struct uthread *ut)
{
	ut->uu_kqr_bound = kqr;
	kqr->tr_thread = ut->uu_thread;
	kqr->tr_state = WORKQ_TR_STATE_BINDING;
}
/*
 * kqueue_threadreq_bind_commit - commit a bind prepost
 *
 * The workq code has to commit any binding prepost before the thread has
 * a chance to come back to userspace (and do kevent syscalls) or be aborted.
 */
void
kqueue_threadreq_bind_commit(struct proc *p, thread_t thread)
{
	struct uthread *ut = get_bsdthread_info(thread);
	workq_threadreq_t kqr = ut->uu_kqr_bound;
	kqueue_t kqu = kqr_kqueue(p, kqr);

	kqlock(kqu);
	if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
		kqueue_threadreq_bind(p, kqr, thread, 0);
	}
	kqunlock(kqu);
}
static void
kqueue_threadreq_modify(kqueue_t kqu, workq_threadreq_t kqr, kq_index_t qos,
    workq_kern_threadreq_flags_t flags)
{
	assert(kqr_thread_requested_pending(kqr));

	if (kqueue_threadreq_can_use_ast(kqu.kq)) {
		flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
	}
	workq_kern_threadreq_modify(kqu.kq->kq_p, kqr, qos, flags);
}
/*
 * kqueue_threadreq_bind - bind thread to processing kqrequest
 *
 * The provided thread will be responsible for delivering events
 * associated with the given kqrequest. Bind it and get ready for
 * the thread to eventually arrive.
 */
void
kqueue_threadreq_bind(struct proc *p, workq_threadreq_t kqr, thread_t thread,
    unsigned int flags)
{
	kqueue_t kqu = kqr_kqueue(p, kqr);
	struct uthread *ut = get_bsdthread_info(thread);

	assert(ut->uu_kqueue_override == 0);

	if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
		assert(ut->uu_kqr_bound == kqr);
		assert(kqr->tr_thread == thread);
	} else {
		assert(kqr_thread_requested_pending(kqr));
		assert(kqr->tr_thread == THREAD_NULL);
		assert(ut->uu_kqr_bound == NULL);
		ut->uu_kqr_bound = kqr;
		kqr->tr_thread = thread;
	}

	kqr->tr_state = WORKQ_TR_STATE_BOUND;

	if (kqu.kq->kq_state & KQ_WORKLOOP) {
		struct turnstile *ts = kqu.kqwl->kqwl_turnstile;

		if (__improbable(thread == kqu.kqwl->kqwl_owner)) {
			/*
			 * <rdar://problem/38626999> shows that asserting here is not ok.
			 *
			 * This is not supposed to happen for correct use of the interface,
			 * but it is sadly possible for userspace (with the help of memory
			 * corruption, such as over-release of a dispatch queue) to make
			 * the creator thread the "owner" of a workloop.
			 *
			 * Once that happens, and that creator thread picks up the same
			 * workloop as a servicer, we trip this codepath. We need to fixup
			 * the state to forget about this thread being the owner, as the
			 * entire workloop state machine expects servicers to never be
			 * owners and everything would basically go downhill from here.
			 */
			kqu.kqwl->kqwl_owner = THREAD_NULL;
			if (kqworkloop_override(kqu.kqwl)) {
				thread_drop_kevent_override(thread);
			}
		}

		if (ts && (flags & KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE) == 0) {
			/*
			 * Past this point, the interlock is the kq req lock again,
			 * so we can fix the inheritor for good.
			 */
			filt_wlupdate_inheritor(kqu.kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
			turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
		}

		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_BIND), kqu.kqwl->kqwl_dynamicid,
		    thread_tid(thread), kqr->tr_kq_qos_index,
		    (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);

		ut->uu_kqueue_override = kqr->tr_kq_override_index;
		if (kqr->tr_kq_override_index) {
			thread_add_servicer_override(thread, kqr->tr_kq_override_index);
		}
	} else {
		assert(kqr->tr_kq_override_index == 0);

		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_BIND), -1,
		    thread_tid(thread), kqr->tr_kq_qos_index,
		    (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
	}
}
/*
 * kqueue_threadreq_cancel - abort a pending thread request
 *
 * Called when exiting/exec'ing. Forget our pending request.
 */
void
kqueue_threadreq_cancel(struct proc *p, workq_threadreq_t kqr)
{
	kqueue_release(kqr_kqueue(p, kqr));
}

workq_threadreq_param_t
kqueue_threadreq_workloop_param(workq_threadreq_t kqr)
{
	struct kqworkloop *kqwl;
	workq_threadreq_param_t trp;

	assert(kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
	kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
	trp.trp_value = kqwl->kqwl_params;
	return trp;
}
/*
 * kqueue_threadreq_unbind - unbind thread from processing kqueue
 *
 * End processing the per-QoS bucket of events and allow other threads
 * to be requested for future servicing.
 *
 * caller holds a reference on the kqueue.
 */
void
kqueue_threadreq_unbind(struct proc *p, workq_threadreq_t kqr)
{
	if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
		kqworkloop_unbind(kqr_kqworkloop(kqr));
	} else {
		kqworkq_unbind(p, kqr);
	}
}
/*
 * If we aren't already busy processing events [for this QoS],
 * request workq thread support as appropriate.
 *
 * TBD - for now, we don't segregate out processing by QoS.
 *
 * - May be called with the kqueue's wait queue set locked,
 *   so cannot do anything that could recurse on that.
 */
static void
kqworkq_wakeup(struct kqworkq *kqwq, kq_index_t qos_index)
{
	workq_threadreq_t kqr = kqworkq_get_request(kqwq, qos_index);

	/* convert to thread qos value */
	assert(qos_index < KQWQ_NBUCKETS);

	if (!kqr->tr_kq_wakeup) {
		kqr->tr_kq_wakeup = true;
		if (!kqr_thread_requested(kqr)) {
			kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr, qos_index, 0);
		}
	}
}
/*
 * This represents the asynchronous QoS a given workloop contributes,
 * hence is the max of the current active knotes (override index)
 * and the workloop max qos (userspace async qos).
 */
static kq_index_t
kqworkloop_override(struct kqworkloop *kqwl)
{
	workq_threadreq_t kqr = &kqwl->kqwl_request;
	return MAX(kqr->tr_kq_qos_index, kqr->tr_kq_override_index);
}
static void
kqworkloop_request_fire_r2k_notification(struct kqworkloop *kqwl)
{
	workq_threadreq_t kqr = &kqwl->kqwl_request;

	if (kqwl->kqwl_state & KQ_R2K_ARMED) {
		kqwl->kqwl_state &= ~KQ_R2K_ARMED;
		act_set_astkevent(kqr_thread_fast(kqr), AST_KEVENT_RETURN_TO_KERNEL);
	}
}
static void
kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos)
{
	workq_threadreq_t kqr = &kqwl->kqwl_request;
	struct kqueue *kq = &kqwl->kqwl_kqueue;
	kq_index_t old_override = kqworkloop_override(kqwl);
	kq_index_t i;

	switch (op) {
	case KQWL_UTQ_UPDATE_WAKEUP_QOS:
		if (qos == KQWL_BUCKET_STAYACTIVE) {
			/*
			 * the KQWL_BUCKET_STAYACTIVE is not a QoS bucket, we only remember
			 * a high watermark (kqwl_stayactive_qos) of any stay active knote
			 * that was ever registered with this workloop.
			 *
			 * When waitq_set__CALLING_PREPOST_HOOK__() wakes up any stay active
			 * knote, we use this high-watermark as a wakeup-index, and also set
			 * the magic KQWL_BUCKET_STAYACTIVE bit to make sure we remember
			 * there is at least one stay active knote fired until the next full
			 * processing of this bucket.
			 */
			kqwl->kqwl_wakeup_indexes |= KQWL_STAYACTIVE_FIRED_BIT;
			qos = kqwl->kqwl_stayactive_qos;
			assert(qos);
		}
		if (kqwl->kqwl_wakeup_indexes & (1 << qos)) {
			assert(kqr->tr_kq_wakeup);
			break;
		}

		kqwl->kqwl_wakeup_indexes |= (1 << qos);
		kqr->tr_kq_wakeup = true;
		kqworkloop_request_fire_r2k_notification(kqwl);
		goto recompute;

	case KQWL_UTQ_UPDATE_STAYACTIVE_QOS:
		assert(qos);
		if (kqwl->kqwl_stayactive_qos < qos) {
			kqwl->kqwl_stayactive_qos = qos;
			if (kqwl->kqwl_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT) {
				assert(kqr->tr_kq_wakeup);
				kqwl->kqwl_wakeup_indexes |= (1 << qos);
				goto recompute;
			}
		}
		break;

	case KQWL_UTQ_PARKING:
	case KQWL_UTQ_UNBINDING:
		kqr->tr_kq_override_index = qos;
		OS_FALLTHROUGH;
	case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS:
		if (op == KQWL_UTQ_RECOMPUTE_WAKEUP_QOS) {
			assert(qos == THREAD_QOS_UNSPECIFIED);
		}
		i = KQWL_BUCKET_STAYACTIVE;
		if (TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
			kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
		}
		if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i]) &&
		    (kqwl->kqwl_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT)) {
			/*
			 * If the KQWL_STAYACTIVE_FIRED_BIT is set, it means a stay active
			 * knote may have fired, so we need to merge in kqwl_stayactive_qos.
			 *
			 * Unlike other buckets, this one is never empty but could be idle.
			 */
			kqwl->kqwl_wakeup_indexes &= KQWL_STAYACTIVE_FIRED_BIT;
			kqwl->kqwl_wakeup_indexes |= (1 << kqwl->kqwl_stayactive_qos);
		} else {
			kqwl->kqwl_wakeup_indexes = 0;
		}
		for (i = THREAD_QOS_UNSPECIFIED + 1; i < KQWL_BUCKET_STAYACTIVE; i++) {
			if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i])) {
				kqwl->kqwl_wakeup_indexes |= (1 << i);
			}
		}
		if (kqwl->kqwl_wakeup_indexes) {
			kqr->tr_kq_wakeup = true;
			kqworkloop_request_fire_r2k_notification(kqwl);
		} else {
			kqr->tr_kq_wakeup = false;
		}
		goto recompute;

	case KQWL_UTQ_RESET_WAKEUP_OVERRIDE:
		kqr->tr_kq_override_index = qos;
		goto recompute;

	case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE:
recompute:
		/*
		 * When modifying the wakeup QoS or the override QoS, we always need to
		 * maintain our invariant that kqr_override_index is at least as large
		 * as the highest QoS for which an event is fired.
		 *
		 * However this override index can be larger when there is an overridden
		 * suppressed knote pushing on the kqueue.
		 */
		if (kqwl->kqwl_wakeup_indexes > (1 << qos)) {
			qos = (uint8_t)(fls(kqwl->kqwl_wakeup_indexes) - 1); /* fls is 1-based */
		}
		if (kqr->tr_kq_override_index < qos) {
			kqr->tr_kq_override_index = qos;
		}
		break;

	case KQWL_UTQ_REDRIVE_EVENTS:
		break;

	case KQWL_UTQ_SET_QOS_INDEX:
		kqr->tr_kq_qos_index = qos;
		break;

	default:
		panic("unknown kqwl thread qos update operation: %d", op);
	}

	thread_t kqwl_owner = kqwl->kqwl_owner;
	thread_t servicer = kqr_thread(kqr);
	boolean_t qos_changed = FALSE;
	kq_index_t new_override = kqworkloop_override(kqwl);

	/*
	 * Apply the diffs to the owner if applicable
	 */
	if (kqwl_owner) {
#if 0
		/* JMM - need new trace hooks for owner overrides */
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST),
		    kqwl->kqwl_dynamicid, thread_tid(kqwl_owner), kqr->tr_kq_qos_index,
		    (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
#endif
		if (new_override == old_override) {
			// nothing to do
		} else if (old_override == THREAD_QOS_UNSPECIFIED) {
			thread_add_kevent_override(kqwl_owner, new_override);
		} else if (new_override == THREAD_QOS_UNSPECIFIED) {
			thread_drop_kevent_override(kqwl_owner);
		} else { /* old_override != new_override */
			thread_update_kevent_override(kqwl_owner, new_override);
		}
	}

	/*
	 * apply the diffs to the servicer
	 */
	if (!kqr_thread_requested(kqr)) {
		/*
		 * No servicer, nor thread-request
		 *
		 * Make a new thread request, unless there is an owner (or the workloop
		 * is suspended in userland) or if there is no asynchronous work in the
		 * first place.
		 */
		if (kqwl_owner == NULL && kqr->tr_kq_wakeup) {
			int initiate_flags = 0;
			if (op == KQWL_UTQ_UNBINDING) {
				initiate_flags = WORKQ_THREADREQ_ATTEMPT_REBIND;
			}
			kqueue_threadreq_initiate(kq, kqr, new_override, initiate_flags);
		}
	} else if (servicer) {
		/*
		 * Servicer in flight
		 *
		 * Just apply the diff to the servicer
		 */
		struct uthread *ut = get_bsdthread_info(servicer);
		if (ut->uu_kqueue_override != new_override) {
			if (ut->uu_kqueue_override == THREAD_QOS_UNSPECIFIED) {
				thread_add_servicer_override(servicer, new_override);
			} else if (new_override == THREAD_QOS_UNSPECIFIED) {
				thread_drop_servicer_override(servicer);
			} else { /* ut->uu_kqueue_override != new_override */
				thread_update_servicer_override(servicer, new_override);
			}
			ut->uu_kqueue_override = new_override;
			qos_changed = TRUE;
		}
	} else if (new_override == THREAD_QOS_UNSPECIFIED) {
		/*
		 * No events to deliver anymore.
		 *
		 * However canceling with turnstiles is challenging, so the fact that
		 * the request isn't useful will be discovered by the servicer himself
		 * later on.
		 */
	} else if (old_override != new_override) {
		/*
		 * Request is in flight
		 *
		 * Apply the diff to the thread request
		 */
		kqueue_threadreq_modify(kq, kqr, new_override, WORKQ_THREADREQ_NONE);
		qos_changed = TRUE;
	}

	if (qos_changed) {
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST), kqwl->kqwl_dynamicid,
		    thread_tid(servicer), kqr->tr_kq_qos_index,
		    (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
	}
}
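/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * kqwl_wakeup_indexes is a bitmask with bit (1 << qos) set for every QoS
 * bucket holding a pending wakeup, so "highest pending QoS" is just the
 * index of the most significant set bit, which is what the fls() expression
 * above computes:
 */
#if 0
static kq_index_t
highest_pending_qos(uint32_t wakeup_indexes)
{
	if (wakeup_indexes == 0) {
		return THREAD_QOS_UNSPECIFIED;      /* no bucket has fired */
	}
	/* fls() is 1-based: fls(0x20) == 6, so subtract 1 to get bit index 5 */
	return (kq_index_t)(fls(wakeup_indexes) - 1);
}
#endif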
static void
kqworkloop_wakeup(struct kqworkloop *kqwl, kq_index_t qos)
{
	if ((kqwl->kqwl_state & KQ_PROCESSING) &&
	    kqr_thread(&kqwl->kqwl_request) == current_thread()) {
		/*
		 * kqworkloop_end_processing() will perform the required QoS
		 * computations when it unsets the processing mode.
		 */
		return;
	}

	kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS, qos);
}
static struct kqtailq *
kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn)
{
	if (kq.kq->kq_state & KQ_WORKLOOP) {
		return &kq.kqwl->kqwl_suppressed;
	} else if (kq.kq->kq_state & KQ_WORKQ) {
		return &kq.kqwq->kqwq_suppressed[kn->kn_qos_index];
	} else {
		return &kq.kqf->kqf_suppressed;
	}
}
static struct turnstile *
kqueue_alloc_turnstile(kqueue_t kqu)
{
	struct kqworkloop *kqwl = kqu.kqwl;
	kq_state_t kq_state;

	kq_state = os_atomic_load(&kqu.kq->kq_state, dependency);
	if (kq_state & KQ_HAS_TURNSTILE) {
		/* force a dependency to pair with the atomic or with release below */
		return os_atomic_load_with_dependency_on(&kqwl->kqwl_turnstile,
		           (uintptr_t)kq_state);
	}

	if (!(kq_state & KQ_WORKLOOP)) {
		return TURNSTILE_NULL;
	}

	struct turnstile *ts = turnstile_alloc(), *free_ts = TURNSTILE_NULL;
	bool workq_locked = false;

	kqlock(kqu);

	if (filt_wlturnstile_interlock_is_workq(kqwl)) {
		workq_locked = true;
		workq_kern_threadreq_lock(kqwl->kqwl_p);
	}

	if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
		free_ts = ts;
		ts = kqwl->kqwl_turnstile;
	} else {
		ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
		    ts, TURNSTILE_WORKLOOPS);

		/* release-barrier to pair with the unlocked load of kqwl_turnstile above */
		os_atomic_or(&kqwl->kqwl_state, KQ_HAS_TURNSTILE, release);

		if (filt_wlturnstile_interlock_is_workq(kqwl)) {
			workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
			    &kqwl->kqwl_request, kqwl->kqwl_owner,
			    ts, TURNSTILE_IMMEDIATE_UPDATE);
			/*
			 * The workq may no longer be the interlock after this.
			 * In which case the inheritor wasn't updated.
			 */
		}
		if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
			filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
		}
	}

	if (workq_locked) {
		workq_kern_threadreq_unlock(kqwl->kqwl_p);
	}

	kqunlock(kqu);

	if (free_ts) {
		turnstile_deallocate(free_ts);
	} else {
		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
	}
	return ts;
}
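/*
 * Illustrative sketch (not part of the original file, kept under #if 0): the
 * fast path above is a double-checked initialization. A dependency-ordered
 * load of the state word pairs with the os_atomic_or(..., release)
 * publisher, so a reader that observes KQ_HAS_TURNSTILE also observes the
 * turnstile pointer stored before the flag. The shape of the pattern, with
 * hypothetical names (FLAG_READY, lock/unlock, make_object):
 */
#if 0
struct lazy {
	uint32_t  flags;            /* FLAG_READY set once obj is published */
	void     *obj;
};

static void *
lazy_get(struct lazy *l)
{
	uint32_t flags = os_atomic_load(&l->flags, dependency);
	if (flags & FLAG_READY) {
		/* dependency-ordered: safe to read obj without the lock */
		return os_atomic_load_with_dependency_on(&l->obj, (uintptr_t)flags);
	}
	lock(l);
	if (!(l->flags & FLAG_READY)) {
		l->obj = make_object();
		/* release-barrier publishes obj before the flag becomes visible */
		os_atomic_or(&l->flags, FLAG_READY, release);
	}
	void *obj = l->obj;
	unlock(l);
	return obj;
}
#endif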
__attribute__((always_inline))
struct turnstile *
kqueue_turnstile(kqueue_t kqu)
{
	kq_state_t kq_state = os_atomic_load(&kqu.kq->kq_state, relaxed);
	if (kq_state & KQ_WORKLOOP) {
		return os_atomic_load(&kqu.kqwl->kqwl_turnstile, relaxed);
	}
	return TURNSTILE_NULL;
}

__attribute__((always_inline))
struct turnstile *
kqueue_threadreq_get_turnstile(workq_threadreq_t kqr)
{
	struct kqworkloop *kqwl = kqr_kqworkloop(kqr);
	if (kqwl) {
		return os_atomic_load(&kqwl->kqwl_turnstile, relaxed);
	}
	return TURNSTILE_NULL;
}
static void
kqworkloop_set_overcommit(struct kqworkloop *kqwl)
{
	workq_threadreq_t kqr = &kqwl->kqwl_request;

	/*
	 * This test is racy, but since we never remove this bit,
	 * it allows us to avoid taking a lock.
	 */
	if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
		return;
	}

	if (kqr_thread_requested_pending(kqr)) {
		kqueue_threadreq_modify(kqwl, kqr, kqr->tr_qos,
		    WORKQ_THREADREQ_MAKE_OVERCOMMIT);
	} else {
		kqr->tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
	}
}
static void
kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn,
    kq_index_t override_index)
{
	workq_threadreq_t kqr;
	kq_index_t old_override_index;
	kq_index_t queue_index = kn->kn_qos_index;

	if (override_index <= queue_index) {
		return;
	}

	kqr = kqworkq_get_request(kqwq, queue_index);

	old_override_index = kqr->tr_kq_override_index;
	if (override_index > MAX(kqr->tr_kq_qos_index, old_override_index)) {
		thread_t servicer = kqr_thread(kqr);
		kqr->tr_kq_override_index = override_index;

		/* apply the override to [incoming?] servicing thread */
		if (servicer) {
			if (old_override_index) {
				thread_update_kevent_override(servicer, override_index);
			} else {
				thread_add_kevent_override(servicer, override_index);
			}
		}
	}
}
static void
kqueue_update_override(kqueue_t kqu, struct knote *kn, thread_qos_t qos)
{
	if (kqu.kq->kq_state & KQ_WORKLOOP) {
		kqworkloop_update_threads_qos(kqu.kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
		    qos);
	} else {
		kqworkq_update_override(kqu.kqwq, kn, qos);
	}
}
static void
kqworkloop_unbind_locked(struct kqworkloop *kqwl, thread_t thread,
    enum kqwl_unbind_locked_mode how)
{
	struct uthread *ut = get_bsdthread_info(thread);
	workq_threadreq_t kqr = &kqwl->kqwl_request;

	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND), kqwl->kqwl_dynamicid,
	    thread_tid(thread), 0, 0);

	assert(ut->uu_kqr_bound == kqr);
	ut->uu_kqr_bound = NULL;
	if (how == KQWL_OVERRIDE_DROP_IMMEDIATELY &&
	    ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
		thread_drop_servicer_override(thread);
		ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
	}

	if (kqwl->kqwl_owner == NULL && kqwl->kqwl_turnstile) {
		turnstile_update_inheritor(kqwl->kqwl_turnstile,
		    TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
		turnstile_update_inheritor_complete(kqwl->kqwl_turnstile,
		    TURNSTILE_INTERLOCK_HELD);
	}

	kqr->tr_thread = THREAD_NULL;
	kqr->tr_state = WORKQ_TR_STATE_IDLE;
	kqwl->kqwl_state &= ~KQ_R2K_ARMED;
}
static void
kqworkloop_unbind_delayed_override_drop(thread_t thread)
{
	struct uthread *ut = get_bsdthread_info(thread);
	assert(ut->uu_kqr_bound == NULL);
	if (ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
		thread_drop_servicer_override(thread);
		ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
	}
}
/*
 * kqworkloop_unbind - Unbind the servicer thread of a workloop kqueue
 *
 * It will acknowledge events, and possibly request a new thread if:
 * - there were active events left
 * - we pended waitq hook callouts during processing
 * - we pended wakeups while processing (or unsuppressing)
 *
 * Called with kqueue lock held.
 */
static void
kqworkloop_unbind(struct kqworkloop *kqwl)
{
	struct kqueue *kq = &kqwl->kqwl_kqueue;
	workq_threadreq_t kqr = &kqwl->kqwl_request;
	thread_t thread = kqr_thread_fast(kqr);
	int op = KQWL_UTQ_PARKING;
	kq_index_t qos_override = THREAD_QOS_UNSPECIFIED;

	assert(thread == current_thread());

	kqlock(kqwl);

	/*
	 * Forcing the KQ_PROCESSING flag allows for QoS updates because of
	 * unsuppressing knotes not to be applied until the eventual call to
	 * kqworkloop_update_threads_qos() below.
	 */
	assert((kq->kq_state & KQ_PROCESSING) == 0);
	if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
		kq->kq_state |= KQ_PROCESSING;
		qos_override = kqworkloop_acknowledge_events(kqwl);
		kq->kq_state &= ~KQ_PROCESSING;
	}

	kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
	kqworkloop_update_threads_qos(kqwl, op, qos_override);

	kqunlock(kqwl);

	/*
	 * Drop the override on the current thread last, after the call to
	 * kqworkloop_update_threads_qos above.
	 */
	kqworkloop_unbind_delayed_override_drop(thread);

	/* If last reference, dealloc the workloop kq */
	kqworkloop_release(kqwl);
}
static kq_index_t
kqworkq_unbind_locked(struct kqworkq *kqwq,
    workq_threadreq_t kqr, thread_t thread)
{
	struct uthread *ut = get_bsdthread_info(thread);
	kq_index_t old_override = kqr->tr_kq_override_index;

	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND), -1,
	    thread_tid(kqr_thread(kqr)), kqr->tr_kq_qos_index, 0);

	assert(ut->uu_kqr_bound == kqr);
	ut->uu_kqr_bound = NULL;
	kqr->tr_thread = THREAD_NULL;
	kqr->tr_state = WORKQ_TR_STATE_IDLE;
	kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
	kqwq->kqwq_state &= ~KQ_R2K_ARMED;

	return old_override;
}
/*
 * kqworkq_unbind - unbind of a workq kqueue from a thread
 *
 * We may have to request new threads.
 * This can happen when there are no waiting processing threads and:
 * - there were active events we never got to (count > 0)
 * - we pended waitq hook callouts during processing
 * - we pended wakeups while processing (or unsuppressing)
 */
static void
kqworkq_unbind(proc_t p, workq_threadreq_t kqr)
{
	struct kqworkq *kqwq = (struct kqworkq *)p->p_fd->fd_wqkqueue;
	__assert_only int rc;

	kqlock(kqwq);
	rc = kqworkq_acknowledge_events(kqwq, kqr, 0, KQWQAE_UNBIND);
	assert(rc == -1);
	kqunlock(kqwq);
}

static workq_threadreq_t
kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
{
	assert(qos_index < KQWQ_NBUCKETS);
	return &kqwq->kqwq_request[qos_index];
}
static void
knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp)
{
	kq_index_t qos = _pthread_priority_thread_qos(pp);

	if (kqu.kq->kq_state & KQ_WORKLOOP) {
		assert((pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) == 0);
		pp = _pthread_priority_normalize(pp);
	} else if (kqu.kq->kq_state & KQ_WORKQ) {
		if (qos == THREAD_QOS_UNSPECIFIED) {
			/* On workqueues, outside of QoS means MANAGER */
			qos = KQWQ_QOS_MANAGER;
			pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
		} else {
			pp = _pthread_priority_normalize(pp);
		}
	} else {
		pp = _pthread_unspecified_priority();
		qos = THREAD_QOS_UNSPECIFIED;
	}

	kn->kn_qos = (int32_t)pp;

	if ((kn->kn_status & KN_MERGE_QOS) == 0 || qos > kn->kn_qos_override) {
		/* Never lower QoS when in "Merge" mode */
		kn->kn_qos_override = qos;
	}

	/* only adjust in-use qos index when not suppressed */
	if (kn->kn_status & KN_SUPPRESSED) {
		kqueue_update_override(kqu, kn, qos);
	} else if (kn->kn_qos_index != qos) {
		knote_dequeue(kqu, kn);
		kn->kn_qos_index = qos;
	}
}
static void
knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result)
{
	thread_qos_t qos_index = (result >> FILTER_ADJUST_EVENT_QOS_SHIFT) & 7;

	assert(result & FILTER_ADJUST_EVENT_QOS_BIT);
	assert(qos_index < THREAD_QOS_LAST);

	/*
	 * Early exit for knotes that should not change QoS
	 */
	if (__improbable(!knote_fops(kn)->f_adjusts_qos)) {
		panic("filter %d cannot change QoS", kn->kn_filtid);
	} else if (__improbable(!knote_has_qos(kn))) {
		return;
	}

	/*
	 * knotes with the FALLBACK flag will only use their registration QoS if the
	 * incoming event has no QoS, else, the registration QoS acts as a floor.
	 */
	thread_qos_t req_qos = _pthread_priority_thread_qos_fast(kn->kn_qos);
	if (kn->kn_qos & _PTHREAD_PRIORITY_FALLBACK_FLAG) {
		if (qos_index == THREAD_QOS_UNSPECIFIED) {
			qos_index = req_qos;
		}
	} else {
		if (qos_index < req_qos) {
			qos_index = req_qos;
		}
	}
	if ((kn->kn_status & KN_MERGE_QOS) && (qos_index < kn->kn_qos_override)) {
		/* Never lower QoS when in "Merge" mode */
		return;
	}

	if ((kn->kn_status & KN_LOCKED) && (kn->kn_status & KN_POSTING)) {
		/*
		 * When we're trying to update the QoS override and that both an
		 * f_event() and other f_* calls are running concurrently, any of these
		 * in flight calls may want to perform overrides that aren't properly
		 * serialized with each other.
		 *
		 * The first update that observes this racy situation enters a "Merge"
		 * mode which causes subsequent override requests to saturate the
		 * override instead of replacing its value.
		 *
		 * This mode is left when knote_unlock() or knote_post()
		 * observe that no other f_* routine is in flight.
		 */
		kn->kn_status |= KN_MERGE_QOS;
	}

	/*
	 * Now apply the override if it changed.
	 */
	if (kn->kn_qos_override == qos_index) {
		return;
	}

	kn->kn_qos_override = qos_index;

	if (kn->kn_status & KN_SUPPRESSED) {
		/*
		 * For suppressed events, the kn_qos_index field cannot be touched as it
		 * allows us to know on which suppress queue the knote is for a kqworkq.
		 *
		 * Also, there's no natural push applied on the kqueues when this field
		 * changes anyway. We hence need to apply manual overrides in this case,
		 * which will be cleared when the events are later acknowledged.
		 */
		kqueue_update_override(kq, kn, qos_index);
	} else if (kn->kn_qos_index != qos_index) {
		knote_dequeue(kq, kn);
		kn->kn_qos_index = qos_index;
	}
}
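/*
 * Illustrative sketch (not part of the original file, kept under #if 0): the
 * two registration QoS modes above differ only in how they combine with the
 * QoS carried by the incoming event. With registration QoS IN: a floor-mode
 * UT event is raised to IN while a UI event stays UI; a fallback-mode event
 * with no QoS gets IN while a UT event stays UT. Condensed:
 */
#if 0
static thread_qos_t
combine_event_qos(thread_qos_t event_qos, thread_qos_t req_qos, bool fallback)
{
	if (fallback) {
		/* registration QoS only used when the event carries none */
		return (event_qos == THREAD_QOS_UNSPECIFIED) ? req_qos : event_qos;
	}
	/* default: registration QoS acts as a floor */
	return (event_qos < req_qos) ? req_qos : event_qos;
}
#endif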
/*
 * Called back from waitq code when no threads waiting and the hook was set.
 *
 * Preemption is disabled - minimal work can be done in this context!!!
 */
void
waitq_set__CALLING_PREPOST_HOOK__(waitq_set_prepost_hook_t *kq_hook)
{
	kqueue_t kqu;

	kqu.kq = __container_of(kq_hook, struct kqueue, kq_waitq_hook);
	assert(kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));

	kqlock(kqu);

	if (kqu.kq->kq_count > 0) {
		if (kqu.kq->kq_state & KQ_WORKLOOP) {
			kqworkloop_wakeup(kqu.kqwl, KQWL_BUCKET_STAYACTIVE);
		} else {
			kqworkq_wakeup(kqu.kqwq, KQWQ_QOS_MANAGER);
		}
	}

	kqunlock(kqu);
}
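/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * __container_of above recovers the enclosing kqueue from a pointer to its
 * embedded kq_waitq_hook member by subtracting the member's offset - the
 * usual container-of pointer arithmetic, shown here with hypothetical types:
 */
#if 0
#define my_container_of(ptr, type, member) \
	((type *)((uintptr_t)(ptr) - offsetof(type, member)))

struct outer {
	int  header;
	long hook;          /* callee only ever sees a pointer to this field */
};

static struct outer *
outer_from_hook(long *hook)
{
	return my_container_of(hook, struct outer, hook);
}
#endif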
void
klist_init(struct klist *list)
{
	SLIST_INIT(list);
}

/*
 * Query/Post each knote in the object's list
 *
 * The object lock protects the list. It is assumed
 * that the filter/event routine for the object can
 * determine that the object is already locked (via
 * the hint) and not deadlock itself.
 *
 * The object lock should also hold off pending
 * detach/drop operations.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext) {
		knote_post(kn, hint);
	}
}

/*
 * attach a knote to the specified list. Return true if this is the first entry.
 * The list is protected by whatever lock the object it is associated with uses.
 */
int
knote_attach(struct klist *list, struct knote *kn)
{
	int ret = SLIST_EMPTY(list);
	SLIST_INSERT_HEAD(list, kn, kn_selnext);
	return ret;
}

/*
 * detach a knote from the specified list. Return true if that was the last entry.
 * The list is protected by whatever lock the object it is associated with uses.
 */
int
knote_detach(struct klist *list, struct knote *kn)
{
	SLIST_REMOVE(list, kn, knote, kn_selnext);
	return SLIST_EMPTY(list);
}
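/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * klist_init(), knote_attach()/knote_detach() and the KNOTE() macro are the
 * interface an event source (driver, socket, ...) uses to track interested
 * knotes and post events to them. A minimal source, with hypothetical names:
 */
#if 0
struct my_source {
	lck_mtx_t     ms_lock;     /* the "object lock" named in the comments above */
	struct klist  ms_klist;    /* knotes attached to this source */
};

static void
my_source_init(struct my_source *ms)
{
	klist_init(&ms->ms_klist);
}

/* what this source's filter f_attach routine would do, under ms_lock */
static void
my_source_add_knote(struct my_source *ms, struct knote *kn)
{
	lck_mtx_lock(&ms->ms_lock);
	knote_attach(&ms->ms_klist, kn);
	lck_mtx_unlock(&ms->ms_lock);
}

/* called when the source has new data: posts to every attached knote */
static void
my_source_fire(struct my_source *ms, long hint)
{
	lck_mtx_lock(&ms->ms_lock);
	KNOTE(&ms->ms_klist, hint);
	lck_mtx_unlock(&ms->ms_lock);
}
#endif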
/*
 * knote_vanish - Indicate that the source has vanished
 *
 * If the knote has requested EV_VANISHED delivery,
 * arrange for that. Otherwise, deliver a NOTE_REVOKE
 * event for backward compatibility.
 *
 * The knote is marked as having vanished, but is not
 * actually detached from the source in this instance.
 * The actual detach is deferred until the knote drop.
 *
 * Our caller already has the object lock held. Calling
 * the detach routine would try to take that lock
 * recursively - which likely is not supported.
 */
void
knote_vanish(struct klist *list, bool make_active)
{
	struct knote *kn;
	struct knote *kn_next;

	SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
		struct kqueue *kq = knote_get_kq(kn);

		kqlock(kq);
		if (__probable(kn->kn_status & KN_REQVANISH)) {
			/*
			 * If EV_VANISH supported - prepare to deliver one
			 */
			kn->kn_status |= KN_VANISHED;
		} else {
			/*
			 * Handle the legacy way to indicate that the port/portset was
			 * deallocated or left the current Mach portspace (modern technique
			 * is with an EV_VANISHED protocol).
			 *
			 * Deliver an EV_EOF event for these changes (hopefully it will get
			 * delivered before the port name recycles to the same generation
			 * count and someone tries to re-register a kevent for it or the
			 * events are udata-specific - avoiding a conflict).
			 */
			kn->kn_flags |= EV_EOF | EV_ONESHOT;
		}
		if (make_active) {
			knote_activate(kq, kn, FILTER_ACTIVE);
		}
		kqunlock(kq);
	}
}
/*
 * Force a lazy allocation of the waitqset link
 * of the kq_wqs associated with the kn
 * if it wasn't already allocated.
 *
 * This allows knote_link_waitq to never block
 * if reserved_link is not NULL.
 */
void
knote_link_waitqset_lazy_alloc(struct knote *kn)
{
	struct kqueue *kq = knote_get_kq(kn);
	waitq_set_lazy_init_link(&kq->kq_wqs);
}

/*
 * Check if a lazy allocation for the waitqset link
 * of the kq_wqs is needed.
 */
boolean_t
knote_link_waitqset_should_lazy_alloc(struct knote *kn)
{
	struct kqueue *kq = knote_get_kq(kn);
	return waitq_set_should_lazy_init_link(&kq->kq_wqs);
}

/*
 * For a given knote, link a provided wait queue directly with the kqueue.
 * Wakeups will happen via recursive wait queue support. But nothing will move
 * the knote to the active list at wakeup (nothing calls knote()). Instead,
 * we permanently enqueue them here.
 *
 * kqueue and knote references are held by caller.
 * waitq locked by caller.
 *
 * caller provides the wait queue link structure and ensures that the kq->kq_wqs
 * is linked by previously calling knote_link_waitqset_lazy_alloc.
 */
int
knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link)
{
	struct kqueue *kq = knote_get_kq(kn);
	kern_return_t kr;

	kr = waitq_link(wq, &kq->kq_wqs, WAITQ_ALREADY_LOCKED, reserved_link);
	if (kr == KERN_SUCCESS) {
		knote_markstayactive(kn);
		return 0;
	} else {
		return EINVAL;
	}
}

/*
 * Unlink the provided wait queue from the kqueue associated with a knote.
 * Also remove it from the magic list of directly attached knotes.
 *
 * Note that the unlink may have already happened from the other side, so
 * ignore any failures to unlink and just remove it from the kqueue list.
 *
 * On success, caller is responsible for the link structure
 */
int
knote_unlink_waitq(struct knote *kn, struct waitq *wq)
{
	struct kqueue *kq = knote_get_kq(kn);
	kern_return_t kr;

	kr = waitq_unlink(wq, &kq->kq_wqs);
	knote_clearstayactive(kn);
	return (kr != KERN_SUCCESS) ? EINVAL : 0;
}
/*
 * remove all knotes referencing a specified fd
 *
 * Entered with the proc_fd lock already held.
 * It returns the same way, but may drop it temporarily.
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct klist *list;
	struct knote *kn;
	KNOTE_LOCK_CTX(knlc);

restart:
	list = &p->p_fd->fd_knlist[fd];
	SLIST_FOREACH(kn, list, kn_link) {
		struct kqueue *kq = knote_get_kq(kn);

		kqlock(kq);

		if (kq->kq_p != p) {
			panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
			    __func__, kq->kq_p, p);
		}

		/*
		 * If the knote supports EV_VANISHED delivery,
		 * transition it to vanished mode (or skip over
		 * it if already vanished).
		 */
		if (kn->kn_status & KN_VANISHED) {
			kqunlock(kq);
			continue;
		}

		proc_fdunlock(p);
		if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
			/* the knote was dropped by someone, nothing to do */
		} else if (kn->kn_status & KN_REQVANISH) {
			kn->kn_status |= KN_VANISHED;

			kqunlock(kq);
			knote_fops(kn)->f_detach(kn);
			if (kn->kn_is_fd) {
				fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0);
			}
			kn->kn_filtid = EVFILTID_DETACHED;
			kqlock(kq);

			knote_activate(kq, kn, FILTER_ACTIVE);
			knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
		} else {
			knote_drop(kq, kn, &knlc);
		}

		proc_fdlock(p);
		goto restart;
	}
}
/*
 * knote_fdfind - lookup a knote in the fd table for process
 *
 * If the filter is file-based, lookup based on fd index.
 * Otherwise use a hash based on the ident.
 *
 * Matching is based on kq, filter, and ident. Optionally,
 * it may also be based on the udata field in the kevent -
 * allowing multiple event registration for the file object
 * per kqueue.
 *
 * fd_knhashlock or fdlock held on entry (and exit)
 */
static struct knote *
knote_fdfind(struct kqueue *kq,
    const struct kevent_internal_s *kev,
    bool is_fd,
    struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = NULL;
	struct knote *kn = NULL;

	/*
	 * determine where to look for the knote
	 */
	if (is_fd) {
		/* fd-based knotes are linked off the fd table */
		if (kev->kei_ident < (u_int)fdp->fd_knlistsize) {
			list = &fdp->fd_knlist[kev->kei_ident];
		}
	} else if (fdp->fd_knhashmask != 0) {
		/* hash non-fd knotes here too */
		list = &fdp->fd_knhash[KN_HASH((u_long)kev->kei_ident, fdp->fd_knhashmask)];
	}

	/*
	 * scan the selected list looking for a match
	 */
	if (list != NULL) {
		SLIST_FOREACH(kn, list, kn_link) {
			if (kq == knote_get_kq(kn) &&
			    kev->kei_ident == kn->kn_id &&
			    kev->kei_filter == kn->kn_filter) {
				if (kev->kei_flags & EV_UDATA_SPECIFIC) {
					if ((kn->kn_flags & EV_UDATA_SPECIFIC) &&
					    kev->kei_udata == kn->kn_udata) {
						break; /* matching udata-specific knote */
					}
				} else if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0) {
					break; /* matching non-udata-specific knote */
				}
			}
		}
	}
	return kn;
}
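/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * fd_knhash is a hashinit()-style table whose size is a power of two, so
 * fd_knhashmask is size - 1 and KN_HASH can reduce an ident to a bucket
 * with a single mask, the standard idiom:
 */
#if 0
static struct klist *
ident_bucket(struct klist *table, u_long mask, u_long ident)
{
	/* mask == (power-of-two size) - 1, so this is ident % size */
	return &table[ident & mask];
}
#endif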
/*
 * kq_add_knote - Add knote to the fd table for process
 * while checking for duplicates.
 *
 * All file-based filters associate a list of knotes by file
 * descriptor index. All other filters hash the knote by ident.
 *
 * May have to grow the table of knote lists to cover the
 * file descriptor index presented.
 *
 * fd_knhashlock and fdlock unheld on entry (and exit).
 *
 * Takes a rwlock boost if inserting the knote is successful.
 */
static int
kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc,
    struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = NULL;
	int ret = 0;
	bool is_fd = kn->kn_is_fd;
	uint64_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE, TRUE);

	if (is_fd) {
		proc_fdlock(p);
	} else {
		knhash_lock(fdp);
	}

	if (knote_fdfind(kq, &kn->kn_kevent, is_fd, p) != NULL) {
		/* found an existing knote: we can't add this one */
		ret = ERESTART;
		goto out_locked;
	}

	/* knote was not found: add it now */
	if (!is_fd) {
		if (fdp->fd_knhashmask == 0) {
			u_long size = 0;

			list = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE, &size);
			if (list == NULL) {
				ret = ENOMEM;
				goto out_locked;
			}

			fdp->fd_knhash = list;
			fdp->fd_knhashmask = size;
		}

		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		SLIST_INSERT_HEAD(list, kn, kn_link);
	} else {
		/* knote is fd based */

		if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
			u_int size = 0;

			/* Make sure that fd stays below current process's soft limit AND system allowed per-process limits */
			if (kn->kn_id >= (uint64_t) nofile
			    || kn->kn_id >= (uint64_t)maxfilesperproc) {
				ret = EINVAL;
				goto out_locked;
			}
			/* have to grow the fd_knlist */
			size = fdp->fd_knlistsize;
			while (size <= kn->kn_id) {
				size += KQEXTENT;
			}

			if (size >= (UINT_MAX / sizeof(struct klist *))) {
				ret = EINVAL;
				goto out_locked;
			}

			list = kheap_alloc(KM_KQUEUE, size * sizeof(struct klist *),
			    Z_WAITOK);
			if (list == NULL) {
				ret = ENOMEM;
				goto out_locked;
			}

			bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
			    fdp->fd_knlistsize * sizeof(struct klist *));
			bzero((caddr_t)list +
			    fdp->fd_knlistsize * sizeof(struct klist *),
			    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
			kheap_free(KM_KQUEUE, fdp->fd_knlist,
			    fdp->fd_knlistsize * sizeof(struct klist *));
			fdp->fd_knlist = list;
			fdp->fd_knlistsize = size;
		}

		list = &fdp->fd_knlist[kn->kn_id];
		SLIST_INSERT_HEAD(list, kn, kn_link);
	}

out_locked:
	if (ret == 0) {
		kqlock(kq);
		assert((kn->kn_status & KN_LOCKED) == 0);
		(void)knote_lock(kq, kn, knlc, KNOTE_KQ_UNLOCK);
		kqueue_retain(kq); /* retain a kq ref */
	}
	if (is_fd) {
		proc_fdunlock(p);
	} else {
		knhash_unlock(fdp);
	}

	return ret;
}
/*
 * kq_remove_knote - remove a knote from the fd table for process
 *
 * If the filter is file-based, remove based on fd index.
 * Otherwise remove from the hash based on the ident.
 *
 * fd_knhashlock and fdlock unheld on entry (and exit).
 */
static void
kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p,
    struct knote_lock_ctx *knlc)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = NULL;
	kq_state_t kq_state;
	bool is_fd = kn->kn_is_fd;

	if (is_fd) {
		proc_fdlock(p);
	} else {
		knhash_lock(fdp);
	}

	if (is_fd) {
		assert((u_int)fdp->fd_knlistsize > kn->kn_id);
		list = &fdp->fd_knlist[kn->kn_id];
	} else {
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
	}
	SLIST_REMOVE(list, kn, knote, kn_link);

	kqlock(kq);
	kq_state = kq->kq_state;
	if (knlc) {
		knote_unlock_cancel(kq, kn, knlc);
	} else {
		kqunlock(kq);
	}
	if (is_fd) {
		proc_fdunlock(p);
	} else {
		knhash_unlock(fdp);
	}

	if (kq_state & KQ_DYNAMIC) {
		kqworkloop_release((struct kqworkloop *)kq);
	}
}
/*
 * kq_find_knote_and_kq_lock - lookup a knote in the fd table for process
 * and, if the knote is found, acquires the kqlock while holding the fd table lock/spinlock.
 *
 * fd_knhashlock or fdlock unheld on entry (and exit)
 */
static struct knote *
kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_qos_s *kev,
    bool is_fd, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct knote *kn;

	if (is_fd) {
		proc_fdlock(p);
	} else {
		knhash_lock(fdp);
	}

	/*
	 * Temporary horrible hack:
	 * this cast is gross and will go away in a future change.
	 * It is OK to do because we don't look at xflags/s_fflags,
	 * and that when we cast down the kev this way,
	 * the truncated filter field works.
	 */
	kn = knote_fdfind(kq, (struct kevent_internal_s *)kev, is_fd, p);

	if (kn) {
		kqlock(kq);
		assert(knote_get_kq(kn) == kq);
	}

	if (is_fd) {
		proc_fdunlock(p);
	} else {
		knhash_unlock(fdp);
	}

	return kn;
}
__attribute__((noinline))
static void
kqfile_wakeup(struct kqfile *kqf, __unused kq_index_t qos)
{
	/* flag wakeups during processing */
	if (kqf->kqf_state & KQ_PROCESSING) {
		kqf->kqf_state |= KQ_WAKEUP;
	}

	/* wakeup a thread waiting on this queue */
	if (kqf->kqf_state & (KQ_SLEEP | KQ_SEL)) {
		kqf->kqf_state &= ~(KQ_SLEEP | KQ_SEL);
		waitq_wakeup64_all((struct waitq *)&kqf->kqf_wqs, KQ_EVENT,
		    THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
	}

	/* wakeup other kqueues/select sets we're inside */
	KNOTE(&kqf->kqf_sel.si_note, 0);
}
static struct kqtailq *
knote_get_tailq(kqueue_t kqu, struct knote *kn)
{
	kq_index_t qos_index = kn->kn_qos_index;

	if (kqu.kq->kq_state & KQ_WORKLOOP) {
		assert(qos_index < KQWL_NBUCKETS);
	} else if (kqu.kq->kq_state & KQ_WORKQ) {
		assert(qos_index < KQWQ_NBUCKETS);
	} else {
		assert(qos_index == QOS_INDEX_KQFILE);
	}
	static_assert(offsetof(struct kqueue, kq_queue) == sizeof(struct kqueue),
	    "struct kqueue::kq_queue must be exactly at the end");
	return &kqu.kq->kq_queue[qos_index];
}
static void
knote_enqueue(kqueue_t kqu, struct knote *kn, kn_status_t wakeup_mask)
{
	kqlock_held(kqu);

	if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0) {
		return;
	}

	if (kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)) {
		return;
	}

	if ((kn->kn_status & KN_QUEUED) == 0) {
		struct kqtailq *queue = knote_get_tailq(kqu, kn);

		TAILQ_INSERT_TAIL(queue, kn, kn_tqe);
		kn->kn_status |= KN_QUEUED;
		kqu.kq->kq_count++;
	} else if ((kn->kn_status & KN_STAYACTIVE) == 0) {
		return;
	}

	if (kn->kn_status & wakeup_mask) {
		if (kqu.kq->kq_state & KQ_WORKLOOP) {
			kqworkloop_wakeup(kqu.kqwl, kn->kn_qos_index);
		} else if (kqu.kq->kq_state & KQ_WORKQ) {
			kqworkq_wakeup(kqu.kqwq, kn->kn_qos_index);
		} else {
			kqfile_wakeup(kqu.kqf, kn->kn_qos_index);
		}
	}
}
__attribute__((always_inline))
static void
knote_dequeue(kqueue_t kqu, struct knote *kn)
{
	if (kn->kn_status & KN_QUEUED) {
		struct kqtailq *queue = knote_get_tailq(kqu, kn);

		// attaching the knote calls knote_reset_priority() without
		// the kqlock which is fine, so we can't call kqlock_held()
		// if we're not queued.
		kqlock_held(kqu);

		TAILQ_REMOVE(queue, kn, kn_tqe);
		kn->kn_status &= ~KN_QUEUED;
		kqu.kq->kq_count--;
	}
}
/* called with kqueue lock held */
static void
knote_suppress(kqueue_t kqu, struct knote *kn)
{
	struct kqtailq *suppressq;

	kqlock_held(kqu);

	assert((kn->kn_status & KN_SUPPRESSED) == 0);
	assert(kn->kn_status & KN_QUEUED);

	knote_dequeue(kqu, kn);
	/* deactivate - so new activations indicate a wakeup */
	kn->kn_status &= ~KN_ACTIVE;
	kn->kn_status |= KN_SUPPRESSED;
	suppressq = kqueue_get_suppressed_queue(kqu, kn);
	TAILQ_INSERT_TAIL(suppressq, kn, kn_tqe);
}

__attribute__((always_inline))
static void
knote_unsuppress_noqueue(kqueue_t kqu, struct knote *kn)
{
	struct kqtailq *suppressq;

	kqlock_held(kqu);

	assert(kn->kn_status & KN_SUPPRESSED);

	kn->kn_status &= ~KN_SUPPRESSED;
	suppressq = kqueue_get_suppressed_queue(kqu, kn);
	TAILQ_REMOVE(suppressq, kn, kn_tqe);

	/*
	 * If the knote is no longer active, reset its push,
	 * and resynchronize kn_qos_index with kn_qos_override
	 * for knotes with a real qos.
	 */
	if ((kn->kn_status & KN_ACTIVE) == 0 && knote_has_qos(kn)) {
		kn->kn_qos_override = _pthread_priority_thread_qos_fast(kn->kn_qos);
	}
	kn->kn_qos_index = kn->kn_qos_override;
}
/* called with kqueue lock held */
static void
knote_unsuppress(kqueue_t kqu, struct knote *kn)
{
	if (kn->kn_status & KN_SUPPRESSED) {
		knote_unsuppress_noqueue(kqu, kn);

		/* don't wakeup if unsuppressing just a stay-active knote */
		knote_enqueue(kqu, kn, KN_ACTIVE);
	}
}
__attribute__((always_inline))
static void
knote_mark_active(struct knote *kn)
{
	if ((kn->kn_status & KN_ACTIVE) == 0) {
		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_ACTIVATE),
		    kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
		    kn->kn_filtid);
	}

	kn->kn_status |= KN_ACTIVE;
}

/* called with kqueue lock held */
static void
knote_activate(kqueue_t kqu, struct knote *kn, int result)
{
	assert(result & FILTER_ACTIVE);
	if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
		// may dequeue the knote
		knote_adjust_qos(kqu.kq, kn, result);
	}
	knote_mark_active(kn);
	knote_enqueue(kqu, kn, KN_ACTIVE | KN_STAYACTIVE);
}
/*
 * This function applies changes requested by f_attach or f_touch for
 * a given filter. It proceeds in a carefully chosen order to help
 * every single transition do the minimal amount of work possible.
 */
static void
knote_apply_touch(kqueue_t kqu, struct knote *kn, struct kevent_qos_s *kev,
    int result)
{
	kn_status_t wakeup_mask = KN_ACTIVE;

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		/*
		 * When a stayactive knote is reenabled, we may have missed wakeups
		 * while it was disabled, so we need to poll it. To do so, ask
		 * knote_enqueue() below to reenqueue it.
		 */
		wakeup_mask |= KN_STAYACTIVE;
		kn->kn_status &= ~KN_DISABLED;

		/*
		 * it is possible for userland to have knotes registered for a given
		 * workloop `wl_orig` but really handled on another workloop `wl_new`.
		 *
		 * In that case, rearming will happen from the servicer thread of
		 * `wl_new` which if `wl_orig` is no longer being serviced, would cause
		 * this knote to stay suppressed forever if we only relied on
		 * kqworkloop_acknowledge_events to be called by `wl_orig`.
		 *
		 * However if we see the KQ_PROCESSING bit on `wl_orig` set, we can't
		 * unsuppress because that would mess with the processing phase of
		 * `wl_orig`, however it also means kqworkloop_acknowledge_events()
		 * will be called.
		 */
		if (__improbable(kn->kn_status & KN_SUPPRESSED)) {
			if ((kqu.kq->kq_state & KQ_PROCESSING) == 0) {
				knote_unsuppress_noqueue(kqu, kn);
			}
		}
	}

	if ((result & FILTER_UPDATE_REQ_QOS) && kev->qos && kev->qos != kn->kn_qos) {
		// may dequeue the knote
		knote_reset_priority(kqu, kn, kev->qos);
	}

	/*
	 * When we unsuppress above, or because of knote_reset_priority(),
	 * the knote may have been dequeued, we need to restore the invariant
	 * that if the knote is active it needs to be queued now that
	 * we're done applying changes.
	 */
	if (result & FILTER_ACTIVE) {
		knote_activate(kqu, kn, result);
	} else {
		knote_enqueue(kqu, kn, wakeup_mask);
	}

	if ((result & FILTER_THREADREQ_NODEFEER) &&
	    act_clear_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ)) {
		workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
	}
}
/*
 * knote_drop - disconnect and drop the knote
 *
 * Called with the kqueue locked, returns with the kqueue unlocked.
 *
 * If a knote locking context is passed, it is canceled.
 *
 * The knote may have already been detached from
 * (or not yet attached to) its source object.
 */
static void
knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc)
{
	struct proc *p = kq->kq_p;

	kqlock_held(kq);

	assert((kn->kn_status & KN_DROPPING) == 0);
	if (knlc == NULL) {
		assert((kn->kn_status & KN_LOCKED) == 0);
	}
	kn->kn_status |= KN_DROPPING;

	if (kn->kn_status & KN_SUPPRESSED) {
		knote_unsuppress_noqueue(kq, kn);
	} else {
		knote_dequeue(kq, kn);
	}
	knote_wait_for_post(kq, kn);

	knote_fops(kn)->f_detach(kn);

	/* kq may be freed when kq_remove_knote() returns */
	kq_remove_knote(kq, kn, p, knlc);
	if (kn->kn_is_fd && ((kn->kn_status & KN_VANISHED) == 0)) {
		fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0);
	}

	knote_free(kn);
}
static void
knote_init(void)
{
#if CONFIG_MEMORYSTATUS
	/* Initialize the memorystatus list lock */
	memorystatus_kevent_init(&kq_lck_grp, LCK_ATTR_NULL);
#endif
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
const struct filterops *
knote_fops(struct knote *kn)
{
	return sysfilt_ops[kn->kn_filtid];
}
static struct knote *
knote_alloc(void)
{
	return zalloc_flags(knote_zone, Z_WAITOK | Z_ZERO);
}
static void
knote_free(struct knote *kn)
{
	assert((kn->kn_status & (KN_LOCKED | KN_POSTING)) == 0);
	zfree(knote_zone, kn);
}

#pragma mark - syscalls: kevent, kevent64, kevent_qos, kevent_id
static inline kevent_ctx_t
kevent_get_context(thread_t thread)
{
	uthread_t ut = get_bsdthread_info(thread);
	return &ut->uu_save.uus_kevent;
}
static inline bool
kevent_args_requesting_events(unsigned int flags, int nevents)
{
	return !(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0;
}
static inline int
kevent_adjust_flags_for_proc(proc_t p, int flags)
{
	__builtin_assume(p);
	return flags | (IS_64BIT_PROCESS(p) ? KEVENT_FLAG_PROC64 : 0);
}
/*!
 * @function kevent_get_kqfile
 *
 * @brief
 * Lookup a kqfile by fd.
 *
 * @discussion
 * Callers: kevent, kevent64, kevent_qos
 *
 * This is not assumed to be a fastpath (kqfile interfaces are legacy)
 */
static int
kevent_get_kqfile(struct proc *p, int fd, int flags,
    struct fileproc **fpp, struct kqueue **kqp)
{
	int error = 0;
	struct kqueue *kq;

	error = fp_get_ftype(p, fd, DTYPE_KQUEUE, EBADF, fpp);
	if (__improbable(error)) {
		return error;
	}
	kq = (struct kqueue *)(*fpp)->f_data;

	uint16_t kq_state = os_atomic_load(&kq->kq_state, relaxed);
	if (__improbable((kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) == 0)) {
		kqlock(kq);
		kq_state = kq->kq_state;
		if (!(kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS))) {
			if (flags & KEVENT_FLAG_LEGACY32) {
				kq_state |= KQ_KEV32;
			} else if (flags & KEVENT_FLAG_LEGACY64) {
				kq_state |= KQ_KEV64;
			} else {
				kq_state |= KQ_KEV_QOS;
			}
			kq->kq_state = kq_state;
		}
		kqunlock(kq);
	}

	/*
	 * kqfiles can't be used through the legacy kevent()
	 * and other interfaces at the same time.
	 */
	if (__improbable((bool)(flags & KEVENT_FLAG_LEGACY32) !=
	    (bool)(kq_state & KQ_KEV32))) {
		fp_drop(p, fd, *fpp, 0);
		return EINVAL;
	}

	*kqp = kq;
	return 0;
}
/*!
 * @function kevent_get_kqwq
 *
 * @brief
 * Lookup or create the process kqwq (fastpath).
 *
 * @discussion
 * Callers: kevent64, kevent_qos
 */
static int
kevent_get_kqwq(proc_t p, int flags, int nevents, struct kqueue **kqp)
{
	struct kqworkq *kqwq = p->p_fd->fd_wqkqueue;

	if (__improbable(kevent_args_requesting_events(flags, nevents))) {
		return EINVAL;
	}
	if (__improbable(kqwq == NULL)) {
		kqwq = kqworkq_alloc(p, flags);
		if (__improbable(kqwq == NULL)) {
			return ENOMEM;
		}
	}

	*kqp = &kqwq->kqwq_kqueue;
	return 0;
}

#pragma mark kevent copyio
/*!
 * @function kevent_get_data_size
 *
 * @brief
 * Copies in the extra data size from user-space.
 */
static int
kevent_get_data_size(int flags, user_addr_t data_avail, user_addr_t data_out,
    kevent_ctx_t kectx)
{
	if (!data_avail || !data_out) {
		kectx->kec_data_size  = 0;
		kectx->kec_data_resid = 0;
	} else if (flags & KEVENT_FLAG_PROC64) {
		user64_size_t usize = 0;
		int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
		if (__improbable(error)) {
			return error;
		}
		kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
	} else {
		user32_size_t usize = 0;
		int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
		if (__improbable(error)) {
			return error;
		}
		kectx->kec_data_avail = data_avail;
		kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
	}
	kectx->kec_data_out   = data_out;
	kectx->kec_data_avail = data_avail;
	return 0;
}
/*!
 * @function kevent_put_data_size
 *
 * @brief
 * Copies out the residual data size to user-space if any has been used.
 */
static int
kevent_put_data_size(unsigned int flags, kevent_ctx_t kectx)
{
	if (kectx->kec_data_resid == kectx->kec_data_size) {
		return 0;
	}
	if (flags & KEVENT_FLAG_KERNEL) {
		*(user_size_t *)(uintptr_t)kectx->kec_data_avail = kectx->kec_data_resid;
		return 0;
	}
	if (flags & KEVENT_FLAG_PROC64) {
		user64_size_t usize = (user64_size_t)kectx->kec_data_resid;
		return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
	} else {
		user32_size_t usize = (user32_size_t)kectx->kec_data_resid;
		return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
	}
}
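/*
 * Illustrative userspace sketch (not compiled here; the local names are
 * hypothetical) of the size-pointer convention implemented by
 * kevent_get_data_size()/kevent_put_data_size() above: callers of the
 * stack-data variants pass a pointer to the available size, and the kernel
 * writes back the residual size on the way out.
 *
 *	char scratch[4096];
 *	user_size_t avail = sizeof(scratch);     // copied in on entry
 *	// ... kevent_qos(..., scratch, &avail, flags) ...
 *	// on return, `avail` holds the residual (unused) size
 */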
/*!
 * @function kevent_legacy_copyin
 *
 * @brief
 * Handles the copyin of a kevent/kevent64 event.
 */
static int
kevent_legacy_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp, unsigned int flags)
{
	int error;

	assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);

	if (flags & KEVENT_FLAG_LEGACY64) {
		struct kevent64_s kev64;

		error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
		if (__improbable(error)) {
			return error;
		}
		*addrp += sizeof(kev64);
		*kevp = (struct kevent_qos_s){
			.ident  = kev64.ident,
			.filter = kev64.filter,
			/* Make sure user doesn't pass in any system flags */
			.flags  = kev64.flags & ~EV_SYSFLAGS,
			.udata  = kev64.udata,
			.fflags = kev64.fflags,
			.data   = kev64.data,
			.ext[0] = kev64.ext[0],
			.ext[1] = kev64.ext[1],
		};
	} else if (flags & KEVENT_FLAG_PROC64) {
		struct user64_kevent kev64;

		error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
		if (__improbable(error)) {
			return error;
		}
		*addrp += sizeof(kev64);
		*kevp = (struct kevent_qos_s){
			.ident  = kev64.ident,
			.filter = kev64.filter,
			/* Make sure user doesn't pass in any system flags */
			.flags  = kev64.flags & ~EV_SYSFLAGS,
			.udata  = kev64.udata,
			.fflags = kev64.fflags,
			.data   = kev64.data,
		};
	} else {
		struct user32_kevent kev32;

		error = copyin(*addrp, (caddr_t)&kev32, sizeof(kev32));
		if (__improbable(error)) {
			return error;
		}
		*addrp += sizeof(kev32);
		*kevp = (struct kevent_qos_s){
			.ident  = (uintptr_t)kev32.ident,
			.filter = kev32.filter,
			/* Make sure user doesn't pass in any system flags */
			.flags  = kev32.flags & ~EV_SYSFLAGS,
			.udata  = CAST_USER_ADDR_T(kev32.udata),
			.fflags = kev32.fflags,
			.data   = (intptr_t)kev32.data,
		};
	}

	return 0;
}
/*!
 * @function kevent_modern_copyin
 *
 * @brief
 * Handles the copyin of a kevent_qos/kevent_id event.
 */
static int
kevent_modern_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp)
{
	int error = copyin(*addrp, (caddr_t)kevp, sizeof(struct kevent_qos_s));
	if (__probable(!error)) {
		/* Make sure user doesn't pass in any system flags */
		*addrp += sizeof(struct kevent_qos_s);
		kevp->flags &= ~EV_SYSFLAGS;
	}
	return error;
}
/*!
 * @function kevent_legacy_copyout
 *
 * @brief
 * Handles the copyout of a kevent/kevent64 event.
 */
static int
kevent_legacy_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp, unsigned int flags)
{
	int advance;
	int error;

	assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);

	/*
	 * fully initialize the different output event structure
	 * types from the internal kevent (and some universal
	 * defaults for fields not represented in the internal
	 * kevent).
	 *
	 * Note: these structures have no padding hence the C99
	 *       initializers below do not leak kernel info.
	 */
	if (flags & KEVENT_FLAG_LEGACY64) {
		struct kevent64_s kev64 = {
			.ident  = kevp->ident,
			.filter = kevp->filter,
			.flags  = kevp->flags,
			.fflags = kevp->fflags,
			.data   = (int64_t)kevp->data,
			.udata  = kevp->udata,
			.ext[0] = kevp->ext[0],
			.ext[1] = kevp->ext[1],
		};
		advance = sizeof(struct kevent64_s);
		error = copyout((caddr_t)&kev64, *addrp, advance);
	} else if (flags & KEVENT_FLAG_PROC64) {
		/*
		 * deal with the special case of a user-supplied
		 * value of (uintptr_t)-1.
		 */
		uint64_t ident = (kevp->ident == (uintptr_t)-1) ?
		    (uint64_t)-1LL : (uint64_t)kevp->ident;
		struct user64_kevent kev64 = {
			.ident  = ident,
			.filter = kevp->filter,
			.flags  = kevp->flags,
			.fflags = kevp->fflags,
			.data   = (int64_t) kevp->data,
			.udata  = (user_addr_t) kevp->udata,
		};
		advance = sizeof(kev64);
		error = copyout((caddr_t)&kev64, *addrp, advance);
	} else {
		struct user32_kevent kev32 = {
			.ident  = (uint32_t)kevp->ident,
			.filter = kevp->filter,
			.flags  = kevp->flags,
			.fflags = kevp->fflags,
			.data   = (int32_t)kevp->data,
			.udata  = (uint32_t)kevp->udata,
		};
		advance = sizeof(kev32);
		error = copyout((caddr_t)&kev32, *addrp, advance);
	}
	if (__probable(!error)) {
		*addrp += advance;
	}
	return error;
}
/*!
 * @function kevent_modern_copyout
 *
 * @brief
 * Handles the copyout of a kevent_qos/kevent_id event.
 */
static inline int
kevent_modern_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp)
{
	int error = copyout((caddr_t)kevp, *addrp, sizeof(struct kevent_qos_s));
	if (__probable(!error)) {
		*addrp += sizeof(struct kevent_qos_s);
	}
	return error;
}

#pragma mark kevent core implementation
/*!
 * @function kevent_callback_inline
 *
 * @brief
 * Callback for each individual event
 *
 * @discussion
 * This is meant to be inlined in kevent_modern_callback and
 * kevent_legacy_callback.
 */
static inline int
kevent_callback_inline(struct kevent_qos_s *kevp, kevent_ctx_t kectx, bool legacy)
{
	int error;

	assert(kectx->kec_process_noutputs < kectx->kec_process_nevents);

	/*
	 * Copy out the appropriate amount of event data for this user.
	 */
	if (legacy) {
		error = kevent_legacy_copyout(kevp, &kectx->kec_process_eventlist,
		    kectx->kec_process_flags);
	} else {
		error = kevent_modern_copyout(kevp, &kectx->kec_process_eventlist);
	}

	/*
	 * If there isn't space for additional events, return
	 * a harmless error to stop the processing here
	 */
	if (error == 0 && ++kectx->kec_process_noutputs == kectx->kec_process_nevents) {
		error = EWOULDBLOCK;
	}
	return error;
}
/*!
 * @function kevent_modern_callback
 *
 * @brief
 * Callback for each individual modern event.
 *
 * @discussion
 * This callback handles kevent_qos/kevent_id events.
 */
static int
kevent_modern_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
{
	return kevent_callback_inline(kevp, kectx, /*legacy*/ false);
}
/*!
 * @function kevent_legacy_callback
 *
 * @brief
 * Callback for each individual legacy event.
 *
 * @discussion
 * This callback handles kevent/kevent64 events.
 */
static int
kevent_legacy_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
{
	return kevent_callback_inline(kevp, kectx, /*legacy*/ true);
}
/*!
 * @function kevent_cleanup
 *
 * @brief
 * Handles the cleanup returning from a kevent call.
 *
 * @discussion
 * kevent entry points will take a reference on workloops,
 * and a usecount on the fileglob of kqfiles.
 *
 * This function undoes this on the exit paths of kevents.
 *
 * @returns
 * The error to return to userspace.
 */
static int
kevent_cleanup(kqueue_t kqu, int flags, int error, kevent_ctx_t kectx)
{
	// poll should not call any codepath leading to this
	assert((flags & KEVENT_FLAG_POLL) == 0);

	if (flags & KEVENT_FLAG_WORKLOOP) {
		kqworkloop_release(kqu.kqwl);
	} else if (flags & KEVENT_FLAG_WORKQ) {
		/* nothing held */
	} else {
		fp_drop(kqu.kqf->kqf_p, kectx->kec_fd, kectx->kec_fp, 0);
	}

	/* don't restart after signals... */
	if (error == ERESTART) {
		error = EINTR;
	} else if (error == 0) {
		/* don't abandon other output just because of residual copyout failures */
		(void)kevent_put_data_size(flags, kectx);
	}

	if (flags & KEVENT_FLAG_PARKING) {
		thread_t th = current_thread();
		struct uthread *uth = get_bsdthread_info(th);
		if (uth->uu_kqr_bound) {
			thread_unfreeze_base_pri(th);
		}
	}
	return error;
}
/*!
 * @function kqueue_process
 *
 * @brief
 * Process the triggered events in a kqueue.
 *
 * @discussion
 * Walk the queued knotes and validate that they are really still triggered
 * events by calling the filter routines (if necessary).
 *
 * For each event that is still considered triggered, invoke the callback
 * routine provided.
 *
 * caller holds a reference on the kqueue.
 * kqueue locked on entry and exit - but may be dropped
 * kqueue list locked (held for duration of call)
 *
 * This is only called by kqueue_scan() so that the compiler can inline it.
 *
 * @returns
 * - 0:            no event was returned, no other error occurred
 * - EBADF:        the kqueue is being destroyed (KQ_DRAIN is set)
 * - EWOULDBLOCK:  (not an error) events have been found and we should return
 * - EFAULT:       copyout failed
 * - filter specific errors
 */
static int
kqueue_process(kqueue_t kqu, int flags, kevent_ctx_t kectx,
    kevent_callback_t callback)
{
	workq_threadreq_t kqr = current_uthread()->uu_kqr_bound;
	struct knote *kn;
	int error = 0, rc = 0;
	struct kqtailq *base_queue, *queue;
#if DEBUG || DEVELOPMENT
	int retries = 64;
#endif
	uint16_t kq_type = (kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));

	if (kq_type & KQ_WORKQ) {
		rc = kqworkq_begin_processing(kqu.kqwq, kqr, flags);
	} else if (kq_type & KQ_WORKLOOP) {
		rc = kqworkloop_begin_processing(kqu.kqwl, flags);
	} else {
		rc = kqfile_begin_processing(kqu.kqf);
	}

	if (rc == -1) {
		/* Nothing to process */
		return 0;
	}

	/*
	 * loop through the enqueued knotes associated with this request,
	 * processing each one. Each request may have several queues
	 * of knotes to process (depending on the type of kqueue) so we
	 * have to loop through all the queues as long as we have additional
	 * space.
	 */

process_again:
	if (kq_type & KQ_WORKQ) {
		base_queue = queue = &kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index];
	} else if (kq_type & KQ_WORKLOOP) {
		base_queue = &kqu.kqwl->kqwl_queue[0];
		queue = &kqu.kqwl->kqwl_queue[KQWL_NBUCKETS - 1];
	} else {
		base_queue = queue = &kqu.kqf->kqf_queue;
	}

	do {
		while ((kn = TAILQ_FIRST(queue)) != NULL) {
			error = knote_process(kn, kectx, callback);
			if (error == EJUSTRETURN) {
				error = 0;
			} else if (__improbable(error)) {
				/* error is EWOULDBLOCK when the out event array is full */
				goto stop_processing;
			}
		}
	} while (queue-- > base_queue);

	if (kectx->kec_process_noutputs) {
		/* callers will transform this into no error */
		error = EWOULDBLOCK;
	}

stop_processing:
	/*
	 * If KEVENT_FLAG_PARKING is set, and no kevents have been returned,
	 * we want to unbind the kqrequest from the thread.
	 *
	 * However, because the kq locks are dropped several times during process,
	 * new knotes may have fired again, in which case, we want to fail the end
	 * processing and process again, until it converges.
	 *
	 * If we have an error or returned events, end processing never fails.
	 */
	if (error) {
		flags &= ~KEVENT_FLAG_PARKING;
	}
	if (kq_type & KQ_WORKQ) {
		rc = kqworkq_end_processing(kqu.kqwq, kqr, flags);
	} else if (kq_type & KQ_WORKLOOP) {
		rc = kqworkloop_end_processing(kqu.kqwl, KQ_PROCESSING, flags);
	} else {
		rc = kqfile_end_processing(kqu.kqf);
	}

	if (__probable(error)) {
		return error;
	}

	if (__probable(rc >= 0)) {
		assert(rc == 0 || rc == EBADF);
		return rc;
	}

#if DEBUG || DEVELOPMENT
	if (retries-- == 0) {
		panic("kevent: way too many knote_process retries, kq: %p (0x%04x)",
		    kqu.kq, kqu.kq->kq_state);
	}
#endif
	if (kq_type & (KQ_WORKQ | KQ_WORKLOOP)) {
		assert(flags & KEVENT_FLAG_PARKING);
	}
	goto process_again;
}
/*!
 * @function kqueue_scan_continue
 *
 * @brief
 * The continuation used by kqueue_scan for kevent entry points.
 *
 * @discussion
 * Assumes we inherit a use/ref count on the kq or its fileglob.
 *
 * This is called by kqueue_scan if neither KEVENT_FLAG_POLL nor
 * KEVENT_FLAG_KERNEL was set, and the caller had to wait.
 */
OS_NORETURN OS_NOINLINE
static void
kqueue_scan_continue(void *data, wait_result_t wait_result)
{
	uthread_t ut = current_uthread();
	kevent_ctx_t kectx = &ut->uu_save.uus_kevent;
	int error = 0, flags = kectx->kec_process_flags;
	struct kqueue *kq = data;

	/*
	 * only kevent variants call in here, so we know the callback is
	 * kevent_legacy_callback or kevent_modern_callback.
	 */
	assert((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0);

	switch (wait_result) {
	case THREAD_AWAKENED:
		if (__improbable(flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64))) {
			error = kqueue_scan(kq, flags, kectx, kevent_legacy_callback);
		} else {
			error = kqueue_scan(kq, flags, kectx, kevent_modern_callback);
		}
		break;
	case THREAD_TIMED_OUT:
		error = 0;
		break;
	case THREAD_INTERRUPTED:
		error = EINTR;
		break;
	case THREAD_RESTART:
		error = EBADF;
		break;
	default:
		panic("%s: - invalid wait_result (%d)", __func__, wait_result);
	}

	error = kevent_cleanup(kq, flags, error, kectx);
	*(int32_t *)&ut->uu_rval = kectx->kec_process_noutputs;
	unix_syscall_return(error);
}
/*!
 * @function kqueue_scan
 *
 * @brief
 * Scan and wait for events in a kqueue (used by poll & kevent).
 *
 * @discussion
 * Process the triggered events in a kqueue.
 *
 * If there are no events triggered arrange to wait for them:
 * - unless KEVENT_FLAG_IMMEDIATE is set in kectx->kec_process_flags
 * - possibly until kectx->kec_deadline expires
 *
 * When it waits, and that neither KEVENT_FLAG_POLL nor KEVENT_FLAG_KERNEL
 * are set, then it will wait in the kqueue_scan_continue continuation.
 *
 * poll() will block in place, and KEVENT_FLAG_KERNEL callers
 * all pass KEVENT_FLAG_IMMEDIATE and will not wait.
 *
 * @param kq
 * The kqueue being scanned.
 *
 * @param flags
 * The KEVENT_FLAG_* flags for this call.
 *
 * @param kectx
 * The context used for this scan.
 * The uthread_t::uu_save.uus_kevent storage is used for this purpose.
 *
 * @param callback
 * The callback to be called on events successfully processed.
 * (Either kevent_legacy_callback, kevent_modern_callback or poll_callback)
 */
int
kqueue_scan(struct kqueue *kq, int flags, kevent_ctx_t kectx,
    kevent_callback_t callback)
{
	int error;

	for (;;) {
		kqlock(kq);
		error = kqueue_process(kq, flags, kectx, callback);

		/*
		 * If we got an error, events returned (EWOULDBLOCK)
		 * or blocking was disallowed (KEVENT_FLAG_IMMEDIATE),
		 * just return.
		 */
		if (__probable(error || (flags & KEVENT_FLAG_IMMEDIATE))) {
			kqunlock(kq);
			return error == EWOULDBLOCK ? 0 : error;
		}

		waitq_assert_wait64_leeway((struct waitq *)&kq->kq_wqs,
		    KQ_EVENT, THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL,
		    kectx->kec_deadline, TIMEOUT_NO_LEEWAY);
		kq->kq_state |= KQ_SLEEP;
		kqunlock(kq);

		if (__probable((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0)) {
			thread_block_parameter(kqueue_scan_continue, kq);
			__builtin_unreachable();
		}

		wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
		switch (wr) {
		case THREAD_AWAKENED:
			break;
		case THREAD_TIMED_OUT:
			return 0;
		case THREAD_INTERRUPTED:
			return EINTR;
		case THREAD_RESTART:
			return EBADF;
		default:
			panic("%s: - bad wait_result (%d)", __func__, wr);
		}
	}
}
/*!
 * @function kevent_internal
 *
 * @brief
 * Common kevent code.
 *
 * @discussion
 * Needs to be inlined to specialize for legacy or modern and
 * eliminate dead code.
 *
 * This is the core logic of kevent entry points, that will:
 * - register kevents
 * - optionally scan the kqueue for events
 *
 * The caller is giving kevent_internal a reference on the kqueue
 * or its fileproc that needs to be cleaned up by kevent_cleanup().
 */
static inline int
kevent_internal(kqueue_t kqu,
    user_addr_t changelist, int nchanges,
    user_addr_t ueventlist, int nevents,
    int flags, kevent_ctx_t kectx, int32_t *retval,
    bool legacy)
{
	int error = 0, noutputs = 0, register_rc;

	/* only bound threads can receive events on workloops */
	if (!legacy && (flags & KEVENT_FLAG_WORKLOOP)) {
#if CONFIG_WORKLOOP_DEBUG
		UU_KEVENT_HISTORY_WRITE_ENTRY(current_uthread(), {
			.uu_kqid = kqu.kqwl->kqwl_dynamicid,
			.uu_kq = error ? NULL : kqu.kq,
			.uu_error = error,
			.uu_nchanges = nchanges,
			.uu_nevents = nevents,
			.uu_flags = flags,
		});
#endif // CONFIG_WORKLOOP_DEBUG

		if (flags & KEVENT_FLAG_KERNEL) {
			/* see kevent_workq_internal */
			error = copyout(&kqu.kqwl->kqwl_dynamicid,
			    ueventlist - sizeof(kqueue_id_t), sizeof(kqueue_id_t));
			kectx->kec_data_resid -= sizeof(kqueue_id_t);
			if (__improbable(error)) {
				goto out;
			}
		}

		if (kevent_args_requesting_events(flags, nevents)) {
			/*
			 * Disable the R2K notification while doing a register, if the
			 * caller wants events too, we don't want the AST to be set if we
			 * will process these events soon.
			 */
			kqlock(kqu);
			kqu.kq->kq_state &= ~KQ_R2K_ARMED;
			kqunlock(kqu);
			flags |= KEVENT_FLAG_NEEDS_END_PROCESSING;
		}
	}

	/* register all the change requests the user provided... */
	while (nchanges > 0 && error == 0) {
		struct kevent_qos_s kev;
		struct knote *kn = NULL;

		if (legacy) {
			error = kevent_legacy_copyin(&changelist, &kev, flags);
		} else {
			error = kevent_modern_copyin(&changelist, &kev);
		}
		if (error) {
			break;
		}

		register_rc = kevent_register(kqu.kq, &kev, &kn);
		if (__improbable(!legacy && (register_rc & FILTER_REGISTER_WAIT))) {
			thread_t thread = current_thread();

			kqlock_held(kqu);

			if (act_clear_astkevent(thread, AST_KEVENT_REDRIVE_THREADREQ)) {
				workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
			}

			// f_post_register_wait is meant to call a continuation and not to
			// return, which is why we don't support FILTER_REGISTER_WAIT if
			// KEVENT_FLAG_ERROR_EVENTS is not passed, or if the event that
			// waits isn't the last.
			//
			// It is implementable, but not used by any userspace code at the
			// moment, so for now return ENOTSUP if someone tries to do it.
			if (nchanges == 1 && noutputs < nevents &&
			    (flags & KEVENT_FLAG_KERNEL) == 0 &&
			    (flags & KEVENT_FLAG_PARKING) == 0 &&
			    (flags & KEVENT_FLAG_ERROR_EVENTS) &&
			    (flags & KEVENT_FLAG_WORKLOOP)) {
				uthread_t ut = get_bsdthread_info(thread);

				/*
				 * store the continuation/completion data in the uthread
				 *
				 * Note: the kectx aliases with this,
				 *       and is destroyed in the process.
				 */
				ut->uu_save.uus_kevent_register = (struct _kevent_register){
					.kev        = kev,
					.kqwl       = kqu.kqwl,
					.eventout   = noutputs,
					.ueventlist = ueventlist,
				};
				knote_fops(kn)->f_post_register_wait(ut, kn,
				    &ut->uu_save.uus_kevent_register);
				__builtin_unreachable();
			}
			kqunlock(kqu);

			kev.flags |= EV_ERROR;
			kev.data = ENOTSUP;
		} else {
			assert((register_rc & FILTER_REGISTER_WAIT) == 0);
		}

		// keep in sync with kevent_register_wait_return()
		if (noutputs < nevents && (kev.flags & (EV_ERROR | EV_RECEIPT))) {
			if ((kev.flags & EV_ERROR) == 0) {
				kev.flags |= EV_ERROR;
				kev.data = 0;
			}
			if (legacy) {
				error = kevent_legacy_copyout(&kev, &ueventlist, flags);
			} else {
				error = kevent_modern_copyout(&kev, &ueventlist);
			}
			if (error == 0) {
				noutputs++;
			}
		} else if (kev.flags & EV_ERROR) {
			error = (int)kev.data;
		}
		nchanges--;
	}

	if ((flags & KEVENT_FLAG_ERROR_EVENTS) == 0 &&
	    nevents > 0 && noutputs == 0 && error == 0) {
		kectx->kec_process_flags = flags;
		kectx->kec_process_nevents = nevents;
		kectx->kec_process_noutputs = 0;
		kectx->kec_process_eventlist = ueventlist;

		if (legacy) {
			error = kqueue_scan(kqu.kq, flags, kectx, kevent_legacy_callback);
		} else {
			error = kqueue_scan(kqu.kq, flags, kectx, kevent_modern_callback);
		}

		noutputs = kectx->kec_process_noutputs;
	} else if (!legacy && (flags & KEVENT_FLAG_NEEDS_END_PROCESSING)) {
		/*
		 * If we didn't go through kqworkloop_end_processing(),
		 * we need to do it here.
		 *
		 * kqueue_scan will call kqworkloop_end_processing(),
		 * so we only need to do it if we didn't scan.
		 */
		kqlock(kqu);
		kqworkloop_end_processing(kqu.kqwl, 0, 0);
		kqunlock(kqu);
	}

	*retval = noutputs;
out:
	return kevent_cleanup(kqu.kq, flags, error, kectx);
}

#pragma mark modern syscalls: kevent_qos, kevent_id, kevent_workq_internal
/*!
 * @function kevent_modern_internal
 *
 * @brief
 * The backend of the kevent_id and kevent_workq_internal entry points.
 *
 * @discussion
 * Needs to be inline due to the number of arguments.
 */
static int
kevent_modern_internal(kqueue_t kqu,
    user_addr_t changelist, int nchanges,
    user_addr_t ueventlist, int nevents,
    int flags, kevent_ctx_t kectx, int32_t *retval)
{
	return kevent_internal(kqu.kq, changelist, nchanges,
	           ueventlist, nevents, flags, kectx, retval, /*legacy*/ false);
}
/*!
 * @function kevent_id
 *
 * @brief
 * The kevent_id() syscall.
 */
int
kevent_id(struct proc *p, struct kevent_id_args *uap, int32_t *retval)
{
	int error, flags = uap->flags & KEVENT_FLAG_USER;
	uthread_t uth = current_uthread();
	workq_threadreq_t kqr = uth->uu_kqr_bound;
	kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
	kqueue_t kqu;

	flags = kevent_adjust_flags_for_proc(p, flags);
	flags |= KEVENT_FLAG_DYNAMIC_KQUEUE;

	if (__improbable((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP)) !=
	    KEVENT_FLAG_WORKLOOP)) {
		return EINVAL;
	}

	error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
	if (__improbable(error)) {
		return error;
	}

	kectx->kec_deadline = 0;
	kectx->kec_fp       = NULL;
	kectx->kec_fd       = -1;
	/* the kec_process_* fields are filled if kqueue_scan is called only */

	/*
	 * Get the kq we are going to be working on
	 * As a fastpath, look at the currently bound workloop.
	 */
	kqu.kqwl = kqr ? kqr_kqworkloop(kqr) : NULL;
	if (kqu.kqwl && kqu.kqwl->kqwl_dynamicid == uap->id) {
		if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
			return EEXIST;
		}
		kqworkloop_retain(kqu.kqwl);
	} else if (__improbable(kevent_args_requesting_events(flags, uap->nevents))) {
		return EXDEV;
	} else {
		error = kqworkloop_get_or_create(p, uap->id, NULL, flags, &kqu.kqwl);
		if (__improbable(error)) {
			return error;
		}
	}

	return kevent_modern_internal(kqu, uap->changelist, uap->nchanges,
	           uap->eventlist, uap->nevents, flags, kectx, retval);
}
/*!
 * @function kevent_workq_internal
 *
 * @discussion
 * This function is exported for the sake of the workqueue subsystem.
 *
 * It is called in two ways:
 * - when a thread is about to go to userspace to ask for pending events
 * - when a thread is returning from userspace with events back
 *
 * the workqueue subsystem will only use the following flags:
 * - KEVENT_FLAG_STACK_DATA (always)
 * - KEVENT_FLAG_IMMEDIATE (always)
 * - KEVENT_FLAG_PARKING (depending on whether it is going to or returning from
 *   userspace).
 *
 * It implicitly acts on the bound kqueue, and for the case of workloops
 * will copyout the kqueue ID before anything else.
 *
 * Pthread will have setup the various arguments to fit this stack layout:
 *
 * +-------....----+--------------+-----------+--------------------+
 * |  user stack   |  data avail  |  nevents  |   pthread_self()   |
 * +-------....----+--------------+-----------+--------------------+
 *                  ^
 *                  data_out       eventlist
 *
 * When a workloop is used, the workloop ID is copied out right before
 * the eventlist and is taken from the data buffer.
 *
 * @warning
 * This function is carefully tailored to not make any call except the final tail
 * call into kevent_modern_internal. (LTO inlines current_uthread()).
 *
 * This function is performance sensitive due to the workq subsystem.
 */
int
kevent_workq_internal(struct proc *p,
    user_addr_t changelist, int nchanges,
    user_addr_t eventlist, int nevents,
    user_addr_t data_out, user_size_t *data_available,
    unsigned int flags, int32_t *retval)
{
	uthread_t uth = current_uthread();
	workq_threadreq_t kqr = uth->uu_kqr_bound;
	kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
	kqueue_t kqu;

	assert(flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE) ||
	    flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_PARKING));

	kectx->kec_data_out   = data_out;
	kectx->kec_data_avail = (uint64_t)data_available;
	kectx->kec_data_size  = *data_available;
	kectx->kec_data_resid = *data_available;
	kectx->kec_deadline   = 0;
	kectx->kec_fp         = NULL;
	kectx->kec_fd         = -1;
	/* the kec_process_* fields are filled if kqueue_scan is called only */

	flags = kevent_adjust_flags_for_proc(p, flags);

	if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
		kqu.kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
		kqworkloop_retain(kqu.kqwl);

		flags |= KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_DYNAMIC_KQUEUE |
		    KEVENT_FLAG_KERNEL;
	} else {
		kqu.kqwq = p->p_fd->fd_wqkqueue;

		flags |= KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL;
	}

	return kevent_modern_internal(kqu, changelist, nchanges,
	           eventlist, nevents, flags, kectx, retval);
}
/*!
 * @function kevent_qos
 *
 * @brief
 * The kevent_qos() syscall.
 */
int
kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
{
	uthread_t uth = current_uthread();
	kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
	int error, flags = uap->flags & KEVENT_FLAG_USER;
	struct kqueue *kq;

	if (__improbable(flags & KEVENT_ID_FLAG_USER)) {
		return EINVAL;
	}

	flags = kevent_adjust_flags_for_proc(p, flags);

	error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
	if (__improbable(error)) {
		return error;
	}

	kectx->kec_deadline = 0;
	kectx->kec_fp       = NULL;
	kectx->kec_fd       = uap->fd;
	/* the kec_process_* fields are filled if kqueue_scan is called only */

	/* get the kq we are going to be working on */
	if (__probable(flags & KEVENT_FLAG_WORKQ)) {
		error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
	} else {
		error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
	}
	if (__improbable(error)) {
		return error;
	}

	return kevent_modern_internal(kq, uap->changelist, uap->nchanges,
	           uap->eventlist, uap->nevents, flags, kectx, retval);
}

#pragma mark legacy syscalls: kevent, kevent64
/*!
 * @function kevent_legacy_get_deadline
 *
 * @brief
 * Compute the deadline for the legacy kevent syscalls.
 *
 * @discussion
 * This is not necessary if KEVENT_FLAG_IMMEDIATE is specified,
 * as this takes precedence over the deadline.
 *
 * This function will fail if utimeout is USER_ADDR_NULL
 * (the caller should check).
 */
static int
kevent_legacy_get_deadline(int flags, user_addr_t utimeout, uint64_t *deadline)
{
	struct timespec ts;

	if (flags & KEVENT_FLAG_PROC64) {
		struct user64_timespec ts64;
		int error = copyin(utimeout, &ts64, sizeof(ts64));
		if (__improbable(error)) {
			return error;
		}
		ts.tv_sec = (unsigned long)ts64.tv_sec;
		ts.tv_nsec = (long)ts64.tv_nsec;
	} else {
		struct user32_timespec ts32;
		int error = copyin(utimeout, &ts32, sizeof(ts32));
		if (__improbable(error)) {
			return error;
		}
		ts.tv_sec = ts32.tv_sec;
		ts.tv_nsec = ts32.tv_nsec;
	}

	if (!timespec_is_valid(&ts)) {
		return EINVAL;
	}

	clock_absolutetime_interval_to_deadline(tstoabstime(&ts), deadline);
	return 0;
}
/*!
 * @function kevent_legacy_internal
 *
 * @brief
 * The core implementation for kevent and kevent64
 */
static int
kevent_legacy_internal(struct proc *p, struct kevent64_args *uap,
    int32_t *retval, int flags)
{
	uthread_t uth = current_uthread();
	kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
	struct kqueue *kq;
	int error;

	if (__improbable(uap->flags & KEVENT_ID_FLAG_USER)) {
		return EINVAL;
	}

	flags = kevent_adjust_flags_for_proc(p, flags);

	kectx->kec_data_out   = 0;
	kectx->kec_data_avail = 0;
	kectx->kec_data_size  = 0;
	kectx->kec_data_resid = 0;
	kectx->kec_deadline   = 0;
	kectx->kec_fp         = NULL;
	kectx->kec_fd         = uap->fd;
	/* the kec_process_* fields are filled if kqueue_scan is called only */

	/* convert timeout to absolute - if we have one (and not immediate) */
	if (__improbable(uap->timeout && !(flags & KEVENT_FLAG_IMMEDIATE))) {
		error = kevent_legacy_get_deadline(flags, uap->timeout,
		    &kectx->kec_deadline);
		if (__improbable(error)) {
			return error;
		}
	}

	/* get the kq we are going to be working on */
	if (flags & KEVENT_FLAG_WORKQ) {
		error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
	} else {
		error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
	}
	if (__improbable(error)) {
		return error;
	}

	return kevent_internal(kq, uap->changelist, uap->nchanges,
	           uap->eventlist, uap->nevents, flags, kectx, retval,
	           /*legacy*/ true);
}
/*!
 * @function kevent
 *
 * @brief
 * The legacy kevent() syscall.
 */
int
kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
{
	struct kevent64_args args = {
		.fd         = uap->fd,
		.changelist = uap->changelist,
		.nchanges   = uap->nchanges,
		.eventlist  = uap->eventlist,
		.nevents    = uap->nevents,
		.timeout    = uap->timeout,
	};

	return kevent_legacy_internal(p, &args, retval, KEVENT_FLAG_LEGACY32);
}
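/*
 * Illustrative userspace sketch (not part of the kernel build) of the legacy
 * interface handled above: watch a descriptor for readability with the
 * plain kevent() syscall.
 *
 *	#include <sys/event.h>
 *
 *	int kq = kqueue();
 *	struct kevent kev, out;
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *	int n = kevent(kq, &kev, 1, &out, 1, NULL);  // blocks until fd is readable
 */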
/*!
 * @function kevent64
 *
 * @brief
 * The legacy kevent64() syscall.
 */
int
kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
{
	int flags = (uap->flags & KEVENT_FLAG_USER) | KEVENT_FLAG_LEGACY64;
	return kevent_legacy_internal(p, uap, retval, flags);
}
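/*
 * Illustrative userspace sketch (not part of the kernel build) for the
 * kevent64() variant above, using the wide kevent64_s layout; the timer
 * parameters are hypothetical.
 *
 *	struct kevent64_s kev;
 *	EV_SET64(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, NOTE_SECONDS,
 *	    5, 0, 0, 0);
 *	int n = kevent64(kq, &kev, 1, &kev, 1, 0, NULL);
 */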
#pragma mark - socket interface
#if SOCKETS
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/kern_event.h>
#include <sys/malloc.h>
#include <sys/sys_domain.h>
#include <sys/syslog.h>

#ifndef ROUNDUP64
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif

#ifndef ADVANCE64
#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif
static LCK_GRP_DECLARE(kev_lck_grp, "Kernel Event Protocol");
static LCK_RW_DECLARE(kev_rwlock, &kev_lck_grp);

static int kev_attach(struct socket *so, int proto, struct proc *p);
static int kev_detach(struct socket *so);
static int kev_control(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p);
static lck_mtx_t * event_getlock(struct socket *, int);
static int event_lock(struct socket *, int, void *);
static int event_unlock(struct socket *, int, void *);

static int event_sofreelastref(struct socket *);
static void kev_delete(struct kern_event_pcb *);

static struct pr_usrreqs event_usrreqs = {
	.pru_attach =           kev_attach,
	.pru_control =          kev_control,
	.pru_detach =           kev_detach,
	.pru_soreceive =        soreceive,
};

static struct protosw eventsw[] = {
	{
		.pr_type =              SOCK_RAW,
		.pr_protocol =          SYSPROTO_EVENT,
		.pr_flags =             PR_ATOMIC,
		.pr_usrreqs =           &event_usrreqs,
		.pr_lock =              event_lock,
		.pr_unlock =            event_unlock,
		.pr_getlock =           event_getlock,
	}
};

__private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS;
__private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS;

SYSCTL_NODE(_net_systm, OID_AUTO, kevt,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel event family");

struct kevtstat kevtstat;
SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kevt_getstat, "S,kevtstat", "");

SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kevt_pcblist, "S,xkevtpcb", "");
static lck_mtx_t *
event_getlock(struct socket *so, int flags)
{
#pragma unused(flags)
	struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;

	if (so->so_pcb != NULL) {
		if (so->so_usecount < 0) {
			panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
			    so, so->so_usecount, solockhistory_nr(so));
			/* NOTREACHED */
		}
	} else {
		panic("%s: so=%p NULL NO so_pcb %s\n", __func__,
		    so, solockhistory_nr(so));
		/* NOTREACHED */
	}
	return &ev_pcb->evp_mtx;
}
static int
event_lock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;

	if (lr == NULL) {
		lr_saved = __builtin_return_address(0);
	} else {
		lr_saved = lr;
	}

	if (so->so_pcb != NULL) {
		lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
	} else {
		panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
		    so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_usecount < 0) {
		panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s\n", __func__,
		    so, so->so_pcb, lr_saved, so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (refcount) {
		so->so_usecount++;
	}

	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
	return 0;
}
static int
event_unlock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;
	lck_mtx_t *mutex_held;

	if (lr == NULL) {
		lr_saved = __builtin_return_address(0);
	} else {
		lr_saved = lr;
	}

	if (refcount) {
		so->so_usecount--;
	}
	if (so->so_usecount < 0) {
		panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
		    so, so->so_usecount, solockhistory_nr(so));
		/* NOTREACHED */
	}
	if (so->so_pcb == NULL) {
		panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s\n", __func__,
		    so, so->so_usecount, (void *)lr_saved,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}
	mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);

	LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
	so->unlock_lr[so->next_unlock_lr] = lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;

	if (so->so_usecount == 0) {
		VERIFY(so->so_flags & SOF_PCBCLEARING);
		event_sofreelastref(so);
	} else {
		lck_mtx_unlock(mutex_held);
	}

	return 0;
}
static int
event_sofreelastref(struct socket *so)
{
	struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;

	LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);

	so->so_pcb = NULL;

	/*
	 * Disable upcall in the event another thread is in kev_post_msg()
	 * appending record to the receive socket buffer, since sbwakeup()
	 * may release the socket lock otherwise.
	 */
	so->so_rcv.sb_flags &= ~SB_UPCALL;
	so->so_snd.sb_flags &= ~SB_UPCALL;
	so->so_event = sonullevent;
	lck_mtx_unlock(&(ev_pcb->evp_mtx));

	LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
	lck_rw_lock_exclusive(&kev_rwlock);
	LIST_REMOVE(ev_pcb, evp_link);
	kevtstat.kes_pcbcount--;
	kevtstat.kes_gencnt++;
	lck_rw_done(&kev_rwlock);
	kev_delete(ev_pcb);

	sofreelastref(so, 1);
	return 0;
}
static int event_proto_count = (sizeof(eventsw) / sizeof(struct protosw));

static struct kern_event_head kern_event_head;

static u_int32_t static_event_id = 0;

static ZONE_DECLARE(ev_pcb_zone, "kerneventpcb",
    sizeof(struct kern_event_pcb), ZC_ZFREE_CLEARMEM);
/*
 * Install the protosw's for the NKE manager. Invoked at extension load time
 */
void
kern_event_init(struct domain *dp)
{
	struct protosw *pr;
	int i;

	VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
	VERIFY(dp == systemdomain);

	for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++) {
		net_add_proto(pr, dp, 1);
	}
}
static int
kev_attach(struct socket *so, __unused int proto, __unused struct proc *p)
{
	int error = 0;
	struct kern_event_pcb *ev_pcb;

	error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
	if (error != 0) {
		return error;
	}

	ev_pcb = zalloc_flags(ev_pcb_zone, Z_WAITOK | Z_ZERO);
	lck_mtx_init(&ev_pcb->evp_mtx, &kev_lck_grp, LCK_ATTR_NULL);

	ev_pcb->evp_socket = so;
	ev_pcb->evp_vendor_code_filter = 0xffffffff;

	so->so_pcb = (caddr_t) ev_pcb;
	lck_rw_lock_exclusive(&kev_rwlock);
	LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link);
	kevtstat.kes_pcbcount++;
	kevtstat.kes_gencnt++;
	lck_rw_done(&kev_rwlock);

	return error;
}
static void
kev_delete(struct kern_event_pcb *ev_pcb)
{
	VERIFY(ev_pcb != NULL);
	lck_mtx_destroy(&ev_pcb->evp_mtx, &kev_lck_grp);
	zfree(ev_pcb_zone, ev_pcb);
}
static int
kev_detach(struct socket *so)
{
	struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;

	if (ev_pcb != NULL) {
		soisdisconnected(so);
		so->so_flags |= SOF_PCBCLEARING;
	}

	return 0;
}
/*
 * For now, kev_vendor_code and mbuf_tags use the same
 * mechanism.
 */
errno_t
kev_vendor_code_find(
	const char      *string,
	u_int32_t       *out_vendor_code)
{
	if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
		return EINVAL;
	}
	return net_str_id_find_internal(string, out_vendor_code,
	           NSI_VENDOR_CODE, 1);
}
errno_t
kev_msg_post(struct kev_msg *event_msg)
{
	mbuf_tag_id_t min_vendor, max_vendor;

	net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);

	if (event_msg == NULL) {
		return EINVAL;
	}

	/*
	 * Limit third parties to posting events for registered vendor codes
	 * only
	 */
	if (event_msg->vendor_code < min_vendor ||
	    event_msg->vendor_code > max_vendor) {
		os_atomic_inc(&kevtstat.kes_badvendor, relaxed);
		return EINVAL;
	}
	return kev_post_msg(event_msg);
}
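/*
 * Illustrative in-kernel sketch of posting through the KPI above (the event
 * code and payload struct are hypothetical):
 *
 *	struct my_payload data = { ... };
 *	struct kev_msg msg = {
 *		.vendor_code  = vendor,          // from kev_vendor_code_find()
 *		.kev_class    = KEV_NETWORK_CLASS,
 *		.kev_subclass = KEV_DL_SUBCLASS,
 *		.event_code   = MY_EVENT_CODE,
 *	};
 *	msg.dv[0].data_length = sizeof(data);
 *	msg.dv[0].data_ptr    = &data;
 *	msg.dv[1].data_length = 0;               // terminates the vector list
 *	errno_t err = kev_msg_post(&msg);
 */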
errno_t
kev_post_msg(struct kev_msg *event_msg)
{
	struct mbuf *m, *m2;
	struct kern_event_pcb *ev_pcb;
	struct kern_event_msg *ev;
	char *tmp;
	u_int32_t total_size;
	int i;

	/* Verify the message is small enough to fit in one mbuf w/o cluster */
	total_size = KEV_MSG_HEADER_SIZE;

	for (i = 0; i < 5; i++) {
		if (event_msg->dv[i].data_length == 0) {
			break;
		}
		total_size += event_msg->dv[i].data_length;
	}

	if (total_size > MLEN) {
		os_atomic_inc(&kevtstat.kes_toobig, relaxed);
		return EMSGSIZE;
	}

	m = m_get(M_WAIT, MT_DATA);
	if (m == 0) {
		os_atomic_inc(&kevtstat.kes_nomem, relaxed);
		return ENOMEM;
	}
	ev = mtod(m, struct kern_event_msg *);
	total_size = KEV_MSG_HEADER_SIZE;

	tmp = (char *) &ev->event_data[0];
	for (i = 0; i < 5; i++) {
		if (event_msg->dv[i].data_length == 0) {
			break;
		}

		total_size += event_msg->dv[i].data_length;
		bcopy(event_msg->dv[i].data_ptr, tmp,
		    event_msg->dv[i].data_length);
		tmp += event_msg->dv[i].data_length;
	}

	ev->id = ++static_event_id;
	ev->total_size = total_size;
	ev->vendor_code = event_msg->vendor_code;
	ev->kev_class = event_msg->kev_class;
	ev->kev_subclass = event_msg->kev_subclass;
	ev->event_code = event_msg->event_code;

	m->m_len = total_size;
	lck_rw_lock_shared(&kev_rwlock);
	for (ev_pcb = LIST_FIRST(&kern_event_head);
	    ev_pcb;
	    ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
		lck_mtx_lock(&ev_pcb->evp_mtx);
		if (ev_pcb->evp_socket->so_pcb == NULL) {
			lck_mtx_unlock(&ev_pcb->evp_mtx);
			continue;
		}
		if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) {
			if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) {
				lck_mtx_unlock(&ev_pcb->evp_mtx);
				continue;
			}

			if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) {
				if (ev_pcb->evp_class_filter != ev->kev_class) {
					lck_mtx_unlock(&ev_pcb->evp_mtx);
					continue;
				}

				if ((ev_pcb->evp_subclass_filter !=
				    KEV_ANY_SUBCLASS) &&
				    (ev_pcb->evp_subclass_filter !=
				    ev->kev_subclass)) {
					lck_mtx_unlock(&ev_pcb->evp_mtx);
					continue;
				}
			}
		}

		m2 = m_copym(m, 0, m->m_len, M_WAIT);
		if (m2 == 0) {
			os_atomic_inc(&kevtstat.kes_nomem, relaxed);
			m_free(m);
			lck_mtx_unlock(&ev_pcb->evp_mtx);
			lck_rw_done(&kev_rwlock);
			return ENOMEM;
		}
		if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) {
			/*
			 * We use "m" for the socket stats as it would be
			 * unsafe to use "m2"
			 */
			so_inc_recv_data_stat(ev_pcb->evp_socket,
			    1, m->m_len, MBUF_TC_BE);

			sorwakeup(ev_pcb->evp_socket);
			os_atomic_inc(&kevtstat.kes_posted, relaxed);
		} else {
			os_atomic_inc(&kevtstat.kes_fullsock, relaxed);
		}
		lck_mtx_unlock(&ev_pcb->evp_mtx);
	}
	m_free(m);
	lck_rw_done(&kev_rwlock);

	return 0;
}
static int
kev_control(struct socket *so,
    u_long cmd,
    caddr_t data,
    __unused struct ifnet *ifp,
    __unused struct proc *p)
{
	struct kev_request *kev_req = (struct kev_request *) data;
	struct kern_event_pcb *ev_pcb;
	struct kev_vendor_code *kev_vendor;
	u_int32_t *id_value = (u_int32_t *) data;

	switch (cmd) {
	case SIOCGKEVID:
		*id_value = static_event_id;
		break;
	case SIOCSKEVFILT:
		ev_pcb = (struct kern_event_pcb *) so->so_pcb;
		ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
		ev_pcb->evp_class_filter = kev_req->kev_class;
		ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
		break;
	case SIOCGKEVFILT:
		ev_pcb = (struct kern_event_pcb *) so->so_pcb;
		kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
		kev_req->kev_class = ev_pcb->evp_class_filter;
		kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
		break;
	case SIOCGKEVVENDOR:
		kev_vendor = (struct kev_vendor_code *)data;
		/* Make sure string is NULL terminated */
		kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN - 1] = 0;
		return net_str_id_find_internal(kev_vendor->vendor_string,
		           &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0);
	default:
		return ENOTSUP;
	}

	return 0;
}
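/*
 * Illustrative userspace sketch of consuming kernel events through this
 * socket interface: open a PF_SYSTEM/SYSPROTO_EVENT socket, install a
 * filter with SIOCSKEVFILT, then read kern_event_msg records.
 *
 *	int s = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
 *	struct kev_request req = {
 *		.vendor_code  = KEV_VENDOR_APPLE,
 *		.kev_class    = KEV_NETWORK_CLASS,
 *		.kev_subclass = KEV_ANY_SUBCLASS,
 *	};
 *	ioctl(s, SIOCSKEVFILT, &req);
 *	char buf[1024];
 *	ssize_t len = recv(s, buf, sizeof(buf), 0);
 *	struct kern_event_msg *ev = (struct kern_event_msg *)buf;
 */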
__private_extern__ int
kevt_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	lck_rw_lock_shared(&kev_rwlock);

	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(struct kevtstat);
		goto done;
	}

	error = SYSCTL_OUT(req, &kevtstat,
	    MIN(sizeof(struct kevtstat), req->oldlen));
done:
	lck_rw_done(&kev_rwlock);

	return error;
}
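/*
 * The statistics exported above are reachable from userspace as a sysctl,
 * e.g. (illustrative, not compiled here):
 *
 *	struct kevtstat st;
 *	size_t len = sizeof(st);
 *	sysctlbyname("net.systm.kevt.stats", &st, &len, NULL, 0);
 */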
__private_extern__ int
kevt_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	uint64_t n, i;
	struct xsystmgen xsg;
	void *buf = NULL;
	size_t item_size = ROUNDUP64(sizeof(struct xkevtpcb)) +
	    ROUNDUP64(sizeof(struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
	    ROUNDUP64(sizeof(struct xsockstat_n));
	struct kern_event_pcb *ev_pcb;

	buf = kheap_alloc(KHEAP_TEMP, item_size, Z_WAITOK | Z_ZERO);
	if (buf == NULL) {
		return ENOMEM;
	}

	lck_rw_lock_shared(&kev_rwlock);

	n = kevtstat.kes_pcbcount;

	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = (size_t) ((n + n / 8) * item_size);
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}

	bzero(&xsg, sizeof(xsg));
	xsg.xg_len = sizeof(xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kevtstat.kes_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
	if (error) {
		goto done;
	}

	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	for (i = 0, ev_pcb = LIST_FIRST(&kern_event_head);
	    i < n && ev_pcb != NULL;
	    i++, ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
		struct xkevtpcb *xk = (struct xkevtpcb *)buf;
		struct xsocket_n *xso = (struct xsocket_n *)
		    ADVANCE64(xk, sizeof(*xk));
		struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
		    ADVANCE64(xso, sizeof(*xso));
		struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
		    ADVANCE64(xsbrcv, sizeof(*xsbrcv));
		struct xsockstat_n *xsostats = (struct xsockstat_n *)
		    ADVANCE64(xsbsnd, sizeof(*xsbsnd));

		bzero(buf, item_size);

		lck_mtx_lock(&ev_pcb->evp_mtx);

		xk->kep_len = sizeof(struct xkevtpcb);
		xk->kep_kind = XSO_EVT;
		xk->kep_evtpcb = (uint64_t)VM_KERNEL_ADDRPERM(ev_pcb);
		xk->kep_vendor_code_filter = ev_pcb->evp_vendor_code_filter;
		xk->kep_class_filter = ev_pcb->evp_class_filter;
		xk->kep_subclass_filter = ev_pcb->evp_subclass_filter;

		sotoxsocket_n(ev_pcb->evp_socket, xso);
		sbtoxsockbuf_n(ev_pcb->evp_socket ?
		    &ev_pcb->evp_socket->so_rcv : NULL, xsbrcv);
		sbtoxsockbuf_n(ev_pcb->evp_socket ?
		    &ev_pcb->evp_socket->so_snd : NULL, xsbsnd);
		sbtoxsockstat_n(ev_pcb->evp_socket, xsostats);

		lck_mtx_unlock(&ev_pcb->evp_mtx);

		error = SYSCTL_OUT(req, buf, item_size);
		if (error) {
			break;
		}
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof(xsg));
		xsg.xg_len = sizeof(xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kevtstat.kes_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
	}

done:
	lck_rw_done(&kev_rwlock);

	kheap_free(KHEAP_TEMP, buf, item_size);

	return error;
}

#endif /* SOCKETS */
int
fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)
{
	struct vinfo_stat * st;

	st = &kinfo->kq_stat;

	st->vst_size = kq->kq_count;
	if (kq->kq_state & KQ_KEV_QOS) {
		st->vst_blksize = sizeof(struct kevent_qos_s);
	} else if (kq->kq_state & KQ_KEV64) {
		st->vst_blksize = sizeof(struct kevent64_s);
	} else {
		st->vst_blksize = sizeof(struct kevent);
	}
	st->vst_mode = S_IFIFO;
	st->vst_ino = (kq->kq_state & KQ_DYNAMIC) ?
	    ((struct kqworkloop *)kq)->kqwl_dynamicid : 0;

	/* flags exported to libproc as PROC_KQUEUE_* (sys/proc_info.h) */
#define PROC_KQUEUE_MASK (KQ_SEL|KQ_SLEEP|KQ_KEV32|KQ_KEV64|KQ_KEV_QOS|KQ_WORKQ|KQ_WORKLOOP)
	kinfo->kq_state = kq->kq_state & PROC_KQUEUE_MASK;

	return 0;
}
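/*
 * The info filled above surfaces through libproc; a hedged userspace sketch
 * (flavor and struct names as declared in sys/proc_info.h; verify against
 * the header before use):
 *
 *	struct kqueue_fdinfo kqi;
 *	int ret = proc_pidfdinfo(pid, kqfd, PROC_PIDFDKQUEUEINFO,
 *	    &kqi, sizeof(kqi));
 */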
static int
fill_kqueue_dyninfo(struct kqworkloop *kqwl, struct kqueue_dyninfo *kqdi)
{
	workq_threadreq_t kqr = &kqwl->kqwl_request;
	workq_threadreq_param_t trp = {};
	int err;

	if ((kqwl->kqwl_state & KQ_WORKLOOP) == 0) {
		return EINVAL;
	}

	if ((err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi->kqdi_info))) {
		return err;
	}

	kqlock(kqwl);

	kqdi->kqdi_servicer = thread_tid(kqr_thread(kqr));
	kqdi->kqdi_owner = thread_tid(kqwl->kqwl_owner);
	kqdi->kqdi_request_state = kqr->tr_state;
	kqdi->kqdi_async_qos = kqr->tr_kq_qos_index;
	kqdi->kqdi_events_qos = kqr->tr_kq_override_index;
	kqdi->kqdi_sync_waiters = 0;
	kqdi->kqdi_sync_waiter_qos = 0;

	trp.trp_value = kqwl->kqwl_params;
	if (trp.trp_flags & TRP_PRIORITY) {
		kqdi->kqdi_pri = trp.trp_pri;
	} else {
		kqdi->kqdi_pri = 0;
	}

	if (trp.trp_flags & TRP_POLICY) {
		kqdi->kqdi_pol = trp.trp_pol;
	} else {
		kqdi->kqdi_pol = 0;
	}

	if (trp.trp_flags & TRP_CPUPERCENT) {
		kqdi->kqdi_cpupercent = trp.trp_cpupercent;
	} else {
		kqdi->kqdi_cpupercent = 0;
	}

	kqunlock(kqwl);

	return 0;
}
void
knote_markstayactive(struct knote *kn)
{
	struct kqueue *kq = knote_get_kq(kn);
	kq_index_t qos;

	kqlock(kq);
	kn->kn_status |= KN_STAYACTIVE;

	/*
	 * Making a knote stay active is a property of the knote that must be
	 * established before it is fully attached.
	 */
	assert((kn->kn_status & (KN_QUEUED | KN_SUPPRESSED)) == 0);

	/* handle all stayactive knotes on the (appropriate) manager */
	if (kq->kq_state & KQ_WORKLOOP) {
		struct kqworkloop *kqwl = (struct kqworkloop *)kq;

		qos = _pthread_priority_thread_qos(kn->kn_qos);
		assert(qos && qos < THREAD_QOS_LAST);
		kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_STAYACTIVE_QOS, qos);
		qos = KQWL_BUCKET_STAYACTIVE;
	} else if (kq->kq_state & KQ_WORKQ) {
		qos = KQWQ_QOS_MANAGER;
	} else {
		qos = THREAD_QOS_UNSPECIFIED;
	}

	kn->kn_qos_override = qos;
	kn->kn_qos_index = qos;

	knote_activate(kq, kn, FILTER_ACTIVE);
	kqunlock(kq);
}
void
knote_clearstayactive(struct knote *kn)
{
	struct kqueue *kq = knote_get_kq(kn);
	kqlock(kq);
	kn->kn_status &= ~(KN_STAYACTIVE | KN_ACTIVE);
	knote_dequeue(kq, kn);
	kqunlock(kq);
}
static unsigned long
kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *buf,
    unsigned long buflen, unsigned long nknotes)
{
	for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
		if (kq == knote_get_kq(kn)) {
			if (nknotes < buflen) {
				struct kevent_extinfo *info = &buf[nknotes];

				kqlock(kq);

				info->kqext_kev = *(struct kevent_qos_s *)&kn->kn_kevent;
				if (knote_has_qos(kn)) {
					info->kqext_kev.qos =
					    _pthread_priority_thread_qos_fast(kn->kn_qos);
				} else {
					info->kqext_kev.qos = kn->kn_qos_override;
				}
				info->kqext_kev.filter |= 0xff00; /* sign extend filter */
				info->kqext_kev.xflags = 0; /* this is where sfflags lives */
				info->kqext_kev.data = 0; /* this is where sdata lives */
				info->kqext_sdata = kn->kn_sdata;
				info->kqext_status = kn->kn_status;
				info->kqext_sfflags = kn->kn_sfflags;

				kqunlock(kq);
			}

			/* we return total number of knotes, which may be more than requested */
			nknotes++;
		}
	}

	return nknotes;
}
int
kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize,
    int32_t *nkqueues_out)
{
	proc_t p = (proc_t)proc;
	struct filedesc *fdp = p->p_fd;
	unsigned int nkqueues = 0;
	unsigned long ubuflen = ubufsize / sizeof(kqueue_id_t);
	size_t buflen, bufsize;
	kqueue_id_t *kq_ids = NULL;
	int err = 0;

	assert(p->p_fd != NULL);

	if (ubuf == USER_ADDR_NULL && ubufsize != 0) {
		err = EINVAL;
		goto out;
	}

	buflen = MIN(ubuflen, PROC_PIDDYNKQUEUES_MAX);

	if (ubuflen != 0) {
		if (os_mul_overflow(sizeof(kqueue_id_t), buflen, &bufsize)) {
			err = ERANGE;
			goto out;
		}
		kq_ids = kheap_alloc(KHEAP_TEMP, bufsize, Z_WAITOK | Z_ZERO);
		if (!kq_ids) {
			err = ENOMEM;
			goto out;
		}
	}

	kqhash_lock(fdp);

	if (fdp->fd_kqhashmask > 0) {
		for (uint32_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
			struct kqworkloop *kqwl;

			LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
				/* report the number of kqueues, even if they don't all fit */
				if (nkqueues < buflen) {
					kq_ids[nkqueues] = kqwl->kqwl_dynamicid;
				}
				nkqueues++;
			}
		}
	}

	kqhash_unlock(fdp);

	if (kq_ids) {
		size_t copysize;
		if (os_mul_overflow(sizeof(kqueue_id_t), MIN(buflen, nkqueues), &copysize)) {
			err = ERANGE;
			goto out;
		}

		assert(ubufsize >= copysize);
		err = copyout(kq_ids, ubuf, copysize);
	}

out:
	if (kq_ids) {
		kheap_free(KHEAP_TEMP, kq_ids, bufsize);
	}

	if (!err) {
		*nkqueues_out = (int)min(nkqueues, PROC_PIDDYNKQUEUES_MAX);
	}
	return err;
}
int
kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
    uint32_t ubufsize, int32_t *size_out)
{
	proc_t p = (proc_t)proc;
	struct kqworkloop *kqwl;
	int err = 0;
	struct kqueue_dyninfo kqdi = { };

	assert(p->p_fd != NULL);

	if (ubufsize < sizeof(struct kqueue_info)) {
		return ENOBUFS;
	}

	kqwl = kqworkloop_hash_lookup_and_retain(p->p_fd, kq_id);
	if (!kqwl) {
		return ESRCH;
	}

	/*
	 * backward compatibility: allow the argument to this call to only be
	 * a struct kqueue_info
	 */
	if (ubufsize >= sizeof(struct kqueue_dyninfo)) {
		ubufsize = sizeof(struct kqueue_dyninfo);
		err = fill_kqueue_dyninfo(kqwl, &kqdi);
	} else {
		ubufsize = sizeof(struct kqueue_info);
		err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi.kqdi_info);
	}
	if (err == 0 && (err = copyout(&kqdi, ubuf, ubufsize)) == 0) {
		*size_out = ubufsize;
	}
	kqworkloop_release(kqwl);
	return err;
}
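/*
 * Hedged userspace sketch for the dynamic-kqueue variant above (assuming the
 * libproc wrapper proc_piddynkqueueinfo() with the PROC_PIDDYNKQUEUE_INFO
 * flavor; check libproc.h for the exact signature before relying on it):
 *
 *	struct kqueue_dyninfo kqdi;
 *	int ret = proc_piddynkqueueinfo(pid, PROC_PIDDYNKQUEUE_INFO, kq_id,
 *	    &kqdi, sizeof(kqdi));
 */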
int
kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
    uint32_t ubufsize, int32_t *nknotes_out)
{
	proc_t p = (proc_t)proc;
	struct kqworkloop *kqwl;
	int err;

	kqwl = kqworkloop_hash_lookup_and_retain(p->p_fd, kq_id);
	if (!kqwl) {
		return ESRCH;
	}

	err = pid_kqueue_extinfo(p, &kqwl->kqwl_kqueue, ubuf, ubufsize, nknotes_out);
	kqworkloop_release(kqwl);
	return err;
}
int
pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf,
    uint32_t bufsize, int32_t *retval)
{
	struct knote *kn;
	int i;
	int err = 0;
	struct filedesc *fdp = p->p_fd;
	unsigned long nknotes = 0;
	unsigned long buflen = bufsize / sizeof(struct kevent_extinfo);
	struct kevent_extinfo *kqext = NULL;

	/* arbitrary upper limit to cap kernel memory usage, copyout size, etc. */
	buflen = MIN(buflen, PROC_PIDFDKQUEUE_KNOTES_MAX);

	kqext = kheap_alloc(KHEAP_TEMP,
	    buflen * sizeof(struct kevent_extinfo), Z_WAITOK | Z_ZERO);
	if (kqext == NULL) {
		err = ENOMEM;
		goto out;
	}

	proc_fdlock(p);
	for (i = 0; i < fdp->fd_knlistsize; i++) {
		kn = SLIST_FIRST(&fdp->fd_knlist[i]);
		nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
	}
	proc_fdunlock(p);

	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
			knhash_lock(fdp);
			kn = SLIST_FIRST(&fdp->fd_knhash[i]);
			nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
			knhash_unlock(fdp);
		}
	}

	assert(bufsize >= sizeof(struct kevent_extinfo) * MIN(buflen, nknotes));
	err = copyout(kqext, ubuf, sizeof(struct kevent_extinfo) * MIN(buflen, nknotes));

out:
	kheap_free(KHEAP_TEMP, kqext, buflen * sizeof(struct kevent_extinfo));

	if (!err) {
		*retval = (int32_t)MIN(nknotes, PROC_PIDFDKQUEUE_KNOTES_MAX);
	}
	return err;
}
static unsigned int
klist_copy_udata(struct klist *list, uint64_t *buf,
    unsigned int buflen, unsigned int nknotes)
{
	struct knote *kn;
	SLIST_FOREACH(kn, list, kn_link) {
		if (nknotes < buflen) {
			/*
			 * kevent_register will always set kn_udata atomically
			 * so that we don't have to take any kqlock here.
			 */
			buf[nknotes] = os_atomic_load_wide(&kn->kn_udata, relaxed);
		}
		/* we return total number of knotes, which may be more than requested */
		nknotes++;
	}

	return nknotes;
}
int
kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize)
{
	proc_t p = (proc_t)proc;
	struct filedesc *fdp = p->p_fd;
	unsigned int nuptrs = 0;
	unsigned int buflen = bufsize / sizeof(uint64_t);
	struct kqworkloop *kqwl;

	if (buflen > 0) {
		assert(buf != NULL);
	}

	proc_fdlock(p);
	for (int i = 0; i < fdp->fd_knlistsize; i++) {
		nuptrs = klist_copy_udata(&fdp->fd_knlist[i], buf, buflen, nuptrs);
	}
	proc_fdunlock(p);

	knhash_lock(fdp);
	if (fdp->fd_knhashmask != 0) {
		for (size_t i = 0; i < fdp->fd_knhashmask + 1; i++) {
			nuptrs = klist_copy_udata(&fdp->fd_knhash[i], buf, buflen, nuptrs);
		}
	}
	knhash_unlock(fdp);

	kqhash_lock(fdp);
	if (fdp->fd_kqhashmask != 0) {
		for (size_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
			LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
				if (nuptrs < buflen) {
					buf[nuptrs] = kqwl->kqwl_dynamicid;
				}
				nuptrs++;
			}
		}
	}
	kqhash_unlock(fdp);

	return (int)nuptrs;
}
static void
kevent_set_return_to_kernel_user_tsd(proc_t p, thread_t thread)
{
	uint64_t ast_addr;
	bool proc_is_64bit = !!(p->p_flag & P_LP64);
	size_t user_addr_size = proc_is_64bit ? 8 : 4;
	uint32_t ast_flags32 = 0;
	uint64_t ast_flags64 = 0;
	struct uthread *ut = get_bsdthread_info(thread);

	if (ut->uu_kqr_bound != NULL) {
		ast_flags64 |= R2K_WORKLOOP_PENDING_EVENTS;
	}

	if (ast_flags64 == 0) {
		return;
	}

	if (!(p->p_flag & P_LP64)) {
		ast_flags32 = (uint32_t)ast_flags64;
		assert(ast_flags64 < 0x100000000ull);
	}

	ast_addr = thread_rettokern_addr(thread);
	if (ast_addr == 0) {
		return;
	}

	if (copyout((proc_is_64bit ? (void *)&ast_flags64 : (void *)&ast_flags32),
	    (user_addr_t)ast_addr,
	    user_addr_size) != 0) {
		printf("pid %d (tid:%llu): copyout of return_to_kernel ast flags failed with "
		    "ast_addr = %llu\n", p->p_pid, thread_tid(current_thread()), ast_addr);
	}
}
void
kevent_ast(thread_t thread, uint16_t bits)
{
	proc_t p = current_proc();

	if (bits & AST_KEVENT_REDRIVE_THREADREQ) {
		workq_kern_threadreq_redrive(p, WORKQ_THREADREQ_CAN_CREATE_THREADS);
	}
	if (bits & AST_KEVENT_RETURN_TO_KERNEL) {
		kevent_set_return_to_kernel_user_tsd(p, thread);
	}
}
#if DEVELOPMENT || DEBUG

#define KEVENT_SYSCTL_BOUND_ID 1

static int
kevent_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	uintptr_t type = (uintptr_t)arg1;
	uint64_t bound_id = 0;

	if (type != KEVENT_SYSCTL_BOUND_ID) {
		return EINVAL;
	}

	if (req->newptr) {
		return EINVAL;
	}

	struct uthread *ut = get_bsdthread_info(current_thread());
	if (!ut) {
		return EFAULT;
	}

	workq_threadreq_t kqr = ut->uu_kqr_bound;
	if (kqr) {
		if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
			bound_id = kqr_kqworkloop(kqr)->kqwl_dynamicid;
		} else {
			bound_id = -1;
		}
	}

	return sysctl_io_number(req, bound_id, sizeof(bound_id), NULL, NULL);
}

SYSCTL_NODE(_kern, OID_AUTO, kevent, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "kevent information");

SYSCTL_PROC(_kern_kevent, OID_AUTO, bound_id,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    (void *)KEVENT_SYSCTL_BOUND_ID,
    sizeof(kqueue_id_t), kevent_sysctl, "Q",
    "get the ID of the bound kqueue");
#endif /* DEVELOPMENT || DEBUG */