/*
- * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* @(#)kern_event.c 1.0 (3/31/2000)
*/
#include <stdint.h>
-#include <stdatomic.h>
+#include <machine/atomic.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
+#include <sys/syscall.h> // SYS_* constants
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/sysproto.h>
#include <sys/codesign.h>
#include <sys/pthread_shims.h>
#include <sys/kdebug.h>
-#include <sys/reason.h>
-#include <os/reason_private.h>
+#include <os/base.h>
+#include <pexpert/pexpert.h>
#include <kern/locks.h>
#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/kcdata.h>
+#include <pthread/priority_private.h>
+#include <pthread/workqueue_syscalls.h>
+#include <pthread/workqueue_internal.h>
#include <libkern/libkern.h>
-#include <libkern/OSAtomic.h>
#include "net/net_str_id.h"
#include <sys/kern_memorystatus.h>
#endif
-extern thread_t port_name_to_thread(mach_port_name_t port_name); /* osfmk/kern/ipc_tt.h */
extern mach_port_name_t ipc_entry_name_mask(mach_port_name_t name); /* osfmk/ipc/ipc_entry.h */
#define KEV_EVTID(code) BSDDBG_CODE(DBG_BSD_KEVENT, (code))
-/*
- * JMM - this typedef needs to be unified with pthread_priority_t
- * and mach_msg_priority_t. It also needs to be the same type
- * everywhere.
- */
-typedef int32_t qos_t;
-
MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
-#define KQ_EVENT NO_EVENT64
-
-#define KNUSE_NONE 0x0
-#define KNUSE_STEAL_DROP 0x1
-#define KNUSE_BOOST 0x2
-static int kqlock2knoteuse(struct kqueue *kq, struct knote *kn, int flags);
-static int kqlock2knotedrop(struct kqueue *kq, struct knote *kn);
-static int kqlock2knotedetach(struct kqueue *kq, struct knote *kn, int flags);
-static int knoteuse2kqlock(struct kqueue *kq, struct knote *kn, int flags);
-
-static int kqueue_read(struct fileproc *fp, struct uio *uio,
- int flags, vfs_context_t ctx);
-static int kqueue_write(struct fileproc *fp, struct uio *uio,
- int flags, vfs_context_t ctx);
-static int kqueue_ioctl(struct fileproc *fp, u_long com, caddr_t data,
- vfs_context_t ctx);
+#define KQ_EVENT NO_EVENT64
+
static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
- vfs_context_t ctx);
+ vfs_context_t ctx);
static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
- struct kevent_internal_s *kev, vfs_context_t ctx);
+ struct kevent_qos_s *kev);
static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);
static const struct fileops kqueueops = {
- .fo_type = DTYPE_KQUEUE,
- .fo_read = kqueue_read,
- .fo_write = kqueue_write,
- .fo_ioctl = kqueue_ioctl,
- .fo_select = kqueue_select,
- .fo_close = kqueue_close,
+ .fo_type = DTYPE_KQUEUE,
+ .fo_read = fo_no_read,
+ .fo_write = fo_no_write,
+ .fo_ioctl = fo_no_ioctl,
+ .fo_select = kqueue_select,
+ .fo_close = kqueue_close,
+ .fo_drain = kqueue_drain,
.fo_kqfilter = kqueue_kqfilter,
- .fo_drain = kqueue_drain,
};
-static void kevent_put_kq(struct proc *p, kqueue_id_t id, struct fileproc *fp, struct kqueue *kq);
-static int kevent_internal(struct proc *p,
- kqueue_id_t id, kqueue_id_t *id_out,
- user_addr_t changelist, int nchanges,
- user_addr_t eventlist, int nevents,
- user_addr_t data_out, uint64_t data_available,
- unsigned int flags, user_addr_t utimeout,
- kqueue_continue_t continuation,
- int32_t *retval);
-static int kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp,
- struct proc *p, unsigned int flags);
-static int kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp,
- struct proc *p, unsigned int flags);
-char * kevent_description(struct kevent_internal_s *kevp, char *s, size_t n);
-
-static void kqueue_interrupt(struct kqueue *kq);
-static int kevent_callback(struct kqueue *kq, struct kevent_internal_s *kevp,
- void *data);
-static void kevent_continue(struct kqueue *kq, void *data, int error);
-static void kqueue_scan_continue(void *contp, wait_result_t wait_result);
-static int kqueue_process(struct kqueue *kq, kevent_callback_t callback, void *callback_data,
- struct filt_process_s *process_data, int *countp, struct proc *p);
-static struct kqtailq *kqueue_get_base_queue(struct kqueue *kq, kq_index_t qos_index);
-static struct kqtailq *kqueue_get_high_queue(struct kqueue *kq, kq_index_t qos_index);
-static int kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index);
-
-static struct kqtailq *kqueue_get_suppressed_queue(struct kqueue *kq, kq_index_t qos_index);
-
-static void kqworkq_request_thread(struct kqworkq *kqwq, kq_index_t qos_index);
-static void kqworkq_request_help(struct kqworkq *kqwq, kq_index_t qos_index);
-static void kqworkq_update_override(struct kqworkq *kqwq, kq_index_t qos_index, kq_index_t override_index);
-static void kqworkq_bind_thread_impl(struct kqworkq *kqwq, kq_index_t qos_index, thread_t thread, unsigned int flags);
-static void kqworkq_unbind_thread(struct kqworkq *kqwq, kq_index_t qos_index, thread_t thread, unsigned int flags);
-static struct kqrequest *kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index);
+static inline int kevent_modern_copyout(struct kevent_qos_s *, user_addr_t *);
+static int kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int result);
+static void kevent_register_wait_block(struct turnstile *ts, thread_t handoff_thread,
+ thread_continue_t cont, struct _kevent_register *cont_args) __dead2;
+static void kevent_register_wait_return(struct _kevent_register *cont_args) __dead2;
+static void kevent_register_wait_cleanup(struct knote *kn);
-enum {
- KQWL_UO_NONE = 0,
- KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI = 0x1,
- KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI = 0x2,
- KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS = 0x4,
- KQWL_UO_UPDATE_OVERRIDE_LAZY = 0x8
-};
+static struct kqtailq *kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn);
+static void kqueue_threadreq_initiate(struct kqueue *kq, workq_threadreq_t, kq_index_t qos, int flags);
+
+static void kqworkq_unbind(proc_t p, workq_threadreq_t);
+static thread_qos_t kqworkq_unbind_locked(struct kqworkq *kqwq, workq_threadreq_t, thread_t thread);
+static workq_threadreq_t kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index);
+
+static void kqworkloop_unbind(struct kqworkloop *kwql);
-static void kqworkloop_update_override(struct kqworkloop *kqwl, kq_index_t qos_index, kq_index_t override_index, uint32_t flags);
-static void kqworkloop_bind_thread_impl(struct kqworkloop *kqwl, thread_t thread, unsigned int flags);
-static void kqworkloop_unbind_thread(struct kqworkloop *kqwl, thread_t thread, unsigned int flags);
-static inline kq_index_t kqworkloop_combined_qos(struct kqworkloop *kqwl, boolean_t *);
-static void kqworkloop_update_suppress_sync_count(struct kqrequest *kqr, uint32_t flags);
+enum kqwl_unbind_locked_mode {
+ KQWL_OVERRIDE_DROP_IMMEDIATELY,
+ KQWL_OVERRIDE_DROP_DELAYED,
+};
+static void kqworkloop_unbind_locked(struct kqworkloop *kwql, thread_t thread,
+ enum kqwl_unbind_locked_mode how);
+static void kqworkloop_unbind_delayed_override_drop(thread_t thread);
+static kq_index_t kqworkloop_override(struct kqworkloop *kqwl);
+static void kqworkloop_set_overcommit(struct kqworkloop *kqwl);
enum {
KQWL_UTQ_NONE,
/*
*
* This QoS is accounted for with the events override in the
* kqr_override_index field. It is raised each time a new knote is queued at
- * a given QoS. The kqr_wakeup_indexes field is a superset of the non empty
+ * a given QoS. The kqwl_wakeup_indexes field is a superset of the non empty
* knote buckets and is recomputed after each event delivery.
*/
KQWL_UTQ_UPDATE_WAKEUP_QOS,
KQWL_UTQ_UPDATE_STAYACTIVE_QOS,
KQWL_UTQ_RECOMPUTE_WAKEUP_QOS,
+ KQWL_UTQ_UNBINDING, /* attempt to rebind */
+ KQWL_UTQ_PARKING,
/*
* The wakeup override is for suppressed knotes that have fired again at
* a higher QoS than the one for which they are suppressed already.
KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
KQWL_UTQ_RESET_WAKEUP_OVERRIDE,
/*
- * The async QoS is the maximum QoS of an event enqueued on this workloop in
+ * The QoS is the maximum QoS of an event enqueued on this workloop in
* userland. It is copied from the only EVFILT_WORKLOOP knote with
* a NOTE_WL_THREAD_REQUEST bit set allowed on this workloop. If there is no
* such knote, this QoS is 0.
*/
- KQWL_UTQ_SET_ASYNC_QOS,
- /*
- * The sync waiters QoS is the maximum QoS of any thread blocked on an
- * EVFILT_WORKLOOP knote marked with the NOTE_WL_SYNC_WAIT bit.
- * If there is no such knote, this QoS is 0.
- */
- KQWL_UTQ_SET_SYNC_WAITERS_QOS,
+ KQWL_UTQ_SET_QOS_INDEX,
KQWL_UTQ_REDRIVE_EVENTS,
};
static void kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos);
-static void kqworkloop_request_help(struct kqworkloop *kqwl, kq_index_t qos_index);
-
-static int knote_process(struct knote *kn, kevent_callback_t callback, void *callback_data,
- struct filt_process_s *process_data, struct proc *p);
-#if 0
-static void knote_put(struct knote *kn);
-#endif
-
-static int kq_add_knote(struct kqueue *kq, struct knote *kn,
- struct kevent_internal_s *kev, struct proc *p, int *knoteuse_flags);
-static struct knote *kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_internal_s *kev, bool is_fd, struct proc *p);
-static void kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p, kn_status_t *kn_status, uint16_t *kq_state);
+static int kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags);
-static void knote_drop(struct knote *kn, struct proc *p);
static struct knote *knote_alloc(void);
static void knote_free(struct knote *kn);
+static int kq_add_knote(struct kqueue *kq, struct knote *kn,
+ struct knote_lock_ctx *knlc, struct proc *p);
+static struct knote *kq_find_knote_and_kq_lock(struct kqueue *kq,
+ struct kevent_qos_s *kev, bool is_fd, struct proc *p);
-static void knote_activate(struct knote *kn);
-static void knote_deactivate(struct knote *kn);
+static void knote_activate(kqueue_t kqu, struct knote *kn, int result);
+static void knote_dequeue(kqueue_t kqu, struct knote *kn);
-static void knote_enable(struct knote *kn);
-static void knote_disable(struct knote *kn);
+static void knote_apply_touch(kqueue_t kqu, struct knote *kn,
+ struct kevent_qos_s *kev, int result);
+static void knote_suppress(kqueue_t kqu, struct knote *kn);
+static void knote_unsuppress(kqueue_t kqu, struct knote *kn);
+static void knote_drop(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc);
-static int knote_enqueue(struct knote *kn);
-static void knote_dequeue(struct knote *kn);
+// both of these functions may dequeue the knote and it is up to the caller
+// to enqueue the knote again
+static void knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result);
+static void knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp);
-static void knote_suppress(struct knote *kn);
-static void knote_unsuppress(struct knote *kn);
-static void knote_wakeup(struct knote *kn);
+static zone_t knote_zone;
+static zone_t kqfile_zone;
+static zone_t kqworkq_zone;
+static zone_t kqworkloop_zone;
+#if DEVELOPMENT || DEBUG
+#define KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK (1U << 0)
+#define KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS (1U << 1)
+#define KEVENT_PANIC_BOOT_ARG_INITIALIZED (1U << 31)
-static kq_index_t knote_get_queue_index(struct knote *kn);
-static struct kqtailq *knote_get_queue(struct knote *kn);
-static kq_index_t knote_get_req_index(struct knote *kn);
-static kq_index_t knote_get_qos_index(struct knote *kn);
-static void knote_set_qos_index(struct knote *kn, kq_index_t qos_index);
-static kq_index_t knote_get_qos_override_index(struct knote *kn);
-static kq_index_t knote_get_sync_qos_override_index(struct knote *kn);
-static void knote_set_qos_override_index(struct knote *kn, kq_index_t qos_index, boolean_t override_is_sync);
-static void knote_set_qos_overcommit(struct knote *kn);
+#define KEVENT_PANIC_DEFAULT_VALUE (0)
+static uint32_t
+kevent_debug_flags(void)
+{
+ static uint32_t flags = KEVENT_PANIC_DEFAULT_VALUE;
-static int filt_fileattach(struct knote *kn, struct kevent_internal_s *kev);
-SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = {
- .f_isfd = 1,
- .f_attach = filt_fileattach,
-};
+ if ((flags & KEVENT_PANIC_BOOT_ARG_INITIALIZED) == 0) {
+ uint32_t value = 0;
+ if (!PE_parse_boot_argn("kevent_debug", &value, sizeof(value))) {
+ value = KEVENT_PANIC_DEFAULT_VALUE;
+ }
+ value |= KEVENT_PANIC_BOOT_ARG_INITIALIZED;
+ os_atomic_store(&flags, value, relaxed);
+ }
+ return flags;
+}
+#endif
-static void filt_kqdetach(struct knote *kn);
-static int filt_kqueue(struct knote *kn, long hint);
-static int filt_kqtouch(struct knote *kn, struct kevent_internal_s *kev);
-static int filt_kqprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
-SECURITY_READ_ONLY_EARLY(static struct filterops) kqread_filtops = {
- .f_isfd = 1,
- .f_detach = filt_kqdetach,
- .f_event = filt_kqueue,
- .f_touch = filt_kqtouch,
- .f_process = filt_kqprocess,
-};
+#define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
-/* placeholder for not-yet-implemented filters */
-static int filt_badattach(struct knote *kn, struct kevent_internal_s *kev);
-SECURITY_READ_ONLY_EARLY(static struct filterops) bad_filtops = {
- .f_attach = filt_badattach,
-};
+static int filt_no_attach(struct knote *kn, struct kevent_qos_s *kev);
+static void filt_no_detach(struct knote *kn);
+static int filt_bad_event(struct knote *kn, long hint);
+static int filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev);
+static int filt_bad_process(struct knote *kn, struct kevent_qos_s *kev);
-static int filt_procattach(struct knote *kn, struct kevent_internal_s *kev);
-static void filt_procdetach(struct knote *kn);
-static int filt_proc(struct knote *kn, long hint);
-static int filt_proctouch(struct knote *kn, struct kevent_internal_s *kev);
-static int filt_procprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
-SECURITY_READ_ONLY_EARLY(static struct filterops) proc_filtops = {
- .f_attach = filt_procattach,
- .f_detach = filt_procdetach,
- .f_event = filt_proc,
- .f_touch = filt_proctouch,
- .f_process = filt_procprocess,
+SECURITY_READ_ONLY_EARLY(static struct filterops) bad_filtops = {
+ .f_attach = filt_no_attach,
+ .f_detach = filt_no_detach,
+ .f_event = filt_bad_event,
+ .f_touch = filt_bad_touch,
+ .f_process = filt_bad_process,
};
#if CONFIG_MEMORYSTATUS
extern const struct filterops memorystatus_filtops;
#endif /* CONFIG_MEMORYSTATUS */
-
extern const struct filterops fs_filtops;
-
extern const struct filterops sig_filtops;
-
-static zone_t knote_zone;
-static zone_t kqfile_zone;
-static zone_t kqworkq_zone;
-static zone_t kqworkloop_zone;
-
-#define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
-
-/* Mach portset filter */
extern const struct filterops machport_filtops;
-
-/* User filter */
-static int filt_userattach(struct knote *kn, struct kevent_internal_s *kev);
-static void filt_userdetach(struct knote *kn);
-static int filt_user(struct knote *kn, long hint);
-static int filt_usertouch(struct knote *kn, struct kevent_internal_s *kev);
-static int filt_userprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
-SECURITY_READ_ONLY_EARLY(static struct filterops) user_filtops = {
- .f_attach = filt_userattach,
- .f_detach = filt_userdetach,
- .f_event = filt_user,
- .f_touch = filt_usertouch,
- .f_process = filt_userprocess,
-};
-
-static lck_spin_t _filt_userlock;
-static void filt_userlock(void);
-static void filt_userunlock(void);
-
-/* Workloop filter */
-static bool filt_wlneeds_boost(struct kevent_internal_s *kev);
-static int filt_wlattach(struct knote *kn, struct kevent_internal_s *kev);
-static int filt_wlpost_attach(struct knote *kn, struct kevent_internal_s *kev);
-static void filt_wldetach(struct knote *kn);
-static int filt_wlevent(struct knote *kn, long hint);
-static int filt_wltouch(struct knote *kn, struct kevent_internal_s *kev);
-static int filt_wldrop_and_unlock(struct knote *kn, struct kevent_internal_s *kev);
-static int filt_wlprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
-SECURITY_READ_ONLY_EARLY(static struct filterops) workloop_filtops = {
- .f_needs_boost = filt_wlneeds_boost,
- .f_attach = filt_wlattach,
- .f_post_attach = filt_wlpost_attach,
- .f_detach = filt_wldetach,
- .f_event = filt_wlevent,
- .f_touch = filt_wltouch,
- .f_drop_and_unlock = filt_wldrop_and_unlock,
- .f_process = filt_wlprocess,
-};
-
+extern const struct filterops pipe_nfiltops;
extern const struct filterops pipe_rfiltops;
extern const struct filterops pipe_wfiltops;
extern const struct filterops ptsd_kqops;
extern const struct filterops vnode_filtops;
extern const struct filterops tty_filtops;
+const static struct filterops file_filtops;
+const static struct filterops kqread_filtops;
+const static struct filterops proc_filtops;
const static struct filterops timer_filtops;
+const static struct filterops user_filtops;
+const static struct filterops workloop_filtops;
/*
*
* - Add a new "EVFILT_" option value to bsd/sys/event.h (typically a negative value)
* in the exported section of the header
* - Update the EVFILT_SYSCOUNT value to reflect the new addition
- * - Add a filterops to the sysfilt_ops array. Public filters should be added at the end
+ * - Add a filterops to the sysfilt_ops array. Public filters should be added at the end
* of the Public Filters section in the array.
* Private filters:
* - Add a new "EVFILT_" value to bsd/sys/event.h (typically a positive value)
* in the XNU_KERNEL_PRIVATE section of the header
* - Update the EVFILTID_MAX value to reflect the new addition
- * - Add a filterops to the sysfilt_ops. Private filters should be added at the end of
- * the Private filters section of the array.
+ * - Add a filterops to the sysfilt_ops. Private filters should be added at the end of
+ * the Private filters section of the array.
*/
-SECURITY_READ_ONLY_EARLY(static struct filterops *) sysfilt_ops[EVFILTID_MAX] = {
+static_assert(EVFILTID_MAX < UINT8_MAX, "kn_filtid expects this to be true");
+static const struct filterops * const sysfilt_ops[EVFILTID_MAX] = {
/* Public Filters */
- [~EVFILT_READ] = &file_filtops,
- [~EVFILT_WRITE] = &file_filtops,
- [~EVFILT_AIO] = &bad_filtops,
- [~EVFILT_VNODE] = &file_filtops,
- [~EVFILT_PROC] = &proc_filtops,
- [~EVFILT_SIGNAL] = &sig_filtops,
- [~EVFILT_TIMER] = &timer_filtops,
- [~EVFILT_MACHPORT] = &machport_filtops,
- [~EVFILT_FS] = &fs_filtops,
- [~EVFILT_USER] = &user_filtops,
- &bad_filtops,
- &bad_filtops,
- [~EVFILT_SOCK] = &file_filtops,
+ [~EVFILT_READ] = &file_filtops,
+ [~EVFILT_WRITE] = &file_filtops,
+ [~EVFILT_AIO] = &bad_filtops,
+ [~EVFILT_VNODE] = &file_filtops,
+ [~EVFILT_PROC] = &proc_filtops,
+ [~EVFILT_SIGNAL] = &sig_filtops,
+ [~EVFILT_TIMER] = &timer_filtops,
+ [~EVFILT_MACHPORT] = &machport_filtops,
+ [~EVFILT_FS] = &fs_filtops,
+ [~EVFILT_USER] = &user_filtops,
+ [~EVFILT_UNUSED_11] = &bad_filtops,
+ [~EVFILT_VM] = &bad_filtops,
+ [~EVFILT_SOCK] = &file_filtops,
#if CONFIG_MEMORYSTATUS
- [~EVFILT_MEMORYSTATUS] = &memorystatus_filtops,
+ [~EVFILT_MEMORYSTATUS] = &memorystatus_filtops,
#else
- [~EVFILT_MEMORYSTATUS] = &bad_filtops,
+ [~EVFILT_MEMORYSTATUS] = &bad_filtops,
#endif
- [~EVFILT_EXCEPT] = &file_filtops,
-
+ [~EVFILT_EXCEPT] = &file_filtops,
[~EVFILT_WORKLOOP] = &workloop_filtops,
/* Private filters */
- [EVFILTID_KQREAD] = &kqread_filtops,
- [EVFILTID_PIPE_R] = &pipe_rfiltops,
- [EVFILTID_PIPE_W] = &pipe_wfiltops,
- [EVFILTID_PTSD] = &ptsd_kqops,
- [EVFILTID_SOREAD] = &soread_filtops,
- [EVFILTID_SOWRITE] = &sowrite_filtops,
- [EVFILTID_SCK] = &sock_filtops,
- [EVFILTID_SOEXCEPT] = &soexcept_filtops,
- [EVFILTID_SPEC] = &spec_filtops,
- [EVFILTID_BPFREAD] = &bpfread_filtops,
- [EVFILTID_NECP_FD] = &necp_fd_rfiltops,
- [EVFILTID_FSEVENT] = &fsevent_filtops,
- [EVFILTID_VN] = &vnode_filtops,
- [EVFILTID_TTY] = &tty_filtops,
- [EVFILTID_PTMX] = &ptmx_kqops,
+ [EVFILTID_KQREAD] = &kqread_filtops,
+ [EVFILTID_PIPE_N] = &pipe_nfiltops,
+ [EVFILTID_PIPE_R] = &pipe_rfiltops,
+ [EVFILTID_PIPE_W] = &pipe_wfiltops,
+ [EVFILTID_PTSD] = &ptsd_kqops,
+ [EVFILTID_SOREAD] = &soread_filtops,
+ [EVFILTID_SOWRITE] = &sowrite_filtops,
+ [EVFILTID_SCK] = &sock_filtops,
+ [EVFILTID_SOEXCEPT] = &soexcept_filtops,
+ [EVFILTID_SPEC] = &spec_filtops,
+ [EVFILTID_BPFREAD] = &bpfread_filtops,
+ [EVFILTID_NECP_FD] = &necp_fd_rfiltops,
+ [EVFILTID_FSEVENT] = &fsevent_filtops,
+ [EVFILTID_VN] = &vnode_filtops,
+ [EVFILTID_TTY] = &tty_filtops,
+ [EVFILTID_PTMX] = &ptmx_kqops,
+
+ /* fake filter for detached knotes, keep last */
+ [EVFILTID_DETACHED] = &bad_filtops,
};
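/*
 * Illustrative sketch (editor's note, not part of this change): what the
 * registration steps described above sysfilt_ops would look like for a
 * hypothetical private filter. EVFILTID_EXAMPLE and the example_* names are
 * made up; a real addition would also bump EVFILTID_MAX in bsd/sys/event.h.
 */
#if 0
static int
filt_example_attach(__unused struct knote *kn, __unused struct kevent_qos_s *kev)
{
	/* nothing to hook up; the subsystem activates us later via KNOTE() */
	return 0;
}

static int
filt_example_event(__unused struct knote *kn, long hint)
{
	/* activate whenever the posting subsystem passes a non-zero hint */
	return hint ? FILTER_ACTIVE : 0;
}

static int
filt_example_process(struct knote *kn, struct kevent_qos_s *kev)
{
	/* deliver unconditionally, for brevity */
	knote_fill_kevent(kn, kev, 0);
	return 1;
}

SECURITY_READ_ONLY_EARLY(static struct filterops) example_filtops = {
	.f_attach  = filt_example_attach,
	.f_detach  = filt_no_detach,
	.f_event   = filt_example_event,
	.f_process = filt_example_process,
};

/* ...and, at the end of the Private filters section of sysfilt_ops: */
/*	[EVFILTID_EXAMPLE]	= &example_filtops,	*/
#endif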
/* waitq prepost callback */
-void waitq_set__CALLING_PREPOST_HOOK__(void *kq_hook, void *knote_hook, int qos);
-
-#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
-#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 /* pthread event manager bit */
-#endif
-#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
-#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 /* request overcommit threads */
-#endif
-#ifndef _PTHREAD_PRIORITY_QOS_CLASS_MASK
-#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x003fff00 /* QoS class mask */
-#endif
-#ifndef _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32
-#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32 8
-#endif
+void waitq_set__CALLING_PREPOST_HOOK__(waitq_set_prepost_hook_t *kq_hook);
-static inline __kdebug_only
-uintptr_t
-kqr_thread_id(struct kqrequest *kqr)
+static inline bool
+kqr_thread_bound(workq_threadreq_t kqr)
{
- return (uintptr_t)thread_tid(kqr->kqr_thread);
+ return kqr->tr_state == WORKQ_TR_STATE_BOUND;
}
-static inline
-boolean_t is_workqueue_thread(thread_t thread)
+static inline bool
+kqr_thread_requested_pending(workq_threadreq_t kqr)
{
- return (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE);
+ workq_tr_state_t tr_state = kqr->tr_state;
+ return tr_state > WORKQ_TR_STATE_IDLE && tr_state < WORKQ_TR_STATE_BOUND;
}
-static inline
-void knote_canonicalize_kevent_qos(struct knote *kn)
+static inline bool
+kqr_thread_requested(workq_threadreq_t kqr)
{
- struct kqueue *kq = knote_get_kq(kn);
- unsigned long canonical;
-
- if ((kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0)
- return;
-
- /* preserve manager and overcommit flags in this case */
- canonical = pthread_priority_canonicalize(kn->kn_qos, FALSE);
- kn->kn_qos = (qos_t)canonical;
+ return kqr->tr_state != WORKQ_TR_STATE_IDLE;
}
-static inline
-kq_index_t qos_index_from_qos(struct knote *kn, qos_t qos, boolean_t propagation)
+static inline thread_t
+kqr_thread_fast(workq_threadreq_t kqr)
{
- struct kqueue *kq = knote_get_kq(kn);
- kq_index_t qos_index;
- unsigned long flags = 0;
-
- if ((kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0)
- return QOS_INDEX_KQFILE;
-
- qos_index = (kq_index_t)thread_qos_from_pthread_priority(
- (unsigned long)qos, &flags);
-
- if (kq->kq_state & KQ_WORKQ) {
- /* workq kqueues support requesting a manager thread (non-propagation) */
- if (!propagation && (flags & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG))
- return KQWQ_QOS_MANAGER;
- }
-
- return qos_index;
+ assert(kqr_thread_bound(kqr));
+ return kqr->tr_thread;
}
-static inline
-qos_t qos_from_qos_index(kq_index_t qos_index)
+static inline thread_t
+kqr_thread(workq_threadreq_t kqr)
{
- /* should only happen for KQ_WORKQ */
- if (qos_index == KQWQ_QOS_MANAGER)
- return _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-
- if (qos_index == 0)
- return THREAD_QOS_UNSPECIFIED;
-
- /* Should have support from pthread kext support */
- return (1 << (qos_index - 1 +
- _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32));
+ return kqr_thread_bound(kqr) ? kqr->tr_thread : THREAD_NULL;
}
-/* kqr lock must be held */
-static inline
-unsigned long pthread_priority_for_kqrequest(
- struct kqrequest *kqr,
- kq_index_t qos_index)
+static inline struct kqworkloop *
+kqr_kqworkloop(workq_threadreq_t kqr)
{
- unsigned long priority = qos_from_qos_index(qos_index);
- if (kqr->kqr_state & KQR_THOVERCOMMIT) {
- priority |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
+ if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
+ return __container_of(kqr, struct kqworkloop, kqwl_request);
}
- return priority;
+ return NULL;
}
-static inline
-kq_index_t qos_index_for_servicer(int qos_class, thread_t thread, int flags)
+static inline kqueue_t
+kqr_kqueue(proc_t p, workq_threadreq_t kqr)
{
-#pragma unused(thread)
- kq_index_t qos_index;
-
- if (flags & KEVENT_FLAG_WORKQ_MANAGER)
- return KQWQ_QOS_MANAGER;
-
- qos_index = (kq_index_t)qos_class;
- assert(qos_index > 0 && qos_index < KQWQ_QOS_MANAGER);
-
- return qos_index;
+ kqueue_t kqu;
+ if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
+ kqu.kqwl = kqr_kqworkloop(kqr);
+ } else {
+ kqu.kqwq = p->p_fd->fd_wqkqueue;
+ assert(kqr >= kqu.kqwq->kqwq_request &&
+ kqr < kqu.kqwq->kqwq_request + KQWQ_NBUCKETS);
+ }
+ return kqu;
}
/*
* kqueue/note lock implementations
*
* The kqueue lock guards the kq state, the state of its queues,
- * and the kqueue-aware status and use counts of individual knotes.
+ * and the kqueue-aware status and locks of individual knotes.
*
* The kqueue workq lock is used to protect state guarding the
* interaction of the kqueue with the workq. This state cannot
* by calling the filter to get a [consistent] snapshot of that
* data.
*/
-lck_grp_attr_t * kq_lck_grp_attr;
-lck_grp_t * kq_lck_grp;
-lck_attr_t * kq_lck_attr;
+static lck_grp_attr_t *kq_lck_grp_attr;
+static lck_grp_t *kq_lck_grp;
+static lck_attr_t *kq_lck_attr;
static inline void
-kqlock(struct kqueue *kq)
+kqlock(kqueue_t kqu)
{
- lck_spin_lock(&kq->kq_lock);
+ lck_spin_lock(&kqu.kq->kq_lock);
}
static inline void
-kqlock_held(__assert_only struct kqueue *kq)
+kqlock_held(__assert_only kqueue_t kqu)
{
- LCK_SPIN_ASSERT(&kq->kq_lock, LCK_ASSERT_OWNED);
+ LCK_SPIN_ASSERT(&kqu.kq->kq_lock, LCK_ASSERT_OWNED);
}
static inline void
-kqunlock(struct kqueue *kq)
+kqunlock(kqueue_t kqu)
{
- lck_spin_unlock(&kq->kq_lock);
+ lck_spin_unlock(&kqu.kq->kq_lock);
}
static inline void
-knhash_lock(proc_t p)
+knhash_lock(struct filedesc *fdp)
{
- lck_mtx_lock(&p->p_fd->fd_knhashlock);
+ lck_mtx_lock(&fdp->fd_knhashlock);
}
static inline void
-knhash_unlock(proc_t p)
+knhash_unlock(struct filedesc *fdp)
{
- lck_mtx_unlock(&p->p_fd->fd_knhashlock);
+ lck_mtx_unlock(&fdp->fd_knhashlock);
}
-
-/*
- * Convert a kq lock to a knote use referece.
- *
- * If the knote is being dropped, or has
- * vanished, we can't get a use reference.
- * Just return with it still locked.
- *
- * - kq locked at entry
- * - unlock on exit if we get the use reference
- */
-static int
-kqlock2knoteuse(struct kqueue *kq, struct knote *kn, int flags)
+/* wait event for knote locks */
+static inline event_t
+knote_lock_wev(struct knote *kn)
{
- if (kn->kn_status & (KN_DROPPING | KN_VANISHED))
- return (0);
+ return (event_t)(&kn->kn_hook);
+}
- assert(kn->kn_status & KN_ATTACHED);
- kn->kn_inuse++;
- if (flags & KNUSE_BOOST) {
- set_thread_rwlock_boost();
- }
- kqunlock(kq);
- return (1);
+/* wait event for kevent_register_wait_* */
+static inline event64_t
+knote_filt_wev64(struct knote *kn)
+{
+ /* kdp_workloop_sync_wait_find_owner knows about this */
+ return CAST_EVENT64_T(kn);
}
-/*
- * - kq locked at entry
- * - kq unlocked at exit
- */
-__disable_tail_calls
-static wait_result_t
-knoteusewait(struct kqueue *kq, struct knote *kn)
-{
- kn->kn_status |= KN_USEWAIT;
- waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
- CAST_EVENT64_T(&kn->kn_status),
- THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
- kqunlock(kq);
- return thread_block(THREAD_CONTINUE_NULL);
+/* wait event for knote_post/knote_drop */
+static inline event64_t
+knote_post_wev64(struct knote *kn)
+{
+ return CAST_EVENT64_T(&kn->kn_kevent);
}
-static bool
-knoteuse_needs_boost(struct knote *kn, struct kevent_internal_s *kev)
+/*!
+ * @function knote_has_qos
+ *
+ * @brief
+ * Whether the knote has a regular QoS.
+ *
+ * @discussion
+ * kn_qos_override is:
+ * - 0 on kqfiles
+ * - THREAD_QOS_LAST for special buckets (stayactive, manager)
+ *
+ * Other values mean the knote participates in QoS propagation.
+ */
+static inline bool
+knote_has_qos(struct knote *kn)
{
- if (knote_fops(kn)->f_needs_boost) {
- return knote_fops(kn)->f_needs_boost(kev);
- }
- return false;
+ return kn->kn_qos_override > 0 && kn->kn_qos_override < THREAD_QOS_LAST;
}
+#pragma mark knote locks
+
/*
- * Convert from a knote use reference back to kq lock.
+ * Enum used by the knote_lock_* functions.
*
- * Drop a use reference and wake any waiters if
- * this is the last one.
+ * KNOTE_KQ_LOCK_ALWAYS
+ * The function will always return with the kq lock held.
*
- * If someone is trying to drop the knote, but the
- * caller has events they must deliver, take
- * responsibility for the drop later - and wake the
- * other attempted dropper in a manner that informs
- * him of the transfer of responsibility.
+ * KNOTE_KQ_LOCK_ON_SUCCESS
+ * The function will return with the kq lock held if it was successful
+ * (knote_lock() is the only function that can fail).
*
- * The exit return indicates if the knote is still alive
- * (or if not, the other dropper has been given the green
- * light to drop it).
+ * KNOTE_KQ_LOCK_ON_FAILURE
+ * The function will return with the kq lock held if it was unsuccessful
+ * (knote_lock() is the only function that can fail).
*
- * The kqueue lock is re-taken unconditionally.
+ * KNOTE_KQ_UNLOCK:
+ * The function returns with the kq unlocked.
*/
-static int
-knoteuse2kqlock(struct kqueue *kq, struct knote *kn, int flags)
-{
- int dropped = 0;
- int steal_drop = (flags & KNUSE_STEAL_DROP);
+enum kqlocking {
+ KNOTE_KQ_LOCK_ALWAYS,
+ KNOTE_KQ_LOCK_ON_SUCCESS,
+ KNOTE_KQ_LOCK_ON_FAILURE,
+ KNOTE_KQ_UNLOCK,
+};
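/*
 * Illustrative sketch (editor's note, not part of this change): the typical
 * call pattern for the modes above. example_operate_locked() is hypothetical
 * and assumes the KNOTE_LOCK_CTX() stack-context helper from sys/eventvar.h.
 * knote_lock() is the only call in this family that can fail.
 */
#if 0
static void
example_operate_locked(struct kqueue *kq, struct knote *kn)
{
	KNOTE_LOCK_CTX(knlc);

	kqlock(kq);
	if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
		/* the knote was concurrently dropped; kq was left unlocked */
		return;
	}
	/*
	 * kq lock and knote lock both held: the knote can't be dropped or
	 * re-registered underneath us while we operate on it.
	 */
	knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
	/* KNOTE_KQ_UNLOCK: we return with the kq lock dropped */
}
#endif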
- kqlock(kq);
- if (flags & KNUSE_BOOST) {
- clear_thread_rwlock_boost();
+static struct knote_lock_ctx *
+knote_lock_ctx_find(kqueue_t kqu, struct knote *kn)
+{
+ struct knote_lock_ctx *ctx;
+ LIST_FOREACH(ctx, &kqu.kq->kq_knlocks, knlc_link) {
+ if (ctx->knlc_knote == kn) {
+ return ctx;
+ }
}
+ panic("knote lock context not found: %p", kn);
+ __builtin_trap();
+}
- if (--kn->kn_inuse == 0) {
-
- if ((kn->kn_status & KN_ATTACHING) != 0) {
- kn->kn_status &= ~KN_ATTACHING;
- }
+/* slowpath of knote_lock() */
+__attribute__((noinline))
+static bool __result_use_check
+knote_lock_slow(kqueue_t kqu, struct knote *kn,
+ struct knote_lock_ctx *knlc, int kqlocking)
+{
+ struct knote_lock_ctx *owner_lc;
+ struct uthread *uth = current_uthread();
+ wait_result_t wr;
- if ((kn->kn_status & KN_USEWAIT) != 0) {
- wait_result_t result;
+ kqlock_held(kqu);
- /* If we need to, try and steal the drop */
- if (kn->kn_status & KN_DROPPING) {
- if (steal_drop && !(kn->kn_status & KN_STOLENDROP)) {
- kn->kn_status |= KN_STOLENDROP;
- } else {
- dropped = 1;
- }
- }
+ owner_lc = knote_lock_ctx_find(kqu, kn);
+#if DEBUG || DEVELOPMENT
+ knlc->knlc_state = KNOTE_LOCK_CTX_WAITING;
+#endif
+ owner_lc->knlc_waiters++;
- /* wakeup indicating if ANY USE stole the drop */
- result = (kn->kn_status & KN_STOLENDROP) ?
- THREAD_RESTART : THREAD_AWAKENED;
+ /*
+ * Make our lock context visible to knote_unlock()
+ */
+ uth->uu_knlock = knlc;
- kn->kn_status &= ~KN_USEWAIT;
- waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
- CAST_EVENT64_T(&kn->kn_status),
- result,
- WAITQ_ALL_PRIORITIES);
- } else {
- /* should have seen use-wait if dropping with use refs */
- assert((kn->kn_status & (KN_DROPPING|KN_STOLENDROP)) == 0);
- }
+ wr = lck_spin_sleep_with_inheritor(&kqu.kq->kq_lock, LCK_SLEEP_UNLOCK,
+ knote_lock_wev(kn), owner_lc->knlc_thread,
+ THREAD_UNINT | THREAD_WAIT_NOREPORT, TIMEOUT_WAIT_FOREVER);
- } else if (kn->kn_status & KN_DROPPING) {
- /* not the last ref but want to steal a drop if present */
- if (steal_drop && ((kn->kn_status & KN_STOLENDROP) == 0)) {
- kn->kn_status |= KN_STOLENDROP;
+ if (wr == THREAD_RESTART) {
+ /*
+ * We haven't been woken up by knote_unlock() but by knote_unlock_cancel().
+ * We need to clean up the state since no one else did.
+ */
+ uth->uu_knlock = NULL;
+#if DEBUG || DEVELOPMENT
+ assert(knlc->knlc_state == KNOTE_LOCK_CTX_WAITING);
+ knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
+#endif
- /* but we now have to wait to be the last ref */
- knoteusewait(kq, kn);
- kqlock(kq);
- } else {
- dropped = 1;
+ if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
+ kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
+ kqlock(kqu);
+ }
+ return false;
+ } else {
+ if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
+ kqlocking == KNOTE_KQ_LOCK_ON_SUCCESS) {
+ kqlock(kqu);
+#if DEBUG || DEVELOPMENT
+ /*
+ * This state is set under the lock so we can't
+ * really assert this unless we hold the lock.
+ */
+ assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
+#endif
}
+ return true;
}
-
- return (!dropped);
}
/*
- * Convert a kq lock to a knote use reference
- * (for the purpose of detaching AND vanishing it).
- *
- * If the knote is being dropped, we can't get
- * a detach reference, so wait for the knote to
- * finish dropping before returning.
+ * Attempts to take the "knote" lock.
*
- * If the knote is being used for other purposes,
- * we cannot detach it until those uses are done
- * as well. Again, just wait for them to finish
- * (caller will start over at lookup).
+ * Called with the kqueue lock held.
*
- * - kq locked at entry
- * - unlocked on exit
+ * Returns true if the knote lock is acquired, false if it has been dropped
*/
-static int
-kqlock2knotedetach(struct kqueue *kq, struct knote *kn, int flags)
+static bool __result_use_check
+knote_lock(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc,
+ enum kqlocking kqlocking)
{
- if ((kn->kn_status & KN_DROPPING) || kn->kn_inuse) {
- /* have to wait for dropper or current uses to go away */
- knoteusewait(kq, kn);
- return (0);
- }
- assert((kn->kn_status & KN_VANISHED) == 0);
- assert(kn->kn_status & KN_ATTACHED);
- kn->kn_status &= ~KN_ATTACHED;
- kn->kn_status |= KN_VANISHED;
- if (flags & KNUSE_BOOST) {
- clear_thread_rwlock_boost();
+ kqlock_held(kqu);
+
+#if DEBUG || DEVELOPMENT
+ assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED);
+#endif
+ knlc->knlc_knote = kn;
+ knlc->knlc_thread = current_thread();
+ knlc->knlc_waiters = 0;
+
+ if (__improbable(kn->kn_status & KN_LOCKED)) {
+ return knote_lock_slow(kqu, kn, knlc, kqlocking);
}
- kn->kn_inuse++;
- kqunlock(kq);
- return (1);
-}
-/*
- * Convert a kq lock to a knote drop reference.
- *
- * If the knote is in use, wait for the use count
- * to subside. We first mark our intention to drop
- * it - keeping other users from "piling on."
- * If we are too late, we have to wait for the
- * other drop to complete.
- *
- * - kq locked at entry
- * - always unlocked on exit.
- * - caller can't hold any locks that would prevent
- * the other dropper from completing.
- */
-static int
-kqlock2knotedrop(struct kqueue *kq, struct knote *kn)
-{
- int oktodrop;
- wait_result_t result;
+ /*
+ * When a knote is dropped, the knote lock is taken before
+ * KN_DROPPING is set, and then the knote will be removed from any
+ * hash table that references it before the lock is canceled.
+ */
+ assert((kn->kn_status & KN_DROPPING) == 0);
+ LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, knlc, knlc_link);
+ kn->kn_status |= KN_LOCKED;
+#if DEBUG || DEVELOPMENT
+ knlc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
+#endif
- oktodrop = ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) == 0);
- /* if another thread is attaching, they will become the dropping thread */
- kn->kn_status |= KN_DROPPING;
- knote_unsuppress(kn);
- knote_dequeue(kn);
- if (oktodrop) {
- if (kn->kn_inuse == 0) {
- kqunlock(kq);
- return (oktodrop);
- }
+ if (kqlocking == KNOTE_KQ_UNLOCK ||
+ kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
+ kqunlock(kqu);
}
- result = knoteusewait(kq, kn);
- /* THREAD_RESTART == another thread stole the knote drop */
- return (result == THREAD_AWAKENED);
+ return true;
}
-#if 0
/*
- * Release a knote use count reference.
+ * Unlocks a knote successfully locked with knote_lock().
+ *
+ * Called with the kqueue lock held.
+ *
+ * Returns with the kqueue lock held according to KNOTE_KQ_* mode.
*/
static void
-knote_put(struct knote *kn)
+knote_unlock(kqueue_t kqu, struct knote *kn,
+ struct knote_lock_ctx *knlc, enum kqlocking kqlocking)
{
- struct kqueue *kq = knote_get_kq(kn);
+ kqlock_held(kqu);
- kqlock(kq);
- if (--kn->kn_inuse == 0) {
- if ((kn->kn_status & KN_USEWAIT) != 0) {
- kn->kn_status &= ~KN_USEWAIT;
- waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
- CAST_EVENT64_T(&kn->kn_status),
- THREAD_AWAKENED,
- WAITQ_ALL_PRIORITIES);
- }
- }
- kqunlock(kq);
-}
+ assert(knlc->knlc_knote == kn);
+ assert(kn->kn_status & KN_LOCKED);
+#if DEBUG || DEVELOPMENT
+ assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
#endif
-static int
-filt_fileattach(struct knote *kn, struct kevent_internal_s *kev)
-{
- return (fo_kqfilter(kn->kn_fp, kn, kev, vfs_context_current()));
-}
+ LIST_REMOVE(knlc, knlc_link);
-#define f_flag f_fglob->fg_flag
-#define f_msgcount f_fglob->fg_msgcount
-#define f_cred f_fglob->fg_cred
-#define f_ops f_fglob->fg_ops
-#define f_offset f_fglob->fg_offset
-#define f_data f_fglob->fg_data
+ if (knlc->knlc_waiters) {
+ thread_t thread = THREAD_NULL;
-static void
-filt_kqdetach(struct knote *kn)
-{
- struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
- struct kqueue *kq = &kqf->kqf_kqueue;
+ wakeup_one_with_inheritor(knote_lock_wev(kn), THREAD_AWAKENED,
+ LCK_WAKE_DEFAULT, &thread);
- kqlock(kq);
- KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
- kqunlock(kq);
-}
+ /*
+ * knote_lock_slow() publishes the lock context of waiters
+ * in uthread::uu_knlock.
+ *
+ * Reach out and make this context the new owner.
+ */
+ struct uthread *ut = get_bsdthread_info(thread);
+ struct knote_lock_ctx *next_owner_lc = ut->uu_knlock;
-/*ARGSUSED*/
-static int
-filt_kqueue(struct knote *kn, __unused long hint)
-{
- struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
- int count;
+ assert(next_owner_lc->knlc_knote == kn);
+ next_owner_lc->knlc_waiters = knlc->knlc_waiters - 1;
+ LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, next_owner_lc, knlc_link);
+#if DEBUG || DEVELOPMENT
+ next_owner_lc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
+#endif
+ ut->uu_knlock = NULL;
+ thread_deallocate_safe(thread);
+ } else {
+ kn->kn_status &= ~KN_LOCKED;
+ }
- count = kq->kq_count;
- return (count > 0);
+ if ((kn->kn_status & KN_MERGE_QOS) && !(kn->kn_status & KN_POSTING)) {
+ /*
+ * No f_event() in flight anymore, we can leave QoS "Merge" mode
+ *
+ * See knote_adjust_qos()
+ */
+ kn->kn_status &= ~KN_MERGE_QOS;
+ }
+ if (kqlocking == KNOTE_KQ_UNLOCK) {
+ kqunlock(kqu);
+ }
+#if DEBUG || DEVELOPMENT
+ knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
+#endif
}
-static int
-filt_kqtouch(struct knote *kn, struct kevent_internal_s *kev)
+/*
+ * Aborts all waiters for a knote lock, and unlock the knote.
+ *
+ * Called with the kqueue lock held.
+ *
+ * Returns with the kqueue unlocked.
+ */
+static void
+knote_unlock_cancel(struct kqueue *kq, struct knote *kn,
+ struct knote_lock_ctx *knlc)
{
-#pragma unused(kev)
- struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
- int res;
+ kqlock_held(kq);
- kqlock(kq);
- kn->kn_data = kq->kq_count;
- if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
- kn->kn_udata = kev->udata;
- res = (kn->kn_data > 0);
+ assert(knlc->knlc_knote == kn);
+ assert(kn->kn_status & KN_LOCKED);
+ assert(kn->kn_status & KN_DROPPING);
+ LIST_REMOVE(knlc, knlc_link);
+ kn->kn_status &= ~KN_LOCKED;
kqunlock(kq);
- return res;
+ if (knlc->knlc_waiters) {
+ wakeup_all_with_inheritor(knote_lock_wev(kn), THREAD_RESTART);
+ }
+#if DEBUG || DEVELOPMENT
+ knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
+#endif
}
-static int
-filt_kqprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
+/*
+ * Call the f_event hook of a given filter.
+ *
+ * Takes a use count to protect against concurrent drops.
+ */
+static void
+knote_post(struct knote *kn, long hint)
{
-#pragma unused(data)
- struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
- int res;
+ struct kqueue *kq = knote_get_kq(kn);
+ int dropping, result;
kqlock(kq);
- kn->kn_data = kq->kq_count;
- res = (kn->kn_data > 0);
- if (res) {
- *kev = kn->kn_kevent;
- if (kn->kn_flags & EV_CLEAR)
- kn->kn_data = 0;
+
+ if (__improbable(kn->kn_status & (KN_DROPPING | KN_VANISHED))) {
+ return kqunlock(kq);
}
- kqunlock(kq);
- return res;
-}
+ if (__improbable(kn->kn_status & KN_POSTING)) {
+ panic("KNOTE() called concurrently on knote %p", kn);
+ }
-#pragma mark EVFILT_PROC
+ kn->kn_status |= KN_POSTING;
-static int
-filt_procattach(struct knote *kn, __unused struct kevent_internal_s *kev)
-{
- struct proc *p;
+ kqunlock(kq);
+ result = filter_call(knote_fops(kn), f_event(kn, hint));
+ kqlock(kq);
- assert(PID_MAX < NOTE_PDATAMASK);
+ dropping = (kn->kn_status & KN_DROPPING);
- if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) {
- kn->kn_flags = EV_ERROR;
- kn->kn_data = ENOTSUP;
- return 0;
+ if (!dropping && (result & FILTER_ACTIVE)) {
+ knote_activate(kq, kn, result);
}
- p = proc_find(kn->kn_id);
- if (p == NULL) {
- kn->kn_flags = EV_ERROR;
- kn->kn_data = ESRCH;
- return 0;
+ if ((kn->kn_status & KN_LOCKED) == 0) {
+ /*
+ * There's no other f_* call in flight, we can leave QoS "Merge" mode.
+ *
+ * See knote_adjust_qos()
+ */
+ kn->kn_status &= ~(KN_POSTING | KN_MERGE_QOS);
+ } else {
+ kn->kn_status &= ~KN_POSTING;
}
- const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
+ if (__improbable(dropping)) {
+ waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, knote_post_wev64(kn),
+ THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
+ }
- if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits)
+ kqunlock(kq);
+}
+
+/*
+ * Called by knote_drop() to wait for the last f_event() caller to be done.
+ *
+ * - kq locked at entry
+ * - kq unlocked at exit
+ */
+static void
+knote_wait_for_post(struct kqueue *kq, struct knote *kn)
+{
+ wait_result_t wr = THREAD_NOT_WAITING;
+
+ kqlock_held(kq);
+
+ assert(kn->kn_status & KN_DROPPING);
+
+ if (kn->kn_status & KN_POSTING) {
+ wr = waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
+ knote_post_wev64(kn), THREAD_UNINT | THREAD_WAIT_NOREPORT,
+ TIMEOUT_WAIT_FOREVER);
+ }
+ kqunlock(kq);
+ if (wr == THREAD_WAITING) {
+ thread_block(THREAD_CONTINUE_NULL);
+ }
+}
+
+#pragma mark knote helpers for filters
+
+OS_ALWAYS_INLINE
+void
+knote_set_error(struct knote *kn, int error)
+{
+ kn->kn_flags |= EV_ERROR;
+ kn->kn_sdata = error;
+}
+
+OS_ALWAYS_INLINE
+int64_t
+knote_low_watermark(const struct knote *kn)
+{
+ return (kn->kn_sfflags & NOTE_LOWAT) ? kn->kn_sdata : 1;
+}
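/*
 * Illustrative sketch (editor's note, not part of this change): how a
 * data-carrying filter typically uses the helper above from its f_event
 * hook. filt_example_readable() and `bytes_ready` are made-up stand-ins
 * for whatever the subsystem actually tracks.
 */
#if 0
static int
filt_example_readable(struct knote *kn, int64_t bytes_ready)
{
	/* NOTE_LOWAT raises the activation threshold from the default of 1 */
	return (bytes_ready >= knote_low_watermark(kn)) ? FILTER_ACTIVE : 0;
}
#endif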
+
+/*!
+ * @function knote_fill_kevent_with_sdata
+ *
+ * @brief
+ * Fills in a kevent from the current content of a knote.
+ *
+ * @discussion
+ * This is meant to be called from filter's f_process hooks.
+ * The kevent data is filled with kn->kn_sdata.
+ *
+ * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
+ *
+ * Using knote_fill_kevent is typically preferred.
+ */
+OS_ALWAYS_INLINE
+void
+knote_fill_kevent_with_sdata(struct knote *kn, struct kevent_qos_s *kev)
+{
+#define knote_assert_aliases(name1, offs1, name2) \
+ static_assert(offsetof(struct kevent_qos_s, name1) + offs1 == \
+ offsetof(struct kevent_internal_s, name2), \
+ "kevent_qos_s::" #name1 " and kevent_internal_s::" #name2 "need to alias")
+ /*
+ * All the code makes assumptions about this aliasing,
+ * so make sure we fail the build if we ever ever ever break them.
+ */
+ knote_assert_aliases(ident, 0, kei_ident);
+#ifdef __LITTLE_ENDIAN__
+ knote_assert_aliases(filter, 0, kei_filter); // non trivial overlap
+ knote_assert_aliases(filter, 1, kei_filtid); // non trivial overlap
+#else
+ knote_assert_aliases(filter, 0, kei_filtid); // non trivial overlap
+ knote_assert_aliases(filter, 1, kei_filter); // non trivial overlap
+#endif
+ knote_assert_aliases(flags, 0, kei_flags);
+ knote_assert_aliases(qos, 0, kei_qos);
+ knote_assert_aliases(udata, 0, kei_udata);
+ knote_assert_aliases(fflags, 0, kei_fflags);
+ knote_assert_aliases(xflags, 0, kei_sfflags); // non trivial overlap
+ knote_assert_aliases(data, 0, kei_sdata); // non trivial overlap
+ knote_assert_aliases(ext, 0, kei_ext);
+#undef knote_assert_aliases
+
+ /*
+ * Fix the differences between kevent_qos_s and kevent_internal_s:
+ * - xflags is where kn_sfflags lives, we need to zero it
+ * - fixup the high bits of `filter` where kn_filtid lives
+ */
+ *kev = *(struct kevent_qos_s *)&kn->kn_kevent;
+ kev->xflags = 0;
+ kev->filter |= 0xff00;
+ if (kn->kn_flags & EV_CLEAR) {
+ kn->kn_fflags = 0;
+ }
+}
+
+/*!
+ * @function knote_fill_kevent
+ *
+ * @brief
+ * Fills in a kevent from the current content of a knote.
+ *
+ * @discussion
+ * This is meant to be called from filter's f_process hooks.
+ * The kevent data is filled with the passed in data.
+ *
+ * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
+ */
+OS_ALWAYS_INLINE
+void
+knote_fill_kevent(struct knote *kn, struct kevent_qos_s *kev, int64_t data)
+{
+ knote_fill_kevent_with_sdata(kn, kev);
+ kev->filter = kn->kn_filter;
+ kev->data = data;
+}
+
+
+#pragma mark file_filtops
+
+static int
+filt_fileattach(struct knote *kn, struct kevent_qos_s *kev)
+{
+ return fo_kqfilter(kn->kn_fp, kn, kev);
+}
+
+SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = {
+ .f_isfd = 1,
+ .f_attach = filt_fileattach,
+};
+
+#pragma mark kqread_filtops
+
+#define f_flag f_fglob->fg_flag
+#define f_ops f_fglob->fg_ops
+#define f_data f_fglob->fg_data
+#define f_lflags f_fglob->fg_lflags
+
+static void
+filt_kqdetach(struct knote *kn)
+{
+ struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
+ struct kqueue *kq = &kqf->kqf_kqueue;
+
+ kqlock(kq);
+ KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
+ kqunlock(kq);
+}
+
+static int
+filt_kqueue(struct knote *kn, __unused long hint)
+{
+ struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
+
+ return kq->kq_count > 0;
+}
+
+static int
+filt_kqtouch(struct knote *kn, struct kevent_qos_s *kev)
+{
+#pragma unused(kev)
+ struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
+ int res;
+
+ kqlock(kq);
+ res = (kq->kq_count > 0);
+ kqunlock(kq);
+
+ return res;
+}
+
+static int
+filt_kqprocess(struct knote *kn, struct kevent_qos_s *kev)
+{
+ struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
+ int res = 0;
+
+ kqlock(kq);
+ if (kq->kq_count) {
+ knote_fill_kevent(kn, kev, kq->kq_count);
+ res = 1;
+ }
+ kqunlock(kq);
+
+ return res;
+}
+
+SECURITY_READ_ONLY_EARLY(static struct filterops) kqread_filtops = {
+ .f_isfd = 1,
+ .f_detach = filt_kqdetach,
+ .f_event = filt_kqueue,
+ .f_touch = filt_kqtouch,
+ .f_process = filt_kqprocess,
+};
+
+#pragma mark proc_filtops
+
+static int
+filt_procattach(struct knote *kn, __unused struct kevent_qos_s *kev)
+{
+ struct proc *p;
+
+ assert(PID_MAX < NOTE_PDATAMASK);
+
+ if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) {
+ knote_set_error(kn, ENOTSUP);
+ return 0;
+ }
+
+ p = proc_find(kn->kn_id);
+ if (p == NULL) {
+ knote_set_error(kn, ESRCH);
+ return 0;
+ }
+
+ const uint32_t NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
+
+ if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits) {
do {
pid_t selfpid = proc_selfpid();
- if (p->p_ppid == selfpid)
- break; /* parent => ok */
-
+ if (p->p_ppid == selfpid) {
+ break; /* parent => ok */
+ }
if ((p->p_lflag & P_LTRACED) != 0 &&
- (p->p_oppid == selfpid))
- break; /* parent-in-waiting => ok */
-
+ (p->p_oppid == selfpid)) {
+ break; /* parent-in-waiting => ok */
+ }
proc_rele(p);
- kn->kn_flags = EV_ERROR;
- kn->kn_data = EACCES;
+ knote_set_error(kn, EACCES);
return 0;
} while (0);
+ }
- proc_klist_lock();
+ kn->kn_proc = p;
+ kn->kn_flags |= EV_CLEAR; /* automatically set */
+ kn->kn_sdata = 0; /* incoming data is ignored */
- kn->kn_ptr.p_proc = p; /* store the proc handle */
+ proc_klist_lock();
KNOTE_ATTACH(&p->p_klist, kn);
* only captures edge-triggered events after this point
* so it can't already be fired.
*/
- return (0);
+ return 0;
}
proc_klist_lock();
- p = kn->kn_ptr.p_proc;
+ p = kn->kn_proc;
if (p != PROC_NULL) {
- kn->kn_ptr.p_proc = PROC_NULL;
+ kn->kn_proc = PROC_NULL;
KNOTE_DETACH(&p->p_klist, kn);
}
}
static int
-filt_proc(struct knote *kn, long hint)
+filt_procevent(struct knote *kn, long hint)
{
u_int event;
/*
* Note: a lot of bits in hint may be obtained from the knote
* To free some of those bits, see <rdar://problem/12592988> Freeing up
- * bits in hint for filt_proc
+ * bits in hint for filt_procevent
*
* mask off extra data
*/
* parent and these knotes re-fired.
*/
if (event & NOTE_EXIT) {
- if ((kn->kn_ptr.p_proc->p_oppid != 0)
- && (knote_get_kq(kn)->kq_p->p_pid != kn->kn_ptr.p_proc->p_ppid)) {
+ if ((kn->kn_proc->p_oppid != 0)
+ && (knote_get_kq(kn)->kq_p->p_pid != kn->kn_proc->p_ppid)) {
/*
* This knote is not for the current ptrace(2) parent, ignore.
*/
return 0;
}
- }
+ }
/*
* if the user is interested in this event, record it.
*/
- if (kn->kn_sfflags & event)
+ if (kn->kn_sfflags & event) {
kn->kn_fflags |= event;
+ }
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
/*
* The kernel has a wrapper in place that returns the same data
- * as is collected here, in kn_data. Any changes to how
+ * as is collected here, in kn_hook32. Any changes to how
* NOTE_EXITSTATUS and NOTE_EXIT_DETAIL are collected
* should also be reflected in the proc_pidnoteexit() wrapper.
*/
if (event == NOTE_EXIT) {
- kn->kn_data = 0;
+ kn->kn_hook32 = 0;
if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
kn->kn_fflags |= NOTE_EXITSTATUS;
- kn->kn_data |= (hint & NOTE_PDATAMASK);
+ kn->kn_hook32 |= (hint & NOTE_PDATAMASK);
}
if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
kn->kn_fflags |= NOTE_EXIT_DETAIL;
- if ((kn->kn_ptr.p_proc->p_lflag &
- P_LTERM_DECRYPTFAIL) != 0) {
- kn->kn_data |= NOTE_EXIT_DECRYPTFAIL;
+ if ((kn->kn_proc->p_lflag &
+ P_LTERM_DECRYPTFAIL) != 0) {
+ kn->kn_hook32 |= NOTE_EXIT_DECRYPTFAIL;
}
- if ((kn->kn_ptr.p_proc->p_lflag &
- P_LTERM_JETSAM) != 0) {
- kn->kn_data |= NOTE_EXIT_MEMORY;
- switch (kn->kn_ptr.p_proc->p_lflag & P_JETSAM_MASK) {
+ if ((kn->kn_proc->p_lflag &
+ P_LTERM_JETSAM) != 0) {
+ kn->kn_hook32 |= NOTE_EXIT_MEMORY;
+ switch (kn->kn_proc->p_lflag & P_JETSAM_MASK) {
case P_JETSAM_VMPAGESHORTAGE:
- kn->kn_data |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
+ kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
break;
case P_JETSAM_VMTHRASHING:
- kn->kn_data |= NOTE_EXIT_MEMORY_VMTHRASHING;
+ kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMTHRASHING;
break;
case P_JETSAM_FCTHRASHING:
- kn->kn_data |= NOTE_EXIT_MEMORY_FCTHRASHING;
+ kn->kn_hook32 |= NOTE_EXIT_MEMORY_FCTHRASHING;
break;
case P_JETSAM_VNODE:
- kn->kn_data |= NOTE_EXIT_MEMORY_VNODE;
+ kn->kn_hook32 |= NOTE_EXIT_MEMORY_VNODE;
break;
case P_JETSAM_HIWAT:
- kn->kn_data |= NOTE_EXIT_MEMORY_HIWAT;
+ kn->kn_hook32 |= NOTE_EXIT_MEMORY_HIWAT;
break;
case P_JETSAM_PID:
- kn->kn_data |= NOTE_EXIT_MEMORY_PID;
+ kn->kn_hook32 |= NOTE_EXIT_MEMORY_PID;
break;
case P_JETSAM_IDLEEXIT:
- kn->kn_data |= NOTE_EXIT_MEMORY_IDLE;
+ kn->kn_hook32 |= NOTE_EXIT_MEMORY_IDLE;
break;
}
}
- if ((kn->kn_ptr.p_proc->p_csflags &
- CS_KILLED) != 0) {
- kn->kn_data |= NOTE_EXIT_CSERROR;
+ if ((kn->kn_proc->p_csflags &
+ CS_KILLED) != 0) {
+ kn->kn_hook32 |= NOTE_EXIT_CSERROR;
}
}
}
/* if we have any matching state, activate the knote */
- return (kn->kn_fflags != 0);
+ return kn->kn_fflags != 0;
}
static int
-filt_proctouch(struct knote *kn, struct kevent_internal_s *kev)
+filt_proctouch(struct knote *kn, struct kevent_qos_s *kev)
{
int res;
/* accept new filter flags and mask off output events no long interesting */
kn->kn_sfflags = kev->fflags;
- if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
- kn->kn_udata = kev->udata;
/* restrict the current results to the (smaller?) set of new interest */
/*
}
static int
-filt_procprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
+filt_procprocess(struct knote *kn, struct kevent_qos_s *kev)
{
-#pragma unused(data)
- int res;
+ int res = 0;
proc_klist_lock();
- res = (kn->kn_fflags != 0);
- if (res) {
- *kev = kn->kn_kevent;
- kn->kn_flags |= EV_CLEAR; /* automatically set */
- kn->kn_fflags = 0;
- kn->kn_data = 0;
+ if (kn->kn_fflags) {
+ knote_fill_kevent(kn, kev, kn->kn_hook32);
+ kn->kn_hook32 = 0;
+ res = 1;
}
proc_klist_unlock();
return res;
}
+SECURITY_READ_ONLY_EARLY(static struct filterops) proc_filtops = {
+ .f_attach = filt_procattach,
+ .f_detach = filt_procdetach,
+ .f_event = filt_procevent,
+ .f_touch = filt_proctouch,
+ .f_process = filt_procprocess,
+};
-#pragma mark EVFILT_TIMER
+#pragma mark timer_filtops
+struct filt_timer_params {
+ uint64_t deadline; /* deadline in abs/cont time
+ * (or 0 if NOTE_ABSOLUTE and deadline is in past) */
+ uint64_t leeway; /* leeway in abstime, or 0 if none */
+ uint64_t interval; /* interval in abstime or 0 if non-repeating timer */
+};
/*
* Values stored in the knote at rest (using Mach absolute time units)
*
- * kn->kn_hook where the thread_call object is stored
+ * kn->kn_thcall where the thread_call object is stored
* kn->kn_ext[0] next deadline or 0 if immediate expiration
* kn->kn_ext[1] leeway value
* kn->kn_sdata interval timer: the interval
* absolute/deadline timer: 0
- * kn->kn_data fire count
+ * kn->kn_hook32 timer state
+ *
+ * TIMER_IDLE:
+ * The timer has either never been scheduled or been cancelled.
+ * It is safe to schedule a new one in this state.
+ *
+ * TIMER_ARMED:
+ * The timer has been scheduled
+ *
+ * TIMER_FIRED
+ * The timer has fired and an event needs to be delivered.
+ * When in this state, the callout may still be running.
+ *
+ * TIMER_IMMEDIATE
+ * The timer has fired at registration time, and the callout was never
+ * dispatched.
*/
+#define TIMER_IDLE 0x0
+#define TIMER_ARMED 0x1
+#define TIMER_FIRED 0x2
+#define TIMER_IMMEDIATE 0x3
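/*
 * Illustrative sketch (editor's note, not part of this change): the arming
 * side of the state machine above, pairing with filt_timerexpire() below.
 * example_timer_arm() is hypothetical; a real implementation would also
 * derive the thread_call flags (continuous time, critical leeway, ...)
 * from kn_sfflags. Only the TIMER_IDLE -> TIMER_ARMED transition is shown.
 */
#if 0
static void
example_timer_arm(struct knote *kn)
{
	/* only an idle (never scheduled, or cancelled) timer may be armed */
	assert(os_atomic_load(&kn->kn_hook32, relaxed) == TIMER_IDLE);

	os_atomic_store(&kn->kn_hook32, TIMER_ARMED, relaxed);
	thread_call_enter_delayed_with_leeway(kn->kn_thcall, NULL,
	    kn->kn_ext[0], kn->kn_ext[1], THREAD_CALL_DELAY_LEEWAY);
}
#endif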
-static lck_mtx_t _filt_timerlock;
-
-static void filt_timerlock(void) { lck_mtx_lock(&_filt_timerlock); }
-static void filt_timerunlock(void) { lck_mtx_unlock(&_filt_timerlock); }
-
-static inline void filt_timer_assert_locked(void)
+static void
+filt_timer_set_params(struct knote *kn, struct filt_timer_params *params)
{
- LCK_MTX_ASSERT(&_filt_timerlock, LCK_MTX_ASSERT_OWNED);
+ kn->kn_ext[0] = params->deadline;
+ kn->kn_ext[1] = params->leeway;
+ kn->kn_sdata = params->interval;
}
-/* state flags stored in kn_hookid */
-#define TIMER_RUNNING 0x1
-#define TIMER_CANCELWAIT 0x2
-
/*
* filt_timervalidate - process data from user
*
* kn_sfflags style of timer, unit of measurement
*
* Output:
- * kn_sdata either interval in abstime or 0 if non-repeating timer
- * ext[0] fire deadline in abs/cont time
- * (or 0 if NOTE_ABSOLUTE and deadline is in past)
+ * struct filt_timer_params to apply to the filter with
+ * filt_timer_set_params when changes are ready to be committed.
*
* Returns:
* EINVAL Invalid user data parameters
+ * ERANGE Various overflows with the parameters
*
* Called with timer filter lock held.
*/
static int
-filt_timervalidate(struct knote *kn)
+filt_timervalidate(const struct kevent_qos_s *kev,
+ struct filt_timer_params *params)
{
/*
- * There are 4 knobs that need to be chosen for a timer registration:
+ * There are 5 knobs that need to be chosen for a timer registration:
*
* A) Units of time (what is the time duration of the specified number)
* Absolute and interval take:
* expires when mach_continuous_time() is > the passed in value.
*/
- filt_timer_assert_locked();
-
uint64_t multiplier;
boolean_t use_abstime = FALSE;
- switch (kn->kn_sfflags & (NOTE_SECONDS|NOTE_USECONDS|NOTE_NSECONDS|NOTE_MACHTIME)) {
+ switch (kev->fflags & (NOTE_SECONDS | NOTE_USECONDS | NOTE_NSECONDS | NOTE_MACHTIME)) {
case NOTE_SECONDS:
multiplier = NSEC_PER_SEC;
break;
multiplier = NSEC_PER_SEC / 1000;
break;
default:
- return (EINVAL);
+ return EINVAL;
}
/* transform the leeway in kn_ext[1] to same time scale */
- if (kn->kn_sfflags & NOTE_LEEWAY) {
+ if (kev->fflags & NOTE_LEEWAY) {
uint64_t leeway_abs;
if (use_abstime) {
- leeway_abs = (uint64_t)kn->kn_ext[1];
- } else {
+ leeway_abs = (uint64_t)kev->ext[1];
+ } else {
uint64_t leeway_ns;
- if (os_mul_overflow((uint64_t)kn->kn_ext[1], multiplier, &leeway_ns))
- return (ERANGE);
+ if (os_mul_overflow((uint64_t)kev->ext[1], multiplier, &leeway_ns)) {
+ return ERANGE;
+ }
nanoseconds_to_absolutetime(leeway_ns, &leeway_abs);
}
- kn->kn_ext[1] = leeway_abs;
+ params->leeway = leeway_abs;
+ } else {
+ params->leeway = 0;
}
- if (kn->kn_sfflags & NOTE_ABSOLUTE) {
+ if (kev->fflags & NOTE_ABSOLUTE) {
uint64_t deadline_abs;
if (use_abstime) {
- deadline_abs = (uint64_t)kn->kn_sdata;
+ deadline_abs = (uint64_t)kev->data;
} else {
uint64_t calendar_deadline_ns;
- if (os_mul_overflow((uint64_t)kn->kn_sdata, multiplier, &calendar_deadline_ns))
- return (ERANGE);
+ if (os_mul_overflow((uint64_t)kev->data, multiplier, &calendar_deadline_ns)) {
+ return ERANGE;
+ }
/* calendar_deadline_ns is in nanoseconds since the epoch */
* it does not change the calendar timebase.
*/
- if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME)
+ if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
clock_continuoustime_interval_to_deadline(interval_abs,
- &deadline_abs);
- else
+ &deadline_abs);
+ } else {
clock_absolutetime_interval_to_deadline(interval_abs,
- &deadline_abs);
+ &deadline_abs);
+ }
} else {
deadline_abs = 0; /* cause immediate expiration */
}
}
- kn->kn_ext[0] = deadline_abs;
- kn->kn_sdata = 0; /* NOTE_ABSOLUTE is non-repeating */
- } else if (kn->kn_sdata < 0) {
+ params->deadline = deadline_abs;
+ params->interval = 0; /* NOTE_ABSOLUTE is non-repeating */
+ } else if (kev->data < 0) {
/*
* Negative interval timers fire immediately, once.
*
* We now skip the power-wasting hot spin phase and go straight to the idle phase.
*/
- kn->kn_sdata = 0; /* non-repeating */
- kn->kn_ext[0] = 0; /* expire immediately */
+ params->deadline = 0; /* expire immediately */
+ params->interval = 0; /* non-repeating */
} else {
uint64_t interval_abs = 0;
if (use_abstime) {
- interval_abs = (uint64_t)kn->kn_sdata;
+ interval_abs = (uint64_t)kev->data;
} else {
uint64_t interval_ns;
- if (os_mul_overflow((uint64_t)kn->kn_sdata, multiplier, &interval_ns))
- return (ERANGE);
+ if (os_mul_overflow((uint64_t)kev->data, multiplier, &interval_ns)) {
+ return ERANGE;
+ }
nanoseconds_to_absolutetime(interval_ns, &interval_abs);
}
uint64_t deadline = 0;
- if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME)
+ if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
clock_continuoustime_interval_to_deadline(interval_abs, &deadline);
- else
+ } else {
clock_absolutetime_interval_to_deadline(interval_abs, &deadline);
+ }
- kn->kn_sdata = interval_abs; /* default to a repeating timer */
- kn->kn_ext[0] = deadline;
+ params->deadline = deadline;
+ params->interval = interval_abs;
}
- return (0);
+ return 0;
}
-
-
-
/*
* filt_timerexpire - the timer callout routine
- *
- * Just propagate the timer event into the knote
- * filter routine (by going through the knote
- * synchronization point). Pass a hint to
- * indicate this is a real event, not just a
- * query from above.
*/
static void
filt_timerexpire(void *knx, __unused void *spare)
{
- struct klist timer_list;
struct knote *kn = knx;
+ int v;
- filt_timerlock();
-
- kn->kn_hookid &= ~TIMER_RUNNING;
-
- /* no "object" for timers, so fake a list */
- SLIST_INIT(&timer_list);
- SLIST_INSERT_HEAD(&timer_list, kn, kn_selnext);
-
- KNOTE(&timer_list, 1);
-
- /* if someone is waiting for timer to pop */
- if (kn->kn_hookid & TIMER_CANCELWAIT) {
+ if (os_atomic_cmpxchgv(&kn->kn_hook32, TIMER_ARMED, TIMER_FIRED,
+ &v, relaxed)) {
+ // our f_event always would say FILTER_ACTIVE,
+ // so be leaner and just do it.
struct kqueue *kq = knote_get_kq(kn);
- waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
- CAST_EVENT64_T(&kn->kn_hook),
- THREAD_AWAKENED,
- WAITQ_ALL_PRIORITIES);
-
- kn->kn_hookid &= ~TIMER_CANCELWAIT;
+ kqlock(kq);
+ knote_activate(kq, kn, FILTER_ACTIVE);
+ kqunlock(kq);
+ } else {
+ /*
+ * From TIMER_ARMED, the only allowed transitions are:
+ * - to TIMER_FIRED through the timer callout just above
+ * - to TIMER_IDLE due to filt_timercancel() which will wait for the
+ * timer callout (and any possible invocation of filt_timerexpire) to
+ * have finished before the state is changed again.
+ */
+ assert(v == TIMER_IDLE);
}
-
- filt_timerunlock();
}
-/*
- * Cancel a running timer (or wait for the pop).
- * Timer filter lock is held.
- * May drop and retake the timer filter lock.
- */
static void
filt_timercancel(struct knote *kn)
{
- filt_timer_assert_locked();
-
- assert((kn->kn_hookid & TIMER_CANCELWAIT) == 0);
-
- /* if no timer, then we're good */
- if ((kn->kn_hookid & TIMER_RUNNING) == 0)
- return;
-
- thread_call_t callout = (thread_call_t)kn->kn_hook;
-
- /* cancel the callout if we can */
- if (thread_call_cancel(callout)) {
- kn->kn_hookid &= ~TIMER_RUNNING;
- return;
+ if (os_atomic_xchg(&kn->kn_hook32, TIMER_IDLE, relaxed) == TIMER_ARMED) {
+ /* cancel the thread call and wait for any filt_timerexpire in flight */
+ thread_call_cancel_wait(kn->kn_thcall);
}
+}
- /* cancel failed, we have to wait for the in-flight expire routine */
-
- kn->kn_hookid |= TIMER_CANCELWAIT;
-
- struct kqueue *kq = knote_get_kq(kn);
-
- waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
- CAST_EVENT64_T(&kn->kn_hook),
- THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
+/*
+ * Does this deadline need a timer armed for it, or has it expired?
+ */
+static bool
+filt_timer_is_ready(struct knote *kn)
+{
+ uint64_t now, deadline = kn->kn_ext[0];
- filt_timerunlock();
- thread_block(THREAD_CONTINUE_NULL);
- filt_timerlock();
+ if (deadline == 0) {
+ return true;
+ }
- assert((kn->kn_hookid & TIMER_CANCELWAIT) == 0);
- assert((kn->kn_hookid & TIMER_RUNNING) == 0);
+ if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
+ now = mach_continuous_time();
+ } else {
+ now = mach_absolute_time();
+ }
+ return deadline <= now;
}
+/*
+ * Arm a timer
+ *
+ * It is the responsibility of the caller to make sure the timer call
+ * has completed or been cancelled properly prior to arming it.
+ */
static void
filt_timerarm(struct knote *kn)
{
- filt_timer_assert_locked();
-
- assert((kn->kn_hookid & TIMER_RUNNING) == 0);
-
- thread_call_t callout = (thread_call_t)kn->kn_hook;
-
uint64_t deadline = kn->kn_ext[0];
uint64_t leeway = kn->kn_ext[1];
int filter_flags = kn->kn_sfflags;
unsigned int timer_flags = 0;
- if (filter_flags & NOTE_CRITICAL)
+ assert(os_atomic_load(&kn->kn_hook32, relaxed) == TIMER_IDLE);
+
+ if (filter_flags & NOTE_CRITICAL) {
timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
- else if (filter_flags & NOTE_BACKGROUND)
+ } else if (filter_flags & NOTE_BACKGROUND) {
timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
- else
+ } else {
timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
+ }
- if (filter_flags & NOTE_LEEWAY)
+ if (filter_flags & NOTE_LEEWAY) {
timer_flags |= THREAD_CALL_DELAY_LEEWAY;
+ }
- if (filter_flags & NOTE_MACH_CONTINUOUS_TIME)
+ if (filter_flags & NOTE_MACH_CONTINUOUS_TIME) {
timer_flags |= THREAD_CALL_CONTINUOUS;
+ }
- thread_call_enter_delayed_with_leeway(callout, NULL,
- deadline, leeway,
- timer_flags);
-
- kn->kn_hookid |= TIMER_RUNNING;
-}
-
-/*
- * Does this knote need a timer armed for it, or should it be ready immediately?
- */
-static boolean_t
-filt_timer_is_ready(struct knote *kn)
-{
- uint64_t now;
-
- if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME)
- now = mach_continuous_time();
- else
- now = mach_absolute_time();
-
- uint64_t deadline = kn->kn_ext[0];
-
- if (deadline < now)
- return TRUE;
- else
- return FALSE;
+ os_atomic_store(&kn->kn_hook32, TIMER_ARMED, relaxed);
+ thread_call_enter_delayed_with_leeway(kn->kn_thcall, NULL,
+ deadline, leeway, timer_flags);
}
/*
* Allocate a thread call for the knote's lifetime, and kick off the timer.
*/
static int
-filt_timerattach(struct knote *kn, __unused struct kevent_internal_s *kev)
+filt_timerattach(struct knote *kn, struct kevent_qos_s *kev)
{
thread_call_t callout;
+ struct filt_timer_params params;
int error;
- callout = thread_call_allocate_with_options(filt_timerexpire,
- (thread_call_param_t)kn, THREAD_CALL_PRIORITY_HIGH,
- THREAD_CALL_OPTIONS_ONCE);
-
- if (NULL == callout) {
- kn->kn_flags = EV_ERROR;
- kn->kn_data = ENOMEM;
+ if ((error = filt_timervalidate(kev, &params)) != 0) {
+ knote_set_error(kn, error);
return 0;
}
- filt_timerlock();
-
- if ((error = filt_timervalidate(kn)) != 0) {
- kn->kn_flags = EV_ERROR;
- kn->kn_data = error;
- filt_timerunlock();
+ callout = thread_call_allocate_with_options(filt_timerexpire,
+ (thread_call_param_t)kn, THREAD_CALL_PRIORITY_HIGH,
+ THREAD_CALL_OPTIONS_ONCE);
- __assert_only boolean_t freed = thread_call_free(callout);
- assert(freed);
+ if (NULL == callout) {
+ knote_set_error(kn, ENOMEM);
return 0;
}
- kn->kn_hook = (void*)callout;
- kn->kn_hookid = 0;
+ filt_timer_set_params(kn, &params);
+ kn->kn_thcall = callout;
kn->kn_flags |= EV_CLEAR;
+ os_atomic_store(&kn->kn_hook32, TIMER_IDLE, relaxed);
/* NOTE_ABSOLUTE implies EV_ONESHOT */
- if (kn->kn_sfflags & NOTE_ABSOLUTE)
+ if (kn->kn_sfflags & NOTE_ABSOLUTE) {
kn->kn_flags |= EV_ONESHOT;
+ }
- boolean_t timer_ready = FALSE;
-
- if ((timer_ready = filt_timer_is_ready(kn))) {
- /* cause immediate expiration */
- kn->kn_data = 1;
+ if (filt_timer_is_ready(kn)) {
+ os_atomic_store(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
+ return FILTER_ACTIVE;
} else {
filt_timerarm(kn);
+ return 0;
}
-
- filt_timerunlock();
-
- return timer_ready;
}
/*
static void
filt_timerdetach(struct knote *kn)
{
- thread_call_t callout;
-
- filt_timerlock();
-
- callout = (thread_call_t)kn->kn_hook;
- filt_timercancel(kn);
-
- filt_timerunlock();
+ __assert_only boolean_t freed;
- __assert_only boolean_t freed = thread_call_free(callout);
+ /*
+ * Unconditionally cancel to make sure there can't be any filt_timerexpire()
+ * running anymore.
+ */
+ thread_call_cancel_wait(kn->kn_thcall);
+ freed = thread_call_free(kn->kn_thcall);
assert(freed);
}
-/*
- * filt_timerevent - post events to a timer knote
- *
- * Called in the context of filt_timerexpire with
- * the filt_timerlock held
- */
-static int
-filt_timerevent(struct knote *kn, __unused long hint)
-{
- filt_timer_assert_locked();
-
- kn->kn_data = 1;
- return (1);
-}
-
/*
* filt_timertouch - update timer knote with new user input
*
* pops have gone off (in kn_data).
*/
static int
-filt_timertouch(
- struct knote *kn,
- struct kevent_internal_s *kev)
+filt_timertouch(struct knote *kn, struct kevent_qos_s *kev)
{
+ struct filt_timer_params params;
+ uint32_t changed_flags = (kn->kn_sfflags ^ kev->fflags);
int error;
- filt_timerlock();
-
- /*
- * cancel current call - drops and retakes lock
- * TODO: not safe against concurrent touches?
- */
- filt_timercancel(kn);
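+	/* A touch may not switch a timer between interval and NOTE_ABSOLUTE modes */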
+ if (changed_flags & NOTE_ABSOLUTE) {
+ kev->flags |= EV_ERROR;
+ kev->data = EINVAL;
+ return 0;
+ }
- /* clear if the timer had previously fired, the user no longer wants to see it */
- kn->kn_data = 0;
+ if ((error = filt_timervalidate(kev, &params)) != 0) {
+ kev->flags |= EV_ERROR;
+ kev->data = error;
+ return 0;
+ }
/* capture the new values used to compute deadline */
- kn->kn_sdata = kev->data;
+ filt_timercancel(kn);
+ filt_timer_set_params(kn, &params);
kn->kn_sfflags = kev->fflags;
- kn->kn_ext[0] = kev->ext[0];
- kn->kn_ext[1] = kev->ext[1];
-
- if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
- kn->kn_udata = kev->udata;
-
- /* recalculate deadline */
- error = filt_timervalidate(kn);
- if (error) {
- /* no way to report error, so mark it in the knote */
- kn->kn_flags |= EV_ERROR;
- kn->kn_data = error;
- filt_timerunlock();
- return 1;
- }
-
- boolean_t timer_ready = FALSE;
- if ((timer_ready = filt_timer_is_ready(kn))) {
- /* cause immediate expiration */
- kn->kn_data = 1;
+ if (filt_timer_is_ready(kn)) {
+ os_atomic_store(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
+ return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS;
} else {
filt_timerarm(kn);
+ return FILTER_UPDATE_REQ_QOS;
}
-
- filt_timerunlock();
-
- return timer_ready;
}
/*
* counters for the next time.
*/
static int
-filt_timerprocess(
- struct knote *kn,
- __unused struct filt_process_s *data,
- struct kevent_internal_s *kev)
+filt_timerprocess(struct knote *kn, struct kevent_qos_s *kev)
{
- filt_timerlock();
-
- if (kn->kn_data == 0 || (kn->kn_hookid & TIMER_CANCELWAIT)) {
+ /*
+ * filt_timerprocess is serialized with any filter routine except for
+ * filt_timerexpire which atomically does a TIMER_ARMED -> TIMER_FIRED
+ * transition, and on success, activates the knote.
+ *
+ * Hence, we don't need atomic modifications of the state, only to peek at
+ * whether we see any of the "FIRED" state, and if we do, it is safe to
+ * do simple state machine transitions.
+ */
+ switch (os_atomic_load(&kn->kn_hook32, relaxed)) {
+ case TIMER_IDLE:
+ case TIMER_ARMED:
/*
- * kn_data = 0:
- * The timer hasn't yet fired, so there's nothing to deliver
- * TIMER_CANCELWAIT:
- * touch is in the middle of canceling the timer,
- * so don't deliver or re-arm anything
- *
* This can happen if a touch resets a timer that had fired
* without being processed
*/
- filt_timerunlock();
return 0;
}
- if (kn->kn_sdata != 0 && ((kn->kn_flags & EV_ERROR) == 0)) {
+ os_atomic_store(&kn->kn_hook32, TIMER_IDLE, relaxed);
+
+ /*
+ * Copy out the interesting kevent state,
+ * but don't leak out the raw time calculations.
+ *
+ * TODO: potential enhancements - tell the user about:
+ * - deadline to which this timer thought it was expiring
+ * - return kn_sfflags in the fflags field so the client can know
+ * under what flags the timer fired
+ */
+ knote_fill_kevent(kn, kev, 1);
+ kev->ext[0] = 0;
+ /* kev->ext[1] = 0; JMM - shouldn't we hide this too? */
+
+ if (kn->kn_sdata != 0) {
/*
* This is a 'repeating' timer, so we have to emit
* how many intervals expired between the arm
* this could easily be done in the client...
*/
- /* The timer better have had expired... */
- assert((kn->kn_hookid & TIMER_RUNNING) == 0);
-
uint64_t now;
- if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME)
+ if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
now = mach_continuous_time();
- else
+ } else {
now = mach_absolute_time();
+ }
uint64_t first_deadline = kn->kn_ext[0];
uint64_t interval_abs = kn->kn_sdata;
* and be in repeating mode, so therefore it must have been
* more than 'interval' time since the attach or last
* successful touch.
- *
- * An unsuccessful touch would:
- * disarm the timer
- * clear kn_data
- * clear kn_sdata
- * set EV_ERROR
- * all of which will prevent this code from running.
*/
assert(num_fired > 0);
/* report how many intervals have elapsed to the user */
- kn->kn_data = (int64_t) num_fired;
+ kev->data = (int64_t)num_fired;
/* We only need to re-arm the timer if it's not about to be destroyed */
if ((kn->kn_flags & EV_ONESHOT) == 0) {
kn->kn_ext[0] = new_deadline;
+ /*
+ * This can't shortcut setting up the thread call, because
+ * knote_process deactivates EV_CLEAR knotes unconditionally.
+ */
filt_timerarm(kn);
}
}
- /*
- * Copy out the interesting kevent state,
- * but don't leak out the raw time calculations.
- *
- * TODO: potential enhancements - tell the user about:
- * - deadline to which this timer thought it was expiring
- * - return kn_sfflags in the fflags field so the client can know
- * under what flags the timer fired
- */
- *kev = kn->kn_kevent;
- kev->ext[0] = 0;
- /* kev->ext[1] = 0; JMM - shouldn't we hide this too? */
-
- /* we have delivered the event, reset the timer pop count */
- kn->kn_data = 0;
-
- filt_timerunlock();
- return 1;
+ return FILTER_ACTIVE;
}
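+
+/*
+ * The timer filter never receives events through KNOTE(); filt_timerexpire()
+ * activates the knote directly, so f_event points at filt_bad_event.
+ */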
SECURITY_READ_ONLY_EARLY(static struct filterops) timer_filtops = {
+ .f_extended_codes = true,
.f_attach = filt_timerattach,
.f_detach = filt_timerdetach,
- .f_event = filt_timerevent,
+ .f_event = filt_bad_event,
.f_touch = filt_timertouch,
.f_process = filt_timerprocess,
};
-
-#pragma mark EVFILT_USER
-
-
-static void
-filt_userlock(void)
-{
- lck_spin_lock(&_filt_userlock);
-}
-
-static void
-filt_userunlock(void)
-{
- lck_spin_unlock(&_filt_userlock);
-}
+#pragma mark user_filtops
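+
+/*
+ * EVFILT_USER knotes are not attached to anything in the kernel;
+ * kn_hook32 merely records whether the knote is currently active
+ * (FILTER_ACTIVE) so that touch/process can report it.
+ */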
static int
-filt_userattach(struct knote *kn, __unused struct kevent_internal_s *kev)
+filt_userattach(struct knote *kn, __unused struct kevent_qos_s *kev)
{
- /* EVFILT_USER knotes are not attached to anything in the kernel */
- /* Cant discover this knote until after attach - so no lock needed */
- kn->kn_hook = NULL;
if (kn->kn_sfflags & NOTE_TRIGGER) {
- kn->kn_hookid = 1;
+ kn->kn_hook32 = FILTER_ACTIVE;
} else {
- kn->kn_hookid = 0;
+ kn->kn_hook32 = 0;
}
- return (kn->kn_hookid);
-}
-
-static void
-filt_userdetach(__unused struct knote *kn)
-{
- /* EVFILT_USER knotes are not attached to anything in the kernel */
-}
-
-static int
-filt_user(
- __unused struct knote *kn,
- __unused long hint)
-{
- panic("filt_user");
- return 0;
+ return kn->kn_hook32;
}
static int
-filt_usertouch(
- struct knote *kn,
- struct kevent_internal_s *kev)
+filt_usertouch(struct knote *kn, struct kevent_qos_s *kev)
{
uint32_t ffctrl;
int fflags;
- int active;
-
- filt_userlock();
ffctrl = kev->fflags & NOTE_FFCTRLMASK;
fflags = kev->fflags & NOTE_FFLAGSMASK;
}
kn->kn_sdata = kev->data;
- if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
- kn->kn_udata = kev->udata;
-
if (kev->fflags & NOTE_TRIGGER) {
- kn->kn_hookid = 1;
+ kn->kn_hook32 = FILTER_ACTIVE;
}
- active = kn->kn_hookid;
-
- filt_userunlock();
-
- return (active);
+ return (int)kn->kn_hook32;
}
static int
-filt_userprocess(
- struct knote *kn,
- __unused struct filt_process_s *data,
- struct kevent_internal_s *kev)
+filt_userprocess(struct knote *kn, struct kevent_qos_s *kev)
{
- filt_userlock();
+ int result = (int)kn->kn_hook32;
- if (kn->kn_hookid == 0) {
- filt_userunlock();
- return 0;
- }
-
- *kev = kn->kn_kevent;
- kev->fflags = (volatile UInt32)kn->kn_sfflags;
- kev->data = kn->kn_sdata;
- if (kn->kn_flags & EV_CLEAR) {
- kn->kn_hookid = 0;
- kn->kn_data = 0;
- kn->kn_fflags = 0;
+ if (result) {
+ /* EVFILT_USER returns the data that was passed in */
+ knote_fill_kevent_with_sdata(kn, kev);
+ kev->fflags = kn->kn_sfflags;
+ if (kn->kn_flags & EV_CLEAR) {
+ /* knote_fill_kevent cleared kn_fflags */
+ kn->kn_hook32 = 0;
+ }
}
- filt_userunlock();
- return 1;
+ return result;
}
-#pragma mark EVFILT_WORKLOOP
+SECURITY_READ_ONLY_EARLY(static struct filterops) user_filtops = {
+ .f_extended_codes = true,
+ .f_attach = filt_userattach,
+ .f_detach = filt_no_detach,
+ .f_event = filt_bad_event,
+ .f_touch = filt_usertouch,
+ .f_process = filt_userprocess,
+};
+
+#pragma mark workloop_filtops
-#if DEBUG || DEVELOPMENT
-/*
- * see src/queue_internal.h in libdispatch
- */
-#define DISPATCH_QUEUE_ENQUEUED 0x1ull
-#endif
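+/*
+ * Pseudo error returned by filt_wlupdate()/filt_wlupdate_sync_ipc() when they
+ * purposely left preemption disabled; the attach/touch callers translate it
+ * into FILTER_THREADREQ_NODEFEER and clear the error.
+ */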
+#define EPREEMPTDISABLED (-1)
static inline void
filt_wllock(struct kqworkloop *kqwl)
{
- lck_mtx_lock(&kqwl->kqwl_statelock);
+ lck_spin_lock(&kqwl->kqwl_statelock);
}
static inline void
filt_wlunlock(struct kqworkloop *kqwl)
{
- lck_mtx_unlock(&kqwl->kqwl_statelock);
+ lck_spin_unlock(&kqwl->kqwl_statelock);
}
-static inline void
-filt_wlheld(__assert_only struct kqworkloop *kqwl)
-{
- LCK_MTX_ASSERT(&kqwl->kqwl_statelock, LCK_MTX_ASSERT_OWNED);
-}
-
-#define WL_OWNER_SUSPENDED ((thread_t)(~0ull)) /* special owner when suspended */
-
+/*
+ * Returns true when the interlock for the turnstile is the workqueue lock
+ *
+ * When this is the case, all turnstile operations are delegated
+ * to the workqueue subsystem.
+ *
+ * This is required because kqueue_threadreq_bind_prepost only holds the
+ * workqueue lock but needs to move the inheritor from the workloop turnstile
+ * away from the creator thread, so that this now-fulfilled request can no
+ * longer be picked up by other threads.
+ */
static inline bool
-filt_wlowner_is_valid(thread_t owner)
+filt_wlturnstile_interlock_is_workq(struct kqworkloop *kqwl)
{
- return owner != THREAD_NULL && owner != WL_OWNER_SUSPENDED;
+ return kqr_thread_requested_pending(&kqwl->kqwl_request);
}
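+
+/*
+ * Update the workloop turnstile's inheritor to the current owner, or the
+ * servicer thread, if any; only legal when the turnstile interlock is not the
+ * workqueue lock (see above).
+ */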
-static inline bool
-filt_wlshould_end_ownership(struct kqworkloop *kqwl,
- struct kevent_internal_s *kev, int error)
+static void
+filt_wlupdate_inheritor(struct kqworkloop *kqwl, struct turnstile *ts,
+ turnstile_update_flags_t flags)
{
- thread_t owner = kqwl->kqwl_owner;
- return (error == 0 || error == ESTALE) &&
- (kev->fflags & NOTE_WL_END_OWNERSHIP) &&
- (owner == current_thread() || owner == WL_OWNER_SUSPENDED);
-}
+ turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
-static inline bool
-filt_wlshould_update_ownership(struct kevent_internal_s *kev, int error)
-{
- return error == 0 && (kev->fflags & NOTE_WL_DISCOVER_OWNER) &&
- kev->ext[EV_EXTIDX_WL_ADDR];
-}
+ /*
+ * binding to the workq should always happen through
+ * workq_kern_threadreq_update_inheritor()
+ */
+ assert(!filt_wlturnstile_interlock_is_workq(kqwl));
-static inline bool
-filt_wlshould_set_async_qos(struct kevent_internal_s *kev, int error,
- kq_index_t async_qos)
-{
- if (error != 0) {
- return false;
- }
- if (async_qos != THREAD_QOS_UNSPECIFIED) {
- return true;
- }
- if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && (kev->flags & EV_DELETE)) {
- /* see filt_wlprocess() */
- return true;
+ if ((inheritor = kqwl->kqwl_owner)) {
+ flags |= TURNSTILE_INHERITOR_THREAD;
+ } else if ((inheritor = kqr_thread(kqr))) {
+ flags |= TURNSTILE_INHERITOR_THREAD;
}
- return false;
+
+ turnstile_update_inheritor(ts, inheritor, flags);
}
+#define EVFILT_WORKLOOP_EFAULT_RETRY_COUNT 100
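+/* operations driving filt_wlupdate() / filt_wlupdate_sync_ipc() */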
+#define FILT_WLATTACH 0
+#define FILT_WLTOUCH 1
+#define FILT_WLDROP 2
+
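+/*
+ * Common update path for EVFILT_WORKLOOP knotes (attach, touch and drop):
+ * optionally debounce the userspace word at ext[EV_EXTIDX_WL_ADDR] against
+ * ext[EV_EXTIDX_WL_VALUE] under ext[EV_EXTIDX_WL_MASK], discover or end
+ * ownership, then commit owner/QoS changes to the thread request and the
+ * workloop turnstile.
+ */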
__result_use_check
static int
-filt_wlupdateowner(struct kqworkloop *kqwl, struct kevent_internal_s *kev,
- int error, kq_index_t async_qos)
+filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn,
+ struct kevent_qos_s *kev, kq_index_t qos_index, int op)
{
- struct kqrequest *kqr = &kqwl->kqwl_request;
+ user_addr_t uaddr = CAST_USER_ADDR_T(kev->ext[EV_EXTIDX_WL_ADDR]);
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
thread_t cur_owner, new_owner, extra_thread_ref = THREAD_NULL;
kq_index_t cur_override = THREAD_QOS_UNSPECIFIED;
- kq_index_t old_owner_override = THREAD_QOS_UNSPECIFIED;
- boolean_t ipc_override_is_sync = false;
- boolean_t old_owner_override_is_sync = false;
- int action = KQWL_UTQ_NONE;
+ int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
+ int action = KQWL_UTQ_NONE, error = 0;
+ bool wl_inheritor_updated = false, needs_wake = false;
+ uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
+ uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
+ uint64_t udata = 0;
+ struct turnstile *ts = TURNSTILE_NULL;
- filt_wlheld(kqwl);
+ filt_wllock(kqwl);
+
+again:
+ new_owner = cur_owner = kqwl->kqwl_owner;
/*
- * The owner is only changed under both the filt_wllock and the
- * kqwl_req_lock. Looking at it with either one held is fine.
+ * Phase 1:
+ *
+ * If asked, load the uint64 value at the user provided address and compare
+ * it against the passed in mask and expected value.
+ *
+ * If NOTE_WL_DISCOVER_OWNER is specified, translate the loaded name as
+ * a thread reference.
+ *
+ * If NOTE_WL_END_OWNERSHIP is specified and the currently known owner is
+ * the current thread, then end ownership.
+ *
+ * Lastly decide whether we need to perform a QoS update.
*/
- cur_owner = kqwl->kqwl_owner;
- if (filt_wlshould_end_ownership(kqwl, kev, error)) {
- new_owner = THREAD_NULL;
- } else if (filt_wlshould_update_ownership(kev, error)) {
+ if (uaddr) {
/*
- * Decipher the owner port name, and translate accordingly.
- * The low 2 bits were borrowed for other flags, so mask them off.
+ * Until a fix for <rdar://problem/24999882> exists,
+ * doing the copyin with preemption disabled forces any
+ * vm_fault we encounter to fail.
*/
- uint64_t udata = kev->ext[EV_EXTIDX_WL_VALUE];
- mach_port_name_t new_owner_name = (mach_port_name_t)udata & ~0x3;
- if (new_owner_name != MACH_PORT_NULL) {
- new_owner_name = ipc_entry_name_mask(new_owner_name);
- }
-
- if (MACH_PORT_VALID(new_owner_name)) {
- new_owner = port_name_to_thread(new_owner_name);
- if (new_owner == THREAD_NULL)
- return EOWNERDEAD;
- extra_thread_ref = new_owner;
- } else if (new_owner_name == MACH_PORT_DEAD) {
- new_owner = WL_OWNER_SUSPENDED;
- } else {
+ error = copyin_atomic64(uaddr, &udata);
+
+ /*
+ * If we get EFAULT, drop locks, and retry.
+ * If we still get an error, report it;
+ * else assume the memory has been faulted in
+ * and attempt the copyin under the lock again.
+ */
+ switch (error) {
+ case 0:
+ break;
+ case EFAULT:
+ if (efault_retry-- > 0) {
+ filt_wlunlock(kqwl);
+ error = copyin_atomic64(uaddr, &udata);
+ filt_wllock(kqwl);
+ if (error == 0) {
+ goto again;
+ }
+ }
+ /* FALLTHROUGH */
+ default:
+ goto out;
+ }
+
+ /* Update state as copied in. */
+ kev->ext[EV_EXTIDX_WL_VALUE] = udata;
+
+ if ((udata & mask) != (kdata & mask)) {
+ error = ESTALE;
+ } else if (kev->fflags & NOTE_WL_DISCOVER_OWNER) {
/*
- * We never want to learn a new owner that is NULL.
- * Ownership should be ended with END_OWNERSHIP.
+ * Decipher the owner port name, and translate accordingly.
+ * The low 2 bits were borrowed for other flags, so mask them off.
+ *
+ * Then attempt translation to a thread reference or fail.
*/
- new_owner = cur_owner;
+ mach_port_name_t name = (mach_port_name_t)udata & ~0x3;
+ if (name != MACH_PORT_NULL) {
+ name = ipc_entry_name_mask(name);
+ extra_thread_ref = port_name_to_thread(name,
+ PORT_TO_THREAD_IN_CURRENT_TASK);
+ if (extra_thread_ref == THREAD_NULL) {
+ error = EOWNERDEAD;
+ goto out;
+ }
+ new_owner = extra_thread_ref;
+ }
}
- } else {
- new_owner = cur_owner;
}
- if (filt_wlshould_set_async_qos(kev, error, async_qos)) {
- action = KQWL_UTQ_SET_ASYNC_QOS;
+ if ((kev->fflags & NOTE_WL_END_OWNERSHIP) && new_owner == current_thread()) {
+ new_owner = THREAD_NULL;
+ }
+
+ if (error == 0) {
+ if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && (kev->flags & EV_DELETE)) {
+ action = KQWL_UTQ_SET_QOS_INDEX;
+ } else if (qos_index && kqr->tr_kq_qos_index != qos_index) {
+ action = KQWL_UTQ_SET_QOS_INDEX;
+ }
+
+ if (op == FILT_WLTOUCH) {
+ /*
+ * Save off any additional fflags/data we just accepted.
+ * But only keep the last round of "update" bits we acted on, which helps
+ * debugging a lot.
+ */
+ kn->kn_sfflags &= ~NOTE_WL_UPDATES_MASK;
+ kn->kn_sfflags |= kev->fflags;
+ if (kev->fflags & NOTE_WL_SYNC_WAKE) {
+ needs_wake = (kn->kn_thread != THREAD_NULL);
+ }
+ } else if (op == FILT_WLDROP) {
+ if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) ==
+ NOTE_WL_SYNC_WAIT) {
+ /*
+ * When deleting a SYNC_WAIT knote that hasn't been woken up
+ * explicitly, issue a wake up.
+ */
+ kn->kn_sfflags |= NOTE_WL_SYNC_WAKE;
+ needs_wake = (kn->kn_thread != THREAD_NULL);
+ }
+ }
}
- if (cur_owner == new_owner && action == KQWL_UTQ_NONE) {
+
+ /*
+ * Phase 2:
+ *
+ * Commit ownership and QoS changes if any, possibly wake up waiters
+ */
+
+ if (cur_owner == new_owner && action == KQWL_UTQ_NONE && !needs_wake) {
goto out;
}
- kqwl_req_lock(kqwl);
+ kqlock(kqwl);
/* If already tracked as servicer, don't track as owner */
- if ((kqr->kqr_state & KQR_BOUND) && new_owner == kqr->kqr_thread) {
- kqwl->kqwl_owner = new_owner = THREAD_NULL;
+ if (new_owner == kqr_thread(kqr)) {
+ new_owner = THREAD_NULL;
}
if (cur_owner != new_owner) {
/* we just transfered this ref to kqwl_owner */
extra_thread_ref = THREAD_NULL;
}
- cur_override = kqworkloop_combined_qos(kqwl, &ipc_override_is_sync);
- old_owner_override = kqr->kqr_dsync_owner_qos;
- old_owner_override_is_sync = kqr->kqr_owner_override_is_sync;
+ cur_override = kqworkloop_override(kqwl);
- if (filt_wlowner_is_valid(new_owner)) {
+ if (new_owner) {
/* override it before we drop the old */
if (cur_override != THREAD_QOS_UNSPECIFIED) {
- thread_add_ipc_override(new_owner, cur_override);
+ thread_add_kevent_override(new_owner, cur_override);
}
- if (ipc_override_is_sync) {
- thread_add_sync_ipc_override(new_owner);
- }
- /* Update the kqr to indicate that owner has sync ipc override */
- kqr->kqr_dsync_owner_qos = cur_override;
- kqr->kqr_owner_override_is_sync = ipc_override_is_sync;
- thread_starts_owning_workloop(new_owner);
- if ((kqr->kqr_state & (KQR_THREQUESTED | KQR_BOUND)) == KQR_THREQUESTED) {
+ if (kqr_thread_requested_pending(kqr)) {
if (action == KQWL_UTQ_NONE) {
action = KQWL_UTQ_REDRIVE_EVENTS;
}
}
- } else if (new_owner == THREAD_NULL) {
- kqr->kqr_dsync_owner_qos = THREAD_QOS_UNSPECIFIED;
- kqr->kqr_owner_override_is_sync = false;
- if ((kqr->kqr_state & (KQR_THREQUESTED | KQR_WAKEUP)) == KQR_WAKEUP) {
+ } else {
+ if (!kqr_thread_requested(kqr) && kqr->tr_kq_wakeup) {
if (action == KQWL_UTQ_NONE) {
action = KQWL_UTQ_REDRIVE_EVENTS;
}
}
if (action != KQWL_UTQ_NONE) {
- kqworkloop_update_threads_qos(kqwl, action, async_qos);
+ kqworkloop_update_threads_qos(kqwl, action, qos_index);
}
- kqwl_req_unlock(kqwl);
-
- /* Now that we are unlocked, drop the override and ref on old owner */
- if (new_owner != cur_owner && filt_wlowner_is_valid(cur_owner)) {
- if (old_owner_override != THREAD_QOS_UNSPECIFIED) {
- thread_drop_ipc_override(cur_owner);
+ ts = kqwl->kqwl_turnstile;
+ if (cur_owner != new_owner && ts) {
+ if (action == KQWL_UTQ_REDRIVE_EVENTS) {
+ /*
+ * Note that when action is KQWL_UTQ_REDRIVE_EVENTS,
+ * the code went through workq_kern_threadreq_initiate()
+ * and the workqueue has set the inheritor already
+ */
+ assert(filt_wlturnstile_interlock_is_workq(kqwl));
+ } else if (filt_wlturnstile_interlock_is_workq(kqwl)) {
+ workq_kern_threadreq_lock(kqwl->kqwl_p);
+ workq_kern_threadreq_update_inheritor(kqwl->kqwl_p, kqr, new_owner,
+ ts, TURNSTILE_IMMEDIATE_UPDATE);
+ workq_kern_threadreq_unlock(kqwl->kqwl_p);
+ if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
+ /*
+ * If the workq is no longer the interlock, then
+ * workq_kern_threadreq_update_inheritor() has finished a bind
+ * and we need to fallback to the regular path.
+ */
+ filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
+ }
+ wl_inheritor_updated = true;
+ } else {
+ filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
+ wl_inheritor_updated = true;
}
- if (old_owner_override_is_sync) {
- thread_drop_sync_ipc_override(cur_owner);
+
+ /*
+ * We need a turnstile reference because we are dropping the interlock
+ * and the caller has not called turnstile_prepare.
+ */
+ if (wl_inheritor_updated) {
+ turnstile_reference(ts);
}
- thread_ends_owning_workloop(cur_owner);
- thread_deallocate(cur_owner);
}
-out:
- if (extra_thread_ref) {
- thread_deallocate(extra_thread_ref);
+ if (needs_wake && ts) {
+ waitq_wakeup64_thread(&ts->ts_waitq, knote_filt_wev64(kn),
+ kn->kn_thread, THREAD_AWAKENED);
+ if (op == FILT_WLATTACH || op == FILT_WLTOUCH) {
+ disable_preemption();
+ error = EPREEMPTDISABLED;
+ }
}
- return error;
-}
-static int
-filt_wldebounce(
- struct kqworkloop *kqwl,
- struct kevent_internal_s *kev,
- int default_result)
-{
- user_addr_t addr = CAST_USER_ADDR_T(kev->ext[EV_EXTIDX_WL_ADDR]);
- uint64_t udata;
- int error;
+ kqunlock(kqwl);
- /* we must have the workloop state mutex held */
- filt_wlheld(kqwl);
+out:
+ /*
+ * Phase 3:
+ *
+ * Unlock and clean up various lingering references.
+ */
+ filt_wlunlock(kqwl);
- /* Do we have a debounce address to work with? */
- if (addr) {
- uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
- uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
+#if CONFIG_WORKLOOP_DEBUG
+ KQWL_HISTORY_WRITE_ENTRY(kqwl, {
+ .updater = current_thread(),
+ .servicer = kqr_thread(kqr), /* Note: racy */
+ .old_owner = cur_owner,
+ .new_owner = new_owner,
- error = copyin_word(addr, &udata, sizeof(udata));
- if (error) {
- return error;
- }
+ .kev_ident = kev->ident,
+ .error = (int16_t)error,
+ .kev_flags = kev->flags,
+ .kev_fflags = kev->fflags,
- /* update state as copied in */
- kev->ext[EV_EXTIDX_WL_VALUE] = udata;
+ .kev_mask = mask,
+ .kev_value = kdata,
+ .in_value = udata,
+ });
+#endif // CONFIG_WORKLOOP_DEBUG
- /* If the masked bits don't match, reject it as stale */
- if ((udata & mask) != (kdata & mask)) {
- return ESTALE;
- }
+ if (wl_inheritor_updated) {
+ turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
+ turnstile_deallocate_safe(ts);
+ }
-#if DEBUG || DEVELOPMENT
- if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && !(kev->flags & EV_DELETE)) {
- if ((udata & DISPATCH_QUEUE_ENQUEUED) == 0 &&
- (udata >> 48) != 0 && (udata >> 48) != 0xffff) {
- panic("kevent: workloop %#016llx is not enqueued "
- "(kev:%p dq_state:%#016llx)", kev->udata, kev, udata);
- }
+ if (cur_owner && new_owner != cur_owner) {
+ if (cur_override != THREAD_QOS_UNSPECIFIED) {
+ thread_drop_kevent_override(cur_owner);
}
-#endif
+ thread_deallocate_safe(cur_owner);
}
-
- return default_result;
+ if (extra_thread_ref) {
+ thread_deallocate_safe(extra_thread_ref);
+ }
+ return error;
}
/*
* - data is set to the error if any
*/
static inline void
-filt_wlremember_last_update(
- __assert_only struct kqworkloop *kqwl,
- struct knote *kn,
- struct kevent_internal_s *kev,
- int error)
+filt_wlremember_last_update(struct knote *kn, struct kevent_qos_s *kev,
+ int error)
{
- filt_wlheld(kqwl);
kn->kn_fflags = kev->fflags;
- kn->kn_data = error;
+ kn->kn_sdata = error;
memcpy(kn->kn_ext, kev->ext, sizeof(kev->ext));
}
-/*
- * Return which operations on EVFILT_WORKLOOP need to be protected against
- * knoteusewait() causing priority inversions.
- */
-static bool
-filt_wlneeds_boost(struct kevent_internal_s *kev)
-{
- if (kev == NULL) {
- /*
- * this is an f_process() usecount, and it can cause a drop to wait
- */
- return true;
+static int
+filt_wlupdate_sync_ipc(struct kqworkloop *kqwl, struct knote *kn,
+ struct kevent_qos_s *kev, int op)
+{
+ uint64_t uaddr = kev->ext[EV_EXTIDX_WL_ADDR];
+ uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
+ uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
+ uint64_t udata = 0;
+ int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
+ int error = 0;
+
+ if (op == FILT_WLATTACH) {
+ (void)kqueue_alloc_turnstile(&kqwl->kqwl_kqueue);
+ } else if (uaddr == 0) {
+ return 0;
}
- if (kev->fflags & NOTE_WL_THREAD_REQUEST) {
+
+ filt_wllock(kqwl);
+
+again:
+
+ /*
+ * Do the debounce thing; the lock serializing the state is the knote lock.
+ */
+ if (uaddr) {
/*
- * All operations on thread requests may starve drops or re-attach of
- * the same knote, all of them need boosts. None of what we do under
- * thread-request usecount holds blocks anyway.
+ * Until a fix for <rdar://problem/24999882> exists,
+ * doing the copyin with preemption disabled forces any
+ * vm_fault we encounter to fail.
*/
- return true;
- }
- if (kev->fflags & NOTE_WL_SYNC_WAIT) {
+ error = copyin_atomic64(uaddr, &udata);
+
/*
- * this may call filt_wlwait() and we don't want to hold any boost when
- * woken up, this would cause background threads contending on
- * dispatch_sync() to wake up at 64 and be preempted immediately when
- * this drops.
+ * If we get EFAULT, drop locks, and retry.
+ * If we still get an error, report it;
+ * else assume the memory has been faulted in
+ * and attempt the copyin under the lock again.
*/
- return false;
- }
+ switch (error) {
+ case 0:
+ break;
+ case EFAULT:
+ if (efault_retry-- > 0) {
+ filt_wlunlock(kqwl);
+ error = copyin_atomic64(uaddr, &udata);
+ filt_wllock(kqwl);
+ if (error == 0) {
+ goto again;
+ }
+ }
+ /* FALLTHROUGH */
+ default:
+ goto out;
+ }
- /*
- * SYNC_WAIT knotes when deleted don't need to be rushed, there's no
- * detach/reattach race with these ever. In addition to this, when the
- * SYNC_WAIT knote is dropped, the caller is no longer receiving the
- * workloop overrides if any, and we'd rather schedule other threads than
- * him, he's not possibly stalling anything anymore.
- */
- return (kev->flags & EV_DELETE) == 0;
-}
+ kev->ext[EV_EXTIDX_WL_VALUE] = udata;
+ kn->kn_ext[EV_EXTIDX_WL_VALUE] = udata;
-static int
-filt_wlattach(struct knote *kn, struct kevent_internal_s *kev)
+ if ((udata & mask) != (kdata & mask)) {
+ error = ESTALE;
+ goto out;
+ }
+ }
+
+ if (op == FILT_WLATTACH) {
+ error = filt_wlattach_sync_ipc(kn);
+ if (error == 0) {
+ disable_preemption();
+ error = EPREEMPTDISABLED;
+ }
+ }
+
+out:
+ filt_wlunlock(kqwl);
+ return error;
+}
+
+static int
+filt_wlattach(struct knote *kn, struct kevent_qos_s *kev)
{
struct kqueue *kq = knote_get_kq(kn);
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- int error = 0;
+ int error = 0, result = 0;
kq_index_t qos_index = 0;
- if ((kq->kq_state & KQ_WORKLOOP) == 0) {
+ if (__improbable((kq->kq_state & KQ_WORKLOOP) == 0)) {
error = ENOTSUP;
goto out;
}
-#if DEVELOPMENT || DEBUG
- if (kev->ident == 0 && kev->udata == 0 && kev->fflags == 0) {
- struct kqrequest *kqr = &kqwl->kqwl_request;
-
- kqwl_req_lock(kqwl);
- kev->fflags = 0;
- if (kqr->kqr_dsync_waiters) {
- kev->fflags |= NOTE_WL_SYNC_WAIT;
- }
- if (kqr->kqr_qos_index) {
- kev->fflags |= NOTE_WL_THREAD_REQUEST;
- }
- if (kqwl->kqwl_owner == WL_OWNER_SUSPENDED) {
- kev->ext[0] = ~0ull;
- } else {
- kev->ext[0] = thread_tid(kqwl->kqwl_owner);
- }
- kev->ext[1] = thread_tid(kqwl->kqwl_request.kqr_thread);
- kev->ext[2] = thread_owned_workloops_count(current_thread());
- kev->ext[3] = kn->kn_kevent.ext[3];
- kqwl_req_unlock(kqwl);
- error = EBUSY;
- goto out;
- }
-#endif
-
- /* Some simple validation */
- int command = (kn->kn_sfflags & NOTE_WL_COMMANDS_MASK);
+ uint32_t command = (kn->kn_sfflags & NOTE_WL_COMMANDS_MASK);
switch (command) {
case NOTE_WL_THREAD_REQUEST:
if (kn->kn_id != kqwl->kqwl_dynamicid) {
error = EINVAL;
goto out;
}
- qos_index = qos_index_from_qos(kn, kn->kn_qos, FALSE);
- if (qos_index < THREAD_QOS_MAINTENANCE ||
- qos_index > THREAD_QOS_USER_INTERACTIVE) {
+ qos_index = _pthread_priority_thread_qos(kn->kn_qos);
+ if (qos_index == THREAD_QOS_UNSPECIFIED) {
error = ERANGE;
goto out;
}
+ if (kqwl->kqwl_request.tr_kq_qos_index) {
+ /*
+ * There already is a thread request, and well, you're only allowed
+ * one per workloop, so fail the attach.
+ */
+ error = EALREADY;
+ goto out;
+ }
break;
case NOTE_WL_SYNC_WAIT:
case NOTE_WL_SYNC_WAKE:
- if (kq->kq_state & KQ_NO_WQ_THREAD) {
- error = ENOTSUP;
- goto out;
- }
if (kn->kn_id == kqwl->kqwl_dynamicid) {
error = EINVAL;
goto out;
goto out;
}
break;
+
+ case NOTE_WL_SYNC_IPC:
+ if ((kn->kn_flags & EV_DISABLE) == 0) {
+ error = EINVAL;
+ goto out;
+ }
+ if (kn->kn_sfflags & (NOTE_WL_UPDATE_QOS | NOTE_WL_DISCOVER_OWNER)) {
+ error = EINVAL;
+ goto out;
+ }
+ break;
default:
error = EINVAL;
goto out;
}
- filt_wllock(kqwl);
- kn->kn_hook = NULL;
-
- if (command == NOTE_WL_THREAD_REQUEST && kqwl->kqwl_request.kqr_qos_index) {
- /*
- * There already is a thread request, and well, you're only allowed
- * one per workloop, so fail the attach.
- *
- * Note: kqr_qos_index is always set with the wllock held, so we
- * don't need to take the kqr lock.
- */
- error = EALREADY;
+ if (command == NOTE_WL_SYNC_IPC) {
+ error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLATTACH);
} else {
- /* Make sure user and kernel are in agreement on important state */
- error = filt_wldebounce(kqwl, kev, 0);
+ error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLATTACH);
}
- error = filt_wlupdateowner(kqwl, kev, error, qos_index);
- filt_wlunlock(kqwl);
+ if (error == EPREEMPTDISABLED) {
+ error = 0;
+ result = FILTER_THREADREQ_NODEFEER;
+ }
out:
if (error) {
- kn->kn_flags |= EV_ERROR;
/* If userland wants ESTALE to be hidden, fail the attach anyway */
if (error == ESTALE && (kn->kn_sfflags & NOTE_WL_IGNORE_ESTALE)) {
error = 0;
}
- kn->kn_data = error;
- return 0;
+ knote_set_error(kn, error);
+ return result;
+ }
+ if (command == NOTE_WL_SYNC_WAIT) {
+ return kevent_register_wait_prepare(kn, kev, result);
}
-
/* Just attaching the thread request successfully will fire it */
- return command == NOTE_WL_THREAD_REQUEST;
+ if (command == NOTE_WL_THREAD_REQUEST) {
+ /*
+ * Thread Request knotes need an explicit touch to be active again,
+ * so delivering an event needs to also consume it.
+ */
+ kn->kn_flags |= EV_CLEAR;
+ return result | FILTER_ACTIVE;
+ }
+ return result;
}
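+
+/*
+ * Continuation that runs when a NOTE_WL_SYNC_WAIT wait ends: it tears down the
+ * workloop turnstile, translates the wait result (EINTR on interruption), and
+ * returns to userspace through kevent_register_wait_return().
+ */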
-__attribute__((noinline,not_tail_called))
-static int
-filt_wlwait(struct kqworkloop *kqwl,
- struct knote *kn,
- struct kevent_internal_s *kev)
+static void __dead2
+filt_wlwait_continue(void *parameter, wait_result_t wr)
{
- filt_wlheld(kqwl);
- assert((kn->kn_sfflags & NOTE_WL_SYNC_WAKE) == 0);
+ struct _kevent_register *cont_args = parameter;
+ struct kqworkloop *kqwl = cont_args->kqwl;
- /*
- * Hint to the wakeup side that this thread is waiting. Also used by
- * stackshot for waitinfo.
- */
- kn->kn_hook = current_thread();
-
- thread_set_pending_block_hint(current_thread(), kThreadWaitWorkloopSyncWait);
+ kqlock(kqwl);
+ if (filt_wlturnstile_interlock_is_workq(kqwl)) {
+ workq_kern_threadreq_lock(kqwl->kqwl_p);
+ turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
+ workq_kern_threadreq_unlock(kqwl->kqwl_p);
+ } else {
+ turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
+ }
+ kqunlock(kqwl);
- wait_result_t wr = assert_wait(kn, THREAD_ABORTSAFE);
+ turnstile_cleanup();
- if (wr == THREAD_WAITING) {
- kq_index_t qos_index = qos_index_from_qos(kn, kev->qos, TRUE);
- struct kqrequest *kqr = &kqwl->kqwl_request;
+ if (wr == THREAD_INTERRUPTED) {
+ cont_args->kev.flags |= EV_ERROR;
+ cont_args->kev.data = EINTR;
+ } else if (wr != THREAD_AWAKENED) {
+ panic("Unexpected wait result: %d", wr);
+ }
- thread_t thread_to_handoff = THREAD_NULL; /* holds +1 thread ref */
+ kevent_register_wait_return(cont_args);
+}
- thread_t kqwl_owner = kqwl->kqwl_owner;
- if (filt_wlowner_is_valid(kqwl_owner)) {
- thread_reference(kqwl_owner);
- thread_to_handoff = kqwl_owner;
- }
+/*
+ * Called with the workloop mutex held; most of the time this does not return,
+ * as it calls filt_wlwait_continue through a continuation.
+ */
+static void __dead2
+filt_wlpost_register_wait(struct uthread *uth, struct knote *kn,
+ struct _kevent_register *cont_args)
+{
+ struct kqworkloop *kqwl = cont_args->kqwl;
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
+ struct turnstile *ts;
+ bool workq_locked = false;
- kqwl_req_lock(kqwl);
+ kqlock_held(kqwl);
- if (qos_index) {
- assert(kqr->kqr_dsync_waiters < UINT16_MAX);
- kqr->kqr_dsync_waiters++;
- if (qos_index > kqr->kqr_dsync_waiters_qos) {
- kqworkloop_update_threads_qos(kqwl,
- KQWL_UTQ_SET_SYNC_WAITERS_QOS, qos_index);
- }
- }
+ if (filt_wlturnstile_interlock_is_workq(kqwl)) {
+ workq_kern_threadreq_lock(kqwl->kqwl_p);
+ workq_locked = true;
+ }
- if ((kqr->kqr_state & KQR_BOUND) && thread_to_handoff == THREAD_NULL) {
- assert(kqr->kqr_thread != THREAD_NULL);
- thread_t servicer = kqr->kqr_thread;
+ ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
+ TURNSTILE_NULL, TURNSTILE_WORKLOOPS);
- thread_reference(servicer);
- thread_to_handoff = servicer;
+ if (workq_locked) {
+ workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
+ &kqwl->kqwl_request, kqwl->kqwl_owner, ts,
+ TURNSTILE_DELAYED_UPDATE);
+ if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
+ /*
+ * if the interlock is no longer the workqueue lock,
+ * then we don't need to hold it anymore.
+ */
+ workq_kern_threadreq_unlock(kqwl->kqwl_p);
+ workq_locked = false;
}
+ }
+ if (!workq_locked) {
+ /*
+ * If the interlock is the workloop's, then it's our responsibility to
+ * call update_inheritor, so just do it.
+ */
+ filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_DELAYED_UPDATE);
+ }
- kqwl_req_unlock(kqwl);
-
- filt_wlunlock(kqwl);
-
- /* TODO: use continuation based blocking <rdar://problem/31299584> */
-
- /* consume a refcount on thread_to_handoff, then thread_block() */
- wr = thread_handoff(thread_to_handoff);
- thread_to_handoff = THREAD_NULL;
-
- filt_wllock(kqwl);
-
- /* clear waiting state (only one waiting thread - so no race) */
- assert(kn->kn_hook == current_thread());
+ thread_set_pending_block_hint(uth->uu_thread, kThreadWaitWorkloopSyncWait);
+ waitq_assert_wait64(&ts->ts_waitq, knote_filt_wev64(kn),
+ THREAD_ABORTSAFE, TIMEOUT_WAIT_FOREVER);
- if (qos_index) {
- kqwl_req_lock(kqwl);
- assert(kqr->kqr_dsync_waiters > 0);
- if (--kqr->kqr_dsync_waiters == 0) {
- assert(kqr->kqr_dsync_waiters_qos);
- kqworkloop_update_threads_qos(kqwl,
- KQWL_UTQ_SET_SYNC_WAITERS_QOS, 0);
- }
- kqwl_req_unlock(kqwl);
- }
+ if (workq_locked) {
+ workq_kern_threadreq_unlock(kqwl->kqwl_p);
}
- kn->kn_hook = NULL;
-
- switch (wr) {
- case THREAD_AWAKENED:
- return 0;
- case THREAD_INTERRUPTED:
- return EINTR;
- case THREAD_RESTART:
- return ECANCELED;
- default:
- panic("filt_wlattach: unexpected wait result %d", wr);
- return EINVAL;
+ thread_t thread = kqwl->kqwl_owner ?: kqr_thread(kqr);
+ if (thread) {
+ thread_reference(thread);
}
+
+ kevent_register_wait_block(ts, thread, filt_wlwait_continue, cont_args);
}
/* called in stackshot context to report the thread responsible for blocking this thread */
void
kdp_workloop_sync_wait_find_owner(__assert_only thread_t thread,
- event64_t event,
- thread_waitinfo_t *waitinfo)
+ event64_t event, thread_waitinfo_t *waitinfo)
{
- struct knote *kn = (struct knote*) event;
+ struct knote *kn = (struct knote *)event;
assert(kdp_is_in_zone(kn, "knote zone"));
- assert(kn->kn_hook == thread);
+ assert(kn->kn_thread == thread);
struct kqueue *kq = knote_get_kq(kn);
assert(kdp_is_in_zone(kq, "kqueue workloop zone"));
assert(kq->kq_state & KQ_WORKLOOP);
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- struct kqrequest *kqr = &kqwl->kqwl_request;
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
thread_t kqwl_owner = kqwl->kqwl_owner;
- thread_t servicer = kqr->kqr_thread;
- if (kqwl_owner == WL_OWNER_SUSPENDED) {
- waitinfo->owner = STACKSHOT_WAITOWNER_SUSPENDED;
- } else if (kqwl_owner != THREAD_NULL) {
+ if (kqwl_owner != THREAD_NULL) {
assert(kdp_is_in_zone(kqwl_owner, "threads"));
waitinfo->owner = thread_tid(kqwl->kqwl_owner);
- } else if (servicer != THREAD_NULL) {
- assert(kdp_is_in_zone(servicer, "threads"));
-
- waitinfo->owner = thread_tid(servicer);
- } else if (kqr->kqr_state & KQR_THREQUESTED) {
+ } else if (kqr_thread_requested_pending(kqr)) {
waitinfo->owner = STACKSHOT_WAITOWNER_THREQUESTED;
+ } else if (kqr->tr_state >= WORKQ_TR_STATE_BINDING) {
+ assert(kdp_is_in_zone(kqr->tr_thread, "threads"));
+ waitinfo->owner = thread_tid(kqr->tr_thread);
} else {
waitinfo->owner = 0;
}
waitinfo->context = kqwl->kqwl_dynamicid;
-
- return;
-}
-
-/*
- * Takes kqueue locked, returns locked, may drop in the middle and/or block for a while
- */
-static int
-filt_wlpost_attach(struct knote *kn, struct kevent_internal_s *kev)
-{
- struct kqueue *kq = knote_get_kq(kn);
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- int error = 0;
-
- if (kev->fflags & NOTE_WL_SYNC_WAIT) {
- if (kqlock2knoteuse(kq, kn, KNUSE_NONE)) {
- filt_wllock(kqwl);
- /* if the wake has already preposted, don't wait */
- if ((kn->kn_sfflags & NOTE_WL_SYNC_WAKE) == 0)
- error = filt_wlwait(kqwl, kn, kev);
- filt_wlunlock(kqwl);
- knoteuse2kqlock(kq, kn, KNUSE_NONE);
- }
- }
- return error;
}
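+
+/*
+ * NOTE_WL_SYNC_IPC knotes detach through filt_wldetach_sync_ipc(); a knote
+ * that still has a waiter thread cleans up its registration wait state.
+ */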
static void
-filt_wldetach(__assert_only struct knote *kn)
+filt_wldetach(struct knote *kn)
{
- assert(knote_get_kq(kn)->kq_state & KQ_WORKLOOP);
-
- /*
- * Thread requests have nothing to detach.
- * Sync waiters should have been aborted out
- * and drop their refs before we could drop/
- * detach their knotes.
- */
- assert(kn->kn_hook == NULL);
+ if (kn->kn_sfflags & NOTE_WL_SYNC_IPC) {
+ filt_wldetach_sync_ipc(kn);
+ } else if (kn->kn_thread) {
+ kevent_register_wait_cleanup(kn);
+ }
}
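+
+/*
+ * Validate the flags of an incoming touch/drop kevent against the command this
+ * knote was registered with (THREAD_REQUEST, SYNC_WAIT/WAKE or SYNC_IPC).
+ * Returns 0, EINVAL or ERANGE, and fills *qos_index for NOTE_WL_UPDATE_QOS.
+ */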
static int
-filt_wlevent(
- __unused struct knote *kn,
- __unused long hint)
+filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_qos_s *kev,
+ thread_qos_t *qos_index)
{
- panic("filt_wlevent");
- return 0;
-}
+ uint32_t new_commands = kev->fflags & NOTE_WL_COMMANDS_MASK;
+ uint32_t sav_commands = kn->kn_sfflags & NOTE_WL_COMMANDS_MASK;
-static int
-filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_internal_s *kev)
-{
- int new_commands = kev->fflags & NOTE_WL_COMMANDS_MASK;
- int sav_commands = kn->kn_sfflags & NOTE_WL_COMMANDS_MASK;
- int error = 0;
+ if ((kev->fflags & NOTE_WL_DISCOVER_OWNER) && (kev->flags & EV_DELETE)) {
+ return EINVAL;
+ }
+ if (kev->fflags & NOTE_WL_UPDATE_QOS) {
+ if (kev->flags & EV_DELETE) {
+ return EINVAL;
+ }
+ if (sav_commands != NOTE_WL_THREAD_REQUEST) {
+ return EINVAL;
+ }
+ if (!(*qos_index = _pthread_priority_thread_qos(kev->qos))) {
+ return ERANGE;
+ }
+ }
switch (new_commands) {
case NOTE_WL_THREAD_REQUEST:
/* thread requests can only update themselves */
- if (sav_commands != new_commands)
- error = EINVAL;
+ if (sav_commands != NOTE_WL_THREAD_REQUEST) {
+ return EINVAL;
+ }
break;
case NOTE_WL_SYNC_WAIT:
- if (kev->fflags & NOTE_WL_END_OWNERSHIP)
- error = EINVAL;
- /* FALLTHROUGH */
+ if (kev->fflags & NOTE_WL_END_OWNERSHIP) {
+ return EINVAL;
+ }
+ goto sync_checks;
+
case NOTE_WL_SYNC_WAKE:
- /* waits and wakes can update themselves or their counterparts */
- if (!(sav_commands & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)))
- error = EINVAL;
- if (kev->fflags & NOTE_WL_UPDATE_QOS)
- error = EINVAL;
- if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE)
- error = EINVAL;
- if (kev->flags & EV_DELETE) {
- /*
- * Really this is not supported: there is absolutely no reason
- * whatsoever to want to fail the drop of a NOTE_WL_SYNC_WAIT knote.
- */
- if (kev->ext[EV_EXTIDX_WL_ADDR] && kev->ext[EV_EXTIDX_WL_MASK]) {
- error = EINVAL;
- }
+sync_checks:
+ if (!(sav_commands & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE))) {
+ return EINVAL;
+ }
+ if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
+ return EINVAL;
+ }
+ break;
+
+ case NOTE_WL_SYNC_IPC:
+ if (sav_commands != NOTE_WL_SYNC_IPC) {
+ return EINVAL;
+ }
+ if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
+ return EINVAL;
}
break;
default:
- error = EINVAL;
- }
- if ((kev->flags & EV_DELETE) && (kev->fflags & NOTE_WL_DISCOVER_OWNER)) {
- error = EINVAL;
+ return EINVAL;
}
- return error;
+ return 0;
}
static int
-filt_wltouch(
- struct knote *kn,
- struct kevent_internal_s *kev)
+filt_wltouch(struct knote *kn, struct kevent_qos_s *kev)
{
- struct kqueue *kq = knote_get_kq(kn);
- int error = 0;
- struct kqworkloop *kqwl;
-
- assert(kq->kq_state & KQ_WORKLOOP);
- kqwl = (struct kqworkloop *)kq;
+ struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
+ thread_qos_t qos_index = THREAD_QOS_UNSPECIFIED;
+ int result = 0;
- error = filt_wlvalidate_kev_flags(kn, kev);
+ int error = filt_wlvalidate_kev_flags(kn, kev, &qos_index);
if (error) {
goto out;
}
- filt_wllock(kqwl);
-
- /* Make sure user and kernel are in agreement on important state */
- error = filt_wldebounce(kqwl, kev, 0);
- if (error) {
- error = filt_wlupdateowner(kqwl, kev, error, 0);
- goto out_unlock;
+ uint32_t command = kev->fflags & NOTE_WL_COMMANDS_MASK;
+ if (command == NOTE_WL_SYNC_IPC) {
+ error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLTOUCH);
+ } else {
+ error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLTOUCH);
+ filt_wlremember_last_update(kn, kev, error);
+ }
+ if (error == EPREEMPTDISABLED) {
+ error = 0;
+ result = FILTER_THREADREQ_NODEFEER;
}
- int new_command = kev->fflags & NOTE_WL_COMMANDS_MASK;
- switch (new_command) {
- case NOTE_WL_THREAD_REQUEST:
- assert(kqwl->kqwl_request.kqr_qos_index != THREAD_QOS_UNSPECIFIED);
- break;
-
- case NOTE_WL_SYNC_WAIT:
- /*
- * we need to allow waiting several times on the same knote because
- * of EINTR. If it's already woken though, it won't block.
- */
- break;
-
- case NOTE_WL_SYNC_WAKE:
- if (kn->kn_sfflags & NOTE_WL_SYNC_WAKE) {
- /* disallow waking the same knote twice */
- error = EALREADY;
- goto out_unlock;
- }
- if (kn->kn_hook) {
- thread_wakeup_thread((event_t)kn, (thread_t)kn->kn_hook);
+out:
+ if (error) {
+ if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
+ /* If userland wants ESTALE to be hidden, do not activate */
+ return result;
}
- break;
-
- default:
- error = EINVAL;
- goto out_unlock;
+ kev->flags |= EV_ERROR;
+ kev->data = error;
+ return result;
}
-
- /*
- * Save off any additional fflags/data we just accepted
- * But only keep the last round of "update" bits we acted on which helps
- * debugging a lot.
- */
- kn->kn_sfflags &= ~NOTE_WL_UPDATES_MASK;
- kn->kn_sfflags |= kev->fflags;
- kn->kn_sdata = kev->data;
-
- kq_index_t qos_index = THREAD_QOS_UNSPECIFIED;
-
- if (kev->fflags & NOTE_WL_UPDATE_QOS) {
- qos_t qos = pthread_priority_canonicalize(kev->qos, FALSE);
-
- if (kn->kn_qos != qos) {
- qos_index = qos_index_from_qos(kn, qos, FALSE);
- if (qos_index == THREAD_QOS_UNSPECIFIED) {
- error = ERANGE;
- goto out_unlock;
- }
- kqlock(kq);
- if (kn->kn_status & KN_QUEUED) {
- knote_dequeue(kn);
- knote_set_qos_index(kn, qos_index);
- knote_enqueue(kn);
- knote_wakeup(kn);
- } else {
- knote_set_qos_index(kn, qos_index);
- }
- kn->kn_qos = qos;
- kqunlock(kq);
+ if (command == NOTE_WL_SYNC_WAIT && !(kn->kn_sfflags & NOTE_WL_SYNC_WAKE)) {
+ return kevent_register_wait_prepare(kn, kev, result);
+ }
+ /* Just touching the thread request successfully will fire it */
+ if (command == NOTE_WL_THREAD_REQUEST) {
+ if (kev->fflags & NOTE_WL_UPDATE_QOS) {
+ result |= FILTER_UPDATE_REQ_QOS;
}
+ result |= FILTER_ACTIVE;
}
+ return result;
+}
+
+static bool
+filt_wlallow_drop(struct knote *kn, struct kevent_qos_s *kev)
+{
+ struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
- error = filt_wlupdateowner(kqwl, kev, 0, qos_index);
+ int error = filt_wlvalidate_kev_flags(kn, kev, NULL);
if (error) {
- goto out_unlock;
+ goto out;
}
- if (new_command == NOTE_WL_SYNC_WAIT) {
- /* if the wake has already preposted, don't wait */
- if ((kn->kn_sfflags & NOTE_WL_SYNC_WAKE) == 0)
- error = filt_wlwait(kqwl, kn, kev);
+ uint32_t command = (kev->fflags & NOTE_WL_COMMANDS_MASK);
+ if (command == NOTE_WL_SYNC_IPC) {
+ error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLDROP);
+ } else {
+ error = filt_wlupdate(kqwl, kn, kev, 0, FILT_WLDROP);
+ filt_wlremember_last_update(kn, kev, error);
}
+ assert(error != EPREEMPTDISABLED);
-out_unlock:
- filt_wlremember_last_update(kqwl, kn, kev, error);
- filt_wlunlock(kqwl);
out:
if (error) {
if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
- /* If userland wants ESTALE to be hidden, do not activate */
- return 0;
+ return false;
}
kev->flags |= EV_ERROR;
kev->data = error;
- return 0;
+ return false;
}
- /* Just touching the thread request successfully will fire it */
- return new_command == NOTE_WL_THREAD_REQUEST;
+ return true;
}
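+
+/*
+ * filt_wlprocess only runs for NOTE_WL_THREAD_REQUEST knotes: if the workloop
+ * currently has an owner, the knote is re-activated so the event is not
+ * swallowed; otherwise the thread request event is delivered.
+ */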
static int
-filt_wldrop_and_unlock(
- struct knote *kn,
- struct kevent_internal_s *kev)
+filt_wlprocess(struct knote *kn, struct kevent_qos_s *kev)
{
- struct kqueue *kq = knote_get_kq(kn);
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- int error = 0, knoteuse_flags = KNUSE_NONE;
+ struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
+ int rc = 0;
- kqlock_held(kq);
+ assert(kn->kn_sfflags & NOTE_WL_THREAD_REQUEST);
- assert(kev->flags & EV_DELETE);
- assert(kq->kq_state & KQ_WORKLOOP);
+ kqlock(kqwl);
- error = filt_wlvalidate_kev_flags(kn, kev);
- if (error) {
- goto out;
+ if (kqwl->kqwl_owner) {
+ /*
+ * <rdar://problem/33584321> userspace can sometimes, when events are
+ * delivered without triggering a drain session, cause the thread request
+ * knote to be processed.
+ *
+ * When that happens, the automatic deactivation due to process
+ * would swallow the event, so we have to activate the knote again.
+ */
+ knote_activate(kqwl, kn, FILTER_ACTIVE);
+ } else {
+#if DEBUG || DEVELOPMENT
+ if (kevent_debug_flags() & KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS) {
+ /*
+ * see src/queue_internal.h in libdispatch
+ */
+#define DISPATCH_QUEUE_ENQUEUED 0x1ull
+ user_addr_t addr = CAST_USER_ADDR_T(kn->kn_ext[EV_EXTIDX_WL_ADDR]);
+ task_t t = current_task();
+ uint64_t val;
+ if (addr && task_is_active(t) && !task_is_halting(t) &&
+ copyin_atomic64(addr, &val) == 0 &&
+ val && (val & DISPATCH_QUEUE_ENQUEUED) == 0 &&
+ (val >> 48) != 0xdead && (val >> 48) != 0 && (val >> 48) != 0xffff) {
+ panic("kevent: workloop %#016llx is not enqueued "
+ "(kn:%p dq_state:%#016llx kev.dq_state:%#016llx)",
+ kn->kn_udata, kn, val, kn->kn_ext[EV_EXTIDX_WL_VALUE]);
+ }
+ }
+#endif
+ knote_fill_kevent(kn, kev, 0);
+ kev->fflags = kn->kn_sfflags;
+ rc |= FILTER_ACTIVE;
}
- if (kn->kn_sfflags & NOTE_WL_THREAD_REQUEST) {
- knoteuse_flags |= KNUSE_BOOST;
- }
+ kqunlock(kqwl);
- /* take a usecount to allow taking the filt_wllock */
- if (!kqlock2knoteuse(kq, kn, knoteuse_flags)) {
- /* knote is being dropped already */
- error = EINPROGRESS;
- goto out;
+ if (rc & FILTER_ACTIVE) {
+ workq_thread_set_max_qos(kqwl->kqwl_p, &kqwl->kqwl_request);
}
+ return rc;
+}
- filt_wllock(kqwl);
-
- /*
- * Make sure user and kernel are in agreement on important state
- *
- * Userland will modify bits to cause this to fail for the touch / drop
- * race case (when a drop for a thread request quiescing comes in late after
- * the workloop has been woken up again).
- */
- error = filt_wldebounce(kqwl, kev, 0);
+SECURITY_READ_ONLY_EARLY(static struct filterops) workloop_filtops = {
+ .f_extended_codes = true,
+ .f_attach = filt_wlattach,
+ .f_detach = filt_wldetach,
+ .f_event = filt_bad_event,
+ .f_touch = filt_wltouch,
+ .f_process = filt_wlprocess,
+ .f_allow_drop = filt_wlallow_drop,
+ .f_post_register_wait = filt_wlpost_register_wait,
+};
- if (!knoteuse2kqlock(kq, kn, knoteuse_flags)) {
- /* knote is no longer alive */
- error = EINPROGRESS;
- goto out_unlock;
- }
+#pragma mark - kqueues allocation and deallocation
- if (!error && (kn->kn_sfflags & NOTE_WL_THREAD_REQUEST) && kn->kn_inuse) {
- /*
- * There is a concurrent drop or touch happening, we can't resolve this,
- * userland has to redrive.
- *
- * The race we're worried about here is the following:
- *
- * f_touch | f_drop_and_unlock
- * ------------------------+--------------------------------------------
- * | kqlock()
- * | kqlock2knoteuse()
- * | filt_wllock()
- * | debounces successfully
- * kqlock() |
- * kqlock2knoteuse |
- * filt_wllock() <BLOCKS> |
- * | knoteuse2kqlock()
- * | filt_wlunlock()
- * | kqlock2knotedrop() <BLOCKS, WAKES f_touch>
- * debounces successfully |
- * filt_wlunlock() |
- * caller WAKES f_drop |
- * | performs drop, but f_touch should have won
- *
- * So if the usecount is not 0 here, we need to wait for it to drop and
- * redrive the whole logic (including looking up the knote again).
- */
- filt_wlunlock(kqwl);
- knoteusewait(kq, kn);
- return ERESTART;
- }
-
- /*
- * If error is 0 this will set kqr_qos_index to THREAD_QOS_UNSPECIFIED
- *
- * If error is 0 or ESTALE this may drop ownership and cause a thread
- * request redrive, however the kqlock is held which prevents f_process() to
- * run until we did the drop for real.
- */
- error = filt_wlupdateowner(kqwl, kev, error, 0);
- if (error) {
- goto out_unlock;
- }
-
- if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) ==
- NOTE_WL_SYNC_WAIT) {
- /*
- * When deleting a SYNC_WAIT knote that hasn't been woken up
- * explicitly, issue a wake up.
- */
- kn->kn_sfflags |= NOTE_WL_SYNC_WAKE;
- if (kn->kn_hook) {
- thread_wakeup_thread((event_t)kn, (thread_t)kn->kn_hook);
- }
- }
+/*!
+ * @enum kqworkloop_dealloc_flags_t
+ *
+ * @brief
+ * Flags that alter kqworkloop_dealloc() behavior.
+ *
+ * @const KQWL_DEALLOC_NONE
+ * Convenient name for "no flags".
+ *
+ * @const KQWL_DEALLOC_SKIP_HASH_REMOVE
+ * Do not remove the workloop from the hash table.
+ * This is used for process tear-down codepaths as the workloops have been
+ * removed by the caller already.
+ */
+OS_OPTIONS(kqworkloop_dealloc_flags, unsigned,
+ KQWL_DEALLOC_NONE = 0x0000,
+ KQWL_DEALLOC_SKIP_HASH_REMOVE = 0x0001,
+ );
-out_unlock:
- filt_wlremember_last_update(kqwl, kn, kev, error);
- filt_wlunlock(kqwl);
+static void
+kqworkloop_dealloc(struct kqworkloop *, kqworkloop_dealloc_flags_t, uint32_t);
-out:
- if (error == 0) {
- /* If nothing failed, do the regular knote drop. */
- if (kqlock2knotedrop(kq, kn)) {
- knote_drop(kn, current_proc());
- } else {
- error = EINPROGRESS;
- }
+OS_NOINLINE OS_COLD OS_NORETURN
+static void
+kqworkloop_retain_panic(struct kqworkloop *kqwl, uint32_t previous)
+{
+ if (previous == 0) {
+ panic("kq(%p) resurrection", kqwl);
} else {
- kqunlock(kq);
+ panic("kq(%p) retain overflow", kqwl);
}
- if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
- error = 0;
- }
- if (error == EINPROGRESS) {
- /*
- * filt_wlprocess() makes sure that no event can be delivered for
- * NOTE_WL_THREAD_REQUEST knotes once a drop is happening, and
- * NOTE_WL_SYNC_* knotes are never fired.
- *
- * It means that EINPROGRESS is about a state that userland cannot
- * observe for this filter (an event being delivered concurrently from
- * a drop), so silence the error.
- */
- error = 0;
- }
- return error;
}
-static int
-filt_wlprocess(
- struct knote *kn,
- __unused struct filt_process_s *data,
- struct kevent_internal_s *kev)
+OS_NOINLINE OS_COLD OS_NORETURN
+static void
+kqworkloop_release_panic(struct kqworkloop *kqwl)
{
- struct kqueue *kq = knote_get_kq(kn);
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- struct kqrequest *kqr = &kqwl->kqwl_request;
- int rc = 0;
-
- assert(kq->kq_state & KQ_WORKLOOP);
+ panic("kq(%p) over-release", kqwl);
+}
- /* only thread requests should get here */
- assert(kn->kn_sfflags & NOTE_WL_THREAD_REQUEST);
- if (kn->kn_sfflags & NOTE_WL_THREAD_REQUEST) {
- filt_wllock(kqwl);
- assert(kqr->kqr_qos_index != THREAD_QOS_UNSPECIFIED);
- if (kqwl->kqwl_owner) {
- /*
- * <rdar://problem/33584321> userspace sometimes due to events being
- * delivered but not triggering a drain session can cause a process
- * of the thread request knote.
- *
- * When that happens, the automatic deactivation due to process
- * would swallow the event, so we have to activate the knote again.
- */
- kqlock(kq);
- knote_activate(kn);
- kqunlock(kq);
- } else if (kqr->kqr_qos_index) {
-#if DEBUG || DEVELOPMENT
- user_addr_t addr = CAST_USER_ADDR_T(kn->kn_ext[EV_EXTIDX_WL_ADDR]);
- task_t t = current_task();
- uint64_t val;
- if (addr && task_is_active(t) && !task_is_halting(t) &&
- copyin_word(addr, &val, sizeof(val)) == 0 &&
- val && (val & DISPATCH_QUEUE_ENQUEUED) == 0 &&
- (val >> 48) != 0 && (val >> 48) != 0xffff) {
- panic("kevent: workloop %#016llx is not enqueued "
- "(kn:%p dq_state:%#016llx kev.dq_state:%#016llx)",
- kn->kn_udata, kn, val,
- kn->kn_ext[EV_EXTIDX_WL_VALUE]);
- }
-#endif
- *kev = kn->kn_kevent;
- kev->fflags = kn->kn_sfflags;
- kev->data = kn->kn_sdata;
- kev->qos = kn->kn_qos;
- rc = 1;
+OS_ALWAYS_INLINE
+static inline bool
+kqworkloop_try_retain(struct kqworkloop *kqwl)
+{
+ uint32_t old_ref, new_ref;
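+	/*
+	 * A try-retain fails if the refcount has already reached zero:
+	 * the workloop is being deallocated and must not be resurrected.
+	 */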
+ os_atomic_rmw_loop(&kqwl->kqwl_retains, old_ref, new_ref, relaxed, {
+ if (__improbable(old_ref == 0)) {
+ os_atomic_rmw_loop_give_up(return false);
}
- filt_wlunlock(kqwl);
- }
- return rc;
+ if (__improbable(old_ref >= KQ_WORKLOOP_RETAINS_MAX)) {
+ kqworkloop_retain_panic(kqwl, old_ref);
+ }
+ new_ref = old_ref + 1;
+ });
+ return true;
}
-#pragma mark kevent / knotes
-
-/*
- * JMM - placeholder for not-yet-implemented filters
- */
-static int
-filt_badattach(__unused struct knote *kn, __unused struct kevent_internal_s *kev)
+OS_ALWAYS_INLINE
+static inline void
+kqworkloop_retain(struct kqworkloop *kqwl)
{
- kn->kn_flags |= EV_ERROR;
- kn->kn_data = ENOTSUP;
- return 0;
+ uint32_t previous = os_atomic_inc_orig(&kqwl->kqwl_retains, relaxed);
+ if (__improbable(previous == 0 || previous >= KQ_WORKLOOP_RETAINS_MAX)) {
+ kqworkloop_retain_panic(kqwl, previous);
+ }
}
-struct kqueue *
-kqueue_alloc(struct proc *p, unsigned int flags)
+OS_ALWAYS_INLINE
+static inline void
+kqueue_retain(kqueue_t kqu)
{
- struct filedesc *fdp = p->p_fd;
- struct kqueue *kq = NULL;
- int policy;
- void *hook = NULL;
- uint64_t kq_addr_offset;
-
- if (flags & KEVENT_FLAG_WORKQ) {
- struct kqworkq *kqwq;
- int i;
-
- kqwq = (struct kqworkq *)zalloc(kqworkq_zone);
- if (kqwq == NULL)
- return NULL;
-
- kq = &kqwq->kqwq_kqueue;
- bzero(kqwq, sizeof (struct kqworkq));
-
- kqwq->kqwq_state = KQ_WORKQ;
-
- for (i = 0; i < KQWQ_NBUCKETS; i++) {
- TAILQ_INIT(&kq->kq_queue[i]);
- }
- for (i = 0; i < KQWQ_NQOS; i++) {
- kqwq->kqwq_request[i].kqr_qos_index = i;
- }
-
- lck_spin_init(&kqwq->kqwq_reqlock, kq_lck_grp, kq_lck_attr);
- policy = SYNC_POLICY_FIFO;
- hook = (void *)kqwq;
-
- } else if (flags & KEVENT_FLAG_WORKLOOP) {
- struct kqworkloop *kqwl;
- int i;
-
- kqwl = (struct kqworkloop *)zalloc(kqworkloop_zone);
- if (kqwl == NULL)
- return NULL;
-
- bzero(kqwl, sizeof (struct kqworkloop));
+ if (kqu.kq->kq_state & KQ_DYNAMIC) {
+ kqworkloop_retain(kqu.kqwl);
+ }
+}
- kqwl->kqwl_state = KQ_WORKLOOP | KQ_DYNAMIC;
- kqwl->kqwl_retains = 1; /* donate a retain to creator */
+OS_ALWAYS_INLINE
+static inline void
+kqworkloop_release_live(struct kqworkloop *kqwl)
+{
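+	/*
+	 * A "live" release must never drop the last reference: the caller
+	 * guarantees something else still holds one.
+	 */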
+ uint32_t refs = os_atomic_dec_orig(&kqwl->kqwl_retains, relaxed);
+ if (__improbable(refs <= 1)) {
+ kqworkloop_release_panic(kqwl);
+ }
+}
- kq = &kqwl->kqwl_kqueue;
- for (i = 0; i < KQWL_NBUCKETS; i++) {
- TAILQ_INIT(&kq->kq_queue[i]);
- }
- TAILQ_INIT(&kqwl->kqwl_request.kqr_suppressed);
+OS_ALWAYS_INLINE
+static inline void
+kqueue_release_live(kqueue_t kqu)
+{
+ if (kqu.kq->kq_state & KQ_DYNAMIC) {
+ kqworkloop_release_live(kqu.kqwl);
+ }
+}
- lck_spin_init(&kqwl->kqwl_reqlock, kq_lck_grp, kq_lck_attr);
- lck_mtx_init(&kqwl->kqwl_statelock, kq_lck_grp, kq_lck_attr);
+OS_ALWAYS_INLINE
+static inline void
+kqworkloop_release(struct kqworkloop *kqwl)
+{
+ uint32_t refs = os_atomic_dec_orig(&kqwl->kqwl_retains, relaxed);
- policy = SYNC_POLICY_FIFO;
- if (flags & KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD) {
- policy |= SYNC_POLICY_PREPOST;
- kq->kq_state |= KQ_NO_WQ_THREAD;
- } else {
- hook = (void *)kqwl;
- }
-
- } else {
- struct kqfile *kqf;
-
- kqf = (struct kqfile *)zalloc(kqfile_zone);
- if (kqf == NULL)
- return NULL;
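+	/*
+	 * refs is the value before the decrement: 1 means we just dropped the
+	 * last reference and must deallocate; 0 means an over-release, which
+	 * kqworkloop_dealloc() will catch since refs - 1 wraps around.
+	 */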
+ if (__improbable(refs <= 1)) {
+ kqworkloop_dealloc(kqwl, KQWL_DEALLOC_NONE, refs - 1);
+ }
+}
- kq = &kqf->kqf_kqueue;
- bzero(kqf, sizeof (struct kqfile));
- TAILQ_INIT(&kq->kq_queue[0]);
- TAILQ_INIT(&kqf->kqf_suppressed);
-
- policy = SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST;
+OS_ALWAYS_INLINE
+static inline void
+kqueue_release(kqueue_t kqu)
+{
+ if (kqu.kq->kq_state & KQ_DYNAMIC) {
+ kqworkloop_release(kqu.kqwl);
}
+}
- waitq_set_init(&kq->kq_wqs, policy, NULL, hook);
- lck_spin_init(&kq->kq_lock, kq_lck_grp, kq_lck_attr);
- kq->kq_p = p;
+/*!
+ * @function kqueue_destroy
+ *
+ * @brief
+ * Common part to all kqueue dealloc functions.
+ */
+OS_NOINLINE
+static void
+kqueue_destroy(kqueue_t kqu, zone_t zone)
+{
+ /*
+	 * waitq_set_deinit() removes the KQ's waitq set from
+	 * any select sets to which it may belong.
+	 *
+	 * The order of these deinits matters: before waitq_set_deinit() returns,
+ * waitq_set__CALLING_PREPOST_HOOK__ may be called and it will take the
+ * kq_lock.
+ */
+ waitq_set_deinit(&kqu.kq->kq_wqs);
+ lck_spin_destroy(&kqu.kq->kq_lock, kq_lck_grp);
- if (fdp->fd_knlistsize < 0) {
- proc_fdlock(p);
- if (fdp->fd_knlistsize < 0)
- fdp->fd_knlistsize = 0; /* this process has had a kq */
- proc_fdunlock(p);
- }
+ zfree(zone, kqu.kq);
+}
- kq_addr_offset = ((uintptr_t)kq - (uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS);
- /* Assert that the address can be pointer compacted for use with knote */
- assert(kq_addr_offset < (uint64_t)(1ull << KNOTE_KQ_BITSIZE));
- return (kq);
+/*!
+ * @function kqueue_init
+ *
+ * @brief
+ * Common part to all kqueue alloc functions.
+ */
+static kqueue_t
+kqueue_init(kqueue_t kqu, waitq_set_prepost_hook_t *hook, int policy)
+{
+ waitq_set_init(&kqu.kq->kq_wqs, policy, NULL, hook);
+ lck_spin_init(&kqu.kq->kq_lock, kq_lck_grp, kq_lck_attr);
+ return kqu;
}
-/*
- * knotes_dealloc - detach all knotes for the process and drop them
+#pragma mark kqfile allocation and deallocation
+
+/*!
+ * @function kqueue_dealloc
*
- * Called with proc_fdlock held.
- * Returns with it locked.
- * May drop it temporarily.
- * Process is in such a state that it will not try to allocate
- * any more knotes during this process (stopped for exit or exec).
+ * @brief
+ * Detach all knotes from a kqfile and free it.
+ *
+ * @discussion
+ * We walk each list looking for knotes referencing this
+ * kqueue. If we find one, we try to drop it; if we fail to
+ * get a drop reference, we wait until it is dropped. Either
+ * way we can restart, safe in the assumption that the list
+ * will eventually not contain any more references to this
+ * kqueue (either we dropped them all, or someone else did).
+ *
+ * Assumes no new events are being added to the kqueue.
+ * Nothing locked on entry or exit.
*/
void
-knotes_dealloc(proc_t p)
+kqueue_dealloc(struct kqueue *kq)
{
+ KNOTE_LOCK_CTX(knlc);
+ struct proc *p = kq->kq_p;
struct filedesc *fdp = p->p_fd;
- struct kqueue *kq;
struct knote *kn;
- struct klist *kn_hash = NULL;
- int i;
- /* Close all the fd-indexed knotes up front */
- if (fdp->fd_knlistsize > 0) {
- for (i = 0; i < fdp->fd_knlistsize; i++) {
- while ((kn = SLIST_FIRST(&fdp->fd_knlist[i])) != NULL) {
- kq = knote_get_kq(kn);
+ assert(kq && (kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
+
+ proc_fdlock(p);
+ for (int i = 0; i < fdp->fd_knlistsize; i++) {
+ kn = SLIST_FIRST(&fdp->fd_knlist[i]);
+ while (kn != NULL) {
+ if (kq == knote_get_kq(kn)) {
kqlock(kq);
proc_fdunlock(p);
- /* drop it ourselves or wait */
- if (kqlock2knotedrop(kq, kn)) {
- knote_drop(kn, p);
+ if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
+ knote_drop(kq, kn, &knlc);
}
proc_fdlock(p);
+ /* start over at beginning of list */
+ kn = SLIST_FIRST(&fdp->fd_knlist[i]);
+ continue;
}
+ kn = SLIST_NEXT(kn, kn_link);
}
- /* free the table */
- FREE(fdp->fd_knlist, M_KQUEUE);
- fdp->fd_knlist = NULL;
}
- fdp->fd_knlistsize = -1;
- knhash_lock(p);
+ knhash_lock(fdp);
proc_fdunlock(p);
- /* Clean out all the hashed knotes as well */
if (fdp->fd_knhashmask != 0) {
- for (i = 0; i <= (int)fdp->fd_knhashmask; i++) {
- while ((kn = SLIST_FIRST(&fdp->fd_knhash[i])) != NULL) {
- kq = knote_get_kq(kn);
- kqlock(kq);
- knhash_unlock(p);
- /* drop it ourselves or wait */
- if (kqlock2knotedrop(kq, kn)) {
- knote_drop(kn, p);
- }
- knhash_lock(p);
- }
- }
- kn_hash = fdp->fd_knhash;
- fdp->fd_knhashmask = 0;
- fdp->fd_knhash = NULL;
- }
-
- knhash_unlock(p);
-
- /* free the kn_hash table */
- if (kn_hash)
- FREE(kn_hash, M_KQUEUE);
-
- proc_fdlock(p);
-}
-
-
-/*
- * kqueue_dealloc - detach all knotes from a kqueue and free it
- *
- * We walk each list looking for knotes referencing this
- * this kqueue. If we find one, we try to drop it. But
- * if we fail to get a drop reference, that will wait
- * until it is dropped. So, we can just restart again
- * safe in the assumption that the list will eventually
- * not contain any more references to this kqueue (either
- * we dropped them all, or someone else did).
- *
- * Assumes no new events are being added to the kqueue.
- * Nothing locked on entry or exit.
- *
- * Workloop kqueues cant get here unless all the knotes
- * are already gone and all requested threads have come
- * and gone (cancelled or arrived).
- */
-void
-kqueue_dealloc(struct kqueue *kq)
-{
- struct proc *p;
- struct filedesc *fdp;
- struct knote *kn;
- int i;
-
- if (kq == NULL)
- return;
-
- p = kq->kq_p;
- fdp = p->p_fd;
-
- if ((kq->kq_state & KQ_WORKLOOP) == 0) {
- proc_fdlock(p);
- for (i = 0; i < fdp->fd_knlistsize; i++) {
- kn = SLIST_FIRST(&fdp->fd_knlist[i]);
+ for (int i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
+ kn = SLIST_FIRST(&fdp->fd_knhash[i]);
while (kn != NULL) {
if (kq == knote_get_kq(kn)) {
kqlock(kq);
- proc_fdunlock(p);
- /* drop it ourselves or wait */
- if (kqlock2knotedrop(kq, kn)) {
- knote_drop(kn, p);
+ knhash_unlock(fdp);
+ if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
+ knote_drop(kq, kn, &knlc);
}
- proc_fdlock(p);
+ knhash_lock(fdp);
/* start over at beginning of list */
- kn = SLIST_FIRST(&fdp->fd_knlist[i]);
+ kn = SLIST_FIRST(&fdp->fd_knhash[i]);
continue;
}
kn = SLIST_NEXT(kn, kn_link);
}
}
- knhash_lock(p);
- proc_fdunlock(p);
-
- if (fdp->fd_knhashmask != 0) {
- for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
- kn = SLIST_FIRST(&fdp->fd_knhash[i]);
- while (kn != NULL) {
- if (kq == knote_get_kq(kn)) {
- kqlock(kq);
- knhash_unlock(p);
- /* drop it ourselves or wait */
- if (kqlock2knotedrop(kq, kn)) {
- knote_drop(kn, p);
- }
- knhash_lock(p);
- /* start over at beginning of list */
- kn = SLIST_FIRST(&fdp->fd_knhash[i]);
- continue;
- }
- kn = SLIST_NEXT(kn, kn_link);
- }
- }
- }
- knhash_unlock(p);
}
+ knhash_unlock(fdp);
- if (kq->kq_state & KQ_WORKLOOP) {
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- struct kqrequest *kqr = &kqwl->kqwl_request;
- thread_t cur_owner = kqwl->kqwl_owner;
+ kqueue_destroy(kq, kqfile_zone);
+}
- assert(TAILQ_EMPTY(&kqwl->kqwl_request.kqr_suppressed));
- if (filt_wlowner_is_valid(cur_owner)) {
- /*
- * If the kqueue had an owner that prevented the thread request to
- * go through, then no unbind happened, and we may have lingering
- * overrides to drop.
- */
- if (kqr->kqr_dsync_owner_qos != THREAD_QOS_UNSPECIFIED) {
- thread_drop_ipc_override(cur_owner);
- kqr->kqr_dsync_owner_qos = THREAD_QOS_UNSPECIFIED;
- }
+/*!
+ * @function kqueue_alloc
+ *
+ * @brief
+ * Allocate a kqfile.
+ */
+struct kqueue *
+kqueue_alloc(struct proc *p)
+{
+ struct kqfile *kqf;
- if (kqr->kqr_owner_override_is_sync) {
- thread_drop_sync_ipc_override(cur_owner);
- kqr->kqr_owner_override_is_sync = 0;
- }
- thread_ends_owning_workloop(cur_owner);
- thread_deallocate(cur_owner);
- kqwl->kqwl_owner = THREAD_NULL;
- }
+ kqf = (struct kqfile *)zalloc(kqfile_zone);
+ if (__improbable(kqf == NULL)) {
+ return NULL;
}
+ bzero(kqf, sizeof(struct kqfile));
/*
- * waitq_set_deinit() remove the KQ's waitq set from
- * any select sets to which it may belong.
+ * kqfiles are created with kqueue() so we need to wait for
+ * the first kevent syscall to know which bit among
+ * KQ_KEV_{32,64,QOS} will be set in kqf_state
*/
- waitq_set_deinit(&kq->kq_wqs);
- lck_spin_destroy(&kq->kq_lock, kq_lck_grp);
-
- if (kq->kq_state & KQ_WORKQ) {
- struct kqworkq *kqwq = (struct kqworkq *)kq;
-
- lck_spin_destroy(&kqwq->kqwq_reqlock, kq_lck_grp);
- zfree(kqworkq_zone, kqwq);
- } else if (kq->kq_state & KQ_WORKLOOP) {
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
-
- assert(kqwl->kqwl_retains == 0);
- lck_spin_destroy(&kqwl->kqwl_reqlock, kq_lck_grp);
- lck_mtx_destroy(&kqwl->kqwl_statelock, kq_lck_grp);
- zfree(kqworkloop_zone, kqwl);
- } else {
- struct kqfile *kqf = (struct kqfile *)kq;
-
- zfree(kqfile_zone, kqf);
- }
-}
-
-static inline void
-kqueue_retain(struct kqueue *kq)
-{
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- uint32_t previous;
-
- if ((kq->kq_state & KQ_DYNAMIC) == 0)
- return;
-
- previous = OSIncrementAtomic(&kqwl->kqwl_retains);
- if (previous == KQ_WORKLOOP_RETAINS_MAX)
- panic("kq(%p) retain overflow", kq);
+ kqf->kqf_p = p;
+ TAILQ_INIT_AFTER_BZERO(&kqf->kqf_queue);
+ TAILQ_INIT_AFTER_BZERO(&kqf->kqf_suppressed);
- if (previous == 0)
- panic("kq(%p) resurrection", kq);
-}
-
-#define KQUEUE_CANT_BE_LAST_REF 0
-#define KQUEUE_MIGHT_BE_LAST_REF 1
-
-static inline int
-kqueue_release(struct kqueue *kq, __assert_only int possibly_last)
-{
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
-
- if ((kq->kq_state & KQ_DYNAMIC) == 0) {
- return 0;
- }
-
- assert(kq->kq_state & KQ_WORKLOOP); /* for now */
- uint32_t refs = OSDecrementAtomic(&kqwl->kqwl_retains);
- if (__improbable(refs == 0)) {
- panic("kq(%p) over-release", kq);
- }
- if (refs == 1) {
- assert(possibly_last);
- }
- return refs == 1;
+ return kqueue_init(kqf, NULL, SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST).kq;
}
+/*!
+ * @function kqueue_internal
+ *
+ * @brief
+ * Core implementation for kqueue and guarded_kqueue_np()
+ */
int
-kqueue_body(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
+kqueue_internal(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
{
struct kqueue *kq;
struct fileproc *fp;
int fd, error;
- error = falloc_withalloc(p,
- &fp, &fd, vfs_context_current(), fp_zalloc, cra);
+ error = falloc_withalloc(p, &fp, &fd, vfs_context_current(), fp_zalloc, cra);
if (error) {
- return (error);
+ return error;
}
- kq = kqueue_alloc(p, 0);
+ kq = kqueue_alloc(p);
if (kq == NULL) {
fp_free(p, fd, fp);
- return (ENOMEM);
+ return ENOMEM;
}
fp->f_flag = FREAD | FWRITE;
fp->f_ops = &kqueueops;
fp->f_data = kq;
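+	/*
+	 * kqueue descriptors are kept private to the creating process:
+	 * confine the fileglob and close the fd on fork and exec.
+	 */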
+ fp->f_lflags |= FG_CONFINED;
proc_fdlock(p);
- *fdflags(p, fd) |= UF_EXCLOSE;
+ *fdflags(p, fd) |= UF_EXCLOSE | UF_FORKCLOSE;
procfdtbl_releasefd(p, fd, NULL);
fp_drop(p, fd, fp, 1);
proc_fdunlock(p);
*retval = fd;
- return (error);
+ return error;
}
+/*!
+ * @function kqueue
+ *
+ * @brief
+ * The kqueue syscall.
+ */
int
kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
{
- return (kqueue_body(p, fileproc_alloc_init, NULL, retval));
+ return kqueue_internal(p, fileproc_alloc_init, NULL, retval);
}
-static int
-kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp, struct proc *p,
- unsigned int flags)
-{
- int advance;
- int error;
+#pragma mark kqworkq allocation and deallocation
- if (flags & KEVENT_FLAG_LEGACY32) {
- bzero(kevp, sizeof (*kevp));
-
- if (IS_64BIT_PROCESS(p)) {
- struct user64_kevent kev64;
-
- advance = sizeof (kev64);
- error = copyin(*addrp, (caddr_t)&kev64, advance);
- if (error)
- return (error);
- kevp->ident = kev64.ident;
- kevp->filter = kev64.filter;
- kevp->flags = kev64.flags;
- kevp->udata = kev64.udata;
- kevp->fflags = kev64.fflags;
- kevp->data = kev64.data;
- } else {
- struct user32_kevent kev32;
-
- advance = sizeof (kev32);
- error = copyin(*addrp, (caddr_t)&kev32, advance);
- if (error)
- return (error);
- kevp->ident = (uintptr_t)kev32.ident;
- kevp->filter = kev32.filter;
- kevp->flags = kev32.flags;
- kevp->udata = CAST_USER_ADDR_T(kev32.udata);
- kevp->fflags = kev32.fflags;
- kevp->data = (intptr_t)kev32.data;
- }
- } else if (flags & KEVENT_FLAG_LEGACY64) {
- struct kevent64_s kev64;
+/*!
+ * @function kqworkq_dealloc
+ *
+ * @brief
+ * Deallocates a workqueue kqueue.
+ *
+ * @discussion
+ * This only happens at process death, or for races with concurrent
+ * kevent_get_kqwq calls, hence we don't have to care about knotes
+ * referencing this kqueue: either there are none, or someone else
+ * took care of them.
+ */
+void
+kqworkq_dealloc(struct kqworkq *kqwq)
+{
+ kqueue_destroy(kqwq, kqworkq_zone);
+}
- bzero(kevp, sizeof (*kevp));
-
- advance = sizeof (struct kevent64_s);
- error = copyin(*addrp, (caddr_t)&kev64, advance);
- if (error)
- return(error);
- kevp->ident = kev64.ident;
- kevp->filter = kev64.filter;
- kevp->flags = kev64.flags;
- kevp->udata = kev64.udata;
- kevp->fflags = kev64.fflags;
- kevp->data = kev64.data;
- kevp->ext[0] = kev64.ext[0];
- kevp->ext[1] = kev64.ext[1];
-
- } else {
- struct kevent_qos_s kevqos;
-
- bzero(kevp, sizeof (*kevp));
-
- advance = sizeof (struct kevent_qos_s);
- error = copyin(*addrp, (caddr_t)&kevqos, advance);
- if (error)
- return error;
- kevp->ident = kevqos.ident;
- kevp->filter = kevqos.filter;
- kevp->flags = kevqos.flags;
- kevp->qos = kevqos.qos;
-// kevp->xflags = kevqos.xflags;
- kevp->udata = kevqos.udata;
- kevp->fflags = kevqos.fflags;
- kevp->data = kevqos.data;
- kevp->ext[0] = kevqos.ext[0];
- kevp->ext[1] = kevqos.ext[1];
- kevp->ext[2] = kevqos.ext[2];
- kevp->ext[3] = kevqos.ext[3];
- }
- if (!error)
- *addrp += advance;
- return (error);
-}
-
-static int
-kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc *p,
- unsigned int flags)
+/*!
+ * @function kqworkq_alloc
+ *
+ * @brief
+ * Allocates a workqueue kqueue.
+ *
+ * @discussion
+ * This is the slow path of kevent_get_kqwq.
+ * This takes care of making sure procs have a single workq kqueue.
+ */
+OS_NOINLINE
+static struct kqworkq *
+kqworkq_alloc(struct proc *p, unsigned int flags)
{
- user_addr_t addr = *addrp;
- int advance;
- int error;
-
- /*
- * fully initialize the differnt output event structure
- * types from the internal kevent (and some universal
- * defaults for fields not represented in the internal
- * form).
- */
- if (flags & KEVENT_FLAG_LEGACY32) {
- assert((flags & KEVENT_FLAG_STACK_EVENTS) == 0);
-
- if (IS_64BIT_PROCESS(p)) {
- struct user64_kevent kev64;
-
- advance = sizeof (kev64);
- bzero(&kev64, advance);
-
- /*
- * deal with the special case of a user-supplied
- * value of (uintptr_t)-1.
- */
- kev64.ident = (kevp->ident == (uintptr_t)-1) ?
- (uint64_t)-1LL : (uint64_t)kevp->ident;
-
- kev64.filter = kevp->filter;
- kev64.flags = kevp->flags;
- kev64.fflags = kevp->fflags;
- kev64.data = (int64_t) kevp->data;
- kev64.udata = kevp->udata;
- error = copyout((caddr_t)&kev64, addr, advance);
- } else {
- struct user32_kevent kev32;
-
- advance = sizeof (kev32);
- bzero(&kev32, advance);
- kev32.ident = (uint32_t)kevp->ident;
- kev32.filter = kevp->filter;
- kev32.flags = kevp->flags;
- kev32.fflags = kevp->fflags;
- kev32.data = (int32_t)kevp->data;
- kev32.udata = kevp->udata;
- error = copyout((caddr_t)&kev32, addr, advance);
- }
- } else if (flags & KEVENT_FLAG_LEGACY64) {
- struct kevent64_s kev64;
-
- advance = sizeof (struct kevent64_s);
- if (flags & KEVENT_FLAG_STACK_EVENTS) {
- addr -= advance;
- }
- bzero(&kev64, advance);
- kev64.ident = kevp->ident;
- kev64.filter = kevp->filter;
- kev64.flags = kevp->flags;
- kev64.fflags = kevp->fflags;
- kev64.data = (int64_t) kevp->data;
- kev64.udata = kevp->udata;
- kev64.ext[0] = kevp->ext[0];
- kev64.ext[1] = kevp->ext[1];
- error = copyout((caddr_t)&kev64, addr, advance);
- } else {
- struct kevent_qos_s kevqos;
-
- advance = sizeof (struct kevent_qos_s);
- if (flags & KEVENT_FLAG_STACK_EVENTS) {
- addr -= advance;
- }
- bzero(&kevqos, advance);
- kevqos.ident = kevp->ident;
- kevqos.filter = kevp->filter;
- kevqos.flags = kevp->flags;
- kevqos.qos = kevp->qos;
- kevqos.udata = kevp->udata;
- kevqos.fflags = kevp->fflags;
- kevqos.xflags = 0;
- kevqos.data = (int64_t) kevp->data;
- kevqos.ext[0] = kevp->ext[0];
- kevqos.ext[1] = kevp->ext[1];
- kevqos.ext[2] = kevp->ext[2];
- kevqos.ext[3] = kevp->ext[3];
- error = copyout((caddr_t)&kevqos, addr, advance);
- }
- if (!error) {
- if (flags & KEVENT_FLAG_STACK_EVENTS)
- *addrp = addr;
- else
- *addrp = addr + advance;
- }
- return (error);
-}
+ struct kqworkq *kqwq, *tmp;
-static int
-kevent_get_data_size(struct proc *p,
- uint64_t data_available,
- unsigned int flags,
- user_size_t *residp)
-{
- user_size_t resid;
- int error = 0;
+ kqwq = (struct kqworkq *)zalloc(kqworkq_zone);
+ if (__improbable(kqwq == NULL)) {
+ return NULL;
+ }
+ bzero(kqwq, sizeof(struct kqworkq));
- if (data_available != USER_ADDR_NULL) {
- if (flags & KEVENT_FLAG_KERNEL) {
- resid = *(user_size_t *)(uintptr_t)data_available;
- } else if (IS_64BIT_PROCESS(p)) {
- user64_size_t usize;
- error = copyin((user_addr_t)data_available, &usize, sizeof(usize));
- resid = (user_size_t)usize;
- } else {
- user32_size_t usize;
- error = copyin((user_addr_t)data_available, &usize, sizeof(usize));
- resid = (user_size_t)usize;
- }
- if (error)
- return(error);
+ assert((flags & KEVENT_FLAG_LEGACY32) == 0);
+ if (flags & KEVENT_FLAG_LEGACY64) {
+ kqwq->kqwq_state = KQ_WORKQ | KQ_KEV64;
} else {
- resid = 0;
+ kqwq->kqwq_state = KQ_WORKQ | KQ_KEV_QOS;
}
- *residp = resid;
- return 0;
-}
-
-static int
-kevent_put_data_size(struct proc *p,
- uint64_t data_available,
- unsigned int flags,
- user_size_t resid)
-{
- int error = 0;
+ kqwq->kqwq_p = p;
- if (data_available) {
- if (flags & KEVENT_FLAG_KERNEL) {
- *(user_size_t *)(uintptr_t)data_available = resid;
- } else if (IS_64BIT_PROCESS(p)) {
- user64_size_t usize = (user64_size_t)resid;
- error = copyout(&usize, (user_addr_t)data_available, sizeof(usize));
- } else {
- user32_size_t usize = (user32_size_t)resid;
- error = copyout(&usize, (user_addr_t)data_available, sizeof(usize));
+ for (int i = 0; i < KQWQ_NBUCKETS; i++) {
+ TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_queue[i]);
+ TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_suppressed[i]);
+ }
+ for (int i = 0; i < KQWQ_NBUCKETS; i++) {
+ /*
+		 * Because of how the bucketized system works, we mix overcommit
+		 * sources with non-overcommit ones: each time we move a knote from
+		 * one bucket to the next due to overrides, we would have to track
+		 * its overcommitness, and it's really not worth it in a world where
+		 * workloops track this faithfully.
+		 *
+		 * Incidentally, this behaves like the original manager-based
+		 * kqwq where event delivery always happened (hence "overcommit")
+ */
+ kqwq->kqwq_request[i].tr_state = WORKQ_TR_STATE_IDLE;
+ kqwq->kqwq_request[i].tr_flags = WORKQ_TR_FLAG_KEVENT;
+ if (i != KQWQ_QOS_MANAGER) {
+ kqwq->kqwq_request[i].tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
}
+ kqwq->kqwq_request[i].tr_kq_qos_index = i;
}
- return error;
-}
-/*
- * kevent_continue - continue a kevent syscall after blocking
- *
- * assume we inherit a use count on the kq fileglob.
- */
+ kqueue_init(kqwq, &kqwq->kqwq_waitq_hook, SYNC_POLICY_FIFO);
-__attribute__((noreturn))
-static void
-kevent_continue(__unused struct kqueue *kq, void *data, int error)
-{
- struct _kevent *cont_args;
- struct fileproc *fp;
- uint64_t data_available;
- user_size_t data_size;
- user_size_t data_resid;
- unsigned int flags;
- int32_t *retval;
- int noutputs;
- int fd;
- struct proc *p = current_proc();
-
- cont_args = (struct _kevent *)data;
- data_available = cont_args->data_available;
- flags = cont_args->process_data.fp_flags;
- data_size = cont_args->process_data.fp_data_size;
- data_resid = cont_args->process_data.fp_data_resid;
- noutputs = cont_args->eventout;
- retval = cont_args->retval;
- fd = cont_args->fd;
- fp = cont_args->fp;
-
- kevent_put_kq(p, fd, fp, kq);
-
- /* don't abandon other output just because of residual copyout failures */
- if (error == 0 && data_available && data_resid != data_size) {
- (void)kevent_put_data_size(p, data_available, flags, data_resid);
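+	/*
+	 * Publish the workq kqueue; if another thread raced us and already
+	 * published one, undo our allocation and return the winner's.
+	 */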
+ if (!os_atomic_cmpxchgv(&p->p_fd->fd_wqkqueue, NULL, kqwq, &tmp, release)) {
+ kqworkq_dealloc(kqwq);
+ return tmp;
}
- /* don't restart after signals... */
- if (error == ERESTART)
- error = EINTR;
- else if (error == EWOULDBLOCK)
- error = 0;
- if (error == 0)
- *retval = noutputs;
- unix_syscall_return(error);
+ return kqwq;
}
-/*
- * kevent - [syscall] register and wait for kernel events
- *
- */
-int
-kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
-{
- unsigned int flags = KEVENT_FLAG_LEGACY32;
+#pragma mark kqworkloop allocation and deallocation
- return kevent_internal(p,
- (kqueue_id_t)uap->fd, NULL,
- uap->changelist, uap->nchanges,
- uap->eventlist, uap->nevents,
- 0ULL, 0ULL,
- flags,
- uap->timeout,
- kevent_continue,
- retval);
-}
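+/*
+ * KQ_HASH folds bits 8..15 of the ID into its low byte so that IDs
+ * differing only above the low byte still spread across hash buckets.
+ */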
+#define KQ_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
+#define CONFIG_KQ_HASHSIZE CONFIG_KN_HASHSIZE
-int
-kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
+OS_ALWAYS_INLINE
+static inline void
+kqhash_lock(struct filedesc *fdp)
{
- unsigned int flags;
-
- /* restrict to user flags and set legacy64 */
- flags = uap->flags & KEVENT_FLAG_USER;
- flags |= KEVENT_FLAG_LEGACY64;
-
- return kevent_internal(p,
- (kqueue_id_t)uap->fd, NULL,
- uap->changelist, uap->nchanges,
- uap->eventlist, uap->nevents,
- 0ULL, 0ULL,
- flags,
- uap->timeout,
- kevent_continue,
- retval);
+ lck_mtx_lock_spin_always(&fdp->fd_kqhashlock);
}
-int
-kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
+OS_ALWAYS_INLINE
+static inline void
+kqhash_unlock(struct filedesc *fdp)
{
- /* restrict to user flags */
- uap->flags &= KEVENT_FLAG_USER;
-
- return kevent_internal(p,
- (kqueue_id_t)uap->fd, NULL,
- uap->changelist, uap->nchanges,
- uap->eventlist, uap->nevents,
- uap->data_out, (uint64_t)uap->data_available,
- uap->flags,
- 0ULL,
- kevent_continue,
- retval);
-}
-
-int
-kevent_qos_internal(struct proc *p, int fd,
- user_addr_t changelist, int nchanges,
- user_addr_t eventlist, int nevents,
- user_addr_t data_out, user_size_t *data_available,
- unsigned int flags,
- int32_t *retval)
-{
- return kevent_internal(p,
- (kqueue_id_t)fd, NULL,
- changelist, nchanges,
- eventlist, nevents,
- data_out, (uint64_t)data_available,
- (flags | KEVENT_FLAG_KERNEL),
- 0ULL,
- NULL,
- retval);
+ lck_mtx_unlock(&fdp->fd_kqhashlock);
}
-int
-kevent_id(struct proc *p, struct kevent_id_args *uap, int32_t *retval)
+OS_ALWAYS_INLINE
+static inline void
+kqworkloop_hash_insert_locked(struct filedesc *fdp, kqueue_id_t id,
+ struct kqworkloop *kqwl)
{
- /* restrict to user flags */
- uap->flags &= KEVENT_FLAG_USER;
-
- return kevent_internal(p,
- (kqueue_id_t)uap->id, NULL,
- uap->changelist, uap->nchanges,
- uap->eventlist, uap->nevents,
- uap->data_out, (uint64_t)uap->data_available,
- (uap->flags | KEVENT_FLAG_DYNAMIC_KQUEUE),
- 0ULL,
- kevent_continue,
- retval);
+ struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
+ LIST_INSERT_HEAD(list, kqwl, kqwl_hashlink);
}
-int
-kevent_id_internal(struct proc *p, kqueue_id_t *id,
- user_addr_t changelist, int nchanges,
- user_addr_t eventlist, int nevents,
- user_addr_t data_out, user_size_t *data_available,
- unsigned int flags,
- int32_t *retval)
-{
- return kevent_internal(p,
- *id, id,
- changelist, nchanges,
- eventlist, nevents,
- data_out, (uint64_t)data_available,
- (flags | KEVENT_FLAG_KERNEL | KEVENT_FLAG_DYNAMIC_KQUEUE),
- 0ULL,
- NULL,
- retval);
-}
-
-static int
-kevent_get_timeout(struct proc *p,
- user_addr_t utimeout,
- unsigned int flags,
- struct timeval *atvp)
+OS_ALWAYS_INLINE
+static inline struct kqworkloop *
+kqworkloop_hash_lookup_locked(struct filedesc *fdp, kqueue_id_t id)
{
- struct timeval atv;
- int error = 0;
+ struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
+ struct kqworkloop *kqwl;
- if (flags & KEVENT_FLAG_IMMEDIATE) {
- getmicrouptime(&atv);
- } else if (utimeout != USER_ADDR_NULL) {
- struct timeval rtv;
- if (flags & KEVENT_FLAG_KERNEL) {
- struct timespec *tsp = (struct timespec *)utimeout;
- TIMESPEC_TO_TIMEVAL(&rtv, tsp);
- } else if (IS_64BIT_PROCESS(p)) {
- struct user64_timespec ts;
- error = copyin(utimeout, &ts, sizeof(ts));
- if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0)
- error = EINVAL;
- else
- TIMESPEC_TO_TIMEVAL(&rtv, &ts);
- } else {
- struct user32_timespec ts;
- error = copyin(utimeout, &ts, sizeof(ts));
- TIMESPEC_TO_TIMEVAL(&rtv, &ts);
- }
- if (error)
- return (error);
- if (itimerfix(&rtv))
- return (EINVAL);
- getmicrouptime(&atv);
- timevaladd(&atv, &rtv);
- } else {
- /* wait forever value */
- atv.tv_sec = 0;
- atv.tv_usec = 0;
+ LIST_FOREACH(kqwl, list, kqwl_hashlink) {
+ if (kqwl->kqwl_dynamicid == id) {
+ return kqwl;
+ }
}
- *atvp = atv;
- return 0;
+ return NULL;
}
-static int
-kevent_set_kq_mode(struct kqueue *kq, unsigned int flags)
+static struct kqworkloop *
+kqworkloop_hash_lookup_and_retain(struct filedesc *fdp, kqueue_id_t kq_id)
{
- /* each kq should only be used for events of one type */
- kqlock(kq);
- if (kq->kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) {
- if (flags & KEVENT_FLAG_LEGACY32) {
- if ((kq->kq_state & KQ_KEV32) == 0) {
- kqunlock(kq);
- return EINVAL;
- }
- } else if (kq->kq_state & KQ_KEV32) {
- kqunlock(kq);
- return EINVAL;
+ struct kqworkloop *kqwl = NULL;
+
+ kqhash_lock(fdp);
+ if (__probable(fdp->fd_kqhash)) {
+ kqwl = kqworkloop_hash_lookup_locked(fdp, kq_id);
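+		/*
+		 * A failed try-retain means the workloop is being deallocated:
+		 * treat it as if it wasn't found.
+		 */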
+ if (kqwl && !kqworkloop_try_retain(kqwl)) {
+ kqwl = NULL;
}
- } else if (flags & KEVENT_FLAG_LEGACY32) {
- kq->kq_state |= KQ_KEV32;
- } else if (flags & KEVENT_FLAG_LEGACY64) {
- kq->kq_state |= KQ_KEV64;
- } else {
- kq->kq_state |= KQ_KEV_QOS;
}
- kqunlock(kq);
- return 0;
+ kqhash_unlock(fdp);
+ return kqwl;
}
-#define KQ_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
-#define CONFIG_KQ_HASHSIZE CONFIG_KN_HASHSIZE
-
-static inline void
-kqhash_lock(proc_t p)
+OS_NOINLINE
+static void
+kqworkloop_hash_init(struct filedesc *fdp)
{
- lck_mtx_lock_spin_always(&p->p_fd->fd_kqhashlock);
-}
+ struct kqwllist *alloc_hash;
+ u_long alloc_mask;
-static inline void
-kqhash_lock_held(__assert_only proc_t p)
-{
- LCK_MTX_ASSERT(&p->p_fd->fd_kqhashlock, LCK_MTX_ASSERT_OWNED);
-}
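+	/*
+	 * hashinit() may block, so drop the spin-held hash lock around the
+	 * allocation and resolve any racing initialization below.
+	 */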
+ kqhash_unlock(fdp);
+ alloc_hash = hashinit(CONFIG_KQ_HASHSIZE, M_KQUEUE, &alloc_mask);
+ kqhash_lock(fdp);
-static inline void
-kqhash_unlock(proc_t p)
-{
- lck_mtx_unlock(&p->p_fd->fd_kqhashlock);
+ /* See if we won the race */
+ if (__probable(fdp->fd_kqhashmask == 0)) {
+ fdp->fd_kqhash = alloc_hash;
+ fdp->fd_kqhashmask = alloc_mask;
+ } else {
+ kqhash_unlock(fdp);
+ FREE(alloc_hash, M_KQUEUE);
+ kqhash_lock(fdp);
+ }
}
+/*!
+ * @function kqworkloop_dealloc
+ *
+ * @brief
+ * Deallocates a workloop kqueue.
+ *
+ * @discussion
+ * Knotes hold references on the workloop, so we can't really reach this
+ * function unless all of these are already gone.
+ *
+ * Nothing locked on entry or exit.
+ *
+ * @param flags
+ * Unless KQWL_DEALLOC_SKIP_HASH_REMOVE is set, the workloop is removed
+ * from its hash table.
+ *
+ * @param current_ref
+ * This function is also called to undo a kqworkloop_alloc in case of
+ * allocation races. current_ref is the refcount that is expected on the
+ * workloop object, usually 0, and 1 when a dealloc race is resolved.
+ */
static void
-kqueue_hash_init_if_needed(proc_t p)
+kqworkloop_dealloc(struct kqworkloop *kqwl, kqworkloop_dealloc_flags_t flags,
+ uint32_t current_ref)
{
- struct filedesc *fdp = p->p_fd;
-
- kqhash_lock_held(p);
+ thread_t cur_owner;
- if (__improbable(fdp->fd_kqhash == NULL)) {
- struct kqlist *alloc_hash;
- u_long alloc_mask;
+ if (__improbable(current_ref > 1)) {
+ kqworkloop_release_panic(kqwl);
+ }
+ assert(kqwl->kqwl_retains == current_ref);
- kqhash_unlock(p);
- alloc_hash = hashinit(CONFIG_KQ_HASHSIZE, M_KQUEUE, &alloc_mask);
- kqhash_lock(p);
+ /* pair with kqunlock() and other kq locks */
+ os_atomic_thread_fence(acquire);
- /* See if we won the race */
- if (fdp->fd_kqhashmask == 0) {
- fdp->fd_kqhash = alloc_hash;
- fdp->fd_kqhashmask = alloc_mask;
- } else {
- kqhash_unlock(p);
- FREE(alloc_hash, M_KQUEUE);
- kqhash_lock(p);
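+	/*
+	 * If the workloop still had an owner, drop any kevent override that
+	 * was pushed on it and release the owner thread reference.
+	 */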
+ cur_owner = kqwl->kqwl_owner;
+ if (cur_owner) {
+ if (kqworkloop_override(kqwl) != THREAD_QOS_UNSPECIFIED) {
+ thread_drop_kevent_override(cur_owner);
}
+ thread_deallocate(cur_owner);
+ kqwl->kqwl_owner = THREAD_NULL;
}
-}
-/*
- * Called with the kqhash_lock() held
- */
-static void
-kqueue_hash_insert(
- struct proc *p,
- kqueue_id_t id,
- struct kqueue *kq)
-{
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- struct filedesc *fdp = p->p_fd;
- struct kqlist *list;
+ if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
+ struct turnstile *ts;
+ turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
+ &ts, TURNSTILE_WORKLOOPS);
+ turnstile_cleanup();
+ turnstile_deallocate(ts);
+ }
- /* should hold the kq hash lock */
- kqhash_lock_held(p);
+ if ((flags & KQWL_DEALLOC_SKIP_HASH_REMOVE) == 0) {
+ struct filedesc *fdp = kqwl->kqwl_p->p_fd;
- if ((kq->kq_state & KQ_DYNAMIC) == 0) {
- assert(kq->kq_state & KQ_DYNAMIC);
- return;
+ kqhash_lock(fdp);
+ LIST_REMOVE(kqwl, kqwl_hashlink);
+ kqhash_unlock(fdp);
}
- /* only dynamically allocate workloop kqs for now */
- assert(kq->kq_state & KQ_WORKLOOP);
- assert(fdp->fd_kqhash);
-
- kqwl->kqwl_dynamicid = id;
+ assert(TAILQ_EMPTY(&kqwl->kqwl_suppressed));
+ assert(kqwl->kqwl_owner == THREAD_NULL);
+ assert(kqwl->kqwl_turnstile == TURNSTILE_NULL);
- list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
- SLIST_INSERT_HEAD(list, kqwl, kqwl_hashlink);
+ lck_spin_destroy(&kqwl->kqwl_statelock, kq_lck_grp);
+ kqueue_destroy(kqwl, kqworkloop_zone);
}
-/* Called with kqhash_lock held */
+/*!
+ * @function kqworkloop_init
+ *
+ * @brief
+ * Initializes a workloop kqueue.
+ */
static void
-kqueue_hash_remove(
- struct proc *p,
- struct kqueue *kq)
+kqworkloop_init(struct kqworkloop *kqwl, proc_t p,
+ kqueue_id_t id, workq_threadreq_param_t *trp)
{
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- struct filedesc *fdp = p->p_fd;
- struct kqlist *list;
-
- /* should hold the kq hash lock */
- kqhash_lock_held(p);
+ bzero(kqwl, sizeof(struct kqworkloop));
- if ((kq->kq_state & KQ_DYNAMIC) == 0) {
- assert(kq->kq_state & KQ_DYNAMIC);
- return;
+ kqwl->kqwl_state = KQ_WORKLOOP | KQ_DYNAMIC | KQ_KEV_QOS;
+ kqwl->kqwl_retains = 1; /* donate a retain to creator */
+ kqwl->kqwl_dynamicid = id;
+ kqwl->kqwl_p = p;
+ if (trp) {
+ kqwl->kqwl_params = trp->trp_value;
}
- assert(kq->kq_state & KQ_WORKLOOP); /* for now */
- list = &fdp->fd_kqhash[KQ_HASH(kqwl->kqwl_dynamicid, fdp->fd_kqhashmask)];
- SLIST_REMOVE(list, kqwl, kqworkloop, kqwl_hashlink);
-}
-
-/* Called with kqhash_lock held */
-static struct kqueue *
-kqueue_hash_lookup(struct proc *p, kqueue_id_t id)
-{
- struct filedesc *fdp = p->p_fd;
- struct kqlist *list;
- struct kqworkloop *kqwl;
-
- /* should hold the kq hash lock */
- kqhash_lock_held(p);
-
- if (fdp->fd_kqhashmask == 0) return NULL;
-
- list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
- SLIST_FOREACH(kqwl, list, kqwl_hashlink) {
- if (kqwl->kqwl_dynamicid == id) {
- struct kqueue *kq = (struct kqueue *)kqwl;
- assert(kq->kq_state & KQ_DYNAMIC);
- assert(kq->kq_state & KQ_WORKLOOP); /* for now */
- return kq;
+ workq_tr_flags_t tr_flags = WORKQ_TR_FLAG_WORKLOOP;
+ if (trp) {
+ if (trp->trp_flags & TRP_PRIORITY) {
+ tr_flags |= WORKQ_TR_FLAG_WL_OUTSIDE_QOS;
}
- }
- return NULL;
-}
-
-static inline void
-kqueue_release_last(struct proc *p, struct kqueue *kq)
-{
- if (kq->kq_state & KQ_DYNAMIC) {
- kqhash_lock(p);
- if (kqueue_release(kq, KQUEUE_MIGHT_BE_LAST_REF)) {
- kqueue_hash_remove(p, kq);
- kqhash_unlock(p);
- kqueue_dealloc(kq);
- } else {
- kqhash_unlock(p);
+ if (trp->trp_flags) {
+ tr_flags |= WORKQ_TR_FLAG_WL_PARAMS;
}
}
-}
-
-static struct kqueue *
-kevent_get_bound_kq(__assert_only struct proc *p, thread_t thread,
- unsigned int kev_flags, unsigned int kq_flags)
-{
- struct kqueue *kq;
- struct uthread *ut = get_bsdthread_info(thread);
+ kqwl->kqwl_request.tr_state = WORKQ_TR_STATE_IDLE;
+ kqwl->kqwl_request.tr_flags = tr_flags;
- assert(p == get_bsdthreadtask_info(thread));
-
- if (!(ut->uu_kqueue_flags & kev_flags))
- return NULL;
-
- kq = ut->uu_kqueue_bound;
- if (!kq)
- return NULL;
+ for (int i = 0; i < KQWL_NBUCKETS; i++) {
+ TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_queue[i]);
+ }
+ TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_suppressed);
- if (!(kq->kq_state & kq_flags))
- return NULL;
+ lck_spin_init(&kqwl->kqwl_statelock, kq_lck_grp, kq_lck_attr);
- return kq;
+ kqueue_init(kqwl, &kqwl->kqwl_waitq_hook, SYNC_POLICY_FIFO);
}
+/*!
+ * @function kqworkloop_get_or_create
+ *
+ * @brief
+ * Wrapper around kqworkloop_alloc that handles the uniquing of workloops.
+ *
+ * @returns
+ * 0: success
+ * EINVAL: invalid parameters
+ * EEXIST: KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST is set and a collision exists.
+ * ENOENT: KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST is set and the entry wasn't found.
+ * ENOMEM: allocation failed
+ */
static int
-kevent_get_kq(struct proc *p, kqueue_id_t id, unsigned int flags, struct fileproc **fpp, int *fdp, struct kqueue **kqp)
+kqworkloop_get_or_create(struct proc *p, kqueue_id_t id,
+ workq_threadreq_param_t *trp, unsigned int flags, struct kqworkloop **kqwlp)
{
- struct filedesc *descp = p->p_fd;
- struct fileproc *fp = NULL;
- struct kqueue *kq;
- int fd = 0;
+ struct filedesc *fdp = p->p_fd;
+ struct kqworkloop *alloc_kqwl = NULL;
+ struct kqworkloop *kqwl = NULL;
int error = 0;
- /* Was the workloop flag passed? Then it is for sure only a workloop */
- if (flags & KEVENT_FLAG_DYNAMIC_KQUEUE) {
- assert(flags & KEVENT_FLAG_WORKLOOP);
- if (id == (kqueue_id_t)-1 &&
- (flags & KEVENT_FLAG_KERNEL) &&
- (flags & KEVENT_FLAG_WORKLOOP)) {
-
- assert(is_workqueue_thread(current_thread()));
-
- /*
- * when kevent_id_internal is called from within the
- * kernel, and the passed 'id' value is '-1' then we
- * look for the currently bound workloop kq.
- *
- * Until pthread kext avoids calling in to kevent_id_internal
- * for threads whose fulfill is canceled, calling in unbound
- * can't be fatal.
- */
- kq = kevent_get_bound_kq(p, current_thread(),
- KEVENT_FLAG_WORKLOOP, KQ_WORKLOOP);
- if (kq) {
- kqueue_retain(kq);
- } else {
- struct uthread *ut = get_bsdthread_info(current_thread());
+ assert(!trp || (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST));
- /* If thread is unbound due to cancel, just return an error */
- if (ut->uu_kqueue_flags == KEVENT_FLAG_WORKLOOP_CANCELED) {
- ut->uu_kqueue_flags = 0;
- error = ECANCELED;
- } else {
- panic("Unbound thread called kevent_internal with id=-1"
- " uu_kqueue_flags:0x%x, uu_kqueue_bound:%p",
- ut->uu_kqueue_flags, ut->uu_kqueue_bound);
- }
- }
+ if (id == 0 || id == (kqueue_id_t)-1) {
+ return EINVAL;
+ }
- *fpp = NULL;
- *fdp = 0;
- *kqp = kq;
- return error;
+ for (;;) {
+ kqhash_lock(fdp);
+ if (__improbable(fdp->fd_kqhash == NULL)) {
+ kqworkloop_hash_init(fdp);
}
- /* try shortcut on kq lookup for bound threads */
- kq = kevent_get_bound_kq(p, current_thread(), KEVENT_FLAG_WORKLOOP, KQ_WORKLOOP);
- if (kq != NULL && ((struct kqworkloop *)kq)->kqwl_dynamicid == id) {
-
- if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) {
+ kqwl = kqworkloop_hash_lookup_locked(fdp, id);
+ if (kqwl) {
+ if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
+ /*
+ * If MUST_NOT_EXIST was passed, even if we would have failed
+ * the try_retain, it could have gone the other way, and
+ * userspace can't tell. Let'em fix their race.
+ */
error = EEXIST;
- kq = NULL;
- goto out;
- }
-
- /* retain a reference while working with this kq. */
- assert(kq->kq_state & KQ_DYNAMIC);
- kqueue_retain(kq);
- error = 0;
- goto out;
- }
-
- /* look for the kq on the hash table */
- kqhash_lock(p);
- kq = kqueue_hash_lookup(p, id);
- if (kq == NULL) {
- kqhash_unlock(p);
-
- if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST) {
- error = ENOENT;
- goto out;
- }
-
- struct kqueue *alloc_kq;
- alloc_kq = kqueue_alloc(p, flags);
- if (alloc_kq) {
- kqhash_lock(p);
- kqueue_hash_init_if_needed(p);
- kq = kqueue_hash_lookup(p, id);
- if (kq == NULL) {
- /* insert our new one */
- kq = alloc_kq;
- kqueue_hash_insert(p, id, kq);
- kqhash_unlock(p);
- } else {
- /* lost race, retain existing workloop */
- kqueue_retain(kq);
- kqhash_unlock(p);
- kqueue_release(alloc_kq, KQUEUE_MIGHT_BE_LAST_REF);
- kqueue_dealloc(alloc_kq);
- }
- } else {
- error = ENOMEM;
- goto out;
+ break;
}
- } else {
- if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) {
- kqhash_unlock(p);
- kq = NULL;
- error = EEXIST;
- goto out;
+ if (__probable(kqworkloop_try_retain(kqwl))) {
+ /*
+			 * This is a valid live workloop!
+ */
+ *kqwlp = kqwl;
+ error = 0;
+ break;
}
+ }
- /* retain a reference while working with this kq. */
- assert(kq->kq_state & KQ_DYNAMIC);
- kqueue_retain(kq);
- kqhash_unlock(p);
+ if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST)) {
+ error = ENOENT;
+ break;
}
-
- } else if (flags & KEVENT_FLAG_WORKQ) {
- /* must already exist for bound threads. */
- if (flags & KEVENT_FLAG_KERNEL) {
- assert(descp->fd_wqkqueue != NULL);
+
+ /*
+ * We didn't find what we were looking for.
+ *
+ * If this is the second time we reach this point (alloc_kqwl != NULL),
+ * then we're done.
+ *
+ * If this is the first time we reach this point (alloc_kqwl == NULL),
+ * then try to allocate one without blocking.
+ */
+ if (__probable(alloc_kqwl == NULL)) {
+ alloc_kqwl = (struct kqworkloop *)zalloc_noblock(kqworkloop_zone);
+ }
+ if (__probable(alloc_kqwl)) {
+ kqworkloop_init(alloc_kqwl, p, id, trp);
+ kqworkloop_hash_insert_locked(fdp, id, alloc_kqwl);
+ kqhash_unlock(fdp);
+ *kqwlp = alloc_kqwl;
+ return 0;
}
/*
- * use the private kq associated with the proc workq.
- * Just being a thread within the process (and not
- * being the exit/exec thread) is enough to hold a
- * reference on this special kq.
+		 * We have to block to allocate a workloop: drop the lock,
+		 * allocate one, then retry the lookup since someone else
+		 * could have raced with us.
*/
- kq = descp->fd_wqkqueue;
- if (kq == NULL) {
- struct kqueue *alloc_kq = kqueue_alloc(p, KEVENT_FLAG_WORKQ);
- if (alloc_kq == NULL)
- return ENOMEM;
-
- knhash_lock(p);
- if (descp->fd_wqkqueue == NULL) {
- kq = descp->fd_wqkqueue = alloc_kq;
- knhash_unlock(p);
- } else {
- knhash_unlock(p);
- kq = descp->fd_wqkqueue;
- kqueue_dealloc(alloc_kq);
- }
+ kqhash_unlock(fdp);
+
+ alloc_kqwl = (struct kqworkloop *)zalloc(kqworkloop_zone);
+ if (__improbable(!alloc_kqwl)) {
+ return ENOMEM;
}
- } else {
- /* get a usecount for the kq itself */
- fd = (int)id;
- if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0)
- return (error);
- }
- if ((error = kevent_set_kq_mode(kq, flags)) != 0) {
- /* drop the usecount */
- if (fp != NULL)
- fp_drop(p, fd, fp, 0);
- return error;
- }
+ }
+
+ kqhash_unlock(fdp);
+
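+	/*
+	 * We left the retry loop with an error or an existing workloop;
+	 * free the speculative allocation we never published.
+	 */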
+ if (__improbable(alloc_kqwl)) {
+ zfree(kqworkloop_zone, alloc_kqwl);
+ }
-out:
- *fpp = fp;
- *fdp = fd;
- *kqp = kq;
-
return error;
}
-static void
-kevent_put_kq(
- struct proc *p,
- kqueue_id_t id,
- struct fileproc *fp,
- struct kqueue *kq)
+#pragma mark - knotes
+
+static int
+filt_no_attach(struct knote *kn, __unused struct kevent_qos_s *kev)
{
- kqueue_release_last(p, kq);
- if (fp != NULL) {
- assert((kq->kq_state & KQ_WORKQ) == 0);
- fp_drop(p, (int)id, fp, 0);
- }
+ knote_set_error(kn, ENOTSUP);
+ return 0;
}
-static uint64_t
-kevent_workloop_serial_no_copyin(proc_t p, uint64_t workloop_id)
+static void
+filt_no_detach(__unused struct knote *kn)
{
- uint64_t serial_no = 0;
- user_addr_t addr;
- int rc;
+}
- if (workloop_id == 0 || p->p_dispatchqueue_serialno_offset == 0) {
- return 0;
- }
- addr = (user_addr_t)(workloop_id + p->p_dispatchqueue_serialno_offset);
+static int __dead2
+filt_bad_event(struct knote *kn, long hint)
+{
+ panic("%s[%d](%p, %ld)", __func__, kn->kn_filter, kn, hint);
+}
- if (proc_is64bit(p)) {
- rc = copyin(addr, (caddr_t)&serial_no, sizeof(serial_no));
- } else {
- uint32_t serial_no32 = 0;
- rc = copyin(addr, (caddr_t)&serial_no32, sizeof(serial_no32));
- serial_no = serial_no32;
- }
- return rc == 0 ? serial_no : 0;
+static int __dead2
+filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev)
+{
+ panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
}
-int
-kevent_exit_on_workloop_ownership_leak(thread_t thread)
+static int __dead2
+filt_bad_process(struct knote *kn, struct kevent_qos_s *kev)
{
- proc_t p = current_proc();
- struct filedesc *fdp = p->p_fd;
- kqueue_id_t workloop_id = 0;
- os_reason_t reason;
- mach_vm_address_t addr;
- uint32_t reason_size;
+ panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
+}
- kqhash_lock(p);
- if (fdp->fd_kqhashmask > 0) {
- for (uint32_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
- struct kqworkloop *kqwl;
+/*
+ * knotes_dealloc - detach all knotes for the process and drop them
+ *
+ * Called with proc_fdlock held.
+ * Returns with it locked.
+ * May drop it temporarily.
+ * Process is in such a state that it will not try to allocate
+ * any more knotes during this process (stopped for exit or exec).
+ */
+void
+knotes_dealloc(proc_t p)
+{
+ struct filedesc *fdp = p->p_fd;
+ struct kqueue *kq;
+ struct knote *kn;
+ struct klist *kn_hash = NULL;
+ int i;
- SLIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
- struct kqueue *kq = &kqwl->kqwl_kqueue;
- if ((kq->kq_state & KQ_DYNAMIC) && kqwl->kqwl_owner == thread) {
- workloop_id = kqwl->kqwl_dynamicid;
- break;
- }
+ /* Close all the fd-indexed knotes up front */
+ if (fdp->fd_knlistsize > 0) {
+ for (i = 0; i < fdp->fd_knlistsize; i++) {
+ while ((kn = SLIST_FIRST(&fdp->fd_knlist[i])) != NULL) {
+ kq = knote_get_kq(kn);
+ kqlock(kq);
+ proc_fdunlock(p);
+ knote_drop(kq, kn, NULL);
+ proc_fdlock(p);
}
}
+ /* free the table */
+ FREE(fdp->fd_knlist, M_KQUEUE);
+ fdp->fd_knlist = NULL;
}
- kqhash_unlock(p);
- assert(workloop_id);
+ fdp->fd_knlistsize = 0;
- reason = os_reason_create(OS_REASON_LIBSYSTEM,
- OS_REASON_LIBSYSTEM_CODE_WORKLOOP_OWNERSHIP_LEAK);
- if (reason == OS_REASON_NULL) {
- goto out;
- }
+ knhash_lock(fdp);
+ proc_fdunlock(p);
- reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
- reason_size = 2 * sizeof(uint64_t);
- reason_size = kcdata_estimate_required_buffer_size(2, reason_size);
- if (os_reason_alloc_buffer(reason, reason_size) != 0) {
- goto out;
+ /* Clean out all the hashed knotes as well */
+ if (fdp->fd_knhashmask != 0) {
+ for (i = 0; i <= (int)fdp->fd_knhashmask; i++) {
+ while ((kn = SLIST_FIRST(&fdp->fd_knhash[i])) != NULL) {
+ kq = knote_get_kq(kn);
+ kqlock(kq);
+ knhash_unlock(fdp);
+ knote_drop(kq, kn, NULL);
+ knhash_lock(fdp);
+ }
+ }
+ kn_hash = fdp->fd_knhash;
+ fdp->fd_knhashmask = 0;
+ fdp->fd_knhash = NULL;
}
- struct kcdata_descriptor *kcd = &reason->osr_kcd_descriptor;
-
- if (kcdata_get_memory_addr(kcd, EXIT_REASON_WORKLOOP_ID,
- sizeof(workloop_id), &addr) == KERN_SUCCESS) {
- kcdata_memcpy(kcd, addr, &workloop_id, sizeof(workloop_id));
- }
+ knhash_unlock(fdp);
- uint64_t serial_no = kevent_workloop_serial_no_copyin(p, workloop_id);
- if (serial_no && kcdata_get_memory_addr(kcd, EXIT_REASON_DISPATCH_QUEUE_NO,
- sizeof(serial_no), &addr) == KERN_SUCCESS) {
- kcdata_memcpy(kcd, addr, &serial_no, sizeof(serial_no));
+ /* free the kn_hash table */
+ if (kn_hash) {
+ FREE(kn_hash, M_KQUEUE);
}
-out:
-#if DEVELOPMENT || DEBUG
- psignal_try_thread_with_reason(p, thread, SIGABRT, reason);
- return 0;
-#else
- return exit_with_reason(p, W_EXITCODE(0, SIGKILL), (int *)NULL,
- FALSE, FALSE, 0, reason);
-#endif
-}
-
-
-static int
-kevent_servicer_detach_preflight(thread_t thread, unsigned int flags, struct kqueue *kq)
-{
- int error = 0;
- struct kqworkloop *kqwl;
- struct uthread *ut;
- struct kqrequest *kqr;
-
- if (!(flags & KEVENT_FLAG_WORKLOOP) || !(kq->kq_state & KQ_WORKLOOP))
- return EINVAL;
-
- /* only kq created with KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD from userspace can have attached threads */
- if (!(kq->kq_state & KQ_NO_WQ_THREAD))
- return EINVAL;
-
- /* allow detach only on not wq threads */
- if (is_workqueue_thread(thread))
- return EINVAL;
-
- /* check that the current thread is bound to the requested wq */
- ut = get_bsdthread_info(thread);
- if (ut->uu_kqueue_bound != kq)
- return EINVAL;
-
- kqwl = (struct kqworkloop *)kq;
- kqwl_req_lock(kqwl);
- kqr = &kqwl->kqwl_request;
-
- /* check that the wq is bound to the thread */
- if ((kqr->kqr_state & KQR_BOUND) == 0 || (kqr->kqr_thread != thread))
- error = EINVAL;
-
- kqwl_req_unlock(kqwl);
-
- return error;
-}
-
-static void
-kevent_servicer_detach_thread(struct proc *p, kqueue_id_t id, thread_t thread,
- unsigned int flags, struct kqueue *kq)
-{
- struct kqworkloop *kqwl;
- struct uthread *ut;
-
- assert((flags & KEVENT_FLAG_WORKLOOP) && (kq->kq_state & KQ_WORKLOOP));
-
- /* allow detach only on not wqthreads threads */
- assert(!is_workqueue_thread(thread));
-
- /* only kq created with KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD from userspace can have attached threads */
- assert(kq->kq_state & KQ_NO_WQ_THREAD);
-
- /* check that the current thread is bound to the requested kq */
- ut = get_bsdthread_info(thread);
- assert(ut->uu_kqueue_bound == kq);
-
- kqwl = (struct kqworkloop *)kq;
-
- kqlock(kq);
-
- /* unbind the thread.
- * unbind itself checks if still processing and ends it.
- */
- kqworkloop_unbind_thread(kqwl, thread, flags);
-
- kqunlock(kq);
-
- kevent_put_kq(p, id, NULL, kq);
-
- return;
+ proc_fdlock(p);
}
-static int
-kevent_servicer_attach_thread(thread_t thread, unsigned int flags, struct kqueue *kq)
+/*
+ * kqworkloops_dealloc - rebalance retains on kqworkloops created with
+ * scheduling parameters
+ *
+ * Called with proc_fdlock held.
+ * Returns with it locked.
+ * Process is in such a state that it will not try to allocate
+ * any more knotes during this process (stopped for exit or exec).
+ */
+void
+kqworkloops_dealloc(proc_t p)
{
- int error = 0;
- struct kqworkloop *kqwl;
- struct uthread *ut;
- struct kqrequest *kqr;
-
- if (!(flags & KEVENT_FLAG_WORKLOOP) || !(kq->kq_state & KQ_WORKLOOP))
- return EINVAL;
-
- /* only kq created with KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD from userspace can have attached threads*/
- if (!(kq->kq_state & KQ_NO_WQ_THREAD))
- return EINVAL;
-
- /* allow attach only on not wqthreads */
- if (is_workqueue_thread(thread))
- return EINVAL;
-
- /* check that the thread is not already bound */
- ut = get_bsdthread_info(thread);
- if (ut->uu_kqueue_bound != NULL)
- return EINVAL;
-
- assert(ut->uu_kqueue_flags == 0);
-
- kqlock(kq);
- kqwl = (struct kqworkloop *)kq;
- kqwl_req_lock(kqwl);
- kqr = &kqwl->kqwl_request;
+ struct filedesc *fdp = p->p_fd;
+ struct kqworkloop *kqwl, *kqwln;
+ struct kqwllist tofree;
- /* check that the kqueue is not already bound */
- if (kqr->kqr_state & (KQR_BOUND | KQR_THREQUESTED | KQR_DRAIN)) {
- error = EINVAL;
- goto out;
+ if (!(fdp->fd_flags & FD_WORKLOOP)) {
+ return;
}
- assert(kqr->kqr_thread == NULL);
- assert((kqr->kqr_state & KQR_PROCESSING) == 0);
-
- kqr->kqr_state |= KQR_THREQUESTED;
- kqr->kqr_qos_index = THREAD_QOS_UNSPECIFIED;
- kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED;
- kqr->kqr_dsync_owner_qos = THREAD_QOS_UNSPECIFIED;
- kqr->kqr_owner_override_is_sync = 0;
-
- kqworkloop_bind_thread_impl(kqwl, thread, KEVENT_FLAG_WORKLOOP);
-
- /* get a ref on the wlkq on behalf of the attached thread */
- kqueue_retain(kq);
-
-out:
- kqwl_req_unlock(kqwl);
- kqunlock(kq);
-
- return error;
-}
-
-static inline
-boolean_t kevent_args_requesting_events(unsigned int flags, int nevents)
-{
- return (!(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0);
-}
-
-static int
-kevent_internal(struct proc *p,
- kqueue_id_t id, kqueue_id_t *id_out,
- user_addr_t changelist, int nchanges,
- user_addr_t ueventlist, int nevents,
- user_addr_t data_out, uint64_t data_available,
- unsigned int flags,
- user_addr_t utimeout,
- kqueue_continue_t continuation,
- int32_t *retval)
-{
- struct _kevent *cont_args;
- uthread_t ut;
- struct kqueue *kq;
- struct fileproc *fp = NULL;
- int fd = 0;
- struct kevent_internal_s kev;
- int error, noutputs;
- struct timeval atv;
- user_size_t data_size;
- user_size_t data_resid;
- thread_t thread = current_thread();
-
- /* Don't allow user-space threads to process output events from the workq kqs */
- if (((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL)) == KEVENT_FLAG_WORKQ) &&
- kevent_args_requesting_events(flags, nevents))
- return EINVAL;
-
- /* restrict dynamic kqueue allocation to workloops (for now) */
- if ((flags & (KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP)) == KEVENT_FLAG_DYNAMIC_KQUEUE)
- return EINVAL;
-
- if ((flags & (KEVENT_FLAG_WORKLOOP)) && (flags & (KEVENT_FLAG_WORKQ)))
- return EINVAL;
-
- if (flags & (KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH | KEVENT_FLAG_WORKLOOP_SERVICER_DETACH |
- KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST | KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD)) {
-
- /* allowed only on workloops when calling kevent_id from user-space */
- if (!(flags & KEVENT_FLAG_WORKLOOP) || (flags & KEVENT_FLAG_KERNEL) || !(flags & KEVENT_FLAG_DYNAMIC_KQUEUE))
- return EINVAL;
-
- /* cannot attach and detach simultaneously*/
- if ((flags & KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH) && (flags & KEVENT_FLAG_WORKLOOP_SERVICER_DETACH))
- return EINVAL;
-
- /* cannot ask for events and detach */
- if ((flags & KEVENT_FLAG_WORKLOOP_SERVICER_DETACH) && kevent_args_requesting_events(flags, nevents))
- return EINVAL;
-
- }
+ kqhash_lock(fdp);
- /* prepare to deal with stack-wise allocation of out events */
- if (flags & KEVENT_FLAG_STACK_EVENTS) {
- int scale = ((flags & KEVENT_FLAG_LEGACY32) ?
- (IS_64BIT_PROCESS(p) ? sizeof(struct user64_kevent) :
- sizeof(struct user32_kevent)) :
- ((flags & KEVENT_FLAG_LEGACY64) ? sizeof(struct kevent64_s) :
- sizeof(struct kevent_qos_s)));
- ueventlist += nevents * scale;
+ if (fdp->fd_kqhashmask == 0) {
+ kqhash_unlock(fdp);
+ return;
}
- /* convert timeout to absolute - if we have one (and not immediate) */
- error = kevent_get_timeout(p, utimeout, flags, &atv);
- if (error)
- return error;
-
- /* copyin initial value of data residual from data_available */
- error = kevent_get_data_size(p, data_available, flags, &data_size);
- if (error)
- return error;
-
- /* get the kq we are going to be working on */
- error = kevent_get_kq(p, id, flags, &fp, &fd, &kq);
- if (error)
- return error;
+ LIST_INIT(&tofree);
- /* only bound threads can receive events on workloops */
- if ((flags & KEVENT_FLAG_WORKLOOP) && kevent_args_requesting_events(flags, nevents)) {
- ut = (uthread_t)get_bsdthread_info(thread);
- if (ut->uu_kqueue_bound != kq) {
- error = EXDEV;
- goto out;
+ for (size_t i = 0; i <= fdp->fd_kqhashmask; i++) {
+ LIST_FOREACH_SAFE(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink, kqwln) {
+ /*
+ * kqworkloops that have scheduling parameters have an
+ * implicit retain from kqueue_workloop_ctl that needs
+ * to be balanced on process exit.
+ */
+ assert(kqwl->kqwl_params);
+ LIST_REMOVE(kqwl, kqwl_hashlink);
+ LIST_INSERT_HEAD(&tofree, kqwl, kqwl_hashlink);
}
-
}
- /* attach the current thread if necessary */
- if (flags & KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH) {
- error = kevent_servicer_attach_thread(thread, flags, kq);
- if (error)
- goto out;
- }
- else {
- /* before processing events and committing to the system call, return an error if the thread cannot be detached when requested */
- if (flags & KEVENT_FLAG_WORKLOOP_SERVICER_DETACH) {
- error = kevent_servicer_detach_preflight(thread, flags, kq);
- if (error)
- goto out;
- }
- }
+ kqhash_unlock(fdp);
- if (id_out && kq && (flags & KEVENT_FLAG_WORKLOOP)) {
- assert(kq->kq_state & KQ_WORKLOOP);
- struct kqworkloop *kqwl;
- kqwl = (struct kqworkloop *)kq;
- *id_out = kqwl->kqwl_dynamicid;
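+	/*
+	 * Release the gathered kqworkloops with the hash lock dropped; they
+	 * were already unhashed above, hence KQWL_DEALLOC_SKIP_HASH_REMOVE.
+	 */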
+ LIST_FOREACH_SAFE(kqwl, &tofree, kqwl_hashlink, kqwln) {
+ kqworkloop_dealloc(kqwl, KQWL_DEALLOC_SKIP_HASH_REMOVE, 1);
}
+}
- /* register all the change requests the user provided... */
- noutputs = 0;
- while (nchanges > 0 && error == 0) {
- error = kevent_copyin(&changelist, &kev, p, flags);
- if (error)
- break;
-
- /* Make sure user doesn't pass in any system flags */
- kev.flags &= ~EV_SYSFLAGS;
-
- kevent_register(kq, &kev, p);
-
- if (nevents > 0 &&
- ((kev.flags & EV_ERROR) || (kev.flags & EV_RECEIPT))) {
- if (kev.flags & EV_RECEIPT) {
- kev.flags |= EV_ERROR;
- kev.data = 0;
- }
- error = kevent_copyout(&kev, &ueventlist, p, flags);
- if (error == 0) {
- nevents--;
- noutputs++;
- }
- } else if (kev.flags & EV_ERROR) {
- error = kev.data;
- }
- nchanges--;
+static int
+kevent_register_validate_priority(struct kqueue *kq, struct knote *kn,
+ struct kevent_qos_s *kev)
+{
+ /* We don't care about the priority of a disabled or deleted knote */
+ if (kev->flags & (EV_DISABLE | EV_DELETE)) {
+ return 0;
}
- /* short-circuit the scan if we only want error events */
- if (flags & KEVENT_FLAG_ERROR_EVENTS)
- nevents = 0;
-
- /* process pending events */
- if (nevents > 0 && noutputs == 0 && error == 0) {
- /* store the continuation/completion data in the uthread */
- ut = (uthread_t)get_bsdthread_info(thread);
- cont_args = &ut->uu_kevent.ss_kevent;
- cont_args->fp = fp;
- cont_args->fd = fd;
- cont_args->retval = retval;
- cont_args->eventlist = ueventlist;
- cont_args->eventcount = nevents;
- cont_args->eventout = noutputs;
- cont_args->data_available = data_available;
- cont_args->process_data.fp_fd = (int)id;
- cont_args->process_data.fp_flags = flags;
- cont_args->process_data.fp_data_out = data_out;
- cont_args->process_data.fp_data_size = data_size;
- cont_args->process_data.fp_data_resid = data_size;
-
- error = kqueue_scan(kq, kevent_callback,
- continuation, cont_args,
- &cont_args->process_data,
- &atv, p);
-
- /* process remaining outputs */
- noutputs = cont_args->eventout;
- data_resid = cont_args->process_data.fp_data_resid;
-
- /* copyout residual data size value (if it needs to be copied out) */
- /* don't abandon other output just because of residual copyout failures */
- if (error == 0 && data_available && data_resid != data_size) {
- (void)kevent_put_data_size(p, data_available, flags, data_resid);
+ if (kq->kq_state & KQ_WORKLOOP) {
+ /*
+ * Workloops need valid priorities with a QOS (excluding manager) for
+ * any enabled knote.
+ *
+ * When it is pre-existing, just make sure it has a valid QoS as
+		 * kevent_register() will not use the incoming priority (filters that do
+		 * use it are responsible for validating it again, see filt_wltouch).
+ *
+ * If the knote is being made, validate the incoming priority.
+ */
+ if (!_pthread_priority_thread_qos(kn ? kn->kn_qos : kev->qos)) {
+ return ERANGE;
}
}
- /* detach the current thread if necessary */
- if (flags & KEVENT_FLAG_WORKLOOP_SERVICER_DETACH) {
- assert(fp == NULL);
- kevent_servicer_detach_thread(p, id, thread, flags, kq);
- }
-
-out:
- kevent_put_kq(p, id, fp, kq);
-
- /* don't restart after signals... */
- if (error == ERESTART)
- error = EINTR;
- else if (error == EWOULDBLOCK)
- error = 0;
- if (error == 0)
- *retval = noutputs;
- return (error);
+ return 0;
}
-
/*
- * kevent_callback - callback for each individual event
+ * Prepare a filter for waiting after register.
*
- * called with nothing locked
- * caller holds a reference on the kqueue
+ * The f_post_register_wait hook will be called later by kevent_register()
+ * and should call kevent_register_wait_block()
*/
static int
-kevent_callback(__unused struct kqueue *kq, struct kevent_internal_s *kevp,
- void *data)
+kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int rc)
{
- struct _kevent *cont_args;
- int error;
+ thread_t thread = current_thread();
- cont_args = (struct _kevent *)data;
- assert(cont_args->eventout < cont_args->eventcount);
+ assert(knote_fops(kn)->f_extended_codes);
- /*
- * Copy out the appropriate amount of event data for this user.
- */
- error = kevent_copyout(kevp, &cont_args->eventlist, current_proc(),
- cont_args->process_data.fp_flags);
+ if (kn->kn_thread == NULL) {
+ thread_reference(thread);
+ kn->kn_thread = thread;
+ } else if (kn->kn_thread != thread) {
+ /*
+		 * kn_thread may be set from a previous aborted wait.
+		 * However, it has to be from the same thread.
+ */
+ kev->flags |= EV_ERROR;
+ kev->data = EXDEV;
+ return 0;
+ }
- /*
- * If there isn't space for additional events, return
- * a harmless error to stop the processing here
- */
- if (error == 0 && ++cont_args->eventout == cont_args->eventcount)
- error = EWOULDBLOCK;
- return (error);
+ return FILTER_REGISTER_WAIT | rc;
}
/*
- * kevent_description - format a description of a kevent for diagnostic output
- *
- * called with a 256-byte string buffer
+ * Cleanup a kevent_register_wait_prepare() effect for threads that have been
+ * aborted instead of properly woken up with thread_wakeup_thread().
+ */
+static void
+kevent_register_wait_cleanup(struct knote *kn)
+{
+ thread_t thread = kn->kn_thread;
+ kn->kn_thread = NULL;
+ thread_deallocate(thread);
+}
+
+/*
+ * Must be called at the end of an f_post_register_wait call from a filter.
*/
+static void
+kevent_register_wait_block(struct turnstile *ts, thread_t thread,
+ thread_continue_t cont, struct _kevent_register *cont_args)
+{
+ turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
+ kqunlock(cont_args->kqwl);
+ cont_args->handoff_thread = thread;
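+	/*
+	 * Hand the processor off to the designated thread; the registering
+	 * thread blocks here and resumes in the supplied continuation once it
+	 * is woken back up.
+	 */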
+ thread_handoff_parameter(thread, cont, cont_args);
+}
-char *
-kevent_description(struct kevent_internal_s *kevp, char *s, size_t n)
+/*
+ * Called by filters using an f_post_register_wait hook to return from their wait.
+ */
+static void
+kevent_register_wait_return(struct _kevent_register *cont_args)
{
- snprintf(s, n,
- "kevent="
- "{.ident=%#llx, .filter=%d, .flags=%#x, .udata=%#llx, .fflags=%#x, .data=%#llx, .ext[0]=%#llx, .ext[1]=%#llx}",
- kevp->ident,
- kevp->filter,
- kevp->flags,
- kevp->udata,
- kevp->fflags,
- kevp->data,
- kevp->ext[0],
- kevp->ext[1] );
+ struct kqworkloop *kqwl = cont_args->kqwl;
+ struct kevent_qos_s *kev = &cont_args->kev;
+ int error = 0;
+
+ if (cont_args->handoff_thread) {
+ thread_deallocate(cont_args->handoff_thread);
+ }
+
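+	/*
+	 * This runs as the continuation after the handoff above, bypassing the
+	 * normal kevent exit path: emit any error/receipt event here and
+	 * complete the syscall manually below.
+	 */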
+ if (kev->flags & (EV_ERROR | EV_RECEIPT)) {
+ if ((kev->flags & EV_ERROR) == 0) {
+ kev->flags |= EV_ERROR;
+ kev->data = 0;
+ }
+ error = kevent_modern_copyout(kev, &cont_args->ueventlist);
+ if (error == 0) {
+ cont_args->eventout++;
+ }
+ }
- return (s);
+ kqworkloop_release(kqwl);
+ if (error == 0) {
+		*(int32_t *)&current_uthread()->uu_rval = cont_args->eventout;
+ }
+ unix_syscall_return(error);
}
/*
* caller holds a reference on the kqueue
*/
-void
-kevent_register(struct kqueue *kq, struct kevent_internal_s *kev,
- __unused struct proc *ctxp)
+int
+kevent_register(struct kqueue *kq, struct kevent_qos_s *kev,
+ struct knote **kn_out)
{
struct proc *p = kq->kq_p;
const struct filterops *fops;
struct knote *kn = NULL;
- int result = 0;
- int error = 0;
+ int result = 0, error = 0;
unsigned short kev_flags = kev->flags;
- int knoteuse_flags = KNUSE_NONE;
+ KNOTE_LOCK_CTX(knlc);
- if (kev->filter < 0) {
- if (kev->filter + EVFILT_SYSCOUNT < 0) {
- error = EINVAL;
- goto out;
- }
- fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
+ if (__probable(kev->filter < 0 && kev->filter + EVFILT_SYSCOUNT >= 0)) {
+ fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
} else {
error = EINVAL;
goto out;
}
/* restrict EV_VANISHED to adding udata-specific dispatch kevents */
- if ((kev->flags & EV_VANISHED) &&
- (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2)) {
+ if (__improbable((kev->flags & EV_VANISHED) &&
+ (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2))) {
error = EINVAL;
goto out;
}
/* Simplify the flags - delete and disable overrule */
- if (kev->flags & EV_DELETE)
+ if (kev->flags & EV_DELETE) {
kev->flags &= ~EV_ADD;
- if (kev->flags & EV_DISABLE)
+ }
+ if (kev->flags & EV_DISABLE) {
kev->flags &= ~EV_ENABLE;
+ }
if (kq->kq_state & KQ_WORKLOOP) {
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_REGISTER),
- ((struct kqworkloop *)kq)->kqwl_dynamicid,
- kev->udata, kev->flags, kev->filter);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_REGISTER),
+ ((struct kqworkloop *)kq)->kqwl_dynamicid,
+ kev->udata, kev->flags, kev->filter);
} else if (kq->kq_state & KQ_WORKQ) {
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_REGISTER),
- 0, kev->udata, kev->flags, kev->filter);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_REGISTER),
+ 0, kev->udata, kev->flags, kev->filter);
} else {
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_REGISTER),
- VM_KERNEL_UNSLIDE_OR_PERM(kq),
- kev->udata, kev->flags, kev->filter);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_REGISTER),
+ VM_KERNEL_UNSLIDE_OR_PERM(kq),
+ kev->udata, kev->flags, kev->filter);
}
restart:
-
/* find the matching knote from the fd tables/hashes */
kn = kq_find_knote_and_kq_lock(kq, kev, fops->f_isfd, p);
+ error = kevent_register_validate_priority(kq, kn, kev);
+ result = 0;
+ if (error) {
+ goto out;
+ }
- if (kn == NULL) {
- if (kev->flags & EV_ADD) {
- struct fileproc *knote_fp = NULL;
-
- /* grab a file reference for the new knote */
- if (fops->f_isfd) {
- if ((error = fp_lookup(p, kev->ident, &knote_fp, 0)) != 0) {
- goto out;
- }
- }
-
- kn = knote_alloc();
- if (kn == NULL) {
- error = ENOMEM;
- if (knote_fp != NULL)
- fp_drop(p, kev->ident, knote_fp, 0);
- goto out;
- }
-
- kn->kn_fp = knote_fp;
- knote_set_kq(kn, kq);
- kqueue_retain(kq); /* retain a kq ref */
- kn->kn_filtid = ~kev->filter;
- kn->kn_inuse = 1; /* for f_attach() */
- kn->kn_status = KN_ATTACHING | KN_ATTACHED;
-
- /* was vanish support requested */
- if (kev->flags & EV_VANISHED) {
- kev->flags &= ~EV_VANISHED;
- kn->kn_status |= KN_REQVANISH;
- }
-
- /* snapshot matching/dispatching protcol flags into knote */
- if (kev->flags & EV_DISPATCH)
- kn->kn_status |= KN_DISPATCH;
- if (kev->flags & EV_UDATA_SPECIFIC)
- kn->kn_status |= KN_UDATA_SPECIFIC;
+ if (kn == NULL && (kev->flags & EV_ADD) == 0) {
+ /*
+ * No knote found, EV_ADD wasn't specified
+ */
+ if ((kev_flags & EV_ADD) && (kev_flags & EV_DELETE) &&
+ (kq->kq_state & KQ_WORKLOOP)) {
/*
- * copy the kevent state into knote
- * protocol is that fflags and data
- * are saved off, and cleared before
- * calling the attach routine.
+ * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete
+ * that doesn't care about ENOENT, so just pretend the deletion
+ * happened.
*/
- kn->kn_kevent = *kev;
- kn->kn_sfflags = kev->fflags;
- kn->kn_sdata = kev->data;
- kn->kn_fflags = 0;
- kn->kn_data = 0;
-
- /* invoke pthread kext to convert kevent qos to thread qos */
- knote_canonicalize_kevent_qos(kn);
- knote_set_qos_index(kn, qos_index_from_qos(kn, kn->kn_qos, FALSE));
-
- /* before anyone can find it */
- if (kev->flags & EV_DISABLE) {
- /*
- * do this before anyone can find it,
- * this can't call knote_disable() because it expects having
- * the kqlock held
- */
- kn->kn_status |= KN_DISABLED;
- }
-
- /* Add the knote for lookup thru the fd table */
- error = kq_add_knote(kq, kn, kev, p, &knoteuse_flags);
- if (error) {
- (void)kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF);
- knote_free(kn);
- if (knote_fp != NULL)
- fp_drop(p, kev->ident, knote_fp, 0);
-
- if (error == ERESTART) {
- error = 0;
- goto restart;
- }
- goto out;
- }
-
- /* fp reference count now applies to knote */
- /* rwlock boost is now held */
-
- /* call filter attach routine */
- result = fops->f_attach(kn, kev);
+ } else {
+ error = ENOENT;
+ }
+ goto out;
+ } else if (kn == NULL) {
+ /*
+ * No knote found, need to attach a new one (attach)
+ */
- /*
- * Trade knote use count for kq lock.
- * Cannot be dropped because we held
- * KN_ATTACHING throughout.
- */
- knoteuse2kqlock(kq, kn, KNUSE_STEAL_DROP | knoteuse_flags);
+ struct fileproc *knote_fp = NULL;
- if (kn->kn_flags & EV_ERROR) {
- /*
- * Failed to attach correctly, so drop.
- * All other possible users/droppers
- * have deferred to us. Save the error
- * to return to our caller.
- */
- kn->kn_status &= ~KN_ATTACHED;
- kn->kn_status |= KN_DROPPING;
- error = kn->kn_data;
- kqunlock(kq);
- knote_drop(kn, p);
+ /* grab a file reference for the new knote */
+ if (fops->f_isfd) {
+ if ((error = fp_lookup(p, kev->ident, &knote_fp, 0)) != 0) {
goto out;
}
+ }
- /* end "attaching" phase - now just attached */
- kn->kn_status &= ~KN_ATTACHING;
-
- if (kn->kn_status & KN_DROPPING) {
- /*
- * Attach succeeded, but someone else
- * deferred their drop - now we have
- * to do it for them.
- */
- kqunlock(kq);
- knote_drop(kn, p);
- goto out;
+ kn = knote_alloc();
+ if (kn == NULL) {
+ error = ENOMEM;
+ if (knote_fp != NULL) {
+ fp_drop(p, kev->ident, knote_fp, 0);
}
+ goto out;
+ }
- /* Mark the thread request overcommit - if appropos */
- knote_set_qos_overcommit(kn);
+ kn->kn_fp = knote_fp;
+ kn->kn_is_fd = fops->f_isfd;
+ kn->kn_kq_packed = (intptr_t)(struct kqueue *)kq;
+ kn->kn_status = 0;
- /*
- * If the attach routine indicated that an
- * event is already fired, activate the knote.
- */
- if (result)
- knote_activate(kn);
+ /* was vanish support requested */
+ if (kev->flags & EV_VANISHED) {
+ kev->flags &= ~EV_VANISHED;
+ kn->kn_status |= KN_REQVANISH;
+ }
- if (knote_fops(kn)->f_post_attach) {
- error = knote_fops(kn)->f_post_attach(kn, kev);
- if (error) {
- kqunlock(kq);
- goto out;
- }
+	/* snapshot matching/dispatching protocol flags into knote */
+ if (kev->flags & EV_DISABLE) {
+ kn->kn_status |= KN_DISABLED;
+ }
+
+ /*
+ * copy the kevent state into knote
+ * protocol is that fflags and data
+ * are saved off, and cleared before
+ * calling the attach routine.
+ *
+ * - kn->kn_sfflags aliases with kev->xflags
+ * - kn->kn_sdata aliases with kev->data
+ * - kn->kn_filter is the top 8 bits of kev->filter
+ */
+ kn->kn_kevent = *(struct kevent_internal_s *)kev;
+ kn->kn_sfflags = kev->fflags;
+ kn->kn_filtid = (uint8_t)~kev->filter;
+ kn->kn_fflags = 0;
+ knote_reset_priority(kq, kn, kev->qos);
+
+ /* Add the knote for lookup thru the fd table */
+ error = kq_add_knote(kq, kn, &knlc, p);
+ if (error) {
+ knote_free(kn);
+ if (knote_fp != NULL) {
+ fp_drop(p, kev->ident, knote_fp, 0);
}
- } else {
- if ((kev_flags & (EV_ADD | EV_DELETE)) == (EV_ADD | EV_DELETE) &&
- (kq->kq_state & KQ_WORKLOOP)) {
- /*
- * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete
- * that doesn't care about ENOENT, so just pretend the deletion
- * happened.
- */
- } else {
- error = ENOENT;
+ if (error == ERESTART) {
+ goto restart;
}
goto out;
}
- } else {
- /* existing knote: kqueue lock already taken by kq_find_knote_and_kq_lock */
+ /* fp reference count now applies to knote */
- if ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) != 0) {
- /*
- * The knote is not in a stable state, wait for that
- * transition to complete and then redrive the lookup.
- */
- knoteusewait(kq, kn);
- goto restart;
+ /*
+ * we can't use filter_call() because f_attach can change the filter ops
+ * for a filter that supports f_extended_codes, so we need to reload
+ * knote_fops() and not use `fops`.
+ */
+ result = fops->f_attach(kn, kev);
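+	/*
+	 * Filters without extended codes return a plain boolean from f_attach;
+	 * normalize it to FILTER_ACTIVE so the rest of this function only has
+	 * to deal with extended result codes.
+	 */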
+ if (result && !knote_fops(kn)->f_extended_codes) {
+ result = FILTER_ACTIVE;
}
- if (kev->flags & EV_DELETE) {
+ kqlock(kq);
+
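+	/*
+	 * A FILTER_THREADREQ_NODEFEER result means the filter left preemption
+	 * disabled around a thread request that must not be deferred; balance
+	 * it now that the kqlock is held again.
+	 */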
+ if (result & FILTER_THREADREQ_NODEFEER) {
+ enable_preemption();
+ }
+ if (kn->kn_flags & EV_ERROR) {
/*
- * If attempting to delete a disabled dispatch2 knote,
- * we must wait for the knote to be re-enabled (unless
- * it is being re-enabled atomically here).
+ * Failed to attach correctly, so drop.
*/
- if ((kev->flags & EV_ENABLE) == 0 &&
- (kn->kn_status & (KN_DISPATCH2 | KN_DISABLED)) ==
- (KN_DISPATCH2 | KN_DISABLED)) {
- kn->kn_status |= KN_DEFERDELETE;
- kqunlock(kq);
- error = EINPROGRESS;
- } else if (knote_fops(kn)->f_drop_and_unlock) {
- /*
- * The filter has requested to handle EV_DELETE events
- *
- * ERESTART means the kevent has to be re-evaluated
- */
- error = knote_fops(kn)->f_drop_and_unlock(kn, kev);
- if (error == ERESTART) {
- error = 0;
- goto restart;
- }
- } else if (kqlock2knotedrop(kq, kn)) {
- /* standard/default EV_DELETE path */
- knote_drop(kn, p);
- } else {
- /*
- * The kqueue is unlocked, it's not being
- * dropped, and kqlock2knotedrop returned 0:
- * this means that someone stole the drop of
- * the knote from us.
- */
- error = EINPROGRESS;
- }
+ kn->kn_filtid = EVFILTID_DETACHED;
+ error = kn->kn_sdata;
+ knote_drop(kq, kn, &knlc);
+ result = 0;
goto out;
}
/*
- * If we are re-enabling a deferred-delete knote,
- * just enable it now and avoid calling the
- * filter touch routine (it has delivered its
- * last event already).
+ * end "attaching" phase - now just attached
+ *
+	 * Mark the thread request overcommit, if appropriate
+ *
+ * If the attach routine indicated that an
+ * event is already fired, activate the knote.
*/
- if ((kev->flags & EV_ENABLE) &&
- (kn->kn_status & KN_DEFERDELETE)) {
- assert(kn->kn_status & KN_DISABLED);
- knote_activate(kn);
- knote_enable(kn);
- kqunlock(kq);
- goto out;
+ if ((kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) &&
+ (kq->kq_state & KQ_WORKLOOP)) {
+ kqworkloop_set_overcommit((struct kqworkloop *)kq);
}
-
+ } else if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
/*
- * If we are disabling, do it before unlocking and
- * calling the touch routine (so no processing can
- * see the new kevent state before the disable is
- * applied).
+ * The knote was dropped while we were waiting for the lock,
+ * we need to re-evaluate entirely
*/
- if (kev->flags & EV_DISABLE)
- knote_disable(kn);
+ goto restart;
+ } else if (kev->flags & EV_DELETE) {
/*
- * Convert the kqlock to a use reference on the
- * knote so we can call the filter touch routine.
+ * Deletion of a knote (drop)
+ *
+ * If the filter wants to filter drop events, let it do so.
+ *
+ * defer-delete: when trying to delete a disabled EV_DISPATCH2 knote,
+ * we must wait for the knote to be re-enabled (unless it is being
+ * re-enabled atomically here).
*/
- if (knoteuse_needs_boost(kn, kev)) {
- knoteuse_flags |= KNUSE_BOOST;
- }
- if (kqlock2knoteuse(kq, kn, knoteuse_flags)) {
- /*
- * Call touch routine to notify filter of changes
- * in filter values (and to re-determine if any
- * events are fired).
- */
- result = knote_fops(kn)->f_touch(kn, kev);
- /* Get the kq lock back (don't defer droppers). */
- if (!knoteuse2kqlock(kq, kn, knoteuse_flags)) {
- kqunlock(kq);
- goto out;
- }
+ if (knote_fops(kn)->f_allow_drop) {
+ bool drop;
- /* Handle errors during touch routine */
- if (kev->flags & EV_ERROR) {
- error = kev->data;
- kqunlock(kq);
- goto out;
+ kqunlock(kq);
+ drop = knote_fops(kn)->f_allow_drop(kn, kev);
+ kqlock(kq);
+
+ if (!drop) {
+ goto out_unlock;
}
+ }
+
+ if ((kev->flags & EV_ENABLE) == 0 &&
+ (kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
+ (kn->kn_status & KN_DISABLED) != 0) {
+ kn->kn_status |= KN_DEFERDELETE;
+ error = EINPROGRESS;
+ goto out_unlock;
+ }
+
+ knote_drop(kq, kn, &knlc);
+ goto out;
+ } else {
+ /*
+ * Regular update of a knote (touch)
+ *
+ * Call touch routine to notify filter of changes in filter values
+ * (and to re-determine if any events are fired).
+ *
+ * If the knote is in defer-delete, avoid calling the filter touch
+ * routine (it has delivered its last event already).
+ *
+ * If the touch routine had no failure,
+ * apply the requested side effects to the knote.
+ */
- /* Activate it if the touch routine said to */
- if (result)
- knote_activate(kn);
+ if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
+ if (kev->flags & EV_ENABLE) {
+ result = FILTER_ACTIVE;
+ }
+ } else {
+ kqunlock(kq);
+ result = filter_call(knote_fops(kn), f_touch(kn, kev));
+ kqlock(kq);
+ if (result & FILTER_THREADREQ_NODEFEER) {
+ enable_preemption();
+ }
}
- /* Enable the knote if called for */
- if (kev->flags & EV_ENABLE)
- knote_enable(kn);
+ if (kev->flags & EV_ERROR) {
+ result = 0;
+ goto out_unlock;
+ }
+ if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0 &&
+ kn->kn_udata != kev->udata) {
+ // this allows klist_copy_udata() not to take locks
+ os_atomic_store_wide(&kn->kn_udata, kev->udata, relaxed);
+ }
+ if ((kev->flags & EV_DISABLE) && !(kn->kn_status & KN_DISABLED)) {
+ kn->kn_status |= KN_DISABLED;
+ knote_dequeue(kq, kn);
+ }
}
- /* still have kqlock held and knote is valid */
- kqunlock(kq);
+ /* accept new kevent state */
+ knote_apply_touch(kq, kn, kev, result);
+
+out_unlock:
+ /*
+ * When the filter asked for a post-register wait,
+ * we leave the kqueue locked for kevent_register()
+ * to call the filter's f_post_register_wait hook.
+ */
+ if (result & FILTER_REGISTER_WAIT) {
+ knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
+ *kn_out = kn;
+ } else {
+ knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
+ }
out:
	/* output local errors through the kevent */
	if (error) {
		kev->flags |= EV_ERROR;
		kev->data = error;
}
+ return result;
}
-
/*
* knote_process - process a triggered event
*
* kqueue locked on entry and exit - but may be dropped
*/
static int
-knote_process(struct knote *kn,
- kevent_callback_t callback,
- void *callback_data,
- struct filt_process_s *process_data,
- struct proc *p)
+knote_process(struct knote *kn, kevent_ctx_t kectx,
+ kevent_callback_t callback)
{
- struct kevent_internal_s kev;
+ struct kevent_qos_s kev;
struct kqueue *kq = knote_get_kq(kn);
- int result = 0;
+ KNOTE_LOCK_CTX(knlc);
+ int result = FILTER_ACTIVE;
int error = 0;
-
- bzero(&kev, sizeof(kev));
+ bool drop = false;
/*
* Must be active or stayactive
- * Must be queued and not disabled/suppressed
+ * Must be queued and not disabled/suppressed or dropping
*/
assert(kn->kn_status & KN_QUEUED);
- assert(kn->kn_status & (KN_ACTIVE|KN_STAYACTIVE));
- assert(!(kn->kn_status & (KN_DISABLED|KN_SUPPRESSED|KN_DROPPING)));
+ assert(kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE));
+ assert(!(kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)));
if (kq->kq_state & KQ_WORKLOOP) {
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS),
- ((struct kqworkloop *)kq)->kqwl_dynamicid,
- kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
- kn->kn_filtid);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS),
+ ((struct kqworkloop *)kq)->kqwl_dynamicid,
+ kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
+ kn->kn_filtid);
} else if (kq->kq_state & KQ_WORKQ) {
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS),
- 0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
- kn->kn_filtid);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS),
+ 0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
+ kn->kn_filtid);
} else {
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS),
- VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata,
- kn->kn_status | (kn->kn_id << 32), kn->kn_filtid);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS),
+ VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata,
+ kn->kn_status | (kn->kn_id << 32), kn->kn_filtid);
+ }
+
+ if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) {
+ /*
+ * When the knote is dropping or has dropped,
+ * then there's nothing we want to process.
+ */
+ return EJUSTRETURN;
+ }
+
+ /*
+	 * While waiting for the knote lock, we may have dropped the kq lock,
+ * and a touch may have disabled and dequeued the knote.
+ */
+ if (!(kn->kn_status & KN_QUEUED)) {
+ knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
+ return EJUSTRETURN;
}
	/*
	 * For deferred-drop or vanished events, we just create a fake
	 * event to acknowledge end-of-life. Otherwise, we call the
* filter's process routine to snapshot the kevent state under
* the filter's locking protocol.
+ *
+ * suppress knotes to avoid returning the same event multiple times in
+ * a single call.
*/
- if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
- /* create fake event */
- kev.filter = kn->kn_filter;
- kev.ident = kn->kn_id;
- kev.qos = kn->kn_qos;
- kev.flags = (kn->kn_status & KN_DEFERDELETE) ?
- EV_DELETE : EV_VANISHED;
- kev.flags |= (EV_DISPATCH2 | EV_ONESHOT);
- kev.udata = kn->kn_udata;
- result = 1;
-
- knote_suppress(kn);
- } else {
- int flags = KNUSE_NONE;
- /* deactivate - so new activations indicate a wakeup */
- knote_deactivate(kn);
+ knote_suppress(kq, kn);
- /* suppress knotes to avoid returning the same event multiple times in a single call. */
- knote_suppress(kn);
-
- if (knoteuse_needs_boost(kn, NULL)) {
- flags |= KNUSE_BOOST;
+ if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
+ int kev_flags = EV_DISPATCH2 | EV_ONESHOT;
+ if (kn->kn_status & KN_DEFERDELETE) {
+ kev_flags |= EV_DELETE;
+ } else {
+ kev_flags |= EV_VANISHED;
}
- /* convert lock to a knote use reference */
- if (!kqlock2knoteuse(kq, kn, flags))
- panic("dropping knote found on queue\n");
- /* call out to the filter to process with just a ref */
- result = knote_fops(kn)->f_process(kn, process_data, &kev);
- if (result) flags |= KNUSE_STEAL_DROP;
+ /* create fake event */
+ kev = (struct kevent_qos_s){
+ .filter = kn->kn_filter,
+ .ident = kn->kn_id,
+ .flags = kev_flags,
+ .udata = kn->kn_udata,
+ };
+ } else {
+ kqunlock(kq);
+ kev = (struct kevent_qos_s) { };
+ result = filter_call(knote_fops(kn), f_process(kn, &kev));
+ kqlock(kq);
+ }
- /*
- * convert our reference back to a lock. accept drop
- * responsibility from others if we've committed to
- * delivering event data.
- */
- if (!knoteuse2kqlock(kq, kn, flags)) {
- /* knote dropped */
- kn = NULL;
+ /*
+ * Determine how to dispatch the knote for future event handling.
+ * not-fired: just return (do not callout, leave deactivated).
+	 * One-shot:   If dispatch2, enter deferred-delete mode (unless this
+	 *             is the deferred delete event delivery itself). Otherwise,
+ * drop it.
+ * Dispatch: don't clear state, just mark it disabled.
+ * Cleared: just leave it deactivated.
+ * Others: re-activate as there may be more events to handle.
+ * This will not wake up more handlers right now, but
+ * at the completion of handling events it may trigger
+ * more handler threads (TODO: optimize based on more than
+ * just this one event being detected by the filter).
+ */
+ if ((result & FILTER_ACTIVE) == 0) {
+ if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0) {
+ /*
+ * Stay active knotes should not be unsuppressed or we'd create an
+ * infinite loop.
+ *
+ * Some knotes (like EVFILT_WORKLOOP) can be reactivated from
+ * within f_process() but that doesn't necessarily make them
+ * ready to process, so we should leave them be.
+ *
+ * For other knotes, since we will not return an event,
+ * there's no point keeping the knote suppressed.
+ */
+ knote_unsuppress(kq, kn);
}
+ knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
+ return EJUSTRETURN;
}
- if (kn != NULL) {
- /*
- * Determine how to dispatch the knote for future event handling.
- * not-fired: just return (do not callout, leave deactivated).
- * One-shot: If dispatch2, enter deferred-delete mode (unless this is
- * is the deferred delete event delivery itself). Otherwise,
- * drop it.
- * stolendrop:We took responsibility for someone else's drop attempt.
- * treat this just like one-shot and prepare to turn it back
- * into a deferred delete if required.
- * Dispatch: don't clear state, just mark it disabled.
- * Cleared: just leave it deactivated.
- * Others: re-activate as there may be more events to handle.
- * This will not wake up more handlers right now, but
- * at the completion of handling events it may trigger
- * more handler threads (TODO: optimize based on more than
- * just this one event being detected by the filter).
- */
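+	/*
+	 * Apply any QoS override the filter requested, then stamp the event
+	 * with the knote's base QoS combined with its current override.
+	 */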
+ if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
+ knote_adjust_qos(kq, kn, result);
+ }
+ kev.qos = _pthread_priority_combine(kn->kn_qos, kn->kn_qos_override);
- if (result == 0)
- return (EJUSTRETURN);
-
- if ((kev.flags & EV_ONESHOT) || (kn->kn_status & KN_STOLENDROP)) {
- if ((kn->kn_status & (KN_DISPATCH2 | KN_DEFERDELETE)) == KN_DISPATCH2) {
- /* defer dropping non-delete oneshot dispatch2 events */
- kn->kn_status |= KN_DEFERDELETE;
- knote_disable(kn);
-
- /* if we took over another's drop clear those flags here */
- if (kn->kn_status & KN_STOLENDROP) {
- assert(kn->kn_status & KN_DROPPING);
- /*
- * the knote will be dropped when the
- * deferred deletion occurs
- */
- kn->kn_status &= ~(KN_DROPPING|KN_STOLENDROP);
- }
- } else if (kn->kn_status & KN_STOLENDROP) {
- /* We now own the drop of the knote. */
- assert(kn->kn_status & KN_DROPPING);
- knote_unsuppress(kn);
- kqunlock(kq);
- knote_drop(kn, p);
- kqlock(kq);
- } else if (kqlock2knotedrop(kq, kn)) {
- /* just EV_ONESHOT, _not_ DISPATCH2 */
- knote_drop(kn, p);
- kqlock(kq);
- }
- } else if (kn->kn_status & KN_DISPATCH) {
- /* disable all dispatch knotes */
- knote_disable(kn);
- } else if ((kev.flags & EV_CLEAR) == 0) {
- /* re-activate in case there are more events */
- knote_activate(kn);
+ if (kev.flags & EV_ONESHOT) {
+ if ((kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
+ (kn->kn_status & KN_DEFERDELETE) == 0) {
+ /* defer dropping non-delete oneshot dispatch2 events */
+ kn->kn_status |= KN_DEFERDELETE | KN_DISABLED;
+ } else {
+ drop = true;
}
+ } else if (kn->kn_flags & EV_DISPATCH) {
+ /* disable all dispatch knotes */
+ kn->kn_status |= KN_DISABLED;
+ } else if ((kn->kn_flags & EV_CLEAR) == 0) {
+ /* re-activate in case there are more events */
+ knote_activate(kq, kn, FILTER_ACTIVE);
}
/*
* If we have to detach and drop the knote, do
* it while we have the kq unlocked.
*/
- if (result) {
- kqunlock(kq);
- error = (callback)(kq, &kev, callback_data);
- kqlock(kq);
+ if (drop) {
+ knote_drop(kq, kn, &knlc);
+ } else {
+ knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
+ }
+
+ if (kev.flags & EV_VANISHED) {
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_VANISHED),
+ kev.ident, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
+ kn->kn_filtid);
}
- return (error);
-}
+ error = (callback)(&kev, kectx);
+ kqlock(kq);
+ return error;
+}
/*
- * Return 0 to indicate that processing should proceed,
- * -1 if there is nothing to process.
- *
- * Called with kqueue locked and returns the same way,
- * but may drop lock temporarily.
+ * Returns -1 if the kqueue was unbound and processing should not happen
*/
+#define KQWQAE_BEGIN_PROCESSING 1
+#define KQWQAE_END_PROCESSING 2
+#define KQWQAE_UNBIND 3
static int
-kqworkq_begin_processing(struct kqworkq *kqwq, kq_index_t qos_index, int flags)
+kqworkq_acknowledge_events(struct kqworkq *kqwq, workq_threadreq_t kqr,
+ int kevent_flags, int kqwqae_op)
{
- struct kqrequest *kqr;
- thread_t self = current_thread();
- __assert_only struct uthread *ut = get_bsdthread_info(self);
-
- assert(kqwq->kqwq_state & KQ_WORKQ);
- assert(qos_index < KQWQ_NQOS);
-
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START,
- flags, qos_index);
+ thread_qos_t old_override = THREAD_QOS_UNSPECIFIED;
+ thread_t thread = kqr_thread_fast(kqr);
+ struct knote *kn;
+ int rc = 0;
+ bool unbind;
+ struct kqtailq *suppressq = &kqwq->kqwq_suppressed[kqr->tr_kq_qos_index];
- kqwq_req_lock(kqwq);
+ kqlock_held(&kqwq->kqwq_kqueue);
- kqr = kqworkq_get_request(kqwq, qos_index);
+ if (!TAILQ_EMPTY(suppressq)) {
+ /*
+ * Return suppressed knotes to their original state.
+ * For workq kqueues, suppressed ones that are still
+ * truly active (not just forced into the queue) will
+ * set flags we check below to see if anything got
+ * woken up.
+ */
+ while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
+ assert(kn->kn_status & KN_SUPPRESSED);
+ knote_unsuppress(kqwq, kn);
+ }
+ }
- /* manager skips buckets that haven't asked for its help */
- if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
+#if DEBUG || DEVELOPMENT
+ thread_t self = current_thread();
+ struct uthread *ut = get_bsdthread_info(self);
- /* If nothing for manager to do, just return */
- if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END,
- 0, kqr->kqr_state);
- kqwq_req_unlock(kqwq);
- return -1;
- }
- /* bind manager thread from this time on */
- kqworkq_bind_thread_impl(kqwq, qos_index, self, flags);
+ assert(thread == self);
+ assert(ut->uu_kqr_bound == kqr);
+#endif // DEBUG || DEVELOPMENT
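+	/*
+	 * Decide whether to unbind: an explicit KQWQAE_UNBIND always does, a
+	 * parking thread only unbinds when no new wakeup was recorded while
+	 * processing, and other callers never do.
+	 */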
+ if (kqwqae_op == KQWQAE_UNBIND) {
+ unbind = true;
+ } else if ((kevent_flags & KEVENT_FLAG_PARKING) == 0) {
+ unbind = false;
} else {
- /* We should already be bound to this kqueue */
- assert(kqr->kqr_state & KQR_BOUND);
- assert(kqr->kqr_thread == self);
- assert(ut->uu_kqueue_bound == (struct kqueue *)kqwq);
- assert(ut->uu_kqueue_qos_index == qos_index);
- assert((ut->uu_kqueue_flags & flags) == ut->uu_kqueue_flags);
+ unbind = !kqr->tr_kq_wakeup;
+ }
+ if (unbind) {
+ old_override = kqworkq_unbind_locked(kqwq, kqr, thread);
+ rc = -1;
+ /*
+ * request a new thread if we didn't process the whole queue or real events
+ * have happened (not just putting stay-active events back).
+ */
+ if (kqr->tr_kq_wakeup) {
+ kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr,
+ kqr->tr_kq_qos_index, 0);
+ }
}
- /*
- * we should have been requested to be here
- * and nobody else should still be processing
- */
- assert(kqr->kqr_state & KQR_WAKEUP);
- assert(kqr->kqr_state & KQR_THREQUESTED);
- assert((kqr->kqr_state & KQR_PROCESSING) == 0);
-
- /* reset wakeup trigger to catch new events after we start processing */
- kqr->kqr_state &= ~KQR_WAKEUP;
-
- /* convert to processing mode */
- kqr->kqr_state |= KQR_PROCESSING;
+ if (rc == 0) {
+ /*
+ * Reset wakeup bit to notice events firing while we are processing,
+ * as we cannot rely on the bucket queue emptiness because of stay
+ * active knotes.
+ */
+ kqr->tr_kq_wakeup = false;
+ }
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END,
- kqr_thread_id(kqr), kqr->kqr_state);
+ if (old_override) {
+ thread_drop_kevent_override(thread);
+ }
- kqwq_req_unlock(kqwq);
- return 0;
+ return rc;
}
-static inline bool
-kqworkloop_is_processing_on_current_thread(struct kqworkloop *kqwl)
+/*
+ * Return 0 to indicate that processing should proceed,
+ * -1 if there is nothing to process.
+ *
+ * Called with kqueue locked and returns the same way,
+ * but may drop lock temporarily.
+ */
+static int
+kqworkq_begin_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
+ int kevent_flags)
{
- struct kqueue *kq = &kqwl->kqwl_kqueue;
+ int rc = 0;
- kqlock_held(kq);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START,
+ 0, kqr->tr_kq_qos_index);
- if (kq->kq_state & KQ_PROCESSING) {
- /*
- * KQ_PROCESSING is unset with the kqlock held, and the kqr thread is
- * never modified while KQ_PROCESSING is set, meaning that peeking at
- * its value is safe from this context.
- */
- return kqwl->kqwl_request.kqr_thread == current_thread();
- }
- return false;
+ rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
+ KQWQAE_BEGIN_PROCESSING);
+
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END,
+ thread_tid(kqr_thread(kqr)), kqr->tr_kq_wakeup);
+
+ return rc;
}
-static void
-kqworkloop_acknowledge_events(struct kqworkloop *kqwl, boolean_t clear_ipc_override)
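+/*
+ * Unsuppress the knotes that were processed, and return the maximum QoS
+ * override still pushed by the knotes that have to remain suppressed.
+ */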
+static thread_qos_t
+kqworkloop_acknowledge_events(struct kqworkloop *kqwl)
{
- struct kqrequest *kqr = &kqwl->kqwl_request;
+ kq_index_t qos = THREAD_QOS_UNSPECIFIED;
struct knote *kn, *tmp;
- kqlock_held(&kqwl->kqwl_kqueue);
+ kqlock_held(kqwl);
- TAILQ_FOREACH_SAFE(kn, &kqr->kqr_suppressed, kn_tqe, tmp) {
+ TAILQ_FOREACH_SAFE(kn, &kqwl->kqwl_suppressed, kn_tqe, tmp) {
/*
* If a knote that can adjust QoS is disabled because of the automatic
* behavior of EV_DISPATCH, the knotes should stay suppressed so that
* further overrides keep pushing.
*/
if (knote_fops(kn)->f_adjusts_qos && (kn->kn_status & KN_DISABLED) &&
- (kn->kn_status & (KN_STAYACTIVE | KN_DROPPING)) == 0 &&
- (kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) {
- /*
- * When called from unbind, clear the sync ipc override on the knote
- * for events which are delivered.
- */
- if (clear_ipc_override) {
- knote_adjust_sync_qos(kn, THREAD_QOS_UNSPECIFIED, FALSE);
- }
+ (kn->kn_status & (KN_STAYACTIVE | KN_DROPPING)) == 0 &&
+ (kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) {
+ qos = MAX(qos, kn->kn_qos_override);
continue;
}
- knote_unsuppress(kn);
+ knote_unsuppress(kqwl, kn);
}
+
+ return qos;
}
static int
-kqworkloop_begin_processing(struct kqworkloop *kqwl,
- __assert_only unsigned int flags)
+kqworkloop_begin_processing(struct kqworkloop *kqwl, unsigned int kevent_flags)
{
- struct kqrequest *kqr = &kqwl->kqwl_request;
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
struct kqueue *kq = &kqwl->kqwl_kqueue;
+ thread_qos_t qos_override;
+ thread_t thread = kqr_thread_fast(kqr);
+ int rc = 0, op = KQWL_UTQ_NONE;
kqlock_held(kq);
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_START,
- kqwl->kqwl_dynamicid, flags, 0);
-
- kqwl_req_lock(kqwl);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_START,
+ kqwl->kqwl_dynamicid, 0, 0);
/* nobody else should still be processing */
- assert((kqr->kqr_state & KQR_PROCESSING) == 0);
assert((kq->kq_state & KQ_PROCESSING) == 0);
- kqr->kqr_state |= KQR_PROCESSING | KQR_R2K_NOTIF_ARMED;
kq->kq_state |= KQ_PROCESSING;
- kqwl_req_unlock(kqwl);
+ if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
+ op = KQWL_UTQ_RESET_WAKEUP_OVERRIDE;
+ }
- kqworkloop_acknowledge_events(kqwl, FALSE);
+ if (kevent_flags & KEVENT_FLAG_PARKING) {
+ /*
+ * When "parking" we want to process events and if no events are found
+ * unbind.
+ *
+ * However, non overcommit threads sometimes park even when they have
+ * more work so that the pool can narrow. For these, we need to unbind
+ * early, so that calling kqworkloop_update_threads_qos() can ask the
+ * workqueue subsystem whether the thread should park despite having
+ * pending events.
+ */
+ if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
+ op = KQWL_UTQ_PARKING;
+ } else {
+ op = KQWL_UTQ_UNBINDING;
+ }
+ }
+ if (op == KQWL_UTQ_NONE) {
+ goto done;
+ }
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_END,
- kqwl->kqwl_dynamicid, flags, 0);
+ qos_override = kqworkloop_acknowledge_events(kqwl);
- return 0;
+ if (op == KQWL_UTQ_UNBINDING) {
+ kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_IMMEDIATELY);
+ kqworkloop_release_live(kqwl);
+ }
+ kqworkloop_update_threads_qos(kqwl, op, qos_override);
+ if (op == KQWL_UTQ_PARKING) {
+ if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) {
+ /*
+ * We cannot trust tr_kq_wakeup when looking at stay active knotes.
+ * We need to process once, and kqworkloop_end_processing will
+ * handle the unbind.
+ */
+ } else if (!kqr->tr_kq_wakeup || kqwl->kqwl_owner) {
+ kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
+ kqworkloop_release_live(kqwl);
+ rc = -1;
+ }
+ } else if (op == KQWL_UTQ_UNBINDING) {
+ if (kqr_thread(kqr) == thread) {
+ /*
+ * The thread request fired again, passed the admission check and
+ * got bound to the current thread again.
+ */
+ } else {
+ rc = -1;
+ }
+ }
+
+ if (rc == 0) {
+ /*
+ * Reset wakeup bit to notice stay active events firing while we are
+ * processing, as we cannot rely on the stayactive bucket emptiness.
+ */
+ kqwl->kqwl_wakeup_indexes &= ~KQWL_STAYACTIVE_FIRED_BIT;
+ } else {
+ kq->kq_state &= ~KQ_PROCESSING;
+ }
+
+ if (rc == -1) {
+ kqworkloop_unbind_delayed_override_drop(thread);
+ }
+
+done:
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_END,
+ kqwl->kqwl_dynamicid, 0, 0);
+
+ return rc;
}
/*
* Return 0 to indicate that processing should proceed,
* -1 if there is nothing to process.
+ * EBADF if the kqueue is draining
*
* Called with kqueue locked and returns the same way,
* but may drop lock temporarily.
* May block.
*/
static int
-kqueue_begin_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags)
+kqfile_begin_processing(struct kqfile *kq)
{
struct kqtailq *suppressq;
kqlock_held(kq);
- if (kq->kq_state & KQ_WORKQ) {
- return kqworkq_begin_processing((struct kqworkq *)kq, qos_index, flags);
- } else if (kq->kq_state & KQ_WORKLOOP) {
- return kqworkloop_begin_processing((struct kqworkloop*)kq, flags);
- }
-
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_START,
- VM_KERNEL_UNSLIDE_OR_PERM(kq), flags);
-
- assert(qos_index == QOS_INDEX_KQFILE);
+ assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_START,
+ VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
/* wait to become the exclusive processing thread */
for (;;) {
- if (kq->kq_state & KQ_DRAIN) {
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
- VM_KERNEL_UNSLIDE_OR_PERM(kq), 2);
- return -1;
+ if (kq->kqf_state & KQ_DRAIN) {
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
+ VM_KERNEL_UNSLIDE_OR_PERM(kq), 2);
+ return EBADF;
}
- if ((kq->kq_state & KQ_PROCESSING) == 0)
+ if ((kq->kqf_state & KQ_PROCESSING) == 0) {
break;
+ }
/* if someone else is processing the queue, wait */
- kq->kq_state |= KQ_PROCWAIT;
- suppressq = kqueue_get_suppressed_queue(kq, qos_index);
- waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
- CAST_EVENT64_T(suppressq),
- THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
-
+ kq->kqf_state |= KQ_PROCWAIT;
+ suppressq = &kq->kqf_suppressed;
+ waitq_assert_wait64((struct waitq *)&kq->kqf_wqs,
+ CAST_EVENT64_T(suppressq), THREAD_UNINT | THREAD_WAIT_NOREPORT,
+ TIMEOUT_WAIT_FOREVER);
+
kqunlock(kq);
thread_block(THREAD_CONTINUE_NULL);
kqlock(kq);
/* Nobody else processing */
/* clear pre-posts and KQ_WAKEUP now, in case we bail early */
- waitq_set_clear_preposts(&kq->kq_wqs);
- kq->kq_state &= ~KQ_WAKEUP;
+ waitq_set_clear_preposts(&kq->kqf_wqs);
+ kq->kqf_state &= ~KQ_WAKEUP;
/* anything left to process? */
- if (kqueue_queue_empty(kq, qos_index)) {
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
- VM_KERNEL_UNSLIDE_OR_PERM(kq), 1);
+ if (TAILQ_EMPTY(&kq->kqf_queue)) {
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
+ VM_KERNEL_UNSLIDE_OR_PERM(kq), 1);
return -1;
}
/* convert to processing mode */
- kq->kq_state |= KQ_PROCESSING;
+ kq->kqf_state |= KQ_PROCESSING;
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
- VM_KERNEL_UNSLIDE_OR_PERM(kq));
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
+ VM_KERNEL_UNSLIDE_OR_PERM(kq));
return 0;
}
/*
- * kqworkq_end_processing - Complete the processing of a workq kqueue
+ * Try to end the processing, only called when a workq thread is attempting to
+ * park (KEVENT_FLAG_PARKING is set).
*
- * We may have to request new threads.
- * This can happen there are no waiting processing threads and:
- * - there were active events we never got to (count > 0)
- * - we pended waitq hook callouts during processing
- * - we pended wakeups while processing (or unsuppressing)
- *
- * Called with kqueue lock held.
+ * When returning -1, the kqworkq is set up again so that it is ready to be
+ * processed.
*/
-static void
-kqworkq_end_processing(struct kqworkq *kqwq, kq_index_t qos_index, int flags)
+static int
+kqworkq_end_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
+ int kevent_flags)
{
-#pragma unused(flags)
-
- struct kqueue *kq = &kqwq->kqwq_kqueue;
- struct kqtailq *suppressq = kqueue_get_suppressed_queue(kq, qos_index);
-
- thread_t self = current_thread();
- struct uthread *ut = get_bsdthread_info(self);
- struct knote *kn;
- struct kqrequest *kqr;
- thread_t thread;
-
- assert(kqwq->kqwq_state & KQ_WORKQ);
- assert(qos_index < KQWQ_NQOS);
-
- /* Are we really bound to this kqueue? */
- if (ut->uu_kqueue_bound != kq) {
- assert(ut->uu_kqueue_bound == kq);
- return;
+ if (!TAILQ_EMPTY(&kqwq->kqwq_queue[kqr->tr_kq_qos_index])) {
+ /* remember we didn't process everything */
+ kqr->tr_kq_wakeup = true;
}
- kqr = kqworkq_get_request(kqwq, qos_index);
-
- kqwq_req_lock(kqwq);
-
- /* Do we claim to be manager? */
- if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
-
- /* bail if not bound that way */
- if (ut->uu_kqueue_qos_index != KQWQ_QOS_MANAGER ||
- (ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER) == 0) {
- assert(ut->uu_kqueue_qos_index == KQWQ_QOS_MANAGER);
- assert(ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER);
- kqwq_req_unlock(kqwq);
- return;
- }
-
- /* bail if this request wasn't already getting manager help */
- if ((kqr->kqr_state & KQWQ_THMANAGER) == 0 ||
- (kqr->kqr_state & KQR_PROCESSING) == 0) {
- kqwq_req_unlock(kqwq);
- return;
- }
- } else {
- if (ut->uu_kqueue_qos_index != qos_index ||
- (ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER)) {
- assert(ut->uu_kqueue_qos_index == qos_index);
- assert((ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER) == 0);
- kqwq_req_unlock(kqwq);
- return;
+ if (kevent_flags & KEVENT_FLAG_PARKING) {
+ /*
+ * if acknowledge events "succeeds" it means there are events,
+ * which is a failure condition for end_processing.
+ */
+ int rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
+ KQWQAE_END_PROCESSING);
+ if (rc == 0) {
+ return -1;
}
}
- assert(kqr->kqr_state & KQR_BOUND);
- thread = kqr->kqr_thread;
- assert(thread == self);
-
- assert(kqr->kqr_state & KQR_PROCESSING);
-
- /* If we didn't drain the whole queue, re-mark a wakeup being needed */
- if (!kqueue_queue_empty(kq, qos_index))
- kqr->kqr_state |= KQR_WAKEUP;
-
- kqwq_req_unlock(kqwq);
-
- /*
- * Return suppressed knotes to their original state.
- * For workq kqueues, suppressed ones that are still
- * truly active (not just forced into the queue) will
- * set flags we check below to see if anything got
- * woken up.
- */
- while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
- assert(kn->kn_status & KN_SUPPRESSED);
- knote_unsuppress(kn);
- }
-
- kqwq_req_lock(kqwq);
-
- /* Indicate that we are done processing this request */
- kqr->kqr_state &= ~KQR_PROCESSING;
-
- /*
- * Drop our association with this one request and its
- * override on us.
- */
- kqworkq_unbind_thread(kqwq, qos_index, thread, flags);
-
- /*
- * request a new thread if we didn't process the whole
- * queue or real events have happened (not just putting
- * stay-active events back).
- */
- if (kqr->kqr_state & KQR_WAKEUP) {
- if (kqueue_queue_empty(kq, qos_index)) {
- kqr->kqr_state &= ~KQR_WAKEUP;
- } else {
- kqworkq_request_thread(kqwq, qos_index);
- }
- }
- kqwq_req_unlock(kqwq);
+ return 0;
}
-static void
-kqworkloop_end_processing(struct kqworkloop *kqwl, int nevents,
- unsigned int flags)
+/*
+ * Try to end the processing, only called when a workq thread is attempting to
+ * park (KEVENT_FLAG_PARKING is set).
+ *
+ * When returning -1, the kqworkloop is set up again so that it is ready to be
+ * processed (as if kqworkloop_begin_processing had just been called).
+ *
+ * If successful and KEVENT_FLAG_PARKING was set in the kevent_flags,
+ * the kqworkloop is unbound from its servicer as a side effect.
+ */
+static int
+kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags)
{
- struct kqrequest *kqr = &kqwl->kqwl_request;
struct kqueue *kq = &kqwl->kqwl_kqueue;
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
+ thread_qos_t qos_override;
+ thread_t thread = kqr_thread_fast(kqr);
+ int rc = 0;
kqlock_held(kq);
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_START,
- kqwl->kqwl_dynamicid, flags, 0);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_START,
+ kqwl->kqwl_dynamicid, 0, 0);
+
+ if (flags & KQ_PROCESSING) {
+ assert(kq->kq_state & KQ_PROCESSING);
+
+ /*
+ * If we still have queued stayactive knotes, remember we didn't finish
+ * processing all of them. This should be extremely rare and would
+		 * require having a lot of them registered and fired.
+ */
+ if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) {
+ kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS,
+ KQWL_BUCKET_STAYACTIVE);
+ }
- if ((kq->kq_state & KQ_NO_WQ_THREAD) && nevents == 0 &&
- (flags & KEVENT_FLAG_IMMEDIATE) == 0) {
/*
- * <rdar://problem/31634014> We may soon block, but have returned no
- * kevents that need to be kept supressed for overriding purposes.
+ * When KEVENT_FLAG_PARKING is set, we need to attempt an unbind while
+ * still under the lock.
*
- * It is hence safe to acknowledge events and unsuppress everything, so
- * that if we block we can observe all events firing.
+ * So we do everything kqworkloop_unbind() would do, but because we're
+ * inside kqueue_process(), if the workloop actually received events
+ * while our locks were dropped, we have the opportunity to fail the end
+ * processing and loop again.
+ *
+ * This avoids going through the process-wide workqueue lock hence
+ * scales better.
*/
- kqworkloop_acknowledge_events(kqwl, TRUE);
+ if (kevent_flags & KEVENT_FLAG_PARKING) {
+ qos_override = kqworkloop_acknowledge_events(kqwl);
+ }
}
- kqwl_req_lock(kqwl);
-
- assert(kqr->kqr_state & KQR_PROCESSING);
- assert(kq->kq_state & KQ_PROCESSING);
+ if (kevent_flags & KEVENT_FLAG_PARKING) {
+ kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_PARKING, qos_override);
+ if (kqr->tr_kq_wakeup && !kqwl->kqwl_owner) {
+ /*
+ * Reset wakeup bit to notice stay active events firing while we are
+ * processing, as we cannot rely on the stayactive bucket emptiness.
+ */
+ kqwl->kqwl_wakeup_indexes &= ~KQWL_STAYACTIVE_FIRED_BIT;
+ rc = -1;
+ } else {
+ kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
+ kqworkloop_release_live(kqwl);
+ kq->kq_state &= ~flags;
+ }
+ } else {
+ kq->kq_state &= ~flags;
+ kq->kq_state |= KQ_R2K_ARMED;
+ kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0);
+ }
- kq->kq_state &= ~KQ_PROCESSING;
- kqr->kqr_state &= ~KQR_PROCESSING;
- kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0);
+ if ((kevent_flags & KEVENT_FLAG_PARKING) && rc == 0) {
+ kqworkloop_unbind_delayed_override_drop(thread);
+ }
- kqwl_req_unlock(kqwl);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_END,
+ kqwl->kqwl_dynamicid, 0, 0);
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_END,
- kqwl->kqwl_dynamicid, flags, 0);
+ return rc;
}
/*
* Called with kqueue lock held.
+ *
+ * 0: no more events
+ * -1: has more events
+ * EBADF: kqueue is in draining mode
*/
-static void
-kqueue_end_processing(struct kqueue *kq, kq_index_t qos_index,
- int nevents, unsigned int flags)
+static int
+kqfile_end_processing(struct kqfile *kq)
{
+ struct kqtailq *suppressq = &kq->kqf_suppressed;
struct knote *kn;
- struct kqtailq *suppressq;
int procwait;
kqlock_held(kq);
- assert((kq->kq_state & KQ_WORKQ) == 0);
-
- if (kq->kq_state & KQ_WORKLOOP) {
- return kqworkloop_end_processing((struct kqworkloop *)kq, nevents, flags);
- }
-
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_END),
- VM_KERNEL_UNSLIDE_OR_PERM(kq), flags);
+ assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
- assert(qos_index == QOS_INDEX_KQFILE);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_END),
+ VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
/*
* Return suppressed knotes to their original state.
*/
- suppressq = kqueue_get_suppressed_queue(kq, qos_index);
while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
assert(kn->kn_status & KN_SUPPRESSED);
- knote_unsuppress(kn);
+ knote_unsuppress(kq, kn);
}
- procwait = (kq->kq_state & KQ_PROCWAIT);
- kq->kq_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);
+ procwait = (kq->kqf_state & KQ_PROCWAIT);
+ kq->kqf_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);
if (procwait) {
/* first wake up any thread already waiting to process */
- waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
- CAST_EVENT64_T(suppressq),
- THREAD_AWAKENED,
- WAITQ_ALL_PRIORITIES);
+ waitq_wakeup64_all((struct waitq *)&kq->kqf_wqs,
+ CAST_EVENT64_T(suppressq), THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
}
+
+ if (kq->kqf_state & KQ_DRAIN) {
+ return EBADF;
+ }
+ return (kq->kqf_state & KQ_WAKEUP) ? -1 : 0;
}
-/*
- * kqwq_internal_bind - bind thread to processing workq kqueue
- *
- * Determines if the provided thread will be responsible for
- * servicing the particular QoS class index specified in the
- * parameters. Once the binding is done, any overrides that may
- * be associated with the cooresponding events can be applied.
- *
- * This should be called as soon as the thread identity is known,
- * preferably while still at high priority during creation.
- *
- * - caller holds a reference on the process (and workq kq)
- * - the thread MUST call kevent_qos_internal after being bound
- * or the bucket of events may never be delivered.
- * - Nothing locked
- * (unless this is a synchronous bind, then the request is locked)
- */
static int
-kqworkq_internal_bind(
- struct proc *p,
- kq_index_t qos_index,
- thread_t thread,
- unsigned int flags)
+kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options,
+ struct kqueue_workloop_params *params, int *retval)
{
- struct kqueue *kq;
- struct kqworkq *kqwq;
- struct kqrequest *kqr;
- struct uthread *ut = get_bsdthread_info(thread);
+ int error = 0;
+ struct kqworkloop *kqwl;
+ struct filedesc *fdp = p->p_fd;
+ workq_threadreq_param_t trp = { };
- /* If no process workq, can't be our thread. */
- kq = p->p_fd->fd_wqkqueue;
+ switch (cmd) {
+ case KQ_WORKLOOP_CREATE:
+ if (!params->kqwlp_flags) {
+ error = EINVAL;
+ break;
+ }
- if (kq == NULL)
- return 0;
+ if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) &&
+ (params->kqwlp_sched_pri < 1 ||
+ params->kqwlp_sched_pri > 63 /* MAXPRI_USER */)) {
+ error = EINVAL;
+ break;
+ }
- assert(kq->kq_state & KQ_WORKQ);
- kqwq = (struct kqworkq *)kq;
+ if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) &&
+ invalid_policy(params->kqwlp_sched_pol)) {
+ error = EINVAL;
+ break;
+ }
- /*
- * No need to bind the manager thread to any specific
- * bucket, but still claim the thread.
- */
- if (qos_index == KQWQ_QOS_MANAGER) {
- assert(ut->uu_kqueue_bound == NULL);
- assert(flags & KEVENT_FLAG_WORKQ_MANAGER);
- ut->uu_kqueue_bound = kq;
- ut->uu_kqueue_qos_index = qos_index;
- ut->uu_kqueue_flags = flags;
+ if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) &&
+ (params->kqwlp_cpu_percent <= 0 ||
+ params->kqwlp_cpu_percent > 100 ||
+ params->kqwlp_cpu_refillms <= 0 ||
+ params->kqwlp_cpu_refillms > 0x00ffffff)) {
+ error = EINVAL;
+ break;
+ }
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_BIND),
- thread_tid(thread), flags, qos_index);
+ if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) {
+ trp.trp_flags |= TRP_PRIORITY;
+ trp.trp_pri = params->kqwlp_sched_pri;
+ }
+ if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) {
+ trp.trp_flags |= TRP_POLICY;
+ trp.trp_pol = params->kqwlp_sched_pol;
+ }
+ if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) {
+ trp.trp_flags |= TRP_CPUPERCENT;
+ trp.trp_cpupercent = (uint8_t)params->kqwlp_cpu_percent;
+ trp.trp_refillms = params->kqwlp_cpu_refillms;
+ }
- return 1;
- }
+ error = kqworkloop_get_or_create(p, params->kqwlp_id, &trp,
+ KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
+ KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST, &kqwl);
+ if (error) {
+ break;
+ }
- /*
- * If this is a synchronous bind callback, the request
- * lock is already held, so just do the bind.
- */
- if (flags & KEVENT_FLAG_SYNCHRONOUS_BIND) {
- kqwq_req_held(kqwq);
- /* strip out synchronout bind flag */
- flags &= ~KEVENT_FLAG_SYNCHRONOUS_BIND;
- kqworkq_bind_thread_impl(kqwq, qos_index, thread, flags);
- return 1;
+ if (!(fdp->fd_flags & FD_WORKLOOP)) {
+ /* FD_WORKLOOP indicates that this process has created a workloop
+ * via this syscall; the flag is only ever added to a process, never
+ * removed.
+ */
+ proc_fdlock(p);
+ fdp->fd_flags |= FD_WORKLOOP;
+ proc_fdunlock(p);
+ }
+ break;
+ case KQ_WORKLOOP_DESTROY:
+ error = kqworkloop_get_or_create(p, params->kqwlp_id, NULL,
+ KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
+ KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST, &kqwl);
+ if (error) {
+ break;
+ }
+ kqlock(kqwl);
+ trp.trp_value = kqwl->kqwl_params;
+ if (trp.trp_flags && !(trp.trp_flags & TRP_RELEASED)) {
+ trp.trp_flags |= TRP_RELEASED;
+ kqwl->kqwl_params = trp.trp_value;
+ kqworkloop_release_live(kqwl);
+ } else {
+ error = EINVAL;
+ }
+ kqunlock(kqwl);
+ kqworkloop_release(kqwl);
+ break;
}
+ *retval = 0;
+ return error;
+}
- /*
- * check the request that corresponds to our qos_index
- * to see if there is an outstanding request.
- */
- kqr = kqworkq_get_request(kqwq, qos_index);
- assert(kqr->kqr_qos_index == qos_index);
- kqwq_req_lock(kqwq);
-
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_BIND),
- thread_tid(thread), flags, qos_index, kqr->kqr_state);
+int
+kqueue_workloop_ctl(proc_t p, struct kqueue_workloop_ctl_args *uap, int *retval)
+{
+ struct kqueue_workloop_params params = {
+ .kqwlp_id = 0,
+ };
+ if (uap->sz < sizeof(params.kqwlp_version)) {
+ return EINVAL;
+ }
- if ((kqr->kqr_state & KQR_THREQUESTED) &&
- (kqr->kqr_state & KQR_PROCESSING) == 0) {
+ size_t copyin_sz = MIN(sizeof(params), uap->sz);
+ int rv = copyin(uap->addr, &params, copyin_sz);
+ if (rv) {
+ return rv;
+ }
- if ((kqr->kqr_state & KQR_BOUND) &&
- thread == kqr->kqr_thread) {
- /* duplicate bind - claim the thread */
- assert(ut->uu_kqueue_bound == kq);
- assert(ut->uu_kqueue_qos_index == qos_index);
- kqwq_req_unlock(kqwq);
- return 1;
- }
- if ((kqr->kqr_state & (KQR_BOUND | KQWQ_THMANAGER)) == 0) {
- /* ours to bind to */
- kqworkq_bind_thread_impl(kqwq, qos_index, thread, flags);
- kqwq_req_unlock(kqwq);
- return 1;
- }
+ if (params.kqwlp_version != (int)uap->sz) {
+ return EINVAL;
}
- kqwq_req_unlock(kqwq);
- return 0;
+
+ return kqueue_workloop_ctl_internal(p, uap->cmd, uap->options, &params,
+ retval);
}
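/*
 * Illustrative sketch (not from this file): given the version/size handshake
 * above, a caller is expected to set kqwlp_version to the size of the
 * structure it passes and hand the kernel that same size. The wrapper
 * prototype below simply mirrors the uap fields (cmd, options, addr, sz) and
 * is an assumption, not a declaration from any header:
 *
 *	struct kqueue_workloop_params params = {
 *		.kqwlp_version = sizeof(params),
 *		.kqwlp_id = workloop_id,
 *		.kqwlp_flags = KQ_WORKLOOP_CREATE_SCHED_PRI,
 *		.kqwlp_sched_pri = 31,	// must stay within 1..63 (MAXPRI_USER)
 *	};
 *	kqueue_workloop_ctl(KQ_WORKLOOP_CREATE, 0, &params, sizeof(params));
 */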
-static void
-kqworkloop_bind_thread_impl(struct kqworkloop *kqwl,
- thread_t thread,
- __assert_only unsigned int flags)
+/*ARGSUSED*/
+static int
+kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
+ __unused vfs_context_t ctx)
{
- assert(flags & KEVENT_FLAG_WORKLOOP);
+ struct kqfile *kq = (struct kqfile *)fp->f_data;
+ struct kqtailq *suppressq = &kq->kqf_suppressed;
+ struct kqtailq *queue = &kq->kqf_queue;
+ struct knote *kn;
+ int retnum = 0;
- /* the request object must be locked */
- kqwl_req_held(kqwl);
+ if (which != FREAD) {
+ return 0;
+ }
- struct kqrequest *kqr = &kqwl->kqwl_request;
- struct uthread *ut = get_bsdthread_info(thread);
- boolean_t ipc_override_is_sync;
- kq_index_t qos_index = kqworkloop_combined_qos(kqwl, &ipc_override_is_sync);
+ kqlock(kq);
+
+ assert((kq->kqf_state & KQ_WORKQ) == 0);
- /* nobody else bound so finally bind (as a workloop) */
- assert(kqr->kqr_state & KQR_THREQUESTED);
- assert((kqr->kqr_state & (KQR_BOUND | KQR_PROCESSING)) == 0);
- assert(thread != kqwl->kqwl_owner);
+ /*
+ * If this is the first pass, link the wait queue associated with
+ * the kqueue onto the wait queue set for the select(). Normally we
+ * use selrecord() for this, but it uses the wait queue within the
+ * selinfo structure and we need to use the main one for the kqueue to
+ * catch events from KN_STAYQUEUED sources. So we do the linkage manually.
+ * (The select() call will unlink them when it ends).
+ */
+ if (wq_link_id != NULL) {
+ thread_t cur_act = current_thread();
+ struct uthread * ut = get_bsdthread_info(cur_act);
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_BIND),
- kqwl->kqwl_dynamicid, (uintptr_t)thread_tid(thread),
- qos_index,
- (uintptr_t)(((uintptr_t)kqr->kqr_override_index << 16) |
- (((uintptr_t)kqr->kqr_state) << 8) |
- ((uintptr_t)ipc_override_is_sync)));
+ kq->kqf_state |= KQ_SEL;
+ waitq_link((struct waitq *)&kq->kqf_wqs, ut->uu_wqset,
+ WAITQ_SHOULD_LOCK, (uint64_t *)wq_link_id);
- kqr->kqr_state |= KQR_BOUND | KQR_R2K_NOTIF_ARMED;
- kqr->kqr_thread = thread;
+ /* always consume the reserved link object */
+ waitq_link_release(*(uint64_t *)wq_link_id);
+ *(uint64_t *)wq_link_id = 0;
- /* bind the workloop to the uthread */
- ut->uu_kqueue_bound = (struct kqueue *)kqwl;
- ut->uu_kqueue_flags = flags;
- ut->uu_kqueue_qos_index = qos_index;
- assert(ut->uu_kqueue_override_is_sync == 0);
- ut->uu_kqueue_override_is_sync = ipc_override_is_sync;
- if (qos_index) {
- thread_add_ipc_override(thread, qos_index);
+ /*
+ * selprocess() is expecting that we send it back the waitq
+ * that was just added to the thread's waitq set. In order
+ * to not change the selrecord() API (which is exported to
+ * kexts), we pass this value back through the
+ * void *wq_link_id pointer we were passed. We need to use
+ * memcpy here because the pointer may not be properly aligned
+ * on 32-bit systems.
+ */
+ void *wqptr = &kq->kqf_wqs;
+ memcpy(wq_link_id, (void *)&wqptr, sizeof(void *));
}
- if (ipc_override_is_sync) {
- thread_add_sync_ipc_override(thread);
+
+ if (kqfile_begin_processing(kq) == -1) {
+ kqunlock(kq);
+ return 0;
}
-}
-/*
- * workloop_fulfill_threadreq - bind thread to processing workloop
- *
- * The provided thread will be responsible for delivering events
- * associated with the given kqrequest. Bind it and get ready for
- * the thread to eventually arrive.
- *
- * If WORKLOOP_FULFILL_THREADREQ_SYNC is specified, the callback
- * within the context of the pthread_functions->workq_threadreq
- * callout. In this case, the request structure is already locked.
- */
-int
-workloop_fulfill_threadreq(struct proc *p,
- workq_threadreq_t req,
- thread_t thread,
- int flags)
-{
- int sync = (flags & WORKLOOP_FULFILL_THREADREQ_SYNC);
- int cancel = (flags & WORKLOOP_FULFILL_THREADREQ_CANCEL);
- struct kqrequest *kqr;
- struct kqworkloop *kqwl;
+ if (!TAILQ_EMPTY(queue)) {
+ /*
+ * there is something queued - but it might be a
+ * KN_STAYACTIVE knote, which may or may not have
+ * any events pending. Otherwise, we have to walk
+ * the list of knotes to see, and peek at the
+ * (non-vanished) stay-active ones to be really sure.
+ */
+ while ((kn = (struct knote *)TAILQ_FIRST(queue)) != NULL) {
+ if (kn->kn_status & KN_ACTIVE) {
+ retnum = 1;
+ goto out;
+ }
+ assert(kn->kn_status & KN_STAYACTIVE);
+ knote_suppress(kq, kn);
+ }
- kqwl = (struct kqworkloop *)((uintptr_t)req -
- offsetof(struct kqworkloop, kqwl_request) -
- offsetof(struct kqrequest, kqr_req));
- kqr = &kqwl->kqwl_request;
+ /*
+ * There were no regular events on the queue, so take
+ * a deeper look at the stay-queued ones we suppressed.
+ */
+ while ((kn = (struct knote *)TAILQ_FIRST(suppressq)) != NULL) {
+ KNOTE_LOCK_CTX(knlc);
+ int result = 0;
- /* validate we're looking at something valid */
- if (kqwl->kqwl_p != p ||
- (kqwl->kqwl_state & KQ_WORKLOOP) == 0) {
- assert(kqwl->kqwl_p == p);
- assert(kqwl->kqwl_state & KQ_WORKLOOP);
- return EINVAL;
- }
-
- if (!sync)
- kqwl_req_lock(kqwl);
+ /* If it didn't vanish while suppressed - peek at it */
+ if ((kn->kn_status & KN_DROPPING) || !knote_lock(kq, kn, &knlc,
+ KNOTE_KQ_LOCK_ON_FAILURE)) {
+ continue;
+ }
- /* Should be a pending request */
- if ((kqr->kqr_state & KQR_BOUND) ||
- (kqr->kqr_state & KQR_THREQUESTED) == 0) {
+ result = filter_call(knote_fops(kn), f_peek(kn));
- assert((kqr->kqr_state & KQR_BOUND) == 0);
- assert(kqr->kqr_state & KQR_THREQUESTED);
- if (!sync)
- kqwl_req_unlock(kqwl);
- return EINPROGRESS;
- }
+ kqlock(kq);
+ knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
- assert((kqr->kqr_state & KQR_DRAIN) == 0);
+ /* unsuppress it */
+ knote_unsuppress(kq, kn);
- /*
- * Is it a cancel indication from pthread.
- * If so, we must be exiting/exec'ing. Forget
- * our pending request.
- */
- if (cancel) {
- kqr->kqr_state &= ~KQR_THREQUESTED;
- kqr->kqr_state |= KQR_DRAIN;
- } else {
- /* do the actual bind? */
- kqworkloop_bind_thread_impl(kqwl, thread, KEVENT_FLAG_WORKLOOP);
+ /* has data or it has to report a vanish */
+ if (result & FILTER_ACTIVE) {
+ retnum = 1;
+ goto out;
+ }
+ }
}
- if (!sync)
- kqwl_req_unlock(kqwl);
+out:
+ kqfile_end_processing(kq);
+ kqunlock(kq);
+ return retnum;
+}
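/*
 * Illustrative userspace sketch (standard <sys/select.h> and <sys/event.h>
 * APIs, not part of this file): a kqueue file descriptor handed to select()
 * ends up in kqueue_select() above and is reported readable while the kqueue
 * has pending (or fired stay-active) events:
 *
 *	int kq = kqueue();
 *	fd_set rfds;
 *	FD_ZERO(&rfds);
 *	FD_SET(kq, &rfds);
 *	if (select(kq + 1, &rfds, NULL, NULL, NULL) > 0 && FD_ISSET(kq, &rfds)) {
 *		struct kevent ev;
 *		struct timespec zero = { 0, 0 };
 *		(void)kevent(kq, NULL, 0, &ev, 1, &zero);	// drain one event
 *	}
 */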
- if (cancel)
- kqueue_release_last(p, &kqwl->kqwl_kqueue); /* may dealloc kq */
+/*
+ * kqueue_close -
+ */
+/*ARGSUSED*/
+static int
+kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
+{
+ struct kqfile *kqf = (struct kqfile *)fg->fg_data;
+ assert((kqf->kqf_state & KQ_WORKQ) == 0);
+ kqueue_dealloc(&kqf->kqf_kqueue);
+ fg->fg_data = NULL;
return 0;
}
-
/*
- * kevent_qos_internal_bind - bind thread to processing kqueue
- *
- * Indicates that the provided thread will be responsible for
- * servicing the particular QoS class index specified in the
- * parameters. Once the binding is done, any overrides that may
- * be associated with the cooresponding events can be applied.
- *
- * This should be called as soon as the thread identity is known,
- * preferably while still at high priority during creation.
- *
- * - caller holds a reference on the kqueue.
- * - the thread MUST call kevent_qos_internal after being bound
- * or the bucket of events may never be delivered.
- * - Nothing locked (may take mutex or block).
+ * Max depth of the nested kq path that can be created.
+ * Note that this has to be less than the largest value kq_level can hold
+ * to avoid wrapping around and mislabeling the level.
*/
+#define MAX_NESTED_KQ 1000
-int
-kevent_qos_internal_bind(
- struct proc *p,
- int qos_class,
- thread_t thread,
- unsigned int flags)
+/*ARGSUSED*/
+/*
+ * The caller has taken a use-count reference on this kqueue and will donate it
+ * to the kqueue we are being added to. This keeps the kqueue from closing until
+ * that relationship is torn down.
+ */
+static int
+kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
+ __unused struct kevent_qos_s *kev)
{
- kq_index_t qos_index;
-
- assert(flags & KEVENT_FLAG_WORKQ);
-
- if (thread == THREAD_NULL || (flags & KEVENT_FLAG_WORKQ) == 0) {
- return EINVAL;
- }
+ struct kqfile *kqf = (struct kqfile *)fp->f_data;
+ struct kqueue *kq = &kqf->kqf_kqueue;
+ struct kqueue *parentkq = knote_get_kq(kn);
- /* get the qos index we're going to service */
- qos_index = qos_index_for_servicer(qos_class, thread, flags);
+ assert((kqf->kqf_state & KQ_WORKQ) == 0);
- if (kqworkq_internal_bind(p, qos_index, thread, flags))
+ if (parentkq == kq || kn->kn_filter != EVFILT_READ) {
+ knote_set_error(kn, EINVAL);
return 0;
+ }
- return EINPROGRESS;
-}
-
-
-static void
-kqworkloop_internal_unbind(
- struct proc *p,
- thread_t thread,
- unsigned int flags)
-{
- struct kqueue *kq;
- struct kqworkloop *kqwl;
- struct uthread *ut = get_bsdthread_info(thread);
-
- assert(ut->uu_kqueue_bound != NULL);
- kq = ut->uu_kqueue_bound;
- assert(kq->kq_state & KQ_WORKLOOP);
- kqwl = (struct kqworkloop *)kq;
+ /*
+ * We have to avoid creating a cycle when nesting kqueues
+ * inside another. Rather than trying to walk the whole
+ * potential DAG of nested kqueues, we just use a simple
+ * ceiling protocol. When a kqueue is inserted into another,
+ * we check that the (future) parent is not already nested
+ * into another kqueue at a lower level than the potential
+ * child (because it could indicate a cycle). If that test
+ * passes, we just mark the nesting levels accordingly.
+ *
+ * Only up to MAX_NESTED_KQ can be nested.
+ *
+ * Note: kqworkq and kqworkloop cannot be nested and have reused their
+ * kq_level field, so ignore these as parent.
+ */
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND),
- kqwl->kqwl_dynamicid, (uintptr_t)thread_tid(thread),
- flags, 0);
+ kqlock(parentkq);
- if (!(kq->kq_state & KQ_NO_WQ_THREAD)) {
- assert(is_workqueue_thread(thread));
+ if ((parentkq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0) {
+ if (parentkq->kq_level > 0 &&
+ parentkq->kq_level < kq->kq_level) {
+ kqunlock(parentkq);
+ knote_set_error(kn, EINVAL);
+ return 0;
+ }
- kqlock(kq);
- kqworkloop_unbind_thread(kqwl, thread, flags);
- kqunlock(kq);
+ /* set parent level appropriately */
+ uint16_t plevel = (parentkq->kq_level == 0)? 2: parentkq->kq_level;
+ if (plevel < kq->kq_level + 1) {
+ if (kq->kq_level + 1 > MAX_NESTED_KQ) {
+ kqunlock(parentkq);
+ knote_set_error(kn, EINVAL);
+ return 0;
+ }
+ plevel = kq->kq_level + 1;
+ }
- /* If last reference, dealloc the workloop kq */
- kqueue_release_last(p, kq);
- } else {
- assert(!is_workqueue_thread(thread));
- kevent_servicer_detach_thread(p, kqwl->kqwl_dynamicid, thread, flags, kq);
+ parentkq->kq_level = plevel;
}
-}
-static void
-kqworkq_internal_unbind(
- struct proc *p,
- kq_index_t qos_index,
- thread_t thread,
- unsigned int flags)
-{
- struct kqueue *kq;
- struct kqworkq *kqwq;
- struct uthread *ut;
- kq_index_t end_index;
-
- assert(thread == current_thread());
- ut = get_bsdthread_info(thread);
-
- kq = p->p_fd->fd_wqkqueue;
- assert(kq->kq_state & KQ_WORKQ);
- assert(ut->uu_kqueue_bound == kq);
+ kqunlock(parentkq);
- kqwq = (struct kqworkq *)kq;
-
- /* end servicing any requests we might own */
- end_index = (qos_index == KQWQ_QOS_MANAGER) ?
- 0 : qos_index;
+ kn->kn_filtid = EVFILTID_KQREAD;
kqlock(kq);
+ KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
+ /* indicate nesting in child, if needed */
+ if (kq->kq_level == 0) {
+ kq->kq_level = 1;
+ }
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND),
- (uintptr_t)thread_tid(thread), flags, qos_index);
-
- do {
- kqworkq_end_processing(kqwq, qos_index, flags);
- } while (qos_index-- > end_index);
-
- ut->uu_kqueue_bound = NULL;
- ut->uu_kqueue_qos_index = 0;
- ut->uu_kqueue_flags = 0;
-
+ int count = kq->kq_count;
kqunlock(kq);
+ return count > 0;
}
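/*
 * Illustrative userspace sketch (standard kevent(2) API, not part of this
 * file): registering one kqueue inside another with EVFILT_READ is the path
 * that reaches kqueue_kqfilter() above and drives the kq_level ceiling
 * protocol (bounded by MAX_NESTED_KQ):
 *
 *	int inner = kqueue();
 *	int outer = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(outer, &kev, 1, NULL, 0, NULL);
 *	// "outer" now reports "inner" as readable whenever it has pending events
 */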
/*
- * kevent_qos_internal_unbind - unbind thread from processing kqueue
- *
- * End processing the per-QoS bucket of events and allow other threads
- * to be requested for future servicing.
- *
- * caller holds a reference on the kqueue.
- * thread is the current thread.
+ * kqueue_drain - called when kq is closed
*/
-
-int
-kevent_qos_internal_unbind(
- struct proc *p,
- int qos_class,
- thread_t thread,
- unsigned int flags)
+/*ARGSUSED*/
+static int
+kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
{
-#pragma unused(qos_class)
+ struct kqfile *kqf = (struct kqfile *)fp->f_fglob->fg_data;
- struct uthread *ut;
- struct kqueue *kq;
- unsigned int bound_flags;
- bool check_flags;
+ assert((kqf->kqf_state & KQ_WORKQ) == 0);
- ut = get_bsdthread_info(thread);
- if (ut->uu_kqueue_bound == NULL) {
- /* early out if we are already unbound */
- assert(ut->uu_kqueue_flags == 0);
- assert(ut->uu_kqueue_qos_index == 0);
- assert(ut->uu_kqueue_override_is_sync == 0);
- return EALREADY;
+ kqlock(kqf);
+ kqf->kqf_state |= KQ_DRAIN;
+
+ /* wakeup sleeping threads */
+ if ((kqf->kqf_state & (KQ_SLEEP | KQ_SEL)) != 0) {
+ kqf->kqf_state &= ~(KQ_SLEEP | KQ_SEL);
+ (void)waitq_wakeup64_all((struct waitq *)&kqf->kqf_wqs,
+ KQ_EVENT,
+ THREAD_RESTART,
+ WAITQ_ALL_PRIORITIES);
}
- assert(flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP));
- assert(thread == current_thread());
+ /* wakeup threads waiting their turn to process */
+ if (kqf->kqf_state & KQ_PROCWAIT) {
+ assert(kqf->kqf_state & KQ_PROCESSING);
- check_flags = flags & KEVENT_FLAG_UNBIND_CHECK_FLAGS;
+ kqf->kqf_state &= ~KQ_PROCWAIT;
+ (void)waitq_wakeup64_all((struct waitq *)&kqf->kqf_wqs,
+ CAST_EVENT64_T(&kqf->kqf_suppressed),
+ THREAD_RESTART, WAITQ_ALL_PRIORITIES);
+ }
- /* Get the kqueue we started with */
- kq = ut->uu_kqueue_bound;
- assert(kq != NULL);
- assert(kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));
+ kqunlock(kqf);
+ return 0;
+}
- /* get flags and QoS parameters we started with */
- bound_flags = ut->uu_kqueue_flags;
+/*ARGSUSED*/
+int
+kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
+{
+ assert((kq->kq_state & KQ_WORKQ) == 0);
- /* Unbind from the class of workq */
- if (kq->kq_state & KQ_WORKQ) {
- if (check_flags && !(flags & KEVENT_FLAG_WORKQ)) {
- return EINVAL;
- }
+ kqlock(kq);
+ if (isstat64 != 0) {
+ struct stat64 *sb64 = (struct stat64 *)ub;
- kqworkq_internal_unbind(p, ut->uu_kqueue_qos_index, thread, bound_flags);
- } else {
- if (check_flags && !(flags & KEVENT_FLAG_WORKLOOP)) {
- return EINVAL;
+ bzero((void *)sb64, sizeof(*sb64));
+ sb64->st_size = kq->kq_count;
+ if (kq->kq_state & KQ_KEV_QOS) {
+ sb64->st_blksize = sizeof(struct kevent_qos_s);
+ } else if (kq->kq_state & KQ_KEV64) {
+ sb64->st_blksize = sizeof(struct kevent64_s);
+ } else if (IS_64BIT_PROCESS(p)) {
+ sb64->st_blksize = sizeof(struct user64_kevent);
+ } else {
+ sb64->st_blksize = sizeof(struct user32_kevent);
}
+ sb64->st_mode = S_IFIFO;
+ } else {
+ struct stat *sb = (struct stat *)ub;
- kqworkloop_internal_unbind(p, thread, bound_flags);
+ bzero((void *)sb, sizeof(*sb));
+ sb->st_size = kq->kq_count;
+ if (kq->kq_state & KQ_KEV_QOS) {
+ sb->st_blksize = sizeof(struct kevent_qos_s);
+ } else if (kq->kq_state & KQ_KEV64) {
+ sb->st_blksize = sizeof(struct kevent64_s);
+ } else if (IS_64BIT_PROCESS(p)) {
+ sb->st_blksize = sizeof(struct user64_kevent);
+ } else {
+ sb->st_blksize = sizeof(struct user32_kevent);
+ }
+ sb->st_mode = S_IFIFO;
}
-
+ kqunlock(kq);
return 0;
}
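/*
 * Illustrative userspace sketch (standard fstat(2), not part of this file):
 * per kqueue_stat() above, stat-ing a kqueue descriptor reports a FIFO-like
 * mode, the pending event count in st_size, and the kevent structure size
 * the kqueue operates with in st_blksize:
 *
 *	struct stat st;
 *	if (fstat(kq, &st) == 0) {
 *		assert(S_ISFIFO(st.st_mode));
 *		printf("%lld pending events\n", (long long)st.st_size);
 *	}
 */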
+static inline bool
+kqueue_threadreq_can_use_ast(struct kqueue *kq)
+{
+ if (current_proc() == kq->kq_p) {
+ /*
+ * Setting an AST from a non-BSD syscall is unsafe: mach_msg_trap() can
+ * do combined send/receive and in the case of self-IPC, the AST may be
+ * set on a thread that will not return to userspace and that needs the
+ * thread the AST would create in order to unblock itself.
+ *
+ * At this time, we really want to target:
+ *
+ * - kevent variants that can cause thread creations, and dispatch
+ * really only uses kevent_qos and kevent_id,
+ *
+ * - workq_kernreturn (directly about thread creations)
+ *
+ * - bsdthread_ctl, which is used for qos changes and has a direct impact
+ * on the creator thread's scheduling decisions.
+ */
+ switch (current_uthread()->syscall_code) {
+ case SYS_kevent_qos:
+ case SYS_kevent_id:
+ case SYS_workq_kernreturn:
+ case SYS_bsdthread_ctl:
+ return true;
+ }
+ }
+ return false;
+}
+
/*
- * kqueue_process - process the triggered events in a kqueue
+ * Interact with the pthread kext to request a servicing thread at a
+ * specific QoS level.
*
- * Walk the queued knotes and validate that they are
- * really still triggered events by calling the filter
- * routines (if necessary). Hold a use reference on
- * the knote to avoid it being detached. For each event
- * that is still considered triggered, invoke the
- * callback routine provided.
+ * - Caller holds the workq request lock
*
- * caller holds a reference on the kqueue.
- * kqueue locked on entry and exit - but may be dropped
- * kqueue list locked (held for duration of call)
+ * - May be called with the kqueue's wait queue set locked,
+ * so cannot do anything that could recurse on that.
*/
-
-static int
-kqueue_process(struct kqueue *kq,
- kevent_callback_t callback,
- void *callback_data,
- struct filt_process_s *process_data,
- int *countp,
- struct proc *p)
+static void
+kqueue_threadreq_initiate(struct kqueue *kq, workq_threadreq_t kqr,
+ kq_index_t qos, int flags)
{
- unsigned int flags = process_data ? process_data->fp_flags : 0;
- struct uthread *ut = get_bsdthread_info(current_thread());
- kq_index_t start_index, end_index, i;
- struct knote *kn;
- int nevents = 0;
- int error = 0;
+ assert(kqr->tr_kq_wakeup);
+ assert(kqr_thread(kqr) == THREAD_NULL);
+ assert(!kqr_thread_requested(kqr));
+ struct turnstile *ts = TURNSTILE_NULL;
- /*
- * Based on the mode of the kqueue and the bound QoS of the servicer,
- * determine the range of thread requests that need checking
- */
- if (kq->kq_state & KQ_WORKQ) {
- if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
- start_index = KQWQ_QOS_MANAGER;
- } else if (ut->uu_kqueue_bound != kq) {
- return EJUSTRETURN;
- } else {
- start_index = ut->uu_kqueue_qos_index;
- }
+ if (workq_is_exiting(kq->kq_p)) {
+ return;
+ }
- /* manager services every request in a workq kqueue */
- assert(start_index > 0 && start_index <= KQWQ_QOS_MANAGER);
- end_index = (start_index == KQWQ_QOS_MANAGER) ? 0 : start_index;
+ kqlock_held(kq);
- } else if (kq->kq_state & KQ_WORKLOOP) {
- if (ut->uu_kqueue_bound != kq)
- return EJUSTRETURN;
+ if (kq->kq_state & KQ_WORKLOOP) {
+ __assert_only struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- /*
- * Single request servicing
- * we want to deliver all events, regardless of the QOS
- */
- start_index = end_index = THREAD_QOS_UNSPECIFIED;
+ assert(kqwl->kqwl_owner == THREAD_NULL);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST),
+ kqwl->kqwl_dynamicid, 0, qos, kqr->tr_kq_wakeup);
+ ts = kqwl->kqwl_turnstile;
+ /* Add a thread request reference on the kqueue. */
+ kqworkloop_retain(kqwl);
} else {
- start_index = end_index = QOS_INDEX_KQFILE;
+ assert(kq->kq_state & KQ_WORKQ);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST),
+ -1, 0, qos, kqr->tr_kq_wakeup);
}
-
- i = start_index;
-
- do {
- if (kqueue_begin_processing(kq, i, flags) == -1) {
- *countp = 0;
- /* Nothing to process */
- continue;
- }
+ /*
+ * New-style thread request supported.
+ * Provide the pthread kext a pointer to a workq_threadreq_s structure for
+ * its use until a corresponding kqueue_threadreq_bind callback.
+ */
+ if (kqueue_threadreq_can_use_ast(kq)) {
+ flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
+ }
+ if (qos == KQWQ_QOS_MANAGER) {
+ qos = WORKQ_THREAD_QOS_MANAGER;
+ }
+ if (!workq_kern_threadreq_initiate(kq->kq_p, kqr, ts, qos, flags)) {
/*
- * loop through the enqueued knotes associated with this request,
- * processing each one. Each request may have several queues
- * of knotes to process (depending on the type of kqueue) so we
- * have to loop through all the queues as long as we have additional
- * space.
+ * Process is shutting down or exec'ing.
+ * All the kqueues are going to be cleaned up
+ * soon. Forget we even asked for a thread -
+ * and make sure we don't ask for more.
*/
- error = 0;
-
- struct kqtailq *base_queue = kqueue_get_base_queue(kq, i);
- struct kqtailq *queue = kqueue_get_high_queue(kq, i);
- do {
- while (error == 0 && (kn = TAILQ_FIRST(queue)) != NULL) {
- error = knote_process(kn, callback, callback_data, process_data, p);
- if (error == EJUSTRETURN) {
- error = 0;
- } else {
- nevents++;
- }
- /* error is EWOULDBLOCK when the out event array is full */
- }
- } while (error == 0 && queue-- > base_queue);
+ kq->kq_state &= ~KQ_R2K_ARMED;
+ kqueue_release_live(kq);
+ }
+}
- if ((kq->kq_state & KQ_WORKQ) == 0) {
- kqueue_end_processing(kq, i, nevents, flags);
- }
+/*
+ * kqueue_threadreq_bind_prepost - prepost the bind to kevent
+ *
+ * This is used when kqueue_threadreq_bind may cause a lock inversion.
+ */
+__attribute__((always_inline))
+void
+kqueue_threadreq_bind_prepost(struct proc *p __unused, workq_threadreq_t kqr,
+ struct uthread *ut)
+{
+ ut->uu_kqr_bound = kqr;
+ kqr->tr_thread = ut->uu_thread;
+ kqr->tr_state = WORKQ_TR_STATE_BINDING;
+}
- if (error == EWOULDBLOCK) {
- /* break out if no more space for additional events */
- error = 0;
- break;
- }
- } while (i-- > end_index);
+/*
+ * kqueue_threadreq_bind_commit - commit a bind prepost
+ *
+ * The workq code has to commit any binding prepost before the thread has
+ * a chance to come back to userspace (and do kevent syscalls) or be aborted.
+ */
+void
+kqueue_threadreq_bind_commit(struct proc *p, thread_t thread)
+{
+ struct uthread *ut = get_bsdthread_info(thread);
+ workq_threadreq_t kqr = ut->uu_kqr_bound;
+ kqueue_t kqu = kqr_kqueue(p, kqr);
- *countp = nevents;
- return (error);
+ kqlock(kqu);
+ if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
+ kqueue_threadreq_bind(p, kqr, thread, 0);
+ }
+ kqunlock(kqu);
}
static void
-kqueue_scan_continue(void *data, wait_result_t wait_result)
+kqueue_threadreq_modify(kqueue_t kqu, workq_threadreq_t kqr, kq_index_t qos,
+ workq_kern_threadreq_flags_t flags)
{
- thread_t self = current_thread();
- uthread_t ut = (uthread_t)get_bsdthread_info(self);
- struct _kqueue_scan * cont_args = &ut->uu_kevent.ss_kqueue_scan;
- struct kqueue *kq = (struct kqueue *)data;
- struct filt_process_s *process_data = cont_args->process_data;
- int error;
- int count;
-
- /* convert the (previous) wait_result to a proper error */
- switch (wait_result) {
- case THREAD_AWAKENED: {
- kqlock(kq);
- retry:
- error = kqueue_process(kq, cont_args->call, cont_args->data,
- process_data, &count, current_proc());
- if (error == 0 && count == 0) {
- if (kq->kq_state & KQ_DRAIN) {
- kqunlock(kq);
- goto drain;
- }
+ assert(kqr_thread_requested_pending(kqr));
- if (kq->kq_state & KQ_WAKEUP)
- goto retry;
+ kqlock_held(kqu);
- waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
- KQ_EVENT, THREAD_ABORTSAFE,
- cont_args->deadline);
- kq->kq_state |= KQ_SLEEP;
- kqunlock(kq);
- thread_block_parameter(kqueue_scan_continue, kq);
- /* NOTREACHED */
- }
- kqunlock(kq);
- } break;
- case THREAD_TIMED_OUT:
- error = EWOULDBLOCK;
- break;
- case THREAD_INTERRUPTED:
- error = EINTR;
- break;
- case THREAD_RESTART:
- drain:
- error = EBADF;
- break;
- default:
- panic("%s: - invalid wait_result (%d)", __func__,
- wait_result);
- error = 0;
+ if (kqueue_threadreq_can_use_ast(kqu.kq)) {
+ flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
}
-
- /* call the continuation with the results */
- assert(cont_args->cont != NULL);
- (cont_args->cont)(kq, cont_args->data, error);
+ workq_kern_threadreq_modify(kqu.kq->kq_p, kqr, qos, flags);
}
-
/*
- * kqueue_scan - scan and wait for events in a kqueue
- *
- * Process the triggered events in a kqueue.
+ * kqueue_threadreq_bind - bind thread to processing kqrequest
*
- * If there are no events triggered arrange to
- * wait for them. If the caller provided a
- * continuation routine, then kevent_scan will
- * also.
- *
- * The callback routine must be valid.
- * The caller must hold a use-count reference on the kq.
+ * The provided thread will be responsible for delivering events
+ * associated with the given kqrequest. Bind it and get ready for
+ * the thread to eventually arrive.
*/
+void
+kqueue_threadreq_bind(struct proc *p, workq_threadreq_t kqr, thread_t thread,
+ unsigned int flags)
+{
+ kqueue_t kqu = kqr_kqueue(p, kqr);
+ struct uthread *ut = get_bsdthread_info(thread);
-int
-kqueue_scan(struct kqueue *kq,
- kevent_callback_t callback,
- kqueue_continue_t continuation,
- void *callback_data,
- struct filt_process_s *process_data,
- struct timeval *atvp,
- struct proc *p)
-{
- thread_continue_t cont = THREAD_CONTINUE_NULL;
- unsigned int flags;
- uint64_t deadline;
- int error;
- int first;
- int fd;
-
- assert(callback != NULL);
+ kqlock_held(kqu);
- /*
- * Determine which QoS index we are servicing
- */
- flags = (process_data) ? process_data->fp_flags : 0;
- fd = (process_data) ? process_data->fp_fd : -1;
+ assert(ut->uu_kqueue_override == 0);
- first = 1;
- for (;;) {
- wait_result_t wait_result;
- int count;
+ if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
+ assert(ut->uu_kqr_bound == kqr);
+ assert(kqr->tr_thread == thread);
+ } else {
+ assert(kqr_thread_requested_pending(kqr));
+ assert(kqr->tr_thread == THREAD_NULL);
+ assert(ut->uu_kqr_bound == NULL);
+ ut->uu_kqr_bound = kqr;
+ kqr->tr_thread = thread;
+ }
- /*
- * Make a pass through the kq to find events already
- * triggered.
- */
- kqlock(kq);
- error = kqueue_process(kq, callback, callback_data,
- process_data, &count, p);
- if (error || count)
- break; /* lock still held */
-
- /* looks like we have to consider blocking */
- if (first) {
- first = 0;
- /* convert the timeout to a deadline once */
- if (atvp->tv_sec || atvp->tv_usec) {
- uint64_t now;
-
- clock_get_uptime(&now);
- nanoseconds_to_absolutetime((uint64_t)atvp->tv_sec * NSEC_PER_SEC +
- atvp->tv_usec * (long)NSEC_PER_USEC,
- &deadline);
- if (now >= deadline) {
- /* non-blocking call */
- error = EWOULDBLOCK;
- break; /* lock still held */
- }
- deadline -= now;
- clock_absolutetime_interval_to_deadline(deadline, &deadline);
- } else {
- deadline = 0; /* block forever */
- }
+ kqr->tr_state = WORKQ_TR_STATE_BOUND;
- if (continuation) {
- uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
- struct _kqueue_scan *cont_args = &ut->uu_kevent.ss_kqueue_scan;
+ if (kqu.kq->kq_state & KQ_WORKLOOP) {
+ struct turnstile *ts = kqu.kqwl->kqwl_turnstile;
- cont_args->call = callback;
- cont_args->cont = continuation;
- cont_args->deadline = deadline;
- cont_args->data = callback_data;
- cont_args->process_data = process_data;
- cont = kqueue_scan_continue;
+ if (__improbable(thread == kqu.kqwl->kqwl_owner)) {
+ /*
+ * <rdar://problem/38626999> shows that asserting here is not ok.
+ *
+ * This is not supposed to happen for correct use of the interface,
+ * but it is sadly possible for userspace (with the help of memory
+ * corruption, such as over-release of a dispatch queue) to make
+ * the creator thread the "owner" of a workloop.
+ *
+ * Once that happens, and that creator thread picks up the same
+ * workloop as a servicer, we trip this codepath. We need to fixup
+ * the state to forget about this thread being the owner, as the
+ * entire workloop state machine expects servicers to never be
+ * owners and everything would basically go downhill from here.
+ */
+ kqu.kqwl->kqwl_owner = THREAD_NULL;
+ if (kqworkloop_override(kqu.kqwl)) {
+ thread_drop_kevent_override(thread);
}
}
- if (kq->kq_state & KQ_DRAIN) {
- kqunlock(kq);
- return EBADF;
+ if (ts && (flags & KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE) == 0) {
+ /*
+ * Past this point, the interlock is the kq req lock again,
+ * so we can fix the inheritor for good.
+ */
+ filt_wlupdate_inheritor(kqu.kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
+ turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
}
- /* If awakened during processing, try again */
- if (kq->kq_state & KQ_WAKEUP) {
- kqunlock(kq);
- continue;
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_BIND), kqu.kqwl->kqwl_dynamicid,
+ thread_tid(thread), kqr->tr_kq_qos_index,
+ (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
+
+ ut->uu_kqueue_override = kqr->tr_kq_override_index;
+ if (kqr->tr_kq_override_index) {
+ thread_add_servicer_override(thread, kqr->tr_kq_override_index);
}
+ } else {
+ assert(kqr->tr_kq_override_index == 0);
- /* go ahead and wait */
- waitq_assert_wait64_leeway((struct waitq *)&kq->kq_wqs,
- KQ_EVENT, THREAD_ABORTSAFE,
- TIMEOUT_URGENCY_USER_NORMAL,
- deadline, TIMEOUT_NO_LEEWAY);
- kq->kq_state |= KQ_SLEEP;
- kqunlock(kq);
- wait_result = thread_block_parameter(cont, kq);
- /* NOTREACHED if (continuation != NULL) */
-
- switch (wait_result) {
- case THREAD_AWAKENED:
- continue;
- case THREAD_TIMED_OUT:
- return EWOULDBLOCK;
- case THREAD_INTERRUPTED:
- return EINTR;
- case THREAD_RESTART:
- return EBADF;
- default:
- panic("%s: - bad wait_result (%d)", __func__,
- wait_result);
- error = 0;
- }
- }
- kqunlock(kq);
- return (error);
-}
-
-
-/*
- * XXX
- * This could be expanded to call kqueue_scan, if desired.
- */
-/*ARGSUSED*/
-static int
-kqueue_read(__unused struct fileproc *fp,
- __unused struct uio *uio,
- __unused int flags,
- __unused vfs_context_t ctx)
-{
- return (ENXIO);
-}
-
-/*ARGSUSED*/
-static int
-kqueue_write(__unused struct fileproc *fp,
- __unused struct uio *uio,
- __unused int flags,
- __unused vfs_context_t ctx)
-{
- return (ENXIO);
-}
-
-/*ARGSUSED*/
-static int
-kqueue_ioctl(__unused struct fileproc *fp,
- __unused u_long com,
- __unused caddr_t data,
- __unused vfs_context_t ctx)
-{
- return (ENOTTY);
-}
-
-/*ARGSUSED*/
-static int
-kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
- __unused vfs_context_t ctx)
-{
- struct kqueue *kq = (struct kqueue *)fp->f_data;
- struct kqtailq *queue;
- struct kqtailq *suppressq;
- struct knote *kn;
- int retnum = 0;
-
- if (which != FREAD)
- return (0);
-
- kqlock(kq);
-
- assert((kq->kq_state & KQ_WORKQ) == 0);
-
- /*
- * If this is the first pass, link the wait queue associated with the
- * the kqueue onto the wait queue set for the select(). Normally we
- * use selrecord() for this, but it uses the wait queue within the
- * selinfo structure and we need to use the main one for the kqueue to
- * catch events from KN_STAYQUEUED sources. So we do the linkage manually.
- * (The select() call will unlink them when it ends).
- */
- if (wq_link_id != NULL) {
- thread_t cur_act = current_thread();
- struct uthread * ut = get_bsdthread_info(cur_act);
-
- kq->kq_state |= KQ_SEL;
- waitq_link((struct waitq *)&kq->kq_wqs, ut->uu_wqset,
- WAITQ_SHOULD_LOCK, (uint64_t *)wq_link_id);
-
- /* always consume the reserved link object */
- waitq_link_release(*(uint64_t *)wq_link_id);
- *(uint64_t *)wq_link_id = 0;
-
- /*
- * selprocess() is expecting that we send it back the waitq
- * that was just added to the thread's waitq set. In order
- * to not change the selrecord() API (which is exported to
- * kexts), we pass this value back through the
- * void *wq_link_id pointer we were passed. We need to use
- * memcpy here because the pointer may not be properly aligned
- * on 32-bit systems.
- */
- void *wqptr = &kq->kq_wqs;
- memcpy(wq_link_id, (void *)&wqptr, sizeof(void *));
- }
-
- if (kqueue_begin_processing(kq, QOS_INDEX_KQFILE, 0) == -1) {
- kqunlock(kq);
- return (0);
- }
-
- queue = kqueue_get_base_queue(kq, QOS_INDEX_KQFILE);
- if (!TAILQ_EMPTY(queue)) {
- /*
- * there is something queued - but it might be a
- * KN_STAYACTIVE knote, which may or may not have
- * any events pending. Otherwise, we have to walk
- * the list of knotes to see, and peek at the
- * (non-vanished) stay-active ones to be really sure.
- */
- while ((kn = (struct knote *)TAILQ_FIRST(queue)) != NULL) {
- if (kn->kn_status & KN_ACTIVE) {
- retnum = 1;
- goto out;
- }
- assert(kn->kn_status & KN_STAYACTIVE);
- knote_suppress(kn);
- }
-
- /*
- * There were no regular events on the queue, so take
- * a deeper look at the stay-queued ones we suppressed.
- */
- suppressq = kqueue_get_suppressed_queue(kq, QOS_INDEX_KQFILE);
- while ((kn = (struct knote *)TAILQ_FIRST(suppressq)) != NULL) {
- unsigned peek = 1;
-
- assert(!knoteuse_needs_boost(kn, NULL));
-
- /* If didn't vanish while suppressed - peek at it */
- if (kqlock2knoteuse(kq, kn, KNUSE_NONE)) {
- peek = knote_fops(kn)->f_peek(kn);
-
- /* if it dropped while getting lock - move on */
- if (!knoteuse2kqlock(kq, kn, KNUSE_NONE))
- continue;
- }
-
- /* unsuppress it */
- knote_unsuppress(kn);
-
- /* has data or it has to report a vanish */
- if (peek > 0) {
- retnum = 1;
- goto out;
- }
- }
- }
-
-out:
- kqueue_end_processing(kq, QOS_INDEX_KQFILE, retnum, 0);
- kqunlock(kq);
- return (retnum);
-}
-
-/*
- * kqueue_close -
- */
-/*ARGSUSED*/
-static int
-kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
-{
- struct kqfile *kqf = (struct kqfile *)fg->fg_data;
-
- assert((kqf->kqf_state & KQ_WORKQ) == 0);
- kqueue_dealloc(&kqf->kqf_kqueue);
- fg->fg_data = NULL;
- return (0);
-}
-
-/*ARGSUSED*/
-/*
- * The callers has taken a use-count reference on this kqueue and will donate it
- * to the kqueue we are being added to. This keeps the kqueue from closing until
- * that relationship is torn down.
- */
-static int
-kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn,
- __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx)
-{
- struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
- struct kqueue *kq = &kqf->kqf_kqueue;
- struct kqueue *parentkq = knote_get_kq(kn);
-
- assert((kqf->kqf_state & KQ_WORKQ) == 0);
-
- if (parentkq == kq ||
- kn->kn_filter != EVFILT_READ) {
- kn->kn_flags = EV_ERROR;
- kn->kn_data = EINVAL;
- return 0;
- }
-
- /*
- * We have to avoid creating a cycle when nesting kqueues
- * inside another. Rather than trying to walk the whole
- * potential DAG of nested kqueues, we just use a simple
- * ceiling protocol. When a kqueue is inserted into another,
- * we check that the (future) parent is not already nested
- * into another kqueue at a lower level than the potenial
- * child (because it could indicate a cycle). If that test
- * passes, we just mark the nesting levels accordingly.
- */
-
- kqlock(parentkq);
- if (parentkq->kq_level > 0 &&
- parentkq->kq_level < kq->kq_level)
- {
- kqunlock(parentkq);
- kn->kn_flags = EV_ERROR;
- kn->kn_data = EINVAL;
- return 0;
- } else {
- /* set parent level appropriately */
- if (parentkq->kq_level == 0)
- parentkq->kq_level = 2;
- if (parentkq->kq_level < kq->kq_level + 1)
- parentkq->kq_level = kq->kq_level + 1;
- kqunlock(parentkq);
-
- kn->kn_filtid = EVFILTID_KQREAD;
- kqlock(kq);
- KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
- /* indicate nesting in child, if needed */
- if (kq->kq_level == 0)
- kq->kq_level = 1;
-
- int count = kq->kq_count;
- kqunlock(kq);
- return (count > 0);
- }
-}
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_BIND), -1,
+ thread_tid(thread), kqr->tr_kq_qos_index,
+ (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
+ }
+}
/*
- * kqueue_drain - called when kq is closed
+ * kqueue_threadreq_cancel - abort a pending thread request
+ *
+ * Called when exiting/exec'ing. Forget our pending request.
*/
-/*ARGSUSED*/
-static int
-kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
+void
+kqueue_threadreq_cancel(struct proc *p, workq_threadreq_t kqr)
{
- struct kqueue *kq = (struct kqueue *)fp->f_fglob->fg_data;
-
- assert((kq->kq_state & KQ_WORKQ) == 0);
-
- kqlock(kq);
- kq->kq_state |= KQ_DRAIN;
- kqueue_interrupt(kq);
- kqunlock(kq);
- return (0);
+ kqueue_release(kqr_kqueue(p, kqr));
}
-/*ARGSUSED*/
-int
-kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
+workq_threadreq_param_t
+kqueue_threadreq_workloop_param(workq_threadreq_t kqr)
{
- assert((kq->kq_state & KQ_WORKQ) == 0);
-
- kqlock(kq);
- if (isstat64 != 0) {
- struct stat64 *sb64 = (struct stat64 *)ub;
-
- bzero((void *)sb64, sizeof(*sb64));
- sb64->st_size = kq->kq_count;
- if (kq->kq_state & KQ_KEV_QOS)
- sb64->st_blksize = sizeof(struct kevent_qos_s);
- else if (kq->kq_state & KQ_KEV64)
- sb64->st_blksize = sizeof(struct kevent64_s);
- else if (IS_64BIT_PROCESS(p))
- sb64->st_blksize = sizeof(struct user64_kevent);
- else
- sb64->st_blksize = sizeof(struct user32_kevent);
- sb64->st_mode = S_IFIFO;
- } else {
- struct stat *sb = (struct stat *)ub;
+ struct kqworkloop *kqwl;
+ workq_threadreq_param_t trp;
- bzero((void *)sb, sizeof(*sb));
- sb->st_size = kq->kq_count;
- if (kq->kq_state & KQ_KEV_QOS)
- sb->st_blksize = sizeof(struct kevent_qos_s);
- else if (kq->kq_state & KQ_KEV64)
- sb->st_blksize = sizeof(struct kevent64_s);
- else if (IS_64BIT_PROCESS(p))
- sb->st_blksize = sizeof(struct user64_kevent);
- else
- sb->st_blksize = sizeof(struct user32_kevent);
- sb->st_mode = S_IFIFO;
- }
- kqunlock(kq);
- return (0);
+ assert(kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
+ kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
+ trp.trp_value = kqwl->kqwl_params;
+ return trp;
}
/*
- * Interact with the pthread kext to request a servicing there.
- * Eventually, this will request threads at specific QoS levels.
- * For now, it only requests a dispatch-manager-QoS thread, and
- * only one-at-a-time.
+ * kqueue_threadreq_unbind - unbind thread from processing kqueue
*
- * - Caller holds the workq request lock
+ * End processing the per-QoS bucket of events and allow other threads
+ * to be requested for future servicing.
*
- * - May be called with the kqueue's wait queue set locked,
- * so cannot do anything that could recurse on that.
+ * caller holds a reference on the kqueue.
*/
-static void
-kqworkq_request_thread(
- struct kqworkq *kqwq,
- kq_index_t qos_index)
+void
+kqueue_threadreq_unbind(struct proc *p, workq_threadreq_t kqr)
{
- struct kqrequest *kqr;
-
- assert(kqwq->kqwq_state & KQ_WORKQ);
- assert(qos_index < KQWQ_NQOS);
-
- kqr = kqworkq_get_request(kqwq, qos_index);
-
- assert(kqr->kqr_state & KQR_WAKEUP);
-
- /*
- * If we have already requested a thread, and it hasn't
- * started processing yet, there's no use hammering away
- * on the pthread kext.
- */
- if (kqr->kqr_state & KQR_THREQUESTED)
- return;
-
- assert((kqr->kqr_state & KQR_BOUND) == 0);
-
- /* request additional workq threads if appropriate */
- if (pthread_functions != NULL &&
- pthread_functions->workq_reqthreads != NULL) {
- unsigned int flags = KEVENT_FLAG_WORKQ;
- unsigned long priority;
- thread_t wqthread;
-
- /* Compute the appropriate pthread priority */
- priority = qos_from_qos_index(qos_index);
-
-#if 0
- /* JMM - for now remain compatible with old invocations */
- /* set the over-commit flag on the request if needed */
- if (kqr->kqr_state & KQR_THOVERCOMMIT)
- priority |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
-#endif /* 0 */
-
- /* Compute a priority based on qos_index. */
- struct workq_reqthreads_req_s request = {
- .priority = priority,
- .count = 1
- };
-
- /* mark that we are making a request */
- kqr->kqr_state |= KQR_THREQUESTED;
- if (qos_index == KQWQ_QOS_MANAGER)
- kqr->kqr_state |= KQWQ_THMANAGER;
-
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST),
- 0, qos_index,
- (((uintptr_t)kqr->kqr_override_index << 8) |
- (uintptr_t)kqr->kqr_state));
- wqthread = (*pthread_functions->workq_reqthreads)(kqwq->kqwq_p, 1, &request);
-
- /* We've been switched to the emergency/manager thread */
- if (wqthread == (thread_t)-1) {
- assert(qos_index != KQWQ_QOS_MANAGER);
- kqr->kqr_state |= KQWQ_THMANAGER;
- return;
- }
-
- /*
- * bind the returned thread identity
- * This goes away when we switch to synchronous callback
- * binding from the pthread kext.
- */
- if (wqthread != NULL) {
- kqworkq_bind_thread_impl(kqwq, qos_index, wqthread, flags);
- }
+ if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
+ kqworkloop_unbind(kqr_kqworkloop(kqr));
+ } else {
+ kqworkq_unbind(p, kqr);
}
}
* so cannot do anything that could recurse on that.
*/
static void
-kqworkq_request_help(
- struct kqworkq *kqwq,
- kq_index_t qos_index)
+kqworkq_wakeup(struct kqworkq *kqwq, kq_index_t qos_index)
{
- struct kqrequest *kqr;
+ workq_threadreq_t kqr = kqworkq_get_request(kqwq, qos_index);
/* convert to thread qos value */
- assert(qos_index < KQWQ_NQOS);
-
- kqwq_req_lock(kqwq);
- kqr = kqworkq_get_request(kqwq, qos_index);
-
- if ((kqr->kqr_state & KQR_WAKEUP) == 0) {
- /* Indicate that we needed help from this request */
- kqr->kqr_state |= KQR_WAKEUP;
-
- /* Go assure a thread request has been made */
- kqworkq_request_thread(kqwq, qos_index);
- }
- kqwq_req_unlock(kqwq);
-}
-
-static void
-kqworkloop_threadreq_impl(struct kqworkloop *kqwl, kq_index_t qos_index)
-{
- struct kqrequest *kqr = &kqwl->kqwl_request;
- unsigned long pri = pthread_priority_for_kqrequest(kqr, qos_index);
- int op, ret;
-
- assert((kqr->kqr_state & (KQR_THREQUESTED | KQR_BOUND)) == KQR_THREQUESTED);
-
- /*
- * New-style thread request supported. Provide
- * the pthread kext a pointer to a workq_threadreq_s
- * structure for its use until a corresponding
- * workloop_fulfill_threqreq callback.
- */
- if (current_proc() == kqwl->kqwl_kqueue.kq_p) {
- op = WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL;
- } else {
- op = WORKQ_THREADREQ_WORKLOOP;
- }
-again:
- ret = (*pthread_functions->workq_threadreq)(kqwl->kqwl_p, &kqr->kqr_req,
- WORKQ_THREADREQ_WORKLOOP, pri, 0);
- switch (ret) {
- case ENOTSUP:
- assert(op == WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL);
- op = WORKQ_THREADREQ_WORKLOOP;
- goto again;
-
- case ECANCELED:
- case EINVAL:
- /*
- * Process is shutting down or exec'ing.
- * All the kqueues are going to be cleaned up
- * soon. Forget we even asked for a thread -
- * and make sure we don't ask for more.
- */
- kqueue_release((struct kqueue *)kqwl, KQUEUE_CANT_BE_LAST_REF);
- kqr->kqr_state &= ~KQR_THREQUESTED;
- kqr->kqr_state |= KQR_DRAIN;
- break;
-
- case EAGAIN:
- assert(op == WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL);
- act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ);
- break;
-
- default:
- assert(ret == 0);
- }
-}
-
-static void
-kqworkloop_threadreq_modify(struct kqworkloop *kqwl, kq_index_t qos_index)
-{
- struct kqrequest *kqr = &kqwl->kqwl_request;
- unsigned long pri = pthread_priority_for_kqrequest(kqr, qos_index);
- int ret, op = WORKQ_THREADREQ_CHANGE_PRI_NO_THREAD_CALL;
-
- assert((kqr->kqr_state & (KQR_THREQUESTED | KQR_BOUND)) == KQR_THREQUESTED);
-
- if (current_proc() == kqwl->kqwl_kqueue.kq_p) {
- op = WORKQ_THREADREQ_CHANGE_PRI_NO_THREAD_CALL;
- } else {
- op = WORKQ_THREADREQ_CHANGE_PRI;
- }
-again:
- ret = (*pthread_functions->workq_threadreq_modify)(kqwl->kqwl_p,
- &kqr->kqr_req, op, pri, 0);
- switch (ret) {
- case ENOTSUP:
- assert(op == WORKQ_THREADREQ_CHANGE_PRI_NO_THREAD_CALL);
- op = WORKQ_THREADREQ_CHANGE_PRI;
- goto again;
-
- case EAGAIN:
- assert(op == WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL);
- act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ);
- break;
+ assert(qos_index < KQWQ_NBUCKETS);
- case ECANCELED:
- case EINVAL:
- case 0:
- break;
-
- default:
- assert(ret == 0);
+ if (!kqr->tr_kq_wakeup) {
+ kqr->tr_kq_wakeup = true;
+ if (!kqr_thread_requested(kqr)) {
+ kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr, qos_index, 0);
+ }
}
}
/*
- * Interact with the pthread kext to request a servicing thread.
- * This will request a single thread at the highest QoS level
- * for which there is work (whether that was the requested QoS
- * for an event or an override applied to a lower-QoS request).
- *
- * - Caller holds the workloop request lock
- *
- * - May be called with the kqueue's wait queue set locked,
- * so cannot do anything that could recurse on that.
+ * This represents the asynchronous QoS a given workloop contributes,
+ * hence is the max of the current active knotes (override index)
+ * and the workloop max qos (userspace async qos).
*/
-static void
-kqworkloop_request_thread(struct kqworkloop *kqwl, kq_index_t qos_index)
-{
- struct kqrequest *kqr;
-
- assert(kqwl->kqwl_state & KQ_WORKLOOP);
-
- kqr = &kqwl->kqwl_request;
-
- assert(kqwl->kqwl_owner == THREAD_NULL);
- assert((kqr->kqr_state & KQR_BOUND) == 0);
- assert((kqr->kqr_state & KQR_THREQUESTED) == 0);
- assert(!(kqwl->kqwl_kqueue.kq_state & KQ_NO_WQ_THREAD));
-
- /* If we're draining thread requests, just bail */
- if (kqr->kqr_state & KQR_DRAIN)
- return;
-
- if (pthread_functions != NULL &&
- pthread_functions->workq_threadreq != NULL) {
- /*
- * set request state flags, etc... before calling pthread
- * This assures they are set before a possible synchronous
- * callback to workloop_fulfill_threadreq().
- */
- kqr->kqr_state |= KQR_THREQUESTED;
-
- /* Add a thread request reference on the kqueue. */
- kqueue_retain((struct kqueue *)kqwl);
-
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST),
- kqwl->kqwl_dynamicid,
- 0, qos_index, kqr->kqr_state);
- kqworkloop_threadreq_impl(kqwl, qos_index);
- } else {
- panic("kqworkloop_request_thread");
- return;
- }
-}
-
-static void
-kqworkloop_update_sync_override_state(struct kqworkloop *kqwl, boolean_t sync_ipc_override)
-{
- struct kqrequest *kqr = &kqwl->kqwl_request;
- kqwl_req_lock(kqwl);
- kqr->kqr_has_sync_override = sync_ipc_override;
- kqwl_req_unlock(kqwl);
-
-}
-
-static inline kq_index_t
-kqworkloop_combined_qos(struct kqworkloop *kqwl, boolean_t *ipc_override_is_sync)
+static kq_index_t
+kqworkloop_override(struct kqworkloop *kqwl)
{
- struct kqrequest *kqr = &kqwl->kqwl_request;
- kq_index_t override;
-
- *ipc_override_is_sync = FALSE;
- override = MAX(MAX(kqr->kqr_qos_index, kqr->kqr_override_index),
- kqr->kqr_dsync_waiters_qos);
-
- if (kqr->kqr_sync_suppress_count > 0 || kqr->kqr_has_sync_override) {
- *ipc_override_is_sync = TRUE;
- override = THREAD_QOS_USER_INTERACTIVE;
- }
- return override;
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
+ return MAX(kqr->tr_kq_qos_index, kqr->tr_kq_override_index);
}
static inline void
kqworkloop_request_fire_r2k_notification(struct kqworkloop *kqwl)
{
- struct kqrequest *kqr = &kqwl->kqwl_request;
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
- kqwl_req_held(kqwl);
+ kqlock_held(kqwl);
- if (kqr->kqr_state & KQR_R2K_NOTIF_ARMED) {
- assert(kqr->kqr_state & KQR_BOUND);
- assert(kqr->kqr_thread);
-
- kqr->kqr_state &= ~KQR_R2K_NOTIF_ARMED;
- act_set_astkevent(kqr->kqr_thread, AST_KEVENT_RETURN_TO_KERNEL);
+ if (kqwl->kqwl_state & KQ_R2K_ARMED) {
+ kqwl->kqwl_state &= ~KQ_R2K_ARMED;
+ act_set_astkevent(kqr_thread_fast(kqr), AST_KEVENT_RETURN_TO_KERNEL);
}
}
static void
kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos)
{
- const uint8_t KQWL_STAYACTIVE_FIRED_BIT = (1 << 0);
-
- struct kqrequest *kqr = &kqwl->kqwl_request;
- boolean_t old_ipc_override_is_sync = FALSE;
- kq_index_t old_qos = kqworkloop_combined_qos(kqwl, &old_ipc_override_is_sync);
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
struct kqueue *kq = &kqwl->kqwl_kqueue;
- bool static_thread = (kq->kq_state & KQ_NO_WQ_THREAD);
+ kq_index_t old_override = kqworkloop_override(kqwl);
kq_index_t i;
- /* must hold the kqr lock */
- kqwl_req_held(kqwl);
+ kqlock_held(kqwl);
switch (op) {
case KQWL_UTQ_UPDATE_WAKEUP_QOS:
if (qos == KQWL_BUCKET_STAYACTIVE) {
/*
* the KQWL_BUCKET_STAYACTIVE is not a QoS bucket, we only remember
- * a high watermark (kqr_stayactive_qos) of any stay active knote
+ * a high watermark (kqwl_stayactive_qos) of any stay active knote
* that was ever registered with this workloop.
*
 * When waitq_set__CALLING_PREPOST_HOOK__() wakes up any stay active
 * knote, KQWL_STAYACTIVE_FIRED_BIT is set so that we remember that
 * there is at least one stay active knote fired until the next full
 * processing of this bucket.
*/
- kqr->kqr_wakeup_indexes |= KQWL_STAYACTIVE_FIRED_BIT;
- qos = kqr->kqr_stayactive_qos;
+ kqwl->kqwl_wakeup_indexes |= KQWL_STAYACTIVE_FIRED_BIT;
+ qos = kqwl->kqwl_stayactive_qos;
assert(qos);
- assert(!static_thread);
}
- if (kqr->kqr_wakeup_indexes & (1 << qos)) {
- assert(kqr->kqr_state & KQR_WAKEUP);
+ if (kqwl->kqwl_wakeup_indexes & (1 << qos)) {
+ assert(kqr->tr_kq_wakeup);
break;
}
- kqr->kqr_wakeup_indexes |= (1 << qos);
- kqr->kqr_state |= KQR_WAKEUP;
+ kqwl->kqwl_wakeup_indexes |= (1 << qos);
+ kqr->tr_kq_wakeup = true;
kqworkloop_request_fire_r2k_notification(kqwl);
- goto recompute_async;
+ goto recompute;
case KQWL_UTQ_UPDATE_STAYACTIVE_QOS:
assert(qos);
- if (kqr->kqr_stayactive_qos < qos) {
- kqr->kqr_stayactive_qos = qos;
- if (kqr->kqr_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT) {
- assert(kqr->kqr_state & KQR_WAKEUP);
- kqr->kqr_wakeup_indexes |= (1 << qos);
- goto recompute_async;
+ if (kqwl->kqwl_stayactive_qos < qos) {
+ kqwl->kqwl_stayactive_qos = qos;
+ if (kqwl->kqwl_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT) {
+ assert(kqr->tr_kq_wakeup);
+ kqwl->kqwl_wakeup_indexes |= (1 << qos);
+ goto recompute;
}
}
break;
+ case KQWL_UTQ_PARKING:
+ case KQWL_UTQ_UNBINDING:
+ kqr->tr_kq_override_index = qos;
+ /* FALLTHROUGH */
case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS:
- kqlock_held(kq); // to look at kq_queues
- kqr->kqr_has_sync_override = FALSE;
+ if (op == KQWL_UTQ_RECOMPUTE_WAKEUP_QOS) {
+ assert(qos == THREAD_QOS_UNSPECIFIED);
+ }
i = KQWL_BUCKET_STAYACTIVE;
- if (TAILQ_EMPTY(&kqr->kqr_suppressed)) {
- kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED;
+ if (TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
+ kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
}
- if (!TAILQ_EMPTY(&kq->kq_queue[i]) &&
- (kqr->kqr_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT)) {
+ if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i]) &&
+ (kqwl->kqwl_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT)) {
/*
* If the KQWL_STAYACTIVE_FIRED_BIT is set, it means a stay active
- * knote may have fired, so we need to merge in kqr_stayactive_qos.
+ * knote may have fired, so we need to merge in kqwl_stayactive_qos.
*
* Unlike other buckets, this one is never empty but could be idle.
*/
- kqr->kqr_wakeup_indexes &= KQWL_STAYACTIVE_FIRED_BIT;
- kqr->kqr_wakeup_indexes |= (1 << kqr->kqr_stayactive_qos);
+ kqwl->kqwl_wakeup_indexes &= KQWL_STAYACTIVE_FIRED_BIT;
+ kqwl->kqwl_wakeup_indexes |= (1 << kqwl->kqwl_stayactive_qos);
} else {
- kqr->kqr_wakeup_indexes = 0;
+ kqwl->kqwl_wakeup_indexes = 0;
}
for (i = THREAD_QOS_UNSPECIFIED + 1; i < KQWL_BUCKET_STAYACTIVE; i++) {
- if (!TAILQ_EMPTY(&kq->kq_queue[i])) {
- kqr->kqr_wakeup_indexes |= (1 << i);
- struct knote *kn = TAILQ_FIRST(&kqwl->kqwl_kqueue.kq_queue[i]);
- if (i == THREAD_QOS_USER_INTERACTIVE &&
- kn->kn_qos_override_is_sync) {
- kqr->kqr_has_sync_override = TRUE;
- }
+ if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i])) {
+ kqwl->kqwl_wakeup_indexes |= (1 << i);
}
}
- if (kqr->kqr_wakeup_indexes) {
- kqr->kqr_state |= KQR_WAKEUP;
+ if (kqwl->kqwl_wakeup_indexes) {
+ kqr->tr_kq_wakeup = true;
kqworkloop_request_fire_r2k_notification(kqwl);
} else {
- kqr->kqr_state &= ~KQR_WAKEUP;
+ kqr->tr_kq_wakeup = false;
}
- assert(qos == THREAD_QOS_UNSPECIFIED);
- goto recompute_async;
+ goto recompute;
case KQWL_UTQ_RESET_WAKEUP_OVERRIDE:
- kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED;
- assert(qos == THREAD_QOS_UNSPECIFIED);
- goto recompute_async;
+ kqr->tr_kq_override_index = qos;
+ goto recompute;
case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE:
- recompute_async:
+recompute:
/*
- * When modifying the wakeup QoS or the async override QoS, we always
- * need to maintain our invariant that kqr_override_index is at least as
- * large as the highest QoS for which an event is fired.
+ * When modifying the wakeup QoS or the override QoS, we always need to
+ * maintain our invariant that kqr_override_index is at least as large
+ * as the highest QoS for which an event is fired.
*
 * However this override index can be larger when there is an overridden
* suppressed knote pushing on the kqueue.
*/
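/*
 * Worked example (assuming fls() is the usual 1-based find-last-set): with
 * kqwl_wakeup_indexes == 0x28, QoS buckets 3 and 5 have pending wakeups;
 * fls(0x28) returns 6, so qos becomes 5, the highest bucket with a wakeup.
 */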
- if (kqr->kqr_wakeup_indexes > (1 << qos)) {
- qos = fls(kqr->kqr_wakeup_indexes) - 1; /* fls is 1-based */
+ if (kqwl->kqwl_wakeup_indexes > (1 << qos)) {
+ qos = fls(kqwl->kqwl_wakeup_indexes) - 1; /* fls is 1-based */
}
- if (kqr->kqr_override_index < qos) {
- kqr->kqr_override_index = qos;
+ if (kqr->tr_kq_override_index < qos) {
+ kqr->tr_kq_override_index = qos;
}
break;
case KQWL_UTQ_REDRIVE_EVENTS:
break;
- case KQWL_UTQ_SET_ASYNC_QOS:
- filt_wlheld(kqwl);
- kqr->kqr_qos_index = qos;
- break;
-
- case KQWL_UTQ_SET_SYNC_WAITERS_QOS:
- filt_wlheld(kqwl);
- kqr->kqr_dsync_waiters_qos = qos;
+ case KQWL_UTQ_SET_QOS_INDEX:
+ kqr->tr_kq_qos_index = qos;
break;
default:
panic("unknown kqwl thread qos update operation: %d", op);
}
- boolean_t new_ipc_override_is_sync = FALSE;
- kq_index_t new_qos = kqworkloop_combined_qos(kqwl, &new_ipc_override_is_sync);
thread_t kqwl_owner = kqwl->kqwl_owner;
- thread_t servicer = kqr->kqr_thread;
- __assert_only int ret;
+ thread_t servicer = kqr_thread(kqr);
+ boolean_t qos_changed = FALSE;
+ kq_index_t new_override = kqworkloop_override(kqwl);
/*
* Apply the diffs to the owner if applicable
*/
- if (filt_wlowner_is_valid(kqwl_owner)) {
+ if (kqwl_owner) {
#if 0
/* JMM - need new trace hooks for owner overrides */
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST),
- kqwl->kqwl_dynamicid,
- (kqr->kqr_state & KQR_BOUND) ? thread_tid(kqwl_owner) : 0,
- (kqr->kqr_qos_index << 8) | new_qos,
- (kqr->kqr_override_index << 8) | kqr->kqr_state);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST),
+ kqwl->kqwl_dynamicid, thread_tid(kqwl_owner), kqr->tr_kq_qos_index,
+ (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
#endif
- if (new_qos == kqr->kqr_dsync_owner_qos) {
+ if (new_override == old_override) {
// nothing to do
- } else if (kqr->kqr_dsync_owner_qos == THREAD_QOS_UNSPECIFIED) {
- thread_add_ipc_override(kqwl_owner, new_qos);
- } else if (new_qos == THREAD_QOS_UNSPECIFIED) {
- thread_drop_ipc_override(kqwl_owner);
- } else /* kqr->kqr_dsync_owner_qos != new_qos */ {
- thread_update_ipc_override(kqwl_owner, new_qos);
+ } else if (old_override == THREAD_QOS_UNSPECIFIED) {
+ thread_add_kevent_override(kqwl_owner, new_override);
+ } else if (new_override == THREAD_QOS_UNSPECIFIED) {
+ thread_drop_kevent_override(kqwl_owner);
+ } else { /* old_override != new_override */
+ thread_update_kevent_override(kqwl_owner, new_override);
}
- kqr->kqr_dsync_owner_qos = new_qos;
-
- if (new_ipc_override_is_sync &&
- !kqr->kqr_owner_override_is_sync) {
- thread_add_sync_ipc_override(kqwl_owner);
- } else if (!new_ipc_override_is_sync &&
- kqr->kqr_owner_override_is_sync) {
- thread_drop_sync_ipc_override(kqwl_owner);
- }
- kqr->kqr_owner_override_is_sync = new_ipc_override_is_sync;
}
/*
* apply the diffs to the servicer
*/
- if (static_thread) {
- /*
- * Statically bound thread
- *
- * These threads don't participates in QoS overrides today, just wakeup
- * the thread blocked on this kqueue if a new event arrived.
- */
-
- switch (op) {
- case KQWL_UTQ_UPDATE_WAKEUP_QOS:
- case KQWL_UTQ_UPDATE_STAYACTIVE_QOS:
- case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS:
- break;
-
- case KQWL_UTQ_RESET_WAKEUP_OVERRIDE:
- case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE:
- case KQWL_UTQ_REDRIVE_EVENTS:
- case KQWL_UTQ_SET_ASYNC_QOS:
- case KQWL_UTQ_SET_SYNC_WAITERS_QOS:
- panic("should never be called");
- break;
- }
-
- kqlock_held(kq);
-
- if ((kqr->kqr_state & KQR_BOUND) && (kqr->kqr_state & KQR_WAKEUP)) {
- assert(servicer && !is_workqueue_thread(servicer));
- if (kq->kq_state & (KQ_SLEEP | KQ_SEL)) {
- kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
- waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, KQ_EVENT,
- THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
- }
- }
- } else if ((kqr->kqr_state & KQR_THREQUESTED) == 0) {
+ if (!kqr_thread_requested(kqr)) {
/*
* No servicer, nor thread-request
 *
 * Make a new thread request, unless there is an owner or if there is no
 * asynchronous work in the first place.
*/
- if (kqwl_owner == THREAD_NULL && (kqr->kqr_state & KQR_WAKEUP)) {
- kqworkloop_request_thread(kqwl, new_qos);
+ if (kqwl_owner == NULL && kqr->tr_kq_wakeup) {
+ int initiate_flags = 0;
+ if (op == KQWL_UTQ_UNBINDING) {
+ initiate_flags = WORKQ_THREADREQ_ATTEMPT_REBIND;
+ }
+ kqueue_threadreq_initiate(kq, kqr, new_override, initiate_flags);
}
- } else if ((kqr->kqr_state & KQR_BOUND) == 0 &&
- (kqwl_owner || (kqr->kqr_state & KQR_WAKEUP) == 0)) {
+ } else if (servicer) {
/*
- * No servicer, thread request in flight we want to cancel
+ * Servicer in flight
*
- * We just got rid of the last knote of the kqueue or noticed an owner
- * with a thread request still in flight, take it back.
+ * Just apply the diff to the servicer
*/
- ret = (*pthread_functions->workq_threadreq_modify)(kqwl->kqwl_p,
- &kqr->kqr_req, WORKQ_THREADREQ_CANCEL, 0, 0);
- if (ret == 0) {
- kqr->kqr_state &= ~KQR_THREQUESTED;
- kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF);
+ struct uthread *ut = get_bsdthread_info(servicer);
+ if (ut->uu_kqueue_override != new_override) {
+ if (ut->uu_kqueue_override == THREAD_QOS_UNSPECIFIED) {
+ thread_add_servicer_override(servicer, new_override);
+ } else if (new_override == THREAD_QOS_UNSPECIFIED) {
+ thread_drop_servicer_override(servicer);
+ } else { /* ut->uu_kqueue_override != new_override */
+ thread_update_servicer_override(servicer, new_override);
+ }
+ ut->uu_kqueue_override = new_override;
+ qos_changed = TRUE;
}
- } else {
- boolean_t qos_changed = FALSE;
-
+ } else if (new_override == THREAD_QOS_UNSPECIFIED) {
/*
- * Servicer or request is in flight
+ * No events to deliver anymore.
*
- * Just apply the diff to the servicer or the thread request
+ * However canceling with turnstiles is challenging, so the fact that
+		 * the request isn't useful will be discovered by the servicer itself
+ * later on.
*/
- if (kqr->kqr_state & KQR_BOUND) {
- servicer = kqr->kqr_thread;
- struct uthread *ut = get_bsdthread_info(servicer);
- if (ut->uu_kqueue_qos_index != new_qos) {
- if (ut->uu_kqueue_qos_index == THREAD_QOS_UNSPECIFIED) {
- thread_add_ipc_override(servicer, new_qos);
- } else if (new_qos == THREAD_QOS_UNSPECIFIED) {
- thread_drop_ipc_override(servicer);
- } else /* ut->uu_kqueue_qos_index != new_qos */ {
- thread_update_ipc_override(servicer, new_qos);
- }
- ut->uu_kqueue_qos_index = new_qos;
- qos_changed = TRUE;
- }
+ } else if (old_override != new_override) {
+ /*
+ * Request is in flight
+ *
+ * Apply the diff to the thread request
+ */
+ kqueue_threadreq_modify(kq, kqr, new_override, WORKQ_THREADREQ_NONE);
+ qos_changed = TRUE;
+ }
- if (new_ipc_override_is_sync != ut->uu_kqueue_override_is_sync) {
- if (new_ipc_override_is_sync &&
- !ut->uu_kqueue_override_is_sync) {
- thread_add_sync_ipc_override(servicer);
- } else if (!new_ipc_override_is_sync &&
- ut->uu_kqueue_override_is_sync) {
- thread_drop_sync_ipc_override(servicer);
- }
- ut->uu_kqueue_override_is_sync = new_ipc_override_is_sync;
- qos_changed = TRUE;
- }
- } else if (old_qos != new_qos) {
- assert(new_qos);
- kqworkloop_threadreq_modify(kqwl, new_qos);
- qos_changed = TRUE;
- }
- if (qos_changed) {
- servicer = kqr->kqr_thread;
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST),
- kqwl->kqwl_dynamicid,
- (kqr->kqr_state & KQR_BOUND) ? thread_tid(servicer) : 0,
- (kqr->kqr_qos_index << 16) | (new_qos << 8) | new_ipc_override_is_sync,
- (kqr->kqr_override_index << 8) | kqr->kqr_state);
- }
+ if (qos_changed) {
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST), kqwl->kqwl_dynamicid,
+ thread_tid(servicer), kqr->tr_kq_qos_index,
+ (kqr->tr_kq_override_index << 16) | kqr->tr_kq_wakeup);
}
}
static void
-kqworkloop_request_help(struct kqworkloop *kqwl, kq_index_t qos_index)
+kqworkloop_wakeup(struct kqworkloop *kqwl, kq_index_t qos)
{
- /* convert to thread qos value */
- assert(qos_index < KQWL_NBUCKETS);
+ if ((kqwl->kqwl_state & KQ_PROCESSING) &&
+ kqr_thread(&kqwl->kqwl_request) == current_thread()) {
+ /*
+ * kqworkloop_end_processing() will perform the required QoS
+ * computations when it unsets the processing mode.
+ */
+ return;
+ }
- kqwl_req_lock(kqwl);
- kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS, qos_index);
- kqwl_req_unlock(kqwl);
+ kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS, qos);
}
-/*
- * These arrays described the low and high qindexes for a given qos_index.
- * The values come from the chart in <sys/eventvar.h> (must stay in sync).
- */
-static kq_index_t _kqwq_base_index[KQWQ_NQOS] = {0, 0, 6, 11, 15, 18, 20, 21};
-static kq_index_t _kqwq_high_index[KQWQ_NQOS] = {0, 5, 10, 14, 17, 19, 20, 21};
-
static struct kqtailq *
-kqueue_get_base_queue(struct kqueue *kq, kq_index_t qos_index)
+kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn)
{
- if (kq->kq_state & KQ_WORKQ) {
- assert(qos_index < KQWQ_NQOS);
- return &kq->kq_queue[_kqwq_base_index[qos_index]];
- } else if (kq->kq_state & KQ_WORKLOOP) {
- assert(qos_index < KQWL_NBUCKETS);
- return &kq->kq_queue[qos_index];
+ if (kq.kq->kq_state & KQ_WORKLOOP) {
+ return &kq.kqwl->kqwl_suppressed;
+ } else if (kq.kq->kq_state & KQ_WORKQ) {
+ return &kq.kqwq->kqwq_suppressed[kn->kn_qos_index];
} else {
- assert(qos_index == QOS_INDEX_KQFILE);
- return &kq->kq_queue[QOS_INDEX_KQFILE];
+ return &kq.kqf->kqf_suppressed;
}
}
-static struct kqtailq *
-kqueue_get_high_queue(struct kqueue *kq, kq_index_t qos_index)
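+/*
+ * kqueue_alloc_turnstile - lazily allocate the turnstile of a workloop kqueue
+ *
+ * KQ_HAS_TURNSTILE acts as the publication bit: the unlocked fast path pairs
+ * a dependency-ordered load with the release barrier set under the kqlock
+ * below, and a racing allocation simply frees its spare turnstile.
+ * Non-workloop kqueues never get a turnstile and return TURNSTILE_NULL.
+ */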
+struct turnstile *
+kqueue_alloc_turnstile(kqueue_t kqu)
{
- if (kq->kq_state & KQ_WORKQ) {
- assert(qos_index < KQWQ_NQOS);
- return &kq->kq_queue[_kqwq_high_index[qos_index]];
- } else if (kq->kq_state & KQ_WORKLOOP) {
- assert(qos_index < KQWL_NBUCKETS);
- return &kq->kq_queue[KQWL_BUCKET_STAYACTIVE];
+ struct kqworkloop *kqwl = kqu.kqwl;
+ kq_state_t kq_state;
+
+ kq_state = os_atomic_load(&kqu.kq->kq_state, dependency);
+ if (kq_state & KQ_HAS_TURNSTILE) {
+ /* force a dependency to pair with the atomic or with release below */
+ return os_atomic_load_with_dependency_on(&kqwl->kqwl_turnstile,
+ (uintptr_t)kq_state);
+ }
+
+ if (!(kq_state & KQ_WORKLOOP)) {
+ return TURNSTILE_NULL;
+ }
+
+ struct turnstile *ts = turnstile_alloc(), *free_ts = TURNSTILE_NULL;
+ bool workq_locked = false;
+
+ kqlock(kqu);
+
+ if (filt_wlturnstile_interlock_is_workq(kqwl)) {
+ workq_locked = true;
+ workq_kern_threadreq_lock(kqwl->kqwl_p);
+ }
+
+ if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
+ free_ts = ts;
+ ts = kqwl->kqwl_turnstile;
} else {
- assert(qos_index == QOS_INDEX_KQFILE);
- return &kq->kq_queue[QOS_INDEX_KQFILE];
+ ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
+ ts, TURNSTILE_WORKLOOPS);
+
+ /* release-barrier to pair with the unlocked load of kqwl_turnstile above */
+ os_atomic_or(&kqwl->kqwl_state, KQ_HAS_TURNSTILE, release);
+
+ if (filt_wlturnstile_interlock_is_workq(kqwl)) {
+ workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
+ &kqwl->kqwl_request, kqwl->kqwl_owner,
+ ts, TURNSTILE_IMMEDIATE_UPDATE);
+ /*
+			 * The workq may no longer be the interlock after this, in which
+			 * case the inheritor wasn't updated.
+ */
+ }
+ if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
+ filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
+ }
+ }
+
+ if (workq_locked) {
+ workq_kern_threadreq_unlock(kqwl->kqwl_p);
}
+
+ kqunlock(kqu);
+
+ if (free_ts) {
+ turnstile_deallocate(free_ts);
+ } else {
+ turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
+ }
+ return ts;
}
-static int
-kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index)
+__attribute__((always_inline))
+struct turnstile *
+kqueue_turnstile(kqueue_t kqu)
{
- struct kqtailq *base_queue = kqueue_get_base_queue(kq, qos_index);
- struct kqtailq *queue = kqueue_get_high_queue(kq, qos_index);
+ kq_state_t kq_state = os_atomic_load(&kqu.kq->kq_state, relaxed);
+ if (kq_state & KQ_WORKLOOP) {
+ return os_atomic_load(&kqu.kqwl->kqwl_turnstile, relaxed);
+ }
+ return TURNSTILE_NULL;
+}
- do {
- if (!TAILQ_EMPTY(queue))
- return 0;
- } while (queue-- > base_queue);
- return 1;
+__attribute__((always_inline))
+struct turnstile *
+kqueue_threadreq_get_turnstile(workq_threadreq_t kqr)
+{
+ struct kqworkloop *kqwl = kqr_kqworkloop(kqr);
+ if (kqwl) {
+ return os_atomic_load(&kqwl->kqwl_turnstile, relaxed);
+ }
+ return TURNSTILE_NULL;
+}
+
+static void
+kqworkloop_set_overcommit(struct kqworkloop *kqwl)
+{
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
+
+ /*
+ * This test is racy, but since we never remove this bit,
+ * it allows us to avoid taking a lock.
+ */
+ if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
+ return;
+ }
+
+ kqlock_held(kqwl);
+
+ if (kqr_thread_requested_pending(kqr)) {
+ kqueue_threadreq_modify(kqwl, kqr, kqr->tr_qos,
+ WORKQ_THREADREQ_MAKE_OVERCOMMIT);
+ } else {
+ kqr->tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
+ }
+}
+
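+/*
+ * kqworkq_update_override - propagate a knote override to its workq bucket
+ *
+ * When a knote fires above its registration QoS, raise the override of the
+ * bucket's thread request and, if a servicer is already bound, apply a
+ * kevent override to that thread as well.
+ */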
+static void
+kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn,
+ kq_index_t override_index)
+{
+ workq_threadreq_t kqr;
+ kq_index_t old_override_index;
+ kq_index_t queue_index = kn->kn_qos_index;
+
+ if (override_index <= queue_index) {
+ return;
+ }
+
+ kqr = kqworkq_get_request(kqwq, queue_index);
+
+ kqlock_held(kqwq);
+
+ old_override_index = kqr->tr_kq_override_index;
+ if (override_index > MAX(kqr->tr_kq_qos_index, old_override_index)) {
+ thread_t servicer = kqr_thread(kqr);
+ kqr->tr_kq_override_index = override_index;
+
+		/* apply the override to the servicing thread, if one is already bound */
+ if (servicer) {
+ if (old_override_index) {
+ thread_update_kevent_override(servicer, override_index);
+ } else {
+ thread_add_kevent_override(servicer, override_index);
+ }
+ }
+ }
+}
+
+static void
+kqueue_update_override(kqueue_t kqu, struct knote *kn, thread_qos_t qos)
+{
+ if (kqu.kq->kq_state & KQ_WORKLOOP) {
+ kqworkloop_update_threads_qos(kqu.kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
+ qos);
+ } else {
+ kqworkq_update_override(kqu.kqwq, kn, qos);
+ }
}
-static struct kqtailq *
-kqueue_get_suppressed_queue(struct kqueue *kq, kq_index_t qos_index)
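+/*
+ * kqworkloop_unbind_locked - detach the servicer thread under the kqlock
+ *
+ * Clears the uthread binding, drops the servicer override right away when
+ * asked to (KQWL_OVERRIDE_DROP_IMMEDIATELY), resets the turnstile inheritor
+ * if the workloop has no owner, and returns the thread request to idle.
+ */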
+static void
+kqworkloop_unbind_locked(struct kqworkloop *kqwl, thread_t thread,
+ enum kqwl_unbind_locked_mode how)
{
- struct kqtailq *res;
- struct kqrequest *kqr;
+ struct uthread *ut = get_bsdthread_info(thread);
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
- if (kq->kq_state & KQ_WORKQ) {
- struct kqworkq *kqwq = (struct kqworkq *)kq;
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND), kqwl->kqwl_dynamicid,
+ thread_tid(thread), 0, 0);
- kqr = kqworkq_get_request(kqwq, qos_index);
- res = &kqr->kqr_suppressed;
- } else if (kq->kq_state & KQ_WORKLOOP) {
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+ kqlock_held(kqwl);
- kqr = &kqwl->kqwl_request;
- res = &kqr->kqr_suppressed;
- } else {
- struct kqfile *kqf = (struct kqfile *)kq;
- res = &kqf->kqf_suppressed;
+ assert(ut->uu_kqr_bound == kqr);
+ ut->uu_kqr_bound = NULL;
+ if (how == KQWL_OVERRIDE_DROP_IMMEDIATELY &&
+ ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
+ thread_drop_servicer_override(thread);
+ ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
}
- return res;
+
+ if (kqwl->kqwl_owner == NULL && kqwl->kqwl_turnstile) {
+ turnstile_update_inheritor(kqwl->kqwl_turnstile,
+ TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
+ turnstile_update_inheritor_complete(kqwl->kqwl_turnstile,
+ TURNSTILE_INTERLOCK_HELD);
+ }
+
+ kqr->tr_thread = THREAD_NULL;
+ kqr->tr_state = WORKQ_TR_STATE_IDLE;
+ kqwl->kqwl_state &= ~KQ_R2K_ARMED;
}
-static kq_index_t
-knote_get_queue_index(struct knote *kn)
+static void
+kqworkloop_unbind_delayed_override_drop(thread_t thread)
{
- kq_index_t override_index = knote_get_qos_override_index(kn);
- kq_index_t qos_index = knote_get_qos_index(kn);
- struct kqueue *kq = knote_get_kq(kn);
- kq_index_t res;
-
- if (kq->kq_state & KQ_WORKQ) {
- res = _kqwq_base_index[qos_index];
- if (override_index > qos_index)
- res += override_index - qos_index;
- assert(res <= _kqwq_high_index[qos_index]);
- } else if (kq->kq_state & KQ_WORKLOOP) {
- res = MAX(override_index, qos_index);
- assert(res < KQWL_NBUCKETS);
- } else {
- assert(qos_index == QOS_INDEX_KQFILE);
- assert(override_index == QOS_INDEX_KQFILE);
- res = QOS_INDEX_KQFILE;
+ struct uthread *ut = get_bsdthread_info(thread);
+ assert(ut->uu_kqr_bound == NULL);
+ if (ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
+ thread_drop_servicer_override(thread);
+ ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
}
- return res;
}
-static struct kqtailq *
-knote_get_queue(struct knote *kn)
+/*
+ * kqworkloop_unbind - Unbind the servicer thread of a workloop kqueue
+ *
+ * It will acknowledge events, and possibly request a new thread if:
+ * - there were active events left
+ * - we pended waitq hook callouts during processing
+ * - we pended wakeups while processing (or unsuppressing)
+ *
+ * Called with kqueue lock held.
+ */
+static void
+kqworkloop_unbind(struct kqworkloop *kqwl)
{
- kq_index_t qindex = knote_get_queue_index(kn);
+ struct kqueue *kq = &kqwl->kqwl_kqueue;
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
+ thread_t thread = kqr_thread_fast(kqr);
+ int op = KQWL_UTQ_PARKING;
+ kq_index_t qos_override = THREAD_QOS_UNSPECIFIED;
- return &(knote_get_kq(kn))->kq_queue[qindex];
-}
+ assert(thread == current_thread());
-static kq_index_t
-knote_get_req_index(struct knote *kn)
-{
- return kn->kn_req_index;
-}
+ kqlock(kqwl);
-static kq_index_t
-knote_get_qos_index(struct knote *kn)
-{
- return kn->kn_qos_index;
+ /*
+	 * Forcing the KQ_PROCESSING flag defers the QoS updates caused by
+	 * unsuppressing knotes until the eventual call to
+	 * kqworkloop_update_threads_qos() below.
+ */
+ assert((kq->kq_state & KQ_PROCESSING) == 0);
+ if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
+ kq->kq_state |= KQ_PROCESSING;
+ qos_override = kqworkloop_acknowledge_events(kqwl);
+ kq->kq_state &= ~KQ_PROCESSING;
+ }
+
+ kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
+ kqworkloop_update_threads_qos(kqwl, op, qos_override);
+
+ kqunlock(kqwl);
+
+ /*
+ * Drop the override on the current thread last, after the call to
+ * kqworkloop_update_threads_qos above.
+ */
+ kqworkloop_unbind_delayed_override_drop(thread);
+
+ /* If last reference, dealloc the workloop kq */
+ kqworkloop_release(kqwl);
}
-static void
-knote_set_qos_index(struct knote *kn, kq_index_t qos_index)
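+/*
+ * kqworkq_unbind_locked - detach a workq servicer under the kqlock
+ *
+ * Clears the uthread binding and returns the bucket's thread request to
+ * idle.  The previous override is returned so the caller can drop it from
+ * the thread once the relevant locks are released.
+ */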
+static thread_qos_t
+kqworkq_unbind_locked(struct kqworkq *kqwq,
+ workq_threadreq_t kqr, thread_t thread)
{
- struct kqueue *kq = knote_get_kq(kn);
+ struct uthread *ut = get_bsdthread_info(thread);
+ kq_index_t old_override = kqr->tr_kq_override_index;
- assert(qos_index < KQWQ_NQOS);
- assert((kn->kn_status & KN_QUEUED) == 0);
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND), -1,
+ thread_tid(kqr_thread(kqr)), kqr->tr_kq_qos_index, 0);
- if (kq->kq_state & KQ_WORKQ) {
- assert(qos_index > THREAD_QOS_UNSPECIFIED);
- } else if (kq->kq_state & KQ_WORKLOOP) {
- /* XXX this policy decision shouldn't be here */
- if (qos_index == THREAD_QOS_UNSPECIFIED)
- qos_index = THREAD_QOS_LEGACY;
- } else
- qos_index = QOS_INDEX_KQFILE;
+ kqlock_held(kqwq);
- /* always set requested */
- kn->kn_req_index = qos_index;
+ assert(ut->uu_kqr_bound == kqr);
+ ut->uu_kqr_bound = NULL;
+ kqr->tr_thread = THREAD_NULL;
+ kqr->tr_state = WORKQ_TR_STATE_IDLE;
+ kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
+ kqwq->kqwq_state &= ~KQ_R2K_ARMED;
- /* only adjust in-use qos index when not suppressed */
- if ((kn->kn_status & KN_SUPPRESSED) == 0)
- kn->kn_qos_index = qos_index;
+ return old_override;
}
+/*
+ * kqworkq_unbind - unbind of a workq kqueue from a thread
+ *
+ * We may have to request new threads.
+ * This can happen if there are no waiting processing threads and:
+ * - there were active events we never got to (count > 0)
+ * - we pended waitq hook callouts during processing
+ * - we pended wakeups while processing (or unsuppressing)
+ */
static void
-knote_set_qos_overcommit(struct knote *kn)
+kqworkq_unbind(proc_t p, workq_threadreq_t kqr)
{
- struct kqueue *kq = knote_get_kq(kn);
- struct kqrequest *kqr;
+ struct kqworkq *kqwq = (struct kqworkq *)p->p_fd->fd_wqkqueue;
+ __assert_only int rc;
- /* turn overcommit on for the appropriate thread request? */
- if (kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
- if (kq->kq_state & KQ_WORKQ) {
- kq_index_t qos_index = knote_get_qos_index(kn);
- struct kqworkq *kqwq = (struct kqworkq *)kq;
+ kqlock(kqwq);
+ rc = kqworkq_acknowledge_events(kqwq, kqr, 0, KQWQAE_UNBIND);
+ assert(rc == -1);
+ kqunlock(kqwq);
+}
- kqr = kqworkq_get_request(kqwq, qos_index);
+workq_threadreq_t
+kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
+{
+ assert(qos_index < KQWQ_NBUCKETS);
+ return &kqwq->kqwq_request[qos_index];
+}
- kqwq_req_lock(kqwq);
- kqr->kqr_state |= KQR_THOVERCOMMIT;
- kqwq_req_unlock(kqwq);
- } else if (kq->kq_state & KQ_WORKLOOP) {
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
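+/*
+ * knote_reset_priority - apply a (new) registration priority to a knote
+ *
+ * Normalizes the pthread priority for the kqueue flavor (on workqueues an
+ * unspecified QoS maps to the manager bucket, and kqfiles carry no QoS),
+ * records it in kn_qos and refreshes kn_qos_override.  Suppressed knotes
+ * get a manual override pushed instead of changing buckets; otherwise the
+ * knote is dequeued so it can be requeued on its new bucket.
+ */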
+static void
+knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp)
+{
+ kq_index_t qos = _pthread_priority_thread_qos(pp);
+
+ if (kqu.kq->kq_state & KQ_WORKLOOP) {
+ assert((pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) == 0);
+ pp = _pthread_priority_normalize(pp);
+ } else if (kqu.kq->kq_state & KQ_WORKQ) {
+ if (qos == THREAD_QOS_UNSPECIFIED) {
+ /* On workqueues, outside of QoS means MANAGER */
+ qos = KQWQ_QOS_MANAGER;
+ pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+ } else {
+ pp = _pthread_priority_normalize(pp);
+ }
+ } else {
+ pp = _pthread_unspecified_priority();
+ qos = THREAD_QOS_UNSPECIFIED;
+ }
- kqr = &kqwl->kqwl_request;
+ kn->kn_qos = pp;
- kqwl_req_lock(kqwl);
- kqr->kqr_state |= KQR_THOVERCOMMIT;
- kqwl_req_unlock(kqwl);
- }
+ if ((kn->kn_status & KN_MERGE_QOS) == 0 || qos > kn->kn_qos_override) {
+ /* Never lower QoS when in "Merge" mode */
+ kn->kn_qos_override = qos;
}
-}
-static kq_index_t
-knote_get_qos_override_index(struct knote *kn)
-{
- return kn->kn_qos_override;
+ /* only adjust in-use qos index when not suppressed */
+ if (kn->kn_status & KN_SUPPRESSED) {
+ kqueue_update_override(kqu, kn, qos);
+ } else if (kn->kn_qos_index != qos) {
+ knote_dequeue(kqu, kn);
+ kn->kn_qos_index = qos;
+ }
}
static void
-knote_set_qos_override_index(struct knote *kn, kq_index_t override_index,
- boolean_t override_is_sync)
+knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result)
{
- struct kqueue *kq = knote_get_kq(kn);
- kq_index_t qos_index = knote_get_qos_index(kn);
- kq_index_t old_override_index = knote_get_qos_override_index(kn);
- boolean_t old_override_is_sync = kn->kn_qos_override_is_sync;
- uint32_t flags = 0;
+ thread_qos_t qos_index = (result >> FILTER_ADJUST_EVENT_QOS_SHIFT) & 7;
+
+ kqlock_held(kq);
- assert((kn->kn_status & KN_QUEUED) == 0);
+ assert(result & FILTER_ADJUST_EVENT_QOS_BIT);
+ assert(qos_index < THREAD_QOS_LAST);
+
+ /*
+ * Early exit for knotes that should not change QoS
+ */
+ if (__improbable(!knote_fops(kn)->f_adjusts_qos)) {
+ panic("filter %d cannot change QoS", kn->kn_filtid);
+ } else if (__improbable(!knote_has_qos(kn))) {
+ return;
+ }
- if (override_index == KQWQ_QOS_MANAGER) {
- assert(qos_index == KQWQ_QOS_MANAGER);
+ /*
+	 * knotes with the FALLBACK flag only use their registration QoS when the
+	 * incoming event carries no QoS; for knotes without the flag, the
+	 * registration QoS acts as a floor.
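+	 *
+	 * Illustrative example (assuming a registration QoS of UTILITY): a
+	 * FALLBACK knote keeps UTILITY only when the event carries no QoS, while
+	 * a regular knote raises a BACKGROUND event up to UTILITY.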
+ */
+ thread_qos_t req_qos = _pthread_priority_thread_qos_fast(kn->kn_qos);
+ if (kn->kn_qos & _PTHREAD_PRIORITY_FALLBACK_FLAG) {
+ if (qos_index == THREAD_QOS_UNSPECIFIED) {
+ qos_index = req_qos;
+ }
} else {
- assert(override_index < KQWQ_QOS_MANAGER);
+ if (qos_index < req_qos) {
+ qos_index = req_qos;
+ }
+ }
+ if ((kn->kn_status & KN_MERGE_QOS) && (qos_index < kn->kn_qos_override)) {
+ /* Never lower QoS when in "Merge" mode */
+ return;
}
- kn->kn_qos_override = override_index;
- kn->kn_qos_override_is_sync = override_is_sync;
+ if ((kn->kn_status & KN_LOCKED) && (kn->kn_status & KN_POSTING)) {
+ /*
+ * When we're trying to update the QoS override and that both an
+ * f_event() and other f_* calls are running concurrently, any of these
+ * in flight calls may want to perform overrides that aren't properly
+ * serialized with each other.
+ *
+ * The first update that observes this racy situation enters a "Merge"
+ * mode which causes subsequent override requests to saturate the
+ * override instead of replacing its value.
+ *
+ * This mode is left when knote_unlock() or knote_post()
+ * observe that no other f_* routine is in flight.
+ */
+ kn->kn_status |= KN_MERGE_QOS;
+ }
/*
- * If this is a workq/workloop kqueue, apply the override to the
- * servicing thread.
+ * Now apply the override if it changed.
*/
- if (kq->kq_state & KQ_WORKQ) {
- struct kqworkq *kqwq = (struct kqworkq *)kq;
- assert(qos_index > THREAD_QOS_UNSPECIFIED);
- kqworkq_update_override(kqwq, qos_index, override_index);
- } else if (kq->kq_state & KQ_WORKLOOP) {
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+ if (kn->kn_qos_override == qos_index) {
+ return;
+ }
- if ((kn->kn_status & KN_SUPPRESSED) == KN_SUPPRESSED) {
- flags = flags | KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS;
+ kn->kn_qos_override = qos_index;
- if (override_index == THREAD_QOS_USER_INTERACTIVE
- && override_is_sync) {
- flags = flags | KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI;
- }
+ if (kn->kn_status & KN_SUPPRESSED) {
+ /*
+ * For suppressed events, the kn_qos_index field cannot be touched as it
+		 * tells us which suppress queue the knote is on for a kqworkq.
+ *
+ * Also, there's no natural push applied on the kqueues when this field
+ * changes anyway. We hence need to apply manual overrides in this case,
+ * which will be cleared when the events are later acknowledged.
+ */
+ kqueue_update_override(kq, kn, qos_index);
+ } else if (kn->kn_qos_index != qos_index) {
+ knote_dequeue(kq, kn);
+ kn->kn_qos_index = qos_index;
+ }
+}
- if (old_override_index == THREAD_QOS_USER_INTERACTIVE
- && old_override_is_sync) {
- flags = flags | KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI;
- }
- }
+/*
+ * Called back from waitq code when no threads waiting and the hook was set.
+ *
+ * Preemption is disabled - minimal work can be done in this context!!!
+ */
+void
+waitq_set__CALLING_PREPOST_HOOK__(waitq_set_prepost_hook_t *kq_hook)
+{
+ kqueue_t kqu;
+
+ kqu.kq = __container_of(kq_hook, struct kqueue, kq_waitq_hook);
+ assert(kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));
- assert(qos_index > THREAD_QOS_UNSPECIFIED);
- kqworkloop_update_override(kqwl, qos_index, override_index, flags);
+ kqlock(kqu);
+
+ if (kqu.kq->kq_count > 0) {
+ if (kqu.kq->kq_state & KQ_WORKLOOP) {
+ kqworkloop_wakeup(kqu.kqwl, KQWL_BUCKET_STAYACTIVE);
+ } else {
+ kqworkq_wakeup(kqu.kqwq, KQWQ_QOS_MANAGER);
+ }
}
+
+ kqunlock(kqu);
}
-static kq_index_t
-knote_get_sync_qos_override_index(struct knote *kn)
+void
+klist_init(struct klist *list)
{
- return kn->kn_qos_sync_override;
+ SLIST_INIT(list);
}
-static void
-kqworkq_update_override(struct kqworkq *kqwq, kq_index_t qos_index, kq_index_t override_index)
+
+/*
+ * Query/Post each knote in the object's list
+ *
+ * The object lock protects the list. It is assumed
+ * that the filter/event routine for the object can
+ * determine that the object is already locked (via
+ * the hint) and not deadlock itself.
+ *
+ * The object lock should also hold off pending
+ * detach/drop operations.
+ */
+void
+knote(struct klist *list, long hint)
{
- struct kqrequest *kqr;
- kq_index_t old_override_index;
+ struct knote *kn;
- if (override_index <= qos_index) {
- return;
+ SLIST_FOREACH(kn, list, kn_selnext) {
+ knote_post(kn, hint);
}
+}
+
+/*
+ * attach a knote to the specified list. Return true if this is the first entry.
+ * The list is protected by whatever lock the object it is associated with uses.
+ */
+int
+knote_attach(struct klist *list, struct knote *kn)
+{
+ int ret = SLIST_EMPTY(list);
+ SLIST_INSERT_HEAD(list, kn, kn_selnext);
+ return ret;
+}
+
+/*
+ * detach a knote from the specified list. Return true if that was the last entry.
+ * The list is protected by whatever lock the object it is associated with uses.
+ */
+int
+knote_detach(struct klist *list, struct knote *kn)
+{
+ SLIST_REMOVE(list, kn, knote, kn_selnext);
+ return SLIST_EMPTY(list);
+}
- kqr = kqworkq_get_request(kqwq, qos_index);
+/*
+ * knote_vanish - Indicate that the source has vanished
+ *
+ * If the knote has requested EV_VANISHED delivery,
+ * arrange for that. Otherwise, deliver a NOTE_REVOKE
+ * event for backward compatibility.
+ *
+ * The knote is marked as having vanished, but is not
+ * actually detached from the source in this instance.
+ * The actual detach is deferred until the knote drop.
+ *
+ * Our caller already has the object lock held. Calling
+ * the detach routine would try to take that lock
+ * recursively - which likely is not supported.
+ */
+void
+knote_vanish(struct klist *list, bool make_active)
+{
+ struct knote *kn;
+ struct knote *kn_next;
- kqwq_req_lock(kqwq);
- old_override_index = kqr->kqr_override_index;
- if (override_index > MAX(kqr->kqr_qos_index, old_override_index)) {
- kqr->kqr_override_index = override_index;
+ SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
+ struct kqueue *kq = knote_get_kq(kn);
- /* apply the override to [incoming?] servicing thread */
- if (kqr->kqr_state & KQR_BOUND) {
- thread_t wqthread = kqr->kqr_thread;
-
- /* only apply if non-manager */
- assert(wqthread);
- if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
- if (old_override_index)
- thread_update_ipc_override(wqthread, override_index);
- else
- thread_add_ipc_override(wqthread, override_index);
- }
+ kqlock(kq);
+ if (__probable(kn->kn_status & KN_REQVANISH)) {
+ /*
+ * If EV_VANISH supported - prepare to deliver one
+ */
+ kn->kn_status |= KN_VANISHED;
+ } else {
+ /*
+ * Handle the legacy way to indicate that the port/portset was
+ * deallocated or left the current Mach portspace (modern technique
+ * is with an EV_VANISHED protocol).
+ *
+ * Deliver an EV_EOF event for these changes (hopefully it will get
+ * delivered before the port name recycles to the same generation
+ * count and someone tries to re-register a kevent for it or the
+ * events are udata-specific - avoiding a conflict).
+ */
+ kn->kn_flags |= EV_EOF | EV_ONESHOT;
+ }
+ if (make_active) {
+ knote_activate(kq, kn, FILTER_ACTIVE);
}
+ kqunlock(kq);
}
- kqwq_req_unlock(kqwq);
}
-/* called with the kqworkq lock held */
-static void
-kqworkq_bind_thread_impl(
- struct kqworkq *kqwq,
- kq_index_t qos_index,
- thread_t thread,
- unsigned int flags)
+/*
+ * Force a lazy allocation of the waitqset link
+ * of the kq_wqs associated with the kn
+ * if it wasn't already allocated.
+ *
+ * This allows knote_link_waitq to never block
+ * if reserved_link is not NULL.
+ */
+void
+knote_link_waitqset_lazy_alloc(struct knote *kn)
+{
+ struct kqueue *kq = knote_get_kq(kn);
+ waitq_set_lazy_init_link(&kq->kq_wqs);
+}
+
+/*
+ * Check if a lazy allocation for the waitqset link
+ * of the kq_wqs is needed.
+ */
+boolean_t
+knote_link_waitqset_should_lazy_alloc(struct knote *kn)
+{
+ struct kqueue *kq = knote_get_kq(kn);
+ return waitq_set_should_lazy_init_link(&kq->kq_wqs);
+}
+
+/*
+ * For a given knote, link a provided wait queue directly with the kqueue.
+ * Wakeups will happen via recursive wait queue support. But nothing will move
+ * the knote to the active list at wakeup (nothing calls knote()). Instead,
+ * we permanently enqueue them here.
+ *
+ * kqueue and knote references are held by caller.
+ * waitq locked by caller.
+ *
+ * caller provides the wait queue link structure and ensures that the kq->kq_wqs
+ * is linked by previously calling knote_link_waitqset_lazy_alloc.
+ */
+int
+knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link)
{
- /* request lock must be held */
- kqwq_req_held(kqwq);
+ struct kqueue *kq = knote_get_kq(kn);
+ kern_return_t kr;
- struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
- assert(kqr->kqr_state & KQR_THREQUESTED);
+ kr = waitq_link(wq, &kq->kq_wqs, WAITQ_ALREADY_LOCKED, reserved_link);
+ if (kr == KERN_SUCCESS) {
+ knote_markstayactive(kn);
+ return 0;
+ } else {
+ return EINVAL;
+ }
+}
- if (qos_index == KQWQ_QOS_MANAGER)
- flags |= KEVENT_FLAG_WORKQ_MANAGER;
+/*
+ * Unlink the provided wait queue from the kqueue associated with a knote.
+ * Also remove it from the magic list of directly attached knotes.
+ *
+ * Note that the unlink may have already happened from the other side, so
+ * ignore any failures to unlink and just remove it from the kqueue list.
+ *
+ * On success, caller is responsible for the link structure
+ */
+int
+knote_unlink_waitq(struct knote *kn, struct waitq *wq)
+{
+ struct kqueue *kq = knote_get_kq(kn);
+ kern_return_t kr;
- struct uthread *ut = get_bsdthread_info(thread);
+ kr = waitq_unlink(wq, &kq->kq_wqs);
+ knote_clearstayactive(kn);
+ return (kr != KERN_SUCCESS) ? EINVAL : 0;
+}
- /*
- * If this is a manager, and the manager request bit is
- * not set, assure no other thread is bound. If the bit
- * is set, make sure the old thread is us (or not set).
- */
- if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
- if ((kqr->kqr_state & KQR_BOUND) == 0) {
- kqr->kqr_state |= (KQR_BOUND | KQWQ_THMANAGER);
- TAILQ_INIT(&kqr->kqr_suppressed);
- kqr->kqr_thread = thread;
- ut->uu_kqueue_bound = (struct kqueue *)kqwq;
- ut->uu_kqueue_qos_index = KQWQ_QOS_MANAGER;
- ut->uu_kqueue_flags = (KEVENT_FLAG_WORKQ |
- KEVENT_FLAG_WORKQ_MANAGER);
- } else {
- assert(kqr->kqr_state & KQR_BOUND);
- assert(thread == kqr->kqr_thread);
- assert(ut->uu_kqueue_bound == (struct kqueue *)kqwq);
- assert(ut->uu_kqueue_qos_index == KQWQ_QOS_MANAGER);
- assert(ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER);
- }
- return;
- }
+/*
+ * remove all knotes referencing a specified fd
+ *
+ * Entered with the proc_fd lock already held.
+ * It returns the same way, but may drop it temporarily.
+ */
+void
+knote_fdclose(struct proc *p, int fd)
+{
+ struct klist *list;
+ struct knote *kn;
+ KNOTE_LOCK_CTX(knlc);
- /* Just a normal one-queue servicing thread */
- assert(kqr->kqr_state & KQR_THREQUESTED);
- assert(kqr->kqr_qos_index == qos_index);
+restart:
+ list = &p->p_fd->fd_knlist[fd];
+ SLIST_FOREACH(kn, list, kn_link) {
+ struct kqueue *kq = knote_get_kq(kn);
- if ((kqr->kqr_state & KQR_BOUND) == 0) {
- kqr->kqr_state |= KQR_BOUND;
- TAILQ_INIT(&kqr->kqr_suppressed);
- kqr->kqr_thread = thread;
+ kqlock(kq);
- /* apply an ipc QoS override if one is needed */
- if (kqr->kqr_override_index) {
- assert(kqr->kqr_qos_index);
- assert(kqr->kqr_override_index > kqr->kqr_qos_index);
- assert(thread_get_ipc_override(thread) == THREAD_QOS_UNSPECIFIED);
- thread_add_ipc_override(thread, kqr->kqr_override_index);
+ if (kq->kq_p != p) {
+ panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
+ __func__, kq->kq_p, p);
}
- /* indicate that we are processing in the uthread */
- ut->uu_kqueue_bound = (struct kqueue *)kqwq;
- ut->uu_kqueue_qos_index = qos_index;
- ut->uu_kqueue_flags = flags;
- } else {
/*
- * probably syncronously bound AND post-request bound
- * this logic can go away when we get rid of post-request bind
+ * If the knote supports EV_VANISHED delivery,
+ * transition it to vanished mode (or skip over
+ * it if already vanished).
*/
- assert(kqr->kqr_state & KQR_BOUND);
- assert(thread == kqr->kqr_thread);
- assert(ut->uu_kqueue_bound == (struct kqueue *)kqwq);
- assert(ut->uu_kqueue_qos_index == qos_index);
- assert((ut->uu_kqueue_flags & flags) == flags);
- }
-}
-
-static void
-kqworkloop_update_override(
- struct kqworkloop *kqwl,
- kq_index_t qos_index,
- kq_index_t override_index,
- uint32_t flags)
-{
- struct kqrequest *kqr = &kqwl->kqwl_request;
-
- kqwl_req_lock(kqwl);
-
- /* Do not override on attached threads */
- if (kqr->kqr_state & KQR_BOUND) {
- assert(kqr->kqr_thread);
-
- if (kqwl->kqwl_kqueue.kq_state & KQ_NO_WQ_THREAD) {
- kqwl_req_unlock(kqwl);
- assert(!is_workqueue_thread(kqr->kqr_thread));
- return;
+ if (kn->kn_status & KN_VANISHED) {
+ kqunlock(kq);
+ continue;
}
- }
- /* Update sync ipc counts on kqr for suppressed knotes */
- if (flags & KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS) {
- kqworkloop_update_suppress_sync_count(kqr, flags);
- }
+ proc_fdunlock(p);
+ if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
+ /* the knote was dropped by someone, nothing to do */
+ } else if (kn->kn_status & KN_REQVANISH) {
+ kn->kn_status |= KN_VANISHED;
- if ((flags & KQWL_UO_UPDATE_OVERRIDE_LAZY) == 0) {
- kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
- MAX(qos_index, override_index));
- }
- kqwl_req_unlock(kqwl);
-}
+ kqunlock(kq);
+ knote_fops(kn)->f_detach(kn);
+ if (kn->kn_is_fd) {
+ fp_drop(p, kn->kn_id, kn->kn_fp, 0);
+ }
+ kn->kn_filtid = EVFILTID_DETACHED;
+ kqlock(kq);
-static void
-kqworkloop_update_suppress_sync_count(
- struct kqrequest *kqr,
- uint32_t flags)
-{
- if (flags & KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI) {
- kqr->kqr_sync_suppress_count++;
- }
+ knote_activate(kq, kn, FILTER_ACTIVE);
+ knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
+ } else {
+ knote_drop(kq, kn, &knlc);
+ }
- if (flags & KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI) {
- assert(kqr->kqr_sync_suppress_count > 0);
- kqr->kqr_sync_suppress_count--;
+ proc_fdlock(p);
+ goto restart;
}
}
/*
- * kqworkloop_unbind_thread - Unbind the servicer thread of a workloop kqueue
+ * knote_fdfind - lookup a knote in the fd table for process
*
- * It will end the processing phase in case it was still processing:
+ * If the filter is file-based, lookup based on fd index.
+ * Otherwise use a hash based on the ident.
*
- * We may have to request a new thread for not KQ_NO_WQ_THREAD workloop.
- * This can happen if :
- * - there were active events at or above our QoS we never got to (count > 0)
- * - we pended waitq hook callouts during processing
- * - we pended wakeups while processing (or unsuppressing)
+ * Matching is based on kq, filter, and ident. Optionally,
+ * it may also be based on the udata field in the kevent -
+ * allowing multiple event registration for the file object
+ * per kqueue.
*
- * Called with kqueue lock held.
+ * fd_knhashlock or fdlock held on entry (and exit)
*/
-
-static void
-kqworkloop_unbind_thread(
- struct kqworkloop *kqwl,
- thread_t thread,
- __unused unsigned int flags)
+static struct knote *
+knote_fdfind(struct kqueue *kq,
+ const struct kevent_internal_s *kev,
+ bool is_fd,
+ struct proc *p)
{
- struct kqueue *kq = &kqwl->kqwl_kqueue;
- struct kqrequest *kqr = &kqwl->kqwl_request;
-
- kqlock_held(kq);
+ struct filedesc *fdp = p->p_fd;
+ struct klist *list = NULL;
+ struct knote *kn = NULL;
- assert((kq->kq_state & KQ_PROCESSING) == 0);
- if (kq->kq_state & KQ_PROCESSING) {
- return;
+ /*
+ * determine where to look for the knote
+ */
+ if (is_fd) {
+ /* fd-based knotes are linked off the fd table */
+ if (kev->kei_ident < (u_int)fdp->fd_knlistsize) {
+ list = &fdp->fd_knlist[kev->kei_ident];
+ }
+ } else if (fdp->fd_knhashmask != 0) {
+ /* hash non-fd knotes here too */
+ list = &fdp->fd_knhash[KN_HASH((u_long)kev->kei_ident, fdp->fd_knhashmask)];
}
/*
- * Forcing the KQ_PROCESSING flag allows for QoS updates because of
- * unsuppressing knotes not to be applied until the eventual call to
- * kqworkloop_update_threads_qos() below.
+ * scan the selected list looking for a match
*/
- kq->kq_state |= KQ_PROCESSING;
- kqworkloop_acknowledge_events(kqwl, TRUE);
- kq->kq_state &= ~KQ_PROCESSING;
+ if (list != NULL) {
+ SLIST_FOREACH(kn, list, kn_link) {
+ if (kq == knote_get_kq(kn) &&
+ kev->kei_ident == kn->kn_id &&
+ kev->kei_filter == kn->kn_filter) {
+ if (kev->kei_flags & EV_UDATA_SPECIFIC) {
+ if ((kn->kn_flags & EV_UDATA_SPECIFIC) &&
+ kev->kei_udata == kn->kn_udata) {
+ break; /* matching udata-specific knote */
+ }
+ } else if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0) {
+ break; /* matching non-udata-specific knote */
+ }
+ }
+ }
+ }
+ return kn;
+}
- kqwl_req_lock(kqwl);
+/*
+ * kq_add_knote- Add knote to the fd table for process
+ * while checking for duplicates.
+ *
+ * All file-based filters associate a list of knotes by file
+ * descriptor index. All other filters hash the knote by ident.
+ *
+ * May have to grow the table of knote lists to cover the
+ * file descriptor index presented.
+ *
+ * fd_knhashlock and fdlock unheld on entry (and exit).
+ *
+ * Takes a rwlock boost if inserting the knote is successful.
+ */
+static int
+kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc,
+ struct proc *p)
+{
+ struct filedesc *fdp = p->p_fd;
+ struct klist *list = NULL;
+ int ret = 0;
+ bool is_fd = kn->kn_is_fd;
- /* deal with extraneous unbinds in release kernels */
- assert((kqr->kqr_state & (KQR_BOUND | KQR_PROCESSING)) == KQR_BOUND);
- if ((kqr->kqr_state & (KQR_BOUND | KQR_PROCESSING)) != KQR_BOUND) {
- kqwl_req_unlock(kqwl);
- return;
+ if (is_fd) {
+ proc_fdlock(p);
+ } else {
+ knhash_lock(fdp);
}
- assert(thread == current_thread());
- assert(kqr->kqr_thread == thread);
- if (kqr->kqr_thread != thread) {
- kqwl_req_unlock(kqwl);
- return;
+ if (knote_fdfind(kq, &kn->kn_kevent, is_fd, p) != NULL) {
+ /* found an existing knote: we can't add this one */
+ ret = ERESTART;
+ goto out_locked;
}
- struct uthread *ut = get_bsdthread_info(thread);
- kq_index_t old_qos_index = ut->uu_kqueue_qos_index;
- boolean_t ipc_override_is_sync = ut->uu_kqueue_override_is_sync;
- ut->uu_kqueue_bound = NULL;
- ut->uu_kqueue_qos_index = 0;
- ut->uu_kqueue_override_is_sync = 0;
- ut->uu_kqueue_flags = 0;
-
- /* unbind the servicer thread, drop overrides */
- kqr->kqr_thread = NULL;
- kqr->kqr_state &= ~(KQR_BOUND | KQR_THREQUESTED | KQR_R2K_NOTIF_ARMED);
- kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0);
-
- kqwl_req_unlock(kqwl);
+ /* knote was not found: add it now */
+ if (!is_fd) {
+ if (fdp->fd_knhashmask == 0) {
+ u_long size = 0;
- /*
- * Drop the override on the current thread last, after the call to
- * kqworkloop_update_threads_qos above.
- */
- if (old_qos_index) {
- thread_drop_ipc_override(thread);
- }
- if (ipc_override_is_sync) {
- thread_drop_sync_ipc_override(thread);
- }
-}
+ list = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE, &size);
+ if (list == NULL) {
+ ret = ENOMEM;
+ goto out_locked;
+ }
-/* called with the kqworkq lock held */
-static void
-kqworkq_unbind_thread(
- struct kqworkq *kqwq,
- kq_index_t qos_index,
- thread_t thread,
- __unused unsigned int flags)
-{
- struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
- kq_index_t override_index = 0;
+ fdp->fd_knhash = list;
+ fdp->fd_knhashmask = size;
+ }
- /* request lock must be held */
- kqwq_req_held(kqwq);
+ list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
+ SLIST_INSERT_HEAD(list, kn, kn_link);
+ ret = 0;
+ goto out_locked;
+ } else {
+ /* knote is fd based */
- assert(thread == current_thread());
+ if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
+ u_int size = 0;
- if ((kqr->kqr_state & KQR_BOUND) == 0) {
- assert(kqr->kqr_state & KQR_BOUND);
- return;
- }
+ if (kn->kn_id >= (uint64_t)p->p_rlimit[RLIMIT_NOFILE].rlim_cur
+ || kn->kn_id >= (uint64_t)maxfiles) {
+ ret = EINVAL;
+ goto out_locked;
+ }
+ /* have to grow the fd_knlist */
+ size = fdp->fd_knlistsize;
+ while (size <= kn->kn_id) {
+ size += KQEXTENT;
+ }
- assert(kqr->kqr_thread == thread);
- assert(TAILQ_EMPTY(&kqr->kqr_suppressed));
+ if (size >= (UINT_MAX / sizeof(struct klist *))) {
+ ret = EINVAL;
+ goto out_locked;
+ }
- /*
- * If there is an override, drop it from the current thread
- * and then we are free to recompute (a potentially lower)
- * minimum override to apply to the next thread request.
- */
- if (kqr->kqr_override_index) {
- struct kqtailq *base_queue = kqueue_get_base_queue(&kqwq->kqwq_kqueue, qos_index);
- struct kqtailq *queue = kqueue_get_high_queue(&kqwq->kqwq_kqueue, qos_index);
+ MALLOC(list, struct klist *,
+ size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
+ if (list == NULL) {
+ ret = ENOMEM;
+ goto out_locked;
+ }
- /* if not bound to a manager thread, drop the current ipc override */
- if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
- thread_drop_ipc_override(thread);
+ bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
+ fdp->fd_knlistsize * sizeof(struct klist *));
+ bzero((caddr_t)list +
+ fdp->fd_knlistsize * sizeof(struct klist *),
+ (size - fdp->fd_knlistsize) * sizeof(struct klist *));
+ FREE(fdp->fd_knlist, M_KQUEUE);
+ fdp->fd_knlist = list;
+ fdp->fd_knlistsize = size;
}
- /* recompute the new override */
- do {
- if (!TAILQ_EMPTY(queue)) {
- override_index = queue - base_queue + qos_index;
- break;
- }
- } while (queue-- > base_queue);
+ list = &fdp->fd_knlist[kn->kn_id];
+ SLIST_INSERT_HEAD(list, kn, kn_link);
+ ret = 0;
+ goto out_locked;
}
- /* Mark it unbound */
- kqr->kqr_thread = NULL;
- kqr->kqr_state &= ~(KQR_BOUND | KQR_THREQUESTED | KQWQ_THMANAGER);
-
- /* apply the new override */
- if (override_index > kqr->kqr_qos_index) {
- kqr->kqr_override_index = override_index;
+out_locked:
+ if (ret == 0) {
+ kqlock(kq);
+ assert((kn->kn_status & KN_LOCKED) == 0);
+ (void)knote_lock(kq, kn, knlc, KNOTE_KQ_UNLOCK);
+ kqueue_retain(kq); /* retain a kq ref */
+ }
+ if (is_fd) {
+ proc_fdunlock(p);
} else {
- kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED;
+ knhash_unlock(fdp);
}
-}
-struct kqrequest *
-kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
-{
- assert(qos_index < KQWQ_NQOS);
- return &kqwq->kqwq_request[qos_index];
+ return ret;
}
-void
-knote_adjust_qos(struct knote *kn, qos_t new_qos, qos_t new_override, kq_index_t sync_override_index)
+/*
+ * kq_remove_knote - remove a knote from the fd table for process
+ *
+ * If the filter is file-based, remove based on fd index.
+ * Otherwise remove from the hash based on the ident.
+ *
+ * fd_knhashlock and fdlock unheld on entry (and exit).
+ */
+static void
+kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p,
+ struct knote_lock_ctx *knlc)
{
- struct kqueue *kq = knote_get_kq(kn);
- boolean_t override_is_sync = FALSE;
-
- if (kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) {
- kq_index_t new_qos_index;
- kq_index_t new_override_index;
- kq_index_t servicer_qos_index;
+ struct filedesc *fdp = p->p_fd;
+ struct klist *list = NULL;
+ uint16_t kq_state;
+ bool is_fd = kn->kn_is_fd;
- new_qos_index = qos_index_from_qos(kn, new_qos, FALSE);
- new_override_index = qos_index_from_qos(kn, new_override, TRUE);
+ if (is_fd) {
+ proc_fdlock(p);
+ } else {
+ knhash_lock(fdp);
+ }
- /* make sure the servicer qos acts as a floor */
- servicer_qos_index = qos_index_from_qos(kn, kn->kn_qos, FALSE);
- if (servicer_qos_index > new_qos_index)
- new_qos_index = servicer_qos_index;
- if (servicer_qos_index > new_override_index)
- new_override_index = servicer_qos_index;
- if (sync_override_index >= new_override_index) {
- new_override_index = sync_override_index;
- override_is_sync = TRUE;
- }
+ if (is_fd) {
+ assert((u_int)fdp->fd_knlistsize > kn->kn_id);
+ list = &fdp->fd_knlist[kn->kn_id];
+ } else {
+ list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
+ }
+ SLIST_REMOVE(list, kn, knote, kn_link);
- kqlock(kq);
- if (new_qos_index != knote_get_req_index(kn) ||
- new_override_index != knote_get_qos_override_index(kn) ||
- override_is_sync != kn->kn_qos_override_is_sync) {
- if (kn->kn_status & KN_QUEUED) {
- knote_dequeue(kn);
- knote_set_qos_index(kn, new_qos_index);
- knote_set_qos_override_index(kn, new_override_index, override_is_sync);
- knote_enqueue(kn);
- knote_wakeup(kn);
- } else {
- knote_set_qos_index(kn, new_qos_index);
- knote_set_qos_override_index(kn, new_override_index, override_is_sync);
- }
- }
+ kqlock(kq);
+ kq_state = kq->kq_state;
+ if (knlc) {
+ knote_unlock_cancel(kq, kn, knlc);
+ } else {
kqunlock(kq);
}
+ if (is_fd) {
+ proc_fdunlock(p);
+ } else {
+ knhash_unlock(fdp);
+ }
+
+ if (kq_state & KQ_DYNAMIC) {
+ kqworkloop_release((struct kqworkloop *)kq);
+ }
}
-void
-knote_adjust_sync_qos(struct knote *kn, kq_index_t sync_qos, boolean_t lock_kq)
+/*
+ * kq_find_knote_and_kq_lock - lookup a knote in the fd table for process
+ * and, if the knote is found, acquires the kqlock while holding the fd table lock/spinlock.
+ *
+ * fd_knhashlock or fdlock unheld on entry (and exit)
+ */
+
+static struct knote *
+kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_qos_s *kev,
+ bool is_fd, struct proc *p)
{
- struct kqueue *kq = knote_get_kq(kn);
- kq_index_t old_sync_override;
- kq_index_t qos_index = knote_get_qos_index(kn);
- uint32_t flags = 0;
+ struct filedesc *fdp = p->p_fd;
+ struct knote *kn;
- /* Tracking only happens for UI qos */
- if (sync_qos != THREAD_QOS_USER_INTERACTIVE &&
- sync_qos != THREAD_QOS_UNSPECIFIED) {
- return;
+ if (is_fd) {
+ proc_fdlock(p);
+ } else {
+ knhash_lock(fdp);
}
- if (lock_kq)
- kqlock(kq);
+ /*
+ * Temporary horrible hack:
+ * this cast is gross and will go away in a future change.
+ * It is OK to do because we don't look at xflags/s_fflags,
+ * and that when we cast down the kev this way,
+ * the truncated filter field works.
+ */
+ kn = knote_fdfind(kq, (struct kevent_internal_s *)kev, is_fd, p);
- if (kq->kq_state & KQ_WORKLOOP) {
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+ if (kn) {
+ kqlock(kq);
+ assert(knote_get_kq(kn) == kq);
+ }
- old_sync_override = knote_get_sync_qos_override_index(kn);
- if (old_sync_override != sync_qos) {
- kn->kn_qos_sync_override = sync_qos;
+ if (is_fd) {
+ proc_fdunlock(p);
+ } else {
+ knhash_unlock(fdp);
+ }
- /* update sync ipc counters for suppressed knotes */
- if ((kn->kn_status & KN_SUPPRESSED) == KN_SUPPRESSED) {
- flags = flags | KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS;
+ return kn;
+}
- /* Do not recalculate kqwl override, it would be done later */
- flags = flags | KQWL_UO_UPDATE_OVERRIDE_LAZY;
+__attribute__((noinline))
+static void
+kqfile_wakeup(struct kqfile *kqf, __unused kq_index_t qos)
+{
+ /* flag wakeups during processing */
+ if (kqf->kqf_state & KQ_PROCESSING) {
+ kqf->kqf_state |= KQ_WAKEUP;
+ }
- if (sync_qos == THREAD_QOS_USER_INTERACTIVE) {
- flags = flags | KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI;
- }
+ /* wakeup a thread waiting on this queue */
+ if (kqf->kqf_state & (KQ_SLEEP | KQ_SEL)) {
+ kqf->kqf_state &= ~(KQ_SLEEP | KQ_SEL);
+ waitq_wakeup64_all((struct waitq *)&kqf->kqf_wqs, KQ_EVENT,
+ THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
+ }
- if (old_sync_override == THREAD_QOS_USER_INTERACTIVE) {
- flags = flags | KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI;
- }
+ /* wakeup other kqueues/select sets we're inside */
+ KNOTE(&kqf->kqf_sel.si_note, 0);
+}
- kqworkloop_update_override(kqwl, qos_index, sync_qos,
- flags);
- }
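+/*
+ * knote_get_tailq - return the per-QoS queue a knote belongs on
+ *
+ * The valid range of kn_qos_index depends on the kqueue flavor (asserted
+ * below), and kq_queue is laid out right at the end of struct kqueue, which
+ * the static_assert below guarantees.
+ */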
+static struct kqtailq *
+knote_get_tailq(kqueue_t kqu, struct knote *kn)
+{
+ kq_index_t qos_index = kn->kn_qos_index;
- }
+ if (kqu.kq->kq_state & KQ_WORKLOOP) {
+ assert(qos_index < KQWL_NBUCKETS);
+ } else if (kqu.kq->kq_state & KQ_WORKQ) {
+ assert(qos_index < KQWQ_NBUCKETS);
+ } else {
+ assert(qos_index == QOS_INDEX_KQFILE);
}
- if (lock_kq)
- kqunlock(kq);
+ static_assert(offsetof(struct kqueue, kq_queue) == sizeof(struct kqueue),
+ "struct kqueue::kq_queue must be exactly at the end");
+ return &kqu.kq->kq_queue[qos_index];
}
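+/*
+ * knote_enqueue - queue an active knote on its QoS bucket
+ *
+ * Disabled, suppressed or dropping knotes are never queued.  When the
+ * knote's status intersects wakeup_mask, a wakeup is delivered through the
+ * workloop, workq or kqfile path as appropriate.
+ */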
static void
-knote_wakeup(struct knote *kn)
+knote_enqueue(kqueue_t kqu, struct knote *kn, kn_status_t wakeup_mask)
{
- struct kqueue *kq = knote_get_kq(kn);
- kq_index_t qos_index = knote_get_qos_index(kn);
+ kqlock_held(kqu);
- kqlock_held(kq);
+ if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0) {
+ return;
+ }
- if (kq->kq_state & KQ_WORKQ) {
- /* request a servicing thread */
- struct kqworkq *kqwq = (struct kqworkq *)kq;
+ if (kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)) {
+ return;
+ }
- kqworkq_request_help(kqwq, qos_index);
+ if ((kn->kn_status & KN_QUEUED) == 0) {
+ struct kqtailq *queue = knote_get_tailq(kqu, kn);
- } else if (kq->kq_state & KQ_WORKLOOP) {
- /* request a servicing thread */
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+ TAILQ_INSERT_TAIL(queue, kn, kn_tqe);
+ kn->kn_status |= KN_QUEUED;
+ kqu.kq->kq_count++;
+ } else if ((kn->kn_status & KN_STAYACTIVE) == 0) {
+ return;
+ }
- if (kqworkloop_is_processing_on_current_thread(kqwl)) {
- /*
- * kqworkloop_end_processing() will perform the required QoS
- * computations when it unsets the processing mode.
- */
- return;
+ if (kn->kn_status & wakeup_mask) {
+ if (kqu.kq->kq_state & KQ_WORKLOOP) {
+ kqworkloop_wakeup(kqu.kqwl, kn->kn_qos_index);
+ } else if (kqu.kq->kq_state & KQ_WORKQ) {
+ kqworkq_wakeup(kqu.kqwq, kn->kn_qos_index);
+ } else {
+ kqfile_wakeup(kqu.kqf, kn->kn_qos_index);
}
- kqworkloop_request_help(kqwl, qos_index);
- } else {
- struct kqfile *kqf = (struct kqfile *)kq;
+ }
+}
- /* flag wakeups during processing */
- if (kq->kq_state & KQ_PROCESSING)
- kq->kq_state |= KQ_WAKEUP;
+__attribute__((always_inline))
+static inline void
+knote_dequeue(kqueue_t kqu, struct knote *kn)
+{
+ if (kn->kn_status & KN_QUEUED) {
+ struct kqtailq *queue = knote_get_tailq(kqu, kn);
- /* wakeup a thread waiting on this queue */
- if (kq->kq_state & (KQ_SLEEP | KQ_SEL)) {
- kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
- waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
- KQ_EVENT,
- THREAD_AWAKENED,
- WAITQ_ALL_PRIORITIES);
- }
+ // attaching the knote calls knote_reset_priority() without
+ // the kqlock which is fine, so we can't call kqlock_held()
+ // if we're not queued.
+ kqlock_held(kqu);
- /* wakeup other kqueues/select sets we're inside */
- KNOTE(&kqf->kqf_sel.si_note, 0);
+ TAILQ_REMOVE(queue, kn, kn_tqe);
+ kn->kn_status &= ~KN_QUEUED;
+ kqu.kq->kq_count--;
}
}
-/*
- * Called with the kqueue locked
- */
+/* called with kqueue lock held */
static void
-kqueue_interrupt(struct kqueue *kq)
+knote_suppress(kqueue_t kqu, struct knote *kn)
{
- assert((kq->kq_state & KQ_WORKQ) == 0);
-
- /* wakeup sleeping threads */
- if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0) {
- kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
- (void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
- KQ_EVENT,
- THREAD_RESTART,
- WAITQ_ALL_PRIORITIES);
- }
+ struct kqtailq *suppressq;
- /* wakeup threads waiting their turn to process */
- if (kq->kq_state & KQ_PROCWAIT) {
- struct kqtailq *suppressq;
+ kqlock_held(kqu);
- assert(kq->kq_state & KQ_PROCESSING);
+ assert((kn->kn_status & KN_SUPPRESSED) == 0);
+ assert(kn->kn_status & KN_QUEUED);
- kq->kq_state &= ~KQ_PROCWAIT;
- suppressq = kqueue_get_suppressed_queue(kq, QOS_INDEX_KQFILE);
- (void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
- CAST_EVENT64_T(suppressq),
- THREAD_RESTART,
- WAITQ_ALL_PRIORITIES);
- }
+ knote_dequeue(kqu, kn);
+ /* deactivate - so new activations indicate a wakeup */
+ kn->kn_status &= ~KN_ACTIVE;
+ kn->kn_status |= KN_SUPPRESSED;
+ suppressq = kqueue_get_suppressed_queue(kqu, kn);
+ TAILQ_INSERT_TAIL(suppressq, kn, kn_tqe);
}
-/*
- * Called back from waitq code when no threads waiting and the hook was set.
- *
- * Interrupts are likely disabled and spin locks are held - minimal work
- * can be done in this context!!!
- *
- * JMM - in the future, this will try to determine which knotes match the
- * wait queue wakeup and apply these wakeups against those knotes themselves.
- * For now, all the events dispatched this way are dispatch-manager handled,
- * so hard-code that for now.
- */
-void
-waitq_set__CALLING_PREPOST_HOOK__(void *kq_hook, void *knote_hook, int qos)
+__attribute__((always_inline))
+static inline void
+knote_unsuppress_noqueue(kqueue_t kqu, struct knote *kn)
{
-#pragma unused(knote_hook, qos)
-
- struct kqueue *kq = (struct kqueue *)kq_hook;
+ struct kqtailq *suppressq;
- if (kq->kq_state & KQ_WORKQ) {
- struct kqworkq *kqwq = (struct kqworkq *)kq;
+ kqlock_held(kqu);
- kqworkq_request_help(kqwq, KQWQ_QOS_MANAGER);
+ assert(kn->kn_status & KN_SUPPRESSED);
- } else if (kq->kq_state & KQ_WORKLOOP) {
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
+ kn->kn_status &= ~KN_SUPPRESSED;
+ suppressq = kqueue_get_suppressed_queue(kqu, kn);
+ TAILQ_REMOVE(suppressq, kn, kn_tqe);
- kqworkloop_request_help(kqwl, KQWL_BUCKET_STAYACTIVE);
+ /*
+ * If the knote is no longer active, reset its push,
+ * and resynchronize kn_qos_index with kn_qos_override
+ * for knotes with a real qos.
+ */
+ if ((kn->kn_status & KN_ACTIVE) == 0 && knote_has_qos(kn)) {
+ kn->kn_qos_override = _pthread_priority_thread_qos_fast(kn->kn_qos);
}
+ kn->kn_qos_index = kn->kn_qos_override;
}
-void
-klist_init(struct klist *list)
-{
- SLIST_INIT(list);
-}
-
-
-/*
- * Query/Post each knote in the object's list
- *
- * The object lock protects the list. It is assumed
- * that the filter/event routine for the object can
- * determine that the object is already locked (via
- * the hint) and not deadlock itself.
- *
- * The object lock should also hold off pending
- * detach/drop operations. But we'll prevent it here
- * too (by taking a use reference) - just in case.
- */
-void
-knote(struct klist *list, long hint)
+/* called with kqueue lock held */
+static void
+knote_unsuppress(kqueue_t kqu, struct knote *kn)
{
- struct knote *kn;
-
- SLIST_FOREACH(kn, list, kn_selnext) {
- struct kqueue *kq = knote_get_kq(kn);
-
- kqlock(kq);
-
- assert(!knoteuse_needs_boost(kn, NULL));
-
- /* If we can get a use reference - deliver event */
- if (kqlock2knoteuse(kq, kn, KNUSE_NONE)) {
- int result;
-
- /* call the event with only a use count */
- result = knote_fops(kn)->f_event(kn, hint);
+ if (kn->kn_status & KN_SUPPRESSED) {
+ knote_unsuppress_noqueue(kqu, kn);
- /* if its not going away and triggered */
- if (knoteuse2kqlock(kq, kn, KNUSE_NONE) && result)
- knote_activate(kn);
- /* kq lock held */
- }
- kqunlock(kq);
+ /* don't wakeup if unsuppressing just a stay-active knote */
+ knote_enqueue(kqu, kn, KN_ACTIVE);
}
}
-/*
- * attach a knote to the specified list. Return true if this is the first entry.
- * The list is protected by whatever lock the object it is associated with uses.
- */
-int
-knote_attach(struct klist *list, struct knote *kn)
+__attribute__((always_inline))
+static inline void
+knote_mark_active(struct knote *kn)
{
- int ret = SLIST_EMPTY(list);
- SLIST_INSERT_HEAD(list, kn, kn_selnext);
- return (ret);
+ if ((kn->kn_status & KN_ACTIVE) == 0) {
+ KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_ACTIVATE),
+ kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
+ kn->kn_filtid);
+ }
+
+ kn->kn_status |= KN_ACTIVE;
}
-/*
- * detach a knote from the specified list. Return true if that was the last entry.
- * The list is protected by whatever lock the object it is associated with uses.
- */
-int
-knote_detach(struct klist *list, struct knote *kn)
+/* called with kqueue lock held */
+static void
+knote_activate(kqueue_t kqu, struct knote *kn, int result)
{
- SLIST_REMOVE(list, kn, knote, kn_selnext);
- return (SLIST_EMPTY(list));
+ assert(result & FILTER_ACTIVE);
+ if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
+ // may dequeue the knote
+ knote_adjust_qos(kqu.kq, kn, result);
+ }
+ knote_mark_active(kn);
+ knote_enqueue(kqu, kn, KN_ACTIVE | KN_STAYACTIVE);
}
/*
- * knote_vanish - Indicate that the source has vanished
- *
- * If the knote has requested EV_VANISHED delivery,
- * arrange for that. Otherwise, deliver a NOTE_REVOKE
- * event for backward compatibility.
- *
- * The knote is marked as having vanished, but is not
- * actually detached from the source in this instance.
- * The actual detach is deferred until the knote drop.
- *
- * Our caller already has the object lock held. Calling
- * the detach routine would try to take that lock
- * recursively - which likely is not supported.
+ * This function applies changes requested by f_attach or f_touch for
+ * a given filter. It proceeds in a carefully chosen order to help
+ * every single transition do the minimal amount of work possible.
*/
-void
-knote_vanish(struct klist *list)
+static void
+knote_apply_touch(kqueue_t kqu, struct knote *kn, struct kevent_qos_s *kev,
+ int result)
{
- struct knote *kn;
- struct knote *kn_next;
-
- SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
- struct kqueue *kq = knote_get_kq(kn);
- int result;
+ kn_status_t wakeup_mask = KN_ACTIVE;
- kqlock(kq);
+ if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
+ /*
+ * When a stayactive knote is reenabled, we may have missed wakeups
+ * while it was disabled, so we need to poll it. To do so, ask
+ * knote_enqueue() below to reenqueue it.
+ */
+ wakeup_mask |= KN_STAYACTIVE;
+ kn->kn_status &= ~KN_DISABLED;
- assert(!knoteuse_needs_boost(kn, NULL));
+ /*
+ * it is possible for userland to have knotes registered for a given
+ * workloop `wl_orig` but really handled on another workloop `wl_new`.
+ *
+ * In that case, rearming will happen from the servicer thread of
+ * `wl_new` which if `wl_orig` is no longer being serviced, would cause
+ * this knote to stay suppressed forever if we only relied on
+ * kqworkloop_acknowledge_events to be called by `wl_orig`.
+ *
+ * However if we see the KQ_PROCESSING bit on `wl_orig` set, we can't
+ * unsuppress because that would mess with the processing phase of
+ * `wl_orig`, however it also means kqworkloop_acknowledge_events()
+ * will be called.
+ */
+ if (__improbable(kn->kn_status & KN_SUPPRESSED)) {
+ if ((kqu.kq->kq_state & KQ_PROCESSING) == 0) {
+ knote_unsuppress_noqueue(kqu, kn);
+ }
+ }
+ }
- if ((kn->kn_status & KN_DROPPING) == 0) {
- /* If EV_VANISH supported - prepare to deliver one */
- if (kn->kn_status & KN_REQVANISH) {
- kn->kn_status |= KN_VANISHED;
- knote_activate(kn);
+ if ((result & FILTER_UPDATE_REQ_QOS) && kev->qos && kev->qos != kn->kn_qos) {
+ // may dequeue the knote
+ knote_reset_priority(kqu, kn, kev->qos);
+ }
- } else if (kqlock2knoteuse(kq, kn, KNUSE_NONE)) {
- /* call the event with only a use count */
- result = knote_fops(kn)->f_event(kn, NOTE_REVOKE);
+ /*
+ * When we unsuppress above, or because of knote_reset_priority(),
+ * the knote may have been dequeued; we need to restore the invariant
+ * that if the knote is active it needs to be queued now that
+ * we're done applying changes.
+ */
+ if (result & FILTER_ACTIVE) {
+ knote_activate(kqu, kn, result);
+ } else {
+ knote_enqueue(kqu, kn, wakeup_mask);
+ }
- /* if its not going away and triggered */
- if (knoteuse2kqlock(kq, kn, KNUSE_NONE) && result)
- knote_activate(kn);
- /* lock held again */
- }
- }
- kqunlock(kq);
+ if ((result & FILTER_THREADREQ_NODEFEER) &&
+ act_clear_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ)) {
+ workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
}
}
/*
- * For a given knote, link a provided wait queue directly with the kqueue.
- * Wakeups will happen via recursive wait queue support. But nothing will move
- * the knote to the active list at wakeup (nothing calls knote()). Instead,
- * we permanently enqueue them here.
+ * knote_drop - disconnect and drop the knote
*
- * kqueue and knote references are held by caller.
- * waitq locked by caller.
+ * Called with the kqueue locked, returns with the kqueue unlocked.
+ *
+ * If a knote locking context is passed, it is canceled.
*
- * caller provides the wait queue link structure.
+ * The knote may have already been detached from
+ * (or not yet attached to) its source object.
*/
-int
-knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link)
+static void
+knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc)
{
- struct kqueue *kq = knote_get_kq(kn);
- kern_return_t kr;
+ struct proc *p = kq->kq_p;
- kr = waitq_link(wq, &kq->kq_wqs, WAITQ_ALREADY_LOCKED, reserved_link);
- if (kr == KERN_SUCCESS) {
- knote_markstayactive(kn);
- return (0);
+ kqlock_held(kq);
+
+ assert((kn->kn_status & KN_DROPPING) == 0);
+ if (knlc == NULL) {
+ assert((kn->kn_status & KN_LOCKED) == 0);
+ }
+ kn->kn_status |= KN_DROPPING;
+
+ if (kn->kn_status & KN_SUPPRESSED) {
+ knote_unsuppress_noqueue(kq, kn);
} else {
- return (EINVAL);
+ knote_dequeue(kq, kn);
}
-}
+ knote_wait_for_post(kq, kn);
-/*
- * Unlink the provided wait queue from the kqueue associated with a knote.
- * Also remove it from the magic list of directly attached knotes.
- *
- * Note that the unlink may have already happened from the other side, so
- * ignore any failures to unlink and just remove it from the kqueue list.
- *
- * On success, caller is responsible for the link structure
- */
-int
-knote_unlink_waitq(struct knote *kn, struct waitq *wq)
-{
- struct kqueue *kq = knote_get_kq(kn);
- kern_return_t kr;
+ knote_fops(kn)->f_detach(kn);
- kr = waitq_unlink(wq, &kq->kq_wqs);
- knote_clearstayactive(kn);
- return ((kr != KERN_SUCCESS) ? EINVAL : 0);
+ /* kq may be freed when kq_remove_knote() returns */
+ kq_remove_knote(kq, kn, p, knlc);
+ if (kn->kn_is_fd && ((kn->kn_status & KN_VANISHED) == 0)) {
+ fp_drop(p, kn->kn_id, kn->kn_fp, 0);
+ }
+
+ knote_free(kn);
}
-/*
- * remove all knotes referencing a specified fd
- *
- * Essentially an inlined knote_remove & knote_drop
- * when we know for sure that the thing is a file
- *
- * Entered with the proc_fd lock already held.
- * It returns the same way, but may drop it temporarily.
- */
void
-knote_fdclose(struct proc *p, int fd, int force)
+knote_init(void)
{
- struct klist *list;
- struct knote *kn;
+ knote_zone = zinit(sizeof(struct knote), 8192 * sizeof(struct knote),
+ 8192, "knote zone");
+ zone_change(knote_zone, Z_CACHING_ENABLED, TRUE);
-restart:
- list = &p->p_fd->fd_knlist[fd];
- SLIST_FOREACH(kn, list, kn_link) {
- struct kqueue *kq = knote_get_kq(kn);
+ kqfile_zone = zinit(sizeof(struct kqfile), 8192 * sizeof(struct kqfile),
+ 8192, "kqueue file zone");
- kqlock(kq);
+ kqworkq_zone = zinit(sizeof(struct kqworkq), 8192 * sizeof(struct kqworkq),
+ 8192, "kqueue workq zone");
- if (kq->kq_p != p)
- panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
- __func__, kq->kq_p, p);
+ kqworkloop_zone = zinit(sizeof(struct kqworkloop), 8192 * sizeof(struct kqworkloop),
+ 8192, "kqueue workloop zone");
+ zone_change(kqworkloop_zone, Z_CACHING_ENABLED, TRUE);
+
+ /* allocate kq lock group attribute and group */
+ kq_lck_grp_attr = lck_grp_attr_alloc_init();
+
+ kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr);
+
+ /* Allocate kq lock attribute */
+ kq_lck_attr = lck_attr_alloc_init();
- /*
- * If the knote supports EV_VANISHED delivery,
- * transition it to vanished mode (or skip over
- * it if already vanished).
- */
- if (!force && (kn->kn_status & KN_REQVANISH)) {
+#if CONFIG_MEMORYSTATUS
+ /* Initialize the memorystatus list lock */
+ memorystatus_kevent_init(kq_lck_grp, kq_lck_attr);
+#endif
+}
+SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
- if ((kn->kn_status & KN_VANISHED) == 0) {
- proc_fdunlock(p);
+const struct filterops *
+knote_fops(struct knote *kn)
+{
+ return sysfilt_ops[kn->kn_filtid];
+}
- assert(!knoteuse_needs_boost(kn, NULL));
+static struct knote *
+knote_alloc(void)
+{
+ struct knote *kn = ((struct knote *)zalloc(knote_zone));
+ bzero(kn, sizeof(struct knote));
+ return kn;
+}
- /* get detach reference (also marks vanished) */
- if (kqlock2knotedetach(kq, kn, KNUSE_NONE)) {
- /* detach knote and drop fp use reference */
- knote_fops(kn)->f_detach(kn);
- if (knote_fops(kn)->f_isfd)
- fp_drop(p, kn->kn_id, kn->kn_fp, 0);
+static void
+knote_free(struct knote *kn)
+{
+ assert((kn->kn_status & (KN_LOCKED | KN_POSTING)) == 0);
+ zfree(knote_zone, kn);
+}
- /* activate it if it's still in existence */
- if (knoteuse2kqlock(kq, kn, KNUSE_NONE)) {
- knote_activate(kn);
- }
- kqunlock(kq);
- }
- proc_fdlock(p);
- goto restart;
- } else {
- kqunlock(kq);
- continue;
- }
- }
+#pragma mark - syscalls: kevent, kevent64, kevent_qos, kevent_id
- proc_fdunlock(p);
+kevent_ctx_t
+kevent_get_context(thread_t thread)
+{
+ uthread_t ut = get_bsdthread_info(thread);
+ return &ut->uu_save.uus_kevent;
+}
- /*
- * Convert the kq lock to a drop ref.
- * If we get it, go ahead and drop it.
- * Otherwise, we waited for the blocking
- * condition to complete. Either way,
- * we dropped the fdlock so start over.
- */
- if (kqlock2knotedrop(kq, kn)) {
- knote_drop(kn, p);
- }
+static inline bool
+kevent_args_requesting_events(unsigned int flags, int nevents)
+{
+ return !(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0;
+}
- proc_fdlock(p);
- goto restart;
- }
+static inline int
+kevent_adjust_flags_for_proc(proc_t p, int flags)
+{
+ __builtin_assume(p);
+ return flags | (IS_64BIT_PROCESS(p) ? KEVENT_FLAG_PROC64 : 0);
}
-/*
- * knote_fdfind - lookup a knote in the fd table for process
+/*!
+ * @function kevent_get_kqfile
*
- * If the filter is file-based, lookup based on fd index.
- * Otherwise use a hash based on the ident.
+ * @brief
+ * Lookup a kqfile by fd.
*
- * Matching is based on kq, filter, and ident. Optionally,
- * it may also be based on the udata field in the kevent -
- * allowing multiple event registration for the file object
- * per kqueue.
+ * @discussion
+ * Callers: kevent, kevent64, kevent_qos
*
- * fd_knhashlock or fdlock held on entry (and exit)
+ * This is not assumed to be a fastpath (kqfile interfaces are legacy)
*/
-static struct knote *
-knote_fdfind(struct kqueue *kq,
- struct kevent_internal_s *kev,
- bool is_fd,
- struct proc *p)
+OS_NOINLINE
+static int
+kevent_get_kqfile(struct proc *p, int fd, int flags,
+ struct fileproc **fp, struct kqueue **kqp)
{
- struct filedesc *fdp = p->p_fd;
- struct klist *list = NULL;
- struct knote *kn = NULL;
+ int error = 0;
+ struct kqueue *kq;
- /*
- * determine where to look for the knote
- */
- if (is_fd) {
- /* fd-based knotes are linked off the fd table */
- if (kev->ident < (u_int)fdp->fd_knlistsize) {
- list = &fdp->fd_knlist[kev->ident];
+ error = fp_getfkq(p, fd, fp, &kq);
+ if (__improbable(error)) {
+ return error;
+ }
+
+ uint16_t kq_state = os_atomic_load(&kq->kq_state, relaxed);
+ if (__improbable((kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) == 0)) {
+ kqlock(kq);
+ kq_state = kq->kq_state;
+ if (!(kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS))) {
+ if (flags & KEVENT_FLAG_LEGACY32) {
+ kq_state |= KQ_KEV32;
+ } else if (flags & KEVENT_FLAG_LEGACY64) {
+ kq_state |= KQ_KEV64;
+ } else {
+ kq_state |= KQ_KEV_QOS;
+ }
+ kq->kq_state = kq_state;
}
- } else if (fdp->fd_knhashmask != 0) {
- /* hash non-fd knotes here too */
- list = &fdp->fd_knhash[KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
+ kqunlock(kq);
}
/*
- * scan the selected list looking for a match
+ * kqfiles can't be used through the legacy kevent()
+ * and other interfaces at the same time.
*/
- if (list != NULL) {
- SLIST_FOREACH(kn, list, kn_link) {
- if (kq == knote_get_kq(kn) &&
- kev->ident == kn->kn_id &&
- kev->filter == kn->kn_filter) {
- if (kev->flags & EV_UDATA_SPECIFIC) {
- if ((kn->kn_status & KN_UDATA_SPECIFIC) &&
- kev->udata == kn->kn_udata) {
- break; /* matching udata-specific knote */
- }
- } else if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) {
- break; /* matching non-udata-specific knote */
- }
- }
- }
+ if (__improbable((bool)(flags & KEVENT_FLAG_LEGACY32) !=
+ (bool)(kq_state & KQ_KEV32))) {
+ fp_drop(p, fd, *fp, 0);
+ return EINVAL;
}
- return kn;
+
+ *kqp = kq;
+ return 0;
}
-/*
- * kq_add_knote- Add knote to the fd table for process
- * while checking for duplicates.
+/*!
+ * @function kevent_get_kqwq
*
- * All file-based filters associate a list of knotes by file
- * descriptor index. All other filters hash the knote by ident.
- *
- * May have to grow the table of knote lists to cover the
- * file descriptor index presented.
- *
- * fd_knhashlock and fdlock unheld on entry (and exit).
+ * @brief
+ * Lookup or create the process kqwq (fastpath).
*
- * Takes a rwlock boost if inserting the knote is successful.
+ * @discussion
+ * Callers: kevent64, kevent_qos
*/
+OS_ALWAYS_INLINE
static int
-kq_add_knote(struct kqueue *kq, struct knote *kn,
- struct kevent_internal_s *kev,
- struct proc *p, int *knoteuse_flags)
+kevent_get_kqwq(proc_t p, int flags, int nevents, struct kqueue **kqp)
{
- struct filedesc *fdp = p->p_fd;
- struct klist *list = NULL;
- int ret = 0;
- bool is_fd = knote_fops(kn)->f_isfd;
-
- if (is_fd)
- proc_fdlock(p);
- else
- knhash_lock(p);
+ struct kqworkq *kqwq = p->p_fd->fd_wqkqueue;
- if (knote_fdfind(kq, kev, is_fd, p) != NULL) {
- /* found an existing knote: we can't add this one */
- ret = ERESTART;
- goto out_locked;
+ if (__improbable(kevent_args_requesting_events(flags, nevents))) {
+ return EINVAL;
+ }
+ if (__improbable(kqwq == NULL)) {
+ kqwq = kqworkq_alloc(p, flags);
+ if (__improbable(kqwq == NULL)) {
+ return ENOMEM;
+ }
}
- /* knote was not found: add it now */
- if (!is_fd) {
- if (fdp->fd_knhashmask == 0) {
- u_long size = 0;
+ *kqp = &kqwq->kqwq_kqueue;
+ return 0;
+}
- list = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE,
- &size);
- if (list == NULL) {
- ret = ENOMEM;
- goto out_locked;
- }
+#pragma mark kevent copyio
- fdp->fd_knhash = list;
- fdp->fd_knhashmask = size;
+/*!
+ * @function kevent_get_data_size
+ *
+ * @brief
+ * Copies in the extra data size from user-space.
+ */
+static int
+kevent_get_data_size(int flags, user_addr_t data_avail, user_addr_t data_out,
+ kevent_ctx_t kectx)
+{
+ if (!data_avail || !data_out) {
+ kectx->kec_data_size = 0;
+ kectx->kec_data_resid = 0;
+ } else if (flags & KEVENT_FLAG_PROC64) {
+ user64_size_t usize = 0;
+ int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
+ if (__improbable(error)) {
+ return error;
}
+ kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
+ } else {
+ user32_size_t usize = 0;
+ int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
+ if (__improbable(error)) {
+ return error;
+ }
+ kectx->kec_data_avail = data_avail;
+ kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
+ }
+ kectx->kec_data_out = data_out;
+ kectx->kec_data_avail = data_avail;
+ return 0;
+}
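+
+/*
+ * Caller-side sketch, for illustration only (assumes the userspace kevent_qos()
+ * wrapper from <sys/event.h> that pairs an extra-data buffer with a size
+ * pointer; `kq`, `changes`, `events`, `nchanges` and `nevents` are
+ * hypothetical locals):
+ *
+ *     char buf[1024];
+ *     size_t avail = sizeof(buf);
+ *     // buf/&avail correspond to data_out/data_available above; on return
+ *     // the kernel writes the unused size back via kevent_put_data_size().
+ *     int n = kevent_qos(kq, changes, nchanges, events, nevents,
+ *         buf, &avail, 0);
+ */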
- list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
- SLIST_INSERT_HEAD(list, kn, kn_link);
- ret = 0;
- goto out_locked;
-
+/*!
+ * @function kevent_put_data_size
+ *
+ * @brief
+ * Copies out the residual data size to user-space if any has been used.
+ */
+static int
+kevent_put_data_size(unsigned int flags, kevent_ctx_t kectx)
+{
+ if (kectx->kec_data_resid == kectx->kec_data_size) {
+ return 0;
+ }
+ if (flags & KEVENT_FLAG_KERNEL) {
+ *(user_size_t *)(uintptr_t)kectx->kec_data_avail = kectx->kec_data_resid;
+ return 0;
+ }
+ if (flags & KEVENT_FLAG_PROC64) {
+ user64_size_t usize = (user64_size_t)kectx->kec_data_resid;
+ return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
} else {
- /* knote is fd based */
+ user32_size_t usize = (user32_size_t)kectx->kec_data_resid;
+ return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
+ }
+}
- if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
- u_int size = 0;
+/*!
+ * @function kevent_legacy_copyin
+ *
+ * @brief
+ * Handles the copyin of a kevent/kevent64 event.
+ */
+static int
+kevent_legacy_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp, unsigned int flags)
+{
+ int error;
- if (kn->kn_id >= (uint64_t)p->p_rlimit[RLIMIT_NOFILE].rlim_cur
- || kn->kn_id >= (uint64_t)maxfiles) {
- ret = EINVAL;
- goto out_locked;
- }
- /* have to grow the fd_knlist */
- size = fdp->fd_knlistsize;
- while (size <= kn->kn_id)
- size += KQEXTENT;
+ assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);
- if (size >= (UINT_MAX/sizeof(struct klist *))) {
- ret = EINVAL;
- goto out_locked;
- }
+ if (flags & KEVENT_FLAG_LEGACY64) {
+ struct kevent64_s kev64;
- MALLOC(list, struct klist *,
- size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
- if (list == NULL) {
- ret = ENOMEM;
- goto out_locked;
- }
+ error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
+ if (__improbable(error)) {
+ return error;
+ }
+ *addrp += sizeof(kev64);
+ *kevp = (struct kevent_qos_s){
+ .ident = kev64.ident,
+ .filter = kev64.filter,
+ /* Make sure user doesn't pass in any system flags */
+ .flags = kev64.flags & ~EV_SYSFLAGS,
+ .udata = kev64.udata,
+ .fflags = kev64.fflags,
+ .data = kev64.data,
+ .ext[0] = kev64.ext[0],
+ .ext[1] = kev64.ext[1],
+ };
+ } else if (flags & KEVENT_FLAG_PROC64) {
+ struct user64_kevent kev64;
- bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
- fdp->fd_knlistsize * sizeof(struct klist *));
- bzero((caddr_t)list +
- fdp->fd_knlistsize * sizeof(struct klist *),
- (size - fdp->fd_knlistsize) * sizeof(struct klist *));
- FREE(fdp->fd_knlist, M_KQUEUE);
- fdp->fd_knlist = list;
- fdp->fd_knlistsize = size;
+ error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
+ if (__improbable(error)) {
+ return error;
+ }
+ *addrp += sizeof(kev64);
+ *kevp = (struct kevent_qos_s){
+ .ident = kev64.ident,
+ .filter = kev64.filter,
+ /* Make sure user doesn't pass in any system flags */
+ .flags = kev64.flags & ~EV_SYSFLAGS,
+ .udata = kev64.udata,
+ .fflags = kev64.fflags,
+ .data = kev64.data,
+ };
+ } else {
+ struct user32_kevent kev32;
+
+ error = copyin(*addrp, (caddr_t)&kev32, sizeof(kev32));
+ if (__improbable(error)) {
+ return error;
}
+ *addrp += sizeof(kev32);
+ *kevp = (struct kevent_qos_s){
+ .ident = (uintptr_t)kev32.ident,
+ .filter = kev32.filter,
+ /* Make sure user doesn't pass in any system flags */
+ .flags = kev32.flags & ~EV_SYSFLAGS,
+ .udata = CAST_USER_ADDR_T(kev32.udata),
+ .fflags = kev32.fflags,
+ .data = (intptr_t)kev32.data,
+ };
+ }
- list = &fdp->fd_knlist[kn->kn_id];
- SLIST_INSERT_HEAD(list, kn, kn_link);
- ret = 0;
- goto out_locked;
+ return 0;
+}
+/*!
+ * @function kevent_modern_copyin
+ *
+ * @brief
+ * Handles the copyin of a kevent_qos/kevent_id event.
+ */
+static int
+kevent_modern_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp)
+{
+ int error = copyin(*addrp, (caddr_t)kevp, sizeof(struct kevent_qos_s));
+ if (__probable(!error)) {
+ /* Make sure user doesn't pass in any system flags */
+ *addrp += sizeof(struct kevent_qos_s);
+ kevp->flags &= ~EV_SYSFLAGS;
}
+ return error;
+}
-out_locked:
- if (ret == 0 && knoteuse_needs_boost(kn, kev)) {
- set_thread_rwlock_boost();
- *knoteuse_flags = KNUSE_BOOST;
+/*!
+ * @function kevent_legacy_copyout
+ *
+ * @brief
+ * Handles the copyout of a kevent/kevent64 event.
+ */
+static int
+kevent_legacy_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp, unsigned int flags)
+{
+ int advance;
+ int error;
+
+ assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);
+
+ /*
+ * fully initialize the different output event structure
+ * types from the internal kevent (and some universal
+ * defaults for fields not represented in the internal
+ * form).
+ *
+ * Note: these structures have no padding hence the C99
+ * initializers below do not leak kernel info.
+ */
+ if (flags & KEVENT_FLAG_LEGACY64) {
+ struct kevent64_s kev64 = {
+ .ident = kevp->ident,
+ .filter = kevp->filter,
+ .flags = kevp->flags,
+ .fflags = kevp->fflags,
+ .data = (int64_t)kevp->data,
+ .udata = kevp->udata,
+ .ext[0] = kevp->ext[0],
+ .ext[1] = kevp->ext[1],
+ };
+ advance = sizeof(struct kevent64_s);
+ error = copyout((caddr_t)&kev64, *addrp, advance);
+ } else if (flags & KEVENT_FLAG_PROC64) {
+ /*
+ * deal with the special case of a user-supplied
+ * value of (uintptr_t)-1.
+ */
+ uint64_t ident = (kevp->ident == (uintptr_t)-1) ?
+ (uint64_t)-1LL : (uint64_t)kevp->ident;
+ struct user64_kevent kev64 = {
+ .ident = ident,
+ .filter = kevp->filter,
+ .flags = kevp->flags,
+ .fflags = kevp->fflags,
+ .data = (int64_t) kevp->data,
+ .udata = kevp->udata,
+ };
+ advance = sizeof(kev64);
+ error = copyout((caddr_t)&kev64, *addrp, advance);
} else {
- *knoteuse_flags = KNUSE_NONE;
+ struct user32_kevent kev32 = {
+ .ident = (uint32_t)kevp->ident,
+ .filter = kevp->filter,
+ .flags = kevp->flags,
+ .fflags = kevp->fflags,
+ .data = (int32_t)kevp->data,
+ .udata = kevp->udata,
+ };
+ advance = sizeof(kev32);
+ error = copyout((caddr_t)&kev32, *addrp, advance);
}
- if (is_fd)
- proc_fdunlock(p);
- else
- knhash_unlock(p);
+ if (__probable(!error)) {
+ *addrp += advance;
+ }
+ return error;
+}
- return ret;
+/*!
+ * @function kevent_modern_copyout
+ *
+ * @brief
+ * Handles the copyout of a kevent_qos/kevent_id event.
+ */
+OS_ALWAYS_INLINE
+static inline int
+kevent_modern_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp)
+{
+ int error = copyout((caddr_t)kevp, *addrp, sizeof(struct kevent_qos_s));
+ if (__probable(!error)) {
+ *addrp += sizeof(struct kevent_qos_s);
+ }
+ return error;
}
-/*
- * kq_remove_knote - remove a knote from the fd table for process
- * and copy kn_status an kq_state while holding kqlock and
- * fd table locks.
+#pragma mark kevent core implementation
+
+/*!
+ * @function kevent_callback_inline
*
- * If the filter is file-based, remove based on fd index.
- * Otherwise remove from the hash based on the ident.
+ * @brief
+ * Callback for each individual event
*
- * fd_knhashlock and fdlock unheld on entry (and exit).
+ * @discussion
+ * This is meant to be inlined in kevent_modern_callback and
+ * kevent_legacy_callback.
*/
-static void
-kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p,
- kn_status_t *kn_status, uint16_t *kq_state)
+OS_ALWAYS_INLINE
+static inline int
+kevent_callback_inline(struct kevent_qos_s *kevp, kevent_ctx_t kectx, bool legacy)
{
- struct filedesc *fdp = p->p_fd;
- struct klist *list = NULL;
- bool is_fd;
-
- is_fd = knote_fops(kn)->f_isfd;
+ int error;
- if (is_fd)
- proc_fdlock(p);
- else
- knhash_lock(p);
+ assert(kectx->kec_process_noutputs < kectx->kec_process_nevents);
- if (is_fd) {
- assert ((u_int)fdp->fd_knlistsize > kn->kn_id);
- list = &fdp->fd_knlist[kn->kn_id];
+ /*
+ * Copy out the appropriate amount of event data for this user.
+ */
+ if (legacy) {
+ error = kevent_legacy_copyout(kevp, &kectx->kec_process_eventlist,
+ kectx->kec_process_flags);
} else {
- list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
+ error = kevent_modern_copyout(kevp, &kectx->kec_process_eventlist);
}
- SLIST_REMOVE(list, kn, knote, kn_link);
- kqlock(kq);
- *kn_status = kn->kn_status;
- *kq_state = kq->kq_state;
- kqunlock(kq);
-
- if (is_fd)
- proc_fdunlock(p);
- else
- knhash_unlock(p);
+ /*
+ * If there isn't space for additional events, return
+ * a harmless error to stop the processing here
+ */
+ if (error == 0 && ++kectx->kec_process_noutputs == kectx->kec_process_nevents) {
+ error = EWOULDBLOCK;
+ }
+ return error;
}
-/*
- * kq_find_knote_and_kq_lock - lookup a knote in the fd table for process
- * and, if the knote is found, acquires the kqlock while holding the fd table lock/spinlock.
+/*!
+ * @function kevent_modern_callback
*
- * fd_knhashlock or fdlock unheld on entry (and exit)
+ * @brief
+ * Callback for each individual modern event.
+ *
+ * @discussion
+ * This callback handles kevent_qos/kevent_id events.
*/
-
-static struct knote *
-kq_find_knote_and_kq_lock(struct kqueue *kq,
- struct kevent_internal_s *kev,
- bool is_fd,
- struct proc *p)
+static int
+kevent_modern_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
{
- struct knote * ret;
+ return kevent_callback_inline(kevp, kectx, /*legacy*/ false);
+}
- if (is_fd)
- proc_fdlock(p);
- else
- knhash_lock(p);
+/*!
+ * @function kevent_legacy_callback
+ *
+ * @brief
+ * Callback for each individual legacy event.
+ *
+ * @discussion
+ * This callback handles kevent/kevent64 events.
+ */
+static int
+kevent_legacy_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
+{
+ return kevent_callback_inline(kevp, kectx, /*legacy*/ true);
+}
- ret = knote_fdfind(kq, kev, is_fd, p);
+/*!
+ * @function kevent_cleanup
+ *
+ * @brief
+ * Handles the cleanup returning from a kevent call.
+ *
+ * @discussion
+ * kevent entry points will take a reference on workloops,
+ * and a usecount on the fileglob of kqfiles.
+ *
+ * This function undoes this on the exit paths of kevents.
+ *
+ * @returns
+ * The error to return to userspace.
+ */
+static int
+kevent_cleanup(kqueue_t kqu, int flags, int error, kevent_ctx_t kectx)
+{
+ // poll should not call any codepath leading to this
+ assert((flags & KEVENT_FLAG_POLL) == 0);
- if (ret) {
- kqlock(kq);
+ if (flags & KEVENT_FLAG_WORKLOOP) {
+ kqworkloop_release(kqu.kqwl);
+ } else if (flags & KEVENT_FLAG_WORKQ) {
+ /* nothing held */
+ } else {
+ fp_drop(kqu.kqf->kqf_p, kectx->kec_fd, kectx->kec_fp, 0);
}
- if (is_fd)
- proc_fdunlock(p);
- else
- knhash_unlock(p);
+ /* don't restart after signals... */
+ if (error == ERESTART) {
+ error = EINTR;
+ } else if (error == 0) {
+ /* don't abandon other output just because of residual copyout failures */
+ (void)kevent_put_data_size(flags, kectx);
+ }
- return ret;
+ if (flags & KEVENT_FLAG_PARKING) {
+ thread_t th = current_thread();
+ struct uthread *uth = get_bsdthread_info(th);
+ if (uth->uu_kqr_bound) {
+ thread_unfreeze_base_pri(th);
+ }
+ }
+ return error;
}
-/*
- * knote_drop - disconnect and drop the knote
+
+/*!
+ * @function kqueue_process
*
- * Called with the kqueue unlocked and holding a
- * "drop reference" on the knote in question.
- * This reference is most often aquired thru a call
- * to kqlock2knotedrop(). But it can also be acquired
- * through stealing a drop reference via a call to
- * knoteuse2knotedrop() or during the initial attach
- * of the knote.
+ * @brief
+ * Process the triggered events in a kqueue.
*
- * The knote may have already been detached from
- * (or not yet attached to) its source object.
+ * @discussion
+ * Walk the queued knotes and validate that they are really still triggered
+ * events by calling the filter routines (if necessary).
+ *
+ * For each event that is still considered triggered, invoke the callback
+ * routine provided.
+ *
+ * caller holds a reference on the kqueue.
+ * kqueue locked on entry and exit - but may be dropped
+ * kqueue list locked (held for duration of call)
+ *
+ * This is only called by kqueue_scan() so that the compiler can inline it.
+ *
+ * @returns
+ * - 0: no event was returned, no other error occurred
+ * - EBADF: the kqueue is being destroyed (KQ_DRAIN is set)
+ * - EWOULDBLOCK: (not an error) events have been found and we should return
+ * - EFAULT: copyout failed
+ * - filter specific errors
*/
-static void
-knote_drop(struct knote *kn, __unused struct proc *ctxp)
+static int
+kqueue_process(kqueue_t kqu, int flags, kevent_ctx_t kectx,
+ kevent_callback_t callback)
{
- struct kqueue *kq = knote_get_kq(kn);
- struct proc *p = kq->kq_p;
- kn_status_t kn_status;
- uint16_t kq_state;
+ workq_threadreq_t kqr = current_uthread()->uu_kqr_bound;
+ struct knote *kn;
+ int error = 0, rc = 0;
+ struct kqtailq *base_queue, *queue;
+#if DEBUG || DEVELOPMENT
+ int retries = 64;
+#endif
+ uint16_t kq_type = (kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));
- /* If we are attached, disconnect from the source first */
- if (kn->kn_status & KN_ATTACHED) {
- knote_fops(kn)->f_detach(kn);
+ if (kq_type & KQ_WORKQ) {
+ rc = kqworkq_begin_processing(kqu.kqwq, kqr, flags);
+ } else if (kq_type & KQ_WORKLOOP) {
+ rc = kqworkloop_begin_processing(kqu.kqwl, flags);
+ } else {
+kqfile_retry:
+ rc = kqfile_begin_processing(kqu.kqf);
+ if (rc == EBADF) {
+ return EBADF;
+ }
}
- /* Remove the source from the appropriate hash */
- kq_remove_knote(kq, kn, p, &kn_status, &kq_state);
+ if (rc == -1) {
+ /* Nothing to process */
+ return 0;
+ }
/*
- * If a kqueue_dealloc is happening in parallel for the kq
- * pointed by the knote the kq could be aready deallocated
- * at this point.
- * Do not access the kq after the kq_remove_knote if it is
- * not a KQ_DYNAMIC.
+ * loop through the enqueued knotes associated with this request,
+ * processing each one. Each request may have several queues
+ * of knotes to process (depending on the type of kqueue) so we
+ * have to loop through all the queues as long as we have additional
+ * space.
*/
- /* determine if anyone needs to know about the drop */
- assert((kn_status & (KN_DROPPING | KN_SUPPRESSED | KN_QUEUED)) == KN_DROPPING);
+process_again:
+ if (kq_type & KQ_WORKQ) {
+ base_queue = queue = &kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index];
+ } else if (kq_type & KQ_WORKLOOP) {
+ base_queue = &kqu.kqwl->kqwl_queue[0];
+ queue = &kqu.kqwl->kqwl_queue[KQWL_NBUCKETS - 1];
+ } else {
+ base_queue = queue = &kqu.kqf->kqf_queue;
+ }
+
+ do {
+ while ((kn = TAILQ_FIRST(queue)) != NULL) {
+ error = knote_process(kn, kectx, callback);
+ if (error == EJUSTRETURN) {
+ error = 0;
+ } else if (__improbable(error)) {
+ /* error is EWOULDBLOCK when the out event array is full */
+ goto stop_processing;
+ }
+ }
+ } while (queue-- > base_queue);
+
+ if (kectx->kec_process_noutputs) {
+ /* callers will transform this into no error */
+ error = EWOULDBLOCK;
+ }
+stop_processing:
/*
- * If KN_USEWAIT is set, some other thread was trying to drop the kn.
- * Or it was in kqueue_dealloc, so the kqueue_dealloc did not happen
- * because that thread was waiting on this wake, or it was a drop happening
- * because of a kevent_register that takes a reference on the kq, and therefore
- * the kq cannot be deallocated in parallel.
+ * If KEVENT_FLAG_PARKING is set, and no kevents have been returned,
+ * we want to unbind the kqrequest from the thread.
+ *
+ * However, because the kq locks are dropped several times during processing,
+ * new knotes may have fired again, in which case, we want to fail the end
+ * processing and process again, until it converges.
*
- * It is safe to access kq->kq_wqs if needswakeup is set.
+ * If we have an error or returned events, end processing never fails.
*/
- if (kn_status & KN_USEWAIT)
- waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
- CAST_EVENT64_T(&kn->kn_status),
- THREAD_RESTART,
- WAITQ_ALL_PRIORITIES);
+ if (error) {
+ flags &= ~KEVENT_FLAG_PARKING;
+ }
+ if (kq_type & KQ_WORKQ) {
+ rc = kqworkq_end_processing(kqu.kqwq, kqr, flags);
+ } else if (kq_type & KQ_WORKLOOP) {
+ rc = kqworkloop_end_processing(kqu.kqwl, KQ_PROCESSING, flags);
+ } else {
+ rc = kqfile_end_processing(kqu.kqf);
+ }
- if (knote_fops(kn)->f_isfd && ((kn->kn_status & KN_VANISHED) == 0))
- fp_drop(p, kn->kn_id, kn->kn_fp, 0);
+ if (__probable(error)) {
+ return error;
+ }
- knote_free(kn);
+ if (__probable(rc >= 0)) {
+ assert(rc == 0 || rc == EBADF);
+ return rc;
+ }
- /*
- * release reference on dynamic kq (and free if last).
- * Will only be last if this is from fdfree, etc...
- * because otherwise processing thread has reference.
- */
- if (kq_state & KQ_DYNAMIC)
- kqueue_release_last(p, kq);
+#if DEBUG || DEVELOPMENT
+ if (retries-- == 0) {
+ panic("kevent: way too many knote_process retries, kq: %p (0x%04x)",
+ kqu.kq, kqu.kq->kq_state);
+ }
+#endif
+ if (kq_type & (KQ_WORKQ | KQ_WORKLOOP)) {
+ assert(flags & KEVENT_FLAG_PARKING);
+ goto process_again;
+ } else {
+ goto kqfile_retry;
+ }
}
-/* called with kqueue lock held */
+/*!
+ * @function kqueue_scan_continue
+ *
+ * @brief
+ * The continuation used by kqueue_scan for kevent entry points.
+ *
+ * @discussion
+ * Assumes we inherit a use/ref count on the kq or its fileglob.
+ *
+ * This is called by kqueue_scan if neither KEVENT_FLAG_POLL nor
+ * KEVENT_FLAG_KERNEL was set, and the caller had to wait.
+ */
+OS_NORETURN OS_NOINLINE
static void
-knote_activate(struct knote *kn)
+kqueue_scan_continue(void *data, wait_result_t wait_result)
{
- if (kn->kn_status & KN_ACTIVE)
- return;
+ uthread_t ut = current_uthread();
+ kevent_ctx_t kectx = &ut->uu_save.uus_kevent;
+ int error = 0, flags = kectx->kec_process_flags;
+ struct kqueue *kq = data;
- KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KNOTE_ACTIVATE),
- kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
- kn->kn_filtid);
+ /*
+ * only kevent variants call in here, so we know the callback is
+ * kevent_legacy_callback or kevent_modern_callback.
+ */
+ assert((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0);
- kn->kn_status |= KN_ACTIVE;
- if (knote_enqueue(kn))
- knote_wakeup(kn);
-}
+ switch (wait_result) {
+ case THREAD_AWAKENED:
+ if (__improbable(flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64))) {
+ error = kqueue_scan(kq, flags, kectx, kevent_legacy_callback);
+ } else {
+ error = kqueue_scan(kq, flags, kectx, kevent_modern_callback);
+ }
+ break;
+ case THREAD_TIMED_OUT:
+ error = 0;
+ break;
+ case THREAD_INTERRUPTED:
+ error = EINTR;
+ break;
+ case THREAD_RESTART:
+ error = EBADF;
+ break;
+ default:
+ panic("%s: - invalid wait_result (%d)", __func__, wait_result);
+ }
-/* called with kqueue lock held */
-static void
-knote_deactivate(struct knote *kn)
-{
- kn->kn_status &= ~KN_ACTIVE;
- if ((kn->kn_status & KN_STAYACTIVE) == 0)
- knote_dequeue(kn);
+
+ error = kevent_cleanup(kq, flags, error, kectx);
+ *(int32_t *)&ut->uu_rval = kectx->kec_process_noutputs;
+ unix_syscall_return(error);
}
-/* called with kqueue lock held */
-static void
-knote_enable(struct knote *kn)
+/*!
+ * @function kqueue_scan
+ *
+ * @brief
+ * Scan and wait for events in a kqueue (used by poll & kevent).
+ *
+ * @discussion
+ * Process the triggered events in a kqueue.
+ *
+ * If there are no events triggered arrange to wait for them:
+ * - unless KEVENT_FLAG_IMMEDIATE is set in kectx->kec_process_flags
+ * - possibly until kectx->kec_deadline expires
+ *
+ * When it waits and neither KEVENT_FLAG_POLL nor KEVENT_FLAG_KERNEL
+ * is set, it will wait in the kqueue_scan_continue continuation.
+ *
+ * poll() will block in place, and KEVENT_FLAG_KERNEL calls
+ * all pass KEVENT_FLAG_IMMEDIATE and will not wait.
+ *
+ * @param kq
+ * The kqueue being scanned.
+ *
+ * @param flags
+ * The KEVENT_FLAG_* flags for this call.
+ *
+ * @param kectx
+ * The context used for this scan.
+ * The uthread_t::uu_save.uus_kevent storage is used for this purpose.
+ *
+ * @param callback
+ * The callback to be called on events successfully processed.
+ * (Either kevent_legacy_callback, kevent_modern_callback or poll_callback)
+ */
+int
+kqueue_scan(struct kqueue *kq, int flags, kevent_ctx_t kectx,
+ kevent_callback_t callback)
{
- if ((kn->kn_status & KN_DISABLED) == 0)
- return;
-
- kn->kn_status &= ~KN_DISABLED;
+ int error;
- if (kn->kn_status & KN_SUPPRESSED) {
- /* Clear the sync qos on the knote */
- knote_adjust_sync_qos(kn, THREAD_QOS_UNSPECIFIED, FALSE);
+ for (;;) {
+ kqlock(kq);
+ error = kqueue_process(kq, flags, kectx, callback);
/*
- * it is possible for userland to have knotes registered for a given
- * workloop `wl_orig` but really handled on another workloop `wl_new`.
- *
- * In that case, rearming will happen from the servicer thread of
- * `wl_new` which if `wl_orig` is no longer being serviced, would cause
- * this knote to stay suppressed forever if we only relied on
- * kqworkloop_acknowledge_events to be called by `wl_orig`.
- *
- * However if we see the KQ_PROCESSING bit on `wl_orig` set, we can't
- * unsuppress because that would mess with the processing phase of
- * `wl_orig`, however it also means kqworkloop_acknowledge_events()
- * will be called.
+ * If we got an error, events returned (EWOULDBLOCK)
+ * or blocking was disallowed (KEVENT_FLAG_IMMEDIATE),
+ * just return.
*/
- struct kqueue *kq = knote_get_kq(kn);
- if ((kq->kq_state & KQ_PROCESSING) == 0) {
- knote_unsuppress(kn);
+ if (__probable(error || (flags & KEVENT_FLAG_IMMEDIATE))) {
+ kqunlock(kq);
+ return error == EWOULDBLOCK ? 0 : error;
+ }
+
+ waitq_assert_wait64_leeway((struct waitq *)&kq->kq_wqs,
+ KQ_EVENT, THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL,
+ kectx->kec_deadline, TIMEOUT_NO_LEEWAY);
+ kq->kq_state |= KQ_SLEEP;
+
+ kqunlock(kq);
+
+ if (__probable((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0)) {
+ thread_block_parameter(kqueue_scan_continue, kq);
+ __builtin_unreachable();
+ }
+
+ wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
+ switch (wr) {
+ case THREAD_AWAKENED:
+ break;
+ case THREAD_TIMED_OUT:
+ return 0;
+ case THREAD_INTERRUPTED:
+ return EINTR;
+ case THREAD_RESTART:
+ return EBADF;
+ default:
+ panic("%s: - bad wait_result (%d)", __func__, wr);
}
- } else if (knote_enqueue(kn)) {
- knote_wakeup(kn);
}
}
-/* called with kqueue lock held */
-static void
-knote_disable(struct knote *kn)
+/*!
+ * @function kevent_internal
+ *
+ * @brief
+ * Common kevent code.
+ *
+ * @discussion
+ * Needs to be inlined to specialize for legacy or modern and
+ * eliminate dead code.
+ *
+ * This is the core logic of kevent entry points, that will:
+ * - register kevents
+ * - optionally scan the kqueue for events
+ *
+ * The caller is giving kevent_internal a reference on the kqueue
+ * or its fileproc that needs to be cleaned up by kevent_cleanup().
+ */
+OS_ALWAYS_INLINE
+static inline int
+kevent_internal(kqueue_t kqu,
+ user_addr_t changelist, int nchanges,
+ user_addr_t ueventlist, int nevents,
+ int flags, kevent_ctx_t kectx, int32_t *retval,
+ bool legacy)
{
- if (kn->kn_status & KN_DISABLED)
- return;
+ int error = 0, noutputs = 0, register_rc;
- kn->kn_status |= KN_DISABLED;
- knote_dequeue(kn);
-}
+ /* only bound threads can receive events on workloops */
+ if (!legacy && (flags & KEVENT_FLAG_WORKLOOP)) {
+#if CONFIG_WORKLOOP_DEBUG
+ UU_KEVENT_HISTORY_WRITE_ENTRY(current_uthread(), {
+ .uu_kqid = kqu.kqwl->kqwl_dynamicid,
+ .uu_kq = error ? NULL : kqu.kq,
+ .uu_error = error,
+ .uu_nchanges = nchanges,
+ .uu_nevents = nevents,
+ .uu_flags = flags,
+ });
+#endif // CONFIG_WORKLOOP_DEBUG
-/* called with kqueue lock held */
-static void
-knote_suppress(struct knote *kn)
-{
- struct kqtailq *suppressq;
- struct kqueue *kq = knote_get_kq(kn);
+ if (flags & KEVENT_FLAG_KERNEL) {
+ /* see kevent_workq_internal */
+ error = copyout(&kqu.kqwl->kqwl_dynamicid,
+ ueventlist - sizeof(kqueue_id_t), sizeof(kqueue_id_t));
+ kectx->kec_data_resid -= sizeof(kqueue_id_t);
+ if (__improbable(error)) {
+ goto out;
+ }
+ }
- kqlock_held(kq);
+ if (kevent_args_requesting_events(flags, nevents)) {
+ /*
+ * Disable the R2K notification while doing a register: if the
+ * caller wants events too, we don't want the AST to be set if we
+ * will process these events soon.
+ */
+ kqlock(kqu);
+ kqu.kq->kq_state &= ~KQ_R2K_ARMED;
+ kqunlock(kqu);
+ flags |= KEVENT_FLAG_NEEDS_END_PROCESSING;
+ }
+ }
- if (kn->kn_status & KN_SUPPRESSED)
- return;
+ /* register all the change requests the user provided... */
+ while (nchanges > 0 && error == 0) {
+ struct kevent_qos_s kev;
+ struct knote *kn = NULL;
- knote_dequeue(kn);
- kn->kn_status |= KN_SUPPRESSED;
- suppressq = kqueue_get_suppressed_queue(kq, knote_get_qos_index(kn));
- TAILQ_INSERT_TAIL(suppressq, kn, kn_tqe);
+ if (legacy) {
+ error = kevent_legacy_copyin(&changelist, &kev, flags);
+ } else {
+ error = kevent_modern_copyin(&changelist, &kev);
+ }
+ if (error) {
+ break;
+ }
+
+ register_rc = kevent_register(kqu.kq, &kev, &kn);
+ if (__improbable(!legacy && (register_rc & FILTER_REGISTER_WAIT))) {
+ thread_t thread = current_thread();
+
+ kqlock_held(kqu);
+
+ if (act_clear_astkevent(thread, AST_KEVENT_REDRIVE_THREADREQ)) {
+ workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
+ }
+
+ // f_post_register_wait is meant to call a continuation and not to
+ // return, which is why we don't support FILTER_REGISTER_WAIT if
+ // KEVENT_FLAG_ERROR_EVENTS is not passed, or if the event that
+ // waits isn't the last.
+ //
+ // It is implementable, but not used by any userspace code at the
+ // moment, so for now return ENOTSUP if someone tries to do it.
+ if (nchanges == 1 && noutputs < nevents &&
+ (flags & KEVENT_FLAG_KERNEL) == 0 &&
+ (flags & KEVENT_FLAG_PARKING) == 0 &&
+ (flags & KEVENT_FLAG_ERROR_EVENTS) &&
+ (flags & KEVENT_FLAG_WORKLOOP)) {
+ uthread_t ut = get_bsdthread_info(thread);
+
+ /*
+ * store the continuation/completion data in the uthread
+ *
+ * Note: the kectx aliases with this,
+ * and is destroyed in the process.
+ */
+ ut->uu_save.uus_kevent_register = (struct _kevent_register){
+ .kev = kev,
+ .kqwl = kqu.kqwl,
+ .eventout = noutputs,
+ .ueventlist = ueventlist,
+ };
+ knote_fops(kn)->f_post_register_wait(ut, kn,
+ &ut->uu_save.uus_kevent_register);
+ __builtin_unreachable();
+ }
+ kqunlock(kqu);
+
+ kev.flags |= EV_ERROR;
+ kev.data = ENOTSUP;
+ } else {
+ assert((register_rc & FILTER_REGISTER_WAIT) == 0);
+ }
+
+ // keep in sync with kevent_register_wait_return()
+ if (noutputs < nevents && (kev.flags & (EV_ERROR | EV_RECEIPT))) {
+ if ((kev.flags & EV_ERROR) == 0) {
+ kev.flags |= EV_ERROR;
+ kev.data = 0;
+ }
+ if (legacy) {
+ error = kevent_legacy_copyout(&kev, &ueventlist, flags);
+ } else {
+ error = kevent_modern_copyout(&kev, &ueventlist);
+ }
+ if (error == 0) {
+ noutputs++;
+ }
+ } else if (kev.flags & EV_ERROR) {
+ error = kev.data;
+ }
+ nchanges--;
+ }
+
+ if ((flags & KEVENT_FLAG_ERROR_EVENTS) == 0 &&
+ nevents > 0 && noutputs == 0 && error == 0) {
+ kectx->kec_process_flags = flags;
+ kectx->kec_process_nevents = nevents;
+ kectx->kec_process_noutputs = 0;
+ kectx->kec_process_eventlist = ueventlist;
+
+ if (legacy) {
+ error = kqueue_scan(kqu.kq, flags, kectx, kevent_legacy_callback);
+ } else {
+ error = kqueue_scan(kqu.kq, flags, kectx, kevent_modern_callback);
+ }
- if ((kq->kq_state & KQ_WORKLOOP) &&
- knote_get_qos_override_index(kn) == THREAD_QOS_USER_INTERACTIVE &&
- kn->kn_qos_override_is_sync) {
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- /* update the sync qos override counter for suppressed knotes */
- kqworkloop_update_override(kqwl, knote_get_qos_index(kn),
- knote_get_qos_override_index(kn),
- (KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS | KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI));
+ noutputs = kectx->kec_process_noutputs;
+ } else if (!legacy && (flags & KEVENT_FLAG_NEEDS_END_PROCESSING)) {
+ /*
+ * If we didn't go through kqworkloop_end_processing(),
+ * we need to do it here.
+ *
+ * kqueue_scan will call kqworkloop_end_processing(),
+ * so we only need to do it if we didn't scan.
+ */
+ kqlock(kqu);
+ kqworkloop_end_processing(kqu.kqwl, 0, 0);
+ kqunlock(kqu);
}
-}
-
-/* called with kqueue lock held */
-static void
-knote_unsuppress(struct knote *kn)
-{
- struct kqtailq *suppressq;
- struct kqueue *kq = knote_get_kq(kn);
- kqlock_held(kq);
+ *retval = noutputs;
+out:
+ return kevent_cleanup(kqu.kq, flags, error, kectx);
+}
- if ((kn->kn_status & KN_SUPPRESSED) == 0)
- return;
+#pragma mark modern syscalls: kevent_qos, kevent_id, kevent_workq_internal
- /* Clear the sync qos on the knote */
- knote_adjust_sync_qos(kn, THREAD_QOS_UNSPECIFIED, FALSE);
+/*!
+ * @function kevent_modern_internal
+ *
+ * @brief
+ * The backend of the kevent_id and kevent_workq_internal entry points.
+ *
+ * @discussion
+ * Needs to be noinline due to the number of arguments.
+ */
+OS_NOINLINE
+static int
+kevent_modern_internal(kqueue_t kqu,
+ user_addr_t changelist, int nchanges,
+ user_addr_t ueventlist, int nevents,
+ int flags, kevent_ctx_t kectx, int32_t *retval)
+{
+ return kevent_internal(kqu.kq, changelist, nchanges,
+ ueventlist, nevents, flags, kectx, retval, /*legacy*/ false);
+}
- kn->kn_status &= ~KN_SUPPRESSED;
- suppressq = kqueue_get_suppressed_queue(kq, knote_get_qos_index(kn));
- TAILQ_REMOVE(suppressq, kn, kn_tqe);
+/*!
+ * @function kevent_id
+ *
+ * @brief
+ * The kevent_id() syscall.
+ */
+int
+kevent_id(struct proc *p, struct kevent_id_args *uap, int32_t *retval)
+{
+ int error, flags = uap->flags & KEVENT_FLAG_USER;
+ uthread_t uth = current_uthread();
+ workq_threadreq_t kqr = uth->uu_kqr_bound;
+ kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
+ kqueue_t kqu;
- /* udate in-use qos to equal requested qos */
- kn->kn_qos_index = kn->kn_req_index;
+ flags = kevent_adjust_flags_for_proc(p, flags);
+ flags |= KEVENT_FLAG_DYNAMIC_KQUEUE;
- /* don't wakeup if unsuppressing just a stay-active knote */
- if (knote_enqueue(kn) && (kn->kn_status & KN_ACTIVE)) {
- knote_wakeup(kn);
+ if (__improbable((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP)) !=
+ KEVENT_FLAG_WORKLOOP)) {
+ return EINVAL;
}
- if ((kq->kq_state & KQ_WORKLOOP) && !(kq->kq_state & KQ_NO_WQ_THREAD) &&
- knote_get_qos_override_index(kn) == THREAD_QOS_USER_INTERACTIVE &&
- kn->kn_qos_override_is_sync) {
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
-
- /* update the sync qos override counter for suppressed knotes */
- kqworkloop_update_override(kqwl, knote_get_qos_index(kn),
- knote_get_qos_override_index(kn),
- (KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS | KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI));
+ error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
+ if (__improbable(error)) {
+ return error;
}
- if (TAILQ_EMPTY(suppressq) && (kq->kq_state & KQ_WORKLOOP) &&
- !(kq->kq_state & KQ_NO_WQ_THREAD)) {
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- if (kqworkloop_is_processing_on_current_thread(kqwl)) {
- /*
- * kqworkloop_end_processing() will perform the required QoS
- * computations when it unsets the processing mode.
- */
- } else {
- kqwl_req_lock(kqwl);
- kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RESET_WAKEUP_OVERRIDE, 0);
- kqwl_req_unlock(kqwl);
+ kectx->kec_deadline = 0;
+ kectx->kec_fp = NULL;
+ kectx->kec_fd = -1;
+ /* the kec_process_* fields are only filled if kqueue_scan is called */
+
+ /*
+ * Get the kq we are going to be working on
+ * As a fastpath, look at the currently bound workloop.
+ */
+ kqu.kqwl = kqr ? kqr_kqworkloop(kqr) : NULL;
+ if (kqu.kqwl && kqu.kqwl->kqwl_dynamicid == uap->id) {
+ if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
+ return EEXIST;
+ }
+ kqworkloop_retain(kqu.kqwl);
+ } else if (__improbable(kevent_args_requesting_events(flags, uap->nevents))) {
+ return EXDEV;
+ } else {
+ error = kqworkloop_get_or_create(p, uap->id, NULL, flags, &kqu.kqwl);
+ if (__improbable(error)) {
+ return error;
}
}
-}
-/* called with kqueue lock held */
-static void
-knote_update_sync_override_state(struct knote *kn)
-{
- struct kqtailq *queue = knote_get_queue(kn);
- struct kqueue *kq = knote_get_kq(kn);
+ return kevent_modern_internal(kqu, uap->changelist, uap->nchanges,
+ uap->eventlist, uap->nevents, flags, kectx, retval);
+}
- if (!(kq->kq_state & KQ_WORKLOOP) ||
- knote_get_queue_index(kn) != THREAD_QOS_USER_INTERACTIVE)
- return;
+/*!
+ * @function kevent_workq_internal
+ *
+ * @discussion
+ * This function is exported for the sake of the workqueue subsystem.
+ *
+ * It is called in two ways:
+ * - when a thread is about to go to userspace to ask for pending events
+ * - when a thread is returning from userspace with events back
+ *
+ * the workqueue subsystem will only use the following flags:
+ * - KEVENT_FLAG_STACK_DATA (always)
+ * - KEVENT_FLAG_IMMEDIATE (always)
+ * - KEVENT_FLAG_PARKING (depending on whether it is going to or returning from
+ * userspace).
+ *
+ * It implicitly acts on the bound kqueue, and for the case of workloops
+ * will copyout the kqueue ID before anything else.
+ *
+ * Pthread will have set up the various arguments to fit this stack layout:
+ *
+ * +-------....----+--------------+-----------+--------------------+
+ * | user stack | data avail | nevents | pthread_self() |
+ * +-------....----+--------------+-----------+--------------------+
+ * ^ ^
+ * data_out eventlist
+ *
+ * When a workloop is used, the workloop ID is copied out right before
+ * the eventlist and is taken from the data buffer.
+ *
+ * @warning
+ * This function is carefully tailored to not make any call except the final tail
+ * call into kevent_modern_internal. (LTO inlines current_uthread()).
+ *
+ * This function is performance sensitive due to the workq subsystem.
+ */
+int
+kevent_workq_internal(struct proc *p,
+ user_addr_t changelist, int nchanges,
+ user_addr_t eventlist, int nevents,
+ user_addr_t data_out, user_size_t *data_available,
+ unsigned int flags, int32_t *retval)
+{
+ uthread_t uth = current_uthread();
+ workq_threadreq_t kqr = uth->uu_kqr_bound;
+ kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
+ kqueue_t kqu;
+
+ assert(flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE) ||
+ flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_PARKING));
+
+ kectx->kec_data_out = data_out;
+ kectx->kec_data_avail = (uint64_t)data_available;
+ kectx->kec_data_size = *data_available;
+ kectx->kec_data_resid = *data_available;
+ kectx->kec_deadline = 0;
+ kectx->kec_fp = NULL;
+ kectx->kec_fd = -1;
+ /* the kec_process_* fields are only filled if kqueue_scan is called */
+
+ flags = kevent_adjust_flags_for_proc(p, flags);
+
+ if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
+ kqu.kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
+ kqworkloop_retain(kqu.kqwl);
+
+ flags |= KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_DYNAMIC_KQUEUE |
+ KEVENT_FLAG_KERNEL;
+ } else {
+ kqu.kqwq = p->p_fd->fd_wqkqueue;
- /* Update the sync ipc state on workloop */
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- boolean_t sync_ipc_override = FALSE;
- if (!TAILQ_EMPTY(queue)) {
- struct knote *kn_head = TAILQ_FIRST(queue);
- if (kn_head->kn_qos_override_is_sync)
- sync_ipc_override = TRUE;
+ flags |= KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL;
}
- kqworkloop_update_sync_override_state(kqwl, sync_ipc_override);
+
+ return kevent_modern_internal(kqu, changelist, nchanges,
+ eventlist, nevents, flags, kectx, retval);
}
-/* called with kqueue lock held */
-static int
-knote_enqueue(struct knote *kn)
+/*!
+ * @function kevent_qos
+ *
+ * @brief
+ * The kevent_qos() syscall.
+ */
+int
+kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
{
- if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0 ||
- (kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)))
- return 0;
-
- if ((kn->kn_status & KN_QUEUED) == 0) {
- struct kqtailq *queue = knote_get_queue(kn);
- struct kqueue *kq = knote_get_kq(kn);
+ uthread_t uth = current_uthread();
+ kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
+ int error, flags = uap->flags & KEVENT_FLAG_USER;
+ struct kqueue *kq;
- kqlock_held(kq);
- /* insert at head for sync ipc waiters */
- if (kn->kn_qos_override_is_sync) {
- TAILQ_INSERT_HEAD(queue, kn, kn_tqe);
- } else {
- TAILQ_INSERT_TAIL(queue, kn, kn_tqe);
- }
- kn->kn_status |= KN_QUEUED;
- kq->kq_count++;
- knote_update_sync_override_state(kn);
- return 1;
+ if (__improbable(flags & KEVENT_ID_FLAG_USER)) {
+ return EINVAL;
}
- return ((kn->kn_status & KN_STAYACTIVE) != 0);
-}
+ flags = kevent_adjust_flags_for_proc(p, flags);
-/* called with kqueue lock held */
-static void
-knote_dequeue(struct knote *kn)
-{
- struct kqueue *kq = knote_get_kq(kn);
- struct kqtailq *queue;
+ error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
+ if (__improbable(error)) {
+ return error;
+ }
- kqlock_held(kq);
+ kectx->kec_deadline = 0;
+ kectx->kec_fp = NULL;
+ kectx->kec_fd = uap->fd;
+ /* the kec_process_* fields are only filled if kqueue_scan is called */
- if ((kn->kn_status & KN_QUEUED) == 0)
- return;
+ /* get the kq we are going to be working on */
+ if (__probable(flags & KEVENT_FLAG_WORKQ)) {
+ error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
+ } else {
+ error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
+ }
+ if (__improbable(error)) {
+ return error;
+ }
- queue = knote_get_queue(kn);
- TAILQ_REMOVE(queue, kn, kn_tqe);
- kn->kn_status &= ~KN_QUEUED;
- kq->kq_count--;
- knote_update_sync_override_state(kn);
+ return kevent_modern_internal(kq, uap->changelist, uap->nchanges,
+ uap->eventlist, uap->nevents, flags, kectx, retval);
}
-void
-knote_init(void)
+#pragma mark legacy syscalls: kevent, kevent64
+
+/*!
+ * @function kevent_legacy_get_deadline
+ *
+ * @brief
+ * Compute the deadline for the legacy kevent syscalls.
+ *
+ * @discussion
+ * This is not necessary if KEVENT_FLAG_IMMEDIATE is specified,
+ * as this takes precedence over the deadline.
+ *
+ * This function will fail if utimeout is USER_ADDR_NULL
+ * (the caller should check).
+ */
+static int
+kevent_legacy_get_deadline(int flags, user_addr_t utimeout, uint64_t *deadline)
{
- knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote),
- 8192, "knote zone");
+ struct timespec ts;
- kqfile_zone = zinit(sizeof(struct kqfile), 8192*sizeof(struct kqfile),
- 8192, "kqueue file zone");
+ if (flags & KEVENT_FLAG_PROC64) {
+ struct user64_timespec ts64;
+ int error = copyin(utimeout, &ts64, sizeof(ts64));
+ if (__improbable(error)) {
+ return error;
+ }
+ ts.tv_sec = ts64.tv_sec;
+ ts.tv_nsec = ts64.tv_nsec;
+ } else {
+ struct user32_timespec ts32;
+ int error = copyin(utimeout, &ts32, sizeof(ts32));
+ if (__improbable(error)) {
+ return error;
+ }
+ ts.tv_sec = ts32.tv_sec;
+ ts.tv_nsec = ts32.tv_nsec;
+ }
+ if (!timespec_is_valid(&ts)) {
+ return EINVAL;
+ }
- kqworkq_zone = zinit(sizeof(struct kqworkq), 8192*sizeof(struct kqworkq),
- 8192, "kqueue workq zone");
+ clock_absolutetime_interval_to_deadline(tstoabstime(&ts), deadline);
+ return 0;
+}
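+
+/*
+ * For illustration: a relative timeout of { .tv_sec = 1, .tv_nsec = 500000000 }
+ * copied in above is turned into a mach absolute time interval by tstoabstime()
+ * and anchored to the current time by clock_absolutetime_interval_to_deadline(),
+ * so kqueue_scan() sees an absolute deadline roughly 1.5 seconds in the future.
+ */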
- kqworkloop_zone = zinit(sizeof(struct kqworkloop), 8192*sizeof(struct kqworkloop),
- 8192, "kqueue workloop zone");
+/*!
+ * @function kevent_legacy_internal
+ *
+ * @brief
+ * The core implementation for kevent and kevent64
+ */
+OS_NOINLINE
+static int
+kevent_legacy_internal(struct proc *p, struct kevent64_args *uap,
+ int32_t *retval, int flags)
+{
+ uthread_t uth = current_uthread();
+ kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
+ struct kqueue *kq;
+ int error;
- /* allocate kq lock group attribute and group */
- kq_lck_grp_attr = lck_grp_attr_alloc_init();
+ if (__improbable(uap->flags & KEVENT_ID_FLAG_USER)) {
+ return EINVAL;
+ }
- kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr);
+ flags = kevent_adjust_flags_for_proc(p, flags);
- /* Allocate kq lock attribute */
- kq_lck_attr = lck_attr_alloc_init();
+ kectx->kec_data_out = 0;
+ kectx->kec_data_avail = 0;
+ kectx->kec_data_size = 0;
+ kectx->kec_data_resid = 0;
+ kectx->kec_deadline = 0;
+ kectx->kec_fp = NULL;
+ kectx->kec_fd = uap->fd;
+ /* the kec_process_* fields are only filled if kqueue_scan is called */
- /* Initialize the timer filter lock */
- lck_mtx_init(&_filt_timerlock, kq_lck_grp, kq_lck_attr);
+ /* convert timeout to absolute - if we have one (and not immediate) */
+ if (__improbable(uap->timeout && !(flags & KEVENT_FLAG_IMMEDIATE))) {
+ error = kevent_legacy_get_deadline(flags, uap->timeout,
+ &kectx->kec_deadline);
+ if (__improbable(error)) {
+ return error;
+ }
+ }
- /* Initialize the user filter lock */
- lck_spin_init(&_filt_userlock, kq_lck_grp, kq_lck_attr);
+ /* get the kq we are going to be working on */
+ if (flags & KEVENT_FLAG_WORKQ) {
+ error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
+ } else {
+ error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
+ }
+ if (__improbable(error)) {
+ return error;
+ }
-#if CONFIG_MEMORYSTATUS
- /* Initialize the memorystatus list lock */
- memorystatus_kevent_init(kq_lck_grp, kq_lck_attr);
-#endif
+ return kevent_internal(kq, uap->changelist, uap->nchanges,
+ uap->eventlist, uap->nevents, flags, kectx, retval,
+ /*legacy*/ true);
}
-SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
-const struct filterops *
-knote_fops(struct knote *kn)
+/*!
+ * @function kevent
+ *
+ * @brief
+ * The legacy kevent() syscall.
+ */
+int
+kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
{
- return sysfilt_ops[kn->kn_filtid];
-}
+ struct kevent64_args args = {
+ .fd = uap->fd,
+ .changelist = uap->changelist,
+ .nchanges = uap->nchanges,
+ .eventlist = uap->eventlist,
+ .nevents = uap->nevents,
+ .timeout = uap->timeout,
+ };
-static struct knote *
-knote_alloc(void)
-{
- struct knote *kn;
- kn = ((struct knote *)zalloc(knote_zone));
- *kn = (struct knote) { .kn_qos_override = 0, .kn_qos_sync_override = 0, .kn_qos_override_is_sync = 0 };
- return kn;
+ return kevent_legacy_internal(p, &args, retval, KEVENT_FLAG_LEGACY32);
}
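+
+/*
+ * Minimal caller-side sketch of the legacy path above (standard BSD kevent()
+ * usage with <sys/event.h>; `fd` stands for any kqueue-watchable descriptor):
+ *
+ *     int kq = kqueue();
+ *     struct kevent ev;
+ *     EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
+ *     kevent(kq, &ev, 1, NULL, 0, NULL);             // register only
+ *     struct kevent out;
+ *     int n = kevent(kq, NULL, 0, &out, 1, NULL);    // block for one event
+ */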
-static void
-knote_free(struct knote *kn)
+/*!
+ * @function kevent64
+ *
+ * @brief
+ * The legacy kevent64() syscall.
+ */
+int
+kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
{
- zfree(knote_zone, kn);
+ int flags = (uap->flags & KEVENT_FLAG_USER) | KEVENT_FLAG_LEGACY64;
+ return kevent_legacy_internal(p, uap, retval, flags);
}
+#pragma mark - socket interface
+
#if SOCKETS
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#ifndef ROUNDUP64
-#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
+#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif
#ifndef ADVANCE64
-#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
+#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif
static lck_grp_attr_t *kev_lck_grp_attr;
static lck_attr_t *kev_lck_attr;
static lck_grp_t *kev_lck_grp;
-static decl_lck_rw_data(,kev_lck_data);
+static decl_lck_rw_data(, kev_lck_data);
static lck_rw_t *kev_rwlock = &kev_lck_data;
static int kev_attach(struct socket *so, int proto, struct proc *p);
static void kev_delete(struct kern_event_pcb *);
static struct pr_usrreqs event_usrreqs = {
- .pru_attach = kev_attach,
- .pru_control = kev_control,
- .pru_detach = kev_detach,
- .pru_soreceive = soreceive,
+ .pru_attach = kev_attach,
+ .pru_control = kev_control,
+ .pru_detach = kev_detach,
+ .pru_soreceive = soreceive,
};
static struct protosw eventsw[] = {
-{
- .pr_type = SOCK_RAW,
- .pr_protocol = SYSPROTO_EVENT,
- .pr_flags = PR_ATOMIC,
- .pr_usrreqs = &event_usrreqs,
- .pr_lock = event_lock,
- .pr_unlock = event_unlock,
- .pr_getlock = event_getlock,
-}
+ {
+ .pr_type = SOCK_RAW,
+ .pr_protocol = SYSPROTO_EVENT,
+ .pr_flags = PR_ATOMIC,
+ .pr_usrreqs = &event_usrreqs,
+ .pr_lock = event_lock,
+ .pr_unlock = event_unlock,
+ .pr_getlock = event_getlock,
+ }
};
__private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS;
__private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS;
SYSCTL_NODE(_net_systm, OID_AUTO, kevt,
- CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel event family");
+ CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel event family");
struct kevtstat kevtstat;
SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats,
kevt_getstat, "S,kevtstat", "");
SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist,
- CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
- kevt_pcblist, "S,xkevtpcb", "");
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+ kevt_pcblist, "S,xkevtpcb", "");
static lck_mtx_t *
event_getlock(struct socket *so, int flags)
#pragma unused(flags)
struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
- if (so->so_pcb != NULL) {
- if (so->so_usecount < 0)
+ if (so->so_pcb != NULL) {
+ if (so->so_usecount < 0) {
panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
so, so->so_usecount, solockhistory_nr(so));
- /* NOTREACHED */
+ }
+ /* NOTREACHED */
} else {
panic("%s: so=%p NULL NO so_pcb %s\n", __func__,
so, solockhistory_nr(so));
/* NOTREACHED */
}
- return (&ev_pcb->evp_mtx);
+ return &ev_pcb->evp_mtx;
}
static int
{
void *lr_saved;
- if (lr == NULL)
+ if (lr == NULL) {
lr_saved = __builtin_return_address(0);
- else
+ } else {
lr_saved = lr;
+ }
if (so->so_pcb != NULL) {
lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
- } else {
+ } else {
panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
}
- if (refcount)
+ if (refcount) {
so->so_usecount++;
+ }
so->lock_lr[so->next_lock_lr] = lr_saved;
- so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
- return (0);
+ so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
+ return 0;
}
static int
void *lr_saved;
lck_mtx_t *mutex_held;
- if (lr == NULL)
+ if (lr == NULL) {
lr_saved = __builtin_return_address(0);
- else
+ } else {
lr_saved = lr;
+ }
if (refcount) {
so->so_usecount--;
LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
so->unlock_lr[so->next_unlock_lr] = lr_saved;
- so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
+ so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
if (so->so_usecount == 0) {
VERIFY(so->so_flags & SOF_PCBCLEARING);
lck_mtx_unlock(mutex_held);
}
- return (0);
+ return 0;
}
static int
kev_delete(ev_pcb);
sofreelastref(so, 1);
- return (0);
+ return 0;
}
-static int event_proto_count = (sizeof (eventsw) / sizeof (struct protosw));
+static int event_proto_count = (sizeof(eventsw) / sizeof(struct protosw));
static
struct kern_event_head kern_event_head;
static u_int32_t static_event_id = 0;
-#define EVPCB_ZONE_MAX 65536
-#define EVPCB_ZONE_NAME "kerneventpcb"
+#define EVPCB_ZONE_MAX 65536
+#define EVPCB_ZONE_NAME "kerneventpcb"
static struct zone *ev_pcb_zone;
/*
/* NOTREACHED */
}
- for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++)
+ for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++) {
net_add_proto(pr, dp, 1);
+ }
ev_pcb_zone = zinit(sizeof(struct kern_event_pcb),
EVPCB_ZONE_MAX * sizeof(struct kern_event_pcb), 0, EVPCB_ZONE_NAME);
struct kern_event_pcb *ev_pcb;
error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
- if (error != 0)
- return (error);
+ if (error != 0) {
+ return error;
+ }
if ((ev_pcb = (struct kern_event_pcb *)zalloc(ev_pcb_zone)) == NULL) {
- return (ENOBUFS);
+ return ENOBUFS;
}
bzero(ev_pcb, sizeof(struct kern_event_pcb));
lck_mtx_init(&ev_pcb->evp_mtx, kev_lck_grp, kev_lck_attr);
kevtstat.kes_gencnt++;
lck_rw_done(kev_rwlock);
- return (error);
+ return error;
}
static void
so->so_flags |= SOF_PCBCLEARING;
}
- return (0);
+ return 0;
}
/*
* For now, kev_vendor_code and mbuf_tags use the same
* mechanism.
*/
-errno_t kev_vendor_code_find(
- const char *string,
- u_int32_t *out_vendor_code)
+errno_t
+kev_vendor_code_find(
+ const char *string,
+ u_int32_t *out_vendor_code)
{
if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
- return (EINVAL);
+ return EINVAL;
}
- return (net_str_id_find_internal(string, out_vendor_code,
- NSI_VENDOR_CODE, 1));
+ return net_str_id_find_internal(string, out_vendor_code,
+ NSI_VENDOR_CODE, 1);
}
errno_t
net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);
- if (event_msg == NULL)
- return (EINVAL);
+ if (event_msg == NULL) {
+ return EINVAL;
+ }
- /*
+ /*
* Limit third parties to posting events for registered vendor codes
* only
*/
if (event_msg->vendor_code < min_vendor ||
event_msg->vendor_code > max_vendor) {
- OSIncrementAtomic64((SInt64 *)&kevtstat.kes_badvendor);
- return (EINVAL);
+ os_atomic_inc(&kevtstat.kes_badvendor, relaxed);
+ return EINVAL;
}
- return (kev_post_msg(event_msg));
+ return kev_post_msg(event_msg);
}
int
total_size = KEV_MSG_HEADER_SIZE;
for (i = 0; i < 5; i++) {
- if (event_msg->dv[i].data_length == 0)
+ if (event_msg->dv[i].data_length == 0) {
break;
+ }
total_size += event_msg->dv[i].data_length;
}
if (total_size > MLEN) {
- OSIncrementAtomic64((SInt64 *)&kevtstat.kes_toobig);
- return (EMSGSIZE);
+ os_atomic_inc(&kevtstat.kes_toobig, relaxed);
+ return EMSGSIZE;
}
m = m_get(M_WAIT, MT_DATA);
if (m == 0) {
- OSIncrementAtomic64((SInt64 *)&kevtstat.kes_nomem);
- return (ENOMEM);
+ os_atomic_inc(&kevtstat.kes_nomem, relaxed);
+ return ENOMEM;
}
ev = mtod(m, struct kern_event_msg *);
total_size = KEV_MSG_HEADER_SIZE;
tmp = (char *) &ev->event_data[0];
for (i = 0; i < 5; i++) {
- if (event_msg->dv[i].data_length == 0)
+ if (event_msg->dv[i].data_length == 0) {
break;
+ }
total_size += event_msg->dv[i].data_length;
bcopy(event_msg->dv[i].data_ptr, tmp,
m2 = m_copym(m, 0, m->m_len, M_WAIT);
if (m2 == 0) {
- OSIncrementAtomic64((SInt64 *)&kevtstat.kes_nomem);
+ os_atomic_inc(&kevtstat.kes_nomem, relaxed);
m_free(m);
lck_mtx_unlock(&ev_pcb->evp_mtx);
lck_rw_done(kev_rwlock);
- return (ENOMEM);
+ return ENOMEM;
}
if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) {
/*
1, m->m_len, MBUF_TC_BE);
sorwakeup(ev_pcb->evp_socket);
- OSIncrementAtomic64((SInt64 *)&kevtstat.kes_posted);
+ os_atomic_inc(&kevtstat.kes_posted, relaxed);
} else {
- OSIncrementAtomic64((SInt64 *)&kevtstat.kes_fullsock);
+ os_atomic_inc(&kevtstat.kes_fullsock, relaxed);
}
lck_mtx_unlock(&ev_pcb->evp_mtx);
}
m_free(m);
lck_rw_done(kev_rwlock);
- return (0);
+ return 0;
}
static int
u_int32_t *id_value = (u_int32_t *) data;
switch (cmd) {
- case SIOCGKEVID:
- *id_value = static_event_id;
- break;
- case SIOCSKEVFILT:
- ev_pcb = (struct kern_event_pcb *) so->so_pcb;
- ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
- ev_pcb->evp_class_filter = kev_req->kev_class;
- ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
- break;
- case SIOCGKEVFILT:
- ev_pcb = (struct kern_event_pcb *) so->so_pcb;
- kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
- kev_req->kev_class = ev_pcb->evp_class_filter;
- kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
- break;
- case SIOCGKEVVENDOR:
- kev_vendor = (struct kev_vendor_code *)data;
- /* Make sure string is NULL terminated */
- kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN-1] = 0;
- return (net_str_id_find_internal(kev_vendor->vendor_string,
- &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0));
- default:
- return (ENOTSUP);
+ case SIOCGKEVID:
+ *id_value = static_event_id;
+ break;
+ case SIOCSKEVFILT:
+ ev_pcb = (struct kern_event_pcb *) so->so_pcb;
+ ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
+ ev_pcb->evp_class_filter = kev_req->kev_class;
+ ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
+ break;
+ case SIOCGKEVFILT:
+ ev_pcb = (struct kern_event_pcb *) so->so_pcb;
+ kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
+ kev_req->kev_class = ev_pcb->evp_class_filter;
+ kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
+ break;
+ case SIOCGKEVVENDOR:
+ kev_vendor = (struct kev_vendor_code *)data;
+ /* Make sure string is NULL terminated */
+ kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN - 1] = 0;
+ return net_str_id_find_internal(kev_vendor->vendor_string,
+ &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0);
+ default:
+ return ENOTSUP;
}
- return (0);
+ return 0;
}
int
done:
lck_rw_done(kev_rwlock);
- return (error);
+ return error;
}
__private_extern__ int
int n, i;
struct xsystmgen xsg;
void *buf = NULL;
- size_t item_size = ROUNDUP64(sizeof (struct xkevtpcb)) +
- ROUNDUP64(sizeof (struct xsocket_n)) +
- 2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
- ROUNDUP64(sizeof (struct xsockstat_n));
+ size_t item_size = ROUNDUP64(sizeof(struct xkevtpcb)) +
+ ROUNDUP64(sizeof(struct xsocket_n)) +
+ 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
+ ROUNDUP64(sizeof(struct xsockstat_n));
struct kern_event_pcb *ev_pcb;
buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
- if (buf == NULL)
- return (ENOMEM);
+ if (buf == NULL) {
+ return ENOMEM;
+ }
lck_rw_lock_shared(kev_rwlock);
n = kevtstat.kes_pcbcount;
if (req->oldptr == USER_ADDR_NULL) {
- req->oldidx = (n + n/8) * item_size;
+ req->oldidx = (n + n / 8) * item_size;
goto done;
}
if (req->newptr != USER_ADDR_NULL) {
error = EPERM;
goto done;
}
- bzero(&xsg, sizeof (xsg));
- xsg.xg_len = sizeof (xsg);
+ bzero(&xsg, sizeof(xsg));
+ xsg.xg_len = sizeof(xsg);
xsg.xg_count = n;
xsg.xg_gen = kevtstat.kes_gencnt;
xsg.xg_sogen = so_gencnt;
- error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
+ error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
if (error) {
goto done;
}
i++, ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
struct xkevtpcb *xk = (struct xkevtpcb *)buf;
struct xsocket_n *xso = (struct xsocket_n *)
- ADVANCE64(xk, sizeof (*xk));
+ ADVANCE64(xk, sizeof(*xk));
struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
- ADVANCE64(xso, sizeof (*xso));
+ ADVANCE64(xso, sizeof(*xso));
struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
- ADVANCE64(xsbrcv, sizeof (*xsbrcv));
+ ADVANCE64(xsbrcv, sizeof(*xsbrcv));
struct xsockstat_n *xsostats = (struct xsockstat_n *)
- ADVANCE64(xsbsnd, sizeof (*xsbsnd));
+ ADVANCE64(xsbsnd, sizeof(*xsbsnd));
bzero(buf, item_size);
sotoxsocket_n(ev_pcb->evp_socket, xso);
sbtoxsockbuf_n(ev_pcb->evp_socket ?
- &ev_pcb->evp_socket->so_rcv : NULL, xsbrcv);
+ &ev_pcb->evp_socket->so_rcv : NULL, xsbrcv);
sbtoxsockbuf_n(ev_pcb->evp_socket ?
- &ev_pcb->evp_socket->so_snd : NULL, xsbsnd);
+ &ev_pcb->evp_socket->so_snd : NULL, xsbsnd);
sbtoxsockstat_n(ev_pcb->evp_socket, xsostats);
lck_mtx_unlock(&ev_pcb->evp_mtx);
* while we were processing this request, and it
* might be necessary to retry.
*/
- bzero(&xsg, sizeof (xsg));
- xsg.xg_len = sizeof (xsg);
+ bzero(&xsg, sizeof(xsg));
+ xsg.xg_len = sizeof(xsg);
xsg.xg_count = n;
xsg.xg_gen = kevtstat.kes_gencnt;
xsg.xg_sogen = so_gencnt;
- error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
+ error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
if (error) {
goto done;
}
done:
lck_rw_done(kev_rwlock);
- return (error);
+ return error;
}
#endif /* SOCKETS */
st = &kinfo->kq_stat;
st->vst_size = kq->kq_count;
- if (kq->kq_state & KQ_KEV_QOS)
+ if (kq->kq_state & KQ_KEV_QOS) {
st->vst_blksize = sizeof(struct kevent_qos_s);
- else if (kq->kq_state & KQ_KEV64)
+ } else if (kq->kq_state & KQ_KEV64) {
st->vst_blksize = sizeof(struct kevent64_s);
- else
+ } else {
st->vst_blksize = sizeof(struct kevent);
+ }
st->vst_mode = S_IFIFO;
st->vst_ino = (kq->kq_state & KQ_DYNAMIC) ?
- ((struct kqworkloop *)kq)->kqwl_dynamicid : 0;
+ ((struct kqworkloop *)kq)->kqwl_dynamicid : 0;
/* flags exported to libproc as PROC_KQUEUE_* (sys/proc_info.h) */
#define PROC_KQUEUE_MASK (KQ_SEL|KQ_SLEEP|KQ_KEV32|KQ_KEV64|KQ_KEV_QOS|KQ_WORKQ|KQ_WORKLOOP)
kinfo->kq_state = kq->kq_state & PROC_KQUEUE_MASK;
- return (0);
+ return 0;
}
static int
-fill_kqueue_dyninfo(struct kqueue *kq, struct kqueue_dyninfo *kqdi)
+fill_kqueue_dyninfo(struct kqworkloop *kqwl, struct kqueue_dyninfo *kqdi)
{
- struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- struct kqrequest *kqr = &kqwl->kqwl_request;
+ workq_threadreq_t kqr = &kqwl->kqwl_request;
+ workq_threadreq_param_t trp = {};
int err;
- if ((kq->kq_state & KQ_WORKLOOP) == 0) {
+ if ((kqwl->kqwl_state & KQ_WORKLOOP) == 0) {
return EINVAL;
}
- if ((err = fill_kqueueinfo(kq, &kqdi->kqdi_info))) {
+ if ((err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi->kqdi_info))) {
return err;
}
- kqwl_req_lock(kqwl);
+ kqlock(kqwl);
- if (kqr->kqr_thread) {
- kqdi->kqdi_servicer = thread_tid(kqr->kqr_thread);
+ kqdi->kqdi_servicer = thread_tid(kqr_thread(kqr));
+ kqdi->kqdi_owner = thread_tid(kqwl->kqwl_owner);
+ kqdi->kqdi_request_state = kqr->tr_state;
+ kqdi->kqdi_async_qos = kqr->tr_kq_qos_index;
+ kqdi->kqdi_events_qos = kqr->tr_kq_override_index;
+ kqdi->kqdi_sync_waiters = 0;
+ kqdi->kqdi_sync_waiter_qos = 0;
+
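+	/* decode the creation-time workloop parameters (priority, policy, CPU percent) from kqwl_params */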
+ trp.trp_value = kqwl->kqwl_params;
+ if (trp.trp_flags & TRP_PRIORITY) {
+ kqdi->kqdi_pri = trp.trp_pri;
+ } else {
+ kqdi->kqdi_pri = 0;
}
- if (kqwl->kqwl_owner == WL_OWNER_SUSPENDED) {
- kqdi->kqdi_owner = ~0ull;
+ if (trp.trp_flags & TRP_POLICY) {
+ kqdi->kqdi_pol = trp.trp_pol;
} else {
- kqdi->kqdi_owner = thread_tid(kqwl->kqwl_owner);
+ kqdi->kqdi_pol = 0;
}
- kqdi->kqdi_request_state = kqr->kqr_state;
- kqdi->kqdi_async_qos = kqr->kqr_qos_index;
- kqdi->kqdi_events_qos = kqr->kqr_override_index;
- kqdi->kqdi_sync_waiters = kqr->kqr_dsync_waiters;
- kqdi->kqdi_sync_waiter_qos = kqr->kqr_dsync_waiters_qos;
+ if (trp.trp_flags & TRP_CPUPERCENT) {
+ kqdi->kqdi_cpupercent = trp.trp_cpupercent;
+ } else {
+ kqdi->kqdi_cpupercent = 0;
+ }
- kqwl_req_unlock(kqwl);
+ kqunlock(kqwl);
return 0;
}
knote_markstayactive(struct knote *kn)
{
struct kqueue *kq = knote_get_kq(kn);
+ kq_index_t qos;
kqlock(kq);
kn->kn_status |= KN_STAYACTIVE;
* Making a knote stay active is a property of the knote that must be
* established before it is fully attached.
*/
- assert(kn->kn_status & KN_ATTACHING);
+ assert((kn->kn_status & (KN_QUEUED | KN_SUPPRESSED)) == 0);
/* handle all stayactive knotes on the (appropriate) manager */
- if (kq->kq_state & KQ_WORKQ) {
- knote_set_qos_index(kn, KQWQ_QOS_MANAGER);
- } else if (kq->kq_state & KQ_WORKLOOP) {
+ if (kq->kq_state & KQ_WORKLOOP) {
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
- kqwl_req_lock(kqwl);
- assert(kn->kn_req_index && kn->kn_req_index < THREAD_QOS_LAST);
- kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_STAYACTIVE_QOS,
- kn->kn_req_index);
- kqwl_req_unlock(kqwl);
- knote_set_qos_index(kn, KQWL_BUCKET_STAYACTIVE);
+
+ qos = _pthread_priority_thread_qos(kn->kn_qos);
+ assert(qos && qos < THREAD_QOS_LAST);
+ kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_STAYACTIVE_QOS, qos);
+ qos = KQWL_BUCKET_STAYACTIVE;
+ } else if (kq->kq_state & KQ_WORKQ) {
+ qos = KQWQ_QOS_MANAGER;
+ } else {
+ qos = THREAD_QOS_UNSPECIFIED;
}
- knote_activate(kn);
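+	/* record the chosen QoS in both the override and the index before activating the knote */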
+ kn->kn_qos_override = qos;
+ kn->kn_qos_index = qos;
+
+ knote_activate(kq, kn, FILTER_ACTIVE);
kqunlock(kq);
}
void
knote_clearstayactive(struct knote *kn)
{
- kqlock(knote_get_kq(kn));
- kn->kn_status &= ~KN_STAYACTIVE;
- knote_deactivate(kn);
- kqunlock(knote_get_kq(kn));
+ struct kqueue *kq = knote_get_kq(kn);
+ kqlock(kq);
+ kn->kn_status &= ~(KN_STAYACTIVE | KN_ACTIVE);
+ knote_dequeue(kq, kn);
+ kqunlock(kq);
}
static unsigned long
kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *buf,
- unsigned long buflen, unsigned long nknotes)
+ unsigned long buflen, unsigned long nknotes)
{
for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
if (kq == knote_get_kq(kn)) {
if (nknotes < buflen) {
struct kevent_extinfo *info = &buf[nknotes];
- struct kevent_internal_s *kevp = &kn->kn_kevent;
kqlock(kq);
- info->kqext_kev = (struct kevent_qos_s){
- .ident = kevp->ident,
- .filter = kevp->filter,
- .flags = kevp->flags,
- .fflags = kevp->fflags,
- .data = (int64_t)kevp->data,
- .udata = kevp->udata,
- .ext[0] = kevp->ext[0],
- .ext[1] = kevp->ext[1],
- .ext[2] = kevp->ext[2],
- .ext[3] = kevp->ext[3],
- .qos = kn->kn_req_index,
- };
- info->kqext_sdata = kn->kn_sdata;
- info->kqext_status = kn->kn_status;
- info->kqext_sfflags = kn->kn_sfflags;
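+			/* copy the kevent wholesale, then overwrite the fields below that overlap kernel-internal state */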
+ info->kqext_kev = *(struct kevent_qos_s *)&kn->kn_kevent;
+ if (knote_has_qos(kn)) {
+ info->kqext_kev.qos =
+ _pthread_priority_thread_qos_fast(kn->kn_qos);
+ } else {
+ info->kqext_kev.qos = kn->kn_qos_override;
+ }
+ info->kqext_kev.filter |= 0xff00; /* sign extend filter */
+ info->kqext_kev.xflags = 0; /* this is where sfflags lives */
+ info->kqext_kev.data = 0; /* this is where sdata lives */
+ info->kqext_sdata = kn->kn_sdata;
+ info->kqext_status = kn->kn_status;
+ info->kqext_sfflags = kn->kn_sfflags;
kqunlock(kq);
}
int
kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize,
- int32_t *nkqueues_out)
+ int32_t *nkqueues_out)
{
proc_t p = (proc_t)proc;
struct filedesc *fdp = p->p_fd;
goto out;
}
kq_ids = kalloc(bufsize);
- assert(kq_ids != NULL);
+ if (!kq_ids) {
+ err = ENOMEM;
+ goto out;
+ }
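+		/* kalloc() does not zero; clear the buffer before filling it in */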
+ bzero(kq_ids, bufsize);
}
- kqhash_lock(p);
+ kqhash_lock(fdp);
if (fdp->fd_kqhashmask > 0) {
for (uint32_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
struct kqworkloop *kqwl;
- SLIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
+ LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
/* report the number of kqueues, even if they don't all fit */
if (nkqueues < buflen) {
kq_ids[nkqueues] = kqwl->kqwl_dynamicid;
}
}
- kqhash_unlock(p);
+ kqhash_unlock(fdp);
if (kq_ids) {
size_t copysize;
-		if (os_mul_overflow(sizeof(kqueue_id_t), min(ubuflen, nkqueues), &copysize)) {
+		if (os_mul_overflow(sizeof(kqueue_id_t), min(buflen, nkqueues), &copysize)) {
err = ERANGE;
goto out;
}
int
kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
- uint32_t ubufsize, int32_t *size_out)
+ uint32_t ubufsize, int32_t *size_out)
{
proc_t p = (proc_t)proc;
- struct kqueue *kq;
+ struct kqworkloop *kqwl;
int err = 0;
struct kqueue_dyninfo kqdi = { };
return ENOBUFS;
}
- kqhash_lock(p);
- kq = kqueue_hash_lookup(p, kq_id);
- if (!kq) {
- kqhash_unlock(p);
+ kqwl = kqworkloop_hash_lookup_and_retain(p->p_fd, kq_id);
+ if (!kqwl) {
return ESRCH;
}
- kqueue_retain(kq);
- kqhash_unlock(p);
/*
* backward compatibility: allow the argument to this call to only be
*/
if (ubufsize >= sizeof(struct kqueue_dyninfo)) {
ubufsize = sizeof(struct kqueue_dyninfo);
- err = fill_kqueue_dyninfo(kq, &kqdi);
+ err = fill_kqueue_dyninfo(kqwl, &kqdi);
} else {
ubufsize = sizeof(struct kqueue_info);
- err = fill_kqueueinfo(kq, &kqdi.kqdi_info);
+ err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi.kqdi_info);
}
if (err == 0 && (err = copyout(&kqdi, ubuf, ubufsize)) == 0) {
*size_out = ubufsize;
}
- kqueue_release_last(p, kq);
+ kqworkloop_release(kqwl);
return err;
}
int
kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
- uint32_t ubufsize, int32_t *nknotes_out)
+ uint32_t ubufsize, int32_t *nknotes_out)
{
proc_t p = (proc_t)proc;
- struct kqueue *kq;
+ struct kqworkloop *kqwl;
int err;
- assert(p != NULL);
-
- kqhash_lock(p);
- kq = kqueue_hash_lookup(p, kq_id);
- if (!kq) {
- kqhash_unlock(p);
+ kqwl = kqworkloop_hash_lookup_and_retain(p->p_fd, kq_id);
+ if (!kqwl) {
return ESRCH;
}
- kqueue_retain(kq);
- kqhash_unlock(p);
- err = pid_kqueue_extinfo(p, kq, ubuf, ubufsize, nknotes_out);
- kqueue_release_last(p, kq);
+ err = pid_kqueue_extinfo(p, &kqwl->kqwl_kqueue, ubuf, ubufsize, nknotes_out);
+ kqworkloop_release(kqwl);
return err;
}
int
pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf,
- uint32_t bufsize, int32_t *retval)
+ uint32_t bufsize, int32_t *retval)
{
struct knote *kn;
int i;
if (fdp->fd_knhashmask != 0) {
for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
- kqhash_lock(p);
+ knhash_lock(fdp);
kn = SLIST_FIRST(&fdp->fd_knhash[i]);
nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
- kqhash_unlock(p);
+ knhash_unlock(fdp);
}
}
assert(bufsize >= sizeof(struct kevent_extinfo) * min(buflen, nknotes));
err = copyout(kqext, ubuf, sizeof(struct kevent_extinfo) * min(buflen, nknotes));
- out:
+out:
if (kqext) {
kfree(kqext, buflen * sizeof(struct kevent_extinfo));
kqext = NULL;
static unsigned int
klist_copy_udata(struct klist *list, uint64_t *buf,
- unsigned int buflen, unsigned int nknotes)
+ unsigned int buflen, unsigned int nknotes)
{
- struct kevent_internal_s *kev;
struct knote *kn;
SLIST_FOREACH(kn, list, kn_link) {
if (nknotes < buflen) {
- struct kqueue *kq = knote_get_kq(kn);
- kqlock(kq);
- kev = &(kn->kn_kevent);
- buf[nknotes] = kev->udata;
- kqunlock(kq);
+ /*
+ * kevent_register will always set kn_udata atomically
+ * so that we don't have to take any kqlock here.
+ */
+ buf[nknotes] = os_atomic_load_wide(&kn->kn_udata, relaxed);
}
/* we return total number of knotes, which may be more than requested */
nknotes++;
return nknotes;
}
-static unsigned int
-kqlist_copy_dynamicids(__assert_only proc_t p, struct kqlist *list,
- uint64_t *buf, unsigned int buflen, unsigned int nids)
-{
- kqhash_lock_held(p);
- struct kqworkloop *kqwl;
- SLIST_FOREACH(kqwl, list, kqwl_hashlink) {
- if (nids < buflen) {
- buf[nids] = kqwl->kqwl_dynamicid;
- }
- nids++;
- }
- return nids;
-}
-
int
kevent_proc_copy_uptrs(void *proc, uint64_t *buf, int bufsize)
{
struct filedesc *fdp = p->p_fd;
unsigned int nuptrs = 0;
unsigned long buflen = bufsize / sizeof(uint64_t);
+ struct kqworkloop *kqwl;
if (buflen > 0) {
assert(buf != NULL);
for (int i = 0; i < fdp->fd_knlistsize; i++) {
nuptrs = klist_copy_udata(&fdp->fd_knlist[i], buf, buflen, nuptrs);
}
- knhash_lock(p);
proc_fdunlock(p);
+
+ knhash_lock(fdp);
if (fdp->fd_knhashmask != 0) {
- for (int i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
+ for (size_t i = 0; i < fdp->fd_knhashmask + 1; i++) {
nuptrs = klist_copy_udata(&fdp->fd_knhash[i], buf, buflen, nuptrs);
}
}
- knhash_unlock(p);
+ knhash_unlock(fdp);
- kqhash_lock(p);
+ kqhash_lock(fdp);
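+	/* workloops are dynamic kqueues hashed by ID in fd_kqhash; report their dynamic IDs too */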
if (fdp->fd_kqhashmask != 0) {
- for (int i = 0; i < (int)fdp->fd_kqhashmask + 1; i++) {
- nuptrs = kqlist_copy_dynamicids(p, &fdp->fd_kqhash[i], buf, buflen,
- nuptrs);
+ for (size_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
+ LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
+ if (nuptrs < buflen) {
+ buf[nuptrs] = kqwl->kqwl_dynamicid;
+ }
+ nuptrs++;
+ }
}
}
- kqhash_unlock(p);
+ kqhash_unlock(fdp);
return (int)nuptrs;
}
-static void
-kevent_redrive_proc_thread_request(proc_t p)
-{
- __assert_only int ret;
- ret = (*pthread_functions->workq_threadreq)(p, NULL, WORKQ_THREADREQ_REDRIVE, 0, 0);
- assert(ret == 0 || ret == ECANCELED);
-}
-
static void
kevent_set_return_to_kernel_user_tsd(proc_t p, thread_t thread)
{
uint64_t ast_flags64 = 0;
struct uthread *ut = get_bsdthread_info(thread);
- if (ut->uu_kqueue_bound != NULL) {
- if (ut->uu_kqueue_flags & KEVENT_FLAG_WORKLOOP) {
- ast_flags64 |= R2K_WORKLOOP_PENDING_EVENTS;
- } else if (ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ) {
- ast_flags64 |= R2K_WORKQ_PENDING_EVENTS;
- }
+ if (ut->uu_kqr_bound != NULL) {
+ ast_flags64 |= R2K_WORKLOOP_PENDING_EVENTS;
}
if (ast_flags64 == 0) {
}
if (copyout((proc_is_64bit ? (void *)&ast_flags64 : (void *)&ast_flags32),
- (user_addr_t)ast_addr,
- user_addr_size) != 0) {
+ (user_addr_t)ast_addr,
+ user_addr_size) != 0) {
printf("pid %d (tid:%llu): copyout of return_to_kernel ast flags failed with "
- "ast_addr = %llu\n", p->p_pid, thread_tid(current_thread()), ast_addr);
+ "ast_addr = %llu\n", p->p_pid, thread_tid(current_thread()), ast_addr);
}
}
proc_t p = current_proc();
if (bits & AST_KEVENT_REDRIVE_THREADREQ) {
- kevent_redrive_proc_thread_request(p);
+ workq_kern_threadreq_redrive(p, WORKQ_THREADREQ_CAN_CREATE_THREADS);
}
if (bits & AST_KEVENT_RETURN_TO_KERNEL) {
kevent_set_return_to_kernel_user_tsd(p, thread);
#pragma unused(oidp, arg2)
uintptr_t type = (uintptr_t)arg1;
uint64_t bound_id = 0;
- struct uthread *ut;
- struct kqueue *kq;
if (type != KEVENT_SYSCTL_BOUND_ID) {
return EINVAL;
return EINVAL;
}
- ut = get_bsdthread_info(current_thread());
+ struct uthread *ut = get_bsdthread_info(current_thread());
if (!ut) {
return EFAULT;
}
- kq = ut->uu_kqueue_bound;
- if (kq) {
- if (kq->kq_state & KQ_WORKLOOP) {
- bound_id = ((struct kqworkloop *)kq)->kqwl_dynamicid;
- } else if (kq->kq_state & KQ_WORKQ) {
+ workq_threadreq_t kqr = ut->uu_kqr_bound;
+ if (kqr) {
+ if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
+ bound_id = kqr_kqworkloop(kqr)->kqwl_dynamicid;
+ } else {
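+			/* bound to a workqueue thread request, which has no dynamic kqueue ID */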
bound_id = -1;
}
}
}
SYSCTL_NODE(_kern, OID_AUTO, kevent, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
- "kevent information");
+ "kevent information");
SYSCTL_PROC(_kern_kevent, OID_AUTO, bound_id,
- CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
- (void *)KEVENT_SYSCTL_BOUND_ID,
- sizeof(kqueue_id_t), kevent_sysctl, "Q",
- "get the ID of the bound kqueue");
+ CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
+ (void *)KEVENT_SYSCTL_BOUND_ID,
+ sizeof(kqueue_id_t), kevent_sysctl, "Q",
+ "get the ID of the bound kqueue");
#endif /* DEVELOPMENT || DEBUG */