diff --git a/bsd/sys/eventvar.h b/bsd/sys/eventvar.h
index 82323625f..e60eaeb86 100644
--- a/bsd/sys/eventvar.h
+++ b/bsd/sys/eventvar.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,10 +22,10 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-/*-
+/*
  * Copyright (c) 1999,2000 Jonathan Lemon
  * All rights reserved.
  *
@@ -68,8 +68,8 @@ typedef void (*kqueue_continue_t)(struct kqueue *, void *, int);
 
 #include 
 #include 
-#include 
 #include 
+#include 
 
 /*
  * Lock ordering:
@@ -100,6 +100,40 @@ typedef void (*kqueue_continue_t)(struct kqueue *, void *, int);
 
 #define KQEXTENT	256		/* linear growth by this amount */
 
+struct knote_lock_ctx {
+	struct knote                   *knlc_knote;
+	thread_t                        knlc_thread;
+	// TODO: knlc_turnstile
+	TAILQ_HEAD(, knote_lock_ctx)    knlc_head;
+	union {
+		LIST_ENTRY(knote_lock_ctx)      knlc_le;
+		TAILQ_ENTRY(knote_lock_ctx)     knlc_tqe;
+	};
+#if DEBUG || DEVELOPMENT
+#define KNOTE_LOCK_CTX_UNLOCKED 0
+#define KNOTE_LOCK_CTX_LOCKED   1
+#define KNOTE_LOCK_CTX_WAITING  2
+	int                             knlc_state;
+#endif
+};
+LIST_HEAD(knote_locks, knote_lock_ctx);
+
+#if DEBUG || DEVELOPMENT
+/*
+ * KNOTE_LOCK_CTX(name) is a convenience macro to define a knote lock context on
+ * the stack named `name`. In development kernels, it uses tricks to make sure
+ * no locks are still held when exiting the C scope that contains this context.
+ */
+__attribute__((noinline,not_tail_called))
+void knote_lock_ctx_chk(struct knote_lock_ctx *ctx);
+#define KNOTE_LOCK_CTX(n) \
+	struct knote_lock_ctx n __attribute__((cleanup(knote_lock_ctx_chk))); \
+	n.knlc_state = KNOTE_LOCK_CTX_UNLOCKED
+#else
+#define KNOTE_LOCK_CTX(n) \
+	struct knote_lock_ctx n
+#endif
+
 /*
  * kqueue - common core definition of a kqueue
  *
@@ -108,13 +142,17 @@ typedef void (*kqueue_continue_t)(struct kqueue *, void *, int);
  * derived from this definition.
  */
 struct kqueue {
-	struct waitq_set    kq_wqs;             /* private waitq set */
-	lck_spin_t          kq_lock;            /* kqueue lock */
-	uint16_t            kq_state;           /* state of the kq */
-	uint16_t            kq_level;           /* nesting level of the kq */
-	uint32_t            kq_count;           /* number of queued events */
-	struct proc        *kq_p;               /* process containing kqueue */
-	struct kqtailq      kq_queue[1];        /* variable array of kqtailq structs */
+	struct {
+		struct waitq_set    kq_wqs;         /* private waitq set */
+		lck_spin_t          kq_lock;        /* kqueue lock */
+		uint16_t            kq_state;       /* state of the kq */
+		uint16_t            kq_level;       /* nesting level of the kq */
+		uint32_t            kq_count;       /* number of queued events */
+		struct proc        *kq_p;           /* process containing kqueue */
+		struct knote_locks  kq_knlocks;     /* list of knote locks held */
+		lck_spin_t          kq_reqlock;     /* kqueue request lock */
+	}; /* make sure struct padding is put before kq_queue */
+	struct kqtailq      kq_queue[0];        /* variable array of queues */
 };
 
 #define KQ_SEL            0x001             /* select was recorded for kq */
@@ -129,7 +167,6 @@ struct kqueue {
 #define KQ_DRAIN          0x200             /* kq is draining */
 #define KQ_WAKEUP         0x400             /* kq awakened while processing */
 #define KQ_DYNAMIC        0x800             /* kqueue is dynamically managed */
-#define KQ_NO_WQ_THREAD  0x1000             /* kq will not have workqueue threads dynamically created */
 /*
  * kqfile - definition of a typical kqueue opened as a file descriptor
  * via the kqueue() system call.
@@ -139,6 +176,7 @@ struct kqueue {
  */
 struct kqfile {
	struct kqueue       kqf_kqueue;         /* common kqueue core */
+	struct kqtailq      kqf_queue;          /* queue of woken up knotes */
	struct kqtailq      kqf_suppressed;     /* suppression queue */
	struct selinfo      kqf_sel;            /* parent select/kqueue info */
 };
@@ -149,57 +187,33 @@ struct kqfile {
 #define kqf_level       kqf_kqueue.kq_level
 #define kqf_count       kqf_kqueue.kq_count
 #define kqf_p           kqf_kqueue.kq_p
-#define kqf_queue       kqf_kqueue.kq_queue
 
 #define QOS_INDEX_KQFILE    0               /* number of qos levels in a file kq */
 
-struct kqr_bound {
-	struct kqtailq      kqrb_suppressed;    /* Per-QoS suppression queues */
-	thread_t            kqrb_thread;        /* thread to satisfy request */
-};
-
 /*
  * kqrequest - per-QoS thread request status
  */
 struct kqrequest {
-#if 0
-	union {
-		struct kqr_bound kqru_bound;        /* used when thread is bound */
-		struct workq_threadreq_s kqru_req;  /* used when request outstanding */
-	} kqr_u;
-#define kqr_suppressed kqr_u.kqru_bound.kqrb_suppressed
-#define kqr_thread     kqr_u.kqru_bound.kqrb_thread
-#define kqr_req        kqr_u.kqru_req
-#else
-	struct kqr_bound kqr_bound;             /* used when thread is bound */
 	struct workq_threadreq_s kqr_req;       /* used when request outstanding */
-#define kqr_suppressed kqr_bound.kqrb_suppressed
-#define kqr_thread     kqr_bound.kqrb_thread
-#endif
-	uint8_t     kqr_state;                  /* KQ/workq interaction state */
-	uint8_t     kqr_wakeup_indexes;         /* QoS/override levels that woke */
-	uint16_t    kqr_dsync_waiters:13,       /* number of dispatch sync waiters */
-	            kqr_dsync_owner_qos:3;      /* QoS override on dispatch sync owner */
-	uint16_t    kqr_sync_suppress_count;    /* number of suppressed sync ipc knotes */
-	kq_index_t  kqr_stayactive_qos:3,       /* max QoS of stayactive knotes */
-	            kqr_owner_override_is_sync:1, /* sync owner has sync ipc override */
-	            kqr_override_index:3,       /* highest wakeup override index */
-	            kqr_has_sync_override:1;    /* QoS/override at UI is sync ipc override */
-
-	/* set under both the kqlock and the filt_wllock */
-	kq_index_t  :0;                         /* prevent bitfields coalescing */
-	kq_index_t  kqr_qos_index:4,            /* QoS for the thread request */
-	            kqr_dsync_waiters_qos:4;    /* override from dispatch sync waiters */
+	struct kqtailq   kqr_suppressed;        /* Per-QoS suppression queues */
+	thread_t         kqr_thread;            /* thread to satisfy request */
+	uint8_t          kqr_state;             /* KQ/workq interaction state */
+#define KQWL_STAYACTIVE_FIRED_BIT   (1 << 0)
+	uint8_t          kqr_wakeup_indexes;    /* QoS/override levels that woke */
+	uint16_t         kqr_dsync_waiters;     /* number of dispatch sync waiters */
+	kq_index_t       kqr_stayactive_qos;    /* max QoS of stayactive knotes */
+	kq_index_t       kqr_override_index;    /* highest wakeup override index */
+	kq_index_t       kqr_qos_index;         /* QoS for the thread request */
 };
 
-#define KQR_PROCESSING              0x01    /* requested thread is running the q */
+#define KQR_WORKLOOP                0x01    /* owner is a workloop */
 #define KQR_THREQUESTED             0x02    /* thread has been requested from workq */
 #define KQR_WAKEUP                  0x04    /* wakeup called during processing */
-#define KQR_BOUND                   0x08    /* servicing thread is bound */
-#define KQR_THOVERCOMMIT            0x20    /* overcommit needed for thread requests */
-#define KQR_DRAIN                   0x40    /* cancel initiated - drain fulfill */
-#define KQR_R2K_NOTIF_ARMED         0x80    /* ast notifications armed */
+#define KQR_THOVERCOMMIT            0x08    /* overcommit needed for thread requests */
+#define KQR_R2K_NOTIF_ARMED         0x10    /* ast notifications armed */
+#define KQR_ALLOCATED_TURNSTILE     0x20    /* kqwl_turnstile is allocated */
+
 /*
  * WorkQ kqueues need to request threads to service the triggered
  * knotes in the queue. These threads are brought up on a
@@ -213,40 +227,8 @@ struct kqrequest {
 #define KQWQ_QOS_MANAGER    (THREAD_QOS_LAST)
 #endif
 
-#if !defined(KQWQ_NQOS)
-#define KQWQ_NQOS    (KQWQ_QOS_MANAGER + 1)
-#endif
-
-/*
- * Workq threads start out at a particular effective-requested-QoS, but
- * additional events processed by the filters may represent
- * backlogged events that may themselves have a higher requested-QoS.
- * To represent this, the filter may apply an override to a knote's
- * requested QoS.
- *
- * We further segregate these overridden knotes into different buckets
- * by <requested, override> grouping. This allows easy matching of
- * knotes to process vs. the highest workq thread override applied.
- *
- * Only certain override patterns need to be supported. A knote
- * cannot have an effective-requested-QoS of UNSPECIFIED - because
- * the kevent->qos (when canonicalized) will always be above that
- * or indicate manager.  And we don't allow an override to specify
- * manager.  This results in the following buckets being needed:
- *
- *           Effective-Requested QoS
- *           MAINT  BG    UTIL  DEFAULT UINIT UINTER MANAGER
- * override:
- * MAINT      0
- * BG         1      6
- * UTILITY    2      7     11
- * DEFAULT    3      8     12     15
- * UINIT      4      9     13     16      18
- * UINTER     5     10     14     17      19     20
- *                                                    21
- */
 #if !defined(KQWQ_NBUCKETS)
-#define KQWQ_NBUCKETS    22
+#define KQWQ_NBUCKETS    (KQWQ_QOS_MANAGER + 1)
 #endif
 
 /*
@@ -259,9 +241,8 @@ struct kqrequest {
  */
 struct kqworkq {
	struct kqueue    kqwq_kqueue;
-	struct kqtailq   kqwq_queuecont[KQWQ_NBUCKETS-1];  /* continue array of queues */
-	struct kqrequest kqwq_request[KQWQ_NQOS];          /* per-QoS request states */
-	lck_spin_t       kqwq_reqlock;                     /* kqueue request lock */
+	struct kqtailq   kqwq_queue[KQWQ_NBUCKETS];        /* array of queues */
+	struct kqrequest kqwq_request[KQWQ_NBUCKETS];      /* per-QoS request states */
 };
 
 #define kqwq_wqs     kqwq_kqueue.kq_wqs
@@ -270,13 +251,6 @@ struct kqworkq {
 #define kqwq_level   kqwq_kqueue.kq_level
 #define kqwq_count   kqwq_kqueue.kq_count
 #define kqwq_p       kqwq_kqueue.kq_p
-#define kqwq_queue   kqwq_kqueue.kq_queue
-
-#define kqwq_req_lock(kqwq)     lck_spin_lock(&kqwq->kqwq_reqlock)
-#define kqwq_req_unlock(kqwq)   lck_spin_unlock(&kqwq->kqwq_reqlock)
-#define kqwq_req_held(kqwq)     LCK_SPIN_ASSERT(&kqwq->kqwq_reqlock, LCK_ASSERT_OWNED)
-
-#define KQWQ_THMANAGER    0x10    /* expect manager thread to run the queue */
 
 /*
  * WorkLoop kqueues need to request a thread to service the triggered
@@ -319,16 +293,49 @@ struct kqworkq {
  */
 struct kqworkloop {
	struct kqueue    kqwl_kqueue;           /* queue of events */
-	struct kqtailq   kqwl_queuecont[KQWL_NBUCKETS-1];  /* continue array of queues */
+	struct kqtailq   kqwl_queue[KQWL_NBUCKETS];        /* array of queues */
	struct kqrequest kqwl_request;          /* thread request state */
-	lck_spin_t       kqwl_reqlock;          /* kqueue request lock */
	lck_mtx_t        kqwl_statelock;        /* state/debounce lock */
	thread_t         kqwl_owner;            /* current [sync] owner thread */
	uint32_t         kqwl_retains;          /* retain references */
	kqueue_id_t      kqwl_dynamicid;        /* dynamic identity */
+	uint64_t         kqwl_params;           /* additional parameters */
+	struct turnstile *kqwl_turnstile;       /* turnstile for sync IPC/waiters */
	SLIST_ENTRY(kqworkloop) kqwl_hashlink;  /* linkage for search list */
+#if CONFIG_WORKLOOP_DEBUG
+#define KQWL_HISTORY_COUNT 32
+#define KQWL_HISTORY_WRITE_ENTRY(kqwl, ...)  ({ \
+		struct kqworkloop *__kqwl = (kqwl); \
+		unsigned int __index = os_atomic_inc_orig(&__kqwl->kqwl_index, relaxed); \
+		__kqwl->kqwl_history[__index % KQWL_HISTORY_COUNT] = \
+				(struct kqwl_history)__VA_ARGS__; \
+	})
+	struct kqwl_history {
+		thread_t    updater;        /* Note: updates can be reordered */
+		thread_t    servicer;
+		thread_t    old_owner;
+		thread_t    new_owner;
+
+		uint64_t    kev_ident;
+		int16_t     error;
+		uint16_t    kev_flags;
+		uint32_t    kev_fflags;
+
+		uint64_t    kev_mask;
+		uint64_t    kev_value;
+		uint64_t    in_value;
+	} kqwl_history[KQWL_HISTORY_COUNT];
+	unsigned int kqwl_index;
+#endif // CONFIG_WORKLOOP_DEBUG
 };
 
+typedef union {
+	struct kqueue     *kq;
+	struct kqworkq    *kqwq;
+	struct kqfile     *kqf;
+	struct kqworkloop *kqwl;
+} __attribute__((transparent_union)) kqueue_t;
+
 SLIST_HEAD(kqlist, kqworkloop);
 
 #define kqwl_wqs     kqwl_kqueue.kq_wqs
@@ -337,30 +344,39 @@ SLIST_HEAD(kqlist, kqworkloop);
 #define kqwl_level   kqwl_kqueue.kq_level
 #define kqwl_count   kqwl_kqueue.kq_count
 #define kqwl_p       kqwl_kqueue.kq_p
-#define kqwl_queue   kqwl_kqueue.kq_queue
-
-#define kqwl_req_lock(kqwl)     lck_spin_lock(&kqwl->kqwl_reqlock)
-#define kqwl_req_unlock(kqwl)   lck_spin_unlock(&kqwl->kqwl_reqlock)
-#define kqwl_req_held(kqwl)     LCK_SPIN_ASSERT(&kqwl->kqwl_reqlock, LCK_ASSERT_OWNED)
 
 #define KQ_WORKLOOP_RETAINS_MAX UINT32_MAX
 
-extern int workloop_fulfill_threadreq(struct proc *p, workq_threadreq_t req, thread_t thread, int flags);
+extern void kqueue_threadreq_unbind(struct proc *p, struct kqrequest *kqr);
+
+// called with the kq req held
+#define KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE 0x1
+extern void kqueue_threadreq_bind(struct proc *p, workq_threadreq_t req,
+		thread_t thread, unsigned int flags);
+
+// called with the wq lock held
+extern void kqueue_threadreq_bind_prepost(struct proc *p, workq_threadreq_t req, thread_t thread);
+
+// called with no lock held
+extern void kqueue_threadreq_bind_commit(struct proc *p, thread_t thread);
+
+extern void kqueue_threadreq_cancel(struct proc *p, workq_threadreq_t req);
+
+// lock not held as kqwl_params is immutable after creation
+extern workq_threadreq_param_t kqueue_threadreq_workloop_param(workq_threadreq_t req);
 
 extern struct kqueue *kqueue_alloc(struct proc *, unsigned int);
 extern void kqueue_dealloc(struct kqueue *);
 extern void knotes_dealloc(struct proc *);
+extern void kqworkloops_dealloc(struct proc *);
-extern void kevent_register(struct kqueue *, struct kevent_internal_s *, struct proc *);
+extern int kevent_register(struct kqueue *, struct kevent_internal_s *,
+		struct knote_lock_ctx *);
 extern int kqueue_scan(struct kqueue *, kevent_callback_t, kqueue_continue_t,
-		void *, struct filt_process_s *, struct timeval *, struct proc *);
+		void *, struct filt_process_s *, struct timeval *, struct proc *);
 extern int kqueue_stat(struct kqueue *, void *, int, proc_t);
 
 #endif /* XNU_KERNEL_PRIVATE */
 
 #endif /* !_SYS_EVENTVAR_H_ */
-
-
-
-
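
The KNOTE_LOCK_CTX() macro added in this diff leans on the compiler's cleanup attribute: in DEBUG/DEVELOPMENT kernels, knote_lock_ctx_chk() runs automatically on every exit from the scope that declared the context, so a knote lock cannot silently leak past its scope. Below is a minimal user-space sketch of the same technique; lock_ctx, lock_ctx_chk and LOCK_CTX are invented stand-ins, not xnu symbols.

#include <assert.h>

#define CTX_UNLOCKED 0
#define CTX_LOCKED   1

struct lock_ctx {
	int state;
};

/* Invoked automatically when the annotated variable goes out of scope. */
static void lock_ctx_chk(struct lock_ctx *ctx)
{
	assert(ctx->state == CTX_UNLOCKED); /* a held lock may not escape its scope */
}

#define LOCK_CTX(n) \
	struct lock_ctx n __attribute__((cleanup(lock_ctx_chk))); \
	n.state = CTX_UNLOCKED

int main(void)
{
	LOCK_CTX(knlc);
	knlc.state = CTX_LOCKED;   /* "take" the lock */
	knlc.state = CTX_UNLOCKED; /* drop it before the scope ends */
	return 0;                  /* lock_ctx_chk(&knlc) runs here */
}

gcc and clang run the cleanup handler on every scope exit, early returns included, which is what makes the check hard to bypass.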
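
The new kqueue_t type uses the transparent_union attribute so one parameter type can accept any kqueue flavor without casts; this works because every subtype embeds struct kqueue as its first member. A self-contained sketch under those assumptions (the struct bodies here are stand-ins; only the union shape mirrors the header):

#include <stdio.h>

struct kqueue     { int kq_state; };
struct kqworkq    { struct kqueue kqwq_kqueue; /* common core first */ };
struct kqworkloop { struct kqueue kqwl_kqueue; /* common core first */ };

typedef union {
	struct kqueue     *kq;
	struct kqworkq    *kqwq;
	struct kqworkloop *kqwl;
} __attribute__((transparent_union)) kqueue_t;

/* One signature serves every kqueue subtype; no casts at call sites. */
static int kqueue_state(kqueue_t kqu)
{
	return kqu.kq->kq_state; /* valid: all members share the common core */
}

int main(void)
{
	struct kqworkloop kqwl = { .kqwl_kqueue = { .kq_state = 42 } };
	printf("%d\n", kqueue_state(&kqwl)); /* passes a struct kqworkloop * directly */
	return 0;
}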
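
KQWL_HISTORY_WRITE_ENTRY() implements a lock-free debug ring: a writer claims the next slot with an atomic fetch-and-increment (os_atomic_inc_orig) and fills the slot afterwards, which is why the `updater` field warns that updates can be reordered. A user-space approximation with C11 atomics; HISTORY_WRITE_ENTRY and history_entry are invented names and the record is trimmed to two fields:

#include <stdatomic.h>
#include <stdint.h>

#define HISTORY_COUNT 32 /* ring size; older entries get overwritten */

struct history_entry {
	uint64_t ident;
	uint32_t flags;
};

static struct history_entry history[HISTORY_COUNT];
static atomic_uint history_index;

/* Claim a slot index atomically, then publish the entry into the ring. */
#define HISTORY_WRITE_ENTRY(...) ({ \
	unsigned int __idx = atomic_fetch_add_explicit(&history_index, 1, \
	    memory_order_relaxed); \
	history[__idx % HISTORY_COUNT] = (struct history_entry)__VA_ARGS__; \
})

int main(void)
{
	HISTORY_WRITE_ENTRY({ .ident = 7, .flags = 0x1 });
	return 0;
}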
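
Finally, the reshaped struct kqueue groups its common fields inside an anonymous struct so that any compiler padding lands before the trailing zero-length kq_queue[0] array; each subtype then declares its real queue storage immediately after the embedded core, sized for its own bucket count, and the core's trailing array lines up with it. A toy check of that layout rule; base and sub are hypothetical stand-ins:

#include <stdio.h>
#include <stddef.h>

struct base {
	struct {
		int state; /* common fields; padding is forced in here */
	};
	long queue[0]; /* zero-length array: storage comes from the subtype */
};

struct sub {
	struct base b;
	long queue[4]; /* the real queues, sized per subtype */
};

int main(void)
{
	/* The embedded core's trailing array aliases the subtype's storage. */
	printf("%s\n", offsetof(struct sub, queue) == offsetof(struct sub, b.queue)
	    ? "aliased" : "padded apart");
	return 0;
}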