/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999,2000 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/sys/eventvar.h,v 1.1.2.2 2000/07/18 21:49:12 jlemon Exp $
 */

#ifndef _SYS_EVENTVAR_H_
#define _SYS_EVENTVAR_H_

#include <sys/event.h>
#include <sys/select.h>
#include <kern/kern_types.h>
#include <kern/waitq.h>

#if defined(XNU_KERNEL_PRIVATE)

typedef int (*kevent_callback_t)(struct kevent_qos_s *, struct kevent_ctx_s *);

#include <stdint.h>
#include <kern/locks.h>
#include <mach/thread_policy.h>
#include <pthread/workqueue_internal.h>

/*
 * Lock ordering:
 *
 * The kqueue locking order can follow a few different patterns:
 *
 * Standard file-based kqueues (from above):
 *     proc fd lock -> kq lock -> kq-waitq-set lock -> thread lock
 *
 * WorkQ/WorkLoop kqueues (from above):
 *     proc fd lock -> kq lock -> workq lock -> thread lock
 *
 * Whenever a kqueue interacts with source locks, it drops all of its
 * own locks in exchange for a use-reference on the knote used to
 * synchronize with the source code. When those sources post events
 * from below, they have the following lock hierarchy.
 *
 * Standard file-based kqueues (from below):
 *     XXX lock -> kq lock -> kq-waitq-set lock -> thread lock
 *
 * WorkQ/WorkLoop kqueues (from below):
 *     XXX lock -> kq lock -> workq lock -> thread lock
 */
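/*
 * Illustrative sketch of the "from above" order (hypothetical helper
 * names used for illustration; the real lock primitives live in
 * bsd/kern/kern_event.c and osfmk): a file-based kqueue operation
 * entered from the process side takes locks strictly in the
 * documented order, e.g.
 *
 *     proc_fdlock(p);                  // proc fd lock
 *     kqlock(kq);                      // kq lock
 *     waitq_set_lock(&kq->kq_wqs);     // kq-waitq-set lock
 *     thread_lock(thread);             // thread lock
 *     // ... unlock in the reverse order ...
 *
 * Taking these in any other order risks deadlocking against the
 * "from below" paths listed above.
 */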

#define KQEXTENT    256                 /* linear growth by this amount */

struct knote_lock_ctx {
	struct knote                   *knlc_knote;
	thread_t                        knlc_thread;
	uintptr_t                       knlc_waiters;
	LIST_ENTRY(knote_lock_ctx)      knlc_link;
#if DEBUG || DEVELOPMENT
#define KNOTE_LOCK_CTX_UNLOCKED 0
#define KNOTE_LOCK_CTX_LOCKED   1
#define KNOTE_LOCK_CTX_WAITING  2
	int                             knlc_state;
#endif
};
LIST_HEAD(knote_locks, knote_lock_ctx);

#if DEBUG || DEVELOPMENT
/*
 * KNOTE_LOCK_CTX(name) is a convenience macro to define a knote lock
 * context on the stack named `name`. In development kernels, it uses
 * tricks to make sure no lock is still held when exiting the C scope
 * that contains this context.
 */
static inline void
knote_lock_ctx_chk(struct knote_lock_ctx *knlc)
{
	/* evil hackery to make sure no one forgets to unlock */
	assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED);
}
#define KNOTE_LOCK_CTX(n) \
	struct knote_lock_ctx n __attribute__((cleanup(knote_lock_ctx_chk))); \
	n.knlc_state = KNOTE_LOCK_CTX_UNLOCKED
#else
#define KNOTE_LOCK_CTX(n) \
	struct knote_lock_ctx n
#endif
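/*
 * Usage sketch (hypothetical caller, for illustration only; the real
 * knote_lock()/knote_unlock() are internal to bsd/kern/kern_event.c):
 * the cleanup attribute runs knote_lock_ctx_chk() when `knlc` leaves
 * scope, so a code path that forgets to unlock trips the assert in
 * DEBUG/DEVELOPMENT kernels.
 *
 *     void
 *     example_with_knote_lock(struct kqueue *kq, struct knote *kn)
 *     {
 *             KNOTE_LOCK_CTX(knlc);
 *
 *             if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) {
 *                     // ... operate on the locked knote ...
 *                     knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
 *             }
 *             // scope exit: knote_lock_ctx_chk(&knlc) asserts UNLOCKED
 *     }
 */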


__options_decl(kq_state_t, uint16_t, {
	KQ_SEL            = 0x0001,  /* select was recorded for kq */
	KQ_SLEEP          = 0x0002,  /* thread is waiting for events */
	KQ_PROCWAIT       = 0x0004,  /* thread waiting for processing */
	KQ_KEV32          = 0x0008,  /* kq is used with 32-bit events */
	KQ_KEV64          = 0x0010,  /* kq is used with 64-bit events */
	KQ_KEV_QOS        = 0x0020,  /* kq events carry QoS info */
	KQ_WORKQ          = 0x0040,  /* KQ is bound to process workq */
	KQ_WORKLOOP       = 0x0080,  /* KQ is part of a workloop */
	KQ_PROCESSING     = 0x0100,  /* KQ is being processed */
	KQ_DRAIN          = 0x0200,  /* kq is draining */
	KQ_WAKEUP         = 0x0400,  /* kq awakened while processing */
	KQ_DYNAMIC        = 0x0800,  /* kqueue is dynamically managed */
	KQ_R2K_ARMED      = 0x1000,  /* ast notification armed */
	KQ_HAS_TURNSTILE  = 0x2000,  /* this kqueue has a turnstile */
});

/*
 * kqueue - common core definition of a kqueue
 *
 * No real structures of this type are allocated. Every kqueue is
 * either a kqfile, kqworkq, or kqworkloop object, each of which is
 * derived from this definition.
 */
struct kqueue {
	struct {
		struct waitq_set        kq_wqs;        /* private waitq set */
		lck_spin_t              kq_lock;       /* kqueue lock */
		kq_state_t              kq_state;      /* state of the kq */
		union {
			uint16_t        kq_waitq_hook; /* prepost hook (kqwl/kqwq) */
			uint16_t        kq_level;      /* nesting level of the kq */
		};
		uint32_t                kq_count;      /* number of queued events */
		struct proc            *kq_p;          /* process containing kqueue */
		struct knote_locks      kq_knlocks;    /* list of knote locks held */
	}; /* make sure struct padding is put before kq_queue */
	struct kqtailq          kq_queue[0];           /* variable array of queues */
};
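/*
 * Layout sketch (illustration, not an assertion made by this header):
 * derived types place their queue array immediately after the common
 * core, so kq_queue[0] aliases the subtype's first queue, e.g. for the
 * kqfile defined below:
 *
 *     _Static_assert(offsetof(struct kqfile, kqf_queue) ==
 *         sizeof(struct kqueue),
 *         "kqf_queue must overlay kq_queue[0]");
 */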

/*
 * kqfile - definition of a typical kqueue opened as a file descriptor
 *          via the kqueue() system call.
 *
 * Adds selinfo support to the base kqueue definition, as these
 * fds can be fed into select().
 */
struct kqfile {
	struct kqueue       kqf_kqueue;     /* common kqueue core */
	struct kqtailq      kqf_queue;      /* queue of woken up knotes */
	struct kqtailq      kqf_suppressed; /* suppression queue */
	struct selinfo      kqf_sel;        /* parent select/kqueue info */
#define kqf_wqs     kqf_kqueue.kq_wqs
#define kqf_lock    kqf_kqueue.kq_lock
#define kqf_state   kqf_kqueue.kq_state
#define kqf_level   kqf_kqueue.kq_level
#define kqf_count   kqf_kqueue.kq_count
#define kqf_p       kqf_kqueue.kq_p
};

#define QOS_INDEX_KQFILE    0           /* number of qos levels in a file kq */

/*
 * WorkQ kqueues need to request threads to service the triggered
 * knotes in the queue. These threads are brought up on an
 * effective-requested-QoS basis. Knotes are segregated based on
 * that value - calculated by computing max(event-QoS, kevent-QoS).
 * Only one servicing thread is requested at a time for all the
 * knotes at a given effective-requested-QoS.
 */
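/*
 * Sketch of the bucketing rule described above (hypothetical helper;
 * the real computation lives in bsd/kern/kern_event.c and also deals
 * with overrides and the manager bucket):
 *
 *     static inline kq_index_t
 *     kqwq_effective_qos(kq_index_t event_qos, kq_index_t kevent_qos)
 *     {
 *             // effective-requested-QoS = max(event-QoS, kevent-QoS)
 *             return MAX(event_qos, kevent_qos);
 *     }
 */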

#if !defined(KQWQ_QOS_MANAGER)
#define KQWQ_QOS_MANAGER    (THREAD_QOS_LAST)
#endif

#if !defined(KQWQ_NBUCKETS)
#define KQWQ_NBUCKETS       (KQWQ_QOS_MANAGER + 1)
#endif

/*
 * kqworkq - definition of a private kqueue used to coordinate event
 *           handling for pthread work queues.
 *
 * These have per-QoS processing queues and state to coordinate with
 * the pthread kext to ask for threads at corresponding pthread priority
 * values.
 */
struct kqworkq {
	struct kqueue       kqwq_kqueue;
	struct kqtailq      kqwq_queue[KQWQ_NBUCKETS];      /* array of queues */
	struct kqtailq      kqwq_suppressed[KQWQ_NBUCKETS]; /* per-QoS suppression queues */
	workq_threadreq_s   kqwq_request[KQWQ_NBUCKETS];    /* per-QoS request states */
};

#define kqwq_wqs        kqwq_kqueue.kq_wqs
#define kqwq_lock       kqwq_kqueue.kq_lock
#define kqwq_state      kqwq_kqueue.kq_state
#define kqwq_waitq_hook kqwq_kqueue.kq_waitq_hook
#define kqwq_count      kqwq_kqueue.kq_count
#define kqwq_p          kqwq_kqueue.kq_p

/*
 * WorkLoop kqueues need to request a thread to service the triggered
 * knotes in the queue. The thread is brought up on an
 * effective-requested-QoS basis. Knotes are segregated based on
 * that value. Once a request is made, it cannot be undone. If
 * events with higher QoS arrive after, they are stored in their
 * own queues and an override applied to the original request based
 * on the delta between the two QoS values.
 */

/*
 * "Stay-active" knotes are held in a separate bucket that indicates
 * special handling required. They are kept separate because the
 * wakeups issued to them don't have context to tell us where to go
 * to find and process them. All processing of them happens at the
 * highest QoS. Unlike WorkQ kqueues, there is no special singular
 * "manager thread" for a process. We simply request a servicing
 * thread at the highest known QoS when these are woken (or override
 * an existing request to that).
 */
#define KQWL_BUCKET_STAYACTIVE  (THREAD_QOS_LAST)

#if !defined(KQWL_NBUCKETS)
#define KQWL_NBUCKETS           (KQWL_BUCKET_STAYACTIVE + 1)
#endif

/*
 * kqworkloop - definition of a private kqueue used to coordinate event
 *              handling for pthread workloops.
 *
 * Workloops vary from workqs in that only a single thread is ever
 * requested to service a workloop at a time. But unlike workqs,
 * workloops may be "owned" by user-space threads that are
 * synchronously draining an event off the workloop. In those cases,
 * any overrides have to be applied to the owner until it relinquishes
 * ownership.
 *
 * NOTE: "lane" support is TBD.
 */
struct kqworkloop {
	struct kqueue        kqwl_kqueue;               /* queue of events */
	struct kqtailq       kqwl_queue[KQWL_NBUCKETS]; /* array of queues */
	struct kqtailq       kqwl_suppressed;           /* suppression queue */
	workq_threadreq_s    kqwl_request;              /* thread request state */
	lck_spin_t           kqwl_statelock;            /* state/debounce lock */
	thread_t             kqwl_owner;                /* current [sync] owner thread */
	uint32_t             kqwl_retains;              /* retain references */
#define KQWL_STAYACTIVE_FIRED_BIT   (1 << 0)
	uint8_t              kqwl_wakeup_indexes;       /* QoS/override levels that woke */
	kq_index_t           kqwl_stayactive_qos;       /* max QoS of stayactive knotes */
	struct turnstile    *kqwl_turnstile;            /* turnstile for sync IPC/waiters */
	kqueue_id_t          kqwl_dynamicid;            /* dynamic identity */
	uint64_t             kqwl_params;               /* additional parameters */
	LIST_ENTRY(kqworkloop) kqwl_hashlink;           /* linkage for search list */
#if CONFIG_WORKLOOP_DEBUG
#define KQWL_HISTORY_COUNT 32
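/*
 * KQWL_HISTORY_WRITE_ENTRY records a snapshot into a fixed-size ring
 * buffer: the atomic fetch-and-increment of kqwl_index claims a slot,
 * so concurrent writers never collide on a slot, but the relaxed
 * ordering and non-atomic struct copy mean entries can appear
 * reordered or torn to a reader (see the `updater` field note below).
 */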
#define KQWL_HISTORY_WRITE_ENTRY(kqwl, ...)  ({ \
	struct kqworkloop *__kqwl = (kqwl); \
	unsigned int __index = os_atomic_inc_orig(&__kqwl->kqwl_index, relaxed); \
	__kqwl->kqwl_history[__index % KQWL_HISTORY_COUNT] = \
	    (struct kqwl_history)__VA_ARGS__; \
})
	struct kqwl_history {
		thread_t    updater;    /* Note: updates can be reordered */
		thread_t    servicer;
		thread_t    old_owner;
		thread_t    new_owner;

		uint64_t    kev_ident;
		int16_t     error;
		uint16_t    kev_flags;
		uint32_t    kev_fflags;

		uint64_t    kev_mask;
		uint64_t    kev_value;
		uint64_t    in_value;
	} kqwl_history[KQWL_HISTORY_COUNT];
	unsigned int kqwl_index;
#endif // CONFIG_WORKLOOP_DEBUG
};
LIST_HEAD(kqwllist, kqworkloop);

typedef union {
	struct kqueue          *kq;
	struct kqworkq         *kqwq;
	struct kqfile          *kqf;
	struct kqworkloop      *kqwl;
} __attribute__((transparent_union)) kqueue_t;
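/*
 * Usage sketch (hypothetical function, for illustration only): the
 * transparent union lets internal helpers accept any kqueue subtype
 * without casts at the call site:
 *
 *     static void
 *     example_kqueue_op(kqueue_t kqu)
 *     {
 *             if (kqu.kq->kq_state & KQ_WORKLOOP) {
 *                     struct kqworkloop *kqwl = kqu.kqwl;
 *                     // ... workloop-specific handling ...
 *             }
 *     }
 *
 *     // a struct kqfile *, kqworkq *, or kqworkloop * can be passed
 *     // directly: example_kqueue_op(kqf);
 */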


#define kqwl_wqs        kqwl_kqueue.kq_wqs
#define kqwl_lock       kqwl_kqueue.kq_lock
#define kqwl_state      kqwl_kqueue.kq_state
#define kqwl_waitq_hook kqwl_kqueue.kq_waitq_hook
#define kqwl_count      kqwl_kqueue.kq_count
#define kqwl_p          kqwl_kqueue.kq_p

#define KQ_WORKLOOP_RETAINS_MAX UINT32_MAX

extern void kqueue_threadreq_unbind(struct proc *p, workq_threadreq_t);

// called with the kq req held
#define KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE 0x1
extern void kqueue_threadreq_bind(struct proc *p, workq_threadreq_t req,
    thread_t thread, unsigned int flags);

struct turnstile *kqueue_threadreq_get_turnstile(workq_threadreq_t kqr);

// called with the wq lock held
extern void kqueue_threadreq_bind_prepost(struct proc *p, workq_threadreq_t req,
    struct uthread *uth);

// called with no lock held
extern void kqueue_threadreq_bind_commit(struct proc *p, thread_t thread);

extern void kqueue_threadreq_cancel(struct proc *p, workq_threadreq_t req);

// lock not held as kqwl_params is immutable after creation
extern workq_threadreq_param_t kqueue_threadreq_workloop_param(workq_threadreq_t req);
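/*
 * Two-phase bind sketch (illustrating the locking contract stated in
 * the comments above, not additional API): a servicer thread is first
 * preposted to the request under the workq lock, and the bind is
 * committed once all locks have been dropped:
 *
 *     // workq lock held
 *     kqueue_threadreq_bind_prepost(p, req, uth);
 *     // ... drop the workq lock ...
 *
 *     // no lock held
 *     kqueue_threadreq_bind_commit(p, thread);
 */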

extern struct kqueue *kqueue_alloc(struct proc *);
extern void kqueue_dealloc(struct kqueue *);
extern void kqworkq_dealloc(struct kqworkq *kqwq);

extern void knotes_dealloc(struct proc *);
extern void kqworkloops_dealloc(struct proc *);

extern int kevent_register(struct kqueue *, struct kevent_qos_s *,
    struct knote **);
extern int kqueue_scan(struct kqueue *, int flags,
    struct kevent_ctx_s *, kevent_callback_t);
extern int kqueue_stat(struct kqueue *, void *, int, proc_t);

#endif /* XNU_KERNEL_PRIVATE */

#endif /* !_SYS_EVENTVAR_H_ */