/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999,2000 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/sys/eventvar.h,v 1.1.2.2 2000/07/18 21:49:12 jlemon Exp $
 */
55
55e303ae
A
56#ifndef _SYS_EVENTVAR_H_
57#define _SYS_EVENTVAR_H_
58
91447636 59#include <sys/event.h>
55e303ae
A
60#include <sys/select.h>
61#include <kern/kern_types.h>
3e170ce0 62#include <kern/waitq.h>
55e303ae 63
39037602
A
64#if defined(XNU_KERNEL_PRIVATE)
65
cb323159 66typedef int (*kevent_callback_t)(struct kevent_qos_s *, struct kevent_ctx_s *);
5ba3f43e
A
67
68#include <stdint.h>
39037602
A
69#include <kern/locks.h>
70#include <mach/thread_policy.h>
d9a64523 71#include <pthread/workqueue_internal.h>
39037602 72
/*
 * Lock ordering:
 *
 * The kqueue locking order can follow a few different patterns:
 *
 * Standard file-based kqueues (from above):
 *     proc fd lock -> kq lock -> kq-waitq-set lock -> thread lock
 *
 * WorkQ/WorkLoop kqueues (from above):
 *     proc fd lock -> kq lock -> workq lock -> thread lock
 *
 * Whenever a kqueue interacts with source locks, it drops all of its own
 * locks in exchange for a use-reference on the knote used to synchronize
 * with the source code. When those sources post events from below, they
 * have the following lock hierarchy.
 *
 * Standard file-based kqueues (from below):
 *     XXX lock -> kq lock -> kq-waitq-set lock -> thread lock
 *
 * WorkQ/WorkLoop kqueues (from below):
 *     XXX lock -> kq lock -> workq lock -> thread lock
 */

0a7de745 96#define KQEXTENT 256 /* linear growth by this amount */
9bccf70c 97
d9a64523 98struct knote_lock_ctx {
cb323159
A
99 struct knote *knlc_knote;
100 thread_t knlc_thread;
101 uintptr_t knlc_waiters;
102 LIST_ENTRY(knote_lock_ctx) knlc_link;
d9a64523
A
103#if DEBUG || DEVELOPMENT
104#define KNOTE_LOCK_CTX_UNLOCKED 0
105#define KNOTE_LOCK_CTX_LOCKED 1
106#define KNOTE_LOCK_CTX_WAITING 2
107 int knlc_state;
108#endif
109};
110LIST_HEAD(knote_locks, knote_lock_ctx);
111
112#if DEBUG || DEVELOPMENT
113/*
114 * KNOTE_LOCK_CTX(name) is a convenience macro to define a knote lock context on
115 * the stack named `name`. In development kernels, it uses tricks to make sure
116 * not locks was still held when exiting the C-scope that contains this context.
117 */
cb323159
A
118static inline void
119knote_lock_ctx_chk(struct knote_lock_ctx *knlc)
120{
121 /* evil hackery to make sure no one forgets to unlock */
122 assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED);
123}
d9a64523 124#define KNOTE_LOCK_CTX(n) \
0a7de745
A
125 struct knote_lock_ctx n __attribute__((cleanup(knote_lock_ctx_chk))); \
126 n.knlc_state = KNOTE_LOCK_CTX_UNLOCKED
d9a64523
A
127#else
128#define KNOTE_LOCK_CTX(n) \
0a7de745 129 struct knote_lock_ctx n
d9a64523
A
130#endif
131
cb323159
A
132
133__options_decl(kq_state_t, uint16_t, {
134 KQ_SEL = 0x0001, /* select was recorded for kq */
135 KQ_SLEEP = 0x0002, /* thread is waiting for events */
136 KQ_PROCWAIT = 0x0004, /* thread waiting for processing */
137 KQ_KEV32 = 0x0008, /* kq is used with 32-bit events */
138 KQ_KEV64 = 0x0010, /* kq is used with 64-bit events */
139 KQ_KEV_QOS = 0x0020, /* kq events carry QoS info */
140 KQ_WORKQ = 0x0040, /* KQ is bound to process workq */
141 KQ_WORKLOOP = 0x0080, /* KQ is part of a workloop */
142 KQ_PROCESSING = 0x0100, /* KQ is being processed */
143 KQ_DRAIN = 0x0200, /* kq is draining */
144 KQ_WAKEUP = 0x0400, /* kq awakened while processing */
145 KQ_DYNAMIC = 0x0800, /* kqueue is dynamically managed */
146 KQ_R2K_ARMED = 0x1000, /* ast notification armed */
147 KQ_HAS_TURNSTILE = 0x2000, /* this kqueue has a turnstile */
148});
149
39037602
A
150/*
151 * kqueue - common core definition of a kqueue
152 *
153 * No real structures are allocated of this type. They are
154 * either kqfile objects or kqworkq objects - each of which is
155 * derived from this definition.
156 */
55e303ae 157struct kqueue {
d9a64523
A
158 struct {
159 struct waitq_set kq_wqs; /* private waitq set */
160 lck_spin_t kq_lock; /* kqueue lock */
cb323159
A
161 kq_state_t kq_state; /* state of the kq */
162 union {
163 uint16_t kq_waitq_hook;/* prepost hook (kqwl/kqwq) */
164 uint16_t kq_level; /* nesting level of the kq */
165 };
d9a64523
A
166 uint32_t kq_count; /* number of queued events */
167 struct proc *kq_p; /* process containing kqueue */
168 struct knote_locks kq_knlocks; /* list of knote locks held */
d9a64523
A
169 }; /* make sure struct padding is put before kq_queue */
170 struct kqtailq kq_queue[0]; /* variable array of queues */
39037602
A
171};
172
39037602
A
173/*
174 * kqfile - definition of a typical kqueue opened as a file descriptor
175 * via the kqueue() system call.
176 *
177 * Adds selinfo support to the base kqueue definition, as these
178 * fds can be fed into select().
179 */
180struct kqfile {
181 struct kqueue kqf_kqueue; /* common kqueue core */
d9a64523 182 struct kqtailq kqf_queue; /* queue of woken up knotes */
39037602
A
183 struct kqtailq kqf_suppressed; /* suppression queue */
184 struct selinfo kqf_sel; /* parent select/kqueue info */
39037602
A
185#define kqf_wqs kqf_kqueue.kq_wqs
186#define kqf_lock kqf_kqueue.kq_lock
187#define kqf_state kqf_kqueue.kq_state
188#define kqf_level kqf_kqueue.kq_level
189#define kqf_count kqf_kqueue.kq_count
190#define kqf_p kqf_kqueue.kq_p
5ba3f43e
A
191};
192
cb323159 193#define QOS_INDEX_KQFILE 0 /* number of qos levels in a file kq */
d9a64523 194
39037602
A
195/*
196 * WorkQ kqueues need to request threads to service the triggered
197 * knotes in the queue. These threads are brought up on a
198 * effective-requested-QoS basis. Knotes are segregated based on
199 * that value - calculated by computing max(event-QoS, kevent-QoS).
200 * Only one servicing thread is requested at a time for all the
201 * knotes at a given effective-requested-QoS.
202 */
203
204#if !defined(KQWQ_QOS_MANAGER)
205#define KQWQ_QOS_MANAGER (THREAD_QOS_LAST)
206#endif
207
39037602 208#if !defined(KQWQ_NBUCKETS)
d9a64523 209#define KQWQ_NBUCKETS (KQWQ_QOS_MANAGER + 1)
39037602
A
210#endif
211
212/*
213 * kqworkq - definition of a private kqueue used to coordinate event
214 * handling for pthread work queues.
215 *
216 * These have per-qos processing queues and state to coordinate with
217 * the pthread kext to ask for threads at corresponding pthread priority
218 * values.
219 */
220struct kqworkq {
cb323159
A
221 struct kqueue kqwq_kqueue;
222 struct kqtailq kqwq_queue[KQWQ_NBUCKETS]; /* array of queues */
223 struct kqtailq kqwq_suppressed[KQWQ_NBUCKETS]; /* Per-QoS suppression queues */
224 workq_threadreq_s kqwq_request[KQWQ_NBUCKETS]; /* per-QoS request states */
39037602
A
225};
226
cb323159
A
227#define kqwq_wqs kqwq_kqueue.kq_wqs
228#define kqwq_lock kqwq_kqueue.kq_lock
229#define kqwq_state kqwq_kqueue.kq_state
230#define kqwq_waitq_hook kqwq_kqueue.kq_waitq_hook
231#define kqwq_count kqwq_kqueue.kq_count
232#define kqwq_p kqwq_kqueue.kq_p
5ba3f43e
A
233
234/*
235 * WorkLoop kqueues need to request a thread to service the triggered
236 * knotes in the queue. The thread is brought up on a
237 * effective-requested-QoS basis. Knotes are segregated based on
238 * that value. Once a request is made, it cannot be undone. If
239 * events with higher QoS arrive after, they are stored in their
240 * own queues and an override applied to the original request based
241 * on the delta between the two QoS values.
242 */
243
244/*
245 * "Stay-active" knotes are held in a separate bucket that indicates
246 * special handling required. They are kept separate because the
247 * wakeups issued to them don't have context to tell us where to go
248 * to find and process them. All processing of them happens at the
249 * highest QoS. Unlike WorkQ kqueues, there is no special singular
250 * "manager thread" for a process. We simply request a servicing
251 * thread at the higest known QoS when these are woken (or override
252 * an existing request to that).
253 */
254#define KQWL_BUCKET_STAYACTIVE (THREAD_QOS_LAST)
255
256#if !defined(KQWL_NBUCKETS)
257#define KQWL_NBUCKETS (KQWL_BUCKET_STAYACTIVE + 1)
258#endif
259
260/*
261 * kqworkloop - definition of a private kqueue used to coordinate event
262 * handling for pthread workloops.
263 *
264 * Workloops vary from workqs in that only a single thread is ever
265 * requested to service a workloop at a time. But unlike workqs,
266 * workloops may be "owned" by user-space threads that are
267 * synchronously draining an event off the workloop. In those cases,
268 * any overrides have to be applied to the owner until it relinqueshes
269 * ownership.
270 *
271 * NOTE: "lane" support is TBD.
272 */
273struct kqworkloop {
cb323159
A
274 struct kqueue kqwl_kqueue; /* queue of events */
275 struct kqtailq kqwl_queue[KQWL_NBUCKETS]; /* array of queues */
276 struct kqtailq kqwl_suppressed; /* Per-QoS suppression queues */
277 workq_threadreq_s kqwl_request; /* thread request state */
278 lck_spin_t kqwl_statelock; /* state/debounce lock */
279 thread_t kqwl_owner; /* current [sync] owner thread */
280 uint32_t kqwl_retains; /* retain references */
281#define KQWL_STAYACTIVE_FIRED_BIT (1 << 0)
282 uint8_t kqwl_wakeup_indexes; /* QoS/override levels that woke */
283 kq_index_t kqwl_stayactive_qos; /* max QoS of statyactive knotes */
94ff46dc 284 struct turnstile *kqwl_turnstile; /* turnstile for sync IPC/waiters */
cb323159
A
285 kqueue_id_t kqwl_dynamicid; /* dynamic identity */
286 uint64_t kqwl_params; /* additional parameters */
cb323159 287 LIST_ENTRY(kqworkloop) kqwl_hashlink; /* linkage for search list */
d9a64523
A
288#if CONFIG_WORKLOOP_DEBUG
289#define KQWL_HISTORY_COUNT 32
290#define KQWL_HISTORY_WRITE_ENTRY(kqwl, ...) ({ \
0a7de745
A
291 struct kqworkloop *__kqwl = (kqwl); \
292 unsigned int __index = os_atomic_inc_orig(&__kqwl->kqwl_index, relaxed); \
293 __kqwl->kqwl_history[__index % KQWL_HISTORY_COUNT] = \
294 (struct kqwl_history)__VA_ARGS__; \
d9a64523
A
295 })
296 struct kqwl_history {
297 thread_t updater; /* Note: updates can be reordered */
298 thread_t servicer;
299 thread_t old_owner;
300 thread_t new_owner;
301
302 uint64_t kev_ident;
303 int16_t error;
304 uint16_t kev_flags;
305 uint32_t kev_fflags;
306
307 uint64_t kev_mask;
308 uint64_t kev_value;
309 uint64_t in_value;
310 } kqwl_history[KQWL_HISTORY_COUNT];
311 unsigned int kqwl_index;
312#endif // CONFIG_WORKLOOP_DEBUG
5ba3f43e 313};
cb323159 314LIST_HEAD(kqwllist, kqworkloop);
39037602 315
d9a64523 316typedef union {
0a7de745 317 struct kqueue *kq;
d9a64523
A
318 struct kqworkq *kqwq;
319 struct kqfile *kqf;
320 struct kqworkloop *kqwl;
321} __attribute__((transparent_union)) kqueue_t;
322
5ba3f43e 323
cb323159
A
324#define kqwl_wqs kqwl_kqueue.kq_wqs
325#define kqwl_lock kqwl_kqueue.kq_lock
326#define kqwl_state kqwl_kqueue.kq_state
327#define kqwl_waitq_hook kqwl_kqueue.kq_waitq_hook
328#define kqwl_count kqwl_kqueue.kq_count
329#define kqwl_p kqwl_kqueue.kq_p
5ba3f43e
A
330
331#define KQ_WORKLOOP_RETAINS_MAX UINT32_MAX
332
cb323159 333extern void kqueue_threadreq_unbind(struct proc *p, workq_threadreq_t);
d9a64523
A
334
335// called with the kq req held
336#define KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE 0x1
337extern void kqueue_threadreq_bind(struct proc *p, workq_threadreq_t req,
0a7de745 338 thread_t thread, unsigned int flags);
d9a64523 339
cb323159
A
340struct turnstile *kqueue_threadreq_get_turnstile(workq_threadreq_t kqr);
341
d9a64523 342// called with the wq lock held
cb323159
A
343extern void kqueue_threadreq_bind_prepost(struct proc *p, workq_threadreq_t req,
344 struct uthread *uth);
d9a64523
A
345
346// called with no lock held
347extern void kqueue_threadreq_bind_commit(struct proc *p, thread_t thread);
348
349extern void kqueue_threadreq_cancel(struct proc *p, workq_threadreq_t req);
350
351// lock not held as kqwl_params is immutable after creation
352extern workq_threadreq_param_t kqueue_threadreq_workloop_param(workq_threadreq_t req);
39037602 353
cb323159 354extern struct kqueue *kqueue_alloc(struct proc *);
2d21ac55 355extern void kqueue_dealloc(struct kqueue *);
cb323159 356extern void kqworkq_dealloc(struct kqworkq *kqwq);
91447636 357
5ba3f43e 358extern void knotes_dealloc(struct proc *);
d9a64523 359extern void kqworkloops_dealloc(struct proc *);
91447636 360
cb323159
A
361extern int kevent_register(struct kqueue *, struct kevent_qos_s *,
362 struct knote **);
363extern int kqueue_scan(struct kqueue *, int flags,
364 struct kevent_ctx_s *, kevent_callback_t);
fe8ab488 365extern int kqueue_stat(struct kqueue *, void *, int, proc_t);
91447636 366
39037602
A
367#endif /* XNU_KERNEL_PRIVATE */
368
55e303ae 369#endif /* !_SYS_EVENTVAR_H_ */