/*
 * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/sys/event.h,v 1.5.2.5 2001/12/14 19:21:22 jlemon Exp $
 */

#ifndef _SYS_EVENT_H_
#define _SYS_EVENT_H_

#include <machine/types.h>
#include <sys/cdefs.h>
#include <stdint.h>

/*
 * Filter types
 */
#define EVFILT_READ (-1)
#define EVFILT_WRITE (-2)
#define EVFILT_AIO (-3) /* attached to aio requests */
#define EVFILT_VNODE (-4) /* attached to vnodes */
#define EVFILT_PROC (-5) /* attached to struct proc */
#define EVFILT_SIGNAL (-6) /* attached to struct proc */
#define EVFILT_TIMER (-7) /* timers */
#define EVFILT_MACHPORT (-8) /* Mach portsets */
#define EVFILT_FS (-9) /* Filesystem events */
#define EVFILT_USER (-10) /* User events */
#ifdef PRIVATE
#define EVFILT_UNUSED_11 (-11) /* (-11) unused */
#endif
#define EVFILT_VM (-12) /* Virtual memory events */
#ifdef PRIVATE
#define EVFILT_SOCK (-13) /* Socket events */
#define EVFILT_MEMORYSTATUS (-14) /* Memorystatus events */
#endif /* PRIVATE */
#define EVFILT_EXCEPT (-15) /* Exception events */
#ifdef PRIVATE
#define EVFILT_WORKLOOP (-17) /* Workloop events */
#endif /* PRIVATE */

#define EVFILT_SYSCOUNT 17
#define EVFILT_THREADMARKER EVFILT_SYSCOUNT /* Internal use only */

#pragma pack(4)

struct kevent {
    uintptr_t ident;  /* identifier for this event */
    int16_t filter;   /* filter for event */
    uint16_t flags;   /* general flags */
    uint32_t fflags;  /* filter-specific flags */
    intptr_t data;    /* filter-specific data */
    void *udata;      /* opaque user data identifier */
};

#ifdef KERNEL_PRIVATE

struct user64_kevent {
    uint64_t ident;    /* identifier for this event */
    int16_t filter;    /* filter for event */
    uint16_t flags;    /* general flags */
    uint32_t fflags;   /* filter-specific flags */
    int64_t data;      /* filter-specific data */
    user_addr_t udata; /* opaque user data identifier */
};

struct user32_kevent {
    uint32_t ident;      /* identifier for this event */
    int16_t filter;      /* filter for event */
    uint16_t flags;      /* general flags */
    uint32_t fflags;     /* filter-specific flags */
    int32_t data;        /* filter-specific data */
    user32_addr_t udata; /* opaque user data identifier */
};

#endif /* KERNEL_PRIVATE */

#pragma pack()

struct kevent64_s {
    uint64_t ident;   /* identifier for this event */
    int16_t filter;   /* filter for event */
    uint16_t flags;   /* general flags */
    uint32_t fflags;  /* filter-specific flags */
    int64_t data;     /* filter-specific data */
    uint64_t udata;   /* opaque user data identifier */
    uint64_t ext[2];  /* filter-specific extensions */
};

#ifdef PRIVATE
struct kevent_qos_s {
    uint64_t ident;   /* identifier for this event */
    int16_t filter;   /* filter for event */
    uint16_t flags;   /* general flags */
    int32_t qos;      /* quality of service */
    uint64_t udata;   /* opaque user data identifier */
    uint32_t fflags;  /* filter-specific flags */
    uint32_t xflags;  /* extra filter-specific flags */
    int64_t data;     /* filter-specific data */
    uint64_t ext[4];  /* filter-specific extensions */
};

/*
 * Type definition for names/ids of dynamically allocated kqueues.
 */
typedef uint64_t kqueue_id_t;
#endif /* PRIVATE */

#define EV_SET(kevp, a, b, c, d, e, f) do { \
    struct kevent *__kevp__ = (kevp);       \
    __kevp__->ident = (a);                  \
    __kevp__->filter = (b);                 \
    __kevp__->flags = (c);                  \
    __kevp__->fflags = (d);                 \
    __kevp__->data = (e);                   \
    __kevp__->udata = (f);                  \
} while(0)
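
/*
 * Example usage (a minimal sketch, not part of the interface itself): a
 * user-space round trip with EV_SET and kevent(2).  "fd" is an assumed valid
 * descriptor and error handling is omitted.
 *
 *    int kq = kqueue();
 *    struct kevent change, event;
 *
 *    EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *    kevent(kq, &change, 1, NULL, 0, NULL);        // register only
 *
 *    if (kevent(kq, NULL, 0, &event, 1, NULL) > 0) {
 *        // event.ident == fd is readable; event.data is the readable byte count
 *    }
 */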

#define EV_SET64(kevp, a, b, c, d, e, f, g, h) do { \
    struct kevent64_s *__kevp__ = (kevp);           \
    __kevp__->ident = (a);                          \
    __kevp__->filter = (b);                         \
    __kevp__->flags = (c);                          \
    __kevp__->fflags = (d);                         \
    __kevp__->data = (e);                           \
    __kevp__->udata = (f);                          \
    __kevp__->ext[0] = (g);                         \
    __kevp__->ext[1] = (h);                         \
} while(0)


/* kevent system call flags */
#define KEVENT_FLAG_NONE 0x000000 /* no flag value */
#define KEVENT_FLAG_IMMEDIATE 0x000001 /* immediate timeout */
#define KEVENT_FLAG_ERROR_EVENTS 0x000002 /* output events only include change errors */
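
/*
 * Sketch of how these flags are typically passed (assumptions: "kq", "fd1",
 * "fd2" exist): kevent64(2) takes a flags argument directly.  With
 * KEVENT_FLAG_ERROR_EVENTS, only changelist entries that fail are reported
 * back, each with EV_ERROR set and an errno in data, which suits batch
 * registration.
 *
 *    struct kevent64_s changes[2], errors[2];
 *    EV_SET64(&changes[0], fd1, EVFILT_READ,  EV_ADD, 0, 0, 0, 0, 0);
 *    EV_SET64(&changes[1], fd2, EVFILT_WRITE, EV_ADD, 0, 0, 0, 0, 0);
 *    int nerr = kevent64(kq, changes, 2, errors, 2,
 *        KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS, NULL);
 */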

#ifdef PRIVATE

/*
 * Rather than provide an EV_SET_QOS macro for kevent_qos_t structure
 * initialization, we encourage use of named field initialization support
 * instead.
 */

// was KEVENT_FLAG_STACK_EVENTS 0x000004
#define KEVENT_FLAG_STACK_DATA 0x000008 /* output data allocated as stack (grows down) */
// KEVENT_FLAG_POLL 0x000010
#define KEVENT_FLAG_WORKQ 0x000020 /* interact with the default workq kq */
// KEVENT_FLAG_LEGACY32 0x000040
// KEVENT_FLAG_LEGACY64 0x000080
// KEVENT_FLAG_PROC64 0x000100
#define KEVENT_FLAG_WORKQ_MANAGER 0x000200 /* obsolete */
#define KEVENT_FLAG_WORKLOOP 0x000400 /* interact with the specified workloop kq */
#define KEVENT_FLAG_PARKING 0x000800 /* workq thread is parking */
// KEVENT_FLAG_KERNEL 0x001000
// KEVENT_FLAG_DYNAMIC_KQUEUE 0x002000
// KEVENT_FLAG_NEEDS_END_PROCESSING 0x004000
#define KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH 0x008000 /* obsolete */
#define KEVENT_FLAG_WORKLOOP_SERVICER_DETACH 0x010000 /* obsolete */
#define KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST 0x020000 /* kq lookup by id must exist */
#define KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST 0x040000 /* kq lookup by id must not exist */
#define KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD 0x080000 /* obsolete */

#ifdef XNU_KERNEL_PRIVATE

#define KEVENT_FLAG_POLL 0x0010 /* Call is for poll() */
#define KEVENT_FLAG_LEGACY32 0x0040 /* event data in legacy 32-bit format */
#define KEVENT_FLAG_LEGACY64 0x0080 /* event data in legacy 64-bit format */
#define KEVENT_FLAG_PROC64 0x0100 /* proc is 64bits */
#define KEVENT_FLAG_KERNEL 0x1000 /* caller is in-kernel */
#define KEVENT_FLAG_DYNAMIC_KQUEUE 0x2000 /* kqueue is dynamically allocated */
#define KEVENT_FLAG_NEEDS_END_PROCESSING 0x4000 /* end processing required before returning */

#define KEVENT_ID_FLAG_USER (KEVENT_FLAG_WORKLOOP | \
    KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)

#define KEVENT_FLAG_USER (KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS | \
    KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP | \
    KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)

/*
 * Since some filter ops are not part of the standard sysfilt_ops, we use
 * kn_filtid starting from EVFILT_SYSCOUNT to identify these cases. This is to
 * let kn_fops() get the correct fops for all cases.
 */
#define EVFILTID_KQREAD (EVFILT_SYSCOUNT)
#define EVFILTID_PIPE_N (EVFILT_SYSCOUNT + 1)
#define EVFILTID_PIPE_R (EVFILT_SYSCOUNT + 2)
#define EVFILTID_PIPE_W (EVFILT_SYSCOUNT + 3)
#define EVFILTID_PTSD (EVFILT_SYSCOUNT + 4)
#define EVFILTID_SOREAD (EVFILT_SYSCOUNT + 5)
#define EVFILTID_SOWRITE (EVFILT_SYSCOUNT + 6)
#define EVFILTID_SCK (EVFILT_SYSCOUNT + 7)
#define EVFILTID_SOEXCEPT (EVFILT_SYSCOUNT + 8)
#define EVFILTID_SPEC (EVFILT_SYSCOUNT + 9)
#define EVFILTID_BPFREAD (EVFILT_SYSCOUNT + 10)
#define EVFILTID_NECP_FD (EVFILT_SYSCOUNT + 11)
#define EVFILTID_FSEVENT (EVFILT_SYSCOUNT + 15)
#define EVFILTID_VN (EVFILT_SYSCOUNT + 16)
#define EVFILTID_TTY (EVFILT_SYSCOUNT + 17)
#define EVFILTID_PTMX (EVFILT_SYSCOUNT + 18)

#define EVFILTID_DETACHED (EVFILT_SYSCOUNT + 19)
#define EVFILTID_MAX (EVFILT_SYSCOUNT + 20)

#endif /* defined(XNU_KERNEL_PRIVATE) */

#define EV_SET_QOS 0

#endif /* PRIVATE */

/* actions */
#define EV_ADD 0x0001 /* add event to kq (implies enable) */
#define EV_DELETE 0x0002 /* delete event from kq */
#define EV_ENABLE 0x0004 /* enable event */
#define EV_DISABLE 0x0008 /* disable event (not reported) */

/* flags */
#define EV_ONESHOT 0x0010 /* only report one occurrence */
#define EV_CLEAR 0x0020 /* clear event state after reporting */
#define EV_RECEIPT 0x0040 /* force immediate event output */
                          /* ... with or without EV_ERROR */
                          /* ... use KEVENT_FLAG_ERROR_EVENTS */
                          /* on syscalls supporting flags */

#define EV_DISPATCH 0x0080 /* disable event after reporting */
#define EV_UDATA_SPECIFIC 0x0100 /* unique kevent per udata value */

#define EV_DISPATCH2 (EV_DISPATCH | EV_UDATA_SPECIFIC)
/* ... in combination with EV_DELETE */
/* will defer delete until udata-specific */
/* event enabled. EINPROGRESS will be */
/* returned to indicate the deferral */

#define EV_VANISHED 0x0200 /* report that source has vanished */
                           /* ... only valid with EV_DISPATCH2 */

#define EV_SYSFLAGS 0xF000 /* reserved by system */
#define EV_FLAG0 0x1000 /* filter-specific flag */
#define EV_FLAG1 0x2000 /* filter-specific flag */

/* returned values */
#define EV_EOF 0x8000 /* EOF detected */
#define EV_ERROR 0x4000 /* error, data contains errno */
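
/*
 * Sketch of EV_RECEIPT and the returned values above (assumptions: "kq" and
 * "fd" exist): registering with EV_RECEIPT echoes each change back
 * immediately with EV_ERROR set and 0 (or an errno) in data, without
 * draining pending events.
 *
 *    struct kevent change, receipt;
 *    EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);
 *    if (kevent(kq, &change, 1, &receipt, 1, NULL) > 0 &&
 *        (receipt.flags & EV_ERROR) && receipt.data != 0) {
 *        // registration failed; receipt.data holds the errno
 *    }
 */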

/*
 * Filter specific flags for EVFILT_READ
 *
 * The default behavior for EVFILT_READ is to make the "read" determination
 * relative to the current file descriptor read pointer.
 *
 * The EV_POLL flag indicates the determination should be made via poll(2)
 * semantics. These semantics dictate always returning true for regular files,
 * regardless of the amount of unread data in the file.
 *
 * On input, EV_OOBAND specifies that the filter should actively return in the
 * presence of OOB data on the descriptor. It implies that the filter will
 * return if there is OOB data available to read OR when any other condition
 * for the read is met (for example, the number of bytes of regular data
 * becomes >= low-watermark).
 * If EV_OOBAND is not set on input, the filter should not actively return for
 * out of band data on the descriptor. The filter will then only return when
 * some other condition for read is met (e.g. when the number of regular data
 * bytes is >= low-watermark OR when the socket can't receive more data
 * (SS_CANTRCVMORE)).
 *
 * On output, EV_OOBAND indicates the presence of OOB data on the descriptor.
 * If it was not specified as an input parameter, then the data count is the
 * number of bytes before the current OOB marker; otherwise the data count is
 * the number of bytes beyond the OOB marker.
 */
#define EV_POLL EV_FLAG0
#define EV_OOBAND EV_FLAG1
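
/*
 * Sketch of the EV_OOBAND input/output convention described above, for a
 * connected socket "sock" on an existing kqueue "kq" (both assumed):
 *
 *    EV_SET(&change, sock, EVFILT_READ, EV_ADD | EV_OOBAND, 0, 0, NULL);
 *    kevent(kq, &change, 1, NULL, 0, NULL);
 *    ...
 *    if (event.flags & EV_OOBAND) {
 *        // OOB data present; since EV_OOBAND was set on input,
 *        // event.data counts bytes beyond the OOB marker
 *    }
 */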

/*
 * data/hint fflags for EVFILT_USER, shared with userspace
 */

/*
 * On input, NOTE_TRIGGER causes the event to be triggered for output.
 */
#define NOTE_TRIGGER 0x01000000

/*
 * On input, the top two bits of fflags specify how the lower twenty-four
 * bits should be applied to the stored value of fflags.
 *
 * On output, the top two bits will always be set to NOTE_FFNOP and the
 * remaining twenty-four bits will contain the stored fflags value.
 */
#define NOTE_FFNOP 0x00000000 /* ignore input fflags */
#define NOTE_FFAND 0x40000000 /* and fflags */
#define NOTE_FFOR 0x80000000 /* or fflags */
#define NOTE_FFCOPY 0xc0000000 /* copy fflags */
#define NOTE_FFCTRLMASK 0xc0000000 /* mask for operations */
#define NOTE_FFLAGSMASK 0x00ffffff
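
/*
 * Sketch of an EVFILT_USER round trip (assumptions: "kq" exists): the event
 * is added once with an arbitrary ident, then fired from any thread with
 * NOTE_TRIGGER; NOTE_FFOR ORs extra bits into the stored fflags, which come
 * back in the low twenty-four bits of the delivered event.
 *
 *    EV_SET(&change, 1, EVFILT_USER, EV_ADD | EV_CLEAR, NOTE_FFNOP, 0, NULL);
 *    kevent(kq, &change, 1, NULL, 0, NULL);
 *    ...
 *    EV_SET(&change, 1, EVFILT_USER, 0, NOTE_TRIGGER | NOTE_FFOR | 0x1, 0, NULL);
 *    kevent(kq, &change, 1, NULL, 0, NULL);    // wakes a waiter on kq
 */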

#ifdef PRIVATE
/*
 * data/hint fflags for EVFILT_WORKLOOP, shared with userspace
 *
 * The ident for thread requests should be the dynamic ID of the workloop.
 * The ident for each sync waiter must be unique to that waiter [for this workloop].
 *
 *
 * Commands:
 *
 * @const NOTE_WL_THREAD_REQUEST [in/out]
 * The kevent represents asynchronous userspace work and its associated QoS.
 * There can only be a single knote with this flag set per workloop.
 *
 * @const NOTE_WL_SYNC_WAIT [in/out]
 * This bit is set when the caller is waiting to become the owner of a workloop.
 * If the NOTE_WL_SYNC_WAKE bit is already set then the caller is not blocked,
 * else it blocks until it is set.
 *
 * The QoS field of the knote is used to push on other owners or servicers.
 *
 * @const NOTE_WL_SYNC_WAKE [in/out]
 * Marks the waiter knote as being eligible to become an owner.
 * This bit can only be set once; trying it again will fail with EALREADY.
 *
 * @const NOTE_WL_SYNC_IPC [in/out]
 * The knote is a sync IPC redirected turnstile push.
 *
 * Flags/Modifiers:
 *
 * @const NOTE_WL_UPDATE_QOS [in] (only NOTE_WL_THREAD_REQUEST)
 * For successful updates (EV_ADD only), learn the new userspace async QoS from
 * the kevent qos field.
 *
 * @const NOTE_WL_END_OWNERSHIP [in]
 * If the update is successful (including deletions) or returns ESTALE, and
 * the caller thread or the "suspended" thread is currently owning the workloop,
 * then ownership is forgotten.
 *
 * @const NOTE_WL_DISCOVER_OWNER [in]
 * If the update is successful (including deletions), learn the owner identity
 * from the loaded value during debounce. This requires an address to have been
 * filled in the EV_EXTIDX_WL_ADDR ext field, but doesn't require a mask to have
 * been set in the EV_EXTIDX_WL_MASK.
 *
 * @const NOTE_WL_IGNORE_ESTALE [in]
 * If the operation would fail with ESTALE, mask the error and pretend the
 * update was successful. However the operation itself didn't happen, meaning
 * that:
 * - attaching a new knote will not happen
 * - dropping an existing knote will not happen
 * - NOTE_WL_UPDATE_QOS or NOTE_WL_DISCOVER_OWNER will have no effect
 *
 * This modifier doesn't affect NOTE_WL_END_OWNERSHIP.
 */
#define NOTE_WL_THREAD_REQUEST 0x00000001
#define NOTE_WL_SYNC_WAIT 0x00000004
#define NOTE_WL_SYNC_WAKE 0x00000008
#define NOTE_WL_SYNC_IPC 0x80000000
#define NOTE_WL_COMMANDS_MASK 0x8000000f /* Mask of all the [in] commands above */

#define NOTE_WL_UPDATE_QOS 0x00000010
#define NOTE_WL_END_OWNERSHIP 0x00000020
#define NOTE_WL_DISCOVER_OWNER 0x00000080
#define NOTE_WL_IGNORE_ESTALE 0x00000100
#define NOTE_WL_UPDATES_MASK 0x000001f0 /* Mask of all the [in] updates above */

#define NOTE_WL_UPDATE_OWNER 0 /* ... compatibility define ... */

/*
 * EVFILT_WORKLOOP ext[] array indexes/meanings.
 */
#define EV_EXTIDX_WL_LANE 0 /* lane identifier [in: sync waiter]
                             * [out: thread request] */
#define EV_EXTIDX_WL_ADDR 1 /* debounce address [in: NULL==no debounce] */
#define EV_EXTIDX_WL_MASK 2 /* debounce mask [in] */
#define EV_EXTIDX_WL_VALUE 3 /* debounce value [in: not current->ESTALE]
                              * [out: new/debounce value] */

#endif /* PRIVATE */

/*
 * data/hint fflags for EVFILT_{READ|WRITE}, shared with userspace
 *
 * The default behavior for EVFILT_READ is to make the determination
 * relative to the current file descriptor read pointer.
 */
#define NOTE_LOWAT 0x00000001 /* low water mark */

/* data/hint flags for EVFILT_EXCEPT, shared with userspace */
#define NOTE_OOB 0x00000002 /* OOB data */

/*
 * data/hint fflags for EVFILT_VNODE, shared with userspace
 */
#define NOTE_DELETE 0x00000001 /* vnode was removed */
#define NOTE_WRITE 0x00000002 /* data contents changed */
#define NOTE_EXTEND 0x00000004 /* size increased */
#define NOTE_ATTRIB 0x00000008 /* attributes changed */
#define NOTE_LINK 0x00000010 /* link count changed */
#define NOTE_RENAME 0x00000020 /* vnode was renamed */
#define NOTE_REVOKE 0x00000040 /* vnode access was revoked */
#define NOTE_NONE 0x00000080 /* No specific vnode event: to test for EVFILT_READ activation */
#define NOTE_FUNLOCK 0x00000100 /* vnode was unlocked by flock(2) */
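
/*
 * Sketch of a vnode watch (assumptions: "kq" exists; O_EVTONLY comes from
 * <fcntl.h>): the ident is an open file descriptor and the interesting
 * NOTE_* bits go in fflags; triggered bits come back in the event's fflags.
 *
 *    int fd = open(path, O_EVTONLY);
 *    EV_SET(&change, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *        NOTE_DELETE | NOTE_WRITE | NOTE_RENAME, 0, NULL);
 *    kevent(kq, &change, 1, NULL, 0, NULL);
 *    ...
 *    if (event.fflags & NOTE_DELETE) {
 *        // the watched vnode was removed
 *    }
 */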

/*
 * data/hint fflags for EVFILT_PROC, shared with userspace
 *
 * Please note that EVFILT_PROC and EVFILT_SIGNAL share the same knote list
 * that hangs off the proc structure. They also both play games with the hint
 * passed to KNOTE(). If NOTE_SIGNAL is passed as a hint, then the lower bits
 * of the hint contain the signal. If NOTE_FORK is passed, then the lower bits
 * contain the PID of the child (but the PID does not get passed through in
 * the actual kevent).
 */
enum {
    eNoteReapDeprecated __deprecated_enum_msg("This kqueue(2) EVFILT_PROC flag is deprecated") = 0x10000000
};

#define NOTE_EXIT 0x80000000 /* process exited */
#define NOTE_FORK 0x40000000 /* process forked */
#define NOTE_EXEC 0x20000000 /* process exec'd */
#define NOTE_REAP ((unsigned int)eNoteReapDeprecated /* 0x10000000 */ ) /* process reaped */
#define NOTE_SIGNAL 0x08000000 /* shared with EVFILT_SIGNAL */
#define NOTE_EXITSTATUS 0x04000000 /* exit status to be returned, valid for child process only */
#define NOTE_EXIT_DETAIL 0x02000000 /* provide details on reasons for exit */

#define NOTE_PDATAMASK 0x000fffff /* mask for signal & exit status */
#define NOTE_PCTRLMASK (~NOTE_PDATAMASK)
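
/*
 * Sketch of watching a child process for exit, per the flags above
 * (assumptions: "kq" and "child_pid" exist): with NOTE_EXITSTATUS the
 * event's data carries the wait(2)-style status of the child.
 *
 *    EV_SET(&change, (uintptr_t)child_pid, EVFILT_PROC, EV_ADD | EV_ONESHOT,
 *        NOTE_EXIT | NOTE_EXITSTATUS, 0, NULL);
 *    kevent(kq, &change, 1, NULL, 0, NULL);
 *    ...
 *    if (event.fflags & NOTE_EXIT) {
 *        // event.data holds the exit status (see NOTE_PDATAMASK)
 *    }
 */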

/*
 * If NOTE_EXITSTATUS is present, provide additional info about the exiting process.
 */
enum {
    eNoteExitReparentedDeprecated __deprecated_enum_msg("This kqueue(2) EVFILT_PROC flag is no longer sent") = 0x00080000
};
#define NOTE_EXIT_REPARENTED ((unsigned int)eNoteExitReparentedDeprecated) /* exited while reparented */

/*
 * If NOTE_EXIT_DETAIL is present, these bits indicate specific reasons for exiting.
 */
#define NOTE_EXIT_DETAIL_MASK 0x00070000
#define NOTE_EXIT_DECRYPTFAIL 0x00010000
#define NOTE_EXIT_MEMORY 0x00020000
#define NOTE_EXIT_CSERROR 0x00040000

#ifdef PRIVATE

/*
 * If NOTE_EXIT_MEMORY is present, these bits indicate the specific jetsam condition.
 */
#define NOTE_EXIT_MEMORY_DETAIL_MASK 0xfe000000
#define NOTE_EXIT_MEMORY_VMPAGESHORTAGE 0x80000000 /* jetsam condition: lowest jetsam priority proc killed due to vm page shortage */
#define NOTE_EXIT_MEMORY_VMTHRASHING 0x40000000 /* jetsam condition: lowest jetsam priority proc killed due to vm thrashing */
#define NOTE_EXIT_MEMORY_HIWAT 0x20000000 /* jetsam condition: process reached its high water mark */
#define NOTE_EXIT_MEMORY_PID 0x10000000 /* jetsam condition: special pid kill requested */
#define NOTE_EXIT_MEMORY_IDLE 0x08000000 /* jetsam condition: idle process cleaned up */
#define NOTE_EXIT_MEMORY_VNODE 0x04000000 /* jetsam condition: virtual node kill */
#define NOTE_EXIT_MEMORY_FCTHRASHING 0x02000000 /* jetsam condition: lowest jetsam priority proc killed due to filecache thrashing */

#endif

/*
 * data/hint fflags for EVFILT_VM, shared with userspace.
 */
#define NOTE_VM_PRESSURE 0x80000000 /* will react on memory pressure */
#define NOTE_VM_PRESSURE_TERMINATE 0x40000000 /* will quit on memory pressure, possibly after cleaning up dirty state */
#define NOTE_VM_PRESSURE_SUDDEN_TERMINATE 0x20000000 /* will quit immediately on memory pressure */
#define NOTE_VM_ERROR 0x10000000 /* there was an error */

#ifdef PRIVATE

/*
 * data/hint fflags for EVFILT_MEMORYSTATUS, shared with userspace.
 */
#define NOTE_MEMORYSTATUS_PRESSURE_NORMAL 0x00000001 /* system memory pressure has returned to normal */
#define NOTE_MEMORYSTATUS_PRESSURE_WARN 0x00000002 /* system memory pressure has changed to the warning state */
#define NOTE_MEMORYSTATUS_PRESSURE_CRITICAL 0x00000004 /* system memory pressure has changed to the critical state */
#define NOTE_MEMORYSTATUS_LOW_SWAP 0x00000008 /* system is in a low-swap state */
#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0x00000010 /* process memory limit has hit a warning state */
#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0x00000020 /* process memory limit has hit a critical state - soft limit */
#define NOTE_MEMORYSTATUS_MSL_STATUS 0xf0000000 /* bits used to request change to process MSL status */

#ifdef KERNEL_PRIVATE
/*
 * data/hint fflags for EVFILT_MEMORYSTATUS, but not shared with userspace.
 */
#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE 0x00000040 /* Used to restrict sending a warn event only once, per active limit, soft limits only */
#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE 0x00000080 /* Used to restrict sending a warn event only once, per inactive limit, soft limit only */
#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_ACTIVE 0x00000100 /* Used to restrict sending a critical event only once per active limit, soft limit only */
#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE 0x00000200 /* Used to restrict sending a critical event only once per inactive limit, soft limit only */
#define NOTE_MEMORYSTATUS_JETSAM_FG_BAND 0x00000400 /* jetsam is approaching foreground band */

/*
 * Use this mask to protect the kernel private flags.
 */
#define EVFILT_MEMORYSTATUS_ALL_MASK \
    (NOTE_MEMORYSTATUS_PRESSURE_NORMAL | NOTE_MEMORYSTATUS_PRESSURE_WARN | NOTE_MEMORYSTATUS_PRESSURE_CRITICAL | NOTE_MEMORYSTATUS_LOW_SWAP | \
    NOTE_MEMORYSTATUS_PROC_LIMIT_WARN | NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL | NOTE_MEMORYSTATUS_MSL_STATUS)

#endif /* KERNEL_PRIVATE */

typedef enum vm_pressure_level {
    kVMPressureNormal = 0,
    kVMPressureWarning = 1,
    kVMPressureUrgent = 2,
    kVMPressureCritical = 3,
    kVMPressureJetsam = 4, /* jetsam approaching FG bands */
} vm_pressure_level_t;

#endif /* PRIVATE */

/*
 * data/hint fflags for EVFILT_TIMER, shared with userspace.
 * The default is a (repeating) interval timer with the data
 * specifying the timeout interval in milliseconds.
 *
 * All timeouts are implicitly EV_CLEAR events.
 */
#define NOTE_SECONDS 0x00000001 /* data is seconds */
#define NOTE_USECONDS 0x00000002 /* data is microseconds */
#define NOTE_NSECONDS 0x00000004 /* data is nanoseconds */
#define NOTE_ABSOLUTE 0x00000008 /* absolute timeout */
/* ... implicit EV_ONESHOT, timeout uses the gettimeofday epoch */
#define NOTE_LEEWAY 0x00000010 /* ext[1] holds leeway for power aware timers */
#define NOTE_CRITICAL 0x00000020 /* system does minimal timer coalescing */
#define NOTE_BACKGROUND 0x00000040 /* system does maximum timer coalescing */
#define NOTE_MACH_CONTINUOUS_TIME 0x00000080
/*
 * NOTE_MACH_CONTINUOUS_TIME:
 * with NOTE_ABSOLUTE: causes the timer to continue to tick across sleep,
 *      still uses gettimeofday epoch
 * with NOTE_MACHTIME and NOTE_ABSOLUTE: uses mach continuous time epoch
 * without NOTE_ABSOLUTE (interval timer mode): continues to tick across sleep
 */
#define NOTE_MACHTIME 0x00000100 /* data is mach absolute time units */
/* timeout uses the mach absolute time epoch */
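
/*
 * Sketch of a repeating timer (assumptions: "kq" exists): the ident is an
 * arbitrary timer id and data is the interval in the unit selected by fflags
 * (milliseconds if no unit flag is given).  With NOTE_LEEWAY, ext[1] would
 * additionally carry the allowed leeway, which requires kevent64(2) or the
 * private kevent_qos()/kevent_id() calls to pass.
 *
 *    struct kevent64_s change;
 *    EV_SET64(&change, 1, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, 0, 0, 0);
 *    kevent64(kq, &change, 1, NULL, 0, 0, NULL);    // fires every 5 seconds
 */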

#ifdef PRIVATE
/*
 * data/hint fflags for EVFILT_SOCK, shared with userspace.
 *
 */
#define NOTE_CONNRESET 0x00000001 /* Received RST */
#define NOTE_READCLOSED 0x00000002 /* Read side is shutdown */
#define NOTE_WRITECLOSED 0x00000004 /* Write side is shutdown */
#define NOTE_TIMEOUT 0x00000008 /* timeout: rexmt, keep-alive or persist */
#define NOTE_NOSRCADDR 0x00000010 /* source address not available */
#define NOTE_IFDENIED 0x00000020 /* interface denied connection */
#define NOTE_SUSPEND 0x00000040 /* output queue suspended */
#define NOTE_RESUME 0x00000080 /* output queue resumed */
#define NOTE_KEEPALIVE 0x00000100 /* TCP Keepalive received */
#define NOTE_ADAPTIVE_WTIMO 0x00000200 /* TCP adaptive write timeout */
#define NOTE_ADAPTIVE_RTIMO 0x00000400 /* TCP adaptive read timeout */
#define NOTE_CONNECTED 0x00000800 /* socket is connected */
#define NOTE_DISCONNECTED 0x00001000 /* socket is disconnected */
#define NOTE_CONNINFO_UPDATED 0x00002000 /* connection info was updated */
#define NOTE_NOTIFY_ACK 0x00004000 /* notify acknowledgement */

#define EVFILT_SOCK_LEVEL_TRIGGER_MASK \
    (NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_SUSPEND | NOTE_RESUME | \
    NOTE_CONNECTED | NOTE_DISCONNECTED)

#define EVFILT_SOCK_ALL_MASK \
    (NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_TIMEOUT | \
    NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | NOTE_RESUME | \
    NOTE_KEEPALIVE | NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO | \
    NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED | \
    NOTE_NOTIFY_ACK)

#endif /* PRIVATE */

/*
 * data/hint fflags for EVFILT_MACHPORT, shared with userspace.
 *
 * Only portsets are supported at this time.
 *
 * The fflags field can optionally contain the MACH_RCV_MSG, MACH_RCV_LARGE,
 * and related trailer receive options as defined in <mach/message.h>.
 * The presence of these flags directs the kevent64() call to attempt to receive
 * the message during kevent delivery, rather than just indicate that a message exists.
 * On setup, the ext[0] field contains the receive buffer pointer and ext[1] contains
 * the receive buffer length. Upon event delivery, the actual received message size
 * is returned in ext[1]. As with mach_msg(), the buffer must be large enough to
 * receive the message and the requested (or default) message trailers. In addition,
 * the fflags field contains the return code normally returned by mach_msg().
 *
 * If MACH_RCV_MSG is specified, and the ext[1] field specifies a zero length, the
 * system call argument specifying an output area (kevent_qos) will be consulted. If
 * the system call specified an output data area, the user-space address
 * of the received message is carved from that provided output data area (if enough
 * space remains there). The address and length of each received message are
 * returned in the ext[0] and ext[1] fields (respectively) of the corresponding kevent.
 *
 * If MACH_RCV_VOUCHER_CONTENT is specified, the contents of the message voucher are
 * extracted (as specified in the xflags field) and stored in ext[2] up to ext[3]
 * length. If the input length is zero, and the system call provided a data area,
 * the space for the voucher content is carved from the provided space and its
 * address and length are returned in ext[2] and ext[3] respectively.
 *
 * If no message receipt options were provided in the fflags field on setup, no
 * message is received by this call. Instead, on output, the data field simply
 * contains the name of the actual port detected with a message waiting.
 */
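
/*
 * Sketch of the direct-receive mode described above (assumptions: "kq" and
 * the port set "pset" exist; MACH_RCV_MSG and mach_msg_header_t come from
 * <mach/message.h>): MACH_RCV_MSG goes in fflags and a receive buffer in
 * ext[0]/ext[1]; on delivery the message has already been received into that
 * buffer and ext[1] holds its actual size.
 *
 *    char buf[1024];
 *    struct kevent64_s change, event;
 *    EV_SET64(&change, pset, EVFILT_MACHPORT, EV_ADD, MACH_RCV_MSG, 0, 0,
 *        (uint64_t)(uintptr_t)buf, sizeof(buf));
 *    kevent64(kq, &change, 1, NULL, 0, 0, NULL);
 *    ...
 *    if (kevent64(kq, NULL, 0, &event, 1, 0, NULL) > 0) {
 *        // ((mach_msg_header_t *)buf) holds the message; size in event.ext[1]
 *    }
 */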

/*
 * DEPRECATED!!!!!!!!!
 * NOTE_TRACK, NOTE_TRACKERR, and NOTE_CHILD are no longer supported as of 10.5
 */
/* additional flags for EVFILT_PROC */
#define NOTE_TRACK 0x00000001 /* follow across forks */
#define NOTE_TRACKERR 0x00000002 /* could not track child */
#define NOTE_CHILD 0x00000004 /* am a child process */


#ifdef PRIVATE
#endif /* PRIVATE */

#ifndef KERNEL
/* Temporary solution for BootX to use inode.h till kqueue moves to vfs layer */
#include <sys/queue.h>
struct knote;
SLIST_HEAD(klist, knote);
#endif

#ifdef KERNEL

#ifdef XNU_KERNEL_PRIVATE
#include <sys/queue.h>
#include <kern/kern_types.h>
#include <sys/fcntl.h> /* FREAD, FWRITE */
#include <kern/debug.h> /* panic */
#include <pthread/priority_private.h>

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_KQUEUE);
#endif

LIST_HEAD(knote_list, knote);
TAILQ_HEAD(kqtailq, knote); /* a list of "queued" events */

/* index into various kq queues */
typedef uint8_t kq_index_t;

/* lskq(1) knows about this type */
__options_decl(kn_status_t, uint16_t /* 12 bits really */, {
    KN_ACTIVE      = 0x001, /* event has been triggered */
    KN_QUEUED      = 0x002, /* event is on queue */
    KN_DISABLED    = 0x004, /* event is disabled */
    KN_DROPPING    = 0x008, /* knote is being dropped */
    KN_LOCKED      = 0x010, /* knote is locked (kq_knlocks) */
    KN_POSTING     = 0x020, /* f_event() in flight */
    KN_STAYACTIVE  = 0x040, /* force event to stay active */
    KN_DEFERDELETE = 0x080, /* defer delete until re-enabled */
    KN_MERGE_QOS   = 0x100, /* f_event() / f_* ran concurrently and overrides must merge */
    KN_REQVANISH   = 0x200, /* requested EV_VANISH */
    KN_VANISHED    = 0x400, /* has vanished */
    KN_SUPPRESSED  = 0x800, /* event is suppressed during delivery */
});

#define KNOTE_KQ_BITSIZE 42
_Static_assert(KNOTE_KQ_BITSIZE > VM_KERNEL_POINTER_SIGNIFICANT_BITS,
    "Make sure sign extending kn_kq_packed is legit");

struct kqueue;
struct knote {
    TAILQ_ENTRY(knote) kn_tqe;     /* linkage for tail queue */
    SLIST_ENTRY(knote) kn_link;    /* linkage for search list */
    SLIST_ENTRY(knote) kn_selnext; /* klist element chain */

    kn_status_t kn_status : 12;
    uintptr_t
        kn_qos_index:4,    /* in-use qos index */
        kn_qos_override:3, /* qos override index */
        kn_is_fd:1,        /* knote is an fd */
        kn_vnode_kqok:1,
        kn_vnode_use_ofst:1;
#if __LP64__
    intptr_t kn_kq_packed : KNOTE_KQ_BITSIZE;
#else
    intptr_t kn_kq_packed;
#endif

    /* per filter stash of data (pointer, uint32_t or uint64_t) */
    union {
        void *kn_hook;
        uint32_t kn_hook32;
#if __LP64__
        uint64_t kn_hook_waitqid;
#else
        uint32_t kn_hook_waitqid;
#endif
    };

    /* per filter pointer to the resource being watched */
    union {
        struct fileproc *kn_fp;       /* file data pointer */
        struct proc *kn_proc;         /* proc pointer */
        struct ipc_mqueue *kn_mqueue; /* pset pointer */
        struct thread_call *kn_thcall;
        struct thread *kn_thread;
    };

    /*
     * Mimic kevent_qos so that knote_fill_kevent code is not horrid,
     * but with subtleties:
     *
     * - kevent_qos_s::filter is 16 bits where ours is 8, and we use the top
     *   bits to store the real specialized filter.
     *   knote_fill_kevent* will always force the top bits to 0xff.
     *
     * - kevent_qos_s::xflags is not kept, kn_sfflags takes its place,
     *   knote_fill_kevent* will set xflags to 0.
     *
     * - kevent_qos_s::data is saved as kn_sdata and filters are encouraged
     *   to use knote_fill_kevent, knote_fill_kevent_with_sdata will copy
     *   kn_sdata as the output value.
     *
     * knote_fill_kevent_with_sdata() programmatically asserts
     * these aliasings are respected.
     */
    struct kevent_internal_s {
        uint64_t kei_ident; /* identifier for this event */
#ifdef __LITTLE_ENDIAN__
        int8_t kei_filter;  /* filter for event */
        uint8_t kei_filtid; /* actual filter for event */
#else
        uint8_t kei_filtid; /* actual filter for event */
        int8_t kei_filter;  /* filter for event */
#endif
        uint16_t kei_flags;   /* general flags */
        int32_t kei_qos;      /* quality of service */
        uint64_t kei_udata;   /* opaque user data identifier */
        uint32_t kei_fflags;  /* filter-specific flags */
        uint32_t kei_sfflags; /* knote: saved fflags */
        int64_t kei_sdata;    /* knote: filter-specific saved data */
        uint64_t kei_ext[4];  /* filter-specific extensions */
    } kn_kevent;

#define kn_id kn_kevent.kei_ident
#define kn_filtid kn_kevent.kei_filtid
#define kn_filter kn_kevent.kei_filter
#define kn_flags kn_kevent.kei_flags
#define kn_qos kn_kevent.kei_qos
#define kn_udata kn_kevent.kei_udata
#define kn_fflags kn_kevent.kei_fflags
#define kn_sfflags kn_kevent.kei_sfflags
#define kn_sdata kn_kevent.kei_sdata
#define kn_ext kn_kevent.kei_ext
};

static inline struct kqueue *
knote_get_kq(struct knote *kn)
{
    return (struct kqueue *)kn->kn_kq_packed;
}

static inline int
knote_get_seltype(struct knote *kn)
{
    switch (kn->kn_filter) {
    case EVFILT_READ:
        return FREAD;
    case EVFILT_WRITE:
        return FWRITE;
    default:
        panic("%s(%p): invalid filter %d\n",
            __func__, kn, kn->kn_filter);
        return 0;
    }
}

struct kevent_ctx_s {
    uint64_t kec_data_avail;    /* address of remaining data size */
    user_addr_t kec_data_out;   /* extra data pointer */
    user_size_t kec_data_size;  /* total extra data size */
    user_size_t kec_data_resid; /* residual extra data size */
    uint64_t kec_deadline;      /* wait deadline unless KEVENT_FLAG_IMMEDIATE */
    struct fileproc *kec_fp;    /* fileproc to pass to fp_drop or NULL */
    int kec_fd;                 /* fd to pass to fp_drop or -1 */

    /* the fields below are only set during process / scan */
    int kec_process_nevents;           /* user-level event count */
    int kec_process_noutputs;          /* number of events output */
    unsigned int kec_process_flags;    /* kevent flags, only set for process */
    user_addr_t kec_process_eventlist; /* user-level event list address */
};
typedef struct kevent_ctx_s *kevent_ctx_t;

kevent_ctx_t
kevent_get_context(thread_t thread);

/*
 * Filter operators
 *
 * These routines, provided by each filter, are called to attach, detach, deliver events,
 * change/update filter registration and process/deliver events:
 *
 * - the f_attach, f_touch, f_process, f_peek and f_detach callbacks are always
 *   serialized with respect to each other for the same knote.
 *
 * - the f_event routine is called with a use-count taken on the knote to
 *   prolongate its lifetime and protect against drop, but is not otherwise
 *   serialized with other routine calls.
 *
 * - the f_detach routine is always called last, and is serialized with all
 *   other callbacks, including f_event calls.
 *
 *
 * Here are more details:
 *
 * f_isfd -
 * identifies if the "ident" field in the kevent structure is a file-descriptor.
 *
 * If so, the knote is associated with the file descriptor prior to attach and
 * auto-removed when the file descriptor is closed (this latter behavior may change
 * for EV_DISPATCH2 kevent types to allow delivery of events identifying unintended
 * closes).
 *
 * Otherwise the knote is hashed by the ident and has no auto-close behavior.
 *
 * f_adjusts_qos -
 * identifies if the filter can adjust its QoS during its lifetime.
 *
 * Filters using this facility should request the new overrides they want
 * using the appropriate FILTER_{RESET,ADJUST}_EVENT_QOS extended codes.
 *
 * Currently, EVFILT_MACHPORT is the only filter using this facility.
 *
 * f_extended_codes -
 * identifies if the filter returns extended codes from its routines
 * (see FILTER_ACTIVE, ...) or 0 / 1 values.
 *
 * f_attach -
 * called to attach the knote to the underlying object that will be delivering events
 * through it when EV_ADD is supplied and no existing matching event is found.
 *
 * provided a knote that is pre-attached to the fd or hashed (see above) but is
 * specially marked to avoid concurrent access until the attach is complete. The
 * kevent structure embedded in this knote has been filled in with a sanitized
 * version of the user-supplied kevent data. However, the user-supplied filter-specific
 * flags (fflags) and data fields have been moved into the knote's kn_sfflags and kn_sdata
 * fields respectively. These are usually interpreted as a set of "interest" flags and
 * data by each filter - to be matched against delivered events.
 *
 * The attach operator indicates errors by setting the EV_ERROR flag in the flags field
 * embedded in the knote's kevent structure - with the specific error indicated in the
 * corresponding data field.
 *
 * The return value indicates if the knote should already be considered "activated" at
 * the time of attach (one or more of the interest events has already occurred).
 *
 * f_detach -
 * called to disassociate the knote from the underlying object delivering events.
 * The filter should not attempt to deliver events through this knote after this
 * operation returns control to the kq system.
 *
 * f_event -
 * if the knote() function (or KNOTE() macro) is called against a list of knotes,
 * this operator will be called on each knote in the list.
 *
 * The "hint" parameter is completely filter-specific, but usually indicates an
 * event or set of events that have occurred against the source object associated
 * with the list.
 *
 * The return value indicates if the knote should already be considered "activated" at
 * the time of the call (one or more of the interest events has already occurred).
 *
 * f_process -
 * called when attempting to deliver triggered events to user-space.
 *
 * If the knote was previously activated, this operator will be called when a
 * thread is trying to deliver events to user-space. The filter gets one last
 * chance to determine if the event/events are still interesting for this knote
 * (are the conditions still right to deliver an event). If so, the filter
 * fills in the output kevent structure with the information to be delivered.
 *
 * The input context/data parameter is used during event delivery. Some
 * filters allow additional data delivery as part of event delivery. This
 * context field indicates if space was made available for these additional
 * items and how that space is to be allocated/carved-out.
 *
 * The filter may set EV_CLEAR or EV_ONESHOT in the output flags field to indicate
 * special post-delivery dispositions for the knote.
 *
 * EV_CLEAR - indicates that all matching events have been delivered. Even
 *            though there were events to deliver now, there will not be any
 *            more until some additional events are delivered to the knote
 *            via the f_event operator, or the interest set is changed via
 *            the f_touch operator. The knote can remain deactivated after
 *            processing this event delivery.
 *
 * EV_ONESHOT - indicates that this is the last event to be delivered via
 *              this knote. It will automatically be deleted upon delivery
 *              (or if in dispatch-mode, upon re-enablement after this delivery).
 *
 * The return value indicates if the knote has delivered an output event.
 * Unless one of the special output flags was set in the output kevent, a non-
 * zero return value ALSO indicates that the knote should be re-activated
 * for future event processing (in case it delivers level-based or multi-edge
 * type events like message queues that already exist).
 *
 * NOTE: In the future, the boolean may change to an enum that allows more
 * explicit indication of just delivering a current event vs delivering
 * an event with more events still pending.
 *
 * f_touch -
 * called to update the knote with new state from the user during
 * EV_ADD/ENABLE/DISABLE on an already-attached knote.
 *
 * f_touch should copy relevant new data from the kevent into the knote.
 *
 * The operator must lock against concurrent f_event operations.
 *
 * A return value of 1 indicates that the knote should now be considered
 * 'activated'.
 *
 * f_touch can set EV_ERROR with a specific error in the data field to
 * return an error to the client. You should return 1 to indicate that
 * the kevent needs to be activated and processed.
 *
 * f_peek -
 * For knotes marked KN_STAYACTIVE, indicate if the knote is truly active
 * at the moment (not used for event delivery, but for status checks).
 *
 * f_allow_drop -
 *
 * [OPTIONAL] If this function is non-null, then it indicates that the
 * filter wants to validate EV_DELETE events. This is necessary if
 * a particular filter needs to synchronize knote deletion with its own
 * filter lock.
 *
 * When true is returned, the EV_DELETE is allowed and can proceed.
 *
 * If false is returned, the EV_DELETE doesn't proceed, and the passed in
 * kevent is used for the copyout to userspace.
 *
 * Currently, EVFILT_WORKLOOP is the only filter using this facility.
 *
 * f_post_register_wait -
 * [OPTIONAL] called when attach or touch return the FILTER_REGISTER_WAIT
 * extended code bit. It is possible to use this facility when the last
 * register command wants to wait.
 *
 * Currently, EVFILT_WORKLOOP is the only filter using this facility.
 */

struct _kevent_register;
struct knote_lock_ctx;
struct proc;
struct uthread;
struct waitq;

struct filterops {
    bool f_isfd;           /* true if ident == filedescriptor */
    bool f_adjusts_qos;    /* true if the filter can override the knote */
    bool f_extended_codes; /* hooks return extended codes */

    int (*f_attach)(struct knote *kn, struct kevent_qos_s *kev);
    void (*f_detach)(struct knote *kn);
    int (*f_event)(struct knote *kn, long hint);
    int (*f_touch)(struct knote *kn, struct kevent_qos_s *kev);
    int (*f_process)(struct knote *kn, struct kevent_qos_s *kev);
    int (*f_peek)(struct knote *kn);

    /* optional & advanced */
    bool (*f_allow_drop)(struct knote *kn, struct kevent_qos_s *kev);
    void (*f_post_register_wait)(struct uthread *uth, struct knote *kn,
        struct _kevent_register *ss_kr);
};

/*
 * Extended codes returned by filter routines when f_extended_codes is set.
 *
 * FILTER_ACTIVE
 * The filter is active and a call to f_process() may return an event.
 *
 * For f_process() the meaning is slightly different: the knote will be
 * activated again as long as f_process returns FILTER_ACTIVE, unless
 * EV_CLEAR is set, which requires a new f_event to reactivate the knote.
 *
 * Valid: f_attach, f_event, f_touch, f_process, f_peek
 * Implicit: -
 * Ignored: -
 *
 * FILTER_REGISTER_WAIT
 * The filter wants its f_post_register_wait() to be called.
 *
 * Note: It is only valid to ask for this behavior for a workloop kqueue,
 * and is really only meant to be used by EVFILT_WORKLOOP.
 *
 * Valid: f_attach, f_touch
 * Implicit: -
 * Ignored: f_event, f_process, f_peek
 *
 * FILTER_UPDATE_REQ_QOS
 * The filter wants the passed in QoS to be updated as the new intrinsic qos
 * for this knote. If the kevent `qos` field is 0, no update is performed.
 *
 * This also will reset the event QoS, so FILTER_ADJUST_EVENT_QOS() must
 * also be used if an override should be maintained.
 *
 * Valid: f_touch
 * Implicit: f_attach
 * Ignored: f_event, f_process, f_peek
 *
 * FILTER_RESET_EVENT_QOS
 * FILTER_ADJUST_EVENT_QOS(qos)
 * The filter wants the QoS of the next event delivery to be overridden
 * at the specified QoS. This allows for the next event QoS to be elevated
 * from the knote requested qos (See FILTER_UPDATE_REQ_QOS).
 *
 * Event QoS Overrides are reset when a particular knote is no longer
 * active. Hence this is ignored if FILTER_ACTIVE isn't also returned.
 *
 * Races between an f_event() and any other f_* routine asking for
 * a specific QoS override are handled generically and the filters do not
 * have to worry about them.
 *
 * To use this facility, filters MUST set their f_adjusts_qos bit to true.
 *
 * It is expected that filters will return the new QoS they expect to be
 * applied from any f_* callback except for f_process() where no specific
 * information should be provided. Filters should not try to hide no-ops,
 * kevent will already optimize these away.
 *
 * Valid: f_touch, f_attach, f_event, f_process
 * Implicit: -
 * Ignored: f_peek
 *
 * FILTER_THREADREQ_NODEFEER
 * The filter has moved a turnstile priority push away from the current
 * thread, preemption has been disabled, and thread requests need to be
 * committed before preemption is re-enabled.
 *
 * Valid: f_attach, f_touch
 * Implicit: -
 * Invalid: f_event, f_process, f_peek
 */
#define FILTER_ACTIVE 0x00000001
#define FILTER_REGISTER_WAIT 0x00000002
#define FILTER_UPDATE_REQ_QOS 0x00000004
#define FILTER_ADJUST_EVENT_QOS_BIT 0x00000008
#define FILTER_ADJUST_EVENT_QOS_MASK 0x00000070
#define FILTER_ADJUST_EVENT_QOS_SHIFT 4
#define FILTER_ADJUST_EVENT_QOS(qos) \
    (((qos) << FILTER_ADJUST_EVENT_QOS_SHIFT) | FILTER_ADJUST_EVENT_QOS_BIT)
#define FILTER_RESET_EVENT_QOS FILTER_ADJUST_EVENT_QOS_BIT
#define FILTER_THREADREQ_NODEFEER 0x00000080

#define filter_call(_ops, call) \
    ((_ops)->f_extended_codes ? (_ops)->call : !!((_ops)->call))
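
/*
 * Sketch of a minimal in-kernel filter using the extended codes above.  The
 * names my_source_ready(), my_source_pending_bytes() and my_filtops are
 * hypothetical; a real filter typically also supplies f_touch and posts to
 * its source's klist via KNOTE() (declared further below).
 *
 *    static int
 *    filt_myattach(struct knote *kn, struct kevent_qos_s *kev)
 *    {
 *        // associate kn with the watched object (e.g. via kn_hook)
 *        return my_source_ready() ? FILTER_ACTIVE : 0;
 *    }
 *
 *    static void
 *    filt_mydetach(struct knote *kn)
 *    {
 *        // unhook kn from the source's klist
 *    }
 *
 *    static int
 *    filt_myevent(struct knote *kn, long hint)
 *    {
 *        // called from KNOTE(&source_klist, hint)
 *        return hint != 0 ? FILTER_ACTIVE : 0;
 *    }
 *
 *    static int
 *    filt_myprocess(struct knote *kn, struct kevent_qos_s *kev)
 *    {
 *        knote_fill_kevent(kn, kev, my_source_pending_bytes());
 *        return FILTER_ACTIVE;
 *    }
 *
 *    const struct filterops my_filtops = {
 *        .f_extended_codes = true,
 *        .f_attach         = filt_myattach,
 *        .f_detach         = filt_mydetach,
 *        .f_event          = filt_myevent,
 *        .f_process        = filt_myprocess,
 *    };
 */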

SLIST_HEAD(klist, knote);
extern void knote_init(void);
extern void klist_init(struct klist *list);

#define KNOTE(list, hint) knote(list, hint)
#define KNOTE_ATTACH(list, kn) knote_attach(list, kn)
#define KNOTE_DETACH(list, kn) knote_detach(list, kn)

extern void knote(struct klist *list, long hint);
extern int knote_attach(struct klist *list, struct knote *kn);
extern int knote_detach(struct klist *list, struct knote *kn);
extern void knote_vanish(struct klist *list, bool make_active);

extern void knote_set_error(struct knote *kn, int error);
extern int64_t knote_low_watermark(const struct knote *kn) __pure2;
extern void knote_fill_kevent_with_sdata(struct knote *kn, struct kevent_qos_s *kev);
extern void knote_fill_kevent(struct knote *kn, struct kevent_qos_s *kev, int64_t data);

extern void knote_link_waitqset_lazy_alloc(struct knote *kn);
extern boolean_t knote_link_waitqset_should_lazy_alloc(struct knote *kn);
extern int knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link);
extern int knote_unlink_waitq(struct knote *kn, struct waitq *wq);
extern void knote_fdclose(struct proc *p, int fd);
extern void knote_markstayactive(struct knote *kn);
extern void knote_clearstayactive(struct knote *kn);
extern const struct filterops *knote_fops(struct knote *kn);

extern struct turnstile *kqueue_turnstile(struct kqueue *);
extern struct turnstile *kqueue_alloc_turnstile(struct kqueue *);

int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, int bufsize);
int kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf,
    uint32_t ubufsize, int32_t *nkqueues_out);
int kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
    uint32_t ubufsize, int32_t *size_out);
int kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
    uint32_t ubufsize, int32_t *nknotes_out);

extern int filt_wlattach_sync_ipc(struct knote *kn);
extern void filt_wldetach_sync_ipc(struct knote *kn);

extern int kevent_workq_internal(struct proc *p,
    user_addr_t changelist, int nchanges,
    user_addr_t eventlist, int nevents,
    user_addr_t data_out, user_size_t *data_available,
    unsigned int flags, int32_t *retval);

#elif defined(KERNEL_PRIVATE) /* !XNU_KERNEL_PRIVATE: kexts still need a klist structure definition */

#include <sys/queue.h>
struct proc;
struct knote;
SLIST_HEAD(klist, knote);

#endif /* !XNU_KERNEL_PRIVATE && KERNEL_PRIVATE */

#else /* KERNEL */

#include <sys/types.h>

struct timespec;

__BEGIN_DECLS
int kqueue(void);
int kevent(int kq,
    const struct kevent *changelist, int nchanges,
    struct kevent *eventlist, int nevents,
    const struct timespec *timeout);
int kevent64(int kq,
    const struct kevent64_s *changelist, int nchanges,
    struct kevent64_s *eventlist, int nevents,
    unsigned int flags,
    const struct timespec *timeout);

#ifdef PRIVATE
int kevent_qos(int kq,
    const struct kevent_qos_s *changelist, int nchanges,
    struct kevent_qos_s *eventlist, int nevents,
    void *data_out, size_t *data_available,
    unsigned int flags);

int kevent_id(kqueue_id_t id,
    const struct kevent_qos_s *changelist, int nchanges,
    struct kevent_qos_s *eventlist, int nevents,
    void *data_out, size_t *data_available,
    unsigned int flags);
#endif /* PRIVATE */

__END_DECLS


#endif /* KERNEL */

#ifdef PRIVATE

/* Flags for pending events notified by kernel via return-to-kernel ast */
#define R2K_WORKLOOP_PENDING_EVENTS 0x1
#define R2K_WORKQ_PENDING_EVENTS 0x2

#endif /* PRIVATE */

#endif /* !_SYS_EVENT_H_ */