]> git.saurik.com Git - apple/xnu.git/blob - bsd/sys/event.h
04385bc6c5d204199bced0ded7103ad65384b07f
[apple/xnu.git] / bsd / sys / event.h
1 /*
2 * Copyright (c) 2003-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*-
29 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
30 * All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 *
53 * $FreeBSD: src/sys/sys/event.h,v 1.5.2.5 2001/12/14 19:21:22 jlemon Exp $
54 */
55
56 #ifndef _SYS_EVENT_H_
57 #define _SYS_EVENT_H_
58
59 #include <machine/types.h>
60 #include <sys/cdefs.h>
61 #include <stdint.h>
62
63 /*
64 * Filter types
65 */
66 #define EVFILT_READ (-1)
67 #define EVFILT_WRITE (-2)
68 #define EVFILT_AIO (-3) /* attached to aio requests */
69 #define EVFILT_VNODE (-4) /* attached to vnodes */
70 #define EVFILT_PROC (-5) /* attached to struct proc */
71 #define EVFILT_SIGNAL (-6) /* attached to struct proc */
72 #define EVFILT_TIMER (-7) /* timers */
73 #define EVFILT_MACHPORT (-8) /* Mach portsets */
74 #define EVFILT_FS (-9) /* Filesystem events */
75 #define EVFILT_USER (-10) /* User events */
76 /* (-11) unused */
77 #define EVFILT_VM (-12) /* Virtual memory events */
78
79 #ifdef PRIVATE
80 #define EVFILT_SOCK (-13) /* Socket events */
81 #define EVFILT_MEMORYSTATUS (-14) /* Memorystatus events */
82 #endif /* PRIVATE */
83 #define EVFILT_EXCEPT (-15) /* Exception events */
84 #ifdef PRIVATE
85 #define EVFILT_WORKLOOP (-17) /* Workloop events */
86 #endif /* PRIVATE */
87
88 #define EVFILT_SYSCOUNT 17
89 #define EVFILT_THREADMARKER EVFILT_SYSCOUNT /* Internal use only */
90
91 #pragma pack(4)
92
/*
 * User-visible event descriptor passed to and returned from kevent(2).
 * Layout is ABI: 4-byte packed (see the #pragma pack(4) above), and the
 * pointer-width ident/data/udata fields differ between 32/64-bit user
 * processes (see user32_kevent/user64_kevent below).
 */
struct kevent {
	uintptr_t	ident;	/* identifier for this event */
	int16_t		filter;	/* filter for event */
	uint16_t	flags;	/* general flags */
	uint32_t	fflags;	/* filter-specific flags */
	intptr_t	data;	/* filter-specific data */
	void		*udata;	/* opaque user data identifier */
};
101
102 #ifdef KERNEL_PRIVATE
103
/*
 * Kernel view of a struct kevent as laid out by a 64-bit user process
 * (pointer-width fields widened to fixed 64-bit types).
 */
struct user64_kevent {
	uint64_t	ident;	/* identifier for this event */
	int16_t		filter;	/* filter for event */
	uint16_t	flags;	/* general flags */
	uint32_t	fflags;	/* filter-specific flags */
	int64_t		data;	/* filter-specific data */
	user_addr_t	udata;	/* opaque user data identifier */
};
112
/*
 * Kernel view of a struct kevent as laid out by a 32-bit user process
 * (pointer-width fields narrowed to fixed 32-bit types).
 */
struct user32_kevent {
	uint32_t	ident;		/* identifier for this event */
	int16_t		filter;		/* filter for event */
	uint16_t	flags;		/* general flags */
	uint32_t	fflags;		/* filter-specific flags */
	int32_t		data;		/* filter-specific data */
	user32_addr_t	udata;		/* opaque user data identifier */
};
121
/*
 * Canonical in-kernel kevent representation: a superset of the
 * user-visible layouts (kevent, kevent64_s, kevent_qos_s), with all
 * fields widened to fixed 64-bit types.
 */
struct kevent_internal_s {
	uint64_t	ident;		/* identifier for this event */
	int16_t		filter;		/* filter for event */
	uint16_t	flags;		/* general flags */
	int32_t		qos;		/* quality of service */
	uint32_t	fflags;		/* filter-specific flags */
//	uint32_t	xflags;		/* extra filter-specific flags */
	int64_t		data;		/* filter-specific data */
	uint64_t	udata;		/* opaque user data identifier */
	uint64_t	ext[4];		/* filter-specific extensions */
};
133
134 #endif /* KERNEL_PRIVATE */
135
136 #pragma pack()
137
/*
 * Extended event descriptor used with kevent64(2): fixed 64-bit field
 * widths regardless of user process size, plus two filter-specific
 * extension slots (e.g. receive buffer address/length for EVFILT_MACHPORT,
 * leeway for NOTE_LEEWAY timers).
 */
struct kevent64_s {
	uint64_t	ident;		/* identifier for this event */
	int16_t		filter;		/* filter for event */
	uint16_t	flags;		/* general flags */
	uint32_t	fflags;		/* filter-specific flags */
	int64_t		data;		/* filter-specific data */
	uint64_t	udata;		/* opaque user data identifier */
	uint64_t	ext[2];		/* filter-specific extensions */
};
147
148 #ifdef PRIVATE
/*
 * PRIVATE event descriptor variant carrying quality-of-service and
 * extra filter flags, with four extension slots; used with the
 * kevent_qos system call (see EVFILT_MACHPORT notes below).
 * Named-field initialization is encouraged instead of an EV_SET-style
 * macro (see comment near KEVENT_FLAG_STACK_EVENTS).
 */
struct kevent_qos_s {
	uint64_t	ident;		/* identifier for this event */
	int16_t		filter;		/* filter for event */
	uint16_t	flags;		/* general flags */
	int32_t		qos;		/* quality of service */
	uint64_t	udata;		/* opaque user data identifier */
	uint32_t	fflags;		/* filter-specific flags */
	uint32_t	xflags;		/* extra filter-specific flags */
	int64_t		data;		/* filter-specific data */
	uint64_t	ext[4];		/* filter-specific extensions */
};
160
161 /*
162 * Type definition for names/ids of dynamically allocated kqueues.
163 */
164 typedef uint64_t kqueue_id_t;
165
166 #endif /* PRIVATE */
167
/*
 * Initialize every field of a struct kevent in one statement.
 * Arguments map positionally: ident, filter, flags, fflags, data, udata.
 * The reserved-name temporary guarantees (kevp) is evaluated only once.
 */
#define EV_SET(kevp, a, b, c, d, e, f) do {	\
	struct kevent *__kevp = (kevp);		\
	__kevp->ident  = (a);			\
	__kevp->filter = (b);			\
	__kevp->flags  = (c);			\
	__kevp->fflags = (d);			\
	__kevp->data   = (e);			\
	__kevp->udata  = (f);			\
} while (0)
177
/*
 * Initialize every field of a struct kevent64_s in one statement.
 * Arguments map positionally: ident, filter, flags, fflags, data,
 * udata, ext[0], ext[1].  The reserved-name temporary guarantees
 * (kevp) is evaluated only once.
 */
#define EV_SET64(kevp, a, b, c, d, e, f, g, h) do {	\
	struct kevent64_s *__kevp = (kevp);		\
	__kevp->ident  = (a);				\
	__kevp->filter = (b);				\
	__kevp->flags  = (c);				\
	__kevp->fflags = (d);				\
	__kevp->data   = (e);				\
	__kevp->udata  = (f);				\
	__kevp->ext[0] = (g);				\
	__kevp->ext[1] = (h);				\
} while (0)
189
190
191 /* kevent system call flags */
192 #define KEVENT_FLAG_NONE 0x000 /* no flag value */
193 #define KEVENT_FLAG_IMMEDIATE 0x001 /* immediate timeout */
194 #define KEVENT_FLAG_ERROR_EVENTS 0x002 /* output events only include change errors */
195
196 #ifdef PRIVATE
197
198 /*
199 * Rather than provide an EV_SET_QOS macro for kevent_qos_t structure
200 * initialization, we encourage use of named field initialization support
201 * instead.
202 */
203
204 #define KEVENT_FLAG_STACK_EVENTS 0x004 /* output events treated as stack (grows down) */
205 #define KEVENT_FLAG_STACK_DATA 0x008 /* output data allocated as stack (grows down) */
206 #define KEVENT_FLAG_UNBIND_CHECK_FLAGS 0x010 /* check the flags passed to kevent_qos_internal_unbind */
207 #define KEVENT_FLAG_WORKQ 0x020 /* interact with the default workq kq */
208 #define KEVENT_FLAG_WORKQ_MANAGER 0x200 /* current thread is the workq manager */
209 #define KEVENT_FLAG_WORKLOOP 0x400 /* interact with the specified workloop kq */
210 #define KEVENT_FLAG_SYNCHRONOUS_BIND 0x800 /* synchronous bind callback */
211
/* workloop servicer binding and dynamic-kqueue lookup flags */
#define KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH	0x8000	/* attach current thread to workloop */
#define KEVENT_FLAG_WORKLOOP_SERVICER_DETACH	0x10000	/* unbind current thread from workloop */
#define KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST	0x20000	/* kq lookup by id must exist */
#define KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST	0x40000	/* kq lookup by id must not exist */
#define KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD	0x80000	/* do not create workqueue threads for this workloop */
217
218 #ifdef XNU_KERNEL_PRIVATE
219
220 #define KEVENT_FLAG_LEGACY32 0x040 /* event data in legacy 32-bit format */
221 #define KEVENT_FLAG_LEGACY64 0x080 /* event data in legacy 64-bit format */
222 #define KEVENT_FLAG_KERNEL 0x1000 /* caller is in-kernel */
223 #define KEVENT_FLAG_DYNAMIC_KQUEUE 0x2000 /* kqueue is dynamically allocated */
224 #define KEVENT_FLAG_WORKLOOP_CANCELED 0x4000 /* workloop bind was cancelled */
225
226 #define KEVENT_FLAG_USER (KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS | \
227 KEVENT_FLAG_STACK_EVENTS | KEVENT_FLAG_STACK_DATA | \
228 KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP | \
229 KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH | KEVENT_FLAG_WORKLOOP_SERVICER_DETACH | \
230 KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST | \
231 KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD)
232
233 /*
234 * Since some filter ops are not part of the standard sysfilt_ops, we use
235 * kn_filtid starting from EVFILT_SYSCOUNT to identify these cases. This is to
236 * let kn_fops() get the correct fops for all cases.
237 */
238 #define EVFILTID_KQREAD (EVFILT_SYSCOUNT)
239 #define EVFILTID_PIPE_R (EVFILT_SYSCOUNT + 1)
240 #define EVFILTID_PIPE_W (EVFILT_SYSCOUNT + 2)
241 #define EVFILTID_PTSD (EVFILT_SYSCOUNT + 3)
242 #define EVFILTID_SOREAD (EVFILT_SYSCOUNT + 4)
243 #define EVFILTID_SOWRITE (EVFILT_SYSCOUNT + 5)
244 #define EVFILTID_SCK (EVFILT_SYSCOUNT + 6)
245 #define EVFILTID_SOEXCEPT (EVFILT_SYSCOUNT + 7)
246 #define EVFILTID_SPEC (EVFILT_SYSCOUNT + 8)
247 #define EVFILTID_BPFREAD (EVFILT_SYSCOUNT + 9)
248 #define EVFILTID_NECP_FD (EVFILT_SYSCOUNT + 10)
249 #define EVFILTID_FSEVENT (EVFILT_SYSCOUNT + 13)
250 #define EVFILTID_VN (EVFILT_SYSCOUNT + 14)
251 #define EVFILTID_TTY (EVFILT_SYSCOUNT + 16)
252 #define EVFILTID_PTMX (EVFILT_SYSCOUNT + 17)
253
254 #define EVFILTID_MAX (EVFILT_SYSCOUNT + 18)
255
256 #endif /* defined(XNU_KERNEL_PRIVATE) */
257
258 #define EV_SET_QOS 0
259
260 #endif /* PRIVATE */
261
262 /* actions */
263 #define EV_ADD 0x0001 /* add event to kq (implies enable) */
264 #define EV_DELETE 0x0002 /* delete event from kq */
265 #define EV_ENABLE 0x0004 /* enable event */
266 #define EV_DISABLE 0x0008 /* disable event (not reported) */
267
268 /* flags */
269 #define EV_ONESHOT 0x0010 /* only report one occurrence */
270 #define EV_CLEAR 0x0020 /* clear event state after reporting */
271 #define EV_RECEIPT 0x0040 /* force immediate event output */
272 /* ... with or without EV_ERROR */
273 /* ... use KEVENT_FLAG_ERROR_EVENTS */
274 /* on syscalls supporting flags */
275
276 #define EV_DISPATCH 0x0080 /* disable event after reporting */
277 #define EV_UDATA_SPECIFIC 0x0100 /* unique kevent per udata value */
278
279 #define EV_DISPATCH2 (EV_DISPATCH | EV_UDATA_SPECIFIC)
280 /* ... in combination with EV_DELETE */
281 /* will defer delete until udata-specific */
282 /* event enabled. EINPROGRESS will be */
283 /* returned to indicate the deferral */
284
285 #define EV_VANISHED 0x0200 /* report that source has vanished */
286 /* ... only valid with EV_DISPATCH2 */
287
288 #define EV_SYSFLAGS 0xF000 /* reserved by system */
289 #define EV_FLAG0 0x1000 /* filter-specific flag */
290 #define EV_FLAG1 0x2000 /* filter-specific flag */
291
292 /* returned values */
293 #define EV_EOF 0x8000 /* EOF detected */
294 #define EV_ERROR 0x4000 /* error, data contains errno */
295
296 /*
297 * Filter specific flags for EVFILT_READ
298 *
299 * The default behavior for EVFILT_READ is to make the "read" determination
300 * relative to the current file descriptor read pointer.
301 *
302 * The EV_POLL flag indicates the determination should be made via poll(2)
303 * semantics. These semantics dictate always returning true for regular files,
304 * regardless of the amount of unread data in the file.
305 *
306 * On input, EV_OOBAND specifies that filter should actively return in the
307 * presence of OOB on the descriptor. It implies that filter will return
308 * if there is OOB data available to read OR when any other condition
309 * for the read are met (for example number of bytes regular data becomes >=
310 * low-watermark).
311 * If EV_OOBAND is not set on input, it implies that the filter should not actively
312 * return for out of band data on the descriptor. The filter will then only return
313 * when some other condition for read is met (ex: when number of regular data bytes
314 * >=low-watermark OR when socket can't receive more data (SS_CANTRCVMORE)).
315 *
316 * On output, EV_OOBAND indicates the presence of OOB data on the descriptor.
317 * If it was not specified as an input parameter, then the data count is the
318 * number of bytes before the current OOB marker, else data count is the number
319 * of bytes beyond OOB marker.
320 */
321 #define EV_POLL EV_FLAG0
322 #define EV_OOBAND EV_FLAG1
323
324 /*
325 * data/hint fflags for EVFILT_USER, shared with userspace
326 */
327
328 /*
329 * On input, NOTE_TRIGGER causes the event to be triggered for output.
330 */
331 #define NOTE_TRIGGER 0x01000000
332
333 /*
334 * On input, the top two bits of fflags specifies how the lower twenty four
335 * bits should be applied to the stored value of fflags.
336 *
337 * On output, the top two bits will always be set to NOTE_FFNOP and the
338 * remaining twenty four bits will contain the stored fflags value.
339 */
340 #define NOTE_FFNOP 0x00000000 /* ignore input fflags */
341 #define NOTE_FFAND 0x40000000 /* and fflags */
342 #define NOTE_FFOR 0x80000000 /* or fflags */
343 #define NOTE_FFCOPY 0xc0000000 /* copy fflags */
344 #define NOTE_FFCTRLMASK 0xc0000000 /* mask for operations */
345 #define NOTE_FFLAGSMASK 0x00ffffff
346
347 #ifdef PRIVATE
348 /*
349 * data/hint fflags for EVFILT_WORKLOOP, shared with userspace
350 *
351 * The ident for thread requests should be the dynamic ID of the workloop
352 * The ident for each sync waiter must be unique to that waiter [for this workloop]
353 *
354 *
355 * Commands:
356 *
357 * @const NOTE_WL_THREAD_REQUEST [in/out]
358 * The kevent represents asynchronous userspace work and its associated QoS.
359 * There can only be a single knote with this flag set per workloop.
360 *
361 * @const NOTE_WL_SYNC_WAIT [in/out]
362 * This bit is set when the caller is waiting to become the owner of a workloop.
363 * If the NOTE_WL_SYNC_WAKE bit is already set then the caller is not blocked,
364 * else it blocks until it is set.
365 *
366 * The QoS field of the knote is used to push on other owners or servicers.
367 *
368 * @const NOTE_WL_SYNC_WAKE [in/out]
369 * Marks the waiter knote as being eligible to become an owner
370 * This bit can only be set once, trying it again will fail with EALREADY.
371 *
372 *
373 * Flags/Modifiers:
374 *
375 * @const NOTE_WL_UPDATE_QOS [in] (only NOTE_WL_THREAD_REQUEST)
376 * For successful updates (EV_ADD only), learn the new userspace async QoS from
377 * the kevent qos field.
378 *
379 * @const NOTE_WL_END_OWNERSHIP [in]
380 * If the update is successful (including deletions) or returns ESTALE, and
381 * the caller thread or the "suspended" thread is currently owning the workloop,
382 * then ownership is forgotten.
383 *
384 * @const NOTE_WL_DISCOVER_OWNER [in]
385 * If the update is successful (including deletions), learn the owner identity
386 * from the loaded value during debounce. This requires an address to have been
387 * filled in the EV_EXTIDX_WL_ADDR ext field, but doesn't require a mask to have
388 * been set in the EV_EXTIDX_WL_MASK.
389 *
390 * @const NOTE_WL_IGNORE_ESTALE [in]
391 * If the operation would fail with ESTALE, mask the error and pretend the
392 * update was successful. However the operation itself didn't happen, meaning
393 * that:
394 * - attaching a new knote will not happen
395 * - dropping an existing knote will not happen
396 * - NOTE_WL_UPDATE_QOS or NOTE_WL_DISCOVER_OWNER will have no effect
397 *
398 * This modifier doesn't affect NOTE_WL_END_OWNERSHIP.
399 */
400 #define NOTE_WL_THREAD_REQUEST 0x00000001
401 #define NOTE_WL_SYNC_WAIT 0x00000004
402 #define NOTE_WL_SYNC_WAKE 0x00000008
403 #define NOTE_WL_COMMANDS_MASK 0x0000000f /* Mask of all the [in] commands above */
404
405 #define NOTE_WL_UPDATE_QOS 0x00000010
406 #define NOTE_WL_END_OWNERSHIP 0x00000020
407 #define NOTE_WL_UPDATE_OWNER 0 /* ... compatibility define ... */
408 #define NOTE_WL_DISCOVER_OWNER 0x00000080
409 #define NOTE_WL_IGNORE_ESTALE 0x00000100
410 #define NOTE_WL_UPDATES_MASK 0x000001f0 /* Mask of all the [in] updates above */
411
412 /*
413 * EVFILT_WORKLOOP ext[] array indexes/meanings.
414 */
415 #define EV_EXTIDX_WL_LANE 0 /* lane identifier [in: sync waiter]
416 [out: thread request] */
417 #define EV_EXTIDX_WL_ADDR 1 /* debounce address [in: NULL==no debounce] */
418 #define EV_EXTIDX_WL_MASK 2 /* debounce mask [in] */
419 #define EV_EXTIDX_WL_VALUE 3 /* debounce value [in: not current->ESTALE]
420 [out: new/debounce value] */
421 #endif /* PRIVATE */
422
/*
 * data/hint fflags for EVFILT_{READ|WRITE}, shared with userspace
 *
 * The default behavior for EVFILT_READ is to make the determination
 * relative to the current file descriptor read pointer.
 */
429 #define NOTE_LOWAT 0x00000001 /* low water mark */
430
431 /* data/hint flags for EVFILT_EXCEPT, shared with userspace */
432 #define NOTE_OOB 0x00000002 /* OOB data */
433
434 /*
435 * data/hint fflags for EVFILT_VNODE, shared with userspace
436 */
437 #define NOTE_DELETE 0x00000001 /* vnode was removed */
438 #define NOTE_WRITE 0x00000002 /* data contents changed */
439 #define NOTE_EXTEND 0x00000004 /* size increased */
440 #define NOTE_ATTRIB 0x00000008 /* attributes changed */
441 #define NOTE_LINK 0x00000010 /* link count changed */
442 #define NOTE_RENAME 0x00000020 /* vnode was renamed */
443 #define NOTE_REVOKE 0x00000040 /* vnode access was revoked */
444 #define NOTE_NONE 0x00000080 /* No specific vnode event: to test for EVFILT_READ activation*/
445 #define NOTE_FUNLOCK 0x00000100 /* vnode was unlocked by flock(2) */
446
447 /*
448 * data/hint fflags for EVFILT_PROC, shared with userspace
449 *
450 * Please note that EVFILT_PROC and EVFILT_SIGNAL share the same knote list
451 * that hangs off the proc structure. They also both play games with the hint
452 * passed to KNOTE(). If NOTE_SIGNAL is passed as a hint, then the lower bits
453 * of the hint contain the signal. IF NOTE_FORK is passed, then the lower bits
454 * contain the PID of the child (but the pid does not get passed through in
455 * the actual kevent).
456 */
/*
 * Deprecated EVFILT_PROC flag value, kept as an enumerator so that any
 * remaining use of NOTE_REAP draws a compile-time deprecation warning.
 */
enum {
	eNoteReapDeprecated __deprecated_enum_msg("This kqueue(2) EVFILT_PROC flag is deprecated") = 0x10000000
};
460
461 #define NOTE_EXIT 0x80000000 /* process exited */
462 #define NOTE_FORK 0x40000000 /* process forked */
463 #define NOTE_EXEC 0x20000000 /* process exec'd */
464 #define NOTE_REAP ((unsigned int)eNoteReapDeprecated /* 0x10000000 */) /* process reaped */
465 #define NOTE_SIGNAL 0x08000000 /* shared with EVFILT_SIGNAL */
466 #define NOTE_EXITSTATUS 0x04000000 /* exit status to be returned, valid for child process only */
467 #define NOTE_EXIT_DETAIL 0x02000000 /* provide details on reasons for exit */
468
469 #define NOTE_PDATAMASK 0x000fffff /* mask for signal & exit status */
470 #define NOTE_PCTRLMASK (~NOTE_PDATAMASK)
471
472 /*
473 * If NOTE_EXITSTATUS is present, provide additional info about exiting process.
474 */
/*
 * Deprecated exit-detail value, kept as an enumerator so that any
 * remaining use of NOTE_EXIT_REPARENTED draws a compile-time warning.
 */
enum {
	eNoteExitReparentedDeprecated __deprecated_enum_msg("This kqueue(2) EVFILT_PROC flag is no longer sent") = 0x00080000
};
#define NOTE_EXIT_REPARENTED	((unsigned int)eNoteExitReparentedDeprecated)	/* exited while reparented */
479
480 /*
481 * If NOTE_EXIT_DETAIL is present, these bits indicate specific reasons for exiting.
482 */
483 #define NOTE_EXIT_DETAIL_MASK 0x00070000
484 #define NOTE_EXIT_DECRYPTFAIL 0x00010000
485 #define NOTE_EXIT_MEMORY 0x00020000
486 #define NOTE_EXIT_CSERROR 0x00040000
487
488 #ifdef PRIVATE
489
/*
 * If NOTE_EXIT_MEMORY is present, these bits indicate specific jetsam condition.
 * All values fall within NOTE_EXIT_MEMORY_DETAIL_MASK.
 */
#define NOTE_EXIT_MEMORY_DETAIL_MASK	0xfe000000
#define NOTE_EXIT_MEMORY_VMPAGESHORTAGE	0x80000000	/* jetsam condition: lowest jetsam priority proc killed due to vm page shortage */
#define NOTE_EXIT_MEMORY_VMTHRASHING	0x40000000	/* jetsam condition: lowest jetsam priority proc killed due to vm thrashing */
#define NOTE_EXIT_MEMORY_HIWAT		0x20000000	/* jetsam condition: process reached its high water mark */
#define NOTE_EXIT_MEMORY_PID		0x10000000	/* jetsam condition: special pid kill requested */
#define NOTE_EXIT_MEMORY_IDLE		0x08000000	/* jetsam condition: idle process cleaned up */
#define NOTE_EXIT_MEMORY_VNODE		0x04000000	/* jetsam condition: virtual node kill (was spelled 0X...; normalized to lowercase 0x prefix) */
#define NOTE_EXIT_MEMORY_FCTHRASHING	0x02000000	/* jetsam condition: lowest jetsam priority proc killed due to filecache thrashing */
501
502 #endif
503
504 /*
505 * data/hint fflags for EVFILT_VM, shared with userspace.
506 */
507 #define NOTE_VM_PRESSURE 0x80000000 /* will react on memory pressure */
508 #define NOTE_VM_PRESSURE_TERMINATE 0x40000000 /* will quit on memory pressure, possibly after cleaning up dirty state */
509 #define NOTE_VM_PRESSURE_SUDDEN_TERMINATE 0x20000000 /* will quit immediately on memory pressure */
510 #define NOTE_VM_ERROR 0x10000000 /* there was an error */
511
512 #ifdef PRIVATE
513
514 /*
515 * data/hint fflags for EVFILT_MEMORYSTATUS, shared with userspace.
516 */
517 #define NOTE_MEMORYSTATUS_PRESSURE_NORMAL 0x00000001 /* system memory pressure has returned to normal */
518 #define NOTE_MEMORYSTATUS_PRESSURE_WARN 0x00000002 /* system memory pressure has changed to the warning state */
519 #define NOTE_MEMORYSTATUS_PRESSURE_CRITICAL 0x00000004 /* system memory pressure has changed to the critical state */
520 #define NOTE_MEMORYSTATUS_LOW_SWAP 0x00000008 /* system is in a low-swap state */
521 #define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0x00000010 /* process memory limit has hit a warning state */
522 #define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0x00000020 /* process memory limit has hit a critical state - soft limit */
523 #define NOTE_MEMORYSTATUS_MSL_STATUS 0xf0000000 /* bits used to request change to process MSL status */
524
525 #ifdef KERNEL_PRIVATE
526 /*
527 * data/hint fflags for EVFILT_MEMORYSTATUS, but not shared with userspace.
528 */
529 #define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE 0x00000040 /* Used to restrict sending a warn event only once, per active limit, soft limits only */
530 #define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE 0x00000080 /* Used to restrict sending a warn event only once, per inactive limit, soft limit only */
531 #define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_ACTIVE 0x00000100 /* Used to restrict sending a critical event only once per active limit, soft limit only */
532 #define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE 0x00000200 /* Used to restrict sending a critical event only once per inactive limit, soft limit only */
533
534 /*
535 * Use this mask to protect the kernel private flags.
536 */
537 #define EVFILT_MEMORYSTATUS_ALL_MASK \
538 (NOTE_MEMORYSTATUS_PRESSURE_NORMAL | NOTE_MEMORYSTATUS_PRESSURE_WARN | NOTE_MEMORYSTATUS_PRESSURE_CRITICAL | NOTE_MEMORYSTATUS_LOW_SWAP | \
539 NOTE_MEMORYSTATUS_PROC_LIMIT_WARN | NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL | NOTE_MEMORYSTATUS_MSL_STATUS)
540
541 #endif /* KERNEL_PRIVATE */
542
/*
 * Memory pressure levels, ordered from least (Normal) to most
 * (Critical) severe; reported via the EVFILT_MEMORYSTATUS
 * NOTE_MEMORYSTATUS_PRESSURE_* events above.
 */
typedef enum vm_pressure_level {
	kVMPressureNormal = 0,
	kVMPressureWarning = 1,
	kVMPressureUrgent = 2,
	kVMPressureCritical = 3,
} vm_pressure_level_t;
549
550 #endif /* PRIVATE */
551
552 /*
553 * data/hint fflags for EVFILT_TIMER, shared with userspace.
554 * The default is a (repeating) interval timer with the data
555 * specifying the timeout interval in milliseconds.
556 *
557 * All timeouts are implicitly EV_CLEAR events.
558 */
559 #define NOTE_SECONDS 0x00000001 /* data is seconds */
560 #define NOTE_USECONDS 0x00000002 /* data is microseconds */
561 #define NOTE_NSECONDS 0x00000004 /* data is nanoseconds */
562 #define NOTE_ABSOLUTE 0x00000008 /* absolute timeout */
563 /* ... implicit EV_ONESHOT, timeout uses the gettimeofday epoch */
564 #define NOTE_LEEWAY 0x00000010 /* ext[1] holds leeway for power aware timers */
565 #define NOTE_CRITICAL 0x00000020 /* system does minimal timer coalescing */
566 #define NOTE_BACKGROUND 0x00000040 /* system does maximum timer coalescing */
567 #define NOTE_MACH_CONTINUOUS_TIME 0x00000080
568 /*
569 * NOTE_MACH_CONTINUOUS_TIME:
570 * with NOTE_ABSOLUTE: causes the timer to continue to tick across sleep,
571 * still uses gettimeofday epoch
572 * with NOTE_MACHTIME and NOTE_ABSOLUTE: uses mach continuous time epoch
573 * without NOTE_ABSOLUTE (interval timer mode): continues to tick across sleep
574 */
575 #define NOTE_MACHTIME 0x00000100 /* data is mach absolute time units */
576 /* timeout uses the mach absolute time epoch */
577
578 #ifdef PRIVATE
579 /*
580 * data/hint fflags for EVFILT_SOCK, shared with userspace.
581 *
582 */
583 #define NOTE_CONNRESET 0x00000001 /* Received RST */
584 #define NOTE_READCLOSED 0x00000002 /* Read side is shutdown */
585 #define NOTE_WRITECLOSED 0x00000004 /* Write side is shutdown */
586 #define NOTE_TIMEOUT 0x00000008 /* timeout: rexmt, keep-alive or persist */
587 #define NOTE_NOSRCADDR 0x00000010 /* source address not available */
588 #define NOTE_IFDENIED 0x00000020 /* interface denied connection */
589 #define NOTE_SUSPEND 0x00000040 /* output queue suspended */
590 #define NOTE_RESUME 0x00000080 /* output queue resumed */
591 #define NOTE_KEEPALIVE 0x00000100 /* TCP Keepalive received */
592 #define NOTE_ADAPTIVE_WTIMO 0x00000200 /* TCP adaptive write timeout */
593 #define NOTE_ADAPTIVE_RTIMO 0x00000400 /* TCP adaptive read timeout */
594 #define NOTE_CONNECTED 0x00000800 /* socket is connected */
595 #define NOTE_DISCONNECTED 0x00001000 /* socket is disconnected */
596 #define NOTE_CONNINFO_UPDATED 0x00002000 /* connection info was updated */
597 #define NOTE_NOTIFY_ACK 0x00004000 /* notify acknowledgement */
598
599 #define EVFILT_SOCK_LEVEL_TRIGGER_MASK \
600 (NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_SUSPEND | NOTE_RESUME | NOTE_CONNECTED | NOTE_DISCONNECTED)
601
602 #define EVFILT_SOCK_ALL_MASK \
603 (NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | NOTE_RESUME | NOTE_KEEPALIVE | NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO | NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED | NOTE_NOTIFY_ACK)
604
605 #endif /* PRIVATE */
606
607 /*
608 * data/hint fflags for EVFILT_MACHPORT, shared with userspace.
609 *
610 * Only portsets are supported at this time.
611 *
612 * The fflags field can optionally contain the MACH_RCV_MSG, MACH_RCV_LARGE,
613 * and related trailer receive options as defined in <mach/message.h>.
614 * The presence of these flags directs the kevent64() call to attempt to receive
615 * the message during kevent delivery, rather than just indicate that a message exists.
616 * On setup, The ext[0] field contains the receive buffer pointer and ext[1] contains
617 * the receive buffer length. Upon event delivery, the actual received message size
618 * is returned in ext[1]. As with mach_msg(), the buffer must be large enough to
619 * receive the message and the requested (or default) message trailers. In addition,
620 * the fflags field contains the return code normally returned by mach_msg().
621 *
622 * If MACH_RCV_MSG is specified, and the ext[1] field specifies a zero length, the
 * system call argument specifying an output area (kevent_qos) will be consulted. If
624 * the system call specified an output data area, the user-space address
625 * of the received message is carved from that provided output data area (if enough
626 * space remains there). The address and length of each received message is
627 * returned in the ext[0] and ext[1] fields (respectively) of the corresponding kevent.
628 *
629 * IF_MACH_RCV_VOUCHER_CONTENT is specified, the contents of the message voucher is
630 * extracted (as specified in the xflags field) and stored in ext[2] up to ext[3]
631 * length. If the input length is zero, and the system call provided a data area,
632 * the space for the voucher content is carved from the provided space and its
633 * address and length is returned in ext[2] and ext[3] respectively.
634 *
635 * If no message receipt options were provided in the fflags field on setup, no
636 * message is received by this call. Instead, on output, the data field simply
637 * contains the name of the actual port detected with a message waiting.
638 */
639
640 /*
641 * DEPRECATED!!!!!!!!!
642 * NOTE_TRACK, NOTE_TRACKERR, and NOTE_CHILD are no longer supported as of 10.5
643 */
644 /* additional flags for EVFILT_PROC */
645 #define NOTE_TRACK 0x00000001 /* follow across forks */
646 #define NOTE_TRACKERR 0x00000002 /* could not track child */
647 #define NOTE_CHILD 0x00000004 /* am a child process */
648
649
650 #ifdef PRIVATE
651 #endif /* PRIVATE */
652
653 #ifndef KERNEL
/* Temporary solution for BootX to use inode.h till kqueue moves to vfs layer */
655 #include <sys/queue.h>
656 struct knote;
657 SLIST_HEAD(klist, knote);
658 #endif
659
660 #ifdef KERNEL
661
662 #ifdef XNU_KERNEL_PRIVATE
663 #include <sys/queue.h>
664 #include <kern/kern_types.h>
665 #include <sys/fcntl.h> /* FREAD, FWRITE */
666 #include <kern/debug.h> /* panic */
667
668 #ifdef MALLOC_DECLARE
669 MALLOC_DECLARE(M_KQUEUE);
670 #endif
671
672 TAILQ_HEAD(kqtailq, knote); /* a list of "queued" events */
673
674 /* Bit size for packed field within knote */
675 #define KNOTE_KQ_BITSIZE 40
676
677
678 /* index into various kq queues */
679 typedef uint8_t kq_index_t;
680 typedef uint16_t kn_status_t;
681
682 #define KN_ACTIVE 0x0001 /* event has been triggered */
683 #define KN_QUEUED 0x0002 /* event is on queue */
684 #define KN_DISABLED 0x0004 /* event is disabled */
685 #define KN_DROPPING 0x0008 /* knote is being dropped */
686 #define KN_USEWAIT 0x0010 /* wait for knote use */
687 #define KN_ATTACHING 0x0020 /* event is pending attach */
688 #define KN_STAYACTIVE 0x0040 /* force event to stay active */
689 #define KN_DEFERDELETE 0x0080 /* defer delete until re-enabled */
690 #define KN_ATTACHED 0x0100 /* currently attached to source */
691 #define KN_DISPATCH 0x0200 /* disables as part of deliver */
692 #define KN_UDATA_SPECIFIC 0x0400 /* udata is part of matching */
693 #define KN_SUPPRESSED 0x0800 /* event is suppressed during delivery */
694 #define KN_STOLENDROP 0x1000 /* someone stole the drop privilege */
695 #define KN_REQVANISH 0x2000 /* requested EV_VANISH */
696 #define KN_VANISHED 0x4000 /* has vanished */
697
698 #define KN_DISPATCH2 (KN_DISPATCH | KN_UDATA_SPECIFIC)
699 /* combination defines deferred-delete mode enabled */
700
/*
 * In-kernel state for one registered event (one knote per attached
 * kevent).  The embedded kn_kevent holds the user-visible fields; the
 * kn_* macros at the bottom alias into it.  The kqueue owning this
 * knote is stored as a packed 40-bit offset (kn_kq_packed) — see
 * knote_get_kq()/knote_set_kq() below.
 */
struct knote {
	TAILQ_ENTRY(knote)	kn_tqe;		/* linkage for tail queue */
	SLIST_ENTRY(knote)	kn_link;	/* linkage for search list */
	SLIST_ENTRY(knote)	kn_selnext;	/* klist element chain */
	union {
		struct fileproc	*p_fp;		/* file data pointer */
		struct proc	*p_proc;	/* proc pointer */
		struct ipc_mqueue *p_mqueue;	/* pset pointer */
	} kn_ptr;				/* source object (discriminated by filter) */
	uint64_t	kn_req_index:3,		/* requested qos index */
			kn_qos_index:3,		/* in-use qos index */
			kn_qos_override:3,	/* qos override index */
			kn_qos_sync_override:3,	/* qos sync override index */
			kn_vnode_kqok:1,
			kn_vnode_use_ofst:1,
			kn_qos_override_is_sync:1,	/* qos override index is a sync override */
			kn_reserved:1,		/* reserved bits */
			kn_filtid:8,		/* filter id to index filter ops */
			kn_kq_packed:KNOTE_KQ_BITSIZE;	/* packed pointer for kq */

	union {
		void		*kn_hook;
		uint64_t	kn_hook_data;
	};					/* filter-private hook storage */
	int64_t		kn_sdata;		/* saved data field */
	struct kevent_internal_s kn_kevent;	/* user-visible kevent state */
	int		kn_sfflags;		/* saved filter flags */
	int		kn_hookid;
	uint16_t	kn_inuse;		/* inuse count */
	kn_status_t	kn_status;		/* status bits (KN_* above) */

/* convenience aliases into the embedded kn_kevent */
#define kn_id		kn_kevent.ident
#define kn_filter	kn_kevent.filter
#define kn_flags	kn_kevent.flags
#define kn_qos		kn_kevent.qos
#define kn_udata	kn_kevent.udata
#define kn_fflags	kn_kevent.fflags
#define kn_xflags	kn_kevent.xflags
#define kn_data		kn_kevent.data
#define kn_ext		kn_kevent.ext
#define kn_fp		kn_ptr.p_fp
};
743
744 static inline struct kqueue *knote_get_kq(struct knote *kn)
745 {
746 if (!(kn->kn_kq_packed))
747 return 0;
748 else
749 return (struct kqueue *)((uintptr_t)(kn->kn_kq_packed) + (uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS);
750 }
751
752 static inline void knote_set_kq(struct knote *kn, void *kq)
753 {
754 if (!kq)
755 kn->kn_kq_packed = 0;
756 else {
757 uint64_t offset = ((uintptr_t)kq - (uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS);
758 kn->kn_kq_packed = offset;
759 }
760 }
761
762 static inline int knote_get_seltype(struct knote *kn)
763 {
764 switch (kn->kn_filter) {
765 case EVFILT_READ:
766 return FREAD;
767 case EVFILT_WRITE:
768 return FWRITE;
769 default:
770 panic("%s(%p): invalid filter %d\n",
771 __func__, kn, kn->kn_filter);
772 return 0;
773 }
774 }
775
776 static inline void knote_set_error(struct knote *kn, int error)
777 {
778 kn->kn_flags |= EV_ERROR;
779 kn->kn_data = error;
780 }
781
/*
 * Context passed to a filter's f_process operator describing where (and
 * whether) extra out-of-band data may be copied out to user space along
 * with the delivered kevent (see the f_process contract comment below).
 */
struct filt_process_s {
        int fp_fd;                      /* NOTE(review): presumably the fd the event is on -- confirm */
        unsigned int fp_flags;
        user_addr_t fp_data_out;        /* user buffer for additional data */
        user_size_t fp_data_size;       /* total size of that buffer */
        user_size_t fp_data_resid;      /* space remaining in the buffer */
};
typedef struct filt_process_s *filt_process_data_t;
790
791 /*
792 * Filter operators
793 *
794 * These routines, provided by each filter, are called to attach, detach, deliver events,
795 * change/update filter registration and process/deliver events. They are called
796 * with a use-count referenced knote, with the kq unlocked. Here are more details:
797 *
798 * f_isfd -
799 * identifies if the "ident" field in the kevent structure is a file-descriptor.
800 *
801 * If so, the knote is associated with the file descriptor prior to attach and
802 * auto-removed when the file descriptor is closed (this latter behavior may change
803 * for EV_DISPATCH2 kevent types to allow delivery of events identifying unintended
804 * closes).
805 *
806 * Otherwise the knote is hashed by the ident and has no auto-close behavior.
807 *
808 * f_adjusts_qos -
809 * identifies if the filter can adjust its QoS during its lifetime.
810 *
811 * Currently, EVFILT_MACHPORT is the only filter using this facility.
812 *
813 * f_needs_boost -
814 * [OPTIONAL] used by filters to communicate they need to hold a boost
815 * while holding a usecount on this knote. This is called with the kqlock
816 * held.
817 *
818 * This is only used by EVFILT_WORKLOOP currently.
819 *
820 * f_attach -
821 * called to attach the knote to the underlying object that will be delivering events
822 * through it when EV_ADD is supplied and no existing matching event is found
823 *
824 * provided a knote that is pre-attached to the fd or hashed (see above) but is
825 * specially marked to avoid concurrent access until the attach is complete. The
826 * kevent structure embedded in this knote has been filled in with a sanitized
827 * version of the user-supplied kevent data. However, the user-supplied filter-specific
828 * flags (fflags) and data fields have been moved into the knote's kn_sfflags and kn_sdata
829 * fields respectively. These are usually interpreted as a set of "interest" flags and
830 * data by each filter - to be matched against delivered events.
831 *
832 * The attach operator indicates errors by setting the EV_ERROR flag in the flags field
833 * embedded in the knote's kevent structure - with the specific error indicated in the
834 * corresponding data field.
835 *
836 * The return value indicates if the knote should already be considered "activated" at
837 * the time of attach (one or more of the interest events has already occurred).
838 *
839 * f_post_attach -
840 * [OPTIONAL] called after a successful attach, with the kqueue lock held,
841 * returns lock held, may drop and re-acquire
842 *
843 * If this function is non-null, then it indicates that the filter wants
844 * to perform an action after a successful ATTACH of a knote.
845 *
846 * Currently, EVFILT_WORKLOOP is the only filter using this facility.
847 *
848 * The return value indicates an error to report to userland.
849 *
850 *
851 * f_detach -
852 * called to disassociate the knote from the underlying object delivering events
853 * the filter should not attempt to deliver events through this knote after this
854 * operation returns control to the kq system.
855 *
856 * f_event -
857 * if the knote() function (or KNOTE() macro) is called against a list of knotes,
858 * this operator will be called on each knote in the list.
859 *
860 * The "hint" parameter is completely filter-specific, but usually indicates an
861 * event or set of events that have occurred against the source object associated
862 * with the list.
863 *
864 * The return value indicates if the knote should now be considered "activated"
865 * (one or more of the interest events has already occurred).
866 *
867 * f_drop_and_unlock -
868 * [OPTIONAL] called with the kqueue locked, and has to unlock
869 *
870 * If this function is non-null, then it indicates that the filter
871 * wants to handle EV_DELETE events. This is necessary if a particular
872 * filter needs to synchronize knote deletion with its own filter lock.
873 * Currently, EVFILT_WORKLOOP is the only filter using this facility.
874 *
875 * The return value indicates an error during the knote drop, i.e., the
876 * knote still exists and user space should re-drive the EV_DELETE.
877 *
878 * If the return value is ERESTART, kevent_register() is called from
879 * scratch again (useful to wait for usecounts to drop and then
880 * reevaluate the relevance of that drop)
881 *
882 *
883 * f_process -
884 * called when attempting to deliver triggered events to user-space.
885 *
886 * If the knote was previously activated, this operator will be called when a
887 * thread is trying to deliver events to user-space. The filter gets one last
888 * chance to determine if the event/events are still interesting for this knote
889 * (are the conditions still right to deliver an event). If so, the filter
890 * fills in the output kevent structure with the information to be delivered.
891 *
892 * The input context/data parameter is used during event delivery. Some
893 * filters allow additional data delivery as part of event delivery. This
894 * context field indicates if space was made available for these additional
895 * items and how that space is to be allocated/carved-out.
896 *
897 * The filter may set EV_CLEAR or EV_ONESHOT in the output flags field to indicate
898 * special post-delivery dispositions for the knote.
899 *
900 * EV_CLEAR - indicates that all matching events have been delivered. Even
901 * though there were events to deliver now, there will not be any
902 * more until some additional events are delivered to the knote
903 * via the f_event operator, or the interest set is changed via
904 * the f_touch operator. The knote can remain deactivated after
905 * processing this event delivery.
906 *
907 * EV_ONESHOT - indicates that this is the last event to be delivered via
908 * this knote. It will automatically be deleted upon delivery
909 * (or if in dispatch-mode, upon re-enablement after this delivery).
910 *
911 * The return value indicates if the knote has delivered an output event.
912 * Unless one of the special output flags was set in the output kevent, a non-
913 * zero return value ALSO indicates that the knote should be re-activated
914 * for future event processing (in case it delivers level-based or a multi-edge
915 * type events like message queues that already exist).
916 *
917 * NOTE: In the future, the boolean may change to an enum that allows more
918 * explicit indication of just delivering a current event vs delivering
919 * an event with more events still pending.
920 *
921 * f_touch -
922 * called to update the knote with new state from the user during EVFILT_ADD/ENABLE/DISABLE
923 * on an already-attached knote.
924 *
925 * f_touch should copy relevant new data from the kevent into the knote.
926 * (if KN_UDATA_SPECIFIC is not set, you may need to update the udata too)
927 *
928 * operator must lock against concurrent f_event and f_process operations.
929 *
930 * A return value of 1 indicates that the knote should now be considered 'activated'.
931 *
932 * f_touch can set EV_ERROR with specific error in the data field to return an error to the client.
933 * You should return 1 to indicate that the kevent needs to be activated and processed.
934 *
935 * f_peek -
936 * For knotes marked KN_STAYACTIVE, indicate if the knote is truly active at
937 * the moment (not used for event delivery, but for status checks).
938 */
939
/*
 * Per-filter operations vector.  The detailed contract for each
 * operator is in the block comment immediately above; operators noted
 * [OPTIONAL] there may be left NULL.
 */
struct filterops {
        bool    f_isfd;         /* true if ident == filedescriptor */
        bool    f_adjusts_qos;  /* true if the filter can override the knote */
        bool    (*f_needs_boost)(struct kevent_internal_s *kev);        /* [OPTIONAL] */
        int     (*f_attach)(struct knote *kn, struct kevent_internal_s *kev);
        int     (*f_post_attach)(struct knote *kn, struct kevent_internal_s *kev); /* [OPTIONAL] */
        void    (*f_detach)(struct knote *kn);
        int     (*f_event)(struct knote *kn, long hint);
        int     (*f_touch)(struct knote *kn, struct kevent_internal_s *kev);
        int     (*f_drop_and_unlock)(struct knote *kn, struct kevent_internal_s *kev); /* [OPTIONAL] */
        int     (*f_process)(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
        unsigned (*f_peek)(struct knote *kn);   /* [OPTIONAL] */
};
953
struct proc;
struct waitq;

/* a klist is a singly-linked list of knotes interested in one event source */
SLIST_HEAD(klist, knote);
extern void knote_init(void);
extern void klist_init(struct klist *list);

/* convenience wrappers over the knote list functions below */
#define KNOTE(list, hint)       knote(list, hint)
#define KNOTE_ATTACH(list, kn)  knote_attach(list, kn)
#define KNOTE_DETACH(list, kn)  knote_detach(list, kn)

/* deliver a filter-specific hint to every knote on a list */
extern void knote(struct klist *list, long hint);
extern int knote_attach(struct klist *list, struct knote *kn);
extern int knote_detach(struct klist *list, struct knote *kn);
extern void knote_vanish(struct klist *list);
extern int knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link);
extern int knote_unlink_waitq(struct knote *kn, struct waitq *wq);
extern void knote_fdclose(struct proc *p, int fd, int force);
extern void knote_markstayactive(struct knote *kn);
extern void knote_clearstayactive(struct knote *kn);
extern void knote_adjust_qos(struct knote *kn, int qos, int override, kq_index_t sync_override_index);
extern void knote_adjust_sync_qos(struct knote *kn, kq_index_t sync_qos, boolean_t lock_kq);
extern const struct filterops *knote_fops(struct knote *kn);
extern void knote_set_error(struct knote *kn, int error);

/* introspection/copyout helpers for dynamically-created (id-based) kqueues */
int kevent_exit_on_workloop_ownership_leak(thread_t thread);
int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, int bufsize);
int kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf,
                uint32_t ubufsize, int32_t *nkqueues_out);
int kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
                uint32_t ubufsize, int32_t *size_out);
int kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
                uint32_t ubufsize, int32_t *nknotes_out);
987
988 #elif defined(KERNEL_PRIVATE) /* !XNU_KERNEL_PRIVATE: kexts still need a klist structure definition */
989
990 #include <sys/queue.h>
991 struct proc;
992 struct knote;
993 SLIST_HEAD(klist, knote);
994
995 #endif /* !XNU_KERNEL_PRIVATE && KERNEL_PRIVATE */
996
997 #ifdef KERNEL_PRIVATE
998 #ifdef PRIVATE
999
/* make these private functions available to the pthread kext */

/* in-kernel entry point for the kevent_qos(2) path (user pointers, raw flags) */
extern int kevent_qos_internal(struct proc *p, int fd,
                user_addr_t changelist, int nchanges,
                user_addr_t eventlist, int nevents,
                user_addr_t data_out, user_size_t *data_available,
                unsigned int flags, int32_t *retval);

/* NOTE(review): presumably bind/unbind a servicer thread to a kqueue at
 * the given qos -- confirm against the pthread kext usage */
extern int kevent_qos_internal_bind(struct proc *p,
                int qos, thread_t thread, unsigned int flags);
extern int kevent_qos_internal_unbind(struct proc *p,
                int qos, thread_t thread, unsigned int flags);

/* in-kernel entry point for the kevent_id(2) path (id-based kqueues) */
extern int kevent_id_internal(struct proc *p, kqueue_id_t *id,
                user_addr_t changelist, int nchanges,
                user_addr_t eventlist, int nevents,
                user_addr_t data_out, user_size_t *data_available,
                unsigned int flags, int32_t *retval);
1017
1018 #endif /* PRIVATE */
1019 #endif /* KERNEL_PRIVATE */
1020
1021 #else /* KERNEL */
1022
1023 #include <sys/types.h>
1024
1025 struct timespec;
1026
1027 __BEGIN_DECLS
/* create a new kqueue; returns its file descriptor or -1 on error */
int     kqueue(void);
/* register changes and/or harvest pending events on a kqueue */
int     kevent(int kq,
	    const struct kevent *changelist, int nchanges,
	    struct kevent *eventlist, int nevents,
	    const struct timespec *timeout);
/* 64-bit-extended variant of kevent() taking kevent64_s entries and flags */
int     kevent64(int kq,
	    const struct kevent64_s *changelist, int nchanges,
	    struct kevent64_s *eventlist, int nevents,
	    unsigned int flags,
	    const struct timespec *timeout);
1038
1039 #ifdef PRIVATE
/* qos-aware variant of kevent() supporting out-of-band data delivery */
int     kevent_qos(int kq,
	    const struct kevent_qos_s *changelist, int nchanges,
	    struct kevent_qos_s *eventlist, int nevents,
	    void *data_out, size_t *data_available,
	    unsigned int flags);

/* like kevent_qos() but addresses the kqueue by identifier, not fd */
int     kevent_id(kqueue_id_t id,
	    const struct kevent_qos_s *changelist, int nchanges,
	    struct kevent_qos_s *eventlist, int nevents,
	    void *data_out, size_t *data_available,
	    unsigned int flags);
1051 #endif /* PRIVATE */
1052
1053 __END_DECLS
1054
1055
1056 #endif /* KERNEL */
1057
1058 #ifdef PRIVATE
1059
1060 /* Flags for pending events notified by kernel via return-to-kernel ast */
1061 #define R2K_WORKLOOP_PENDING_EVENTS 0x1
1062 #define R2K_WORKQ_PENDING_EVENTS 0x2
1063
1064 #endif /* PRIVATE */
1065
1066
1067 #endif /* !_SYS_EVENT_H_ */