/*
 * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $FreeBSD: src/sys/sys/event.h,v 1.5.2.5 2001/12/14 19:21:22 jlemon Exp $
 */
#ifndef _SYS_EVENT_H_
#define _SYS_EVENT_H_

#include <machine/types.h>
#include <sys/cdefs.h>
#define EVFILT_READ             (-1)
#define EVFILT_WRITE            (-2)
#define EVFILT_AIO              (-3)    /* attached to aio requests */
#define EVFILT_VNODE            (-4)    /* attached to vnodes */
#define EVFILT_PROC             (-5)    /* attached to struct proc */
#define EVFILT_SIGNAL           (-6)    /* attached to struct proc */
#define EVFILT_TIMER            (-7)    /* timers */
#define EVFILT_MACHPORT         (-8)    /* Mach portsets */
#define EVFILT_FS               (-9)    /* Filesystem events */
#define EVFILT_USER             (-10)   /* User events */
#define EVFILT_UNUSED_11        (-11)   /* (-11) unused */
#define EVFILT_VM               (-12)   /* Virtual memory events */
#define EVFILT_SOCK             (-13)   /* Socket events */
#define EVFILT_MEMORYSTATUS     (-14)   /* Memorystatus events */
#define EVFILT_EXCEPT           (-15)   /* Exception events */
#define EVFILT_WORKLOOP         (-17)   /* Workloop events */

#define EVFILT_SYSCOUNT         17
#define EVFILT_THREADMARKER     EVFILT_SYSCOUNT /* Internal use only */
struct kevent {
	uintptr_t       ident;  /* identifier for this event */
	int16_t         filter; /* filter for event */
	uint16_t        flags;  /* general flags */
	uint32_t        fflags; /* filter-specific flags */
	intptr_t        data;   /* filter-specific data */
	void            *udata; /* opaque user data identifier */
};
#ifdef KERNEL_PRIVATE

struct user64_kevent {
	uint64_t        ident;  /* identifier for this event */
	int16_t         filter; /* filter for event */
	uint16_t        flags;  /* general flags */
	uint32_t        fflags; /* filter-specific flags */
	int64_t         data;   /* filter-specific data */
	user_addr_t     udata;  /* opaque user data identifier */
};
struct user32_kevent {
	uint32_t        ident;  /* identifier for this event */
	int16_t         filter; /* filter for event */
	uint16_t        flags;  /* general flags */
	uint32_t        fflags; /* filter-specific flags */
	int32_t         data;   /* filter-specific data */
	user32_addr_t   udata;  /* opaque user data identifier */
};

#endif /* KERNEL_PRIVATE */
struct kevent64_s {
	uint64_t        ident;          /* identifier for this event */
	int16_t         filter;         /* filter for event */
	uint16_t        flags;          /* general flags */
	uint32_t        fflags;         /* filter-specific flags */
	int64_t         data;           /* filter-specific data */
	uint64_t        udata;          /* opaque user data identifier */
	uint64_t        ext[2];         /* filter-specific extensions */
};
struct kevent_qos_s {
	uint64_t        ident;          /* identifier for this event */
	int16_t         filter;         /* filter for event */
	uint16_t        flags;          /* general flags */
	int32_t         qos;            /* quality of service */
	uint64_t        udata;          /* opaque user data identifier */
	uint32_t        fflags;         /* filter-specific flags */
	uint32_t        xflags;         /* extra filter-specific flags */
	int64_t         data;           /* filter-specific data */
	uint64_t        ext[4];         /* filter-specific extensions */
};
/*
 * Type definition for names/ids of dynamically allocated kqueues.
 */
typedef uint64_t kqueue_id_t;
#define EV_SET(kevp, a, b, c, d, e, f) do {     \
	struct kevent *__kevp__ = (kevp);       \
	__kevp__->ident = (a);                  \
	__kevp__->filter = (b);                 \
	__kevp__->flags = (c);                  \
	__kevp__->fflags = (d);                 \
	__kevp__->data = (e);                   \
	__kevp__->udata = (f);                  \
} while(0)
#define EV_SET64(kevp, a, b, c, d, e, f, g, h) do {     \
	struct kevent64_s *__kevp__ = (kevp);           \
	__kevp__->ident = (a);                          \
	__kevp__->filter = (b);                         \
	__kevp__->flags = (c);                          \
	__kevp__->fflags = (d);                         \
	__kevp__->data = (e);                           \
	__kevp__->udata = (f);                          \
	__kevp__->ext[0] = (g);                         \
	__kevp__->ext[1] = (h);                         \
} while(0)
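
/*
 * Illustrative usage sketch (not part of the original header): watching a
 * descriptor for readability with EV_SET(), kqueue(2) and kevent(2).  The
 * names "fd" and "handle_readable" are hypothetical placeholders.
 *
 *	#include <sys/event.h>
 *
 *	int kq = kqueue();
 *	struct kevent change, event;
 *
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *	kevent(kq, &change, 1, NULL, 0, NULL);     // register only, drain no events
 *
 *	// block until the descriptor becomes readable
 *	if (kevent(kq, NULL, 0, &event, 1, NULL) > 0 &&
 *	    event.filter == EVFILT_READ)
 *		handle_readable((int)event.ident, (size_t)event.data);
 */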
/* kevent system call flags */
#define KEVENT_FLAG_NONE                         0x000000       /* no flag value */
#define KEVENT_FLAG_IMMEDIATE                    0x000001       /* immediate timeout */
#define KEVENT_FLAG_ERROR_EVENTS                 0x000002       /* output events only include change errors */
/*
 * Rather than provide an EV_SET_QOS macro for kevent_qos_t structure
 * initialization, we encourage use of named field initialization support.
 */
// was  KEVENT_FLAG_STACK_EVENTS                 0x000004
#define KEVENT_FLAG_STACK_DATA                   0x000008   /* output data allocated as stack (grows down) */
//      KEVENT_FLAG_POLL                         0x000010
#define KEVENT_FLAG_WORKQ                        0x000020   /* interact with the default workq kq */
//      KEVENT_FLAG_LEGACY32                     0x000040
//      KEVENT_FLAG_LEGACY64                     0x000080
//      KEVENT_FLAG_PROC64                       0x000100
#define KEVENT_FLAG_WORKQ_MANAGER                0x000200   /* obsolete */
#define KEVENT_FLAG_WORKLOOP                     0x000400   /* interact with the specified workloop kq */
#define KEVENT_FLAG_PARKING                      0x000800   /* workq thread is parking */
//      KEVENT_FLAG_KERNEL                       0x001000
//      KEVENT_FLAG_DYNAMIC_KQUEUE               0x002000
//      KEVENT_FLAG_NEEDS_END_PROCESSING         0x004000
#define KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH     0x008000   /* obsolete */
#define KEVENT_FLAG_WORKLOOP_SERVICER_DETACH     0x010000   /* obsolete */
#define KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST        0x020000   /* kq lookup by id must exist */
#define KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST    0x040000   /* kq lookup by id must not exist */
#define KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD        0x080000   /* obsolete */
#ifdef XNU_KERNEL_PRIVATE

#define KEVENT_FLAG_POLL                         0x0010  /* Call is for poll() */
#define KEVENT_FLAG_LEGACY32                     0x0040  /* event data in legacy 32-bit format */
#define KEVENT_FLAG_LEGACY64                     0x0080  /* event data in legacy 64-bit format */
#define KEVENT_FLAG_PROC64                       0x0100  /* proc is 64bits */
#define KEVENT_FLAG_KERNEL                       0x1000  /* caller is in-kernel */
#define KEVENT_FLAG_DYNAMIC_KQUEUE               0x2000  /* kqueue is dynamically allocated */
#define KEVENT_FLAG_NEEDS_END_PROCESSING         0x4000  /* end processing required before returning */

#define KEVENT_ID_FLAG_USER (KEVENT_FLAG_WORKLOOP | \
		KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)

#define KEVENT_FLAG_USER (KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS | \
		KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP | \
		KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)
/*
 * Since some filter ops are not part of the standard sysfilt_ops, we use
 * kn_filtid starting from EVFILT_SYSCOUNT to identify these cases.  This is to
 * let kn_fops() get the correct fops for all cases.
 */
#define EVFILTID_KQREAD            (EVFILT_SYSCOUNT)
#define EVFILTID_PIPE_N            (EVFILT_SYSCOUNT + 1)
#define EVFILTID_PIPE_R            (EVFILT_SYSCOUNT + 2)
#define EVFILTID_PIPE_W            (EVFILT_SYSCOUNT + 3)
#define EVFILTID_PTSD              (EVFILT_SYSCOUNT + 4)
#define EVFILTID_SOREAD            (EVFILT_SYSCOUNT + 5)
#define EVFILTID_SOWRITE           (EVFILT_SYSCOUNT + 6)
#define EVFILTID_SCK               (EVFILT_SYSCOUNT + 7)
#define EVFILTID_SOEXCEPT          (EVFILT_SYSCOUNT + 8)
#define EVFILTID_SPEC              (EVFILT_SYSCOUNT + 9)
#define EVFILTID_BPFREAD           (EVFILT_SYSCOUNT + 10)
#define EVFILTID_NECP_FD           (EVFILT_SYSCOUNT + 11)
#define EVFILTID_FSEVENT           (EVFILT_SYSCOUNT + 15)
#define EVFILTID_VN                (EVFILT_SYSCOUNT + 16)
#define EVFILTID_TTY               (EVFILT_SYSCOUNT + 17)
#define EVFILTID_PTMX              (EVFILT_SYSCOUNT + 18)

#define EVFILTID_DETACHED          (EVFILT_SYSCOUNT + 19)
#define EVFILTID_MAX               (EVFILT_SYSCOUNT + 20)

#endif /* defined(XNU_KERNEL_PRIVATE) */
#define EV_ADD              0x0001      /* add event to kq (implies enable) */
#define EV_DELETE           0x0002      /* delete event from kq */
#define EV_ENABLE           0x0004      /* enable event */
#define EV_DISABLE          0x0008      /* disable event (not reported) */

#define EV_ONESHOT          0x0010      /* only report one occurrence */
#define EV_CLEAR            0x0020      /* clear event state after reporting */
#define EV_RECEIPT          0x0040      /* force immediate event output */
                                        /* ... with or without EV_ERROR */
                                        /* ... use KEVENT_FLAG_ERROR_EVENTS */
                                        /*     on syscalls supporting flags */

#define EV_DISPATCH         0x0080      /* disable event after reporting */
#define EV_UDATA_SPECIFIC   0x0100      /* unique kevent per udata value */

#define EV_DISPATCH2        (EV_DISPATCH | EV_UDATA_SPECIFIC)
/* ... in combination with EV_DELETE */
/* will defer delete until udata-specific */
/* event enabled. EINPROGRESS will be */
/* returned to indicate the deferral */

#define EV_VANISHED         0x0200      /* report that source has vanished  */
                                        /* ... only valid with EV_DISPATCH2 */

#define EV_SYSFLAGS         0xF000      /* reserved by system */
#define EV_FLAG0            0x1000      /* filter-specific flag */
#define EV_FLAG1            0x2000      /* filter-specific flag */

/* returned values */
#define EV_EOF              0x8000      /* EOF detected */
#define EV_ERROR            0x4000      /* error, data contains errno */
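
/*
 * Illustrative sketch (not part of the original header): using EV_RECEIPT so
 * that per-change registration errors come back in the output list instead of
 * failing the whole kevent(2) call.  "changes", "nchanges", "NCHANGES" and
 * "handle_failed_change" are hypothetical placeholders.
 *
 *	struct kevent out[NCHANGES];
 *	for (int i = 0; i < nchanges; i++)
 *		changes[i].flags |= EV_RECEIPT;
 *
 *	int n = kevent(kq, changes, nchanges, out, NCHANGES, NULL);
 *	for (int i = 0; i < n; i++) {
 *		// every receipt has EV_ERROR set; data is 0 on success, else errno
 *		if ((out[i].flags & EV_ERROR) && out[i].data != 0)
 *			handle_failed_change(i, (int)out[i].data);
 *	}
 */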
/*
 * Filter specific flags for EVFILT_READ
 *
 * The default behavior for EVFILT_READ is to make the "read" determination
 * relative to the current file descriptor read pointer.
 *
 * The EV_POLL flag indicates the determination should be made via poll(2)
 * semantics. These semantics dictate always returning true for regular files,
 * regardless of the amount of unread data in the file.
 *
 * On input, EV_OOBAND specifies that the filter should actively return in the
 * presence of OOB data on the descriptor. It implies that the filter will return
 * if there is OOB data available to read OR when any other condition for the
 * read is met (for example, the number of bytes of regular data becomes >=
 * low-watermark).
 *
 * If EV_OOBAND is not set on input, it implies that the filter should not actively
 * return for out of band data on the descriptor. The filter will then only return
 * when some other condition for read is met (ex: when number of regular data bytes
 * >= low-watermark OR when socket can't receive more data (SS_CANTRCVMORE)).
 *
 * On output, EV_OOBAND indicates the presence of OOB data on the descriptor.
 * If it was not specified as an input parameter, then the data count is the
 * number of bytes before the current OOB marker, else data count is the number
 * of bytes beyond OOB marker.
 */
#define EV_POLL         EV_FLAG0
#define EV_OOBAND       EV_FLAG1
/*
 * data/hint fflags for EVFILT_USER, shared with userspace
 *
 * On input, NOTE_TRIGGER causes the event to be triggered for output.
 */
#define NOTE_TRIGGER    0x01000000

/*
 * On input, the top two bits of fflags specify how the lower twenty-four
 * bits should be applied to the stored value of fflags.
 *
 * On output, the top two bits will always be set to NOTE_FFNOP and the
 * remaining twenty-four bits will contain the stored fflags value.
 */
#define NOTE_FFNOP      0x00000000              /* ignore input fflags */
#define NOTE_FFAND      0x40000000              /* and fflags */
#define NOTE_FFOR       0x80000000              /* or fflags */
#define NOTE_FFCOPY     0xc0000000              /* copy fflags */
#define NOTE_FFCTRLMASK 0xc0000000              /* mask for operations */
#define NOTE_FFLAGSMASK 0x00ffffff
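
/*
 * Illustrative sketch (not part of the original header): a user-triggered
 * event.  One thread registers an EVFILT_USER knote; another fires it with
 * NOTE_TRIGGER, OR-ing an application-defined bit into the stored fflags with
 * NOTE_FFOR.  "MY_EVENT_ID" is a hypothetical identifier.
 *
 *	struct kevent kev;
 *
 *	// registration (e.g. at startup)
 *	EV_SET(&kev, MY_EVENT_ID, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	// trigger (e.g. from another thread)
 *	EV_SET(&kev, MY_EVENT_ID, EVFILT_USER, 0,
 *	    NOTE_TRIGGER | NOTE_FFOR | 0x1, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */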
/*
 * data/hint fflags for EVFILT_WORKLOOP, shared with userspace
 *
 * The ident for thread requests should be the dynamic ID of the workloop
 * The ident for each sync waiter must be unique to that waiter [for this workloop]
 *
 *
 * @const NOTE_WL_THREAD_REQUEST [in/out]
 * The kevent represents asynchronous userspace work and its associated QoS.
 * There can only be a single knote with this flag set per workloop.
 *
 * @const NOTE_WL_SYNC_WAIT [in/out]
 * This bit is set when the caller is waiting to become the owner of a workloop.
 * If the NOTE_WL_SYNC_WAKE bit is already set then the caller is not blocked,
 * else it blocks until it is set.
 *
 * The QoS field of the knote is used to push on other owners or servicers.
 *
 * @const NOTE_WL_SYNC_WAKE [in/out]
 * Marks the waiter knote as being eligible to become an owner.
 * This bit can only be set once, trying it again will fail with EALREADY.
 *
 * @const NOTE_WL_SYNC_IPC [in/out]
 * The knote is a sync IPC redirected turnstile push.
 *
 *
 * @const NOTE_WL_UPDATE_QOS [in] (only NOTE_WL_THREAD_REQUEST)
 * For successful updates (EV_ADD only), learn the new userspace async QoS from
 * the kevent qos field.
 *
 * @const NOTE_WL_END_OWNERSHIP [in]
 * If the update is successful (including deletions) or returns ESTALE, and
 * the caller thread or the "suspended" thread is currently owning the workloop,
 * then ownership is forgotten.
 *
 * @const NOTE_WL_DISCOVER_OWNER [in]
 * If the update is successful (including deletions), learn the owner identity
 * from the loaded value during debounce. This requires an address to have been
 * filled in the EV_EXTIDX_WL_ADDR ext field, but doesn't require a mask to have
 * been set in the EV_EXTIDX_WL_MASK.
 *
 * @const NOTE_WL_IGNORE_ESTALE [in]
 * If the operation would fail with ESTALE, mask the error and pretend the
 * update was successful. However the operation itself didn't happen, meaning:
 * - attaching a new knote will not happen
 * - dropping an existing knote will not happen
 * - NOTE_WL_UPDATE_QOS or NOTE_WL_DISCOVER_OWNER will have no effect
 *
 * This modifier doesn't affect NOTE_WL_END_OWNERSHIP.
 */
#define NOTE_WL_THREAD_REQUEST   0x00000001
#define NOTE_WL_SYNC_WAIT        0x00000004
#define NOTE_WL_SYNC_WAKE        0x00000008
#define NOTE_WL_SYNC_IPC         0x80000000
#define NOTE_WL_COMMANDS_MASK    0x8000000f /* Mask of all the [in] commands above */

#define NOTE_WL_UPDATE_QOS       0x00000010
#define NOTE_WL_END_OWNERSHIP    0x00000020
#define NOTE_WL_DISCOVER_OWNER   0x00000080
#define NOTE_WL_IGNORE_ESTALE    0x00000100
#define NOTE_WL_UPDATES_MASK     0x000001f0 /* Mask of all the [in] updates above */

#define NOTE_WL_UPDATE_OWNER     0 /* ... compatibility define ... */
/*
 * EVFILT_WORKLOOP ext[] array indexes/meanings.
 */
#define EV_EXTIDX_WL_LANE        0         /* lane identifier  [in: sync waiter]
                                            *                  [out: thread request]     */
#define EV_EXTIDX_WL_ADDR        1         /* debounce address [in: NULL==no debounce]   */
#define EV_EXTIDX_WL_MASK        2         /* debounce mask    [in]                      */
#define EV_EXTIDX_WL_VALUE       3         /* debounce value   [in: not current->ESTALE]
                                            *                  [out: new/debounce value] */
/*
 * data/hint fflags for EVFILT_{READ|WRITE}, shared with userspace
 *
 * The default behavior for EVFILT_READ is to make the determination
 * relative to the current file descriptor read pointer.
 */
#define NOTE_LOWAT      0x00000001              /* low water mark */

/* data/hint flags for EVFILT_EXCEPT, shared with userspace */
#define NOTE_OOB        0x00000002              /* OOB data */
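
/*
 * Illustrative sketch (not part of the original header): asking that an
 * EVFILT_READ event only fire once at least 1024 bytes are available, using
 * NOTE_LOWAT with the low-water mark carried in the data field.  "fd" is a
 * hypothetical descriptor.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, NOTE_LOWAT, 1024, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */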
/*
 * data/hint fflags for EVFILT_VNODE, shared with userspace
 */
#define NOTE_DELETE     0x00000001              /* vnode was removed */
#define NOTE_WRITE      0x00000002              /* data contents changed */
#define NOTE_EXTEND     0x00000004              /* size increased */
#define NOTE_ATTRIB     0x00000008              /* attributes changed */
#define NOTE_LINK       0x00000010              /* link count changed */
#define NOTE_RENAME     0x00000020              /* vnode was renamed */
#define NOTE_REVOKE     0x00000040              /* vnode access was revoked */
#define NOTE_NONE       0x00000080              /* No specific vnode event: to test for EVFILT_READ activation*/
#define NOTE_FUNLOCK    0x00000100              /* vnode was unlocked by flock(2) */
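
/*
 * Illustrative sketch (not part of the original header): watching a file for
 * deletion, writes and renames.  EVFILT_VNODE takes an open file descriptor
 * as the ident; "path" and "handle_deleted" are hypothetical placeholders.
 *
 *	int fd = open(path, O_EVTONLY);
 *	struct kevent kev;
 *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_DELETE | NOTE_WRITE | NOTE_RENAME, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	// delivered events report the triggering notes in fflags
 *	struct kevent ev;
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) > 0 && (ev.fflags & NOTE_DELETE))
 *		handle_deleted((int)ev.ident);
 */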
/*
 * data/hint fflags for EVFILT_PROC, shared with userspace
 *
 * Please note that EVFILT_PROC and EVFILT_SIGNAL share the same knote list
 * that hangs off the proc structure. They also both play games with the hint
 * passed to KNOTE(). If NOTE_SIGNAL is passed as a hint, then the lower bits
 * of the hint contain the signal. If NOTE_FORK is passed, then the lower bits
 * contain the PID of the child (but the pid does not get passed through in
 * the actual kevent).
 */
enum {
	eNoteReapDeprecated __deprecated_enum_msg("This kqueue(2) EVFILT_PROC flag is deprecated") = 0x10000000
};

#define NOTE_EXIT               0x80000000      /* process exited */
#define NOTE_FORK               0x40000000      /* process forked */
#define NOTE_EXEC               0x20000000      /* process exec'd */
#define NOTE_REAP               ((unsigned int)eNoteReapDeprecated /* 0x10000000 */ )   /* process reaped */
#define NOTE_SIGNAL             0x08000000      /* shared with EVFILT_SIGNAL */
#define NOTE_EXITSTATUS         0x04000000      /* exit status to be returned, valid for child process or when allowed to signal target pid */
#define NOTE_EXIT_DETAIL        0x02000000      /* provide details on reasons for exit */

#define NOTE_PDATAMASK  0x000fffff              /* mask for signal & exit status */
#define NOTE_PCTRLMASK  (~NOTE_PDATAMASK)
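
/*
 * Illustrative sketch (not part of the original header): watching a process
 * for exit and fork.  The ident is the pid being watched; "child_pid" and
 * "reap_child" are hypothetical placeholders.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, child_pid, EVFILT_PROC, EV_ADD | EV_CLEAR,
 *	    NOTE_EXIT | NOTE_FORK, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	struct kevent ev;
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) > 0 && (ev.fflags & NOTE_EXIT))
 *		reap_child((pid_t)ev.ident);   // the watched pid has exited
 */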
/*
 * If NOTE_EXITSTATUS is present, provide additional info about exiting process.
 */
enum {
	eNoteExitReparentedDeprecated __deprecated_enum_msg("This kqueue(2) EVFILT_PROC flag is no longer sent") = 0x00080000
};
#define NOTE_EXIT_REPARENTED    ((unsigned int)eNoteExitReparentedDeprecated)   /* exited while reparented */

/*
 * If NOTE_EXIT_DETAIL is present, these bits indicate specific reasons for exiting.
 */
#define NOTE_EXIT_DETAIL_MASK           0x00070000
#define NOTE_EXIT_DECRYPTFAIL           0x00010000
#define NOTE_EXIT_MEMORY                0x00020000
#define NOTE_EXIT_CSERROR               0x00040000

/*
 * If NOTE_EXIT_MEMORY is present, these bits indicate a specific jetsam condition.
 */
#define NOTE_EXIT_MEMORY_DETAIL_MASK    0xfe000000
#define NOTE_EXIT_MEMORY_VMPAGESHORTAGE 0x80000000      /* jetsam condition: lowest jetsam priority proc killed due to vm page shortage */
#define NOTE_EXIT_MEMORY_VMTHRASHING    0x40000000      /* jetsam condition: lowest jetsam priority proc killed due to vm thrashing */
#define NOTE_EXIT_MEMORY_HIWAT          0x20000000      /* jetsam condition: process reached its high water mark */
#define NOTE_EXIT_MEMORY_PID            0x10000000      /* jetsam condition: special pid kill requested */
#define NOTE_EXIT_MEMORY_IDLE           0x08000000      /* jetsam condition: idle process cleaned up */
#define NOTE_EXIT_MEMORY_VNODE          0x04000000      /* jetsam condition: virtual node kill */
#define NOTE_EXIT_MEMORY_FCTHRASHING    0x02000000      /* jetsam condition: lowest jetsam priority proc killed due to filecache thrashing */
/*
 * data/hint fflags for EVFILT_VM, shared with userspace.
 */
#define NOTE_VM_PRESSURE                        0x80000000              /* will react on memory pressure */
#define NOTE_VM_PRESSURE_TERMINATE              0x40000000              /* will quit on memory pressure, possibly after cleaning up dirty state */
#define NOTE_VM_PRESSURE_SUDDEN_TERMINATE       0x20000000              /* will quit immediately on memory pressure */
#define NOTE_VM_ERROR                           0x10000000              /* there was an error */

/*
 * data/hint fflags for EVFILT_MEMORYSTATUS, shared with userspace.
 */
#define NOTE_MEMORYSTATUS_PRESSURE_NORMAL       0x00000001      /* system memory pressure has returned to normal */
#define NOTE_MEMORYSTATUS_PRESSURE_WARN         0x00000002      /* system memory pressure has changed to the warning state */
#define NOTE_MEMORYSTATUS_PRESSURE_CRITICAL     0x00000004      /* system memory pressure has changed to the critical state */
#define NOTE_MEMORYSTATUS_LOW_SWAP              0x00000008      /* system is in a low-swap state */
#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN       0x00000010      /* process memory limit has hit a warning state */
#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL   0x00000020      /* process memory limit has hit a critical state - soft limit */
#define NOTE_MEMORYSTATUS_MSL_STATUS   0xf0000000      /* bits used to request change to process MSL status */
#ifdef KERNEL_PRIVATE
/*
 * data/hint fflags for EVFILT_MEMORYSTATUS, but not shared with userspace.
 */
#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE        0x00000040      /* Used to restrict sending a warn event only once, per active limit, soft limits only */
#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE      0x00000080      /* Used to restrict sending a warn event only once, per inactive limit, soft limit only */
#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_ACTIVE    0x00000100      /* Used to restrict sending a critical event only once per active limit, soft limit only */
#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE  0x00000200      /* Used to restrict sending a critical event only once per inactive limit, soft limit only */
#define NOTE_MEMORYSTATUS_JETSAM_FG_BAND                0x00000400      /* jetsam is approaching foreground band */

/*
 * Use this mask to protect the kernel private flags.
 */
#define EVFILT_MEMORYSTATUS_ALL_MASK \
	(NOTE_MEMORYSTATUS_PRESSURE_NORMAL | NOTE_MEMORYSTATUS_PRESSURE_WARN | NOTE_MEMORYSTATUS_PRESSURE_CRITICAL | NOTE_MEMORYSTATUS_LOW_SWAP | \
	 NOTE_MEMORYSTATUS_PROC_LIMIT_WARN | NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL | NOTE_MEMORYSTATUS_MSL_STATUS)

#endif /* KERNEL_PRIVATE */
typedef enum vm_pressure_level {
	kVMPressureNormal   = 0,
	kVMPressureWarning  = 1,
	kVMPressureUrgent   = 2,
	kVMPressureCritical = 3,
	kVMPressureJetsam   = 4,  /* jetsam approaching FG bands */
} vm_pressure_level_t;
/*
 * data/hint fflags for EVFILT_TIMER, shared with userspace.
 * The default is a (repeating) interval timer with the data
 * specifying the timeout interval in milliseconds.
 *
 * All timeouts are implicitly EV_CLEAR events.
 */
#define NOTE_SECONDS    0x00000001              /* data is seconds         */
#define NOTE_USECONDS   0x00000002              /* data is microseconds    */
#define NOTE_NSECONDS   0x00000004              /* data is nanoseconds     */
#define NOTE_ABSOLUTE   0x00000008              /* absolute timeout        */
/* ... implicit EV_ONESHOT, timeout uses the gettimeofday epoch */
#define NOTE_LEEWAY             0x00000010              /* ext[1] holds leeway for power aware timers */
#define NOTE_CRITICAL   0x00000020              /* system does minimal timer coalescing */
#define NOTE_BACKGROUND 0x00000040              /* system does maximum timer coalescing */
#define NOTE_MACH_CONTINUOUS_TIME       0x00000080
/*
 * NOTE_MACH_CONTINUOUS_TIME:
 * with NOTE_ABSOLUTE: causes the timer to continue to tick across sleep,
 *      still uses gettimeofday epoch
 * with NOTE_MACHTIME and NOTE_ABSOLUTE: uses mach continuous time epoch
 * without NOTE_ABSOLUTE (interval timer mode): continues to tick across sleep
 */
#define NOTE_MACHTIME   0x00000100              /* data is mach absolute time units */
/* timeout uses the mach absolute time epoch */
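
/*
 * Illustrative sketch (not part of the original header): a repeating
 * 500-millisecond interval timer (the default unit), and a one-second timer
 * using NOTE_SECONDS.  The idents 1 and 2 are arbitrary application choices.
 *
 *	struct kevent kev[2];
 *	EV_SET(&kev[0], 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	EV_SET(&kev[1], 2, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 1, NULL);
 *	kevent(kq, kev, 2, NULL, 0, NULL);
 *
 *	// each delivery reports in data the number of intervals elapsed
 *	struct kevent ev;
 *	kevent(kq, NULL, 0, &ev, 1, NULL);
 */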
/*
 * data/hint fflags for EVFILT_SOCK, shared with userspace.
 */
#define NOTE_CONNRESET          0x00000001 /* Received RST */
#define NOTE_READCLOSED         0x00000002 /* Read side is shutdown */
#define NOTE_WRITECLOSED        0x00000004 /* Write side is shutdown */
#define NOTE_TIMEOUT            0x00000008 /* timeout: rexmt, keep-alive or persist */
#define NOTE_NOSRCADDR          0x00000010 /* source address not available */
#define NOTE_IFDENIED           0x00000020 /* interface denied connection */
#define NOTE_SUSPEND            0x00000040 /* output queue suspended */
#define NOTE_RESUME             0x00000080 /* output queue resumed */
#define NOTE_KEEPALIVE          0x00000100 /* TCP Keepalive received */
#define NOTE_ADAPTIVE_WTIMO     0x00000200 /* TCP adaptive write timeout */
#define NOTE_ADAPTIVE_RTIMO     0x00000400 /* TCP adaptive read timeout */
#define NOTE_CONNECTED          0x00000800 /* socket is connected */
#define NOTE_DISCONNECTED       0x00001000 /* socket is disconnected */
#define NOTE_CONNINFO_UPDATED   0x00002000 /* connection info was updated */
#define NOTE_NOTIFY_ACK         0x00004000 /* notify acknowledgement */

#define EVFILT_SOCK_LEVEL_TRIGGER_MASK \
		(NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_SUSPEND | NOTE_RESUME | \
		 NOTE_CONNECTED | NOTE_DISCONNECTED)

#define EVFILT_SOCK_ALL_MASK \
		(NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_TIMEOUT | \
		NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | NOTE_RESUME | \
		NOTE_KEEPALIVE | NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO | \
		NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED | \
		NOTE_NOTIFY_ACK)
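
/*
 * Illustrative sketch (not part of the original header; these fflags are
 * Apple-private): watching a connected socket for reset, disconnect and
 * timeout.  "so" is a hypothetical socket descriptor.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, so, EVFILT_SOCK, EV_ADD | EV_CLEAR,
 *	    NOTE_CONNRESET | NOTE_DISCONNECTED | NOTE_TIMEOUT, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */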
/*
 * data/hint fflags for EVFILT_MACHPORT, shared with userspace.
 *
 * Only portsets are supported at this time.
 *
 * The fflags field can optionally contain the MACH_RCV_MSG, MACH_RCV_LARGE,
 * and related trailer receive options as defined in <mach/message.h>.
 * The presence of these flags directs the kevent64() call to attempt to receive
 * the message during kevent delivery, rather than just indicate that a message exists.
 * On setup, the ext[0] field contains the receive buffer pointer and ext[1] contains
 * the receive buffer length.  Upon event delivery, the actual received message size
 * is returned in ext[1].  As with mach_msg(), the buffer must be large enough to
 * receive the message and the requested (or default) message trailers.  In addition,
 * the fflags field contains the return code normally returned by mach_msg().
 *
 * If MACH_RCV_MSG is specified, and the ext[1] field specifies a zero length, the
 * system call argument specifying an output area (kevent_qos) will be consulted. If
 * the system call specified an output data area, the user-space address
 * of the received message is carved from that provided output data area (if enough
 * space remains there). The address and length of each received message is
 * returned in the ext[0] and ext[1] fields (respectively) of the corresponding kevent.
 *
 * If MACH_RCV_VOUCHER_CONTENT is specified, the contents of the message voucher are
 * extracted (as specified in the xflags field) and stored in ext[2] up to ext[3]
 * length.  If the input length is zero, and the system call provided a data area,
 * the space for the voucher content is carved from the provided space and its
 * address and length is returned in ext[2] and ext[3] respectively.
 *
 * If no message receipt options were provided in the fflags field on setup, no
 * message is received by this call. Instead, on output, the data field simply
 * contains the name of the actual port detected with a message waiting.
 */
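
/*
 * Illustrative sketch (not part of the original header): arming a port set
 * for message arrival and letting kevent64(2) receive the message inline, as
 * described above.  "pset" and "rcv_buf" are hypothetical placeholders.
 *
 *	struct kevent64_s kev;
 *	EV_SET64(&kev, pset, EVFILT_MACHPORT, EV_ADD | EV_CLEAR,
 *	    MACH_RCV_MSG | MACH_RCV_LARGE, 0, 0,
 *	    (uint64_t)(uintptr_t)rcv_buf,      // ext[0]: receive buffer
 *	    sizeof(rcv_buf));                  // ext[1]: receive buffer length
 *	kevent64(kq, &kev, 1, NULL, 0, 0, NULL);
 *
 *	struct kevent64_s ev;
 *	kevent64(kq, NULL, 0, &ev, 1, 0, NULL);
 *	// on delivery, ext[1] holds the received message size and fflags holds
 *	// the mach_msg() return code
 */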
/*
 * DEPRECATED!!!!!!!!!
 * NOTE_TRACK, NOTE_TRACKERR, and NOTE_CHILD are no longer supported as of 10.5
 */
/* additional flags for EVFILT_PROC */
#define NOTE_TRACK      0x00000001              /* follow across forks */
#define NOTE_TRACKERR   0x00000002              /* could not track child */
#define NOTE_CHILD      0x00000004              /* am a child process */

/* Temporary solution for BootX to use inode.h till kqueue moves to vfs layer */
#include <sys/queue.h>
SLIST_HEAD(klist, knote);
#ifdef XNU_KERNEL_PRIVATE
#include <sys/queue.h>
#include <mach/vm_param.h>
#include <kern/kern_types.h>
#include <sys/fcntl.h> /* FREAD, FWRITE */
#include <kern/debug.h> /* panic */
#include <pthread/priority_private.h>

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_KQUEUE);
#endif

LIST_HEAD(knote_list, knote);
TAILQ_HEAD(kqtailq, knote);     /* a list of "queued" events */

/* index into various kq queues */
typedef uint8_t kq_index_t;
/* lskq(1) knows about this type */
__options_decl(kn_status_t, uint16_t /* 12 bits really */, {
	KN_ACTIVE         = 0x001,  /* event has been triggered */
	KN_QUEUED         = 0x002,  /* event is on queue */
	KN_DISABLED       = 0x004,  /* event is disabled */
	KN_DROPPING       = 0x008,  /* knote is being dropped */
	KN_LOCKED         = 0x010,  /* knote is locked (kq_knlocks) */
	KN_POSTING        = 0x020,  /* f_event() in flight */
	KN_STAYACTIVE     = 0x040,  /* force event to stay active */
	KN_DEFERDELETE    = 0x080,  /* defer delete until re-enabled */
	KN_MERGE_QOS      = 0x100,  /* f_event() / f_* ran concurrently and overrides must merge */
	KN_REQVANISH      = 0x200,  /* requested EV_VANISH */
	KN_VANISHED       = 0x400,  /* has vanished */
	KN_SUPPRESSED     = 0x800,  /* event is suppressed during delivery */
});
#if __LP64__
#define KNOTE_KQ_PACKED_BITS   42
#define KNOTE_KQ_PACKED_SHIFT   0
#define KNOTE_KQ_PACKED_BASE    0
#else
#define KNOTE_KQ_PACKED_BITS   32
#define KNOTE_KQ_PACKED_SHIFT   0
#define KNOTE_KQ_PACKED_BASE    0
#endif

_Static_assert(!VM_PACKING_IS_BASE_RELATIVE(KNOTE_KQ_PACKED),
    "Make sure the knote pointer packing is based on arithmetic shifts");
struct knote {
	TAILQ_ENTRY(knote)       kn_tqe;            /* linkage for tail queue */
	SLIST_ENTRY(knote)       kn_link;           /* linkage for search list */
	SLIST_ENTRY(knote)       kn_selnext;        /* klist element chain */

	kn_status_t              kn_status : 12;
	    kn_qos_index:4,                         /* in-use qos index */
	    kn_qos_override:3,                      /* qos override index */
	    kn_is_fd:1,                             /* knote is an fd */
#if __LP64__
	uintptr_t                   kn_kq_packed : KNOTE_KQ_PACKED_BITS;
#else
	uintptr_t                   kn_kq_packed;
#endif

	/* per filter stash of data (pointer, uint32_t or uint64_t) */
#if __LP64__
		uint64_t            kn_hook_waitqid;
#else
		uint32_t            kn_hook_waitqid;
#endif
	/* per filter pointer to the resource being watched */
	union {
		struct fileproc    *kn_fp;          /* file data pointer */
		struct proc        *kn_proc;        /* proc pointer */
		struct ipc_mqueue  *kn_mqueue;      /* pset pointer */
		struct thread_call *kn_thcall;
		struct thread      *kn_thread;
	};
	/*
	 * Mimic kevent_qos so that knote_fill_kevent code is not horrid,
	 * but with subtleties:
	 *
	 * - kevent_qos_s::filter is 16bits where ours is 8, and we use the top
	 *   bits to store the real specialized filter.
	 *   knote_fill_kevent* will always force the top bits to 0xff.
	 *
	 * - kevent_qos_s::xflags is not kept, kn_sfflags takes its place,
	 *   knote_fill_kevent* will set xflags to 0.
	 *
	 * - kevent_qos_s::data is saved as kn_sdata and filters are encouraged
	 *   to use knote_fill_kevent, knote_fill_kevent_with_sdata will copy
	 *   kn_sdata as the output value.
	 *
	 * knote_fill_kevent_with_sdata() programmatically asserts
	 * these aliasings are respected.
	 */
	struct kevent_internal_s {
		uint64_t    kei_ident;      /* identifier for this event */
#ifdef __LITTLE_ENDIAN__
		int8_t      kei_filter;     /* filter for event */
		uint8_t     kei_filtid;     /* actual filter for event */
#else
		uint8_t     kei_filtid;     /* actual filter for event */
		int8_t      kei_filter;     /* filter for event */
#endif
		uint16_t    kei_flags;      /* general flags */
		int32_t     kei_qos;        /* quality of service */
		uint64_t    kei_udata;      /* opaque user data identifier */
		uint32_t    kei_fflags;     /* filter-specific flags */
		uint32_t    kei_sfflags;    /* knote: saved fflags */
		int64_t     kei_sdata;      /* knote: filter-specific saved data */
		uint64_t    kei_ext[4];     /* filter-specific extensions */
	} kn_kevent;
};

#define kn_id           kn_kevent.kei_ident
#define kn_filtid       kn_kevent.kei_filtid
#define kn_filter       kn_kevent.kei_filter
#define kn_flags        kn_kevent.kei_flags
#define kn_qos          kn_kevent.kei_qos
#define kn_udata        kn_kevent.kei_udata
#define kn_fflags       kn_kevent.kei_fflags
#define kn_sfflags      kn_kevent.kei_sfflags
#define kn_sdata        kn_kevent.kei_sdata
#define kn_ext          kn_kevent.kei_ext
static inline struct kqueue *
knote_get_kq(struct knote *kn)
{
	return (struct kqueue *)VM_UNPACK_POINTER(kn->kn_kq_packed, KNOTE_KQ_PACKED);
}
static inline int
knote_get_seltype(struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
		return FREAD;
	case EVFILT_WRITE:
		return FWRITE;
	default:
		panic("%s(%p): invalid filter %d\n",
		    __func__, kn, kn->kn_filter);
		return 0;
	}
}
struct kevent_ctx_s {
	uint64_t         kec_data_avail;    /* address of remaining data size */
	user_addr_t      kec_data_out;      /* extra data pointer */
	user_size_t      kec_data_size;     /* total extra data size */
	user_size_t      kec_data_resid;    /* residual extra data size */
	uint64_t         kec_deadline;      /* wait deadline unless KEVENT_FLAG_IMMEDIATE */
	struct fileproc *kec_fp;            /* fileproc to pass to fp_drop or NULL */
	int              kec_fd;            /* fd to pass to fp_drop or -1 */

	/* the fields below are only set during process / scan */
	int              kec_process_nevents;       /* user-level event count */
	int              kec_process_noutputs;      /* number of events output */
	unsigned int     kec_process_flags;         /* kevent flags, only set for process  */
	user_addr_t      kec_process_eventlist;     /* user-level event list address */
};

typedef struct kevent_ctx_s *kevent_ctx_t;

kevent_ctx_t
kevent_get_context(thread_t thread);
/*
 * These routines, provided by each filter, are called to attach, detach, deliver events,
 * change/update filter registration and process/deliver events:
 *
 * - the f_attach, f_touch, f_process, f_peek and f_detach callbacks are always
 *   serialized with respect to each other for the same knote.
 *
 * - the f_event routine is called with a use-count taken on the knote to
 *   prolong its lifetime and protect against drop, but is not otherwise
 *   serialized with other routine calls.
 *
 * - the f_detach routine is always called last, and is serialized with all
 *   other callbacks, including f_event calls.
 *
 *
 * Here are more details:
 *
 * f_isfd -
 *        identifies if the "ident" field in the kevent structure is a file-descriptor.
 *
 *        If so, the knote is associated with the file descriptor prior to attach and
 *        auto-removed when the file descriptor is closed (this latter behavior may change
 *        for EV_DISPATCH2 kevent types to allow delivery of events identifying unintended
 *        closes).
 *
 *        Otherwise the knote is hashed by the ident and has no auto-close behavior.
 *
 * f_adjusts_qos -
 *        identifies if the filter can adjust its QoS during its lifetime.
 *
 *        Filters using this facility should request the new overrides they want
 *        using the appropriate FILTER_{RESET,ADJUST}_EVENT_QOS extended codes.
 *
 *        Currently, EVFILT_MACHPORT is the only filter using this facility.
 *
 * f_extended_codes -
 *        identifies if the filter returns extended codes from its routines
 *        (see FILTER_ACTIVE, ...) or 0 / 1 values.
 *
 * f_attach -
 *        called to attach the knote to the underlying object that will be delivering events
 *        through it when EV_ADD is supplied and no existing matching event is found.
 *
 *        provided a knote that is pre-attached to the fd or hashed (see above) but is
 *        specially marked to avoid concurrent access until the attach is complete. The
 *        kevent structure embedded in this knote has been filled in with a sanitized
 *        version of the user-supplied kevent data.  However, the user-supplied filter-specific
 *        flags (fflags) and data fields have been moved into the knote's kn_sfflags and kn_sdata
 *        fields respectively.  These are usually interpreted as a set of "interest" flags and
 *        data by each filter - to be matched against delivered events.
 *
 *        The attach operator indicates errors by setting the EV_ERROR flag in the flags field
 *        embedded in the knote's kevent structure - with the specific error indicated in the
 *        corresponding data field.
 *
 *        The return value indicates if the knote should already be considered "activated" at
 *        the time of attach (one or more of the interest events has already occurred).
 *
 * f_detach -
 *        called to disassociate the knote from the underlying object delivering events;
 *        the filter should not attempt to deliver events through this knote after this
 *        operation returns control to the kq system.
 *
 * f_event -
 *        if the knote() function (or KNOTE() macro) is called against a list of knotes,
 *        this operator will be called on each knote in the list.
 *
 *        The "hint" parameter is completely filter-specific, but usually indicates an
 *        event or set of events that have occurred against the source object associated
 *        with the knote.
 *
 *        The return value indicates if the knote should already be considered "activated" at
 *        the time of attach (one or more of the interest events has already occurred).
 *
 * f_process -
 *        called when attempting to deliver triggered events to user-space.
 *
 *        If the knote was previously activated, this operator will be called when a
 *        thread is trying to deliver events to user-space.  The filter gets one last
 *        chance to determine if the event/events are still interesting for this knote
 *        (are the conditions still right to deliver an event).  If so, the filter
 *        fills in the output kevent structure with the information to be delivered.
 *
 *        The input context/data parameter is used during event delivery.  Some
 *        filters allow additional data delivery as part of event delivery.  This
 *        context field indicates if space was made available for these additional
 *        items and how that space is to be allocated/carved-out.
 *
 *        The filter may set EV_CLEAR or EV_ONESHOT in the output flags field to indicate
 *        special post-delivery dispositions for the knote.
 *
 *        EV_CLEAR - indicates that all matching events have been delivered. Even
 *                   though there were events to deliver now, there will not be any
 *                   more until some additional events are delivered to the knote
 *                   via the f_event operator, or the interest set is changed via
 *                   the f_touch operator.  The knote can remain deactivated after
 *                   processing this event delivery.
 *
 *        EV_ONESHOT - indicates that this is the last event to be delivered via
 *                   this knote.  It will automatically be deleted upon delivery
 *                   (or if in dispatch-mode, upon re-enablement after this delivery).
 *
 *        The return value indicates if the knote has delivered an output event.
 *        Unless one of the special output flags was set in the output kevent, a non-
 *        zero return value ALSO indicates that the knote should be re-activated
 *        for future event processing (in case it delivers level-based or multi-edge
 *        type events like message queues that already exist).
 *
 *        NOTE: In the future, the boolean may change to an enum that allows more
 *              explicit indication of just delivering a current event vs delivering
 *              an event with more events still pending.
 *
 * f_touch -
 *        called to update the knote with new state from the user during
 *        EVFILT_ADD/ENABLE/DISABLE on an already-attached knote.
 *
 *        f_touch should copy relevant new data from the kevent into the knote.
 *
 *        The operator must lock against concurrent f_event operations.
 *
 *        A return value of 1 indicates that the knote should now be considered
 *        "activated".
 *
 *        f_touch can set EV_ERROR with a specific error in the data field to
 *        return an error to the client. You should return 1 to indicate that
 *        the kevent needs to be activated and processed.
 *
 * f_peek -
 *        For knotes marked KN_STAYACTIVE, indicate if the knote is truly active
 *        at the moment (not used for event delivery, but for status checks).
 *
 * f_allow_drop -
 *        [OPTIONAL] If this function is non-null, then it indicates that the
 *        filter wants to validate EV_DELETE events. This is necessary if
 *        a particular filter needs to synchronize knote deletion with its own
 *        internal state.
 *
 *        When true is returned, the EV_DELETE is allowed and can proceed.
 *
 *        If false is returned, the EV_DELETE doesn't proceed, and the passed in
 *        kevent is used for the copyout to userspace.
 *
 *        Currently, EVFILT_WORKLOOP is the only filter using this facility.
 *
 * f_post_register_wait -
 *        [OPTIONAL] called when attach or touch return the FILTER_REGISTER_WAIT
 *        extended code bit. It is possible to use this facility when the last
 *        register command wants to wait.
 *
 *        Currently, EVFILT_WORKLOOP is the only filter using this facility.
 *
 * (An illustrative sketch of a minimal filter definition follows the
 * struct filterops declaration below.)
 */
struct _kevent_register;
struct knote_lock_ctx;

struct filterops {
	bool    f_isfd;           /* true if ident == filedescriptor */
	bool    f_adjusts_qos;    /* true if the filter can override the knote */
	bool    f_extended_codes; /* hooks return extended codes */

	int     (*f_attach)(struct knote *kn, struct kevent_qos_s *kev);
	void    (*f_detach)(struct knote *kn);
	int     (*f_event)(struct knote *kn, long hint);
	int     (*f_touch)(struct knote *kn, struct kevent_qos_s *kev);
	int     (*f_process)(struct knote *kn, struct kevent_qos_s *kev);
	int     (*f_peek)(struct knote *kn);

	/* optional & advanced */
	bool    (*f_allow_drop)(struct knote *kn, struct kevent_qos_s *kev);
	void    (*f_post_register_wait)(struct uthread *uth, struct knote *kn,
	    struct _kevent_register *ss_kr);
};
/*
 * Extended codes returned by filter routines when f_extended_codes is set.
 *
 * FILTER_ACTIVE
 *     The filter is active and a call to f_process() may return an event.
 *
 *     For f_process() the meaning is slightly different: the knote will be
 *     activated again as long as f_process returns FILTER_ACTIVE, unless
 *     EV_CLEAR is set, which requires a new f_event to reactivate the knote.
 *
 *     Valid:    f_attach, f_event, f_touch, f_process, f_peek
 *
 * FILTER_REGISTER_WAIT
 *     The filter wants its f_post_register_wait() to be called.
 *
 *     Note: It is only valid to ask for this behavior for a workloop kqueue,
 *     and is really only meant to be used by EVFILT_WORKLOOP.
 *
 *     Valid:    f_attach, f_touch
 *     Ignored:  f_event, f_process, f_peek
 *
 * FILTER_UPDATE_REQ_QOS
 *     The filter wants the passed in QoS to be updated as the new intrinsic qos
 *     for this knote. If the kevent `qos` field is 0, no update is performed.
 *
 *     This also will reset the event QoS, so FILTER_ADJUST_EVENT_QOS() must
 *     also be used if an override should be maintained.
 *
 *     Implicit: f_attach
 *     Ignored:  f_event, f_process, f_peek
 *
 * FILTER_RESET_EVENT_QOS
 * FILTER_ADJUST_EVENT_QOS(qos)
 *     The filter wants the QoS of the next event delivery to be overridden
 *     at the specified QoS.  This allows for the next event QoS to be elevated
 *     from the knote requested qos (See FILTER_UPDATE_REQ_QOS).
 *
 *     Event QoS Overrides are reset when a particular knote is no longer
 *     active. Hence this is ignored if FILTER_ACTIVE isn't also returned.
 *
 *     Races between an f_event() and any other f_* routine asking for
 *     a specific QoS override are handled generically and the filters do not
 *     have to worry about them.
 *
 *     To use this facility, filters MUST set their f_adjusts_qos bit to true.
 *
 *     It is expected that filters will return the new QoS they expect to be
 *     applied from any f_* callback except for f_process() where no specific
 *     information should be provided. Filters should not try to hide no-ops,
 *     kevent will already optimize these away.
 *
 *     Valid:    f_touch, f_attach, f_event, f_process
 *
 * FILTER_THREADREQ_NODEFEER
 *     The filter has moved a turnstile priority push away from the current
 *     thread, preemption has been disabled, and thread requests need to be
 *     committed before preemption is re-enabled.
 *
 *     Valid:    f_attach, f_touch
 *     Invalid:  f_event, f_process, f_peek
 */
#define FILTER_ACTIVE                       0x00000001
#define FILTER_REGISTER_WAIT                0x00000002
#define FILTER_UPDATE_REQ_QOS               0x00000004
#define FILTER_ADJUST_EVENT_QOS_BIT         0x00000008
#define FILTER_ADJUST_EVENT_QOS_MASK        0x00000070
#define FILTER_ADJUST_EVENT_QOS_SHIFT 4
#define FILTER_ADJUST_EVENT_QOS(qos) \
		(((qos) << FILTER_ADJUST_EVENT_QOS_SHIFT) | FILTER_ADJUST_EVENT_QOS_BIT)
#define FILTER_RESET_EVENT_QOS              FILTER_ADJUST_EVENT_QOS_BIT
#define FILTER_THREADREQ_NODEFEER           0x00000080

#define filter_call(_ops, call)  \
		((_ops)->f_extended_codes ? (_ops)->call : !!((_ops)->call))
SLIST_HEAD(klist, knote);
extern void     knote_init(void);
extern void     klist_init(struct klist *list);

#define KNOTE(list, hint)       knote(list, hint)
#define KNOTE_ATTACH(list, kn)  knote_attach(list, kn)
#define KNOTE_DETACH(list, kn)  knote_detach(list, kn)

extern void knote(struct klist *list, long hint);
extern int knote_attach(struct klist *list, struct knote *kn);
extern int knote_detach(struct klist *list, struct knote *kn);
extern void knote_vanish(struct klist *list, bool make_active);

extern void knote_set_error(struct knote *kn, int error);
extern int64_t knote_low_watermark(const struct knote *kn) __pure2;
extern void knote_fill_kevent_with_sdata(struct knote *kn, struct kevent_qos_s *kev);
extern void knote_fill_kevent(struct knote *kn, struct kevent_qos_s *kev, int64_t data);

extern void knote_link_waitqset_lazy_alloc(struct knote *kn);
extern boolean_t knote_link_waitqset_should_lazy_alloc(struct knote *kn);
extern int knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link);
extern int knote_unlink_waitq(struct knote *kn, struct waitq *wq);
extern void knote_fdclose(struct proc *p, int fd);
extern void knote_markstayactive(struct knote *kn);
extern void knote_clearstayactive(struct knote *kn);
extern const struct filterops *knote_fops(struct knote *kn);

extern struct turnstile *kqueue_turnstile(struct kqueue *);
extern struct turnstile *kqueue_alloc_turnstile(struct kqueue *);
int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize);
int kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf,
    uint32_t ubufsize, int32_t *nkqueues_out);
int kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
    uint32_t ubufsize, int32_t *size_out);
int kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
    uint32_t ubufsize, int32_t *nknotes_out);

extern int filt_wlattach_sync_ipc(struct knote *kn);
extern void filt_wldetach_sync_ipc(struct knote *kn);

extern int kevent_workq_internal(struct proc *p,
    user_addr_t changelist, int nchanges,
    user_addr_t eventlist, int nevents,
    user_addr_t data_out, user_size_t *data_available,
    unsigned int flags, int32_t *retval);
#elif defined(KERNEL_PRIVATE) /* !XNU_KERNEL_PRIVATE: kexts still need a klist structure definition */

#include <sys/queue.h>
SLIST_HEAD(klist, knote);

#endif /* !XNU_KERNEL_PRIVATE && KERNEL_PRIVATE */
#include <sys/types.h>

int     kevent(int kq,
    const struct kevent *changelist, int nchanges,
    struct kevent *eventlist, int nevents,
    const struct timespec *timeout);
int     kevent64(int kq,
    const struct kevent64_s *changelist, int nchanges,
    struct kevent64_s *eventlist, int nevents,
    unsigned int flags,
    const struct timespec *timeout);

#ifdef PRIVATE
int     kevent_qos(int kq,
    const struct kevent_qos_s *changelist, int nchanges,
    struct kevent_qos_s *eventlist, int nevents,
    void *data_out, size_t *data_available,
    unsigned int flags);

int     kevent_id(kqueue_id_t id,
    const struct kevent_qos_s *changelist, int nchanges,
    struct kevent_qos_s *eventlist, int nevents,
    void *data_out, size_t *data_available,
    unsigned int flags);
#endif /* PRIVATE */
#ifdef PRIVATE
/* Flags for pending events notified by kernel via return-to-kernel ast */
#define R2K_WORKLOOP_PENDING_EVENTS             0x1
#define R2K_WORKQ_PENDING_EVENTS                0x2

#endif /* PRIVATE */

#endif /* !_SYS_EVENT_H_ */