src/event/event_kevent.c (apple/libdispatch, libdispatch-913.30.4)
1 /*
2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 #include "internal.h"
22 #if DISPATCH_EVENT_BACKEND_KEVENT
23 #if HAVE_MACH
24 #include "protocol.h"
25 #include "protocolServer.h"
26 #endif
27
28 #if DISPATCH_USE_KEVENT_WORKQUEUE && !DISPATCH_USE_KEVENT_QOS
29 #error unsupported configuration
30 #endif
31
32 #define DISPATCH_KEVENT_MUXED_MARKER 1ul
33 #define DISPATCH_MACH_AUDIT_TOKEN_PID (5)
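// Editor's note: DISPATCH_MACH_AUDIT_TOKEN_PID indexes audit_token_t.val[];
// on Darwin, val[5] carries the pid of the sender (assumption based on the
// audit token layout). The notify server below uses it to reject MACH_NOTIFY
// messages that were not synthesized by the kernel (pid 0).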
34
35 typedef struct dispatch_muxnote_s {
36 TAILQ_ENTRY(dispatch_muxnote_s) dmn_list;
37 TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_unotes_head;
38 dispatch_wlh_t dmn_wlh;
39 dispatch_kevent_s dmn_kev;
40 } *dispatch_muxnote_t;
41
42 static bool _dispatch_timers_force_max_leeway;
43 static int _dispatch_kq = -1;
44 static struct {
45 dispatch_once_t pred;
46 dispatch_unfair_lock_s lock;
47 } _dispatch_muxnotes;
48 #if !DISPATCH_USE_KEVENT_WORKQUEUE
49 #define _dispatch_muxnotes_lock() \
50 _dispatch_unfair_lock_lock(&_dispatch_muxnotes.lock)
51 #define _dispatch_muxnotes_unlock() \
52 _dispatch_unfair_lock_unlock(&_dispatch_muxnotes.lock)
53 #else
54 #define _dispatch_muxnotes_lock()
55 #define _dispatch_muxnotes_unlock()
56 #endif // !DISPATCH_USE_KEVENT_WORKQUEUE
57
58 DISPATCH_CACHELINE_ALIGN
59 static TAILQ_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s)
60 _dispatch_sources[DSL_HASH_SIZE];
61
62 #define DISPATCH_NOTE_CLOCK_WALL NOTE_MACH_CONTINUOUS_TIME
63 #define DISPATCH_NOTE_CLOCK_MACH 0
64
65 static const uint32_t _dispatch_timer_index_to_fflags[] = {
66 #define DISPATCH_TIMER_FFLAGS_INIT(kind, qos, note) \
67 [DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos)] = \
68 DISPATCH_NOTE_CLOCK_##kind | NOTE_ABSOLUTE | \
69 NOTE_NSECONDS | NOTE_LEEWAY | (note)
70 DISPATCH_TIMER_FFLAGS_INIT(WALL, NORMAL, 0),
71 DISPATCH_TIMER_FFLAGS_INIT(MACH, NORMAL, 0),
72 #if DISPATCH_HAVE_TIMER_QOS
73 DISPATCH_TIMER_FFLAGS_INIT(WALL, CRITICAL, NOTE_CRITICAL),
74 DISPATCH_TIMER_FFLAGS_INIT(MACH, CRITICAL, NOTE_CRITICAL),
75 DISPATCH_TIMER_FFLAGS_INIT(WALL, BACKGROUND, NOTE_BACKGROUND),
76 DISPATCH_TIMER_FFLAGS_INIT(MACH, BACKGROUND, NOTE_BACKGROUND),
77 #endif
78 #undef DISPATCH_TIMER_FFLAGS_INIT
79 };
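// Editor's sketch: one expanded entry of the table above, for the wall-clock
// timer at normal QoS (expansion reconstructed from the macro, not verbatim
// compiler output):
//
//   [DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, DISPATCH_TIMER_QOS_NORMAL)] =
//       NOTE_MACH_CONTINUOUS_TIME | NOTE_ABSOLUTE | NOTE_NSECONDS |
//       NOTE_LEEWAY | 0,
//
// i.e. wall timers keep ticking across sleep (NOTE_MACH_CONTINUOUS_TIME) and
// every timer is programmed as an absolute deadline in nanoseconds with leeway.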
80
81 static void _dispatch_kevent_timer_drain(dispatch_kevent_t ke);
82
83 #pragma mark -
84 #pragma mark kevent debug
85
86 DISPATCH_NOINLINE
87 static const char *
88 _evfiltstr(short filt)
89 {
90 switch (filt) {
91 #define _evfilt2(f) case (f): return #f
92 _evfilt2(EVFILT_READ);
93 _evfilt2(EVFILT_WRITE);
94 _evfilt2(EVFILT_SIGNAL);
95 _evfilt2(EVFILT_TIMER);
96
97 #ifdef DISPATCH_EVENT_BACKEND_KEVENT
98 _evfilt2(EVFILT_AIO);
99 _evfilt2(EVFILT_VNODE);
100 _evfilt2(EVFILT_PROC);
101 #if HAVE_MACH
102 _evfilt2(EVFILT_MACHPORT);
103 _evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION);
104 #endif
105 _evfilt2(EVFILT_FS);
106 _evfilt2(EVFILT_USER);
107 #ifdef EVFILT_SOCK
108 _evfilt2(EVFILT_SOCK);
109 #endif
110 #ifdef EVFILT_MEMORYSTATUS
111 _evfilt2(EVFILT_MEMORYSTATUS);
112 #endif
113 #endif // DISPATCH_EVENT_BACKEND_KEVENT
114
115 _evfilt2(DISPATCH_EVFILT_TIMER);
116 _evfilt2(DISPATCH_EVFILT_CUSTOM_ADD);
117 _evfilt2(DISPATCH_EVFILT_CUSTOM_OR);
118 _evfilt2(DISPATCH_EVFILT_CUSTOM_REPLACE);
119 default:
120 return "EVFILT_missing";
121 }
122 }
123
124 #if DISPATCH_DEBUG
125 static const char *
126 _evflagstr2(uint16_t *flagsp)
127 {
128 #define _evflag2(f) \
129 if ((*flagsp & (f)) == (f) && (f)) { \
130 *flagsp &= ~(f); \
131 return #f "|"; \
132 }
133 _evflag2(EV_ADD);
134 _evflag2(EV_DELETE);
135 _evflag2(EV_ENABLE);
136 _evflag2(EV_DISABLE);
137 _evflag2(EV_ONESHOT);
138 _evflag2(EV_CLEAR);
139 _evflag2(EV_RECEIPT);
140 _evflag2(EV_DISPATCH);
141 _evflag2(EV_UDATA_SPECIFIC);
142 #ifdef EV_POLL
143 _evflag2(EV_POLL);
144 #endif
145 #ifdef EV_OOBAND
146 _evflag2(EV_OOBAND);
147 #endif
148 _evflag2(EV_ERROR);
149 _evflag2(EV_EOF);
150 _evflag2(EV_VANISHED);
151 *flagsp = 0;
152 return "EV_UNKNOWN ";
153 }
154
155 DISPATCH_NOINLINE
156 static const char *
157 _evflagstr(uint16_t flags, char *str, size_t strsize)
158 {
159 str[0] = 0;
160 while (flags) {
161 strlcat(str, _evflagstr2(&flags), strsize);
162 }
163 size_t sz = strlen(str);
164 if (sz) str[sz-1] = 0;
165 return str;
166 }
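// Editor's sketch of the intended output, e.g. for flags == EV_ADD|EV_CLEAR:
//
//   char buf[256];
//   _evflagstr(EV_ADD | EV_CLEAR, buf, sizeof(buf)); // buf == "EV_ADD|EV_CLEAR"
//
// _evflagstr2() consumes one known flag per call (appending a trailing '|'),
// and the strlen() trim above removes the final separator.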
167
168 DISPATCH_NOINLINE
169 static void
170 dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev,
171 int i, int n, const char *function, unsigned int line)
172 {
173 char flagstr[256];
174 char i_n[31];
175
176 if (n > 1) {
177 snprintf(i_n, sizeof(i_n), "%d/%d ", i + 1, n);
178 } else {
179 i_n[0] = '\0';
180 }
181 if (verb == NULL) {
182 if (kev->flags & EV_DELETE) {
183 verb = "deleting";
184 } else if (kev->flags & EV_ADD) {
185 verb = "adding";
186 } else {
187 verb = "updating";
188 }
189 }
190 #if DISPATCH_USE_KEVENT_QOS
191 _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
192 "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, "
193 "qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, "
194 "ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident,
195 _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
196 sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
197 kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3],
198 function, line);
199 #else
200 _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
201 "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx}: "
202 "%s #%u", verb, kev, i_n,
203 kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
204 sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
205 function, line);
206 #endif
207 }
208 #else
209 static inline void
210 dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev,
211 int i, int n, const char *function, unsigned int line)
212 {
213 (void)verb; (void)kev; (void)i; (void)n; (void)function; (void)line;
214 }
215 #endif // DISPATCH_DEBUG
216 #define _dispatch_kevent_debug_n(verb, _kev, i, n) \
217 dispatch_kevent_debug(verb, _kev, i, n, __FUNCTION__, __LINE__)
218 #define _dispatch_kevent_debug(verb, _kev) \
219 _dispatch_kevent_debug_n(verb, _kev, 0, 0)
220 #if DISPATCH_MGR_QUEUE_DEBUG
221 #define _dispatch_kevent_mgr_debug(verb, kev) _dispatch_kevent_debug(verb, kev)
222 #else
223 #define _dispatch_kevent_mgr_debug(verb, kev) ((void)verb, (void)kev)
224 #endif // DISPATCH_MGR_QUEUE_DEBUG
225 #if DISPATCH_WLH_DEBUG
226 #define _dispatch_kevent_wlh_debug(verb, kev) _dispatch_kevent_debug(verb, kev)
227 #else
228 #define _dispatch_kevent_wlh_debug(verb, kev) ((void)verb, (void)kev)
229 #endif // DISPATCH_WLH_DEBUG
230
231 #if DISPATCH_MACHPORT_DEBUG
232 #ifndef MACH_PORT_TYPE_SPREQUEST
233 #define MACH_PORT_TYPE_SPREQUEST 0x40000000
234 #endif
235
236 DISPATCH_NOINLINE
237 void
238 dispatch_debug_machport(mach_port_t name, const char* str)
239 {
240 mach_port_type_t type;
241 mach_msg_bits_t ns = 0, nr = 0, nso = 0, nd = 0;
242 unsigned int dnreqs = 0, dnrsiz;
243 kern_return_t kr = mach_port_type(mach_task_self(), name, &type);
244 if (kr) {
245 _dispatch_log("machport[0x%08x] = { error(0x%x) \"%s\" }: %s", name,
246 kr, mach_error_string(kr), str);
247 return;
248 }
249 if (type & MACH_PORT_TYPE_SEND) {
250 (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
251 MACH_PORT_RIGHT_SEND, &ns));
252 }
253 if (type & MACH_PORT_TYPE_SEND_ONCE) {
254 (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
255 MACH_PORT_RIGHT_SEND_ONCE, &nso));
256 }
257 if (type & MACH_PORT_TYPE_DEAD_NAME) {
258 (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
259 MACH_PORT_RIGHT_DEAD_NAME, &nd));
260 }
261 if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) {
262 kr = mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs);
263 if (kr != KERN_INVALID_RIGHT) (void)dispatch_assume_zero(kr);
264 }
265 if (type & MACH_PORT_TYPE_RECEIVE) {
266 mach_port_status_t status = { .mps_pset = 0, };
267 mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT;
268 (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
269 MACH_PORT_RIGHT_RECEIVE, &nr));
270 (void)dispatch_assume_zero(mach_port_get_attributes(mach_task_self(),
271 name, MACH_PORT_RECEIVE_STATUS, (void*)&status, &cnt));
272 _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
273 "dnreqs(%03u) spreq(%s) nsreq(%s) pdreq(%s) srights(%s) "
274 "sorights(%03u) qlim(%03u) msgcount(%03u) mkscount(%03u) "
275 "seqno(%03u) }: %s", name, nr, ns, nso, nd, dnreqs,
276 type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N",
277 status.mps_nsrequest ? "Y":"N", status.mps_pdrequest ? "Y":"N",
278 status.mps_srights ? "Y":"N", status.mps_sorights,
279 status.mps_qlimit, status.mps_msgcount, status.mps_mscount,
280 status.mps_seqno, str);
281 } else if (type & (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE|
282 MACH_PORT_TYPE_DEAD_NAME)) {
283 _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
284 "dnreqs(%03u) spreq(%s) }: %s", name, nr, ns, nso, nd, dnreqs,
285 type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", str);
286 } else {
287 _dispatch_log("machport[0x%08x] = { type(0x%08x) }: %s", name, type,
288 str);
289 }
290 }
291 #endif
292
293 #pragma mark dispatch_kevent_t
294
295 #if HAVE_MACH
296
297 static dispatch_once_t _dispatch_mach_host_port_pred;
298 static mach_port_t _dispatch_mach_host_port;
299
300 static inline void*
301 _dispatch_kevent_mach_msg_buf(dispatch_kevent_t ke)
302 {
303 return (void*)ke->ext[0];
304 }
305
306 static inline mach_msg_size_t
307 _dispatch_kevent_mach_msg_size(dispatch_kevent_t ke)
308 {
309 // buffer size in the successful receive case, but message size (like
310 // msgh_size) in the MACH_RCV_TOO_LARGE case, so the trailer size must be added
311 return (mach_msg_size_t)ke->ext[1];
312 }
313
314 static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke);
315 static inline void _dispatch_mach_host_calendar_change_register(void);
316
317 // DISPATCH_MACH_NOTIFICATION_ARMED applies to muxnotes that aren't
318 // registered with kevent for real, but with mach_port_request_notification()
319 //
320 // the kevent structure is used for bookkeeping:
321 // - ident, filter, flags and fflags have their usual meaning
322 // - data is used to monitor the actual state of the
323 // mach_port_request_notification()
324 // - ext[0] is a boolean that tracks whether the notification is armed or not
325 #define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->ext[0])
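// Editor's sketch: with this encoding, arming and checking a notification is
// plain field access on the bookkeeping kevent, e.g. (hypothetical use):
//
//   DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = 1;        // armed
//   if (DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev)) { ... }
//
// _dispatch_mach_notify_merge() below clears it back to 0 on delivery.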
326 #endif
327
328 DISPATCH_ALWAYS_INLINE
329 static dispatch_muxnote_t
330 _dispatch_kevent_get_muxnote(dispatch_kevent_t ke)
331 {
332 uintptr_t dmn_addr = (uintptr_t)ke->udata & ~DISPATCH_KEVENT_MUXED_MARKER;
333 return (dispatch_muxnote_t)dmn_addr;
334 }
335
336 DISPATCH_ALWAYS_INLINE
337 static dispatch_unote_t
338 _dispatch_kevent_get_unote(dispatch_kevent_t ke)
339 {
340 dispatch_assert((ke->udata & DISPATCH_KEVENT_MUXED_MARKER) == 0);
341 return (dispatch_unote_t){ ._du = (dispatch_unote_class_t)ke->udata };
342 }
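// Editor's note: ke->udata is a tagged pointer. Registration stores either a
// dispatch_unote_class_t directly (direct knotes), or a dispatch_muxnote_t
// with DISPATCH_KEVENT_MUXED_MARKER OR'ed into the low bit (see
// _dispatch_unote_register_muxed() below), e.g.:
//
//   ke.udata = (uintptr_t)du;                                  // direct
//   ke.udata = (uintptr_t)dmn | DISPATCH_KEVENT_MUXED_MARKER;  // muxed
//
// Both structures are at least pointer-aligned, so the low bit is free.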
343
344 DISPATCH_NOINLINE
345 static void
346 _dispatch_kevent_print_error(dispatch_kevent_t ke)
347 {
348 _dispatch_debug("kevent[0x%llx]: handling error",
349 (unsigned long long)ke->udata);
350 if (ke->flags & EV_DELETE) {
351 if (ke->flags & EV_UDATA_SPECIFIC) {
352 if (ke->data == EINPROGRESS) {
353 // deferred EV_DELETE
354 return;
355 }
356 }
357 // for EV_DELETE if the update was deferred we may have reclaimed
358 // the udata already, and it is unsafe to dereference it now.
359 } else if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) {
360 ke->flags |= _dispatch_kevent_get_muxnote(ke)->dmn_kev.flags;
361 } else if (ke->udata) {
362 if (!_dispatch_unote_registered(_dispatch_kevent_get_unote(ke))) {
363 ke->flags |= EV_ADD;
364 }
365 }
366
367 #if HAVE_MACH
368 if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP &&
369 (ke->flags & EV_ADD) && (ke->fflags & MACH_RCV_MSG)) {
370 DISPATCH_INTERNAL_CRASH(ke->ident,
371 "Missing EVFILT_MACHPORT support for ports");
372 }
373 #endif
374
375 if (ke->data) {
376 // log the unexpected error
377 _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter),
378 !ke->udata ? NULL :
379 ke->flags & EV_DELETE ? "delete" :
380 ke->flags & EV_ADD ? "add" :
381 ke->flags & EV_ENABLE ? "enable" : "monitor",
382 (int)ke->data);
383 }
384 }
385
386 DISPATCH_NOINLINE
387 static void
388 _dispatch_kevent_merge(dispatch_unote_t du, dispatch_kevent_t ke)
389 {
390 uintptr_t data;
391 uintptr_t status = 0;
392 pthread_priority_t pp = 0;
393 #if DISPATCH_USE_KEVENT_QOS
394 pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK;
395 #endif
396 dispatch_unote_action_t action = du._du->du_data_action;
397 if (action == DISPATCH_UNOTE_ACTION_DATA_SET) {
398 // ke->data is signed and "negative available data" makes no sense
399 // zero bytes happens when EV_EOF is set
400 dispatch_assert(ke->data >= 0l);
401 data = ~(unsigned long)ke->data;
402 #if HAVE_MACH
403 } else if (du._du->du_filter == EVFILT_MACHPORT) {
404 data = DISPATCH_MACH_RECV_MESSAGE;
405 #endif
406 } else if (action == DISPATCH_UNOTE_ACTION_DATA_ADD) {
407 data = (unsigned long)ke->data;
408 } else if (action == DISPATCH_UNOTE_ACTION_DATA_OR) {
409 data = ke->fflags & du._du->du_fflags;
410 } else if (action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) {
411 data = ke->fflags & du._du->du_fflags;
412 status = (unsigned long)ke->data;
413 } else {
414 DISPATCH_INTERNAL_CRASH(action, "Corrupt unote action");
415 }
416 return dux_merge_evt(du._du, ke->flags, data, status, pp);
417 }
418
419 DISPATCH_NOINLINE
420 static void
421 _dispatch_kevent_merge_muxed(dispatch_kevent_t ke)
422 {
423 dispatch_muxnote_t dmn = _dispatch_kevent_get_muxnote(ke);
424 dispatch_unote_linkage_t dul, dul_next;
425
426 TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) {
427 _dispatch_kevent_merge(_dispatch_unote_linkage_get_unote(dul), ke);
428 }
429 }
430
431 DISPATCH_NOINLINE
432 static void
433 _dispatch_kevent_drain(dispatch_kevent_t ke)
434 {
435 if (ke->filter == EVFILT_USER) {
436 _dispatch_kevent_mgr_debug("received", ke);
437 return;
438 }
439 _dispatch_kevent_debug("received", ke);
440 if (unlikely(ke->flags & EV_ERROR)) {
441 if (ke->filter == EVFILT_PROC && ke->data == ESRCH) {
442 // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie
443 // <rdar://problem/5067725>. As a workaround, we simulate an exit event for
444 // any EVFILT_PROC with an invalid pid <rdar://problem/6626350>.
445 ke->flags &= ~(EV_ERROR | EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC);
446 ke->flags |= EV_ONESHOT;
447 ke->fflags = NOTE_EXIT;
448 ke->data = 0;
449 _dispatch_kevent_debug("synthetic NOTE_EXIT", ke);
450 } else {
451 return _dispatch_kevent_print_error(ke);
452 }
453 }
454 if (ke->filter == EVFILT_TIMER) {
455 return _dispatch_kevent_timer_drain(ke);
456 }
457
458 #if HAVE_MACH
459 if (ke->filter == EVFILT_MACHPORT) {
460 if (_dispatch_kevent_mach_msg_size(ke)) {
461 return _dispatch_kevent_mach_msg_drain(ke);
462 }
463 }
464 #endif
465
466 if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) {
467 return _dispatch_kevent_merge_muxed(ke);
468 }
469 return _dispatch_kevent_merge(_dispatch_kevent_get_unote(ke), ke);
470 }
471
472 #pragma mark dispatch_kq
473
474 #if DISPATCH_USE_MGR_THREAD
475 DISPATCH_NOINLINE
476 static int
477 _dispatch_kq_create(const void *guard_ptr)
478 {
479 static const dispatch_kevent_s kev = {
480 .ident = 1,
481 .filter = EVFILT_USER,
482 .flags = EV_ADD|EV_CLEAR,
483 .udata = (uintptr_t)DISPATCH_WLH_MANAGER,
484 };
485 int kqfd;
486
487 _dispatch_fork_becomes_unsafe();
488 #if DISPATCH_USE_GUARDED_FD
489 guardid_t guard = (uintptr_t)guard_ptr;
490 kqfd = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP);
491 #else
492 (void)guard_ptr;
493 kqfd = kqueue();
494 #endif
495 if (kqfd == -1) {
496 int err = errno;
497 switch (err) {
498 case EMFILE:
499 DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
500 "process is out of file descriptors");
501 break;
502 case ENFILE:
503 DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
504 "system is out of file descriptors");
505 break;
506 case ENOMEM:
507 DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
508 "kernel is out of memory");
509 break;
510 default:
511 DISPATCH_INTERNAL_CRASH(err, "kqueue() failure");
512 break;
513 }
514 }
515 #if DISPATCH_USE_KEVENT_QOS
516 dispatch_assume_zero(kevent_qos(kqfd, &kev, 1, NULL, 0, NULL, NULL, 0));
517 #else
518 dispatch_assume_zero(kevent(kqfd, &kev, 1, NULL, 0, NULL));
519 #endif
520 return kqfd;
521 }
522 #endif
523
524 static void
525 _dispatch_kq_init(void *context)
526 {
527 bool *kq_initialized = context;
528
529 _dispatch_fork_becomes_unsafe();
530 if (unlikely(getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"))) {
531 _dispatch_timers_force_max_leeway = true;
532 }
533 *kq_initialized = true;
534
535 #if DISPATCH_USE_KEVENT_WORKQUEUE
536 _dispatch_kevent_workqueue_init();
537 if (_dispatch_kevent_workqueue_enabled) {
538 int r;
539 int kqfd = _dispatch_kq;
540 const dispatch_kevent_s ke = {
541 .ident = 1,
542 .filter = EVFILT_USER,
543 .flags = EV_ADD|EV_CLEAR,
544 .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
545 .udata = (uintptr_t)DISPATCH_WLH_MANAGER,
546 };
547 retry:
548 r = kevent_qos(kqfd, &ke, 1, NULL, 0, NULL, NULL,
549 KEVENT_FLAG_WORKQ|KEVENT_FLAG_IMMEDIATE);
550 if (unlikely(r == -1)) {
551 int err = errno;
552 switch (err) {
553 case EINTR:
554 goto retry;
555 default:
556 DISPATCH_CLIENT_CRASH(err,
557 "Failed to initalize workqueue kevent");
558 break;
559 }
560 }
561 return;
562 }
563 #endif // DISPATCH_USE_KEVENT_WORKQUEUE
564 #if DISPATCH_USE_MGR_THREAD
565 _dispatch_kq = _dispatch_kq_create(&_dispatch_mgr_q);
566 dx_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0);
567 #endif // DISPATCH_USE_MGR_THREAD
568 }
569
570 #if DISPATCH_USE_MEMORYPRESSURE_SOURCE
571 static void _dispatch_memorypressure_init(void);
572 #else
573 #define _dispatch_memorypressure_init() ((void)0)
574 #endif
575
576 DISPATCH_NOINLINE
577 static int
578 _dispatch_kq_poll(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n,
579 dispatch_kevent_t ke_out, int n_out, void *buf, size_t *avail,
580 uint32_t flags)
581 {
582 static dispatch_once_t pred;
583 bool kq_initialized = false;
584 int r = 0;
585
586 dispatch_once_f(&pred, &kq_initialized, _dispatch_kq_init);
587 if (unlikely(kq_initialized)) {
588 // The calling thread was the one doing the initialization
589 //
590 // The event loop needs the memory pressure source and debug channel;
591 // however, creating these will recursively call _dispatch_kq_poll(),
592 // so we can't quite initialize them under the dispatch once.
593 _dispatch_memorypressure_init();
594 _voucher_activity_debug_channel_init();
595 }
596
597
598 #if !DISPATCH_USE_KEVENT_QOS
599 if (flags & KEVENT_FLAG_ERROR_EVENTS) {
600 // emulate KEVENT_FLAG_ERROR_EVENTS
601 for (r = 0; r < n; r++) {
602 ke[r].flags |= EV_RECEIPT;
603 }
604 n_out = n;
605 }
606 #endif
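// Editor's note: EV_RECEIPT makes kevent() report the status of each change
// as a synthesized EV_ERROR event (data == 0 on success) instead of draining
// pending events, which is how KEVENT_FLAG_ERROR_EVENTS is approximated on
// kernels without kevent_qos(); capping n_out to n leaves room for exactly
// those receipts.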
607
608 retry:
609 if (unlikely(wlh == NULL)) {
610 DISPATCH_INTERNAL_CRASH(wlh, "Invalid wlh");
611 } else if (wlh == DISPATCH_WLH_ANON) {
612 int kqfd = _dispatch_kq;
613 #if DISPATCH_USE_KEVENT_QOS
614 if (_dispatch_kevent_workqueue_enabled) {
615 flags |= KEVENT_FLAG_WORKQ;
616 }
617 r = kevent_qos(kqfd, ke, n, ke_out, n_out, buf, avail, flags);
618 #else
619 const struct timespec timeout_immediately = {}, *timeout = NULL;
620 if (flags & KEVENT_FLAG_IMMEDIATE) timeout = &timeout_immediately;
621 r = kevent(kqfd, ke, n, ke_out, n_out, timeout);
622 #endif
623 }
624 if (unlikely(r == -1)) {
625 int err = errno;
626 switch (err) {
627 case ENOMEM:
628 _dispatch_temporary_resource_shortage();
629 /* FALLTHROUGH */
630 case EINTR:
631 goto retry;
632 case EBADF:
633 DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors");
634 default:
635 DISPATCH_CLIENT_CRASH(err, "Unexpected error from kevent");
636 }
637 }
638 return r;
639 }
640
641 DISPATCH_NOINLINE
642 static int
643 _dispatch_kq_drain(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n,
644 uint32_t flags)
645 {
646 dispatch_kevent_s ke_out[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT];
647 bool poll_for_events = !(flags & KEVENT_FLAG_ERROR_EVENTS);
648 int i, n_out = countof(ke_out), r = 0;
649 size_t *avail = NULL;
650 void *buf = NULL;
651
652 #if DISPATCH_USE_KEVENT_QOS
653 size_t size;
654 if (poll_for_events) {
655 size = DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE +
656 DISPATCH_MACH_TRAILER_SIZE;
657 buf = alloca(size);
658 avail = &size;
659 }
660 #endif
661
662 #if DISPATCH_DEBUG
663 for (r = 0; r < n; r++) {
664 if (ke[r].filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) {
665 _dispatch_kevent_debug_n(NULL, ke + r, r, n);
666 }
667 }
668 #endif
669
670 if (poll_for_events) _dispatch_clear_return_to_kernel();
671 n = _dispatch_kq_poll(wlh, ke, n, ke_out, n_out, buf, avail, flags);
672 if (n == 0) {
673 r = 0;
674 } else if (flags & KEVENT_FLAG_ERROR_EVENTS) {
675 for (i = 0, r = 0; i < n; i++) {
676 if ((ke_out[i].flags & EV_ERROR) && ke_out[i].data) {
677 _dispatch_kevent_drain(&ke_out[i]);
678 r = (int)ke_out[i].data;
679 }
680 }
681 } else {
682 for (i = 0, r = 0; i < n; i++) {
683 _dispatch_kevent_drain(&ke_out[i]);
684 }
685 }
686 return r;
687 }
688
689 DISPATCH_ALWAYS_INLINE
690 static inline int
691 _dispatch_kq_update_one(dispatch_wlh_t wlh, dispatch_kevent_t ke)
692 {
693 return _dispatch_kq_drain(wlh, ke, 1,
694 KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS);
695 }
696
697 DISPATCH_ALWAYS_INLINE
698 static inline void
699 _dispatch_kq_update_all(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n)
700 {
701 (void)_dispatch_kq_drain(wlh, ke, n,
702 KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS);
703 }
704
705 DISPATCH_ALWAYS_INLINE
706 static inline void
707 _dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk,
708 uint16_t action)
709 {
710 dispatch_unote_class_t du = _du._du;
711 dispatch_source_type_t dst = du->du_type;
712 uint16_t flags = dst->dst_flags | action;
713
714 if ((flags & EV_VANISHED) && !(flags & EV_ADD)) {
715 flags &= ~EV_VANISHED;
716 }
717 pthread_priority_t pp = _dispatch_priority_to_pp(du->du_priority);
718 *dk = (dispatch_kevent_s){
719 .ident = du->du_ident,
720 .filter = dst->dst_filter,
721 .flags = flags,
722 .udata = (uintptr_t)du,
723 .fflags = du->du_fflags | dst->dst_fflags,
724 .data = (typeof(dk->data))dst->dst_data,
725 #if DISPATCH_USE_KEVENT_QOS
726 .qos = (typeof(dk->qos))pp,
727 #endif
728 };
729 }
730
731 DISPATCH_ALWAYS_INLINE
732 static inline int
733 _dispatch_kq_deferred_find_slot(dispatch_deferred_items_t ddi,
734 int16_t filter, uint64_t ident, uint64_t udata)
735 {
736 dispatch_kevent_t events = ddi->ddi_eventlist;
737 int i;
738
739 for (i = 0; i < ddi->ddi_nevents; i++) {
740 if (events[i].filter == filter && events[i].ident == ident &&
741 events[i].udata == udata) {
742 break;
743 }
744 }
745 return i;
746 }
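// Editor's note on the contract above: the scan returns the index of a
// pending change with the same (filter, ident, udata) identity, or
// ddi->ddi_nevents when there is none; callers treat that sentinel as
// "append a new slot" (reuse) or "nothing to discard" (discard).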
747
748 DISPATCH_ALWAYS_INLINE
749 static inline dispatch_kevent_t
750 _dispatch_kq_deferred_reuse_slot(dispatch_wlh_t wlh,
751 dispatch_deferred_items_t ddi, int slot)
752 {
753 if (wlh != DISPATCH_WLH_ANON) _dispatch_set_return_to_kernel();
754 if (unlikely(slot == ddi->ddi_maxevents)) {
755 int nevents = ddi->ddi_nevents;
756 ddi->ddi_nevents = 1;
757 _dispatch_kq_update_all(wlh, ddi->ddi_eventlist, nevents);
758 dispatch_assert(ddi->ddi_nevents == 1);
759 slot = 0;
760 } else if (slot == ddi->ddi_nevents) {
761 ddi->ddi_nevents++;
762 }
763 return ddi->ddi_eventlist + slot;
764 }
765
766 DISPATCH_ALWAYS_INLINE
767 static inline void
768 _dispatch_kq_deferred_discard_slot(dispatch_deferred_items_t ddi, int slot)
769 {
770 if (slot < ddi->ddi_nevents) {
771 int last = --ddi->ddi_nevents;
772 if (slot != last) {
773 ddi->ddi_eventlist[slot] = ddi->ddi_eventlist[last];
774 }
775 }
776 }
777
778 DISPATCH_NOINLINE
779 static void
780 _dispatch_kq_deferred_update(dispatch_wlh_t wlh, dispatch_kevent_t ke)
781 {
782 dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
783
784 if (ddi && ddi->ddi_maxevents && wlh == _dispatch_get_wlh()) {
785 int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident,
786 ke->udata);
787 dispatch_kevent_t dk = _dispatch_kq_deferred_reuse_slot(wlh, ddi, slot);
788 *dk = *ke;
789 if (ke->filter != EVFILT_USER) {
790 _dispatch_kevent_mgr_debug("deferred", ke);
791 }
792 } else {
793 _dispatch_kq_update_one(wlh, ke);
794 }
795 }
796
797 DISPATCH_NOINLINE
798 static int
799 _dispatch_kq_immediate_update(dispatch_wlh_t wlh, dispatch_kevent_t ke)
800 {
801 dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
802 if (ddi && wlh == _dispatch_get_wlh()) {
803 int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident,
804 ke->udata);
805 _dispatch_kq_deferred_discard_slot(ddi, slot);
806 }
807 return _dispatch_kq_update_one(wlh, ke);
808 }
809
810 DISPATCH_NOINLINE
811 static bool
812 _dispatch_kq_unote_update(dispatch_wlh_t wlh, dispatch_unote_t _du,
813 uint16_t action_flags)
814 {
815 dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
816 dispatch_unote_class_t du = _du._du;
817 dispatch_kevent_t ke;
818 int r = 0;
819
820 if (action_flags & EV_ADD) {
821 // as soon as we register we may get an event delivery and it has to
822 // see du_wlh already set, else it will not unregister the kevent
823 dispatch_assert(du->du_wlh == NULL);
824 _dispatch_wlh_retain(wlh);
825 du->du_wlh = wlh;
826 }
827
828 if (ddi && wlh == _dispatch_get_wlh()) {
829 int slot = _dispatch_kq_deferred_find_slot(ddi,
830 du->du_filter, du->du_ident, (uintptr_t)du);
831 if (slot < ddi->ddi_nevents) {
832 // <rdar://problem/26202376> when deleting and an enable is pending,
833 // we must merge EV_ENABLE to do an immediate deletion
834 action_flags |= (ddi->ddi_eventlist[slot].flags & EV_ENABLE);
835 }
836
837 if (!(action_flags & EV_ADD) && (action_flags & EV_ENABLE)) {
838 // can be deferred, so do it!
839 ke = _dispatch_kq_deferred_reuse_slot(wlh, ddi, slot);
840 _dispatch_kq_unote_set_kevent(du, ke, action_flags);
841 _dispatch_kevent_debug("deferred", ke);
842 goto done;
843 }
844
845 // get rid of the deferred item if any, we can't wait
846 _dispatch_kq_deferred_discard_slot(ddi, slot);
847 }
848
849 if (action_flags) {
850 dispatch_kevent_s dk;
851 _dispatch_kq_unote_set_kevent(du, &dk, action_flags);
852 r = _dispatch_kq_update_one(wlh, &dk);
853 }
854
855 done:
856 if (action_flags & EV_ADD) {
857 if (unlikely(r)) {
858 _dispatch_wlh_release(du->du_wlh);
859 du->du_wlh = NULL;
860 }
861 return r == 0;
862 }
863
864 if (action_flags & EV_DELETE) {
865 if (r == EINPROGRESS) {
866 return false;
867 }
868 _dispatch_wlh_release(du->du_wlh);
869 du->du_wlh = NULL;
870 }
871
872 dispatch_assume_zero(r);
873 return true;
874 }
875
876 #pragma mark dispatch_muxnote_t
877
878 static void
879 _dispatch_muxnotes_init(void *ctxt DISPATCH_UNUSED)
880 {
881 uint32_t i;
882 for (i = 0; i < DSL_HASH_SIZE; i++) {
883 TAILQ_INIT(&_dispatch_sources[i]);
884 }
885 }
886
887 DISPATCH_ALWAYS_INLINE
888 static inline struct dispatch_muxnote_bucket_s *
889 _dispatch_muxnote_bucket(uint64_t ident, int16_t filter)
890 {
891 switch (filter) {
892 #if HAVE_MACH
893 case EVFILT_MACHPORT:
894 case DISPATCH_EVFILT_MACH_NOTIFICATION:
895 ident = MACH_PORT_INDEX(ident);
896 break;
897 #endif
898 case EVFILT_SIGNAL: // signo
899 case EVFILT_PROC: // pid_t
900 default: // fd
901 break;
902 }
903
904 dispatch_once_f(&_dispatch_muxnotes.pred, NULL, _dispatch_muxnotes_init);
905 return &_dispatch_sources[DSL_HASH((uintptr_t)ident)];
906 }
907 #define _dispatch_unote_muxnote_bucket(du) \
908 _dispatch_muxnote_bucket(du._du->du_ident, du._du->du_filter)
909
910 DISPATCH_ALWAYS_INLINE
911 static inline dispatch_muxnote_t
912 _dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb,
913 dispatch_wlh_t wlh, uint64_t ident, int16_t filter)
914 {
915 dispatch_muxnote_t dmn;
916 _dispatch_muxnotes_lock();
917 TAILQ_FOREACH(dmn, dmb, dmn_list) {
918 if (dmn->dmn_wlh == wlh && dmn->dmn_kev.ident == ident &&
919 dmn->dmn_kev.filter == filter) {
920 break;
921 }
922 }
923 _dispatch_muxnotes_unlock();
924 return dmn;
925 }
926 #define _dispatch_unote_muxnote_find(dmb, du, wlh) \
927 _dispatch_muxnote_find(dmb, wlh, du._du->du_ident, du._du->du_filter)
928
929 DISPATCH_ALWAYS_INLINE
930 static inline dispatch_muxnote_t
931 _dispatch_mach_muxnote_find(mach_port_t name, int16_t filter)
932 {
933 struct dispatch_muxnote_bucket_s *dmb;
934 dmb = _dispatch_muxnote_bucket(name, filter);
935 return _dispatch_muxnote_find(dmb, DISPATCH_WLH_ANON, name, filter);
936 }
937
938 DISPATCH_NOINLINE
939 static bool
940 _dispatch_unote_register_muxed(dispatch_unote_t du, dispatch_wlh_t wlh)
941 {
942 struct dispatch_muxnote_bucket_s *dmb = _dispatch_unote_muxnote_bucket(du);
943 dispatch_muxnote_t dmn;
944 bool installed = true;
945
946 dmn = _dispatch_unote_muxnote_find(dmb, du, wlh);
947 if (dmn) {
948 uint32_t flags = du._du->du_fflags & ~dmn->dmn_kev.fflags;
949 if (flags) {
950 dmn->dmn_kev.fflags |= flags;
951 if (unlikely(du._du->du_type->dst_update_mux)) {
952 installed = du._du->du_type->dst_update_mux(dmn);
953 } else {
954 installed = !_dispatch_kq_immediate_update(dmn->dmn_wlh,
955 &dmn->dmn_kev);
956 }
957 if (!installed) dmn->dmn_kev.fflags &= ~flags;
958 }
959 } else {
960 dmn = _dispatch_calloc(1, sizeof(struct dispatch_muxnote_s));
961 TAILQ_INIT(&dmn->dmn_unotes_head);
962 _dispatch_kq_unote_set_kevent(du, &dmn->dmn_kev, EV_ADD | EV_ENABLE);
963 #if DISPATCH_USE_KEVENT_QOS
964 dmn->dmn_kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
965 #endif
966 dmn->dmn_kev.udata = (uintptr_t)dmn | DISPATCH_KEVENT_MUXED_MARKER;
967 dmn->dmn_wlh = wlh;
968 if (unlikely(du._du->du_type->dst_update_mux)) {
969 installed = du._du->du_type->dst_update_mux(dmn);
970 } else {
971 installed = !_dispatch_kq_immediate_update(dmn->dmn_wlh,
972 &dmn->dmn_kev);
973 }
974 if (installed) {
975 dmn->dmn_kev.flags &= ~(EV_ADD | EV_VANISHED);
976 _dispatch_muxnotes_lock();
977 TAILQ_INSERT_TAIL(dmb, dmn, dmn_list);
978 _dispatch_muxnotes_unlock();
979 } else {
980 free(dmn);
981 }
982 }
983
984 if (installed) {
985 dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
986 TAILQ_INSERT_TAIL(&dmn->dmn_unotes_head, dul, du_link);
987 dul->du_muxnote = dmn;
988
989 if (du._du->du_filter == DISPATCH_EVFILT_MACH_NOTIFICATION) {
990 bool armed = DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev);
991 os_atomic_store2o(du._dmsr, dmsr_notification_armed, armed, relaxed);
992 }
993 du._du->du_wlh = DISPATCH_WLH_ANON;
994 }
995 return installed;
996 }
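// Editor's sketch: muxnotes exist so that several unotes share one kernel
// registration. E.g. (hypothetical) two dispatch sources watching the same
// vnode register a single EVFILT_VNODE knote whose fflags is the union:
//
//   source A: NOTE_WRITE   --> dmn_kev.fflags = NOTE_WRITE
//   source B: NOTE_DELETE  --> dmn_kev.fflags = NOTE_WRITE|NOTE_DELETE
//
// _dispatch_kevent_merge_muxed() then fans one kernel event back out to every
// unote on dmn_unotes_head, and unregistration narrows fflags again.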
997
998 bool
999 _dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh,
1000 dispatch_priority_t pri)
1001 {
1002 dispatch_assert(!_dispatch_unote_registered(du));
1003 du._du->du_priority = pri;
1004 switch (du._du->du_filter) {
1005 case DISPATCH_EVFILT_CUSTOM_ADD:
1006 case DISPATCH_EVFILT_CUSTOM_OR:
1007 case DISPATCH_EVFILT_CUSTOM_REPLACE:
1008 du._du->du_wlh = DISPATCH_WLH_ANON;
1009 return true;
1010 }
1011 if (!du._du->du_is_direct) {
1012 return _dispatch_unote_register_muxed(du, DISPATCH_WLH_ANON);
1013 }
1014 return _dispatch_kq_unote_update(wlh, du, EV_ADD | EV_ENABLE);
1015 }
1016
1017 void
1018 _dispatch_unote_resume(dispatch_unote_t du)
1019 {
1020 dispatch_assert(_dispatch_unote_registered(du));
1021
1022 if (du._du->du_is_direct) {
1023 dispatch_wlh_t wlh = du._du->du_wlh;
1024 _dispatch_kq_unote_update(wlh, du, EV_ENABLE);
1025 } else if (unlikely(du._du->du_type->dst_update_mux)) {
1026 dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
1027 du._du->du_type->dst_update_mux(dul->du_muxnote);
1028 } else {
1029 dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
1030 dispatch_muxnote_t dmn = dul->du_muxnote;
1031 _dispatch_kq_deferred_update(dmn->dmn_wlh, &dmn->dmn_kev);
1032 }
1033 }
1034
1035 DISPATCH_NOINLINE
1036 static bool
1037 _dispatch_unote_unregister_muxed(dispatch_unote_t du, uint32_t flags)
1038 {
1039 dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
1040 dispatch_muxnote_t dmn = dul->du_muxnote;
1041 bool update = false, dispose = false;
1042
1043 if (dmn->dmn_kev.filter == DISPATCH_EVFILT_MACH_NOTIFICATION) {
1044 os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed);
1045 }
1046 dispatch_assert(du._du->du_wlh == DISPATCH_WLH_ANON);
1047 du._du->du_wlh = NULL;
1048 TAILQ_REMOVE(&dmn->dmn_unotes_head, dul, du_link);
1049 _TAILQ_TRASH_ENTRY(dul, du_link);
1050 dul->du_muxnote = NULL;
1051
1052 if (TAILQ_EMPTY(&dmn->dmn_unotes_head)) {
1053 dmn->dmn_kev.flags |= EV_DELETE;
1054 update = dispose = true;
1055 } else {
1056 uint32_t fflags = du._du->du_type->dst_fflags;
1057 TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) {
1058 du = _dispatch_unote_linkage_get_unote(dul);
1059 fflags |= du._du->du_fflags;
1060 }
1061 if (dmn->dmn_kev.fflags & ~fflags) {
1062 dmn->dmn_kev.fflags &= fflags;
1063 update = true;
1064 }
1065 }
1066 if (update && !(flags & DU_UNREGISTER_ALREADY_DELETED)) {
1067 if (unlikely(du._du->du_type->dst_update_mux)) {
1068 dispatch_assume(du._du->du_type->dst_update_mux(dmn));
1069 } else {
1070 _dispatch_kq_deferred_update(dmn->dmn_wlh, &dmn->dmn_kev);
1071 }
1072 }
1073 if (dispose) {
1074 struct dispatch_muxnote_bucket_s *dmb;
1075 dmb = _dispatch_muxnote_bucket(dmn->dmn_kev.ident, dmn->dmn_kev.filter);
1076 _dispatch_muxnotes_lock();
1077 TAILQ_REMOVE(dmb, dmn, dmn_list);
1078 _dispatch_muxnotes_unlock();
1079 free(dmn);
1080 }
1081 return true;
1082 }
1083
1084 bool
1085 _dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags)
1086 {
1087 switch (du._du->du_filter) {
1088 case DISPATCH_EVFILT_CUSTOM_ADD:
1089 case DISPATCH_EVFILT_CUSTOM_OR:
1090 case DISPATCH_EVFILT_CUSTOM_REPLACE:
1091 du._du->du_wlh = NULL;
1092 return true;
1093 }
1094 dispatch_wlh_t wlh = du._du->du_wlh;
1095 if (wlh) {
1096 if (!du._du->du_is_direct) {
1097 return _dispatch_unote_unregister_muxed(du, flags);
1098 }
1099 uint16_t action_flags;
1100 if (flags & DU_UNREGISTER_ALREADY_DELETED) {
1101 action_flags = 0;
1102 } else if (flags & DU_UNREGISTER_IMMEDIATE_DELETE) {
1103 action_flags = EV_DELETE | EV_ENABLE;
1104 } else {
1105 action_flags = EV_DELETE;
1106 }
1107 return _dispatch_kq_unote_update(wlh, du, action_flags);
1108 }
1109 return true;
1110 }
1111
1112 #pragma mark -
1113 #pragma mark dispatch_event_loop
1114
1115 void
1116 _dispatch_event_loop_atfork_child(void)
1117 {
1118 #if HAVE_MACH
1119 _dispatch_mach_host_port_pred = 0;
1120 _dispatch_mach_host_port = MACH_PORT_NULL;
1121 #endif
1122 }
1123
1124
1125 DISPATCH_NOINLINE
1126 void
1127 _dispatch_event_loop_poke(dispatch_wlh_t wlh, uint64_t dq_state, uint32_t flags)
1128 {
1129 if (wlh == DISPATCH_WLH_MANAGER) {
1130 dispatch_kevent_s ke = (dispatch_kevent_s){
1131 .ident = 1,
1132 .filter = EVFILT_USER,
1133 .fflags = NOTE_TRIGGER,
1134 .udata = (uintptr_t)DISPATCH_WLH_MANAGER,
1135 };
1136 return _dispatch_kq_deferred_update(DISPATCH_WLH_ANON, &ke);
1137 } else if (wlh && wlh != DISPATCH_WLH_ANON) {
1138 (void)dq_state; (void)flags;
1139 }
1140 DISPATCH_INTERNAL_CRASH(wlh, "Unsupported wlh configuration");
1141 }
1142
1143 DISPATCH_NOINLINE
1144 void
1145 _dispatch_event_loop_drain(uint32_t flags)
1146 {
1147 dispatch_wlh_t wlh = _dispatch_get_wlh();
1148 dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
1149 int n;
1150
1151 again:
1152 n = ddi->ddi_nevents;
1153 ddi->ddi_nevents = 0;
1154 _dispatch_kq_drain(wlh, ddi->ddi_eventlist, n, flags);
1155
1156 if ((flags & KEVENT_FLAG_IMMEDIATE) &&
1157 !(flags & KEVENT_FLAG_ERROR_EVENTS) &&
1158 _dispatch_needs_to_return_to_kernel()) {
1159 goto again;
1160 }
1161 }
1162
1163 void
1164 _dispatch_event_loop_merge(dispatch_kevent_t events, int nevents)
1165 {
1166 dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
1167 dispatch_kevent_s kev[nevents];
1168
1169 // now we can re-use the whole event list, but we need to save one slot
1170 // for the event loop poke
1171 memcpy(kev, events, sizeof(kev));
1172 ddi->ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT - 2;
1173
1174 for (int i = 0; i < nevents; i++) {
1175 _dispatch_kevent_drain(&kev[i]);
1176 }
1177
1178 dispatch_wlh_t wlh = _dispatch_get_wlh();
1179 if (wlh == DISPATCH_WLH_ANON && ddi->ddi_stashed_dou._do) {
1180 if (ddi->ddi_nevents) {
1181 // We will drain the stashed item and not return to the kernel
1182 // right away. As a consequence, do not delay these updates.
1183 _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE |
1184 KEVENT_FLAG_ERROR_EVENTS);
1185 }
1186 _dispatch_trace_continuation_push(ddi->ddi_stashed_rq,
1187 ddi->ddi_stashed_dou);
1188 }
1189 }
1190
1191 void
1192 _dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state)
1193 {
1194 (void)wlh; (void)dq_state;
1195 }
1196
1197 void
1198 _dispatch_event_loop_leave_deferred(dispatch_wlh_t wlh, uint64_t dq_state)
1199 {
1200 (void)wlh; (void)dq_state;
1201 }
1202
1203 void
1204 _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc,
1205 dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state)
1206 {
1207 (void)dsc; (void)wlh; (void)old_state; (void)new_state;
1208 }
1209
1210 void
1211 _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc)
1212 {
1213 if (dsc->dsc_release_storage) {
1214 _dispatch_queue_release_storage(dsc->dc_data);
1215 }
1216 }
1217
1218 void
1219 _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state,
1220 uint64_t new_state, uint32_t flags)
1221 {
1222 (void)wlh; (void)old_state; (void)new_state; (void)flags;
1223 }
1224
1225 #if DISPATCH_WLH_DEBUG
1226 void
1227 _dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh)
1228 {
1229 (void)wlh;
1230 }
1231 #endif // DISPATCH_WLH_DEBUG
1232
1233 #pragma mark -
1234 #pragma mark dispatch_event_loop timers
1235
1236 #define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8)
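// Editor's sketch: timer kevents encode the timer index in the low byte of
// the ident, with all high bits set as a signature, e.g. for tidx == 3:
//
//   ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK | 3;        // 0xffff...ff03
//   tidx  = ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK;   // == 3
//
// which is what the assertions in _dispatch_kevent_timer_drain() verify.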
1237
1238 DISPATCH_NOINLINE
1239 static void
1240 _dispatch_kevent_timer_drain(dispatch_kevent_t ke)
1241 {
1242 dispatch_assert(ke->data > 0);
1243 dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) ==
1244 DISPATCH_KEVENT_TIMEOUT_IDENT_MASK);
1245 uint32_t tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK;
1246
1247 dispatch_assert(tidx < DISPATCH_TIMER_COUNT);
1248 _dispatch_timers_expired = true;
1249 _dispatch_timers_processing_mask |= 1 << tidx;
1250 _dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED;
1251 #if DISPATCH_USE_DTRACE
1252 _dispatch_timers_will_wake |= 1 << DISPATCH_TIMER_QOS(tidx);
1253 #endif
1254 }
1255
1256 DISPATCH_NOINLINE
1257 static void
1258 _dispatch_event_loop_timer_program(uint32_t tidx,
1259 uint64_t target, uint64_t leeway, uint16_t action)
1260 {
1261 dispatch_kevent_s ke = {
1262 .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK | tidx,
1263 .filter = EVFILT_TIMER,
1264 .flags = action | EV_ONESHOT,
1265 .fflags = _dispatch_timer_index_to_fflags[tidx],
1266 .data = (int64_t)target,
1267 .udata = (uintptr_t)&_dispatch_timers_heap[tidx],
1268 #if DISPATCH_HAVE_TIMER_COALESCING
1269 .ext[1] = leeway,
1270 #endif
1271 #if DISPATCH_USE_KEVENT_QOS
1272 .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
1273 #endif
1274 };
1275
1276 _dispatch_kq_deferred_update(DISPATCH_WLH_ANON, &ke);
1277 }
1278
1279 void
1280 _dispatch_event_loop_timer_arm(uint32_t tidx, dispatch_timer_delay_s range,
1281 dispatch_clock_now_cache_t nows)
1282 {
1283 if (unlikely(_dispatch_timers_force_max_leeway)) {
1284 range.delay += range.leeway;
1285 range.leeway = 0;
1286 }
1287 #if HAVE_MACH
1288 if (DISPATCH_TIMER_CLOCK(tidx) == DISPATCH_CLOCK_WALL) {
1289 _dispatch_mach_host_calendar_change_register();
1290 }
1291 #endif
1292
1293 // <rdar://problem/13186331> EVFILT_TIMER NOTE_ABSOLUTE always expects
1294 // a WALL deadline
1295 uint64_t now = _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows);
1296 _dispatch_timers_heap[tidx].dth_flags |= DTH_ARMED;
1297 _dispatch_event_loop_timer_program(tidx, now + range.delay, range.leeway,
1298 EV_ADD | EV_ENABLE);
1299 }
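// Editor's sketch, assuming a wall timer due in 1s with 100ms leeway: the
// code above programs (units are nanoseconds, per the fflags table):
//
//   ke.ident  = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK | tidx;
//   ke.fflags = NOTE_MACH_CONTINUOUS_TIME|NOTE_ABSOLUTE|NOTE_NSECONDS|NOTE_LEEWAY;
//   ke.data   = now_wall + 1000000000ull;  // absolute WALL deadline
//   ke.ext[1] = 100000000ull;              // leeway, if coalescing is available
//
// With LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY set, the leeway is instead folded
// into the deadline (delay += leeway, leeway = 0).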
1300
1301 void
1302 _dispatch_event_loop_timer_delete(uint32_t tidx)
1303 {
1304 _dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED;
1305 _dispatch_event_loop_timer_program(tidx, 0, 0, EV_DELETE);
1306 }
1307
1308 #pragma mark -
1309 #pragma mark kevent specific sources
1310
1311 static dispatch_unote_t
1312 _dispatch_source_proc_create(dispatch_source_type_t dst DISPATCH_UNUSED,
1313 uintptr_t handle, unsigned long mask DISPATCH_UNUSED)
1314 {
1315 dispatch_unote_t du = _dispatch_unote_create_with_handle(dst, handle, mask);
1316 if (du._du && (mask & DISPATCH_PROC_EXIT_STATUS)) {
1317 du._du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET;
1318 }
1319 return du;
1320 }
1321
1322 const dispatch_source_type_s _dispatch_source_type_proc = {
1323 .dst_kind = "proc",
1324 .dst_filter = EVFILT_PROC,
1325 .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR,
1326 .dst_fflags = NOTE_EXIT, // rdar://16655831
1327 .dst_mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_EXITSTATUS
1328 #if HAVE_DECL_NOTE_SIGNAL
1329 |NOTE_SIGNAL
1330 #endif
1331 #if HAVE_DECL_NOTE_REAP
1332 |NOTE_REAP
1333 #endif
1334 ,
1335 .dst_size = sizeof(struct dispatch_source_refs_s),
1336
1337 .dst_create = _dispatch_source_proc_create,
1338 .dst_merge_evt = _dispatch_source_merge_evt,
1339 };
1340
1341 const dispatch_source_type_s _dispatch_source_type_vnode = {
1342 .dst_kind = "vnode",
1343 .dst_filter = EVFILT_VNODE,
1344 .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
1345 .dst_mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK
1346 |NOTE_RENAME|NOTE_FUNLOCK
1347 #if HAVE_DECL_NOTE_REVOKE
1348 |NOTE_REVOKE
1349 #endif
1350 #if HAVE_DECL_NOTE_NONE
1351 |NOTE_NONE
1352 #endif
1353 ,
1354 .dst_size = sizeof(struct dispatch_source_refs_s),
1355
1356 .dst_create = _dispatch_unote_create_with_fd,
1357 .dst_merge_evt = _dispatch_source_merge_evt,
1358 };
1359
1360 const dispatch_source_type_s _dispatch_source_type_vfs = {
1361 .dst_kind = "vfs",
1362 .dst_filter = EVFILT_FS,
1363 .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR,
1364 .dst_mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT
1365 |VQ_DEAD|VQ_ASSIST|VQ_NOTRESPLOCK
1366 #if HAVE_DECL_VQ_UPDATE
1367 |VQ_UPDATE
1368 #endif
1369 #if HAVE_DECL_VQ_VERYLOWDISK
1370 |VQ_VERYLOWDISK
1371 #endif
1372 #if HAVE_DECL_VQ_QUOTA
1373 |VQ_QUOTA
1374 #endif
1375 #if HAVE_DECL_VQ_NEARLOWDISK
1376 |VQ_NEARLOWDISK
1377 #endif
1378 #if HAVE_DECL_VQ_DESIRED_DISK
1379 |VQ_DESIRED_DISK
1380 #endif
1381 ,
1382 .dst_size = sizeof(struct dispatch_source_refs_s),
1383
1384 .dst_create = _dispatch_unote_create_without_handle,
1385 .dst_merge_evt = _dispatch_source_merge_evt,
1386 };
1387
1388 #ifdef EVFILT_SOCK
1389 const dispatch_source_type_s _dispatch_source_type_sock = {
1390 .dst_kind = "sock",
1391 .dst_filter = EVFILT_SOCK,
1392 .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
1393 .dst_mask = NOTE_CONNRESET|NOTE_READCLOSED|NOTE_WRITECLOSED
1394 |NOTE_TIMEOUT|NOTE_NOSRCADDR|NOTE_IFDENIED|NOTE_SUSPEND|NOTE_RESUME
1395 |NOTE_KEEPALIVE
1396 #ifdef NOTE_ADAPTIVE_WTIMO
1397 |NOTE_ADAPTIVE_WTIMO|NOTE_ADAPTIVE_RTIMO
1398 #endif
1399 #ifdef NOTE_CONNECTED
1400 |NOTE_CONNECTED|NOTE_DISCONNECTED|NOTE_CONNINFO_UPDATED
1401 #endif
1402 #ifdef NOTE_NOTIFY_ACK
1403 |NOTE_NOTIFY_ACK
1404 #endif
1405 ,
1406 .dst_size = sizeof(struct dispatch_source_refs_s),
1407
1408 .dst_create = _dispatch_unote_create_with_fd,
1409 .dst_merge_evt = _dispatch_source_merge_evt,
1410 };
1411 #endif // EVFILT_SOCK
1412
1413 #ifdef EVFILT_NW_CHANNEL
1414 const dispatch_source_type_s _dispatch_source_type_nw_channel = {
1415 .dst_kind = "nw_channel",
1416 .dst_filter = EVFILT_NW_CHANNEL,
1417 .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
1418 .dst_mask = NOTE_FLOW_ADV_UPDATE,
1419 .dst_size = sizeof(struct dispatch_source_refs_s),
1420 .dst_create = _dispatch_unote_create_with_fd,
1421 .dst_merge_evt = _dispatch_source_merge_evt,
1422 };
1423 #endif // EVFILT_NW_CHANNEL
1424
1425 #if DISPATCH_USE_MEMORYSTATUS
1426
1427 #if DISPATCH_USE_MEMORYPRESSURE_SOURCE
1428 #define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \
1429 DISPATCH_MEMORYPRESSURE_NORMAL | \
1430 DISPATCH_MEMORYPRESSURE_WARN | \
1431 DISPATCH_MEMORYPRESSURE_CRITICAL | \
1432 DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
1433 DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL | \
1434 DISPATCH_MEMORYPRESSURE_MSL_STATUS)
1435
1436 #define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \
1437 DISPATCH_MEMORYPRESSURE_WARN | \
1438 DISPATCH_MEMORYPRESSURE_CRITICAL | \
1439 DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
1440 DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL | \
1441 DISPATCH_MEMORYPRESSURE_MSL_STATUS)
1442
1443
1444 static void
1445 _dispatch_memorypressure_handler(void *context)
1446 {
1447 dispatch_source_t ds = context;
1448 unsigned long memorypressure = dispatch_source_get_data(ds);
1449
1450 if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) {
1451 _dispatch_memory_warn = false;
1452 _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT;
1453 #if VOUCHER_USE_MACH_VOUCHER
1454 if (_firehose_task_buffer) {
1455 firehose_buffer_clear_bank_flags(_firehose_task_buffer,
1456 FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
1457 }
1458 #endif
1459 }
1460 if (memorypressure & DISPATCH_MEMORYPRESSURE_WARN) {
1461 _dispatch_memory_warn = true;
1462 _dispatch_continuation_cache_limit =
1463 DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN;
1464 #if VOUCHER_USE_MACH_VOUCHER
1465 if (_firehose_task_buffer) {
1466 firehose_buffer_set_bank_flags(_firehose_task_buffer,
1467 FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
1468 }
1469 #endif
1470 }
1471 memorypressure &= DISPATCH_MEMORYPRESSURE_MALLOC_MASK;
1472 if (memorypressure) {
1473 malloc_memory_event_handler(memorypressure);
1474 }
1475 }
1476
1477 static void
1478 _dispatch_memorypressure_init(void)
1479 {
1480 dispatch_source_t ds = dispatch_source_create(
1481 DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
1482 DISPATCH_MEMORYPRESSURE_SOURCE_MASK, &_dispatch_mgr_q);
1483 dispatch_set_context(ds, ds);
1484 dispatch_source_set_event_handler_f(ds, _dispatch_memorypressure_handler);
1485 dispatch_activate(ds);
1486 }
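// Editor's sketch of the client-facing counterpart (public API, for context):
// the internal handler above does for libdispatch itself what an application
// would otherwise write, e.g.:
//
//   dispatch_source_t ds = dispatch_source_create(
//           DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
//           DISPATCH_MEMORYPRESSURE_WARN | DISPATCH_MEMORYPRESSURE_CRITICAL,
//           dispatch_get_global_queue(QOS_CLASS_UTILITY, 0));
//   dispatch_source_set_event_handler(ds, ^{
//       (void)dispatch_source_get_data(ds); // mask of observed pressure events
//   });
//   dispatch_activate(ds);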
1487 #endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE
1488
1489 #if TARGET_OS_SIMULATOR // rdar://problem/9219483
1490 static int _dispatch_ios_simulator_memory_warnings_fd = -1;
1491 static void
1492 _dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED)
1493 {
1494 char *e = getenv("SIMULATOR_MEMORY_WARNINGS");
1495 if (!e) return;
1496 _dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY);
1497 if (_dispatch_ios_simulator_memory_warnings_fd == -1) {
1498 (void)dispatch_assume_zero(errno);
1499 }
1500 }
1501
1502 static dispatch_unote_t
1503 _dispatch_source_memorypressure_create(dispatch_source_type_t dst,
1504 uintptr_t handle, unsigned long mask)
1505 {
1506 static dispatch_once_t pred;
1507 dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init);
1508
1509 if (handle) {
1510 return DISPATCH_UNOTE_NULL;
1511 }
1512
1513 dst = &_dispatch_source_type_vnode;
1514 handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd;
1515 mask = NOTE_ATTRIB;
1516
1517 dispatch_unote_t du = dux_create(dst, handle, mask);
1518 if (du._du) {
1519 du._du->du_memorypressure_override = true;
1520 }
1521 return du;
1522 }
1523 #endif // TARGET_OS_SIMULATOR
1524
1525 const dispatch_source_type_s _dispatch_source_type_memorypressure = {
1526 .dst_kind = "memorystatus",
1527 .dst_filter = EVFILT_MEMORYSTATUS,
1528 .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH,
1529 .dst_mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL
1530 |NOTE_MEMORYSTATUS_PRESSURE_WARN|NOTE_MEMORYSTATUS_PRESSURE_CRITICAL
1531 |NOTE_MEMORYSTATUS_LOW_SWAP|NOTE_MEMORYSTATUS_PROC_LIMIT_WARN
1532 |NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL
1533 |NOTE_MEMORYSTATUS_MSL_STATUS,
1534 .dst_size = sizeof(struct dispatch_source_refs_s),
1535
1536 #if TARGET_OS_SIMULATOR
1537 .dst_create = _dispatch_source_memorypressure_create,
1538 // redirected to _dispatch_source_type_vnode
1539 #else
1540 .dst_create = _dispatch_unote_create_without_handle,
1541 .dst_merge_evt = _dispatch_source_merge_evt,
1542 #endif
1543 };
1544
1545 static dispatch_unote_t
1546 _dispatch_source_vm_create(dispatch_source_type_t dst DISPATCH_UNUSED,
1547 uintptr_t handle, unsigned long mask DISPATCH_UNUSED)
1548 {
1549 // Map legacy vm pressure to memorypressure warning rdar://problem/15907505
1550 dispatch_unote_t du = dux_create(&_dispatch_source_type_memorypressure,
1551 handle, NOTE_MEMORYSTATUS_PRESSURE_WARN);
1552 if (du._du) {
1553 du._du->du_vmpressure_override = 1;
1554 }
1555 return du;
1556 }
1557
1558 const dispatch_source_type_s _dispatch_source_type_vm = {
1559 .dst_kind = "vm (deprecated)",
1560 .dst_filter = EVFILT_MEMORYSTATUS,
1561 .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH,
1562 .dst_mask = NOTE_VM_PRESSURE,
1563 .dst_size = sizeof(struct dispatch_source_refs_s),
1564
1565 .dst_create = _dispatch_source_vm_create,
1566 // redirected to _dispatch_source_type_memorypressure
1567 };
1568 #endif // DISPATCH_USE_MEMORYSTATUS
1569
1570 #pragma mark mach send / notifications
1571 #if HAVE_MACH
1572
1573 // Flags for all notifications that are registered/unregistered when a
1574 // send-possible notification is requested/delivered
1575 #define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \
1576 DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED)
1577
1578 static void _dispatch_mach_host_notify_update(void *context);
1579
1580 static mach_port_t _dispatch_mach_notify_port;
1581 static dispatch_source_t _dispatch_mach_notify_source;
1582
1583 static void
1584 _dispatch_timers_calendar_change(void)
1585 {
1586 uint32_t qos;
1587
1588 // calendar change may have gone past the wallclock deadline
1589 _dispatch_timers_expired = true;
1590 for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) {
1591 _dispatch_timers_processing_mask |=
1592 1 << DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, qos);
1593 }
1594 }
1595
1596 static mach_msg_audit_trailer_t *
1597 _dispatch_mach_msg_get_audit_trailer(mach_msg_header_t *hdr)
1598 {
1599 mach_msg_trailer_t *tlr = NULL;
1600 mach_msg_audit_trailer_t *audit_tlr = NULL;
1601 tlr = (mach_msg_trailer_t *)((unsigned char *)hdr +
1602 round_msg(hdr->msgh_size));
1603 // The trailer should always be of format zero.
1604 if (tlr->msgh_trailer_type == MACH_MSG_TRAILER_FORMAT_0) {
1605 if (tlr->msgh_trailer_size >= sizeof(mach_msg_audit_trailer_t)) {
1606 audit_tlr = (mach_msg_audit_trailer_t *)tlr;
1607 }
1608 }
1609 return audit_tlr;
1610 }
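// Editor's note: receiving an audit trailer at all requires the receive
// options to request it; the direct-recv path presumably passes something
// like (standard Mach receive options, sketch):
//
//   MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT) |
//   MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)
//
// otherwise tlr->msgh_trailer_size would be too small for the cast above.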
1611
1612 DISPATCH_NOINLINE
1613 static void
1614 _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr)
1615 {
1616 mig_reply_error_t reply;
1617 mach_msg_audit_trailer_t *tlr = NULL;
1618 dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union
1619 __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem));
1620 dispatch_assert(sizeof(mig_reply_error_t) <
1621 DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE);
1622 tlr = _dispatch_mach_msg_get_audit_trailer(hdr);
1623 if (!tlr) {
1624 DISPATCH_INTERNAL_CRASH(0, "message received without expected trailer");
1625 }
1626 if (hdr->msgh_id <= MACH_NOTIFY_LAST
1627 && dispatch_assume_zero(tlr->msgh_audit.val[
1628 DISPATCH_MACH_AUDIT_TOKEN_PID])) {
1629 mach_msg_destroy(hdr);
1630 return;
1631 }
1632 boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head);
1633 if (!success && reply.RetCode == MIG_BAD_ID &&
1634 (hdr->msgh_id == HOST_CALENDAR_SET_REPLYID ||
1635 hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) {
1636 _dispatch_debug("calendar-change notification");
1637 _dispatch_timers_calendar_change();
1638 _dispatch_mach_host_notify_update(NULL);
1639 success = TRUE;
1640 reply.RetCode = KERN_SUCCESS;
1641 }
1642 if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) {
1643 (void)dispatch_assume_zero(reply.RetCode);
1644 }
1645 if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) {
1646 mach_msg_destroy(hdr);
1647 }
1648 }
1649
1650 DISPATCH_NOINLINE
1651 static void
1652 _dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED)
1653 {
1654 kern_return_t kr;
1655 #if HAVE_MACH_PORT_CONSTRUCT
1656 mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT };
1657 #if DISPATCH_SIZEOF_PTR == 8
1658 const mach_port_context_t guard = 0xfeed09071f1ca7edull;
1659 #else
1660 const mach_port_context_t guard = 0xff1ca7edull;
1661 #endif
1662 kr = mach_port_construct(mach_task_self(), &opts, guard,
1663 &_dispatch_mach_notify_port);
1664 #else
1665 kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
1666 &_dispatch_mach_notify_port);
1667 #endif
1668 DISPATCH_VERIFY_MIG(kr);
1669 if (unlikely(kr)) {
1670 DISPATCH_CLIENT_CRASH(kr,
1671 "mach_port_construct() failed: cannot create receive right");
1672 }
1673
1674 static const struct dispatch_continuation_s dc = {
1675 .dc_func = (void*)_dispatch_mach_notify_source_invoke,
1676 };
1677 _dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv(
1678 _dispatch_mach_notify_port, &dc);
1679 dispatch_assert(_dispatch_mach_notify_source);
1680 dispatch_activate(_dispatch_mach_notify_source);
1681 }
1682
1683 static void
1684 _dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED)
1685 {
1686 kern_return_t kr;
1687 mach_port_t mp, mhp = mach_host_self();
1688 kr = host_get_host_port(mhp, &mp);
1689 DISPATCH_VERIFY_MIG(kr);
1690 if (likely(!kr)) {
1691 // mach_host_self returned the HOST_PRIV port
1692 kr = mach_port_deallocate(mach_task_self(), mhp);
1693 DISPATCH_VERIFY_MIG(kr);
1694 mhp = mp;
1695 } else if (kr != KERN_INVALID_ARGUMENT) {
1696 (void)dispatch_assume_zero(kr);
1697 }
1698 if (unlikely(!mhp)) {
1699 DISPATCH_CLIENT_CRASH(kr, "Could not get unprivileged host port");
1700 }
1701 _dispatch_mach_host_port = mhp;
1702 }
1703
1704 mach_port_t
1705 _dispatch_get_mach_host_port(void)
1706 {
1707 dispatch_once_f(&_dispatch_mach_host_port_pred, NULL,
1708 _dispatch_mach_host_port_init);
1709 return _dispatch_mach_host_port;
1710 }
1711
1712 DISPATCH_ALWAYS_INLINE
1713 static inline mach_port_t
1714 _dispatch_get_mach_notify_port(void)
1715 {
1716 static dispatch_once_t pred;
1717 dispatch_once_f(&pred, NULL, _dispatch_mach_notify_port_init);
1718 return _dispatch_mach_notify_port;
1719 }
1720
1721 static void
1722 _dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED)
1723 {
1724 static int notify_type = HOST_NOTIFY_CALENDAR_SET;
1725 kern_return_t kr;
1726 _dispatch_debug("registering for calendar-change notification");
1727 retry:
1728 kr = host_request_notification(_dispatch_get_mach_host_port(),
1729 notify_type, _dispatch_get_mach_notify_port());
1730 // Fall back when the newer _SET variant is unsupported; _CHANGE fires strictly more often
1731 if (kr == KERN_INVALID_ARGUMENT &&
1732 notify_type != HOST_NOTIFY_CALENDAR_CHANGE) {
1733 notify_type = HOST_NOTIFY_CALENDAR_CHANGE;
1734 goto retry;
1735 }
1736 DISPATCH_VERIFY_MIG(kr);
1737 (void)dispatch_assume_zero(kr);
1738 }
1739
1740 DISPATCH_ALWAYS_INLINE
1741 static inline void
1742 _dispatch_mach_host_calendar_change_register(void)
1743 {
1744 static dispatch_once_t pred;
1745 dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update);
1746 }
1747
1748 static kern_return_t
1749 _dispatch_mach_notify_update(dispatch_muxnote_t dmn, uint32_t new_flags,
1750 uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid,
1751 mach_port_mscount_t notify_sync)
1752 {
1753 mach_port_t previous, port = (mach_port_t)dmn->dmn_kev.ident;
1754 typeof(dmn->dmn_kev.data) prev = dmn->dmn_kev.data;
1755 kern_return_t kr, krr = 0;
1756
1757 // Update notification registration state.
1758 dmn->dmn_kev.data |= (new_flags | dmn->dmn_kev.fflags) & mask;
1759 dmn->dmn_kev.data &= ~(del_flags & mask);
1760
1761 _dispatch_debug_machport(port);
1762 if ((dmn->dmn_kev.data & mask) && !(prev & mask)) {
1763 _dispatch_debug("machport[0x%08x]: registering for send-possible "
1764 "notification", port);
1765 previous = MACH_PORT_NULL;
1766 krr = mach_port_request_notification(mach_task_self(), port,
1767 notify_msgid, notify_sync, _dispatch_get_mach_notify_port(),
1768 MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
1769 DISPATCH_VERIFY_MIG(krr);
1770
1771 switch (krr) {
1772 case KERN_INVALID_NAME:
1773 case KERN_INVALID_RIGHT:
1774 // Suppress errors & clear registration state
1775 dmn->dmn_kev.data &= ~mask;
1776 break;
1777 default:
1778 // Otherwise we don't expect any errors from Mach; log any that occur
1779 if (dispatch_assume_zero(krr)) {
1780 // log the error & clear registration state
1781 dmn->dmn_kev.data &= ~mask;
1782 } else if (dispatch_assume_zero(previous)) {
1783 // Another subsystem has beat libdispatch to requesting the
1784 // specified Mach notification on this port. We should
1785 // technically cache the previous port and message it when the
1786 // kernel messages our port. Or we can just say screw those
1787 // subsystems and deallocate the previous port.
1788 // They should adopt libdispatch :-P
1789 kr = mach_port_deallocate(mach_task_self(), previous);
1790 DISPATCH_VERIFY_MIG(kr);
1791 (void)dispatch_assume_zero(kr);
1792 previous = MACH_PORT_NULL;
1793 }
1794 }
1795 } else if (!(dmn->dmn_kev.data & mask) && (prev & mask)) {
1796 _dispatch_debug("machport[0x%08x]: unregistering for send-possible "
1797 "notification", port);
1798 previous = MACH_PORT_NULL;
1799 kr = mach_port_request_notification(mach_task_self(), port,
1800 notify_msgid, notify_sync, MACH_PORT_NULL,
1801 MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous);
1802 DISPATCH_VERIFY_MIG(kr);
1803
1804 switch (kr) {
1805 case KERN_INVALID_NAME:
1806 case KERN_INVALID_RIGHT:
1807 case KERN_INVALID_ARGUMENT:
1808 break;
1809 default:
1810 if (dispatch_assume_zero(kr)) {
1811 // log the error
1812 }
1813 }
1814 } else {
1815 return 0;
1816 }
1817 if (unlikely(previous)) {
1818 // the kernel has not consumed the send-once right yet
1819 (void)dispatch_assume_zero(
1820 _dispatch_send_consume_send_once_right(previous));
1821 }
1822 return krr;
1823 }
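/*
 * Editor's sketch (illustration only; not part of libdispatch): the core
 * kernel call used above, reduced to a bare dead-name registration. Note the
 * same "previous" handling: if another registrant existed, the kernel hands
 * back its send-once right, which must be consumed. "example_" names are
 * hypothetical.
 */
#if 0 // illustration, compiled out
#include <mach/mach.h>

static kern_return_t
example_request_dead_name(mach_port_t port, mach_port_t notify_port)
{
	mach_port_t previous = MACH_PORT_NULL;
	kern_return_t kr;

	// sync == 1: also notify immediately if `port` is already dead.
	kr = mach_port_request_notification(mach_task_self(), port,
			MACH_NOTIFY_DEAD_NAME, 1, notify_port,
			MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
	if (kr == KERN_SUCCESS && previous != MACH_PORT_NULL) {
		(void)mach_port_deallocate(mach_task_self(), previous);
	}
	return kr;
}
#endif // illustration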
1824
1825 static bool
1826 _dispatch_kevent_mach_notify_resume(dispatch_muxnote_t dmn, uint32_t new_flags,
1827 uint32_t del_flags)
1828 {
1829 kern_return_t kr = KERN_SUCCESS;
1830 dispatch_assert_zero(new_flags & del_flags);
1831 if ((new_flags & _DISPATCH_MACH_SP_FLAGS) ||
1832 (del_flags & _DISPATCH_MACH_SP_FLAGS)) {
1833 // Requesting a (delayed) non-sync send-possible notification
1834 // registers for both immediate dead-name notification and delayed-arm
1835 // send-possible notification for the port.
1836 // The send-possible notification is armed when a mach_msg() with
1837 // MACH_SEND_NOTIFY to the port times out (see sketch after this function).
1838 // If send-possible is unavailable, fall back to immediate dead-name
1839 // registration rdar://problem/2527840&9008724
1840 kr = _dispatch_mach_notify_update(dmn, new_flags, del_flags,
1841 _DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE,
1842 MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME);
1843 }
1844 return kr == KERN_SUCCESS;
1845 }
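/*
 * Editor's sketch (illustration only; not part of libdispatch): how a
 * send-possible registration gets armed, per the comment above. A full
 * destination queue plus a zero send timeout returns MACH_SEND_TIMED_OUT,
 * and MACH_SEND_NOTIFY arms the pending MACH_NOTIFY_SEND_POSSIBLE so it
 * fires once the queue drains. "example_" names are hypothetical.
 */
#if 0 // illustration, compiled out
#include <mach/mach.h>

static kern_return_t
example_send_arming_notification(mach_port_t dest, mach_msg_header_t *hdr)
{
	hdr->msgh_remote_port = dest;
	return mach_msg(hdr, MACH_SEND_MSG | MACH_SEND_TIMEOUT |
			MACH_SEND_NOTIFY, hdr->msgh_size, 0, MACH_PORT_NULL,
			0 /* zero timeout */, MACH_PORT_NULL);
}
#endif // illustration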
1846
1847 DISPATCH_NOINLINE
1848 static void
1849 _dispatch_mach_notify_merge(mach_port_t name, uint32_t data, bool final)
1850 {
1851 dispatch_unote_linkage_t dul, dul_next;
1852 dispatch_muxnote_t dmn;
1853
1854 _dispatch_debug_machport(name);
1855 dmn = _dispatch_mach_muxnote_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION);
1856 if (!dmn) {
1857 return;
1858 }
1859
1860 dmn->dmn_kev.data &= ~_DISPATCH_MACH_SP_FLAGS;
1861 if (!final) {
1862 // Re-register for notification before delivery
1863 final = !_dispatch_kevent_mach_notify_resume(dmn, data, 0);
1864 }
1865
1866 uint32_t flags = final ? EV_ONESHOT : EV_ENABLE;
1867 DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = 0;
1868 TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) {
1869 dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul);
1870 os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed);
1871 dux_merge_evt(du._du, flags, (data & du._du->du_fflags), 0, 0);
1872 if (!dul_next || DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev)) {
1873 // current merge is last in list (dmn might have been freed)
1874 // or it re-armed the notification
1875 break;
1876 }
1877 }
1878 }
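/*
 * Editor's sketch (illustration only; not part of libdispatch): the merge
 * loop above uses TAILQ_FOREACH_SAFE because dux_merge_evt() may free the
 * current linkage (or the whole muxnote). _SAFE caches the successor before
 * each body runs, as this minimal sketch shows; "example_" names are
 * hypothetical.
 */
#if 0 // illustration, compiled out
#include <sys/queue.h>

struct example_node {
	TAILQ_ENTRY(example_node) link;
	void (*fire)(struct example_node *); // may free its own node
};
TAILQ_HEAD(example_head, example_node);

static void
example_drain(struct example_head *head)
{
	struct example_node *node, *next;
	TAILQ_FOREACH_SAFE(node, head, link, next) {
		node->fire(node); // safe: `next` was captured beforehand
	}
}
#endif // illustration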
1879
1880 kern_return_t
1881 _dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED,
1882 mach_port_name_t name)
1883 {
1884 #if DISPATCH_DEBUG
1885 _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x "
1886 "deleted prematurely", name);
1887 #endif
1888 _dispatch_debug_machport(name);
1889 _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, true);
1890 return KERN_SUCCESS;
1891 }
1892
1893 kern_return_t
1894 _dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED,
1895 mach_port_name_t name)
1896 {
1897 kern_return_t kr;
1898
1899 _dispatch_debug("machport[0x%08x]: dead-name notification", name);
1900 _dispatch_debug_machport(name);
1901 _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, true);
1902
1903 // the act of receiving a dead name notification allocates a dead-name
1904 // right that must be deallocated
1905 kr = mach_port_deallocate(mach_task_self(), name);
1906 DISPATCH_VERIFY_MIG(kr);
1907 //(void)dispatch_assume_zero(kr);
1908 return KERN_SUCCESS;
1909 }
1910
1911 kern_return_t
1912 _dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED,
1913 mach_port_name_t name)
1914 {
1915 _dispatch_debug("machport[0x%08x]: send-possible notification", name);
1916 _dispatch_debug_machport(name);
1917 _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, false);
1918 return KERN_SUCCESS;
1919 }
1920
1921 void
1922 _dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr)
1923 {
1924 dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(dmsr)->du_muxnote;
1925 dispatch_unote_linkage_t dul;
1926 dispatch_unote_t du;
1927
1928 if (!_dispatch_unote_registered(dmsr)) {
1929 return;
1930 }
1931
1932 DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = true;
1933 TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) {
1934 du = _dispatch_unote_linkage_get_unote(dul);
1935 os_atomic_store2o(du._dmsr, dmsr_notification_armed, true, relaxed);
1936 }
1937 }
1938
1939 static dispatch_unote_t
1940 _dispatch_source_mach_send_create(dispatch_source_type_t dst,
1941 uintptr_t handle, unsigned long mask)
1942 {
1943 if (!mask) {
1944 // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD
1945 mask = DISPATCH_MACH_SEND_DEAD;
1946 }
1947 if (!handle) {
1948 handle = MACH_PORT_DEAD; // <rdar://problem/27651332>
1949 }
1950 return _dispatch_unote_create_with_handle(dst, handle, mask);
1951 }
1952
1953 static bool
1954 _dispatch_mach_send_update(dispatch_muxnote_t dmn)
1955 {
1956 if (dmn->dmn_kev.flags & EV_DELETE) {
1957 return _dispatch_kevent_mach_notify_resume(dmn, 0, dmn->dmn_kev.fflags);
1958 } else {
1959 return _dispatch_kevent_mach_notify_resume(dmn, dmn->dmn_kev.fflags, 0);
1960 }
1961 }
1962
1963 const dispatch_source_type_s _dispatch_source_type_mach_send = {
1964 .dst_kind = "mach_send",
1965 .dst_filter = DISPATCH_EVFILT_MACH_NOTIFICATION,
1966 .dst_flags = EV_CLEAR,
1967 .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE,
1968 .dst_size = sizeof(struct dispatch_source_refs_s),
1969
1970 .dst_create = _dispatch_source_mach_send_create,
1971 .dst_update_mux = _dispatch_mach_send_update,
1972 .dst_merge_evt = _dispatch_source_merge_evt,
1973 };
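/*
 * Editor's sketch (illustration only; not part of libdispatch): the public
 * face of the type above. A DISPATCH_SOURCE_TYPE_MACH_SEND source with the
 * DISPATCH_MACH_SEND_DEAD mask fires when the matching receive right dies;
 * "example_" names are hypothetical.
 */
#if 0 // illustration, compiled out
#include <dispatch/dispatch.h>
#include <mach/mach.h>

static dispatch_source_t
example_watch_peer(mach_port_t send_right, dispatch_queue_t q)
{
	dispatch_source_t ds = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_MACH_SEND, send_right,
			DISPATCH_MACH_SEND_DEAD, q);
	dispatch_source_set_event_handler(ds, ^{
		// Peer's receive right was destroyed: tear down and cancel.
		dispatch_source_cancel(ds);
	});
	dispatch_activate(ds);
	return ds;
}
#endif // illustration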
1974
1975 static dispatch_unote_t
1976 _dispatch_mach_send_create(dispatch_source_type_t dst,
1977 uintptr_t handle, unsigned long mask)
1978 {
1979 // without handle because the mach code will set the ident later
1980 dispatch_unote_t du =
1981 _dispatch_unote_create_without_handle(dst, handle, mask);
1982 if (du._dmsr) {
1983 du._dmsr->dmsr_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED;
1984 TAILQ_INIT(&du._dmsr->dmsr_replies);
1985 }
1986 return du;
1987 }
1988
1989 const dispatch_source_type_s _dispatch_mach_type_send = {
1990 .dst_kind = "mach_send (mach)",
1991 .dst_filter = DISPATCH_EVFILT_MACH_NOTIFICATION,
1992 .dst_flags = EV_CLEAR,
1993 .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE,
1994 .dst_size = sizeof(struct dispatch_mach_send_refs_s),
1995
1996 .dst_create = _dispatch_mach_send_create,
1997 .dst_update_mux = _dispatch_mach_send_update,
1998 .dst_merge_evt = _dispatch_mach_merge_notification,
1999 };
2000
2001 #endif // HAVE_MACH
2002 #pragma mark mach recv / reply
2003 #if HAVE_MACH
2004
2005 static void
2006 _dispatch_kevent_mach_msg_recv(dispatch_unote_t du, uint32_t flags,
2007 mach_msg_header_t *hdr)
2008 {
2009 mach_msg_size_t siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE;
2010 mach_port_t name = hdr->msgh_local_port;
2011
2012 if (!dispatch_assume(hdr->msgh_size <= UINT_MAX -
2013 DISPATCH_MACH_TRAILER_SIZE)) {
2014 _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
2015 "received overlarge message");
2016 } else if (!dispatch_assume(name)) {
2017 _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
2018 "received message with MACH_PORT_NULL port");
2019 } else {
2020 _dispatch_debug_machport(name);
2021 if (likely(du._du)) {
2022 return dux_merge_msg(du._du, flags, hdr, siz);
2023 }
2024 _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
2025 "received message with no listeners");
2026 }
2027
2028 mach_msg_destroy(hdr);
2029 if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
2030 free(hdr);
2031 }
2032 }
2033
2034 DISPATCH_NOINLINE
2035 static void
2036 _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke)
2037 {
2038 mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke);
2039 mach_msg_size_t siz;
2040 mach_msg_return_t kr = (mach_msg_return_t)ke->fflags;
2041 uint32_t flags = ke->flags;
2042 dispatch_unote_t du = _dispatch_kevent_get_unote(ke);
2043
2044 if (unlikely(!hdr)) {
2045 DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message");
2046 }
2047 if (likely(!kr)) {
2048 _dispatch_kevent_mach_msg_recv(du, flags, hdr);
2049 goto out;
2050 } else if (kr != MACH_RCV_TOO_LARGE) {
2051 goto out;
2052 } else if (!ke->data) {
2053 DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity");
2054 }
2055 if (unlikely(ke->ext[1] > (UINT_MAX - DISPATCH_MACH_TRAILER_SIZE))) {
2056 DISPATCH_INTERNAL_CRASH(ke->ext[1],
2057 "EVFILT_MACHPORT with overlarge message");
2058 }
2059 siz = _dispatch_kevent_mach_msg_size(ke) + DISPATCH_MACH_TRAILER_SIZE;
2060 hdr = malloc(siz);
2061 if (dispatch_assume(hdr)) {
2062 flags |= DISPATCH_EV_MSG_NEEDS_FREE;
2063 } else {
2064 // malloc failed; receive with a zero-size buffer so the kernel discards the message
2065 hdr = NULL;
2066 siz = 0;
2067 }
2068 mach_port_t name = (mach_port_name_t)ke->data;
2069 const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS |
2070 MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE);
2071 kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE,
2072 MACH_PORT_NULL);
2073 if (likely(!kr)) {
2074 _dispatch_kevent_mach_msg_recv(du, flags, hdr);
2075 goto out;
2076 } else if (kr == MACH_RCV_TOO_LARGE) {
2077 _dispatch_log("BUG in libdispatch client: "
2078 "_dispatch_kevent_mach_msg_drain: dropped message too "
2079 "large to fit in memory: id = 0x%x, size = %u",
2080 hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke));
2081 kr = MACH_MSG_SUCCESS;
2082 }
2083 if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
2084 free(hdr);
2085 }
2086 out:
2087 if (unlikely(kr)) {
2088 _dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: "
2089 "message reception failed", kr);
2090 }
2091 }
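/*
 * Editor's sketch (illustration only; not part of libdispatch): the
 * MACH_RCV_TOO_LARGE protocol the drain above implements. With
 * MACH_RCV_LARGE the kernel leaves an oversized message queued and reports
 * the needed size, so the receiver can retry with a bigger buffer (the
 * drain gets the size from ke->ext[1] instead and never loops). "example_"
 * names are hypothetical.
 */
#if 0 // illustration, compiled out
#include <mach/mach.h>
#include <stdlib.h>

static mach_msg_header_t *
example_receive_any_size(mach_port_t port)
{
	mach_msg_size_t siz = 0x1000;
	mach_msg_header_t *hdr = malloc(siz);
	kern_return_t kr;

	while (hdr) {
		kr = mach_msg(hdr, MACH_RCV_MSG | MACH_RCV_LARGE, 0, siz, port,
				MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
		if (kr != MACH_RCV_TOO_LARGE) {
			if (kr == MACH_MSG_SUCCESS) return hdr;
			break;
		}
		// msgh_size now holds the queued message's size; leave room for
		// the trailer the kernel appends on reception.
		siz = hdr->msgh_size +
				(mach_msg_size_t)sizeof(mach_msg_max_trailer_t);
		free(hdr);
		hdr = malloc(siz);
	}
	free(hdr);
	return NULL;
}
#endif // illustration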
2092
2093 const dispatch_source_type_s _dispatch_source_type_mach_recv = {
2094 .dst_kind = "mach_recv",
2095 .dst_filter = EVFILT_MACHPORT,
2096 .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
2097 .dst_fflags = 0,
2098 .dst_size = sizeof(struct dispatch_source_refs_s),
2099
2100 .dst_create = _dispatch_unote_create_with_handle,
2101 .dst_merge_evt = _dispatch_source_merge_evt,
2102 .dst_merge_msg = NULL, // never receives messages directly
2103
2104 .dst_per_trigger_qos = true,
2105 };
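/*
 * Editor's sketch (illustration only; not part of libdispatch): client-side
 * use of the type above. A DISPATCH_SOURCE_TYPE_MACH_RECV source only
 * signals that a message is queued; the handler dequeues it itself with
 * mach_msg(). "example_" names and the 512-byte body are hypothetical.
 */
#if 0 // illustration, compiled out
#include <dispatch/dispatch.h>
#include <mach/mach.h>

static dispatch_source_t
example_listen(mach_port_t recv_right, dispatch_queue_t q)
{
	dispatch_source_t ds = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_MACH_RECV, recv_right, 0, q);
	dispatch_source_set_event_handler(ds, ^{
		struct { mach_msg_header_t hdr; char body[512]; } buf;
		kern_return_t kr = mach_msg(&buf.hdr,
				MACH_RCV_MSG | MACH_RCV_TIMEOUT, 0, sizeof(buf),
				recv_right, 0, MACH_PORT_NULL);
		if (kr == MACH_MSG_SUCCESS) {
			// ... handle buf.hdr ...
			mach_msg_destroy(&buf.hdr);
		}
	});
	dispatch_activate(ds);
	return ds;
}
#endif // illustration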
2106
2107 static void
2108 _dispatch_source_mach_recv_direct_merge_msg(dispatch_unote_t du, uint32_t flags,
2109 mach_msg_header_t *msg, mach_msg_size_t msgsz DISPATCH_UNUSED)
2110 {
2111 dispatch_continuation_t dc = du._dr->ds_handler[DS_EVENT_HANDLER];
2112 dispatch_source_t ds = _dispatch_source_from_refs(du._dr);
2113 dispatch_queue_t cq = _dispatch_queue_get_current();
2114
2115 // see firehose_client_push_notify_async
2116 _dispatch_queue_set_current(ds->_as_dq);
2117 dc->dc_func(msg);
2118 _dispatch_queue_set_current(cq);
2119 if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
2120 free(msg);
2121 }
2122 if ((ds->dq_atomic_flags & DSF_CANCELED) ||
2123 (flags & (EV_ONESHOT | EV_DELETE))) {
2124 return _dispatch_source_merge_evt(du, flags, 0, 0, 0);
2125 }
2126 if (_dispatch_unote_needs_rearm(du)) {
2127 return _dispatch_unote_resume(du);
2128 }
2129 }
2130
2131 static void
2132 _dispatch_mach_recv_direct_merge(dispatch_unote_t du,
2133 uint32_t flags, uintptr_t data,
2134 uintptr_t status DISPATCH_UNUSED,
2135 pthread_priority_t pp)
2136 {
2137 if (flags & EV_VANISHED) {
2138 DISPATCH_CLIENT_CRASH(du._du->du_ident,
2139 "Unexpected EV_VANISHED (do not destroy random mach ports)");
2140 }
2141 return _dispatch_source_merge_evt(du, flags, data, 0, pp);
2142 }
2143
2144 const dispatch_source_type_s _dispatch_source_type_mach_recv_direct = {
2145 .dst_kind = "direct mach_recv",
2146 .dst_filter = EVFILT_MACHPORT,
2147 .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
2148 .dst_fflags = DISPATCH_MACH_RCV_OPTIONS,
2149 .dst_size = sizeof(struct dispatch_source_refs_s),
2150
2151 .dst_create = _dispatch_unote_create_with_handle,
2152 .dst_merge_evt = _dispatch_mach_recv_direct_merge,
2153 .dst_merge_msg = _dispatch_source_mach_recv_direct_merge_msg,
2154
2155 .dst_per_trigger_qos = true,
2156 };
2157
2158 const dispatch_source_type_s _dispatch_mach_type_recv = {
2159 .dst_kind = "mach_recv (channel)",
2160 .dst_filter = EVFILT_MACHPORT,
2161 .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
2162 .dst_fflags = DISPATCH_MACH_RCV_OPTIONS,
2163 .dst_size = sizeof(struct dispatch_mach_recv_refs_s),
2164
2165 // without handle because the mach code will set the ident after connect
2166 .dst_create = _dispatch_unote_create_without_handle,
2167 .dst_merge_evt = _dispatch_mach_recv_direct_merge,
2168 .dst_merge_msg = _dispatch_mach_merge_msg,
2169
2170 .dst_per_trigger_qos = true,
2171 };
2172
2173 DISPATCH_NORETURN
2174 static void
2175 _dispatch_mach_reply_merge_evt(dispatch_unote_t du,
2176 uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED,
2177 uintptr_t status DISPATCH_UNUSED,
2178 pthread_priority_t pp DISPATCH_UNUSED)
2179 {
2180 DISPATCH_INTERNAL_CRASH(du._du->du_ident, "Unexpected event");
2181 }
2182
2183 const dispatch_source_type_s _dispatch_mach_type_reply = {
2184 .dst_kind = "mach reply",
2185 .dst_filter = EVFILT_MACHPORT,
2186 .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_ONESHOT|EV_VANISHED,
2187 .dst_fflags = DISPATCH_MACH_RCV_OPTIONS,
2188 .dst_size = sizeof(struct dispatch_mach_reply_refs_s),
2189
2190 .dst_create = _dispatch_unote_create_with_handle,
2191 .dst_merge_evt = _dispatch_mach_reply_merge_evt,
2192 .dst_merge_msg = _dispatch_mach_reply_merge_msg,
2193 };
2194
2195 #pragma mark Mach channel SIGTERM notification (for XPC channels only)
2196
2197 const dispatch_source_type_s _dispatch_xpc_type_sigterm = {
2198 .dst_kind = "sigterm (xpc)",
2199 .dst_filter = EVFILT_SIGNAL,
2200 .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_ONESHOT,
2201 .dst_fflags = 0,
2202 .dst_size = sizeof(struct dispatch_xpc_term_refs_s),
2203
2204 .dst_create = _dispatch_unote_create_with_handle,
2205 .dst_merge_evt = _dispatch_xpc_sigterm_merge,
2206 };
2207
2208 #endif // HAVE_MACH
2209
2210 #endif // DISPATCH_EVENT_BACKEND_KEVENT