/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
#include "internal.h"
#if DISPATCH_EVENT_BACKEND_KEVENT
#include "protocolServer.h"

#if DISPATCH_USE_KEVENT_WORKQUEUE && !DISPATCH_USE_KEVENT_QOS
#error unsupported configuration
#endif

#define DISPATCH_KEVENT_MUXED_MARKER  1ul
#define DISPATCH_MACH_AUDIT_TOKEN_PID (5)
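
// A muxnote multiplexes one kernel knote across several unotes: every unote
// registered against the same (wlh, ident, filter) triple is linked into
// dmn_unotes_head, and dmn_kev is the single kevent installed with the
// kernel on their behalf.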
typedef struct dispatch_muxnote_s {
	TAILQ_ENTRY(dispatch_muxnote_s) dmn_list;
	TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_unotes_head;
	dispatch_wlh_t dmn_wlh;
	dispatch_kevent_s dmn_kev;
} *dispatch_muxnote_t;
static bool _dispatch_timers_force_max_leeway;
static int _dispatch_kq = -1;
static struct {
	dispatch_once_t pred;
	dispatch_unfair_lock_s lock;
} _dispatch_muxnotes;

#if !DISPATCH_USE_KEVENT_WORKQUEUE
#define _dispatch_muxnotes_lock() \
		_dispatch_unfair_lock_lock(&_dispatch_muxnotes.lock)
#define _dispatch_muxnotes_unlock() \
		_dispatch_unfair_lock_unlock(&_dispatch_muxnotes.lock)
#else
#define _dispatch_muxnotes_lock()
#define _dispatch_muxnotes_unlock()
#endif // !DISPATCH_USE_KEVENT_WORKQUEUE
DISPATCH_CACHELINE_ALIGN
static TAILQ_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s)
		_dispatch_sources[DSL_HASH_SIZE];

#define DISPATCH_NOTE_CLOCK_WALL NOTE_MACH_CONTINUOUS_TIME
#define DISPATCH_NOTE_CLOCK_MACH 0
static const uint32_t _dispatch_timer_index_to_fflags[] = {
#define DISPATCH_TIMER_FFLAGS_INIT(kind, qos, note) \
	[DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos)] = \
			DISPATCH_NOTE_CLOCK_##kind | NOTE_ABSOLUTE | \
			NOTE_NSECONDS | NOTE_LEEWAY | (note)
	DISPATCH_TIMER_FFLAGS_INIT(WALL, NORMAL, 0),
	DISPATCH_TIMER_FFLAGS_INIT(MACH, NORMAL, 0),
#if DISPATCH_HAVE_TIMER_QOS
	DISPATCH_TIMER_FFLAGS_INIT(WALL, CRITICAL, NOTE_CRITICAL),
	DISPATCH_TIMER_FFLAGS_INIT(MACH, CRITICAL, NOTE_CRITICAL),
	DISPATCH_TIMER_FFLAGS_INIT(WALL, BACKGROUND, NOTE_BACKGROUND),
	DISPATCH_TIMER_FFLAGS_INIT(MACH, BACKGROUND, NOTE_BACKGROUND),
#endif
#undef DISPATCH_TIMER_FFLAGS_INIT
};
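
// Every timer registration built from this table uses NOTE_ABSOLUTE |
// NOTE_NSECONDS (deadlines are absolute, in nanoseconds) plus NOTE_LEEWAY
// (the coalescing hint travels in ext[1]); the WALL slots additionally set
// NOTE_MACH_CONTINUOUS_TIME so deadlines keep counting across sleep.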

static void _dispatch_kevent_timer_drain(dispatch_kevent_t ke);
#pragma mark kevent debug

static const char *
_evfiltstr(short filt)
{
	switch (filt) {
#define _evfilt2(f) case (f): return #f
	_evfilt2(EVFILT_READ);
	_evfilt2(EVFILT_WRITE);
	_evfilt2(EVFILT_SIGNAL);
	_evfilt2(EVFILT_TIMER);

#ifdef DISPATCH_EVENT_BACKEND_KEVENT
	_evfilt2(EVFILT_VNODE);
	_evfilt2(EVFILT_PROC);
#if HAVE_MACH
	_evfilt2(EVFILT_MACHPORT);
	_evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION);
#endif
	_evfilt2(EVFILT_FS);
	_evfilt2(EVFILT_USER);
#ifdef EVFILT_SOCK
	_evfilt2(EVFILT_SOCK);
#endif
#ifdef EVFILT_MEMORYSTATUS
	_evfilt2(EVFILT_MEMORYSTATUS);
#endif
#endif // DISPATCH_EVENT_BACKEND_KEVENT

	_evfilt2(DISPATCH_EVFILT_TIMER);
	_evfilt2(DISPATCH_EVFILT_CUSTOM_ADD);
	_evfilt2(DISPATCH_EVFILT_CUSTOM_OR);
	_evfilt2(DISPATCH_EVFILT_CUSTOM_REPLACE);
	default:
		return "EVFILT_missing";
	}
}

#if DISPATCH_DEBUG
static char *
_evflagstr2(uint16_t *flagsp)
{
#define _evflag2(f) \
	if ((*flagsp & (f)) == (f) && (f)) { \
		*flagsp &= ~(f); \
		return #f " "; \
	}
	_evflag2(EV_ADD);
	_evflag2(EV_DELETE);
	_evflag2(EV_ENABLE);
	_evflag2(EV_DISABLE);
	_evflag2(EV_ONESHOT);
	_evflag2(EV_CLEAR);
	_evflag2(EV_RECEIPT);
	_evflag2(EV_DISPATCH);
	_evflag2(EV_UDATA_SPECIFIC);
	_evflag2(EV_VANISHED);
	*flagsp = 0;
	return "EV_UNKNOWN ";
}

DISPATCH_NOINLINE
static const char *
_evflagstr(uint16_t flags, char *str, size_t strsize)
{
	str[0] = 0;
	while (flags) {
		strlcat(str, _evflagstr2(&flags), strsize);
	}
	size_t sz = strlen(str);
	if (sz) str[sz-1] = 0;
	return str;
}

DISPATCH_NOINLINE
static void
dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev,
		int i, int n, const char *function, unsigned int line)
{
	char flagstr[256];
	char i_n[31];

	if (n > 1) {
		snprintf(i_n, sizeof(i_n), "%d/%d ", i + 1, n);
	} else {
		i_n[0] = '\0';
	}
	if (kev->flags & EV_DELETE) {
		verb = "deleting";
	} else if (kev->flags & EV_ADD) {
		verb = "adding";
	}
#if DISPATCH_USE_KEVENT_QOS
	_dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
			"flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, "
			"qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, "
			"ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident,
			_evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
			sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
			kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3],
			function, line);
#else
	_dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
			"flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx}: "
			"%s #%u", verb, kev, i_n,
			kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
			sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
			function, line);
#endif
}
#else
static inline void
dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev,
		int i, int n, const char *function, unsigned int line)
{
	(void)verb; (void)kev; (void)i; (void)n; (void)function; (void)line;
}
#endif // DISPATCH_DEBUG
#define _dispatch_kevent_debug_n(verb, _kev, i, n) \
		dispatch_kevent_debug(verb, _kev, i, n, __FUNCTION__, __LINE__)
#define _dispatch_kevent_debug(verb, _kev) \
		_dispatch_kevent_debug_n(verb, _kev, 0, 0)
#if DISPATCH_MGR_QUEUE_DEBUG
#define _dispatch_kevent_mgr_debug(verb, kev) _dispatch_kevent_debug(verb, kev)
#else
#define _dispatch_kevent_mgr_debug(verb, kev) ((void)verb, (void)kev)
#endif // DISPATCH_MGR_QUEUE_DEBUG
#if DISPATCH_WLH_DEBUG
#define _dispatch_kevent_wlh_debug(verb, kev) _dispatch_kevent_debug(verb, kev)
#else
#define _dispatch_kevent_wlh_debug(verb, kev) ((void)verb, (void)kev)
#endif // DISPATCH_WLH_DEBUG

#if DISPATCH_MACHPORT_DEBUG
#ifndef MACH_PORT_TYPE_SPREQUEST
#define MACH_PORT_TYPE_SPREQUEST 0x40000000
#endif

DISPATCH_NOINLINE
void
dispatch_debug_machport(mach_port_t name, const char* str)
{
	mach_port_type_t type;
	mach_msg_bits_t ns = 0, nr = 0, nso = 0, nd = 0;
	unsigned int dnreqs = 0, dnrsiz;
	kern_return_t kr = mach_port_type(mach_task_self(), name, &type);
	if (kr) {
		_dispatch_log("machport[0x%08x] = { error(0x%x) \"%s\" }: %s", name,
				kr, mach_error_string(kr), str);
		return;
	}
	if (type & MACH_PORT_TYPE_SEND) {
		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
				MACH_PORT_RIGHT_SEND, &ns));
	}
	if (type & MACH_PORT_TYPE_SEND_ONCE) {
		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
				MACH_PORT_RIGHT_SEND_ONCE, &nso));
	}
	if (type & MACH_PORT_TYPE_DEAD_NAME) {
		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
				MACH_PORT_RIGHT_DEAD_NAME, &nd));
	}
	if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) {
		kr = mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs);
		if (kr != KERN_INVALID_RIGHT) (void)dispatch_assume_zero(kr);
	}
	if (type & MACH_PORT_TYPE_RECEIVE) {
		mach_port_status_t status = { .mps_pset = 0, };
		mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
				MACH_PORT_RIGHT_RECEIVE, &nr));
		(void)dispatch_assume_zero(mach_port_get_attributes(mach_task_self(),
				name, MACH_PORT_RECEIVE_STATUS, (void*)&status, &cnt));
		_dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
				"dnreqs(%03u) spreq(%s) nsreq(%s) pdreq(%s) srights(%s) "
				"sorights(%03u) qlim(%03u) msgcount(%03u) mkscount(%03u) "
				"seqno(%03u) }: %s", name, nr, ns, nso, nd, dnreqs,
				type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N",
				status.mps_nsrequest ? "Y":"N", status.mps_pdrequest ? "Y":"N",
				status.mps_srights ? "Y":"N", status.mps_sorights,
				status.mps_qlimit, status.mps_msgcount, status.mps_mscount,
				status.mps_seqno, str);
	} else if (type & (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE|
			MACH_PORT_TYPE_DEAD_NAME)) {
		_dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
				"dnreqs(%03u) spreq(%s) }: %s", name, nr, ns, nso, nd, dnreqs,
				type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", str);
	} else {
		_dispatch_log("machport[0x%08x] = { type(0x%08x) }: %s", name, type,
				str);
	}
}
#endif

#pragma mark dispatch_kevent_t

#if HAVE_MACH
static dispatch_once_t _dispatch_mach_host_port_pred;
static mach_port_t _dispatch_mach_host_port;

static inline void*
_dispatch_kevent_mach_msg_buf(dispatch_kevent_t ke)
{
	return (void*)ke->ext[0];
}

static inline mach_msg_size_t
_dispatch_kevent_mach_msg_size(dispatch_kevent_t ke)
{
	// buffer size in the successful receive case, but message size (like
	// msgh_size) in the MACH_RCV_TOO_LARGE case, i.e. add trailer size.
	return (mach_msg_size_t)ke->ext[1];
}

static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke);
static inline void _dispatch_mach_host_calendar_change_register(void);

// DISPATCH_MACH_NOTIFICATION_ARMED are muxnotes that aren't registered with
// kevent for real, but with mach_port_request_notification()
//
// the kevent structure is used for bookkeeping:
// - ident, filter, flags and fflags have their usual meaning
// - data is used to monitor the actual state of the
//   mach_port_request_notification()
// - ext[0] is a boolean that tracks whether the notification is armed or not
#define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->ext[0])
#endif // HAVE_MACH

DISPATCH_ALWAYS_INLINE
static dispatch_muxnote_t
_dispatch_kevent_get_muxnote(dispatch_kevent_t ke)
{
	uintptr_t dmn_addr = (uintptr_t)ke->udata & ~DISPATCH_KEVENT_MUXED_MARKER;
	return (dispatch_muxnote_t)dmn_addr;
}

DISPATCH_ALWAYS_INLINE
static dispatch_unote_t
_dispatch_kevent_get_unote(dispatch_kevent_t ke)
{
	dispatch_assert((ke->udata & DISPATCH_KEVENT_MUXED_MARKER) == 0);
	return (dispatch_unote_t){ ._du = (dispatch_unote_class_t)ke->udata };
}
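
// Called for kevents that came back with EV_ERROR and a non-zero errno in
// ke->data. After a deferred EV_DELETE the udata may already have been
// reclaimed and must not be dereferenced; anything unexpected is logged
// through _dispatch_bug_kevent_client().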
DISPATCH_NOINLINE
static void
_dispatch_kevent_print_error(dispatch_kevent_t ke)
{
	_dispatch_debug("kevent[0x%llx]: handling error",
			(unsigned long long)ke->udata);
	if (ke->flags & EV_DELETE) {
		if (ke->flags & EV_UDATA_SPECIFIC) {
			if (ke->data == EINPROGRESS) {
				// deferred EV_DELETE
				return;
			}
		}
		// for EV_DELETE if the update was deferred we may have reclaimed
		// the udata already, and it is unsafe to dereference it now.
	} else if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) {
		ke->flags |= _dispatch_kevent_get_muxnote(ke)->dmn_kev.flags;
	} else if (ke->udata) {
		if (!_dispatch_unote_registered(_dispatch_kevent_get_unote(ke))) {
			ke->flags |= EV_ADD;
		}
	}

#if HAVE_MACH
	if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP &&
			(ke->flags & EV_ADD) && (ke->fflags & MACH_RCV_MSG)) {
		DISPATCH_INTERNAL_CRASH(ke->ident,
				"Missing EVFILT_MACHPORT support for ports");
	}
#endif

	// log the unexpected error
	_dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter),
			!ke->udata ? NULL :
			ke->flags & EV_DELETE ? "delete" :
			ke->flags & EV_ADD ? "add" :
			ke->flags & EV_ENABLE ? "enable" : "monitor",
			(int)ke->data);
}
DISPATCH_NOINLINE
static void
_dispatch_kevent_merge(dispatch_unote_t du, dispatch_kevent_t ke)
{
	uintptr_t data;
	uintptr_t status = 0;
	pthread_priority_t pp = 0;
#if DISPATCH_USE_KEVENT_QOS
	pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK;
#endif
	dispatch_unote_action_t action = du._du->du_data_action;
	if (action == DISPATCH_UNOTE_ACTION_DATA_SET) {
		// ke->data is signed and "negative available data" makes no sense
		// zero bytes happens when EV_EOF is set
		dispatch_assert(ke->data >= 0l);
		data = ~(unsigned long)ke->data;
#if HAVE_MACH
	} else if (du._du->du_filter == EVFILT_MACHPORT) {
		data = DISPATCH_MACH_RECV_MESSAGE;
#endif
	} else if (action == DISPATCH_UNOTE_ACTION_DATA_ADD) {
		data = (unsigned long)ke->data;
	} else if (action == DISPATCH_UNOTE_ACTION_DATA_OR) {
		data = ke->fflags & du._du->du_fflags;
	} else if (action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) {
		data = ke->fflags & du._du->du_fflags;
		status = (unsigned long)ke->data;
	} else {
		DISPATCH_INTERNAL_CRASH(action, "Corrupt unote action");
	}
	return dux_merge_evt(du._du, ke->flags, data, status, pp);
}

DISPATCH_NOINLINE
static void
_dispatch_kevent_merge_muxed(dispatch_kevent_t ke)
{
	dispatch_muxnote_t dmn = _dispatch_kevent_get_muxnote(ke);
	dispatch_unote_linkage_t dul, dul_next;

	TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) {
		_dispatch_kevent_merge(_dispatch_unote_linkage_get_unote(dul), ke);
	}
}

DISPATCH_NOINLINE
static void
_dispatch_kevent_drain(dispatch_kevent_t ke)
{
	if (ke->filter == EVFILT_USER) {
		_dispatch_kevent_mgr_debug("received", ke);
		return;
	}
	_dispatch_kevent_debug("received", ke);
	if (unlikely(ke->flags & EV_ERROR)) {
		if (ke->filter == EVFILT_PROC && ke->data == ESRCH) {
			// EVFILT_PROC may fail with ESRCH when the process exists but is a zombie
			// <rdar://problem/5067725>. As a workaround, we simulate an exit event for
			// any EVFILT_PROC with an invalid pid <rdar://problem/6626350>.
			ke->flags &= ~(EV_ERROR | EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC);
			ke->flags |= EV_ONESHOT;
			ke->fflags = NOTE_EXIT;
			ke->data = 0;
			_dispatch_kevent_debug("synthetic NOTE_EXIT", ke);
		} else {
			return _dispatch_kevent_print_error(ke);
		}
	}
	if (ke->filter == EVFILT_TIMER) {
		return _dispatch_kevent_timer_drain(ke);
	}

#if HAVE_MACH
	if (ke->filter == EVFILT_MACHPORT) {
		if (_dispatch_kevent_mach_msg_size(ke)) {
			return _dispatch_kevent_mach_msg_drain(ke);
		}
	}
#endif

	if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) {
		return _dispatch_kevent_merge_muxed(ke);
	}
	return _dispatch_kevent_merge(_dispatch_kevent_get_unote(ke), ke);
}

#pragma mark dispatch_kq

#if DISPATCH_USE_MGR_THREAD
DISPATCH_NOINLINE
static int
_dispatch_kq_create(const void *guard_ptr)
{
	static const dispatch_kevent_s kev = {
		.ident = 1,
		.filter = EVFILT_USER,
		.flags = EV_ADD|EV_CLEAR,
		.udata = (uintptr_t)DISPATCH_WLH_MANAGER,
	};
	int kqfd;

	_dispatch_fork_becomes_unsafe();
#if DISPATCH_USE_GUARDED_FD
	guardid_t guard = (uintptr_t)guard_ptr;
	kqfd = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP);
#else
	(void)guard_ptr;
	kqfd = kqueue();
#endif
	if (kqfd == -1) {
		int err = errno;
		switch (err) {
		case EMFILE:
			DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
					"process is out of file descriptors");
			break;
		case ENFILE:
			DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
					"system is out of file descriptors");
			break;
		case ENOMEM:
			DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
					"kernel is out of memory");
			break;
		default:
			DISPATCH_INTERNAL_CRASH(err, "kqueue() failure");
			break;
		}
	}
#if DISPATCH_USE_KEVENT_QOS
	dispatch_assume_zero(kevent_qos(kqfd, &kev, 1, NULL, 0, NULL, NULL, 0));
#else
	dispatch_assume_zero(kevent(kqfd, &kev, 1, NULL, 0, NULL));
#endif
	return kqfd;
}
#endif // DISPATCH_USE_MGR_THREAD

static void
_dispatch_kq_init(void *context)
{
	bool *kq_initialized = context;

	_dispatch_fork_becomes_unsafe();
	if (unlikely(getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"))) {
		_dispatch_timers_force_max_leeway = true;
	}
	*kq_initialized = true;

#if DISPATCH_USE_KEVENT_WORKQUEUE
	_dispatch_kevent_workqueue_init();
	if (_dispatch_kevent_workqueue_enabled) {
		int r;
		int kqfd = _dispatch_kq;
		const dispatch_kevent_s ke = {
			.ident = 1,
			.filter = EVFILT_USER,
			.flags = EV_ADD|EV_CLEAR,
			.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
			.udata = (uintptr_t)DISPATCH_WLH_MANAGER,
		};
retry:
		r = kevent_qos(kqfd, &ke, 1, NULL, 0, NULL, NULL,
				KEVENT_FLAG_WORKQ|KEVENT_FLAG_IMMEDIATE);
		if (unlikely(r == -1)) {
			int err = errno;
			if (err == EINTR) {
				goto retry;
			}
			DISPATCH_CLIENT_CRASH(err,
					"Failed to initialize workqueue kevent");
		}
		return;
	}
#endif // DISPATCH_USE_KEVENT_WORKQUEUE
#if DISPATCH_USE_MGR_THREAD
	_dispatch_kq = _dispatch_kq_create(&_dispatch_mgr_q);
	dx_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0);
#endif // DISPATCH_USE_MGR_THREAD
}

#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
static void _dispatch_memorypressure_init(void);
#else
#define _dispatch_memorypressure_init() ((void)0)
#endif
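
// All kernel interaction funnels through _dispatch_kq_poll(): the first call
// lazily initializes the kq (or the kevent workqueue), after which each call
// issues a single kevent()/kevent_qos() that both pushes changelist updates
// and pulls pending events.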
DISPATCH_NOINLINE
static int
_dispatch_kq_poll(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n,
		dispatch_kevent_t ke_out, int n_out, void *buf, size_t *avail,
		uint32_t flags)
{
	static dispatch_once_t pred;
	bool kq_initialized = false;
	int r = 0;

	dispatch_once_f(&pred, &kq_initialized, _dispatch_kq_init);
	if (unlikely(kq_initialized)) {
		// The calling thread was the one doing the initialization
		//
		// The event loop needs the memory pressure source and debug channel,
		// however creating these will recursively call _dispatch_kq_poll(),
		// so we can't quite initialize them under the dispatch once.
		_dispatch_memorypressure_init();
		_voucher_activity_debug_channel_init();
	}

#if !DISPATCH_USE_KEVENT_QOS
	if (flags & KEVENT_FLAG_ERROR_EVENTS) {
		// emulate KEVENT_FLAG_ERROR_EVENTS
		for (r = 0; r < n; r++) {
			ke[r].flags |= EV_RECEIPT;
		}
		r = 0;
	}
#endif

retry:
	if (unlikely(wlh == NULL)) {
		DISPATCH_INTERNAL_CRASH(wlh, "Invalid wlh");
	} else if (wlh == DISPATCH_WLH_ANON) {
		int kqfd = _dispatch_kq;
#if DISPATCH_USE_KEVENT_QOS
		if (_dispatch_kevent_workqueue_enabled) {
			flags |= KEVENT_FLAG_WORKQ;
		}
		r = kevent_qos(kqfd, ke, n, ke_out, n_out, buf, avail, flags);
#else
		const struct timespec timeout_immediately = {}, *timeout = NULL;
		if (flags & KEVENT_FLAG_IMMEDIATE) timeout = &timeout_immediately;
		r = kevent(kqfd, ke, n, ke_out, n_out, timeout);
#endif
	}
	if (unlikely(r == -1)) {
		int err = errno;
		switch (err) {
		case ENOMEM:
			_dispatch_temporary_resource_shortage();
			/* FALLTHROUGH */
		case EINTR:
			goto retry;
		case EBADF:
			DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors");
		default:
			DISPATCH_CLIENT_CRASH(err, "Unexpected error from kevent");
		}
	}
	return r;
}

DISPATCH_NOINLINE
static int
_dispatch_kq_drain(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n,
		uint32_t flags)
{
	dispatch_kevent_s ke_out[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT];
	bool poll_for_events = !(flags & KEVENT_FLAG_ERROR_EVENTS);
	int i, n_out = countof(ke_out), r = 0;
	size_t *avail = NULL;
	void *buf = NULL;

#if DISPATCH_USE_KEVENT_QOS
	size_t size;
	if (poll_for_events) {
		size = DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE +
				DISPATCH_MACH_TRAILER_SIZE;
		buf = alloca(size);
		avail = &size;
	}
#endif

#if DISPATCH_DEBUG
	for (r = 0; r < n; r++) {
		if (ke[r].filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) {
			_dispatch_kevent_debug_n(NULL, ke + r, r, n);
		}
	}
#endif

	if (poll_for_events) _dispatch_clear_return_to_kernel();
	n = _dispatch_kq_poll(wlh, ke, n, ke_out, n_out, buf, avail, flags);
	if (n == 0) {
		r = 0;
	} else if (flags & KEVENT_FLAG_ERROR_EVENTS) {
		for (i = 0, r = 0; i < n; i++) {
			if ((ke_out[i].flags & EV_ERROR) && ke_out[i].data) {
				_dispatch_kevent_drain(&ke_out[i]);
				r = (int)ke_out[i].data;
			}
		}
	} else {
		for (i = 0, r = 0; i < n; i++) {
			_dispatch_kevent_drain(&ke_out[i]);
		}
	}
	return r;
}

DISPATCH_ALWAYS_INLINE
static inline int
_dispatch_kq_update_one(dispatch_wlh_t wlh, dispatch_kevent_t ke)
{
	return _dispatch_kq_drain(wlh, ke, 1,
			KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_kq_update_all(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n)
{
	(void)_dispatch_kq_drain(wlh, ke, n,
			KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk,
		uint16_t action)
{
	dispatch_unote_class_t du = _du._du;
	dispatch_source_type_t dst = du->du_type;
	uint16_t flags = dst->dst_flags | action;

	if ((flags & EV_VANISHED) && !(flags & EV_ADD)) {
		flags &= ~EV_VANISHED;
	}
	pthread_priority_t pp = _dispatch_priority_to_pp(du->du_priority);
	*dk = (dispatch_kevent_s){
		.ident = du->du_ident,
		.filter = dst->dst_filter,
		.flags = flags,
		.udata = (uintptr_t)du,
		.fflags = du->du_fflags | dst->dst_fflags,
		.data = (typeof(dk->data))dst->dst_data,
#if DISPATCH_USE_KEVENT_QOS
		.qos = (typeof(dk->qos))pp,
#endif
	};
}
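
// Knote updates targeting the current workloop are batched in the thread's
// deferred items (ddi_eventlist) and flushed in one kevent call; the helpers
// below locate, reuse, or drop a slot in that pending list.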
DISPATCH_ALWAYS_INLINE
static inline int
_dispatch_kq_deferred_find_slot(dispatch_deferred_items_t ddi,
		int16_t filter, uint64_t ident, uint64_t udata)
{
	dispatch_kevent_t events = ddi->ddi_eventlist;
	int i;

	for (i = 0; i < ddi->ddi_nevents; i++) {
		if (events[i].filter == filter && events[i].ident == ident &&
				events[i].udata == udata) {
			break;
		}
	}
	return i;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_kevent_t
_dispatch_kq_deferred_reuse_slot(dispatch_wlh_t wlh,
		dispatch_deferred_items_t ddi, int slot)
{
	if (wlh != DISPATCH_WLH_ANON) _dispatch_set_return_to_kernel();
	if (unlikely(slot == ddi->ddi_maxevents)) {
		int nevents = ddi->ddi_nevents;
		ddi->ddi_nevents = 1;
		_dispatch_kq_update_all(wlh, ddi->ddi_eventlist, nevents);
		dispatch_assert(ddi->ddi_nevents == 1);
		slot = 0;
	} else if (slot == ddi->ddi_nevents) {
		ddi->ddi_nevents++;
	}
	return ddi->ddi_eventlist + slot;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_kq_deferred_discard_slot(dispatch_deferred_items_t ddi, int slot)
{
	if (slot < ddi->ddi_nevents) {
		int last = --ddi->ddi_nevents;
		if (slot != last) {
			ddi->ddi_eventlist[slot] = ddi->ddi_eventlist[last];
		}
	}
}

DISPATCH_NOINLINE
static void
_dispatch_kq_deferred_update(dispatch_wlh_t wlh, dispatch_kevent_t ke)
{
	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();

	if (ddi && ddi->ddi_maxevents && wlh == _dispatch_get_wlh()) {
		int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident,
				ke->udata);
		dispatch_kevent_t dk = _dispatch_kq_deferred_reuse_slot(wlh, ddi, slot);
		*dk = *ke;
		if (ke->filter != EVFILT_USER) {
			_dispatch_kevent_mgr_debug("deferred", ke);
		}
	} else {
		_dispatch_kq_update_one(wlh, ke);
	}
}

DISPATCH_NOINLINE
static int
_dispatch_kq_immediate_update(dispatch_wlh_t wlh, dispatch_kevent_t ke)
{
	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
	if (ddi && wlh == _dispatch_get_wlh()) {
		int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident,
				ke->udata);
		_dispatch_kq_deferred_discard_slot(ddi, slot);
	}
	return _dispatch_kq_update_one(wlh, ke);
}
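
// State machine for a direct unote's knote: EV_ADD retains the wlh and
// publishes du_wlh before the kernel can deliver events against it, and
// EV_DELETE undoes both, except when the deletion is deferred (EINPROGRESS).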
DISPATCH_NOINLINE
static bool
_dispatch_kq_unote_update(dispatch_wlh_t wlh, dispatch_unote_t _du,
		uint16_t action_flags)
{
	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
	dispatch_unote_class_t du = _du._du;
	dispatch_kevent_t ke;
	int r = 0;

	if (action_flags & EV_ADD) {
		// as soon as we register we may get an event delivery and it has to
		// see du_wlh already set, else it will not unregister the kevent
		dispatch_assert(du->du_wlh == NULL);
		_dispatch_wlh_retain(wlh);
		du->du_wlh = wlh;
	}

	if (ddi && wlh == _dispatch_get_wlh()) {
		int slot = _dispatch_kq_deferred_find_slot(ddi,
				du->du_filter, du->du_ident, (uintptr_t)du);
		if (slot < ddi->ddi_nevents) {
			// <rdar://problem/26202376> when deleting and an enable is pending,
			// we must merge EV_ENABLE to do an immediate deletion
			action_flags |= (ddi->ddi_eventlist[slot].flags & EV_ENABLE);
		}

		if (!(action_flags & EV_ADD) && (action_flags & EV_ENABLE)) {
			// can be deferred, so do it!
			ke = _dispatch_kq_deferred_reuse_slot(wlh, ddi, slot);
			_dispatch_kq_unote_set_kevent(du, ke, action_flags);
			_dispatch_kevent_debug("deferred", ke);
			goto done;
		}

		// get rid of the deferred item if any, we can't wait
		_dispatch_kq_deferred_discard_slot(ddi, slot);
	}

	if (action_flags) {
		dispatch_kevent_s dk;
		_dispatch_kq_unote_set_kevent(du, &dk, action_flags);
		r = _dispatch_kq_update_one(wlh, &dk);
	}

done:
	if (action_flags & EV_ADD) {
		if (unlikely(r)) {
			_dispatch_wlh_release(du->du_wlh);
			du->du_wlh = NULL;
		}
		return r == 0;
	}

	if (action_flags & EV_DELETE) {
		if (r == EINPROGRESS) {
			return false;
		}
		_dispatch_wlh_release(du->du_wlh);
		du->du_wlh = NULL;
	}

	dispatch_assume_zero(r);
	return true;
}

#pragma mark dispatch_muxnote_t

static void
_dispatch_muxnotes_init(void *ctxt DISPATCH_UNUSED)
{
	uint32_t i;
	for (i = 0; i < DSL_HASH_SIZE; i++) {
		TAILQ_INIT(&_dispatch_sources[i]);
	}
}
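
// Buckets are chosen by hashing the ident; mach port names are first reduced
// to MACH_PORT_INDEX(ident) so that a name's generation bits do not affect
// which bucket the muxnote lands in.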
DISPATCH_ALWAYS_INLINE
static inline struct dispatch_muxnote_bucket_s *
_dispatch_muxnote_bucket(uint64_t ident, int16_t filter)
{
	switch (filter) {
#if HAVE_MACH
	case EVFILT_MACHPORT:
	case DISPATCH_EVFILT_MACH_NOTIFICATION:
		ident = MACH_PORT_INDEX(ident);
		break;
#endif
	case EVFILT_SIGNAL: // signo
	case EVFILT_PROC: // pid_t
	default: // fd
		break;
	}

	dispatch_once_f(&_dispatch_muxnotes.pred, NULL, _dispatch_muxnotes_init);
	return &_dispatch_sources[DSL_HASH((uintptr_t)ident)];
}
#define _dispatch_unote_muxnote_bucket(du) \
	_dispatch_muxnote_bucket(du._du->du_ident, du._du->du_filter)

DISPATCH_ALWAYS_INLINE
static inline dispatch_muxnote_t
_dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb,
		dispatch_wlh_t wlh, uint64_t ident, int16_t filter)
{
	dispatch_muxnote_t dmn;
	_dispatch_muxnotes_lock();
	TAILQ_FOREACH(dmn, dmb, dmn_list) {
		if (dmn->dmn_wlh == wlh && dmn->dmn_kev.ident == ident &&
				dmn->dmn_kev.filter == filter) {
			break;
		}
	}
	_dispatch_muxnotes_unlock();
	return dmn;
}
#define _dispatch_unote_muxnote_find(dmb, du, wlh) \
	_dispatch_muxnote_find(dmb, wlh, du._du->du_ident, du._du->du_filter)

DISPATCH_ALWAYS_INLINE
static inline dispatch_muxnote_t
_dispatch_mach_muxnote_find(mach_port_t name, int16_t filter)
{
	struct dispatch_muxnote_bucket_s *dmb;
	dmb = _dispatch_muxnote_bucket(name, filter);
	return _dispatch_muxnote_find(dmb, DISPATCH_WLH_ANON, name, filter);
}
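
// Attach a unote to an existing muxnote that already covers this
// (wlh, ident, filter), merely widening the registered fflags if needed;
// otherwise allocate a fresh muxnote and install its kevent.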
DISPATCH_NOINLINE
static bool
_dispatch_unote_register_muxed(dispatch_unote_t du, dispatch_wlh_t wlh)
{
	struct dispatch_muxnote_bucket_s *dmb = _dispatch_unote_muxnote_bucket(du);
	dispatch_muxnote_t dmn;
	bool installed = true;

	dmn = _dispatch_unote_muxnote_find(dmb, du, wlh);
	if (dmn) {
		uint32_t flags = du._du->du_fflags & ~dmn->dmn_kev.fflags;
		if (flags) {
			dmn->dmn_kev.fflags |= flags;
			if (unlikely(du._du->du_type->dst_update_mux)) {
				installed = du._du->du_type->dst_update_mux(dmn);
			} else {
				installed = !_dispatch_kq_immediate_update(dmn->dmn_wlh,
						&dmn->dmn_kev);
			}
			if (!installed) dmn->dmn_kev.fflags &= ~flags;
		}
	} else {
		dmn = _dispatch_calloc(1, sizeof(struct dispatch_muxnote_s));
		TAILQ_INIT(&dmn->dmn_unotes_head);
		_dispatch_kq_unote_set_kevent(du, &dmn->dmn_kev, EV_ADD | EV_ENABLE);
#if DISPATCH_USE_KEVENT_QOS
		dmn->dmn_kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
#endif
		dmn->dmn_kev.udata = (uintptr_t)dmn | DISPATCH_KEVENT_MUXED_MARKER;
		dmn->dmn_wlh = wlh;
		if (unlikely(du._du->du_type->dst_update_mux)) {
			installed = du._du->du_type->dst_update_mux(dmn);
		} else {
			installed = !_dispatch_kq_immediate_update(dmn->dmn_wlh,
					&dmn->dmn_kev);
		}
		if (installed) {
			dmn->dmn_kev.flags &= ~(EV_ADD | EV_VANISHED);
			_dispatch_muxnotes_lock();
			TAILQ_INSERT_TAIL(dmb, dmn, dmn_list);
			_dispatch_muxnotes_unlock();
		} else {
			free(dmn);
		}
	}

	if (installed) {
		dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
		TAILQ_INSERT_TAIL(&dmn->dmn_unotes_head, dul, du_link);
		dul->du_muxnote = dmn;

		if (du._du->du_filter == DISPATCH_EVFILT_MACH_NOTIFICATION) {
			bool armed = DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev);
			os_atomic_store2o(du._dmsr, dmsr_notification_armed, armed, relaxed);
		}
		du._du->du_wlh = DISPATCH_WLH_ANON;
	}
	return installed;
}

DISPATCH_NOINLINE
bool
_dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh,
		dispatch_priority_t pri)
{
	dispatch_assert(!_dispatch_unote_registered(du));
	du._du->du_priority = pri;
	switch (du._du->du_filter) {
	case DISPATCH_EVFILT_CUSTOM_ADD:
	case DISPATCH_EVFILT_CUSTOM_OR:
	case DISPATCH_EVFILT_CUSTOM_REPLACE:
		du._du->du_wlh = DISPATCH_WLH_ANON;
		return true;
	}
	if (!du._du->du_is_direct) {
		return _dispatch_unote_register_muxed(du, DISPATCH_WLH_ANON);
	}
	return _dispatch_kq_unote_update(wlh, du, EV_ADD | EV_ENABLE);
}

void
_dispatch_unote_resume(dispatch_unote_t du)
{
	dispatch_assert(_dispatch_unote_registered(du));

	if (du._du->du_is_direct) {
		dispatch_wlh_t wlh = du._du->du_wlh;
		_dispatch_kq_unote_update(wlh, du, EV_ENABLE);
	} else if (unlikely(du._du->du_type->dst_update_mux)) {
		dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
		du._du->du_type->dst_update_mux(dul->du_muxnote);
	} else {
		dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
		dispatch_muxnote_t dmn = dul->du_muxnote;
		_dispatch_kq_deferred_update(dmn->dmn_wlh, &dmn->dmn_kev);
	}
}

DISPATCH_NOINLINE
static bool
_dispatch_unote_unregister_muxed(dispatch_unote_t du, uint32_t flags)
{
	dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
	dispatch_muxnote_t dmn = dul->du_muxnote;
	bool update = false, dispose = false;

	if (dmn->dmn_kev.filter == DISPATCH_EVFILT_MACH_NOTIFICATION) {
		os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed);
	}
	dispatch_assert(du._du->du_wlh == DISPATCH_WLH_ANON);
	du._du->du_wlh = NULL;
	TAILQ_REMOVE(&dmn->dmn_unotes_head, dul, du_link);
	_TAILQ_TRASH_ENTRY(dul, du_link);
	dul->du_muxnote = NULL;

	if (TAILQ_EMPTY(&dmn->dmn_unotes_head)) {
		dmn->dmn_kev.flags |= EV_DELETE;
		update = dispose = true;
	} else {
		uint32_t fflags = du._du->du_type->dst_fflags;
		TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) {
			du = _dispatch_unote_linkage_get_unote(dul);
			fflags |= du._du->du_fflags;
		}
		if (dmn->dmn_kev.fflags & ~fflags) {
			dmn->dmn_kev.fflags &= fflags;
			update = true;
		}
	}
	if (update && !(flags & DU_UNREGISTER_ALREADY_DELETED)) {
		if (unlikely(du._du->du_type->dst_update_mux)) {
			dispatch_assume(du._du->du_type->dst_update_mux(dmn));
		} else {
			_dispatch_kq_deferred_update(dmn->dmn_wlh, &dmn->dmn_kev);
		}
	}
	if (dispose) {
		struct dispatch_muxnote_bucket_s *dmb;
		dmb = _dispatch_muxnote_bucket(dmn->dmn_kev.ident, dmn->dmn_kev.filter);
		_dispatch_muxnotes_lock();
		TAILQ_REMOVE(dmb, dmn, dmn_list);
		_dispatch_muxnotes_unlock();
		free(dmn);
	}
	return true;
}

DISPATCH_NOINLINE
bool
_dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags)
{
	switch (du._du->du_filter) {
	case DISPATCH_EVFILT_CUSTOM_ADD:
	case DISPATCH_EVFILT_CUSTOM_OR:
	case DISPATCH_EVFILT_CUSTOM_REPLACE:
		du._du->du_wlh = NULL;
		return true;
	}
	dispatch_wlh_t wlh = du._du->du_wlh;
	if (wlh) {
		if (!du._du->du_is_direct) {
			return _dispatch_unote_unregister_muxed(du, flags);
		}
		uint16_t action_flags;
		if (flags & DU_UNREGISTER_ALREADY_DELETED) {
			action_flags = 0;
		} else if (flags & DU_UNREGISTER_IMMEDIATE_DELETE) {
			action_flags = EV_DELETE | EV_ENABLE;
		} else {
			action_flags = EV_DELETE;
		}
		return _dispatch_kq_unote_update(wlh, du, action_flags);
	}
	return true;
}

#pragma mark dispatch_event_loop

void
_dispatch_event_loop_atfork_child(void)
{
#if HAVE_MACH
	_dispatch_mach_host_port_pred = 0;
	_dispatch_mach_host_port = MACH_PORT_NULL;
#endif
}

DISPATCH_NOINLINE
void
_dispatch_event_loop_poke(dispatch_wlh_t wlh, uint64_t dq_state, uint32_t flags)
{
	if (wlh == DISPATCH_WLH_MANAGER) {
		dispatch_kevent_s ke = (dispatch_kevent_s){
			.ident = 1,
			.filter = EVFILT_USER,
			.fflags = NOTE_TRIGGER,
			.udata = (uintptr_t)DISPATCH_WLH_MANAGER,
		};
		return _dispatch_kq_deferred_update(DISPATCH_WLH_ANON, &ke);
	} else if (wlh && wlh != DISPATCH_WLH_ANON) {
		(void)dq_state; (void)flags;
	}
	DISPATCH_INTERNAL_CRASH(wlh, "Unsupported wlh configuration");
}

DISPATCH_NOINLINE
void
_dispatch_event_loop_drain(uint32_t flags)
{
	dispatch_wlh_t wlh = _dispatch_get_wlh();
	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
	int n;

again:
	n = ddi->ddi_nevents;
	ddi->ddi_nevents = 0;
	_dispatch_kq_drain(wlh, ddi->ddi_eventlist, n, flags);

	if ((flags & KEVENT_FLAG_IMMEDIATE) &&
			!(flags & KEVENT_FLAG_ERROR_EVENTS) &&
			_dispatch_needs_to_return_to_kernel()) {
		goto again;
	}
}

void
_dispatch_event_loop_merge(dispatch_kevent_t events, int nevents)
{
	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
	dispatch_kevent_s kev[nevents];

	// now we can re-use the whole event list, but we need to save one slot
	// for the event loop poke
	memcpy(kev, events, sizeof(kev));
	ddi->ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT - 2;

	for (int i = 0; i < nevents; i++) {
		_dispatch_kevent_drain(&kev[i]);
	}

	dispatch_wlh_t wlh = _dispatch_get_wlh();
	if (wlh == DISPATCH_WLH_ANON && ddi->ddi_stashed_dou._do) {
		if (ddi->ddi_nevents) {
			// We will drain the stashed item and not return to the kernel
			// right away. As a consequence, do not delay these updates.
			_dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE |
					KEVENT_FLAG_ERROR_EVENTS);
		}
		_dispatch_trace_continuation_push(ddi->ddi_stashed_rq,
				ddi->ddi_stashed_dou);
	}
}

void
_dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state)
{
	(void)wlh; (void)dq_state;
}

void
_dispatch_event_loop_leave_deferred(dispatch_wlh_t wlh, uint64_t dq_state)
{
	(void)wlh; (void)dq_state;
}

void
_dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc,
		dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state)
{
	(void)dsc; (void)wlh; (void)old_state; (void)new_state;
}

void
_dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc)
{
	if (dsc->dsc_release_storage) {
		_dispatch_queue_release_storage(dsc->dc_data);
	}
}

void
_dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state,
		uint64_t new_state, uint32_t flags)
{
	(void)wlh; (void)old_state; (void)new_state; (void)flags;
}

#if DISPATCH_WLH_DEBUG
void
_dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh)
{
	(void)wlh;
}
#endif // DISPATCH_WLH_DEBUG

#pragma mark dispatch_event_loop timers

#define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8)
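
// Timer kevents encode the timer heap index in the low 8 bits of their ident
// with all high bits set; _dispatch_kevent_timer_drain() recovers tidx by
// masking the high bits back off.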
DISPATCH_NOINLINE
static void
_dispatch_kevent_timer_drain(dispatch_kevent_t ke)
{
	dispatch_assert(ke->data > 0);
	dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) ==
			DISPATCH_KEVENT_TIMEOUT_IDENT_MASK);
	uint32_t tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK;

	dispatch_assert(tidx < DISPATCH_TIMER_COUNT);
	_dispatch_timers_expired = true;
	_dispatch_timers_processing_mask |= 1 << tidx;
	_dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED;
#if DISPATCH_USE_DTRACE
	_dispatch_timers_will_wake |= 1 << DISPATCH_TIMER_QOS(tidx);
#endif
}

DISPATCH_NOINLINE
static void
_dispatch_event_loop_timer_program(uint32_t tidx,
		uint64_t target, uint64_t leeway, uint16_t action)
{
	dispatch_kevent_s ke = {
		.ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK | tidx,
		.filter = EVFILT_TIMER,
		.flags = action | EV_ONESHOT,
		.fflags = _dispatch_timer_index_to_fflags[tidx],
		.data = (int64_t)target,
		.udata = (uintptr_t)&_dispatch_timers_heap[tidx],
#if DISPATCH_HAVE_TIMER_COALESCING
		.ext[1] = leeway,
#endif
#if DISPATCH_USE_KEVENT_QOS
		.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
#endif
	};

	_dispatch_kq_deferred_update(DISPATCH_WLH_ANON, &ke);
}

void
_dispatch_event_loop_timer_arm(uint32_t tidx, dispatch_timer_delay_s range,
		dispatch_clock_now_cache_t nows)
{
	if (unlikely(_dispatch_timers_force_max_leeway)) {
		range.delay += range.leeway;
		range.leeway = 0;
	}
#if HAVE_MACH
	if (DISPATCH_TIMER_CLOCK(tidx) == DISPATCH_CLOCK_WALL) {
		_dispatch_mach_host_calendar_change_register();
	}
#endif

	// <rdar://problem/13186331> EVFILT_TIMER NOTE_ABSOLUTE always expects
	// a WALL deadline
	uint64_t now = _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows);
	_dispatch_timers_heap[tidx].dth_flags |= DTH_ARMED;
	_dispatch_event_loop_timer_program(tidx, now + range.delay, range.leeway,
			EV_ADD | EV_ENABLE);
}

void
_dispatch_event_loop_timer_delete(uint32_t tidx)
{
	_dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED;
	_dispatch_event_loop_timer_program(tidx, 0, 0, EV_DELETE);
}

#pragma mark kevent specific sources

static dispatch_unote_t
_dispatch_source_proc_create(dispatch_source_type_t dst DISPATCH_UNUSED,
		uintptr_t handle, unsigned long mask DISPATCH_UNUSED)
{
	dispatch_unote_t du = _dispatch_unote_create_with_handle(dst, handle, mask);
	if (du._du && (mask & DISPATCH_PROC_EXIT_STATUS)) {
		du._du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET;
	}
	return du;
}

const dispatch_source_type_s _dispatch_source_type_proc = {
	.dst_kind = "proc",
	.dst_filter = EVFILT_PROC,
	.dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR,
	.dst_fflags = NOTE_EXIT, // rdar://16655831
	.dst_mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_EXITSTATUS
#if HAVE_DECL_NOTE_SIGNAL
			|NOTE_SIGNAL
#endif
#if HAVE_DECL_NOTE_REAP
			|NOTE_REAP
#endif
			,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_source_proc_create,
	.dst_merge_evt = _dispatch_source_merge_evt,
};

const dispatch_source_type_s _dispatch_source_type_vnode = {
	.dst_kind = "vnode",
	.dst_filter = EVFILT_VNODE,
	.dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
	.dst_mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK
			|NOTE_RENAME|NOTE_FUNLOCK
#if HAVE_DECL_NOTE_REVOKE
			|NOTE_REVOKE
#endif
#if HAVE_DECL_NOTE_NONE
			|NOTE_NONE
#endif
			,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_unote_create_with_fd,
	.dst_merge_evt = _dispatch_source_merge_evt,
};

const dispatch_source_type_s _dispatch_source_type_vfs = {
	.dst_kind = "vfs",
	.dst_filter = EVFILT_FS,
	.dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR,
	.dst_mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT
			|VQ_DEAD|VQ_ASSIST|VQ_NOTRESPLOCK
#if HAVE_DECL_VQ_UPDATE
			|VQ_UPDATE
#endif
#if HAVE_DECL_VQ_VERYLOWDISK
			|VQ_VERYLOWDISK
#endif
#if HAVE_DECL_VQ_QUOTA
			|VQ_QUOTA
#endif
#if HAVE_DECL_VQ_NEARLOWDISK
			|VQ_NEARLOWDISK
#endif
#if HAVE_DECL_VQ_DESIRED_DISK
			|VQ_DESIRED_DISK
#endif
			,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_unote_create_without_handle,
	.dst_merge_evt = _dispatch_source_merge_evt,
};

#ifdef EVFILT_SOCK
const dispatch_source_type_s _dispatch_source_type_sock = {
	.dst_kind = "sock",
	.dst_filter = EVFILT_SOCK,
	.dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
	.dst_mask = NOTE_CONNRESET|NOTE_READCLOSED|NOTE_WRITECLOSED
			|NOTE_TIMEOUT|NOTE_NOSRCADDR|NOTE_IFDENIED|NOTE_SUSPEND|NOTE_RESUME
			|NOTE_KEEPALIVE
#ifdef NOTE_ADAPTIVE_WTIMO
			|NOTE_ADAPTIVE_WTIMO|NOTE_ADAPTIVE_RTIMO
#endif
#ifdef NOTE_CONNECTED
			|NOTE_CONNECTED|NOTE_DISCONNECTED|NOTE_CONNINFO_UPDATED
#endif
#ifdef NOTE_NOTIFY_ACK
			|NOTE_NOTIFY_ACK
#endif
			,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_unote_create_with_fd,
	.dst_merge_evt = _dispatch_source_merge_evt,
};
#endif // EVFILT_SOCK

#ifdef EVFILT_NW_CHANNEL
const dispatch_source_type_s _dispatch_source_type_nw_channel = {
	.dst_kind = "nw_channel",
	.dst_filter = EVFILT_NW_CHANNEL,
	.dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
	.dst_mask = NOTE_FLOW_ADV_UPDATE,
	.dst_size = sizeof(struct dispatch_source_refs_s),
	.dst_create = _dispatch_unote_create_with_fd,
	.dst_merge_evt = _dispatch_source_merge_evt,
};
#endif // EVFILT_NW_CHANNEL

#if DISPATCH_USE_MEMORYSTATUS

#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \
		DISPATCH_MEMORYPRESSURE_NORMAL | \
		DISPATCH_MEMORYPRESSURE_WARN | \
		DISPATCH_MEMORYPRESSURE_CRITICAL | \
		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL | \
		DISPATCH_MEMORYPRESSURE_MSL_STATUS)

#define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \
		DISPATCH_MEMORYPRESSURE_WARN | \
		DISPATCH_MEMORYPRESSURE_CRITICAL | \
		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL | \
		DISPATCH_MEMORYPRESSURE_MSL_STATUS)

static void
_dispatch_memorypressure_handler(void *context)
{
	dispatch_source_t ds = context;
	unsigned long memorypressure = dispatch_source_get_data(ds);

	if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) {
		_dispatch_memory_warn = false;
		_dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT;
#if VOUCHER_USE_MACH_VOUCHER
		if (_firehose_task_buffer) {
			firehose_buffer_clear_bank_flags(_firehose_task_buffer,
					FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
		}
#endif
	}
	if (memorypressure & DISPATCH_MEMORYPRESSURE_WARN) {
		_dispatch_memory_warn = true;
		_dispatch_continuation_cache_limit =
				DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN;
#if VOUCHER_USE_MACH_VOUCHER
		if (_firehose_task_buffer) {
			firehose_buffer_set_bank_flags(_firehose_task_buffer,
					FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
		}
#endif
	}
	memorypressure &= DISPATCH_MEMORYPRESSURE_MALLOC_MASK;
	if (memorypressure) {
		malloc_memory_event_handler(memorypressure);
	}
}

static void
_dispatch_memorypressure_init(void)
{
	dispatch_source_t ds = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
			DISPATCH_MEMORYPRESSURE_SOURCE_MASK, &_dispatch_mgr_q);
	dispatch_set_context(ds, ds);
	dispatch_source_set_event_handler_f(ds, _dispatch_memorypressure_handler);
	dispatch_activate(ds);
}
#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE

#if TARGET_OS_SIMULATOR // rdar://problem/9219483
static int _dispatch_ios_simulator_memory_warnings_fd = -1;
static void
_dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED)
{
	char *e = getenv("SIMULATOR_MEMORY_WARNINGS");
	if (!e) return;
	_dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY);
	if (_dispatch_ios_simulator_memory_warnings_fd == -1) {
		(void)dispatch_assume_zero(errno);
	}
}

static dispatch_unote_t
_dispatch_source_memorypressure_create(dispatch_source_type_t dst,
		uintptr_t handle, unsigned long mask)
{
	static dispatch_once_t pred;
	dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init);

	if (handle) {
		return DISPATCH_UNOTE_NULL;
	}

	dst = &_dispatch_source_type_vnode;
	handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd;
	mask = NOTE_ATTRIB;

	dispatch_unote_t du = dux_create(dst, handle, mask);
	if (du._du) {
		du._du->du_memorypressure_override = true;
	}
	return du;
}
#endif // TARGET_OS_SIMULATOR

const dispatch_source_type_s _dispatch_source_type_memorypressure = {
	.dst_kind = "memorystatus",
	.dst_filter = EVFILT_MEMORYSTATUS,
	.dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH,
	.dst_mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL
			|NOTE_MEMORYSTATUS_PRESSURE_WARN|NOTE_MEMORYSTATUS_PRESSURE_CRITICAL
			|NOTE_MEMORYSTATUS_LOW_SWAP|NOTE_MEMORYSTATUS_PROC_LIMIT_WARN
			|NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL
			|NOTE_MEMORYSTATUS_MSL_STATUS,
	.dst_size = sizeof(struct dispatch_source_refs_s),

#if TARGET_OS_SIMULATOR
	.dst_create = _dispatch_source_memorypressure_create,
	// redirected to _dispatch_source_type_vnode
#else
	.dst_create = _dispatch_unote_create_without_handle,
	.dst_merge_evt = _dispatch_source_merge_evt,
#endif
};

static dispatch_unote_t
_dispatch_source_vm_create(dispatch_source_type_t dst DISPATCH_UNUSED,
		uintptr_t handle, unsigned long mask DISPATCH_UNUSED)
{
	// Map legacy vm pressure to memorypressure warning rdar://problem/15907505
	dispatch_unote_t du = dux_create(&_dispatch_source_type_memorypressure,
			handle, NOTE_MEMORYSTATUS_PRESSURE_WARN);
	if (du._du) {
		du._du->du_vmpressure_override = 1;
	}
	return du;
}

const dispatch_source_type_s _dispatch_source_type_vm = {
	.dst_kind = "vm (deprecated)",
	.dst_filter = EVFILT_MEMORYSTATUS,
	.dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH,
	.dst_mask = NOTE_VM_PRESSURE,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_source_vm_create,
	// redirected to _dispatch_source_type_memorypressure
};
#endif // DISPATCH_USE_MEMORYSTATUS

#pragma mark mach send / notifications
#if HAVE_MACH

// Flags for all notifications that are registered/unregistered when a
// send-possible notification is requested/delivered
#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \
		DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED)

static void _dispatch_mach_host_notify_update(void *context);

static mach_port_t _dispatch_mach_notify_port;
static dispatch_source_t _dispatch_mach_notify_source;

static void
_dispatch_timers_calendar_change(void)
{
	uint32_t qos;

	// calendar change may have gone past the wallclock deadline
	_dispatch_timers_expired = true;
	for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) {
		_dispatch_timers_processing_mask |=
				1 << DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, qos);
	}
}

static mach_msg_audit_trailer_t *
_dispatch_mach_msg_get_audit_trailer(mach_msg_header_t *hdr)
{
	mach_msg_trailer_t *tlr = NULL;
	mach_msg_audit_trailer_t *audit_tlr = NULL;
	tlr = (mach_msg_trailer_t *)((unsigned char *)hdr +
			round_msg(hdr->msgh_size));
	// The trailer should always be of format zero.
	if (tlr->msgh_trailer_type == MACH_MSG_TRAILER_FORMAT_0) {
		if (tlr->msgh_trailer_size >= sizeof(mach_msg_audit_trailer_t)) {
			audit_tlr = (mach_msg_audit_trailer_t *)tlr;
		}
	}
	return audit_tlr;
}
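
// Feeds messages received on the notify port to the MIG server for the
// libdispatch internal protocol. Kernel notifications (msgh_id <=
// MACH_NOTIFY_LAST) whose audit token shows a non-kernel sender are
// destroyed unhandled; a MIG_BAD_ID for the calendar reply IDs is treated as
// a calendar change and re-arms the host notification.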
DISPATCH_NOINLINE
static void
_dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr)
{
	mig_reply_error_t reply;
	mach_msg_audit_trailer_t *tlr = NULL;
	dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union
			__ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem));
	dispatch_assert(sizeof(mig_reply_error_t) <
			DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE);
	tlr = _dispatch_mach_msg_get_audit_trailer(hdr);
	if (!tlr) {
		DISPATCH_INTERNAL_CRASH(0, "message received without expected trailer");
	}
	if (hdr->msgh_id <= MACH_NOTIFY_LAST
			&& dispatch_assume_zero(tlr->msgh_audit.val[
			DISPATCH_MACH_AUDIT_TOKEN_PID])) {
		mach_msg_destroy(hdr);
		return;
	}
	boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head);
	if (!success && reply.RetCode == MIG_BAD_ID &&
			(hdr->msgh_id == HOST_CALENDAR_SET_REPLYID ||
			hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) {
		_dispatch_debug("calendar-change notification");
		_dispatch_timers_calendar_change();
		_dispatch_mach_host_notify_update(NULL);
		success = TRUE;
		reply.RetCode = KERN_SUCCESS;
	}
	if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) {
		(void)dispatch_assume_zero(reply.RetCode);
	}
	if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) {
		mach_msg_destroy(hdr);
	}
}

DISPATCH_NOINLINE
static void
_dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED)
{
	kern_return_t kr;
#if HAVE_MACH_PORT_CONSTRUCT
	mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT };
#if DISPATCH_SIZEOF_PTR == 8
	const mach_port_context_t guard = 0xfeed09071f1ca7edull;
#else
	const mach_port_context_t guard = 0xff1ca7edull;
#endif
	kr = mach_port_construct(mach_task_self(), &opts, guard,
			&_dispatch_mach_notify_port);
#else
	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
			&_dispatch_mach_notify_port);
#endif
	DISPATCH_VERIFY_MIG(kr);
	if (unlikely(kr)) {
		DISPATCH_CLIENT_CRASH(kr,
				"mach_port_construct() failed: cannot create receive right");
	}

	static const struct dispatch_continuation_s dc = {
		.dc_func = (void*)_dispatch_mach_notify_source_invoke,
	};
	_dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv(
			_dispatch_mach_notify_port, &dc);
	dispatch_assert(_dispatch_mach_notify_source);
	dispatch_activate(_dispatch_mach_notify_source);
}

static void
_dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED)
{
	kern_return_t kr;
	mach_port_t mp, mhp = mach_host_self();
	kr = host_get_host_port(mhp, &mp);
	DISPATCH_VERIFY_MIG(kr);
	if (likely(!kr)) {
		// mach_host_self returned the HOST_PRIV port
		kr = mach_port_deallocate(mach_task_self(), mhp);
		DISPATCH_VERIFY_MIG(kr);
		mhp = mp;
	} else if (kr != KERN_INVALID_ARGUMENT) {
		(void)dispatch_assume_zero(kr);
	}
	if (unlikely(!mhp)) {
		DISPATCH_CLIENT_CRASH(kr, "Could not get unprivileged host port");
	}
	_dispatch_mach_host_port = mhp;
}

mach_port_t
_dispatch_get_mach_host_port(void)
{
	dispatch_once_f(&_dispatch_mach_host_port_pred, NULL,
			_dispatch_mach_host_port_init);
	return _dispatch_mach_host_port;
}

DISPATCH_ALWAYS_INLINE
static inline mach_port_t
_dispatch_get_mach_notify_port(void)
{
	static dispatch_once_t pred;
	dispatch_once_f(&pred, NULL, _dispatch_mach_notify_port_init);
	return _dispatch_mach_notify_port;
}

static void
_dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED)
{
	static int notify_type = HOST_NOTIFY_CALENDAR_SET;
	kern_return_t kr;
	_dispatch_debug("registering for calendar-change notification");
retry:
	kr = host_request_notification(_dispatch_get_mach_host_port(),
			notify_type, _dispatch_get_mach_notify_port());
	// Fallback when missing support for newer _SET variant, fires strictly more
	if (kr == KERN_INVALID_ARGUMENT &&
			notify_type != HOST_NOTIFY_CALENDAR_CHANGE) {
		notify_type = HOST_NOTIFY_CALENDAR_CHANGE;
		goto retry;
	}
	DISPATCH_VERIFY_MIG(kr);
	(void)dispatch_assume_zero(kr);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_mach_host_calendar_change_register(void)
{
	static dispatch_once_t pred;
	dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update);
}
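
// Arm or disarm the kernel send-possible/dead-name notification for the port
// behind dmn; dmn_kev.data tracks which notifications are currently
// requested so redundant mach_port_request_notification() calls are skipped.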
static kern_return_t
_dispatch_mach_notify_update(dispatch_muxnote_t dmn, uint32_t new_flags,
		uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid,
		mach_port_mscount_t notify_sync)
{
	mach_port_t previous, port = (mach_port_t)dmn->dmn_kev.ident;
	typeof(dmn->dmn_kev.data) prev = dmn->dmn_kev.data;
	kern_return_t kr, krr = 0;

	// Update notification registration state.
	dmn->dmn_kev.data |= (new_flags | dmn->dmn_kev.fflags) & mask;
	dmn->dmn_kev.data &= ~(del_flags & mask);

	_dispatch_debug_machport(port);
	if ((dmn->dmn_kev.data & mask) && !(prev & mask)) {
		_dispatch_debug("machport[0x%08x]: registering for send-possible "
				"notification", port);
		previous = MACH_PORT_NULL;
		krr = mach_port_request_notification(mach_task_self(), port,
				notify_msgid, notify_sync, _dispatch_get_mach_notify_port(),
				MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
		DISPATCH_VERIFY_MIG(krr);

		switch (krr) {
		case KERN_INVALID_NAME:
		case KERN_INVALID_RIGHT:
			// Suppress errors & clear registration state
			dmn->dmn_kev.data &= ~mask;
			break;
		default:
			// Else, we don't expect any errors from mach. Log any errors
			if (dispatch_assume_zero(krr)) {
				// log the error & clear registration state
				dmn->dmn_kev.data &= ~mask;
			} else if (dispatch_assume_zero(previous)) {
				// Another subsystem has beat libdispatch to requesting the
				// specified Mach notification on this port. We should
				// technically cache the previous port and message it when the
				// kernel messages our port. Or we can just say screw those
				// subsystems and deallocate the previous port.
				// They should adopt libdispatch :-P
				kr = mach_port_deallocate(mach_task_self(), previous);
				DISPATCH_VERIFY_MIG(kr);
				(void)dispatch_assume_zero(kr);
				previous = MACH_PORT_NULL;
			}
			break;
		}
	} else if (!(dmn->dmn_kev.data & mask) && (prev & mask)) {
		_dispatch_debug("machport[0x%08x]: unregistering for send-possible "
				"notification", port);
		previous = MACH_PORT_NULL;
		kr = mach_port_request_notification(mach_task_self(), port,
				notify_msgid, notify_sync, MACH_PORT_NULL,
				MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous);
		DISPATCH_VERIFY_MIG(kr);

		switch (kr) {
		case KERN_INVALID_NAME:
		case KERN_INVALID_RIGHT:
		case KERN_INVALID_ARGUMENT:
			break;
		default:
			if (dispatch_assume_zero(kr)) {
				// log the error
			}
			break;
		}
	} else {
		return 0;
	}
	if (unlikely(previous)) {
		// the kernel has not consumed the send-once right yet
		(void)dispatch_assume_zero(
				_dispatch_send_consume_send_once_right(previous));
	}
	return krr;
}

DISPATCH_NOINLINE
static bool
_dispatch_kevent_mach_notify_resume(dispatch_muxnote_t dmn, uint32_t new_flags,
		uint32_t del_flags)
{
	kern_return_t kr = KERN_SUCCESS;
	dispatch_assert_zero(new_flags & del_flags);
	if ((new_flags & _DISPATCH_MACH_SP_FLAGS) ||
			(del_flags & _DISPATCH_MACH_SP_FLAGS)) {
		// Requesting a (delayed) non-sync send-possible notification
		// registers for both immediate dead-name notification and delayed-arm
		// send-possible notification for the port.
		// The send-possible notification is armed when a mach_msg() with
		// MACH_SEND_NOTIFY to the port times out.
		// If send-possible is unavailable, fall back to immediate dead-name
		// registration rdar://problem/2527840&9008724
		kr = _dispatch_mach_notify_update(dmn, new_flags, del_flags,
				_DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE,
				MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME);
	}
	return kr == KERN_SUCCESS;
}

DISPATCH_NOINLINE
static void
_dispatch_mach_notify_merge(mach_port_t name, uint32_t data, bool final)
{
	dispatch_unote_linkage_t dul, dul_next;
	dispatch_muxnote_t dmn;

	_dispatch_debug_machport(name);
	dmn = _dispatch_mach_muxnote_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION);
	if (!dmn) {
		return;
	}

	dmn->dmn_kev.data &= ~_DISPATCH_MACH_SP_FLAGS;
	if (!final) {
		// Re-register for notification before delivery
		final = !_dispatch_kevent_mach_notify_resume(dmn, data, 0);
	}

	uint32_t flags = final ? EV_ONESHOT : EV_ENABLE;
	DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = 0;
	TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) {
		dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul);
		os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed);
		dux_merge_evt(du._du, flags, (data & du._du->du_fflags), 0, 0);
		if (!dul_next || DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev)) {
			// current merge is last in list (dmn might have been freed)
			// or it re-armed the notification
			return;
		}
	}
}

kern_return_t
_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED,
		mach_port_name_t name)
{
#if DISPATCH_DEBUG
	_dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x "
			"deleted prematurely", name);
#endif
	_dispatch_debug_machport(name);
	_dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, true);
	return KERN_SUCCESS;
}

kern_return_t
_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED,
		mach_port_name_t name)
{
	kern_return_t kr;

	_dispatch_debug("machport[0x%08x]: dead-name notification", name);
	_dispatch_debug_machport(name);
	_dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, true);

	// the act of receiving a dead name notification allocates a dead-name
	// right that must be deallocated
	kr = mach_port_deallocate(mach_task_self(), name);
	DISPATCH_VERIFY_MIG(kr);
	//(void)dispatch_assume_zero(kr);
	return KERN_SUCCESS;
}

kern_return_t
_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED,
		mach_port_name_t name)
{
	_dispatch_debug("machport[0x%08x]: send-possible notification", name);
	_dispatch_debug_machport(name);
	_dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, false);
	return KERN_SUCCESS;
}

void
_dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr)
{
	dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(dmsr)->du_muxnote;
	dispatch_unote_linkage_t dul;
	dispatch_unote_t du;

	if (!_dispatch_unote_registered(dmsr)) {
		return;
	}

	DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = true;
	TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) {
		du = _dispatch_unote_linkage_get_unote(dul);
		os_atomic_store2o(du._dmsr, dmsr_notification_armed, true, relaxed);
	}
}

static dispatch_unote_t
_dispatch_source_mach_send_create(dispatch_source_type_t dst,
		uintptr_t handle, unsigned long mask)
{
	if (!mask) {
		// Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD
		mask = DISPATCH_MACH_SEND_DEAD;
	}
	if (!handle) {
		handle = MACH_PORT_DEAD; // <rdar://problem/27651332>
	}
	return _dispatch_unote_create_with_handle(dst, handle, mask);
}

static bool
_dispatch_mach_send_update(dispatch_muxnote_t dmn)
{
	if (dmn->dmn_kev.flags & EV_DELETE) {
		return _dispatch_kevent_mach_notify_resume(dmn, 0, dmn->dmn_kev.fflags);
	} else {
		return _dispatch_kevent_mach_notify_resume(dmn, dmn->dmn_kev.fflags, 0);
	}
}

const dispatch_source_type_s _dispatch_source_type_mach_send = {
	.dst_kind = "mach_send",
	.dst_filter = DISPATCH_EVFILT_MACH_NOTIFICATION,
	.dst_flags = EV_CLEAR,
	.dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_source_mach_send_create,
	.dst_update_mux = _dispatch_mach_send_update,
	.dst_merge_evt = _dispatch_source_merge_evt,
};

static dispatch_unote_t
_dispatch_mach_send_create(dispatch_source_type_t dst,
		uintptr_t handle, unsigned long mask)
{
	// without handle because the mach code will set the ident later
	dispatch_unote_t du =
			_dispatch_unote_create_without_handle(dst, handle, mask);
	if (du._dmsr) {
		du._dmsr->dmsr_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED;
		TAILQ_INIT(&du._dmsr->dmsr_replies);
	}
	return du;
}

const dispatch_source_type_s _dispatch_mach_type_send = {
	.dst_kind = "mach_send (mach)",
	.dst_filter = DISPATCH_EVFILT_MACH_NOTIFICATION,
	.dst_flags = EV_CLEAR,
	.dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE,
	.dst_size = sizeof(struct dispatch_mach_send_refs_s),

	.dst_create = _dispatch_mach_send_create,
	.dst_update_mux = _dispatch_mach_send_update,
	.dst_merge_evt = _dispatch_mach_merge_notification,
};

#pragma mark mach recv / reply

DISPATCH_NOINLINE
static void
_dispatch_kevent_mach_msg_recv(dispatch_unote_t du, uint32_t flags,
		mach_msg_header_t *hdr)
{
	mach_msg_size_t siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE;
	mach_port_t name = hdr->msgh_local_port;

	if (!dispatch_assume(hdr->msgh_size <= UINT_MAX -
			DISPATCH_MACH_TRAILER_SIZE)) {
		_dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
				"received overlarge message");
	} else if (!dispatch_assume(name)) {
		_dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
				"received message with MACH_PORT_NULL port");
	} else {
		_dispatch_debug_machport(name);
		if (likely(du._du)) {
			return dux_merge_msg(du._du, flags, hdr, siz);
		}
		_dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
				"received message with no listeners");
	}

	mach_msg_destroy(hdr);
	if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
		free(hdr);
	}
}

DISPATCH_NOINLINE
static void
_dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke)
{
	mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke);
	mach_msg_size_t siz;
	mach_msg_return_t kr = (mach_msg_return_t)ke->fflags;
	uint32_t flags = ke->flags;
	dispatch_unote_t du = _dispatch_kevent_get_unote(ke);

	if (unlikely(!hdr)) {
		DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message");
	}
	if (likely(!kr)) {
		_dispatch_kevent_mach_msg_recv(du, flags, hdr);
		goto out;
	} else if (kr != MACH_RCV_TOO_LARGE) {
		goto out;
	} else if (!ke->data) {
		DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity");
	}
	if (unlikely(ke->ext[1] > (UINT_MAX - DISPATCH_MACH_TRAILER_SIZE))) {
		DISPATCH_INTERNAL_CRASH(ke->ext[1],
				"EVFILT_MACHPORT with overlarge message");
	}
	siz = _dispatch_kevent_mach_msg_size(ke) + DISPATCH_MACH_TRAILER_SIZE;
	hdr = malloc(siz);
	if (dispatch_assume(hdr)) {
		flags |= DISPATCH_EV_MSG_NEEDS_FREE;
	} else {
		// Kernel will discard message too large to fit
		hdr = NULL;
		siz = 0;
	}
	mach_port_t name = (mach_port_name_t)ke->data;
	const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS |
			MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE);
	kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE,
			MACH_PORT_NULL);
	if (likely(!kr)) {
		_dispatch_kevent_mach_msg_recv(du, flags, hdr);
		goto out;
	} else if (kr == MACH_RCV_TOO_LARGE) {
		_dispatch_log("BUG in libdispatch client: "
				"_dispatch_kevent_mach_msg_drain: dropped message too "
				"large to fit in memory: id = 0x%x, size = %u",
				hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke));
		kr = MACH_MSG_SUCCESS;
	}
	if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
		free(hdr);
	}
out:
	if (unlikely(kr)) {
		_dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: "
				"message reception failed", kr);
	}
}

const dispatch_source_type_s _dispatch_source_type_mach_recv = {
	.dst_kind = "mach_recv",
	.dst_filter = EVFILT_MACHPORT,
	.dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_unote_create_with_handle,
	.dst_merge_evt = _dispatch_source_merge_evt,
	.dst_merge_msg = NULL, // never receives messages directly

	.dst_per_trigger_qos = true,
};
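
// For direct mach receive sources the event handler is invoked synchronously
// with the message, impersonating the source's queue (see
// firehose_client_push_notify_async); afterwards the unote is either rearmed
// or the final event merged.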
static void
_dispatch_source_mach_recv_direct_merge_msg(dispatch_unote_t du, uint32_t flags,
		mach_msg_header_t *msg, mach_msg_size_t msgsz DISPATCH_UNUSED)
{
	dispatch_continuation_t dc = du._dr->ds_handler[DS_EVENT_HANDLER];
	dispatch_source_t ds = _dispatch_source_from_refs(du._dr);
	dispatch_queue_t cq = _dispatch_queue_get_current();

	// see firehose_client_push_notify_async
	_dispatch_queue_set_current(ds->_as_dq);
	dc->dc_func(msg);
	_dispatch_queue_set_current(cq);
	if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
		free(msg);
	}
	if ((ds->dq_atomic_flags & DSF_CANCELED) ||
			(flags & (EV_ONESHOT | EV_DELETE))) {
		return _dispatch_source_merge_evt(du, flags, 0, 0, 0);
	}
	if (_dispatch_unote_needs_rearm(du)) {
		return _dispatch_unote_resume(du);
	}
}

DISPATCH_NOINLINE
static void
_dispatch_mach_recv_direct_merge(dispatch_unote_t du,
		uint32_t flags, uintptr_t data,
		uintptr_t status DISPATCH_UNUSED,
		pthread_priority_t pp)
{
	if (flags & EV_VANISHED) {
		DISPATCH_CLIENT_CRASH(du._du->du_ident,
				"Unexpected EV_VANISHED (do not destroy random mach ports)");
	}
	return _dispatch_source_merge_evt(du, flags, data, 0, pp);
}

const dispatch_source_type_s _dispatch_source_type_mach_recv_direct = {
	.dst_kind = "direct mach_recv",
	.dst_filter = EVFILT_MACHPORT,
	.dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
	.dst_fflags = DISPATCH_MACH_RCV_OPTIONS,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_unote_create_with_handle,
	.dst_merge_evt = _dispatch_mach_recv_direct_merge,
	.dst_merge_msg = _dispatch_source_mach_recv_direct_merge_msg,

	.dst_per_trigger_qos = true,
};

const dispatch_source_type_s _dispatch_mach_type_recv = {
	.dst_kind = "mach_recv (channel)",
	.dst_filter = EVFILT_MACHPORT,
	.dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
	.dst_fflags = DISPATCH_MACH_RCV_OPTIONS,
	.dst_size = sizeof(struct dispatch_mach_recv_refs_s),

	// without handle because the mach code will set the ident after connect
	.dst_create = _dispatch_unote_create_without_handle,
	.dst_merge_evt = _dispatch_mach_recv_direct_merge,
	.dst_merge_msg = _dispatch_mach_merge_msg,

	.dst_per_trigger_qos = true,
};

static void
_dispatch_mach_reply_merge_evt(dispatch_unote_t du,
		uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED,
		uintptr_t status DISPATCH_UNUSED,
		pthread_priority_t pp DISPATCH_UNUSED)
{
	DISPATCH_INTERNAL_CRASH(du._du->du_ident, "Unexpected event");
}

const dispatch_source_type_s _dispatch_mach_type_reply = {
	.dst_kind = "mach reply",
	.dst_filter = EVFILT_MACHPORT,
	.dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_ONESHOT|EV_VANISHED,
	.dst_fflags = DISPATCH_MACH_RCV_OPTIONS,
	.dst_size = sizeof(struct dispatch_mach_reply_refs_s),

	.dst_create = _dispatch_unote_create_with_handle,
	.dst_merge_evt = _dispatch_mach_reply_merge_evt,
	.dst_merge_msg = _dispatch_mach_reply_merge_msg,
};

#pragma mark Mach channel SIGTERM notification (for XPC channels only)

const dispatch_source_type_s _dispatch_xpc_type_sigterm = {
	.dst_kind = "sigterm (xpc)",
	.dst_filter = EVFILT_SIGNAL,
	.dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_ONESHOT,
	.dst_size = sizeof(struct dispatch_xpc_term_refs_s),

	.dst_create = _dispatch_unote_create_with_handle,
	.dst_merge_evt = _dispatch_xpc_sigterm_merge,
};

#endif // HAVE_MACH

#endif // DISPATCH_EVENT_BACKEND_KEVENT