/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
#define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1
#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2
#define DISPATCH_MACH_WAIT_FOR_REPLY 0x4
#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8
#define DISPATCH_MACH_ASYNC_REPLY 0x10
#define DISPATCH_MACH_OPTIONS_MASK 0xffff

#define DM_SEND_STATUS_SUCCESS 0x1
#define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2
DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t,
	DM_SEND_INVOKE_NONE            = 0x0,
	DM_SEND_INVOKE_MAKE_DIRTY      = 0x1,
	DM_SEND_INVOKE_NEEDS_BARRIER   = 0x2,
	DM_SEND_INVOKE_CANCEL          = 0x4,
	DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8,
	DM_SEND_INVOKE_IMMEDIATE_SEND  = 0x10,
);
#define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \
		((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND)
static inline mach_msg_option_t _dispatch_mach_checkin_options(void);
static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou);
static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou);
static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm,
		mach_port_t local_port, mach_port_t remote_port);
static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr, mach_port_t local_port);
static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected(
		dispatch_object_t dou, dispatch_mach_reply_refs_t dmr,
		dispatch_mach_reason_t reason);
static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm,
		dispatch_object_t dou);
static inline mach_msg_header_t *_dispatch_mach_msg_get_msg(
		dispatch_mach_msg_t dmsg);
static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou,
		dispatch_qos_t qos);
static void _dispatch_mach_cancel(dispatch_mach_t dm);
static void _dispatch_mach_push_send_barrier_drain(dispatch_mach_t dm,
		dispatch_qos_t qos);
static void _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm,
		dispatch_mach_msg_t dmsg);
static void _dispatch_mach_push_async_reply_msg(dispatch_mach_t dm,
		dispatch_mach_msg_t dmsg, dispatch_queue_t drq);
static dispatch_queue_t _dispatch_mach_msg_context_async_reply_queue(
		void *msg_context);
static dispatch_continuation_t _dispatch_mach_msg_async_reply_wrap(
		dispatch_mach_msg_t dmsg, dispatch_mach_t dm);
static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm);
static void _dispatch_mach_notification_kevent_register(dispatch_mach_t dm,
		mach_port_t send);
DISPATCH_EXPORT void
_dispatch_mach_hooks_install_default(void);
dispatch_source_t
_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp,
		const struct dispatch_continuation_s *dc)
{
	dispatch_source_t ds;
	ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct,
			recvp, 0, &_dispatch_mgr_q);
	os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER],
			(dispatch_continuation_t)dc, relaxed);
	return ds;
}
#pragma mark -
#pragma mark dispatch to XPC callbacks

static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks;

// Default dmxh_direct_message_handler callback that does not handle
// messages inline.
static bool
_dispatch_mach_xpc_no_handle_message(
		void *_Nullable context DISPATCH_UNUSED,
		dispatch_mach_reason_t reason DISPATCH_UNUSED,
		dispatch_mach_msg_t message DISPATCH_UNUSED,
		mach_error_t error DISPATCH_UNUSED)
{
	return false;
}

// Default dmxh_msg_context_reply_queue callback that returns a NULL queue.
static dispatch_queue_t
_dispatch_mach_msg_context_no_async_reply_queue(
		void *_Nonnull msg_context DISPATCH_UNUSED)
{
	return NULL;
}

// Default dmxh_async_reply_handler callback that crashes when called.
DISPATCH_NORETURN
static void
_dispatch_mach_default_async_reply_handler(void *context DISPATCH_UNUSED,
		dispatch_mach_reason_t reason DISPATCH_UNUSED,
		dispatch_mach_msg_t message DISPATCH_UNUSED)
{
	DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks,
			"_dispatch_mach_default_async_reply_handler called");
}

// Default dmxh_enable_sigterm_notification callback that enables delivery of
// SIGTERM notifications (for backwards compatibility).
static bool
_dispatch_mach_enable_sigterm(void *_Nullable context DISPATCH_UNUSED)
{
	return true;
}

// Callbacks from dispatch to XPC. The default is to not support any callbacks.
static const struct dispatch_mach_xpc_hooks_s _dispatch_mach_xpc_hooks_default
		= {
	.version = DISPATCH_MACH_XPC_HOOKS_VERSION,
	.dmxh_direct_message_handler = &_dispatch_mach_xpc_no_handle_message,
	.dmxh_msg_context_reply_queue =
			&_dispatch_mach_msg_context_no_async_reply_queue,
	.dmxh_async_reply_handler = &_dispatch_mach_default_async_reply_handler,
	.dmxh_enable_sigterm_notification = &_dispatch_mach_enable_sigterm,
};

static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks
		= &_dispatch_mach_xpc_hooks_default;

void
dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks)
{
	if (!os_atomic_cmpxchg(&_dispatch_mach_xpc_hooks,
			&_dispatch_mach_xpc_hooks_default, hooks, relaxed)) {
		DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks,
				"dispatch_mach_hooks_install_4libxpc called twice");
	}
}

void
_dispatch_mach_hooks_install_default(void)
{
	os_atomic_store(&_dispatch_mach_xpc_hooks,
			&_dispatch_mach_xpc_hooks_default, relaxed);
}
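
/*
 * Illustrative sketch, not part of this file: libxpc is expected to install
 * its hook table once, early in process bootstrap (the table name
 * _xpc_mach_hooks below is hypothetical):
 *
 *	static const struct dispatch_mach_xpc_hooks_s _xpc_mach_hooks = {
 *		.version = DISPATCH_MACH_XPC_HOOKS_VERSION,
 *		// real callbacks instead of the no-op/crashing defaults above
 *	};
 *	dispatch_mach_hooks_install_4libxpc(&_xpc_mach_hooks);
 *
 * The cmpxchg in dispatch_mach_hooks_install_4libxpc() only succeeds against
 * the default table, so a second installation crashes the client by design.
 */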
#pragma mark -
#pragma mark dispatch_mach_t

static dispatch_mach_t
_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context,
		dispatch_mach_handler_function_t handler, bool handler_is_block,
		bool is_xpc)
{
	dispatch_mach_recv_refs_t dmrr;
	dispatch_mach_send_refs_t dmsr;
	dispatch_mach_t dm;
	dm = _dispatch_object_alloc(DISPATCH_VTABLE(mach),
			sizeof(struct dispatch_mach_s));
	_dispatch_queue_init(dm->_as_dq, DQF_LEGACY, 1,
			DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER);

	dm->dq_label = label;
	dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds
	dm->dm_is_xpc = is_xpc;

	dmrr = dux_create(&_dispatch_mach_type_recv, 0, 0)._dmrr;
	dispatch_assert(dmrr->du_is_direct);
	dmrr->du_owner_wref = _dispatch_ptr2wref(dm);
	dmrr->dmrr_handler_func = handler;
	dmrr->dmrr_handler_ctxt = context;
	dmrr->dmrr_handler_is_block = handler_is_block;
	dm->dm_recv_refs = dmrr;

	dmsr = dux_create(&_dispatch_mach_type_send, 0,
			DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD)._dmsr;
	dmsr->du_owner_wref = _dispatch_ptr2wref(dm);
	dm->dm_send_refs = dmsr;

	if (!q) {
		q = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true);
	} else {
		_dispatch_retain(q);
	}
	dm->do_targetq = q;
	_dispatch_object_debug(dm, "%s", __func__);
	return dm;
}
dispatch_mach_t
dispatch_mach_create(const char *label, dispatch_queue_t q,
		dispatch_mach_handler_t handler)
{
	dispatch_block_t bb = _dispatch_Block_copy((void*)handler);
	return _dispatch_mach_create(label, q, bb,
			(dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true,
			false);
}

dispatch_mach_t
dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context,
		dispatch_mach_handler_function_t handler)
{
	return _dispatch_mach_create(label, q, context, handler, false, false);
}

dispatch_mach_t
dispatch_mach_create_4libxpc(const char *label, dispatch_queue_t q,
		void *context, dispatch_mach_handler_function_t handler)
{
	return _dispatch_mach_create(label, q, context, handler, false, true);
}
void
_dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free)
{
	_dispatch_object_debug(dm, "%s", __func__);
	_dispatch_unote_dispose(dm->dm_recv_refs);
	dm->dm_recv_refs = NULL;
	_dispatch_unote_dispose(dm->dm_send_refs);
	dm->dm_send_refs = NULL;
	if (dm->dm_xpc_term_refs) {
		_dispatch_unote_dispose(dm->dm_xpc_term_refs);
		dm->dm_xpc_term_refs = NULL;
	}
	_dispatch_queue_destroy(dm->_as_dq, allow_free);
}
void
dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive,
		mach_port_t send, dispatch_mach_msg_t checkin)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	uint32_t disconnect_cnt;

	if (MACH_PORT_VALID(receive)) {
		dm->dm_recv_refs->du_ident = receive;
		_dispatch_retain(dm); // the reference the manager queue holds
	}
	dmsr->dmsr_send = send;
	if (MACH_PORT_VALID(send)) {
		if (checkin) {
			dispatch_mach_msg_t dmsg = checkin;
			dispatch_retain(dmsg);
			dmsg->dmsg_options = _dispatch_mach_checkin_options();
			dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg);
		}
		dmsr->dmsr_checkin = checkin;
	}
	dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 ==
			DISPATCH_MACH_NEVER_INSTALLED);
	disconnect_cnt = os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, release);
	if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) {
		DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected");
	}
	_dispatch_object_debug(dm, "%s", __func__);
	return dispatch_activate(dm);
}
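
/*
 * Usage sketch (illustrative; the port and queue names are hypothetical):
 * a channel is created inactive and must be connected exactly once before
 * messages can flow:
 *
 *	dispatch_mach_t dm = dispatch_mach_create("com.example.peer", queue,
 *			^(dispatch_mach_reason_t reason, dispatch_mach_msg_t msg,
 *			mach_error_t error) {
 *		// DISPATCH_MACH_MESSAGE_RECEIVED, DISPATCH_MACH_DISCONNECTED, ...
 *	});
 *	dispatch_mach_connect(dm, recv_port, send_port, NULL);
 *
 * The dmsr_disconnect_cnt decrement above enforces the "exactly once": a
 * second connect trips the "Channel already connected" crash.
 */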
static bool
_dispatch_mach_reply_tryremove(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr)
{
	bool removed;
	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
	if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
		TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
	}
	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
	return removed;
}
DISPATCH_NOINLINE
static void
_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr, uint32_t options)
{
	dispatch_mach_msg_t dmsgr = NULL;
	bool disconnected = (options & DU_UNREGISTER_DISCONNECTED);
	if (options & DU_UNREGISTER_REPLY_REMOVE) {
		_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
		if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
			DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration");
		}
		TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
		_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
	}
	if (disconnected) {
		dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr,
				DISPATCH_MACH_DISCONNECTED);
	} else if (dmr->dmr_voucher) {
		_voucher_release(dmr->dmr_voucher);
		dmr->dmr_voucher = NULL;
	}
	_dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p",
			_dispatch_mach_reply_get_reply_port((mach_port_t)dmr->du_ident),
			disconnected ? " (disconnected)" : "", dmr->dmr_ctxt);
	if (dmsgr) {
		return _dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
	}
}
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_mach_reply_list_remove(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr) {
	// dmsr_replies_lock must be held by the caller.
	bool removed = false;
	if (likely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
		TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
		removed = true;
	}
	return removed;
}
DISPATCH_NOINLINE
static bool
_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr, uint32_t options)
{
	dispatch_assert(!_TAILQ_IS_ENQUEUED(dmr, dmr_list));

	bool disconnected = (options & DU_UNREGISTER_DISCONNECTED);
	_dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p",
			(mach_port_t)dmr->du_ident, disconnected ? " (disconnected)" : "",
			dmr->dmr_ctxt);
	if (!_dispatch_unote_unregister(dmr, options)) {
		_dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]",
				(mach_port_t)dmr->du_ident, dmr);
		dispatch_assert(options == DU_UNREGISTER_DISCONNECTED);
		return false;
	}

	dispatch_mach_msg_t dmsgr = NULL;
	dispatch_queue_t drq = NULL;
	if (disconnected) {
		// The next call is guaranteed to always transfer or consume the voucher
		// in the dmr, if there is one.
		dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr,
				dmr->dmr_async_reply ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED
				: DISPATCH_MACH_DISCONNECTED);
		if (dmr->dmr_async_reply) {
			drq = _dispatch_mach_msg_context_async_reply_queue(dmr->dmr_ctxt);
		}
		dispatch_assert(dmr->dmr_voucher == NULL);
	} else if (dmr->dmr_voucher) {
		_voucher_release(dmr->dmr_voucher);
		dmr->dmr_voucher = NULL;
	}
	_dispatch_unote_dispose(dmr);

	if (dmsgr) {
		if (drq) {
			_dispatch_mach_push_async_reply_msg(dm, dmsgr, drq);
		} else {
			_dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
		}
	}
	return true;
}
DISPATCH_NOINLINE
static void
_dispatch_mach_reply_waiter_register(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr, mach_port_t reply_port,
		dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts)
{
	dmr->du_owner_wref = _dispatch_ptr2wref(dm);
	dmr->du_filter = EVFILT_MACHPORT;
	dmr->du_ident = reply_port;
	if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) {
		_dispatch_mach_reply_mark_reply_port_owned(dmr);
	} else {
		if (dmsg->dmsg_voucher) {
			dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher);
		}
		dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority);
		// make reply context visible to leaks rdar://11777199
		dmr->dmr_ctxt = dmsg->do_ctxt;
	}

	_dispatch_debug("machport[0x%08x]: registering for sync reply, ctxt %p",
			reply_port, dmsg->do_ctxt);
	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
	if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
		DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev,
				"Reply already registered");
	}
	TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
}
DISPATCH_NOINLINE
static void
_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port,
		dispatch_mach_msg_t dmsg)
{
	dispatch_mach_reply_refs_t dmr;
	dispatch_priority_t mpri, pri, overcommit;
	dispatch_wlh_t wlh;

	dmr = dux_create(&_dispatch_mach_type_reply, reply_port, 0)._dmr;
	dispatch_assert(dmr->du_is_direct);
	dmr->du_owner_wref = _dispatch_ptr2wref(dm);
	if (dmsg->dmsg_voucher) {
		dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher);
	}
	dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority);
	// make reply context visible to leaks rdar://11777199
	dmr->dmr_ctxt = dmsg->do_ctxt;

	dispatch_queue_t drq = NULL;
	if (dmsg->dmsg_options & DISPATCH_MACH_ASYNC_REPLY) {
		dmr->dmr_async_reply = true;
		drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
	}

	if (!drq) {
		pri = dm->dq_priority;
		wlh = dm->dm_recv_refs->du_wlh;
	} else if (dx_type(drq) == DISPATCH_QUEUE_NETWORK_EVENT_TYPE) {
		pri = DISPATCH_PRIORITY_FLAG_MANAGER;
		wlh = (dispatch_wlh_t)drq;
	} else if (dx_hastypeflag(drq, QUEUE_ROOT)) {
		pri = drq->dq_priority;
		wlh = DISPATCH_WLH_ANON;
	} else if (drq == dm->do_targetq) {
		pri = dm->dq_priority;
		wlh = dm->dm_recv_refs->du_wlh;
	} else if (!(pri = _dispatch_queue_compute_priority_and_wlh(drq, &wlh))) {
		pri = drq->dq_priority;
		wlh = DISPATCH_WLH_ANON;
	}
	if (pri & DISPATCH_PRIORITY_REQUESTED_MASK) {
		overcommit = pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
		pri &= DISPATCH_PRIORITY_REQUESTED_MASK;
		mpri = _dispatch_priority_from_pp_strip_flags(dmsg->dmsg_priority);
		if (pri < mpri) pri = mpri;
		pri |= overcommit;
	} else {
		pri = DISPATCH_PRIORITY_FLAG_MANAGER;
	}

	_dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p",
			reply_port, dmsg->do_ctxt);
	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
	if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
		DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev,
				"Reply already registered");
	}
	TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);

	if (!_dispatch_unote_register(dmr, wlh, pri)) {
		_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
		_dispatch_mach_reply_list_remove(dm, dmr);
		_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
		_dispatch_mach_reply_kevent_unregister(dm, dmr,
				DU_UNREGISTER_DISCONNECTED);
	}
}
#pragma mark -
#pragma mark dispatch_mach_msg

DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline bool
_dispatch_use_mach_special_reply_port(void)
{
#if DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE
	return true;
#else
#define thread_get_special_reply_port() ({__builtin_trap(); MACH_PORT_NULL;})
	return false;
#endif
}
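
// When DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE is compiled out, the special
// reply port must never be requested: _dispatch_use_mach_special_reply_port()
// returns false so every caller stays on the per-thread MIG reply port, and
// thread_get_special_reply_port() is stubbed to trap if reached anyway.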
static mach_port_t
_dispatch_get_thread_reply_port(void)
{
	mach_port_t reply_port, mrp;
	if (_dispatch_use_mach_special_reply_port()) {
		mrp = _dispatch_get_thread_special_reply_port();
	} else {
		mrp = _dispatch_get_thread_mig_reply_port();
	}
	if (mrp) {
		reply_port = mrp;
		_dispatch_debug("machport[0x%08x]: borrowed thread sync reply port",
				reply_port);
	} else {
		if (_dispatch_use_mach_special_reply_port()) {
			reply_port = thread_get_special_reply_port();
			_dispatch_set_thread_special_reply_port(reply_port);
		} else {
			reply_port = mach_reply_port();
			_dispatch_set_thread_mig_reply_port(reply_port);
		}
		if (unlikely(!MACH_PORT_VALID(reply_port))) {
			DISPATCH_CLIENT_CRASH(_dispatch_use_mach_special_reply_port(),
					"Unable to allocate reply port, possible port leak");
		}
		_dispatch_debug("machport[0x%08x]: allocated thread sync reply port",
				reply_port);
	}
	_dispatch_debug_machport(reply_port);
	return reply_port;
}
static void
_dispatch_clear_thread_reply_port(mach_port_t reply_port)
{
	mach_port_t mrp;
	if (_dispatch_use_mach_special_reply_port()) {
		mrp = _dispatch_get_thread_special_reply_port();
	} else {
		mrp = _dispatch_get_thread_mig_reply_port();
	}
	if (reply_port != mrp) {
		if (mrp) {
			_dispatch_debug("machport[0x%08x]: did not clear thread sync reply "
					"port (found 0x%08x)", reply_port, mrp);
		}
		return;
	}
	if (_dispatch_use_mach_special_reply_port()) {
		_dispatch_set_thread_special_reply_port(MACH_PORT_NULL);
	} else {
		_dispatch_set_thread_mig_reply_port(MACH_PORT_NULL);
	}
	_dispatch_debug_machport(reply_port);
	_dispatch_debug("machport[0x%08x]: cleared thread sync reply port",
			reply_port);
}
static void
_dispatch_set_thread_reply_port(mach_port_t reply_port)
{
	_dispatch_debug_machport(reply_port);
	mach_port_t mrp;
	if (_dispatch_use_mach_special_reply_port()) {
		mrp = _dispatch_get_thread_special_reply_port();
	} else {
		mrp = _dispatch_get_thread_mig_reply_port();
	}
	if (mrp) {
		kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port,
				MACH_PORT_RIGHT_RECEIVE, -1);
		DISPATCH_VERIFY_MIG(kr);
		dispatch_assume_zero(kr);
		_dispatch_debug("machport[0x%08x]: deallocated sync reply port "
				"(found 0x%08x)", reply_port, mrp);
	} else {
		if (_dispatch_use_mach_special_reply_port()) {
			_dispatch_set_thread_special_reply_port(reply_port);
		} else {
			_dispatch_set_thread_mig_reply_port(reply_port);
		}
		_dispatch_debug("machport[0x%08x]: restored thread sync reply port",
				reply_port);
	}
}
static inline mach_port_t
_dispatch_mach_msg_get_remote_port(dispatch_object_t dou)
{
	mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
	mach_port_t remote = hdr->msgh_remote_port;
	return remote;
}

static inline mach_port_t
_dispatch_mach_msg_get_reply_port(dispatch_object_t dou)
{
	mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
	mach_port_t local = hdr->msgh_local_port;
	if (!MACH_PORT_VALID(local) || MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) !=
			MACH_MSG_TYPE_MAKE_SEND_ONCE) return MACH_PORT_NULL;
	return local;
}
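
// A message is only considered to carry a reply port when its local port is
// valid and the msgh_bits local disposition is MACH_MSG_TYPE_MAKE_SEND_ONCE,
// i.e. the kernel will mint a send-once right the receiver can reply to; any
// other disposition yields MACH_PORT_NULL here.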
static inline void
_dispatch_mach_msg_set_reason(dispatch_mach_msg_t dmsg, mach_error_t err,
		unsigned long reason)
{
	dispatch_assert_zero(reason & ~(unsigned long)code_emask);
	dmsg->dmsg_error = ((err || !reason) ? err :
			err_local|err_sub(0x3e0)|(mach_error_t)reason);
}

static inline unsigned long
_dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr)
{
	mach_error_t err = dmsg->dmsg_error;

	if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) {
		*err_ptr = 0;
		return err_get_code(err);
	}
	*err_ptr = err;
	return err ? DISPATCH_MACH_MESSAGE_SEND_FAILED : DISPATCH_MACH_MESSAGE_SENT;
}
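
/*
 * Sketch of the encoding used by the two helpers above: a dispatch-generated
 * reason is packed into the mach_error_t as
 *
 *	dmsg_error = err_local | err_sub(0x3e0) | reason
 *
 * so the decoder can tell dispatch reasons apart from genuine Mach send
 * errors: if the system is err_local and the subsystem is 0x3e0, *err_ptr is
 * cleared and err_get_code() recovers the reason; otherwise the raw error is
 * reported and the reason is DISPATCH_MACH_MESSAGE_SEND_FAILED (or
 * DISPATCH_MACH_MESSAGE_SENT when there was no error at all).
 */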
static inline dispatch_mach_msg_t
_dispatch_mach_msg_create_recv(mach_msg_header_t *hdr, mach_msg_size_t siz,
		dispatch_mach_reply_refs_t dmr, uint32_t flags)
{
	dispatch_mach_msg_destructor_t destructor;
	dispatch_mach_msg_t dmsg;
	voucher_t voucher;
	pthread_priority_t pp;

	if (dmr) {
		_voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher
		pp = _dispatch_priority_to_pp(dmr->dmr_priority);
		voucher = dmr->dmr_voucher;
		dmr->dmr_voucher = NULL; // transfer reference
	} else {
		voucher = voucher_create_with_mach_msg(hdr);
		pp = _dispatch_priority_compute_propagated(
				_voucher_get_priority(voucher), 0);
	}

	destructor = (flags & DISPATCH_EV_MSG_NEEDS_FREE) ?
			DISPATCH_MACH_MSG_DESTRUCTOR_FREE :
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT;
	dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
	if (!(flags & DISPATCH_EV_MSG_NEEDS_FREE)) {
		_dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move,
				(uint64_t)hdr, (uint64_t)dmsg->dmsg_buf);
	}
	dmsg->dmsg_voucher = voucher;
	dmsg->dmsg_priority = pp;
	dmsg->do_ctxt = dmr ? dmr->dmr_ctxt : NULL;
	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED);
	_dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg);
	_dispatch_voucher_ktrace_dmsg_push(dmsg);
	return dmsg;
}
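
// Note on the two branches above: for sync replies (dmr != NULL) the voucher
// and priority were captured at send time and travel in the reply refs, so
// the kernel-provided voucher on the reply message is discarded; for plain
// channel receives they are derived from the incoming message itself.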
void
_dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags,
		mach_msg_header_t *hdr, mach_msg_size_t siz)
{
	// this function is very similar with what _dispatch_source_merge_evt does
	// but can't reuse it as handling the message must be protected by the
	// internal refcount between the first half and the trailer of what
	// _dispatch_source_merge_evt does.

	dispatch_mach_recv_refs_t dmrr = du._dmrr;
	dispatch_mach_t dm = _dispatch_wref2ptr(dmrr->du_owner_wref);
	dispatch_queue_flags_t dqf;
	dispatch_mach_msg_t dmsg;

	dispatch_assert(_dispatch_unote_needs_rearm(du));

	if (flags & EV_VANISHED) {
		DISPATCH_CLIENT_CRASH(du._du->du_ident,
				"Unexpected EV_VANISHED (do not destroy random mach ports)");
	}

	// once we modify the queue atomic flags below, it will allow concurrent
	// threads running _dispatch_mach_invoke2 to dispose of the source,
	// so we can't safely borrow the reference we get from the muxnote udata
	// anymore, and need our own
	dispatch_wakeup_flags_t wflags = DISPATCH_WAKEUP_CONSUME_2;
	_dispatch_retain_2(dm); // rdar://20382435

	if (unlikely((flags & EV_ONESHOT) && !(flags & EV_DELETE))) {
		dqf = _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq,
				DSF_DEFERRED_DELETE, DSF_ARMED);
		_dispatch_debug("kevent-source[%p]: deferred delete oneshot kevent[%p]",
				dm, dmrr);
	} else if (unlikely(flags & (EV_ONESHOT | EV_DELETE))) {
		_dispatch_source_refs_unregister(dm->_as_ds,
				DU_UNREGISTER_ALREADY_DELETED);
		dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
		_dispatch_debug("kevent-source[%p]: deleted kevent[%p]", dm, dmrr);
	} else {
		dqf = _dispatch_queue_atomic_flags_clear(dm->_as_dq, DSF_ARMED);
		_dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", dm, dmrr);
	}

	_dispatch_debug_machport(hdr->msgh_remote_port);
	_dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x",
			hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);

	if (dqf & DSF_CANCELED) {
		_dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x",
				hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);
		mach_msg_destroy(hdr);
		if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
			free(hdr);
		}
		return dx_wakeup(dm, 0, wflags | DISPATCH_WAKEUP_MAKE_DIRTY);
	}

	// Once the mach channel disarming is visible, cancellation will switch to
	// immediate deletion. If we're preempted here, then the whole cancellation
	// sequence may be complete by the time we really enqueue the message.
	//
	// _dispatch_mach_msg_invoke_with_mach() is responsible for filtering it out
	// to keep the promise that DISPATCH_MACH_DISCONNECTED is the last
	// event sent.

	dmsg = _dispatch_mach_msg_create_recv(hdr, siz, NULL, flags);
	_dispatch_mach_handle_or_push_received_msg(dm, dmsg);
	return _dispatch_release_2_tailcall(dm);
}
void
_dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags,
		mach_msg_header_t *hdr, mach_msg_size_t siz)
{
	dispatch_mach_reply_refs_t dmr = du._dmr;
	dispatch_mach_t dm = _dispatch_wref2ptr(dmr->du_owner_wref);
	bool canceled = (_dispatch_queue_atomic_flags(dm->_as_dq) & DSF_CANCELED);
	dispatch_mach_msg_t dmsg = NULL;

	_dispatch_debug_machport(hdr->msgh_remote_port);
	_dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x",
			hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);

	if (!canceled) {
		dmsg = _dispatch_mach_msg_create_recv(hdr, siz, dmr, flags);
	}

	if (dmsg) {
		dispatch_queue_t drq = NULL;
		if (dmsg->do_ctxt) {
			drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
		}
		if (drq) {
			_dispatch_mach_push_async_reply_msg(dm, dmsg, drq);
		} else {
			_dispatch_mach_handle_or_push_received_msg(dm, dmsg);
		}
	} else {
		_dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x",
				hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);
		mach_msg_destroy(hdr);
		if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
			free(hdr);
		}
	}

	dispatch_wakeup_flags_t wflags = 0;
	uint32_t options = DU_UNREGISTER_IMMEDIATE_DELETE;
	if (canceled) {
		options |= DU_UNREGISTER_DISCONNECTED;
	}

	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
	bool removed = _dispatch_mach_reply_list_remove(dm, dmr);
	dispatch_assert(removed);
	if (TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies) &&
			(dm->dm_send_refs->dmsr_disconnect_cnt ||
			(dm->dq_atomic_flags & DSF_CANCELED))) {
		// When the list is empty, _dispatch_mach_disconnect() may release the
		// last reference count on the Mach channel. To avoid this, take our
		// own reference before releasing the lock.
		wflags = DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2;
		_dispatch_retain_2(dm);
	}
	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);

	bool result = _dispatch_mach_reply_kevent_unregister(dm, dmr, options);
	dispatch_assert(result);
	if (wflags) dx_wakeup(dm, 0, wflags);
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_mach_msg_t
_dispatch_mach_msg_reply_recv(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr, mach_port_t reply_port,
		mach_port_t send)
{
	if (slowpath(!MACH_PORT_VALID(reply_port))) {
		DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port");
	}
	void *ctxt = dmr->dmr_ctxt;
	mach_msg_header_t *hdr, *hdr2 = NULL;
	void *hdr_copyout_addr;
	mach_msg_size_t siz, msgsiz = 0;
	mach_msg_return_t kr;
	mach_msg_option_t options;
	mach_port_t notify = MACH_PORT_NULL;
	siz = mach_vm_round_page(DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE +
			DISPATCH_MACH_TRAILER_SIZE);
	hdr = alloca(siz);
	for (mach_vm_address_t p = mach_vm_trunc_page(hdr + vm_page_size);
			p < (mach_vm_address_t)hdr + siz; p += vm_page_size) {
		*(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
	}
	options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER);
	if (MACH_PORT_VALID(send)) {
		notify = send;
		options |= MACH_RCV_SYNC_WAIT;
	}

retry:
	_dispatch_debug_machport(reply_port);
	_dispatch_debug("machport[0x%08x]: MACH_RCV_MSG %s", reply_port,
			(options & MACH_RCV_TIMEOUT) ? "poll" : "wait");
	kr = mach_msg(hdr, options, 0, siz, reply_port, MACH_MSG_TIMEOUT_NONE,
			notify);
	hdr_copyout_addr = hdr;
	_dispatch_debug_machport(reply_port);
	_dispatch_debug("machport[0x%08x]: MACH_RCV_MSG (size %u, opts 0x%x) "
			"returned: %s - 0x%x", reply_port, siz, options,
			mach_error_string(kr), kr);
	switch (kr) {
	case MACH_RCV_TOO_LARGE:
		if (!fastpath(hdr->msgh_size <= UINT_MAX -
				DISPATCH_MACH_TRAILER_SIZE)) {
			DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message");
		}
		if (options & MACH_RCV_LARGE) {
			msgsiz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE;
			hdr2 = malloc(msgsiz);
			if (dispatch_assume(hdr2)) {
				hdr = hdr2;
				siz = msgsiz;
			}
			options |= MACH_RCV_TIMEOUT;
			options &= ~MACH_RCV_LARGE;
			goto retry;
		}
		_dispatch_log("BUG in libdispatch client: "
				"dispatch_mach_send_and_wait_for_reply: dropped message too "
				"large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id,
				hdr->msgh_size);
		break;
	case MACH_RCV_INVALID_NAME: // rdar://problem/21963848
	case MACH_RCV_PORT_CHANGED: // rdar://problem/21885327
	case MACH_RCV_PORT_DIED:
		// channel was disconnected/canceled and reply port destroyed
		_dispatch_debug("machport[0x%08x]: sync reply port destroyed, ctxt %p: "
				"%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr);
		goto out;
	case MACH_MSG_SUCCESS:
		if (hdr->msgh_remote_port) {
			_dispatch_debug_machport(hdr->msgh_remote_port);
		}
		_dispatch_debug("machport[0x%08x]: received msg id 0x%x, size = %u, "
				"reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id,
				hdr->msgh_size, hdr->msgh_remote_port);
		siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE;
		if (hdr2 && siz < msgsiz) {
			void *shrink = realloc(hdr2, msgsiz);
			if (shrink) hdr = hdr2 = shrink;
		}
		break;
	case MACH_RCV_INVALID_NOTIFY:
	default:
		DISPATCH_INTERNAL_CRASH(kr, "Unexpected error from mach_msg_receive");
	}
	_dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port);
	hdr->msgh_local_port = MACH_PORT_NULL;
	if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) {
		if (!kr) mach_msg_destroy(hdr);
		goto out;
	}
	dispatch_mach_msg_t dmsg;
	dispatch_mach_msg_destructor_t destructor = (!hdr2) ?
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT :
			DISPATCH_MACH_MSG_DESTRUCTOR_FREE;
	dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
	if (!hdr2 || hdr != hdr_copyout_addr) {
		_dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move,
				(uint64_t)hdr_copyout_addr,
				(uint64_t)_dispatch_mach_msg_get_msg(dmsg));
	}
	dmsg->do_ctxt = ctxt;
	return dmsg;
out:
	free(hdr2);
	return NULL;
}
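
// Note on the MACH_RCV_TOO_LARGE path above: the first receive targets the
// stack buffer; when the message is larger, the kernel reports the needed
// msgh_size, a heap buffer is allocated, MACH_RCV_LARGE is dropped and
// MACH_RCV_TIMEOUT added so the retry polls the already-arrived message
// instead of blocking again.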
static inline void
_dispatch_mach_msg_reply_received(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr, mach_port_t local_port)
{
	bool removed = _dispatch_mach_reply_tryremove(dm, dmr);
	if (!MACH_PORT_VALID(local_port) || !removed) {
		// port moved/destroyed during receive, or reply waiter was never
		// registered or already removed (disconnected)
		return;
	}
	mach_port_t reply_port = _dispatch_mach_reply_get_reply_port(
			(mach_port_t)dmr->du_ident);
	_dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p",
			reply_port, dmr->dmr_ctxt);
	if (_dispatch_mach_reply_is_reply_port_owned(dmr)) {
		_dispatch_set_thread_reply_port(reply_port);
		if (local_port != reply_port) {
			DISPATCH_CLIENT_CRASH(local_port,
					"Reply received on unexpected port");
		}
		return;
	}
	mach_msg_header_t *hdr;
	dispatch_mach_msg_t dmsg;
	dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
	hdr->msgh_local_port = local_port;
	dmsg->dmsg_voucher = dmr->dmr_voucher;
	dmr->dmr_voucher = NULL; // transfer reference
	dmsg->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority);
	dmsg->do_ctxt = dmr->dmr_ctxt;
	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED);
	return _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
}
static void
_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port,
		mach_port_t remote_port)
{
	mach_msg_header_t *hdr;
	dispatch_mach_msg_t dmsg;
	dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
	if (local_port) hdr->msgh_local_port = local_port;
	if (remote_port) hdr->msgh_remote_port = remote_port;
	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED);
	_dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ?
			local_port : remote_port, local_port ? "receive" : "send");
	return _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
}
static inline dispatch_mach_msg_t
_dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou,
		dispatch_mach_reply_refs_t dmr, dispatch_mach_reason_t reason)
{
	dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
	mach_port_t reply_port = dmsg ? dmsg->dmsg_reply :
			_dispatch_mach_reply_get_reply_port((mach_port_t)dmr->du_ident);
	voucher_t v;

	if (!reply_port) {
		if (!dmsg) {
			v = dmr->dmr_voucher;
			dmr->dmr_voucher = NULL; // transfer reference
			if (v) _voucher_release(v);
		}
		return NULL;
	}

	if (dmsg) {
		v = dmsg->dmsg_voucher;
		if (v) _voucher_retain(v);
	} else {
		v = dmr->dmr_voucher;
		dmr->dmr_voucher = NULL; // transfer reference
	}

	if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) &&
			(dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) ||
			(dmr && !_dispatch_unote_registered(dmr) &&
			_dispatch_mach_reply_is_reply_port_owned(dmr))) {
		if (v) _voucher_release(v);
		// deallocate owned reply port to break _dispatch_mach_msg_reply_recv
		// out of waiting in mach_msg(MACH_RCV_MSG)
		kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port,
				MACH_PORT_RIGHT_RECEIVE, -1);
		DISPATCH_VERIFY_MIG(kr);
		dispatch_assume_zero(kr);
		return NULL;
	}

	mach_msg_header_t *hdr;
	dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
	dmsgr->dmsg_voucher = v;
	hdr->msgh_local_port = reply_port;
	if (dmsg) {
		dmsgr->dmsg_priority = dmsg->dmsg_priority;
		dmsgr->do_ctxt = dmsg->do_ctxt;
	} else {
		dmsgr->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority);
		dmsgr->do_ctxt = dmr->dmr_ctxt;
	}
	_dispatch_mach_msg_set_reason(dmsgr, 0, reason);
	_dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p",
			hdr->msgh_local_port, dmsgr->do_ctxt);
	return dmsgr;
}
DISPATCH_NOINLINE
static void
_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou)
{
	dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
	dispatch_queue_t drq = NULL;
	mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
	mach_msg_option_t msg_opts = dmsg->dmsg_options;
	_dispatch_debug("machport[0x%08x]: not sent msg id 0x%x, ctxt %p, "
			"msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x",
			msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
			msg_opts, msg->msgh_voucher_port, dmsg->dmsg_reply);
	unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ?
			0 : DISPATCH_MACH_MESSAGE_NOT_SENT;
	dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL,
			msg_opts & DISPATCH_MACH_ASYNC_REPLY
			? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED
			: DISPATCH_MACH_DISCONNECTED);
	if (dmsg->do_ctxt) {
		drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
	}
	_dispatch_mach_msg_set_reason(dmsg, 0, reason);
	_dispatch_mach_handle_or_push_received_msg(dm, dmsg);
	if (dmsgr) {
		if (drq) {
			_dispatch_mach_push_async_reply_msg(dm, dmsgr, drq);
		} else {
			_dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
		}
	}
}
DISPATCH_NOINLINE
static uint32_t
_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou,
		dispatch_mach_reply_refs_t dmr, dispatch_qos_t qos,
		dispatch_mach_send_invoke_flags_t send_flags)
{
	dispatch_mach_send_refs_t dsrr = dm->dm_send_refs;
	dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL;
	voucher_t voucher = dmsg->dmsg_voucher;
	dispatch_queue_t drq = NULL;
	mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL;
	uint32_t send_status = 0;
	bool clear_voucher = false, kvoucher_move_send = false;
	mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
	bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
			MACH_MSG_TYPE_MOVE_SEND_ONCE);
	mach_port_t reply_port = dmsg->dmsg_reply;
	if (!is_reply) {
		dm->dm_needs_mgr = 0;
		if (unlikely(dsrr->dmsr_checkin && dmsg != dsrr->dmsr_checkin)) {
			// send initial checkin message
			if (unlikely(_dispatch_unote_registered(dsrr) &&
					_dispatch_queue_get_current() != &_dispatch_mgr_q)) {
				// send kevent must be uninstalled on the manager queue
				dm->dm_needs_mgr = 1;
				goto out;
			}
			if (unlikely(!_dispatch_mach_msg_send(dm,
					dsrr->dmsr_checkin, NULL, qos, DM_SEND_INVOKE_NONE))) {
				goto out;
			}
			dsrr->dmsr_checkin = NULL;
		}
	}
	mach_msg_return_t kr = 0;
	mach_msg_option_t opts = 0, msg_opts = dmsg->dmsg_options;
	if (!(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) {
		mach_msg_priority_t msg_priority = MACH_MSG_PRIORITY_UNSPECIFIED;
		opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK);
		if (!is_reply) {
			if (dmsg != dsrr->dmsr_checkin) {
				msg->msgh_remote_port = dsrr->dmsr_send;
			}
			if (_dispatch_queue_get_current() == &_dispatch_mgr_q) {
				if (unlikely(!_dispatch_unote_registered(dsrr))) {
					_dispatch_mach_notification_kevent_register(dm,
							msg->msgh_remote_port);
				}
				if (likely(_dispatch_unote_registered(dsrr))) {
					if (os_atomic_load2o(dsrr, dmsr_notification_armed,
							relaxed)) {
						goto out;
					}
					opts |= MACH_SEND_NOTIFY;
				}
			}
			opts |= MACH_SEND_TIMEOUT;
			if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) {
				ipc_kvoucher = _voucher_create_mach_voucher_with_priority(
						voucher, dmsg->dmsg_priority);
			}
			_dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg);
			if (ipc_kvoucher) {
				kvoucher_move_send = true;
				clear_voucher = _voucher_mach_msg_set_mach_voucher(msg,
						ipc_kvoucher, kvoucher_move_send);
			} else {
				clear_voucher = _voucher_mach_msg_set(msg, voucher);
			}
			if (qos) {
				opts |= MACH_SEND_OVERRIDE;
				msg_priority = (mach_msg_priority_t)
						_dispatch_priority_compute_propagated(
						_dispatch_qos_to_pp(qos), 0);
			}
		}
		_dispatch_debug_machport(msg->msgh_remote_port);
		if (reply_port) _dispatch_debug_machport(reply_port);
		if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) {
			if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) {
				if (_dispatch_use_mach_special_reply_port()) {
					opts |= MACH_SEND_SYNC_OVERRIDE;
				}
				_dispatch_clear_thread_reply_port(reply_port);
			}
			_dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg,
					msg_opts);
		}
		kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0,
				msg_priority);
		_dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, "
				"opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: "
				"%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
				opts, msg_opts, msg->msgh_voucher_port, reply_port,
				mach_error_string(kr), kr);
		if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) {
			_dispatch_mach_reply_waiter_unregister(dm, dmr,
					DU_UNREGISTER_REPLY_REMOVE);
		}
		if (clear_voucher) {
			if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) {
				DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption");
			}
			mach_voucher_t kv;
			kv = _voucher_mach_msg_clear(msg, kvoucher_move_send);
			if (kvoucher_move_send) ipc_kvoucher = kv;
		}
	}
	if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) {
		if (opts & MACH_SEND_NOTIFY) {
			_dispatch_debug("machport[0x%08x]: send-possible notification "
					"armed", (mach_port_t)dsrr->du_ident);
			_dispatch_mach_notification_set_armed(dsrr);
		} else {
			// send kevent must be installed on the manager queue
			dm->dm_needs_mgr = 1;
		}
		if (ipc_kvoucher) {
			_dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher);
			voucher_t ipc_voucher;
			ipc_voucher = _voucher_create_with_priority_and_mach_voucher(
					voucher, dmsg->dmsg_priority, ipc_kvoucher);
			_dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]",
					ipc_voucher, dmsg, voucher);
			if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
			dmsg->dmsg_voucher = ipc_voucher;
		}
		goto out;
	} else if (ipc_kvoucher && (kr || !kvoucher_move_send)) {
		_voucher_dealloc_mach_voucher(ipc_kvoucher);
	}
	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
	if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port &&
			!(_dispatch_unote_registered(dmrr) &&
			dmrr->du_ident == reply_port)) {
		_dispatch_mach_reply_kevent_register(dm, reply_port, dmsg);
	}
	if (unlikely(!is_reply && dmsg == dsrr->dmsr_checkin &&
			_dispatch_unote_registered(dsrr))) {
		_dispatch_mach_notification_kevent_unregister(dm);
	}
	if (unlikely(kr)) {
		// Send failed, so reply was never registered <rdar://problem/14309159>
		dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL,
				msg_opts & DISPATCH_MACH_ASYNC_REPLY
				? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED
				: DISPATCH_MACH_DISCONNECTED);
		if (dmsg->do_ctxt) {
			drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
		}
	}
	_dispatch_mach_msg_set_reason(dmsg, kr, 0);
	if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) &&
			(msg_opts & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT)) {
		// Return sent message synchronously <rdar://problem/25947334>
		send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT;
	} else {
		_dispatch_mach_handle_or_push_received_msg(dm, dmsg);
	}
	if (dmsgr) {
		if (drq) {
			_dispatch_mach_push_async_reply_msg(dm, dmsgr, drq);
		} else {
			_dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
		}
	}
	send_status |= DM_SEND_STATUS_SUCCESS;
out:
	return send_status;
}
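
// Note on the timeout path above: sends pass MACH_SEND_TIMEOUT with a zero
// timeout, so a full destination queue fails immediately with
// MACH_SEND_TIMED_OUT instead of blocking. If MACH_SEND_NOTIFY was set (a
// registered send kevent on the manager queue), the armed send-possible
// notification later retries the drain via
// _dispatch_mach_merge_notification(); otherwise dm_needs_mgr routes the
// drain to the manager queue so that kevent can be installed first.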
#pragma mark -
#pragma mark dispatch_mach_send_refs_t

#define _dmsr_state_needs_lock_override(dq_state, qos) \
		unlikely(qos < _dq_state_max_qos(dq_state))

DISPATCH_ALWAYS_INLINE
static inline dispatch_qos_t
_dmsr_state_max_qos(uint64_t dmsr_state)
{
	return _dq_state_max_qos(dmsr_state);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dmsr_state_needs_override(uint64_t dmsr_state, dispatch_qos_t qos)
{
	dmsr_state &= DISPATCH_MACH_STATE_MAX_QOS_MASK;
	return dmsr_state < _dq_state_from_qos(qos);
}

DISPATCH_ALWAYS_INLINE
static inline uint64_t
_dmsr_state_merge_override(uint64_t dmsr_state, dispatch_qos_t qos)
{
	if (_dmsr_state_needs_override(dmsr_state, qos)) {
		dmsr_state &= ~DISPATCH_MACH_STATE_MAX_QOS_MASK;
		dmsr_state |= _dq_state_from_qos(qos);
		dmsr_state |= DISPATCH_MACH_STATE_DIRTY;
		dmsr_state |= DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
	}
	return dmsr_state;
}

#define _dispatch_mach_send_push_update_tail(dmsr, tail) \
		os_mpsc_push_update_tail(dmsr, dmsr, tail, do_next)
#define _dispatch_mach_send_push_update_head(dmsr, head) \
		os_mpsc_push_update_head(dmsr, dmsr, head)
#define _dispatch_mach_send_get_head(dmsr) \
		os_mpsc_get_head(dmsr, dmsr)
#define _dispatch_mach_send_unpop_head(dmsr, dc, dc_next) \
		os_mpsc_undo_pop_head(dmsr, dmsr, dc, dc_next, do_next)
#define _dispatch_mach_send_pop_head(dmsr, head) \
		os_mpsc_pop_head(dmsr, dmsr, head, do_next)

#define dm_push(dm, dc, qos) \
		_dispatch_queue_push((dm)->_as_dq, dc, qos)
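
// dmsr_state appears to pack the send queue's drain lock and QoS bookkeeping
// into one 64-bit word: a dispatch_lock owner in the bits covered by
// DISPATCH_MACH_STATE_UNLOCK_MASK, the max QoS of pending work under
// DISPATCH_MACH_STATE_MAX_QOS_MASK, and the DIRTY / PENDING_BARRIER /
// RECEIVED_OVERRIDE flags. All transitions below go through
// os_atomic_rmw_loop2o() so enqueuers and the drainer agree on one word.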
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dmsr,
		dispatch_object_t dou)
{
	if (_dispatch_mach_send_push_update_tail(dmsr, dou._do)) {
		_dispatch_mach_send_push_update_head(dmsr, dou._do);
		return true;
	}
	return false;
}
DISPATCH_NOINLINE
static bool
_dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags,
		dispatch_mach_send_invoke_flags_t send_flags)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	dispatch_mach_reply_refs_t dmr;
	dispatch_mach_msg_t dmsg;
	struct dispatch_object_s *dc = NULL, *next_dc = NULL;
	dispatch_qos_t qos = _dmsr_state_max_qos(dmsr->dmsr_state);
	uint64_t old_state, new_state;
	uint32_t send_status;
	bool needs_mgr, disconnecting, returning_send_result = false;

again:
	needs_mgr = false; disconnecting = false;
	while (dmsr->dmsr_tail) {
		dc = _dispatch_mach_send_get_head(dmsr);
		do {
			dispatch_mach_send_invoke_flags_t sf = send_flags;
			// Only request immediate send result for the first message
			send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK;
			next_dc = _dispatch_mach_send_pop_head(dmsr, dc);
			if (_dispatch_object_has_type(dc,
					DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) {
				if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) {
					goto partial_drain;
				}
				_dispatch_continuation_pop(dc, NULL, flags, dm->_as_dq);
				continue;
			}
			if (_dispatch_object_is_sync_waiter(dc)) {
				dmsg = ((dispatch_continuation_t)dc)->dc_data;
				dmr = ((dispatch_continuation_t)dc)->dc_other;
			} else if (_dispatch_object_has_vtable(dc)) {
				dmsg = (dispatch_mach_msg_t)dc;
				dmr = NULL;
			} else {
				if (_dispatch_unote_registered(dmsr) &&
						(_dispatch_queue_get_current() != &_dispatch_mgr_q)) {
					// send kevent must be uninstalled on the manager queue
					needs_mgr = true;
					goto partial_drain;
				}
				if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) {
					disconnecting = true;
					goto partial_drain;
				}
				_dispatch_perfmon_workitem_inc();
				continue;
			}
			_dispatch_voucher_ktrace_dmsg_pop(dmsg);
			if (unlikely(dmsr->dmsr_disconnect_cnt ||
					(dm->dq_atomic_flags & DSF_CANCELED))) {
				_dispatch_mach_msg_not_sent(dm, dmsg);
				_dispatch_perfmon_workitem_inc();
				continue;
			}
			send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, qos, sf);
			if (unlikely(!send_status)) {
				goto partial_drain;
			}
			if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) {
				returning_send_result = true;
			}
			_dispatch_perfmon_workitem_inc();
		} while ((dc = next_dc));
	}

	os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
		if (old_state & DISPATCH_MACH_STATE_DIRTY) {
			new_state = old_state;
			new_state &= ~DISPATCH_MACH_STATE_DIRTY;
			new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
			new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
		} else {
			// unlock
			new_state = 0;
		}
	});
	goto out;

partial_drain:
	// if this is not a complete drain, we must undo some things
	_dispatch_mach_send_unpop_head(dmsr, dc, next_dc);

	if (_dispatch_object_has_type(dc,
			DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
			new_state = old_state;
			new_state |= DISPATCH_MACH_STATE_DIRTY;
			new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER;
			new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK;
			new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
		});
	} else {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
			new_state = old_state;
			if (old_state & (DISPATCH_MACH_STATE_DIRTY |
					DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) {
				new_state &= ~DISPATCH_MACH_STATE_DIRTY;
				new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
				new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
			} else {
				new_state |= DISPATCH_MACH_STATE_DIRTY;
				new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK;
			}
		});
	}

out:
	if (old_state & DISPATCH_MACH_STATE_RECEIVED_OVERRIDE) {
		// Ensure that the root queue sees that this thread was overridden.
		_dispatch_set_basepri_override_qos(_dmsr_state_max_qos(old_state));
	}

	if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) {
		qos = _dmsr_state_max_qos(new_state);
		os_atomic_thread_fence(dependency);
		dmsr = os_atomic_force_dependency_on(dmsr, new_state);
		goto again;
	}

	if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
		qos = _dmsr_state_max_qos(new_state);
		_dispatch_mach_push_send_barrier_drain(dm, qos);
	} else {
		if (needs_mgr || dm->dm_needs_mgr) {
			qos = _dmsr_state_max_qos(new_state);
		} else {
			qos = 0;
		}
		if (!disconnecting) dx_wakeup(dm, qos, DISPATCH_WAKEUP_MAKE_DIRTY);
	}
	return returning_send_result;
}
DISPATCH_NOINLINE
static void
_dispatch_mach_send_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags,
		dispatch_mach_send_invoke_flags_t send_flags)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	dispatch_lock owner_self = _dispatch_lock_value_for_self();
	uint64_t old_state, new_state;

	uint64_t canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK;
	uint64_t canlock_state = 0;

	if (send_flags & DM_SEND_INVOKE_NEEDS_BARRIER) {
		canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER;
		canlock_state = DISPATCH_MACH_STATE_PENDING_BARRIER;
	} else if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) {
		canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER;
	}

retry:
	dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor();
	os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, {
		new_state = old_state;
		if (unlikely((old_state & canlock_mask) != canlock_state)) {
			if (!(send_flags & DM_SEND_INVOKE_MAKE_DIRTY)) {
				os_atomic_rmw_loop_give_up(break);
			}
			new_state |= DISPATCH_MACH_STATE_DIRTY;
		} else {
			if (_dmsr_state_needs_lock_override(old_state, oq_floor)) {
				os_atomic_rmw_loop_give_up({
					oq_floor = _dispatch_queue_override_self(old_state);
					goto retry;
				});
			}
			new_state |= owner_self;
			new_state &= ~DISPATCH_MACH_STATE_DIRTY;
			new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
			new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
		}
	});

	if (unlikely((old_state & canlock_mask) != canlock_state)) {
		return;
	}
	if (send_flags & DM_SEND_INVOKE_CANCEL) {
		_dispatch_mach_cancel(dm);
	}
	_dispatch_mach_send_drain(dm, flags, send_flags);
}
DISPATCH_NOINLINE
static void
_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc,
		DISPATCH_UNUSED dispatch_invoke_context_t dic,
		dispatch_invoke_flags_t flags)
{
	dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current();
	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
	dispatch_thread_frame_s dtf;

	DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY);
	DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER);
	// hide the mach channel (see _dispatch_mach_barrier_invoke comment)
	_dispatch_thread_frame_stash(&dtf);
	_dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags,{
		_dispatch_mach_send_invoke(dm, flags,
				DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER);
	});
	_dispatch_thread_frame_unstash(&dtf);
}
DISPATCH_NOINLINE
static void
_dispatch_mach_push_send_barrier_drain(dispatch_mach_t dm, dispatch_qos_t qos)
{
	dispatch_continuation_t dc = _dispatch_continuation_alloc();

	dc->do_vtable = DC_VTABLE(MACH_SEND_BARRRIER_DRAIN);
	dc->dc_func = NULL;
	dc->dc_ctxt = NULL;
	dc->dc_voucher = DISPATCH_NO_VOUCHER;
	dc->dc_priority = DISPATCH_NO_PRIORITY;
	dm_push(dm, dc, qos);
}
DISPATCH_NOINLINE
static void
_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc,
		dispatch_qos_t qos)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	uint64_t old_state, new_state, state_flags = 0;
	dispatch_tid owner;
	bool wakeup;

	// <rdar://problem/25896179> when pushing a send barrier that destroys
	// the last reference to this channel, and the send queue is already
	// draining on another thread, the send barrier may run as soon as
	// _dispatch_mach_send_push_inline() returns.
	_dispatch_retain_2(dm);

	wakeup = _dispatch_mach_send_push_inline(dmsr, dc);
	if (wakeup) {
		state_flags = DISPATCH_MACH_STATE_DIRTY;
		if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) {
			state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER;
		}
	}

	if (state_flags) {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
			new_state = _dmsr_state_merge_override(old_state, qos);
			new_state |= state_flags;
		});
	} else {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, relaxed, {
			new_state = _dmsr_state_merge_override(old_state, qos);
			if (old_state == new_state) {
				os_atomic_rmw_loop_give_up(break);
			}
		});
	}

	qos = _dmsr_state_max_qos(new_state);
	owner = _dispatch_lock_owner((dispatch_lock)old_state);
	if (owner) {
		if (_dmsr_state_needs_override(old_state, qos)) {
			_dispatch_wqthread_override_start_check_owner(owner, qos,
					&dmsr->dmsr_state_lock.dul_lock);
		}
		return _dispatch_release_2_tailcall(dm);
	}

	dispatch_wakeup_flags_t wflags = 0;
	if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) {
		_dispatch_mach_push_send_barrier_drain(dm, qos);
	} else if (wakeup || dmsr->dmsr_disconnect_cnt ||
			(dm->dq_atomic_flags & DSF_CANCELED)) {
		wflags = DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2;
	} else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
		wflags = DISPATCH_WAKEUP_CONSUME_2;
	}
	if (wflags) {
		return dx_wakeup(dm, qos, wflags);
	}
	return _dispatch_release_2_tailcall(dm);
}
DISPATCH_NOINLINE
static bool
_dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm,
		dispatch_object_t dou, dispatch_qos_t qos,
		dispatch_mach_send_invoke_flags_t send_flags)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	dispatch_lock owner_self = _dispatch_lock_value_for_self();
	uint64_t old_state, new_state, canlock_mask, state_flags = 0;
	dispatch_tid owner;

	bool wakeup = _dispatch_mach_send_push_inline(dmsr, dou);
	if (wakeup) {
		state_flags = DISPATCH_MACH_STATE_DIRTY;
	}

	if (unlikely(dmsr->dmsr_disconnect_cnt ||
			(dm->dq_atomic_flags & DSF_CANCELED))) {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
			new_state = _dmsr_state_merge_override(old_state, qos);
			new_state |= state_flags;
		});
		dx_wakeup(dm, qos, DISPATCH_WAKEUP_MAKE_DIRTY);
		return false;
	}

	canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK |
			DISPATCH_MACH_STATE_PENDING_BARRIER;
	if (state_flags) {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, seq_cst, {
			new_state = _dmsr_state_merge_override(old_state, qos);
			new_state |= state_flags;
			if (likely((old_state & canlock_mask) == 0)) {
				new_state |= owner_self;
				new_state &= ~DISPATCH_MACH_STATE_DIRTY;
				new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
				new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
			}
		});
	} else {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, {
			new_state = _dmsr_state_merge_override(old_state, qos);
			if (new_state == old_state) {
				os_atomic_rmw_loop_give_up(return false);
			}
			if (likely((old_state & canlock_mask) == 0)) {
				new_state |= owner_self;
				new_state &= ~DISPATCH_MACH_STATE_DIRTY;
				new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
				new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
			}
		});
	}

	owner = _dispatch_lock_owner((dispatch_lock)old_state);
	if (owner) {
		if (_dmsr_state_needs_override(old_state, qos)) {
			_dispatch_wqthread_override_start_check_owner(owner, qos,
					&dmsr->dmsr_state_lock.dul_lock);
		}
		return false;
	}

	if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
		dx_wakeup(dm, qos, 0);
		return false;
	}

	// Ensure our message is still at the head of the queue and has not already
	// been dequeued by another thread that raced us to the send queue lock.
	// A plain load of the head and comparison against our object pointer is
	// sufficient.
	if (unlikely(!(wakeup && dou._do == dmsr->dmsr_head))) {
		// Don't request immediate send result for messages we don't own
		send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK;
	}
	return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags);
}
#pragma mark -
#pragma mark dispatch_mach

DISPATCH_ALWAYS_INLINE
static void
_dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm)
{
	DISPATCH_ASSERT_ON_MANAGER_QUEUE();
	if (_dispatch_unote_registered(dm->dm_send_refs)) {
		dispatch_assume(_dispatch_unote_unregister(dm->dm_send_refs, 0));
	}
	dm->dm_send_refs->du_ident = 0;
}

DISPATCH_ALWAYS_INLINE
static void
_dispatch_mach_notification_kevent_register(dispatch_mach_t dm,mach_port_t send)
{
	DISPATCH_ASSERT_ON_MANAGER_QUEUE();
	dm->dm_send_refs->du_ident = send;
	dispatch_assume(_dispatch_unote_register(dm->dm_send_refs,
			DISPATCH_WLH_ANON, 0));
}
void
_dispatch_mach_merge_notification(dispatch_unote_t du,
		uint32_t flags DISPATCH_UNUSED, uintptr_t data,
		uintptr_t status DISPATCH_UNUSED,
		pthread_priority_t pp DISPATCH_UNUSED)
{
	dispatch_mach_send_refs_t dmsr = du._dmsr;
	dispatch_mach_t dm = _dispatch_wref2ptr(dmsr->du_owner_wref);

	if (data & dmsr->du_fflags) {
		_dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN,
				DM_SEND_INVOKE_MAKE_DIRTY);
	}
}
DISPATCH_NOINLINE
static void
_dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm,
		dispatch_mach_msg_t dmsg)
{
	mach_error_t error;
	dispatch_mach_reason_t reason = _dispatch_mach_msg_get_reason(dmsg, &error);
	if (reason == DISPATCH_MACH_MESSAGE_RECEIVED || !dm->dm_is_xpc ||
			!_dispatch_mach_xpc_hooks->dmxh_direct_message_handler(
			dm->dm_recv_refs->dmrr_handler_ctxt, reason, dmsg, error)) {
		// Not XPC client or not a message that XPC can handle inline - push
		// it onto the channel queue.
		dm_push(dm, dmsg, _dispatch_qos_from_pp(dmsg->dmsg_priority));
	} else {
		// XPC handled the message inline. Do the cleanup that would otherwise
		// have happened in _dispatch_mach_msg_invoke(), leaving out steps that
		// are not required in this context.
		dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
		dispatch_release(dmsg);
	}
}
DISPATCH_ALWAYS_INLINE
static void
_dispatch_mach_push_async_reply_msg(dispatch_mach_t dm,
		dispatch_mach_msg_t dmsg, dispatch_queue_t drq) {
	// Push the message onto the given queue. This function is only used for
	// replies to messages sent by
	// dispatch_mach_send_with_result_and_async_reply_4libxpc().
	dispatch_continuation_t dc = _dispatch_mach_msg_async_reply_wrap(dmsg, dm);
	_dispatch_trace_continuation_push(drq, dc);
	dx_push(drq, dc, _dispatch_qos_from_pp(dmsg->dmsg_priority));
}
#pragma mark -
#pragma mark dispatch_mach_t

static inline mach_msg_option_t
_dispatch_mach_checkin_options(void)
{
	mach_msg_option_t options = 0;
#if DISPATCH_USE_CHECKIN_NOIMPORTANCE
	options = MACH_SEND_NOIMPORTANCE; // <rdar://problem/16996737>
#endif
	return options;
}

static inline mach_msg_option_t
_dispatch_mach_send_options(void)
{
	mach_msg_option_t options = 0;
	return options;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_qos_t
_dispatch_mach_priority_propagate(mach_msg_option_t options,
		pthread_priority_t *msg_pp)
{
#if DISPATCH_USE_NOIMPORTANCE_QOS
	if (options & MACH_SEND_NOIMPORTANCE) {
		*msg_pp = 0;
		return 0;
	}
#endif
	unsigned int flags = DISPATCH_PRIORITY_PROPAGATE_CURRENT;
	if ((options & DISPATCH_MACH_WAIT_FOR_REPLY) &&
			(options & DISPATCH_MACH_OWNED_REPLY_PORT) &&
			_dispatch_use_mach_special_reply_port()) {
		flags |= DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC;
	}
	*msg_pp = _dispatch_priority_compute_propagated(0, flags);
	// TODO: remove QoS contribution of sync IPC messages to send queue
	return _dispatch_qos_from_pp(*msg_pp);
}

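/*
 * Illustration (hypothetical values): a message sent from a thread running
 * at QOS_CLASS_USER_INITIATED yields *msg_pp carrying that QoS, which is
 * stored in dmsg->dmsg_priority and adopted on the receive side, while the
 * returned dispatch_qos_t drives the send queue wakeup. The
 * DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC flag marks the propagation as
 * being on behalf of synchronous IPC over an owned special reply port,
 * which is allowed to propagate priorities that plain asynchronous
 * propagation would cap.
 */
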
DISPATCH_NOINLINE
static bool
_dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
		dispatch_continuation_t dc_wait, mach_msg_option_t options)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) {
		DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued");
	}
	dispatch_retain(dmsg);
	pthread_priority_t msg_pp;
	dispatch_qos_t qos = _dispatch_mach_priority_propagate(options, &msg_pp);
	options |= _dispatch_mach_send_options();
	dmsg->dmsg_options = options;
	mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
	dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg);
	bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
			MACH_MSG_TYPE_MOVE_SEND_ONCE);
	dmsg->dmsg_priority = msg_pp;
	dmsg->dmsg_voucher = _voucher_copy();
	_dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg);

	uint32_t send_status;
	bool returning_send_result = false;
	dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE;
	if (options & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT) {
		send_flags = DM_SEND_INVOKE_IMMEDIATE_SEND;
	}
	if (is_reply && !dmsg->dmsg_reply && !dmsr->dmsr_disconnect_cnt &&
			!(dm->dq_atomic_flags & DSF_CANCELED)) {
		// replies are sent to a send-once right and don't need the send queue
		dispatch_assert(!dc_wait);
		send_status = _dispatch_mach_msg_send(dm, dmsg, NULL, 0, send_flags);
		dispatch_assert(send_status);
		returning_send_result = !!(send_status &
				DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT);
	} else {
		_dispatch_voucher_ktrace_dmsg_push(dmsg);
		dispatch_object_t dou = { ._dmsg = dmsg };
		if (dc_wait) dou._dc = dc_wait;
		returning_send_result = _dispatch_mach_send_push_and_trydrain(dm, dou,
				qos, send_flags);
	}
	if (returning_send_result) {
		_dispatch_voucher_debug("mach-msg[%p] clear", dmsg->dmsg_voucher, dmsg);
		if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
		dmsg->dmsg_voucher = NULL;
		dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
		dispatch_release(dmsg);
	}
	return returning_send_result;
}

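/*
 * Typical client-side use of this send path, via the public wrappers below
 * (sketch only, error handling omitted; `peer` and `payload_size` are
 * caller-provided):
 *
 *	mach_msg_header_t *hdr;
 *	dispatch_mach_msg_t dmsg = dispatch_mach_msg_create(NULL,
 *			sizeof(*hdr) + payload_size,
 *			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
 *	hdr->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
 *	hdr->msgh_remote_port = peer;
 *	hdr->msgh_size = (mach_msg_size_t)(sizeof(*hdr) + payload_size);
 *	dispatch_mach_send(dm, dmsg, 0);
 *	dispatch_release(dmsg); // the channel holds its own reference
 */
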
DISPATCH_NOINLINE
void
dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
		mach_msg_option_t options)
{
	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
	options &= ~DISPATCH_MACH_OPTIONS_MASK;
	bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,
			options);
	dispatch_assert(!returned_send_result);
	(void)returned_send_result;
}

DISPATCH_NOINLINE
void
dispatch_mach_send_with_result(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
		mach_msg_option_t options, dispatch_mach_send_flags_t send_flags,
		dispatch_mach_reason_t *send_result, mach_error_t *send_error)
{
	if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
		DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
	}
	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
	options &= ~DISPATCH_MACH_OPTIONS_MASK;
	options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
	bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,
			options);
	unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
	mach_error_t err = 0;
	if (returned_send_result) {
		reason = _dispatch_mach_msg_get_reason(dmsg, &err);
	}
	*send_result = reason;
	*send_error = err;
}

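/*
 * The *send_result out-parameter distinguishes an immediate send from a
 * deferred one (sketch, illustration only):
 *
 *	dispatch_mach_reason_t reason;
 *	mach_error_t error;
 *	dispatch_mach_send_with_result(dm, dmsg, 0, DISPATCH_MACH_SEND_DEFAULT,
 *			&reason, &error);
 *	if (reason == DISPATCH_MACH_NEEDS_DEFERRED_SEND) {
 *		// the message was enqueued instead; the final outcome arrives at
 *		// the channel handler (e.g. DISPATCH_MACH_MESSAGE_SENT or
 *		// DISPATCH_MACH_MESSAGE_SEND_FAILED)
 *	}
 */
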
static dispatch_mach_msg_t
_dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
		dispatch_mach_msg_t dmsg, mach_msg_option_t options,
		bool *returned_send_result)
{
	mach_port_t send = MACH_PORT_NULL;
	mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg);
	if (!reply_port) {
		// use per-thread mach reply port <rdar://24597802>
		reply_port = _dispatch_get_thread_reply_port();
		mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
		dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) ==
				MACH_MSG_TYPE_MAKE_SEND_ONCE);
		hdr->msgh_local_port = reply_port;
		options |= DISPATCH_MACH_OWNED_REPLY_PORT;
	}
	options |= DISPATCH_MACH_WAIT_FOR_REPLY;

	dispatch_mach_reply_refs_t dmr;
#if DISPATCH_DEBUG
	dmr = _dispatch_calloc(1, sizeof(*dmr));
#else
	struct dispatch_mach_reply_refs_s dmr_buf = { };
	dmr = &dmr_buf;
#endif
	struct dispatch_continuation_s dc_wait = {
		.dc_flags = DISPATCH_OBJ_SYNC_WAITER_BIT,
		.dc_data = dmsg,
		.dc_other = dmr,
		.dc_priority = DISPATCH_NO_PRIORITY,
		.dc_voucher = DISPATCH_NO_VOUCHER,
	};
	dmr->dmr_ctxt = dmsg->do_ctxt;
	dmr->dmr_waiter_tid = _dispatch_tid_self();
	*returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,
			options);
	if (options & DISPATCH_MACH_OWNED_REPLY_PORT) {
		_dispatch_clear_thread_reply_port(reply_port);
		if (_dispatch_use_mach_special_reply_port()) {
			// link special reply port to send right for remote receive right
			// TODO: extend to pre-connect phase <rdar://problem/31823384>
			send = dm->dm_send_refs->dmsr_send;
		}
	}
	dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port, send);
#if DISPATCH_DEBUG
	free(dmr);
#endif
	return dmsg;
}

DISPATCH_NOINLINE
dispatch_mach_msg_t
dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
		dispatch_mach_msg_t dmsg, mach_msg_option_t options)
{
	bool returned_send_result;
	dispatch_mach_msg_t reply;
	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
	options &= ~DISPATCH_MACH_OPTIONS_MASK;
	reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options,
			&returned_send_result);
	dispatch_assert(!returned_send_result);
	(void)returned_send_result;
	return reply;
}

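/*
 * Synchronous round trip (sketch; assumes the message header was built with
 * a MACH_MSG_TYPE_MAKE_SEND_ONCE local disposition so the per-thread reply
 * port can be installed as done above):
 *
 *	dispatch_mach_msg_t reply =
 *			dispatch_mach_send_and_wait_for_reply(dm, dmsg, 0);
 *	if (reply) {
 *		mach_msg_header_t *rhdr = dispatch_mach_msg_get_msg(reply, NULL);
 *		// ... consume the reply ...
 *		dispatch_release(reply);
 *	}
 */
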
DISPATCH_NOINLINE
dispatch_mach_msg_t
dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t dm,
		dispatch_mach_msg_t dmsg, mach_msg_option_t options,
		dispatch_mach_send_flags_t send_flags,
		dispatch_mach_reason_t *send_result, mach_error_t *send_error)
{
	if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
		DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
	}
	bool returned_send_result;
	dispatch_mach_msg_t reply;
	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
	options &= ~DISPATCH_MACH_OPTIONS_MASK;
	options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
	reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options,
			&returned_send_result);
	unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
	mach_error_t err = 0;
	if (returned_send_result) {
		reason = _dispatch_mach_msg_get_reason(dmsg, &err);
	}
	*send_result = reason;
	*send_error = err;
	return reply;
}

DISPATCH_NOINLINE
void
dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t dm,
		dispatch_mach_msg_t dmsg, mach_msg_option_t options,
		dispatch_mach_send_flags_t send_flags,
		dispatch_mach_reason_t *send_result, mach_error_t *send_error)
{
	if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
		DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
	}
	if (unlikely(!dm->dm_is_xpc)) {
		DISPATCH_CLIENT_CRASH(0,
				"dispatch_mach_send_with_result_and_async_reply_4libxpc "
				"is XPC only");
	}
	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
	options &= ~DISPATCH_MACH_OPTIONS_MASK;
	options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
	mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg);
	if (!reply_port) {
		DISPATCH_CLIENT_CRASH(0, "Reply port needed for async send with reply");
	}
	options |= DISPATCH_MACH_ASYNC_REPLY;
	bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,
			options);
	unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
	mach_error_t err = 0;
	if (returned_send_result) {
		reason = _dispatch_mach_msg_get_reason(dmsg, &err);
	}
	*send_result = reason;
	*send_error = err;
}

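/*
 * Unlike the synchronous variants, the reply to a message sent through this
 * entry point is delivered asynchronously: when it arrives it is wrapped by
 * _dispatch_mach_msg_async_reply_wrap() and pushed onto the queue returned
 * by the dmxh_msg_context_reply_queue hook for this message's context, then
 * handed to dmxh_async_reply_handler (see
 * _dispatch_mach_push_async_reply_msg() above). That is why a reply port is
 * mandatory here rather than optional.
 */
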
DISPATCH_NOINLINE
static bool
_dispatch_mach_disconnect(dispatch_mach_t dm)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	bool disconnected;
	if (_dispatch_unote_registered(dmsr)) {
		_dispatch_mach_notification_kevent_unregister(dm);
	}
	if (MACH_PORT_VALID(dmsr->dmsr_send)) {
		_dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dmsr->dmsr_send);
		dmsr->dmsr_send = MACH_PORT_NULL;
	}
	if (dmsr->dmsr_checkin) {
		_dispatch_mach_msg_not_sent(dm, dmsr->dmsr_checkin);
		dmsr->dmsr_checkin = NULL;
	}
	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
	dispatch_mach_reply_refs_t dmr, tmp;
	TAILQ_FOREACH_SAFE(dmr, &dm->dm_send_refs->dmsr_replies, dmr_list, tmp) {
		TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
		if (_dispatch_unote_registered(dmr)) {
			if (!_dispatch_mach_reply_kevent_unregister(dm, dmr,
					DU_UNREGISTER_DISCONNECTED)) {
				TAILQ_INSERT_HEAD(&dm->dm_send_refs->dmsr_replies, dmr,
						dmr_list);
			}
		} else {
			_dispatch_mach_reply_waiter_unregister(dm, dmr,
					DU_UNREGISTER_DISCONNECTED);
		}
	}
	disconnected = TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies);
	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
	return disconnected;
}

DISPATCH_NOINLINE
static void
_dispatch_mach_cancel(dispatch_mach_t dm)
{
	_dispatch_object_debug(dm, "%s", __func__);
	if (!_dispatch_mach_disconnect(dm)) return;

	bool uninstalled = true;
	dispatch_assert(!dm->dm_uninstalled);

	if (dm->dm_xpc_term_refs) {
		uninstalled = _dispatch_unote_unregister(dm->dm_xpc_term_refs, 0);
	}

	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
	mach_port_t local_port = (mach_port_t)dmrr->du_ident;
	if (local_port) {
		// handle the deferred delete case properly, similar to what
		// _dispatch_source_invoke2() does
		dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
		if ((dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_ARMED)) {
			_dispatch_source_refs_unregister(dm->_as_ds,
					DU_UNREGISTER_IMMEDIATE_DELETE);
			dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
		} else if (!(dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_DELETED)) {
			_dispatch_source_refs_unregister(dm->_as_ds, 0);
			dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
		}
		if ((dqf & DSF_STATE_MASK) == DSF_DELETED) {
			_dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL);
		} else {
			uninstalled = false;
		}
	} else {
		_dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED,
				DSF_ARMED | DSF_DEFERRED_DELETE);
	}

	if (dm->dm_send_refs->dmsr_disconnect_cnt) {
		uninstalled = false; // <rdar://problem/31233110>
	}
	if (uninstalled) dm->dm_uninstalled = uninstalled;
}

static bool
_dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou)
{
	if (!_dispatch_mach_disconnect(dm)) return false;
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	dmsr->dmsr_checkin = dou._dc->dc_data;
	dmsr->dmsr_send = (mach_port_t)dou._dc->dc_other;
	_dispatch_continuation_free(dou._dc);
	(void)os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, relaxed);
	_dispatch_object_debug(dm, "%s", __func__);
	_dispatch_release(dm); // <rdar://problem/26266265>
	return true;
}

DISPATCH_NOINLINE
void
dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send,
		dispatch_mach_msg_t checkin)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	(void)os_atomic_inc2o(dmsr, dmsr_disconnect_cnt, relaxed);
	if (MACH_PORT_VALID(send) && checkin) {
		dispatch_mach_msg_t dmsg = checkin;
		dispatch_retain(dmsg);
		dmsg->dmsg_options = _dispatch_mach_checkin_options();
		dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg);
	} else {
		checkin = NULL;
		dmsr->dmsr_checkin_port = MACH_PORT_NULL;
	}
	dispatch_continuation_t dc = _dispatch_continuation_alloc();
	dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT;
	// actually called manually in _dispatch_mach_send_drain
	dc->dc_func = (void*)_dispatch_mach_reconnect_invoke;
	dc->dc_data = checkin;
	dc->dc_other = (void*)(uintptr_t)send;
	dc->dc_voucher = DISPATCH_NO_VOUCHER;
	dc->dc_priority = DISPATCH_NO_PRIORITY;
	_dispatch_retain(dm); // <rdar://problem/26266265>
	return _dispatch_mach_send_push(dm, dc, 0);
}

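/*
 * Reconnection example (sketch; `new_send` and `checkin` are
 * caller-provided):
 *
 *	// the checkin message is re-sent on the new right before any queued
 *	// traffic once the send queue drains
 *	dispatch_mach_reconnect(dm, new_send, checkin);
 *	// dispatch_mach_get_checkin_port() below reports the port the pending
 *	// checkin targets (or MACH_PORT_DEAD once the channel is canceled)
 */
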
DISPATCH_NOINLINE
mach_port_t
dispatch_mach_get_checkin_port(dispatch_mach_t dm)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) {
		return MACH_PORT_DEAD;
	}
	return dmsr->dmsr_checkin_port;
}

static void
_dispatch_mach_connect_invoke(dispatch_mach_t dm)
{
	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
	_dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
			DISPATCH_MACH_CONNECTED, NULL, 0, dmrr->dmrr_handler_func);
	dm->dm_connect_handler_called = 1;
	_dispatch_perfmon_workitem_inc();
}

DISPATCH_ALWAYS_INLINE
static void
_dispatch_mach_msg_invoke_with_mach(dispatch_mach_msg_t dmsg,
		dispatch_invoke_flags_t flags, dispatch_mach_t dm)
{
	dispatch_mach_recv_refs_t dmrr;
	mach_error_t err;
	unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err);
	dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE |
			DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE;

	dmrr = dm->dm_recv_refs;
	dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
	_dispatch_voucher_ktrace_dmsg_pop(dmsg);
	_dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg);
	(void)_dispatch_adopt_priority_and_set_voucher(dmsg->dmsg_priority,
			dmsg->dmsg_voucher, adopt_flags);
	dmsg->dmsg_voucher = NULL;
	dispatch_invoke_with_autoreleasepool(flags, {
		if (flags & DISPATCH_INVOKE_ASYNC_REPLY) {
			_dispatch_client_callout3(dmrr->dmrr_handler_ctxt, reason, dmsg,
					_dispatch_mach_xpc_hooks->dmxh_async_reply_handler);
		} else {
			if (slowpath(!dm->dm_connect_handler_called)) {
				_dispatch_mach_connect_invoke(dm);
			}
			if (reason == DISPATCH_MACH_MESSAGE_RECEIVED &&
					(_dispatch_queue_atomic_flags(dm->_as_dq) & DSF_CANCELED)) {
				// <rdar://problem/32184699> Do not deliver message received
				// after cancellation: _dispatch_mach_merge_msg can be preempted
				// for a long time between clearing DSF_ARMED but before
				// enqueuing the message, allowing for cancellation to complete,
				// and then the message event to be delivered.
				//
				// This makes XPC unhappy because some of these messages are
				// port-destroyed notifications that can cause it to try to
				// reconnect on a channel that is almost fully canceled
			} else {
				_dispatch_client_callout4(dmrr->dmrr_handler_ctxt, reason, dmsg,
						err, dmrr->dmrr_handler_func);
			}
		}
		_dispatch_perfmon_workitem_inc();
	});
	_dispatch_introspection_queue_item_complete(dmsg);
	dispatch_release(dmsg);
}

void
_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg,
		DISPATCH_UNUSED dispatch_invoke_context_t dic,
		dispatch_invoke_flags_t flags)
{
	dispatch_thread_frame_s dtf;

	// hide mach channel
	dispatch_mach_t dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf);
	_dispatch_mach_msg_invoke_with_mach(dmsg, flags, dm);
	_dispatch_thread_frame_unstash(&dtf);
}

void
_dispatch_mach_barrier_invoke(dispatch_continuation_t dc,
		DISPATCH_UNUSED dispatch_invoke_context_t dic,
		dispatch_invoke_flags_t flags)
{
	dispatch_thread_frame_s dtf;
	dispatch_mach_t dm = dc->dc_other;
	dispatch_mach_recv_refs_t dmrr;
	uintptr_t dc_flags = (uintptr_t)dc->dc_data;
	unsigned long type = dc_type(dc);

	// hide mach channel from clients
	if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) {
		// on the send queue, the mach channel isn't the current queue
		// its target queue is the current one already
		_dispatch_thread_frame_stash(&dtf);
	}
	dmrr = dm->dm_recv_refs;
	DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT);
	_dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags, {
		dispatch_invoke_with_autoreleasepool(flags, {
			if (slowpath(!dm->dm_connect_handler_called)) {
				_dispatch_mach_connect_invoke(dm);
			}
			_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
			_dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
					DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0,
					dmrr->dmrr_handler_func);
		});
	});
	if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) {
		_dispatch_thread_frame_unstash(&dtf);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_mach_barrier_set_vtable(dispatch_continuation_t dc,
		dispatch_mach_t dm, dispatch_continuation_vtable_t vtable)
{
	dc->dc_data = (void *)dc->dc_flags;
	dc->dc_other = dm;
	dc->do_vtable = vtable; // Must be after dc_flags load, dc_vtable aliases
}

DISPATCH_NOINLINE
void
dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context,
		dispatch_function_t func)
{
	dispatch_continuation_t dc = _dispatch_continuation_alloc();
	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER;
	dispatch_qos_t qos;

	_dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags);
	_dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_SEND_BARRIER));
	_dispatch_trace_continuation_push(dm->_as_dq, dc);
	qos = _dispatch_continuation_override_qos(dm->_as_dq, dc);
	return _dispatch_mach_send_push(dm, dc, qos);
}

DISPATCH_NOINLINE
void
dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier)
{
	dispatch_continuation_t dc = _dispatch_continuation_alloc();
	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER;
	dispatch_qos_t qos;

	_dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags);
	_dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_SEND_BARRIER));
	_dispatch_trace_continuation_push(dm->_as_dq, dc);
	qos = _dispatch_continuation_override_qos(dm->_as_dq, dc);
	return _dispatch_mach_send_push(dm, dc, qos);
}

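/*
 * Barrier usage (sketch): a send barrier runs once every message enqueued
 * before it has been processed by the send queue, and its completion is
 * reported to the channel handler as DISPATCH_MACH_BARRIER_COMPLETED (see
 * _dispatch_mach_barrier_invoke() above):
 *
 *	dispatch_mach_send(dm, dmsg, 0);
 *	dispatch_mach_send_barrier(dm, ^{
 *		// all previously submitted messages have been handed off
 *	});
 */
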
DISPATCH_NOINLINE
void
dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context,
		dispatch_function_t func)
{
	dispatch_continuation_t dc = _dispatch_continuation_alloc();
	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER;

	_dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags);
	_dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER));
	return _dispatch_continuation_async(dm->_as_dq, dc);
}

DISPATCH_NOINLINE
void
dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier)
{
	dispatch_continuation_t dc = _dispatch_continuation_alloc();
	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER;

	_dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags);
	_dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER));
	return _dispatch_continuation_async(dm->_as_dq, dc);
}

DISPATCH_NOINLINE
static void
_dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags)
{
	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;

	dispatch_invoke_with_autoreleasepool(flags, {
		if (slowpath(!dm->dm_connect_handler_called)) {
			_dispatch_mach_connect_invoke(dm);
		}
		_dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
				DISPATCH_MACH_CANCELED, NULL, 0, dmrr->dmrr_handler_func);
		_dispatch_perfmon_workitem_inc();
	});
	dm->dm_cancel_handler_called = 1;
	_dispatch_release(dm); // the retain is done at creation time
}

DISPATCH_NOINLINE
void
dispatch_mach_cancel(dispatch_mach_t dm)
{
	dispatch_source_cancel(dm->_as_ds);
}

static void
_dispatch_mach_install(dispatch_mach_t dm, dispatch_wlh_t wlh,
		dispatch_priority_t pri)
{
	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
	uint32_t disconnect_cnt;

	if (dmrr->du_ident) {
		_dispatch_source_refs_register(dm->_as_ds, wlh, pri);
		dispatch_assert(dmrr->du_is_direct);
	}

	if (dm->dm_is_xpc) {
		bool monitor_sigterm;
		if (_dispatch_mach_xpc_hooks->version < 3) {
			monitor_sigterm = true;
		} else if (!_dispatch_mach_xpc_hooks->dmxh_enable_sigterm_notification) {
			monitor_sigterm = true;
		} else {
			monitor_sigterm =
					_dispatch_mach_xpc_hooks->dmxh_enable_sigterm_notification(
					dm->dm_recv_refs->dmrr_handler_ctxt);
		}
		if (monitor_sigterm) {
			dispatch_xpc_term_refs_t _dxtr =
					dux_create(&_dispatch_xpc_type_sigterm, SIGTERM, 0)._dxtr;
			_dxtr->du_owner_wref = _dispatch_ptr2wref(dm);
			dm->dm_xpc_term_refs = _dxtr;
			_dispatch_unote_register(dm->dm_xpc_term_refs, wlh, pri);
		}
	}
	if (!dm->dq_priority) {
		// _dispatch_mach_reply_kevent_register assumes this has been done
		// which is unlike regular sources or queues, the DEFAULTQUEUE flag
		// is used so that the priority of the channel doesn't act as
		// a QoS floor for incoming messages (26761457)
		dm->dq_priority = pri;
	}
	dm->ds_is_installed = true;
	if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_send_refs, dmsr_disconnect_cnt,
			DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) {
		DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed");
	}
}

void
_dispatch_mach_finalize_activation(dispatch_mach_t dm, bool *allow_resume)
{
	dispatch_priority_t pri;
	dispatch_wlh_t wlh;

	// call "super"
	_dispatch_queue_finalize_activation(dm->_as_dq, allow_resume);

	if (!dm->ds_is_installed) {
		pri = _dispatch_queue_compute_priority_and_wlh(dm->_as_dq, &wlh);
		if (pri) _dispatch_mach_install(dm, wlh, pri);
	}
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_mach_tryarm(dispatch_mach_t dm, dispatch_queue_flags_t *out_dqf)
{
	dispatch_queue_flags_t oqf, nqf;
	bool rc = os_atomic_rmw_loop2o(dm, dq_atomic_flags, oqf, nqf, relaxed, {
		nqf = oqf;
		if (nqf & (DSF_ARMED | DSF_CANCELED | DSF_DEFERRED_DELETE |
				DSF_DELETED)) {
			// the test is inside the loop because it's convenient but the
			// result should not change for the duration of the rmw_loop
			os_atomic_rmw_loop_give_up(break);
		}
		nqf |= DSF_ARMED;
	});
	if (out_dqf) *out_dqf = nqf;
	return rc;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_wakeup_target_t
_dispatch_mach_invoke2(dispatch_object_t dou,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
		uint64_t *owned)
{
	dispatch_mach_t dm = dou._dm;
	dispatch_queue_wakeup_target_t retq = NULL;
	dispatch_queue_t dq = _dispatch_queue_get_current();
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
	dispatch_queue_flags_t dqf = 0;

	if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN) && dmrr &&
			_dispatch_unote_wlh_changed(dmrr, _dispatch_get_wlh())) {
		dqf = _dispatch_queue_atomic_flags_set_orig(dm->_as_dq,
				DSF_WLH_CHANGED);
		if (!(dqf & DSF_WLH_CHANGED)) {
			if (dm->dm_is_xpc) {
				_dispatch_bug_deprecated("Changing target queue "
						"hierarchy after xpc connection was activated");
			} else {
				_dispatch_bug_deprecated("Changing target queue "
						"hierarchy after mach channel was activated");
			}
		}
	}

	// This function performs all mach channel actions. Each action is
	// responsible for verifying that it takes place on the appropriate queue.
	// If the current queue is not the correct queue for this action, the
	// correct queue will be returned and the invoke will be re-driven on that
	// queue.
	//
	// The order of tests here in invoke and in wakeup should be consistent.

	if (unlikely(!dm->ds_is_installed)) {
		// The channel needs to be installed on the kevent queue.
		if (unlikely(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
			return dm->do_targetq;
		}
		_dispatch_mach_install(dm, _dispatch_get_wlh(),
				_dispatch_get_basepri());
		_dispatch_perfmon_workitem_inc();
	}

	if (_dispatch_queue_class_probe(dm)) {
		if (dq == dm->do_targetq) {
drain:
			retq = _dispatch_queue_serial_drain(dm->_as_dq, dic, flags, owned);
		} else {
			retq = dm->do_targetq;
		}
	}

	if (!retq && _dispatch_unote_registered(dmrr)) {
		if (_dispatch_mach_tryarm(dm, &dqf)) {
			_dispatch_unote_resume(dmrr);
			if (dq == dm->do_targetq && !dq->do_targetq && !dmsr->dmsr_tail &&
					(dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) &&
					_dispatch_wlh_should_poll_unote(dmrr)) {
				// try to redrive the drain from under the lock for channels
				// targeting an overcommit root queue to avoid parking
				// when the next message has already fired
				_dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE);
				if (dm->dq_items_tail) goto drain;
			}
		}
	} else {
		dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
	}

	if (dmsr->dmsr_tail) {
		bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt &&
				_dispatch_unote_registered(dmsr));
		if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) ||
				(dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) {
			// The channel has pending messages to send.
			if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) {
				return retq ? retq : &_dispatch_mgr_q;
			}
			dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE;
			if (dq != &_dispatch_mgr_q) {
				send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER;
			}
			_dispatch_mach_send_invoke(dm, flags, send_flags);
		}
		if (!retq) retq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
	} else if (!retq && (dqf & DSF_CANCELED)) {
		// The channel has been cancelled and needs to be uninstalled from the
		// manager queue. After uninstallation, the cancellation handler needs
		// to be delivered to the target queue.
		if (!dm->dm_uninstalled) {
			if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) {
				// waiting for the delivery of a deferred delete event
				return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
			}
			if (dq != &_dispatch_mgr_q) {
				return retq ? retq : &_dispatch_mgr_q;
			}
			_dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL);
			if (unlikely(!dm->dm_uninstalled)) {
				// waiting for the delivery of a deferred delete event
				// or deletion didn't happen because send_invoke couldn't
				// acquire the send lock
				return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
			}
		}
		if (!dm->dm_cancel_handler_called) {
			if (dq != dm->do_targetq) {
				return retq ? retq : dm->do_targetq;
			}
			_dispatch_mach_cancel_invoke(dm, flags);
		}
	}

	return retq;
}

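// Summary of the state machine above: installation, pending queue drain,
// receive-side re-arm, send drain, then cancellation teardown are attempted
// in that fixed order. Each step either runs on the queue it requires or
// returns that queue so _dispatch_queue_class_invoke() re-drives the invoke
// there; _dispatch_mach_wakeup() below deliberately mirrors the same order.
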
void
_dispatch_mach_invoke(dispatch_mach_t dm,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
{
	_dispatch_queue_class_invoke(dm, dic, flags,
			DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS, _dispatch_mach_invoke2);
}

void
_dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags)
{
	// This function determines whether the mach channel needs to be invoked.
	// The order of tests here in probe and in invoke should be consistent.

	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE;
	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq);

	if (!dm->ds_is_installed) {
		// The channel needs to be installed on the kevent queue.
		tq = DISPATCH_QUEUE_WAKEUP_TARGET;
		goto done;
	}

	if (_dispatch_queue_class_probe(dm)) {
		tq = DISPATCH_QUEUE_WAKEUP_TARGET;
		goto done;
	}

	if (_dispatch_lock_is_locked(dmsr->dmsr_state_lock.dul_lock)) {
		// Sending and uninstallation below require the send lock, the channel
		// will be woken up when the lock is dropped <rdar://15132939&15203957>
		goto done;
	}

	if (dmsr->dmsr_tail) {
		bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt &&
				_dispatch_unote_registered(dmsr));
		if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) ||
				(dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) {
			if (unlikely(requires_mgr)) {
				tq = DISPATCH_QUEUE_WAKEUP_MGR;
			} else {
				tq = DISPATCH_QUEUE_WAKEUP_TARGET;
			}
		}
	} else if (dqf & DSF_CANCELED) {
		if (!dm->dm_uninstalled) {
			if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) {
				// waiting for the delivery of a deferred delete event
			} else {
				// The channel needs to be uninstalled from the manager queue
				tq = DISPATCH_QUEUE_WAKEUP_MGR;
			}
		} else if (!dm->dm_cancel_handler_called) {
			// the cancellation handler needs to be delivered to the target
			// queue.
			tq = DISPATCH_QUEUE_WAKEUP_TARGET;
		}
	}

done:
	if ((tq == DISPATCH_QUEUE_WAKEUP_TARGET) &&
			dm->do_targetq == &_dispatch_mgr_q) {
		tq = DISPATCH_QUEUE_WAKEUP_MGR;
	}

	return _dispatch_queue_class_wakeup(dm->_as_dq, qos, flags, tq);
}

static void
_dispatch_mach_sigterm_invoke(void *ctx)
{
	dispatch_mach_t dm = ctx;
	if (!(dm->dq_atomic_flags & DSF_CANCELED)) {
		dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
		_dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
				DISPATCH_MACH_SIGTERM_RECEIVED, NULL, 0,
				dmrr->dmrr_handler_func);
	}
}

static void
_dispatch_xpc_sigterm_merge(dispatch_unote_t du,
		uint32_t flags, uintptr_t data DISPATCH_UNUSED,
		uintptr_t status DISPATCH_UNUSED, pthread_priority_t pp)
{
	dispatch_mach_t dm = _dispatch_wref2ptr(du._du->du_owner_wref);
	uint32_t options = 0;
	if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) &&
			!(flags & EV_DELETE)) {
		options = DU_UNREGISTER_IMMEDIATE_DELETE;
	} else {
		dispatch_assert((flags & EV_ONESHOT) && (flags & EV_DELETE));
		options = DU_UNREGISTER_ALREADY_DELETED;
	}
	_dispatch_unote_unregister(du, options);

	if (!(dm->dq_atomic_flags & DSF_CANCELED)) {
		_dispatch_barrier_async_detached_f(dm->_as_dq, dm,
				_dispatch_mach_sigterm_invoke);
	}
	dx_wakeup(dm, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_MAKE_DIRTY);
}

#pragma mark -
#pragma mark dispatch_mach_msg_t

dispatch_mach_msg_t
dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size,
		dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr)
{
	if (slowpath(size < sizeof(mach_msg_header_t)) ||
			slowpath(destructor && !msg)) {
		DISPATCH_CLIENT_CRASH(size, "Empty message");
	}

	dispatch_mach_msg_t dmsg;
	size_t msg_size = sizeof(struct dispatch_mach_msg_s);
	if (!destructor && os_add_overflow(msg_size,
			(size - sizeof(dmsg->dmsg_msg)), &msg_size)) {
		DISPATCH_CLIENT_CRASH(size, "Message size too large");
	}

	dmsg = _dispatch_object_alloc(DISPATCH_VTABLE(mach_msg), msg_size);
	if (destructor) {
		dmsg->dmsg_msg = msg;
	} else if (msg) {
		memcpy(dmsg->dmsg_buf, msg, size);
	}
	dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
	dmsg->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
	dmsg->dmsg_destructor = destructor;
	dmsg->dmsg_size = size;
	if (msg_ptr) {
		*msg_ptr = _dispatch_mach_msg_get_msg(dmsg);
	}
	return dmsg;
}

void
_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg,
		DISPATCH_UNUSED bool *allow_free)
{
	if (dmsg->dmsg_voucher) {
		_voucher_release(dmsg->dmsg_voucher);
		dmsg->dmsg_voucher = NULL;
	}
	switch (dmsg->dmsg_destructor) {
	case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT:
		break;
	case DISPATCH_MACH_MSG_DESTRUCTOR_FREE:
		free(dmsg->dmsg_msg);
		break;
	case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: {
		mach_vm_size_t vm_size = dmsg->dmsg_size;
		mach_vm_address_t vm_addr = (uintptr_t)dmsg->dmsg_msg;
		(void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(),
				vm_addr, vm_size));
		break;
	}}
}

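/*
 * Destructor example (sketch): a message wrapping a vm_allocate()d buffer
 * is reclaimed by the DESTRUCTOR_VM_DEALLOCATE case above when the last
 * reference goes away:
 *
 *	mach_msg_header_t *hdr; // assumed: points at a vm_allocate()d region
 *	dispatch_mach_msg_t dmsg = dispatch_mach_msg_create(hdr, size,
 *			DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE, NULL);
 *	// ... use dmsg ...
 *	dispatch_release(dmsg); // deallocates [hdr, hdr + size)
 */
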
static inline mach_msg_header_t*
_dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg)
{
	return dmsg->dmsg_destructor ? dmsg->dmsg_msg :
			(mach_msg_header_t*)dmsg->dmsg_buf;
}

mach_msg_header_t*
dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr)
{
	if (size_ptr) {
		*size_ptr = dmsg->dmsg_size;
	}
	return _dispatch_mach_msg_get_msg(dmsg);
}

size_t
_dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz)
{
	size_t offset = 0;
	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
			dx_kind(dmsg), dmsg);
	offset += _dispatch_object_debug_attr(dmsg, buf + offset, bufsiz - offset);
	offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, "
			"msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf);
	mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
	if (hdr->msgh_id) {
		offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ",
				hdr->msgh_id);
	}
	if (hdr->msgh_size) {
		offset += dsnprintf(&buf[offset], bufsiz - offset, "size %u, ",
				hdr->msgh_size);
	}
	if (hdr->msgh_bits) {
		offset += dsnprintf(&buf[offset], bufsiz - offset, "bits <l %u, r %u",
				MACH_MSGH_BITS_LOCAL(hdr->msgh_bits),
				MACH_MSGH_BITS_REMOTE(hdr->msgh_bits));
		if (MACH_MSGH_BITS_OTHER(hdr->msgh_bits)) {
			offset += dsnprintf(&buf[offset], bufsiz - offset, ", o 0x%x",
					MACH_MSGH_BITS_OTHER(hdr->msgh_bits));
		}
		offset += dsnprintf(&buf[offset], bufsiz - offset, ">, ");
	}
	if (hdr->msgh_local_port && hdr->msgh_remote_port) {
		offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x, "
				"remote 0x%x", hdr->msgh_local_port, hdr->msgh_remote_port);
	} else if (hdr->msgh_local_port) {
		offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x",
				hdr->msgh_local_port);
	} else if (hdr->msgh_remote_port) {
		offset += dsnprintf(&buf[offset], bufsiz - offset, "remote 0x%x",
				hdr->msgh_remote_port);
	} else {
		offset += dsnprintf(&buf[offset], bufsiz - offset, "no ports");
	}
	offset += dsnprintf(&buf[offset], bufsiz - offset, " } }");
	return offset;
}

DISPATCH_ALWAYS_INLINE
static dispatch_queue_t
_dispatch_mach_msg_context_async_reply_queue(void *msg_context)
{
	if (DISPATCH_MACH_XPC_SUPPORTS_ASYNC_REPLIES(_dispatch_mach_xpc_hooks)) {
		return _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue(
				msg_context);
	}
	return NULL;
}

static dispatch_continuation_t
_dispatch_mach_msg_async_reply_wrap(dispatch_mach_msg_t dmsg,
		dispatch_mach_t dm)
{
	_dispatch_retain(dm); // Released in _dispatch_mach_msg_async_reply_invoke()
	dispatch_continuation_t dc = _dispatch_continuation_alloc();
	dc->do_vtable = DC_VTABLE(MACH_ASYNC_REPLY);
	dc->dc_data = dmsg;
	dc->dc_other = dm;
	dc->dc_priority = DISPATCH_NO_PRIORITY;
	dc->dc_voucher = DISPATCH_NO_VOUCHER;
	return dc;
}

void
_dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc,
		DISPATCH_UNUSED dispatch_invoke_context_t dic,
		dispatch_invoke_flags_t flags)
{
	// _dispatch_mach_msg_invoke_with_mach() releases the reference on dmsg
	// taken by _dispatch_mach_msg_async_reply_wrap() after handling it.
	dispatch_mach_msg_t dmsg = dc->dc_data;
	dispatch_mach_t dm = dc->dc_other;
	_dispatch_mach_msg_invoke_with_mach(dmsg,
			flags | DISPATCH_INVOKE_ASYNC_REPLY, dm);

	// Balances _dispatch_mach_msg_async_reply_wrap
	_dispatch_release(dc->dc_other);

	_dispatch_continuation_free(dc);
}

#pragma mark -
#pragma mark dispatch_mig_server

mach_msg_return_t
dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz,
		dispatch_mig_callback_t callback)
{
	mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT
			| MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX)
			| MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)
			| MACH_RCV_VOUCHER;
	mach_msg_options_t tmp_options;
	mig_reply_error_t *bufTemp, *bufRequest, *bufReply;
	mach_msg_return_t kr = 0;
	uint64_t assertion_token = 0;
	uint32_t cnt = 1000; // do not stall out serial queues
	boolean_t demux_success;
	bool received = false;
	size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE;
	dispatch_source_refs_t dr = ds->ds_refs;

	bufRequest = alloca(rcv_size);
	bufRequest->RetCode = 0;
	for (mach_vm_address_t p = mach_vm_trunc_page(bufRequest + vm_page_size);
			p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) {
		*(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
	}

	bufReply = alloca(rcv_size);
	bufReply->Head.msgh_size = 0;
	for (mach_vm_address_t p = mach_vm_trunc_page(bufReply + vm_page_size);
			p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) {
		*(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
	}

#if DISPATCH_DEBUG
	options |= MACH_RCV_LARGE; // rdar://problem/8422992
#endif
	tmp_options = options;
	// XXX FIXME -- change this to not starve out the target queue
	for (;;) {
		if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) {
			options &= ~MACH_RCV_MSG;
			tmp_options &= ~MACH_RCV_MSG;

			if (!(tmp_options & MACH_SEND_MSG)) {
				goto out;
			}
		}
		kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size,
				(mach_msg_size_t)rcv_size, (mach_port_t)dr->du_ident, 0, 0);

		tmp_options = options;

		if (slowpath(kr)) {
			switch (kr) {
			case MACH_SEND_INVALID_DEST:
			case MACH_SEND_TIMED_OUT:
				if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
					mach_msg_destroy(&bufReply->Head);
				}
				break;
			case MACH_RCV_TIMED_OUT:
				// Don't return an error if a message was sent this time or
				// a message was successfully received previously
				// rdar://problems/7363620&7791738
				if (bufReply->Head.msgh_remote_port || received) {
					kr = MACH_MSG_SUCCESS;
				}
				break;
			case MACH_RCV_INVALID_NAME:
				break;
#if DISPATCH_DEBUG
			case MACH_RCV_TOO_LARGE:
				// receive messages that are too large and log their id and size
				// rdar://problem/8422992
				tmp_options &= ~MACH_RCV_LARGE;
				size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE;
				void *large_buf = malloc(large_size);
				if (large_buf) {
					rcv_size = large_size;
					bufReply = large_buf;
				}
				if (!mach_msg(&bufReply->Head, tmp_options, 0,
						(mach_msg_size_t)rcv_size,
						(mach_port_t)dr->du_ident, 0, 0)) {
					_dispatch_log("BUG in libdispatch client: "
							"dispatch_mig_server received message larger than "
							"requested size %zd: id = 0x%x, size = %d",
							maxmsgsz, bufReply->Head.msgh_id,
							bufReply->Head.msgh_size);
				}
				if (large_buf) {
					free(large_buf);
				}
				// fall through
#endif
			default:
				_dispatch_bug_mach_client(
						"dispatch_mig_server: mach_msg() failed", kr);
				break;
			}
			goto out;
		}

		if (!(tmp_options & MACH_RCV_MSG)) {
			goto out;
		}
		received = true;

		if (assertion_token) {
#if DISPATCH_USE_IMPORTANCE_ASSERTION
			int r = proc_importance_assertion_complete(assertion_token);
			(void)dispatch_assume_zero(r);
#endif
			assertion_token = 0;
		}

		bufTemp = bufRequest;
		bufRequest = bufReply;
		bufReply = bufTemp;

#if DISPATCH_USE_IMPORTANCE_ASSERTION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head,
				NULL, &assertion_token);
		if (r && slowpath(r != EIO)) {
			(void)dispatch_assume_zero(r);
		}
#pragma clang diagnostic pop
#endif
		_voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head));
		demux_success = callback(&bufRequest->Head, &bufReply->Head);

		if (!demux_success) {
			// destroy the request - but not the reply port
			bufRequest->Head.msgh_remote_port = 0;
			mach_msg_destroy(&bufRequest->Head);
		} else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
			// if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode
			// is present
			if (slowpath(bufReply->RetCode)) {
				if (bufReply->RetCode == MIG_NO_REPLY) {
					continue;
				}

				// destroy the request - but not the reply port
				bufRequest->Head.msgh_remote_port = 0;
				mach_msg_destroy(&bufRequest->Head);
			}
		}

		if (bufReply->Head.msgh_remote_port) {
			tmp_options |= MACH_SEND_MSG;
			if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) !=
					MACH_MSG_TYPE_MOVE_SEND_ONCE) {
				tmp_options |= MACH_SEND_TIMEOUT;
			}
		}
	}

out:
	if (assertion_token) {
#if DISPATCH_USE_IMPORTANCE_ASSERTION
		int r = proc_importance_assertion_complete(assertion_token);
		(void)dispatch_assume_zero(r);
#endif
	}

	return kr;
}

#pragma mark -
#pragma mark dispatch_mach_debug

static size_t
_dispatch_mach_debug_attr(dispatch_mach_t dm, char *buf, size_t bufsiz)
{
	dispatch_queue_t target = dm->do_targetq;
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;

	return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, "
			"send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, "
			"send state = %016llx, disconnected = %d, canceled = %d ",
			target && target->dq_label ? target->dq_label : "", target,
			(mach_port_t)dmrr->du_ident, dmsr->dmsr_send,
			(mach_port_t)dmsr->du_ident,
			dmsr->dmsr_notification_armed ? " (armed)" : "",
			dmsr->dmsr_checkin_port, dmsr->dmsr_checkin ? " (pending)" : "",
			dmsr->dmsr_state, dmsr->dmsr_disconnect_cnt,
			(bool)(dm->dq_atomic_flags & DSF_CANCELED));
}

size_t
_dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz)
{
	size_t offset = 0;
	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
			dm->dq_label && !dm->dm_cancel_handler_called ? dm->dq_label :
			dx_kind(dm), dm);
	offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset);
	offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset);
	offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
	return offset;
}

#endif /* HAVE_MACH */