/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"
#if HAVE_MACH

#define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1
#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2
#define DISPATCH_MACH_WAIT_FOR_REPLY 0x4
#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8
#define DISPATCH_MACH_ASYNC_REPLY 0x10
#define DISPATCH_MACH_OPTIONS_MASK 0xffff

#define DM_SEND_STATUS_SUCCESS 0x1
#define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2

DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t,
	DM_SEND_INVOKE_NONE = 0x0,
	DM_SEND_INVOKE_MAKE_DIRTY = 0x1,
	DM_SEND_INVOKE_NEEDS_BARRIER = 0x2,
	DM_SEND_INVOKE_CANCEL = 0x4,
	DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8,
	DM_SEND_INVOKE_IMMEDIATE_SEND = 0x10,
);
#define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \
		((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND)

static inline mach_msg_option_t _dispatch_mach_checkin_options(void);
static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou);
static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou);
static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm,
		mach_port_t local_port, mach_port_t remote_port);
static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr, mach_port_t local_port);
static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected(
		dispatch_object_t dou, dispatch_mach_reply_refs_t dmr,
		dispatch_mach_reason_t reason);
static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm,
		dispatch_object_t dou);
static inline mach_msg_header_t* _dispatch_mach_msg_get_msg(
		dispatch_mach_msg_t dmsg);
static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou,
		dispatch_qos_t qos);
static void _dispatch_mach_cancel(dispatch_mach_t dm);
static void _dispatch_mach_push_send_barrier_drain(dispatch_mach_t dm,
		dispatch_qos_t qos);
static void _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm,
		dispatch_mach_msg_t dmsg);
static void _dispatch_mach_push_async_reply_msg(dispatch_mach_t dm,
		dispatch_mach_msg_t dmsg, dispatch_queue_t drq);
static dispatch_queue_t _dispatch_mach_msg_context_async_reply_queue(
		void *ctxt);
static dispatch_continuation_t _dispatch_mach_msg_async_reply_wrap(
		dispatch_mach_msg_t dmsg, dispatch_mach_t dm);
static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm);
static void _dispatch_mach_notification_kevent_register(dispatch_mach_t dm,
		mach_port_t send);

// For tests only.
DISPATCH_EXPORT void _dispatch_mach_hooks_install_default(void);

dispatch_source_t
_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp,
		const struct dispatch_continuation_s *dc)
{
	dispatch_source_t ds;
	ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct,
			recvp, 0, &_dispatch_mgr_q);
	os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER],
			(dispatch_continuation_t)dc, relaxed);
	return ds;
}

#pragma mark -
#pragma mark dispatch to XPC callbacks

static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks;

// Default dmxh_direct_message_handler callback that does not handle
// messages inline.
static bool
_dispatch_mach_xpc_no_handle_message(
		void *_Nullable context DISPATCH_UNUSED,
		dispatch_mach_reason_t reason DISPATCH_UNUSED,
		dispatch_mach_msg_t message DISPATCH_UNUSED,
		mach_error_t error DISPATCH_UNUSED)
{
	return false;
}

// Default dmxh_msg_context_reply_queue callback that returns a NULL queue.
static dispatch_queue_t
_dispatch_mach_msg_context_no_async_reply_queue(
		void *_Nonnull msg_context DISPATCH_UNUSED)
{
	return NULL;
}

// Default dmxh_async_reply_handler callback that crashes when called.
DISPATCH_NORETURN
static void
_dispatch_mach_default_async_reply_handler(void *context DISPATCH_UNUSED,
		dispatch_mach_reason_t reason DISPATCH_UNUSED,
		dispatch_mach_msg_t message DISPATCH_UNUSED)
{
	DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks,
			"_dispatch_mach_default_async_reply_handler called");
}

// Default dmxh_enable_sigterm_notification callback that enables delivery of
// SIGTERM notifications (for backwards compatibility).
static bool
_dispatch_mach_enable_sigterm(void *_Nullable context DISPATCH_UNUSED)
{
	return true;
}

// Callbacks from dispatch to XPC. The default is to not support any callbacks.
static const struct dispatch_mach_xpc_hooks_s _dispatch_mach_xpc_hooks_default
		= {
	.version = DISPATCH_MACH_XPC_HOOKS_VERSION,
	.dmxh_direct_message_handler = &_dispatch_mach_xpc_no_handle_message,
	.dmxh_msg_context_reply_queue =
			&_dispatch_mach_msg_context_no_async_reply_queue,
	.dmxh_async_reply_handler = &_dispatch_mach_default_async_reply_handler,
	.dmxh_enable_sigterm_notification = &_dispatch_mach_enable_sigterm,
};

static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks
		= &_dispatch_mach_xpc_hooks_default;

void
dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks)
{
	if (!os_atomic_cmpxchg(&_dispatch_mach_xpc_hooks,
			&_dispatch_mach_xpc_hooks_default, hooks, relaxed)) {
		DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks,
				"dispatch_mach_hooks_install_4libxpc called twice");
	}
}

void
_dispatch_mach_hooks_install_default(void)
{
	os_atomic_store(&_dispatch_mach_xpc_hooks,
			&_dispatch_mach_xpc_hooks_default, relaxed);
}
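
/*
 * Illustrative sketch (not part of libdispatch): how a client in the
 * position of libxpc might install these hooks. The "my_*" handler names
 * are hypothetical; only dispatch_mach_hooks_install_4libxpc() and the
 * dispatch_mach_xpc_hooks_s fields used above come from this file.
 *
 *	static const struct dispatch_mach_xpc_hooks_s my_hooks = {
 *		.version = DISPATCH_MACH_XPC_HOOKS_VERSION,
 *		.dmxh_direct_message_handler = &my_direct_message_handler,
 *		.dmxh_msg_context_reply_queue = &my_msg_context_reply_queue,
 *		.dmxh_async_reply_handler = &my_async_reply_handler,
 *		.dmxh_enable_sigterm_notification = &my_enable_sigterm,
 *	};
 *
 *	// Must be called at most once per process: a second call fails the
 *	// os_atomic_cmpxchg() guard above and crashes deliberately.
 *	dispatch_mach_hooks_install_4libxpc(&my_hooks);
 */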

#pragma mark -
#pragma mark dispatch_mach_t

static dispatch_mach_t
_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context,
		dispatch_mach_handler_function_t handler, bool handler_is_block,
		bool is_xpc)
{
	dispatch_mach_recv_refs_t dmrr;
	dispatch_mach_send_refs_t dmsr;
	dispatch_mach_t dm;
	dm = _dispatch_object_alloc(DISPATCH_VTABLE(mach),
			sizeof(struct dispatch_mach_s));
	_dispatch_queue_init(dm->_as_dq, DQF_LEGACY, 1,
			DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER);

	dm->dq_label = label;
	dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds
	dm->dm_is_xpc = is_xpc;

	dmrr = dux_create(&_dispatch_mach_type_recv, 0, 0)._dmrr;
	dispatch_assert(dmrr->du_is_direct);
	dmrr->du_owner_wref = _dispatch_ptr2wref(dm);
	dmrr->dmrr_handler_func = handler;
	dmrr->dmrr_handler_ctxt = context;
	dmrr->dmrr_handler_is_block = handler_is_block;
	dm->dm_recv_refs = dmrr;

	dmsr = dux_create(&_dispatch_mach_type_send, 0,
			DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD)._dmsr;
	dmsr->du_owner_wref = _dispatch_ptr2wref(dm);
	dm->dm_send_refs = dmsr;

	if (slowpath(!q)) {
		q = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true);
	} else {
		_dispatch_retain(q);
	}
	dm->do_targetq = q;
	_dispatch_object_debug(dm, "%s", __func__);
	return dm;
}

dispatch_mach_t
dispatch_mach_create(const char *label, dispatch_queue_t q,
		dispatch_mach_handler_t handler)
{
	dispatch_block_t bb = _dispatch_Block_copy((void*)handler);
	return _dispatch_mach_create(label, q, bb,
			(dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true,
			false);
}
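
/*
 * Minimal usage sketch (illustrative, not part of this file): creating a
 * channel with the block-based entry point above. The queue label and the
 * handler body are hypothetical; the reason constants come from the private
 * mach channel SPI.
 *
 *	dispatch_queue_t q = dispatch_queue_create("com.example.q", NULL);
 *	dispatch_mach_t dm = dispatch_mach_create("com.example.channel", q,
 *			^(dispatch_mach_reason_t reason, dispatch_mach_msg_t msg,
 *			mach_error_t error) {
 *		switch (reason) {
 *		case DISPATCH_MACH_MESSAGE_RECEIVED:
 *			// inspect dispatch_mach_msg_get_msg(msg, NULL) here
 *			break;
 *		case DISPATCH_MACH_DISCONNECTED:
 *			// guaranteed to be the last event for a connection
 *			break;
 *		default:
 *			break;
 *		}
 *	});
 */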

dispatch_mach_t
dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context,
		dispatch_mach_handler_function_t handler)
{
	return _dispatch_mach_create(label, q, context, handler, false, false);
}

dispatch_mach_t
dispatch_mach_create_4libxpc(const char *label, dispatch_queue_t q,
		void *context, dispatch_mach_handler_function_t handler)
{
	return _dispatch_mach_create(label, q, context, handler, false, true);
}

void
_dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free)
{
	_dispatch_object_debug(dm, "%s", __func__);
	_dispatch_unote_dispose(dm->dm_recv_refs);
	dm->dm_recv_refs = NULL;
	_dispatch_unote_dispose(dm->dm_send_refs);
	dm->dm_send_refs = NULL;
	if (dm->dm_xpc_term_refs) {
		_dispatch_unote_dispose(dm->dm_xpc_term_refs);
		dm->dm_xpc_term_refs = NULL;
	}
	_dispatch_queue_destroy(dm->_as_dq, allow_free);
}

void
dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive,
		mach_port_t send, dispatch_mach_msg_t checkin)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	uint32_t disconnect_cnt;

	if (MACH_PORT_VALID(receive)) {
		dm->dm_recv_refs->du_ident = receive;
		_dispatch_retain(dm); // the reference the manager queue holds
	}
	dmsr->dmsr_send = send;
	if (MACH_PORT_VALID(send)) {
		if (checkin) {
			dispatch_mach_msg_t dmsg = checkin;
			dispatch_retain(dmsg);
			dmsg->dmsg_options = _dispatch_mach_checkin_options();
			dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg);
		}
		dmsr->dmsr_checkin = checkin;
	}
	dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 ==
			DISPATCH_MACH_NEVER_INSTALLED);
	disconnect_cnt = os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, release);
	if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) {
		DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected");
	}
	_dispatch_object_debug(dm, "%s", __func__);
	return dispatch_activate(dm);
}
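
/*
 * Illustrative sketch (assumptions flagged inline): connecting the channel
 * created above to a pair of ports. The port variables are hypothetical;
 * dispatch_mach_connect() is the function defined directly above.
 *
 *	mach_port_t recv_port; // receive right owned by this process
 *	mach_port_t send_port; // send right to the remote peer
 *	// ... obtain the rights, e.g. via mach_port_allocate() and a
 *	// bootstrap lookup ...
 *
 *	// The optional checkin message is sent first once the channel can
 *	// send; passing NULL skips the checkin protocol entirely.
 *	dispatch_mach_connect(dm, recv_port, send_port, NULL);
 *
 * A channel may be connected only once: a second call fails the
 * dmsr_disconnect_cnt check above and crashes with
 * "Channel already connected".
 */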

static inline bool
_dispatch_mach_reply_tryremove(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr)
{
	bool removed;
	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
	if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
		TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
	}
	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
	return removed;
}

DISPATCH_NOINLINE
static void
_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr, uint32_t options)
{
	dispatch_mach_msg_t dmsgr = NULL;
	bool disconnected = (options & DU_UNREGISTER_DISCONNECTED);
	if (options & DU_UNREGISTER_REPLY_REMOVE) {
		_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
		if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
			DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration");
		}
		TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
		_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
	}
	if (disconnected) {
		dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr,
				DISPATCH_MACH_DISCONNECTED);
	} else if (dmr->dmr_voucher) {
		_voucher_release(dmr->dmr_voucher);
		dmr->dmr_voucher = NULL;
	}
	_dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p",
			_dispatch_mach_reply_get_reply_port((mach_port_t)dmr->du_ident),
			disconnected ? " (disconnected)" : "", dmr->dmr_ctxt);
	if (dmsgr) {
		return _dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
	}
}

DISPATCH_NOINLINE
static bool
_dispatch_mach_reply_list_remove(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr)
{
	// dmsr_replies_lock must be held by the caller.
	bool removed = false;
	if (likely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
		TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
		removed = true;
	}
	return removed;
}

DISPATCH_NOINLINE
static bool
_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr, uint32_t options)
{
	dispatch_assert(!_TAILQ_IS_ENQUEUED(dmr, dmr_list));

	bool disconnected = (options & DU_UNREGISTER_DISCONNECTED);
	_dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p",
			(mach_port_t)dmr->du_ident, disconnected ? " (disconnected)" : "",
			dmr->dmr_ctxt);
	if (!_dispatch_unote_unregister(dmr, options)) {
		_dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]",
				(mach_port_t)dmr->du_ident, dmr);
		dispatch_assert(options == DU_UNREGISTER_DISCONNECTED);
		return false;
	}

	dispatch_mach_msg_t dmsgr = NULL;
	dispatch_queue_t drq = NULL;
	if (disconnected) {
		// The next call is guaranteed to always transfer or consume the
		// voucher in the dmr, if there is one.
		dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr,
				dmr->dmr_async_reply ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED
				: DISPATCH_MACH_DISCONNECTED);
		if (dmr->dmr_ctxt) {
			drq = _dispatch_mach_msg_context_async_reply_queue(dmr->dmr_ctxt);
		}
		dispatch_assert(dmr->dmr_voucher == NULL);
	} else if (dmr->dmr_voucher) {
		_voucher_release(dmr->dmr_voucher);
		dmr->dmr_voucher = NULL;
	}
	_dispatch_unote_dispose(dmr);

	if (dmsgr) {
		if (drq) {
			_dispatch_mach_push_async_reply_msg(dm, dmsgr, drq);
		} else {
			_dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
		}
	}
	return true;
}

DISPATCH_NOINLINE
static void
_dispatch_mach_reply_waiter_register(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr, mach_port_t reply_port,
		dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts)
{
	dmr->du_owner_wref = _dispatch_ptr2wref(dm);
	dmr->du_wlh = NULL;
	dmr->du_filter = EVFILT_MACHPORT;
	dmr->du_ident = reply_port;
	if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) {
		_dispatch_mach_reply_mark_reply_port_owned(dmr);
	} else {
		if (dmsg->dmsg_voucher) {
			dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher);
		}
		dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority);
		// make reply context visible to leaks rdar://11777199
		dmr->dmr_ctxt = dmsg->do_ctxt;
	}

	_dispatch_debug("machport[0x%08x]: registering for sync reply, ctxt %p",
			reply_port, dmsg->do_ctxt);
	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
	if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
		DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev,
				"Reply already registered");
	}
	TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
}

DISPATCH_NOINLINE
static void
_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port,
		dispatch_mach_msg_t dmsg)
{
	dispatch_mach_reply_refs_t dmr;
	dispatch_priority_t mpri, pri, overcommit;
	dispatch_wlh_t wlh;

	dmr = dux_create(&_dispatch_mach_type_reply, reply_port, 0)._dmr;
	dispatch_assert(dmr->du_is_direct);
	dmr->du_owner_wref = _dispatch_ptr2wref(dm);
	if (dmsg->dmsg_voucher) {
		dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher);
	}
	dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority);
	// make reply context visible to leaks rdar://11777199
	dmr->dmr_ctxt = dmsg->do_ctxt;

	dispatch_queue_t drq = NULL;
	if (dmsg->dmsg_options & DISPATCH_MACH_ASYNC_REPLY) {
		dmr->dmr_async_reply = true;
		drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
	}

	if (!drq) {
		pri = dm->dq_priority;
		wlh = dm->dm_recv_refs->du_wlh;
	} else if (dx_type(drq) == DISPATCH_QUEUE_NETWORK_EVENT_TYPE) {
		pri = DISPATCH_PRIORITY_FLAG_MANAGER;
		wlh = (dispatch_wlh_t)drq;
	} else if (dx_hastypeflag(drq, QUEUE_ROOT)) {
		pri = drq->dq_priority;
		wlh = DISPATCH_WLH_ANON;
	} else if (drq == dm->do_targetq) {
		pri = dm->dq_priority;
		wlh = dm->dm_recv_refs->du_wlh;
	} else if (!(pri = _dispatch_queue_compute_priority_and_wlh(drq, &wlh))) {
		pri = drq->dq_priority;
		wlh = DISPATCH_WLH_ANON;
	}
	if (pri & DISPATCH_PRIORITY_REQUESTED_MASK) {
		overcommit = pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
		pri &= DISPATCH_PRIORITY_REQUESTED_MASK;
		mpri = _dispatch_priority_from_pp_strip_flags(dmsg->dmsg_priority);
		if (pri < mpri) pri = mpri;
		pri |= overcommit;
	} else {
		pri = DISPATCH_PRIORITY_FLAG_MANAGER;
	}

	_dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p",
			reply_port, dmsg->do_ctxt);
	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
	if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
		DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev,
				"Reply already registered");
	}
	TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);

	if (!_dispatch_unote_register(dmr, wlh, pri)) {
		_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
		_dispatch_mach_reply_list_remove(dm, dmr);
		_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
		_dispatch_mach_reply_kevent_unregister(dm, dmr,
				DU_UNREGISTER_DISCONNECTED);
	}
}

#pragma mark -
#pragma mark dispatch_mach_msg

DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline bool
_dispatch_use_mach_special_reply_port(void)
{
#if DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE
	return true;
#else
#define thread_get_special_reply_port() ({__builtin_trap(); MACH_PORT_NULL;})
	return false;
#endif
}
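
/*
 * Note on the #define above: when the special reply port support is
 * compiled out, any accidental call to thread_get_special_reply_port() is
 * replaced with a statement expression that traps at runtime, so a
 * misrouted code path fails loudly instead of handing out an invalid port.
 * The callers below are guarded by _dispatch_use_mach_special_reply_port(),
 * which the compiler can constant-fold, so the trap is unreachable in
 * correct builds.
 */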

static mach_port_t
_dispatch_get_thread_reply_port(void)
{
	mach_port_t reply_port, mrp;
	if (_dispatch_use_mach_special_reply_port()) {
		mrp = _dispatch_get_thread_special_reply_port();
	} else {
		mrp = _dispatch_get_thread_mig_reply_port();
	}
	if (mrp) {
		reply_port = mrp;
		_dispatch_debug("machport[0x%08x]: borrowed thread sync reply port",
				reply_port);
	} else {
		if (_dispatch_use_mach_special_reply_port()) {
			reply_port = thread_get_special_reply_port();
			_dispatch_set_thread_special_reply_port(reply_port);
		} else {
			reply_port = mach_reply_port();
			_dispatch_set_thread_mig_reply_port(reply_port);
		}
		if (unlikely(!MACH_PORT_VALID(reply_port))) {
			DISPATCH_CLIENT_CRASH(_dispatch_use_mach_special_reply_port(),
					"Unable to allocate reply port, possible port leak");
		}
		_dispatch_debug("machport[0x%08x]: allocated thread sync reply port",
				reply_port);
	}
	_dispatch_debug_machport(reply_port);
	return reply_port;
}

static void
_dispatch_clear_thread_reply_port(mach_port_t reply_port)
{
	mach_port_t mrp;
	if (_dispatch_use_mach_special_reply_port()) {
		mrp = _dispatch_get_thread_special_reply_port();
	} else {
		mrp = _dispatch_get_thread_mig_reply_port();
	}
	if (reply_port != mrp) {
		if (mrp) {
			_dispatch_debug("machport[0x%08x]: did not clear thread sync reply "
					"port (found 0x%08x)", reply_port, mrp);
		}
		return;
	}
	if (_dispatch_use_mach_special_reply_port()) {
		_dispatch_set_thread_special_reply_port(MACH_PORT_NULL);
	} else {
		_dispatch_set_thread_mig_reply_port(MACH_PORT_NULL);
	}
	_dispatch_debug_machport(reply_port);
	_dispatch_debug("machport[0x%08x]: cleared thread sync reply port",
			reply_port);
}

static void
_dispatch_set_thread_reply_port(mach_port_t reply_port)
{
	_dispatch_debug_machport(reply_port);
	mach_port_t mrp;
	if (_dispatch_use_mach_special_reply_port()) {
		mrp = _dispatch_get_thread_special_reply_port();
	} else {
		mrp = _dispatch_get_thread_mig_reply_port();
	}
	if (mrp) {
		kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port,
				MACH_PORT_RIGHT_RECEIVE, -1);
		DISPATCH_VERIFY_MIG(kr);
		dispatch_assume_zero(kr);
		_dispatch_debug("machport[0x%08x]: deallocated sync reply port "
				"(found 0x%08x)", reply_port, mrp);
	} else {
		if (_dispatch_use_mach_special_reply_port()) {
			_dispatch_set_thread_special_reply_port(reply_port);
		} else {
			_dispatch_set_thread_mig_reply_port(reply_port);
		}
		_dispatch_debug("machport[0x%08x]: restored thread sync reply port",
				reply_port);
	}
}

static inline mach_port_t
_dispatch_mach_msg_get_remote_port(dispatch_object_t dou)
{
	mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
	mach_port_t remote = hdr->msgh_remote_port;
	return remote;
}

static inline mach_port_t
_dispatch_mach_msg_get_reply_port(dispatch_object_t dou)
{
	mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
	mach_port_t local = hdr->msgh_local_port;
	if (!MACH_PORT_VALID(local) || MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) !=
			MACH_MSG_TYPE_MAKE_SEND_ONCE) return MACH_PORT_NULL;
	return local;
}

static inline void
_dispatch_mach_msg_set_reason(dispatch_mach_msg_t dmsg, mach_error_t err,
		unsigned long reason)
{
	dispatch_assert_zero(reason & ~(unsigned long)code_emask);
	dmsg->dmsg_error = ((err || !reason) ? err :
			err_local|err_sub(0x3e0)|(mach_error_t)reason);
}

static inline unsigned long
_dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr)
{
	mach_error_t err = dmsg->dmsg_error;

	if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) {
		*err_ptr = 0;
		return err_get_code(err);
	}
	*err_ptr = err;
	return err ? DISPATCH_MACH_MESSAGE_SEND_FAILED : DISPATCH_MACH_MESSAGE_SENT;
}
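
/*
 * Worked example of the reason encoding above (illustrative): when there is
 * no real send error, a reason such as DISPATCH_MACH_MESSAGE_RECEIVED is
 * packed into dmsg_error as err_local | err_sub(0x3e0) | reason. err_local
 * marks the value as locally generated and the 0x3e0 subsystem tags it as a
 * dispatch reason rather than a kernel error, so
 * _dispatch_mach_msg_get_reason() can unambiguously recover the reason with
 * err_get_code() and report err = 0. Any other value is treated as a
 * genuine mach_error_t from the send path.
 */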

static inline dispatch_mach_msg_t
_dispatch_mach_msg_create_recv(mach_msg_header_t *hdr, mach_msg_size_t siz,
		dispatch_mach_reply_refs_t dmr, uint32_t flags)
{
	dispatch_mach_msg_destructor_t destructor;
	dispatch_mach_msg_t dmsg;
	voucher_t voucher;
	pthread_priority_t pp;

	if (dmr) {
		_voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher
		pp = _dispatch_priority_to_pp(dmr->dmr_priority);
		voucher = dmr->dmr_voucher;
		dmr->dmr_voucher = NULL; // transfer reference
	} else {
		voucher = voucher_create_with_mach_msg(hdr);
		pp = _dispatch_priority_compute_propagated(
				_voucher_get_priority(voucher), 0);
	}

	destructor = (flags & DISPATCH_EV_MSG_NEEDS_FREE) ?
			DISPATCH_MACH_MSG_DESTRUCTOR_FREE :
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT;
	dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
	if (!(flags & DISPATCH_EV_MSG_NEEDS_FREE)) {
		_dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move,
				(uint64_t)hdr, (uint64_t)dmsg->dmsg_buf);
	}
	dmsg->dmsg_voucher = voucher;
	dmsg->dmsg_priority = pp;
	dmsg->do_ctxt = dmr ? dmr->dmr_ctxt : NULL;
	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED);
	_dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg);
	_dispatch_voucher_ktrace_dmsg_push(dmsg);
	return dmsg;
}

void
_dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags,
		mach_msg_header_t *hdr, mach_msg_size_t siz)
{
	// This function is very similar to what _dispatch_source_merge_evt does,
	// but it can't reuse that code: handling the message must be protected
	// by the internal refcount held between the first half and the trailer
	// of what _dispatch_source_merge_evt does.

	dispatch_mach_recv_refs_t dmrr = du._dmrr;
	dispatch_mach_t dm = _dispatch_wref2ptr(dmrr->du_owner_wref);
	dispatch_queue_flags_t dqf;
	dispatch_mach_msg_t dmsg;

	dispatch_assert(_dispatch_unote_needs_rearm(du));

	if (flags & EV_VANISHED) {
		DISPATCH_CLIENT_CRASH(du._du->du_ident,
				"Unexpected EV_VANISHED (do not destroy random mach ports)");
	}

	// once we modify the queue atomic flags below, it will allow concurrent
	// threads running _dispatch_mach_invoke2 to dispose of the source,
	// so we can't safely borrow the reference we get from the muxnote udata
	// anymore, and need our own
	dispatch_wakeup_flags_t wflags = DISPATCH_WAKEUP_CONSUME_2;
	_dispatch_retain_2(dm); // rdar://20382435

	if (unlikely((flags & EV_ONESHOT) && !(flags & EV_DELETE))) {
		dqf = _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq,
				DSF_DEFERRED_DELETE, DSF_ARMED);
		_dispatch_debug("kevent-source[%p]: deferred delete oneshot kevent[%p]",
				dm, dmrr);
	} else if (unlikely(flags & (EV_ONESHOT | EV_DELETE))) {
		_dispatch_source_refs_unregister(dm->_as_ds,
				DU_UNREGISTER_ALREADY_DELETED);
		dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
		_dispatch_debug("kevent-source[%p]: deleted kevent[%p]", dm, dmrr);
	} else {
		dqf = _dispatch_queue_atomic_flags_clear(dm->_as_dq, DSF_ARMED);
		_dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", dm, dmrr);
	}

	_dispatch_debug_machport(hdr->msgh_remote_port);
	_dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x",
			hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);

	if (dqf & DSF_CANCELED) {
		_dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x",
				hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);
		mach_msg_destroy(hdr);
		if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
			free(hdr);
		}
		return dx_wakeup(dm, 0, wflags | DISPATCH_WAKEUP_MAKE_DIRTY);
	}

	// Once the mach channel disarming is visible, cancellation will switch to
	// immediate deletion. If we're preempted here, then the whole cancellation
	// sequence may be complete by the time we really enqueue the message.
	//
	// _dispatch_mach_msg_invoke_with_mach() is responsible for filtering it out
	// to keep the promise that DISPATCH_MACH_DISCONNECTED is the last
	// event sent.

	dmsg = _dispatch_mach_msg_create_recv(hdr, siz, NULL, flags);
	_dispatch_mach_handle_or_push_received_msg(dm, dmsg);
	return _dispatch_release_2_tailcall(dm);
}

void
_dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags,
		mach_msg_header_t *hdr, mach_msg_size_t siz)
{
	dispatch_mach_reply_refs_t dmr = du._dmr;
	dispatch_mach_t dm = _dispatch_wref2ptr(dmr->du_owner_wref);
	bool canceled = (_dispatch_queue_atomic_flags(dm->_as_dq) & DSF_CANCELED);
	dispatch_mach_msg_t dmsg = NULL;

	_dispatch_debug_machport(hdr->msgh_remote_port);
	_dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x",
			hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);

	if (!canceled) {
		dmsg = _dispatch_mach_msg_create_recv(hdr, siz, dmr, flags);
	}

	if (dmsg) {
		dispatch_queue_t drq = NULL;
		if (dmsg->do_ctxt) {
			drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
		}
		if (drq) {
			_dispatch_mach_push_async_reply_msg(dm, dmsg, drq);
		} else {
			_dispatch_mach_handle_or_push_received_msg(dm, dmsg);
		}
	} else {
		_dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x",
				hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);
		mach_msg_destroy(hdr);
		if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
			free(hdr);
		}
	}

	dispatch_wakeup_flags_t wflags = 0;
	uint32_t options = DU_UNREGISTER_IMMEDIATE_DELETE;
	if (canceled) {
		options |= DU_UNREGISTER_DISCONNECTED;
	}

	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
	bool removed = _dispatch_mach_reply_list_remove(dm, dmr);
	dispatch_assert(removed);
	if (TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies) &&
			(dm->dm_send_refs->dmsr_disconnect_cnt ||
			(dm->dq_atomic_flags & DSF_CANCELED))) {
		// When the list is empty, _dispatch_mach_disconnect() may release the
		// last reference count on the Mach channel. To avoid this, take our
		// own reference before releasing the lock.
		wflags = DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2;
		_dispatch_retain_2(dm);
	}
	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);

	bool result = _dispatch_mach_reply_kevent_unregister(dm, dmr, options);
	dispatch_assert(result);
	if (wflags) dx_wakeup(dm, 0, wflags);
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_mach_msg_t
_dispatch_mach_msg_reply_recv(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr, mach_port_t reply_port,
		mach_port_t send)
{
	if (slowpath(!MACH_PORT_VALID(reply_port))) {
		DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port");
	}
	void *ctxt = dmr->dmr_ctxt;
	mach_msg_header_t *hdr, *hdr2 = NULL;
	void *hdr_copyout_addr;
	mach_msg_size_t siz, msgsiz = 0;
	mach_msg_return_t kr;
	mach_msg_option_t options;
	mach_port_t notify = MACH_PORT_NULL;
	siz = mach_vm_round_page(DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE +
			DISPATCH_MACH_TRAILER_SIZE);
	hdr = alloca(siz);
	for (mach_vm_address_t p = mach_vm_trunc_page(hdr + vm_page_size);
			p < (mach_vm_address_t)hdr + siz; p += vm_page_size) {
		*(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
	}
	options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER);
	if (MACH_PORT_VALID(send)) {
		notify = send;
		options |= MACH_RCV_SYNC_WAIT;
	}

retry:
	_dispatch_debug_machport(reply_port);
	_dispatch_debug("machport[0x%08x]: MACH_RCV_MSG %s", reply_port,
			(options & MACH_RCV_TIMEOUT) ? "poll" : "wait");
	kr = mach_msg(hdr, options, 0, siz, reply_port, MACH_MSG_TIMEOUT_NONE,
			notify);
	hdr_copyout_addr = hdr;
	_dispatch_debug_machport(reply_port);
	_dispatch_debug("machport[0x%08x]: MACH_RCV_MSG (size %u, opts 0x%x) "
			"returned: %s - 0x%x", reply_port, siz, options,
			mach_error_string(kr), kr);
	switch (kr) {
	case MACH_RCV_TOO_LARGE:
		if (!fastpath(hdr->msgh_size <= UINT_MAX -
				DISPATCH_MACH_TRAILER_SIZE)) {
			DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message");
		}
		if (options & MACH_RCV_LARGE) {
			msgsiz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE;
			hdr2 = malloc(msgsiz);
			if (dispatch_assume(hdr2)) {
				hdr = hdr2;
				siz = msgsiz;
			}
			options |= MACH_RCV_TIMEOUT;
			options &= ~MACH_RCV_LARGE;
			goto retry;
		}
		_dispatch_log("BUG in libdispatch client: "
				"dispatch_mach_send_and_wait_for_reply: dropped message too "
				"large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id,
				hdr->msgh_size);
		break;
	case MACH_RCV_INVALID_NAME: // rdar://problem/21963848
	case MACH_RCV_PORT_CHANGED: // rdar://problem/21885327
	case MACH_RCV_PORT_DIED:
		// channel was disconnected/canceled and reply port destroyed
		_dispatch_debug("machport[0x%08x]: sync reply port destroyed, ctxt %p: "
				"%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr);
		goto out;
	case MACH_MSG_SUCCESS:
		if (hdr->msgh_remote_port) {
			_dispatch_debug_machport(hdr->msgh_remote_port);
		}
		_dispatch_debug("machport[0x%08x]: received msg id 0x%x, size = %u, "
				"reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id,
				hdr->msgh_size, hdr->msgh_remote_port);
		siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE;
		if (hdr2 && siz < msgsiz) {
			void *shrink = realloc(hdr2, msgsiz);
			if (shrink) hdr = hdr2 = shrink;
		}
		break;
	case MACH_RCV_INVALID_NOTIFY:
	default:
		DISPATCH_INTERNAL_CRASH(kr, "Unexpected error from mach_msg_receive");
		break;
	}
	_dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port);
	hdr->msgh_local_port = MACH_PORT_NULL;
	if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) {
		if (!kr) mach_msg_destroy(hdr);
		goto out;
	}
	dispatch_mach_msg_t dmsg;
	dispatch_mach_msg_destructor_t destructor = (!hdr2) ?
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT :
			DISPATCH_MACH_MSG_DESTRUCTOR_FREE;
	dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
	if (!hdr2 || hdr != hdr_copyout_addr) {
		_dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move,
				(uint64_t)hdr_copyout_addr,
				(uint64_t)_dispatch_mach_msg_get_msg(dmsg));
	}
	dmsg->do_ctxt = ctxt;
	return dmsg;
out:
	free(hdr2);
	return NULL;
}

static inline void
_dispatch_mach_msg_reply_received(dispatch_mach_t dm,
		dispatch_mach_reply_refs_t dmr, mach_port_t local_port)
{
	bool removed = _dispatch_mach_reply_tryremove(dm, dmr);
	if (!MACH_PORT_VALID(local_port) || !removed) {
		// port moved/destroyed during receive, or reply waiter was never
		// registered or already removed (disconnected)
		return;
	}
	mach_port_t reply_port = _dispatch_mach_reply_get_reply_port(
			(mach_port_t)dmr->du_ident);
	_dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p",
			reply_port, dmr->dmr_ctxt);
	if (_dispatch_mach_reply_is_reply_port_owned(dmr)) {
		_dispatch_set_thread_reply_port(reply_port);
		if (local_port != reply_port) {
			DISPATCH_CLIENT_CRASH(local_port,
					"Reply received on unexpected port");
		}
		return;
	}
	mach_msg_header_t *hdr;
	dispatch_mach_msg_t dmsg;
	dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
	hdr->msgh_local_port = local_port;
	dmsg->dmsg_voucher = dmr->dmr_voucher;
	dmr->dmr_voucher = NULL; // transfer reference
	dmsg->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority);
	dmsg->do_ctxt = dmr->dmr_ctxt;
	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED);
	return _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
}

static inline void
_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port,
		mach_port_t remote_port)
{
	mach_msg_header_t *hdr;
	dispatch_mach_msg_t dmsg;
	dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
	if (local_port) hdr->msgh_local_port = local_port;
	if (remote_port) hdr->msgh_remote_port = remote_port;
	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED);
	_dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ?
			local_port : remote_port, local_port ? "receive" : "send");
	return _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
}

static inline dispatch_mach_msg_t
_dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou,
		dispatch_mach_reply_refs_t dmr, dispatch_mach_reason_t reason)
{
	dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
	mach_port_t reply_port = dmsg ? dmsg->dmsg_reply :
			_dispatch_mach_reply_get_reply_port((mach_port_t)dmr->du_ident);
	voucher_t v;

	if (!reply_port) {
		if (!dmsg) {
			v = dmr->dmr_voucher;
			dmr->dmr_voucher = NULL; // transfer reference
			if (v) _voucher_release(v);
		}
		return NULL;
	}

	if (dmsg) {
		v = dmsg->dmsg_voucher;
		if (v) _voucher_retain(v);
	} else {
		v = dmr->dmr_voucher;
		dmr->dmr_voucher = NULL; // transfer reference
	}

	if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) &&
			(dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) ||
			(dmr && !_dispatch_unote_registered(dmr) &&
			_dispatch_mach_reply_is_reply_port_owned(dmr))) {
		if (v) _voucher_release(v);
		// deallocate owned reply port to break _dispatch_mach_msg_reply_recv
		// out of waiting in mach_msg(MACH_RCV_MSG)
		kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port,
				MACH_PORT_RIGHT_RECEIVE, -1);
		DISPATCH_VERIFY_MIG(kr);
		dispatch_assume_zero(kr);
		return NULL;
	}

	mach_msg_header_t *hdr;
	dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
	dmsgr->dmsg_voucher = v;
	hdr->msgh_local_port = reply_port;
	if (dmsg) {
		dmsgr->dmsg_priority = dmsg->dmsg_priority;
		dmsgr->do_ctxt = dmsg->do_ctxt;
	} else {
		dmsgr->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority);
		dmsgr->do_ctxt = dmr->dmr_ctxt;
	}
	_dispatch_mach_msg_set_reason(dmsgr, 0, reason);
	_dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p",
			hdr->msgh_local_port, dmsgr->do_ctxt);
	return dmsgr;
}

DISPATCH_NOINLINE
static void
_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou)
{
	dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
	dispatch_queue_t drq = NULL;
	mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
	mach_msg_option_t msg_opts = dmsg->dmsg_options;
	_dispatch_debug("machport[0x%08x]: not sent msg id 0x%x, ctxt %p, "
			"msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x",
			msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
			msg_opts, msg->msgh_voucher_port, dmsg->dmsg_reply);
	unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ?
			0 : DISPATCH_MACH_MESSAGE_NOT_SENT;
	dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL,
			msg_opts & DISPATCH_MACH_ASYNC_REPLY
			? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED
			: DISPATCH_MACH_DISCONNECTED);
	if (dmsg->do_ctxt) {
		drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
	}
	_dispatch_mach_msg_set_reason(dmsg, 0, reason);
	_dispatch_mach_handle_or_push_received_msg(dm, dmsg);
	if (dmsgr) {
		if (drq) {
			_dispatch_mach_push_async_reply_msg(dm, dmsgr, drq);
		} else {
			_dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
		}
	}
}

DISPATCH_NOINLINE
static uint32_t
_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou,
		dispatch_mach_reply_refs_t dmr, dispatch_qos_t qos,
		dispatch_mach_send_invoke_flags_t send_flags)
{
	dispatch_mach_send_refs_t dsrr = dm->dm_send_refs;
	dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL;
	voucher_t voucher = dmsg->dmsg_voucher;
	dispatch_queue_t drq = NULL;
	mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL;
	uint32_t send_status = 0;
	bool clear_voucher = false, kvoucher_move_send = false;
	mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
	bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
			MACH_MSG_TYPE_MOVE_SEND_ONCE);
	mach_port_t reply_port = dmsg->dmsg_reply;
	if (!is_reply) {
		dm->dm_needs_mgr = 0;
		if (unlikely(dsrr->dmsr_checkin && dmsg != dsrr->dmsr_checkin)) {
			// send initial checkin message
			if (unlikely(_dispatch_unote_registered(dsrr) &&
					_dispatch_queue_get_current() != &_dispatch_mgr_q)) {
				// send kevent must be uninstalled on the manager queue
				dm->dm_needs_mgr = 1;
				goto out;
			}
			if (unlikely(!_dispatch_mach_msg_send(dm,
					dsrr->dmsr_checkin, NULL, qos, DM_SEND_INVOKE_NONE))) {
				goto out;
			}
			dsrr->dmsr_checkin = NULL;
		}
	}
	mach_msg_return_t kr = 0;
	mach_msg_option_t opts = 0, msg_opts = dmsg->dmsg_options;
	if (!(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) {
		mach_msg_priority_t msg_priority = MACH_MSG_PRIORITY_UNSPECIFIED;
		opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK);
		if (!is_reply) {
			if (dmsg != dsrr->dmsr_checkin) {
				msg->msgh_remote_port = dsrr->dmsr_send;
			}
			if (_dispatch_queue_get_current() == &_dispatch_mgr_q) {
				if (unlikely(!_dispatch_unote_registered(dsrr))) {
					_dispatch_mach_notification_kevent_register(dm,
							msg->msgh_remote_port);
				}
				if (likely(_dispatch_unote_registered(dsrr))) {
					if (os_atomic_load2o(dsrr, dmsr_notification_armed,
							relaxed)) {
						goto out;
					}
					opts |= MACH_SEND_NOTIFY;
				}
			}
			opts |= MACH_SEND_TIMEOUT;
			if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) {
				ipc_kvoucher = _voucher_create_mach_voucher_with_priority(
						voucher, dmsg->dmsg_priority);
			}
			_dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg);
			if (ipc_kvoucher) {
				kvoucher_move_send = true;
				clear_voucher = _voucher_mach_msg_set_mach_voucher(msg,
						ipc_kvoucher, kvoucher_move_send);
			} else {
				clear_voucher = _voucher_mach_msg_set(msg, voucher);
			}
			if (qos) {
				opts |= MACH_SEND_OVERRIDE;
				msg_priority = (mach_msg_priority_t)
						_dispatch_priority_compute_propagated(
						_dispatch_qos_to_pp(qos), 0);
			}
		}
		_dispatch_debug_machport(msg->msgh_remote_port);
		if (reply_port) _dispatch_debug_machport(reply_port);
		if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) {
			if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) {
				if (_dispatch_use_mach_special_reply_port()) {
					opts |= MACH_SEND_SYNC_OVERRIDE;
				}
				_dispatch_clear_thread_reply_port(reply_port);
			}
			_dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg,
					msg_opts);
		}
		kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0,
				msg_priority);
		_dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, "
				"opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: "
				"%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
				opts, msg_opts, msg->msgh_voucher_port, reply_port,
				mach_error_string(kr), kr);
		if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) {
			_dispatch_mach_reply_waiter_unregister(dm, dmr,
					DU_UNREGISTER_REPLY_REMOVE);
		}
		if (clear_voucher) {
			if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) {
				DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption");
			}
			mach_voucher_t kv;
			kv = _voucher_mach_msg_clear(msg, kvoucher_move_send);
			if (kvoucher_move_send) ipc_kvoucher = kv;
		}
	}
	if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) {
		if (opts & MACH_SEND_NOTIFY) {
			_dispatch_debug("machport[0x%08x]: send-possible notification "
					"armed", (mach_port_t)dsrr->du_ident);
			_dispatch_mach_notification_set_armed(dsrr);
		} else {
			// send kevent must be installed on the manager queue
			dm->dm_needs_mgr = 1;
		}
		if (ipc_kvoucher) {
			_dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher);
			voucher_t ipc_voucher;
			ipc_voucher = _voucher_create_with_priority_and_mach_voucher(
					voucher, dmsg->dmsg_priority, ipc_kvoucher);
			_dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]",
					ipc_voucher, dmsg, voucher);
			if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
			dmsg->dmsg_voucher = ipc_voucher;
		}
		goto out;
	} else if (ipc_kvoucher && (kr || !kvoucher_move_send)) {
		_voucher_dealloc_mach_voucher(ipc_kvoucher);
	}
	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
	if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port &&
			!(_dispatch_unote_registered(dmrr) &&
			dmrr->du_ident == reply_port)) {
		_dispatch_mach_reply_kevent_register(dm, reply_port, dmsg);
	}
	if (unlikely(!is_reply && dmsg == dsrr->dmsr_checkin &&
			_dispatch_unote_registered(dsrr))) {
		_dispatch_mach_notification_kevent_unregister(dm);
	}
	if (slowpath(kr)) {
		// Send failed, so reply was never registered <rdar://problem/14309159>
		dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL,
				msg_opts & DISPATCH_MACH_ASYNC_REPLY
				? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED
				: DISPATCH_MACH_DISCONNECTED);
		if (dmsg->do_ctxt) {
			drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
		}
	}
	_dispatch_mach_msg_set_reason(dmsg, kr, 0);
	if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) &&
			(msg_opts & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT)) {
		// Return sent message synchronously <rdar://problem/25947334>
		send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT;
	} else {
		_dispatch_mach_handle_or_push_received_msg(dm, dmsg);
	}
	if (dmsgr) {
		if (drq) {
			_dispatch_mach_push_async_reply_msg(dm, dmsgr, drq);
		} else {
			_dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
		}
	}
	send_status |= DM_SEND_STATUS_SUCCESS;
out:
	return send_status;
}

#pragma mark -
#pragma mark dispatch_mach_send_refs_t

#define _dmsr_state_needs_lock_override(dq_state, qos) \
		unlikely(qos < _dq_state_max_qos(dq_state))

DISPATCH_ALWAYS_INLINE
static inline dispatch_qos_t
_dmsr_state_max_qos(uint64_t dmsr_state)
{
	return _dq_state_max_qos(dmsr_state);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dmsr_state_needs_override(uint64_t dmsr_state, dispatch_qos_t qos)
{
	dmsr_state &= DISPATCH_MACH_STATE_MAX_QOS_MASK;
	return dmsr_state < _dq_state_from_qos(qos);
}

DISPATCH_ALWAYS_INLINE
static inline uint64_t
_dmsr_state_merge_override(uint64_t dmsr_state, dispatch_qos_t qos)
{
	if (_dmsr_state_needs_override(dmsr_state, qos)) {
		dmsr_state &= ~DISPATCH_MACH_STATE_MAX_QOS_MASK;
		dmsr_state |= _dq_state_from_qos(qos);
		dmsr_state |= DISPATCH_MACH_STATE_DIRTY;
		dmsr_state |= DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
	}
	return dmsr_state;
}
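
/*
 * Illustrative walk-through of _dmsr_state_merge_override() (the QoS names
 * are symbolic, not literal bit values): if dmsr_state currently encodes a
 * max QoS of "utility" and an enqueuer pushes at "user-initiated", the QoS
 * field is replaced with the higher class and both DISPATCH_MACH_STATE_DIRTY
 * and DISPATCH_MACH_STATE_RECEIVED_OVERRIDE are set, so the current drainer
 * knows it must re-examine the queue and make the root queue aware of the
 * override (see the RECEIVED_OVERRIDE handling in
 * _dispatch_mach_send_drain() below). A push at equal or lower QoS leaves
 * the state word untouched.
 */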

#define _dispatch_mach_send_push_update_tail(dmsr, tail) \
		os_mpsc_push_update_tail(dmsr, dmsr, tail, do_next)
#define _dispatch_mach_send_push_update_head(dmsr, head) \
		os_mpsc_push_update_head(dmsr, dmsr, head)
#define _dispatch_mach_send_get_head(dmsr) \
		os_mpsc_get_head(dmsr, dmsr)
#define _dispatch_mach_send_unpop_head(dmsr, dc, dc_next) \
		os_mpsc_undo_pop_head(dmsr, dmsr, dc, dc_next, do_next)
#define _dispatch_mach_send_pop_head(dmsr, head) \
		os_mpsc_pop_head(dmsr, dmsr, head, do_next)

#define dm_push(dm, dc, qos) \
		_dispatch_queue_push((dm)->_as_dq, dc, qos)
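
/*
 * Background sketch (simplified, assumptions noted): the os_mpsc macros
 * above implement an intrusive multi-producer single-consumer list over the
 * dmsr_head/dmsr_tail fields, linked through do_next. Taken together, the
 * push side (_dispatch_mach_send_push_update_tail followed by
 * _dispatch_mach_send_push_update_head) behaves roughly like the following
 * pseudo-C; the real macros add memory ordering, and the "previous tail was
 * NULL" result is what tells the caller it performed the first push and must
 * wake up the drainer:
 *
 *	prev = atomic_exchange(&dmsr->dmsr_tail, new_tail);
 *	if (prev) {
 *		prev->do_next = new_head;   // link behind the existing tail
 *	} else {
 *		dmsr->dmsr_head = new_head; // queue was empty: publish the head
 *	}
 *
 * Only the single drainer consumes from the head, which is why
 * _dispatch_mach_send_drain() below can walk the list while holding only
 * the dmsr_state send lock.
 */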

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dmsr,
		dispatch_object_t dou)
{
	if (_dispatch_mach_send_push_update_tail(dmsr, dou._do)) {
		_dispatch_mach_send_push_update_head(dmsr, dou._do);
		return true;
	}
	return false;
}

DISPATCH_NOINLINE
static bool
_dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags,
		dispatch_mach_send_invoke_flags_t send_flags)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	dispatch_mach_reply_refs_t dmr;
	dispatch_mach_msg_t dmsg;
	struct dispatch_object_s *dc = NULL, *next_dc = NULL;
	dispatch_qos_t qos = _dmsr_state_max_qos(dmsr->dmsr_state);
	uint64_t old_state, new_state;
	uint32_t send_status;
	bool needs_mgr, disconnecting, returning_send_result = false;

again:
	needs_mgr = false; disconnecting = false;
	while (dmsr->dmsr_tail) {
		dc = _dispatch_mach_send_get_head(dmsr);
		do {
			dispatch_mach_send_invoke_flags_t sf = send_flags;
			// Only request immediate send result for the first message
			send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK;
			next_dc = _dispatch_mach_send_pop_head(dmsr, dc);
			if (_dispatch_object_has_type(dc,
					DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) {
				if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) {
					goto partial_drain;
				}
				_dispatch_continuation_pop(dc, NULL, flags, dm->_as_dq);
				continue;
			}
			if (_dispatch_object_is_sync_waiter(dc)) {
				dmsg = ((dispatch_continuation_t)dc)->dc_data;
				dmr = ((dispatch_continuation_t)dc)->dc_other;
			} else if (_dispatch_object_has_vtable(dc)) {
				dmsg = (dispatch_mach_msg_t)dc;
				dmr = NULL;
			} else {
				if (_dispatch_unote_registered(dmsr) &&
						(_dispatch_queue_get_current() != &_dispatch_mgr_q)) {
					// send kevent must be uninstalled on the manager queue
					needs_mgr = true;
					goto partial_drain;
				}
				if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) {
					disconnecting = true;
					goto partial_drain;
				}
				_dispatch_perfmon_workitem_inc();
				continue;
			}
			_dispatch_voucher_ktrace_dmsg_pop(dmsg);
			if (unlikely(dmsr->dmsr_disconnect_cnt ||
					(dm->dq_atomic_flags & DSF_CANCELED))) {
				_dispatch_mach_msg_not_sent(dm, dmsg);
				_dispatch_perfmon_workitem_inc();
				continue;
			}
			send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, qos, sf);
			if (unlikely(!send_status)) {
				goto partial_drain;
			}
			if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) {
				returning_send_result = true;
			}
			_dispatch_perfmon_workitem_inc();
		} while ((dc = next_dc));
	}

	os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
		if (old_state & DISPATCH_MACH_STATE_DIRTY) {
			new_state = old_state;
			new_state &= ~DISPATCH_MACH_STATE_DIRTY;
			new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
			new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
		} else {
			// unlock
			new_state = 0;
		}
	});
	goto out;

partial_drain:
	// if this is not a complete drain, we must undo some things
	_dispatch_mach_send_unpop_head(dmsr, dc, next_dc);

	if (_dispatch_object_has_type(dc,
			DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
			new_state = old_state;
			new_state |= DISPATCH_MACH_STATE_DIRTY;
			new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER;
			new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK;
			new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
		});
	} else {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
			new_state = old_state;
			if (old_state & (DISPATCH_MACH_STATE_DIRTY |
					DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) {
				new_state &= ~DISPATCH_MACH_STATE_DIRTY;
				new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
				new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
			} else {
				new_state |= DISPATCH_MACH_STATE_DIRTY;
				new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK;
			}
		});
	}

out:
	if (old_state & DISPATCH_MACH_STATE_RECEIVED_OVERRIDE) {
		// Ensure that the root queue sees that this thread was overridden.
		_dispatch_set_basepri_override_qos(_dmsr_state_max_qos(old_state));
	}

	if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) {
		qos = _dmsr_state_max_qos(new_state);
		os_atomic_thread_fence(dependency);
		dmsr = os_atomic_force_dependency_on(dmsr, new_state);
		goto again;
	}

	if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
		qos = _dmsr_state_max_qos(new_state);
		_dispatch_mach_push_send_barrier_drain(dm, qos);
	} else {
		if (needs_mgr || dm->dm_needs_mgr) {
			qos = _dmsr_state_max_qos(new_state);
		} else {
			qos = 0;
		}
		if (!disconnecting) dx_wakeup(dm, qos, DISPATCH_WAKEUP_MAKE_DIRTY);
	}
	return returning_send_result;
}

DISPATCH_NOINLINE
static void
_dispatch_mach_send_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags,
		dispatch_mach_send_invoke_flags_t send_flags)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	dispatch_lock owner_self = _dispatch_lock_value_for_self();
	uint64_t old_state, new_state;

	uint64_t canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK;
	uint64_t canlock_state = 0;

	if (send_flags & DM_SEND_INVOKE_NEEDS_BARRIER) {
		canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER;
		canlock_state = DISPATCH_MACH_STATE_PENDING_BARRIER;
	} else if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) {
		canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER;
	}

	dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor();
retry:
	os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, {
		new_state = old_state;
		if (unlikely((old_state & canlock_mask) != canlock_state)) {
			if (!(send_flags & DM_SEND_INVOKE_MAKE_DIRTY)) {
				os_atomic_rmw_loop_give_up(break);
			}
			new_state |= DISPATCH_MACH_STATE_DIRTY;
		} else {
			if (_dmsr_state_needs_lock_override(old_state, oq_floor)) {
				os_atomic_rmw_loop_give_up({
					oq_floor = _dispatch_queue_override_self(old_state);
					goto retry;
				});
			}
			new_state |= owner_self;
			new_state &= ~DISPATCH_MACH_STATE_DIRTY;
			new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
			new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
		}
	});

	if (unlikely((old_state & canlock_mask) != canlock_state)) {
		return;
	}
	if (send_flags & DM_SEND_INVOKE_CANCEL) {
		_dispatch_mach_cancel(dm);
	}
	_dispatch_mach_send_drain(dm, flags, send_flags);
}

DISPATCH_NOINLINE
void
_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc,
		DISPATCH_UNUSED dispatch_invoke_context_t dic,
		dispatch_invoke_flags_t flags)
{
	dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current();
	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
	dispatch_thread_frame_s dtf;

	DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY);
	DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER);
	// hide the mach channel (see _dispatch_mach_barrier_invoke comment)
	_dispatch_thread_frame_stash(&dtf);
	_dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags, {
		_dispatch_mach_send_invoke(dm, flags,
				DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER);
	});
	_dispatch_thread_frame_unstash(&dtf);
}

DISPATCH_NOINLINE
static void
_dispatch_mach_push_send_barrier_drain(dispatch_mach_t dm, dispatch_qos_t qos)
{
	dispatch_continuation_t dc = _dispatch_continuation_alloc();

	dc->do_vtable = DC_VTABLE(MACH_SEND_BARRRIER_DRAIN);
	dc->dc_func = NULL;
	dc->dc_ctxt = NULL;
	dc->dc_voucher = DISPATCH_NO_VOUCHER;
	dc->dc_priority = DISPATCH_NO_PRIORITY;
	dm_push(dm, dc, qos);
}

DISPATCH_NOINLINE
static void
_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc,
		dispatch_qos_t qos)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	uint64_t old_state, new_state, state_flags = 0;
	dispatch_tid owner;
	bool wakeup;

	// <rdar://problem/25896179> when pushing a send barrier that destroys
	// the last reference to this channel, and the send queue is already
	// draining on another thread, the send barrier may run as soon as
	// _dispatch_mach_send_push_inline() returns.
	_dispatch_retain_2(dm);

	wakeup = _dispatch_mach_send_push_inline(dmsr, dc);
	if (wakeup) {
		state_flags = DISPATCH_MACH_STATE_DIRTY;
		if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) {
			state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER;
		}
	}

	if (state_flags) {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
			new_state = _dmsr_state_merge_override(old_state, qos);
			new_state |= state_flags;
		});
	} else {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, relaxed, {
			new_state = _dmsr_state_merge_override(old_state, qos);
			if (old_state == new_state) {
				os_atomic_rmw_loop_give_up(break);
			}
		});
	}

	qos = _dmsr_state_max_qos(new_state);
	owner = _dispatch_lock_owner((dispatch_lock)old_state);
	if (owner) {
		if (_dmsr_state_needs_override(old_state, qos)) {
			_dispatch_wqthread_override_start_check_owner(owner, qos,
					&dmsr->dmsr_state_lock.dul_lock);
		}
		return _dispatch_release_2_tailcall(dm);
	}

	dispatch_wakeup_flags_t wflags = 0;
	if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) {
		_dispatch_mach_push_send_barrier_drain(dm, qos);
	} else if (wakeup || dmsr->dmsr_disconnect_cnt ||
			(dm->dq_atomic_flags & DSF_CANCELED)) {
		wflags = DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2;
	} else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
		wflags = DISPATCH_WAKEUP_CONSUME_2;
	}
	if (wflags) {
		return dx_wakeup(dm, qos, wflags);
	}
	return _dispatch_release_2_tailcall(dm);
}

DISPATCH_NOINLINE
static bool
_dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm,
		dispatch_object_t dou, dispatch_qos_t qos,
		dispatch_mach_send_invoke_flags_t send_flags)
{
	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
	dispatch_lock owner_self = _dispatch_lock_value_for_self();
	uint64_t old_state, new_state, canlock_mask, state_flags = 0;
	dispatch_tid owner;

	bool wakeup = _dispatch_mach_send_push_inline(dmsr, dou);
	if (wakeup) {
		state_flags = DISPATCH_MACH_STATE_DIRTY;
	}

	if (unlikely(dmsr->dmsr_disconnect_cnt ||
			(dm->dq_atomic_flags & DSF_CANCELED))) {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
			new_state = _dmsr_state_merge_override(old_state, qos);
			new_state |= state_flags;
		});
		dx_wakeup(dm, qos, DISPATCH_WAKEUP_MAKE_DIRTY);
		return false;
	}

	canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK |
			DISPATCH_MACH_STATE_PENDING_BARRIER;
	if (state_flags) {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, seq_cst, {
			new_state = _dmsr_state_merge_override(old_state, qos);
			new_state |= state_flags;
			if (likely((old_state & canlock_mask) == 0)) {
				new_state |= owner_self;
				new_state &= ~DISPATCH_MACH_STATE_DIRTY;
				new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
				new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
			}
		});
	} else {
		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, {
			new_state = _dmsr_state_merge_override(old_state, qos);
			if (new_state == old_state) {
				os_atomic_rmw_loop_give_up(return false);
			}
			if (likely((old_state & canlock_mask) == 0)) {
				new_state |= owner_self;
				new_state &= ~DISPATCH_MACH_STATE_DIRTY;
				new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
				new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
			}
		});
	}

	owner = _dispatch_lock_owner((dispatch_lock)old_state);
	if (owner) {
		if (_dmsr_state_needs_override(old_state, qos)) {
			_dispatch_wqthread_override_start_check_owner(owner, qos,
					&dmsr->dmsr_state_lock.dul_lock);
		}
		return false;
	}

	if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
		dx_wakeup(dm, qos, 0);
		return false;
	}

	// Ensure our message is still at the head of the queue and has not already
	// been dequeued by another thread that raced us to the send queue lock.
	// A plain load of the head and comparison against our object pointer is
	// sufficient.
	if (unlikely(!(wakeup && dou._do == dmsr->dmsr_head))) {
		// Don't request immediate send result for messages we don't own
		send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK;
	}
	return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags);
}

#pragma mark -
#pragma mark dispatch_mach

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm)
{
	DISPATCH_ASSERT_ON_MANAGER_QUEUE();
	if (_dispatch_unote_registered(dm->dm_send_refs)) {
		dispatch_assume(_dispatch_unote_unregister(dm->dm_send_refs, 0));
	}
	dm->dm_send_refs->du_ident = 0;
}
1655
1656 DISPATCH_ALWAYS_INLINE
1657 static inline void
1658 _dispatch_mach_notification_kevent_register(dispatch_mach_t dm, mach_port_t send)
1659 {
1660 DISPATCH_ASSERT_ON_MANAGER_QUEUE();
1661 dm->dm_send_refs->du_ident = send;
1662 dispatch_assume(_dispatch_unote_register(dm->dm_send_refs,
1663 DISPATCH_WLH_ANON, 0));
1664 }
1665
1666 void
1667 _dispatch_mach_merge_notification(dispatch_unote_t du,
1668 uint32_t flags DISPATCH_UNUSED, uintptr_t data,
1669 uintptr_t status DISPATCH_UNUSED,
1670 pthread_priority_t pp DISPATCH_UNUSED)
1671 {
1672 dispatch_mach_send_refs_t dmsr = du._dmsr;
1673 dispatch_mach_t dm = _dispatch_wref2ptr(dmsr->du_owner_wref);
1674
1675 if (data & dmsr->du_fflags) {
1676 _dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN,
1677 DM_SEND_INVOKE_MAKE_DIRTY);
1678 }
1679 }
1680
1681 DISPATCH_NOINLINE
1682 static void
1683 _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm,
1684 dispatch_mach_msg_t dmsg)
1685 {
1686 mach_error_t error;
1687 dispatch_mach_reason_t reason = _dispatch_mach_msg_get_reason(dmsg, &error);
1688 if (reason == DISPATCH_MACH_MESSAGE_RECEIVED || !dm->dm_is_xpc ||
1689 !_dispatch_mach_xpc_hooks->dmxh_direct_message_handler(
1690 dm->dm_recv_refs->dmrr_handler_ctxt, reason, dmsg, error)) {
1691 // Not XPC client or not a message that XPC can handle inline - push
1692 // it onto the channel queue.
1693 dm_push(dm, dmsg, _dispatch_qos_from_pp(dmsg->dmsg_priority));
1694 } else {
1695 // XPC handled the message inline. Do the cleanup that would otherwise
1696 // have happened in _dispatch_mach_msg_invoke(), leaving out steps that
1697 // are not required in this context.
1698 dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
1699 dispatch_release(dmsg);
1700 }
1701 }
1702
1703 DISPATCH_ALWAYS_INLINE
1704 static void
1705 _dispatch_mach_push_async_reply_msg(dispatch_mach_t dm,
1706 dispatch_mach_msg_t dmsg, dispatch_queue_t drq) {
1707 // Push the message onto the given queue. This function is only used for
1708 // replies to messages sent by
1709 // dispatch_mach_send_with_result_and_async_reply_4libxpc().
1710 dispatch_continuation_t dc = _dispatch_mach_msg_async_reply_wrap(dmsg, dm);
1711 _dispatch_trace_continuation_push(drq, dc);
1712 dx_push(drq, dc, _dispatch_qos_from_pp(dmsg->dmsg_priority));
1713 }
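
/*
 * Illustrative sketch (not compiled): the shape of the libxpc hook table
 * consulted by the two functions above. Field names and the version check
 * mirror their uses in this file; the exact prototypes live in the private
 * mach SPI header, and every example_* symbol is a placeholder.
 */
#if 0
static const struct dispatch_mach_xpc_hooks_s example_hooks = {
	.version = 3, // >= 3 so dmxh_enable_sigterm_notification is consulted
	// returns true only when the message was handled inline; returning
	// false makes _dispatch_mach_handle_or_push_received_msg() enqueue it
	.dmxh_direct_message_handler = example_direct_message_handler,
	// maps a message context to the queue its async reply is pushed on
	.dmxh_msg_context_reply_queue = example_msg_context_reply_queue,
	.dmxh_async_reply_handler = example_async_reply_handler,
	.dmxh_enable_sigterm_notification = example_enable_sigterm,
};
// installed once, early, via dispatch_mach_hooks_install_4libxpc()
#endif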
1714
1715 #pragma mark -
1716 #pragma mark dispatch_mach_t
1717
1718 static inline mach_msg_option_t
1719 _dispatch_mach_checkin_options(void)
1720 {
1721 mach_msg_option_t options = 0;
1722 #if DISPATCH_USE_CHECKIN_NOIMPORTANCE
1723 options = MACH_SEND_NOIMPORTANCE; // <rdar://problem/16996737>
1724 #endif
1725 return options;
1726 }
1727
1728
1729 static inline mach_msg_option_t
1730 _dispatch_mach_send_options(void)
1731 {
1732 mach_msg_option_t options = 0;
1733 return options;
1734 }
1735
1736 DISPATCH_ALWAYS_INLINE
1737 static inline dispatch_qos_t
1738 _dispatch_mach_priority_propagate(mach_msg_option_t options,
1739 pthread_priority_t *msg_pp)
1740 {
1741 #if DISPATCH_USE_NOIMPORTANCE_QOS
1742 if (options & MACH_SEND_NOIMPORTANCE) {
1743 *msg_pp = 0;
1744 return 0;
1745 }
1746 #endif
1747 unsigned int flags = DISPATCH_PRIORITY_PROPAGATE_CURRENT;
1748 if ((options & DISPATCH_MACH_WAIT_FOR_REPLY) &&
1749 (options & DISPATCH_MACH_OWNED_REPLY_PORT) &&
1750 _dispatch_use_mach_special_reply_port()) {
1751 flags |= DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC;
1752 }
1753 *msg_pp = _dispatch_priority_compute_propagated(0, flags);
1754 // TODO: remove QoS contribution of sync IPC messages to send queue
1755 // rdar://31848737
1756 return _dispatch_qos_from_pp(*msg_pp);
1757 }
1758
1759 DISPATCH_NOINLINE
1760 static bool
1761 _dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
1762 dispatch_continuation_t dc_wait, mach_msg_option_t options)
1763 {
1764 dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
1765 if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) {
1766 DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued");
1767 }
1768 dispatch_retain(dmsg);
1769 pthread_priority_t msg_pp;
1770 dispatch_qos_t qos = _dispatch_mach_priority_propagate(options, &msg_pp);
1771 options |= _dispatch_mach_send_options();
1772 dmsg->dmsg_options = options;
1773 mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
1774 dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg);
1775 bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
1776 MACH_MSG_TYPE_MOVE_SEND_ONCE);
1777 dmsg->dmsg_priority = msg_pp;
1778 dmsg->dmsg_voucher = _voucher_copy();
1779 _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg);
1780
1781 uint32_t send_status;
1782 bool returning_send_result = false;
1783 dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE;
1784 if (options & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT) {
1785 send_flags = DM_SEND_INVOKE_IMMEDIATE_SEND;
1786 }
1787 if (is_reply && !dmsg->dmsg_reply && !dmsr->dmsr_disconnect_cnt &&
1788 !(dm->dq_atomic_flags & DSF_CANCELED)) {
1789 // replies are sent to a send-once right and don't need the send queue
1790 dispatch_assert(!dc_wait);
1791 send_status = _dispatch_mach_msg_send(dm, dmsg, NULL, 0, send_flags);
1792 dispatch_assert(send_status);
1793 returning_send_result = !!(send_status &
1794 DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT);
1795 } else {
1796 _dispatch_voucher_ktrace_dmsg_push(dmsg);
1797 dispatch_object_t dou = { ._dmsg = dmsg };
1798 if (dc_wait) dou._dc = dc_wait;
1799 returning_send_result = _dispatch_mach_send_push_and_trydrain(dm, dou,
1800 qos, send_flags);
1801 }
1802 if (returning_send_result) {
1803 _dispatch_voucher_debug("mach-msg[%p] clear", dmsg->dmsg_voucher, dmsg);
1804 if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
1805 dmsg->dmsg_voucher = NULL;
1806 dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
1807 dispatch_release(dmsg);
1808 }
1809 return returning_send_result;
1810 }
1811
1812 DISPATCH_NOINLINE
1813 void
1814 dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
1815 mach_msg_option_t options)
1816 {
1817 dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
1818 options &= ~DISPATCH_MACH_OPTIONS_MASK;
1819 bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL, options);
1820 dispatch_assert(!returned_send_result);
1821 }
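
/*
 * Minimal client-side sketch of the path above (not compiled): it assumes
 * the channel SPI from the private headers and a valid send right
 * `server_port`; the label, message id and handler body are hypothetical.
 */
#if 0
static void
example_send_oneway(mach_port_t server_port)
{
	dispatch_mach_t dm = dispatch_mach_create("com.example.channel",
			dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0),
			^(dispatch_mach_reason_t reason, dispatch_mach_msg_t msg,
			mach_error_t error) {
		// DISPATCH_MACH_CONNECTED arrives first; failed sends surface
		// as DISPATCH_MACH_MESSAGE_NOT_SENT/SEND_FAILED events
		(void)reason; (void)msg; (void)error;
	});
	dispatch_mach_connect(dm, MACH_PORT_NULL, server_port, NULL);

	mach_msg_header_t *hdr;
	dispatch_mach_msg_t dmsg = dispatch_mach_msg_create(NULL, sizeof(*hdr),
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
	hdr->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	hdr->msgh_remote_port = server_port;
	hdr->msgh_size = sizeof(*hdr);
	hdr->msgh_id = 0x1000; // hypothetical message id

	// _dispatch_mach_send_msg() retains the message, so the caller's
	// reference can be dropped right away
	dispatch_mach_send(dm, dmsg, 0);
	dispatch_release(dmsg);
}
#endif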
1822
1823 DISPATCH_NOINLINE
1824 void
1825 dispatch_mach_send_with_result(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
1826 mach_msg_option_t options, dispatch_mach_send_flags_t send_flags,
1827 dispatch_mach_reason_t *send_result, mach_error_t *send_error)
1828 {
1829 if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
1830 DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
1831 }
1832 dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
1833 options &= ~DISPATCH_MACH_OPTIONS_MASK;
1834 options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
1835 bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL, options);
1836 unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
1837 mach_error_t err = 0;
1838 if (returned_send_result) {
1839 reason = _dispatch_mach_msg_get_reason(dmsg, &err);
1840 }
1841 *send_result = reason;
1842 *send_error = err;
1843 }
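
/*
 * Sketch of consuming the immediate send result (assumes `dm` and `dmsg`
 * set up as in the previous sketch). When the message cannot be sent
 * synchronously the reason is DISPATCH_MACH_NEEDS_DEFERRED_SEND and the
 * eventual outcome is delivered to the channel handler instead.
 */
#if 0
	dispatch_mach_reason_t send_result;
	mach_error_t send_error;
	dispatch_mach_send_with_result(dm, dmsg, 0, DISPATCH_MACH_SEND_DEFAULT,
			&send_result, &send_error);
	if (send_result == DISPATCH_MACH_MESSAGE_SEND_FAILED) {
		// send_error holds the mach_msg(2) return code
	} else if (send_result == DISPATCH_MACH_NEEDS_DEFERRED_SEND) {
		// the result will arrive as a channel handler event
	}
#endif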
1844
1845 static inline
1846 dispatch_mach_msg_t
1847 _dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
1848 dispatch_mach_msg_t dmsg, mach_msg_option_t options,
1849 bool *returned_send_result)
1850 {
1851 mach_port_t send = MACH_PORT_NULL;
1852 mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg);
1853 if (!reply_port) {
1854 // use per-thread mach reply port <rdar://24597802>
1855 reply_port = _dispatch_get_thread_reply_port();
1856 mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
1857 dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) ==
1858 MACH_MSG_TYPE_MAKE_SEND_ONCE);
1859 hdr->msgh_local_port = reply_port;
1860 options |= DISPATCH_MACH_OWNED_REPLY_PORT;
1861 }
1862 options |= DISPATCH_MACH_WAIT_FOR_REPLY;
1863
1864 dispatch_mach_reply_refs_t dmr;
1865 #if DISPATCH_DEBUG
1866 dmr = _dispatch_calloc(1, sizeof(*dmr));
1867 #else
1868 struct dispatch_mach_reply_refs_s dmr_buf = { };
1869 dmr = &dmr_buf;
1870 #endif
1871 struct dispatch_continuation_s dc_wait = {
1872 .dc_flags = DISPATCH_OBJ_SYNC_WAITER_BIT,
1873 .dc_data = dmsg,
1874 .dc_other = dmr,
1875 .dc_priority = DISPATCH_NO_PRIORITY,
1876 .dc_voucher = DISPATCH_NO_VOUCHER,
1877 };
1878 dmr->dmr_ctxt = dmsg->do_ctxt;
1879 dmr->dmr_waiter_tid = _dispatch_tid_self();
1880 *returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait, options);
1881 if (options & DISPATCH_MACH_OWNED_REPLY_PORT) {
1882 _dispatch_clear_thread_reply_port(reply_port);
1883 if (_dispatch_use_mach_special_reply_port()) {
1884 // link special reply port to send right for remote receive right
1885 // TODO: extend to pre-connect phase <rdar://problem/31823384>
1886 send = dm->dm_send_refs->dmsr_send;
1887 }
1888 }
1889 dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port, send);
1890 #if DISPATCH_DEBUG
1891 free(dmr);
1892 #endif
1893 return dmsg;
1894 }
1895
1896 DISPATCH_NOINLINE
1897 dispatch_mach_msg_t
1898 dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
1899 dispatch_mach_msg_t dmsg, mach_msg_option_t options)
1900 {
1901 bool returned_send_result;
1902 dispatch_mach_msg_t reply;
1903 dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
1904 options &= ~DISPATCH_MACH_OPTIONS_MASK;
1905 reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options,
1906 &returned_send_result);
1907 dispatch_assert(!returned_send_result);
1908 return reply;
1909 }
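
/*
 * Synchronous round-trip sketch (assumes a connected channel `dm` and a
 * send right `server_port`). Leaving msgh_local_port NULL with a
 * MAKE_SEND_ONCE local disposition lets the helper above substitute the
 * per-thread reply port; the request id is hypothetical.
 */
#if 0
	mach_msg_header_t *hdr;
	dispatch_mach_msg_t req = dispatch_mach_msg_create(NULL, sizeof(*hdr),
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
	hdr->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
			MACH_MSG_TYPE_MAKE_SEND_ONCE);
	hdr->msgh_remote_port = server_port;
	hdr->msgh_local_port = MACH_PORT_NULL; // replaced with a reply port
	hdr->msgh_size = sizeof(*hdr);
	hdr->msgh_id = 0x1001;

	dispatch_mach_msg_t reply =
			dispatch_mach_send_and_wait_for_reply(dm, req, 0);
	dispatch_release(req);
	if (reply) {
		size_t size;
		mach_msg_header_t *rhdr = dispatch_mach_msg_get_msg(reply, &size);
		// ... consume rhdr/size, then drop the reply ...
		(void)rhdr;
		dispatch_release(reply);
	}
#endif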
1910
1911 DISPATCH_NOINLINE
1912 dispatch_mach_msg_t
1913 dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t dm,
1914 dispatch_mach_msg_t dmsg, mach_msg_option_t options,
1915 dispatch_mach_send_flags_t send_flags,
1916 dispatch_mach_reason_t *send_result, mach_error_t *send_error)
1917 {
1918 if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
1919 DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
1920 }
1921 bool returned_send_result;
1922 dispatch_mach_msg_t reply;
1923 dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
1924 options &= ~DISPATCH_MACH_OPTIONS_MASK;
1925 options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
1926 reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options,
1927 &returned_send_result);
1928 unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
1929 mach_error_t err = 0;
1930 if (returned_send_result) {
1931 reason = _dispatch_mach_msg_get_reason(dmsg, &err);
1932 }
1933 *send_result = reason;
1934 *send_error = err;
1935 return reply;
1936 }
1937
1938 DISPATCH_NOINLINE
1939 void
1940 dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t dm,
1941 dispatch_mach_msg_t dmsg, mach_msg_option_t options,
1942 dispatch_mach_send_flags_t send_flags,
1943 dispatch_mach_reason_t *send_result, mach_error_t *send_error)
1944 {
1945 if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
1946 DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
1947 }
1948 if (unlikely(!dm->dm_is_xpc)) {
1949 DISPATCH_CLIENT_CRASH(0,
1950 "dispatch_mach_send_with_result_and_wait_for_reply is XPC only");
1951 }
1952
1953 dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
1954 options &= ~DISPATCH_MACH_OPTIONS_MASK;
1955 options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
1956 mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg);
1957 if (!reply_port) {
1958 DISPATCH_CLIENT_CRASH(0, "Reply port needed for async send with reply");
1959 }
1960 options |= DISPATCH_MACH_ASYNC_REPLY;
1961 bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL, options);
1962 unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
1963 mach_error_t err = 0;
1964 if (returned_send_result) {
1965 reason = _dispatch_mach_msg_get_reason(dmsg, &err);
1966 }
1967 *send_result = reason;
1968 *send_error = err;
1969 }
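
/*
 * Call-pattern sketch for the XPC-only variant above: the request must
 * carry a real reply port and the channel must have been created for
 * libxpc. The reply is later pushed through the installed
 * dmxh_async_reply_handler hook on the queue that
 * dmxh_msg_context_reply_queue() returns for the message context.
 */
#if 0
	dispatch_mach_reason_t send_result;
	mach_error_t send_error;
	dispatch_mach_send_with_result_and_async_reply_4libxpc(dm, req, 0,
			DISPATCH_MACH_SEND_DEFAULT, &send_result, &send_error);
	// send_result/send_error have the same meaning as for
	// dispatch_mach_send_with_result()
#endif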
1970
1971 DISPATCH_NOINLINE
1972 static bool
1973 _dispatch_mach_disconnect(dispatch_mach_t dm)
1974 {
1975 dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
1976 bool disconnected;
1977 if (_dispatch_unote_registered(dmsr)) {
1978 _dispatch_mach_notification_kevent_unregister(dm);
1979 }
1980 if (MACH_PORT_VALID(dmsr->dmsr_send)) {
1981 _dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dmsr->dmsr_send);
1982 dmsr->dmsr_send = MACH_PORT_NULL;
1983 }
1984 if (dmsr->dmsr_checkin) {
1985 _dispatch_mach_msg_not_sent(dm, dmsr->dmsr_checkin);
1986 dmsr->dmsr_checkin = NULL;
1987 }
1988 _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
1989 dispatch_mach_reply_refs_t dmr, tmp;
1990 TAILQ_FOREACH_SAFE(dmr, &dm->dm_send_refs->dmsr_replies, dmr_list, tmp) {
1991 TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
1992 _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
1993 if (_dispatch_unote_registered(dmr)) {
1994 if (!_dispatch_mach_reply_kevent_unregister(dm, dmr,
1995 DU_UNREGISTER_DISCONNECTED)) {
1996 TAILQ_INSERT_HEAD(&dm->dm_send_refs->dmsr_replies, dmr,
1997 dmr_list);
1998 }
1999 } else {
2000 _dispatch_mach_reply_waiter_unregister(dm, dmr,
2001 DU_UNREGISTER_DISCONNECTED);
2002 }
2003 }
2004 disconnected = TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies);
2005 _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
2006 return disconnected;
2007 }
2008
2009 static void
2010 _dispatch_mach_cancel(dispatch_mach_t dm)
2011 {
2012 _dispatch_object_debug(dm, "%s", __func__);
2013 if (!_dispatch_mach_disconnect(dm)) return;
2014
2015 bool uninstalled = true;
2016 dispatch_assert(!dm->dm_uninstalled);
2017
2018 if (dm->dm_xpc_term_refs) {
2019 uninstalled = _dispatch_unote_unregister(dm->dm_xpc_term_refs, 0);
2020 }
2021
2022 dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
2023 mach_port_t local_port = (mach_port_t)dmrr->du_ident;
2024 if (local_port) {
2025 // handle the deferred delete case properly, similar to what
2026 // _dispatch_source_invoke2() does
2027 dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
2028 if ((dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_ARMED)) {
2029 _dispatch_source_refs_unregister(dm->_as_ds,
2030 DU_UNREGISTER_IMMEDIATE_DELETE);
2031 dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
2032 } else if (!(dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_DELETED)) {
2033 _dispatch_source_refs_unregister(dm->_as_ds, 0);
2034 dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
2035 }
2036 if ((dqf & DSF_STATE_MASK) == DSF_DELETED) {
2037 _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL);
2038 dmrr->du_ident = 0;
2039 } else {
2040 uninstalled = false;
2041 }
2042 } else {
2043 _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED,
2044 DSF_ARMED | DSF_DEFERRED_DELETE);
2045 }
2046
2047 if (dm->dm_send_refs->dmsr_disconnect_cnt) {
2048 uninstalled = false; // <rdar://problem/31233110>
2049 }
2050 if (uninstalled) dm->dm_uninstalled = uninstalled;
2051 }
2052
2053 DISPATCH_NOINLINE
2054 static bool
2055 _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou)
2056 {
2057 if (!_dispatch_mach_disconnect(dm)) return false;
2058 dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
2059 dmsr->dmsr_checkin = dou._dc->dc_data;
2060 dmsr->dmsr_send = (mach_port_t)dou._dc->dc_other;
2061 _dispatch_continuation_free(dou._dc);
2062 (void)os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, relaxed);
2063 _dispatch_object_debug(dm, "%s", __func__);
2064 _dispatch_release(dm); // <rdar://problem/26266265>
2065 return true;
2066 }
2067
2068 DISPATCH_NOINLINE
2069 void
2070 dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send,
2071 dispatch_mach_msg_t checkin)
2072 {
2073 dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
2074 (void)os_atomic_inc2o(dmsr, dmsr_disconnect_cnt, relaxed);
2075 if (MACH_PORT_VALID(send) && checkin) {
2076 dispatch_mach_msg_t dmsg = checkin;
2077 dispatch_retain(dmsg);
2078 dmsg->dmsg_options = _dispatch_mach_checkin_options();
2079 dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg);
2080 } else {
2081 checkin = NULL;
2082 dmsr->dmsr_checkin_port = MACH_PORT_NULL;
2083 }
2084 dispatch_continuation_t dc = _dispatch_continuation_alloc();
2085 dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT;
2086 // actually called manually in _dispatch_mach_send_drain
2087 dc->dc_func = (void*)_dispatch_mach_reconnect_invoke;
2088 dc->dc_ctxt = dc;
2089 dc->dc_data = checkin;
2090 dc->dc_other = (void*)(uintptr_t)send;
2091 dc->dc_voucher = DISPATCH_NO_VOUCHER;
2092 dc->dc_priority = DISPATCH_NO_PRIORITY;
2093 _dispatch_retain(dm); // <rdar://problem/26266265>
2094 return _dispatch_mach_send_push(dm, dc, 0);
2095 }
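
/*
 * Recovery sketch: after the peer dies, look up a fresh send right and
 * re-arm the channel. dispatch_mach_reconnect() retains the checkin
 * message (see above) and stores it as the pending checkin to be sent
 * ahead of subsequent messages; example_lookup_service() is a placeholder.
 */
#if 0
	mach_port_t new_send = example_lookup_service();
	mach_msg_header_t *hdr;
	dispatch_mach_msg_t checkin = dispatch_mach_msg_create(NULL,
			sizeof(*hdr), DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
	hdr->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	hdr->msgh_remote_port = new_send;
	hdr->msgh_size = sizeof(*hdr);
	hdr->msgh_id = 0x2000; // hypothetical checkin id
	dispatch_mach_reconnect(dm, new_send, checkin);
	dispatch_release(checkin);
#endif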
2096
2097 DISPATCH_NOINLINE
2098 mach_port_t
2099 dispatch_mach_get_checkin_port(dispatch_mach_t dm)
2100 {
2101 dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
2102 if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) {
2103 return MACH_PORT_DEAD;
2104 }
2105 return dmsr->dmsr_checkin_port;
2106 }
2107
2108 DISPATCH_NOINLINE
2109 static void
2110 _dispatch_mach_connect_invoke(dispatch_mach_t dm)
2111 {
2112 dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
2113 _dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
2114 DISPATCH_MACH_CONNECTED, NULL, 0, dmrr->dmrr_handler_func);
2115 dm->dm_connect_handler_called = 1;
2116 _dispatch_perfmon_workitem_inc();
2117 }
2118
2119 DISPATCH_ALWAYS_INLINE
2120 static void
2121 _dispatch_mach_msg_invoke_with_mach(dispatch_mach_msg_t dmsg,
2122 dispatch_invoke_flags_t flags, dispatch_mach_t dm)
2123 {
2124 dispatch_mach_recv_refs_t dmrr;
2125 mach_error_t err;
2126 unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err);
2127 dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE|
2128 DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE;
2129
2130 dmrr = dm->dm_recv_refs;
2131 dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
2132 _dispatch_voucher_ktrace_dmsg_pop(dmsg);
2133 _dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg);
2134 (void)_dispatch_adopt_priority_and_set_voucher(dmsg->dmsg_priority,
2135 dmsg->dmsg_voucher, adopt_flags);
2136 dmsg->dmsg_voucher = NULL;
2137 dispatch_invoke_with_autoreleasepool(flags, {
2138 if (flags & DISPATCH_INVOKE_ASYNC_REPLY) {
2139 _dispatch_client_callout3(dmrr->dmrr_handler_ctxt, reason, dmsg,
2140 _dispatch_mach_xpc_hooks->dmxh_async_reply_handler);
2141 } else {
2142 if (slowpath(!dm->dm_connect_handler_called)) {
2143 _dispatch_mach_connect_invoke(dm);
2144 }
2145 if (reason == DISPATCH_MACH_MESSAGE_RECEIVED &&
2146 (_dispatch_queue_atomic_flags(dm->_as_dq) & DSF_CANCELED)) {
2147 // <rdar://problem/32184699> Do not deliver message received
2148 // after cancellation: _dispatch_mach_merge_msg can be preempted
2149 // for a long time between clearing DSF_ARMED but before
2150 // enqueuing the message, allowing for cancellation to complete,
2151 // and then the message event to be delivered.
2152 //
2153 // This makes XPC unhappy because some of these messages are
2154 // port-destroyed notifications that can cause it to try to
2155 // reconnect on a channel that is almost fully canceled
2156 } else {
2157 _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, reason, dmsg,
2158 err, dmrr->dmrr_handler_func);
2159 }
2160 }
2161 _dispatch_perfmon_workitem_inc();
2162 });
2163 _dispatch_introspection_queue_item_complete(dmsg);
2164 dispatch_release(dmsg);
2165 }
2166
2167 DISPATCH_NOINLINE
2168 void
2169 _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg,
2170 DISPATCH_UNUSED dispatch_invoke_context_t dic,
2171 dispatch_invoke_flags_t flags)
2172 {
2173 dispatch_thread_frame_s dtf;
2174
2175 // hide mach channel
2176 dispatch_mach_t dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf);
2177 _dispatch_mach_msg_invoke_with_mach(dmsg, flags, dm);
2178 _dispatch_thread_frame_unstash(&dtf);
2179 }
2180
2181 DISPATCH_NOINLINE
2182 void
2183 _dispatch_mach_barrier_invoke(dispatch_continuation_t dc,
2184 DISPATCH_UNUSED dispatch_invoke_context_t dic,
2185 dispatch_invoke_flags_t flags)
2186 {
2187 dispatch_thread_frame_s dtf;
2188 dispatch_mach_t dm = dc->dc_other;
2189 dispatch_mach_recv_refs_t dmrr;
2190 uintptr_t dc_flags = (uintptr_t)dc->dc_data;
2191 unsigned long type = dc_type(dc);
2192
2193 // hide mach channel from clients
2194 if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) {
2195 // on the send queue, the mach channel isn't the current queue;
2196 // its target queue is already the current one
2197 _dispatch_thread_frame_stash(&dtf);
2198 }
2199 dmrr = dm->dm_recv_refs;
2200 DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT);
2201 _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags, {
2202 dispatch_invoke_with_autoreleasepool(flags, {
2203 if (slowpath(!dm->dm_connect_handler_called)) {
2204 _dispatch_mach_connect_invoke(dm);
2205 }
2206 _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
2207 _dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
2208 DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0,
2209 dmrr->dmrr_handler_func);
2210 });
2211 });
2212 if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) {
2213 _dispatch_thread_frame_unstash(&dtf);
2214 }
2215 }
2216
2217 DISPATCH_ALWAYS_INLINE
2218 static inline void
2219 _dispatch_mach_barrier_set_vtable(dispatch_continuation_t dc,
2220 dispatch_mach_t dm, dispatch_continuation_vtable_t vtable)
2221 {
2222 dc->dc_data = (void *)dc->dc_flags;
2223 dc->dc_other = dm;
2224 dc->do_vtable = vtable; // Must be after the dc_flags load, do_vtable aliases dc_flags
2225 }
2226
2227 DISPATCH_NOINLINE
2228 void
2229 dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context,
2230 dispatch_function_t func)
2231 {
2232 dispatch_continuation_t dc = _dispatch_continuation_alloc();
2233 uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER;
2234 dispatch_qos_t qos;
2235
2236 _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags);
2237 _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_SEND_BARRIER));
2238 _dispatch_trace_continuation_push(dm->_as_dq, dc);
2239 qos = _dispatch_continuation_override_qos(dm->_as_dq, dc);
2240 return _dispatch_mach_send_push(dm, dc, qos);
2241 }
2242
2243 DISPATCH_NOINLINE
2244 void
2245 dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier)
2246 {
2247 dispatch_continuation_t dc = _dispatch_continuation_alloc();
2248 uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER;
2249 dispatch_qos_t qos;
2250
2251 _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags);
2252 _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_SEND_BARRIER));
2253 _dispatch_trace_continuation_push(dm->_as_dq, dc);
2254 qos = _dispatch_continuation_override_qos(dm->_as_dq, dc);
2255 return _dispatch_mach_send_push(dm, dc, qos);
2256 }
2257
2258 DISPATCH_NOINLINE
2259 void
2260 dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context,
2261 dispatch_function_t func)
2262 {
2263 dispatch_continuation_t dc = _dispatch_continuation_alloc();
2264 uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER;
2265
2266 _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags);
2267 _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER));
2268 return _dispatch_continuation_async(dm->_as_dq, dc);
2269 }
2270
2271 DISPATCH_NOINLINE
2272 void
2273 dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier)
2274 {
2275 dispatch_continuation_t dc = _dispatch_continuation_alloc();
2276 uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER;
2277
2278 _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags);
2279 _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER));
2280 return _dispatch_continuation_async(dm->_as_dq, dc);
2281 }
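
/*
 * Barrier sketch: both flavors also invoke the channel handler with
 * DISPATCH_MACH_BARRIER_COMPLETED after the block returns, as implemented
 * in _dispatch_mach_barrier_invoke() above; `dm` is assumed connected.
 */
#if 0
	dispatch_mach_send_barrier(dm, ^{
		// runs once every message enqueued ahead of it has been drained
		// from the send queue
	});
	dispatch_mach_receive_barrier(dm, ^{
		// runs after every previously received message has been
		// delivered to the channel handler
	});
#endif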
2282
2283 DISPATCH_NOINLINE
2284 static void
2285 _dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags)
2286 {
2287 dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
2288
2289 dispatch_invoke_with_autoreleasepool(flags, {
2290 if (slowpath(!dm->dm_connect_handler_called)) {
2291 _dispatch_mach_connect_invoke(dm);
2292 }
2293 _dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
2294 DISPATCH_MACH_CANCELED, NULL, 0, dmrr->dmrr_handler_func);
2295 _dispatch_perfmon_workitem_inc();
2296 });
2297 dm->dm_cancel_handler_called = 1;
2298 _dispatch_release(dm); // the retain is done at creation time
2299 }
2300
2301 DISPATCH_NOINLINE
2302 void
2303 dispatch_mach_cancel(dispatch_mach_t dm)
2304 {
2305 dispatch_source_cancel(dm->_as_ds);
2306 }
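
/*
 * Teardown sketch: cancellation disconnects the ports (in-flight messages
 * produce DISPATCH_MACH_DISCONNECTED events) and ends with a final
 * DISPATCH_MACH_CANCELED callout, after which the creation-time retain is
 * dropped in _dispatch_mach_cancel_invoke() and the channel can be
 * released by its owner.
 */
#if 0
	dispatch_mach_cancel(dm);
	dispatch_release(dm); // last external reference
#endif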
2307
2308 static void
2309 _dispatch_mach_install(dispatch_mach_t dm, dispatch_wlh_t wlh,
2310 dispatch_priority_t pri)
2311 {
2312 dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
2313 uint32_t disconnect_cnt;
2314
2315 if (dmrr->du_ident) {
2316 _dispatch_source_refs_register(dm->_as_ds, wlh, pri);
2317 dispatch_assert(dmrr->du_is_direct);
2318 }
2319
2320 if (dm->dm_is_xpc) {
2321 bool monitor_sigterm;
2322 if (_dispatch_mach_xpc_hooks->version < 3) {
2323 monitor_sigterm = true;
2324 } else if (!_dispatch_mach_xpc_hooks->dmxh_enable_sigterm_notification){
2325 monitor_sigterm = true;
2326 } else {
2327 monitor_sigterm =
2328 _dispatch_mach_xpc_hooks->dmxh_enable_sigterm_notification(
2329 dm->dm_recv_refs->dmrr_handler_ctxt);
2330 }
2331 if (monitor_sigterm) {
2332 dispatch_xpc_term_refs_t _dxtr =
2333 dux_create(&_dispatch_xpc_type_sigterm, SIGTERM, 0)._dxtr;
2334 _dxtr->du_owner_wref = _dispatch_ptr2wref(dm);
2335 dm->dm_xpc_term_refs = _dxtr;
2336 _dispatch_unote_register(dm->dm_xpc_term_refs, wlh, pri);
2337 }
2338 }
2339 if (!dm->dq_priority) {
2340 // _dispatch_mach_reply_kevent_register assumes this has been done.
2341 // Unlike regular sources or queues, the DEFAULTQUEUE flag is used
2342 // so that the priority of the channel doesn't act as a QoS floor
2343 // for incoming messages (26761457)
2344 dm->dq_priority = pri;
2345 }
2346 dm->ds_is_installed = true;
2347 if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_send_refs, dmsr_disconnect_cnt,
2348 DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) {
2349 DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed");
2350 }
2351 }
2352
2353 void
2354 _dispatch_mach_finalize_activation(dispatch_mach_t dm, bool *allow_resume)
2355 {
2356 dispatch_priority_t pri;
2357 dispatch_wlh_t wlh;
2358
2359 // call "super"
2360 _dispatch_queue_finalize_activation(dm->_as_dq, allow_resume);
2361
2362 if (!dm->ds_is_installed) {
2363 pri = _dispatch_queue_compute_priority_and_wlh(dm->_as_dq, &wlh);
2364 if (pri) _dispatch_mach_install(dm, wlh, pri);
2365 }
2366 }
2367
2368 DISPATCH_ALWAYS_INLINE
2369 static inline bool
2370 _dispatch_mach_tryarm(dispatch_mach_t dm, dispatch_queue_flags_t *out_dqf)
2371 {
2372 dispatch_queue_flags_t oqf, nqf;
2373 bool rc = os_atomic_rmw_loop2o(dm, dq_atomic_flags, oqf, nqf, relaxed, {
2374 nqf = oqf;
2375 if (nqf & (DSF_ARMED | DSF_CANCELED | DSF_DEFERRED_DELETE |
2376 DSF_DELETED)) {
2377 // the test is inside the loop because it's convenient but the
2378 // result should not change for the duration of the rmw_loop
2379 os_atomic_rmw_loop_give_up(break);
2380 }
2381 nqf |= DSF_ARMED;
2382 });
2383 if (out_dqf) *out_dqf = nqf;
2384 return rc;
2385 }
2386
2387 DISPATCH_ALWAYS_INLINE
2388 static inline dispatch_queue_wakeup_target_t
2389 _dispatch_mach_invoke2(dispatch_object_t dou,
2390 dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
2391 uint64_t *owned)
2392 {
2393 dispatch_mach_t dm = dou._dm;
2394 dispatch_queue_wakeup_target_t retq = NULL;
2395 dispatch_queue_t dq = _dispatch_queue_get_current();
2396 dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
2397 dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
2398 dispatch_queue_flags_t dqf = 0;
2399
2400 if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN) && dmrr &&
2401 _dispatch_unote_wlh_changed(dmrr, _dispatch_get_wlh())) {
2402 dqf = _dispatch_queue_atomic_flags_set_orig(dm->_as_dq,
2403 DSF_WLH_CHANGED);
2404 if (!(dqf & DSF_WLH_CHANGED)) {
2405 if (dm->dm_is_xpc) {
2406 _dispatch_bug_deprecated("Changing target queue "
2407 "hierarchy after xpc connection was activated");
2408 } else {
2409 _dispatch_bug_deprecated("Changing target queue "
2410 "hierarchy after mach channel was activated");
2411 }
2412 }
2413 }
2414
2415 // This function performs all mach channel actions. Each action is
2416 // responsible for verifying that it takes place on the appropriate queue.
2417 // If the current queue is not the correct queue for this action, the
2418 // correct queue will be returned and the invoke will be re-driven on that
2419 // queue.
2420
2421 // The order of tests here in invoke and in wakeup should be consistent.
2422
2423 if (unlikely(!dm->ds_is_installed)) {
2424 // The channel needs to be installed on the kevent queue.
2425 if (unlikely(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
2426 return dm->do_targetq;
2427 }
2428 _dispatch_mach_install(dm, _dispatch_get_wlh(),_dispatch_get_basepri());
2429 _dispatch_perfmon_workitem_inc();
2430 }
2431
2432 if (_dispatch_queue_class_probe(dm)) {
2433 if (dq == dm->do_targetq) {
2434 drain:
2435 retq = _dispatch_queue_serial_drain(dm->_as_dq, dic, flags, owned);
2436 } else {
2437 retq = dm->do_targetq;
2438 }
2439 }
2440
2441 if (!retq && _dispatch_unote_registered(dmrr)) {
2442 if (_dispatch_mach_tryarm(dm, &dqf)) {
2443 _dispatch_unote_resume(dmrr);
2444 if (dq == dm->do_targetq && !dq->do_targetq && !dmsr->dmsr_tail &&
2445 (dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) &&
2446 _dispatch_wlh_should_poll_unote(dmrr)) {
2447 // try to redrive the drain from under the lock for channels
2448 // targeting an overcommit root queue to avoid parking
2449 // when the next message has already fired
2450 _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE);
2451 if (dm->dq_items_tail) goto drain;
2452 }
2453 }
2454 } else {
2455 dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
2456 }
2457
2458 if (dmsr->dmsr_tail) {
2459 bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt &&
2460 _dispatch_unote_registered(dmsr));
2461 if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) ||
2462 (dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) {
2463 // The channel has pending messages to send.
2464 if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) {
2465 return retq ? retq : &_dispatch_mgr_q;
2466 }
2467 dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE;
2468 if (dq != &_dispatch_mgr_q) {
2469 send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER;
2470 }
2471 _dispatch_mach_send_invoke(dm, flags, send_flags);
2472 }
2473 if (!retq) retq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
2474 } else if (!retq && (dqf & DSF_CANCELED)) {
2475 // The channel has been cancelled and needs to be uninstalled from the
2476 // manager queue. After uninstallation, the cancellation handler needs
2477 // to be delivered to the target queue.
2478 if (!dm->dm_uninstalled) {
2479 if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) {
2480 // waiting for the delivery of a deferred delete event
2481 return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
2482 }
2483 if (dq != &_dispatch_mgr_q) {
2484 return retq ? retq : &_dispatch_mgr_q;
2485 }
2486 _dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL);
2487 if (unlikely(!dm->dm_uninstalled)) {
2488 // waiting for the delivery of a deferred delete event
2489 // or deletion didn't happen because send_invoke couldn't
2490 // acquire the send lock
2491 return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
2492 }
2493 }
2494 if (!dm->dm_cancel_handler_called) {
2495 if (dq != dm->do_targetq) {
2496 return retq ? retq : dm->do_targetq;
2497 }
2498 _dispatch_mach_cancel_invoke(dm, flags);
2499 }
2500 }
2501
2502 return retq;
2503 }
2504
2505 DISPATCH_NOINLINE
2506 void
2507 _dispatch_mach_invoke(dispatch_mach_t dm,
2508 dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
2509 {
2510 _dispatch_queue_class_invoke(dm, dic, flags,
2511 DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS, _dispatch_mach_invoke2);
2512 }
2513
2514 void
2515 _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos,
2516 dispatch_wakeup_flags_t flags)
2517 {
2518 // This function determines whether the mach channel needs to be invoked.
2519 // The order of tests here in probe and in invoke should be consistent.
2520
2521 dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
2522 dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE;
2523 dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
2524
2525 if (!dm->ds_is_installed) {
2526 // The channel needs to be installed on the kevent queue.
2527 tq = DISPATCH_QUEUE_WAKEUP_TARGET;
2528 goto done;
2529 }
2530
2531 if (_dispatch_queue_class_probe(dm)) {
2532 tq = DISPATCH_QUEUE_WAKEUP_TARGET;
2533 goto done;
2534 }
2535
2536 if (_dispatch_lock_is_locked(dmsr->dmsr_state_lock.dul_lock)) {
2537 // Sending and uninstallation below require the send lock, the channel
2538 // will be woken up when the lock is dropped <rdar://15132939&15203957>
2539 goto done;
2540 }
2541
2542 if (dmsr->dmsr_tail) {
2543 bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt &&
2544 _dispatch_unote_registered(dmsr));
2545 if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) ||
2546 (dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) {
2547 if (unlikely(requires_mgr)) {
2548 tq = DISPATCH_QUEUE_WAKEUP_MGR;
2549 } else {
2550 tq = DISPATCH_QUEUE_WAKEUP_TARGET;
2551 }
2552 }
2553 } else if (dqf & DSF_CANCELED) {
2554 if (!dm->dm_uninstalled) {
2555 if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) {
2556 // waiting for the delivery of a deferred delete event
2557 } else {
2558 // The channel needs to be uninstalled from the manager queue
2559 tq = DISPATCH_QUEUE_WAKEUP_MGR;
2560 }
2561 } else if (!dm->dm_cancel_handler_called) {
2562 // the cancellation handler needs to be delivered to the target
2563 // queue.
2564 tq = DISPATCH_QUEUE_WAKEUP_TARGET;
2565 }
2566 }
2567
2568 done:
2569 if ((tq == DISPATCH_QUEUE_WAKEUP_TARGET) &&
2570 dm->do_targetq == &_dispatch_mgr_q) {
2571 tq = DISPATCH_QUEUE_WAKEUP_MGR;
2572 }
2573
2574 return _dispatch_queue_class_wakeup(dm->_as_dq, qos, flags, tq);
2575 }
2576
2577 static void
2578 _dispatch_mach_sigterm_invoke(void *ctx)
2579 {
2580 dispatch_mach_t dm = ctx;
2581 if (!(dm->dq_atomic_flags & DSF_CANCELED)) {
2582 dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
2583 _dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
2584 DISPATCH_MACH_SIGTERM_RECEIVED, NULL, 0,
2585 dmrr->dmrr_handler_func);
2586 }
2587 }
2588
2589 void
2590 _dispatch_xpc_sigterm_merge(dispatch_unote_t du,
2591 uint32_t flags, uintptr_t data DISPATCH_UNUSED,
2592 uintptr_t status DISPATCH_UNUSED, pthread_priority_t pp)
2593 {
2594 dispatch_mach_t dm = _dispatch_wref2ptr(du._du->du_owner_wref);
2595 uint32_t options = 0;
2596 if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) &&
2597 !(flags & EV_DELETE)) {
2598 options = DU_UNREGISTER_IMMEDIATE_DELETE;
2599 } else {
2600 dispatch_assert((flags & EV_ONESHOT) && (flags & EV_DELETE));
2601 options = DU_UNREGISTER_ALREADY_DELETED;
2602 }
2603 _dispatch_unote_unregister(du, options);
2604
2605 if (!(dm->dq_atomic_flags & DSF_CANCELED)) {
2606 _dispatch_barrier_async_detached_f(dm->_as_dq, dm,
2607 _dispatch_mach_sigterm_invoke);
2608 } else {
2609 dx_wakeup(dm, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_MAKE_DIRTY);
2610 }
2611 }
2612
2613 #pragma mark -
2614 #pragma mark dispatch_mach_msg_t
2615
2616 dispatch_mach_msg_t
2617 dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size,
2618 dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr)
2619 {
2620 if (slowpath(size < sizeof(mach_msg_header_t)) ||
2621 slowpath(destructor && !msg)) {
2622 DISPATCH_CLIENT_CRASH(size, "Empty message");
2623 }
2624
2625 dispatch_mach_msg_t dmsg;
2626 size_t msg_size = sizeof(struct dispatch_mach_msg_s);
2627 if (!destructor && os_add_overflow(msg_size,
2628 (size - sizeof(dmsg->dmsg_msg)), &msg_size)) {
2629 DISPATCH_CLIENT_CRASH(size, "Message size too large");
2630 }
2631
2632 dmsg = _dispatch_object_alloc(DISPATCH_VTABLE(mach_msg), msg_size);
2633 if (destructor) {
2634 dmsg->dmsg_msg = msg;
2635 } else if (msg) {
2636 memcpy(dmsg->dmsg_buf, msg, size);
2637 }
2638 dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
2639 dmsg->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
2640 dmsg->dmsg_destructor = destructor;
2641 dmsg->dmsg_size = size;
2642 if (msg_ptr) {
2643 *msg_ptr = _dispatch_mach_msg_get_msg(dmsg);
2644 }
2645 return dmsg;
2646 }
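
/*
 * Destructor sketch: the default destructor copies the bytes into the
 * trailing dmsg_buf storage allocated above, while
 * DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE adopts an out-of-line region
 * that _dispatch_mach_msg_dispose() below returns to the VM system.
 */
#if 0
	// inline storage; the caller fills the header through `hdr`
	mach_msg_header_t *hdr;
	dispatch_mach_msg_t m1 = dispatch_mach_msg_create(NULL, 64,
			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
	dispatch_release(m1);

	// adopt a page-sized buffer; deallocated on the last release
	mach_vm_address_t addr = 0;
	if (mach_vm_allocate(mach_task_self(), &addr, vm_page_size,
			VM_FLAGS_ANYWHERE) == KERN_SUCCESS) {
		dispatch_mach_msg_t m2 = dispatch_mach_msg_create(
				(mach_msg_header_t *)(uintptr_t)addr, vm_page_size,
				DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE, NULL);
		dispatch_release(m2);
	}
#endif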
2647
2648 void
2649 _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg,
2650 DISPATCH_UNUSED bool *allow_free)
2651 {
2652 if (dmsg->dmsg_voucher) {
2653 _voucher_release(dmsg->dmsg_voucher);
2654 dmsg->dmsg_voucher = NULL;
2655 }
2656 switch (dmsg->dmsg_destructor) {
2657 case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT:
2658 break;
2659 case DISPATCH_MACH_MSG_DESTRUCTOR_FREE:
2660 free(dmsg->dmsg_msg);
2661 break;
2662 case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: {
2663 mach_vm_size_t vm_size = dmsg->dmsg_size;
2664 mach_vm_address_t vm_addr = (uintptr_t)dmsg->dmsg_msg;
2665 (void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(),
2666 vm_addr, vm_size));
2667 break;
2668 }}
2669 }
2670
2671 static inline mach_msg_header_t*
2672 _dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg)
2673 {
2674 return dmsg->dmsg_destructor ? dmsg->dmsg_msg :
2675 (mach_msg_header_t*)dmsg->dmsg_buf;
2676 }
2677
2678 mach_msg_header_t*
2679 dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr)
2680 {
2681 if (size_ptr) {
2682 *size_ptr = dmsg->dmsg_size;
2683 }
2684 return _dispatch_mach_msg_get_msg(dmsg);
2685 }
2686
2687 size_t
2688 _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz)
2689 {
2690 size_t offset = 0;
2691 offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
2692 dx_kind(dmsg), dmsg);
2693 offset += _dispatch_object_debug_attr(dmsg, buf + offset, bufsiz - offset);
2694 offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, "
2695 "msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf);
2696 mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
2697 if (hdr->msgh_id) {
2698 offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ",
2699 hdr->msgh_id);
2700 }
2701 if (hdr->msgh_size) {
2702 offset += dsnprintf(&buf[offset], bufsiz - offset, "size %u, ",
2703 hdr->msgh_size);
2704 }
2705 if (hdr->msgh_bits) {
2706 offset += dsnprintf(&buf[offset], bufsiz - offset, "bits <l %u, r %u",
2707 MACH_MSGH_BITS_LOCAL(hdr->msgh_bits),
2708 MACH_MSGH_BITS_REMOTE(hdr->msgh_bits));
2709 if (MACH_MSGH_BITS_OTHER(hdr->msgh_bits)) {
2710 offset += dsnprintf(&buf[offset], bufsiz - offset, ", o 0x%x",
2711 MACH_MSGH_BITS_OTHER(hdr->msgh_bits));
2712 }
2713 offset += dsnprintf(&buf[offset], bufsiz - offset, ">, ");
2714 }
2715 if (hdr->msgh_local_port && hdr->msgh_remote_port) {
2716 offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x, "
2717 "remote 0x%x", hdr->msgh_local_port, hdr->msgh_remote_port);
2718 } else if (hdr->msgh_local_port) {
2719 offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x",
2720 hdr->msgh_local_port);
2721 } else if (hdr->msgh_remote_port) {
2722 offset += dsnprintf(&buf[offset], bufsiz - offset, "remote 0x%x",
2723 hdr->msgh_remote_port);
2724 } else {
2725 offset += dsnprintf(&buf[offset], bufsiz - offset, "no ports");
2726 }
2727 offset += dsnprintf(&buf[offset], bufsiz - offset, " } }");
2728 return offset;
2729 }
2730
2731 DISPATCH_ALWAYS_INLINE
2732 static dispatch_queue_t
2733 _dispatch_mach_msg_context_async_reply_queue(void *msg_context)
2734 {
2735 if (DISPATCH_MACH_XPC_SUPPORTS_ASYNC_REPLIES(_dispatch_mach_xpc_hooks)) {
2736 return _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue(
2737 msg_context);
2738 }
2739 return NULL;
2740 }
2741
2742 static dispatch_continuation_t
2743 _dispatch_mach_msg_async_reply_wrap(dispatch_mach_msg_t dmsg,
2744 dispatch_mach_t dm)
2745 {
2746 _dispatch_retain(dm); // Released in _dispatch_mach_msg_async_reply_invoke()
2747 dispatch_continuation_t dc = _dispatch_continuation_alloc();
2748 dc->do_vtable = DC_VTABLE(MACH_ASYNC_REPLY);
2749 dc->dc_data = dmsg;
2750 dc->dc_other = dm;
2751 dc->dc_priority = DISPATCH_NO_PRIORITY;
2752 dc->dc_voucher = DISPATCH_NO_VOUCHER;
2753 return dc;
2754 }
2755
2756 DISPATCH_NOINLINE
2757 void
2758 _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc,
2759 DISPATCH_UNUSED dispatch_invoke_context_t dic,
2760 dispatch_invoke_flags_t flags)
2761 {
2762 // _dispatch_mach_msg_invoke_with_mach() releases the reference on dmsg
2763 // taken by _dispatch_mach_msg_async_reply_wrap() after handling it.
2764 dispatch_mach_msg_t dmsg = dc->dc_data;
2765 dispatch_mach_t dm = dc->dc_other;
2766 _dispatch_mach_msg_invoke_with_mach(dmsg,
2767 flags | DISPATCH_INVOKE_ASYNC_REPLY, dm);
2768
2769 // Balances _dispatch_mach_msg_async_reply_wrap
2770 _dispatch_release(dc->dc_other);
2771
2772 _dispatch_continuation_free(dc);
2773 }
2774
2775 #pragma mark -
2776 #pragma mark dispatch_mig_server
2777
2778 mach_msg_return_t
2779 dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz,
2780 dispatch_mig_callback_t callback)
2781 {
2782 mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT
2783 | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX)
2784 | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER;
2785 mach_msg_options_t tmp_options;
2786 mig_reply_error_t *bufTemp, *bufRequest, *bufReply;
2787 mach_msg_return_t kr = 0;
2788 uint64_t assertion_token = 0;
2789 uint32_t cnt = 1000; // do not stall out serial queues
2790 boolean_t demux_success;
2791 bool received = false;
2792 size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE;
2793 dispatch_source_refs_t dr = ds->ds_refs;
2794
2795 bufRequest = alloca(rcv_size);
2796 bufRequest->RetCode = 0;
2797 for (mach_vm_address_t p = mach_vm_trunc_page((mach_vm_address_t)bufRequest + vm_page_size);
2798 p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) {
2799 *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
2800 }
2801
2802 bufReply = alloca(rcv_size);
2803 bufReply->Head.msgh_size = 0;
2804 for (mach_vm_address_t p = mach_vm_trunc_page((mach_vm_address_t)bufReply + vm_page_size);
2805 p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) {
2806 *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
2807 }
2808
2809 #if DISPATCH_DEBUG
2810 options |= MACH_RCV_LARGE; // rdar://problem/8422992
2811 #endif
2812 tmp_options = options;
2813 // XXX FIXME -- change this to not starve out the target queue
2814 for (;;) {
2815 if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) {
2816 options &= ~MACH_RCV_MSG;
2817 tmp_options &= ~MACH_RCV_MSG;
2818
2819 if (!(tmp_options & MACH_SEND_MSG)) {
2820 goto out;
2821 }
2822 }
2823 kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size,
2824 (mach_msg_size_t)rcv_size, (mach_port_t)dr->du_ident, 0, 0);
2825
2826 tmp_options = options;
2827
2828 if (slowpath(kr)) {
2829 switch (kr) {
2830 case MACH_SEND_INVALID_DEST:
2831 case MACH_SEND_TIMED_OUT:
2832 if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
2833 mach_msg_destroy(&bufReply->Head);
2834 }
2835 break;
2836 case MACH_RCV_TIMED_OUT:
2837 // Don't return an error if a message was sent this time or
2838 // a message was successfully received previously
2839 // rdar://problems/7363620&7791738
2840 if (bufReply->Head.msgh_remote_port || received) {
2841 kr = MACH_MSG_SUCCESS;
2842 }
2843 break;
2844 case MACH_RCV_INVALID_NAME:
2845 break;
2846 #if DISPATCH_DEBUG
2847 case MACH_RCV_TOO_LARGE:
2848 // receive messages that are too large and log their id and size
2849 // rdar://problem/8422992
2850 tmp_options &= ~MACH_RCV_LARGE;
2851 size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE;
2852 void *large_buf = malloc(large_size);
2853 if (large_buf) {
2854 rcv_size = large_size;
2855 bufReply = large_buf;
2856 }
2857 if (!mach_msg(&bufReply->Head, tmp_options, 0,
2858 (mach_msg_size_t)rcv_size,
2859 (mach_port_t)dr->du_ident, 0, 0)) {
2860 _dispatch_log("BUG in libdispatch client: "
2861 "dispatch_mig_server received message larger than "
2862 "requested size %zd: id = 0x%x, size = %d",
2863 maxmsgsz, bufReply->Head.msgh_id,
2864 bufReply->Head.msgh_size);
2865 }
2866 if (large_buf) {
2867 free(large_buf);
2868 }
2869 // fall through
2870 #endif
2871 default:
2872 _dispatch_bug_mach_client(
2873 "dispatch_mig_server: mach_msg() failed", kr);
2874 break;
2875 }
2876 goto out;
2877 }
2878
2879 if (!(tmp_options & MACH_RCV_MSG)) {
2880 goto out;
2881 }
2882
2883 if (assertion_token) {
2884 #if DISPATCH_USE_IMPORTANCE_ASSERTION
2885 int r = proc_importance_assertion_complete(assertion_token);
2886 (void)dispatch_assume_zero(r);
2887 #endif
2888 assertion_token = 0;
2889 }
2890 received = true;
2891
2892 bufTemp = bufRequest;
2893 bufRequest = bufReply;
2894 bufReply = bufTemp;
2895
2896 #if DISPATCH_USE_IMPORTANCE_ASSERTION
2897 #pragma clang diagnostic push
2898 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2899 int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head,
2900 NULL, &assertion_token);
2901 if (r && slowpath(r != EIO)) {
2902 (void)dispatch_assume_zero(r);
2903 }
2904 #pragma clang diagnostic pop
2905 #endif
2906 _voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head));
2907 demux_success = callback(&bufRequest->Head, &bufReply->Head);
2908
2909 if (!demux_success) {
2910 // destroy the request - but not the reply port
2911 bufRequest->Head.msgh_remote_port = 0;
2912 mach_msg_destroy(&bufRequest->Head);
2913 } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
2914 // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode
2915 // is present
2916 if (slowpath(bufReply->RetCode)) {
2917 if (bufReply->RetCode == MIG_NO_REPLY) {
2918 continue;
2919 }
2920
2921 // destroy the request - but not the reply port
2922 bufRequest->Head.msgh_remote_port = 0;
2923 mach_msg_destroy(&bufRequest->Head);
2924 }
2925 }
2926
2927 if (bufReply->Head.msgh_remote_port) {
2928 tmp_options |= MACH_SEND_MSG;
2929 if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) !=
2930 MACH_MSG_TYPE_MOVE_SEND_ONCE) {
2931 tmp_options |= MACH_SEND_TIMEOUT;
2932 }
2933 }
2934 }
2935
2936 out:
2937 if (assertion_token) {
2938 #if DISPATCH_USE_IMPORTANCE_ASSERTION
2939 int r = proc_importance_assertion_complete(assertion_token);
2940 (void)dispatch_assume_zero(r);
2941 #endif
2942 }
2943
2944 return kr;
2945 }
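
/*
 * Server-side sketch: dispatch_mig_server() is typically driven from a
 * MACH_RECV dispatch source. example_server() stands in for the demux
 * routine that mig(1) generates for a subsystem, and 4096 is a
 * placeholder for the subsystem's largest expected request size.
 */
#if 0
extern boolean_t example_server(mach_msg_header_t *request,
		mach_msg_header_t *reply);

static dispatch_source_t
example_start_mig_server(mach_port_t recv_port)
{
	dispatch_source_t ds = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_MACH_RECV, recv_port, 0,
			dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0));
	dispatch_source_set_event_handler(ds, ^{
		(void)dispatch_mig_server(ds, 4096, example_server);
	});
	dispatch_resume(ds);
	return ds;
}
#endif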
2946
2947 #pragma mark -
2948 #pragma mark dispatch_mach_debug
2949
2950 static size_t
2951 _dispatch_mach_debug_attr(dispatch_mach_t dm, char *buf, size_t bufsiz)
2952 {
2953 dispatch_queue_t target = dm->do_targetq;
2954 dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
2955 dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
2956
2957 return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, "
2958 "send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, "
2959 "send state = %016llx, disconnected = %d, canceled = %d ",
2960 target && target->dq_label ? target->dq_label : "", target,
2961 (mach_port_t)dmrr->du_ident, dmsr->dmsr_send,
2962 (mach_port_t)dmsr->du_ident,
2963 dmsr->dmsr_notification_armed ? " (armed)" : "",
2964 dmsr->dmsr_checkin_port, dmsr->dmsr_checkin ? " (pending)" : "",
2965 dmsr->dmsr_state, dmsr->dmsr_disconnect_cnt,
2966 (bool)(dm->dq_atomic_flags & DSF_CANCELED));
2967 }
2968
2969 size_t
2970 _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz)
2971 {
2972 size_t offset = 0;
2973 offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
2974 dm->dq_label && !dm->dm_cancel_handler_called ? dm->dq_label :
2975 dx_kind(dm), dm);
2976 offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset);
2977 offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset);
2978 offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
2979 return offset;
2980 }
2981
2982 #endif /* HAVE_MACH */