/*
 * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
// Contains introspection routines that only exist in the version of the
// library with introspection support

#if DISPATCH_INTROSPECTION

#include "internal.h"
#include "dispatch/introspection.h"
#include "introspection_private.h"
typedef struct dispatch_introspection_thread_s {
	void *dit_isa;
	TAILQ_ENTRY(dispatch_introspection_thread_s) dit_list;
	pthread_t thread;
	dispatch_queue_t *queue;
} dispatch_introspection_thread_s;
typedef struct dispatch_introspection_thread_s *dispatch_introspection_thread_t;

struct dispatch_introspection_state_s _dispatch_introspection = {
	.threads = TAILQ_HEAD_INITIALIZER(_dispatch_introspection.threads),
	.queues = TAILQ_HEAD_INITIALIZER(_dispatch_introspection.queues),
};

static void _dispatch_introspection_thread_remove(void *ctxt);

static void _dispatch_introspection_queue_order_dispose(dispatch_queue_t dq);
#pragma mark -
#pragma mark dispatch_introspection_init

static bool
_dispatch_getenv_bool(const char *env, bool default_v)
{
	const char *v = getenv(env);

	if (v) {
		return strcasecmp(v, "YES") == 0 || strcasecmp(v, "Y") == 0 ||
				strcasecmp(v, "TRUE") == 0 || atoi(v);
	}
	return default_v;
}

void
_dispatch_introspection_init(void)
{
	TAILQ_INSERT_TAIL(&_dispatch_introspection.queues,
			&_dispatch_main_q, diq_list);
	TAILQ_INSERT_TAIL(&_dispatch_introspection.queues,
			&_dispatch_mgr_q, diq_list);
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
	TAILQ_INSERT_TAIL(&_dispatch_introspection.queues,
			_dispatch_mgr_q.do_targetq, diq_list);
#endif
	for (size_t i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
		TAILQ_INSERT_TAIL(&_dispatch_introspection.queues,
				&_dispatch_root_queues[i], diq_list);
	}

	_dispatch_introspection.debug_queue_inversions =
			_dispatch_getenv_bool("LIBDISPATCH_DEBUG_QUEUE_INVERSIONS", false);

	// Hack to determine queue TSD offset from start of pthread structure
	uintptr_t thread = _dispatch_thread_self();
	thread_identifier_info_data_t tiid;
	mach_msg_type_number_t cnt = THREAD_IDENTIFIER_INFO_COUNT;
	kern_return_t kr = thread_info(pthread_mach_thread_np((void*)thread),
			THREAD_IDENTIFIER_INFO, (thread_info_t)&tiid, &cnt);
	if (!dispatch_assume_zero(kr)) {
		_dispatch_introspection.thread_queue_offset =
				(void*)(uintptr_t)tiid.dispatch_qaddr - (void*)thread;
	}
	_dispatch_thread_key_create(&dispatch_introspection_key,
			_dispatch_introspection_thread_remove);
	_dispatch_introspection_thread_add(); // add main thread
}
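/*
 * Note on the offset captured above: THREAD_IDENTIFIER_INFO reports
 * dispatch_qaddr, the address of the calling thread's dispatch queue TSD
 * slot. Subtracting the pthread base address yields an offset that is
 * assumed to be identical for every thread, so a thread's current queue
 * can later be located roughly like this (illustrative sketch only;
 * `pthread_base` stands for that thread's _dispatch_thread_self() value):
 *
 *	dispatch_queue_t *slot = (void *)pthread_base +
 *			_dispatch_introspection.thread_queue_offset;
 *	dispatch_queue_t current = slot ? *slot : NULL;
 */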
const struct dispatch_introspection_versions_s
dispatch_introspection_versions = {
	.introspection_version = 1,
	.hooks_size = sizeof(dispatch_introspection_hooks_s),
	.queue_item_version = 1,
	.queue_item_size = sizeof(dispatch_introspection_queue_item_s),
	.queue_block_version = 1,
	.queue_block_size = sizeof(dispatch_introspection_queue_block_s),
	.queue_function_version = 1,
	.queue_function_size = sizeof(dispatch_introspection_queue_function_s),
	.queue_thread_version = 1,
	.queue_thread_size = sizeof(dispatch_introspection_queue_thread_s),
	.object_size = sizeof(dispatch_introspection_object_s),
	.queue_size = sizeof(dispatch_introspection_queue_s),
	.source_size = sizeof(dispatch_introspection_source_s),
};
#pragma mark -
#pragma mark dispatch_introspection_threads

void
_dispatch_introspection_thread_add(void)
{
	if (_dispatch_thread_getspecific(dispatch_introspection_key)) {
		return;
	}

	uintptr_t thread = _dispatch_thread_self();
	dispatch_introspection_thread_t dit = (void*)_dispatch_continuation_alloc();
	dit->dit_isa = (void*)0x41;
	dit->thread = (void*)thread;
	dit->queue = !_dispatch_introspection.thread_queue_offset ? NULL :
			(void*)thread + _dispatch_introspection.thread_queue_offset;
	_dispatch_thread_setspecific(dispatch_introspection_key, dit);
	_dispatch_unfair_lock_lock(&_dispatch_introspection.threads_lock);
	TAILQ_INSERT_TAIL(&_dispatch_introspection.threads, dit, dit_list);
	_dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock);
}

static void
_dispatch_introspection_thread_remove(void *ctxt)
{
	dispatch_introspection_thread_t dit = ctxt;
	_dispatch_unfair_lock_lock(&_dispatch_introspection.threads_lock);
	TAILQ_REMOVE(&_dispatch_introspection.threads, dit, dit_list);
	_dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock);
	_dispatch_continuation_free((void*)dit);
	_dispatch_thread_setspecific(dispatch_introspection_key, NULL);
}
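/*
 * Thread records are unregistered automatically: the TSD key created in
 * _dispatch_introspection_init() uses _dispatch_introspection_thread_remove
 * as its destructor, so the entry is unlinked and freed when the thread
 * exits, without any explicit bookkeeping by the rest of the library.
 */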
#pragma mark -
#pragma mark dispatch_introspection_info

dispatch_introspection_queue_s
dispatch_introspection_queue_get_info(dispatch_queue_t dq)
{
	bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT);
	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);

	dispatch_introspection_queue_s diq = {
		.queue = dq,
		.target_queue = dq->do_targetq,
		.label = dq->dq_label,
		.serialnum = dq->dq_serialnum,
		.width = dq->dq_width,
		.suspend_count = _dq_state_suspend_cnt(dq_state) + dq->dq_side_suspend_cnt,
		.enqueued = _dq_state_is_enqueued(dq_state) && !global,
		.barrier = _dq_state_is_in_barrier(dq_state) && !global,
		.draining = (dq->dq_items_head == (void*)~0ul) ||
				(!dq->dq_items_head && dq->dq_items_tail),
		.global = global,
		.main = (dq == &_dispatch_main_q),
	};
	return diq;
}
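/*
 * The snapshot above is taken without acquiring the queue's lock: dq_state
 * is read with a relaxed atomic load and the remaining fields are plain
 * reads, so introspection clients should treat the returned values as a
 * best-effort, possibly transiently inconsistent view of the queue.
 */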
static void
_dispatch_introspection_continuation_get_info(dispatch_queue_t dq,
		dispatch_continuation_t dc, dispatch_introspection_queue_item_t diqi)
{
	void *ctxt = dc->dc_ctxt;
	dispatch_function_t func = dc->dc_func;
	pthread_t waiter = NULL;
	bool apply = false;
	uintptr_t flags = dc->dc_flags;

	if (_dispatch_object_has_vtable(dc)) {
		flags = 0;
		switch (dc_type(dc)) {
#if HAVE_PTHREAD_WORKQUEUE_QOS
		case DC_OVERRIDE_STEALING_TYPE:
		case DC_OVERRIDE_OWNING_TYPE:
			dc = dc->dc_data;
			if (_dispatch_object_has_vtable(dc)) {
				// these really wrap queues so we should hide the continuation type
				dq = (dispatch_queue_t)dc;
				diqi->type = dispatch_introspection_queue_item_type_queue;
				diqi->queue = dispatch_introspection_queue_get_info(dq);
				return;
			}
			return _dispatch_introspection_continuation_get_info(dq, dc, diqi);
#endif
		case DC_ASYNC_REDIRECT_TYPE:
			DISPATCH_INTERNAL_CRASH(0, "Handled by the caller");
		case DC_MACH_SEND_BARRRIER_DRAIN_TYPE:
			break;
		case DC_MACH_SEND_BARRIER_TYPE:
		case DC_MACH_RECV_BARRIER_TYPE:
			flags = (uintptr_t)dc->dc_data;
			dq = dq->do_targetq;
			break;
		}
	}

	if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) {
		waiter = pthread_from_mach_thread_np((mach_port_t)dc->dc_data);
		if (flags & DISPATCH_OBJ_BARRIER_BIT) {
			dc = dc->dc_ctxt;
			dq = dc->dc_data;
		}
		ctxt = dc->dc_ctxt;
		func = dc->dc_func;
	}
	if (func == _dispatch_sync_recurse_invoke) {
		dc = dc->dc_ctxt;
		dq = dc->dc_data;
		ctxt = dc->dc_ctxt;
		func = dc->dc_func;
	} else if (func == _dispatch_apply_invoke ||
			func == _dispatch_apply_redirect_invoke) {
		dispatch_apply_t da = ctxt;
		if (da->da_todo) {
			dc = da->da_dc;
			dq = dc->dc_data;
			ctxt = dc->dc_ctxt;
			func = dc->dc_func;
			apply = true;
		}
	}
	if (flags & DISPATCH_OBJ_BLOCK_BIT) {
		diqi->type = dispatch_introspection_queue_item_type_block;
		func = _dispatch_Block_invoke(ctxt);
	} else {
		diqi->type = dispatch_introspection_queue_item_type_function;
	}
	diqi->function = (dispatch_introspection_queue_function_s){
		.continuation = dc,
		.target_queue = dq,
		.context = ctxt,
		.function = func,
		.waiter = waiter,
		.barrier = (flags & DISPATCH_OBJ_BARRIER_BIT) || dq->dq_width == 1,
		.sync = flags & DISPATCH_OBJ_SYNC_SLOW_BIT,
		.apply = apply,
	};
	if (flags & DISPATCH_OBJ_GROUP_BIT) {
		dispatch_group_t group = dc->dc_data;
		if (dx_type(group) == DISPATCH_GROUP_TYPE) {
			diqi->function.group = group;
		}
	}
}
dispatch_introspection_object_s
_dispatch_introspection_object_get_info(dispatch_object_t dou)
{
	dispatch_introspection_object_s dio = {
		.object = dou._dc,
		.target_queue = dou._do->do_targetq,
		.type = (void*)dou._do->do_vtable,
		.kind = dx_kind(dou._do),
	};
	return dio;
}
dispatch_introspection_source_s
_dispatch_introspection_source_get_info(dispatch_source_t ds)
{
	dispatch_source_refs_t dr = ds->ds_refs;
	dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER];
	void *ctxt = NULL;
	dispatch_function_t handler = NULL;
	bool hdlr_is_block = false;
	if (dc) {
		ctxt = dc->dc_ctxt;
		handler = dc->dc_func;
		hdlr_is_block = (dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT);
	}

	uint64_t dq_state = os_atomic_load2o(ds, dq_state, relaxed);
	dispatch_introspection_source_s dis = {
		.source = ds,
		.target_queue = ds->do_targetq,
		.context = ctxt,
		.handler = handler,
		.suspend_count = _dq_state_suspend_cnt(dq_state) + ds->dq_side_suspend_cnt,
		.enqueued = _dq_state_is_enqueued(dq_state),
		.handler_is_block = hdlr_is_block,
		.timer = ds->ds_is_timer,
		.after = ds->ds_is_timer && (bool)(ds_timer(ds).flags & DISPATCH_TIMER_AFTER),
	};

	dispatch_kevent_t dk = ds->ds_dkev;
	if (ds->ds_is_custom_source) {
		dis.type = (unsigned long)dk;
	} else if (dk) {
		dis.type = (unsigned long)dk->dk_kevent.filter;
		dis.handle = (unsigned long)dk->dk_kevent.ident;
	}
	return dis;
}
dispatch_introspection_queue_thread_s
_dispatch_introspection_thread_get_info(dispatch_introspection_thread_t dit)
{
	dispatch_introspection_queue_thread_s diqt = {
		.object = (void*)dit,
		.thread = dit->thread,
	};
	if (dit->queue && *dit->queue) {
		diqt.queue = dispatch_introspection_queue_get_info(*dit->queue);
	}
	return diqt;
}
dispatch_introspection_queue_item_s
dispatch_introspection_queue_item_get_info(dispatch_queue_t dq,
		dispatch_continuation_t dc)
{
	dispatch_introspection_queue_item_s diqi;
	dispatch_object_t dou;

again:
	dou._dc = dc;
	if (_dispatch_object_has_vtable(dou._do)) {
		unsigned long type = dx_type(dou._do);
		unsigned long metatype = type & _DISPATCH_META_TYPE_MASK;
		if (type == DC_ASYNC_REDIRECT_TYPE) {
			dc = dc->dc_other;
			goto again;
		}
		if (metatype == _DISPATCH_CONTINUATION_TYPE) {
			_dispatch_introspection_continuation_get_info(dq, dc, &diqi);
		} else if (metatype == _DISPATCH_QUEUE_TYPE &&
				type != DISPATCH_QUEUE_SPECIFIC_TYPE) {
			diqi.type = dispatch_introspection_queue_item_type_queue;
			diqi.queue = dispatch_introspection_queue_get_info(dou._dq);
		} else if (metatype == _DISPATCH_SOURCE_TYPE &&
				type != DISPATCH_MACH_CHANNEL_TYPE) {
			diqi.type = dispatch_introspection_queue_item_type_source;
			diqi.source = _dispatch_introspection_source_get_info(dou._ds);
		} else {
			diqi.type = dispatch_introspection_queue_item_type_object;
			diqi.object = _dispatch_introspection_object_get_info(dou._do);
		}
	} else {
		_dispatch_introspection_continuation_get_info(dq, dc, &diqi);
	}
	return diqi;
}
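/*
 * Summary of the classification above: vtable-less items and continuation
 * metatypes are reported as block/function work items, queue metatypes
 * (other than queue-specific objects) as nested queues, source metatypes
 * (other than mach channels) as sources, and anything else as a generic
 * dispatch object.
 */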
#pragma mark -
#pragma mark dispatch_introspection_iterators

dispatch_queue_t
dispatch_introspection_get_queues(dispatch_queue_t start, size_t count,
		dispatch_introspection_queue_t queues)
{
	dispatch_queue_t next;
	next = start ? start : TAILQ_FIRST(&_dispatch_introspection.queues);
	while (count--) {
		if (!next) {
			queues->queue = NULL;
			break;
		}
		*queues++ = dispatch_introspection_queue_get_info(next);
		next = TAILQ_NEXT(next, diq_list);
	}
	return next;
}

dispatch_continuation_t
dispatch_introspection_get_queue_threads(dispatch_continuation_t start,
		size_t count, dispatch_introspection_queue_thread_t threads)
{
	dispatch_introspection_thread_t next = start ? (void*)start :
			TAILQ_FIRST(&_dispatch_introspection.threads);
	while (count--) {
		if (!next) {
			threads->object = NULL;
			break;
		}
		*threads++ = _dispatch_introspection_thread_get_info(next);
		next = TAILQ_NEXT(next, dit_list);
	}
	return (void*)next;
}

dispatch_continuation_t
dispatch_introspection_queue_get_items(dispatch_queue_t dq,
		dispatch_continuation_t start, size_t count,
		dispatch_introspection_queue_item_t items)
{
	dispatch_continuation_t next = start ? start :
			dq->dq_items_head == (void*)~0ul ? NULL : (void*)dq->dq_items_head;
	while (count--) {
		if (!next) {
			items->type = dispatch_introspection_queue_item_type_none;
			break;
		}
		*items++ = dispatch_introspection_queue_item_get_info(dq, next);
		next = next->do_next;
	}
	return next;
}
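/*
 * These iterators are cursor based: each call fills at most `count` entries
 * and returns the next cursor, or NULL once the list is exhausted (the
 * first unfilled slot is marked with a NULL queue/object/none type). A tool
 * running in the inspected process could page through the registered queues
 * roughly like this (illustrative sketch only):
 *
 *	dispatch_introspection_queue_s info[32];
 *	dispatch_queue_t it = NULL;
 *	do {
 *		it = dispatch_introspection_get_queues(it, 32, info);
 *		// consume entries until one with .queue == NULL
 *	} while (it);
 */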
#pragma mark -
#pragma mark dispatch_introspection_hooks

#define DISPATCH_INTROSPECTION_NO_HOOK ((void*)~0ul)

dispatch_introspection_hooks_s _dispatch_introspection_hooks;
dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts;

dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts_enabled = {
	.queue_create = DISPATCH_INTROSPECTION_NO_HOOK,
	.queue_dispose = DISPATCH_INTROSPECTION_NO_HOOK,
	.queue_item_enqueue = DISPATCH_INTROSPECTION_NO_HOOK,
	.queue_item_dequeue = DISPATCH_INTROSPECTION_NO_HOOK,
	.queue_item_complete = DISPATCH_INTROSPECTION_NO_HOOK,
};

#define DISPATCH_INTROSPECTION_HOOKS_COUNT (( \
		sizeof(_dispatch_introspection_hook_callouts_enabled) - \
		sizeof(_dispatch_introspection_hook_callouts_enabled._reserved)) / \
		sizeof(dispatch_function_t))

#define DISPATCH_INTROSPECTION_HOOK_ENABLED(h) \
		(slowpath(_dispatch_introspection_hooks.h))

#define DISPATCH_INTROSPECTION_HOOK_CALLOUT(h, ...) ({ \
		typeof(_dispatch_introspection_hooks.h) _h; \
		_h = _dispatch_introspection_hooks.h; \
		if (slowpath((void*)(_h) != DISPATCH_INTROSPECTION_NO_HOOK)) { \
			_h(__VA_ARGS__); \
		} })

#define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(h) \
		DISPATCH_EXPORT void _dispatch_introspection_hook_##h(void) \
				asm("_dispatch_introspection_hook_" #h); \
		void _dispatch_introspection_hook_##h(void) {}

#define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(h, ...)\
		dispatch_introspection_hook_##h(__VA_ARGS__)

DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_create);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_destroy);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_enqueue);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_dequeue);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_complete);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_callout_begin);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_callout_end);
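/*
 * The empty _dispatch_introspection_hook_* functions defined above exist
 * only as stable, exported symbols: an external debugger or an interposing
 * library can set breakpoints on them (or interpose them) to observe queue
 * and work-item events without registering callback hooks in-process.
 */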
void
dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks)
{
	dispatch_introspection_hooks_s old_hooks = _dispatch_introspection_hooks;
	_dispatch_introspection_hooks = *hooks;
	dispatch_function_t *e = (void*)&_dispatch_introspection_hook_callouts,
			*h = (void*)&_dispatch_introspection_hooks, *oh = (void*)&old_hooks;
	for (size_t i = 0; i < DISPATCH_INTROSPECTION_HOOKS_COUNT; i++) {
		if (!h[i] && e[i]) {
			h[i] = DISPATCH_INTROSPECTION_NO_HOOK;
		}
		if (oh[i] == DISPATCH_INTROSPECTION_NO_HOOK) {
			oh[i] = NULL;
		}
	}
	*(dispatch_introspection_hooks_s*)hooks = old_hooks;
}
void
dispatch_introspection_hook_callouts_enable(
		dispatch_introspection_hooks_t enable)
{
	_dispatch_introspection_hook_callouts = enable ? *enable :
			_dispatch_introspection_hook_callouts_enabled;
	dispatch_function_t *e = (void*)&_dispatch_introspection_hook_callouts,
			*h = (void*)&_dispatch_introspection_hooks;
	for (size_t i = 0; i < DISPATCH_INTROSPECTION_HOOKS_COUNT; i++) {
		if (e[i] && !h[i]) {
			h[i] = DISPATCH_INTROSPECTION_NO_HOOK;
		} else if (!e[i] && h[i] == DISPATCH_INTROSPECTION_NO_HOOK) {
			h[i] = NULL;
		}
	}
}
void
dispatch_introspection_hook_callout_queue_create(
		dispatch_introspection_queue_t queue_info)
{
	DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_create, queue_info);
}

static void
_dispatch_introspection_queue_create_hook(dispatch_queue_t dq)
{
	dispatch_introspection_queue_s diq;
	diq = dispatch_introspection_queue_get_info(dq);
	dispatch_introspection_hook_callout_queue_create(&diq);
}

dispatch_queue_t
_dispatch_introspection_queue_create(dispatch_queue_t dq)
{
	TAILQ_INIT(&dq->diq_order_top_head);
	TAILQ_INIT(&dq->diq_order_bottom_head);
	_dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock);
	TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, dq, diq_list);
	_dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock);

	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_create, dq);
	if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_create)) {
		_dispatch_introspection_queue_create_hook(dq);
	}
	return dq;
}

void
dispatch_introspection_hook_callout_queue_dispose(
		dispatch_introspection_queue_t queue_info)
{
	DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_dispose, queue_info);
}

static void
_dispatch_introspection_queue_dispose_hook(dispatch_queue_t dq)
{
	dispatch_introspection_queue_s diq;
	diq = dispatch_introspection_queue_get_info(dq);
	dispatch_introspection_hook_callout_queue_dispose(&diq);
}

void
_dispatch_introspection_queue_dispose(dispatch_queue_t dq)
{
	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_destroy, dq);
	if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_dispose)) {
		_dispatch_introspection_queue_dispose_hook(dq);
	}

	_dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock);
	TAILQ_REMOVE(&_dispatch_introspection.queues, dq, diq_list);
	_dispatch_introspection_queue_order_dispose(dq);
	_dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock);
}
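/*
 * Every introspection event follows the same two-step pattern seen above:
 * the interposable hook (a breakpoint/interposition target) is invoked
 * unconditionally, and the registered callback, if any, runs only when the
 * corresponding slot in _dispatch_introspection_hooks is armed.
 */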
void
dispatch_introspection_hook_callout_queue_item_enqueue(dispatch_queue_t queue,
		dispatch_introspection_queue_item_t item)
{
	DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_enqueue, queue, item);
}

static void
_dispatch_introspection_queue_item_enqueue_hook(dispatch_queue_t dq,
		dispatch_object_t dou)
{
	dispatch_introspection_queue_item_s diqi;
	diqi = dispatch_introspection_queue_item_get_info(dq, dou._dc);
	dispatch_introspection_hook_callout_queue_item_enqueue(dq, &diqi);
}

void
_dispatch_introspection_queue_item_enqueue(dispatch_queue_t dq,
		dispatch_object_t dou)
{
	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(
			queue_item_enqueue, dq, dou);
	if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_enqueue)) {
		_dispatch_introspection_queue_item_enqueue_hook(dq, dou);
	}
}

void
dispatch_introspection_hook_callout_queue_item_dequeue(dispatch_queue_t queue,
		dispatch_introspection_queue_item_t item)
{
	DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_dequeue, queue, item);
}

static void
_dispatch_introspection_queue_item_dequeue_hook(dispatch_queue_t dq,
		dispatch_object_t dou)
{
	dispatch_introspection_queue_item_s diqi;
	diqi = dispatch_introspection_queue_item_get_info(dq, dou._dc);
	dispatch_introspection_hook_callout_queue_item_dequeue(dq, &diqi);
}

void
_dispatch_introspection_queue_item_dequeue(dispatch_queue_t dq,
		dispatch_object_t dou)
{
	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(
			queue_item_dequeue, dq, dou);
	if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_dequeue)) {
		_dispatch_introspection_queue_item_dequeue_hook(dq, dou);
	}
}

void
dispatch_introspection_hook_callout_queue_item_complete(
		dispatch_continuation_t object)
{
	DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_complete, object);
}

static void
_dispatch_introspection_queue_item_complete_hook(dispatch_object_t dou)
{
	dispatch_introspection_hook_callout_queue_item_complete(dou._dc);
}

void
_dispatch_introspection_queue_item_complete(dispatch_object_t dou)
{
	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_item_complete, dou);
	if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_complete)) {
		_dispatch_introspection_queue_item_complete_hook(dou);
	}
}

void
_dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f)
{
	dispatch_queue_t dq = _dispatch_queue_get_current();
	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(
			queue_callout_begin, dq, ctxt, f);
}

void
_dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f)
{
	dispatch_queue_t dq = _dispatch_queue_get_current();
	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(
			queue_callout_end, dq, ctxt, f);
}
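/*
 * The callout_entry/callout_return pair brackets the invocation of client
 * work on a queue; a tool breaking on the queue_callout_begin/end
 * interposable hooks can use them to attribute time spent in client code.
 */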
#pragma mark -
#pragma mark dispatch introspection deadlock detection

typedef struct dispatch_queue_order_entry_s *dispatch_queue_order_entry_t;
struct dispatch_queue_order_entry_s {
	TAILQ_ENTRY(dispatch_queue_order_entry_s) dqoe_order_top_list;
	TAILQ_ENTRY(dispatch_queue_order_entry_s) dqoe_order_bottom_list;
	const char *dqoe_top_label;
	const char *dqoe_bottom_label;
	dispatch_queue_t dqoe_top_tq;
	dispatch_queue_t dqoe_bottom_tq;
	int dqoe_pcs_n;
	void *dqoe_pcs[];
};
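/*
 * Each entry records one observed ordering edge: work running on
 * dqoe_bottom_tq performed a dispatch_sync() onto dqoe_top_tq, together
 * with the queue labels and a captured backtrace. Every entry is linked
 * into two lists, the top queue's diq_order_top_head and the bottom
 * queue's diq_order_bottom_head, so it can be found and torn down from
 * either endpoint; a cycle among these edges is reported as a lock
 * inversion.
 */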
static void
_dispatch_introspection_queue_order_dispose(dispatch_queue_t dq)
{
	dispatch_queue_order_entry_t e, te;
	dispatch_queue_t otherq;
	TAILQ_HEAD(, dispatch_queue_order_entry_s) head;

	// this whole thing happens with _dispatch_introspection.queues_lock locked

	_dispatch_unfair_lock_lock(&dq->diq_order_top_head_lock);
	head.tqh_first = dq->diq_order_top_head.tqh_first;
	head.tqh_last = dq->diq_order_top_head.tqh_last;
	TAILQ_INIT(&dq->diq_order_top_head);
	_dispatch_unfair_lock_unlock(&dq->diq_order_top_head_lock);

	TAILQ_FOREACH_SAFE(e, &head, dqoe_order_top_list, te) {
		otherq = e->dqoe_bottom_tq;
		_dispatch_unfair_lock_lock(&otherq->diq_order_bottom_head_lock);
		TAILQ_REMOVE(&otherq->diq_order_bottom_head, e, dqoe_order_bottom_list);
		_dispatch_unfair_lock_unlock(&otherq->diq_order_bottom_head_lock);
		free(e);
	}

	_dispatch_unfair_lock_lock(&dq->diq_order_bottom_head_lock);
	head.tqh_first = dq->diq_order_bottom_head.tqh_first;
	head.tqh_last = dq->diq_order_bottom_head.tqh_last;
	TAILQ_INIT(&dq->diq_order_bottom_head);
	_dispatch_unfair_lock_unlock(&dq->diq_order_bottom_head_lock);

	TAILQ_FOREACH_SAFE(e, &head, dqoe_order_bottom_list, te) {
		otherq = e->dqoe_top_tq;
		_dispatch_unfair_lock_lock(&otherq->diq_order_top_head_lock);
		TAILQ_REMOVE(&otherq->diq_order_top_head, e, dqoe_order_top_list);
		_dispatch_unfair_lock_unlock(&otherq->diq_order_top_head_lock);
		free(e);
	}
}
// caller must make sure dq is not a root queue
DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_t
_dispatch_queue_bottom_target_queue(dispatch_queue_t dq)
{
	while (dq->do_targetq->do_targetq) {
		dq = dq->do_targetq;
	}
	return dq;
}
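/*
 * In other words: this walks dq's target-queue chain and returns the last
 * queue whose own target is a root queue (root queues have no target), and
 * that hierarchy-bottom queue is what the ordering records below key on.
 */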
typedef struct dispatch_order_frame_s *dispatch_order_frame_t;
struct dispatch_order_frame_s {
	dispatch_order_frame_t dof_prev;
	dispatch_queue_order_entry_t dof_e;
};
static void
_dispatch_introspection_lock_inversion_fail(dispatch_order_frame_t dof,
		dispatch_queue_t top_q, dispatch_queue_t bottom_q)
{
	_SIMPLE_STRING buf = _simple_salloc();
	const char *leading_word = "with";

	_simple_sprintf(buf, "%s Lock inversion detected\n"
			"queue [%s] trying to sync onto queue [%s] conflicts\n",
			DISPATCH_ASSERTION_FAILED_MESSAGE,
			bottom_q->dq_label ?: "", top_q->dq_label ?: "");

	while (dof) {
		dispatch_queue_order_entry_t e = dof->dof_e;
		char **symbols;

		_simple_sprintf(buf,
				"%s queue [%s] syncing onto queue [%s] at:\n", leading_word,
				dof->dof_e->dqoe_bottom_label, dof->dof_e->dqoe_top_label);
		symbols = backtrace_symbols(e->dqoe_pcs, e->dqoe_pcs_n);
		if (symbols) {
			for (int i = 0; i < e->dqoe_pcs_n; i++) {
				_simple_sprintf(buf, "%s\n", symbols[i]);
			}
			free(symbols);
		} else {
			_simple_sappend(buf, "<missing backtrace>\n");
		}

		leading_word = "and";
		dof = dof->dof_prev;
	}

	// <rdar://problem/25053293> turn off the feature for crash handlers
	_dispatch_introspection.debug_queue_inversions = false;
	_dispatch_assert_crash(_simple_string(buf));
}
static void
_dispatch_introspection_order_check(dispatch_order_frame_t dof_prev,
		dispatch_queue_t top_q, dispatch_queue_t top_tq,
		dispatch_queue_t bottom_q, dispatch_queue_t bottom_tq)
{
	struct dispatch_order_frame_s dof = { .dof_prev = dof_prev };

	// has anyone above bottom_tq ever sync()ed onto top_tq ?
	_dispatch_unfair_lock_lock(&bottom_tq->diq_order_top_head_lock);
	TAILQ_FOREACH(dof.dof_e, &bottom_tq->diq_order_top_head, dqoe_order_top_list) {
		if (slowpath(dof.dof_e->dqoe_bottom_tq == top_tq)) {
			_dispatch_introspection_lock_inversion_fail(&dof, top_q, bottom_q);
		}
		_dispatch_introspection_order_check(&dof, top_q, top_tq,
				bottom_q, dof.dof_e->dqoe_bottom_tq);
	}
	_dispatch_unfair_lock_unlock(&bottom_tq->diq_order_top_head_lock);
}
void
_dispatch_introspection_order_record(dispatch_queue_t top_q,
		dispatch_queue_t bottom_q)
{
	dispatch_queue_order_entry_t e, it;
	const int pcs_skip = 1, pcs_n_max = 128;
	void *pcs[pcs_n_max];
	int pcs_n;

	if (!bottom_q || !bottom_q->do_targetq || !top_q->do_targetq) {
		return;
	}

	dispatch_queue_t top_tq = _dispatch_queue_bottom_target_queue(top_q);
	dispatch_queue_t bottom_tq = _dispatch_queue_bottom_target_queue(bottom_q);

	_dispatch_unfair_lock_lock(&top_tq->diq_order_top_head_lock);
	TAILQ_FOREACH(it, &top_tq->diq_order_top_head, dqoe_order_top_list) {
		if (it->dqoe_bottom_tq == bottom_tq) {
			// that dispatch_sync() is known and validated
			_dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock);
			return;
		}
	}
	_dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock);

	_dispatch_introspection_order_check(NULL, top_q, top_tq, bottom_q, bottom_tq);
	pcs_n = MAX(backtrace(pcs, pcs_n_max) - pcs_skip, 0);

	bool copy_top_label = false, copy_bottom_label = false;
	size_t size = sizeof(struct dispatch_queue_order_entry_s)
			+ (size_t)pcs_n * sizeof(void *);

	if (_dispatch_queue_label_needs_free(top_q)) {
		size += strlen(top_q->dq_label) + 1;
		copy_top_label = true;
	}
	if (_dispatch_queue_label_needs_free(bottom_q)) {
		size += strlen(bottom_q->dq_label) + 1;
		copy_bottom_label = true;
	}

	e = _dispatch_calloc(1, size);
	e->dqoe_top_tq = top_tq;
	e->dqoe_bottom_tq = bottom_tq;
	e->dqoe_pcs_n = pcs_n;
	memcpy(e->dqoe_pcs, pcs + pcs_skip, (size_t)pcs_n * sizeof(void *));
	// and then lay out the names of the queues at the end
	char *p = (char *)(e->dqoe_pcs + pcs_n);
	if (copy_top_label) {
		e->dqoe_top_label = strcpy(p, top_q->dq_label);
		p += strlen(top_q->dq_label) + 1;
	} else {
		e->dqoe_top_label = top_q->dq_label ?: "";
	}
	if (copy_bottom_label) {
		e->dqoe_bottom_label = strcpy(p, bottom_q->dq_label);
	} else {
		e->dqoe_bottom_label = bottom_q->dq_label ?: "";
	}

	_dispatch_unfair_lock_lock(&top_tq->diq_order_top_head_lock);
	TAILQ_FOREACH(it, &top_tq->diq_order_top_head, dqoe_order_top_list) {
		if (slowpath(it->dqoe_bottom_tq == bottom_tq)) {
			// someone else validated it at the same time
			_dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock);
			free(e);
			return;
		}
	}
	TAILQ_INSERT_HEAD(&top_tq->diq_order_top_head, e, dqoe_order_top_list);
	_dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock);

	_dispatch_unfair_lock_lock(&bottom_tq->diq_order_bottom_head_lock);
	TAILQ_INSERT_HEAD(&bottom_tq->diq_order_bottom_head, e, dqoe_order_bottom_list);
	_dispatch_unfair_lock_unlock(&bottom_tq->diq_order_bottom_head_lock);
}
void
_dispatch_introspection_target_queue_changed(dispatch_queue_t dq)
{
	if (!_dispatch_introspection.debug_queue_inversions) return;

	if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) {
		_dispatch_log(
				"BUG IN CLIENT OF LIBDISPATCH: queue inversion debugging "
				"cannot be used with code that changes the target "
				"of a queue already targeted by other dispatch objects\n"
				"queue %p[%s] was already targeted by other dispatch objects",
				dq, dq->dq_label ?: "");
		_dispatch_introspection.debug_queue_inversions = false;
		return;
	}

	static char const * const reasons[] = {
		[1] = "an initiator",
		[2] = "a recipient",
		[3] = "both an initiator and a recipient",
	};
	bool as_top = !TAILQ_EMPTY(&dq->diq_order_top_head);
	bool as_bottom = !TAILQ_EMPTY(&dq->diq_order_bottom_head);

	if (as_top || as_bottom) {
		_dispatch_log(
				"BUG IN CLIENT OF LIBDISPATCH: queue inversion debugging "
				"expects queues to not participate in dispatch_sync() "
				"before their setup is complete\n"
				"forgetting that queue 0x%p[%s] participated as %s of "
				"a dispatch_sync", dq, dq->dq_label ?: "",
				reasons[(int)as_top + 2 * (int)as_bottom]);
		_dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock);
		_dispatch_introspection_queue_order_dispose(dq);
		_dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock);
	}
}

#endif // DISPATCH_INTROSPECTION