/*
 * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

// Contains introspection routines that only exist in the version of the
// library with introspection support

#if DISPATCH_INTROSPECTION

#include "internal.h"
#include "dispatch/introspection.h"
#include "introspection_private.h"
typedef struct dispatch_introspection_thread_s {
	void *dit_isa;
	TAILQ_ENTRY(dispatch_introspection_thread_s) dit_list;
	pthread_t thread;
	dispatch_queue_t *queue;
} dispatch_introspection_thread_s;
typedef struct dispatch_introspection_thread_s *dispatch_introspection_thread_t;
struct dispatch_introspection_state_s _dispatch_introspection = {
	.threads = TAILQ_HEAD_INITIALIZER(_dispatch_introspection.threads),
	.queues = TAILQ_HEAD_INITIALIZER(_dispatch_introspection.queues),
};
static void _dispatch_introspection_thread_remove(void *ctxt);

static void _dispatch_introspection_queue_order_dispose(dispatch_queue_t dq);
#pragma mark dispatch_introspection_init
static bool
_dispatch_getenv_bool(const char *env, bool default_v)
{
	const char *v = getenv(env);

	if (v) {
		return strcasecmp(v, "YES") == 0 || strcasecmp(v, "Y") == 0 ||
				strcasecmp(v, "TRUE") == 0 || atoi(v);
	}
	return default_v;
}
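
/*
 * Example (illustrative): the only consumer of this helper below is the
 * LIBDISPATCH_DEBUG_QUEUE_INVERSIONS switch, so queue inversion debugging can
 * be enabled from the environment with any value this parser accepts, e.g.
 *
 *	LIBDISPATCH_DEBUG_QUEUE_INVERSIONS=YES ./my_app
 *	LIBDISPATCH_DEBUG_QUEUE_INVERSIONS=1 ./my_app
 *
 * ("my_app" is a placeholder, not something defined by this file.)
 */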
void
_dispatch_introspection_init(void)
{
	TAILQ_INSERT_TAIL(&_dispatch_introspection.queues,
			&_dispatch_main_q, diq_list);
	TAILQ_INSERT_TAIL(&_dispatch_introspection.queues,
			&_dispatch_mgr_q, diq_list);
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
	TAILQ_INSERT_TAIL(&_dispatch_introspection.queues,
			_dispatch_mgr_q.do_targetq, diq_list);
#endif
	for (size_t i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
		TAILQ_INSERT_TAIL(&_dispatch_introspection.queues,
				&_dispatch_root_queues[i], diq_list);
	}

	_dispatch_introspection.debug_queue_inversions =
			_dispatch_getenv_bool("LIBDISPATCH_DEBUG_QUEUE_INVERSIONS", false);

	// Hack to determine queue TSD offset from start of pthread structure
	uintptr_t thread = _dispatch_thread_self();
	thread_identifier_info_data_t tiid;
	mach_msg_type_number_t cnt = THREAD_IDENTIFIER_INFO_COUNT;
	kern_return_t kr = thread_info(pthread_mach_thread_np((void*)thread),
			THREAD_IDENTIFIER_INFO, (thread_info_t)&tiid, &cnt);
	if (!dispatch_assume_zero(kr)) {
		_dispatch_introspection.thread_queue_offset =
				(void*)(uintptr_t)tiid.dispatch_qaddr - (void*)thread;
	}
	_dispatch_thread_key_create(&dispatch_introspection_key,
			_dispatch_introspection_thread_remove);
	_dispatch_introspection_thread_add(); // add main thread
}
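
/*
 * Note on the "hack" above (descriptive, not normative): THREAD_IDENTIFIER_INFO
 * reports dispatch_qaddr, the address of the TSD slot where the current
 * dispatch queue pointer for that thread is published. Subtracting the
 * pthread_t base address yields a constant per-process offset, which
 * _dispatch_introspection_thread_add() below adds back to each registered
 * thread to locate its queue slot (dit->queue).
 */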
const struct dispatch_introspection_versions_s
dispatch_introspection_versions = {
	.introspection_version = 1,
	.hooks_size = sizeof(dispatch_introspection_hooks_s),
	.queue_item_version = 1,
	.queue_item_size = sizeof(dispatch_introspection_queue_item_s),
	.queue_block_version = 1,
	.queue_block_size = sizeof(dispatch_introspection_queue_block_s),
	.queue_function_version = 1,
	.queue_function_size = sizeof(dispatch_introspection_queue_function_s),
	.queue_thread_version = 1,
	.queue_thread_size = sizeof(dispatch_introspection_queue_thread_s),
	.object_size = sizeof(dispatch_introspection_object_s),
	.queue_size = sizeof(dispatch_introspection_queue_s),
	.source_size = sizeof(dispatch_introspection_source_s),
};
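
/*
 * Illustrative sketch (hypothetical client code, not part of this file): an
 * introspection client is expected to compare these version/size constants
 * against the layouts it was built with before decoding anything, e.g.
 *
 *	if (dispatch_introspection_versions.queue_item_version != 1 ||
 *			dispatch_introspection_versions.queue_item_size <
 *			sizeof(dispatch_introspection_queue_item_s)) {
 *		return; // layouts differ, bail out rather than misparse
 *	}
 */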
#pragma mark dispatch_introspection_threads
void
_dispatch_introspection_thread_add(void)
{
	if (_dispatch_thread_getspecific(dispatch_introspection_key)) {
		return;
	}

	uintptr_t thread = _dispatch_thread_self();
	dispatch_introspection_thread_t dit = (void*)_dispatch_continuation_alloc();
	dit->dit_isa = (void*)0x41;
	dit->thread = (void*)thread;
	dit->queue = !_dispatch_introspection.thread_queue_offset ? NULL :
			(void*)thread + _dispatch_introspection.thread_queue_offset;
	_dispatch_thread_setspecific(dispatch_introspection_key, dit);
	_dispatch_unfair_lock_lock(&_dispatch_introspection.threads_lock);
	TAILQ_INSERT_TAIL(&_dispatch_introspection.threads, dit, dit_list);
	_dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock);
}
static void
_dispatch_introspection_thread_remove(void *ctxt)
{
	dispatch_introspection_thread_t dit = ctxt;
	_dispatch_unfair_lock_lock(&_dispatch_introspection.threads_lock);
	TAILQ_REMOVE(&_dispatch_introspection.threads, dit, dit_list);
	_dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock);
	_dispatch_continuation_free((void*)dit);
	_dispatch_thread_setspecific(dispatch_introspection_key, NULL);
}
#pragma mark dispatch_introspection_info
dispatch_introspection_queue_s
dispatch_introspection_queue_get_info(dispatch_queue_t dq)
{
	bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT);
	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);

	dispatch_introspection_queue_s diq = {
		.queue = dq,
		.target_queue = dq->do_targetq,
		.label = dq->dq_label,
		.serialnum = dq->dq_serialnum,
		.width = dq->dq_width,
		.suspend_count = _dq_state_suspend_cnt(dq_state) + dq->dq_side_suspend_cnt,
		.enqueued = _dq_state_is_enqueued(dq_state) && !global,
		.barrier = _dq_state_is_in_barrier(dq_state) && !global,
		.draining = (dq->dq_items_head == (void*)~0ul) ||
				(!dq->dq_items_head && dq->dq_items_tail),
		.global = global,
		.main = (dq == &_dispatch_main_q),
	};
	return diq;
}
static void
_dispatch_introspection_continuation_get_info(dispatch_queue_t dq,
		dispatch_continuation_t dc, dispatch_introspection_queue_item_t diqi)
{
	void *ctxt = dc->dc_ctxt;
	dispatch_function_t func = dc->dc_func;
	pthread_t waiter = NULL;
	bool apply = false;
	uintptr_t flags = dc->dc_flags;

	if (_dispatch_object_has_vtable(dc)) {
		switch (dc_type(dc)) {
#if HAVE_PTHREAD_WORKQUEUE_QOS
		case DC_OVERRIDE_STEALING_TYPE:
		case DC_OVERRIDE_OWNING_TYPE:
			dc = dc->dc_data;
			if (!_dispatch_object_is_continuation(dc)) {
				// these really wrap queues so we should hide the continuation type
				dq = (dispatch_queue_t)dc;
				diqi->type = dispatch_introspection_queue_item_type_queue;
				diqi->queue = dispatch_introspection_queue_get_info(dq);
				return;
			}
			return _dispatch_introspection_continuation_get_info(dq, dc, diqi);
#endif
		case DC_ASYNC_REDIRECT_TYPE:
			DISPATCH_INTERNAL_CRASH(0, "Handled by the caller");
		case DC_MACH_ASYNC_REPLY_TYPE:
			break;
		case DC_MACH_SEND_BARRRIER_DRAIN_TYPE:
			break;
		case DC_MACH_SEND_BARRIER_TYPE:
		case DC_MACH_RECV_BARRIER_TYPE:
			flags = (uintptr_t)dc->dc_data;
			break;
		default:
			DISPATCH_INTERNAL_CRASH(dc->do_vtable, "Unknown dc vtable type");
		}
	} else {
		if (flags & DISPATCH_OBJ_SYNC_WAITER_BIT) {
			dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc;
			waiter = pthread_from_mach_thread_np(dsc->dsc_waiter);
			ctxt = dsc->dsc_ctxt;
			func = dsc->dsc_func;
		}
		if (func == _dispatch_apply_invoke ||
				func == _dispatch_apply_redirect_invoke) {
			dispatch_apply_t da = ctxt;
			if (da->da_todo) {
				dc = da->da_dc;
				dq = dc->dc_data;
				ctxt = dc->dc_ctxt;
				func = dc->dc_func;
				apply = true;
			}
		}
	}
	if (flags & DISPATCH_OBJ_BLOCK_BIT) {
		diqi->type = dispatch_introspection_queue_item_type_block;
		func = _dispatch_Block_invoke(ctxt);
	} else {
		diqi->type = dispatch_introspection_queue_item_type_function;
	}
	diqi->function = (dispatch_introspection_queue_function_s){
		.continuation = dc,
		.target_queue = dq,
		.context = ctxt,
		.function = func,
		.waiter = waiter,
		.barrier = (flags & DISPATCH_OBJ_BARRIER_BIT) || dq->dq_width == 1,
		.sync = flags & DISPATCH_OBJ_SYNC_WAITER_BIT,
		.apply = apply,
	};
	if (flags & DISPATCH_OBJ_GROUP_BIT) {
		dispatch_group_t group = dc->dc_data;
		if (dx_type(group) == DISPATCH_GROUP_TYPE) {
			diqi->function.group = group;
		}
	}
}
dispatch_introspection_object_s
_dispatch_introspection_object_get_info(dispatch_object_t dou)
{
	dispatch_introspection_object_s dio = {
		.object = dou._dc,
		.target_queue = dou._do->do_targetq,
		.type = (void*)dou._do->do_vtable,
		.kind = dx_kind(dou._do),
	};
	return dio;
}
dispatch_introspection_source_s
_dispatch_introspection_source_get_info(dispatch_source_t ds)
{
	dispatch_source_refs_t dr = ds->ds_refs;
	dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER];
	void *ctxt = NULL;
	dispatch_function_t handler = NULL;
	bool hdlr_is_block = false;
	if (dc) {
		ctxt = dc->dc_ctxt;
		handler = dc->dc_func;
		hdlr_is_block = (dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT);
	}

	uint64_t dq_state = os_atomic_load2o(ds, dq_state, relaxed);
	dispatch_introspection_source_s dis = {
		.source = ds,
		.target_queue = ds->do_targetq,
		.context = ctxt,
		.handler = handler,
		.suspend_count = _dq_state_suspend_cnt(dq_state) + ds->dq_side_suspend_cnt,
		.enqueued = _dq_state_is_enqueued(dq_state),
		.handler_is_block = hdlr_is_block,
		.timer = dr->du_is_timer,
		.after = dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_AFTER),
		.type = (unsigned long)dr->du_filter,
		.handle = (unsigned long)dr->du_ident,
	};
	return dis;
}
dispatch_introspection_queue_thread_s
_dispatch_introspection_thread_get_info(dispatch_introspection_thread_t dit)
{
	dispatch_introspection_queue_thread_s diqt = {
		.object = (void*)dit,
		.thread = dit->thread,
	};
	if (dit->queue && *dit->queue) {
		diqt.queue = dispatch_introspection_queue_get_info(*dit->queue);
	}
	return diqt;
}
dispatch_introspection_queue_item_s
dispatch_introspection_queue_item_get_info(dispatch_queue_t dq,
		dispatch_continuation_t dc)
{
	dispatch_introspection_queue_item_s diqi;
	dispatch_object_t dou;

again:
	dou._dc = dc;
	if (_dispatch_object_has_vtable(dou._do)) {
		unsigned long type = dx_type(dou._do);
		unsigned long metatype = type & _DISPATCH_META_TYPE_MASK;
		if (type == DC_ASYNC_REDIRECT_TYPE) {
			dc = dc->dc_data;
			goto again;
		}
		if (metatype == _DISPATCH_CONTINUATION_TYPE) {
			_dispatch_introspection_continuation_get_info(dq, dc, &diqi);
		} else if (metatype == _DISPATCH_QUEUE_TYPE &&
				type != DISPATCH_QUEUE_SPECIFIC_TYPE) {
			diqi.type = dispatch_introspection_queue_item_type_queue;
			diqi.queue = dispatch_introspection_queue_get_info(dou._dq);
		} else if (metatype == _DISPATCH_SOURCE_TYPE &&
				type != DISPATCH_MACH_CHANNEL_TYPE) {
			diqi.type = dispatch_introspection_queue_item_type_source;
			diqi.source = _dispatch_introspection_source_get_info(dou._ds);
		} else {
			diqi.type = dispatch_introspection_queue_item_type_object;
			diqi.object = _dispatch_introspection_object_get_info(dou._do);
		}
	} else {
		_dispatch_introspection_continuation_get_info(dq, dc, &diqi);
	}
	return diqi;
}
#pragma mark dispatch_introspection_iterators
dispatch_queue_t
dispatch_introspection_get_queues(dispatch_queue_t start, size_t count,
		dispatch_introspection_queue_t queues)
{
	dispatch_queue_t next;
	next = start ? start : TAILQ_FIRST(&_dispatch_introspection.queues);
	while (count--) {
		if (!next) {
			queues->queue = NULL;
			break;
		}
		*queues++ = dispatch_introspection_queue_get_info(next);
		next = TAILQ_NEXT(next, diq_list);
	}
	return next;
}
dispatch_continuation_t
dispatch_introspection_get_queue_threads(dispatch_continuation_t start,
		size_t count, dispatch_introspection_queue_thread_t threads)
{
	dispatch_introspection_thread_t next = start ? (void*)start :
			TAILQ_FIRST(&_dispatch_introspection.threads);
	while (count--) {
		if (!next) {
			threads->object = NULL;
			break;
		}
		*threads++ = _dispatch_introspection_thread_get_info(next);
		next = TAILQ_NEXT(next, dit_list);
	}
	return (dispatch_continuation_t)next;
}
dispatch_continuation_t
dispatch_introspection_queue_get_items(dispatch_queue_t dq,
		dispatch_continuation_t start, size_t count,
		dispatch_introspection_queue_item_t items)
{
	dispatch_continuation_t next = start ? start :
			dq->dq_items_head == (void*)~0ul ? NULL : (void*)dq->dq_items_head;
	while (count--) {
		if (!next) {
			items->type = dispatch_introspection_queue_item_type_none;
			break;
		}
		*items++ = dispatch_introspection_queue_item_get_info(dq, next);
		next = next->do_next;
	}
	return next;
}
#pragma mark dispatch_introspection_hooks
#define DISPATCH_INTROSPECTION_NO_HOOK ((void*)~0ul)

dispatch_introspection_hooks_s _dispatch_introspection_hooks;
dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts;

dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts_enabled = {
	.queue_create = DISPATCH_INTROSPECTION_NO_HOOK,
	.queue_dispose = DISPATCH_INTROSPECTION_NO_HOOK,
	.queue_item_enqueue = DISPATCH_INTROSPECTION_NO_HOOK,
	.queue_item_dequeue = DISPATCH_INTROSPECTION_NO_HOOK,
	.queue_item_complete = DISPATCH_INTROSPECTION_NO_HOOK,
};

#define DISPATCH_INTROSPECTION_HOOKS_COUNT (( \
		sizeof(_dispatch_introspection_hook_callouts_enabled) - \
		sizeof(_dispatch_introspection_hook_callouts_enabled._reserved)) / \
		sizeof(dispatch_function_t))
#define DISPATCH_INTROSPECTION_HOOK_ENABLED(h) \
		(slowpath(_dispatch_introspection_hooks.h))

#define DISPATCH_INTROSPECTION_HOOK_CALLOUT(h, ...) ({ \
		typeof(_dispatch_introspection_hooks.h) _h; \
		_h = _dispatch_introspection_hooks.h; \
		if (slowpath((void*)(_h) != DISPATCH_INTROSPECTION_NO_HOOK)) { \
			_h(__VA_ARGS__); \
		} })

#define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(h) \
		DISPATCH_EXPORT void _dispatch_introspection_hook_##h(void) \
				asm("_dispatch_introspection_hook_" #h); \
		void _dispatch_introspection_hook_##h(void) {}

#define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(h, ...)\
		dispatch_introspection_hook_##h(__VA_ARGS__)
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_create);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_destroy);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_enqueue);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_dequeue);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_complete);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_callout_begin);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_callout_end);
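
/*
 * Illustrative sketch (hypothetical tooling, not part of libdispatch): the
 * macro above emits empty, exported functions with fixed symbol names such as
 * _dispatch_introspection_hook_queue_create. A debugger can simply set a
 * breakpoint on those symbols, or an injected library might interpose them,
 * roughly:
 *
 *	// hypothetical interposer built against <dispatch/introspection.h>
 *	static void
 *	my_hook_queue_create(dispatch_queue_t dq)
 *	{
 *		// record or log dq here
 *	}
 *	// pair (my_hook_queue_create, dispatch_introspection_hook_queue_create)
 *	// in the tool's interposition table (e.g. dyld's __interpose section).
 */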
void
dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks)
{
	dispatch_introspection_hooks_s old_hooks = _dispatch_introspection_hooks;
	_dispatch_introspection_hooks = *hooks;
	dispatch_function_t *e = (void*)&_dispatch_introspection_hook_callouts,
			*h = (void*)&_dispatch_introspection_hooks, *oh = (void*)&old_hooks;
	for (size_t i = 0; i < DISPATCH_INTROSPECTION_HOOKS_COUNT; i++) {
		if (!h[i] && e[i]) {
			h[i] = DISPATCH_INTROSPECTION_NO_HOOK;
		}
		if (oh[i] == DISPATCH_INTROSPECTION_NO_HOOK) {
			oh[i] = NULL;
		}
	}
	*(dispatch_introspection_hooks_t)hooks = old_hooks;
}
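
/*
 * Illustrative usage sketch (hypothetical client code, assuming only the
 * declarations visible in this file and introspection_private.h): a tool
 * fills in the callouts it cares about and installs them; on return the same
 * struct holds whatever hooks were previously installed.
 *
 *	static void
 *	my_queue_create(dispatch_introspection_queue_t queue_info)
 *	{
 *		// inspect queue_info->label, queue_info->width, ...
 *	}
 *
 *	dispatch_introspection_hooks_s hooks = {
 *		.queue_create = my_queue_create,
 *	};
 *	dispatch_introspection_hooks_install(&hooks);
 *	// 'hooks' now contains the previously installed hooks (if any)
 */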
void
dispatch_introspection_hook_callouts_enable(
		dispatch_introspection_hooks_t enable)
{
	_dispatch_introspection_hook_callouts = enable ? *enable :
			_dispatch_introspection_hook_callouts_enabled;
	dispatch_function_t *e = (void*)&_dispatch_introspection_hook_callouts,
			*h = (void*)&_dispatch_introspection_hooks;
	for (size_t i = 0; i < DISPATCH_INTROSPECTION_HOOKS_COUNT; i++) {
		if (e[i] && !h[i]) {
			h[i] = DISPATCH_INTROSPECTION_NO_HOOK;
		} else if (!e[i] && h[i] == DISPATCH_INTROSPECTION_NO_HOOK) {
			h[i] = NULL;
		}
	}
}
void
dispatch_introspection_hook_callout_queue_create(
		dispatch_introspection_queue_t queue_info)
{
	DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_create, queue_info);
}

static void
_dispatch_introspection_queue_create_hook(dispatch_queue_t dq)
{
	dispatch_introspection_queue_s diq;
	diq = dispatch_introspection_queue_get_info(dq);
	dispatch_introspection_hook_callout_queue_create(&diq);
}
dispatch_queue_t
_dispatch_introspection_queue_create(dispatch_queue_t dq)
{
	TAILQ_INIT(&dq->diq_order_top_head);
	TAILQ_INIT(&dq->diq_order_bottom_head);
	_dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock);
	TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, dq, diq_list);
	_dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock);

	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_create, dq);
	if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_create)) {
		_dispatch_introspection_queue_create_hook(dq);
	}
	return dq;
}
void
dispatch_introspection_hook_callout_queue_dispose(
		dispatch_introspection_queue_t queue_info)
{
	DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_dispose, queue_info);
}

static void
_dispatch_introspection_queue_dispose_hook(dispatch_queue_t dq)
{
	dispatch_introspection_queue_s diq;
	diq = dispatch_introspection_queue_get_info(dq);
	dispatch_introspection_hook_callout_queue_dispose(&diq);
}
void
_dispatch_introspection_queue_dispose(dispatch_queue_t dq)
{
	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_destroy, dq);
	if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_dispose)) {
		_dispatch_introspection_queue_dispose_hook(dq);
	}

	_dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock);
	TAILQ_REMOVE(&_dispatch_introspection.queues, dq, diq_list);
	_dispatch_introspection_queue_order_dispose(dq);
	_dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock);
}
void
dispatch_introspection_hook_callout_queue_item_enqueue(dispatch_queue_t queue,
		dispatch_introspection_queue_item_t item)
{
	DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_enqueue, queue, item);
}

static void
_dispatch_introspection_queue_item_enqueue_hook(dispatch_queue_t dq,
		dispatch_object_t dou)
{
	dispatch_introspection_queue_item_s diqi;
	diqi = dispatch_introspection_queue_item_get_info(dq, dou._dc);
	dispatch_introspection_hook_callout_queue_item_enqueue(dq, &diqi);
}
void
_dispatch_introspection_queue_item_enqueue(dispatch_queue_t dq,
		dispatch_object_t dou)
{
	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(
			queue_item_enqueue, dq, dou);
	if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_enqueue)) {
		_dispatch_introspection_queue_item_enqueue_hook(dq, dou);
	}
}
void
dispatch_introspection_hook_callout_queue_item_dequeue(dispatch_queue_t queue,
		dispatch_introspection_queue_item_t item)
{
	DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_dequeue, queue, item);
}

static void
_dispatch_introspection_queue_item_dequeue_hook(dispatch_queue_t dq,
		dispatch_object_t dou)
{
	dispatch_introspection_queue_item_s diqi;
	diqi = dispatch_introspection_queue_item_get_info(dq, dou._dc);
	dispatch_introspection_hook_callout_queue_item_dequeue(dq, &diqi);
}
void
_dispatch_introspection_queue_item_dequeue(dispatch_queue_t dq,
		dispatch_object_t dou)
{
	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(
			queue_item_dequeue, dq, dou);
	if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_dequeue)) {
		_dispatch_introspection_queue_item_dequeue_hook(dq, dou);
	}
}
void
dispatch_introspection_hook_callout_queue_item_complete(
		dispatch_continuation_t object)
{
	DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_complete, object);
}

static void
_dispatch_introspection_queue_item_complete_hook(dispatch_object_t dou)
{
	dispatch_introspection_hook_callout_queue_item_complete(dou._dc);
}
void
_dispatch_introspection_queue_item_complete(dispatch_object_t dou)
{
	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_item_complete, dou);
	if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_complete)) {
		_dispatch_introspection_queue_item_complete_hook(dou);
	}
}
void
_dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f)
{
	dispatch_queue_t dq = _dispatch_queue_get_current();
	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(
			queue_callout_begin, dq, ctxt, f);
}

void
_dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f)
{
	dispatch_queue_t dq = _dispatch_queue_get_current();
	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(
			queue_callout_end, dq, ctxt, f);
}
#pragma mark dispatch introspection deadlock detection
typedef struct dispatch_queue_order_entry_s *dispatch_queue_order_entry_t;
struct dispatch_queue_order_entry_s {
	TAILQ_ENTRY(dispatch_queue_order_entry_s) dqoe_order_top_list;
	TAILQ_ENTRY(dispatch_queue_order_entry_s) dqoe_order_bottom_list;
	const char *dqoe_top_label;
	const char *dqoe_bottom_label;
	dispatch_queue_t dqoe_top_tq;
	dispatch_queue_t dqoe_bottom_tq;
	int dqoe_pcs_n;
	void *dqoe_pcs[];
};
static void
_dispatch_introspection_queue_order_dispose(dispatch_queue_t dq)
{
	dispatch_queue_order_entry_t e, te;
	dispatch_queue_t otherq;
	TAILQ_HEAD(, dispatch_queue_order_entry_s) head;

	// this whole thing happens with _dispatch_introspection.queues_lock locked

	_dispatch_unfair_lock_lock(&dq->diq_order_top_head_lock);
	head.tqh_first = dq->diq_order_top_head.tqh_first;
	head.tqh_last = dq->diq_order_top_head.tqh_last;
	TAILQ_INIT(&dq->diq_order_top_head);
	_dispatch_unfair_lock_unlock(&dq->diq_order_top_head_lock);

	TAILQ_FOREACH_SAFE(e, &head, dqoe_order_top_list, te) {
		otherq = e->dqoe_bottom_tq;
		_dispatch_unfair_lock_lock(&otherq->diq_order_bottom_head_lock);
		TAILQ_REMOVE(&otherq->diq_order_bottom_head, e, dqoe_order_bottom_list);
		_dispatch_unfair_lock_unlock(&otherq->diq_order_bottom_head_lock);
		free(e);
	}

	_dispatch_unfair_lock_lock(&dq->diq_order_bottom_head_lock);
	head.tqh_first = dq->diq_order_bottom_head.tqh_first;
	head.tqh_last = dq->diq_order_bottom_head.tqh_last;
	TAILQ_INIT(&dq->diq_order_bottom_head);
	_dispatch_unfair_lock_unlock(&dq->diq_order_bottom_head_lock);

	TAILQ_FOREACH_SAFE(e, &head, dqoe_order_bottom_list, te) {
		otherq = e->dqoe_top_tq;
		_dispatch_unfair_lock_lock(&otherq->diq_order_top_head_lock);
		TAILQ_REMOVE(&otherq->diq_order_top_head, e, dqoe_order_top_list);
		_dispatch_unfair_lock_unlock(&otherq->diq_order_top_head_lock);
		free(e);
	}
}
// caller must make sure dq is not a root queue
DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_t
_dispatch_queue_bottom_target_queue(dispatch_queue_t dq)
{
	while (dq->do_targetq->do_targetq) {
		dq = dq->do_targetq;
	}
	return dq;
}
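
/*
 * Example of what "bottom target queue" means here: for a target chain
 *
 *	dq -> A -> B -> (root queue)
 *
 * the loop stops one hop before the root and returns B, i.e. the last
 * non-global queue in dq's target chain (hence the requirement above that dq
 * itself not be a root queue, so that dq->do_targetq is always valid).
 */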
typedef struct dispatch_order_frame_s *dispatch_order_frame_t;
struct dispatch_order_frame_s {
	dispatch_order_frame_t dof_prev;
	dispatch_queue_order_entry_t dof_e;
};
DISPATCH_NOINLINE DISPATCH_NORETURN
static void
_dispatch_introspection_lock_inversion_fail(dispatch_order_frame_t dof,
		dispatch_queue_t top_q, dispatch_queue_t bottom_q)
{
	_SIMPLE_STRING buf = _simple_salloc();
	const char *leading_word = "with";

	_simple_sprintf(buf, "%s Lock inversion detected\n"
			"queue [%s] trying to sync onto queue [%s] conflicts\n",
			DISPATCH_ASSERTION_FAILED_MESSAGE,
			bottom_q->dq_label ?: "", top_q->dq_label ?: "");

	while (dof) {
		dispatch_queue_order_entry_t e = dof->dof_e;
		char **symbols;

		_simple_sprintf(buf,
				"%s queue [%s] syncing onto queue [%s] at:\n", leading_word,
				dof->dof_e->dqoe_bottom_label, dof->dof_e->dqoe_top_label);
		symbols = backtrace_symbols(e->dqoe_pcs, e->dqoe_pcs_n);
		if (symbols) {
			for (int i = 0; i < e->dqoe_pcs_n; i++) {
				_simple_sprintf(buf, "%s\n", symbols[i]);
			}
			free(symbols);
		} else {
			_simple_sappend(buf, "<missing backtrace>\n");
		}

		leading_word = "and";
		dof = dof->dof_prev;
	}

	// <rdar://problem/25053293> turn off the feature for crash handlers
	_dispatch_introspection.debug_queue_inversions = false;
	_dispatch_assert_crash(_simple_string(buf));
}
static void
_dispatch_introspection_order_check(dispatch_order_frame_t dof_prev,
		dispatch_queue_t top_q, dispatch_queue_t top_tq,
		dispatch_queue_t bottom_q, dispatch_queue_t bottom_tq)
{
	struct dispatch_order_frame_s dof = { .dof_prev = dof_prev };

	// has anyone above bottom_tq ever sync()ed onto top_tq ?
	_dispatch_unfair_lock_lock(&bottom_tq->diq_order_top_head_lock);
	TAILQ_FOREACH(dof.dof_e, &bottom_tq->diq_order_top_head, dqoe_order_top_list) {
		if (slowpath(dof.dof_e->dqoe_bottom_tq == top_tq)) {
			_dispatch_introspection_lock_inversion_fail(&dof, top_q, bottom_q);
		}
		_dispatch_introspection_order_check(&dof, top_q, top_tq,
				bottom_q, dof.dof_e->dqoe_bottom_tq);
	}
	_dispatch_unfair_lock_unlock(&bottom_tq->diq_order_top_head_lock);
}
void
_dispatch_introspection_order_record(dispatch_queue_t top_q,
		dispatch_queue_t bottom_q)
{
	dispatch_queue_order_entry_t e, it;
	const int pcs_skip = 1, pcs_n_max = 128;
	void *pcs[pcs_n_max];
	int pcs_n;

	if (!bottom_q || !bottom_q->do_targetq || !top_q->do_targetq) {
		return;
	}

	dispatch_queue_t top_tq = _dispatch_queue_bottom_target_queue(top_q);
	dispatch_queue_t bottom_tq = _dispatch_queue_bottom_target_queue(bottom_q);

	_dispatch_unfair_lock_lock(&top_tq->diq_order_top_head_lock);
	TAILQ_FOREACH(it, &top_tq->diq_order_top_head, dqoe_order_top_list) {
		if (it->dqoe_bottom_tq == bottom_tq) {
			// that dispatch_sync() is known and validated
			_dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock);
			return;
		}
	}
	_dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock);

	_dispatch_introspection_order_check(NULL, top_q, top_tq, bottom_q, bottom_tq);
	pcs_n = MAX(backtrace(pcs, pcs_n_max) - pcs_skip, 0);

	bool copy_top_label = false, copy_bottom_label = false;
	size_t size = sizeof(struct dispatch_queue_order_entry_s)
			+ (size_t)pcs_n * sizeof(void *);

	if (_dispatch_queue_label_needs_free(top_q)) {
		size += strlen(top_q->dq_label) + 1;
		copy_top_label = true;
	}
	if (_dispatch_queue_label_needs_free(bottom_q)) {
		size += strlen(bottom_q->dq_label) + 1;
		copy_bottom_label = true;
	}

	e = _dispatch_calloc(1, size);
	e->dqoe_top_tq = top_tq;
	e->dqoe_bottom_tq = bottom_tq;
	e->dqoe_pcs_n = pcs_n;
	memcpy(e->dqoe_pcs, pcs + pcs_skip, (size_t)pcs_n * sizeof(void *));
	// and then lay out the names of the queues at the end
	char *p = (char *)(e->dqoe_pcs + pcs_n);
	if (copy_top_label) {
		e->dqoe_top_label = strcpy(p, top_q->dq_label);
		p += strlen(top_q->dq_label) + 1;
	} else {
		e->dqoe_top_label = top_q->dq_label ?: "";
	}
	if (copy_bottom_label) {
		e->dqoe_bottom_label = strcpy(p, bottom_q->dq_label);
	} else {
		e->dqoe_bottom_label = bottom_q->dq_label ?: "";
	}

	_dispatch_unfair_lock_lock(&top_tq->diq_order_top_head_lock);
	TAILQ_FOREACH(it, &top_tq->diq_order_top_head, dqoe_order_top_list) {
		if (slowpath(it->dqoe_bottom_tq == bottom_tq)) {
			// someone else validated it at the same time
			_dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock);
			free(e);
			return;
		}
	}
	TAILQ_INSERT_HEAD(&top_tq->diq_order_top_head, e, dqoe_order_top_list);
	_dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock);

	_dispatch_unfair_lock_lock(&bottom_tq->diq_order_bottom_head_lock);
	TAILQ_INSERT_HEAD(&bottom_tq->diq_order_bottom_head, e, dqoe_order_bottom_list);
	_dispatch_unfair_lock_unlock(&bottom_tq->diq_order_bottom_head_lock);
}
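
/*
 * Illustrative client-code scenario (not part of this file) of the ordering
 * cycle this machinery records and then flags: if one code path, while
 * running on qA, does
 *
 *	dispatch_sync(qB, ^{ ... });	// records "qA syncs onto qB"
 *
 * and another code path, while running on qB, later does
 *
 *	dispatch_sync(qA, ^{ ... });	// the reverse ordering
 *
 * _dispatch_introspection_order_check() finds the first edge already
 * registered, and _dispatch_introspection_lock_inversion_fail() aborts with
 * the recorded backtraces of the conflicting dispatch_sync() calls.
 */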
void
_dispatch_introspection_target_queue_changed(dispatch_queue_t dq)
{
	if (!_dispatch_introspection.debug_queue_inversions) return;

	if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) {
		_dispatch_log(
				"BUG IN CLIENT OF LIBDISPATCH: queue inversion debugging "
				"cannot be used with code that changes the target "
				"of a queue already targeted by other dispatch objects\n"
				"queue %p[%s] was already targeted by other dispatch objects",
				dq, dq->dq_label ?: "");
		_dispatch_introspection.debug_queue_inversions = false;
		return;
	}

	static char const * const reasons[] = {
		[1] = "an initiator",
		[2] = "a recipient",
		[3] = "both an initiator and a recipient",
	};
	bool as_top = !TAILQ_EMPTY(&dq->diq_order_top_head);
	bool as_bottom = !TAILQ_EMPTY(&dq->diq_order_bottom_head);

	if (as_top || as_bottom) {
		_dispatch_log(
				"BUG IN CLIENT OF LIBDISPATCH: queue inversion debugging "
				"expects queues to not participate in dispatch_sync() "
				"before their setup is complete\n"
				"forgetting that queue 0x%p[%s] participated as %s of "
				"a dispatch_sync", dq, dq->dq_label ?: "",
				reasons[(int)as_top + 2 * (int)as_bottom]);
		_dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock);
		_dispatch_introspection_queue_order_dispose(dq);
		_dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock);
	}
}
#endif // DISPATCH_INTROSPECTION