/*
 * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

// Contains introspection routines that only exist in the version of the
// library with introspection support

#if DISPATCH_INTROSPECTION

#include <execinfo.h>
#include "internal.h"
#include "dispatch/introspection.h"
#include "introspection_private.h"

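// Per-thread tracking record, kept both in TSD and on the global thread list.
// dit_isa holds a constant tag value (0x41) rather than a real isa/vtable
// pointer; dit->queue points at the thread's current-queue TSD slot when the
// slot's offset inside the pthread structure could be determined.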
typedef struct dispatch_introspection_thread_s {
    void *dit_isa;
    TAILQ_ENTRY(dispatch_introspection_thread_s) dit_list;
    pthread_t thread;
    dispatch_queue_t *queue;
} dispatch_introspection_thread_s;
typedef struct dispatch_introspection_thread_s *dispatch_introspection_thread_t;

struct dispatch_introspection_state_s _dispatch_introspection = {
    .threads = TAILQ_HEAD_INITIALIZER(_dispatch_introspection.threads),
    .queues = TAILQ_HEAD_INITIALIZER(_dispatch_introspection.queues),
};

static void _dispatch_introspection_thread_remove(void *ctxt);

static void _dispatch_introspection_queue_order_dispose(dispatch_queue_t dq);

#pragma mark -
#pragma mark dispatch_introspection_init

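// Parse a boolean environment variable: "YES", "Y", "TRUE" (case-insensitive)
// or any non-zero numeric string counts as true; fall back to default_v when
// the variable is unset.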
DISPATCH_NOINLINE
static bool
_dispatch_getenv_bool(const char *env, bool default_v)
{
    const char *v = getenv(env);

    if (v) {
        return strcasecmp(v, "YES") == 0 || strcasecmp(v, "Y") == 0 ||
                strcasecmp(v, "TRUE") == 0 || atoi(v);
    }
    return default_v;
}

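// Called once at library initialization: registers the permanently allocated
// queues (main, manager, root queues) on the global introspection list, reads
// the LIBDISPATCH_DEBUG_QUEUE_INVERSIONS environment switch, and determines
// the offset of the current-queue TSD slot inside the pthread structure (via
// thread_info()'s dispatch_qaddr) so threads can later be mapped to queues.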
void
_dispatch_introspection_init(void)
{
    TAILQ_INSERT_TAIL(&_dispatch_introspection.queues,
            &_dispatch_main_q, diq_list);
    TAILQ_INSERT_TAIL(&_dispatch_introspection.queues,
            &_dispatch_mgr_q, diq_list);
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
    TAILQ_INSERT_TAIL(&_dispatch_introspection.queues,
            _dispatch_mgr_q.do_targetq, diq_list);
#endif
    for (size_t i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
        TAILQ_INSERT_TAIL(&_dispatch_introspection.queues,
                &_dispatch_root_queues[i], diq_list);
    }

    _dispatch_introspection.debug_queue_inversions =
            _dispatch_getenv_bool("LIBDISPATCH_DEBUG_QUEUE_INVERSIONS", false);

    // Hack to determine queue TSD offset from start of pthread structure
    uintptr_t thread = _dispatch_thread_self();
    thread_identifier_info_data_t tiid;
    mach_msg_type_number_t cnt = THREAD_IDENTIFIER_INFO_COUNT;
    kern_return_t kr = thread_info(pthread_mach_thread_np((void*)thread),
            THREAD_IDENTIFIER_INFO, (thread_info_t)&tiid, &cnt);
    if (!dispatch_assume_zero(kr)) {
        _dispatch_introspection.thread_queue_offset =
                (void*)(uintptr_t)tiid.dispatch_qaddr - (void*)thread;
    }
    _dispatch_thread_key_create(&dispatch_introspection_key,
            _dispatch_introspection_thread_remove);
    _dispatch_introspection_thread_add(); // add main thread
}

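// Version and size table for the introspection ABI, presumably consumed by
// debugging tools to check that the structure layouts they expect match the
// loaded library.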
const struct dispatch_introspection_versions_s
dispatch_introspection_versions = {
    .introspection_version = 1,
    .hooks_version = 2,
    .hooks_size = sizeof(dispatch_introspection_hooks_s),
    .queue_item_version = 1,
    .queue_item_size = sizeof(dispatch_introspection_queue_item_s),
    .queue_block_version = 1,
    .queue_block_size = sizeof(dispatch_introspection_queue_block_s),
    .queue_function_version = 1,
    .queue_function_size = sizeof(dispatch_introspection_queue_function_s),
    .queue_thread_version = 1,
    .queue_thread_size = sizeof(dispatch_introspection_queue_thread_s),
    .object_version = 1,
    .object_size = sizeof(dispatch_introspection_object_s),
    .queue_version = 1,
    .queue_size = sizeof(dispatch_introspection_queue_s),
    .source_version = 1,
    .source_size = sizeof(dispatch_introspection_source_s),
};

#pragma mark -
#pragma mark dispatch_introspection_threads

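// Record the calling thread on the introspection thread list (idempotent: the
// TSD key doubles as an "already added" marker). The matching TSD destructor,
// _dispatch_introspection_thread_remove, unlinks and frees the record when
// the thread exits.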
void
_dispatch_introspection_thread_add(void)
{
    if (_dispatch_thread_getspecific(dispatch_introspection_key)) {
        return;
    }
    uintptr_t thread = _dispatch_thread_self();
    dispatch_introspection_thread_t dit = (void*)_dispatch_continuation_alloc();
    dit->dit_isa = (void*)0x41;
    dit->thread = (void*)thread;
    dit->queue = !_dispatch_introspection.thread_queue_offset ? NULL :
            (void*)thread + _dispatch_introspection.thread_queue_offset;
    _dispatch_thread_setspecific(dispatch_introspection_key, dit);
    _dispatch_unfair_lock_lock(&_dispatch_introspection.threads_lock);
    TAILQ_INSERT_TAIL(&_dispatch_introspection.threads, dit, dit_list);
    _dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock);
}

static void
_dispatch_introspection_thread_remove(void *ctxt)
{
    dispatch_introspection_thread_t dit = ctxt;
    _dispatch_unfair_lock_lock(&_dispatch_introspection.threads_lock);
    TAILQ_REMOVE(&_dispatch_introspection.threads, dit, dit_list);
    _dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock);
    _dispatch_continuation_free((void*)dit);
    _dispatch_thread_setspecific(dispatch_introspection_key, NULL);
}

#pragma mark -
#pragma mark dispatch_introspection_info

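// Build a point-in-time snapshot of a queue's externally visible state from a
// single relaxed load of dq_state plus the queue's immutable fields.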
DISPATCH_USED inline
dispatch_introspection_queue_s
dispatch_introspection_queue_get_info(dispatch_queue_t dq)
{
    bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
            (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT);
    uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);

    dispatch_introspection_queue_s diq = {
        .queue = dq,
        .target_queue = dq->do_targetq,
        .label = dq->dq_label,
        .serialnum = dq->dq_serialnum,
        .width = dq->dq_width,
        .suspend_count = _dq_state_suspend_cnt(dq_state) + dq->dq_side_suspend_cnt,
        .enqueued = _dq_state_is_enqueued(dq_state) && !global,
        .barrier = _dq_state_is_in_barrier(dq_state) && !global,
        .draining = (dq->dq_items_head == (void*)~0ul) ||
                (!dq->dq_items_head && dq->dq_items_tail),
        .global = global,
        .main = (dq == &_dispatch_main_q),
    };
    return diq;
}

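// Translate a raw continuation into an introspection item. Wrapper
// continuations (QoS override wrappers, slow dispatch_sync, dispatch_apply,
// mach barriers) are unwrapped first so that the reported context, function
// and target queue describe the client's work item rather than the wrapper.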
static inline void
_dispatch_introspection_continuation_get_info(dispatch_queue_t dq,
        dispatch_continuation_t dc, dispatch_introspection_queue_item_t diqi)
{
    void *ctxt = dc->dc_ctxt;
    dispatch_function_t func = dc->dc_func;
    pthread_t waiter = NULL;
    bool apply = false;
    uintptr_t flags = dc->dc_flags;

    if (_dispatch_object_has_vtable(dc)) {
        flags = 0;
        switch (dc_type(dc)) {
#if HAVE_PTHREAD_WORKQUEUE_QOS
        case DC_OVERRIDE_STEALING_TYPE:
        case DC_OVERRIDE_OWNING_TYPE:
            dc = dc->dc_data;
            if (_dispatch_object_has_vtable(dc)) {
                // these really wrap queues so we should hide the continuation type
                dq = (dispatch_queue_t)dc;
                diqi->type = dispatch_introspection_queue_item_type_queue;
                diqi->queue = dispatch_introspection_queue_get_info(dq);
                return;
            }
            return _dispatch_introspection_continuation_get_info(dq, dc, diqi);
#endif
        case DC_ASYNC_REDIRECT_TYPE:
            DISPATCH_INTERNAL_CRASH(0, "Handled by the caller");
        case DC_MACH_SEND_BARRRIER_DRAIN_TYPE:
            break;
        case DC_MACH_SEND_BARRIER_TYPE:
        case DC_MACH_RECV_BARRIER_TYPE:
            flags = (uintptr_t)dc->dc_data;
            dq = dq->do_targetq;
            break;
        }
    } else {
        if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) {
            waiter = pthread_from_mach_thread_np((mach_port_t)dc->dc_data);
            if (flags & DISPATCH_OBJ_BARRIER_BIT) {
                dc = dc->dc_ctxt;
                dq = dc->dc_data;
            }
            ctxt = dc->dc_ctxt;
            func = dc->dc_func;
        }
        if (func == _dispatch_sync_recurse_invoke) {
            dc = dc->dc_ctxt;
            dq = dc->dc_data;
            ctxt = dc->dc_ctxt;
            func = dc->dc_func;
        } else if (func == _dispatch_apply_invoke ||
                func == _dispatch_apply_redirect_invoke) {
            dispatch_apply_t da = ctxt;
            if (da->da_todo) {
                dc = da->da_dc;
                dq = dc->dc_data;
                ctxt = dc->dc_ctxt;
                func = dc->dc_func;
                apply = true;
            }
        }
    }
    if (flags & DISPATCH_OBJ_BLOCK_BIT) {
        diqi->type = dispatch_introspection_queue_item_type_block;
        func = _dispatch_Block_invoke(ctxt);
    } else {
        diqi->type = dispatch_introspection_queue_item_type_function;
    }
    diqi->function = (dispatch_introspection_queue_function_s){
        .continuation = dc,
        .target_queue = dq,
        .context = ctxt,
        .function = func,
        .waiter = waiter,
        .barrier = (flags & DISPATCH_OBJ_BARRIER_BIT) || dq->dq_width == 1,
        .sync = flags & DISPATCH_OBJ_SYNC_SLOW_BIT,
        .apply = apply,
    };
    if (flags & DISPATCH_OBJ_GROUP_BIT) {
        dispatch_group_t group = dc->dc_data;
        if (dx_type(group) == DISPATCH_GROUP_TYPE) {
            diqi->function.group = group;
        }
    }
}

static inline
dispatch_introspection_object_s
_dispatch_introspection_object_get_info(dispatch_object_t dou)
{
    dispatch_introspection_object_s dio = {
        .object = dou._dc,
        .target_queue = dou._do->do_targetq,
        .type = (void*)dou._do->do_vtable,
        .kind = dx_kind(dou._do),
    };
    return dio;
}

static inline
dispatch_introspection_source_s
_dispatch_introspection_source_get_info(dispatch_source_t ds)
{
    dispatch_source_refs_t dr = ds->ds_refs;
    dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER];
    void *ctxt = NULL;
    dispatch_function_t handler = NULL;
    bool hdlr_is_block = false;
    if (dc) {
        ctxt = dc->dc_ctxt;
        handler = dc->dc_func;
        hdlr_is_block = (dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT);
    }

    uint64_t dq_state = os_atomic_load2o(ds, dq_state, relaxed);
    dispatch_introspection_source_s dis = {
        .source = ds,
        .target_queue = ds->do_targetq,
        .context = ctxt,
        .handler = handler,
        .suspend_count = _dq_state_suspend_cnt(dq_state) + ds->dq_side_suspend_cnt,
        .enqueued = _dq_state_is_enqueued(dq_state),
        .handler_is_block = hdlr_is_block,
        .timer = ds->ds_is_timer,
        .after = ds->ds_is_timer && (bool)(ds_timer(ds).flags & DISPATCH_TIMER_AFTER),
    };
    dispatch_kevent_t dk = ds->ds_dkev;
    if (ds->ds_is_custom_source) {
        dis.type = (unsigned long)dk;
    } else if (dk) {
        dis.type = (unsigned long)dk->dk_kevent.filter;
        dis.handle = (unsigned long)dk->dk_kevent.ident;
    }
    return dis;
}

static inline
dispatch_introspection_queue_thread_s
_dispatch_introspection_thread_get_info(dispatch_introspection_thread_t dit)
{
    dispatch_introspection_queue_thread_s diqt = {
        .object = (void*)dit,
        .thread = dit->thread,
    };
    if (dit->queue && *dit->queue) {
        diqt.queue = dispatch_introspection_queue_get_info(*dit->queue);
    }
    return diqt;
}

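// Classify an arbitrary queue item: async redirects are followed to the
// continuation they forward, queues and sources enqueued on other queues are
// reported as such, and everything else is treated as a plain continuation.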
DISPATCH_USED inline
dispatch_introspection_queue_item_s
dispatch_introspection_queue_item_get_info(dispatch_queue_t dq,
        dispatch_continuation_t dc)
{
    dispatch_introspection_queue_item_s diqi;
    dispatch_object_t dou;

again:
    dou._dc = dc;
    if (_dispatch_object_has_vtable(dou._do)) {
        unsigned long type = dx_type(dou._do);
        unsigned long metatype = type & _DISPATCH_META_TYPE_MASK;
        if (type == DC_ASYNC_REDIRECT_TYPE) {
            dq = dc->dc_data;
            dc = dc->dc_other;
            goto again;
        }
        if (metatype == _DISPATCH_CONTINUATION_TYPE) {
            _dispatch_introspection_continuation_get_info(dq, dc, &diqi);
        } else if (metatype == _DISPATCH_QUEUE_TYPE &&
                type != DISPATCH_QUEUE_SPECIFIC_TYPE) {
            diqi.type = dispatch_introspection_queue_item_type_queue;
            diqi.queue = dispatch_introspection_queue_get_info(dou._dq);
        } else if (metatype == _DISPATCH_SOURCE_TYPE &&
                type != DISPATCH_MACH_CHANNEL_TYPE) {
            diqi.type = dispatch_introspection_queue_item_type_source;
            diqi.source = _dispatch_introspection_source_get_info(dou._ds);
        } else {
            diqi.type = dispatch_introspection_queue_item_type_object;
            diqi.object = _dispatch_introspection_object_get_info(dou._do);
        }
    } else {
        _dispatch_introspection_continuation_get_info(dq, dc, &diqi);
    }
    return diqi;
}

#pragma mark -
#pragma mark dispatch_introspection_iterators

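// Cursor-style iterators intended to be driven from a debugger: each call
// fills at most `count` entries and returns the element to pass back as
// `start` on the next call (NULL once the list is exhausted); the first
// unfilled entry is marked so callers can detect a short batch.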
DISPATCH_USED
dispatch_queue_t
dispatch_introspection_get_queues(dispatch_queue_t start, size_t count,
        dispatch_introspection_queue_t queues)
{
    dispatch_queue_t next;
    next = start ? start : TAILQ_FIRST(&_dispatch_introspection.queues);
    while (count--) {
        if (!next) {
            queues->queue = NULL;
            break;
        }
        *queues++ = dispatch_introspection_queue_get_info(next);
        next = TAILQ_NEXT(next, diq_list);
    }
    return next;
}

DISPATCH_USED
dispatch_continuation_t
dispatch_introspection_get_queue_threads(dispatch_continuation_t start,
        size_t count, dispatch_introspection_queue_thread_t threads)
{
    dispatch_introspection_thread_t next = start ? (void*)start :
            TAILQ_FIRST(&_dispatch_introspection.threads);
    while (count--) {
        if (!next) {
            threads->object = NULL;
            break;
        }
        *threads++ = _dispatch_introspection_thread_get_info(next);
        next = TAILQ_NEXT(next, dit_list);
    }
    return (void*)next;
}

DISPATCH_USED
dispatch_continuation_t
dispatch_introspection_queue_get_items(dispatch_queue_t dq,
        dispatch_continuation_t start, size_t count,
        dispatch_introspection_queue_item_t items)
{
    dispatch_continuation_t next = start ? start :
            dq->dq_items_head == (void*)~0ul ? NULL : (void*)dq->dq_items_head;
    while (count--) {
        if (!next) {
            items->type = dispatch_introspection_queue_item_type_none;
            break;
        }
        *items++ = dispatch_introspection_queue_item_get_info(dq, next);
        next = next->do_next;
    }
    return next;
}

#pragma mark -
#pragma mark dispatch_introspection_hooks

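// Two hook mechanisms coexist here: the exported _dispatch_introspection_hook_*
// functions (empty bodies intended to be interposed or breakpointed by tools),
// and a function table installed at runtime via
// dispatch_introspection_hooks_install(). A table slot holding
// DISPATCH_INTROSPECTION_NO_HOOK means the callout is enabled but no client
// function is registered for it.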
#define DISPATCH_INTROSPECTION_NO_HOOK ((void*)~0ul)

dispatch_introspection_hooks_s _dispatch_introspection_hooks;
dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts;
static const
dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts_enabled = {
    .queue_create = DISPATCH_INTROSPECTION_NO_HOOK,
    .queue_dispose = DISPATCH_INTROSPECTION_NO_HOOK,
    .queue_item_enqueue = DISPATCH_INTROSPECTION_NO_HOOK,
    .queue_item_dequeue = DISPATCH_INTROSPECTION_NO_HOOK,
    .queue_item_complete = DISPATCH_INTROSPECTION_NO_HOOK,
};

#define DISPATCH_INTROSPECTION_HOOKS_COUNT (( \
        sizeof(_dispatch_introspection_hook_callouts_enabled) - \
        sizeof(_dispatch_introspection_hook_callouts_enabled._reserved)) / \
        sizeof(dispatch_function_t))

#define DISPATCH_INTROSPECTION_HOOK_ENABLED(h) \
        (slowpath(_dispatch_introspection_hooks.h))

#define DISPATCH_INTROSPECTION_HOOK_CALLOUT(h, ...) ({ \
        typeof(_dispatch_introspection_hooks.h) _h; \
        _h = _dispatch_introspection_hooks.h; \
        if (slowpath((void*)(_h) != DISPATCH_INTROSPECTION_NO_HOOK)) { \
            _h(__VA_ARGS__); \
        } })

#define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(h) \
        DISPATCH_EXPORT void _dispatch_introspection_hook_##h(void) \
        asm("_dispatch_introspection_hook_" #h); \
        void _dispatch_introspection_hook_##h(void) {}

#define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(h, ...)\
        dispatch_introspection_hook_##h(__VA_ARGS__)

DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_create);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_destroy);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_enqueue);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_dequeue);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_complete);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_callout_begin);
DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_callout_end);

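// Install a table of hook functions, returning the previous table through the
// same pointer. Hook slots that were enabled via
// dispatch_introspection_hook_callouts_enable but have no client function are
// represented internally by the DISPATCH_INTROSPECTION_NO_HOOK sentinel,
// which is translated back to NULL before the old table is handed out.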
DISPATCH_USED
void
dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks)
{
    dispatch_introspection_hooks_s old_hooks = _dispatch_introspection_hooks;
    _dispatch_introspection_hooks = *hooks;
    dispatch_function_t *e = (void*)&_dispatch_introspection_hook_callouts,
            *h = (void*)&_dispatch_introspection_hooks, *oh = (void*)&old_hooks;
    for (size_t i = 0; i < DISPATCH_INTROSPECTION_HOOKS_COUNT; i++) {
        if (!h[i] && e[i]) {
            h[i] = DISPATCH_INTROSPECTION_NO_HOOK;
        }
        if (oh[i] == DISPATCH_INTROSPECTION_NO_HOOK) {
            oh[i] = NULL;
        }
    }
    *hooks = old_hooks;
}

DISPATCH_USED
void
dispatch_introspection_hook_callouts_enable(
        dispatch_introspection_hooks_t enable)
{
    _dispatch_introspection_hook_callouts = enable ? *enable :
            _dispatch_introspection_hook_callouts_enabled;
    dispatch_function_t *e = (void*)&_dispatch_introspection_hook_callouts,
            *h = (void*)&_dispatch_introspection_hooks;
    for (size_t i = 0; i < DISPATCH_INTROSPECTION_HOOKS_COUNT; i++) {
        if (e[i] && !h[i]) {
            h[i] = DISPATCH_INTROSPECTION_NO_HOOK;
        } else if (!e[i] && h[i] == DISPATCH_INTROSPECTION_NO_HOOK) {
            h[i] = NULL;
        }
    }
}

DISPATCH_NOINLINE
void
dispatch_introspection_hook_callout_queue_create(
        dispatch_introspection_queue_t queue_info)
{
    DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_create, queue_info);
}

DISPATCH_NOINLINE
static void
_dispatch_introspection_queue_create_hook(dispatch_queue_t dq)
{
    dispatch_introspection_queue_s diq;
    diq = dispatch_introspection_queue_get_info(dq);
    dispatch_introspection_hook_callout_queue_create(&diq);
}

dispatch_queue_t
_dispatch_introspection_queue_create(dispatch_queue_t dq)
{
    TAILQ_INIT(&dq->diq_order_top_head);
    TAILQ_INIT(&dq->diq_order_bottom_head);
    _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock);
    TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, dq, diq_list);
    _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock);

    DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_create, dq);
    if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_create)) {
        _dispatch_introspection_queue_create_hook(dq);
    }
    return dq;
}

DISPATCH_NOINLINE
void
dispatch_introspection_hook_callout_queue_dispose(
        dispatch_introspection_queue_t queue_info)
{
    DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_dispose, queue_info);
}

DISPATCH_NOINLINE
static void
_dispatch_introspection_queue_dispose_hook(dispatch_queue_t dq)
{
    dispatch_introspection_queue_s diq;
    diq = dispatch_introspection_queue_get_info(dq);
    dispatch_introspection_hook_callout_queue_dispose(&diq);
}

void
_dispatch_introspection_queue_dispose(dispatch_queue_t dq)
{
    DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_destroy, dq);
    if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_dispose)) {
        _dispatch_introspection_queue_dispose_hook(dq);
    }

    _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock);
    TAILQ_REMOVE(&_dispatch_introspection.queues, dq, diq_list);
    _dispatch_introspection_queue_order_dispose(dq);
    _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock);
}

DISPATCH_NOINLINE
void
dispatch_introspection_hook_callout_queue_item_enqueue(dispatch_queue_t queue,
        dispatch_introspection_queue_item_t item)
{
    DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_enqueue, queue, item);
}

DISPATCH_NOINLINE
static void
_dispatch_introspection_queue_item_enqueue_hook(dispatch_queue_t dq,
        dispatch_object_t dou)
{
    dispatch_introspection_queue_item_s diqi;
    diqi = dispatch_introspection_queue_item_get_info(dq, dou._dc);
    dispatch_introspection_hook_callout_queue_item_enqueue(dq, &diqi);
}

void
_dispatch_introspection_queue_item_enqueue(dispatch_queue_t dq,
        dispatch_object_t dou)
{
    DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(
            queue_item_enqueue, dq, dou);
    if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_enqueue)) {
        _dispatch_introspection_queue_item_enqueue_hook(dq, dou);
    }
}

DISPATCH_NOINLINE
void
dispatch_introspection_hook_callout_queue_item_dequeue(dispatch_queue_t queue,
        dispatch_introspection_queue_item_t item)
{
    DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_dequeue, queue, item);
}

DISPATCH_NOINLINE
static void
_dispatch_introspection_queue_item_dequeue_hook(dispatch_queue_t dq,
        dispatch_object_t dou)
{
    dispatch_introspection_queue_item_s diqi;
    diqi = dispatch_introspection_queue_item_get_info(dq, dou._dc);
    dispatch_introspection_hook_callout_queue_item_dequeue(dq, &diqi);
}

void
_dispatch_introspection_queue_item_dequeue(dispatch_queue_t dq,
        dispatch_object_t dou)
{
    DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(
            queue_item_dequeue, dq, dou);
    if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_dequeue)) {
        _dispatch_introspection_queue_item_dequeue_hook(dq, dou);
    }
}

DISPATCH_NOINLINE
void
dispatch_introspection_hook_callout_queue_item_complete(
        dispatch_continuation_t object)
{
    DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_complete, object);
}

DISPATCH_NOINLINE
static void
_dispatch_introspection_queue_item_complete_hook(dispatch_object_t dou)
{
    dispatch_introspection_hook_callout_queue_item_complete(dou._dc);
}

void
_dispatch_introspection_queue_item_complete(dispatch_object_t dou)
{
    DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_item_complete, dou);
    if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_complete)) {
        _dispatch_introspection_queue_item_complete_hook(dou);
    }
}

void
_dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f)
{
    dispatch_queue_t dq = _dispatch_queue_get_current();
    DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(
            queue_callout_begin, dq, ctxt, f);
}

void
_dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f)
{
    dispatch_queue_t dq = _dispatch_queue_get_current();
    DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(
            queue_callout_end, dq, ctxt, f);
}

#pragma mark -
#pragma mark dispatch introspection deadlock detection

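// One recorded dispatch_sync() edge: code running on (a queue targeting)
// dqoe_bottom_tq synced onto (a queue targeting) dqoe_top_tq. The entry is
// linked on both queues' lists and keeps the backtrace of the first time the
// edge was observed so inversions can be reported with call stacks.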
typedef struct dispatch_queue_order_entry_s *dispatch_queue_order_entry_t;
struct dispatch_queue_order_entry_s {
    TAILQ_ENTRY(dispatch_queue_order_entry_s) dqoe_order_top_list;
    TAILQ_ENTRY(dispatch_queue_order_entry_s) dqoe_order_bottom_list;
    const char *dqoe_top_label;
    const char *dqoe_bottom_label;
    dispatch_queue_t dqoe_top_tq;
    dispatch_queue_t dqoe_bottom_tq;
    int dqoe_pcs_n;
    void *dqoe_pcs[];
};

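// Drop every ordering edge that mentions dq, unlinking each entry from the
// peer queue's list before freeing it.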
static void
_dispatch_introspection_queue_order_dispose(dispatch_queue_t dq)
{
    dispatch_queue_order_entry_t e, te;
    dispatch_queue_t otherq;
    TAILQ_HEAD(, dispatch_queue_order_entry_s) head;

    // this whole thing happens with _dispatch_introspection.queues_lock locked

    _dispatch_unfair_lock_lock(&dq->diq_order_top_head_lock);
    head.tqh_first = dq->diq_order_top_head.tqh_first;
    head.tqh_last = dq->diq_order_top_head.tqh_last;
    TAILQ_INIT(&dq->diq_order_top_head);
    _dispatch_unfair_lock_unlock(&dq->diq_order_top_head_lock);

    TAILQ_FOREACH_SAFE(e, &head, dqoe_order_top_list, te) {
        otherq = e->dqoe_bottom_tq;
        _dispatch_unfair_lock_lock(&otherq->diq_order_bottom_head_lock);
        TAILQ_REMOVE(&otherq->diq_order_bottom_head, e, dqoe_order_bottom_list);
        _dispatch_unfair_lock_unlock(&otherq->diq_order_bottom_head_lock);
        free(e);
    }

    _dispatch_unfair_lock_lock(&dq->diq_order_bottom_head_lock);
    head.tqh_first = dq->diq_order_bottom_head.tqh_first;
    head.tqh_last = dq->diq_order_bottom_head.tqh_last;
    TAILQ_INIT(&dq->diq_order_bottom_head);
    _dispatch_unfair_lock_unlock(&dq->diq_order_bottom_head_lock);

    TAILQ_FOREACH_SAFE(e, &head, dqoe_order_bottom_list, te) {
        otherq = e->dqoe_top_tq;
        _dispatch_unfair_lock_lock(&otherq->diq_order_top_head_lock);
        TAILQ_REMOVE(&otherq->diq_order_top_head, e, dqoe_order_top_list);
        _dispatch_unfair_lock_unlock(&otherq->diq_order_top_head_lock);
        free(e);
    }
}

// caller must make sure dq is not a root queue
DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_t
_dispatch_queue_bottom_target_queue(dispatch_queue_t dq)
{
    while (dq->do_targetq->do_targetq) {
        dq = dq->do_targetq;
    }
    return dq;
}

typedef struct dispatch_order_frame_s *dispatch_order_frame_t;
struct dispatch_order_frame_s {
    dispatch_order_frame_t dof_prev;
    dispatch_queue_order_entry_t dof_e;
};

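// Build a human-readable report of the inversion (the conflicting edge plus
// every recorded edge on the path being walked, with symbolicated backtraces)
// and crash with it.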
DISPATCH_NOINLINE
static void
_dispatch_introspection_lock_inversion_fail(dispatch_order_frame_t dof,
        dispatch_queue_t top_q, dispatch_queue_t bottom_q)
{
    _SIMPLE_STRING buf = _simple_salloc();
    const char *leading_word = "with";

    _simple_sprintf(buf, "%s Lock inversion detected\n"
            "queue [%s] trying to sync onto queue [%s] conflicts\n",
            DISPATCH_ASSERTION_FAILED_MESSAGE,
            bottom_q->dq_label ?: "", top_q->dq_label ?: "");

    while (dof) {
        dispatch_queue_order_entry_t e = dof->dof_e;
        char **symbols;

        _simple_sprintf(buf,
                "%s queue [%s] syncing onto queue [%s] at:\n", leading_word,
                dof->dof_e->dqoe_bottom_label, dof->dof_e->dqoe_top_label);

        symbols = backtrace_symbols(e->dqoe_pcs, e->dqoe_pcs_n);
        if (symbols) {
            for (int i = 0; i < e->dqoe_pcs_n; i++) {
                _simple_sprintf(buf, "%s\n", symbols[i]);
            }
            free(symbols);
        } else {
            _simple_sappend(buf, "<missing backtrace>\n");
        }

        leading_word = "and";
        dof = dof->dof_prev;
    }

    // <rdar://problem/25053293> turn off the feature for crash handlers
    _dispatch_introspection.debug_queue_inversions = false;
    _dispatch_assert_crash(_simple_string(buf));
    _simple_sfree(buf);
}

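// Recursive check run before recording a new top_tq <- bottom_tq sync edge:
// walk everything that has previously synced onto bottom_tq (transitively);
// if top_tq shows up on that path, the new edge would close a cycle in the
// sync ordering graph, which is reported as a lock inversion.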
static void
_dispatch_introspection_order_check(dispatch_order_frame_t dof_prev,
        dispatch_queue_t top_q, dispatch_queue_t top_tq,
        dispatch_queue_t bottom_q, dispatch_queue_t bottom_tq)
{
    struct dispatch_order_frame_s dof = { .dof_prev = dof_prev };

    // has anyone above bottom_tq ever sync()ed onto top_tq ?
    _dispatch_unfair_lock_lock(&bottom_tq->diq_order_top_head_lock);
    TAILQ_FOREACH(dof.dof_e, &bottom_tq->diq_order_top_head, dqoe_order_top_list) {
        if (slowpath(dof.dof_e->dqoe_bottom_tq == top_tq)) {
            _dispatch_introspection_lock_inversion_fail(&dof, top_q, bottom_q);
        }
        _dispatch_introspection_order_check(&dof, top_q, top_tq,
                bottom_q, dof.dof_e->dqoe_bottom_tq);
    }
    _dispatch_unfair_lock_unlock(&bottom_tq->diq_order_top_head_lock);
}

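// Record that bottom_q performed a dispatch_sync() onto top_q. The edge is
// keyed on the bottommost (root-adjacent) target queues, validated against
// the existing graph for inversions, and then stored together with a
// backtrace and copies of any dynamically allocated queue labels.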
void
_dispatch_introspection_order_record(dispatch_queue_t top_q,
        dispatch_queue_t bottom_q)
{
    dispatch_queue_order_entry_t e, it;
    const int pcs_skip = 1, pcs_n_max = 128;
    void *pcs[pcs_n_max];
    int pcs_n;

    if (!bottom_q || !bottom_q->do_targetq || !top_q->do_targetq) {
        return;
    }

    dispatch_queue_t top_tq = _dispatch_queue_bottom_target_queue(top_q);
    dispatch_queue_t bottom_tq = _dispatch_queue_bottom_target_queue(bottom_q);

    _dispatch_unfair_lock_lock(&top_tq->diq_order_top_head_lock);
    TAILQ_FOREACH(it, &top_tq->diq_order_top_head, dqoe_order_top_list) {
        if (it->dqoe_bottom_tq == bottom_tq) {
            // that dispatch_sync() is known and validated
            // move on
            _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock);
            return;
        }
    }
    _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock);

    _dispatch_introspection_order_check(NULL, top_q, top_tq, bottom_q, bottom_tq);
    pcs_n = MAX(backtrace(pcs, pcs_n_max) - pcs_skip, 0);

    bool copy_top_label = false, copy_bottom_label = false;
    size_t size = sizeof(struct dispatch_queue_order_entry_s)
            + (size_t)pcs_n * sizeof(void *);

    if (_dispatch_queue_label_needs_free(top_q)) {
        size += strlen(top_q->dq_label) + 1;
        copy_top_label = true;
    }
    if (_dispatch_queue_label_needs_free(bottom_q)) {
        size += strlen(bottom_q->dq_label) + 1;
        copy_bottom_label = true;
    }

    e = _dispatch_calloc(1, size);
    e->dqoe_top_tq = top_tq;
    e->dqoe_bottom_tq = bottom_tq;
    e->dqoe_pcs_n = pcs_n;
    memcpy(e->dqoe_pcs, pcs + pcs_skip, (size_t)pcs_n * sizeof(void *));
    // and then lay out the names of the queues at the end
    char *p = (char *)(e->dqoe_pcs + pcs_n);
    if (copy_top_label) {
        e->dqoe_top_label = strcpy(p, top_q->dq_label);
        p += strlen(p) + 1;
    } else {
        e->dqoe_top_label = top_q->dq_label ?: "";
    }
    if (copy_bottom_label) {
        e->dqoe_bottom_label = strcpy(p, bottom_q->dq_label);
    } else {
        e->dqoe_bottom_label = bottom_q->dq_label ?: "";
    }

    _dispatch_unfair_lock_lock(&top_tq->diq_order_top_head_lock);
    TAILQ_FOREACH(it, &top_tq->diq_order_top_head, dqoe_order_top_list) {
        if (slowpath(it->dqoe_bottom_tq == bottom_tq)) {
            // someone else validated it at the same time
            // go away quickly
            _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock);
            free(e);
            return;
        }
    }
    TAILQ_INSERT_HEAD(&top_tq->diq_order_top_head, e, dqoe_order_top_list);
    _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock);

    _dispatch_unfair_lock_lock(&bottom_tq->diq_order_bottom_head_lock);
    TAILQ_INSERT_HEAD(&bottom_tq->diq_order_bottom_head, e, dqoe_order_bottom_list);
    _dispatch_unfair_lock_unlock(&bottom_tq->diq_order_bottom_head_lock);
}

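// A target-queue change invalidates the sync-ordering history recorded for
// dq: if other dispatch objects already target dq the debugging feature is
// turned off entirely, otherwise any edges dq already participated in are
// logged and forgotten.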
void
_dispatch_introspection_target_queue_changed(dispatch_queue_t dq)
{
    if (!_dispatch_introspection.debug_queue_inversions) return;

    if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) {
        _dispatch_log(
                "BUG IN CLIENT OF LIBDISPATCH: queue inversion debugging "
                "cannot be used with code that changes the target "
                "of a queue already targeted by other dispatch objects\n"
                "queue %p[%s] was already targeted by other dispatch objects",
                dq, dq->dq_label ?: "");
        _dispatch_introspection.debug_queue_inversions = false;
        return;
    }

    static char const * const reasons[] = {
        [1] = "an initiator",
        [2] = "a recipient",
        [3] = "both an initiator and a recipient"
    };
    bool as_top = !TAILQ_EMPTY(&dq->diq_order_top_head);
    bool as_bottom = !TAILQ_EMPTY(&dq->diq_order_bottom_head);

    if (as_top || as_bottom) {
        _dispatch_log(
                "BUG IN CLIENT OF LIBDISPATCH: queue inversion debugging "
                "expects queues to not participate in dispatch_sync() "
                "before their setup is complete\n"
                "forgetting that queue 0x%p[%s] participated as %s of "
                "a dispatch_sync", dq, dq->dq_label ?: "",
                reasons[(int)as_top + 2 * (int)as_bottom]);
        _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock);
        _dispatch_introspection_queue_order_dispose(dq);
        _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock);
    }
}

#endif // DISPATCH_INTROSPECTION