1 /*
2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 /*
22 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
23 * which are subject to change in future releases of Mac OS X. Any applications
24 * relying on these interfaces WILL break.
25 */
26
27 #ifndef __DISPATCH_QUEUE_INTERNAL__
28 #define __DISPATCH_QUEUE_INTERNAL__
29
30 #ifndef __DISPATCH_INDIRECT__
31 #error "Please #include <dispatch/dispatch.h> instead of this file directly."
32 #include <dispatch/base.h> // for HeaderDoc
33 #endif
34
35 #if defined(__BLOCKS__) && !defined(DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES)
36 #define DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES 1 // <rdar://problem/10719357>
37 #endif
38
39 /* x86 & Cortex-A8 have a 64-byte cacheline */
40 #define DISPATCH_CACHELINE_SIZE 64u
41 #define ROUND_UP_TO_CACHELINE_SIZE(x) \
42 (((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \
43 ~(DISPATCH_CACHELINE_SIZE - 1u))
44 #define DISPATCH_CACHELINE_ALIGN \
45 __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE)))
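/*
 * A quick worked example of the rounding macro above (illustrative only):
 * a 72-byte structure spans two 64-byte cache lines once rounded up, while
 * exact multiples are left unchanged.
 *
 * <code>
 * _Static_assert(ROUND_UP_TO_CACHELINE_SIZE(72) == 128,
 *         "72 bytes round up to two cache lines");
 * _Static_assert(ROUND_UP_TO_CACHELINE_SIZE(64) == 64,
 *         "exact multiples are unchanged");
 * </code>
 */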
46
47
48 #pragma mark -
49 #pragma mark dispatch_queue_t
50
51 DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
52 DQF_NONE = 0x0000,
53 DQF_AUTORELEASE_ALWAYS = 0x0001,
54 DQF_AUTORELEASE_NEVER = 0x0002,
55 #define _DQF_AUTORELEASE_MASK 0x0003
56 DQF_THREAD_BOUND = 0x0004, // queue is bound to a thread
57 DQF_BARRIER_BIT = 0x0008, // queue is a barrier on its target
58 DQF_TARGETED = 0x0010, // queue is targeted by another object
59 DQF_LABEL_NEEDS_FREE = 0x0020, // queue label was strduped; need to free it
60 DQF_CANNOT_TRYSYNC = 0x0040,
61 DQF_RELEASED = 0x0080, // xref_cnt == -1
62
63 // only applies to sources
64 //
65 // Assuming DSF_ARMED (a), DSF_DEFERRED_DELETE (p), DSF_DELETED (d):
66 //
67 // ---
68 // a--
69 // source states for regular operations
70 // (delivering event / waiting for event)
71 //
72 // ap-
73 // Either armed for deferred deletion delivery, waiting for an EV_DELETE,
74 // and the next state will be -pd (EV_DELETE delivered),
75 // Or, a cancellation raced with an event delivery and failed
76 // (EINPROGRESS), and when the event delivery happens, the next state
77 // will be -p-.
78 //
79 // -pd
80 // Received EV_DELETE (from ap-), needs to free `ds_dkev`, the knote is
81 // gone from the kernel, but ds_dkev lives. Next state will be --d.
82 //
83 // -p-
84 * Received an EV_ONESHOT event (from a--), or the event whose delivery
85 * caused the cancellation to fail with EINPROGRESS has now been delivered
86 * (from ap-). The knote still lives; the next state will be --d.
87 //
88 // --d
89 // Final state of the source, the knote is gone from the kernel and
90 // ds_dkev is freed. The source can safely be released.
91 //
92 // a-d (INVALID)
93 // apd (INVALID)
94 // Setting DSF_DELETED should also always atomically clear DSF_ARMED. If
95 // the knote is gone from the kernel, it makes no sense whatsoever to
96 // have it armed. And generally speaking, once `d` or `p` has been set,
97 // `a` cannot do a cleared -> set transition anymore
98 // (see _dispatch_source_try_set_armed).
99 //
100 DSF_CANCEL_WAITER = 0x0800, // synchronous waiters for cancel
101 DSF_CANCELED = 0x1000, // cancellation has been requested
102 DSF_ARMED = 0x2000, // source is armed
103 DSF_DEFERRED_DELETE = 0x4000, // source is pending delete
104 DSF_DELETED = 0x8000, // source knote is deleted
105 #define DSF_STATE_MASK (DSF_ARMED | DSF_DEFERRED_DELETE | DSF_DELETED)
106
107 DQF_WIDTH_MASK = 0xffff0000,
108 #define DQF_WIDTH_SHIFT 16
109 );
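/*
 * Illustrative sketch (not actual libdispatch code) of how the layout above
 * is meant to be read: the low 16 bits of `dq_atomic_flags` carry the DQF_
 * and DSF_ bits, the high 16 bits carry the queue width, and the terminal
 * source state described in the diagram is "deleted, not armed, no deferred
 * delete pending".
 *
 * <code>
 * static inline uint16_t
 * _example_dqf_width(uint32_t dqf)
 * {
 *         return (uint16_t)((dqf & DQF_WIDTH_MASK) >> DQF_WIDTH_SHIFT);
 * }
 *
 * static inline bool
 * _example_source_fully_deleted(uint32_t dqf)
 * {
 *         // --d in the state diagram: ds_dkev is freed and the source can
 *         // safely be released
 *         return (dqf & DSF_STATE_MASK) == DSF_DELETED;
 * }
 * </code>
 */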
110
111 #define _DISPATCH_QUEUE_HEADER(x) \
112 struct os_mpsc_queue_s _as_oq[0]; \
113 DISPATCH_OBJECT_HEADER(x); \
114 _OS_MPSC_QUEUE_FIELDS(dq, dq_state); \
115 dispatch_queue_t dq_specific_q; \
116 union { \
117 uint32_t volatile dq_atomic_flags; \
118 DISPATCH_STRUCT_LITTLE_ENDIAN_2( \
119 uint16_t dq_atomic_bits, \
120 uint16_t dq_width \
121 ); \
122 }; \
123 uint32_t dq_side_suspend_cnt; \
124 DISPATCH_INTROSPECTION_QUEUE_HEADER; \
125 dispatch_unfair_lock_s dq_sidelock
126 /* LP64: 32bit hole on LP64 */
127
128 #define DISPATCH_QUEUE_HEADER(x) \
129 struct dispatch_queue_s _as_dq[0]; \
130 _DISPATCH_QUEUE_HEADER(x)
131
132 #define DISPATCH_QUEUE_ALIGN __attribute__((aligned(8)))
133
134 #define DISPATCH_QUEUE_WIDTH_POOL 0x7fff
135 #define DISPATCH_QUEUE_WIDTH_MAX 0x7ffe
136 #define DISPATCH_QUEUE_USES_REDIRECTION(width) \
137 ({ uint16_t _width = (width); \
138 _width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; })
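/*
 * For reference (illustrative only), the redirection predicate above is false
 * for serial queues and for the global pool width, and true for any bounded
 * concurrent width:
 *
 * <code>
 * DISPATCH_QUEUE_USES_REDIRECTION(1)                         // false: serial
 * DISPATCH_QUEUE_USES_REDIRECTION(8)                         // true
 * DISPATCH_QUEUE_USES_REDIRECTION(DISPATCH_QUEUE_WIDTH_MAX)  // true
 * DISPATCH_QUEUE_USES_REDIRECTION(DISPATCH_QUEUE_WIDTH_POOL) // false: root pool
 * </code>
 */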
139
140 #define DISPATCH_QUEUE_CACHELINE_PADDING \
141 char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
142 #ifdef __LP64__
143 #define DISPATCH_QUEUE_CACHELINE_PAD (( \
144 (sizeof(uint32_t) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \
145 + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
146 #elif OS_OBJECT_HAVE_OBJC1
147 #define DISPATCH_QUEUE_CACHELINE_PAD (( \
148 (11*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \
149 + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
150 #else
151 #define DISPATCH_QUEUE_CACHELINE_PAD (( \
152 (12*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \
153 + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
154 #endif
155
156 /*
157 * dispatch queues `dq_state` demystified
158 *
159 *******************************************************************************
160 *
161 * Most Significant 32 bit Word
162 * ----------------------------
163 *
164 * sc: suspend count (bits 63 - 57)
165 * The suspend count holds the number of times the queue has been suspended.
166 * Only 7 bits are stored inline; extra counts are transferred to a side
167 * suspend count, and when that has happened, the ssc: bit is set.
168 */
169 #define DISPATCH_QUEUE_SUSPEND_INTERVAL 0x0200000000000000ull
170 #define DISPATCH_QUEUE_SUSPEND_HALF 0x40u
171 /*
172 * ssc: side suspend count (bit 56)
173 * This bit means that the total suspend count didn't fit in the inline
174 * suspend count, and that there are additional suspend counts stored in the
175 * `dq_side_suspend_cnt` field.
176 */
177 #define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT 0x0100000000000000ull
178 /*
179 * i: inactive bit (bit 55)
180 * This bit means that the object is inactive (see dispatch_activate)
181 */
182 #define DISPATCH_QUEUE_INACTIVE 0x0080000000000000ull
183 /*
184 * na: needs activation (bit 54)
185 * This bit is set if the object is created inactive. It tells
186 * dispatch_queue_wakeup to perform various tasks at first wakeup.
187 *
188 * This bit is cleared as part of the first wakeup. Having that bit prevents
189 * the object from being woken up (because _dq_state_should_wakeup will say
190 * no), except in the dispatch_activate/dispatch_resume codepath.
191 */
192 #define DISPATCH_QUEUE_NEEDS_ACTIVATION 0x0040000000000000ull
193 /*
194 * This mask covers the suspend count (sc), side suspend count bit (ssc),
195 * inactive (i) and needs activation (na) bits
196 */
197 #define DISPATCH_QUEUE_SUSPEND_BITS_MASK 0xffc0000000000000ull
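/*
 * Illustrative decode of the bits documented above (not actual libdispatch
 * code): because the inline suspend count occupies the topmost bits, a plain
 * division by the interval recovers it.
 *
 * <code>
 * static inline uint32_t
 * _example_dq_state_suspend_cnt(uint64_t dq_state)
 * {
 *         // bits 63 - 57; DISPATCH_QUEUE_SUSPEND_INTERVAL is 1ull << 57
 *         return (uint32_t)(dq_state / DISPATCH_QUEUE_SUSPEND_INTERVAL);
 * }
 *
 * static inline bool
 * _example_dq_state_is_inactive(uint64_t dq_state)
 * {
 *         return (dq_state & DISPATCH_QUEUE_INACTIVE) != 0;
 * }
 * </code>
 */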
198 /*
199 * ib: in barrier (bit 53)
200 * This bit is set when the queue is currently executing a barrier
201 */
202 #define DISPATCH_QUEUE_IN_BARRIER 0x0020000000000000ull
203 /*
204 * qf: queue full (bit 52)
205 * This bit is a subtle hack that allows checking, for any queue width,
206 * whether the full width of the queue is used or reserved (depending on the
207 * context); in other words, whether the queue has reached or overflowed its capacity.
208 */
209 #define DISPATCH_QUEUE_WIDTH_FULL_BIT 0x0010000000000000ull
210 #define DISPATCH_QUEUE_WIDTH_FULL 0x8000ull
211 /*
212 * w: width (bits 51 - 37)
213 * This encodes how many work items are in flight. Barriers hold `dq_width`
214 * of them while they run. This is encoded as a signed offset with respect
215 * to full use, where the negative values represent how many available slots
216 * are left, and the positive values how many work items are exceeding our
217 * capacity.
218 *
219 * When this value is positive, then the `qf` bit is always set to 1.
220 */
221 #define DISPATCH_QUEUE_WIDTH_INTERVAL 0x0000002000000000ull
222 #define DISPATCH_QUEUE_WIDTH_MASK 0x001fffe000000000ull
223 #define DISPATCH_QUEUE_WIDTH_SHIFT 37
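/*
 * Illustrative sketch (not actual libdispatch code) of the width encoding
 * described above: the field starts at WIDTH_FULL - dq_width, each work item
 * in flight adds one WIDTH_INTERVAL, and reaching WIDTH_FULL overflows into
 * bit 52, which is exactly the qf ("queue full") bit.
 *
 * <code>
 * static inline int32_t
 * _example_dq_state_used_width(uint64_t dq_state, uint16_t dq_width)
 * {
 *         uint64_t field = (dq_state & (DISPATCH_QUEUE_WIDTH_MASK |
 *                         DISPATCH_QUEUE_WIDTH_FULL_BIT)) >>
 *                         DISPATCH_QUEUE_WIDTH_SHIFT;
 *         // subtract the bias back out to get the number of items in flight
 *         return (int32_t)field -
 *                         (int32_t)(DISPATCH_QUEUE_WIDTH_FULL - dq_width);
 * }
 * </code>
 */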
224 /*
225 * pb: pending barrier (bit 36)
226 * Drainers set this bit when they couldn't run the next work item and it is
227 * a barrier. When this bit is set, `dq_width - 1` work item slots are
228 * reserved so that no wakeup happens until the last work item in flight
229 * completes.
230 */
231 #define DISPATCH_QUEUE_PENDING_BARRIER 0x0000001000000000ull
232 /*
233 * d: dirty bit (bit 35)
234 * This bit is set when a queue transitions from empty to not empty.
235 * This bit is set before dq_items_head is set, with appropriate barriers.
236 * Any thread looking at a queue head is responsible for unblocking any
237 * dispatch_*_sync that could be enqueued at the beginning.
238 *
239 * Drainer perspective
240 * ===================
241 *
242 * When done, any "Drainer", in particular for dispatch_*_sync() handoff
243 * paths, exits in 3 steps, and the point of the DIRTY bit is to make
244 * the Drainers take the slowpath at step 2 in order to account for enqueuers
245 * that could have made the queue non-idle concurrently.
246 *
247 * <code>
248 * // drainer-exit step 1
249 * if (slowpath(dq->dq_items_tail)) { // speculative test
250 * return handle_non_empty_queue_or_wakeup(dq);
251 * }
252 * // drainer-exit step 2
253 * if (!_dispatch_queue_drain_try_unlock(dq, ${owned}, ...)) {
254 * return handle_non_empty_queue_or_wakeup(dq);
255 * }
256 * // drainer-exit step 3
257 * // no need to wake up the queue, it's really empty for sure
258 * return;
259 * </code>
260 *
261 * The crux is _dispatch_queue_drain_try_unlock(), a function whose
262 * contract is to release everything the current thread owns from the queue
263 * state, so that when it's successful, any other thread can acquire
264 * width from that queue.
265 *
266 * But, that function must fail if it sees the DIRTY bit set, leaving
267 * the state untouched. Leaving the state untouched is vital as it ensures
268 * that no other Slayer^WDrainer can rise at the same time, because the
269 * resource stays locked.
270 *
271 *
272 * Note that releasing the DRAIN_LOCK or ENQUEUE_LOCK (see below) currently
273 * doesn't use that pattern, and always tries to requeue. It isn't a problem
274 * because while holding either of these locks prevents *some* sync codepaths
275 * (the barrier ones) from acquiring the resource, the retry they perform
276 * at their step D (see just below) isn't affected by the state of these bits
277 * at all.
278 *
279 *
280 * Sync items perspective
281 * ======================
282 *
283 * On the dispatch_*_sync() acquire side, the code must look like this:
284 *
285 * <code>
286 * // step A
287 * if (try_acquire_sync(dq)) {
288 * return sync_operation_fastpath(dq, item);
289 * }
290 *
291 * // step B
292 * if (queue_push_and_inline(dq, item)) {
293 * atomic_store(dq->dq_items_head, item, relaxed);
294 * // step C
295 * atomic_or(dq->dq_state, DIRTY, release);
296 *
297 * // step D
298 * if (try_acquire_sync(dq)) {
299 * try_lock_transfer_or_wakeup(dq);
300 * }
301 * }
302 *
303 * // step E
304 * wait_for_lock_transfer(dq);
305 * </code>
306 *
307 * A. If this code can acquire the resource it needs at step A, we're good.
308 *
309 * B. If the item isn't the first at enqueue time, then there is no issue.
310 * At least one other thread went through C; this thread isn't interesting
311 * for the possible races, and responsibility to make progress is transferred
312 * to the thread which went through C-D.
313 *
314 * C. The DIRTY bit is set with a release barrier, after the head/tail
315 * has been set, so that seeing the DIRTY bit means that head/tail
316 * will be visible to any drainer that has the matching acquire barrier.
317 *
318 * Drainers may see the head/tail and fail to see DIRTY, in which
319 * case, their _dispatch_queue_drain_try_unlock() will clear the DIRTY
320 * bit, and fail, causing the caller to retry exactly once.
321 *
322 * D. At this stage, there are two possible outcomes:
323 *
324 * - either the acquire works this time, in which case this thread
325 * successfully becomes a drainer. That's obviously the happy path.
326 * It means all drainers are past their step 2 (or there is no drainer).
327 *
328 * - or the acquire fails, which means that another drainer is before
329 * its step 2. Since we have set the DIRTY bit on the dq_state by now,
330 * and since drainers manipulate the state atomically, at least one
331 * drainer that is still before its step 2 will fail its step 2, and
332 * be responsible for making progress.
333 *
334 *
335 * Async items perspective
336 * =======================
337 *
338 * On the async codepath, when the queue becomes non-empty, the queue
339 * is always woken up. There is no point in trying to avoid that wake up
340 * for the async case, because it's required for the async()ed item to make
341 * progress: a drain of the queue must happen.
342 *
343 * So on the async "acquire" side, there is no subtlety at all.
344 */
345 #define DISPATCH_QUEUE_DIRTY 0x0000000800000000ull
346 /*
347 * qo: (bit 34)
348 * Set when a queue has a useful override set.
349 * This bit is only cleared when the final drain_try_unlock() succeeds.
350 *
351 * When the queue dq_override is touched (overrides or-ed in), usually with
352 * _dispatch_queue_override_priority(), then the HAS_OVERRIDE bit is set
353 * with a release barrier, and one of these three things happens next:
354 *
355 * - the queue is enqueued, which will cause it to be drained, and the
356 * override to be handled by _dispatch_queue_drain_try_unlock().
357 * In rare cases it could cause the queue to be enqueued while empty, though.
358 *
359 * - the DIRTY bit is also set with a release barrier, which pairs with
360 * the handling of these bits by _dispatch_queue_drain_try_unlock(),
361 * so that dq_override is reset properly.
362 *
363 * - the queue was suspended, and _dispatch_queue_resume() will handle the
364 * override as part of its wakeup sequence.
365 */
366 #define DISPATCH_QUEUE_HAS_OVERRIDE 0x0000000400000000ull
367 /*
368 * p: pended bit (bit 33)
369 * Set when a drain lock has been pended. When this bit is set,
370 * the drain lock is taken and ENQUEUED is never set.
371 *
372 * This bit marks a queue that needs further processing but was kept pended
373 * by an async drainer (not reenqueued) in the hope of being able to drain
374 * it further later.
375 */
376 #define DISPATCH_QUEUE_DRAIN_PENDED 0x0000000200000000ull
377 /*
378 * e: enqueued bit (bit 32)
379 * Set when a queue is enqueued on its target queue
380 */
381 #define DISPATCH_QUEUE_ENQUEUED 0x0000000100000000ull
382 /*
383 * dl: drain lock (bits 31-0)
384 * This is used by the normal drain to drain exclusively relative to other
385 * drain stealers (like the QoS Override codepath). It holds the identity
386 * (thread port) of the current drainer.
387 */
388 #define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK 0x00000002ffffffffull
389 #ifdef DLOCK_NOWAITERS_BIT
390 #define DISPATCH_QUEUE_DRAIN_OWNER_MASK \
391 ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_NOFAILED_TRYLOCK_BIT))
392 #define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \
393 (((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))\
394 ^ DLOCK_NOWAITERS_BIT)
395 #define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \
396 (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \
397 DLOCK_NOWAITERS_BIT)
398 #else
399 #define DISPATCH_QUEUE_DRAIN_OWNER_MASK \
400 ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_FAILED_TRYLOCK_BIT))
401 #define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \
402 ((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))
403 #define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \
404 (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \
405 DLOCK_WAITERS_BIT)
406 #endif
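/*
 * Illustrative only (assuming the DLOCK_ definitions from the lock headers):
 * the low 32 bits of dq_state hold the drain lock value, whose owner field
 * identifies the draining thread.
 *
 * <code>
 * static inline mach_port_t
 * _example_dq_state_drain_owner(uint64_t dq_state)
 * {
 *         return (mach_port_t)(dq_state & DLOCK_OWNER_MASK);
 * }
 * </code>
 */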
407 /*
408 *******************************************************************************
409 *
410 * `Drainers`
411 *
412 * Drainers are parts of the code that hold the drain lock by setting its value
413 * to their thread port. There are two kinds:
414 * 1. async drainers,
415 * 2. lock transfer handlers.
416 *
417 * Drainers from the first category are _dispatch_queue_class_invoke and its
418 * stealers. Those drainers always try to reserve width at the same time they
419 * acquire the drain lock, to make sure they can make progress, and else exit
420 * quickly.
421 *
422 * Drainers from the second category are `slow` work items. Those run on the
423 * calling thread, and when done, try to transfer the width they own to the
424 * possible next `slow` work item, and if there is no such item, they relinquish
425 * that right. To do so, prior to taking any decision, they also try to own
426 * the full "barrier" width on the given queue.
427 *
428 * see _dispatch_try_lock_transfer_or_wakeup
429 *
430 *******************************************************************************
431 *
432 * Enqueuing and wakeup rules
433 *
434 * Nobody should enqueue any dispatch object if it has no chance to make any
435 * progress. That means that queues that:
436 * - are suspended
437 * - have reached or overflowed their capacity
438 * - are currently draining
439 * - are already enqueued
440 *
441 * should not be enqueued.
442 *
443 *******************************************************************************
444 *
445 * Lock transfer
446 *
447 * The point of the lock transfer code is to allow pure dispatch_*_sync()
448 * callers to make progress without requiring the bring up of a drainer.
449 * There are two reasons for that:
450 *
451 * - performance, as draining has to give up for dispatch_*_sync() work items,
452 * so waking up a queue for this is wasteful.
453 *
454 * - liveness, as dispatch_*_sync() burns threads waiting; you're more
455 * likely to hit various thread limits and may not have any drainer being
456 * brought up if the process hits a limit.
457 *
458 *
459 * Lock transfer happens at the end of the dispatch_*_sync() codepaths:
460 *
461 * - obviously once a dispatch_*_sync() work item finishes, it owns queue
462 * width and it should try to transfer that ownership to the possible next
463 * queued item if it is a dispatch_*_sync() item
464 *
465 * - just before such a work item blocks, to make sure that the work item
466 * itself isn't its own last chance to be woken up. That can happen when
467 * a Drainer pops everything from the queue, and a dispatch_*_sync()
468 * work item has taken the slow path and then was preempted for a long time.
469 *
470 * That's why such work items, if first in the queue, must try a lock
471 * transfer procedure.
472 *
473 *
474 * For transfers where a partial width is owned, we give back that width.
475 * If the queue state is "idle" again, we attempt to acquire the full width.
476 * If that succeeds, this falls back to the full barrier lock
477 * transfer, else it wakes up the queue according to its state.
478 *
479 * For full barrier transfers, if items eligible for lock transfer are found,
480 * then they are woken up and the lock transfer is successful.
481 *
482 * If none are found, the full barrier width is released. If by doing so the
483 * DIRTY bit is found, releasing the full barrier width fails and transferring
484 * the lock is retried from scratch.
485 */
486
487 #define DISPATCH_QUEUE_STATE_INIT_VALUE(width) \
488 ((DISPATCH_QUEUE_WIDTH_FULL - (width)) << DISPATCH_QUEUE_WIDTH_SHIFT)
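/*
 * A worked example (illustrative only): for a serial queue, a single work
 * item consumes the only available slot, which makes the width field
 * overflow into the "queue full" bit.
 *
 * <code>
 * _Static_assert(DISPATCH_QUEUE_STATE_INIT_VALUE(1) +
 *         DISPATCH_QUEUE_WIDTH_INTERVAL == DISPATCH_QUEUE_WIDTH_FULL_BIT,
 *         "one work item fills a serial queue");
 * </code>
 */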
489
490 /* Magic dq_state values for global queues: they have QUEUE_FULL and IN_BARRIER
491 * set to force the slowpath in both dispatch_barrier_sync() and dispatch_sync()
492 */
493 #define DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE \
494 (DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER)
495
496 #define DISPATCH_QUEUE_SERIAL_DRAIN_OWNED \
497 (DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_INTERVAL)
498
499 DISPATCH_CLASS_DECL(queue);
500 #if !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
501 struct dispatch_queue_s {
502 _DISPATCH_QUEUE_HEADER(queue);
503 DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only
504 } DISPATCH_QUEUE_ALIGN;
505 #endif // !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
506
507 DISPATCH_INTERNAL_SUBCLASS_DECL(queue_serial, queue);
508 DISPATCH_INTERNAL_SUBCLASS_DECL(queue_concurrent, queue);
509 DISPATCH_INTERNAL_SUBCLASS_DECL(queue_main, queue);
510 DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue);
511 DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue);
512 DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue);
513
514 OS_OBJECT_INTERNAL_CLASS_DECL(dispatch_queue_specific_queue, dispatch_queue,
515 DISPATCH_OBJECT_VTABLE_HEADER(dispatch_queue_specific_queue));
516
517 typedef union {
518 struct os_mpsc_queue_s *_oq;
519 struct dispatch_queue_s *_dq;
520 struct dispatch_source_s *_ds;
521 struct dispatch_mach_s *_dm;
522 struct dispatch_queue_specific_queue_s *_dqsq;
523 struct dispatch_timer_aggregate_s *_dta;
524 #if USE_OBJC
525 os_mpsc_queue_t _ojbc_oq;
526 dispatch_queue_t _objc_dq;
527 dispatch_source_t _objc_ds;
528 dispatch_mach_t _objc_dm;
529 dispatch_queue_specific_queue_t _objc_dqsq;
530 dispatch_timer_aggregate_t _objc_dta;
531 #endif
532 } dispatch_queue_class_t __attribute__((__transparent_union__));
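/*
 * The transparent union above lets internal entry points accept any
 * queue-like object without casts. Illustrative sketch only; the function
 * names below are hypothetical.
 *
 * <code>
 * void _example_wakeup(dispatch_queue_class_t dqu);
 *
 * static inline void
 * _example_wakeup_both(dispatch_queue_t dq, dispatch_source_t ds)
 * {
 *         _example_wakeup(dq); // no cast needed
 *         _example_wakeup(ds); // ditto for sources
 * }
 * </code>
 */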
533
534 typedef struct dispatch_thread_context_s *dispatch_thread_context_t;
535 typedef struct dispatch_thread_context_s {
536 dispatch_thread_context_t dtc_prev;
537 const void *dtc_key;
538 union {
539 size_t dtc_apply_nesting;
540 dispatch_io_t dtc_io_in_barrier;
541 };
542 } dispatch_thread_context_s;
543
544 typedef struct dispatch_thread_frame_s *dispatch_thread_frame_t;
545 typedef struct dispatch_thread_frame_s {
546 // must be in the same order as our TSD keys!
547 dispatch_queue_t dtf_queue;
548 dispatch_thread_frame_t dtf_prev;
549 struct dispatch_object_s *dtf_deferred;
550 } dispatch_thread_frame_s;
551
552 DISPATCH_ENUM(dispatch_queue_wakeup_target, long,
553 DISPATCH_QUEUE_WAKEUP_NONE = 0,
554 DISPATCH_QUEUE_WAKEUP_TARGET,
555 DISPATCH_QUEUE_WAKEUP_MGR,
556 );
557
558 void _dispatch_queue_class_override_drainer(dispatch_queue_t dqu,
559 pthread_priority_t pp, dispatch_wakeup_flags_t flags);
560 void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, pthread_priority_t pp,
561 dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target);
562
563 void _dispatch_queue_destroy(dispatch_queue_t dq);
564 void _dispatch_queue_dispose(dispatch_queue_t dq);
565 void _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq);
566 void _dispatch_queue_suspend(dispatch_queue_t dq);
567 void _dispatch_queue_resume(dispatch_queue_t dq, bool activate);
568 void _dispatch_queue_finalize_activation(dispatch_queue_t dq);
569 void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags);
570 void _dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n);
571 void _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
572 pthread_priority_t pp);
573 void _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq);
574 void _dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
575 dispatch_wakeup_flags_t flags);
576 dispatch_queue_t _dispatch_queue_serial_drain(dispatch_queue_t dq,
577 dispatch_invoke_flags_t flags, uint64_t *owned,
578 struct dispatch_object_s **dc_ptr);
579 void _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq,
580 dispatch_invoke_flags_t flags, uint64_t to_unlock,
581 struct dispatch_object_s *dc);
582 void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t
583 dqsq);
584 void _dispatch_root_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
585 dispatch_wakeup_flags_t flags);
586 void _dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq,
587 struct dispatch_object_s *dou, pthread_priority_t pp);
588 void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq);
589 void _dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
590 dispatch_wakeup_flags_t flags);
591 void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
592 dispatch_wakeup_flags_t flags);
593 void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq);
594 void _dispatch_runloop_queue_dispose(dispatch_queue_t dq);
595 void _dispatch_mgr_queue_drain(void);
596 #if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
597 void _dispatch_mgr_priority_init(void);
598 #else
599 static inline void _dispatch_mgr_priority_init(void) {}
600 #endif
601 #if DISPATCH_USE_KEVENT_WORKQUEUE
602 void _dispatch_kevent_workqueue_init(void);
603 #else
604 static inline void _dispatch_kevent_workqueue_init(void) {}
605 #endif
606 void _dispatch_sync_recurse_invoke(void *ctxt);
607 void _dispatch_apply_invoke(void *ctxt);
608 void _dispatch_apply_redirect_invoke(void *ctxt);
609 void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt,
610 dispatch_function_t func);
611 void _dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt,
612 dispatch_function_t func);
613
614 #if DISPATCH_DEBUG
615 void dispatch_debug_queue(dispatch_queue_t dq, const char* str);
616 #else
617 static inline void dispatch_debug_queue(dispatch_queue_t dq DISPATCH_UNUSED,
618 const char* str DISPATCH_UNUSED) {}
619 #endif
620
621 size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz);
622 size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf,
623 size_t bufsiz);
624
625 #define DISPATCH_QUEUE_QOS_COUNT 6
626 #define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_QOS_COUNT * 2)
627
628 // must be in lowest to highest qos order (as encoded in pthread_priority_t)
629 // overcommit qos index values are odd (low bit set)
630 enum {
631 DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
632 DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT,
633 DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS,
634 DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT,
635 DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS,
636 DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT,
637 DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS,
638 DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT,
639 DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS,
640 DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT,
641 DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS,
642 DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT,
643 _DISPATCH_ROOT_QUEUE_IDX_COUNT,
644 };
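/*
 * Illustrative sketch of the indexing scheme above (not actual libdispatch
 * code): each QoS level owns two consecutive slots, the odd one being the
 * overcommit variant.
 *
 * <code>
 * static inline int
 * _example_root_queue_index(int qos_level, bool overcommit)
 * {
 *         // qos_level in [0, DISPATCH_QUEUE_QOS_COUNT), lowest QoS first
 *         return 2 * qos_level + (overcommit ? 1 : 0);
 * }
 * </code>
 */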
645
646 extern unsigned long volatile _dispatch_queue_serial_numbers;
647 extern struct dispatch_queue_s _dispatch_root_queues[];
648 extern struct dispatch_queue_s _dispatch_mgr_q;
649 void _dispatch_root_queues_init(void);
650
651 #if HAVE_PTHREAD_WORKQUEUE_QOS
652 extern pthread_priority_t _dispatch_background_priority;
653 extern pthread_priority_t _dispatch_user_initiated_priority;
654 #endif
655
656 typedef uint8_t _dispatch_qos_class_t;
657
658 #pragma mark -
659 #pragma mark dispatch_queue_attr_t
660
661 typedef enum {
662 _dispatch_queue_attr_overcommit_unspecified = 0,
663 _dispatch_queue_attr_overcommit_enabled,
664 _dispatch_queue_attr_overcommit_disabled,
665 } _dispatch_queue_attr_overcommit_t;
666
667 DISPATCH_CLASS_DECL(queue_attr);
668 struct dispatch_queue_attr_s {
669 OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr);
670 _dispatch_qos_class_t dqa_qos_class;
671 int8_t dqa_relative_priority;
672 uint16_t dqa_overcommit:2;
673 uint16_t dqa_autorelease_frequency:2;
674 uint16_t dqa_concurrent:1;
675 uint16_t dqa_inactive:1;
676 };
677
678 enum {
679 DQA_INDEX_UNSPECIFIED_OVERCOMMIT = 0,
680 DQA_INDEX_NON_OVERCOMMIT,
681 DQA_INDEX_OVERCOMMIT,
682 };
683
684 #define DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT 3
685
686 enum {
687 DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT =
688 DISPATCH_AUTORELEASE_FREQUENCY_INHERIT,
689 DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM =
690 DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM,
691 DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER =
692 DISPATCH_AUTORELEASE_FREQUENCY_NEVER,
693 };
694
695 #define DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT 3
696
697 enum {
698 DQA_INDEX_CONCURRENT = 0,
699 DQA_INDEX_SERIAL,
700 };
701
702 #define DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT 2
703
704 enum {
705 DQA_INDEX_ACTIVE = 0,
706 DQA_INDEX_INACTIVE,
707 };
708
709 #define DISPATCH_QUEUE_ATTR_INACTIVE_COUNT 2
710
711 typedef enum {
712 DQA_INDEX_QOS_CLASS_UNSPECIFIED = 0,
713 DQA_INDEX_QOS_CLASS_MAINTENANCE,
714 DQA_INDEX_QOS_CLASS_BACKGROUND,
715 DQA_INDEX_QOS_CLASS_UTILITY,
716 DQA_INDEX_QOS_CLASS_DEFAULT,
717 DQA_INDEX_QOS_CLASS_USER_INITIATED,
718 DQA_INDEX_QOS_CLASS_USER_INTERACTIVE,
719 } _dispatch_queue_attr_index_qos_class_t;
720
721 #define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY)
722
723 extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[]
724 [DISPATCH_QUEUE_ATTR_PRIO_COUNT]
725 [DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT]
726 [DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT]
727 [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT]
728 [DISPATCH_QUEUE_ATTR_INACTIVE_COUNT];
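/*
 * Illustrative lookup into the attribute table above (not actual libdispatch
 * code); the indexes follow the array dimensions in declaration order:
 * QoS class, relative priority (as a non-negative offset), overcommit,
 * autorelease frequency, concurrency, inactive.
 *
 * <code>
 * const struct dispatch_queue_attr_s *dqa =
 *         &_dispatch_queue_attrs[DQA_INDEX_QOS_CLASS_DEFAULT][0]
 *                 [DQA_INDEX_UNSPECIFIED_OVERCOMMIT]
 *                 [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT]
 *                 [DQA_INDEX_SERIAL][DQA_INDEX_ACTIVE];
 * </code>
 */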
729
730 dispatch_queue_attr_t _dispatch_get_default_queue_attr(void);
731
732 #pragma mark -
733 #pragma mark dispatch_continuation_t
734
735 // If dc_flags is less than 0x1000, then the object is a continuation.
736 // Otherwise, the object has a private layout and memory management rules. The
737 // layout until after 'do_next' must align with normal objects.
738 #if __LP64__
739 #define DISPATCH_CONTINUATION_HEADER(x) \
740 union { \
741 const void *do_vtable; \
742 uintptr_t dc_flags; \
743 }; \
744 union { \
745 pthread_priority_t dc_priority; \
746 int dc_cache_cnt; \
747 uintptr_t dc_pad; \
748 }; \
749 struct dispatch_##x##_s *volatile do_next; \
750 struct voucher_s *dc_voucher; \
751 dispatch_function_t dc_func; \
752 void *dc_ctxt; \
753 void *dc_data; \
754 void *dc_other
755 #define _DISPATCH_SIZEOF_PTR 8
756 #elif OS_OBJECT_HAVE_OBJC1
757 #define DISPATCH_CONTINUATION_HEADER(x) \
758 dispatch_function_t dc_func; \
759 union { \
760 pthread_priority_t dc_priority; \
761 int dc_cache_cnt; \
762 uintptr_t dc_pad; \
763 }; \
764 struct voucher_s *dc_voucher; \
765 union { \
766 const void *do_vtable; \
767 uintptr_t dc_flags; \
768 }; \
769 struct dispatch_##x##_s *volatile do_next; \
770 void *dc_ctxt; \
771 void *dc_data; \
772 void *dc_other
773 #define _DISPATCH_SIZEOF_PTR 4
774 #else
775 #define DISPATCH_CONTINUATION_HEADER(x) \
776 union { \
777 const void *do_vtable; \
778 uintptr_t dc_flags; \
779 }; \
780 union { \
781 pthread_priority_t dc_priority; \
782 int dc_cache_cnt; \
783 uintptr_t dc_pad; \
784 }; \
785 struct voucher_s *dc_voucher; \
786 struct dispatch_##x##_s *volatile do_next; \
787 dispatch_function_t dc_func; \
788 void *dc_ctxt; \
789 void *dc_data; \
790 void *dc_other
791 #define _DISPATCH_SIZEOF_PTR 4
792 #endif
793 #define _DISPATCH_CONTINUATION_PTRS 8
794 #if DISPATCH_HW_CONFIG_UP
795 // UP devices don't contend on continuations so we don't need to force them to
796 // occupy a whole cacheline (which is intended to avoid contention)
797 #define DISPATCH_CONTINUATION_SIZE \
798 (_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR)
799 #else
800 #define DISPATCH_CONTINUATION_SIZE ROUND_UP_TO_CACHELINE_SIZE( \
801 (_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR))
802 #endif
803 #define ROUND_UP_TO_CONTINUATION_SIZE(x) \
804 (((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
805 ~(DISPATCH_CONTINUATION_SIZE - 1u))
806
807 // continuation is a dispatch_sync or dispatch_barrier_sync
808 #define DISPATCH_OBJ_SYNC_SLOW_BIT 0x001ul
809 // continuation acts as a barrier
810 #define DISPATCH_OBJ_BARRIER_BIT 0x002ul
811 // continuation resources are freed on run
812 // this is set for async work items and for non-event_handler source handlers
813 #define DISPATCH_OBJ_CONSUME_BIT 0x004ul
814 // continuation has a group in dc_data
815 #define DISPATCH_OBJ_GROUP_BIT 0x008ul
816 // continuation function is a block (copied in dc_ctxt)
817 #define DISPATCH_OBJ_BLOCK_BIT 0x010ul
818 // continuation function is a block with private data, implies BLOCK_BIT
819 #define DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT 0x020ul
820 // source handler requires fetching context from source
821 #define DISPATCH_OBJ_CTXT_FETCH_BIT 0x040ul
822 // use the voucher from the continuation even if the queue has voucher set
823 #define DISPATCH_OBJ_ENFORCE_VOUCHER 0x080ul
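/*
 * Illustrative check against the flag bits above (not actual libdispatch
 * code): a continuation submitted with dispatch_barrier_async() would carry
 * at least the barrier and consume bits.
 *
 * <code>
 * static inline bool
 * _example_dc_is_async_barrier(uintptr_t dc_flags)
 * {
 *         const uintptr_t bits = DISPATCH_OBJ_BARRIER_BIT |
 *                         DISPATCH_OBJ_CONSUME_BIT;
 *         return (dc_flags & bits) == bits;
 * }
 * </code>
 */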
824
825 struct dispatch_continuation_s {
826 struct dispatch_object_s _as_do[0];
827 DISPATCH_CONTINUATION_HEADER(continuation);
828 };
829 typedef struct dispatch_continuation_s *dispatch_continuation_t;
830
831 typedef struct dispatch_continuation_vtable_s {
832 _OS_OBJECT_CLASS_HEADER();
833 DISPATCH_INVOKABLE_VTABLE_HEADER(dispatch_continuation);
834 } *dispatch_continuation_vtable_t;
835
836 #ifndef DISPATCH_CONTINUATION_CACHE_LIMIT
837 #if TARGET_OS_EMBEDDED
838 #define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads
839 #define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 16
840 #else
841 #define DISPATCH_CONTINUATION_CACHE_LIMIT 1024
842 #define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 128
843 #endif
844 #endif
845
846 dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void);
847 void _dispatch_continuation_free_to_heap(dispatch_continuation_t c);
848 void _dispatch_continuation_async(dispatch_queue_t dq,
849 dispatch_continuation_t dc);
850 void _dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq,
851 dispatch_invoke_flags_t flags);
852 void _dispatch_continuation_invoke(dispatch_object_t dou,
853 voucher_t override_voucher, dispatch_invoke_flags_t flags);
854
855 #if DISPATCH_USE_MEMORYPRESSURE_SOURCE
856 extern int _dispatch_continuation_cache_limit;
857 void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c);
858 #else
859 #define _dispatch_continuation_cache_limit DISPATCH_CONTINUATION_CACHE_LIMIT
860 #define _dispatch_continuation_free_to_cache_limit(c) \
861 _dispatch_continuation_free_to_heap(c)
862 #endif
863
864 #pragma mark -
865 #pragma mark dispatch_continuation vtables
866
867 enum {
868 _DC_USER_TYPE = 0,
869 DC_ASYNC_REDIRECT_TYPE,
870 DC_MACH_SEND_BARRRIER_DRAIN_TYPE,
871 DC_MACH_SEND_BARRIER_TYPE,
872 DC_MACH_RECV_BARRIER_TYPE,
873 #if HAVE_PTHREAD_WORKQUEUE_QOS
874 DC_OVERRIDE_STEALING_TYPE,
875 DC_OVERRIDE_OWNING_TYPE,
876 #endif
877 _DC_MAX_TYPE,
878 };
879
880 DISPATCH_ALWAYS_INLINE
881 static inline unsigned long
882 dc_type(dispatch_continuation_t dc)
883 {
884 return dx_type(dc->_as_do);
885 }
886
887 DISPATCH_ALWAYS_INLINE
888 static inline unsigned long
889 dc_subtype(dispatch_continuation_t dc)
890 {
891 return dx_subtype(dc->_as_do);
892 }
893
894 extern const struct dispatch_continuation_vtable_s
895 _dispatch_continuation_vtables[_DC_MAX_TYPE];
896
897 void
898 _dispatch_async_redirect_invoke(dispatch_continuation_t dc,
899 dispatch_invoke_flags_t flags);
900
901 #if HAVE_PTHREAD_WORKQUEUE_QOS
902 void
903 _dispatch_queue_override_invoke(dispatch_continuation_t dc,
904 dispatch_invoke_flags_t flags);
905 #endif
906
907 #define DC_VTABLE(name) (&_dispatch_continuation_vtables[DC_##name##_TYPE])
908
909 #define DC_VTABLE_ENTRY(name, ...) \
910 [DC_##name##_TYPE] = { \
911 .do_type = DISPATCH_CONTINUATION_TYPE(name), \
912 __VA_ARGS__ \
913 }
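/*
 * Illustrative use of the helpers above (a sketch, not a complete example):
 * internal continuations are tagged by pointing do_vtable at the matching
 * entry of _dispatch_continuation_vtables.
 *
 * <code>
 * dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER);
 * </code>
 */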
914
915 #pragma mark -
916 #pragma mark _dispatch_set_priority_and_voucher
917 #if HAVE_PTHREAD_WORKQUEUE_QOS
918
919 void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri,
920 mach_voucher_t kv);
921 voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t pri,
922 voucher_t voucher, _dispatch_thread_set_self_t flags);
923
924 #endif
925 #pragma mark -
926 #pragma mark dispatch_apply_t
927
928 struct dispatch_apply_s {
929 size_t volatile da_index, da_todo;
930 size_t da_iterations, da_nested;
931 dispatch_continuation_t da_dc;
932 dispatch_thread_event_s da_event;
933 dispatch_invoke_flags_t da_flags;
934 uint32_t da_thr_cnt;
935 };
936 typedef struct dispatch_apply_s *dispatch_apply_t;
937
938 #pragma mark -
939 #pragma mark dispatch_block_t
940
941 #ifdef __BLOCKS__
942
943 #define DISPATCH_BLOCK_API_MASK (0x80u - 1)
944 #define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31)
945 #define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30)
946
947 #define DISPATCH_BLOCK_PRIVATE_DATA_HEADER() \
948 unsigned long dbpd_magic; \
949 dispatch_block_flags_t dbpd_flags; \
950 unsigned int volatile dbpd_atomic_flags; \
951 int volatile dbpd_performed; \
952 pthread_priority_t dbpd_priority; \
953 voucher_t dbpd_voucher; \
954 dispatch_block_t dbpd_block; \
955 dispatch_group_t dbpd_group; \
956 os_mpsc_queue_t volatile dbpd_queue; \
957 mach_port_t dbpd_thread;
958
959 #if !defined(__cplusplus)
960 struct dispatch_block_private_data_s {
961 DISPATCH_BLOCK_PRIVATE_DATA_HEADER();
962 };
963 #endif
964 typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t;
965
966 // dbpd_atomic_flags bits
967 #define DBF_CANCELED 1u // block has been cancelled
968 #define DBF_WAITING 2u // dispatch_block_wait has begun
969 #define DBF_WAITED 4u // dispatch_block_wait has finished without timeout
970 #define DBF_PERFORM 8u // dispatch_block_perform: don't group_leave
971
972 #define DISPATCH_BLOCK_PRIVATE_DATA_MAGIC 0xD159B10C // 0xDISPatch_BLOCk
973
974 // struct for synchronous perform: no group_leave at end of invoke
975 #define DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block) \
976 { \
977 .dbpd_magic = DISPATCH_BLOCK_PRIVATE_DATA_MAGIC, \
978 .dbpd_flags = (flags), \
979 .dbpd_atomic_flags = DBF_PERFORM, \
980 .dbpd_block = (block), \
981 }
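/*
 * Illustrative use of the initializer above (a sketch): a synchronous
 * perform can keep its private data on the stack, with DBF_PERFORM set so
 * that no group_leave happens at the end of the invoke.
 *
 * <code>
 * struct dispatch_block_private_data_s dbpds =
 *         DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block);
 * </code>
 */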
982
983 dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags,
984 voucher_t voucher, pthread_priority_t priority, dispatch_block_t block);
985 void _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd);
986 void _dispatch_block_sync_invoke(void *block);
987
988 void _dispatch_continuation_init_slow(dispatch_continuation_t dc,
989 dispatch_queue_class_t dqu, dispatch_block_flags_t flags);
990 void _dispatch_continuation_update_bits(dispatch_continuation_t dc,
991 uintptr_t dc_flags);
992
993 bool _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
994 dispatch_function_t func);
995
996 /* exported for tests in dispatch_trysync.c */
997 DISPATCH_EXPORT DISPATCH_NOTHROW
998 bool _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt,
999 dispatch_function_t f);
1000
1001 #endif /* __BLOCKS__ */
1002
1003 typedef struct dispatch_pthread_root_queue_observer_hooks_s {
1004 void (*queue_will_execute)(dispatch_queue_t queue);
1005 void (*queue_did_execute)(dispatch_queue_t queue);
1006 } dispatch_pthread_root_queue_observer_hooks_s;
1007 typedef dispatch_pthread_root_queue_observer_hooks_s
1008 *dispatch_pthread_root_queue_observer_hooks_t;
1009
1010 #ifdef __APPLE__
1011 #define DISPATCH_IOHID_SPI 1
1012
1013 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
1014 DISPATCH_NOTHROW DISPATCH_NONNULL4
1015 dispatch_queue_t
1016 _dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID(
1017 const char *label, unsigned long flags, const pthread_attr_t *attr,
1018 dispatch_pthread_root_queue_observer_hooks_t observer_hooks,
1019 dispatch_block_t configure);
1020
1021 DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW
1022 bool
1023 _dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID(
1024 dispatch_queue_t queue);
1025
1026 #endif // __APPLE__
1027
1028 #endif