/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */
#ifndef __DISPATCH_QUEUE_INTERNAL__
#define __DISPATCH_QUEUE_INTERNAL__

#ifndef __DISPATCH_INDIRECT__
#error "Please #include <dispatch/dispatch.h> instead of this file directly."
#include <dispatch/base.h> // for HeaderDoc
#endif

#if defined(__BLOCKS__) && !defined(DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES)
#define DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES 1 // <rdar://problem/10719357>
#endif
/* x86 & cortex-a8 have a 64 byte cacheline */
#define DISPATCH_CACHELINE_SIZE 64u
#define ROUND_UP_TO_CACHELINE_SIZE(x) \
		(((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \
		~(DISPATCH_CACHELINE_SIZE - 1u))
#define DISPATCH_CACHELINE_ALIGN \
		__attribute__((__aligned__(DISPATCH_CACHELINE_SIZE)))

#define DISPATCH_CACHELINE_PAD_SIZE(type) \
		(roundup(sizeof(type), DISPATCH_CACHELINE_SIZE) - sizeof(type))
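
/*
 * Editor's note (illustrative sketch, not part of libdispatch): the rounding
 * macro adds (DISPATCH_CACHELINE_SIZE - 1) and masks off the low bits, so for
 * the 64-byte cacheline defined above:
 *
 *     ROUND_UP_TO_CACHELINE_SIZE(1)   == 64
 *     ROUND_UP_TO_CACHELINE_SIZE(64)  == 64
 *     ROUND_UP_TO_CACHELINE_SIZE(65)  == 128
 *
 * and DISPATCH_CACHELINE_PAD_SIZE(type) yields the number of pad bytes that
 * must follow an instance of `type` so that the next object starts on a fresh
 * cacheline (0 when sizeof(type) is already a multiple of 64).
 */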
#pragma mark dispatch_queue_t

DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
	DQF_NONE                = 0x00000000,
	DQF_AUTORELEASE_ALWAYS  = 0x00010000,
	DQF_AUTORELEASE_NEVER   = 0x00020000,
#define _DQF_AUTORELEASE_MASK 0x00030000
	DQF_THREAD_BOUND        = 0x00040000, // queue is bound to a thread
	DQF_BARRIER_BIT         = 0x00080000, // queue is a barrier on its target
	DQF_TARGETED            = 0x00100000, // queue is targeted by another object
	DQF_LABEL_NEEDS_FREE    = 0x00200000, // queue label was strduped; need to free it
	DQF_CANNOT_TRYSYNC      = 0x00400000,
	DQF_RELEASED            = 0x00800000, // xref_cnt == -1
	DQF_LEGACY              = 0x01000000,
	// only applies to sources
	//
	// Assuming DSF_ARMED (a), DSF_DEFERRED_DELETE (p), DSF_DELETED (d):
	//
	// a--
	//    source states for regular operations
	//    (delivering event / waiting for event)
	//
	// ap-
	//    Either armed for deferred deletion delivery, waiting for an EV_DELETE,
	//    and the next state will be -pd (EV_DELETE delivered),
	//    Or, a cancellation raced with an event delivery and failed
	//    (EINPROGRESS), and when the event delivery happens, the next state
	//    will be -p- (EV_DELETE to be sent).
	//
	// -pd
	//    Received EV_DELETE (from ap-), needs to unregister ds_refs, the muxnote
	//    is gone from the kernel. Next state will be --d.
	//
	// -p-
	//    Received an EV_ONESHOT event (from a--), or the delivery of an event
	//    causing the cancellation to fail with EINPROGRESS was delivered
	//    (from ap-). The muxnote still lives, next state will be --d.
	//
	// --d
	//    Final state of the source, the muxnote is gone from the kernel and
	//    ds_refs is unregistered. The source can safely be released.
	//
	// a-d, -ad (INVALID)
	//    Setting DSF_DELETED should also always atomically clear DSF_ARMED. If
	//    the muxnote is gone from the kernel, it makes no sense whatsoever to
	//    have it armed. And generally speaking, once `d` or `p` has been set,
	//    `a` cannot do a cleared -> set transition anymore
	//    (see _dispatch_source_try_set_armed).
	//
	DSF_WLH_CHANGED         = 0x04000000,
	DSF_CANCEL_WAITER       = 0x08000000, // synchronous waiters for cancel
	DSF_CANCELED            = 0x10000000, // cancellation has been requested
	DSF_ARMED               = 0x20000000, // source is armed
	DSF_DEFERRED_DELETE     = 0x40000000, // source is pending delete
	DSF_DELETED             = 0x80000000, // source muxnote is deleted
#define DSF_STATE_MASK (DSF_ARMED | DSF_DEFERRED_DELETE | DSF_DELETED)
);

#define DQF_FLAGS_MASK        ((dispatch_queue_flags_t)0xffff0000)
#define DQF_WIDTH_MASK        ((dispatch_queue_flags_t)0x0000ffff)
#define DQF_WIDTH(n)          ((dispatch_queue_flags_t)(uint16_t)(n))
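
/*
 * Editor's note (illustrative, based on the masks above): the DQF_* flag bits
 * and the queue width share the same 32-bit `dq_atomic_flags` word declared in
 * _DISPATCH_QUEUE_HEADER() below. The flags occupy the high 16 bits
 * (DQF_FLAGS_MASK) and the width the low 16 bits (DQF_WIDTH_MASK), so a
 * hypothetical read could look like:
 *
 *     uint32_t dqf   = os_atomic_load2o(dq, dq_atomic_flags, relaxed);
 *     uint16_t width = (uint16_t)(dqf & DQF_WIDTH_MASK);   // dq_width
 *     bool targeted  = (dqf & DQF_TARGETED) != 0;          // flag bits
 */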
#define _DISPATCH_QUEUE_HEADER(x) \
	struct os_mpsc_queue_s _as_oq[0]; \
	DISPATCH_OBJECT_HEADER(x); \
	_OS_MPSC_QUEUE_FIELDS(dq, dq_state); \
	uint32_t dq_side_suspend_cnt; \
	dispatch_unfair_lock_s dq_sidelock; \
	union { \
		dispatch_queue_t dq_specific_q; \
		struct dispatch_source_refs_s *ds_refs; \
		struct dispatch_timer_source_refs_s *ds_timer_refs; \
		struct dispatch_mach_recv_refs_s *dm_recv_refs; \
	}; \
	DISPATCH_UNION_LE(uint32_t volatile dq_atomic_flags, \
		const uint16_t dq_width, \
		const uint16_t __dq_opaque \
	); \
	DISPATCH_INTROSPECTION_QUEUE_HEADER
	/* LP64: 32bit hole */

#define DISPATCH_QUEUE_HEADER(x) \
	struct dispatch_queue_s _as_dq[0]; \
	_DISPATCH_QUEUE_HEADER(x)

struct _dispatch_unpadded_queue_s {
	_DISPATCH_QUEUE_HEADER(dummy);
};

#define DISPATCH_QUEUE_CACHELINE_PAD \
		DISPATCH_CACHELINE_PAD_SIZE(struct _dispatch_unpadded_queue_s)

#define DISPATCH_QUEUE_CACHELINE_PADDING \
		char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
/*
 *******************************************************************************
 *
 * dispatch queues `dq_state` demystified
 *
 *******************************************************************************
 *
 * Most Significant 32 bit Word
 * ----------------------------
 *
 * sc: suspend count (bits 63 - 58)
 *    The suspend count unsurprisingly holds the suspend count of the queue.
 *    Only 6 bits are stored inline. Extra counts are transferred to a side
 *    suspend count and, when that has happened, the ssc: bit is set.
 */
#define DISPATCH_QUEUE_SUSPEND_INTERVAL		0x0400000000000000ull
#define DISPATCH_QUEUE_SUSPEND_HALF			0x20u
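
/*
 * Editor's note (illustrative sketch, not a libdispatch API): because the
 * suspend count lives in the topmost bits of dq_state, suspending a queue is
 * an atomic 64-bit add of DISPATCH_QUEUE_SUSPEND_INTERVAL, and the inline
 * count can be recovered with a mask and divide, e.g.:
 *
 *     uint64_t state = os_atomic_load2o(dq, dq_state, relaxed);
 *     uint32_t sc    = (uint32_t)((state & DISPATCH_QUEUE_SUSPEND_BITS_MASK) /
 *                     DISPATCH_QUEUE_SUSPEND_INTERVAL);
 *
 * The recovered value is only the inline part: when the count no longer fits
 * inline, the overflow is parked in dq_side_suspend_cnt and
 * DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT is set.
 */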
/*
 * ssc: side suspend count (bit 57)
 *    This bit means that the total suspend count didn't fit in the inline
 *    suspend count, and that there are additional suspend counts stored in the
 *    `dq_side_suspend_cnt` field.
 */
#define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT	0x0200000000000000ull

/*
 * i: inactive bit (bit 56)
 *    This bit means that the object is inactive (see dispatch_activate)
 */
#define DISPATCH_QUEUE_INACTIVE				0x0100000000000000ull

/*
 * na: needs activation (bit 55)
 *    This bit is set if the object is created inactive. It tells
 *    dispatch_queue_wakeup to perform various tasks at first wakeup.
 *
 *    This bit is cleared as part of the first wakeup. Having that bit prevents
 *    the object from being woken up (because _dq_state_should_wakeup will say
 *    no), except in the dispatch_activate/dispatch_resume codepath.
 */
#define DISPATCH_QUEUE_NEEDS_ACTIVATION		0x0080000000000000ull

/*
 * This mask covers the suspend count (sc), side suspend count bit (ssc),
 * inactive (i) and needs activation (na) bits
 */
#define DISPATCH_QUEUE_SUSPEND_BITS_MASK	0xff80000000000000ull
/*
 * ib: in barrier (bit 54)
 *    This bit is set when the queue is currently executing a barrier
 */
#define DISPATCH_QUEUE_IN_BARRIER			0x0040000000000000ull

/*
 * qf: queue full (bit 53)
 *    This bit is a subtle hack that allows checking, for any queue width,
 *    whether the full width of the queue is used or reserved (depending on the
 *    context); in other words, whether the queue has reached or overflowed its
 *    capacity.
 */
#define DISPATCH_QUEUE_WIDTH_FULL_BIT		0x0020000000000000ull
#define DISPATCH_QUEUE_WIDTH_FULL			0x1000ull
#define DISPATCH_QUEUE_WIDTH_POOL (DISPATCH_QUEUE_WIDTH_FULL - 1)
#define DISPATCH_QUEUE_WIDTH_MAX  (DISPATCH_QUEUE_WIDTH_FULL - 2)
#define DISPATCH_QUEUE_USES_REDIRECTION(width) \
		({ uint16_t _width = (width); \
		_width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; })
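
/*
 * Editor's note (illustrative): with DISPATCH_QUEUE_WIDTH_FULL == 0x1000, the
 * redirection predicate distinguishes the three kinds of queue widths:
 *
 *     DISPATCH_QUEUE_USES_REDIRECTION(1)                         // false: serial
 *     DISPATCH_QUEUE_USES_REDIRECTION(DISPATCH_QUEUE_WIDTH_MAX)  // true:  concurrent
 *     DISPATCH_QUEUE_USES_REDIRECTION(DISPATCH_QUEUE_WIDTH_POOL) // false: global/pool
 *
 * i.e. only user-created concurrent queues (width strictly between 1 and the
 * pool width) redirect their async work items to their root queue.
 */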
/*
 * w:  width (bits 52 - 41)
 *    This encodes how many work items are in flight. Barriers hold `dq_width`
 *    of them while they run. This is encoded as a signed offset with respect
 *    to full use, where the negative values represent how many available slots
 *    are left, and the positive values how many work items exceed our
 *    capacity.
 *
 *    When this value is positive, the `qf` bit is always set to 1.
 */
#define DISPATCH_QUEUE_WIDTH_INTERVAL		0x0000020000000000ull
#define DISPATCH_QUEUE_WIDTH_MASK			0x003ffe0000000000ull
#define DISPATCH_QUEUE_WIDTH_SHIFT			41
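
/*
 * Editor's note (illustrative sketch): the width field is biased so that a
 * fully available queue encodes DISPATCH_QUEUE_WIDTH_FULL - dq_width (see
 * DISPATCH_QUEUE_STATE_INIT_VALUE below). Taking one slot adds
 * DISPATCH_QUEUE_WIDTH_INTERVAL to dq_state; the remaining capacity can be
 * recovered roughly like this (assuming the queue-full bit is not set):
 *
 *     uint64_t w_bits    = (dq_state & DISPATCH_QUEUE_WIDTH_MASK) >>
 *                     DISPATCH_QUEUE_WIDTH_SHIFT;
 *     uint32_t available = (uint32_t)(DISPATCH_QUEUE_WIDTH_FULL - w_bits);
 *
 * `available` equals dq_width minus the number of in-flight work items; a
 * freshly created serial queue reports one available slot, and the encoded
 * value reaches DISPATCH_QUEUE_WIDTH_FULL (setting qf) once that slot is used.
 */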
/*
 * pb: pending barrier (bit 40)
 *    Drainers set this bit when they couldn't run the next work item and it is
 *    a barrier. When this bit is set, `dq_width - 1` work item slots are
 *    reserved so that no wakeup happens until the last work item in flight
 *    has finished.
 */
#define DISPATCH_QUEUE_PENDING_BARRIER		0x0000010000000000ull
/*
 * d: dirty bit (bit 39)
 *    This bit is set when a queue transitions from empty to not empty.
 *    This bit is set before dq_items_head is set, with appropriate barriers.
 *    Any thread looking at a queue head is responsible for unblocking any
 *    dispatch_*_sync that could be enqueued at the beginning.
 *
 *    Drainer perspective
 *    ===================
 *
 *    When done, any "Drainer", in particular for dispatch_*_sync() handoff
 *    paths, exits in 3 steps, and the point of the DIRTY bit is to make
 *    the Drainers take the slowpath at step 2 to take into account enqueuers
 *    that could have made the queue non-idle concurrently.
 *
 *        // drainer-exit step 1
 *        if (slowpath(dq->dq_items_tail)) { // speculative test
 *            return handle_non_empty_queue_or_wakeup(dq);
 *        }
 *        // drainer-exit step 2
 *        if (!_dispatch_queue_drain_try_unlock(dq, ${owned}, ...)) {
 *            return handle_non_empty_queue_or_wakeup(dq);
 *        }
 *        // drainer-exit step 3
 *        // no need to wake up the queue, it's really empty for sure
 *        return;
 *
 *    The crux is _dispatch_queue_drain_try_unlock(), a function whose
 *    contract is to release everything the current thread owns from the queue
 *    state, so that when it's successful, any other thread can acquire
 *    width from that queue.
 *
 *    But that function must fail if it sees the DIRTY bit set, leaving
 *    the state untouched. Leaving the state untouched is vital as it ensures
 *    that no other Slayer^WDrainer can rise at the same time, because the
 *    resource stays locked.
 *
 *    Note that releasing the DRAIN_LOCK or ENQUEUE_LOCK (see below) currently
 *    doesn't use that pattern, and always tries to requeue. It isn't a problem
 *    because while holding either of these locks prevents *some* sync
 *    codepaths (the barrier ones) from acquiring the resource, the retry they
 *    perform at their step D (see just below) isn't affected by the state of
 *    these bits.
 *
 *    Sync items perspective
 *    ======================
 *
 *    On the dispatch_*_sync() acquire side, the code must look like this:
 *
 *        // Step A
 *        if (try_acquire_sync(dq)) {
 *            return sync_operation_fastpath(dq, item);
 *        }
 *
 *        // Step B
 *        if (queue_push_and_inline(dq, item)) {
 *            atomic_store(dq->dq_items_head, item, relaxed);
 *            // Step C
 *            atomic_or(dq->dq_state, DIRTY, release);
 *
 *            // Step D
 *            if (try_acquire_sync(dq)) {
 *                try_lock_transfer_or_wakeup(dq);
 *            }
 *        }
 *
 *        wait_for_lock_transfer(dq);
 *
 *    A. If this code can acquire the resource it needs at step A, we're good.
 *
 *    B. If the item isn't the first at enqueue time, then there is no issue.
 *       At least another thread went through C, this thread isn't interesting
 *       for the possible races, and responsibility to make progress is
 *       transferred to the thread which went through C-D.
 *
 *    C. The DIRTY bit is set with a release barrier, after the head/tail
 *       has been set, so that seeing the DIRTY bit means that head/tail
 *       will be visible to any drainer that has the matching acquire barrier.
 *
 *       Drainers may see the head/tail and fail to see DIRTY, in which
 *       case, their _dispatch_queue_drain_try_unlock() will clear the DIRTY
 *       bit, and fail, causing the caller to retry exactly once.
 *
 *    D. At this stage, there are two possible outcomes:
 *
 *       - either the acquire works this time, in which case this thread
 *         successfully becomes a drainer. That's obviously the happy path.
 *         It means all drainers are after Step 2 (or there is no Drainer).
 *
 *       - or the acquire fails, which means that another drainer is before
 *         its Step 2. Since we set the DIRTY bit on the dq_state by now,
 *         and since drainers manipulate the state atomically, at least one
 *         drainer that is still before its step 2 will fail its step 2, and
 *         be responsible for making progress.
 *
 *    Async items perspective
 *    =======================
 *
 *    On the async codepath, when the queue becomes non empty, the queue
 *    is always woken up. There is no point in trying to avoid that wake up
 *    for the async case, because it's required for the async()ed item to make
 *    progress: a drain of the queue must happen.
 *
 *    So on the async "acquire" side, there is no subtlety at all.
 */
#define DISPATCH_QUEUE_DIRTY				0x0000008000000000ull
/*
 * md: enqueued/draining on manager (bit 38)
 *    Set when enqueued and draining on the manager hierarchy.
 *
 *    Unlike the ENQUEUED bit, it is kept until the queue is unlocked from its
 *    invoke call on the manager. This is used to prevent stealing, and
 *    overrides to be applied down the target queue chain.
 */
#define DISPATCH_QUEUE_ENQUEUED_ON_MGR		0x0000004000000000ull

/*
 * r: queue graph role (bits 37 - 36)
 *    Queue role in the target queue graph
 */
#define DISPATCH_QUEUE_ROLE_MASK			0x0000003000000000ull
#define DISPATCH_QUEUE_ROLE_BASE_WLH		0x0000002000000000ull
#define DISPATCH_QUEUE_ROLE_BASE_ANON		0x0000001000000000ull
#define DISPATCH_QUEUE_ROLE_INNER			0x0000000000000000ull
/*
 * o: has override (bit 35, if role is DISPATCH_QUEUE_ROLE_BASE_ANON)
 *    Set when a queue has received a QOS override and needs to reset it.
 *    This bit is only cleared when the final drain_try_unlock() succeeds.
 *
 * sw: has received sync wait (bit 35, if role DISPATCH_QUEUE_ROLE_BASE_WLH)
 *    Set when a queue owner has been exposed to the kernel because of
 *    dispatch_sync() contention.
 */
#define DISPATCH_QUEUE_RECEIVED_OVERRIDE	0x0000000800000000ull
#define DISPATCH_QUEUE_RECEIVED_SYNC_WAIT	0x0000000800000000ull

/*
 * max_qos: max qos (bits 34 - 32)
 *   This is the maximum qos that has been enqueued on the queue
 */
#define DISPATCH_QUEUE_MAX_QOS_MASK			0x0000000700000000ull
#define DISPATCH_QUEUE_MAX_QOS_SHIFT		32
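
/*
 * Editor's note (illustrative): the max QoS is a 3-bit dispatch_qos_t value
 * packed right above the 32-bit drain-lock word, so extracting and replacing
 * it could look like:
 *
 *     dispatch_qos_t qos = (dispatch_qos_t)((dq_state &
 *                     DISPATCH_QUEUE_MAX_QOS_MASK) >>
 *                     DISPATCH_QUEUE_MAX_QOS_SHIFT);
 *     uint64_t with_qos  = (dq_state & ~DISPATCH_QUEUE_MAX_QOS_MASK) |
 *                     ((uint64_t)new_qos << DISPATCH_QUEUE_MAX_QOS_SHIFT);
 *
 * where `new_qos` is a hypothetical dispatch_qos_t value no larger than 7.
 */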
/*
 * dl: drain lock (bits 31 - 0)
 *    This is used by the normal drain to drain exclusively relative to other
 *    drain stealers (like the QoS Override codepath). It holds the identity
 *    (thread port) of the current drainer.
 *
 * st: sync transfer (bit 1 or 30)
 *    Set when a dispatch_sync() is transferred to
 *
 * e: enqueued bit (bit 0 or 31)
 *    Set when a queue is enqueued on its target queue
 */
#define DISPATCH_QUEUE_DRAIN_OWNER_MASK		((uint64_t)DLOCK_OWNER_MASK)
#define DISPATCH_QUEUE_SYNC_TRANSFER		((uint64_t)DLOCK_FAILED_TRYLOCK_BIT)
#define DISPATCH_QUEUE_ENQUEUED				((uint64_t)DLOCK_WAITERS_BIT)

#define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \
		(DISPATCH_QUEUE_ENQUEUED_ON_MGR | DISPATCH_QUEUE_ENQUEUED | \
		DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_MAX_QOS_MASK)

#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK \
		(DISPATCH_QUEUE_DRAIN_OWNER_MASK | DISPATCH_QUEUE_RECEIVED_OVERRIDE | \
		DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | DISPATCH_QUEUE_SYNC_TRANSFER)
/*
 *******************************************************************************
 *
 * Drainers
 *
 * Drainers are parts of the code that hold the drain lock by setting its value
 * to their thread port. There are two kinds:
 * 1. async drainers,
 * 2. lock transfer handlers.
 *
 * Drainers from the first category are _dispatch_queue_class_invoke and its
 * stealers. Those drainers always try to reserve width at the same time they
 * acquire the drain lock, to make sure they can make progress, and otherwise
 * exit.
 *
 * Drainers from the second category are `slow` work items. Those run on the
 * calling thread, and when done, try to transfer the width they own to the
 * possible next `slow` work item, and if there is no such item, they
 * relinquish that right. To do so, prior to taking any decision, they also try
 * to own the full "barrier" width on the given queue.
 *
 *******************************************************************************
 *
 * Enqueuing and wakeup rules
 *
 * Nobody should enqueue any dispatch object if it has no chance to make any
 * progress. That means that queues that:
 *
 * - have reached or overflowed their capacity,
 * - are currently draining,
 * - are already enqueued,
 *
 * should not try to be enqueued.
 *
 *******************************************************************************
 *
 * Lock transfer
 *
 * The point of the lock transfer code is to allow pure dispatch_*_sync()
 * callers to make progress without requiring the bring up of a drainer.
 * There are two reasons for that:
 *
 * - performance, as draining has to give up for dispatch_*_sync() work items,
 *   so waking up a queue for this is wasteful.
 *
 * - liveness, as with dispatch_*_sync() you burn threads waiting, you're more
 *   likely to hit various thread limits and may not have any drain being
 *   brought up if the process hits a limit.
 *
 * Lock transfer happens at the end of the dispatch_*_sync() codepaths:
 *
 * - obviously once a dispatch_*_sync() work item finishes, it owns queue
 *   width and it should try to transfer that ownership to the possible next
 *   queued item if it is a dispatch_*_sync() item
 *
 * - just before such a work item blocks, to make sure that that work item
 *   itself isn't its own last chance to be woken up. That can happen when
 *   a Drainer pops up everything from the queue, and that a dispatch_*_sync()
 *   work item has taken the slow path then was preempted for a long time.
 *
 *   That's why such work items, if first in the queue, must try a lock
 *   transfer procedure.
 *
 * For transfers where a partial width is owned, we give back that width.
 * If the queue state is "idle" again, we attempt to acquire the full width.
 * If that succeeds, this falls back to the full barrier lock
 * transfer, else it wakes up the queue according to its state.
 *
 * For full barrier transfers, if items eligible for lock transfer are found,
 * then they are woken up and the lock transfer is successful.
 *
 * If none are found, the full barrier width is released. If by doing so the
 * DIRTY bit is found, releasing the full barrier width fails and transferring
 * the lock is retried from scratch.
 */
#define DISPATCH_QUEUE_STATE_INIT_VALUE(width) \
		((DISPATCH_QUEUE_WIDTH_FULL - (width)) << DISPATCH_QUEUE_WIDTH_SHIFT)

/* Magic dq_state values for global queues: they have QUEUE_FULL and IN_BARRIER
 * set to force the slowpath in both dispatch_barrier_sync() and dispatch_sync()
 */
#define DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE \
		(DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER)

#define DISPATCH_QUEUE_SERIAL_DRAIN_OWNED \
		(DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_INTERVAL)
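
/*
 * Editor's note (illustrative): DISPATCH_QUEUE_STATE_INIT_VALUE biases the
 * width field so that the encoded value plus the queue's dq_width equals
 * DISPATCH_QUEUE_WIDTH_FULL, e.g.:
 *
 *     DISPATCH_QUEUE_STATE_INIT_VALUE(1)
 *         == (0x1000 - 1) << 41          // serial queue, one free slot
 *     DISPATCH_QUEUE_STATE_INIT_VALUE(DISPATCH_QUEUE_WIDTH_MAX)
 *         == 2ull << 41                  // concurrent queue, 0xffe free slots
 *
 * DISPATCH_QUEUE_SERIAL_DRAIN_OWNED is then what a drainer of a serial queue
 * owns while it runs: the barrier bit plus that queue's single width slot
 * (one DISPATCH_QUEUE_WIDTH_INTERVAL).
 */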
DISPATCH_CLASS_DECL(queue);

#if !defined(__cplusplus) || !DISPATCH_INTROSPECTION
struct dispatch_queue_s {
	_DISPATCH_QUEUE_HEADER(queue);
	DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only
} DISPATCH_ATOMIC64_ALIGN;

#if __has_feature(c_static_assert) && !DISPATCH_INTROSPECTION
_Static_assert(sizeof(struct dispatch_queue_s) <= 128, "dispatch queue size");
#endif
#endif // !defined(__cplusplus) || !DISPATCH_INTROSPECTION
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_serial, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_concurrent, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_main, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue);

OS_OBJECT_INTERNAL_CLASS_DECL(dispatch_queue_specific_queue, dispatch_queue,
		DISPATCH_OBJECT_VTABLE_HEADER(dispatch_queue_specific_queue));
typedef union {
	struct os_mpsc_queue_s *_oq;
	struct dispatch_queue_s *_dq;
	struct dispatch_source_s *_ds;
	struct dispatch_mach_s *_dm;
	struct dispatch_queue_specific_queue_s *_dqsq;
	os_mpsc_queue_t _objc_oq;
	dispatch_queue_t _objc_dq;
	dispatch_source_t _objc_ds;
	dispatch_mach_t _objc_dm;
	dispatch_queue_specific_queue_t _objc_dqsq;
} dispatch_queue_class_t DISPATCH_TRANSPARENT_UNION;
typedef struct dispatch_thread_context_s *dispatch_thread_context_t;
typedef struct dispatch_thread_context_s {
	dispatch_thread_context_t dtc_prev;
	union {
		size_t dtc_apply_nesting;
		dispatch_io_t dtc_io_in_barrier;
	};
} dispatch_thread_context_s;

typedef struct dispatch_thread_frame_s *dispatch_thread_frame_t;
typedef struct dispatch_thread_frame_s {
	// must be in the same order as our TSD keys!
	dispatch_queue_t dtf_queue;
	dispatch_thread_frame_t dtf_prev;
} dispatch_thread_frame_s;
typedef dispatch_queue_t dispatch_queue_wakeup_target_t;
#define DISPATCH_QUEUE_WAKEUP_NONE           ((dispatch_queue_wakeup_target_t)0)
#define DISPATCH_QUEUE_WAKEUP_TARGET         ((dispatch_queue_wakeup_target_t)1)
#define DISPATCH_QUEUE_WAKEUP_MGR            (&_dispatch_mgr_q)
#define DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT ((dispatch_queue_wakeup_target_t)-1)
void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target);
dispatch_priority_t _dispatch_queue_compute_priority_and_wlh(
		dispatch_queue_t dq, dispatch_wlh_t *wlh_out);
void _dispatch_queue_destroy(dispatch_queue_t dq, bool *allow_free);
void _dispatch_queue_dispose(dispatch_queue_t dq, bool *allow_free);
void _dispatch_queue_xref_dispose(struct dispatch_queue_s *dq);
void _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq);
void _dispatch_queue_suspend(dispatch_queue_t dq);
void _dispatch_queue_resume(dispatch_queue_t dq, bool activate);
void _dispatch_queue_finalize_activation(dispatch_queue_t dq,
void _dispatch_queue_invoke(dispatch_queue_t dq,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
void _dispatch_global_queue_poke(dispatch_queue_t dq, int n, int floor);
void _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
void _dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags);
dispatch_queue_wakeup_target_t _dispatch_queue_serial_drain(dispatch_queue_t dq,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
void _dispatch_queue_drain_sync_waiter(dispatch_queue_t dq,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
void _dispatch_queue_specific_queue_dispose(
		dispatch_queue_specific_queue_t dqsq, bool *allow_free);
void _dispatch_root_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags);
void _dispatch_root_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
#if DISPATCH_USE_KEVENT_WORKQUEUE
void _dispatch_root_queue_drain_deferred_item(dispatch_deferred_items_t ddi
		DISPATCH_PERF_MON_ARGS_PROTO);
void _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi
		DISPATCH_PERF_MON_ARGS_PROTO);
#endif
void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq,
void _dispatch_main_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags);
void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags);
void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq);
void _dispatch_runloop_queue_dispose(dispatch_queue_t dq, bool *allow_free);
void _dispatch_mgr_queue_drain(void);
#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
void _dispatch_mgr_priority_init(void);
#else
static inline void _dispatch_mgr_priority_init(void) {}
#endif
#if DISPATCH_USE_KEVENT_WORKQUEUE
void _dispatch_kevent_workqueue_init(void);
#else
static inline void _dispatch_kevent_workqueue_init(void) {}
#endif
void _dispatch_apply_invoke(void *ctxt);
void _dispatch_apply_redirect_invoke(void *ctxt);
void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func);
#define DISPATCH_BARRIER_TRYSYNC_SUSPEND 0x1
void _dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uint32_t flags);
void _dispatch_queue_atfork_child(void);
#if DISPATCH_DEBUG
void dispatch_debug_queue(dispatch_queue_t dq, const char* str);
#else
static inline void dispatch_debug_queue(dispatch_queue_t dq DISPATCH_UNUSED,
		const char* str DISPATCH_UNUSED) {}
#endif

size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz);
size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf,
		size_t bufsiz);
#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_MAX * 2)

// must be in lowest to highest qos order (as encoded in dispatch_qos_t)
// overcommit qos index values need bit 1 set
enum {
	DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
	DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS,
	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS,
	DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS,
	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS,
	DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS,
	DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT,
	_DISPATCH_ROOT_QUEUE_IDX_COUNT,
};
// skip zero
// 1 - main_q
// 2 - mgr_q
// 3 - mgr_root_q
// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues
// we use 'xadd' on Intel, so the initial value == next assigned
#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 16
extern unsigned long volatile _dispatch_queue_serial_numbers;
extern struct dispatch_queue_s _dispatch_root_queues[];
extern struct dispatch_queue_s _dispatch_mgr_q;
void _dispatch_root_queues_init(void);

#if DISPATCH_DEBUG
#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \
		dispatch_assert_queue(&_dispatch_mgr_q)
#else
#define DISPATCH_ASSERT_ON_MANAGER_QUEUE()
#endif
#pragma mark dispatch_queue_attr_t

typedef enum {
	_dispatch_queue_attr_overcommit_unspecified = 0,
	_dispatch_queue_attr_overcommit_enabled,
	_dispatch_queue_attr_overcommit_disabled,
} _dispatch_queue_attr_overcommit_t;
DISPATCH_CLASS_DECL(queue_attr);
struct dispatch_queue_attr_s {
	OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr);
	dispatch_priority_requested_t dqa_qos_and_relpri;
	uint16_t dqa_overcommit:2;
	uint16_t dqa_autorelease_frequency:2;
	uint16_t dqa_concurrent:1;
	uint16_t dqa_inactive:1;
};
enum {
	DQA_INDEX_UNSPECIFIED_OVERCOMMIT = 0,
	DQA_INDEX_NON_OVERCOMMIT,
	DQA_INDEX_OVERCOMMIT,
};

#define DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT 3
enum {
	DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT =
			DISPATCH_AUTORELEASE_FREQUENCY_INHERIT,
	DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM =
			DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM,
	DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER =
			DISPATCH_AUTORELEASE_FREQUENCY_NEVER,
};

#define DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT 3
enum {
	DQA_INDEX_CONCURRENT = 0,
	DQA_INDEX_SERIAL,
};

#define DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT 2
enum {
	DQA_INDEX_ACTIVE = 0,
	DQA_INDEX_INACTIVE,
};

#define DISPATCH_QUEUE_ATTR_INACTIVE_COUNT 2
typedef enum {
	DQA_INDEX_QOS_CLASS_UNSPECIFIED = 0,
	DQA_INDEX_QOS_CLASS_MAINTENANCE,
	DQA_INDEX_QOS_CLASS_BACKGROUND,
	DQA_INDEX_QOS_CLASS_UTILITY,
	DQA_INDEX_QOS_CLASS_DEFAULT,
	DQA_INDEX_QOS_CLASS_USER_INITIATED,
	DQA_INDEX_QOS_CLASS_USER_INTERACTIVE,
} _dispatch_queue_attr_index_qos_class_t;
#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY)

extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[]
		[DISPATCH_QUEUE_ATTR_PRIO_COUNT]
		[DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT]
		[DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT]
		[DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT]
		[DISPATCH_QUEUE_ATTR_INACTIVE_COUNT];
dispatch_queue_attr_t _dispatch_get_default_queue_attr(void);
#pragma mark dispatch_continuation_t

// If dc_flags is less than 0x1000, then the object is a continuation.
// Otherwise, the object has a private layout and memory management rules. The
// layout until after 'do_next' must align with normal objects.
#define DISPATCH_CONTINUATION_HEADER(x) \
		const void *do_vtable; \
		uintptr_t dc_flags; \
		pthread_priority_t dc_priority; \
	struct dispatch_##x##_s *volatile do_next; \
	struct voucher_s *dc_voucher; \
	dispatch_function_t dc_func; \
#elif OS_OBJECT_HAVE_OBJC1
#define DISPATCH_CONTINUATION_HEADER(x) \
	dispatch_function_t dc_func; \
		pthread_priority_t dc_priority; \
	struct voucher_s *dc_voucher; \
		const void *do_vtable; \
		uintptr_t dc_flags; \
	struct dispatch_##x##_s *volatile do_next; \
#define DISPATCH_CONTINUATION_HEADER(x) \
		const void *do_vtable; \
		uintptr_t dc_flags; \
		pthread_priority_t dc_priority; \
	struct voucher_s *dc_voucher; \
	struct dispatch_##x##_s *volatile do_next; \
	dispatch_function_t dc_func; \
#define _DISPATCH_CONTINUATION_PTRS 8
#if DISPATCH_HW_CONFIG_UP
// UP devices don't contend on continuations so we don't need to force them to
// occupy a whole cacheline (which is intended to avoid contention)
#define DISPATCH_CONTINUATION_SIZE \
		(_DISPATCH_CONTINUATION_PTRS * DISPATCH_SIZEOF_PTR)
#else
#define DISPATCH_CONTINUATION_SIZE  ROUND_UP_TO_CACHELINE_SIZE( \
		(_DISPATCH_CONTINUATION_PTRS * DISPATCH_SIZEOF_PTR))
#endif
#define ROUND_UP_TO_CONTINUATION_SIZE(x) \
		(((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
		~(DISPATCH_CONTINUATION_SIZE - 1u))
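
/*
 * Editor's note (illustrative): on a 64-bit build (DISPATCH_SIZEOF_PTR == 8)
 * the 8 header pointers already add up to 64 bytes, so
 * DISPATCH_CONTINUATION_SIZE is exactly one cacheline and
 * ROUND_UP_TO_CONTINUATION_SIZE() sizes larger allocations in 64-byte steps:
 *
 *     ROUND_UP_TO_CONTINUATION_SIZE(64) == 64    // assuming a 64-byte size
 *     ROUND_UP_TO_CONTINUATION_SIZE(65) == 128
 *
 * On 32-bit UP configurations the size stays at 8 * 4 == 32 bytes, since there
 * is no cacheline contention to avoid there.
 */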
// continuation is a dispatch_sync or dispatch_barrier_sync
#define DISPATCH_OBJ_SYNC_WAITER_BIT		0x001ul
// continuation acts as a barrier
#define DISPATCH_OBJ_BARRIER_BIT			0x002ul
// continuation resources are freed on run
// this is set on async or for non event_handler source handlers
#define DISPATCH_OBJ_CONSUME_BIT			0x004ul
// continuation has a group in dc_data
#define DISPATCH_OBJ_GROUP_BIT				0x008ul
// continuation function is a block (copied in dc_ctxt)
#define DISPATCH_OBJ_BLOCK_BIT				0x010ul
// continuation function is a block with private data, implies BLOCK_BIT
#define DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT	0x020ul
// source handler requires fetching context from source
#define DISPATCH_OBJ_CTXT_FETCH_BIT			0x040ul
// use the voucher from the continuation even if the queue has voucher set
#define DISPATCH_OBJ_ENFORCE_VOUCHER		0x080ul
// never set on continuations, used by mach.c only
#define DISPATCH_OBJ_MACH_BARRIER			0x1000000ul
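
/*
 * Editor's note (illustrative, typical flag combinations are assumptions): a
 * plain dispatch_async() of a block would carry something like
 * (DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BLOCK_BIT), a
 * dispatch_barrier_async() adds DISPATCH_OBJ_BARRIER_BIT, and a
 * dispatch_sync() waiter uses DISPATCH_OBJ_SYNC_WAITER_BIT without CONSUME
 * because its continuation lives on the waiter's stack. All of these stay
 * below 0x1000, which is what marks the object as a continuation rather than
 * a vtable'd dispatch object (see the comment above
 * DISPATCH_CONTINUATION_HEADER).
 */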
typedef struct dispatch_continuation_s {
	struct dispatch_object_s _as_do[0];
	DISPATCH_CONTINUATION_HEADER(continuation);
} *dispatch_continuation_t;

typedef struct dispatch_sync_context_s {
	struct dispatch_object_s _as_do[0];
	struct dispatch_continuation_s _as_dc[0];
	DISPATCH_CONTINUATION_HEADER(continuation);
	dispatch_function_t dsc_func;
#if DISPATCH_COCOA_COMPAT
	dispatch_thread_frame_s dsc_dtf;
#endif
	dispatch_thread_event_s dsc_event;
	dispatch_tid dsc_waiter;
	dispatch_qos_t dsc_override_qos_floor;
	dispatch_qos_t dsc_override_qos;
	bool dsc_wlh_was_first;
	bool dsc_release_storage;
} *dispatch_sync_context_t;
typedef struct dispatch_continuation_vtable_s {
	_OS_OBJECT_CLASS_HEADER();
	DISPATCH_INVOKABLE_VTABLE_HEADER(dispatch_continuation);
} const *dispatch_continuation_vtable_t;
#ifndef DISPATCH_CONTINUATION_CACHE_LIMIT
#if TARGET_OS_EMBEDDED
#define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 16
#else
#define DISPATCH_CONTINUATION_CACHE_LIMIT 1024
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 128
#endif
#endif
dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void);
void _dispatch_continuation_free_to_heap(dispatch_continuation_t c);
void _dispatch_continuation_async(dispatch_queue_t dq,
		dispatch_continuation_t dc);
void _dispatch_continuation_pop(dispatch_object_t dou,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
		dispatch_queue_t dq);
void _dispatch_continuation_invoke(dispatch_object_t dou,
		voucher_t override_voucher, dispatch_invoke_flags_t flags);
#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
extern int _dispatch_continuation_cache_limit;
void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c);
#else
#define _dispatch_continuation_cache_limit DISPATCH_CONTINUATION_CACHE_LIMIT
#define _dispatch_continuation_free_to_cache_limit(c) \
		_dispatch_continuation_free_to_heap(c)
#endif
#pragma mark dispatch_continuation vtables

enum {
	DC_ASYNC_REDIRECT_TYPE,
	DC_MACH_SEND_BARRRIER_DRAIN_TYPE,
	DC_MACH_SEND_BARRIER_TYPE,
	DC_MACH_RECV_BARRIER_TYPE,
	DC_MACH_ASYNC_REPLY_TYPE,
#if HAVE_PTHREAD_WORKQUEUE_QOS
	DC_OVERRIDE_STEALING_TYPE,
	DC_OVERRIDE_OWNING_TYPE,
#endif
	_DC_MAX_TYPE,
};
DISPATCH_ALWAYS_INLINE
static inline unsigned long
dc_type(dispatch_continuation_t dc)
{
	return dx_type(dc->_as_do);
}

DISPATCH_ALWAYS_INLINE
static inline unsigned long
dc_subtype(dispatch_continuation_t dc)
{
	return dx_subtype(dc->_as_do);
}

extern const struct dispatch_continuation_vtable_s
		_dispatch_continuation_vtables[_DC_MAX_TYPE];
void
_dispatch_async_redirect_invoke(dispatch_continuation_t dc,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);

#if HAVE_PTHREAD_WORKQUEUE_QOS
void
_dispatch_queue_override_invoke(dispatch_continuation_t dc,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
#endif

#define DC_VTABLE(name)  (&_dispatch_continuation_vtables[DC_##name##_TYPE])

#define DC_VTABLE_ENTRY(name, ...)  \
	[DC_##name##_TYPE] = { \
		.do_type = DISPATCH_CONTINUATION_TYPE(name), \
		__VA_ARGS__ \
	}
#pragma mark _dispatch_set_priority_and_voucher
#if HAVE_PTHREAD_WORKQUEUE_QOS
void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri,
		mach_voucher_t kv);
voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t pri,
		voucher_t voucher, dispatch_thread_set_self_t flags);
#else
static inline void
_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri,
#pragma mark dispatch_apply_t

struct dispatch_apply_s {
	size_t volatile da_index, da_todo;
	size_t da_iterations, da_nested;
	dispatch_continuation_t da_dc;
	dispatch_thread_event_s da_event;
	dispatch_invoke_flags_t da_flags;
};
typedef struct dispatch_apply_s *dispatch_apply_t;
#pragma mark dispatch_block_t

#ifdef __BLOCKS__

#define DISPATCH_BLOCK_API_MASK (0x100u - 1)
#define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31)
#define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30)

#define DISPATCH_BLOCK_PRIVATE_DATA_HEADER() \
	unsigned long dbpd_magic; \
	dispatch_block_flags_t dbpd_flags; \
	unsigned int volatile dbpd_atomic_flags; \
	int volatile dbpd_performed; \
	pthread_priority_t dbpd_priority; \
	voucher_t dbpd_voucher; \
	dispatch_block_t dbpd_block; \
	dispatch_group_t dbpd_group; \
	os_mpsc_queue_t volatile dbpd_queue; \
	mach_port_t dbpd_thread;
#if !defined(__cplusplus)
struct dispatch_block_private_data_s {
	DISPATCH_BLOCK_PRIVATE_DATA_HEADER();
};
#endif
typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t;

// dbpd_atomic_flags bits
#define DBF_CANCELED 1u // block has been cancelled
#define DBF_WAITING 2u // dispatch_block_wait has begun
#define DBF_WAITED 4u // dispatch_block_wait has finished without timeout
#define DBF_PERFORM 8u // dispatch_block_perform: don't group_leave

#define DISPATCH_BLOCK_PRIVATE_DATA_MAGIC 0xD159B10C // 0xDISPatch_BLOCk
// struct for synchronous perform: no group_leave at end of invoke
#define DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block) \
		{ \
			.dbpd_magic = DISPATCH_BLOCK_PRIVATE_DATA_MAGIC, \
			.dbpd_flags = (flags), \
			.dbpd_atomic_flags = DBF_PERFORM, \
			.dbpd_block = (block), \
		}
dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags,
		voucher_t voucher, pthread_priority_t priority, dispatch_block_t block);
void _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd);
void _dispatch_block_sync_invoke(void *block);

void _dispatch_continuation_init_slow(dispatch_continuation_t dc,
		dispatch_queue_class_t dqu, dispatch_block_flags_t flags);

long _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func);

/* exported for tests in dispatch_trysync.c */
DISPATCH_EXPORT DISPATCH_NOTHROW
long _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t f);

#endif /* __BLOCKS__ */
typedef struct dispatch_pthread_root_queue_observer_hooks_s {
	void (*queue_will_execute)(dispatch_queue_t queue);
	void (*queue_did_execute)(dispatch_queue_t queue);
} dispatch_pthread_root_queue_observer_hooks_s;
typedef dispatch_pthread_root_queue_observer_hooks_s
		*dispatch_pthread_root_queue_observer_hooks_t;
#define DISPATCH_IOHID_SPI 1

DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
DISPATCH_NOTHROW DISPATCH_NONNULL4
dispatch_queue_t
_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID(
	const char *label, unsigned long flags, const pthread_attr_t *attr,
	dispatch_pthread_root_queue_observer_hooks_t observer_hooks,
	dispatch_block_t configure);

DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW
bool
_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID(
		dispatch_queue_t queue);