#define DISPATCH_CACHELINE_ALIGN \
__attribute__((__aligned__(DISPATCH_CACHELINE_SIZE)))
+#define DISPATCH_CACHELINE_PAD_SIZE(type) \
+ (roundup(sizeof(type), DISPATCH_CACHELINE_SIZE) - sizeof(type))
+
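/*
 * Editorial worked example (not part of this patch, assumes roundup() from
 * <sys/param.h> and a 64-byte DISPATCH_CACHELINE_SIZE): for a 200-byte type,
 * roundup(200, 64) == 256, so DISPATCH_CACHELINE_PAD_SIZE(type) == 56; a
 * type whose size is already a multiple of 64 gets a pad of 0.
 */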
#pragma mark -
#pragma mark dispatch_queue_t
DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
- DQF_NONE = 0x0000,
- DQF_AUTORELEASE_ALWAYS = 0x0001,
- DQF_AUTORELEASE_NEVER = 0x0002,
-#define _DQF_AUTORELEASE_MASK 0x0003
- DQF_THREAD_BOUND = 0x0004, // queue is bound to a thread
- DQF_BARRIER_BIT = 0x0008, // queue is a barrier on its target
- DQF_TARGETED = 0x0010, // queue is targeted by another object
- DQF_LABEL_NEEDS_FREE = 0x0020, // queue label was strduped; need to free it
- DQF_CANNOT_TRYSYNC = 0x0040,
- DQF_RELEASED = 0x0080, // xref_cnt == -1
+ DQF_NONE = 0x00000000,
+ DQF_AUTORELEASE_ALWAYS = 0x00010000,
+ DQF_AUTORELEASE_NEVER = 0x00020000,
+#define _DQF_AUTORELEASE_MASK 0x00030000
+ DQF_THREAD_BOUND = 0x00040000, // queue is bound to a thread
+ DQF_BARRIER_BIT = 0x00080000, // queue is a barrier on its target
+ DQF_TARGETED = 0x00100000, // queue is targeted by another object
+ DQF_LABEL_NEEDS_FREE = 0x00200000, // queue label was strduped; need to free it
+ DQF_CANNOT_TRYSYNC = 0x00400000,
+ DQF_RELEASED = 0x00800000, // xref_cnt == -1
+ DQF_LEGACY = 0x01000000,
// only applies to sources
//
// will be -p-.
//
// -pd
- // Received EV_DELETE (from ap-), needs to free `ds_dkev`, the knote is
- // gone from the kernel, but ds_dkev lives. Next state will be --d.
+ // Received EV_DELETE (from ap-), needs to unregister ds_refs, the muxnote
+ // is gone from the kernel. Next state will be --d.
//
// -p-
// Received an EV_ONESHOT event (from a--), or an event whose delivery
// caused the cancellation to fail with EINPROGRESS
- // (from ap-). The knote still lives, next state will be --d.
+ // (from ap-). The muxnote still lives, next state will be --d.
//
// --d
- // Final state of the source, the knote is gone from the kernel and
- // ds_dkev is freed. The source can safely be released.
+ // Final state of the source, the muxnote is gone from the kernel and
+ // ds_refs is unregistered. The source can safely be released.
//
// a-d (INVALID)
// apd (INVALID)
// Setting DSF_DELETED should also always atomically clear DSF_ARMED. If
- // the knote is gone from the kernel, it makes no sense whatsoever to
+ // the muxnote is gone from the kernel, it makes no sense whatsoever to
// have it armed. And generally speaking, once `d` or `p` has been set,
// `a` cannot do a cleared -> set transition anymore
// (see _dispatch_source_try_set_armed).
//
- DSF_CANCEL_WAITER = 0x0800, // synchronous waiters for cancel
- DSF_CANCELED = 0x1000, // cancellation has been requested
- DSF_ARMED = 0x2000, // source is armed
- DSF_DEFERRED_DELETE = 0x4000, // source is pending delete
- DSF_DELETED = 0x8000, // source knote is deleted
+ DSF_WLH_CHANGED = 0x04000000,
+ DSF_CANCEL_WAITER = 0x08000000, // synchronous waiters for cancel
+ DSF_CANCELED = 0x10000000, // cancellation has been requested
+ DSF_ARMED = 0x20000000, // source is armed
+ DSF_DEFERRED_DELETE = 0x40000000, // source is pending delete
+ DSF_DELETED = 0x80000000, // source muxnote is deleted
#define DSF_STATE_MASK (DSF_ARMED | DSF_DEFERRED_DELETE | DSF_DELETED)
- DQF_WIDTH_MASK = 0xffff0000,
-#define DQF_WIDTH_SHIFT 16
+#define DQF_FLAGS_MASK ((dispatch_queue_flags_t)0xffff0000)
+#define DQF_WIDTH_MASK ((dispatch_queue_flags_t)0x0000ffff)
+#define DQF_WIDTH(n) ((dispatch_queue_flags_t)(uint16_t)(n))
);
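/*
 * Illustrative sketch (editorial, not part of this patch): with the new
 * encoding, one atomic load of dq_atomic_flags yields both halves; the low
 * 16 bits are the width, the high 16 bits the flags. The _example_ helper
 * name is hypothetical.
 */
static inline uint16_t
_example_dq_width(uint32_t dq_atomic_flags)
{
	// DQF_WIDTH_MASK selects the low word, which now holds the width
	return (uint16_t)(dq_atomic_flags & DQF_WIDTH_MASK);
}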
#define _DISPATCH_QUEUE_HEADER(x) \
struct os_mpsc_queue_s _as_oq[0]; \
DISPATCH_OBJECT_HEADER(x); \
_OS_MPSC_QUEUE_FIELDS(dq, dq_state); \
- dispatch_queue_t dq_specific_q; \
- union { \
- uint32_t volatile dq_atomic_flags; \
- DISPATCH_STRUCT_LITTLE_ENDIAN_2( \
- uint16_t dq_atomic_bits, \
- uint16_t dq_width \
- ); \
- }; \
uint32_t dq_side_suspend_cnt; \
- DISPATCH_INTROSPECTION_QUEUE_HEADER; \
- dispatch_unfair_lock_s dq_sidelock
- /* LP64: 32bit hole on LP64 */
+ dispatch_unfair_lock_s dq_sidelock; \
+ union { \
+ dispatch_queue_t dq_specific_q; \
+ struct dispatch_source_refs_s *ds_refs; \
+ struct dispatch_timer_source_refs_s *ds_timer_refs; \
+ struct dispatch_mach_recv_refs_s *dm_recv_refs; \
+ }; \
+ DISPATCH_UNION_LE(uint32_t volatile dq_atomic_flags, \
+ const uint16_t dq_width, \
+ const uint16_t __dq_opaque \
+ ); \
+ DISPATCH_INTROSPECTION_QUEUE_HEADER
+ /* LP64: 32bit hole */
#define DISPATCH_QUEUE_HEADER(x) \
struct dispatch_queue_s _as_dq[0]; \
_DISPATCH_QUEUE_HEADER(x)
-#define DISPATCH_QUEUE_ALIGN __attribute__((aligned(8)))
+struct _dispatch_unpadded_queue_s {
+ _DISPATCH_QUEUE_HEADER(dummy);
+};
-#define DISPATCH_QUEUE_WIDTH_POOL 0x7fff
-#define DISPATCH_QUEUE_WIDTH_MAX 0x7ffe
-#define DISPATCH_QUEUE_USES_REDIRECTION(width) \
- ({ uint16_t _width = (width); \
- _width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; })
+#define DISPATCH_QUEUE_CACHELINE_PAD \
+ DISPATCH_CACHELINE_PAD_SIZE(struct _dispatch_unpadded_queue_s)
#define DISPATCH_QUEUE_CACHELINE_PADDING \
char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
-#ifdef __LP64__
-#define DISPATCH_QUEUE_CACHELINE_PAD (( \
- (sizeof(uint32_t) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \
- + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
-#elif OS_OBJECT_HAVE_OBJC1
-#define DISPATCH_QUEUE_CACHELINE_PAD (( \
- (11*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \
- + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
-#else
-#define DISPATCH_QUEUE_CACHELINE_PAD (( \
- (12*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \
- + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
-#endif
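/*
 * Illustrative sketch (editorial, not part of this patch): deriving the pad
 * from the real header layout replaces the per-ABI arithmetic removed above,
 * and the invariant can now be checked at compile time:
 */
#if __has_feature(c_static_assert)
_Static_assert((sizeof(struct _dispatch_unpadded_queue_s) +
		DISPATCH_QUEUE_CACHELINE_PAD) % DISPATCH_CACHELINE_SIZE == 0,
		"padded queue header must end on a cacheline boundary");
#endif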
/*
* dispatch queues `dq_state` demystified
* Most Significant 32 bit Word
* ----------------------------
*
- * sc: suspend count (bits 63 - 57)
+ * sc: suspend count (bits 63 - 58)
* The suspend count unsurprisingly holds the suspend count of the queue
- * Only 7 bits are stored inline. Extra counts are transfered in a side
+ * Only 6 bits are stored inline. Extra counts are transferred in a side
* suspend count and when that has happened, the ssc: bit is set.
*/
-#define DISPATCH_QUEUE_SUSPEND_INTERVAL 0x0200000000000000ull
-#define DISPATCH_QUEUE_SUSPEND_HALF 0x40u
+#define DISPATCH_QUEUE_SUSPEND_INTERVAL 0x0400000000000000ull
+#define DISPATCH_QUEUE_SUSPEND_HALF 0x20u
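/*
 * Illustrative sketch (editorial, not part of this patch): the suspend count
 * now occupies the top 6 bits, so it can be read back by dividing by the
 * interval (equivalently, shifting right by 58). The _example_ name is
 * hypothetical.
 */
static inline uint32_t
_example_dq_state_suspend_cnt(uint64_t dq_state)
{
	// each dispatch_suspend() adds one DISPATCH_QUEUE_SUSPEND_INTERVAL
	return (uint32_t)(dq_state / DISPATCH_QUEUE_SUSPEND_INTERVAL);
}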
/*
- * ssc: side suspend count (bit 56)
+ * ssc: side suspend count (bit 57)
* This bit means that the total suspend count didn't fit in the inline
* suspend count, and that there are additional suspend counts stored in the
* `dq_side_suspend_cnt` field.
*/
-#define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT 0x0100000000000000ull
+#define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT 0x0200000000000000ull
/*
- * i: inactive bit (bit 55)
+ * i: inactive bit (bit 56)
* This bit means that the object is inactive (see dispatch_activate)
*/
-#define DISPATCH_QUEUE_INACTIVE 0x0080000000000000ull
+#define DISPATCH_QUEUE_INACTIVE 0x0100000000000000ull
/*
- * na: needs activation (bit 54)
+ * na: needs activation (bit 55)
* This bit is set if the object is created inactive. It tells
* dispatch_queue_wakeup to perform various tasks at first wakeup.
*
 * This bit is cleared as part of the first wakeup. Having that bit prevents
 * the object from being woken up (because _dq_state_should_wakeup will say
* no), except in the dispatch_activate/dispatch_resume codepath.
*/
-#define DISPATCH_QUEUE_NEEDS_ACTIVATION 0x0040000000000000ull
+#define DISPATCH_QUEUE_NEEDS_ACTIVATION 0x0080000000000000ull
/*
* This mask covers the suspend count (sc), side suspend count bit (ssc),
* inactive (i) and needs activation (na) bits
*/
-#define DISPATCH_QUEUE_SUSPEND_BITS_MASK 0xffc0000000000000ull
+#define DISPATCH_QUEUE_SUSPEND_BITS_MASK 0xff80000000000000ull
/*
- * ib: in barrier (bit 53)
+ * ib: in barrier (bit 54)
* This bit is set when the queue is currently executing a barrier
*/
-#define DISPATCH_QUEUE_IN_BARRIER 0x0020000000000000ull
+#define DISPATCH_QUEUE_IN_BARRIER 0x0040000000000000ull
/*
- * qf: queue full (bit 52)
+ * qf: queue full (bit 53)
 * This bit is a subtle hack that allows checking, for any queue width,
 * whether the full width of the queue is used or reserved (depending on
 * the context). In other words, whether the queue has reached or
 * overflowed its capacity.
*/
-#define DISPATCH_QUEUE_WIDTH_FULL_BIT 0x0010000000000000ull
-#define DISPATCH_QUEUE_WIDTH_FULL 0x8000ull
+#define DISPATCH_QUEUE_WIDTH_FULL_BIT 0x0020000000000000ull
+#define DISPATCH_QUEUE_WIDTH_FULL 0x1000ull
+#define DISPATCH_QUEUE_WIDTH_POOL (DISPATCH_QUEUE_WIDTH_FULL - 1)
+#define DISPATCH_QUEUE_WIDTH_MAX (DISPATCH_QUEUE_WIDTH_FULL - 2)
+#define DISPATCH_QUEUE_USES_REDIRECTION(width) \
+ ({ uint16_t _width = (width); \
+ _width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; })
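/*
 * Editorial usage note (values follow from the defines above): only real
 * intermediate concurrent widths redirect, e.g.
 *   DISPATCH_QUEUE_USES_REDIRECTION(1)                         -> false
 *   DISPATCH_QUEUE_USES_REDIRECTION(42)                        -> true
 *   DISPATCH_QUEUE_USES_REDIRECTION(DISPATCH_QUEUE_WIDTH_MAX)  -> true
 *   DISPATCH_QUEUE_USES_REDIRECTION(DISPATCH_QUEUE_WIDTH_POOL) -> false
 */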
/*
- * w: width (bits 51 - 37)
+ * w: width (bits 52 - 41)
 * This encodes how many work items are in flight. Barriers hold `dq_width`
 * of them while they run. This is encoded as a signed offset with respect
 * to full use, where the negative values represent how many available slots
 * are in the queue, and the positive values how many work items are in
 * excess of our capacity.
*
* When this value is positive, then `wo` is always set to 1.
*/
-#define DISPATCH_QUEUE_WIDTH_INTERVAL 0x0000002000000000ull
-#define DISPATCH_QUEUE_WIDTH_MASK 0x001fffe000000000ull
-#define DISPATCH_QUEUE_WIDTH_SHIFT 37
+#define DISPATCH_QUEUE_WIDTH_INTERVAL 0x0000020000000000ull
+#define DISPATCH_QUEUE_WIDTH_MASK 0x003ffe0000000000ull
+#define DISPATCH_QUEUE_WIDTH_SHIFT 41
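/*
 * Illustrative sketch (editorial, not part of this patch): the width field
 * sits at bit 41, with DISPATCH_QUEUE_WIDTH_FULL_BIT adjacent at bit 53 so
 * that DISPATCH_QUEUE_WIDTH_MASK extracts both at once. The _example_ name
 * is hypothetical.
 */
static inline int32_t
_example_dq_state_width_offset(uint64_t dq_state)
{
	// extract bits 53 - 41, then re-center on "full use" to recover the
	// signed offset described above (negative == available slots)
	uint32_t w = (uint32_t)((dq_state & DISPATCH_QUEUE_WIDTH_MASK) >>
			DISPATCH_QUEUE_WIDTH_SHIFT);
	return (int32_t)w - (int32_t)DISPATCH_QUEUE_WIDTH_FULL;
}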
/*
- * pb: pending barrier (bit 36)
+ * pb: pending barrier (bit 40)
* Drainers set this bit when they couldn't run the next work item and it is
* a barrier. When this bit is set, `dq_width - 1` work item slots are
* reserved so that no wakeup happens until the last work item in flight
* completes.
*/
-#define DISPATCH_QUEUE_PENDING_BARRIER 0x0000001000000000ull
+#define DISPATCH_QUEUE_PENDING_BARRIER 0x0000010000000000ull
/*
- * d: dirty bit (bit 35)
+ * d: dirty bit (bit 39)
* This bit is set when a queue transitions from empty to not empty.
* This bit is set before dq_items_head is set, with appropriate barriers.
 * Any thread looking at a queue head is responsible for unblocking any
 * dispatch_*_sync that could be enqueued at the beginning of the queue.
*
* So on the async "acquire" side, there is no subtlety at all.
*/
-#define DISPATCH_QUEUE_DIRTY 0x0000000800000000ull
+#define DISPATCH_QUEUE_DIRTY 0x0000008000000000ull
/*
- * qo: (bit 34)
- * Set when a queue has a useful override set.
- * This bit is only cleared when the final drain_try_unlock() succeeds.
- *
- * When the queue dq_override is touched (overrides or-ed in), usually with
- * _dispatch_queue_override_priority(), then the HAS_OVERRIDE bit is set
- * with a release barrier and one of these three things happen next:
- *
- * - the queue is enqueued, which will cause it to be drained, and the
- * override to be handled by _dispatch_queue_drain_try_unlock().
- * In rare cases it could cause the queue to be queued while empty though.
+ * md: enqueued/draining on manager (bit 38)
+ * Set when enqueued and draining on the manager hierarchy.
*
- * - the DIRTY bit is also set with a release barrier, which pairs with
- * the handling of these bits by _dispatch_queue_drain_try_unlock(),
- * so that dq_override is reset properly.
+ * Unlike the ENQUEUED bit, it is kept until the queue is unlocked from its
+ * invoke call on the manager. This is used to prevent stealing, and causes
+ * overrides to be applied down the target queue chain.
+ */
+#define DISPATCH_QUEUE_ENQUEUED_ON_MGR 0x0000004000000000ull
+/*
+ * r: queue graph role (bits 37 - 36)
+ * Queue role in the target queue graph
*
- * - the queue was suspended, and _dispatch_queue_resume() will handle the
- * override as part of its wakeup sequence.
+ * 11: unused
+ * 10: WLH base
+ * 01: non wlh base
+ * 00: inner queue
*/
-#define DISPATCH_QUEUE_HAS_OVERRIDE 0x0000000400000000ull
+#define DISPATCH_QUEUE_ROLE_MASK 0x0000003000000000ull
+#define DISPATCH_QUEUE_ROLE_BASE_WLH 0x0000002000000000ull
+#define DISPATCH_QUEUE_ROLE_BASE_ANON 0x0000001000000000ull
+#define DISPATCH_QUEUE_ROLE_INNER 0x0000000000000000ull
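/*
 * Illustrative sketch (editorial, not part of this patch): the role is a
 * plain 2-bit field, so classifying a queue is a mask-and-compare; assumes
 * <stdbool.h>. The _example_ name is hypothetical.
 */
static inline bool
_example_dq_state_is_base_wlh(uint64_t dq_state)
{
	// a WLH base has role bits 10 (see the table above)
	return (dq_state & DISPATCH_QUEUE_ROLE_MASK) ==
			DISPATCH_QUEUE_ROLE_BASE_WLH;
}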
/*
- * p: pended bit (bit 33)
- * Set when a drain lock has been pended. When this bit is set,
- * the drain lock is taken and ENQUEUED is never set.
+ * o: has override (bit 35, if role is DISPATCH_QUEUE_ROLE_BASE_ANON)
+ * Set when a queue has received a QOS override and needs to reset it.
+ * This bit is only cleared when the final drain_try_unlock() succeeds.
*
- * This bit marks a queue that needs further processing but was kept pended
- * by an async drainer (not reenqueued) in the hope of being able to drain
- * it further later.
+ * sw: has received sync wait (bit 35, if role is DISPATCH_QUEUE_ROLE_BASE_WLH)
+ * Set when a queue owner has been exposed to the kernel because of
+ * dispatch_sync() contention.
*/
-#define DISPATCH_QUEUE_DRAIN_PENDED 0x0000000200000000ull
+#define DISPATCH_QUEUE_RECEIVED_OVERRIDE 0x0000000800000000ull
+#define DISPATCH_QUEUE_RECEIVED_SYNC_WAIT 0x0000000800000000ull
/*
- * e: enqueued bit (bit 32)
- * Set when a queue is enqueued on its target queue
+ * max_qos: max qos (bits 34 - 32)
+ * This is the maximum qos that has been enqueued on the queue
*/
-#define DISPATCH_QUEUE_ENQUEUED 0x0000000100000000ull
+#define DISPATCH_QUEUE_MAX_QOS_MASK 0x0000000700000000ull
+#define DISPATCH_QUEUE_MAX_QOS_SHIFT 32
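/*
 * Illustrative sketch (editorial, not part of this patch): reading the
 * 3-bit max_qos field back out of dq_state. The _example_ name is
 * hypothetical.
 */
static inline dispatch_qos_t
_example_dq_state_max_qos(uint64_t dq_state)
{
	// bits 34 - 32 hold the highest QoS enqueued so far
	return (dispatch_qos_t)((dq_state & DISPATCH_QUEUE_MAX_QOS_MASK) >>
			DISPATCH_QUEUE_MAX_QOS_SHIFT);
}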
/*
* dl: drain lock (bits 31-0)
 * This is used by the normal drain to drain exclusively relative to other
* drain stealers (like the QoS Override codepath). It holds the identity
* (thread port) of the current drainer.
+ *
+ * st: sync transfer (bit 1 or 30)
+ * Set when the drain lock is being transferred to a dispatch_sync() waiter
+ *
+ * e: enqueued bit (bit 0 or 31)
+ * Set when a queue is enqueued on its target queue
*/
-#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK 0x00000002ffffffffull
-#ifdef DLOCK_NOWAITERS_BIT
-#define DISPATCH_QUEUE_DRAIN_OWNER_MASK \
- ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_NOFAILED_TRYLOCK_BIT))
-#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \
- (((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))\
- ^ DLOCK_NOWAITERS_BIT)
-#define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \
- (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \
- DLOCK_NOWAITERS_BIT)
-#else
-#define DISPATCH_QUEUE_DRAIN_OWNER_MASK \
- ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_FAILED_TRYLOCK_BIT))
-#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \
- ((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))
+#define DISPATCH_QUEUE_DRAIN_OWNER_MASK ((uint64_t)DLOCK_OWNER_MASK)
+#define DISPATCH_QUEUE_SYNC_TRANSFER ((uint64_t)DLOCK_FAILED_TRYLOCK_BIT)
+#define DISPATCH_QUEUE_ENQUEUED ((uint64_t)DLOCK_WAITERS_BIT)
+
#define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \
- (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \
- DLOCK_WAITERS_BIT)
-#endif
+ (DISPATCH_QUEUE_ENQUEUED_ON_MGR | DISPATCH_QUEUE_ENQUEUED | \
+ DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_MAX_QOS_MASK)
+
+#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK \
+ (DISPATCH_QUEUE_DRAIN_OWNER_MASK | DISPATCH_QUEUE_RECEIVED_OVERRIDE | \
+ DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | DISPATCH_QUEUE_SYNC_TRANSFER)
+
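/*
 * Illustrative sketch (editorial, not part of this patch): with the lock
 * bits unified with the DLOCK_* layout, the drainer identity comes straight
 * out of the low word; assumes DLOCK_OWNER_MASK covers the thread-port bits
 * of the 32-bit lock. The _example_ name is hypothetical.
 */
static inline uint32_t
_example_dq_state_drain_owner(uint64_t dq_state)
{
	// low 32 bits are the drain lock; mask off ENQUEUED/SYNC_TRANSFER
	return (uint32_t)(dq_state & DISPATCH_QUEUE_DRAIN_OWNER_MASK);
}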
/*
*******************************************************************************
*
* that right. To do so, prior to taking any decision, they also try to own
* the full "barrier" width on the given queue.
*
- * see _dispatch_try_lock_transfer_or_wakeup
- *
*******************************************************************************
*
* Enqueuing and wakeup rules
(DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_INTERVAL)
DISPATCH_CLASS_DECL(queue);
-#if !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
+
+#if !defined(__cplusplus) || !DISPATCH_INTROSPECTION
struct dispatch_queue_s {
_DISPATCH_QUEUE_HEADER(queue);
DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only
-} DISPATCH_QUEUE_ALIGN;
-#endif // !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
+} DISPATCH_ATOMIC64_ALIGN;
+
+#if __has_feature(c_static_assert) && !DISPATCH_INTROSPECTION
+_Static_assert(sizeof(struct dispatch_queue_s) <= 128, "dispatch queue size");
+#endif
+#endif // !defined(__cplusplus) || !DISPATCH_INTROSPECTION
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_serial, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_concurrent, queue);
struct dispatch_source_s *_ds;
struct dispatch_mach_s *_dm;
struct dispatch_queue_specific_queue_s *_dqsq;
- struct dispatch_timer_aggregate_s *_dta;
#if USE_OBJC
os_mpsc_queue_t _ojbc_oq;
dispatch_queue_t _objc_dq;
dispatch_source_t _objc_ds;
dispatch_mach_t _objc_dm;
dispatch_queue_specific_queue_t _objc_dqsq;
- dispatch_timer_aggregate_t _objc_dta;
#endif
-} dispatch_queue_class_t __attribute__((__transparent_union__));
+} dispatch_queue_class_t DISPATCH_TRANSPARENT_UNION;
typedef struct dispatch_thread_context_s *dispatch_thread_context_t;
typedef struct dispatch_thread_context_s {
// must be in the same order as our TSD keys!
dispatch_queue_t dtf_queue;
dispatch_thread_frame_t dtf_prev;
- struct dispatch_object_s *dtf_deferred;
} dispatch_thread_frame_s;
-DISPATCH_ENUM(dispatch_queue_wakeup_target, long,
- DISPATCH_QUEUE_WAKEUP_NONE = 0,
- DISPATCH_QUEUE_WAKEUP_TARGET,
- DISPATCH_QUEUE_WAKEUP_MGR,
-);
+typedef dispatch_queue_t dispatch_queue_wakeup_target_t;
+#define DISPATCH_QUEUE_WAKEUP_NONE ((dispatch_queue_wakeup_target_t)0)
+#define DISPATCH_QUEUE_WAKEUP_TARGET ((dispatch_queue_wakeup_target_t)1)
+#define DISPATCH_QUEUE_WAKEUP_MGR (&_dispatch_mgr_q)
+#define DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT ((dispatch_queue_wakeup_target_t)-1)
-void _dispatch_queue_class_override_drainer(dispatch_queue_t dqu,
- pthread_priority_t pp, dispatch_wakeup_flags_t flags);
-void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, pthread_priority_t pp,
+void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target);
-
-void _dispatch_queue_destroy(dispatch_queue_t dq);
-void _dispatch_queue_dispose(dispatch_queue_t dq);
+dispatch_priority_t _dispatch_queue_compute_priority_and_wlh(
+ dispatch_queue_t dq, dispatch_wlh_t *wlh_out);
+void _dispatch_queue_destroy(dispatch_queue_t dq, bool *allow_free);
+void _dispatch_queue_dispose(dispatch_queue_t dq, bool *allow_free);
+void _dispatch_queue_xref_dispose(struct dispatch_queue_s *dq);
void _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq);
void _dispatch_queue_suspend(dispatch_queue_t dq);
void _dispatch_queue_resume(dispatch_queue_t dq, bool activate);
-void _dispatch_queue_finalize_activation(dispatch_queue_t dq);
-void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags);
-void _dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n);
+void _dispatch_queue_finalize_activation(dispatch_queue_t dq,
+ bool *allow_resume);
+void _dispatch_queue_invoke(dispatch_queue_t dq,
+ dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
+void _dispatch_global_queue_poke(dispatch_queue_t dq, int n, int floor);
void _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
- pthread_priority_t pp);
-void _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq);
-void _dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+ dispatch_qos_t qos);
+void _dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags);
-dispatch_queue_t _dispatch_queue_serial_drain(dispatch_queue_t dq,
- dispatch_invoke_flags_t flags, uint64_t *owned,
- struct dispatch_object_s **dc_ptr);
-void _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq,
- dispatch_invoke_flags_t flags, uint64_t to_unlock,
- struct dispatch_object_s *dc);
-void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t
- dqsq);
-void _dispatch_root_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+dispatch_queue_wakeup_target_t _dispatch_queue_serial_drain(dispatch_queue_t dq,
+ dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+ uint64_t *owned);
+void _dispatch_queue_drain_sync_waiter(dispatch_queue_t dq,
+ dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+ uint64_t owned);
+void _dispatch_queue_specific_queue_dispose(
+ dispatch_queue_specific_queue_t dqsq, bool *allow_free);
+void _dispatch_root_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags);
-void _dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq,
- struct dispatch_object_s *dou, pthread_priority_t pp);
-void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq);
-void _dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+void _dispatch_root_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
+ dispatch_qos_t qos);
+#if DISPATCH_USE_KEVENT_WORKQUEUE
+void _dispatch_root_queue_drain_deferred_item(dispatch_deferred_items_t ddi
+ DISPATCH_PERF_MON_ARGS_PROTO);
+void _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi
+ DISPATCH_PERF_MON_ARGS_PROTO);
+#endif
+void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq,
+ bool *allow_free);
+void _dispatch_main_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags);
-void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags);
void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq);
-void _dispatch_runloop_queue_dispose(dispatch_queue_t dq);
+void _dispatch_runloop_queue_dispose(dispatch_queue_t dq, bool *allow_free);
void _dispatch_mgr_queue_drain(void);
#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
void _dispatch_mgr_priority_init(void);
#else
static inline void _dispatch_kevent_workqueue_init(void) {}
#endif
-void _dispatch_sync_recurse_invoke(void *ctxt);
void _dispatch_apply_invoke(void *ctxt);
void _dispatch_apply_redirect_invoke(void *ctxt);
void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
+#define DISPATCH_BARRIER_TRYSYNC_SUSPEND 0x1
void _dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt,
- dispatch_function_t func);
+ dispatch_function_t func, uint32_t flags);
+void _dispatch_queue_atfork_child(void);
#if DISPATCH_DEBUG
void dispatch_debug_queue(dispatch_queue_t dq, const char* str);
size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf,
size_t bufsiz);
-#define DISPATCH_QUEUE_QOS_COUNT 6
-#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_QOS_COUNT * 2)
+#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_MAX * 2)
-// must be in lowest to highest qos order (as encoded in pthread_priority_t)
+// must be in lowest to highest qos order (as encoded in dispatch_qos_t)
// overcommit qos index values need bit 1 set
enum {
DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
_DISPATCH_ROOT_QUEUE_IDX_COUNT,
};
+// skip zero
+// 1 - main_q
+// 2 - mgr_q
+// 3 - mgr_root_q
+// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues
+// we use 'xadd' on Intel, so the initial value == next assigned
+#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 16
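/*
 * Illustrative sketch (editorial, not part of this patch): serial numbers
 * are handed out with an atomic fetch-add, whose return value is the
 * counter *before* the increment, which is why the initializer above equals
 * the first number assigned. Assumes the os_atomic_inc_orig() shim; the
 * _example_ name is hypothetical.
 */
static inline unsigned long
_example_next_queue_serial(void)
{
	// 'xadd' on Intel: returns the old value and bumps the counter
	return os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
}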
extern unsigned long volatile _dispatch_queue_serial_numbers;
extern struct dispatch_queue_s _dispatch_root_queues[];
extern struct dispatch_queue_s _dispatch_mgr_q;
void _dispatch_root_queues_init(void);
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-extern pthread_priority_t _dispatch_background_priority;
-extern pthread_priority_t _dispatch_user_initiated_priority;
+#if DISPATCH_DEBUG
+#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \
+ dispatch_assert_queue(&_dispatch_mgr_q)
+#else
+#define DISPATCH_ASSERT_ON_MANAGER_QUEUE()
#endif
-typedef uint8_t _dispatch_qos_class_t;
-
#pragma mark -
#pragma mark dispatch_queue_attr_t
DISPATCH_CLASS_DECL(queue_attr);
struct dispatch_queue_attr_s {
OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr);
- _dispatch_qos_class_t dqa_qos_class;
- int8_t dqa_relative_priority;
+ dispatch_priority_requested_t dqa_qos_and_relpri;
uint16_t dqa_overcommit:2;
uint16_t dqa_autorelease_frequency:2;
uint16_t dqa_concurrent:1;
void *dc_ctxt; \
void *dc_data; \
void *dc_other
-#define _DISPATCH_SIZEOF_PTR 8
#elif OS_OBJECT_HAVE_OBJC1
#define DISPATCH_CONTINUATION_HEADER(x) \
dispatch_function_t dc_func; \
void *dc_ctxt; \
void *dc_data; \
void *dc_other
-#define _DISPATCH_SIZEOF_PTR 4
#else
#define DISPATCH_CONTINUATION_HEADER(x) \
union { \
void *dc_ctxt; \
void *dc_data; \
void *dc_other
-#define _DISPATCH_SIZEOF_PTR 4
#endif
#define _DISPATCH_CONTINUATION_PTRS 8
#if DISPATCH_HW_CONFIG_UP
// UP devices don't contend on continuations so we don't need to force them to
// occupy a whole cacheline (which is intended to avoid contention)
#define DISPATCH_CONTINUATION_SIZE \
- (_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR)
+ (_DISPATCH_CONTINUATION_PTRS * DISPATCH_SIZEOF_PTR)
#else
#define DISPATCH_CONTINUATION_SIZE ROUND_UP_TO_CACHELINE_SIZE( \
- (_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR))
+ (_DISPATCH_CONTINUATION_PTRS * DISPATCH_SIZEOF_PTR))
#endif
#define ROUND_UP_TO_CONTINUATION_SIZE(x) \
(((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
~(DISPATCH_CONTINUATION_SIZE - 1u))
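/*
 * Editorial worked example (assumes 64-byte cachelines and 8-byte pointers):
 * DISPATCH_CONTINUATION_SIZE is 8 * 8 == 64, a full cacheline, and
 * ROUND_UP_TO_CONTINUATION_SIZE() rounds up with the usual power-of-two mask
 * trick, e.g. (100 + 63) & ~63ul == 128.
 */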
// continuation is a dispatch_sync or dispatch_barrier_sync
-#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x001ul
+#define DISPATCH_OBJ_SYNC_WAITER_BIT 0x001ul
// continuation acts as a barrier
#define DISPATCH_OBJ_BARRIER_BIT 0x002ul
// continuation resources are freed on run
#define DISPATCH_OBJ_CTXT_FETCH_BIT 0x040ul
// use the voucher from the continuation even if the queue has voucher set
#define DISPATCH_OBJ_ENFORCE_VOUCHER 0x080ul
+// never set on continuations, used by mach.c only
+#define DISPATCH_OBJ_MACH_BARRIER 0x1000000ul
-struct dispatch_continuation_s {
+typedef struct dispatch_continuation_s {
struct dispatch_object_s _as_do[0];
DISPATCH_CONTINUATION_HEADER(continuation);
-};
-typedef struct dispatch_continuation_s *dispatch_continuation_t;
+} *dispatch_continuation_t;
+
+typedef struct dispatch_sync_context_s {
+ struct dispatch_object_s _as_do[0];
+ struct dispatch_continuation_s _as_dc[0];
+ DISPATCH_CONTINUATION_HEADER(continuation);
+ dispatch_function_t dsc_func;
+ void *dsc_ctxt;
+#if DISPATCH_COCOA_COMPAT
+ dispatch_thread_frame_s dsc_dtf;
+#endif
+ dispatch_thread_event_s dsc_event;
+ dispatch_tid dsc_waiter;
+ dispatch_qos_t dsc_override_qos_floor;
+ dispatch_qos_t dsc_override_qos;
+ bool dsc_wlh_was_first;
+ bool dsc_release_storage;
+} *dispatch_sync_context_t;
typedef struct dispatch_continuation_vtable_s {
_OS_OBJECT_CLASS_HEADER();
DISPATCH_INVOKABLE_VTABLE_HEADER(dispatch_continuation);
-} *dispatch_continuation_vtable_t;
+} const *dispatch_continuation_vtable_t;
#ifndef DISPATCH_CONTINUATION_CACHE_LIMIT
#if TARGET_OS_EMBEDDED
void _dispatch_continuation_free_to_heap(dispatch_continuation_t c);
void _dispatch_continuation_async(dispatch_queue_t dq,
dispatch_continuation_t dc);
-void _dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq,
- dispatch_invoke_flags_t flags);
+void _dispatch_continuation_pop(dispatch_object_t dou,
+ dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+ dispatch_queue_t dq);
void _dispatch_continuation_invoke(dispatch_object_t dou,
voucher_t override_voucher, dispatch_invoke_flags_t flags);
DC_MACH_SEND_BARRRIER_DRAIN_TYPE,
DC_MACH_SEND_BARRIER_TYPE,
DC_MACH_RECV_BARRIER_TYPE,
+ DC_MACH_ASYNC_REPLY_TYPE,
#if HAVE_PTHREAD_WORKQUEUE_QOS
DC_OVERRIDE_STEALING_TYPE,
DC_OVERRIDE_OWNING_TYPE,
void
_dispatch_async_redirect_invoke(dispatch_continuation_t dc,
- dispatch_invoke_flags_t flags);
+ dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
#if HAVE_PTHREAD_WORKQUEUE_QOS
void
_dispatch_queue_override_invoke(dispatch_continuation_t dc,
- dispatch_invoke_flags_t flags);
+ dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
#endif
#define DC_VTABLE(name) (&_dispatch_continuation_vtables[DC_##name##_TYPE])
void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri,
mach_voucher_t kv);
voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t pri,
- voucher_t voucher, _dispatch_thread_set_self_t flags);
-
+ voucher_t voucher, dispatch_thread_set_self_t flags);
+#else
+static inline void
+_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri,
+ mach_voucher_t kv)
+{
+ (void)pri; (void)kv;
+}
#endif
#pragma mark -
#pragma mark dispatch_apply_t
dispatch_continuation_t da_dc;
dispatch_thread_event_s da_event;
dispatch_invoke_flags_t da_flags;
- uint32_t da_thr_cnt;
+ int32_t da_thr_cnt;
};
typedef struct dispatch_apply_s *dispatch_apply_t;
#ifdef __BLOCKS__
-#define DISPATCH_BLOCK_API_MASK (0x80u - 1)
+#define DISPATCH_BLOCK_API_MASK (0x100u - 1)
#define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31)
#define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30)
void _dispatch_continuation_init_slow(dispatch_continuation_t dc,
dispatch_queue_class_t dqu, dispatch_block_flags_t flags);
-void _dispatch_continuation_update_bits(dispatch_continuation_t dc,
- uintptr_t dc_flags);
-bool _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
+long _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
/* exported for tests in dispatch_trysync.c */
DISPATCH_EXPORT DISPATCH_NOTHROW
-bool _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt,
+long _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t f);
#endif /* __BLOCKS__ */