/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */
#ifndef __DISPATCH_INLINE_INTERNAL__
#define __DISPATCH_INLINE_INTERNAL__

#ifndef __DISPATCH_INDIRECT__
#error "Please #include <dispatch/dispatch.h> instead of this file directly."
#include <dispatch/base.h> // for HeaderDoc
#endif
#if DISPATCH_USE_CLIENT_CALLOUT

DISPATCH_NOTHROW void
_dispatch_client_callout(void *ctxt, dispatch_function_t f);
DISPATCH_NOTHROW void
_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t));
#if HAVE_MACH
DISPATCH_NOTHROW void
_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
		dispatch_mach_msg_t dmsg, mach_error_t error,
		dispatch_mach_handler_function_t f);
#endif // HAVE_MACH

#else // !DISPATCH_USE_CLIENT_CALLOUT

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_client_callout(void *ctxt, dispatch_function_t f)
{
	return f(ctxt);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
{
	return f(ctxt, i);
}

#if HAVE_MACH
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
		dispatch_mach_msg_t dmsg, mach_error_t error,
		dispatch_mach_handler_function_t f)
{
	return f(ctxt, reason, dmsg, error);
}
#endif // HAVE_MACH

#endif // !DISPATCH_USE_CLIENT_CALLOUT
#pragma mark -
#pragma mark _os_object_t & dispatch_object_t
#if DISPATCH_PURE_C

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_has_vtable(dispatch_object_t dou)
{
	uintptr_t dc_flags = dou._dc->dc_flags;

	// vtables are pointers far away from the low page in memory
	return dc_flags > 0xffful;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_is_continuation(dispatch_object_t dou)
{
	if (_dispatch_object_has_vtable(dou)) {
		return dx_metatype(dou._do) == _DISPATCH_CONTINUATION_TYPE;
	}
	return true;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_has_type(dispatch_object_t dou, unsigned long type)
{
	return _dispatch_object_has_vtable(dou) && dx_type(dou._do) == type;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_is_redirection(dispatch_object_t dou)
{
	return _dispatch_object_has_type(dou,
			DISPATCH_CONTINUATION_TYPE(ASYNC_REDIRECT));
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_is_barrier(dispatch_object_t dou)
{
	dispatch_queue_flags_t dq_flags;

	if (!_dispatch_object_has_vtable(dou)) {
		return (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT);
	}
	switch (dx_metatype(dou._do)) {
	case _DISPATCH_QUEUE_TYPE:
	case _DISPATCH_SOURCE_TYPE:
		dq_flags = os_atomic_load2o(dou._dq, dq_atomic_flags, relaxed);
		return dq_flags & DQF_BARRIER_BIT;
	default:
		return false;
	}
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_is_slow_item(dispatch_object_t dou)
{
	if (_dispatch_object_has_vtable(dou)) {
		return false;
	}
	return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_is_slow_non_barrier(dispatch_object_t dou)
{
	if (_dispatch_object_has_vtable(dou)) {
		return false;
	}
	return ((dou._dc->dc_flags &
			(DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) ==
			(DISPATCH_OBJ_SYNC_SLOW_BIT));
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_is_slow_barrier(dispatch_object_t dou)
{
	if (_dispatch_object_has_vtable(dou)) {
		return false;
	}
	return ((dou._dc->dc_flags &
			(DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) ==
			(DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT));
}
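
/*
 * Illustrative example (not part of the original header): for vtable-less
 * continuations, dc_flags carries the classification bits tested above, so a
 * hypothetical helper combining the two "slow" predicates would read:
 *
 *	static inline bool
 *	_example_object_is_slow(dispatch_object_t dou)
 *	{
 *		return _dispatch_object_is_slow_non_barrier(dou) ||
 *				_dispatch_object_is_slow_barrier(dou);
 *	}
 *
 * which is equivalent to _dispatch_object_is_slow_item(): both are true
 * exactly when DISPATCH_OBJ_SYNC_SLOW_BIT is set on a bare continuation.
 */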
DISPATCH_ALWAYS_INLINE
static inline _os_object_t
_os_object_retain_internal_inline(_os_object_t obj)
{
	int ref_cnt = _os_object_refcnt_inc(obj);
	if (unlikely(ref_cnt <= 0)) {
		_OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
	}
	return obj;
}

DISPATCH_ALWAYS_INLINE
static inline void
_os_object_release_internal_inline_no_dispose(_os_object_t obj)
{
	int ref_cnt = _os_object_refcnt_dec(obj);
	if (likely(ref_cnt >= 0)) {
		return;
	}
	if (ref_cnt == -1) {
		_OS_OBJECT_CLIENT_CRASH("Unexpected release of an object");
	}
	_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
}

DISPATCH_ALWAYS_INLINE
static inline void
_os_object_release_internal_inline(_os_object_t obj)
{
	int ref_cnt = _os_object_refcnt_dec(obj);
	if (likely(ref_cnt >= 0)) {
		return;
	}
	if (unlikely(ref_cnt < -1)) {
		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
	}
	int xref_cnt = obj->os_obj_xref_cnt;
	if (unlikely(xref_cnt >= 0)) {
		DISPATCH_INTERNAL_CRASH(xref_cnt,
				"Release while external references exist");
	}
	// _os_object_refcnt_dispose_barrier() is in _os_object_dispose()
	return _os_object_dispose(obj);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_retain(dispatch_object_t dou)
{
	(void)_os_object_retain_internal_inline(dou._os_obj);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_release(dispatch_object_t dou)
{
	_os_object_release_internal_inline(dou._os_obj);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_release_tailcall(dispatch_object_t dou)
{
	_os_object_release_internal(dou._os_obj);
}

DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL
static inline void
_dispatch_object_set_target_queue_inline(dispatch_object_t dou,
		dispatch_queue_t tq)
{
	_dispatch_retain(tq);
	tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release);
	if (tq) _dispatch_release(tq);
	_dispatch_object_debug(dou._do, "%s", __func__);
}

#endif // DISPATCH_PURE_C
#pragma mark -
#pragma mark dispatch_thread
#if DISPATCH_PURE_C

#define DISPATCH_DEFERRED_ITEMS_MAGIC  0xdefe55edul /* deferred */
#define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 8
#ifdef WORKQ_KEVENT_EVENT_BUFFER_LEN
_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >=
		DISPATCH_DEFERRED_ITEMS_EVENT_COUNT,
		"our list should not be longer than the kernel's");
#endif

typedef struct dispatch_deferred_items_s {
	uint32_t ddi_magic;
	dispatch_queue_t ddi_stashed_dq;
	struct dispatch_object_s *ddi_stashed_dou;
	dispatch_priority_t ddi_stashed_pp;
	int ddi_nevents;
	int ddi_maxevents;
	_dispatch_kevent_qos_s ddi_eventlist[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT];
} dispatch_deferred_items_s, *dispatch_deferred_items_t;

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_deferred_items_set(dispatch_deferred_items_t ddi)
{
	_dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi);
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_deferred_items_t
_dispatch_deferred_items_get(void)
{
	dispatch_deferred_items_t ddi = (dispatch_deferred_items_t)
			_dispatch_thread_getspecific(dispatch_deferred_items_key);
	if (ddi && ddi->ddi_magic == DISPATCH_DEFERRED_ITEMS_MAGIC) {
		return ddi;
	}
	return NULL;
}

#endif // DISPATCH_PURE_C
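
/*
 * Usage sketch (illustrative, not from the original source): code running on
 * a workqueue thread can check whether a deferred-items frame is installed
 * before stashing state into it, e.g.
 *
 *	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
 *	if (ddi) {
 *		// ddi_magic was already validated by the getter, so this really
 *		// is a deferred-items frame and not unrelated thread-specific data
 *		ddi->ddi_stashed_dq = dq;	// hypothetical use of the stash fields
 *	}
 */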
#pragma mark -
#pragma mark dispatch_thread
#if DISPATCH_PURE_C

DISPATCH_ALWAYS_INLINE
static inline dispatch_thread_context_t
_dispatch_thread_context_find(const void *key)
{
	dispatch_thread_context_t dtc =
			_dispatch_thread_getspecific(dispatch_context_key);
	while (dtc) {
		if (dtc->dtc_key == key) {
			return dtc;
		}
		dtc = dtc->dtc_prev;
	}
	return NULL;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_context_push(dispatch_thread_context_t ctxt)
{
	ctxt->dtc_prev = _dispatch_thread_getspecific(dispatch_context_key);
	_dispatch_thread_setspecific(dispatch_context_key, ctxt);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_context_pop(dispatch_thread_context_t ctxt)
{
	dispatch_assert(_dispatch_thread_getspecific(dispatch_context_key) == ctxt);
	_dispatch_thread_setspecific(dispatch_context_key, ctxt->dtc_prev);
}

typedef struct dispatch_thread_frame_iterator_s {
	dispatch_queue_t dtfi_queue;
	dispatch_thread_frame_t dtfi_frame;
} *dispatch_thread_frame_iterator_t;
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_frame_iterate_start(dispatch_thread_frame_iterator_t it)
{
	_dispatch_thread_getspecific_pair(
			dispatch_queue_key, (void **)&it->dtfi_queue,
			dispatch_frame_key, (void **)&it->dtfi_frame);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_frame_iterate_next(dispatch_thread_frame_iterator_t it)
{
	dispatch_thread_frame_t dtf = it->dtfi_frame;
	dispatch_queue_t dq = it->dtfi_queue;

	if (dtf) {
		if (dq->do_targetq) {
			// redirections and trysync_f may skip some frames,
			// so we need to simulate seeing the missing links
			// however the bottom root queue is always present
			it->dtfi_queue = dq->do_targetq;
			if (it->dtfi_queue == dtf->dtf_queue) {
				it->dtfi_frame = dtf->dtf_prev;
			}
		} else {
			it->dtfi_queue = dtf->dtf_queue;
			it->dtfi_frame = dtf->dtf_prev;
		}
	} else if (dq) {
		it->dtfi_queue = dq->do_targetq;
	}
}
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_thread_frame_find_queue(dispatch_queue_t dq)
{
	struct dispatch_thread_frame_iterator_s it;

	_dispatch_thread_frame_iterate_start(&it);
	while (it.dtfi_queue) {
		if (it.dtfi_queue == dq) {
			return true;
		}
		_dispatch_thread_frame_iterate_next(&it);
	}
	return false;
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_thread_frame_t
_dispatch_thread_frame_get_current(void)
{
	return _dispatch_thread_getspecific(dispatch_frame_key);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_frame_set_current(dispatch_thread_frame_t dtf)
{
	_dispatch_thread_setspecific(dispatch_frame_key, dtf);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_frame_save_state(dispatch_thread_frame_t dtf)
{
	_dispatch_thread_getspecific_packed_pair(
			dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_frame_push(dispatch_thread_frame_t dtf, dispatch_queue_t dq)
{
	_dispatch_thread_frame_save_state(dtf);
	_dispatch_thread_setspecific_pair(dispatch_queue_key, dq,
			dispatch_frame_key, dtf);
	dtf->dtf_deferred = NULL;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_frame_push_and_rebase(dispatch_thread_frame_t dtf,
		dispatch_queue_t dq, dispatch_thread_frame_t new_base)
{
	_dispatch_thread_frame_save_state(dtf);
	_dispatch_thread_setspecific_pair(dispatch_queue_key, dq,
			dispatch_frame_key, new_base);
	dtf->dtf_deferred = NULL;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_frame_pop(dispatch_thread_frame_t dtf)
{
	_dispatch_thread_setspecific_packed_pair(
			dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue);
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_t
_dispatch_thread_frame_stash(dispatch_thread_frame_t dtf)
{
	_dispatch_thread_getspecific_pair(
			dispatch_queue_key, (void **)&dtf->dtf_queue,
			dispatch_frame_key, (void **)&dtf->dtf_prev);
	_dispatch_thread_frame_pop(dtf->dtf_prev);
	return dtf->dtf_queue;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_frame_unstash(dispatch_thread_frame_t dtf)
{
	_dispatch_thread_frame_pop(dtf);
}
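
/*
 * Pairing sketch (illustrative, not part of the original header): a drainer
 * brackets work done on behalf of a queue with a frame push/pop so that the
 * iterator above sees a consistent chain, e.g.
 *
 *	struct dispatch_thread_frame_s dtf;
 *	_dispatch_thread_frame_push(&dtf, dq);
 *	// ... invoke the queued items ...
 *	_dispatch_thread_frame_pop(&dtf);
 *
 * _dispatch_thread_frame_stash()/_unstash() follow the same discipline when a
 * frame has to be temporarily set aside.
 */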
DISPATCH_ALWAYS_INLINE
static inline int
_dispatch_wqthread_override_start_check_owner(mach_port_t thread,
		pthread_priority_t pp, mach_port_t *ulock_addr)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return 0;
	return _pthread_workqueue_override_start_direct_check_owner(thread,
			pp, ulock_addr);
#else
	(void)thread; (void)pp; (void)ulock_addr;
	return 0;
#endif
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_wqthread_override_start(mach_port_t thread,
		pthread_priority_t pp)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return;
	(void)_pthread_workqueue_override_start_direct(thread, pp);
#else
	(void)thread; (void)pp;
#endif
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_wqthread_override_reset(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return;
	(void)_pthread_workqueue_override_reset();
#endif
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_override_start(mach_port_t thread, pthread_priority_t pp,
		void *resource)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return;
	(void)_pthread_qos_override_start_direct(thread, pp, resource);
#else
	(void)thread; (void)pp; (void)resource;
#endif
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_override_end(mach_port_t thread, void *resource)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return;
	(void)_pthread_qos_override_end_direct(thread, resource);
#else
	(void)thread; (void)resource;
#endif
}
#if DISPATCH_DEBUG_QOS && HAVE_PTHREAD_WORKQUEUE_QOS
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_qos_class_is_valid(pthread_priority_t pp)
{
	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	if (pp > (1UL << (DISPATCH_QUEUE_QOS_COUNT +
			_PTHREAD_PRIORITY_QOS_CLASS_SHIFT))) {
		return false;
	}
	return true;
}
#define _dispatch_assert_is_valid_qos_class(pp)  ({ typeof(pp) _pp = (pp); \
		if (unlikely(!_dispatch_qos_class_is_valid(_pp))) { \
			DISPATCH_INTERNAL_CRASH(_pp, "Invalid qos class"); \
		} \
	})

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_qos_override_is_valid(pthread_priority_t pp)
{
	if (pp & (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK) {
		return false;
	}
	return _dispatch_qos_class_is_valid(pp);
}
#define _dispatch_assert_is_valid_qos_override(pp)  ({ typeof(pp) _pp = (pp); \
		if (unlikely(!_dispatch_qos_override_is_valid(_pp))) { \
			DISPATCH_INTERNAL_CRASH(_pp, "Invalid override"); \
		} \
	})
#else
#define _dispatch_assert_is_valid_qos_override(pp) (void)(pp)
#define _dispatch_assert_is_valid_qos_class(pp) (void)(pp)
#endif

#endif // DISPATCH_PURE_C
#pragma mark -
#pragma mark dispatch_queue_t state accessors
#if DISPATCH_PURE_C

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_flags_t
_dispatch_queue_atomic_flags(dispatch_queue_t dq)
{
	return os_atomic_load2o(dq, dq_atomic_flags, relaxed);
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_flags_t
_dispatch_queue_atomic_flags_set(dispatch_queue_t dq,
		dispatch_queue_flags_t bits)
{
	return os_atomic_or2o(dq, dq_atomic_flags, bits, relaxed);
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_flags_t
_dispatch_queue_atomic_flags_set_and_clear_orig(dispatch_queue_t dq,
		dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits)
{
	dispatch_queue_flags_t oflags, nflags;
	os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, {
		nflags = (oflags | add_bits) & ~clr_bits;
	});
	return oflags;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_flags_t
_dispatch_queue_atomic_flags_set_and_clear(dispatch_queue_t dq,
		dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits)
{
	dispatch_queue_flags_t oflags, nflags;
	os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, {
		nflags = (oflags | add_bits) & ~clr_bits;
	});
	return nflags;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_flags_t
_dispatch_queue_atomic_flags_set_orig(dispatch_queue_t dq,
		dispatch_queue_flags_t bits)
{
	return os_atomic_or_orig2o(dq, dq_atomic_flags, bits, relaxed);
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_flags_t
_dispatch_queue_atomic_flags_clear(dispatch_queue_t dq,
		dispatch_queue_flags_t bits)
{
	return os_atomic_and2o(dq, dq_atomic_flags, ~bits, relaxed);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_is_thread_bound(dispatch_queue_t dq)
{
	return _dispatch_queue_atomic_flags(dq) & DQF_THREAD_BOUND;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_cannot_trysync(dispatch_queue_t dq)
{
	return _dispatch_queue_atomic_flags(dq) & DQF_CANNOT_TRYSYNC;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_label_needs_free(dispatch_queue_t dq)
{
	return _dispatch_queue_atomic_flags(dq) & DQF_LABEL_NEEDS_FREE;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_invoke_flags_t
_dispatch_queue_autorelease_frequency(dispatch_queue_t dq)
{
	const unsigned long factor =
			DISPATCH_INVOKE_AUTORELEASE_ALWAYS / DQF_AUTORELEASE_ALWAYS;
	dispatch_static_assert(factor > 0);

	dispatch_queue_flags_t qaf = _dispatch_queue_atomic_flags(dq);

	qaf &= _DQF_AUTORELEASE_MASK;
	return (dispatch_invoke_flags_t)qaf * factor;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_invoke_flags_t
_dispatch_queue_merge_autorelease_frequency(dispatch_queue_t dq,
		dispatch_invoke_flags_t flags)
{
	dispatch_invoke_flags_t qaf = _dispatch_queue_autorelease_frequency(dq);

	if (qaf) {
		flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK;
		flags |= qaf;
	}
	return flags;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_has_immutable_target(dispatch_queue_t dq)
{
	if (dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) {
		return false;
	}
	return dx_type(dq) != DISPATCH_QUEUE_LEGACY_TYPE;
}

#endif // DISPATCH_PURE_C
#ifndef __cplusplus

DISPATCH_ALWAYS_INLINE
static inline uint32_t
_dq_state_suspend_cnt(uint64_t dq_state)
{
	return (uint32_t)(dq_state / DISPATCH_QUEUE_SUSPEND_INTERVAL);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_has_side_suspend_cnt(uint64_t dq_state)
{
	return dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT;
}

DISPATCH_ALWAYS_INLINE
static inline uint32_t
_dq_state_extract_width_bits(uint64_t dq_state)
{
	dq_state &= DISPATCH_QUEUE_WIDTH_MASK;
	return (uint32_t)(dq_state >> DISPATCH_QUEUE_WIDTH_SHIFT);
}

DISPATCH_ALWAYS_INLINE
static inline uint32_t
_dq_state_available_width(uint64_t dq_state)
{
	uint32_t full = DISPATCH_QUEUE_WIDTH_FULL;
	if (fastpath(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) {
		return full - _dq_state_extract_width_bits(dq_state);
	}
	return 0;
}

DISPATCH_ALWAYS_INLINE
static inline uint32_t
_dq_state_used_width(uint64_t dq_state, uint16_t dq_width)
{
	uint32_t full = DISPATCH_QUEUE_WIDTH_FULL;
	uint32_t width = _dq_state_extract_width_bits(dq_state);

	if (dq_state & DISPATCH_QUEUE_PENDING_BARRIER) {
		// DISPATCH_QUEUE_PENDING_BARRIER means (dq_width - 1) of the used width
		// is pre-reservation that we want to ignore
		return width - (full - dq_width) - (dq_width - 1);
	}
	return width - (full - dq_width);
}
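
/*
 * Worked example (illustrative, not in the original header): a concurrent
 * queue of width dq_width starts with (DISPATCH_QUEUE_WIDTH_FULL - dq_width)
 * width units pre-consumed in dq_state, so _dq_state_used_width() subtracts
 * that bias back out:
 *
 *	used = width_bits - (WIDTH_FULL - dq_width);                  // no pending barrier
 *	used = width_bits - (WIDTH_FULL - dq_width) - (dq_width - 1); // pending barrier
 *
 * the second form additionally ignoring the (dq_width - 1) units reserved for
 * the pending barrier.
 */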
DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_is_suspended(uint64_t dq_state)
{
	return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION;
}
#define DISPATCH_QUEUE_IS_SUSPENDED(x)  _dq_state_is_suspended((x)->dq_state)
DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_is_inactive(uint64_t dq_state)
{
	return dq_state & DISPATCH_QUEUE_INACTIVE;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_needs_activation(uint64_t dq_state)
{
	return dq_state & DISPATCH_QUEUE_NEEDS_ACTIVATION;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_is_in_barrier(uint64_t dq_state)
{
	return dq_state & DISPATCH_QUEUE_IN_BARRIER;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_has_available_width(uint64_t dq_state)
{
	return !(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_has_pending_barrier(uint64_t dq_state)
{
	return dq_state & DISPATCH_QUEUE_PENDING_BARRIER;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_is_dirty(uint64_t dq_state)
{
	return dq_state & DISPATCH_QUEUE_DIRTY;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_is_enqueued(uint64_t dq_state)
{
	return dq_state & DISPATCH_QUEUE_ENQUEUED;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_has_override(uint64_t dq_state)
{
	return dq_state & DISPATCH_QUEUE_HAS_OVERRIDE;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_lock_owner
_dq_state_drain_owner(uint64_t dq_state)
{
	return _dispatch_lock_owner((dispatch_lock)dq_state);
}
#define DISPATCH_QUEUE_DRAIN_OWNER(dq) \
	_dq_state_drain_owner(os_atomic_load2o(dq, dq_state, relaxed))
DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_drain_pended(uint64_t dq_state)
{
	return (dq_state & DISPATCH_QUEUE_DRAIN_PENDED);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_drain_locked_by(uint64_t dq_state, uint32_t owner)
{
	if (_dq_state_drain_pended(dq_state)) {
		return false;
	}
	return _dq_state_drain_owner(dq_state) == owner;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_drain_locked(uint64_t dq_state)
{
	return (dq_state & DISPATCH_QUEUE_DRAIN_OWNER_MASK) != 0;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_has_waiters(uint64_t dq_state)
{
	return _dispatch_lock_has_waiters((dispatch_lock)dq_state);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_is_sync_runnable(uint64_t dq_state)
{
	return dq_state < DISPATCH_QUEUE_IN_BARRIER;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_is_runnable(uint64_t dq_state)
{
	return dq_state < DISPATCH_QUEUE_WIDTH_FULL_BIT;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_should_wakeup(uint64_t dq_state)
{
	return _dq_state_is_runnable(dq_state) &&
			!_dq_state_is_enqueued(dq_state) &&
			!_dq_state_drain_locked(dq_state);
}

#endif // __cplusplus
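
/*
 * Reading aid (assumption-level sketch; the authoritative bit layout lives in
 * queue_internal.h): the helpers above decompose dq_state into the drain lock
 * (owner and waiter bits), the width/barrier accounting, and the suspend
 * count. A queue "should wake up" only when all three of the following hold,
 * which is exactly what _dq_state_should_wakeup() tests:
 *
 *	_dq_state_is_runnable(s) && !_dq_state_is_enqueued(s) &&
 *			!_dq_state_drain_locked(s)
 */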
#pragma mark -
#pragma mark dispatch_queue_t state machine

static inline bool _dispatch_queue_need_override(dispatch_queue_class_t dqu,
		pthread_priority_t pp);
static inline bool _dispatch_queue_need_override_retain(
		dispatch_queue_class_t dqu, pthread_priority_t pp);
static inline dispatch_priority_t _dispatch_queue_reset_override_priority(
		dispatch_queue_class_t dqu, bool qp_is_floor);
static inline bool _dispatch_queue_reinstate_override_priority(
		dispatch_queue_class_t dqu, dispatch_priority_t new_op);
static inline pthread_priority_t _dispatch_get_defaultpriority(void);
static inline void _dispatch_set_defaultpriority_override(void);
static inline void _dispatch_reset_defaultpriority(pthread_priority_t pp);
static inline pthread_priority_t _dispatch_get_priority(void);
static inline pthread_priority_t _dispatch_set_defaultpriority(
		pthread_priority_t pp, pthread_priority_t *new_pp);
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_xref_dispose(struct dispatch_queue_s *dq)
{
	if (slowpath(DISPATCH_QUEUE_IS_SUSPENDED(dq))) {
		// Arguments for and against this assert are within 6705399
		DISPATCH_CLIENT_CRASH(dq, "Release of a suspended object");
	}
	os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed);
}
// Note to later developers: ensure that any initialization changes are
// made for statically allocated queues (i.e. _dispatch_main_q).
static inline void
_dispatch_queue_init(dispatch_queue_t dq, dispatch_queue_flags_t dqf,
		uint16_t width, bool inactive)
{
	uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width);

	if (inactive) {
		dq_state += DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION;
		dq->do_ref_cnt++; // rdar://8181908 see _dispatch_queue_resume
	}
	dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;
	dqf |= (dispatch_queue_flags_t)width << DQF_WIDTH_SHIFT;
	os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
	dq->dq_state = dq_state;
	dq->dq_override_voucher = DISPATCH_NO_VOUCHER;
	dq->dq_serialnum =
			os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
}
/* Used by:
 * - _dispatch_queue_set_target_queue
 * - changing dispatch source handlers
 *
 * Tries to prevent concurrent wakeup of an inactive queue by suspending it.
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_inactive_suspend(dispatch_queue_t dq)
{
	uint64_t dq_state, value;

	(void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
		if (!fastpath(_dq_state_is_inactive(dq_state))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		value = dq_state + DISPATCH_QUEUE_SUSPEND_INTERVAL;
	});
	if (slowpath(!_dq_state_is_suspended(dq_state)) ||
			slowpath(_dq_state_has_side_suspend_cnt(dq_state))) {
		// Crashing here means that 128+ dispatch_suspend() calls have been
		// made on an inactive object and then dispatch_set_target_queue() or
		// dispatch_set_*_handler() has been called.
		//
		// We don't want to handle the side suspend count in a codepath that
		// needs to be fast.
		DISPATCH_CLIENT_CRASH(dq, "Too many calls to dispatch_suspend() "
				"prior to calling dispatch_set_target_queue() "
				"or dispatch_set_*_handler()");
	}
	return true;
}
/* Must be used by any caller meaning to do a speculative wakeup when the caller
 * was preventing other wakeups (for example dispatch_resume() or a drainer not
 * doing a drain_try_unlock() and not observing DIRTY)
 *
 * In that case this call loads DIRTY with an acquire barrier so that when
 * other threads have made changes (such as dispatch_source_cancel()) the
 * caller can take these state machine changes into account in its decision to
 * wake up the object.
 */
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_try_wakeup(dispatch_queue_t dq, uint64_t dq_state,
		dispatch_wakeup_flags_t flags)
{
	if (_dq_state_should_wakeup(dq_state)) {
		if (slowpath(_dq_state_is_dirty(dq_state))) {
			// <rdar://problem/14637483>
			// seq_cst wrt state changes that were flushed and not acted upon
			os_atomic_thread_fence(acquire);
		}
		return dx_wakeup(dq, 0, flags);
	}
	if (flags & DISPATCH_WAKEUP_CONSUME) {
		return _dispatch_release_tailcall(dq);
	}
}
/* Used by:
 * - _dispatch_queue_class_invoke (normal path)
 * - _dispatch_queue_override_invoke (stealer)
 *
 * Initial state must be { sc:0, ib:0, qf:0, dl:0 }
 * Final state forces { dl:self, qf:1, d: 0 }
 * ib:1 is forced when the width acquired is equivalent to the barrier width
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline uint64_t
_dispatch_queue_drain_try_lock(dispatch_queue_t dq,
		dispatch_invoke_flags_t flags, uint64_t *dq_state)
{
	uint64_t pending_barrier_width =
			(dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
	uint64_t xor_owner_and_set_full_width =
			_dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT;
	uint64_t clear_enqueued_bit, old_state, new_state;

	if (flags & DISPATCH_INVOKE_STEALING) {
		clear_enqueued_bit = 0;
	} else {
		clear_enqueued_bit = DISPATCH_QUEUE_ENQUEUED;
	}

	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
		new_state = old_state;
		new_state ^= clear_enqueued_bit;
		if (likely(_dq_state_is_runnable(old_state) &&
				!_dq_state_drain_locked(old_state))) {
			//
			// Only keep the HAS_WAITER bit (and ENQUEUED if stealing).
			// In particular acquiring the drain lock clears the DIRTY bit
			//
			new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
			//
			// For the NOWAITERS_BIT case, the thread identity
			// has NOWAITERS_BIT set, and NOWAITERS_BIT was kept above,
			// so the xor below flips the NOWAITERS_BIT to 0 as expected.
			//
			// For the non inverted WAITERS_BIT case, WAITERS_BIT is not set in
			// the thread identity, and the xor leaves the bit alone.
			//
			new_state ^= xor_owner_and_set_full_width;
			if (_dq_state_has_pending_barrier(old_state) ||
					old_state + pending_barrier_width <
					DISPATCH_QUEUE_WIDTH_FULL_BIT) {
				new_state |= DISPATCH_QUEUE_IN_BARRIER;
			}
		} else if (!clear_enqueued_bit) {
			os_atomic_rmw_loop_give_up(break);
		}
	});

	if (dq_state) *dq_state = new_state;
	if (likely(_dq_state_is_runnable(old_state) &&
			!_dq_state_drain_locked(old_state))) {
		new_state &= DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_FULL_BIT;
		old_state &= DISPATCH_QUEUE_WIDTH_MASK;
		return new_state - old_state;
	}
	return 0;
}
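
/*
 * Illustrative note (not in the original header): the non-zero value returned
 * by _dispatch_queue_drain_try_lock() is the amount of dq_state the caller now
 * owns and must hand back via drain_try_unlock() or a lock transfer, computed
 * as the delta between the acquired and the prior width/barrier bits:
 *
 *	owned = (new_state & (IN_BARRIER | WIDTH_FULL_BIT))
 *			- (old_state & WIDTH_MASK);
 *
 * A return of 0 means the drain lock was not acquired.
 */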
/* Used by _dispatch_barrier_{try,}sync
 *
 * Note, this fails if any of e:1 or dl!=0, but that allows this code to be a
 * simple cmpxchg which is significantly faster on Intel, and makes a
 * significant difference on the uncontended codepath.
 *
 * See discussion for DISPATCH_QUEUE_DIRTY in queue_internal.h
 *
 * Initial state must be `completely idle`
 * Final state forces { ib:1, qf:1, w:0 }
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq)
{
	uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER;
	value |= _dispatch_tid_self();

	return os_atomic_cmpxchg2o(dq, dq_state,
			DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width), value, acquire);
}
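
/*
 * Sketch of the fast path this enables (illustrative, simplified): an
 * uncontended barrier sync reduces to this single cmpxchg from the idle
 * state, e.g.
 *
 *	if (_dispatch_queue_try_acquire_barrier_sync(dq)) {
 *		// run the work inline, then release the barrier on completion
 *	} else {
 *		// contended: fall back to the slow path that enqueues and waits
 *	}
 */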
/* Used by _dispatch_sync for root queues and some drain codepaths
 *
 * Root queues have no strict ordering and dispatch_sync() always goes through.
 * Drain is the sole setter of `dl` hence can use this non failing version of
 * _dispatch_queue_try_acquire_sync().
 *
 * Final state: { w += 1 }
 */
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_reserve_sync_width(dispatch_queue_t dq)
{
	(void)os_atomic_add2o(dq, dq_state,
			DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed);
}

/* Used by _dispatch_sync on non-serial queues
 *
 * Initial state must be { sc:0, ib:0, pb:0, d:0 }
 * Final state: { w += 1 }
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq)
{
	uint64_t dq_state, value;

	return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
		if (!fastpath(_dq_state_is_sync_runnable(dq_state)) ||
				slowpath(_dq_state_is_dirty(dq_state)) ||
				slowpath(_dq_state_has_pending_barrier(dq_state))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
	});
}
/* Used by _dispatch_apply_redirect
 *
 * Try to acquire at most da_width and returns what could be acquired,
 * possibly 0
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline uint32_t
_dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, uint32_t da_width)
{
	uint64_t dq_state, value;
	uint32_t width;

	(void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
		width = _dq_state_available_width(dq_state);
		if (!fastpath(width)) {
			os_atomic_rmw_loop_give_up(return 0);
		}
		if (width > da_width) {
			width = da_width;
		}
		value = dq_state + width * DISPATCH_QUEUE_WIDTH_INTERVAL;
	});
	return width;
}

/* Used by _dispatch_apply_redirect
 *
 * Release width acquired by _dispatch_queue_try_acquire_width
 */
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_relinquish_width(dispatch_queue_t dq, uint32_t da_width)
{
	(void)os_atomic_sub2o(dq, dq_state,
			da_width * DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed);
}
/* Used by target-queue recursing code
 *
 * Initial state must be { sc:0, ib:0, qf:0, pb:0, d:0 }
 * Final state: { w += 1 }
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_acquire_async(dispatch_queue_t dq)
{
	uint64_t dq_state, value;

	return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, acquire, {
		if (!fastpath(_dq_state_is_runnable(dq_state)) ||
				slowpath(_dq_state_is_dirty(dq_state)) ||
				slowpath(_dq_state_has_pending_barrier(dq_state))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
	});
}
/* Used at the end of Drainers
 *
 * This adjusts the `owned` width when the next continuation is already known
 * to account for its barrierness.
 */
DISPATCH_ALWAYS_INLINE
static inline uint64_t
_dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned,
		struct dispatch_object_s *next_dc)
{
	uint64_t reservation;

	if (slowpath(dq->dq_width > 1)) {
		if (next_dc && _dispatch_object_is_barrier(next_dc)) {
			reservation  = DISPATCH_QUEUE_PENDING_BARRIER;
			reservation += (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
			owned -= reservation;
		}
	}
	return owned;
}
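
/*
 * Worked example (illustrative): on a width-4 queue, a drainer about to hand
 * off to a barrier continuation gives back the pending-barrier reservation:
 *
 *	owned -= DISPATCH_QUEUE_PENDING_BARRIER
 *			+ 3 * DISPATCH_QUEUE_WIDTH_INTERVAL;
 *
 * so that the subsequent unlock leaves that reservation in dq_state for the
 * barrier to claim.
 */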
/* Used at the end of Drainers
 *
 * Unlocking fails if the DIRTY bit is seen (and the queue is not suspended).
 * In that case, only the DIRTY bit is cleared. The DIRTY bit is therefore used
 * as a signal to renew the drain lock instead of releasing it.
 *
 * Successful unlock forces { dl:0, d:0, qo:0 } and gives back `owned`
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned)
{
	uint64_t old_state = os_atomic_load2o(dq, dq_state, relaxed);
	uint64_t new_state;
	dispatch_priority_t pp = 0, op;

	do {
		if (unlikely(_dq_state_is_dirty(old_state) &&
				!_dq_state_is_suspended(old_state))) {
			// just renew the drain lock with an acquire barrier, to see
			// what the enqueuer that set DIRTY has done.
			os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DIRTY, acquire);
			_dispatch_queue_reinstate_override_priority(dq, pp);
			return false;
		}
		new_state = old_state - owned;
		if ((new_state & DISPATCH_QUEUE_WIDTH_FULL_BIT) ||
				_dq_state_is_suspended(old_state)) {
			// the test for the WIDTH_FULL_BIT is about narrow concurrent queues
			// releasing the drain lock while being at the width limit
			//
			// _non_barrier_complete() will set the DIRTY bit when going back
			// under the limit which will cause the try_unlock to fail
			new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
		} else {
			new_state &= ~DISPATCH_QUEUE_DIRTY;
			new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
			// This current owner is the only one that can clear HAS_OVERRIDE,
			// so accumulating reset overrides here is valid.
			if (unlikely(_dq_state_has_override(new_state))) {
				new_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE;
				dispatch_assert(!_dispatch_queue_is_thread_bound(dq));
				op = _dispatch_queue_reset_override_priority(dq, false);
				if (op > pp) pp = op;
			}
		}
	} while (!fastpath(os_atomic_cmpxchgvw2o(dq, dq_state,
			old_state, new_state, &old_state, release)));

	if (_dq_state_has_override(old_state)) {
		// Ensure that the root queue sees that this thread was overridden.
		_dispatch_set_defaultpriority_override();
	}
	return true;
}
/* Used at the end of Drainers when the next work item is known
 * and that the dirty-head check isn't needed.
 *
 * This releases `owned`, clears DIRTY, and handles HAS_OVERRIDE when seen.
 */
DISPATCH_ALWAYS_INLINE
static inline uint64_t
_dispatch_queue_drain_lock_transfer_or_unlock(dispatch_queue_t dq,
		uint64_t owned, mach_port_t next_owner, uint64_t *orig_state)
{
	uint64_t dq_state, value;

#ifdef DLOCK_NOWAITERS_BIT
	// The NOWAITERS_BIT state must not change through the transfer. It means
	// that if next_owner is 0 the bit must be flipped in the rmw_loop below,
	// and if next_owner is set, then the bit must be left unchanged.
	//
	// - when next_owner is 0, the xor below sets NOWAITERS_BIT in next_owner,
	//   which causes the second xor to flip the bit as expected.
	// - if next_owner is not 0, it has the NOWAITERS_BIT set, so we have to
	//   clear it so that the second xor leaves the NOWAITERS_BIT alone.
	next_owner ^= DLOCK_NOWAITERS_BIT;
#endif
	os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, release, {
		value = dq_state - owned;
		// same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT
		// but we want to be more efficient wrt the WAITERS_BIT
		value &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK;
		value &= ~DISPATCH_QUEUE_DRAIN_PENDED;
		value &= ~DISPATCH_QUEUE_DIRTY;
		value ^= next_owner;
	});

	if (_dq_state_has_override(dq_state)) {
		// Ensure that the root queue sees that this thread was overridden.
		_dispatch_set_defaultpriority_override();
	}
	if (orig_state) *orig_state = dq_state;
	return value;
}
#define _dispatch_queue_drain_unlock(dq, owned, orig) \
		_dispatch_queue_drain_lock_transfer_or_unlock(dq, owned, 0, orig)
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_drain_transfer_lock(dispatch_queue_t dq,
		uint64_t to_unlock, dispatch_object_t dou)
{
	mach_port_t th_next = 0;
	if (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT) {
		th_next = (mach_port_t)dou._dc->dc_data;
	}
	_dispatch_queue_drain_lock_transfer_or_unlock(dq, to_unlock, th_next, NULL);
}
#pragma mark -
#pragma mark os_mpsc_queue

// type_t * {volatile,const,_Atomic,...} -> type_t *
// type_t[] -> type_t *
#define os_unqualified_pointer_type(expr) \
		typeof(typeof(*(expr)) *)

#define os_mpsc_node_type(q, _ns)  \
		os_unqualified_pointer_type((q)->_ns##_head)

//
// Multi Producer calls, can be used safely concurrently
//

// Returns true when the queue was empty and the head must be set
#define os_mpsc_push_update_tail_list(q, _ns, head, tail, _o_next)  ({ \
	os_mpsc_node_type(q, _ns) _head = (head), _tail = (tail), _prev; \
	_tail->_o_next = NULL; \
	_prev = os_atomic_xchg2o((q), _ns##_tail, _tail, release); \
	if (fastpath(_prev)) { \
		os_atomic_store2o(_prev, _o_next, _head, relaxed); \
	} \
	(_prev == NULL); \
})

// Returns true when the queue was empty and the head must be set
#define os_mpsc_push_update_tail(q, _ns, o, _o_next)  ({ \
	os_mpsc_node_type(q, _ns) _o = (o); \
	os_mpsc_push_update_tail_list(q, _ns, _o, _o, _o_next); \
})

#define os_mpsc_push_update_head(q, _ns, o)  ({ \
	os_atomic_store2o((q), _ns##_head, o, relaxed); \
})

//
// Single Consumer calls, can NOT be used safely concurrently
//

#define os_mpsc_get_head(q, _ns)  ({ \
	os_mpsc_node_type(q, _ns) _head; \
	_dispatch_wait_until(_head = (q)->_ns##_head); \
	_head; \
})

#define os_mpsc_pop_head(q, _ns, head, _o_next)  ({ \
	typeof(q) _q = (q); \
	os_mpsc_node_type(_q, _ns) _head = (head), _n = fastpath(_head->_o_next); \
	os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
	/* 22708742: set tail to NULL with release, so that NULL write */ \
	/* to head above doesn't clobber head from concurrent enqueuer */ \
	if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release)) { \
		_dispatch_wait_until(_n = fastpath(_head->_o_next)); \
		os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
	} \
	_n; \
})

#define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next)  ({ \
	typeof(q) _q = (q); \
	os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \
	if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed)) { \
		_dispatch_wait_until(_n = _q->_ns##_head); \
		_head->_o_next = _n; \
	} \
	os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
})

#define os_mpsc_capture_snapshot(q, _ns, tail)  ({ \
	typeof(q) _q = (q); \
	os_mpsc_node_type(_q, _ns) _head; \
	_dispatch_wait_until(_head = _q->_ns##_head); \
	os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \
	/* 22708742: set tail to NULL with release, so that NULL write */ \
	/* to head above doesn't clobber head from concurrent enqueuer */ \
	*(tail) = os_atomic_xchg2o(_q, _ns##_tail, NULL, release); \
	_head; \
})

#define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \
	os_unqualified_pointer_type(head) _head = (head), _n = NULL; \
	if (_head != (tail)) { \
		_dispatch_wait_until(_n = _head->_o_next); \
	} \
	_n; \
})

#define os_mpsc_prepend(q, _ns, head, tail, _o_next)  ({ \
	typeof(q) _q = (q); \
	os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \
	_tail->_o_next = NULL; \
	if (!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release)) { \
		_dispatch_wait_until(_n = _q->_ns##_head); \
		_tail->_o_next = _n; \
	} \
	os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
})
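
/*
 * Usage sketch (illustrative, not part of the original header), assuming a
 * queue with dq_items_head/dq_items_tail fields and items linked via do_next:
 *
 *	// producer side (safe from any thread)
 *	if (os_mpsc_push_update_tail(dq, dq_items, item, do_next)) {
 *		os_mpsc_push_update_head(dq, dq_items, item);
 *		// queue went from empty to non-empty: a wakeup is required
 *	}
 *
 *	// consumer side (single thread only)
 *	struct dispatch_object_s *head = os_mpsc_get_head(dq, dq_items);
 *	struct dispatch_object_s *next = os_mpsc_pop_head(dq, dq_items, head, do_next);
 *
 * This mirrors how _dispatch_queue_push_inline() and _dispatch_queue_next()
 * use these macros below.
 */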
#pragma mark -
#pragma mark dispatch_queue_t tq lock

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, pthread_priority_t pp)
{
	dispatch_lock_owner owner;
	if (_dispatch_unfair_lock_trylock(&dq->dq_sidelock, &owner)) {
		return true;
	}
	_dispatch_wqthread_override_start_check_owner(owner, pp,
			&dq->dq_sidelock.dul_lock);
	return false;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_sidelock_lock(dispatch_queue_t dq)
{
	return _dispatch_unfair_lock_lock(&dq->dq_sidelock);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_sidelock_tryunlock(dispatch_queue_t dq)
{
	if (_dispatch_unfair_lock_tryunlock(&dq->dq_sidelock)) {
		return true;
	}
	// Ensure that the root queue sees that this thread was overridden.
	_dispatch_set_defaultpriority_override();
	return false;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_sidelock_unlock(dispatch_queue_t dq)
{
	if (_dispatch_unfair_lock_unlock_had_failed_trylock(&dq->dq_sidelock)) {
		// Ensure that the root queue sees that this thread was overridden.
		_dispatch_set_defaultpriority_override();
		_dispatch_wqthread_override_reset();
	}
}
#pragma mark -
#pragma mark dispatch_queue_t misc

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_t
_dispatch_queue_get_current(void)
{
	return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_set_current(dispatch_queue_t dq)
{
	_dispatch_thread_setspecific(dispatch_queue_key, dq);
}

DISPATCH_ALWAYS_INLINE
static inline struct dispatch_object_s*
_dispatch_queue_head(dispatch_queue_t dq)
{
	return os_mpsc_get_head(dq, dq_items);
}

DISPATCH_ALWAYS_INLINE
static inline struct dispatch_object_s*
_dispatch_queue_next(dispatch_queue_t dq, struct dispatch_object_s *dc)
{
	return os_mpsc_pop_head(dq, dq_items, dc, do_next);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_push_update_tail(dispatch_queue_t dq,
		struct dispatch_object_s *tail)
{
	// if we crash here with a value less than 0x1000, then we are
	// at a known bug in client code. for example, see
	// _dispatch_queue_dispose or _dispatch_atfork_child
	return os_mpsc_push_update_tail(dq, dq_items, tail, do_next);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_push_update_tail_list(dispatch_queue_t dq,
		struct dispatch_object_s *head, struct dispatch_object_s *tail)
{
	// if we crash here with a value less than 0x1000, then we are
	// at a known bug in client code. for example, see
	// _dispatch_queue_dispose or _dispatch_atfork_child
	return os_mpsc_push_update_tail_list(dq, dq_items, head, tail, do_next);
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_update_head(dispatch_queue_t dq,
		struct dispatch_object_s *head, bool retained)
{
	if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
		dispatch_assert(!retained);
		// Lie about "retained" here, it generates better assembly in this
		// hotpath, and _dispatch_root_queue_wakeup knows to ignore this
		// fake "WAKEUP_CONSUME" bit when it also sees WAKEUP_FLUSH.
		//
		// We need to bypass the retain below because pthread root queues
		// are not global and retaining them would be wrong.
		//
		// We should eventually have a typeflag for "POOL" kind of root queues.
		retained = true;
	}
	// The queue must be retained before dq_items_head is written in order
	// to ensure that the reference is still valid when _dispatch_queue_wakeup
	// is called. Otherwise, if preempted between the assignment to
	// dq_items_head and _dispatch_queue_wakeup, the blocks submitted to the
	// queue may release the last reference to the queue when invoked by
	// _dispatch_queue_drain. <rdar://problem/6932776>
	if (!retained) _dispatch_retain(dq);
	os_mpsc_push_update_head(dq, dq_items, head);
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
		dispatch_object_t _tail, pthread_priority_t pp, unsigned int n)
{
	struct dispatch_object_s *head = _head._do, *tail = _tail._do;
	bool override = _dispatch_queue_need_override_retain(dq, pp);
	dispatch_queue_flags_t flags;
	if (slowpath(_dispatch_queue_push_update_tail_list(dq, head, tail))) {
		_dispatch_queue_push_update_head(dq, head, override);
		if (fastpath(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) {
			return _dispatch_queue_push_list_slow(dq, n);
		}
		flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH;
	} else if (override) {
		flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING;
	} else {
		return;
	}
	dx_wakeup(dq, pp, flags);
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail,
		pthread_priority_t pp, dispatch_wakeup_flags_t flags)
{
	struct dispatch_object_s *tail = _tail._do;
	bool override = _dispatch_queue_need_override(dq, pp);
	if (flags & DISPATCH_WAKEUP_SLOW_WAITER) {
		// when SLOW_WAITER is set, we borrow the reference of the caller
		if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
			_dispatch_queue_push_update_head(dq, tail, true);
			flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_FLUSH;
		} else if (override) {
			flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_OVERRIDING;
		} else {
			flags = DISPATCH_WAKEUP_SLOW_WAITER;
		}
	} else {
		if (override) _dispatch_retain(dq);
		if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
			_dispatch_queue_push_update_head(dq, tail, override);
			flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH;
		} else if (override) {
			flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING;
		} else {
			return;
		}
	}
	return dx_wakeup(dq, pp, flags);
}
struct _dispatch_identity_s {
	pthread_priority_t old_pp;
};

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di,
		pthread_priority_t pp)
{
	// assumed_rq was set by the caller, we need to fake the priorities
	dispatch_queue_t assumed_rq = _dispatch_queue_get_current();

	dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE);

	di->old_pp = _dispatch_get_defaultpriority();

	if (!(assumed_rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) {
		if (!pp) {
			pp = _dispatch_get_priority();
			// _dispatch_root_queue_drain_deferred_item() may turn a manager
			// thread into a regular root queue, and we must never try to
			// restore the manager flag once we became a regular work queue
			// thread.
			pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
		}
		if ((pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >
				(assumed_rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
			_dispatch_wqthread_override_start(_dispatch_tid_self(), pp);
			// Ensure that the root queue sees that this thread was overridden.
			_dispatch_set_defaultpriority_override();
		}
	}
	_dispatch_reset_defaultpriority(assumed_rq->dq_priority);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_root_queue_identity_restore(struct _dispatch_identity_s *di)
{
	_dispatch_reset_defaultpriority(di->old_pp);
}
typedef dispatch_queue_t
_dispatch_queue_class_invoke_handler_t(dispatch_object_t,
		dispatch_invoke_flags_t, uint64_t *owned, struct dispatch_object_s **);
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_class_invoke(dispatch_object_t dou,
		dispatch_invoke_flags_t flags,
		_dispatch_queue_class_invoke_handler_t invoke)
{
	dispatch_queue_t dq = dou._dq;
	struct dispatch_object_s *dc = NULL;
	dispatch_queue_t tq = NULL;
	uint64_t dq_state, to_unlock = 0;
	bool owning = !slowpath(flags & DISPATCH_INVOKE_STEALING);
	bool overriding = slowpath(flags & DISPATCH_INVOKE_OVERRIDING);

	// When called from a plain _dispatch_queue_drain:
	//   overriding = false
	//   owning = true
	//
	// When called from an override continuation:
	//   overriding = true
	//   owning depends on whether the override embedded the queue or steals
	DISPATCH_COMPILER_CAN_ASSUME(owning || overriding);

	if (owning) {
		dq->do_next = DISPATCH_OBJECT_LISTLESS;
	}
	to_unlock = _dispatch_queue_drain_try_lock(dq, flags, &dq_state);
	if (likely(to_unlock)) {
		struct _dispatch_identity_s di;
		pthread_priority_t old_dp;

drain_pending_barrier:
		if (overriding) {
			_dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx",
					_dispatch_tid_self(), _dispatch_get_defaultpriority());
			_dispatch_root_queue_identity_assume(&di, 0);
		}

		if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
			pthread_priority_t op, dp;

			old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp);
			op = dq->dq_override;
			if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
				_dispatch_wqthread_override_start(_dispatch_tid_self(), op);
				// Ensure that the root queue sees that this thread was overridden.
				_dispatch_set_defaultpriority_override();
			}
		}

		flags = _dispatch_queue_merge_autorelease_frequency(dq, flags);
attempt_running_slow_head:
		tq = invoke(dq, flags, &to_unlock, &dc);
		if (slowpath(tq)) {
			// Either dc is set, which is a deferred invoke case
			//
			// or only tq is and it means a reenqueue is required, because of:
			// a retarget, a suspension, or a width change.
			//
			// In both cases, we want to bypass the check for DIRTY.
			// That may cause us to leave DIRTY in place but all drain lock
			// acquirers clear it
		} else {
			if (!_dispatch_queue_drain_try_unlock(dq, to_unlock)) {
				goto attempt_running_slow_head;
			}
			to_unlock = 0;
		}
		if (overriding) {
			_dispatch_root_queue_identity_restore(&di);
		}
		if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
			_dispatch_reset_defaultpriority(old_dp);
		}
	} else if (overriding) {
		uint32_t owner = _dq_state_drain_owner(dq_state);
		pthread_priority_t p = dq->dq_override;
		if (owner && p) {
			_dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx",
					owner, p);
			_dispatch_wqthread_override_start_check_owner(owner, p,
					&dq->dq_state_lock);
		}
	}

	if (owning) {
		_dispatch_introspection_queue_item_complete(dq);
	}

	if (tq && dc) {
		return _dispatch_queue_drain_deferred_invoke(dq, flags, to_unlock, dc);
	}

	if (tq) {
		bool full_width_upgrade_allowed = (tq == _dispatch_queue_get_current());
		uint64_t old_state, new_state;

		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
			new_state = old_state - to_unlock;
			if (full_width_upgrade_allowed && _dq_state_is_runnable(new_state) &&
					_dq_state_has_pending_barrier(new_state)) {
				new_state += DISPATCH_QUEUE_IN_BARRIER;
				new_state += DISPATCH_QUEUE_WIDTH_INTERVAL;
				new_state -= DISPATCH_QUEUE_PENDING_BARRIER;
				new_state += to_unlock & DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
			} else {
				new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
				if (_dq_state_should_wakeup(new_state)) {
					// drain was not interrupted for suspension
					// we will reenqueue right away, just put ENQUEUED back
					new_state |= DISPATCH_QUEUE_ENQUEUED;
					new_state |= DISPATCH_QUEUE_DIRTY;
				}
			}
		});

		if (_dq_state_is_in_barrier(new_state)) {
			// we did a "full width upgrade" and just added IN_BARRIER
			// so adjust what we own and drain again
			to_unlock &= DISPATCH_QUEUE_ENQUEUED;
			to_unlock += DISPATCH_QUEUE_IN_BARRIER;
			to_unlock += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
			goto drain_pending_barrier;
		}
		if (_dq_state_has_override(old_state)) {
			// Ensure that the root queue sees that this thread was overridden.
			_dispatch_set_defaultpriority_override();
		}

		if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) {
			return _dispatch_queue_push(tq, dq, 0);
		}
	}

	return _dispatch_release_tailcall(dq);
}
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_class_probe(dispatch_queue_class_t dqu)
{
	struct dispatch_object_s *tail;
	// seq_cst wrt atomic store to dq_state <rdar://problem/14637483>
	// seq_cst wrt atomic store to dq_flags <rdar://problem/22623242>
	tail = os_atomic_load2o(dqu._oq, oq_items_tail, ordered);
	return slowpath(tail != NULL);
}
DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline bool
_dispatch_is_in_root_queues_array(dispatch_queue_t dq)
{
	return (dq >= _dispatch_root_queues) &&
			(dq < _dispatch_root_queues + _DISPATCH_ROOT_QUEUE_IDX_COUNT);
}
DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline dispatch_queue_t
_dispatch_get_root_queue(qos_class_t priority, bool overcommit)
{
	if (overcommit) switch (priority) {
	case _DISPATCH_QOS_CLASS_MAINTENANCE:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_BACKGROUND:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_UTILITY:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_DEFAULT:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_USER_INITIATED:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT];
	} else switch (priority) {
	case _DISPATCH_QOS_CLASS_MAINTENANCE:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS];
	case _DISPATCH_QOS_CLASS_BACKGROUND:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS];
	case _DISPATCH_QOS_CLASS_UTILITY:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS];
	case _DISPATCH_QOS_CLASS_DEFAULT:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS];
	case _DISPATCH_QOS_CLASS_USER_INITIATED:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS];
	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS];
	}
	return NULL;
}
#if HAVE_PTHREAD_WORKQUEUE_QOS
DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline dispatch_queue_t
_dispatch_get_root_queue_for_priority(pthread_priority_t pp, bool overcommit)
{
	uint32_t idx;

	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	idx = (uint32_t)__builtin_ffs((int)pp);
	if (unlikely(!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]
			.dq_priority)) {
		// If kernel doesn't support maintenance, bottom bit is background.
		// Shift to our idea of where background bit is.
		idx++;
	}
	// ffs starts at 1, and account for the QOS_CLASS_SHIFT
	// if pp is 0, idx is 0 or 1 and this will wrap to a value larger than
	// DISPATCH_QOS_COUNT
	idx -= (_PTHREAD_PRIORITY_QOS_CLASS_SHIFT + 1);
	if (unlikely(idx >= DISPATCH_QUEUE_QOS_COUNT)) {
		DISPATCH_CLIENT_CRASH(pp, "Corrupted priority");
	}
	return &_dispatch_root_queues[2 * idx + overcommit];
}
#endif
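
/*
 * Worked example (illustrative, exact constants elided): for a QOS class whose
 * bit sits above _PTHREAD_PRIORITY_QOS_CLASS_SHIFT, __builtin_ffs() yields its
 * 1-based position, the subtraction maps that to a 0-based QOS index, and the
 * final lookup interleaves the overcommit variants:
 *
 *	idx = ffs(pp & QOS_CLASS_MASK) - (QOS_CLASS_SHIFT + 1);
 *	rq  = &_dispatch_root_queues[2 * idx + overcommit];
 *
 * Even slots are the regular root queues, odd slots their overcommit
 * counterparts, which matches the comment in the helper that follows.
 */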
DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline dispatch_queue_t
_dispatch_get_root_queue_with_overcommit(dispatch_queue_t rq, bool overcommit)
{
	bool rq_overcommit = (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
	// root queues in _dispatch_root_queues are not overcommit for even indices
	// and overcommit for odd ones, so fixing overcommit is either returning
	// the same queue, or picking its neighbour in _dispatch_root_queues
	if (overcommit && !rq_overcommit) {
		return rq + 1;
	}
	if (!overcommit && rq_overcommit) {
		return rq - 1;
	}
	return rq;
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_set_bound_thread(dispatch_queue_t dq)
{
	// Tag thread-bound queues with the owning thread
	dispatch_assert(_dispatch_queue_is_thread_bound(dq));
	mach_port_t old_owner, self = _dispatch_tid_self();
	uint64_t dq_state = os_atomic_or_orig2o(dq, dq_state, self, relaxed);
	if (unlikely(old_owner = _dq_state_drain_owner(dq_state))) {
		DISPATCH_INTERNAL_CRASH(old_owner, "Queue bound twice");
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_clear_bound_thread(dispatch_queue_t dq)
{
	uint64_t dq_state, value;

	dispatch_assert(_dispatch_queue_is_thread_bound(dq));
	os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
		value = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(dq_state);
	});
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_pthread_root_queue_observer_hooks_t
_dispatch_get_pthread_root_queue_observer_hooks(void)
{
	return _dispatch_thread_getspecific(
			dispatch_pthread_root_queue_observer_hooks_key);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_pthread_root_queue_observer_hooks(
		dispatch_pthread_root_queue_observer_hooks_t observer_hooks)
{
	_dispatch_thread_setspecific(dispatch_pthread_root_queue_observer_hooks_key,
			observer_hooks);
}
#pragma mark -
#pragma mark dispatch_priority

DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_get_defaultpriority(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t pp = (uintptr_t)_dispatch_thread_getspecific(
			dispatch_defaultpriority_key);
	return pp;
#else
	return 0;
#endif
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_reset_defaultpriority(pthread_priority_t pp)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t old_pp = _dispatch_get_defaultpriority();
	// If an inner-loop or'd in the override flag to the per-thread priority,
	// it needs to be propagated up the chain.
	pp |= old_pp & _PTHREAD_PRIORITY_OVERRIDE_FLAG;
	_dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
#else
	(void)pp;
#endif
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_defaultpriority_override(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t old_pp = _dispatch_get_defaultpriority();
	pthread_priority_t pp = old_pp | _PTHREAD_PRIORITY_OVERRIDE_FLAG;

	_dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
#endif
}
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_reset_defaultpriority_override(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t old_pp = _dispatch_get_defaultpriority();
	pthread_priority_t pp = old_pp &
			~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG);

	_dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
	return unlikely(pp != old_pp);
#else
	return false;
#endif
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
		dispatch_queue_t tq)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	const dispatch_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG;
	const dispatch_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG;
	const dispatch_priority_t defaultqueue_flag =
			_PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
	dispatch_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority;
	if ((!(dqp & ~_PTHREAD_PRIORITY_FLAGS_MASK) || (dqp & inherited_flag)) &&
			(tqp & rootqueue_flag)) {
		if (tqp & defaultqueue_flag) {
			dq->dq_priority = 0;
		} else {
			dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag;
		}
	}
#else
	(void)dq; (void)tq;
#endif
}

DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_set_defaultpriority(pthread_priority_t pp, pthread_priority_t *new_pp)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	const pthread_priority_t default_priority_preserved_flags =
			_PTHREAD_PRIORITY_OVERRIDE_FLAG|_PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	pthread_priority_t old_pp = _dispatch_get_defaultpriority();
	if (old_pp) {
		pthread_priority_t flags, defaultqueue, basepri;
		flags = (pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
		defaultqueue = (old_pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
		basepri = (old_pp & ~_PTHREAD_PRIORITY_FLAGS_MASK);
		pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
		if (!pp) {
			flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue;
			pp = basepri;
		} else if (pp < basepri && !defaultqueue) { // rdar://16349734
			pp = basepri;
		}
		pp |= flags | (old_pp & default_priority_preserved_flags);
	}
	_dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
	if (new_pp) *new_pp = pp;
	return old_pp;
#else
	(void)pp; (void)new_pp;
	return 0;
#endif
}

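/*
 * Illustrative sketch (not part of libdispatch; kept out of the build): how a
 * drain loop is expected to pair _dispatch_set_defaultpriority() with
 * _dispatch_reset_defaultpriority(), so that an OVERRIDE flag OR'd in by an
 * inner scope propagates back up when the outer default is restored. The
 * helper name and the elided drain step are placeholders.
 */
#if 0
static void
_example_drain_with_defaultpriority(dispatch_queue_t dq)
{
	pthread_priority_t old_dp, new_dp;
	old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &new_dp);
	// ... invoke the queue's items with new_dp as the ambient priority ...
	_dispatch_reset_defaultpriority(old_dp);
}
#endif
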
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t defaultpri = _dispatch_get_defaultpriority();
	bool enforce, inherited, defaultqueue;
	enforce = (flags & DISPATCH_PRIORITY_ENFORCE) ||
			(pp & _PTHREAD_PRIORITY_ENFORCE_FLAG);
	inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG);
	defaultqueue = (defaultpri & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
	defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
	pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;

	if (!pp) {
		return defaultpri;
	} else if (defaultqueue) { // rdar://16349734
		return pp;
	} else if (pp < defaultpri) {
		return pp;
	} else if (enforce || inherited) {
		return pp;
	} else {
		return defaultpri;
	}
#else
	(void)pp; (void)flags;
	return 0;
#endif
}

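/*
 * Illustrative model (not part of libdispatch; kept out of the build): the
 * adoption precedence above restated over plain values. A pp of 0 falls back
 * to the thread default; a DEFAULTQUEUE base or an explicit enforce/inherit
 * lets pp win even above the default; otherwise pp may only lower the
 * priority and is clipped back to the default when it asks for more.
 */
#if 0
static unsigned long
_example_adopt_model(unsigned long pp, unsigned long defaultpri,
		bool enforce, bool inherited, bool defaultqueue)
{
	if (!pp) return defaultpri;          // nothing requested: keep the default
	if (defaultqueue) return pp;         // rdar://16349734: no base to clip to
	if (pp < defaultpri) return pp;      // running below the default is fine
	if (enforce || inherited) return pp; // raising it needs an explicit opt-in
	return defaultpri;                   // otherwise clip to the default
}
#endif
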
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_inherit_from_root_queue(pthread_priority_t pp,
		dispatch_queue_t rq)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t p = pp & ~_PTHREAD_PRIORITY_FLAGS_MASK;
	pthread_priority_t rqp = rq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
	pthread_priority_t defaultqueue =
			rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;

	if (!p || (!defaultqueue && p < rqp)) {
		p = rqp | defaultqueue;
	}
	return p | (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
#else
	(void)rq; (void)pp;
	return 0;
#endif
}

DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_get_priority(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t pp = (uintptr_t)
			_dispatch_thread_getspecific(dispatch_priority_key);
	return pp;
#else
	return 0;
#endif
}

#if HAVE_PTHREAD_WORKQUEUE_QOS
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_compute_update(pthread_priority_t pp)
{
	dispatch_assert(pp != DISPATCH_NO_PRIORITY);
	if (!_dispatch_set_qos_class_enabled) return 0;
	// the priority in _dispatch_get_priority() only tracks manager-ness
	// and overcommit, which is inherited from the current value for each update
	// however if the priority had the NEEDS_UNBIND flag set we need to clear it
	// the first chance we get
	//
	// the manager bit is invalid input, but we keep it to get meaningful
	// assertions in _dispatch_set_priority_and_voucher_slow()
	pp &= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
	pthread_priority_t cur_priority = _dispatch_get_priority();
	pthread_priority_t unbind = _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
	pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	if (unlikely(cur_priority & unbind)) {
		// else we always need an update if the NEEDS_UNBIND flag is set
		// the slowpath in _dispatch_set_priority_and_voucher_slow() will
		// adjust the priority further with the proper overcommitness
		return pp ? pp : (cur_priority & ~unbind);
	} else {
		cur_priority &= ~overcommit;
	}
	if (unlikely(pp != cur_priority)) return pp;
	return 0;
}
#endif

DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline voucher_t
_dispatch_set_priority_and_voucher(pthread_priority_t pp,
		voucher_t v, _dispatch_thread_set_self_t flags)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pp = _dispatch_priority_compute_update(pp);
	if (likely(!pp)) {
		if (v == DISPATCH_NO_VOUCHER) {
			return DISPATCH_NO_VOUCHER;
		}
		if (likely(v == _voucher_get())) {
			bool retained = flags & DISPATCH_VOUCHER_CONSUME;
			if (flags & DISPATCH_VOUCHER_REPLACE) {
				if (retained && v) _voucher_release_no_dispose(v);
				v = DISPATCH_NO_VOUCHER;
			} else {
				if (!retained && v) _voucher_retain(v);
			}
			return v;
		}
	}
	return _dispatch_set_priority_and_voucher_slow(pp, v, flags);
#else
	(void)pp; (void)v; (void)flags;
	return DISPATCH_NO_VOUCHER;
#endif
}

DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline voucher_t
_dispatch_adopt_priority_and_set_voucher(pthread_priority_t pp,
		voucher_t v, _dispatch_thread_set_self_t flags)
{
	pthread_priority_t p = 0;
	if (pp != DISPATCH_NO_PRIORITY) {
		p = _dispatch_priority_adopt(pp, flags);
	}
	return _dispatch_set_priority_and_voucher(p, v, flags);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_reset_priority_and_voucher(pthread_priority_t pp, voucher_t v)
{
	if (pp == DISPATCH_NO_PRIORITY) pp = 0;
	(void)_dispatch_set_priority_and_voucher(pp, v,
			DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_reset_voucher(voucher_t v, _dispatch_thread_set_self_t flags)
{
	flags |= DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE;
	(void)_dispatch_set_priority_and_voucher(0, v, flags);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_need_override(dispatch_queue_class_t dqu, pthread_priority_t pp)
{
	// global queues have their override set to DISPATCH_SATURATED_OVERRIDE
	// which makes this test always return false for them.
	return dqu._oq->oq_override < (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_received_override(dispatch_queue_class_t dqu,
		pthread_priority_t pp)
{
	dispatch_assert(dqu._oq->oq_override != DISPATCH_SATURATED_OVERRIDE);
	return dqu._oq->oq_override > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_need_override_retain(dispatch_queue_class_t dqu,
		pthread_priority_t pp)
{
	if (_dispatch_queue_need_override(dqu, pp)) {
		_os_object_retain_internal_inline(dqu._oq->_as_os_obj);
		return true;
	}
	return false;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu,
		dispatch_priority_t new_op)
{
	dispatch_priority_t old_op;
	new_op &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	if (!new_op) return false;
	os_atomic_rmw_loop2o(dqu._oq, oq_override, old_op, new_op, relaxed, {
		if (new_op <= old_op) {
			os_atomic_rmw_loop_give_up(return false);
		}
	});
	return true;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_override_priority(dispatch_queue_class_t dqu,
		pthread_priority_t *pp, dispatch_wakeup_flags_t *flags)
{
	os_mpsc_queue_t oq = dqu._oq;
	dispatch_priority_t qp = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	dispatch_priority_t np = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
	dispatch_priority_t o;

	_dispatch_assert_is_valid_qos_override(np);
	if (oq->oq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG) {
		qp = 0;
	} else if (*flags & DISPATCH_WAKEUP_SLOW_WAITER) {
		// when a queue is used as a lock its priority doesn't count
	} else if (np < qp) {
		// for asynchronous workitems, queue priority is the floor for overrides
		np = qp;
	}
	*flags &= ~_DISPATCH_WAKEUP_OVERRIDE_BITS;

	// this optimizes for the case when no update of the override is required
	// os_atomic_rmw_loop2o optimizes for the case when the update happens,
	// and can't be used.
	o = os_atomic_load2o(oq, oq_override, relaxed);
	do {
		if (likely(np <= o)) break;
	} while (unlikely(!os_atomic_cmpxchgvw2o(oq, oq_override, o, np, &o,
			relaxed)));

	if (np > o) {
		*flags |= DISPATCH_WAKEUP_OVERRIDING;
	}
	if (o > qp) {
		*flags |= DISPATCH_WAKEUP_WAS_OVERRIDDEN;
	}
}

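/*
 * Illustrative sketch (not part of libdispatch; kept out of the build): the
 * same "plain load first, CAS only when the value must grow" shape as the
 * override update above, written with C11 atomics. The early break keeps the
 * common no-op case free of any CAS, which is what the comment above means by
 * optimizing for the case where no update is required.
 */
#if 0
#include <stdatomic.h>

static void
_example_store_max(_Atomic(unsigned int) *slot, unsigned int np)
{
	unsigned int o = atomic_load_explicit(slot, memory_order_relaxed);
	do {
		if (np <= o) break; // common case: current value already large enough
	} while (!atomic_compare_exchange_weak_explicit(slot, &o, np,
			memory_order_relaxed, memory_order_relaxed));
}
#endif
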
DISPATCH_ALWAYS_INLINE
static inline dispatch_priority_t
_dispatch_queue_reset_override_priority(dispatch_queue_class_t dqu,
		bool qp_is_floor)
{
	os_mpsc_queue_t oq = dqu._oq;
	dispatch_priority_t p = 0;
	if (qp_is_floor) {
		// thread bound queues floor their dq_override to their
		// priority to avoid receiving useless overrides
		p = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	}
	dispatch_priority_t o = os_atomic_xchg2o(oq, oq_override, p, relaxed);
	dispatch_assert(o != DISPATCH_SATURATED_OVERRIDE);
	return (o > p) ? o : 0;
}

DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_propagate(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t pp = _dispatch_get_priority();
	pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
	if (pp > _dispatch_user_initiated_priority) {
		// Cap QOS for propagation at user-initiated <rdar://16681262&16998036>
		pp = _dispatch_user_initiated_priority;
	}
	return pp;
#else
	return 0;
#endif
}

// returns whether the current thread is running at a background QOS,
// including maintenance
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_is_background_thread(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t pp = _dispatch_get_priority();
	pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
	return pp && (pp <= _dispatch_background_priority);
#else
	return false;
#endif
}

#pragma mark dispatch_block_t

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_block_has_private_data(const dispatch_block_t block)
{
	extern void (*_dispatch_block_special_invoke)(void*);
	return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_block_sync_should_enforce_qos_class(dispatch_block_flags_t flags)
{
	/*
	 * Generates better assembly than the actual readable test:
	 * (flags & ENFORCE_QOS_CLASS) || !(flags & INHERIT_QOS_FLAGS)
	 */
	flags &= DISPATCH_BLOCK_ENFORCE_QOS_CLASS | DISPATCH_BLOCK_INHERIT_QOS_CLASS;
	return flags != DISPATCH_BLOCK_INHERIT_QOS_CLASS;
}

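/*
 * Illustrative sketch (not part of libdispatch; kept out of the build):
 * exhaustive check that the masked comparison above matches the readable
 * (flags & ENFORCE_QOS_CLASS) || !(flags & INHERIT_QOS_CLASS) test for all
 * four combinations of the two flags. The helper name is hypothetical.
 */
#if 0
static void
_example_check_enforce_qos_trick(void)
{
	const dispatch_block_flags_t e = DISPATCH_BLOCK_ENFORCE_QOS_CLASS;
	const dispatch_block_flags_t i = DISPATCH_BLOCK_INHERIT_QOS_CLASS;
	const dispatch_block_flags_t combos[] = { 0, e, i, e | i };
	for (size_t n = 0; n < 4; n++) {
		dispatch_block_flags_t f = combos[n];
		bool readable = (f & e) || !(f & i);
		dispatch_assert(_dispatch_block_sync_should_enforce_qos_class(f) ==
				readable);
	}
}
#endif
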
DISPATCH_ALWAYS_INLINE
static inline dispatch_block_private_data_t
_dispatch_block_get_data(const dispatch_block_t db)
{
	if (!_dispatch_block_has_private_data(db)) {
		return NULL;
	}
	// Keep in sync with _dispatch_block_create implementation
	uint8_t *x = (uint8_t *)db;
	// x points to base of struct Block_layout
	x += sizeof(struct Block_layout);
	// x points to base of captured dispatch_block_private_data_s object
	dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x;
	if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) {
		DISPATCH_CLIENT_CRASH(dbpd->dbpd_magic,
				"Corruption of dispatch block object");
	}
	return dbpd;
}

DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_block_get_priority(const dispatch_block_t db)
{
	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
	return dbpd ? dbpd->dbpd_priority : 0;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_block_flags_t
_dispatch_block_get_flags(const dispatch_block_t db)
{
	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
	return dbpd ? dbpd->dbpd_flags : 0;
}

#pragma mark dispatch_continuation_t

DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_alloc_cacheonly(void)
{
	dispatch_continuation_t dc = (dispatch_continuation_t)
			_dispatch_thread_getspecific(dispatch_cache_key);
	if (likely(dc)) {
		_dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
	}
	return dc;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_alloc(void)
{
	dispatch_continuation_t dc =
			_dispatch_continuation_alloc_cacheonly();
	if (unlikely(!dc)) {
		return _dispatch_continuation_alloc_from_heap();
	}
	return dc;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_free_cacheonly(dispatch_continuation_t dc)
{
	dispatch_continuation_t prev_dc = (dispatch_continuation_t)
			_dispatch_thread_getspecific(dispatch_cache_key);
	int cnt = prev_dc ? prev_dc->dc_cache_cnt + 1 : 1;
	// Cap continuation cache
	if (unlikely(cnt > _dispatch_continuation_cache_limit)) {
		return dc;
	}
	dc->do_next = prev_dc;
	dc->dc_cache_cnt = cnt;
	_dispatch_thread_setspecific(dispatch_cache_key, dc);
	return NULL;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_free(dispatch_continuation_t dc)
{
	dc = _dispatch_continuation_free_cacheonly(dc);
	if (unlikely(dc)) {
		_dispatch_continuation_free_to_cache_limit(dc);
	}
}

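/*
 * Illustrative sketch (not part of libdispatch; kept out of the build): the
 * general shape of the per-thread continuation cache used above, as a
 * singly-linked thread-local freelist whose length is tracked in the head
 * node and capped. All names here are placeholders, not dispatch identifiers.
 */
#if 0
#define EXAMPLE_CACHE_LIMIT 64

struct example_node {
	struct example_node *next;
	int cache_cnt;
};

static __thread struct example_node *example_cache_head;

static struct example_node *
example_cache_pop(void)
{
	struct example_node *n = example_cache_head;
	if (n) example_cache_head = n->next; // empty cache: caller hits the heap
	return n;
}

// returns NULL when cached, or the node back when the cache is full and the
// caller must release it for real
static struct example_node *
example_cache_push(struct example_node *n)
{
	struct example_node *prev = example_cache_head;
	int cnt = prev ? prev->cache_cnt + 1 : 1;
	if (cnt > EXAMPLE_CACHE_LIMIT) return n;
	n->next = prev;
	n->cache_cnt = cnt;
	example_cache_head = n;
	return NULL;
}
#endif
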
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_with_group_invoke(dispatch_continuation_t dc)
{
	struct dispatch_object_s *dou = dc->dc_data;
	unsigned long type = dx_type(dou);
	if (type == DISPATCH_GROUP_TYPE) {
		_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
		_dispatch_introspection_queue_item_complete(dou);
		dispatch_group_leave((dispatch_group_t)dou);
	} else {
		DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type");
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov,
		dispatch_invoke_flags_t flags)
{
	dispatch_continuation_t dc = dou._dc, dc1;
	dispatch_invoke_with_autoreleasepool(flags, {
		uintptr_t dc_flags = dc->dc_flags;
		// Add the item back to the cache before calling the function. This
		// allows the 'hot' continuation to be used for a quick callback.
		//
		// The ccache version is per-thread.
		// Therefore, the object has not been reused yet.
		// This generates better assembly.
		_dispatch_continuation_voucher_adopt(dc, ov, dc_flags);
		if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
			dc1 = _dispatch_continuation_free_cacheonly(dc);
		} else {
			dc1 = NULL;
		}
		if (unlikely(dc_flags & DISPATCH_OBJ_GROUP_BIT)) {
			_dispatch_continuation_with_group_invoke(dc);
		} else {
			_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
			_dispatch_introspection_queue_item_complete(dou);
		}
		if (unlikely(dc1)) {
			_dispatch_continuation_free_to_cache_limit(dc1);
		}
	});
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_continuation_pop_inline(dispatch_object_t dou, dispatch_queue_t dq,
		dispatch_invoke_flags_t flags)
{
	dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
			_dispatch_get_pthread_root_queue_observer_hooks();
	if (observer_hooks) observer_hooks->queue_will_execute(dq);
	_dispatch_trace_continuation_pop(dq, dou);
	flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
	if (_dispatch_object_has_vtable(dou)) {
		dx_invoke(dou._do, flags);
	} else {
		voucher_t ov = dq->dq_override_voucher;
		_dispatch_continuation_invoke_inline(dou, ov, flags);
	}
	if (observer_hooks) observer_hooks->queue_did_execute(dq);
}

// used to forward the do_invoke of a continuation with a vtable to its real
// implementation
#define _dispatch_continuation_pop_forwarded(dc, ov, dc_flags, ...) \
	({ \
		dispatch_continuation_t _dc = (dc), _dc1; \
		uintptr_t _dc_flags = (dc_flags); \
		\
		_dispatch_continuation_voucher_adopt(_dc, ov, _dc_flags); \
		if (_dc_flags & DISPATCH_OBJ_CONSUME_BIT) { \
			_dc1 = _dispatch_continuation_free_cacheonly(_dc); \
		} else { \
			_dc1 = NULL; \
		} \
		__VA_ARGS__; \
		_dispatch_introspection_queue_item_complete(_dc); \
		if (unlikely(_dc1)) { \
			_dispatch_continuation_free_to_cache_limit(_dc1); \
		} \
	})

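/*
 * Illustrative sketch (not part of libdispatch; kept out of the build): how a
 * vtable invoke function would typically wrap its real body with the macro
 * above, passing the body as the trailing __VA_ARGS__ so that voucher
 * adoption, continuation recycling and introspection bracket it. The function
 * name and the flag/voucher arguments are placeholders.
 */
#if 0
void
_example_continuation_invoke(dispatch_continuation_t dc)
{
	_dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER,
			DISPATCH_OBJ_CONSUME_BIT, {
		_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
	});
}
#endif
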
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_priority_set(dispatch_continuation_t dc,
		pthread_priority_t pp, dispatch_block_flags_t flags)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (likely(!(flags & DISPATCH_BLOCK_HAS_PRIORITY))) {
		pp = _dispatch_priority_propagate();
	}
	if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) {
		pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
	}
	dc->dc_priority = pp;
#else
	(void)dc; (void)pp; (void)flags;
#endif
}

DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_continuation_get_override_priority(dispatch_queue_t dq,
		dispatch_continuation_t dc)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t p = dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG;
	pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;

	dispatch_assert(dc->dc_priority != DISPATCH_NO_PRIORITY);
	if (p && (enforce || !dqp || defaultqueue)) {
		return p;
	}
	return dqp;
#else
	(void)dq; (void)dc;
	return 0;
#endif
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_init_f(dispatch_continuation_t dc,
		dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t func,
		pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags)
{
	dc->dc_flags = dc_flags;
	dc->dc_func = func;
	dc->dc_ctxt = ctxt;
	_dispatch_continuation_voucher_set(dc, dqu, flags);
	_dispatch_continuation_priority_set(dc, pp, flags);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_init(dispatch_continuation_t dc,
		dispatch_queue_class_t dqu, dispatch_block_t work,
		pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags)
{
	dc->dc_flags = dc_flags | DISPATCH_OBJ_BLOCK_BIT;
	dc->dc_ctxt = _dispatch_Block_copy(work);
	_dispatch_continuation_priority_set(dc, pp, flags);

	if (unlikely(_dispatch_block_has_private_data(work))) {
		// always sets dc_func & dc_voucher
		// may update dc_priority & do_vtable
		return _dispatch_continuation_init_slow(dc, dqu, flags);
	}

	if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
		dc->dc_func = _dispatch_call_block_and_release;
	} else {
		dc->dc_func = _dispatch_Block_invoke(work);
	}
	_dispatch_continuation_voucher_set(dc, dqu, flags);
}

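/*
 * Illustrative sketch (not part of libdispatch; kept out of the build): what
 * the CONSUME path above means at the API level. An asynchronously submitted
 * block is copied when the continuation is initialized and dc_func is set to
 * _dispatch_call_block_and_release, so the heap copy is released after it
 * runs and the caller's on-stack block literal may go out of scope right
 * away. The helper name is hypothetical.
 */
#if 0
static void
_example_async_block_lifetime(dispatch_queue_t q)
{
	int captured = 42;
	dispatch_async(q, ^{
		// runs against the heap copy made by Block_copy; that copy is
		// released once the continuation has been invoked
		(void)captured;
	});
	// the block literal above may already be gone here; the queue owns a copy
}
#endif
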
#endif // DISPATCH_PURE_C

#endif /* __DISPATCH_INLINE_INTERNAL__ */