/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */
#ifndef __DISPATCH_INLINE_INTERNAL__
#define __DISPATCH_INLINE_INTERNAL__

#ifndef __DISPATCH_INDIRECT__
#error "Please #include <dispatch/dispatch.h> instead of this file directly."
#include <dispatch/base.h> // for HeaderDoc
#endif

#if DISPATCH_USE_CLIENT_CALLOUT
DISPATCH_NOTHROW void
_dispatch_client_callout(void *ctxt, dispatch_function_t f);
DISPATCH_NOTHROW void
_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t));
DISPATCH_NOTHROW void
_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason,
		dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f);
DISPATCH_NOTHROW void
_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
		dispatch_mach_msg_t dmsg, mach_error_t error,
		dispatch_mach_handler_function_t f);
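
// Added clarifying note (not from the original header): when
// DISPATCH_USE_CLIENT_CALLOUT is enabled, client-supplied callbacks appear to
// be routed through these out-of-line wrappers so that client code executes in
// its own, non-inlined frame, keeping a clean boundary between dispatch
// internals and user code. A minimal, hypothetical sketch of the call pattern:
#if 0 // illustrative only; example_invoke is not part of libdispatch
static void
example_invoke(void *ctxt, dispatch_function_t func)
{
	// hand the client function and its context to the callout wrapper
	_dispatch_client_callout(ctxt, func);
}
#endif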
#else // !DISPATCH_USE_CLIENT_CALLOUT

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_client_callout(void *ctxt, dispatch_function_t f)
{
	return f(ctxt);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
{
	return f(ctxt, i);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason,
		dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f)
{
	return f(ctxt, reason, dmsg);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
		dispatch_mach_msg_t dmsg, mach_error_t error,
		dispatch_mach_handler_function_t f)
{
	return f(ctxt, reason, dmsg, error);
}

#endif // !DISPATCH_USE_CLIENT_CALLOUT
#if DISPATCH_PURE_C

#pragma mark _os_object_t & dispatch_object_t

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_has_vtable(dispatch_object_t dou)
{
	uintptr_t dc_flags = dou._dc->dc_flags;

	// vtables are pointers far away from the low page in memory
	return dc_flags > 0xffful;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_is_queue(dispatch_object_t dou)
{
	return _dispatch_object_has_vtable(dou) && dx_vtable(dou._do)->do_push;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_is_continuation(dispatch_object_t dou)
{
	if (_dispatch_object_has_vtable(dou)) {
		return dx_metatype(dou._do) == _DISPATCH_CONTINUATION_TYPE;
	}
	return true;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_has_type(dispatch_object_t dou, unsigned long type)
{
	return _dispatch_object_has_vtable(dou) && dx_type(dou._do) == type;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_is_redirection(dispatch_object_t dou)
{
	return _dispatch_object_has_type(dou,
			DISPATCH_CONTINUATION_TYPE(ASYNC_REDIRECT));
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_is_barrier(dispatch_object_t dou)
{
	dispatch_queue_flags_t dq_flags;

	if (!_dispatch_object_has_vtable(dou)) {
		return (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT);
	}
	switch (dx_metatype(dou._do)) {
	case _DISPATCH_QUEUE_TYPE:
	case _DISPATCH_SOURCE_TYPE:
		dq_flags = os_atomic_load2o(dou._dq, dq_atomic_flags, relaxed);
		return dq_flags & DQF_BARRIER_BIT;
	default:
		return false;
	}
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_is_sync_waiter(dispatch_object_t dou)
{
	if (_dispatch_object_has_vtable(dou)) {
		return false;
	}
	return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_WAITER_BIT);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_is_sync_waiter_non_barrier(dispatch_object_t dou)
{
	if (_dispatch_object_has_vtable(dou)) {
		return false;
	}
	return ((dou._dc->dc_flags &
			(DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_WAITER_BIT)) ==
			(DISPATCH_OBJ_SYNC_WAITER_BIT));
}
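
// Added clarifying note (not from the original source): continuations store
// their dc_flags word in the slot that refcounted objects use for the vtable
// pointer, which is why a "small" value (<= 0xfff) in that slot means the
// object is a raw continuation rather than a vtable'd object. A hedged,
// illustrative classification sketch:
#if 0 // illustrative only; example_classify is not part of libdispatch
static void
example_classify(dispatch_object_t dou)
{
	if (_dispatch_object_is_continuation(dou)) {
		// lightweight work item: dc_flags carry barrier/sync-waiter bits
	} else if (_dispatch_object_is_queue(dou)) {
		// full object whose vtable implements do_push
	}
}
#endif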
DISPATCH_ALWAYS_INLINE
static inline _os_object_t
_os_object_retain_internal_n_inline(_os_object_t obj, int n)
{
	int ref_cnt = _os_object_refcnt_add(obj, n);
	if (unlikely(ref_cnt <= 0)) {
		_OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
	}
	return obj;
}

DISPATCH_ALWAYS_INLINE
static inline void
_os_object_release_internal_n_no_dispose_inline(_os_object_t obj, int n)
{
	int ref_cnt = _os_object_refcnt_sub(obj, n);
	if (likely(ref_cnt >= 0)) {
		return;
	}
	_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
}

DISPATCH_ALWAYS_INLINE
static inline void
_os_object_release_internal_n_inline(_os_object_t obj, int n)
{
	int ref_cnt = _os_object_refcnt_sub(obj, n);
	if (likely(ref_cnt >= 0)) {
		return;
	}
	if (unlikely(ref_cnt < -1)) {
		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
	}
#if DISPATCH_DEBUG
	int xref_cnt = obj->os_obj_xref_cnt;
	if (unlikely(xref_cnt >= 0)) {
		DISPATCH_INTERNAL_CRASH(xref_cnt,
				"Release while external references exist");
	}
#endif
	// _os_object_refcnt_dispose_barrier() is in _os_object_dispose()
	return _os_object_dispose(obj);
}
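
// Added clarifying note (not from the original header): these internal
// refcount helpers return the post-operation count. A result of -1 from
// _os_object_refcnt_sub() means the last internal reference was just dropped,
// which triggers _os_object_dispose(); anything below -1 can only happen on
// an over-released object and is treated as a client bug.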
DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_retain(dispatch_object_t dou)
{
	(void)_os_object_retain_internal_n_inline(dou._os_obj, 1);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_retain_2(dispatch_object_t dou)
{
	(void)_os_object_retain_internal_n_inline(dou._os_obj, 2);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_retain_n(dispatch_object_t dou, int n)
{
	(void)_os_object_retain_internal_n_inline(dou._os_obj, n);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_release(dispatch_object_t dou)
{
	_os_object_release_internal_n_inline(dou._os_obj, 1);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_release_2(dispatch_object_t dou)
{
	_os_object_release_internal_n_inline(dou._os_obj, 2);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_release_n(dispatch_object_t dou, int n)
{
	_os_object_release_internal_n_inline(dou._os_obj, n);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_release_no_dispose(dispatch_object_t dou)
{
	_os_object_release_internal_n_no_dispose_inline(dou._os_obj, 1);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_release_2_no_dispose(dispatch_object_t dou)
{
	_os_object_release_internal_n_no_dispose_inline(dou._os_obj, 2);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_release_tailcall(dispatch_object_t dou)
{
	_os_object_release_internal(dou._os_obj);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_release_2_tailcall(dispatch_object_t dou)
{
	_os_object_release_internal_n(dou._os_obj, 2);
}
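
// Added usage note (hedged, not from the original source): the _2 variants
// adjust the internal refcount by two in a single atomic operation; the push
// and wakeup paths further below use them together with
// DISPATCH_WAKEUP_CONSUME_2 so one retain covers both the enqueue and the
// subsequent wakeup consumption.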
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_retain_storage(dispatch_queue_t dq)
{
	int ref_cnt = os_atomic_inc2o(dq, dq_sref_cnt, relaxed);
	if (unlikely(ref_cnt <= 0)) {
		_OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_release_storage(dispatch_queue_t dq)
{
	// this refcount only delays the _dispatch_object_dealloc() and there's no
	// need for visibility wrt to the allocation, the internal refcount already
	// gives us that, and the object becomes immutable after the last internal
	// refcount release.
	int ref_cnt = os_atomic_dec2o(dq, dq_sref_cnt, relaxed);
	if (unlikely(ref_cnt >= 0)) {
		return;
	}
	if (unlikely(ref_cnt < -1)) {
		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
	}
	dq->dq_state = 0xdead000000000000;
	_dispatch_object_dealloc(dq);
}

DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL
static inline void
_dispatch_object_set_target_queue_inline(dispatch_object_t dou,
		dispatch_queue_t tq)
{
	_dispatch_retain(tq);
	tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release);
	if (tq) _dispatch_release(tq);
	_dispatch_object_debug(dou._do, "%s", __func__);
}

#endif // DISPATCH_PURE_C
#pragma mark dispatch_thread
#if DISPATCH_PURE_C

DISPATCH_ALWAYS_INLINE
static inline dispatch_thread_context_t
_dispatch_thread_context_find(const void *key)
{
	dispatch_thread_context_t dtc =
			_dispatch_thread_getspecific(dispatch_context_key);
	while (dtc) {
		if (dtc->dtc_key == key) {
			return dtc;
		}
		dtc = dtc->dtc_prev;
	}
	return NULL;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_context_push(dispatch_thread_context_t ctxt)
{
	ctxt->dtc_prev = _dispatch_thread_getspecific(dispatch_context_key);
	_dispatch_thread_setspecific(dispatch_context_key, ctxt);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_context_pop(dispatch_thread_context_t ctxt)
{
	dispatch_assert(_dispatch_thread_getspecific(dispatch_context_key) == ctxt);
	_dispatch_thread_setspecific(dispatch_context_key, ctxt->dtc_prev);
}
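
// Illustrative sketch (assumption, not part of libdispatch): callers pair
// _dispatch_thread_context_push()/_pop() around a scoped operation so that
// _dispatch_thread_context_find() can locate the context by key while the
// operation runs. Only the dtc_key field is shown; other fields are omitted.
#if 0 // hypothetical example
static void
example_with_context(void)
{
	struct dispatch_thread_context_s dtc = {
		.dtc_key = (const void *)&example_with_context,
	};
	_dispatch_thread_context_push(&dtc);
	// ... work that may call _dispatch_thread_context_find(dtc.dtc_key) ...
	_dispatch_thread_context_pop(&dtc);
}
#endif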
365 typedef struct dispatch_thread_frame_iterator_s
{
366 dispatch_queue_t dtfi_queue
;
367 dispatch_thread_frame_t dtfi_frame
;
368 } *dispatch_thread_frame_iterator_t
;
370 DISPATCH_ALWAYS_INLINE
372 _dispatch_thread_frame_iterate_start(dispatch_thread_frame_iterator_t it
)
374 _dispatch_thread_getspecific_pair(
375 dispatch_queue_key
, (void **)&it
->dtfi_queue
,
376 dispatch_frame_key
, (void **)&it
->dtfi_frame
);
379 DISPATCH_ALWAYS_INLINE
381 _dispatch_thread_frame_iterate_next(dispatch_thread_frame_iterator_t it
)
383 dispatch_thread_frame_t dtf
= it
->dtfi_frame
;
384 dispatch_queue_t dq
= it
->dtfi_queue
;
387 dispatch_queue_t tq
= dq
->do_targetq
;
389 // redirections, dispatch_sync and dispatch_trysync_f may skip
390 // frames, so we need to simulate seeing the missing links
392 if (dq
== dtf
->dtf_queue
) {
393 it
->dtfi_frame
= dtf
->dtf_prev
;
396 it
->dtfi_queue
= dtf
->dtf_queue
;
397 it
->dtfi_frame
= dtf
->dtf_prev
;
400 it
->dtfi_queue
= dq
->do_targetq
;
404 DISPATCH_ALWAYS_INLINE
406 _dispatch_thread_frame_find_queue(dispatch_queue_t dq
)
408 struct dispatch_thread_frame_iterator_s it
;
410 _dispatch_thread_frame_iterate_start(&it
);
411 while (it
.dtfi_queue
) {
412 if (it
.dtfi_queue
== dq
) {
415 _dispatch_thread_frame_iterate_next(&it
);
420 DISPATCH_ALWAYS_INLINE
421 static inline dispatch_thread_frame_t
422 _dispatch_thread_frame_get_current(void)
424 return _dispatch_thread_getspecific(dispatch_frame_key
);
427 DISPATCH_ALWAYS_INLINE
429 _dispatch_thread_frame_save_state(dispatch_thread_frame_t dtf
)
431 _dispatch_thread_getspecific_packed_pair(
432 dispatch_queue_key
, dispatch_frame_key
, (void **)&dtf
->dtf_queue
);
435 DISPATCH_ALWAYS_INLINE
437 _dispatch_thread_frame_push(dispatch_thread_frame_t dtf
, dispatch_queue_t dq
)
439 _dispatch_thread_frame_save_state(dtf
);
440 _dispatch_thread_setspecific_pair(dispatch_queue_key
, dq
,
441 dispatch_frame_key
, dtf
);
444 DISPATCH_ALWAYS_INLINE
446 _dispatch_thread_frame_push_and_rebase(dispatch_thread_frame_t dtf
,
447 dispatch_queue_t dq
, dispatch_thread_frame_t new_base
)
449 _dispatch_thread_frame_save_state(dtf
);
450 _dispatch_thread_setspecific_pair(dispatch_queue_key
, dq
,
451 dispatch_frame_key
, new_base
);
454 DISPATCH_ALWAYS_INLINE
456 _dispatch_thread_frame_pop(dispatch_thread_frame_t dtf
)
458 _dispatch_thread_setspecific_packed_pair(
459 dispatch_queue_key
, dispatch_frame_key
, (void **)&dtf
->dtf_queue
);
462 DISPATCH_ALWAYS_INLINE
463 static inline dispatch_queue_t
464 _dispatch_thread_frame_stash(dispatch_thread_frame_t dtf
)
466 _dispatch_thread_getspecific_pair(
467 dispatch_queue_key
, (void **)&dtf
->dtf_queue
,
468 dispatch_frame_key
, (void **)&dtf
->dtf_prev
);
469 _dispatch_thread_frame_pop(dtf
->dtf_prev
);
470 return dtf
->dtf_queue
;
473 DISPATCH_ALWAYS_INLINE
475 _dispatch_thread_frame_unstash(dispatch_thread_frame_t dtf
)
477 _dispatch_thread_frame_pop(dtf
);
DISPATCH_ALWAYS_INLINE
static inline int
_dispatch_wqthread_override_start_check_owner(mach_port_t thread,
		dispatch_qos_t qos, mach_port_t *ulock_addr)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return 0;
	return _pthread_workqueue_override_start_direct_check_owner(thread,
			_dispatch_qos_to_pp(qos), ulock_addr);
#else
	(void)thread; (void)qos; (void)ulock_addr;
	return 0;
#endif
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_wqthread_override_start(mach_port_t thread, dispatch_qos_t qos)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return;
	(void)_pthread_workqueue_override_start_direct(thread,
			_dispatch_qos_to_pp(qos));
#else
	(void)thread; (void)qos;
#endif
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_wqthread_override_reset(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return;
	(void)_pthread_workqueue_override_reset();
#endif
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_override_start(mach_port_t thread, pthread_priority_t pp,
		void *resource)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return;
	(void)_pthread_qos_override_start_direct(thread, pp, resource);
#else
	(void)thread; (void)pp; (void)resource;
#endif
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_override_end(mach_port_t thread, void *resource)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return;
	(void)_pthread_qos_override_end_direct(thread, resource);
#else
	(void)thread; (void)resource;
#endif
}

#endif // DISPATCH_PURE_C
#pragma mark dispatch_queue_t state accessors
#if DISPATCH_PURE_C

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_flags_t
_dispatch_queue_atomic_flags(dispatch_queue_t dq)
{
	return os_atomic_load2o(dq, dq_atomic_flags, relaxed);
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_flags_t
_dispatch_queue_atomic_flags_set(dispatch_queue_t dq,
		dispatch_queue_flags_t bits)
{
	return os_atomic_or2o(dq, dq_atomic_flags, bits, relaxed);
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_flags_t
_dispatch_queue_atomic_flags_set_and_clear_orig(dispatch_queue_t dq,
		dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits)
{
	dispatch_queue_flags_t oflags, nflags;
	os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, {
		nflags = (oflags | add_bits) & ~clr_bits;
	});
	return oflags;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_flags_t
_dispatch_queue_atomic_flags_set_and_clear(dispatch_queue_t dq,
		dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits)
{
	dispatch_queue_flags_t oflags, nflags;
	os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, {
		nflags = (oflags | add_bits) & ~clr_bits;
	});
	return nflags;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_flags_t
_dispatch_queue_atomic_flags_set_orig(dispatch_queue_t dq,
		dispatch_queue_flags_t bits)
{
	return os_atomic_or_orig2o(dq, dq_atomic_flags, bits, relaxed);
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_flags_t
_dispatch_queue_atomic_flags_clear(dispatch_queue_t dq,
		dispatch_queue_flags_t bits)
{
	return os_atomic_and2o(dq, dq_atomic_flags, ~bits, relaxed);
}
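
// Added clarifying note (not from the original header): the _orig variants
// return the flags as they were before the update, while the plain variants
// return the resulting flags. For example,
// _dispatch_queue_atomic_flags_set_and_clear_orig() lets a caller apply
// (add_bits, clr_bits) and learn which bits were previously set in a single
// atomic RMW loop.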
603 DISPATCH_ALWAYS_INLINE
605 _dispatch_queue_is_thread_bound(dispatch_queue_t dq
)
607 return _dispatch_queue_atomic_flags(dq
) & DQF_THREAD_BOUND
;
610 DISPATCH_ALWAYS_INLINE
612 _dispatch_queue_cannot_trysync(dispatch_queue_t dq
)
614 return _dispatch_queue_atomic_flags(dq
) & DQF_CANNOT_TRYSYNC
;
617 DISPATCH_ALWAYS_INLINE
619 _dispatch_queue_label_needs_free(dispatch_queue_t dq
)
621 return _dispatch_queue_atomic_flags(dq
) & DQF_LABEL_NEEDS_FREE
;
624 DISPATCH_ALWAYS_INLINE
625 static inline dispatch_invoke_flags_t
626 _dispatch_queue_autorelease_frequency(dispatch_queue_t dq
)
628 const unsigned long factor
=
629 DISPATCH_INVOKE_AUTORELEASE_ALWAYS
/ DQF_AUTORELEASE_ALWAYS
;
630 dispatch_static_assert(factor
> 0);
632 dispatch_queue_flags_t qaf
= _dispatch_queue_atomic_flags(dq
);
634 qaf
&= _DQF_AUTORELEASE_MASK
;
635 return (dispatch_invoke_flags_t
)qaf
* factor
;
638 DISPATCH_ALWAYS_INLINE
639 static inline dispatch_invoke_flags_t
640 _dispatch_queue_merge_autorelease_frequency(dispatch_queue_t dq
,
641 dispatch_invoke_flags_t flags
)
643 dispatch_invoke_flags_t qaf
= _dispatch_queue_autorelease_frequency(dq
);
646 flags
&= ~_DISPATCH_INVOKE_AUTORELEASE_MASK
;
652 DISPATCH_ALWAYS_INLINE
654 _dispatch_queue_is_legacy(dispatch_queue_t dq
)
656 return _dispatch_queue_atomic_flags(dq
) & DQF_LEGACY
;
659 DISPATCH_ALWAYS_INLINE
661 _dispatch_wlh_retain(dispatch_wlh_t wlh
)
663 if (wlh
&& wlh
!= DISPATCH_WLH_ANON
) {
664 _dispatch_queue_retain_storage((dispatch_queue_t
)wlh
);
668 DISPATCH_ALWAYS_INLINE
670 _dispatch_wlh_release(dispatch_wlh_t wlh
)
672 if (wlh
&& wlh
!= DISPATCH_WLH_ANON
) {
673 _dispatch_queue_release_storage((dispatch_queue_t
)wlh
);
677 #define DISPATCH_WLH_STORAGE_REF 1ul
679 DISPATCH_ALWAYS_INLINE DISPATCH_PURE
680 static inline dispatch_wlh_t
681 _dispatch_get_wlh(void)
683 return _dispatch_thread_getspecific(dispatch_wlh_key
);
686 DISPATCH_ALWAYS_INLINE DISPATCH_PURE
687 static inline dispatch_wlh_t
688 _dispatch_get_wlh_reference(void)
690 dispatch_wlh_t wlh
= _dispatch_thread_getspecific(dispatch_wlh_key
);
691 if (wlh
!= DISPATCH_WLH_ANON
) {
692 wlh
= (dispatch_wlh_t
)((uintptr_t)wlh
& ~DISPATCH_WLH_STORAGE_REF
);
697 DISPATCH_ALWAYS_INLINE
699 _dispatch_adopt_wlh_anon_recurse(void)
701 dispatch_wlh_t cur_wlh
= _dispatch_get_wlh_reference();
702 if (cur_wlh
== DISPATCH_WLH_ANON
) return false;
703 _dispatch_debug("wlh[anon]: set current (releasing %p)", cur_wlh
);
704 _dispatch_wlh_release(cur_wlh
);
705 _dispatch_thread_setspecific(dispatch_wlh_key
, (void *)DISPATCH_WLH_ANON
);
709 DISPATCH_ALWAYS_INLINE
711 _dispatch_adopt_wlh_anon(void)
713 if (unlikely(!_dispatch_adopt_wlh_anon_recurse())) {
714 DISPATCH_INTERNAL_CRASH(0, "Lingering DISPATCH_WLH_ANON");
718 DISPATCH_ALWAYS_INLINE
720 _dispatch_adopt_wlh(dispatch_wlh_t wlh
)
722 dispatch_wlh_t cur_wlh
= _dispatch_get_wlh_reference();
723 _dispatch_debug("wlh[%p]: adopt current (releasing %p)", wlh
, cur_wlh
);
724 if (cur_wlh
== DISPATCH_WLH_ANON
) {
725 DISPATCH_INTERNAL_CRASH(0, "Lingering DISPATCH_WLH_ANON");
727 if (cur_wlh
!= wlh
) {
728 dispatch_assert(wlh
);
729 _dispatch_wlh_release(cur_wlh
);
730 _dispatch_wlh_retain(wlh
);
732 _dispatch_thread_setspecific(dispatch_wlh_key
, (void *)wlh
);
735 DISPATCH_ALWAYS_INLINE
737 _dispatch_preserve_wlh_storage_reference(dispatch_wlh_t wlh
)
739 dispatch_assert(wlh
!= DISPATCH_WLH_ANON
);
740 dispatch_assert(wlh
== _dispatch_get_wlh());
741 _dispatch_thread_setspecific(dispatch_wlh_key
,
742 (void *)((uintptr_t)wlh
| DISPATCH_WLH_STORAGE_REF
));
745 DISPATCH_ALWAYS_INLINE
747 _dispatch_reset_wlh(void)
749 dispatch_assert(_dispatch_get_wlh() == DISPATCH_WLH_ANON
);
750 _dispatch_debug("wlh[anon]: clear current");
751 _dispatch_thread_setspecific(dispatch_wlh_key
, NULL
);
752 _dispatch_clear_return_to_kernel();
755 DISPATCH_ALWAYS_INLINE
757 _dispatch_wlh_should_poll_unote(dispatch_unote_t du
)
759 if (likely(_dispatch_needs_to_return_to_kernel())) {
760 dispatch_wlh_t wlh
= _dispatch_get_wlh();
761 return wlh
!= DISPATCH_WLH_ANON
&& du
._du
->du_wlh
== wlh
;
766 #endif // DISPATCH_PURE_C
#ifndef __cplusplus

DISPATCH_ALWAYS_INLINE
static inline uint32_t
_dq_state_suspend_cnt(uint64_t dq_state)
{
	return (uint32_t)(dq_state / DISPATCH_QUEUE_SUSPEND_INTERVAL);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_has_side_suspend_cnt(uint64_t dq_state)
{
	return dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT;
}

DISPATCH_ALWAYS_INLINE
static inline int32_t
_dq_state_extract_width_bits(uint64_t dq_state)
{
	dq_state &= DISPATCH_QUEUE_WIDTH_MASK;
	return (int32_t)(dq_state >> DISPATCH_QUEUE_WIDTH_SHIFT);
}

DISPATCH_ALWAYS_INLINE
static inline int32_t
_dq_state_available_width(uint64_t dq_state)
{
	int32_t full = DISPATCH_QUEUE_WIDTH_FULL;
	if (likely(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) {
		return full - _dq_state_extract_width_bits(dq_state);
	}
	return 0;
}

DISPATCH_ALWAYS_INLINE
static inline int32_t
_dq_state_used_width(uint64_t dq_state, uint16_t dq_width)
{
	int32_t full = DISPATCH_QUEUE_WIDTH_FULL;
	int32_t width = _dq_state_extract_width_bits(dq_state);

	if (dq_state & DISPATCH_QUEUE_PENDING_BARRIER) {
		// DISPATCH_QUEUE_PENDING_BARRIER means (dq_width - 1) of the used width
		// is pre-reservation that we want to ignore
		return width - (full - dq_width) - (dq_width - 1);
	}
	return width - (full - dq_width);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dq_state_is_suspended(uint64_t dq_state)
{
	return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION;
}
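
// Worked example (added for clarity; the numbers are illustrative): for a
// concurrent queue with dq_width = 4, a width field reading
// (DISPATCH_QUEUE_WIDTH_FULL - 4) + 3 means 3 slots are accounted for. If
// DISPATCH_QUEUE_PENDING_BARRIER is also set, (dq_width - 1) = 3 of those are
// only the room pre-reserved for the pending barrier, so
// _dq_state_used_width() reports 0 items actually running.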
823 #define DISPATCH_QUEUE_IS_SUSPENDED(x) \
824 _dq_state_is_suspended(os_atomic_load2o(x, dq_state, relaxed))
826 DISPATCH_ALWAYS_INLINE
828 _dq_state_is_inactive(uint64_t dq_state
)
830 return dq_state
& DISPATCH_QUEUE_INACTIVE
;
833 DISPATCH_ALWAYS_INLINE
835 _dq_state_needs_activation(uint64_t dq_state
)
837 return dq_state
& DISPATCH_QUEUE_NEEDS_ACTIVATION
;
840 DISPATCH_ALWAYS_INLINE
842 _dq_state_is_in_barrier(uint64_t dq_state
)
844 return dq_state
& DISPATCH_QUEUE_IN_BARRIER
;
847 DISPATCH_ALWAYS_INLINE
849 _dq_state_has_available_width(uint64_t dq_state
)
851 return !(dq_state
& DISPATCH_QUEUE_WIDTH_FULL_BIT
);
854 DISPATCH_ALWAYS_INLINE
856 _dq_state_has_pending_barrier(uint64_t dq_state
)
858 return dq_state
& DISPATCH_QUEUE_PENDING_BARRIER
;
861 DISPATCH_ALWAYS_INLINE
863 _dq_state_is_dirty(uint64_t dq_state
)
865 return dq_state
& DISPATCH_QUEUE_DIRTY
;
868 DISPATCH_ALWAYS_INLINE
870 _dq_state_is_base_wlh(uint64_t dq_state
)
872 return dq_state
& DISPATCH_QUEUE_ROLE_BASE_WLH
;
875 DISPATCH_ALWAYS_INLINE
877 _dq_state_is_base_anon(uint64_t dq_state
)
879 return dq_state
& DISPATCH_QUEUE_ROLE_BASE_ANON
;
882 DISPATCH_ALWAYS_INLINE
884 _dq_state_is_inner_queue(uint64_t dq_state
)
886 return (dq_state
& DISPATCH_QUEUE_ROLE_MASK
) == DISPATCH_QUEUE_ROLE_INNER
;
889 DISPATCH_ALWAYS_INLINE
891 _dq_state_is_enqueued(uint64_t dq_state
)
893 return dq_state
& (DISPATCH_QUEUE_ENQUEUED
|DISPATCH_QUEUE_ENQUEUED_ON_MGR
);
896 DISPATCH_ALWAYS_INLINE
898 _dq_state_is_enqueued_on_target(uint64_t dq_state
)
900 return dq_state
& DISPATCH_QUEUE_ENQUEUED
;
903 DISPATCH_ALWAYS_INLINE
905 _dq_state_is_enqueued_on_manager(uint64_t dq_state
)
907 return dq_state
& DISPATCH_QUEUE_ENQUEUED_ON_MGR
;
910 DISPATCH_ALWAYS_INLINE
912 _dq_state_in_sync_transfer(uint64_t dq_state
)
914 return dq_state
& DISPATCH_QUEUE_SYNC_TRANSFER
;
917 DISPATCH_ALWAYS_INLINE
919 _dq_state_received_override(uint64_t dq_state
)
921 return _dq_state_is_base_anon(dq_state
) &&
922 (dq_state
& DISPATCH_QUEUE_RECEIVED_OVERRIDE
);
925 DISPATCH_ALWAYS_INLINE
927 _dq_state_received_sync_wait(uint64_t dq_state
)
929 return _dq_state_is_base_wlh(dq_state
) &&
930 (dq_state
& DISPATCH_QUEUE_RECEIVED_SYNC_WAIT
);
933 DISPATCH_ALWAYS_INLINE
934 static inline dispatch_qos_t
935 _dq_state_max_qos(uint64_t dq_state
)
937 dq_state
&= DISPATCH_QUEUE_MAX_QOS_MASK
;
938 return (dispatch_qos_t
)(dq_state
>> DISPATCH_QUEUE_MAX_QOS_SHIFT
);
941 DISPATCH_ALWAYS_INLINE
942 static inline uint64_t
943 _dq_state_from_qos(dispatch_qos_t qos
)
945 return (uint64_t)(qos
) << DISPATCH_QUEUE_MAX_QOS_SHIFT
;
948 DISPATCH_ALWAYS_INLINE
949 static inline uint64_t
950 _dq_state_merge_qos(uint64_t dq_state
, dispatch_qos_t qos
)
952 uint64_t qos_bits
= _dq_state_from_qos(qos
);
953 if ((dq_state
& DISPATCH_QUEUE_MAX_QOS_MASK
) < qos_bits
) {
954 dq_state
&= ~DISPATCH_QUEUE_MAX_QOS_MASK
;
955 dq_state
|= qos_bits
;
956 if (unlikely(_dq_state_is_base_anon(dq_state
))) {
957 dq_state
|= DISPATCH_QUEUE_RECEIVED_OVERRIDE
;
963 DISPATCH_ALWAYS_INLINE
964 static inline dispatch_tid
965 _dq_state_drain_owner(uint64_t dq_state
)
967 return _dispatch_lock_owner((dispatch_lock
)dq_state
);
969 #define DISPATCH_QUEUE_DRAIN_OWNER(dq) \
970 _dq_state_drain_owner(os_atomic_load2o(dq, dq_state, relaxed))
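
// Added clarifying note (hedged, not from the original source): the casts to
// dispatch_lock in the helpers above rely on the drain-lock owner being
// encoded in the low 32 bits of dq_state, so the generic _dispatch_lock_*
// helpers can operate directly on a truncated copy of the state word.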
972 DISPATCH_ALWAYS_INLINE
974 _dq_state_drain_locked_by(uint64_t dq_state
, dispatch_tid tid
)
976 return _dispatch_lock_is_locked_by((dispatch_lock
)dq_state
, tid
);
979 DISPATCH_ALWAYS_INLINE
981 _dq_state_drain_locked_by_self(uint64_t dq_state
)
983 return _dispatch_lock_is_locked_by_self((dispatch_lock
)dq_state
);
986 DISPATCH_ALWAYS_INLINE
988 _dq_state_drain_locked(uint64_t dq_state
)
990 return _dispatch_lock_is_locked((dispatch_lock
)dq_state
);
993 DISPATCH_ALWAYS_INLINE
995 _dq_state_is_sync_runnable(uint64_t dq_state
)
997 return dq_state
< DISPATCH_QUEUE_IN_BARRIER
;
1000 DISPATCH_ALWAYS_INLINE
1002 _dq_state_is_runnable(uint64_t dq_state
)
1004 return dq_state
< DISPATCH_QUEUE_WIDTH_FULL_BIT
;
1007 DISPATCH_ALWAYS_INLINE
1009 _dq_state_should_override(uint64_t dq_state
)
1011 if (_dq_state_is_suspended(dq_state
) ||
1012 _dq_state_is_enqueued_on_manager(dq_state
)) {
1015 if (_dq_state_is_enqueued_on_target(dq_state
)) {
1018 if (_dq_state_is_base_wlh(dq_state
)) {
1021 return _dq_state_drain_locked(dq_state
);
1025 #endif // __cplusplus
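
// Summary note (added for clarity): dq_state is a single 64-bit word that
// packs the drain-lock owner, the width in use, the suspend count, the max
// QoS, the role bits and several one-bit conditions (dirty, enqueued,
// in-barrier, ...). The _dq_state_* helpers above each extract or test one of
// those fields; the state machine below manipulates them atomically with
// os_atomic_rmw_loop2o().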
1027 #pragma mark dispatch_queue_t state machine
1029 static inline pthread_priority_t
_dispatch_get_priority(void);
1030 static inline dispatch_priority_t
_dispatch_get_basepri(void);
1031 static inline dispatch_qos_t
_dispatch_get_basepri_override_qos_floor(void);
1032 static inline void _dispatch_set_basepri_override_qos(dispatch_qos_t qos
);
1033 static inline void _dispatch_reset_basepri(dispatch_priority_t dbp
);
1034 static inline dispatch_priority_t
_dispatch_set_basepri(dispatch_priority_t dbp
);
1035 static inline bool _dispatch_queue_need_override_retain(
1036 dispatch_queue_class_t dqu
, dispatch_qos_t qos
);
1040 // Note to later developers: ensure that any initialization changes are
1041 // made for statically allocated queues (i.e. _dispatch_main_q).
1043 _dispatch_queue_init(dispatch_queue_t dq
, dispatch_queue_flags_t dqf
,
1044 uint16_t width
, uint64_t initial_state_bits
)
1046 uint64_t dq_state
= DISPATCH_QUEUE_STATE_INIT_VALUE(width
);
1048 dispatch_assert((initial_state_bits
& ~(DISPATCH_QUEUE_ROLE_MASK
|
1049 DISPATCH_QUEUE_INACTIVE
)) == 0);
1051 if (initial_state_bits
& DISPATCH_QUEUE_INACTIVE
) {
1052 dq_state
|= DISPATCH_QUEUE_INACTIVE
+ DISPATCH_QUEUE_NEEDS_ACTIVATION
;
1053 dq
->do_ref_cnt
+= 2; // rdar://8181908 see _dispatch_queue_resume
1056 dq_state
|= (initial_state_bits
& DISPATCH_QUEUE_ROLE_MASK
);
1057 dq
->do_next
= (struct dispatch_queue_s
*)DISPATCH_OBJECT_LISTLESS
;
1058 dqf
|= DQF_WIDTH(width
);
1059 os_atomic_store2o(dq
, dq_atomic_flags
, dqf
, relaxed
);
1060 dq
->dq_state
= dq_state
;
1062 os_atomic_inc_orig(&_dispatch_queue_serial_numbers
, relaxed
);
1066 * - _dispatch_queue_set_target_queue
1067 * - changing dispatch source handlers
1069 * Tries to prevent concurrent wakeup of an inactive queue by suspending it.
1071 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1073 _dispatch_queue_try_inactive_suspend(dispatch_queue_t dq
)
1075 uint64_t old_state
, new_state
;
1077 (void)os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, relaxed
, {
1078 if (unlikely(!_dq_state_is_inactive(old_state
))) {
1079 os_atomic_rmw_loop_give_up(return false);
1081 new_state
= old_state
+ DISPATCH_QUEUE_SUSPEND_INTERVAL
;
1083 if (unlikely(!_dq_state_is_suspended(old_state
) ||
1084 _dq_state_has_side_suspend_cnt(old_state
))) {
1085 // Crashing here means that 128+ dispatch_suspend() calls have been
1086 // made on an inactive object and then dispatch_set_target_queue() or
1087 // dispatch_set_*_handler() has been called.
1089 // We don't want to handle the side suspend count in a codepath that
1090 // needs to be fast.
1091 DISPATCH_CLIENT_CRASH(dq
, "Too many calls to dispatch_suspend() "
1092 "prior to calling dispatch_set_target_queue() "
1093 "or dispatch_set_*_handler()");
1098 DISPATCH_ALWAYS_INLINE
1100 _dq_state_needs_lock_override(uint64_t dq_state
, dispatch_qos_t qos
)
1102 return _dq_state_is_base_anon(dq_state
) &&
1103 qos
< _dq_state_max_qos(dq_state
);
1106 DISPATCH_ALWAYS_INLINE
1107 static inline dispatch_qos_t
1108 _dispatch_queue_override_self(uint64_t dq_state
)
1110 dispatch_qos_t qos
= _dq_state_max_qos(dq_state
);
1111 _dispatch_wqthread_override_start(_dispatch_tid_self(), qos
);
1112 // ensure that the root queue sees
1113 // that this thread was overridden.
1114 _dispatch_set_basepri_override_qos(qos
);
1118 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1119 static inline uint64_t
1120 _dispatch_queue_drain_try_lock(dispatch_queue_t dq
,
1121 dispatch_invoke_flags_t flags
)
1123 uint64_t pending_barrier_width
=
1124 (dq
->dq_width
- 1) * DISPATCH_QUEUE_WIDTH_INTERVAL
;
1125 uint64_t set_owner_and_set_full_width
=
1126 _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT
;
1127 uint64_t lock_fail_mask
, old_state
, new_state
, dequeue_mask
;
1129 // same as !_dq_state_is_runnable()
1130 lock_fail_mask
= ~(DISPATCH_QUEUE_WIDTH_FULL_BIT
- 1);
1131 // same as _dq_state_drain_locked()
1132 lock_fail_mask
|= DISPATCH_QUEUE_DRAIN_OWNER_MASK
;
1134 if (flags
& DISPATCH_INVOKE_STEALING
) {
1135 lock_fail_mask
|= DISPATCH_QUEUE_ENQUEUED_ON_MGR
;
1137 } else if (flags
& DISPATCH_INVOKE_MANAGER_DRAIN
) {
1138 dequeue_mask
= DISPATCH_QUEUE_ENQUEUED_ON_MGR
;
1140 lock_fail_mask
|= DISPATCH_QUEUE_ENQUEUED_ON_MGR
;
1141 dequeue_mask
= DISPATCH_QUEUE_ENQUEUED
;
1143 dispatch_assert(!(flags
& DISPATCH_INVOKE_WLH
));
1145 dispatch_qos_t oq_floor
= _dispatch_get_basepri_override_qos_floor();
1147 os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, acquire
, {
1148 new_state
= old_state
;
1149 if (likely(!(old_state
& lock_fail_mask
))) {
1150 if (unlikely(_dq_state_needs_lock_override(old_state
, oq_floor
))) {
1151 os_atomic_rmw_loop_give_up({
1152 oq_floor
= _dispatch_queue_override_self(old_state
);
1157 // Only keep the HAS_WAITER, MAX_QOS and ENQUEUED bits
1158 // In particular acquiring the drain lock clears the DIRTY and
1159 // RECEIVED_OVERRIDE bits.
1161 new_state
&= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK
;
1162 new_state
|= set_owner_and_set_full_width
;
1163 if (_dq_state_has_pending_barrier(old_state
) ||
1164 old_state
+ pending_barrier_width
<
1165 DISPATCH_QUEUE_WIDTH_FULL_BIT
) {
1166 new_state
|= DISPATCH_QUEUE_IN_BARRIER
;
1168 } else if (dequeue_mask
) {
1169 // dequeue_mask is in a register, xor yields better assembly
1170 new_state
^= dequeue_mask
;
1172 os_atomic_rmw_loop_give_up(break);
1176 dispatch_assert((old_state
& dequeue_mask
) == dequeue_mask
);
1177 if (likely(!(old_state
& lock_fail_mask
))) {
1178 new_state
&= DISPATCH_QUEUE_IN_BARRIER
| DISPATCH_QUEUE_WIDTH_FULL_BIT
|
1180 old_state
&= DISPATCH_QUEUE_WIDTH_MASK
;
1181 return new_state
- old_state
;
1186 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1188 _dispatch_queue_drain_try_lock_wlh(dispatch_queue_t dq
, uint64_t *dq_state
)
1190 uint64_t old_state
, new_state
;
1191 uint64_t lock_bits
= _dispatch_lock_value_for_self() |
1192 DISPATCH_QUEUE_WIDTH_FULL_BIT
| DISPATCH_QUEUE_IN_BARRIER
;
1194 os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, acquire
, {
1195 new_state
= old_state
;
1196 if (unlikely(_dq_state_is_suspended(old_state
))) {
1197 new_state
&= ~DISPATCH_QUEUE_ENQUEUED
;
1198 } else if (unlikely(_dq_state_drain_locked(old_state
))) {
1199 os_atomic_rmw_loop_give_up(break);
1201 new_state
&= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK
;
1202 new_state
|= lock_bits
;
1205 if (unlikely(!_dq_state_is_base_wlh(old_state
) ||
1206 !_dq_state_is_enqueued_on_target(old_state
) ||
1207 _dq_state_is_enqueued_on_manager(old_state
))) {
1211 DISPATCH_INTERNAL_CRASH(old_state
, "Invalid wlh state");
1214 if (dq_state
) *dq_state
= new_state
;
1215 return !_dq_state_is_suspended(old_state
) &&
1216 !_dq_state_drain_locked(old_state
);
1219 DISPATCH_ALWAYS_INLINE
1221 _dispatch_queue_mgr_lock(dispatch_queue_t dq
)
1223 uint64_t old_state
, new_state
, set_owner_and_set_full_width
=
1224 _dispatch_lock_value_for_self() | DISPATCH_QUEUE_SERIAL_DRAIN_OWNED
;
1226 os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, acquire
, {
1227 new_state
= old_state
;
1228 if (unlikely(!_dq_state_is_runnable(old_state
) ||
1229 _dq_state_drain_locked(old_state
))) {
1230 DISPATCH_INTERNAL_CRASH((uintptr_t)old_state
,
1231 "Locking the manager should not fail");
1233 new_state
&= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK
;
1234 new_state
|= set_owner_and_set_full_width
;
1238 DISPATCH_ALWAYS_INLINE
1240 _dispatch_queue_mgr_unlock(dispatch_queue_t dq
)
1242 uint64_t old_state
, new_state
;
1243 os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, release
, {
1244 new_state
= old_state
- DISPATCH_QUEUE_SERIAL_DRAIN_OWNED
;
1245 new_state
&= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK
;
1246 new_state
&= ~DISPATCH_QUEUE_MAX_QOS_MASK
;
1248 return _dq_state_is_dirty(old_state
);
1251 /* Used by _dispatch_barrier_{try,}sync
1253 * Note, this fails if any of e:1 or dl!=0, but that allows this code to be a
1254 * simple cmpxchg which is significantly faster on Intel, and makes a
1255 * significant difference on the uncontended codepath.
1257 * See discussion for DISPATCH_QUEUE_DIRTY in queue_internal.h
1259 * Initial state must be `completely idle`
1260 * Final state forces { ib:1, qf:1, w:0 }
1262 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1264 _dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_queue_t dq
,
1265 uint32_t tid
, uint64_t suspend_count
)
1267 uint64_t init
= DISPATCH_QUEUE_STATE_INIT_VALUE(dq
->dq_width
);
1268 uint64_t value
= DISPATCH_QUEUE_WIDTH_FULL_BIT
| DISPATCH_QUEUE_IN_BARRIER
|
1269 _dispatch_lock_value_from_tid(tid
) |
1270 (suspend_count
* DISPATCH_QUEUE_SUSPEND_INTERVAL
);
1271 uint64_t old_state
, new_state
;
1273 return os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, acquire
, {
1274 uint64_t role
= old_state
& DISPATCH_QUEUE_ROLE_MASK
;
1275 if (old_state
!= (init
| role
)) {
1276 os_atomic_rmw_loop_give_up(break);
1278 new_state
= value
| role
;
1282 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1284 _dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq
, uint32_t tid
)
1286 return _dispatch_queue_try_acquire_barrier_sync_and_suspend(dq
, tid
, 0);
1289 /* Used by _dispatch_sync for root queues and some drain codepaths
1291 * Root queues have no strict orderning and dispatch_sync() always goes through.
1292 * Drain is the sole setter of `dl` hence can use this non failing version of
1293 * _dispatch_queue_try_acquire_sync().
1295 * Final state: { w += 1 }
1297 DISPATCH_ALWAYS_INLINE
1299 _dispatch_queue_reserve_sync_width(dispatch_queue_t dq
)
1301 (void)os_atomic_add2o(dq
, dq_state
,
1302 DISPATCH_QUEUE_WIDTH_INTERVAL
, relaxed
);
1305 /* Used by _dispatch_sync on non-serial queues
1307 * Initial state must be { sc:0, ib:0, pb:0, d:0 }
1308 * Final state: { w += 1 }
1310 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1312 _dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq
)
1314 uint64_t old_state
, new_state
;
1316 // <rdar://problem/24738102&24743140> reserving non barrier width
1317 // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width
1318 // equivalent), so we have to check that this thread hasn't enqueued
1319 // anything ahead of this call or we can break ordering
1320 if (unlikely(dq
->dq_items_tail
)) {
1324 return os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, relaxed
, {
1325 if (unlikely(!_dq_state_is_sync_runnable(old_state
)) ||
1326 _dq_state_is_dirty(old_state
) ||
1327 _dq_state_has_pending_barrier(old_state
)) {
1328 os_atomic_rmw_loop_give_up(return false);
1330 new_state
= old_state
+ DISPATCH_QUEUE_WIDTH_INTERVAL
;
1334 /* Used by _dispatch_apply_redirect
1336 * Try to acquire at most da_width and returns what could be acquired,
1339 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1340 static inline int32_t
1341 _dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq
, int32_t da_width
)
1343 uint64_t old_state
, new_state
;
1346 (void)os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, relaxed
, {
1347 width
= (int32_t)_dq_state_available_width(old_state
);
1348 if (unlikely(!width
)) {
1349 os_atomic_rmw_loop_give_up(return 0);
1351 if (width
> da_width
) {
1354 new_state
= old_state
+ (uint64_t)width
* DISPATCH_QUEUE_WIDTH_INTERVAL
;
1359 /* Used by _dispatch_apply_redirect
1361 * Release width acquired by _dispatch_queue_try_acquire_width
1363 DISPATCH_ALWAYS_INLINE
1365 _dispatch_queue_relinquish_width(dispatch_queue_t dq
, int32_t da_width
)
1367 (void)os_atomic_sub2o(dq
, dq_state
,
1368 (uint64_t)da_width
* DISPATCH_QUEUE_WIDTH_INTERVAL
, relaxed
);
1371 /* Used by target-queue recursing code
1373 * Initial state must be { sc:0, ib:0, qf:0, pb:0, d:0 }
1374 * Final state: { w += 1 }
1376 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1378 _dispatch_queue_try_acquire_async(dispatch_queue_t dq
)
1380 uint64_t old_state
, new_state
;
1382 return os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, acquire
, {
1383 if (unlikely(!_dq_state_is_runnable(old_state
) ||
1384 _dq_state_is_dirty(old_state
) ||
1385 _dq_state_has_pending_barrier(old_state
))) {
1386 os_atomic_rmw_loop_give_up(return false);
1388 new_state
= old_state
+ DISPATCH_QUEUE_WIDTH_INTERVAL
;
1392 /* Used by concurrent drain
1394 * Either acquires the full barrier width, in which case the Final state is:
1395 * { ib:1 qf:1 pb:0 d:0 }
1396 * Or if there isn't enough width prepare the queue with the PENDING_BARRIER bit
1399 * This always clears the dirty bit as we know for sure we shouldn't reevaluate
1400 * the state machine here
1402 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1404 _dispatch_queue_try_upgrade_full_width(dispatch_queue_t dq
, uint64_t owned
)
1406 uint64_t old_state
, new_state
;
1407 uint64_t pending_barrier_width
= DISPATCH_QUEUE_PENDING_BARRIER
+
1408 (dq
->dq_width
- 1) * DISPATCH_QUEUE_WIDTH_INTERVAL
;
1410 os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, acquire
, {
1411 new_state
= old_state
- owned
;
1412 if (likely(!_dq_state_has_pending_barrier(old_state
))) {
1413 new_state
+= pending_barrier_width
;
1415 if (likely(_dq_state_is_runnable(new_state
))) {
1416 new_state
+= DISPATCH_QUEUE_WIDTH_INTERVAL
;
1417 new_state
+= DISPATCH_QUEUE_IN_BARRIER
;
1418 new_state
-= DISPATCH_QUEUE_PENDING_BARRIER
;
1420 new_state
&= ~DISPATCH_QUEUE_DIRTY
;
1422 return new_state
& DISPATCH_QUEUE_IN_BARRIER
;
1425 /* Used at the end of Drainers
1427 * This adjusts the `owned` width when the next continuation is already known
1428 * to account for its barrierness.
1430 DISPATCH_ALWAYS_INLINE
1431 static inline uint64_t
1432 _dispatch_queue_adjust_owned(dispatch_queue_t dq
, uint64_t owned
,
1433 struct dispatch_object_s
*next_dc
)
1435 uint64_t reservation
;
1437 if (unlikely(dq
->dq_width
> 1)) {
1438 if (next_dc
&& _dispatch_object_is_barrier(next_dc
)) {
1439 reservation
= DISPATCH_QUEUE_PENDING_BARRIER
;
1440 reservation
+= (dq
->dq_width
- 1) * DISPATCH_QUEUE_WIDTH_INTERVAL
;
1441 owned
-= reservation
;
1447 /* Used at the end of Drainers
1449 * Unlocking fails if the DIRTY bit is seen (and the queue is not suspended).
1450 * In that case, only the DIRTY bit is cleared. The DIRTY bit is therefore used
1451 * as a signal to renew the drain lock instead of releasing it.
1453 * Successful unlock forces { dl:0, d:!done, qo:0 } and gives back `owned`
1455 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1457 _dispatch_queue_drain_try_unlock(dispatch_queue_t dq
, uint64_t owned
, bool done
)
1459 uint64_t old_state
, new_state
;
1461 os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, release
, {
1462 new_state
= old_state
- owned
;
1463 new_state
&= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK
;
1464 if (unlikely(_dq_state_is_suspended(old_state
))) {
1466 } else if (unlikely(_dq_state_is_dirty(old_state
))) {
1467 os_atomic_rmw_loop_give_up({
1468 // just renew the drain lock with an acquire barrier, to see
1469 // what the enqueuer that set DIRTY has done.
1470 // the xor generates better assembly as DISPATCH_QUEUE_DIRTY
1471 // is already in a register
1472 os_atomic_xor2o(dq
, dq_state
, DISPATCH_QUEUE_DIRTY
, acquire
);
1475 } else if (likely(done
)) {
1476 new_state
&= ~DISPATCH_QUEUE_MAX_QOS_MASK
;
1478 new_state
|= DISPATCH_QUEUE_DIRTY
;
1482 if (_dq_state_received_override(old_state
)) {
1483 // Ensure that the root queue sees that this thread was overridden.
1484 _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state
));
1490 #pragma mark os_mpsc_queue
1492 // type_t * {volatile,const,_Atomic,...} -> type_t *
1493 // type_t[] -> type_t *
1494 #define os_unqualified_pointer_type(expr) \
1495 typeof(typeof(*(expr)) *)
1497 #define os_mpsc_node_type(q, _ns) \
1498 os_unqualified_pointer_type((q)->_ns##_head)
1501 // Multi Producer calls, can be used safely concurrently
1504 // Returns true when the queue was empty and the head must be set
1505 #define os_mpsc_push_update_tail_list(q, _ns, head, tail, _o_next) ({ \
1506 os_mpsc_node_type(q, _ns) _head = (head), _tail = (tail), _prev; \
1507 _tail->_o_next = NULL; \
1508 _prev = os_atomic_xchg2o((q), _ns##_tail, _tail, release); \
1509 if (likely(_prev)) { \
1510 os_atomic_store2o(_prev, _o_next, _head, relaxed); \
1515 // Returns true when the queue was empty and the head must be set
1516 #define os_mpsc_push_update_tail(q, _ns, o, _o_next) ({ \
1517 os_mpsc_node_type(q, _ns) _o = (o); \
1518 os_mpsc_push_update_tail_list(q, _ns, _o, _o, _o_next); \
1521 #define os_mpsc_push_update_head(q, _ns, o) ({ \
1522 os_atomic_store2o((q), _ns##_head, o, relaxed); \
1526 // Single Consumer calls, can NOT be used safely concurrently
1529 #define os_mpsc_get_head(q, _ns) \
1530 _dispatch_wait_until(os_atomic_load2o(q, _ns##_head, dependency))
1532 #define os_mpsc_get_next(_n, _o_next) \
1533 _dispatch_wait_until(os_atomic_load2o(_n, _o_next, dependency))
1535 #define os_mpsc_pop_head(q, _ns, head, _o_next) ({ \
1536 typeof(q) _q = (q); \
1537 os_mpsc_node_type(_q, _ns) _head = (head), _n; \
1538 _n = os_atomic_load2o(_head, _o_next, dependency); \
1539 os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
1540 /* 22708742: set tail to NULL with release, so that NULL write */ \
1541 /* to head above doesn't clobber head from concurrent enqueuer */ \
1542 if (unlikely(!_n && \
1543 !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release))) { \
1544 _n = os_mpsc_get_next(_head, _o_next); \
1545 os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
1550 #define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next) ({ \
1551 typeof(q) _q = (q); \
1552 os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \
1553 if (unlikely(!_n && \
1554 !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed))) { \
1555 _n = os_mpsc_get_head(q, _ns); \
1556 os_atomic_store2o(_head, _o_next, _n, relaxed); \
1558 os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
#define os_mpsc_capture_snapshot(q, _ns, tail)  ({ \
	typeof(q) _q = (q); \
	os_mpsc_node_type(_q, _ns) _head = os_mpsc_get_head(q, _ns); \
	os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \
	/* 22708742: set tail to NULL with release, so that NULL write */ \
	/* to head above doesn't clobber head from concurrent enqueuer */ \
	*(tail) = os_atomic_xchg2o(_q, _ns##_tail, NULL, release); \
	_head; \
})

#define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \
	os_unqualified_pointer_type(head) _head = (head), _n = NULL; \
	if (_head != (tail)) { \
		_n = os_mpsc_get_next(_head, _o_next); \
	} \
	_n; \
})

#define os_mpsc_prepend(q, _ns, head, tail, _o_next)  ({ \
	typeof(q) _q = (q); \
	os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \
	os_atomic_store2o(_tail, _o_next, NULL, relaxed); \
	if (unlikely(!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release))) { \
		_n = os_mpsc_get_head(q, _ns); \
		os_atomic_store2o(_tail, _o_next, _n, relaxed); \
	} \
	os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
})
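
// Illustrative sketch (assumption, not from the original header): a user of
// the os_mpsc macros supplies a pair of fields named <ns>_head / <ns>_tail,
// as the dq_items-prefixed helpers below do for dispatch queues.
#if 0 // hypothetical example structure and call pattern
struct example_mpsc_s {
	struct example_node_s *volatile eq_items_head;
	struct example_node_s *volatile eq_items_tail;
};
// producer:  if (os_mpsc_push_update_tail(q, eq_items, node, do_next))
//                os_mpsc_push_update_head(q, eq_items, node);
// consumer:  head = os_mpsc_get_head(q, eq_items);
//            next = os_mpsc_pop_head(q, eq_items, head, do_next);
#endif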
1590 #pragma mark dispatch_queue_t tq lock
1592 DISPATCH_ALWAYS_INLINE
1594 _dispatch_queue_sidelock_trylock(dispatch_queue_t dq
, dispatch_qos_t qos
)
1597 if (_dispatch_unfair_lock_trylock(&dq
->dq_sidelock
, &owner
)) {
1600 _dispatch_wqthread_override_start_check_owner(owner
, qos
,
1601 &dq
->dq_sidelock
.dul_lock
);
1605 DISPATCH_ALWAYS_INLINE
1607 _dispatch_queue_sidelock_lock(dispatch_queue_t dq
)
1609 return _dispatch_unfair_lock_lock(&dq
->dq_sidelock
);
1612 DISPATCH_ALWAYS_INLINE
1614 _dispatch_queue_sidelock_tryunlock(dispatch_queue_t dq
)
1616 if (_dispatch_unfair_lock_tryunlock(&dq
->dq_sidelock
)) {
1619 // Ensure that the root queue sees that this thread was overridden.
1620 // Since we don't know which override QoS was used, use MAINTENANCE
1621 // as a marker for _dispatch_reset_basepri_override()
1622 _dispatch_set_basepri_override_qos(DISPATCH_QOS_MAINTENANCE
);
1626 DISPATCH_ALWAYS_INLINE
1628 _dispatch_queue_sidelock_unlock(dispatch_queue_t dq
)
1630 if (_dispatch_unfair_lock_unlock_had_failed_trylock(&dq
->dq_sidelock
)) {
1631 // Ensure that the root queue sees that this thread was overridden.
1632 // Since we don't know which override QoS was used, use MAINTENANCE
1633 // as a marker for _dispatch_reset_basepri_override()
1634 _dispatch_set_basepri_override_qos(DISPATCH_QOS_MAINTENANCE
);
1639 #pragma mark dispatch_queue_t misc
1641 DISPATCH_ALWAYS_INLINE
1642 static inline dispatch_queue_t
1643 _dispatch_queue_get_current(void)
1645 return (dispatch_queue_t
)_dispatch_thread_getspecific(dispatch_queue_key
);
1648 DISPATCH_ALWAYS_INLINE
1650 _dispatch_queue_set_current(dispatch_queue_t dq
)
1652 _dispatch_thread_setspecific(dispatch_queue_key
, dq
);
1655 DISPATCH_ALWAYS_INLINE
1656 static inline struct dispatch_object_s
*
1657 _dispatch_queue_head(dispatch_queue_t dq
)
1659 return os_mpsc_get_head(dq
, dq_items
);
1662 DISPATCH_ALWAYS_INLINE
1663 static inline struct dispatch_object_s
*
1664 _dispatch_queue_next(dispatch_queue_t dq
, struct dispatch_object_s
*dc
)
1666 return os_mpsc_pop_head(dq
, dq_items
, dc
, do_next
);
1669 DISPATCH_ALWAYS_INLINE
1671 _dispatch_queue_push_update_tail(dispatch_queue_t dq
,
1672 struct dispatch_object_s
*tail
)
1674 // if we crash here with a value less than 0x1000, then we are
1675 // at a known bug in client code. for example, see
1676 // _dispatch_queue_dispose or _dispatch_atfork_child
1677 return os_mpsc_push_update_tail(dq
, dq_items
, tail
, do_next
);
1680 DISPATCH_ALWAYS_INLINE
1682 _dispatch_queue_push_update_tail_list(dispatch_queue_t dq
,
1683 struct dispatch_object_s
*head
, struct dispatch_object_s
*tail
)
1685 // if we crash here with a value less than 0x1000, then we are
1686 // at a known bug in client code. for example, see
1687 // _dispatch_queue_dispose or _dispatch_atfork_child
1688 return os_mpsc_push_update_tail_list(dq
, dq_items
, head
, tail
, do_next
);
1691 DISPATCH_ALWAYS_INLINE
1693 _dispatch_queue_push_update_head(dispatch_queue_t dq
,
1694 struct dispatch_object_s
*head
)
1696 os_mpsc_push_update_head(dq
, dq_items
, head
);
1699 DISPATCH_ALWAYS_INLINE
1701 _dispatch_root_queue_push_inline(dispatch_queue_t dq
, dispatch_object_t _head
,
1702 dispatch_object_t _tail
, int n
)
1704 struct dispatch_object_s
*head
= _head
._do
, *tail
= _tail
._do
;
1705 if (unlikely(_dispatch_queue_push_update_tail_list(dq
, head
, tail
))) {
1706 _dispatch_queue_push_update_head(dq
, head
);
1707 return _dispatch_global_queue_poke(dq
, n
, 0);
1711 DISPATCH_ALWAYS_INLINE
1713 _dispatch_queue_push_inline(dispatch_queue_t dq
, dispatch_object_t _tail
,
1716 struct dispatch_object_s
*tail
= _tail
._do
;
1717 dispatch_wakeup_flags_t flags
= 0;
1718 // If we are going to call dx_wakeup(), the queue must be retained before
1719 // the item we're pushing can be dequeued, which means:
1720 // - before we exchange the tail if we may have to override
1721 // - before we set the head if we made the queue non empty.
1722 // Otherwise, if preempted between one of these and the call to dx_wakeup()
1723 // the blocks submitted to the queue may release the last reference to the
1724 // queue when invoked by _dispatch_queue_drain. <rdar://problem/6932776>
1725 bool overriding
= _dispatch_queue_need_override_retain(dq
, qos
);
1726 if (unlikely(_dispatch_queue_push_update_tail(dq
, tail
))) {
1727 if (!overriding
) _dispatch_retain_2(dq
->_as_os_obj
);
1728 _dispatch_queue_push_update_head(dq
, tail
);
1729 flags
= DISPATCH_WAKEUP_CONSUME_2
| DISPATCH_WAKEUP_MAKE_DIRTY
;
1730 } else if (overriding
) {
1731 flags
= DISPATCH_WAKEUP_CONSUME_2
;
1735 return dx_wakeup(dq
, qos
, flags
);
1738 DISPATCH_ALWAYS_INLINE
1740 _dispatch_queue_push_queue(dispatch_queue_t tq
, dispatch_queue_t dq
,
1743 return dx_push(tq
, dq
, _dq_state_max_qos(dq_state
));
1746 DISPATCH_ALWAYS_INLINE
1747 static inline dispatch_priority_t
1748 _dispatch_root_queue_identity_assume(dispatch_queue_t assumed_rq
)
1750 dispatch_priority_t old_dbp
= _dispatch_get_basepri();
1751 dispatch_assert(dx_hastypeflag(assumed_rq
, QUEUE_ROOT
));
1752 _dispatch_reset_basepri(assumed_rq
->dq_priority
);
1753 _dispatch_queue_set_current(assumed_rq
);
1757 typedef dispatch_queue_wakeup_target_t
1758 _dispatch_queue_class_invoke_handler_t(dispatch_object_t
,
1759 dispatch_invoke_context_t dic
, dispatch_invoke_flags_t
,
1762 DISPATCH_ALWAYS_INLINE
1764 _dispatch_queue_class_invoke(dispatch_object_t dou
,
1765 dispatch_invoke_context_t dic
, dispatch_invoke_flags_t flags
,
1766 dispatch_invoke_flags_t const_restrict_flags
,
1767 _dispatch_queue_class_invoke_handler_t invoke
)
1769 dispatch_queue_t dq
= dou
._dq
;
1770 dispatch_queue_wakeup_target_t tq
= DISPATCH_QUEUE_WAKEUP_NONE
;
1771 bool owning
= !(flags
& DISPATCH_INVOKE_STEALING
);
1774 // When called from a plain _dispatch_queue_drain:
1775 // overriding = false
1778 // When called from an override continuation:
1779 // overriding = true
1780 // owning depends on whether the override embedded the queue or steals
1782 if (!(flags
& (DISPATCH_INVOKE_STEALING
| DISPATCH_INVOKE_WLH
))) {
1783 dq
->do_next
= DISPATCH_OBJECT_LISTLESS
;
1785 flags
|= const_restrict_flags
;
1786 if (likely(flags
& DISPATCH_INVOKE_WLH
)) {
1787 owned
= DISPATCH_QUEUE_SERIAL_DRAIN_OWNED
| DISPATCH_QUEUE_ENQUEUED
;
1789 owned
= _dispatch_queue_drain_try_lock(dq
, flags
);
1791 if (likely(owned
)) {
1792 dispatch_priority_t old_dbp
;
1793 if (!(flags
& DISPATCH_INVOKE_MANAGER_DRAIN
)) {
1794 old_dbp
= _dispatch_set_basepri(dq
->dq_priority
);
1799 flags
= _dispatch_queue_merge_autorelease_frequency(dq
, flags
);
1800 attempt_running_slow_head
:
1801 #if DISPATCH_COCOA_COMPAT
1802 if ((flags
& DISPATCH_INVOKE_WLH
) &&
1803 !(flags
& DISPATCH_INVOKE_AUTORELEASE_ALWAYS
)) {
1804 _dispatch_last_resort_autorelease_pool_push(dic
);
1806 #endif // DISPATCH_COCOA_COMPAT
1807 tq
= invoke(dq
, dic
, flags
, &owned
);
1808 #if DISPATCH_COCOA_COMPAT
1809 if ((flags
& DISPATCH_INVOKE_WLH
) &&
1810 !(flags
& DISPATCH_INVOKE_AUTORELEASE_ALWAYS
)) {
1811 dispatch_thread_frame_s dtf
;
1812 _dispatch_thread_frame_push(&dtf
, dq
);
1813 _dispatch_last_resort_autorelease_pool_pop(dic
);
1814 _dispatch_thread_frame_pop(&dtf
);
1816 #endif // DISPATCH_COCOA_COMPAT
1817 dispatch_assert(tq
!= DISPATCH_QUEUE_WAKEUP_TARGET
);
1818 if (unlikely(tq
!= DISPATCH_QUEUE_WAKEUP_NONE
&&
1819 tq
!= DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT
)) {
1820 // Either dc is set, which is a deferred invoke case
1822 // or only tq is and it means a reenqueue is required, because of:
1823 // a retarget, a suspension, or a width change.
1825 // In both cases, we want to bypass the check for DIRTY.
1826 // That may cause us to leave DIRTY in place but all drain lock
1827 // acquirers clear it
1828 } else if (!_dispatch_queue_drain_try_unlock(dq
, owned
,
1829 tq
== DISPATCH_QUEUE_WAKEUP_NONE
)) {
1830 tq
= _dispatch_queue_get_current();
1831 if (dx_hastypeflag(tq
, QUEUE_ROOT
) || !owning
) {
1832 goto attempt_running_slow_head
;
1834 DISPATCH_COMPILER_CAN_ASSUME(tq
!= DISPATCH_QUEUE_WAKEUP_NONE
);
1839 if (!(flags
& DISPATCH_INVOKE_MANAGER_DRAIN
)) {
1840 _dispatch_reset_basepri(old_dbp
);
1843 if (likely(owning
)) {
1844 _dispatch_introspection_queue_item_complete(dq
);
1848 if (const_restrict_flags
& DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS
) {
1849 dispatch_assert(dic
->dic_deferred
== NULL
);
1850 } else if (dic
->dic_deferred
) {
1851 return _dispatch_queue_drain_sync_waiter(dq
, dic
,
1855 uint64_t old_state
, new_state
, enqueued
= DISPATCH_QUEUE_ENQUEUED
;
1856 if (tq
== DISPATCH_QUEUE_WAKEUP_MGR
) {
1857 enqueued
= DISPATCH_QUEUE_ENQUEUED_ON_MGR
;
1859 os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, release
, {
1860 new_state
= old_state
- owned
;
1861 new_state
&= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK
;
1862 new_state
|= DISPATCH_QUEUE_DIRTY
;
1863 if (_dq_state_is_runnable(new_state
) &&
1864 !_dq_state_is_enqueued(new_state
)) {
1865 // drain was not interupted for suspension
1866 // we will reenqueue right away, just put ENQUEUED back
1867 new_state
|= enqueued
;
1871 if (_dq_state_received_override(old_state
)) {
1872 // Ensure that the root queue sees that this thread was overridden.
1873 _dispatch_set_basepri_override_qos(_dq_state_max_qos(new_state
));
1875 if ((old_state
^ new_state
) & enqueued
) {
1876 dispatch_assert(_dq_state_is_enqueued(new_state
));
1877 return _dispatch_queue_push_queue(tq
, dq
, new_state
);
1881 _dispatch_release_2_tailcall(dq
);
1884 DISPATCH_ALWAYS_INLINE
1886 _dispatch_queue_class_probe(dispatch_queue_class_t dqu
)
1888 struct dispatch_object_s
*tail
;
1889 // seq_cst wrt atomic store to dq_state <rdar://problem/14637483>
1890 // seq_cst wrt atomic store to dq_flags <rdar://problem/22623242>
1891 tail
= os_atomic_load2o(dqu
._oq
, oq_items_tail
, ordered
);
1892 return unlikely(tail
!= NULL
);
1895 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
1897 _dispatch_is_in_root_queues_array(dispatch_queue_t dq
)
1899 return (dq
>= _dispatch_root_queues
) &&
1900 (dq
< _dispatch_root_queues
+ _DISPATCH_ROOT_QUEUE_IDX_COUNT
);
1903 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
1904 static inline dispatch_queue_t
1905 _dispatch_get_root_queue(dispatch_qos_t qos
, bool overcommit
)
1907 if (unlikely(qos
== DISPATCH_QOS_UNSPECIFIED
|| qos
> DISPATCH_QOS_MAX
)) {
1908 DISPATCH_CLIENT_CRASH(qos
, "Corrupted priority");
1910 return &_dispatch_root_queues
[2 * (qos
- 1) + overcommit
];
1913 DISPATCH_ALWAYS_INLINE
1915 _dispatch_queue_set_bound_thread(dispatch_queue_t dq
)
1917 // Tag thread-bound queues with the owning thread
1918 dispatch_assert(_dispatch_queue_is_thread_bound(dq
));
1919 uint64_t old_state
, new_state
;
1920 os_atomic_rmw_loop2o(dq
, dq_state
, old_state
, new_state
, relaxed
, {
1921 new_state
= old_state
;
1922 new_state
&= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK
;
1923 new_state
|= _dispatch_lock_value_for_self();
1927 DISPATCH_ALWAYS_INLINE
1929 _dispatch_queue_clear_bound_thread(dispatch_queue_t dq
)
1931 dispatch_assert(_dispatch_queue_is_thread_bound(dq
));
1932 _dispatch_queue_atomic_flags_clear(dq
, DQF_THREAD_BOUND
|DQF_CANNOT_TRYSYNC
);
1933 os_atomic_and2o(dq
, dq_state
, ~DISPATCH_QUEUE_DRAIN_OWNER_MASK
, relaxed
);
1936 DISPATCH_ALWAYS_INLINE
1937 static inline dispatch_pthread_root_queue_observer_hooks_t
1938 _dispatch_get_pthread_root_queue_observer_hooks(void)
1940 return _dispatch_thread_getspecific(
1941 dispatch_pthread_root_queue_observer_hooks_key
);
1944 DISPATCH_ALWAYS_INLINE
1946 _dispatch_set_pthread_root_queue_observer_hooks(
1947 dispatch_pthread_root_queue_observer_hooks_t observer_hooks
)
1949 _dispatch_thread_setspecific(dispatch_pthread_root_queue_observer_hooks_key
,
1954 #pragma mark dispatch_priority
1956 DISPATCH_ALWAYS_INLINE
1957 static inline dispatch_priority_t
1958 _dispatch_get_basepri(void)
1960 #if HAVE_PTHREAD_WORKQUEUE_QOS
1961 return (dispatch_priority_t
)(uintptr_t)_dispatch_thread_getspecific(
1962 dispatch_basepri_key
);
1968 DISPATCH_ALWAYS_INLINE
1970 _dispatch_reset_basepri(dispatch_priority_t dbp
)
1972 #if HAVE_PTHREAD_WORKQUEUE_QOS
1973 dispatch_priority_t old_dbp
= _dispatch_get_basepri();
1974 // If an inner-loop or'd in the override flag to the per-thread priority,
1975 // it needs to be propagated up the chain.
1976 dbp
&= ~DISPATCH_PRIORITY_OVERRIDE_MASK
;
1977 dbp
|= (old_dbp
& DISPATCH_PRIORITY_OVERRIDE_MASK
);
1978 _dispatch_thread_setspecific(dispatch_basepri_key
, (void*)(uintptr_t)dbp
);
1984 DISPATCH_ALWAYS_INLINE
1985 static inline dispatch_qos_t
1986 _dispatch_get_basepri_override_qos_floor(void)
1988 dispatch_priority_t dbp
= _dispatch_get_basepri();
1989 dispatch_qos_t qos
= _dispatch_priority_qos(dbp
);
1990 dispatch_qos_t oqos
= _dispatch_priority_override_qos(dbp
);
1991 dispatch_qos_t qos_floor
= MAX(qos
, oqos
);
1992 return qos_floor
? qos_floor
: DISPATCH_QOS_SATURATED
;
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_basepri_override_qos(dispatch_qos_t qos)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	dispatch_priority_t dbp = _dispatch_get_basepri();
	if (_dispatch_priority_override_qos(dbp) >= qos) return;
	dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
	dbp |= qos << DISPATCH_PRIORITY_OVERRIDE_SHIFT;
	_dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
#else
	(void)qos;
#endif
}
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_reset_basepri_override(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	dispatch_priority_t dbp = _dispatch_get_basepri();
	dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp);
	if (oqos) {
		dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
		_dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
		return oqos != DISPATCH_QOS_SATURATED;
	}
#endif
	return false;
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_priority_t
_dispatch_set_basepri(dispatch_priority_t dbp)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	const dispatch_priority_t preserved_mask =
			DISPATCH_PRIORITY_OVERRIDE_MASK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
	dispatch_priority_t old_dbp = _dispatch_get_basepri();
	if (old_dbp) {
		dispatch_priority_t flags, defaultqueue, basepri;
		flags = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE);
		defaultqueue = (old_dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE);
		basepri = old_dbp & DISPATCH_PRIORITY_REQUESTED_MASK;
		dbp &= DISPATCH_PRIORITY_REQUESTED_MASK;
		if (!dbp) {
			flags = DISPATCH_PRIORITY_FLAG_INHERIT | defaultqueue;
			dbp = basepri;
		} else if (dbp < basepri && !defaultqueue) { // rdar://16349734
			dbp = basepri;
		}
		dbp |= flags | (old_dbp & preserved_mask);
	} else {
		dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
	}
	_dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
	return old_dbp;
#else
	(void)dbp;
	return 0;
#endif
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_priority_t
_dispatch_set_basepri_wlh(dispatch_priority_t dbp)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	dispatch_assert(!_dispatch_get_basepri());
	// _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED)
	dbp |= DISPATCH_QOS_SATURATED << DISPATCH_PRIORITY_OVERRIDE_SHIFT;
	_dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
#else
	(void)dbp;
#endif
	return 0;
}
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	dispatch_priority_t inherited, defaultqueue, dbp = _dispatch_get_basepri();
	pthread_priority_t basepp = _dispatch_priority_to_pp_strip_flags(dbp);
	bool enforce = (flags & DISPATCH_PRIORITY_ENFORCE) ||
			(pp & _PTHREAD_PRIORITY_ENFORCE_FLAG);
	inherited = (dbp & DISPATCH_PRIORITY_FLAG_INHERIT);
	defaultqueue = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE);
	pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;

	if (!pp) {
		return basepp;
	} else if (defaultqueue) { // rdar://16349734
		return pp;
	} else if (pp < basepp) {
		return basepp;
	} else if (enforce || inherited) {
		return pp;
	} else {
		return basepp;
	}
#else
	(void)pp; (void)flags;
	return 0;
#endif
}
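// Editorial summary (hedged, matching the branch order reconstructed above):
// the adopted priority resolves as follows:
//	1. no requested priority                  -> keep the base priority
//	2. base priority flagged DEFAULTQUEUE     -> take the workitem priority
//	3. workitem priority below the base       -> keep the base priority
//	4. ENFORCE requested or base is INHERITed -> take the workitem priority
//	5. otherwise                              -> keep the base priority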
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
		dispatch_queue_t tq)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	const dispatch_priority_t rootqueue_flag = DISPATCH_PRIORITY_FLAG_ROOTQUEUE;
	const dispatch_priority_t inherited_flag = DISPATCH_PRIORITY_FLAG_INHERIT;
	const dispatch_priority_t defaultqueue_flag =
			DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;
	dispatch_priority_t pri = dq->dq_priority, tpri = tq->dq_priority;

	if ((!_dispatch_priority_qos(pri) || (pri & inherited_flag)) &&
			(tpri & rootqueue_flag)) {
		if (_dispatch_priority_override_qos(pri) == DISPATCH_QOS_SATURATED) {
			pri &= DISPATCH_PRIORITY_OVERRIDE_MASK;
		} else {
			pri = 0;
		}
		if (tpri & defaultqueue_flag) {
			// <rdar://problem/32921639> base queues need to know they target
			// the default root queue so that _dispatch_queue_override_qos()
			// in _dispatch_queue_class_wakeup() can fallback to QOS_DEFAULT
			// if no other priority was provided.
			pri |= defaultqueue_flag;
		} else {
			pri |= (tpri & ~rootqueue_flag) | inherited_flag;
		}
		dq->dq_priority = pri;
	} else if (pri & defaultqueue_flag) {
		// the DEFAULTQUEUE flag is only set on queues due to the code above,
		// and must never be kept if we don't target a global root queue.
		dq->dq_priority = (pri & ~defaultqueue_flag);
	}
#else
	(void)dq; (void)tq;
#endif
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_priority_t
_dispatch_priority_inherit_from_root_queue(dispatch_priority_t pri,
		dispatch_queue_t rq)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	dispatch_priority_t p = pri & DISPATCH_PRIORITY_REQUESTED_MASK;
	dispatch_priority_t rqp = rq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK;
	dispatch_priority_t defaultqueue =
			rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;

	if (!p || (!defaultqueue && p < rqp)) {
		p = rqp | defaultqueue;
	}
	return p | (rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT);
#else
	(void)rq; (void)pri;
	return 0;
#endif
}
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_get_priority(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t pp = (uintptr_t)
			_dispatch_thread_getspecific(dispatch_priority_key);
	return pp;
#else
	return 0;
#endif
}
#if HAVE_PTHREAD_WORKQUEUE_QOS
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_compute_update(pthread_priority_t pp)
{
	dispatch_assert(pp != DISPATCH_NO_PRIORITY);
	if (!_dispatch_set_qos_class_enabled) return 0;
	// the priority in _dispatch_get_priority() only tracks manager-ness
	// and overcommit, which is inherited from the current value for each update
	// however if the priority had the NEEDS_UNBIND flag set we need to clear it
	// the first chance we get
	//
	// the manager bit is invalid input, but we keep it to get meaningful
	// assertions in _dispatch_set_priority_and_voucher_slow()
	pp &= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
	pthread_priority_t cur_priority = _dispatch_get_priority();
	pthread_priority_t unbind = _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
	pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	if (unlikely(cur_priority & unbind)) {
		// else we always need an update if the NEEDS_UNBIND flag is set
		// the slow path in _dispatch_set_priority_and_voucher_slow() will
		// adjust the priority further with the proper overcommitness
		return pp ? pp : (cur_priority & ~unbind);
	} else {
		cur_priority &= ~overcommit;
	}
	if (unlikely(pp != cur_priority)) return pp;
	return 0;
}
#endif
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline voucher_t
_dispatch_set_priority_and_voucher(pthread_priority_t pp,
		voucher_t v, dispatch_thread_set_self_t flags)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pp = _dispatch_priority_compute_update(pp);
	if (likely(!pp)) {
		if (v == DISPATCH_NO_VOUCHER) {
			return DISPATCH_NO_VOUCHER;
		}
		if (likely(v == _voucher_get())) {
			bool retained = flags & DISPATCH_VOUCHER_CONSUME;
			if (flags & DISPATCH_VOUCHER_REPLACE) {
				if (retained && v) _voucher_release_no_dispose(v);
				v = DISPATCH_NO_VOUCHER;
			} else {
				if (!retained && v) _voucher_retain(v);
			}
			return v;
		}
	}
	return _dispatch_set_priority_and_voucher_slow(pp, v, flags);
#else
	(void)pp; (void)v; (void)flags;
	return DISPATCH_NO_VOUCHER;
#endif
}
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline voucher_t
_dispatch_adopt_priority_and_set_voucher(pthread_priority_t pp,
		voucher_t v, dispatch_thread_set_self_t flags)
{
	pthread_priority_t p = 0;
	if (pp != DISPATCH_NO_PRIORITY) {
		p = _dispatch_priority_adopt(pp, flags);
	}
	return _dispatch_set_priority_and_voucher(p, v, flags);
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_reset_priority_and_voucher(pthread_priority_t pp, voucher_t v)
{
	if (pp == DISPATCH_NO_PRIORITY) pp = 0;
	(void)_dispatch_set_priority_and_voucher(pp, v,
			DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE);
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_reset_voucher(voucher_t v, dispatch_thread_set_self_t flags)
{
	flags |= DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE;
	(void)_dispatch_set_priority_and_voucher(0, v, flags);
}
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_need_override(dispatch_queue_class_t dqu, dispatch_qos_t qos)
{
	uint64_t dq_state = os_atomic_load2o(dqu._dq, dq_state, relaxed);
	// dq_priority "override qos" contains the priority at which the queue
	// is already running for thread-bound queues.
	// For non thread-bound queues, the qos of the queue may not be observed
	// when the first work item is dispatched synchronously.
	return _dq_state_max_qos(dq_state) < qos &&
			_dispatch_priority_override_qos(dqu._dq->dq_priority) < qos;
}
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_need_override_retain(dispatch_queue_class_t dqu,
		dispatch_qos_t qos)
{
	if (_dispatch_queue_need_override(dqu, qos)) {
		_os_object_retain_internal_n_inline(dqu._oq->_as_os_obj, 2);
		return true;
	}
	return false;
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_qos_t
_dispatch_queue_override_qos(dispatch_queue_class_t dqu, dispatch_qos_t qos)
{
	if (dqu._oq->oq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) {
		// queues targeting the default root queue use any asynchronous
		// workitem priority available and fallback to QOS_DEFAULT otherwise.
		return qos ? qos : DISPATCH_QOS_DEFAULT;
	}
	// for asynchronous workitems, queue priority is the floor for overrides
	return MAX(qos, _dispatch_priority_qos(dqu._oq->oq_priority));
}
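// Editorial sketch (not in the original source): the two cases above behave
// roughly as
//
//	_dispatch_queue_override_qos(dqu, DISPATCH_QOS_UNSPECIFIED)
//		// == DISPATCH_QOS_DEFAULT for a DEFAULTQUEUE-flagged queue
//	_dispatch_queue_override_qos(dqu, qos)
//		// == MAX(qos, queue QoS) for every other queue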
#define DISPATCH_PRIORITY_PROPAGATE_CURRENT 0x1
#define DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC 0x2

DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_compute_propagated(pthread_priority_t pp,
		unsigned int flags)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (flags & DISPATCH_PRIORITY_PROPAGATE_CURRENT) {
		pp = _dispatch_get_priority();
	}
	pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
	if (!(flags & DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC) &&
			pp > _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED)) {
		// Cap QOS for propagation at user-initiated <rdar://16681262&16998036>
		return _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED);
	}
	return pp;
#else
	(void)pp; (void)flags;
	return 0;
#endif
}
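// Editorial sketch (hedged): a USER_INTERACTIVE priority propagates only as
// USER_INITIATED to asynchronous work unless the sync-IPC flag is passed:
//
//	pthread_priority_t pp = _dispatch_qos_to_pp(DISPATCH_QOS_USER_INTERACTIVE);
//	pp = _dispatch_priority_compute_propagated(pp, 0);
//	// pp == _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED)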
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_propagate(void)
{
	return _dispatch_priority_compute_propagated(0,
			DISPATCH_PRIORITY_PROPAGATE_CURRENT);
}
// including maintenance
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_is_background_thread(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t pp = _dispatch_get_priority();
	return _dispatch_qos_is_background(_dispatch_qos_from_pp(pp));
#else
	return false;
#endif
}
#pragma mark dispatch_block_t

#ifdef __BLOCKS__

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_block_has_private_data(const dispatch_block_t block)
{
	extern void (*_dispatch_block_special_invoke)(void*);
	return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke);
}
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline pthread_priority_t
_dispatch_block_invoke_should_set_priority(dispatch_block_flags_t flags,
		pthread_priority_t new_pri)
{
	pthread_priority_t old_pri, p = 0;  // 0 means do not change priority.
	if ((flags & DISPATCH_BLOCK_HAS_PRIORITY)
			&& ((flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) ||
			!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS))) {
		old_pri = _dispatch_get_priority();
		new_pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
		p = old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK;
		if (!p || p >= new_pri) p = 0;
	}
	return p;
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_block_private_data_t
_dispatch_block_get_data(const dispatch_block_t db)
{
	if (!_dispatch_block_has_private_data(db)) {
		return NULL;
	}
	// Keep in sync with _dispatch_block_create implementation
	uint8_t *x = (uint8_t *)db;
	// x points to base of struct Block_layout
	x += sizeof(struct Block_layout);
	// x points to base of captured dispatch_block_private_data_s object
	dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x;
	if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) {
		DISPATCH_CLIENT_CRASH(dbpd->dbpd_magic,
				"Corruption of dispatch block object");
	}
	return dbpd;
}
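// Editorial layout sketch (inferred from the code above, not authoritative):
// blocks created by dispatch_block_create() embed their private data right
// after the copied block's Block_layout header,
//
//	| struct Block_layout | struct dispatch_block_private_data_s | ...
//	^ db                  ^ db + sizeof(struct Block_layout)
//
// which is why the pointer arithmetic above can recover it once
// _dispatch_block_has_private_data() has matched the special invoke function.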
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_block_get_priority(const dispatch_block_t db)
{
	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
	return dbpd ? dbpd->dbpd_priority : 0;
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_block_flags_t
_dispatch_block_get_flags(const dispatch_block_t db)
{
	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
	return dbpd ? dbpd->dbpd_flags : 0;
}

#endif // __BLOCKS__
#pragma mark dispatch_continuation_t

DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_alloc_cacheonly(void)
{
	dispatch_continuation_t dc = (dispatch_continuation_t)
			_dispatch_thread_getspecific(dispatch_cache_key);
	if (likely(dc)) {
		_dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
	}
	return dc;
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_alloc(void)
{
	dispatch_continuation_t dc =
			_dispatch_continuation_alloc_cacheonly();
	if (unlikely(!dc)) {
		return _dispatch_continuation_alloc_from_heap();
	}
	return dc;
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_free_cacheonly(dispatch_continuation_t dc)
{
	dispatch_continuation_t prev_dc = (dispatch_continuation_t)
			_dispatch_thread_getspecific(dispatch_cache_key);
	int cnt = prev_dc ? prev_dc->dc_cache_cnt + 1 : 1;
	// Cap continuation cache
	if (unlikely(cnt > _dispatch_continuation_cache_limit)) {
		return dc;
	}
	dc->do_next = prev_dc;
	dc->dc_cache_cnt = cnt;
	_dispatch_thread_setspecific(dispatch_cache_key, dc);
	return NULL;
}
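// Editorial sketch (not in the original source): the cache is a per-thread
// LIFO threaded through do_next and keyed off dispatch_cache_key, so a free
// followed by an alloc on the same thread returns the same object until
// _dispatch_continuation_cache_limit is reached:
//
//	dispatch_continuation_t dc = _dispatch_continuation_alloc();
//	_dispatch_continuation_free(dc);
//	// _dispatch_continuation_alloc_cacheonly() == dc (hot, per-thread)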
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_free(dispatch_continuation_t dc)
{
	dc = _dispatch_continuation_free_cacheonly(dc);
	if (unlikely(dc)) {
		_dispatch_continuation_free_to_cache_limit(dc);
	}
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_with_group_invoke(dispatch_continuation_t dc)
{
	struct dispatch_object_s *dou = dc->dc_data;
	unsigned long type = dx_type(dou);
	if (type == DISPATCH_GROUP_TYPE) {
		_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
		_dispatch_introspection_queue_item_complete(dou);
		dispatch_group_leave((dispatch_group_t)dou);
	} else {
		DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type");
	}
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov,
		dispatch_invoke_flags_t flags)
{
	dispatch_continuation_t dc = dou._dc, dc1;
	dispatch_invoke_with_autoreleasepool(flags, {
		uintptr_t dc_flags = dc->dc_flags;
		// Add the item back to the cache before calling the function. This
		// allows the 'hot' continuation to be used for a quick callback.
		//
		// The ccache version is per-thread.
		// Therefore, the object has not been reused yet.
		// This generates better assembly.
		_dispatch_continuation_voucher_adopt(dc, ov, dc_flags);
		if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
			dc1 = _dispatch_continuation_free_cacheonly(dc);
		} else {
			dc1 = NULL;
		}
		if (unlikely(dc_flags & DISPATCH_OBJ_GROUP_BIT)) {
			_dispatch_continuation_with_group_invoke(dc);
		} else {
			_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
			_dispatch_introspection_queue_item_complete(dou);
		}
		if (unlikely(dc1)) {
			_dispatch_continuation_free_to_cache_limit(dc1);
		}
	});
	_dispatch_perfmon_workitem_inc();
}
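// Editorial note (hedged): pushing the consumed continuation back into the
// per-thread cache *before* the client callout means a dispatch_async()
// performed inside the callback can immediately reuse the hot object, e.g.
//
//	dispatch_async(q, ^{
//		dispatch_async(q2, ^{ /* may reuse the very continuation that is
//		                         currently running the outer block */ });
//	});
//
// dc1 only carries the overflow case that must go through
// _dispatch_continuation_free_to_cache_limit() after the callout.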
DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_continuation_pop_inline(dispatch_object_t dou,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
		dispatch_queue_t dq)
{
	dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
			_dispatch_get_pthread_root_queue_observer_hooks();
	if (observer_hooks) observer_hooks->queue_will_execute(dq);
	_dispatch_trace_continuation_pop(dq, dou);
	flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
	if (_dispatch_object_has_vtable(dou)) {
		dx_invoke(dou._do, dic, flags);
	} else {
		_dispatch_continuation_invoke_inline(dou, DISPATCH_NO_VOUCHER, flags);
	}
	if (observer_hooks) observer_hooks->queue_did_execute(dq);
}
// used to forward the do_invoke of a continuation with a vtable to its real
// implementation.
#define _dispatch_continuation_pop_forwarded(dc, ov, dc_flags, ...) \
	({ \
		dispatch_continuation_t _dc = (dc), _dc1; \
		uintptr_t _dc_flags = (dc_flags); \
		_dispatch_continuation_voucher_adopt(_dc, ov, _dc_flags); \
		if (_dc_flags & DISPATCH_OBJ_CONSUME_BIT) { \
			_dc1 = _dispatch_continuation_free_cacheonly(_dc); \
		} else { \
			_dc1 = NULL; \
		} \
		__VA_ARGS__; \
		_dispatch_introspection_queue_item_complete(_dc); \
		if (unlikely(_dc1)) { \
			_dispatch_continuation_free_to_cache_limit(_dc1); \
		} \
	})
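// Editorial usage sketch (hypothetical call site, not from the original
// source): a continuation with a vtable forwards its do_invoke roughly as
//
//	_dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER,
//			DISPATCH_OBJ_CONSUME_BIT, {
//		_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
//	});
//
// where the trailing statement block is spliced in via __VA_ARGS__ between
// the cache push and the introspection completion hook.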
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_priority_set(dispatch_continuation_t dc,
		pthread_priority_t pp, dispatch_block_flags_t flags)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (likely(!(flags & DISPATCH_BLOCK_HAS_PRIORITY))) {
		pp = _dispatch_priority_propagate();
	}
	if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) {
		pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
	}
	dc->dc_priority = pp;
#else
	(void)dc; (void)pp; (void)flags;
#endif
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_qos_t
_dispatch_continuation_override_qos(dispatch_queue_t dq,
		dispatch_continuation_t dc)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	dispatch_qos_t dc_qos = _dispatch_qos_from_pp(dc->dc_priority);
	bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG;
	dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority);
	bool defaultqueue = dq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;

	dispatch_assert(dc->dc_priority != DISPATCH_NO_PRIORITY);
	if (dc_qos && (enforce || !dq_qos || defaultqueue)) {
		return dc_qos;
	}
	return 0;
#else
	(void)dq; (void)dc;
	return 0;
#endif
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_init_f(dispatch_continuation_t dc,
		dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t func,
		pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags)
{
	dc->dc_flags = dc_flags;
	dc->dc_func = func;
	dc->dc_ctxt = ctxt;
	_dispatch_continuation_voucher_set(dc, dqu, flags);
	_dispatch_continuation_priority_set(dc, pp, flags);
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_init(dispatch_continuation_t dc,
		dispatch_queue_class_t dqu, dispatch_block_t work,
		pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags)
{
	dc->dc_flags = dc_flags | DISPATCH_OBJ_BLOCK_BIT;
	dc->dc_ctxt = _dispatch_Block_copy(work);
	_dispatch_continuation_priority_set(dc, pp, flags);

	if (unlikely(_dispatch_block_has_private_data(work))) {
		// always sets dc_func & dc_voucher
		// may update dc_priority & do_vtable
		return _dispatch_continuation_init_slow(dc, dqu, flags);
	}

	if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
		dc->dc_func = _dispatch_call_block_and_release;
	} else {
		dc->dc_func = _dispatch_Block_invoke(work);
	}
	_dispatch_continuation_voucher_set(dc, dqu, flags);
}
#pragma mark dispatch_mach_reply_refs_t

// assumes low bit of mach port names is always set
#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr)
{
	dmr->du_ident &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED;
}
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr)
{
	mach_port_t reply_port = (mach_port_t)dmr->du_ident;
	return reply_port ? !(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED) : false;
}
DISPATCH_ALWAYS_INLINE
static inline mach_port_t
_dispatch_mach_reply_get_reply_port(mach_port_t reply_port)
{
	return reply_port ? (reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0;
}
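// Editorial note (hedged): because Mach port names are expected to have their
// low bit set (see the comment above DISPATCH_MACH_REPLY_PORT_UNOWNED), the
// ownership state can be stored in that bit of du_ident without widening the
// field: clearing the bit marks the reply port as owned, keeping it set marks
// it as unowned, and _dispatch_mach_reply_get_reply_port() reproduces the
// original name with the unowned tag applied.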
#endif // DISPATCH_PURE_C

#endif /* __DISPATCH_INLINE_INTERNAL__ */