/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */
27 #ifndef __DISPATCH_INLINE_INTERNAL__
28 #define __DISPATCH_INLINE_INTERNAL__
30 #ifndef __DISPATCH_INDIRECT__
31 #error "Please #include <dispatch/dispatch.h> instead of this file directly."
32 #include <dispatch/base.h> // for HeaderDoc
35 #if DISPATCH_USE_CLIENT_CALLOUT
38 _dispatch_client_callout(void *ctxt
, dispatch_function_t f
);
40 _dispatch_client_callout2(void *ctxt
, size_t i
, void (*f
)(void *, size_t));
42 _dispatch_client_callout4(void *ctxt
, dispatch_mach_reason_t reason
,
43 dispatch_mach_msg_t dmsg
, mach_error_t error
,
44 dispatch_mach_handler_function_t f
);
46 #else // !DISPATCH_USE_CLIENT_CALLOUT
48 DISPATCH_ALWAYS_INLINE
50 _dispatch_client_callout(void *ctxt
, dispatch_function_t f
)
55 DISPATCH_ALWAYS_INLINE
57 _dispatch_client_callout2(void *ctxt
, size_t i
, void (*f
)(void *, size_t))
62 DISPATCH_ALWAYS_INLINE
64 _dispatch_client_callout4(void *ctxt
, dispatch_mach_reason_t reason
,
65 dispatch_mach_msg_t dmsg
, mach_error_t error
,
66 dispatch_mach_handler_function_t f
)
68 return f(ctxt
, reason
, dmsg
, error
);
71 #endif // !DISPATCH_USE_CLIENT_CALLOUT
73 #if !(USE_OBJC && __OBJC2__) && !defined(__cplusplus)
76 #pragma mark _os_object_t & dispatch_object_t
78 DISPATCH_ALWAYS_INLINE
79 static inline _os_object_t
80 _os_object_retain_internal_inline(_os_object_t obj
)
82 int ref_cnt
= _os_object_refcnt_inc(obj
);
83 if (slowpath(ref_cnt
<= 0)) {
84 DISPATCH_CRASH("Resurrection of an object");
89 DISPATCH_ALWAYS_INLINE
91 _os_object_release_internal_inline(_os_object_t obj
)
93 int ref_cnt
= _os_object_refcnt_dec(obj
);
94 if (fastpath(ref_cnt
>= 0)) {
97 if (slowpath(ref_cnt
< -1)) {
98 DISPATCH_CRASH("Over-release of an object");
101 if (slowpath(obj
->os_obj_xref_cnt
>= 0)) {
102 DISPATCH_CRASH("Release while external references exist");
105 // _os_object_refcnt_dispose_barrier() is in _os_object_dispose()
106 return _os_object_dispose(obj
);
109 DISPATCH_ALWAYS_INLINE_NDEBUG
111 _dispatch_retain(dispatch_object_t dou
)
113 (void)_os_object_retain_internal_inline(dou
._os_obj
);
116 DISPATCH_ALWAYS_INLINE_NDEBUG
118 _dispatch_release(dispatch_object_t dou
)
120 _os_object_release_internal_inline(dou
._os_obj
);
124 #pragma mark dispatch_thread
126 DISPATCH_ALWAYS_INLINE
128 _dispatch_wqthread_override_start(mach_port_t thread
,
129 pthread_priority_t priority
)
131 #if HAVE_PTHREAD_WORKQUEUE_QOS
132 if (!_dispatch_set_qos_class_enabled
) return;
133 (void)_pthread_workqueue_override_start_direct(thread
, priority
);
135 (void)thread
; (void)priority
;
139 DISPATCH_ALWAYS_INLINE
141 _dispatch_wqthread_override_reset(void)
143 #if HAVE_PTHREAD_WORKQUEUE_QOS
144 if (!_dispatch_set_qos_class_enabled
) return;
145 (void)_pthread_workqueue_override_reset();
149 DISPATCH_ALWAYS_INLINE
151 _dispatch_thread_override_start(mach_port_t thread
, pthread_priority_t priority
)
153 #if HAVE_PTHREAD_WORKQUEUE_QOS
154 if (!_dispatch_set_qos_class_enabled
) return;
155 (void)_pthread_override_qos_class_start_direct(thread
, priority
);
157 (void)thread
; (void)priority
;
161 DISPATCH_ALWAYS_INLINE
163 _dispatch_thread_override_end(mach_port_t thread
)
165 #if HAVE_PTHREAD_WORKQUEUE_QOS
166 if (!_dispatch_set_qos_class_enabled
) return;
167 (void)_pthread_override_qos_class_end_direct(thread
);
174 #pragma mark dispatch_queue_t
176 static inline bool _dispatch_queue_need_override(dispatch_queue_t dq
,
177 pthread_priority_t pp
);
178 static inline bool _dispatch_queue_need_override_retain(dispatch_queue_t dq
,
179 pthread_priority_t pp
);
180 static inline bool _dispatch_queue_retain_if_override(dispatch_queue_t dq
,
181 pthread_priority_t pp
);
182 static inline pthread_priority_t
_dispatch_queue_get_override_priority(
183 dispatch_queue_t dq
);
184 static inline pthread_priority_t
_dispatch_queue_reset_override_priority(
185 dispatch_queue_t dq
);
186 static inline pthread_priority_t
_dispatch_get_defaultpriority(void);
187 static inline void _dispatch_set_defaultpriority_override(void);
188 static inline void _dispatch_reset_defaultpriority(pthread_priority_t priority
);
189 static inline pthread_priority_t
_dispatch_get_priority(void);
190 static inline void _dispatch_set_priority(pthread_priority_t priority
);
192 DISPATCH_ALWAYS_INLINE
193 static inline dispatch_queue_t
194 _dispatch_queue_get_current(void)
196 return (dispatch_queue_t
)_dispatch_thread_getspecific(dispatch_queue_key
);
199 DISPATCH_ALWAYS_INLINE
201 _dispatch_queue_set_thread(dispatch_queue_t dq
)
203 // The manager queue uses dispatch_queue_drain but is thread bound
204 if (!dq
->dq_is_thread_bound
) {
205 dq
->dq_thread
= _dispatch_thread_port();
209 DISPATCH_ALWAYS_INLINE
211 _dispatch_queue_clear_thread(dispatch_queue_t dq
)
213 if (!dq
->dq_is_thread_bound
) {
214 dq
->dq_thread
= MACH_PORT_NULL
;
218 DISPATCH_ALWAYS_INLINE
220 _dispatch_queue_push_list2(dispatch_queue_t dq
, struct dispatch_object_s
*head
,
221 struct dispatch_object_s
*tail
)
223 struct dispatch_object_s
*prev
;
224 tail
->do_next
= NULL
;
225 prev
= dispatch_atomic_xchg2o(dq
, dq_items_tail
, tail
, release
);
226 if (fastpath(prev
)) {
227 // if we crash here with a value less than 0x1000, then we are at a
228 // known bug in client code for example, see _dispatch_queue_dispose
229 // or _dispatch_atfork_child
230 prev
->do_next
= head
;
232 return (prev
!= NULL
);
235 DISPATCH_ALWAYS_INLINE
237 _dispatch_queue_push_list(dispatch_queue_t dq
, dispatch_object_t _head
,
238 dispatch_object_t _tail
, pthread_priority_t pp
, unsigned int n
)
240 struct dispatch_object_s
*head
= _head
._do
, *tail
= _tail
._do
;
241 bool override
= _dispatch_queue_need_override_retain(dq
, pp
);
242 if (!fastpath(_dispatch_queue_push_list2(dq
, head
, tail
))) {
243 _dispatch_queue_push_list_slow(dq
, pp
, head
, n
, override
);
244 } else if (override
) {
245 _dispatch_queue_wakeup_with_qos_and_release(dq
, pp
);
249 DISPATCH_ALWAYS_INLINE
251 _dispatch_queue_push(dispatch_queue_t dq
, dispatch_object_t _tail
,
252 pthread_priority_t pp
)
254 struct dispatch_object_s
*tail
= _tail
._do
;
255 bool override
= _dispatch_queue_need_override_retain(dq
, pp
);
256 if (!fastpath(_dispatch_queue_push_list2(dq
, tail
, tail
))) {
257 _dispatch_queue_push_slow(dq
, pp
, tail
, override
);
258 } else if (override
) {
259 _dispatch_queue_wakeup_with_qos_and_release(dq
, pp
);
263 DISPATCH_ALWAYS_INLINE
265 _dispatch_queue_push_wakeup(dispatch_queue_t dq
, dispatch_object_t _tail
,
266 pthread_priority_t pp
, bool wakeup
)
268 // caller assumed to have a reference on dq
269 struct dispatch_object_s
*tail
= _tail
._do
;
270 if (!fastpath(_dispatch_queue_push_list2(dq
, tail
, tail
))) {
271 _dispatch_queue_push_slow(dq
, pp
, tail
, false);
272 } else if (_dispatch_queue_need_override(dq
, pp
)) {
273 _dispatch_queue_wakeup_with_qos(dq
, pp
);
274 } else if (slowpath(wakeup
)) {
275 _dispatch_queue_wakeup(dq
);
279 struct _dispatch_identity_s
{
280 pthread_priority_t old_pri
;
281 pthread_priority_t old_pp
;
282 dispatch_queue_t old_dq
;
285 DISPATCH_ALWAYS_INLINE
287 _dispatch_root_queue_identity_assume(struct _dispatch_identity_s
*di
,
288 dispatch_queue_t assumed_rq
)
290 di
->old_dq
= _dispatch_queue_get_current();
291 di
->old_pri
= _dispatch_get_priority();
292 di
->old_pp
= _dispatch_get_defaultpriority();
294 dispatch_assert(dx_type(di
->old_dq
) == DISPATCH_QUEUE_ROOT_TYPE
);
295 dispatch_assert(dx_type(assumed_rq
) == DISPATCH_QUEUE_ROOT_TYPE
);
297 _dispatch_wqthread_override_start(_dispatch_thread_port(), di
->old_pri
);
298 _dispatch_set_priority(assumed_rq
->dq_priority
);
299 _dispatch_reset_defaultpriority(assumed_rq
->dq_priority
);
300 _dispatch_thread_setspecific(dispatch_queue_key
, assumed_rq
);
303 DISPATCH_ALWAYS_INLINE
305 _dispatch_root_queue_identity_restore(struct _dispatch_identity_s
*di
)
307 _dispatch_thread_setspecific(dispatch_queue_key
, di
->old_dq
);
308 _dispatch_set_priority(di
->old_pri
);
309 _dispatch_reset_defaultpriority(di
->old_pp
);
310 // Ensure that the root queue sees that this thread was overridden.
311 _dispatch_set_defaultpriority_override();
314 typedef dispatch_queue_t
315 _dispatch_queue_class_invoke_handler_t(dispatch_object_t
,
316 _dispatch_thread_semaphore_t
*);
318 DISPATCH_ALWAYS_INLINE
320 _dispatch_queue_class_invoke(dispatch_object_t dou
,
321 dispatch_continuation_t dc
, dispatch_invoke_flags_t flags
,
322 _dispatch_queue_class_invoke_handler_t invoke
)
324 pthread_priority_t p
= 0;
325 dispatch_queue_t dq
= dou
._dq
;
326 bool owning
= !slowpath(flags
& DISPATCH_INVOKE_STEALING
);
327 bool overriding
= slowpath(flags
& DISPATCH_INVOKE_OVERRIDING
);
329 if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq
)) &&
330 fastpath(dispatch_atomic_cmpxchg2o(dq
, dq_running
, 0, 1, acquire
))){
331 _dispatch_queue_set_thread(dq
);
333 dispatch_queue_t tq
= NULL
;
334 _dispatch_thread_semaphore_t sema
= 0;
335 struct _dispatch_identity_s di
;
338 _dispatch_object_debug(dq
, "stolen onto thread 0x%x, 0x%lx",
339 dq
->dq_thread
, _dispatch_get_defaultpriority());
340 _dispatch_root_queue_identity_assume(&di
, dc
->dc_other
);
343 tq
= invoke(dq
, &sema
);
344 _dispatch_queue_clear_thread(dq
);
346 if (!owning
&& !sema
&& tq
&& tq
!= dq
->do_targetq
) {
348 * When (tq && tq != dq->do_targetq) this is a source or mach
349 * channel asking to get to their manager queue.
351 * Since stealers cannot call _dispatch_queue_push_queue and
352 * retarget those, they need ot destroy the override so that
353 * when waking those sources or mach channels on their target queue
354 * we don't risk a stealer taking them over and not be able to
355 * retarget again, effectively live-locking them.
357 * Also, we're in the `overriding` case so the thread will be marked
358 * dirty by _dispatch_root_queue_identity_restore anyway
359 * so forgetting about p is fine.
361 (void)_dispatch_queue_reset_override_priority(dq
);
363 } else if (sema
|| tq
|| DISPATCH_OBJECT_SUSPENDED(dq
)) {
364 p
= _dispatch_queue_get_override_priority(dq
);
366 p
= _dispatch_queue_reset_override_priority(dq
);
369 _dispatch_root_queue_identity_restore(&di
);
371 if (p
> (dq
->dq_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
)) {
372 // Ensure that the root queue sees that this thread was overridden.
373 _dispatch_set_defaultpriority_override();
377 uint32_t running
= dispatch_atomic_dec2o(dq
, dq_running
, release
);
379 _dispatch_thread_semaphore_signal(sema
);
380 } else if (owning
&& tq
) {
381 _dispatch_introspection_queue_item_complete(dq
);
382 return _dispatch_queue_push_queue(tq
, dq
, p
);
384 if (!owning
&& running
== 0) {
385 _dispatch_introspection_queue_item_complete(dq
);
386 return _dispatch_queue_wakeup_with_qos_and_release(dq
, p
);
388 } else if (overriding
) {
389 mach_port_t th
= dq
->dq_thread
;
391 p
= _dispatch_queue_get_override_priority(dq
);
392 _dispatch_object_debug(dq
, "overriding thr 0x%x to priority 0x%lx",
394 _dispatch_wqthread_override_start(th
, p
);
398 _dispatch_introspection_queue_item_complete(dq
);
400 dq
->do_next
= DISPATCH_OBJECT_LISTLESS
;
401 if (!dispatch_atomic_sub2o(dq
, do_suspend_cnt
,
402 DISPATCH_OBJECT_SUSPEND_LOCK
, seq_cst
)) {
403 // seq_cst with atomic store to suspend_cnt <rdar://problem/11915417>
404 if (dispatch_atomic_load2o(dq
, dq_running
, seq_cst
) == 0) {
405 // verify that the queue is idle
406 return _dispatch_queue_wakeup_with_qos_and_release(dq
, p
);
410 _dispatch_release(dq
); // added when the queue is put on the list
413 DISPATCH_ALWAYS_INLINE
414 static inline unsigned long
415 _dispatch_queue_class_probe(dispatch_object_t dou
)
417 dispatch_queue_t dq
= dou
._dq
;
418 struct dispatch_object_s
*tail
;
419 // seq_cst with atomic store to suspend_cnt <rdar://problem/14637483>
420 tail
= dispatch_atomic_load2o(dq
, dq_items_tail
, seq_cst
);
421 return (unsigned long)slowpath(tail
!= NULL
);
424 DISPATCH_ALWAYS_INLINE
426 _dispatch_object_suspended(dispatch_object_t dou
)
428 struct dispatch_object_s
*obj
= dou
._do
;
429 unsigned int suspend_cnt
;
430 // seq_cst with atomic store to tail <rdar://problem/14637483>
431 suspend_cnt
= dispatch_atomic_load2o(obj
, do_suspend_cnt
, seq_cst
);
432 return slowpath(suspend_cnt
>= DISPATCH_OBJECT_SUSPEND_INTERVAL
);
435 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
436 static inline dispatch_queue_t
437 _dispatch_get_root_queue(qos_class_t priority
, bool overcommit
)
439 if (overcommit
) switch (priority
) {
440 case _DISPATCH_QOS_CLASS_MAINTENANCE
:
441 return &_dispatch_root_queues
[
442 DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT
];
443 case _DISPATCH_QOS_CLASS_BACKGROUND
:
444 return &_dispatch_root_queues
[
445 DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT
];
446 case _DISPATCH_QOS_CLASS_UTILITY
:
447 return &_dispatch_root_queues
[
448 DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT
];
449 case _DISPATCH_QOS_CLASS_DEFAULT
:
450 return &_dispatch_root_queues
[
451 DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT
];
452 case _DISPATCH_QOS_CLASS_USER_INITIATED
:
453 return &_dispatch_root_queues
[
454 DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT
];
455 case _DISPATCH_QOS_CLASS_USER_INTERACTIVE
:
456 return &_dispatch_root_queues
[
457 DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT
];
458 } else switch (priority
) {
459 case _DISPATCH_QOS_CLASS_MAINTENANCE
:
460 return &_dispatch_root_queues
[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS
];
461 case _DISPATCH_QOS_CLASS_BACKGROUND
:
462 return &_dispatch_root_queues
[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS
];
463 case _DISPATCH_QOS_CLASS_UTILITY
:
464 return &_dispatch_root_queues
[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS
];
465 case _DISPATCH_QOS_CLASS_DEFAULT
:
466 return &_dispatch_root_queues
[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS
];
467 case _DISPATCH_QOS_CLASS_USER_INITIATED
:
468 return &_dispatch_root_queues
[
469 DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS
];
470 case _DISPATCH_QOS_CLASS_USER_INTERACTIVE
:
471 return &_dispatch_root_queues
[
472 DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS
];
477 // Note to later developers: ensure that any initialization changes are
478 // made for statically allocated queues (i.e. _dispatch_main_q).
480 _dispatch_queue_init(dispatch_queue_t dq
)
482 dq
->do_next
= (struct dispatch_queue_s
*)DISPATCH_OBJECT_LISTLESS
;
486 dq
->dq_override_voucher
= DISPATCH_NO_VOUCHER
;
487 dq
->dq_serialnum
= dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers
,
491 DISPATCH_ALWAYS_INLINE
493 _dispatch_queue_set_bound_thread(dispatch_queue_t dq
)
495 //Tag thread-bound queues with the owning thread
496 dispatch_assert(dq
->dq_is_thread_bound
);
497 dq
->dq_thread
= _dispatch_thread_port();
500 DISPATCH_ALWAYS_INLINE
502 _dispatch_queue_clear_bound_thread(dispatch_queue_t dq
)
504 dispatch_assert(dq
->dq_is_thread_bound
);
505 dq
->dq_thread
= MACH_PORT_NULL
;
508 DISPATCH_ALWAYS_INLINE
509 static inline mach_port_t
510 _dispatch_queue_get_bound_thread(dispatch_queue_t dq
)
512 dispatch_assert(dq
->dq_is_thread_bound
);
513 return dq
->dq_thread
;
516 DISPATCH_ALWAYS_INLINE
517 static inline dispatch_pthread_root_queue_observer_hooks_t
518 _dispatch_get_pthread_root_queue_observer_hooks(void)
520 return _dispatch_thread_getspecific(
521 dispatch_pthread_root_queue_observer_hooks_key
);
524 DISPATCH_ALWAYS_INLINE
526 _dispatch_set_pthread_root_queue_observer_hooks(
527 dispatch_pthread_root_queue_observer_hooks_t observer_hooks
)
529 _dispatch_thread_setspecific(dispatch_pthread_root_queue_observer_hooks_key
,
534 #pragma mark dispatch_priority
536 DISPATCH_ALWAYS_INLINE
537 static inline pthread_priority_t
538 _dispatch_get_defaultpriority(void)
540 #if HAVE_PTHREAD_WORKQUEUE_QOS
541 pthread_priority_t priority
= (uintptr_t)_dispatch_thread_getspecific(
542 dispatch_defaultpriority_key
);
549 DISPATCH_ALWAYS_INLINE
551 _dispatch_reset_defaultpriority(pthread_priority_t priority
)
553 #if HAVE_PTHREAD_WORKQUEUE_QOS
554 pthread_priority_t old_priority
= _dispatch_get_defaultpriority();
555 // if an inner-loop or'd in the override flag to the per-thread priority,
556 // it needs to be propogated up the chain
557 priority
|= old_priority
& _PTHREAD_PRIORITY_OVERRIDE_FLAG
;
559 if (slowpath(priority
!= old_priority
)) {
560 _dispatch_thread_setspecific(dispatch_defaultpriority_key
,
568 DISPATCH_ALWAYS_INLINE
570 _dispatch_set_defaultpriority_override(void)
572 #if HAVE_PTHREAD_WORKQUEUE_QOS
573 pthread_priority_t old_priority
= _dispatch_get_defaultpriority();
574 pthread_priority_t priority
= old_priority
|
575 _PTHREAD_PRIORITY_OVERRIDE_FLAG
;
577 if (slowpath(priority
!= old_priority
)) {
578 _dispatch_thread_setspecific(dispatch_defaultpriority_key
,
584 DISPATCH_ALWAYS_INLINE
586 _dispatch_reset_defaultpriority_override(void)
588 #if HAVE_PTHREAD_WORKQUEUE_QOS
589 pthread_priority_t old_priority
= _dispatch_get_defaultpriority();
590 pthread_priority_t priority
= old_priority
&
591 ~((pthread_priority_t
)_PTHREAD_PRIORITY_OVERRIDE_FLAG
);
593 if (slowpath(priority
!= old_priority
)) {
594 _dispatch_thread_setspecific(dispatch_defaultpriority_key
,
602 DISPATCH_ALWAYS_INLINE
604 _dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq
,
607 #if HAVE_PTHREAD_WORKQUEUE_QOS
608 const pthread_priority_t rootqueue_flag
= _PTHREAD_PRIORITY_ROOTQUEUE_FLAG
;
609 const pthread_priority_t inherited_flag
= _PTHREAD_PRIORITY_INHERIT_FLAG
;
610 pthread_priority_t dqp
= dq
->dq_priority
, tqp
= tq
->dq_priority
;
611 if ((!(dqp
& ~_PTHREAD_PRIORITY_FLAGS_MASK
) || (dqp
& inherited_flag
)) &&
612 (tqp
& rootqueue_flag
)) {
613 dq
->dq_priority
= (tqp
& ~rootqueue_flag
) | inherited_flag
;
620 DISPATCH_ALWAYS_INLINE
621 static inline pthread_priority_t
622 _dispatch_set_defaultpriority(pthread_priority_t priority
)
624 #if HAVE_PTHREAD_WORKQUEUE_QOS
625 pthread_priority_t old_priority
= _dispatch_get_defaultpriority();
627 pthread_priority_t flags
, defaultqueue
, basepri
;
628 flags
= (priority
& _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
);
629 defaultqueue
= (old_priority
& _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
);
630 basepri
= (old_priority
& ~_PTHREAD_PRIORITY_FLAGS_MASK
);
631 priority
&= ~_PTHREAD_PRIORITY_FLAGS_MASK
;
633 flags
= _PTHREAD_PRIORITY_INHERIT_FLAG
| defaultqueue
;
635 } else if (priority
< basepri
&& !defaultqueue
) { // rdar://16349734
638 priority
|= flags
| (old_priority
& _PTHREAD_PRIORITY_OVERRIDE_FLAG
);
640 if (slowpath(priority
!= old_priority
)) {
641 _dispatch_thread_setspecific(dispatch_defaultpriority_key
,
651 DISPATCH_ALWAYS_INLINE
652 static inline pthread_priority_t
653 _dispatch_priority_adopt(pthread_priority_t priority
, unsigned long flags
)
655 #if HAVE_PTHREAD_WORKQUEUE_QOS
656 pthread_priority_t defaultpri
= _dispatch_get_defaultpriority();
657 bool enforce
, inherited
, defaultqueue
;
658 enforce
= (flags
& DISPATCH_PRIORITY_ENFORCE
) ||
659 (priority
& _PTHREAD_PRIORITY_ENFORCE_FLAG
);
660 inherited
= (defaultpri
& _PTHREAD_PRIORITY_INHERIT_FLAG
);
661 defaultqueue
= (defaultpri
& _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
);
662 defaultpri
&= ~_PTHREAD_PRIORITY_FLAGS_MASK
;
663 priority
&= ~_PTHREAD_PRIORITY_FLAGS_MASK
;
666 } else if (!enforce
) {
667 if (priority
< defaultpri
) {
668 if (defaultqueue
) enforce
= true; // rdar://16349734
669 } else if (inherited
|| defaultqueue
) {
672 } else if (priority
< defaultpri
&& !defaultqueue
) { // rdar://16349734
675 return enforce
? priority
: defaultpri
;
677 (void)priority
; (void)flags
;
682 DISPATCH_ALWAYS_INLINE
683 static inline pthread_priority_t
684 _dispatch_get_priority(void)
686 #if HAVE_PTHREAD_WORKQUEUE_QOS
687 pthread_priority_t priority
= (uintptr_t)_dispatch_thread_getspecific(
688 dispatch_priority_key
);
689 return (priority
& ~_PTHREAD_PRIORITY_FLAGS_MASK
);
695 DISPATCH_ALWAYS_INLINE
697 _dispatch_set_priority_and_mach_voucher(pthread_priority_t priority
,
700 #if HAVE_PTHREAD_WORKQUEUE_QOS
701 _pthread_set_flags_t flags
= 0;
702 if (priority
&& _dispatch_set_qos_class_enabled
) {
703 pthread_priority_t old_priority
= _dispatch_get_priority();
704 if (priority
!= old_priority
&& old_priority
) {
705 flags
|= _PTHREAD_SET_SELF_QOS_FLAG
;
708 if (kv
!= VOUCHER_NO_MACH_VOUCHER
) {
709 #if VOUCHER_USE_MACH_VOUCHER
710 flags
|= _PTHREAD_SET_SELF_VOUCHER_FLAG
;
714 int r
= _pthread_set_properties_self(flags
, priority
, kv
);
715 (void)dispatch_assume_zero(r
);
716 #elif VOUCHER_USE_MACH_VOUCHER
717 #error Invalid build configuration
719 (void)priority
; (void)kv
;
723 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
724 static inline voucher_t
725 _dispatch_set_priority_and_adopt_voucher(pthread_priority_t priority
,
728 pthread_priority_t p
= (priority
!= DISPATCH_NO_PRIORITY
) ? priority
: 0;
729 voucher_t ov
= DISPATCH_NO_VOUCHER
;
730 mach_voucher_t kv
= VOUCHER_NO_MACH_VOUCHER
;
731 if (voucher
!= DISPATCH_NO_VOUCHER
) {
733 kv
= _voucher_swap_and_get_mach_voucher(ov
, voucher
);
735 _dispatch_set_priority_and_mach_voucher(p
, kv
);
739 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
740 static inline voucher_t
741 _dispatch_adopt_priority_and_voucher(pthread_priority_t priority
,
742 voucher_t v
, unsigned long flags
)
744 pthread_priority_t p
= 0;
745 if (priority
!= DISPATCH_NO_PRIORITY
) {
746 p
= _dispatch_priority_adopt(priority
, flags
);
748 if (!(flags
& DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE
)) {
749 dispatch_queue_t dq
= _dispatch_queue_get_current();
750 if (dq
&& dq
->dq_override_voucher
!= DISPATCH_NO_VOUCHER
) {
751 if (v
!= DISPATCH_NO_VOUCHER
&& v
) _voucher_release(v
);
752 v
= dq
->dq_override_voucher
;
753 if (v
) _voucher_retain(v
);
756 return _dispatch_set_priority_and_adopt_voucher(p
, v
);
759 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
760 static inline voucher_t
761 _dispatch_adopt_queue_override_voucher(dispatch_queue_t dq
)
763 voucher_t v
= dq
->dq_override_voucher
;
764 if (v
== DISPATCH_NO_VOUCHER
) return DISPATCH_NO_VOUCHER
;
765 if (v
) _voucher_retain(v
);
766 return _dispatch_set_priority_and_adopt_voucher(DISPATCH_NO_PRIORITY
, v
);
769 DISPATCH_ALWAYS_INLINE
771 _dispatch_adopt_priority_and_replace_voucher(pthread_priority_t priority
,
772 voucher_t voucher
, unsigned long flags
)
775 ov
= _dispatch_adopt_priority_and_voucher(priority
, voucher
, flags
);
776 if (voucher
!= DISPATCH_NO_VOUCHER
&& ov
) _voucher_release(ov
);
779 DISPATCH_ALWAYS_INLINE
781 _dispatch_reset_priority_and_voucher(pthread_priority_t priority
,
785 ov
= _dispatch_set_priority_and_adopt_voucher(priority
, voucher
);
786 if (voucher
!= DISPATCH_NO_VOUCHER
&& ov
) _voucher_release(ov
);
789 DISPATCH_ALWAYS_INLINE
791 _dispatch_reset_voucher(voucher_t voucher
)
793 return _dispatch_reset_priority_and_voucher(DISPATCH_NO_PRIORITY
, voucher
);
796 DISPATCH_ALWAYS_INLINE
798 _dispatch_set_priority(pthread_priority_t priority
)
800 _dispatch_set_priority_and_mach_voucher(priority
, VOUCHER_NO_MACH_VOUCHER
);
803 DISPATCH_ALWAYS_INLINE
804 static inline pthread_priority_t
805 _dispatch_priority_normalize(pthread_priority_t pp
)
807 dispatch_assert_zero(pp
& ~(pthread_priority_t
)
808 _PTHREAD_PRIORITY_QOS_CLASS_MASK
);
809 unsigned int qosbits
= (unsigned int)pp
, idx
;
810 if (!qosbits
) return 0;
811 idx
= (unsigned int)(sizeof(qosbits
)*8) -
812 (unsigned int)__builtin_clz(qosbits
) - 1;
816 DISPATCH_ALWAYS_INLINE
818 _dispatch_queue_need_override(dispatch_queue_t dq
, pthread_priority_t pp
)
820 if (!pp
|| dx_type(dq
) == DISPATCH_QUEUE_ROOT_TYPE
) return false;
821 uint32_t p
= (pp
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
);
822 uint32_t o
= dq
->dq_override
;
826 DISPATCH_ALWAYS_INLINE
828 _dispatch_queue_need_override_retain(dispatch_queue_t dq
, pthread_priority_t pp
)
830 bool override
= _dispatch_queue_need_override(dq
, pp
);
831 if (override
) _dispatch_retain(dq
);
835 DISPATCH_ALWAYS_INLINE
837 _dispatch_queue_override_priority(dispatch_queue_t dq
, pthread_priority_t
*pp
,
838 bool *was_overridden
)
840 uint32_t o
= dq
->dq_override
;
841 uint32_t p
= (*pp
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
);
843 o
= dispatch_atomic_or_orig2o(dq
, dq_override
, p
, relaxed
);
844 if (was_overridden
) {
845 o
= (uint32_t)_dispatch_priority_normalize(o
);
847 *pp
= _dispatch_priority_normalize(o
| p
);
849 o
= (uint32_t)_dispatch_priority_normalize(o
);
852 if (was_overridden
) {
854 (dq
->dq_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
) < o
;
859 DISPATCH_ALWAYS_INLINE
860 static inline pthread_priority_t
861 _dispatch_queue_get_override_priority(dispatch_queue_t dq
)
863 uint32_t p
= (dq
->dq_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
);
864 uint32_t o
= dq
->dq_override
;
865 if (o
== p
) return o
;
866 return _dispatch_priority_normalize(o
);
869 DISPATCH_ALWAYS_INLINE
871 _dispatch_queue_set_override_priority(dispatch_queue_t dq
)
874 if (!(dq
->dq_priority
& _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
)) {
875 p
= dq
->dq_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
;
877 dispatch_atomic_store2o(dq
, dq_override
, p
, relaxed
);
880 DISPATCH_ALWAYS_INLINE
881 static inline pthread_priority_t
882 _dispatch_queue_reset_override_priority(dispatch_queue_t dq
)
885 if (!(dq
->dq_priority
& _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
)) {
886 p
= dq
->dq_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
;
888 uint32_t o
= dispatch_atomic_xchg2o(dq
, dq_override
, p
, relaxed
);
889 if (o
== p
) return o
;
890 return _dispatch_priority_normalize(o
);
893 DISPATCH_ALWAYS_INLINE
894 static inline pthread_priority_t
895 _dispatch_priority_propagate(void)
897 #if HAVE_PTHREAD_WORKQUEUE_QOS
898 pthread_priority_t priority
= _dispatch_get_priority();
899 if (priority
> _dispatch_user_initiated_priority
) {
900 // Cap QOS for propagation at user-initiated <rdar://16681262&16998036>
901 priority
= _dispatch_user_initiated_priority
;
909 // including maintenance
910 DISPATCH_ALWAYS_INLINE
912 _dispatch_is_background_thread(void)
914 #if HAVE_PTHREAD_WORKQUEUE_QOS
915 pthread_priority_t priority
;
916 priority
= _dispatch_get_priority();
917 return priority
&& (priority
<= _dispatch_background_priority
);
924 #pragma mark dispatch_block_t
928 DISPATCH_ALWAYS_INLINE
930 _dispatch_block_has_private_data(const dispatch_block_t block
)
932 extern void (*_dispatch_block_special_invoke
)(void*);
933 return (_dispatch_Block_invoke(block
) == _dispatch_block_special_invoke
);
936 DISPATCH_ALWAYS_INLINE
937 static inline dispatch_block_private_data_t
938 _dispatch_block_get_data(const dispatch_block_t db
)
940 if (!_dispatch_block_has_private_data(db
)) {
943 // Keep in sync with _dispatch_block_create implementation
944 uint8_t *x
= (uint8_t *)db
;
945 // x points to base of struct Block_layout
946 x
+= sizeof(struct Block_layout
);
947 // x points to base of captured dispatch_block_private_data_s object
948 dispatch_block_private_data_t dbpd
= (dispatch_block_private_data_t
)x
;
949 if (dbpd
->dbpd_magic
!= DISPATCH_BLOCK_PRIVATE_DATA_MAGIC
) {
950 DISPATCH_CRASH("Corruption of dispatch block object");
955 DISPATCH_ALWAYS_INLINE
956 static inline pthread_priority_t
957 _dispatch_block_get_priority(const dispatch_block_t db
)
959 dispatch_block_private_data_t dbpd
= _dispatch_block_get_data(db
);
960 return dbpd
? dbpd
->dbpd_priority
: 0;
963 DISPATCH_ALWAYS_INLINE
964 static inline dispatch_block_flags_t
965 _dispatch_block_get_flags(const dispatch_block_t db
)
967 dispatch_block_private_data_t dbpd
= _dispatch_block_get_data(db
);
968 return dbpd
? dbpd
->dbpd_flags
: 0;
971 #define DISPATCH_BLOCK_HAS(flag, db) \
972 ((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_HAS_ ## flag) != 0)
973 #define DISPATCH_BLOCK_IS(flag, db) \
974 ((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_ ## flag) != 0)
979 #pragma mark dispatch_continuation_t
981 DISPATCH_ALWAYS_INLINE
982 static inline dispatch_continuation_t
983 _dispatch_continuation_alloc_cacheonly(void)
985 dispatch_continuation_t dc
= (dispatch_continuation_t
)
986 fastpath(_dispatch_thread_getspecific(dispatch_cache_key
));
988 _dispatch_thread_setspecific(dispatch_cache_key
, dc
->do_next
);
993 DISPATCH_ALWAYS_INLINE
994 static inline dispatch_continuation_t
995 _dispatch_continuation_alloc(void)
997 dispatch_continuation_t dc
=
998 fastpath(_dispatch_continuation_alloc_cacheonly());
1000 return _dispatch_continuation_alloc_from_heap();
1005 DISPATCH_ALWAYS_INLINE
1006 static inline dispatch_continuation_t
1007 _dispatch_continuation_free_cacheonly(dispatch_continuation_t dc
)
1009 dispatch_continuation_t prev_dc
= (dispatch_continuation_t
)
1010 fastpath(_dispatch_thread_getspecific(dispatch_cache_key
));
1011 int cnt
= prev_dc
? prev_dc
->dc_cache_cnt
+ 1 : 1;
1012 // Cap continuation cache
1013 if (slowpath(cnt
> _dispatch_continuation_cache_limit
)) {
1016 dc
->do_next
= prev_dc
;
1017 dc
->dc_cache_cnt
= cnt
;
1018 _dispatch_thread_setspecific(dispatch_cache_key
, dc
);
1022 DISPATCH_ALWAYS_INLINE
1024 _dispatch_continuation_free(dispatch_continuation_t dc
)
1026 dc
= _dispatch_continuation_free_cacheonly(dc
);
1028 _dispatch_continuation_free_to_cache_limit(dc
);
1034 DISPATCH_ALWAYS_INLINE
1036 _dispatch_continuation_invoke(dispatch_object_t dou
, dispatch_queue_t dq
)
1038 dispatch_continuation_t dc
= dou
._dc
, dc1
;
1039 dispatch_group_t dg
;
1041 _dispatch_trace_continuation_pop(dq
, dou
);
1042 if (DISPATCH_OBJ_IS_VTABLE(dou
._do
)) {
1043 return dx_invoke(dou
._do
, NULL
, DISPATCH_INVOKE_NONE
);
1046 // Add the item back to the cache before calling the function. This
1047 // allows the 'hot' continuation to be used for a quick callback.
1049 // The ccache version is per-thread.
1050 // Therefore, the object has not been reused yet.
1051 // This generates better assembly.
1052 if ((long)dc
->do_vtable
& DISPATCH_OBJ_ASYNC_BIT
) {
1053 _dispatch_continuation_voucher_adopt(dc
);
1054 dc1
= _dispatch_continuation_free_cacheonly(dc
);
1058 if ((long)dc
->do_vtable
& DISPATCH_OBJ_GROUP_BIT
) {
1063 _dispatch_client_callout(dc
->dc_ctxt
, dc
->dc_func
);
1065 dispatch_group_leave(dg
);
1066 _dispatch_release(dg
);
1068 _dispatch_introspection_queue_item_complete(dou
);
1069 if (slowpath(dc1
)) {
1070 _dispatch_continuation_free_to_cache_limit(dc1
);
1074 DISPATCH_ALWAYS_INLINE_NDEBUG
1076 _dispatch_continuation_pop(dispatch_object_t dou
)
1078 dispatch_queue_t dq
= _dispatch_queue_get_current();
1079 dispatch_pthread_root_queue_observer_hooks_t observer_hooks
=
1080 _dispatch_get_pthread_root_queue_observer_hooks();
1081 if (observer_hooks
) observer_hooks
->queue_will_execute(dq
);
1082 _dispatch_continuation_invoke(dou
, dq
);
1083 if (observer_hooks
) observer_hooks
->queue_did_execute(dq
);
1086 DISPATCH_ALWAYS_INLINE
1088 _dispatch_continuation_priority_set(dispatch_continuation_t dc
,
1089 pthread_priority_t pp
, dispatch_block_flags_t flags
)
1091 #if HAVE_PTHREAD_WORKQUEUE_QOS
1092 pthread_priority_t prio
= 0;
1093 if (flags
& DISPATCH_BLOCK_HAS_PRIORITY
) {
1095 } else if (!(flags
& DISPATCH_BLOCK_NO_QOS_CLASS
)) {
1096 prio
= _dispatch_priority_propagate();
1098 if (flags
& DISPATCH_BLOCK_ENFORCE_QOS_CLASS
) {
1099 prio
|= _PTHREAD_PRIORITY_ENFORCE_FLAG
;
1101 dc
->dc_priority
= prio
;
1103 (void)dc
; (void)pp
; (void)flags
;
1107 DISPATCH_ALWAYS_INLINE
1108 static inline pthread_priority_t
1109 _dispatch_continuation_get_override_priority(dispatch_queue_t dq
,
1110 dispatch_continuation_t dc
)
1112 #if HAVE_PTHREAD_WORKQUEUE_QOS
1113 pthread_priority_t p
= dc
->dc_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
;
1114 bool enforce
= dc
->dc_priority
& _PTHREAD_PRIORITY_ENFORCE_FLAG
;
1115 pthread_priority_t dqp
= dq
->dq_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
;
1116 bool defaultqueue
= dq
->dq_priority
& _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
;
1119 } else if (!enforce
&& (!dqp
|| defaultqueue
)) {
1132 #endif // !(USE_OBJC && __OBJC2__) && !defined(__cplusplus)
1134 #endif /* __DISPATCH_INLINE_INTERNAL__ */