/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */
27 #ifndef __DISPATCH_INLINE_INTERNAL__
28 #define __DISPATCH_INLINE_INTERNAL__
30 #ifndef __DISPATCH_INDIRECT__
31 #error "Please #include <dispatch/dispatch.h> instead of this file directly."
32 #include <dispatch/base.h> // for HeaderDoc
35 #if DISPATCH_USE_CLIENT_CALLOUT
38 _dispatch_client_callout(void *ctxt
, dispatch_function_t f
);
40 _dispatch_client_callout2(void *ctxt
, size_t i
, void (*f
)(void *, size_t));
42 _dispatch_client_callout3(void *ctxt
, dispatch_data_t region
, size_t offset
,
43 const void *buffer
, size_t size
, dispatch_data_applier_function_t f
);
45 _dispatch_client_callout4(void *ctxt
, dispatch_mach_reason_t reason
,
46 dispatch_mach_msg_t dmsg
, mach_error_t error
,
47 dispatch_mach_handler_function_t f
);
49 #else // !DISPATCH_USE_CLIENT_CALLOUT
51 DISPATCH_ALWAYS_INLINE
53 _dispatch_client_callout(void *ctxt
, dispatch_function_t f
)
58 DISPATCH_ALWAYS_INLINE
60 _dispatch_client_callout2(void *ctxt
, size_t i
, void (*f
)(void *, size_t))
65 DISPATCH_ALWAYS_INLINE
67 _dispatch_client_callout3(void *ctxt
, dispatch_data_t region
, size_t offset
,
68 const void *buffer
, size_t size
, dispatch_data_applier_function_t f
)
70 return f(ctxt
, region
, offset
, buffer
, size
);
73 DISPATCH_ALWAYS_INLINE
75 _dispatch_client_callout4(void *ctxt
, dispatch_mach_reason_t reason
,
76 dispatch_mach_msg_t dmsg
, mach_error_t error
,
77 dispatch_mach_handler_function_t f
)
79 return f(ctxt
, reason
, dmsg
, error
);
82 #endif // !DISPATCH_USE_CLIENT_CALLOUT
84 #if !(USE_OBJC && __OBJC2__)
87 #pragma mark _os_object_t & dispatch_object_t
89 DISPATCH_ALWAYS_INLINE
90 static inline _os_object_t
91 _os_object_retain_internal_inline(_os_object_t obj
)
93 int ref_cnt
= obj
->os_obj_ref_cnt
;
94 if (slowpath(ref_cnt
== _OS_OBJECT_GLOBAL_REFCNT
)) {
95 return obj
; // global object
97 ref_cnt
= dispatch_atomic_inc2o(obj
, os_obj_ref_cnt
, relaxed
);
98 if (slowpath(ref_cnt
<= 0)) {
99 DISPATCH_CRASH("Resurrection of an object");
104 DISPATCH_ALWAYS_INLINE
106 _os_object_release_internal_inline(_os_object_t obj
)
108 int ref_cnt
= obj
->os_obj_ref_cnt
;
109 if (slowpath(ref_cnt
== _OS_OBJECT_GLOBAL_REFCNT
)) {
110 return; // global object
112 ref_cnt
= dispatch_atomic_dec2o(obj
, os_obj_ref_cnt
, relaxed
);
113 if (fastpath(ref_cnt
>= 0)) {
116 if (slowpath(ref_cnt
< -1)) {
117 DISPATCH_CRASH("Over-release of an object");
120 if (slowpath(obj
->os_obj_xref_cnt
>= 0)) {
121 DISPATCH_CRASH("Release while external references exist");
124 return _os_object_dispose(obj
);
127 DISPATCH_ALWAYS_INLINE_NDEBUG
129 _dispatch_retain(dispatch_object_t dou
)
131 (void)_os_object_retain_internal_inline(dou
._os_obj
);
134 DISPATCH_ALWAYS_INLINE_NDEBUG
136 _dispatch_release(dispatch_object_t dou
)
138 _os_object_release_internal_inline(dou
._os_obj
);
142 #pragma mark dispatch_thread
144 DISPATCH_ALWAYS_INLINE
146 _dispatch_wqthread_override_start(mach_port_t thread
,
147 pthread_priority_t priority
)
149 #if HAVE_PTHREAD_WORKQUEUE_QOS
150 if (!_dispatch_set_qos_class_enabled
) return;
151 (void)_pthread_workqueue_override_start_direct(thread
, priority
);
153 (void)thread
; (void)priority
;
157 DISPATCH_ALWAYS_INLINE
159 _dispatch_wqthread_override_reset(void)
161 #if HAVE_PTHREAD_WORKQUEUE_QOS
162 if (!_dispatch_set_qos_class_enabled
) return;
163 (void)_pthread_workqueue_override_reset();
167 DISPATCH_ALWAYS_INLINE
169 _dispatch_thread_override_start(mach_port_t thread
, pthread_priority_t priority
)
171 #if HAVE_PTHREAD_WORKQUEUE_QOS
172 if (!_dispatch_set_qos_class_enabled
) return;
173 (void)_pthread_override_qos_class_start_direct(thread
, priority
);
175 (void)thread
; (void)priority
;
179 DISPATCH_ALWAYS_INLINE
181 _dispatch_thread_override_end(mach_port_t thread
)
183 #if HAVE_PTHREAD_WORKQUEUE_QOS
184 if (!_dispatch_set_qos_class_enabled
) return;
185 (void)_pthread_override_qos_class_end_direct(thread
);
192 #pragma mark dispatch_queue_t
194 static inline bool _dispatch_queue_need_override(dispatch_queue_t dq
,
195 pthread_priority_t pp
);
196 static inline bool _dispatch_queue_need_override_retain(dispatch_queue_t dq
,
197 pthread_priority_t pp
);
198 static inline bool _dispatch_queue_retain_if_override(dispatch_queue_t dq
,
199 pthread_priority_t pp
);
200 static inline pthread_priority_t
_dispatch_queue_get_override_priority(
201 dispatch_queue_t dq
);
202 static inline pthread_priority_t
_dispatch_queue_reset_override_priority(
203 dispatch_queue_t dq
);
204 static inline pthread_priority_t
_dispatch_get_defaultpriority(void);
205 static inline void _dispatch_set_defaultpriority_override(void);
206 static inline void _dispatch_reset_defaultpriority(pthread_priority_t priority
);
207 static inline void _dispatch_set_priority(pthread_priority_t priority
);
209 DISPATCH_ALWAYS_INLINE
211 _dispatch_queue_set_thread(dispatch_queue_t dq
)
213 // The manager queue uses dispatch_queue_drain but is thread bound
214 if (!dq
->dq_is_thread_bound
) {
215 dq
->dq_thread
= _dispatch_thread_port();
219 DISPATCH_ALWAYS_INLINE
221 _dispatch_queue_clear_thread(dispatch_queue_t dq
)
223 if (!dq
->dq_is_thread_bound
) {
224 dq
->dq_thread
= MACH_PORT_NULL
;
228 DISPATCH_ALWAYS_INLINE
230 _dispatch_queue_push_list2(dispatch_queue_t dq
, struct dispatch_object_s
*head
,
231 struct dispatch_object_s
*tail
)
233 struct dispatch_object_s
*prev
;
234 tail
->do_next
= NULL
;
235 prev
= dispatch_atomic_xchg2o(dq
, dq_items_tail
, tail
, release
);
236 if (fastpath(prev
)) {
237 // if we crash here with a value less than 0x1000, then we are at a
238 // known bug in client code for example, see _dispatch_queue_dispose
239 // or _dispatch_atfork_child
240 prev
->do_next
= head
;
242 return (prev
!= NULL
);
245 DISPATCH_ALWAYS_INLINE
247 _dispatch_queue_push_list(dispatch_queue_t dq
, dispatch_object_t _head
,
248 dispatch_object_t _tail
, pthread_priority_t pp
, unsigned int n
)
250 struct dispatch_object_s
*head
= _head
._do
, *tail
= _tail
._do
;
251 bool override
= _dispatch_queue_need_override_retain(dq
, pp
);
252 if (!fastpath(_dispatch_queue_push_list2(dq
, head
, tail
))) {
253 _dispatch_queue_push_list_slow(dq
, pp
, head
, n
, override
);
254 } else if (override
) {
255 _dispatch_queue_wakeup_with_qos_and_release(dq
, pp
);
259 DISPATCH_ALWAYS_INLINE
261 _dispatch_queue_push(dispatch_queue_t dq
, dispatch_object_t _tail
,
262 pthread_priority_t pp
)
264 struct dispatch_object_s
*tail
= _tail
._do
;
265 bool override
= _dispatch_queue_need_override_retain(dq
, pp
);
266 if (!fastpath(_dispatch_queue_push_list2(dq
, tail
, tail
))) {
267 _dispatch_queue_push_slow(dq
, pp
, tail
, override
);
268 } else if (override
) {
269 _dispatch_queue_wakeup_with_qos_and_release(dq
, pp
);
273 DISPATCH_ALWAYS_INLINE
275 _dispatch_queue_push_wakeup(dispatch_queue_t dq
, dispatch_object_t _tail
,
276 pthread_priority_t pp
, bool wakeup
)
278 // caller assumed to have a reference on dq
279 struct dispatch_object_s
*tail
= _tail
._do
;
280 if (!fastpath(_dispatch_queue_push_list2(dq
, tail
, tail
))) {
281 _dispatch_queue_push_slow(dq
, pp
, tail
, false);
282 } else if (_dispatch_queue_need_override(dq
, pp
)) {
283 _dispatch_queue_wakeup_with_qos(dq
, pp
);
284 } else if (slowpath(wakeup
)) {
285 _dispatch_queue_wakeup(dq
);
289 DISPATCH_ALWAYS_INLINE
291 _dispatch_queue_class_invoke(dispatch_object_t dou
,
292 dispatch_queue_t (*invoke
)(dispatch_object_t
,
293 _dispatch_thread_semaphore_t
*))
295 pthread_priority_t p
= 0;
296 dispatch_queue_t dq
= dou
._dq
;
297 if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq
)) &&
298 fastpath(dispatch_atomic_cmpxchg2o(dq
, dq_running
, 0, 1, acquire
))){
299 _dispatch_queue_set_thread(dq
);
300 dispatch_queue_t tq
= NULL
;
301 _dispatch_thread_semaphore_t sema
= 0;
302 tq
= invoke(dq
, &sema
);
303 _dispatch_queue_clear_thread(dq
);
304 p
= _dispatch_queue_reset_override_priority(dq
);
305 if (p
> (dq
->dq_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
)) {
306 // Ensure that the root queue sees that this thread was overridden.
307 _dispatch_set_defaultpriority_override();
309 // We do not need to check the result.
310 // When the suspend-count lock is dropped, then the check will happen.
311 (void)dispatch_atomic_dec2o(dq
, dq_running
, release
);
313 _dispatch_thread_semaphore_signal(sema
);
315 _dispatch_introspection_queue_item_complete(dq
);
316 return _dispatch_queue_push(tq
, dq
, p
);
319 dq
->do_next
= DISPATCH_OBJECT_LISTLESS
;
320 _dispatch_introspection_queue_item_complete(dq
);
321 if (!dispatch_atomic_sub2o(dq
, do_suspend_cnt
,
322 DISPATCH_OBJECT_SUSPEND_LOCK
, seq_cst
)) {
323 // seq_cst with atomic store to suspend_cnt <rdar://problem/11915417>
324 if (dispatch_atomic_load2o(dq
, dq_running
, seq_cst
) == 0) {
325 // verify that the queue is idle
326 return _dispatch_queue_wakeup_with_qos_and_release(dq
, p
);
329 _dispatch_release(dq
); // added when the queue is put on the list
332 DISPATCH_ALWAYS_INLINE
333 static inline unsigned long
334 _dispatch_queue_class_probe(dispatch_object_t dou
)
336 dispatch_queue_t dq
= dou
._dq
;
337 struct dispatch_object_s
*tail
;
338 // seq_cst with atomic store to suspend_cnt <rdar://problem/14637483>
339 tail
= dispatch_atomic_load2o(dq
, dq_items_tail
, seq_cst
);
340 return (unsigned long)slowpath(tail
!= NULL
);
343 DISPATCH_ALWAYS_INLINE
345 _dispatch_object_suspended(dispatch_object_t dou
)
347 struct dispatch_object_s
*obj
= dou
._do
;
348 unsigned int suspend_cnt
;
349 // seq_cst with atomic store to tail <rdar://problem/14637483>
350 suspend_cnt
= dispatch_atomic_load2o(obj
, do_suspend_cnt
, seq_cst
);
351 return slowpath(suspend_cnt
>= DISPATCH_OBJECT_SUSPEND_INTERVAL
);
354 DISPATCH_ALWAYS_INLINE
355 static inline dispatch_queue_t
356 _dispatch_queue_get_current(void)
358 return (dispatch_queue_t
)_dispatch_thread_getspecific(dispatch_queue_key
);
361 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
362 static inline dispatch_queue_t
363 _dispatch_get_root_queue(qos_class_t priority
, bool overcommit
)
365 if (overcommit
) switch (priority
) {
366 case _DISPATCH_QOS_CLASS_MAINTENANCE
:
367 return &_dispatch_root_queues
[
368 DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT
];
369 case _DISPATCH_QOS_CLASS_BACKGROUND
:
370 return &_dispatch_root_queues
[
371 DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT
];
372 case _DISPATCH_QOS_CLASS_UTILITY
:
373 return &_dispatch_root_queues
[
374 DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT
];
375 case _DISPATCH_QOS_CLASS_DEFAULT
:
376 return &_dispatch_root_queues
[
377 DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT
];
378 case _DISPATCH_QOS_CLASS_USER_INITIATED
:
379 return &_dispatch_root_queues
[
380 DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT
];
381 case _DISPATCH_QOS_CLASS_USER_INTERACTIVE
:
382 return &_dispatch_root_queues
[
383 DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT
];
384 } else switch (priority
) {
385 case _DISPATCH_QOS_CLASS_MAINTENANCE
:
386 return &_dispatch_root_queues
[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS
];
387 case _DISPATCH_QOS_CLASS_BACKGROUND
:
388 return &_dispatch_root_queues
[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS
];
389 case _DISPATCH_QOS_CLASS_UTILITY
:
390 return &_dispatch_root_queues
[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS
];
391 case _DISPATCH_QOS_CLASS_DEFAULT
:
392 return &_dispatch_root_queues
[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS
];
393 case _DISPATCH_QOS_CLASS_USER_INITIATED
:
394 return &_dispatch_root_queues
[
395 DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS
];
396 case _DISPATCH_QOS_CLASS_USER_INTERACTIVE
:
397 return &_dispatch_root_queues
[
398 DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS
];
403 // Note to later developers: ensure that any initialization changes are
404 // made for statically allocated queues (i.e. _dispatch_main_q).
406 _dispatch_queue_init(dispatch_queue_t dq
)
408 dq
->do_next
= (struct dispatch_queue_s
*)DISPATCH_OBJECT_LISTLESS
;
412 dq
->dq_serialnum
= dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers
,
416 DISPATCH_ALWAYS_INLINE
418 _dispatch_queue_set_bound_thread(dispatch_queue_t dq
)
420 //Tag thread-bound queues with the owning thread
421 dispatch_assert(dq
->dq_is_thread_bound
);
422 dq
->dq_thread
= _dispatch_thread_port();
425 DISPATCH_ALWAYS_INLINE
427 _dispatch_queue_clear_bound_thread(dispatch_queue_t dq
)
429 dispatch_assert(dq
->dq_is_thread_bound
);
430 dq
->dq_thread
= MACH_PORT_NULL
;
433 DISPATCH_ALWAYS_INLINE
434 static inline mach_port_t
435 _dispatch_queue_get_bound_thread(dispatch_queue_t dq
)
437 dispatch_assert(dq
->dq_is_thread_bound
);
438 return dq
->dq_thread
;
442 #pragma mark dispatch_priority
444 DISPATCH_ALWAYS_INLINE
445 static inline pthread_priority_t
446 _dispatch_get_defaultpriority(void)
448 #if HAVE_PTHREAD_WORKQUEUE_QOS
449 pthread_priority_t priority
= (uintptr_t)_dispatch_thread_getspecific(
450 dispatch_defaultpriority_key
);
457 DISPATCH_ALWAYS_INLINE
459 _dispatch_reset_defaultpriority(pthread_priority_t priority
)
461 #if HAVE_PTHREAD_WORKQUEUE_QOS
462 pthread_priority_t old_priority
= _dispatch_get_defaultpriority();
463 // if an inner-loop or'd in the override flag to the per-thread priority,
464 // it needs to be propogated up the chain
465 priority
|= old_priority
& _PTHREAD_PRIORITY_OVERRIDE_FLAG
;
467 if (slowpath(priority
!= old_priority
)) {
468 _dispatch_thread_setspecific(dispatch_defaultpriority_key
,
476 DISPATCH_ALWAYS_INLINE
478 _dispatch_set_defaultpriority_override(void)
480 #if HAVE_PTHREAD_WORKQUEUE_QOS
481 pthread_priority_t old_priority
= _dispatch_get_defaultpriority();
482 pthread_priority_t priority
= old_priority
|
483 _PTHREAD_PRIORITY_OVERRIDE_FLAG
;
485 if (slowpath(priority
!= old_priority
)) {
486 _dispatch_thread_setspecific(dispatch_defaultpriority_key
,
492 DISPATCH_ALWAYS_INLINE
494 _dispatch_reset_defaultpriority_override(void)
496 #if HAVE_PTHREAD_WORKQUEUE_QOS
497 pthread_priority_t old_priority
= _dispatch_get_defaultpriority();
498 pthread_priority_t priority
= old_priority
&
499 ~((pthread_priority_t
)_PTHREAD_PRIORITY_OVERRIDE_FLAG
);
501 if (slowpath(priority
!= old_priority
)) {
502 _dispatch_thread_setspecific(dispatch_defaultpriority_key
,
510 DISPATCH_ALWAYS_INLINE
512 _dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq
,
515 #if HAVE_PTHREAD_WORKQUEUE_QOS
516 const pthread_priority_t rootqueue_flag
= _PTHREAD_PRIORITY_ROOTQUEUE_FLAG
;
517 const pthread_priority_t inherited_flag
= _PTHREAD_PRIORITY_INHERIT_FLAG
;
518 pthread_priority_t dqp
= dq
->dq_priority
, tqp
= tq
->dq_priority
;
519 if ((!dqp
|| (dqp
& inherited_flag
)) && (tqp
& rootqueue_flag
)) {
520 dq
->dq_priority
= (tqp
& ~rootqueue_flag
) | inherited_flag
;
527 DISPATCH_ALWAYS_INLINE
528 static inline pthread_priority_t
529 _dispatch_set_defaultpriority(pthread_priority_t priority
)
531 #if HAVE_PTHREAD_WORKQUEUE_QOS
532 pthread_priority_t old_priority
= _dispatch_get_defaultpriority();
534 pthread_priority_t flags
, defaultqueue
, basepri
;
535 flags
= (priority
& _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
);
536 defaultqueue
= (old_priority
& _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
);
537 basepri
= (old_priority
& ~_PTHREAD_PRIORITY_FLAGS_MASK
);
538 priority
&= ~_PTHREAD_PRIORITY_FLAGS_MASK
;
540 flags
= _PTHREAD_PRIORITY_INHERIT_FLAG
| defaultqueue
;
542 } else if (priority
< basepri
&& !defaultqueue
) { // rdar://16349734
545 priority
|= flags
| (old_priority
& _PTHREAD_PRIORITY_OVERRIDE_FLAG
);
547 if (slowpath(priority
!= old_priority
)) {
548 _dispatch_thread_setspecific(dispatch_defaultpriority_key
,
558 DISPATCH_ALWAYS_INLINE
559 static inline pthread_priority_t
560 _dispatch_priority_adopt(pthread_priority_t priority
, unsigned long flags
)
562 #if HAVE_PTHREAD_WORKQUEUE_QOS
563 pthread_priority_t defaultpri
= _dispatch_get_defaultpriority();
564 bool enforce
, inherited
, defaultqueue
;
565 enforce
= (flags
& DISPATCH_PRIORITY_ENFORCE
) ||
566 (priority
& _PTHREAD_PRIORITY_ENFORCE_FLAG
);
567 inherited
= (defaultpri
& _PTHREAD_PRIORITY_INHERIT_FLAG
);
568 defaultqueue
= (defaultpri
& _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
);
569 defaultpri
&= ~_PTHREAD_PRIORITY_FLAGS_MASK
;
570 priority
&= ~_PTHREAD_PRIORITY_FLAGS_MASK
;
573 } else if (!enforce
) {
574 if (priority
< defaultpri
) {
575 if (defaultqueue
) enforce
= true; // rdar://16349734
576 } else if (inherited
|| defaultqueue
) {
579 } else if (priority
< defaultpri
&& !defaultqueue
) { // rdar://16349734
582 return enforce
? priority
: defaultpri
;
584 (void)priority
; (void)flags
;
589 DISPATCH_ALWAYS_INLINE
590 static inline pthread_priority_t
591 _dispatch_get_priority(void)
593 #if HAVE_PTHREAD_WORKQUEUE_QOS
594 pthread_priority_t priority
= (uintptr_t)_dispatch_thread_getspecific(
595 dispatch_priority_key
);
596 return (priority
& ~_PTHREAD_PRIORITY_FLAGS_MASK
);
602 DISPATCH_ALWAYS_INLINE
604 _dispatch_set_priority_and_mach_voucher(pthread_priority_t priority
,
607 #if HAVE_PTHREAD_WORKQUEUE_QOS
608 _pthread_set_flags_t flags
= 0;
609 if (priority
&& _dispatch_set_qos_class_enabled
) {
610 pthread_priority_t old_priority
= _dispatch_get_priority();
611 if (priority
!= old_priority
&& old_priority
) {
612 flags
|= _PTHREAD_SET_SELF_QOS_FLAG
;
615 if (kv
!= VOUCHER_NO_MACH_VOUCHER
) {
616 #if VOUCHER_USE_MACH_VOUCHER
617 flags
|= _PTHREAD_SET_SELF_VOUCHER_FLAG
;
621 int r
= _pthread_set_properties_self(flags
, priority
, kv
);
622 (void)dispatch_assume_zero(r
);
623 #elif VOUCHER_USE_MACH_VOUCHER
624 #error Invalid build configuration
626 (void)priority
; (void)kv
;
630 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
631 static inline voucher_t
632 _dispatch_set_priority_and_adopt_voucher(pthread_priority_t priority
,
635 pthread_priority_t p
= (priority
!= DISPATCH_NO_PRIORITY
) ? priority
: 0;
636 voucher_t ov
= DISPATCH_NO_VOUCHER
;
637 mach_voucher_t kv
= VOUCHER_NO_MACH_VOUCHER
;
638 if (voucher
!= DISPATCH_NO_VOUCHER
) {
640 kv
= _voucher_swap_and_get_mach_voucher(ov
, voucher
);
642 _dispatch_set_priority_and_mach_voucher(p
, kv
);
646 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
647 static inline voucher_t
648 _dispatch_adopt_priority_and_voucher(pthread_priority_t priority
,
649 voucher_t voucher
, unsigned long flags
)
651 pthread_priority_t p
= 0;
652 if (priority
!= DISPATCH_NO_PRIORITY
) {
653 p
= _dispatch_priority_adopt(priority
, flags
);
655 return _dispatch_set_priority_and_adopt_voucher(p
, voucher
);
658 DISPATCH_ALWAYS_INLINE
660 _dispatch_adopt_priority_and_replace_voucher(pthread_priority_t priority
,
661 voucher_t voucher
, unsigned long flags
)
664 ov
= _dispatch_adopt_priority_and_voucher(priority
, voucher
, flags
);
665 if (voucher
!= DISPATCH_NO_VOUCHER
&& ov
) _voucher_release(ov
);
668 DISPATCH_ALWAYS_INLINE
670 _dispatch_set_priority_and_replace_voucher(pthread_priority_t priority
,
674 ov
= _dispatch_set_priority_and_adopt_voucher(priority
, voucher
);
675 if (voucher
!= DISPATCH_NO_VOUCHER
&& ov
) _voucher_release(ov
);
678 DISPATCH_ALWAYS_INLINE
680 _dispatch_set_priority(pthread_priority_t priority
)
682 _dispatch_set_priority_and_mach_voucher(priority
, VOUCHER_NO_MACH_VOUCHER
);
685 DISPATCH_ALWAYS_INLINE
686 static inline pthread_priority_t
687 _dispatch_priority_normalize(pthread_priority_t pp
)
689 dispatch_assert_zero(pp
& ~(pthread_priority_t
)
690 _PTHREAD_PRIORITY_QOS_CLASS_MASK
);
691 unsigned int qosbits
= (unsigned int)pp
, idx
;
692 if (!qosbits
) return 0;
693 idx
= (unsigned int)(sizeof(qosbits
)*8) -
694 (unsigned int)__builtin_clz(qosbits
) - 1;
698 DISPATCH_ALWAYS_INLINE
700 _dispatch_queue_need_override(dispatch_queue_t dq
, pthread_priority_t pp
)
702 if (!pp
|| dx_type(dq
) == DISPATCH_QUEUE_ROOT_TYPE
) return false;
703 uint32_t p
= (pp
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
);
704 uint32_t o
= dq
->dq_override
;
708 DISPATCH_ALWAYS_INLINE
710 _dispatch_queue_need_override_retain(dispatch_queue_t dq
, pthread_priority_t pp
)
712 bool override
= _dispatch_queue_need_override(dq
, pp
);
713 if (override
) _dispatch_retain(dq
);
717 DISPATCH_ALWAYS_INLINE
719 _dispatch_queue_override_priority(dispatch_queue_t dq
, pthread_priority_t pp
)
721 uint32_t p
= (pp
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
);
722 uint32_t o
= dq
->dq_override
;
723 if (o
< p
) o
= dispatch_atomic_or_orig2o(dq
, dq_override
, p
, relaxed
);
727 DISPATCH_ALWAYS_INLINE
728 static inline pthread_priority_t
729 _dispatch_queue_get_override_priority(dispatch_queue_t dq
)
731 uint32_t p
= (dq
->dq_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
);
732 uint32_t o
= dq
->dq_override
;
733 if (o
== p
) return o
;
734 return _dispatch_priority_normalize(o
);
737 DISPATCH_ALWAYS_INLINE
739 _dispatch_queue_set_override_priority(dispatch_queue_t dq
)
742 if (!(dq
->dq_priority
& _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
)) {
743 p
= dq
->dq_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
;
745 dispatch_atomic_store2o(dq
, dq_override
, p
, relaxed
);
748 DISPATCH_ALWAYS_INLINE
749 static inline pthread_priority_t
750 _dispatch_queue_reset_override_priority(dispatch_queue_t dq
)
753 if (!(dq
->dq_priority
& _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
)) {
754 p
= dq
->dq_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
;
756 uint32_t o
= dispatch_atomic_xchg2o(dq
, dq_override
, p
, relaxed
);
757 if (o
== p
) return o
;
758 return _dispatch_priority_normalize(o
);
761 DISPATCH_ALWAYS_INLINE
762 static inline pthread_priority_t
763 _dispatch_priority_propagate(void)
765 #if HAVE_PTHREAD_WORKQUEUE_QOS
766 pthread_priority_t priority
= _dispatch_get_priority();
767 if (priority
> _dispatch_user_initiated_priority
) {
768 // Cap QOS for propagation at user-initiated <rdar://16681262&16998036>
769 priority
= _dispatch_user_initiated_priority
;
777 // including maintenance
778 DISPATCH_ALWAYS_INLINE
780 _dispatch_is_background_thread(void)
782 #if HAVE_PTHREAD_WORKQUEUE_QOS
783 pthread_priority_t priority
;
784 priority
= _dispatch_get_priority();
785 return priority
&& (priority
<= _dispatch_background_priority
);
792 #pragma mark dispatch_block_t
796 DISPATCH_ALWAYS_INLINE
798 _dispatch_block_has_private_data(const dispatch_block_t block
)
800 extern void (*_dispatch_block_special_invoke
)(void*);
801 return (_dispatch_Block_invoke(block
) == _dispatch_block_special_invoke
);
804 DISPATCH_ALWAYS_INLINE
805 static inline dispatch_block_private_data_t
806 _dispatch_block_get_data(const dispatch_block_t db
)
808 if (!_dispatch_block_has_private_data(db
)) {
811 // Keep in sync with _dispatch_block_create implementation
812 uint8_t *x
= (uint8_t *)db
;
813 // x points to base of struct Block_layout
814 x
+= sizeof(struct Block_layout
);
815 // x points to addresss of captured block
816 x
+= sizeof(dispatch_block_t
);
818 // x points to addresss of captured voucher
819 x
+= sizeof(voucher_t
);
821 // x points to base of captured dispatch_block_private_data_s structure
822 dispatch_block_private_data_t dbpd
= (dispatch_block_private_data_t
)x
;
823 if (dbpd
->dbpd_magic
!= DISPATCH_BLOCK_PRIVATE_DATA_MAGIC
) {
824 DISPATCH_CRASH("Corruption of dispatch block object");
829 DISPATCH_ALWAYS_INLINE
830 static inline pthread_priority_t
831 _dispatch_block_get_priority(const dispatch_block_t db
)
833 dispatch_block_private_data_t dbpd
= _dispatch_block_get_data(db
);
834 return dbpd
? dbpd
->dbpd_priority
: 0;
837 DISPATCH_ALWAYS_INLINE
838 static inline dispatch_block_flags_t
839 _dispatch_block_get_flags(const dispatch_block_t db
)
841 dispatch_block_private_data_t dbpd
= _dispatch_block_get_data(db
);
842 return dbpd
? dbpd
->dbpd_flags
: 0;
845 #define DISPATCH_BLOCK_HAS(flag, db) \
846 ((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_HAS_ ## flag) != 0)
847 #define DISPATCH_BLOCK_IS(flag, db) \
848 ((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_ ## flag) != 0)
853 #pragma mark dispatch_continuation_t
855 DISPATCH_ALWAYS_INLINE
856 static inline dispatch_continuation_t
857 _dispatch_continuation_alloc_cacheonly(void)
859 dispatch_continuation_t dc
= (dispatch_continuation_t
)
860 fastpath(_dispatch_thread_getspecific(dispatch_cache_key
));
862 _dispatch_thread_setspecific(dispatch_cache_key
, dc
->do_next
);
867 DISPATCH_ALWAYS_INLINE
868 static inline dispatch_continuation_t
869 _dispatch_continuation_alloc(void)
871 dispatch_continuation_t dc
=
872 fastpath(_dispatch_continuation_alloc_cacheonly());
874 return _dispatch_continuation_alloc_from_heap();
879 DISPATCH_ALWAYS_INLINE
880 static inline dispatch_continuation_t
881 _dispatch_continuation_free_cacheonly(dispatch_continuation_t dc
)
883 dispatch_continuation_t prev_dc
= (dispatch_continuation_t
)
884 fastpath(_dispatch_thread_getspecific(dispatch_cache_key
));
885 int cnt
= prev_dc
? prev_dc
->dc_cache_cnt
+ 1 : 1;
886 // Cap continuation cache
887 if (slowpath(cnt
> _dispatch_continuation_cache_limit
)) {
890 dc
->do_next
= prev_dc
;
891 dc
->dc_cache_cnt
= cnt
;
892 _dispatch_thread_setspecific(dispatch_cache_key
, dc
);
896 DISPATCH_ALWAYS_INLINE
898 _dispatch_continuation_free(dispatch_continuation_t dc
)
900 dc
= _dispatch_continuation_free_cacheonly(dc
);
902 _dispatch_continuation_free_to_cache_limit(dc
);
908 DISPATCH_ALWAYS_INLINE_NDEBUG
910 _dispatch_continuation_pop(dispatch_object_t dou
)
912 dispatch_continuation_t dc
= dou
._dc
, dc1
;
915 _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dou
);
916 if (DISPATCH_OBJ_IS_VTABLE(dou
._do
)) {
917 return dx_invoke(dou
._do
);
920 // Add the item back to the cache before calling the function. This
921 // allows the 'hot' continuation to be used for a quick callback.
923 // The ccache version is per-thread.
924 // Therefore, the object has not been reused yet.
925 // This generates better assembly.
926 if ((long)dc
->do_vtable
& DISPATCH_OBJ_ASYNC_BIT
) {
927 _dispatch_continuation_voucher_adopt(dc
);
928 dc1
= _dispatch_continuation_free_cacheonly(dc
);
932 if ((long)dc
->do_vtable
& DISPATCH_OBJ_GROUP_BIT
) {
937 _dispatch_client_callout(dc
->dc_ctxt
, dc
->dc_func
);
939 dispatch_group_leave(dg
);
940 _dispatch_release(dg
);
942 _dispatch_introspection_queue_item_complete(dou
);
944 _dispatch_continuation_free_to_cache_limit(dc1
);
948 DISPATCH_ALWAYS_INLINE
950 _dispatch_continuation_priority_set(dispatch_continuation_t dc
,
951 pthread_priority_t pp
, dispatch_block_flags_t flags
)
953 #if HAVE_PTHREAD_WORKQUEUE_QOS
954 pthread_priority_t prio
= 0;
955 if (flags
& DISPATCH_BLOCK_HAS_PRIORITY
) {
957 } else if (!(flags
& DISPATCH_BLOCK_NO_QOS_CLASS
)) {
958 prio
= _dispatch_priority_propagate();
960 if (flags
& DISPATCH_BLOCK_ENFORCE_QOS_CLASS
) {
961 prio
|= _PTHREAD_PRIORITY_ENFORCE_FLAG
;
963 dc
->dc_priority
= prio
;
965 (void)dc
; (void)pp
; (void)flags
;
969 DISPATCH_ALWAYS_INLINE
970 static inline pthread_priority_t
971 _dispatch_continuation_get_override_priority(dispatch_queue_t dq
,
972 dispatch_continuation_t dc
)
974 #if HAVE_PTHREAD_WORKQUEUE_QOS
975 pthread_priority_t p
= dc
->dc_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
;
976 bool enforce
= dc
->dc_priority
& _PTHREAD_PRIORITY_ENFORCE_FLAG
;
977 pthread_priority_t dqp
= dq
->dq_priority
& _PTHREAD_PRIORITY_QOS_CLASS_MASK
;
978 bool defaultqueue
= dq
->dq_priority
& _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
;
981 } else if (!enforce
&& (!dqp
|| defaultqueue
)) {
994 #endif // !(USE_OBJC && __OBJC2__)
996 #endif /* __DISPATCH_INLINE_INTERNAL__ */