]> git.saurik.com Git - apple/libdispatch.git/blob - src/inline_internal.h
libdispatch-500.1.5.tar.gz
[apple/libdispatch.git] / src / inline_internal.h
1 /*
2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 /*
22 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
23 * which are subject to change in future releases of Mac OS X. Any applications
24 * relying on these interfaces WILL break.
25 */
26
27 #ifndef __DISPATCH_INLINE_INTERNAL__
28 #define __DISPATCH_INLINE_INTERNAL__
29
30 #ifndef __DISPATCH_INDIRECT__
31 #error "Please #include <dispatch/dispatch.h> instead of this file directly."
32 #include <dispatch/base.h> // for HeaderDoc
33 #endif
34
35 #if DISPATCH_USE_CLIENT_CALLOUT
36
37 DISPATCH_NOTHROW void
38 _dispatch_client_callout(void *ctxt, dispatch_function_t f);
39 DISPATCH_NOTHROW void
40 _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t));
41 DISPATCH_NOTHROW void
42 _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
43 dispatch_mach_msg_t dmsg, mach_error_t error,
44 dispatch_mach_handler_function_t f);
45
46 #else // !DISPATCH_USE_CLIENT_CALLOUT
47
48 DISPATCH_ALWAYS_INLINE
49 static inline void
50 _dispatch_client_callout(void *ctxt, dispatch_function_t f)
51 {
52 return f(ctxt);
53 }
54
55 DISPATCH_ALWAYS_INLINE
56 static inline void
57 _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
58 {
59 return f(ctxt, i);
60 }
61
62 DISPATCH_ALWAYS_INLINE
63 static inline void
64 _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
65 dispatch_mach_msg_t dmsg, mach_error_t error,
66 dispatch_mach_handler_function_t f)
67 {
68 return f(ctxt, reason, dmsg, error);
69 }
70
71 #endif // !DISPATCH_USE_CLIENT_CALLOUT
72
73 #if !(USE_OBJC && __OBJC2__) && !defined(__cplusplus)
74
75 #pragma mark -
76 #pragma mark _os_object_t & dispatch_object_t
77
DISPATCH_ALWAYS_INLINE
static inline _os_object_t
_os_object_retain_internal_inline(_os_object_t obj)
{
	// Bump the internal reference count. A resulting count <= 0 means a
	// retain raced with (or followed) the final release — the crash
	// message calls this "resurrection".
	int ref_cnt = _os_object_refcnt_inc(obj);
	if (slowpath(ref_cnt <= 0)) {
		DISPATCH_CRASH("Resurrection of an object");
	}
	return obj;
}
88
DISPATCH_ALWAYS_INLINE
static inline void
_os_object_release_internal_inline(_os_object_t obj)
{
	// Drop the internal reference count; dispose the object when the
	// count goes below zero exactly once (ref_cnt == -1).
	int ref_cnt = _os_object_refcnt_dec(obj);
	if (fastpath(ref_cnt >= 0)) {
		return;
	}
	if (slowpath(ref_cnt < -1)) {
		DISPATCH_CRASH("Over-release of an object");
	}
#if DISPATCH_DEBUG
	// Internal refs must outlive all external (xref) references.
	if (slowpath(obj->os_obj_xref_cnt >= 0)) {
		DISPATCH_CRASH("Release while external references exist");
	}
#endif
	// _os_object_refcnt_dispose_barrier() is in _os_object_dispose()
	return _os_object_dispose(obj);
}
108
DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_retain(dispatch_object_t dou)
{
	// Internal retain of any dispatch object (result deliberately ignored).
	(void)_os_object_retain_internal_inline(dou._os_obj);
}
115
DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_release(dispatch_object_t dou)
{
	// Internal release of any dispatch object; may dispose it.
	_os_object_release_internal_inline(dou._os_obj);
}
122
123 #pragma mark -
124 #pragma mark dispatch_thread
125
126 DISPATCH_ALWAYS_INLINE
127 static inline void
128 _dispatch_wqthread_override_start(mach_port_t thread,
129 pthread_priority_t priority)
130 {
131 #if HAVE_PTHREAD_WORKQUEUE_QOS
132 if (!_dispatch_set_qos_class_enabled) return;
133 (void)_pthread_workqueue_override_start_direct(thread, priority);
134 #else
135 (void)thread; (void)priority;
136 #endif
137 }
138
139 DISPATCH_ALWAYS_INLINE
140 static inline void
141 _dispatch_wqthread_override_reset(void)
142 {
143 #if HAVE_PTHREAD_WORKQUEUE_QOS
144 if (!_dispatch_set_qos_class_enabled) return;
145 (void)_pthread_workqueue_override_reset();
146 #endif
147 }
148
149 DISPATCH_ALWAYS_INLINE
150 static inline void
151 _dispatch_thread_override_start(mach_port_t thread, pthread_priority_t priority)
152 {
153 #if HAVE_PTHREAD_WORKQUEUE_QOS
154 if (!_dispatch_set_qos_class_enabled) return;
155 (void)_pthread_override_qos_class_start_direct(thread, priority);
156 #else
157 (void)thread; (void)priority;
158 #endif
159 }
160
161 DISPATCH_ALWAYS_INLINE
162 static inline void
163 _dispatch_thread_override_end(mach_port_t thread)
164 {
165 #if HAVE_PTHREAD_WORKQUEUE_QOS
166 if (!_dispatch_set_qos_class_enabled) return;
167 (void)_pthread_override_qos_class_end_direct(thread);
168 #else
169 (void)thread;
170 #endif
171 }
172
173 #pragma mark -
174 #pragma mark dispatch_queue_t
175
// Forward declarations: priority/override helpers defined later in this
// header, needed by the queue push fast paths below.
static inline bool _dispatch_queue_need_override(dispatch_queue_t dq,
		pthread_priority_t pp);
static inline bool _dispatch_queue_need_override_retain(dispatch_queue_t dq,
		pthread_priority_t pp);
static inline bool _dispatch_queue_retain_if_override(dispatch_queue_t dq,
		pthread_priority_t pp);
static inline pthread_priority_t _dispatch_queue_get_override_priority(
		dispatch_queue_t dq);
static inline pthread_priority_t _dispatch_queue_reset_override_priority(
		dispatch_queue_t dq);
static inline pthread_priority_t _dispatch_get_defaultpriority(void);
static inline void _dispatch_set_defaultpriority_override(void);
static inline void _dispatch_reset_defaultpriority(pthread_priority_t priority);
static inline pthread_priority_t _dispatch_get_priority(void);
static inline void _dispatch_set_priority(pthread_priority_t priority);
191
192 DISPATCH_ALWAYS_INLINE
193 static inline dispatch_queue_t
194 _dispatch_queue_get_current(void)
195 {
196 return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
197 }
198
199 DISPATCH_ALWAYS_INLINE
200 static inline void
201 _dispatch_queue_set_thread(dispatch_queue_t dq)
202 {
203 // The manager queue uses dispatch_queue_drain but is thread bound
204 if (!dq->dq_is_thread_bound) {
205 dq->dq_thread = _dispatch_thread_port();
206 }
207 }
208
209 DISPATCH_ALWAYS_INLINE
210 static inline void
211 _dispatch_queue_clear_thread(dispatch_queue_t dq)
212 {
213 if (!dq->dq_is_thread_bound) {
214 dq->dq_thread = MACH_PORT_NULL;
215 }
216 }
217
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head,
		struct dispatch_object_s *tail)
{
	// Atomically append the chain [head..tail] to dq's item list.
	// Returns true when the queue was non-empty before the push.
	struct dispatch_object_s *prev;
	tail->do_next = NULL;
	// release ordering publishes the chain's do_next stores
	prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail, release);
	if (fastpath(prev)) {
		// if we crash here with a value less than 0x1000, then we are at a
		// known bug in client code for example, see _dispatch_queue_dispose
		// or _dispatch_atfork_child
		prev->do_next = head;
	}
	return (prev != NULL);
}
234
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
		dispatch_object_t _tail, pthread_priority_t pp, unsigned int n)
{
	// Push a chain of n objects; take the slow path when the queue was
	// empty (it must be woken up), otherwise only handle a QOS override.
	struct dispatch_object_s *head = _head._do, *tail = _tail._do;
	// retained here; released by the wakeup/slow path when override is set
	bool override = _dispatch_queue_need_override_retain(dq, pp);
	if (!fastpath(_dispatch_queue_push_list2(dq, head, tail))) {
		_dispatch_queue_push_list_slow(dq, pp, head, n, override);
	} else if (override) {
		_dispatch_queue_wakeup_with_qos_and_release(dq, pp);
	}
}
248
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail,
		pthread_priority_t pp)
{
	// Single-object variant of _dispatch_queue_push_list.
	struct dispatch_object_s *tail = _tail._do;
	// retained here; released by the wakeup/slow path when override is set
	bool override = _dispatch_queue_need_override_retain(dq, pp);
	if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) {
		_dispatch_queue_push_slow(dq, pp, tail, override);
	} else if (override) {
		_dispatch_queue_wakeup_with_qos_and_release(dq, pp);
	}
}
262
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail,
		pthread_priority_t pp, bool wakeup)
{
	// Push one object and optionally force a wakeup even when the queue
	// was not empty.
	// caller assumed to have a reference on dq
	struct dispatch_object_s *tail = _tail._do;
	if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) {
		_dispatch_queue_push_slow(dq, pp, tail, false);
	} else if (_dispatch_queue_need_override(dq, pp)) {
		_dispatch_queue_wakeup_with_qos(dq, pp);
	} else if (slowpath(wakeup)) {
		_dispatch_queue_wakeup(dq);
	}
}
278
// Saved scheduling identity of the current thread, captured by
// _dispatch_root_queue_identity_assume and restored by
// _dispatch_root_queue_identity_restore.
struct _dispatch_identity_s {
	pthread_priority_t old_pri;	// thread priority before the switch
	pthread_priority_t old_pp;	// thread default priority before the switch
	dispatch_queue_t old_dq;	// current queue before the switch
};
284
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di,
		dispatch_queue_t assumed_rq)
{
	// Save this thread's identity into *di, then take on the priority and
	// current-queue identity of root queue assumed_rq (stealing path).
	di->old_dq = _dispatch_queue_get_current();
	di->old_pri = _dispatch_get_priority();
	di->old_pp = _dispatch_get_defaultpriority();

	// Both the current and the assumed queue must be root queues.
	dispatch_assert(dx_type(di->old_dq) == DISPATCH_QUEUE_ROOT_TYPE);
	dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_ROOT_TYPE);

	// Keep the thread boosted at its previous priority while it drains
	// on behalf of the assumed root queue.
	_dispatch_wqthread_override_start(_dispatch_thread_port(), di->old_pri);
	_dispatch_set_priority(assumed_rq->dq_priority);
	_dispatch_reset_defaultpriority(assumed_rq->dq_priority);
	_dispatch_thread_setspecific(dispatch_queue_key, assumed_rq);
}
302
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_root_queue_identity_restore(struct _dispatch_identity_s *di)
{
	// Undo _dispatch_root_queue_identity_assume using the saved identity.
	_dispatch_thread_setspecific(dispatch_queue_key, di->old_dq);
	_dispatch_set_priority(di->old_pri);
	_dispatch_reset_defaultpriority(di->old_pp);
	// Ensure that the root queue sees that this thread was overridden.
	_dispatch_set_defaultpriority_override();
}
313
// Drain handler used by _dispatch_queue_class_invoke: returns a queue to
// re-push to (or NULL) and may hand back a thread semaphore to signal.
typedef dispatch_queue_t
_dispatch_queue_class_invoke_handler_t(dispatch_object_t,
		_dispatch_thread_semaphore_t*);
317
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_class_invoke(dispatch_object_t dou,
		dispatch_continuation_t dc, dispatch_invoke_flags_t flags,
		_dispatch_queue_class_invoke_handler_t invoke)
{
	// Common drain driver for queue-like objects: acquire the dq_running
	// lock, run `invoke`, then hand off to the target queue, signal a
	// waiting semaphore, or wake the queue up, depending on the outcome
	// and whether this thread owns the drain or is only stealing it.
	pthread_priority_t p = 0;
	dispatch_queue_t dq = dou._dq;
	bool owning = !slowpath(flags & DISPATCH_INVOKE_STEALING);
	bool overriding = slowpath(flags & DISPATCH_INVOKE_OVERRIDING);

	// 0 -> 1 transition of dq_running acquires the drain "lock".
	if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) &&
			fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){
		_dispatch_queue_set_thread(dq);

		dispatch_queue_t tq = NULL;
		_dispatch_thread_semaphore_t sema = 0;
		struct _dispatch_identity_s di;

		if (overriding) {
			_dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx",
					dq->dq_thread, _dispatch_get_defaultpriority());
			_dispatch_root_queue_identity_assume(&di, dc->dc_other);
		}

		tq = invoke(dq, &sema);
		_dispatch_queue_clear_thread(dq);

		if (!owning && !sema && tq && tq != dq->do_targetq) {
			/*
			 * When (tq && tq != dq->do_targetq) this is a source or mach
			 * channel asking to get to their manager queue.
			 *
			 * Since stealers cannot call _dispatch_queue_push_queue and
			 * retarget those, they need to destroy the override so that
			 * when waking those sources or mach channels on their target queue
			 * we don't risk a stealer taking them over and not be able to
			 * retarget again, effectively live-locking them.
			 *
			 * Also, we're in the `overriding` case so the thread will be marked
			 * dirty by _dispatch_root_queue_identity_restore anyway
			 * so forgetting about p is fine.
			 */
			(void)_dispatch_queue_reset_override_priority(dq);
			p = 0;
		} else if (sema || tq || DISPATCH_OBJECT_SUSPENDED(dq)) {
			// More work is pending: keep the override in place.
			p = _dispatch_queue_get_override_priority(dq);
		} else {
			// Fully drained: consume the override.
			p = _dispatch_queue_reset_override_priority(dq);
		}
		if (overriding) {
			_dispatch_root_queue_identity_restore(&di);
		} else {
			if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
				// Ensure that the root queue sees that this thread was overridden.
				_dispatch_set_defaultpriority_override();
			}
		}

		// Drop the drain "lock" taken above.
		uint32_t running = dispatch_atomic_dec2o(dq, dq_running, release);
		if (sema) {
			_dispatch_thread_semaphore_signal(sema);
		} else if (owning && tq) {
			_dispatch_introspection_queue_item_complete(dq);
			return _dispatch_queue_push_queue(tq, dq, p);
		}
		if (!owning && running == 0) {
			_dispatch_introspection_queue_item_complete(dq);
			return _dispatch_queue_wakeup_with_qos_and_release(dq, p);
		}
	} else if (overriding) {
		// Could not acquire the drain lock: boost whichever thread holds it.
		mach_port_t th = dq->dq_thread;
		if (th) {
			p = _dispatch_queue_get_override_priority(dq);
			_dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx",
					th, p);
			_dispatch_wqthread_override_start(th, p);
		}
	}

	_dispatch_introspection_queue_item_complete(dq);
	if (owning) {
		dq->do_next = DISPATCH_OBJECT_LISTLESS;
		if (!dispatch_atomic_sub2o(dq, do_suspend_cnt,
				DISPATCH_OBJECT_SUSPEND_LOCK, seq_cst)) {
			// seq_cst with atomic store to suspend_cnt <rdar://problem/11915417>
			if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) {
				// verify that the queue is idle
				return _dispatch_queue_wakeup_with_qos_and_release(dq, p);
			}
		}
	}
	_dispatch_release(dq); // added when the queue is put on the list
}
412
DISPATCH_ALWAYS_INLINE
static inline unsigned long
_dispatch_queue_class_probe(dispatch_object_t dou)
{
	// Non-zero when the queue has items pending.
	dispatch_queue_t dq = dou._dq;
	struct dispatch_object_s *tail;
	// seq_cst with atomic store to suspend_cnt <rdar://problem/14637483>
	tail = dispatch_atomic_load2o(dq, dq_items_tail, seq_cst);
	return (unsigned long)slowpath(tail != NULL);
}
423
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_suspended(dispatch_object_t dou)
{
	// True when the object's suspend count holds at least one full
	// suspension (i.e. not merely the drain lock bit).
	struct dispatch_object_s *obj = dou._do;
	unsigned int suspend_cnt;
	// seq_cst with atomic store to tail <rdar://problem/14637483>
	suspend_cnt = dispatch_atomic_load2o(obj, do_suspend_cnt, seq_cst);
	return slowpath(suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL);
}
434
// Map a QOS class (and overcommit-ness) to the corresponding global root
// queue; returns NULL for an unknown QOS class.
DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline dispatch_queue_t
_dispatch_get_root_queue(qos_class_t priority, bool overcommit)
{
	if (overcommit) switch (priority) {
	case _DISPATCH_QOS_CLASS_MAINTENANCE:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_BACKGROUND:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_UTILITY:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_DEFAULT:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_USER_INITIATED:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT];
	} else switch (priority) {
	case _DISPATCH_QOS_CLASS_MAINTENANCE:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS];
	case _DISPATCH_QOS_CLASS_BACKGROUND:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS];
	case _DISPATCH_QOS_CLASS_UTILITY:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS];
	case _DISPATCH_QOS_CLASS_DEFAULT:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS];
	case _DISPATCH_QOS_CLASS_USER_INITIATED:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS];
	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS];
	}
	return NULL;
}
476
// Note to later developers: ensure that any initialization changes are
// made for statically allocated queues (i.e. _dispatch_main_q).
static inline void
_dispatch_queue_init(dispatch_queue_t dq)
{
	// Baseline initialization for a freshly allocated queue.
	dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;

	dq->dq_running = 0;
	dq->dq_width = 1; // serial by default
	dq->dq_override_voucher = DISPATCH_NO_VOUCHER;
	// Monotonically increasing serial number shared across all queues.
	dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers,
			relaxed);
}
490
491 DISPATCH_ALWAYS_INLINE
492 static inline void
493 _dispatch_queue_set_bound_thread(dispatch_queue_t dq)
494 {
495 //Tag thread-bound queues with the owning thread
496 dispatch_assert(dq->dq_is_thread_bound);
497 dq->dq_thread = _dispatch_thread_port();
498 }
499
500 DISPATCH_ALWAYS_INLINE
501 static inline void
502 _dispatch_queue_clear_bound_thread(dispatch_queue_t dq)
503 {
504 dispatch_assert(dq->dq_is_thread_bound);
505 dq->dq_thread = MACH_PORT_NULL;
506 }
507
508 DISPATCH_ALWAYS_INLINE
509 static inline mach_port_t
510 _dispatch_queue_get_bound_thread(dispatch_queue_t dq)
511 {
512 dispatch_assert(dq->dq_is_thread_bound);
513 return dq->dq_thread;
514 }
515
516 DISPATCH_ALWAYS_INLINE
517 static inline dispatch_pthread_root_queue_observer_hooks_t
518 _dispatch_get_pthread_root_queue_observer_hooks(void)
519 {
520 return _dispatch_thread_getspecific(
521 dispatch_pthread_root_queue_observer_hooks_key);
522 }
523
524 DISPATCH_ALWAYS_INLINE
525 static inline void
526 _dispatch_set_pthread_root_queue_observer_hooks(
527 dispatch_pthread_root_queue_observer_hooks_t observer_hooks)
528 {
529 _dispatch_thread_setspecific(dispatch_pthread_root_queue_observer_hooks_key,
530 observer_hooks);
531 }
532
533 #pragma mark -
534 #pragma mark dispatch_priority
535
536 DISPATCH_ALWAYS_INLINE
537 static inline pthread_priority_t
538 _dispatch_get_defaultpriority(void)
539 {
540 #if HAVE_PTHREAD_WORKQUEUE_QOS
541 pthread_priority_t priority = (uintptr_t)_dispatch_thread_getspecific(
542 dispatch_defaultpriority_key);
543 return priority;
544 #else
545 return 0;
546 #endif
547 }
548
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_reset_defaultpriority(pthread_priority_t priority)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	// Restore a previously saved default priority, preserving any
	// override flag set in the meantime.
	pthread_priority_t old_priority = _dispatch_get_defaultpriority();
	// if an inner-loop or'd in the override flag to the per-thread priority,
	// it needs to be propagated up the chain
	priority |= old_priority & _PTHREAD_PRIORITY_OVERRIDE_FLAG;

	if (slowpath(priority != old_priority)) {
		_dispatch_thread_setspecific(dispatch_defaultpriority_key,
				(void*)priority);
	}
#else
	(void)priority;
#endif
}
567
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_defaultpriority_override(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	// Mark the per-thread default priority as overridden (sticky until
	// _dispatch_reset_defaultpriority_override clears it).
	pthread_priority_t old_priority = _dispatch_get_defaultpriority();
	pthread_priority_t priority = old_priority |
			_PTHREAD_PRIORITY_OVERRIDE_FLAG;

	if (slowpath(priority != old_priority)) {
		_dispatch_thread_setspecific(dispatch_defaultpriority_key,
				(void*)priority);
	}
#endif
}
583
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_reset_defaultpriority_override(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	// Clear the override flag from the per-thread default priority;
	// returns true when the flag was actually set.
	pthread_priority_t old_priority = _dispatch_get_defaultpriority();
	pthread_priority_t priority = old_priority &
			~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG);

	if (slowpath(priority != old_priority)) {
		_dispatch_thread_setspecific(dispatch_defaultpriority_key,
				(void*)priority);
		return true;
	}
#endif
	return false;
}
601
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
		dispatch_queue_t tq)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	// When dq has no QOS of its own (or an already-inherited one) and its
	// target is a root queue, inherit the target's QOS and mark it so.
	const pthread_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG;
	const pthread_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG;
	pthread_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority;
	if ((!(dqp & ~_PTHREAD_PRIORITY_FLAGS_MASK) || (dqp & inherited_flag)) &&
			(tqp & rootqueue_flag)) {
		dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag;
	}
#else
	(void)dq; (void)tq;
#endif
}
619
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_set_defaultpriority(pthread_priority_t priority)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	// Install a new per-thread default priority, merging flags from the
	// previous value; returns the previous value so it can be restored
	// with _dispatch_reset_defaultpriority.
	pthread_priority_t old_priority = _dispatch_get_defaultpriority();
	if (old_priority) {
		pthread_priority_t flags, defaultqueue, basepri;
		flags = (priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
		defaultqueue = (old_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
		basepri = (old_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK);
		priority &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
		if (!priority) {
			// No QOS requested: keep the previous base priority.
			flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue;
			priority = basepri;
		} else if (priority < basepri && !defaultqueue) { // rdar://16349734
			priority = basepri;
		}
		priority |= flags | (old_priority & _PTHREAD_PRIORITY_OVERRIDE_FLAG);
	}
	if (slowpath(priority != old_priority)) {
		_dispatch_thread_setspecific(dispatch_defaultpriority_key,
				(void*)priority);
	}
	return old_priority;
#else
	(void)priority;
	return 0;
#endif
}
650
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_adopt(pthread_priority_t priority, unsigned long flags)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	// Decide which priority a work item should run at: the requested
	// `priority` (when enforced) or the thread's default priority.
	pthread_priority_t defaultpri = _dispatch_get_defaultpriority();
	bool enforce, inherited, defaultqueue;
	enforce = (flags & DISPATCH_PRIORITY_ENFORCE) ||
			(priority & _PTHREAD_PRIORITY_ENFORCE_FLAG);
	inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG);
	defaultqueue = (defaultpri & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
	defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
	priority &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
	if (!priority) {
		enforce = false;
	} else if (!enforce) {
		if (priority < defaultpri) {
			if (defaultqueue) enforce = true; // rdar://16349734
		} else if (inherited || defaultqueue) {
			enforce = true;
		}
	} else if (priority < defaultpri && !defaultqueue) { // rdar://16349734
		enforce = false;
	}
	return enforce ? priority : defaultpri;
#else
	(void)priority; (void)flags;
	return 0;
#endif
}
681
682 DISPATCH_ALWAYS_INLINE
683 static inline pthread_priority_t
684 _dispatch_get_priority(void)
685 {
686 #if HAVE_PTHREAD_WORKQUEUE_QOS
687 pthread_priority_t priority = (uintptr_t)_dispatch_thread_getspecific(
688 dispatch_priority_key);
689 return (priority & ~_PTHREAD_PRIORITY_FLAGS_MASK);
690 #else
691 return 0;
692 #endif
693 }
694
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_priority_and_mach_voucher(pthread_priority_t priority,
		mach_voucher_t kv)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	// Apply a QOS change and/or a mach voucher to the current thread in a
	// single _pthread_set_properties_self() call; no-op when nothing
	// actually changes.
	_pthread_set_flags_t flags = 0;
	if (priority && _dispatch_set_qos_class_enabled) {
		pthread_priority_t old_priority = _dispatch_get_priority();
		if (priority != old_priority && old_priority) {
			flags |= _PTHREAD_SET_SELF_QOS_FLAG;
		}
	}
	if (kv != VOUCHER_NO_MACH_VOUCHER) {
#if VOUCHER_USE_MACH_VOUCHER
		flags |= _PTHREAD_SET_SELF_VOUCHER_FLAG;
#endif
	}
	if (!flags) return;
	int r = _pthread_set_properties_self(flags, priority, kv);
	(void)dispatch_assume_zero(r);
#elif VOUCHER_USE_MACH_VOUCHER
#error Invalid build configuration
#else
	(void)priority; (void)kv;
#endif
}
722
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline voucher_t
_dispatch_set_priority_and_adopt_voucher(pthread_priority_t priority,
		voucher_t voucher)
{
	// Adopt `voucher` (unless DISPATCH_NO_VOUCHER) and apply `priority`
	// (unless DISPATCH_NO_PRIORITY); returns the previously adopted
	// voucher, which the caller owns (see DISPATCH_WARN_RESULT).
	pthread_priority_t p = (priority != DISPATCH_NO_PRIORITY) ? priority : 0;
	voucher_t ov = DISPATCH_NO_VOUCHER;
	mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER;
	if (voucher != DISPATCH_NO_VOUCHER) {
		ov = _voucher_get();
		kv = _voucher_swap_and_get_mach_voucher(ov, voucher);
	}
	_dispatch_set_priority_and_mach_voucher(p, kv);
	return ov;
}
738
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline voucher_t
_dispatch_adopt_priority_and_voucher(pthread_priority_t priority,
		voucher_t v, unsigned long flags)
{
	// Adopt a (possibly queue-overridden) voucher and the adopted form of
	// `priority`; returns the previous voucher for the caller to release.
	pthread_priority_t p = 0;
	if (priority != DISPATCH_NO_PRIORITY) {
		p = _dispatch_priority_adopt(priority, flags);
	}
	if (!(flags & DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE)) {
		dispatch_queue_t dq = _dispatch_queue_get_current();
		if (dq && dq->dq_override_voucher != DISPATCH_NO_VOUCHER) {
			// The current queue's override voucher replaces the passed-in one.
			if (v != DISPATCH_NO_VOUCHER && v) _voucher_release(v);
			v = dq->dq_override_voucher;
			if (v) _voucher_retain(v);
		}
	}
	return _dispatch_set_priority_and_adopt_voucher(p, v);
}
758
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline voucher_t
_dispatch_adopt_queue_override_voucher(dispatch_queue_t dq)
{
	// Adopt dq's override voucher (if it has one) without changing QOS;
	// returns the previous voucher for the caller to release.
	voucher_t v = dq->dq_override_voucher;
	if (v == DISPATCH_NO_VOUCHER) return DISPATCH_NO_VOUCHER;
	if (v) _voucher_retain(v);
	return _dispatch_set_priority_and_adopt_voucher(DISPATCH_NO_PRIORITY, v);
}
768
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_adopt_priority_and_replace_voucher(pthread_priority_t priority,
		voucher_t voucher, unsigned long flags)
{
	// Like _dispatch_adopt_priority_and_voucher, but releases the
	// previously adopted voucher instead of returning it.
	voucher_t ov;
	ov = _dispatch_adopt_priority_and_voucher(priority, voucher, flags);
	if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov);
}
778
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_reset_priority_and_voucher(pthread_priority_t priority,
		voucher_t voucher)
{
	// Restore a previously saved priority/voucher pair, releasing the
	// voucher that was adopted in the meantime.
	voucher_t ov;
	ov = _dispatch_set_priority_and_adopt_voucher(priority, voucher);
	if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov);
}
788
789 DISPATCH_ALWAYS_INLINE
790 static inline void
791 _dispatch_reset_voucher(voucher_t voucher)
792 {
793 return _dispatch_reset_priority_and_voucher(DISPATCH_NO_PRIORITY, voucher);
794 }
795
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_priority(pthread_priority_t priority)
{
	// Apply a QOS change to the current thread without touching vouchers.
	_dispatch_set_priority_and_mach_voucher(priority, VOUCHER_NO_MACH_VOUCHER);
}
802
803 DISPATCH_ALWAYS_INLINE
804 static inline pthread_priority_t
805 _dispatch_priority_normalize(pthread_priority_t pp)
806 {
807 dispatch_assert_zero(pp & ~(pthread_priority_t)
808 _PTHREAD_PRIORITY_QOS_CLASS_MASK);
809 unsigned int qosbits = (unsigned int)pp, idx;
810 if (!qosbits) return 0;
811 idx = (unsigned int)(sizeof(qosbits)*8) -
812 (unsigned int)__builtin_clz(qosbits) - 1;
813 return (1 << idx);
814 }
815
816 DISPATCH_ALWAYS_INLINE
817 static inline bool
818 _dispatch_queue_need_override(dispatch_queue_t dq, pthread_priority_t pp)
819 {
820 if (!pp || dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE) return false;
821 uint32_t p = (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
822 uint32_t o = dq->dq_override;
823 return (o < p);
824 }
825
826 DISPATCH_ALWAYS_INLINE
827 static inline bool
828 _dispatch_queue_need_override_retain(dispatch_queue_t dq, pthread_priority_t pp)
829 {
830 bool override = _dispatch_queue_need_override(dq, pp);
831 if (override) _dispatch_retain(dq);
832 return override;
833 }
834
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_override_priority(dispatch_queue_t dq, pthread_priority_t *pp,
		bool *was_overridden)
{
	// Raise dq's override QOS to *pp when higher; on return *pp holds the
	// normalized resulting override. Returns true when this call raised
	// the override.
	uint32_t o = dq->dq_override;
	uint32_t p = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
	if (o < p) {
		// atomic OR keeps concurrent raisers monotonic
		o = dispatch_atomic_or_orig2o(dq, dq_override, p, relaxed);
		if (was_overridden) {
			o = (uint32_t)_dispatch_priority_normalize(o);
		}
		*pp = _dispatch_priority_normalize(o | p);
	} else {
		o = (uint32_t)_dispatch_priority_normalize(o);
		*pp = o;
	}
	if (was_overridden) {
		// Overridden when the (old) override exceeds dq's base QOS.
		*was_overridden =
				(dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK) < o;
	}
	return (o < p);
}
858
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_queue_get_override_priority(dispatch_queue_t dq)
{
	// Read dq's override QOS, normalized unless it equals the base QOS.
	uint32_t p = (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
	uint32_t o = dq->dq_override;
	if (o == p) return o;
	return _dispatch_priority_normalize(o);
}
868
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_set_override_priority(dispatch_queue_t dq)
{
	// Reset dq's override QOS to its base QOS (zero for default queues).
	uint32_t p = 0;
	if (!(dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) {
		p = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	}
	dispatch_atomic_store2o(dq, dq_override, p, relaxed);
}
879
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_queue_reset_override_priority(dispatch_queue_t dq)
{
	// Atomically reset dq's override QOS to its base QOS and return the
	// previous override (normalized unless it equals the base QOS).
	uint32_t p = 0;
	if (!(dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) {
		p = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	}
	uint32_t o = dispatch_atomic_xchg2o(dq, dq_override, p, relaxed);
	if (o == p) return o;
	return _dispatch_priority_normalize(o);
}
892
893 DISPATCH_ALWAYS_INLINE
894 static inline pthread_priority_t
895 _dispatch_priority_propagate(void)
896 {
897 #if HAVE_PTHREAD_WORKQUEUE_QOS
898 pthread_priority_t priority = _dispatch_get_priority();
899 if (priority > _dispatch_user_initiated_priority) {
900 // Cap QOS for propagation at user-initiated <rdar://16681262&16998036>
901 priority = _dispatch_user_initiated_priority;
902 }
903 return priority;
904 #else
905 return 0;
906 #endif
907 }
908
909 // including maintenance
910 DISPATCH_ALWAYS_INLINE
911 static inline bool
912 _dispatch_is_background_thread(void)
913 {
914 #if HAVE_PTHREAD_WORKQUEUE_QOS
915 pthread_priority_t priority;
916 priority = _dispatch_get_priority();
917 return priority && (priority <= _dispatch_background_priority);
918 #else
919 return false;
920 #endif
921 }
922
923 #pragma mark -
924 #pragma mark dispatch_block_t
925
926 #ifdef __BLOCKS__
927
928 DISPATCH_ALWAYS_INLINE
929 static inline bool
930 _dispatch_block_has_private_data(const dispatch_block_t block)
931 {
932 extern void (*_dispatch_block_special_invoke)(void*);
933 return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke);
934 }
935
DISPATCH_ALWAYS_INLINE
static inline dispatch_block_private_data_t
_dispatch_block_get_data(const dispatch_block_t db)
{
	// Return the private data captured inside a dispatch block object, or
	// NULL for a plain block.
	if (!_dispatch_block_has_private_data(db)) {
		return NULL;
	}
	// Keep in sync with _dispatch_block_create implementation
	uint8_t *x = (uint8_t *)db;
	// x points to base of struct Block_layout
	x += sizeof(struct Block_layout);
	// x points to base of captured dispatch_block_private_data_s object
	dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x;
	if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) {
		DISPATCH_CRASH("Corruption of dispatch block object");
	}
	return dbpd;
}
954
955 DISPATCH_ALWAYS_INLINE
956 static inline pthread_priority_t
957 _dispatch_block_get_priority(const dispatch_block_t db)
958 {
959 dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
960 return dbpd ? dbpd->dbpd_priority : 0;
961 }
962
963 DISPATCH_ALWAYS_INLINE
964 static inline dispatch_block_flags_t
965 _dispatch_block_get_flags(const dispatch_block_t db)
966 {
967 dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
968 return dbpd ? dbpd->dbpd_flags : 0;
969 }
970
// True when the block's private-data flags contain the given
// DISPATCH_BLOCK_HAS_* flag (token-pasted from `flag`).
#define DISPATCH_BLOCK_HAS(flag, db) \
		((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_HAS_ ## flag) != 0)
// True when the block's private-data flags contain the given
// DISPATCH_BLOCK_* flag (token-pasted from `flag`).
#define DISPATCH_BLOCK_IS(flag, db) \
		((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_ ## flag) != 0)
975
976 #endif
977
978 #pragma mark -
979 #pragma mark dispatch_continuation_t
980
981 DISPATCH_ALWAYS_INLINE
982 static inline dispatch_continuation_t
983 _dispatch_continuation_alloc_cacheonly(void)
984 {
985 dispatch_continuation_t dc = (dispatch_continuation_t)
986 fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
987 if (dc) {
988 _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
989 }
990 return dc;
991 }
992
993 DISPATCH_ALWAYS_INLINE
994 static inline dispatch_continuation_t
995 _dispatch_continuation_alloc(void)
996 {
997 dispatch_continuation_t dc =
998 fastpath(_dispatch_continuation_alloc_cacheonly());
999 if(!dc) {
1000 return _dispatch_continuation_alloc_from_heap();
1001 }
1002 return dc;
1003 }
1004
1005 DISPATCH_ALWAYS_INLINE
1006 static inline dispatch_continuation_t
1007 _dispatch_continuation_free_cacheonly(dispatch_continuation_t dc)
1008 {
1009 dispatch_continuation_t prev_dc = (dispatch_continuation_t)
1010 fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
1011 int cnt = prev_dc ? prev_dc->dc_cache_cnt + 1 : 1;
1012 // Cap continuation cache
1013 if (slowpath(cnt > _dispatch_continuation_cache_limit)) {
1014 return dc;
1015 }
1016 dc->do_next = prev_dc;
1017 dc->dc_cache_cnt = cnt;
1018 _dispatch_thread_setspecific(dispatch_cache_key, dc);
1019 return NULL;
1020 }
1021
1022 DISPATCH_ALWAYS_INLINE
1023 static inline void
1024 _dispatch_continuation_free(dispatch_continuation_t dc)
1025 {
1026 dc = _dispatch_continuation_free_cacheonly(dc);
1027 if (slowpath(dc)) {
1028 _dispatch_continuation_free_to_cache_limit(dc);
1029 }
1030 }
1031
1032 #include "trace.h"
1033
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_invoke(dispatch_object_t dou, dispatch_queue_t dq)
{
	// Invoke one work item popped from queue dq. Objects with a vtable are
	// invoked through dx_invoke; raw continuations have their stored
	// function called via the client callout, with voucher adoption and
	// group-leave bookkeeping driven by the tag bits in do_vtable.
	dispatch_continuation_t dc = dou._dc, dc1;
	dispatch_group_t dg;

	_dispatch_trace_continuation_pop(dq, dou);
	if (DISPATCH_OBJ_IS_VTABLE(dou._do)) {
		// Real dispatch object: delegate to its vtable invoke function.
		return dx_invoke(dou._do, NULL, DISPATCH_INVOKE_NONE);
	}

	// Add the item back to the cache before calling the function. This
	// allows the 'hot' continuation to be used for a quick callback.
	//
	// The ccache version is per-thread.
	// Therefore, the object has not been reused yet.
	// This generates better assembly.
	if ((long)dc->do_vtable & DISPATCH_OBJ_ASYNC_BIT) {
		_dispatch_continuation_voucher_adopt(dc);
		// dc may now sit on the per-thread cache; reading its fields below
		// is safe only because this thread cannot reuse it until it
		// allocates again (see comment above).
		dc1 = _dispatch_continuation_free_cacheonly(dc);
	} else {
		dc1 = NULL;
	}
	if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) {
		// For group items, dc_data carries the group to leave/release
		// after the callout.
		dg = dc->dc_data;
	} else {
		dg = NULL;
	}
	_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
	if (dg) {
		dispatch_group_leave(dg);
		_dispatch_release(dg);
	}
	_dispatch_introspection_queue_item_complete(dou);
	if (slowpath(dc1)) {
		// The per-thread cache was full: dispose of the continuation via
		// the slow path now that the callout is done.
		_dispatch_continuation_free_to_cache_limit(dc1);
	}
}
1073
1074 DISPATCH_ALWAYS_INLINE_NDEBUG
1075 static inline void
1076 _dispatch_continuation_pop(dispatch_object_t dou)
1077 {
1078 dispatch_queue_t dq = _dispatch_queue_get_current();
1079 dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
1080 _dispatch_get_pthread_root_queue_observer_hooks();
1081 if (observer_hooks) observer_hooks->queue_will_execute(dq);
1082 _dispatch_continuation_invoke(dou, dq);
1083 if (observer_hooks) observer_hooks->queue_did_execute(dq);
1084 }
1085
1086 DISPATCH_ALWAYS_INLINE
1087 static inline void
1088 _dispatch_continuation_priority_set(dispatch_continuation_t dc,
1089 pthread_priority_t pp, dispatch_block_flags_t flags)
1090 {
1091 #if HAVE_PTHREAD_WORKQUEUE_QOS
1092 pthread_priority_t prio = 0;
1093 if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
1094 prio = pp;
1095 } else if (!(flags & DISPATCH_BLOCK_NO_QOS_CLASS)) {
1096 prio = _dispatch_priority_propagate();
1097 }
1098 if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) {
1099 prio |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
1100 }
1101 dc->dc_priority = prio;
1102 #else
1103 (void)dc; (void)pp; (void)flags;
1104 #endif
1105 }
1106
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_continuation_get_override_priority(dispatch_queue_t dq,
		dispatch_continuation_t dc)
{
	// Resolve which QOS class applies to dc on queue dq: the
	// continuation's own QOS wins when it is marked enforced, or when the
	// queue has no QOS of its own (or is a defaultqueue); otherwise the
	// queue's QOS wins. Returns 0 without pthread QOS support.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t p = dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG;
	pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
	if (!p) {
		// Continuation carries no QOS: nothing to enforce.
		enforce = false;
	} else if (!enforce && (!dqp || defaultqueue)) {
		// Queue has no QOS (or is a defaultqueue): fall back to the
		// continuation's QOS even though it was not explicitly enforced.
		enforce = true;
	}
	if (!enforce) {
		p = dqp;
	}
	return p;
#else
	(void)dq; (void)dc;
	return 0;
#endif
}
1131
1132 #endif // !(USE_OBJC && __OBJC2__) && !defined(__cplusplus)
1133
1134 #endif /* __DISPATCH_INLINE_INTERNAL__ */