/*
 * Scraped from the git.saurik.com mirror of apple/libdispatch.git
 * (tag libdispatch-913.1.6), blob src/inline_internal.h.
 */
1 /*
2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 /*
22 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
23 * which are subject to change in future releases of Mac OS X. Any applications
24 * relying on these interfaces WILL break.
25 */
26
27 #ifndef __DISPATCH_INLINE_INTERNAL__
28 #define __DISPATCH_INLINE_INTERNAL__
29
30 #ifndef __DISPATCH_INDIRECT__
31 #error "Please #include <dispatch/dispatch.h> instead of this file directly."
32 #include <dispatch/base.h> // for HeaderDoc
33 #endif
34
35 #if DISPATCH_USE_CLIENT_CALLOUT
36
37 DISPATCH_NOTHROW void
38 _dispatch_client_callout(void *ctxt, dispatch_function_t f);
39 DISPATCH_NOTHROW void
40 _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t));
41 #if HAVE_MACH
42 DISPATCH_NOTHROW void
43 _dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason,
44 dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f);
45 DISPATCH_NOTHROW void
46 _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
47 dispatch_mach_msg_t dmsg, mach_error_t error,
48 dispatch_mach_handler_function_t f);
49 #endif // HAVE_MACH
50
51 #else // !DISPATCH_USE_CLIENT_CALLOUT
52
53 DISPATCH_ALWAYS_INLINE
54 static inline void
55 _dispatch_client_callout(void *ctxt, dispatch_function_t f)
56 {
57 return f(ctxt);
58 }
59
60 DISPATCH_ALWAYS_INLINE
61 static inline void
62 _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
63 {
64 return f(ctxt, i);
65 }
66
67 #if HAVE_MACH
68 DISPATCH_ALWAYS_INLINE
69 static inline void
70 _dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason,
71 dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f)
72 {
73 return f(ctxt, reason, dmsg);
74 }
75
76 DISPATCH_ALWAYS_INLINE
77 static inline void
78 _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
79 dispatch_mach_msg_t dmsg, mach_error_t error,
80 dispatch_mach_handler_function_t f)
81 {
82 return f(ctxt, reason, dmsg, error);
83 }
84 #endif // HAVE_MACH
85
86 #endif // !DISPATCH_USE_CLIENT_CALLOUT
87
88 #pragma mark -
89 #pragma mark _os_object_t & dispatch_object_t
90 #if DISPATCH_PURE_C
91
92 DISPATCH_ALWAYS_INLINE
93 static inline bool
94 _dispatch_object_has_vtable(dispatch_object_t dou)
95 {
96 uintptr_t dc_flags = dou._dc->dc_flags;
97
98 // vtables are pointers far away from the low page in memory
99 return dc_flags > 0xffful;
100 }
101
102 DISPATCH_ALWAYS_INLINE
103 static inline bool
104 _dispatch_object_is_queue(dispatch_object_t dou)
105 {
106 return _dispatch_object_has_vtable(dou) && dx_vtable(dou._do)->do_push;
107 }
108
109 DISPATCH_ALWAYS_INLINE
110 static inline bool
111 _dispatch_object_is_continuation(dispatch_object_t dou)
112 {
113 if (_dispatch_object_has_vtable(dou)) {
114 return dx_metatype(dou._do) == _DISPATCH_CONTINUATION_TYPE;
115 }
116 return true;
117 }
118
119 DISPATCH_ALWAYS_INLINE
120 static inline bool
121 _dispatch_object_has_type(dispatch_object_t dou, unsigned long type)
122 {
123 return _dispatch_object_has_vtable(dou) && dx_type(dou._do) == type;
124 }
125
126 DISPATCH_ALWAYS_INLINE
127 static inline bool
128 _dispatch_object_is_redirection(dispatch_object_t dou)
129 {
130 return _dispatch_object_has_type(dou,
131 DISPATCH_CONTINUATION_TYPE(ASYNC_REDIRECT));
132 }
133
134 DISPATCH_ALWAYS_INLINE
135 static inline bool
136 _dispatch_object_is_barrier(dispatch_object_t dou)
137 {
138 dispatch_queue_flags_t dq_flags;
139
140 if (!_dispatch_object_has_vtable(dou)) {
141 return (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT);
142 }
143 switch (dx_metatype(dou._do)) {
144 case _DISPATCH_QUEUE_TYPE:
145 case _DISPATCH_SOURCE_TYPE:
146 dq_flags = os_atomic_load2o(dou._dq, dq_atomic_flags, relaxed);
147 return dq_flags & DQF_BARRIER_BIT;
148 default:
149 return false;
150 }
151 }
152
153 DISPATCH_ALWAYS_INLINE
154 static inline bool
155 _dispatch_object_is_sync_waiter(dispatch_object_t dou)
156 {
157 if (_dispatch_object_has_vtable(dou)) {
158 return false;
159 }
160 return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_WAITER_BIT);
161 }
162
163 DISPATCH_ALWAYS_INLINE
164 static inline bool
165 _dispatch_object_is_sync_waiter_non_barrier(dispatch_object_t dou)
166 {
167 if (_dispatch_object_has_vtable(dou)) {
168 return false;
169 }
170 return ((dou._dc->dc_flags &
171 (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_WAITER_BIT)) ==
172 (DISPATCH_OBJ_SYNC_WAITER_BIT));
173 }
174
175 DISPATCH_ALWAYS_INLINE
176 static inline _os_object_t
177 _os_object_retain_internal_n_inline(_os_object_t obj, int n)
178 {
179 int ref_cnt = _os_object_refcnt_add(obj, n);
180 if (unlikely(ref_cnt <= 0)) {
181 _OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
182 }
183 return obj;
184 }
185
186 DISPATCH_ALWAYS_INLINE
187 static inline void
188 _os_object_release_internal_n_no_dispose_inline(_os_object_t obj, int n)
189 {
190 int ref_cnt = _os_object_refcnt_sub(obj, n);
191 if (likely(ref_cnt >= 0)) {
192 return;
193 }
194 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
195 }
196
197 DISPATCH_ALWAYS_INLINE
198 static inline void
199 _os_object_release_internal_n_inline(_os_object_t obj, int n)
200 {
201 int ref_cnt = _os_object_refcnt_sub(obj, n);
202 if (likely(ref_cnt >= 0)) {
203 return;
204 }
205 if (unlikely(ref_cnt < -1)) {
206 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
207 }
208 #if DISPATCH_DEBUG
209 int xref_cnt = obj->os_obj_xref_cnt;
210 if (unlikely(xref_cnt >= 0)) {
211 DISPATCH_INTERNAL_CRASH(xref_cnt,
212 "Release while external references exist");
213 }
214 #endif
215 // _os_object_refcnt_dispose_barrier() is in _os_object_dispose()
216 return _os_object_dispose(obj);
217 }
218
219 DISPATCH_ALWAYS_INLINE_NDEBUG
220 static inline void
221 _dispatch_retain(dispatch_object_t dou)
222 {
223 (void)_os_object_retain_internal_n_inline(dou._os_obj, 1);
224 }
225
226 DISPATCH_ALWAYS_INLINE_NDEBUG
227 static inline void
228 _dispatch_retain_2(dispatch_object_t dou)
229 {
230 (void)_os_object_retain_internal_n_inline(dou._os_obj, 2);
231 }
232
233 DISPATCH_ALWAYS_INLINE_NDEBUG
234 static inline void
235 _dispatch_retain_n(dispatch_object_t dou, int n)
236 {
237 (void)_os_object_retain_internal_n_inline(dou._os_obj, n);
238 }
239
240 DISPATCH_ALWAYS_INLINE_NDEBUG
241 static inline void
242 _dispatch_release(dispatch_object_t dou)
243 {
244 _os_object_release_internal_n_inline(dou._os_obj, 1);
245 }
246
247 DISPATCH_ALWAYS_INLINE_NDEBUG
248 static inline void
249 _dispatch_release_2(dispatch_object_t dou)
250 {
251 _os_object_release_internal_n_inline(dou._os_obj, 2);
252 }
253
254 DISPATCH_ALWAYS_INLINE_NDEBUG
255 static inline void
256 _dispatch_release_n(dispatch_object_t dou, int n)
257 {
258 _os_object_release_internal_n_inline(dou._os_obj, n);
259 }
260
261 DISPATCH_ALWAYS_INLINE_NDEBUG
262 static inline void
263 _dispatch_release_no_dispose(dispatch_object_t dou)
264 {
265 _os_object_release_internal_n_no_dispose_inline(dou._os_obj, 1);
266 }
267
268 DISPATCH_ALWAYS_INLINE_NDEBUG
269 static inline void
270 _dispatch_release_2_no_dispose(dispatch_object_t dou)
271 {
272 _os_object_release_internal_n_no_dispose_inline(dou._os_obj, 2);
273 }
274
275 DISPATCH_ALWAYS_INLINE_NDEBUG
276 static inline void
277 _dispatch_release_tailcall(dispatch_object_t dou)
278 {
279 _os_object_release_internal(dou._os_obj);
280 }
281
282 DISPATCH_ALWAYS_INLINE_NDEBUG
283 static inline void
284 _dispatch_release_2_tailcall(dispatch_object_t dou)
285 {
286 _os_object_release_internal_n(dou._os_obj, 2);
287 }
288
289 DISPATCH_ALWAYS_INLINE
290 static inline void
291 _dispatch_queue_retain_storage(dispatch_queue_t dq)
292 {
293 int ref_cnt = os_atomic_inc2o(dq, dq_sref_cnt, relaxed);
294 if (unlikely(ref_cnt <= 0)) {
295 _OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
296 }
297 }
298
299 DISPATCH_ALWAYS_INLINE
300 static inline void
301 _dispatch_queue_release_storage(dispatch_queue_t dq)
302 {
303 // this refcount only delays the _dispatch_object_dealloc() and there's no
304 // need for visibility wrt to the allocation, the internal refcount already
305 // gives us that, and the object becomes immutable after the last internal
306 // refcount release.
307 int ref_cnt = os_atomic_dec2o(dq, dq_sref_cnt, relaxed);
308 if (unlikely(ref_cnt >= 0)) {
309 return;
310 }
311 if (unlikely(ref_cnt < -1)) {
312 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
313 }
314 dq->dq_state = 0xdead000000000000;
315 _dispatch_object_dealloc(dq);
316 }
317
318 DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL
319 static inline void
320 _dispatch_object_set_target_queue_inline(dispatch_object_t dou,
321 dispatch_queue_t tq)
322 {
323 _dispatch_retain(tq);
324 tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release);
325 if (tq) _dispatch_release(tq);
326 _dispatch_object_debug(dou._do, "%s", __func__);
327 }
328
329 #endif // DISPATCH_PURE_C
330 #pragma mark -
331 #pragma mark dispatch_thread
332 #if DISPATCH_PURE_C
333
334 DISPATCH_ALWAYS_INLINE
335 static inline dispatch_thread_context_t
336 _dispatch_thread_context_find(const void *key)
337 {
338 dispatch_thread_context_t dtc =
339 _dispatch_thread_getspecific(dispatch_context_key);
340 while (dtc) {
341 if (dtc->dtc_key == key) {
342 return dtc;
343 }
344 dtc = dtc->dtc_prev;
345 }
346 return NULL;
347 }
348
349 DISPATCH_ALWAYS_INLINE
350 static inline void
351 _dispatch_thread_context_push(dispatch_thread_context_t ctxt)
352 {
353 ctxt->dtc_prev = _dispatch_thread_getspecific(dispatch_context_key);
354 _dispatch_thread_setspecific(dispatch_context_key, ctxt);
355 }
356
357 DISPATCH_ALWAYS_INLINE
358 static inline void
359 _dispatch_thread_context_pop(dispatch_thread_context_t ctxt)
360 {
361 dispatch_assert(_dispatch_thread_getspecific(dispatch_context_key) == ctxt);
362 _dispatch_thread_setspecific(dispatch_context_key, ctxt->dtc_prev);
363 }
364
365 typedef struct dispatch_thread_frame_iterator_s {
366 dispatch_queue_t dtfi_queue;
367 dispatch_thread_frame_t dtfi_frame;
368 } *dispatch_thread_frame_iterator_t;
369
370 DISPATCH_ALWAYS_INLINE
371 static inline void
372 _dispatch_thread_frame_iterate_start(dispatch_thread_frame_iterator_t it)
373 {
374 _dispatch_thread_getspecific_pair(
375 dispatch_queue_key, (void **)&it->dtfi_queue,
376 dispatch_frame_key, (void **)&it->dtfi_frame);
377 }
378
379 DISPATCH_ALWAYS_INLINE
380 static inline void
381 _dispatch_thread_frame_iterate_next(dispatch_thread_frame_iterator_t it)
382 {
383 dispatch_thread_frame_t dtf = it->dtfi_frame;
384 dispatch_queue_t dq = it->dtfi_queue;
385
386 if (dtf) {
387 dispatch_queue_t tq = dq->do_targetq;
388 if (tq) {
389 // redirections, dispatch_sync and dispatch_trysync_f may skip
390 // frames, so we need to simulate seeing the missing links
391 it->dtfi_queue = tq;
392 if (dq == dtf->dtf_queue) {
393 it->dtfi_frame = dtf->dtf_prev;
394 }
395 } else {
396 it->dtfi_queue = dtf->dtf_queue;
397 it->dtfi_frame = dtf->dtf_prev;
398 }
399 } else if (dq) {
400 it->dtfi_queue = dq->do_targetq;
401 }
402 }
403
404 DISPATCH_ALWAYS_INLINE
405 static inline bool
406 _dispatch_thread_frame_find_queue(dispatch_queue_t dq)
407 {
408 struct dispatch_thread_frame_iterator_s it;
409
410 _dispatch_thread_frame_iterate_start(&it);
411 while (it.dtfi_queue) {
412 if (it.dtfi_queue == dq) {
413 return true;
414 }
415 _dispatch_thread_frame_iterate_next(&it);
416 }
417 return false;
418 }
419
420 DISPATCH_ALWAYS_INLINE
421 static inline dispatch_thread_frame_t
422 _dispatch_thread_frame_get_current(void)
423 {
424 return _dispatch_thread_getspecific(dispatch_frame_key);
425 }
426
427 DISPATCH_ALWAYS_INLINE
428 static inline void
429 _dispatch_thread_frame_save_state(dispatch_thread_frame_t dtf)
430 {
431 _dispatch_thread_getspecific_packed_pair(
432 dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue);
433 }
434
435 DISPATCH_ALWAYS_INLINE
436 static inline void
437 _dispatch_thread_frame_push(dispatch_thread_frame_t dtf, dispatch_queue_t dq)
438 {
439 _dispatch_thread_frame_save_state(dtf);
440 _dispatch_thread_setspecific_pair(dispatch_queue_key, dq,
441 dispatch_frame_key, dtf);
442 }
443
444 DISPATCH_ALWAYS_INLINE
445 static inline void
446 _dispatch_thread_frame_push_and_rebase(dispatch_thread_frame_t dtf,
447 dispatch_queue_t dq, dispatch_thread_frame_t new_base)
448 {
449 _dispatch_thread_frame_save_state(dtf);
450 _dispatch_thread_setspecific_pair(dispatch_queue_key, dq,
451 dispatch_frame_key, new_base);
452 }
453
454 DISPATCH_ALWAYS_INLINE
455 static inline void
456 _dispatch_thread_frame_pop(dispatch_thread_frame_t dtf)
457 {
458 _dispatch_thread_setspecific_packed_pair(
459 dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue);
460 }
461
462 DISPATCH_ALWAYS_INLINE
463 static inline dispatch_queue_t
464 _dispatch_thread_frame_stash(dispatch_thread_frame_t dtf)
465 {
466 _dispatch_thread_getspecific_pair(
467 dispatch_queue_key, (void **)&dtf->dtf_queue,
468 dispatch_frame_key, (void **)&dtf->dtf_prev);
469 _dispatch_thread_frame_pop(dtf->dtf_prev);
470 return dtf->dtf_queue;
471 }
472
473 DISPATCH_ALWAYS_INLINE
474 static inline void
475 _dispatch_thread_frame_unstash(dispatch_thread_frame_t dtf)
476 {
477 _dispatch_thread_frame_pop(dtf);
478 }
479
480 DISPATCH_ALWAYS_INLINE
481 static inline int
482 _dispatch_wqthread_override_start_check_owner(mach_port_t thread,
483 dispatch_qos_t qos, mach_port_t *ulock_addr)
484 {
485 #if HAVE_PTHREAD_WORKQUEUE_QOS
486 if (!_dispatch_set_qos_class_enabled) return 0;
487 return _pthread_workqueue_override_start_direct_check_owner(thread,
488 _dispatch_qos_to_pp(qos), ulock_addr);
489 #else
490 (void)thread; (void)qos; (void)ulock_addr;
491 return 0;
492 #endif
493 }
494
495 DISPATCH_ALWAYS_INLINE
496 static inline void
497 _dispatch_wqthread_override_start(mach_port_t thread, dispatch_qos_t qos)
498 {
499 #if HAVE_PTHREAD_WORKQUEUE_QOS
500 if (!_dispatch_set_qos_class_enabled) return;
501 (void)_pthread_workqueue_override_start_direct(thread,
502 _dispatch_qos_to_pp(qos));
503 #else
504 (void)thread; (void)qos;
505 #endif
506 }
507
508 DISPATCH_ALWAYS_INLINE
509 static inline void
510 _dispatch_wqthread_override_reset(void)
511 {
512 #if HAVE_PTHREAD_WORKQUEUE_QOS
513 if (!_dispatch_set_qos_class_enabled) return;
514 (void)_pthread_workqueue_override_reset();
515 #endif
516 }
517
518 DISPATCH_ALWAYS_INLINE
519 static inline void
520 _dispatch_thread_override_start(mach_port_t thread, pthread_priority_t pp,
521 void *resource)
522 {
523 #if HAVE_PTHREAD_WORKQUEUE_QOS
524 if (!_dispatch_set_qos_class_enabled) return;
525 (void)_pthread_qos_override_start_direct(thread, pp, resource);
526 #else
527 (void)thread; (void)pp; (void)resource;
528 #endif
529 }
530
531 DISPATCH_ALWAYS_INLINE
532 static inline void
533 _dispatch_thread_override_end(mach_port_t thread, void *resource)
534 {
535 #if HAVE_PTHREAD_WORKQUEUE_QOS
536 if (!_dispatch_set_qos_class_enabled) return;
537 (void)_pthread_qos_override_end_direct(thread, resource);
538 #else
539 (void)thread; (void)resource;
540 #endif
541 }
542
543 #endif // DISPATCH_PURE_C
544 #pragma mark -
545 #pragma mark dispatch_queue_t state accessors
546 #if DISPATCH_PURE_C
547
548 DISPATCH_ALWAYS_INLINE
549 static inline dispatch_queue_flags_t
550 _dispatch_queue_atomic_flags(dispatch_queue_t dq)
551 {
552 return os_atomic_load2o(dq, dq_atomic_flags, relaxed);
553 }
554
555 DISPATCH_ALWAYS_INLINE
556 static inline dispatch_queue_flags_t
557 _dispatch_queue_atomic_flags_set(dispatch_queue_t dq,
558 dispatch_queue_flags_t bits)
559 {
560 return os_atomic_or2o(dq, dq_atomic_flags, bits, relaxed);
561 }
562
563 DISPATCH_ALWAYS_INLINE
564 static inline dispatch_queue_flags_t
565 _dispatch_queue_atomic_flags_set_and_clear_orig(dispatch_queue_t dq,
566 dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits)
567 {
568 dispatch_queue_flags_t oflags, nflags;
569 os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, {
570 nflags = (oflags | add_bits) & ~clr_bits;
571 });
572 return oflags;
573 }
574
575 DISPATCH_ALWAYS_INLINE
576 static inline dispatch_queue_flags_t
577 _dispatch_queue_atomic_flags_set_and_clear(dispatch_queue_t dq,
578 dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits)
579 {
580 dispatch_queue_flags_t oflags, nflags;
581 os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, {
582 nflags = (oflags | add_bits) & ~clr_bits;
583 });
584 return nflags;
585 }
586
587 DISPATCH_ALWAYS_INLINE
588 static inline dispatch_queue_flags_t
589 _dispatch_queue_atomic_flags_set_orig(dispatch_queue_t dq,
590 dispatch_queue_flags_t bits)
591 {
592 return os_atomic_or_orig2o(dq, dq_atomic_flags, bits, relaxed);
593 }
594
595 DISPATCH_ALWAYS_INLINE
596 static inline dispatch_queue_flags_t
597 _dispatch_queue_atomic_flags_clear(dispatch_queue_t dq,
598 dispatch_queue_flags_t bits)
599 {
600 return os_atomic_and2o(dq, dq_atomic_flags, ~bits, relaxed);
601 }
602
603 DISPATCH_ALWAYS_INLINE
604 static inline bool
605 _dispatch_queue_is_thread_bound(dispatch_queue_t dq)
606 {
607 return _dispatch_queue_atomic_flags(dq) & DQF_THREAD_BOUND;
608 }
609
610 DISPATCH_ALWAYS_INLINE
611 static inline bool
612 _dispatch_queue_cannot_trysync(dispatch_queue_t dq)
613 {
614 return _dispatch_queue_atomic_flags(dq) & DQF_CANNOT_TRYSYNC;
615 }
616
617 DISPATCH_ALWAYS_INLINE
618 static inline bool
619 _dispatch_queue_label_needs_free(dispatch_queue_t dq)
620 {
621 return _dispatch_queue_atomic_flags(dq) & DQF_LABEL_NEEDS_FREE;
622 }
623
624 DISPATCH_ALWAYS_INLINE
625 static inline dispatch_invoke_flags_t
626 _dispatch_queue_autorelease_frequency(dispatch_queue_t dq)
627 {
628 const unsigned long factor =
629 DISPATCH_INVOKE_AUTORELEASE_ALWAYS / DQF_AUTORELEASE_ALWAYS;
630 dispatch_static_assert(factor > 0);
631
632 dispatch_queue_flags_t qaf = _dispatch_queue_atomic_flags(dq);
633
634 qaf &= _DQF_AUTORELEASE_MASK;
635 return (dispatch_invoke_flags_t)qaf * factor;
636 }
637
638 DISPATCH_ALWAYS_INLINE
639 static inline dispatch_invoke_flags_t
640 _dispatch_queue_merge_autorelease_frequency(dispatch_queue_t dq,
641 dispatch_invoke_flags_t flags)
642 {
643 dispatch_invoke_flags_t qaf = _dispatch_queue_autorelease_frequency(dq);
644
645 if (qaf) {
646 flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK;
647 flags |= qaf;
648 }
649 return flags;
650 }
651
652 DISPATCH_ALWAYS_INLINE
653 static inline bool
654 _dispatch_queue_is_legacy(dispatch_queue_t dq)
655 {
656 return _dispatch_queue_atomic_flags(dq) & DQF_LEGACY;
657 }
658
659 DISPATCH_ALWAYS_INLINE
660 static inline void
661 _dispatch_wlh_retain(dispatch_wlh_t wlh)
662 {
663 if (wlh && wlh != DISPATCH_WLH_ANON) {
664 _dispatch_queue_retain_storage((dispatch_queue_t)wlh);
665 }
666 }
667
668 DISPATCH_ALWAYS_INLINE
669 static inline void
670 _dispatch_wlh_release(dispatch_wlh_t wlh)
671 {
672 if (wlh && wlh != DISPATCH_WLH_ANON) {
673 _dispatch_queue_release_storage((dispatch_queue_t)wlh);
674 }
675 }
676
677 #define DISPATCH_WLH_STORAGE_REF 1ul
678
679 DISPATCH_ALWAYS_INLINE DISPATCH_PURE
680 static inline dispatch_wlh_t
681 _dispatch_get_wlh(void)
682 {
683 return _dispatch_thread_getspecific(dispatch_wlh_key);
684 }
685
686 DISPATCH_ALWAYS_INLINE DISPATCH_PURE
687 static inline dispatch_wlh_t
688 _dispatch_get_wlh_reference(void)
689 {
690 dispatch_wlh_t wlh = _dispatch_thread_getspecific(dispatch_wlh_key);
691 if (wlh != DISPATCH_WLH_ANON) {
692 wlh = (dispatch_wlh_t)((uintptr_t)wlh & ~DISPATCH_WLH_STORAGE_REF);
693 }
694 return wlh;
695 }
696
697 DISPATCH_ALWAYS_INLINE
698 static inline bool
699 _dispatch_adopt_wlh_anon_recurse(void)
700 {
701 dispatch_wlh_t cur_wlh = _dispatch_get_wlh_reference();
702 if (cur_wlh == DISPATCH_WLH_ANON) return false;
703 _dispatch_debug("wlh[anon]: set current (releasing %p)", cur_wlh);
704 _dispatch_wlh_release(cur_wlh);
705 _dispatch_thread_setspecific(dispatch_wlh_key, (void *)DISPATCH_WLH_ANON);
706 return true;
707 }
708
709 DISPATCH_ALWAYS_INLINE
710 static inline void
711 _dispatch_adopt_wlh_anon(void)
712 {
713 if (unlikely(!_dispatch_adopt_wlh_anon_recurse())) {
714 DISPATCH_INTERNAL_CRASH(0, "Lingering DISPATCH_WLH_ANON");
715 }
716 }
717
718 DISPATCH_ALWAYS_INLINE
719 static inline void
720 _dispatch_adopt_wlh(dispatch_wlh_t wlh)
721 {
722 dispatch_wlh_t cur_wlh = _dispatch_get_wlh_reference();
723 _dispatch_debug("wlh[%p]: adopt current (releasing %p)", wlh, cur_wlh);
724 if (cur_wlh == DISPATCH_WLH_ANON) {
725 DISPATCH_INTERNAL_CRASH(0, "Lingering DISPATCH_WLH_ANON");
726 }
727 if (cur_wlh != wlh) {
728 dispatch_assert(wlh);
729 _dispatch_wlh_release(cur_wlh);
730 _dispatch_wlh_retain(wlh);
731 }
732 _dispatch_thread_setspecific(dispatch_wlh_key, (void *)wlh);
733 }
734
735 DISPATCH_ALWAYS_INLINE
736 static inline void
737 _dispatch_preserve_wlh_storage_reference(dispatch_wlh_t wlh)
738 {
739 dispatch_assert(wlh != DISPATCH_WLH_ANON);
740 dispatch_assert(wlh == _dispatch_get_wlh());
741 _dispatch_thread_setspecific(dispatch_wlh_key,
742 (void *)((uintptr_t)wlh | DISPATCH_WLH_STORAGE_REF));
743 }
744
745 DISPATCH_ALWAYS_INLINE
746 static inline void
747 _dispatch_reset_wlh(void)
748 {
749 dispatch_assert(_dispatch_get_wlh() == DISPATCH_WLH_ANON);
750 _dispatch_debug("wlh[anon]: clear current");
751 _dispatch_thread_setspecific(dispatch_wlh_key, NULL);
752 _dispatch_clear_return_to_kernel();
753 }
754
755 DISPATCH_ALWAYS_INLINE
756 static inline bool
757 _dispatch_wlh_should_poll_unote(dispatch_unote_t du)
758 {
759 if (likely(_dispatch_needs_to_return_to_kernel())) {
760 dispatch_wlh_t wlh = _dispatch_get_wlh();
761 return wlh != DISPATCH_WLH_ANON && du._du->du_wlh == wlh;
762 }
763 return false;
764 }
765
766 #endif // DISPATCH_PURE_C
767 #ifndef __cplusplus
768
769 DISPATCH_ALWAYS_INLINE
770 static inline uint32_t
771 _dq_state_suspend_cnt(uint64_t dq_state)
772 {
773 return (uint32_t)(dq_state / DISPATCH_QUEUE_SUSPEND_INTERVAL);
774 }
775
776 DISPATCH_ALWAYS_INLINE
777 static inline bool
778 _dq_state_has_side_suspend_cnt(uint64_t dq_state)
779 {
780 return dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT;
781 }
782
783 DISPATCH_ALWAYS_INLINE
784 static inline int32_t
785 _dq_state_extract_width_bits(uint64_t dq_state)
786 {
787 dq_state &= DISPATCH_QUEUE_WIDTH_MASK;
788 return (int32_t)(dq_state >> DISPATCH_QUEUE_WIDTH_SHIFT);
789 }
790
791 DISPATCH_ALWAYS_INLINE
792 static inline int32_t
793 _dq_state_available_width(uint64_t dq_state)
794 {
795 int32_t full = DISPATCH_QUEUE_WIDTH_FULL;
796 if (likely(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) {
797 return full - _dq_state_extract_width_bits(dq_state);
798 }
799 return 0;
800 }
801
802 DISPATCH_ALWAYS_INLINE
803 static inline int32_t
804 _dq_state_used_width(uint64_t dq_state, uint16_t dq_width)
805 {
806 int32_t full = DISPATCH_QUEUE_WIDTH_FULL;
807 int32_t width = _dq_state_extract_width_bits(dq_state);
808
809 if (dq_state & DISPATCH_QUEUE_PENDING_BARRIER) {
810 // DISPATCH_QUEUE_PENDING_BARRIER means (dq_width - 1) of the used width
811 // is pre-reservation that we want to ignore
812 return width - (full - dq_width) - (dq_width - 1);
813 }
814 return width - (full - dq_width);
815 }
816
817 DISPATCH_ALWAYS_INLINE
818 static inline bool
819 _dq_state_is_suspended(uint64_t dq_state)
820 {
821 return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION;
822 }
823 #define DISPATCH_QUEUE_IS_SUSPENDED(x) \
824 _dq_state_is_suspended(os_atomic_load2o(x, dq_state, relaxed))
825
826 DISPATCH_ALWAYS_INLINE
827 static inline bool
828 _dq_state_is_inactive(uint64_t dq_state)
829 {
830 return dq_state & DISPATCH_QUEUE_INACTIVE;
831 }
832
833 DISPATCH_ALWAYS_INLINE
834 static inline bool
835 _dq_state_needs_activation(uint64_t dq_state)
836 {
837 return dq_state & DISPATCH_QUEUE_NEEDS_ACTIVATION;
838 }
839
840 DISPATCH_ALWAYS_INLINE
841 static inline bool
842 _dq_state_is_in_barrier(uint64_t dq_state)
843 {
844 return dq_state & DISPATCH_QUEUE_IN_BARRIER;
845 }
846
847 DISPATCH_ALWAYS_INLINE
848 static inline bool
849 _dq_state_has_available_width(uint64_t dq_state)
850 {
851 return !(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT);
852 }
853
854 DISPATCH_ALWAYS_INLINE
855 static inline bool
856 _dq_state_has_pending_barrier(uint64_t dq_state)
857 {
858 return dq_state & DISPATCH_QUEUE_PENDING_BARRIER;
859 }
860
861 DISPATCH_ALWAYS_INLINE
862 static inline bool
863 _dq_state_is_dirty(uint64_t dq_state)
864 {
865 return dq_state & DISPATCH_QUEUE_DIRTY;
866 }
867
868 DISPATCH_ALWAYS_INLINE
869 static inline bool
870 _dq_state_is_base_wlh(uint64_t dq_state)
871 {
872 return dq_state & DISPATCH_QUEUE_ROLE_BASE_WLH;
873 }
874
875 DISPATCH_ALWAYS_INLINE
876 static inline bool
877 _dq_state_is_base_anon(uint64_t dq_state)
878 {
879 return dq_state & DISPATCH_QUEUE_ROLE_BASE_ANON;
880 }
881
882 DISPATCH_ALWAYS_INLINE
883 static inline bool
884 _dq_state_is_inner_queue(uint64_t dq_state)
885 {
886 return (dq_state & DISPATCH_QUEUE_ROLE_MASK) == DISPATCH_QUEUE_ROLE_INNER;
887 }
888
889 DISPATCH_ALWAYS_INLINE
890 static inline bool
891 _dq_state_is_enqueued(uint64_t dq_state)
892 {
893 return dq_state & (DISPATCH_QUEUE_ENQUEUED|DISPATCH_QUEUE_ENQUEUED_ON_MGR);
894 }
895
896 DISPATCH_ALWAYS_INLINE
897 static inline bool
898 _dq_state_is_enqueued_on_target(uint64_t dq_state)
899 {
900 return dq_state & DISPATCH_QUEUE_ENQUEUED;
901 }
902
903 DISPATCH_ALWAYS_INLINE
904 static inline bool
905 _dq_state_is_enqueued_on_manager(uint64_t dq_state)
906 {
907 return dq_state & DISPATCH_QUEUE_ENQUEUED_ON_MGR;
908 }
909
910 DISPATCH_ALWAYS_INLINE
911 static inline bool
912 _dq_state_in_sync_transfer(uint64_t dq_state)
913 {
914 return dq_state & DISPATCH_QUEUE_SYNC_TRANSFER;
915 }
916
917 DISPATCH_ALWAYS_INLINE
918 static inline bool
919 _dq_state_received_override(uint64_t dq_state)
920 {
921 return _dq_state_is_base_anon(dq_state) &&
922 (dq_state & DISPATCH_QUEUE_RECEIVED_OVERRIDE);
923 }
924
925 DISPATCH_ALWAYS_INLINE
926 static inline bool
927 _dq_state_received_sync_wait(uint64_t dq_state)
928 {
929 return _dq_state_is_base_wlh(dq_state) &&
930 (dq_state & DISPATCH_QUEUE_RECEIVED_SYNC_WAIT);
931 }
932
933 DISPATCH_ALWAYS_INLINE
934 static inline dispatch_qos_t
935 _dq_state_max_qos(uint64_t dq_state)
936 {
937 dq_state &= DISPATCH_QUEUE_MAX_QOS_MASK;
938 return (dispatch_qos_t)(dq_state >> DISPATCH_QUEUE_MAX_QOS_SHIFT);
939 }
940
941 DISPATCH_ALWAYS_INLINE
942 static inline uint64_t
943 _dq_state_from_qos(dispatch_qos_t qos)
944 {
945 return (uint64_t)(qos) << DISPATCH_QUEUE_MAX_QOS_SHIFT;
946 }
947
948 DISPATCH_ALWAYS_INLINE
949 static inline uint64_t
950 _dq_state_merge_qos(uint64_t dq_state, dispatch_qos_t qos)
951 {
952 uint64_t qos_bits = _dq_state_from_qos(qos);
953 if ((dq_state & DISPATCH_QUEUE_MAX_QOS_MASK) < qos_bits) {
954 dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
955 dq_state |= qos_bits;
956 if (unlikely(_dq_state_is_base_anon(dq_state))) {
957 dq_state |= DISPATCH_QUEUE_RECEIVED_OVERRIDE;
958 }
959 }
960 return dq_state;
961 }
962
963 DISPATCH_ALWAYS_INLINE
964 static inline dispatch_tid
965 _dq_state_drain_owner(uint64_t dq_state)
966 {
967 return _dispatch_lock_owner((dispatch_lock)dq_state);
968 }
969 #define DISPATCH_QUEUE_DRAIN_OWNER(dq) \
970 _dq_state_drain_owner(os_atomic_load2o(dq, dq_state, relaxed))
971
972 DISPATCH_ALWAYS_INLINE
973 static inline bool
974 _dq_state_drain_locked_by(uint64_t dq_state, dispatch_tid tid)
975 {
976 return _dispatch_lock_is_locked_by((dispatch_lock)dq_state, tid);
977 }
978
979 DISPATCH_ALWAYS_INLINE
980 static inline bool
981 _dq_state_drain_locked_by_self(uint64_t dq_state)
982 {
983 return _dispatch_lock_is_locked_by_self((dispatch_lock)dq_state);
984 }
985
986 DISPATCH_ALWAYS_INLINE
987 static inline bool
988 _dq_state_drain_locked(uint64_t dq_state)
989 {
990 return _dispatch_lock_is_locked((dispatch_lock)dq_state);
991 }
992
993 DISPATCH_ALWAYS_INLINE
994 static inline bool
995 _dq_state_is_sync_runnable(uint64_t dq_state)
996 {
997 return dq_state < DISPATCH_QUEUE_IN_BARRIER;
998 }
999
1000 DISPATCH_ALWAYS_INLINE
1001 static inline bool
1002 _dq_state_is_runnable(uint64_t dq_state)
1003 {
1004 return dq_state < DISPATCH_QUEUE_WIDTH_FULL_BIT;
1005 }
1006
1007 DISPATCH_ALWAYS_INLINE
1008 static inline bool
1009 _dq_state_should_override(uint64_t dq_state)
1010 {
1011 if (_dq_state_is_suspended(dq_state) ||
1012 _dq_state_is_enqueued_on_manager(dq_state)) {
1013 return false;
1014 }
1015 if (_dq_state_is_enqueued_on_target(dq_state)) {
1016 return true;
1017 }
1018 if (_dq_state_is_base_wlh(dq_state)) {
1019 return false;
1020 }
1021 return _dq_state_drain_locked(dq_state);
1022 }
1023
1024
1025 #endif // __cplusplus
1026 #pragma mark -
1027 #pragma mark dispatch_queue_t state machine
1028
1029 static inline pthread_priority_t _dispatch_get_priority(void);
1030 static inline dispatch_priority_t _dispatch_get_basepri(void);
1031 static inline dispatch_qos_t _dispatch_get_basepri_override_qos_floor(void);
1032 static inline void _dispatch_set_basepri_override_qos(dispatch_qos_t qos);
1033 static inline void _dispatch_reset_basepri(dispatch_priority_t dbp);
1034 static inline dispatch_priority_t _dispatch_set_basepri(dispatch_priority_t dbp);
1035 static inline bool _dispatch_queue_need_override_retain(
1036 dispatch_queue_class_t dqu, dispatch_qos_t qos);
1037
1038 #if DISPATCH_PURE_C
1039
1040 // Note to later developers: ensure that any initialization changes are
1041 // made for statically allocated queues (i.e. _dispatch_main_q).
1042 static inline void
1043 _dispatch_queue_init(dispatch_queue_t dq, dispatch_queue_flags_t dqf,
1044 uint16_t width, uint64_t initial_state_bits)
1045 {
1046 uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width);
1047
1048 dispatch_assert((initial_state_bits & ~(DISPATCH_QUEUE_ROLE_MASK |
1049 DISPATCH_QUEUE_INACTIVE)) == 0);
1050
1051 if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) {
1052 dq_state |= DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION;
1053 dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_queue_resume
1054 }
1055
1056 dq_state |= (initial_state_bits & DISPATCH_QUEUE_ROLE_MASK);
1057 dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;
1058 dqf |= DQF_WIDTH(width);
1059 os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
1060 dq->dq_state = dq_state;
1061 dq->dq_serialnum =
1062 os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
1063 }
1064
/* Used by:
 * - _dispatch_queue_set_target_queue
 * - changing dispatch source handlers
 *
 * Tries to prevent concurrent wakeup of an inactive queue by suspending it.
 * Returns false when the queue is no longer inactive (no suspension taken).
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_inactive_suspend(dispatch_queue_t dq)
{
	uint64_t old_state, new_state;

	(void)os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
		// Only an inactive queue may be suspended through this path.
		if (unlikely(!_dq_state_is_inactive(old_state))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		new_state = old_state + DISPATCH_QUEUE_SUSPEND_INTERVAL;
	});
	if (unlikely(!_dq_state_is_suspended(old_state) ||
			_dq_state_has_side_suspend_cnt(old_state))) {
		// Crashing here means that 128+ dispatch_suspend() calls have been
		// made on an inactive object and then dispatch_set_target_queue() or
		// dispatch_set_*_handler() has been called.
		//
		// We don't want to handle the side suspend count in a codepath that
		// needs to be fast.
		DISPATCH_CLIENT_CRASH(dq, "Too many calls to dispatch_suspend() "
				"prior to calling dispatch_set_target_queue() "
				"or dispatch_set_*_handler()");
	}
	return true;
}
1097
1098 DISPATCH_ALWAYS_INLINE
1099 static inline bool
1100 _dq_state_needs_lock_override(uint64_t dq_state, dispatch_qos_t qos)
1101 {
1102 return _dq_state_is_base_anon(dq_state) &&
1103 qos < _dq_state_max_qos(dq_state);
1104 }
1105
DISPATCH_ALWAYS_INLINE
static inline dispatch_qos_t
_dispatch_queue_override_self(uint64_t dq_state)
{
	// Raise the current thread to the queue's published max QoS before
	// attempting to drain it, and record that fact in the thread's basepri.
	dispatch_qos_t qos = _dq_state_max_qos(dq_state);
	_dispatch_wqthread_override_start(_dispatch_tid_self(), qos);
	// ensure that the root queue sees
	// that this thread was overridden.
	_dispatch_set_basepri_override_qos(qos);
	return qos;
}
1117
// Tries to acquire the drain lock of `dq` for a regular (non-WLH) drain.
// On success, returns the state bits the caller now owns (to be given back
// at unlock time); returns 0 when the lock could not be taken.
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline uint64_t
_dispatch_queue_drain_try_lock(dispatch_queue_t dq,
		dispatch_invoke_flags_t flags)
{
	uint64_t pending_barrier_width =
			(dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
	uint64_t set_owner_and_set_full_width =
			_dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT;
	uint64_t lock_fail_mask, old_state, new_state, dequeue_mask;

	// same as !_dq_state_is_runnable()
	lock_fail_mask = ~(DISPATCH_QUEUE_WIDTH_FULL_BIT - 1);
	// same as _dq_state_drain_locked()
	lock_fail_mask |= DISPATCH_QUEUE_DRAIN_OWNER_MASK;

	if (flags & DISPATCH_INVOKE_STEALING) {
		// a stealing drain doesn't clear any enqueued bit
		lock_fail_mask |= DISPATCH_QUEUE_ENQUEUED_ON_MGR;
		dequeue_mask = 0;
	} else if (flags & DISPATCH_INVOKE_MANAGER_DRAIN) {
		dequeue_mask = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
	} else {
		lock_fail_mask |= DISPATCH_QUEUE_ENQUEUED_ON_MGR;
		dequeue_mask = DISPATCH_QUEUE_ENQUEUED;
	}
	dispatch_assert(!(flags & DISPATCH_INVOKE_WLH));

	dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor();
retry:
	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
		new_state = old_state;
		if (likely(!(old_state & lock_fail_mask))) {
			if (unlikely(_dq_state_needs_lock_override(old_state, oq_floor))) {
				// Raise this thread's QoS outside the rmw loop, then retry.
				os_atomic_rmw_loop_give_up({
					oq_floor = _dispatch_queue_override_self(old_state);
					goto retry;
				});
			}
			//
			// Only keep the HAS_WAITER, MAX_QOS and ENQUEUED bits
			// In particular acquiring the drain lock clears the DIRTY and
			// RECEIVED_OVERRIDE bits.
			//
			new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
			new_state |= set_owner_and_set_full_width;
			if (_dq_state_has_pending_barrier(old_state) ||
					old_state + pending_barrier_width <
					DISPATCH_QUEUE_WIDTH_FULL_BIT) {
				// Enough width is free for a barrier: take it in-barrier.
				new_state |= DISPATCH_QUEUE_IN_BARRIER;
			}
		} else if (dequeue_mask) {
			// dequeue_mask is in a register, xor yields better assembly
			new_state ^= dequeue_mask;
		} else {
			os_atomic_rmw_loop_give_up(break);
		}
	});

	dispatch_assert((old_state & dequeue_mask) == dequeue_mask);
	if (likely(!(old_state & lock_fail_mask))) {
		// Report only the bits this drain now owns, net of the width the
		// state already carried before the lock was taken.
		new_state &= DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_FULL_BIT |
				dequeue_mask;
		old_state &= DISPATCH_QUEUE_WIDTH_MASK;
		return new_state - old_state;
	}
	return 0;
}
1185
// Tries to acquire the drain lock for a queue that is the base of a
// workloop (WLH). On return, *dq_state (if non-NULL) receives the updated
// state word; the return value says whether the lock was actually taken.
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_drain_try_lock_wlh(dispatch_queue_t dq, uint64_t *dq_state)
{
	uint64_t old_state, new_state;
	uint64_t lock_bits = _dispatch_lock_value_for_self() |
			DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER;

	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
		new_state = old_state;
		if (unlikely(_dq_state_is_suspended(old_state))) {
			// Suspended: drop the ENQUEUED bit instead of locking.
			new_state &= ~DISPATCH_QUEUE_ENQUEUED;
		} else if (unlikely(_dq_state_drain_locked(old_state))) {
			os_atomic_rmw_loop_give_up(break);
		} else {
			new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
			new_state |= lock_bits;
		}
	});
	if (unlikely(!_dq_state_is_base_wlh(old_state) ||
			!_dq_state_is_enqueued_on_target(old_state) ||
			_dq_state_is_enqueued_on_manager(old_state))) {
#if !__LP64__
		// Keep the interesting high bits visible in the 32-bit crash slot.
		old_state >>= 32;
#endif
		DISPATCH_INTERNAL_CRASH(old_state, "Invalid wlh state");
	}

	if (dq_state) *dq_state = new_state;
	return !_dq_state_is_suspended(old_state) &&
			!_dq_state_drain_locked(old_state);
}
1218
// Acquires the drain lock of the manager queue. Unlike regular drain lock
// acquisition this must always succeed; a runnable check failure or an
// already-held lock is a fatal internal error.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_mgr_lock(dispatch_queue_t dq)
{
	uint64_t old_state, new_state, set_owner_and_set_full_width =
			_dispatch_lock_value_for_self() | DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;

	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
		new_state = old_state;
		if (unlikely(!_dq_state_is_runnable(old_state) ||
				_dq_state_drain_locked(old_state))) {
			DISPATCH_INTERNAL_CRASH((uintptr_t)old_state,
					"Locking the manager should not fail");
		}
		new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
		new_state |= set_owner_and_set_full_width;
	});
}
1237
// Releases the manager queue drain lock. Returns true when the DIRTY bit
// was observed set, i.e. work was enqueued while the lock was held and the
// caller must reevaluate the queue.
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_mgr_unlock(dispatch_queue_t dq)
{
	uint64_t old_state, new_state;
	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
		new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
		new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
		new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
	});
	return _dq_state_is_dirty(old_state);
}
1250
/* Used by _dispatch_barrier_{try,}sync
 *
 * Note, this fails if any of e:1 or dl!=0, but that allows this code to be a
 * simple cmpxchg which is significantly faster on Intel, and makes a
 * significant difference on the uncontended codepath.
 *
 * See discussion for DISPATCH_QUEUE_DIRTY in queue_internal.h
 *
 * Initial state must be `completely idle`
 * Final state forces { ib:1, qf:1, w:0 }
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_queue_t dq,
		uint32_t tid, uint64_t suspend_count)
{
	uint64_t init = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
	// Target state: full width, in-barrier, owned by `tid`, plus any
	// requested suspensions folded in.
	uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER |
			_dispatch_lock_value_from_tid(tid) |
			(suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL);
	uint64_t old_state, new_state;

	return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
		uint64_t role = old_state & DISPATCH_QUEUE_ROLE_MASK;
		// Only succeed when the state is exactly pristine-idle (modulo role).
		if (old_state != (init | role)) {
			os_atomic_rmw_loop_give_up(break);
		}
		new_state = value | role;
	});
}
1281
// Barrier-sync acquisition without taking any additional suspend count.
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq, uint32_t tid)
{
	return _dispatch_queue_try_acquire_barrier_sync_and_suspend(dq, tid, 0);
}
1288
/* Used by _dispatch_sync for root queues and some drain codepaths
 *
 * Root queues have no strict orderning and dispatch_sync() always goes through.
 * Drain is the sole setter of `dl` hence can use this non failing version of
 * _dispatch_queue_try_acquire_sync().
 *
 * Final state: { w += 1 }
 */
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_reserve_sync_width(dispatch_queue_t dq)
{
	// Unconditional width reservation: a plain atomic add, no rmw loop.
	(void)os_atomic_add2o(dq, dq_state,
			DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed);
}
1304
1305 /* Used by _dispatch_sync on non-serial queues
1306 *
1307 * Initial state must be { sc:0, ib:0, pb:0, d:0 }
1308 * Final state: { w += 1 }
1309 */
1310 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1311 static inline bool
1312 _dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq)
1313 {
1314 uint64_t old_state, new_state;
1315
1316 // <rdar://problem/24738102&24743140> reserving non barrier width
1317 // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width
1318 // equivalent), so we have to check that this thread hasn't enqueued
1319 // anything ahead of this call or we can break ordering
1320 if (unlikely(dq->dq_items_tail)) {
1321 return false;
1322 }
1323
1324 return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
1325 if (unlikely(!_dq_state_is_sync_runnable(old_state)) ||
1326 _dq_state_is_dirty(old_state) ||
1327 _dq_state_has_pending_barrier(old_state)) {
1328 os_atomic_rmw_loop_give_up(return false);
1329 }
1330 new_state = old_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
1331 });
1332 }
1333
/* Used by _dispatch_apply_redirect
 *
 * Try to acquire at most da_width and returns what could be acquired,
 * possibly 0
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline int32_t
_dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, int32_t da_width)
{
	uint64_t old_state, new_state;
	int32_t width;

	(void)os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
		// Clamp the request to what the state word says is still available.
		width = (int32_t)_dq_state_available_width(old_state);
		if (unlikely(!width)) {
			os_atomic_rmw_loop_give_up(return 0);
		}
		if (width > da_width) {
			width = da_width;
		}
		new_state = old_state + (uint64_t)width * DISPATCH_QUEUE_WIDTH_INTERVAL;
	});
	return width;
}
1358
/* Used by _dispatch_apply_redirect
 *
 * Release width acquired by _dispatch_queue_try_acquire_width
 */
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_relinquish_width(dispatch_queue_t dq, int32_t da_width)
{
	// Plain atomic subtract: returning width can never fail.
	(void)os_atomic_sub2o(dq, dq_state,
			(uint64_t)da_width * DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed);
}
1370
/* Used by target-queue recursing code
 *
 * Initial state must be { sc:0, ib:0, qf:0, pb:0, d:0 }
 * Final state: { w += 1 }
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_acquire_async(dispatch_queue_t dq)
{
	uint64_t old_state, new_state;

	return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
		// Fail whenever the queue isn't runnable, has been dirtied, or
		// already has a barrier waiting for width.
		if (unlikely(!_dq_state_is_runnable(old_state) ||
				_dq_state_is_dirty(old_state) ||
				_dq_state_has_pending_barrier(old_state))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		new_state = old_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
	});
}
1391
/* Used by concurrent drain
 *
 * Either acquires the full barrier width, in which case the Final state is:
 * { ib:1 qf:1 pb:0 d:0 }
 * Or if there isn't enough width prepare the queue with the PENDING_BARRIER bit
 * { ib:0 pb:1 d:0}
 *
 * This always clears the dirty bit as we know for sure we shouldn't reevaluate
 * the state machine here
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_upgrade_full_width(dispatch_queue_t dq, uint64_t owned)
{
	uint64_t old_state, new_state;
	uint64_t pending_barrier_width = DISPATCH_QUEUE_PENDING_BARRIER +
			(dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;

	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
		// Give back what this drain owned, then stake a pending barrier
		// claim (unless one is already pending).
		new_state = old_state - owned;
		if (likely(!_dq_state_has_pending_barrier(old_state))) {
			new_state += pending_barrier_width;
		}
		if (likely(_dq_state_is_runnable(new_state))) {
			// Enough width freed up: convert the pending claim into an
			// actual IN_BARRIER ownership.
			new_state += DISPATCH_QUEUE_WIDTH_INTERVAL;
			new_state += DISPATCH_QUEUE_IN_BARRIER;
			new_state -= DISPATCH_QUEUE_PENDING_BARRIER;
		}
		new_state &= ~DISPATCH_QUEUE_DIRTY;
	});
	return new_state & DISPATCH_QUEUE_IN_BARRIER;
}
1424
1425 /* Used at the end of Drainers
1426 *
1427 * This adjusts the `owned` width when the next continuation is already known
1428 * to account for its barrierness.
1429 */
1430 DISPATCH_ALWAYS_INLINE
1431 static inline uint64_t
1432 _dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned,
1433 struct dispatch_object_s *next_dc)
1434 {
1435 uint64_t reservation;
1436
1437 if (unlikely(dq->dq_width > 1)) {
1438 if (next_dc && _dispatch_object_is_barrier(next_dc)) {
1439 reservation = DISPATCH_QUEUE_PENDING_BARRIER;
1440 reservation += (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
1441 owned -= reservation;
1442 }
1443 }
1444 return owned;
1445 }
1446
/* Used at the end of Drainers
 *
 * Unlocking fails if the DIRTY bit is seen (and the queue is not suspended).
 * In that case, only the DIRTY bit is cleared. The DIRTY bit is therefore used
 * as a signal to renew the drain lock instead of releasing it.
 *
 * Successful unlock forces { dl:0, d:!done, qo:0 } and gives back `owned`
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done)
{
	uint64_t old_state, new_state;

	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
		new_state = old_state - owned;
		new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
		if (unlikely(_dq_state_is_suspended(old_state))) {
			// nothing to do
		} else if (unlikely(_dq_state_is_dirty(old_state))) {
			os_atomic_rmw_loop_give_up({
				// just renew the drain lock with an acquire barrier, to see
				// what the enqueuer that set DIRTY has done.
				// the xor generates better assembly as DISPATCH_QUEUE_DIRTY
				// is already in a register
				os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire);
				return false;
			});
		} else if (likely(done)) {
			// Fully drained: the published max QoS is no longer meaningful.
			new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
		} else {
			// Not done: leave a DIRTY marker so the next lock acquirer
			// reevaluates the queue.
			new_state |= DISPATCH_QUEUE_DIRTY;
		}
	});

	if (_dq_state_received_override(old_state)) {
		// Ensure that the root queue sees that this thread was overridden.
		_dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state));
	}
	return true;
}
1488
#pragma mark -
#pragma mark os_mpsc_queue

// type_t * {volatile,const,_Atomic,...} -> type_t *
// type_t[] -> type_t *
#define os_unqualified_pointer_type(expr) \
		typeof(typeof(*(expr)) *)

// Node pointer type of the `_ns`-prefixed MPSC list embedded in `q`.
#define os_mpsc_node_type(q, _ns) \
		os_unqualified_pointer_type((q)->_ns##_head)

//
// Multi Producer calls, can be used safely concurrently
//

// Returns true when the queue was empty and the head must be set
#define os_mpsc_push_update_tail_list(q, _ns, head, tail, _o_next) ({ \
		os_mpsc_node_type(q, _ns) _head = (head), _tail = (tail), _prev; \
		_tail->_o_next = NULL; \
		_prev = os_atomic_xchg2o((q), _ns##_tail, _tail, release); \
		if (likely(_prev)) { \
			os_atomic_store2o(_prev, _o_next, _head, relaxed); \
		} \
		(_prev == NULL); \
})

// Returns true when the queue was empty and the head must be set
#define os_mpsc_push_update_tail(q, _ns, o, _o_next) ({ \
		os_mpsc_node_type(q, _ns) _o = (o); \
		os_mpsc_push_update_tail_list(q, _ns, _o, _o, _o_next); \
})

// Publishes `o` as the head; only valid after a push that returned true.
#define os_mpsc_push_update_head(q, _ns, o) ({ \
		os_atomic_store2o((q), _ns##_head, o, relaxed); \
})

//
// Single Consumer calls, can NOT be used safely concurrently
//

// Spins until a concurrent producer has published the head, then returns it.
#define os_mpsc_get_head(q, _ns) \
		_dispatch_wait_until(os_atomic_load2o(q, _ns##_head, dependency))

// Spins until a concurrent producer has linked _n's successor.
#define os_mpsc_get_next(_n, _o_next) \
		_dispatch_wait_until(os_atomic_load2o(_n, _o_next, dependency))

// Pops `head` and returns its successor (NULL when the queue is drained).
#define os_mpsc_pop_head(q, _ns, head, _o_next) ({ \
		typeof(q) _q = (q); \
		os_mpsc_node_type(_q, _ns) _head = (head), _n; \
		_n = os_atomic_load2o(_head, _o_next, dependency); \
		os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
		/* 22708742: set tail to NULL with release, so that NULL write */ \
		/* to head above doesn't clobber head from concurrent enqueuer */ \
		if (unlikely(!_n && \
				!os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release))) { \
			_n = os_mpsc_get_next(_head, _o_next); \
			os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
		} \
		_n; \
})

// Re-links `head` (with known successor `next`) at the front of the queue,
// undoing a previous os_mpsc_pop_head.
#define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next) ({ \
		typeof(q) _q = (q); \
		os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \
		if (unlikely(!_n && \
				!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed))) { \
			_n = os_mpsc_get_head(q, _ns); \
			os_atomic_store2o(_head, _o_next, _n, relaxed); \
		} \
		os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
})

// Detaches the whole list: returns the head and stores the tail in *(tail),
// leaving the queue empty for concurrent producers.
#define os_mpsc_capture_snapshot(q, _ns, tail) ({ \
		typeof(q) _q = (q); \
		os_mpsc_node_type(_q, _ns) _head = os_mpsc_get_head(q, _ns); \
		os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \
		/* 22708742: set tail to NULL with release, so that NULL write */ \
		/* to head above doesn't clobber head from concurrent enqueuer */ \
		*(tail) = os_atomic_xchg2o(_q, _ns##_tail, NULL, release); \
		_head; \
})

// Walks a captured snapshot: returns head's successor, or NULL at the tail.
#define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \
		os_unqualified_pointer_type(head) _head = (head), _n = NULL; \
		if (_head != (tail)) { \
			_n = os_mpsc_get_next(_head, _o_next); \
		}; \
		_n; })

// Pushes the [head, tail] list at the FRONT of the queue (consumer side).
#define os_mpsc_prepend(q, _ns, head, tail, _o_next) ({ \
		typeof(q) _q = (q); \
		os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \
		os_atomic_store2o(_tail, _o_next, NULL, relaxed); \
		if (unlikely(!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release))) { \
			_n = os_mpsc_get_head(q, _ns); \
			os_atomic_store2o(_tail, _o_next, _n, relaxed); \
		} \
		os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
})
1588
1589 #pragma mark -
1590 #pragma mark dispatch_queue_t tq lock
1591
// Tries to take the queue's side lock. On contention, boosts the current
// owner to `qos` so it can release the lock promptly, and returns false.
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, dispatch_qos_t qos)
{
	dispatch_tid owner;
	if (_dispatch_unfair_lock_trylock(&dq->dq_sidelock, &owner)) {
		return true;
	}
	_dispatch_wqthread_override_start_check_owner(owner, qos,
			&dq->dq_sidelock.dul_lock);
	return false;
}
1604
// Takes the queue's side lock unconditionally (cf. the trylock variant).
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_sidelock_lock(dispatch_queue_t dq)
{
	return _dispatch_unfair_lock_lock(&dq->dq_sidelock);
}
1611
// Releases the side lock; returns false when the unlock was contended,
// after recording an override marker in the thread's base priority.
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_sidelock_tryunlock(dispatch_queue_t dq)
{
	if (_dispatch_unfair_lock_tryunlock(&dq->dq_sidelock)) {
		return true;
	}
	// Ensure that the root queue sees that this thread was overridden.
	// Since we don't know which override QoS was used, use MAINTENANCE
	// as a marker for _dispatch_reset_basepri_override()
	_dispatch_set_basepri_override_qos(DISPATCH_QOS_MAINTENANCE);
	return false;
}
1625
// Releases the side lock unconditionally, propagating the override marker
// when a failed trylock was observed while the lock was held.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_sidelock_unlock(dispatch_queue_t dq)
{
	if (_dispatch_unfair_lock_unlock_had_failed_trylock(&dq->dq_sidelock)) {
		// Ensure that the root queue sees that this thread was overridden.
		// Since we don't know which override QoS was used, use MAINTENANCE
		// as a marker for _dispatch_reset_basepri_override()
		_dispatch_set_basepri_override_qos(DISPATCH_QOS_MAINTENANCE);
	}
}
1637
1638 #pragma mark -
1639 #pragma mark dispatch_queue_t misc
1640
// Returns the queue the current thread is draining, tracked in TSD.
DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_t
_dispatch_queue_get_current(void)
{
	return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
}
1647
// Records `dq` as the current queue for this thread in TSD.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_set_current(dispatch_queue_t dq)
{
	_dispatch_thread_setspecific(dispatch_queue_key, dq);
}
1654
// Waits for and returns the head of the queue's item list
// (single-consumer only).
DISPATCH_ALWAYS_INLINE
static inline struct dispatch_object_s*
_dispatch_queue_head(dispatch_queue_t dq)
{
	return os_mpsc_get_head(dq, dq_items);
}
1661
// Pops `dc` off the item list and returns its successor
// (NULL when the queue is drained). Single-consumer only.
DISPATCH_ALWAYS_INLINE
static inline struct dispatch_object_s*
_dispatch_queue_next(dispatch_queue_t dq, struct dispatch_object_s *dc)
{
	return os_mpsc_pop_head(dq, dq_items, dc, do_next);
}
1668
// Pushes `tail` onto the queue's item list; returns true when the queue
// was empty and the caller must also publish the head.
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_push_update_tail(dispatch_queue_t dq,
		struct dispatch_object_s *tail)
{
	// if we crash here with a value less than 0x1000, then we are
	// at a known bug in client code. for example, see
	// _dispatch_queue_dispose or _dispatch_atfork_child
	return os_mpsc_push_update_tail(dq, dq_items, tail, do_next);
}
1679
// List variant of _dispatch_queue_push_update_tail: pushes the linked
// [head, tail] run in one shot; returns true when the queue was empty.
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_push_update_tail_list(dispatch_queue_t dq,
		struct dispatch_object_s *head, struct dispatch_object_s *tail)
{
	// if we crash here with a value less than 0x1000, then we are
	// at a known bug in client code. for example, see
	// _dispatch_queue_dispose or _dispatch_atfork_child
	return os_mpsc_push_update_tail_list(dq, dq_items, head, tail, do_next);
}
1690
// Publishes `head` after a push that reported the queue was empty.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_update_head(dispatch_queue_t dq,
		struct dispatch_object_s *head)
{
	os_mpsc_push_update_head(dq, dq_items, head);
}
1698
// Pushes [head, tail] (n items) onto a global root queue, poking the
// workqueue only when the push transitioned the queue from empty.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_root_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _head,
		dispatch_object_t _tail, int n)
{
	struct dispatch_object_s *head = _head._do, *tail = _tail._do;
	if (unlikely(_dispatch_queue_push_update_tail_list(dq, head, tail))) {
		_dispatch_queue_push_update_head(dq, head);
		return _dispatch_global_queue_poke(dq, n, 0);
	}
}
1710
// Pushes `_tail` onto `dq` at `qos`, waking the queue up when it became
// non-empty or when a QoS override is required.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail,
		dispatch_qos_t qos)
{
	struct dispatch_object_s *tail = _tail._do;
	dispatch_wakeup_flags_t flags = 0;
	// If we are going to call dx_wakeup(), the queue must be retained before
	// the item we're pushing can be dequeued, which means:
	// - before we exchange the tail if we may have to override
	// - before we set the head if we made the queue non empty.
	// Otherwise, if preempted between one of these and the call to dx_wakeup()
	// the blocks submitted to the queue may release the last reference to the
	// queue when invoked by _dispatch_queue_drain. <rdar://problem/6932776>
	bool overriding = _dispatch_queue_need_override_retain(dq, qos);
	if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
		// Queue went from empty to non-empty: retain (if not already done
		// by the override path) and mark the push dirty for the wakeup.
		if (!overriding) _dispatch_retain_2(dq->_as_os_obj);
		_dispatch_queue_push_update_head(dq, tail);
		flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY;
	} else if (overriding) {
		flags = DISPATCH_WAKEUP_CONSUME_2;
	} else {
		// Neither empty-transition nor override: no wakeup needed.
		return;
	}
	return dx_wakeup(dq, qos, flags);
}
1737
// Re-enqueues `dq` on its target queue `tq` at the max QoS recorded in
// `dq_state`.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq,
		uint64_t dq_state)
{
	return dx_push(tq, dq, _dq_state_max_qos(dq_state));
}
1745
// Temporarily adopts `assumed_rq` (which must be a root queue) as the
// current queue and base priority. Returns the previous base priority so
// the caller can restore it afterwards.
DISPATCH_ALWAYS_INLINE
static inline dispatch_priority_t
_dispatch_root_queue_identity_assume(dispatch_queue_t assumed_rq)
{
	dispatch_priority_t old_dbp = _dispatch_get_basepri();
	dispatch_assert(dx_hastypeflag(assumed_rq, QUEUE_ROOT));
	_dispatch_reset_basepri(assumed_rq->dq_priority);
	_dispatch_queue_set_current(assumed_rq);
	return old_dbp;
}
1756
// Signature of the per-class drain handler passed to
// _dispatch_queue_class_invoke(); returns the wakeup target and updates
// *owned with the state bits still held by the drainer.
typedef dispatch_queue_wakeup_target_t
_dispatch_queue_class_invoke_handler_t(dispatch_object_t,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t,
		uint64_t *owned);
1761
// Common drive loop for draining a queue-like object: acquires the drain
// lock (unless draining a WLH, which arrives pre-owned), runs the
// class-specific `invoke` handler, then either unlocks, re-drains, or
// re-enqueues the object on the wakeup target it returned.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_class_invoke(dispatch_object_t dou,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
		dispatch_invoke_flags_t const_restrict_flags,
		_dispatch_queue_class_invoke_handler_t invoke)
{
	dispatch_queue_t dq = dou._dq;
	dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE;
	bool owning = !(flags & DISPATCH_INVOKE_STEALING);
	uint64_t owned = 0;

	// When called from a plain _dispatch_queue_drain:
	// overriding = false
	// owning = true
	//
	// When called from an override continuation:
	// overriding = true
	// owning depends on whether the override embedded the queue or steals

	if (!(flags & (DISPATCH_INVOKE_STEALING | DISPATCH_INVOKE_WLH))) {
		dq->do_next = DISPATCH_OBJECT_LISTLESS;
	}
	flags |= const_restrict_flags;
	if (likely(flags & DISPATCH_INVOKE_WLH)) {
		// WLH drains arrive with the serial-drain ownership pre-acquired.
		owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED | DISPATCH_QUEUE_ENQUEUED;
	} else {
		owned = _dispatch_queue_drain_try_lock(dq, flags);
	}
	if (likely(owned)) {
		dispatch_priority_t old_dbp;
		if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
			old_dbp = _dispatch_set_basepri(dq->dq_priority);
		} else {
			old_dbp = 0;
		}

		flags = _dispatch_queue_merge_autorelease_frequency(dq, flags);
attempt_running_slow_head:
#if DISPATCH_COCOA_COMPAT
		if ((flags & DISPATCH_INVOKE_WLH) &&
				!(flags & DISPATCH_INVOKE_AUTORELEASE_ALWAYS)) {
			_dispatch_last_resort_autorelease_pool_push(dic);
		}
#endif // DISPATCH_COCOA_COMPAT
		tq = invoke(dq, dic, flags, &owned);
#if DISPATCH_COCOA_COMPAT
		if ((flags & DISPATCH_INVOKE_WLH) &&
				!(flags & DISPATCH_INVOKE_AUTORELEASE_ALWAYS)) {
			dispatch_thread_frame_s dtf;
			_dispatch_thread_frame_push(&dtf, dq);
			_dispatch_last_resort_autorelease_pool_pop(dic);
			_dispatch_thread_frame_pop(&dtf);
		}
#endif // DISPATCH_COCOA_COMPAT
		dispatch_assert(tq != DISPATCH_QUEUE_WAKEUP_TARGET);
		if (unlikely(tq != DISPATCH_QUEUE_WAKEUP_NONE &&
				tq != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT)) {
			// Either dc is set, which is a deferred invoke case
			//
			// or only tq is and it means a reenqueue is required, because of:
			// a retarget, a suspension, or a width change.
			//
			// In both cases, we want to bypass the check for DIRTY.
			// That may cause us to leave DIRTY in place but all drain lock
			// acquirers clear it
		} else if (!_dispatch_queue_drain_try_unlock(dq, owned,
				tq == DISPATCH_QUEUE_WAKEUP_NONE)) {
			// DIRTY was set while draining: keep the lock and drain again.
			tq = _dispatch_queue_get_current();
			if (dx_hastypeflag(tq, QUEUE_ROOT) || !owning) {
				goto attempt_running_slow_head;
			}
			DISPATCH_COMPILER_CAN_ASSUME(tq != DISPATCH_QUEUE_WAKEUP_NONE);
		} else {
			// Clean unlock: nothing left to give back or re-enqueue.
			owned = 0;
			tq = NULL;
		}
		if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
			_dispatch_reset_basepri(old_dbp);
		}
	}
	if (likely(owning)) {
		_dispatch_introspection_queue_item_complete(dq);
	}

	if (tq) {
		if (const_restrict_flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS) {
			dispatch_assert(dic->dic_deferred == NULL);
		} else if (dic->dic_deferred) {
			// A sync waiter was deferred: hand the drain over to it.
			return _dispatch_queue_drain_sync_waiter(dq, dic,
					flags, owned);
		}

		uint64_t old_state, new_state, enqueued = DISPATCH_QUEUE_ENQUEUED;
		if (tq == DISPATCH_QUEUE_WAKEUP_MGR) {
			enqueued = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
		}
		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
			new_state = old_state - owned;
			new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
			new_state |= DISPATCH_QUEUE_DIRTY;
			if (_dq_state_is_runnable(new_state) &&
					!_dq_state_is_enqueued(new_state)) {
				// drain was not interrupted for suspension
				// we will reenqueue right away, just put ENQUEUED back
				new_state |= enqueued;
			}
		});
		old_state -= owned;
		if (_dq_state_received_override(old_state)) {
			// Ensure that the root queue sees that this thread was overridden.
			_dispatch_set_basepri_override_qos(_dq_state_max_qos(new_state));
		}
		if ((old_state ^ new_state) & enqueued) {
			dispatch_assert(_dq_state_is_enqueued(new_state));
			return _dispatch_queue_push_queue(tq, dq, new_state);
		}
	}

	_dispatch_release_2_tailcall(dq);
}
1883
// Returns true when the object's item list is non-empty (work is pending).
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_class_probe(dispatch_queue_class_t dqu)
{
	struct dispatch_object_s *tail;
	// seq_cst wrt atomic store to dq_state <rdar://problem/14637483>
	// seq_cst wrt atomic store to dq_flags <rdar://problem/22623242>
	tail = os_atomic_load2o(dqu._oq, oq_items_tail, ordered);
	return unlikely(tail != NULL);
}
1894
// Returns true when `dq` points into the static root-queue array.
DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline bool
_dispatch_is_in_root_queues_array(dispatch_queue_t dq)
{
	return (dq >= _dispatch_root_queues) &&
			(dq < _dispatch_root_queues + _DISPATCH_ROOT_QUEUE_IDX_COUNT);
}
1902
// Maps (qos, overcommit) to the matching global root queue. Root queues
// are laid out in pairs per QoS level:
// index 2*(qos-1) is non-overcommit, 2*(qos-1)+1 is overcommit.
DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline dispatch_queue_t
_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit)
{
	if (unlikely(qos == DISPATCH_QOS_UNSPECIFIED || qos > DISPATCH_QOS_MAX)) {
		DISPATCH_CLIENT_CRASH(qos, "Corrupted priority");
	}
	return &_dispatch_root_queues[2 * (qos - 1) + overcommit];
}
1912
// Records the current thread as the drain owner of a thread-bound queue.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_set_bound_thread(dispatch_queue_t dq)
{
	// Tag thread-bound queues with the owning thread
	dispatch_assert(_dispatch_queue_is_thread_bound(dq));
	uint64_t old_state, new_state;
	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
		new_state = old_state;
		new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK;
		new_state |= _dispatch_lock_value_for_self();
	});
}
1926
// Clears the thread-bound flags and the drain owner of a bound queue.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_clear_bound_thread(dispatch_queue_t dq)
{
	dispatch_assert(_dispatch_queue_is_thread_bound(dq));
	_dispatch_queue_atomic_flags_clear(dq, DQF_THREAD_BOUND|DQF_CANNOT_TRYSYNC);
	os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DRAIN_OWNER_MASK, relaxed);
}
1935
// Returns the per-thread pthread-root-queue observer hooks from TSD
// (NULL when none are installed).
DISPATCH_ALWAYS_INLINE
static inline dispatch_pthread_root_queue_observer_hooks_t
_dispatch_get_pthread_root_queue_observer_hooks(void)
{
	return _dispatch_thread_getspecific(
			dispatch_pthread_root_queue_observer_hooks_key);
}
1943
// Installs the per-thread pthread-root-queue observer hooks in TSD.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_pthread_root_queue_observer_hooks(
		dispatch_pthread_root_queue_observer_hooks_t observer_hooks)
{
	_dispatch_thread_setspecific(dispatch_pthread_root_queue_observer_hooks_key,
			observer_hooks);
}
1952
1953 #pragma mark -
1954 #pragma mark dispatch_priority
1955
// Returns the current thread's base priority from TSD
// (always 0 when QoS support is compiled out).
DISPATCH_ALWAYS_INLINE
static inline dispatch_priority_t
_dispatch_get_basepri(void)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	return (dispatch_priority_t)(uintptr_t)_dispatch_thread_getspecific(
			dispatch_basepri_key);
#else
	return 0;
#endif
}
1967
// Restores a previously saved base priority, preserving any override QoS
// that was accumulated on this thread in the meantime.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_reset_basepri(dispatch_priority_t dbp)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	dispatch_priority_t old_dbp = _dispatch_get_basepri();
	// If an inner-loop or'd in the override flag to the per-thread priority,
	// it needs to be propagated up the chain.
	dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
	dbp |= (old_dbp & DISPATCH_PRIORITY_OVERRIDE_MASK);
	_dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
#else
	(void)dbp;
#endif
}
1983
1984 DISPATCH_ALWAYS_INLINE
1985 static inline dispatch_qos_t
1986 _dispatch_get_basepri_override_qos_floor(void)
1987 {
1988 dispatch_priority_t dbp = _dispatch_get_basepri();
1989 dispatch_qos_t qos = _dispatch_priority_qos(dbp);
1990 dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp);
1991 dispatch_qos_t qos_floor = MAX(qos, oqos);
1992 return qos_floor ? qos_floor : DISPATCH_QOS_SATURATED;
1993 }
1994
1995 DISPATCH_ALWAYS_INLINE
1996 static inline void
1997 _dispatch_set_basepri_override_qos(dispatch_qos_t qos)
1998 {
1999 #if HAVE_PTHREAD_WORKQUEUE_QOS
2000 dispatch_priority_t dbp = _dispatch_get_basepri();
2001 if (_dispatch_priority_override_qos(dbp) >= qos) return;
2002 dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
2003 dbp |= qos << DISPATCH_PRIORITY_OVERRIDE_SHIFT;
2004 _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
2005 #else
2006 (void)qos;
2007 #endif
2008 }
2009
2010 DISPATCH_ALWAYS_INLINE
2011 static inline bool
2012 _dispatch_reset_basepri_override(void)
2013 {
2014 #if HAVE_PTHREAD_WORKQUEUE_QOS
2015 dispatch_priority_t dbp = _dispatch_get_basepri();
2016 dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp);
2017 if (oqos) {
2018 dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
2019 _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
2020 return oqos != DISPATCH_QOS_SATURATED;
2021 }
2022 #endif
2023 return false;
2024 }
2025
DISPATCH_ALWAYS_INLINE
static inline dispatch_priority_t
_dispatch_set_basepri(dispatch_priority_t dbp)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	// Install a new base priority for this thread and return the previous
	// one (callers pass the return value to _dispatch_reset_basepri()).
	const dispatch_priority_t preserved_mask =
			DISPATCH_PRIORITY_OVERRIDE_MASK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
	dispatch_priority_t old_dbp = _dispatch_get_basepri();
	if (old_dbp) {
		dispatch_priority_t flags, defaultqueue, basepri;
		flags = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE);
		defaultqueue = (old_dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE);
		basepri = old_dbp & DISPATCH_PRIORITY_REQUESTED_MASK;
		dbp &= DISPATCH_PRIORITY_REQUESTED_MASK;
		if (!dbp) {
			// No requested priority: inherit the previous base priority.
			flags = DISPATCH_PRIORITY_FLAG_INHERIT | defaultqueue;
			dbp = basepri;
		} else if (dbp < basepri && !defaultqueue) { // rdar://16349734
			// The previous base priority acts as a floor, except when the
			// old priority targeted the default root queue.
			dbp = basepri;
		}
		// Carry over the override and overcommit bits from the old value.
		dbp |= flags | (old_dbp & preserved_mask);
	} else {
		dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
	}
	_dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
	return old_dbp;
#else
	(void)dbp;
	return 0;
#endif
}
2057
DISPATCH_ALWAYS_INLINE
static inline dispatch_priority_t
_dispatch_set_basepri_wlh(dispatch_priority_t dbp)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	// Install dbp as the thread's base priority with the override QoS
	// pre-saturated; only valid when no base priority is set yet.
	dispatch_assert(!_dispatch_get_basepri());
	// _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED)
	dbp |= DISPATCH_QOS_SATURATED << DISPATCH_PRIORITY_OVERRIDE_SHIFT;
	_dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
#else
	(void)dbp;
#endif
	// Always returns 0: there was no previous base priority to restore.
	return 0;
}
2072
2073 DISPATCH_ALWAYS_INLINE
2074 static inline pthread_priority_t
2075 _dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags)
2076 {
2077 #if HAVE_PTHREAD_WORKQUEUE_QOS
2078 dispatch_priority_t inherited, defaultqueue, dbp = _dispatch_get_basepri();
2079 pthread_priority_t basepp = _dispatch_priority_to_pp_strip_flags(dbp);
2080 bool enforce = (flags & DISPATCH_PRIORITY_ENFORCE) ||
2081 (pp & _PTHREAD_PRIORITY_ENFORCE_FLAG);
2082 inherited = (dbp & DISPATCH_PRIORITY_FLAG_INHERIT);
2083 defaultqueue = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE);
2084 pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
2085
2086 if (!pp) {
2087 return basepp;
2088 } else if (defaultqueue) { // rdar://16349734
2089 return pp;
2090 } else if (pp < basepp) {
2091 return basepp;
2092 } else if (enforce || inherited) {
2093 return pp;
2094 } else {
2095 return basepp;
2096 }
2097 #else
2098 (void)pp; (void)flags;
2099 return 0;
2100 #endif
2101 }
2102
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
		dispatch_queue_t tq)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	// Recompute dq's priority when (re)targeting tq: a queue with no
	// requested QoS (or with an inherited one) that targets a root queue
	// inherits the root queue's priority bits.
	const dispatch_priority_t rootqueue_flag = DISPATCH_PRIORITY_FLAG_ROOTQUEUE;
	const dispatch_priority_t inherited_flag = DISPATCH_PRIORITY_FLAG_INHERIT;
	const dispatch_priority_t defaultqueue_flag =
			DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;
	dispatch_priority_t pri = dq->dq_priority, tpri = tq->dq_priority;

	if ((!_dispatch_priority_qos(pri) || (pri & inherited_flag)) &&
			(tpri & rootqueue_flag)) {
		if (_dispatch_priority_override_qos(pri) == DISPATCH_QOS_SATURATED) {
			// Keep only the (saturated) override bits.
			pri &= DISPATCH_PRIORITY_OVERRIDE_MASK;
		} else {
			pri = 0;
		}
		if (tpri & defaultqueue_flag) {
			// <rdar://problem/32921639> base queues need to know they target
			// the default root queue so that _dispatch_queue_override_qos()
			// in _dispatch_queue_class_wakeup() can fallback to QOS_DEFAULT
			// if no other priority was provided.
			pri |= defaultqueue_flag;
		} else {
			pri |= (tpri & ~rootqueue_flag) | inherited_flag;
		}
		dq->dq_priority = pri;
	} else if (pri & defaultqueue_flag) {
		// the DEFAULTQUEUE flag is only set on queues due to the code above,
		// and must never be kept if we don't target a global root queue.
		dq->dq_priority = (pri & ~defaultqueue_flag);
	}
#else
	(void)dq; (void)tq;
#endif
}
2141
2142 DISPATCH_ALWAYS_INLINE
2143 static inline dispatch_priority_t
2144 _dispatch_priority_inherit_from_root_queue(dispatch_priority_t pri,
2145 dispatch_queue_t rq)
2146 {
2147 #if HAVE_PTHREAD_WORKQUEUE_QOS
2148 dispatch_priority_t p = pri & DISPATCH_PRIORITY_REQUESTED_MASK;
2149 dispatch_priority_t rqp = rq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK;
2150 dispatch_priority_t defaultqueue =
2151 rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;
2152
2153 if (!p || (!defaultqueue && p < rqp)) {
2154 p = rqp | defaultqueue;
2155 }
2156 return p | (rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT);
2157 #else
2158 (void)rq; (void)pri;
2159 return 0;
2160 #endif
2161 }
2162
2163 DISPATCH_ALWAYS_INLINE
2164 static inline pthread_priority_t
2165 _dispatch_get_priority(void)
2166 {
2167 #if HAVE_PTHREAD_WORKQUEUE_QOS
2168 pthread_priority_t pp = (uintptr_t)
2169 _dispatch_thread_getspecific(dispatch_priority_key);
2170 return pp;
2171 #else
2172 return 0;
2173 #endif
2174 }
2175
#if HAVE_PTHREAD_WORKQUEUE_QOS
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_compute_update(pthread_priority_t pp)
{
	// Returns the priority to install for the current thread, or 0 when
	// no update is needed.
	dispatch_assert(pp != DISPATCH_NO_PRIORITY);
	if (!_dispatch_set_qos_class_enabled) return 0;
	// the priority in _dispatch_get_priority() only tracks manager-ness
	// and overcommit, which is inherited from the current value for each update
	// however if the priority had the NEEDS_UNBIND flag set we need to clear it
	// the first chance we get
	//
	// the manager bit is invalid input, but we keep it to get meaningful
	// assertions in _dispatch_set_priority_and_voucher_slow()
	pp &= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
	pthread_priority_t cur_priority = _dispatch_get_priority();
	pthread_priority_t unbind = _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
	pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	if (unlikely(cur_priority & unbind)) {
		// else we always need an update if the NEEDS_UNBIND flag is set
		// the slow path in _dispatch_set_priority_and_voucher_slow() will
		// adjust the priority further with the proper overcommitness
		return pp ? pp : (cur_priority & ~unbind);
	} else {
		// overcommit is inherited from the current value, so ignore it
		// when comparing
		cur_priority &= ~overcommit;
	}
	if (unlikely(pp != cur_priority)) return pp;
	return 0;
}
#endif
2206
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline voucher_t
_dispatch_set_priority_and_voucher(pthread_priority_t pp,
		voucher_t v, dispatch_thread_set_self_t flags)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	// Apply a priority and voucher to the current thread, returning the
	// voucher per the CONSUME/REPLACE semantics requested by `flags`.
	pp = _dispatch_priority_compute_update(pp);
	if (likely(!pp)) {
		// No priority change needed; try to handle the voucher without
		// taking the slow path.
		if (v == DISPATCH_NO_VOUCHER) {
			return DISPATCH_NO_VOUCHER;
		}
		if (likely(v == _voucher_get())) {
			// The voucher is already adopted: only refcounts need to be
			// fixed up to honor the requested flags.
			bool retained = flags & DISPATCH_VOUCHER_CONSUME;
			if (flags & DISPATCH_VOUCHER_REPLACE) {
				if (retained && v) _voucher_release_no_dispose(v);
				v = DISPATCH_NO_VOUCHER;
			} else {
				if (!retained && v) _voucher_retain(v);
			}
			return v;
		}
	}
	return _dispatch_set_priority_and_voucher_slow(pp, v, flags);
#else
	(void)pp; (void)v; (void)flags;
	return DISPATCH_NO_VOUCHER;
#endif
}
2235
2236 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
2237 static inline voucher_t
2238 _dispatch_adopt_priority_and_set_voucher(pthread_priority_t pp,
2239 voucher_t v, dispatch_thread_set_self_t flags)
2240 {
2241 pthread_priority_t p = 0;
2242 if (pp != DISPATCH_NO_PRIORITY) {
2243 p = _dispatch_priority_adopt(pp, flags);
2244 }
2245 return _dispatch_set_priority_and_voucher(p, v, flags);
2246 }
2247
2248 DISPATCH_ALWAYS_INLINE
2249 static inline void
2250 _dispatch_reset_priority_and_voucher(pthread_priority_t pp, voucher_t v)
2251 {
2252 if (pp == DISPATCH_NO_PRIORITY) pp = 0;
2253 (void)_dispatch_set_priority_and_voucher(pp, v,
2254 DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE);
2255 }
2256
2257 DISPATCH_ALWAYS_INLINE
2258 static inline void
2259 _dispatch_reset_voucher(voucher_t v, dispatch_thread_set_self_t flags)
2260 {
2261 flags |= DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE;
2262 (void)_dispatch_set_priority_and_voucher(0, v, flags);
2263 }
2264
2265 DISPATCH_ALWAYS_INLINE
2266 static inline bool
2267 _dispatch_queue_need_override(dispatch_queue_class_t dqu, dispatch_qos_t qos)
2268 {
2269 uint64_t dq_state = os_atomic_load2o(dqu._dq, dq_state, relaxed);
2270 // dq_priority "override qos" contains the priority at which the queue
2271 // is already running for thread-bound queues.
2272 // For non thread-bound queues, the qos of the queue may not be observed
2273 // when the first work item is dispatched synchronously.
2274 return _dq_state_max_qos(dq_state) < qos &&
2275 _dispatch_priority_override_qos(dqu._dq->dq_priority) < qos;
2276 }
2277
2278 DISPATCH_ALWAYS_INLINE
2279 static inline bool
2280 _dispatch_queue_need_override_retain(dispatch_queue_class_t dqu,
2281 dispatch_qos_t qos)
2282 {
2283 if (_dispatch_queue_need_override(dqu, qos)) {
2284 _os_object_retain_internal_n_inline(dqu._oq->_as_os_obj, 2);
2285 return true;
2286 }
2287 return false;
2288 }
2289
2290 DISPATCH_ALWAYS_INLINE
2291 static inline dispatch_qos_t
2292 _dispatch_queue_override_qos(dispatch_queue_class_t dqu, dispatch_qos_t qos)
2293 {
2294 if (dqu._oq->oq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) {
2295 // queues targeting the default root queue use any asynchronous
2296 // workitem priority available and fallback to QOS_DEFAULT otherwise.
2297 return qos ? qos : DISPATCH_QOS_DEFAULT;
2298 }
2299 // for asynchronous workitems, queue priority is the floor for overrides
2300 return MAX(qos, _dispatch_priority_qos(dqu._oq->oq_priority));
2301 }
2302
// Flags for _dispatch_priority_compute_propagated().
#define DISPATCH_PRIORITY_PROPAGATE_CURRENT 0x1
#define DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC 0x2
2305
2306 DISPATCH_ALWAYS_INLINE
2307 static inline pthread_priority_t
2308 _dispatch_priority_compute_propagated(pthread_priority_t pp,
2309 unsigned int flags)
2310 {
2311 #if HAVE_PTHREAD_WORKQUEUE_QOS
2312 if (flags & DISPATCH_PRIORITY_PROPAGATE_CURRENT) {
2313 pp = _dispatch_get_priority();
2314 }
2315 pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
2316 if (!(flags & DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC) &&
2317 pp > _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED)) {
2318 // Cap QOS for propagation at user-initiated <rdar://16681262&16998036>
2319 return _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED);
2320 }
2321 return pp;
2322 #else
2323 (void)pp; (void)flags;
2324 return 0;
2325 #endif
2326 }
2327
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_propagate(void)
{
	// Priority to propagate from the current thread to asynchronous work
	// (capped inside _dispatch_priority_compute_propagated()).
	return _dispatch_priority_compute_propagated(0,
			DISPATCH_PRIORITY_PROPAGATE_CURRENT);
}
2335
2336 // including maintenance
2337 DISPATCH_ALWAYS_INLINE
2338 static inline bool
2339 _dispatch_is_background_thread(void)
2340 {
2341 #if HAVE_PTHREAD_WORKQUEUE_QOS
2342 pthread_priority_t pp = _dispatch_get_priority();
2343 return _dispatch_qos_is_background(_dispatch_qos_from_pp(pp));
2344 #else
2345 return false;
2346 #endif
2347 }
2348
2349 #pragma mark -
2350 #pragma mark dispatch_block_t
2351
2352 #ifdef __BLOCKS__
2353
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_block_has_private_data(const dispatch_block_t block)
{
	// Blocks with dispatch private data use a dedicated invoke function;
	// comparing invoke pointers identifies them.
	extern void (*_dispatch_block_special_invoke)(void*);
	return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke);
}
2361
2362 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
2363 static inline pthread_priority_t
2364 _dispatch_block_invoke_should_set_priority(dispatch_block_flags_t flags,
2365 pthread_priority_t new_pri)
2366 {
2367 pthread_priority_t old_pri, p = 0; // 0 means do not change priority.
2368 if ((flags & DISPATCH_BLOCK_HAS_PRIORITY)
2369 && ((flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) ||
2370 !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS))) {
2371 old_pri = _dispatch_get_priority();
2372 new_pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
2373 p = old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK;
2374 if (!p || p >= new_pri) p = 0;
2375 }
2376 return p;
2377 }
2378
DISPATCH_ALWAYS_INLINE
static inline dispatch_block_private_data_t
_dispatch_block_get_data(const dispatch_block_t db)
{
	// Return the private data captured behind a dispatch block object,
	// or NULL for plain blocks. Crashes on a corrupted magic value.
	if (!_dispatch_block_has_private_data(db)) {
		return NULL;
	}
	// Keep in sync with _dispatch_block_create implementation
	uint8_t *x = (uint8_t *)db;
	// x points to base of struct Block_layout
	x += sizeof(struct Block_layout);
	// x points to base of captured dispatch_block_private_data_s object
	dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x;
	if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) {
		DISPATCH_CLIENT_CRASH(dbpd->dbpd_magic,
				"Corruption of dispatch block object");
	}
	return dbpd;
}
2398
2399 DISPATCH_ALWAYS_INLINE
2400 static inline pthread_priority_t
2401 _dispatch_block_get_priority(const dispatch_block_t db)
2402 {
2403 dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
2404 return dbpd ? dbpd->dbpd_priority : 0;
2405 }
2406
2407 DISPATCH_ALWAYS_INLINE
2408 static inline dispatch_block_flags_t
2409 _dispatch_block_get_flags(const dispatch_block_t db)
2410 {
2411 dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
2412 return dbpd ? dbpd->dbpd_flags : 0;
2413 }
2414
2415 #endif
2416
2417 #pragma mark -
2418 #pragma mark dispatch_continuation_t
2419
2420 DISPATCH_ALWAYS_INLINE
2421 static inline dispatch_continuation_t
2422 _dispatch_continuation_alloc_cacheonly(void)
2423 {
2424 dispatch_continuation_t dc = (dispatch_continuation_t)
2425 _dispatch_thread_getspecific(dispatch_cache_key);
2426 if (likely(dc)) {
2427 _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
2428 }
2429 return dc;
2430 }
2431
2432 DISPATCH_ALWAYS_INLINE
2433 static inline dispatch_continuation_t
2434 _dispatch_continuation_alloc(void)
2435 {
2436 dispatch_continuation_t dc =
2437 _dispatch_continuation_alloc_cacheonly();
2438 if (unlikely(!dc)) {
2439 return _dispatch_continuation_alloc_from_heap();
2440 }
2441 return dc;
2442 }
2443
2444 DISPATCH_ALWAYS_INLINE
2445 static inline dispatch_continuation_t
2446 _dispatch_continuation_free_cacheonly(dispatch_continuation_t dc)
2447 {
2448 dispatch_continuation_t prev_dc = (dispatch_continuation_t)
2449 _dispatch_thread_getspecific(dispatch_cache_key);
2450 int cnt = prev_dc ? prev_dc->dc_cache_cnt + 1 : 1;
2451 // Cap continuation cache
2452 if (unlikely(cnt > _dispatch_continuation_cache_limit)) {
2453 return dc;
2454 }
2455 dc->do_next = prev_dc;
2456 dc->dc_cache_cnt = cnt;
2457 _dispatch_thread_setspecific(dispatch_cache_key, dc);
2458 return NULL;
2459 }
2460
2461 DISPATCH_ALWAYS_INLINE
2462 static inline void
2463 _dispatch_continuation_free(dispatch_continuation_t dc)
2464 {
2465 dc = _dispatch_continuation_free_cacheonly(dc);
2466 if (unlikely(dc)) {
2467 _dispatch_continuation_free_to_cache_limit(dc);
2468 }
2469 }
2470
2471 #include "trace.h"
2472
2473 DISPATCH_ALWAYS_INLINE
2474 static inline void
2475 _dispatch_continuation_with_group_invoke(dispatch_continuation_t dc)
2476 {
2477 struct dispatch_object_s *dou = dc->dc_data;
2478 unsigned long type = dx_type(dou);
2479 if (type == DISPATCH_GROUP_TYPE) {
2480 _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
2481 _dispatch_introspection_queue_item_complete(dou);
2482 dispatch_group_leave((dispatch_group_t)dou);
2483 } else {
2484 DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type");
2485 }
2486 }
2487
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov,
		dispatch_invoke_flags_t flags)
{
	// Run a vtable-less continuation: adopt its voucher, recycle it into
	// the per-thread cache, then perform the client callout.
	dispatch_continuation_t dc = dou._dc, dc1;
	dispatch_invoke_with_autoreleasepool(flags, {
		uintptr_t dc_flags = dc->dc_flags;
		// Add the item back to the cache before calling the function. This
		// allows the 'hot' continuation to be used for a quick callback.
		//
		// The ccache version is per-thread.
		// Therefore, the object has not been reused yet.
		// This generates better assembly.
		_dispatch_continuation_voucher_adopt(dc, ov, dc_flags);
		if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
			dc1 = _dispatch_continuation_free_cacheonly(dc);
		} else {
			dc1 = NULL;
		}
		if (unlikely(dc_flags & DISPATCH_OBJ_GROUP_BIT)) {
			// Group continuation: also leaves the group after the callout.
			_dispatch_continuation_with_group_invoke(dc);
		} else {
			_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
			_dispatch_introspection_queue_item_complete(dou);
		}
		if (unlikely(dc1)) {
			// The cache was full; free the consumed continuation via the
			// slow path.
			_dispatch_continuation_free_to_cache_limit(dc1);
		}
	});
	_dispatch_perfmon_workitem_inc();
}
2520
DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_continuation_pop_inline(dispatch_object_t dou,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
		dispatch_queue_t dq)
{
	// Pop and invoke one work item from dq, bracketing the invocation with
	// the pthread-root-queue observer hooks when they are installed.
	dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
			_dispatch_get_pthread_root_queue_observer_hooks();
	if (observer_hooks) observer_hooks->queue_will_execute(dq);
	_dispatch_trace_continuation_pop(dq, dou);
	flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
	if (_dispatch_object_has_vtable(dou)) {
		// Objects with a vtable invoke themselves.
		dx_invoke(dou._do, dic, flags);
	} else {
		_dispatch_continuation_invoke_inline(dou, DISPATCH_NO_VOUCHER, flags);
	}
	if (observer_hooks) observer_hooks->queue_did_execute(dq);
}
2539
// used to forward the do_invoke of a continuation with a vtable to its real
// implementation.
// Adopts the voucher, optionally frees the continuation to the per-thread
// cache (DISPATCH_OBJ_CONSUME_BIT), runs __VA_ARGS__ as the real invoke
// body, then completes introspection and spills any uncached continuation.
#define _dispatch_continuation_pop_forwarded(dc, ov, dc_flags, ...) \
	({ \
		dispatch_continuation_t _dc = (dc), _dc1; \
		uintptr_t _dc_flags = (dc_flags); \
		_dispatch_continuation_voucher_adopt(_dc, ov, _dc_flags); \
		if (_dc_flags & DISPATCH_OBJ_CONSUME_BIT) { \
			_dc1 = _dispatch_continuation_free_cacheonly(_dc); \
		} else { \
			_dc1 = NULL; \
		} \
		__VA_ARGS__; \
		_dispatch_introspection_queue_item_complete(_dc); \
		if (unlikely(_dc1)) { \
			_dispatch_continuation_free_to_cache_limit(_dc1); \
		} \
	})
2558
2559 DISPATCH_ALWAYS_INLINE
2560 static inline void
2561 _dispatch_continuation_priority_set(dispatch_continuation_t dc,
2562 pthread_priority_t pp, dispatch_block_flags_t flags)
2563 {
2564 #if HAVE_PTHREAD_WORKQUEUE_QOS
2565 if (likely(!(flags & DISPATCH_BLOCK_HAS_PRIORITY))) {
2566 pp = _dispatch_priority_propagate();
2567 }
2568 if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) {
2569 pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
2570 }
2571 dc->dc_priority = pp;
2572 #else
2573 (void)dc; (void)pp; (void)flags;
2574 #endif
2575 }
2576
DISPATCH_ALWAYS_INLINE
static inline dispatch_qos_t
_dispatch_continuation_override_qos(dispatch_queue_t dq,
		dispatch_continuation_t dc)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
	// QoS the continuation should run at on dq: the item's own QoS wins
	// when it is enforced, when the queue has no QoS of its own, or when
	// the queue targets the default root queue; otherwise the queue's QoS
	// wins.
	dispatch_qos_t dc_qos = _dispatch_qos_from_pp(dc->dc_priority);
	bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG;
	dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority);
	bool defaultqueue = dq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;

	dispatch_assert(dc->dc_priority != DISPATCH_NO_PRIORITY);
	if (dc_qos && (enforce || !dq_qos || defaultqueue)) {
		return dc_qos;
	}
	return dq_qos;
#else
	(void)dq; (void)dc;
	return 0;
#endif
}
2598
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_init_f(dispatch_continuation_t dc,
		dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t func,
		pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags)
{
	// Initialize a continuation around a plain function/context pair,
	// capturing the voucher and priority to apply at invoke time.
	dc->dc_flags = dc_flags;
	dc->dc_func = func;
	dc->dc_ctxt = ctxt;
	_dispatch_continuation_voucher_set(dc, dqu, flags);
	_dispatch_continuation_priority_set(dc, pp, flags);
}
2611
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_init(dispatch_continuation_t dc,
		dispatch_queue_class_t dqu, dispatch_block_t work,
		pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags)
{
	// Initialize a continuation around a block; the block is copied here
	// and the continuation owns the copy.
	dc->dc_flags = dc_flags | DISPATCH_OBJ_BLOCK_BIT;
	dc->dc_ctxt = _dispatch_Block_copy(work);
	_dispatch_continuation_priority_set(dc, pp, flags);

	if (unlikely(_dispatch_block_has_private_data(work))) {
		// always sets dc_func & dc_voucher
		// may update dc_priority & do_vtable
		return _dispatch_continuation_init_slow(dc, dqu, flags);
	}

	if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
		// Single-shot: the invoke releases the block copy.
		dc->dc_func = _dispatch_call_block_and_release;
	} else {
		dc->dc_func = _dispatch_Block_invoke(work);
	}
	_dispatch_continuation_voucher_set(dc, dqu, flags);
}
2635
2636 #if HAVE_MACH
2637 #pragma mark dispatch_mach_reply_refs_t
2638
// Tag bit stored in the reply port name kept in du_ident
// (assumes low bit of mach port names is always set).
#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u
2641
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr)
{
	// Clear the UNOWNED tag bit from the stored reply port name.
	dmr->du_ident &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED;
}
2648
2649 DISPATCH_ALWAYS_INLINE
2650 static inline bool
2651 _dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr)
2652 {
2653 mach_port_t reply_port = (mach_port_t)dmr->du_ident;
2654 return reply_port ? !(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED) :false;
2655 }
2656
2657 DISPATCH_ALWAYS_INLINE
2658 static inline mach_port_t
2659 _dispatch_mach_reply_get_reply_port(mach_port_t reply_port)
2660 {
2661 return reply_port ? (reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0;
2662 }
2663
2664 #endif // HAVE_MACH
2665
2666 #endif // DISPATCH_PURE_C
2667
2668 #endif /* __DISPATCH_INLINE_INTERNAL__ */