1 /*
2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 /*
22 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
23 * which are subject to change in future releases of Mac OS X. Any applications
24 * relying on these interfaces WILL break.
25 */
26
27 #ifndef __DISPATCH_INLINE_INTERNAL__
28 #define __DISPATCH_INLINE_INTERNAL__
29
30 #ifndef __DISPATCH_INDIRECT__
31 #error "Please #include <dispatch/dispatch.h> instead of this file directly."
32 #include <dispatch/base.h> // for HeaderDoc
33 #endif
34
35 #if DISPATCH_USE_CLIENT_CALLOUT
36
37 DISPATCH_NOTHROW void
38 _dispatch_client_callout(void *ctxt, dispatch_function_t f);
39 DISPATCH_NOTHROW void
40 _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t));
41 #if HAVE_MACH
42 DISPATCH_NOTHROW void
43 _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
44 dispatch_mach_msg_t dmsg, mach_error_t error,
45 dispatch_mach_handler_function_t f);
46 #endif // HAVE_MACH
47
48 #else // !DISPATCH_USE_CLIENT_CALLOUT
49
50 DISPATCH_ALWAYS_INLINE
51 static inline void
52 _dispatch_client_callout(void *ctxt, dispatch_function_t f)
53 {
54 return f(ctxt);
55 }
56
57 DISPATCH_ALWAYS_INLINE
58 static inline void
59 _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
60 {
61 return f(ctxt, i);
62 }
63
64 #if HAVE_MACH
65 DISPATCH_ALWAYS_INLINE
66 static inline void
67 _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
68 dispatch_mach_msg_t dmsg, mach_error_t error,
69 dispatch_mach_handler_function_t f)
70 {
71 return f(ctxt, reason, dmsg, error);
72 }
73 #endif // HAVE_MACH
74
75 #endif // !DISPATCH_USE_CLIENT_CALLOUT
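
/*
 * Illustrative sketch (not part of the original header): how a hypothetical
 * drainer routes client work through the callout layer declared above. Only
 * _dispatch_client_callout() and dispatch_function_t come from libdispatch;
 * example_work() and example_invoke_one() are made-up names.
 *
 *     static void
 *     example_work(void *ctxt)
 *     {
 *         (void)ctxt; // client code runs here, behind the callout frame
 *     }
 *
 *     static void
 *     example_invoke_one(void *ctxt)
 *     {
 *         // tail-calls example_work(ctxt) directly when
 *         // DISPATCH_USE_CLIENT_CALLOUT is 0, otherwise goes through the
 *         // out-of-line wrapper declared above
 *         _dispatch_client_callout(ctxt, example_work);
 *     }
 */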
76
77 #pragma mark -
78 #pragma mark _os_object_t & dispatch_object_t
79 #if DISPATCH_PURE_C
80
81 DISPATCH_ALWAYS_INLINE
82 static inline bool
83 _dispatch_object_has_vtable(dispatch_object_t dou)
84 {
85 uintptr_t dc_flags = dou._dc->dc_flags;
86
87 // vtables are pointers far away from the low page in memory
88 return dc_flags > 0xffful;
89 }
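
/*
 * Layout sketch behind the check above: the first word of a heap-allocated
 * dispatch object is its vtable pointer, which always points well above the
 * low page, while the first word of a continuation overlays dc_flags, a
 * small combination of bits such as DISPATCH_OBJ_BARRIER_BIT. Comparing that
 * first word against 0xfff therefore tells the two apart without a separate
 * type tag.
 */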
90
91 DISPATCH_ALWAYS_INLINE
92 static inline bool
93 _dispatch_object_is_continuation(dispatch_object_t dou)
94 {
95 if (_dispatch_object_has_vtable(dou)) {
96 return dx_metatype(dou._do) == _DISPATCH_CONTINUATION_TYPE;
97 }
98 return true;
99 }
100
101 DISPATCH_ALWAYS_INLINE
102 static inline bool
103 _dispatch_object_has_type(dispatch_object_t dou, unsigned long type)
104 {
105 return _dispatch_object_has_vtable(dou) && dx_type(dou._do) == type;
106 }
107
108 DISPATCH_ALWAYS_INLINE
109 static inline bool
110 _dispatch_object_is_redirection(dispatch_object_t dou)
111 {
112 return _dispatch_object_has_type(dou,
113 DISPATCH_CONTINUATION_TYPE(ASYNC_REDIRECT));
114 }
115
116 DISPATCH_ALWAYS_INLINE
117 static inline bool
118 _dispatch_object_is_barrier(dispatch_object_t dou)
119 {
120 dispatch_queue_flags_t dq_flags;
121
122 if (!_dispatch_object_has_vtable(dou)) {
123 return (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT);
124 }
125 switch (dx_metatype(dou._do)) {
126 case _DISPATCH_QUEUE_TYPE:
127 case _DISPATCH_SOURCE_TYPE:
128 dq_flags = os_atomic_load2o(dou._dq, dq_atomic_flags, relaxed);
129 return dq_flags & DQF_BARRIER_BIT;
130 default:
131 return false;
132 }
133 }
134
135 DISPATCH_ALWAYS_INLINE
136 static inline bool
137 _dispatch_object_is_slow_item(dispatch_object_t dou)
138 {
139 if (_dispatch_object_has_vtable(dou)) {
140 return false;
141 }
142 return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT);
143 }
144
145 DISPATCH_ALWAYS_INLINE
146 static inline bool
147 _dispatch_object_is_slow_non_barrier(dispatch_object_t dou)
148 {
149 if (_dispatch_object_has_vtable(dou)) {
150 return false;
151 }
152 return ((dou._dc->dc_flags &
153 (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) ==
154 (DISPATCH_OBJ_SYNC_SLOW_BIT));
155 }
156
157 DISPATCH_ALWAYS_INLINE
158 static inline bool
159 _dispatch_object_is_slow_barrier(dispatch_object_t dou)
160 {
161 if (_dispatch_object_has_vtable(dou)) {
162 return false;
163 }
164 return ((dou._dc->dc_flags &
165 (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) ==
166 (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT));
167 }
168
169 DISPATCH_ALWAYS_INLINE
170 static inline _os_object_t
171 _os_object_retain_internal_inline(_os_object_t obj)
172 {
173 int ref_cnt = _os_object_refcnt_inc(obj);
174 if (unlikely(ref_cnt <= 0)) {
175 _OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
176 }
177 return obj;
178 }
179
180 DISPATCH_ALWAYS_INLINE
181 static inline void
182 _os_object_release_internal_inline_no_dispose(_os_object_t obj)
183 {
184 int ref_cnt = _os_object_refcnt_dec(obj);
185 if (likely(ref_cnt >= 0)) {
186 return;
187 }
188 if (ref_cnt == 0) {
189 _OS_OBJECT_CLIENT_CRASH("Unexpected release of an object");
190 }
191 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
192 }
193
194 DISPATCH_ALWAYS_INLINE
195 static inline void
196 _os_object_release_internal_inline(_os_object_t obj)
197 {
198 int ref_cnt = _os_object_refcnt_dec(obj);
199 if (likely(ref_cnt >= 0)) {
200 return;
201 }
202 if (unlikely(ref_cnt < -1)) {
203 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
204 }
205 #if DISPATCH_DEBUG
206 int xref_cnt = obj->os_obj_xref_cnt;
207 if (unlikely(xref_cnt >= 0)) {
208 DISPATCH_INTERNAL_CRASH(xref_cnt,
209 "Release while external references exist");
210 }
211 #endif
212 // _os_object_refcnt_dispose_barrier() is in _os_object_dispose()
213 return _os_object_dispose(obj);
214 }
215
216 DISPATCH_ALWAYS_INLINE_NDEBUG
217 static inline void
218 _dispatch_retain(dispatch_object_t dou)
219 {
220 (void)_os_object_retain_internal_inline(dou._os_obj);
221 }
222
223 DISPATCH_ALWAYS_INLINE_NDEBUG
224 static inline void
225 _dispatch_release(dispatch_object_t dou)
226 {
227 _os_object_release_internal_inline(dou._os_obj);
228 }
229
230 DISPATCH_ALWAYS_INLINE_NDEBUG
231 static inline void
232 _dispatch_release_tailcall(dispatch_object_t dou)
233 {
234 _os_object_release_internal(dou._os_obj);
235 }
236
237 DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL
238 static inline void
239 _dispatch_object_set_target_queue_inline(dispatch_object_t dou,
240 dispatch_queue_t tq)
241 {
242 _dispatch_retain(tq);
243 tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release);
244 if (tq) _dispatch_release(tq);
245 _dispatch_object_debug(dou._do, "%s", __func__);
246 }
247
248 #endif // DISPATCH_PURE_C
249 #pragma mark -
250 #pragma mark dispatch_thread
251 #if DISPATCH_PURE_C
252
253 #define DISPATCH_DEFERRED_ITEMS_MAGIC 0xdefe55edul /* deferred */
254 #define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 8
255 #ifdef WORKQ_KEVENT_EVENT_BUFFER_LEN
256 _Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >=
257 DISPATCH_DEFERRED_ITEMS_EVENT_COUNT,
258 "our list should not be longer than the kernel's");
259 #endif
260
261 typedef struct dispatch_deferred_items_s {
262 uint32_t ddi_magic;
263 dispatch_queue_t ddi_stashed_dq;
264 struct dispatch_object_s *ddi_stashed_dou;
265 dispatch_priority_t ddi_stashed_pp;
266 int ddi_nevents;
267 int ddi_maxevents;
268 _dispatch_kevent_qos_s ddi_eventlist[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT];
269 } dispatch_deferred_items_s, *dispatch_deferred_items_t;
270
271 DISPATCH_ALWAYS_INLINE
272 static inline void
273 _dispatch_deferred_items_set(dispatch_deferred_items_t ddi)
274 {
275 _dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi);
276 }
277
278 DISPATCH_ALWAYS_INLINE
279 static inline dispatch_deferred_items_t
280 _dispatch_deferred_items_get(void)
281 {
282 dispatch_deferred_items_t ddi = (dispatch_deferred_items_t)
283 _dispatch_thread_getspecific(dispatch_deferred_items_key);
284 if (ddi && ddi->ddi_magic == DISPATCH_DEFERRED_ITEMS_MAGIC) {
285 return ddi;
286 }
287 return NULL;
288 }
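
/*
 * Illustrative usage sketch (hypothetical caller, not from this file): a
 * drainer stack-allocates the structure, stamps the magic value so that
 * _dispatch_deferred_items_get() can validate it, publishes it in TSD, and
 * clears the slot again before returning.
 *
 *     dispatch_deferred_items_s ddi = {
 *         .ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC,
 *         .ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT,
 *     };
 *     _dispatch_deferred_items_set(&ddi);
 *     // ... work that may append kevents to ddi.ddi_eventlist ...
 *     _dispatch_deferred_items_set(NULL);
 */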
289
290 #endif // DISPATCH_PURE_C
291 #pragma mark -
292 #pragma mark dispatch_thread
293 #if DISPATCH_PURE_C
294
295 DISPATCH_ALWAYS_INLINE
296 static inline dispatch_thread_context_t
297 _dispatch_thread_context_find(const void *key)
298 {
299 dispatch_thread_context_t dtc =
300 _dispatch_thread_getspecific(dispatch_context_key);
301 while (dtc) {
302 if (dtc->dtc_key == key) {
303 return dtc;
304 }
305 dtc = dtc->dtc_prev;
306 }
307 return NULL;
308 }
309
310 DISPATCH_ALWAYS_INLINE
311 static inline void
312 _dispatch_thread_context_push(dispatch_thread_context_t ctxt)
313 {
314 ctxt->dtc_prev = _dispatch_thread_getspecific(dispatch_context_key);
315 _dispatch_thread_setspecific(dispatch_context_key, ctxt);
316 }
317
318 DISPATCH_ALWAYS_INLINE
319 static inline void
320 _dispatch_thread_context_pop(dispatch_thread_context_t ctxt)
321 {
322 dispatch_assert(_dispatch_thread_getspecific(dispatch_context_key) == ctxt);
323 _dispatch_thread_setspecific(dispatch_context_key, ctxt->dtc_prev);
324 }
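
/*
 * Illustrative pattern (hypothetical key, and assuming the usual *_s struct
 * tag behind dispatch_thread_context_t): a caller pushes a context keyed by
 * a unique address so that nested code can find it again, and pops it on the
 * way out.
 *
 *     static const void *example_ctxt_key = &example_ctxt_key;
 *
 *     struct dispatch_thread_context_s dtc = {
 *         .dtc_key = example_ctxt_key,
 *     };
 *     _dispatch_thread_context_push(&dtc);
 *     // nested code: _dispatch_thread_context_find(example_ctxt_key) == &dtc
 *     _dispatch_thread_context_pop(&dtc);
 */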
325
326 typedef struct dispatch_thread_frame_iterator_s {
327 dispatch_queue_t dtfi_queue;
328 dispatch_thread_frame_t dtfi_frame;
329 } *dispatch_thread_frame_iterator_t;
330
331 DISPATCH_ALWAYS_INLINE
332 static inline void
333 _dispatch_thread_frame_iterate_start(dispatch_thread_frame_iterator_t it)
334 {
335 _dispatch_thread_getspecific_pair(
336 dispatch_queue_key, (void **)&it->dtfi_queue,
337 dispatch_frame_key, (void **)&it->dtfi_frame);
338 }
339
340 DISPATCH_ALWAYS_INLINE
341 static inline void
342 _dispatch_thread_frame_iterate_next(dispatch_thread_frame_iterator_t it)
343 {
344 dispatch_thread_frame_t dtf = it->dtfi_frame;
345 dispatch_queue_t dq = it->dtfi_queue;
346
347 if (dtf) {
348 if (dq->do_targetq) {
349 // redirections and trysync_f may skip some frames,
350 // so we need to simulate seeing the missing links;
351 // the bottom root queue, however, is always present
352 it->dtfi_queue = dq->do_targetq;
353 if (it->dtfi_queue == dtf->dtf_queue) {
354 it->dtfi_frame = dtf->dtf_prev;
355 }
356 } else {
357 it->dtfi_queue = dtf->dtf_queue;
358 it->dtfi_frame = dtf->dtf_prev;
359 }
360 } else if (dq) {
361 it->dtfi_queue = dq->do_targetq;
362 }
363 }
364
365 DISPATCH_ALWAYS_INLINE
366 static inline bool
367 _dispatch_thread_frame_find_queue(dispatch_queue_t dq)
368 {
369 struct dispatch_thread_frame_iterator_s it;
370
371 _dispatch_thread_frame_iterate_start(&it);
372 while (it.dtfi_queue) {
373 if (it.dtfi_queue == dq) {
374 return true;
375 }
376 _dispatch_thread_frame_iterate_next(&it);
377 }
378 return false;
379 }
380
381 DISPATCH_ALWAYS_INLINE
382 static inline dispatch_thread_frame_t
383 _dispatch_thread_frame_get_current(void)
384 {
385 return _dispatch_thread_getspecific(dispatch_frame_key);
386 }
387
388 DISPATCH_ALWAYS_INLINE
389 static inline void
390 _dispatch_thread_frame_set_current(dispatch_thread_frame_t dtf)
391 {
392 _dispatch_thread_setspecific(dispatch_frame_key, dtf);
393 }
394
395 DISPATCH_ALWAYS_INLINE
396 static inline void
397 _dispatch_thread_frame_save_state(dispatch_thread_frame_t dtf)
398 {
399 _dispatch_thread_getspecific_packed_pair(
400 dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue);
401 }
402
403 DISPATCH_ALWAYS_INLINE
404 static inline void
405 _dispatch_thread_frame_push(dispatch_thread_frame_t dtf, dispatch_queue_t dq)
406 {
407 _dispatch_thread_frame_save_state(dtf);
408 _dispatch_thread_setspecific_pair(dispatch_queue_key, dq,
409 dispatch_frame_key, dtf);
410 dtf->dtf_deferred = NULL;
411 }
412
413 DISPATCH_ALWAYS_INLINE
414 static inline void
415 _dispatch_thread_frame_push_and_rebase(dispatch_thread_frame_t dtf,
416 dispatch_queue_t dq, dispatch_thread_frame_t new_base)
417 {
418 _dispatch_thread_frame_save_state(dtf);
419 _dispatch_thread_setspecific_pair(dispatch_queue_key, dq,
420 dispatch_frame_key, new_base);
421 dtf->dtf_deferred = NULL;
422 }
423
424 DISPATCH_ALWAYS_INLINE
425 static inline void
426 _dispatch_thread_frame_pop(dispatch_thread_frame_t dtf)
427 {
428 _dispatch_thread_setspecific_packed_pair(
429 dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue);
430 }
431
432 DISPATCH_ALWAYS_INLINE
433 static inline dispatch_queue_t
434 _dispatch_thread_frame_stash(dispatch_thread_frame_t dtf)
435 {
436 _dispatch_thread_getspecific_pair(
437 dispatch_queue_key, (void **)&dtf->dtf_queue,
438 dispatch_frame_key, (void **)&dtf->dtf_prev);
439 _dispatch_thread_frame_pop(dtf->dtf_prev);
440 return dtf->dtf_queue;
441 }
442
443 DISPATCH_ALWAYS_INLINE
444 static inline void
445 _dispatch_thread_frame_unstash(dispatch_thread_frame_t dtf)
446 {
447 _dispatch_thread_frame_pop(dtf);
448 }
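
/*
 * Illustrative pattern (hypothetical caller, assuming the usual *_s struct
 * tag behind dispatch_thread_frame_t): stash temporarily detaches the thread
 * from the frame it was draining, unstash restores it.
 *
 *     struct dispatch_thread_frame_s dtf;
 *     dispatch_queue_t cur = _dispatch_thread_frame_stash(&dtf);
 *     // `cur` is the queue this thread was draining; the TSD pair now
 *     // reflects the enclosing frame until the matching unstash
 *     _dispatch_thread_frame_unstash(&dtf);
 */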
449
450 DISPATCH_ALWAYS_INLINE
451 static inline int
452 _dispatch_wqthread_override_start_check_owner(mach_port_t thread,
453 pthread_priority_t pp, mach_port_t *ulock_addr)
454 {
455 #if HAVE_PTHREAD_WORKQUEUE_QOS
456 if (!_dispatch_set_qos_class_enabled) return 0;
457 return _pthread_workqueue_override_start_direct_check_owner(thread,
458 pp, ulock_addr);
459 #else
460 (void)thread; (void)pp; (void)ulock_addr;
461 return 0;
462 #endif
463 }
464
465 DISPATCH_ALWAYS_INLINE
466 static inline void
467 _dispatch_wqthread_override_start(mach_port_t thread,
468 pthread_priority_t pp)
469 {
470 #if HAVE_PTHREAD_WORKQUEUE_QOS
471 if (!_dispatch_set_qos_class_enabled) return;
472 (void)_pthread_workqueue_override_start_direct(thread, pp);
473 #else
474 (void)thread; (void)pp;
475 #endif
476 }
477
478 DISPATCH_ALWAYS_INLINE
479 static inline void
480 _dispatch_wqthread_override_reset(void)
481 {
482 #if HAVE_PTHREAD_WORKQUEUE_QOS
483 if (!_dispatch_set_qos_class_enabled) return;
484 (void)_pthread_workqueue_override_reset();
485 #endif
486 }
487
488 DISPATCH_ALWAYS_INLINE
489 static inline void
490 _dispatch_thread_override_start(mach_port_t thread, pthread_priority_t pp,
491 void *resource)
492 {
493 #if HAVE_PTHREAD_WORKQUEUE_QOS
494 if (!_dispatch_set_qos_class_enabled) return;
495 (void)_pthread_qos_override_start_direct(thread, pp, resource);
496 #else
497 (void)thread; (void)pp; (void)resource;
498 #endif
499 }
500
501 DISPATCH_ALWAYS_INLINE
502 static inline void
503 _dispatch_thread_override_end(mach_port_t thread, void *resource)
504 {
505 #if HAVE_PTHREAD_WORKQUEUE_QOS
506 if (!_dispatch_set_qos_class_enabled) return;
507 (void)_pthread_qos_override_end_direct(thread, resource);
508 #else
509 (void)thread; (void)resource;
510 #endif
511 }
512
513 #if DISPATCH_DEBUG_QOS && HAVE_PTHREAD_WORKQUEUE_QOS
514 DISPATCH_ALWAYS_INLINE
515 static inline bool
516 _dispatch_qos_class_is_valid(pthread_priority_t pp)
517 {
518 pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
519 if (pp > (1UL << (DISPATCH_QUEUE_QOS_COUNT +
520 _PTHREAD_PRIORITY_QOS_CLASS_SHIFT))) {
521 return false;
522 }
523 return true;
524 }
525 #define _dispatch_assert_is_valid_qos_class(pp) ({ typeof(pp) _pp = (pp); \
526 if (unlikely(!_dispatch_qos_class_is_valid(_pp))) { \
527 DISPATCH_INTERNAL_CRASH(_pp, "Invalid qos class"); \
528 } \
529 })
530
531 DISPATCH_ALWAYS_INLINE
532 static inline bool
533 _dispatch_qos_override_is_valid(pthread_priority_t pp)
534 {
535 if (pp & (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK) {
536 return false;
537 }
538 return _dispatch_qos_class_is_valid(pp);
539 }
540 #define _dispatch_assert_is_valid_qos_override(pp) ({ typeof(pp) _pp = (pp); \
541 if (unlikely(!_dispatch_qos_override_is_valid(_pp))) { \
542 DISPATCH_INTERNAL_CRASH(_pp, "Invalid override"); \
543 } \
544 })
545 #else
546 #define _dispatch_assert_is_valid_qos_override(pp) (void)(pp)
547 #define _dispatch_assert_is_valid_qos_class(pp) (void)(pp)
548 #endif
549
550 #endif // DISPATCH_PURE_C
551 #pragma mark -
552 #pragma mark dispatch_queue_t state accessors
553 #if DISPATCH_PURE_C
554
555 DISPATCH_ALWAYS_INLINE
556 static inline dispatch_queue_flags_t
557 _dispatch_queue_atomic_flags(dispatch_queue_t dq)
558 {
559 return os_atomic_load2o(dq, dq_atomic_flags, relaxed);
560 }
561
562 DISPATCH_ALWAYS_INLINE
563 static inline dispatch_queue_flags_t
564 _dispatch_queue_atomic_flags_set(dispatch_queue_t dq,
565 dispatch_queue_flags_t bits)
566 {
567 return os_atomic_or2o(dq, dq_atomic_flags, bits, relaxed);
568 }
569
570 DISPATCH_ALWAYS_INLINE
571 static inline dispatch_queue_flags_t
572 _dispatch_queue_atomic_flags_set_and_clear_orig(dispatch_queue_t dq,
573 dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits)
574 {
575 dispatch_queue_flags_t oflags, nflags;
576 os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, {
577 nflags = (oflags | add_bits) & ~clr_bits;
578 });
579 return oflags;
580 }
581
582 DISPATCH_ALWAYS_INLINE
583 static inline dispatch_queue_flags_t
584 _dispatch_queue_atomic_flags_set_and_clear(dispatch_queue_t dq,
585 dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits)
586 {
587 dispatch_queue_flags_t oflags, nflags;
588 os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, {
589 nflags = (oflags | add_bits) & ~clr_bits;
590 });
591 return nflags;
592 }
593
594 DISPATCH_ALWAYS_INLINE
595 static inline dispatch_queue_flags_t
596 _dispatch_queue_atomic_flags_set_orig(dispatch_queue_t dq,
597 dispatch_queue_flags_t bits)
598 {
599 return os_atomic_or_orig2o(dq, dq_atomic_flags, bits, relaxed);
600 }
601
602 DISPATCH_ALWAYS_INLINE
603 static inline dispatch_queue_flags_t
604 _dispatch_queue_atomic_flags_clear(dispatch_queue_t dq,
605 dispatch_queue_flags_t bits)
606 {
607 return os_atomic_and2o(dq, dq_atomic_flags, ~bits, relaxed);
608 }
609
610 DISPATCH_ALWAYS_INLINE
611 static inline bool
612 _dispatch_queue_is_thread_bound(dispatch_queue_t dq)
613 {
614 return _dispatch_queue_atomic_flags(dq) & DQF_THREAD_BOUND;
615 }
616
617 DISPATCH_ALWAYS_INLINE
618 static inline bool
619 _dispatch_queue_cannot_trysync(dispatch_queue_t dq)
620 {
621 return _dispatch_queue_atomic_flags(dq) & DQF_CANNOT_TRYSYNC;
622 }
623
624 DISPATCH_ALWAYS_INLINE
625 static inline bool
626 _dispatch_queue_label_needs_free(dispatch_queue_t dq)
627 {
628 return _dispatch_queue_atomic_flags(dq) & DQF_LABEL_NEEDS_FREE;
629 }
630
631 DISPATCH_ALWAYS_INLINE
632 static inline dispatch_invoke_flags_t
633 _dispatch_queue_autorelease_frequency(dispatch_queue_t dq)
634 {
635 const unsigned long factor =
636 DISPATCH_INVOKE_AUTORELEASE_ALWAYS / DQF_AUTORELEASE_ALWAYS;
637 dispatch_static_assert(factor > 0);
638
639 dispatch_queue_flags_t qaf = _dispatch_queue_atomic_flags(dq);
640
641 qaf &= _DQF_AUTORELEASE_MASK;
642 return (dispatch_invoke_flags_t)qaf * factor;
643 }
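
/*
 * Sketch of the multiply above (assuming the flag values keep their current
 * relative layout): DQF_AUTORELEASE_ALWAYS scaled by `factor` lands exactly
 * on DISPATCH_INVOKE_AUTORELEASE_ALWAYS, so the autorelease bits stored in
 * dq_atomic_flags translate into invoke flags with a single multiplication
 * instead of a switch on the flag values.
 */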
644
645 DISPATCH_ALWAYS_INLINE
646 static inline dispatch_invoke_flags_t
647 _dispatch_queue_merge_autorelease_frequency(dispatch_queue_t dq,
648 dispatch_invoke_flags_t flags)
649 {
650 dispatch_invoke_flags_t qaf = _dispatch_queue_autorelease_frequency(dq);
651
652 if (qaf) {
653 flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK;
654 flags |= qaf;
655 }
656 return flags;
657 }
658
659 DISPATCH_ALWAYS_INLINE
660 static inline bool
661 _dispatch_queue_has_immutable_target(dispatch_queue_t dq)
662 {
663 if (dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) {
664 return false;
665 }
666 return dx_type(dq) != DISPATCH_QUEUE_LEGACY_TYPE;
667 }
668
669 #endif // DISPATCH_PURE_C
670 #ifndef __cplusplus
671
672 DISPATCH_ALWAYS_INLINE
673 static inline uint32_t
674 _dq_state_suspend_cnt(uint64_t dq_state)
675 {
676 return (uint32_t)(dq_state / DISPATCH_QUEUE_SUSPEND_INTERVAL);
677 }
678
679 DISPATCH_ALWAYS_INLINE
680 static inline bool
681 _dq_state_has_side_suspend_cnt(uint64_t dq_state)
682 {
683 return dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT;
684 }
685
686 DISPATCH_ALWAYS_INLINE
687 static inline uint32_t
688 _dq_state_extract_width_bits(uint64_t dq_state)
689 {
690 dq_state &= DISPATCH_QUEUE_WIDTH_MASK;
691 return (uint32_t)(dq_state >> DISPATCH_QUEUE_WIDTH_SHIFT);
692 }
693
694 DISPATCH_ALWAYS_INLINE
695 static inline uint32_t
696 _dq_state_available_width(uint64_t dq_state)
697 {
698 uint32_t full = DISPATCH_QUEUE_WIDTH_FULL;
699 if (fastpath(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) {
700 return full - _dq_state_extract_width_bits(dq_state);
701 }
702 return 0;
703 }
704
705 DISPATCH_ALWAYS_INLINE
706 static inline uint32_t
707 _dq_state_used_width(uint64_t dq_state, uint16_t dq_width)
708 {
709 uint32_t full = DISPATCH_QUEUE_WIDTH_FULL;
710 uint32_t width = _dq_state_extract_width_bits(dq_state);
711
712 if (dq_state & DISPATCH_QUEUE_PENDING_BARRIER) {
713 // DISPATCH_QUEUE_PENDING_BARRIER means (dq_width - 1) of the used width
714 // is a pre-reservation that we want to ignore
715 return width - (full - dq_width) - (dq_width - 1);
716 }
717 return width - (full - dq_width);
718 }
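
/*
 * Worked example (illustrative, assuming DISPATCH_QUEUE_STATE_INIT_VALUE
 * seeds the width field with WIDTH_FULL - dq_width): for a concurrent queue
 * of dq_width 4 with two items in flight, the raw width field decodes to
 * (DISPATCH_QUEUE_WIDTH_FULL - 4) + 2 and _dq_state_used_width() reports 2.
 * If DISPATCH_QUEUE_PENDING_BARRIER is also set, the field carries an extra
 * (dq_width - 1) == 3 of reservation, and the function still reports 2.
 */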
719
720 DISPATCH_ALWAYS_INLINE
721 static inline bool
722 _dq_state_is_suspended(uint64_t dq_state)
723 {
724 return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION;
725 }
726 #define DISPATCH_QUEUE_IS_SUSPENDED(x) _dq_state_is_suspended((x)->dq_state)
727
728 DISPATCH_ALWAYS_INLINE
729 static inline bool
730 _dq_state_is_inactive(uint64_t dq_state)
731 {
732 return dq_state & DISPATCH_QUEUE_INACTIVE;
733 }
734
735 DISPATCH_ALWAYS_INLINE
736 static inline bool
737 _dq_state_needs_activation(uint64_t dq_state)
738 {
739 return dq_state & DISPATCH_QUEUE_NEEDS_ACTIVATION;
740 }
741
742 DISPATCH_ALWAYS_INLINE
743 static inline bool
744 _dq_state_is_in_barrier(uint64_t dq_state)
745 {
746 return dq_state & DISPATCH_QUEUE_IN_BARRIER;
747 }
748
749 DISPATCH_ALWAYS_INLINE
750 static inline bool
751 _dq_state_has_available_width(uint64_t dq_state)
752 {
753 return !(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT);
754 }
755
756 DISPATCH_ALWAYS_INLINE
757 static inline bool
758 _dq_state_has_pending_barrier(uint64_t dq_state)
759 {
760 return dq_state & DISPATCH_QUEUE_PENDING_BARRIER;
761 }
762
763 DISPATCH_ALWAYS_INLINE
764 static inline bool
765 _dq_state_is_dirty(uint64_t dq_state)
766 {
767 return dq_state & DISPATCH_QUEUE_DIRTY;
768 }
769
770 DISPATCH_ALWAYS_INLINE
771 static inline bool
772 _dq_state_is_enqueued(uint64_t dq_state)
773 {
774 return dq_state & DISPATCH_QUEUE_ENQUEUED;
775 }
776
777 DISPATCH_ALWAYS_INLINE
778 static inline bool
779 _dq_state_has_override(uint64_t dq_state)
780 {
781 return dq_state & DISPATCH_QUEUE_HAS_OVERRIDE;
782 }
783
784 DISPATCH_ALWAYS_INLINE
785 static inline dispatch_lock_owner
786 _dq_state_drain_owner(uint64_t dq_state)
787 {
788 return _dispatch_lock_owner((dispatch_lock)dq_state);
789 }
790 #define DISPATCH_QUEUE_DRAIN_OWNER(dq) \
791 _dq_state_drain_owner(os_atomic_load2o(dq, dq_state, relaxed))
792
793 DISPATCH_ALWAYS_INLINE
794 static inline bool
795 _dq_state_drain_pended(uint64_t dq_state)
796 {
797 return (dq_state & DISPATCH_QUEUE_DRAIN_PENDED);
798 }
799
800 DISPATCH_ALWAYS_INLINE
801 static inline bool
802 _dq_state_drain_locked_by(uint64_t dq_state, uint32_t owner)
803 {
804 if (_dq_state_drain_pended(dq_state)) {
805 return false;
806 }
807 return _dq_state_drain_owner(dq_state) == owner;
808 }
809
810 DISPATCH_ALWAYS_INLINE
811 static inline bool
812 _dq_state_drain_locked(uint64_t dq_state)
813 {
814 return (dq_state & DISPATCH_QUEUE_DRAIN_OWNER_MASK) != 0;
815 }
816
817 DISPATCH_ALWAYS_INLINE
818 static inline bool
819 _dq_state_has_waiters(uint64_t dq_state)
820 {
821 return _dispatch_lock_has_waiters((dispatch_lock)dq_state);
822 }
823
824 DISPATCH_ALWAYS_INLINE
825 static inline bool
826 _dq_state_is_sync_runnable(uint64_t dq_state)
827 {
828 return dq_state < DISPATCH_QUEUE_IN_BARRIER;
829 }
830
831 DISPATCH_ALWAYS_INLINE
832 static inline bool
833 _dq_state_is_runnable(uint64_t dq_state)
834 {
835 return dq_state < DISPATCH_QUEUE_WIDTH_FULL_BIT;
836 }
837
838 DISPATCH_ALWAYS_INLINE
839 static inline bool
840 _dq_state_should_wakeup(uint64_t dq_state)
841 {
842 return _dq_state_is_runnable(dq_state) &&
843 !_dq_state_is_enqueued(dq_state) &&
844 !_dq_state_drain_locked(dq_state);
845 }
846
847 #endif // __cplusplus
848 #pragma mark -
849 #pragma mark dispatch_queue_t state machine
850 #ifndef __cplusplus
851
852 static inline bool _dispatch_queue_need_override(dispatch_queue_class_t dqu,
853 pthread_priority_t pp);
854 static inline bool _dispatch_queue_need_override_retain(
855 dispatch_queue_class_t dqu, pthread_priority_t pp);
856 static inline dispatch_priority_t _dispatch_queue_reset_override_priority(
857 dispatch_queue_class_t dqu, bool qp_is_floor);
858 static inline bool _dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu,
859 dispatch_priority_t new_op);
860 static inline pthread_priority_t _dispatch_get_defaultpriority(void);
861 static inline void _dispatch_set_defaultpriority_override(void);
862 static inline void _dispatch_reset_defaultpriority(pthread_priority_t pp);
863 static inline pthread_priority_t _dispatch_get_priority(void);
864 static inline pthread_priority_t _dispatch_set_defaultpriority(
865 pthread_priority_t pp, pthread_priority_t *new_pp);
866
867 DISPATCH_ALWAYS_INLINE
868 static inline void
869 _dispatch_queue_xref_dispose(struct dispatch_queue_s *dq)
870 {
871 if (slowpath(DISPATCH_QUEUE_IS_SUSPENDED(dq))) {
872 // Arguments for and against this assert are within 6705399
873 DISPATCH_CLIENT_CRASH(dq, "Release of a suspended object");
874 }
875 os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed);
876 }
877
878 #endif
879 #if DISPATCH_PURE_C
880
881 // Note to later developers: ensure that any initialization changes are
882 // made for statically allocated queues (e.g. _dispatch_main_q).
883 static inline void
884 _dispatch_queue_init(dispatch_queue_t dq, dispatch_queue_flags_t dqf,
885 uint16_t width, bool inactive)
886 {
887 uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width);
888
889 if (inactive) {
890 dq_state += DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION;
891 dq->do_ref_cnt++; // rdar://8181908 see _dispatch_queue_resume
892 }
893 dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;
894 dqf |= (dispatch_queue_flags_t)width << DQF_WIDTH_SHIFT;
895 os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
896 dq->dq_state = dq_state;
897 dq->dq_override_voucher = DISPATCH_NO_VOUCHER;
898 dq->dq_serialnum =
899 os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
900 }
901
902 /* Used by:
903 * - _dispatch_queue_set_target_queue
904 * - changing dispatch source handlers
905 *
906 * Tries to prevent concurrent wakeup of an inactive queue by suspending it.
907 */
908 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
909 static inline bool
910 _dispatch_queue_try_inactive_suspend(dispatch_queue_t dq)
911 {
912 uint64_t dq_state, value;
913
914 (void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
915 if (!fastpath(_dq_state_is_inactive(dq_state))) {
916 os_atomic_rmw_loop_give_up(return false);
917 }
918 value = dq_state + DISPATCH_QUEUE_SUSPEND_INTERVAL;
919 });
920 if (slowpath(!_dq_state_is_suspended(dq_state)) ||
921 slowpath(_dq_state_has_side_suspend_cnt(dq_state))) {
922 // Crashing here means that 128+ dispatch_suspend() calls have been
923 // made on an inactive object and then dispatch_set_target_queue() or
924 // dispatch_set_*_handler() has been called.
925 //
926 // We don't want to handle the side suspend count in a codepath that
927 // needs to be fast.
928 DISPATCH_CLIENT_CRASH(dq, "Too many calls to dispatch_suspend() "
929 "prior to calling dispatch_set_target_queue() "
930 "or dispatch_set_*_handler()");
931 }
932 return true;
933 }
934
935 /* Must be used by any caller intending to do a speculative wakeup while that
936  * caller was preventing other wakeups (for example dispatch_resume(), or a
937  * drainer that does not do a drain_try_unlock() and hence does not observe DIRTY)
938 *
939 * In that case this call loads DIRTY with an acquire barrier so that when
940 * other threads have made changes (such as dispatch_source_cancel()) the
941 * caller can take these state machine changes into account in its decision to
942 * wake up the object.
943 */
944 DISPATCH_ALWAYS_INLINE
945 static inline void
946 _dispatch_queue_try_wakeup(dispatch_queue_t dq, uint64_t dq_state,
947 dispatch_wakeup_flags_t flags)
948 {
949 if (_dq_state_is_runnable(dq_state) &&
950 !_dq_state_drain_locked(dq_state) &&
951 (!_dq_state_is_enqueued(dq_state) ||
952 (flags & DISPATCH_WAKEUP_WAITER_HANDOFF))) {
953 if (slowpath(_dq_state_is_dirty(dq_state))) {
954 // <rdar://problem/14637483>
955 // seq_cst wrt state changes that were flushed and not acted upon
956 os_atomic_thread_fence(acquire);
957 }
958 return dx_wakeup(dq, 0, flags);
959 }
960 if (flags & DISPATCH_WAKEUP_CONSUME) {
961 return _dispatch_release_tailcall(dq);
962 }
963 }
964
965 /* Used by:
966 * - _dispatch_queue_class_invoke (normal path)
967 * - _dispatch_queue_override_invoke (stealer)
968 *
969 * Initial state must be { sc:0, ib:0, qf:0, dl:0 }
970 * Final state forces { dl:self, qf:1, d: 0 }
971 * ib:1 is forced when the width acquired is equivalent to the barrier width
972 */
973 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
974 static inline uint64_t
975 _dispatch_queue_drain_try_lock(dispatch_queue_t dq,
976 dispatch_invoke_flags_t flags, uint64_t *dq_state)
977 {
978 uint64_t pending_barrier_width =
979 (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
980 uint64_t xor_owner_and_set_full_width =
981 _dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT;
982 uint64_t clear_enqueued_bit, old_state, new_state;
983
984 if (flags & DISPATCH_INVOKE_STEALING) {
985 clear_enqueued_bit = 0;
986 } else {
987 clear_enqueued_bit = DISPATCH_QUEUE_ENQUEUED;
988 }
989
990 os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
991 new_state = old_state;
992 new_state ^= clear_enqueued_bit;
993 if (likely(_dq_state_is_runnable(old_state) &&
994 !_dq_state_drain_locked(old_state))) {
995 //
996 // Only keep the HAS_WAITER bit (and ENQUEUED if stealing).
997 // In particular acquiring the drain lock clears the DIRTY bit
998 //
999 new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
1000 //
1001 // For the NOWAITERS_BIT case, the thread identity
1002 // has NOWAITERS_BIT set, and NOWAITERS_BIT was kept above,
1003 // so the xor below flips the NOWAITERS_BIT to 0 as expected.
1004 //
1005 // For the non-inverted WAITERS_BIT case, WAITERS_BIT is not set in
1006 // the thread identity, and the xor leaves the bit alone.
1007 //
1008 new_state ^= xor_owner_and_set_full_width;
1009 if (_dq_state_has_pending_barrier(old_state) ||
1010 old_state + pending_barrier_width <
1011 DISPATCH_QUEUE_WIDTH_FULL_BIT) {
1012 new_state |= DISPATCH_QUEUE_IN_BARRIER;
1013 }
1014 } else if (!clear_enqueued_bit) {
1015 os_atomic_rmw_loop_give_up(break);
1016 }
1017 });
1018
1019 if (dq_state) *dq_state = new_state;
1020 if (likely(_dq_state_is_runnable(old_state) &&
1021 !_dq_state_drain_locked(old_state))) {
1022 new_state &= DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_FULL_BIT;
1023 old_state &= DISPATCH_QUEUE_WIDTH_MASK;
1024 return new_state - old_state;
1025 }
1026 return 0;
1027 }
1028
1029 /* Used by _dispatch_barrier_{try,}sync
1030 *
1031  * Note: this fails if e:1 or dl!=0, but that allows this code to be a
1032  * simple cmpxchg, which is significantly faster on Intel and makes a
1033  * real difference on the uncontended codepath.
1034 *
1035 * See discussion for DISPATCH_QUEUE_DIRTY in queue_internal.h
1036 *
1037 * Initial state must be `completely idle`
1038 * Final state forces { ib:1, qf:1, w:0 }
1039 */
1040 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1041 static inline bool
1042 _dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq)
1043 {
1044 uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER;
1045 value |= _dispatch_tid_self();
1046
1047 return os_atomic_cmpxchg2o(dq, dq_state,
1048 DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width), value, acquire);
1049 }
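
/*
 * Illustrative fast path (hypothetical caller shape, not the actual
 * dispatch_barrier_sync() implementation): the uncontended case reduces to
 * the single cmpxchg above, everything else falls to the slow path.
 *
 *     if (_dispatch_queue_try_acquire_barrier_sync(dq)) {
 *         // completely idle -> owned in barrier: run the work inline,
 *         // then transfer or release the drain lock
 *     } else {
 *         // contended or suspended: enqueue and wait on the slow path
 *     }
 */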
1050
1051 /* Used by _dispatch_sync for root queues and some drain codepaths
1052 *
1053  * Root queues have no strict ordering and dispatch_sync() always goes through.
1054  * Drain is the sole setter of `dl` and hence can use this non-failing version
1055  * of _dispatch_queue_try_reserve_sync_width().
1056 *
1057 * Final state: { w += 1 }
1058 */
1059 DISPATCH_ALWAYS_INLINE
1060 static inline void
1061 _dispatch_queue_reserve_sync_width(dispatch_queue_t dq)
1062 {
1063 (void)os_atomic_add2o(dq, dq_state,
1064 DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed);
1065 }
1066
1067 /* Used by _dispatch_sync on non-serial queues
1068 *
1069 * Initial state must be { sc:0, ib:0, pb:0, d:0 }
1070 * Final state: { w += 1 }
1071 */
1072 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1073 static inline bool
1074 _dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq)
1075 {
1076 uint64_t dq_state, value;
1077
1078 return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
1079 if (!fastpath(_dq_state_is_sync_runnable(dq_state)) ||
1080 slowpath(_dq_state_is_dirty(dq_state)) ||
1081 slowpath(_dq_state_has_pending_barrier(dq_state))) {
1082 os_atomic_rmw_loop_give_up(return false);
1083 }
1084 value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
1085 });
1086 }
1087
1088 /* Used by _dispatch_apply_redirect
1089 *
1090 * Try to acquire at most da_width and returns what could be acquired,
1091 * possibly 0
1092 */
1093 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1094 static inline uint32_t
1095 _dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, uint32_t da_width)
1096 {
1097 uint64_t dq_state, value;
1098 uint32_t width;
1099
1100 (void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
1101 width = _dq_state_available_width(dq_state);
1102 if (!fastpath(width)) {
1103 os_atomic_rmw_loop_give_up(return 0);
1104 }
1105 if (width > da_width) {
1106 width = da_width;
1107 }
1108 value = dq_state + width * DISPATCH_QUEUE_WIDTH_INTERVAL;
1109 });
1110 return width;
1111 }
1112
1113 /* Used by _dispatch_apply_redirect
1114 *
1115  * Release width acquired by _dispatch_queue_try_reserve_apply_width()
1116 */
1117 DISPATCH_ALWAYS_INLINE
1118 static inline void
1119 _dispatch_queue_relinquish_width(dispatch_queue_t dq, uint32_t da_width)
1120 {
1121 (void)os_atomic_sub2o(dq, dq_state,
1122 da_width * DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed);
1123 }
1124
1125 /* Used by target-queue recursing code
1126 *
1127 * Initial state must be { sc:0, ib:0, qf:0, pb:0, d:0 }
1128 * Final state: { w += 1 }
1129 */
1130 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1131 static inline bool
1132 _dispatch_queue_try_acquire_async(dispatch_queue_t dq)
1133 {
1134 uint64_t dq_state, value;
1135
1136 return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, acquire, {
1137 if (!fastpath(_dq_state_is_runnable(dq_state)) ||
1138 slowpath(_dq_state_is_dirty(dq_state)) ||
1139 slowpath(_dq_state_has_pending_barrier(dq_state))) {
1140 os_atomic_rmw_loop_give_up(return false);
1141 }
1142 value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
1143 });
1144 }
1145
1146 /* Used at the end of Drainers
1147 *
1148 * This adjusts the `owned` width when the next continuation is already known
1149 * to account for its barrierness.
1150 */
1151 DISPATCH_ALWAYS_INLINE
1152 static inline uint64_t
1153 _dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned,
1154 struct dispatch_object_s *next_dc)
1155 {
1156 uint64_t reservation;
1157
1158 if (slowpath(dq->dq_width > 1)) {
1159 if (next_dc && _dispatch_object_is_barrier(next_dc)) {
1160 reservation = DISPATCH_QUEUE_PENDING_BARRIER;
1161 reservation += (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
1162 owned -= reservation;
1163 }
1164 }
1165 return owned;
1166 }
1167
1168 /* Used at the end of Drainers
1169 *
1170 * Unlocking fails if the DIRTY bit is seen (and the queue is not suspended).
1171 * In that case, only the DIRTY bit is cleared. The DIRTY bit is therefore used
1172 * as a signal to renew the drain lock instead of releasing it.
1173 *
1174 * Successful unlock forces { dl:0, d:0, qo:0 } and gives back `owned`
1175 */
1176 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1177 static inline bool
1178 _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned)
1179 {
1180 uint64_t old_state = os_atomic_load2o(dq, dq_state, relaxed);
1181 uint64_t new_state;
1182 dispatch_priority_t pp = 0, op;
1183
1184 do {
1185 if (unlikely(_dq_state_is_dirty(old_state) &&
1186 !_dq_state_is_suspended(old_state))) {
1187 // just renew the drain lock with an acquire barrier, to see
1188 // what the enqueuer that set DIRTY has done.
1189 os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DIRTY, acquire);
1190 _dispatch_queue_reinstate_override_priority(dq, pp);
1191 return false;
1192 }
1193 new_state = old_state - owned;
1194 if ((new_state & DISPATCH_QUEUE_WIDTH_FULL_BIT) ||
1195 _dq_state_is_suspended(old_state)) {
1196 // the test for the WIDTH_FULL_BIT is about narrow concurrent queues
1197 // releasing the drain lock while being at the width limit
1198 //
1199 // _non_barrier_complete() will set the DIRTY bit when going back
1200 // under the limit which will cause the try_unlock to fail
1201 new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
1202 } else {
1203 new_state &= ~DISPATCH_QUEUE_DIRTY;
1204 new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
1205 // This current owner is the only one that can clear HAS_OVERRIDE,
1206 // so accumulating reset overrides here is valid.
1207 if (unlikely(_dq_state_has_override(new_state))) {
1208 new_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE;
1209 dispatch_assert(!_dispatch_queue_is_thread_bound(dq));
1210 op = _dispatch_queue_reset_override_priority(dq, false);
1211 if (op > pp) pp = op;
1212 }
1213 }
1214 } while (!fastpath(os_atomic_cmpxchgvw2o(dq, dq_state,
1215 old_state, new_state, &old_state, release)));
1216
1217 if (_dq_state_has_override(old_state)) {
1218 // Ensure that the root queue sees that this thread was overridden.
1219 _dispatch_set_defaultpriority_override();
1220 }
1221 return true;
1222 }
1223
1224 /* Used at the end of Drainers when the next work item is known
1225  * and the dirty-head check isn't needed.
1226 *
1227 * This releases `owned`, clears DIRTY, and handles HAS_OVERRIDE when seen.
1228 */
1229 DISPATCH_ALWAYS_INLINE
1230 static inline uint64_t
1231 _dispatch_queue_drain_lock_transfer_or_unlock(dispatch_queue_t dq,
1232 uint64_t owned, mach_port_t next_owner, uint64_t *orig_state)
1233 {
1234 uint64_t dq_state, value;
1235
1236 #ifdef DLOCK_NOWAITERS_BIT
1237 // The NOWAITERS_BIT state must not change through the transfer. It means
1238 // that if next_owner is 0 the bit must be flipped in the rmw_loop below,
1239 // and if next_owner is set, then the bit must be left unchanged.
1240 //
1241 // - when next_owner is 0, the xor below sets NOWAITERS_BIT in next_owner,
1242 // which causes the second xor to flip the bit as expected.
1243 // - if next_owner is not 0, it has the NOWAITERS_BIT set, so we have to
1244 // clear it so that the second xor leaves the NOWAITERS_BIT alone.
1245 next_owner ^= DLOCK_NOWAITERS_BIT;
1246 #endif
1247 os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, release, {
1248 value = dq_state - owned;
1249 // same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT
1250 // but we want to be more efficient wrt the WAITERS_BIT
1251 value &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK;
1252 value &= ~DISPATCH_QUEUE_DRAIN_PENDED;
1253 value &= ~DISPATCH_QUEUE_DIRTY;
1254 value ^= next_owner;
1255 });
1256
1257 if (_dq_state_has_override(dq_state)) {
1258 // Ensure that the root queue sees that this thread was overridden.
1259 _dispatch_set_defaultpriority_override();
1260 }
1261 if (orig_state) *orig_state = dq_state;
1262 return value;
1263 }
1264 #define _dispatch_queue_drain_unlock(dq, owned, orig) \
1265 _dispatch_queue_drain_lock_transfer_or_unlock(dq, owned, 0, orig)
1266
1267 DISPATCH_ALWAYS_INLINE
1268 static inline void
1269 _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq,
1270 uint64_t to_unlock, dispatch_object_t dou)
1271 {
1272 mach_port_t th_next = 0;
1273 if (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT) {
1274 th_next = (mach_port_t)dou._dc->dc_data;
1275 }
1276 _dispatch_queue_drain_lock_transfer_or_unlock(dq, to_unlock, th_next, NULL);
1277 }
1278
1279
1280 #pragma mark -
1281 #pragma mark os_mpsc_queue
1282
1283 // type_t * {volatile,const,_Atomic,...} -> type_t *
1284 // type_t[] -> type_t *
1285 #define os_unqualified_pointer_type(expr) \
1286 typeof(typeof(*(expr)) *)
1287
1288 #define os_mpsc_node_type(q, _ns) \
1289 os_unqualified_pointer_type((q)->_ns##_head)
1290
1291 //
1292 // Multi-producer calls; these can be used safely concurrently
1293 //
1294
1295 // Returns true when the queue was empty and the head must be set
1296 #define os_mpsc_push_update_tail_list(q, _ns, head, tail, _o_next) ({ \
1297 os_mpsc_node_type(q, _ns) _head = (head), _tail = (tail), _prev; \
1298 _tail->_o_next = NULL; \
1299 _prev = os_atomic_xchg2o((q), _ns##_tail, _tail, release); \
1300 if (fastpath(_prev)) { \
1301 os_atomic_store2o(_prev, _o_next, _head, relaxed); \
1302 } \
1303 (_prev == NULL); \
1304 })
1305
1306 // Returns true when the queue was empty and the head must be set
1307 #define os_mpsc_push_update_tail(q, _ns, o, _o_next) ({ \
1308 os_mpsc_node_type(q, _ns) _o = (o); \
1309 os_mpsc_push_update_tail_list(q, _ns, _o, _o, _o_next); \
1310 })
1311
1312 #define os_mpsc_push_update_head(q, _ns, o) ({ \
1313 os_atomic_store2o((q), _ns##_head, o, relaxed); \
1314 })
1315
1316 //
1317 // Single-consumer calls; these can NOT be used safely concurrently
1318 //
1319
1320 #define os_mpsc_get_head(q, _ns) ({ \
1321 os_mpsc_node_type(q, _ns) _head; \
1322 _dispatch_wait_until(_head = (q)->_ns##_head); \
1323 _head; \
1324 })
1325
1326 #define os_mpsc_pop_head(q, _ns, head, _o_next) ({ \
1327 typeof(q) _q = (q); \
1328 os_mpsc_node_type(_q, _ns) _head = (head), _n = fastpath(_head->_o_next); \
1329 os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
1330 /* 22708742: set tail to NULL with release, so that NULL write */ \
1331 /* to head above doesn't clobber head from concurrent enqueuer */ \
1332 if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release)) { \
1333 _dispatch_wait_until(_n = fastpath(_head->_o_next)); \
1334 os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
1335 } \
1336 _n; \
1337 })
1338
1339 #define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next) ({ \
1340 typeof(q) _q = (q); \
1341 os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \
1342 if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed)) { \
1343 _dispatch_wait_until(_n = _q->_ns##_head); \
1344 _head->_o_next = _n; \
1345 } \
1346 os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
1347 })
1348
1349 #define os_mpsc_capture_snapshot(q, _ns, tail) ({ \
1350 typeof(q) _q = (q); \
1351 os_mpsc_node_type(_q, _ns) _head; \
1352 _dispatch_wait_until(_head = _q->_ns##_head); \
1353 os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \
1354 /* 22708742: set tail to NULL with release, so that NULL write */ \
1355 /* to head above doesn't clobber head from concurrent enqueuer */ \
1356 *(tail) = os_atomic_xchg2o(_q, _ns##_tail, NULL, release); \
1357 _head; \
1358 })
1359
1360 #define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \
1361 os_unqualified_pointer_type(head) _head = (head), _n = NULL; \
1362 if (_head != (tail)) { \
1363 _dispatch_wait_until(_n = _head->_o_next); \
1364 }; \
1365 _n; })
1366
1367 #define os_mpsc_prepend(q, _ns, head, tail, _o_next) ({ \
1368 typeof(q) _q = (q); \
1369 os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \
1370 _tail->_o_next = NULL; \
1371 if (!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release)) { \
1372 _dispatch_wait_until(_n = _q->_ns##_head); \
1373 _tail->_o_next = _n; \
1374 } \
1375 os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
1376 })
1377
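/*
 * Illustrative instantiation (hypothetical types, not from this file): the
 * macros above work by token pasting, so any struct exposing `<ns>_head` and
 * `<ns>_tail` pointers whose nodes are linked through a chosen field can use
 * them, exactly like dispatch_queue_t does with (dq_items, do_next). Given
 * `struct example_queue *eq` and `struct example_node *node`:
 *
 *     struct example_node {
 *         struct example_node *volatile en_next;
 *     };
 *     struct example_queue {
 *         struct example_node *volatile eq_items_head;
 *         struct example_node *volatile eq_items_tail;
 *     };
 *
 *     // producer side (safe concurrently): publish the tail; only the
 *     // producer that found the queue empty writes the head
 *     if (os_mpsc_push_update_tail(eq, eq_items, node, en_next)) {
 *         os_mpsc_push_update_head(eq, eq_items, node);
 *     }
 *
 *     // consumer side (single thread only): wait for the head, then pop it
 *     struct example_node *head = os_mpsc_get_head(eq, eq_items);
 *     struct example_node *next = os_mpsc_pop_head(eq, eq_items, head, en_next);
 */
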
1378 #pragma mark -
1379 #pragma mark dispatch_queue_t tq lock
1380
1381 DISPATCH_ALWAYS_INLINE
1382 static inline bool
1383 _dispatch_queue_sidelock_trylock(dispatch_queue_t dq, pthread_priority_t pp)
1384 {
1385 dispatch_lock_owner owner;
1386 if (_dispatch_unfair_lock_trylock(&dq->dq_sidelock, &owner)) {
1387 return true;
1388 }
1389 _dispatch_wqthread_override_start_check_owner(owner, pp,
1390 &dq->dq_sidelock.dul_lock);
1391 return false;
1392 }
1393
1394 DISPATCH_ALWAYS_INLINE
1395 static inline void
1396 _dispatch_queue_sidelock_lock(dispatch_queue_t dq)
1397 {
1398 return _dispatch_unfair_lock_lock(&dq->dq_sidelock);
1399 }
1400
1401 DISPATCH_ALWAYS_INLINE
1402 static inline bool
1403 _dispatch_queue_sidelock_tryunlock(dispatch_queue_t dq)
1404 {
1405 if (_dispatch_unfair_lock_tryunlock(&dq->dq_sidelock)) {
1406 return true;
1407 }
1408 // Ensure that the root queue sees that this thread was overridden.
1409 _dispatch_set_defaultpriority_override();
1410 return false;
1411 }
1412
1413 DISPATCH_ALWAYS_INLINE
1414 static inline void
1415 _dispatch_queue_sidelock_unlock(dispatch_queue_t dq)
1416 {
1417 if (_dispatch_unfair_lock_unlock_had_failed_trylock(&dq->dq_sidelock)) {
1418 // Ensure that the root queue sees that this thread was overridden.
1419 _dispatch_set_defaultpriority_override();
1420 }
1421 }
1422
1423 #pragma mark -
1424 #pragma mark dispatch_queue_t misc
1425
1426 DISPATCH_ALWAYS_INLINE
1427 static inline dispatch_queue_t
1428 _dispatch_queue_get_current(void)
1429 {
1430 return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
1431 }
1432
1433 DISPATCH_ALWAYS_INLINE
1434 static inline void
1435 _dispatch_queue_set_current(dispatch_queue_t dq)
1436 {
1437 _dispatch_thread_setspecific(dispatch_queue_key, dq);
1438 }
1439
1440 DISPATCH_ALWAYS_INLINE
1441 static inline struct dispatch_object_s*
1442 _dispatch_queue_head(dispatch_queue_t dq)
1443 {
1444 return os_mpsc_get_head(dq, dq_items);
1445 }
1446
1447 DISPATCH_ALWAYS_INLINE
1448 static inline struct dispatch_object_s*
1449 _dispatch_queue_next(dispatch_queue_t dq, struct dispatch_object_s *dc)
1450 {
1451 return os_mpsc_pop_head(dq, dq_items, dc, do_next);
1452 }
1453
1454 DISPATCH_ALWAYS_INLINE
1455 static inline bool
1456 _dispatch_queue_push_update_tail(dispatch_queue_t dq,
1457 struct dispatch_object_s *tail)
1458 {
1459 // if we crash here with a value less than 0x1000, then we are
1460 // hitting a known bug in client code; for example, see
1461 // _dispatch_queue_dispose or _dispatch_atfork_child
1462 return os_mpsc_push_update_tail(dq, dq_items, tail, do_next);
1463 }
1464
1465 DISPATCH_ALWAYS_INLINE
1466 static inline bool
1467 _dispatch_queue_push_update_tail_list(dispatch_queue_t dq,
1468 struct dispatch_object_s *head, struct dispatch_object_s *tail)
1469 {
1470 // if we crash here with a value less than 0x1000, then we are
1471 // hitting a known bug in client code; for example, see
1472 // _dispatch_queue_dispose or _dispatch_atfork_child
1473 return os_mpsc_push_update_tail_list(dq, dq_items, head, tail, do_next);
1474 }
1475
1476 DISPATCH_ALWAYS_INLINE
1477 static inline void
1478 _dispatch_queue_push_update_head(dispatch_queue_t dq,
1479 struct dispatch_object_s *head, bool retained)
1480 {
1481 if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
1482 dispatch_assert(!retained);
1483 // Lie about "retained" here: it generates better assembly in this
1484 // hotpath, and _dispatch_root_queue_wakeup knows to ignore this
1485 // fake "WAKEUP_CONSUME" bit when it also sees WAKEUP_FLUSH.
1486 //
1487 // We need to bypass the retain below because pthread root queues
1488 // are not global and retaining them would be wrong.
1489 //
1490 // We should eventually have a typeflag for "POOL" kind of root queues.
1491 retained = true;
1492 }
1493 // The queue must be retained before dq_items_head is written in order
1494 // to ensure that the reference is still valid when _dispatch_queue_wakeup
1495 // is called. Otherwise, if preempted between the assignment to
1496 // dq_items_head and _dispatch_queue_wakeup, the blocks submitted to the
1497 // queue may release the last reference to the queue when invoked by
1498 // _dispatch_queue_drain. <rdar://problem/6932776>
1499 if (!retained) _dispatch_retain(dq);
1500 os_mpsc_push_update_head(dq, dq_items, head);
1501 }
1502
1503 DISPATCH_ALWAYS_INLINE
1504 static inline void
1505 _dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
1506 dispatch_object_t _tail, pthread_priority_t pp, unsigned int n)
1507 {
1508 struct dispatch_object_s *head = _head._do, *tail = _tail._do;
1509 bool override = _dispatch_queue_need_override_retain(dq, pp);
1510 dispatch_queue_flags_t flags;
1511 if (slowpath(_dispatch_queue_push_update_tail_list(dq, head, tail))) {
1512 _dispatch_queue_push_update_head(dq, head, override);
1513 if (fastpath(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) {
1514 return _dispatch_queue_push_list_slow(dq, n);
1515 }
1516 flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH;
1517 } else if (override) {
1518 flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING;
1519 } else {
1520 return;
1521 }
1522 dx_wakeup(dq, pp, flags);
1523 }
1524
1525 DISPATCH_ALWAYS_INLINE
1526 static inline void
1527 _dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail,
1528 pthread_priority_t pp, dispatch_wakeup_flags_t flags)
1529 {
1530 struct dispatch_object_s *tail = _tail._do;
1531 bool override = _dispatch_queue_need_override(dq, pp);
1532 if (flags & DISPATCH_WAKEUP_SLOW_WAITER) {
1533 // when SLOW_WAITER is set, we borrow the reference of the caller
1534 if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
1535 _dispatch_queue_push_update_head(dq, tail, true);
1536 flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_FLUSH;
1537 } else if (override) {
1538 flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_OVERRIDING;
1539 } else {
1540 flags = DISPATCH_WAKEUP_SLOW_WAITER;
1541 }
1542 } else {
1543 if (override) _dispatch_retain(dq);
1544 if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
1545 _dispatch_queue_push_update_head(dq, tail, override);
1546 flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH;
1547 } else if (override) {
1548 flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING;
1549 } else {
1550 return;
1551 }
1552 }
1553 return dx_wakeup(dq, pp, flags);
1554 }
1555
1556 struct _dispatch_identity_s {
1557 pthread_priority_t old_pp;
1558 };
1559
1560 DISPATCH_ALWAYS_INLINE
1561 static inline void
1562 _dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di,
1563 pthread_priority_t pp)
1564 {
1565 // assumed_rq was set by the caller; we need to fake the priorities
1566 dispatch_queue_t assumed_rq = _dispatch_queue_get_current();
1567
1568 dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE);
1569
1570 di->old_pp = _dispatch_get_defaultpriority();
1571
1572 if (!(assumed_rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) {
1573 if (!pp) {
1574 pp = _dispatch_get_priority();
1575 // _dispatch_root_queue_drain_deferred_item() may turn a manager
1576 // thread into a regular root queue thread, and we must never try
1577 // to restore the manager flag once we have become a regular work
1578 // queue thread.
1579 pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
1580 }
1581 if ((pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >
1582 (assumed_rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
1583 _dispatch_wqthread_override_start(_dispatch_tid_self(), pp);
1584 // Ensure that the root queue sees that this thread was overridden.
1585 _dispatch_set_defaultpriority_override();
1586 }
1587 }
1588 _dispatch_reset_defaultpriority(assumed_rq->dq_priority);
1589 }
1590
1591 DISPATCH_ALWAYS_INLINE
1592 static inline void
1593 _dispatch_root_queue_identity_restore(struct _dispatch_identity_s *di)
1594 {
1595 _dispatch_reset_defaultpriority(di->old_pp);
1596 }
1597
1598 typedef dispatch_queue_t
1599 _dispatch_queue_class_invoke_handler_t(dispatch_object_t,
1600 dispatch_invoke_flags_t, uint64_t *owned, struct dispatch_object_s **);
1601
1602 DISPATCH_ALWAYS_INLINE
1603 static inline void
1604 _dispatch_queue_class_invoke(dispatch_object_t dou,
1605 dispatch_invoke_flags_t flags,
1606 _dispatch_queue_class_invoke_handler_t invoke)
1607 {
1608 dispatch_queue_t dq = dou._dq;
1609 struct dispatch_object_s *dc = NULL;
1610 dispatch_queue_t tq = NULL;
1611 uint64_t dq_state, to_unlock = 0;
1612 bool owning = !slowpath(flags & DISPATCH_INVOKE_STEALING);
1613 bool overriding = slowpath(flags & DISPATCH_INVOKE_OVERRIDING);
1614
1615 // When called from a plain _dispatch_queue_drain:
1616 // overriding = false
1617 // owning = true
1618 //
1619 // When called from an override continuation:
1620 // overriding = true
1621 // owning depends on whether the override embedded the queue or steals
1622 DISPATCH_COMPILER_CAN_ASSUME(owning || overriding);
1623
1624 if (owning) {
1625 dq->do_next = DISPATCH_OBJECT_LISTLESS;
1626 }
1627 to_unlock = _dispatch_queue_drain_try_lock(dq, flags, &dq_state);
1628 if (likely(to_unlock)) {
1629 struct _dispatch_identity_s di;
1630 pthread_priority_t old_dp;
1631
1632 drain_pending_barrier:
1633 if (overriding) {
1634 _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx",
1635 _dispatch_tid_self(), _dispatch_get_defaultpriority());
1636 _dispatch_root_queue_identity_assume(&di, 0);
1637 }
1638
1639 if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
1640 pthread_priority_t op, dp;
1641
1642 old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp);
1643 op = dq->dq_override;
1644 if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
1645 _dispatch_wqthread_override_start(_dispatch_tid_self(), op);
1646 // Ensure that the root queue sees that this thread was overridden.
1647 _dispatch_set_defaultpriority_override();
1648 }
1649 }
1650
1651 flags = _dispatch_queue_merge_autorelease_frequency(dq, flags);
1652 attempt_running_slow_head:
1653 tq = invoke(dq, flags, &to_unlock, &dc);
1654 if (slowpath(tq)) {
1655 // Either dc is set, which is the deferred invoke case,
1656 //
1657 // or only tq is set, which means a reenqueue is required because of
1658 // a retarget, a suspension, or a width change.
1659 //
1660 // In both cases, we want to bypass the check for DIRTY.
1661 // That may cause us to leave DIRTY in place, but all drain lock
1662 // acquirers clear it.
1663 } else {
1664 if (!_dispatch_queue_drain_try_unlock(dq, to_unlock)) {
1665 goto attempt_running_slow_head;
1666 }
1667 to_unlock = 0;
1668 }
1669 if (overriding) {
1670 _dispatch_root_queue_identity_restore(&di);
1671 }
1672 if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
1673 _dispatch_reset_defaultpriority(old_dp);
1674 }
1675 } else if (overriding) {
1676 uint32_t owner = _dq_state_drain_owner(dq_state);
1677 pthread_priority_t p = dq->dq_override;
1678 if (owner && p) {
1679 _dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx",
1680 owner, p);
1681 _dispatch_wqthread_override_start_check_owner(owner, p,
1682 &dq->dq_state_lock);
1683 }
1684 }
1685
1686 if (owning) {
1687 _dispatch_introspection_queue_item_complete(dq);
1688 }
1689
1690 if (tq && dc) {
1691 return _dispatch_queue_drain_deferred_invoke(dq, flags, to_unlock, dc);
1692 }
1693
1694 if (tq) {
1695 bool full_width_upgrade_allowed = (tq == _dispatch_queue_get_current());
1696 uint64_t old_state, new_state;
1697
1698 os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
1699 new_state = old_state - to_unlock;
1700 if (full_width_upgrade_allowed && _dq_state_is_runnable(new_state) &&
1701 _dq_state_has_pending_barrier(new_state)) {
1702 new_state += DISPATCH_QUEUE_IN_BARRIER;
1703 new_state += DISPATCH_QUEUE_WIDTH_INTERVAL;
1704 new_state -= DISPATCH_QUEUE_PENDING_BARRIER;
1705 new_state += to_unlock & DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
1706 } else {
1707 new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
1708 if (_dq_state_should_wakeup(new_state)) {
1709 // drain was not interrupted by a suspension;
1710 // we will reenqueue right away, so just put ENQUEUED back
1711 new_state |= DISPATCH_QUEUE_ENQUEUED;
1712 new_state |= DISPATCH_QUEUE_DIRTY;
1713 }
1714 }
1715 });
1716 if (_dq_state_is_in_barrier(new_state)) {
1717 // we did a "full width upgrade" and just added IN_BARRIER
1718 // so adjust what we own and drain again
1719 to_unlock &= DISPATCH_QUEUE_ENQUEUED;
1720 to_unlock += DISPATCH_QUEUE_IN_BARRIER;
1721 to_unlock += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
1722 goto drain_pending_barrier;
1723 }
1724 if (_dq_state_has_override(old_state)) {
1725 // Ensure that the root queue sees that this thread was overridden.
1726 _dispatch_set_defaultpriority_override();
1727 }
1728
1729 if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) {
1730 return _dispatch_queue_push(tq, dq, 0);
1731 }
1732 }
1733
1734 return _dispatch_release_tailcall(dq);
1735 }
1736
1737 DISPATCH_ALWAYS_INLINE
1738 static inline bool
1739 _dispatch_queue_class_probe(dispatch_queue_class_t dqu)
1740 {
1741 struct dispatch_object_s *tail;
1742 // seq_cst wrt atomic store to dq_state <rdar://problem/14637483>
1743 // seq_cst wrt atomic store to dq_flags <rdar://problem/22623242>
1744 tail = os_atomic_load2o(dqu._oq, oq_items_tail, ordered);
1745 return slowpath(tail != NULL);
1746 }
1747
1748 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
1749 static inline bool
1750 _dispatch_is_in_root_queues_array(dispatch_queue_t dq)
1751 {
1752 return (dq >= _dispatch_root_queues) &&
1753 (dq < _dispatch_root_queues + _DISPATCH_ROOT_QUEUE_IDX_COUNT);
1754 }
1755
1756 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
1757 static inline dispatch_queue_t
1758 _dispatch_get_root_queue(qos_class_t priority, bool overcommit)
1759 {
1760 if (overcommit) switch (priority) {
1761 case _DISPATCH_QOS_CLASS_MAINTENANCE:
1762 return &_dispatch_root_queues[
1763 DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT];
1764 case _DISPATCH_QOS_CLASS_BACKGROUND:
1765 return &_dispatch_root_queues[
1766 DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT];
1767 case _DISPATCH_QOS_CLASS_UTILITY:
1768 return &_dispatch_root_queues[
1769 DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT];
1770 case _DISPATCH_QOS_CLASS_DEFAULT:
1771 return &_dispatch_root_queues[
1772 DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT];
1773 case _DISPATCH_QOS_CLASS_USER_INITIATED:
1774 return &_dispatch_root_queues[
1775 DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT];
1776 case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
1777 return &_dispatch_root_queues[
1778 DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT];
1779 } else switch (priority) {
1780 case _DISPATCH_QOS_CLASS_MAINTENANCE:
1781 return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS];
1782 case _DISPATCH_QOS_CLASS_BACKGROUND:
1783 return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS];
1784 case _DISPATCH_QOS_CLASS_UTILITY:
1785 return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS];
1786 case _DISPATCH_QOS_CLASS_DEFAULT:
1787 return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS];
1788 case _DISPATCH_QOS_CLASS_USER_INITIATED:
1789 return &_dispatch_root_queues[
1790 DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS];
1791 case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
1792 return &_dispatch_root_queues[
1793 DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS];
1794 }
1795 return NULL;
1796 }
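// A minimal usage sketch (illustrative only, not a call made from this file):
// picking the overcommit variant of the default-QoS root queue would look like
//
//	dispatch_queue_t rq =
//			_dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true);
//
// which resolves to DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT above;
// an unrecognized QoS class falls through to the NULL return.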
1797
1798 #if HAVE_PTHREAD_WORKQUEUE_QOS
1799 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
1800 static inline dispatch_queue_t
1801 _dispatch_get_root_queue_for_priority(pthread_priority_t pp, bool overcommit)
1802 {
1803 uint32_t idx;
1804
1805 pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
1806 idx = (uint32_t)__builtin_ffs((int)pp);
1807 if (unlikely(!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]
1808 .dq_priority)) {
1809 // If kernel doesn't support maintenance, bottom bit is background.
1810 // Shift to our idea of where background bit is.
1811 idx++;
1812 }
1813 // ffs is 1-based, and we also account for the QOS_CLASS_SHIFT;
1814 // if pp is 0, idx is 0 or 1 and this will wrap to a value larger than
1815 // DISPATCH_QUEUE_QOS_COUNT
1816 idx -= (_PTHREAD_PRIORITY_QOS_CLASS_SHIFT + 1);
1817 if (unlikely(idx >= DISPATCH_QUEUE_QOS_COUNT)) {
1818 DISPATCH_CLIENT_CRASH(pp, "Corrupted priority");
1819 }
1820 return &_dispatch_root_queues[2 * idx + overcommit];
1821 }
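// Worked example of the indexing above (assuming the array layout described
// in _dispatch_get_root_queue_with_overcommit below, where non-overcommit
// queues sit at even indices and their overcommit twins at the following odd
// index): a QoS class whose priority bit yields idx == 0 resolves to
// _dispatch_root_queues[0] when !overcommit and _dispatch_root_queues[1]
// when overcommit.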
1822 #endif
1823
1824 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
1825 static inline dispatch_queue_t
1826 _dispatch_get_root_queue_with_overcommit(dispatch_queue_t rq, bool overcommit)
1827 {
1828 bool rq_overcommit = (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
1829 // root queues in _dispatch_root_queues are non-overcommit at even indices
1830 // and overcommit at odd ones, so fixing up overcommit means either returning
1831 // the same queue or picking its neighbour in _dispatch_root_queues
1832 if (overcommit && !rq_overcommit) {
1833 return rq + 1;
1834 }
1835 if (!overcommit && rq_overcommit) {
1836 return rq - 1;
1837 }
1838 return rq;
1839 }
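// For example (illustrative, relying on the even/odd layout noted above):
// with rq == &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] and
// overcommit == true, this returns rq + 1, i.e. the
// DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT entry.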
1840
1841 DISPATCH_ALWAYS_INLINE
1842 static inline void
1843 _dispatch_queue_set_bound_thread(dispatch_queue_t dq)
1844 {
1845 // Tag thread-bound queues with the owning thread
1846 dispatch_assert(_dispatch_queue_is_thread_bound(dq));
1847 mach_port_t old_owner, self = _dispatch_tid_self();
1848 uint64_t dq_state = os_atomic_or_orig2o(dq, dq_state, self, relaxed);
1849 if (unlikely(old_owner = _dq_state_drain_owner(dq_state))) {
1850 DISPATCH_INTERNAL_CRASH(old_owner, "Queue bound twice");
1851 }
1852 }
1853
1854 DISPATCH_ALWAYS_INLINE
1855 static inline void
1856 _dispatch_queue_clear_bound_thread(dispatch_queue_t dq)
1857 {
1858 uint64_t dq_state, value;
1859
1860 dispatch_assert(_dispatch_queue_is_thread_bound(dq));
1861 os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
1862 value = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(dq_state);
1863 });
1864 }
1865
1866 DISPATCH_ALWAYS_INLINE
1867 static inline dispatch_pthread_root_queue_observer_hooks_t
1868 _dispatch_get_pthread_root_queue_observer_hooks(void)
1869 {
1870 return _dispatch_thread_getspecific(
1871 dispatch_pthread_root_queue_observer_hooks_key);
1872 }
1873
1874 DISPATCH_ALWAYS_INLINE
1875 static inline void
1876 _dispatch_set_pthread_root_queue_observer_hooks(
1877 dispatch_pthread_root_queue_observer_hooks_t observer_hooks)
1878 {
1879 _dispatch_thread_setspecific(dispatch_pthread_root_queue_observer_hooks_key,
1880 observer_hooks);
1881 }
1882
1883 #pragma mark -
1884 #pragma mark dispatch_priority
1885
1886 DISPATCH_ALWAYS_INLINE
1887 static inline pthread_priority_t
1888 _dispatch_get_defaultpriority(void)
1889 {
1890 #if HAVE_PTHREAD_WORKQUEUE_QOS
1891 pthread_priority_t pp = (uintptr_t)_dispatch_thread_getspecific(
1892 dispatch_defaultpriority_key);
1893 return pp;
1894 #else
1895 return 0;
1896 #endif
1897 }
1898
1899 DISPATCH_ALWAYS_INLINE
1900 static inline void
1901 _dispatch_reset_defaultpriority(pthread_priority_t pp)
1902 {
1903 #if HAVE_PTHREAD_WORKQUEUE_QOS
1904 pthread_priority_t old_pp = _dispatch_get_defaultpriority();
1905 // If an inner-loop or'd in the override flag to the per-thread priority,
1906 // it needs to be propagated up the chain.
1907 pp |= old_pp & _PTHREAD_PRIORITY_OVERRIDE_FLAG;
1908 _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
1909 #else
1910 (void)pp;
1911 #endif
1912 }
1913
1914 DISPATCH_ALWAYS_INLINE
1915 static inline void
1916 _dispatch_set_defaultpriority_override(void)
1917 {
1918 #if HAVE_PTHREAD_WORKQUEUE_QOS
1919 pthread_priority_t old_pp = _dispatch_get_defaultpriority();
1920 pthread_priority_t pp = old_pp | _PTHREAD_PRIORITY_OVERRIDE_FLAG;
1921
1922 _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
1923 #endif
1924 }
1925
1926 DISPATCH_ALWAYS_INLINE
1927 static inline bool
1928 _dispatch_reset_defaultpriority_override(void)
1929 {
1930 #if HAVE_PTHREAD_WORKQUEUE_QOS
1931 pthread_priority_t old_pp = _dispatch_get_defaultpriority();
1932 pthread_priority_t pp = old_pp &
1933 ~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG);
1934
1935 _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
1936 return unlikely(pp != old_pp);
1937 #endif
1938 return false;
1939 }
1940
1941 DISPATCH_ALWAYS_INLINE
1942 static inline void
1943 _dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
1944 dispatch_queue_t tq)
1945 {
1946 #if HAVE_PTHREAD_WORKQUEUE_QOS
1947 const dispatch_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG;
1948 const dispatch_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG;
1949 const dispatch_priority_t defaultqueue_flag =
1950 _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
1951 dispatch_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority;
1952 if ((!(dqp & ~_PTHREAD_PRIORITY_FLAGS_MASK) || (dqp & inherited_flag)) &&
1953 (tqp & rootqueue_flag)) {
1954 if (tqp & defaultqueue_flag) {
1955 dq->dq_priority = 0;
1956 } else {
1957 dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag;
1958 }
1959 }
1960 #else
1961 (void)dq; (void)tq;
1962 #endif
1963 }
1964
1965 DISPATCH_ALWAYS_INLINE
1966 static inline pthread_priority_t
1967 _dispatch_set_defaultpriority(pthread_priority_t pp, pthread_priority_t *new_pp)
1968 {
1969 #if HAVE_PTHREAD_WORKQUEUE_QOS
1970 const pthread_priority_t default_priority_preserved_flags =
1971 _PTHREAD_PRIORITY_OVERRIDE_FLAG|_PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
1972 pthread_priority_t old_pp = _dispatch_get_defaultpriority();
1973 if (old_pp) {
1974 pthread_priority_t flags, defaultqueue, basepri;
1975 flags = (pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
1976 defaultqueue = (old_pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
1977 basepri = (old_pp & ~_PTHREAD_PRIORITY_FLAGS_MASK);
1978 pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
1979 if (!pp) {
1980 flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue;
1981 pp = basepri;
1982 } else if (pp < basepri && !defaultqueue) { // rdar://16349734
1983 pp = basepri;
1984 }
1985 pp |= flags | (old_pp & default_priority_preserved_flags);
1986 }
1987 _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
1988 if (new_pp) *new_pp = pp;
1989 return old_pp;
1990 #else
1991 (void)pp; (void)new_pp;
1992 return 0;
1993 #endif
1994 }
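// Typical pairing (as in the drain path earlier in this file): the caller
// saves the returned value and hands it back to
// _dispatch_reset_defaultpriority() once the drain is done, e.g.
//
//	old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp);
//	...
//	_dispatch_reset_defaultpriority(old_dp);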
1995
1996 DISPATCH_ALWAYS_INLINE
1997 static inline pthread_priority_t
1998 _dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags)
1999 {
2000 #if HAVE_PTHREAD_WORKQUEUE_QOS
2001 pthread_priority_t defaultpri = _dispatch_get_defaultpriority();
2002 bool enforce, inherited, defaultqueue;
2003 enforce = (flags & DISPATCH_PRIORITY_ENFORCE) ||
2004 (pp & _PTHREAD_PRIORITY_ENFORCE_FLAG);
2005 inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG);
2006 defaultqueue = (defaultpri & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
2007 defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
2008 pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
2009
2010 if (!pp) {
2011 return defaultpri;
2012 } else if (defaultqueue) { // rdar://16349734
2013 return pp;
2014 } else if (pp < defaultpri) {
2015 return defaultpri;
2016 } else if (enforce || inherited) {
2017 return pp;
2018 } else {
2019 return defaultpri;
2020 }
2021 #else
2022 (void)pp; (void)flags;
2023 return 0;
2024 #endif
2025 }
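// In effect: no requested priority adopts the default; a DEFAULTQUEUE-flagged
// hierarchy always honours the request; otherwise the request only wins when
// it is at least the default priority and is either enforced or the default
// was merely inherited.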
2026
2027 DISPATCH_ALWAYS_INLINE
2028 static inline pthread_priority_t
2029 _dispatch_priority_inherit_from_root_queue(pthread_priority_t pp,
2030 dispatch_queue_t rq)
2031 {
2032 #if HAVE_PTHREAD_WORKQUEUE_QOS
2033 pthread_priority_t p = pp & ~_PTHREAD_PRIORITY_FLAGS_MASK;
2034 pthread_priority_t rqp = rq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
2035 pthread_priority_t defaultqueue =
2036 rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
2037
2038 if (!p || (!defaultqueue && p < rqp)) {
2039 p = rqp | defaultqueue;
2040 }
2041 return p | (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
2042 #else
2043 (void)rq; (void)pp;
2044 return 0;
2045 #endif
2046 }
2047
2048 DISPATCH_ALWAYS_INLINE
2049 static inline pthread_priority_t
2050 _dispatch_get_priority(void)
2051 {
2052 #if HAVE_PTHREAD_WORKQUEUE_QOS
2053 pthread_priority_t pp = (uintptr_t)
2054 _dispatch_thread_getspecific(dispatch_priority_key);
2055 return pp;
2056 #else
2057 return 0;
2058 #endif
2059 }
2060
2061 #if HAVE_PTHREAD_WORKQUEUE_QOS
2062 DISPATCH_ALWAYS_INLINE
2063 static inline pthread_priority_t
2064 _dispatch_priority_compute_update(pthread_priority_t pp)
2065 {
2066 dispatch_assert(pp != DISPATCH_NO_PRIORITY);
2067 if (!_dispatch_set_qos_class_enabled) return 0;
2068 // the priority in _dispatch_get_priority() only tracks manager-ness
2069 // and overcommit, which are inherited from the current value on each update;
2070 // however, if the priority had the NEEDS_UNBIND flag set, we need to clear
2071 // it the first chance we get
2072 //
2073 // the manager bit is invalid input, but we keep it to get meaningful
2074 // assertions in _dispatch_set_priority_and_voucher_slow()
2075 pp &= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
2076 pthread_priority_t cur_priority = _dispatch_get_priority();
2077 pthread_priority_t unbind = _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
2078 pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
2079 if (unlikely(cur_priority & unbind)) {
2080 // we always need an update if the NEEDS_UNBIND flag is set:
2081 // the slowpath in _dispatch_set_priority_and_voucher_slow() will
2082 // adjust the priority further with the proper overcommitness
2083 return pp ? pp : (cur_priority & ~unbind);
2084 } else {
2085 cur_priority &= ~overcommit;
2086 }
2087 if (unlikely(pp != cur_priority)) return pp;
2088 return 0;
2089 }
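// A zero return above means "no kernel priority change needed", which is what
// the likely(!pp) fast path in _dispatch_set_priority_and_voucher() below
// relies on to skip the slow path entirely.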
2090 #endif
2091
2092 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
2093 static inline voucher_t
2094 _dispatch_set_priority_and_voucher(pthread_priority_t pp,
2095 voucher_t v, _dispatch_thread_set_self_t flags)
2096 {
2097 #if HAVE_PTHREAD_WORKQUEUE_QOS
2098 pp = _dispatch_priority_compute_update(pp);
2099 if (likely(!pp)) {
2100 if (v == DISPATCH_NO_VOUCHER) {
2101 return DISPATCH_NO_VOUCHER;
2102 }
2103 if (likely(v == _voucher_get())) {
2104 bool retained = flags & DISPATCH_VOUCHER_CONSUME;
2105 if (flags & DISPATCH_VOUCHER_REPLACE) {
2106 if (retained && v) _voucher_release_no_dispose(v);
2107 v = DISPATCH_NO_VOUCHER;
2108 } else {
2109 if (!retained && v) _voucher_retain(v);
2110 }
2111 return v;
2112 }
2113 }
2114 return _dispatch_set_priority_and_voucher_slow(pp, v, flags);
2115 #else
2116 (void)pp; (void)v; (void)flags;
2117 return DISPATCH_NO_VOUCHER;
2118 #endif
2119 }
2120
2121 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
2122 static inline voucher_t
2123 _dispatch_adopt_priority_and_set_voucher(pthread_priority_t pp,
2124 voucher_t v, _dispatch_thread_set_self_t flags)
2125 {
2126 pthread_priority_t p = 0;
2127 if (pp != DISPATCH_NO_PRIORITY) {
2128 p = _dispatch_priority_adopt(pp, flags);
2129 }
2130 return _dispatch_set_priority_and_voucher(p, v, flags);
2131 }
2132
2133 DISPATCH_ALWAYS_INLINE
2134 static inline void
2135 _dispatch_reset_priority_and_voucher(pthread_priority_t pp, voucher_t v)
2136 {
2137 if (pp == DISPATCH_NO_PRIORITY) pp = 0;
2138 (void)_dispatch_set_priority_and_voucher(pp, v,
2139 DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE);
2140 }
2141
2142 DISPATCH_ALWAYS_INLINE
2143 static inline void
2144 _dispatch_reset_voucher(voucher_t v, _dispatch_thread_set_self_t flags)
2145 {
2146 flags |= DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE;
2147 (void)_dispatch_set_priority_and_voucher(0, v, flags);
2148 }
2149
2150 DISPATCH_ALWAYS_INLINE
2151 static inline bool
2152 _dispatch_queue_need_override(dispatch_queue_class_t dqu, pthread_priority_t pp)
2153 {
2154 // global queues have their override set to DISPATCH_SATURATED_OVERRIDE
2155 // which makes this test always return false for them.
2156 return dqu._oq->oq_override < (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
2157 }
2158
2159 DISPATCH_ALWAYS_INLINE
2160 static inline bool
2161 _dispatch_queue_received_override(dispatch_queue_class_t dqu,
2162 pthread_priority_t pp)
2163 {
2164 dispatch_assert(dqu._oq->oq_override != DISPATCH_SATURATED_OVERRIDE);
2165 return dqu._oq->oq_override > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
2166 }
2167
2168 DISPATCH_ALWAYS_INLINE
2169 static inline bool
2170 _dispatch_queue_need_override_retain(dispatch_queue_class_t dqu,
2171 pthread_priority_t pp)
2172 {
2173 if (_dispatch_queue_need_override(dqu, pp)) {
2174 _os_object_retain_internal_inline(dqu._oq->_as_os_obj);
2175 return true;
2176 }
2177 return false;
2178 }
2179
2180 DISPATCH_ALWAYS_INLINE
2181 static inline bool
2182 _dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu,
2183 dispatch_priority_t new_op)
2184 {
2185 dispatch_priority_t old_op;
2186 new_op &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
2187 if (!new_op) return false;
2188 os_atomic_rmw_loop2o(dqu._oq, oq_override, old_op, new_op, relaxed, {
2189 if (new_op <= old_op) {
2190 os_atomic_rmw_loop_give_up(return false);
2191 }
2192 });
2193 return true;
2194 }
2195
2196 DISPATCH_ALWAYS_INLINE
2197 static inline void
2198 _dispatch_queue_override_priority(dispatch_queue_class_t dqu,
2199 pthread_priority_t *pp, dispatch_wakeup_flags_t *flags)
2200 {
2201 os_mpsc_queue_t oq = dqu._oq;
2202 dispatch_priority_t qp = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
2203 dispatch_priority_t np = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
2204 dispatch_priority_t o;
2205
2206 _dispatch_assert_is_valid_qos_override(np);
2207 if (oq->oq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG) {
2208 qp = 0;
2209 } else if (*flags & DISPATCH_WAKEUP_SLOW_WAITER) {
2210 // when a queue is used as a lock its priority doesn't count
2211 } else if (np < qp) {
2212 // for asynchronous workitems, queue priority is the floor for overrides
2213 np = qp;
2214 }
2215 *flags &= ~_DISPATCH_WAKEUP_OVERRIDE_BITS;
2216
2217 // this loop optimizes for the case when no update of the override is required;
2218 // os_atomic_rmw_loop2o optimizes for the case when the update happens,
2219 // so it can't be used here.
2220 o = os_atomic_load2o(oq, oq_override, relaxed);
2221 do {
2222 if (likely(np <= o)) break;
2223 } while (unlikely(!os_atomic_cmpxchgvw2o(oq, oq_override, o, np, &o, relaxed)));
2224
2225 if (np <= o) {
2226 *pp = o;
2227 } else {
2228 *flags |= DISPATCH_WAKEUP_OVERRIDING;
2229 *pp = np;
2230 }
2231 if (o > qp) {
2232 *flags |= DISPATCH_WAKEUP_WAS_OVERRIDDEN;
2233 }
2234 }
2235
2236 DISPATCH_ALWAYS_INLINE
2237 static inline dispatch_priority_t
2238 _dispatch_queue_reset_override_priority(dispatch_queue_class_t dqu,
2239 bool qp_is_floor)
2240 {
2241 os_mpsc_queue_t oq = dqu._oq;
2242 dispatch_priority_t p = 0;
2243 if (qp_is_floor) {
2244 // thread bound queues floor their dq_override to their
2245 // priority to avoid receiving useless overrides
2246 p = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
2247 }
2248 dispatch_priority_t o = os_atomic_xchg2o(oq, oq_override, p, relaxed);
2249 dispatch_assert(o != DISPATCH_SATURATED_OVERRIDE);
2250 return (o > p) ? o : 0;
2251 }
2252
2253 DISPATCH_ALWAYS_INLINE
2254 static inline pthread_priority_t
2255 _dispatch_priority_propagate(void)
2256 {
2257 #if HAVE_PTHREAD_WORKQUEUE_QOS
2258 pthread_priority_t pp = _dispatch_get_priority();
2259 pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
2260 if (pp > _dispatch_user_initiated_priority) {
2261 // Cap QOS for propagation at user-initiated <rdar://16681262&16998036>
2262 pp = _dispatch_user_initiated_priority;
2263 }
2264 return pp;
2265 #else
2266 return 0;
2267 #endif
2268 }
2269
2270 // including maintenance
2271 DISPATCH_ALWAYS_INLINE
2272 static inline bool
2273 _dispatch_is_background_thread(void)
2274 {
2275 #if HAVE_PTHREAD_WORKQUEUE_QOS
2276 pthread_priority_t pp = _dispatch_get_priority();
2277 pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
2278 return pp && (pp <= _dispatch_background_priority);
2279 #else
2280 return false;
2281 #endif
2282 }
2283
2284 #pragma mark -
2285 #pragma mark dispatch_block_t
2286
2287 #ifdef __BLOCKS__
2288
2289 DISPATCH_ALWAYS_INLINE
2290 static inline bool
2291 _dispatch_block_has_private_data(const dispatch_block_t block)
2292 {
2293 extern void (*_dispatch_block_special_invoke)(void*);
2294 return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke);
2295 }
2296
2297 DISPATCH_ALWAYS_INLINE
2298 static inline bool
2299 _dispatch_block_sync_should_enforce_qos_class(dispatch_block_flags_t flags)
2300 {
2301 /*
2302 * Generates better assembly than the actual readable test:
2303 * (flags & ENFORCE_QOS_CLASS) || !(flags & INHERIT_QOS_FLAGS)
2304 */
2305 flags &= DISPATCH_BLOCK_ENFORCE_QOS_CLASS | DISPATCH_BLOCK_INHERIT_QOS_CLASS;
2306 return flags != DISPATCH_BLOCK_INHERIT_QOS_CLASS;
2307 }
2308
2309 DISPATCH_ALWAYS_INLINE
2310 static inline dispatch_block_private_data_t
2311 _dispatch_block_get_data(const dispatch_block_t db)
2312 {
2313 if (!_dispatch_block_has_private_data(db)) {
2314 return NULL;
2315 }
2316 // Keep in sync with _dispatch_block_create implementation
2317 uint8_t *x = (uint8_t *)db;
2318 // x points to base of struct Block_layout
2319 x += sizeof(struct Block_layout);
2320 // x points to base of captured dispatch_block_private_data_s object
2321 dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x;
2322 if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) {
2323 DISPATCH_CLIENT_CRASH(dbpd->dbpd_magic,
2324 "Corruption of dispatch block object");
2325 }
2326 return dbpd;
2327 }
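// Layout assumed by the pointer arithmetic above (kept in sync with the
// _dispatch_block_create implementation, per the comment in the function):
//
//	[ struct Block_layout ][ struct dispatch_block_private_data_s ]
//	^db                    ^dbpd (dbpd_magic validated before use)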
2328
2329 DISPATCH_ALWAYS_INLINE
2330 static inline pthread_priority_t
2331 _dispatch_block_get_priority(const dispatch_block_t db)
2332 {
2333 dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
2334 return dbpd ? dbpd->dbpd_priority : 0;
2335 }
2336
2337 DISPATCH_ALWAYS_INLINE
2338 static inline dispatch_block_flags_t
2339 _dispatch_block_get_flags(const dispatch_block_t db)
2340 {
2341 dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
2342 return dbpd ? dbpd->dbpd_flags : 0;
2343 }
2344
2345 #endif
2346
2347 #pragma mark -
2348 #pragma mark dispatch_continuation_t
2349
2350 DISPATCH_ALWAYS_INLINE
2351 static inline dispatch_continuation_t
2352 _dispatch_continuation_alloc_cacheonly(void)
2353 {
2354 dispatch_continuation_t dc = (dispatch_continuation_t)
2355 _dispatch_thread_getspecific(dispatch_cache_key);
2356 if (likely(dc)) {
2357 _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
2358 }
2359 return dc;
2360 }
2361
2362 DISPATCH_ALWAYS_INLINE
2363 static inline dispatch_continuation_t
2364 _dispatch_continuation_alloc(void)
2365 {
2366 dispatch_continuation_t dc =
2367 _dispatch_continuation_alloc_cacheonly();
2368 if (unlikely(!dc)) {
2369 return _dispatch_continuation_alloc_from_heap();
2370 }
2371 return dc;
2372 }
2373
2374 DISPATCH_ALWAYS_INLINE
2375 static inline dispatch_continuation_t
2376 _dispatch_continuation_free_cacheonly(dispatch_continuation_t dc)
2377 {
2378 dispatch_continuation_t prev_dc = (dispatch_continuation_t)
2379 _dispatch_thread_getspecific(dispatch_cache_key);
2380 int cnt = prev_dc ? prev_dc->dc_cache_cnt + 1 : 1;
2381 // Cap continuation cache
2382 if (unlikely(cnt > _dispatch_continuation_cache_limit)) {
2383 return dc;
2384 }
2385 dc->do_next = prev_dc;
2386 dc->dc_cache_cnt = cnt;
2387 _dispatch_thread_setspecific(dispatch_cache_key, dc);
2388 return NULL;
2389 }
2390
2391 DISPATCH_ALWAYS_INLINE
2392 static inline void
2393 _dispatch_continuation_free(dispatch_continuation_t dc)
2394 {
2395 dc = _dispatch_continuation_free_cacheonly(dc);
2396 if (unlikely(dc)) {
2397 _dispatch_continuation_free_to_cache_limit(dc);
2398 }
2399 }
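// The alloc/free pair above maintains a per-thread LIFO cache of continuations
// threaded through do_next under dispatch_cache_key:
// _dispatch_continuation_alloc() pops from it, _dispatch_continuation_free()
// pushes back until the count reaches _dispatch_continuation_cache_limit, and
// overflow is handed to _dispatch_continuation_free_to_cache_limit().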
2400
2401 #include "trace.h"
2402
2403 DISPATCH_ALWAYS_INLINE
2404 static inline void
2405 _dispatch_continuation_with_group_invoke(dispatch_continuation_t dc)
2406 {
2407 struct dispatch_object_s *dou = dc->dc_data;
2408 unsigned long type = dx_type(dou);
2409 if (type == DISPATCH_GROUP_TYPE) {
2410 _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
2411 _dispatch_introspection_queue_item_complete(dou);
2412 dispatch_group_leave((dispatch_group_t)dou);
2413 } else {
2414 DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type");
2415 }
2416 }
2417
2418 DISPATCH_ALWAYS_INLINE
2419 static inline void
2420 _dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov,
2421 dispatch_invoke_flags_t flags)
2422 {
2423 dispatch_continuation_t dc = dou._dc, dc1;
2424 dispatch_invoke_with_autoreleasepool(flags, {
2425 uintptr_t dc_flags = dc->dc_flags;
2426 // Add the item back to the cache before calling the function. This
2427 // allows the 'hot' continuation to be used for a quick callback.
2428 //
2429 // The ccache version is per-thread.
2430 // Therefore, the object has not been reused yet.
2431 // This generates better assembly.
2432 _dispatch_continuation_voucher_adopt(dc, ov, dc_flags);
2433 if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
2434 dc1 = _dispatch_continuation_free_cacheonly(dc);
2435 } else {
2436 dc1 = NULL;
2437 }
2438 if (unlikely(dc_flags & DISPATCH_OBJ_GROUP_BIT)) {
2439 _dispatch_continuation_with_group_invoke(dc);
2440 } else {
2441 _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
2442 _dispatch_introspection_queue_item_complete(dou);
2443 }
2444 if (unlikely(dc1)) {
2445 _dispatch_continuation_free_to_cache_limit(dc1);
2446 }
2447 });
2448 }
2449
2450 DISPATCH_ALWAYS_INLINE_NDEBUG
2451 static inline void
2452 _dispatch_continuation_pop_inline(dispatch_object_t dou, dispatch_queue_t dq,
2453 dispatch_invoke_flags_t flags)
2454 {
2455 dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
2456 _dispatch_get_pthread_root_queue_observer_hooks();
2457 if (observer_hooks) observer_hooks->queue_will_execute(dq);
2458 _dispatch_trace_continuation_pop(dq, dou);
2459 flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
2460 if (_dispatch_object_has_vtable(dou)) {
2461 dx_invoke(dou._do, flags);
2462 } else {
2463 voucher_t ov = dq->dq_override_voucher;
2464 _dispatch_continuation_invoke_inline(dou, ov, flags);
2465 }
2466 if (observer_hooks) observer_hooks->queue_did_execute(dq);
2467 }
2468
2469 // used to forward the do_invoke of a continuation with a vtable to its real
2470 // implementation.
2471 #define _dispatch_continuation_pop_forwarded(dc, ov, dc_flags, ...) \
2472 ({ \
2473 dispatch_continuation_t _dc = (dc), _dc1; \
2474 uintptr_t _dc_flags = (dc_flags); \
2475 _dispatch_continuation_voucher_adopt(_dc, ov, _dc_flags); \
2476 if (_dc_flags & DISPATCH_OBJ_CONSUME_BIT) { \
2477 _dc1 = _dispatch_continuation_free_cacheonly(_dc); \
2478 } else { \
2479 _dc1 = NULL; \
2480 } \
2481 __VA_ARGS__; \
2482 _dispatch_introspection_queue_item_complete(_dc); \
2483 if (unlikely(_dc1)) { \
2484 _dispatch_continuation_free_to_cache_limit(_dc1); \
2485 } \
2486 })
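// Illustrative use (hypothetical callsite, not taken from this header): a
// continuation with a vtable could forward its real work through the macro,
// supplying the callout as the trailing statement, e.g.
//
//	_dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER,
//			DISPATCH_OBJ_CONSUME_BIT, {
//		_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
//	});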
2487
2488 DISPATCH_ALWAYS_INLINE
2489 static inline void
2490 _dispatch_continuation_priority_set(dispatch_continuation_t dc,
2491 pthread_priority_t pp, dispatch_block_flags_t flags)
2492 {
2493 #if HAVE_PTHREAD_WORKQUEUE_QOS
2494 if (likely(!(flags & DISPATCH_BLOCK_HAS_PRIORITY))) {
2495 pp = _dispatch_priority_propagate();
2496 }
2497 if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) {
2498 pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
2499 }
2500 dc->dc_priority = pp;
2501 #else
2502 (void)dc; (void)pp; (void)flags;
2503 #endif
2504 }
2505
2506 DISPATCH_ALWAYS_INLINE
2507 static inline pthread_priority_t
2508 _dispatch_continuation_get_override_priority(dispatch_queue_t dq,
2509 dispatch_continuation_t dc)
2510 {
2511 #if HAVE_PTHREAD_WORKQUEUE_QOS
2512 pthread_priority_t p = dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
2513 bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG;
2514 pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
2515 bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
2516
2517 dispatch_assert(dc->dc_priority != DISPATCH_NO_PRIORITY);
2518 if (p && (enforce || !dqp || defaultqueue)) {
2519 return p;
2520 }
2521 return dqp;
2522 #else
2523 (void)dq; (void)dc;
2524 return 0;
2525 #endif
2526 }
2527
2528 DISPATCH_ALWAYS_INLINE
2529 static inline void
2530 _dispatch_continuation_init_f(dispatch_continuation_t dc,
2531 dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t func,
2532 pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags)
2533 {
2534 dc->dc_flags = dc_flags;
2535 dc->dc_func = func;
2536 dc->dc_ctxt = ctxt;
2537 _dispatch_continuation_voucher_set(dc, dqu, flags);
2538 _dispatch_continuation_priority_set(dc, pp, flags);
2539 }
2540
2541 DISPATCH_ALWAYS_INLINE
2542 static inline void
2543 _dispatch_continuation_init(dispatch_continuation_t dc,
2544 dispatch_queue_class_t dqu, dispatch_block_t work,
2545 pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags)
2546 {
2547 dc->dc_flags = dc_flags | DISPATCH_OBJ_BLOCK_BIT;
2548 dc->dc_ctxt = _dispatch_Block_copy(work);
2549 _dispatch_continuation_priority_set(dc, pp, flags);
2550
2551 if (unlikely(_dispatch_block_has_private_data(work))) {
2552 // always sets dc_func & dc_voucher
2553 // may update dc_priority & do_vtable
2554 return _dispatch_continuation_init_slow(dc, dqu, flags);
2555 }
2556
2557 if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
2558 dc->dc_func = _dispatch_call_block_and_release;
2559 } else {
2560 dc->dc_func = _dispatch_Block_invoke(work);
2561 }
2562 _dispatch_continuation_voucher_set(dc, dqu, flags);
2563 }
2564
2565 #endif // DISPATCH_PURE_C
2566
2567 #endif /* __DISPATCH_INLINE_INTERNAL__ */