1 /*
2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 /*
22 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
23 * which are subject to change in future releases of Mac OS X. Any applications
24 * relying on these interfaces WILL break.
25 */
26
27 #ifndef __DISPATCH_INLINE_INTERNAL__
28 #define __DISPATCH_INLINE_INTERNAL__
29
30 #ifndef __DISPATCH_INDIRECT__
31 #error "Please #include <dispatch/dispatch.h> instead of this file directly."
32 #include <dispatch/base.h> // for HeaderDoc
33 #endif
34
35 #if DISPATCH_USE_CLIENT_CALLOUT
36
37 DISPATCH_NOTHROW void
38 _dispatch_client_callout(void *ctxt, dispatch_function_t f);
39 DISPATCH_NOTHROW void
40 _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t));
41 #if HAVE_MACH
42 DISPATCH_NOTHROW void
43 _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
44 dispatch_mach_msg_t dmsg, mach_error_t error,
45 dispatch_mach_handler_function_t f);
46 #endif // HAVE_MACH
47
48 #else // !DISPATCH_USE_CLIENT_CALLOUT
49
50 DISPATCH_ALWAYS_INLINE
51 static inline void
52 _dispatch_client_callout(void *ctxt, dispatch_function_t f)
53 {
54 return f(ctxt);
55 }
56
57 DISPATCH_ALWAYS_INLINE
58 static inline void
59 _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
60 {
61 return f(ctxt, i);
62 }
63
64 #if HAVE_MACH
65 DISPATCH_ALWAYS_INLINE
66 static inline void
67 _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
68 dispatch_mach_msg_t dmsg, mach_error_t error,
69 dispatch_mach_handler_function_t f)
70 {
71 return f(ctxt, reason, dmsg, error);
72 }
73 #endif // HAVE_MACH
74
75 #endif // !DISPATCH_USE_CLIENT_CALLOUT
76
77 #pragma mark -
78 #pragma mark _os_object_t & dispatch_object_t
79 #if DISPATCH_PURE_C
80
81 DISPATCH_ALWAYS_INLINE
82 static inline bool
83 _dispatch_object_has_vtable(dispatch_object_t dou)
84 {
85 uintptr_t dc_flags = dou._dc->dc_flags;
86
87 // vtables are pointers far away from the low page in memory
88 return dc_flags > 0xffful;
89 }
90
91 DISPATCH_ALWAYS_INLINE
92 static inline bool
93 _dispatch_object_is_continuation(dispatch_object_t dou)
94 {
95 if (_dispatch_object_has_vtable(dou)) {
96 return dx_metatype(dou._do) == _DISPATCH_CONTINUATION_TYPE;
97 }
98 return true;
99 }
100
101 DISPATCH_ALWAYS_INLINE
102 static inline bool
103 _dispatch_object_has_type(dispatch_object_t dou, unsigned long type)
104 {
105 return _dispatch_object_has_vtable(dou) && dx_type(dou._do) == type;
106 }
107
108 DISPATCH_ALWAYS_INLINE
109 static inline bool
110 _dispatch_object_is_redirection(dispatch_object_t dou)
111 {
112 return _dispatch_object_has_type(dou,
113 DISPATCH_CONTINUATION_TYPE(ASYNC_REDIRECT));
114 }
115
116 DISPATCH_ALWAYS_INLINE
117 static inline bool
118 _dispatch_object_is_barrier(dispatch_object_t dou)
119 {
120 dispatch_queue_flags_t dq_flags;
121
122 if (!_dispatch_object_has_vtable(dou)) {
123 return (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT);
124 }
125 switch (dx_metatype(dou._do)) {
126 case _DISPATCH_QUEUE_TYPE:
127 case _DISPATCH_SOURCE_TYPE:
128 dq_flags = os_atomic_load2o(dou._dq, dq_atomic_flags, relaxed);
129 return dq_flags & DQF_BARRIER_BIT;
130 default:
131 return false;
132 }
133 }
134
135 DISPATCH_ALWAYS_INLINE
136 static inline bool
137 _dispatch_object_is_slow_item(dispatch_object_t dou)
138 {
139 if (_dispatch_object_has_vtable(dou)) {
140 return false;
141 }
142 return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT);
143 }
144
145 DISPATCH_ALWAYS_INLINE
146 static inline bool
147 _dispatch_object_is_slow_non_barrier(dispatch_object_t dou)
148 {
149 if (_dispatch_object_has_vtable(dou)) {
150 return false;
151 }
152 return ((dou._dc->dc_flags &
153 (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) ==
154 (DISPATCH_OBJ_SYNC_SLOW_BIT));
155 }
156
157 DISPATCH_ALWAYS_INLINE
158 static inline bool
159 _dispatch_object_is_slow_barrier(dispatch_object_t dou)
160 {
161 if (_dispatch_object_has_vtable(dou)) {
162 return false;
163 }
164 return ((dou._dc->dc_flags &
165 (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) ==
166 (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT));
167 }
168
169 DISPATCH_ALWAYS_INLINE
170 static inline _os_object_t
171 _os_object_retain_internal_inline(_os_object_t obj)
172 {
173 int ref_cnt = _os_object_refcnt_inc(obj);
174 if (unlikely(ref_cnt <= 0)) {
175 _OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
176 }
177 return obj;
178 }
179
180 DISPATCH_ALWAYS_INLINE
181 static inline void
182 _os_object_release_internal_inline_no_dispose(_os_object_t obj)
183 {
184 int ref_cnt = _os_object_refcnt_dec(obj);
185 if (likely(ref_cnt >= 0)) {
186 return;
187 }
188 if (ref_cnt == 0) {
189 _OS_OBJECT_CLIENT_CRASH("Unexpected release of an object");
190 }
191 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
192 }
193
194 DISPATCH_ALWAYS_INLINE
195 static inline void
196 _os_object_release_internal_inline(_os_object_t obj)
197 {
198 int ref_cnt = _os_object_refcnt_dec(obj);
199 if (likely(ref_cnt >= 0)) {
200 return;
201 }
202 if (unlikely(ref_cnt < -1)) {
203 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
204 }
205 #if DISPATCH_DEBUG
206 int xref_cnt = obj->os_obj_xref_cnt;
207 if (unlikely(xref_cnt >= 0)) {
208 DISPATCH_INTERNAL_CRASH(xref_cnt,
209 "Release while external references exist");
210 }
211 #endif
212 // _os_object_refcnt_dispose_barrier() is in _os_object_dispose()
213 return _os_object_dispose(obj);
214 }
215
216 DISPATCH_ALWAYS_INLINE_NDEBUG
217 static inline void
218 _dispatch_retain(dispatch_object_t dou)
219 {
220 (void)_os_object_retain_internal_inline(dou._os_obj);
221 }
222
223 DISPATCH_ALWAYS_INLINE_NDEBUG
224 static inline void
225 _dispatch_release(dispatch_object_t dou)
226 {
227 _os_object_release_internal_inline(dou._os_obj);
228 }
229
230 DISPATCH_ALWAYS_INLINE_NDEBUG
231 static inline void
232 _dispatch_release_tailcall(dispatch_object_t dou)
233 {
234 _os_object_release_internal(dou._os_obj);
235 }
236
237 DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL
238 static inline void
239 _dispatch_object_set_target_queue_inline(dispatch_object_t dou,
240 dispatch_queue_t tq)
241 {
242 _dispatch_retain(tq);
243 tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release);
244 if (tq) _dispatch_release(tq);
245 _dispatch_object_debug(dou._do, "%s", __func__);
246 }
247
248 #endif // DISPATCH_PURE_C
249 #pragma mark -
250 #pragma mark dispatch_thread
251 #if DISPATCH_PURE_C
252
253 #define DISPATCH_DEFERRED_ITEMS_MAGIC 0xdefe55edul /* deferred */
254 #define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 8
255 #ifdef WORKQ_KEVENT_EVENT_BUFFER_LEN
256 _Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >=
257 DISPATCH_DEFERRED_ITEMS_EVENT_COUNT,
258 "our list should not be longer than the kernel's");
259 #endif
260
261 typedef struct dispatch_deferred_items_s {
262 uint32_t ddi_magic;
263 dispatch_queue_t ddi_stashed_dq;
264 struct dispatch_object_s *ddi_stashed_dou;
265 dispatch_priority_t ddi_stashed_pp;
266 int ddi_nevents;
267 int ddi_maxevents;
268 _dispatch_kevent_qos_s ddi_eventlist[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT];
269 } dispatch_deferred_items_s, *dispatch_deferred_items_t;
270
271 DISPATCH_ALWAYS_INLINE
272 static inline void
273 _dispatch_deferred_items_set(dispatch_deferred_items_t ddi)
274 {
275 _dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi);
276 }
277
278 DISPATCH_ALWAYS_INLINE
279 static inline dispatch_deferred_items_t
280 _dispatch_deferred_items_get(void)
281 {
282 dispatch_deferred_items_t ddi = (dispatch_deferred_items_t)
283 _dispatch_thread_getspecific(dispatch_deferred_items_key);
284 if (ddi && ddi->ddi_magic == DISPATCH_DEFERRED_ITEMS_MAGIC) {
285 return ddi;
286 }
287 return NULL;
288 }
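/*
 * Illustrative sketch (not part of the upstream file): a caller that wants to
 * defer a kevent rather than process it immediately could use the accessor
 * above roughly like this, assuming `ke` is a _dispatch_kevent_qos_s value it
 * owns:
 *
 *   dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
 *   if (ddi && ddi->ddi_nevents < ddi->ddi_maxevents) {
 *       ddi->ddi_eventlist[ddi->ddi_nevents++] = ke; // stash for a later flush
 *   } else {
 *       // no deferral context on this thread, handle `ke` right away
 *   }
 */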
289
290 #endif // DISPATCH_PURE_C
291 #pragma mark -
292 #pragma mark dispatch_thread
293 #if DISPATCH_PURE_C
294
295 DISPATCH_ALWAYS_INLINE
296 static inline dispatch_thread_context_t
297 _dispatch_thread_context_find(const void *key)
298 {
299 dispatch_thread_context_t dtc =
300 _dispatch_thread_getspecific(dispatch_context_key);
301 while (dtc) {
302 if (dtc->dtc_key == key) {
303 return dtc;
304 }
305 dtc = dtc->dtc_prev;
306 }
307 return NULL;
308 }
309
310 DISPATCH_ALWAYS_INLINE
311 static inline void
312 _dispatch_thread_context_push(dispatch_thread_context_t ctxt)
313 {
314 ctxt->dtc_prev = _dispatch_thread_getspecific(dispatch_context_key);
315 _dispatch_thread_setspecific(dispatch_context_key, ctxt);
316 }
317
318 DISPATCH_ALWAYS_INLINE
319 static inline void
320 _dispatch_thread_context_pop(dispatch_thread_context_t ctxt)
321 {
322 dispatch_assert(_dispatch_thread_getspecific(dispatch_context_key) == ctxt);
323 _dispatch_thread_setspecific(dispatch_context_key, ctxt->dtc_prev);
324 }
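/*
 * Illustrative sketch (assumed usage, not copied from the call sites):
 * bracketing a region of work with a per-thread context that
 * _dispatch_thread_context_find() can later look up. `my_context_key` is a
 * hypothetical key; any stable address works.
 *
 *   static char my_context_key;
 *   struct dispatch_thread_context_s dtc = { .dtc_key = &my_context_key };
 *   _dispatch_thread_context_push(&dtc);
 *   // ... code that may call _dispatch_thread_context_find(&my_context_key)
 *   _dispatch_thread_context_pop(&dtc);
 */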
325
326 typedef struct dispatch_thread_frame_iterator_s {
327 dispatch_queue_t dtfi_queue;
328 dispatch_thread_frame_t dtfi_frame;
329 } *dispatch_thread_frame_iterator_t;
330
331 DISPATCH_ALWAYS_INLINE
332 static inline void
333 _dispatch_thread_frame_iterate_start(dispatch_thread_frame_iterator_t it)
334 {
335 _dispatch_thread_getspecific_pair(
336 dispatch_queue_key, (void **)&it->dtfi_queue,
337 dispatch_frame_key, (void **)&it->dtfi_frame);
338 }
339
340 DISPATCH_ALWAYS_INLINE
341 static inline void
342 _dispatch_thread_frame_iterate_next(dispatch_thread_frame_iterator_t it)
343 {
344 dispatch_thread_frame_t dtf = it->dtfi_frame;
345 dispatch_queue_t dq = it->dtfi_queue;
346
347 if (dtf) {
348 if (dq->do_targetq) {
349 // redirections and trysync_f may skip some frames,
350 // so we need to simulate seeing the missing links;
351 // however, the bottom root queue is always present
352 it->dtfi_queue = dq->do_targetq;
353 if (it->dtfi_queue == dtf->dtf_queue) {
354 it->dtfi_frame = dtf->dtf_prev;
355 }
356 } else {
357 it->dtfi_queue = dtf->dtf_queue;
358 it->dtfi_frame = dtf->dtf_prev;
359 }
360 } else if (dq) {
361 it->dtfi_queue = dq->do_targetq;
362 }
363 }
364
365 DISPATCH_ALWAYS_INLINE
366 static inline bool
367 _dispatch_thread_frame_find_queue(dispatch_queue_t dq)
368 {
369 struct dispatch_thread_frame_iterator_s it;
370
371 _dispatch_thread_frame_iterate_start(&it);
372 while (it.dtfi_queue) {
373 if (it.dtfi_queue == dq) {
374 return true;
375 }
376 _dispatch_thread_frame_iterate_next(&it);
377 }
378 return false;
379 }
380
381 DISPATCH_ALWAYS_INLINE
382 static inline dispatch_thread_frame_t
383 _dispatch_thread_frame_get_current(void)
384 {
385 return _dispatch_thread_getspecific(dispatch_frame_key);
386 }
387
388 DISPATCH_ALWAYS_INLINE
389 static inline void
390 _dispatch_thread_frame_set_current(dispatch_thread_frame_t dtf)
391 {
392 _dispatch_thread_setspecific(dispatch_frame_key, dtf);
393 }
394
395 DISPATCH_ALWAYS_INLINE
396 static inline void
397 _dispatch_thread_frame_save_state(dispatch_thread_frame_t dtf)
398 {
399 _dispatch_thread_getspecific_packed_pair(
400 dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue);
401 }
402
403 DISPATCH_ALWAYS_INLINE
404 static inline void
405 _dispatch_thread_frame_push(dispatch_thread_frame_t dtf, dispatch_queue_t dq)
406 {
407 _dispatch_thread_frame_save_state(dtf);
408 _dispatch_thread_setspecific_pair(dispatch_queue_key, dq,
409 dispatch_frame_key, dtf);
410 dtf->dtf_deferred = NULL;
411 }
412
413 DISPATCH_ALWAYS_INLINE
414 static inline void
415 _dispatch_thread_frame_push_and_rebase(dispatch_thread_frame_t dtf,
416 dispatch_queue_t dq, dispatch_thread_frame_t new_base)
417 {
418 _dispatch_thread_frame_save_state(dtf);
419 _dispatch_thread_setspecific_pair(dispatch_queue_key, dq,
420 dispatch_frame_key, new_base);
421 dtf->dtf_deferred = NULL;
422 }
423
424 DISPATCH_ALWAYS_INLINE
425 static inline void
426 _dispatch_thread_frame_pop(dispatch_thread_frame_t dtf)
427 {
428 _dispatch_thread_setspecific_packed_pair(
429 dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue);
430 }
431
432 DISPATCH_ALWAYS_INLINE
433 static inline dispatch_queue_t
434 _dispatch_thread_frame_stash(dispatch_thread_frame_t dtf)
435 {
436 _dispatch_thread_getspecific_pair(
437 dispatch_queue_key, (void **)&dtf->dtf_queue,
438 dispatch_frame_key, (void **)&dtf->dtf_prev);
439 _dispatch_thread_frame_pop(dtf->dtf_prev);
440 return dtf->dtf_queue;
441 }
442
443 DISPATCH_ALWAYS_INLINE
444 static inline void
445 _dispatch_thread_frame_unstash(dispatch_thread_frame_t dtf)
446 {
447 _dispatch_thread_frame_pop(dtf);
448 }
449
450 DISPATCH_ALWAYS_INLINE
451 static inline int
452 _dispatch_wqthread_override_start_check_owner(mach_port_t thread,
453 pthread_priority_t pp, mach_port_t *ulock_addr)
454 {
455 #if HAVE_PTHREAD_WORKQUEUE_QOS
456 if (!_dispatch_set_qos_class_enabled) return 0;
457 return _pthread_workqueue_override_start_direct_check_owner(thread,
458 pp, ulock_addr);
459 #else
460 (void)thread; (void)pp; (void)ulock_addr;
461 return 0;
462 #endif
463 }
464
465 DISPATCH_ALWAYS_INLINE
466 static inline void
467 _dispatch_wqthread_override_start(mach_port_t thread,
468 pthread_priority_t pp)
469 {
470 #if HAVE_PTHREAD_WORKQUEUE_QOS
471 if (!_dispatch_set_qos_class_enabled) return;
472 (void)_pthread_workqueue_override_start_direct(thread, pp);
473 #else
474 (void)thread; (void)pp;
475 #endif
476 }
477
478 DISPATCH_ALWAYS_INLINE
479 static inline void
480 _dispatch_wqthread_override_reset(void)
481 {
482 #if HAVE_PTHREAD_WORKQUEUE_QOS
483 if (!_dispatch_set_qos_class_enabled) return;
484 (void)_pthread_workqueue_override_reset();
485 #endif
486 }
487
488 DISPATCH_ALWAYS_INLINE
489 static inline void
490 _dispatch_thread_override_start(mach_port_t thread, pthread_priority_t pp,
491 void *resource)
492 {
493 #if HAVE_PTHREAD_WORKQUEUE_QOS
494 if (!_dispatch_set_qos_class_enabled) return;
495 (void)_pthread_qos_override_start_direct(thread, pp, resource);
496 #else
497 (void)thread; (void)pp; (void)resource;
498 #endif
499 }
500
501 DISPATCH_ALWAYS_INLINE
502 static inline void
503 _dispatch_thread_override_end(mach_port_t thread, void *resource)
504 {
505 #if HAVE_PTHREAD_WORKQUEUE_QOS
506 if (!_dispatch_set_qos_class_enabled) return;
507 (void)_pthread_qos_override_end_direct(thread, resource);
508 #else
509 (void)thread; (void)resource;
510 #endif
511 }
512
513 #if DISPATCH_DEBUG_QOS && HAVE_PTHREAD_WORKQUEUE_QOS
514 DISPATCH_ALWAYS_INLINE
515 static inline bool
516 _dispatch_qos_class_is_valid(pthread_priority_t pp)
517 {
518 pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
519 if (pp > (1UL << (DISPATCH_QUEUE_QOS_COUNT +
520 _PTHREAD_PRIORITY_QOS_CLASS_SHIFT))) {
521 return false;
522 }
523 return true;
524 }
525 #define _dispatch_assert_is_valid_qos_class(pp) ({ typeof(pp) _pp = (pp); \
526 if (unlikely(!_dispatch_qos_class_is_valid(_pp))) { \
527 DISPATCH_INTERNAL_CRASH(_pp, "Invalid qos class"); \
528 } \
529 })
530
531 DISPATCH_ALWAYS_INLINE
532 static inline bool
533 _dispatch_qos_override_is_valid(pthread_priority_t pp)
534 {
535 if (pp & (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK) {
536 return false;
537 }
538 return _dispatch_qos_class_is_valid(pp);
539 }
540 #define _dispatch_assert_is_valid_qos_override(pp) ({ typeof(pp) _pp = (pp); \
541 if (unlikely(!_dispatch_qos_override_is_valid(_pp))) { \
542 DISPATCH_INTERNAL_CRASH(_pp, "Invalid override"); \
543 } \
544 })
545 #else
546 #define _dispatch_assert_is_valid_qos_override(pp) (void)(pp)
547 #define _dispatch_assert_is_valid_qos_class(pp) (void)(pp)
548 #endif
549
550 #endif // DISPATCH_PURE_C
551 #pragma mark -
552 #pragma mark dispatch_queue_t state accessors
553 #if DISPATCH_PURE_C
554
555 DISPATCH_ALWAYS_INLINE
556 static inline dispatch_queue_flags_t
557 _dispatch_queue_atomic_flags(dispatch_queue_t dq)
558 {
559 return os_atomic_load2o(dq, dq_atomic_flags, relaxed);
560 }
561
562 DISPATCH_ALWAYS_INLINE
563 static inline dispatch_queue_flags_t
564 _dispatch_queue_atomic_flags_set(dispatch_queue_t dq,
565 dispatch_queue_flags_t bits)
566 {
567 return os_atomic_or2o(dq, dq_atomic_flags, bits, relaxed);
568 }
569
570 DISPATCH_ALWAYS_INLINE
571 static inline dispatch_queue_flags_t
572 _dispatch_queue_atomic_flags_set_and_clear_orig(dispatch_queue_t dq,
573 dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits)
574 {
575 dispatch_queue_flags_t oflags, nflags;
576 os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, {
577 nflags = (oflags | add_bits) & ~clr_bits;
578 });
579 return oflags;
580 }
581
582 DISPATCH_ALWAYS_INLINE
583 static inline dispatch_queue_flags_t
584 _dispatch_queue_atomic_flags_set_and_clear(dispatch_queue_t dq,
585 dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits)
586 {
587 dispatch_queue_flags_t oflags, nflags;
588 os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, {
589 nflags = (oflags | add_bits) & ~clr_bits;
590 });
591 return nflags;
592 }
593
594 DISPATCH_ALWAYS_INLINE
595 static inline dispatch_queue_flags_t
596 _dispatch_queue_atomic_flags_set_orig(dispatch_queue_t dq,
597 dispatch_queue_flags_t bits)
598 {
599 return os_atomic_or_orig2o(dq, dq_atomic_flags, bits, relaxed);
600 }
601
602 DISPATCH_ALWAYS_INLINE
603 static inline dispatch_queue_flags_t
604 _dispatch_queue_atomic_flags_clear(dispatch_queue_t dq,
605 dispatch_queue_flags_t bits)
606 {
607 return os_atomic_and2o(dq, dq_atomic_flags, ~bits, relaxed);
608 }
609
610 DISPATCH_ALWAYS_INLINE
611 static inline bool
612 _dispatch_queue_is_thread_bound(dispatch_queue_t dq)
613 {
614 return _dispatch_queue_atomic_flags(dq) & DQF_THREAD_BOUND;
615 }
616
617 DISPATCH_ALWAYS_INLINE
618 static inline bool
619 _dispatch_queue_cannot_trysync(dispatch_queue_t dq)
620 {
621 return _dispatch_queue_atomic_flags(dq) & DQF_CANNOT_TRYSYNC;
622 }
623
624 DISPATCH_ALWAYS_INLINE
625 static inline bool
626 _dispatch_queue_label_needs_free(dispatch_queue_t dq)
627 {
628 return _dispatch_queue_atomic_flags(dq) & DQF_LABEL_NEEDS_FREE;
629 }
630
631 DISPATCH_ALWAYS_INLINE
632 static inline dispatch_invoke_flags_t
633 _dispatch_queue_autorelease_frequency(dispatch_queue_t dq)
634 {
635 const unsigned long factor =
636 DISPATCH_INVOKE_AUTORELEASE_ALWAYS / DQF_AUTORELEASE_ALWAYS;
637 dispatch_static_assert(factor > 0);
638
639 dispatch_queue_flags_t qaf = _dispatch_queue_atomic_flags(dq);
640
641 qaf &= _DQF_AUTORELEASE_MASK;
642 return (dispatch_invoke_flags_t)qaf * factor;
643 }
644
645 DISPATCH_ALWAYS_INLINE
646 static inline dispatch_invoke_flags_t
647 _dispatch_queue_merge_autorelease_frequency(dispatch_queue_t dq,
648 dispatch_invoke_flags_t flags)
649 {
650 dispatch_invoke_flags_t qaf = _dispatch_queue_autorelease_frequency(dq);
651
652 if (qaf) {
653 flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK;
654 flags |= qaf;
655 }
656 return flags;
657 }
658
659 DISPATCH_ALWAYS_INLINE
660 static inline bool
661 _dispatch_queue_has_immutable_target(dispatch_queue_t dq)
662 {
663 if (dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) {
664 return false;
665 }
666 return dx_type(dq) != DISPATCH_QUEUE_LEGACY_TYPE;
667 }
668
669 #endif // DISPATCH_PURE_C
670 #ifndef __cplusplus
671
672 DISPATCH_ALWAYS_INLINE
673 static inline uint32_t
674 _dq_state_suspend_cnt(uint64_t dq_state)
675 {
676 return (uint32_t)(dq_state / DISPATCH_QUEUE_SUSPEND_INTERVAL);
677 }
678
679 DISPATCH_ALWAYS_INLINE
680 static inline bool
681 _dq_state_has_side_suspend_cnt(uint64_t dq_state)
682 {
683 return dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT;
684 }
685
686 DISPATCH_ALWAYS_INLINE
687 static inline uint32_t
688 _dq_state_extract_width_bits(uint64_t dq_state)
689 {
690 dq_state &= DISPATCH_QUEUE_WIDTH_MASK;
691 return (uint32_t)(dq_state >> DISPATCH_QUEUE_WIDTH_SHIFT);
692 }
693
694 DISPATCH_ALWAYS_INLINE
695 static inline uint32_t
696 _dq_state_available_width(uint64_t dq_state)
697 {
698 uint32_t full = DISPATCH_QUEUE_WIDTH_FULL;
699 if (fastpath(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) {
700 return full - _dq_state_extract_width_bits(dq_state);
701 }
702 return 0;
703 }
704
705 DISPATCH_ALWAYS_INLINE
706 static inline uint32_t
707 _dq_state_used_width(uint64_t dq_state, uint16_t dq_width)
708 {
709 uint32_t full = DISPATCH_QUEUE_WIDTH_FULL;
710 uint32_t width = _dq_state_extract_width_bits(dq_state);
711
712 if (dq_state & DISPATCH_QUEUE_PENDING_BARRIER) {
713 // DISPATCH_QUEUE_PENDING_BARRIER means (dq_width - 1) of the used width
714 // is pre-reservation that we want to ignore
715 return width - (full - dq_width) - (dq_width - 1);
716 }
717 return width - (full - dq_width);
718 }
719
720 DISPATCH_ALWAYS_INLINE
721 static inline bool
722 _dq_state_is_suspended(uint64_t dq_state)
723 {
724 return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION;
725 }
726 #define DISPATCH_QUEUE_IS_SUSPENDED(x) _dq_state_is_suspended((x)->dq_state)
727
728 DISPATCH_ALWAYS_INLINE
729 static inline bool
730 _dq_state_is_inactive(uint64_t dq_state)
731 {
732 return dq_state & DISPATCH_QUEUE_INACTIVE;
733 }
734
735 DISPATCH_ALWAYS_INLINE
736 static inline bool
737 _dq_state_needs_activation(uint64_t dq_state)
738 {
739 return dq_state & DISPATCH_QUEUE_NEEDS_ACTIVATION;
740 }
741
742 DISPATCH_ALWAYS_INLINE
743 static inline bool
744 _dq_state_is_in_barrier(uint64_t dq_state)
745 {
746 return dq_state & DISPATCH_QUEUE_IN_BARRIER;
747 }
748
749 DISPATCH_ALWAYS_INLINE
750 static inline bool
751 _dq_state_has_available_width(uint64_t dq_state)
752 {
753 return !(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT);
754 }
755
756 DISPATCH_ALWAYS_INLINE
757 static inline bool
758 _dq_state_has_pending_barrier(uint64_t dq_state)
759 {
760 return dq_state & DISPATCH_QUEUE_PENDING_BARRIER;
761 }
762
763 DISPATCH_ALWAYS_INLINE
764 static inline bool
765 _dq_state_is_dirty(uint64_t dq_state)
766 {
767 return dq_state & DISPATCH_QUEUE_DIRTY;
768 }
769
770 DISPATCH_ALWAYS_INLINE
771 static inline bool
772 _dq_state_is_enqueued(uint64_t dq_state)
773 {
774 return dq_state & DISPATCH_QUEUE_ENQUEUED;
775 }
776
777 DISPATCH_ALWAYS_INLINE
778 static inline bool
779 _dq_state_has_override(uint64_t dq_state)
780 {
781 return dq_state & DISPATCH_QUEUE_HAS_OVERRIDE;
782 }
783
784 DISPATCH_ALWAYS_INLINE
785 static inline dispatch_lock_owner
786 _dq_state_drain_owner(uint64_t dq_state)
787 {
788 return _dispatch_lock_owner((dispatch_lock)dq_state);
789 }
790 #define DISPATCH_QUEUE_DRAIN_OWNER(dq) \
791 _dq_state_drain_owner(os_atomic_load2o(dq, dq_state, relaxed))
792
793 DISPATCH_ALWAYS_INLINE
794 static inline bool
795 _dq_state_drain_pended(uint64_t dq_state)
796 {
797 return (dq_state & DISPATCH_QUEUE_DRAIN_PENDED);
798 }
799
800 DISPATCH_ALWAYS_INLINE
801 static inline bool
802 _dq_state_drain_locked_by(uint64_t dq_state, uint32_t owner)
803 {
804 if (_dq_state_drain_pended(dq_state)) {
805 return false;
806 }
807 return _dq_state_drain_owner(dq_state) == owner;
808 }
809
810 DISPATCH_ALWAYS_INLINE
811 static inline bool
812 _dq_state_drain_locked(uint64_t dq_state)
813 {
814 return (dq_state & DISPATCH_QUEUE_DRAIN_OWNER_MASK) != 0;
815 }
816
817 DISPATCH_ALWAYS_INLINE
818 static inline bool
819 _dq_state_has_waiters(uint64_t dq_state)
820 {
821 return _dispatch_lock_has_waiters((dispatch_lock)dq_state);
822 }
823
824 DISPATCH_ALWAYS_INLINE
825 static inline bool
826 _dq_state_is_sync_runnable(uint64_t dq_state)
827 {
828 return dq_state < DISPATCH_QUEUE_IN_BARRIER;
829 }
830
831 DISPATCH_ALWAYS_INLINE
832 static inline bool
833 _dq_state_is_runnable(uint64_t dq_state)
834 {
835 return dq_state < DISPATCH_QUEUE_WIDTH_FULL_BIT;
836 }
837
838 DISPATCH_ALWAYS_INLINE
839 static inline bool
840 _dq_state_should_wakeup(uint64_t dq_state)
841 {
842 return _dq_state_is_runnable(dq_state) &&
843 !_dq_state_is_enqueued(dq_state) &&
844 !_dq_state_drain_locked(dq_state);
845 }
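/*
 * Illustrative sketch (not from the upstream file): the predicates above are
 * meant to be composed on a snapshot of dq_state, e.g. the way
 * _dispatch_queue_drain_try_lock() later in this file decides whether the
 * drain lock may be taken:
 *
 *   uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
 *   if (_dq_state_is_runnable(dq_state) && !_dq_state_drain_locked(dq_state)) {
 *       // a drainer may attempt to acquire the drain lock
 *   }
 */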
846
847 #endif // __cplusplus
848 #pragma mark -
849 #pragma mark dispatch_queue_t state machine
850 #ifndef __cplusplus
851
852 static inline bool _dispatch_queue_need_override(dispatch_queue_class_t dqu,
853 pthread_priority_t pp);
854 static inline bool _dispatch_queue_need_override_retain(
855 dispatch_queue_class_t dqu, pthread_priority_t pp);
856 static inline dispatch_priority_t _dispatch_queue_reset_override_priority(
857 dispatch_queue_class_t dqu, bool qp_is_floor);
858 static inline bool _dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu,
859 dispatch_priority_t new_op);
860 static inline pthread_priority_t _dispatch_get_defaultpriority(void);
861 static inline void _dispatch_set_defaultpriority_override(void);
862 static inline void _dispatch_reset_defaultpriority(pthread_priority_t pp);
863 static inline pthread_priority_t _dispatch_get_priority(void);
864 static inline pthread_priority_t _dispatch_set_defaultpriority(
865 pthread_priority_t pp, pthread_priority_t *new_pp);
866
867 DISPATCH_ALWAYS_INLINE
868 static inline void
869 _dispatch_queue_xref_dispose(struct dispatch_queue_s *dq)
870 {
871 if (slowpath(DISPATCH_QUEUE_IS_SUSPENDED(dq))) {
872 // Arguments for and against this assert are within 6705399
873 DISPATCH_CLIENT_CRASH(dq, "Release of a suspended object");
874 }
875 os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed);
876 }
877
878 #endif
879 #if DISPATCH_PURE_C
880
881 // Note to later developers: ensure that any initialization changes are
882 // made for statically allocated queues (i.e. _dispatch_main_q).
883 static inline void
884 _dispatch_queue_init(dispatch_queue_t dq, dispatch_queue_flags_t dqf,
885 uint16_t width, bool inactive)
886 {
887 uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width);
888
889 if (inactive) {
890 dq_state += DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION;
891 dq->do_ref_cnt++; // rdar://8181908 see _dispatch_queue_resume
892 }
893 dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;
894 dqf |= (dispatch_queue_flags_t)width << DQF_WIDTH_SHIFT;
895 os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
896 dq->dq_state = dq_state;
897 dq->dq_override_voucher = DISPATCH_NO_VOUCHER;
898 dq->dq_serialnum =
899 os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
900 }
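/*
 * Illustrative sketch (assumed usage, not a copy of the real constructors): a
 * queue constructor pairs its allocation with this initializer, for example to
 * create a serial queue in the inactive state:
 *
 *   _dispatch_queue_init(dq, (dispatch_queue_flags_t)0, 1, true);
 *   // dispatch_activate() / _dispatch_queue_resume() later clears the
 *   // INACTIVE and NEEDS_ACTIVATION bits set above.
 */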
901
902 /* Used by:
903 * - _dispatch_queue_set_target_queue
904 * - changing dispatch source handlers
905 *
906 * Tries to prevent concurrent wakeup of an inactive queue by suspending it.
907 */
908 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
909 static inline bool
910 _dispatch_queue_try_inactive_suspend(dispatch_queue_t dq)
911 {
912 uint64_t dq_state, value;
913
914 (void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
915 if (!fastpath(_dq_state_is_inactive(dq_state))) {
916 os_atomic_rmw_loop_give_up(return false);
917 }
918 value = dq_state + DISPATCH_QUEUE_SUSPEND_INTERVAL;
919 });
920 if (slowpath(!_dq_state_is_suspended(dq_state)) ||
921 slowpath(_dq_state_has_side_suspend_cnt(dq_state))) {
922 // Crashing here means that 128+ dispatch_suspend() calls have been
923 // made on an inactive object and then dispatch_set_target_queue() or
924 // dispatch_set_*_handler() has been called.
925 //
926 // We don't want to handle the side suspend count in a codepath that
927 // needs to be fast.
928 DISPATCH_CLIENT_CRASH(dq, "Too many calls to dispatch_suspend() "
929 "prior to calling dispatch_set_target_queue() "
930 "or dispatch_set_*_handler()");
931 }
932 return true;
933 }
934
935 /* Must be used by any caller meaning to do a speculative wakeup when the caller
936 * was preventing other wakeups (for example dispatch_resume() or a drainer not
937 * doing a drain_try_unlock() and not observing DIRTY)
938 *
939 * In that case this call loads DIRTY with an acquire barrier so that when
940 * other threads have made changes (such as dispatch_source_cancel()) the
941 * caller can take these state machine changes into account in its decision to
942 * wake up the object.
943 */
944 DISPATCH_ALWAYS_INLINE
945 static inline void
946 _dispatch_queue_try_wakeup(dispatch_queue_t dq, uint64_t dq_state,
947 dispatch_wakeup_flags_t flags)
948 {
949 if (_dq_state_should_wakeup(dq_state)) {
950 if (slowpath(_dq_state_is_dirty(dq_state))) {
951 // <rdar://problem/14637483>
952 // seq_cst wrt state changes that were flushed and not acted upon
953 os_atomic_thread_fence(acquire);
954 }
955 return dx_wakeup(dq, 0, flags);
956 }
957 if (flags & DISPATCH_WAKEUP_CONSUME) {
958 return _dispatch_release_tailcall(dq);
959 }
960 }
961
962 /* Used by:
963 * - _dispatch_queue_class_invoke (normal path)
964 * - _dispatch_queue_override_invoke (stealer)
965 *
966 * Initial state must be { sc:0, ib:0, qf:0, dl:0 }
967 * Final state forces { dl:self, qf:1, d: 0 }
968 * ib:1 is forced when the width acquired is equivalent to the barrier width
969 */
970 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
971 static inline uint64_t
972 _dispatch_queue_drain_try_lock(dispatch_queue_t dq,
973 dispatch_invoke_flags_t flags, uint64_t *dq_state)
974 {
975 uint64_t pending_barrier_width =
976 (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
977 uint64_t xor_owner_and_set_full_width =
978 _dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT;
979 uint64_t clear_enqueued_bit, old_state, new_state;
980
981 if (flags & DISPATCH_INVOKE_STEALING) {
982 clear_enqueued_bit = 0;
983 } else {
984 clear_enqueued_bit = DISPATCH_QUEUE_ENQUEUED;
985 }
986
987 os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
988 new_state = old_state;
989 new_state ^= clear_enqueued_bit;
990 if (likely(_dq_state_is_runnable(old_state) &&
991 !_dq_state_drain_locked(old_state))) {
992 //
993 // Only keep the HAS_WAITER bit (and ENQUEUED if stealing).
994 // In particular acquiring the drain lock clears the DIRTY bit
995 //
996 new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
997 //
998 // For the NOWAITERS_BIT case, the thread identity
999 // has NOWAITERS_BIT set, and NOWAITERS_BIT was kept above,
1000 // so the xor below flips the NOWAITERS_BIT to 0 as expected.
1001 //
1002 // For the non inverted WAITERS_BIT case, WAITERS_BIT is not set in
1003 // the thread identity, and the xor leaves the bit alone.
1004 //
1005 new_state ^= xor_owner_and_set_full_width;
1006 if (_dq_state_has_pending_barrier(old_state) ||
1007 old_state + pending_barrier_width <
1008 DISPATCH_QUEUE_WIDTH_FULL_BIT) {
1009 new_state |= DISPATCH_QUEUE_IN_BARRIER;
1010 }
1011 } else if (!clear_enqueued_bit) {
1012 os_atomic_rmw_loop_give_up(break);
1013 }
1014 });
1015
1016 if (dq_state) *dq_state = new_state;
1017 if (likely(_dq_state_is_runnable(old_state) &&
1018 !_dq_state_drain_locked(old_state))) {
1019 new_state &= DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_FULL_BIT;
1020 old_state &= DISPATCH_QUEUE_WIDTH_MASK;
1021 return new_state - old_state;
1022 }
1023 return 0;
1024 }
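/*
 * Illustrative sketch (simplified, assuming a plain non-stealing drain): how
 * the try_lock above pairs with _dispatch_queue_drain_try_unlock() further
 * down in this file:
 *
 *   uint64_t dq_state, owned;
 *   owned = _dispatch_queue_drain_try_lock(dq, flags, &dq_state);
 *   if (owned) {
 *       // ... pop and invoke items, adjusting `owned` as width is consumed ...
 *       if (!_dispatch_queue_drain_try_unlock(dq, owned)) {
 *           // DIRTY was set by an enqueuer: we kept the lock, drain again
 *       }
 *   }
 */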
1025
1026 /* Used by _dispatch_barrier_{try,}sync
1027 *
1028 * Note, this fails if any of e:1 or dl!=0, but that allows this code to be a
1029 * simple cmpxchg which is significantly faster on Intel, and makes a
1030 * significant difference on the uncontended codepath.
1031 *
1032 * See discussion for DISPATCH_QUEUE_DIRTY in queue_internal.h
1033 *
1034 * Initial state must be `completely idle`
1035 * Final state forces { ib:1, qf:1, w:0 }
1036 */
1037 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1038 static inline bool
1039 _dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq)
1040 {
1041 uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER;
1042 value |= _dispatch_tid_self();
1043
1044 return os_atomic_cmpxchg2o(dq, dq_state,
1045 DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width), value, acquire);
1046 }
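/*
 * Illustrative sketch (simplified fast path, not the full dispatch_barrier_sync
 * implementation): the uncontended case reduces to this single cmpxchg.
 *
 *   if (likely(_dispatch_queue_try_acquire_barrier_sync(dq))) {
 *       // run the barrier work on the calling thread, then release or
 *       // transfer the drain lock via the helpers further down in this file
 *   } else {
 *       // slow path: enqueue a waiter and block
 *   }
 */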
1047
1048 /* Used by _dispatch_sync for root queues and some drain codepaths
1049 *
1050 * Root queues have no strict ordering and dispatch_sync() always goes through.
1051 * Drain is the sole setter of `dl` hence can use this non failing version of
1052 * _dispatch_queue_try_acquire_sync().
1053 *
1054 * Final state: { w += 1 }
1055 */
1056 DISPATCH_ALWAYS_INLINE
1057 static inline void
1058 _dispatch_queue_reserve_sync_width(dispatch_queue_t dq)
1059 {
1060 (void)os_atomic_add2o(dq, dq_state,
1061 DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed);
1062 }
1063
1064 /* Used by _dispatch_sync on non-serial queues
1065 *
1066 * Initial state must be { sc:0, ib:0, pb:0, d:0 }
1067 * Final state: { w += 1 }
1068 */
1069 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1070 static inline bool
1071 _dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq)
1072 {
1073 uint64_t dq_state, value;
1074
1075 return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
1076 if (!fastpath(_dq_state_is_sync_runnable(dq_state)) ||
1077 slowpath(_dq_state_is_dirty(dq_state)) ||
1078 slowpath(_dq_state_has_pending_barrier(dq_state))) {
1079 os_atomic_rmw_loop_give_up(return false);
1080 }
1081 value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
1082 });
1083 }
1084
1085 /* Used by _dispatch_apply_redirect
1086 *
1087 * Try to acquire at most da_width and returns what could be acquired,
1088 * possibly 0
1089 */
1090 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1091 static inline uint32_t
1092 _dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, uint32_t da_width)
1093 {
1094 uint64_t dq_state, value;
1095 uint32_t width;
1096
1097 (void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
1098 width = _dq_state_available_width(dq_state);
1099 if (!fastpath(width)) {
1100 os_atomic_rmw_loop_give_up(return 0);
1101 }
1102 if (width > da_width) {
1103 width = da_width;
1104 }
1105 value = dq_state + width * DISPATCH_QUEUE_WIDTH_INTERVAL;
1106 });
1107 return width;
1108 }
1109
1110 /* Used by _dispatch_apply_redirect
1111 *
1112 * Release width acquired by _dispatch_queue_try_reserve_apply_width
1113 */
1114 DISPATCH_ALWAYS_INLINE
1115 static inline void
1116 _dispatch_queue_relinquish_width(dispatch_queue_t dq, uint32_t da_width)
1117 {
1118 (void)os_atomic_sub2o(dq, dq_state,
1119 da_width * DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed);
1120 }
1121
1122 /* Used by target-queue recursing code
1123 *
1124 * Initial state must be { sc:0, ib:0, qf:0, pb:0, d:0 }
1125 * Final state: { w += 1 }
1126 */
1127 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1128 static inline bool
1129 _dispatch_queue_try_acquire_async(dispatch_queue_t dq)
1130 {
1131 uint64_t dq_state, value;
1132
1133 return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, acquire, {
1134 if (!fastpath(_dq_state_is_runnable(dq_state)) ||
1135 slowpath(_dq_state_is_dirty(dq_state)) ||
1136 slowpath(_dq_state_has_pending_barrier(dq_state))) {
1137 os_atomic_rmw_loop_give_up(return false);
1138 }
1139 value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
1140 });
1141 }
1142
1143 /* Used at the end of Drainers
1144 *
1145 * This adjusts the `owned` width when the next continuation is already known
1146 * to account for its barrierness.
1147 */
1148 DISPATCH_ALWAYS_INLINE
1149 static inline uint64_t
1150 _dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned,
1151 struct dispatch_object_s *next_dc)
1152 {
1153 uint64_t reservation;
1154
1155 if (slowpath(dq->dq_width > 1)) {
1156 if (next_dc && _dispatch_object_is_barrier(next_dc)) {
1157 reservation = DISPATCH_QUEUE_PENDING_BARRIER;
1158 reservation += (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
1159 owned -= reservation;
1160 }
1161 }
1162 return owned;
1163 }
1164
1165 /* Used at the end of Drainers
1166 *
1167 * Unlocking fails if the DIRTY bit is seen (and the queue is not suspended).
1168 * In that case, only the DIRTY bit is cleared. The DIRTY bit is therefore used
1169 * as a signal to renew the drain lock instead of releasing it.
1170 *
1171 * Successful unlock forces { dl:0, d:0, qo:0 } and gives back `owned`
1172 */
1173 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
1174 static inline bool
1175 _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned)
1176 {
1177 uint64_t old_state = os_atomic_load2o(dq, dq_state, relaxed);
1178 uint64_t new_state;
1179 dispatch_priority_t pp = 0, op;
1180
1181 do {
1182 if (unlikely(_dq_state_is_dirty(old_state) &&
1183 !_dq_state_is_suspended(old_state))) {
1184 // just renew the drain lock with an acquire barrier, to see
1185 // what the enqueuer that set DIRTY has done.
1186 os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DIRTY, acquire);
1187 _dispatch_queue_reinstate_override_priority(dq, pp);
1188 return false;
1189 }
1190 new_state = old_state - owned;
1191 if ((new_state & DISPATCH_QUEUE_WIDTH_FULL_BIT) ||
1192 _dq_state_is_suspended(old_state)) {
1193 // the test for the WIDTH_FULL_BIT is about narrow concurrent queues
1194 // releasing the drain lock while being at the width limit
1195 //
1196 // _non_barrier_complete() will set the DIRTY bit when going back
1197 // under the limit which will cause the try_unlock to fail
1198 new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
1199 } else {
1200 new_state &= ~DISPATCH_QUEUE_DIRTY;
1201 new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
1202 // This current owner is the only one that can clear HAS_OVERRIDE,
1203 // so accumulating reset overrides here is valid.
1204 if (unlikely(_dq_state_has_override(new_state))) {
1205 new_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE;
1206 dispatch_assert(!_dispatch_queue_is_thread_bound(dq));
1207 op = _dispatch_queue_reset_override_priority(dq, false);
1208 if (op > pp) pp = op;
1209 }
1210 }
1211 } while (!fastpath(os_atomic_cmpxchgvw2o(dq, dq_state,
1212 old_state, new_state, &old_state, release)));
1213
1214 if (_dq_state_has_override(old_state)) {
1215 // Ensure that the root queue sees that this thread was overridden.
1216 _dispatch_set_defaultpriority_override();
1217 }
1218 return true;
1219 }
1220
1221 /* Used at the end of Drainers when the next work item is known
1222 * and the dirty-head check isn't needed.
1223 *
1224 * This releases `owned`, clears DIRTY, and handles HAS_OVERRIDE when seen.
1225 */
1226 DISPATCH_ALWAYS_INLINE
1227 static inline uint64_t
1228 _dispatch_queue_drain_lock_transfer_or_unlock(dispatch_queue_t dq,
1229 uint64_t owned, mach_port_t next_owner, uint64_t *orig_state)
1230 {
1231 uint64_t dq_state, value;
1232
1233 #ifdef DLOCK_NOWAITERS_BIT
1234 // The NOWAITERS_BIT state must not change through the transfer. It means
1235 // that if next_owner is 0 the bit must be flipped in the rmw_loop below,
1236 // and if next_owner is set, then the bit must be left unchanged.
1237 //
1238 // - when next_owner is 0, the xor below sets NOWAITERS_BIT in next_owner,
1239 // which causes the second xor to flip the bit as expected.
1240 // - if next_owner is not 0, it has the NOWAITERS_BIT set, so we have to
1241 // clear it so that the second xor leaves the NOWAITERS_BIT alone.
1242 next_owner ^= DLOCK_NOWAITERS_BIT;
1243 #endif
1244 os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, release, {
1245 value = dq_state - owned;
1246 // same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT
1247 // but we want to be more efficient wrt the WAITERS_BIT
1248 value &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK;
1249 value &= ~DISPATCH_QUEUE_DRAIN_PENDED;
1250 value &= ~DISPATCH_QUEUE_DIRTY;
1251 value ^= next_owner;
1252 });
1253
1254 if (_dq_state_has_override(dq_state)) {
1255 // Ensure that the root queue sees that this thread was overridden.
1256 _dispatch_set_defaultpriority_override();
1257 }
1258 if (orig_state) *orig_state = dq_state;
1259 return value;
1260 }
1261 #define _dispatch_queue_drain_unlock(dq, owned, orig) \
1262 _dispatch_queue_drain_lock_transfer_or_unlock(dq, owned, 0, orig)
1263
1264 DISPATCH_ALWAYS_INLINE
1265 static inline void
1266 _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq,
1267 uint64_t to_unlock, dispatch_object_t dou)
1268 {
1269 mach_port_t th_next = 0;
1270 if (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT) {
1271 th_next = (mach_port_t)dou._dc->dc_data;
1272 }
1273 _dispatch_queue_drain_lock_transfer_or_unlock(dq, to_unlock, th_next, NULL);
1274 }
1275
1276
1277 #pragma mark -
1278 #pragma mark os_mpsc_queue
1279
1280 // type_t * {volatile,const,_Atomic,...} -> type_t *
1281 // type_t[] -> type_t *
1282 #define os_unqualified_pointer_type(expr) \
1283 typeof(typeof(*(expr)) *)
1284
1285 #define os_mpsc_node_type(q, _ns) \
1286 os_unqualified_pointer_type((q)->_ns##_head)
1287
1288 //
1289 // Multi Producer calls, can be used safely concurrently
1290 //
1291
1292 // Returns true when the queue was empty and the head must be set
1293 #define os_mpsc_push_update_tail_list(q, _ns, head, tail, _o_next) ({ \
1294 os_mpsc_node_type(q, _ns) _head = (head), _tail = (tail), _prev; \
1295 _tail->_o_next = NULL; \
1296 _prev = os_atomic_xchg2o((q), _ns##_tail, _tail, release); \
1297 if (fastpath(_prev)) { \
1298 os_atomic_store2o(_prev, _o_next, _head, relaxed); \
1299 } \
1300 (_prev == NULL); \
1301 })
1302
1303 // Returns true when the queue was empty and the head must be set
1304 #define os_mpsc_push_update_tail(q, _ns, o, _o_next) ({ \
1305 os_mpsc_node_type(q, _ns) _o = (o); \
1306 os_mpsc_push_update_tail_list(q, _ns, _o, _o, _o_next); \
1307 })
1308
1309 #define os_mpsc_push_update_head(q, _ns, o) ({ \
1310 os_atomic_store2o((q), _ns##_head, o, relaxed); \
1311 })
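/*
 * Illustrative sketch (mirrors _dispatch_queue_push_update_tail/_head further
 * down in this file): a producer appending `item` to a dispatch queue whose
 * nodes are linked through `do_next`:
 *
 *   if (os_mpsc_push_update_tail(dq, dq_items, item, do_next)) {
 *       // the queue was empty: this producer owns the empty -> non-empty
 *       // transition, so it must publish the head and wake the consumer
 *       os_mpsc_push_update_head(dq, dq_items, item);
 *       // ... wakeup ...
 *   }
 */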
1312
1313 //
1314 // Single Consumer calls, can NOT be used safely concurrently
1315 //
1316
1317 #define os_mpsc_get_head(q, _ns) ({ \
1318 os_mpsc_node_type(q, _ns) _head; \
1319 _dispatch_wait_until(_head = (q)->_ns##_head); \
1320 _head; \
1321 })
1322
1323 #define os_mpsc_pop_head(q, _ns, head, _o_next) ({ \
1324 typeof(q) _q = (q); \
1325 os_mpsc_node_type(_q, _ns) _head = (head), _n = fastpath(_head->_o_next); \
1326 os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
1327 /* 22708742: set tail to NULL with release, so that NULL write */ \
1328 /* to head above doesn't clobber head from concurrent enqueuer */ \
1329 if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release)) { \
1330 _dispatch_wait_until(_n = fastpath(_head->_o_next)); \
1331 os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
1332 } \
1333 _n; \
1334 })
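/*
 * Illustrative sketch (single consumer only; mirrors _dispatch_queue_head()
 * and _dispatch_queue_next() below): draining every item currently queued.
 * Note that os_mpsc_get_head() spins until the head is published, so it must
 * only be called once the queue is known to be non-empty.
 *
 *   struct dispatch_object_s *dc = os_mpsc_get_head(dq, dq_items);
 *   do {
 *       struct dispatch_object_s *next =
 *               os_mpsc_pop_head(dq, dq_items, dc, do_next);
 *       // ... invoke `dc` ...
 *       dc = next;
 *   } while (dc);
 */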
1335
1336 #define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next) ({ \
1337 typeof(q) _q = (q); \
1338 os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \
1339 if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed)) { \
1340 _dispatch_wait_until(_n = _q->_ns##_head); \
1341 _head->_o_next = _n; \
1342 } \
1343 os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
1344 })
1345
1346 #define os_mpsc_capture_snapshot(q, _ns, tail) ({ \
1347 typeof(q) _q = (q); \
1348 os_mpsc_node_type(_q, _ns) _head; \
1349 _dispatch_wait_until(_head = _q->_ns##_head); \
1350 os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \
1351 /* 22708742: set tail to NULL with release, so that NULL write */ \
1352 /* to head above doesn't clobber head from concurrent enqueuer */ \
1353 *(tail) = os_atomic_xchg2o(_q, _ns##_tail, NULL, release); \
1354 _head; \
1355 })
1356
1357 #define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \
1358 os_unqualified_pointer_type(head) _head = (head), _n = NULL; \
1359 if (_head != (tail)) { \
1360 _dispatch_wait_until(_n = _head->_o_next); \
1361 }; \
1362 _n; })
1363
1364 #define os_mpsc_prepend(q, _ns, head, tail, _o_next) ({ \
1365 typeof(q) _q = (q); \
1366 os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \
1367 _tail->_o_next = NULL; \
1368 if (!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release)) { \
1369 _dispatch_wait_until(_n = _q->_ns##_head); \
1370 _tail->_o_next = _n; \
1371 } \
1372 os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
1373 })
1374
1375 #pragma mark -
1376 #pragma mark dispatch_queue_t tq lock
1377
1378 DISPATCH_ALWAYS_INLINE
1379 static inline bool
1380 _dispatch_queue_sidelock_trylock(dispatch_queue_t dq, pthread_priority_t pp)
1381 {
1382 dispatch_lock_owner owner;
1383 if (_dispatch_unfair_lock_trylock(&dq->dq_sidelock, &owner)) {
1384 return true;
1385 }
1386 _dispatch_wqthread_override_start_check_owner(owner, pp,
1387 &dq->dq_sidelock.dul_lock);
1388 return false;
1389 }
1390
1391 DISPATCH_ALWAYS_INLINE
1392 static inline void
1393 _dispatch_queue_sidelock_lock(dispatch_queue_t dq)
1394 {
1395 return _dispatch_unfair_lock_lock(&dq->dq_sidelock);
1396 }
1397
1398 DISPATCH_ALWAYS_INLINE
1399 static inline bool
1400 _dispatch_queue_sidelock_tryunlock(dispatch_queue_t dq)
1401 {
1402 if (_dispatch_unfair_lock_tryunlock(&dq->dq_sidelock)) {
1403 return true;
1404 }
1405 // Ensure that the root queue sees that this thread was overridden.
1406 _dispatch_set_defaultpriority_override();
1407 return false;
1408 }
1409
1410 DISPATCH_ALWAYS_INLINE
1411 static inline void
1412 _dispatch_queue_sidelock_unlock(dispatch_queue_t dq)
1413 {
1414 if (_dispatch_unfair_lock_unlock_had_failed_trylock(&dq->dq_sidelock)) {
1415 // Ensure that the root queue sees that this thread was overridden.
1416 _dispatch_set_defaultpriority_override();
1417 }
1418 }
1419
1420 #pragma mark -
1421 #pragma mark dispatch_queue_t misc
1422
1423 DISPATCH_ALWAYS_INLINE
1424 static inline dispatch_queue_t
1425 _dispatch_queue_get_current(void)
1426 {
1427 return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
1428 }
1429
1430 DISPATCH_ALWAYS_INLINE
1431 static inline void
1432 _dispatch_queue_set_current(dispatch_queue_t dq)
1433 {
1434 _dispatch_thread_setspecific(dispatch_queue_key, dq);
1435 }
1436
1437 DISPATCH_ALWAYS_INLINE
1438 static inline struct dispatch_object_s*
1439 _dispatch_queue_head(dispatch_queue_t dq)
1440 {
1441 return os_mpsc_get_head(dq, dq_items);
1442 }
1443
1444 DISPATCH_ALWAYS_INLINE
1445 static inline struct dispatch_object_s*
1446 _dispatch_queue_next(dispatch_queue_t dq, struct dispatch_object_s *dc)
1447 {
1448 return os_mpsc_pop_head(dq, dq_items, dc, do_next);
1449 }
1450
1451 DISPATCH_ALWAYS_INLINE
1452 static inline bool
1453 _dispatch_queue_push_update_tail(dispatch_queue_t dq,
1454 struct dispatch_object_s *tail)
1455 {
1456 // if we crash here with a value less than 0x1000, then we are
1457 // at a known bug in client code. for example, see
1458 // _dispatch_queue_dispose or _dispatch_atfork_child
1459 return os_mpsc_push_update_tail(dq, dq_items, tail, do_next);
1460 }
1461
1462 DISPATCH_ALWAYS_INLINE
1463 static inline bool
1464 _dispatch_queue_push_update_tail_list(dispatch_queue_t dq,
1465 struct dispatch_object_s *head, struct dispatch_object_s *tail)
1466 {
1467 // if we crash here with a value less than 0x1000, then we are
1468 // at a known bug in client code. for example, see
1469 // _dispatch_queue_dispose or _dispatch_atfork_child
1470 return os_mpsc_push_update_tail_list(dq, dq_items, head, tail, do_next);
1471 }
1472
1473 DISPATCH_ALWAYS_INLINE
1474 static inline void
1475 _dispatch_queue_push_update_head(dispatch_queue_t dq,
1476 struct dispatch_object_s *head, bool retained)
1477 {
1478 if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
1479 dispatch_assert(!retained);
1480 // Lie about "retained" here; it generates better assembly in this
1481 // hotpath, and _dispatch_root_queue_wakeup knows to ignore this
1482 // fake "WAKEUP_CONSUME" bit when it also sees WAKEUP_FLUSH.
1483 //
1484 // We need to bypass the retain below because pthread root queues
1485 // are not global and retaining them would be wrong.
1486 //
1487 // We should eventually have a typeflag for "POOL" kind of root queues.
1488 retained = true;
1489 }
1490 // The queue must be retained before dq_items_head is written in order
1491 // to ensure that the reference is still valid when _dispatch_queue_wakeup
1492 // is called. Otherwise, if preempted between the assignment to
1493 // dq_items_head and _dispatch_queue_wakeup, the blocks submitted to the
1494 // queue may release the last reference to the queue when invoked by
1495 // _dispatch_queue_drain. <rdar://problem/6932776>
1496 if (!retained) _dispatch_retain(dq);
1497 os_mpsc_push_update_head(dq, dq_items, head);
1498 }
1499
1500 DISPATCH_ALWAYS_INLINE
1501 static inline void
1502 _dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
1503 dispatch_object_t _tail, pthread_priority_t pp, unsigned int n)
1504 {
1505 struct dispatch_object_s *head = _head._do, *tail = _tail._do;
1506 bool override = _dispatch_queue_need_override_retain(dq, pp);
1507 dispatch_queue_flags_t flags;
1508 if (slowpath(_dispatch_queue_push_update_tail_list(dq, head, tail))) {
1509 _dispatch_queue_push_update_head(dq, head, override);
1510 if (fastpath(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) {
1511 return _dispatch_queue_push_list_slow(dq, n);
1512 }
1513 flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH;
1514 } else if (override) {
1515 flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING;
1516 } else {
1517 return;
1518 }
1519 dx_wakeup(dq, pp, flags);
1520 }
1521
1522 DISPATCH_ALWAYS_INLINE
1523 static inline void
1524 _dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail,
1525 pthread_priority_t pp, dispatch_wakeup_flags_t flags)
1526 {
1527 struct dispatch_object_s *tail = _tail._do;
1528 bool override = _dispatch_queue_need_override(dq, pp);
1529 if (flags & DISPATCH_WAKEUP_SLOW_WAITER) {
1530 // when SLOW_WAITER is set, we borrow the reference of the caller
1531 if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
1532 _dispatch_queue_push_update_head(dq, tail, true);
1533 flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_FLUSH;
1534 } else if (override) {
1535 flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_OVERRIDING;
1536 } else {
1537 flags = DISPATCH_WAKEUP_SLOW_WAITER;
1538 }
1539 } else {
1540 if (override) _dispatch_retain(dq);
1541 if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
1542 _dispatch_queue_push_update_head(dq, tail, override);
1543 flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH;
1544 } else if (override) {
1545 flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING;
1546 } else {
1547 return;
1548 }
1549 }
1550 return dx_wakeup(dq, pp, flags);
1551 }
1552
1553 struct _dispatch_identity_s {
1554 pthread_priority_t old_pp;
1555 };
1556
1557 DISPATCH_ALWAYS_INLINE
1558 static inline void
1559 _dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di,
1560 pthread_priority_t pp)
1561 {
1562 // assumed_rq was set by the caller; we need to fake the priorities
1563 dispatch_queue_t assumed_rq = _dispatch_queue_get_current();
1564
1565 dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE);
1566
1567 di->old_pp = _dispatch_get_defaultpriority();
1568
1569 if (!(assumed_rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) {
1570 if (!pp) {
1571 pp = _dispatch_get_priority();
1572 // _dispatch_root_queue_drain_deferred_item() may turn a manager
1573 // thread into a regular root queue thread, and we must never try to
1574 // restore the manager flag once we have become a regular work queue
1575 // thread.
1576 pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
1577 }
1578 if ((pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >
1579 (assumed_rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
1580 _dispatch_wqthread_override_start(_dispatch_tid_self(), pp);
1581 // Ensure that the root queue sees that this thread was overridden.
1582 _dispatch_set_defaultpriority_override();
1583 }
1584 }
1585 _dispatch_reset_defaultpriority(assumed_rq->dq_priority);
1586 }
1587
1588 DISPATCH_ALWAYS_INLINE
1589 static inline void
1590 _dispatch_root_queue_identity_restore(struct _dispatch_identity_s *di)
1591 {
1592 _dispatch_reset_defaultpriority(di->old_pp);
1593 }
1594
1595 typedef dispatch_queue_t
1596 _dispatch_queue_class_invoke_handler_t(dispatch_object_t,
1597 dispatch_invoke_flags_t, uint64_t *owned, struct dispatch_object_s **);
1598
1599 DISPATCH_ALWAYS_INLINE
1600 static inline void
1601 _dispatch_queue_class_invoke(dispatch_object_t dou,
1602 dispatch_invoke_flags_t flags,
1603 _dispatch_queue_class_invoke_handler_t invoke)
1604 {
1605 dispatch_queue_t dq = dou._dq;
1606 struct dispatch_object_s *dc = NULL;
1607 dispatch_queue_t tq = NULL;
1608 uint64_t dq_state, to_unlock = 0;
1609 bool owning = !slowpath(flags & DISPATCH_INVOKE_STEALING);
1610 bool overriding = slowpath(flags & DISPATCH_INVOKE_OVERRIDING);
1611
1612 // When called from a plain _dispatch_queue_drain:
1613 // overriding = false
1614 // owning = true
1615 //
1616 // When called from an override continuation:
1617 // overriding = true
1618 // owning depends on whether the override embedded the queue or steals
1619 DISPATCH_COMPILER_CAN_ASSUME(owning || overriding);
1620
1621 if (owning) {
1622 dq->do_next = DISPATCH_OBJECT_LISTLESS;
1623 }
1624 to_unlock = _dispatch_queue_drain_try_lock(dq, flags, &dq_state);
1625 if (likely(to_unlock)) {
1626 struct _dispatch_identity_s di;
1627 pthread_priority_t old_dp;
1628
1629 drain_pending_barrier:
1630 if (overriding) {
1631 _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx",
1632 _dispatch_tid_self(), _dispatch_get_defaultpriority());
1633 _dispatch_root_queue_identity_assume(&di, 0);
1634 }
1635
1636 if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
1637 pthread_priority_t op, dp;
1638
1639 old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp);
1640 op = dq->dq_override;
1641 if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
1642 _dispatch_wqthread_override_start(_dispatch_tid_self(), op);
1643 // Ensure that the root queue sees that this thread was overridden.
1644 _dispatch_set_defaultpriority_override();
1645 }
1646 }
1647
1648 flags = _dispatch_queue_merge_autorelease_frequency(dq, flags);
1649 attempt_running_slow_head:
1650 tq = invoke(dq, flags, &to_unlock, &dc);
1651 if (slowpath(tq)) {
1652 // Either dc is set, which is a deferred invoke case
1653 //
1654 // or only tq is set, which means a reenqueue is required because of:
1655 // a retarget, a suspension, or a width change.
1656 //
1657 // In both cases, we want to bypass the check for DIRTY.
1658 // That may cause us to leave DIRTY in place but all drain lock
1659 // acquirers clear it
1660 } else {
1661 if (!_dispatch_queue_drain_try_unlock(dq, to_unlock)) {
1662 goto attempt_running_slow_head;
1663 }
1664 to_unlock = 0;
1665 }
1666 if (overriding) {
1667 _dispatch_root_queue_identity_restore(&di);
1668 }
1669 if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
1670 _dispatch_reset_defaultpriority(old_dp);
1671 }
1672 } else if (overriding) {
1673 uint32_t owner = _dq_state_drain_owner(dq_state);
1674 pthread_priority_t p = dq->dq_override;
1675 if (owner && p) {
1676 _dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx",
1677 owner, p);
1678 _dispatch_wqthread_override_start_check_owner(owner, p,
1679 &dq->dq_state_lock);
1680 }
1681 }
1682
1683 if (owning) {
1684 _dispatch_introspection_queue_item_complete(dq);
1685 }
1686
1687 if (tq && dc) {
1688 return _dispatch_queue_drain_deferred_invoke(dq, flags, to_unlock, dc);
1689 }
1690
1691 if (tq) {
1692 bool full_width_upgrade_allowed = (tq == _dispatch_queue_get_current());
1693 uint64_t old_state, new_state;
1694
1695 os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
1696 new_state = old_state - to_unlock;
1697 if (full_width_upgrade_allowed && _dq_state_is_runnable(new_state) &&
1698 _dq_state_has_pending_barrier(new_state)) {
1699 new_state += DISPATCH_QUEUE_IN_BARRIER;
1700 new_state += DISPATCH_QUEUE_WIDTH_INTERVAL;
1701 new_state -= DISPATCH_QUEUE_PENDING_BARRIER;
1702 new_state += to_unlock & DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
1703 } else {
1704 new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
1705 if (_dq_state_should_wakeup(new_state)) {
1706 // drain was not interrupted for suspension
1707 // we will reenqueue right away, just put ENQUEUED back
1708 new_state |= DISPATCH_QUEUE_ENQUEUED;
1709 new_state |= DISPATCH_QUEUE_DIRTY;
1710 }
1711 }
1712 });
1713 if (_dq_state_is_in_barrier(new_state)) {
1714 // we did a "full width upgrade" and just added IN_BARRIER
1715 // so adjust what we own and drain again
1716 to_unlock &= DISPATCH_QUEUE_ENQUEUED;
1717 to_unlock += DISPATCH_QUEUE_IN_BARRIER;
1718 to_unlock += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
1719 goto drain_pending_barrier;
1720 }
1721 if (_dq_state_has_override(old_state)) {
1722 // Ensure that the root queue sees that this thread was overridden.
1723 _dispatch_set_defaultpriority_override();
1724 }
1725
1726 if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) {
1727 return _dispatch_queue_push(tq, dq, 0);
1728 }
1729 }
1730
1731 return _dispatch_release_tailcall(dq);
1732 }
1733
1734 DISPATCH_ALWAYS_INLINE
1735 static inline bool
1736 _dispatch_queue_class_probe(dispatch_queue_class_t dqu)
1737 {
1738 struct dispatch_object_s *tail;
1739 // seq_cst wrt atomic store to dq_state <rdar://problem/14637483>
1740 // seq_cst wrt atomic store to dq_flags <rdar://problem/22623242>
1741 tail = os_atomic_load2o(dqu._oq, oq_items_tail, ordered);
1742 return slowpath(tail != NULL);
1743 }
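
/*
 * Illustrative sketch only (not a call site from this header): the probe is
 * typically used to decide whether publishing new state needs to be followed
 * by a wakeup. dx_wakeup() and the flag value are assumptions borrowed from
 * the rest of libdispatch, not requirements of the probe itself:
 *
 *	if (_dispatch_queue_class_probe(dq)) {
 *		// something is on the item list, make sure a drainer sees it
 *		dx_wakeup(dq, 0, DISPATCH_WAKEUP_FLUSH);
 *	}
 */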
1744
1745 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
1746 static inline bool
1747 _dispatch_is_in_root_queues_array(dispatch_queue_t dq)
1748 {
1749 return (dq >= _dispatch_root_queues) &&
1750 (dq < _dispatch_root_queues + _DISPATCH_ROOT_QUEUE_IDX_COUNT);
1751 }
1752
1753 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
1754 static inline dispatch_queue_t
1755 _dispatch_get_root_queue(qos_class_t priority, bool overcommit)
1756 {
1757 if (overcommit) switch (priority) {
1758 case _DISPATCH_QOS_CLASS_MAINTENANCE:
1759 return &_dispatch_root_queues[
1760 DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT];
1761 case _DISPATCH_QOS_CLASS_BACKGROUND:
1762 return &_dispatch_root_queues[
1763 DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT];
1764 case _DISPATCH_QOS_CLASS_UTILITY:
1765 return &_dispatch_root_queues[
1766 DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT];
1767 case _DISPATCH_QOS_CLASS_DEFAULT:
1768 return &_dispatch_root_queues[
1769 DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT];
1770 case _DISPATCH_QOS_CLASS_USER_INITIATED:
1771 return &_dispatch_root_queues[
1772 DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT];
1773 case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
1774 return &_dispatch_root_queues[
1775 DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT];
1776 } else switch (priority) {
1777 case _DISPATCH_QOS_CLASS_MAINTENANCE:
1778 return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS];
1779 case _DISPATCH_QOS_CLASS_BACKGROUND:
1780 return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS];
1781 case _DISPATCH_QOS_CLASS_UTILITY:
1782 return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS];
1783 case _DISPATCH_QOS_CLASS_DEFAULT:
1784 return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS];
1785 case _DISPATCH_QOS_CLASS_USER_INITIATED:
1786 return &_dispatch_root_queues[
1787 DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS];
1788 case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
1789 return &_dispatch_root_queues[
1790 DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS];
1791 }
1792 return NULL;
1793 }
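
/*
 * A minimal sketch of the mapping above: asking for the overcommit variant of
 * the default QoS class returns the corresponding static entry of
 * _dispatch_root_queues, and NULL is only returned for an unknown QoS class.
 *
 *	dispatch_queue_t rq =
 *			_dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true);
 *	dispatch_assert(rq == &_dispatch_root_queues[
 *			DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT]);
 */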
1794
1795 #if HAVE_PTHREAD_WORKQUEUE_QOS
1796 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
1797 static inline dispatch_queue_t
1798 _dispatch_get_root_queue_for_priority(pthread_priority_t pp, bool overcommit)
1799 {
1800 uint32_t idx;
1801
1802 pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
1803 idx = (uint32_t)__builtin_ffs((int)pp);
1804 if (unlikely(!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]
1805 .dq_priority)) {
1806 // If the kernel doesn't support maintenance, the bottom bit is background.
1807 // Shift to where we expect the background bit to be.
1808 idx++;
1809 }
1810 // ffs starts at 1, and we also account for the QOS_CLASS_SHIFT;
1811 // if pp is 0, idx is 0 or 1 and the subtraction below wraps it to a value
1812 // larger than DISPATCH_QUEUE_QOS_COUNT
1813 idx -= (_PTHREAD_PRIORITY_QOS_CLASS_SHIFT + 1);
1814 if (unlikely(idx >= DISPATCH_QUEUE_QOS_COUNT)) {
1815 DISPATCH_CLIENT_CRASH(pp, "Corrupted priority");
1816 }
1817 return &_dispatch_root_queues[2 * idx + overcommit];
1818 }
1819 #endif
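
/*
 * Worked example for the index computation above (a sketch; the concrete bit
 * values come from the pthread priority encoding and are not spelled out
 * here): the QoS class is a single bit inside _PTHREAD_PRIORITY_QOS_CLASS_MASK,
 * so __builtin_ffs() yields its position. Subtracting
 * (_PTHREAD_PRIORITY_QOS_CLASS_SHIFT + 1) maps maintenance -> 0,
 * background -> 1, ... user-interactive -> 5, and "2 * idx + overcommit"
 * selects the right slot because each QoS class occupies a (plain, overcommit)
 * pair of entries in _dispatch_root_queues:
 *
 *	// pp is a hypothetical priority that has a QoS class bit set
 *	// (a zero QoS would trip the "Corrupted priority" crash above)
 *	dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, true);
 */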
1820
1821 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
1822 static inline dispatch_queue_t
1823 _dispatch_get_root_queue_with_overcommit(dispatch_queue_t rq, bool overcommit)
1824 {
1825 bool rq_overcommit = (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
1826 // Root queues in _dispatch_root_queues are non-overcommit at even indices
1827 // and overcommit at odd ones, so fixing overcommit means either returning
1828 // the same queue or picking its neighbour in _dispatch_root_queues.
1829 if (overcommit && !rq_overcommit) {
1830 return rq + 1;
1831 }
1832 if (!overcommit && rq_overcommit) {
1833 return rq - 1;
1834 }
1835 return rq;
1836 }
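
/*
 * Sketch of the even/odd pairing, assuming the array layout described in the
 * comment above (overcommit entries immediately follow their plain siblings):
 *
 *	dispatch_queue_t rq =
 *			&_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS];
 *	dispatch_queue_t oc = _dispatch_get_root_queue_with_overcommit(rq, true);
 *	// oc is &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT]
 */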
1837
1838 DISPATCH_ALWAYS_INLINE
1839 static inline void
1840 _dispatch_queue_set_bound_thread(dispatch_queue_t dq)
1841 {
1842 // Tag thread-bound queues with the owning thread
1843 dispatch_assert(_dispatch_queue_is_thread_bound(dq));
1844 mach_port_t old_owner, self = _dispatch_tid_self();
1845 uint64_t dq_state = os_atomic_or_orig2o(dq, dq_state, self, relaxed);
1846 if (unlikely(old_owner = _dq_state_drain_owner(dq_state))) {
1847 DISPATCH_INTERNAL_CRASH(old_owner, "Queue bound twice");
1848 }
1849 }
1850
1851 DISPATCH_ALWAYS_INLINE
1852 static inline void
1853 _dispatch_queue_clear_bound_thread(dispatch_queue_t dq)
1854 {
1855 uint64_t dq_state, value;
1856
1857 dispatch_assert(_dispatch_queue_is_thread_bound(dq));
1858 os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
1859 value = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(dq_state);
1860 });
1861 }
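
/*
 * A sketch of the intended pairing for thread-bound queues (such as the main
 * queue): the owner is recorded exactly once, and clearing only drops the
 * drain owner while preserving any waiter bits.
 *
 *	_dispatch_queue_set_bound_thread(dq);	// records _dispatch_tid_self()
 *	// ... the bound thread services dq ...
 *	_dispatch_queue_clear_bound_thread(dq);
 */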
1862
1863 DISPATCH_ALWAYS_INLINE
1864 static inline dispatch_pthread_root_queue_observer_hooks_t
1865 _dispatch_get_pthread_root_queue_observer_hooks(void)
1866 {
1867 return _dispatch_thread_getspecific(
1868 dispatch_pthread_root_queue_observer_hooks_key);
1869 }
1870
1871 DISPATCH_ALWAYS_INLINE
1872 static inline void
1873 _dispatch_set_pthread_root_queue_observer_hooks(
1874 dispatch_pthread_root_queue_observer_hooks_t observer_hooks)
1875 {
1876 _dispatch_thread_setspecific(dispatch_pthread_root_queue_observer_hooks_key,
1877 observer_hooks);
1878 }
1879
1880 #pragma mark -
1881 #pragma mark dispatch_priority
1882
1883 DISPATCH_ALWAYS_INLINE
1884 static inline pthread_priority_t
1885 _dispatch_get_defaultpriority(void)
1886 {
1887 #if HAVE_PTHREAD_WORKQUEUE_QOS
1888 pthread_priority_t pp = (uintptr_t)_dispatch_thread_getspecific(
1889 dispatch_defaultpriority_key);
1890 return pp;
1891 #else
1892 return 0;
1893 #endif
1894 }
1895
1896 DISPATCH_ALWAYS_INLINE
1897 static inline void
1898 _dispatch_reset_defaultpriority(pthread_priority_t pp)
1899 {
1900 #if HAVE_PTHREAD_WORKQUEUE_QOS
1901 pthread_priority_t old_pp = _dispatch_get_defaultpriority();
1902 // If an inner-loop or'd in the override flag to the per-thread priority,
1903 // it needs to be propagated up the chain.
1904 pp |= old_pp & _PTHREAD_PRIORITY_OVERRIDE_FLAG;
1905 _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
1906 #else
1907 (void)pp;
1908 #endif
1909 }
1910
1911 DISPATCH_ALWAYS_INLINE
1912 static inline void
1913 _dispatch_set_defaultpriority_override(void)
1914 {
1915 #if HAVE_PTHREAD_WORKQUEUE_QOS
1916 pthread_priority_t old_pp = _dispatch_get_defaultpriority();
1917 pthread_priority_t pp = old_pp | _PTHREAD_PRIORITY_OVERRIDE_FLAG;
1918
1919 _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
1920 #endif
1921 }
1922
1923 DISPATCH_ALWAYS_INLINE
1924 static inline bool
1925 _dispatch_reset_defaultpriority_override(void)
1926 {
1927 #if HAVE_PTHREAD_WORKQUEUE_QOS
1928 pthread_priority_t old_pp = _dispatch_get_defaultpriority();
1929 pthread_priority_t pp = old_pp &
1930 ~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG);
1931
1932 _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
1933 return unlikely(pp != old_pp);
1934 #endif
1935 return false;
1936 }
1937
1938 DISPATCH_ALWAYS_INLINE
1939 static inline void
1940 _dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
1941 dispatch_queue_t tq)
1942 {
1943 #if HAVE_PTHREAD_WORKQUEUE_QOS
1944 const dispatch_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG;
1945 const dispatch_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG;
1946 const dispatch_priority_t defaultqueue_flag =
1947 _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
1948 dispatch_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority;
1949 if ((!(dqp & ~_PTHREAD_PRIORITY_FLAGS_MASK) || (dqp & inherited_flag)) &&
1950 (tqp & rootqueue_flag)) {
1951 if (tqp & defaultqueue_flag) {
1952 dq->dq_priority = 0;
1953 } else {
1954 dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag;
1955 }
1956 }
1957 #else
1958 (void)dq; (void)tq;
1959 #endif
1960 }
1961
1962 DISPATCH_ALWAYS_INLINE
1963 static inline pthread_priority_t
1964 _dispatch_set_defaultpriority(pthread_priority_t pp, pthread_priority_t *new_pp)
1965 {
1966 #if HAVE_PTHREAD_WORKQUEUE_QOS
1967 const pthread_priority_t default_priority_preserved_flags =
1968 _PTHREAD_PRIORITY_OVERRIDE_FLAG|_PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
1969 pthread_priority_t old_pp = _dispatch_get_defaultpriority();
1970 if (old_pp) {
1971 pthread_priority_t flags, defaultqueue, basepri;
1972 flags = (pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
1973 defaultqueue = (old_pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
1974 basepri = (old_pp & ~_PTHREAD_PRIORITY_FLAGS_MASK);
1975 pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
1976 if (!pp) {
1977 flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue;
1978 pp = basepri;
1979 } else if (pp < basepri && !defaultqueue) { // rdar://16349734
1980 pp = basepri;
1981 }
1982 pp |= flags | (old_pp & default_priority_preserved_flags);
1983 }
1984 _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
1985 if (new_pp) *new_pp = pp;
1986 return old_pp;
1987 #else
1988 (void)pp; (void)new_pp;
1989 return 0;
1990 #endif
1991 }
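
/*
 * The usual save/restore bracket around a drain, as seen earlier in this
 * file: the returned value is the previous per-thread default priority and
 * must be handed back to _dispatch_reset_defaultpriority().
 *
 *	pthread_priority_t old_dp, dp;
 *	old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp);
 *	// ... drain dq; dp is the effective default priority ...
 *	_dispatch_reset_defaultpriority(old_dp);
 */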
1992
1993 DISPATCH_ALWAYS_INLINE
1994 static inline pthread_priority_t
1995 _dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags)
1996 {
1997 #if HAVE_PTHREAD_WORKQUEUE_QOS
1998 pthread_priority_t defaultpri = _dispatch_get_defaultpriority();
1999 bool enforce, inherited, defaultqueue;
2000 enforce = (flags & DISPATCH_PRIORITY_ENFORCE) ||
2001 (pp & _PTHREAD_PRIORITY_ENFORCE_FLAG);
2002 inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG);
2003 defaultqueue = (defaultpri & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
2004 defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
2005 pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
2006
2007 if (!pp) {
2008 return defaultpri;
2009 } else if (defaultqueue) { // rdar://16349734
2010 return pp;
2011 } else if (pp < defaultpri) {
2012 return defaultpri;
2013 } else if (enforce || inherited) {
2014 return pp;
2015 } else {
2016 return defaultpri;
2017 }
2018 #else
2019 (void)pp; (void)flags;
2020 return 0;
2021 #endif
2022 }
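
/*
 * Summary of the decision above, with the flag bits already stripped from pp
 * and defaultpri (a restatement, not additional policy):
 *	- pp == 0                          -> defaultpri
 *	- targeting the default root queue -> pp           (rdar://16349734)
 *	- pp < defaultpri                  -> defaultpri
 *	- enforced, or inherited basepri   -> pp
 *	- otherwise                        -> defaultpri
 */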
2023
2024 DISPATCH_ALWAYS_INLINE
2025 static inline pthread_priority_t
2026 _dispatch_priority_inherit_from_root_queue(pthread_priority_t pp,
2027 dispatch_queue_t rq)
2028 {
2029 #if HAVE_PTHREAD_WORKQUEUE_QOS
2030 pthread_priority_t p = pp & ~_PTHREAD_PRIORITY_FLAGS_MASK;
2031 pthread_priority_t rqp = rq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
2032 pthread_priority_t defaultqueue =
2033 rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
2034
2035 if (!p || (!defaultqueue && p < rqp)) {
2036 p = rqp | defaultqueue;
2037 }
2038 return p | (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
2039 #else
2040 (void)rq; (void)pp;
2041 return 0;
2042 #endif
2043 }
2044
2045 DISPATCH_ALWAYS_INLINE
2046 static inline pthread_priority_t
2047 _dispatch_get_priority(void)
2048 {
2049 #if HAVE_PTHREAD_WORKQUEUE_QOS
2050 pthread_priority_t pp = (uintptr_t)
2051 _dispatch_thread_getspecific(dispatch_priority_key);
2052 return pp;
2053 #else
2054 return 0;
2055 #endif
2056 }
2057
2058 #if HAVE_PTHREAD_WORKQUEUE_QOS
2059 DISPATCH_ALWAYS_INLINE
2060 static inline pthread_priority_t
2061 _dispatch_priority_compute_update(pthread_priority_t pp)
2062 {
2063 dispatch_assert(pp != DISPATCH_NO_PRIORITY);
2064 if (!_dispatch_set_qos_class_enabled) return 0;
2065 // the priority in _dispatch_get_priority() only tracks manager-ness
2066 // and overcommit, which is inherited from the current value for each update;
2067 // however, if the priority has the NEEDS_UNBIND flag set, we need to clear
2068 // it at the first chance we get
2069 //
2070 // the manager bit is invalid input, but we keep it to get meaningful
2071 // assertions in _dispatch_set_priority_and_voucher_slow()
2072 pp &= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
2073 pthread_priority_t cur_priority = _dispatch_get_priority();
2074 pthread_priority_t unbind = _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
2075 pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
2076 if (unlikely(cur_priority & unbind)) {
2077 // we always need an update if the NEEDS_UNBIND flag is set;
2078 // the slowpath in _dispatch_set_priority_and_voucher_slow() will
2079 // adjust the priority further with the proper overcommitness
2080 return pp ? pp : (cur_priority & ~unbind);
2081 } else {
2082 cur_priority &= ~overcommit;
2083 }
2084 if (unlikely(pp != cur_priority)) return pp;
2085 return 0;
2086 }
2087 #endif
2088
2089 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
2090 static inline voucher_t
2091 _dispatch_set_priority_and_voucher(pthread_priority_t pp,
2092 voucher_t v, _dispatch_thread_set_self_t flags)
2093 {
2094 #if HAVE_PTHREAD_WORKQUEUE_QOS
2095 pp = _dispatch_priority_compute_update(pp);
2096 if (likely(!pp)) {
2097 if (v == DISPATCH_NO_VOUCHER) {
2098 return DISPATCH_NO_VOUCHER;
2099 }
2100 if (likely(v == _voucher_get())) {
2101 bool retained = flags & DISPATCH_VOUCHER_CONSUME;
2102 if (flags & DISPATCH_VOUCHER_REPLACE) {
2103 if (retained && v) _voucher_release_no_dispose(v);
2104 v = DISPATCH_NO_VOUCHER;
2105 } else {
2106 if (!retained && v) _voucher_retain(v);
2107 }
2108 return v;
2109 }
2110 }
2111 return _dispatch_set_priority_and_voucher_slow(pp, v, flags);
2112 #else
2113 (void)pp; (void)v; (void)flags;
2114 return DISPATCH_NO_VOUCHER;
2115 #endif
2116 }
2117
2118 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
2119 static inline voucher_t
2120 _dispatch_adopt_priority_and_set_voucher(pthread_priority_t pp,
2121 voucher_t v, _dispatch_thread_set_self_t flags)
2122 {
2123 pthread_priority_t p = 0;
2124 if (pp != DISPATCH_NO_PRIORITY) {
2125 p = _dispatch_priority_adopt(pp, flags);
2126 }
2127 return _dispatch_set_priority_and_voucher(p, v, flags);
2128 }
2129
2130 DISPATCH_ALWAYS_INLINE
2131 static inline void
2132 _dispatch_reset_priority_and_voucher(pthread_priority_t pp, voucher_t v)
2133 {
2134 if (pp == DISPATCH_NO_PRIORITY) pp = 0;
2135 (void)_dispatch_set_priority_and_voucher(pp, v,
2136 DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE);
2137 }
2138
2139 DISPATCH_ALWAYS_INLINE
2140 static inline void
2141 _dispatch_reset_voucher(voucher_t v, _dispatch_thread_set_self_t flags)
2142 {
2143 flags |= DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE;
2144 (void)_dispatch_set_priority_and_voucher(0, v, flags);
2145 }
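
/*
 * Hedged sketch of the adopt/reset bracket these helpers are meant for (the
 * exact flags vary per call site and this is not a verbatim copy of one):
 *
 *	voucher_t ov = _dispatch_adopt_priority_and_set_voucher(dc->dc_priority,
 *			dc->dc_voucher, DISPATCH_VOUCHER_CONSUME);
 *	_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
 *	_dispatch_reset_voucher(ov, 0);
 */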
2146
2147 DISPATCH_ALWAYS_INLINE
2148 static inline bool
2149 _dispatch_queue_need_override(dispatch_queue_class_t dqu, pthread_priority_t pp)
2150 {
2151 // global queues have their override set to DISPATCH_SATURATED_OVERRIDE
2152 // which makes this test always return false for them.
2153 return dqu._oq->oq_override < (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
2154 }
2155
2156 DISPATCH_ALWAYS_INLINE
2157 static inline bool
2158 _dispatch_queue_received_override(dispatch_queue_class_t dqu,
2159 pthread_priority_t pp)
2160 {
2161 dispatch_assert(dqu._oq->oq_override != DISPATCH_SATURATED_OVERRIDE);
2162 return dqu._oq->oq_override > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
2163 }
2164
2165 DISPATCH_ALWAYS_INLINE
2166 static inline bool
2167 _dispatch_queue_need_override_retain(dispatch_queue_class_t dqu,
2168 pthread_priority_t pp)
2169 {
2170 if (_dispatch_queue_need_override(dqu, pp)) {
2171 _os_object_retain_internal_inline(dqu._oq->_as_os_obj);
2172 return true;
2173 }
2174 return false;
2175 }
2176
2177 DISPATCH_ALWAYS_INLINE
2178 static inline bool
2179 _dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu,
2180 dispatch_priority_t new_op)
2181 {
2182 dispatch_priority_t old_op;
2183 new_op &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
2184 if (!new_op) return false;
2185 os_atomic_rmw_loop2o(dqu._oq, oq_override, old_op, new_op, relaxed, {
2186 if (new_op <= old_op) {
2187 os_atomic_rmw_loop_give_up(return false);
2188 }
2189 });
2190 return true;
2191 }
2192
2193 DISPATCH_ALWAYS_INLINE
2194 static inline void
2195 _dispatch_queue_override_priority(dispatch_queue_class_t dqu,
2196 pthread_priority_t *pp, dispatch_wakeup_flags_t *flags)
2197 {
2198 os_mpsc_queue_t oq = dqu._oq;
2199 dispatch_priority_t qp = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
2200 dispatch_priority_t np = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
2201 dispatch_priority_t o;
2202
2203 _dispatch_assert_is_valid_qos_override(np);
2204 if (oq->oq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG) {
2205 qp = 0;
2206 } else if (*flags & DISPATCH_WAKEUP_SLOW_WAITER) {
2207 // when a queue is used as a lock its priority doesn't count
2208 } else if (np < qp) {
2209 // for asynchronous workitems, queue priority is the floor for overrides
2210 np = qp;
2211 }
2212 *flags &= ~_DISPATCH_WAKEUP_OVERRIDE_BITS;
2213
2214 // this optimizes for the case when no update of the override is required
2215 // os_atomic_rmw_loop2o optimizes for the case when the update happens
2216 // and therefore can't be used here.
2217 o = os_atomic_load2o(oq, oq_override, relaxed);
2218 do {
2219 if (likely(np <= o)) break;
2220 } while (unlikely(!os_atomic_cmpxchgvw2o(oq, oq_override, o, np, &o, relaxed)));
2221
2222 if (np <= o) {
2223 *pp = o;
2224 } else {
2225 *flags |= DISPATCH_WAKEUP_OVERRIDING;
2226 *pp = np;
2227 }
2228 if (o > qp) {
2229 *flags |= DISPATCH_WAKEUP_WAS_OVERRIDDEN;
2230 }
2231 }
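
/*
 * Worked outcome of the compare-and-swap loop above: if the requested
 * override np is not larger than the current oq_override, no store is
 * attempted and *pp reports the existing override; if np is larger, the
 * override is raised and DISPATCH_WAKEUP_OVERRIDING is set so the caller
 * knows it has to act on it.
 */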
2232
2233 DISPATCH_ALWAYS_INLINE
2234 static inline dispatch_priority_t
2235 _dispatch_queue_reset_override_priority(dispatch_queue_class_t dqu,
2236 bool qp_is_floor)
2237 {
2238 os_mpsc_queue_t oq = dqu._oq;
2239 dispatch_priority_t p = 0;
2240 if (qp_is_floor) {
2241 // thread-bound queues floor their dq_override to their own
2242 // priority to avoid receiving useless overrides
2243 p = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
2244 }
2245 dispatch_priority_t o = os_atomic_xchg2o(oq, oq_override, p, relaxed);
2246 dispatch_assert(o != DISPATCH_SATURATED_OVERRIDE);
2247 return (o > p) ? o : 0;
2248 }
2249
2250 DISPATCH_ALWAYS_INLINE
2251 static inline pthread_priority_t
2252 _dispatch_priority_propagate(void)
2253 {
2254 #if HAVE_PTHREAD_WORKQUEUE_QOS
2255 pthread_priority_t pp = _dispatch_get_priority();
2256 pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
2257 if (pp > _dispatch_user_initiated_priority) {
2258 // Cap QOS for propagation at user-initiated <rdar://16681262&16998036>
2259 pp = _dispatch_user_initiated_priority;
2260 }
2261 return pp;
2262 #else
2263 return 0;
2264 #endif
2265 }
2266
2267 // including maintenance
2268 DISPATCH_ALWAYS_INLINE
2269 static inline bool
2270 _dispatch_is_background_thread(void)
2271 {
2272 #if HAVE_PTHREAD_WORKQUEUE_QOS
2273 pthread_priority_t pp = _dispatch_get_priority();
2274 pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
2275 return pp && (pp <= _dispatch_background_priority);
2276 #else
2277 return false;
2278 #endif
2279 }
2280
2281 #pragma mark -
2282 #pragma mark dispatch_block_t
2283
2284 #ifdef __BLOCKS__
2285
2286 DISPATCH_ALWAYS_INLINE
2287 static inline bool
2288 _dispatch_block_has_private_data(const dispatch_block_t block)
2289 {
2290 extern void (*_dispatch_block_special_invoke)(void*);
2291 return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke);
2292 }
2293
2294 DISPATCH_ALWAYS_INLINE
2295 static inline bool
2296 _dispatch_block_sync_should_enforce_qos_class(dispatch_block_flags_t flags)
2297 {
2298 /*
2299 * Generates better assembly than the actual readable test:
2300 * (flags & ENFORCE_QOS_CLASS) || !(flags & INHERIT_QOS_CLASS)
2301 */
2302 flags &= DISPATCH_BLOCK_ENFORCE_QOS_CLASS | DISPATCH_BLOCK_INHERIT_QOS_CLASS;
2303 return flags != DISPATCH_BLOCK_INHERIT_QOS_CLASS;
2304 }
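
/*
 * Truth table for the reduced test (only the two masked bits matter), which
 * matches the readable form quoted above:
 *
 *	ENFORCE  INHERIT  ->  result
 *	   0        0     ->  true
 *	   0        1     ->  false
 *	   1        0     ->  true
 *	   1        1     ->  true
 */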
2305
2306 DISPATCH_ALWAYS_INLINE
2307 static inline dispatch_block_private_data_t
2308 _dispatch_block_get_data(const dispatch_block_t db)
2309 {
2310 if (!_dispatch_block_has_private_data(db)) {
2311 return NULL;
2312 }
2313 // Keep in sync with _dispatch_block_create implementation
2314 uint8_t *x = (uint8_t *)db;
2315 // x points to base of struct Block_layout
2316 x += sizeof(struct Block_layout);
2317 // x points to base of captured dispatch_block_private_data_s object
2318 dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x;
2319 if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) {
2320 DISPATCH_CLIENT_CRASH(dbpd->dbpd_magic,
2321 "Corruption of dispatch block object");
2322 }
2323 return dbpd;
2324 }
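
/*
 * Memory layout assumed by the pointer arithmetic above (kept in sync with
 * _dispatch_block_create):
 *
 *	[ struct Block_layout ][ dispatch_block_private_data_s ]
 *	^ db                   ^ db + sizeof(struct Block_layout) == dbpd
 *
 * The dbpd_magic check ensures a block that merely shares the special invoke
 * pointer cannot be misread as carrying private data.
 */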
2325
2326 DISPATCH_ALWAYS_INLINE
2327 static inline pthread_priority_t
2328 _dispatch_block_get_priority(const dispatch_block_t db)
2329 {
2330 dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
2331 return dbpd ? dbpd->dbpd_priority : 0;
2332 }
2333
2334 DISPATCH_ALWAYS_INLINE
2335 static inline dispatch_block_flags_t
2336 _dispatch_block_get_flags(const dispatch_block_t db)
2337 {
2338 dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
2339 return dbpd ? dbpd->dbpd_flags : 0;
2340 }
2341
2342 #endif
2343
2344 #pragma mark -
2345 #pragma mark dispatch_continuation_t
2346
2347 DISPATCH_ALWAYS_INLINE
2348 static inline dispatch_continuation_t
2349 _dispatch_continuation_alloc_cacheonly(void)
2350 {
2351 dispatch_continuation_t dc = (dispatch_continuation_t)
2352 _dispatch_thread_getspecific(dispatch_cache_key);
2353 if (likely(dc)) {
2354 _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
2355 }
2356 return dc;
2357 }
2358
2359 DISPATCH_ALWAYS_INLINE
2360 static inline dispatch_continuation_t
2361 _dispatch_continuation_alloc(void)
2362 {
2363 dispatch_continuation_t dc =
2364 _dispatch_continuation_alloc_cacheonly();
2365 if (unlikely(!dc)) {
2366 return _dispatch_continuation_alloc_from_heap();
2367 }
2368 return dc;
2369 }
2370
2371 DISPATCH_ALWAYS_INLINE
2372 static inline dispatch_continuation_t
2373 _dispatch_continuation_free_cacheonly(dispatch_continuation_t dc)
2374 {
2375 dispatch_continuation_t prev_dc = (dispatch_continuation_t)
2376 _dispatch_thread_getspecific(dispatch_cache_key);
2377 int cnt = prev_dc ? prev_dc->dc_cache_cnt + 1 : 1;
2378 // Cap continuation cache
2379 if (unlikely(cnt > _dispatch_continuation_cache_limit)) {
2380 return dc;
2381 }
2382 dc->do_next = prev_dc;
2383 dc->dc_cache_cnt = cnt;
2384 _dispatch_thread_setspecific(dispatch_cache_key, dc);
2385 return NULL;
2386 }
2387
2388 DISPATCH_ALWAYS_INLINE
2389 static inline void
2390 _dispatch_continuation_free(dispatch_continuation_t dc)
2391 {
2392 dc = _dispatch_continuation_free_cacheonly(dc);
2393 if (unlikely(dc)) {
2394 _dispatch_continuation_free_to_cache_limit(dc);
2395 }
2396 }
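
/*
 * The per-thread continuation cache is a LIFO list threaded through do_next
 * and stored under dispatch_cache_key; dc_cache_cnt on the head bounds its
 * length. A typical lifetime, as a sketch:
 *
 *	dispatch_continuation_t dc = _dispatch_continuation_alloc();
 *	// ... fill in and invoke dc ...
 *	_dispatch_continuation_free(dc);	// back to the cache, or trimmed to
 *						// the heap once the limit is hit
 */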
2397
2398 #include "trace.h"
2399
2400 DISPATCH_ALWAYS_INLINE
2401 static inline void
2402 _dispatch_continuation_with_group_invoke(dispatch_continuation_t dc)
2403 {
2404 struct dispatch_object_s *dou = dc->dc_data;
2405 unsigned long type = dx_type(dou);
2406 if (type == DISPATCH_GROUP_TYPE) {
2407 _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
2408 _dispatch_introspection_queue_item_complete(dou);
2409 dispatch_group_leave((dispatch_group_t)dou);
2410 } else {
2411 DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type");
2412 }
2413 }
2414
2415 DISPATCH_ALWAYS_INLINE
2416 static inline void
2417 _dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov,
2418 dispatch_invoke_flags_t flags)
2419 {
2420 dispatch_continuation_t dc = dou._dc, dc1;
2421 dispatch_invoke_with_autoreleasepool(flags, {
2422 uintptr_t dc_flags = dc->dc_flags;
2423 // Add the item back to the cache before calling the function. This
2424 // allows the 'hot' continuation to be used for a quick callback.
2425 //
2426 // The ccache version is per-thread.
2427 // Therefore, the object has not been reused yet.
2428 // This generates better assembly.
2429 _dispatch_continuation_voucher_adopt(dc, ov, dc_flags);
2430 if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
2431 dc1 = _dispatch_continuation_free_cacheonly(dc);
2432 } else {
2433 dc1 = NULL;
2434 }
2435 if (unlikely(dc_flags & DISPATCH_OBJ_GROUP_BIT)) {
2436 _dispatch_continuation_with_group_invoke(dc);
2437 } else {
2438 _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
2439 _dispatch_introspection_queue_item_complete(dou);
2440 }
2441 if (unlikely(dc1)) {
2442 _dispatch_continuation_free_to_cache_limit(dc1);
2443 }
2444 });
2445 }
2446
2447 DISPATCH_ALWAYS_INLINE_NDEBUG
2448 static inline void
2449 _dispatch_continuation_pop_inline(dispatch_object_t dou, dispatch_queue_t dq,
2450 dispatch_invoke_flags_t flags)
2451 {
2452 dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
2453 _dispatch_get_pthread_root_queue_observer_hooks();
2454 if (observer_hooks) observer_hooks->queue_will_execute(dq);
2455 _dispatch_trace_continuation_pop(dq, dou);
2456 flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
2457 if (_dispatch_object_has_vtable(dou)) {
2458 dx_invoke(dou._do, flags);
2459 } else {
2460 voucher_t ov = dq->dq_override_voucher;
2461 _dispatch_continuation_invoke_inline(dou, ov, flags);
2462 }
2463 if (observer_hooks) observer_hooks->queue_did_execute(dq);
2464 }
2465
2466 // used to forward the do_invoke of a continuation with a vtable to its real
2467 // implementation.
2468 #define _dispatch_continuation_pop_forwarded(dc, ov, dc_flags, ...) \
2469 ({ \
2470 dispatch_continuation_t _dc = (dc), _dc1; \
2471 uintptr_t _dc_flags = (dc_flags); \
2472 _dispatch_continuation_voucher_adopt(_dc, ov, _dc_flags); \
2473 if (_dc_flags & DISPATCH_OBJ_CONSUME_BIT) { \
2474 _dc1 = _dispatch_continuation_free_cacheonly(_dc); \
2475 } else { \
2476 _dc1 = NULL; \
2477 } \
2478 __VA_ARGS__; \
2479 _dispatch_introspection_queue_item_complete(_dc); \
2480 if (unlikely(_dc1)) { \
2481 _dispatch_continuation_free_to_cache_limit(_dc1); \
2482 } \
2483 })
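
/*
 * Hypothetical usage of the macro above (real call sites live in the .c
 * files; this only shows the shape): the variadic arguments are the statement
 * run between voucher adoption and the completion bookkeeping.
 *
 *	_dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER,
 *			DISPATCH_OBJ_CONSUME_BIT, {
 *		_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
 *	});
 */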
2484
2485 DISPATCH_ALWAYS_INLINE
2486 static inline void
2487 _dispatch_continuation_priority_set(dispatch_continuation_t dc,
2488 pthread_priority_t pp, dispatch_block_flags_t flags)
2489 {
2490 #if HAVE_PTHREAD_WORKQUEUE_QOS
2491 if (likely(!(flags & DISPATCH_BLOCK_HAS_PRIORITY))) {
2492 pp = _dispatch_priority_propagate();
2493 }
2494 if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) {
2495 pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
2496 }
2497 dc->dc_priority = pp;
2498 #else
2499 (void)dc; (void)pp; (void)flags;
2500 #endif
2501 }
2502
2503 DISPATCH_ALWAYS_INLINE
2504 static inline pthread_priority_t
2505 _dispatch_continuation_get_override_priority(dispatch_queue_t dq,
2506 dispatch_continuation_t dc)
2507 {
2508 #if HAVE_PTHREAD_WORKQUEUE_QOS
2509 pthread_priority_t p = dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
2510 bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG;
2511 pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
2512 bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
2513
2514 dispatch_assert(dc->dc_priority != DISPATCH_NO_PRIORITY);
2515 if (p && (enforce || !dqp || defaultqueue)) {
2516 return p;
2517 }
2518 return dqp;
2519 #else
2520 (void)dq; (void)dc;
2521 return 0;
2522 #endif
2523 }
2524
2525 DISPATCH_ALWAYS_INLINE
2526 static inline void
2527 _dispatch_continuation_init_f(dispatch_continuation_t dc,
2528 dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t func,
2529 pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags)
2530 {
2531 dc->dc_flags = dc_flags;
2532 dc->dc_func = func;
2533 dc->dc_ctxt = ctxt;
2534 _dispatch_continuation_voucher_set(dc, dqu, flags);
2535 _dispatch_continuation_priority_set(dc, pp, flags);
2536 }
2537
2538 DISPATCH_ALWAYS_INLINE
2539 static inline void
2540 _dispatch_continuation_init(dispatch_continuation_t dc,
2541 dispatch_queue_class_t dqu, dispatch_block_t work,
2542 pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags)
2543 {
2544 dc->dc_flags = dc_flags | DISPATCH_OBJ_BLOCK_BIT;
2545 dc->dc_ctxt = _dispatch_Block_copy(work);
2546 _dispatch_continuation_priority_set(dc, pp, flags);
2547
2548 if (unlikely(_dispatch_block_has_private_data(work))) {
2549 // always sets dc_func & dc_voucher
2550 // may update dc_priority & do_vtable
2551 return _dispatch_continuation_init_slow(dc, dqu, flags);
2552 }
2553
2554 if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
2555 dc->dc_func = _dispatch_call_block_and_release;
2556 } else {
2557 dc->dc_func = _dispatch_Block_invoke(work);
2558 }
2559 _dispatch_continuation_voucher_set(dc, dqu, flags);
2560 }
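
/*
 * Sketch of how an async submission would use these helpers (illustrative,
 * not a verbatim copy of dispatch_async): the continuation is allocated,
 * initialized with the consume bit so that it is freed after invoke, and
 * then pushed onto the queue by the caller.
 *
 *	dispatch_continuation_t dc = _dispatch_continuation_alloc();
 *	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
 *	_dispatch_continuation_init(dc, dq, work, 0, 0, dc_flags);
 *	// ... enqueue dc on dq ...
 */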
2561
2562 #endif // DISPATCH_PURE_C
2563
2564 #endif /* __DISPATCH_INLINE_INTERNAL__ */