/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */
27 #ifndef __DISPATCH_QUEUE_INTERNAL__
28 #define __DISPATCH_QUEUE_INTERNAL__
30 #ifndef __DISPATCH_INDIRECT__
31 #error "Please #include <dispatch/dispatch.h> instead of this file directly."
32 #include <dispatch/base.h> // for HeaderDoc
35 #if defined(__BLOCKS__) && !defined(DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES)
36 #define DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES 1 // <rdar://problem/10719357>
/* x86 & cortex-a8 have a 64 byte cacheline */
#define DISPATCH_CACHELINE_SIZE 64u
#define DISPATCH_CONTINUATION_SIZE DISPATCH_CACHELINE_SIZE
// Round x up to the next multiple of the cacheline size (power of two).
#define ROUND_UP_TO_CACHELINE_SIZE(x) \
		(((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \
		~(DISPATCH_CACHELINE_SIZE - 1u))
#define ROUND_UP_TO_CONTINUATION_SIZE(x) \
		(((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
		~(DISPATCH_CONTINUATION_SIZE - 1u))
// 16-byte (vector register) alignment.
#define ROUND_UP_TO_VECTOR_SIZE(x) \
		(((x) + 15u) & ~15u)
#define DISPATCH_CACHELINE_ALIGN \
		__attribute__((__aligned__(DISPATCH_CACHELINE_SIZE)))
// Pad struct dispatch_queue_s out to a cacheline boundary (static queues).
#define DISPATCH_QUEUE_CACHELINE_PADDING \
		char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
// NOTE(review): conditional restored from upstream libdispatch — confirm the
// exact LP64 guard spelling against the shipping source.
#ifdef __LP64__
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
		(3*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
		+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#else
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
		(0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
		+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#if !DISPATCH_INTROSPECTION
// No padding, DISPATCH_QUEUE_CACHELINE_PAD == 0
#undef DISPATCH_QUEUE_CACHELINE_PADDING
#define DISPATCH_QUEUE_CACHELINE_PADDING
#endif
#endif
// If dc_vtable is less than 127, then the object is a continuation.
// Otherwise, the object has a private layout and memory management rules. The
// layout until after 'do_next' must align with normal objects.
// NOTE(review): the _OS_OBJECT_HEADER/dc_ctxt/dc_data/dc_other lines were
// restored from upstream libdispatch — confirm against the shipping source.
#define DISPATCH_CONTINUATION_HEADER(x) \
	_OS_OBJECT_HEADER( \
	const void *do_vtable, \
	do_ref_cnt, \
	do_xref_cnt); \
	struct dispatch_##x##_s *volatile do_next; \
	dispatch_function_t dc_func; \
	void *dc_ctxt; \
	void *dc_data; \
	void *dc_other;
// Flag bits stored in the low bits of a continuation's do_vtable word;
// they can coexist with real vtable pointers because those are aligned
// addresses well above the low page (see DISPATCH_OBJ_IS_VTABLE).
#define DISPATCH_OBJ_ASYNC_BIT		0x1
#define DISPATCH_OBJ_BARRIER_BIT	0x2
#define DISPATCH_OBJ_GROUP_BIT		0x4
#define DISPATCH_OBJ_SYNC_SLOW_BIT	0x8
// vtables are pointers far away from the low page in memory
#define DISPATCH_OBJ_IS_VTABLE(x)	((unsigned long)(x)->do_vtable > 127ul)
92 struct dispatch_continuation_s
{
93 DISPATCH_CONTINUATION_HEADER(continuation
);
96 typedef struct dispatch_continuation_s
*dispatch_continuation_t
;
98 struct dispatch_apply_s
{
99 size_t volatile da_index
, da_todo
;
100 size_t da_iterations
, da_nested
;
101 dispatch_continuation_t da_dc
;
102 _dispatch_thread_semaphore_t da_sema
;
106 typedef struct dispatch_apply_s
*dispatch_apply_t
;
108 DISPATCH_CLASS_DECL(queue_attr
);
109 struct dispatch_queue_attr_s
{
110 DISPATCH_STRUCT_HEADER(queue_attr
);
// Fields common to every queue flavor; spliced into struct dispatch_queue_s
// and its subclasses.
// NOTE(review): the dq_width line was restored per upstream libdispatch —
// confirm its exact type against the shipping source.
#define DISPATCH_QUEUE_HEADER \
	uint32_t volatile dq_running; \
	struct dispatch_object_s *volatile dq_items_head; \
	/* LP64 global queue cacheline boundary */ \
	struct dispatch_object_s *volatile dq_items_tail; \
	dispatch_queue_t dq_specific_q; \
	uint32_t dq_width; \
	unsigned int dq_is_thread_bound:1; \
	unsigned long dq_serialnum; \
	const char *dq_label; \
	DISPATCH_INTROSPECTION_QUEUE_LIST;
125 DISPATCH_CLASS_DECL(queue
);
126 struct dispatch_queue_s
{
127 DISPATCH_STRUCT_HEADER(queue
);
128 DISPATCH_QUEUE_HEADER
;
129 DISPATCH_QUEUE_CACHELINE_PADDING
; // for static queues only
// Internal queue subclasses: root (global) queues, the runloop-backed
// queue, and the manager queue.
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue);

// Backing store for dispatch_queue_set_specific()/get_specific().
DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_queue_specific_queue, dispatch_queue);
DISPATCH_CLASS_DECL(queue_specific_queue);
139 extern struct dispatch_queue_s _dispatch_mgr_q
;
141 void _dispatch_queue_destroy(dispatch_object_t dou
);
142 void _dispatch_queue_dispose(dispatch_queue_t dq
);
143 void _dispatch_queue_invoke(dispatch_queue_t dq
);
144 void _dispatch_queue_push_list_slow(dispatch_queue_t dq
,
145 struct dispatch_object_s
*obj
, unsigned int n
);
146 void _dispatch_queue_push_slow(dispatch_queue_t dq
,
147 struct dispatch_object_s
*obj
);
148 unsigned long _dispatch_queue_probe(dispatch_queue_t dq
);
149 dispatch_queue_t
_dispatch_wakeup(dispatch_object_t dou
);
150 _dispatch_thread_semaphore_t
_dispatch_queue_drain(dispatch_object_t dou
);
151 void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t
153 unsigned long _dispatch_root_queue_probe(dispatch_queue_t dq
);
154 void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq
);
155 unsigned long _dispatch_runloop_queue_probe(dispatch_queue_t dq
);
156 void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq
);
157 void _dispatch_runloop_queue_dispose(dispatch_queue_t dq
);
158 void _dispatch_mgr_queue_drain(void);
159 unsigned long _dispatch_mgr_queue_probe(dispatch_queue_t dq
);
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
void _dispatch_mgr_priority_init(void);
#else
// No pthread root queues: priority setup is a no-op.
static inline void _dispatch_mgr_priority_init(void) {}
#endif
165 void _dispatch_after_timer_callback(void *ctxt
);
166 void _dispatch_async_redirect_invoke(void *ctxt
);
167 void _dispatch_sync_recurse_invoke(void *ctxt
);
168 void _dispatch_apply_invoke(void *ctxt
);
169 void _dispatch_apply_redirect_invoke(void *ctxt
);
170 void _dispatch_barrier_trysync_f(dispatch_queue_t dq
, void *ctxt
,
171 dispatch_function_t func
);
174 void dispatch_debug_queue(dispatch_queue_t dq
, const char* str
);
176 static inline void dispatch_debug_queue(dispatch_queue_t dq DISPATCH_UNUSED
,
177 const char* str DISPATCH_UNUSED
) {}
180 size_t dispatch_queue_debug(dispatch_queue_t dq
, char* buf
, size_t bufsiz
);
181 size_t _dispatch_queue_debug_attr(dispatch_queue_t dq
, char* buf
,
#define DISPATCH_QUEUE_PRIORITY_COUNT 4
#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_PRIORITY_COUNT * 2)

// overcommit priority index values need bit 1 set
enum {
	DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY = 0,
	DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY,
};
199 extern unsigned long volatile _dispatch_queue_serial_numbers
;
200 extern struct dispatch_queue_s _dispatch_root_queues
[];
202 #if !(USE_OBJC && __OBJC2__)
204 DISPATCH_ALWAYS_INLINE
206 _dispatch_queue_push_list2(dispatch_queue_t dq
, struct dispatch_object_s
*head
,
207 struct dispatch_object_s
*tail
)
209 struct dispatch_object_s
*prev
;
210 tail
->do_next
= NULL
;
211 prev
= dispatch_atomic_xchg2o(dq
, dq_items_tail
, tail
, release
);
212 if (fastpath(prev
)) {
213 // if we crash here with a value less than 0x1000, then we are at a
214 // known bug in client code for example, see _dispatch_queue_dispose
215 // or _dispatch_atfork_child
216 prev
->do_next
= head
;
218 return (prev
!= NULL
);
221 DISPATCH_ALWAYS_INLINE
223 _dispatch_queue_push_list(dispatch_queue_t dq
, dispatch_object_t _head
,
224 dispatch_object_t _tail
, unsigned int n
)
226 struct dispatch_object_s
*head
= _head
._do
, *tail
= _tail
._do
;
227 if (!fastpath(_dispatch_queue_push_list2(dq
, head
, tail
))) {
228 _dispatch_queue_push_list_slow(dq
, head
, n
);
232 DISPATCH_ALWAYS_INLINE
234 _dispatch_queue_push(dispatch_queue_t dq
, dispatch_object_t _tail
)
236 struct dispatch_object_s
*tail
= _tail
._do
;
237 if (!fastpath(_dispatch_queue_push_list2(dq
, tail
, tail
))) {
238 _dispatch_queue_push_slow(dq
, tail
);
242 DISPATCH_ALWAYS_INLINE
244 _dispatch_queue_push_wakeup(dispatch_queue_t dq
, dispatch_object_t _tail
,
247 struct dispatch_object_s
*tail
= _tail
._do
;
248 if (!fastpath(_dispatch_queue_push_list2(dq
, tail
, tail
))) {
249 _dispatch_queue_push_slow(dq
, tail
);
250 } else if (slowpath(wakeup
)) {
251 _dispatch_wakeup(dq
);
255 DISPATCH_ALWAYS_INLINE
257 _dispatch_queue_class_invoke(dispatch_object_t dou
,
258 dispatch_queue_t (*invoke
)(dispatch_object_t
,
259 _dispatch_thread_semaphore_t
*))
261 dispatch_queue_t dq
= dou
._dq
;
262 if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq
)) &&
263 fastpath(dispatch_atomic_cmpxchg2o(dq
, dq_running
, 0, 1, acquire
))){
264 dispatch_queue_t tq
= NULL
;
265 _dispatch_thread_semaphore_t sema
= 0;
266 tq
= invoke(dq
, &sema
);
267 // We do not need to check the result.
268 // When the suspend-count lock is dropped, then the check will happen.
269 (void)dispatch_atomic_dec2o(dq
, dq_running
, release
);
271 _dispatch_thread_semaphore_signal(sema
);
273 return _dispatch_queue_push(tq
, dq
);
276 dq
->do_next
= DISPATCH_OBJECT_LISTLESS
;
277 if (!dispatch_atomic_sub2o(dq
, do_suspend_cnt
,
278 DISPATCH_OBJECT_SUSPEND_LOCK
, release
)) {
279 dispatch_atomic_barrier(seq_cst
); // <rdar://problem/11915417>
280 if (dispatch_atomic_load2o(dq
, dq_running
, seq_cst
) == 0) {
281 _dispatch_wakeup(dq
); // verify that the queue is idle
284 _dispatch_release(dq
); // added when the queue is put on the list
287 DISPATCH_ALWAYS_INLINE
288 static inline dispatch_queue_t
289 _dispatch_queue_get_current(void)
291 return (dispatch_queue_t
)_dispatch_thread_getspecific(dispatch_queue_key
);
294 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
295 static inline dispatch_queue_t
296 _dispatch_get_root_queue(long priority
, bool overcommit
)
298 if (overcommit
) switch (priority
) {
299 case DISPATCH_QUEUE_PRIORITY_BACKGROUND
:
300 #if !DISPATCH_NO_BG_PRIORITY
301 return &_dispatch_root_queues
[
302 DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY
];
304 case DISPATCH_QUEUE_PRIORITY_LOW
:
305 case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE
:
306 return &_dispatch_root_queues
[
307 DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY
];
308 case DISPATCH_QUEUE_PRIORITY_DEFAULT
:
309 return &_dispatch_root_queues
[
310 DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY
];
311 case DISPATCH_QUEUE_PRIORITY_HIGH
:
312 return &_dispatch_root_queues
[
313 DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY
];
316 case DISPATCH_QUEUE_PRIORITY_BACKGROUND
:
317 #if !DISPATCH_NO_BG_PRIORITY
318 return &_dispatch_root_queues
[
319 DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY
];
321 case DISPATCH_QUEUE_PRIORITY_LOW
:
322 case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE
:
323 return &_dispatch_root_queues
[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY
];
324 case DISPATCH_QUEUE_PRIORITY_DEFAULT
:
325 return &_dispatch_root_queues
[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY
];
326 case DISPATCH_QUEUE_PRIORITY_HIGH
:
327 return &_dispatch_root_queues
[DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY
];
333 // Note to later developers: ensure that any initialization changes are
334 // made for statically allocated queues (i.e. _dispatch_main_q).
336 _dispatch_queue_init(dispatch_queue_t dq
)
338 dq
->do_next
= (struct dispatch_queue_s
*)DISPATCH_OBJECT_LISTLESS
;
342 dq
->dq_serialnum
= dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers
,
346 DISPATCH_ALWAYS_INLINE
348 _dispatch_queue_set_bound_thread(dispatch_queue_t dq
)
350 //Tag thread-bound queues with the owning thread
351 dispatch_assert(dq
->dq_is_thread_bound
);
352 dq
->do_finalizer
= (void*)_dispatch_thread_self();
355 DISPATCH_ALWAYS_INLINE
357 _dispatch_queue_clear_bound_thread(dispatch_queue_t dq
)
359 dispatch_assert(dq
->dq_is_thread_bound
);
360 dq
->do_finalizer
= NULL
;
363 DISPATCH_ALWAYS_INLINE
364 static inline pthread_t
365 _dispatch_queue_get_bound_thread(dispatch_queue_t dq
)
367 dispatch_assert(dq
->dq_is_thread_bound
);
368 return (pthread_t
)dq
->do_finalizer
;
371 #ifndef DISPATCH_CONTINUATION_CACHE_LIMIT
372 #if TARGET_OS_EMBEDDED
373 #define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads
374 #define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 16
376 #define DISPATCH_CONTINUATION_CACHE_LIMIT 65536
377 #define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 128
381 dispatch_continuation_t
_dispatch_continuation_alloc_from_heap(void);
382 void _dispatch_continuation_free_to_heap(dispatch_continuation_t c
);
#if DISPATCH_USE_MEMORYSTATUS_SOURCE
// Limit is adjusted at runtime under memory pressure.
extern int _dispatch_continuation_cache_limit;
void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c);
#else
#define _dispatch_continuation_cache_limit DISPATCH_CONTINUATION_CACHE_LIMIT
#define _dispatch_continuation_free_to_cache_limit(c) \
		_dispatch_continuation_free_to_heap(c)
#endif
393 DISPATCH_ALWAYS_INLINE
394 static inline dispatch_continuation_t
395 _dispatch_continuation_alloc_cacheonly(void)
397 dispatch_continuation_t dc
= (dispatch_continuation_t
)
398 fastpath(_dispatch_thread_getspecific(dispatch_cache_key
));
400 _dispatch_thread_setspecific(dispatch_cache_key
, dc
->do_next
);
405 DISPATCH_ALWAYS_INLINE
406 static inline dispatch_continuation_t
407 _dispatch_continuation_alloc(void)
409 dispatch_continuation_t dc
=
410 fastpath(_dispatch_continuation_alloc_cacheonly());
412 return _dispatch_continuation_alloc_from_heap();
417 DISPATCH_ALWAYS_INLINE
418 static inline dispatch_continuation_t
419 _dispatch_continuation_free_cacheonly(dispatch_continuation_t dc
)
421 dispatch_continuation_t prev_dc
= (dispatch_continuation_t
)
422 fastpath(_dispatch_thread_getspecific(dispatch_cache_key
));
423 int cnt
= prev_dc
? prev_dc
->do_ref_cnt
+ 1 : 1;
424 // Cap continuation cache
425 if (slowpath(cnt
> _dispatch_continuation_cache_limit
)) {
428 dc
->do_next
= prev_dc
;
429 dc
->do_ref_cnt
= cnt
;
430 _dispatch_thread_setspecific(dispatch_cache_key
, dc
);
434 DISPATCH_ALWAYS_INLINE
436 _dispatch_continuation_free(dispatch_continuation_t dc
)
438 dc
= _dispatch_continuation_free_cacheonly(dc
);
440 _dispatch_continuation_free_to_cache_limit(dc
);
443 #endif // !(USE_OBJC && __OBJC2__)