/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */

#ifndef __DISPATCH_QUEUE_INTERNAL__
#define __DISPATCH_QUEUE_INTERNAL__

#ifndef __DISPATCH_INDIRECT__
#error "Please #include <dispatch/dispatch.h> instead of this file directly."
#include <dispatch/base.h> // for HeaderDoc
#endif

#if defined(__BLOCKS__) && !defined(DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES)
#define DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES 1 // <rdar://problem/10719357>
#endif

/* x86 & cortex-a8 have a 64 byte cacheline */
#define DISPATCH_CACHELINE_SIZE 64u
#define DISPATCH_CONTINUATION_SIZE DISPATCH_CACHELINE_SIZE
#define ROUND_UP_TO_CACHELINE_SIZE(x) \
		(((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \
		~(DISPATCH_CACHELINE_SIZE - 1u))
#define ROUND_UP_TO_CONTINUATION_SIZE(x) \
		(((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
		~(DISPATCH_CONTINUATION_SIZE - 1u))
#define ROUND_UP_TO_VECTOR_SIZE(x) \
		(((x) + 15u) & ~15u)
#define DISPATCH_CACHELINE_ALIGN \
		__attribute__((__aligned__(DISPATCH_CACHELINE_SIZE)))
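
// Illustrative examples (editorial): with the 64-byte cacheline above,
// ROUND_UP_TO_CACHELINE_SIZE(1) == 64, ROUND_UP_TO_CACHELINE_SIZE(64) == 64,
// and ROUND_UP_TO_CACHELINE_SIZE(65) == 128; the add-and-mask trick requires
// the size to be a power of two.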


#define DISPATCH_QUEUE_CACHELINE_PADDING \
		char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
#ifdef __LP64__
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
		(3*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
		+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#else
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
		(0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
		+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#if !DISPATCH_INTROSPECTION
// No padding, DISPATCH_QUEUE_CACHELINE_PAD == 0
#undef DISPATCH_QUEUE_CACHELINE_PADDING
#define DISPATCH_QUEUE_CACHELINE_PADDING
#endif
#endif
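
// Worked example (editorial): assuming DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE
// is 0 when introspection is disabled, the LP64 pad works out to
// (3*8 + 64) % 64 == 24 bytes, and the 32-bit pad to (0 + 64) % 64 == 0 --
// which is why the 32-bit, non-introspection branch removes _dq_pad entirely.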

// If do_vtable is less than 127, then the object is a continuation.
// Otherwise, the object has a private layout and memory management rules. The
// layout until after 'do_next' must align with normal objects.
#define DISPATCH_CONTINUATION_HEADER(x) \
	_OS_OBJECT_HEADER( \
	const void *do_vtable, \
	do_ref_cnt, \
	do_xref_cnt); \
	struct dispatch_##x##_s *volatile do_next; \
	dispatch_function_t dc_func; \
	void *dc_ctxt; \
	void *dc_data; \
	void *dc_other;

#define DISPATCH_OBJ_ASYNC_BIT		0x1
#define DISPATCH_OBJ_BARRIER_BIT	0x2
#define DISPATCH_OBJ_GROUP_BIT		0x4
#define DISPATCH_OBJ_SYNC_SLOW_BIT	0x8
// vtables are pointers far away from the low page in memory
#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 127ul)
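
// Editorial note: a continuation stores a small bitmask of the
// DISPATCH_OBJ_*_BIT flags above in do_vtable -- e.g. an async barrier
// continuation stores (void *)(DISPATCH_OBJ_ASYNC_BIT |
// DISPATCH_OBJ_BARRIER_BIT), i.e. 0x3 -- so DISPATCH_OBJ_IS_VTABLE() is false
// for it, while a genuine vtable pointer always lies above address 127.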

struct dispatch_continuation_s {
	DISPATCH_CONTINUATION_HEADER(continuation);
};

typedef struct dispatch_continuation_s *dispatch_continuation_t;

struct dispatch_apply_s {
	size_t volatile da_index, da_todo;
	size_t da_iterations, da_nested;
	dispatch_continuation_t da_dc;
	_dispatch_thread_semaphore_t da_sema;
	uint32_t da_thr_cnt;
};

typedef struct dispatch_apply_s *dispatch_apply_t;

DISPATCH_CLASS_DECL(queue_attr);
struct dispatch_queue_attr_s {
	DISPATCH_STRUCT_HEADER(queue_attr);
};

#define DISPATCH_QUEUE_HEADER \
	uint32_t volatile dq_running; \
	struct dispatch_object_s *volatile dq_items_head; \
	/* LP64 global queue cacheline boundary */ \
	struct dispatch_object_s *volatile dq_items_tail; \
	dispatch_queue_t dq_specific_q; \
	uint32_t dq_width; \
	unsigned int dq_is_thread_bound:1; \
	unsigned long dq_serialnum; \
	const char *dq_label; \
	DISPATCH_INTROSPECTION_QUEUE_LIST;

DISPATCH_CLASS_DECL(queue);
struct dispatch_queue_s {
	DISPATCH_STRUCT_HEADER(queue);
	DISPATCH_QUEUE_HEADER;
	DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only
};

DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue);

DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_queue_specific_queue, dispatch_queue);
DISPATCH_CLASS_DECL(queue_specific_queue);

extern struct dispatch_queue_s _dispatch_mgr_q;

void _dispatch_queue_destroy(dispatch_object_t dou);
void _dispatch_queue_dispose(dispatch_queue_t dq);
void _dispatch_queue_invoke(dispatch_queue_t dq);
void _dispatch_queue_push_list_slow(dispatch_queue_t dq,
		struct dispatch_object_s *obj, unsigned int n);
void _dispatch_queue_push_slow(dispatch_queue_t dq,
		struct dispatch_object_s *obj);
unsigned long _dispatch_queue_probe(dispatch_queue_t dq);
dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou);
_dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_object_t dou);
void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t
		dqsq);
unsigned long _dispatch_root_queue_probe(dispatch_queue_t dq);
void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq);
unsigned long _dispatch_runloop_queue_probe(dispatch_queue_t dq);
void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq);
void _dispatch_runloop_queue_dispose(dispatch_queue_t dq);
void _dispatch_mgr_queue_drain(void);
unsigned long _dispatch_mgr_queue_probe(dispatch_queue_t dq);
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
void _dispatch_mgr_priority_init(void);
#else
static inline void _dispatch_mgr_priority_init(void) {}
#endif
void _dispatch_after_timer_callback(void *ctxt);
void _dispatch_async_redirect_invoke(void *ctxt);
void _dispatch_sync_recurse_invoke(void *ctxt);
void _dispatch_apply_invoke(void *ctxt);
void _dispatch_apply_redirect_invoke(void *ctxt);
void _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func);

#if DISPATCH_DEBUG
void dispatch_debug_queue(dispatch_queue_t dq, const char* str);
#else
static inline void dispatch_debug_queue(dispatch_queue_t dq DISPATCH_UNUSED,
		const char* str DISPATCH_UNUSED) {}
#endif

size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz);
size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf,
		size_t bufsiz);

#define DISPATCH_QUEUE_PRIORITY_COUNT 4
#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_PRIORITY_COUNT * 2)

// overcommit priority index values need the low bit (value 1) set, i.e. they
// must be odd
enum {
	DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY = 0,
	DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY,
};
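
// Editorial note: each priority band therefore occupies a consecutive pair of
// indices, the plain root queue at 2*band and its overcommit twin at
// 2*band + 1; e.g. DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY is
// index 3.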

extern unsigned long volatile _dispatch_queue_serial_numbers;
extern struct dispatch_queue_s _dispatch_root_queues[];

#if !(USE_OBJC && __OBJC2__)

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head,
		struct dispatch_object_s *tail)
{
	struct dispatch_object_s *prev;
	tail->do_next = NULL;
	prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail, release);
	if (fastpath(prev)) {
		// if we crash here with a value less than 0x1000, then we are at a
		// known bug in client code; for example, see _dispatch_queue_dispose
		// or _dispatch_atfork_child
		prev->do_next = head;
	}
	return (prev != NULL);
}
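
// Editorial note: the atomic exchange of dq_items_tail above is the lock-free
// multi-producer enqueue used by the push fast paths below. A non-NULL
// previous tail means the queue was already non-empty, so linking through
// prev->do_next completes the enqueue; a NULL previous tail means this push
// made the queue non-empty, and the caller must take the slow path
// (_dispatch_queue_push_slow or _dispatch_queue_push_list_slow) instead.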

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
		dispatch_object_t _tail, unsigned int n)
{
	struct dispatch_object_s *head = _head._do, *tail = _tail._do;
	if (!fastpath(_dispatch_queue_push_list2(dq, head, tail))) {
		_dispatch_queue_push_list_slow(dq, head, n);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail)
{
	struct dispatch_object_s *tail = _tail._do;
	if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) {
		_dispatch_queue_push_slow(dq, tail);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail,
		bool wakeup)
{
	struct dispatch_object_s *tail = _tail._do;
	if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) {
		_dispatch_queue_push_slow(dq, tail);
	} else if (slowpath(wakeup)) {
		_dispatch_wakeup(dq);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_class_invoke(dispatch_object_t dou,
		dispatch_queue_t (*invoke)(dispatch_object_t,
		_dispatch_thread_semaphore_t*))
{
	dispatch_queue_t dq = dou._dq;
	if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) &&
			fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){
		dispatch_queue_t tq = NULL;
		_dispatch_thread_semaphore_t sema = 0;
		tq = invoke(dq, &sema);
		// We do not need to check the result.
		// When the suspend-count lock is dropped, then the check will happen.
		(void)dispatch_atomic_dec2o(dq, dq_running, release);
		if (sema) {
			_dispatch_thread_semaphore_signal(sema);
		} else if (tq) {
			_dispatch_introspection_queue_item_complete(dq);
			return _dispatch_queue_push(tq, dq);
		}
	}
	dq->do_next = DISPATCH_OBJECT_LISTLESS;
	if (!dispatch_atomic_sub2o(dq, do_suspend_cnt,
			DISPATCH_OBJECT_SUSPEND_LOCK, release)) {
		dispatch_atomic_barrier(seq_cst); // <rdar://problem/11915417>
		if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) {
			_dispatch_wakeup(dq); // verify that the queue is idle
		}
	}
	_dispatch_introspection_queue_item_complete(dq);
	_dispatch_release(dq); // added when the queue is put on the list
}
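
// Editorial note: the dq_running cmpxchg from 0 to 1 above acts as the drain
// lock, so at most one thread invokes the queue at a time; the do_suspend_cnt
// subtraction at the end presumably releases the DISPATCH_OBJECT_SUSPEND_LOCK
// reference taken when the queue was enqueued for draining.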

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_t
_dispatch_queue_get_current(void)
{
	return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
}

DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline dispatch_queue_t
_dispatch_get_root_queue(long priority, bool overcommit)
{
	if (overcommit) switch (priority) {
	case DISPATCH_QUEUE_PRIORITY_BACKGROUND:
#if !DISPATCH_NO_BG_PRIORITY
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY];
#endif
	case DISPATCH_QUEUE_PRIORITY_LOW:
	case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY];
	case DISPATCH_QUEUE_PRIORITY_DEFAULT:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY];
	case DISPATCH_QUEUE_PRIORITY_HIGH:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY];
	}
	switch (priority) {
	case DISPATCH_QUEUE_PRIORITY_BACKGROUND:
#if !DISPATCH_NO_BG_PRIORITY
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY];
#endif
	case DISPATCH_QUEUE_PRIORITY_LOW:
	case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY];
	case DISPATCH_QUEUE_PRIORITY_DEFAULT:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY];
	case DISPATCH_QUEUE_PRIORITY_HIGH:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY];
	default:
		return NULL;
	}
}
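
// Usage sketch (editorial):
//	_dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, true)
// returns &_dispatch_root_queues[
//		DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY].
// Note the intentional fall-through from the BACKGROUND cases to the LOW
// cases when DISPATCH_NO_BG_PRIORITY is set, and that an unknown priority
// yields NULL rather than asserting.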

// Note to later developers: ensure that any initialization changes are
// made for statically allocated queues (e.g. _dispatch_main_q).
static inline void
_dispatch_queue_init(dispatch_queue_t dq)
{
	dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;

	dq->dq_running = 0;
	dq->dq_width = 1;
	dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers,
			relaxed);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_set_bound_thread(dispatch_queue_t dq)
{
	// Tag thread-bound queues with the owning thread
	dispatch_assert(dq->dq_is_thread_bound);
	dq->do_finalizer = (void*)_dispatch_thread_self();
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_clear_bound_thread(dispatch_queue_t dq)
{
	dispatch_assert(dq->dq_is_thread_bound);
	dq->do_finalizer = NULL;
}

DISPATCH_ALWAYS_INLINE
static inline pthread_t
_dispatch_queue_get_bound_thread(dispatch_queue_t dq)
{
	dispatch_assert(dq->dq_is_thread_bound);
	return (pthread_t)dq->do_finalizer;
}
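
// Editorial note: thread-bound queues apparently repurpose the otherwise
// unused do_finalizer slot to record the owning pthread_t, as the three
// accessors above show; the stored value is only meaningful while
// dq_is_thread_bound is set, which the dispatch_assert()s enforce.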

#ifndef DISPATCH_CONTINUATION_CACHE_LIMIT
#if TARGET_OS_EMBEDDED
#define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 16
#else
#define DISPATCH_CONTINUATION_CACHE_LIMIT 65536
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 128
#endif
#endif

dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void);
void _dispatch_continuation_free_to_heap(dispatch_continuation_t c);

#if DISPATCH_USE_MEMORYSTATUS_SOURCE
extern int _dispatch_continuation_cache_limit;
void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c);
#else
#define _dispatch_continuation_cache_limit DISPATCH_CONTINUATION_CACHE_LIMIT
#define _dispatch_continuation_free_to_cache_limit(c) \
		_dispatch_continuation_free_to_heap(c)
#endif

DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_alloc_cacheonly(void)
{
	dispatch_continuation_t dc = (dispatch_continuation_t)
			fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
	if (dc) {
		_dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
	}
	return dc;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_alloc(void)
{
	dispatch_continuation_t dc =
			fastpath(_dispatch_continuation_alloc_cacheonly());
	if (!dc) {
		return _dispatch_continuation_alloc_from_heap();
	}
	return dc;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_free_cacheonly(dispatch_continuation_t dc)
{
	dispatch_continuation_t prev_dc = (dispatch_continuation_t)
			fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
	int cnt = prev_dc ? prev_dc->do_ref_cnt + 1 : 1;
	// Cap continuation cache
	if (slowpath(cnt > _dispatch_continuation_cache_limit)) {
		return dc;
	}
	dc->do_next = prev_dc;
	dc->do_ref_cnt = cnt;
	_dispatch_thread_setspecific(dispatch_cache_key, dc);
	return NULL;
}
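
// Editorial note: while a continuation sits on the per-thread free list, its
// do_ref_cnt field is reused to record the depth of the list at that node,
// which keeps the cache-limit check above O(1).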

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_free(dispatch_continuation_t dc)
{
	dc = _dispatch_continuation_free_cacheonly(dc);
	if (slowpath(dc)) {
		_dispatch_continuation_free_to_cache_limit(dc);
	}
}
#endif // !(USE_OBJC && __OBJC2__)

#endif