/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */

#ifndef __DISPATCH_QUEUE_INTERNAL__
#define __DISPATCH_QUEUE_INTERNAL__

#ifndef __DISPATCH_INDIRECT__
#error "Please #include <dispatch/dispatch.h> instead of this file directly."
#include <dispatch/base.h> // for HeaderDoc
#endif

#if defined(__BLOCKS__) && !defined(DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES)
#define DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES 1 // <rdar://problem/10719357>
#endif

/* x86 & cortex-a8 have a 64-byte cacheline */
#define DISPATCH_CACHELINE_SIZE 64u
#define DISPATCH_CONTINUATION_SIZE DISPATCH_CACHELINE_SIZE
#define ROUND_UP_TO_CACHELINE_SIZE(x) \
		(((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \
		~(DISPATCH_CACHELINE_SIZE - 1u))
#define ROUND_UP_TO_CONTINUATION_SIZE(x) \
		(((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
		~(DISPATCH_CONTINUATION_SIZE - 1u))
#define ROUND_UP_TO_VECTOR_SIZE(x) \
		(((x) + 15u) & ~15u)
#define DISPATCH_CACHELINE_ALIGN \
		__attribute__((__aligned__(DISPATCH_CACHELINE_SIZE)))
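
/*
 * For illustration: these macros round up by adding (boundary - 1) and then
 * clearing the low bits, e.g.:
 *
 *	ROUND_UP_TO_CACHELINE_SIZE(1)  == 64
 *	ROUND_UP_TO_CACHELINE_SIZE(65) == 128
 *	ROUND_UP_TO_VECTOR_SIZE(20)    == 32
 *
 * so any size passed through ROUND_UP_TO_CONTINUATION_SIZE comes out as a
 * whole multiple of DISPATCH_CONTINUATION_SIZE.
 */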


#define DISPATCH_QUEUE_CACHELINE_PADDING \
		char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
#ifdef __LP64__
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
		(3*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
		+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#else
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
		(0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
		+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#if !DISPATCH_INTROSPECTION
// No padding, DISPATCH_QUEUE_CACHELINE_PAD == 0
#undef DISPATCH_QUEUE_CACHELINE_PADDING
#define DISPATCH_QUEUE_CACHELINE_PADDING
#endif
#endif
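
/*
 * Worked example (assuming DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE evaluates
 * to 0 when introspection is disabled): on LP64 the pad comes out to
 * (3*8 - 0 + 64) % 64 == 24 bytes, which pushes the queue structure out to
 * the next cacheline boundary; on 32-bit the same expression yields 0, hence
 * the "No padding" case above.
 */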

// If dc_vtable is less than 127, then the object is a continuation.
// Otherwise, the object has a private layout and memory management rules. The
// layout until after 'do_next' must align with normal objects.
#define DISPATCH_CONTINUATION_HEADER(x) \
	_OS_OBJECT_HEADER( \
	const void *do_vtable, \
	do_ref_cnt, \
	do_xref_cnt); \
	struct dispatch_##x##_s *volatile do_next; \
	dispatch_function_t dc_func; \
	void *dc_ctxt; \
	void *dc_data; \
	void *dc_other;

#define DISPATCH_OBJ_ASYNC_BIT 0x1
#define DISPATCH_OBJ_BARRIER_BIT 0x2
#define DISPATCH_OBJ_GROUP_BIT 0x4
#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x8
// vtables are pointers far away from the low page in memory
#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 127ul)
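
/*
 * Sketch of how the flag bits above are used (illustrative only): when a
 * continuation is enqueued, its do_vtable word carries a small bitmask rather
 * than a real vtable pointer, so DISPATCH_OBJ_IS_VTABLE() can tell the two
 * apart at drain time, e.g.:
 *
 *	dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
 *	...
 *	if (DISPATCH_OBJ_IS_VTABLE(dc)) {
 *		// real object with a vtable: use its invoke entry point
 *	} else {
 *		// plain continuation: inspect the flag bits directly
 *	}
 */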

struct dispatch_continuation_s {
	DISPATCH_CONTINUATION_HEADER(continuation);
};

typedef struct dispatch_continuation_s *dispatch_continuation_t;

struct dispatch_apply_s {
	size_t volatile da_index, da_todo;
	size_t da_iterations, da_nested;
	dispatch_continuation_t da_dc;
	_dispatch_thread_semaphore_t da_sema;
	uint32_t da_thr_cnt;
};

typedef struct dispatch_apply_s *dispatch_apply_t;

DISPATCH_CLASS_DECL(queue_attr);
struct dispatch_queue_attr_s {
	DISPATCH_STRUCT_HEADER(queue_attr);
};

#define DISPATCH_QUEUE_HEADER \
	uint32_t volatile dq_running; \
	struct dispatch_object_s *volatile dq_items_head; \
	/* LP64 global queue cacheline boundary */ \
	struct dispatch_object_s *volatile dq_items_tail; \
	dispatch_queue_t dq_specific_q; \
	uint32_t dq_width; \
	unsigned int dq_is_thread_bound:1; \
	unsigned long dq_serialnum; \
	const char *dq_label; \
	DISPATCH_INTROSPECTION_QUEUE_LIST;

DISPATCH_CLASS_DECL(queue);
struct dispatch_queue_s {
	DISPATCH_STRUCT_HEADER(queue);
	DISPATCH_QUEUE_HEADER;
	DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only
};
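
/*
 * Informal summary of the fields above, as they are used elsewhere in
 * libdispatch: dq_items_head and dq_items_tail hold the FIFO list of enqueued
 * objects, dq_running tracks how many items are currently being run (and
 * doubles as the drain indicator for width-1 queues), and dq_width is the
 * allowed concurrency, e.g.:
 *
 *	if (dq->dq_width == 1) {
 *		// serial queue: at most one item runs at a time
 *	}
 */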

DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue);

DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_queue_specific_queue, dispatch_queue);
DISPATCH_CLASS_DECL(queue_specific_queue);

extern struct dispatch_queue_s _dispatch_mgr_q;

void _dispatch_queue_destroy(dispatch_object_t dou);
void _dispatch_queue_dispose(dispatch_queue_t dq);
void _dispatch_queue_invoke(dispatch_queue_t dq);
void _dispatch_queue_push_list_slow(dispatch_queue_t dq,
		struct dispatch_object_s *obj, unsigned int n);
void _dispatch_queue_push_slow(dispatch_queue_t dq,
		struct dispatch_object_s *obj);
unsigned long _dispatch_queue_probe(dispatch_queue_t dq);
dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou);
_dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_object_t dou);
void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t
		dqsq);
unsigned long _dispatch_root_queue_probe(dispatch_queue_t dq);
void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq);
unsigned long _dispatch_runloop_queue_probe(dispatch_queue_t dq);
void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq);
void _dispatch_runloop_queue_dispose(dispatch_queue_t dq);
void _dispatch_mgr_queue_drain(void);
unsigned long _dispatch_mgr_queue_probe(dispatch_queue_t dq);
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
void _dispatch_mgr_priority_init(void);
#else
static inline void _dispatch_mgr_priority_init(void) {}
#endif
void _dispatch_after_timer_callback(void *ctxt);
void _dispatch_async_redirect_invoke(void *ctxt);
void _dispatch_sync_recurse_invoke(void *ctxt);
void _dispatch_apply_invoke(void *ctxt);
void _dispatch_apply_redirect_invoke(void *ctxt);
void _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func);

#if DISPATCH_DEBUG
void dispatch_debug_queue(dispatch_queue_t dq, const char* str);
#else
static inline void dispatch_debug_queue(dispatch_queue_t dq DISPATCH_UNUSED,
		const char* str DISPATCH_UNUSED) {}
#endif

size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz);
size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf,
		size_t bufsiz);

#define DISPATCH_QUEUE_PRIORITY_COUNT 4
#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_PRIORITY_COUNT * 2)

// overcommit priority index values need bit 1 set
enum {
	DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY = 0,
	DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY,
	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY,
};
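
/*
 * For illustration: the overcommit variant of each priority sits at the odd
 * index immediately after its plain counterpart, so given a plain index the
 * overcommit one can be derived with a bitwise OR, e.g.:
 *
 *	idx = DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY;	// 2
 *	idx |= 1;	// DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY (3)
 */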

extern unsigned long volatile _dispatch_queue_serial_numbers;
extern struct dispatch_queue_s _dispatch_root_queues[];

#if !(USE_OBJC && __OBJC2__)

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head,
		struct dispatch_object_s *tail)
{
	struct dispatch_object_s *prev;
	tail->do_next = NULL;
	prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail, release);
	if (fastpath(prev)) {
		// if we crash here with a value less than 0x1000, then we are at a
		// known bug in client code; for example, see _dispatch_queue_dispose
		// or _dispatch_atfork_child
		prev->do_next = head;
	}
	return (prev != NULL);
}
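
/*
 * The helper above is a lock-free MPSC enqueue: the producer atomically swaps
 * the new tail into place and only afterwards links the old tail to the new
 * head, so a consumer draining the list may momentarily see a tail that is
 * not yet reachable from dq_items_head. A hedged sketch of the caller's side
 * (the slow-path names are the prototypes declared earlier in this header):
 *
 *	if (!_dispatch_queue_push_list2(dq, obj, obj)) {
 *		// the queue was empty: hand off to the slow path, which also
 *		// makes sure the queue gets scheduled to drain the new item
 *		_dispatch_queue_push_slow(dq, obj);
 *	}
 */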

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
		dispatch_object_t _tail, unsigned int n)
{
	struct dispatch_object_s *head = _head._do, *tail = _tail._do;
	if (!fastpath(_dispatch_queue_push_list2(dq, head, tail))) {
		_dispatch_queue_push_list_slow(dq, head, n);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail)
{
	struct dispatch_object_s *tail = _tail._do;
	if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) {
		_dispatch_queue_push_slow(dq, tail);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail,
		bool wakeup)
{
	struct dispatch_object_s *tail = _tail._do;
	if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) {
		_dispatch_queue_push_slow(dq, tail);
	} else if (slowpath(wakeup)) {
		_dispatch_wakeup(dq);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_class_invoke(dispatch_object_t dou,
		dispatch_queue_t (*invoke)(dispatch_object_t,
		_dispatch_thread_semaphore_t*))
{
	dispatch_queue_t dq = dou._dq;
	if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) &&
			fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){
		dispatch_queue_t tq = NULL;
		_dispatch_thread_semaphore_t sema = 0;
		tq = invoke(dq, &sema);
		// We do not need to check the result.
		// When the suspend-count lock is dropped, then the check will happen.
		(void)dispatch_atomic_dec2o(dq, dq_running, release);
		if (sema) {
			_dispatch_thread_semaphore_signal(sema);
		} else if (tq) {
			return _dispatch_queue_push(tq, dq);
		}
	}
	dq->do_next = DISPATCH_OBJECT_LISTLESS;
	if (!dispatch_atomic_sub2o(dq, do_suspend_cnt,
			DISPATCH_OBJECT_SUSPEND_LOCK, release)) {
		dispatch_atomic_barrier(seq_cst); // <rdar://problem/11915417>
		if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) {
			_dispatch_wakeup(dq); // verify that the queue is idle
		}
	}
	_dispatch_release(dq); // added when the queue is put on the list
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_t
_dispatch_queue_get_current(void)
{
	return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
}

DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline dispatch_queue_t
_dispatch_get_root_queue(long priority, bool overcommit)
{
	if (overcommit) switch (priority) {
	case DISPATCH_QUEUE_PRIORITY_BACKGROUND:
#if !DISPATCH_NO_BG_PRIORITY
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY];
#endif
	case DISPATCH_QUEUE_PRIORITY_LOW:
	case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY];
	case DISPATCH_QUEUE_PRIORITY_DEFAULT:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY];
	case DISPATCH_QUEUE_PRIORITY_HIGH:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY];
	}
	switch (priority) {
	case DISPATCH_QUEUE_PRIORITY_BACKGROUND:
#if !DISPATCH_NO_BG_PRIORITY
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY];
#endif
	case DISPATCH_QUEUE_PRIORITY_LOW:
	case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY];
	case DISPATCH_QUEUE_PRIORITY_DEFAULT:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY];
	case DISPATCH_QUEUE_PRIORITY_HIGH:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY];
	default:
		return NULL;
	}
}
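
/*
 * Illustrative call (a sketch, not additional API): asking for the default
 * priority with overcommit yields the root queue that newly created serial
 * queues target by default in this version of libdispatch:
 *
 *	dispatch_queue_t rq =
 *			_dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, true);
 *	// rq == &_dispatch_root_queues[
 *	//		DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY]
 *
 * Note also the deliberate fall-through above: when DISPATCH_NO_BG_PRIORITY
 * is set, a BACKGROUND request falls through to the LOW case.
 */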

// Note to later developers: ensure that any initialization changes are
// made for statically allocated queues (i.e. _dispatch_main_q).
static inline void
_dispatch_queue_init(dispatch_queue_t dq)
{
	dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;

	dq->dq_running = 0;
	dq->dq_width = 1;
	dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers,
			relaxed);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_set_bound_thread(dispatch_queue_t dq)
{
	// Tag thread-bound queues with the owning thread
	dispatch_assert(dq->dq_is_thread_bound);
	dq->do_finalizer = (void*)_dispatch_thread_self();
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_clear_bound_thread(dispatch_queue_t dq)
{
	dispatch_assert(dq->dq_is_thread_bound);
	dq->do_finalizer = NULL;
}

DISPATCH_ALWAYS_INLINE
static inline pthread_t
_dispatch_queue_get_bound_thread(dispatch_queue_t dq)
{
	dispatch_assert(dq->dq_is_thread_bound);
	return (pthread_t)dq->do_finalizer;
}

#ifndef DISPATCH_CONTINUATION_CACHE_LIMIT
#if TARGET_OS_EMBEDDED
#define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 16
#else
#define DISPATCH_CONTINUATION_CACHE_LIMIT 65536
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 128
#endif
#endif

dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void);
void _dispatch_continuation_free_to_heap(dispatch_continuation_t c);

#if DISPATCH_USE_MEMORYSTATUS_SOURCE
extern int _dispatch_continuation_cache_limit;
void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c);
#else
#define _dispatch_continuation_cache_limit DISPATCH_CONTINUATION_CACHE_LIMIT
#define _dispatch_continuation_free_to_cache_limit(c) \
		_dispatch_continuation_free_to_heap(c)
#endif

DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_alloc_cacheonly(void)
{
	dispatch_continuation_t dc = (dispatch_continuation_t)
			fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
	if (dc) {
		_dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
	}
	return dc;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_alloc(void)
{
	dispatch_continuation_t dc =
			fastpath(_dispatch_continuation_alloc_cacheonly());
	if (!dc) {
		return _dispatch_continuation_alloc_from_heap();
	}
	return dc;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_free_cacheonly(dispatch_continuation_t dc)
{
	dispatch_continuation_t prev_dc = (dispatch_continuation_t)
			fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
	int cnt = prev_dc ? prev_dc->do_ref_cnt + 1 : 1;
	// Cap continuation cache
	if (slowpath(cnt > _dispatch_continuation_cache_limit)) {
		return dc;
	}
	dc->do_next = prev_dc;
	dc->do_ref_cnt = cnt;
	_dispatch_thread_setspecific(dispatch_cache_key, dc);
	return NULL;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_free(dispatch_continuation_t dc)
{
	dc = _dispatch_continuation_free_cacheonly(dc);
	if (slowpath(dc)) {
		_dispatch_continuation_free_to_cache_limit(dc);
	}
}
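
/*
 * Sketch of the allocate/free pairing above (illustrative; my_work_function
 * and my_context are hypothetical): freed continuations go onto a per-thread
 * singly linked cache keyed by dispatch_cache_key, with do_ref_cnt reused as
 * the running count of cached entries, so a hot async path usually avoids the
 * heap entirely.
 *
 *	dispatch_continuation_t dc = _dispatch_continuation_alloc();
 *	dc->dc_func = my_work_function;	// hypothetical callback
 *	dc->dc_ctxt = my_context;	// hypothetical context pointer
 *	// ... enqueue and later invoke the continuation ...
 *	_dispatch_continuation_free(dc);	// returned to the thread cache
 */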

#endif // !(USE_OBJC && __OBJC2__)

#endif