/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */

#ifndef __DISPATCH_QUEUE_INTERNAL__
#define __DISPATCH_QUEUE_INTERNAL__

#ifndef __DISPATCH_INDIRECT__
#error "Please #include <dispatch/dispatch.h> instead of this file directly."
#include <dispatch/base.h> // for HeaderDoc
#endif

#if defined(__BLOCKS__) && !defined(DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES)
#define DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES 1 // <rdar://problem/10719357>
#endif

/* x86 & cortex-a8 have a 64 byte cacheline */
#define DISPATCH_CACHELINE_SIZE 64u
#define ROUND_UP_TO_CACHELINE_SIZE(x) \
		(((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \
		~(DISPATCH_CACHELINE_SIZE - 1u))
#define DISPATCH_CACHELINE_ALIGN \
		__attribute__((__aligned__(DISPATCH_CACHELINE_SIZE)))
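/*
 * Illustrative note (editorial, not part of the original header): with the
 * 64-byte DISPATCH_CACHELINE_SIZE above, the rounding macro adds 63 and then
 * masks off the low six bits, rounding any size up to the next multiple of 64
 * without a division, e.g.:
 *
 *	ROUND_UP_TO_CACHELINE_SIZE(1)  == 64
 *	ROUND_UP_TO_CACHELINE_SIZE(64) == 64
 *	ROUND_UP_TO_CACHELINE_SIZE(65) == 128
 */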


#pragma mark -
#pragma mark dispatch_queue_t

#define DISPATCH_QUEUE_HEADER \
	uint32_t volatile dq_running; \
	struct dispatch_object_s *volatile dq_items_head; \
	/* LP64 global queue cacheline boundary */ \
	struct dispatch_object_s *volatile dq_items_tail; \
	dispatch_queue_t dq_specific_q; \
	uint16_t dq_width; \
	uint16_t dq_is_thread_bound:1; \
	uint32_t volatile dq_override; \
	pthread_priority_t dq_priority; \
	mach_port_t dq_thread; \
	mach_port_t volatile dq_tqthread; \
	voucher_t dq_override_voucher; \
	unsigned long dq_serialnum; \
	const char *dq_label; \
	DISPATCH_INTROSPECTION_QUEUE_LIST;

#define DISPATCH_QUEUE_WIDTH_MAX UINT16_MAX

#define DISPATCH_QUEUE_CACHELINE_PADDING \
		char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
#ifdef __LP64__
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
		(0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
		+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#else
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
		(12*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
		+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#endif
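/*
 * Illustrative note (editorial, not part of the original header): assuming
 * DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE is 0 when introspection is disabled,
 * the padding works out to
 *
 *	LP64:  ((0*8  - 0) + 64) % 64 == 0  bytes
 *	ILP32: ((12*4 - 0) + 64) % 64 == 48 bytes
 *
 * i.e. the amount needed, under those assumptions, to round a statically
 * allocated dispatch_queue_s up to a 64-byte cacheline boundary.
 */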

DISPATCH_CLASS_DECL(queue);
#if !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
struct dispatch_queue_s {
	DISPATCH_STRUCT_HEADER(queue);
	DISPATCH_QUEUE_HEADER;
	DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only
};
#endif // !(defined(__cplusplus) && DISPATCH_INTROSPECTION)

DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue);

DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_queue_specific_queue, dispatch_queue);
DISPATCH_CLASS_DECL(queue_specific_queue);

void _dispatch_queue_destroy(dispatch_object_t dou);
void _dispatch_queue_dispose(dispatch_queue_t dq);
void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_object_t dou,
		dispatch_invoke_flags_t flags);
void _dispatch_queue_push_list_slow(dispatch_queue_t dq,
		pthread_priority_t pp, struct dispatch_object_s *obj, unsigned int n,
		bool retained);
void _dispatch_queue_push_slow(dispatch_queue_t dq,
		pthread_priority_t pp, struct dispatch_object_s *obj, bool retained);
unsigned long _dispatch_queue_probe(dispatch_queue_t dq);
dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou);
dispatch_queue_t _dispatch_queue_wakeup(dispatch_queue_t dq);
void _dispatch_queue_wakeup_and_release(dispatch_queue_t dq);
void _dispatch_queue_wakeup_with_qos(dispatch_queue_t dq,
		pthread_priority_t pp);
void _dispatch_queue_wakeup_with_qos_and_release(dispatch_queue_t dq,
		pthread_priority_t pp);
void _dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq,
		pthread_priority_t pp);
_dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_object_t dou);
void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t
		dqsq);
unsigned long _dispatch_root_queue_probe(dispatch_queue_t dq);
void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq);
unsigned long _dispatch_runloop_queue_probe(dispatch_queue_t dq);
void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq);
void _dispatch_runloop_queue_dispose(dispatch_queue_t dq);
void _dispatch_mgr_queue_drain(void);
unsigned long _dispatch_mgr_queue_probe(dispatch_queue_t dq);
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
void _dispatch_mgr_priority_init(void);
#else
static inline void _dispatch_mgr_priority_init(void) {}
#endif
void _dispatch_after_timer_callback(void *ctxt);
void _dispatch_async_redirect_invoke(void *ctxt);
void _dispatch_sync_recurse_invoke(void *ctxt);
void _dispatch_apply_invoke(void *ctxt);
void _dispatch_apply_redirect_invoke(void *ctxt);
void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func);
void _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func);

#if DISPATCH_DEBUG
void dispatch_debug_queue(dispatch_queue_t dq, const char* str);
#else
static inline void dispatch_debug_queue(dispatch_queue_t dq DISPATCH_UNUSED,
		const char* str DISPATCH_UNUSED) {}
#endif

size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz);
size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf,
		size_t bufsiz);

#define DISPATCH_QUEUE_QOS_COUNT 6
#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_QOS_COUNT * 2)
// must be in lowest to highest qos order (as encoded in pthread_priority_t)
// overcommit qos index values are the odd indexes (low bit set)
enum {
	DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
	DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS,
	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS,
	DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS,
	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS,
	DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS,
	DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT,
};
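/*
 * Illustrative note (editorial, not part of the original header): each QoS
 * class occupies one even/odd pair of slots in _dispatch_root_queues[], so
 * switching between the plain and overcommit variant of the same class is
 * simple index arithmetic, e.g.:
 *
 *	idx | 1;   // DEFAULT_QOS (6) -> DEFAULT_QOS_OVERCOMMIT (7)
 *	idx & ~1u; // DEFAULT_QOS_OVERCOMMIT (7) -> DEFAULT_QOS (6)
 */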

extern unsigned long volatile _dispatch_queue_serial_numbers;
extern struct dispatch_queue_s _dispatch_root_queues[];
extern struct dispatch_queue_s _dispatch_mgr_q;

#if HAVE_PTHREAD_WORKQUEUE_QOS
extern pthread_priority_t _dispatch_background_priority;
extern pthread_priority_t _dispatch_user_initiated_priority;
#endif

#pragma mark -
#pragma mark dispatch_queue_attr_t

typedef enum {
	_dispatch_queue_attr_overcommit_unspecified = 0,
	_dispatch_queue_attr_overcommit_enabled,
	_dispatch_queue_attr_overcommit_disabled,
} _dispatch_queue_attr_overcommit_t;

DISPATCH_CLASS_DECL(queue_attr);
struct dispatch_queue_attr_s {
	DISPATCH_STRUCT_HEADER(queue_attr);
	qos_class_t dqa_qos_class;
	int dqa_relative_priority;
	unsigned int dqa_overcommit:2, dqa_concurrent:1;
};

enum {
	DQA_INDEX_NON_OVERCOMMIT = 0,
	DQA_INDEX_OVERCOMMIT,
	DQA_INDEX_UNSPECIFIED_OVERCOMMIT,
};

#define DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT 3

enum {
	DQA_INDEX_CONCURRENT = 0,
	DQA_INDEX_SERIAL,
};

#define DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT 2

typedef enum {
	DQA_INDEX_QOS_CLASS_UNSPECIFIED = 0,
	DQA_INDEX_QOS_CLASS_MAINTENANCE,
	DQA_INDEX_QOS_CLASS_BACKGROUND,
	DQA_INDEX_QOS_CLASS_UTILITY,
	DQA_INDEX_QOS_CLASS_DEFAULT,
	DQA_INDEX_QOS_CLASS_USER_INITIATED,
	DQA_INDEX_QOS_CLASS_USER_INTERACTIVE,
} _dispatch_queue_attr_index_qos_class_t;

#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY)

extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[]
		[DISPATCH_QUEUE_ATTR_PRIO_COUNT]
		[DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT]
		[DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT];
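/*
 * Illustrative note (editorial, not part of the original header): the table of
 * canned attribute objects is indexed by QoS class, relative priority,
 * overcommit and concurrency using the DQA_INDEX_* values above. Assuming the
 * priority dimension is indexed by the negated relative priority (relative
 * priorities run from 0 down to QOS_MIN_RELATIVE_PRIORITY), the entry for a
 * serial, overcommitting, default-QoS queue at relative priority -2 would be
 * looked up roughly as:
 *
 *	&_dispatch_queue_attrs[DQA_INDEX_QOS_CLASS_DEFAULT][2]
 *			[DQA_INDEX_OVERCOMMIT][DQA_INDEX_SERIAL]
 */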

#pragma mark -
#pragma mark dispatch_continuation_t

// If do_vtable holds a small value rather than a real vtable pointer (see
// DISPATCH_OBJ_IS_VTABLE below), then the object is a continuation and the
// value is a combination of the DISPATCH_OBJ_*_BIT flags.
// Otherwise, the object has a private layout and memory management rules. The
// layout until after 'do_next' must align with normal objects.
#if __LP64__
#define DISPATCH_CONTINUATION_HEADER(x) \
	const void *do_vtable; \
	union { \
		pthread_priority_t dc_priority; \
		int dc_cache_cnt; \
		uintptr_t dc_pad; \
	}; \
	struct dispatch_##x##_s *volatile do_next; \
	struct voucher_s *dc_voucher; \
	dispatch_function_t dc_func; \
	void *dc_ctxt; \
	void *dc_data; \
	void *dc_other;
#define _DISPATCH_SIZEOF_PTR 8
#else
#define DISPATCH_CONTINUATION_HEADER(x) \
	const void *do_vtable; \
	union { \
		pthread_priority_t dc_priority; \
		int dc_cache_cnt; \
		uintptr_t dc_pad; \
	}; \
	struct voucher_s *dc_voucher; \
	struct dispatch_##x##_s *volatile do_next; \
	dispatch_function_t dc_func; \
	void *dc_ctxt; \
	void *dc_data; \
	void *dc_other;
#define _DISPATCH_SIZEOF_PTR 4
#endif
#define _DISPATCH_CONTINUATION_PTRS 8
#if DISPATCH_HW_CONFIG_UP
// UP devices don't contend on continuations so we don't need to force them to
// occupy a whole cacheline (which is intended to avoid contention)
#define DISPATCH_CONTINUATION_SIZE \
		(_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR)
#else
#define DISPATCH_CONTINUATION_SIZE ROUND_UP_TO_CACHELINE_SIZE( \
		(_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR))
#endif
#define ROUND_UP_TO_CONTINUATION_SIZE(x) \
		(((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
		~(DISPATCH_CONTINUATION_SIZE - 1u))
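/*
 * Illustrative note (editorial, not part of the original header): with eight
 * header pointers, the raw continuation size is 8*8 == 64 bytes on LP64 and
 * 8*4 == 32 bytes on 32-bit. On SMP the size is rounded up to the 64-byte
 * cacheline, so each continuation occupies exactly one cacheline; on UP the
 * unrounded size is kept, since there is no cross-CPU contention to avoid.
 */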

#define DISPATCH_OBJ_ASYNC_BIT 0x1
#define DISPATCH_OBJ_BARRIER_BIT 0x2
#define DISPATCH_OBJ_GROUP_BIT 0x4
#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x8
#define DISPATCH_OBJ_BLOCK_RELEASE_BIT 0x10
#define DISPATCH_OBJ_CTXT_FETCH_BIT 0x20
#define DISPATCH_OBJ_HAS_VOUCHER_BIT 0x80
// vtables are pointers far away from the low page in memory
#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 0xfful)
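/*
 * Illustrative sketch (editorial, not part of the original header): for plain
 * continuations the flag bits above are stored directly in do_vtable instead
 * of a vtable pointer, which is why DISPATCH_OBJ_IS_VTABLE() can tell the two
 * apart by magnitude. A barrier async continuation might be set up roughly as:
 *
 *	dc->do_vtable = (void *)(uintptr_t)
 *			(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
 *	DISPATCH_OBJ_IS_VTABLE(dc); // false: 0x3 is below the low-page threshold
 */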

struct dispatch_continuation_s {
	DISPATCH_CONTINUATION_HEADER(continuation);
};
typedef struct dispatch_continuation_s *dispatch_continuation_t;

#ifndef DISPATCH_CONTINUATION_CACHE_LIMIT
#if TARGET_OS_EMBEDDED
#define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 16
#else
#define DISPATCH_CONTINUATION_CACHE_LIMIT 1024
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 128
#endif
#endif

dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void);
void _dispatch_continuation_free_to_heap(dispatch_continuation_t c);

#if DISPATCH_USE_MEMORYSTATUS_SOURCE
extern int _dispatch_continuation_cache_limit;
void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c);
#else
#define _dispatch_continuation_cache_limit DISPATCH_CONTINUATION_CACHE_LIMIT
#define _dispatch_continuation_free_to_cache_limit(c) \
		_dispatch_continuation_free_to_heap(c)
#endif

#pragma mark -
#pragma mark dispatch_apply_t

struct dispatch_apply_s {
	size_t volatile da_index, da_todo;
	size_t da_iterations, da_nested;
	dispatch_continuation_t da_dc;
	_dispatch_thread_semaphore_t da_sema;
	uint32_t da_thr_cnt;
};
typedef struct dispatch_apply_s *dispatch_apply_t;

#pragma mark -
#pragma mark dispatch_block_t

#ifdef __BLOCKS__

#define DISPATCH_BLOCK_API_MASK (0x80u - 1)
#define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31)
#define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30)
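/*
 * Illustrative note (editorial, not part of the original header): the public
 * dispatch_block_flags_t values all fit under DISPATCH_BLOCK_API_MASK (0x7f),
 * leaving the top bits free for internal bookkeeping, e.g.:
 *
 *	(flags & DISPATCH_BLOCK_API_MASK)      // caller-supplied flags only
 *	(flags | DISPATCH_BLOCK_HAS_PRIORITY)  // note that a priority was captured
 */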

#define DISPATCH_BLOCK_PRIVATE_DATA_HEADER() \
	unsigned long dbpd_magic; \
	dispatch_block_flags_t dbpd_flags; \
	unsigned int volatile dbpd_atomic_flags; \
	int volatile dbpd_performed; \
	pthread_priority_t dbpd_priority; \
	voucher_t dbpd_voucher; \
	dispatch_block_t dbpd_block; \
	dispatch_group_t dbpd_group; \
	dispatch_queue_t volatile dbpd_queue; \
	mach_port_t dbpd_thread;

#if !defined(__cplusplus)
struct dispatch_block_private_data_s {
	DISPATCH_BLOCK_PRIVATE_DATA_HEADER();
};
#endif
typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t;

// dbpd_atomic_flags bits
#define DBF_CANCELED 1u // block has been cancelled
#define DBF_WAITING 2u // dispatch_block_wait has begun
#define DBF_WAITED 4u // dispatch_block_wait has finished without timeout
#define DBF_PERFORM 8u // dispatch_block_perform: don't group_leave

#define DISPATCH_BLOCK_PRIVATE_DATA_MAGIC 0xD159B10C // 0xDISPatch_BLOCk

// struct for synchronous perform: no group_leave at end of invoke
#define DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block) \
		{ \
			.dbpd_magic = DISPATCH_BLOCK_PRIVATE_DATA_MAGIC, \
			.dbpd_flags = (flags), \
			.dbpd_atomic_flags = DBF_PERFORM, \
			.dbpd_block = (block), \
		}
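/*
 * Illustrative sketch (editorial, not part of the original header): the
 * PERFORM initializer is meant for a stack-allocated private data struct used
 * by a synchronous, perform-style invoke, roughly:
 *
 *	struct dispatch_block_private_data_s dbpds =
 *			DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block);
 *	_dispatch_block_invoke(&dbpds); // DBF_PERFORM: no group_leave at the end
 */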

dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags,
		voucher_t voucher, pthread_priority_t priority, dispatch_block_t block);
void _dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd);

#endif /* __BLOCKS__ */

#endif