1 | .\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. |
2 | .Dd May 1, 2009 | |
3 | .Dt dispatch_async 3 | |
4 | .Os Darwin | |
5 | .Sh NAME | |
6 | .Nm dispatch_async , | |
7 | .Nm dispatch_sync | |
8 | .Nd schedule blocks for execution | |
9 | .Sh SYNOPSIS | |
10 | .Fd #include <dispatch/dispatch.h> | |
11 | .Ft void | |
12 | .Fo dispatch_async | |
13 | .Fa "dispatch_queue_t queue" "void (^block)(void)" | |
14 | .Fc | |
15 | .Ft void | |
16 | .Fo dispatch_sync | |
17 | .Fa "dispatch_queue_t queue" "void (^block)(void)" | |
18 | .Fc | |
19 | .Ft void | |
20 | .Fo dispatch_async_f | |
21 | .Fa "dispatch_queue_t queue" "void *context" "void (*function)(void *)" | |
22 | .Fc | |
23 | .Ft void | |
24 | .Fo dispatch_sync_f | |
25 | .Fa "dispatch_queue_t queue" "void *context" "void (*function)(void *)" | |
26 | .Fc | |
27 | .Sh DESCRIPTION | |
28 | The | |
29 | .Fn dispatch_async | |
30 | and | |
31 | .Fn dispatch_sync | |
32 | functions schedule blocks for concurrent execution within the | |
33 | .Xr dispatch 3 | |
34 | framework. Blocks are submitted to a queue which dictates the policy for their | |
35 | execution. See | |
36 | .Xr dispatch_queue_create 3 | |
37 | for more information about creating dispatch queues. | |
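.Pp
For example, a block may be submitted asynchronously to a serial queue created
with
.Xr dispatch_queue_create 3
(the queue label and the
.Fn do_work
call below are purely illustrative):
.Bd -literal -offset indent
dispatch_queue_t queue;

queue = dispatch_queue_create("com.example.work", NULL);
dispatch_async(queue, ^{
	// executed later, on the newly created serial queue
	do_work();
});
dispatch_release(queue);	// dispatch_async() holds its own reference
.Ed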
.Pp
These functions support efficient temporal synchronization, background
concurrency and data-level concurrency. These same functions can also be used
for efficient notification of the completion of asynchronous blocks (a.k.a.
callbacks).
.Sh TEMPORAL SYNCHRONIZATION
Synchronization is often required when multiple threads of execution access
shared data concurrently. The simplest form of synchronization is
mutual-exclusion (a lock), whereby different subsystems execute concurrently
until a shared critical section is entered. In the
.Xr pthread 3
family of procedures, temporal synchronization is accomplished like so:
.Bd -literal -offset indent
int r = pthread_mutex_lock(&my_lock);
assert(r == 0);

// critical section

r = pthread_mutex_unlock(&my_lock);
assert(r == 0);
.Ed
.Pp
The
.Fn dispatch_sync
function may be used with a serial queue to accomplish the same style of
synchronization. For example:
.Bd -literal -offset indent
dispatch_sync(my_queue, ^{
	// critical section
});
.Ed
.Pp
In addition to providing a more concise expression of synchronization, this
approach is less error prone as the critical section cannot be accidentally
left without restoring the queue to a reentrant state.
.Pp
The
.Fn dispatch_async
function may be used to implement deferred critical sections when the result
of the block is not needed locally. Deferred critical sections have the same
synchronization properties as the above code, but are non-blocking and
therefore more efficient to perform. For example:
.Bd -literal
dispatch_async(my_queue, ^{
	// critical section
});
.Ed
.Sh BACKGROUND CONCURRENCY
The
.Fn dispatch_async
function may be used to execute trivial background tasks on a global concurrent
queue. For example:
.Bd -literal
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
	// background operation
});
.Ed
.Pp
This approach is an efficient replacement for
.Xr pthread_create 3 .
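.Pp
For comparison, roughly equivalent code using a detached thread might look like
the following sketch (error handling is omitted and
.Fn worker
stands in for an application-defined routine):
.Bd -literal
void *worker(void *arg);	// performs the background operation

pthread_t tid;
pthread_attr_t attr;

pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
pthread_create(&tid, &attr, worker, NULL);
pthread_attr_destroy(&attr);
.Ed
.Pp
Unlike
.Fn pthread_create ,
.Fn dispatch_async
does not create a new thread for every submitted block; blocks run on threads
managed by the system.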
.Sh COMPLETION CALLBACKS
Completion callbacks can be accomplished via nested calls to the
.Fn dispatch_async
function. It is important to remember to retain the destination queue before
the first call to
.Fn dispatch_async ,
and to release that queue at the end of the completion callback to ensure the
destination queue is not deallocated while the completion callback is pending.
For example:
.Bd -literal
void
async_read(object_t obj,
	void *where, size_t bytes,
	dispatch_queue_t destination_queue,
	void (^reply_block)(ssize_t r, int err))
{
	// There are better ways of doing async I/O.
	// This is just an example of nested blocks.

	dispatch_retain(destination_queue);

	dispatch_async(obj->queue, ^{
		ssize_t r = read(obj->fd, where, bytes);
		int err = errno;

		dispatch_async(destination_queue, ^{
			reply_block(r, err);
		});
		dispatch_release(destination_queue);
	});
}
.Ed
.Sh RECURSIVE LOCKS
While
.Fn dispatch_sync
can replace a lock, it cannot replace a recursive lock. Unlike locks, queues
support both asynchronous and synchronous operations, and those operations are
ordered by definition. A recursive call to
.Fn dispatch_sync
causes a simple deadlock as the currently executing block waits for the next
block to complete, but the next block will not start until the currently
running block completes.
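.Pp
For example, assuming my_queue is a serial queue, the following recursive
submission deadlocks:
.Bd -literal -offset indent
dispatch_sync(my_queue, ^{
	// The inner dispatch_sync() waits for my_queue to become available,
	// but my_queue cannot run another block until this outer block
	// returns: deadlock.
	dispatch_sync(my_queue, ^{
		// never reached
	});
});
.Ed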
.Pp
When the dispatch framework was designed, we studied recursive locks. We found
that the vast majority of recursive locks are deployed retroactively when
ill-defined lock hierarchies are discovered. As a consequence, the adoption of
recursive locks often mutates obvious bugs into obscure ones. This study also
revealed an insight: if reentrancy is unavoidable, then reader/writer locks are
preferable to recursive locks. Disciplined use of reader/writer locks enables
reentrancy only when reentrancy is safe (the "read" side of the lock).
.Pp
Nevertheless, if it is absolutely necessary, what follows is an imperfect way
of implementing recursive locks using the dispatch framework:
.Bd -literal
void
sloppy_lock(object_t object, void (^block)(void))
{
	if (object->owner == pthread_self()) {
		return block();
	}
	dispatch_sync(object->queue, ^{
		object->owner = pthread_self();
		block();
		object->owner = NULL;
	});
}
.Ed
.Pp
The above example does not solve the case where queue A runs on thread X which
calls
.Fn dispatch_sync
against queue B which runs on thread Y which recursively calls
.Fn dispatch_sync
against queue A, which deadlocks both examples. This is bug-for-bug compatible
with nontrivial pthread usage. In fact, nontrivial reentrancy is impossible to
support in recursive locks once the ultimate level of reentrancy is deployed
(IPC or RPC).
.Sh IMPLIED REFERENCES
Synchronous functions within the dispatch framework hold an implied reference
on the target queue. In other words, the synchronous function borrows the
reference of the calling function (this is valid because the calling function
is blocked waiting for the result of the synchronous function, and therefore
cannot modify the reference count of the target queue until after the
synchronous function has returned).
For example:
.Bd -literal
queue = dispatch_queue_create("com.example.queue", NULL);
assert(queue);
dispatch_sync(queue, ^{
	do_something();
	//dispatch_release(queue); // NOT SAFE -- dispatch_sync() is still using 'queue'
});
dispatch_release(queue); // SAFELY balanced outside of the block provided to dispatch_sync()
.Ed
.Pp
This is in contrast to asynchronous functions which must retain both the block
and target queue for the duration of the asynchronous operation (as the calling
function may immediately release its interest in these objects).
.Sh FUNDAMENTALS
Conceptually,
.Fn dispatch_sync
is a convenient wrapper around
.Fn dispatch_async
with the addition of a semaphore to wait for completion of the block, and a
wrapper around the block to signal its completion. See
.Xr dispatch_semaphore_create 3
for more information about dispatch semaphores. The actual implementation of
the
.Fn dispatch_sync
function may be optimized and differ from the above description.
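.Pp
The conceptual model described above might be sketched as follows (this is an
illustration only, not the library's actual implementation):
.Bd -literal
void
conceptual_dispatch_sync(dispatch_queue_t queue, void (^block)(void))
{
	dispatch_semaphore_t sema = dispatch_semaphore_create(0);

	dispatch_async(queue, ^{
		block();
		// signal the caller that the block has completed
		dispatch_semaphore_signal(sema);
	});
	// wait for the block to finish on the target queue
	dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
	dispatch_release(sema);
}
.Ed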
.Pp
The
.Fn dispatch_async
function is a wrapper around
.Fn dispatch_async_f .
The application-defined
.Fa context
parameter is passed to the
.Fa function
when it is invoked on the target
.Fa queue .
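.Pp
For example, an application-defined function and heap-allocated context might
be used as follows (the
.Fn print_message
function and the queue variable are illustrative; the context must remain
valid until the function runs, hence the heap allocation):
.Bd -literal
static void
print_message(void *context)
{
	// the context pointer given to dispatch_async_f() arrives here
	char *message = context;

	puts(message);
	free(message);
}

dispatch_async_f(queue, strdup("hello"), print_message);
.Ed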
.Pp
The
.Fn dispatch_sync
function is a wrapper around
.Fn dispatch_sync_f .
The application-defined
.Fa context
parameter is passed to the
.Fa function
when it is invoked on the target
.Fa queue .
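.Pp
Similarly, because the caller of
.Fn dispatch_sync_f
blocks until the supplied function has returned, a stack-allocated context may
be used (the structure and the
.Fn add_numbers
function below are illustrative):
.Bd -literal
struct sum_ctx {
	long a, b;
	long result;
};

static void
add_numbers(void *context)
{
	struct sum_ctx *ctx = context;

	// runs on the target queue while the caller is blocked
	ctx->result = ctx->a + ctx->b;
}

struct sum_ctx ctx = { .a = 1, .b = 2 };
dispatch_sync_f(queue, &ctx, add_numbers);
// ctx.result is now valid
.Ed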
.Sh SEE ALSO
.Xr dispatch_apply 3 ,
.Xr dispatch_once 3 ,
.Xr dispatch_queue_create 3 ,
.Xr dispatch_semaphore_create 3