.\" Copyright (c) 2008-2017 Apple Inc. All rights reserved.
.Dd May 1, 2009
.Dt dispatch_apply 3
.Os Darwin
.Sh NAME
.Nm dispatch_apply
.Nd schedule blocks for iterative execution
.Sh SYNOPSIS
.Fd #include <dispatch/dispatch.h>
.Ft void
.Fo dispatch_apply
.Fa "size_t iterations" "dispatch_queue_t queue" "void (^block)(size_t)"
.Fc
.Ft void
.Fo dispatch_apply_f
.Fa "size_t iterations" "dispatch_queue_t queue" "void *context" "void (*function)(void *, size_t)"
.Fc
.Sh DESCRIPTION
The
.Fn dispatch_apply
function provides data-level concurrency through a "for (;;)" loop like primitive:
.Bd -literal
size_t iterations = 10;

// 'idx' is zero indexed, just like:
// for (idx = 0; idx < iterations; idx++)

dispatch_apply(iterations, DISPATCH_APPLY_AUTO, ^(size_t idx) {
	printf("%zu\\n", idx);
});
.Ed
.Pp
Although any queue can be used, it is strongly recommended to use
.Vt DISPATCH_APPLY_AUTO
as the
.Vt queue
argument to both
.Fn dispatch_apply
and
.Fn dispatch_apply_f ,
as shown in the example above, since this allows the system to automatically use worker threads
that match the configuration of the current thread as closely as possible.
No assumptions should be made about which global concurrent queue will be used.
.Pp
Like a "for (;;)" loop, the
.Fn dispatch_apply
function is synchronous.
If asynchronous behavior is desired, wrap the call to
.Fn dispatch_apply
with a call to
.Fn dispatch_async
against another queue.
.Pp
Sometimes, when the block passed to
.Fn dispatch_apply
is simple, the use of striding can tune performance.
Calculating the optimal stride is best left to experimentation.
Start with a stride of one and work upwards until the desired performance is
achieved (perhaps using a power of two search):
.Bd -literal
#define STRIDE 3

dispatch_apply(count / STRIDE, DISPATCH_APPLY_AUTO, ^(size_t idx) {
	size_t j = idx * STRIDE;
	size_t j_stop = j + STRIDE;
	do {
		printf("%zu\\n", j++);
	} while (j < j_stop);
});

size_t i;
for (i = count - (count % STRIDE); i < count; i++) {
	printf("%zu\\n", i);
}
.Ed
.Sh IMPLIED REFERENCES
Synchronous functions within the dispatch framework hold an implied reference
on the target queue. In other words, the synchronous function borrows the
reference of the calling function (this is valid because the calling function
is blocked waiting for the result of the synchronous function, and therefore
cannot modify the reference count of the target queue until after the
synchronous function has returned).
.Pp
This is in contrast to asynchronous functions which must retain both the block
and target queue for the duration of the asynchronous operation (as the calling
function may immediately release its interest in these objects).
.Sh FUNDAMENTALS
.Fn dispatch_apply
and
.Fn dispatch_apply_f
attempt to quickly create enough worker threads to efficiently iterate work in parallel.
By contrast, a loop that passes work items individually to
.Fn dispatch_async
or
.Fn dispatch_async_f
will incur more overhead and does not express the desired parallel execution semantics to
the system, so may not create an optimal number of worker threads for a parallel workload.
For this reason, prefer to use
.Fn dispatch_apply
or
.Fn dispatch_apply_f
when parallel execution is important.
.Pp
The
.Fn dispatch_apply
function is a wrapper around
.Fn dispatch_apply_f .
.Sh CAVEATS
Unlike
.Fn dispatch_async ,
a block submitted to
.Fn dispatch_apply
is expected to be either independent or dependent
.Em only
on work already performed in lower-indexed invocations of the block. If
the block's index dependency is non-linear, it is recommended to
use a for-loop around invocations of
.Fn dispatch_async .
.Sh SEE ALSO
.Xr dispatch 3 ,
.Xr dispatch_async 3 ,
.Xr dispatch_queue_create 3