/* apple/libdispatch — src/data.c (blob 8048964606ffcb4d511f1c1f9850393367237648) */
1 /*
2 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 #include "internal.h"
22
23 // Dispatch data objects are dispatch objects with standard retain/release
24 // memory management. A dispatch data object either points to a number of other
25 // dispatch data objects or is a leaf data object. A leaf data object contains
26 // a pointer to represented memory. A composite data object specifies the total
27 // size of data it represents and list of constituent records.
28 //
29 // A leaf data object has a single entry in records[], the object size is the
30 // same as records[0].length and records[0].from is always 0. In other words, a
31 // leaf data object always points to a full represented buffer, so a composite
32 // dispatch data object is needed to represent a subrange of a memory region.
33
// Internal retain/release for dispatch data sub-objects; plain object
// refcounting via the public retain/release entry points.
#define _dispatch_data_retain(x) dispatch_retain(x)
#define _dispatch_data_release(x) dispatch_release(x)

#if DISPATCH_DATA_MOVABLE
#if DISPATCH_USE_RESOLVERS && !defined(DISPATCH_RESOLVED_VARIANT)
#error Resolved variant required for movable
#endif
// Sentinel destructor identifying a temporary object that pins (locks) an
// underlying leaf's buffer; compared by address only and never invoked —
// executing it indicates the sentinel escaped into a destructor code path.
static const dispatch_block_t _dispatch_data_destructor_unlock = ^{
	DISPATCH_CRASH("unlock destructor called");
};
#define DISPATCH_DATA_DESTRUCTOR_UNLOCK (_dispatch_data_destructor_unlock)
#endif
46
// Sentinel destructor blocks backing the public DISPATCH_DATA_DESTRUCTOR_*
// constants. They are distinguished by address only (see
// _dispatch_data_destroy_buffer) and must never be invoked; calling one means
// a sentinel leaked into a path that executes destructor blocks.
const dispatch_block_t _dispatch_data_destructor_free = ^{
	DISPATCH_CRASH("free destructor called");
};

const dispatch_block_t _dispatch_data_destructor_none = ^{
	DISPATCH_CRASH("none destructor called");
};

const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{
	DISPATCH_CRASH("vmdeallocate destructor called");
};
58
// Singleton returned for zero-length data requests (dispatch_data_empty).
// Statically allocated with immortal (global) reference counts so
// retain/release on it are no-ops.
struct dispatch_data_s _dispatch_data_empty = {
	.do_vtable = DISPATCH_VTABLE(data),
	.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	.do_next = DISPATCH_OBJECT_LISTLESS,
};
65
66 static dispatch_data_t
67 _dispatch_data_init(size_t n)
68 {
69 dispatch_data_t data = _dispatch_alloc(DISPATCH_VTABLE(data),
70 sizeof(struct dispatch_data_s) + n * sizeof(range_record));
71 data->num_records = n;
72 data->do_targetq = dispatch_get_global_queue(
73 DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
74 data->do_next = DISPATCH_OBJECT_LISTLESS;
75 return data;
76 }
77
78 static void
79 _dispatch_data_destroy_buffer(const void* buffer, size_t size,
80 dispatch_queue_t queue, dispatch_block_t destructor)
81 {
82 if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
83 free((void*)buffer);
84 } else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) {
85 // do nothing
86 } else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) {
87 vm_deallocate(mach_task_self(), (vm_address_t)buffer, size);
88 } else {
89 if (!queue) {
90 queue = dispatch_get_global_queue(
91 DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
92 }
93 dispatch_async_f(queue, destructor, _dispatch_call_block_and_release);
94 }
95 }
96
97 dispatch_data_t
98 dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue,
99 dispatch_block_t destructor)
100 {
101 dispatch_data_t data;
102 if (!buffer || !size) {
103 // Empty data requested so return the singleton empty object. Call
104 // destructor immediately in this case to ensure any unused associated
105 // storage is released.
106 if (destructor) {
107 _dispatch_data_destroy_buffer(buffer, size, queue,
108 _dispatch_Block_copy(destructor));
109 }
110 return dispatch_data_empty;
111 }
112 data = _dispatch_data_init(1);
113 // Leaf objects always point to the entirety of the memory region
114 data->leaf = true;
115 data->size = size;
116 data->records[0].from = 0;
117 data->records[0].length = size;
118 if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
119 // The default destructor was provided, indicating the data should be
120 // copied.
121 void *data_buf = malloc(size);
122 if (slowpath(!data_buf)) {
123 free(data);
124 return NULL;
125 }
126 buffer = memcpy(data_buf, buffer, size);
127 data->destructor = DISPATCH_DATA_DESTRUCTOR_FREE;
128 } else {
129 data->destructor = _dispatch_Block_copy(destructor);
130 #if DISPATCH_DATA_MOVABLE
131 // A non-default destructor was provided, indicating the system does not
132 // own the buffer. Mark the object as locked since the application has
133 // direct access to the buffer and it cannot be reallocated/moved.
134 data->locked = 1;
135 #endif
136 }
137 data->records[0].data_object = (void*)buffer;
138 if (queue) {
139 _dispatch_retain(queue);
140 data->do_targetq = queue;
141 }
142 return data;
143 }
144
/*
 * Object teardown, invoked via the vtable on final release.
 *
 * A NULL destructor marks a composite object: every record references another
 * dispatch data object that must be released. Any non-NULL destructor marks a
 * leaf (or, when movable, an unlock proxy): the single represented buffer is
 * destroyed per the destructor's semantics.
 */
void
_dispatch_data_dispose(dispatch_data_t dd)
{
	dispatch_block_t destructor = dd->destructor;
	if (destructor == NULL) {
		// Composite: drop the reference taken on each sub-object.
		size_t i;
		for (i = 0; i < dd->num_records; ++i) {
			_dispatch_data_release(dd->records[i].data_object);
		}
#if DISPATCH_DATA_MOVABLE
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_UNLOCK) {
		// Proxy created by create_map/apply: undo the lock that pinned the
		// underlying leaf's buffer, then release the leaf.
		dispatch_data_t data = (dispatch_data_t)dd->records[0].data_object;
		(void)dispatch_atomic_dec2o(data, locked);
		_dispatch_data_release(data);
#endif
	} else {
		// Leaf: destroy the single represented buffer.
		_dispatch_data_destroy_buffer(dd->records[0].data_object,
				dd->records[0].length, dd->do_targetq, destructor);
	}
}
165
166 size_t
167 _dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz)
168 {
169 size_t offset = 0;
170 if (dd->leaf) {
171 offset += snprintf(&buf[offset], bufsiz - offset,
172 "leaf: %d, size: %zd, data: %p", dd->leaf, dd->size,
173 dd->records[0].data_object);
174 } else {
175 offset += snprintf(&buf[offset], bufsiz - offset,
176 "leaf: %d, size: %zd, num_records: %zd", dd->leaf,
177 dd->size, dd->num_records);
178 size_t i;
179 for (i = 0; i < dd->num_records; ++i) {
180 range_record r = dd->records[i];
181 offset += snprintf(&buf[offset], bufsiz - offset,
182 "records[%zd] from: %zd, length %zd, data_object: %p", i,
183 r.from, r.length, r.data_object);
184 }
185 }
186 return offset;
187 }
188
// Total number of bytes represented by the data object (leaf or composite).
size_t
dispatch_data_get_size(dispatch_data_t dd)
{
	return dd->size;
}
194
/*
 * Concatenate dd1 and dd2 into a new composite object. Concatenation with the
 * empty object simply returns the other operand (retained for the caller).
 * The result flat-copies both operands' record lists, so its depth does not
 * grow with repeated concatenation of composites.
 */
dispatch_data_t
dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2)
{
	dispatch_data_t data;
	if (!dd1->size) {
		_dispatch_data_retain(dd2);
		return dd2;
	}
	if (!dd2->size) {
		_dispatch_data_retain(dd1);
		return dd1;
	}
	data = _dispatch_data_init(dd1->num_records + dd2->num_records);
	data->size = dd1->size + dd2->size;
	// Copy the constituent records into the newly created data object
	memcpy(data->records, dd1->records, dd1->num_records *
			sizeof(range_record));
	memcpy(data->records + dd1->num_records, dd2->records, dd2->num_records *
			sizeof(range_record));
	// Reference leaf objects as sub-objects: a leaf's records[0].data_object
	// is the raw buffer pointer, so substitute the leaf object itself before
	// the retain loop below (composite records already point at data objects).
	if (dd1->leaf) {
		data->records[0].data_object = dd1;
	}
	if (dd2->leaf) {
		data->records[dd1->num_records].data_object = dd2;
	}
	// Take a reference on every sub-object; released in _dispatch_data_dispose.
	size_t i;
	for (i = 0; i < data->num_records; ++i) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}
227
/*
 * Return a data object representing dd[offset .. offset+length). Out-of-range
 * offsets yield the empty singleton; a length that runs past the end is
 * clamped; the full range returns dd itself (retained). Subranges of a leaf
 * become a one-record composite referencing the leaf; subranges of a
 * composite are built by recursing into the overlapping records and
 * concatenating the pieces.
 */
dispatch_data_t
dispatch_data_create_subrange(dispatch_data_t dd, size_t offset,
		size_t length)
{
	dispatch_data_t data;
	if (offset >= dd->size || !length) {
		return dispatch_data_empty;
	} else if ((offset + length) > dd->size) {
		length = dd->size - offset;
	} else if (length == dd->size) {
		// Only reachable with offset == 0 (offset + length <= size above).
		_dispatch_data_retain(dd);
		return dd;
	}
	if (dd->leaf) {
		data = _dispatch_data_init(1);
		data->size = length;
		data->records[0].from = offset;
		data->records[0].length = length;
		data->records[0].data_object = dd;
		_dispatch_data_retain(dd);
		return data;
	}
	// Subrange of a composite dispatch data object: find the record containing
	// the specified offset
	data = dispatch_data_empty;
	size_t i = 0, bytes_left = length;
	while (i < dd->num_records && offset >= dd->records[i].length) {
		offset -= dd->records[i++].length;
	}
	// Accumulate per-record subranges; each concat takes its own references,
	// so the intermediate objects are released immediately.
	while (i < dd->num_records) {
		size_t record_len = dd->records[i].length - offset;
		if (record_len > bytes_left) {
			record_len = bytes_left;
		}
		dispatch_data_t subrange = dispatch_data_create_subrange(
				dd->records[i].data_object, dd->records[i].from + offset,
				record_len);
		dispatch_data_t concat = dispatch_data_create_concat(data, subrange);
		_dispatch_data_release(data);
		_dispatch_data_release(subrange);
		data = concat;
		bytes_left -= record_len;
		if (!bytes_left) {
			return data;
		}
		offset = 0;
		i++;
	}
	// Crashing here indicates memory corruption of passed in data object
	DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
	return NULL;
}
280
// When mapping a leaf object or a subrange of a leaf object, return a direct
// pointer to the represented buffer. For all other data objects, copy the
// represented buffers into a contiguous area. In the future it might
// be possible to relocate the buffers instead (if not marked as locked).
//
// On success *buffer_ptr/*size_ptr (if non-NULL) describe the contiguous
// bytes, valid for the lifetime of the returned object. Returns NULL (with
// size 0) if the contiguous copy cannot be allocated.
dispatch_data_t
dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr,
		size_t *size_ptr)
{
	dispatch_data_t data = dd;
	void *buffer = NULL;
	size_t size = dd->size, offset = 0;
	if (!size) {
		data = dispatch_data_empty;
		goto out;
	}
	// A one-record composite over a leaf is as good as the leaf itself:
	// unwrap it, remembering the subrange offset.
	if (!dd->leaf && dd->num_records == 1 &&
			((dispatch_data_t)dd->records[0].data_object)->leaf) {
		offset = dd->records[0].from;
		dd = (dispatch_data_t)(dd->records[0].data_object);
	}
	if (dd->leaf) {
#if DISPATCH_DATA_MOVABLE
		data = _dispatch_data_init(1);
		// Make sure the underlying leaf object does not move the backing buffer
		(void)dispatch_atomic_inc2o(dd, locked);
		data->size = size;
		data->destructor = DISPATCH_DATA_DESTRUCTOR_UNLOCK;
		data->records[0].data_object = dd;
		data->records[0].from = offset;
		data->records[0].length = size;
		_dispatch_data_retain(dd);
#else
		_dispatch_data_retain(data);
#endif
		buffer = dd->records[0].data_object + offset;
		goto out;
	}
	// Composite data object, copy the represented buffers
	buffer = malloc(size);
	if (!buffer) {
		data = NULL;
		size = 0;
		goto out;
	}
	dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
			size_t off, const void* buf, size_t len) {
		memcpy(buffer + off, buf, len);
		return (bool)true;
	});
	// The new leaf takes ownership of the copy and frees it on disposal.
	data = dispatch_data_create(buffer, size, NULL,
			DISPATCH_DATA_DESTRUCTOR_FREE);
out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	if (size_ptr) {
		*size_ptr = size;
	}
	return data;
}
341
/*
 * Recursive worker for dispatch_data_apply: invoke applier once per
 * contiguous region of dd, passing the running byte offset within the
 * top-level object. `from`/`size` select the subrange of dd's buffer that
 * this record represents. Stops (returning false) as soon as the applier
 * returns false.
 */
static bool
_dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from,
		size_t size, dispatch_data_applier_t applier)
{
	bool result = true;
	dispatch_data_t data = dd;
	const void *buffer;
	dispatch_assert(dd->size);
#if DISPATCH_DATA_MOVABLE
	if (dd->leaf) {
		// Hand the applier a temporary object that pins the leaf's buffer
		// for the duration of the callback.
		data = _dispatch_data_init(1);
		// Make sure the underlying leaf object does not move the backing buffer
		(void)dispatch_atomic_inc2o(dd, locked);
		data->size = size;
		data->destructor = DISPATCH_DATA_DESTRUCTOR_UNLOCK;
		data->records[0].data_object = dd;
		data->records[0].from = from;
		data->records[0].length = size;
		_dispatch_data_retain(dd);
		buffer = dd->records[0].data_object + from;
		result = applier(data, offset, buffer, size);
		_dispatch_data_release(data);
		return result;
	}
#else
	// Unwrap a one-record composite over a leaf so the applier sees the
	// region object (data) while we read through the leaf's buffer.
	if (!dd->leaf && dd->num_records == 1 &&
			((dispatch_data_t)dd->records[0].data_object)->leaf) {
		from = dd->records[0].from;
		dd = (dispatch_data_t)(dd->records[0].data_object);
	}
	if (dd->leaf) {
		buffer = dd->records[0].data_object + from;
		return applier(data, offset, buffer, size);
	}
#endif
	// Composite: recurse into each record, advancing the top-level offset.
	size_t i;
	for (i = 0; i < dd->num_records && result; ++i) {
		result = _dispatch_data_apply(dd->records[i].data_object,
				offset, dd->records[i].from, dd->records[i].length,
				applier);
		offset += dd->records[i].length;
	}
	return result;
}
386
387 bool
388 dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier)
389 {
390 if (!dd->size) {
391 return true;
392 }
393 return _dispatch_data_apply(dd, 0, 0, dd->size, applier);
394 }
395
// Returns either a leaf object or an object composed of a single leaf object.
// Locates the contiguous region containing byte `location` of dd, stores the
// region's starting offset within dd in *offset_ptr, and returns the region
// (retained). Out-of-range locations yield offset 0 and the empty singleton.
dispatch_data_t
dispatch_data_copy_region(dispatch_data_t dd, size_t location,
		size_t *offset_ptr)
{
	if (location >= dd->size) {
		*offset_ptr = 0;
		return dispatch_data_empty;
	}
	dispatch_data_t data;
	// Invariants while descending: `size`/`from` describe the current
	// record's subrange of its sub-object; `offset` is the region's start
	// within the original top-level object.
	size_t size = dd->size, offset = 0, from = 0;
	while (true) {
		if (dd->leaf) {
			_dispatch_data_retain(dd);
			*offset_ptr = offset;
			if (size == dd->size) {
				return dd;
			} else {
				// Create a new object for the requested subrange of the leaf
				data = _dispatch_data_init(1);
				data->size = size;
				data->records[0].from = from;
				data->records[0].length = size;
				data->records[0].data_object = dd;
				return data;
			}
		} else {
			// Find record at the specified location
			size_t i, pos;
			for (i = 0; i < dd->num_records; ++i) {
				pos = offset + dd->records[i].length;
				if (location < pos) {
					size = dd->records[i].length;
					from = dd->records[i].from;
					data = (dispatch_data_t)(dd->records[i].data_object);
					if (dd->num_records == 1 && data->leaf) {
						// Return objects composed of a single leaf node
						*offset_ptr = offset;
						_dispatch_data_retain(dd);
						return dd;
					} else {
						// Drill down into other objects
						dd = data;
						break;
					}
				} else {
					offset = pos;
				}
			}
		}
	}
}
447 }