/*
 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"

// Dispatch data objects are dispatch objects with standard retain/release
// memory management. A dispatch data object either points to a number of other
// dispatch data objects or is a leaf data object. A leaf data object contains
// a pointer to represented memory. A composite data object specifies the total
// size of the data it represents and a list of constituent records.
//
// A leaf data object has a single entry in records[]: the object size is the
// same as records[0].length and records[0].from is always 0. In other words, a
// leaf data object always points to a full represented buffer, so a composite
// dispatch data object is needed to represent a subrange of a memory region.
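//
// For illustration (hypothetical values, not part of the original sources):
// a 4-byte subrange at offset 8 of a 16-byte leaf is represented as a
// composite with a single record referencing the leaf:
//
//	leaf:     size = 16, records[0] = { from = 0, length = 16, data_object = buf }
//	subrange: size = 4,  records[0] = { from = 8, length = 4,  data_object = leaf }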

#define _dispatch_data_retain(x) dispatch_retain(x)
#define _dispatch_data_release(x) dispatch_release(x)

static void _dispatch_data_dispose(dispatch_data_t data);
static size_t _dispatch_data_debug(dispatch_data_t data, char* buf,
		size_t bufsiz);

#if DISPATCH_DATA_MOVABLE
static const dispatch_block_t _dispatch_data_destructor_unlock = ^{
	DISPATCH_CRASH("unlock destructor called");
};
#define DISPATCH_DATA_DESTRUCTOR_UNLOCK (_dispatch_data_destructor_unlock)
#endif

const struct dispatch_data_vtable_s _dispatch_data_vtable = {
	.do_type = DISPATCH_DATA_TYPE,
	.do_kind = "data",
	.do_dispose = _dispatch_data_dispose,
	.do_invoke = NULL,
	.do_probe = (void *)dummy_function_r0,
	.do_debug = _dispatch_data_debug,
};

static dispatch_data_t
_dispatch_data_init(size_t n)
{
	dispatch_data_t data = calloc(1ul, sizeof(struct dispatch_data_s) +
			n * sizeof(range_record));
	data->num_records = n;
	data->do_vtable = &_dispatch_data_vtable;
	data->do_xref_cnt = 1;
	data->do_ref_cnt = 1;
	data->do_targetq = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	data->do_next = DISPATCH_OBJECT_LISTLESS;
	return data;
}

dispatch_data_t
dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue,
		dispatch_block_t destructor)
{
	dispatch_data_t data;
	if (!buffer || !size) {
		// Empty data requested so return the singleton empty object. Call
		// destructor immediately in this case to ensure any unused associated
		// storage is released.
		if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
			free((void*)buffer);
		} else if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
			dispatch_async(queue ? queue : dispatch_get_global_queue(
					DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), destructor);
		}
		return dispatch_data_empty;
	}
	data = _dispatch_data_init(1);
	// Leaf objects always point to the entirety of the memory region
	data->leaf = true;
	data->size = size;
	data->records[0].from = 0;
	data->records[0].length = size;
	data->destructor = DISPATCH_DATA_DESTRUCTOR_FREE;
	if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
		// The default destructor was provided, indicating the data should be
		// copied.
		void *data_buf = malloc(size);
		if (slowpath(!data_buf)) {
			free(data);
			return NULL;
		}
		buffer = memcpy(data_buf, buffer, size);
	} else {
		if (destructor != DISPATCH_DATA_DESTRUCTOR_FREE) {
			data->destructor = Block_copy(destructor);
		}
#if DISPATCH_DATA_MOVABLE
		// A non-default destructor was provided, indicating the system does not
		// own the buffer. Mark the object as locked since the application has
		// direct access to the buffer and it cannot be reallocated/moved.
		data->locked = 1;
#endif
	}
	data->records[0].data_object = (void*)buffer;
	if (queue) {
		_dispatch_retain(queue);
		data->do_targetq = queue;
	}
	return data;
}
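
// Usage sketch (illustrative only, not part of the original sources): the
// default destructor copies the bytes, while DISPATCH_DATA_DESTRUCTOR_FREE
// hands an existing malloc'd buffer over to the object without copying:
//
//	char stack_bytes[4] = { 1, 2, 3, 4 };
//	// DISPATCH_DATA_DESTRUCTOR_DEFAULT: the bytes are copied, caller keeps them
//	dispatch_data_t copied = dispatch_data_create(stack_bytes, 4, NULL,
//			DISPATCH_DATA_DESTRUCTOR_DEFAULT);
//	char *buf = malloc(64);
//	// DISPATCH_DATA_DESTRUCTOR_FREE: the object adopts buf and will free() it
//	// when disposed
//	dispatch_data_t owned = dispatch_data_create(buf, 64, NULL,
//			DISPATCH_DATA_DESTRUCTOR_FREE);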

static void
_dispatch_data_dispose(dispatch_data_t dd)
{
	dispatch_block_t destructor = dd->destructor;
	if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
		size_t i;
		for (i = 0; i < dd->num_records; ++i) {
			_dispatch_data_release(dd->records[i].data_object);
		}
#if DISPATCH_DATA_MOVABLE
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_UNLOCK) {
		dispatch_data_t data = (dispatch_data_t)dd->records[0].data_object;
		(void)dispatch_atomic_dec2o(data, locked);
		_dispatch_data_release(data);
#endif
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
		free(dd->records[0].data_object);
	} else {
		dispatch_async_f(dd->do_targetq, destructor,
				_dispatch_call_block_and_release);
	}
	_dispatch_dispose(dd);
}

static size_t
_dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz)
{
	size_t offset = 0;
	if (dd->leaf) {
		offset += snprintf(&buf[offset], bufsiz - offset,
				"leaf: %d, size: %zd, data: %p", dd->leaf, dd->size,
				dd->records[0].data_object);
	} else {
		offset += snprintf(&buf[offset], bufsiz - offset,
				"leaf: %d, size: %zd, num_records: %zd", dd->leaf,
				dd->size, dd->num_records);
		size_t i;
		for (i = 0; i < dd->num_records; ++i) {
			range_record r = dd->records[i];
			offset += snprintf(&buf[offset], bufsiz - offset,
					"records[%zd] from: %zd, length %zd, data_object: %p", i,
					r.from, r.length, r.data_object);
		}
	}
	return offset;
}

size_t
dispatch_data_get_size(dispatch_data_t dd)
{
	return dd->size;
}

dispatch_data_t
dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2)
{
	dispatch_data_t data;
	if (!dd1->size) {
		_dispatch_data_retain(dd2);
		return dd2;
	}
	if (!dd2->size) {
		_dispatch_data_retain(dd1);
		return dd1;
	}
	data = _dispatch_data_init(dd1->num_records + dd2->num_records);
	data->size = dd1->size + dd2->size;
	// Copy the constituent records into the newly created data object
	memcpy(data->records, dd1->records, dd1->num_records *
			sizeof(range_record));
	memcpy(data->records + dd1->num_records, dd2->records, dd2->num_records *
			sizeof(range_record));
	// Reference leaf objects as sub-objects
	if (dd1->leaf) {
		data->records[0].data_object = dd1;
	}
	if (dd2->leaf) {
		data->records[dd1->num_records].data_object = dd2;
	}
	size_t i;
	for (i = 0; i < data->num_records; ++i) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}
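
// Usage sketch (illustrative; `a` and `b` are hypothetical data objects):
// concatenation copies no bytes, it builds a composite whose records retain
// dd1 and dd2 (or their sub-objects):
//
//	dispatch_data_t ab = dispatch_data_create_concat(a, b);
//	// dispatch_data_get_size(ab) == dispatch_data_get_size(a) +
//	//		dispatch_data_get_size(b)
//	dispatch_release(ab); // a and b remain independently valid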

dispatch_data_t
dispatch_data_create_subrange(dispatch_data_t dd, size_t offset,
		size_t length)
{
	dispatch_data_t data;
	if (offset >= dd->size || !length) {
		return dispatch_data_empty;
	} else if ((offset + length) > dd->size) {
		length = dd->size - offset;
	} else if (length == dd->size) {
		_dispatch_data_retain(dd);
		return dd;
	}
	if (dd->leaf) {
		data = _dispatch_data_init(1);
		data->size = length;
		data->records[0].from = offset;
		data->records[0].length = length;
		data->records[0].data_object = dd;
		_dispatch_data_retain(dd);
		return data;
	}
	// Subrange of a composite dispatch data object: find the record containing
	// the specified offset
	data = dispatch_data_empty;
	size_t i = 0, bytes_left = length;
	while (i < dd->num_records && offset >= dd->records[i].length) {
		offset -= dd->records[i++].length;
	}
	while (i < dd->num_records) {
		size_t record_len = dd->records[i].length - offset;
		if (record_len > bytes_left) {
			record_len = bytes_left;
		}
		dispatch_data_t subrange = dispatch_data_create_subrange(
				dd->records[i].data_object, dd->records[i].from + offset,
				record_len);
		dispatch_data_t concat = dispatch_data_create_concat(data, subrange);
		_dispatch_data_release(data);
		_dispatch_data_release(subrange);
		data = concat;
		bytes_left -= record_len;
		if (!bytes_left) {
			return data;
		}
		offset = 0;
		i++;
	}
	// Crashing here indicates memory corruption of the passed-in data object
	DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
	return NULL;
}
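
// Usage sketch (illustrative; `data` is a hypothetical 100-byte object):
//
//	// References bytes [25, 75) of data without copying them
//	dispatch_data_t mid = dispatch_data_create_subrange(data, 25, 50);
//	// A length that overruns the end is truncated rather than treated as an
//	// error; an offset past the end yields dispatch_data_empty
//	dispatch_data_t tail = dispatch_data_create_subrange(data, 90, 1000);
//	// dispatch_data_get_size(tail) == 10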

// When mapping a leaf object or a subrange of a leaf object, return a direct
// pointer to the represented buffer. For all other data objects, copy the
// represented buffers into a contiguous area. In the future it might
// be possible to relocate the buffers instead (if not marked as locked).
dispatch_data_t
dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr,
		size_t *size_ptr)
{
	dispatch_data_t data = dd;
	void *buffer = NULL;
	size_t size = dd->size, offset = 0;
	if (!size) {
		data = dispatch_data_empty;
		goto out;
	}
	if (!dd->leaf && dd->num_records == 1 &&
			((dispatch_data_t)dd->records[0].data_object)->leaf) {
		offset = dd->records[0].from;
		dd = (dispatch_data_t)(dd->records[0].data_object);
	}
	if (dd->leaf) {
#if DISPATCH_DATA_MOVABLE
		data = _dispatch_data_init(1);
		// Make sure the underlying leaf object does not move the backing buffer
		(void)dispatch_atomic_inc2o(dd, locked);
		data->size = size;
		data->destructor = DISPATCH_DATA_DESTRUCTOR_UNLOCK;
		data->records[0].data_object = dd;
		data->records[0].from = offset;
		data->records[0].length = size;
		_dispatch_data_retain(dd);
#else
		_dispatch_data_retain(data);
#endif
		buffer = dd->records[0].data_object + offset;
		goto out;
	}
	// Composite data object, copy the represented buffers
	buffer = malloc(size);
	if (!buffer) {
		data = NULL;
		size = 0;
		goto out;
	}
	dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
			size_t off, const void* buf, size_t len) {
		memcpy(buffer + off, buf, len);
		return (bool)true;
	});
	data = dispatch_data_create(buffer, size, NULL,
			DISPATCH_DATA_DESTRUCTOR_FREE);
out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	if (size_ptr) {
		*size_ptr = size;
	}
	return data;
}
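
// Usage sketch (illustrative): mapping yields a contiguous view of the bytes;
// the pointer is only valid while the returned object is retained. `process`
// is a hypothetical consumer:
//
//	const void *bytes;
//	size_t len;
//	dispatch_data_t map = dispatch_data_create_map(data, &bytes, &len);
//	if (map) { // NULL if the contiguous copy could not be allocated
//		process(bytes, len);
//		dispatch_release(map); // invalidates bytes
//	}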

static bool
_dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from,
		size_t size, dispatch_data_applier_t applier)
{
	bool result = true;
	dispatch_data_t data = dd;
	const void *buffer;
	dispatch_assert(dd->size);
#if DISPATCH_DATA_MOVABLE
	if (dd->leaf) {
		data = _dispatch_data_init(1);
		// Make sure the underlying leaf object does not move the backing buffer
		(void)dispatch_atomic_inc2o(dd, locked);
		data->size = size;
		data->destructor = DISPATCH_DATA_DESTRUCTOR_UNLOCK;
		data->records[0].data_object = dd;
		data->records[0].from = from;
		data->records[0].length = size;
		_dispatch_data_retain(dd);
		buffer = dd->records[0].data_object + from;
		result = applier(data, offset, buffer, size);
		_dispatch_data_release(data);
		return result;
	}
#else
	if (!dd->leaf && dd->num_records == 1 &&
			((dispatch_data_t)dd->records[0].data_object)->leaf) {
		from = dd->records[0].from;
		dd = (dispatch_data_t)(dd->records[0].data_object);
	}
	if (dd->leaf) {
		buffer = dd->records[0].data_object + from;
		return applier(data, offset, buffer, size);
	}
#endif
	size_t i;
	for (i = 0; i < dd->num_records && result; ++i) {
		result = _dispatch_data_apply(dd->records[i].data_object,
				offset, dd->records[i].from, dd->records[i].length,
				applier);
		offset += dd->records[i].length;
	}
	return result;
}

bool
dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, applier);
}
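
// Usage sketch (illustrative): the applier visits each contiguous region in
// order; returning false stops the traversal early:
//
//	bool ok = dispatch_data_apply(data,
//			^(dispatch_data_t region, size_t off, const void *buf, size_t len) {
//		// off is the region's offset from the start of data
//		return (bool)(fwrite(buf, 1, len, stdout) == len);
//	});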

// Returns either a leaf object or an object composed of a single leaf object
dispatch_data_t
dispatch_data_copy_region(dispatch_data_t dd, size_t location,
		size_t *offset_ptr)
{
	if (location >= dd->size) {
		*offset_ptr = 0;
		return dispatch_data_empty;
	}
	dispatch_data_t data;
	size_t size = dd->size, offset = 0, from = 0;
	while (true) {
		if (dd->leaf) {
			_dispatch_data_retain(dd);
			*offset_ptr = offset;
			if (size == dd->size) {
				return dd;
			} else {
				// Create a new object for the requested subrange of the leaf
				data = _dispatch_data_init(1);
				data->size = size;
				data->records[0].from = from;
				data->records[0].length = size;
				data->records[0].data_object = dd;
				return data;
			}
		} else {
			// Find record at the specified location
			size_t i, pos;
			for (i = 0; i < dd->num_records; ++i) {
				pos = offset + dd->records[i].length;
				if (location < pos) {
					size = dd->records[i].length;
					from = dd->records[i].from;
					data = (dispatch_data_t)(dd->records[i].data_object);
					if (dd->num_records == 1 && data->leaf) {
						// Return objects composed of a single leaf node
						*offset_ptr = offset;
						_dispatch_data_retain(dd);
						return dd;
					} else {
						// Drill down into other objects
						dd = data;
						break;
					}
				} else {
					offset = pos;
				}
			}
		}
	}
}
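
// Usage sketch (illustrative): find the contiguous region containing byte 42
// of a hypothetical composite `data`:
//
//	size_t region_offset;
//	dispatch_data_t region = dispatch_data_copy_region(data, 42, &region_offset);
//	// region_offset is the region's start relative to data, so byte 42 lives
//	// at offset (42 - region_offset) within the returned region
//	dispatch_release(region);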