/*
 * Copyright (c) 2009-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"

// Dispatch data objects are dispatch objects with standard retain/release
// memory management. A dispatch data object either points to a number of other
// dispatch data objects or is a leaf data object. A leaf data object contains
// a pointer to represented memory. A composite data object specifies the total
// size of data it represents and a list of constituent records.
//
// A leaf data object always points to a full represented buffer; a composite
// dispatch data object is needed to represent a subrange of a memory region.
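//
// For example (an illustrative sketch, not part of this file): wrapping a
// malloc'd buffer yields a leaf object, while taking a subrange of it yields
// a composite object holding a single record that references the leaf:
//
//     void *buf = malloc(64);
//     dispatch_data_t leaf = dispatch_data_create(buf, 64, NULL,
//             DISPATCH_DATA_DESTRUCTOR_FREE); // leaf: owns buf
//     dispatch_data_t sub = dispatch_data_create_subrange(leaf, 8, 16);
//     // 'sub' is composite: one record { from = 8, length = 16, leaf }
//     dispatch_release(sub);
//     dispatch_release(leaf);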

#if USE_OBJC
#define _dispatch_data_retain(x) _dispatch_objc_retain(x)
#define _dispatch_data_release(x) _dispatch_objc_release(x)
#else
#define _dispatch_data_retain(x) dispatch_retain(x)
#define _dispatch_data_release(x) dispatch_release(x)
#endif

const dispatch_block_t _dispatch_data_destructor_free = ^{
	DISPATCH_CRASH("free destructor called");
};

const dispatch_block_t _dispatch_data_destructor_none = ^{
	DISPATCH_CRASH("none destructor called");
};

const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{
	DISPATCH_CRASH("vmdeallocate destructor called");
};

const dispatch_block_t _dispatch_data_destructor_inline = ^{
	DISPATCH_CRASH("inline destructor called");
};

struct dispatch_data_s _dispatch_data_empty = {
	.do_vtable = DISPATCH_DATA_EMPTY_CLASS,
#if !USE_OBJC
	.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	.do_next = DISPATCH_OBJECT_LISTLESS,
#endif
};

DISPATCH_ALWAYS_INLINE
static inline dispatch_data_t
_dispatch_data_alloc(size_t n, size_t extra)
{
	dispatch_data_t data = _dispatch_alloc(DISPATCH_DATA_CLASS,
			sizeof(struct dispatch_data_s) + extra +
			(n ? n * sizeof(range_record) - sizeof(data->buf) : 0));
	data->num_records = n;
#if !USE_OBJC
	data->do_targetq = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	data->do_next = DISPATCH_OBJECT_LISTLESS;
#endif
	return data;
}

static void
_dispatch_data_destroy_buffer(const void* buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
		free((void*)buffer);
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) {
		// do nothing
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) {
		mach_vm_size_t vm_size = size;
		mach_vm_address_t vm_addr = (uintptr_t)buffer;
		mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
	} else {
		if (!queue) {
			queue = dispatch_get_global_queue(
					DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
		}
		dispatch_async_f(queue, destructor, _dispatch_call_block_and_release);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	data->buf = buffer;
	data->size = size;
	data->destructor = destructor;
#if DISPATCH_DATA_USE_LEAF_MEMBER
	data->leaf = true;
	data->num_records = 1;
#endif
	if (queue) {
		_dispatch_retain(queue);
		data->do_targetq = queue;
	}
}

void
dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_block_t destructor)
{
	if (!buffer || !size) {
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, NULL,
					_dispatch_Block_copy(destructor));
		}
		buffer = NULL;
		size = 0;
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	}
	_dispatch_data_init(data, buffer, size, NULL, destructor);
}

dispatch_data_t
dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue,
		dispatch_block_t destructor)
{
	dispatch_data_t data;
	void *data_buf = NULL;
	if (!buffer || !size) {
		// Empty data requested, so return the singleton empty object. Call
		// the destructor immediately in this case to ensure any unused
		// associated storage is released.
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, queue,
					_dispatch_Block_copy(destructor));
		}
		return dispatch_data_empty;
	}
	if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
		// The default destructor was provided, indicating the data should be
		// copied.
		data_buf = malloc(size);
		if (slowpath(!data_buf)) {
			return NULL;
		}
		buffer = memcpy(data_buf, buffer, size);
		data = _dispatch_data_alloc(0, 0);
		destructor = DISPATCH_DATA_DESTRUCTOR_FREE;
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_INLINE) {
		data = _dispatch_data_alloc(0, size);
		buffer = memcpy((void*)data + sizeof(struct dispatch_data_s), buffer,
				size);
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	} else {
		data = _dispatch_data_alloc(0, 0);
		destructor = _dispatch_Block_copy(destructor);
	}
	_dispatch_data_init(data, buffer, size, queue, destructor);
	return data;
}
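
// Usage sketch (illustrative, not part of this file): the default destructor
// copies the bytes immediately, so the caller keeps ownership of its buffer;
// DISPATCH_DATA_DESTRUCTOR_FREE transfers ownership of a malloc'd buffer.
//
//     char stack_buf[] = "hello";
//     dispatch_data_t copied = dispatch_data_create(stack_buf,
//             sizeof(stack_buf), NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT);
//
//     void *heap_buf = malloc(1024);
//     dispatch_data_t owned = dispatch_data_create(heap_buf, 1024, NULL,
//             DISPATCH_DATA_DESTRUCTOR_FREE); // heap_buf freed on release
//     dispatch_release(owned);
//     dispatch_release(copied);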

dispatch_data_t
dispatch_data_create_f(const void *buffer, size_t size, dispatch_queue_t queue,
		dispatch_function_t destructor_function)
{
	dispatch_block_t destructor = (dispatch_block_t)destructor_function;
	if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT &&
			destructor != DISPATCH_DATA_DESTRUCTOR_FREE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_NONE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_INLINE) {
		destructor = ^{ destructor_function((void*)buffer); };
	}
	return dispatch_data_create(buffer, size, queue, destructor);
}
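
// Usage sketch (illustrative; dispatch_data_create_f is a private interface
// in this version): a plain function pointer is wrapped in a block that
// receives the buffer pointer when the data object is disposed.
//
//     static void my_free(void *buf) { free(buf); } // hypothetical helper
//     ...
//     void *buf = malloc(256);
//     dispatch_data_t d = dispatch_data_create_f(buf, 256, NULL, my_free);
//     dispatch_release(d); // my_free(buf) runs asynchronously on a queue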

dispatch_data_t
dispatch_data_create_alloc(size_t size, void** buffer_ptr)
{
	dispatch_data_t data = dispatch_data_empty;
	void *buffer = NULL;

	if (slowpath(!size)) {
		goto out;
	}
	data = _dispatch_data_alloc(0, size);
	buffer = (void*)data + sizeof(struct dispatch_data_s);
	_dispatch_data_init(data, buffer, size, NULL,
			DISPATCH_DATA_DESTRUCTOR_NONE);
out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	return data;
}
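
// Usage sketch (illustrative; dispatch_data_create_alloc is a private
// interface): the object header and its buffer share a single allocation,
// and the caller fills the returned buffer in place.
//
//     void *buf;
//     dispatch_data_t d = dispatch_data_create_alloc(512, &buf);
//     memset(buf, 0, 512); // write directly into the inline storage
//     dispatch_release(d);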

void
_dispatch_data_dispose(dispatch_data_t dd)
{
	dispatch_block_t destructor = dd->destructor;
	if (destructor == NULL) {
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			_dispatch_data_release(dd->records[i].data_object);
		}
	} else {
		_dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq,
				destructor);
	}
}

size_t
_dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz)
{
	size_t offset = 0;
	offset += dsnprintf(&buf[offset], bufsiz - offset, "data[%p] = { ", dd);
	if (_dispatch_data_leaf(dd)) {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"leaf, size = %zu, buf = %p ", dd->size, dd->buf);
	} else {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"composite, size = %zu, num_records = %zu ", dd->size,
				_dispatch_data_num_records(dd));
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			range_record r = dd->records[i];
			offset += dsnprintf(&buf[offset], bufsiz - offset, "record[%zu] = "
					"{ from = %zu, length = %zu, data_object = %p }, ", i,
					r.from, r.length, r.data_object);
		}
	}
	offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
	return offset;
}

size_t
dispatch_data_get_size(dispatch_data_t dd)
{
	return dd->size;
}

dispatch_data_t
dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2)
{
	dispatch_data_t data;
	if (!dd1->size) {
		_dispatch_data_retain(dd2);
		return dd2;
	}
	if (!dd2->size) {
		_dispatch_data_retain(dd1);
		return dd1;
	}
	data = _dispatch_data_alloc(_dispatch_data_num_records(dd1) +
			_dispatch_data_num_records(dd2), 0);
	data->size = dd1->size + dd2->size;
	// Copy the constituent records into the newly created data object
	// Reference leaf objects as sub-objects
	if (_dispatch_data_leaf(dd1)) {
		data->records[0].from = 0;
		data->records[0].length = dd1->size;
		data->records[0].data_object = dd1;
	} else {
		memcpy(data->records, dd1->records, _dispatch_data_num_records(dd1) *
				sizeof(range_record));
	}
	if (_dispatch_data_leaf(dd2)) {
		data->records[_dispatch_data_num_records(dd1)].from = 0;
		data->records[_dispatch_data_num_records(dd1)].length = dd2->size;
		data->records[_dispatch_data_num_records(dd1)].data_object = dd2;
	} else {
		memcpy(data->records + _dispatch_data_num_records(dd1), dd2->records,
				_dispatch_data_num_records(dd2) * sizeof(range_record));
	}
	size_t i;
	for (i = 0; i < _dispatch_data_num_records(data); ++i) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}
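
// Usage sketch (illustrative): concatenation never copies bytes; it builds a
// composite object whose records reference the constituent objects, so its
// cost scales with the number of records, not the amount of data.
//
//     dispatch_data_t ab = dispatch_data_create_concat(a, b);
//     // dispatch_data_get_size(ab) == dispatch_data_get_size(a)
//     //         + dispatch_data_get_size(b); 'a' and 'b' remain valid
//     //         and are retained by 'ab'
//     dispatch_release(ab);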

dispatch_data_t
dispatch_data_create_subrange(dispatch_data_t dd, size_t offset,
		size_t length)
{
	dispatch_data_t data;
	if (offset >= dd->size || !length) {
		return dispatch_data_empty;
	} else if ((offset + length) > dd->size) {
		length = dd->size - offset;
	} else if (length == dd->size) {
		_dispatch_data_retain(dd);
		return dd;
	}
	if (_dispatch_data_leaf(dd)) {
		data = _dispatch_data_alloc(1, 0);
		data->size = length;
		data->records[0].from = offset;
		data->records[0].length = length;
		data->records[0].data_object = dd;
		_dispatch_data_retain(dd);
		return data;
	}
	// Subrange of a composite dispatch data object: find the record containing
	// the specified offset
	data = dispatch_data_empty;
	size_t i = 0, bytes_left = length;
	while (i < _dispatch_data_num_records(dd) &&
			offset >= dd->records[i].length) {
		offset -= dd->records[i++].length;
	}
	while (i < _dispatch_data_num_records(dd)) {
		size_t record_len = dd->records[i].length - offset;
		if (record_len > bytes_left) {
			record_len = bytes_left;
		}
		dispatch_data_t subrange = dispatch_data_create_subrange(
				dd->records[i].data_object, dd->records[i].from + offset,
				record_len);
		dispatch_data_t concat = dispatch_data_create_concat(data, subrange);
		_dispatch_data_release(data);
		_dispatch_data_release(subrange);
		data = concat;
		bytes_left -= record_len;
		if (!bytes_left) {
			return data;
		}
		offset = 0;
		i++;
	}
	// Crashing here indicates memory corruption of the passed-in data object
	DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
	return NULL;
}
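
// Usage sketch (illustrative): a subrange is also zero-copy; it references
// the underlying leaf objects and clamps to the source object's bounds.
//
//     // bytes [100, 100 + 4096) of 'd', without copying them:
//     dispatch_data_t slice = dispatch_data_create_subrange(d, 100, 4096);
//     dispatch_release(slice);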

// When mapping a leaf object or a subrange of a leaf object, return a direct
// pointer to the represented buffer. For all other data objects, copy the
// represented buffers into a contiguous area. In the future it might
// be possible to relocate the buffers instead (if not marked as locked).
dispatch_data_t
dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr,
		size_t *size_ptr)
{
	dispatch_data_t data = dd;
	const void *buffer = NULL;
	size_t size = dd->size, offset = 0;
	if (!size) {
		data = dispatch_data_empty;
		goto out;
	}
	if (!_dispatch_data_leaf(dd) && _dispatch_data_num_records(dd) == 1 &&
			_dispatch_data_leaf(dd->records[0].data_object)) {
		offset = dd->records[0].from;
		dd = dd->records[0].data_object;
	}
	if (_dispatch_data_leaf(dd)) {
		_dispatch_data_retain(data);
		buffer = dd->buf + offset;
		goto out;
	}
	// Composite data object, copy the represented buffers
	buffer = malloc(size);
	if (!buffer) {
		data = NULL;
		size = 0;
		goto out;
	}
	dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
			size_t off, const void* buf, size_t len) {
		memcpy((void*)buffer + off, buf, len);
		return (bool)true;
	});
	data = dispatch_data_create(buffer, size, NULL,
			DISPATCH_DATA_DESTRUCTOR_FREE);
out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	if (size_ptr) {
		*size_ptr = size;
	}
	return data;
}
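
// Usage sketch (illustrative): the returned object owns the contiguous
// mapping, so the pointer stays valid only as long as that object is held.
//
//     const void *bytes;
//     size_t len;
//     dispatch_data_t map = dispatch_data_create_map(d, &bytes, &len);
//     if (map) {
//         process(bytes, len);   // 'process' is a hypothetical consumer
//         dispatch_release(map); // invalidates 'bytes'
//     }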

static bool
_dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from,
		size_t size, void *ctxt, dispatch_data_applier_function_t applier)
{
	bool result = true;
	dispatch_data_t data = dd;
	const void *buffer;
	dispatch_assert(dd->size);
	if (!_dispatch_data_leaf(dd) && _dispatch_data_num_records(dd) == 1 &&
			_dispatch_data_leaf(dd->records[0].data_object)) {
		from = dd->records[0].from;
		dd = dd->records[0].data_object;
	}
	if (_dispatch_data_leaf(dd)) {
		buffer = dd->buf + from;
		return _dispatch_client_callout3(ctxt, data, offset, buffer, size,
				applier);
	}
	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd) && result; ++i) {
		result = _dispatch_data_apply(dd->records[i].data_object,
				offset, dd->records[i].from, dd->records[i].length, ctxt,
				applier);
		offset += dd->records[i].length;
	}
	return result;
}

bool
dispatch_data_apply_f(dispatch_data_t dd, void *ctxt,
		dispatch_data_applier_function_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, ctxt, applier);
}

bool
dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, applier,
			(dispatch_data_applier_function_t)_dispatch_Block_invoke(applier));
}
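
// Usage sketch (illustrative): the applier block is invoked once per
// contiguous region, in order; returning false stops the traversal early.
//
//     __block size_t total = 0;
//     dispatch_data_apply(d, ^bool(dispatch_data_t region, size_t offset,
//             const void *buf, size_t len) {
//         total += len; // inspect buf[0..len) at logical offset 'offset'
//         return true;  // continue with the next region
//     });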

// Returns either a leaf object or an object composed of a single leaf object
dispatch_data_t
dispatch_data_copy_region(dispatch_data_t dd, size_t location,
		size_t *offset_ptr)
{
	if (location >= dd->size) {
		*offset_ptr = 0;
		return dispatch_data_empty;
	}
	dispatch_data_t data;
	size_t size = dd->size, offset = 0, from = 0;
	while (true) {
		if (_dispatch_data_leaf(dd)) {
			_dispatch_data_retain(dd);
			*offset_ptr = offset;
			if (size == dd->size) {
				return dd;
			} else {
				// Create a new object for the requested subrange of the leaf
				data = _dispatch_data_alloc(1, 0);
				data->size = size;
				data->records[0].from = from;
				data->records[0].length = size;
				data->records[0].data_object = dd;
				return data;
			}
		} else {
			// Find the record at the specified location
			size_t i, pos;
			for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
				pos = offset + dd->records[i].length;
				if (location < pos) {
					size = dd->records[i].length;
					from = dd->records[i].from;
					data = dd->records[i].data_object;
					if (_dispatch_data_num_records(dd) == 1 &&
							_dispatch_data_leaf(data)) {
						// Return objects composed of a single leaf node
						*offset_ptr = offset;
						_dispatch_data_retain(dd);
						return dd;
					} else {
						// Drill down into other objects
						dd = data;
						break;
					}
				} else {
					offset = pos;
				}
			}
		}
	}
}
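
// Usage sketch (illustrative): copy_region answers "which contiguous region
// contains the byte at 'location', and where does that region start?"
//
//     size_t region_start;
//     dispatch_data_t region = dispatch_data_copy_region(d, 5000,
//             &region_start);
//     // byte 5000 of 'd' is at index (5000 - region_start) within 'region'
//     dispatch_release(region);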

#if HAVE_MACH

#ifndef MAP_MEM_VM_COPY
#define MAP_MEM_VM_COPY 0x200000 // <rdar://problem/13336613>
#endif

mach_port_t
dispatch_data_make_memory_entry(dispatch_data_t dd)
{
	mach_port_t mep = MACH_PORT_NULL;
	memory_object_size_t mos;
	mach_vm_size_t vm_size = dd->size;
	mach_vm_address_t vm_addr;
	vm_prot_t flags;
	kern_return_t kr;
	bool copy = (dd->destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE);

retry:
	if (copy) {
		vm_addr = vm_page_size;
		kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size,
				VM_FLAGS_ANYWHERE);
		if (kr) {
			if (kr != KERN_NO_SPACE) {
				(void)dispatch_assume_zero(kr);
			}
			return mep;
		}
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy((void*)(vm_addr + off), buf, len);
			return (bool)true;
		});
	} else {
		vm_addr = (uintptr_t)dd->buf;
	}
	flags = VM_PROT_DEFAULT|VM_PROT_IS_MASK|MAP_MEM_VM_COPY;
	mos = vm_size;
	kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
			&mep, MACH_PORT_NULL);
	if (kr == KERN_INVALID_VALUE) {
		// Fallback in case MAP_MEM_VM_COPY is not supported
		flags &= ~MAP_MEM_VM_COPY;
		kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
				&mep, MACH_PORT_NULL);
	}
	if (dispatch_assume_zero(kr)) {
		mep = MACH_PORT_NULL;
	} else if (mos < vm_size) {
		// Memory object was truncated, e.g. due to lack of MAP_MEM_VM_COPY
		kr = mach_port_deallocate(mach_task_self(), mep);
		(void)dispatch_assume_zero(kr);
		if (!copy) {
			copy = true;
			goto retry;
		}
		mep = MACH_PORT_NULL;
	}
	if (copy) {
		kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
		(void)dispatch_assume_zero(kr);
	}
	return mep;
}
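
// Usage sketch (illustrative; dispatch_data_make_memory_entry is a private
// interface): the returned send right can be mapped with mach_vm_map() to
// share the represented bytes with another address range or task.
//
//     mach_port_t entry = dispatch_data_make_memory_entry(d);
//     if (MACH_PORT_VALID(entry)) {
//         mach_vm_address_t addr = 0;
//         kern_return_t kr = mach_vm_map(mach_task_self(), &addr,
//                 dispatch_data_get_size(d), 0, VM_FLAGS_ANYWHERE, entry, 0,
//                 FALSE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
//         ...
//         (void)mach_port_deallocate(mach_task_self(), entry);
//     }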
#endif // HAVE_MACH