/*
 * Copyright (c) 2009-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"
/*
 * Dispatch data objects are dispatch objects with standard retain/release
 * memory management. A dispatch data object either points to a number of other
 * dispatch data objects or is a leaf data object.
 * A composite data object specifies the total size of data it represents
 * and the list of constituent records.
 *
 *******************************************************************************
 *
 * CURRENT IMPLEMENTATION DETAILS
 *
 * There are actually 3 kinds of composite objects:
 * - trivial subranges
 * - unflattened composite data objects
 * - flattened composite data objects
 *
 * LEAVES (num_records == 0, destructor != nil)
 *
 * These objects have a pointer to the represented memory in `buf`.
 *
 * UNFLATTENED (num_records > 1, buf == nil, destructor == nil)
 *
 * This is the generic case of a composite object.
 *
 * FLATTENED (num_records > 1, buf != nil, destructor == nil)
 *
 * These objects are non-trivial composite objects whose `buf` pointer
 * is a contiguous representation (copied) of the memory they represent.
 *
 * Such objects are created when an unflattened composite object is used as an
 * NSData and -bytes is called on it.
 * The underlying implementation is _dispatch_data_get_flattened_bytes().
 *
 * TRIVIAL SUBRANGES (num_records == 1, buf == nil, destructor == nil)
 *
 * These objects point to a single leaf, never to flattened objects.
 *
 *******************************************************************************
 *
 * Non-trivial invariants:
 *
 * It is forbidden to point into a composite data object and ignore entire
 * records from it (for example by having `from` larger than the first
 * record's length).
 *
 * dispatch_data_t's are either leaves, or composite objects pointing to
 * leaves. Depth is never greater than 1.
 *
 *******************************************************************************
 *
 * There are 4 dispatch_data_t constructors that may create non-leaf objects
 * and must ensure the proper invariants:
 *
 * dispatch_data_copy_region()
 * This function first sees through trivial subranges, and may in turn
 * generate new trivial subranges.
 *
 * dispatch_data_create_map()
 * This function either returns an existing data object, or a leaf.
 *
 * dispatch_data_create_subrange()
 * This function treats flattened objects like unflattened ones,
 * recurses into trivial subranges, and can create trivial subranges.
 *
 * dispatch_data_create_concat()
 * This function unwraps the top-level composite objects, trivial or not,
 * and concatenates the two arguments' range lists, hence always creating
 * unflattened objects, unless one of the arguments was empty.
 *
 *******************************************************************************
 */

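/*
 * Illustrative sketch (not part of the implementation): how the public API
 * exercises the constructors described above. The buffer contents below are
 * hypothetical.
 *
 *	const char msg[] = "hello, world";
 *	// DESTRUCTOR_DEFAULT copies the bytes, yielding a leaf object.
 *	dispatch_data_t a = dispatch_data_create(msg, sizeof(msg) - 1, NULL,
 *			DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *	// Concatenation produces an unflattened composite object.
 *	dispatch_data_t aa = dispatch_data_create_concat(a, a);
 *	// A subrange that falls within a single leaf becomes a trivial subrange.
 *	dispatch_data_t sub = dispatch_data_create_subrange(aa, 2, 5);
 *	dispatch_release(sub);
 *	dispatch_release(aa);
 *	dispatch_release(a);
 */
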
#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
#define _dispatch_data_retain(x) _dispatch_objc_retain(x)
#define _dispatch_data_release(x) _dispatch_objc_release(x)
#else
#define _dispatch_data_retain(x) dispatch_retain(x)
#define _dispatch_data_release(x) dispatch_release(x)
#endif

DISPATCH_ALWAYS_INLINE
static inline dispatch_data_t
_dispatch_data_alloc(size_t n, size_t extra)
{
	dispatch_data_t data;
	size_t size;
	size_t base_size;

	if (os_add_overflow(sizeof(struct dispatch_data_s), extra, &base_size)) {
		return DISPATCH_OUT_OF_MEMORY;
	}
	if (os_mul_and_add_overflow(n, sizeof(range_record), base_size, &size)) {
		return DISPATCH_OUT_OF_MEMORY;
	}

	data = _dispatch_object_alloc(DISPATCH_DATA_CLASS, size);
	data->num_records = n;
#if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
	data->do_targetq = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	data->do_next = DISPATCH_OBJECT_LISTLESS;
#endif
	return data;
}

void
_dispatch_data_destroy_buffer(const void* buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
		free((void*)buffer);
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) {
		// do nothing
#if HAVE_MACH
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) {
		mach_vm_size_t vm_size = size;
		mach_vm_address_t vm_addr = (uintptr_t)buffer;
		mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
#endif
	} else {
		if (!queue) {
			queue = dispatch_get_global_queue(
					DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
		}
		dispatch_async_f(queue, destructor, _dispatch_call_block_and_release);
	}
}

DISPATCH_ALWAYS_INLINE
static void
_dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	data->buf = buffer;
	data->size = size;
	data->destructor = destructor;
	if (queue) {
		_dispatch_retain(queue);
		data->do_targetq = queue;
	}
}

void
_dispatch_data_init_with_bytes(dispatch_data_t data, const void *buffer,
		size_t size, dispatch_block_t destructor)
{
	if (!buffer || !size) {
		// Empty data: run the destructor immediately so any associated
		// storage is released, then fall back to the empty representation.
		_dispatch_data_destroy_buffer(buffer, size, NULL,
				_dispatch_Block_copy(destructor));
		buffer = NULL;
		size = 0;
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	}
	_dispatch_data_init(data, buffer, size, NULL, destructor);
}

dispatch_data_t
dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue,
		dispatch_block_t destructor)
{
	dispatch_data_t data;
	void *data_buf = NULL;
	if (!buffer || !size) {
		// Empty data requested so return the singleton empty object. Call
		// destructor immediately in this case to ensure any unused associated
		// storage is released.
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, queue,
					_dispatch_Block_copy(destructor));
		}
		return dispatch_data_empty;
	}
	if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
		// The default destructor was provided, indicating the data should be
		// copied.
		data_buf = malloc(size);
		if (slowpath(!data_buf)) {
			return DISPATCH_OUT_OF_MEMORY;
		}
		buffer = memcpy(data_buf, buffer, size);
		data = _dispatch_data_alloc(0, 0);
		destructor = DISPATCH_DATA_DESTRUCTOR_FREE;
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_INLINE) {
		data = _dispatch_data_alloc(0, size);
		buffer = memcpy((void*)data + sizeof(struct dispatch_data_s), buffer,
				size);
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	} else {
		data = _dispatch_data_alloc(0, 0);
		destructor = _dispatch_Block_copy(destructor);
	}
	_dispatch_data_init(data, buffer, size, queue, destructor);
	return data;
}

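/*
 * Destructor semantics, sketched for illustration; `buf`, `len`, `queue` and
 * my_release() are hypothetical names, not part of this file.
 *
 *	// DESTRUCTOR_DEFAULT: the bytes are copied, the caller keeps ownership.
 *	dispatch_data_t copied = dispatch_data_create(buf, len, NULL,
 *			DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *	// DESTRUCTOR_FREE: the object takes ownership of a malloc'd buffer.
 *	void *heap = malloc(len);
 *	memcpy(heap, buf, len);
 *	dispatch_data_t owned = dispatch_data_create(heap, len, NULL,
 *			DISPATCH_DATA_DESTRUCTOR_FREE);
 *	// Custom destructor block: submitted to `queue` (or a default global
 *	// queue when queue is NULL) once the last reference goes away.
 *	dispatch_data_t wrapped = dispatch_data_create(buf, len, queue,
 *			^{ my_release(buf); });
 */
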
dispatch_data_t
dispatch_data_create_f(const void *buffer, size_t size, dispatch_queue_t queue,
		dispatch_function_t destructor_function)
{
	dispatch_block_t destructor = (dispatch_block_t)destructor_function;
	if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT &&
			destructor != DISPATCH_DATA_DESTRUCTOR_FREE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_NONE &&
#if HAVE_MACH
			destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE &&
#endif
			destructor != DISPATCH_DATA_DESTRUCTOR_INLINE) {
		destructor = ^{ destructor_function((void*)buffer); };
	}
	return dispatch_data_create(buffer, size, queue, destructor);
}

dispatch_data_t
dispatch_data_create_alloc(size_t size, void** buffer_ptr)
{
	dispatch_data_t data = dispatch_data_empty;
	void *buffer = NULL;

	if (slowpath(!size)) {
		goto out;
	}
	data = _dispatch_data_alloc(0, size);
	buffer = (void*)data + sizeof(struct dispatch_data_s);
	_dispatch_data_init(data, buffer, size, NULL,
			DISPATCH_DATA_DESTRUCTOR_NONE);
out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	return data;
}

void
_dispatch_data_dispose(dispatch_data_t dd, DISPATCH_UNUSED bool *allow_free)
{
	if (_dispatch_data_leaf(dd)) {
		_dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq,
				dd->destructor);
	} else {
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			_dispatch_data_release(dd->records[i].data_object);
		}
		free((void *)dd->buf);
	}
}

void
_dispatch_data_set_target_queue(dispatch_data_t dd, dispatch_queue_t tq)
{
#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
	_dispatch_retain(tq);
	tq = os_atomic_xchg2o(dd, do_targetq, tq, release);
	if (tq) _dispatch_release(tq);
#else
	_dispatch_object_set_target_queue_inline(dd, tq);
#endif
}

size_t
_dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz)
{
	size_t offset = 0;
	offset += dsnprintf(&buf[offset], bufsiz - offset, "data[%p] = { ", dd);
	if (_dispatch_data_leaf(dd)) {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"leaf, size = %zd, buf = %p ", dd->size, dd->buf);
	} else {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"composite, size = %zd, num_records = %zd ", dd->size,
				_dispatch_data_num_records(dd));
		if (dd->buf) {
			offset += dsnprintf(&buf[offset], bufsiz - offset,
					", flatbuf = %p ", dd->buf);
		}
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			range_record r = dd->records[i];
			offset += dsnprintf(&buf[offset], bufsiz - offset, "record[%zd] = "
					"{ from = %zd, length = %zd, data_object = %p }, ", i,
					r.from, r.length, r.data_object);
		}
	}
	offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
	return offset;
}

size_t
dispatch_data_get_size(dispatch_data_t dd)
{
	return dd->size;
}

dispatch_data_t
dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2)
{
	dispatch_data_t data;
	size_t n;

	if (!dd1->size) {
		_dispatch_data_retain(dd2);
		return dd2;
	}
	if (!dd2->size) {
		_dispatch_data_retain(dd1);
		return dd1;
	}

	if (os_add_overflow(_dispatch_data_num_records(dd1),
			_dispatch_data_num_records(dd2), &n)) {
		return DISPATCH_OUT_OF_MEMORY;
	}
	data = _dispatch_data_alloc(n, 0);
	data->size = dd1->size + dd2->size;
	// Copy the constituent records into the newly created data object
	// Reference leaf objects as sub-objects
	if (_dispatch_data_leaf(dd1)) {
		data->records[0].from = 0;
		data->records[0].length = dd1->size;
		data->records[0].data_object = dd1;
	} else {
		memcpy(data->records, dd1->records, _dispatch_data_num_records(dd1) *
				sizeof(range_record));
	}
	if (_dispatch_data_leaf(dd2)) {
		data->records[_dispatch_data_num_records(dd1)].from = 0;
		data->records[_dispatch_data_num_records(dd1)].length = dd2->size;
		data->records[_dispatch_data_num_records(dd1)].data_object = dd2;
	} else {
		memcpy(data->records + _dispatch_data_num_records(dd1), dd2->records,
				_dispatch_data_num_records(dd2) * sizeof(range_record));
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(data); ++i) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}

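/*
 * Sketch: concatenation references the constituent leaves instead of copying
 * their bytes, which makes accumulating chunks cheap. The `chunks` array and
 * `nchunks` below are hypothetical.
 *
 *	dispatch_data_t acc = dispatch_data_empty;
 *	for (size_t i = 0; i < nchunks; i++) {
 *		dispatch_data_t chunk = dispatch_data_create(chunks[i].ptr,
 *				chunks[i].len, NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *		dispatch_data_t next = dispatch_data_create_concat(acc, chunk);
 *		dispatch_release(chunk);
 *		dispatch_release(acc);
 *		acc = next;
 *	}
 */
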
dispatch_data_t
dispatch_data_create_subrange(dispatch_data_t dd, size_t offset,
		size_t length)
{
	dispatch_data_t data;

	if (offset >= dd->size || !length) {
		return dispatch_data_empty;
	} else if (length > dd->size - offset) {
		length = dd->size - offset;
	} else if (length == dd->size) {
		_dispatch_data_retain(dd);
		return dd;
	}

	/*
	 * we must only optimize leaves and not flattened objects
	 * because lots of users want to keep the end of a buffer and release
	 * as much memory as they can from the beginning of it
	 *
	 * Using the flatbuf here would be very wrong with respect to that goal
	 */
	if (_dispatch_data_leaf(dd)) {
		data = _dispatch_data_alloc(1, 0);
		data->size = length;
		data->records[0].from = offset;
		data->records[0].length = length;
		data->records[0].data_object = dd;
		_dispatch_data_retain(dd);
		return data;
	}

	// Subrange of a composite dispatch data object
	const size_t dd_num_records = _dispatch_data_num_records(dd);
	bool to_the_end = (offset + length == dd->size);
	size_t i = 0;

	// find the record containing the specified offset
	while (i < dd_num_records && offset >= dd->records[i].length) {
		offset -= dd->records[i++].length;
	}

	// Crashing here indicates memory corruption of passed in data object
	if (slowpath(i >= dd_num_records)) {
		DISPATCH_INTERNAL_CRASH(i,
				"dispatch_data_create_subrange out of bounds");
	}

	// if everything is from a single dispatch data object, avoid boxing it
	if (offset + length <= dd->records[i].length) {
		return dispatch_data_create_subrange(dd->records[i].data_object,
				dd->records[i].from + offset, length);
	}

	// find the record containing the end of the current range
	// and optimize the case when you just remove bytes at the origin
	size_t count, last_length = 0;

	if (to_the_end) {
		count = dd_num_records - i;
	} else {
		last_length = length - (dd->records[i].length - offset);
		count = 1;

		while (i + count < dd_num_records) {
			size_t record_length = dd->records[i + count++].length;

			if (last_length <= record_length) {
				break;
			}
			last_length -= record_length;

			// Crashing here indicates memory corruption of passed in data object
			if (slowpath(i + count >= dd_num_records)) {
				DISPATCH_INTERNAL_CRASH(i + count,
						"dispatch_data_create_subrange out of bounds");
			}
		}
	}

	data = _dispatch_data_alloc(count, 0);
	data->size = length;
	memcpy(data->records, dd->records + i, count * sizeof(range_record));

	if (offset) {
		data->records[0].from += offset;
		data->records[0].length -= offset;
	}
	if (!to_the_end) {
		data->records[count - 1].length = last_length;
	}

	for (i = 0; i < count; i++) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}

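/*
 * Sketch of the pattern the comment above optimizes for: consuming the front
 * of a composite buffer while letting fully consumed leaves be released.
 * `received` and `consumed` are hypothetical.
 *
 *	size_t remaining = dispatch_data_get_size(received) - consumed;
 *	dispatch_data_t rest = dispatch_data_create_subrange(received, consumed,
 *			remaining);
 *	// leaves entirely before `consumed` are no longer referenced by `rest`
 *	dispatch_release(received);
 */
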
static void*
_dispatch_data_flatten(dispatch_data_t dd)
{
	void *buffer = malloc(dd->size);

	// Composite data object, copy the represented buffers
	if (buffer) {
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy(buffer + off, buf, len);
			return (bool)true;
		});
	}

	return buffer;
}

// When mapping a leaf object or a subrange of a leaf object, return a direct
// pointer to the represented buffer. For all other data objects, copy the
// represented buffers into a contiguous area. In the future it might
// be possible to relocate the buffers instead (if not marked as locked).
dispatch_data_t
dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr,
		size_t *size_ptr)
{
	dispatch_data_t data = NULL;
	const void *buffer = NULL;
	size_t size = dd->size;

	if (!size) {
		data = dispatch_data_empty;
		goto out;
	}

	buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
	if (buffer) {
		_dispatch_data_retain(dd);
		data = dd;
		goto out;
	}

	buffer = _dispatch_data_flatten(dd);
	if (fastpath(buffer)) {
		data = dispatch_data_create(buffer, size, NULL,
				DISPATCH_DATA_DESTRUCTOR_FREE);
	} else {
		size = 0;
	}

out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	if (size_ptr) {
		*size_ptr = size;
	}
	return data;
}

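/*
 * Sketch of typical dispatch_data_create_map() usage: the returned object
 * owns the mapping, so the pointer stays valid only until `map` is released.
 * `dd` stands for any dispatch data object.
 *
 *	const void *bytes = NULL;
 *	size_t length = 0;
 *	dispatch_data_t map = dispatch_data_create_map(dd, &bytes, &length);
 *	// ... read `length` bytes at `bytes` ...
 *	dispatch_release(map);
 */
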
const void *
_dispatch_data_get_flattened_bytes(dispatch_data_t dd)
{
	const void *buffer;
	size_t offset = 0;

	if (slowpath(!dd->size)) {
		return NULL;
	}

	buffer = _dispatch_data_map_direct(dd, 0, &dd, &offset);
	if (buffer) {
		return buffer;
	}

	void *flatbuf = _dispatch_data_flatten(dd);
	if (fastpath(flatbuf)) {
		// we need a release so that readers see the content of the buffer
		if (slowpath(!os_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf,
				&buffer, release))) {
			// lost the race with a concurrent flattener, use its buffer
			free(flatbuf);
		} else {
			buffer = flatbuf;
		}
	} else {
		return NULL;
	}

	return buffer + offset;
}

#if DISPATCH_USE_CLIENT_CALLOUT
DISPATCH_NOINLINE
#else
DISPATCH_ALWAYS_INLINE
#endif
static bool
_dispatch_data_apply_client_callout(void *ctxt, dispatch_data_t region, size_t offset,
		const void *buffer, size_t size, dispatch_data_applier_function_t f)
{
	return f(ctxt, region, offset, buffer, size);
}

static bool
_dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from,
		size_t size, void *ctxt, dispatch_data_applier_function_t applier)
{
	bool result = true;
	const void *buffer;

	buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
	if (buffer) {
		return _dispatch_data_apply_client_callout(ctxt, dd,
				offset, buffer + from, size, applier);
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd) && result; ++i) {
		result = _dispatch_data_apply(dd->records[i].data_object,
				offset, dd->records[i].from, dd->records[i].length, ctxt,
				applier);
		offset += dd->records[i].length;
	}
	return result;
}

bool
dispatch_data_apply_f(dispatch_data_t dd, void *ctxt,
		dispatch_data_applier_function_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, ctxt, applier);
}

bool
dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, applier,
			(dispatch_data_applier_function_t)_dispatch_Block_invoke(applier));
}

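/*
 * Sketch of iterating the leaf regions without flattening; the applier
 * returns true to continue and false to stop. process_bytes() is a
 * hypothetical consumer.
 *
 *	dispatch_data_apply(dd, ^bool(dispatch_data_t region, size_t off,
 *			const void *buf, size_t len) {
 *		(void)region;
 *		process_bytes(off, buf, len);
 *		return true;
 *	});
 */
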
static dispatch_data_t
_dispatch_data_copy_region(dispatch_data_t dd, size_t from, size_t size,
		size_t location, size_t *offset_ptr)
{
	dispatch_data_t reusable_dd = NULL;
	size_t offset = 0;

	if (from == 0 && size == dd->size) {
		reusable_dd = dd;
	}

	if (_dispatch_data_map_direct(dd, from, &dd, &from)) {
		if (reusable_dd) {
			_dispatch_data_retain(reusable_dd);
			return reusable_dd;
		}

		_dispatch_data_retain(dd);
		if (from == 0 && size == dd->size) {
			return dd;
		}

		dispatch_data_t data = _dispatch_data_alloc(1, 0);
		data->size = size;
		data->records[0].from = from;
		data->records[0].length = size;
		data->records[0].data_object = dd;
		return data;
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
		size_t length = dd->records[i].length;

		if (from >= length) {
			// the remaining range starts after this record, skip it
			from -= length;
			continue;
		}

		length -= from;
		if (location >= offset + length) {
			// the searched-for location lies beyond this record
			offset += length;
			from = 0;
			continue;
		}

		from += dd->records[i].from;
		dd = dd->records[i].data_object;
		*offset_ptr += offset;
		return _dispatch_data_copy_region(dd, from, length, location, offset_ptr);
	}

	DISPATCH_INTERNAL_CRASH(*offset_ptr+offset,
			"dispatch_data_copy_region out of bounds");
}

// Returns either a leaf object or an object composed of a single leaf object
dispatch_data_t
dispatch_data_copy_region(dispatch_data_t dd, size_t location,
		size_t *offset_ptr)
{
	if (location >= dd->size) {
		*offset_ptr = dd->size;
		return dispatch_data_empty;
	}

	*offset_ptr = 0;
	return _dispatch_data_copy_region(dd, 0, dd->size, location, offset_ptr);
}

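/*
 * Sketch: find the leaf region covering a byte position. `offset` reports
 * where the returned region starts within `dd`; `location` is hypothetical.
 *
 *	size_t offset = 0;
 *	dispatch_data_t region = dispatch_data_copy_region(dd, location, &offset);
 *	// `location - offset` is the position of the byte inside `region`
 *	dispatch_release(region);
 */
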
#ifndef MAP_MEM_VM_COPY
#define MAP_MEM_VM_COPY 0x200000 // <rdar://problem/13336613>
#endif

mach_port_t
dispatch_data_make_memory_entry(dispatch_data_t dd)
{
	mach_port_t mep = MACH_PORT_NULL;
	memory_object_size_t mos;
	mach_vm_size_t vm_size = dd->size;
	mach_vm_address_t vm_addr;
	vm_prot_t flags;
	kern_return_t kr;
	bool copy = (dd->destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE);

	if (copy) {
		vm_addr = vm_page_size;
		kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size,
				VM_FLAGS_ANYWHERE);
		if (kr) {
			if (kr != KERN_NO_SPACE) {
				(void)dispatch_assume_zero(kr);
			}
			return mep;
		}
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy((void*)(vm_addr + off), buf, len);
			return (bool)true;
		});
	} else {
		vm_addr = (uintptr_t)dd->buf;
	}
	flags = VM_PROT_DEFAULT|VM_PROT_IS_MASK|MAP_MEM_VM_COPY;
	mos = vm_size;
	kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
			&mep, MACH_PORT_NULL);
	if (kr == KERN_INVALID_VALUE) {
		// Fallback in case MAP_MEM_VM_COPY is not supported
		flags &= ~MAP_MEM_VM_COPY;
		kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
				&mep, MACH_PORT_NULL);
	}
	if (dispatch_assume_zero(kr)) {
		mep = MACH_PORT_NULL;
	} else if (mos < vm_size) {
		// Memory object was truncated, e.g. due to lack of MAP_MEM_VM_COPY
		kr = mach_port_deallocate(mach_task_self(), mep);
		(void)dispatch_assume_zero(kr);
		mep = MACH_PORT_NULL;
	}
	if (copy) {
		kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
		(void)dispatch_assume_zero(kr);
	}
	return mep;
}