/*
 * Copyright (c) 2009-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
/*
 * Dispatch data objects are dispatch objects with standard retain/release
 * memory management. A dispatch data object either points to a number of other
 * dispatch data objects or is a leaf data object.
 * A composite data object specifies the total size of data it represents
 * and a list of constituent records.
 *
 *******************************************************************************
 *
 * CURRENT IMPLEMENTATION DETAILS
 *
 * There are actually 3 kinds of composite objects:
 * - trivial subranges
 * - unflattened composite data objects
 * - flattened composite data objects
 *
 * LEAVES (num_records == 0, destructor != nil)
 *
 * Those objects have a pointer to represented memory in `buf`.
 *
 * UNFLATTENED (num_records > 1, buf == nil, destructor == nil)
 *
 * This is the generic case of a composite object.
 *
 * FLATTENED (num_records > 1, buf != nil, destructor == nil)
 *
 * Those objects are non-trivial composite objects whose `buf` pointer
 * is a contiguous (copied) representation of the memory they represent.
 *
 * Such objects are created when the dispatch data object is used as an NSData,
 * -bytes is called, and the dispatch data object is an unflattened composite
 * object. The underlying implementation is _dispatch_data_get_flattened_bytes().
 *
 * TRIVIAL SUBRANGES (num_records == 1, buf == nil, destructor == nil)
 *
 * Those objects point to a single leaf, never to flattened objects.
 *
 *******************************************************************************
 *
 * Non-trivial invariants:
 *
 * It is forbidden to point into a composite data object and ignore entire
 * records from it (for example by having `from` longer than the first
 * record's length).
 *
 * dispatch_data_t's are either leaves, or composite objects pointing to
 * leaves. Depth is never greater than 1.
 *
 *******************************************************************************
 *
 * There are 4 dispatch_data_t constructors that may create non-leaf objects
 * and must maintain the invariants above:
 *
 * dispatch_data_copy_region()
 *    This function first sees through trivial subranges, and may in turn
 *    generate new trivial subranges.
 *
 * dispatch_data_create_map()
 *    This function either returns existing data objects, or a leaf.
 *
 * dispatch_data_create_subrange()
 *    This function treats flattened objects like unflattened ones,
 *    and recurses into trivial subranges; it can create trivial subranges.
 *
 * dispatch_data_create_concat()
 *    This function unwraps the top-level composite objects, trivial or not,
 *    and concatenates the two arguments' range lists, hence always creating
 *    unflattened objects, unless one of the arguments was empty.
 *
 *******************************************************************************
 */
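
/*
 * Illustrative sketch (not part of this file) of how the object kinds above
 * arise through the public dispatch_data API; the string contents and sizes
 * are arbitrary example values. Plain C, no ARC.
 *
 *	#include <dispatch/dispatch.h>
 *
 *	static void example_taxonomy(void)
 *	{
 *		// Two leaves: the default destructor copies the bytes
 *		dispatch_data_t hello = dispatch_data_create("hello", 5, NULL,
 *				DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *		dispatch_data_t world = dispatch_data_create("world", 5, NULL,
 *				DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *
 *		// Unflattened composite: two records, each pointing at a leaf
 *		dispatch_data_t both = dispatch_data_create_concat(hello, world);
 *
 *		// Trivial subrange: one record pointing into a single leaf
 *		dispatch_data_t ell = dispatch_data_create_subrange(hello, 1, 3);
 *
 *		// Mapping the composite copies it into one contiguous leaf
 *		const void *buf; size_t len;
 *		dispatch_data_t flat = dispatch_data_create_map(both, &buf, &len);
 *
 *		dispatch_release(flat);
 *		dispatch_release(ell);
 *		dispatch_release(both);
 *		dispatch_release(world);
 *		dispatch_release(hello);
 *	}
 */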
#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
#define _dispatch_data_retain(x) _dispatch_objc_retain(x)
#define _dispatch_data_release(x) _dispatch_objc_release(x)
#else
#define _dispatch_data_retain(x) dispatch_retain(x)
#define _dispatch_data_release(x) dispatch_release(x)
#endif
const dispatch_block_t _dispatch_data_destructor_free = ^{
	DISPATCH_INTERNAL_CRASH(0, "free destructor called");
};

const dispatch_block_t _dispatch_data_destructor_none = ^{
	DISPATCH_INTERNAL_CRASH(0, "none destructor called");
};

const dispatch_block_t _dispatch_data_destructor_munmap = ^{
	DISPATCH_INTERNAL_CRASH(0, "munmap destructor called");
};

// _dispatch_data_destructor_munmap is a linker alias to the following
const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{
	DISPATCH_INTERNAL_CRASH(0, "vmdeallocate destructor called");
};

const dispatch_block_t _dispatch_data_destructor_inline = ^{
	DISPATCH_INTERNAL_CRASH(0, "inline destructor called");
};
struct dispatch_data_s _dispatch_data_empty = {
#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
	.do_vtable = DISPATCH_DATA_EMPTY_CLASS,
#else
	DISPATCH_GLOBAL_OBJECT_HEADER(data),
	.do_next = DISPATCH_OBJECT_LISTLESS,
#endif
};
DISPATCH_ALWAYS_INLINE
static inline dispatch_data_t
_dispatch_data_alloc(size_t n, size_t extra)
{
	dispatch_data_t data;
	size_t size;

	if (os_mul_and_add_overflow(n, sizeof(range_record),
			sizeof(struct dispatch_data_s) + extra, &size)) {
		return DISPATCH_OUT_OF_MEMORY;
	}

	data = _dispatch_alloc(DISPATCH_DATA_CLASS, size);
	data->num_records = n;
#if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
	data->do_targetq = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	data->do_next = DISPATCH_OBJECT_LISTLESS;
#endif
	return data;
}
static void
_dispatch_data_destroy_buffer(const void* buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
		free((void*)buffer);
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) {
		// do nothing
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) {
		mach_vm_size_t vm_size = size;
		mach_vm_address_t vm_addr = (uintptr_t)buffer;
		mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
	} else {
		if (!queue) {
			queue = dispatch_get_global_queue(
					DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
		}
		dispatch_async_f(queue, destructor, _dispatch_call_block_and_release);
	}
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	data->buf = buffer;
	data->size = size;
	data->destructor = destructor;
	if (queue) {
		_dispatch_retain(queue);
		data->do_targetq = queue;
	}
}
void
dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_block_t destructor)
{
	if (!buffer || !size) {
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, NULL,
					_dispatch_Block_copy(destructor));
		}
		buffer = NULL;
		size = 0;
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	}
	_dispatch_data_init(data, buffer, size, NULL, destructor);
}
dispatch_data_t
dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue,
		dispatch_block_t destructor)
{
	dispatch_data_t data;
	void *data_buf = NULL;
	if (!buffer || !size) {
		// Empty data requested so return the singleton empty object. Call
		// destructor immediately in this case to ensure any unused associated
		// storage is released.
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, queue,
					_dispatch_Block_copy(destructor));
		}
		return dispatch_data_empty;
	}
	if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
		// The default destructor was provided, indicating the data should be
		// copied.
		data_buf = malloc(size);
		if (slowpath(!data_buf)) {
			return DISPATCH_OUT_OF_MEMORY;
		}
		buffer = memcpy(data_buf, buffer, size);
		data = _dispatch_data_alloc(0, 0);
		destructor = DISPATCH_DATA_DESTRUCTOR_FREE;
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_INLINE) {
		data = _dispatch_data_alloc(0, size);
		buffer = memcpy((void*)data + sizeof(struct dispatch_data_s), buffer,
				size);
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	} else {
		data = _dispatch_data_alloc(0, 0);
		destructor = _dispatch_Block_copy(destructor);
	}
	_dispatch_data_init(data, buffer, size, queue, destructor);
	return data;
}
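
/*
 * Usage sketch (illustrative, not part of this file): the default destructor
 * copies the bytes, while DISPATCH_DATA_DESTRUCTOR_FREE hands ownership of a
 * malloc'd buffer to the data object. Plain C, no ARC.
 *
 *	#include <dispatch/dispatch.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	static dispatch_data_t example_create(void)
 *	{
 *		// Copied: the caller keeps ownership of `stack_bytes`
 *		char stack_bytes[16] = { 0 };
 *		dispatch_data_t copied = dispatch_data_create(stack_bytes,
 *				sizeof(stack_bytes), NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *
 *		// Adopted: the data object will free() `heap_bytes` when released
 *		void *heap_bytes = calloc(1, 16);
 *		dispatch_data_t adopted = dispatch_data_create(heap_bytes, 16, NULL,
 *				DISPATCH_DATA_DESTRUCTOR_FREE);
 *
 *		dispatch_release(copied);
 *		return adopted;
 *	}
 */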
dispatch_data_t
dispatch_data_create_f(const void *buffer, size_t size, dispatch_queue_t queue,
		dispatch_function_t destructor_function)
{
	dispatch_block_t destructor = (dispatch_block_t)destructor_function;
	if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT &&
			destructor != DISPATCH_DATA_DESTRUCTOR_FREE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_NONE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_INLINE) {
		destructor = ^{ destructor_function((void*)buffer); };
	}
	return dispatch_data_create(buffer, size, queue, destructor);
}
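
/*
 * Usage sketch (illustrative): dispatch_data_create_f() is the
 * function-pointer variant declared in libdispatch's private headers; any
 * destructor that is not one of the known constants is wrapped in a block
 * that calls the function with the buffer pointer. `my_buffer_done` is a
 * hypothetical callback.
 *
 *	static void my_buffer_done(void *buffer)
 *	{
 *		free(buffer);
 *	}
 *
 *	static dispatch_data_t example_create_f(size_t n)
 *	{
 *		void *buffer = calloc(1, n);
 *		return dispatch_data_create_f(buffer, n, NULL, my_buffer_done);
 *	}
 */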
dispatch_data_t
dispatch_data_create_alloc(size_t size, void** buffer_ptr)
{
	dispatch_data_t data = dispatch_data_empty;
	void *buffer = NULL;

	if (slowpath(!size)) {
		goto out;
	}
	data = _dispatch_data_alloc(0, size);
	buffer = (void*)data + sizeof(struct dispatch_data_s);
	_dispatch_data_init(data, buffer, size, NULL,
			DISPATCH_DATA_DESTRUCTOR_NONE);
out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	return data;
}
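
/*
 * Usage sketch (illustrative): dispatch_data_create_alloc() (private SPI, as
 * implemented above) returns a leaf whose storage lives inline after the
 * object header, and hands back a pointer the caller may fill in.
 *
 *	static dispatch_data_t example_create_alloc(const void *src, size_t n)
 *	{
 *		void *buffer = NULL;
 *		dispatch_data_t data = dispatch_data_create_alloc(n, &buffer);
 *		if (buffer) {
 *			memcpy(buffer, src, n);
 *		}
 *		return data;
 *	}
 */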
void
_dispatch_data_dispose(dispatch_data_t dd)
{
	if (_dispatch_data_leaf(dd)) {
		_dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq,
				dd->destructor);
	} else {
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			_dispatch_data_release(dd->records[i].data_object);
		}
		free((void *)dd->buf);
	}
}
size_t
_dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz)
{
	size_t offset = 0;
	offset += dsnprintf(&buf[offset], bufsiz - offset, "data[%p] = { ", dd);
	if (_dispatch_data_leaf(dd)) {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"leaf, size = %zd, buf = %p ", dd->size, dd->buf);
	} else {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"composite, size = %zd, num_records = %zd ", dd->size,
				_dispatch_data_num_records(dd));
		if (dd->buf) {
			offset += dsnprintf(&buf[offset], bufsiz - offset,
					", flatbuf = %p ", dd->buf);
		}
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			range_record r = dd->records[i];
			offset += dsnprintf(&buf[offset], bufsiz - offset, "record[%zd] = "
					"{ from = %zd, length = %zd, data_object = %p }, ", i,
					r.from, r.length, r.data_object);
		}
	}
	offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
	return offset;
}
size_t
dispatch_data_get_size(dispatch_data_t dd)
{
	return dd->size;
}
dispatch_data_t
dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2)
{
	dispatch_data_t data;
	size_t n;

	if (!dd1->size) {
		_dispatch_data_retain(dd2);
		return dd2;
	}
	if (!dd2->size) {
		_dispatch_data_retain(dd1);
		return dd1;
	}

	if (os_add_overflow(_dispatch_data_num_records(dd1),
			_dispatch_data_num_records(dd2), &n)) {
		return DISPATCH_OUT_OF_MEMORY;
	}
	data = _dispatch_data_alloc(n, 0);
	data->size = dd1->size + dd2->size;
	// Copy the constituent records into the newly created data object
	// Reference leaf objects as sub-objects
	if (_dispatch_data_leaf(dd1)) {
		data->records[0].from = 0;
		data->records[0].length = dd1->size;
		data->records[0].data_object = dd1;
	} else {
		memcpy(data->records, dd1->records, _dispatch_data_num_records(dd1) *
				sizeof(range_record));
	}
	if (_dispatch_data_leaf(dd2)) {
		data->records[_dispatch_data_num_records(dd1)].from = 0;
		data->records[_dispatch_data_num_records(dd1)].length = dd2->size;
		data->records[_dispatch_data_num_records(dd1)].data_object = dd2;
	} else {
		memcpy(data->records + _dispatch_data_num_records(dd1), dd2->records,
				_dispatch_data_num_records(dd2) * sizeof(range_record));
	}
	size_t i;
	for (i = 0; i < _dispatch_data_num_records(data); ++i) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}
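
/*
 * Usage sketch (illustrative): the concatenation retains the leaves it
 * references, so the caller may release its own references afterwards.
 * Plain C, no ARC.
 *
 *	static dispatch_data_t example_concat(dispatch_data_t head,
 *			dispatch_data_t tail)
 *	{
 *		dispatch_data_t joined = dispatch_data_create_concat(head, tail);
 *		dispatch_release(head);
 *		dispatch_release(tail);
 *		return joined;
 *	}
 */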
dispatch_data_t
dispatch_data_create_subrange(dispatch_data_t dd, size_t offset,
		size_t length)
{
	dispatch_data_t data;

	if (offset >= dd->size || !length) {
		return dispatch_data_empty;
	} else if (length > dd->size - offset) {
		length = dd->size - offset;
	} else if (length == dd->size) {
		_dispatch_data_retain(dd);
		return dd;
	}

	/*
	 * we must only optimize leaves and not flattened objects
	 * because lots of users want to keep the end of a buffer and release
	 * as much memory as they can from the beginning of it
	 *
	 * Using the flatbuf here would be very wrong with respect to that goal
	 */
	if (_dispatch_data_leaf(dd)) {
		data = _dispatch_data_alloc(1, 0);
		data->size = length;
		data->records[0].from = offset;
		data->records[0].length = length;
		data->records[0].data_object = dd;
		_dispatch_data_retain(dd);
		return data;
	}

	// Subrange of a composite dispatch data object
	const size_t dd_num_records = _dispatch_data_num_records(dd);
	bool to_the_end = (offset + length == dd->size);
	size_t i = 0;

	// find the record containing the specified offset
	while (i < dd_num_records && offset >= dd->records[i].length) {
		offset -= dd->records[i++].length;
	}

	// Crashing here indicates memory corruption of passed in data object
	if (slowpath(i >= dd_num_records)) {
		DISPATCH_INTERNAL_CRASH(i,
				"dispatch_data_create_subrange out of bounds");
	}

	// if everything is from a single dispatch data object, avoid boxing it
	if (offset + length <= dd->records[i].length) {
		return dispatch_data_create_subrange(dd->records[i].data_object,
				dd->records[i].from + offset, length);
	}

	// find the record containing the end of the current range
	// and optimize the case when you just remove bytes at the origin
	size_t count, last_length;

	if (to_the_end) {
		count = dd_num_records - i;
	} else {
		last_length = length - (dd->records[i].length - offset);
		count = 1;

		while (i + count < dd_num_records) {
			size_t record_length = dd->records[i + count++].length;

			if (last_length <= record_length) {
				break;
			}
			last_length -= record_length;

			// Crashing here indicates memory corruption of passed in data object
			if (slowpath(i + count >= dd_num_records)) {
				DISPATCH_INTERNAL_CRASH(i + count,
						"dispatch_data_create_subrange out of bounds");
			}
		}
	}

	data = _dispatch_data_alloc(count, 0);
	data->size = length;
	memcpy(data->records, dd->records + i, count * sizeof(range_record));

	if (offset) {
		data->records[0].from += offset;
		data->records[0].length -= offset;
	}
	if (!to_the_end) {
		data->records[count - 1].length = last_length;
	}

	for (i = 0; i < count; i++) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}
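
/*
 * Usage sketch (illustrative): dropping a consumed prefix. Because subranges
 * of a leaf point at the leaf itself (never at a flattened buffer), the
 * original reference can be released while only the tail is kept alive.
 *
 *	static dispatch_data_t example_drop_prefix(dispatch_data_t data,
 *			size_t consumed)
 *	{
 *		size_t size = dispatch_data_get_size(data);
 *		if (consumed >= size) {
 *			dispatch_release(data);
 *			return dispatch_data_empty;
 *		}
 *		dispatch_data_t tail = dispatch_data_create_subrange(data, consumed,
 *				size - consumed);
 *		dispatch_release(data);
 *		return tail;
 *	}
 */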
static void *
_dispatch_data_flatten(dispatch_data_t dd)
{
	void *buffer = malloc(dd->size);

	// Composite data object, copy the represented buffers
	if (buffer) {
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy(buffer + off, buf, len);
			return (bool)true;
		});
	}

	return buffer;
}
// When mapping a leaf object or a subrange of a leaf object, return a direct
// pointer to the represented buffer. For all other data objects, copy the
// represented buffers into a contiguous area. In the future it might
// be possible to relocate the buffers instead (if not marked as locked).
dispatch_data_t
dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr,
		size_t *size_ptr)
{
	dispatch_data_t data = NULL;
	const void *buffer = NULL;
	size_t size = dd->size;

	if (!size) {
		data = dispatch_data_empty;
		goto out;
	}

	buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
	if (buffer) {
		_dispatch_data_retain(dd);
		data = dd;
		goto out;
	}

	buffer = _dispatch_data_flatten(dd);
	if (fastpath(buffer)) {
		data = dispatch_data_create(buffer, size, NULL,
				DISPATCH_DATA_DESTRUCTOR_FREE);
	} else {
		size = 0;
	}

out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	if (size_ptr) {
		*size_ptr = size;
	}
	return data;
}
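
/*
 * Usage sketch (illustrative): making a composite object contiguous. The
 * returned map object owns the buffer; the pointer stays valid only as long
 * as `map` is retained.
 *
 *	static void example_map(dispatch_data_t data)
 *	{
 *		const void *bytes = NULL;
 *		size_t length = 0;
 *		dispatch_data_t map = dispatch_data_create_map(data, &bytes, &length);
 *		if (map) {
 *			// ... read `length` bytes starting at `bytes` ...
 *			dispatch_release(map);
 *		}
 *	}
 */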
const void *
_dispatch_data_get_flattened_bytes(dispatch_data_t dd)
{
	const void *buffer;
	size_t offset = 0;

	if (slowpath(!dd->size)) {
		return NULL;
	}

	buffer = _dispatch_data_map_direct(dd, 0, &dd, &offset);
	if (buffer) {
		return buffer;
	}

	void *flatbuf = _dispatch_data_flatten(dd);
	if (fastpath(flatbuf)) {
		// we need a release so that readers see the content of the buffer
		if (slowpath(!os_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf,
				&buffer, release))) {
			// a concurrent flatten installed its buffer first, use that one
			free(flatbuf);
		} else {
			buffer = flatbuf;
		}
	} else {
		return NULL;
	}

	return buffer + offset;
}
#if DISPATCH_USE_CLIENT_CALLOUT
DISPATCH_NOINLINE
#else
DISPATCH_ALWAYS_INLINE
#endif
static bool
_dispatch_data_apply_client_callout(void *ctxt, dispatch_data_t region,
		size_t offset, const void *buffer, size_t size,
		dispatch_data_applier_function_t f)
{
	return f(ctxt, region, offset, buffer, size);
}
static bool
_dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from,
		size_t size, void *ctxt, dispatch_data_applier_function_t applier)
{
	bool result = true;
	const void *buffer;

	buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
	if (buffer) {
		return _dispatch_data_apply_client_callout(ctxt, dd,
				offset, buffer + from, size, applier);
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd) && result; ++i) {
		result = _dispatch_data_apply(dd->records[i].data_object,
				offset, dd->records[i].from, dd->records[i].length, ctxt,
				applier);
		offset += dd->records[i].length;
	}
	return result;
}
bool
dispatch_data_apply_f(dispatch_data_t dd, void *ctxt,
		dispatch_data_applier_function_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, ctxt, applier);
}
bool
dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, applier,
			(dispatch_data_applier_function_t)_dispatch_Block_invoke(applier));
}
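
/*
 * Usage sketch (illustrative): iterating the leaf regions of a (possibly
 * composite) data object without copying it. Plain C with blocks.
 *
 *	static size_t example_count_newlines(dispatch_data_t data)
 *	{
 *		__block size_t newlines = 0;
 *		dispatch_data_apply(data, ^bool(dispatch_data_t region,
 *				size_t offset, const void *buffer, size_t size) {
 *			(void)region; (void)offset;
 *			const char *p = buffer;
 *			for (size_t i = 0; i < size; i++) {
 *				if (p[i] == '\n') newlines++;
 *			}
 *			return true; // keep iterating
 *		});
 *		return newlines;
 *	}
 */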
static dispatch_data_t
_dispatch_data_copy_region(dispatch_data_t dd, size_t from, size_t size,
		size_t location, size_t *offset_ptr)
{
	dispatch_data_t reusable_dd = NULL;
	size_t offset = 0;

	if (from == 0 && size == dd->size) {
		reusable_dd = dd;
	}

	if (_dispatch_data_map_direct(dd, from, &dd, &from)) {
		if (reusable_dd) {
			_dispatch_data_retain(reusable_dd);
			return reusable_dd;
		}

		_dispatch_data_retain(dd);
		if (from == 0 && size == dd->size) {
			return dd;
		}

		dispatch_data_t data = _dispatch_data_alloc(1, 0);
		data->size = size;
		data->records[0].from = from;
		data->records[0].length = size;
		data->records[0].data_object = dd;
		return data;
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
		size_t length = dd->records[i].length;

		if (from >= length) {
			// Skip over records before the specified range
			from -= length;
			continue;
		}
		length -= from;
		if (location >= offset + length) {
			// Skip over records before the specified location
			offset += length;
			from = 0;
			continue;
		}

		from += dd->records[i].from;
		dd = dd->records[i].data_object;
		*offset_ptr += offset;
		location -= offset;
		return _dispatch_data_copy_region(dd, from, length, location,
				offset_ptr);
	}
	DISPATCH_INTERNAL_CRASH(*offset_ptr+offset,
			"dispatch_data_copy_region out of bounds");
}
// Returns either a leaf object or an object composed of a single leaf object
dispatch_data_t
dispatch_data_copy_region(dispatch_data_t dd, size_t location,
		size_t *offset_ptr)
{
	if (location >= dd->size) {
		*offset_ptr = dd->size;
		return dispatch_data_empty;
	}
	*offset_ptr = 0;
	return _dispatch_data_copy_region(dd, 0, dd->size, location, offset_ptr);
}
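
/*
 * Usage sketch (illustrative): locating the byte at `location`. The returned
 * region is either a leaf or a single-record wrapper around one, and
 * `region_offset` is where that region starts within `data`. The helper name
 * `example_byte_at` is hypothetical; `bytes` stays valid while *out_region
 * is retained.
 *
 *	static const void *example_byte_at(dispatch_data_t data, size_t location,
 *			dispatch_data_t *out_region)
 *	{
 *		size_t region_offset = 0;
 *		dispatch_data_t region = dispatch_data_copy_region(data, location,
 *				&region_offset);
 *		const void *bytes = NULL;
 *		size_t size = 0;
 *		dispatch_data_t map = dispatch_data_create_map(region, &bytes, &size);
 *		dispatch_release(region);
 *		*out_region = map; // caller releases
 *		return bytes ? (const char *)bytes + (location - region_offset) : NULL;
 *	}
 */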
#ifndef MAP_MEM_VM_COPY
#define MAP_MEM_VM_COPY 0x200000 // <rdar://problem/13336613>
#endif

mach_port_t
dispatch_data_make_memory_entry(dispatch_data_t dd)
{
	mach_port_t mep = MACH_PORT_NULL;
	memory_object_size_t mos;
	mach_vm_size_t vm_size = dd->size;
	mach_vm_address_t vm_addr;
	vm_prot_t flags;
	kern_return_t kr;
	bool copy = (dd->destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE);

retry:
	if (copy) {
		vm_addr = vm_page_size;
		kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size,
				VM_FLAGS_ANYWHERE);
		if (kr) {
			if (kr != KERN_NO_SPACE) {
				(void)dispatch_assume_zero(kr);
			}
			return mep;
		}
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy((void*)(vm_addr + off), buf, len);
			return (bool)true;
		});
	} else {
		vm_addr = (uintptr_t)dd->buf;
	}
	flags = VM_PROT_DEFAULT|VM_PROT_IS_MASK|MAP_MEM_VM_COPY;
	mos = vm_size;
	kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
			&mep, MACH_PORT_NULL);
	if (kr == KERN_INVALID_VALUE) {
		// Fallback in case MAP_MEM_VM_COPY is not supported
		flags &= ~MAP_MEM_VM_COPY;
		kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
				&mep, MACH_PORT_NULL);
	}
	if (dispatch_assume_zero(kr)) {
		mep = MACH_PORT_NULL;
	} else if (mos < vm_size) {
		// Memory object was truncated, e.g. due to lack of MAP_MEM_VM_COPY
		kr = mach_port_deallocate(mach_task_self(), mep);
		(void)dispatch_assume_zero(kr);
		if (!copy) {
			copy = true;
			goto retry;
		}
		mep = MACH_PORT_NULL;
	}
	if (copy) {
		kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
		(void)dispatch_assume_zero(kr);
	}
	return mep;
}