/*
 * Copyright (c) 2009-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
#include "internal.h"

/*
 * Dispatch data objects are dispatch objects with standard retain/release
 * memory management. A dispatch data object either points to a number of other
 * dispatch data objects or is a leaf data object.
 * A composite data object specifies the total size of data it represents
 * and a list of constituent records.
 *
 *******************************************************************************
 *
 * CURRENT IMPLEMENTATION DETAILS
 *
 * There are actually 3 kinds of composite objects:
 * - trivial subranges
 * - unflattened composite data objects
 * - flattened composite data objects
 *
 * LEAVES (num_records == 0, destructor != nil)
 *
 * Those objects have a pointer to represented memory in `buf`.
 *
 * UNFLATTENED (num_records > 1, buf == nil, destructor == nil)
 *
 * This is the generic case of a composite object.
 *
 * FLATTENED (num_records > 1, buf != nil, destructor == nil)
 *
 * Those objects are non-trivial composite objects whose `buf` pointer
 * is a contiguous (copied) representation of the memory they represent.
 *
 * Such objects are created when the dispatch data object is used as an NSData
 * and -bytes is called on an unflattened composite object.
 * The underlying implementation is _dispatch_data_get_flattened_bytes().
 *
 * TRIVIAL SUBRANGES (num_records == 1, buf == nil, destructor == nil)
 *
 * Those objects point to a single leaf, never to flattened objects.
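 *
 * For illustration (hypothetical sizes): a composite covering a 16-byte leaf A
 * followed by a 32-byte leaf B has size == 48 and
 *     records[0] = { .from = 0, .length = 16, .data_object = A }
 *     records[1] = { .from = 0, .length = 32, .data_object = B }
 * whereas a trivial subrange of 8 bytes at offset 4 into A has size == 8 and a
 * single record { .from = 4, .length = 8, .data_object = A }.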
 *
 *******************************************************************************
 *
 * Non-trivial invariants:
 *
 * It is forbidden to point into a composite data object and ignore entire
 * records from it (for example by having `from` longer than the first
 * record's length).
 *
 * dispatch_data_t's are either leaves, or composite objects pointing to
 * leaves. Depth is never greater than 1.
 *
 *******************************************************************************
 *
 * There are 4 dispatch_data_t constructors that may create non-leaf objects
 * and must ensure the proper invariants:
 *
 * dispatch_data_copy_region()
 *    This function first sees through trivial subranges, and may in turn
 *    generate new trivial subranges.
 *
 * dispatch_data_create_map()
 *    This function either returns existing data objects, or a leaf.
 *
 * dispatch_data_create_subrange()
 *    This function treats flattened objects like unflattened ones,
 *    recurses into trivial subranges, and can create trivial subranges.
 *
 * dispatch_data_create_concat()
 *    This function unwraps the top-level composite objects, trivial or not,
 *    and concatenates the two arguments' range lists, hence always creating
 *    unflattened objects, unless one of the arguments was empty.
 *
 ******************************************************************************/
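
/*
 * A sketch of how the constructors above preserve these invariants
 * (hypothetical buffers and sizes):
 *
 *	dispatch_data_t a = dispatch_data_create(buf_a, 16, NULL,
 *			DISPATCH_DATA_DESTRUCTOR_DEFAULT);	// leaf (bytes copied)
 *	dispatch_data_t b = dispatch_data_create(buf_b, 32, NULL,
 *			DISPATCH_DATA_DESTRUCTOR_DEFAULT);	// leaf (bytes copied)
 *	dispatch_data_t c = dispatch_data_create_concat(a, b);
 *			// unflattened composite: 2 records, each pointing to a leaf
 *	dispatch_data_t s = dispatch_data_create_subrange(c, 20, 8);
 *			// falls entirely within b's record, so this resolves to a
 *			// trivial subrange referencing leaf b directly (depth stays 1)
 */
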
#if USE_OBJC
#define _dispatch_data_retain(x) _dispatch_objc_retain(x)
#define _dispatch_data_release(x) _dispatch_objc_release(x)
#else
#define _dispatch_data_retain(x) dispatch_retain(x)
#define _dispatch_data_release(x) dispatch_release(x)
#endif

const dispatch_block_t _dispatch_data_destructor_free = ^{
	DISPATCH_CRASH("free destructor called");
};

const dispatch_block_t _dispatch_data_destructor_none = ^{
	DISPATCH_CRASH("none destructor called");
};

const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{
	DISPATCH_CRASH("vmdeallocate destructor called");
};

const dispatch_block_t _dispatch_data_destructor_inline = ^{
	DISPATCH_CRASH("inline destructor called");
};

struct dispatch_data_s _dispatch_data_empty = {
	.do_vtable = DISPATCH_DATA_EMPTY_CLASS,
#if !USE_OBJC
	.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	.do_next = DISPATCH_OBJECT_LISTLESS,
#endif
};

DISPATCH_ALWAYS_INLINE
static inline dispatch_data_t
_dispatch_data_alloc(size_t n, size_t extra)
{
	dispatch_data_t data = _dispatch_alloc(DISPATCH_DATA_CLASS,
			sizeof(struct dispatch_data_s) + extra +
			n * sizeof(range_record));

	data->num_records = n;
#if !USE_OBJC
	data->do_targetq = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	data->do_next = DISPATCH_OBJECT_LISTLESS;
#endif
	return data;
}

void
_dispatch_data_destroy_buffer(const void* buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
		free((void*)buffer);
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) {
		// do nothing
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) {
		mach_vm_size_t vm_size = size;
		mach_vm_address_t vm_addr = (uintptr_t)buffer;
		mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
	} else {
		if (!queue) {
			queue = dispatch_get_global_queue(
					DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
		}
		dispatch_async_f(queue, destructor, _dispatch_call_block_and_release);
	}
}

DISPATCH_ALWAYS_INLINE
static void
_dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	data->buf = buffer;
	data->size = size;
	data->destructor = destructor;
	if (queue) {
		_dispatch_retain(queue);
		data->do_targetq = queue;
	}
}

void
dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_block_t destructor)
{
	if (!buffer || !size) {
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, NULL,
					_dispatch_Block_copy(destructor));
		}
		buffer = NULL;
		size = 0;
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	}
	_dispatch_data_init(data, buffer, size, NULL, destructor);
}

dispatch_data_t
dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue,
		dispatch_block_t destructor)
{
	dispatch_data_t data;
	void *data_buf = NULL;
	if (!buffer || !size) {
		// Empty data requested so return the singleton empty object. Call
		// destructor immediately in this case to ensure any unused associated
		// storage is released.
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, queue,
					_dispatch_Block_copy(destructor));
		}
		return dispatch_data_empty;
	}
	if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
		// The default destructor was provided, indicating the data should be
		// copied.
		data_buf = malloc(size);
		if (slowpath(!data_buf)) {
			return NULL;
		}
		buffer = memcpy(data_buf, buffer, size);
		data = _dispatch_data_alloc(0, 0);
		destructor = DISPATCH_DATA_DESTRUCTOR_FREE;
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_INLINE) {
		data = _dispatch_data_alloc(0, size);
		buffer = memcpy((void*)data + sizeof(struct dispatch_data_s), buffer,
				size);
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	} else {
		data = _dispatch_data_alloc(0, 0);
		destructor = _dispatch_Block_copy(destructor);
	}
	_dispatch_data_init(data, buffer, size, queue, destructor);
	return data;
}

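/*
 * Usage sketch (hypothetical buffer and queue): with
 * DISPATCH_DATA_DESTRUCTOR_DEFAULT the bytes are copied up front and owned by
 * the object; with a custom destructor block the buffer is adopted as-is and
 * the block runs (asynchronously on `queue`) once the object is disposed.
 *
 *	char *bytes = malloc(64);
 *	dispatch_data_t owned = dispatch_data_create(bytes, 64, queue, ^{
 *		free(bytes);	// invoked when `owned` is released
 *	});
 */
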
dispatch_data_t
dispatch_data_create_f(const void *buffer, size_t size, dispatch_queue_t queue,
		dispatch_function_t destructor_function)
{
	dispatch_block_t destructor = (dispatch_block_t)destructor_function;
	if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT &&
			destructor != DISPATCH_DATA_DESTRUCTOR_FREE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_NONE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_INLINE) {
		destructor = ^{ destructor_function((void*)buffer); };
	}
	return dispatch_data_create(buffer, size, queue, destructor);
}

dispatch_data_t
dispatch_data_create_alloc(size_t size, void** buffer_ptr)
{
	dispatch_data_t data = dispatch_data_empty;
	void *buffer = NULL;

	if (slowpath(!size)) {
		goto out;
	}
	data = _dispatch_data_alloc(0, size);
	buffer = (void*)data + sizeof(struct dispatch_data_s);
	_dispatch_data_init(data, buffer, size, NULL,
			DISPATCH_DATA_DESTRUCTOR_NONE);
out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	return data;
}

void
_dispatch_data_dispose(dispatch_data_t dd)
{
	if (_dispatch_data_leaf(dd)) {
		_dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq,
				dd->destructor);
	} else {
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			_dispatch_data_release(dd->records[i].data_object);
		}
		free((void *)dd->buf);
	}
}

size_t
_dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz)
{
	size_t offset = 0;
	offset += dsnprintf(&buf[offset], bufsiz - offset, "data[%p] = { ", dd);
	if (_dispatch_data_leaf(dd)) {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"leaf, size = %zd, buf = %p ", dd->size, dd->buf);
	} else {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"composite, size = %zd, num_records = %zd ", dd->size,
				_dispatch_data_num_records(dd));
		if (dd->buf) {
			offset += dsnprintf(&buf[offset], bufsiz - offset,
					", flatbuf = %p ", dd->buf);
		}
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			range_record r = dd->records[i];
			offset += dsnprintf(&buf[offset], bufsiz - offset, "record[%zd] = "
					"{ from = %zd, length = %zd, data_object = %p }, ", i,
					r.from, r.length, r.data_object);
		}
	}
	offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
	return offset;
}

size_t
dispatch_data_get_size(dispatch_data_t dd)
{
	return dd->size;
}

dispatch_data_t
dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2)
{
	dispatch_data_t data;
	if (!dd1->size) {
		_dispatch_data_retain(dd2);
		return dd2;
	}
	if (!dd2->size) {
		_dispatch_data_retain(dd1);
		return dd1;
	}

	data = _dispatch_data_alloc(_dispatch_data_num_records(dd1) +
			_dispatch_data_num_records(dd2), 0);
	data->size = dd1->size + dd2->size;
	// Copy the constituent records into the newly created data object
	// Reference leaf objects as sub-objects
	if (_dispatch_data_leaf(dd1)) {
		data->records[0].from = 0;
		data->records[0].length = dd1->size;
		data->records[0].data_object = dd1;
	} else {
		memcpy(data->records, dd1->records, _dispatch_data_num_records(dd1) *
				sizeof(range_record));
	}
	if (_dispatch_data_leaf(dd2)) {
		data->records[_dispatch_data_num_records(dd1)].from = 0;
		data->records[_dispatch_data_num_records(dd1)].length = dd2->size;
		data->records[_dispatch_data_num_records(dd1)].data_object = dd2;
	} else {
		memcpy(data->records + _dispatch_data_num_records(dd1), dd2->records,
				_dispatch_data_num_records(dd2) * sizeof(range_record));
	}
	size_t i;
	for (i = 0; i < _dispatch_data_num_records(data); ++i) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}

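/*
 * For example (hypothetical objects): if `left` is a 16-byte leaf and `right`
 * is a composite that already has 3 records, then
 *
 *	dispatch_data_t joined = dispatch_data_create_concat(left, right);
 *
 * produces an unflattened object with 4 records: `left` referenced as
 * records[0], plus copies of `right`'s 3 records (not `right` itself), which
 * keeps the composite depth at 1.
 */
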
dispatch_data_t
dispatch_data_create_subrange(dispatch_data_t dd, size_t offset,
		size_t length)
{
	dispatch_data_t data;
	if (offset >= dd->size || !length) {
		return dispatch_data_empty;
	} else if ((offset + length) > dd->size) {
		length = dd->size - offset;
	} else if (length == dd->size) {
		_dispatch_data_retain(dd);
		return dd;
	}

	/*
	 * we must only optimize leaves and not flattened objects
	 * because lots of users want to keep the end of a buffer and release
	 * as much memory as they can from the beginning of it
	 *
	 * Using the flatbuf here would be very wrong with respect to that goal
	 */
	if (_dispatch_data_leaf(dd)) {
		data = _dispatch_data_alloc(1, 0);
		data->size = length;
		data->records[0].from = offset;
		data->records[0].length = length;
		data->records[0].data_object = dd;
		_dispatch_data_retain(dd);
		return data;
	}

	// Subrange of a composite dispatch data object
	const size_t dd_num_records = _dispatch_data_num_records(dd);
	bool to_the_end = (offset + length == dd->size);
	size_t i = 0;

	// find the record containing the specified offset
	while (i < dd_num_records && offset >= dd->records[i].length) {
		offset -= dd->records[i++].length;
	}

	// Crashing here indicates memory corruption of passed in data object
	if (slowpath(i >= dd_num_records)) {
		DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
	}

	// if everything is from a single dispatch data object, avoid boxing it
	if (offset + length <= dd->records[i].length) {
		return dispatch_data_create_subrange(dd->records[i].data_object,
				dd->records[i].from + offset, length);
	}

	// find the record containing the end of the current range
	// and optimize the case when you just remove bytes at the origin
	size_t count, last_length;

	if (to_the_end) {
		count = dd_num_records - i;
	} else {
		last_length = length - (dd->records[i].length - offset);
		count = 1;

		while (i + count < dd_num_records) {
			size_t record_length = dd->records[i + count++].length;

			if (last_length <= record_length) {
				break;
			}
			last_length -= record_length;

			// Crashing here indicates memory corruption of passed in data object
			if (slowpath(i + count >= dd_num_records)) {
				DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
			}
		}
	}

	data = _dispatch_data_alloc(count, 0);
	data->size = length;
	memcpy(data->records, dd->records + i, count * sizeof(range_record));

	if (offset) {
		data->records[0].from += offset;
		data->records[0].length -= offset;
	}
	if (!to_the_end) {
		data->records[count - 1].length = last_length;
	}

	for (i = 0; i < count; i++) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}

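/*
 * Worked example (hypothetical objects): given a composite `dd` with records
 * of lengths { 10, 20, 30 }, dispatch_data_create_subrange(dd, 5, 40) starts
 * in record 0 (offset 5), ends in record 2, and returns a new 3-record
 * composite whose first record is trimmed by 5 and whose last record is
 * truncated to last_length = 40 - (10 - 5) - 20 = 15.
 */
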
static void*
_dispatch_data_flatten(dispatch_data_t dd)
{
	void *buffer = malloc(dd->size);

	// Composite data object, copy the represented buffers
	if (buffer) {
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy(buffer + off, buf, len);
			return (bool)true;
		});
	}

	return buffer;
}

// When mapping a leaf object or a subrange of a leaf object, return a direct
// pointer to the represented buffer. For all other data objects, copy the
// represented buffers into a contiguous area. In the future it might
// be possible to relocate the buffers instead (if not marked as locked).
dispatch_data_t
dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr,
		size_t *size_ptr)
{
	dispatch_data_t data = NULL;
	const void *buffer = NULL;
	size_t size = dd->size;

	if (!size) {
		data = dispatch_data_empty;
		goto out;
	}

	buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
	if (buffer) {
		_dispatch_data_retain(dd);
		data = dd;
		goto out;
	}

	buffer = _dispatch_data_flatten(dd);
	if (fastpath(buffer)) {
		data = dispatch_data_create(buffer, size, NULL,
				DISPATCH_DATA_DESTRUCTOR_FREE);
	} else {
		size = 0;
	}

out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	if (size_ptr) {
		*size_ptr = size;
	}
	return data;
}

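/*
 * Usage sketch (hypothetical data object and consumer): map to get one
 * contiguous view of the bytes, then release the map when done; the original
 * object is left untouched.
 *
 *	const void *bytes; size_t len;
 *	dispatch_data_t map = dispatch_data_create_map(dd, &bytes, &len);
 *	if (map) {
 *		process(bytes, len);	// hypothetical consumer
 *		dispatch_release(map);
 *	}
 */
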
const void *
_dispatch_data_get_flattened_bytes(dispatch_data_t dd)
{
	const void *buffer;
	size_t offset = 0;

	if (slowpath(!dd->size)) {
		return NULL;
	}

	buffer = _dispatch_data_map_direct(dd, 0, &dd, &offset);
	if (buffer) {
		return buffer;
	}

	void *flatbuf = _dispatch_data_flatten(dd);
	if (fastpath(flatbuf)) {
		// we need a release so that readers see the content of the buffer
		if (slowpath(!dispatch_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf,
				&buffer, release))) {
			// a concurrent flattener won the race, keep its buffer
			free(flatbuf);
		} else {
			buffer = flatbuf;
		}
	} else {
		return NULL;
	}

	return buffer + offset;
}

#if DISPATCH_USE_CLIENT_CALLOUT
DISPATCH_NOINLINE
#else
DISPATCH_ALWAYS_INLINE
#endif
static bool
_dispatch_data_apply_client_callout(void *ctxt, dispatch_data_t region, size_t offset,
		const void *buffer, size_t size, dispatch_data_applier_function_t f)
{
	return f(ctxt, region, offset, buffer, size);
}

static bool
_dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from,
		size_t size, void *ctxt, dispatch_data_applier_function_t applier)
{
	bool result = true;
	const void *buffer;

	buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
	if (buffer) {
		return _dispatch_data_apply_client_callout(ctxt, dd,
				offset, buffer + from, size, applier);
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd) && result; ++i) {
		result = _dispatch_data_apply(dd->records[i].data_object,
				offset, dd->records[i].from, dd->records[i].length, ctxt,
				applier);
		offset += dd->records[i].length;
	}
	return result;
}

bool
dispatch_data_apply_f(dispatch_data_t dd, void *ctxt,
		dispatch_data_applier_function_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, ctxt, applier);
}

bool
dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, applier,
			(dispatch_data_applier_function_t)_dispatch_Block_invoke(applier));
}

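/*
 * Usage sketch: iterate the contiguous regions of a (possibly composite)
 * object without flattening it; `consume` is a hypothetical callback.
 *
 *	dispatch_data_apply(dd, ^bool(dispatch_data_t region, size_t offset,
 *			const void *buffer, size_t size) {
 *		consume(offset, buffer, size);
 *		return true;	// keep iterating
 *	});
 */
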
static dispatch_data_t
_dispatch_data_copy_region(dispatch_data_t dd, size_t from, size_t size,
		size_t location, size_t *offset_ptr)
{
	dispatch_data_t reusable_dd = NULL;
	size_t offset = 0;

	if (from == 0 && size == dd->size) {
		reusable_dd = dd;
	}

	if (_dispatch_data_map_direct(dd, from, &dd, &from)) {
		if (reusable_dd) {
			_dispatch_data_retain(reusable_dd);
			return reusable_dd;
		}

		_dispatch_data_retain(dd);
		if (from == 0 && size == dd->size) {
			return dd;
		}

		dispatch_data_t data = _dispatch_data_alloc(1, 0);
		data->size = size;
		data->records[0].from = from;
		data->records[0].length = size;
		data->records[0].data_object = dd;
		return data;
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
		size_t length = dd->records[i].length;

		if (from >= length) {
			from -= length;
			continue;
		}

		length -= from;
		if (location >= offset + length) {
			offset += length;
			from = 0;
			continue;
		}

		from += dd->records[i].from;
		dd = dd->records[i].data_object;
		*offset_ptr += offset;
		location -= offset;
		return _dispatch_data_copy_region(dd, from, length, location, offset_ptr);
	}

	DISPATCH_CRASH("dispatch_data_copy_region out of bounds");
}

// Returns either a leaf object or an object composed of a single leaf object
dispatch_data_t
dispatch_data_copy_region(dispatch_data_t dd, size_t location,
		size_t *offset_ptr)
{
	if (location >= dd->size) {
		*offset_ptr = dd->size;
		return dispatch_data_empty;
	}

	*offset_ptr = 0;
	return _dispatch_data_copy_region(dd, 0, dd->size, location, offset_ptr);
}

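/*
 * Usage sketch: find the leaf-backed region that contains byte `location`;
 * `offset` receives that region's starting offset within `dd`.
 *
 *	size_t offset;
 *	dispatch_data_t region = dispatch_data_copy_region(dd, location, &offset);
 *	// region is a leaf (or wraps a single leaf) covering `location`
 *	dispatch_release(region);
 */
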
#if HAVE_MACH

#ifndef MAP_MEM_VM_COPY
#define MAP_MEM_VM_COPY 0x200000 // <rdar://problem/13336613>
#endif

mach_port_t
dispatch_data_make_memory_entry(dispatch_data_t dd)
{
	mach_port_t mep = MACH_PORT_NULL;
	memory_object_size_t mos;
	mach_vm_size_t vm_size = dd->size;
	mach_vm_address_t vm_addr;
	vm_prot_t flags;
	kern_return_t kr;
	bool copy = (dd->destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE);

retry:
	if (copy) {
		vm_addr = vm_page_size;
		kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size,
				VM_FLAGS_ANYWHERE);
		if (kr) {
			if (kr != KERN_NO_SPACE) {
				(void)dispatch_assume_zero(kr);
			}
			return mep;
		}
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy((void*)(vm_addr + off), buf, len);
			return (bool)true;
		});
	} else {
		vm_addr = (uintptr_t)dd->buf;
	}
	flags = VM_PROT_DEFAULT|VM_PROT_IS_MASK|MAP_MEM_VM_COPY;
	mos = vm_size;
	kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
			&mep, MACH_PORT_NULL);
	if (kr == KERN_INVALID_VALUE) {
		// Fallback in case MAP_MEM_VM_COPY is not supported
		flags &= ~MAP_MEM_VM_COPY;
		kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
				&mep, MACH_PORT_NULL);
	}
	if (dispatch_assume_zero(kr)) {
		mep = MACH_PORT_NULL;
	} else if (mos < vm_size) {
		// Memory object was truncated, e.g. due to lack of MAP_MEM_VM_COPY
		kr = mach_port_deallocate(mach_task_self(), mep);
		(void)dispatch_assume_zero(kr);
		if (!copy) {
			copy = true;
			goto retry;
		}
		mep = MACH_PORT_NULL;
	}
	if (copy) {
		kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
		(void)dispatch_assume_zero(kr);
	}
	return mep;
}

#endif // HAVE_MACH