/*
 * Copyright (c) 2009-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"

// Dispatch data objects are dispatch objects with standard retain/release
// memory management. A dispatch data object either points to a number of other
// dispatch data objects or is a leaf data object. A leaf data object contains
// a pointer to represented memory. A composite data object specifies the total
// size of data it represents and a list of constituent records.
//
// A leaf data object always points to a full represented buffer; a composite
// dispatch data object is needed to represent a subrange of a memory region.
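
// Illustrative usage sketch (not part of the implementation; assumes only the
// public dispatch/data.h API): how leaf and composite objects arise.
//
//	void *p = malloc(16);
//	dispatch_data_t leaf = dispatch_data_create(p, 16, NULL,
//			DISPATCH_DATA_DESTRUCTOR_FREE); // leaf object owning 'p'
//	dispatch_data_t pair = dispatch_data_create_concat(leaf, leaf);
//	// 'pair' is composite: two records, each referencing 'leaf'
//	dispatch_data_t view = dispatch_data_create_subrange(pair, 8, 16);
//	// 'view' is composite: represents a subrange without copying bytes
//	dispatch_release(view);
//	dispatch_release(pair);
//	dispatch_release(leaf);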

#if USE_OBJC
#define _dispatch_data_retain(x) _dispatch_objc_retain(x)
#define _dispatch_data_release(x) _dispatch_objc_release(x)
#else
#define _dispatch_data_retain(x) dispatch_retain(x)
#define _dispatch_data_release(x) dispatch_release(x)
#endif

const dispatch_block_t _dispatch_data_destructor_free = ^{
	DISPATCH_CRASH("free destructor called");
};

const dispatch_block_t _dispatch_data_destructor_none = ^{
	DISPATCH_CRASH("none destructor called");
};

const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{
	DISPATCH_CRASH("vmdeallocate destructor called");
};

const dispatch_block_t _dispatch_data_destructor_inline = ^{
	DISPATCH_CRASH("inline destructor called");
};

struct dispatch_data_s _dispatch_data_empty = {
	.do_vtable = DISPATCH_DATA_EMPTY_CLASS,
	.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	.do_next = DISPATCH_OBJECT_LISTLESS,
};

DISPATCH_ALWAYS_INLINE
static inline dispatch_data_t
_dispatch_data_alloc(size_t n, size_t extra)
{
	dispatch_data_t data = _dispatch_alloc(DISPATCH_DATA_CLASS,
			sizeof(struct dispatch_data_s) + extra +
			(n ? n * sizeof(range_record) - sizeof(data->buf) : 0));
	data->num_records = n;
#if !USE_OBJC
	data->do_targetq = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	data->do_next = DISPATCH_OBJECT_LISTLESS;
#endif
	return data;
}

static void
_dispatch_data_destroy_buffer(const void* buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
		free((void*)buffer);
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) {
		// do nothing
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) {
		mach_vm_size_t vm_size = size;
		mach_vm_address_t vm_addr = (uintptr_t)buffer;
		mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
	} else {
		if (!queue) {
			queue = dispatch_get_global_queue(
					DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
		}
		dispatch_async_f(queue, destructor, _dispatch_call_block_and_release);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	data->buf = buffer;
	data->size = size;
	data->destructor = destructor;
#if DISPATCH_DATA_USE_LEAF_MEMBER
	data->leaf = true;
	data->num_records = 1;
#endif
	if (queue) {
		_dispatch_retain(queue);
		data->do_targetq = queue;
	}
}

void
dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_block_t destructor)
{
	if (!buffer || !size) {
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, NULL,
					_dispatch_Block_copy(destructor));
		}
		buffer = NULL;
		size = 0;
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	}
	_dispatch_data_init(data, buffer, size, NULL, destructor);
}

dispatch_data_t
dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue,
		dispatch_block_t destructor)
{
	dispatch_data_t data;
	void *data_buf = NULL;
	if (!buffer || !size) {
		// Empty data requested so return the singleton empty object. Call
		// destructor immediately in this case to ensure any unused associated
		// storage is released.
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, queue,
					_dispatch_Block_copy(destructor));
		}
		return dispatch_data_empty;
	}
	if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
		// The default destructor was provided, indicating the data should be
		// copied.
		data_buf = malloc(size);
		if (slowpath(!data_buf)) {
			return NULL;
		}
		buffer = memcpy(data_buf, buffer, size);
		data = _dispatch_data_alloc(0, 0);
		destructor = DISPATCH_DATA_DESTRUCTOR_FREE;
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_INLINE) {
		data = _dispatch_data_alloc(0, size);
		buffer = memcpy((void*)data + sizeof(struct dispatch_data_s), buffer,
				size);
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	} else {
		data = _dispatch_data_alloc(0, 0);
		destructor = _dispatch_Block_copy(destructor);
	}
	_dispatch_data_init(data, buffer, size, queue, destructor);
	return data;
}
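
// Destructor semantics sketch (illustrative only): DESTRUCTOR_DEFAULT copies
// the bytes so the caller keeps ownership of its buffer; DESTRUCTOR_FREE
// transfers ownership of a malloc'd buffer to the data object.
//
//	char stack_buf[4] = { 'a', 'b', 'c', 'd' };
//	dispatch_data_t copied = dispatch_data_create(stack_buf, 4, NULL,
//			DISPATCH_DATA_DESTRUCTOR_DEFAULT); // bytes copied internally
//	void *heap_buf = malloc(4);
//	dispatch_data_t owned = dispatch_data_create(heap_buf, 4, NULL,
//			DISPATCH_DATA_DESTRUCTOR_FREE); // heap_buf freed on release
//	dispatch_release(copied);
//	dispatch_release(owned);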

dispatch_data_t
dispatch_data_create_f(const void *buffer, size_t size, dispatch_queue_t queue,
		dispatch_function_t destructor_function)
{
	dispatch_block_t destructor = (dispatch_block_t)destructor_function;
	if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT &&
			destructor != DISPATCH_DATA_DESTRUCTOR_FREE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_NONE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_INLINE) {
		destructor = ^{ destructor_function((void*)buffer); };
	}
	return dispatch_data_create(buffer, size, queue, destructor);
}
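
// Function-pointer variant sketch (illustrative; 'my_buffer_release' is a
// hypothetical caller-supplied function, not a libdispatch symbol):
//
//	static void my_buffer_release(void *buf) { free(buf); }
//
//	dispatch_data_t d = dispatch_data_create_f(buf, len, NULL,
//			my_buffer_release); // wrapped in a block capturing 'buf'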

dispatch_data_t
dispatch_data_create_alloc(size_t size, void** buffer_ptr)
{
	dispatch_data_t data = dispatch_data_empty;
	void *buffer = NULL;

	if (slowpath(!size)) {
		goto out;
	}
	data = _dispatch_data_alloc(0, size);
	buffer = (void*)data + sizeof(struct dispatch_data_s);
	_dispatch_data_init(data, buffer, size, NULL,
			DISPATCH_DATA_DESTRUCTOR_NONE);
out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	return data;
}

void
_dispatch_data_dispose(dispatch_data_t dd)
{
	dispatch_block_t destructor = dd->destructor;
	if (destructor == NULL) {
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			_dispatch_data_release(dd->records[i].data_object);
		}
	} else {
		_dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq,
				destructor);
	}
}

size_t
_dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz)
{
	size_t offset = 0;
	offset += dsnprintf(&buf[offset], bufsiz - offset, "data[%p] = { ", dd);
	if (_dispatch_data_leaf(dd)) {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"leaf, size = %zd, buf = %p ", dd->size, dd->buf);
	} else {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"composite, size = %zd, num_records = %zd ", dd->size,
				_dispatch_data_num_records(dd));
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			range_record r = dd->records[i];
			offset += dsnprintf(&buf[offset], bufsiz - offset, "record[%zd] = "
					"{ from = %zd, length = %zd, data_object = %p }, ", i,
					r.from, r.length, r.data_object);
		}
	}
	offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
	return offset;
}

size_t
dispatch_data_get_size(dispatch_data_t dd)
{
	return dd->size;
}

dispatch_data_t
dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2)
{
	dispatch_data_t data;
	if (!dd1->size) {
		_dispatch_data_retain(dd2);
		return dd2;
	}
	if (!dd2->size) {
		_dispatch_data_retain(dd1);
		return dd1;
	}
	data = _dispatch_data_alloc(_dispatch_data_num_records(dd1) +
			_dispatch_data_num_records(dd2), 0);
	data->size = dd1->size + dd2->size;
	// Copy the constituent records into the newly created data object
	// Reference leaf objects as sub-objects
	if (_dispatch_data_leaf(dd1)) {
		data->records[0].from = 0;
		data->records[0].length = dd1->size;
		data->records[0].data_object = dd1;
	} else {
		memcpy(data->records, dd1->records, _dispatch_data_num_records(dd1) *
				sizeof(range_record));
	}
	if (_dispatch_data_leaf(dd2)) {
		data->records[_dispatch_data_num_records(dd1)].from = 0;
		data->records[_dispatch_data_num_records(dd1)].length = dd2->size;
		data->records[_dispatch_data_num_records(dd1)].data_object = dd2;
	} else {
		memcpy(data->records + _dispatch_data_num_records(dd1), dd2->records,
				_dispatch_data_num_records(dd2) * sizeof(range_record));
	}
	size_t i;
	for (i = 0; i < _dispatch_data_num_records(data); ++i) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}
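
// Concat sketch (illustrative, assuming 'a' and 'b' are existing leaf
// objects): the result references its constituents' records rather than
// copying bytes, so the inputs may be released afterwards.
//
//	dispatch_data_t ab = dispatch_data_create_concat(a, b);
//	dispatch_release(a); // 'ab' retains the records it needs
//	dispatch_release(b);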

dispatch_data_t
dispatch_data_create_subrange(dispatch_data_t dd, size_t offset,
		size_t length)
{
	dispatch_data_t data;
	if (offset >= dd->size || !length) {
		return dispatch_data_empty;
	} else if ((offset + length) > dd->size) {
		length = dd->size - offset;
	} else if (length == dd->size) {
		_dispatch_data_retain(dd);
		return dd;
	}
	if (_dispatch_data_leaf(dd)) {
		data = _dispatch_data_alloc(1, 0);
		data->size = length;
		data->records[0].from = offset;
		data->records[0].length = length;
		data->records[0].data_object = dd;
		_dispatch_data_retain(dd);
		return data;
	}
	// Subrange of a composite dispatch data object: find the record containing
	// the specified offset
	data = dispatch_data_empty;
	size_t i = 0, bytes_left = length;
	while (i < _dispatch_data_num_records(dd) &&
			offset >= dd->records[i].length) {
		offset -= dd->records[i++].length;
	}
	while (i < _dispatch_data_num_records(dd)) {
		size_t record_len = dd->records[i].length - offset;
		if (record_len > bytes_left) {
			record_len = bytes_left;
		}
		dispatch_data_t subrange = dispatch_data_create_subrange(
				dd->records[i].data_object, dd->records[i].from + offset,
				record_len);
		dispatch_data_t concat = dispatch_data_create_concat(data, subrange);
		_dispatch_data_release(data);
		_dispatch_data_release(subrange);
		data = concat;
		bytes_left -= record_len;
		if (!bytes_left) {
			return data;
		}
		offset = 0;
		i++;
	}
	// Crashing here indicates memory corruption of passed in data object
	DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
}
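
// Subrange sketch (illustrative): a zero-copy view; requests past the end are
// clamped, and an empty or out-of-range request yields dispatch_data_empty.
//
//	dispatch_data_t view = dispatch_data_create_subrange(d, 100, 50);
//	// represents bytes [100, 150) of 'd' by referencing its buffers
//	dispatch_release(view);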

// When mapping a leaf object or a subrange of a leaf object, return a direct
// pointer to the represented buffer. For all other data objects, copy the
// represented buffers into a contiguous area. In the future it might
// be possible to relocate the buffers instead (if not marked as locked).
dispatch_data_t
dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr,
		size_t *size_ptr)
{
	dispatch_data_t data = dd;
	const void *buffer = NULL;
	size_t size = dd->size, offset = 0;
	if (!size) {
		data = dispatch_data_empty;
		goto out;
	}
	if (!_dispatch_data_leaf(dd) && _dispatch_data_num_records(dd) == 1 &&
			_dispatch_data_leaf(dd->records[0].data_object)) {
		offset = dd->records[0].from;
		dd = dd->records[0].data_object;
	}
	if (_dispatch_data_leaf(dd)) {
		_dispatch_data_retain(data);
		buffer = dd->buf + offset;
		goto out;
	}
	// Composite data object, copy the represented buffers
	buffer = malloc(size);
	if (!buffer) {
		data = NULL;
		size = 0;
		goto out;
	}
	dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
			size_t off, const void* buf, size_t len) {
		memcpy((void*)buffer + off, buf, len);
		return (bool)true;
	});
	data = dispatch_data_create(buffer, size, NULL,
			DISPATCH_DATA_DESTRUCTOR_FREE);
out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	if (size_ptr) {
		*size_ptr = size;
	}
	return data;
}
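
// Map sketch (illustrative): obtain a contiguous view of possibly fragmented
// data; the buffer stays valid for the lifetime of the returned object.
//
//	const void *ptr; size_t len;
//	dispatch_data_t map = dispatch_data_create_map(d, &ptr, &len);
//	if (map) {
//		// read ptr[0..len) here
//		dispatch_release(map); // invalidates ptr
//	}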

static bool
_dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from,
		size_t size, void *ctxt, dispatch_data_applier_function_t applier)
{
	bool result = true;
	dispatch_data_t data = dd;
	const void *buffer;
	dispatch_assert(dd->size);
	if (!_dispatch_data_leaf(dd) && _dispatch_data_num_records(dd) == 1 &&
			_dispatch_data_leaf(dd->records[0].data_object)) {
		from = dd->records[0].from;
		dd = dd->records[0].data_object;
	}
	if (_dispatch_data_leaf(dd)) {
		buffer = dd->buf + from;
		return _dispatch_client_callout3(ctxt, data, offset, buffer, size,
				applier);
	}
	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd) && result; ++i) {
		result = _dispatch_data_apply(dd->records[i].data_object,
				offset, dd->records[i].from, dd->records[i].length, ctxt,
				applier);
		offset += dd->records[i].length;
	}
	return result;
}

bool
dispatch_data_apply_f(dispatch_data_t dd, void *ctxt,
		dispatch_data_applier_function_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, ctxt, applier);
}

bool
dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, applier,
			(dispatch_data_applier_function_t)_dispatch_Block_invoke(applier));
}
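
// Apply sketch (illustrative): visit every contiguous region in order without
// flattening; returning false from the applier stops the traversal early.
//
//	dispatch_data_apply(d, ^bool(dispatch_data_t region, size_t off,
//			const void *buf, size_t len) {
//		// 'buf' points at 'len' bytes located at logical offset 'off'
//		return true; // continue with the next region
//	});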

// Returns either a leaf object or an object composed of a single leaf object
dispatch_data_t
dispatch_data_copy_region(dispatch_data_t dd, size_t location,
		size_t *offset_ptr)
{
	if (location >= dd->size) {
		// Request is out of bounds
		return dispatch_data_empty;
	}
	dispatch_data_t data;
	size_t size = dd->size, offset = 0, from = 0;
	while (true) {
		if (_dispatch_data_leaf(dd)) {
			_dispatch_data_retain(dd);
			*offset_ptr = offset;
			if (size == dd->size) {
				return dd;
			} else {
				// Create a new object for the requested subrange of the leaf
				data = _dispatch_data_alloc(1, 0);
				data->size = size;
				data->records[0].from = from;
				data->records[0].length = size;
				data->records[0].data_object = dd;
				return data;
			}
		} else {
			// Find record at the specified location
			size_t i, pos;
			for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
				pos = offset + dd->records[i].length;
				if (location < pos) {
					size = dd->records[i].length;
					from = dd->records[i].from;
					data = dd->records[i].data_object;
					if (_dispatch_data_num_records(dd) == 1 &&
							_dispatch_data_leaf(data)) {
						// Return objects composed of a single leaf node
						*offset_ptr = offset;
						_dispatch_data_retain(dd);
						return dd;
					}
					// Drill down into other objects
					dd = data;
					break;
				} else {
					offset = pos;
				}
			}
		}
	}
}
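
// Copy-region sketch (illustrative): retrieve the leaf-backed region that
// contains a given logical byte position.
//
//	size_t region_offset;
//	dispatch_data_t region = dispatch_data_copy_region(d, 1024,
//			&region_offset);
//	// 'region' starts at byte 'region_offset' of 'd' and contains byte 1024
//	dispatch_release(region);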

#if HAVE_MACH

#ifndef MAP_MEM_VM_COPY
#define MAP_MEM_VM_COPY 0x200000 // <rdar://problem/13336613>
#endif

mach_port_t
dispatch_data_make_memory_entry(dispatch_data_t dd)
{
	mach_port_t mep = MACH_PORT_NULL;
	memory_object_size_t mos;
	mach_vm_size_t vm_size = dd->size;
	mach_vm_address_t vm_addr;
	vm_prot_t flags;
	kern_return_t kr;
	bool copy = (dd->destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE);

retry:
	if (copy) {
		vm_addr = vm_page_size;
		kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size,
				VM_FLAGS_ANYWHERE);
		if (kr) {
			if (kr != KERN_NO_SPACE) {
				(void)dispatch_assume_zero(kr);
			}
			return mep;
		}
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy((void*)(vm_addr + off), buf, len);
			return (bool)true;
		});
	} else {
		vm_addr = (uintptr_t)dd->buf;
	}
	flags = VM_PROT_DEFAULT|VM_PROT_IS_MASK|MAP_MEM_VM_COPY;
	mos = vm_size;
	kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
			&mep, MACH_PORT_NULL);
	if (kr == KERN_INVALID_VALUE) {
		// Fallback in case MAP_MEM_VM_COPY is not supported
		flags &= ~MAP_MEM_VM_COPY;
		kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
				&mep, MACH_PORT_NULL);
	}
	if (dispatch_assume_zero(kr)) {
		mep = MACH_PORT_NULL;
	} else if (mos < vm_size) {
		// Memory object was truncated, e.g. due to lack of MAP_MEM_VM_COPY
		kr = mach_port_deallocate(mach_task_self(), mep);
		(void)dispatch_assume_zero(kr);
		if (!copy) {
			copy = true;
			goto retry;
		}
		mep = MACH_PORT_NULL;
	}
	if (copy) {
		kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
		(void)dispatch_assume_zero(kr);
	}
	return mep;
}

#endif // HAVE_MACH