2 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
4 * @APPLE_APACHE_LICENSE_HEADER_START@
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
18 * @APPLE_APACHE_LICENSE_HEADER_END@
23 // Dispatch data objects are dispatch objects with standard retain/release
24 // memory management. A dispatch data object either points to a number of other
25 // dispatch data objects or is a leaf data object. A leaf data object contains
26 // a pointer to represented memory. A composite data object specifies the total
27 // size of data it represents and list of constituent records.
29 // A leaf data object has a single entry in records[], the object size is the
30 // same as records[0].length and records[0].from is always 0. In other words, a
31 // leaf data object always points to a full represented buffer, so a composite
32 // dispatch data object is needed to represent a subrange of a memory region.
// Dispatch data objects use the standard dispatch object retain/release;
// these wrappers exist so call sites read as data-specific operations.
34 #define _dispatch_data_retain(x) dispatch_retain(x)
35 #define _dispatch_data_release(x) dispatch_release(x)
// Forward declarations for the vtable hooks wired up below.
// NOTE(review): the _dispatch_data_debug prototype is truncated in this copy
// of the file (its final size parameter and closing ');' are missing); the
// definition further down takes (dispatch_data_t, char*, size_t) -- confirm
// against the upstream source.
37 static void _dispatch_data_dispose(dispatch_data_t data
);
38 static size_t _dispatch_data_debug(dispatch_data_t data
, char* buf
,
41 #if DISPATCH_DATA_MOVABLE
// Sentinel destructor block used internally (see _dispatch_data_dispose) to
// mark wrapper objects that hold the underlying leaf's 'locked' count. It is
// only compared against by identity and must never actually be invoked --
// invoking it crashes deliberately.
// NOTE(review): the closing '};' of the block literal and the matching
// '#endif' are missing from this copy of the file.
42 static const dispatch_block_t _dispatch_data_destructor_unlock
= ^{
43 DISPATCH_CRASH("unlock destructor called");
45 #define DISPATCH_DATA_DESTRUCTOR_UNLOCK (_dispatch_data_destructor_unlock)
// Vtable shared by every dispatch data object: type tag plus the dispose and
// debug hooks declared above. do_probe points at dummy_function_r0, which is
// presumably a stub returning 0 -- its definition is not visible here.
// NOTE(review): the closing '};' of this initializer is missing from this
// copy of the file.
48 const struct dispatch_data_vtable_s _dispatch_data_vtable
= {
49 .do_type
= DISPATCH_DATA_TYPE
,
51 .do_dispose
= _dispatch_data_dispose
,
53 .do_probe
= (void *)dummy_function_r0
,
54 .do_debug
= _dispatch_data_debug
,
// Allocates and zero-initializes a dispatch data object with room for n
// trailing range_records (sizeof(struct dispatch_data_s) + n * record size),
// wires up the shared vtable, and targets the default-priority global queue.
// NOTE(review): several original lines (braces, 'return data;') are missing
// from this copy of the file; only the visible statements are annotated.
57 static dispatch_data_t
58 _dispatch_data_init(size_t n
)
// calloc guarantees the object and its record array start zeroed.
// NOTE(review): the calloc result is not checked before use in the visible
// lines -- confirm against the upstream source how OOM is handled.
60 dispatch_data_t data
= calloc(1ul, sizeof(struct dispatch_data_s
) +
61 n
* sizeof(range_record
));
62 data
->num_records
= n
;
// Route dispose/debug through the shared vtable defined above.
63 data
->do_vtable
= &_dispatch_data_vtable
;
64 data
->do_xref_cnt
= 1;
// New data objects target the default-priority global queue by default;
// dispatch_data_create may retarget to a caller-supplied queue.
66 data
->do_targetq
= dispatch_get_global_queue(
67 DISPATCH_QUEUE_PRIORITY_DEFAULT
, 0);
68 data
->do_next
= DISPATCH_OBJECT_LISTLESS
;
// Creates a leaf data object representing buffer[0..size). Ownership depends
// on destructor: DEFAULT copies the bytes into a malloc'd internal buffer
// (freed on dispose), FREE adopts the caller's buffer, and any other block
// destructor is Block_copy'd and later run on 'queue'.
// NOTE(review): this copy of the file is missing multiple original lines
// (the return-type line, opening braces, the DESTRUCTOR_FREE empty-data
// branch body -- presumably free((void*)buffer) -- and the final return);
// only the visible statements are annotated.
73 dispatch_data_create(const void* buffer
, size_t size
, dispatch_queue_t queue
,
74 dispatch_block_t destructor
)
77 if (!buffer
|| !size
) {
78 // Empty data requested so return the singleton empty object. Call
79 // destructor immediately in this case to ensure any unused associated
80 // storage is released.
81 if (destructor
== DISPATCH_DATA_DESTRUCTOR_FREE
) {
83 } else if (destructor
!= DISPATCH_DATA_DESTRUCTOR_DEFAULT
) {
// Custom block destructor: run it asynchronously on the caller's queue
// (or the default-priority global queue when none was given).
84 dispatch_async(queue
? queue
: dispatch_get_global_queue(
85 DISPATCH_QUEUE_PRIORITY_DEFAULT
, 0), destructor
);
87 return dispatch_data_empty
;
// Leaf objects use a single record covering the whole buffer (from == 0).
89 data
= _dispatch_data_init(1);
90 // Leaf objects always point to the entirety of the memory region
93 data
->records
[0].from
= 0;
94 data
->records
[0].length
= size
;
// Internally-owned buffers (DEFAULT copy and explicit FREE) both dispose
// via free(); only other custom destructors override this below.
95 data
->destructor
= DISPATCH_DATA_DESTRUCTOR_FREE
;
96 if (destructor
== DISPATCH_DATA_DESTRUCTOR_DEFAULT
) {
97 // The default destructor was provided, indicating the data should be
// copied into an internal buffer owned by the data object.
99 void *data_buf
= malloc(size
);
100 if (slowpath(!data_buf
)) {
104 buffer
= memcpy(data_buf
, buffer
, size
);
106 if (destructor
!= DISPATCH_DATA_DESTRUCTOR_FREE
) {
// Heap-copy the caller's destructor block so it outlives this call.
107 data
->destructor
= Block_copy(destructor
);
109 #if DISPATCH_DATA_MOVABLE
110 // A non-default destructor was provided, indicating the system does not
111 // own the buffer. Mark the object as locked since the application has
112 // direct access to the buffer and it cannot be reallocated/moved.
116 data
->records
[0].data_object
= (void*)buffer
;
// Retarget the object at the caller-supplied queue (retained) so the
// custom destructor later runs there.
118 _dispatch_retain(queue
);
119 data
->do_targetq
= queue
;
// Vtable dispose hook. Releases whatever the object owns based on its
// destructor: DEFAULT (composite) releases each constituent sub-object;
// UNLOCK (movable builds) drops the 'locked' count on the wrapped leaf and
// releases it; FREE frees the single owned buffer; any other destructor is a
// copied block that is invoked asynchronously on the target queue.
// NOTE(review): the return-type line, braces and loop-variable declaration
// are missing from this copy of the file.
125 _dispatch_data_dispose(dispatch_data_t dd
)
127 dispatch_block_t destructor
= dd
->destructor
;
128 if (destructor
== DISPATCH_DATA_DESTRUCTOR_DEFAULT
) {
// Composite object: each record references a retained sub-object.
130 for (i
= 0; i
< dd
->num_records
; ++i
) {
131 _dispatch_data_release(dd
->records
[i
].data_object
);
133 #if DISPATCH_DATA_MOVABLE
134 } else if (destructor
== DISPATCH_DATA_DESTRUCTOR_UNLOCK
) {
// Map/apply wrapper: undo the lock taken when the wrapper was created.
135 dispatch_data_t data
= (dispatch_data_t
)dd
->records
[0].data_object
;
136 (void)dispatch_atomic_dec2o(data
, locked
);
137 _dispatch_data_release(data
);
139 } else if (destructor
== DISPATCH_DATA_DESTRUCTOR_FREE
) {
140 free(dd
->records
[0].data_object
);
// Custom destructor block: run it on the object's target queue, then
// release the Block_copy made in dispatch_data_create.
142 dispatch_async_f(dd
->do_targetq
, destructor
,
143 _dispatch_call_block_and_release
);
145 _dispatch_dispose(dd
);
// Vtable debug hook: formats a human-readable description of dd into buf.
// Leaf objects print their backing pointer; composite objects print their
// record count followed by one line per constituent record.
// NOTE(review): the return-type line, the leaf/composite branch structure,
// loop-variable declarations and the final 'return offset;' are missing from
// this copy of the file; only the visible statements are annotated.
149 _dispatch_data_debug(dispatch_data_t dd
, char* buf
, size_t bufsiz
)
// Leaf case: single record pointing at the whole backing buffer.
153 offset
+= snprintf(&buf
[offset
], bufsiz
- offset
,
154 "leaf: %d, size: %zd, data: %p", dd
->leaf
, dd
->size
,
155 dd
->records
[0].data_object
);
// Composite case: summarize, then dump each constituent record.
157 offset
+= snprintf(&buf
[offset
], bufsiz
- offset
,
158 "leaf: %d, size: %zd, num_records: %zd", dd
->leaf
,
159 dd
->size
, dd
->num_records
);
161 for (i
= 0; i
< dd
->num_records
; ++i
) {
162 range_record r
= dd
->records
[i
];
163 offset
+= snprintf(&buf
[offset
], bufsiz
- offset
,
164 "records[%zd] from: %zd, length %zd, data_object: %p", i
,
165 r
.from
, r
.length
, r
.data_object
);
// Public accessor for the total number of bytes represented by dd.
// NOTE(review): only this signature fragment is visible in this copy of the
// file; the body (presumably 'return dd->size;') is missing -- confirm
// against the upstream source.
172 dispatch_data_get_size(dispatch_data_t dd
)
// Concatenates dd1 and dd2 into a new composite object: the new object's
// record array is the two source record arrays copied back to back, its size
// is the sum of both sizes, and every referenced sub-object is retained.
// NOTE(review): several original lines are missing from this copy of the
// file (the empty-operand fast paths around the retains at 182/186, the
// leaf-vs-composite branches around 198/201, the loop-variable declaration
// and the final 'return data;'); only the visible statements are annotated.
178 dispatch_data_create_concat(dispatch_data_t dd1
, dispatch_data_t dd2
)
180 dispatch_data_t data
;
// Fast paths (bodies partially missing here): when one operand is empty the
// other is retained and returned directly.
182 _dispatch_data_retain(dd2
);
186 _dispatch_data_retain(dd1
);
189 data
= _dispatch_data_init(dd1
->num_records
+ dd2
->num_records
);
190 data
->size
= dd1
->size
+ dd2
->size
;
191 // Copy the constituent records into the newly created data object
192 memcpy(data
->records
, dd1
->records
, dd1
->num_records
*
193 sizeof(range_record
));
194 memcpy(data
->records
+ dd1
->num_records
, dd2
->records
, dd2
->num_records
*
195 sizeof(range_record
));
196 // Reference leaf objects as sub-objects
// When a source is itself a leaf, its record must point at the leaf object
// rather than at the raw buffer the copied record referenced.
198 data
->records
[0].data_object
= dd1
;
201 data
->records
[dd1
->num_records
].data_object
= dd2
;
// Take a reference on every sub-object the new composite now points to.
204 for (i
= 0; i
< data
->num_records
; ++i
) {
205 _dispatch_data_retain(data
->records
[i
].data_object
);
// Returns a data object representing dd[offset..offset+length). Fast paths:
// out-of-range offset or zero length yields the empty singleton; a request
// past the end is clamped; the full range retains and returns dd itself.
// A subrange of a leaf becomes a one-record composite referencing the leaf;
// a subrange of a composite is built by walking to the record containing
// 'offset' and concatenating per-record subranges until the requested byte
// count is satisfied.
// NOTE(review): several original lines are missing from this copy of the
// file (the 'length' parameter on the signature, braces, leaf-branch
// structure, the recursive call's length argument, loop increments and the
// returns); only the visible statements are annotated.
211 dispatch_data_create_subrange(dispatch_data_t dd
, size_t offset
,
214 dispatch_data_t data
;
215 if (offset
>= dd
->size
|| !length
) {
216 return dispatch_data_empty
;
217 } else if ((offset
+ length
) > dd
->size
) {
// Clamp requests that run past the end of dd.
218 length
= dd
->size
- offset
;
219 } else if (length
== dd
->size
) {
// Whole-object subrange: no new object needed, share dd.
220 _dispatch_data_retain(dd
);
// Leaf case: wrap the leaf in a one-record composite describing the range.
224 data
= _dispatch_data_init(1);
226 data
->records
[0].from
= offset
;
227 data
->records
[0].length
= length
;
228 data
->records
[0].data_object
= dd
;
229 _dispatch_data_retain(dd
);
232 // Subrange of a composite dispatch data object: find the record containing
233 // the specified offset
234 data
= dispatch_data_empty
;
235 size_t i
= 0, bytes_left
= length
;
// Skip whole records that lie entirely before 'offset'.
236 while (i
< dd
->num_records
&& offset
>= dd
->records
[i
].length
) {
237 offset
-= dd
->records
[i
++].length
;
// Accumulate per-record subranges until 'length' bytes are covered.
239 while (i
< dd
->num_records
) {
240 size_t record_len
= dd
->records
[i
].length
- offset
;
241 if (record_len
> bytes_left
) {
242 record_len
= bytes_left
;
// Recurse into the sub-object, then fold the result into the running
// concatenation, releasing the intermediates each iteration.
244 dispatch_data_t subrange
= dispatch_data_create_subrange(
245 dd
->records
[i
].data_object
, dd
->records
[i
].from
+ offset
,
247 dispatch_data_t concat
= dispatch_data_create_concat(data
, subrange
);
248 _dispatch_data_release(data
);
249 _dispatch_data_release(subrange
);
251 bytes_left
-= record_len
;
// Falling out of the loop with bytes still owed means dd's records no
// longer sum to dd->size:
258 // Crashing here indicates memory corruption of passed in data object
259 DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
263 // When mapping a leaf object or a subrange of a leaf object, return a direct
264 // pointer to the represented buffer. For all other data objects, copy the
265 // represented buffers into a contiguous area. In the future it might
266 // be possible to relocate the buffers instead (if not marked as locked).
// NOTE(review): several original lines are missing from this copy of the
// file (the signature's size_t* parameter, braces, empty/error branches and
// the final return); only the visible statements are annotated.
268 dispatch_data_create_map(dispatch_data_t dd
, const void **buffer_ptr
,
271 dispatch_data_t data
= dd
;
273 size_t size
= dd
->size
, offset
= 0;
// Empty input maps to the empty singleton.
275 data
= dispatch_data_empty
;
// Unwrap a one-record composite whose sole constituent is a leaf, so the
// direct-pointer fast path below also covers leaf subranges.
278 if (!dd
->leaf
&& dd
->num_records
== 1 &&
279 ((dispatch_data_t
)dd
->records
[0].data_object
)->leaf
) {
280 offset
= dd
->records
[0].from
;
281 dd
= (dispatch_data_t
)(dd
->records
[0].data_object
);
284 #if DISPATCH_DATA_MOVABLE
// Movable builds: hand back a wrapper object that pins ('locked') the leaf
// buffer for the lifetime of the map; disposing the wrapper unlocks it via
// DISPATCH_DATA_DESTRUCTOR_UNLOCK (see _dispatch_data_dispose).
285 data
= _dispatch_data_init(1);
286 // Make sure the underlying leaf object does not move the backing buffer
287 (void)dispatch_atomic_inc2o(dd
, locked
);
289 data
->destructor
= DISPATCH_DATA_DESTRUCTOR_UNLOCK
;
290 data
->records
[0].data_object
= dd
;
291 data
->records
[0].from
= offset
;
292 data
->records
[0].length
= size
;
293 _dispatch_data_retain(dd
);
// Non-movable builds: the mapped object is dd itself, retained.
295 _dispatch_data_retain(data
);
// Leaf fast path: point directly into the leaf's backing buffer.
297 buffer
= dd
->records
[0].data_object
+ offset
;
300 // Composite data object, copy the represented buffers
301 buffer
= malloc(size
);
// Flatten: copy every region into the contiguous malloc'd buffer.
307 dispatch_data_apply(dd
, ^(dispatch_data_t region DISPATCH_UNUSED
,
308 size_t off
, const void* buf
, size_t len
) {
309 memcpy(buffer
+ off
, buf
, len
);
// The flattened copy becomes a new leaf that owns (frees) the buffer.
312 data
= dispatch_data_create(buffer
, size
, NULL
,
313 DISPATCH_DATA_DESTRUCTOR_FREE
);
316 *buffer_ptr
= buffer
;
// Recursive worker for dispatch_data_apply: invokes 'applier' once per
// contiguous leaf region. 'offset' is the logical position of this object in
// the outermost object; 'from'/'size' select the byte range of dd to visit.
// Recursion stops early as soon as the applier returns false.
// NOTE(review): several original lines are missing from this copy of the
// file (the return-type line, braces, leaf-test branches, the recursive
// call's applier argument and the final return); only the visible
// statements are annotated.
325 _dispatch_data_apply(dispatch_data_t dd
, size_t offset
, size_t from
,
326 size_t size
, dispatch_data_applier_t applier
)
329 dispatch_data_t data
= dd
;
331 dispatch_assert(dd
->size
);
332 #if DISPATCH_DATA_MOVABLE
// Movable builds, leaf case: pin the leaf buffer with a locked wrapper for
// the duration of the applier call (mirrors dispatch_data_create_map).
334 data
= _dispatch_data_init(1);
335 // Make sure the underlying leaf object does not move the backing buffer
336 (void)dispatch_atomic_inc2o(dd
, locked
);
338 data
->destructor
= DISPATCH_DATA_DESTRUCTOR_UNLOCK
;
339 data
->records
[0].data_object
= dd
;
340 data
->records
[0].from
= from
;
341 data
->records
[0].length
= size
;
342 _dispatch_data_retain(dd
);
343 buffer
= dd
->records
[0].data_object
+ from
;
344 result
= applier(data
, offset
, buffer
, size
);
// Disposing the wrapper drops the lock taken above.
345 _dispatch_data_release(data
);
// Unwrap a one-record composite whose sole constituent is a leaf, so the
// direct leaf path below also covers leaf subranges.
349 if (!dd
->leaf
&& dd
->num_records
== 1 &&
350 ((dispatch_data_t
)dd
->records
[0].data_object
)->leaf
) {
351 from
= dd
->records
[0].from
;
352 dd
= (dispatch_data_t
)(dd
->records
[0].data_object
);
// Leaf: hand the region's buffer straight to the applier.
355 buffer
= dd
->records
[0].data_object
+ from
;
356 return applier(data
, offset
, buffer
, size
);
// Composite: recurse into each record, advancing the logical offset and
// stopping as soon as an applier invocation returns false.
360 for (i
= 0; i
< dd
->num_records
&& result
; ++i
) {
361 result
= _dispatch_data_apply(dd
->records
[i
].data_object
,
362 offset
, dd
->records
[i
].from
, dd
->records
[i
].length
,
364 offset
+= dd
->records
[i
].length
;
// Public entry point: applies 'applier' to every contiguous region of dd by
// delegating to the recursive worker, starting at logical offset 0 and
// covering the full dd->size bytes.
// NOTE(review): the return-type line, braces and the guard lines between
// the signature and the return are missing from this copy of the file.
370 dispatch_data_apply(dispatch_data_t dd
, dispatch_data_applier_t applier
)
375 return _dispatch_data_apply(dd
, 0, 0, dd
->size
, applier
);
378 // Returns either a leaf object or an object composed of a single leaf object
380 dispatch_data_copy_region(dispatch_data_t dd
, size_t location
,
383 if (location
>= dd
->size
) {
385 return dispatch_data_empty
;
387 dispatch_data_t data
;
388 size_t size
= dd
->size
, offset
= 0, from
= 0;
391 _dispatch_data_retain(dd
);
392 *offset_ptr
= offset
;
393 if (size
== dd
->size
) {
396 // Create a new object for the requested subrange of the leaf
397 data
= _dispatch_data_init(1);
399 data
->records
[0].from
= from
;
400 data
->records
[0].length
= size
;
401 data
->records
[0].data_object
= dd
;
405 // Find record at the specified location
407 for (i
= 0; i
< dd
->num_records
; ++i
) {
408 pos
= offset
+ dd
->records
[i
].length
;
409 if (location
< pos
) {
410 size
= dd
->records
[i
].length
;
411 from
= dd
->records
[i
].from
;
412 data
= (dispatch_data_t
)(dd
->records
[i
].data_object
);
413 if (dd
->num_records
== 1 && data
->leaf
) {
414 // Return objects composed of a single leaf node
415 *offset_ptr
= offset
;
416 _dispatch_data_retain(dd
);
419 // Drill down into other objects