/*
 * Copyright (c) 2009-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"

/*
 * Dispatch data objects are dispatch objects with standard retain/release
 * memory management. A dispatch data object either points to a number of other
 * dispatch data objects or is a leaf data object.
 * A composite data object specifies the total size of data it represents
 * and a list of constituent records.
 *
 *******************************************************************************
 *
 * CURRENT IMPLEMENTATION DETAILS
 *
 * There are actually 3 kinds of composite objects:
 * - trivial subranges
 * - unflattened composite data objects
 * - flattened composite data objects
 *
 * LEAVES (num_records == 0, destructor != nil)
 *
 * These objects have a pointer to the represented memory in `buf`.
 *
 * UNFLATTENED (num_records > 1, buf == nil, destructor == nil)
 *
 * This is the generic case of a composite object.
 *
 * FLATTENED (num_records > 1, buf != nil, destructor == nil)
 *
 * These objects are non-trivial composite objects whose `buf` pointer
 * is a contiguous (copied) representation of the memory they represent.
 *
 * Such objects are created when an unflattened composite data object is
 * used as an NSData and -bytes is called on it.
 * The underlying implementation is _dispatch_data_get_flattened_bytes().
 *
 * TRIVIAL SUBRANGES (num_records == 1, buf == nil, destructor == nil)
 *
 * These objects point to a single leaf, never to flattened objects.
 *
 *******************************************************************************
 *
 * Non-trivial invariants:
 *
 * It is forbidden to point into a composite data object and ignore entire
 * records from it (for example by having `from` larger than the first
 * record's length).
 *
 * dispatch_data_t's are either leaves, or composite objects pointing to
 * leaves. Depth is never greater than 1.
 *
 *******************************************************************************
 *
 * There are 4 dispatch_data_t constructors that may create non-leaf objects,
 * and they ensure the proper invariants.
 *
 * dispatch_data_copy_region()
 *    This function first sees through trivial subranges, and may in turn
 *    generate new trivial subranges.
 *
 * dispatch_data_create_map()
 *    This function either returns existing data objects, or a leaf.
 *
 * dispatch_data_create_subrange()
 *    This function treats flattened objects like unflattened ones,
 *    and recurses into trivial subranges; it can create trivial subranges.
 *
 * dispatch_data_create_concat()
 *    This function unwraps the top-level composite objects, trivial or not,
 *    and concatenates the two arguments' range lists, hence always creating
 *    unflattened objects, unless one of the arguments was empty.
 *
 *******************************************************************************
 */
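
/*
 * Illustrative sketch (not part of this file's build): how the public
 * constructors map onto the object kinds described above. Uses only the
 * public <dispatch/data.h> API; the function name is hypothetical.
 */
#if 0
static void
_dispatch_data_kinds_example(void)
{
	char bytes[16] = { 0 };
	// leaf: the default destructor copies the bytes into an owned buffer
	dispatch_data_t leaf = dispatch_data_create(bytes, sizeof(bytes), NULL,
			DISPATCH_DATA_DESTRUCTOR_DEFAULT);
	// unflattened composite: two records, each referencing the leaf
	dispatch_data_t cat = dispatch_data_create_concat(leaf, leaf);
	// trivial subrange: a single record pointing into the leaf
	dispatch_data_t sub = dispatch_data_create_subrange(leaf, 4, 8);
	dispatch_release(sub);
	dispatch_release(cat);
	dispatch_release(leaf);
}
#endif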

#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
#define _dispatch_data_retain(x) _dispatch_objc_retain(x)
#define _dispatch_data_release(x) _dispatch_objc_release(x)
#else
#define _dispatch_data_retain(x) dispatch_retain(x)
#define _dispatch_data_release(x) dispatch_release(x)
#endif

const dispatch_block_t _dispatch_data_destructor_free = ^{
	DISPATCH_INTERNAL_CRASH(0, "free destructor called");
};

const dispatch_block_t _dispatch_data_destructor_none = ^{
	DISPATCH_INTERNAL_CRASH(0, "none destructor called");
};

#if !HAVE_MACH
const dispatch_block_t _dispatch_data_destructor_munmap = ^{
	DISPATCH_INTERNAL_CRASH(0, "munmap destructor called");
};
#else
// _dispatch_data_destructor_munmap is a linker alias to the following
const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{
	DISPATCH_INTERNAL_CRASH(0, "vmdeallocate destructor called");
};
#endif

const dispatch_block_t _dispatch_data_destructor_inline = ^{
	DISPATCH_INTERNAL_CRASH(0, "inline destructor called");
};

struct dispatch_data_s _dispatch_data_empty = {
#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
	.do_vtable = DISPATCH_DATA_EMPTY_CLASS,
#else
	DISPATCH_GLOBAL_OBJECT_HEADER(data),
	.do_next = DISPATCH_OBJECT_LISTLESS,
#endif
};

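// Allocates a dispatch data object with room for `n` range records plus
// `extra` bytes of inline storage immediately after the object header.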
DISPATCH_ALWAYS_INLINE
static inline dispatch_data_t
_dispatch_data_alloc(size_t n, size_t extra)
{
	dispatch_data_t data;
	size_t size;

	if (os_mul_and_add_overflow(n, sizeof(range_record),
			sizeof(struct dispatch_data_s) + extra, &size)) {
		return DISPATCH_OUT_OF_MEMORY;
	}

	data = _dispatch_alloc(DISPATCH_DATA_CLASS, size);
	data->num_records = n;
#if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
	data->do_targetq = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	data->do_next = DISPATCH_OBJECT_LISTLESS;
#endif
	return data;
}

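// Releases the memory represented by a leaf object: the known destructor
// sentinels are handled inline, any other destructor block is invoked
// asynchronously on `queue` (or the default-priority global queue) and
// then released.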
static void
_dispatch_data_destroy_buffer(const void* buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
		free((void*)buffer);
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) {
		// do nothing
#if HAVE_MACH
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) {
		mach_vm_size_t vm_size = size;
		mach_vm_address_t vm_addr = (uintptr_t)buffer;
		mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
#endif
	} else {
		if (!queue) {
			queue = dispatch_get_global_queue(
					DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
		}
		dispatch_async_f(queue, destructor, _dispatch_call_block_and_release);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	data->buf = buffer;
	data->size = size;
	data->destructor = destructor;
	if (queue) {
		_dispatch_retain(queue);
		data->do_targetq = queue;
	}
}

void
dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_block_t destructor)
{
	if (!buffer || !size) {
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, NULL,
					_dispatch_Block_copy(destructor));
		}
		buffer = NULL;
		size = 0;
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	}
	_dispatch_data_init(data, buffer, size, NULL, destructor);
}

dispatch_data_t
dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue,
		dispatch_block_t destructor)
{
	dispatch_data_t data;
	void *data_buf = NULL;
	if (!buffer || !size) {
		// Empty data requested so return the singleton empty object. Call
		// destructor immediately in this case to ensure any unused associated
		// storage is released.
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, queue,
					_dispatch_Block_copy(destructor));
		}
		return dispatch_data_empty;
	}
	if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
		// The default destructor was provided, indicating the data should be
		// copied.
		data_buf = malloc(size);
		if (slowpath(!data_buf)) {
			return DISPATCH_OUT_OF_MEMORY;
		}
		buffer = memcpy(data_buf, buffer, size);
		data = _dispatch_data_alloc(0, 0);
		destructor = DISPATCH_DATA_DESTRUCTOR_FREE;
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_INLINE) {
		data = _dispatch_data_alloc(0, size);
		buffer = memcpy((void*)data + sizeof(struct dispatch_data_s), buffer,
				size);
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	} else {
		data = _dispatch_data_alloc(0, 0);
		destructor = _dispatch_Block_copy(destructor);
	}
	_dispatch_data_init(data, buffer, size, queue, destructor);
	return data;
}
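
/*
 * Illustrative sketch (not part of this file's build): the destructor
 * argument selects the ownership model implemented above. The function
 * name is hypothetical.
 */
#if 0
static void
_dispatch_data_create_example(void)
{
	char stack_buf[32] = { 0 };
	// DESTRUCTOR_DEFAULT: the bytes are copied, the caller keeps ownership
	dispatch_data_t copied = dispatch_data_create(stack_buf,
			sizeof(stack_buf), NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT);
	// custom destructor block: the data object takes ownership of heap_buf
	void *heap_buf = malloc(64);
	dispatch_data_t owned = dispatch_data_create(heap_buf, 64, NULL,
			^{ free(heap_buf); });
	dispatch_release(owned);
	dispatch_release(copied);
}
#endif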

dispatch_data_t
dispatch_data_create_f(const void *buffer, size_t size, dispatch_queue_t queue,
		dispatch_function_t destructor_function)
{
	dispatch_block_t destructor = (dispatch_block_t)destructor_function;
	if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT &&
			destructor != DISPATCH_DATA_DESTRUCTOR_FREE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_NONE &&
#if HAVE_MACH
			destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE &&
#endif
			destructor != DISPATCH_DATA_DESTRUCTOR_INLINE) {
		destructor = ^{ destructor_function((void*)buffer); };
	}
	return dispatch_data_create(buffer, size, queue, destructor);
}
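
/*
 * Illustrative sketch (not part of this file's build): a plain C function
 * passed as the destructor is wrapped in a block that receives the buffer
 * pointer. The function name is hypothetical.
 */
#if 0
static void
_dispatch_data_create_f_example(void)
{
	void *buf = malloc(128);
	// free() is called with `buf` once the data object is released
	dispatch_data_t data = dispatch_data_create_f(buf, 128, NULL, free);
	dispatch_release(data);
}
#endif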

dispatch_data_t
dispatch_data_create_alloc(size_t size, void** buffer_ptr)
{
	dispatch_data_t data = dispatch_data_empty;
	void *buffer = NULL;

	if (slowpath(!size)) {
		goto out;
	}
	data = _dispatch_data_alloc(0, size);
	buffer = (void*)data + sizeof(struct dispatch_data_s);
	_dispatch_data_init(data, buffer, size, NULL,
			DISPATCH_DATA_DESTRUCTOR_NONE);
out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	return data;
}
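
/*
 * Illustrative sketch (not part of this file's build): the returned leaf
 * owns `size` writable bytes stored inline after its header. The function
 * name is hypothetical.
 */
#if 0
static dispatch_data_t
_dispatch_data_create_alloc_example(void)
{
	void *bytes;
	dispatch_data_t data = dispatch_data_create_alloc(1024, &bytes);
	memset(bytes, 0, 1024); // fill the inline buffer in place
	return data;
}
#endif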

void
_dispatch_data_dispose(dispatch_data_t dd)
{
	if (_dispatch_data_leaf(dd)) {
		_dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq,
				dd->destructor);
	} else {
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			_dispatch_data_release(dd->records[i].data_object);
		}
		free((void *)dd->buf);
	}
}

size_t
_dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz)
{
	size_t offset = 0;
	offset += dsnprintf(&buf[offset], bufsiz - offset, "data[%p] = { ", dd);
	if (_dispatch_data_leaf(dd)) {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"leaf, size = %zd, buf = %p ", dd->size, dd->buf);
	} else {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"composite, size = %zd, num_records = %zd ", dd->size,
				_dispatch_data_num_records(dd));
		if (dd->buf) {
			offset += dsnprintf(&buf[offset], bufsiz - offset,
					", flatbuf = %p ", dd->buf);
		}
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			range_record r = dd->records[i];
			offset += dsnprintf(&buf[offset], bufsiz - offset, "record[%zd] = "
					"{ from = %zd, length = %zd, data_object = %p }, ", i,
					r.from, r.length, r.data_object);
		}
	}
	offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
	return offset;
}

size_t
dispatch_data_get_size(dispatch_data_t dd)
{
	return dd->size;
}

dispatch_data_t
dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2)
{
	dispatch_data_t data;
	size_t n;

	if (!dd1->size) {
		_dispatch_data_retain(dd2);
		return dd2;
	}
	if (!dd2->size) {
		_dispatch_data_retain(dd1);
		return dd1;
	}

	if (os_add_overflow(_dispatch_data_num_records(dd1),
			_dispatch_data_num_records(dd2), &n)) {
		return DISPATCH_OUT_OF_MEMORY;
	}
	data = _dispatch_data_alloc(n, 0);
	data->size = dd1->size + dd2->size;
	// Copy the constituent records into the newly created data object
	// Reference leaf objects as sub-objects
	if (_dispatch_data_leaf(dd1)) {
		data->records[0].from = 0;
		data->records[0].length = dd1->size;
		data->records[0].data_object = dd1;
	} else {
		memcpy(data->records, dd1->records, _dispatch_data_num_records(dd1) *
				sizeof(range_record));
	}
	if (_dispatch_data_leaf(dd2)) {
		data->records[_dispatch_data_num_records(dd1)].from = 0;
		data->records[_dispatch_data_num_records(dd1)].length = dd2->size;
		data->records[_dispatch_data_num_records(dd1)].data_object = dd2;
	} else {
		memcpy(data->records + _dispatch_data_num_records(dd1), dd2->records,
				_dispatch_data_num_records(dd2) * sizeof(range_record));
	}
	size_t i;
	for (i = 0; i < _dispatch_data_num_records(data); ++i) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}
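
/*
 * Illustrative sketch (not part of this file's build): concatenation never
 * copies bytes, it only appends range records that retain the sub-objects.
 * The function name is hypothetical.
 */
#if 0
static dispatch_data_t
_dispatch_data_accumulate_example(dispatch_data_t *chunks, size_t n)
{
	dispatch_data_t acc = dispatch_data_empty; // immortal singleton
	for (size_t i = 0; i < n; i++) {
		dispatch_data_t next = dispatch_data_create_concat(acc, chunks[i]);
		if (acc != dispatch_data_empty) dispatch_release(acc);
		acc = next;
	}
	return acc; // unflattened composite once two or more chunks are appended
}
#endif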

dispatch_data_t
dispatch_data_create_subrange(dispatch_data_t dd, size_t offset,
		size_t length)
{
	dispatch_data_t data;

	if (offset >= dd->size || !length) {
		return dispatch_data_empty;
	} else if (length > dd->size - offset) {
		length = dd->size - offset;
	} else if (length == dd->size) {
		_dispatch_data_retain(dd);
		return dd;
	}
	/*
	 * We must only optimize leaves and not flattened objects,
	 * because many users want to keep the end of a buffer and release
	 * as much memory as they can from the beginning of it.
	 *
	 * Using the flatbuf here would be very wrong with respect to that goal.
	 */
	if (_dispatch_data_leaf(dd)) {
		data = _dispatch_data_alloc(1, 0);
		data->size = length;
		data->records[0].from = offset;
		data->records[0].length = length;
		data->records[0].data_object = dd;
		_dispatch_data_retain(dd);
		return data;
	}

	// Subrange of a composite dispatch data object
	const size_t dd_num_records = _dispatch_data_num_records(dd);
	bool to_the_end = (offset + length == dd->size);
	size_t i = 0;

	// find the record containing the specified offset
	while (i < dd_num_records && offset >= dd->records[i].length) {
		offset -= dd->records[i++].length;
	}

	// Crashing here indicates memory corruption of passed in data object
	if (slowpath(i >= dd_num_records)) {
		DISPATCH_INTERNAL_CRASH(i,
				"dispatch_data_create_subrange out of bounds");
	}

	// if everything is from a single dispatch data object, avoid boxing it
	if (offset + length <= dd->records[i].length) {
		return dispatch_data_create_subrange(dd->records[i].data_object,
				dd->records[i].from + offset, length);
	}

	// find the record containing the end of the current range
	// and optimize the case where only bytes at the start are removed
	size_t count, last_length;

	if (to_the_end) {
		count = dd_num_records - i;
	} else {
		last_length = length - (dd->records[i].length - offset);
		count = 1;

		while (i + count < dd_num_records) {
			size_t record_length = dd->records[i + count++].length;

			if (last_length <= record_length) {
				break;
			}
			last_length -= record_length;

			// Crashing here indicates memory corruption of passed in data object
			if (slowpath(i + count >= dd_num_records)) {
				DISPATCH_INTERNAL_CRASH(i + count,
						"dispatch_data_create_subrange out of bounds");
			}
		}
	}

	data = _dispatch_data_alloc(count, 0);
	data->size = length;
	memcpy(data->records, dd->records + i, count * sizeof(range_record));

	if (offset) {
		data->records[0].from += offset;
		data->records[0].length -= offset;
	}
	if (!to_the_end) {
		data->records[count - 1].length = last_length;
	}

	for (i = 0; i < count; i++) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}
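
/*
 * Illustrative sketch (not part of this file's build): dropping a consumed
 * prefix keeps only the records that are still needed, so leaves that are
 * no longer referenced can be released once the caller releases `dd`.
 * The function name is hypothetical.
 */
#if 0
static dispatch_data_t
_dispatch_data_drop_prefix_example(dispatch_data_t dd, size_t consumed)
{
	size_t remaining = dispatch_data_get_size(dd) - consumed;
	return dispatch_data_create_subrange(dd, consumed, remaining);
}
#endif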

static void*
_dispatch_data_flatten(dispatch_data_t dd)
{
	void *buffer = malloc(dd->size);

	// Composite data object, copy the represented buffers
	if (buffer) {
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy(buffer + off, buf, len);
			return (bool)true;
		});
	}

	return buffer;
}

// When mapping a leaf object or a subrange of a leaf object, return a direct
// pointer to the represented buffer. For all other data objects, copy the
// represented buffers into a contiguous area. In the future it might
// be possible to relocate the buffers instead (if not marked as locked).
dispatch_data_t
dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr,
		size_t *size_ptr)
{
	dispatch_data_t data = NULL;
	const void *buffer = NULL;
	size_t size = dd->size;

	if (!size) {
		data = dispatch_data_empty;
		goto out;
	}

	buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
	if (buffer) {
		_dispatch_data_retain(dd);
		data = dd;
		goto out;
	}

	buffer = _dispatch_data_flatten(dd);
	if (fastpath(buffer)) {
		data = dispatch_data_create(buffer, size, NULL,
				DISPATCH_DATA_DESTRUCTOR_FREE);
	} else {
		size = 0;
	}

out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	if (size_ptr) {
		*size_ptr = size;
	}
	return data;
}
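
/*
 * Illustrative sketch (not part of this file's build): the mapped object
 * owns the contiguous buffer, so it must be retained for as long as the
 * returned pointer is used. The function name is hypothetical.
 */
#if 0
static void
_dispatch_data_map_example(dispatch_data_t dd)
{
	const void *bytes;
	size_t size;
	dispatch_data_t map = dispatch_data_create_map(dd, &bytes, &size);
	if (map) {
		// use the `size` contiguous bytes at `bytes`, then release the map
		dispatch_release(map);
	}
}
#endif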

const void *
_dispatch_data_get_flattened_bytes(dispatch_data_t dd)
{
	const void *buffer;
	size_t offset = 0;

	if (slowpath(!dd->size)) {
		return NULL;
	}

	buffer = _dispatch_data_map_direct(dd, 0, &dd, &offset);
	if (buffer) {
		return buffer;
	}

	void *flatbuf = _dispatch_data_flatten(dd);
	if (fastpath(flatbuf)) {
		// we need a release so that readers see the content of the buffer
		if (slowpath(!os_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf,
				&buffer, release))) {
			free(flatbuf);
		} else {
			buffer = flatbuf;
		}
	} else {
		return NULL;
	}

	return buffer + offset;
}

#if DISPATCH_USE_CLIENT_CALLOUT
DISPATCH_NOINLINE
#else
DISPATCH_ALWAYS_INLINE
#endif
static bool
_dispatch_data_apply_client_callout(void *ctxt, dispatch_data_t region, size_t offset,
		const void *buffer, size_t size, dispatch_data_applier_function_t f)
{
	return f(ctxt, region, offset, buffer, size);
}


static bool
_dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from,
		size_t size, void *ctxt, dispatch_data_applier_function_t applier)
{
	bool result = true;
	const void *buffer;

	buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
	if (buffer) {
		return _dispatch_data_apply_client_callout(ctxt, dd,
				offset, buffer + from, size, applier);
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd) && result; ++i) {
		result = _dispatch_data_apply(dd->records[i].data_object,
				offset, dd->records[i].from, dd->records[i].length, ctxt,
				applier);
		offset += dd->records[i].length;
	}
	return result;
}

bool
dispatch_data_apply_f(dispatch_data_t dd, void *ctxt,
		dispatch_data_applier_function_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, ctxt, applier);
}

bool
dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, applier,
			(dispatch_data_applier_function_t)_dispatch_Block_invoke(applier));
}
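
/*
 * Illustrative sketch (not part of this file's build): iterating the
 * contiguous regions of a (possibly composite) data object without
 * flattening it. The function name is hypothetical.
 */
#if 0
static size_t
_dispatch_data_count_nuls_example(dispatch_data_t dd)
{
	__block size_t nuls = 0;
	dispatch_data_apply(dd, ^bool(dispatch_data_t region DISPATCH_UNUSED,
			size_t offset DISPATCH_UNUSED, const void *buffer, size_t size) {
		const char *p = buffer;
		for (size_t i = 0; i < size; i++) {
			if (p[i] == '\0') nuls++;
		}
		return true; // keep iterating
	});
	return nuls;
}
#endif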

static dispatch_data_t
_dispatch_data_copy_region(dispatch_data_t dd, size_t from, size_t size,
		size_t location, size_t *offset_ptr)
{
	dispatch_data_t reusable_dd = NULL;
	size_t offset = 0;

	if (from == 0 && size == dd->size) {
		reusable_dd = dd;
	}

	if (_dispatch_data_map_direct(dd, from, &dd, &from)) {
		if (reusable_dd) {
			_dispatch_data_retain(reusable_dd);
			return reusable_dd;
		}

		_dispatch_data_retain(dd);
		if (from == 0 && size == dd->size) {
			return dd;
		}

		dispatch_data_t data = _dispatch_data_alloc(1, 0);
		data->size = size;
		data->records[0].from = from;
		data->records[0].length = size;
		data->records[0].data_object = dd;
		return data;
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
		size_t length = dd->records[i].length;

		if (from >= length) {
			from -= length;
			continue;
		}

		length -= from;
		if (location >= offset + length) {
			offset += length;
			from = 0;
			continue;
		}

		from += dd->records[i].from;
		dd = dd->records[i].data_object;
		*offset_ptr += offset;
		location -= offset;
		return _dispatch_data_copy_region(dd, from, length, location, offset_ptr);
	}

	DISPATCH_INTERNAL_CRASH(*offset_ptr+offset,
			"dispatch_data_copy_region out of bounds");
}

// Returns either a leaf object or an object composed of a single leaf object
dispatch_data_t
dispatch_data_copy_region(dispatch_data_t dd, size_t location,
		size_t *offset_ptr)
{
	if (location >= dd->size) {
		*offset_ptr = dd->size;
		return dispatch_data_empty;
	}
	*offset_ptr = 0;
	return _dispatch_data_copy_region(dd, 0, dd->size, location, offset_ptr);
}
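
/*
 * Illustrative sketch (not part of this file's build): finding the region
 * that contains a given byte offset and where that region starts within
 * `dd`. The function name is hypothetical.
 */
#if 0
static void
_dispatch_data_copy_region_example(dispatch_data_t dd, size_t location)
{
	size_t region_start;
	dispatch_data_t region = dispatch_data_copy_region(dd, location,
			&region_start);
	// `region` covers the bytes of `dd` starting at offset `region_start`
	dispatch_release(region);
}
#endif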

#if HAVE_MACH

#ifndef MAP_MEM_VM_COPY
#define MAP_MEM_VM_COPY 0x200000 // <rdar://problem/13336613>
#endif

mach_port_t
dispatch_data_make_memory_entry(dispatch_data_t dd)
{
	mach_port_t mep = MACH_PORT_NULL;
	memory_object_size_t mos;
	mach_vm_size_t vm_size = dd->size;
	mach_vm_address_t vm_addr;
	vm_prot_t flags;
	kern_return_t kr;
	bool copy = (dd->destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE);

retry:
	if (copy) {
		vm_addr = vm_page_size;
		kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size,
				VM_FLAGS_ANYWHERE);
		if (kr) {
			if (kr != KERN_NO_SPACE) {
				(void)dispatch_assume_zero(kr);
			}
			return mep;
		}
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy((void*)(vm_addr + off), buf, len);
			return (bool)true;
		});
	} else {
		vm_addr = (uintptr_t)dd->buf;
	}
	flags = VM_PROT_DEFAULT|VM_PROT_IS_MASK|MAP_MEM_VM_COPY;
	mos = vm_size;
	kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
			&mep, MACH_PORT_NULL);
	if (kr == KERN_INVALID_VALUE) {
		// Fallback in case MAP_MEM_VM_COPY is not supported
		flags &= ~MAP_MEM_VM_COPY;
		kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
				&mep, MACH_PORT_NULL);
	}
	if (dispatch_assume_zero(kr)) {
		mep = MACH_PORT_NULL;
	} else if (mos < vm_size) {
		// Memory object was truncated, e.g. due to lack of MAP_MEM_VM_COPY
		kr = mach_port_deallocate(mach_task_self(), mep);
		(void)dispatch_assume_zero(kr);
		if (!copy) {
			copy = true;
			goto retry;
		}
		mep = MACH_PORT_NULL;
	}
	if (copy) {
		kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
		(void)dispatch_assume_zero(kr);
	}
	return mep;
}
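
/*
 * Illustrative sketch (not part of this file's build): mapping the returned
 * memory entry into the current task with mach_vm_map(). The function name
 * is hypothetical.
 */
#if 0
static void *
_dispatch_data_map_memory_entry_example(dispatch_data_t dd)
{
	mach_port_t mep = dispatch_data_make_memory_entry(dd);
	if (mep == MACH_PORT_NULL) return NULL;
	mach_vm_address_t addr = 0;
	kern_return_t kr = mach_vm_map(mach_task_self(), &addr,
			dispatch_data_get_size(dd), 0, VM_FLAGS_ANYWHERE, mep, 0, FALSE,
			VM_PROT_READ, VM_PROT_READ, VM_INHERIT_DEFAULT);
	// the memory entry port is no longer needed once the mapping exists
	(void)mach_port_deallocate(mach_task_self(), mep);
	return (kr == KERN_SUCCESS) ? (void *)(uintptr_t)addr : NULL;
}
#endif
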
#endif // HAVE_MACH