/*
 * Copyright (c) 2009-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"

/*
 * Dispatch data objects are dispatch objects with standard retain/release
 * memory management. A dispatch data object either points to a number of other
 * dispatch data objects or is a leaf data object.
 * A composite data object specifies the total size of data it represents
 * and a list of constituent records.
 *
 *******************************************************************************
 *
 * CURRENT IMPLEMENTATION DETAILS
 *
 * There are actually 3 kinds of composite objects:
 * - trivial subranges
 * - unflattened composite data objects
 * - flattened composite data objects
 *
 * LEAVES (num_records == 0, destructor != nil)
 *
 * These objects have a pointer to the represented memory in `buf`.
 *
 * UNFLATTENED (num_records > 1, buf == nil, destructor == nil)
 *
 * This is the generic case of a composite object.
 *
 * FLATTENED (num_records > 1, buf != nil, destructor == nil)
 *
 * These objects are non-trivial composite objects whose `buf` pointer is a
 * contiguous (copied) representation of the memory they represent.
 *
 * Such objects are created when an unflattened composite object is used as an
 * NSData and -bytes is called on it.
 * The underlying implementation is _dispatch_data_get_flattened_bytes().
 *
 * TRIVIAL SUBRANGES (num_records == 1, buf == nil, destructor == nil)
 *
 * These objects point to a single leaf, never to a flattened object.
 *
 *******************************************************************************
 *
 * Non-trivial invariants:
 *
 * It is forbidden to point into a composite data object while ignoring entire
 * records from it (for example by having `from` larger than the first
 * record's length).
 *
 * dispatch_data_t objects are either leaves, or composite objects pointing to
 * leaves. Depth is never greater than 1.
 *
 *******************************************************************************
 *
 * There are 4 dispatch_data_t constructors that may create non-leaf objects
 * and must maintain the invariants above.
 *
 * dispatch_data_copy_region()
 *     This function first sees through trivial subranges, and may in turn
 *     generate new trivial subranges.
 *
 * dispatch_data_create_map()
 *     This function either returns an existing data object, or a leaf.
 *
 * dispatch_data_create_subrange()
 *     This function treats flattened objects like unflattened ones and
 *     recurses into trivial subranges; it can create trivial subranges.
 *
 * dispatch_data_create_concat()
 *     This function unwraps top-level composite objects, trivial or not, and
 *     concatenates the two arguments' range lists, hence always creating
 *     unflattened objects, unless one of the arguments is empty.
 *
 * An illustrative (non-normative) usage sketch of these constructors follows
 * this comment.
 *
 *******************************************************************************
 */
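
/*
 * Illustrative sketch (not part of the implementation): how the object kinds
 * described above typically arise from the public API, assuming only
 * <dispatch/dispatch.h> from a client's point of view.
 *
 *	char bytes[64] = { 0 };
 *
 *	// LEAF: the bytes are copied into a malloc'd buffer owned by the object.
 *	dispatch_data_t leaf = dispatch_data_create(bytes, sizeof(bytes),
 *			NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *
 *	// TRIVIAL SUBRANGE: a single record pointing into the leaf, no copy.
 *	dispatch_data_t sub = dispatch_data_create_subrange(leaf, 8, 16);
 *
 *	// UNFLATTENED COMPOSITE: one record per constituent leaf.
 *	dispatch_data_t cat = dispatch_data_create_concat(leaf, sub);
 *
 *	// FLATTENED: produced lazily, e.g. when -bytes is called on `cat` used
 *	// as an NSData (see _dispatch_data_get_flattened_bytes below).
 *
 *	dispatch_release(cat);
 *	dispatch_release(sub);
 *	dispatch_release(leaf);
 */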

#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
#define _dispatch_data_retain(x) _dispatch_objc_retain(x)
#define _dispatch_data_release(x) _dispatch_objc_release(x)
#else
#define _dispatch_data_retain(x) dispatch_retain(x)
#define _dispatch_data_release(x) dispatch_release(x)
#endif

DISPATCH_ALWAYS_INLINE
static inline dispatch_data_t
_dispatch_data_alloc(size_t n, size_t extra)
{
	dispatch_data_t data;
	size_t size;
	size_t base_size;

	if (os_add_overflow(sizeof(struct dispatch_data_s), extra, &base_size)) {
		return DISPATCH_OUT_OF_MEMORY;
	}
	if (os_mul_and_add_overflow(n, sizeof(range_record), base_size, &size)) {
		return DISPATCH_OUT_OF_MEMORY;
	}

	data = _dispatch_object_alloc(DISPATCH_DATA_CLASS, size);
	data->num_records = n;
#if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
	data->do_targetq = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	data->do_next = DISPATCH_OBJECT_LISTLESS;
#endif
	return data;
}

static void
_dispatch_data_destroy_buffer(const void* buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
		free((void*)buffer);
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) {
		// do nothing
#if HAVE_MACH
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) {
		mach_vm_size_t vm_size = size;
		mach_vm_address_t vm_addr = (uintptr_t)buffer;
		mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
#else
		(void)size;
#endif
	} else {
		if (!queue) {
			queue = dispatch_get_global_queue(
					DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
		}
		dispatch_async_f(queue, destructor, _dispatch_call_block_and_release);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	data->buf = buffer;
	data->size = size;
	data->destructor = destructor;
	if (queue) {
		_dispatch_retain(queue);
		data->do_targetq = queue;
	}
}

void
_dispatch_data_init_with_bytes(dispatch_data_t data, const void *buffer,
		size_t size, dispatch_block_t destructor)
{
	if (!buffer || !size) {
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, NULL,
					_dispatch_Block_copy(destructor));
		}
		buffer = NULL;
		size = 0;
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	}
	_dispatch_data_init(data, buffer, size, NULL, destructor);
}

dispatch_data_t
dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue,
		dispatch_block_t destructor)
{
	dispatch_data_t data;
	void *data_buf = NULL;
	if (!buffer || !size) {
		// Empty data requested so return the singleton empty object. Call
		// destructor immediately in this case to ensure any unused associated
		// storage is released.
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, queue,
					_dispatch_Block_copy(destructor));
		}
		return dispatch_data_empty;
	}
	if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
		// The default destructor was provided, indicating the data should be
		// copied.
		data_buf = malloc(size);
		if (slowpath(!data_buf)) {
			return DISPATCH_OUT_OF_MEMORY;
		}
		buffer = memcpy(data_buf, buffer, size);
		data = _dispatch_data_alloc(0, 0);
		destructor = DISPATCH_DATA_DESTRUCTOR_FREE;
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_INLINE) {
		data = _dispatch_data_alloc(0, size);
		buffer = memcpy((void*)data + sizeof(struct dispatch_data_s), buffer,
				size);
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	} else {
		data = _dispatch_data_alloc(0, 0);
		destructor = _dispatch_Block_copy(destructor);
	}
	_dispatch_data_init(data, buffer, size, queue, destructor);
	return data;
}
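
/*
 * Illustrative sketch (not part of the implementation): ownership semantics of
 * the destructor argument handled above. `queue` is an assumed caller-owned
 * queue; `my_map` and `my_free` are hypothetical helpers.
 *
 *	// DISPATCH_DATA_DESTRUCTOR_DEFAULT: the bytes are copied, so the caller
 *	// keeps ownership of `stack_buf` and may reuse it immediately.
 *	char stack_buf[5] = { 'h', 'e', 'l', 'l', 'o' };
 *	dispatch_data_t copied = dispatch_data_create(stack_buf, sizeof(stack_buf),
 *			NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *
 *	// DISPATCH_DATA_DESTRUCTOR_FREE: ownership of the malloc'd buffer moves
 *	// to the data object, which calls free() when it is disposed.
 *	void *heap_buf = malloc(1024);
 *	dispatch_data_t adopted = dispatch_data_create(heap_buf, 1024, NULL,
 *			DISPATCH_DATA_DESTRUCTOR_FREE);
 *
 *	// Custom destructor: the block is copied and later called asynchronously
 *	// on `queue` (or a default-priority global queue) to release the buffer.
 *	size_t mapped_len = 0;
 *	void *mapped_buf = my_map(&mapped_len);
 *	dispatch_data_t wrapped = dispatch_data_create(mapped_buf, mapped_len,
 *			queue, ^{ my_free(mapped_buf); });
 */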

dispatch_data_t
dispatch_data_create_f(const void *buffer, size_t size, dispatch_queue_t queue,
		dispatch_function_t destructor_function)
{
	dispatch_block_t destructor = (dispatch_block_t)destructor_function;
	if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT &&
			destructor != DISPATCH_DATA_DESTRUCTOR_FREE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_NONE &&
#if HAVE_MACH
			destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE &&
#endif
			destructor != DISPATCH_DATA_DESTRUCTOR_INLINE) {
		destructor = ^{ destructor_function((void*)buffer); };
	}
	return dispatch_data_create(buffer, size, queue, destructor);
}

dispatch_data_t
dispatch_data_create_alloc(size_t size, void** buffer_ptr)
{
	dispatch_data_t data = dispatch_data_empty;
	void *buffer = NULL;

	if (slowpath(!size)) {
		goto out;
	}
	data = _dispatch_data_alloc(0, size);
	buffer = (void*)data + sizeof(struct dispatch_data_s);
	_dispatch_data_init(data, buffer, size, NULL,
			DISPATCH_DATA_DESTRUCTOR_NONE);
out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	return data;
}

void
_dispatch_data_dispose(dispatch_data_t dd, DISPATCH_UNUSED bool *allow_free)
{
	if (_dispatch_data_leaf(dd)) {
		_dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq,
				dd->destructor);
	} else {
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			_dispatch_data_release(dd->records[i].data_object);
		}
		free((void *)dd->buf);
	}
}

void
_dispatch_data_set_target_queue(dispatch_data_t dd, dispatch_queue_t tq)
{
#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
	_dispatch_retain(tq);
	tq = os_atomic_xchg2o(dd, do_targetq, tq, release);
	if (tq) _dispatch_release(tq);
#else
	_dispatch_object_set_target_queue_inline(dd, tq);
#endif
}

size_t
_dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz)
{
	size_t offset = 0;
	offset += dsnprintf(&buf[offset], bufsiz - offset, "data[%p] = { ", dd);
	if (_dispatch_data_leaf(dd)) {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"leaf, size = %zd, buf = %p ", dd->size, dd->buf);
	} else {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"composite, size = %zd, num_records = %zd ", dd->size,
				_dispatch_data_num_records(dd));
		if (dd->buf) {
			offset += dsnprintf(&buf[offset], bufsiz - offset,
					", flatbuf = %p ", dd->buf);
		}
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			range_record r = dd->records[i];
			offset += dsnprintf(&buf[offset], bufsiz - offset, "record[%zd] = "
					"{ from = %zd, length = %zd, data_object = %p }, ", i,
					r.from, r.length, r.data_object);
		}
	}
	offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
	return offset;
}

size_t
dispatch_data_get_size(dispatch_data_t dd)
{
	return dd->size;
}

dispatch_data_t
dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2)
{
	dispatch_data_t data;
	size_t n;

	if (!dd1->size) {
		_dispatch_data_retain(dd2);
		return dd2;
	}
	if (!dd2->size) {
		_dispatch_data_retain(dd1);
		return dd1;
	}

	if (os_add_overflow(_dispatch_data_num_records(dd1),
			_dispatch_data_num_records(dd2), &n)) {
		return DISPATCH_OUT_OF_MEMORY;
	}
	data = _dispatch_data_alloc(n, 0);
	data->size = dd1->size + dd2->size;
	// Copy the constituent records into the newly created data object
	// Reference leaf objects as sub-objects
	if (_dispatch_data_leaf(dd1)) {
		data->records[0].from = 0;
		data->records[0].length = dd1->size;
		data->records[0].data_object = dd1;
	} else {
		memcpy(data->records, dd1->records, _dispatch_data_num_records(dd1) *
				sizeof(range_record));
	}
	if (_dispatch_data_leaf(dd2)) {
		data->records[_dispatch_data_num_records(dd1)].from = 0;
		data->records[_dispatch_data_num_records(dd1)].length = dd2->size;
		data->records[_dispatch_data_num_records(dd1)].data_object = dd2;
	} else {
		memcpy(data->records + _dispatch_data_num_records(dd1), dd2->records,
				_dispatch_data_num_records(dd2) * sizeof(range_record));
	}
	size_t i;
	for (i = 0; i < _dispatch_data_num_records(data); ++i) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}
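
/*
 * Illustrative sketch (not part of the implementation): concatenating two
 * leaves as above yields an unflattened composite with one record per leaf;
 * the record layout shown below mirrors the code above.
 *
 *	dispatch_data_t a = dispatch_data_create("ab", 2, NULL,
 *			DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *	dispatch_data_t b = dispatch_data_create("cd", 2, NULL,
 *			DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *	dispatch_data_t ab = dispatch_data_create_concat(a, b);
 *	// ab->size == 4, with two records:
 *	//   records[0] = { from = 0, length = 2, data_object = a }
 *	//   records[1] = { from = 0, length = 2, data_object = b }
 *	// Concatenating with dispatch_data_empty simply retains and returns the
 *	// non-empty argument.
 *	dispatch_release(ab);
 *	dispatch_release(b);
 *	dispatch_release(a);
 */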

dispatch_data_t
dispatch_data_create_subrange(dispatch_data_t dd, size_t offset,
		size_t length)
{
	dispatch_data_t data;

	if (offset >= dd->size || !length) {
		return dispatch_data_empty;
	} else if (length > dd->size - offset) {
		length = dd->size - offset;
	} else if (length == dd->size) {
		_dispatch_data_retain(dd);
		return dd;
	}
	/*
	 * We must only optimize leaves and not flattened objects, because many
	 * users want to keep the end of a buffer and release as much memory as
	 * they can from the beginning of it.
	 *
	 * Using the flatbuf here would be very wrong with respect to that goal.
	 */
	if (_dispatch_data_leaf(dd)) {
		data = _dispatch_data_alloc(1, 0);
		data->size = length;
		data->records[0].from = offset;
		data->records[0].length = length;
		data->records[0].data_object = dd;
		_dispatch_data_retain(dd);
		return data;
	}

	// Subrange of a composite dispatch data object
	const size_t dd_num_records = _dispatch_data_num_records(dd);
	bool to_the_end = (offset + length == dd->size);
	size_t i = 0;

	// find the record containing the specified offset
	while (i < dd_num_records && offset >= dd->records[i].length) {
		offset -= dd->records[i++].length;
	}

	// Crashing here indicates memory corruption of the passed-in data object
	if (slowpath(i >= dd_num_records)) {
		DISPATCH_INTERNAL_CRASH(i,
				"dispatch_data_create_subrange out of bounds");
	}

	// if everything is from a single dispatch data object, avoid boxing it
	if (offset + length <= dd->records[i].length) {
		return dispatch_data_create_subrange(dd->records[i].data_object,
				dd->records[i].from + offset, length);
	}

	// find the record containing the end of the current range
	// and optimize the case when you just remove bytes at the origin
	size_t count, last_length = 0;

	if (to_the_end) {
		count = dd_num_records - i;
	} else {
		last_length = length - (dd->records[i].length - offset);
		count = 1;

		while (i + count < dd_num_records) {
			size_t record_length = dd->records[i + count++].length;

			if (last_length <= record_length) {
				break;
			}
			last_length -= record_length;

			// Crashing here indicates memory corruption of the passed-in
			// data object
			if (slowpath(i + count >= dd_num_records)) {
				DISPATCH_INTERNAL_CRASH(i + count,
						"dispatch_data_create_subrange out of bounds");
			}
		}
	}

	data = _dispatch_data_alloc(count, 0);
	data->size = length;
	memcpy(data->records, dd->records + i, count * sizeof(range_record));

	if (offset) {
		data->records[0].from += offset;
		data->records[0].length -= offset;
	}
	if (!to_the_end) {
		data->records[count - 1].length = last_length;
	}

	for (i = 0; i < count; i++) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}
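
/*
 * Illustrative sketch (not part of the implementation): subrange behavior as
 * implemented above, reusing `leaf` and `ab` from the earlier sketches.
 *
 *	// Subrange of a leaf: a trivial subrange (single record) that retains the
 *	// leaf and stores the offset; no bytes are copied.
 *	dispatch_data_t tail = dispatch_data_create_subrange(leaf,
 *			dispatch_data_get_size(leaf) - 16, 16);
 *
 *	// Subrange of a composite: only the records overlapping
 *	// [offset, offset + length) are referenced. Here the range lies entirely
 *	// within the first record, so the call recurses into that leaf instead
 *	// of boxing the composite.
 *	dispatch_data_t first = dispatch_data_create_subrange(ab, 0, 2);
 *
 *	dispatch_release(first);
 *	dispatch_release(tail);
 */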

static void*
_dispatch_data_flatten(dispatch_data_t dd)
{
	void *buffer = malloc(dd->size);

	// Composite data object, copy the represented buffers
	if (buffer) {
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy(buffer + off, buf, len);
			return (bool)true;
		});
	}

	return buffer;
}

// When mapping a leaf object or a subrange of a leaf object, return a direct
// pointer to the represented buffer. For all other data objects, copy the
// represented buffers into a contiguous area. In the future it might
// be possible to relocate the buffers instead (if not marked as locked).
dispatch_data_t
dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr,
		size_t *size_ptr)
{
	dispatch_data_t data = NULL;
	const void *buffer = NULL;
	size_t size = dd->size;

	if (!size) {
		data = dispatch_data_empty;
		goto out;
	}

	buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
	if (buffer) {
		_dispatch_data_retain(dd);
		data = dd;
		goto out;
	}

	buffer = _dispatch_data_flatten(dd);
	if (fastpath(buffer)) {
		data = dispatch_data_create(buffer, size, NULL,
				DISPATCH_DATA_DESTRUCTOR_FREE);
	} else {
		size = 0;
	}

out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	if (size_ptr) {
		*size_ptr = size;
	}
	return data;
}
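
/*
 * Illustrative sketch (not part of the implementation): obtaining a contiguous
 * view of arbitrary dispatch data via dispatch_data_create_map().
 * `use_bytes` is a hypothetical consumer.
 *
 *	const void *bytes = NULL;
 *	size_t size = 0;
 *	dispatch_data_t map = dispatch_data_create_map(dd, &bytes, &size);
 *	if (map) {
 *		// `bytes` is either the leaf's own buffer or a freshly flattened
 *		// copy, and stays valid for the lifetime of `map`.
 *		use_bytes(bytes, size);
 *		dispatch_release(map);
 *	}
 */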

const void *
_dispatch_data_get_flattened_bytes(dispatch_data_t dd)
{
	const void *buffer;
	size_t offset = 0;

	if (slowpath(!dd->size)) {
		return NULL;
	}

	buffer = _dispatch_data_map_direct(dd, 0, &dd, &offset);
	if (buffer) {
		return buffer;
	}

	void *flatbuf = _dispatch_data_flatten(dd);
	if (fastpath(flatbuf)) {
		// we need a release so that readers see the content of the buffer
		if (slowpath(!os_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf,
				&buffer, release))) {
			free(flatbuf);
		} else {
			buffer = flatbuf;
		}
	} else {
		return NULL;
	}

	return buffer + offset;
}

#if DISPATCH_USE_CLIENT_CALLOUT
DISPATCH_NOINLINE
#else
DISPATCH_ALWAYS_INLINE
#endif
static bool
_dispatch_data_apply_client_callout(void *ctxt, dispatch_data_t region,
		size_t offset, const void *buffer, size_t size,
		dispatch_data_applier_function_t f)
{
	return f(ctxt, region, offset, buffer, size);
}

static bool
_dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from,
		size_t size, void *ctxt, dispatch_data_applier_function_t applier)
{
	bool result = true;
	const void *buffer;

	buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
	if (buffer) {
		return _dispatch_data_apply_client_callout(ctxt, dd,
				offset, buffer + from, size, applier);
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd) && result; ++i) {
		result = _dispatch_data_apply(dd->records[i].data_object,
				offset, dd->records[i].from, dd->records[i].length, ctxt,
				applier);
		offset += dd->records[i].length;
	}
	return result;
}

bool
dispatch_data_apply_f(dispatch_data_t dd, void *ctxt,
		dispatch_data_applier_function_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, ctxt, applier);
}

bool
dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, applier,
			(dispatch_data_applier_function_t)_dispatch_Block_invoke(applier));
}
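
/*
 * Illustrative sketch (not part of the implementation): iterating the leaf
 * regions of a data object with dispatch_data_apply(). The applier block is
 * called once per contiguous region, in order, and traversal stops early if
 * it returns false.
 *
 *	__block size_t total = 0;
 *	bool completed = dispatch_data_apply(dd,
 *			^bool(dispatch_data_t region, size_t offset,
 *					const void *buffer, size_t size) {
 *		// `buffer` points at `size` contiguous bytes located `offset` bytes
 *		// into `dd`; retain `region` to keep them alive past the callback.
 *		(void)region; (void)offset; (void)buffer;
 *		total += size;
 *		return true;
 *	});
 *	(void)completed; // true if every region was visited
 */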

static dispatch_data_t
_dispatch_data_copy_region(dispatch_data_t dd, size_t from, size_t size,
		size_t location, size_t *offset_ptr)
{
	dispatch_data_t reusable_dd = NULL;
	size_t offset = 0;

	if (from == 0 && size == dd->size) {
		reusable_dd = dd;
	}

	if (_dispatch_data_map_direct(dd, from, &dd, &from)) {
		if (reusable_dd) {
			_dispatch_data_retain(reusable_dd);
			return reusable_dd;
		}

		_dispatch_data_retain(dd);
		if (from == 0 && size == dd->size) {
			return dd;
		}

		dispatch_data_t data = _dispatch_data_alloc(1, 0);
		data->size = size;
		data->records[0].from = from;
		data->records[0].length = size;
		data->records[0].data_object = dd;
		return data;
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
		size_t length = dd->records[i].length;

		if (from >= length) {
			from -= length;
			continue;
		}

		length -= from;
		if (location >= offset + length) {
			offset += length;
			from = 0;
			continue;
		}

		from += dd->records[i].from;
		dd = dd->records[i].data_object;
		*offset_ptr += offset;
		location -= offset;
		return _dispatch_data_copy_region(dd, from, length, location,
				offset_ptr);
	}

	DISPATCH_INTERNAL_CRASH(*offset_ptr + offset,
			"dispatch_data_copy_region out of bounds");
}

// Returns either a leaf object or an object composed of a single leaf object
dispatch_data_t
dispatch_data_copy_region(dispatch_data_t dd, size_t location,
		size_t *offset_ptr)
{
	if (location >= dd->size) {
		*offset_ptr = dd->size;
		return dispatch_data_empty;
	}
	*offset_ptr = 0;
	return _dispatch_data_copy_region(dd, 0, dd->size, location, offset_ptr);
}
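
/*
 * Illustrative sketch (not part of the implementation): locating the leaf
 * region that contains a given byte with dispatch_data_copy_region().
 *
 *	size_t region_offset = 0;
 *	dispatch_data_t region = dispatch_data_copy_region(dd, 100, &region_offset);
 *	// `region` is a leaf (or wraps a single leaf) whose bytes begin at
 *	// `region_offset` within `dd` and cover byte 100.
 *	dispatch_release(region);
 */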

#if HAVE_MACH

#ifndef MAP_MEM_VM_COPY
#define MAP_MEM_VM_COPY 0x200000 // <rdar://problem/13336613>
#endif

mach_port_t
dispatch_data_make_memory_entry(dispatch_data_t dd)
{
	mach_port_t mep = MACH_PORT_NULL;
	memory_object_size_t mos;
	mach_vm_size_t vm_size = dd->size;
	mach_vm_address_t vm_addr;
	vm_prot_t flags;
	kern_return_t kr;
	bool copy = (dd->destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE);

retry:
	if (copy) {
		vm_addr = vm_page_size;
		kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size,
				VM_FLAGS_ANYWHERE);
		if (kr) {
			if (kr != KERN_NO_SPACE) {
				(void)dispatch_assume_zero(kr);
			}
			return mep;
		}
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy((void*)(vm_addr + off), buf, len);
			return (bool)true;
		});
	} else {
		vm_addr = (uintptr_t)dd->buf;
	}
	flags = VM_PROT_DEFAULT|VM_PROT_IS_MASK|MAP_MEM_VM_COPY;
	mos = vm_size;
	kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
			&mep, MACH_PORT_NULL);
	if (kr == KERN_INVALID_VALUE) {
		// Fallback in case MAP_MEM_VM_COPY is not supported
		flags &= ~MAP_MEM_VM_COPY;
		kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
				&mep, MACH_PORT_NULL);
	}
	if (dispatch_assume_zero(kr)) {
		mep = MACH_PORT_NULL;
	} else if (mos < vm_size) {
		// Memory object was truncated, e.g. due to lack of MAP_MEM_VM_COPY
		kr = mach_port_deallocate(mach_task_self(), mep);
		(void)dispatch_assume_zero(kr);
		if (!copy) {
			copy = true;
			goto retry;
		}
		mep = MACH_PORT_NULL;
	}
	if (copy) {
		kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
		(void)dispatch_assume_zero(kr);
	}
	return mep;
}
#endif // HAVE_MACH