/*
 * Copyright (c) 2009-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"

/*
 * Dispatch data objects are dispatch objects with standard retain/release
 * memory management. A dispatch data object either points to a number of
 * other dispatch data objects or is a leaf data object.
 * A composite data object specifies the total size of data it represents
 * and a list of constituent records.
 *
 ******************************************************************************
 *
 * CURRENT IMPLEMENTATION DETAILS
 *
 * There are actually 3 kinds of composite objects:
 * - trivial subranges
 * - unflattened composite data objects
 * - flattened composite data objects
 *
 * LEAVES (num_records == 0, destructor != nil)
 *
 * These objects have a pointer to the represented memory in `buf`.
 *
 * UNFLATTENED (num_records > 1, buf == nil, destructor == nil)
 *
 * This is the generic case of a composite object.
 *
 * FLATTENED (num_records > 1, buf != nil, destructor == nil)
 *
 * These objects are non-trivial composite objects whose `buf` pointer holds
 * a contiguous (copied) representation of the memory they represent.
 *
 * Such objects are created when an unflattened composite object is used as
 * an NSData and -bytes is called.
 * The underlying implementation is _dispatch_data_get_flattened_bytes().
 *
 * TRIVIAL SUBRANGES (num_records == 1, buf == nil, destructor == nil)
 *
 * These objects point to a single leaf, never to flattened objects.
 *
 ******************************************************************************
 *
 * Non-trivial invariants:
 *
 * It is forbidden to point into a composite data object and ignore entire
 * records from it (for example, by having `from` larger than the first
 * record's length).
 *
 * dispatch_data_t objects are either leaves, or composite objects pointing
 * to leaves. Depth is never greater than 1.
 *
 ******************************************************************************
 *
 * There are 4 dispatch_data_t constructors that may create non-leaf objects
 * and must ensure the proper invariants.
 *
 * dispatch_data_copy_region()
 *    This function first sees through trivial subranges, and may in turn
 *    generate new trivial subranges.
 *
 * dispatch_data_create_map()
 *    This function either returns existing data objects, or a leaf.
 *
 * dispatch_data_create_subrange()
 *    This function treats flattened objects like unflattened ones and
 *    recurses into trivial subranges; it can create trivial subranges.
 *
 * dispatch_data_create_concat()
 *    This function unwraps the top-level composite objects, trivial or not,
 *    and concatenates the two arguments' range lists, hence always creating
 *    unflattened objects, unless one of the arguments was empty.
 *
 ******************************************************************************
 */
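
/*
 * Illustrative sketch (not part of this implementation): how the public
 * constructors relate to the object kinds described above, as seen from an
 * ordinary client built against <dispatch/dispatch.h>. `bytes` is a
 * placeholder buffer.
 *
 *	static const char bytes[] = "hello, world";
 *	// Leaf: buf != nil, destructor != nil, num_records == 0
 *	dispatch_data_t leaf = dispatch_data_create(bytes, sizeof(bytes) - 1,
 *			NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *	// Unflattened composite: two records, each referencing the leaf
 *	dispatch_data_t concat = dispatch_data_create_concat(leaf, leaf);
 *	// Trivial subrange: a single record pointing into the leaf
 *	dispatch_data_t sub = dispatch_data_create_subrange(leaf, 0, 5);
 *	dispatch_release(sub);
 *	dispatch_release(concat);
 *	dispatch_release(leaf);
 */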

#if USE_OBJC
#define _dispatch_data_retain(x) _dispatch_objc_retain(x)
#define _dispatch_data_release(x) _dispatch_objc_release(x)
#else
#define _dispatch_data_retain(x) dispatch_retain(x)
#define _dispatch_data_release(x) dispatch_release(x)
#endif

const dispatch_block_t _dispatch_data_destructor_free = ^{
	DISPATCH_CRASH("free destructor called");
};

const dispatch_block_t _dispatch_data_destructor_none = ^{
	DISPATCH_CRASH("none destructor called");
};

const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{
	DISPATCH_CRASH("vmdeallocate destructor called");
};

const dispatch_block_t _dispatch_data_destructor_inline = ^{
	DISPATCH_CRASH("inline destructor called");
};

struct dispatch_data_s _dispatch_data_empty = {
	.do_vtable = DISPATCH_DATA_EMPTY_CLASS,
#if !USE_OBJC
	.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	.do_next = DISPATCH_OBJECT_LISTLESS,
#endif
};

DISPATCH_ALWAYS_INLINE
static inline dispatch_data_t
_dispatch_data_alloc(size_t n, size_t extra)
{
	dispatch_data_t data = _dispatch_alloc(DISPATCH_DATA_CLASS,
			sizeof(struct dispatch_data_s) + extra +
			n * sizeof(range_record));
	data->num_records = n;
#if !USE_OBJC
	data->do_targetq = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	data->do_next = DISPATCH_OBJECT_LISTLESS;
#endif
	return data;
}

static void
_dispatch_data_destroy_buffer(const void* buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
		free((void*)buffer);
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) {
		// do nothing
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) {
		mach_vm_size_t vm_size = size;
		mach_vm_address_t vm_addr = (uintptr_t)buffer;
		mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
	} else {
		if (!queue) {
			queue = dispatch_get_global_queue(
					DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
		}
		dispatch_async_f(queue, destructor, _dispatch_call_block_and_release);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	data->buf = buffer;
	data->size = size;
	data->destructor = destructor;
	if (queue) {
		_dispatch_retain(queue);
		data->do_targetq = queue;
	}
}

void
dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_block_t destructor)
{
	if (!buffer || !size) {
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, NULL,
					_dispatch_Block_copy(destructor));
		}
		buffer = NULL;
		size = 0;
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	}
	_dispatch_data_init(data, buffer, size, NULL, destructor);
}

dispatch_data_t
dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue,
		dispatch_block_t destructor)
{
	dispatch_data_t data;
	void *data_buf = NULL;
	if (!buffer || !size) {
		// Empty data requested so return the singleton empty object. Call
		// destructor immediately in this case to ensure any unused associated
		// storage is released.
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, queue,
					_dispatch_Block_copy(destructor));
		}
		return dispatch_data_empty;
	}
	if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
		// The default destructor was provided, indicating the data should be
		// copied.
		data_buf = malloc(size);
		if (slowpath(!data_buf)) {
			return NULL;
		}
		buffer = memcpy(data_buf, buffer, size);
		data = _dispatch_data_alloc(0, 0);
		destructor = DISPATCH_DATA_DESTRUCTOR_FREE;
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_INLINE) {
		data = _dispatch_data_alloc(0, size);
		buffer = memcpy((void*)data + sizeof(struct dispatch_data_s), buffer,
				size);
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	} else {
		data = _dispatch_data_alloc(0, 0);
		destructor = _dispatch_Block_copy(destructor);
	}
	_dispatch_data_init(data, buffer, size, queue, destructor);
	return data;
}
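
/*
 * Usage sketch (illustrative, not part of this file): the destructor argument
 * selects one of the paths above; `src`, `len`, `q`, and `my_release` are
 * placeholders.
 *
 *	// DEFAULT: the bytes are copied into an allocation owned by the object
 *	dispatch_data_t copied = dispatch_data_create(src, len, NULL,
 *			DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *	// FREE: the object takes ownership of a malloc'd buffer
 *	void *heap = malloc(len);
 *	memcpy(heap, src, len);
 *	dispatch_data_t owned = dispatch_data_create(heap, len, NULL,
 *			DISPATCH_DATA_DESTRUCTOR_FREE);
 *	// Custom block: copied here and invoked asynchronously on `q` (or a
 *	// global queue) when the object is disposed
 *	dispatch_data_t custom = dispatch_data_create(src, len, q,
 *			^{ my_release(src); });
 */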

dispatch_data_t
dispatch_data_create_f(const void *buffer, size_t size, dispatch_queue_t queue,
		dispatch_function_t destructor_function)
{
	dispatch_block_t destructor = (dispatch_block_t)destructor_function;
	if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT &&
			destructor != DISPATCH_DATA_DESTRUCTOR_FREE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_NONE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE &&
			destructor != DISPATCH_DATA_DESTRUCTOR_INLINE) {
		destructor = ^{ destructor_function((void*)buffer); };
	}
	return dispatch_data_create(buffer, size, queue, destructor);
}
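
/*
 * Usage sketch (illustrative): the function-pointer variant wraps the
 * destructor function in a block unless one of the known destructor
 * constants was passed; `heap_buf` and `len` are placeholders.
 *
 *	dispatch_data_t d = dispatch_data_create_f(heap_buf, len, NULL,
 *			(dispatch_function_t)free);
 */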

dispatch_data_t
dispatch_data_create_alloc(size_t size, void** buffer_ptr)
{
	dispatch_data_t data = dispatch_data_empty;
	void *buffer = NULL;

	if (slowpath(!size)) {
		goto out;
	}
	data = _dispatch_data_alloc(0, size);
	buffer = (void*)data + sizeof(struct dispatch_data_s);
	_dispatch_data_init(data, buffer, size, NULL,
			DISPATCH_DATA_DESTRUCTOR_NONE);
out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	return data;
}
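
/*
 * Usage sketch (illustrative): callers of this private constructor fill in
 * the returned trailing buffer directly, avoiding an extra copy; `src` and
 * `len` are placeholders.
 *
 *	void *p;
 *	dispatch_data_t d = dispatch_data_create_alloc(len, &p);
 *	if (p) memcpy(p, src, len);
 */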

void
_dispatch_data_dispose(dispatch_data_t dd)
{
	if (_dispatch_data_leaf(dd)) {
		_dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq,
				dd->destructor);
	} else {
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			_dispatch_data_release(dd->records[i].data_object);
		}
		free((void *)dd->buf);
	}
}

size_t
_dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz)
{
	size_t offset = 0;
	offset += dsnprintf(&buf[offset], bufsiz - offset, "data[%p] = { ", dd);
	if (_dispatch_data_leaf(dd)) {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"leaf, size = %zd, buf = %p ", dd->size, dd->buf);
	} else {
		offset += dsnprintf(&buf[offset], bufsiz - offset,
				"composite, size = %zd, num_records = %zd ", dd->size,
				_dispatch_data_num_records(dd));
		if (dd->buf) {
			offset += dsnprintf(&buf[offset], bufsiz - offset,
					", flatbuf = %p ", dd->buf);
		}
		size_t i;
		for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
			range_record r = dd->records[i];
			offset += dsnprintf(&buf[offset], bufsiz - offset, "record[%zd] = "
					"{ from = %zd, length = %zd, data_object = %p }, ", i,
					r.from, r.length, r.data_object);
		}
	}
	offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
	return offset;
}

size_t
dispatch_data_get_size(dispatch_data_t dd)
{
	return dd->size;
}

dispatch_data_t
dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2)
{
	dispatch_data_t data;
	if (!dd1->size) {
		_dispatch_data_retain(dd2);
		return dd2;
	}
	if (!dd2->size) {
		_dispatch_data_retain(dd1);
		return dd1;
	}

	data = _dispatch_data_alloc(_dispatch_data_num_records(dd1) +
			_dispatch_data_num_records(dd2), 0);
	data->size = dd1->size + dd2->size;
	// Copy the constituent records into the newly created data object
	// Reference leaf objects as sub-objects
	if (_dispatch_data_leaf(dd1)) {
		data->records[0].from = 0;
		data->records[0].length = dd1->size;
		data->records[0].data_object = dd1;
	} else {
		memcpy(data->records, dd1->records, _dispatch_data_num_records(dd1) *
				sizeof(range_record));
	}
	if (_dispatch_data_leaf(dd2)) {
		data->records[_dispatch_data_num_records(dd1)].from = 0;
		data->records[_dispatch_data_num_records(dd1)].length = dd2->size;
		data->records[_dispatch_data_num_records(dd1)].data_object = dd2;
	} else {
		memcpy(data->records + _dispatch_data_num_records(dd1), dd2->records,
				_dispatch_data_num_records(dd2) * sizeof(range_record));
	}
	size_t i;
	for (i = 0; i < _dispatch_data_num_records(data); ++i) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}
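
/*
 * Usage sketch (illustrative): concatenation is the usual way to accumulate
 * streamed buffers without copying their bytes; `recv_buf` and `recv_len`
 * are placeholders.
 *
 *	dispatch_data_t acc = dispatch_data_empty;
 *	dispatch_data_t chunk = dispatch_data_create(recv_buf, recv_len, NULL,
 *			DISPATCH_DATA_DESTRUCTOR_DEFAULT);
 *	dispatch_data_t combined = dispatch_data_create_concat(acc, chunk);
 *	dispatch_release(chunk);
 *	dispatch_release(acc);
 *	acc = combined; // unflattened composite unless one argument was empty
 */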

dispatch_data_t
dispatch_data_create_subrange(dispatch_data_t dd, size_t offset,
		size_t length)
{
	dispatch_data_t data;
	if (offset >= dd->size || !length) {
		return dispatch_data_empty;
	} else if ((offset + length) > dd->size) {
		length = dd->size - offset;
	} else if (length == dd->size) {
		_dispatch_data_retain(dd);
		return dd;
	}
	/*
	 * We must only optimize leaves and not flattened objects, because lots
	 * of users want to keep the end of a buffer and release as much memory
	 * as they can from the beginning of it.
	 *
	 * Using the flatbuf here would be very wrong with respect to that goal.
	 */
	if (_dispatch_data_leaf(dd)) {
		data = _dispatch_data_alloc(1, 0);
		data->size = length;
		data->records[0].from = offset;
		data->records[0].length = length;
		data->records[0].data_object = dd;
		_dispatch_data_retain(dd);
		return data;
	}

	// Subrange of a composite dispatch data object
	const size_t dd_num_records = _dispatch_data_num_records(dd);
	bool to_the_end = (offset + length == dd->size);
	size_t i = 0;

	// Find the record containing the specified offset
	while (i < dd_num_records && offset >= dd->records[i].length) {
		offset -= dd->records[i++].length;
	}

	// Crashing here indicates memory corruption of the passed-in data object
	if (slowpath(i >= dd_num_records)) {
		DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
		return NULL;
	}

	// If everything is from a single dispatch data object, avoid boxing it
	if (offset + length <= dd->records[i].length) {
		return dispatch_data_create_subrange(dd->records[i].data_object,
				dd->records[i].from + offset, length);
	}

	// Find the record containing the end of the current range,
	// and optimize the case when you just remove bytes at the origin
	size_t count, last_length;

	if (to_the_end) {
		count = dd_num_records - i;
	} else {
		last_length = length - (dd->records[i].length - offset);
		count = 1;

		while (i + count < dd_num_records) {
			size_t record_length = dd->records[i + count++].length;

			if (last_length <= record_length) {
				break;
			}
			last_length -= record_length;

			// Crashing here indicates memory corruption of the passed-in
			// data object
			if (slowpath(i + count >= dd_num_records)) {
				DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
				return NULL;
			}
		}
	}

	data = _dispatch_data_alloc(count, 0);
	data->size = length;
	memcpy(data->records, dd->records + i, count * sizeof(range_record));

	if (offset) {
		data->records[0].from += offset;
		data->records[0].length -= offset;
	}
	if (!to_the_end) {
		data->records[count - 1].length = last_length;
	}

	for (i = 0; i < count; i++) {
		_dispatch_data_retain(data->records[i].data_object);
	}
	return data;
}
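
/*
 * Usage sketch (illustrative): the leaf-only optimization above matters for
 * the common "consume from the front" pattern, which keeps only the tail of
 * a buffer; `pending` and `consumed` are placeholders.
 *
 *	dispatch_data_t rest = dispatch_data_create_subrange(pending, consumed,
 *			dispatch_data_get_size(pending) - consumed);
 *	dispatch_release(pending);
 *	pending = rest;
 */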

static void*
_dispatch_data_flatten(dispatch_data_t dd)
{
	void *buffer = malloc(dd->size);

	// Composite data object, copy the represented buffers
	if (buffer) {
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy(buffer + off, buf, len);
			return (bool)true;
		});
	}

	return buffer;
}

// When mapping a leaf object or a subrange of a leaf object, return a direct
// pointer to the represented buffer. For all other data objects, copy the
// represented buffers into a contiguous area. In the future it might
// be possible to relocate the buffers instead (if not marked as locked).
dispatch_data_t
dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr,
		size_t *size_ptr)
{
	dispatch_data_t data = NULL;
	const void *buffer = NULL;
	size_t size = dd->size;

	if (!size) {
		data = dispatch_data_empty;
		goto out;
	}

	buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
	if (buffer) {
		_dispatch_data_retain(dd);
		data = dd;
		goto out;
	}

	buffer = _dispatch_data_flatten(dd);
	if (fastpath(buffer)) {
		data = dispatch_data_create(buffer, size, NULL,
				DISPATCH_DATA_DESTRUCTOR_FREE);
	} else {
		size = 0;
	}

out:
	if (buffer_ptr) {
		*buffer_ptr = buffer;
	}
	if (size_ptr) {
		*size_ptr = size;
	}
	return data;
}
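
/*
 * Usage sketch (illustrative): mapping is the supported way to obtain a
 * contiguous view of a possibly fragmented data object; the returned object
 * keeps `ptr` valid for as long as it is retained. `process_bytes` is a
 * placeholder.
 *
 *	const void *ptr;
 *	size_t len;
 *	dispatch_data_t map = dispatch_data_create_map(dd, &ptr, &len);
 *	if (map) {
 *		process_bytes(ptr, len);
 *		dispatch_release(map);
 *	}
 */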

const void *
_dispatch_data_get_flattened_bytes(dispatch_data_t dd)
{
	const void *buffer;
	size_t offset = 0;

	if (slowpath(!dd->size)) {
		return NULL;
	}

	buffer = _dispatch_data_map_direct(dd, 0, &dd, &offset);
	if (buffer) {
		return buffer;
	}

	void *flatbuf = _dispatch_data_flatten(dd);
	if (fastpath(flatbuf)) {
		// we need a release so that readers see the content of the buffer
		if (slowpath(!dispatch_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf,
				&buffer, release))) {
			free(flatbuf);
		} else {
			buffer = flatbuf;
		}
	} else {
		return NULL;
	}

	return buffer + offset;
}

#if DISPATCH_USE_CLIENT_CALLOUT
DISPATCH_NOINLINE
#else
DISPATCH_ALWAYS_INLINE
#endif
static bool
_dispatch_data_apply_client_callout(void *ctxt, dispatch_data_t region,
		size_t offset, const void *buffer, size_t size,
		dispatch_data_applier_function_t f)
{
	return f(ctxt, region, offset, buffer, size);
}

static bool
_dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from,
		size_t size, void *ctxt, dispatch_data_applier_function_t applier)
{
	bool result = true;
	const void *buffer;

	buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
	if (buffer) {
		return _dispatch_data_apply_client_callout(ctxt, dd,
				offset, buffer + from, size, applier);
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd) && result; ++i) {
		result = _dispatch_data_apply(dd->records[i].data_object,
				offset, dd->records[i].from, dd->records[i].length, ctxt,
				applier);
		offset += dd->records[i].length;
	}
	return result;
}

bool
dispatch_data_apply_f(dispatch_data_t dd, void *ctxt,
		dispatch_data_applier_function_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, ctxt, applier);
}

bool
dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier)
{
	if (!dd->size) {
		return true;
	}
	return _dispatch_data_apply(dd, 0, 0, dd->size, applier,
			(dispatch_data_applier_function_t)_dispatch_Block_invoke(applier));
}
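
/*
 * Usage sketch (illustrative): dispatch_data_apply() visits each leaf-backed
 * region in order without flattening; returning false stops the traversal.
 *
 *	__block size_t total = 0;
 *	dispatch_data_apply(dd, ^bool(dispatch_data_t region, size_t offset,
 *			const void *buffer, size_t size) {
 *		total += size; // `offset` is the region's start within `dd`
 *		return true;   // continue with the next region
 *	});
 */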

static dispatch_data_t
_dispatch_data_copy_region(dispatch_data_t dd, size_t from, size_t size,
		size_t location, size_t *offset_ptr)
{
	dispatch_data_t reusable_dd = NULL;
	size_t offset = 0;

	if (from == 0 && size == dd->size) {
		reusable_dd = dd;
	}

	if (_dispatch_data_map_direct(dd, from, &dd, &from)) {
		if (reusable_dd) {
			_dispatch_data_retain(reusable_dd);
			return reusable_dd;
		}

		_dispatch_data_retain(dd);
		if (from == 0 && size == dd->size) {
			return dd;
		}

		dispatch_data_t data = _dispatch_data_alloc(1, 0);
		data->size = size;
		data->records[0].from = from;
		data->records[0].length = size;
		data->records[0].data_object = dd;
		return data;
	}

	size_t i;
	for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
		size_t length = dd->records[i].length;

		if (from >= length) {
			from -= length;
			continue;
		}

		length -= from;
		if (location >= offset + length) {
			offset += length;
			from = 0;
			continue;
		}

		from += dd->records[i].from;
		dd = dd->records[i].data_object;
		*offset_ptr += offset;
		location -= offset;
		return _dispatch_data_copy_region(dd, from, length, location,
				offset_ptr);
	}

	DISPATCH_CRASH("dispatch_data_copy_region out of bounds");
}

// Returns either a leaf object or an object composed of a single leaf object
dispatch_data_t
dispatch_data_copy_region(dispatch_data_t dd, size_t location,
		size_t *offset_ptr)
{
	if (location >= dd->size) {
		*offset_ptr = dd->size;
		return dispatch_data_empty;
	}
	*offset_ptr = 0;
	return _dispatch_data_copy_region(dd, 0, dd->size, location, offset_ptr);
}
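
/*
 * Usage sketch (illustrative): copy_region retrieves the single leaf-backed
 * object containing byte `location`, together with that region's starting
 * offset within `dd`.
 *
 *	size_t region_offset;
 *	dispatch_data_t region = dispatch_data_copy_region(dd, location,
 *			&region_offset);
 *	// (location - region_offset) is the position of the byte within `region`
 *	dispatch_release(region);
 */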

#if HAVE_MACH

#ifndef MAP_MEM_VM_COPY
#define MAP_MEM_VM_COPY 0x200000 // <rdar://problem/13336613>
#endif

mach_port_t
dispatch_data_make_memory_entry(dispatch_data_t dd)
{
	mach_port_t mep = MACH_PORT_NULL;
	memory_object_size_t mos;
	mach_vm_size_t vm_size = dd->size;
	mach_vm_address_t vm_addr;
	vm_prot_t flags;
	kern_return_t kr;
	bool copy = (dd->destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE);

retry:
	if (copy) {
		vm_addr = vm_page_size;
		kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size,
				VM_FLAGS_ANYWHERE);
		if (kr) {
			if (kr != KERN_NO_SPACE) {
				(void)dispatch_assume_zero(kr);
			}
			return mep;
		}
		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
				size_t off, const void* buf, size_t len) {
			memcpy((void*)(vm_addr + off), buf, len);
			return (bool)true;
		});
	} else {
		vm_addr = (uintptr_t)dd->buf;
	}
	flags = VM_PROT_DEFAULT|VM_PROT_IS_MASK|MAP_MEM_VM_COPY;
	mos = vm_size;
	kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
			&mep, MACH_PORT_NULL);
	if (kr == KERN_INVALID_VALUE) {
		// Fallback in case MAP_MEM_VM_COPY is not supported
		flags &= ~MAP_MEM_VM_COPY;
		kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags,
				&mep, MACH_PORT_NULL);
	}
	if (dispatch_assume_zero(kr)) {
		mep = MACH_PORT_NULL;
	} else if (mos < vm_size) {
		// Memory object was truncated, e.g. due to lack of MAP_MEM_VM_COPY
		kr = mach_port_deallocate(mach_task_self(), mep);
		(void)dispatch_assume_zero(kr);
		if (!copy) {
			copy = true;
			goto retry;
		}
		mep = MACH_PORT_NULL;
	}
	if (copy) {
		kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
		(void)dispatch_assume_zero(kr);
	}
	return mep;
}
#endif // HAVE_MACH