1/*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: vm/vm_user.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * User-exported virtual memory functions.
63 */
64
65/*
66 * There are three implementations of the "XXX_allocate" functionality in
67 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
68 * (for a task with the same address space size, especially the current task),
69 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
70 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
71 * makes sense on platforms where a user task can either be 32 or 64, or the kernel
72 * task can be 32 or 64. mach_vm_allocate makes sense everywhere, and is preferred
73 * for new code.
74 *
75 * The entrypoints into the kernel are more complex. All platforms support a
76 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
77 * size types for the platform. On platforms that only support U32/K32,
78 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 79 * subsystem 3800 is used to disambiguate the size of parameters, and they will
80 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32 platforms,
81 * the MIG glue should never call into vm_allocate directly, because the calling
 82 * task and kernel_task are unlikely to use the same size parameters.
83 *
84 * New VM call implementations should be added here and to mach_vm.defs
85 * (subsystem 4800), and use mach_vm_* "wide" types.
86 */
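/*
 * Illustrative sketch (not part of the build): a kernel-side caller of the
 * preferred "wide" entry point defined below. The tag and size used here are
 * arbitrary examples.
 *
 *	mach_vm_offset_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_allocate_kernel(kernel_map, &addr, PAGE_SIZE,
 *	    VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK);
 *	if (kr == KERN_SUCCESS) {
 *		... use the memory, then release it ...
 *		mach_vm_deallocate(kernel_map, addr, PAGE_SIZE);
 *	}
 */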
87
88#include <debug.h>
89
90#include <vm_cpm.h>
91#include <mach/boolean.h>
92#include <mach/kern_return.h>
93#include <mach/mach_types.h> /* to get vm_address_t */
94#include <mach/memory_object.h>
95#include <mach/std_types.h> /* to get pointer_t */
96#include <mach/upl.h>
97#include <mach/vm_attributes.h>
98#include <mach/vm_param.h>
99#include <mach/vm_statistics.h>
100#include <mach/mach_syscalls.h>
101#include <mach/sdt.h>
102
103#include <mach/host_priv_server.h>
104#include <mach/mach_vm_server.h>
105#include <mach/memory_entry_server.h>
106#include <mach/vm_map_server.h>
107
108#include <kern/host.h>
109#include <kern/kalloc.h>
110#include <kern/task.h>
111#include <kern/misc_protos.h>
112#include <vm/vm_fault.h>
113#include <vm/vm_map.h>
114#include <vm/vm_object.h>
115#include <vm/vm_page.h>
116#include <vm/memory_object.h>
117#include <vm/vm_pageout.h>
118#include <vm/vm_protos.h>
119#include <vm/vm_purgeable_internal.h>
120#include <vm/vm_init.h>
121
122#include <san/kasan.h>
123
124#include <libkern/OSDebug.h>
125
126vm_size_t upl_offset_to_pagelist = 0;
127
128#if VM_CPM
129#include <vm/cpm.h>
130#endif /* VM_CPM */
131
132/*
 133 * mach_vm_allocate allocates "zero fill" memory in the specified
134 * map.
135 */
136kern_return_t
137mach_vm_allocate_external(
138 vm_map_t map,
139 mach_vm_offset_t *addr,
140 mach_vm_size_t size,
141 int flags)
142{
143 vm_tag_t tag;
144
145 VM_GET_FLAGS_ALIAS(flags, tag);
146 return mach_vm_allocate_kernel(map, addr, size, flags, tag);
147}
148
149kern_return_t
150mach_vm_allocate_kernel(
151 vm_map_t map,
152 mach_vm_offset_t *addr,
153 mach_vm_size_t size,
154 int flags,
155 vm_tag_t tag)
156{
157 vm_map_offset_t map_addr;
158 vm_map_size_t map_size;
159 kern_return_t result;
160 boolean_t anywhere;
161
162 /* filter out any kernel-only flags */
163 if (flags & ~VM_FLAGS_USER_ALLOCATE) {
164 return KERN_INVALID_ARGUMENT;
165 }
166
167 if (map == VM_MAP_NULL) {
168 return KERN_INVALID_ARGUMENT;
169 }
170 if (size == 0) {
171 *addr = 0;
172 return KERN_SUCCESS;
173 }
174
175 anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
176 if (anywhere) {
177 /*
178 * No specific address requested, so start candidate address
179 * search at the minimum address in the map. However, if that
180 * minimum is 0, bump it up by PAGE_SIZE. We want to limit
181 * allocations of PAGEZERO to explicit requests since its
182 * normal use is to catch dereferences of NULL and many
183 * applications also treat pointers with a value of 0 as
 184 * special and suddenly having address 0 contain usable
185 * memory would tend to confuse those applications.
186 */
187 map_addr = vm_map_min(map);
188 if (map_addr == 0) {
189 map_addr += VM_MAP_PAGE_SIZE(map);
190 }
191 } else {
192 map_addr = vm_map_trunc_page(*addr,
193 VM_MAP_PAGE_MASK(map));
194 }
195 map_size = vm_map_round_page(size,
196 VM_MAP_PAGE_MASK(map));
197 if (map_size == 0) {
198 return KERN_INVALID_ARGUMENT;
199 }
200
201 result = vm_map_enter(
202 map,
203 &map_addr,
204 map_size,
205 (vm_map_offset_t)0,
206 flags,
207 VM_MAP_KERNEL_FLAGS_NONE,
208 tag,
209 VM_OBJECT_NULL,
210 (vm_object_offset_t)0,
211 FALSE,
212 VM_PROT_DEFAULT,
213 VM_PROT_ALL,
214 VM_INHERIT_DEFAULT);
215
216 *addr = map_addr;
217 return result;
218}
219
220/*
221 * vm_allocate
 222 * Legacy routine that allocates "zero fill" memory in the specified
223 * map (which is limited to the same size as the kernel).
224 */
225kern_return_t
226vm_allocate_external(
227 vm_map_t map,
228 vm_offset_t *addr,
229 vm_size_t size,
230 int flags)
231{
232 vm_tag_t tag;
233
234 VM_GET_FLAGS_ALIAS(flags, tag);
235 return vm_allocate_kernel(map, addr, size, flags, tag);
236}
237
238kern_return_t
239vm_allocate_kernel(
240 vm_map_t map,
241 vm_offset_t *addr,
242 vm_size_t size,
243 int flags,
244 vm_tag_t tag)
245{
246 vm_map_offset_t map_addr;
247 vm_map_size_t map_size;
248 kern_return_t result;
249 boolean_t anywhere;
250
251 /* filter out any kernel-only flags */
252 if (flags & ~VM_FLAGS_USER_ALLOCATE) {
253 return KERN_INVALID_ARGUMENT;
254 }
255
256 if (map == VM_MAP_NULL) {
257 return KERN_INVALID_ARGUMENT;
258 }
259 if (size == 0) {
260 *addr = 0;
261 return KERN_SUCCESS;
262 }
263
264 anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
265 if (anywhere) {
266 /*
267 * No specific address requested, so start candidate address
268 * search at the minimum address in the map. However, if that
269 * minimum is 0, bump it up by PAGE_SIZE. We want to limit
270 * allocations of PAGEZERO to explicit requests since its
271 * normal use is to catch dereferences of NULL and many
272 * applications also treat pointers with a value of 0 as
 273 * special and suddenly having address 0 contain usable
274 * memory would tend to confuse those applications.
275 */
276 map_addr = vm_map_min(map);
277 if (map_addr == 0) {
278 map_addr += VM_MAP_PAGE_SIZE(map);
279 }
280 } else {
281 map_addr = vm_map_trunc_page(*addr,
282 VM_MAP_PAGE_MASK(map));
283 }
284 map_size = vm_map_round_page(size,
285 VM_MAP_PAGE_MASK(map));
286 if (map_size == 0) {
287 return KERN_INVALID_ARGUMENT;
288 }
289
290 result = vm_map_enter(
291 map,
292 &map_addr,
293 map_size,
294 (vm_map_offset_t)0,
295 flags,
296 VM_MAP_KERNEL_FLAGS_NONE,
297 tag,
298 VM_OBJECT_NULL,
299 (vm_object_offset_t)0,
300 FALSE,
301 VM_PROT_DEFAULT,
302 VM_PROT_ALL,
303 VM_INHERIT_DEFAULT);
304
305#if KASAN
306 if (result == KERN_SUCCESS && map->pmap == kernel_pmap) {
307 kasan_notify_address(map_addr, map_size);
308 }
309#endif
310
311 *addr = CAST_DOWN(vm_offset_t, map_addr);
312 return result;
313}
314
315/*
316 * mach_vm_deallocate -
317 * deallocates the specified range of addresses in the
318 * specified address map.
319 */
320kern_return_t
321mach_vm_deallocate(
322 vm_map_t map,
323 mach_vm_offset_t start,
324 mach_vm_size_t size)
325{
326 if ((map == VM_MAP_NULL) || (start + size < start)) {
327 return KERN_INVALID_ARGUMENT;
328 }
329
330 if (size == (mach_vm_offset_t) 0) {
331 return KERN_SUCCESS;
332 }
333
334 return vm_map_remove(map,
335 vm_map_trunc_page(start,
336 VM_MAP_PAGE_MASK(map)),
337 vm_map_round_page(start + size,
338 VM_MAP_PAGE_MASK(map)),
339 VM_MAP_REMOVE_NO_FLAGS);
340}
341
342/*
343 * vm_deallocate -
344 * deallocates the specified range of addresses in the
345 * specified address map (limited to addresses the same
346 * size as the kernel).
347 */
348kern_return_t
349vm_deallocate(
350 vm_map_t map,
351 vm_offset_t start,
352 vm_size_t size)
353{
354 if ((map == VM_MAP_NULL) || (start + size < start)) {
355 return KERN_INVALID_ARGUMENT;
356 }
357
358 if (size == (vm_offset_t) 0) {
359 return KERN_SUCCESS;
360 }
361
362 return vm_map_remove(map,
363 vm_map_trunc_page(start,
364 VM_MAP_PAGE_MASK(map)),
365 vm_map_round_page(start + size,
366 VM_MAP_PAGE_MASK(map)),
367 VM_MAP_REMOVE_NO_FLAGS);
368}
369
370/*
371 * mach_vm_inherit -
372 * Sets the inheritance of the specified range in the
373 * specified map.
374 */
375kern_return_t
376mach_vm_inherit(
377 vm_map_t map,
378 mach_vm_offset_t start,
379 mach_vm_size_t size,
380 vm_inherit_t new_inheritance)
381{
382 if ((map == VM_MAP_NULL) || (start + size < start) ||
383 (new_inheritance > VM_INHERIT_LAST_VALID)) {
384 return KERN_INVALID_ARGUMENT;
385 }
386
387 if (size == 0) {
388 return KERN_SUCCESS;
389 }
390
391 return vm_map_inherit(map,
392 vm_map_trunc_page(start,
393 VM_MAP_PAGE_MASK(map)),
394 vm_map_round_page(start + size,
395 VM_MAP_PAGE_MASK(map)),
396 new_inheritance);
397}
398
399/*
400 * vm_inherit -
401 * Sets the inheritance of the specified range in the
 402 * specified map (range limited to addresses the same size as the kernel).
403 */
404kern_return_t
405vm_inherit(
406 vm_map_t map,
407 vm_offset_t start,
408 vm_size_t size,
409 vm_inherit_t new_inheritance)
410{
411 if ((map == VM_MAP_NULL) || (start + size < start) ||
412 (new_inheritance > VM_INHERIT_LAST_VALID)) {
413 return KERN_INVALID_ARGUMENT;
414 }
415
416 if (size == 0) {
417 return KERN_SUCCESS;
418 }
419
420 return vm_map_inherit(map,
421 vm_map_trunc_page(start,
422 VM_MAP_PAGE_MASK(map)),
423 vm_map_round_page(start + size,
424 VM_MAP_PAGE_MASK(map)),
425 new_inheritance);
426}
427
428/*
429 * mach_vm_protect -
430 * Sets the protection of the specified range in the
431 * specified map.
432 */
433
434kern_return_t
435mach_vm_protect(
436 vm_map_t map,
437 mach_vm_offset_t start,
438 mach_vm_size_t size,
439 boolean_t set_maximum,
440 vm_prot_t new_protection)
441{
442 if ((map == VM_MAP_NULL) || (start + size < start) ||
443 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
444 return KERN_INVALID_ARGUMENT;
445 }
446
447 if (size == 0) {
448 return KERN_SUCCESS;
449 }
450
451 return vm_map_protect(map,
452 vm_map_trunc_page(start,
453 VM_MAP_PAGE_MASK(map)),
454 vm_map_round_page(start + size,
455 VM_MAP_PAGE_MASK(map)),
456 new_protection,
457 set_maximum);
458}
459
460/*
461 * vm_protect -
462 * Sets the protection of the specified range in the
463 * specified map. Addressability of the range limited
464 * to the same size as the kernel.
465 */
466
467kern_return_t
468vm_protect(
469 vm_map_t map,
470 vm_offset_t start,
471 vm_size_t size,
472 boolean_t set_maximum,
473 vm_prot_t new_protection)
474{
475 if ((map == VM_MAP_NULL) || (start + size < start) ||
476 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
477 return KERN_INVALID_ARGUMENT;
478 }
479
480 if (size == 0) {
481 return KERN_SUCCESS;
482 }
483
484 return vm_map_protect(map,
485 vm_map_trunc_page(start,
486 VM_MAP_PAGE_MASK(map)),
487 vm_map_round_page(start + size,
488 VM_MAP_PAGE_MASK(map)),
489 new_protection,
490 set_maximum);
491}
492
493/*
 494 * mach_vm_machine_attribute -
 495 * Handle machine-specific attributes for a mapping, such
 496 * as cacheability, migratability, etc.
497 */
498kern_return_t
499mach_vm_machine_attribute(
500 vm_map_t map,
501 mach_vm_address_t addr,
502 mach_vm_size_t size,
503 vm_machine_attribute_t attribute,
504 vm_machine_attribute_val_t* value) /* IN/OUT */
505{
506 if ((map == VM_MAP_NULL) || (addr + size < addr)) {
507 return KERN_INVALID_ARGUMENT;
508 }
509
510 if (size == 0) {
511 return KERN_SUCCESS;
512 }
513
514 return vm_map_machine_attribute(
515 map,
516 vm_map_trunc_page(addr,
517 VM_MAP_PAGE_MASK(map)),
518 vm_map_round_page(addr + size,
519 VM_MAP_PAGE_MASK(map)),
520 attribute,
521 value);
522}
523
524/*
525 * vm_machine_attribute -
526 * Handle machine-specific attributes for a mapping, such
 527 * as cacheability, migratability, etc. Limited addressability
528 * (same range limits as for the native kernel map).
529 */
530kern_return_t
531vm_machine_attribute(
532 vm_map_t map,
533 vm_address_t addr,
534 vm_size_t size,
535 vm_machine_attribute_t attribute,
536 vm_machine_attribute_val_t* value) /* IN/OUT */
537{
538 if ((map == VM_MAP_NULL) || (addr + size < addr)) {
539 return KERN_INVALID_ARGUMENT;
540 }
541
542 if (size == 0) {
543 return KERN_SUCCESS;
544 }
545
546 return vm_map_machine_attribute(
547 map,
548 vm_map_trunc_page(addr,
549 VM_MAP_PAGE_MASK(map)),
550 vm_map_round_page(addr + size,
551 VM_MAP_PAGE_MASK(map)),
552 attribute,
553 value);
554}
555
556/*
557 * mach_vm_read -
558 * Read/copy a range from one address space and return it to the caller.
559 *
560 * It is assumed that the address for the returned memory is selected by
561 * the IPC implementation as part of receiving the reply to this call.
562 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
563 * that gets returned.
564 *
565 * JMM - because of mach_msg_type_number_t, this call is limited to a
566 * single 4GB region at this time.
567 *
568 */
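/*
 * Illustrative sketch (kernel-side, no IPC reply involved): the caller owns
 * the vm_map_copy_t returned in "data" and must either copy it out into a
 * map or discard it. All names below are local to the example.
 *
 *	pointer_t data;
 *	mach_msg_type_number_t data_size;
 *	vm_map_address_t copied_addr;
 *
 *	if (mach_vm_read(src_map, src_addr, len, &data, &data_size) == KERN_SUCCESS) {
 *		if (vm_map_copyout(current_task()->map, &copied_addr,
 *		    (vm_map_copy_t)data) != KERN_SUCCESS) {
 *			vm_map_copy_discard((vm_map_copy_t)data);
 *		}
 *	}
 */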
569kern_return_t
570mach_vm_read(
571 vm_map_t map,
572 mach_vm_address_t addr,
573 mach_vm_size_t size,
574 pointer_t *data,
575 mach_msg_type_number_t *data_size)
576{
577 kern_return_t error;
578 vm_map_copy_t ipc_address;
579
580 if (map == VM_MAP_NULL) {
581 return KERN_INVALID_ARGUMENT;
582 }
583
584 if ((mach_msg_type_number_t) size != size) {
585 return KERN_INVALID_ARGUMENT;
586 }
587
588 error = vm_map_copyin(map,
589 (vm_map_address_t)addr,
590 (vm_map_size_t)size,
591 FALSE, /* src_destroy */
592 &ipc_address);
593
594 if (KERN_SUCCESS == error) {
595 *data = (pointer_t) ipc_address;
596 *data_size = (mach_msg_type_number_t) size;
597 assert(*data_size == size);
598 }
599 return error;
600}
601
602/*
603 * vm_read -
604 * Read/copy a range from one address space and return it to the caller.
605 * Limited addressability (same range limits as for the native kernel map).
606 *
607 * It is assumed that the address for the returned memory is selected by
608 * the IPC implementation as part of receiving the reply to this call.
609 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
610 * that gets returned.
611 */
612kern_return_t
613vm_read(
614 vm_map_t map,
615 vm_address_t addr,
616 vm_size_t size,
617 pointer_t *data,
618 mach_msg_type_number_t *data_size)
619{
620 kern_return_t error;
621 vm_map_copy_t ipc_address;
622
623 if (map == VM_MAP_NULL) {
624 return KERN_INVALID_ARGUMENT;
625 }
626
627 mach_msg_type_number_t dsize;
628 if (os_convert_overflow(size, &dsize)) {
629 /*
630 * The kernel could handle a 64-bit "size" value, but
631 * it could not return the size of the data in "*data_size"
632 * without overflowing.
633 * Let's reject this "size" as invalid.
634 */
635 return KERN_INVALID_ARGUMENT;
636 }
637
638 error = vm_map_copyin(map,
639 (vm_map_address_t)addr,
640 (vm_map_size_t)size,
641 FALSE, /* src_destroy */
642 &ipc_address);
643
644 if (KERN_SUCCESS == error) {
645 *data = (pointer_t) ipc_address;
646 *data_size = dsize;
647 assert(*data_size == size);
648 }
649 return error;
650}
651
652/*
653 * mach_vm_read_list -
654 * Read/copy a list of address ranges from specified map.
655 *
656 * MIG does not know how to deal with a returned array of
657 * vm_map_copy_t structures, so we have to do the copyout
658 * manually here.
659 */
660kern_return_t
661mach_vm_read_list(
662 vm_map_t map,
663 mach_vm_read_entry_t data_list,
664 natural_t count)
665{
666 mach_msg_type_number_t i;
667 kern_return_t error;
668 vm_map_copy_t copy;
669
670 if (map == VM_MAP_NULL ||
671 count > VM_MAP_ENTRY_MAX) {
672 return KERN_INVALID_ARGUMENT;
673 }
674
675 error = KERN_SUCCESS;
676 for (i = 0; i < count; i++) {
677 vm_map_address_t map_addr;
678 vm_map_size_t map_size;
679
680 map_addr = (vm_map_address_t)(data_list[i].address);
681 map_size = (vm_map_size_t)(data_list[i].size);
682
683 if (map_size != 0) {
684 error = vm_map_copyin(map,
685 map_addr,
686 map_size,
687 FALSE, /* src_destroy */
688 &copy);
689 if (KERN_SUCCESS == error) {
690 error = vm_map_copyout(
691 current_task()->map,
692 &map_addr,
693 copy);
694 if (KERN_SUCCESS == error) {
695 data_list[i].address = map_addr;
696 continue;
697 }
698 vm_map_copy_discard(copy);
699 }
700 }
701 data_list[i].address = (mach_vm_address_t)0;
702 data_list[i].size = (mach_vm_size_t)0;
703 }
704 return error;
705}
706
707/*
708 * vm_read_list -
709 * Read/copy a list of address ranges from specified map.
710 *
711 * MIG does not know how to deal with a returned array of
712 * vm_map_copy_t structures, so we have to do the copyout
713 * manually here.
714 *
715 * The source and destination ranges are limited to those
716 * that can be described with a vm_address_t (i.e. same
717 * size map as the kernel).
718 *
719 * JMM - If the result of the copyout is an address range
720 * that cannot be described with a vm_address_t (i.e. the
721 * caller had a larger address space but used this call
722 * anyway), it will result in a truncated address being
723 * returned (and a likely confused caller).
724 */
725
726kern_return_t
727vm_read_list(
728 vm_map_t map,
729 vm_read_entry_t data_list,
730 natural_t count)
731{
732 mach_msg_type_number_t i;
733 kern_return_t error;
734 vm_map_copy_t copy;
735
736 if (map == VM_MAP_NULL ||
737 count > VM_MAP_ENTRY_MAX) {
738 return KERN_INVALID_ARGUMENT;
739 }
740
741 error = KERN_SUCCESS;
742 for (i = 0; i < count; i++) {
743 vm_map_address_t map_addr;
744 vm_map_size_t map_size;
745
746 map_addr = (vm_map_address_t)(data_list[i].address);
747 map_size = (vm_map_size_t)(data_list[i].size);
748
749 if (map_size != 0) {
750 error = vm_map_copyin(map,
751 map_addr,
752 map_size,
753 FALSE, /* src_destroy */
754 &copy);
755 if (KERN_SUCCESS == error) {
756 error = vm_map_copyout(current_task()->map,
757 &map_addr,
758 copy);
759 if (KERN_SUCCESS == error) {
760 data_list[i].address =
761 CAST_DOWN(vm_offset_t, map_addr);
762 continue;
763 }
764 vm_map_copy_discard(copy);
765 }
766 }
767 data_list[i].address = (mach_vm_address_t)0;
768 data_list[i].size = (mach_vm_size_t)0;
769 }
770 return error;
771}
772
773/*
774 * mach_vm_read_overwrite -
775 * Overwrite a range of the current map with data from the specified
776 * map/address range.
777 *
778 * In making an assumption that the current thread is local, it is
779 * no longer cluster-safe without a fully supportive local proxy
780 * thread/task (but we don't support cluster's anymore so this is moot).
781 */
782
783kern_return_t
784mach_vm_read_overwrite(
785 vm_map_t map,
786 mach_vm_address_t address,
787 mach_vm_size_t size,
788 mach_vm_address_t data,
789 mach_vm_size_t *data_size)
790{
791 kern_return_t error;
792 vm_map_copy_t copy;
793
794 if (map == VM_MAP_NULL) {
795 return KERN_INVALID_ARGUMENT;
796 }
797
798 error = vm_map_copyin(map, (vm_map_address_t)address,
799 (vm_map_size_t)size, FALSE, &copy);
800
801 if (KERN_SUCCESS == error) {
802 error = vm_map_copy_overwrite(current_thread()->map,
803 (vm_map_address_t)data,
804 copy, FALSE);
805 if (KERN_SUCCESS == error) {
806 *data_size = size;
807 return error;
808 }
809 vm_map_copy_discard(copy);
810 }
811 return error;
812}
813
814/*
815 * vm_read_overwrite -
816 * Overwrite a range of the current map with data from the specified
817 * map/address range.
818 *
819 * This routine adds the additional limitation that the source and
820 * destination ranges must be describable with vm_address_t values
 821 * (i.e. the same size address spaces as the kernel, or at least
 822 * the ranges are in that first portion of the respective address
823 * spaces).
824 */
825
826kern_return_t
827vm_read_overwrite(
828 vm_map_t map,
829 vm_address_t address,
830 vm_size_t size,
831 vm_address_t data,
832 vm_size_t *data_size)
833{
834 kern_return_t error;
835 vm_map_copy_t copy;
836
837 if (map == VM_MAP_NULL) {
838 return KERN_INVALID_ARGUMENT;
839 }
840
841 error = vm_map_copyin(map, (vm_map_address_t)address,
842 (vm_map_size_t)size, FALSE, &copy);
843
844 if (KERN_SUCCESS == error) {
845 error = vm_map_copy_overwrite(current_thread()->map,
846 (vm_map_address_t)data,
847 copy, FALSE);
848 if (KERN_SUCCESS == error) {
849 *data_size = size;
850 return error;
851 }
852 vm_map_copy_discard(copy);
853 }
854 return error;
855}
856
857
858/*
859 * mach_vm_write -
860 * Overwrite the specified address range with the data provided
861 * (from the current map).
862 */
863kern_return_t
864mach_vm_write(
865 vm_map_t map,
866 mach_vm_address_t address,
867 pointer_t data,
868 __unused mach_msg_type_number_t size)
869{
870 if (map == VM_MAP_NULL) {
871 return KERN_INVALID_ARGUMENT;
872 }
873
874 return vm_map_copy_overwrite(map, (vm_map_address_t)address,
875 (vm_map_copy_t) data, FALSE /* interruptible XXX */);
876}
877
878/*
879 * vm_write -
880 * Overwrite the specified address range with the data provided
881 * (from the current map).
882 *
883 * The addressability of the range of addresses to overwrite is
 884 * limited by the use of a vm_address_t (same size as kernel map).
885 * Either the target map is also small, or the range is in the
886 * low addresses within it.
887 */
888kern_return_t
889vm_write(
890 vm_map_t map,
891 vm_address_t address,
892 pointer_t data,
893 __unused mach_msg_type_number_t size)
894{
895 if (map == VM_MAP_NULL) {
896 return KERN_INVALID_ARGUMENT;
897 }
898
899 return vm_map_copy_overwrite(map, (vm_map_address_t)address,
900 (vm_map_copy_t) data, FALSE /* interruptible XXX */);
901}
902
903/*
904 * mach_vm_copy -
905 * Overwrite one range of the specified map with the contents of
906 * another range within that same map (i.e. both address ranges
907 * are "over there").
908 */
909kern_return_t
910mach_vm_copy(
911 vm_map_t map,
912 mach_vm_address_t source_address,
913 mach_vm_size_t size,
914 mach_vm_address_t dest_address)
915{
916 vm_map_copy_t copy;
917 kern_return_t kr;
918
919 if (map == VM_MAP_NULL) {
920 return KERN_INVALID_ARGUMENT;
921 }
922
923 kr = vm_map_copyin(map, (vm_map_address_t)source_address,
924 (vm_map_size_t)size, FALSE, &copy);
925
926 if (KERN_SUCCESS == kr) {
927 kr = vm_map_copy_overwrite(map,
928 (vm_map_address_t)dest_address,
929 copy, FALSE /* interruptible XXX */);
930
931 if (KERN_SUCCESS != kr) {
932 vm_map_copy_discard(copy);
933 }
934 }
935 return kr;
936}
937
938kern_return_t
939vm_copy(
940 vm_map_t map,
941 vm_address_t source_address,
942 vm_size_t size,
943 vm_address_t dest_address)
944{
945 vm_map_copy_t copy;
946 kern_return_t kr;
947
948 if (map == VM_MAP_NULL) {
949 return KERN_INVALID_ARGUMENT;
950 }
951
952 kr = vm_map_copyin(map, (vm_map_address_t)source_address,
953 (vm_map_size_t)size, FALSE, &copy);
954
955 if (KERN_SUCCESS == kr) {
956 kr = vm_map_copy_overwrite(map,
957 (vm_map_address_t)dest_address,
958 copy, FALSE /* interruptible XXX */);
959
960 if (KERN_SUCCESS != kr) {
961 vm_map_copy_discard(copy);
962 }
963 }
964 return kr;
965}
966
967/*
968 * mach_vm_map -
969 * Map some range of an object into an address space.
970 *
971 * The object can be one of several types of objects:
972 * NULL - anonymous memory
973 * a named entry - a range within another address space
974 * or a range within a memory object
975 * a whole memory object
976 *
977 */
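/*
 * Illustrative sketch: mapping anonymous memory by passing a null port for
 * the object (an assumption consistent with the "NULL - anonymous memory"
 * case above; names are example-local).
 *
 *	mach_vm_offset_t addr = 0;
 *
 *	kr = mach_vm_map_external(target_map, &addr, len, 0,
 *	    VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, FALSE,
 *	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
 */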
978kern_return_t
979mach_vm_map_external(
980 vm_map_t target_map,
981 mach_vm_offset_t *address,
982 mach_vm_size_t initial_size,
983 mach_vm_offset_t mask,
984 int flags,
985 ipc_port_t port,
986 vm_object_offset_t offset,
987 boolean_t copy,
988 vm_prot_t cur_protection,
989 vm_prot_t max_protection,
990 vm_inherit_t inheritance)
991{
992 vm_tag_t tag;
993
994 VM_GET_FLAGS_ALIAS(flags, tag);
995 return mach_vm_map_kernel(target_map, address, initial_size, mask,
996 flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
997 port, offset, copy,
998 cur_protection, max_protection,
999 inheritance);
1000}
1001
1002kern_return_t
1003mach_vm_map_kernel(
1004 vm_map_t target_map,
1005 mach_vm_offset_t *address,
1006 mach_vm_size_t initial_size,
1007 mach_vm_offset_t mask,
1008 int flags,
1009 vm_map_kernel_flags_t vmk_flags,
1010 vm_tag_t tag,
1011 ipc_port_t port,
1012 vm_object_offset_t offset,
1013 boolean_t copy,
1014 vm_prot_t cur_protection,
1015 vm_prot_t max_protection,
1016 vm_inherit_t inheritance)
1017{
1018 kern_return_t kr;
1019 vm_map_offset_t vmmaddr;
1020
1021 vmmaddr = (vm_map_offset_t) *address;
1022
1023 /* filter out any kernel-only flags */
1024 if (flags & ~VM_FLAGS_USER_MAP) {
1025 return KERN_INVALID_ARGUMENT;
1026 }
1027
1028 kr = vm_map_enter_mem_object(target_map,
1029 &vmmaddr,
1030 initial_size,
1031 mask,
1032 flags,
1033 vmk_flags,
1034 tag,
1035 port,
1036 offset,
1037 copy,
1038 cur_protection,
1039 max_protection,
1040 inheritance);
1041
1042#if KASAN
1043 if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) {
1044 kasan_notify_address(vmmaddr, initial_size);
1045 }
1046#endif
1047
1048 *address = vmmaddr;
1049 return kr;
1050}
1051
1052
1053/* legacy interface */
1054kern_return_t
1055vm_map_64_external(
1056 vm_map_t target_map,
1057 vm_offset_t *address,
1058 vm_size_t size,
1059 vm_offset_t mask,
1060 int flags,
1061 ipc_port_t port,
1062 vm_object_offset_t offset,
1063 boolean_t copy,
1064 vm_prot_t cur_protection,
1065 vm_prot_t max_protection,
1066 vm_inherit_t inheritance)
1067{
1068 vm_tag_t tag;
1069
1070 VM_GET_FLAGS_ALIAS(flags, tag);
1071 return vm_map_64_kernel(target_map, address, size, mask,
1072 flags, VM_MAP_KERNEL_FLAGS_NONE,
1073 tag, port, offset, copy,
1074 cur_protection, max_protection,
1075 inheritance);
1076}
1077
1078kern_return_t
1079vm_map_64_kernel(
1080 vm_map_t target_map,
1081 vm_offset_t *address,
1082 vm_size_t size,
1083 vm_offset_t mask,
1084 int flags,
1085 vm_map_kernel_flags_t vmk_flags,
1086 vm_tag_t tag,
1087 ipc_port_t port,
1088 vm_object_offset_t offset,
1089 boolean_t copy,
1090 vm_prot_t cur_protection,
1091 vm_prot_t max_protection,
1092 vm_inherit_t inheritance)
1093{
1094 mach_vm_address_t map_addr;
1095 mach_vm_size_t map_size;
1096 mach_vm_offset_t map_mask;
1097 kern_return_t kr;
1098
1099 map_addr = (mach_vm_address_t)*address;
1100 map_size = (mach_vm_size_t)size;
1101 map_mask = (mach_vm_offset_t)mask;
1102
1103 kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
1104 flags, vmk_flags, tag,
1105 port, offset, copy,
1106 cur_protection, max_protection, inheritance);
1107 *address = CAST_DOWN(vm_offset_t, map_addr);
1108 return kr;
1109}
1110
1111/* temporary, until world build */
1112kern_return_t
1113vm_map_external(
1114 vm_map_t target_map,
1115 vm_offset_t *address,
1116 vm_size_t size,
1117 vm_offset_t mask,
1118 int flags,
1119 ipc_port_t port,
1120 vm_offset_t offset,
1121 boolean_t copy,
1122 vm_prot_t cur_protection,
1123 vm_prot_t max_protection,
1124 vm_inherit_t inheritance)
1125{
1126 vm_tag_t tag;
1127
1128 VM_GET_FLAGS_ALIAS(flags, tag);
1129 return vm_map_kernel(target_map, address, size, mask,
1130 flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
1131 port, offset, copy,
1132 cur_protection, max_protection, inheritance);
1133}
1134
1135kern_return_t
1136vm_map_kernel(
1137 vm_map_t target_map,
1138 vm_offset_t *address,
1139 vm_size_t size,
1140 vm_offset_t mask,
1141 int flags,
1142 vm_map_kernel_flags_t vmk_flags,
1143 vm_tag_t tag,
1144 ipc_port_t port,
1145 vm_offset_t offset,
1146 boolean_t copy,
1147 vm_prot_t cur_protection,
1148 vm_prot_t max_protection,
1149 vm_inherit_t inheritance)
1150{
1151 mach_vm_address_t map_addr;
1152 mach_vm_size_t map_size;
1153 mach_vm_offset_t map_mask;
1154 vm_object_offset_t obj_offset;
1155 kern_return_t kr;
1156
1157 map_addr = (mach_vm_address_t)*address;
1158 map_size = (mach_vm_size_t)size;
1159 map_mask = (mach_vm_offset_t)mask;
1160 obj_offset = (vm_object_offset_t)offset;
1161
1162 kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
1163 flags, vmk_flags, tag,
1164 port, obj_offset, copy,
1165 cur_protection, max_protection, inheritance);
1166 *address = CAST_DOWN(vm_offset_t, map_addr);
1167 return kr;
1168}
1169
1170/*
1171 * mach_vm_remap -
1172 * Remap a range of memory from one task into another,
1173 * to another address range within the same task, or
1174 * over top of itself (with altered permissions and/or
1175 * as an in-place copy of itself).
1176 */
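/*
 * Illustrative sketch: take a copy-on-write snapshot of a source range into
 * another map. The effective protections come back through the two
 * out-parameters. Names are example-local.
 *
 *	mach_vm_offset_t dst_addr = 0;
 *	vm_prot_t cur_prot, max_prot;
 *
 *	kr = mach_vm_remap_kernel(dst_map, &dst_addr, len, 0,
 *	    VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK,
 *	    src_map, src_addr, TRUE,
 *	    &cur_prot, &max_prot, VM_INHERIT_NONE);
 */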
1177kern_return_t
1178mach_vm_remap_external(
1179 vm_map_t target_map,
1180 mach_vm_offset_t *address,
1181 mach_vm_size_t size,
1182 mach_vm_offset_t mask,
1183 int flags,
1184 vm_map_t src_map,
1185 mach_vm_offset_t memory_address,
1186 boolean_t copy,
1187 vm_prot_t *cur_protection,
1188 vm_prot_t *max_protection,
1189 vm_inherit_t inheritance)
1190{
1191 vm_tag_t tag;
1192 VM_GET_FLAGS_ALIAS(flags, tag);
1193
1194 return mach_vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, memory_address,
1195 copy, cur_protection, max_protection, inheritance);
1196}
1197
1198kern_return_t
1199mach_vm_remap_kernel(
1200 vm_map_t target_map,
1201 mach_vm_offset_t *address,
1202 mach_vm_size_t size,
1203 mach_vm_offset_t mask,
1204 int flags,
1205 vm_tag_t tag,
1206 vm_map_t src_map,
1207 mach_vm_offset_t memory_address,
1208 boolean_t copy,
1209 vm_prot_t *cur_protection,
1210 vm_prot_t *max_protection,
1211 vm_inherit_t inheritance)
1212{
1213 vm_map_offset_t map_addr;
1214 kern_return_t kr;
1215
1216 if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
1217 return KERN_INVALID_ARGUMENT;
1218 }
1219
1220 /* filter out any kernel-only flags */
1221 if (flags & ~VM_FLAGS_USER_REMAP) {
1222 return KERN_INVALID_ARGUMENT;
1223 }
1224
1225 map_addr = (vm_map_offset_t)*address;
1226
1227 kr = vm_map_remap(target_map,
1228 &map_addr,
1229 size,
1230 mask,
1231 flags,
1232 VM_MAP_KERNEL_FLAGS_NONE,
1233 tag,
1234 src_map,
1235 memory_address,
1236 copy,
1237 cur_protection,
1238 max_protection,
1239 inheritance);
1240 *address = map_addr;
1241 return kr;
1242}
1243
1244/*
1245 * vm_remap -
1246 * Remap a range of memory from one task into another,
1247 * to another address range within the same task, or
1248 * over top of itself (with altered permissions and/or
1249 * as an in-place copy of itself).
1250 *
1251 * The addressability of the source and target address
1252 * range is limited by the size of vm_address_t (in the
1253 * kernel context).
1254 */
1255kern_return_t
1256vm_remap_external(
1257 vm_map_t target_map,
1258 vm_offset_t *address,
1259 vm_size_t size,
1260 vm_offset_t mask,
1261 int flags,
1262 vm_map_t src_map,
1263 vm_offset_t memory_address,
1264 boolean_t copy,
1265 vm_prot_t *cur_protection,
1266 vm_prot_t *max_protection,
1267 vm_inherit_t inheritance)
1268{
1269 vm_tag_t tag;
1270 VM_GET_FLAGS_ALIAS(flags, tag);
1271
1272 return vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map,
1273 memory_address, copy, cur_protection, max_protection, inheritance);
1274}
1275
1276kern_return_t
1277vm_remap_kernel(
1278 vm_map_t target_map,
1279 vm_offset_t *address,
1280 vm_size_t size,
1281 vm_offset_t mask,
1282 int flags,
1283 vm_tag_t tag,
1284 vm_map_t src_map,
1285 vm_offset_t memory_address,
1286 boolean_t copy,
1287 vm_prot_t *cur_protection,
1288 vm_prot_t *max_protection,
1289 vm_inherit_t inheritance)
1290{
1291 vm_map_offset_t map_addr;
1292 kern_return_t kr;
1293
1294 if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
1295 return KERN_INVALID_ARGUMENT;
1296 }
1297
1298 /* filter out any kernel-only flags */
1299 if (flags & ~VM_FLAGS_USER_REMAP) {
1300 return KERN_INVALID_ARGUMENT;
1301 }
1302
1303 map_addr = (vm_map_offset_t)*address;
1304
1305 kr = vm_map_remap(target_map,
1306 &map_addr,
1307 size,
1308 mask,
1309 flags,
1310 VM_MAP_KERNEL_FLAGS_NONE,
1311 tag,
1312 src_map,
1313 memory_address,
1314 copy,
1315 cur_protection,
1316 max_protection,
1317 inheritance);
1318 *address = CAST_DOWN(vm_offset_t, map_addr);
1319 return kr;
1320}
1321
1322/*
 1323 * NOTE: these routines (and this file) will no longer require mach_host_server.h
1324 * when mach_vm_wire and vm_wire are changed to use ledgers.
1325 */
1326#include <mach/mach_host_server.h>
1327/*
1328 * mach_vm_wire
1329 * Specify that the range of the virtual address space
1330 * of the target task must not cause page faults for
1331 * the indicated accesses.
1332 *
1333 * [ To unwire the pages, specify VM_PROT_NONE. ]
1334 */
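/*
 * Illustrative sketch: wire a range for read/write access, then unwire it
 * later by passing VM_PROT_NONE (host_priv and map are assumed to be held
 * by the caller).
 *
 *	kr = mach_vm_wire_external(host_priv, map, start, len,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	...
 *	kr = mach_vm_wire_external(host_priv, map, start, len, VM_PROT_NONE);
 */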
1335kern_return_t
1336mach_vm_wire_external(
1337 host_priv_t host_priv,
1338 vm_map_t map,
1339 mach_vm_offset_t start,
1340 mach_vm_size_t size,
1341 vm_prot_t access)
1342{
1343 return mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK);
1344}
1345
1346kern_return_t
1347mach_vm_wire_kernel(
1348 host_priv_t host_priv,
1349 vm_map_t map,
1350 mach_vm_offset_t start,
1351 mach_vm_size_t size,
1352 vm_prot_t access,
1353 vm_tag_t tag)
1354{
1355 kern_return_t rc;
1356
1357 if (host_priv == HOST_PRIV_NULL) {
1358 return KERN_INVALID_HOST;
1359 }
1360
1361 assert(host_priv == &realhost);
1362
1363 if (map == VM_MAP_NULL) {
1364 return KERN_INVALID_TASK;
1365 }
1366
1367 if (access & ~VM_PROT_ALL || (start + size < start)) {
1368 return KERN_INVALID_ARGUMENT;
1369 }
1370
1371 if (access != VM_PROT_NONE) {
1372 rc = vm_map_wire_kernel(map,
1373 vm_map_trunc_page(start,
1374 VM_MAP_PAGE_MASK(map)),
1375 vm_map_round_page(start + size,
1376 VM_MAP_PAGE_MASK(map)),
1377 access, tag,
1378 TRUE);
1379 } else {
1380 rc = vm_map_unwire(map,
1381 vm_map_trunc_page(start,
1382 VM_MAP_PAGE_MASK(map)),
1383 vm_map_round_page(start + size,
1384 VM_MAP_PAGE_MASK(map)),
1385 TRUE);
1386 }
1387 return rc;
1388}
1389
1390/*
1391 * vm_wire -
1392 * Specify that the range of the virtual address space
1393 * of the target task must not cause page faults for
1394 * the indicated accesses.
1395 *
1396 * [ To unwire the pages, specify VM_PROT_NONE. ]
1397 */
1398kern_return_t
1399vm_wire(
1400 host_priv_t host_priv,
1401 vm_map_t map,
1402 vm_offset_t start,
1403 vm_size_t size,
1404 vm_prot_t access)
1405{
1406 kern_return_t rc;
1407
1408 if (host_priv == HOST_PRIV_NULL) {
1409 return KERN_INVALID_HOST;
1410 }
1411
1412 assert(host_priv == &realhost);
1413
1414 if (map == VM_MAP_NULL) {
1415 return KERN_INVALID_TASK;
1416 }
1417
1418 if ((access & ~VM_PROT_ALL) || (start + size < start)) {
1419 return KERN_INVALID_ARGUMENT;
1420 }
1421
1422 if (size == 0) {
1423 rc = KERN_SUCCESS;
1424 } else if (access != VM_PROT_NONE) {
1425 rc = vm_map_wire_kernel(map,
1426 vm_map_trunc_page(start,
1427 VM_MAP_PAGE_MASK(map)),
1428 vm_map_round_page(start + size,
1429 VM_MAP_PAGE_MASK(map)),
1430 access, VM_KERN_MEMORY_OSFMK,
1431 TRUE);
1432 } else {
1433 rc = vm_map_unwire(map,
1434 vm_map_trunc_page(start,
1435 VM_MAP_PAGE_MASK(map)),
1436 vm_map_round_page(start + size,
1437 VM_MAP_PAGE_MASK(map)),
1438 TRUE);
1439 }
1440 return rc;
1441}
1442
1443/*
1444 * vm_msync
1445 *
1446 * Synchronises the memory range specified with its backing store
1447 * image by either flushing or cleaning the contents to the appropriate
1448 * memory manager.
1449 *
1450 * interpretation of sync_flags
1451 * VM_SYNC_INVALIDATE - discard pages, only return precious
1452 * pages to manager.
1453 *
1454 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
1455 * - discard pages, write dirty or precious
1456 * pages back to memory manager.
1457 *
1458 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
1459 * - write dirty or precious pages back to
1460 * the memory manager.
1461 *
1462 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
1463 * is a hole in the region, and we would
1464 * have returned KERN_SUCCESS, return
1465 * KERN_INVALID_ADDRESS instead.
1466 *
1467 * RETURNS
1468 * KERN_INVALID_TASK Bad task parameter
1469 * KERN_INVALID_ARGUMENT both sync and async were specified.
1470 * KERN_SUCCESS The usual.
1471 * KERN_INVALID_ADDRESS There was a hole in the region.
1472 */
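/*
 * Illustrative sketch: a synchronous flush that also discards the cached
 * pages, checking for holes in the range (example-local names).
 *
 *	kr = mach_vm_msync(current_map(), start, len,
 *	    VM_SYNC_INVALIDATE | VM_SYNC_SYNCHRONOUS | VM_SYNC_CONTIGUOUS);
 *	if (kr == KERN_INVALID_ADDRESS) {
 *		... the range contained a hole ...
 *	}
 */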
1473
1474kern_return_t
1475mach_vm_msync(
1476 vm_map_t map,
1477 mach_vm_address_t address,
1478 mach_vm_size_t size,
1479 vm_sync_t sync_flags)
1480{
1481 if (map == VM_MAP_NULL) {
1482 return KERN_INVALID_TASK;
1483 }
1484
1485 return vm_map_msync(map, (vm_map_address_t)address,
1486 (vm_map_size_t)size, sync_flags);
1487}
1488
1489/*
1490 * vm_msync
1491 *
1492 * Synchronises the memory range specified with its backing store
1493 * image by either flushing or cleaning the contents to the appropriate
1494 * memory manager.
1495 *
1496 * interpretation of sync_flags
1497 * VM_SYNC_INVALIDATE - discard pages, only return precious
1498 * pages to manager.
1499 *
1500 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
1501 * - discard pages, write dirty or precious
1502 * pages back to memory manager.
1503 *
1504 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
1505 * - write dirty or precious pages back to
1506 * the memory manager.
1507 *
1508 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
1509 * is a hole in the region, and we would
1510 * have returned KERN_SUCCESS, return
1511 * KERN_INVALID_ADDRESS instead.
1512 *
1513 * The addressability of the range is limited to that which can
1514 * be described by a vm_address_t.
1515 *
1516 * RETURNS
1517 * KERN_INVALID_TASK Bad task parameter
1518 * KERN_INVALID_ARGUMENT both sync and async were specified.
1519 * KERN_SUCCESS The usual.
1520 * KERN_INVALID_ADDRESS There was a hole in the region.
1521 */
1522
1523kern_return_t
1524vm_msync(
1525 vm_map_t map,
1526 vm_address_t address,
1527 vm_size_t size,
1528 vm_sync_t sync_flags)
1529{
1530 if (map == VM_MAP_NULL) {
1531 return KERN_INVALID_TASK;
1532 }
1533
1534 return vm_map_msync(map, (vm_map_address_t)address,
1535 (vm_map_size_t)size, sync_flags);
1536}
1537
1538
1539int
1540vm_toggle_entry_reuse(int toggle, int *old_value)
1541{
1542 vm_map_t map = current_map();
1543
1544 assert(!map->is_nested_map);
1545 if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) {
1546 *old_value = map->disable_vmentry_reuse;
1547 } else if (toggle == VM_TOGGLE_SET) {
1548 vm_map_entry_t map_to_entry;
1549
1550 vm_map_lock(map);
1551 vm_map_disable_hole_optimization(map);
1552 map->disable_vmentry_reuse = TRUE;
1553 __IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
1554 if (map->first_free == map_to_entry) {
1555 map->highest_entry_end = vm_map_min(map);
1556 } else {
1557 map->highest_entry_end = map->first_free->vme_end;
1558 }
1559 vm_map_unlock(map);
1560 } else if (toggle == VM_TOGGLE_CLEAR) {
1561 vm_map_lock(map);
1562 map->disable_vmentry_reuse = FALSE;
1563 vm_map_unlock(map);
1564 } else {
1565 return KERN_INVALID_ARGUMENT;
1566 }
1567
1568 return KERN_SUCCESS;
1569}
1570
1571/*
1572 * mach_vm_behavior_set
1573 *
1574 * Sets the paging behavior attribute for the specified range
1575 * in the specified map.
1576 *
1577 * This routine will fail with KERN_INVALID_ADDRESS if any address
1578 * in [start,start+size) is not a valid allocated memory region.
1579 */
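/*
 * Illustrative sketch: mark a malloc-style free block as reusable so its
 * pages can be reclaimed without unmapping the range (example-local names).
 *
 *	kr = mach_vm_behavior_set(map, block_start, block_len,
 *	    VM_BEHAVIOR_REUSABLE);
 */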
1580kern_return_t
1581mach_vm_behavior_set(
1582 vm_map_t map,
1583 mach_vm_offset_t start,
1584 mach_vm_size_t size,
1585 vm_behavior_t new_behavior)
1586{
1587 vm_map_offset_t align_mask;
1588
1589 if ((map == VM_MAP_NULL) || (start + size < start)) {
1590 return KERN_INVALID_ARGUMENT;
1591 }
1592
1593 if (size == 0) {
1594 return KERN_SUCCESS;
1595 }
1596
1597 switch (new_behavior) {
1598 case VM_BEHAVIOR_REUSABLE:
1599 case VM_BEHAVIOR_REUSE:
1600 case VM_BEHAVIOR_CAN_REUSE:
1601 /*
1602 * Align to the hardware page size, to allow
1603 * malloc() to maximize the amount of re-usability,
1604 * even on systems with larger software page size.
1605 */
1606 align_mask = PAGE_MASK;
1607 break;
1608 default:
1609 align_mask = VM_MAP_PAGE_MASK(map);
1610 break;
1611 }
1612
1613 return vm_map_behavior_set(map,
1614 vm_map_trunc_page(start, align_mask),
1615 vm_map_round_page(start + size, align_mask),
1616 new_behavior);
1617}
1618
1619/*
1620 * vm_behavior_set
1621 *
1622 * Sets the paging behavior attribute for the specified range
1623 * in the specified map.
1624 *
1625 * This routine will fail with KERN_INVALID_ADDRESS if any address
1626 * in [start,start+size) is not a valid allocated memory region.
1627 *
 1628 * This routine is potentially limited in addressability by the
1629 * use of vm_offset_t (if the map provided is larger than the
1630 * kernel's).
1631 */
1632kern_return_t
1633vm_behavior_set(
1634 vm_map_t map,
1635 vm_offset_t start,
1636 vm_size_t size,
1637 vm_behavior_t new_behavior)
1638{
1639 if (start + size < start) {
1640 return KERN_INVALID_ARGUMENT;
1641 }
1642
1643 return mach_vm_behavior_set(map,
1644 (mach_vm_offset_t) start,
1645 (mach_vm_size_t) size,
1646 new_behavior);
1647}
1648
1649/*
1650 * mach_vm_region:
1651 *
1652 * User call to obtain information about a region in
1653 * a task's address map. Currently, only one flavor is
1654 * supported.
1655 *
1656 * XXX The reserved and behavior fields cannot be filled
1657 * in until the vm merge from the IK is completed, and
1658 * vm_reserve is implemented.
1659 *
1660 * XXX Dependency: syscall_vm_region() also supports only one flavor.
1661 */
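/*
 * Illustrative sketch: look up the region containing (or following) a given
 * address with the one supported flavor (example-local names).
 *
 *	mach_vm_address_t addr = probe_addr;
 *	mach_vm_size_t rsize;
 *	vm_region_basic_info_data_64_t binfo;
 *	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t objname;
 *
 *	kr = mach_vm_region(map, &addr, &rsize, VM_REGION_BASIC_INFO_64,
 *	    (vm_region_info_t)&binfo, &count, &objname);
 */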
1662
1663kern_return_t
1664mach_vm_region(
1665 vm_map_t map,
1666 mach_vm_offset_t *address, /* IN/OUT */
1667 mach_vm_size_t *size, /* OUT */
1668 vm_region_flavor_t flavor, /* IN */
1669 vm_region_info_t info, /* OUT */
1670 mach_msg_type_number_t *count, /* IN/OUT */
1671 mach_port_t *object_name) /* OUT */
1672{
1673 vm_map_offset_t map_addr;
1674 vm_map_size_t map_size;
1675 kern_return_t kr;
1676
1677 if (VM_MAP_NULL == map) {
1678 return KERN_INVALID_ARGUMENT;
1679 }
1680
1681 map_addr = (vm_map_offset_t)*address;
1682 map_size = (vm_map_size_t)*size;
1683
1684 /* legacy conversion */
1685 if (VM_REGION_BASIC_INFO == flavor) {
1686 flavor = VM_REGION_BASIC_INFO_64;
1687 }
1688
1689 kr = vm_map_region(map,
1690 &map_addr, &map_size,
1691 flavor, info, count,
1692 object_name);
1693
1694 *address = map_addr;
1695 *size = map_size;
1696 return kr;
1697}
1698
1699/*
1700 * vm_region_64 and vm_region:
1701 *
1702 * User call to obtain information about a region in
1703 * a task's address map. Currently, only one flavor is
1704 * supported.
1705 *
1706 * XXX The reserved and behavior fields cannot be filled
1707 * in until the vm merge from the IK is completed, and
1708 * vm_reserve is implemented.
1709 *
1710 * XXX Dependency: syscall_vm_region() also supports only one flavor.
1711 */
1712
1713kern_return_t
1714vm_region_64(
1715 vm_map_t map,
1716 vm_offset_t *address, /* IN/OUT */
1717 vm_size_t *size, /* OUT */
1718 vm_region_flavor_t flavor, /* IN */
1719 vm_region_info_t info, /* OUT */
1720 mach_msg_type_number_t *count, /* IN/OUT */
1721 mach_port_t *object_name) /* OUT */
1722{
1723 vm_map_offset_t map_addr;
1724 vm_map_size_t map_size;
1725 kern_return_t kr;
1726
1727 if (VM_MAP_NULL == map) {
1728 return KERN_INVALID_ARGUMENT;
1729 }
1730
1731 map_addr = (vm_map_offset_t)*address;
1732 map_size = (vm_map_size_t)*size;
1733
1734 /* legacy conversion */
1735 if (VM_REGION_BASIC_INFO == flavor) {
1736 flavor = VM_REGION_BASIC_INFO_64;
1737 }
1738
1739 kr = vm_map_region(map,
1740 &map_addr, &map_size,
1741 flavor, info, count,
1742 object_name);
1743
1744 *address = CAST_DOWN(vm_offset_t, map_addr);
1745 *size = CAST_DOWN(vm_size_t, map_size);
1746
1747 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
1748 return KERN_INVALID_ADDRESS;
1749 }
1750 return kr;
1751}
1752
1753kern_return_t
1754vm_region(
1755 vm_map_t map,
1756 vm_address_t *address, /* IN/OUT */
1757 vm_size_t *size, /* OUT */
1758 vm_region_flavor_t flavor, /* IN */
1759 vm_region_info_t info, /* OUT */
1760 mach_msg_type_number_t *count, /* IN/OUT */
1761 mach_port_t *object_name) /* OUT */
1762{
1763 vm_map_address_t map_addr;
1764 vm_map_size_t map_size;
1765 kern_return_t kr;
1766
1767 if (VM_MAP_NULL == map) {
1768 return KERN_INVALID_ARGUMENT;
1769 }
1770
1771 map_addr = (vm_map_address_t)*address;
1772 map_size = (vm_map_size_t)*size;
1773
1774 kr = vm_map_region(map,
1775 &map_addr, &map_size,
1776 flavor, info, count,
1777 object_name);
1778
1779 *address = CAST_DOWN(vm_address_t, map_addr);
1780 *size = CAST_DOWN(vm_size_t, map_size);
1781
1782 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
1783 return KERN_INVALID_ADDRESS;
1784 }
1785 return kr;
1786}
1787
1788/*
 1789 * mach_vm_region_recurse: A form of vm_region which follows the
1790 * submaps in a target map
1791 *
1792 */
1793kern_return_t
1794mach_vm_region_recurse(
1795 vm_map_t map,
1796 mach_vm_address_t *address,
1797 mach_vm_size_t *size,
1798 uint32_t *depth,
1799 vm_region_recurse_info_t info,
1800 mach_msg_type_number_t *infoCnt)
1801{
1802 vm_map_address_t map_addr;
1803 vm_map_size_t map_size;
1804 kern_return_t kr;
1805
1806 if (VM_MAP_NULL == map) {
1807 return KERN_INVALID_ARGUMENT;
1808 }
1809
1810 map_addr = (vm_map_address_t)*address;
1811 map_size = (vm_map_size_t)*size;
1812
1813 kr = vm_map_region_recurse_64(
1814 map,
1815 &map_addr,
1816 &map_size,
1817 depth,
1818 (vm_region_submap_info_64_t)info,
1819 infoCnt);
1820
1821 *address = map_addr;
1822 *size = map_size;
1823 return kr;
1824}
1825
1826/*
1827 * vm_region_recurse: A form of vm_region which follows the
1828 * submaps in a target map
1829 *
1830 */
1831kern_return_t
1832vm_region_recurse_64(
1833 vm_map_t map,
1834 vm_address_t *address,
1835 vm_size_t *size,
1836 uint32_t *depth,
1837 vm_region_recurse_info_64_t info,
1838 mach_msg_type_number_t *infoCnt)
1839{
1840 vm_map_address_t map_addr;
1841 vm_map_size_t map_size;
1842 kern_return_t kr;
1843
1844 if (VM_MAP_NULL == map) {
1845 return KERN_INVALID_ARGUMENT;
1846 }
1847
1848 map_addr = (vm_map_address_t)*address;
1849 map_size = (vm_map_size_t)*size;
1850
1851 kr = vm_map_region_recurse_64(
1852 map,
1853 &map_addr,
1854 &map_size,
1855 depth,
1856 (vm_region_submap_info_64_t)info,
1857 infoCnt);
1858
1859 *address = CAST_DOWN(vm_address_t, map_addr);
1860 *size = CAST_DOWN(vm_size_t, map_size);
1861
1862 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
1863 return KERN_INVALID_ADDRESS;
1864 }
1865 return kr;
1866}
1867
1868kern_return_t
1869vm_region_recurse(
1870 vm_map_t map,
1871 vm_offset_t *address, /* IN/OUT */
1872 vm_size_t *size, /* OUT */
1873 natural_t *depth, /* IN/OUT */
1874 vm_region_recurse_info_t info32, /* IN/OUT */
1875 mach_msg_type_number_t *infoCnt) /* IN/OUT */
1876{
1877 vm_region_submap_info_data_64_t info64;
1878 vm_region_submap_info_t info;
1879 vm_map_address_t map_addr;
1880 vm_map_size_t map_size;
1881 kern_return_t kr;
1882
1883 if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) {
1884 return KERN_INVALID_ARGUMENT;
1885 }
1886
1887
1888 map_addr = (vm_map_address_t)*address;
1889 map_size = (vm_map_size_t)*size;
1890 info = (vm_region_submap_info_t)info32;
1891 *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;
1892
1893 kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
1894 depth, &info64, infoCnt);
1895
1896 info->protection = info64.protection;
1897 info->max_protection = info64.max_protection;
1898 info->inheritance = info64.inheritance;
1899 info->offset = (uint32_t)info64.offset; /* trouble-maker */
1900 info->user_tag = info64.user_tag;
1901 info->pages_resident = info64.pages_resident;
1902 info->pages_shared_now_private = info64.pages_shared_now_private;
1903 info->pages_swapped_out = info64.pages_swapped_out;
1904 info->pages_dirtied = info64.pages_dirtied;
1905 info->ref_count = info64.ref_count;
1906 info->shadow_depth = info64.shadow_depth;
1907 info->external_pager = info64.external_pager;
1908 info->share_mode = info64.share_mode;
1909 info->is_submap = info64.is_submap;
1910 info->behavior = info64.behavior;
1911 info->object_id = info64.object_id;
1912 info->user_wired_count = info64.user_wired_count;
1913
1914 *address = CAST_DOWN(vm_address_t, map_addr);
1915 *size = CAST_DOWN(vm_size_t, map_size);
1916 *infoCnt = VM_REGION_SUBMAP_INFO_COUNT;
1917
1918 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
1919 return KERN_INVALID_ADDRESS;
1920 }
1921 return kr;
1922}
1923
1924kern_return_t
1925mach_vm_purgable_control(
1926 vm_map_t map,
1927 mach_vm_offset_t address,
1928 vm_purgable_t control,
1929 int *state)
1930{
1931 if (VM_MAP_NULL == map) {
1932 return KERN_INVALID_ARGUMENT;
1933 }
1934
1935 if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
1936 /* not allowed from user-space */
1937 return KERN_INVALID_ARGUMENT;
1938 }
1939
1940 return vm_map_purgable_control(map,
1941 vm_map_trunc_page(address, PAGE_MASK),
1942 control,
1943 state);
1944}
1945
1946kern_return_t
1947vm_purgable_control(
1948 vm_map_t map,
1949 vm_offset_t address,
1950 vm_purgable_t control,
1951 int *state)
1952{
1953 if (VM_MAP_NULL == map) {
1954 return KERN_INVALID_ARGUMENT;
1955 }
1956
1957 if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
1958 /* not allowed from user-space */
1959 return KERN_INVALID_ARGUMENT;
1960 }
1961
1962 return vm_map_purgable_control(map,
1963 vm_map_trunc_page(address, PAGE_MASK),
1964 control,
1965 state);
1966}
1967
1968
1969/*
1970 * Ordinarily, the right to allocate CPM is restricted
1971 * to privileged applications (those that can gain access
1972 * to the host priv port). Set this variable to zero if
1973 * you want to let any application allocate CPM.
1974 */
1975unsigned int vm_allocate_cpm_privileged = 0;
1976
1977/*
1978 * Allocate memory in the specified map, with the caveat that
1979 * the memory is physically contiguous. This call may fail
1980 * if the system can't find sufficient contiguous memory.
1981 * This call may cause or lead to heart-stopping amounts of
1982 * paging activity.
1983 *
1984 * Memory obtained from this call should be freed in the
1985 * normal way, viz., via vm_deallocate.
1986 */
1987kern_return_t
1988vm_allocate_cpm(
1989 host_priv_t host_priv,
1990 vm_map_t map,
1991 vm_address_t *addr,
1992 vm_size_t size,
1993 int flags)
1994{
1995 vm_map_address_t map_addr;
1996 vm_map_size_t map_size;
1997 kern_return_t kr;
1998
1999 if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) {
2000 return KERN_INVALID_HOST;
2001 }
2002
2003 if (VM_MAP_NULL == map) {
2004 return KERN_INVALID_ARGUMENT;
2005 }
2006
2007 map_addr = (vm_map_address_t)*addr;
2008 map_size = (vm_map_size_t)size;
2009
2010 kr = vm_map_enter_cpm(map,
2011 &map_addr,
2012 map_size,
2013 flags);
2014
2015 *addr = CAST_DOWN(vm_address_t, map_addr);
2016 return kr;
2017}
2018
2019
2020kern_return_t
2021mach_vm_page_query(
2022 vm_map_t map,
2023 mach_vm_offset_t offset,
2024 int *disposition,
2025 int *ref_count)
2026{
2027 if (VM_MAP_NULL == map) {
2028 return KERN_INVALID_ARGUMENT;
2029 }
2030
2031 return vm_map_page_query_internal(
2032 map,
2033 vm_map_trunc_page(offset, PAGE_MASK),
2034 disposition, ref_count);
2035}
2036
2037kern_return_t
2038vm_map_page_query(
2039 vm_map_t map,
2040 vm_offset_t offset,
2041 int *disposition,
2042 int *ref_count)
2043{
2044 if (VM_MAP_NULL == map) {
2045 return KERN_INVALID_ARGUMENT;
2046 }
2047
2048 return vm_map_page_query_internal(
2049 map,
2050 vm_map_trunc_page(offset, PAGE_MASK),
2051 disposition, ref_count);
2052}
2053
2054kern_return_t
2055mach_vm_page_range_query(
2056 vm_map_t map,
2057 mach_vm_offset_t address,
2058 mach_vm_size_t size,
2059 mach_vm_address_t dispositions_addr,
2060 mach_vm_size_t *dispositions_count)
2061{
2062 kern_return_t kr = KERN_SUCCESS;
2063 int num_pages = 0, i = 0;
2064 mach_vm_size_t curr_sz = 0, copy_sz = 0;
2065 mach_vm_size_t disp_buf_req_size = 0, disp_buf_total_size = 0;
2066 mach_msg_type_number_t count = 0;
2067
2068 void *info = NULL;
 2069 void *local_disp = NULL;
2070 vm_map_size_t info_size = 0, local_disp_size = 0;
2071 mach_vm_offset_t start = 0, end = 0;
2072
2073 if (map == VM_MAP_NULL || dispositions_count == NULL) {
2074 return KERN_INVALID_ARGUMENT;
2075 }
2076
2077 disp_buf_req_size = (*dispositions_count * sizeof(int));
2078 start = mach_vm_trunc_page(address);
2079 end = mach_vm_round_page(address + size);
2080
2081 if (end < start) {
2082 return KERN_INVALID_ARGUMENT;
2083 }
2084
2085 if ((end - start) < size) {
2086 /*
2087 * Aligned size is less than unaligned size.
2088 */
2089 return KERN_INVALID_ARGUMENT;
2090 }
2091
2092 if (disp_buf_req_size == 0 || (end == start)) {
2093 return KERN_SUCCESS;
2094 }
2095
2096 /*
2097 * For large requests, we will go through them
2098 * MAX_PAGE_RANGE_QUERY chunk at a time.
2099 */
2100
2101 curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY);
2102 num_pages = (int) (curr_sz >> PAGE_SHIFT);
2103
2104 info_size = num_pages * sizeof(vm_page_info_basic_data_t);
2105 info = kalloc(info_size);
2106
2107 if (info == NULL) {
2108 return KERN_RESOURCE_SHORTAGE;
2109 }
2110
2111 local_disp_size = num_pages * sizeof(int);
2112 local_disp = kalloc(local_disp_size);
2113
2114 if (local_disp == NULL) {
2115 kfree(info, info_size);
2116 info = NULL;
2117 return KERN_RESOURCE_SHORTAGE;
2118 }
2119
2120 while (size) {
2121 count = VM_PAGE_INFO_BASIC_COUNT;
2122 kr = vm_map_page_range_info_internal(
2123 map,
2124 start,
2125 mach_vm_round_page(start + curr_sz),
2126 VM_PAGE_INFO_BASIC,
2127 (vm_page_info_t) info,
2128 &count);
2129
2130 assert(kr == KERN_SUCCESS);
2131
2132 for (i = 0; i < num_pages; i++) {
2133 ((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition;
2134 }
2135
2136 copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */);
2137 kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz);
2138
2139 start += curr_sz;
2140 disp_buf_req_size -= copy_sz;
2141 disp_buf_total_size += copy_sz;
2142
2143 if (kr != 0) {
2144 break;
2145 }
2146
2147 if ((disp_buf_req_size == 0) || (curr_sz >= size)) {
2148 /*
 2149 * We might have inspected the full range, or
 2150 * more than it, especially if the user passed in
 2151 * non-page-aligned start/size and/or if we
 2152 * descended into a submap. We are done here.
2153 */
2154
2155 size = 0;
2156 } else {
2157 dispositions_addr += copy_sz;
2158
2159 size -= curr_sz;
2160
2161 curr_sz = MIN(mach_vm_round_page(size), MAX_PAGE_RANGE_QUERY);
2162 num_pages = (int)(curr_sz >> PAGE_SHIFT);
2163 }
2164 }
2165
2166 *dispositions_count = disp_buf_total_size / sizeof(int);
2167
2168 kfree(local_disp, local_disp_size);
2169 local_disp = NULL;
2170
2171 kfree(info, info_size);
2172 info = NULL;
2173
2174 return kr;
2175}
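/*
 * Illustrative user-space sketch (not compiled here) of the chunked
 * page-range query above.  This assumes the routine is exported to
 * user space through the mach_vm MIG subsystem, as it is on recent
 * releases; the buffer handling below is only an example.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void
query_dispositions_sketch(mach_vm_address_t buf, mach_vm_size_t len)
{
	mach_vm_size_t npages = (len + vm_page_size - 1) / vm_page_size;
	int *disps = calloc(npages, sizeof(int));       /* one int per page */
	mach_vm_size_t disp_count = npages;             /* in: capacity, out: #filled */
	kern_return_t kr;

	if (disps == NULL) {
		return;
	}
	kr = mach_vm_page_range_query(mach_task_self(),
	    buf,
	    len,
	    (mach_vm_address_t)(uintptr_t)disps,        /* kernel copyout()s into this */
	    &disp_count);
	if (kr == KERN_SUCCESS) {
		for (mach_vm_size_t i = 0; i < disp_count; i++) {
			printf("page %llu: disposition 0x%x\n",
			    (unsigned long long)i, disps[i]);
		}
	}
	free(disps);
}
#endif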
2176
2177kern_return_t
2178mach_vm_page_info(
2179 vm_map_t map,
2180 mach_vm_address_t address,
2181 vm_page_info_flavor_t flavor,
2182 vm_page_info_t info,
2183 mach_msg_type_number_t *count)
2184{
2185 kern_return_t kr;
2186
2187 if (map == VM_MAP_NULL) {
2188 return KERN_INVALID_ARGUMENT;
2189 }
2190
2191 kr = vm_map_page_info(map, address, flavor, info, count);
2192 return kr;
2193}
2194
2195/* map a (whole) upl into an address space */
2196kern_return_t
2197vm_upl_map(
2198 vm_map_t map,
2199 upl_t upl,
2200 vm_address_t *dst_addr)
2201{
2202 vm_map_offset_t map_addr;
2203 kern_return_t kr;
2204
2205 if (VM_MAP_NULL == map) {
2206 return KERN_INVALID_ARGUMENT;
2207 }
2208
2209 kr = vm_map_enter_upl(map, upl, &map_addr);
2210 *dst_addr = CAST_DOWN(vm_address_t, map_addr);
2211 return kr;
2212}
2213
2214kern_return_t
2215vm_upl_unmap(
2216 vm_map_t map,
2217 upl_t upl)
2218{
2219 if (VM_MAP_NULL == map) {
2220 return KERN_INVALID_ARGUMENT;
2221 }
2222
2223 return vm_map_remove_upl(map, upl);
2224}
2225
2226/* Retrieve a upl for an object underlying an address range in a map */
2227
2228kern_return_t
2229vm_map_get_upl(
2230 vm_map_t map,
2231 vm_map_offset_t map_offset,
2232 upl_size_t *upl_size,
2233 upl_t *upl,
2234 upl_page_info_array_t page_list,
2235 unsigned int *count,
2236 upl_control_flags_t *flags,
2237 vm_tag_t tag,
2238 int force_data_sync)
2239{
2240 upl_control_flags_t map_flags;
2241 kern_return_t kr;
2242
2243 if (VM_MAP_NULL == map) {
2244 return KERN_INVALID_ARGUMENT;
2245 }
2246
2247 map_flags = *flags & ~UPL_NOZEROFILL;
2248 if (force_data_sync) {
2249 map_flags |= UPL_FORCE_DATA_SYNC;
2250 }
2251
2252 kr = vm_map_create_upl(map,
2253 map_offset,
2254 upl_size,
2255 upl,
2256 page_list,
2257 count,
2258 &map_flags,
2259 tag);
2260
2261 *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
2262 return kr;
2263}
2264
2265#if CONFIG_EMBEDDED
2266extern int proc_selfpid(void);
2267extern char *proc_name_address(void *p);
2268int cs_executable_mem_entry = 0;
2269int log_executable_mem_entry = 0;
2270#endif /* CONFIG_EMBEDDED */
2271
2272/*
2273 * mach_make_memory_entry_64
2274 *
2275 * Think of it as a two-stage vm_remap() operation: first
2276 * you get a handle, then you map that handle somewhere else,
2277 * rather than doing it all at once (and without needing access
2278 * to the other whole map). An illustrative sketch follows the wrapper below.
2279 */
2280kern_return_t
2281mach_make_memory_entry_64(
2282 vm_map_t target_map,
2283 memory_object_size_t *size,
2284 memory_object_offset_t offset,
2285 vm_prot_t permission,
2286 ipc_port_t *object_handle,
2287 ipc_port_t parent_handle)
2288{
2289 if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) {
2290 /*
2291 * Unknown flag: reject for forward compatibility.
2292 */
2293 return KERN_INVALID_VALUE;
2294 }
2295
2296 return mach_make_memory_entry_internal(target_map,
2297 size,
2298 offset,
2299 permission,
2300 object_handle,
2301 parent_handle);
2302}
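/*
 * Illustrative user-space sketch (not compiled here) of the two-stage
 * operation described above: stage one obtains a named-entry handle
 * for a range of the current task, stage two maps that handle at a new
 * address.  It assumes the standard mach_make_memory_entry_64() and
 * mach_vm_map() user-level APIs; the protections and flags chosen
 * below are only an example.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
share_range_sketch(mach_vm_address_t addr, mach_vm_size_t len,
    mach_vm_address_t *new_addr)
{
	memory_object_size_t size = len;
	mach_port_t handle = MACH_PORT_NULL;
	kern_return_t kr;

	/* Stage 1: get a handle (named entry) covering [addr, addr + len). */
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &size,
	    addr,
	    VM_PROT_READ | VM_PROT_WRITE,
	    &handle,
	    MACH_PORT_NULL);                    /* no parent entry */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Stage 2: map that handle somewhere else (here, the same task). */
	*new_addr = 0;
	kr = mach_vm_map(mach_task_self(),
	    new_addr,
	    size,
	    0,                                  /* alignment mask */
	    VM_FLAGS_ANYWHERE,
	    handle,
	    0,                                  /* offset into the entry */
	    FALSE,                              /* share, do not copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);

	mach_port_deallocate(mach_task_self(), handle);
	return kr;
}
#endif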
2303
2304kern_return_t
2305mach_make_memory_entry_internal(
2306 vm_map_t target_map,
2307 memory_object_size_t *size,
2308 memory_object_offset_t offset,
2309 vm_prot_t permission,
2310 ipc_port_t *object_handle,
2311 ipc_port_t parent_handle)
2312{
2313 vm_map_version_t version;
2314 vm_named_entry_t parent_entry;
2315 vm_named_entry_t user_entry;
2316 ipc_port_t user_handle;
2317 kern_return_t kr;
2318 vm_map_t real_map;
2319
2320 /* needed for call to vm_map_lookup_locked */
2321 boolean_t wired;
2322 boolean_t iskernel;
2323 vm_object_offset_t obj_off;
2324 vm_prot_t prot;
2325 struct vm_object_fault_info fault_info = {};
2326 vm_object_t object;
2327 vm_object_t shadow_object;
2328
2329 /* needed for direct map entry manipulation */
2330 vm_map_entry_t map_entry;
2331 vm_map_entry_t next_entry;
2332 vm_map_t local_map;
2333 vm_map_t original_map = target_map;
2334 vm_map_size_t total_size, map_size;
2335 vm_map_offset_t map_start, map_end;
2336 vm_map_offset_t local_offset;
2337 vm_object_size_t mappable_size;
2338
2339 /*
2340 * Stash the offset in the page for use by vm_map_enter_mem_object()
2341 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
2342 */
2343 vm_object_offset_t offset_in_page;
2344
2345 unsigned int access;
2346 vm_prot_t protections;
2347 vm_prot_t original_protections, mask_protections;
2348 unsigned int wimg_mode;
2349
2350 boolean_t force_shadow = FALSE;
2351 boolean_t use_data_addr;
2352 boolean_t use_4K_compat;
2353#if VM_NAMED_ENTRY_LIST
2354 int alias = -1;
2355#endif /* VM_NAMED_ENTRY_LIST */
2356
2357 if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) {
2358 /*
2359 * Unknown flag: reject for forward compatibility.
2360 */
2361 return KERN_INVALID_VALUE;
2362 }
2363
2364 if (IP_VALID(parent_handle) &&
2365 ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
2366 parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
2367 } else {
2368 parent_entry = NULL;
2369 }
2370
2371 if (parent_entry && parent_entry->is_copy) {
2372 return KERN_INVALID_ARGUMENT;
2373 }
2374
2375 original_protections = permission & VM_PROT_ALL;
2376 protections = original_protections;
2377 mask_protections = permission & VM_PROT_IS_MASK;
2378 access = GET_MAP_MEM(permission);
2379 use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0);
2380 use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0);
2381
2382 user_handle = IP_NULL;
2383 user_entry = NULL;
2384
2385 map_start = vm_map_trunc_page(offset, PAGE_MASK);
2386
2387 if (permission & MAP_MEM_ONLY) {
2388 boolean_t parent_is_object;
2389
2390 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2391 map_size = map_end - map_start;
2392
2393 if (use_data_addr || use_4K_compat || parent_entry == NULL) {
2394 return KERN_INVALID_ARGUMENT;
2395 }
2396
2397 parent_is_object = !parent_entry->is_sub_map;
2398 object = parent_entry->backing.object;
2399 if (parent_is_object && object != VM_OBJECT_NULL) {
2400 wimg_mode = object->wimg_bits;
2401 } else {
2402 wimg_mode = VM_WIMG_USE_DEFAULT;
2403 }
2404 if ((access != GET_MAP_MEM(parent_entry->protection)) &&
2405 !(parent_entry->protection & VM_PROT_WRITE)) {
2406 return KERN_INVALID_RIGHT;
2407 }
2408 vm_prot_to_wimg(access, &wimg_mode);
2409 if (access != MAP_MEM_NOOP) {
2410 SET_MAP_MEM(access, parent_entry->protection);
2411 }
2412 if (parent_is_object && object &&
2413 (access != MAP_MEM_NOOP) &&
2414 (!(object->nophyscache))) {
2415 if (object->wimg_bits != wimg_mode) {
2416 vm_object_lock(object);
2417 vm_object_change_wimg_mode(object, wimg_mode);
2418 vm_object_unlock(object);
2419 }
2420 }
2421 if (object_handle) {
2422 *object_handle = IP_NULL;
2423 }
2424 return KERN_SUCCESS;
2425 } else if (permission & MAP_MEM_NAMED_CREATE) {
2426 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2427 map_size = map_end - map_start;
2428
2429 if (use_data_addr || use_4K_compat) {
2430 return KERN_INVALID_ARGUMENT;
2431 }
2432
2433 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
2434 if (kr != KERN_SUCCESS) {
2435 return KERN_FAILURE;
2436 }
2437
2438 /*
2439 * Force the creation of the VM object now.
2440 */
2441 if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
2442 /*
2443 * LP64todo - for now, we can only allocate 4GB-4096
2444 * internal objects because the default pager can't
2445 * page bigger ones. Remove this when it can.
2446 */
2447 kr = KERN_FAILURE;
2448 goto make_mem_done;
2449 }
2450
2451 object = vm_object_allocate(map_size);
2452 assert(object != VM_OBJECT_NULL);
2453
2454 if (permission & MAP_MEM_PURGABLE) {
2455 task_t owner;
2456
2457 if (!(permission & VM_PROT_WRITE)) {
2458 /* if we can't write, we can't purge */
2459 vm_object_deallocate(object);
2460 kr = KERN_INVALID_ARGUMENT;
2461 goto make_mem_done;
2462 }
2463 object->purgable = VM_PURGABLE_NONVOLATILE;
2464 if (permission & MAP_MEM_PURGABLE_KERNEL_ONLY) {
2465 object->purgeable_only_by_kernel = TRUE;
2466 }
2467 assert(object->vo_owner == NULL);
2468 assert(object->resident_page_count == 0);
2469 assert(object->wired_page_count == 0);
2470 vm_object_lock(object);
2471 owner = current_task();
2472#if __arm64__
2473 if (owner->task_legacy_footprint) {
2474 /*
2475 * For ios11, we failed to account for
2476 * this memory. Keep doing that for
2477 * legacy apps (built before ios12),
2478 * for backwards compatibility's sake...
2479 */
2480 owner = kernel_task;
2481 }
2482#endif /* __arm64__ */
2483 vm_purgeable_nonvolatile_enqueue(object, owner);
2484 vm_object_unlock(object);
2485 }
2486
2487 if (permission & MAP_MEM_LEDGER_TAG_NETWORK) {
2488 /* make this object owned by the calling task */
2489 vm_object_lock(object);
2490 vm_object_ownership_change(
2491 object,
2492 VM_OBJECT_LEDGER_TAG_NETWORK,
2493 current_task(), /* new owner */
2494 FALSE); /* task_objq locked? */
2495 vm_object_unlock(object);
2496 }
2497
2498#if CONFIG_SECLUDED_MEMORY
2499 if (secluded_for_iokit && /* global boot-arg */
2500 ((permission & MAP_MEM_GRAB_SECLUDED)
2501#if 11
2502 /* XXX FBDP for my testing only */
2503 || (secluded_for_fbdp && map_size == 97550336)
2504#endif
2505 )) {
2506#if 11
2507 if (!(permission & MAP_MEM_GRAB_SECLUDED) &&
2508 secluded_for_fbdp) {
2509 printf("FBDP: object %p size %lld can grab secluded\n", object, (uint64_t) map_size);
2510 }
2511#endif
2512 object->can_grab_secluded = TRUE;
2513 assert(!object->eligible_for_secluded);
2514 }
2515#endif /* CONFIG_SECLUDED_MEMORY */
2516
2517 /*
2518 * The VM object is brand new and nobody else knows about it,
2519 * so we don't need to lock it.
2520 */
2521
2522 wimg_mode = object->wimg_bits;
2523 vm_prot_to_wimg(access, &wimg_mode);
2524 if (access != MAP_MEM_NOOP) {
2525 object->wimg_bits = wimg_mode;
2526 }
2527
2528 /* the object has no pages, so no WIMG bits to update here */
2529
2530 /*
2531 * XXX
2532 * We use this path when we want to make sure that
2533 * nobody messes with the object (coalesce, for
2534 * example) before we map it.
2535 * We might want to use these objects for transposition via
2536 * vm_object_transpose() too, so we don't want any copy or
2537 * shadow objects either...
2538 */
2539 object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
2540 object->true_share = TRUE;
2541
2542 user_entry->backing.object = object;
2543 user_entry->internal = TRUE;
2544 user_entry->is_sub_map = FALSE;
2545 user_entry->offset = 0;
2546 user_entry->data_offset = 0;
2547 user_entry->protection = protections;
2548 SET_MAP_MEM(access, user_entry->protection);
2549 user_entry->size = map_size;
2550
2551 /* user_object pager and internal fields are not used */
2552 /* when the object field is filled in. */
2553
2554 *size = CAST_DOWN(vm_size_t, (user_entry->size -
2555 user_entry->data_offset));
2556 *object_handle = user_handle;
2557 return KERN_SUCCESS;
2558 }
2559
2560 if (permission & MAP_MEM_VM_COPY) {
2561 vm_map_copy_t copy;
2562
2563 if (target_map == VM_MAP_NULL) {
2564 return KERN_INVALID_TASK;
2565 }
2566
2567 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2568 map_size = map_end - map_start;
2569 if (use_data_addr || use_4K_compat) {
2570 offset_in_page = offset - map_start;
2571 if (use_4K_compat) {
2572 offset_in_page &= ~((signed)(0xFFF));
2573 }
2574 } else {
2575 offset_in_page = 0;
2576 }
2577
2578 kr = vm_map_copyin_internal(target_map,
2579 map_start,
2580 map_size,
2581 VM_MAP_COPYIN_ENTRY_LIST,
2582 &copy);
2583 if (kr != KERN_SUCCESS) {
2584 return kr;
2585 }
2586
2587 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
2588 if (kr != KERN_SUCCESS) {
2589 vm_map_copy_discard(copy);
2590 return KERN_FAILURE;
2591 }
2592
2593 user_entry->backing.copy = copy;
2594 user_entry->internal = FALSE;
2595 user_entry->is_sub_map = FALSE;
2596 user_entry->is_copy = TRUE;
2597 user_entry->offset = 0;
2598 user_entry->protection = protections;
2599 user_entry->size = map_size;
2600 user_entry->data_offset = offset_in_page;
2601
2602 *size = CAST_DOWN(vm_size_t, (user_entry->size -
2603 user_entry->data_offset));
2604 *object_handle = user_handle;
2605 return KERN_SUCCESS;
2606 }
2607
2608 if (permission & MAP_MEM_VM_SHARE) {
2609 vm_map_copy_t copy;
2610 vm_prot_t cur_prot, max_prot;
2611
2612 if (target_map == VM_MAP_NULL) {
2613 return KERN_INVALID_TASK;
2614 }
2615
2616 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2617 map_size = map_end - map_start;
2618 if (use_data_addr || use_4K_compat) {
2619 offset_in_page = offset - map_start;
2620 if (use_4K_compat) {
2621 offset_in_page &= ~((signed)(0xFFF));
2622 }
2623 } else {
2624 offset_in_page = 0;
2625 }
2626
2627 cur_prot = VM_PROT_ALL;
2628 kr = vm_map_copy_extract(target_map,
2629 map_start,
2630 map_size,
2631 &copy,
2632 &cur_prot,
2633 &max_prot);
2634 if (kr != KERN_SUCCESS) {
2635 return kr;
2636 }
2637
2638 if (mask_protections) {
2639 /*
2640 * We just want as much of "original_protections"
2641 * as we can get out of the actual "cur_prot".
2642 */
2643 protections &= cur_prot;
2644 if (protections == VM_PROT_NONE) {
2645 /* no access at all: fail */
2646 vm_map_copy_discard(copy);
2647 return KERN_PROTECTION_FAILURE;
2648 }
2649 } else {
2650 /*
2651 * We want exactly "original_protections"
2652 * out of "cur_prot".
2653 */
2654 if ((cur_prot & protections) != protections) {
2655 vm_map_copy_discard(copy);
2656 return KERN_PROTECTION_FAILURE;
2657 }
2658 }
2659
2660 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
2661 if (kr != KERN_SUCCESS) {
2662 vm_map_copy_discard(copy);
2663 return KERN_FAILURE;
2664 }
2665
2666 user_entry->backing.copy = copy;
2667 user_entry->internal = FALSE;
2668 user_entry->is_sub_map = FALSE;
2669 user_entry->is_copy = TRUE;
2670 user_entry->offset = 0;
2671 user_entry->protection = protections;
2672 user_entry->size = map_size;
2673 user_entry->data_offset = offset_in_page;
2674
2675 *size = CAST_DOWN(vm_size_t, (user_entry->size -
2676 user_entry->data_offset));
2677 *object_handle = user_handle;
2678 return KERN_SUCCESS;
2679 }
2680
2681 if (parent_entry == NULL ||
2682 (permission & MAP_MEM_NAMED_REUSE)) {
2683 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2684 map_size = map_end - map_start;
2685 if (use_data_addr || use_4K_compat) {
2686 offset_in_page = offset - map_start;
2687 if (use_4K_compat) {
2688 offset_in_page &= ~((signed)(0xFFF));
2689 }
2690 } else {
2691 offset_in_page = 0;
2692 }
2693
2694 /* Create a named object based on address range within the task map */
2695 /* Go find the object at given address */
2696
2697 if (target_map == VM_MAP_NULL) {
2698 return KERN_INVALID_TASK;
2699 }
2700
2701redo_lookup:
2702 protections = original_protections;
2703 vm_map_lock_read(target_map);
2704
2705 /* get the object associated with the target address */
2706 /* note we check the permission of the range against */
2707 /* that requested by the caller */
2708
2709 kr = vm_map_lookup_locked(&target_map, map_start,
2710 protections | mask_protections,
2711 OBJECT_LOCK_EXCLUSIVE, &version,
2712 &object, &obj_off, &prot, &wired,
2713 &fault_info,
2714 &real_map);
2715 if (kr != KERN_SUCCESS) {
2716 vm_map_unlock_read(target_map);
2717 goto make_mem_done;
2718 }
2719 if (mask_protections) {
2720 /*
2721 * The caller asked us to use the "protections" as
2722 * a mask, so restrict "protections" to what this
2723 * mapping actually allows.
2724 */
2725 protections &= prot;
2726 }
2727#if CONFIG_EMBEDDED
2728 /*
2729 * Wiring would copy the pages to a shadow object.
2730 * The shadow object would not be code-signed so
2731 * attempting to execute code from these copied pages
2732 * would trigger a code-signing violation.
2733 */
2734 if (prot & VM_PROT_EXECUTE) {
2735 if (log_executable_mem_entry) {
2736 void *bsd_info;
2737 bsd_info = current_task()->bsd_info;
2738 printf("pid %d[%s] making memory entry out of "
2739 "executable range from 0x%llx to 0x%llx:"
2740 "might cause code-signing issues "
2741 "later\n",
2742 proc_selfpid(),
2743 (bsd_info != NULL
2744 ? proc_name_address(bsd_info)
2745 : "?"),
2746 (uint64_t) map_start,
2747 (uint64_t) map_end);
2748 }
2749 DTRACE_VM2(cs_executable_mem_entry,
2750 uint64_t, (uint64_t)map_start,
2751 uint64_t, (uint64_t)map_end);
2752 cs_executable_mem_entry++;
2753
2754#if 11
2755 /*
2756 * We don't know how the memory entry will be used.
2757 * It might never get wired and might not cause any
2758 * trouble, so let's not reject this request...
2759 */
2760#else /* 11 */
2761 kr = KERN_PROTECTION_FAILURE;
2762 vm_object_unlock(object);
2763 vm_map_unlock_read(target_map);
2764 if (real_map != target_map) {
2765 vm_map_unlock_read(real_map);
2766 }
2767 goto make_mem_done;
2768#endif /* 11 */
2769 }
2770#endif /* CONFIG_EMBEDDED */
2771
2772 if (((prot & protections) != protections)
2773 || (object == kernel_object)) {
2774 kr = KERN_INVALID_RIGHT;
2775 vm_object_unlock(object);
2776 vm_map_unlock_read(target_map);
2777 if (real_map != target_map) {
2778 vm_map_unlock_read(real_map);
2779 }
2780 if (object == kernel_object) {
2781 printf("Warning: Attempt to create a named"
2782 " entry from the kernel_object\n");
2783 }
2784 goto make_mem_done;
2785 }
2786
2787 /* We have an object, now check to see if this object */
2788 /* is suitable. If not, create a shadow and share that */
2789
2790 /*
2791 * We have to unlock the VM object to avoid deadlocking with
2792 * a VM map lock (the lock ordering is map first, then object), if we
2793 * need to modify the VM map to create a shadow object. Since
2794 * we might release the VM map lock below anyway, we have
2795 * to release the VM map lock now.
2796 * XXX FBDP There must be a way to avoid this double lookup...
2797 *
2798 * Take an extra reference on the VM object to make sure it's
2799 * not going to disappear.
2800 */
2801 vm_object_reference_locked(object); /* extra ref to hold obj */
2802 vm_object_unlock(object);
2803
2804 local_map = original_map;
2805 local_offset = map_start;
2806 if (target_map != local_map) {
2807 vm_map_unlock_read(target_map);
2808 if (real_map != target_map) {
2809 vm_map_unlock_read(real_map);
2810 }
2811 vm_map_lock_read(local_map);
2812 target_map = local_map;
2813 real_map = local_map;
2814 }
2815 while (TRUE) {
2816 if (!vm_map_lookup_entry(local_map,
2817 local_offset, &map_entry)) {
2818 kr = KERN_INVALID_ARGUMENT;
2819 vm_map_unlock_read(target_map);
2820 if (real_map != target_map) {
2821 vm_map_unlock_read(real_map);
2822 }
2823 vm_object_deallocate(object); /* release extra ref */
2824 object = VM_OBJECT_NULL;
2825 goto make_mem_done;
2826 }
2827 iskernel = (local_map->pmap == kernel_pmap);
2828 if (!(map_entry->is_sub_map)) {
2829 if (VME_OBJECT(map_entry) != object) {
2830 kr = KERN_INVALID_ARGUMENT;
2831 vm_map_unlock_read(target_map);
2832 if (real_map != target_map) {
2833 vm_map_unlock_read(real_map);
2834 }
2835 vm_object_deallocate(object); /* release extra ref */
2836 object = VM_OBJECT_NULL;
2837 goto make_mem_done;
2838 }
2839 break;
2840 } else {
2841 vm_map_t tmap;
2842 tmap = local_map;
2843 local_map = VME_SUBMAP(map_entry);
2844
2845 vm_map_lock_read(local_map);
2846 vm_map_unlock_read(tmap);
2847 target_map = local_map;
2848 real_map = local_map;
2849 local_offset = local_offset - map_entry->vme_start;
2850 local_offset += VME_OFFSET(map_entry);
2851 }
2852 }
2853
2854#if VM_NAMED_ENTRY_LIST
2855 alias = VME_ALIAS(map_entry);
2856#endif /* VM_NAMED_ENTRY_LIST */
2857
2858 /*
2859 * We found the VM map entry, lock the VM object again.
2860 */
2861 vm_object_lock(object);
2862 if (map_entry->wired_count) {
2863 /* JMM - The check below should be reworked instead. */
2864 object->true_share = TRUE;
2865 }
2866 if (mask_protections) {
2867 /*
2868 * The caller asked us to use the "protections" as
2869 * a mask, so restrict "protections" to what this
2870 * mapping actually allows.
2871 */
2872 protections &= map_entry->max_protection;
2873 }
2874 if (((map_entry->max_protection) & protections) != protections) {
2875 kr = KERN_INVALID_RIGHT;
2876 vm_object_unlock(object);
2877 vm_map_unlock_read(target_map);
2878 if (real_map != target_map) {
2879 vm_map_unlock_read(real_map);
2880 }
2881 vm_object_deallocate(object);
2882 object = VM_OBJECT_NULL;
2883 goto make_mem_done;
2884 }
2885
2886 mappable_size = fault_info.hi_offset - obj_off;
2887 total_size = map_entry->vme_end - map_entry->vme_start;
2888 if (map_size > mappable_size) {
2889 /* try to extend mappable size if the entries */
2890 /* following are from the same object and are */
2891 /* compatible */
2892 next_entry = map_entry->vme_next;
2893 /* let's see if the next map entry is still */
2894 /* pointing at this object and is contiguous */
2895 while (map_size > mappable_size) {
2896 if ((VME_OBJECT(next_entry) == object) &&
2897 (next_entry->vme_start ==
2898 next_entry->vme_prev->vme_end) &&
2899 (VME_OFFSET(next_entry) ==
2900 (VME_OFFSET(next_entry->vme_prev) +
2901 (next_entry->vme_prev->vme_end -
2902 next_entry->vme_prev->vme_start)))) {
2903 if (mask_protections) {
2904 /*
2905 * The caller asked us to use
2906 * the "protections" as a mask,
2907 * so restrict "protections" to
2908 * what this mapping actually
2909 * allows.
2910 */
2911 protections &= next_entry->max_protection;
2912 }
2913 if ((next_entry->wired_count) &&
2914 (map_entry->wired_count == 0)) {
2915 break;
2916 }
2917 if (((next_entry->max_protection)
2918 & protections) != protections) {
2919 break;
2920 }
2921 if (next_entry->needs_copy !=
2922 map_entry->needs_copy) {
2923 break;
2924 }
2925 mappable_size += next_entry->vme_end
2926 - next_entry->vme_start;
2927 total_size += next_entry->vme_end
2928 - next_entry->vme_start;
2929 next_entry = next_entry->vme_next;
2930 } else {
2931 break;
2932 }
2933 }
2934 }
2935
2936 /* vm_map_entry_should_cow_for_true_share() checks for malloc tags,
2937 * never true in kernel */
2938 if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) &&
2939 object->vo_size > map_size &&
2940 map_size != 0) {
2941 /*
2942 * Set up the targeted range for copy-on-write to
2943 * limit the impact of "true_share"/"copy_delay" to
2944 * that range instead of the entire VM object...
2945 */
2946
2947 vm_object_unlock(object);
2948 if (vm_map_lock_read_to_write(target_map)) {
2949 vm_object_deallocate(object);
2950 target_map = original_map;
2951 goto redo_lookup;
2952 }
2953
2954 vm_map_clip_start(target_map,
2955 map_entry,
2956 vm_map_trunc_page(map_start,
2957 VM_MAP_PAGE_MASK(target_map)));
2958 vm_map_clip_end(target_map,
2959 map_entry,
2960 (vm_map_round_page(map_end,
2961 VM_MAP_PAGE_MASK(target_map))));
2962 force_shadow = TRUE;
2963
2964 if ((map_entry->vme_end - offset) < map_size) {
2965 map_size = map_entry->vme_end - map_start;
2966 }
2967 total_size = map_entry->vme_end - map_entry->vme_start;
2968
2969 vm_map_lock_write_to_read(target_map);
2970 vm_object_lock(object);
2971 }
2972
2973 if (object->internal) {
2974 /* vm_map_lookup_locked will create a shadow if */
2975 /* needs_copy is set but does not check for the */
2976 /* other two conditions shown. It is important to */
2977 /* set up an object which will not be pulled from */
2978 /* under us. */
2979
2980 if (force_shadow ||
2981 ((map_entry->needs_copy ||
2982 object->shadowed ||
2983 (object->vo_size > total_size &&
2984 (VME_OFFSET(map_entry) != 0 ||
2985 object->vo_size >
2986 vm_map_round_page(total_size,
2987 VM_MAP_PAGE_MASK(target_map)))))
2988 && !object->true_share
2989 && object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)) {
2990 /*
2991 * We have to unlock the VM object before
2992 * trying to upgrade the VM map lock, to
2993 * honor lock ordering (map then object).
2994 * Otherwise, we would deadlock if another
2995 * thread holds a read lock on the VM map and
2996 * is trying to acquire the VM object's lock.
2997 * We still hold an extra reference on the
2998 * VM object, guaranteeing that it won't
2999 * disappear.
3000 */
3001 vm_object_unlock(object);
3002
3003 if (vm_map_lock_read_to_write(target_map)) {
3004 /*
3005 * We couldn't upgrade our VM map lock
3006 * from "read" to "write" and we lost
3007 * our "read" lock.
3008 * Start all over again...
3009 */
3010 vm_object_deallocate(object); /* extra ref */
3011 target_map = original_map;
3012 goto redo_lookup;
3013 }
3014#if 00
3015 vm_object_lock(object);
3016#endif
3017
3018 /*
3019 * JMM - We need to avoid coming here when the object
3020 * is wired by anybody, not just the current map. Why
3021 * couldn't we use the standard vm_object_copy_quickly()
3022 * approach here?
3023 */
3024
3025 /* create a shadow object */
3026 VME_OBJECT_SHADOW(map_entry, total_size);
3027 shadow_object = VME_OBJECT(map_entry);
3028#if 00
3029 vm_object_unlock(object);
3030#endif
3031
3032 prot = map_entry->protection & ~VM_PROT_WRITE;
3033
3034 if (override_nx(target_map,
3035 VME_ALIAS(map_entry))
3036 && prot) {
3037 prot |= VM_PROT_EXECUTE;
3038 }
3039
3040 vm_object_pmap_protect(
3041 object, VME_OFFSET(map_entry),
3042 total_size,
3043 ((map_entry->is_shared
3044 || target_map->mapped_in_other_pmaps)
3045 ? PMAP_NULL :
3046 target_map->pmap),
3047 map_entry->vme_start,
3048 prot);
3049 total_size -= (map_entry->vme_end
3050 - map_entry->vme_start);
3051 next_entry = map_entry->vme_next;
3052 map_entry->needs_copy = FALSE;
3053
3054 vm_object_lock(shadow_object);
3055 while (total_size) {
3056 assert((next_entry->wired_count == 0) ||
3057 (map_entry->wired_count));
3058
3059 if (VME_OBJECT(next_entry) == object) {
3060 vm_object_reference_locked(shadow_object);
3061 VME_OBJECT_SET(next_entry,
3062 shadow_object);
3063 vm_object_deallocate(object);
3064 VME_OFFSET_SET(
3065 next_entry,
3066 (VME_OFFSET(next_entry->vme_prev) +
3067 (next_entry->vme_prev->vme_end
3068 - next_entry->vme_prev->vme_start)));
3069 next_entry->use_pmap = TRUE;
3070 next_entry->needs_copy = FALSE;
3071 } else {
3072 panic("mach_make_memory_entry_64:"
3073 " map entries out of sync\n");
3074 }
3075 total_size -=
3076 next_entry->vme_end
3077 - next_entry->vme_start;
3078 next_entry = next_entry->vme_next;
3079 }
3080
3081 /*
3082 * Transfer our extra reference to the
3083 * shadow object.
3084 */
3085 vm_object_reference_locked(shadow_object);
3086 vm_object_deallocate(object); /* extra ref */
3087 object = shadow_object;
3088
3089 obj_off = ((local_offset - map_entry->vme_start)
3090 + VME_OFFSET(map_entry));
3091
3092 vm_map_lock_write_to_read(target_map);
3093 }
3094 }
3095
3096 /* note: in the future we can (if necessary) allow for */
3097 /* memory object lists; this would better support */
3098 /* fragmentation, but is it necessary? The user should */
3099 /* be encouraged to create address-space-oriented */
3100 /* shared objects from CLEAN memory regions which have */
3101 /* a known and defined history, i.e. no inheritance */
3102 /* share; make this call before making the region the */
3103 /* target of ipc's, etc. The code above, protecting */
3104 /* against delayed copy, etc. is mostly defensive. */
3105
3106 wimg_mode = object->wimg_bits;
3107 if (!(object->nophyscache)) {
3108 vm_prot_to_wimg(access, &wimg_mode);
3109 }
3110
3111#if VM_OBJECT_TRACKING_OP_TRUESHARE
3112 if (!object->true_share &&
3113 vm_object_tracking_inited) {
3114 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
3115 int num = 0;
3116
3117 num = OSBacktrace(bt,
3118 VM_OBJECT_TRACKING_BTDEPTH);
3119 btlog_add_entry(vm_object_tracking_btlog,
3120 object,
3121 VM_OBJECT_TRACKING_OP_TRUESHARE,
3122 bt,
3123 num);
3124 }
3125#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
3126
3127 vm_object_lock_assert_exclusive(object);
3128 object->true_share = TRUE;
3129 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
3130 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
3131 }
3132
3133 /*
3134 * The memory entry now points to this VM object and we
3135 * need to hold a reference on the VM object. Use the extra
3136 * reference we took earlier to keep the object alive when we
3137 * had to unlock it.
3138 */
3139
3140 vm_map_unlock_read(target_map);
3141 if (real_map != target_map) {
3142 vm_map_unlock_read(real_map);
3143 }
3144
3145 if (object->wimg_bits != wimg_mode) {
3146 vm_object_change_wimg_mode(object, wimg_mode);
3147 }
3148
3149 /* The size of the mapped entry that overlaps with our region */
3150 /* targeted for sharing is: */
3151 /* (entry_end - entry_start) - */
3152 /* (offset of our beginning address within the entry); */
3153 /* it corresponds to "mappable_size" here: */
3154
3155 if (map_size > mappable_size) {
3156 map_size = mappable_size;
3157 }
3158
3159 if (permission & MAP_MEM_NAMED_REUSE) {
3160 /*
3161 * Compare what we got with the "parent_entry".
3162 * If they match, re-use the "parent_entry" instead
3163 * of creating a new one.
3164 */
3165 if (parent_entry != NULL &&
3166 parent_entry->backing.object == object &&
3167 parent_entry->internal == object->internal &&
3168 parent_entry->is_sub_map == FALSE &&
3169 parent_entry->offset == obj_off &&
3170 parent_entry->protection == protections &&
3171 parent_entry->size == map_size &&
3172 ((!(use_data_addr || use_4K_compat) &&
3173 (parent_entry->data_offset == 0)) ||
3174 ((use_data_addr || use_4K_compat) &&
3175 (parent_entry->data_offset == offset_in_page)))) {
3176 /*
3177 * We have a match: re-use "parent_entry".
3178 */
3179 /* release our extra reference on object */
3180 vm_object_unlock(object);
3181 vm_object_deallocate(object);
3182 /* parent_entry->ref_count++; XXX ? */
3183 /* Get an extra send-right on handle */
3184 ipc_port_copy_send(parent_handle);
3185
3186 *size = CAST_DOWN(vm_size_t,
3187 (parent_entry->size -
3188 parent_entry->data_offset));
3189 *object_handle = parent_handle;
3190 return KERN_SUCCESS;
3191 } else {
3192 /*
3193 * No match: we need to create a new entry.
3194 * fall through...
3195 */
3196 }
3197 }
3198
3199 vm_object_unlock(object);
3200 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3201 != KERN_SUCCESS) {
3202 /* release our unused reference on the object */
3203 vm_object_deallocate(object);
3204 return KERN_FAILURE;
3205 }
3206
3207 user_entry->backing.object = object;
3208 user_entry->internal = object->internal;
3209 user_entry->is_sub_map = FALSE;
3210 user_entry->offset = obj_off;
3211 user_entry->data_offset = offset_in_page;
3212 user_entry->protection = protections;
3213 SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
3214 user_entry->size = map_size;
3215#if VM_NAMED_ENTRY_LIST
3216 user_entry->named_entry_alias = alias;
3217#endif /* VM_NAMED_ENTRY_LIST */
3218
3219 /* user_object pager and internal fields are not used */
3220 /* when the object field is filled in. */
3221
3222 *size = CAST_DOWN(vm_size_t, (user_entry->size -
3223 user_entry->data_offset));
3224 *object_handle = user_handle;
3225 return KERN_SUCCESS;
3226 } else {
3227 /* The new object will be based on an existing named object */
3228 if (parent_entry == NULL) {
3229 kr = KERN_INVALID_ARGUMENT;
3230 goto make_mem_done;
3231 }
3232
3233 if (use_data_addr || use_4K_compat) {
3234 /*
3235 * Submaps and pagers should only be accessible from within
3236 * the kernel, which shouldn't use the data address flag, so we can fail here.
3237 */
3238 if (parent_entry->is_sub_map) {
3239 panic("Shouldn't be using data address with a parent entry that is a submap.");
3240 }
3241 /*
3242 * Account for offset to data in parent entry and
3243 * compute our own offset to data.
3244 */
3245 if ((offset + *size + parent_entry->data_offset) > parent_entry->size) {
3246 kr = KERN_INVALID_ARGUMENT;
3247 goto make_mem_done;
3248 }
3249
3250 map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
3251 offset_in_page = (offset + parent_entry->data_offset) - map_start;
3252 if (use_4K_compat) {
3253 offset_in_page &= ~((signed)(0xFFF));
3254 }
3255 map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
3256 map_size = map_end - map_start;
3257 } else {
3258 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
3259 map_size = map_end - map_start;
3260 offset_in_page = 0;
3261
3262 if ((offset + map_size) > parent_entry->size) {
3263 kr = KERN_INVALID_ARGUMENT;
3264 goto make_mem_done;
3265 }
3266 }
3267
3268 if (mask_protections) {
3269 /*
3270 * The caller asked us to use the "protections" as
3271 * a mask, so restrict "protections" to what this
3272 * mapping actually allows.
3273 */
3274 protections &= parent_entry->protection;
3275 }
3276 if ((protections & parent_entry->protection) != protections) {
3277 kr = KERN_PROTECTION_FAILURE;
3278 goto make_mem_done;
3279 }
3280
3281 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3282 != KERN_SUCCESS) {
3283 kr = KERN_FAILURE;
3284 goto make_mem_done;
3285 }
3286
3287 user_entry->size = map_size;
3288 user_entry->offset = parent_entry->offset + map_start;
3289 user_entry->data_offset = offset_in_page;
3290 user_entry->is_sub_map = parent_entry->is_sub_map;
3291 user_entry->is_copy = parent_entry->is_copy;
3292 user_entry->internal = parent_entry->internal;
3293 user_entry->protection = protections;
3294
3295 if (access != MAP_MEM_NOOP) {
3296 SET_MAP_MEM(access, user_entry->protection);
3297 }
3298
3299 if (parent_entry->is_sub_map) {
3300 user_entry->backing.map = parent_entry->backing.map;
3301 vm_map_lock(user_entry->backing.map);
3302 user_entry->backing.map->map_refcnt++;
3303 vm_map_unlock(user_entry->backing.map);
3304 } else {
3305 object = parent_entry->backing.object;
3306 assert(object != VM_OBJECT_NULL);
3307 user_entry->backing.object = object;
3308 /* we now point to this object, hold on */
3309 vm_object_lock(object);
3310 vm_object_reference_locked(object);
3311#if VM_OBJECT_TRACKING_OP_TRUESHARE
3312 if (!object->true_share &&
3313 vm_object_tracking_inited) {
3314 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
3315 int num = 0;
3316
3317 num = OSBacktrace(bt,
3318 VM_OBJECT_TRACKING_BTDEPTH);
3319 btlog_add_entry(vm_object_tracking_btlog,
3320 object,
3321 VM_OBJECT_TRACKING_OP_TRUESHARE,
3322 bt,
3323 num);
3324 }
3325#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
3326
3327 object->true_share = TRUE;
3328 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
3329 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
3330 }
3331 vm_object_unlock(object);
3332 }
3333 *size = CAST_DOWN(vm_size_t, (user_entry->size -
3334 user_entry->data_offset));
3335 *object_handle = user_handle;
3336 return KERN_SUCCESS;
3337 }
3338
3339make_mem_done:
3340 if (user_handle != IP_NULL) {
3341 /*
3342 * Releasing "user_handle" causes the kernel object
3343 * associated with it ("user_entry" here) to also be
3344 * released and freed.
3345 */
3346 mach_memory_entry_port_release(user_handle);
3347 }
3348 return kr;
3349}
3350
3351kern_return_t
3352_mach_make_memory_entry(
3353 vm_map_t target_map,
3354 memory_object_size_t *size,
3355 memory_object_offset_t offset,
3356 vm_prot_t permission,
3357 ipc_port_t *object_handle,
3358 ipc_port_t parent_entry)
3359{
3360 memory_object_size_t mo_size;
3361 kern_return_t kr;
3362
3363 mo_size = (memory_object_size_t)*size;
3364 kr = mach_make_memory_entry_64(target_map, &mo_size,
3365 (memory_object_offset_t)offset, permission, object_handle,
3366 parent_entry);
3367 *size = mo_size;
3368 return kr;
3369}
3370
3371kern_return_t
3372mach_make_memory_entry(
3373 vm_map_t target_map,
3374 vm_size_t *size,
3375 vm_offset_t offset,
3376 vm_prot_t permission,
3377 ipc_port_t *object_handle,
3378 ipc_port_t parent_entry)
3379{
3380 memory_object_size_t mo_size;
3381 kern_return_t kr;
3382
3383 mo_size = (memory_object_size_t)*size;
3384 kr = mach_make_memory_entry_64(target_map, &mo_size,
3385 (memory_object_offset_t)offset, permission, object_handle,
3386 parent_entry);
3387 *size = CAST_DOWN(vm_size_t, mo_size);
3388 return kr;
3389}
3390
3391/*
3392 * task_wire
3393 *
3394 * Set or clear the map's wiring_required flag. This flag, if set,
3395 * will cause all future virtual memory allocations to allocate
3396 * user wired memory. Unwiring pages wired down as a result of
3397 * this routine is done with the vm_wire interface.
3398 */
3399kern_return_t
3400task_wire(
3401 vm_map_t map,
3402 boolean_t must_wire)
3403{
3404 if (map == VM_MAP_NULL) {
3405 return KERN_INVALID_ARGUMENT;
3406 }
3407
3408 vm_map_lock(map);
3409 map->wiring_required = (must_wire == TRUE);
3410 vm_map_unlock(map);
3411
3412 return KERN_SUCCESS;
3413}
3414
3415kern_return_t
3416vm_map_exec_lockdown(
3417 vm_map_t map)
3418{
3419 if (map == VM_MAP_NULL) {
3420 return KERN_INVALID_ARGUMENT;
3421 }
3422
3423 vm_map_lock(map);
3424 map->map_disallow_new_exec = TRUE;
3425 vm_map_unlock(map);
3426
3427 return KERN_SUCCESS;
3428}
3429
3430#if VM_NAMED_ENTRY_LIST
3431queue_head_t vm_named_entry_list;
3432int vm_named_entry_count = 0;
3433lck_mtx_t vm_named_entry_list_lock_data;
3434lck_mtx_ext_t vm_named_entry_list_lock_data_ext;
3435#endif /* VM_NAMED_ENTRY_LIST */
3436
3437void vm_named_entry_init(void);
3438void
3439vm_named_entry_init(void)
3440{
3441#if VM_NAMED_ENTRY_LIST
3442 queue_init(&vm_named_entry_list);
3443 vm_named_entry_count = 0;
3444 lck_mtx_init_ext(&vm_named_entry_list_lock_data,
3445 &vm_named_entry_list_lock_data_ext,
3446 &vm_object_lck_grp,
3447 &vm_object_lck_attr);
3448#endif /* VM_NAMED_ENTRY_LIST */
3449}
3450
3451__private_extern__ kern_return_t
3452mach_memory_entry_allocate(
3453 vm_named_entry_t *user_entry_p,
3454 ipc_port_t *user_handle_p)
3455{
3456 vm_named_entry_t user_entry;
3457 ipc_port_t user_handle;
3458 ipc_port_t previous;
3459
3460 user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
3461 if (user_entry == NULL) {
3462 return KERN_FAILURE;
3463 }
3464 bzero(user_entry, sizeof(*user_entry));
3465
3466 named_entry_lock_init(user_entry);
3467
3468 user_handle = ipc_port_alloc_kernel();
3469 if (user_handle == IP_NULL) {
3470 kfree(user_entry, sizeof *user_entry);
3471 return KERN_FAILURE;
3472 }
3473 ip_lock(user_handle);
3474
3475 /* make a sonce right */
3476 user_handle->ip_sorights++;
3477 ip_reference(user_handle);
3478
3479 /* make a send right */
3480 user_handle->ip_mscount++;
3481 user_handle->ip_srights++;
3482 ip_reference(user_handle);
3483
3484 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
3485 /* nsrequest unlocks user_handle */
3486
3487 user_entry->backing.object = NULL;
3488 user_entry->is_sub_map = FALSE;
3489 user_entry->is_copy = FALSE;
3490 user_entry->internal = FALSE;
3491 user_entry->size = 0;
3492 user_entry->offset = 0;
3493 user_entry->data_offset = 0;
3494 user_entry->protection = VM_PROT_NONE;
3495 user_entry->ref_count = 1;
3496
3497 ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
3498 IKOT_NAMED_ENTRY);
3499
3500 *user_entry_p = user_entry;
3501 *user_handle_p = user_handle;
3502
3503#if VM_NAMED_ENTRY_LIST
3504 /* keep a loose (no reference) pointer to the Mach port, for debugging only */
3505 user_entry->named_entry_port = user_handle;
3506 /* backtrace at allocation time, for debugging only */
3507 OSBacktrace(&user_entry->named_entry_bt[0],
3508 NAMED_ENTRY_BT_DEPTH);
3509
3510 /* add this new named entry to the global list */
3511 lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
3512 queue_enter(&vm_named_entry_list, user_entry,
3513 vm_named_entry_t, named_entry_list);
3514 vm_named_entry_count++;
3515 lck_mtx_unlock(&vm_named_entry_list_lock_data);
3516#endif /* VM_NAMED_ENTRY_LIST */
3517
3518 return KERN_SUCCESS;
3519}
3520
3521/*
3522 * mach_memory_object_memory_entry_64
3523 *
3524 * Create a named entry backed by the provided pager.
3525 *
3526 */
3527kern_return_t
3528mach_memory_object_memory_entry_64(
3529 host_t host,
3530 boolean_t internal,
3531 vm_object_offset_t size,
3532 vm_prot_t permission,
3533 memory_object_t pager,
3534 ipc_port_t *entry_handle)
3535{
3536 unsigned int access;
3537 vm_named_entry_t user_entry;
3538 ipc_port_t user_handle;
3539 vm_object_t object;
3540
3541 if (host == HOST_NULL) {
3542 return KERN_INVALID_HOST;
3543 }
3544
3545 if (pager == MEMORY_OBJECT_NULL && internal) {
3546 object = vm_object_allocate(size);
3547 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
3548 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
3549 }
3550 } else {
3551 object = memory_object_to_vm_object(pager);
3552 if (object != VM_OBJECT_NULL) {
3553 vm_object_reference(object);
3554 }
3555 }
3556 if (object == VM_OBJECT_NULL) {
3557 return KERN_INVALID_ARGUMENT;
3558 }
3559
3560 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3561 != KERN_SUCCESS) {
3562 vm_object_deallocate(object);
3563 return KERN_FAILURE;
3564 }
3565
3566 user_entry->size = size;
3567 user_entry->offset = 0;
3568 user_entry->protection = permission & VM_PROT_ALL;
3569 access = GET_MAP_MEM(permission);
3570 SET_MAP_MEM(access, user_entry->protection);
3571 user_entry->is_sub_map = FALSE;
3572 assert(user_entry->ref_count == 1);
3573
3574 user_entry->backing.object = object;
3575 user_entry->internal = object->internal;
3576 assert(object->internal == internal);
3577
3578 *entry_handle = user_handle;
3579 return KERN_SUCCESS;
3580}
3581
3582kern_return_t
3583mach_memory_object_memory_entry(
3584 host_t host,
3585 boolean_t internal,
3586 vm_size_t size,
3587 vm_prot_t permission,
3588 memory_object_t pager,
3589 ipc_port_t *entry_handle)
3590{
3591 return mach_memory_object_memory_entry_64( host, internal,
3592 (vm_object_offset_t)size, permission, pager, entry_handle);
3593}
3594
3595
3596kern_return_t
3597mach_memory_entry_purgable_control(
3598 ipc_port_t entry_port,
3599 vm_purgable_t control,
3600 int *state)
3601{
3602 if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
3603 /* not allowed from user-space */
3604 return KERN_INVALID_ARGUMENT;
3605 }
3606
3607 return memory_entry_purgeable_control_internal(entry_port, control, state);
3608}
3609
3610kern_return_t
3611memory_entry_purgeable_control_internal(
3612 ipc_port_t entry_port,
3613 vm_purgable_t control,
3614 int *state)
3615{
3616 kern_return_t kr;
3617 vm_named_entry_t mem_entry;
3618 vm_object_t object;
3619
3620 if (!IP_VALID(entry_port) ||
3621 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3622 return KERN_INVALID_ARGUMENT;
3623 }
3624 if (control != VM_PURGABLE_SET_STATE &&
3625 control != VM_PURGABLE_GET_STATE &&
3626 control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
3627 return KERN_INVALID_ARGUMENT;
3628 }
3629
3630 if ((control == VM_PURGABLE_SET_STATE ||
3631 control == VM_PURGABLE_SET_STATE_FROM_KERNEL) &&
3632 (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
3633 ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) {
3634 return KERN_INVALID_ARGUMENT;
3635 }
3636
3637 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
3638
3639 named_entry_lock(mem_entry);
3640
3641 if (mem_entry->is_sub_map ||
3642 mem_entry->is_copy) {
3643 named_entry_unlock(mem_entry);
3644 return KERN_INVALID_ARGUMENT;
3645 }
3646
3647 object = mem_entry->backing.object;
3648 if (object == VM_OBJECT_NULL) {
3649 named_entry_unlock(mem_entry);
3650 return KERN_INVALID_ARGUMENT;
3651 }
3652
3653 vm_object_lock(object);
3654
3655 /* check that named entry covers entire object ? */
3656 if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
3657 vm_object_unlock(object);
3658 named_entry_unlock(mem_entry);
3659 return KERN_INVALID_ARGUMENT;
3660 }
3661
3662 named_entry_unlock(mem_entry);
3663
3664 kr = vm_object_purgable_control(object, control, state);
3665
3666 vm_object_unlock(object);
3667
3668 return kr;
3669}
3670
3671kern_return_t
3672mach_memory_entry_access_tracking(
3673 ipc_port_t entry_port,
3674 int *access_tracking,
3675 uint32_t *access_tracking_reads,
3676 uint32_t *access_tracking_writes)
3677{
3678 return memory_entry_access_tracking_internal(entry_port,
3679 access_tracking,
3680 access_tracking_reads,
3681 access_tracking_writes);
3682}
3683
3684kern_return_t
3685memory_entry_access_tracking_internal(
3686 ipc_port_t entry_port,
3687 int *access_tracking,
3688 uint32_t *access_tracking_reads,
3689 uint32_t *access_tracking_writes)
3690{
3691 vm_named_entry_t mem_entry;
3692 vm_object_t object;
3693 kern_return_t kr;
3694
3695 if (!IP_VALID(entry_port) ||
3696 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3697 return KERN_INVALID_ARGUMENT;
3698 }
3699
3700 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
3701
3702 named_entry_lock(mem_entry);
3703
3704 if (mem_entry->is_sub_map ||
3705 mem_entry->is_copy) {
3706 named_entry_unlock(mem_entry);
3707 return KERN_INVALID_ARGUMENT;
3708 }
3709
3710 object = mem_entry->backing.object;
3711 if (object == VM_OBJECT_NULL) {
3712 named_entry_unlock(mem_entry);
3713 return KERN_INVALID_ARGUMENT;
3714 }
3715
3716#if VM_OBJECT_ACCESS_TRACKING
3717 vm_object_access_tracking(object,
3718 access_tracking,
3719 access_tracking_reads,
3720 access_tracking_writes);
3721 kr = KERN_SUCCESS;
3722#else /* VM_OBJECT_ACCESS_TRACKING */
3723 (void) access_tracking;
3724 (void) access_tracking_reads;
3725 (void) access_tracking_writes;
3726 kr = KERN_NOT_SUPPORTED;
3727#endif /* VM_OBJECT_ACCESS_TRACKING */
3728
3729 named_entry_unlock(mem_entry);
3730
3731 return kr;
3732}
3733
3734kern_return_t
3735mach_memory_entry_get_page_counts(
3736 ipc_port_t entry_port,
3737 unsigned int *resident_page_count,
3738 unsigned int *dirty_page_count)
3739{
3740 kern_return_t kr;
3741 vm_named_entry_t mem_entry;
3742 vm_object_t object;
3743 vm_object_offset_t offset;
3744 vm_object_size_t size;
3745
3746 if (!IP_VALID(entry_port) ||
3747 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3748 return KERN_INVALID_ARGUMENT;
3749 }
3750
3751 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
3752
3753 named_entry_lock(mem_entry);
3754
3755 if (mem_entry->is_sub_map ||
3756 mem_entry->is_copy) {
3757 named_entry_unlock(mem_entry);
3758 return KERN_INVALID_ARGUMENT;
3759 }
3760
3761 object = mem_entry->backing.object;
3762 if (object == VM_OBJECT_NULL) {
3763 named_entry_unlock(mem_entry);
3764 return KERN_INVALID_ARGUMENT;
3765 }
3766
3767 vm_object_lock(object);
3768
3769 offset = mem_entry->offset;
3770 size = mem_entry->size;
3771
3772 named_entry_unlock(mem_entry);
3773
3774 kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count);
3775
3776 vm_object_unlock(object);
3777
3778 return kr;
3779}
3780
3781/*
3782 * mach_memory_entry_port_release:
3783 *
3784 * Release a send right on a named entry port. This is the correct
3785 * way to destroy a named entry. When the last right on the port is
3786 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
3787 */
3788void
3789mach_memory_entry_port_release(
3790 ipc_port_t port)
3791{
3792 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
3793 ipc_port_release_send(port);
3794}
3795
3796/*
3797 * mach_destroy_memory_entry:
3798 *
3799 * Drops a reference on a memory entry and destroys the memory entry if
3800 * there are no more references on it.
3801 * NOTE: This routine should not be called to destroy a memory entry from the
3802 * kernel, as it will not release the Mach port associated with the memory
3803 * entry. The proper way to destroy a memory entry in the kernel is to
3804 * call mach_memory_entry_port_release() to release the kernel's send-right on
3805 * the memory entry's port. When the last send right is released, the memory
3806 * entry will be destroyed via ipc_kobject_destroy().
3807 */
3808void
3809mach_destroy_memory_entry(
3810 ipc_port_t port)
3811{
3812 vm_named_entry_t named_entry;
3813#if MACH_ASSERT
3814 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
3815#endif /* MACH_ASSERT */
3816 named_entry = (vm_named_entry_t)port->ip_kobject;
3817
3818 named_entry_lock(named_entry);
3819 named_entry->ref_count -= 1;
3820
3821 if (named_entry->ref_count == 0) {
3822 if (named_entry->is_sub_map) {
3823 vm_map_deallocate(named_entry->backing.map);
3824 } else if (named_entry->is_copy) {
3825 vm_map_copy_discard(named_entry->backing.copy);
3826 } else {
3827 /* release the VM object we've been pointing to */
3828 vm_object_deallocate(named_entry->backing.object);
3829 }
3830
3831 named_entry_unlock(named_entry);
3832 named_entry_lock_destroy(named_entry);
3833
3834#if VM_NAMED_ENTRY_LIST
3835 lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
3836 queue_remove(&vm_named_entry_list, named_entry,
3837 vm_named_entry_t, named_entry_list);
3838 assert(vm_named_entry_count > 0);
3839 vm_named_entry_count--;
3840 lck_mtx_unlock(&vm_named_entry_list_lock_data);
3841#endif /* VM_NAMED_ENTRY_LIST */
3842
3843 kfree(port->ip_kobject,
3844 sizeof(struct vm_named_entry));
3845 } else {
3846 named_entry_unlock(named_entry);
3847 }
3848}
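/*
 * Illustrative kernel-side sketch (not compiled here) of the lifecycle
 * described above: a named entry created inside the kernel is torn
 * down by dropping the send right with mach_memory_entry_port_release();
 * when the last right goes away, ipc_kobject_destroy() ends up calling
 * mach_destroy_memory_entry().  The map, offset and protection below
 * are hypothetical.
 */
#if 0
static void
named_entry_lifecycle_sketch(vm_map_t some_map, memory_object_offset_t off)
{
	memory_object_size_t size = PAGE_SIZE;
	ipc_port_t handle = IP_NULL;
	kern_return_t kr;

	kr = mach_make_memory_entry_64(some_map, &size, off,
	    VM_PROT_READ, &handle, IP_NULL);
	if (kr != KERN_SUCCESS) {
		return;
	}

	/* ... use "handle", e.g. pass it to vm_map_enter_mem_object() ... */

	/* Done: drop our send right; the last release frees the entry. */
	mach_memory_entry_port_release(handle);
}
#endif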
3849
3850/* Allow manipulation of individual page state. This is actually part of */
3851/* the UPL regimen but takes place on the memory entry rather than on a UPL */
3852
3853kern_return_t
3854mach_memory_entry_page_op(
3855 ipc_port_t entry_port,
3856 vm_object_offset_t offset,
3857 int ops,
3858 ppnum_t *phys_entry,
3859 int *flags)
3860{
3861 vm_named_entry_t mem_entry;
3862 vm_object_t object;
3863 kern_return_t kr;
3864
3865 if (!IP_VALID(entry_port) ||
3866 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3867 return KERN_INVALID_ARGUMENT;
3868 }
3869
3870 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
3871
3872 named_entry_lock(mem_entry);
3873
3874 if (mem_entry->is_sub_map ||
3875 mem_entry->is_copy) {
3876 named_entry_unlock(mem_entry);
3877 return KERN_INVALID_ARGUMENT;
3878 }
3879
3880 object = mem_entry->backing.object;
3881 if (object == VM_OBJECT_NULL) {
3882 named_entry_unlock(mem_entry);
3883 return KERN_INVALID_ARGUMENT;
3884 }
3885
3886 vm_object_reference(object);
3887 named_entry_unlock(mem_entry);
3888
3889 kr = vm_object_page_op(object, offset, ops, phys_entry, flags);
3890
3891 vm_object_deallocate(object);
3892
3893 return kr;
3894}
3895
3896/*
3897 * mach_memory_entry_range_op offers a performance enhancement over
3898 * mach_memory_entry_page_op for operations which do not require per-page
3899 * state to be returned from the call. Page_op was created to provide
3900 * a low-cost alternative to page manipulation via UPLs when only a single
3901 * page was involved. The range_op call extends the _op family of functions
3902 * to work on multiple pages at once: because no per-page state is handed back,
3903 * the caller avoids the overhead of the UPL structures (see the sketch after the function below).
3904 */
3905
3906kern_return_t
3907mach_memory_entry_range_op(
3908 ipc_port_t entry_port,
3909 vm_object_offset_t offset_beg,
3910 vm_object_offset_t offset_end,
3911 int ops,
3912 int *range)
3913{
3914 vm_named_entry_t mem_entry;
3915 vm_object_t object;
3916 kern_return_t kr;
3917
3918 if (!IP_VALID(entry_port) ||
3919 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3920 return KERN_INVALID_ARGUMENT;
3921 }
3922
3923 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
3924
3925 named_entry_lock(mem_entry);
3926
3927 if (mem_entry->is_sub_map ||
3928 mem_entry->is_copy) {
3929 named_entry_unlock(mem_entry);
3930 return KERN_INVALID_ARGUMENT;
3931 }
3932
3933 object = mem_entry->backing.object;
3934 if (object == VM_OBJECT_NULL) {
3935 named_entry_unlock(mem_entry);
3936 return KERN_INVALID_ARGUMENT;
3937 }
3938
3939 vm_object_reference(object);
3940 named_entry_unlock(mem_entry);
3941
3942 kr = vm_object_range_op(object,
3943 offset_beg,
3944 offset_end,
3945 ops,
3946 (uint32_t *) range);
3947
3948 vm_object_deallocate(object);
3949
3950 return kr;
3951}
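/*
 * Illustrative kernel-side sketch (not compiled here) of the range
 * variant described above: one call covers a whole region of the named
 * entry instead of one mach_memory_entry_page_op() per page.  It
 * assumes the UPL_ROP_* operation flags from
 * <mach/memory_object_types.h>; the entry port and range below are
 * hypothetical.
 */
#if 0
static void
range_op_sketch(ipc_port_t entry_port)
{
	int range = 0;                  /* bytes covered before the op stopped */
	kern_return_t kr;

	kr = mach_memory_entry_range_op(entry_port,
	    0,                          /* offset_beg */
	    16 * PAGE_SIZE,             /* offset_end */
	    UPL_ROP_PRESENT,            /* scan while pages are resident */
	    &range);
	if (kr == KERN_SUCCESS) {
		/* "range" reports how far the resident run extends. */
	}
}
#endif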
3952
3953/* ******* Temporary Internal calls to UPL for BSD ***** */
3954
3955extern int kernel_upl_map(
3956 vm_map_t map,
3957 upl_t upl,
3958 vm_offset_t *dst_addr);
3959
3960extern int kernel_upl_unmap(
3961 vm_map_t map,
3962 upl_t upl);
3963
3964extern int kernel_upl_commit(
3965 upl_t upl,
3966 upl_page_info_t *pl,
3967 mach_msg_type_number_t count);
3968
3969extern int kernel_upl_commit_range(
3970 upl_t upl,
3971 upl_offset_t offset,
3972 upl_size_t size,
3973 int flags,
3974 upl_page_info_array_t pl,
3975 mach_msg_type_number_t count);
3976
3977extern int kernel_upl_abort(
3978 upl_t upl,
3979 int abort_type);
3980
3981extern int kernel_upl_abort_range(
3982 upl_t upl,
3983 upl_offset_t offset,
3984 upl_size_t size,
3985 int abort_flags);
3986
3987
3988kern_return_t
3989kernel_upl_map(
3990 vm_map_t map,
3991 upl_t upl,
3992 vm_offset_t *dst_addr)
3993{
3994 return vm_upl_map(map, upl, dst_addr);
3995}
3996
3997
3998kern_return_t
3999kernel_upl_unmap(
4000 vm_map_t map,
4001 upl_t upl)
4002{
4003 return vm_upl_unmap(map, upl);
4004}
4005
4006kern_return_t
4007kernel_upl_commit(
4008 upl_t upl,
4009 upl_page_info_t *pl,
4010 mach_msg_type_number_t count)
4011{
4012 kern_return_t kr;
4013
4014 kr = upl_commit(upl, pl, count);
4015 upl_deallocate(upl);
4016 return kr;
4017}
4018
4019
4020kern_return_t
4021kernel_upl_commit_range(
4022 upl_t upl,
4023 upl_offset_t offset,
4024 upl_size_t size,
4025 int flags,
4026 upl_page_info_array_t pl,
4027 mach_msg_type_number_t count)
4028{
4029 boolean_t finished = FALSE;
4030 kern_return_t kr;
4031
4032 if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
4033 flags |= UPL_COMMIT_NOTIFY_EMPTY;
4034 }
4035
4036 if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
4037 return KERN_INVALID_ARGUMENT;
4038 }
4039
4040 kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
4041
4042 if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished) {
4043 upl_deallocate(upl);
4044 }
4045
4046 return kr;
4047}
4048
4049kern_return_t
4050kernel_upl_abort_range(
4051 upl_t upl,
4052 upl_offset_t offset,
4053 upl_size_t size,
4054 int abort_flags)
4055{
4056 kern_return_t kr;
4057 boolean_t finished = FALSE;
4058
4059 if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY) {
4060 abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
4061 }
4062
4063 kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
4064
4065 if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished) {
4066 upl_deallocate(upl);
4067 }
4068
4069 return kr;
4070}
4071
4072kern_return_t
4073kernel_upl_abort(
4074 upl_t upl,
4075 int abort_type)
4076{
4077 kern_return_t kr;
4078
4079 kr = upl_abort(upl, abort_type);
4080 upl_deallocate(upl);
4081 return kr;
4082}
4083
4084/*
4085 * Now a kernel-private interface (for BootCache
4086 * use only). Need a cleaner way to create an
4087 * empty vm_map() and return a handle to it.
4088 */
4089
4090kern_return_t
4091vm_region_object_create(
4092 __unused vm_map_t target_map,
4093 vm_size_t size,
4094 ipc_port_t *object_handle)
4095{
4096 vm_named_entry_t user_entry;
4097 ipc_port_t user_handle;
4098
4099 vm_map_t new_map;
4100
4101 if (mach_memory_entry_allocate(&user_entry, &user_handle)
4102 != KERN_SUCCESS) {
4103 return KERN_FAILURE;
4104 }
4105
4106 /* Create a named object based on a submap of specified size */
4107
4108 new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
4109 vm_map_round_page(size,
4110 VM_MAP_PAGE_MASK(target_map)),
4111 TRUE);
4112 vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));
4113
4114 user_entry->backing.map = new_map;
4115 user_entry->internal = TRUE;
4116 user_entry->is_sub_map = TRUE;
4117 user_entry->offset = 0;
4118 user_entry->protection = VM_PROT_ALL;
4119 user_entry->size = size;
4120 assert(user_entry->ref_count == 1);
4121
4122 *object_handle = user_handle;
4123 return KERN_SUCCESS;
4124}
4125
4126ppnum_t vm_map_get_phys_page( /* forward */
4127 vm_map_t map,
4128 vm_offset_t offset);
4129
4130ppnum_t
4131vm_map_get_phys_page(
4132 vm_map_t map,
4133 vm_offset_t addr)
4134{
4135 vm_object_offset_t offset;
4136 vm_object_t object;
4137 vm_map_offset_t map_offset;
4138 vm_map_entry_t entry;
4139 ppnum_t phys_page = 0;
4140
4141 map_offset = vm_map_trunc_page(addr, PAGE_MASK);
4142
4143 vm_map_lock(map);
4144 while (vm_map_lookup_entry(map, map_offset, &entry)) {
4145 if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
4146 vm_map_unlock(map);
4147 return (ppnum_t) 0;
4148 }
4149 if (entry->is_sub_map) {
4150 vm_map_t old_map;
4151 vm_map_lock(VME_SUBMAP(entry));
4152 old_map = map;
4153 map = VME_SUBMAP(entry);
4154 map_offset = (VME_OFFSET(entry) +
4155 (map_offset - entry->vme_start));
4156 vm_map_unlock(old_map);
4157 continue;
4158 }
4159 if (VME_OBJECT(entry)->phys_contiguous) {
4160 /* These are not standard pageable memory mappings */
4161 /* If they are not present in the object they will */
4162 /* have to be picked up from the pager through the */
4163 /* fault mechanism. */
4164 if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
4165 /* need to call vm_fault */
4166 vm_map_unlock(map);
4167 vm_fault(map, map_offset, VM_PROT_NONE,
4168 FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
4169 THREAD_UNINT, NULL, 0);
4170 vm_map_lock(map);
4171 continue;
4172 }
4173 offset = (VME_OFFSET(entry) +
4174 (map_offset - entry->vme_start));
4175 phys_page = (ppnum_t)
4176 ((VME_OBJECT(entry)->vo_shadow_offset
4177 + offset) >> PAGE_SHIFT);
4178 break;
4179 }
        offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
        object = VME_OBJECT(entry);
        vm_object_lock(object);
        while (TRUE) {
            vm_page_t dst_page = vm_page_lookup(object, offset);
            if (dst_page == VM_PAGE_NULL) {
                if (object->shadow) {
                    vm_object_t old_object;
                    vm_object_lock(object->shadow);
                    old_object = object;
                    offset = offset + object->vo_shadow_offset;
                    object = object->shadow;
                    vm_object_unlock(old_object);
                } else {
                    vm_object_unlock(object);
                    break;
                }
            } else {
                phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
                vm_object_unlock(object);
                break;
            }
        }
        break;
    }

    vm_map_unlock(map);
    return phys_page;
}
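
/*
 * A minimal usage sketch (disabled): translate a kernel virtual address to a
 * physical address with the lookup above.  ptoa_64() and PAGE_MASK are the
 * usual page-size macros assumed here; a zero page number means the page is
 * not resident (or not mapped) and would need a fault first.
 */
#if 0
static uint64_t
example_kva_to_phys(vm_offset_t kva)
{
    ppnum_t pn;

    pn = vm_map_get_phys_page(kernel_map, kva);
    if (pn == 0) {
        return 0;
    }
    return ptoa_64(pn) + (kva & PAGE_MASK);
}
#endif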

#if 0
kern_return_t kernel_object_iopl_request( /* forward */
    vm_named_entry_t named_entry,
    memory_object_offset_t offset,
    upl_size_t *upl_size,
    upl_t *upl_ptr,
    upl_page_info_array_t user_page_list,
    unsigned int *page_list_count,
    int *flags);

kern_return_t
kernel_object_iopl_request(
    vm_named_entry_t named_entry,
    memory_object_offset_t offset,
    upl_size_t *upl_size,
    upl_t *upl_ptr,
    upl_page_info_array_t user_page_list,
    unsigned int *page_list_count,
    int *flags)
{
    vm_object_t object;
    kern_return_t ret;

    int caller_flags;

    caller_flags = *flags;

    if (caller_flags & ~UPL_VALID_FLAGS) {
        /*
         * For forward compatibility's sake,
         * reject any unknown flag.
         */
        return KERN_INVALID_VALUE;
    }

    /* a few checks to make sure the caller is obeying the rules */
    if (*upl_size == 0) {
        if (offset >= named_entry->size) {
            return KERN_INVALID_RIGHT;
        }
        *upl_size = (upl_size_t) (named_entry->size - offset);
        if (*upl_size != named_entry->size - offset) {
            return KERN_INVALID_ARGUMENT;
        }
    }
    if (caller_flags & UPL_COPYOUT_FROM) {
        if ((named_entry->protection & VM_PROT_READ)
            != VM_PROT_READ) {
            return KERN_INVALID_RIGHT;
        }
    } else {
        if ((named_entry->protection &
            (VM_PROT_READ | VM_PROT_WRITE))
            != (VM_PROT_READ | VM_PROT_WRITE)) {
            return KERN_INVALID_RIGHT;
        }
    }
    if (named_entry->size < (offset + *upl_size)) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * The caller's "offset" parameter is defined to be the offset from
     * the beginning of the named entry; convert it to an offset within
     * the backing object.
     */
    offset = offset + named_entry->offset;

    if (named_entry->is_sub_map ||
        named_entry->is_copy) {
        return KERN_INVALID_ARGUMENT;
    }

    named_entry_lock(named_entry);

    /*
     * This is the case where we operate on an already-known object.
     * If the object is not ready, it is internal; an external object
     * cannot be mapped until it is ready, so we can skip the ready
     * check in this case.
     */
    object = named_entry->backing.object;
    vm_object_reference(object);
    named_entry_unlock(named_entry);

    if (!object->private) {
        if (*upl_size > MAX_UPL_TRANSFER_BYTES) {
            *upl_size = MAX_UPL_TRANSFER_BYTES;
        }
        if (object->phys_contiguous) {
            *flags = UPL_PHYS_CONTIG;
        } else {
            *flags = 0;
        }
    } else {
        *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
    }

    ret = vm_object_iopl_request(object,
        offset,
        *upl_size,
        upl_ptr,
        user_page_list,
        page_list_count,
        (upl_control_flags_t)(unsigned int)caller_flags);
    vm_object_deallocate(object);
    return ret;
}
#endif

/*
 * These symbols are looked up at runtime by VMware and VirtualBox,
 * despite not being exported in the symbol sets.
 */

#if defined(__x86_64__)

kern_return_t
mach_vm_map(
    vm_map_t target_map,
    mach_vm_offset_t *address,
    mach_vm_size_t initial_size,
    mach_vm_offset_t mask,
    int flags,
    ipc_port_t port,
    vm_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

kern_return_t
mach_vm_remap(
    vm_map_t target_map,
    mach_vm_offset_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    vm_map_t src_map,
    mach_vm_offset_t memory_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance);

kern_return_t
mach_vm_map(
    vm_map_t target_map,
    mach_vm_offset_t *address,
    mach_vm_size_t initial_size,
    mach_vm_offset_t mask,
    int flags,
    ipc_port_t port,
    vm_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
    return mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
               offset, copy, cur_protection, max_protection, inheritance);
}

kern_return_t
mach_vm_remap(
    vm_map_t target_map,
    mach_vm_offset_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    vm_map_t src_map,
    mach_vm_offset_t memory_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance)
{
    return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
               copy, cur_protection, max_protection, inheritance);
}

kern_return_t
vm_map(
    vm_map_t target_map,
    vm_offset_t *address,
    vm_size_t size,
    vm_offset_t mask,
    int flags,
    ipc_port_t port,
    vm_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

kern_return_t
vm_map(
    vm_map_t target_map,
    vm_offset_t *address,
    vm_size_t size,
    vm_offset_t mask,
    int flags,
    ipc_port_t port,
    vm_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_map_kernel(target_map, address, size, mask,
               flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
               port, offset, copy,
               cur_protection, max_protection, inheritance);
}

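/*
 * A minimal usage sketch (disabled): an anonymous allocation through the
 * mach_vm_map() compatibility wrapper above, roughly how an external client
 * that resolved these symbols might call it.  The NULL memory object port
 * and the protection/inheritance values are illustrative assumptions.
 */
#if 0
static kern_return_t
example_reserve_anonymous(vm_map_t map, mach_vm_offset_t *addr, mach_vm_size_t size)
{
    *addr = 0;
    return mach_vm_map(map, addr, size, 0 /* mask */,
               VM_FLAGS_ANYWHERE,
               IPC_PORT_NULL, /* no backing object: anonymous memory */
               0 /* offset */, FALSE /* copy */,
               VM_PROT_DEFAULT, VM_PROT_ALL,
               VM_INHERIT_DEFAULT);
}
#endif
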
#endif /* __x86_64__ */