/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can either be 32 or 64, or the
 * kernel task can be 32 or 64. mach_vm_allocate makes sense everywhere, and
 * is preferred.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32
 * platforms, the MIG glue should never call into vm_allocate directly, because
 * the calling task and kernel_task are unlikely to use the same size
 * parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */
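/*
 * For context, a minimal user-space sketch of the "wide" subsystem 4800 API
 * described above (illustrative only, not part of this file; it assumes the
 * standard <mach/mach_vm.h> user stubs):
 *
 *      #include <mach/mach.h>
 *      #include <mach/mach_vm.h>
 *
 *      mach_vm_address_t addr = 0;
 *      kern_return_t kr;
 *
 *      kr = mach_vm_allocate(mach_task_self(), &addr, vm_page_size,
 *          VM_FLAGS_ANYWHERE);
 *      if (kr == KERN_SUCCESS) {
 *              kr = mach_vm_deallocate(mach_task_self(), addr, vm_page_size);
 *      }
 */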
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>    /* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>     /* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/sdt.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/memory_entry_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_init.h>

#include <san/kasan.h>

#include <libkern/OSDebug.h>
#include <IOKit/IOBSD.h>
vm_size_t upl_offset_to_pagelist = 0;

/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate_external(
    vm_map_t           map,
    mach_vm_offset_t   *addr,
    mach_vm_size_t     size,
    int                flags)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return mach_vm_allocate_kernel(map, addr, size, flags, tag);
}

kern_return_t
mach_vm_allocate_kernel(
    vm_map_t           map,
    mach_vm_offset_t   *addr,
    mach_vm_size_t     size,
    int                flags,
    vm_tag_t           tag)
{
    vm_map_offset_t map_addr;
    vm_map_size_t   map_size;
    kern_return_t   result;
    boolean_t       anywhere;

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_ALLOCATE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }
    if (size == 0) {
        *addr = 0;
        return KERN_SUCCESS;
    }

    anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
    if (anywhere) {
        /*
         * No specific address requested, so start candidate address
         * search at the minimum address in the map.  However, if that
         * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
         * allocations of PAGEZERO to explicit requests since its
         * normal use is to catch dereferences of NULL and many
         * applications also treat pointers with a value of 0 as
         * special and suddenly having address 0 contain useable
         * memory would tend to confuse those applications.
         */
        map_addr = vm_map_min(map);
        if (map_addr == 0) {
            map_addr += VM_MAP_PAGE_SIZE(map);
        }
    } else {
        map_addr = vm_map_trunc_page(*addr,
            VM_MAP_PAGE_MASK(map));
    }
    map_size = vm_map_round_page(size,
        VM_MAP_PAGE_MASK(map));
    if (map_size == 0) {
        return KERN_INVALID_ARGUMENT;
    }

    result = vm_map_enter(
        map,
        &map_addr,
        map_size,
        (vm_map_offset_t)0,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        VM_OBJECT_NULL,
        (vm_object_offset_t)0,
        FALSE,
        VM_PROT_DEFAULT,
        VM_PROT_ALL,
        VM_INHERIT_DEFAULT);

    *addr = map_addr;
    return result;
}
/*
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate_external(
    vm_map_t      map,
    vm_offset_t   *addr,
    vm_size_t     size,
    int           flags)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_allocate_kernel(map, addr, size, flags, tag);
}

kern_return_t
vm_allocate_kernel(
    vm_map_t      map,
    vm_offset_t   *addr,
    vm_size_t     size,
    int           flags,
    vm_tag_t      tag)
{
    vm_map_offset_t map_addr;
    vm_map_size_t   map_size;
    kern_return_t   result;
    boolean_t       anywhere;

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_ALLOCATE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }
    if (size == 0) {
        *addr = 0;
        return KERN_SUCCESS;
    }

    anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
    if (anywhere) {
        /*
         * No specific address requested, so start candidate address
         * search at the minimum address in the map.  However, if that
         * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
         * allocations of PAGEZERO to explicit requests since its
         * normal use is to catch dereferences of NULL and many
         * applications also treat pointers with a value of 0 as
         * special and suddenly having address 0 contain useable
         * memory would tend to confuse those applications.
         */
        map_addr = vm_map_min(map);
        if (map_addr == 0) {
            map_addr += VM_MAP_PAGE_SIZE(map);
        }
    } else {
        map_addr = vm_map_trunc_page(*addr,
            VM_MAP_PAGE_MASK(map));
    }
    map_size = vm_map_round_page(size,
        VM_MAP_PAGE_MASK(map));
    if (map_size == 0) {
        return KERN_INVALID_ARGUMENT;
    }

    result = vm_map_enter(
        map,
        &map_addr,
        map_size,
        (vm_map_offset_t)0,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        VM_OBJECT_NULL,
        (vm_object_offset_t)0,
        FALSE,
        VM_PROT_DEFAULT,
        VM_PROT_ALL,
        VM_INHERIT_DEFAULT);

#if KASAN
    if (result == KERN_SUCCESS && map->pmap == kernel_pmap) {
        kasan_notify_address(map_addr, map_size);
    }
#endif

    *addr = CAST_DOWN(vm_offset_t, map_addr);
    return result;
}
/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
    vm_map_t           map,
    mach_vm_offset_t   start,
    mach_vm_size_t     size)
{
    if ((map == VM_MAP_NULL) || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == (mach_vm_offset_t) 0) {
        return KERN_SUCCESS;
    }

    return vm_map_remove(map,
        vm_map_trunc_page(start,
        VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(start + size,
        VM_MAP_PAGE_MASK(map)),
        VM_MAP_REMOVE_NO_FLAGS);
}
/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
    vm_map_t      map,
    vm_offset_t   start,
    vm_size_t     size)
{
    if ((map == VM_MAP_NULL) || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == (vm_offset_t) 0) {
        return KERN_SUCCESS;
    }

    return vm_map_remove(map,
        vm_map_trunc_page(start,
        VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(start + size,
        VM_MAP_PAGE_MASK(map)),
        VM_MAP_REMOVE_NO_FLAGS);
}
/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_inherit(
    vm_map_t           map,
    mach_vm_offset_t   start,
    mach_vm_size_t     size,
    vm_inherit_t       new_inheritance)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_inheritance > VM_INHERIT_LAST_VALID)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_inherit(map,
        vm_map_trunc_page(start,
        VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(start + size,
        VM_MAP_PAGE_MASK(map)),
        new_inheritance);
}
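/*
 * Illustrative user-space sketch (not part of this file; assumes the
 * <mach/mach_vm.h> stubs and a page-aligned region "addr"/"len" owned by the
 * caller): marking a range VM_INHERIT_SHARE keeps it shared with children
 * across fork() instead of being copied.
 *
 *      kr = mach_vm_inherit(mach_task_self(), addr, len, VM_INHERIT_SHARE);
 */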
/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_inherit(
    vm_map_t       map,
    vm_offset_t    start,
    vm_size_t      size,
    vm_inherit_t   new_inheritance)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_inheritance > VM_INHERIT_LAST_VALID)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_inherit(map,
        vm_map_trunc_page(start,
        VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(start + size,
        VM_MAP_PAGE_MASK(map)),
        new_inheritance);
}
/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_protect(
    vm_map_t           map,
    mach_vm_offset_t   start,
    mach_vm_size_t     size,
    boolean_t          set_maximum,
    vm_prot_t          new_protection)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_protect(map,
        vm_map_trunc_page(start,
        VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(start + size,
        VM_MAP_PAGE_MASK(map)),
        new_protection,
        set_maximum);
}
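/*
 * Illustrative user-space sketch (not part of this file; assumes the
 * <mach/mach_vm.h> stubs and a page-aligned region "addr"/"len"): dropping
 * write access on a range while leaving the maximum protection untouched.
 *
 *      kr = mach_vm_protect(mach_task_self(), addr, len,
 *          FALSE,            // set_maximum
 *          VM_PROT_READ);    // new current protection
 */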
/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map. Addressability of the range limited
 *	to the same size as the kernel.
 */
kern_return_t
vm_protect(
    vm_map_t      map,
    vm_offset_t   start,
    vm_size_t     size,
    boolean_t     set_maximum,
    vm_prot_t     new_protection)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_protect(map,
        vm_map_trunc_page(start,
        VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(start + size,
        VM_MAP_PAGE_MASK(map)),
        new_protection,
        set_maximum);
}
/*
 * mach_vm_machine_attributes -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
    vm_map_t                      map,
    mach_vm_address_t             addr,
    mach_vm_size_t                size,
    vm_machine_attribute_t        attribute,
    vm_machine_attribute_val_t    *value)  /* IN/OUT */
{
    if ((map == VM_MAP_NULL) || (addr + size < addr)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_machine_attribute(
        map,
        vm_map_trunc_page(addr,
        VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(addr + size,
        VM_MAP_PAGE_MASK(map)),
        attribute,
        value);
}
/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc. Limited addressability
 * (same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
    vm_map_t                      map,
    vm_address_t                  addr,
    vm_size_t                     size,
    vm_machine_attribute_t        attribute,
    vm_machine_attribute_val_t    *value)  /* IN/OUT */
{
    if ((map == VM_MAP_NULL) || (addr + size < addr)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_machine_attribute(
        map,
        vm_map_trunc_page(addr,
        VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(addr + size,
        VM_MAP_PAGE_MASK(map)),
        attribute,
        value);
}
/*
 *	mach_vm_read -
 *	Read/copy a range from one address space and return it to the caller.
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 *
 *	JMM - because of mach_msg_type_number_t, this call is limited to a
 *	single 4GB region at this time.
 */
kern_return_t
mach_vm_read(
    vm_map_t                 map,
    mach_vm_address_t        addr,
    mach_vm_size_t           size,
    pointer_t                *data,
    mach_msg_type_number_t   *data_size)
{
    kern_return_t   error;
    vm_map_copy_t   ipc_address;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((mach_msg_type_number_t) size != size) {
        return KERN_INVALID_ARGUMENT;
    }

    error = vm_map_copyin(map,
        (vm_map_address_t)addr,
        (vm_map_size_t)size,
        FALSE,  /* src_destroy */
        &ipc_address);

    if (KERN_SUCCESS == error) {
        *data = (pointer_t) ipc_address;
        *data_size = (mach_msg_type_number_t) size;
        assert(*data_size == size);
    }
    return error;
}
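/*
 * Illustrative user-space sketch (not part of this file; assumes the
 * <mach/mach_vm.h> stubs; "task" is a task port the caller holds and
 * "remote_addr"/"len" a readable range in that task). The data arrives
 * out-of-line and must be deallocated by the receiver:
 *
 *      vm_offset_t data = 0;
 *      mach_msg_type_number_t data_cnt = 0;
 *
 *      kr = mach_vm_read(task, remote_addr, len, &data, &data_cnt);
 *      if (kr == KERN_SUCCESS) {
 *              // ... use (const void *)data for data_cnt bytes ...
 *              mach_vm_deallocate(mach_task_self(), data, data_cnt);
 *      }
 */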
/*
 *	vm_read -
 *	Read/copy a range from one address space and return it to the caller.
 *	Limited addressability (same range limits as for the native kernel map).
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 */
kern_return_t
vm_read(
    vm_map_t                 map,
    vm_address_t             addr,
    vm_size_t                size,
    pointer_t                *data,
    mach_msg_type_number_t   *data_size)
{
    kern_return_t   error;
    vm_map_copy_t   ipc_address;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    mach_msg_type_number_t dsize;
    if (os_convert_overflow(size, &dsize)) {
        /*
         * The kernel could handle a 64-bit "size" value, but
         * it could not return the size of the data in "*data_size"
         * without overflowing.
         * Let's reject this "size" as invalid.
         */
        return KERN_INVALID_ARGUMENT;
    }

    error = vm_map_copyin(map,
        (vm_map_address_t)addr,
        (vm_map_size_t)size,
        FALSE,  /* src_destroy */
        &ipc_address);

    if (KERN_SUCCESS == error) {
        *data = (pointer_t) ipc_address;
        *data_size = dsize;
        assert(*data_size == size);
    }
    return error;
}
/*
 *	mach_vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *	manually here.
 */
kern_return_t
mach_vm_read_list(
    vm_map_t               map,
    mach_vm_read_entry_t   data_list,
    natural_t              count)
{
    mach_msg_type_number_t   i;
    kern_return_t            error;
    vm_map_copy_t            copy;

    if (map == VM_MAP_NULL ||
        count > VM_MAP_ENTRY_MAX) {
        return KERN_INVALID_ARGUMENT;
    }

    error = KERN_SUCCESS;
    for (i = 0; i < count; i++) {
        vm_map_address_t map_addr;
        vm_map_size_t    map_size;

        map_addr = (vm_map_address_t)(data_list[i].address);
        map_size = (vm_map_size_t)(data_list[i].size);

        if (map_size != 0) {
            error = vm_map_copyin(map,
                map_addr,
                map_size,
                FALSE,  /* src_destroy */
                &copy);
            if (KERN_SUCCESS == error) {
                error = vm_map_copyout(
                    current_task()->map,
                    &map_addr,
                    copy);
                if (KERN_SUCCESS == error) {
                    data_list[i].address = map_addr;
                    continue;
                }
                vm_map_copy_discard(copy);
            }
        }
        data_list[i].address = (mach_vm_address_t)0;
        data_list[i].size = (mach_vm_size_t)0;
    }
    return error;
}
/*
 *	vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *	manually here.
 *
 *	The source and destination ranges are limited to those
 *	that can be described with a vm_address_t (i.e. same
 *	size map as the kernel).
 *
 *	JMM - If the result of the copyout is an address range
 *	that cannot be described with a vm_address_t (i.e. the
 *	caller had a larger address space but used this call
 *	anyway), it will result in a truncated address being
 *	returned (and a likely confused caller).
 */
kern_return_t
vm_read_list(
    vm_map_t          map,
    vm_read_entry_t   data_list,
    natural_t         count)
{
    mach_msg_type_number_t   i;
    kern_return_t            error;
    vm_map_copy_t            copy;

    if (map == VM_MAP_NULL ||
        count > VM_MAP_ENTRY_MAX) {
        return KERN_INVALID_ARGUMENT;
    }

    error = KERN_SUCCESS;
    for (i = 0; i < count; i++) {
        vm_map_address_t map_addr;
        vm_map_size_t    map_size;

        map_addr = (vm_map_address_t)(data_list[i].address);
        map_size = (vm_map_size_t)(data_list[i].size);

        if (map_size != 0) {
            error = vm_map_copyin(map,
                map_addr,
                map_size,
                FALSE,  /* src_destroy */
                &copy);
            if (KERN_SUCCESS == error) {
                error = vm_map_copyout(current_task()->map,
                    &map_addr,
                    copy);
                if (KERN_SUCCESS == error) {
                    data_list[i].address =
                        CAST_DOWN(vm_offset_t, map_addr);
                    continue;
                }
                vm_map_copy_discard(copy);
            }
        }
        data_list[i].address = (mach_vm_address_t)0;
        data_list[i].size = (mach_vm_size_t)0;
    }
    return error;
}
/*
 *	mach_vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	In making an assumption that the current thread is local, it is
 *	no longer cluster-safe without a fully supportive local proxy
 *	thread/task (but we don't support clusters anymore so this is moot).
 */
kern_return_t
mach_vm_read_overwrite(
    vm_map_t            map,
    mach_vm_address_t   address,
    mach_vm_size_t      size,
    mach_vm_address_t   data,
    mach_vm_size_t      *data_size)
{
    kern_return_t   error;
    vm_map_copy_t   copy;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    error = vm_map_copyin(map, (vm_map_address_t)address,
        (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == error) {
        error = vm_map_copy_overwrite(current_thread()->map,
            (vm_map_address_t)data,
            copy, FALSE);
        if (KERN_SUCCESS == error) {
            *data_size = size;
            return error;
        }
        vm_map_copy_discard(copy);
    }
    return error;
}
/*
 *	vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	This routine adds the additional limitation that the source and
 *	destination ranges must be describable with vm_address_t values
 *	(i.e. the same size address spaces as the kernel, or at least
 *	the ranges are in that first portion of the respective address
 *	spaces).
 */
kern_return_t
vm_read_overwrite(
    vm_map_t       map,
    vm_address_t   address,
    vm_size_t      size,
    vm_address_t   data,
    vm_size_t      *data_size)
{
    kern_return_t   error;
    vm_map_copy_t   copy;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    error = vm_map_copyin(map, (vm_map_address_t)address,
        (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == error) {
        error = vm_map_copy_overwrite(current_thread()->map,
            (vm_map_address_t)data,
            copy, FALSE);
        if (KERN_SUCCESS == error) {
            *data_size = size;
            return error;
        }
        vm_map_copy_discard(copy);
    }
    return error;
}
/*
 *	mach_vm_write -
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 */
kern_return_t
mach_vm_write(
    vm_map_t                          map,
    mach_vm_address_t                 address,
    pointer_t                         data,
    __unused mach_msg_type_number_t   size)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_copy_overwrite(map, (vm_map_address_t)address,
        (vm_map_copy_t) data, FALSE /* interruptible XXX */);
}
/*
 *	vm_write -
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 *
 *	The addressability of the range of addresses to overwrite is
 *	limited by the use of a vm_address_t (same size as kernel map).
 *	Either the target map is also small, or the range is in the
 *	low addresses within it.
 */
kern_return_t
vm_write(
    vm_map_t                          map,
    vm_address_t                      address,
    pointer_t                         data,
    __unused mach_msg_type_number_t   size)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_copy_overwrite(map, (vm_map_address_t)address,
        (vm_map_copy_t) data, FALSE /* interruptible XXX */);
}
/*
 *	mach_vm_copy -
 *	Overwrite one range of the specified map with the contents of
 *	another range within that same map (i.e. both address ranges
 *	are "over there").
 */
kern_return_t
mach_vm_copy(
    vm_map_t            map,
    mach_vm_address_t   source_address,
    mach_vm_size_t      size,
    mach_vm_address_t   dest_address)
{
    vm_map_copy_t   copy;
    kern_return_t   kr;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_copyin(map, (vm_map_address_t)source_address,
        (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == kr) {
        kr = vm_map_copy_overwrite(map,
            (vm_map_address_t)dest_address,
            copy, FALSE /* interruptible XXX */);

        if (KERN_SUCCESS != kr) {
            vm_map_copy_discard(copy);
        }
    }
    return kr;
}
kern_return_t
vm_copy(
    vm_map_t       map,
    vm_address_t   source_address,
    vm_size_t      size,
    vm_address_t   dest_address)
{
    vm_map_copy_t   copy;
    kern_return_t   kr;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_copyin(map, (vm_map_address_t)source_address,
        (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == kr) {
        kr = vm_map_copy_overwrite(map,
            (vm_map_address_t)dest_address,
            copy, FALSE /* interruptible XXX */);

        if (KERN_SUCCESS != kr) {
            vm_map_copy_discard(copy);
        }
    }
    return kr;
}
/*
 *	mach_vm_map -
 *	Map some range of an object into an address space.
 *
 *	The object can be one of several types of objects:
 *		NULL - anonymous memory
 *		a named entry - a range within another address space
 *		                or a range within a memory object
 *		a whole memory object
 */
kern_return_t
mach_vm_map_external(
    vm_map_t             target_map,
    mach_vm_offset_t     *address,
    mach_vm_size_t       initial_size,
    mach_vm_offset_t     mask,
    int                  flags,
    ipc_port_t           port,
    vm_object_offset_t   offset,
    boolean_t            copy,
    vm_prot_t            cur_protection,
    vm_prot_t            max_protection,
    vm_inherit_t         inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return mach_vm_map_kernel(target_map, address, initial_size, mask,
        flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
        port, offset, copy,
        cur_protection, max_protection,
        inheritance);
}

kern_return_t
mach_vm_map_kernel(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          initial_size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    kern_return_t    kr;
    vm_map_offset_t  vmmaddr;

    vmmaddr = (vm_map_offset_t) *address;

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_MAP) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_enter_mem_object(target_map,
        &vmmaddr,
        initial_size,
        mask,
        flags,
        vmk_flags,
        tag,
        port,
        offset,
        copy,
        cur_protection,
        max_protection,
        inheritance);

#if KASAN
    if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) {
        kasan_notify_address(vmmaddr, initial_size);
    }
#endif

    *address = vmmaddr;
    return kr;
}
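/*
 * Illustrative user-space sketch (not part of this file; assumes the
 * <mach/mach_vm.h> stubs): passing a null object port to mach_vm_map() maps
 * fresh anonymous zero-filled memory.
 *
 *      mach_vm_address_t addr = 0;
 *
 *      kr = mach_vm_map(mach_task_self(), &addr, len,
 *          0,                          // alignment mask
 *          VM_FLAGS_ANYWHERE,
 *          MACH_PORT_NULL,             // NULL object => anonymous memory
 *          0,                          // offset
 *          FALSE,                      // copy
 *          VM_PROT_READ | VM_PROT_WRITE,
 *          VM_PROT_READ | VM_PROT_WRITE,
 *          VM_INHERIT_DEFAULT);
 */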
/* legacy interface */
kern_return_t
vm_map_64_external(
    vm_map_t             target_map,
    vm_offset_t          *address,
    vm_size_t            size,
    vm_offset_t          mask,
    int                  flags,
    ipc_port_t           port,
    vm_object_offset_t   offset,
    boolean_t            copy,
    vm_prot_t            cur_protection,
    vm_prot_t            max_protection,
    vm_inherit_t         inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_map_64_kernel(target_map, address, size, mask,
        flags, VM_MAP_KERNEL_FLAGS_NONE,
        tag, port, offset, copy,
        cur_protection, max_protection,
        inheritance);
}

kern_return_t
vm_map_64_kernel(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    mach_vm_address_t   map_addr;
    mach_vm_size_t      map_size;
    mach_vm_offset_t    map_mask;
    kern_return_t       kr;

    map_addr = (mach_vm_address_t)*address;
    map_size = (mach_vm_size_t)size;
    map_mask = (mach_vm_offset_t)mask;

    kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
        flags, vmk_flags, tag,
        port, offset, copy,
        cur_protection, max_protection,
        inheritance);
    *address = CAST_DOWN(vm_offset_t, map_addr);
    return kr;
}
/* temporary, until world build */
kern_return_t
vm_map_external(
    vm_map_t       target_map,
    vm_offset_t    *address,
    vm_size_t      size,
    vm_offset_t    mask,
    int            flags,
    ipc_port_t     port,
    vm_offset_t    offset,
    boolean_t      copy,
    vm_prot_t      cur_protection,
    vm_prot_t      max_protection,
    vm_inherit_t   inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_map_kernel(target_map, address, size, mask,
        flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
        port, offset, copy,
        cur_protection, max_protection, inheritance);
}

kern_return_t
vm_map_kernel(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_offset_t             offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    mach_vm_address_t    map_addr;
    mach_vm_size_t       map_size;
    mach_vm_offset_t     map_mask;
    vm_object_offset_t   obj_offset;
    kern_return_t        kr;

    map_addr = (mach_vm_address_t)*address;
    map_size = (mach_vm_size_t)size;
    map_mask = (mach_vm_offset_t)mask;
    obj_offset = (vm_object_offset_t)offset;

    kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
        flags, vmk_flags, tag,
        port, obj_offset, copy,
        cur_protection, max_protection, inheritance);
    *address = CAST_DOWN(vm_offset_t, map_addr);
    return kr;
}
/*
 * mach_vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 */
kern_return_t
mach_vm_remap_external(
    vm_map_t           target_map,
    mach_vm_offset_t   *address,
    mach_vm_size_t     size,
    mach_vm_offset_t   mask,
    int                flags,
    vm_map_t           src_map,
    mach_vm_offset_t   memory_address,
    boolean_t          copy,
    vm_prot_t          *cur_protection,
    vm_prot_t          *max_protection,
    vm_inherit_t       inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);

    return mach_vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, memory_address,
               copy, cur_protection, max_protection, inheritance);
}

kern_return_t
mach_vm_remap_kernel(
    vm_map_t           target_map,
    mach_vm_offset_t   *address,
    mach_vm_size_t     size,
    mach_vm_offset_t   mask,
    int                flags,
    vm_tag_t           tag,
    vm_map_t           src_map,
    mach_vm_offset_t   memory_address,
    boolean_t          copy,
    vm_prot_t          *cur_protection,
    vm_prot_t          *max_protection,
    vm_inherit_t       inheritance)
{
    vm_map_offset_t  map_addr;
    kern_return_t    kr;

    if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
        return KERN_INVALID_ARGUMENT;
    }

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_REMAP) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_offset_t)*address;

    kr = vm_map_remap(target_map,
        &map_addr,
        size,
        mask,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        src_map,
        memory_address,
        copy,
        cur_protection,
        max_protection,
        inheritance);
    *address = map_addr;
    return kr;
}
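/*
 * Illustrative user-space sketch (not part of this file; assumes the
 * <mach/mach_vm.h> stubs; "src_task" is a task port and "src_addr"/"len" a
 * mapped range in it): remapping a remote range into the caller's address
 * space as a shared mapping.
 *
 *      mach_vm_address_t local = 0;
 *      vm_prot_t cur = VM_PROT_NONE, max = VM_PROT_NONE;
 *
 *      kr = mach_vm_remap(mach_task_self(), &local, len, 0,
 *          VM_FLAGS_ANYWHERE, src_task, src_addr,
 *          FALSE,              // share rather than copy
 *          &cur, &max, VM_INHERIT_NONE);
 */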
/*
 * vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 *
 * The addressability of the source and target address
 * range is limited by the size of vm_address_t (in the
 * kernel context).
 */
kern_return_t
vm_remap_external(
    vm_map_t       target_map,
    vm_offset_t    *address,
    vm_size_t      size,
    vm_offset_t    mask,
    int            flags,
    vm_map_t       src_map,
    vm_offset_t    memory_address,
    boolean_t      copy,
    vm_prot_t      *cur_protection,
    vm_prot_t      *max_protection,
    vm_inherit_t   inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);

    return vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map,
               memory_address, copy, cur_protection, max_protection, inheritance);
}

kern_return_t
vm_remap_kernel(
    vm_map_t       target_map,
    vm_offset_t    *address,
    vm_size_t      size,
    vm_offset_t    mask,
    int            flags,
    vm_tag_t       tag,
    vm_map_t       src_map,
    vm_offset_t    memory_address,
    boolean_t      copy,
    vm_prot_t      *cur_protection,
    vm_prot_t      *max_protection,
    vm_inherit_t   inheritance)
{
    vm_map_offset_t  map_addr;
    kern_return_t    kr;

    if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
        return KERN_INVALID_ARGUMENT;
    }

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_REMAP) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_offset_t)*address;

    kr = vm_map_remap(target_map,
        &map_addr,
        size,
        mask,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        src_map,
        memory_address,
        copy,
        cur_protection,
        max_protection,
        inheritance);
    *address = CAST_DOWN(vm_offset_t, map_addr);
    return kr;
}
/*
 * NOTE: these routines (and this file) will no longer require mach_host_server.h
 * when mach_vm_wire and vm_wire are changed to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	mach_vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire_external(
    host_priv_t        host_priv,
    vm_map_t           map,
    mach_vm_offset_t   start,
    mach_vm_size_t     size,
    vm_prot_t          access)
{
    return mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK);
}

kern_return_t
mach_vm_wire_kernel(
    host_priv_t        host_priv,
    vm_map_t           map,
    mach_vm_offset_t   start,
    mach_vm_size_t     size,
    vm_prot_t          access,
    vm_tag_t           tag)
{
    kern_return_t rc;

    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_HOST;
    }

    assert(host_priv == &realhost);

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_TASK;
    }

    if (access & ~VM_PROT_ALL || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (access != VM_PROT_NONE) {
        rc = vm_map_wire_kernel(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            access, tag,
            TRUE);
    } else {
        rc = vm_map_unwire(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            TRUE);
    }
    return rc;
}
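/*
 * Illustrative user-space sketch (not part of this file; assumes the
 * <mach/mach_vm.h> stubs and that "host_priv" is the privileged host port,
 * which the caller is assumed to already hold and which normally requires
 * root): wiring a range for read access, then unwiring it with VM_PROT_NONE.
 *
 *      kr = mach_vm_wire(host_priv, mach_task_self(), addr, len, VM_PROT_READ);
 *      // ...
 *      kr = mach_vm_wire(host_priv, mach_task_self(), addr, len, VM_PROT_NONE);
 */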
/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
    host_priv_t   host_priv,
    vm_map_t      map,
    vm_offset_t   start,
    vm_size_t     size,
    vm_prot_t     access)
{
    kern_return_t rc;

    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_HOST;
    }

    assert(host_priv == &realhost);

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_TASK;
    }

    if ((access & ~VM_PROT_ALL) || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        rc = KERN_SUCCESS;
    } else if (access != VM_PROT_NONE) {
        rc = vm_map_wire_kernel(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            access, VM_KERN_MEMORY_OSFMK,
            TRUE);
    } else {
        rc = vm_map_unwire(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            TRUE);
    }
    return rc;
}
/*
 *	mach_vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
kern_return_t
mach_vm_msync(
    vm_map_t            map,
    mach_vm_address_t   address,
    mach_vm_size_t      size,
    vm_sync_t           sync_flags)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_TASK;
    }

    return vm_map_msync(map, (vm_map_address_t)address,
        (vm_map_size_t)size, sync_flags);
}
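/*
 * Illustrative user-space sketch (not part of this file; assumes the
 * <mach/mach_vm.h> stubs and that "addr"/"len" cover a file-backed mapping):
 * a synchronous flush of dirty pages back to the pager, roughly the
 * Mach-level analogue of msync(..., MS_SYNC).
 *
 *      kr = mach_vm_msync(mach_task_self(), addr, len, VM_SYNC_SYNCHRONOUS);
 */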
/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
kern_return_t
vm_msync(
    vm_map_t       map,
    vm_address_t   address,
    vm_size_t      size,
    vm_sync_t      sync_flags)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_TASK;
    }

    return vm_map_msync(map, (vm_map_address_t)address,
        (vm_map_size_t)size, sync_flags);
}
kern_return_t
vm_toggle_entry_reuse(int toggle, int *old_value)
{
    vm_map_t map = current_map();

    assert(!map->is_nested_map);
    if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) {
        *old_value = map->disable_vmentry_reuse;
    } else if (toggle == VM_TOGGLE_SET) {
        vm_map_entry_t map_to_entry;

        vm_map_lock(map);
        vm_map_disable_hole_optimization(map);
        map->disable_vmentry_reuse = TRUE;
        __IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
        if (map->first_free == map_to_entry) {
            map->highest_entry_end = vm_map_min(map);
        } else {
            map->highest_entry_end = map->first_free->vme_end;
        }
        vm_map_unlock(map);
    } else if (toggle == VM_TOGGLE_CLEAR) {
        vm_map_lock(map);
        map->disable_vmentry_reuse = FALSE;
        vm_map_unlock(map);
    } else {
        return KERN_INVALID_ARGUMENT;
    }

    return KERN_SUCCESS;
}
/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
kern_return_t
mach_vm_behavior_set(
    vm_map_t           map,
    mach_vm_offset_t   start,
    mach_vm_size_t     size,
    vm_behavior_t      new_behavior)
{
    vm_map_offset_t align_mask;

    if ((map == VM_MAP_NULL) || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    switch (new_behavior) {
    case VM_BEHAVIOR_REUSABLE:
    case VM_BEHAVIOR_REUSE:
    case VM_BEHAVIOR_CAN_REUSE:
        /*
         * Align to the hardware page size, to allow
         * malloc() to maximize the amount of re-usability,
         * even on systems with larger software page size.
         */
        align_mask = PAGE_MASK;
        break;
    default:
        align_mask = VM_MAP_PAGE_MASK(map);
        break;
    }

    return vm_map_behavior_set(map,
        vm_map_trunc_page(start, align_mask),
        vm_map_round_page(start + size, align_mask),
        new_behavior);
}
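/*
 * Illustrative user-space sketch (not part of this file; assumes the
 * <mach/mach_vm.h> stubs): a malloc-style allocator can tell the VM system
 * that a free block's contents are disposable, and later reclaim the range
 * before reusing it.
 *
 *      kr = mach_vm_behavior_set(mach_task_self(), addr, len,
 *          VM_BEHAVIOR_REUSABLE);   // contents may be discarded
 *      // ...
 *      kr = mach_vm_behavior_set(mach_task_self(), addr, len,
 *          VM_BEHAVIOR_REUSE);      // about to reuse the range
 */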
/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressibility by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
kern_return_t
vm_behavior_set(
    vm_map_t        map,
    vm_offset_t     start,
    vm_size_t       size,
    vm_behavior_t   new_behavior)
{
    if (start + size < start) {
        return KERN_INVALID_ARGUMENT;
    }

    return mach_vm_behavior_set(map,
        (mach_vm_offset_t) start,
        (mach_vm_size_t) size,
        new_behavior);
}
/*
 *	mach_vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	in until the vm merge from the IK is completed, and
 *	vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
mach_vm_region(
    vm_map_t                 map,
    mach_vm_offset_t         *address,      /* IN/OUT */
    mach_vm_size_t           *size,         /* OUT */
    vm_region_flavor_t       flavor,        /* IN */
    vm_region_info_t         info,          /* OUT */
    mach_msg_type_number_t   *count,        /* IN/OUT */
    mach_port_t              *object_name)  /* OUT */
{
    vm_map_offset_t  map_addr;
    vm_map_size_t    map_size;
    kern_return_t    kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_offset_t)*address;
    map_size = (vm_map_size_t)*size;

    /* legacy conversion */
    if (VM_REGION_BASIC_INFO == flavor) {
        flavor = VM_REGION_BASIC_INFO_64;
    }

    kr = vm_map_region(map,
        &map_addr, &map_size,
        flavor, info, count,
        object_name);

    *address = map_addr;
    *size = map_size;
    return kr;
}
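/*
 * Illustrative user-space sketch (not part of this file; assumes the
 * <mach/mach_vm.h> stubs): querying the region that contains "addr" and
 * reading back its basic attributes.
 *
 *      mach_vm_address_t r_addr = addr;
 *      mach_vm_size_t r_size = 0;
 *      vm_region_basic_info_data_64_t binfo;
 *      mach_msg_type_number_t cnt = VM_REGION_BASIC_INFO_COUNT_64;
 *      mach_port_t obj_name = MACH_PORT_NULL;
 *
 *      kr = mach_vm_region(mach_task_self(), &r_addr, &r_size,
 *          VM_REGION_BASIC_INFO_64,
 *          (vm_region_info_t)&binfo, &cnt, &obj_name);
 *      // on success: the region starts at r_addr, spans r_size bytes, and
 *      // binfo.protection holds its current protection
 */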
/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	in until the vm merge from the IK is completed, and
 *	vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
vm_region_64(
    vm_map_t                 map,
    vm_offset_t              *address,      /* IN/OUT */
    vm_size_t                *size,         /* OUT */
    vm_region_flavor_t       flavor,        /* IN */
    vm_region_info_t         info,          /* OUT */
    mach_msg_type_number_t   *count,        /* IN/OUT */
    mach_port_t              *object_name)  /* OUT */
{
    vm_map_offset_t  map_addr;
    vm_map_size_t    map_size;
    kern_return_t    kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_offset_t)*address;
    map_size = (vm_map_size_t)*size;

    /* legacy conversion */
    if (VM_REGION_BASIC_INFO == flavor) {
        flavor = VM_REGION_BASIC_INFO_64;
    }

    kr = vm_map_region(map,
        &map_addr, &map_size,
        flavor, info, count,
        object_name);

    *address = CAST_DOWN(vm_offset_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
        return KERN_INVALID_ADDRESS;
    }
    return kr;
}
kern_return_t
vm_region(
    vm_map_t                 map,
    vm_address_t             *address,      /* IN/OUT */
    vm_size_t                *size,         /* OUT */
    vm_region_flavor_t       flavor,        /* IN */
    vm_region_info_t         info,          /* OUT */
    mach_msg_type_number_t   *count,        /* IN/OUT */
    mach_port_t              *object_name)  /* OUT */
{
    vm_map_address_t  map_addr;
    vm_map_size_t     map_size;
    kern_return_t     kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;

    kr = vm_map_region(map,
        &map_addr, &map_size,
        flavor, info, count,
        object_name);

    *address = CAST_DOWN(vm_address_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
        return KERN_INVALID_ADDRESS;
    }
    return kr;
}
/*
 * vm_region_recurse: A form of vm_region which follows the
 * submaps in a target map
 */
kern_return_t
mach_vm_region_recurse(
    vm_map_t                   map,
    mach_vm_address_t          *address,
    mach_vm_size_t             *size,
    uint32_t                   *depth,
    vm_region_recurse_info_t   info,
    mach_msg_type_number_t     *infoCnt)
{
    vm_map_address_t  map_addr;
    vm_map_size_t     map_size;
    kern_return_t     kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;

    kr = vm_map_region_recurse_64(
        map,
        &map_addr,
        &map_size,
        depth,
        (vm_region_submap_info_64_t)info,
        infoCnt);

    *address = map_addr;
    *size = map_size;
    return kr;
}
/*
 * vm_region_recurse: A form of vm_region which follows the
 * submaps in a target map
 */
kern_return_t
vm_region_recurse_64(
    vm_map_t                      map,
    vm_address_t                  *address,
    vm_size_t                     *size,
    uint32_t                      *depth,
    vm_region_recurse_info_64_t   info,
    mach_msg_type_number_t        *infoCnt)
{
    vm_map_address_t  map_addr;
    vm_map_size_t     map_size;
    kern_return_t     kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;

    kr = vm_map_region_recurse_64(
        map,
        &map_addr,
        &map_size,
        depth,
        (vm_region_submap_info_64_t)info,
        infoCnt);

    *address = CAST_DOWN(vm_address_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
        return KERN_INVALID_ADDRESS;
    }
    return kr;
}
kern_return_t
vm_region_recurse(
    vm_map_t                   map,
    vm_offset_t                *address,    /* IN/OUT */
    vm_size_t                  *size,       /* OUT */
    natural_t                  *depth,      /* IN/OUT */
    vm_region_recurse_info_t   info32,      /* IN/OUT */
    mach_msg_type_number_t     *infoCnt)    /* IN/OUT */
{
    vm_region_submap_info_data_64_t  info64;
    vm_region_submap_info_t          info;
    vm_map_address_t                 map_addr;
    vm_map_size_t                    map_size;
    kern_return_t                    kr;

    if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;
    info = (vm_region_submap_info_t)info32;
    *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

    kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
        depth, &info64, infoCnt);

    info->protection = info64.protection;
    info->max_protection = info64.max_protection;
    info->inheritance = info64.inheritance;
    info->offset = (uint32_t)info64.offset; /* trouble-maker */
    info->user_tag = info64.user_tag;
    info->pages_resident = info64.pages_resident;
    info->pages_shared_now_private = info64.pages_shared_now_private;
    info->pages_swapped_out = info64.pages_swapped_out;
    info->pages_dirtied = info64.pages_dirtied;
    info->ref_count = info64.ref_count;
    info->shadow_depth = info64.shadow_depth;
    info->external_pager = info64.external_pager;
    info->share_mode = info64.share_mode;
    info->is_submap = info64.is_submap;
    info->behavior = info64.behavior;
    info->object_id = info64.object_id;
    info->user_wired_count = info64.user_wired_count;

    *address = CAST_DOWN(vm_address_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);
    *infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
        return KERN_INVALID_ADDRESS;
    }
    return kr;
}
kern_return_t
mach_vm_purgable_control(
    vm_map_t           map,
    mach_vm_offset_t   address,
    vm_purgable_t      control,
    int                *state)
{
    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
        /* not allowed from user-space */
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_purgable_control(map,
        vm_map_trunc_page(address, PAGE_MASK),
        control,
        state);
}
kern_return_t
vm_purgable_control(
    vm_map_t        map,
    vm_offset_t     address,
    vm_purgable_t   control,
    int             *state)
{
    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
        /* not allowed from user-space */
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_purgable_control(map,
        vm_map_trunc_page(address, PAGE_MASK),
        control,
        state);
}
/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host priv port).  Set this variable to zero if
 *	you want to let any application allocate CPM.
 */
unsigned int vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
kern_return_t
vm_allocate_cpm(
    host_priv_t    host_priv,
    vm_map_t       map,
    vm_address_t   *addr,
    vm_size_t      size,
    int            flags)
{
    vm_map_address_t  map_addr;
    vm_map_size_t     map_size;
    kern_return_t     kr;

    if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) {
        return KERN_INVALID_HOST;
    }

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_address_t)*addr;
    map_size = (vm_map_size_t)size;

    kr = vm_map_enter_cpm(map,
        &map_addr,
        map_size,
        flags);

    *addr = CAST_DOWN(vm_address_t, map_addr);
    return kr;
}
kern_return_t
mach_vm_page_query(
    vm_map_t           map,
    mach_vm_offset_t   offset,
    int                *disposition,
    int                *ref_count)
{
    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_page_query_internal(
        map,
        vm_map_trunc_page(offset, PAGE_MASK),
        disposition, ref_count);
}

kern_return_t
vm_map_page_query(
    vm_map_t      map,
    vm_offset_t   offset,
    int           *disposition,
    int           *ref_count)
{
    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_page_query_internal(
        map,
        vm_map_trunc_page(offset, PAGE_MASK),
        disposition, ref_count);
}
kern_return_t
mach_vm_page_range_query(
    vm_map_t            map,
    mach_vm_offset_t    address,
    mach_vm_size_t      size,
    mach_vm_address_t   dispositions_addr,
    mach_vm_size_t      *dispositions_count)
{
    kern_return_t            kr = KERN_SUCCESS;
    int                      num_pages = 0, i = 0;
    mach_vm_size_t           curr_sz = 0, copy_sz = 0;
    mach_vm_size_t           disp_buf_req_size = 0, disp_buf_total_size = 0;
    mach_msg_type_number_t   count = 0;

    void                     *info = NULL;
    void                     *local_disp = NULL;
    vm_map_size_t            info_size = 0, local_disp_size = 0;
    mach_vm_offset_t         start = 0, end = 0;

    if (map == VM_MAP_NULL || dispositions_count == NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    disp_buf_req_size = (*dispositions_count * sizeof(int));
    start = mach_vm_trunc_page(address);
    end = mach_vm_round_page(address + size);

    if (end < start) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((end - start) < size) {
        /*
         * Aligned size is less than unaligned size.
         */
        return KERN_INVALID_ARGUMENT;
    }

    if (disp_buf_req_size == 0 || (end == start)) {
        return KERN_SUCCESS;
    }

    /*
     * For large requests, we will go through them
     * MAX_PAGE_RANGE_QUERY chunk at a time.
     */

    curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY);
    num_pages = (int) (curr_sz >> PAGE_SHIFT);

    info_size = num_pages * sizeof(vm_page_info_basic_data_t);
    info = kalloc(info_size);

    if (info == NULL) {
        return KERN_RESOURCE_SHORTAGE;
    }

    local_disp_size = num_pages * sizeof(int);
    local_disp = kalloc(local_disp_size);

    if (local_disp == NULL) {
        kfree(info, info_size);
        info = NULL;
        return KERN_RESOURCE_SHORTAGE;
    }

    while (size) {
        count = VM_PAGE_INFO_BASIC_COUNT;
        kr = vm_map_page_range_info_internal(
            map,
            start,
            mach_vm_round_page(start + curr_sz),
            VM_PAGE_INFO_BASIC,
            (vm_page_info_t) info,
            &count);

        assert(kr == KERN_SUCCESS);

        for (i = 0; i < num_pages; i++) {
            ((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition;
        }

        copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */);
        kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz);

        start += curr_sz;
        disp_buf_req_size -= copy_sz;
        disp_buf_total_size += copy_sz;

        if (kr != 0) {
            break;
        }

        if ((disp_buf_req_size == 0) || (curr_sz >= size)) {
            /*
             * We might have inspected the full range OR
             * more than it esp. if the user passed in
             * non-page aligned start/size and/or if we
             * descended into a submap. We are done here.
             */
            size = 0;
        } else {
            dispositions_addr += copy_sz;

            size -= curr_sz;

            curr_sz = MIN(mach_vm_round_page(size), MAX_PAGE_RANGE_QUERY);
            num_pages = (int)(curr_sz >> PAGE_SHIFT);
        }
    }

    *dispositions_count = disp_buf_total_size / sizeof(int);

    kfree(local_disp, local_disp_size);
    local_disp = NULL;

    kfree(info, info_size);
    info = NULL;

    return kr;
}
kern_return_t
mach_vm_page_info(
    vm_map_t                 map,
    mach_vm_address_t        address,
    vm_page_info_flavor_t    flavor,
    vm_page_info_t           info,
    mach_msg_type_number_t   *count)
{
    kern_return_t kr;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_page_info(map, address, flavor, info, count);
    return kr;
}
/* map a (whole) upl into an address space */
kern_return_t
vm_upl_map(
    vm_map_t       map,
    upl_t          upl,
    vm_address_t   *dst_addr)
{
    vm_map_offset_t  map_addr;
    kern_return_t    kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_enter_upl(map, upl, &map_addr);
    *dst_addr = CAST_DOWN(vm_address_t, map_addr);
    return kr;
}

kern_return_t
vm_upl_unmap(
    vm_map_t   map,
    upl_t      upl)
{
    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_remove_upl(map, upl);
}
/* Retrieve a upl for an object underlying an address range in a map */

kern_return_t
vm_map_get_upl(
    vm_map_t                map,
    vm_map_offset_t         map_offset,
    upl_size_t              *upl_size,
    upl_t                   *upl,
    upl_page_info_array_t   page_list,
    unsigned int            *count,
    upl_control_flags_t     *flags,
    vm_tag_t                tag,
    int                     force_data_sync)
{
    upl_control_flags_t  map_flags;
    kern_return_t        kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_flags = *flags & ~UPL_NOZEROFILL;
    if (force_data_sync) {
        map_flags |= UPL_FORCE_DATA_SYNC;
    }

    kr = vm_map_create_upl(map,
        map_offset,
        upl_size,
        upl,
        page_list,
        count,
        &map_flags,
        tag);

    *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
    return kr;
}
#if CONFIG_EMBEDDED
extern int proc_selfpid(void);
extern char *proc_name_address(void *p);
int cs_executable_mem_entry = 0;
int log_executable_mem_entry = 0;
#endif /* CONFIG_EMBEDDED */
/*
 * mach_make_memory_entry_64
 *
 * Think of it as a two-stage vm_remap() operation.  First
 * you get a handle.  Second, you map that handle somewhere
 * else.  Rather than doing it all at once (and without
 * needing access to the other whole map).
 */
kern_return_t
mach_make_memory_entry_64(
    vm_map_t                 target_map,
    memory_object_size_t     *size,
    memory_object_offset_t   offset,
    vm_prot_t                permission,
    ipc_port_t               *object_handle,
    ipc_port_t               parent_handle)
{
    vm_named_entry_kernel_flags_t vmne_kflags;

    if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) {
        /*
         * Unknown flag: reject for forward compatibility.
         */
        return KERN_INVALID_VALUE;
    }

    vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
    if (permission & MAP_MEM_LEDGER_TAGGED) {
        vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT;
    }
    return mach_make_memory_entry_internal(target_map,
               size,
               offset,
               permission,
               vmne_kflags,
               object_handle,
               parent_handle);
}
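/*
 * Illustrative user-space sketch of the two-stage operation described above
 * (not part of this file; assumes the <mach/mach.h> and <mach/mach_vm.h>
 * stubs, and that "addr"/"len" is an existing mapping in the caller): first
 * make a named entry for the range, then map that handle at a second address.
 *
 *      memory_object_size_t entry_size = len;
 *      mach_port_t entry = MACH_PORT_NULL;
 *      mach_vm_address_t alias = 0;
 *
 *      kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, addr,
 *          VM_PROT_READ | VM_PROT_WRITE, &entry, MACH_PORT_NULL);
 *      if (kr == KERN_SUCCESS) {
 *              kr = mach_vm_map(mach_task_self(), &alias, entry_size, 0,
 *                  VM_FLAGS_ANYWHERE, entry, 0, FALSE,
 *                  VM_PROT_READ | VM_PROT_WRITE,
 *                  VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
 *              mach_port_deallocate(mach_task_self(), entry);
 *      }
 */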
2313 mach_make_memory_entry_internal(
2314 vm_map_t target_map
,
2315 memory_object_size_t
*size
,
2316 memory_object_offset_t offset
,
2317 vm_prot_t permission
,
2318 vm_named_entry_kernel_flags_t vmne_kflags
,
2319 ipc_port_t
*object_handle
,
2320 ipc_port_t parent_handle
)
2322 vm_map_version_t version
;
2323 vm_named_entry_t parent_entry
;
2324 vm_named_entry_t user_entry
;
2325 ipc_port_t user_handle
;
2329 /* needed for call to vm_map_lookup_locked */
2332 vm_object_offset_t obj_off
;
2334 struct vm_object_fault_info fault_info
= {};
2336 vm_object_t shadow_object
;
2338 /* needed for direct map entry manipulation */
2339 vm_map_entry_t map_entry
;
2340 vm_map_entry_t next_entry
;
2342 vm_map_t original_map
= target_map
;
2343 vm_map_size_t total_size
, map_size
;
2344 vm_map_offset_t map_start
, map_end
;
2345 vm_map_offset_t local_offset
;
2346 vm_object_size_t mappable_size
;
2349 * Stash the offset in the page for use by vm_map_enter_mem_object()
2350 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
2352 vm_object_offset_t offset_in_page
;
2354 unsigned int access
;
2355 vm_prot_t protections
;
2356 vm_prot_t original_protections
, mask_protections
;
2357 unsigned int wimg_mode
;
2359 boolean_t force_shadow
= FALSE
;
2360 boolean_t use_data_addr
;
2361 boolean_t use_4K_compat
;
2362 #if VM_NAMED_ENTRY_LIST
2364 #endif /* VM_NAMED_ENTRY_LIST */
2366 if ((permission
& MAP_MEM_FLAGS_MASK
) & ~MAP_MEM_FLAGS_ALL
) {
2368 * Unknown flag: reject for forward compatibility.
2370 return KERN_INVALID_VALUE
;
2373 if (IP_VALID(parent_handle
) &&
2374 ip_kotype(parent_handle
) == IKOT_NAMED_ENTRY
) {
2375 parent_entry
= (vm_named_entry_t
) ip_get_kobject(parent_handle
);
2377 parent_entry
= NULL
;
2380 if (parent_entry
&& parent_entry
->is_copy
) {
2381 return KERN_INVALID_ARGUMENT
;
2384 original_protections
= permission
& VM_PROT_ALL
;
2385 protections
= original_protections
;
2386 mask_protections
= permission
& VM_PROT_IS_MASK
;
2387 access
= GET_MAP_MEM(permission
);
2388 use_data_addr
= ((permission
& MAP_MEM_USE_DATA_ADDR
) != 0);
2389 use_4K_compat
= ((permission
& MAP_MEM_4K_DATA_ADDR
) != 0);
2391 user_handle
= IP_NULL
;
2394 map_start
= vm_map_trunc_page(offset
, PAGE_MASK
);
2396 if (permission
& MAP_MEM_ONLY
) {
2397 boolean_t parent_is_object
;
2399 map_end
= vm_map_round_page(offset
+ *size
, PAGE_MASK
);
2400 map_size
= map_end
- map_start
;
2402 if (use_data_addr
|| use_4K_compat
|| parent_entry
== NULL
) {
2403 return KERN_INVALID_ARGUMENT
;
2406 parent_is_object
= !parent_entry
->is_sub_map
;
2407 object
= parent_entry
->backing
.object
;
2408 if (parent_is_object
&& object
!= VM_OBJECT_NULL
) {
2409 wimg_mode
= object
->wimg_bits
;
2411 wimg_mode
= VM_WIMG_USE_DEFAULT
;
2413 if ((access
!= GET_MAP_MEM(parent_entry
->protection
)) &&
2414 !(parent_entry
->protection
& VM_PROT_WRITE
)) {
2415 return KERN_INVALID_RIGHT
;
2417 vm_prot_to_wimg(access
, &wimg_mode
);
2418 if (access
!= MAP_MEM_NOOP
) {
2419 SET_MAP_MEM(access
, parent_entry
->protection
);
2421 if (parent_is_object
&& object
&&
2422 (access
!= MAP_MEM_NOOP
) &&
2423 (!(object
->nophyscache
))) {
2424 if (object
->wimg_bits
!= wimg_mode
) {
2425 vm_object_lock(object
);
2426 vm_object_change_wimg_mode(object
, wimg_mode
);
2427 vm_object_unlock(object
);
2430 if (object_handle
) {
2431 *object_handle
= IP_NULL
;
2433 return KERN_SUCCESS
;
2434 } else if (permission
& MAP_MEM_NAMED_CREATE
) {
2435 int ledger_flags
= 0;
2438 map_end
= vm_map_round_page(offset
+ *size
, PAGE_MASK
);
2439 map_size
= map_end
- map_start
;
2441 if (use_data_addr
|| use_4K_compat
) {
2442 return KERN_INVALID_ARGUMENT
;
2445 kr
= mach_memory_entry_allocate(&user_entry
, &user_handle
);
2446 if (kr
!= KERN_SUCCESS
) {
2447 return KERN_FAILURE
;
2451 * Force the creation of the VM object now.
2453 if (map_size
> (vm_map_size_t
) ANON_MAX_SIZE
) {
2455 * LP64todo - for now, we can only allocate 4GB-4096
2456 * internal objects because the default pager can't
2457 * page bigger ones. Remove this when it can.
2463 object
= vm_object_allocate(map_size
);
2464 assert(object
!= VM_OBJECT_NULL
);
2468 * We use this path when we want to make sure that
2469 * nobody messes with the object (coalesce, for
2470 * example) before we map it.
2471 * We might want to use these objects for transposition via
2472 * vm_object_transpose() too, so we don't want any copy or
2473 * shadow objects either...
2475 object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
2476 object
->true_share
= TRUE
;
2478 owner
= current_task();
2479 if ((permission
& MAP_MEM_PURGABLE
) ||
2480 vmne_kflags
.vmnekf_ledger_tag
) {
2481 assert(object
->vo_owner
== NULL
);
2482 assert(object
->resident_page_count
== 0);
2483 assert(object
->wired_page_count
== 0);
2484 assert(owner
!= TASK_NULL
);
2485 if (vmne_kflags
.vmnekf_ledger_no_footprint
) {
2486 ledger_flags
|= VM_LEDGER_FLAG_NO_FOOTPRINT
;
2487 object
->vo_no_footprint
= TRUE
;
2489 if (permission
& MAP_MEM_PURGABLE
) {
2490 if (!(permission
& VM_PROT_WRITE
)) {
2491 /* if we can't write, we can't purge */
2492 vm_object_deallocate(object
);
2493 kr
= KERN_INVALID_ARGUMENT
;
2496 object
->purgable
= VM_PURGABLE_NONVOLATILE
;
2497 if (permission
& MAP_MEM_PURGABLE_KERNEL_ONLY
) {
2498 object
->purgeable_only_by_kernel
= TRUE
;
2501 if (owner
->task_legacy_footprint
) {
2503 * For ios11, we failed to account for
2504 * this memory. Keep doing that for
2505 * legacy apps (built before ios12),
2506 * for backwards compatibility's sake...
2508 owner
= kernel_task
;
2510 #endif /* __arm64__ */
2511 vm_object_lock(object
);
2512 vm_purgeable_nonvolatile_enqueue(object
, owner
);
2513 vm_object_unlock(object
);
2517 if (vmne_kflags
.vmnekf_ledger_tag
) {
2519 * Bill this object to the current task's
2520 * ledgers for the given tag.
2522 if (vmne_kflags
.vmnekf_ledger_no_footprint
) {
2523 ledger_flags
|= VM_LEDGER_FLAG_NO_FOOTPRINT
;
2525 vm_object_lock(object
);
2526 object
->vo_ledger_tag
= vmne_kflags
.vmnekf_ledger_tag
;
2527 kr
= vm_object_ownership_change(
2529 vmne_kflags
.vmnekf_ledger_tag
,
2530 owner
, /* new owner */
2532 FALSE
); /* task_objq locked? */
2533 vm_object_unlock(object
);
2534 if (kr
!= KERN_SUCCESS
) {
2535 vm_object_deallocate(object
);
2540 #if CONFIG_SECLUDED_MEMORY
2541 if (secluded_for_iokit
&& /* global boot-arg */
2542 ((permission
& MAP_MEM_GRAB_SECLUDED
)
2544 /* XXX FBDP for my testing only */
2545 || (secluded_for_fbdp
&& map_size
== 97550336)
2549 if (!(permission
& MAP_MEM_GRAB_SECLUDED
) &&
2550 secluded_for_fbdp
) {
2551 printf("FBDP: object %p size %lld can grab secluded\n", object
, (uint64_t) map_size
);
2554 object
->can_grab_secluded
= TRUE
;
2555 assert(!object
->eligible_for_secluded
);
2557 #endif /* CONFIG_SECLUDED_MEMORY */
2560 * The VM object is brand new and nobody else knows about it,
2561 * so we don't need to lock it.
2564 wimg_mode
= object
->wimg_bits
;
2565 vm_prot_to_wimg(access
, &wimg_mode
);
2566 if (access
!= MAP_MEM_NOOP
) {
2567 object
->wimg_bits
= wimg_mode
;
2570 /* the object has no pages, so no WIMG bits to update here */
2572 user_entry
->backing
.object
= object
;
2573 user_entry
->internal
= TRUE
;
2574 user_entry
->is_sub_map
= FALSE
;
2575 user_entry
->offset
= 0;
2576 user_entry
->data_offset
= 0;
2577 user_entry
->protection
= protections
;
2578 SET_MAP_MEM(access
, user_entry
->protection
);
2579 user_entry
->size
= map_size
;
2581 /* user_object pager and internal fields are not used */
2582 /* when the object field is filled in. */
2584 *size
= CAST_DOWN(vm_size_t
, (user_entry
->size
-
2585 user_entry
->data_offset
));
2586 *object_handle
= user_handle
;
2587 return KERN_SUCCESS
;
2590 if (permission
& MAP_MEM_VM_COPY
) {
2593 if (target_map
== VM_MAP_NULL
) {
2594 return KERN_INVALID_TASK
;
2597 map_end
= vm_map_round_page(offset
+ *size
, PAGE_MASK
);
2598 map_size
= map_end
- map_start
;
2599 if (use_data_addr
|| use_4K_compat
) {
2600 offset_in_page
= offset
- map_start
;
2601 if (use_4K_compat
) {
2602 offset_in_page
&= ~((signed)(0xFFF));
2608 kr
= vm_map_copyin_internal(target_map
,
2611 VM_MAP_COPYIN_ENTRY_LIST
,
2613 if (kr
!= KERN_SUCCESS
) {
2617 kr
= mach_memory_entry_allocate(&user_entry
, &user_handle
);
2618 if (kr
!= KERN_SUCCESS
) {
2619 vm_map_copy_discard(copy
);
2620 return KERN_FAILURE
;
2623 user_entry
->backing
.copy
= copy
;
2624 user_entry
->internal
= FALSE
;
2625 user_entry
->is_sub_map
= FALSE
;
2626 user_entry
->is_copy
= TRUE
;
2627 user_entry
->offset
= 0;
2628 user_entry
->protection
= protections
;
2629 user_entry
->size
= map_size
;
2630 user_entry
->data_offset
= offset_in_page
;
2632 *size
= CAST_DOWN(vm_size_t
, (user_entry
->size
-
2633 user_entry
->data_offset
));
2634 *object_handle
= user_handle
;
2635 return KERN_SUCCESS
;
    if (permission & MAP_MEM_VM_SHARE) {
        vm_map_copy_t copy;
        vm_prot_t cur_prot, max_prot;

        if (target_map == VM_MAP_NULL) {
            return KERN_INVALID_TASK;
        }

        map_end = vm_map_round_page(offset + *size, PAGE_MASK);
        map_size = map_end - map_start;
        if (use_data_addr || use_4K_compat) {
            offset_in_page = offset - map_start;
            if (use_4K_compat) {
                offset_in_page &= ~((signed)(0xFFF));
            }
        } else {
            offset_in_page = 0;
        }

        cur_prot = VM_PROT_ALL;
        kr = vm_map_copy_extract(target_map,
            map_start,
            map_size,
            &copy,
            &cur_prot,
            &max_prot);
        if (kr != KERN_SUCCESS) {
            return kr;
        }

        if (mask_protections) {
            /*
             * We just want as much of "original_protections"
             * as we can get out of the actual "cur_prot".
             */
            protections &= cur_prot;
            if (protections == VM_PROT_NONE) {
                /* no access at all: fail */
                vm_map_copy_discard(copy);
                return KERN_PROTECTION_FAILURE;
            }
        } else {
            /*
             * We want exactly "original_protections"
             * out of "cur_prot".
             */
            if ((cur_prot & protections) != protections) {
                vm_map_copy_discard(copy);
                return KERN_PROTECTION_FAILURE;
            }
        }

        kr = mach_memory_entry_allocate(&user_entry, &user_handle);
        if (kr != KERN_SUCCESS) {
            vm_map_copy_discard(copy);
            return KERN_FAILURE;
        }

        user_entry->backing.copy = copy;
        user_entry->internal = FALSE;
        user_entry->is_sub_map = FALSE;
        user_entry->is_copy = TRUE;
        user_entry->offset = 0;
        user_entry->protection = protections;
        user_entry->size = map_size;
        user_entry->data_offset = offset_in_page;

        *size = CAST_DOWN(vm_size_t, (user_entry->size -
            user_entry->data_offset));
        *object_handle = user_handle;
        return KERN_SUCCESS;
    }
    if (parent_entry == NULL ||
        (permission & MAP_MEM_NAMED_REUSE)) {
        map_end = vm_map_round_page(offset + *size, PAGE_MASK);
        map_size = map_end - map_start;
        if (use_data_addr || use_4K_compat) {
            offset_in_page = offset - map_start;
            if (use_4K_compat) {
                offset_in_page &= ~((signed)(0xFFF));
            }
        } else {
            offset_in_page = 0;
        }

        /* Create a named object based on address range within the task map */
        /* Go find the object at given address */

        if (target_map == VM_MAP_NULL) {
            return KERN_INVALID_TASK;
        }

redo_lookup:
        protections = original_protections;
        vm_map_lock_read(target_map);

        /* get the object associated with the target address */
        /* note we check the permission of the range against */
        /* that requested by the caller */

        kr = vm_map_lookup_locked(&target_map, map_start,
            protections | mask_protections,
            OBJECT_LOCK_EXCLUSIVE, &version,
            &object, &obj_off, &prot, &wired,
            &fault_info,
            &real_map);
        if (kr != KERN_SUCCESS) {
            vm_map_unlock_read(target_map);
            goto make_mem_done;
        }
        if (mask_protections) {
            /*
             * The caller asked us to use the "protections" as
             * a mask, so restrict "protections" to what this
             * mapping actually allows.
             */
            protections &= prot;
        }
#if CONFIG_EMBEDDED
        /*
         * Wiring would copy the pages to a shadow object.
         * The shadow object would not be code-signed so
         * attempting to execute code from these copied pages
         * would trigger a code-signing violation.
         */
        if (prot & VM_PROT_EXECUTE) {
            if (log_executable_mem_entry) {
                void *bsd_info;
                bsd_info = current_task()->bsd_info;
                printf("pid %d[%s] making memory entry out of "
                    "executable range from 0x%llx to 0x%llx:"
                    "might cause code-signing issues "
                    "later\n",
                    proc_selfpid(),
                    (bsd_info != NULL
                    ? proc_name_address(bsd_info)
                    : "?"),
                    (uint64_t) map_start,
                    (uint64_t) map_end);
            }
            DTRACE_VM2(cs_executable_mem_entry,
                uint64_t, (uint64_t)map_start,
                uint64_t, (uint64_t)map_end);
            cs_executable_mem_entry++;

#if 11
            /*
             * We don't know how the memory entry will be used.
             * It might never get wired and might not cause any
             * trouble, so let's not reject this request...
             */
#else /* 11 */
            kr = KERN_PROTECTION_FAILURE;
            vm_object_unlock(object);
            vm_map_unlock_read(target_map);
            if (real_map != target_map) {
                vm_map_unlock_read(real_map);
            }
            goto make_mem_done;
#endif /* 11 */
        }
#endif /* CONFIG_EMBEDDED */
        if (((prot & protections) != protections)
            || (object == kernel_object)) {
            kr = KERN_INVALID_RIGHT;
            vm_object_unlock(object);
            vm_map_unlock_read(target_map);
            if (real_map != target_map) {
                vm_map_unlock_read(real_map);
            }
            if (object == kernel_object) {
                printf("Warning: Attempt to create a named"
                    " entry from the kernel_object\n");
            }
            goto make_mem_done;
        }

        /* We have an object, now check to see if this object */
        /* is suitable.  If not, create a shadow and share that */

        /*
         * We have to unlock the VM object to avoid deadlocking with
         * a VM map lock (the lock ordering is map, the object), if we
         * need to modify the VM map to create a shadow object.  Since
         * we might release the VM map lock below anyway, we have
         * to release the VM map lock now.
         * XXX FBDP There must be a way to avoid this double lookup...
         *
         * Take an extra reference on the VM object to make sure it's
         * not going to disappear.
         */
        vm_object_reference_locked(object); /* extra ref to hold obj */
        vm_object_unlock(object);

        local_map = original_map;
        local_offset = map_start;
        if (target_map != local_map) {
            vm_map_unlock_read(target_map);
            if (real_map != target_map) {
                vm_map_unlock_read(real_map);
            }
            vm_map_lock_read(local_map);
            target_map = local_map;
            real_map = local_map;
        }
        while (TRUE) {
            if (!vm_map_lookup_entry(local_map,
                local_offset, &map_entry)) {
                kr = KERN_INVALID_ARGUMENT;
                vm_map_unlock_read(target_map);
                if (real_map != target_map) {
                    vm_map_unlock_read(real_map);
                }
                vm_object_deallocate(object); /* release extra ref */
                object = VM_OBJECT_NULL;
                goto make_mem_done;
            }
            iskernel = (local_map->pmap == kernel_pmap);
            if (!(map_entry->is_sub_map)) {
                if (VME_OBJECT(map_entry) != object) {
                    kr = KERN_INVALID_ARGUMENT;
                    vm_map_unlock_read(target_map);
                    if (real_map != target_map) {
                        vm_map_unlock_read(real_map);
                    }
                    vm_object_deallocate(object); /* release extra ref */
                    object = VM_OBJECT_NULL;
                    goto make_mem_done;
                }
                break;
            } else {
                vm_map_t tmap;
                tmap = local_map;
                local_map = VME_SUBMAP(map_entry);

                vm_map_lock_read(local_map);
                vm_map_unlock_read(tmap);
                target_map = local_map;
                real_map = local_map;
                local_offset = local_offset - map_entry->vme_start;
                local_offset += VME_OFFSET(map_entry);
            }
        }

#if VM_NAMED_ENTRY_LIST
        alias = VME_ALIAS(map_entry);
#endif /* VM_NAMED_ENTRY_LIST */
        /*
         * We found the VM map entry, lock the VM object again.
         */
        vm_object_lock(object);
        if (map_entry->wired_count) {
            /* JMM - The check below should be reworked instead. */
            object->true_share = TRUE;
        }
        if (mask_protections) {
            /*
             * The caller asked us to use the "protections" as
             * a mask, so restrict "protections" to what this
             * mapping actually allows.
             */
            protections &= map_entry->max_protection;
        }
        if (((map_entry->max_protection) & protections) != protections) {
            kr = KERN_INVALID_RIGHT;
            vm_object_unlock(object);
            vm_map_unlock_read(target_map);
            if (real_map != target_map) {
                vm_map_unlock_read(real_map);
            }
            vm_object_deallocate(object);
            object = VM_OBJECT_NULL;
            goto make_mem_done;
        }
        mappable_size = fault_info.hi_offset - obj_off;
        total_size = map_entry->vme_end - map_entry->vme_start;
        if (map_size > mappable_size) {
            /* try to extend mappable size if the entries */
            /* following are from the same object and are */
            /* compatible */
            next_entry = map_entry->vme_next;
            /* let's see if the next map entry is still   */
            /* pointing at this object and is contiguous  */
            while (map_size > mappable_size) {
                if ((VME_OBJECT(next_entry) == object) &&
                    (next_entry->vme_start ==
                    next_entry->vme_prev->vme_end) &&
                    (VME_OFFSET(next_entry) ==
                    (VME_OFFSET(next_entry->vme_prev) +
                    (next_entry->vme_prev->vme_end -
                    next_entry->vme_prev->vme_start)))) {
                    if (mask_protections) {
                        /*
                         * The caller asked us to use
                         * the "protections" as a mask,
                         * so restrict "protections" to
                         * what this mapping actually
                         * allows.
                         */
                        protections &= next_entry->max_protection;
                    }
                    if ((next_entry->wired_count) &&
                        (map_entry->wired_count == 0)) {
                        break;
                    }
                    if (((next_entry->max_protection)
                        & protections) != protections) {
                        break;
                    }
                    if (next_entry->needs_copy !=
                        map_entry->needs_copy) {
                        break;
                    }
                    mappable_size += next_entry->vme_end
                        - next_entry->vme_start;
                    total_size += next_entry->vme_end
                        - next_entry->vme_start;
                    next_entry = next_entry->vme_next;
                } else {
                    break;
                }
            }
        }
        /* vm_map_entry_should_cow_for_true_share() checks for malloc tags,
         * never true in kernel */
        if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) &&
            object->vo_size > map_size &&
            map_size != 0) {
            /*
             * Set up the targeted range for copy-on-write to
             * limit the impact of "true_share"/"copy_delay" to
             * that range instead of the entire VM object...
             */

            vm_object_unlock(object);
            if (vm_map_lock_read_to_write(target_map)) {
                vm_object_deallocate(object);
                target_map = original_map;
                goto redo_lookup;
            }

            vm_map_clip_start(target_map,
                map_entry,
                vm_map_trunc_page(map_start,
                VM_MAP_PAGE_MASK(target_map)));
            vm_map_clip_end(target_map,
                map_entry,
                (vm_map_round_page(map_end,
                VM_MAP_PAGE_MASK(target_map))));
            force_shadow = TRUE;

            if ((map_entry->vme_end - offset) < map_size) {
                map_size = map_entry->vme_end - map_start;
            }
            total_size = map_entry->vme_end - map_entry->vme_start;

            vm_map_lock_write_to_read(target_map);
            vm_object_lock(object);
        }
        if (object->internal) {
            /* vm_map_lookup_locked will create a shadow if   */
            /* needs_copy is set but does not check for the   */
            /* other two conditions shown. It is important to */
            /* set up an object which will not be pulled from */
            /* under us.  */

            if (force_shadow ||
                ((map_entry->needs_copy ||
                object->shadowed ||
                (object->vo_size > total_size &&
                (VME_OFFSET(map_entry) != 0 ||
                object->vo_size >
                vm_map_round_page(total_size,
                VM_MAP_PAGE_MASK(target_map)))))
                && !object->true_share
                && object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)) {
                /*
                 * We have to unlock the VM object before
                 * trying to upgrade the VM map lock, to
                 * honor lock ordering (map then object).
                 * Otherwise, we would deadlock if another
                 * thread holds a read lock on the VM map and
                 * is trying to acquire the VM object's lock.
                 * We still hold an extra reference on the
                 * VM object, guaranteeing that it won't
                 * disappear.
                 */
                vm_object_unlock(object);

                if (vm_map_lock_read_to_write(target_map)) {
                    /*
                     * We couldn't upgrade our VM map lock
                     * from "read" to "write" and we lost
                     * our "read" lock.
                     * Start all over again...
                     */
                    vm_object_deallocate(object); /* extra ref */
                    target_map = original_map;
                    goto redo_lookup;
                }
#if 00
                vm_object_lock(object);
#endif

                /*
                 * JMM - We need to avoid coming here when the object
                 * is wired by anybody, not just the current map.  Why
                 * couldn't we use the standard vm_object_copy_quickly()
                 * approach here?
                 */

                /* create a shadow object */
                VME_OBJECT_SHADOW(map_entry, total_size);
                shadow_object = VME_OBJECT(map_entry);
#if 00
                vm_object_unlock(object);
#endif

                prot = map_entry->protection & ~VM_PROT_WRITE;

                if (override_nx(target_map,
                    VME_ALIAS(map_entry))
                    && prot) {
                    prot |= VM_PROT_EXECUTE;
                }

                vm_object_pmap_protect(
                    object, VME_OFFSET(map_entry),
                    total_size,
                    ((map_entry->is_shared
                    || target_map->mapped_in_other_pmaps)
                    ? PMAP_NULL :
                    target_map->pmap),
                    map_entry->vme_start,
                    prot);
                total_size -= (map_entry->vme_end
                    - map_entry->vme_start);
                next_entry = map_entry->vme_next;
                map_entry->needs_copy = FALSE;

                vm_object_lock(shadow_object);
                while (total_size) {
                    assert((next_entry->wired_count == 0) ||
                        (map_entry->wired_count));

                    if (VME_OBJECT(next_entry) == object) {
                        vm_object_reference_locked(shadow_object);
                        VME_OBJECT_SET(next_entry,
                            shadow_object);
                        vm_object_deallocate(object);
                        VME_OFFSET_SET(
                            next_entry,
                            (VME_OFFSET(next_entry->vme_prev) +
                            (next_entry->vme_prev->vme_end
                            - next_entry->vme_prev->vme_start)));
                        next_entry->use_pmap = TRUE;
                        next_entry->needs_copy = FALSE;
                    } else {
                        panic("mach_make_memory_entry_64:"
                            " map entries out of sync\n");
                    }
                    total_size -= next_entry->vme_end
                        - next_entry->vme_start;
                    next_entry = next_entry->vme_next;
                }

                /*
                 * Transfer our extra reference to the
                 * shadow object.
                 */
                vm_object_reference_locked(shadow_object);
                vm_object_deallocate(object); /* extra ref */
                object = shadow_object;

                obj_off = ((local_offset - map_entry->vme_start)
                    + VME_OFFSET(map_entry));

                vm_map_lock_write_to_read(target_map);
            }
        }
        /* note: in the future we can (if necessary) allow for  */
        /* memory object lists, this will better support        */
        /* fragmentation, but is it necessary?  The user should */
        /* be encouraged to create address space oriented       */
        /* shared objects from CLEAN memory regions which have  */
        /* a known and defined history.  i.e. no inheritance    */
        /* share, make this call before making the region the   */
        /* target of ipc's, etc.  The code above, protecting    */
        /* against delayed copy, etc. is mostly defensive.      */

        wimg_mode = object->wimg_bits;
        if (!(object->nophyscache)) {
            vm_prot_to_wimg(access, &wimg_mode);
        }

#if VM_OBJECT_TRACKING_OP_TRUESHARE
        if (!object->true_share &&
            vm_object_tracking_inited) {
            void *bt[VM_OBJECT_TRACKING_BTDEPTH];
            int num = 0;

            num = OSBacktrace(bt,
                VM_OBJECT_TRACKING_BTDEPTH);
            btlog_add_entry(vm_object_tracking_btlog,
                object,
                VM_OBJECT_TRACKING_OP_TRUESHARE,
                bt,
                num);
        }
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */

        vm_object_lock_assert_exclusive(object);
        object->true_share = TRUE;
        if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
            object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        }

        /*
         * The memory entry now points to this VM object and we
         * need to hold a reference on the VM object.  Use the extra
         * reference we took earlier to keep the object alive when we
         * had to unlock it.
         */

        vm_map_unlock_read(target_map);
        if (real_map != target_map) {
            vm_map_unlock_read(real_map);
        }

        if (object->wimg_bits != wimg_mode) {
            vm_object_change_wimg_mode(object, wimg_mode);
        }

        /* the size of mapped entry that overlaps with our region */
        /* which is targeted for share.                           */
        /* (entry_end - entry_start) -                            */
        /*                   offset of our beg addr within entry  */
        /* it corresponds to this:                                */

        if (map_size > mappable_size) {
            map_size = mappable_size;
        }
        if (permission & MAP_MEM_NAMED_REUSE) {
            /*
             * Compare what we got with the "parent_entry".
             * If they match, re-use the "parent_entry" instead
             * of creating a new one.
             */
            if (parent_entry != NULL &&
                parent_entry->backing.object == object &&
                parent_entry->internal == object->internal &&
                parent_entry->is_sub_map == FALSE &&
                parent_entry->offset == obj_off &&
                parent_entry->protection == protections &&
                parent_entry->size == map_size &&
                ((!(use_data_addr || use_4K_compat) &&
                (parent_entry->data_offset == 0)) ||
                ((use_data_addr || use_4K_compat) &&
                (parent_entry->data_offset == offset_in_page)))) {
                /*
                 * We have a match: re-use "parent_entry".
                 */
                /* release our extra reference on object */
                vm_object_unlock(object);
                vm_object_deallocate(object);
                /* parent_entry->ref_count++; XXX ? */
                /* Get an extra send-right on handle */
                ipc_port_copy_send(parent_handle);

                *size = CAST_DOWN(vm_size_t,
                    (parent_entry->size -
                    parent_entry->data_offset));
                *object_handle = parent_handle;
                return KERN_SUCCESS;
            }
            /*
             * No match: we need to create a new entry.
             * fall through...
             */
        }

        vm_object_unlock(object);
        if (mach_memory_entry_allocate(&user_entry, &user_handle)
            != KERN_SUCCESS) {
            /* release our unused reference on the object */
            vm_object_deallocate(object);
            return KERN_FAILURE;
        }

        user_entry->backing.object = object;
        user_entry->internal = object->internal;
        user_entry->is_sub_map = FALSE;
        user_entry->offset = obj_off;
        user_entry->data_offset = offset_in_page;
        user_entry->protection = protections;
        SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
        user_entry->size = map_size;
#if VM_NAMED_ENTRY_LIST
        user_entry->named_entry_alias = alias;
#endif /* VM_NAMED_ENTRY_LIST */

        /* user_object pager and internal fields are not used */
        /* when the object field is filled in. */

        *size = CAST_DOWN(vm_size_t, (user_entry->size -
            user_entry->data_offset));
        *object_handle = user_handle;
        return KERN_SUCCESS;
    } else {
        /* The new object will be based on an existing named object */
        if (parent_entry == NULL) {
            kr = KERN_INVALID_ARGUMENT;
            goto make_mem_done;
        }

        if (use_data_addr || use_4K_compat) {
            /*
             * submaps and pagers should only be accessible from within
             * the kernel, which shouldn't use the data address flag, so can fail here.
             */
            if (parent_entry->is_sub_map) {
                panic("Shouldn't be using data address with a parent entry that is a submap.");
            }
            /*
             * Account for offset to data in parent entry and
             * compute our own offset to data.
             */
            if ((offset + *size + parent_entry->data_offset) > parent_entry->size) {
                kr = KERN_INVALID_ARGUMENT;
                goto make_mem_done;
            }

            map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
            offset_in_page = (offset + parent_entry->data_offset) - map_start;
            if (use_4K_compat) {
                offset_in_page &= ~((signed)(0xFFF));
            }
            map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
            map_size = map_end - map_start;
        } else {
            map_end = vm_map_round_page(offset + *size, PAGE_MASK);
            map_size = map_end - map_start;
            offset_in_page = 0;

            if ((offset + map_size) > parent_entry->size) {
                kr = KERN_INVALID_ARGUMENT;
                goto make_mem_done;
            }
        }
        if (mask_protections) {
            /*
             * The caller asked us to use the "protections" as
             * a mask, so restrict "protections" to what this
             * mapping actually allows.
             */
            protections &= parent_entry->protection;
        }
        if ((protections & parent_entry->protection) != protections) {
            kr = KERN_PROTECTION_FAILURE;
            goto make_mem_done;
        }

        if (mach_memory_entry_allocate(&user_entry, &user_handle)
            != KERN_SUCCESS) {
            kr = KERN_FAILURE;
            goto make_mem_done;
        }

        user_entry->size = map_size;
        user_entry->offset = parent_entry->offset + map_start;
        user_entry->data_offset = offset_in_page;
        user_entry->is_sub_map = parent_entry->is_sub_map;
        user_entry->is_copy = parent_entry->is_copy;
        user_entry->internal = parent_entry->internal;
        user_entry->protection = protections;
        if (access != MAP_MEM_NOOP) {
            SET_MAP_MEM(access, user_entry->protection);
        }

        if (parent_entry->is_sub_map) {
            vm_map_t map = parent_entry->backing.map;
            user_entry->backing.map = map;
            lck_mtx_lock(&map->s_lock);
            os_ref_retain_locked(&map->map_refcnt);
            lck_mtx_unlock(&map->s_lock);
        } else {
            object = parent_entry->backing.object;
            assert(object != VM_OBJECT_NULL);
            user_entry->backing.object = object;
            /* we now point to this object, hold on */
            vm_object_lock(object);
            vm_object_reference_locked(object);
#if VM_OBJECT_TRACKING_OP_TRUESHARE
            if (!object->true_share &&
                vm_object_tracking_inited) {
                void *bt[VM_OBJECT_TRACKING_BTDEPTH];
                int num = 0;

                num = OSBacktrace(bt,
                    VM_OBJECT_TRACKING_BTDEPTH);
                btlog_add_entry(vm_object_tracking_btlog,
                    object,
                    VM_OBJECT_TRACKING_OP_TRUESHARE,
                    bt,
                    num);
            }
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */

            object->true_share = TRUE;
            if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
                object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
            }
            vm_object_unlock(object);
        }
        *size = CAST_DOWN(vm_size_t, (user_entry->size -
            user_entry->data_offset));
        *object_handle = user_handle;
        return KERN_SUCCESS;
    }
make_mem_done:
    if (user_handle != IP_NULL) {
        /*
         * Releasing "user_handle" causes the kernel object
         * associated with it ("user_entry" here) to also be
         * released and freed.
         */
        mach_memory_entry_port_release(user_handle);
    }
    return kr;
}
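/*
 * Illustrative sketch (not part of this file): a user-space client of the
 * routine above typically wraps a range of its own address space in a named
 * entry and hands the resulting send right to another task for mapping.
 * Error handling is elided and "local_addr"/"target_task" are hypothetical:
 *
 *      memory_object_size_t    len = vm_page_size;
 *      mach_port_t             entry = MACH_PORT_NULL;
 *      mach_vm_address_t       remote_addr = 0;
 *
 *      kr = mach_make_memory_entry_64(mach_task_self(), &len,
 *          (memory_object_offset_t)local_addr,
 *          VM_PROT_READ | VM_PROT_WRITE, &entry, MACH_PORT_NULL);
 *      kr = mach_vm_map(target_task, &remote_addr, len, 0, VM_FLAGS_ANYWHERE,
 *          entry, 0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
 *          VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
 *      mach_port_deallocate(mach_task_self(), entry);
 */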
kern_return_t
_mach_make_memory_entry(
    vm_map_t                target_map,
    memory_object_size_t    *size,
    memory_object_offset_t  offset,
    vm_prot_t               permission,
    ipc_port_t              *object_handle,
    ipc_port_t              parent_entry)
{
    memory_object_size_t    mo_size;
    kern_return_t           kr;

    mo_size = (memory_object_size_t)*size;
    kr = mach_make_memory_entry_64(target_map, &mo_size,
        (memory_object_offset_t)offset, permission, object_handle,
        parent_entry);
    *size = mo_size;
    return kr;
}

kern_return_t
mach_make_memory_entry(
    vm_map_t                target_map,
    vm_size_t               *size,
    vm_offset_t             offset,
    vm_prot_t               permission,
    ipc_port_t              *object_handle,
    ipc_port_t              parent_entry)
{
    memory_object_size_t    mo_size;
    kern_return_t           kr;

    mo_size = (memory_object_size_t)*size;
    kr = mach_make_memory_entry_64(target_map, &mo_size,
        (memory_object_offset_t)offset, permission, object_handle,
        parent_entry);
    *size = CAST_DOWN(vm_size_t, mo_size);
    return kr;
}
/*
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
    vm_map_t        map,
    boolean_t       must_wire)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    vm_map_lock(map);
    map->wiring_required = (must_wire == TRUE);
    vm_map_unlock(map);

    return KERN_SUCCESS;
}
kern_return_t
vm_map_exec_lockdown(
    vm_map_t        map)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    vm_map_lock(map);
    map->map_disallow_new_exec = TRUE;
    vm_map_unlock(map);

    return KERN_SUCCESS;
}
#if VM_NAMED_ENTRY_LIST
queue_head_t    vm_named_entry_list;
int             vm_named_entry_count = 0;
lck_mtx_t       vm_named_entry_list_lock_data;
lck_mtx_ext_t   vm_named_entry_list_lock_data_ext;
#endif /* VM_NAMED_ENTRY_LIST */

void vm_named_entry_init(void);
void
vm_named_entry_init(void)
{
#if VM_NAMED_ENTRY_LIST
    queue_init(&vm_named_entry_list);
    vm_named_entry_count = 0;
    lck_mtx_init_ext(&vm_named_entry_list_lock_data,
        &vm_named_entry_list_lock_data_ext,
        &vm_object_lck_grp,
        &vm_object_lck_attr);
#endif /* VM_NAMED_ENTRY_LIST */
}
__private_extern__ kern_return_t
mach_memory_entry_allocate(
    vm_named_entry_t        *user_entry_p,
    ipc_port_t              *user_handle_p)
{
    vm_named_entry_t        user_entry;
    ipc_port_t              user_handle;

    user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
    if (user_entry == NULL) {
        return KERN_FAILURE;
    }
    bzero(user_entry, sizeof(*user_entry));

    named_entry_lock_init(user_entry);

    user_entry->backing.object = NULL;
    user_entry->is_sub_map = FALSE;
    user_entry->is_copy = FALSE;
    user_entry->internal = FALSE;
    user_entry->size = 0;
    user_entry->offset = 0;
    user_entry->data_offset = 0;
    user_entry->protection = VM_PROT_NONE;
    user_entry->ref_count = 1;

    user_handle = ipc_kobject_alloc_port((ipc_kobject_t)user_entry,
        IKOT_NAMED_ENTRY,
        IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);

    *user_entry_p = user_entry;
    *user_handle_p = user_handle;

#if VM_NAMED_ENTRY_LIST
    /* keep a loose (no reference) pointer to the Mach port, for debugging only */
    user_entry->named_entry_port = user_handle;
    /* backtrace at allocation time, for debugging only */
    OSBacktrace(&user_entry->named_entry_bt[0],
        NAMED_ENTRY_BT_DEPTH);

    /* add this new named entry to the global list */
    lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
    queue_enter(&vm_named_entry_list, user_entry,
        vm_named_entry_t, named_entry_list);
    vm_named_entry_count++;
    lck_mtx_unlock(&vm_named_entry_list_lock_data);
#endif /* VM_NAMED_ENTRY_LIST */

    return KERN_SUCCESS;
}
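/*
 * Illustrative sketch (an assumption, mirroring the in-kernel callers seen
 * earlier in this file): mach_memory_entry_allocate() hands back a zeroed
 * entry with a send right already made on its port, so callers follow an
 * allocate-then-fill pattern.  "some_object", "some_size" and "handle_out"
 * are placeholders:
 *
 *      vm_named_entry_t        entry;
 *      ipc_port_t              port;
 *
 *      if (mach_memory_entry_allocate(&entry, &port) == KERN_SUCCESS) {
 *              entry->backing.object = some_object;  // entry now owns a ref
 *              entry->size = some_size;
 *              entry->protection = VM_PROT_DEFAULT;
 *              *handle_out = port;
 *      }
 */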
/*
 *	mach_memory_object_memory_entry_64
 *
 *	Create a named entry backed by the provided pager.
 *
 */
kern_return_t
mach_memory_object_memory_entry_64(
    host_t                  host,
    boolean_t               internal,
    vm_object_offset_t      size,
    vm_prot_t               permission,
    memory_object_t         pager,
    ipc_port_t              *entry_handle)
{
    unsigned int            access;
    vm_named_entry_t        user_entry;
    ipc_port_t              user_handle;
    vm_object_t             object;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    if (pager == MEMORY_OBJECT_NULL && internal) {
        object = vm_object_allocate(size);
        if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
            object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        }
    } else {
        object = memory_object_to_vm_object(pager);
        if (object != VM_OBJECT_NULL) {
            vm_object_reference(object);
        }
    }
    if (object == VM_OBJECT_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (mach_memory_entry_allocate(&user_entry, &user_handle)
        != KERN_SUCCESS) {
        vm_object_deallocate(object);
        return KERN_FAILURE;
    }

    user_entry->size = size;
    user_entry->offset = 0;
    user_entry->protection = permission & VM_PROT_ALL;
    access = GET_MAP_MEM(permission);
    SET_MAP_MEM(access, user_entry->protection);
    user_entry->is_sub_map = FALSE;
    assert(user_entry->ref_count == 1);

    user_entry->backing.object = object;
    user_entry->internal = object->internal;
    assert(object->internal == internal);

    *entry_handle = user_handle;
    return KERN_SUCCESS;
}
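/*
 * Illustrative sketch (assumption): a kernel caller can use the routine
 * above to wrap fresh anonymous memory in a named entry by passing
 * MEMORY_OBJECT_NULL with internal == TRUE:
 *
 *      ipc_port_t      handle = IP_NULL;
 *      kern_return_t   kr;
 *
 *      kr = mach_memory_object_memory_entry_64(host, TRUE,
 *          (vm_object_offset_t)PAGE_SIZE, VM_PROT_DEFAULT,
 *          MEMORY_OBJECT_NULL, &handle);
 *
 * where "host" is any non-NULL host port (the routine only rejects
 * HOST_NULL) and "handle" receives a send right on the new entry.
 */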
kern_return_t
mach_memory_object_memory_entry(
    host_t          host,
    boolean_t       internal,
    vm_size_t       size,
    vm_prot_t       permission,
    memory_object_t pager,
    ipc_port_t      *entry_handle)
{
    return mach_memory_object_memory_entry_64( host, internal,
               (vm_object_offset_t)size, permission, pager, entry_handle);
}
kern_return_t
mach_memory_entry_purgable_control(
    ipc_port_t      entry_port,
    vm_purgable_t   control,
    int             *state)
{
    if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
        /* not allowed from user-space */
        return KERN_INVALID_ARGUMENT;
    }

    return memory_entry_purgeable_control_internal(entry_port, control, state);
}
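/*
 * Illustrative sketch (assumption about the user-level MIG wrapper that
 * lands here): the purgeable state of an entry created with
 * MAP_MEM_PURGABLE can be flipped from user space roughly like this:
 *
 *      int state = VM_PURGABLE_VOLATILE;
 *      kr = mach_memory_entry_purgable_control(entry, VM_PURGABLE_SET_STATE,
 *          &state);
 *      ...
 *      state = VM_PURGABLE_NONVOLATILE;
 *      kr = mach_memory_entry_purgable_control(entry, VM_PURGABLE_SET_STATE,
 *          &state);
 *      // "state" now holds the previous state, e.g. VM_PURGABLE_EMPTY if
 *      // the contents were reclaimed while volatile.
 */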
kern_return_t
memory_entry_purgeable_control_internal(
    ipc_port_t      entry_port,
    vm_purgable_t   control,
    int             *state)
{
    kern_return_t           kr;
    vm_named_entry_t        mem_entry;
    vm_object_t             object;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }
    if (control != VM_PURGABLE_SET_STATE &&
        control != VM_PURGABLE_GET_STATE &&
        control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((control == VM_PURGABLE_SET_STATE ||
        control == VM_PURGABLE_SET_STATE_FROM_KERNEL) &&
        (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
        ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    object = mem_entry->backing.object;
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock(object);

    /* check that named entry covers entire object ? */
    if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
        vm_object_unlock(object);
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    named_entry_unlock(mem_entry);

    kr = vm_object_purgable_control(object, control, state);

    vm_object_unlock(object);

    return kr;
}
kern_return_t
mach_memory_entry_access_tracking(
    ipc_port_t      entry_port,
    int             *access_tracking,
    uint32_t        *access_tracking_reads,
    uint32_t        *access_tracking_writes)
{
    return memory_entry_access_tracking_internal(entry_port,
               access_tracking,
               access_tracking_reads,
               access_tracking_writes);
}
kern_return_t
memory_entry_access_tracking_internal(
    ipc_port_t      entry_port,
    int             *access_tracking,
    uint32_t        *access_tracking_reads,
    uint32_t        *access_tracking_writes)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    kern_return_t           kr;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    object = mem_entry->backing.object;
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

#if VM_OBJECT_ACCESS_TRACKING
    vm_object_access_tracking(object,
        access_tracking,
        access_tracking_reads,
        access_tracking_writes);
    kr = KERN_SUCCESS;
#else /* VM_OBJECT_ACCESS_TRACKING */
    (void) access_tracking;
    (void) access_tracking_reads;
    (void) access_tracking_writes;
    kr = KERN_NOT_SUPPORTED;
#endif /* VM_OBJECT_ACCESS_TRACKING */

    named_entry_unlock(mem_entry);

    return kr;
}
kern_return_t
mach_memory_entry_ownership(
    ipc_port_t      entry_port,
    task_t          owner,
    int             ledger_tag,
    int             ledger_flags)
{
    task_t                  cur_task;
    kern_return_t           kr;
    vm_named_entry_t        mem_entry;
    vm_object_t             object;

    cur_task = current_task();
    if (cur_task != kernel_task &&
        (owner != cur_task ||
        (ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) ||
        ledger_tag == VM_LEDGER_TAG_NETWORK)) {
        /*
         * An entitlement is required to:
         * + transfer memory ownership to someone else,
         * + request that the memory not count against the footprint,
         * + tag as "network" (since that implies "no footprint")
         */
        if (!cur_task->task_can_transfer_memory_ownership &&
            IOTaskHasEntitlement(cur_task,
            "com.apple.private.memory.ownership_transfer")) {
            cur_task->task_can_transfer_memory_ownership = TRUE;
        }
        if (!cur_task->task_can_transfer_memory_ownership) {
            return KERN_NO_ACCESS;
        }
    }

    if (ledger_flags & ~VM_LEDGER_FLAGS) {
        return KERN_INVALID_ARGUMENT;
    }
    if (ledger_tag <= 0 ||
        ledger_tag > VM_LEDGER_TAG_MAX) {
        return KERN_INVALID_ARGUMENT;
    }

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }
    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    object = mem_entry->backing.object;
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock(object);

    /* check that named entry covers entire object ? */
    if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
        vm_object_unlock(object);
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    named_entry_unlock(mem_entry);

    kr = vm_object_ownership_change(object,
        ledger_tag,
        owner,
        ledger_flags,
        FALSE); /* task_objq_locked */
    vm_object_unlock(object);

    return kr;
}
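/*
 * Illustrative sketch (assumption): a caller holding the transfer
 * entitlement (or kernel_task itself) can move the entry's memory
 * accounting onto another task's ledger:
 *
 *      kr = mach_memory_entry_ownership(entry_port, new_owner_task,
 *          VM_LEDGER_TAG_DEFAULT, 0);
 *
 * VM_LEDGER_TAG_DEFAULT with a zero flags word keeps the memory counted
 * against "new_owner_task"'s footprint; VM_LEDGER_FLAG_NO_FOOTPRINT and
 * VM_LEDGER_TAG_NETWORK are exactly the cases gated by the entitlement
 * check at the top of this routine.
 */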
kern_return_t
mach_memory_entry_get_page_counts(
    ipc_port_t      entry_port,
    unsigned int    *resident_page_count,
    unsigned int    *dirty_page_count)
{
    kern_return_t           kr;
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    vm_object_offset_t      offset;
    vm_object_size_t        size;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    object = mem_entry->backing.object;
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock(object);

    offset = mem_entry->offset;
    size = mem_entry->size;

    named_entry_unlock(mem_entry);

    kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count);

    vm_object_unlock(object);

    return kr;
}
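/*
 * Illustrative sketch (assumption): callers use this to size up a named
 * entry without mapping it, e.g.:
 *
 *      unsigned int resident = 0, dirty = 0;
 *      kr = mach_memory_entry_get_page_counts(entry_port, &resident, &dirty);
 *
 * The counts cover only the [offset, offset + size) range of the backing
 * object that the named entry actually describes.
 */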
/*
 * mach_memory_entry_port_release:
 *
 * Release a send right on a named entry port.  This is the correct
 * way to destroy a named entry.  When the last right on the port is
 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
 */
void
mach_memory_entry_port_release(
    ipc_port_t      port)
{
    assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
    ipc_port_release_send(port);
}
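/*
 * Illustrative sketch: this is the in-kernel counterpart of a user process
 * doing mach_port_deallocate() on its named-entry send right.  A kernel
 * component that created an entry and is done with it simply does:
 *
 *      mach_memory_entry_port_release(entry_port);
 *
 * rather than calling mach_destroy_memory_entry() directly (see the note
 * on the next routine).
 */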
/*
 * mach_destroy_memory_entry:
 *
 * Drops a reference on a memory entry and destroys the memory entry if
 * there are no more references on it.
 * NOTE: This routine should not be called to destroy a memory entry from the
 * kernel, as it will not release the Mach port associated with the memory
 * entry.  The proper way to destroy a memory entry in the kernel is to
 * call mach_memory_entry_port_release() to release the kernel's send-right on
 * the memory entry's port.  When the last send right is released, the memory
 * entry will be destroyed via ipc_kobject_destroy().
 */
void
mach_destroy_memory_entry(
    ipc_port_t      port)
{
    vm_named_entry_t        named_entry;
#if MACH_ASSERT
    assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
    named_entry = (vm_named_entry_t) ip_get_kobject(port);

    named_entry_lock(named_entry);
    named_entry->ref_count -= 1;

    if (named_entry->ref_count == 0) {
        if (named_entry->is_sub_map) {
            vm_map_deallocate(named_entry->backing.map);
        } else if (named_entry->is_copy) {
            vm_map_copy_discard(named_entry->backing.copy);
        } else {
            /* release the VM object we've been pointing to */
            vm_object_deallocate(named_entry->backing.object);
        }

        named_entry_unlock(named_entry);
        named_entry_lock_destroy(named_entry);

#if VM_NAMED_ENTRY_LIST
        lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
        queue_remove(&vm_named_entry_list, named_entry,
            vm_named_entry_t, named_entry_list);
        assert(vm_named_entry_count > 0);
        vm_named_entry_count--;
        lck_mtx_unlock(&vm_named_entry_list_lock_data);
#endif /* VM_NAMED_ENTRY_LIST */

        kfree(named_entry, sizeof(struct vm_named_entry));
    } else {
        named_entry_unlock(named_entry);
    }
}
/* Allow manipulation of individual page state.  This is actually part of */
/* the UPL regimen but takes place on the memory entry rather than on a UPL */

kern_return_t
mach_memory_entry_page_op(
    ipc_port_t              entry_port,
    vm_object_offset_t      offset,
    int                     ops,
    ppnum_t                 *phys_entry,
    int                     *flags)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    kern_return_t           kr;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    object = mem_entry->backing.object;
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_reference(object);
    named_entry_unlock(mem_entry);

    kr = vm_object_page_op(object, offset, ops, phys_entry, flags);

    vm_object_deallocate(object);

    return kr;
}
/*
 * mach_memory_entry_range_op offers performance enhancement over
 * mach_memory_entry_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */

kern_return_t
mach_memory_entry_range_op(
    ipc_port_t              entry_port,
    vm_object_offset_t      offset_beg,
    vm_object_offset_t      offset_end,
    int                     ops,
    int                     *range)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    kern_return_t           kr;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    object = mem_entry->backing.object;
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_reference(object);
    named_entry_unlock(mem_entry);

    kr = vm_object_range_op(object,
        offset_beg,
        offset_end,
        ops,
        (uint32_t *) range);

    vm_object_deallocate(object);

    return kr;
}
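/*
 * Illustrative sketch (assumption): a caller that only needs to know how
 * far a region is populated can use the range op without building a UPL.
 * "entry_size" is a hypothetical length:
 *
 *      int range = 0;
 *      kr = mach_memory_entry_range_op(entry_port,
 *          0,                  // offset_beg
 *          entry_size,         // offset_end
 *          UPL_ROP_PRESENT,    // scan while pages are present
 *          &range);
 *      // on success, "range" reports how far the scan got before stopping
 */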
/* ******* Temporary Internal calls to UPL for BSD ***** */

extern int kernel_upl_map(
    vm_map_t        map,
    upl_t           upl,
    vm_offset_t     *dst_addr);

extern int kernel_upl_unmap(
    vm_map_t        map,
    upl_t           upl);

extern int kernel_upl_commit(
    upl_t                   upl,
    upl_page_info_t         *pl,
    mach_msg_type_number_t  count);

extern int kernel_upl_commit_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     flags,
    upl_page_info_array_t   pl,
    mach_msg_type_number_t  count);

extern int kernel_upl_abort(
    upl_t                   upl,
    int                     abort_type);

extern int kernel_upl_abort_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     abort_flags);
kern_return_t
kernel_upl_map(
    vm_map_t        map,
    upl_t           upl,
    vm_offset_t     *dst_addr)
{
    return vm_upl_map(map, upl, dst_addr);
}

kern_return_t
kernel_upl_unmap(
    vm_map_t        map,
    upl_t           upl)
{
    return vm_upl_unmap(map, upl);
}

kern_return_t
kernel_upl_commit(
    upl_t                   upl,
    upl_page_info_t         *pl,
    mach_msg_type_number_t  count)
{
    kern_return_t   kr;

    kr = upl_commit(upl, pl, count);
    upl_deallocate(upl);
    return kr;
}
kern_return_t
kernel_upl_commit_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     flags,
    upl_page_info_array_t   pl,
    mach_msg_type_number_t  count)
{
    boolean_t               finished = FALSE;
    kern_return_t           kr;

    if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
        flags |= UPL_COMMIT_NOTIFY_EMPTY;
    }

    if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

    if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished) {
        upl_deallocate(upl);
    }

    return kr;
}
kern_return_t
kernel_upl_abort_range(
    upl_t           upl,
    upl_offset_t    offset,
    upl_size_t      size,
    int             abort_flags)
{
    kern_return_t   kr;
    boolean_t       finished = FALSE;

    if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY) {
        abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
    }

    kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

    if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished) {
        upl_deallocate(upl);
    }

    return kr;
}

kern_return_t
kernel_upl_abort(
    upl_t           upl,
    int             abort_type)
{
    kern_return_t   kr;

    kr = upl_abort(upl, abort_type);
    upl_deallocate(upl);
    return kr;
}
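/*
 * Illustrative sketch (assumption): the BSD callers of the commit/abort
 * routines above lean on UPL_COMMIT_FREE_ON_EMPTY so they never have to
 * call upl_deallocate() themselves, e.g. committing a UPL in chunks:
 *
 *      for (upl_offset_t off = 0; off < upl_size; off += chunk) {
 *              kernel_upl_commit_range(upl, off, chunk,
 *                  UPL_COMMIT_FREE_ON_EMPTY, pl, pl_count);
 *      }
 *      // the UPL is deallocated automatically once the last chunk empties it
 *
 * "upl_size", "chunk", "pl" and "pl_count" are placeholders.
 */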
/*
 * Now a kernel-private interface (for BootCache
 * use only).  Need a cleaner way to create an
 * empty vm_map() and return a handle to it.
 */

kern_return_t
vm_region_object_create(
    __unused vm_map_t       target_map,
    vm_size_t               size,
    ipc_port_t              *object_handle)
{
    vm_named_entry_t        user_entry;
    ipc_port_t              user_handle;

    vm_map_t        new_map;

    if (mach_memory_entry_allocate(&user_entry, &user_handle)
        != KERN_SUCCESS) {
        return KERN_FAILURE;
    }

    /* Create a named object based on a submap of specified size */

    new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
        vm_map_round_page(size,
        VM_MAP_PAGE_MASK(target_map)),
        TRUE);
    vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));

    user_entry->backing.map = new_map;
    user_entry->internal = TRUE;
    user_entry->is_sub_map = TRUE;
    user_entry->offset = 0;
    user_entry->protection = VM_PROT_ALL;
    user_entry->size = size;
    assert(user_entry->ref_count == 1);

    *object_handle = user_handle;
    return KERN_SUCCESS;
}
ppnum_t vm_map_get_phys_page(           /* forward */
    vm_map_t        map,
    vm_offset_t     offset);

ppnum_t
vm_map_get_phys_page(
    vm_map_t        map,
    vm_offset_t     addr)
{
    vm_object_offset_t      offset;
    vm_object_t             object;
    vm_map_offset_t         map_offset;
    vm_map_entry_t          entry;
    ppnum_t                 phys_page = 0;

    map_offset = vm_map_trunc_page(addr, PAGE_MASK);

    vm_map_lock(map);
    while (vm_map_lookup_entry(map, map_offset, &entry)) {
        if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
            vm_map_unlock(map);
            return (ppnum_t) 0;
        }
        if (entry->is_sub_map) {
            vm_map_t        old_map;
            vm_map_lock(VME_SUBMAP(entry));
            old_map = map;
            map = VME_SUBMAP(entry);
            map_offset = (VME_OFFSET(entry) +
                (map_offset - entry->vme_start));
            vm_map_unlock(old_map);
            continue;
        }
        if (VME_OBJECT(entry)->phys_contiguous) {
            /* These are  not standard pageable memory mappings */
            /* If they are not present in the object they will  */
            /* have to be picked up from the pager through the  */
            /* fault mechanism.  */
            if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
                /* need to call vm_fault */
                vm_map_unlock(map);
                vm_fault(map, map_offset, VM_PROT_NONE,
                    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
                    THREAD_UNINT, NULL, 0);
                vm_map_lock(map);
                continue;
            }
            offset = (VME_OFFSET(entry) +
                (map_offset - entry->vme_start));
            phys_page = (ppnum_t)
                ((VME_OBJECT(entry)->vo_shadow_offset
                + offset) >> PAGE_SHIFT);
            break;
        }
        offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
        object = VME_OBJECT(entry);
        vm_object_lock(object);
        while (TRUE) {
            vm_page_t dst_page = vm_page_lookup(object, offset);
            if (dst_page == VM_PAGE_NULL) {
                if (object->shadow) {
                    vm_object_t old_object;
                    vm_object_lock(object->shadow);
                    old_object = object;
                    offset = offset + object->vo_shadow_offset;
                    object = object->shadow;
                    vm_object_unlock(old_object);
                } else {
                    vm_object_unlock(object);
                    break;
                }
            } else {
                phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
                vm_object_unlock(object);
                break;
            }
        }
        break;
    }

    vm_map_unlock(map);
    return phys_page;
}
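/*
 * Illustrative sketch (assumption): the value returned above is a physical
 * page number, so a caller wanting a physical address would do:
 *
 *      ppnum_t pn = vm_map_get_phys_page(kernel_map, (vm_offset_t)vaddr);
 *      if (pn != 0) {
 *              uint64_t paddr = ptoa_64(pn) + (vaddr & PAGE_MASK);
 *              ...
 *      }
 *
 * A return of 0 means no physical page is currently resident for "vaddr".
 */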
kern_return_t kernel_object_iopl_request(       /* forward */
    vm_named_entry_t        named_entry,
    memory_object_offset_t  offset,
    upl_size_t              *upl_size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     *flags);

kern_return_t
kernel_object_iopl_request(
    vm_named_entry_t        named_entry,
    memory_object_offset_t  offset,
    upl_size_t              *upl_size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     *flags)
{
    vm_object_t             object;
    kern_return_t           ret;
    int                     caller_flags;

    caller_flags = *flags;

    if (caller_flags & ~UPL_VALID_FLAGS) {
        /*
         * For forward compatibility's sake,
         * reject any unknown flag.
         */
        return KERN_INVALID_VALUE;
    }

    /* a few checks to make sure user is obeying rules */
    if (*upl_size == 0) {
        if (offset >= named_entry->size) {
            return KERN_INVALID_RIGHT;
        }
        *upl_size = (upl_size_t) (named_entry->size - offset);
        if (*upl_size != named_entry->size - offset) {
            return KERN_INVALID_ARGUMENT;
        }
    }
    if (caller_flags & UPL_COPYOUT_FROM) {
        if ((named_entry->protection & VM_PROT_READ)
            != VM_PROT_READ) {
            return KERN_INVALID_RIGHT;
        }
    } else {
        if ((named_entry->protection &
            (VM_PROT_READ | VM_PROT_WRITE))
            != (VM_PROT_READ | VM_PROT_WRITE)) {
            return KERN_INVALID_RIGHT;
        }
    }
    if (named_entry->size < (offset + *upl_size)) {
        return KERN_INVALID_ARGUMENT;
    }

    /* the callers parameter offset is defined to be the */
    /* offset from beginning of named entry offset in object */
    offset = offset + named_entry->offset;

    if (named_entry->is_sub_map ||
        named_entry->is_copy) {
        return KERN_INVALID_ARGUMENT;
    }

    named_entry_lock(named_entry);

    /* This is the case where we are going to operate */
    /* on an already known object.  If the object is */
    /* not ready it is internal.  An external     */
    /* object cannot be mapped until it is ready  */
    /* we can therefore avoid the ready check     */
    /* in this case.  */
    object = named_entry->backing.object;
    vm_object_reference(object);
    named_entry_unlock(named_entry);

    if (!object->private) {
        if (*upl_size > MAX_UPL_TRANSFER_BYTES) {
            *upl_size = MAX_UPL_TRANSFER_BYTES;
        }
        if (object->phys_contiguous) {
            *flags = UPL_PHYS_CONTIG;
        } else {
            *flags = 0;
        }
    } else {
        *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
    }

    ret = vm_object_iopl_request(object,
        offset,
        *upl_size,
        upl_ptr,
        user_page_list,
        page_list_count,
        (upl_control_flags_t)(unsigned int)caller_flags);
    vm_object_deallocate(object);
    return ret;
}
/*
 * These symbols are looked up at runtime by vmware, VirtualBox,
 * despite not being exported in the symbol sets.
 */

#if defined(__x86_64__)

kern_return_t
mach_vm_map(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          initial_size,
    mach_vm_offset_t        mask,
    int                     flags,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance);

kern_return_t
mach_vm_remap(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_map_t                src_map,
    mach_vm_offset_t        memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    vm_inherit_t            inheritance);

kern_return_t
mach_vm_map(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          initial_size,
    mach_vm_offset_t        mask,
    int                     flags,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    return mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
               offset, copy, cur_protection, max_protection, inheritance);
}

kern_return_t
mach_vm_remap(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_map_t                src_map,
    mach_vm_offset_t        memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    vm_inherit_t            inheritance)
{
    return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
               copy, cur_protection, max_protection, inheritance);
}

kern_return_t
vm_map(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    ipc_port_t              port,
    vm_offset_t             offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance);

kern_return_t
vm_map(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    ipc_port_t              port,
    vm_offset_t             offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_map_kernel(target_map, address, size, mask,
               flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
               port, offset, copy,
               cur_protection, max_protection, inheritance);
}

#endif /* __x86_64__ */