/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can either be 32 or 64, or the
 * kernel task can be 32 or 64. mach_vm_allocate makes sense everywhere, and
 * is preferred where possible.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32
 * platforms, the MIG glue should never call into vm_allocate directly, because
 * the calling task and kernel_task are unlikely to use the same size parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */
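/*
 * Illustrative sketch (not part of this file, never built): a user-space
 * caller using the preferred "wide" mach_vm_* interface (subsystem 4800).
 * The helper name and the error handling are made up for the example;
 * user-space code would include <mach/mach.h> and <mach/mach_vm.h>.
 */
#if 0
static kern_return_t
example_allocate_wide(mach_vm_address_t *out_addr, mach_vm_size_t size)
{
    /* Let the kernel pick the address; sizes are 64-bit wide types. */
    *out_addr = 0;
    return mach_vm_allocate(mach_task_self(), out_addr, size,
        VM_FLAGS_ANYWHERE);
}
#endif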
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>    /* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>     /* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/sdt.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/memory_entry_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_init.h>

#include <san/kasan.h>

#include <libkern/OSDebug.h>
#include <IOKit/IOBSD.h>
vm_size_t        upl_offset_to_pagelist = 0;
/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate_external(
    vm_map_t                map,
    mach_vm_offset_t        *addr,
    mach_vm_size_t          size,
    int                     flags)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return mach_vm_allocate_kernel(map, addr, size, flags, tag);
}
kern_return_t
mach_vm_allocate_kernel(
    vm_map_t                map,
    mach_vm_offset_t        *addr,
    mach_vm_size_t          size,
    int                     flags,
    vm_tag_t                tag)
{
    vm_map_offset_t map_addr;
    vm_map_size_t   map_size;
    kern_return_t   result;
    boolean_t       anywhere;

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_ALLOCATE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }
    if (size == 0) {
        *addr = 0;
        return KERN_SUCCESS;
    }

    anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
    if (anywhere) {
        /*
         * No specific address requested, so start candidate address
         * search at the minimum address in the map.  However, if that
         * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
         * allocations of PAGEZERO to explicit requests since its
         * normal use is to catch dereferences of NULL and many
         * applications also treat pointers with a value of 0 as
         * special and suddenly having address 0 contain usable
         * memory would tend to confuse those applications.
         */
        map_addr = vm_map_min(map);
        if (map_addr == 0) {
            map_addr += VM_MAP_PAGE_SIZE(map);
        }
    } else {
        map_addr = vm_map_trunc_page(*addr,
            VM_MAP_PAGE_MASK(map));
    }
    map_size = vm_map_round_page(size,
        VM_MAP_PAGE_MASK(map));
    if (map_size == 0) {
        return KERN_INVALID_ARGUMENT;
    }

    result = vm_map_enter(
        map,
        &map_addr,
        map_size,
        (vm_map_offset_t)0,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        VM_OBJECT_NULL,
        (vm_object_offset_t)0,
        FALSE,
        VM_PROT_DEFAULT,
        VM_PROT_ALL,
        VM_INHERIT_DEFAULT);

    *addr = map_addr;
    return result;
}
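/*
 * Illustrative sketch (not built): how in-kernel code might call
 * mach_vm_allocate_kernel() directly.  kernel_map and VM_KERN_MEMORY_NONE
 * are real kernel symbols; the helper name and the choice of tag are
 * assumptions made for the example only.
 */
#if 0
static kern_return_t
example_kernel_allocate(mach_vm_offset_t *out_addr, mach_vm_size_t size)
{
    *out_addr = 0;
    /* Anywhere in the kernel map, tagged as untracked for accounting. */
    return mach_vm_allocate_kernel(kernel_map, out_addr, size,
        VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_NONE);
}
#endif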
/*
 * Legacy routine that allocates "zero fill" memory in the specified
 * map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate_external(
    vm_map_t        map,
    vm_offset_t     *addr,
    vm_size_t       size,
    int             flags)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_allocate_kernel(map, addr, size, flags, tag);
}
kern_return_t
vm_allocate_kernel(
    vm_map_t        map,
    vm_offset_t     *addr,
    vm_size_t       size,
    int             flags,
    vm_tag_t        tag)
{
    vm_map_offset_t map_addr;
    vm_map_size_t   map_size;
    kern_return_t   result;
    boolean_t       anywhere;

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_ALLOCATE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }
    if (size == 0) {
        *addr = 0;
        return KERN_SUCCESS;
    }

    anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
    if (anywhere) {
        /*
         * No specific address requested, so start candidate address
         * search at the minimum address in the map.  However, if that
         * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
         * allocations of PAGEZERO to explicit requests since its
         * normal use is to catch dereferences of NULL and many
         * applications also treat pointers with a value of 0 as
         * special and suddenly having address 0 contain usable
         * memory would tend to confuse those applications.
         */
        map_addr = vm_map_min(map);
        if (map_addr == 0) {
            map_addr += VM_MAP_PAGE_SIZE(map);
        }
    } else {
        map_addr = vm_map_trunc_page(*addr,
            VM_MAP_PAGE_MASK(map));
    }
    map_size = vm_map_round_page(size,
        VM_MAP_PAGE_MASK(map));
    if (map_size == 0) {
        return KERN_INVALID_ARGUMENT;
    }

    result = vm_map_enter(
        map,
        &map_addr,
        map_size,
        (vm_map_offset_t)0,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        VM_OBJECT_NULL,
        (vm_object_offset_t)0,
        FALSE,
        VM_PROT_DEFAULT,
        VM_PROT_ALL,
        VM_INHERIT_DEFAULT);

#if KASAN
    if (result == KERN_SUCCESS && map->pmap == kernel_pmap) {
        kasan_notify_address(map_addr, map_size);
    }
#endif

    *addr = CAST_DOWN(vm_offset_t, map_addr);
    return result;
}
/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
    vm_map_t                map,
    mach_vm_offset_t        start,
    mach_vm_size_t          size)
{
    if ((map == VM_MAP_NULL) || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == (mach_vm_offset_t) 0) {
        return KERN_SUCCESS;
    }

    return vm_map_remove(map,
               vm_map_trunc_page(start,
               VM_MAP_PAGE_MASK(map)),
               vm_map_round_page(start + size,
               VM_MAP_PAGE_MASK(map)),
               VM_MAP_REMOVE_NO_FLAGS);
}
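/*
 * Illustrative sketch (not built): the usual user-space allocate/deallocate
 * pairing that ends up in the two routines above.  "len" is an arbitrary
 * example size.
 */
#if 0
static void
example_allocate_then_free(mach_vm_size_t len)
{
    mach_vm_address_t addr = 0;
    kern_return_t kr;

    kr = mach_vm_allocate(mach_task_self(), &addr, len, VM_FLAGS_ANYWHERE);
    if (kr == KERN_SUCCESS) {
        /* ... use [addr, addr + len) ... */
        (void)mach_vm_deallocate(mach_task_self(), addr, len);
    }
}
#endif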
/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
    vm_map_t        map,
    vm_offset_t     start,
    vm_size_t       size)
{
    if ((map == VM_MAP_NULL) || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == (vm_offset_t) 0) {
        return KERN_SUCCESS;
    }

    return vm_map_remove(map,
               vm_map_trunc_page(start,
               VM_MAP_PAGE_MASK(map)),
               vm_map_round_page(start + size,
               VM_MAP_PAGE_MASK(map)),
               VM_MAP_REMOVE_NO_FLAGS);
}
/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_inherit(
    vm_map_t                map,
    mach_vm_offset_t        start,
    mach_vm_size_t          size,
    vm_inherit_t            new_inheritance)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_inheritance > VM_INHERIT_LAST_VALID)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_inherit(map,
               vm_map_trunc_page(start,
               VM_MAP_PAGE_MASK(map)),
               vm_map_round_page(start + size,
               VM_MAP_PAGE_MASK(map)),
               new_inheritance);
}
/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_inherit(
    vm_map_t        map,
    vm_offset_t     start,
    vm_size_t       size,
    vm_inherit_t    new_inheritance)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_inheritance > VM_INHERIT_LAST_VALID)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_inherit(map,
               vm_map_trunc_page(start,
               VM_MAP_PAGE_MASK(map)),
               vm_map_round_page(start + size,
               VM_MAP_PAGE_MASK(map)),
               new_inheritance);
}
/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_protect(
    vm_map_t                map,
    mach_vm_offset_t        start,
    mach_vm_size_t          size,
    boolean_t               set_maximum,
    vm_prot_t               new_protection)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_protect(map,
               vm_map_trunc_page(start,
               VM_MAP_PAGE_MASK(map)),
               vm_map_round_page(start + size,
               VM_MAP_PAGE_MASK(map)),
               new_protection,
               set_maximum);
}
/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map. Addressability of the range limited
 *	to the same size as the kernel.
 */
kern_return_t
vm_protect(
    vm_map_t        map,
    vm_offset_t     start,
    vm_size_t       size,
    boolean_t       set_maximum,
    vm_prot_t       new_protection)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_protect(map,
               vm_map_trunc_page(start,
               VM_MAP_PAGE_MASK(map)),
               vm_map_round_page(start + size,
               VM_MAP_PAGE_MASK(map)),
               new_protection,
               set_maximum);
}
/*
 * mach_vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
    vm_map_t                        map,
    mach_vm_address_t               addr,
    mach_vm_size_t                  size,
    vm_machine_attribute_t          attribute,
    vm_machine_attribute_val_t      *value)         /* IN/OUT */
{
    if ((map == VM_MAP_NULL) || (addr + size < addr)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_machine_attribute(
        map,
        vm_map_trunc_page(addr,
        VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(addr + size,
        VM_MAP_PAGE_MASK(map)),
        attribute,
        value);
}
/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc. Limited addressability
 * (same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
    vm_map_t                        map,
    vm_address_t                    addr,
    vm_size_t                       size,
    vm_machine_attribute_t          attribute,
    vm_machine_attribute_val_t      *value)         /* IN/OUT */
{
    if ((map == VM_MAP_NULL) || (addr + size < addr)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_machine_attribute(
        map,
        vm_map_trunc_page(addr,
        VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(addr + size,
        VM_MAP_PAGE_MASK(map)),
        attribute,
        value);
}
/*
 * mach_vm_read -
 * Read/copy a range from one address space and return it to the caller.
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 *
 * JMM - because of mach_msg_type_number_t, this call is limited to a
 * single 4GB region at this time.
 */
kern_return_t
mach_vm_read(
    vm_map_t                map,
    mach_vm_address_t       addr,
    mach_vm_size_t          size,
    pointer_t               *data,
    mach_msg_type_number_t  *data_size)
{
    kern_return_t   error;
    vm_map_copy_t   ipc_address;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((mach_msg_type_number_t) size != size) {
        return KERN_INVALID_ARGUMENT;
    }

    error = vm_map_copyin(map,
        (vm_map_address_t)addr,
        (vm_map_size_t)size,
        FALSE,  /* src_destroy */
        &ipc_address);

    if (KERN_SUCCESS == error) {
        *data = (pointer_t) ipc_address;
        *data_size = (mach_msg_type_number_t) size;
        assert(*data_size == size);
    }
    return error;
}
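/*
 * Illustrative sketch (not built): reading another task's memory from user
 * space via mach_vm_read.  The returned buffer is a new mapping in the
 * caller's address space and must be vm_deallocate'd.  "task",
 * "remote_addr" and "remote_size" are placeholders supplied by the caller.
 */
#if 0
static kern_return_t
example_read_remote(vm_map_t task, mach_vm_address_t remote_addr,
    mach_vm_size_t remote_size)
{
    vm_offset_t data = 0;
    mach_msg_type_number_t data_cnt = 0;
    kern_return_t kr;

    kr = mach_vm_read(task, remote_addr, remote_size, &data, &data_cnt);
    if (kr == KERN_SUCCESS) {
        /* ... inspect [data, data + data_cnt) ... */
        (void)mach_vm_deallocate(mach_task_self(), data, data_cnt);
    }
    return kr;
}
#endif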
/*
 * vm_read -
 * Read/copy a range from one address space and return it to the caller.
 * Limited addressability (same range limits as for the native kernel map).
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 */
kern_return_t
vm_read(
    vm_map_t                map,
    vm_address_t            addr,
    vm_size_t               size,
    pointer_t               *data,
    mach_msg_type_number_t  *data_size)
{
    kern_return_t   error;
    vm_map_copy_t   ipc_address;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    mach_msg_type_number_t dsize;
    if (os_convert_overflow(size, &dsize)) {
        /*
         * The kernel could handle a 64-bit "size" value, but
         * it could not return the size of the data in "*data_size"
         * without overflowing.
         * Let's reject this "size" as invalid.
         */
        return KERN_INVALID_ARGUMENT;
    }

    error = vm_map_copyin(map,
        (vm_map_address_t)addr,
        (vm_map_size_t)size,
        FALSE,  /* src_destroy */
        &ipc_address);

    if (KERN_SUCCESS == error) {
        *data = (pointer_t) ipc_address;
        *data_size = dsize;
        assert(*data_size == size);
    }
    return error;
}
/*
 * mach_vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 */
kern_return_t
mach_vm_read_list(
    vm_map_t                map,
    mach_vm_read_entry_t    data_list,
    natural_t               count)
{
    mach_msg_type_number_t  i;
    kern_return_t           error;
    vm_map_copy_t           copy;

    if (map == VM_MAP_NULL ||
        count > VM_MAP_ENTRY_MAX) {
        return KERN_INVALID_ARGUMENT;
    }

    error = KERN_SUCCESS;
    for (i = 0; i < count; i++) {
        vm_map_address_t map_addr;
        vm_map_size_t map_size;

        map_addr = (vm_map_address_t)(data_list[i].address);
        map_size = (vm_map_size_t)(data_list[i].size);

        if (map_size != 0) {
            error = vm_map_copyin(map,
                map_addr,
                map_size,
                FALSE,  /* src_destroy */
                &copy);
            if (KERN_SUCCESS == error) {
                error = vm_map_copyout(
                    current_task()->map,
                    &map_addr,
                    copy);
                if (KERN_SUCCESS == error) {
                    data_list[i].address = map_addr;
                    continue;
                }
                vm_map_copy_discard(copy);
            }
        }
        data_list[i].address = (mach_vm_address_t)0;
        data_list[i].size = (mach_vm_size_t)0;
    }
    return error;
}
/*
 * vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 *
 * The source and destination ranges are limited to those
 * that can be described with a vm_address_t (i.e. same
 * size map as the kernel).
 *
 * JMM - If the result of the copyout is an address range
 * that cannot be described with a vm_address_t (i.e. the
 * caller had a larger address space but used this call
 * anyway), it will result in a truncated address being
 * returned (and a likely confused caller).
 */
kern_return_t
vm_read_list(
    vm_map_t                map,
    vm_read_entry_t         data_list,
    natural_t               count)
{
    mach_msg_type_number_t  i;
    kern_return_t           error;
    vm_map_copy_t           copy;

    if (map == VM_MAP_NULL ||
        count > VM_MAP_ENTRY_MAX) {
        return KERN_INVALID_ARGUMENT;
    }

    error = KERN_SUCCESS;
    for (i = 0; i < count; i++) {
        vm_map_address_t map_addr;
        vm_map_size_t map_size;

        map_addr = (vm_map_address_t)(data_list[i].address);
        map_size = (vm_map_size_t)(data_list[i].size);

        if (map_size != 0) {
            error = vm_map_copyin(map,
                map_addr,
                map_size,
                FALSE,  /* src_destroy */
                &copy);
            if (KERN_SUCCESS == error) {
                error = vm_map_copyout(current_task()->map,
                    &map_addr,
                    copy);
                if (KERN_SUCCESS == error) {
                    data_list[i].address =
                        CAST_DOWN(vm_offset_t, map_addr);
                    continue;
                }
                vm_map_copy_discard(copy);
            }
        }
        data_list[i].address = (mach_vm_address_t)0;
        data_list[i].size = (mach_vm_size_t)0;
    }
    return error;
}
/*
 * mach_vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * In making an assumption that the current thread is local, it is
 * no longer cluster-safe without a fully supportive local proxy
 * thread/task (but we don't support clusters anymore so this is moot).
 */
kern_return_t
mach_vm_read_overwrite(
    vm_map_t                map,
    mach_vm_address_t       address,
    mach_vm_size_t          size,
    mach_vm_address_t       data,
    mach_vm_size_t          *data_size)
{
    kern_return_t   error;
    vm_map_copy_t   copy;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    error = vm_map_copyin(map, (vm_map_address_t)address,
        (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == error) {
        if (copy) {
            assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
        }

        error = vm_map_copy_overwrite(current_thread()->map,
            (vm_map_address_t)data,
            copy, (vm_map_size_t) size, FALSE);
        if (KERN_SUCCESS == error) {
            *data_size = size;
            return error;
        }
        vm_map_copy_discard(copy);
    }
    return error;
}
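/*
 * Illustrative sketch (not built): user-space use of mach_vm_read_overwrite,
 * which copies remote memory into a caller-supplied buffer instead of
 * returning a new mapping.  "task" and "remote_addr" are placeholders.
 */
#if 0
static kern_return_t
example_read_into_buffer(vm_map_t task, mach_vm_address_t remote_addr)
{
    char buf[4096];
    mach_vm_size_t out_size = 0;

    return mach_vm_read_overwrite(task, remote_addr, sizeof(buf),
        (mach_vm_address_t)(uintptr_t)buf, &out_size);
}
#endif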
/*
 * vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * This routine adds the additional limitation that the source and
 * destination ranges must be describable with vm_address_t values
 * (i.e. the same size address spaces as the kernel, or at least
 * the ranges are in that first portion of the respective address
 * spaces).
 */
kern_return_t
vm_read_overwrite(
    vm_map_t        map,
    vm_address_t    address,
    vm_size_t       size,
    vm_address_t    data,
    vm_size_t       *data_size)
{
    kern_return_t   error;
    vm_map_copy_t   copy;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    error = vm_map_copyin(map, (vm_map_address_t)address,
        (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == error) {
        if (copy) {
            assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
        }

        error = vm_map_copy_overwrite(current_thread()->map,
            (vm_map_address_t)data,
            copy, (vm_map_size_t) size, FALSE);
        if (KERN_SUCCESS == error) {
            *data_size = size;
            return error;
        }
        vm_map_copy_discard(copy);
    }
    return error;
}
/*
 * mach_vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 */
kern_return_t
mach_vm_write(
    vm_map_t                        map,
    mach_vm_address_t               address,
    pointer_t                       data,
    mach_msg_type_number_t          size)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_copy_overwrite(map, (vm_map_address_t)address,
               (vm_map_copy_t) data, size, FALSE /* interruptible XXX */);
}
/*
 * vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 *
 * The addressability of the range of addresses to overwrite is
 * limited by the use of a vm_address_t (same size as kernel map).
 * Either the target map is also small, or the range is in the
 * low addresses within it.
 */
kern_return_t
vm_write(
    vm_map_t                        map,
    vm_address_t                    address,
    pointer_t                       data,
    mach_msg_type_number_t          size)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_copy_overwrite(map, (vm_map_address_t)address,
               (vm_map_copy_t) data, size, FALSE /* interruptible XXX */);
}
/*
 * mach_vm_copy -
 * Overwrite one range of the specified map with the contents of
 * another range within that same map (i.e. both address ranges
 * are "over there").
 */
kern_return_t
mach_vm_copy(
    vm_map_t                map,
    mach_vm_address_t       source_address,
    mach_vm_size_t          size,
    mach_vm_address_t       dest_address)
{
    vm_map_copy_t copy;
    kern_return_t kr;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_copyin(map, (vm_map_address_t)source_address,
        (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == kr) {
        if (copy) {
            assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
        }

        kr = vm_map_copy_overwrite(map,
            (vm_map_address_t)dest_address,
            copy, (vm_map_size_t) size, FALSE /* interruptible XXX */);

        if (KERN_SUCCESS != kr) {
            vm_map_copy_discard(copy);
        }
    }
    return kr;
}
kern_return_t
vm_copy(
    vm_map_t        map,
    vm_address_t    source_address,
    vm_size_t       size,
    vm_address_t    dest_address)
{
    vm_map_copy_t copy;
    kern_return_t kr;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_copyin(map, (vm_map_address_t)source_address,
        (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == kr) {
        if (copy) {
            assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
        }

        kr = vm_map_copy_overwrite(map,
            (vm_map_address_t)dest_address,
            copy, (vm_map_size_t) size, FALSE /* interruptible XXX */);

        if (KERN_SUCCESS != kr) {
            vm_map_copy_discard(copy);
        }
    }
    return kr;
}
/*
 * mach_vm_map -
 * Map some range of an object into an address space.
 *
 * The object can be one of several types of objects:
 *	NULL - anonymous memory
 *	a named entry - a range within another address space
 *	                or a range within a memory object
 *	a whole memory object
 */
kern_return_t
mach_vm_map_external(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          initial_size,
    mach_vm_offset_t        mask,
    int                     flags,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return mach_vm_map_kernel(target_map, address, initial_size, mask,
               flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
               port, offset, copy,
               cur_protection, max_protection,
               inheritance);
}

kern_return_t
mach_vm_map_kernel(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          initial_size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    kern_return_t           kr;
    vm_map_offset_t         vmmaddr;

    vmmaddr = (vm_map_offset_t) *address;

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_MAP) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_enter_mem_object(target_map,
        &vmmaddr,
        initial_size,
        mask,
        flags,
        vmk_flags,
        tag,
        port,
        offset,
        copy,
        cur_protection,
        max_protection,
        inheritance);

#if KASAN
    if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) {
        kasan_notify_address(vmmaddr, initial_size);
    }
#endif

    *address = vmmaddr;
    return kr;
}
/* legacy interface */
kern_return_t
vm_map_64_external(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_map_64_kernel(target_map, address, size, mask,
               flags, VM_MAP_KERNEL_FLAGS_NONE,
               tag, port, offset, copy,
               cur_protection, max_protection,
               inheritance);
}

kern_return_t
vm_map_64_kernel(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    mach_vm_address_t map_addr;
    mach_vm_size_t map_size;
    mach_vm_offset_t map_mask;
    kern_return_t kr;

    map_addr = (mach_vm_address_t)*address;
    map_size = (mach_vm_size_t)size;
    map_mask = (mach_vm_offset_t)mask;

    kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
        flags, vmk_flags, tag,
        port, offset, copy,
        cur_protection, max_protection, inheritance);
    *address = CAST_DOWN(vm_offset_t, map_addr);
    return kr;
}
/* temporary, until world build */
kern_return_t
vm_map_external(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    ipc_port_t              port,
    vm_offset_t             offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_map_kernel(target_map, address, size, mask,
               flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
               port, offset, copy,
               cur_protection, max_protection, inheritance);
}

kern_return_t
vm_map_kernel(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_offset_t             offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    mach_vm_address_t map_addr;
    mach_vm_size_t map_size;
    mach_vm_offset_t map_mask;
    vm_object_offset_t obj_offset;
    kern_return_t kr;

    map_addr = (mach_vm_address_t)*address;
    map_size = (mach_vm_size_t)size;
    map_mask = (mach_vm_offset_t)mask;
    obj_offset = (vm_object_offset_t)offset;

    kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
        flags, vmk_flags, tag,
        port, obj_offset, copy,
        cur_protection, max_protection, inheritance);
    *address = CAST_DOWN(vm_offset_t, map_addr);
    return kr;
}
/*
 * mach_vm_remap_new -
 * Behaves like mach_vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
 * and {cur,max}_protection are in/out.
 */
kern_return_t
mach_vm_remap_new_external(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          size,
    mach_vm_offset_t        mask,
    int                     flags,
    mach_port_t             src_tport,
    mach_vm_offset_t        memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,       /* IN/OUT */
    vm_prot_t               *max_protection,       /* IN/OUT */
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;
    vm_map_offset_t         map_addr;
    kern_return_t           kr;
    vm_map_t                src_map;

    flags |= VM_FLAGS_RETURN_DATA_ADDR;
    VM_GET_FLAGS_ALIAS(flags, tag);

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_REMAP) {
        return KERN_INVALID_ARGUMENT;
    }

    if (target_map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((*cur_protection & ~VM_PROT_ALL) ||
        (*max_protection & ~VM_PROT_ALL) ||
        (*cur_protection & *max_protection) != *cur_protection) {
        return KERN_INVALID_ARGUMENT;
    }
    if ((*max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
        (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
        /*
         * enforce target's "wx" policies
         */
        return KERN_PROTECTION_FAILURE;
    }

    if (copy || *max_protection == VM_PROT_READ || *max_protection == VM_PROT_NONE) {
        src_map = convert_port_to_map_read(src_tport);
    } else {
        src_map = convert_port_to_map(src_tport);
    }

    if (src_map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_offset_t)*address;

    kr = vm_map_remap(target_map,
        &map_addr,
        size,
        mask,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        src_map,
        memory_address,
        copy,
        cur_protection,    /* IN/OUT */
        max_protection,    /* IN/OUT */
        inheritance);

    *address = map_addr;
    vm_map_deallocate(src_map);

    if (kr == KERN_SUCCESS) {
        ipc_port_release_send(src_tport); /* consume on success */
    }
    return kr;
}
/*
 * mach_vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 */
kern_return_t
mach_vm_remap_external(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_map_t                src_map,
    mach_vm_offset_t        memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,       /* OUT */
    vm_prot_t               *max_protection,       /* OUT */
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;
    VM_GET_FLAGS_ALIAS(flags, tag);

    return mach_vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, memory_address,
               copy, cur_protection, max_protection, inheritance);
}

kern_return_t
mach_vm_remap_kernel(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_tag_t                tag,
    vm_map_t                src_map,
    mach_vm_offset_t        memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,       /* OUT */
    vm_prot_t               *max_protection,       /* OUT */
    vm_inherit_t            inheritance)
{
    vm_map_offset_t         map_addr;
    kern_return_t           kr;

    if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
        return KERN_INVALID_ARGUMENT;
    }

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_REMAP) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_offset_t)*address;

    *cur_protection = VM_PROT_NONE;
    *max_protection = VM_PROT_NONE;

    kr = vm_map_remap(target_map,
        &map_addr,
        size,
        mask,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        src_map,
        memory_address,
        copy,
        cur_protection,    /* IN/OUT */
        max_protection,    /* IN/OUT */
        inheritance);
    *address = map_addr;
    return kr;
}
/*
 * vm_remap_new -
 * Behaves like vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
 * and {cur,max}_protection are in/out.
 */
kern_return_t
vm_remap_new_external(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    mach_port_t             src_tport,
    vm_offset_t             memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,       /* IN/OUT */
    vm_prot_t               *max_protection,       /* IN/OUT */
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;
    vm_map_offset_t         map_addr;
    kern_return_t           kr;
    vm_map_t                src_map;

    flags |= VM_FLAGS_RETURN_DATA_ADDR;
    VM_GET_FLAGS_ALIAS(flags, tag);

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_REMAP) {
        return KERN_INVALID_ARGUMENT;
    }

    if (target_map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((*cur_protection & ~VM_PROT_ALL) ||
        (*max_protection & ~VM_PROT_ALL) ||
        (*cur_protection & *max_protection) != *cur_protection) {
        return KERN_INVALID_ARGUMENT;
    }
    if ((*max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
        (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
        /*
         * enforce target's "wx" policies
         */
        return KERN_PROTECTION_FAILURE;
    }

    if (copy || *max_protection == VM_PROT_READ || *max_protection == VM_PROT_NONE) {
        src_map = convert_port_to_map_read(src_tport);
    } else {
        src_map = convert_port_to_map(src_tport);
    }

    if (src_map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_offset_t)*address;

    kr = vm_map_remap(target_map,
        &map_addr,
        size,
        mask,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        src_map,
        memory_address,
        copy,
        cur_protection,    /* IN/OUT */
        max_protection,    /* IN/OUT */
        inheritance);

    *address = CAST_DOWN(vm_offset_t, map_addr);
    vm_map_deallocate(src_map);

    if (kr == KERN_SUCCESS) {
        ipc_port_release_send(src_tport); /* consume on success */
    }
    return kr;
}
/*
 * vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 *
 * The addressability of the source and target address
 * range is limited by the size of vm_address_t (in the
 * kernel context).
 */
kern_return_t
vm_remap_external(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_map_t                src_map,
    vm_offset_t             memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,       /* OUT */
    vm_prot_t               *max_protection,       /* OUT */
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;
    VM_GET_FLAGS_ALIAS(flags, tag);

    return vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map,
               memory_address, copy, cur_protection, max_protection, inheritance);
}

kern_return_t
vm_remap_kernel(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_tag_t                tag,
    vm_map_t                src_map,
    vm_offset_t             memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,       /* OUT */
    vm_prot_t               *max_protection,       /* OUT */
    vm_inherit_t            inheritance)
{
    vm_map_offset_t         map_addr;
    kern_return_t           kr;

    if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
        return KERN_INVALID_ARGUMENT;
    }

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_REMAP) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_offset_t)*address;

    *cur_protection = VM_PROT_NONE;
    *max_protection = VM_PROT_NONE;

    kr = vm_map_remap(target_map,
        &map_addr,
        size,
        mask,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        src_map,
        memory_address,
        copy,
        cur_protection,    /* IN/OUT */
        max_protection,    /* IN/OUT */
        inheritance);
    *address = CAST_DOWN(vm_offset_t, map_addr);
    return kr;
}
/*
 * NOTE: these routines (and this file) will no longer require
 * mach_host_server.h when mach_vm_wire and vm_wire are changed to use
 * ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	mach_vm_wire
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire_external(
    host_priv_t             host_priv,
    vm_map_t                map,
    mach_vm_offset_t        start,
    mach_vm_size_t          size,
    vm_prot_t               access)
{
    return mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK);
}
kern_return_t
mach_vm_wire_kernel(
    host_priv_t             host_priv,
    vm_map_t                map,
    mach_vm_offset_t        start,
    mach_vm_size_t          size,
    vm_prot_t               access,
    vm_tag_t                tag)
{
    kern_return_t           rc;

    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_HOST;
    }

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_TASK;
    }

    if (access & ~VM_PROT_ALL || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (access != VM_PROT_NONE) {
        rc = vm_map_wire_kernel(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            access, tag,
            TRUE);
    } else {
        rc = vm_map_unwire(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            TRUE);
    }
    return rc;
}
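/*
 * Illustrative sketch (not built): wiring and unwiring a range from user
 * space.  mach_vm_wire requires the privileged host port; how "host_priv"
 * is obtained (it needs root privileges) is outside the scope of this
 * example, and "addr"/"len" are placeholders.
 */
#if 0
static void
example_wire_unwire(host_priv_t host_priv, mach_vm_address_t addr,
    mach_vm_size_t len)
{
    /* Wire the pages for read/write access... */
    (void)mach_vm_wire(host_priv, mach_task_self(), addr, len,
        VM_PROT_READ | VM_PROT_WRITE);

    /* ...and later unwire them by passing VM_PROT_NONE. */
    (void)mach_vm_wire(host_priv, mach_task_self(), addr, len,
        VM_PROT_NONE);
}
#endif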
/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
    host_priv_t             host_priv,
    vm_map_t                map,
    vm_offset_t             start,
    vm_size_t               size,
    vm_prot_t               access)
{
    kern_return_t           rc;

    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_HOST;
    }

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_TASK;
    }

    if ((access & ~VM_PROT_ALL) || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        rc = KERN_SUCCESS;
    } else if (access != VM_PROT_NONE) {
        rc = vm_map_wire_kernel(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            access, VM_KERN_MEMORY_OSFMK,
            TRUE);
    } else {
        rc = vm_map_unwire(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            TRUE);
    }
    return rc;
}
/*
 *	mach_vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
kern_return_t
mach_vm_msync(
    vm_map_t                map,
    mach_vm_address_t       address,
    mach_vm_size_t          size,
    vm_sync_t               sync_flags)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_TASK;
    }

    return vm_map_msync(map, (vm_map_address_t)address,
               (vm_map_size_t)size, sync_flags);
}
/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
kern_return_t
vm_msync(
    vm_map_t        map,
    vm_address_t    address,
    vm_size_t       size,
    vm_sync_t       sync_flags)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_TASK;
    }

    return vm_map_msync(map, (vm_map_address_t)address,
               (vm_map_size_t)size, sync_flags);
}
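/*
 * Illustrative sketch (not built): flushing a dirty file-backed range
 * synchronously from user space with the flags documented above.  "addr"
 * and "len" are placeholders for a previously mapped region.
 */
#if 0
static kern_return_t
example_msync(mach_vm_address_t addr, mach_vm_size_t len)
{
    /* Write dirty/precious pages back and wait for completion. */
    return mach_vm_msync(mach_task_self(), addr, len,
        VM_SYNC_SYNCHRONOUS);
}
#endif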
kern_return_t
vm_toggle_entry_reuse(int toggle, int *old_value)
{
    vm_map_t map = current_map();

    assert(!map->is_nested_map);
    if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) {
        *old_value = map->disable_vmentry_reuse;
    } else if (toggle == VM_TOGGLE_SET) {
        vm_map_entry_t map_to_entry;

        vm_map_lock(map);
        vm_map_disable_hole_optimization(map);
        map->disable_vmentry_reuse = TRUE;
        __IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
        if (map->first_free == map_to_entry) {
            map->highest_entry_end = vm_map_min(map);
        } else {
            map->highest_entry_end = map->first_free->vme_end;
        }
        vm_map_unlock(map);
    } else if (toggle == VM_TOGGLE_CLEAR) {
        vm_map_lock(map);
        map->disable_vmentry_reuse = FALSE;
        vm_map_unlock(map);
    } else {
        return KERN_INVALID_ARGUMENT;
    }

    return KERN_SUCCESS;
}
/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
kern_return_t
mach_vm_behavior_set(
    vm_map_t                map,
    mach_vm_offset_t        start,
    mach_vm_size_t          size,
    vm_behavior_t           new_behavior)
{
    vm_map_offset_t align_mask;

    if ((map == VM_MAP_NULL) || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    switch (new_behavior) {
    case VM_BEHAVIOR_REUSABLE:
    case VM_BEHAVIOR_REUSE:
    case VM_BEHAVIOR_CAN_REUSE:
        /*
         * Align to the hardware page size, to allow
         * malloc() to maximize the amount of re-usability,
         * even on systems with larger software page size.
         */
        align_mask = PAGE_MASK;
        break;
    default:
        align_mask = VM_MAP_PAGE_MASK(map);
        break;
    }

    return vm_map_behavior_set(map,
               vm_map_trunc_page(start, align_mask),
               vm_map_round_page(start + size, align_mask),
               new_behavior);
}
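/*
 * Illustrative sketch (not built): how a user-space allocator might mark a
 * freed-but-retained range as reusable so the kernel can reclaim its pages
 * without writing them out.  "addr" and "len" are placeholders.
 */
#if 0
static kern_return_t
example_mark_reusable(mach_vm_address_t addr, mach_vm_size_t len)
{
    return mach_vm_behavior_set(mach_task_self(), addr, len,
        VM_BEHAVIOR_REUSABLE);
}
#endif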
/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressibility by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
kern_return_t
vm_behavior_set(
    vm_map_t                map,
    vm_offset_t             start,
    vm_size_t               size,
    vm_behavior_t           new_behavior)
{
    if (start + size < start) {
        return KERN_INVALID_ARGUMENT;
    }

    return mach_vm_behavior_set(map,
               (mach_vm_offset_t) start,
               (mach_vm_size_t) size,
               new_behavior);
}
/*
 *	mach_vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
mach_vm_region(
    vm_map_t                 map,
    mach_vm_offset_t        *address,               /* IN/OUT */
    mach_vm_size_t          *size,                  /* OUT */
    vm_region_flavor_t       flavor,                /* IN */
    vm_region_info_t         info,                  /* OUT */
    mach_msg_type_number_t  *count,                 /* IN/OUT */
    mach_port_t             *object_name)           /* OUT */
{
    vm_map_offset_t         map_addr;
    vm_map_size_t           map_size;
    kern_return_t           kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_offset_t)*address;
    map_size = (vm_map_size_t)*size;

    /* legacy conversion */
    if (VM_REGION_BASIC_INFO == flavor) {
        flavor = VM_REGION_BASIC_INFO_64;
    }

    kr = vm_map_region(map,
        &map_addr, &map_size,
        flavor, info, count,
        object_name);

    *address = map_addr;
    *size = map_size;
    return kr;
}
/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
vm_region_64(
    vm_map_t                 map,
    vm_offset_t             *address,               /* IN/OUT */
    vm_size_t               *size,                  /* OUT */
    vm_region_flavor_t       flavor,                /* IN */
    vm_region_info_t         info,                  /* OUT */
    mach_msg_type_number_t  *count,                 /* IN/OUT */
    mach_port_t             *object_name)           /* OUT */
{
    vm_map_offset_t         map_addr;
    vm_map_size_t           map_size;
    kern_return_t           kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_offset_t)*address;
    map_size = (vm_map_size_t)*size;

    /* legacy conversion */
    if (VM_REGION_BASIC_INFO == flavor) {
        flavor = VM_REGION_BASIC_INFO_64;
    }

    kr = vm_map_region(map,
        &map_addr, &map_size,
        flavor, info, count,
        object_name);

    *address = CAST_DOWN(vm_offset_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
        return KERN_INVALID_ADDRESS;
    }
    return kr;
}
kern_return_t
vm_region(
    vm_map_t                 map,
    vm_address_t            *address,               /* IN/OUT */
    vm_size_t               *size,                  /* OUT */
    vm_region_flavor_t       flavor,                /* IN */
    vm_region_info_t         info,                  /* OUT */
    mach_msg_type_number_t  *count,                 /* IN/OUT */
    mach_port_t             *object_name)           /* OUT */
{
    vm_map_address_t        map_addr;
    vm_map_size_t           map_size;
    kern_return_t           kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;

    kr = vm_map_region(map,
        &map_addr, &map_size,
        flavor, info, count,
        object_name);

    *address = CAST_DOWN(vm_address_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
        return KERN_INVALID_ADDRESS;
    }
    return kr;
}
/*
 * vm_region_recurse: A form of vm_region which follows the
 * submaps in a target map
 */
kern_return_t
mach_vm_region_recurse(
    vm_map_t                        map,
    mach_vm_address_t               *address,
    mach_vm_size_t                  *size,
    natural_t                       *depth,
    vm_region_recurse_info_t        info,
    mach_msg_type_number_t          *infoCnt)
{
    vm_map_address_t        map_addr;
    vm_map_size_t           map_size;
    kern_return_t           kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;

    kr = vm_map_region_recurse_64(
        map,
        &map_addr,
        &map_size,
        depth,
        (vm_region_submap_info_64_t)info,
        infoCnt);

    *address = map_addr;
    *size = map_size;
    return kr;
}
/*
 * vm_region_recurse: A form of vm_region which follows the
 * submaps in a target map
 */
kern_return_t
vm_region_recurse_64(
    vm_map_t                        map,
    vm_address_t                    *address,
    vm_size_t                       *size,
    natural_t                       *depth,
    vm_region_recurse_info_64_t     info,
    mach_msg_type_number_t          *infoCnt)
{
    vm_map_address_t        map_addr;
    vm_map_size_t           map_size;
    kern_return_t           kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;

    kr = vm_map_region_recurse_64(
        map,
        &map_addr,
        &map_size,
        depth,
        (vm_region_submap_info_64_t)info,
        infoCnt);

    *address = CAST_DOWN(vm_address_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
        return KERN_INVALID_ADDRESS;
    }
    return kr;
}
kern_return_t
vm_region_recurse(
    vm_map_t                        map,
    vm_offset_t                     *address,       /* IN/OUT */
    vm_size_t                       *size,          /* OUT */
    natural_t                       *depth,         /* IN/OUT */
    vm_region_recurse_info_t        info32,         /* IN/OUT */
    mach_msg_type_number_t          *infoCnt)       /* IN/OUT */
{
    vm_region_submap_info_data_64_t info64;
    vm_region_submap_info_t info;
    vm_map_address_t        map_addr;
    vm_map_size_t           map_size;
    kern_return_t           kr;

    if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;
    info = (vm_region_submap_info_t)info32;
    *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

    kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
        depth, &info64, infoCnt);

    info->protection = info64.protection;
    info->max_protection = info64.max_protection;
    info->inheritance = info64.inheritance;
    info->offset = (uint32_t)info64.offset; /* trouble-maker */
    info->user_tag = info64.user_tag;
    info->pages_resident = info64.pages_resident;
    info->pages_shared_now_private = info64.pages_shared_now_private;
    info->pages_swapped_out = info64.pages_swapped_out;
    info->pages_dirtied = info64.pages_dirtied;
    info->ref_count = info64.ref_count;
    info->shadow_depth = info64.shadow_depth;
    info->external_pager = info64.external_pager;
    info->share_mode = info64.share_mode;
    info->is_submap = info64.is_submap;
    info->behavior = info64.behavior;
    info->object_id = info64.object_id;
    info->user_wired_count = info64.user_wired_count;

    *address = CAST_DOWN(vm_address_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);
    *infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
        return KERN_INVALID_ADDRESS;
    }
    return kr;
}
kern_return_t
mach_vm_purgable_control(
    vm_map_t                map,
    mach_vm_offset_t        address,
    vm_purgable_t           control,
    int                     *state)
{
    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
        /* not allowed from user-space */
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_purgable_control(map,
               vm_map_trunc_page(address, VM_MAP_PAGE_MASK(map)),
               control,
               state);
}
kern_return_t
vm_purgable_control(
    vm_map_t                map,
    vm_offset_t             address,
    vm_purgable_t           control,
    int                     *state)
{
    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
        /* not allowed from user-space */
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_purgable_control(map,
               vm_map_trunc_page(address, VM_MAP_PAGE_MASK(map)),
               control,
               state);
}
/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host priv port).  Set this variable to zero if
 *	you want to let any application allocate CPM.
 */
unsigned int    vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
kern_return_t
vm_allocate_cpm(
    host_priv_t             host_priv,
    vm_map_t                map,
    vm_address_t            *addr,
    vm_size_t               size,
    int                     flags)
{
    vm_map_address_t        map_addr;
    vm_map_size_t           map_size;
    kern_return_t           kr;

    if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) {
        return KERN_INVALID_HOST;
    }

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_address_t)*addr;
    map_size = (vm_map_size_t)size;

    kr = vm_map_enter_cpm(map,
        &map_addr,
        map_size,
        flags);

    *addr = CAST_DOWN(vm_address_t, map_addr);
    return kr;
}
kern_return_t
mach_vm_page_query(
    vm_map_t                map,
    mach_vm_offset_t        offset,
    int                     *disposition,
    int                     *ref_count)
{
    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_page_query_internal(
        map,
        vm_map_trunc_page(offset, PAGE_MASK),
        disposition, ref_count);
}

kern_return_t
vm_map_page_query(
    vm_map_t        map,
    vm_offset_t     offset,
    int             *disposition,
    int             *ref_count)
{
    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_page_query_internal(
        map,
        vm_map_trunc_page(offset, PAGE_MASK),
        disposition, ref_count);
}
kern_return_t
mach_vm_page_range_query(
    vm_map_t                map,
    mach_vm_offset_t        address,
    mach_vm_size_t          size,
    mach_vm_address_t       dispositions_addr,
    mach_vm_size_t          *dispositions_count)
{
    kern_return_t           kr = KERN_SUCCESS;
    int                     num_pages = 0, i = 0;
    mach_vm_size_t          curr_sz = 0, copy_sz = 0;
    mach_vm_size_t          disp_buf_req_size = 0, disp_buf_total_size = 0;
    mach_msg_type_number_t  count = 0;

    void                    *info = NULL;
    void                    *local_disp = NULL;
    vm_map_size_t           info_size = 0, local_disp_size = 0;
    mach_vm_offset_t        start = 0, end = 0;
    int                     effective_page_shift, effective_page_size, effective_page_mask;

    if (map == VM_MAP_NULL || dispositions_count == NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    effective_page_shift = vm_self_region_page_shift_safely(map);
    if (effective_page_shift == -1) {
        return KERN_INVALID_ARGUMENT;
    }
    effective_page_size = (1 << effective_page_shift);
    effective_page_mask = effective_page_size - 1;

    if (os_mul_overflow(*dispositions_count, sizeof(int), &disp_buf_req_size)) {
        return KERN_INVALID_ARGUMENT;
    }

    start = vm_map_trunc_page(address, effective_page_mask);
    end = vm_map_round_page(address + size, effective_page_mask);

    if (end < start) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((end - start) < size) {
        /*
         * Aligned size is less than unaligned size.
         */
        return KERN_INVALID_ARGUMENT;
    }

    if (disp_buf_req_size == 0 || (end == start)) {
        return KERN_SUCCESS;
    }

    /*
     * For large requests, we will go through them
     * MAX_PAGE_RANGE_QUERY chunk at a time.
     */

    curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY);
    num_pages = (int) (curr_sz >> effective_page_shift);

    info_size = num_pages * sizeof(vm_page_info_basic_data_t);
    info = kheap_alloc(KHEAP_TEMP, info_size, Z_WAITOK);

    local_disp_size = num_pages * sizeof(int);
    local_disp = kheap_alloc(KHEAP_TEMP, local_disp_size, Z_WAITOK);

    if (info == NULL || local_disp == NULL) {
        kr = KERN_RESOURCE_SHORTAGE;
        goto out;
    }

    while (size) {
        count = VM_PAGE_INFO_BASIC_COUNT;
        kr = vm_map_page_range_info_internal(
            map,
            start,
            vm_map_round_page(start + curr_sz, effective_page_mask),
            effective_page_shift,
            VM_PAGE_INFO_BASIC,
            (vm_page_info_t) info,
            &count);

        assert(kr == KERN_SUCCESS);

        for (i = 0; i < num_pages; i++) {
            ((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition;
        }

        copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */);
        kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz);

        start += curr_sz;
        disp_buf_req_size -= copy_sz;
        disp_buf_total_size += copy_sz;

        if (kr != 0) {
            break;
        }

        if ((disp_buf_req_size == 0) || (curr_sz >= size)) {
            /*
             * We might have inspected the full range OR
             * more than it esp. if the user passed in
             * non-page aligned start/size and/or if we
             * descended into a submap. We are done here.
             */
            size = 0;
        } else {
            dispositions_addr += copy_sz;

            size -= curr_sz;

            curr_sz = MIN(vm_map_round_page(size, effective_page_mask), MAX_PAGE_RANGE_QUERY);
            num_pages = (int)(curr_sz >> effective_page_shift);
        }
    }

    *dispositions_count = disp_buf_total_size / sizeof(int);

out:
    if (local_disp) {
        kheap_free(KHEAP_TEMP, local_disp, local_disp_size);
    }
    if (info) {
        kheap_free(KHEAP_TEMP, info, info_size);
    }
    return kr;
}
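/*
 * Illustrative sketch (not built): querying page dispositions for a small
 * range from user space.  The count is expressed in ints (one per page), as
 * the implementation above converts it with sizeof(int).  "addr" is a
 * placeholder for a mapped address.
 */
#if 0
static kern_return_t
example_page_dispositions(mach_vm_address_t addr)
{
    int dispositions[16] = {0};
    mach_vm_size_t disp_count = 16;     /* in: capacity; out: entries written */

    return mach_vm_page_range_query(mach_task_self(), addr,
        16 * vm_page_size,
        (mach_vm_address_t)(uintptr_t)dispositions, &disp_count);
}
#endif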
kern_return_t
mach_vm_page_info(
    vm_map_t                map,
    mach_vm_address_t       address,
    vm_page_info_flavor_t   flavor,
    vm_page_info_t          info,
    mach_msg_type_number_t  *count)
{
    kern_return_t   kr;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_page_info(map, address, flavor, info, count);
    return kr;
}
/* map a (whole) upl into an address space */
kern_return_t
vm_upl_map(
    vm_map_t                map,
    upl_t                   upl,
    vm_address_t            *dst_addr)
{
    vm_map_offset_t         map_addr;
    kern_return_t           kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_enter_upl(map, upl, &map_addr);
    *dst_addr = CAST_DOWN(vm_address_t, map_addr);
    return kr;
}

kern_return_t
vm_upl_unmap(
    vm_map_t        map,
    upl_t           upl)
{
    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_remove_upl(map, upl);
}
/* Retrieve a upl for an object underlying an address range in a map */

kern_return_t
vm_map_get_upl(
    vm_map_t                map,
    vm_map_offset_t         map_offset,
    upl_size_t              *upl_size,
    upl_t                   *upl,
    upl_page_info_array_t   page_list,
    unsigned int            *count,
    upl_control_flags_t     *flags,
    vm_tag_t                tag,
    int                     force_data_sync)
{
    upl_control_flags_t map_flags;
    kern_return_t       kr;

    if (VM_MAP_NULL == map) {
        return KERN_INVALID_ARGUMENT;
    }

    map_flags = *flags & ~UPL_NOZEROFILL;
    if (force_data_sync) {
        map_flags |= UPL_FORCE_DATA_SYNC;
    }

    kr = vm_map_create_upl(map,
        map_offset,
        upl_size,
        upl,
        page_list,
        count,
        &map_flags,
        tag);

    *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
    return kr;
}
/*
 * mach_make_memory_entry_64
 *
 * Think of it as a two-stage vm_remap() operation.  First
 * you get a handle.  Second, you map that handle somewhere
 * else. Rather than doing it all at once (and
 * without needing access to the other whole map).
 */
kern_return_t
mach_make_memory_entry_64(
    vm_map_t                target_map,
    memory_object_size_t    *size,
    memory_object_offset_t  offset,
    vm_prot_t               permission,
    ipc_port_t              *object_handle,
    ipc_port_t              parent_handle)
{
    vm_named_entry_kernel_flags_t   vmne_kflags;

    if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) {
        /*
         * Unknown flag: reject for forward compatibility.
         */
        return KERN_INVALID_VALUE;
    }

    vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
    if (permission & MAP_MEM_LEDGER_TAGGED) {
        vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT;
    }
    return mach_make_memory_entry_internal(target_map,
               size,
               offset,
               permission,
               vmne_kflags,
               object_handle,
               parent_handle);
}
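/*
 * Illustrative sketch (not built): the two-stage pattern described above,
 * from user space.  First obtain a named-entry handle for a range of the
 * caller's own map, then map that handle at a new address.  "src_addr" and
 * "len" are placeholders; error handling is minimal for brevity.
 */
#if 0
static kern_return_t
example_entry_then_map(mach_vm_address_t src_addr, memory_object_size_t len,
    mach_vm_address_t *out_addr)
{
    memory_object_size_t me_size = len;
    mach_port_t mem_entry = MACH_PORT_NULL;
    kern_return_t kr;

    kr = mach_make_memory_entry_64(mach_task_self(), &me_size, src_addr,
        VM_PROT_READ | VM_PROT_WRITE, &mem_entry, MACH_PORT_NULL);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    *out_addr = 0;
    kr = mach_vm_map(mach_task_self(), out_addr, me_size, 0,
        VM_FLAGS_ANYWHERE, mem_entry, 0, FALSE,
        VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
        VM_INHERIT_NONE);
    return kr;
}
#endif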
2502 mach_make_memory_entry_internal(
2503 vm_map_t target_map
,
2504 memory_object_size_t
*size
,
2505 memory_object_offset_t offset
,
2506 vm_prot_t permission
,
2507 vm_named_entry_kernel_flags_t vmne_kflags
,
2508 ipc_port_t
*object_handle
,
2509 ipc_port_t parent_handle
)
2511 vm_named_entry_t parent_entry
;
2512 vm_named_entry_t user_entry
;
2513 ipc_port_t user_handle
;
2516 vm_map_size_t map_size
;
2517 vm_map_offset_t map_start
, map_end
;
2520 * Stash the offset in the page for use by vm_map_enter_mem_object()
2521 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
2523 vm_object_offset_t offset_in_page
;
2525 unsigned int access
;
2526 vm_prot_t protections
;
2527 vm_prot_t original_protections
, mask_protections
;
2528 unsigned int wimg_mode
;
2529 boolean_t use_data_addr
;
2530 boolean_t use_4K_compat
;
2532 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x\n", target_map
, offset
, *size
, permission
);
2536 if ((permission
& MAP_MEM_FLAGS_MASK
) & ~MAP_MEM_FLAGS_ALL
) {
2538 * Unknown flag: reject for forward compatibility.
2540 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_INVALID_VALUE
);
2541 return KERN_INVALID_VALUE
;
2544 if (IP_VALID(parent_handle
) &&
2545 ip_kotype(parent_handle
) == IKOT_NAMED_ENTRY
) {
2546 parent_entry
= (vm_named_entry_t
) ip_get_kobject(parent_handle
);
2548 parent_entry
= NULL
;
2551 if (parent_entry
&& parent_entry
->is_copy
) {
2552 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_INVALID_ARGUMENT
);
2553 return KERN_INVALID_ARGUMENT
;
2556 original_protections
= permission
& VM_PROT_ALL
;
2557 protections
= original_protections
;
2558 mask_protections
= permission
& VM_PROT_IS_MASK
;
2559 access
= GET_MAP_MEM(permission
);
2560 use_data_addr
= ((permission
& MAP_MEM_USE_DATA_ADDR
) != 0);
2561 use_4K_compat
= ((permission
& MAP_MEM_4K_DATA_ADDR
) != 0);
2563 user_handle
= IP_NULL
;
2566 map_start
= vm_map_trunc_page(offset
, VM_MAP_PAGE_MASK(target_map
));
2568 if (permission
& MAP_MEM_ONLY
) {
2569 boolean_t parent_is_object
;
2571 map_end
= vm_map_round_page(offset
+ *size
, VM_MAP_PAGE_MASK(target_map
));
2572 map_size
= map_end
- map_start
;
2574 if (use_data_addr
|| use_4K_compat
|| parent_entry
== NULL
) {
2575 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_INVALID_ARGUMENT
);
2576 return KERN_INVALID_ARGUMENT
;
2579 parent_is_object
= parent_entry
->is_object
;
2580 if (!parent_is_object
) {
2581 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_INVALID_ARGUMENT
);
2582 return KERN_INVALID_ARGUMENT
;
2584 object
= vm_named_entry_to_vm_object(parent_entry
);
2585 if (parent_is_object
&& object
!= VM_OBJECT_NULL
) {
2586 wimg_mode
= object
->wimg_bits
;
2588 wimg_mode
= VM_WIMG_USE_DEFAULT
;
2590 if ((access
!= GET_MAP_MEM(parent_entry
->protection
)) &&
2591 !(parent_entry
->protection
& VM_PROT_WRITE
)) {
2592 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_INVALID_RIGHT
);
2593 return KERN_INVALID_RIGHT
;
2595 vm_prot_to_wimg(access
, &wimg_mode
);
2596 if (access
!= MAP_MEM_NOOP
) {
2597 SET_MAP_MEM(access
, parent_entry
->protection
);
2599 if (parent_is_object
&& object
&&
2600 (access
!= MAP_MEM_NOOP
) &&
2601 (!(object
->nophyscache
))) {
2602 if (object
->wimg_bits
!= wimg_mode
) {
2603 vm_object_lock(object
);
2604 vm_object_change_wimg_mode(object
, wimg_mode
);
2605 vm_object_unlock(object
);
2608 if (object_handle
) {
2609 *object_handle
= IP_NULL
;
2611 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_SUCCESS
);
2612 return KERN_SUCCESS
;
2613 } else if (permission
& MAP_MEM_NAMED_CREATE
) {
        int ledger_flags = 0;
        task_t owner;

        map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map));
        map_size = map_end - map_start;

        if (use_data_addr || use_4K_compat) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT);
            return KERN_INVALID_ARGUMENT;
        }

        if (map_size == 0) {
            *size = 0;
            *object_handle = IPC_PORT_NULL;
            return KERN_SUCCESS;
        }

        kr = mach_memory_entry_allocate(&user_entry, &user_handle);
        if (kr != KERN_SUCCESS) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE);
            return KERN_FAILURE;
        }

        /*
         * Force the creation of the VM object now.
         */
        if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
            /*
             * LP64todo - for now, we can only allocate 4GB-4096
             * internal objects because the default pager can't
             * page bigger ones.  Remove this when it can.
             */
            kr = KERN_FAILURE;
            goto make_mem_done;
        }

        object = vm_object_allocate(map_size);
        assert(object != VM_OBJECT_NULL);

        /*
         * We use this path when we want to make sure that
         * nobody messes with the object (coalesce, for
         * example) before we map it.
         * We might want to use these objects for transposition via
         * vm_object_transpose() too, so we don't want any copy or
         * shadow objects either...
         */
        object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
        object->true_share = TRUE;

        owner = current_task();
        if ((permission & MAP_MEM_PURGABLE) ||
            vmne_kflags.vmnekf_ledger_tag) {
            assert(object->vo_owner == NULL);
            assert(object->resident_page_count == 0);
            assert(object->wired_page_count == 0);
            assert(owner != TASK_NULL);
            if (vmne_kflags.vmnekf_ledger_no_footprint) {
                ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
                object->vo_no_footprint = TRUE;
            }
            if (permission & MAP_MEM_PURGABLE) {
                if (!(permission & VM_PROT_WRITE)) {
                    /* if we can't write, we can't purge */
                    vm_object_deallocate(object);
                    kr = KERN_INVALID_ARGUMENT;
                    goto make_mem_done;
                }
                object->purgable = VM_PURGABLE_NONVOLATILE;
                if (permission & MAP_MEM_PURGABLE_KERNEL_ONLY) {
                    object->purgeable_only_by_kernel = TRUE;
                }
#if __arm64__
                if (owner->task_legacy_footprint) {
                    /*
                     * For ios11, we failed to account for
                     * this memory.  Keep doing that for
                     * legacy apps (built before ios12),
                     * for backwards compatibility's sake...
                     */
                    owner = kernel_task;
                }
#endif /* __arm64__ */
                vm_object_lock(object);
                vm_purgeable_nonvolatile_enqueue(object, owner);
                vm_object_unlock(object);
            }
        }

        if (vmne_kflags.vmnekf_ledger_tag) {
            /*
             * Bill this object to the current task's
             * ledgers for the given tag.
             */
            if (vmne_kflags.vmnekf_ledger_no_footprint) {
                ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
            }
            vm_object_lock(object);
            object->vo_ledger_tag = vmne_kflags.vmnekf_ledger_tag;
            kr = vm_object_ownership_change(
                object,
                vmne_kflags.vmnekf_ledger_tag,
                owner, /* new owner */
                ledger_flags,
                FALSE); /* task_objq locked? */
            vm_object_unlock(object);
            if (kr != KERN_SUCCESS) {
                vm_object_deallocate(object);
                goto make_mem_done;
            }
        }

#if CONFIG_SECLUDED_MEMORY
        if (secluded_for_iokit && /* global boot-arg */
            ((permission & MAP_MEM_GRAB_SECLUDED)
            /* XXX FBDP for my testing only */
            || (secluded_for_fbdp && map_size == 97550336))) {
            if (!(permission & MAP_MEM_GRAB_SECLUDED) &&
                secluded_for_fbdp) {
                printf("FBDP: object %p size %lld can grab secluded\n", object, (uint64_t) map_size);
            }
            object->can_grab_secluded = TRUE;
            assert(!object->eligible_for_secluded);
        }
#endif /* CONFIG_SECLUDED_MEMORY */

        /*
         * The VM object is brand new and nobody else knows about it,
         * so we don't need to lock it.
         */
        wimg_mode = object->wimg_bits;
        vm_prot_to_wimg(access, &wimg_mode);
        if (access != MAP_MEM_NOOP) {
            object->wimg_bits = wimg_mode;
        }

        /* the object has no pages, so no WIMG bits to update here */

        kr = vm_named_entry_from_vm_object(
            user_entry,
            object,
            0,
            map_size,
            (protections & VM_PROT_ALL));
        if (kr != KERN_SUCCESS) {
            vm_object_deallocate(object);
            goto make_mem_done;
        }
        user_entry->internal = TRUE;
        user_entry->is_sub_map = FALSE;
        user_entry->offset = 0;
        user_entry->data_offset = 0;
        user_entry->protection = protections;
        SET_MAP_MEM(access, user_entry->protection);
        user_entry->size = map_size;

        /* user_object pager and internal fields are not used */
        /* when the object field is filled in. */

        *size = CAST_DOWN(vm_size_t, (user_entry->size -
            user_entry->data_offset));
        *object_handle = user_handle;
        DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
        return KERN_SUCCESS;
    }
    if (permission & MAP_MEM_VM_COPY) {
        vm_map_copy_t copy;

        if (target_map == VM_MAP_NULL) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_TASK);
            return KERN_INVALID_TASK;
        }

        map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map));
        map_size = map_end - map_start;
        if (map_size == 0) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT);
            return KERN_INVALID_ARGUMENT;
        }

        if (use_data_addr || use_4K_compat) {
            offset_in_page = offset - map_start;
            if (use_4K_compat) {
                offset_in_page &= ~((signed)(0xFFF));
            }
        } else {
            offset_in_page = 0;
        }

        kr = vm_map_copyin_internal(target_map,
            map_start,
            map_size,
            VM_MAP_COPYIN_ENTRY_LIST,
            &copy);
        if (kr != KERN_SUCCESS) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr);
            return kr;
        }
        assert(copy != VM_MAP_COPY_NULL);

        kr = mach_memory_entry_allocate(&user_entry, &user_handle);
        if (kr != KERN_SUCCESS) {
            vm_map_copy_discard(copy);
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE);
            return KERN_FAILURE;
        }

        user_entry->backing.copy = copy;
        user_entry->internal = FALSE;
        user_entry->is_sub_map = FALSE;
        user_entry->is_copy = TRUE;
        user_entry->offset = 0;
        user_entry->protection = protections;
        user_entry->size = map_size;
        user_entry->data_offset = offset_in_page;

        *size = CAST_DOWN(vm_size_t, (user_entry->size -
            user_entry->data_offset));
        *object_handle = user_handle;
        DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
        return KERN_SUCCESS;
    }
    if ((permission & MAP_MEM_VM_SHARE)
        || parent_entry == NULL
        || (permission & MAP_MEM_NAMED_REUSE)) {
        vm_map_copy_t copy;
        vm_prot_t cur_prot, max_prot;
        vm_map_kernel_flags_t vmk_flags;
        vm_map_entry_t parent_copy_entry;

        if (target_map == VM_MAP_NULL) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_TASK);
            return KERN_INVALID_TASK;
        }

        map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map));
        vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
        parent_copy_entry = VM_MAP_ENTRY_NULL;
        if (!(permission & MAP_MEM_VM_SHARE)) {
            vm_map_t tmp_map, real_map;
            vm_map_version_t version;
            vm_object_t tmp_object;
            vm_object_offset_t obj_off;
            vm_prot_t prot;
            boolean_t wired;

            /* resolve any pending submap copy-on-write... */
            if (protections & VM_PROT_WRITE) {
                tmp_map = target_map;
                vm_map_lock_read(tmp_map);
                kr = vm_map_lookup_locked(&tmp_map,
                    map_start,
                    protections | mask_protections,
                    OBJECT_LOCK_EXCLUSIVE,
                    &version,
                    &tmp_object,
                    &obj_off,
                    &prot,
                    &wired,
                    NULL, /* fault_info */
                    &real_map);
                if (kr != KERN_SUCCESS) {
                    vm_map_unlock_read(tmp_map);
                } else {
                    vm_object_unlock(tmp_object);
                    vm_map_unlock_read(tmp_map);
                    if (real_map != tmp_map) {
                        vm_map_unlock_read(real_map);
                    }
                }
            }
            /* ... and carry on */

            /* stop extracting if VM object changes */
            vmk_flags.vmkf_copy_single_object = TRUE;
            if ((permission & MAP_MEM_NAMED_REUSE) &&
                parent_entry != NULL &&
                parent_entry->is_object) {
                vm_map_copy_t parent_copy;
                parent_copy = parent_entry->backing.copy;
                assert(parent_copy->cpy_hdr.nentries == 1);
                parent_copy_entry = vm_map_copy_first_entry(parent_copy);
                assert(!parent_copy_entry->is_sub_map);
            }
        }

        map_size = map_end - map_start;
        if (map_size == 0) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT);
            return KERN_INVALID_ARGUMENT;
        }

        if (use_data_addr || use_4K_compat) {
            offset_in_page = offset - map_start;
            if (use_4K_compat) {
                offset_in_page &= ~((signed)(0xFFF));
            }
        } else {
            offset_in_page = 0;
        }

        if (mask_protections) {
            /*
             * caller is asking for whichever protections are
             * available: no required protections.
             */
            cur_prot = VM_PROT_NONE;
            max_prot = VM_PROT_NONE;
        } else {
            /*
             * Caller wants a memory entry with "protections".
             * Make sure we extract only memory that matches that.
             */
            cur_prot = protections;
            max_prot = protections;
        }
        if (target_map->pmap == kernel_pmap) {
            /*
             * Get "reserved" map entries to avoid deadlocking
             * on the kernel map or a kernel submap if we
             * run out of VM map entries and need to refill that
             * zone.
             */
            vmk_flags.vmkf_copy_pageable = FALSE;
        } else {
            vmk_flags.vmkf_copy_pageable = TRUE;
        }
        vmk_flags.vmkf_copy_same_map = FALSE;
        assert(map_size != 0);
        kr = vm_map_copy_extract(target_map,
            map_start,
            map_size,
            FALSE, /* copy */
            &copy,
            &cur_prot,
            &max_prot,
            VM_INHERIT_SHARE,
            vmk_flags);
        if (kr != KERN_SUCCESS) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr);
            if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
//                panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr);
            }
            return kr;
        }
        assert(copy != VM_MAP_COPY_NULL);

        if (mask_protections) {
            /*
             * We just want as much of "original_protections"
             * as we can get out of the actual "cur_prot".
             */
            protections &= cur_prot;
            if (protections == VM_PROT_NONE) {
                /* no access at all: fail */
                DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_PROTECTION_FAILURE);
                if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
//                    panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr);
                }
                vm_map_copy_discard(copy);
                return KERN_PROTECTION_FAILURE;
            }
        } else {
            /*
             * We want exactly "original_protections"
             * out of "cur_prot".
             */
            assert((cur_prot & protections) == protections);
            assert((max_prot & protections) == protections);
            /* XXX FBDP TODO: no longer needed? */
            if ((cur_prot & protections) != protections) {
                if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
//                    panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, KERN_PROTECTION_FAILURE);
                }
                vm_map_copy_discard(copy);
                DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_PROTECTION_FAILURE);
                return KERN_PROTECTION_FAILURE;
            }
        }

        if (!(permission & MAP_MEM_VM_SHARE)) {
            vm_map_entry_t copy_entry;

            /* limit size to what's actually covered by "copy" */
            assert(copy->cpy_hdr.nentries == 1);
            copy_entry = vm_map_copy_first_entry(copy);
            map_size = copy_entry->vme_end - copy_entry->vme_start;

            if ((permission & MAP_MEM_NAMED_REUSE) &&
                parent_copy_entry != VM_MAP_ENTRY_NULL &&
                VME_OBJECT(copy_entry) == VME_OBJECT(parent_copy_entry) &&
                VME_OFFSET(copy_entry) == VME_OFFSET(parent_copy_entry) &&
                parent_entry->offset == 0 &&
                parent_entry->size == map_size &&
                (parent_entry->data_offset == offset_in_page)) {
                /* we have a match: re-use "parent_entry" */

                /* release our new "copy" */
                vm_map_copy_discard(copy);
                /* get extra send right on handle */
                ipc_port_copy_send(parent_handle);

                *size = CAST_DOWN(vm_size_t,
                    (parent_entry->size -
                    parent_entry->data_offset));
                *object_handle = parent_handle;
                DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
                return KERN_SUCCESS;
            }

            /* no match: we need to create a new entry */
            object = VME_OBJECT(copy_entry);
            vm_object_lock(object);
            wimg_mode = object->wimg_bits;
            if (!(object->nophyscache)) {
                vm_prot_to_wimg(access, &wimg_mode);
            }
            if (object->wimg_bits != wimg_mode) {
                vm_object_change_wimg_mode(object, wimg_mode);
            }
            vm_object_unlock(object);
        }

        kr = mach_memory_entry_allocate(&user_entry, &user_handle);
        if (kr != KERN_SUCCESS) {
            if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
//                panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr);
            }
            vm_map_copy_discard(copy);
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE);
            return KERN_FAILURE;
        }

        user_entry->backing.copy = copy;
        user_entry->is_sub_map = FALSE;
        user_entry->is_object = FALSE;
        user_entry->internal = FALSE;
        user_entry->protection = protections;
        user_entry->size = map_size;
        user_entry->data_offset = offset_in_page;

        if (permission & MAP_MEM_VM_SHARE) {
            user_entry->is_copy = TRUE;
            user_entry->offset = 0;
        } else {
            user_entry->is_object = TRUE;
            user_entry->internal = object->internal;
            user_entry->offset = VME_OFFSET(vm_map_copy_first_entry(copy));
            SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
        }

        *size = CAST_DOWN(vm_size_t, (user_entry->size -
            user_entry->data_offset));
        *object_handle = user_handle;
        DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
        return KERN_SUCCESS;
    }
    /* The new object will be based on an existing named object */
    if (parent_entry == NULL) {
        kr = KERN_INVALID_ARGUMENT;
        goto make_mem_done;
    }

    if (parent_entry->is_copy) {
        panic("parent_entry %p is_copy not supported\n", parent_entry);
        kr = KERN_INVALID_ARGUMENT;
        goto make_mem_done;
    }

    if (use_data_addr || use_4K_compat) {
        /*
         * submaps and pagers should only be accessible from within
         * the kernel, which shouldn't use the data address flag, so can fail here.
         */
        if (parent_entry->is_sub_map) {
            panic("Shouldn't be using data address with a parent entry that is a submap.");
        }
        /*
         * Account for offset to data in parent entry and
         * compute our own offset to data.
         */
        if ((offset + *size + parent_entry->data_offset) > parent_entry->size) {
            kr = KERN_INVALID_ARGUMENT;
            goto make_mem_done;
        }

        map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
        offset_in_page = (offset + parent_entry->data_offset) - map_start;
        if (use_4K_compat) {
            offset_in_page &= ~((signed)(0xFFF));
        }
        map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
        map_size = map_end - map_start;
    } else {
        map_end = vm_map_round_page(offset + *size, PAGE_MASK);
        map_size = map_end - map_start;
        offset_in_page = 0;

        if ((offset + map_size) > parent_entry->size) {
            kr = KERN_INVALID_ARGUMENT;
            goto make_mem_done;
        }
    }

    if (mask_protections) {
        /*
         * The caller asked us to use the "protections" as
         * a mask, so restrict "protections" to what this
         * mapping actually allows.
         */
        protections &= parent_entry->protection;
    }
    if ((protections & parent_entry->protection) != protections) {
        kr = KERN_PROTECTION_FAILURE;
        goto make_mem_done;
    }

    if (mach_memory_entry_allocate(&user_entry, &user_handle)
        != KERN_SUCCESS) {
        kr = KERN_FAILURE;
        goto make_mem_done;
    }

    user_entry->size = map_size;
    user_entry->offset = parent_entry->offset + map_start;
    user_entry->data_offset = offset_in_page;
    user_entry->is_sub_map = parent_entry->is_sub_map;
    user_entry->is_copy = parent_entry->is_copy;
    user_entry->internal = parent_entry->internal;
    user_entry->protection = protections;

    if (access != MAP_MEM_NOOP) {
        SET_MAP_MEM(access, user_entry->protection);
    }

    if (parent_entry->is_sub_map) {
        vm_map_t map = parent_entry->backing.map;
        vm_map_reference(map);
        user_entry->backing.map = map;
    } else {
        object = vm_named_entry_to_vm_object(parent_entry);
        assert(object != VM_OBJECT_NULL);
        assert(object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC);
        kr = vm_named_entry_from_vm_object(
            user_entry,
            object,
            user_entry->offset,
            map_size,
            (user_entry->protection & VM_PROT_ALL));
        if (kr != KERN_SUCCESS) {
            goto make_mem_done;
        }
        assert(user_entry->is_object);
        /* we now point to this object, hold on */
        vm_object_lock(object);
        vm_object_reference_locked(object);
#if VM_OBJECT_TRACKING_OP_TRUESHARE
        if (!object->true_share &&
            vm_object_tracking_inited) {
            void *bt[VM_OBJECT_TRACKING_BTDEPTH];
            int num = 0;

            num = OSBacktrace(bt,
                VM_OBJECT_TRACKING_BTDEPTH);
            btlog_add_entry(vm_object_tracking_btlog,
                object,
                VM_OBJECT_TRACKING_OP_TRUESHARE,
                bt,
                num);
        }
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */

        object->true_share = TRUE;
        if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
            object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        }
        vm_object_unlock(object);
    }
    *size = CAST_DOWN(vm_size_t, (user_entry->size -
        user_entry->data_offset));
    *object_handle = user_handle;
    DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
    return KERN_SUCCESS;

make_mem_done:
    if (user_handle != IP_NULL) {
        /*
         * Releasing "user_handle" causes the kernel object
         * associated with it ("user_entry" here) to also be
         * released and freed.
         */
        mach_memory_entry_port_release(user_handle);
    }
    DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr);
    return kr;
}
kern_return_t
_mach_make_memory_entry(
    vm_map_t                target_map,
    memory_object_size_t    *size,
    memory_object_offset_t  offset,
    vm_prot_t               permission,
    ipc_port_t              *object_handle,
    ipc_port_t              parent_entry)
{
    memory_object_size_t    mo_size;
    kern_return_t           kr;

    mo_size = (memory_object_size_t)*size;
    kr = mach_make_memory_entry_64(target_map, &mo_size,
        (memory_object_offset_t)offset, permission, object_handle,
        parent_entry);
    *size = mo_size;
    return kr;
}

kern_return_t
mach_make_memory_entry(
    vm_map_t                target_map,
    vm_size_t               *size,
    vm_offset_t             offset,
    vm_prot_t               permission,
    ipc_port_t              *object_handle,
    ipc_port_t              parent_entry)
{
    memory_object_size_t    mo_size;
    kern_return_t           kr;

    mo_size = (memory_object_size_t)*size;
    kr = mach_make_memory_entry_64(target_map, &mo_size,
        (memory_object_offset_t)offset, permission, object_handle,
        parent_entry);
    *size = CAST_DOWN(vm_size_t, mo_size);
    return kr;
}
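
/*
 * Usage sketch: user-space clients typically pair the memory-entry call
 * above with mach_vm_map() to share a region between tasks.  Everything
 * below other than the Mach APIs themselves is hypothetical and for
 * illustration only.
 *
 *	mach_vm_address_t    addr = 0;
 *	memory_object_size_t len = 4096;
 *	mach_port_t          handle = MACH_PORT_NULL;
 *	kern_return_t        kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr, len, VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		kr = mach_make_memory_entry_64(mach_task_self(), &len, addr,
 *		    VM_PROT_READ | VM_PROT_WRITE, &handle, MACH_PORT_NULL);
 *	}
 *	if (kr == KERN_SUCCESS) {
 *		mach_vm_address_t mapped = 0;
 *		kr = mach_vm_map(mach_task_self(), &mapped, len, 0,
 *		    VM_FLAGS_ANYWHERE, handle, 0, FALSE,
 *		    VM_PROT_READ | VM_PROT_WRITE,
 *		    VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
 *	}
 */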
/*
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
    vm_map_t        map,
    boolean_t       must_wire)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    vm_map_lock(map);
    map->wiring_required = (must_wire == TRUE);
    vm_map_unlock(map);

    return KERN_SUCCESS;
}
kern_return_t
vm_map_exec_lockdown(
    vm_map_t        map)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    vm_map_lock(map);
    map->map_disallow_new_exec = TRUE;
    vm_map_unlock(map);

    return KERN_SUCCESS;
}
#if VM_NAMED_ENTRY_LIST
queue_head_t vm_named_entry_list = QUEUE_HEAD_INITIALIZER(vm_named_entry_list);
int vm_named_entry_count = 0;
LCK_MTX_EARLY_DECLARE_ATTR(vm_named_entry_list_lock_data,
    &vm_object_lck_grp, &vm_object_lck_attr);
#endif /* VM_NAMED_ENTRY_LIST */
__private_extern__ kern_return_t
mach_memory_entry_allocate(
    vm_named_entry_t        *user_entry_p,
    ipc_port_t              *user_handle_p)
{
    vm_named_entry_t        user_entry;
    ipc_port_t              user_handle;

    user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
    if (user_entry == NULL) {
        return KERN_FAILURE;
    }
    bzero(user_entry, sizeof(*user_entry));

    named_entry_lock_init(user_entry);

    user_entry->backing.copy = NULL;
    user_entry->is_object = FALSE;
    user_entry->is_sub_map = FALSE;
    user_entry->is_copy = FALSE;
    user_entry->internal = FALSE;
    user_entry->size = 0;
    user_entry->offset = 0;
    user_entry->data_offset = 0;
    user_entry->protection = VM_PROT_NONE;
    user_entry->ref_count = 1;

    user_handle = ipc_kobject_alloc_port((ipc_kobject_t)user_entry,
        IKOT_NAMED_ENTRY,
        IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);

    *user_entry_p = user_entry;
    *user_handle_p = user_handle;

#if VM_NAMED_ENTRY_LIST
    /* keep a loose (no reference) pointer to the Mach port, for debugging only */
    user_entry->named_entry_port = user_handle;
    /* backtrace at allocation time, for debugging only */
    OSBacktrace(&user_entry->named_entry_bt[0],
        NAMED_ENTRY_BT_DEPTH);

    /* add this new named entry to the global list */
    lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
    queue_enter(&vm_named_entry_list, user_entry,
        vm_named_entry_t, named_entry_list);
    vm_named_entry_count++;
    lck_mtx_unlock(&vm_named_entry_list_lock_data);
#endif /* VM_NAMED_ENTRY_LIST */

    return KERN_SUCCESS;
}
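
/*
 * Lifecycle note: the port allocated above carries a send right and an armed
 * no-senders request (IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST),
 * so a named entry is not torn down by freeing it directly: dropping the last
 * send right (see mach_memory_entry_port_release() below) is what eventually
 * leads to mach_destroy_memory_entry() releasing the backing map/copy/object
 * and freeing the vm_named_entry itself.
 */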
/*
 *	mach_memory_object_memory_entry_64
 *
 *	Create a named entry backed by the provided pager.
 *
 */
kern_return_t
mach_memory_object_memory_entry_64(
    host_t                  host,
    boolean_t               internal,
    vm_object_offset_t      size,
    vm_prot_t               permission,
    memory_object_t         pager,
    ipc_port_t              *entry_handle)
{
    unsigned int            access;
    vm_named_entry_t        user_entry;
    ipc_port_t              user_handle;
    vm_object_t             object;
    kern_return_t           kr;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    if (pager == MEMORY_OBJECT_NULL && internal) {
        object = vm_object_allocate(size);
        if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
            object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        }
    } else {
        object = memory_object_to_vm_object(pager);
        if (object != VM_OBJECT_NULL) {
            vm_object_reference(object);
        }
    }
    if (object == VM_OBJECT_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (mach_memory_entry_allocate(&user_entry, &user_handle)
        != KERN_SUCCESS) {
        vm_object_deallocate(object);
        return KERN_FAILURE;
    }

    user_entry->size = size;
    user_entry->offset = 0;
    user_entry->protection = permission & VM_PROT_ALL;
    access = GET_MAP_MEM(permission);
    SET_MAP_MEM(access, user_entry->protection);
    user_entry->is_sub_map = FALSE;
    assert(user_entry->ref_count == 1);

    kr = vm_named_entry_from_vm_object(user_entry, object, 0, size,
        (user_entry->protection & VM_PROT_ALL));
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    user_entry->internal = object->internal;
    assert(object->internal == internal);

    *entry_handle = user_handle;
    return KERN_SUCCESS;
}
kern_return_t
mach_memory_object_memory_entry(
    host_t          host,
    boolean_t       internal,
    vm_size_t       size,
    vm_prot_t       permission,
    memory_object_t pager,
    ipc_port_t      *entry_handle)
{
    return mach_memory_object_memory_entry_64( host, internal,
               (vm_object_offset_t)size, permission, pager, entry_handle);
}
kern_return_t
mach_memory_entry_purgable_control(
    ipc_port_t      entry_port,
    vm_purgable_t   control,
    int             *state)
{
    if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
        /* not allowed from user-space */
        return KERN_INVALID_ARGUMENT;
    }

    return memory_entry_purgeable_control_internal(entry_port, control, state);
}
kern_return_t
memory_entry_purgeable_control_internal(
    ipc_port_t      entry_port,
    vm_purgable_t   control,
    int             *state)
{
    kern_return_t           kr;
    vm_named_entry_t        mem_entry;
    vm_object_t             object;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }
    if (control != VM_PURGABLE_SET_STATE &&
        control != VM_PURGABLE_GET_STATE &&
        control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((control == VM_PURGABLE_SET_STATE ||
        control == VM_PURGABLE_SET_STATE_FROM_KERNEL) &&
        (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
        ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock(object);

    /* check that named entry covers entire object ? */
    if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
        vm_object_unlock(object);
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    named_entry_unlock(mem_entry);

    kr = vm_object_purgable_control(object, control, state);

    vm_object_unlock(object);

    return kr;
}
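
/*
 * Usage sketch (assuming the MIG-generated mach_memory_entry_purgable_control()
 * routine is reachable by the caller): a purgeable named entry created with
 * MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE can be toggled roughly like this;
 * "entry_handle" is hypothetical.
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kr = mach_memory_entry_purgable_control(entry_handle,
 *	    VM_PURGABLE_SET_STATE, &state);
 *
 *	state = VM_PURGABLE_NONVOLATILE;
 *	kr = mach_memory_entry_purgable_control(entry_handle,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	// On return, "state" holds the previous state; VM_PURGABLE_EMPTY
 *	// indicates the contents were reclaimed while volatile.
 */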
kern_return_t
mach_memory_entry_access_tracking(
    ipc_port_t      entry_port,
    int             *access_tracking,
    uint32_t        *access_tracking_reads,
    uint32_t        *access_tracking_writes)
{
    return memory_entry_access_tracking_internal(entry_port,
               access_tracking,
               access_tracking_reads,
               access_tracking_writes);
}
kern_return_t
memory_entry_access_tracking_internal(
    ipc_port_t      entry_port,
    int             *access_tracking,
    uint32_t        *access_tracking_reads,
    uint32_t        *access_tracking_writes)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    kern_return_t           kr;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

#if VM_OBJECT_ACCESS_TRACKING
    vm_object_access_tracking(object,
        access_tracking,
        access_tracking_reads,
        access_tracking_writes);
    kr = KERN_SUCCESS;
#else /* VM_OBJECT_ACCESS_TRACKING */
    (void) access_tracking;
    (void) access_tracking_reads;
    (void) access_tracking_writes;
    kr = KERN_NOT_SUPPORTED;
#endif /* VM_OBJECT_ACCESS_TRACKING */

    named_entry_unlock(mem_entry);

    return kr;
}
kern_return_t
mach_memory_entry_ownership(
    ipc_port_t      entry_port,
    task_t          owner,
    int             ledger_tag,
    int             ledger_flags)
{
    task_t                  cur_task;
    kern_return_t           kr;
    vm_named_entry_t        mem_entry;
    vm_object_t             object;

    cur_task = current_task();
    if (cur_task != kernel_task &&
        (owner != cur_task ||
        (ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) ||
        ledger_tag == VM_LEDGER_TAG_NETWORK)) {
        /*
         * An entitlement is required to:
         * + transfer memory ownership to someone else,
         * + request that the memory not count against the footprint,
         * + tag as "network" (since that implies "no footprint")
         */
        if (!cur_task->task_can_transfer_memory_ownership &&
            IOTaskHasEntitlement(cur_task,
            "com.apple.private.memory.ownership_transfer")) {
            cur_task->task_can_transfer_memory_ownership = TRUE;
        }
        if (!cur_task->task_can_transfer_memory_ownership) {
            return KERN_NO_ACCESS;
        }
    }

    if (ledger_flags & ~VM_LEDGER_FLAGS) {
        return KERN_INVALID_ARGUMENT;
    }
    if (ledger_tag <= 0 ||
        ledger_tag > VM_LEDGER_TAG_MAX) {
        return KERN_INVALID_ARGUMENT;
    }

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }
    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock(object);

    /* check that named entry covers entire object ? */
    if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
        vm_object_unlock(object);
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    named_entry_unlock(mem_entry);

    kr = vm_object_ownership_change(object,
        ledger_tag,
        owner,
        ledger_flags,
        FALSE); /* task_objq_locked */
    vm_object_unlock(object);

    return kr;
}
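
/*
 * Illustrative call (hypothetical tag value, assuming VM_LEDGER_TAG_MEDIA is
 * defined on this platform): transferring a named entry's pages to another
 * task's ledgers without footprint accounting would look roughly like:
 *
 *	kr = mach_memory_entry_ownership(entry_port, other_task,
 *	    VM_LEDGER_TAG_MEDIA, VM_LEDGER_FLAG_NO_FOOTPRINT);
 *
 * As the checks above show, any of ownership transfer, no-footprint
 * accounting, or the network tag requires either being the kernel task or
 * holding the "com.apple.private.memory.ownership_transfer" entitlement.
 */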
kern_return_t
mach_memory_entry_get_page_counts(
    ipc_port_t      entry_port,
    unsigned int    *resident_page_count,
    unsigned int    *dirty_page_count)
{
    kern_return_t           kr;
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    vm_object_offset_t      offset;
    vm_object_size_t        size;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock(object);

    offset = mem_entry->offset;
    size = mem_entry->size;
    size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
    offset = vm_object_trunc_page(offset);

    named_entry_unlock(mem_entry);

    kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count);

    vm_object_unlock(object);

    return kr;
}
kern_return_t
mach_memory_entry_phys_page_offset(
    ipc_port_t              entry_port,
    vm_object_offset_t      *offset_p)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    vm_object_offset_t      offset;
    vm_object_offset_t      data_offset;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ipc_kobject_get(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    offset = mem_entry->offset;
    data_offset = mem_entry->data_offset;

    named_entry_unlock(mem_entry);

    *offset_p = offset - vm_object_trunc_page(offset) + data_offset;
    return KERN_SUCCESS;
}
kern_return_t
mach_memory_entry_map_size(
    ipc_port_t              entry_port,
    vm_map_t                map,
    memory_object_offset_t  offset,
    memory_object_offset_t  size,
    mach_vm_size_t          *map_size)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    vm_object_offset_t      object_offset_start, object_offset_end;
    vm_map_copy_t           copy_map, target_copy_map;
    vm_map_offset_t         overmap_start, overmap_end, trimmed_start;
    kern_return_t           kr;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ipc_kobject_get(entry_port);
    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    if (mem_entry->is_object) {
        object = vm_named_entry_to_vm_object(mem_entry);
        if (object == VM_OBJECT_NULL) {
            named_entry_unlock(mem_entry);
            return KERN_INVALID_ARGUMENT;
        }

        object_offset_start = mem_entry->offset;
        object_offset_start += mem_entry->data_offset;
        object_offset_start += offset;
        object_offset_end = object_offset_start + size;
        object_offset_start = vm_map_trunc_page(object_offset_start,
            VM_MAP_PAGE_MASK(map));
        object_offset_end = vm_map_round_page(object_offset_end,
            VM_MAP_PAGE_MASK(map));

        named_entry_unlock(mem_entry);

        *map_size = object_offset_end - object_offset_start;
        return KERN_SUCCESS;
    }

    if (!mem_entry->is_copy) {
        panic("unsupported type of mem_entry %p\n", mem_entry);
    }

    assert(mem_entry->is_copy);
    if (VM_MAP_COPY_PAGE_MASK(mem_entry->backing.copy) == VM_MAP_PAGE_MASK(map)) {
        *map_size = vm_map_round_page(mem_entry->offset + mem_entry->data_offset + offset + size, VM_MAP_PAGE_MASK(map)) - vm_map_trunc_page(mem_entry->offset + mem_entry->data_offset + offset, VM_MAP_PAGE_MASK(map));
        DEBUG4K_SHARE("map %p (%d) mem_entry %p offset 0x%llx + 0x%llx + 0x%llx size 0x%llx -> map_size 0x%llx\n", map, VM_MAP_PAGE_MASK(map), mem_entry, mem_entry->offset, mem_entry->data_offset, offset, size, *map_size);
        named_entry_unlock(mem_entry);
        return KERN_SUCCESS;
    }

    DEBUG4K_SHARE("mem_entry %p copy %p (%d) map %p (%d) offset 0x%llx size 0x%llx\n", mem_entry, mem_entry->backing.copy, VM_MAP_COPY_PAGE_SHIFT(mem_entry->backing.copy), map, VM_MAP_PAGE_SHIFT(map), offset, size);
    copy_map = mem_entry->backing.copy;
    target_copy_map = VM_MAP_COPY_NULL;
    DEBUG4K_ADJUST("adjusting...\n");
    kr = vm_map_copy_adjust_to_target(copy_map,
        mem_entry->data_offset + offset,
        size,
        map,
        FALSE,
        &target_copy_map,
        &overmap_start,
        &overmap_end,
        &trimmed_start);
    if (kr == KERN_SUCCESS) {
        if (target_copy_map->size != copy_map->size) {
            DEBUG4K_ADJUST("copy %p (%d) map %p (%d) offset 0x%llx size 0x%llx overmap_start 0x%llx overmap_end 0x%llx trimmed_start 0x%llx map_size 0x%llx -> 0x%llx\n", copy_map, VM_MAP_COPY_PAGE_SHIFT(copy_map), map, VM_MAP_PAGE_SHIFT(map), (uint64_t)offset, (uint64_t)size, (uint64_t)overmap_start, (uint64_t)overmap_end, (uint64_t)trimmed_start, (uint64_t)copy_map->size, (uint64_t)target_copy_map->size);
        }
        *map_size = target_copy_map->size;
        if (target_copy_map != copy_map) {
            vm_map_copy_discard(target_copy_map);
        }
        target_copy_map = VM_MAP_COPY_NULL;
    }
    named_entry_unlock(mem_entry);
    return kr;
}
/*
 * mach_memory_entry_port_release:
 *
 * Release a send right on a named entry port.  This is the correct
 * way to destroy a named entry.  When the last right on the port is
 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
 */
void
mach_memory_entry_port_release(
    ipc_port_t      port)
{
    assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
    ipc_port_release_send(port);
}
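
/*
 * Typical kernel-side teardown sketch: a subsystem that obtained a named
 * entry handle (e.g. from mach_memory_entry_allocate() or
 * mach_make_memory_entry_64()) drops it through this routine rather than
 * calling mach_destroy_memory_entry() directly:
 *
 *	if (handle != IP_NULL) {
 *		mach_memory_entry_port_release(handle);
 *		handle = IP_NULL;
 *	}
 */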
/*
 * mach_destroy_memory_entry:
 *
 * Drops a reference on a memory entry and destroys the memory entry if
 * there are no more references on it.
 * NOTE: This routine should not be called to destroy a memory entry from the
 * kernel, as it will not release the Mach port associated with the memory
 * entry.  The proper way to destroy a memory entry in the kernel is to
 * call mach_memory_entry_port_release() to release the kernel's send-right on
 * the memory entry's port.  When the last send right is released, the memory
 * entry will be destroyed via ipc_kobject_destroy().
 */
void
mach_destroy_memory_entry(
    ipc_port_t      port)
{
    vm_named_entry_t        named_entry;
#if MACH_ASSERT
    assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
    named_entry = (vm_named_entry_t) ip_get_kobject(port);

    named_entry_lock(named_entry);
    named_entry->ref_count -= 1;

    if (named_entry->ref_count == 0) {
        if (named_entry->is_sub_map) {
            vm_map_deallocate(named_entry->backing.map);
        } else if (named_entry->is_copy) {
            vm_map_copy_discard(named_entry->backing.copy);
        } else if (named_entry->is_object) {
            assert(named_entry->backing.copy->cpy_hdr.nentries == 1);
            vm_map_copy_discard(named_entry->backing.copy);
        } else {
            assert(named_entry->backing.copy == VM_MAP_COPY_NULL);
        }

        named_entry_unlock(named_entry);
        named_entry_lock_destroy(named_entry);

#if VM_NAMED_ENTRY_LIST
        lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
        queue_remove(&vm_named_entry_list, named_entry,
            vm_named_entry_t, named_entry_list);
        assert(vm_named_entry_count > 0);
        vm_named_entry_count--;
        lck_mtx_unlock(&vm_named_entry_list_lock_data);
#endif /* VM_NAMED_ENTRY_LIST */

        kfree(named_entry, sizeof(struct vm_named_entry));
    } else {
        named_entry_unlock(named_entry);
    }
}
/* Allow manipulation of individual page state.  This is actually part of */
/* the UPL regimen but takes place on the memory entry rather than on a UPL */

kern_return_t
mach_memory_entry_page_op(
    ipc_port_t              entry_port,
    vm_object_offset_t      offset,
    int                     ops,
    ppnum_t                 *phys_entry,
    int                     *flags)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    kern_return_t           kr;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_reference(object);
    named_entry_unlock(mem_entry);

    kr = vm_object_page_op(object, offset, ops, phys_entry, flags);

    vm_object_deallocate(object);

    return kr;
}
/*
 * mach_memory_entry_range_op offers performance enhancement over
 * mach_memory_entry_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */

kern_return_t
mach_memory_entry_range_op(
    ipc_port_t              entry_port,
    vm_object_offset_t      offset_beg,
    vm_object_offset_t      offset_end,
    int                     ops,
    int                     *range)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    kern_return_t           kr;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_reference(object);
    named_entry_unlock(mem_entry);

    kr = vm_object_range_op(object,
        offset_beg,
        offset_end,
        ops,
        (uint32_t *) range);

    vm_object_deallocate(object);

    return kr;
}
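
/*
 * Usage sketch (hedged): the same family of operations can be issued per page
 * or over a range; "entry_port" and the offsets below are hypothetical, and
 * whether a given op succeeds depends on the backing object.
 *
 *	ppnum_t phys = 0;
 *	int     flags = 0;
 *	kr = mach_memory_entry_page_op(entry_port, 0,
 *	    UPL_POP_PHYSICAL, &phys, &flags);
 *
 *	int     range = 0;
 *	kr = mach_memory_entry_range_op(entry_port, 0, 16 * PAGE_SIZE,
 *	    UPL_ROP_ABSENT, &range);
 */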
/* ******* Temporary Internal calls to UPL for BSD ***** */

extern int kernel_upl_map(
    vm_map_t        map,
    upl_t           upl,
    vm_offset_t     *dst_addr);

extern int kernel_upl_unmap(
    vm_map_t        map,
    upl_t           upl);

extern int kernel_upl_commit(
    upl_t                   upl,
    upl_page_info_t         *pl,
    mach_msg_type_number_t  count);

extern int kernel_upl_commit_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     flags,
    upl_page_info_array_t   pl,
    mach_msg_type_number_t  count);

extern int kernel_upl_abort(
    upl_t                   upl,
    int                     abort_type);

extern int kernel_upl_abort_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     abort_flags);


kern_return_t
kernel_upl_map(
    vm_map_t        map,
    upl_t           upl,
    vm_offset_t     *dst_addr)
{
    return vm_upl_map(map, upl, dst_addr);
}


kern_return_t
kernel_upl_unmap(
    vm_map_t        map,
    upl_t           upl)
{
    return vm_upl_unmap(map, upl);
}

kern_return_t
kernel_upl_commit(
    upl_t                   upl,
    upl_page_info_t         *pl,
    mach_msg_type_number_t  count)
{
    kern_return_t   kr;

    kr = upl_commit(upl, pl, count);
    upl_deallocate(upl);
    return kr;
}


kern_return_t
kernel_upl_commit_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     flags,
    upl_page_info_array_t   pl,
    mach_msg_type_number_t  count)
{
    boolean_t               finished = FALSE;
    kern_return_t           kr;

    if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
        flags |= UPL_COMMIT_NOTIFY_EMPTY;
    }

    if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

    if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished) {
        upl_deallocate(upl);
    }

    return kr;
}

kern_return_t
kernel_upl_abort_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     abort_flags)
{
    kern_return_t           kr;
    boolean_t               finished = FALSE;

    if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY) {
        abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
    }

    kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

    if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished) {
        upl_deallocate(upl);
    }

    return kr;
}

kern_return_t
kernel_upl_abort(
    upl_t                   upl,
    int                     abort_type)
{
    kern_return_t   kr;

    kr = upl_abort(upl, abort_type);
    upl_deallocate(upl);
    return kr;
}
/*
 * Now a kernel-private interface (for BootCache
 * use only).  Need a cleaner way to create an
 * empty vm_map() and return a handle to it.
 */

kern_return_t
vm_region_object_create(
    __unused vm_map_t       target_map,
    vm_size_t               size,
    ipc_port_t              *object_handle)
{
    vm_named_entry_t        user_entry;
    ipc_port_t              user_handle;

    vm_map_t        new_map;

    if (mach_memory_entry_allocate(&user_entry, &user_handle)
        != KERN_SUCCESS) {
        return KERN_FAILURE;
    }

    /* Create a named object based on a submap of specified size */

    new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
        vm_map_round_page(size,
        VM_MAP_PAGE_MASK(target_map)),
        TRUE);
    vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));

    user_entry->backing.map = new_map;
    user_entry->internal = TRUE;
    user_entry->is_sub_map = TRUE;
    user_entry->offset = 0;
    user_entry->protection = VM_PROT_ALL;
    user_entry->size = size;
    assert(user_entry->ref_count == 1);

    *object_handle = user_handle;
    return KERN_SUCCESS;
}
ppnum_t vm_map_get_phys_page(           /* forward */
    vm_map_t        map,
    vm_offset_t     offset);

ppnum_t
vm_map_get_phys_page(
    vm_map_t        map,
    vm_offset_t     addr)
{
    vm_object_offset_t      offset;
    vm_object_t             object;
    vm_map_offset_t         map_offset;
    vm_map_entry_t          entry;
    ppnum_t                 phys_page = 0;

    map_offset = vm_map_trunc_page(addr, PAGE_MASK);

    vm_map_lock(map);
    while (vm_map_lookup_entry(map, map_offset, &entry)) {
        if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
            vm_map_unlock(map);
            return (ppnum_t) 0;
        }
        if (entry->is_sub_map) {
            vm_map_t        old_map;

            vm_map_lock(VME_SUBMAP(entry));
            old_map = map;
            map = VME_SUBMAP(entry);
            map_offset = (VME_OFFSET(entry) +
                (map_offset - entry->vme_start));
            vm_map_unlock(old_map);
            continue;
        }
        if (VME_OBJECT(entry)->phys_contiguous) {
            /* These are  not standard pageable memory mappings */
            /* If they are not present in the object they will  */
            /* have to be picked up from the pager through the  */
            /* fault mechanism.  */
            if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
                /* need to call vm_fault */
                vm_map_unlock(map);
                vm_fault(map, map_offset, VM_PROT_NONE,
                    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
                    THREAD_UNINT, NULL, 0);
                vm_map_lock(map);
                continue;
            }
            offset = (VME_OFFSET(entry) +
                (map_offset - entry->vme_start));
            phys_page = (ppnum_t)
                ((VME_OBJECT(entry)->vo_shadow_offset
                + offset) >> PAGE_SHIFT);
            break;
        }
        offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
        object = VME_OBJECT(entry);
        vm_object_lock(object);
        while (TRUE) {
            vm_page_t dst_page = vm_page_lookup(object, offset);
            if (dst_page == VM_PAGE_NULL) {
                if (object->shadow) {
                    vm_object_t old_object;

                    vm_object_lock(object->shadow);
                    old_object = object;
                    offset = offset + object->vo_shadow_offset;
                    object = object->shadow;
                    vm_object_unlock(old_object);
                } else {
                    vm_object_unlock(object);
                    break;
                }
            } else {
                phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
                vm_object_unlock(object);
                break;
            }
        }
        break;
    }

    vm_map_unlock(map);
    return phys_page;
}
kern_return_t kernel_object_iopl_request(       /* forward */
    vm_named_entry_t        named_entry,
    memory_object_offset_t  offset,
    upl_size_t              *upl_size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     *flags);

kern_return_t
kernel_object_iopl_request(
    vm_named_entry_t        named_entry,
    memory_object_offset_t  offset,
    upl_size_t              *upl_size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     *flags)
{
    vm_object_t             object;
    kern_return_t           ret;
    int                     caller_flags;

    caller_flags = *flags;

    if (caller_flags & ~UPL_VALID_FLAGS) {
        /*
         * For forward compatibility's sake,
         * reject any unknown flag.
         */
        return KERN_INVALID_VALUE;
    }

    /* a few checks to make sure user is obeying rules */
    if (*upl_size == 0) {
        if (offset >= named_entry->size) {
            return KERN_INVALID_RIGHT;
        }
        *upl_size = (upl_size_t) (named_entry->size - offset);
        if (*upl_size != named_entry->size - offset) {
            return KERN_INVALID_ARGUMENT;
        }
    }
    if (caller_flags & UPL_COPYOUT_FROM) {
        if ((named_entry->protection & VM_PROT_READ)
            != VM_PROT_READ) {
            return KERN_INVALID_RIGHT;
        }
    } else {
        if ((named_entry->protection &
            (VM_PROT_READ | VM_PROT_WRITE))
            != (VM_PROT_READ | VM_PROT_WRITE)) {
            return KERN_INVALID_RIGHT;
        }
    }
    if (named_entry->size < (offset + *upl_size)) {
        return KERN_INVALID_ARGUMENT;
    }

    /* the callers parameter offset is defined to be the */
    /* offset from beginning of named entry offset in object */
    offset = offset + named_entry->offset;

    if (named_entry->is_sub_map ||
        named_entry->is_copy) {
        return KERN_INVALID_ARGUMENT;
    }

    named_entry_lock(named_entry);

    /* This is the case where we are going to operate */
    /* on an already known object.  If the object is */
    /* not ready it is internal.  An external     */
    /* object cannot be mapped until it is ready  */
    /* we can therefore avoid the ready check     */
    /* in this case.  */
    assert(named_entry->is_object);
    object = vm_named_entry_to_vm_object(named_entry);
    vm_object_reference(object);
    named_entry_unlock(named_entry);

    if (!object->private) {
        if (*upl_size > MAX_UPL_TRANSFER_BYTES) {
            *upl_size = MAX_UPL_TRANSFER_BYTES;
        }
        if (object->phys_contiguous) {
            *flags = UPL_PHYS_CONTIG;
        } else {
            *flags = 0;
        }
    } else {
        *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
    }

    ret = vm_object_iopl_request(object,
        offset,
        *upl_size,
        upl_ptr,
        user_page_list,
        page_list_count,
        (upl_control_flags_t)(unsigned int)caller_flags);
    vm_object_deallocate(object);
    return ret;
}
/*
 * These symbols are looked up at runtime by vmware, VirtualBox,
 * despite not being exported in the symbol sets.
 */

#if defined(__x86_64__)

kern_return_t
mach_vm_map(
    vm_map_t            target_map,
    mach_vm_offset_t    *address,
    mach_vm_size_t      initial_size,
    mach_vm_offset_t    mask,
    int                 flags,
    ipc_port_t          port,
    vm_object_offset_t  offset,
    boolean_t           copy,
    vm_prot_t           cur_protection,
    vm_prot_t           max_protection,
    vm_inherit_t        inheritance);

kern_return_t
mach_vm_remap(
    vm_map_t            target_map,
    mach_vm_offset_t    *address,
    mach_vm_size_t      size,
    mach_vm_offset_t    mask,
    int                 flags,
    vm_map_t            src_map,
    mach_vm_offset_t    memory_address,
    boolean_t           copy,
    vm_prot_t           *cur_protection,
    vm_prot_t           *max_protection,
    vm_inherit_t        inheritance);

kern_return_t
mach_vm_map(
    vm_map_t            target_map,
    mach_vm_offset_t    *address,
    mach_vm_size_t      initial_size,
    mach_vm_offset_t    mask,
    int                 flags,
    ipc_port_t          port,
    vm_object_offset_t  offset,
    boolean_t           copy,
    vm_prot_t           cur_protection,
    vm_prot_t           max_protection,
    vm_inherit_t        inheritance)
{
    return mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
               offset, copy, cur_protection, max_protection, inheritance);
}

kern_return_t
mach_vm_remap(
    vm_map_t            target_map,
    mach_vm_offset_t    *address,
    mach_vm_size_t      size,
    mach_vm_offset_t    mask,
    int                 flags,
    vm_map_t            src_map,
    mach_vm_offset_t    memory_address,
    boolean_t           copy,
    vm_prot_t           *cur_protection,   /* OUT */
    vm_prot_t           *max_protection,   /* OUT */
    vm_inherit_t        inheritance)
{
    return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
               copy, cur_protection, max_protection, inheritance);
}

kern_return_t
vm_map(
    vm_map_t            target_map,
    vm_offset_t         *address,
    vm_size_t           size,
    vm_offset_t         mask,
    int                 flags,
    ipc_port_t          port,
    vm_offset_t         offset,
    boolean_t           copy,
    vm_prot_t           cur_protection,
    vm_prot_t           max_protection,
    vm_inherit_t        inheritance);

kern_return_t
vm_map(
    vm_map_t            target_map,
    vm_offset_t         *address,
    vm_size_t           size,
    vm_offset_t         mask,
    int                 flags,
    ipc_port_t          port,
    vm_offset_t         offset,
    boolean_t           copy,
    vm_prot_t           cur_protection,
    vm_prot_t           max_protection,
    vm_inherit_t        inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_map_kernel(target_map, address, size, mask,
               flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
               port, offset, copy,
               cur_protection, max_protection, inheritance);
}

#endif /* __x86_64__ */