/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can either be 32- or 64-bit, or
 * the kernel task can be 32- or 64-bit. mach_vm_allocate makes sense
 * everywhere, and is preferred.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and those
 * calls will always be 32-bit and call into the vm32_vm_allocate APIs. On
 * non-U32/K32 platforms, the MIG glue should never call into vm_allocate
 * directly, because the calling task and kernel_task are unlikely to use the
 * same size parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */
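/*
 * Illustrative user-space usage of the "wide" subsystem 4800 API (a minimal
 * sketch, not part of this file; it assumes the SDK's <mach/mach.h> and
 * <mach/mach_vm.h> headers):
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t    size = 4096;
 *	kern_return_t     kr;
 *
 *	// Let the kernel pick the address; memory comes back zero-filled.
 *	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		((char *)addr)[0] = 1;
 *		kr = mach_vm_deallocate(mach_task_self(), addr, size);
 *	}
 */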
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>    /* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>     /* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/sdt.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/memory_entry_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_init.h>

#include <san/kasan.h>

#include <libkern/OSDebug.h>
#include <IOKit/IOBSD.h>
vm_size_t upl_offset_to_pagelist = 0;
/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate_external(
	vm_map_t                map,
	mach_vm_offset_t        *addr,
	mach_vm_size_t          size,
	int                     flags)
{
	vm_tag_t tag;

	VM_GET_FLAGS_ALIAS(flags, tag);
	return mach_vm_allocate_kernel(map, addr, size, flags, tag);
}
kern_return_t
mach_vm_allocate_kernel(
	vm_map_t                map,
	mach_vm_offset_t        *addr,
	mach_vm_size_t          size,
	int                     flags,
	vm_tag_t                tag)
{
	vm_map_offset_t map_addr;
	vm_map_size_t   map_size;
	kern_return_t   result;
	boolean_t       anywhere;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain useable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0) {
			map_addr += VM_MAP_PAGE_SIZE(map);
		}
	} else {
		map_addr = vm_map_trunc_page(*addr,
		    VM_MAP_PAGE_MASK(map));
	}
	map_size = vm_map_round_page(size,
	    VM_MAP_PAGE_MASK(map));
	if (map_size == 0) {
		return KERN_INVALID_ARGUMENT;
	}

	result = vm_map_enter(
		map,
		&map_addr,
		map_size,
		(vm_map_offset_t)0,
		flags,
		VM_MAP_KERNEL_FLAGS_NONE,
		tag,
		VM_OBJECT_NULL,
		(vm_object_offset_t)0,
		FALSE,
		VM_PROT_DEFAULT,
		VM_PROT_ALL,
		VM_INHERIT_DEFAULT);

	*addr = (mach_vm_offset_t)map_addr;
	return result;
}
/*
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate_external(
	vm_map_t        map,
	vm_offset_t     *addr,
	vm_size_t       size,
	int             flags)
{
	vm_tag_t tag;

	VM_GET_FLAGS_ALIAS(flags, tag);
	return vm_allocate_kernel(map, addr, size, flags, tag);
}

kern_return_t
vm_allocate_kernel(
	vm_map_t        map,
	vm_offset_t     *addr,
	vm_size_t       size,
	int             flags,
	vm_tag_t        tag)
{
	vm_map_offset_t map_addr;
	vm_map_size_t   map_size;
	kern_return_t   result;
	boolean_t       anywhere;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain useable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0) {
			map_addr += VM_MAP_PAGE_SIZE(map);
		}
	} else {
		map_addr = vm_map_trunc_page(*addr,
		    VM_MAP_PAGE_MASK(map));
	}
	map_size = vm_map_round_page(size,
	    VM_MAP_PAGE_MASK(map));
	if (map_size == 0) {
		return KERN_INVALID_ARGUMENT;
	}

	result = vm_map_enter(
		map,
		&map_addr,
		map_size,
		(vm_map_offset_t)0,
		flags,
		VM_MAP_KERNEL_FLAGS_NONE,
		tag,
		VM_OBJECT_NULL,
		(vm_object_offset_t)0,
		FALSE,
		VM_PROT_DEFAULT,
		VM_PROT_ALL,
		VM_INHERIT_DEFAULT);

	if (result == KERN_SUCCESS && map->pmap == kernel_pmap) {
		kasan_notify_address(map_addr, map_size);
	}

	*addr = CAST_DOWN(vm_offset_t, map_addr);
	return result;
}
/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
	vm_map_t                map,
	mach_vm_offset_t        start,
	mach_vm_size_t          size)
{
	if ((map == VM_MAP_NULL) || (start + size < start)) {
		return KERN_INVALID_ARGUMENT;
	}

	if (size == (mach_vm_offset_t) 0) {
		return KERN_SUCCESS;
	}

	return vm_map_remove(map,
	           vm_map_trunc_page(start,
	           VM_MAP_PAGE_MASK(map)),
	           vm_map_round_page(start + size,
	           VM_MAP_PAGE_MASK(map)),
	           VM_MAP_REMOVE_NO_FLAGS);
}
/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
	vm_map_t        map,
	vm_offset_t     start,
	vm_size_t       size)
{
	if ((map == VM_MAP_NULL) || (start + size < start)) {
		return KERN_INVALID_ARGUMENT;
	}

	if (size == (vm_offset_t) 0) {
		return KERN_SUCCESS;
	}

	return vm_map_remove(map,
	           vm_map_trunc_page(start,
	           VM_MAP_PAGE_MASK(map)),
	           vm_map_round_page(start + size,
	           VM_MAP_PAGE_MASK(map)),
	           VM_MAP_REMOVE_NO_FLAGS);
}
/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_inherit(
	vm_map_t                map,
	mach_vm_offset_t        start,
	mach_vm_size_t          size,
	vm_inherit_t            new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID)) {
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_inherit(map,
	           vm_map_trunc_page(start,
	           VM_MAP_PAGE_MASK(map)),
	           vm_map_round_page(start + size,
	           VM_MAP_PAGE_MASK(map)),
	           new_inheritance);
}
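/*
 * Example (user space, a minimal sketch): mark a region so a forked child
 * shares it with the parent instead of receiving a copy-on-write copy.
 * "addr" and "size" are assumed to describe a region the caller already
 * allocated.
 *
 *	kern_return_t kr = mach_vm_inherit(mach_task_self(), addr, size,
 *	    VM_INHERIT_SHARE);
 */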
/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_inherit(
	vm_map_t        map,
	vm_offset_t     start,
	vm_size_t       size,
	vm_inherit_t    new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID)) {
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_inherit(map,
	           vm_map_trunc_page(start,
	           VM_MAP_PAGE_MASK(map)),
	           vm_map_round_page(start + size,
	           VM_MAP_PAGE_MASK(map)),
	           new_inheritance);
}
/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_protect(
	vm_map_t                map,
	mach_vm_offset_t        start,
	mach_vm_size_t          size,
	boolean_t               set_maximum,
	vm_prot_t               new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_protect(map,
	           vm_map_trunc_page(start,
	           VM_MAP_PAGE_MASK(map)),
	           vm_map_round_page(start + size,
	           VM_MAP_PAGE_MASK(map)),
	           new_protection,
	           set_maximum);
}
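/*
 * Example (user space, a minimal sketch): make a previously allocated region
 * read-only. Passing set_maximum = FALSE changes only the current protection,
 * so it can later be restored with VM_PROT_READ | VM_PROT_WRITE.
 *
 *	kern_return_t kr = mach_vm_protect(mach_task_self(), addr, size,
 *	    FALSE, VM_PROT_READ);
 */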
/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map. Addressability of the range limited
 *	to the same size as the kernel.
 */
kern_return_t
vm_protect(
	vm_map_t        map,
	vm_offset_t     start,
	vm_size_t       size,
	boolean_t       set_maximum,
	vm_prot_t       new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_protect(map,
	           vm_map_trunc_page(start,
	           VM_MAP_PAGE_MASK(map)),
	           vm_map_round_page(start + size,
	           VM_MAP_PAGE_MASK(map)),
	           new_protection,
	           set_maximum);
}
/*
 *	mach_vm_machine_attribute -
 *	Handle machine-specific attributes for a mapping, such
 *	as cacheability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
	vm_map_t                        map,
	mach_vm_address_t               addr,
	mach_vm_size_t                  size,
	vm_machine_attribute_t          attribute,
	vm_machine_attribute_val_t      *value)         /* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr)) {
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_machine_attribute(
		map,
		vm_map_trunc_page(addr,
		    VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(addr + size,
		    VM_MAP_PAGE_MASK(map)),
		attribute,
		value);
}
/*
 *	vm_machine_attribute -
 *	Handle machine-specific attributes for a mapping, such
 *	as cacheability, migrability, etc. Limited addressability
 *	(same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
	vm_map_t                        map,
	vm_address_t                    addr,
	vm_size_t                       size,
	vm_machine_attribute_t          attribute,
	vm_machine_attribute_val_t      *value)         /* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr)) {
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_machine_attribute(
		map,
		vm_map_trunc_page(addr,
		    VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(addr + size,
		    VM_MAP_PAGE_MASK(map)),
		attribute,
		value);
}
/*
 *	mach_vm_read -
 *	Read/copy a range from one address space and return it to the caller.
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 *
 *	JMM - because of mach_msg_type_number_t, this call is limited to a
 *	single 4GB region at this time.
 */
kern_return_t
mach_vm_read(
	vm_map_t                map,
	mach_vm_address_t       addr,
	mach_vm_size_t          size,
	pointer_t               *data,
	mach_msg_type_number_t  *data_size)
{
	kern_return_t   error;
	vm_map_copy_t   ipc_address;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if ((mach_msg_type_number_t) size != size) {
		return KERN_INVALID_ARGUMENT;
	}

	error = vm_map_copyin(map,
	    (vm_map_address_t)addr,
	    (vm_map_size_t)size,
	    FALSE,                      /* src_destroy */
	    &ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
	}
	return error;
}
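/*
 * Example (user space, a minimal sketch): read another task's memory given a
 * task port obtained elsewhere (e.g. via task_for_pid(), which requires the
 * appropriate privileges). The reply buffer lands in the caller's address
 * space via the IPC machinery and should be deallocated when done.
 *
 *	vm_offset_t            data = 0;
 *	mach_msg_type_number_t data_cnt = 0;
 *	kern_return_t          kr;
 *
 *	kr = mach_vm_read(target_task, remote_addr, length, &data, &data_cnt);
 *	if (kr == KERN_SUCCESS) {
 *		// ... inspect data_cnt bytes at "data" ...
 *		mach_vm_deallocate(mach_task_self(), data, data_cnt);
 *	}
 */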
/*
 *	vm_read -
 *	Read/copy a range from one address space and return it to the caller.
 *	Limited addressability (same range limits as for the native kernel map).
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 */
619 mach_msg_type_number_t
*data_size
)
622 vm_map_copy_t ipc_address
;
624 if (map
== VM_MAP_NULL
) {
625 return KERN_INVALID_ARGUMENT
;
628 mach_msg_type_number_t dsize
;
629 if (os_convert_overflow(size
, &dsize
)) {
631 * The kernel could handle a 64-bit "size" value, but
632 * it could not return the size of the data in "*data_size"
633 * without overflowing.
634 * Let's reject this "size" as invalid.
636 return KERN_INVALID_ARGUMENT
;
639 error
= vm_map_copyin(map
,
640 (vm_map_address_t
)addr
,
642 FALSE
, /* src_destroy */
645 if (KERN_SUCCESS
== error
) {
646 *data
= (pointer_t
) ipc_address
;
648 assert(*data_size
== size
);
/*
 *	mach_vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *	manually here.
 */
664 mach_vm_read_entry_t data_list
,
667 mach_msg_type_number_t i
;
671 if (map
== VM_MAP_NULL
||
672 count
> VM_MAP_ENTRY_MAX
) {
673 return KERN_INVALID_ARGUMENT
;
676 error
= KERN_SUCCESS
;
677 for (i
= 0; i
< count
; i
++) {
678 vm_map_address_t map_addr
;
679 vm_map_size_t map_size
;
681 map_addr
= (vm_map_address_t
)(data_list
[i
].address
);
682 map_size
= (vm_map_size_t
)(data_list
[i
].size
);
685 error
= vm_map_copyin(map
,
688 FALSE
, /* src_destroy */
690 if (KERN_SUCCESS
== error
) {
691 error
= vm_map_copyout(
695 if (KERN_SUCCESS
== error
) {
696 data_list
[i
].address
= map_addr
;
699 vm_map_copy_discard(copy
);
702 data_list
[i
].address
= (mach_vm_address_t
)0;
703 data_list
[i
].size
= (mach_vm_size_t
)0;
/*
 *	vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *	manually here.
 *
 *	The source and destination ranges are limited to those
 *	that can be described with a vm_address_t (i.e. same
 *	size map as the kernel).
 *
 *	JMM - If the result of the copyout is an address range
 *	that cannot be described with a vm_address_t (i.e. the
 *	caller had a larger address space but used this call
 *	anyway), it will result in a truncated address being
 *	returned (and a likely confused caller).
 */
730 vm_read_entry_t data_list
,
733 mach_msg_type_number_t i
;
737 if (map
== VM_MAP_NULL
||
738 count
> VM_MAP_ENTRY_MAX
) {
739 return KERN_INVALID_ARGUMENT
;
742 error
= KERN_SUCCESS
;
743 for (i
= 0; i
< count
; i
++) {
744 vm_map_address_t map_addr
;
745 vm_map_size_t map_size
;
747 map_addr
= (vm_map_address_t
)(data_list
[i
].address
);
748 map_size
= (vm_map_size_t
)(data_list
[i
].size
);
751 error
= vm_map_copyin(map
,
754 FALSE
, /* src_destroy */
756 if (KERN_SUCCESS
== error
) {
757 error
= vm_map_copyout(current_task()->map
,
760 if (KERN_SUCCESS
== error
) {
761 data_list
[i
].address
=
762 CAST_DOWN(vm_offset_t
, map_addr
);
765 vm_map_copy_discard(copy
);
768 data_list
[i
].address
= (mach_vm_address_t
)0;
769 data_list
[i
].size
= (mach_vm_size_t
)0;
/*
 *	mach_vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	In making an assumption that the current thread is local, it is
 *	no longer cluster-safe without a fully supportive local proxy
 *	thread/task (but we don't support clusters anymore, so this is moot).
 */
785 mach_vm_read_overwrite(
787 mach_vm_address_t address
,
789 mach_vm_address_t data
,
790 mach_vm_size_t
*data_size
)
795 if (map
== VM_MAP_NULL
) {
796 return KERN_INVALID_ARGUMENT
;
799 error
= vm_map_copyin(map
, (vm_map_address_t
)address
,
800 (vm_map_size_t
)size
, FALSE
, ©
);
802 if (KERN_SUCCESS
== error
) {
804 assertf(copy
->size
== (vm_map_size_t
) size
, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size
, (uint64_t) copy
->size
);
807 error
= vm_map_copy_overwrite(current_thread()->map
,
808 (vm_map_address_t
)data
,
809 copy
, (vm_map_size_t
) size
, FALSE
);
810 if (KERN_SUCCESS
== error
) {
814 vm_map_copy_discard(copy
);
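/*
 * Example (user space, a minimal sketch): copy bytes from a target task
 * directly into a caller-supplied buffer, avoiding the extra vm_map_copy
 * round-trip of mach_vm_read(). "target_task" and "remote_addr" are assumed
 * to come from elsewhere.
 *
 *	char            buf[256];
 *	mach_vm_size_t  outsize = 0;
 *	kern_return_t   kr;
 *
 *	kr = mach_vm_read_overwrite(target_task, remote_addr, sizeof(buf),
 *	    (mach_vm_address_t)buf, &outsize);
 */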
/*
 *	vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	This routine adds the additional limitation that the source and
 *	destination ranges must be describable with vm_address_t values
 *	(i.e. the same size address spaces as the kernel, or at least the
 *	ranges are in that first portion of the respective address spaces).
 */
834 vm_address_t address
,
837 vm_size_t
*data_size
)
842 if (map
== VM_MAP_NULL
) {
843 return KERN_INVALID_ARGUMENT
;
846 error
= vm_map_copyin(map
, (vm_map_address_t
)address
,
847 (vm_map_size_t
)size
, FALSE
, ©
);
849 if (KERN_SUCCESS
== error
) {
851 assertf(copy
->size
== (vm_map_size_t
) size
, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size
, (uint64_t) copy
->size
);
854 error
= vm_map_copy_overwrite(current_thread()->map
,
855 (vm_map_address_t
)data
,
856 copy
, (vm_map_size_t
) size
, FALSE
);
857 if (KERN_SUCCESS
== error
) {
861 vm_map_copy_discard(copy
);
/*
 *	mach_vm_write -
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 */
875 mach_vm_address_t address
,
877 mach_msg_type_number_t size
)
879 if (map
== VM_MAP_NULL
) {
880 return KERN_INVALID_ARGUMENT
;
883 return vm_map_copy_overwrite(map
, (vm_map_address_t
)address
,
884 (vm_map_copy_t
) data
, size
, FALSE
/* interruptible XXX */);
/*
 *	vm_write -
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 *
 *	The addressability of the range of addresses to overwrite is
 *	limited by the use of a vm_address_t (same size as kernel map).
 *	Either the target map is also small, or the range is in the
 *	low addresses within it.
 */
900 vm_address_t address
,
902 mach_msg_type_number_t size
)
904 if (map
== VM_MAP_NULL
) {
905 return KERN_INVALID_ARGUMENT
;
908 return vm_map_copy_overwrite(map
, (vm_map_address_t
)address
,
909 (vm_map_copy_t
) data
, size
, FALSE
/* interruptible XXX */);
/*
 *	mach_vm_copy -
 *	Overwrite one range of the specified map with the contents of
 *	another range within that same map (i.e. both address ranges
 *	are in the same map).
 */
921 mach_vm_address_t source_address
,
923 mach_vm_address_t dest_address
)
928 if (map
== VM_MAP_NULL
) {
929 return KERN_INVALID_ARGUMENT
;
932 kr
= vm_map_copyin(map
, (vm_map_address_t
)source_address
,
933 (vm_map_size_t
)size
, FALSE
, ©
);
935 if (KERN_SUCCESS
== kr
) {
937 assertf(copy
->size
== (vm_map_size_t
) size
, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size
, (uint64_t) copy
->size
);
940 kr
= vm_map_copy_overwrite(map
,
941 (vm_map_address_t
)dest_address
,
942 copy
, (vm_map_size_t
) size
, FALSE
/* interruptible XXX */);
944 if (KERN_SUCCESS
!= kr
) {
945 vm_map_copy_discard(copy
);
954 vm_address_t source_address
,
956 vm_address_t dest_address
)
961 if (map
== VM_MAP_NULL
) {
962 return KERN_INVALID_ARGUMENT
;
965 kr
= vm_map_copyin(map
, (vm_map_address_t
)source_address
,
966 (vm_map_size_t
)size
, FALSE
, ©
);
968 if (KERN_SUCCESS
== kr
) {
970 assertf(copy
->size
== (vm_map_size_t
) size
, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size
, (uint64_t) copy
->size
);
973 kr
= vm_map_copy_overwrite(map
,
974 (vm_map_address_t
)dest_address
,
975 copy
, (vm_map_size_t
) size
, FALSE
/* interruptible XXX */);
977 if (KERN_SUCCESS
!= kr
) {
978 vm_map_copy_discard(copy
);
/*
 *	mach_vm_map -
 *	Map some range of an object into an address space.
 *
 *	The object can be one of several types of objects:
 *		NULL - anonymous memory
 *		a named entry - a range within another address space
 *		                or a range within a memory object
 *		a whole memory object
 */
996 mach_vm_map_external(
998 mach_vm_offset_t
*address
,
999 mach_vm_size_t initial_size
,
1000 mach_vm_offset_t mask
,
1003 vm_object_offset_t offset
,
1005 vm_prot_t cur_protection
,
1006 vm_prot_t max_protection
,
1007 vm_inherit_t inheritance
)
1011 VM_GET_FLAGS_ALIAS(flags
, tag
);
1012 return mach_vm_map_kernel(target_map
, address
, initial_size
, mask
,
1013 flags
, VM_MAP_KERNEL_FLAGS_NONE
, tag
,
1015 cur_protection
, max_protection
,
1021 vm_map_t target_map
,
1022 mach_vm_offset_t
*address
,
1023 mach_vm_size_t initial_size
,
1024 mach_vm_offset_t mask
,
1026 vm_map_kernel_flags_t vmk_flags
,
1029 vm_object_offset_t offset
,
1031 vm_prot_t cur_protection
,
1032 vm_prot_t max_protection
,
1033 vm_inherit_t inheritance
)
1036 vm_map_offset_t vmmaddr
;
1038 vmmaddr
= (vm_map_offset_t
) *address
;
1040 /* filter out any kernel-only flags */
1041 if (flags
& ~VM_FLAGS_USER_MAP
) {
1042 return KERN_INVALID_ARGUMENT
;
1045 kr
= vm_map_enter_mem_object(target_map
,
1060 if (kr
== KERN_SUCCESS
&& target_map
->pmap
== kernel_pmap
) {
1061 kasan_notify_address(vmmaddr
, initial_size
);
1070 /* legacy interface */
1073 vm_map_t target_map
,
1074 vm_offset_t
*address
,
1079 vm_object_offset_t offset
,
1081 vm_prot_t cur_protection
,
1082 vm_prot_t max_protection
,
1083 vm_inherit_t inheritance
)
1087 VM_GET_FLAGS_ALIAS(flags
, tag
);
1088 return vm_map_64_kernel(target_map
, address
, size
, mask
,
1089 flags
, VM_MAP_KERNEL_FLAGS_NONE
,
1090 tag
, port
, offset
, copy
,
1091 cur_protection
, max_protection
,
1097 vm_map_t target_map
,
1098 vm_offset_t
*address
,
1102 vm_map_kernel_flags_t vmk_flags
,
1105 vm_object_offset_t offset
,
1107 vm_prot_t cur_protection
,
1108 vm_prot_t max_protection
,
1109 vm_inherit_t inheritance
)
1111 mach_vm_address_t map_addr
;
1112 mach_vm_size_t map_size
;
1113 mach_vm_offset_t map_mask
;
1116 map_addr
= (mach_vm_address_t
)*address
;
1117 map_size
= (mach_vm_size_t
)size
;
1118 map_mask
= (mach_vm_offset_t
)mask
;
1120 kr
= mach_vm_map_kernel(target_map
, &map_addr
, map_size
, map_mask
,
1121 flags
, vmk_flags
, tag
,
1123 cur_protection
, max_protection
, inheritance
);
1124 *address
= CAST_DOWN(vm_offset_t
, map_addr
);
1128 /* temporary, until world build */
1131 vm_map_t target_map
,
1132 vm_offset_t
*address
,
1139 vm_prot_t cur_protection
,
1140 vm_prot_t max_protection
,
1141 vm_inherit_t inheritance
)
1145 VM_GET_FLAGS_ALIAS(flags
, tag
);
1146 return vm_map_kernel(target_map
, address
, size
, mask
,
1147 flags
, VM_MAP_KERNEL_FLAGS_NONE
, tag
,
1149 cur_protection
, max_protection
, inheritance
);
1154 vm_map_t target_map
,
1155 vm_offset_t
*address
,
1159 vm_map_kernel_flags_t vmk_flags
,
1164 vm_prot_t cur_protection
,
1165 vm_prot_t max_protection
,
1166 vm_inherit_t inheritance
)
1168 mach_vm_address_t map_addr
;
1169 mach_vm_size_t map_size
;
1170 mach_vm_offset_t map_mask
;
1171 vm_object_offset_t obj_offset
;
1174 map_addr
= (mach_vm_address_t
)*address
;
1175 map_size
= (mach_vm_size_t
)size
;
1176 map_mask
= (mach_vm_offset_t
)mask
;
1177 obj_offset
= (vm_object_offset_t
)offset
;
1179 kr
= mach_vm_map_kernel(target_map
, &map_addr
, map_size
, map_mask
,
1180 flags
, vmk_flags
, tag
,
1181 port
, obj_offset
, copy
,
1182 cur_protection
, max_protection
, inheritance
);
1183 *address
= CAST_DOWN(vm_offset_t
, map_addr
);
/*
 *	mach_vm_remap -
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 */
1195 mach_vm_remap_external(
1196 vm_map_t target_map
,
1197 mach_vm_offset_t
*address
,
1198 mach_vm_size_t size
,
1199 mach_vm_offset_t mask
,
1202 mach_vm_offset_t memory_address
,
1204 vm_prot_t
*cur_protection
,
1205 vm_prot_t
*max_protection
,
1206 vm_inherit_t inheritance
)
1209 VM_GET_FLAGS_ALIAS(flags
, tag
);
1211 return mach_vm_remap_kernel(target_map
, address
, size
, mask
, flags
, tag
, src_map
, memory_address
,
1212 copy
, cur_protection
, max_protection
, inheritance
);
1216 mach_vm_remap_kernel(
1217 vm_map_t target_map
,
1218 mach_vm_offset_t
*address
,
1219 mach_vm_size_t size
,
1220 mach_vm_offset_t mask
,
1224 mach_vm_offset_t memory_address
,
1226 vm_prot_t
*cur_protection
,
1227 vm_prot_t
*max_protection
,
1228 vm_inherit_t inheritance
)
1230 vm_map_offset_t map_addr
;
1233 if (VM_MAP_NULL
== target_map
|| VM_MAP_NULL
== src_map
) {
1234 return KERN_INVALID_ARGUMENT
;
1237 /* filter out any kernel-only flags */
1238 if (flags
& ~VM_FLAGS_USER_REMAP
) {
1239 return KERN_INVALID_ARGUMENT
;
1242 map_addr
= (vm_map_offset_t
)*address
;
1244 kr
= vm_map_remap(target_map
,
1249 VM_MAP_KERNEL_FLAGS_NONE
,
1257 *address
= map_addr
;
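/*
 * Example (user space, a minimal sketch): create a second, shared mapping of
 * an existing range within the current task. With copy = FALSE the new range
 * aliases the same physical pages as "src_addr".
 *
 *	mach_vm_address_t alias = 0;
 *	vm_prot_t         cur = VM_PROT_NONE, max = VM_PROT_NONE;
 *	kern_return_t     kr;
 *
 *	kr = mach_vm_remap(mach_task_self(), &alias, size, 0, VM_FLAGS_ANYWHERE,
 *	    mach_task_self(), src_addr, FALSE, &cur, &max, VM_INHERIT_NONE);
 */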
/*
 *	vm_remap -
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 *
 *	The addressability of the source and target address
 *	range is limited by the size of vm_address_t (in the
 *	kernel context).
 */
1274 vm_map_t target_map
,
1275 vm_offset_t
*address
,
1280 vm_offset_t memory_address
,
1282 vm_prot_t
*cur_protection
,
1283 vm_prot_t
*max_protection
,
1284 vm_inherit_t inheritance
)
1287 VM_GET_FLAGS_ALIAS(flags
, tag
);
1289 return vm_remap_kernel(target_map
, address
, size
, mask
, flags
, tag
, src_map
,
1290 memory_address
, copy
, cur_protection
, max_protection
, inheritance
);
1295 vm_map_t target_map
,
1296 vm_offset_t
*address
,
1302 vm_offset_t memory_address
,
1304 vm_prot_t
*cur_protection
,
1305 vm_prot_t
*max_protection
,
1306 vm_inherit_t inheritance
)
1308 vm_map_offset_t map_addr
;
1311 if (VM_MAP_NULL
== target_map
|| VM_MAP_NULL
== src_map
) {
1312 return KERN_INVALID_ARGUMENT
;
1315 /* filter out any kernel-only flags */
1316 if (flags
& ~VM_FLAGS_USER_REMAP
) {
1317 return KERN_INVALID_ARGUMENT
;
1320 map_addr
= (vm_map_offset_t
)*address
;
1322 kr
= vm_map_remap(target_map
,
1327 VM_MAP_KERNEL_FLAGS_NONE
,
1335 *address
= CAST_DOWN(vm_offset_t
, map_addr
);
/*
 * NOTE: these routines (and this file) will no longer require mach_host_server.h
 * when mach_vm_wire and vm_wire are changed to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	mach_vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
1353 mach_vm_wire_external(
1354 host_priv_t host_priv
,
1356 mach_vm_offset_t start
,
1357 mach_vm_size_t size
,
1360 return mach_vm_wire_kernel(host_priv
, map
, start
, size
, access
, VM_KERN_MEMORY_MLOCK
);
1364 mach_vm_wire_kernel(
1365 host_priv_t host_priv
,
1367 mach_vm_offset_t start
,
1368 mach_vm_size_t size
,
1374 if (host_priv
== HOST_PRIV_NULL
) {
1375 return KERN_INVALID_HOST
;
1378 assert(host_priv
== &realhost
);
1380 if (map
== VM_MAP_NULL
) {
1381 return KERN_INVALID_TASK
;
1384 if (access
& ~VM_PROT_ALL
|| (start
+ size
< start
)) {
1385 return KERN_INVALID_ARGUMENT
;
1388 if (access
!= VM_PROT_NONE
) {
1389 rc
= vm_map_wire_kernel(map
,
1390 vm_map_trunc_page(start
,
1391 VM_MAP_PAGE_MASK(map
)),
1392 vm_map_round_page(start
+ size
,
1393 VM_MAP_PAGE_MASK(map
)),
1397 rc
= vm_map_unwire(map
,
1398 vm_map_trunc_page(start
,
1399 VM_MAP_PAGE_MASK(map
)),
1400 vm_map_round_page(start
+ size
,
1401 VM_MAP_PAGE_MASK(map
)),
/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
1417 host_priv_t host_priv
,
1425 if (host_priv
== HOST_PRIV_NULL
) {
1426 return KERN_INVALID_HOST
;
1429 assert(host_priv
== &realhost
);
1431 if (map
== VM_MAP_NULL
) {
1432 return KERN_INVALID_TASK
;
1435 if ((access
& ~VM_PROT_ALL
) || (start
+ size
< start
)) {
1436 return KERN_INVALID_ARGUMENT
;
1441 } else if (access
!= VM_PROT_NONE
) {
1442 rc
= vm_map_wire_kernel(map
,
1443 vm_map_trunc_page(start
,
1444 VM_MAP_PAGE_MASK(map
)),
1445 vm_map_round_page(start
+ size
,
1446 VM_MAP_PAGE_MASK(map
)),
1447 access
, VM_KERN_MEMORY_OSFMK
,
1450 rc
= vm_map_unwire(map
,
1451 vm_map_trunc_page(start
,
1452 VM_MAP_PAGE_MASK(map
)),
1453 vm_map_round_page(start
+ size
,
1454 VM_MAP_PAGE_MASK(map
)),
/*
 *	mach_vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK	Bad task parameter
 *	KERN_INVALID_ARGUMENT	both sync and async were specified.
 *	KERN_SUCCESS		The usual.
 *	KERN_INVALID_ADDRESS	There was a hole in the region.
 */
1494 mach_vm_address_t address
,
1495 mach_vm_size_t size
,
1496 vm_sync_t sync_flags
)
1498 if (map
== VM_MAP_NULL
) {
1499 return KERN_INVALID_TASK
;
1502 return vm_map_msync(map
, (vm_map_address_t
)address
,
1503 (vm_map_size_t
)size
, sync_flags
);
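/*
 * Example (user space, a minimal sketch): synchronously flush dirty pages of
 * a mapped-file region back to its pager, similar in spirit to msync(2).
 *
 *	kern_return_t kr = mach_vm_msync(mach_task_self(), addr, size,
 *	    VM_SYNC_SYNCHRONOUS);
 */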
/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK	Bad task parameter
 *	KERN_INVALID_ARGUMENT	both sync and async were specified.
 *	KERN_SUCCESS		The usual.
 *	KERN_INVALID_ADDRESS	There was a hole in the region.
 */
1543 vm_address_t address
,
1545 vm_sync_t sync_flags
)
1547 if (map
== VM_MAP_NULL
) {
1548 return KERN_INVALID_TASK
;
1551 return vm_map_msync(map
, (vm_map_address_t
)address
,
1552 (vm_map_size_t
)size
, sync_flags
);
1557 vm_toggle_entry_reuse(int toggle
, int *old_value
)
1559 vm_map_t map
= current_map();
1561 assert(!map
->is_nested_map
);
1562 if (toggle
== VM_TOGGLE_GETVALUE
&& old_value
!= NULL
) {
1563 *old_value
= map
->disable_vmentry_reuse
;
1564 } else if (toggle
== VM_TOGGLE_SET
) {
1565 vm_map_entry_t map_to_entry
;
1568 vm_map_disable_hole_optimization(map
);
1569 map
->disable_vmentry_reuse
= TRUE
;
1570 __IGNORE_WCASTALIGN(map_to_entry
= vm_map_to_entry(map
));
1571 if (map
->first_free
== map_to_entry
) {
1572 map
->highest_entry_end
= vm_map_min(map
);
1574 map
->highest_entry_end
= map
->first_free
->vme_end
;
1577 } else if (toggle
== VM_TOGGLE_CLEAR
) {
1579 map
->disable_vmentry_reuse
= FALSE
;
1582 return KERN_INVALID_ARGUMENT
;
1585 return KERN_SUCCESS
;
/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
1598 mach_vm_behavior_set(
1600 mach_vm_offset_t start
,
1601 mach_vm_size_t size
,
1602 vm_behavior_t new_behavior
)
1604 vm_map_offset_t align_mask
;
1606 if ((map
== VM_MAP_NULL
) || (start
+ size
< start
)) {
1607 return KERN_INVALID_ARGUMENT
;
1611 return KERN_SUCCESS
;
1614 switch (new_behavior
) {
1615 case VM_BEHAVIOR_REUSABLE
:
1616 case VM_BEHAVIOR_REUSE
:
1617 case VM_BEHAVIOR_CAN_REUSE
:
1619 * Align to the hardware page size, to allow
1620 * malloc() to maximize the amount of re-usability,
1621 * even on systems with larger software page size.
1623 align_mask
= PAGE_MASK
;
1626 align_mask
= VM_MAP_PAGE_MASK(map
);
1630 return vm_map_behavior_set(map
,
1631 vm_map_trunc_page(start
, align_mask
),
1632 vm_map_round_page(start
+ size
, align_mask
),
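/*
 * Example (user space, a minimal sketch): tell the VM system that a free()d
 * but still-mapped buffer can be reclaimed, the Mach-level equivalent of
 * madvise(..., MADV_FREE_REUSABLE).
 *
 *	kern_return_t kr = mach_vm_behavior_set(mach_task_self(), addr, size,
 *	    VM_BEHAVIOR_REUSABLE);
 */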
/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
)
1656 if (start
+ size
< start
) {
1657 return KERN_INVALID_ARGUMENT
;
1660 return mach_vm_behavior_set(map
,
1661 (mach_vm_offset_t
) start
,
1662 (mach_vm_size_t
) size
,
/*
 *	mach_vm_region -
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	in until the vm merge from the IK is completed, and
 *	vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
1683 mach_vm_offset_t
*address
, /* IN/OUT */
1684 mach_vm_size_t
*size
, /* OUT */
1685 vm_region_flavor_t flavor
, /* IN */
1686 vm_region_info_t info
, /* OUT */
1687 mach_msg_type_number_t
*count
, /* IN/OUT */
1688 mach_port_t
*object_name
) /* OUT */
1690 vm_map_offset_t map_addr
;
1691 vm_map_size_t map_size
;
1694 if (VM_MAP_NULL
== map
) {
1695 return KERN_INVALID_ARGUMENT
;
1698 map_addr
= (vm_map_offset_t
)*address
;
1699 map_size
= (vm_map_size_t
)*size
;
1701 /* legacy conversion */
1702 if (VM_REGION_BASIC_INFO
== flavor
) {
1703 flavor
= VM_REGION_BASIC_INFO_64
;
1706 kr
= vm_map_region(map
,
1707 &map_addr
, &map_size
,
1708 flavor
, info
, count
,
1711 *address
= map_addr
;
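/*
 * Example (user space, a minimal sketch): look up the region containing (or
 * following) "addr" and report its protection. The object_name port is
 * obsolete and comes back as MACH_PORT_NULL.
 *
 *	vm_region_basic_info_data_64_t info;
 *	mach_msg_type_number_t         count = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t                    object_name = MACH_PORT_NULL;
 *	mach_vm_address_t              region_addr = addr;
 *	mach_vm_size_t                 region_size = 0;
 *	kern_return_t                  kr;
 *
 *	kr = mach_vm_region(mach_task_self(), &region_addr, &region_size,
 *	    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count,
 *	    &object_name);
 *	// On success, [region_addr, region_addr + region_size) bounds the
 *	// region and info.protection holds its current protection.
 */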
/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	in until the vm merge from the IK is completed, and
 *	vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
1733 vm_offset_t
*address
, /* IN/OUT */
1734 vm_size_t
*size
, /* OUT */
1735 vm_region_flavor_t flavor
, /* IN */
1736 vm_region_info_t info
, /* OUT */
1737 mach_msg_type_number_t
*count
, /* IN/OUT */
1738 mach_port_t
*object_name
) /* OUT */
1740 vm_map_offset_t map_addr
;
1741 vm_map_size_t map_size
;
1744 if (VM_MAP_NULL
== map
) {
1745 return KERN_INVALID_ARGUMENT
;
1748 map_addr
= (vm_map_offset_t
)*address
;
1749 map_size
= (vm_map_size_t
)*size
;
1751 /* legacy conversion */
1752 if (VM_REGION_BASIC_INFO
== flavor
) {
1753 flavor
= VM_REGION_BASIC_INFO_64
;
1756 kr
= vm_map_region(map
,
1757 &map_addr
, &map_size
,
1758 flavor
, info
, count
,
1761 *address
= CAST_DOWN(vm_offset_t
, map_addr
);
1762 *size
= CAST_DOWN(vm_size_t
, map_size
);
1764 if (KERN_SUCCESS
== kr
&& map_addr
+ map_size
> VM_MAX_ADDRESS
) {
1765 return KERN_INVALID_ADDRESS
;
1773 vm_address_t
*address
, /* IN/OUT */
1774 vm_size_t
*size
, /* OUT */
1775 vm_region_flavor_t flavor
, /* IN */
1776 vm_region_info_t info
, /* OUT */
1777 mach_msg_type_number_t
*count
, /* IN/OUT */
1778 mach_port_t
*object_name
) /* OUT */
1780 vm_map_address_t map_addr
;
1781 vm_map_size_t map_size
;
1784 if (VM_MAP_NULL
== map
) {
1785 return KERN_INVALID_ARGUMENT
;
1788 map_addr
= (vm_map_address_t
)*address
;
1789 map_size
= (vm_map_size_t
)*size
;
1791 kr
= vm_map_region(map
,
1792 &map_addr
, &map_size
,
1793 flavor
, info
, count
,
1796 *address
= CAST_DOWN(vm_address_t
, map_addr
);
1797 *size
= CAST_DOWN(vm_size_t
, map_size
);
1799 if (KERN_SUCCESS
== kr
&& map_addr
+ map_size
> VM_MAX_ADDRESS
) {
1800 return KERN_INVALID_ADDRESS
;
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 */
1811 mach_vm_region_recurse(
1813 mach_vm_address_t
*address
,
1814 mach_vm_size_t
*size
,
1816 vm_region_recurse_info_t info
,
1817 mach_msg_type_number_t
*infoCnt
)
1819 vm_map_address_t map_addr
;
1820 vm_map_size_t map_size
;
1823 if (VM_MAP_NULL
== map
) {
1824 return KERN_INVALID_ARGUMENT
;
1827 map_addr
= (vm_map_address_t
)*address
;
1828 map_size
= (vm_map_size_t
)*size
;
1830 kr
= vm_map_region_recurse_64(
1835 (vm_region_submap_info_64_t
)info
,
1838 *address
= map_addr
;
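/*
 * Example (user space, a minimal sketch): walk a task's address space,
 * descending into submaps, as vmmap-style tools do. Submap depth handling is
 * simplified here for brevity.
 *
 *	mach_vm_address_t               a = 0;
 *	mach_vm_size_t                  sz = 0;
 *	natural_t                       depth = 0;
 *	vm_region_submap_info_data_64_t sinfo;
 *	mach_msg_type_number_t          cnt = VM_REGION_SUBMAP_INFO_COUNT_64;
 *
 *	while (mach_vm_region_recurse(mach_task_self(), &a, &sz, &depth,
 *	    (vm_region_recurse_info_t)&sinfo, &cnt) == KERN_SUCCESS) {
 *		// ... examine [a, a + sz), sinfo.protection, sinfo.user_tag ...
 *		a += sz;
 *		cnt = VM_REGION_SUBMAP_INFO_COUNT_64;
 *	}
 */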
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 */
1849 vm_region_recurse_64(
1851 vm_address_t
*address
,
1854 vm_region_recurse_info_64_t info
,
1855 mach_msg_type_number_t
*infoCnt
)
1857 vm_map_address_t map_addr
;
1858 vm_map_size_t map_size
;
1861 if (VM_MAP_NULL
== map
) {
1862 return KERN_INVALID_ARGUMENT
;
1865 map_addr
= (vm_map_address_t
)*address
;
1866 map_size
= (vm_map_size_t
)*size
;
1868 kr
= vm_map_region_recurse_64(
1873 (vm_region_submap_info_64_t
)info
,
1876 *address
= CAST_DOWN(vm_address_t
, map_addr
);
1877 *size
= CAST_DOWN(vm_size_t
, map_size
);
1879 if (KERN_SUCCESS
== kr
&& map_addr
+ map_size
> VM_MAX_ADDRESS
) {
1880 return KERN_INVALID_ADDRESS
;
1888 vm_offset_t
*address
, /* IN/OUT */
1889 vm_size_t
*size
, /* OUT */
1890 natural_t
*depth
, /* IN/OUT */
1891 vm_region_recurse_info_t info32
, /* IN/OUT */
1892 mach_msg_type_number_t
*infoCnt
) /* IN/OUT */
1894 vm_region_submap_info_data_64_t info64
;
1895 vm_region_submap_info_t info
;
1896 vm_map_address_t map_addr
;
1897 vm_map_size_t map_size
;
1900 if (VM_MAP_NULL
== map
|| *infoCnt
< VM_REGION_SUBMAP_INFO_COUNT
) {
1901 return KERN_INVALID_ARGUMENT
;
1905 map_addr
= (vm_map_address_t
)*address
;
1906 map_size
= (vm_map_size_t
)*size
;
1907 info
= (vm_region_submap_info_t
)info32
;
1908 *infoCnt
= VM_REGION_SUBMAP_INFO_COUNT_64
;
1910 kr
= vm_map_region_recurse_64(map
, &map_addr
, &map_size
,
1911 depth
, &info64
, infoCnt
);
1913 info
->protection
= info64
.protection
;
1914 info
->max_protection
= info64
.max_protection
;
1915 info
->inheritance
= info64
.inheritance
;
1916 info
->offset
= (uint32_t)info64
.offset
; /* trouble-maker */
1917 info
->user_tag
= info64
.user_tag
;
1918 info
->pages_resident
= info64
.pages_resident
;
1919 info
->pages_shared_now_private
= info64
.pages_shared_now_private
;
1920 info
->pages_swapped_out
= info64
.pages_swapped_out
;
1921 info
->pages_dirtied
= info64
.pages_dirtied
;
1922 info
->ref_count
= info64
.ref_count
;
1923 info
->shadow_depth
= info64
.shadow_depth
;
1924 info
->external_pager
= info64
.external_pager
;
1925 info
->share_mode
= info64
.share_mode
;
1926 info
->is_submap
= info64
.is_submap
;
1927 info
->behavior
= info64
.behavior
;
1928 info
->object_id
= info64
.object_id
;
1929 info
->user_wired_count
= info64
.user_wired_count
;
1931 *address
= CAST_DOWN(vm_address_t
, map_addr
);
1932 *size
= CAST_DOWN(vm_size_t
, map_size
);
1933 *infoCnt
= VM_REGION_SUBMAP_INFO_COUNT
;
1935 if (KERN_SUCCESS
== kr
&& map_addr
+ map_size
> VM_MAX_ADDRESS
) {
1936 return KERN_INVALID_ADDRESS
;
1942 mach_vm_purgable_control(
1944 mach_vm_offset_t address
,
1945 vm_purgable_t control
,
1948 if (VM_MAP_NULL
== map
) {
1949 return KERN_INVALID_ARGUMENT
;
1952 if (control
== VM_PURGABLE_SET_STATE_FROM_KERNEL
) {
1953 /* not allowed from user-space */
1954 return KERN_INVALID_ARGUMENT
;
1957 return vm_map_purgable_control(map
,
1958 vm_map_trunc_page(address
, VM_MAP_PAGE_MASK(map
)),
1964 vm_purgable_control(
1966 vm_offset_t address
,
1967 vm_purgable_t control
,
1970 if (VM_MAP_NULL
== map
) {
1971 return KERN_INVALID_ARGUMENT
;
1974 if (control
== VM_PURGABLE_SET_STATE_FROM_KERNEL
) {
1975 /* not allowed from user-space */
1976 return KERN_INVALID_ARGUMENT
;
1979 return vm_map_purgable_control(map
,
1980 vm_map_trunc_page(address
, VM_MAP_PAGE_MASK(map
)),
/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host priv port).  Set this variable to zero if
 *	you want to let any application allocate CPM.
 */
unsigned int    vm_allocate_cpm_privileged = 0;
/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
2006 host_priv_t host_priv
,
2012 vm_map_address_t map_addr
;
2013 vm_map_size_t map_size
;
2016 if (vm_allocate_cpm_privileged
&& HOST_PRIV_NULL
== host_priv
) {
2017 return KERN_INVALID_HOST
;
2020 if (VM_MAP_NULL
== map
) {
2021 return KERN_INVALID_ARGUMENT
;
2024 map_addr
= (vm_map_address_t
)*addr
;
2025 map_size
= (vm_map_size_t
)size
;
2027 kr
= vm_map_enter_cpm(map
,
2032 *addr
= CAST_DOWN(vm_address_t
, map_addr
);
2040 mach_vm_offset_t offset
,
2044 if (VM_MAP_NULL
== map
) {
2045 return KERN_INVALID_ARGUMENT
;
2048 return vm_map_page_query_internal(
2050 vm_map_trunc_page(offset
, PAGE_MASK
),
2051 disposition
, ref_count
);
2061 if (VM_MAP_NULL
== map
) {
2062 return KERN_INVALID_ARGUMENT
;
2065 return vm_map_page_query_internal(
2067 vm_map_trunc_page(offset
, PAGE_MASK
),
2068 disposition
, ref_count
);
2072 mach_vm_page_range_query(
2074 mach_vm_offset_t address
,
2075 mach_vm_size_t size
,
2076 mach_vm_address_t dispositions_addr
,
2077 mach_vm_size_t
*dispositions_count
)
2079 kern_return_t kr
= KERN_SUCCESS
;
2080 int num_pages
= 0, i
= 0;
2081 mach_vm_size_t curr_sz
= 0, copy_sz
= 0;
2082 mach_vm_size_t disp_buf_req_size
= 0, disp_buf_total_size
= 0;
2083 mach_msg_type_number_t count
= 0;
	void                    *local_disp = NULL;
2087 vm_map_size_t info_size
= 0, local_disp_size
= 0;
2088 mach_vm_offset_t start
= 0, end
= 0;
2089 int effective_page_shift
, effective_page_size
, effective_page_mask
;
2091 if (map
== VM_MAP_NULL
|| dispositions_count
== NULL
) {
2092 return KERN_INVALID_ARGUMENT
;
2095 effective_page_shift
= vm_self_region_page_shift_safely(map
);
2096 if (effective_page_shift
== -1) {
2097 return KERN_INVALID_ARGUMENT
;
2099 effective_page_size
= (1 << effective_page_shift
);
2100 effective_page_mask
= effective_page_size
- 1;
2102 disp_buf_req_size
= (*dispositions_count
* sizeof(int));
2103 start
= vm_map_trunc_page(address
, effective_page_mask
);
2104 end
= vm_map_round_page(address
+ size
, effective_page_mask
);
2107 return KERN_INVALID_ARGUMENT
;
2110 if ((end
- start
) < size
) {
2112 * Aligned size is less than unaligned size.
2114 return KERN_INVALID_ARGUMENT
;
2117 if (disp_buf_req_size
== 0 || (end
== start
)) {
2118 return KERN_SUCCESS
;
2122 * For large requests, we will go through them
2123 * MAX_PAGE_RANGE_QUERY chunk at a time.
2126 curr_sz
= MIN(end
- start
, MAX_PAGE_RANGE_QUERY
);
2127 num_pages
= (int) (curr_sz
>> effective_page_shift
);
2129 info_size
= num_pages
* sizeof(vm_page_info_basic_data_t
);
2130 info
= kheap_alloc(KHEAP_TEMP
, info_size
, Z_WAITOK
);
2132 local_disp_size
= num_pages
* sizeof(int);
2133 local_disp
= kheap_alloc(KHEAP_TEMP
, local_disp_size
, Z_WAITOK
);
2135 if (info
== NULL
|| local_disp
== NULL
) {
2136 kr
= KERN_RESOURCE_SHORTAGE
;
2141 count
= VM_PAGE_INFO_BASIC_COUNT
;
2142 kr
= vm_map_page_range_info_internal(
2145 vm_map_round_page(start
+ curr_sz
, effective_page_mask
),
2146 effective_page_shift
,
2148 (vm_page_info_t
) info
,
2151 assert(kr
== KERN_SUCCESS
);
2153 for (i
= 0; i
< num_pages
; i
++) {
2154 ((int*)local_disp
)[i
] = ((vm_page_info_basic_t
)info
)[i
].disposition
;
2157 copy_sz
= MIN(disp_buf_req_size
, num_pages
* sizeof(int) /* an int per page */);
2158 kr
= copyout(local_disp
, (mach_vm_address_t
)dispositions_addr
, copy_sz
);
2161 disp_buf_req_size
-= copy_sz
;
2162 disp_buf_total_size
+= copy_sz
;
2168 if ((disp_buf_req_size
== 0) || (curr_sz
>= size
)) {
2170 * We might have inspected the full range OR
2171 * more than it esp. if the user passed in
2172 * non-page aligned start/size and/or if we
2173 * descended into a submap. We are done here.
2178 dispositions_addr
+= copy_sz
;
2182 curr_sz
= MIN(vm_map_round_page(size
, effective_page_mask
), MAX_PAGE_RANGE_QUERY
);
2183 num_pages
= (int)(curr_sz
>> effective_page_shift
);
2187 *dispositions_count
= disp_buf_total_size
/ sizeof(int);
2191 kheap_free(KHEAP_TEMP
, local_disp
, local_disp_size
);
2194 kheap_free(KHEAP_TEMP
, info
, info_size
);
2202 mach_vm_address_t address
,
2203 vm_page_info_flavor_t flavor
,
2204 vm_page_info_t info
,
2205 mach_msg_type_number_t
*count
)
2209 if (map
== VM_MAP_NULL
) {
2210 return KERN_INVALID_ARGUMENT
;
2213 kr
= vm_map_page_info(map
, address
, flavor
, info
, count
);
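/*
 * Example (user space, a minimal sketch): query the disposition of a single
 * page, e.g. to check whether it is resident or has been paged out.
 *
 *	vm_page_info_basic_data_t pinfo;
 *	mach_msg_type_number_t    pcount = VM_PAGE_INFO_BASIC_COUNT;
 *	kern_return_t             kr;
 *
 *	kr = mach_vm_page_info(mach_task_self(), addr, VM_PAGE_INFO_BASIC,
 *	    (vm_page_info_t)&pinfo, &pcount);
 *	// On success, pinfo.disposition carries VM_PAGE_QUERY_PAGE_* bits.
 */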
/* map a (whole) upl into an address space */
2222 vm_address_t
*dst_addr
)
2224 vm_map_offset_t map_addr
;
2227 if (VM_MAP_NULL
== map
) {
2228 return KERN_INVALID_ARGUMENT
;
2231 kr
= vm_map_enter_upl(map
, upl
, &map_addr
);
2232 *dst_addr
= CAST_DOWN(vm_address_t
, map_addr
);
2241 if (VM_MAP_NULL
== map
) {
2242 return KERN_INVALID_ARGUMENT
;
2245 return vm_map_remove_upl(map
, upl
);
/* Retrieve a upl for an object underlying an address range in a map */
2253 vm_map_offset_t map_offset
,
2254 upl_size_t
*upl_size
,
2256 upl_page_info_array_t page_list
,
2257 unsigned int *count
,
2258 upl_control_flags_t
*flags
,
2260 int force_data_sync
)
2262 upl_control_flags_t map_flags
;
2265 if (VM_MAP_NULL
== map
) {
2266 return KERN_INVALID_ARGUMENT
;
2269 map_flags
= *flags
& ~UPL_NOZEROFILL
;
2270 if (force_data_sync
) {
2271 map_flags
|= UPL_FORCE_DATA_SYNC
;
2274 kr
= vm_map_create_upl(map
,
2283 *flags
= (map_flags
& ~UPL_FORCE_DATA_SYNC
);
/*
 *	mach_make_memory_entry_64
 *
 *	Think of it as a two-stage vm_remap() operation.  First
 *	you get a handle.  Second, you map that handle somewhere
 *	else.  Rather than doing it all at once (and without
 *	needing access to the other whole map).
 */
2296 mach_make_memory_entry_64(
2297 vm_map_t target_map
,
2298 memory_object_size_t
*size
,
2299 memory_object_offset_t offset
,
2300 vm_prot_t permission
,
2301 ipc_port_t
*object_handle
,
2302 ipc_port_t parent_handle
)
2304 vm_named_entry_kernel_flags_t vmne_kflags
;
2306 if ((permission
& MAP_MEM_FLAGS_MASK
) & ~MAP_MEM_FLAGS_USER
) {
2308 * Unknown flag: reject for forward compatibility.
2310 return KERN_INVALID_VALUE
;
2313 vmne_kflags
= VM_NAMED_ENTRY_KERNEL_FLAGS_NONE
;
2314 if (permission
& MAP_MEM_LEDGER_TAGGED
) {
2315 vmne_kflags
.vmnekf_ledger_tag
= VM_LEDGER_TAG_DEFAULT
;
2317 return mach_make_memory_entry_internal(target_map
,
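/*
 * Example (user space, a minimal sketch): the two-stage pattern described
 * above -- wrap an existing range in a named entry, then map that handle at a
 * second address (here in the same task; the handle could instead be sent to
 * another task over IPC).
 *
 *	memory_object_size_t entry_size = size;
 *	mach_port_t          entry = MACH_PORT_NULL;
 *	mach_vm_address_t    mapped = 0;
 *	kern_return_t        kr;
 *
 *	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, addr,
 *	    VM_PROT_READ | VM_PROT_WRITE, &entry, MACH_PORT_NULL);
 *	if (kr == KERN_SUCCESS) {
 *		kr = mach_vm_map(mach_task_self(), &mapped, entry_size, 0,
 *		    VM_FLAGS_ANYWHERE, entry, 0, FALSE,
 *		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
 *		    VM_INHERIT_NONE);
 *	}
 */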
2327 mach_make_memory_entry_internal(
2328 vm_map_t target_map
,
2329 memory_object_size_t
*size
,
2330 memory_object_offset_t offset
,
2331 vm_prot_t permission
,
2332 vm_named_entry_kernel_flags_t vmne_kflags
,
2333 ipc_port_t
*object_handle
,
2334 ipc_port_t parent_handle
)
2336 vm_named_entry_t parent_entry
;
2337 vm_named_entry_t user_entry
;
2338 ipc_port_t user_handle
;
2341 vm_map_size_t map_size
;
2342 vm_map_offset_t map_start
, map_end
;
2345 * Stash the offset in the page for use by vm_map_enter_mem_object()
2346 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
2348 vm_object_offset_t offset_in_page
;
2350 unsigned int access
;
2351 vm_prot_t protections
;
2352 vm_prot_t original_protections
, mask_protections
;
2353 unsigned int wimg_mode
;
2354 boolean_t use_data_addr
;
2355 boolean_t use_4K_compat
;
2357 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x\n", target_map
, offset
, *size
, permission
);
2361 if ((permission
& MAP_MEM_FLAGS_MASK
) & ~MAP_MEM_FLAGS_ALL
) {
2363 * Unknown flag: reject for forward compatibility.
2365 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_INVALID_VALUE
);
2366 return KERN_INVALID_VALUE
;
2369 if (IP_VALID(parent_handle
) &&
2370 ip_kotype(parent_handle
) == IKOT_NAMED_ENTRY
) {
2371 parent_entry
= (vm_named_entry_t
) ip_get_kobject(parent_handle
);
2373 parent_entry
= NULL
;
2376 if (parent_entry
&& parent_entry
->is_copy
) {
2377 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_INVALID_ARGUMENT
);
2378 return KERN_INVALID_ARGUMENT
;
2381 original_protections
= permission
& VM_PROT_ALL
;
2382 protections
= original_protections
;
2383 mask_protections
= permission
& VM_PROT_IS_MASK
;
2384 access
= GET_MAP_MEM(permission
);
2385 use_data_addr
= ((permission
& MAP_MEM_USE_DATA_ADDR
) != 0);
2386 use_4K_compat
= ((permission
& MAP_MEM_4K_DATA_ADDR
) != 0);
2388 user_handle
= IP_NULL
;
2391 map_start
= vm_map_trunc_page(offset
, VM_MAP_PAGE_MASK(target_map
));
2393 if (permission
& MAP_MEM_ONLY
) {
2394 boolean_t parent_is_object
;
2396 map_end
= vm_map_round_page(offset
+ *size
, VM_MAP_PAGE_MASK(target_map
));
2397 map_size
= map_end
- map_start
;
2399 if (use_data_addr
|| use_4K_compat
|| parent_entry
== NULL
) {
2400 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_INVALID_ARGUMENT
);
2401 return KERN_INVALID_ARGUMENT
;
2404 parent_is_object
= parent_entry
->is_object
;
2405 if (!parent_is_object
) {
2406 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_INVALID_ARGUMENT
);
2407 return KERN_INVALID_ARGUMENT
;
2409 object
= vm_named_entry_to_vm_object(parent_entry
);
2410 if (parent_is_object
&& object
!= VM_OBJECT_NULL
) {
2411 wimg_mode
= object
->wimg_bits
;
2413 wimg_mode
= VM_WIMG_USE_DEFAULT
;
2415 if ((access
!= GET_MAP_MEM(parent_entry
->protection
)) &&
2416 !(parent_entry
->protection
& VM_PROT_WRITE
)) {
2417 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_INVALID_RIGHT
);
2418 return KERN_INVALID_RIGHT
;
2420 vm_prot_to_wimg(access
, &wimg_mode
);
2421 if (access
!= MAP_MEM_NOOP
) {
2422 SET_MAP_MEM(access
, parent_entry
->protection
);
2424 if (parent_is_object
&& object
&&
2425 (access
!= MAP_MEM_NOOP
) &&
2426 (!(object
->nophyscache
))) {
2427 if (object
->wimg_bits
!= wimg_mode
) {
2428 vm_object_lock(object
);
2429 vm_object_change_wimg_mode(object
, wimg_mode
);
2430 vm_object_unlock(object
);
2433 if (object_handle
) {
2434 *object_handle
= IP_NULL
;
2436 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_SUCCESS
);
2437 return KERN_SUCCESS
;
2438 } else if (permission
& MAP_MEM_NAMED_CREATE
) {
2439 int ledger_flags
= 0;
2442 map_end
= vm_map_round_page(offset
+ *size
, VM_MAP_PAGE_MASK(target_map
));
2443 map_size
= map_end
- map_start
;
2445 if (use_data_addr
|| use_4K_compat
) {
2446 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_INVALID_ARGUMENT
);
2447 return KERN_INVALID_ARGUMENT
;
2450 if (map_size
== 0) {
2452 *object_handle
= IPC_PORT_NULL
;
2453 return KERN_SUCCESS
;
2456 kr
= mach_memory_entry_allocate(&user_entry
, &user_handle
);
2457 if (kr
!= KERN_SUCCESS
) {
2458 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_FAILURE
);
2459 return KERN_FAILURE
;
2463 * Force the creation of the VM object now.
2465 if (map_size
> (vm_map_size_t
) ANON_MAX_SIZE
) {
2467 * LP64todo - for now, we can only allocate 4GB-4096
2468 * internal objects because the default pager can't
2469 * page bigger ones. Remove this when it can.
2475 object
= vm_object_allocate(map_size
);
2476 assert(object
!= VM_OBJECT_NULL
);
2480 * We use this path when we want to make sure that
2481 * nobody messes with the object (coalesce, for
2482 * example) before we map it.
2483 * We might want to use these objects for transposition via
2484 * vm_object_transpose() too, so we don't want any copy or
2485 * shadow objects either...
2487 object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
2488 object
->true_share
= TRUE
;
2490 owner
= current_task();
2491 if ((permission
& MAP_MEM_PURGABLE
) ||
2492 vmne_kflags
.vmnekf_ledger_tag
) {
2493 assert(object
->vo_owner
== NULL
);
2494 assert(object
->resident_page_count
== 0);
2495 assert(object
->wired_page_count
== 0);
2496 assert(owner
!= TASK_NULL
);
2497 if (vmne_kflags
.vmnekf_ledger_no_footprint
) {
2498 ledger_flags
|= VM_LEDGER_FLAG_NO_FOOTPRINT
;
2499 object
->vo_no_footprint
= TRUE
;
2501 if (permission
& MAP_MEM_PURGABLE
) {
2502 if (!(permission
& VM_PROT_WRITE
)) {
2503 /* if we can't write, we can't purge */
2504 vm_object_deallocate(object
);
2505 kr
= KERN_INVALID_ARGUMENT
;
2508 object
->purgable
= VM_PURGABLE_NONVOLATILE
;
2509 if (permission
& MAP_MEM_PURGABLE_KERNEL_ONLY
) {
2510 object
->purgeable_only_by_kernel
= TRUE
;
2513 if (owner
->task_legacy_footprint
) {
2515 * For ios11, we failed to account for
2516 * this memory. Keep doing that for
2517 * legacy apps (built before ios12),
2518 * for backwards compatibility's sake...
2520 owner
= kernel_task
;
2522 #endif /* __arm64__ */
2523 vm_object_lock(object
);
2524 vm_purgeable_nonvolatile_enqueue(object
, owner
);
2525 vm_object_unlock(object
);
2529 if (vmne_kflags
.vmnekf_ledger_tag
) {
2531 * Bill this object to the current task's
2532 * ledgers for the given tag.
2534 if (vmne_kflags
.vmnekf_ledger_no_footprint
) {
2535 ledger_flags
|= VM_LEDGER_FLAG_NO_FOOTPRINT
;
2537 vm_object_lock(object
);
2538 object
->vo_ledger_tag
= vmne_kflags
.vmnekf_ledger_tag
;
2539 kr
= vm_object_ownership_change(
2541 vmne_kflags
.vmnekf_ledger_tag
,
2542 owner
, /* new owner */
2544 FALSE
); /* task_objq locked? */
2545 vm_object_unlock(object
);
2546 if (kr
!= KERN_SUCCESS
) {
2547 vm_object_deallocate(object
);
2552 #if CONFIG_SECLUDED_MEMORY
2553 if (secluded_for_iokit
&& /* global boot-arg */
2554 ((permission
& MAP_MEM_GRAB_SECLUDED
)
2556 /* XXX FBDP for my testing only */
2557 || (secluded_for_fbdp
&& map_size
== 97550336)
2561 if (!(permission
& MAP_MEM_GRAB_SECLUDED
) &&
2562 secluded_for_fbdp
) {
2563 printf("FBDP: object %p size %lld can grab secluded\n", object
, (uint64_t) map_size
);
2566 object
->can_grab_secluded
= TRUE
;
2567 assert(!object
->eligible_for_secluded
);
2569 #endif /* CONFIG_SECLUDED_MEMORY */
2572 * The VM object is brand new and nobody else knows about it,
2573 * so we don't need to lock it.
2576 wimg_mode
= object
->wimg_bits
;
2577 vm_prot_to_wimg(access
, &wimg_mode
);
2578 if (access
!= MAP_MEM_NOOP
) {
2579 object
->wimg_bits
= wimg_mode
;
2582 /* the object has no pages, so no WIMG bits to update here */
2584 kr
= vm_named_entry_from_vm_object(
2589 (protections
& VM_PROT_ALL
));
2590 if (kr
!= KERN_SUCCESS
) {
2591 vm_object_deallocate(object
);
2594 user_entry
->internal
= TRUE
;
2595 user_entry
->is_sub_map
= FALSE
;
2596 user_entry
->offset
= 0;
2597 user_entry
->data_offset
= 0;
2598 user_entry
->protection
= protections
;
2599 SET_MAP_MEM(access
, user_entry
->protection
);
2600 user_entry
->size
= map_size
;
2602 /* user_object pager and internal fields are not used */
2603 /* when the object field is filled in. */
2605 *size
= CAST_DOWN(vm_size_t
, (user_entry
->size
-
2606 user_entry
->data_offset
));
2607 *object_handle
= user_handle
;
2608 DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map
, offset
, *size
, permission
, user_entry
, KERN_SUCCESS
);
2609 return KERN_SUCCESS
;
    if (permission & MAP_MEM_VM_COPY) {
        vm_map_copy_t   copy;

        if (target_map == VM_MAP_NULL) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_TASK);
            return KERN_INVALID_TASK;
        }

        map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map));
        map_size = map_end - map_start;
        if (map_size == 0) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT);
            return KERN_INVALID_ARGUMENT;
        }

        if (use_data_addr || use_4K_compat) {
            offset_in_page = offset - map_start;
            if (use_4K_compat) {
                offset_in_page &= ~((signed)(0xFFF));
            }
        } else {
            offset_in_page = 0;
        }

        kr = vm_map_copyin_internal(target_map,
            map_start,
            map_size,
            VM_MAP_COPYIN_ENTRY_LIST,
            &copy);
        if (kr != KERN_SUCCESS) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr);
            return kr;
        }
        assert(copy != VM_MAP_COPY_NULL);

        kr = mach_memory_entry_allocate(&user_entry, &user_handle);
        if (kr != KERN_SUCCESS) {
            vm_map_copy_discard(copy);
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE);
            return KERN_FAILURE;
        }

        user_entry->backing.copy = copy;
        user_entry->internal = FALSE;
        user_entry->is_sub_map = FALSE;
        user_entry->is_copy = TRUE;
        user_entry->offset = 0;
        user_entry->protection = protections;
        user_entry->size = map_size;
        user_entry->data_offset = offset_in_page;

        *size = CAST_DOWN(vm_size_t, (user_entry->size -
            user_entry->data_offset));
        *object_handle = user_handle;
        DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
        return KERN_SUCCESS;
    }
    if ((permission & MAP_MEM_VM_SHARE)
        || parent_entry == NULL
        || (permission & MAP_MEM_NAMED_REUSE)) {
        vm_map_copy_t           copy;
        vm_prot_t               cur_prot, max_prot;
        vm_map_kernel_flags_t   vmk_flags;
        vm_map_entry_t          parent_copy_entry;
        vm_prot_t               required_protection;

        if (target_map == VM_MAP_NULL) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_TASK);
            return KERN_INVALID_TASK;
        }

        map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map));
        vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
        parent_copy_entry = VM_MAP_ENTRY_NULL;
        if (!(permission & MAP_MEM_VM_SHARE)) {
            /* stop extracting if VM object changes */
            vmk_flags.vmkf_copy_single_object = TRUE;
            if ((permission & MAP_MEM_NAMED_REUSE) &&
                parent_entry != NULL &&
                parent_entry->is_object) {
                vm_map_copy_t parent_copy;
                parent_copy = parent_entry->backing.copy;
                assert(parent_copy->cpy_hdr.nentries == 1);
                parent_copy_entry = vm_map_copy_first_entry(parent_copy);
                assert(!parent_copy_entry->is_sub_map);
            }
        }

        map_size = map_end - map_start;
        if (map_size == 0) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT);
            return KERN_INVALID_ARGUMENT;
        }

        if (use_data_addr || use_4K_compat) {
            offset_in_page = offset - map_start;
            if (use_4K_compat) {
                offset_in_page &= ~((signed)(0xFFF));
            }
        } else {
            offset_in_page = 0;
        }

        if (mask_protections) {
            /*
             * caller is asking for whichever protections are
             * available: no required protections.
             */
            required_protection = VM_PROT_NONE;
        } else {
            /*
             * Caller wants a memory entry with "protections".
             * Make sure we extract only memory that matches that.
             */
            required_protection = protections;
        }
        cur_prot = VM_PROT_ALL;
        if (target_map->pmap == kernel_pmap) {
            /*
             * Get "reserved" map entries to avoid deadlocking
             * on the kernel map or a kernel submap if we
             * run out of VM map entries and need to refill that
             * zone.
             */
            vmk_flags.vmkf_copy_pageable = FALSE;
        } else {
            vmk_flags.vmkf_copy_pageable = TRUE;
        }
        vmk_flags.vmkf_copy_same_map = FALSE;
        assert(map_size != 0);
        kr = vm_map_copy_extract(target_map,
            map_start,
            map_size,
            required_protection,
            FALSE, /* copy */
            &copy,
            &cur_prot,
            &max_prot,
            VM_INHERIT_SHARE,
            vmk_flags);
        if (kr != KERN_SUCCESS) {
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr);
            if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
                // panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr);
            }
            return kr;
        }
        assert(copy != VM_MAP_COPY_NULL);
        assert((cur_prot & required_protection) == required_protection);

        if (mask_protections) {
            /*
             * We just want as much of "original_protections"
             * as we can get out of the actual "cur_prot".
             */
            protections &= cur_prot;
            if (protections == VM_PROT_NONE) {
                /* no access at all: fail */
                DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_PROTECTION_FAILURE);
                if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
                    // panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr);
                }
                vm_map_copy_discard(copy);
                return KERN_PROTECTION_FAILURE;
            }
        } else {
            /*
             * We want exactly "original_protections"
             * out of "cur_prot".
             */
            if ((cur_prot & protections) != protections) {
                if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
                    // panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, KERN_PROTECTION_FAILURE);
                }
                vm_map_copy_discard(copy);
                DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_PROTECTION_FAILURE);
                return KERN_PROTECTION_FAILURE;
            }
        }

        if (!(permission & MAP_MEM_VM_SHARE)) {
            vm_map_entry_t copy_entry;

            /* limit size to what's actually covered by "copy" */
            assert(copy->cpy_hdr.nentries == 1);
            copy_entry = vm_map_copy_first_entry(copy);
            map_size = copy_entry->vme_end - copy_entry->vme_start;

            if ((permission & MAP_MEM_NAMED_REUSE) &&
                parent_copy_entry != VM_MAP_ENTRY_NULL &&
                VME_OBJECT(copy_entry) == VME_OBJECT(parent_copy_entry) &&
                VME_OFFSET(copy_entry) == VME_OFFSET(parent_copy_entry) &&
                parent_entry->offset == 0 &&
                parent_entry->size == map_size &&
                (parent_entry->data_offset == offset_in_page)) {
                /* we have a match: re-use "parent_entry" */

                /* release our new "copy" */
                vm_map_copy_discard(copy);
                /* get extra send right on handle */
                ipc_port_copy_send(parent_handle);

                *size = CAST_DOWN(vm_size_t,
                    (parent_entry->size -
                    parent_entry->data_offset));
                *object_handle = parent_handle;
                DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
                return KERN_SUCCESS;
            }

            /* no match: we need to create a new entry */
            object = VME_OBJECT(copy_entry);
            vm_object_lock(object);
            wimg_mode = object->wimg_bits;
            if (!(object->nophyscache)) {
                vm_prot_to_wimg(access, &wimg_mode);
            }
            if (object->wimg_bits != wimg_mode) {
                vm_object_change_wimg_mode(object, wimg_mode);
            }
            vm_object_unlock(object);
        }

        kr = mach_memory_entry_allocate(&user_entry, &user_handle);
        if (kr != KERN_SUCCESS) {
            if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
                // panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr);
            }
            vm_map_copy_discard(copy);
            DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE);
            return KERN_FAILURE;
        }

        user_entry->backing.copy = copy;
        user_entry->is_sub_map = FALSE;
        user_entry->is_object = FALSE;
        user_entry->internal = FALSE;
        user_entry->protection = protections;
        user_entry->size = map_size;
        user_entry->data_offset = offset_in_page;

        if (permission & MAP_MEM_VM_SHARE) {
            user_entry->is_copy = TRUE;
            user_entry->offset = 0;
        } else {
            user_entry->is_object = TRUE;
            user_entry->internal = object->internal;
            user_entry->offset = VME_OFFSET(vm_map_copy_first_entry(copy));
            SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
        }

        *size = CAST_DOWN(vm_size_t, (user_entry->size -
            user_entry->data_offset));
        *object_handle = user_handle;
        DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
        return KERN_SUCCESS;
    }
    /* The new object will be based on an existing named object */
    if (parent_entry == NULL) {
        kr = KERN_INVALID_ARGUMENT;
        goto make_mem_done;
    }

    if (parent_entry->is_copy) {
        panic("parent_entry %p is_copy not supported\n", parent_entry);
        kr = KERN_INVALID_ARGUMENT;
        goto make_mem_done;
    }

    if (use_data_addr || use_4K_compat) {
        /*
         * submaps and pagers should only be accessible from within
         * the kernel, which shouldn't use the data address flag, so we can fail here.
         */
        if (parent_entry->is_sub_map) {
            panic("Shouldn't be using data address with a parent entry that is a submap.");
        }
        /*
         * Account for offset to data in parent entry and
         * compute our own offset to data.
         */
        if ((offset + *size + parent_entry->data_offset) > parent_entry->size) {
            kr = KERN_INVALID_ARGUMENT;
            goto make_mem_done;
        }

        map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
        offset_in_page = (offset + parent_entry->data_offset) - map_start;
        if (use_4K_compat) {
            offset_in_page &= ~((signed)(0xFFF));
        }
        map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
        map_size = map_end - map_start;
    } else {
        map_end = vm_map_round_page(offset + *size, PAGE_MASK);
        map_size = map_end - map_start;
        offset_in_page = 0;

        if ((offset + map_size) > parent_entry->size) {
            kr = KERN_INVALID_ARGUMENT;
            goto make_mem_done;
        }
    }

    if (mask_protections) {
        /*
         * The caller asked us to use the "protections" as
         * a mask, so restrict "protections" to what this
         * mapping actually allows.
         */
        protections &= parent_entry->protection;
    }
    if ((protections & parent_entry->protection) != protections) {
        kr = KERN_PROTECTION_FAILURE;
        goto make_mem_done;
    }

    if (mach_memory_entry_allocate(&user_entry, &user_handle)
        != KERN_SUCCESS) {
        kr = KERN_FAILURE;
        goto make_mem_done;
    }

    user_entry->size = map_size;
    user_entry->offset = parent_entry->offset + map_start;
    user_entry->data_offset = offset_in_page;
    user_entry->is_sub_map = parent_entry->is_sub_map;
    user_entry->is_copy = parent_entry->is_copy;
    user_entry->internal = parent_entry->internal;
    user_entry->protection = protections;

    if (access != MAP_MEM_NOOP) {
        SET_MAP_MEM(access, user_entry->protection);
    }

    if (parent_entry->is_sub_map) {
        vm_map_t map = parent_entry->backing.map;
        user_entry->backing.map = map;
        lck_mtx_lock(&map->s_lock);
        os_ref_retain_locked(&map->map_refcnt);
        lck_mtx_unlock(&map->s_lock);
    } else {
        object = vm_named_entry_to_vm_object(parent_entry);
        assert(object != VM_OBJECT_NULL);
        assert(object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC);
        kr = vm_named_entry_from_vm_object(
            user_entry,
            object,
            user_entry->offset,
            user_entry->size,
            (user_entry->protection & VM_PROT_ALL));
        if (kr != KERN_SUCCESS) {
            goto make_mem_done;
        }
        assert(user_entry->is_object);
        /* we now point to this object, hold on */
        vm_object_lock(object);
        vm_object_reference_locked(object);
#if VM_OBJECT_TRACKING_OP_TRUESHARE
        if (!object->true_share &&
            vm_object_tracking_inited) {
            void *bt[VM_OBJECT_TRACKING_BTDEPTH];
            int num = 0;

            num = OSBacktrace(bt,
                VM_OBJECT_TRACKING_BTDEPTH);
            btlog_add_entry(vm_object_tracking_btlog,
                object,
                VM_OBJECT_TRACKING_OP_TRUESHARE,
                bt,
                num);
        }
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */

        object->true_share = TRUE;
        if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
            object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        }
        vm_object_unlock(object);
    }
    *size = CAST_DOWN(vm_size_t, (user_entry->size -
        user_entry->data_offset));
    *object_handle = user_handle;
    DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
    return KERN_SUCCESS;

make_mem_done:
    if (user_handle != IP_NULL) {
        /*
         * Releasing "user_handle" causes the kernel object
         * associated with it ("user_entry" here) to also be
         * released and freed.
         */
        mach_memory_entry_port_release(user_handle);
    }
    DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr);
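/*
 * Illustrative user-space sketch (not part of the kernel build, hence the
 * #if 0 guard): create a purgable named entry with MAP_MEM_NAMED_CREATE and
 * map it into the caller's address space.  The helper name below is made up
 * for the example; mach_make_memory_entry_64() and mach_vm_map() are the
 * user-space MIG counterparts of the entry points implemented above, and the
 * MAP_MEM_* / VM_* constants are assumed to come from <mach/mach.h>.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>

static kern_return_t
make_purgable_entry_example(void)
{
    mach_port_t          entry = MACH_PORT_NULL;
    memory_object_size_t entry_size = 4 * 4096; /* rounded to page size internally */
    mach_vm_address_t    addr = 0;
    kern_return_t        kr;

    /* MAP_MEM_NAMED_CREATE backs the entry with a fresh VM object;
     * MAP_MEM_PURGABLE additionally requires VM_PROT_WRITE (see above). */
    kr = mach_make_memory_entry_64(mach_task_self(),
        &entry_size, 0,
        MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE |
        VM_PROT_READ | VM_PROT_WRITE,
        &entry, MACH_PORT_NULL);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    /* Map the named entry into our own address space. */
    kr = mach_vm_map(mach_task_self(), &addr, entry_size, 0,
        VM_FLAGS_ANYWHERE, entry, 0, FALSE,
        VM_PROT_READ | VM_PROT_WRITE,
        VM_PROT_READ | VM_PROT_WRITE,
        VM_INHERIT_NONE);
    printf("entry 0x%x mapped at 0x%llx (kr 0x%x)\n", entry, (unsigned long long)addr, kr);

    /* Dropping the last send right destroys the entry (and, once the
     * mapping is gone, its backing object). */
    mach_port_deallocate(mach_task_self(), entry);
    return kr;
}
#endif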
kern_return_t
_mach_make_memory_entry(
    vm_map_t                target_map,
    memory_object_size_t    *size,
    memory_object_offset_t  offset,
    vm_prot_t               permission,
    ipc_port_t              *object_handle,
    ipc_port_t              parent_entry)
{
    memory_object_size_t    mo_size;
    kern_return_t           kr;

    mo_size = (memory_object_size_t)*size;
    kr = mach_make_memory_entry_64(target_map, &mo_size,
        (memory_object_offset_t)offset, permission, object_handle,
        parent_entry);
    *size = mo_size;
    return kr;
}

kern_return_t
mach_make_memory_entry(
    vm_map_t                target_map,
    vm_size_t               *size,
    vm_offset_t             offset,
    vm_prot_t               permission,
    ipc_port_t              *object_handle,
    ipc_port_t              parent_entry)
{
    memory_object_size_t    mo_size;
    kern_return_t           kr;

    mo_size = (memory_object_size_t)*size;
    kr = mach_make_memory_entry_64(target_map, &mo_size,
        (memory_object_offset_t)offset, permission, object_handle,
        parent_entry);
    *size = CAST_DOWN(vm_size_t, mo_size);
    return kr;
}
/*
 *	task_wire
 *
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
    vm_map_t        map,
    boolean_t       must_wire)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    vm_map_lock(map);
    map->wiring_required = (must_wire == TRUE);
    vm_map_unlock(map);

    return KERN_SUCCESS;
}
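/*
 * Illustrative user-space sketch (not compiled with the kernel, hence the
 * #if 0 guard): once the wiring_required flag is set, later allocations in
 * that map come back wired, and vm_wire()/mach_vm_wire() is the interface
 * for unwiring them.  This assumes the task_wire MIG routine is still
 * reachable from user space and that the caller is privileged enough to use
 * it; treat both as assumptions, not guarantees.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
wired_allocation_example(void)
{
    mach_vm_address_t addr = 0;
    kern_return_t     kr;

    /* All subsequent allocations in this task's map will be user-wired. */
    kr = task_wire(mach_task_self(), TRUE);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    /* This allocation is expected to come back wired. */
    kr = mach_vm_allocate(mach_task_self(), &addr, 4096, VM_FLAGS_ANYWHERE);

    /* Restore the default behavior for later allocations. */
    (void) task_wire(mach_task_self(), FALSE);
    return kr;
}
#endif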
kern_return_t
vm_map_exec_lockdown(
    vm_map_t        map)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    vm_map_lock(map);
    map->map_disallow_new_exec = TRUE;
    vm_map_unlock(map);

    return KERN_SUCCESS;
}
#if VM_NAMED_ENTRY_LIST
queue_head_t vm_named_entry_list = QUEUE_HEAD_INITIALIZER(vm_named_entry_list);
int vm_named_entry_count = 0;
LCK_MTX_EARLY_DECLARE_ATTR(vm_named_entry_list_lock_data,
    &vm_object_lck_grp, &vm_object_lck_attr);
#endif /* VM_NAMED_ENTRY_LIST */

__private_extern__ kern_return_t
mach_memory_entry_allocate(
    vm_named_entry_t        *user_entry_p,
    ipc_port_t              *user_handle_p)
{
    vm_named_entry_t        user_entry;
    ipc_port_t              user_handle;

    user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
    if (user_entry == NULL) {
        return KERN_FAILURE;
    }
    bzero(user_entry, sizeof(*user_entry));

    named_entry_lock_init(user_entry);

    user_entry->backing.copy = NULL;
    user_entry->is_object = FALSE;
    user_entry->is_sub_map = FALSE;
    user_entry->is_copy = FALSE;
    user_entry->internal = FALSE;
    user_entry->size = 0;
    user_entry->offset = 0;
    user_entry->data_offset = 0;
    user_entry->protection = VM_PROT_NONE;
    user_entry->ref_count = 1;

    user_handle = ipc_kobject_alloc_port((ipc_kobject_t)user_entry,
        IKOT_NAMED_ENTRY,
        IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);

    *user_entry_p = user_entry;
    *user_handle_p = user_handle;

#if VM_NAMED_ENTRY_LIST
    /* keep a loose (no reference) pointer to the Mach port, for debugging only */
    user_entry->named_entry_port = user_handle;
    /* backtrace at allocation time, for debugging only */
    OSBacktrace(&user_entry->named_entry_bt[0],
        NAMED_ENTRY_BT_DEPTH);

    /* add this new named entry to the global list */
    lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
    queue_enter(&vm_named_entry_list, user_entry,
        vm_named_entry_t, named_entry_list);
    vm_named_entry_count++;
    lck_mtx_unlock(&vm_named_entry_list_lock_data);
#endif /* VM_NAMED_ENTRY_LIST */

    return KERN_SUCCESS;
}
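/*
 * Minimal in-kernel sketch (illustrative only, not compiled, hence the
 * #if 0 guard) of the allocation pattern used throughout this file: allocate
 * the named entry and its port, wrap a VM object with
 * vm_named_entry_from_vm_object(), and on failure release the port so the
 * entry itself is freed.  The helper name is hypothetical; the calls mirror
 * the MAP_MEM_NAMED_CREATE path above.
 */
#if 0
static kern_return_t
named_entry_for_new_object_example(vm_object_size_t size, ipc_port_t *portp)
{
    vm_named_entry_t        entry;
    ipc_port_t              port;
    vm_object_t             object;
    kern_return_t           kr;

    kr = mach_memory_entry_allocate(&entry, &port);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    object = vm_object_allocate(size);
    kr = vm_named_entry_from_vm_object(entry, object, 0, size, VM_PROT_DEFAULT);
    if (kr != KERN_SUCCESS) {
        vm_object_deallocate(object);
        /* releasing the port also releases and frees the vm_named_entry */
        mach_memory_entry_port_release(port);
        return kr;
    }
    entry->internal = TRUE;
    entry->size = size;
    entry->protection = VM_PROT_DEFAULT;

    *portp = port;
    return KERN_SUCCESS;
}
#endif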
/*
 * mach_memory_object_memory_entry_64
 *
 * Create a named entry backed by the provided pager.
 *
 */
kern_return_t
mach_memory_object_memory_entry_64(
    host_t                  host,
    boolean_t               internal,
    vm_object_offset_t      size,
    vm_prot_t               permission,
    memory_object_t         pager,
    ipc_port_t              *entry_handle)
{
    unsigned int            access;
    vm_named_entry_t        user_entry;
    ipc_port_t              user_handle;
    vm_object_t             object;
    kern_return_t           kr;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    if (pager == MEMORY_OBJECT_NULL && internal) {
        object = vm_object_allocate(size);
        if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
            object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        }
    } else {
        object = memory_object_to_vm_object(pager);
        if (object != VM_OBJECT_NULL) {
            vm_object_reference(object);
        }
    }
    if (object == VM_OBJECT_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (mach_memory_entry_allocate(&user_entry, &user_handle)
        != KERN_SUCCESS) {
        vm_object_deallocate(object);
        return KERN_FAILURE;
    }

    user_entry->size = size;
    user_entry->offset = 0;
    user_entry->protection = permission & VM_PROT_ALL;
    access = GET_MAP_MEM(permission);
    SET_MAP_MEM(access, user_entry->protection);
    user_entry->is_sub_map = FALSE;
    assert(user_entry->ref_count == 1);

    kr = vm_named_entry_from_vm_object(user_entry, object, 0, size,
        (user_entry->protection & VM_PROT_ALL));
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    user_entry->internal = object->internal;
    assert(object->internal == internal);

    *entry_handle = user_handle;
    return KERN_SUCCESS;
}
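/*
 * Illustrative in-kernel sketch (not compiled, hence the #if 0 guard):
 * calling mach_memory_object_memory_entry_64() with a NULL pager and
 * internal == TRUE takes the vm_object_allocate() branch above, so the
 * resulting named entry owns a fresh anonymous object.  The helper name is
 * made up for the example and the host port is assumed to be supplied by
 * the caller.
 */
#if 0
static kern_return_t
anonymous_pager_entry_example(host_t host, vm_object_offset_t size,
    ipc_port_t *entryp)
{
    return mach_memory_object_memory_entry_64(host,
               TRUE,                    /* internal */
               size,
               VM_PROT_READ | VM_PROT_WRITE,
               MEMORY_OBJECT_NULL,      /* no pager: allocate a new object */
               entryp);
}
#endif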
kern_return_t
mach_memory_object_memory_entry(
    host_t                  host,
    boolean_t               internal,
    vm_size_t               size,
    vm_prot_t               permission,
    memory_object_t         pager,
    ipc_port_t              *entry_handle)
{
    return mach_memory_object_memory_entry_64( host, internal,
               (vm_object_offset_t)size, permission, pager, entry_handle);
}

kern_return_t
mach_memory_entry_purgable_control(
    ipc_port_t      entry_port,
    vm_purgable_t   control,
    int             *state)
{
    if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
        /* not allowed from user-space */
        return KERN_INVALID_ARGUMENT;
    }

    return memory_entry_purgeable_control_internal(entry_port, control, state);
}
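/*
 * Illustrative user-space sketch (not compiled with the kernel, hence the
 * #if 0 guard): mark a purgable named entry's backing object volatile so its
 * pages become reclaimable under memory pressure.  This assumes the
 * memory_entry MIG stubs (<mach/memory_entry.h>) are exported to user space
 * and that the entry was created purgable (see the MAP_MEM_PURGABLE path
 * above); the helper name is made up for the example.
 */
#if 0
#include <mach/mach.h>
#include <mach/memory_entry.h>

static kern_return_t
volatile_entry_example(mach_port_t purgable_entry)
{
    int state = VM_PURGABLE_VOLATILE;

    /* On a later VM_PURGABLE_SET_STATE back to NONVOLATILE, the returned
     * old state tells the caller whether the contents were purged
     * (VM_PURGABLE_EMPTY) in the meantime. */
    return mach_memory_entry_purgable_control(purgable_entry,
               VM_PURGABLE_SET_STATE, &state);
}
#endif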
static kern_return_t
memory_entry_purgeable_control_internal(
    ipc_port_t      entry_port,
    vm_purgable_t   control,
    int             *state)
{
    kern_return_t           kr;
    vm_named_entry_t        mem_entry;
    vm_object_t             object;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }
    if (control != VM_PURGABLE_SET_STATE &&
        control != VM_PURGABLE_GET_STATE &&
        control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((control == VM_PURGABLE_SET_STATE ||
        control == VM_PURGABLE_SET_STATE_FROM_KERNEL) &&
        (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
        ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock(object);

    /* check that named entry covers entire object ? */
    if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
        vm_object_unlock(object);
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    named_entry_unlock(mem_entry);

    kr = vm_object_purgable_control(object, control, state);

    vm_object_unlock(object);

    return kr;
}
kern_return_t
mach_memory_entry_access_tracking(
    ipc_port_t      entry_port,
    int             *access_tracking,
    uint32_t        *access_tracking_reads,
    uint32_t        *access_tracking_writes)
{
    return memory_entry_access_tracking_internal(entry_port,
               access_tracking,
               access_tracking_reads,
               access_tracking_writes);
}

static kern_return_t
memory_entry_access_tracking_internal(
    ipc_port_t      entry_port,
    int             *access_tracking,
    uint32_t        *access_tracking_reads,
    uint32_t        *access_tracking_writes)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    kern_return_t           kr;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

#if VM_OBJECT_ACCESS_TRACKING
    vm_object_access_tracking(object,
        access_tracking,
        access_tracking_reads,
        access_tracking_writes);
    kr = KERN_SUCCESS;
#else /* VM_OBJECT_ACCESS_TRACKING */
    (void) access_tracking;
    (void) access_tracking_reads;
    (void) access_tracking_writes;
    kr = KERN_NOT_SUPPORTED;
#endif /* VM_OBJECT_ACCESS_TRACKING */

    named_entry_unlock(mem_entry);

    return kr;
}
kern_return_t
mach_memory_entry_ownership(
    ipc_port_t      entry_port,
    task_t          owner,
    int             ledger_tag,
    int             ledger_flags)
{
    task_t                  cur_task;
    kern_return_t           kr;
    vm_named_entry_t        mem_entry;
    vm_object_t             object;

    cur_task = current_task();
    if (cur_task != kernel_task &&
        (owner != cur_task ||
        (ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) ||
        ledger_tag == VM_LEDGER_TAG_NETWORK)) {
        /*
         * An entitlement is required to:
         * + transfer memory ownership to someone else,
         * + request that the memory not count against the footprint,
         * + tag as "network" (since that implies "no footprint")
         */
        if (!cur_task->task_can_transfer_memory_ownership &&
            IOTaskHasEntitlement(cur_task,
            "com.apple.private.memory.ownership_transfer")) {
            cur_task->task_can_transfer_memory_ownership = TRUE;
        }
        if (!cur_task->task_can_transfer_memory_ownership) {
            return KERN_NO_ACCESS;
        }
    }

    if (ledger_flags & ~VM_LEDGER_FLAGS) {
        return KERN_INVALID_ARGUMENT;
    }
    if (ledger_tag <= 0 ||
        ledger_tag > VM_LEDGER_TAG_MAX) {
        return KERN_INVALID_ARGUMENT;
    }

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }
    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock(object);

    /* check that named entry covers entire object ? */
    if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
        vm_object_unlock(object);
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    named_entry_unlock(mem_entry);

    kr = vm_object_ownership_change(object,
        ledger_tag,
        owner,
        ledger_flags,
        FALSE); /* task_objq_locked */
    vm_object_unlock(object);
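/*
 * Illustrative in-kernel sketch (not compiled, hence the #if 0 guard):
 * re-bill a named entry's backing object to the calling task under a ledger
 * tag.  Per the checks above, anything other than (owner == current_task(),
 * no NO_FOOTPRINT flag, non-network tag) requires the ownership-transfer
 * entitlement.  The helper name is made up, and VM_LEDGER_TAG_MEDIA is only
 * an example tag assumed to be one of the valid VM_LEDGER_TAG_* values.
 */
#if 0
static kern_return_t
ledger_tag_entry_example(ipc_port_t entry_port)
{
    return mach_memory_entry_ownership(entry_port,
               current_task(),          /* keep ownership, just re-tag */
               VM_LEDGER_TAG_MEDIA,
               0 /* ledger_flags */);
}
#endif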
kern_return_t
mach_memory_entry_get_page_counts(
    ipc_port_t      entry_port,
    unsigned int    *resident_page_count,
    unsigned int    *dirty_page_count)
{
    kern_return_t           kr;
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    vm_object_offset_t      offset;
    vm_object_size_t        size;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock(object);

    offset = mem_entry->offset;
    size = mem_entry->size;
    size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
    offset = vm_object_trunc_page(offset);

    named_entry_unlock(mem_entry);

    kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count);

    vm_object_unlock(object);

    return kr;
}
kern_return_t
mach_memory_entry_phys_page_offset(
    ipc_port_t              entry_port,
    vm_object_offset_t      *offset_p)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    vm_object_offset_t      offset;
    vm_object_offset_t      data_offset;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    offset = mem_entry->offset;
    data_offset = mem_entry->data_offset;

    named_entry_unlock(mem_entry);

    *offset_p = offset - vm_object_trunc_page(offset) + data_offset;
    return KERN_SUCCESS;
}
kern_return_t
mach_memory_entry_map_size(
    ipc_port_t              entry_port,
    vm_map_t                map,
    memory_object_offset_t  offset,
    memory_object_offset_t  size,
    mach_vm_size_t          *map_size)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    vm_object_offset_t      object_offset_start, object_offset_end;
    vm_map_copy_t           copy_map, target_copy_map;
    vm_map_offset_t         overmap_start, overmap_end, trimmed_start;
    kern_return_t           kr;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    if (mem_entry->is_object) {
        object = vm_named_entry_to_vm_object(mem_entry);
        if (object == VM_OBJECT_NULL) {
            named_entry_unlock(mem_entry);
            return KERN_INVALID_ARGUMENT;
        }

        object_offset_start = mem_entry->offset;
        object_offset_start += mem_entry->data_offset;
        object_offset_start += offset;
        object_offset_end = object_offset_start + size;
        object_offset_start = vm_map_trunc_page(object_offset_start,
            VM_MAP_PAGE_MASK(map));
        object_offset_end = vm_map_round_page(object_offset_end,
            VM_MAP_PAGE_MASK(map));

        named_entry_unlock(mem_entry);

        *map_size = object_offset_end - object_offset_start;
        return KERN_SUCCESS;
    }

    if (!mem_entry->is_copy) {
        panic("unsupported type of mem_entry %p\n", mem_entry);
    }

    assert(mem_entry->is_copy);
    if (VM_MAP_COPY_PAGE_MASK(mem_entry->backing.copy) == VM_MAP_PAGE_MASK(map)) {
        *map_size = vm_map_round_page(mem_entry->offset + mem_entry->data_offset + offset + size, VM_MAP_PAGE_MASK(map)) - vm_map_trunc_page(mem_entry->offset + mem_entry->data_offset + offset, VM_MAP_PAGE_MASK(map));
        DEBUG4K_SHARE("map %p (%d) mem_entry %p offset 0x%llx + 0x%llx + 0x%llx size 0x%llx -> map_size 0x%llx\n", map, VM_MAP_PAGE_MASK(map), mem_entry, mem_entry->offset, mem_entry->data_offset, offset, size, *map_size);
        named_entry_unlock(mem_entry);
        return KERN_SUCCESS;
    }

    DEBUG4K_SHARE("mem_entry %p copy %p (%d) map %p (%d) offset 0x%llx size 0x%llx\n", mem_entry, mem_entry->backing.copy, VM_MAP_COPY_PAGE_SHIFT(mem_entry->backing.copy), map, VM_MAP_PAGE_SHIFT(map), offset, size);
    copy_map = mem_entry->backing.copy;
    target_copy_map = VM_MAP_COPY_NULL;
    DEBUG4K_ADJUST("adjusting...\n");
    kr = vm_map_copy_adjust_to_target(copy_map,
        mem_entry->data_offset + offset,
        size,
        map,
        FALSE,
        &target_copy_map,
        &overmap_start,
        &overmap_end,
        &trimmed_start);
    if (kr == KERN_SUCCESS) {
        if (target_copy_map->size != copy_map->size) {
            DEBUG4K_ADJUST("copy %p (%d) map %p (%d) offset 0x%llx size 0x%llx overmap_start 0x%llx overmap_end 0x%llx trimmed_start 0x%llx map_size 0x%llx -> 0x%llx\n", copy_map, VM_MAP_COPY_PAGE_SHIFT(copy_map), map, VM_MAP_PAGE_SHIFT(map), (uint64_t)offset, (uint64_t)size, (uint64_t)overmap_start, (uint64_t)overmap_end, (uint64_t)trimmed_start, (uint64_t)copy_map->size, (uint64_t)target_copy_map->size);
        }
        *map_size = target_copy_map->size;
        if (target_copy_map != copy_map) {
            vm_map_copy_discard(target_copy_map);
        }
        target_copy_map = VM_MAP_COPY_NULL;
    }
    named_entry_unlock(mem_entry);
/*
 * mach_memory_entry_port_release:
 *
 * Release a send right on a named entry port.  This is the correct
 * way to destroy a named entry.  When the last right on the port is
 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
 */
void
mach_memory_entry_port_release(
    ipc_port_t      port)
{
    assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
    ipc_port_release_send(port);
}
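/*
 * Minimal in-kernel sketch (illustrative only, not compiled, hence the
 * #if 0 guard) of the teardown convention described above: kernel code never
 * calls mach_destroy_memory_entry() directly; it drops its send right and
 * lets ipc_kobject_destroy() run the destructor once the last right goes
 * away.  The helper name is hypothetical.
 */
#if 0
static void
drop_named_entry_example(ipc_port_t entry_port)
{
    if (IP_VALID(entry_port)) {
        mach_memory_entry_port_release(entry_port);
    }
}
#endif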
/*
 * mach_destroy_memory_entry:
 *
 * Drops a reference on a memory entry and destroys the memory entry if
 * there are no more references on it.
 * NOTE: This routine should not be called to destroy a memory entry from the
 * kernel, as it will not release the Mach port associated with the memory
 * entry.  The proper way to destroy a memory entry in the kernel is to
 * call mach_memory_entry_port_release() to release the kernel's send-right on
 * the memory entry's port.  When the last send right is released, the memory
 * entry will be destroyed via ipc_kobject_destroy().
 */
void
mach_destroy_memory_entry(
    ipc_port_t      port)
{
    vm_named_entry_t        named_entry;
#if MACH_ASSERT
    assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
    named_entry = (vm_named_entry_t) ip_get_kobject(port);

    named_entry_lock(named_entry);
    named_entry->ref_count -= 1;

    if (named_entry->ref_count == 0) {
        if (named_entry->is_sub_map) {
            vm_map_deallocate(named_entry->backing.map);
        } else if (named_entry->is_copy) {
            vm_map_copy_discard(named_entry->backing.copy);
        } else if (named_entry->is_object) {
            assert(named_entry->backing.copy->cpy_hdr.nentries == 1);
            vm_map_copy_discard(named_entry->backing.copy);
        } else {
            assert(named_entry->backing.copy == VM_MAP_COPY_NULL);
        }

        named_entry_unlock(named_entry);
        named_entry_lock_destroy(named_entry);

#if VM_NAMED_ENTRY_LIST
        lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
        queue_remove(&vm_named_entry_list, named_entry,
            vm_named_entry_t, named_entry_list);
        assert(vm_named_entry_count > 0);
        vm_named_entry_count--;
        lck_mtx_unlock(&vm_named_entry_list_lock_data);
#endif /* VM_NAMED_ENTRY_LIST */

        kfree(named_entry, sizeof(struct vm_named_entry));
    } else {
        named_entry_unlock(named_entry);
    }
}
/* Allow manipulation of individual page state.  This is actually part of */
/* the UPL regimen but takes place on the memory entry rather than on a UPL */

kern_return_t
mach_memory_entry_page_op(
    ipc_port_t              entry_port,
    vm_object_offset_t      offset,
    int                     ops,
    ppnum_t                 *phys_entry,
    int                     *flags)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    kern_return_t           kr;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_reference(object);
    named_entry_unlock(mem_entry);

    kr = vm_object_page_op(object, offset, ops, phys_entry, flags);

    vm_object_deallocate(object);
/*
 * mach_memory_entry_range_op offers performance enhancement over
 * mach_memory_entry_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */

kern_return_t
mach_memory_entry_range_op(
    ipc_port_t              entry_port,
    vm_object_offset_t      offset_beg,
    vm_object_offset_t      offset_end,
    int                     ops,
    int                     *range)
{
    vm_named_entry_t        mem_entry;
    vm_object_t             object;
    kern_return_t           kr;

    if (!IP_VALID(entry_port) ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map ||
        mem_entry->is_copy) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    assert(mem_entry->is_object);
    object = vm_named_entry_to_vm_object(mem_entry);
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_reference(object);
    named_entry_unlock(mem_entry);

    kr = vm_object_range_op(object,
        offset_beg,
        offset_end,
        ops,
        (uint32_t *) range);

    vm_object_deallocate(object);
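/*
 * Illustrative in-kernel sketch (not compiled, hence the #if 0 guard) of the
 * batching advantage described above: one range_op call instead of a
 * page-at-a-time loop over mach_memory_entry_page_op().  The helper name is
 * made up, and UPL_ROP_DUMP is assumed to be one of the UPL_ROP_* operations
 * accepted by vm_object_range_op().
 */
#if 0
static kern_return_t
toss_cached_pages_example(ipc_port_t entry_port, vm_object_offset_t start,
    vm_object_offset_t end)
{
    int range = 0;

    /* Discard any resident pages in [start, end) of the entry's backing
     * object; no per-page state is returned, only the aggregate range. */
    return mach_memory_entry_range_op(entry_port, start, end,
               UPL_ROP_DUMP, &range);
}
#endif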
/* ******* Temporary Internal calls to UPL for BSD ***** */

extern int kernel_upl_map(
    vm_map_t        map,
    upl_t           upl,
    vm_offset_t     *dst_addr);

extern int kernel_upl_unmap(
    vm_map_t        map,
    upl_t           upl);

extern int kernel_upl_commit(
    upl_t                   upl,
    upl_page_info_t         *pl,
    mach_msg_type_number_t  count);

extern int kernel_upl_commit_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     flags,
    upl_page_info_array_t   pl,
    mach_msg_type_number_t  count);

extern int kernel_upl_abort(
    upl_t                   upl,
    int                     abort_type);

extern int kernel_upl_abort_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     abort_flags);


kern_return_t
kernel_upl_map(
    vm_map_t        map,
    upl_t           upl,
    vm_offset_t     *dst_addr)
{
    return vm_upl_map(map, upl, dst_addr);
}


kern_return_t
kernel_upl_unmap(
    vm_map_t        map,
    upl_t           upl)
{
    return vm_upl_unmap(map, upl);
}

kern_return_t
kernel_upl_commit(
    upl_t                   upl,
    upl_page_info_t         *pl,
    mach_msg_type_number_t  count)
{
    kern_return_t   kr;

    kr = upl_commit(upl, pl, count);
    upl_deallocate(upl);
    return kr;
}


kern_return_t
kernel_upl_commit_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     flags,
    upl_page_info_array_t   pl,
    mach_msg_type_number_t  count)
{
    boolean_t               finished = FALSE;
    kern_return_t           kr;

    if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
        flags |= UPL_COMMIT_NOTIFY_EMPTY;
    }

    if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

    if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished) {
        upl_deallocate(upl);
    }

    return kr;
}

kern_return_t
kernel_upl_abort_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     abort_flags)
{
    kern_return_t           kr;
    boolean_t               finished = FALSE;

    if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY) {
        abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
    }

    kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

    if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished) {
        upl_deallocate(upl);
    }

    return kr;
}

kern_return_t
kernel_upl_abort(
    upl_t                   upl,
    int                     abort_type)
{
    kern_return_t   kr;

    kr = upl_abort(upl, abort_type);
    upl_deallocate(upl);
    return kr;
}
/*
 * Now a kernel-private interface (for BootCache
 * use only).  Need a cleaner way to create an
 * empty vm_map() and return a handle to it.
 */

kern_return_t
vm_region_object_create(
    __unused vm_map_t       target_map,
    vm_size_t               size,
    ipc_port_t              *object_handle)
{
    vm_named_entry_t        user_entry;
    ipc_port_t              user_handle;
    vm_map_t                new_map;

    if (mach_memory_entry_allocate(&user_entry, &user_handle)
        != KERN_SUCCESS) {
        return KERN_FAILURE;
    }

    /* Create a named object based on a submap of specified size */

    new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
        vm_map_round_page(size,
        VM_MAP_PAGE_MASK(target_map)),
        TRUE);
    vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));

    user_entry->backing.map = new_map;
    user_entry->internal = TRUE;
    user_entry->is_sub_map = TRUE;
    user_entry->offset = 0;
    user_entry->protection = VM_PROT_ALL;
    user_entry->size = size;
    assert(user_entry->ref_count == 1);

    *object_handle = user_handle;
    return KERN_SUCCESS;
}
ppnum_t vm_map_get_phys_page(           /* forward */
    vm_map_t        map,
    vm_offset_t     offset);

ppnum_t
vm_map_get_phys_page(
    vm_map_t        map,
    vm_offset_t     addr)
{
    vm_object_offset_t      offset;
    vm_object_t             object;
    vm_map_offset_t         map_offset;
    vm_map_entry_t          entry;
    ppnum_t                 phys_page = 0;

    map_offset = vm_map_trunc_page(addr, PAGE_MASK);

    vm_map_lock(map);
    while (vm_map_lookup_entry(map, map_offset, &entry)) {
        if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
            vm_map_unlock(map);
            return (ppnum_t) 0;
        }
        if (entry->is_sub_map) {
            vm_map_t        old_map;
            vm_map_lock(VME_SUBMAP(entry));
            old_map = map;
            map = VME_SUBMAP(entry);
            map_offset = (VME_OFFSET(entry) +
                (map_offset - entry->vme_start));
            vm_map_unlock(old_map);
            continue;
        }
        if (VME_OBJECT(entry)->phys_contiguous) {
            /* These are  not standard pageable memory mappings */
            /* If they are not present in the object they will  */
            /* have to be picked up from the pager through the  */
            /* fault mechanism.  */
            if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
                /* need to call vm_fault */
                vm_map_unlock(map);
                vm_fault(map, map_offset, VM_PROT_NONE,
                    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
                    THREAD_UNINT, NULL, 0);
                vm_map_lock(map);
                continue;
            }
            offset = (VME_OFFSET(entry) +
                (map_offset - entry->vme_start));
            phys_page = (ppnum_t)
                ((VME_OBJECT(entry)->vo_shadow_offset
                + offset) >> PAGE_SHIFT);
            break;
        }
        offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
        object = VME_OBJECT(entry);
        vm_object_lock(object);
        while (TRUE) {
            vm_page_t dst_page = vm_page_lookup(object, offset);
            if (dst_page == VM_PAGE_NULL) {
                if (object->shadow) {
                    vm_object_t old_object;
                    vm_object_lock(object->shadow);
                    old_object = object;
                    offset = offset + object->vo_shadow_offset;
                    object = object->shadow;
                    vm_object_unlock(old_object);
                } else {
                    vm_object_unlock(object);
                    break;
                }
            } else {
                phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
                vm_object_unlock(object);
                break;
            }
        }
        break;
    }

    vm_map_unlock(map);
    return phys_page;
}
kern_return_t kernel_object_iopl_request(       /* forward */
    vm_named_entry_t        named_entry,
    memory_object_offset_t  offset,
    upl_size_t              *upl_size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     *flags);

kern_return_t
kernel_object_iopl_request(
    vm_named_entry_t        named_entry,
    memory_object_offset_t  offset,
    upl_size_t              *upl_size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     *flags)
{
    vm_object_t             object;
    kern_return_t           ret;
    int                     caller_flags;

    caller_flags = *flags;

    if (caller_flags & ~UPL_VALID_FLAGS) {
        /*
         * For forward compatibility's sake,
         * reject any unknown flag.
         */
        return KERN_INVALID_VALUE;
    }

    /* a few checks to make sure user is obeying rules */
    if (*upl_size == 0) {
        if (offset >= named_entry->size) {
            return KERN_INVALID_RIGHT;
        }
        *upl_size = (upl_size_t) (named_entry->size - offset);
        if (*upl_size != named_entry->size - offset) {
            return KERN_INVALID_ARGUMENT;
        }
    }
    if (caller_flags & UPL_COPYOUT_FROM) {
        if ((named_entry->protection & VM_PROT_READ)
            != VM_PROT_READ) {
            return KERN_INVALID_RIGHT;
        }
    } else {
        if ((named_entry->protection &
            (VM_PROT_READ | VM_PROT_WRITE))
            != (VM_PROT_READ | VM_PROT_WRITE)) {
            return KERN_INVALID_RIGHT;
        }
    }
    if (named_entry->size < (offset + *upl_size)) {
        return KERN_INVALID_ARGUMENT;
    }

    /* the callers parameter offset is defined to be the */
    /* offset from beginning of named entry offset in object */
    offset = offset + named_entry->offset;

    if (named_entry->is_sub_map ||
        named_entry->is_copy) {
        return KERN_INVALID_ARGUMENT;
    }

    named_entry_lock(named_entry);

    /* This is the case where we are going to operate */
    /* on an already known object.  If the object is  */
    /* not ready it is internal.  An external         */
    /* object cannot be mapped until it is ready      */
    /* we can therefore avoid the ready check         */
    /* in this case.                                  */
    assert(named_entry->is_object);
    object = vm_named_entry_to_vm_object(named_entry);
    vm_object_reference(object);
    named_entry_unlock(named_entry);

    if (!object->private) {
        if (*upl_size > MAX_UPL_TRANSFER_BYTES) {
            *upl_size = MAX_UPL_TRANSFER_BYTES;
        }
        if (object->phys_contiguous) {
            *flags = UPL_PHYS_CONTIG;
        } else {
            *flags = 0;
        }
    } else {
        *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
    }

    ret = vm_object_iopl_request(object,
        offset,
        *upl_size,
        upl_ptr,
        user_page_list,
        page_list_count,
        (upl_control_flags_t)(unsigned int)caller_flags);
    vm_object_deallocate(object);
/*
 * These symbols are looked up at runtime by vmware, VirtualBox,
 * despite not being exported in the symbol sets.
 */

#if defined(__x86_64__)

kern_return_t
mach_vm_map(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          initial_size,
    mach_vm_offset_t        mask,
    int                     flags,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance);

kern_return_t
mach_vm_remap(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_map_t                src_map,
    mach_vm_offset_t        memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    vm_inherit_t            inheritance);

kern_return_t
mach_vm_map(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          initial_size,
    mach_vm_offset_t        mask,
    int                     flags,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    return mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
               offset, copy, cur_protection, max_protection, inheritance);
}

kern_return_t
mach_vm_remap(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_map_t                src_map,
    mach_vm_offset_t        memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    vm_inherit_t            inheritance)
{
    return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
               copy, cur_protection, max_protection, inheritance);
}

kern_return_t
vm_map(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    ipc_port_t              port,
    vm_offset_t             offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance);

kern_return_t
vm_map(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    ipc_port_t              port,
    vm_offset_t             offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_map_kernel(target_map, address, size, mask,
               flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
               port, offset, copy,
               cur_protection, max_protection, inheritance);
}

#endif /* __x86_64__ */