/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can either be 32 or 64, or the kernel
 * task can be 32 or 64. mach_vm_allocate makes sense everywhere, and is preferred
 * for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32 platforms,
 * the MIG glue should never call into vm_allocate directly, because the calling
 * task and kernel_task are unlikely to use the same size parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */

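/*
 * Illustrative sketch (not part of this file): from user space, the "wide"
 * subsystem 4800 entrypoints in this file are reached through the mach_vm_*
 * MIG stubs declared in <mach/mach_vm.h>.  A minimal caller, assuming only
 * the standard user-level Mach headers, might look like:
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t    len  = 4 * 4096;
 *	kern_return_t kr = mach_vm_allocate(mach_task_self(), &addr, len,
 *					    VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		... use the zero-filled pages at addr ...
 *		(void) mach_vm_deallocate(mach_task_self(), addr, len);
 *	}
 *
 * The legacy vm_allocate()/vm_deallocate() stubs behave the same way but are
 * limited to address ranges describable with vm_address_t.
 */
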
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/sdt.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_init.h>

#include <san/kasan.h>

vm_size_t	upl_offset_to_pagelist = 0;

/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate_external(
	vm_map_t		map,
	mach_vm_offset_t	*addr,
	mach_vm_size_t		size,
	int			flags)
{
	vm_tag_t tag;

	VM_GET_FLAGS_ALIAS(flags, tag);
	return (mach_vm_allocate_kernel(map, addr, size, flags, tag));
}

kern_return_t
mach_vm_allocate_kernel(
	vm_map_t		map,
	mach_vm_offset_t	*addr,
	mach_vm_size_t		size,
	int			flags,
	vm_tag_t		tag)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;
	boolean_t	anywhere;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain useable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0)
			map_addr += VM_MAP_PAGE_SIZE(map);
	} else
		map_addr = vm_map_trunc_page(*addr,
					     VM_MAP_PAGE_MASK(map));
	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(map));
	if (map_size == 0)
		return(KERN_INVALID_ARGUMENT);

	result = vm_map_enter(map, &map_addr, map_size,
			      (vm_map_offset_t)0, flags,
			      VM_MAP_KERNEL_FLAGS_NONE, tag,
			      VM_OBJECT_NULL, (vm_object_offset_t)0,
			      FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			      VM_INHERIT_DEFAULT);

	*addr = (mach_vm_offset_t) map_addr;
	return(result);
}

/*
 *	vm_allocate
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate_external(
	vm_map_t	map,
	vm_offset_t	*addr,
	vm_size_t	size,
	int		flags)
{
	vm_tag_t tag;

	VM_GET_FLAGS_ALIAS(flags, tag);
	return (vm_allocate_kernel(map, addr, size, flags, tag));
}

kern_return_t
vm_allocate_kernel(
	vm_map_t	map,
	vm_offset_t	*addr,
	vm_size_t	size,
	int		flags,
	vm_tag_t	tag)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;
	boolean_t	anywhere;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain useable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0)
			map_addr += VM_MAP_PAGE_SIZE(map);
	} else
		map_addr = vm_map_trunc_page(*addr,
					     VM_MAP_PAGE_MASK(map));
	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(map));
	if (map_size == 0)
		return(KERN_INVALID_ARGUMENT);

	result = vm_map_enter(map, &map_addr, map_size,
			      (vm_map_offset_t)0, flags,
			      VM_MAP_KERNEL_FLAGS_NONE, tag,
			      VM_OBJECT_NULL, (vm_object_offset_t)0,
			      FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			      VM_INHERIT_DEFAULT);

#if KASAN
	if (result == KERN_SUCCESS && map->pmap == kernel_pmap) {
		kasan_notify_address(map_addr, map_size);
	}
#endif

	*addr = CAST_DOWN(vm_offset_t, map_addr);
	return(result);
}

/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (mach_vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map,
			     vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map)),
			     vm_map_round_page(start+size, VM_MAP_PAGE_MASK(map)),
			     VM_MAP_NO_FLAGS));
}

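/*
 * Worked example (illustrative only): with a 16K map page size, a call of
 * mach_vm_deallocate(map, 0x4100, 0x100) is widened by the trunc/round pair
 * above to the range [0x4000, 0x8000), i.e. the whole page containing the
 * requested bytes is removed.  Callers that share a page with other
 * allocations must therefore only deallocate ranges they own in their
 * entirety.
 */
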
/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map,
			     vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map)),
			     vm_map_round_page(start+size, VM_MAP_PAGE_MASK(map)),
			     VM_MAP_NO_FLAGS));

#if KASAN
	if (map->pmap == kernel_pmap) {
		kasan_notify_address_nopoison(start, size);
	}
#endif
}

/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_inherit(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size, VM_MAP_PAGE_MASK(map)),
			      new_inheritance));
}

/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_inherit(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size,
	vm_inherit_t	new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size, VM_MAP_PAGE_MASK(map)),
			      new_inheritance));
}

/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_protect(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_protect(map,
			      vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size, VM_MAP_PAGE_MASK(map)),
			      new_protection,
			      set_maximum));
}

/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.  Addressability of the range limited
 *	to the same size as the kernel.
 */
kern_return_t
vm_protect(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size,
	boolean_t	set_maximum,
	vm_prot_t	new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_protect(map,
			      vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size, VM_MAP_PAGE_MASK(map)),
			      new_protection,
			      set_maximum));
}

/*
 * mach_vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
	vm_map_t			map,
	mach_vm_address_t		addr,
	mach_vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(
		map,
		vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(addr+size, VM_MAP_PAGE_MASK(map)),
		attribute,
		value);
}

/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc. Limited addressability
 * (same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
	vm_map_t			map,
	vm_address_t			addr,
	vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(
		map,
		vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(addr+size, VM_MAP_PAGE_MASK(map)),
		attribute,
		value);
}

/*
 * mach_vm_read -
 * Read/copy a range from one address space and return it to the caller.
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 *
 * JMM - because of mach_msg_type_number_t, this call is limited to a
 * single 4GB region at this time.
 */
kern_return_t
mach_vm_read(
	vm_map_t		map,
	mach_vm_address_t	addr,
	mach_vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if ((mach_msg_type_number_t) size != size)
		return KERN_INVALID_ARGUMENT;

	error = vm_map_copyin(map,
			      (vm_map_address_t)addr,
			      (vm_map_size_t)size,
			      FALSE,	/* src_destroy */
			      &ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
	}
	return(error);
}

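/*
 * Illustrative user-space sketch (not part of this file), assuming the
 * mach_vm_read() MIG stub from <mach/mach_vm.h> and a send right to the
 * target task; target_task, remote_addr and remote_len are placeholders:
 *
 *	vm_offset_t buf = 0;
 *	mach_msg_type_number_t cnt = 0;
 *	kern_return_t kr = mach_vm_read(target_task, remote_addr, remote_len,
 *					&buf, &cnt);
 *	if (kr == KERN_SUCCESS) {
 *		... inspect cnt bytes at buf ...
 *		(void) mach_vm_deallocate(mach_task_self(), buf, cnt);
 *	}
 *
 * The returned buffer lives in the caller's address space (the copy object
 * is copied out by IPC on reply), so it must be deallocated when done.
 */
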
/*
 * vm_read -
 * Read/copy a range from one address space and return it to the caller.
 * Limited addressability (same range limits as for the native kernel map).
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 */
kern_return_t
vm_read(
	vm_map_t		map,
	vm_address_t		addr,
	vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size > (unsigned)(mach_msg_type_number_t) -1) {
		/*
		 * The kernel could handle a 64-bit "size" value, but
		 * it could not return the size of the data in "*data_size"
		 * without overflowing.
		 * Let's reject this "size" as invalid.
		 */
		return KERN_INVALID_ARGUMENT;
	}

	error = vm_map_copyin(map,
			      (vm_map_address_t)addr,
			      (vm_map_size_t)size,
			      FALSE,	/* src_destroy */
			      &ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
	}
	return(error);
}

/*
 * mach_vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 */
kern_return_t
mach_vm_read_list(
	vm_map_t		map,
	mach_vm_read_entry_t	data_list,
	natural_t		count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if(map_size != 0) {
			error = vm_map_copyin(map,
					      map_addr,
					      map_size,
					      FALSE,	/* src_destroy */
					      &copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
						current_task()->map,
						&map_addr,
						copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address = map_addr;
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return(error);
}

/*
 * vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 *
 * The source and destination ranges are limited to those
 * that can be described with a vm_address_t (i.e. same
 * size map as the kernel).
 *
 * JMM - If the result of the copyout is an address range
 * that cannot be described with a vm_address_t (i.e. the
 * caller had a larger address space but used this call
 * anyway), it will result in a truncated address being
 * returned (and a likely confused caller).
 */
kern_return_t
vm_read_list(
	vm_map_t	map,
	vm_read_entry_t	data_list,
	natural_t	count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if(map_size != 0) {
			error = vm_map_copyin(map,
					      map_addr,
					      map_size,
					      FALSE,	/* src_destroy */
					      &copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
						       &map_addr,
						       copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address =
						CAST_DOWN(vm_offset_t, map_addr);
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return(error);
}

/*
 * mach_vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * In making an assumption that the current thread is local, it is
 * no longer cluster-safe without a fully supportive local proxy
 * thread/task (but we don't support clusters anymore so this is moot).
 */
kern_return_t
mach_vm_read_overwrite(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	mach_vm_address_t	data,
	mach_vm_size_t		*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
			      (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					      (vm_map_address_t)data,
					      copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return(error);
}

/*
 * vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * This routine adds the additional limitation that the source and
 * destination ranges must be describable with vm_address_t values
 * (i.e. the same size address spaces as the kernel, or at least the
 * ranges are in that first portion of the respective address spaces).
 */
kern_return_t
vm_read_overwrite(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_address_t	data,
	vm_size_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
			      (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					      (vm_map_address_t)data,
					      copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return(error);
}

/*
 * mach_vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 */
kern_return_t
mach_vm_write(
	vm_map_t			map,
	mach_vm_address_t		address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
}

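/*
 * Illustrative user-space sketch (not part of this file): the mach_vm_write()
 * stub overwrites memory in a target task with a buffer from the caller's own
 * map.  Names other than the stub itself are placeholders:
 *
 *	char local[256];
 *	memset(local, 0xAA, sizeof(local));
 *	kern_return_t kr = mach_vm_write(target_task, remote_addr,
 *					 (vm_offset_t)local,
 *					 (mach_msg_type_number_t)sizeof(local));
 *
 * The destination range must already be allocated and writable in the target
 * map; mach_vm_write() does not allocate.
 */
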
/*
 * vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 *
 * The addressability of the range of addresses to overwrite is
 * limited by the use of a vm_address_t (same size as kernel map).
 * Either the target map is also small, or the range is in the
 * low addresses within it.
 */
kern_return_t
vm_write(
	vm_map_t			map,
	vm_address_t			address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
}

/*
 * mach_vm_copy -
 * Overwrite one range of the specified map with the contents of
 * another range within that same map (i.e. both address ranges
 * are "over there").
 */
kern_return_t
mach_vm_copy(
	vm_map_t		map,
	mach_vm_address_t	source_address,
	mach_vm_size_t		size,
	mach_vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}

/*
 * vm_copy -
 * Overwrite one range of the specified map with the contents of
 * another range within that same map (i.e. both address ranges
 * are "over there").  Limited addressability (vm_address_t).
 */
kern_return_t
vm_copy(
	vm_map_t	map,
	vm_address_t	source_address,
	vm_size_t	size,
	vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}

/*
 * mach_vm_map -
 * Map some range of an object into an address space.
 *
 * The object can be one of several types of objects:
 *	NULL - anonymous memory
 *	a named entry - a range within another address space
 *			or a range within a memory object
 *	a whole memory object
 *
 */
kern_return_t
mach_vm_map_external(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	vm_tag_t tag;

	VM_GET_FLAGS_ALIAS(flags, tag);
	return (mach_vm_map_kernel(target_map, address, initial_size, mask, flags, tag, port,
				   offset, copy, cur_protection, max_protection, inheritance));
}

kern_return_t
mach_vm_map_kernel(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	int			flags,
	vm_tag_t		tag,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	kern_return_t		kr;
	vm_map_offset_t		vmmaddr;

	vmmaddr = (vm_map_offset_t) *address;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_MAP)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_mem_object(target_map, &vmmaddr, initial_size,
				     mask, flags, VM_MAP_KERNEL_FLAGS_NONE,
				     tag, port, offset, copy,
				     cur_protection, max_protection,
				     inheritance);

#if KASAN
	if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) {
		kasan_notify_address(vmmaddr, initial_size);
	}
#endif

	*address = vmmaddr;
	return kr;
}

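/*
 * Illustrative user-space sketch (not part of this file): mapping anonymous
 * memory with an alignment constraint through the mach_vm_map() stub.
 * MACH_PORT_NULL as the object yields zero-fill memory, and a mask of
 * (2 MB - 1) asks for 2 MB alignment; len is a placeholder:
 *
 *	mach_vm_address_t addr = 0;
 *	kern_return_t kr = mach_vm_map(mach_task_self(), &addr, len,
 *				       (2 * 1024 * 1024) - 1,
 *				       VM_FLAGS_ANYWHERE,
 *				       MACH_PORT_NULL, 0, FALSE,
 *				       VM_PROT_DEFAULT, VM_PROT_ALL,
 *				       VM_INHERIT_DEFAULT);
 */
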
/* legacy interface */
kern_return_t
vm_map_64_external(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	vm_tag_t tag;

	VM_GET_FLAGS_ALIAS(flags, tag);
	return (vm_map_64_kernel(target_map, address, size, mask, flags, tag, port, offset,
				 copy, cur_protection, max_protection, inheritance));
}

kern_return_t
vm_map_64_kernel(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	vm_tag_t		tag,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	kern_return_t kr;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;

	kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask, flags, tag,
				port, offset, copy,
				cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}

/* temporary, until world build */
kern_return_t
vm_map_external(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_offset_t		offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	vm_tag_t tag;

	VM_GET_FLAGS_ALIAS(flags, tag);
	return (vm_map_kernel(target_map, address, size, mask, flags, tag, port, offset, copy, cur_protection, max_protection, inheritance));
}

kern_return_t
vm_map_kernel(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	vm_tag_t		tag,
	ipc_port_t		port,
	vm_offset_t		offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	vm_object_offset_t obj_offset;
	kern_return_t kr;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;
	obj_offset = (vm_object_offset_t)offset;

	kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask, flags, tag,
				port, obj_offset, copy,
				cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}

/*
 * mach_vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 */
kern_return_t
mach_vm_remap_external(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	int			flags,
	vm_map_t		src_map,
	mach_vm_offset_t	memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	vm_tag_t tag;
	VM_GET_FLAGS_ALIAS(flags, tag);

	return (mach_vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, memory_address,
				     copy, cur_protection, max_protection, inheritance));
}

kern_return_t
mach_vm_remap_kernel(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	int			flags,
	vm_tag_t		tag,
	vm_map_t		src_map,
	mach_vm_offset_t	memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map, &map_addr, size, mask, flags,
			  VM_MAP_KERNEL_FLAGS_NONE, tag,
			  src_map, memory_address, copy,
			  cur_protection, max_protection, inheritance);
	*address = map_addr;
	return kr;
}

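/*
 * Illustrative user-space sketch (not part of this file): remapping a range
 * from another task into the caller's map with the mach_vm_remap() stub.
 * src_task, src_addr and len are placeholders; copy == FALSE gives a shared
 * mapping rather than a snapshot:
 *
 *	mach_vm_address_t here = 0;
 *	vm_prot_t cur = 0, max = 0;
 *	kern_return_t kr = mach_vm_remap(mach_task_self(), &here, len, 0,
 *					 VM_FLAGS_ANYWHERE, src_task, src_addr,
 *					 FALSE, &cur, &max, VM_INHERIT_NONE);
 *
 * On success, cur and max report the protections actually obtained for the
 * new mapping.
 */
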
/*
 * vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 *
 * The addressability of the source and target address
 * range is limited by the size of vm_address_t (in the
 * kernel context).
 */
kern_return_t
vm_remap_external(
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags,
	vm_map_t	src_map,
	vm_offset_t	memory_address,
	boolean_t	copy,
	vm_prot_t	*cur_protection,
	vm_prot_t	*max_protection,
	vm_inherit_t	inheritance)
{
	vm_tag_t tag;
	VM_GET_FLAGS_ALIAS(flags, tag);

	return (vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map,
			memory_address, copy, cur_protection, max_protection, inheritance));
}

kern_return_t
vm_remap_kernel(
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags,
	vm_tag_t	tag,
	vm_map_t	src_map,
	vm_offset_t	memory_address,
	boolean_t	copy,
	vm_prot_t	*cur_protection,
	vm_prot_t	*max_protection,
	vm_inherit_t	inheritance)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map, &map_addr, size, mask, flags,
			  VM_MAP_KERNEL_FLAGS_NONE, tag,
			  src_map, memory_address, copy,
			  cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}

/*
 * NOTE: these routines (and this file) will no longer require mach_host_server.h
 * when mach_vm_wire and vm_wire are changed to use ledgers.
 */
#include <mach/mach_host_server.h>

/*
 *	mach_vm_wire
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire_external(
	host_priv_t		host_priv,
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_prot_t		access)
{
	return (mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK));
}

kern_return_t
mach_vm_wire_kernel(
	host_priv_t		host_priv,
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_prot_t		access,
	vm_tag_t		tag)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire_kernel(map,
				vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map)),
				vm_map_round_page(start+size, VM_MAP_PAGE_MASK(map)),
				access, tag,
				TRUE);
	} else {
		rc = vm_map_unwire(map,
				vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map)),
				vm_map_round_page(start+size, VM_MAP_PAGE_MASK(map)),
				TRUE);
	}
	return rc;
}

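/*
 * Illustrative sketch (not part of this file): wiring from user space goes
 * through the mach_vm_wire() stub and requires the host-priv port, so it is
 * effectively privileged; host_priv below is assumed to have been obtained
 * elsewhere, and addr/len are placeholders:
 *
 *	kern_return_t kr = mach_vm_wire(host_priv, mach_task_self(), addr, len,
 *					VM_PROT_READ | VM_PROT_WRITE);
 *	...
 *	kr = mach_vm_wire(host_priv, mach_task_self(), addr, len, VM_PROT_NONE);
 *
 * Passing VM_PROT_NONE unwires the range again, as the comment above notes.
 */
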
/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t		host_priv,
	vm_map_t		map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if ((access & ~VM_PROT_ALL) || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		rc = KERN_SUCCESS;
	} else if (access != VM_PROT_NONE) {
		rc = vm_map_wire_kernel(map,
				vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map)),
				vm_map_round_page(start+size, VM_MAP_PAGE_MASK(map)),
				access, VM_KERN_MEMORY_OSFMK,
				TRUE);
	} else {
		rc = vm_map_unwire(map,
				vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map)),
				vm_map_round_page(start+size, VM_MAP_PAGE_MASK(map)),
				TRUE);
	}
	return rc;
}

/*
 *	mach_vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
kern_return_t
mach_vm_msync(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	vm_sync_t		sync_flags)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			(vm_map_size_t)size, sync_flags);
}

/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
kern_return_t
vm_msync(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_sync_t	sync_flags)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			(vm_map_size_t)size, sync_flags);
}

kern_return_t
vm_toggle_entry_reuse(int toggle, int *old_value)
{
	vm_map_t map = current_map();

	assert(!map->is_nested_map);
	if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){
		*old_value = map->disable_vmentry_reuse;
	} else if(toggle == VM_TOGGLE_SET){
		vm_map_entry_t map_to_entry;

		vm_map_lock(map);
		vm_map_disable_hole_optimization(map);
		map->disable_vmentry_reuse = TRUE;
		__IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
		if (map->first_free == map_to_entry) {
			map->highest_entry_end = vm_map_min(map);
		} else {
			map->highest_entry_end = map->first_free->vme_end;
		}
		vm_map_unlock(map);
	} else if (toggle == VM_TOGGLE_CLEAR){
		vm_map_lock(map);
		map->disable_vmentry_reuse = FALSE;
		vm_map_unlock(map);
	} else
		return KERN_INVALID_ARGUMENT;

	return KERN_SUCCESS;
}

/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
kern_return_t
mach_vm_behavior_set(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_behavior_t		new_behavior)
{
	vm_map_offset_t	align_mask;

	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	switch (new_behavior) {
	case VM_BEHAVIOR_REUSABLE:
	case VM_BEHAVIOR_REUSE:
	case VM_BEHAVIOR_CAN_REUSE:
		/*
		 * Align to the hardware page size, to allow
		 * malloc() to maximize the amount of re-usability,
		 * even on systems with larger software page size.
		 */
		align_mask = PAGE_MASK;
		break;
	default:
		align_mask = VM_MAP_PAGE_MASK(map);
		break;
	}

	return vm_map_behavior_set(map,
				   vm_map_trunc_page(start, align_mask),
				   vm_map_round_page(start+size, align_mask),
				   new_behavior);
}

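/*
 * Illustrative user-space sketch (not part of this file): an allocator
 * handing a free run of pages back to the system without unmapping it, via
 * the mach_vm_behavior_set() stub; page_aligned_addr and run_len are
 * placeholders:
 *
 *	kern_return_t kr = mach_vm_behavior_set(mach_task_self(),
 *						page_aligned_addr, run_len,
 *						VM_BEHAVIOR_REUSABLE);
 *
 * Pages marked VM_BEHAVIOR_REUSABLE may be reclaimed at any time, so their
 * contents must be treated as lost; marking them VM_BEHAVIOR_REUSE (or simply
 * writing to them again) puts them back into normal service.
 */
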
/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
kern_return_t
vm_behavior_set(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size,
	vm_behavior_t	new_behavior)
{
	if (start + size < start)
		return KERN_INVALID_ARGUMENT;

	return mach_vm_behavior_set(map,
				    (mach_vm_offset_t) start,
				    (mach_vm_size_t) size,
				    new_behavior);
}

/*
 *	mach_vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
mach_vm_region(
	vm_map_t		 map,
	mach_vm_offset_t	*address,		/* IN/OUT */
	mach_vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = map_addr;
	*size = map_size;
	return kr;
}

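/*
 * Illustrative user-space sketch (not part of this file): enumerating the
 * regions of a task with the mach_vm_region() stub; task is a placeholder
 * send right:
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t size = 0;
 *	vm_region_basic_info_data_64_t info;
 *	mach_msg_type_number_t cnt = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t obj_name = MACH_PORT_NULL;
 *
 *	while (mach_vm_region(task, &addr, &size, VM_REGION_BASIC_INFO_64,
 *			      (vm_region_info_t)&info, &cnt,
 *			      &obj_name) == KERN_SUCCESS) {
 *		... addr/size/info describe one region ...
 *		addr += size;
 *		cnt = VM_REGION_BASIC_INFO_COUNT_64;
 *	}
 *
 * Each call returns the first region at or above the address passed in, so
 * bumping addr by size walks the whole map.
 */
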
/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
vm_region_64(
	vm_map_t		 map,
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_offset_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

kern_return_t
vm_region(
	vm_map_t		 map,
	vm_address_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
mach_vm_region_recurse(
	vm_map_t			map,
	mach_vm_address_t		*address,
	mach_vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			map,
			&map_addr,
			&map_size,
			depth,
			(vm_region_submap_info_64_t)info,
			infoCnt);

	*address = map_addr;
	*size = map_size;
	return kr;
}

/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
vm_region_recurse_64(
	vm_map_t			map,
	vm_address_t			*address,
	vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_64_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			map,
			&map_addr,
			&map_size,
			depth,
			(vm_region_submap_info_64_t)info,
			infoCnt);

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

kern_return_t
vm_region_recurse(
	vm_map_t			map,
	vm_offset_t			*address,	/* IN/OUT */
	vm_size_t			*size,		/* OUT */
	natural_t			*depth,		/* IN/OUT */
	vm_region_recurse_info_t	info32,		/* IN/OUT */
	mach_msg_type_number_t		*infoCnt)	/* IN/OUT */
{
	vm_region_submap_info_data_64_t info64;
	vm_region_submap_info_t info;
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr,&map_size,
				      depth, &info64, infoCnt);

	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

kern_return_t
mach_vm_purgable_control(
	vm_map_t		map,
	mach_vm_offset_t	address,
	vm_purgable_t		control,
	int			*state)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
		/* not allowed from user-space */
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address, PAGE_MASK),
				       control,
				       state);
}

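/*
 * Illustrative user-space sketch (not part of this file): purgeable state is
 * only meaningful for memory allocated with VM_FLAGS_PURGABLE; the
 * mach_vm_purgable_control() stub then toggles it.  len is a placeholder:
 *
 *	mach_vm_address_t addr = 0;
 *	kern_return_t kr = mach_vm_allocate(mach_task_self(), &addr, len,
 *					    VM_FLAGS_PURGABLE | VM_FLAGS_ANYWHERE);
 *	int state = VM_PURGABLE_VOLATILE;
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *				      VM_PURGABLE_SET_STATE, &state);
 *	...
 *	state = VM_PURGABLE_NONVOLATILE;
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *				      VM_PURGABLE_SET_STATE, &state);
 *	if (state == VM_PURGABLE_EMPTY) {
 *		... contents were reclaimed while volatile ...
 *	}
 *
 * As the check above enforces, VM_PURGABLE_SET_STATE_FROM_KERNEL is rejected
 * when it arrives from user space.
 */
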
kern_return_t
vm_purgable_control(
	vm_map_t	map,
	vm_offset_t	address,
	vm_purgable_t	control,
	int		*state)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
		/* not allowed from user-space */
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address, PAGE_MASK),
				       control,
				       state);
}

/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host priv port).  Set this variable to zero if
 *	you want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
kern_return_t
vm_allocate_cpm(
	host_priv_t		host_priv,
	vm_map_t		map,
	vm_address_t		*addr,
	vm_size_t		size,
	int			flags)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
		return KERN_INVALID_HOST;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*addr;
	map_size = (vm_map_size_t)size;

	kr = vm_map_enter_cpm(map,
			      &map_addr,
			      map_size,
			      flags);

	*addr = CAST_DOWN(vm_address_t, map_addr);
	return kr;
}

kern_return_t
mach_vm_page_query(
	vm_map_t		map,
	mach_vm_offset_t	offset,
	int			*disposition,
	int			*ref_count)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(
		map,
		vm_map_trunc_page(offset, PAGE_MASK),
		disposition, ref_count);
}

kern_return_t
vm_map_page_query(
	vm_map_t	map,
	vm_offset_t	offset,
	int		*disposition,
	int		*ref_count)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(
		map,
		vm_map_trunc_page(offset, PAGE_MASK),
		disposition, ref_count);
}

kern_return_t
mach_vm_page_range_query(
	vm_map_t		map,
	mach_vm_offset_t	address,
	mach_vm_size_t		size,
	mach_vm_address_t	dispositions_addr,
	mach_vm_size_t		*dispositions_count)
{
	kern_return_t		kr = KERN_SUCCESS;
	int			num_pages = 0, i = 0;
	mach_vm_size_t		curr_sz = 0, copy_sz = 0;
	mach_vm_size_t		disp_buf_req_size = 0, disp_buf_total_size = 0;
	mach_msg_type_number_t	count = 0;

	void			*info = NULL;
	void			*local_disp = NULL;
	vm_map_size_t		info_size = 0, local_disp_size = 0;
	mach_vm_offset_t	start = 0, end = 0;

	if (map == VM_MAP_NULL || dispositions_count == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	disp_buf_req_size = ( *dispositions_count * sizeof(int));
	start = mach_vm_trunc_page(address);
	end = mach_vm_round_page(address + size);

	if (end < start) {
		return KERN_INVALID_ARGUMENT;
	}

	if (disp_buf_req_size == 0 || (end == start)) {
		return KERN_SUCCESS;
	}

	/*
	 * For large requests, we will go through them
	 * MAX_PAGE_RANGE_QUERY chunk at a time.
	 */

	curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY);
	num_pages = (int) (curr_sz >> PAGE_SHIFT);

	info_size = num_pages * sizeof(vm_page_info_basic_data_t);
	info = kalloc(info_size);

	if (info == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	local_disp_size = num_pages * sizeof(int);
	local_disp = kalloc(local_disp_size);

	if (local_disp == NULL) {

		kfree(info, info_size);
		info = NULL;
		return KERN_RESOURCE_SHORTAGE;
	}

	while (size) {

		count = VM_PAGE_INFO_BASIC_COUNT;
		kr = vm_map_page_range_info_internal(
				map,
				start,
				mach_vm_round_page(start + curr_sz),
				VM_PAGE_INFO_BASIC,
				(vm_page_info_t) info,
				&count);

		assert(kr == KERN_SUCCESS);

		for (i = 0; i < num_pages; i++) {

			((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition;
		}

		copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int)/* an int per page */);
		kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz);

		start += curr_sz;
		disp_buf_req_size -= copy_sz;
		disp_buf_total_size += copy_sz;

		if (kr != 0) {
			break;
		}

		if ((disp_buf_req_size == 0) || (curr_sz >= size)) {

			/*
			 * We might have inspected the full range OR
			 * more than it esp. if the user passed in
			 * non-page aligned start/size and/or if we
			 * descended into a submap. We are done here.
			 */

			size = 0;

		} else {

			dispositions_addr += copy_sz;

			size -= curr_sz;

			curr_sz = MIN(mach_vm_round_page(size), MAX_PAGE_RANGE_QUERY);
			num_pages = (int)(curr_sz >> PAGE_SHIFT);
		}
	}

	*dispositions_count = disp_buf_total_size / sizeof(int);

	kfree(local_disp, local_disp_size);
	local_disp = NULL;

	kfree(info, info_size);
	info = NULL;

	return kr;
}

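/*
 * Illustrative user-space sketch (not part of this file), assuming the
 * mach_vm_page_range_query() MIG stub is exposed by the SDK being built
 * against; addr and len are placeholders:
 *
 *	mach_vm_size_t page_count = len / vm_page_size;
 *	int *disps = calloc(page_count, sizeof(int));
 *	mach_vm_size_t disp_count = page_count;
 *	kern_return_t kr = mach_vm_page_range_query(mach_task_self(),
 *						    addr, len,
 *						    (mach_vm_address_t)disps,
 *						    &disp_count);
 *
 * On success, disp_count holds the number of dispositions copied out; each
 * entry is a VM_PAGE_QUERY_PAGE_*-style bitmask describing one page.
 */
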
kern_return_t
mach_vm_page_info(
	vm_map_t		map,
	mach_vm_address_t	address,
	vm_page_info_flavor_t	flavor,
	vm_page_info_t		info,
	mach_msg_type_number_t	*count)
{
	kern_return_t	kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = vm_map_page_info(map, address, flavor, info, count);
	return kr;
}

/* map a (whole) upl into an address space */
kern_return_t
vm_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_address_t	*dst_addr)
{
	vm_map_offset_t	map_addr;
	kern_return_t	kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_upl(map, upl, &map_addr);
	*dst_addr = CAST_DOWN(vm_address_t, map_addr);
	return kr;
}

kern_return_t
vm_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return (vm_map_remove_upl(map, upl));
}

/* Retrieve a upl for an object underlying an address range in a map */

kern_return_t
vm_map_get_upl(
	vm_map_t		map,
	vm_map_offset_t		map_offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	upl_control_flags_t	*flags,
	vm_tag_t		tag,
	int			force_data_sync)
{
	upl_control_flags_t map_flags;
	kern_return_t	    kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_flags = *flags & ~UPL_NOZEROFILL;
	if (force_data_sync)
		map_flags |= UPL_FORCE_DATA_SYNC;

	kr = vm_map_create_upl(map,
			       map_offset,
			       upl_size,
			       upl,
			       page_list,
			       count,
			       &map_flags,
			       tag);

	*flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
	return kr;
}

#if CONFIG_EMBEDDED
extern int proc_selfpid(void);
extern char *proc_name_address(void *p);
int cs_executable_mem_entry = 0;
int log_executable_mem_entry = 0;
#endif /* CONFIG_EMBEDDED */

/*
 *	mach_make_memory_entry_64
 *
 *	Think of it as a two-stage vm_remap() operation.  First
 *	you get a handle.  Second, you map that handle somewhere
 *	else.  Rather than doing it all at once (and without
 *	needing access to the other whole map).
 */
kern_return_t
mach_make_memory_entry_64(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_handle)
{
	if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		return KERN_INVALID_VALUE;
	}

	return mach_make_memory_entry_internal(target_map,
					       size,
					       offset,
					       permission,
					       object_handle,
					       parent_handle);
}

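/*
 * Illustrative user-space sketch (not part of this file): the two-stage
 * pattern described above -- make a handle, then map it elsewhere.  src_addr
 * and len are placeholders:
 *
 *	memory_object_size_t entry_size = len;
 *	mach_port_t entry = MACH_PORT_NULL;
 *	kern_return_t kr = mach_make_memory_entry_64(mach_task_self(),
 *						     &entry_size, src_addr,
 *						     VM_PROT_READ | VM_PROT_WRITE,
 *						     &entry, MACH_PORT_NULL);
 *
 *	mach_vm_address_t mapped = 0;
 *	kr = mach_vm_map(mach_task_self(), &mapped, entry_size, 0,
 *			 VM_FLAGS_ANYWHERE, entry, 0, FALSE,
 *			 VM_PROT_READ | VM_PROT_WRITE,
 *			 VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
 *
 * The entry port can also be sent to another task, which can map it the same
 * way to share the underlying pages.
 */
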
extern int pacified_purgeable_iokit;

kern_return_t
mach_make_memory_entry_internal(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_handle)
{
	vm_map_version_t	version;
	vm_named_entry_t	parent_entry;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	kern_return_t		kr;
	vm_map_t		real_map;

	/* needed for call to vm_map_lookup_locked */
	boolean_t		wired;
	boolean_t		iskernel;
	vm_object_offset_t	obj_off;
	vm_prot_t		prot;
	struct vm_object_fault_info	fault_info;
	vm_object_t		object;
	vm_object_t		shadow_object;

	/* needed for direct map entry manipulation */
	vm_map_entry_t		map_entry;
	vm_map_entry_t		next_entry;
	vm_map_t		local_map;
	vm_map_t		original_map = target_map;
	vm_map_size_t		total_size, map_size;
	vm_map_offset_t		map_start, map_end;
	vm_map_offset_t		local_offset;
	vm_object_size_t	mappable_size;

	/*
	 * Stash the offset in the page for use by vm_map_enter_mem_object()
	 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
	 */
	vm_object_offset_t	offset_in_page;

	unsigned int		access;
	vm_prot_t		protections;
	vm_prot_t		original_protections, mask_protections;
	unsigned int		wimg_mode;

	boolean_t		force_shadow = FALSE;
	boolean_t		use_data_addr;
	boolean_t		use_4K_compat;

	if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		return KERN_INVALID_VALUE;
	}

	if (parent_handle != IP_NULL &&
	    ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
		parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
	} else {
		parent_entry = NULL;
	}

	if (parent_entry && parent_entry->is_copy) {
		return KERN_INVALID_ARGUMENT;
	}

	original_protections = permission & VM_PROT_ALL;
	protections = original_protections;
	mask_protections = permission & VM_PROT_IS_MASK;
	access = GET_MAP_MEM(permission);
	use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0);
	use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0);

	user_handle = IP_NULL;
	user_entry = NULL;

	map_start = vm_map_trunc_page(offset, PAGE_MASK);

	if (permission & MAP_MEM_ONLY) {
		boolean_t	parent_is_object;

		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;

		if (use_data_addr || use_4K_compat || parent_entry == NULL) {
			return KERN_INVALID_ARGUMENT;
		}

		parent_is_object = !parent_entry->is_sub_map;
		object = parent_entry->backing.object;
		if(parent_is_object && object != VM_OBJECT_NULL)
			wimg_mode = object->wimg_bits;
		else
			wimg_mode = VM_WIMG_USE_DEFAULT;
		if((access != GET_MAP_MEM(parent_entry->protection)) &&
		   !(parent_entry->protection & VM_PROT_WRITE)) {
			return KERN_INVALID_RIGHT;
		}
		vm_prot_to_wimg(access, &wimg_mode);
		if (access != MAP_MEM_NOOP)
			SET_MAP_MEM(access, parent_entry->protection);
		if (parent_is_object && object &&
		    (access != MAP_MEM_NOOP) &&
		    (!(object->nophyscache))) {

			if (object->wimg_bits != wimg_mode) {
				vm_object_lock(object);
				vm_object_change_wimg_mode(object, wimg_mode);
				vm_object_unlock(object);
			}
		}
		*object_handle = IP_NULL;
		return KERN_SUCCESS;
	} else if (permission & MAP_MEM_NAMED_CREATE) {
		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;

		if (use_data_addr || use_4K_compat) {
			return KERN_INVALID_ARGUMENT;
		}

		kr = mach_memory_entry_allocate(&user_entry, &user_handle);
		if (kr != KERN_SUCCESS) {
			return KERN_FAILURE;
		}

		/*
		 * Force the creation of the VM object now.
		 */
		if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
			/*
			 * LP64todo - for now, we can only allocate 4GB-4096
			 * internal objects because the default pager can't
			 * page bigger ones.  Remove this when it can.
			 */
			kr = KERN_FAILURE;
			goto make_mem_done;
		}

		object = vm_object_allocate(map_size);
		assert(object != VM_OBJECT_NULL);

		if (permission & MAP_MEM_PURGABLE) {
			if (! (permission & VM_PROT_WRITE)) {
				/* if we can't write, we can't purge */
				vm_object_deallocate(object);
				kr = KERN_INVALID_ARGUMENT;
				goto make_mem_done;
			}
			object->purgable = VM_PURGABLE_NONVOLATILE;
			if (permission & MAP_MEM_PURGABLE_KERNEL_ONLY) {
				object->purgeable_only_by_kernel = TRUE;
			}
			assert(object->vo_purgeable_owner == NULL);
			assert(object->resident_page_count == 0);
			assert(object->wired_page_count == 0);
			vm_object_lock(object);
			if (pacified_purgeable_iokit) {
				if (permission & MAP_MEM_LEDGER_TAG_NETWORK) {
					vm_purgeable_nonvolatile_enqueue(object,
									 kernel_task);
				} else {
					vm_purgeable_nonvolatile_enqueue(object,
									 current_task());
				}
			} else {
				if (object->purgeable_only_by_kernel) {
					vm_purgeable_nonvolatile_enqueue(object,
									 kernel_task);
				} else {
					vm_purgeable_nonvolatile_enqueue(object,
									 current_task());
				}
			}
			vm_object_unlock(object);
		}

#if CONFIG_SECLUDED_MEMORY
		if (secluded_for_iokit && /* global boot-arg */
		    ((permission & MAP_MEM_GRAB_SECLUDED)
		     /* XXX FBDP for my testing only */
		     || (secluded_for_fbdp && map_size == 97550336)
			    )) {
			if (!(permission & MAP_MEM_GRAB_SECLUDED) &&
			    secluded_for_fbdp) {
				printf("FBDP: object %p size %lld can grab secluded\n", object, (uint64_t) map_size);
			}
			object->can_grab_secluded = TRUE;
			assert(!object->eligible_for_secluded);
		}
#endif /* CONFIG_SECLUDED_MEMORY */

		/*
		 * The VM object is brand new and nobody else knows about it,
		 * so we don't need to lock it.
		 */

		wimg_mode = object->wimg_bits;
		vm_prot_to_wimg(access, &wimg_mode);
		if (access != MAP_MEM_NOOP) {
			object->wimg_bits = wimg_mode;
		}

		/* the object has no pages, so no WIMG bits to update here */

		/*
		 * We use this path when we want to make sure that
		 * nobody messes with the object (coalesce, for
		 * example) before we map it.
		 * We might want to use these objects for transposition via
		 * vm_object_transpose() too, so we don't want any copy or
		 * shadow objects either...
		 */
		object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
		object->true_share = TRUE;

		user_entry->backing.object = object;
		user_entry->internal = TRUE;
		user_entry->is_sub_map = FALSE;
		user_entry->offset = 0;
		user_entry->data_offset = 0;
		user_entry->protection = protections;
		SET_MAP_MEM(access, user_entry->protection);
		user_entry->size = map_size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in. */

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}

	if (permission & MAP_MEM_VM_COPY) {
		vm_map_copy_t	copy;

		if (target_map == VM_MAP_NULL) {
			return KERN_INVALID_TASK;
		}

		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
		if (use_data_addr || use_4K_compat) {
			offset_in_page = offset - map_start;
			if (use_4K_compat)
				offset_in_page &= ~((signed)(0xFFF));
		} else {
			offset_in_page = 0;
		}

		kr = vm_map_copyin_internal(target_map,
					    map_start,
					    map_size,
					    VM_MAP_COPYIN_ENTRY_LIST,
					    &copy);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		kr = mach_memory_entry_allocate(&user_entry, &user_handle);
		if (kr != KERN_SUCCESS) {
			vm_map_copy_discard(copy);
			return KERN_FAILURE;
		}

		user_entry->backing.copy = copy;
		user_entry->internal = FALSE;
		user_entry->is_sub_map = FALSE;
		user_entry->is_copy = TRUE;
		user_entry->offset = 0;
		user_entry->protection = protections;
		user_entry->size = map_size;
		user_entry->data_offset = offset_in_page;

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}

	if (permission & MAP_MEM_VM_SHARE) {
		vm_map_copy_t	copy;
		vm_prot_t	cur_prot, max_prot;

		if (target_map == VM_MAP_NULL) {
			return KERN_INVALID_TASK;
		}

		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
		if (use_data_addr || use_4K_compat) {
			offset_in_page = offset - map_start;
			if (use_4K_compat)
				offset_in_page &= ~((signed)(0xFFF));
		} else {
			offset_in_page = 0;
		}

		cur_prot = VM_PROT_ALL;
		kr = vm_map_copy_extract(target_map,
					 map_start,
					 map_size,
					 &copy,
					 &cur_prot,
					 &max_prot);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		if (mask_protections) {
			/*
			 * We just want as much of "original_protections"
			 * as we can get out of the actual "cur_prot".
			 */
			protections &= cur_prot;
			if (protections == VM_PROT_NONE) {
				/* no access at all: fail */
				vm_map_copy_discard(copy);
				return KERN_PROTECTION_FAILURE;
			}
		} else {
			/*
			 * We want exactly "original_protections"
			 * out of "cur_prot".
			 */
			if ((cur_prot & protections) != protections) {
				vm_map_copy_discard(copy);
				return KERN_PROTECTION_FAILURE;
			}
		}

		kr = mach_memory_entry_allocate(&user_entry, &user_handle);
		if (kr != KERN_SUCCESS) {
			vm_map_copy_discard(copy);
			return KERN_FAILURE;
		}

		user_entry->backing.copy = copy;
		user_entry->internal = FALSE;
		user_entry->is_sub_map = FALSE;
		user_entry->is_copy = TRUE;
		user_entry->offset = 0;
		user_entry->protection = protections;
		user_entry->size = map_size;
		user_entry->data_offset = offset_in_page;

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}
	if (parent_entry == NULL ||
	    (permission & MAP_MEM_NAMED_REUSE)) {

		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
		if (use_data_addr || use_4K_compat) {
			offset_in_page = offset - map_start;
			if (use_4K_compat)
				offset_in_page &= ~((signed)(0xFFF));
		}

		/* Create a named object based on address range within the task map */
		/* Go find the object at given address */

		if (target_map == VM_MAP_NULL) {
			return KERN_INVALID_TASK;
		}

		protections = original_protections;
		vm_map_lock_read(target_map);

		/* get the object associated with the target address */
		/* note we check the permission of the range against */
		/* that requested by the caller */

		kr = vm_map_lookup_locked(&target_map, map_start,
					  protections | mask_protections,
					  OBJECT_LOCK_EXCLUSIVE, &version,
					  &object, &obj_off, &prot, &wired,
					  /* ... */);
		if (kr != KERN_SUCCESS) {
			vm_map_unlock_read(target_map);
		}

		if (mask_protections) {
			/*
			 * The caller asked us to use the "protections" as
			 * a mask, so restrict "protections" to what this
			 * mapping actually allows.
			 */
			protections &= prot;
		}

#if CONFIG_EMBEDDED
		/*
		 * Wiring would copy the pages to a shadow object.
		 * The shadow object would not be code-signed so
		 * attempting to execute code from these copied pages
		 * would trigger a code-signing violation.
		 */
		if (prot & VM_PROT_EXECUTE) {
			if (log_executable_mem_entry) {
				bsd_info = current_task()->bsd_info;
				printf("pid %d[%s] making memory entry out of "
				       "executable range from 0x%llx to 0x%llx:"
				       "might cause code-signing issues "
				       /* ... */
				       ? proc_name_address(bsd_info)
				       : /* ... */,
				       (uint64_t) map_start,
				       (uint64_t) map_end);
			}
			DTRACE_VM2(cs_executable_mem_entry,
				   uint64_t, (uint64_t)map_start,
				   uint64_t, (uint64_t)map_end);
			cs_executable_mem_entry++;

			/*
			 * We don't know how the memory entry will be used.
			 * It might never get wired and might not cause any
			 * trouble, so let's not reject this request...
			 */

			kr = KERN_PROTECTION_FAILURE;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if (real_map != target_map)
				vm_map_unlock_read(real_map);
		}
#endif /* CONFIG_EMBEDDED */

		if (((prot & protections) != protections)
		    || (object == kernel_object)) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if (real_map != target_map)
				vm_map_unlock_read(real_map);
			if (object == kernel_object) {
				printf("Warning: Attempt to create a named"
				       " entry from the kernel_object\n");
			}
		}
		/* We have an object, now check to see if this object */
		/* is suitable.  If not, create a shadow and share that */

		/*
		 * We have to unlock the VM object to avoid deadlocking with
		 * a VM map lock (the lock ordering is map, the object), if we
		 * need to modify the VM map to create a shadow object.  Since
		 * we might release the VM map lock below anyway, we have
		 * to release the VM map lock now.
		 * XXX FBDP There must be a way to avoid this double lookup...
		 *
		 * Take an extra reference on the VM object to make sure it's
		 * not going to disappear.
		 */
		vm_object_reference_locked(object); /* extra ref to hold obj */
		vm_object_unlock(object);

		local_map = original_map;
		local_offset = map_start;
		if (target_map != local_map) {
			vm_map_unlock_read(target_map);
			if (real_map != target_map)
				vm_map_unlock_read(real_map);
			vm_map_lock_read(local_map);
			target_map = local_map;
			real_map = local_map;
		}

		if (!vm_map_lookup_entry(local_map,
					 local_offset, &map_entry)) {
			kr = KERN_INVALID_ARGUMENT;
			vm_map_unlock_read(target_map);
			if (real_map != target_map)
				vm_map_unlock_read(real_map);
			vm_object_deallocate(object); /* release extra ref */
			object = VM_OBJECT_NULL;
		}
		iskernel = (local_map->pmap == kernel_pmap);
		if (!(map_entry->is_sub_map)) {
			if (VME_OBJECT(map_entry) != object) {
				kr = KERN_INVALID_ARGUMENT;
				vm_map_unlock_read(target_map);
				if (real_map != target_map)
					vm_map_unlock_read(real_map);
				vm_object_deallocate(object); /* release extra ref */
				object = VM_OBJECT_NULL;
			}
		} else {
			local_map = VME_SUBMAP(map_entry);

			vm_map_lock_read(local_map);
			vm_map_unlock_read(tmap);
			target_map = local_map;
			real_map = local_map;
			local_offset = local_offset - map_entry->vme_start;
			local_offset += VME_OFFSET(map_entry);
		}
		/*
		 * We found the VM map entry, lock the VM object again.
		 */
		vm_object_lock(object);
		if (map_entry->wired_count) {
			/* JMM - The check below should be reworked instead. */
			object->true_share = TRUE;
		}
		if (mask_protections) {
			/*
			 * The caller asked us to use the "protections" as
			 * a mask, so restrict "protections" to what this
			 * mapping actually allows.
			 */
			protections &= map_entry->max_protection;
		}
		if (((map_entry->max_protection) & protections) != protections) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if (real_map != target_map)
				vm_map_unlock_read(real_map);
			vm_object_deallocate(object);
			object = VM_OBJECT_NULL;
		}

		mappable_size = fault_info.hi_offset - obj_off;
		total_size = map_entry->vme_end - map_entry->vme_start;
		if (map_size > mappable_size) {
			/* try to extend mappable size if the entries */
			/* following are from the same object and are */
			next_entry = map_entry->vme_next;
			/* let's see if the next map entry is still */
			/* pointing at this object and is contiguous */
			while (map_size > mappable_size) {
				if ((VME_OBJECT(next_entry) == object) &&
				    (next_entry->vme_start ==
				     next_entry->vme_prev->vme_end) &&
				    (VME_OFFSET(next_entry) ==
				     (VME_OFFSET(next_entry->vme_prev) +
				      (next_entry->vme_prev->vme_end -
				       next_entry->vme_prev->vme_start)))) {
					if (mask_protections) {
						/*
						 * The caller asked us to use
						 * the "protections" as a mask,
						 * so restrict "protections" to
						 * what this mapping actually
						 * allows.
						 */
						protections &= next_entry->max_protection;
					}
					if ((next_entry->wired_count) &&
					    (map_entry->wired_count == 0)) {
						break;
					}
					if (((next_entry->max_protection)
					     & protections) != protections) {
						break;
					}
					if (next_entry->needs_copy !=
					    map_entry->needs_copy)
						break;
					mappable_size += next_entry->vme_end
						- next_entry->vme_start;
					total_size += next_entry->vme_end
						- next_entry->vme_start;
					next_entry = next_entry->vme_next;
				} else {
					break;
				}
			}
		}
		/* vm_map_entry_should_cow_for_true_share() checks for malloc tags,
		 * never true in kernel */
		if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) &&
		    object->vo_size > map_size &&
		    /* ... */) {
			/*
			 * Set up the targeted range for copy-on-write to
			 * limit the impact of "true_share"/"copy_delay" to
			 * that range instead of the entire VM object...
			 */
			vm_object_unlock(object);
			if (vm_map_lock_read_to_write(target_map)) {
				vm_object_deallocate(object);
				target_map = original_map;
			}

			vm_map_clip_start(target_map,
					  /* ... */
					  vm_map_trunc_page(map_start,
							    VM_MAP_PAGE_MASK(target_map)));
			vm_map_clip_end(target_map,
					/* ... */
					(vm_map_round_page(map_end,
							   VM_MAP_PAGE_MASK(target_map))));
			force_shadow = TRUE;

			if ((map_entry->vme_end - offset) < map_size) {
				map_size = map_entry->vme_end - map_start;
			}
			total_size = map_entry->vme_end - map_entry->vme_start;

			vm_map_lock_write_to_read(target_map);
			vm_object_lock(object);
		}
		if (object->internal) {
			/* vm_map_lookup_locked will create a shadow if */
			/* needs_copy is set but does not check for the */
			/* other two conditions shown.  It is important to */
			/* set up an object which will not be pulled from */
			if (/* ... */
			    ((map_entry->needs_copy ||
			      /* ... */
			      (object->vo_size > total_size &&
			       (VME_OFFSET(map_entry) != 0 ||
				/* ... */
				vm_map_round_page(total_size,
						  VM_MAP_PAGE_MASK(target_map)))))
			     && !object->true_share)) {
				/*
				 * We have to unlock the VM object before
				 * trying to upgrade the VM map lock, to
				 * honor lock ordering (map then object).
				 * Otherwise, we would deadlock if another
				 * thread holds a read lock on the VM map and
				 * is trying to acquire the VM object's lock.
				 * We still hold an extra reference on the
				 * VM object, guaranteeing that it won't
				 */
				vm_object_unlock(object);

				if (vm_map_lock_read_to_write(target_map)) {
					/*
					 * We couldn't upgrade our VM map lock
					 * from "read" to "write" and we lost
					 * Start all over again...
					 */
					vm_object_deallocate(object); /* extra ref */
					target_map = original_map;
				}
				vm_object_lock(object);

				/*
				 * JMM - We need to avoid coming here when the object
				 * is wired by anybody, not just the current map.  Why
				 * couldn't we use the standard vm_object_copy_quickly()
				 */

				/* create a shadow object */
				VME_OBJECT_SHADOW(map_entry, total_size);
				shadow_object = VME_OBJECT(map_entry);

				vm_object_unlock(object);

				prot = map_entry->protection & ~VM_PROT_WRITE;

				if (override_nx(target_map,
						VME_ALIAS(map_entry))
				    /* ... */)
					prot |= VM_PROT_EXECUTE;

				vm_object_pmap_protect(
					object, VME_OFFSET(map_entry),
					/* ... */
					((map_entry->is_shared
					  || target_map->mapped_in_other_pmaps)
					 /* ... */),
					map_entry->vme_start,
					/* ... */);
				total_size -= (map_entry->vme_end
					       - map_entry->vme_start);
				next_entry = map_entry->vme_next;
				map_entry->needs_copy = FALSE;

				vm_object_lock(shadow_object);
				while (total_size) {
					assert((next_entry->wired_count == 0) ||
					       (map_entry->wired_count));

					if (VME_OBJECT(next_entry) == object) {
						vm_object_reference_locked(shadow_object);
						VME_OBJECT_SET(next_entry,
							       /* ... */);
						vm_object_deallocate(object);
						assert(/* ... */
						       (VME_OFFSET(next_entry->vme_prev) +
							(next_entry->vme_prev->vme_end
							 - next_entry->vme_prev->vme_start)));
						next_entry->use_pmap = TRUE;
						next_entry->needs_copy = FALSE;
					} else {
						panic("mach_make_memory_entry_64:"
						      " map entries out of sync\n");
					}
					total_size -= next_entry->vme_end
						- next_entry->vme_start;
					next_entry = next_entry->vme_next;
				}

				/*
				 * Transfer our extra reference to the
				 */
				vm_object_reference_locked(shadow_object);
				vm_object_deallocate(object); /* extra ref */
				object = shadow_object;

				obj_off = ((local_offset - map_entry->vme_start)
					   + VME_OFFSET(map_entry));

				vm_map_lock_write_to_read(target_map);
			}
		}
		/* note: in the future we can (if necessary) allow for */
		/* memory object lists, this will better support */
		/* fragmentation, but is it necessary?  The user should */
		/* be encouraged to create address space oriented */
		/* shared objects from CLEAN memory regions which have */
		/* a known and defined history, i.e. no inheritance */
		/* share, make this call before making the region the */
		/* target of ipc's, etc.  The code above, protecting */
		/* against delayed copy, etc. is mostly defensive. */

		wimg_mode = object->wimg_bits;
		if (!(object->nophyscache))
			vm_prot_to_wimg(access, &wimg_mode);

#if VM_OBJECT_TRACKING_OP_TRUESHARE
		if (!object->true_share &&
		    vm_object_tracking_inited) {
			void *bt[VM_OBJECT_TRACKING_BTDEPTH];

			num = OSBacktrace(bt,
					  VM_OBJECT_TRACKING_BTDEPTH);
			btlog_add_entry(vm_object_tracking_btlog,
					/* ... */
					VM_OBJECT_TRACKING_OP_TRUESHARE,
					/* ... */);
		}
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */

		vm_object_lock_assert_exclusive(object);
		object->true_share = TRUE;
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;

		/*
		 * The memory entry now points to this VM object and we
		 * need to hold a reference on the VM object.  Use the extra
		 * reference we took earlier to keep the object alive when we
		 */
		vm_map_unlock_read(target_map);
		if (real_map != target_map)
			vm_map_unlock_read(real_map);

		if (object->wimg_bits != wimg_mode)
			vm_object_change_wimg_mode(object, wimg_mode);

		/* the size of mapped entry that overlaps with our region */
		/* which is targeted for share. */
		/* (entry_end - entry_start) - */
		/* offset of our beg addr within entry */
		/* it corresponds to this: */

		if (map_size > mappable_size)
			map_size = mappable_size;
		if (permission & MAP_MEM_NAMED_REUSE) {
			/*
			 * Compare what we got with the "parent_entry".
			 * If they match, re-use the "parent_entry" instead
			 * of creating a new one.
			 */
			if (parent_entry != NULL &&
			    parent_entry->backing.object == object &&
			    parent_entry->internal == object->internal &&
			    parent_entry->is_sub_map == FALSE &&
			    parent_entry->offset == obj_off &&
			    parent_entry->protection == protections &&
			    parent_entry->size == map_size &&
			    ((!(use_data_addr || use_4K_compat) &&
			      (parent_entry->data_offset == 0)) ||
			     ((use_data_addr || use_4K_compat) &&
			      (parent_entry->data_offset == offset_in_page)))) {
				/*
				 * We have a match: re-use "parent_entry".
				 */
				/* release our extra reference on object */
				vm_object_unlock(object);
				vm_object_deallocate(object);
				/* parent_entry->ref_count++; XXX ? */
				/* Get an extra send-right on handle */
				ipc_port_copy_send(parent_handle);

				*size = CAST_DOWN(vm_size_t,
						  (parent_entry->size -
						   parent_entry->data_offset));
				*object_handle = parent_handle;
				return KERN_SUCCESS;
			}
			/*
			 * No match: we need to create a new entry.
			 */
		}

		vm_object_unlock(object);
		if (mach_memory_entry_allocate(&user_entry, &user_handle)
		    != KERN_SUCCESS) {
			/* release our unused reference on the object */
			vm_object_deallocate(object);
			return KERN_FAILURE;
		}

		user_entry->backing.object = object;
		user_entry->internal = object->internal;
		user_entry->is_sub_map = FALSE;
		user_entry->offset = obj_off;
		user_entry->data_offset = offset_in_page;
		user_entry->protection = protections;
		SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
		user_entry->size = map_size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in. */

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}
	/* The new object will be based on an existing named object */
	if (parent_entry == NULL) {
		kr = KERN_INVALID_ARGUMENT;
	}

	if (use_data_addr || use_4K_compat) {
		/*
		 * submaps and pagers should only be accessible from within
		 * the kernel, which shouldn't use the data address flag, so can fail here.
		 */
		if (parent_entry->is_sub_map) {
			panic("Shouldn't be using data address with a parent entry that is a submap.");
		}
		/*
		 * Account for offset to data in parent entry and
		 * compute our own offset to data.
		 */
		if ((offset + *size + parent_entry->data_offset) > parent_entry->size) {
			kr = KERN_INVALID_ARGUMENT;
		}

		map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
		offset_in_page = (offset + parent_entry->data_offset) - map_start;
		if (use_4K_compat)
			offset_in_page &= ~((signed)(0xFFF));
		map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
	} else {
		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;

		if ((offset + map_size) > parent_entry->size) {
			kr = KERN_INVALID_ARGUMENT;
		}
	}
	if (mask_protections) {
		/*
		 * The caller asked us to use the "protections" as
		 * a mask, so restrict "protections" to what this
		 * mapping actually allows.
		 */
		protections &= parent_entry->protection;
	}
	if ((protections & parent_entry->protection) != protections) {
		kr = KERN_PROTECTION_FAILURE;
	}

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		/* ... */
	}

	user_entry->size = map_size;
	user_entry->offset = parent_entry->offset + map_start;
	user_entry->data_offset = offset_in_page;
	user_entry->is_sub_map = parent_entry->is_sub_map;
	user_entry->is_copy = parent_entry->is_copy;
	user_entry->internal = parent_entry->internal;
	user_entry->protection = protections;

	if (access != MAP_MEM_NOOP) {
		SET_MAP_MEM(access, user_entry->protection);
	}

	if (parent_entry->is_sub_map) {
		user_entry->backing.map = parent_entry->backing.map;
		vm_map_lock(user_entry->backing.map);
		user_entry->backing.map->ref_count++;
		vm_map_unlock(user_entry->backing.map);
	} else {
		object = parent_entry->backing.object;
		assert(object != VM_OBJECT_NULL);
		user_entry->backing.object = object;
		/* we now point to this object, hold on */
		vm_object_lock(object);
		vm_object_reference_locked(object);
#if VM_OBJECT_TRACKING_OP_TRUESHARE
		if (!object->true_share &&
		    vm_object_tracking_inited) {
			void *bt[VM_OBJECT_TRACKING_BTDEPTH];

			num = OSBacktrace(bt,
					  VM_OBJECT_TRACKING_BTDEPTH);
			btlog_add_entry(vm_object_tracking_btlog,
					/* ... */
					VM_OBJECT_TRACKING_OP_TRUESHARE,
					/* ... */);
		}
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */

		object->true_share = TRUE;
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
		vm_object_unlock(object);
	}
	*size = CAST_DOWN(vm_size_t, (user_entry->size -
				      user_entry->data_offset));
	*object_handle = user_handle;
	return KERN_SUCCESS;
	if (user_handle != IP_NULL) {
		/*
		 * Releasing "user_handle" causes the kernel object
		 * associated with it ("user_entry" here) to also be
		 * released and freed.
		 */
		mach_memory_entry_port_release(user_handle);
	}
	return kr;
}
kern_return_t
_mach_make_memory_entry(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_size_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
	    (memory_object_offset_t)offset, permission, object_handle,
	    parent_entry);
	*size = mo_size;
	return kr;
}
kern_return_t
mach_make_memory_entry(
	vm_map_t		target_map,
	vm_size_t		*size,
	vm_offset_t		offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_size_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
	    (memory_object_offset_t)offset, permission, object_handle,
	    parent_entry);
	*size = CAST_DOWN(vm_size_t, mo_size);
	return kr;
}
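/*
 * Illustrative sketch (not part of this file): the legacy vm_size_t
 * flavor above round-trips the size through a 64-bit temporary, so the
 * value handed back to the caller is the page-rounded size computed by
 * mach_make_memory_entry_64, narrowed with CAST_DOWN.  From user space
 * (assuming <mach/mach.h>) the effect looks like this; "addr" is a
 * stand-in for a valid address in the caller's map:
 *
 *	vm_size_t      len = 1;			// request covers 1 byte
 *	mach_port_t    entry = MACH_PORT_NULL;
 *	kern_return_t  kr;
 *
 *	kr = mach_make_memory_entry(mach_task_self(), &len, addr,
 *	    VM_PROT_READ, &entry, MACH_PORT_NULL);
 *	// on success, "len" has typically been rounded up to whole pages
 */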
/*
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
/* ... */(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (must_wire)
		map->wiring_required = TRUE;
	else
		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
}

kern_return_t
vm_map_exec_lockdown(
	vm_map_t	map)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	map->map_disallow_new_exec = TRUE;

	return(KERN_SUCCESS);
}
__private_extern__ kern_return_t
mach_memory_entry_allocate(
	vm_named_entry_t	*user_entry_p,
	ipc_port_t		*user_handle_p)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	ipc_port_t		previous;

	user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
	if (user_entry == NULL)
		return KERN_FAILURE;

	named_entry_lock_init(user_entry);

	user_handle = ipc_port_alloc_kernel();
	if (user_handle == IP_NULL) {
		kfree(user_entry, sizeof *user_entry);
		return KERN_FAILURE;
	}
	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	user_entry->backing.object = NULL;
	user_entry->is_sub_map = FALSE;
	user_entry->is_copy = FALSE;
	user_entry->internal = FALSE;
	user_entry->size = 0;
	user_entry->offset = 0;
	user_entry->data_offset = 0;
	user_entry->protection = VM_PROT_NONE;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);

	*user_entry_p = user_entry;
	*user_handle_p = user_handle;

	return KERN_SUCCESS;
}
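/*
 * Illustrative sketch (mirrors the callers elsewhere in this file):
 * every producer of a named entry follows the same pattern -- allocate
 * the entry/port pair, fill in the backing union plus the bookkeeping
 * fields, and hand the send right back to the caller.  The field values
 * below are placeholders; error handling is elided.
 *
 *	vm_named_entry_t	user_entry;
 *	ipc_port_t		user_handle;
 *
 *	if (mach_memory_entry_allocate(&user_entry, &user_handle)
 *	    != KERN_SUCCESS)
 *		return KERN_FAILURE;
 *
 *	user_entry->backing.object = object;	// or .map / .copy
 *	user_entry->internal = object->internal;
 *	user_entry->is_sub_map = FALSE;
 *	user_entry->offset = obj_off;
 *	user_entry->protection = protections;
 *	user_entry->size = map_size;
 *
 *	*object_handle = user_handle;
 *	return KERN_SUCCESS;
 */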
/*
 *	mach_memory_object_memory_entry_64
 *
 *	Create a named entry backed by the provided pager.
 */
kern_return_t
mach_memory_object_memory_entry_64(
	host_t			host,
	boolean_t		internal,
	vm_object_offset_t	size,
	vm_prot_t		permission,
	memory_object_t		pager,
	ipc_port_t		*entry_handle)
{
	unsigned int		access;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	vm_object_t		object;

	if (host == HOST_NULL)
		return(KERN_INVALID_HOST);

	if (pager == MEMORY_OBJECT_NULL && internal) {
		object = vm_object_allocate(size);
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
		}
	} else {
		object = memory_object_to_vm_object(pager);
		if (object != VM_OBJECT_NULL) {
			vm_object_reference(object);
		}
	}
	if (object == VM_OBJECT_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		vm_object_deallocate(object);
		return KERN_FAILURE;
	}

	user_entry->size = size;
	user_entry->offset = 0;
	user_entry->protection = permission & VM_PROT_ALL;
	access = GET_MAP_MEM(permission);
	SET_MAP_MEM(access, user_entry->protection);
	user_entry->is_sub_map = FALSE;
	assert(user_entry->ref_count == 1);

	user_entry->backing.object = object;
	user_entry->internal = object->internal;
	assert(object->internal == internal);

	*entry_handle = user_handle;
	return KERN_SUCCESS;
}
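/*
 * Illustrative in-kernel sketch: wrapping an anonymous, internal VM
 * object in a named entry via the routine above.  The host port is
 * assumed to be supplied by the caller and the length is a placeholder;
 * error handling is elided.
 *
 *	ipc_port_t    entry;
 *	kern_return_t kr;
 *
 *	kr = mach_memory_object_memory_entry_64(host, TRUE,
 *	    (vm_object_offset_t)(16 * PAGE_SIZE), VM_PROT_DEFAULT,
 *	    MEMORY_OBJECT_NULL, &entry);
 *	// with a NULL pager and internal == TRUE, the entry ends up
 *	// backed by a freshly allocated anonymous object, as above
 */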
kern_return_t
mach_memory_object_memory_entry(
	host_t		host,
	boolean_t	internal,
	vm_size_t	size,
	vm_prot_t	permission,
	memory_object_t	pager,
	ipc_port_t	*entry_handle)
{
	return mach_memory_object_memory_entry_64( host, internal,
		(vm_object_offset_t)size, permission, pager, entry_handle);
}


kern_return_t
mach_memory_entry_purgable_control(
	ipc_port_t	entry_port,
	vm_purgable_t	control,
	int		*state)
{
	if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
		/* not allowed from user-space */
		return KERN_INVALID_ARGUMENT;
	}

	return memory_entry_purgeable_control_internal(entry_port, control, state);
}
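/*
 * Illustrative sketch: driving the purgeable state machine on a named
 * entry.  It assumes the caller holds a send right on an entry whose
 * backing object covers the entire entry (the internal routine below
 * rejects anything else), and that the MIG user stub for
 * mach_memory_entry_purgable_control is available to the caller.
 *
 *	int           state = VM_PURGABLE_VOLATILE;
 *	kern_return_t kr;
 *
 *	kr = mach_memory_entry_purgable_control(entry,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	// on success the previous state is reported back through "state";
 *	// VM_PURGABLE_GET_STATE queries without changing anything
 */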
kern_return_t
memory_entry_purgeable_control_internal(
	ipc_port_t	entry_port,
	vm_purgable_t	control,
	int		*state)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_object_t		object;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}
	if (control != VM_PURGABLE_SET_STATE &&
	    control != VM_PURGABLE_GET_STATE &&
	    control != VM_PURGABLE_SET_STATE_FROM_KERNEL)
		return(KERN_INVALID_ARGUMENT);

	if ((control == VM_PURGABLE_SET_STATE ||
	     control == VM_PURGABLE_SET_STATE_FROM_KERNEL) &&
	    (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
	     ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
		return(KERN_INVALID_ARGUMENT);

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	/* check that named entry covers entire object ? */
	if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
		vm_object_unlock(object);
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	named_entry_unlock(mem_entry);

	kr = vm_object_purgable_control(object, control, state);

	vm_object_unlock(object);

	return kr;
}
kern_return_t
mach_memory_entry_get_page_counts(
	ipc_port_t	entry_port,
	unsigned int	*resident_page_count,
	unsigned int	*dirty_page_count)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_object_size_t	size;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	offset = mem_entry->offset;
	size = mem_entry->size;

	named_entry_unlock(mem_entry);

	kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count);

	vm_object_unlock(object);

	return kr;
}
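/*
 * Illustrative sketch: querying an entry's page counts with the routine
 * above.  "entry_port" is assumed to be a named entry backed by a VM
 * object (not a submap or a copy entry).
 *
 *	unsigned int  resident = 0, dirty = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_memory_entry_get_page_counts(entry_port,
 *	    &resident, &dirty);
 *	if (kr == KERN_SUCCESS)
 *		printf("entry: %u resident, %u dirty pages\n",
 *		    resident, dirty);
 */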
/*
 *	mach_memory_entry_port_release:
 *
 *	Release a send right on a named entry port.  This is the correct
 *	way to destroy a named entry.  When the last right on the port is
 *	released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
 */
void
mach_memory_entry_port_release(
	ipc_port_t	port)
{
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
	ipc_port_release_send(port);
}
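/*
 * Illustrative sketch: in-kernel producers drop their right with the
 * routine above; a user-space holder of a named entry achieves the same
 * thing by deallocating its send right, e.g. (assuming <mach/mach.h>):
 *
 *	(void) mach_port_deallocate(mach_task_self(), entry);
 *
 * Once the last send right is gone, ipc_kobject_destroy() runs
 * mach_destroy_memory_entry() below and the backing map, copy, or
 * object reference is released.
 */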
/*
 *	mach_destroy_memory_entry:
 *
 *	Drops a reference on a memory entry and destroys the memory entry if
 *	there are no more references on it.
 *	NOTE: This routine should not be called to destroy a memory entry from the
 *	kernel, as it will not release the Mach port associated with the memory
 *	entry.  The proper way to destroy a memory entry in the kernel is to
 *	call mach_memory_entry_port_release() to release the kernel's send-right on
 *	the memory entry's port.  When the last send right is released, the memory
 *	entry will be destroyed via ipc_kobject_destroy().
 */
void
mach_destroy_memory_entry(
	ipc_port_t	port)
{
	vm_named_entry_t	named_entry;
#if MACH_ASSERT
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
	named_entry = (vm_named_entry_t)port->ip_kobject;

	named_entry_lock(named_entry);
	named_entry->ref_count -= 1;

	if (named_entry->ref_count == 0) {
		if (named_entry->is_sub_map) {
			vm_map_deallocate(named_entry->backing.map);
		} else if (named_entry->is_copy) {
			vm_map_copy_discard(named_entry->backing.copy);
		} else {
			/* release the VM object we've been pointing to */
			vm_object_deallocate(named_entry->backing.object);
		}

		named_entry_unlock(named_entry);
		named_entry_lock_destroy(named_entry);

		kfree((void *) port->ip_kobject,
		      sizeof (struct vm_named_entry));
	} else
		named_entry_unlock(named_entry);
}
/* Allow manipulation of individual page state.  This is actually part of */
/* the UPL regimen but takes place on the memory entry rather than on a UPL */

kern_return_t
mach_memory_entry_page_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags)
{
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	kern_return_t		kr;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_reference(object);
	named_entry_unlock(mem_entry);

	kr = vm_object_page_op(object, offset, ops, phys_entry, flags);

	vm_object_deallocate(object);

	return kr;
}
/*
 * mach_memory_entry_range_op offers performance enhancement over
 * mach_memory_entry_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */

kern_return_t
mach_memory_entry_range_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset_beg,
	vm_object_offset_t	offset_end,
	int			ops,
	int			*range)
{
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	kern_return_t		kr;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_reference(object);
	named_entry_unlock(mem_entry);

	kr = vm_object_range_op(object,
				offset_beg,
				offset_end,
				ops,
				(uint32_t *) range);

	vm_object_deallocate(object);

	return kr;
}
/* ******* Temporary Internal calls to UPL for BSD ***** */

extern int kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr);

extern int kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl);

extern int kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_abort(
	upl_t			upl,
	int			abort_type);

extern int kernel_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags);


kern_return_t
kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return vm_upl_map(map, upl, dst_addr);
}


kern_return_t
kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	return vm_upl_unmap(map, upl);
}


kern_return_t
kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count)
{
	kern_return_t	kr;

	kr = upl_commit(upl, pl, count);
	upl_deallocate(upl);
	return kr;
}


kern_return_t
kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count)
{
	boolean_t	finished = FALSE;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}


kern_return_t
kernel_upl_abort_range(
	upl_t		upl,
	upl_offset_t	offset,
	upl_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	finished = FALSE;

	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}


kern_return_t
kernel_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
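/*
 * Illustrative kernel-side sketch: the usual discipline with the
 * wrappers above is to either commit or abort every UPL exactly once,
 * letting the FREE_ON_EMPTY flags tear the UPL down once the last page
 * is processed.  "upl", "pl" and "count" are assumed to come from
 * whoever created the UPL; the helper name is made up for the example.
 *
 *	static kern_return_t
 *	finish_upl(upl_t upl, upl_size_t size, upl_page_info_array_t pl,
 *	    mach_msg_type_number_t count, boolean_t success)
 *	{
 *		if (success)
 *			return kernel_upl_commit_range(upl, 0, size,
 *			    UPL_COMMIT_FREE_ON_EMPTY, pl, count);
 *		return kernel_upl_abort_range(upl, 0, size,
 *		    UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
 *	}
 */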
/*
 * Now a kernel-private interface (for BootCache
 * use only).  Need a cleaner way to create an
 * empty vm_map() and return a handle to it.
 */

kern_return_t
vm_region_object_create(
	__unused vm_map_t	target_map,
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	vm_map_t		new_map;

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
				vm_map_round_page(size,
						  VM_MAP_PAGE_MASK(target_map)),
				/* ... */);
	vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));

	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	assert(user_entry->ref_count == 1);

	*object_handle = user_handle;
	return KERN_SUCCESS;
}
ppnum_t vm_map_get_phys_page(	/* forward */
	vm_map_t	map,
	vm_offset_t	offset);

ppnum_t
vm_map_get_phys_page(
	vm_map_t	map,
	vm_offset_t	addr)
{
	vm_object_offset_t	offset;
	vm_object_t		object;
	vm_map_offset_t		map_offset;
	vm_map_entry_t		entry;
	ppnum_t			phys_page = 0;

	map_offset = vm_map_trunc_page(addr, PAGE_MASK);

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, map_offset, &entry)) {

		if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return (ppnum_t) 0;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			vm_map_lock(VME_SUBMAP(entry));
			old_map = map;
			map = VME_SUBMAP(entry);
			map_offset = (VME_OFFSET(entry) +
				      (map_offset - entry->vme_start));
			vm_map_unlock(old_map);
			continue;
		}
		if (VME_OBJECT(entry)->phys_contiguous) {
			/* These are not standard pageable memory mappings */
			/* If they are not present in the object they will */
			/* have to be picked up from the pager through the */
			/* fault mechanism. */
			if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
				/* need to call vm_fault */
				vm_fault(map, map_offset, VM_PROT_NONE,
					 FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
					 THREAD_UNINT, NULL, 0);
				continue;
			}
			offset = (VME_OFFSET(entry) +
				  (map_offset - entry->vme_start));
			phys_page = (ppnum_t)
				((VME_OBJECT(entry)->vo_shadow_offset
				  + offset) >> PAGE_SHIFT);
			break;
		}
		offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
		object = VME_OBJECT(entry);
		vm_object_lock(object);
		while (TRUE) {
			vm_page_t dst_page = vm_page_lookup(object, offset);

			if (dst_page == VM_PAGE_NULL) {
				if (object->shadow) {
					vm_object_t old_object;

					vm_object_lock(object->shadow);
					old_object = object;
					offset = offset + object->vo_shadow_offset;
					object = object->shadow;
					vm_object_unlock(old_object);
				} else {
					vm_object_unlock(object);
					break;
				}
			} else {
				phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
				vm_object_unlock(object);
				break;
			}
		}
		break;
	}

	vm_map_unlock(map);
	return phys_page;
}
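/*
 * Illustrative sketch: callers hand the routine above a map and a
 * virtual address and get back the physical page number backing it, or
 * 0 if nothing is resident.  "some_addr" is a placeholder:
 *
 *	ppnum_t pn;
 *
 *	pn = vm_map_get_phys_page(kernel_map, (vm_offset_t)some_addr);
 *	if (pn != 0) {
 *		// page is resident; its physical address is
 *		// ((addr64_t)pn << PAGE_SHIFT)
 *	}
 */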
kern_return_t kernel_object_iopl_request(	/* forward */
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	/* ... */
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags);

kern_return_t
kernel_object_iopl_request(
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	/* ... */
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags)
{
	vm_object_t		object;
	kern_return_t		ret;
	int			caller_flags;

	caller_flags = *flags;

	if (caller_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}

	/* a few checks to make sure user is obeying rules */
	if (*upl_size == 0) {
		if (offset >= named_entry->size)
			return(KERN_INVALID_RIGHT);
		*upl_size = (upl_size_t) (named_entry->size - offset);
		if (*upl_size != named_entry->size - offset)
			return KERN_INVALID_ARGUMENT;
	}
	if (caller_flags & UPL_COPYOUT_FROM) {
		if ((named_entry->protection & VM_PROT_READ)
		    /* ... */) {
			return(KERN_INVALID_RIGHT);
		}
	} else {
		if ((named_entry->protection &
		     (VM_PROT_READ | VM_PROT_WRITE))
		    != (VM_PROT_READ | VM_PROT_WRITE)) {
			return(KERN_INVALID_RIGHT);
		}
	}
	if (named_entry->size < (offset + *upl_size))
		return(KERN_INVALID_ARGUMENT);

	/* the caller's parameter offset is defined to be the */
	/* offset from beginning of named entry offset in object */
	offset = offset + named_entry->offset;

	if (named_entry->is_sub_map ||
	    named_entry->is_copy)
		return KERN_INVALID_ARGUMENT;

	named_entry_lock(named_entry);

	/* This is the case where we are going to operate */
	/* on an already known object.  If the object is */
	/* not ready it is internal.  An external */
	/* object cannot be mapped until it is ready */
	/* we can therefore avoid the ready check */

	object = named_entry->backing.object;
	vm_object_reference(object);
	named_entry_unlock(named_entry);

	if (!object->private) {
		if (*upl_size > MAX_UPL_TRANSFER_BYTES)
			*upl_size = MAX_UPL_TRANSFER_BYTES;
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
				     /* ... */
				     (upl_control_flags_t)(unsigned int)caller_flags);
	vm_object_deallocate(object);
	return ret;
}
/*
 * These symbols are looked up at runtime by vmware, VirtualBox,
 * despite not being exported in the symbol sets.
 */

#if defined(__x86_64__)

kern_return_t
mach_vm_map(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance);

kern_return_t
mach_vm_remap(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	int			flags,
	vm_map_t		src_map,
	mach_vm_offset_t	memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance);

kern_return_t
mach_vm_map(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	return (mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
	    offset, copy, cur_protection, max_protection, inheritance));
}

kern_return_t
mach_vm_remap(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	int			flags,
	vm_map_t		src_map,
	mach_vm_offset_t	memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	return (mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
	    copy, cur_protection, max_protection, inheritance));
}

kern_return_t
vm_map(
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags,
	ipc_port_t	port,
	vm_offset_t	offset,
	boolean_t	copy,
	vm_prot_t	cur_protection,
	vm_prot_t	max_protection,
	vm_inherit_t	inheritance);

kern_return_t
vm_map(
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags,
	ipc_port_t	port,
	vm_offset_t	offset,
	boolean_t	copy,
	vm_prot_t	cur_protection,
	vm_prot_t	max_protection,
	vm_inherit_t	inheritance)
{
	vm_tag_t	tag;

	VM_GET_FLAGS_ALIAS(flags, tag);
	return (vm_map_kernel(target_map, address, size, mask, flags, tag, port, offset, copy, cur_protection, max_protection, inheritance));
}

#endif /* __x86_64__ */