/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can either be 32 or 64, or the
 * kernel task can be 32 or 64. mach_vm_allocate makes sense everywhere, and
 * is preferred for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32
 * platforms, the MIG glue should never call into vm_allocate directly, because
 * the calling task and kernel_task are unlikely to use the same size parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */
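/*
 * Illustrative only (not part of the original file): a minimal user-space
 * sketch, assuming the standard <mach/mach_vm.h> MIG stubs (subsystem 4800),
 * of the "wide" allocate/deallocate round trip that these kernel entry
 * points back:
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t size = 4096;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		((char *)(uintptr_t)addr)[0] = 1;	// zero-filled, now dirtied
 *		(void) mach_vm_deallocate(mach_task_self(), addr, size);
 *	}
 */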
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/sdt.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
vm_size_t	upl_offset_to_pagelist = 0;

ipc_port_t	dynamic_pager_control_port = NULL;
/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
	mach_vm_offset_t	*addr,

	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
		return(KERN_SUCCESS);

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);

		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain useable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
			map_addr += VM_MAP_PAGE_SIZE(map);
		map_addr = vm_map_trunc_page(*addr,
					     VM_MAP_PAGE_MASK(map));
	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(map));
		return(KERN_INVALID_ARGUMENT);

	result = vm_map_enter(
			(vm_object_offset_t)0,
/*
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
		return(KERN_SUCCESS);

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);

		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain useable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
			map_addr += VM_MAP_PAGE_SIZE(map);
		map_addr = vm_map_trunc_page(*addr,
					     VM_MAP_PAGE_MASK(map));
	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(map));
		return(KERN_INVALID_ARGUMENT);

	result = vm_map_enter(
			(vm_object_offset_t)0,

	*addr = CAST_DOWN(vm_offset_t, map_addr);
/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
	mach_vm_offset_t	start,

	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (mach_vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map,
			     vm_map_trunc_page(start,
					       VM_MAP_PAGE_MASK(map)),
			     vm_map_round_page(start+size,
					       VM_MAP_PAGE_MASK(map)),
/*
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map,
			     vm_map_trunc_page(start,
					       VM_MAP_PAGE_MASK(map)),
			     vm_map_round_page(start+size,
					       VM_MAP_PAGE_MASK(map)),
/*
 *	Sets the inheritance of the specified range in the
 *	given map.
 */
	mach_vm_offset_t	start,
	vm_inherit_t		new_inheritance)

	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size,
						VM_MAP_PAGE_MASK(map)),
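/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of marking a buffer so that a forked
 * child does not inherit it ("addr" and "size" are placeholders):
 *
 *	kern_return_t kr = mach_vm_inherit(mach_task_self(),
 *					   addr, size, VM_INHERIT_NONE);
 */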
/*
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses
 */
	vm_inherit_t		new_inheritance)

	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size,
						VM_MAP_PAGE_MASK(map)),
/*
 *	Sets the protection of the specified range in the
 *	specified map.
 */
	mach_vm_offset_t	start,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)

	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map,
			      vm_map_trunc_page(start,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size,
						VM_MAP_PAGE_MASK(map)),
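/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of dropping write access on a range
 * without touching its maximum protection:
 *
 *	kr = mach_vm_protect(mach_task_self(), addr, size,
 *			     FALSE,		// set_maximum
 *			     VM_PROT_READ);
 */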
/*
 *	Sets the protection of the specified range in the
 *	specified map. Addressability of the range limited
 *	to the same size as the kernel.
 */
	boolean_t		set_maximum,
	vm_prot_t		new_protection)

	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map,
			      vm_map_trunc_page(start,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size,
						VM_MAP_PAGE_MASK(map)),
/*
 * mach_vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cacheability, migrability, etc.
 */
mach_vm_machine_attribute(
	mach_vm_address_t		addr,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */

	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	return vm_map_machine_attribute(
		vm_map_trunc_page(addr,
				  VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(addr+size,
				  VM_MAP_PAGE_MASK(map)),
/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cacheability, migrability, etc. Limited addressability
 * (same range limits as for the native kernel map).
 */
vm_machine_attribute(
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */

	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	return vm_map_machine_attribute(
		vm_map_trunc_page(addr,
				  VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(addr+size,
				  VM_MAP_PAGE_MASK(map)),
/*
 *	Read/copy a range from one address space and return it to the caller.
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 *
 *	JMM - because of mach_msg_type_number_t, this call is limited to a
 *	single 4GB region at this time.
 */
	mach_vm_address_t	addr,
	mach_msg_type_number_t	*data_size)

	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if ((mach_msg_type_number_t) size != size)
		return KERN_INVALID_ARGUMENT;

	error = vm_map_copyin(map,
			      (vm_map_address_t)addr,
			      FALSE,	/* src_destroy */

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
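/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of reading another task's memory
 * ("target_task", "remote_addr" and "remote_size" are placeholders).  The
 * data comes back as an out-of-line buffer in the caller's address space
 * and must be released with mach_vm_deallocate():
 *
 *	vm_offset_t data = 0;
 *	mach_msg_type_number_t data_cnt = 0;
 *
 *	kr = mach_vm_read(target_task, remote_addr, remote_size,
 *			  &data, &data_cnt);
 *	if (kr == KERN_SUCCESS) {
 *		// ... inspect data_cnt bytes at "data" ...
 *		(void) mach_vm_deallocate(mach_task_self(),
 *					  (mach_vm_address_t)data,
 *					  (mach_vm_size_t)data_cnt);
 *	}
 */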
/*
 *	Read/copy a range from one address space and return it to the caller.
 *	Limited addressability (same range limits as for the native kernel map).
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 */
	mach_msg_type_number_t	*data_size)

	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size > (unsigned)(mach_msg_type_number_t) -1) {
		/*
		 * The kernel could handle a 64-bit "size" value, but
		 * it could not return the size of the data in "*data_size"
		 * without overflowing.
		 * Let's reject this "size" as invalid.
		 */
		return KERN_INVALID_ARGUMENT;

	error = vm_map_copyin(map,
			      (vm_map_address_t)addr,
			      FALSE,	/* src_destroy */

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
/*
 *	mach_vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *	manually here.
 */
	mach_vm_read_entry_t	data_list,

	mach_msg_type_number_t	i;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

			error = vm_map_copyin(map,
					      FALSE,	/* src_destroy */
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
				if (KERN_SUCCESS == error) {
					data_list[i].address = map_addr;
					vm_map_copy_discard(copy);
			data_list[i].address = (mach_vm_address_t)0;
			data_list[i].size = (mach_vm_size_t)0;
/*
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *	manually here.
 *
 *	The source and destination ranges are limited to those
 *	that can be described with a vm_address_t (i.e. same
 *	size map as the kernel).
 *
 *	JMM - If the result of the copyout is an address range
 *	that cannot be described with a vm_address_t (i.e. the
 *	caller had a larger address space but used this call
 *	anyway), it will result in a truncated address being
 *	returned (and a likely confused caller).
 */
	vm_read_entry_t	data_list,

	mach_msg_type_number_t	i;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

			error = vm_map_copyin(map,
					      FALSE,	/* src_destroy */
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
				if (KERN_SUCCESS == error) {
					data_list[i].address =
						CAST_DOWN(vm_offset_t, map_addr);
					vm_map_copy_discard(copy);
			data_list[i].address = (mach_vm_address_t)0;
			data_list[i].size = (mach_vm_size_t)0;
/*
 *	mach_vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	In making an assumption that the current thread is local, it is
 *	no longer cluster-safe without a fully supportive local proxy
 *	thread/task (but we don't support clusters anymore so this is moot).
 */
mach_vm_read_overwrite(
	mach_vm_address_t	address,
	mach_vm_address_t	data,
	mach_vm_size_t		*data_size)

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
			      (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					      (vm_map_address_t)data,

		if (KERN_SUCCESS == error) {
		vm_map_copy_discard(copy);
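/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of copying remote memory directly
 * into a caller-supplied buffer (no out-of-line allocation, unlike
 * mach_vm_read); "target_task" and "remote_addr" are placeholders:
 *
 *	char buf[4096];
 *	mach_vm_size_t out_size = 0;
 *
 *	kr = mach_vm_read_overwrite(target_task, remote_addr, sizeof(buf),
 *				    (mach_vm_address_t)(uintptr_t)buf,
 *				    &out_size);
 */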
/*
 *	vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	This routine adds the additional limitation that the source and
 *	destination ranges must be describable with vm_address_t values
 *	(i.e. the same size address spaces as the kernel, or at least the
 *	ranges are in that first portion of the respective address
 *	spaces).
 */
	vm_address_t	address,
	vm_size_t	*data_size)

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
			      (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					      (vm_map_address_t)data,

		if (KERN_SUCCESS == error) {
		vm_map_copy_discard(copy);
/*
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 */
	mach_vm_address_t		address,
	__unused mach_msg_type_number_t	size)

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
				     (vm_map_copy_t) data, FALSE /* interruptible XXX */);
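/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of writing local bytes over a range
 * in another task ("target_task" and "remote_addr" are placeholders):
 *
 *	char payload[] = "hello";
 *
 *	kr = mach_vm_write(target_task, remote_addr,
 *			   (vm_offset_t)(uintptr_t)payload,
 *			   (mach_msg_type_number_t)sizeof(payload));
 */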
/*
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 *
 *	The addressability of the range of addresses to overwrite is
 *	limited by the use of a vm_address_t (same size as kernel map).
 *	Either the target map is also small, or the range is in the
 *	low addresses within it.
 */
	vm_address_t			address,
	__unused mach_msg_type_number_t	size)

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
				     (vm_map_copy_t) data, FALSE /* interruptible XXX */);
/*
 *	Overwrite one range of the specified map with the contents of
 *	another range within that same map (i.e. both address ranges
 *	are "over there").
 */
	mach_vm_address_t	source_address,
	mach_vm_address_t	dest_address)

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
					   (vm_map_address_t)dest_address,
					   copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
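/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of duplicating a range within one
 * task's address space ("task", "src", "dst" and "size" are placeholders):
 *
 *	kr = mach_vm_copy(task, src, size, dst);
 */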
	vm_address_t	source_address,
	vm_address_t	dest_address)

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
					   (vm_map_address_t)dest_address,
					   copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
/*
 *	Map some range of an object into an address space.
 *
 *	The object can be one of several types of objects:
 *		NULL - anonymous memory
 *		a named entry - a range within another address space
 *			or a range within a memory object
 *		a whole memory object
 */
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	vm_object_offset_t	offset,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)

	vm_map_offset_t		vmmaddr;

	vmmaddr = (vm_map_offset_t) *address;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_MAP)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_mem_object(target_map,
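/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of mapping anonymous memory with an
 * explicit alignment mask (a null object port means anonymous memory, as
 * described above):
 *
 *	mach_vm_address_t addr = 0;
 *
 *	kr = mach_vm_map(mach_task_self(), &addr, 2 * 1024 * 1024,
 *			 (mach_vm_offset_t)(2 * 1024 * 1024 - 1),	// 2 MB aligned
 *			 VM_FLAGS_ANYWHERE,
 *			 MACH_PORT_NULL, 0, FALSE,
 *			 VM_PROT_READ | VM_PROT_WRITE,
 *			 VM_PROT_READ | VM_PROT_WRITE,
 *			 VM_INHERIT_DEFAULT);
 */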
/* legacy interface */
	vm_offset_t		*address,
	vm_object_offset_t	offset,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)

	mach_vm_address_t	map_addr;
	mach_vm_size_t		map_size;
	mach_vm_offset_t	map_mask;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
/* temporary, until world build */
	vm_offset_t	*address,
	vm_prot_t	cur_protection,
	vm_prot_t	max_protection,
	vm_inherit_t	inheritance)

	mach_vm_address_t	map_addr;
	mach_vm_size_t		map_size;
	mach_vm_offset_t	map_mask;
	vm_object_offset_t	obj_offset;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;
	obj_offset = (vm_object_offset_t)offset;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 port, obj_offset, copy,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
/*
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 */
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	mach_vm_offset_t	memory_address,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)

	vm_map_offset_t		map_addr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,

	*address = map_addr;
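/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of remapping a shared view of a
 * range from another task into the caller ("src_task", "src_addr" and
 * "size" are placeholders):
 *
 *	mach_vm_address_t local = 0;
 *	vm_prot_t cur_prot, max_prot;
 *
 *	kr = mach_vm_remap(mach_task_self(), &local, size, 0,
 *			   VM_FLAGS_ANYWHERE,
 *			   src_task, src_addr,
 *			   FALSE,		// share, don't copy
 *			   &cur_prot, &max_prot,
 *			   VM_INHERIT_SHARE);
 */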
/*
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 *
 *	The addressability of the source and target address
 *	range is limited by the size of vm_address_t (in the
 *	kernel context).
 */
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_offset_t	memory_address,
	vm_prot_t	*cur_protection,
	vm_prot_t	*max_protection,
	vm_inherit_t	inheritance)

	vm_map_offset_t		map_addr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,

	*address = CAST_DOWN(vm_offset_t, map_addr);
/*
 * NOTE: these routines (and this file) will no longer require mach_host_server.h
 * when mach_vm_wire and vm_wire are changed to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
	host_priv_t		host_priv,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map,
				 vm_map_trunc_page(start,
						   VM_MAP_PAGE_MASK(map)),
				 vm_map_round_page(start+size,
						   VM_MAP_PAGE_MASK(map)),
				 access | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_MLOCK),
		rc = vm_map_unwire(map,
				   vm_map_trunc_page(start,
						     VM_MAP_PAGE_MASK(map)),
				   vm_map_round_page(start+size,
						     VM_MAP_PAGE_MASK(map)),
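/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs and a caller holding the host
 * privileged port ("host_priv_port", "addr" and "size" are placeholders),
 * of wiring and later unwiring a range:
 *
 *	kr = mach_vm_wire(host_priv_port, mach_task_self(), addr, size,
 *			  VM_PROT_READ | VM_PROT_WRITE);
 *	...
 *	kr = mach_vm_wire(host_priv_port, mach_task_self(), addr, size,
 *			  VM_PROT_NONE);	// unwire
 */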
/*
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
	host_priv_t		host_priv,

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if ((access & ~VM_PROT_ALL) || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	} else if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map,
				 vm_map_trunc_page(start,
						   VM_MAP_PAGE_MASK(map)),
				 vm_map_round_page(start+size,
						   VM_MAP_PAGE_MASK(map)),
				 access | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
		rc = vm_map_unwire(map,
				   vm_map_trunc_page(start,
						     VM_MAP_PAGE_MASK(map)),
				   vm_map_round_page(start+size,
						     VM_MAP_PAGE_MASK(map)),
/*
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	vm_sync_t		sync_flags)

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			    (vm_map_size_t)size, sync_flags);
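/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of synchronously flushing a mapped
 * region back to its pager ("addr" and "size" are placeholders):
 *
 *	kr = mach_vm_msync(mach_task_self(), addr, size,
 *			   VM_SYNC_SYNCHRONOUS);
 */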
/*
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
	vm_address_t	address,
	vm_sync_t	sync_flags)

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			    (vm_map_size_t)size, sync_flags);
vm_toggle_entry_reuse(int toggle, int *old_value)

	vm_map_t map = current_map();

	assert(!map->is_nested_map);
	if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){
		*old_value = map->disable_vmentry_reuse;
	} else if(toggle == VM_TOGGLE_SET){
		vm_map_entry_t map_to_entry;

		vm_map_disable_hole_optimization(map);
		map->disable_vmentry_reuse = TRUE;
		__IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
		if (map->first_free == map_to_entry) {
			map->highest_entry_end = vm_map_min(map);
			map->highest_entry_end = map->first_free->vme_end;
	} else if (toggle == VM_TOGGLE_CLEAR){
		map->disable_vmentry_reuse = FALSE;
		return KERN_INVALID_ARGUMENT;

	return KERN_SUCCESS;
/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
mach_vm_behavior_set(
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_behavior_t		new_behavior)

	vm_map_offset_t	align_mask;

	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

		return KERN_SUCCESS;

	switch (new_behavior) {
	case VM_BEHAVIOR_REUSABLE:
	case VM_BEHAVIOR_REUSE:
	case VM_BEHAVIOR_CAN_REUSE:
		/*
		 * Align to the hardware page size, to allow
		 * malloc() to maximize the amount of re-usability,
		 * even on systems with larger software page size.
		 */
		align_mask = PAGE_MASK;
		align_mask = VM_MAP_PAGE_MASK(map);

	return vm_map_behavior_set(map,
				   vm_map_trunc_page(start, align_mask),
				   vm_map_round_page(start+size, align_mask),
/*
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
	vm_behavior_t		new_behavior)

	if (start + size < start)
		return KERN_INVALID_ARGUMENT;

	return mach_vm_behavior_set(map,
				    (mach_vm_offset_t) start,
				    (mach_vm_size_t) size,
/*
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
	mach_vm_offset_t	*address,		/* IN/OUT */
	mach_vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */

	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,

	*address = map_addr;
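/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of querying the region containing an
 * address with the 64-bit basic-info flavor ("addr" is a placeholder):
 *
 *	mach_vm_address_t r_addr = addr;
 *	mach_vm_size_t r_size = 0;
 *	vm_region_basic_info_data_64_t binfo;
 *	mach_msg_type_number_t cnt = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t obj_name = MACH_PORT_NULL;
 *
 *	kr = mach_vm_region(mach_task_self(), &r_addr, &r_size,
 *			    VM_REGION_BASIC_INFO_64,
 *			    (vm_region_info_t)&binfo, &cnt, &obj_name);
 */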
/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */

	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,

	*address = CAST_DOWN(vm_offset_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;

	vm_address_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */

	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 */
mach_vm_region_recurse(
	mach_vm_address_t	*address,
	mach_vm_size_t		*size,
	vm_region_recurse_info_t	info,
	mach_msg_type_number_t	*infoCnt)

	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			(vm_region_submap_info_64_t)info,

	*address = map_addr;
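/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of walking submaps with the 64-bit
 * submap-info flavor ("addr" is a placeholder):
 *
 *	mach_vm_address_t r_addr = addr;
 *	mach_vm_size_t r_size = 0;
 *	natural_t depth = 1;
 *	vm_region_submap_info_data_64_t sinfo;
 *	mach_msg_type_number_t cnt = VM_REGION_SUBMAP_INFO_COUNT_64;
 *
 *	kr = mach_vm_region_recurse(mach_task_self(), &r_addr, &r_size,
 *				    &depth, (vm_region_recurse_info_t)&sinfo,
 *				    &cnt);
 */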
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 */
vm_region_recurse_64(
	vm_address_t		*address,
	vm_region_recurse_info_64_t	info,
	mach_msg_type_number_t	*infoCnt)

	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			(vm_region_submap_info_64_t)info,

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	vm_offset_t		*address,	/* IN/OUT */
	vm_size_t		*size,		/* OUT */
	natural_t		*depth,		/* IN/OUT */
	vm_region_recurse_info_t	info32,	/* IN/OUT */
	mach_msg_type_number_t	*infoCnt)	/* IN/OUT */

	vm_region_submap_info_data_64_t info64;
	vm_region_submap_info_t info;
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
				      depth, &info64, infoCnt);

	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
mach_vm_purgable_control(
	mach_vm_offset_t	address,
	vm_purgable_t		control,

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address, PAGE_MASK),
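/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of allocating purgeable memory and
 * marking it volatile so the system may reclaim it under pressure:
 *
 *	mach_vm_address_t addr = 0;
 *	int state;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr, size,
 *			      VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
 *
 *	state = VM_PURGABLE_VOLATILE;
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *				      VM_PURGABLE_SET_STATE, &state);
 */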
vm_purgable_control(
	vm_offset_t		address,
	vm_purgable_t		control,

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address, PAGE_MASK),
/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host priv port).  Set this variable to zero if
 *	you want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
	host_priv_t		host_priv,

	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
		return KERN_INVALID_HOST;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*addr;
	map_size = (vm_map_size_t)size;

	kr = vm_map_enter_cpm(map,

	*addr = CAST_DOWN(vm_address_t, map_addr);
	mach_vm_offset_t	offset,

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(
		vm_map_trunc_page(offset, PAGE_MASK),
		disposition, ref_count);

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(
		vm_map_trunc_page(offset, PAGE_MASK),
		disposition, ref_count);
	mach_vm_address_t	address,
	vm_page_info_flavor_t	flavor,
	vm_page_info_t		info,
	mach_msg_type_number_t	*count)

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_page_info(map, address, flavor, info, count);
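/*
 * Illustrative only (not part of the original file): a user-space sketch,
 * assuming the <mach/mach_vm.h> stubs, of querying the disposition of the
 * page backing an address ("addr" is a placeholder):
 *
 *	vm_page_info_basic_data_t pinfo;
 *	mach_msg_type_number_t cnt = VM_PAGE_INFO_BASIC_COUNT;
 *
 *	kr = mach_vm_page_info(mach_task_self(), addr, VM_PAGE_INFO_BASIC,
 *			       (vm_page_info_t)&pinfo, &cnt);
 *	// pinfo.disposition then carries the VM_PAGE_QUERY_* bits
 */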
/* map a (whole) upl into an address space */
	vm_address_t		*dst_addr)

	vm_map_offset_t		map_addr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_upl(map, upl, &map_addr);
	*dst_addr = CAST_DOWN(vm_address_t, map_addr);

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return (vm_map_remove_upl(map, upl));
/* Retrieve a upl for an object underlying an address range in a map */
	vm_map_offset_t		map_offset,
	upl_size_t		*upl_size,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	upl_control_flags_t	*flags,
	int			force_data_sync)

	upl_control_flags_t map_flags;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_flags = *flags & ~UPL_NOZEROFILL;
	if (force_data_sync)
		map_flags |= UPL_FORCE_DATA_SYNC;

	kr = vm_map_create_upl(map,

	*flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
/*
 * mach_make_memory_entry_64
 *
 * Think of it as a two-stage vm_remap() operation.  First
 * you get a handle.  Second, you map that handle somewhere
 * else. Rather than doing it all at once (and
 * without needing access to the other whole map).
 */
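/*
 * Illustrative only (not part of the original file): a user-space sketch of
 * that two-stage pattern, assuming the <mach/mach_vm.h> stubs ("addr" and
 * "size" are placeholders).  A handle is made for an existing range in the
 * current task and then mapped at a second address, giving two views of the
 * same memory:
 *
 *	memory_object_size_t entry_size = size;
 *	mach_port_t entry = MACH_PORT_NULL;
 *	mach_vm_address_t alias = 0;
 *
 *	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, addr,
 *				       VM_PROT_READ | VM_PROT_WRITE,
 *				       &entry, MACH_PORT_NULL);
 *	if (kr == KERN_SUCCESS) {
 *		kr = mach_vm_map(mach_task_self(), &alias, entry_size, 0,
 *				 VM_FLAGS_ANYWHERE, entry, 0, FALSE,
 *				 VM_PROT_READ | VM_PROT_WRITE,
 *				 VM_PROT_READ | VM_PROT_WRITE,
 *				 VM_INHERIT_DEFAULT);
 *	}
 */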
mach_make_memory_entry_64(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_handle)

	vm_map_version_t	version;
	vm_named_entry_t	parent_entry;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	/* needed for call to vm_map_lookup_locked */
	vm_object_offset_t	obj_off;
	struct vm_object_fault_info	fault_info;
	vm_object_t		shadow_object;

	/* needed for direct map entry manipulation */
	vm_map_entry_t		map_entry;
	vm_map_entry_t		next_entry;
	vm_map_t		original_map = target_map;
	vm_map_size_t		total_size, map_size;
	vm_map_offset_t		map_start, map_end;
	vm_map_offset_t		local_offset;
	vm_object_size_t	mappable_size;

	/*
	 * Stash the offset in the page for use by vm_map_enter_mem_object()
	 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
	 */
	vm_object_offset_t	offset_in_page;

	unsigned int		access;
	vm_prot_t		protections;
	vm_prot_t		original_protections, mask_protections;
	unsigned int		wimg_mode;

	boolean_t		force_shadow = FALSE;
	boolean_t		use_data_addr;
	boolean_t		use_4K_compat;
	if (((permission & 0x00FF0000) &
	       MAP_MEM_NAMED_CREATE |
	       MAP_MEM_GRAB_SECLUDED | /* XXX FBDP TODO: restrict usage? */
	       MAP_MEM_NAMED_REUSE |
	       MAP_MEM_USE_DATA_ADDR |
	       MAP_MEM_4K_DATA_ADDR |
	       MAP_MEM_VM_SHARE))) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		return KERN_INVALID_VALUE;

	if (parent_handle != IP_NULL &&
	    ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
		parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
		parent_entry = NULL;

	if (parent_entry && parent_entry->is_copy) {
		return KERN_INVALID_ARGUMENT;

	original_protections = permission & VM_PROT_ALL;
	protections = original_protections;
	mask_protections = permission & VM_PROT_IS_MASK;
	access = GET_MAP_MEM(permission);
	use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0);
	use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0);

	user_handle = IP_NULL;

	map_start = vm_map_trunc_page(offset, PAGE_MASK);
	if (permission & MAP_MEM_ONLY) {
		boolean_t		parent_is_object;

		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;

		if (use_data_addr || use_4K_compat || parent_entry == NULL) {
			return KERN_INVALID_ARGUMENT;

		parent_is_object = !(parent_entry->is_sub_map ||
				     parent_entry->is_pager);
		object = parent_entry->backing.object;
		if(parent_is_object && object != VM_OBJECT_NULL)
			wimg_mode = object->wimg_bits;
			wimg_mode = VM_WIMG_USE_DEFAULT;
		if((access != GET_MAP_MEM(parent_entry->protection)) &&
		   !(parent_entry->protection & VM_PROT_WRITE)) {
			return KERN_INVALID_RIGHT;
		if(access == MAP_MEM_IO) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_IO;
		} else if (access == MAP_MEM_COPYBACK) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_USE_DEFAULT;
		} else if (access == MAP_MEM_INNERWBACK) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_INNERWBACK;
		} else if (access == MAP_MEM_WTHRU) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_WTHRU;
		} else if (access == MAP_MEM_WCOMB) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_WCOMB;

		if (parent_is_object && object &&
		    (access != MAP_MEM_NOOP) &&
		    (!(object->nophyscache))) {

			if (object->wimg_bits != wimg_mode) {
				vm_object_lock(object);
				vm_object_change_wimg_mode(object, wimg_mode);
				vm_object_unlock(object);

		*object_handle = IP_NULL;
		return KERN_SUCCESS;
	} else if (permission & MAP_MEM_NAMED_CREATE) {
		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;

		if (use_data_addr || use_4K_compat) {
			return KERN_INVALID_ARGUMENT;

		kr = mach_memory_entry_allocate(&user_entry, &user_handle);
		if (kr != KERN_SUCCESS) {
			return KERN_FAILURE;

		/*
		 * Force the creation of the VM object now.
		 */
		if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
			/*
			 * LP64todo - for now, we can only allocate 4GB-4096
			 * internal objects because the default pager can't
			 * page bigger ones.  Remove this when it can.
			 */

		object = vm_object_allocate(map_size);
		assert(object != VM_OBJECT_NULL);

		if (permission & MAP_MEM_PURGABLE) {
			if (! (permission & VM_PROT_WRITE)) {
				/* if we can't write, we can't purge */
				vm_object_deallocate(object);
				kr = KERN_INVALID_ARGUMENT;
			object->purgable = VM_PURGABLE_NONVOLATILE;
			assert(object->vo_purgeable_owner == NULL);
			assert(object->resident_page_count == 0);
			assert(object->wired_page_count == 0);
			vm_object_lock(object);
			vm_purgeable_nonvolatile_enqueue(object,
			vm_object_unlock(object);

#if CONFIG_SECLUDED_MEMORY
		if (secluded_for_iokit && /* global boot-arg */
		    ((permission & MAP_MEM_GRAB_SECLUDED)
		     /* XXX FBDP for my testing only */
		     || (secluded_for_fbdp && map_size == 97550336)
			if (!(permission & MAP_MEM_GRAB_SECLUDED) &&
			    secluded_for_fbdp) {
				printf("FBDP: object %p size %lld can grab secluded\n", object, (uint64_t) map_size);
			object->can_grab_secluded = TRUE;
			assert(!object->eligible_for_secluded);
#endif /* CONFIG_SECLUDED_MEMORY */

		/*
		 * The VM object is brand new and nobody else knows about it,
		 * so we don't need to lock it.
		 */
		wimg_mode = object->wimg_bits;
		if (access == MAP_MEM_IO) {
			wimg_mode = VM_WIMG_IO;
		} else if (access == MAP_MEM_COPYBACK) {
			wimg_mode = VM_WIMG_USE_DEFAULT;
		} else if (access == MAP_MEM_INNERWBACK) {
			wimg_mode = VM_WIMG_INNERWBACK;
		} else if (access == MAP_MEM_WTHRU) {
			wimg_mode = VM_WIMG_WTHRU;
		} else if (access == MAP_MEM_WCOMB) {
			wimg_mode = VM_WIMG_WCOMB;
		if (access != MAP_MEM_NOOP) {
			object->wimg_bits = wimg_mode;
		/* the object has no pages, so no WIMG bits to update here */

		/*
		 * We use this path when we want to make sure that
		 * nobody messes with the object (coalesce, for
		 * example) before we map it.
		 * We might want to use these objects for transposition via
		 * vm_object_transpose() too, so we don't want any copy or
		 * shadow objects either...
		 */
		object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
		object->true_share = TRUE;

		user_entry->backing.object = object;
		user_entry->internal = TRUE;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->offset = 0;
		user_entry->data_offset = 0;
		user_entry->protection = protections;
		SET_MAP_MEM(access, user_entry->protection);
		user_entry->size = map_size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in.		      */

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	if (permission & MAP_MEM_VM_COPY) {

		if (target_map == VM_MAP_NULL) {
			return KERN_INVALID_TASK;

		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
		if (use_data_addr || use_4K_compat) {
			offset_in_page = offset - map_start;
				offset_in_page &= ~((signed)(0xFFF));

		kr = vm_map_copyin_internal(target_map,
					    VM_MAP_COPYIN_ENTRY_LIST,
		if (kr != KERN_SUCCESS) {

		kr = mach_memory_entry_allocate(&user_entry, &user_handle);
		if (kr != KERN_SUCCESS) {
			vm_map_copy_discard(copy);
			return KERN_FAILURE;

		user_entry->backing.copy = copy;
		user_entry->internal = FALSE;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->is_copy = TRUE;
		user_entry->offset = 0;
		user_entry->protection = protections;
		user_entry->size = map_size;
		user_entry->data_offset = offset_in_page;

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	if (permission & MAP_MEM_VM_SHARE) {
		vm_prot_t cur_prot, max_prot;

		if (target_map == VM_MAP_NULL) {
			return KERN_INVALID_TASK;

		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
		if (use_data_addr || use_4K_compat) {
			offset_in_page = offset - map_start;
				offset_in_page &= ~((signed)(0xFFF));

		cur_prot = VM_PROT_ALL;
		kr = vm_map_copy_extract(target_map,
		if (kr != KERN_SUCCESS) {

		if (mask_protections) {
			/*
			 * We just want as much of "original_protections"
			 * as we can get out of the actual "cur_prot".
			 */
			protections &= cur_prot;
			if (protections == VM_PROT_NONE) {
				/* no access at all: fail */
				vm_map_copy_discard(copy);
				return KERN_PROTECTION_FAILURE;
			/*
			 * We want exactly "original_protections"
			 * out of "cur_prot".
			 */
			if ((cur_prot & protections) != protections) {
				vm_map_copy_discard(copy);
				return KERN_PROTECTION_FAILURE;

		kr = mach_memory_entry_allocate(&user_entry, &user_handle);
		if (kr != KERN_SUCCESS) {
			vm_map_copy_discard(copy);
			return KERN_FAILURE;

		user_entry->backing.copy = copy;
		user_entry->internal = FALSE;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->is_copy = TRUE;
		user_entry->offset = 0;
		user_entry->protection = protections;
		user_entry->size = map_size;
		user_entry->data_offset = offset_in_page;

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	if (parent_entry == NULL ||
	    (permission & MAP_MEM_NAMED_REUSE)) {

		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
		if (use_data_addr || use_4K_compat) {
			offset_in_page = offset - map_start;
				offset_in_page &= ~((signed)(0xFFF));

		/* Create a named object based on address range within the task map */
		/* Go find the object at given address */

		if (target_map == VM_MAP_NULL) {
			return KERN_INVALID_TASK;

		protections = original_protections;
		vm_map_lock_read(target_map);

		/* get the object associated with the target address */
		/* note we check the permission of the range against */
		/* that requested by the caller */

		kr = vm_map_lookup_locked(&target_map, map_start,
					  protections | mask_protections,
					  OBJECT_LOCK_EXCLUSIVE, &version,
					  &object, &obj_off, &prot, &wired,

		if (kr != KERN_SUCCESS) {
			vm_map_unlock_read(target_map);

		if (mask_protections) {
			/*
			 * The caller asked us to use the "protections" as
			 * a mask, so restrict "protections" to what this
			 * mapping actually allows.
			 */
			protections &= prot;

		if (((prot & protections) != protections)
		    || (object == kernel_object)) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if(real_map != target_map)
				vm_map_unlock_read(real_map);
			if(object == kernel_object) {
				printf("Warning: Attempt to create a named"
				       " entry from the kernel_object\n");
		/* We have an object, now check to see if this object */
		/* is suitable.  If not, create a shadow and share that */

		/*
		 * We have to unlock the VM object to avoid deadlocking with
		 * a VM map lock (the lock ordering is map, the object), if we
		 * need to modify the VM map to create a shadow object.  Since
		 * we might release the VM map lock below anyway, we have
		 * to release the VM map lock now.
		 * XXX FBDP There must be a way to avoid this double lookup...
		 *
		 * Take an extra reference on the VM object to make sure it's
		 * not going to disappear.
		 */
		vm_object_reference_locked(object); /* extra ref to hold obj */
		vm_object_unlock(object);

		local_map = original_map;
		local_offset = map_start;
		if(target_map != local_map) {
			vm_map_unlock_read(target_map);
			if(real_map != target_map)
				vm_map_unlock_read(real_map);
			vm_map_lock_read(local_map);
			target_map = local_map;
			real_map = local_map;

			if(!vm_map_lookup_entry(local_map,
						local_offset, &map_entry)) {
				kr = KERN_INVALID_ARGUMENT;
				vm_map_unlock_read(target_map);
				if(real_map != target_map)
					vm_map_unlock_read(real_map);
				vm_object_deallocate(object); /* release extra ref */
				object = VM_OBJECT_NULL;

			iskernel = (local_map->pmap == kernel_pmap);
			if(!(map_entry->is_sub_map)) {
				if (VME_OBJECT(map_entry) != object) {
					kr = KERN_INVALID_ARGUMENT;
					vm_map_unlock_read(target_map);
					if(real_map != target_map)
						vm_map_unlock_read(real_map);
					vm_object_deallocate(object); /* release extra ref */
					object = VM_OBJECT_NULL;

				local_map = VME_SUBMAP(map_entry);

				vm_map_lock_read(local_map);
				vm_map_unlock_read(tmap);
				target_map = local_map;
				real_map = local_map;
				local_offset = local_offset - map_entry->vme_start;
				local_offset += VME_OFFSET(map_entry);
		/*
		 * We found the VM map entry, lock the VM object again.
		 */
		vm_object_lock(object);
		if(map_entry->wired_count) {
			/* JMM - The check below should be reworked instead. */
			object->true_share = TRUE;

		if (mask_protections) {
			/*
			 * The caller asked us to use the "protections" as
			 * a mask, so restrict "protections" to what this
			 * mapping actually allows.
			 */
			protections &= map_entry->max_protection;

		if(((map_entry->max_protection) & protections) != protections) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if(real_map != target_map)
				vm_map_unlock_read(real_map);
			vm_object_deallocate(object);
			object = VM_OBJECT_NULL;

		mappable_size = fault_info.hi_offset - obj_off;
		total_size = map_entry->vme_end - map_entry->vme_start;
		if(map_size > mappable_size) {
			/* try to extend mappable size if the entries */
			/* following are from the same object and are */
			next_entry = map_entry->vme_next;
			/* lets see if the next map entry is still   */
			/* pointing at this object and is contiguous */
			while(map_size > mappable_size) {
				if ((VME_OBJECT(next_entry) == object) &&
				    (next_entry->vme_start ==
				     next_entry->vme_prev->vme_end) &&
				    (VME_OFFSET(next_entry) ==
				     (VME_OFFSET(next_entry->vme_prev) +
				      (next_entry->vme_prev->vme_end -
				       next_entry->vme_prev->vme_start)))) {
					if (mask_protections) {
						/*
						 * The caller asked us to use
						 * the "protections" as a mask,
						 * so restrict "protections" to
						 * what this mapping actually
						 * allows.
						 */
						protections &= next_entry->max_protection;
					if ((next_entry->wired_count) &&
					    (map_entry->wired_count == 0)) {
					if(((next_entry->max_protection)
					    & protections) != protections) {
					if (next_entry->needs_copy !=
					    map_entry->needs_copy)
					mappable_size += next_entry->vme_end
						- next_entry->vme_start;
					total_size += next_entry->vme_end
						- next_entry->vme_start;
					next_entry = next_entry->vme_next;
		/* vm_map_entry_should_cow_for_true_share() checks for malloc tags,
		 * never true in kernel */
		if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) &&
		    object->vo_size > map_size &&
			/*
			 * Set up the targeted range for copy-on-write to
			 * limit the impact of "true_share"/"copy_delay" to
			 * that range instead of the entire VM object...
			 */

			vm_object_unlock(object);
			if (vm_map_lock_read_to_write(target_map)) {
				vm_object_deallocate(object);
				target_map = original_map;

			vm_map_clip_start(target_map,
					  vm_map_trunc_page(map_start,
							    VM_MAP_PAGE_MASK(target_map)));
			vm_map_clip_end(target_map,
					(vm_map_round_page(map_end,
							   VM_MAP_PAGE_MASK(target_map))));
			force_shadow = TRUE;

			if ((map_entry->vme_end - offset) < map_size) {
				map_size = map_entry->vme_end - map_start;
			total_size = map_entry->vme_end - map_entry->vme_start;

			vm_map_lock_write_to_read(target_map);
			vm_object_lock(object);
		if (object->internal) {
			/* vm_map_lookup_locked will create a shadow if   */
			/* needs_copy is set but does not check for the   */
			/* other two conditions shown. It is important to */
			/* set up an object which will not be pulled from */
			/* under us.  */

			    ((map_entry->needs_copy ||
			      (object->vo_size > total_size &&
			       (VME_OFFSET(map_entry) != 0 ||
				vm_map_round_page(total_size,
						  VM_MAP_PAGE_MASK(target_map)))))
			     && !object->true_share)) {
				/*
				 * We have to unlock the VM object before
				 * trying to upgrade the VM map lock, to
				 * honor lock ordering (map then object).
				 * Otherwise, we would deadlock if another
				 * thread holds a read lock on the VM map and
				 * is trying to acquire the VM object's lock.
				 * We still hold an extra reference on the
				 * VM object, guaranteeing that it won't
				 * disappear.
				 */
				vm_object_unlock(object);

				if (vm_map_lock_read_to_write(target_map)) {
					/*
					 * We couldn't upgrade our VM map lock
					 * from "read" to "write" and we lost
					 * our reference.
					 * Start all over again...
					 */
					vm_object_deallocate(object); /* extra ref */
					target_map = original_map;

				vm_object_lock(object);

			/*
			 * JMM - We need to avoid coming here when the object
			 * is wired by anybody, not just the current map.  Why
			 * couldn't we use the standard vm_object_copy_quickly()
			 * approach here?
			 */

			/* create a shadow object */
			VME_OBJECT_SHADOW(map_entry, total_size);
			shadow_object = VME_OBJECT(map_entry);

			vm_object_unlock(object);

			prot = map_entry->protection & ~VM_PROT_WRITE;

			if (override_nx(target_map,
					VME_ALIAS(map_entry))
				prot |= VM_PROT_EXECUTE;

			vm_object_pmap_protect(
				object, VME_OFFSET(map_entry),
				((map_entry->is_shared
				  || target_map->mapped_in_other_pmaps)
				map_entry->vme_start,

			total_size -= (map_entry->vme_end
				       - map_entry->vme_start);
			next_entry = map_entry->vme_next;
			map_entry->needs_copy = FALSE;

			vm_object_lock(shadow_object);
			while (total_size) {
				assert((next_entry->wired_count == 0) ||
				       (map_entry->wired_count));

				if (VME_OBJECT(next_entry) == object) {
					vm_object_reference_locked(shadow_object);
					VME_OBJECT_SET(next_entry,
					vm_object_deallocate(object);
					(VME_OFFSET(next_entry->vme_prev) +
					 (next_entry->vme_prev->vme_end
					  - next_entry->vme_prev->vme_start)));
					next_entry->needs_copy = FALSE;
					panic("mach_make_memory_entry_64:"
					      " map entries out of sync\n");
					- next_entry->vme_start;
				next_entry = next_entry->vme_next;

			/*
			 * Transfer our extra reference to the
			 * shadow object.
			 */
			vm_object_reference_locked(shadow_object);
			vm_object_deallocate(object); /* extra ref */
			object = shadow_object;

			obj_off = ((local_offset - map_entry->vme_start)
				   + VME_OFFSET(map_entry));

			vm_map_lock_write_to_read(target_map);
		/* note: in the future we can (if necessary) allow for  */
		/* memory object lists, this will better support        */
		/* fragmentation, but is it necessary?  The user should */
		/* be encouraged to create address space oriented       */
		/* shared objects from CLEAN memory regions which have  */
		/* a known and defined history.  i.e. no inheritance    */
		/* share, make this call before making the region the   */
		/* target of ipc's, etc.  The code above, protecting    */
		/* against delayed copy, etc. is mostly defensive.      */

		wimg_mode = object->wimg_bits;
		if (!(object->nophyscache)) {
			if (access == MAP_MEM_IO) {
				wimg_mode = VM_WIMG_IO;
			} else if (access == MAP_MEM_COPYBACK) {
				wimg_mode = VM_WIMG_USE_DEFAULT;
			} else if (access == MAP_MEM_INNERWBACK) {
				wimg_mode = VM_WIMG_INNERWBACK;
			} else if (access == MAP_MEM_WTHRU) {
				wimg_mode = VM_WIMG_WTHRU;
			} else if (access == MAP_MEM_WCOMB) {
				wimg_mode = VM_WIMG_WCOMB;
			}
		}

#if VM_OBJECT_TRACKING_OP_TRUESHARE
		if (!object->true_share &&
		    vm_object_tracking_inited) {
			void *bt[VM_OBJECT_TRACKING_BTDEPTH];
			int num = 0;

			num = OSBacktrace(bt,
					  VM_OBJECT_TRACKING_BTDEPTH);
			btlog_add_entry(vm_object_tracking_btlog,
					object,
					VM_OBJECT_TRACKING_OP_TRUESHARE,
					bt,
					num);
		}
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */

		vm_object_lock_assert_exclusive(object);
		object->true_share = TRUE;
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;

		/*
		 * The memory entry now points to this VM object and we
		 * need to hold a reference on the VM object.  Use the extra
		 * reference we took earlier to keep the object alive when we
		 * had to unlock it.
		 */

		vm_map_unlock_read(target_map);
		if (real_map != target_map)
			vm_map_unlock_read(real_map);

		if (object->wimg_bits != wimg_mode)
			vm_object_change_wimg_mode(object, wimg_mode);

		/* the size of mapped entry that overlaps with our region */
		/* which is targeted for share.                           */
		/* (entry_end - entry_start) -                            */
		/*                   offset of our beg addr within entry  */
		/* it corresponds to this:                                */

		if (map_size > mappable_size)
			map_size = mappable_size;
		if (permission & MAP_MEM_NAMED_REUSE) {
			/*
			 * Compare what we got with the "parent_entry".
			 * If they match, re-use the "parent_entry" instead
			 * of creating a new one.
			 */
			if (parent_entry != NULL &&
			    parent_entry->backing.object == object &&
			    parent_entry->internal == object->internal &&
			    parent_entry->is_sub_map == FALSE &&
			    parent_entry->is_pager == FALSE &&
			    parent_entry->offset == obj_off &&
			    parent_entry->protection == protections &&
			    parent_entry->size == map_size &&
			    ((!(use_data_addr || use_4K_compat) &&
			      (parent_entry->data_offset == 0)) ||
			     ((use_data_addr || use_4K_compat) &&
			      (parent_entry->data_offset == offset_in_page)))) {
				/*
				 * We have a match: re-use "parent_entry".
				 */
				/* release our extra reference on object */
				vm_object_unlock(object);
				vm_object_deallocate(object);
				/* parent_entry->ref_count++; XXX ? */
				/* Get an extra send-right on handle */
				ipc_port_copy_send(parent_handle);

				*size = CAST_DOWN(vm_size_t,
						  (parent_entry->size -
						   parent_entry->data_offset));
				*object_handle = parent_handle;
				return KERN_SUCCESS;
			} else {
				/*
				 * No match: we need to create a new entry.
				 * fall through...
				 */
			}
		}

		vm_object_unlock(object);
		if (mach_memory_entry_allocate(&user_entry, &user_handle)
		    != KERN_SUCCESS) {
			/* release our unused reference on the object */
			vm_object_deallocate(object);
			return KERN_FAILURE;
		}

		user_entry->backing.object = object;
		user_entry->internal = object->internal;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->offset = obj_off;
		user_entry->data_offset = offset_in_page;
		user_entry->protection = protections;
		SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
		user_entry->size = map_size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in.		      */

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}
	/* The new object will be based on an existing named object */
	if (parent_entry == NULL) {
		kr = KERN_INVALID_ARGUMENT;
		goto make_mem_done;
	}

	if (use_data_addr || use_4K_compat) {
		/*
		 * submaps and pagers should only be accessible from within
		 * the kernel, which shouldn't use the data address flag, so we
		 * can fail here.
		 */
		if (parent_entry->is_pager || parent_entry->is_sub_map) {
			panic("Shouldn't be using data address with a parent entry that is a submap or pager.");
		}

		/*
		 * Account for offset to data in parent entry and
		 * compute our own offset to data.
		 */
		if ((offset + *size + parent_entry->data_offset) > parent_entry->size) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}

		map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
		offset_in_page = (offset + parent_entry->data_offset) - map_start;
		if (use_4K_compat)
			offset_in_page &= ~((signed)(0xFFF));
		map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
	} else {
		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
		offset_in_page = 0;

		if ((offset + map_size) > parent_entry->size) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}
	}

	if (mask_protections) {
		/*
		 * The caller asked us to use the "protections" as
		 * a mask, so restrict "protections" to what this
		 * mapping actually allows.
		 */
		protections &= parent_entry->protection;
	}
	if ((protections & parent_entry->protection) != protections) {
		kr = KERN_PROTECTION_FAILURE;
		goto make_mem_done;
	}
	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		kr = KERN_FAILURE;
		goto make_mem_done;
	}

	user_entry->size = map_size;
	user_entry->offset = parent_entry->offset + map_start;
	user_entry->data_offset = offset_in_page;
	user_entry->is_sub_map = parent_entry->is_sub_map;
	user_entry->is_pager = parent_entry->is_pager;
	user_entry->is_copy = parent_entry->is_copy;
	user_entry->internal = parent_entry->internal;
	user_entry->protection = protections;

	if (access != MAP_MEM_NOOP) {
		SET_MAP_MEM(access, user_entry->protection);
	}

	if (parent_entry->is_sub_map) {
		user_entry->backing.map = parent_entry->backing.map;
		vm_map_lock(user_entry->backing.map);
		user_entry->backing.map->ref_count++;
		vm_map_unlock(user_entry->backing.map);
	}
	else if (parent_entry->is_pager) {
		user_entry->backing.pager = parent_entry->backing.pager;
		/* JMM - don't we need a reference here? */
	} else {
		object = parent_entry->backing.object;
		assert(object != VM_OBJECT_NULL);
		user_entry->backing.object = object;
		/* we now point to this object, hold on */
		vm_object_lock(object);
		vm_object_reference_locked(object);
#if VM_OBJECT_TRACKING_OP_TRUESHARE
		if (!object->true_share &&
		    vm_object_tracking_inited) {
			void *bt[VM_OBJECT_TRACKING_BTDEPTH];
			int num = 0;

			num = OSBacktrace(bt,
					  VM_OBJECT_TRACKING_BTDEPTH);
			btlog_add_entry(vm_object_tracking_btlog,
					object,
					VM_OBJECT_TRACKING_OP_TRUESHARE,
					bt,
					num);
		}
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */

		object->true_share = TRUE;
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
		vm_object_unlock(object);
	}
	*size = CAST_DOWN(vm_size_t, (user_entry->size -
				      user_entry->data_offset));
	*object_handle = user_handle;
	return KERN_SUCCESS;

make_mem_done:
	if (user_handle != IP_NULL) {
		/*
		 * Releasing "user_handle" causes the kernel object
		 * associated with it ("user_entry" here) to also be
		 * released and freed.
		 */
		mach_memory_entry_port_release(user_handle);
	}
	return kr;
}
kern_return_t
_mach_make_memory_entry(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_size_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
			(memory_object_offset_t)offset, permission, object_handle,
			parent_entry);
	*size = mo_size;
	return kr;
}

kern_return_t
mach_make_memory_entry(
	vm_map_t		target_map,
	vm_size_t		*size,
	vm_offset_t		offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_size_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
			(memory_object_offset_t)offset, permission, object_handle,
			parent_entry);
	*size = CAST_DOWN(vm_size_t, mo_size);
	return kr;
}
/*
 *	task_wire
 *
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (must_wire)
		map->wiring_required = TRUE;
	else
		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
}
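
/*
 * Illustrative sketch (hypothetical, not compiled): a caller that wants the
 * allocations it is about to make to come back wired could bracket them with
 * the routine above.  "some_map", "addr" and "len" are assumed names.
 */
#if 0
static kern_return_t
example_wired_allocate(vm_map_t some_map, mach_vm_offset_t *addr,
		       mach_vm_size_t len)
{
	kern_return_t kr;

	(void) task_wire(some_map, TRUE);	/* future allocations come back wired */
	kr = mach_vm_allocate(some_map, addr, len, VM_FLAGS_ANYWHERE);
	(void) task_wire(some_map, FALSE);	/* restore default behavior */
	return kr;
}
#endif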
__private_extern__ kern_return_t
mach_memory_entry_allocate(
	vm_named_entry_t	*user_entry_p,
	ipc_port_t		*user_handle_p)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	ipc_port_t		previous;

	user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
	if (user_entry == NULL)
		return KERN_FAILURE;

	named_entry_lock_init(user_entry);

	user_handle = ipc_port_alloc_kernel();
	if (user_handle == IP_NULL) {
		kfree(user_entry, sizeof *user_entry);
		return KERN_FAILURE;
	}
	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	user_entry->backing.pager = NULL;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = FALSE;
	user_entry->is_copy = FALSE;
	user_entry->internal = FALSE;
	user_entry->size = 0;
	user_entry->offset = 0;
	user_entry->data_offset = 0;
	user_entry->protection = VM_PROT_NONE;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);

	*user_entry_p = user_entry;
	*user_handle_p = user_handle;

	return KERN_SUCCESS;
}
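
/*
 * Illustrative sketch (hypothetical, not compiled): the typical in-kernel
 * pattern around the allocator above -- create the entry/port pair, point the
 * entry at a VM object, and hand the send right out.  mach_make_memory_entry_64()
 * does essentially this; the names "obj", "len" and "portp" are assumed.
 */
#if 0
static kern_return_t
example_named_entry_for_object(vm_object_t obj, vm_object_size_t len,
			       ipc_port_t *portp)
{
	vm_named_entry_t	entry;
	ipc_port_t		port;

	if (mach_memory_entry_allocate(&entry, &port) != KERN_SUCCESS)
		return KERN_FAILURE;

	vm_object_reference(obj);		/* the entry keeps its own reference */
	entry->backing.object = obj;
	entry->internal = obj->internal;
	entry->offset = 0;
	entry->size = len;
	entry->protection = VM_PROT_READ | VM_PROT_WRITE;

	*portp = port;				/* caller owns the send right */
	return KERN_SUCCESS;
}
#endif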
/*
 *	mach_memory_object_memory_entry_64
 *
 *	Create a named entry backed by the provided pager.
 *
 *	JMM - we need to hold a reference on the pager -
 *	and release it when the named entry is destroyed.
 */
kern_return_t
mach_memory_object_memory_entry_64(
	host_t			host,
	boolean_t		internal,
	vm_object_offset_t	size,
	vm_prot_t		permission,
	memory_object_t		pager,
	ipc_port_t		*entry_handle)
{
	unsigned int		access;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	if (host == HOST_NULL)
		return(KERN_INVALID_HOST);

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	user_entry->backing.pager = pager;
	user_entry->size = size;
	user_entry->offset = 0;
	user_entry->protection = permission & VM_PROT_ALL;
	access = GET_MAP_MEM(permission);
	SET_MAP_MEM(access, user_entry->protection);
	user_entry->internal = internal;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = TRUE;
	assert(user_entry->ref_count == 1);

	*entry_handle = user_handle;
	return KERN_SUCCESS;
}
kern_return_t
mach_memory_object_memory_entry(
	host_t		host,
	boolean_t	internal,
	vm_size_t	size,
	vm_prot_t	permission,
	memory_object_t	pager,
	ipc_port_t	*entry_handle)
{
	return mach_memory_object_memory_entry_64( host, internal,
		(vm_object_offset_t)size, permission, pager, entry_handle);
}
kern_return_t
mach_memory_entry_purgable_control(
	ipc_port_t	entry_port,
	vm_purgable_t	control,
	int		*state)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_object_t		object;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}
	if (control != VM_PURGABLE_SET_STATE &&
	    control != VM_PURGABLE_GET_STATE)
		return(KERN_INVALID_ARGUMENT);

	if (control == VM_PURGABLE_SET_STATE &&
	    (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
	     ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
		return(KERN_INVALID_ARGUMENT);

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_pager ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	/* check that named entry covers entire object ? */
	if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
		vm_object_unlock(object);
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	named_entry_unlock(mem_entry);

	kr = vm_object_purgable_control(object, control, state);

	vm_object_unlock(object);

	return kr;
}
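
/*
 * Illustrative sketch (hypothetical, not compiled): querying the purgeable
 * state through a named entry port.  "entry_port" is assumed to be a send
 * right on an entry that covers an entire purgeable object, as the checks
 * above require.
 */
#if 0
static boolean_t
example_entry_was_emptied(ipc_port_t entry_port)
{
	int	state = VM_PURGABLE_NONVOLATILE;

	if (mach_memory_entry_purgable_control(entry_port,
					       VM_PURGABLE_GET_STATE,
					       &state) != KERN_SUCCESS)
		return FALSE;
	return (state & VM_PURGABLE_STATE_MASK) == VM_PURGABLE_EMPTY;
}
#endif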
kern_return_t
mach_memory_entry_get_page_counts(
	ipc_port_t	entry_port,
	unsigned int	*resident_page_count,
	unsigned int	*dirty_page_count)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_object_size_t	size;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_pager ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	offset = mem_entry->offset;
	size = mem_entry->size;

	named_entry_unlock(mem_entry);

	kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count);

	vm_object_unlock(object);

	return kr;
}
/*
 * mach_memory_entry_port_release:
 *
 * Release a send right on a named entry port.  This is the correct
 * way to destroy a named entry.  When the last right on the port is
 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
 */
void
mach_memory_entry_port_release(
	ipc_port_t	port)
{
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
	ipc_port_release_send(port);
}
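
/*
 * Illustrative sketch (hypothetical, not compiled): kernel code that created
 * a named entry drops it by releasing its send right; the entry itself is
 * reclaimed by mach_destroy_memory_entry() below once the last right is gone.
 * "mem_entry_port" is an assumed name.
 */
#if 0
	if (mem_entry_port != IP_NULL) {
		mach_memory_entry_port_release(mem_entry_port);
		mem_entry_port = IP_NULL;
	}
#endif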
/*
 * mach_destroy_memory_entry:
 *
 * Drops a reference on a memory entry and destroys the memory entry if
 * there are no more references on it.
 * NOTE: This routine should not be called to destroy a memory entry from the
 * kernel, as it will not release the Mach port associated with the memory
 * entry.  The proper way to destroy a memory entry in the kernel is to
 * call mach_memory_entry_port_release() to release the kernel's send-right on
 * the memory entry's port.  When the last send right is released, the memory
 * entry will be destroyed via ipc_kobject_destroy().
 */
void
mach_destroy_memory_entry(
	ipc_port_t	port)
{
	vm_named_entry_t	named_entry;
#if MACH_ASSERT
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
	named_entry = (vm_named_entry_t)port->ip_kobject;

	named_entry_lock(named_entry);
	named_entry->ref_count -= 1;

	if (named_entry->ref_count == 0) {
		if (named_entry->is_sub_map) {
			vm_map_deallocate(named_entry->backing.map);
		} else if (named_entry->is_pager) {
			/* JMM - need to drop reference on pager in that case */
		} else if (named_entry->is_copy) {
			vm_map_copy_discard(named_entry->backing.copy);
		} else {
			/* release the VM object we've been pointing to */
			vm_object_deallocate(named_entry->backing.object);
		}

		named_entry_unlock(named_entry);
		named_entry_lock_destroy(named_entry);

		kfree((void *) port->ip_kobject,
		      sizeof (struct vm_named_entry));
	} else
		named_entry_unlock(named_entry);
}
/* Allow manipulation of individual page state.  This is actually part of */
/* the UPL regimen but takes place on the memory entry rather than on a UPL */

kern_return_t
mach_memory_entry_page_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags)
{
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	kern_return_t		kr;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_pager ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_reference(object);
	named_entry_unlock(mem_entry);

	kr = vm_object_page_op(object, offset, ops, phys_entry, flags);

	vm_object_deallocate(object);

	return kr;
}
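
/*
 * Illustrative sketch (hypothetical, not compiled): with no SET/CLR ops
 * requested, the page_op call above reports a resident page's physical page
 * number and its state bits.  "entry_port" and "pg_offset" are assumed names;
 * pg_offset is page aligned and relative to the start of the entry.
 */
#if 0
static kern_return_t
example_query_entry_page(ipc_port_t entry_port, vm_object_offset_t pg_offset)
{
	ppnum_t		pnum = 0;
	int		pflags = 0;
	kern_return_t	kr;

	kr = mach_memory_entry_page_op(entry_port, pg_offset, 0,
				       &pnum, &pflags);
	if (kr == KERN_SUCCESS && (pflags & UPL_POP_DIRTY)) {
		/* the page is resident and dirty */
	}
	return kr;
}
#endif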
/*
 * mach_memory_entry_range_op offers performance enhancement over
 * mach_memory_entry_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */

kern_return_t
mach_memory_entry_range_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset_beg,
	vm_object_offset_t	offset_end,
	int			ops,
	int			*range)
{
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	kern_return_t		kr;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_pager ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_reference(object);
	named_entry_unlock(mem_entry);

	kr = vm_object_range_op(object,
				offset_beg,
				offset_end,
				ops,
				(uint32_t *) range);

	vm_object_deallocate(object);

	return kr;
}
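
/*
 * Illustrative sketch (hypothetical, not compiled): using the range call to
 * measure how much of a span is resident without building a UPL.  With
 * UPL_ROP_PRESENT, "range" comes back as the number of bytes, starting at
 * offset_beg, that are present before the first absent page.  The names
 * "entry_port", "beg" and "end" are assumed.
 */
#if 0
static kern_return_t
example_resident_prefix(ipc_port_t entry_port, vm_object_offset_t beg,
			vm_object_offset_t end, int *resident_bytes)
{
	return mach_memory_entry_range_op(entry_port, beg, end,
					  UPL_ROP_PRESENT, resident_bytes);
}
#endif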
kern_return_t
set_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	if (IP_VALID(dynamic_pager_control_port))
		ipc_port_release_send(dynamic_pager_control_port);

	dynamic_pager_control_port = control_port;
	return KERN_SUCCESS;
}

kern_return_t
get_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	*control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	*control_port = ipc_port_copy_send(dynamic_pager_control_port);
	return KERN_SUCCESS;
}
/* ******* Temporary Internal calls to UPL for BSD ***** */

extern int kernel_upl_map(
	vm_map_t		map,
	upl_t			upl,
	vm_offset_t		*dst_addr);

extern int kernel_upl_unmap(
	vm_map_t		map,
	upl_t			upl);

extern int kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_abort(
	upl_t			upl,
	int			abort_type);

extern int kernel_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags);
kern_return_t
kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return vm_upl_map(map, upl, dst_addr);
}

kern_return_t
kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	return vm_upl_unmap(map, upl);
}

kern_return_t
kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count)
{
	kern_return_t	kr;

	kr = upl_commit(upl, pl, count);
	upl_deallocate(upl);
	return kr;
}

kern_return_t
kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count)
{
	boolean_t		finished = FALSE;
	kern_return_t		kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags)
{
	kern_return_t		kr;
	boolean_t		finished = FALSE;

	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort(
	upl_t			upl,
	int			abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
/*
 * Now a kernel-private interface (for BootCache
 * use only).  Need a cleaner way to create an
 * empty vm_map() and return a handle to it.
 */

kern_return_t
vm_region_object_create(
	__unused vm_map_t	target_map,
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	vm_map_t	new_map;

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
				vm_map_round_page(size,
						  VM_MAP_PAGE_MASK(target_map)),
				TRUE);
	vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));

	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	assert(user_entry->ref_count == 1);

	*object_handle = user_handle;
	return KERN_SUCCESS;
}
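
/*
 * Illustrative sketch (hypothetical, not compiled): what a BootCache-style
 * client gets back -- a named entry backed by a fresh empty submap, which can
 * then be used as the target of subsequent mapping calls.  "len" and "handle"
 * are assumed names.
 */
#if 0
	ipc_port_t	handle = IP_NULL;

	if (vm_region_object_create(kernel_map, len, &handle) == KERN_SUCCESS) {
		/* "handle" now names an empty pageable region of "len" bytes */
	}
#endif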
ppnum_t vm_map_get_phys_page(		/* forward */
	vm_map_t	map,
	vm_offset_t	offset);

ppnum_t
vm_map_get_phys_page(
	vm_map_t		map,
	vm_offset_t		addr)
{
	vm_object_offset_t	offset;
	vm_object_t		object;
	vm_map_offset_t		map_offset;
	vm_map_entry_t		entry;
	ppnum_t			phys_page = 0;

	map_offset = vm_map_trunc_page(addr, PAGE_MASK);

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, map_offset, &entry)) {

		if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return (ppnum_t) 0;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;
			vm_map_lock(VME_SUBMAP(entry));
			old_map = map;
			map = VME_SUBMAP(entry);
			map_offset = (VME_OFFSET(entry) +
				      (map_offset - entry->vme_start));
			vm_map_unlock(old_map);
			continue;
		}
		if (VME_OBJECT(entry)->phys_contiguous) {
			/* These are not standard pageable memory mappings */
			/* If they are not present in the object they will */
			/* have to be picked up from the pager through the */
			/* fault mechanism. */
			if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
				/* need to call vm_fault */
				vm_map_unlock(map);
				vm_fault(map, map_offset, VM_PROT_NONE,
					 FALSE, THREAD_UNINT, NULL, 0);
				vm_map_lock(map);
				continue;
			}
			offset = (VME_OFFSET(entry) +
				  (map_offset - entry->vme_start));
			phys_page = (ppnum_t)
				((VME_OBJECT(entry)->vo_shadow_offset
				  + offset) >> PAGE_SHIFT);
			break;
		}
		offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
		object = VME_OBJECT(entry);
		vm_object_lock(object);
		while (TRUE) {
			vm_page_t dst_page = vm_page_lookup(object, offset);
			if (dst_page == VM_PAGE_NULL) {
				if (object->shadow) {
					vm_object_t old_object;
					vm_object_lock(object->shadow);
					old_object = object;
					offset = offset + object->vo_shadow_offset;
					object = object->shadow;
					vm_object_unlock(old_object);
				} else {
					vm_object_unlock(object);
					break;
				}
			} else {
				phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
				vm_object_unlock(object);
				break;
			}
		}
		break;
	}

	vm_map_unlock(map);
	return (phys_page);
}
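
/*
 * Illustrative sketch (hypothetical, not compiled): the routine above returns
 * a physical page number, not a byte address, so a caller wanting a physical
 * byte address shifts the page number back up and re-applies the sub-page
 * offset.  "some_map" and "va" are assumed names.
 */
#if 0
	ppnum_t		pnum;
	addr64_t	pa = 0;

	pnum = vm_map_get_phys_page(some_map, va);
	if (pnum != 0)
		pa = ((addr64_t)pnum << PAGE_SHIFT) | (va & PAGE_MASK);
#endif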
kern_return_t kernel_object_iopl_request(	/* forward */
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags);

kern_return_t
kernel_object_iopl_request(
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags)
{
	vm_object_t		object;
	kern_return_t		ret;

	int			caller_flags;

	caller_flags = *flags;

	if (caller_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}

	/* a few checks to make sure user is obeying rules */
	if (*upl_size == 0) {
		if (offset >= named_entry->size)
			return(KERN_INVALID_RIGHT);
		*upl_size = (upl_size_t) (named_entry->size - offset);
		if (*upl_size != named_entry->size - offset)
			return KERN_INVALID_ARGUMENT;
	}
	if (caller_flags & UPL_COPYOUT_FROM) {
		if ((named_entry->protection & VM_PROT_READ)
		    != VM_PROT_READ) {
			return(KERN_INVALID_RIGHT);
		}
	} else {
		if ((named_entry->protection &
		     (VM_PROT_READ | VM_PROT_WRITE))
		    != (VM_PROT_READ | VM_PROT_WRITE)) {
			return(KERN_INVALID_RIGHT);
		}
	}
	if (named_entry->size < (offset + *upl_size))
		return(KERN_INVALID_ARGUMENT);

	/* the callers parameter offset is defined to be the */
	/* offset from beginning of named entry offset in object */
	offset = offset + named_entry->offset;

	if (named_entry->is_sub_map ||
	    named_entry->is_copy)
		return KERN_INVALID_ARGUMENT;
	named_entry_lock(named_entry);

	if (named_entry->is_pager) {
		object = vm_object_enter(named_entry->backing.pager,
				named_entry->offset + named_entry->size,
				named_entry->internal,
				FALSE,
				FALSE);
		if (object == VM_OBJECT_NULL) {
			named_entry_unlock(named_entry);
			return(KERN_INVALID_OBJECT);
		}

		/* JMM - drop reference on the pager here? */

		/* create an extra reference for the object */
		vm_object_lock(object);
		vm_object_reference_locked(object);
		named_entry->backing.object = object;
		named_entry->is_pager = FALSE;
		named_entry_unlock(named_entry);

		/* wait for object (if any) to be ready */
		if (!named_entry->internal) {
			while (!object->pager_ready) {
				vm_object_wait(object,
					       VM_OBJECT_EVENT_PAGER_READY,
					       THREAD_UNINT);
				vm_object_lock(object);
			}
		}
		vm_object_unlock(object);
	} else {
		/* This is the case where we are going to operate */
		/* on an already known object.  If the object is  */
		/* not ready it is internal.  An external         */
		/* object cannot be mapped until it is ready, so  */
		/* we can therefore avoid the ready check in this */
		/* case.                                          */
		object = named_entry->backing.object;
		vm_object_reference(object);
		named_entry_unlock(named_entry);
	}

	if (!object->private) {
		if (*upl_size > MAX_UPL_TRANSFER_BYTES)
			*upl_size = MAX_UPL_TRANSFER_BYTES;
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		} else {
			*flags = 0;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
				     offset,
				     *upl_size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     (upl_control_flags_t)(unsigned int)caller_flags);
	vm_object_deallocate(object);
	return ret;
}