/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can either be 32 or 64, or the
 * kernel task can be 32 or 64. mach_vm_allocate makes sense everywhere, and
 * is preferred for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32
 * platforms, the MIG glue should never call into vm_allocate directly, because
 * the calling task and kernel_task are unlikely to use the same size parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */
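/*
 * Illustrative sketch (not part of this file): how a user task reaches the
 * subsystem 4800 entry points above through the mach_vm_* MIG stubs. This is
 * a hypothetical userspace example assuming only the public <mach/mach_vm.h>
 * interfaces; it is not kernel code.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	static kern_return_t
 *	example_allocate_page(mach_vm_address_t *out_addr)
 *	{
 *		mach_vm_address_t addr = 0;
 *		kern_return_t kr;
 *
 *		// "Wide" API: address and size are 64-bit regardless of the
 *		// caller's pointer size; VM_FLAGS_ANYWHERE lets the kernel
 *		// pick the address (see the PAGEZERO handling below).
 *		kr = mach_vm_allocate(mach_task_self(), &addr,
 *		    (mach_vm_size_t)vm_page_size, VM_FLAGS_ANYWHERE);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *
 *		*out_addr = addr;
 *		// Pair with mach_vm_deallocate() when the memory is done.
 *		return kr;
 *	}
 */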
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/sdt.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_init.h>
vm_size_t	upl_offset_to_pagelist = 0;

lck_grp_t	dynamic_pager_control_port_lock_group;
decl_lck_mtx_data(, dynamic_pager_control_port_lock);
ipc_port_t	dynamic_pager_control_port = NULL;
/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
	mach_vm_offset_t	*addr,
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;
	kern_return_t		result;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
		return(KERN_SUCCESS);

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);

	/*
	 * No specific address requested, so start candidate address
	 * search at the minimum address in the map.  However, if that
	 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
	 * allocations of PAGEZERO to explicit requests since its
	 * normal use is to catch dereferences of NULL and many
	 * applications also treat pointers with a value of 0 as
	 * special and suddenly having address 0 contain useable
	 * memory would tend to confuse those applications.
	 */
	map_addr = vm_map_min(map);
	map_addr += VM_MAP_PAGE_SIZE(map);
	map_addr = vm_map_trunc_page(*addr,
		VM_MAP_PAGE_MASK(map));
	map_size = vm_map_round_page(size,
		VM_MAP_PAGE_MASK(map));
	return(KERN_INVALID_ARGUMENT);
	result = vm_map_enter(
		(vm_object_offset_t)0,
/*
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;
	kern_return_t		result;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
		return(KERN_SUCCESS);

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);

	/*
	 * No specific address requested, so start candidate address
	 * search at the minimum address in the map.  However, if that
	 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
	 * allocations of PAGEZERO to explicit requests since its
	 * normal use is to catch dereferences of NULL and many
	 * applications also treat pointers with a value of 0 as
	 * special and suddenly having address 0 contain useable
	 * memory would tend to confuse those applications.
	 */
	map_addr = vm_map_min(map);
	map_addr += VM_MAP_PAGE_SIZE(map);
	map_addr = vm_map_trunc_page(*addr,
		VM_MAP_PAGE_MASK(map));
	map_size = vm_map_round_page(size,
		VM_MAP_PAGE_MASK(map));
	return(KERN_INVALID_ARGUMENT);
	result = vm_map_enter(
		(vm_object_offset_t)0,
	*addr = CAST_DOWN(vm_offset_t, map_addr);
/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
	mach_vm_offset_t	start,
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (mach_vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map,
		vm_map_trunc_page(start,
			VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(start+size,
			VM_MAP_PAGE_MASK(map)),
/*
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map,
		vm_map_trunc_page(start,
			VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(start+size,
			VM_MAP_PAGE_MASK(map)),
/*
 *	Sets the inheritance of the specified range in the
 *	given map.
 */
	mach_vm_offset_t	start,
	vm_inherit_t		new_inheritance)
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map,
		vm_map_trunc_page(start,
			VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(start+size,
			VM_MAP_PAGE_MASK(map)),
/*
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses
 *	the same size as the kernel).
 */
	vm_inherit_t		new_inheritance)
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map,
		vm_map_trunc_page(start,
			VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(start+size,
			VM_MAP_PAGE_MASK(map)),
/*
 *	Sets the protection of the specified range in the
 *	given map.
 */
	mach_vm_offset_t	start,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map,
		vm_map_trunc_page(start,
			VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(start+size,
			VM_MAP_PAGE_MASK(map)),
/*
 *	Sets the protection of the specified range in the
 *	specified map. Addressability of the range limited
 *	to the same size as the kernel.
 */
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map,
		vm_map_trunc_page(start,
			VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(start+size,
			VM_MAP_PAGE_MASK(map)),
/*
 * mach_vm_machine_attributes -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.
 */
mach_vm_machine_attribute(
	mach_vm_address_t		addr,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	return vm_map_machine_attribute(
		vm_map_trunc_page(addr,
			VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(addr+size,
			VM_MAP_PAGE_MASK(map)),
/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc. Limited addressability
 * (same range limits as for the native kernel map).
 */
vm_machine_attribute(
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	return vm_map_machine_attribute(
		vm_map_trunc_page(addr,
			VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(addr+size,
			VM_MAP_PAGE_MASK(map)),
/*
 *	Read/copy a range from one address space and return it to the caller.
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 *
 *	JMM - because of mach_msg_type_number_t, this call is limited to a
 *	single 4GB region at this time.
 */
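/*
 * Illustrative sketch (hypothetical userspace caller, not part of this file):
 * the reply to mach_vm_read() carries the copied range out-of-line, so the
 * kernel picks the destination address and the caller deallocates it later.
 * "some_address"/"some_size" are placeholders.
 *
 *	vm_offset_t		data = 0;
 *	mach_msg_type_number_t	count = 0;
 *	kern_return_t		kr;
 *
 *	kr = mach_vm_read(mach_task_self(), some_address, some_size,
 *	    &data, &count);
 *	if (kr == KERN_SUCCESS) {
 *		// "data"/"count" describe memory mapped into this task by IPC.
 *		mach_vm_deallocate(mach_task_self(),
 *		    (mach_vm_address_t)data, count);
 *	}
 */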
	mach_vm_address_t	addr,
	mach_msg_type_number_t	*data_size)
	vm_map_copy_t		ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if ((mach_msg_type_number_t) size != size)
		return KERN_INVALID_ARGUMENT;

	error = vm_map_copyin(map,
			(vm_map_address_t)addr,
			FALSE,	/* src_destroy */

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
/*
 *	Read/copy a range from one address space and return it to the caller.
 *	Limited addressability (same range limits as for the native kernel map).
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 */
	mach_msg_type_number_t	*data_size)
	vm_map_copy_t		ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size > (unsigned)(mach_msg_type_number_t) -1) {
		/*
		 * The kernel could handle a 64-bit "size" value, but
		 * it could not return the size of the data in "*data_size"
		 * without overflowing.
		 * Let's reject this "size" as invalid.
		 */
		return KERN_INVALID_ARGUMENT;

	error = vm_map_copyin(map,
			(vm_map_address_t)addr,
			FALSE,	/* src_destroy */

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
/*
 *	mach_vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout.
 */
	mach_vm_read_entry_t	data_list,
	mach_msg_type_number_t	i;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

			error = vm_map_copyin(map,
					FALSE,	/* src_destroy */
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
				if (KERN_SUCCESS == error) {
					data_list[i].address = map_addr;
				vm_map_copy_discard(copy);
			data_list[i].address = (mach_vm_address_t)0;
			data_list[i].size = (mach_vm_size_t)0;
/*
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout.
 *
 *	The source and destination ranges are limited to those
 *	that can be described with a vm_address_t (i.e. same
 *	size map as the kernel).
 *
 *	JMM - If the result of the copyout is an address range
 *	that cannot be described with a vm_address_t (i.e. the
 *	caller had a larger address space but used this call
 *	anyway), it will result in a truncated address being
 *	returned (and a likely confused caller).
 */
	vm_read_entry_t		data_list,
	mach_msg_type_number_t	i;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

			error = vm_map_copyin(map,
					FALSE,	/* src_destroy */
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
				if (KERN_SUCCESS == error) {
					data_list[i].address =
						CAST_DOWN(vm_offset_t, map_addr);
				vm_map_copy_discard(copy);
			data_list[i].address = (mach_vm_address_t)0;
			data_list[i].size = (mach_vm_size_t)0;
/*
 *	mach_vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	In making an assumption that the current thread is local, it is
 *	no longer cluster-safe without a fully supportive local proxy
 *	thread/task (but we don't support clusters anymore, so this is moot).
 */
mach_vm_read_overwrite(
	mach_vm_address_t	address,
	mach_vm_address_t	data,
	mach_vm_size_t		*data_size)
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
		(vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
			(vm_map_address_t)data,
		if (KERN_SUCCESS == error) {
			vm_map_copy_discard(copy);
/*
 *	vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	This routine adds the additional limitation that the source and
 *	destination ranges must be describable with vm_address_t values
 *	(i.e. the same size address spaces as the kernel, or at least
 *	the ranges are in that first portion of the respective address
 *	spaces).
 */
	vm_address_t	address,
	vm_size_t	*data_size)
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
		(vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
			(vm_map_address_t)data,
		if (KERN_SUCCESS == error) {
			vm_map_copy_discard(copy);
/*
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 */
	mach_vm_address_t		address,
	__unused mach_msg_type_number_t	size)
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
/*
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 *
 *	The addressability of the range of addresses to overwrite is
 *	limited by the use of a vm_address_t (same size as kernel map).
 *	Either the target map is also small, or the range is in the
 *	low addresses within it.
 */
	vm_address_t			address,
	__unused mach_msg_type_number_t	size)
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
/*
 *	Overwrite one range of the specified map with the contents of
 *	another range within that same map (i.e. both address ranges
 *	are "over there").
 */
	mach_vm_address_t	source_address,
	mach_vm_address_t	dest_address)
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
		(vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
			(vm_map_address_t)dest_address,
			copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	vm_address_t	source_address,
	vm_address_t	dest_address)
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
		(vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
			(vm_map_address_t)dest_address,
			copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
/*
 *	Map some range of an object into an address space.
 *
 *	The object can be one of several types of objects:
 *		NULL - anonymous memory
 *		a named entry - a range within another address space
 *				or a range within a memory object
 *		a whole memory object
 */
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	vm_object_offset_t	offset,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
	vm_map_offset_t		vmmaddr;

	vmmaddr = (vm_map_offset_t) *address;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_MAP)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_mem_object(target_map,
/* legacy interface */
	vm_offset_t		*address,
	vm_object_offset_t	offset,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
	mach_vm_address_t	map_addr;
	mach_vm_size_t		map_size;
	mach_vm_offset_t	map_mask;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
		cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
/* temporary, until world build */
	vm_offset_t		*address,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
	mach_vm_address_t	map_addr;
	mach_vm_size_t		map_size;
	mach_vm_offset_t	map_mask;
	vm_object_offset_t	obj_offset;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;
	obj_offset = (vm_object_offset_t)offset;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
		port, obj_offset, copy,
		cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
/*
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 */
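/*
 * Illustrative sketch (hypothetical userspace caller, not part of this file):
 * create a second, read-only mapping of an existing range in the same task.
 * "src_addr"/"len" are placeholders for an already-mapped region.
 *
 *	mach_vm_address_t	alias = 0;
 *	vm_prot_t		cur = VM_PROT_NONE, max = VM_PROT_NONE;
 *	kern_return_t		kr;
 *
 *	kr = mach_vm_remap(mach_task_self(), &alias, len, 0,
 *	    VM_FLAGS_ANYWHERE, mach_task_self(), src_addr,
 *	    FALSE,		// copy: FALSE => share the same pages
 *	    &cur, &max, VM_INHERIT_NONE);
 *	if (kr == KERN_SUCCESS)
 *		mach_vm_protect(mach_task_self(), alias, len, FALSE,
 *		    VM_PROT_READ);
 */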
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	mach_vm_offset_t	memory_address,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
	vm_map_offset_t		map_addr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
	*address = map_addr;
/*
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 *
 *	The addressability of the source and target address
 *	range is limited by the size of vm_address_t.
 */
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_offset_t	memory_address,
	vm_prot_t	*cur_protection,
	vm_prot_t	*max_protection,
	vm_inherit_t	inheritance)
	vm_map_offset_t	map_addr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
	*address = CAST_DOWN(vm_offset_t, map_addr);
/*
 * NOTE: these routines (and this file) will no longer require mach_host_server.h
 * when mach_vm_wire and vm_wire are changed to use ledgers.
 */
#include <mach/mach_host_server.h>

/*
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
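/*
 * Illustrative sketch (hypothetical userspace caller, not part of this file):
 * wiring needs the host privilege port ("host_priv" below is assumed to have
 * been obtained by a privileged process); unwiring is the same call with
 * VM_PROT_NONE. "addr"/"len" are placeholders for an existing mapping.
 *
 *	kern_return_t kr;
 *
 *	// Wire the range so it cannot fault for read/write access.
 *	kr = mach_vm_wire(host_priv, mach_task_self(), addr, len,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 *	// Later, unwire it again.
 *	if (kr == KERN_SUCCESS)
 *		mach_vm_wire(host_priv, mach_task_self(), addr, len,
 *		    VM_PROT_NONE);
 */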
	host_priv_t		host_priv,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map,
			vm_map_trunc_page(start,
				VM_MAP_PAGE_MASK(map)),
			vm_map_round_page(start+size,
				VM_MAP_PAGE_MASK(map)),
			access | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_MLOCK),
		rc = vm_map_unwire(map,
			vm_map_trunc_page(start,
				VM_MAP_PAGE_MASK(map)),
			vm_map_round_page(start+size,
				VM_MAP_PAGE_MASK(map)),
/*
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
	host_priv_t	host_priv,
	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if ((access & ~VM_PROT_ALL) || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	} else if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map,
			vm_map_trunc_page(start,
				VM_MAP_PAGE_MASK(map)),
			vm_map_round_page(start+size,
				VM_MAP_PAGE_MASK(map)),
			access | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
		rc = vm_map_unwire(map,
			vm_map_trunc_page(start,
				VM_MAP_PAGE_MASK(map)),
			vm_map_round_page(start+size,
				VM_MAP_PAGE_MASK(map)),
/*
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
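/*
 * Illustrative sketch (hypothetical userspace caller, not part of this file):
 * synchronously flush a dirty range back to its pager and fail if the range
 * contains a hole. "addr"/"len" are placeholders for an existing mapping.
 *
 *	kern_return_t kr;
 *
 *	kr = mach_vm_msync(mach_task_self(), addr, len,
 *	    VM_SYNC_SYNCHRONOUS | VM_SYNC_CONTIGUOUS);
 *	if (kr == KERN_INVALID_ADDRESS) {
 *		// Per the flags above: there was a hole in [addr, addr+len).
 *	}
 */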
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	vm_sync_t		sync_flags)
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
		(vm_map_size_t)size, sync_flags);
/*
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
	vm_address_t	address,
	vm_sync_t	sync_flags)
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
		(vm_map_size_t)size, sync_flags);
vm_toggle_entry_reuse(int toggle, int *old_value)
	vm_map_t map = current_map();

	assert(!map->is_nested_map);
	if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){
		*old_value = map->disable_vmentry_reuse;
	} else if(toggle == VM_TOGGLE_SET){
		vm_map_entry_t map_to_entry;

		vm_map_disable_hole_optimization(map);
		map->disable_vmentry_reuse = TRUE;
		__IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
		if (map->first_free == map_to_entry) {
			map->highest_entry_end = vm_map_min(map);
			map->highest_entry_end = map->first_free->vme_end;
	} else if (toggle == VM_TOGGLE_CLEAR){
		map->disable_vmentry_reuse = FALSE;
		return KERN_INVALID_ARGUMENT;

	return KERN_SUCCESS;
/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
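/*
 * Illustrative sketch (hypothetical userspace caller, not part of this file):
 * mark a free()-style cache of pages as reusable so the kernel may reclaim
 * its contents. "addr"/"len" are placeholders for an allocated region.
 *
 *	kern_return_t kr;
 *
 *	kr = mach_vm_behavior_set(mach_task_self(), addr, len,
 *	    VM_BEHAVIOR_REUSABLE);
 *	// Before touching the range again, tell the VM it is back in use:
 *	if (kr == KERN_SUCCESS)
 *		kr = mach_vm_behavior_set(mach_task_self(), addr, len,
 *		    VM_BEHAVIOR_REUSE);
 */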
mach_vm_behavior_set(
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_behavior_t		new_behavior)
	vm_map_offset_t		align_mask;

	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

		return KERN_SUCCESS;

	switch (new_behavior) {
	case VM_BEHAVIOR_REUSABLE:
	case VM_BEHAVIOR_REUSE:
	case VM_BEHAVIOR_CAN_REUSE:
		/*
		 * Align to the hardware page size, to allow
		 * malloc() to maximize the amount of re-usability,
		 * even on systems with larger software page size.
		 */
		align_mask = PAGE_MASK;
		align_mask = VM_MAP_PAGE_MASK(map);

	return vm_map_behavior_set(map,
		vm_map_trunc_page(start, align_mask),
		vm_map_round_page(start+size, align_mask),
/*
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
	vm_behavior_t	new_behavior)
	if (start + size < start)
		return KERN_INVALID_ARGUMENT;

	return mach_vm_behavior_set(map,
		(mach_vm_offset_t) start,
		(mach_vm_size_t) size,
/*
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	in until the vm merge from the IK is completed, and
 *	vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
	mach_vm_offset_t	*address,		/* IN/OUT */
	mach_vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	flavor,			/* IN */
	vm_region_info_t	info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
		&map_addr, &map_size,
		flavor, info, count,
	*address = map_addr;
/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	in until the vm merge from the IK is completed, and
 *	vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	flavor,			/* IN */
	vm_region_info_t	info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
		&map_addr, &map_size,
		flavor, info, count,
	*address = CAST_DOWN(vm_offset_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	vm_address_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	flavor,			/* IN */
	vm_region_info_t	info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region(map,
		&map_addr, &map_size,
		flavor, info, count,
	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 */
mach_vm_region_recurse(
	mach_vm_address_t	*address,
	mach_vm_size_t		*size,
	vm_region_recurse_info_t	info,
	mach_msg_type_number_t	*infoCnt)
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
		(vm_region_submap_info_64_t)info,
	*address = map_addr;
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 */
vm_region_recurse_64(
	vm_address_t		*address,
	vm_region_recurse_info_64_t	info,
	mach_msg_type_number_t	*infoCnt)
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
		(vm_region_submap_info_64_t)info,
	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	natural_t		*depth,			/* IN/OUT */
	vm_region_recurse_info_t	info32,		/* IN/OUT */
	mach_msg_type_number_t	*infoCnt)		/* IN/OUT */
	vm_region_submap_info_data_64_t	info64;
	vm_region_submap_info_t	info;
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
		depth, &info64, infoCnt);

	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
mach_vm_purgable_control(
	mach_vm_offset_t	address,
	vm_purgable_t		control,
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
		vm_map_trunc_page(address, PAGE_MASK),

vm_purgable_control(
	vm_offset_t	address,
	vm_purgable_t	control,
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
		vm_map_trunc_page(address, PAGE_MASK),
/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host priv port).  Set this variable to zero if
 *	you want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
	host_priv_t		host_priv,
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
		return KERN_INVALID_HOST;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*addr;
	map_size = (vm_map_size_t)size;

	kr = vm_map_enter_cpm(map,
	*addr = CAST_DOWN(vm_address_t, map_addr);
	mach_vm_offset_t	offset,
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(
		vm_map_trunc_page(offset, PAGE_MASK),
		disposition, ref_count);

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(
		vm_map_trunc_page(offset, PAGE_MASK),
		disposition, ref_count);
	mach_vm_address_t	address,
	vm_page_info_flavor_t	flavor,
	vm_page_info_t		info,
	mach_msg_type_number_t	*count)
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_page_info(map, address, flavor, info, count);
/* map a (whole) upl into an address space */
	vm_address_t		*dst_addr)
	vm_map_offset_t		map_addr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_upl(map, upl, &map_addr);
	*dst_addr = CAST_DOWN(vm_address_t, map_addr);

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return (vm_map_remove_upl(map, upl));

/* Retrieve a upl for an object underlying an address range in a map */
	vm_map_offset_t		map_offset,
	upl_size_t		*upl_size,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	upl_control_flags_t	*flags,
	int			force_data_sync)
	upl_control_flags_t	map_flags;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_flags = *flags & ~UPL_NOZEROFILL;
	if (force_data_sync)
		map_flags |= UPL_FORCE_DATA_SYNC;

	kr = vm_map_create_upl(map,
	*flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
/*
 *	mach_make_memory_entry_64
 *
 *	Think of it as a two-stage vm_remap() operation.  First
 *	you get a handle.  Second, you map that handle in
 *	somewhere else. Rather than doing it all at once (and
 *	without needing access to the other whole map).
 */
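/*
 * Illustrative sketch (hypothetical userspace caller, not part of this file):
 * stage 1 makes a named-entry handle for an existing range, stage 2 maps that
 * handle at a new address (here in the same task, though the handle could be
 * sent to another task over IPC). "src_addr"/"len" are placeholders.
 *
 *	memory_object_size_t	entry_size = len;
 *	mach_port_t		entry = MACH_PORT_NULL;
 *	mach_vm_address_t	mapped = 0;
 *	kern_return_t		kr;
 *
 *	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size,
 *	    src_addr, VM_PROT_READ | VM_PROT_WRITE, &entry, MACH_PORT_NULL);
 *	if (kr == KERN_SUCCESS)
 *		kr = mach_vm_map(mach_task_self(), &mapped, entry_size, 0,
 *		    VM_FLAGS_ANYWHERE, entry, 0, FALSE,
 *		    VM_PROT_READ | VM_PROT_WRITE,
 *		    VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
 */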
1881 mach_make_memory_entry_64(
1882 vm_map_t target_map
,
1883 memory_object_size_t
*size
,
1884 memory_object_offset_t offset
,
1885 vm_prot_t permission
,
1886 ipc_port_t
*object_handle
,
1887 ipc_port_t parent_handle
)
1889 vm_map_version_t version
;
1890 vm_named_entry_t parent_entry
;
1891 vm_named_entry_t user_entry
;
1892 ipc_port_t user_handle
;
1896 /* needed for call to vm_map_lookup_locked */
1899 vm_object_offset_t obj_off
;
1901 struct vm_object_fault_info fault_info
;
1903 vm_object_t shadow_object
;
1905 /* needed for direct map entry manipulation */
1906 vm_map_entry_t map_entry
;
1907 vm_map_entry_t next_entry
;
1909 vm_map_t original_map
= target_map
;
1910 vm_map_size_t total_size
, map_size
;
1911 vm_map_offset_t map_start
, map_end
;
1912 vm_map_offset_t local_offset
;
1913 vm_object_size_t mappable_size
;
	/*
	 * Stash the offset in the page for use by vm_map_enter_mem_object()
	 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
	 */
	vm_object_offset_t	offset_in_page;
1921 unsigned int access
;
1922 vm_prot_t protections
;
1923 vm_prot_t original_protections
, mask_protections
;
1924 unsigned int wimg_mode
;
1926 boolean_t force_shadow
= FALSE
;
1927 boolean_t use_data_addr
;
1928 boolean_t use_4K_compat
;
1930 if (((permission
& 0x00FF0000) &
1932 MAP_MEM_NAMED_CREATE
|
1933 MAP_MEM_GRAB_SECLUDED
| /* XXX FBDP TODO: restrict usage? */
1935 MAP_MEM_NAMED_REUSE
|
1936 MAP_MEM_USE_DATA_ADDR
|
1938 MAP_MEM_4K_DATA_ADDR
|
1939 MAP_MEM_VM_SHARE
))) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		return KERN_INVALID_VALUE;
1946 if (parent_handle
!= IP_NULL
&&
1947 ip_kotype(parent_handle
) == IKOT_NAMED_ENTRY
) {
1948 parent_entry
= (vm_named_entry_t
) parent_handle
->ip_kobject
;
1950 parent_entry
= NULL
;
1953 if (parent_entry
&& parent_entry
->is_copy
) {
1954 return KERN_INVALID_ARGUMENT
;
1957 original_protections
= permission
& VM_PROT_ALL
;
1958 protections
= original_protections
;
1959 mask_protections
= permission
& VM_PROT_IS_MASK
;
1960 access
= GET_MAP_MEM(permission
);
1961 use_data_addr
= ((permission
& MAP_MEM_USE_DATA_ADDR
) != 0);
1962 use_4K_compat
= ((permission
& MAP_MEM_4K_DATA_ADDR
) != 0);
1964 user_handle
= IP_NULL
;
1967 map_start
= vm_map_trunc_page(offset
, PAGE_MASK
);
1969 if (permission
& MAP_MEM_ONLY
) {
1970 boolean_t parent_is_object
;
1972 map_end
= vm_map_round_page(offset
+ *size
, PAGE_MASK
);
1973 map_size
= map_end
- map_start
;
1975 if (use_data_addr
|| use_4K_compat
|| parent_entry
== NULL
) {
1976 return KERN_INVALID_ARGUMENT
;
1979 parent_is_object
= !(parent_entry
->is_sub_map
||
1980 parent_entry
->is_pager
);
1981 object
= parent_entry
->backing
.object
;
1982 if(parent_is_object
&& object
!= VM_OBJECT_NULL
)
1983 wimg_mode
= object
->wimg_bits
;
1985 wimg_mode
= VM_WIMG_USE_DEFAULT
;
1986 if((access
!= GET_MAP_MEM(parent_entry
->protection
)) &&
1987 !(parent_entry
->protection
& VM_PROT_WRITE
)) {
1988 return KERN_INVALID_RIGHT
;
1990 if(access
== MAP_MEM_IO
) {
1991 SET_MAP_MEM(access
, parent_entry
->protection
);
1992 wimg_mode
= VM_WIMG_IO
;
1993 } else if (access
== MAP_MEM_COPYBACK
) {
1994 SET_MAP_MEM(access
, parent_entry
->protection
);
1995 wimg_mode
= VM_WIMG_USE_DEFAULT
;
1996 } else if (access
== MAP_MEM_INNERWBACK
) {
1997 SET_MAP_MEM(access
, parent_entry
->protection
);
1998 wimg_mode
= VM_WIMG_INNERWBACK
;
1999 } else if (access
== MAP_MEM_WTHRU
) {
2000 SET_MAP_MEM(access
, parent_entry
->protection
);
2001 wimg_mode
= VM_WIMG_WTHRU
;
2002 } else if (access
== MAP_MEM_WCOMB
) {
2003 SET_MAP_MEM(access
, parent_entry
->protection
);
2004 wimg_mode
= VM_WIMG_WCOMB
;
2006 if (parent_is_object
&& object
&&
2007 (access
!= MAP_MEM_NOOP
) &&
2008 (!(object
->nophyscache
))) {
2010 if (object
->wimg_bits
!= wimg_mode
) {
2011 vm_object_lock(object
);
2012 vm_object_change_wimg_mode(object
, wimg_mode
);
2013 vm_object_unlock(object
);
2017 *object_handle
= IP_NULL
;
2018 return KERN_SUCCESS
;
2019 } else if (permission
& MAP_MEM_NAMED_CREATE
) {
2020 map_end
= vm_map_round_page(offset
+ *size
, PAGE_MASK
);
2021 map_size
= map_end
- map_start
;
2023 if (use_data_addr
|| use_4K_compat
) {
2024 return KERN_INVALID_ARGUMENT
;
2027 kr
= mach_memory_entry_allocate(&user_entry
, &user_handle
);
2028 if (kr
!= KERN_SUCCESS
) {
2029 return KERN_FAILURE
;
		/*
		 * Force the creation of the VM object now.
		 */
		if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
			/*
			 * LP64todo - for now, we can only allocate 4GB-4096
			 * internal objects because the default pager can't
			 * page bigger ones.  Remove this when it can.
			 */
2045 object
= vm_object_allocate(map_size
);
2046 assert(object
!= VM_OBJECT_NULL
);
2048 if (permission
& MAP_MEM_PURGABLE
) {
2049 if (! (permission
& VM_PROT_WRITE
)) {
2050 /* if we can't write, we can't purge */
2051 vm_object_deallocate(object
);
2052 kr
= KERN_INVALID_ARGUMENT
;
2055 object
->purgable
= VM_PURGABLE_NONVOLATILE
;
2056 assert(object
->vo_purgeable_owner
== NULL
);
2057 assert(object
->resident_page_count
== 0);
2058 assert(object
->wired_page_count
== 0);
2059 vm_object_lock(object
);
2060 vm_purgeable_nonvolatile_enqueue(object
,
2062 vm_object_unlock(object
);
2065 #if CONFIG_SECLUDED_MEMORY
2066 if (secluded_for_iokit
&& /* global boot-arg */
2067 ((permission
& MAP_MEM_GRAB_SECLUDED
)
2069 /* XXX FBDP for my testing only */
2070 || (secluded_for_fbdp
&& map_size
== 97550336)
2074 if (!(permission
& MAP_MEM_GRAB_SECLUDED
) &&
2075 secluded_for_fbdp
) {
2076 printf("FBDP: object %p size %lld can grab secluded\n", object
, (uint64_t) map_size
);
2079 object
->can_grab_secluded
= TRUE
;
2080 assert(!object
->eligible_for_secluded
);
2082 #endif /* CONFIG_SECLUDED_MEMORY */
2085 * The VM object is brand new and nobody else knows about it,
2086 * so we don't need to lock it.
2089 wimg_mode
= object
->wimg_bits
;
2090 if (access
== MAP_MEM_IO
) {
2091 wimg_mode
= VM_WIMG_IO
;
2092 } else if (access
== MAP_MEM_COPYBACK
) {
2093 wimg_mode
= VM_WIMG_USE_DEFAULT
;
2094 } else if (access
== MAP_MEM_INNERWBACK
) {
2095 wimg_mode
= VM_WIMG_INNERWBACK
;
2096 } else if (access
== MAP_MEM_WTHRU
) {
2097 wimg_mode
= VM_WIMG_WTHRU
;
2098 } else if (access
== MAP_MEM_WCOMB
) {
2099 wimg_mode
= VM_WIMG_WCOMB
;
2101 if (access
!= MAP_MEM_NOOP
) {
2102 object
->wimg_bits
= wimg_mode
;
2104 /* the object has no pages, so no WIMG bits to update here */
		/*
		 * We use this path when we want to make sure that
		 * nobody messes with the object (coalesce, for
		 * example) before we map it.
		 * We might want to use these objects for transposition via
		 * vm_object_transpose() too, so we don't want any copy or
		 * shadow objects either...
		 */
2115 object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
2116 object
->true_share
= TRUE
;
2118 user_entry
->backing
.object
= object
;
2119 user_entry
->internal
= TRUE
;
2120 user_entry
->is_sub_map
= FALSE
;
2121 user_entry
->is_pager
= FALSE
;
2122 user_entry
->offset
= 0;
2123 user_entry
->data_offset
= 0;
2124 user_entry
->protection
= protections
;
2125 SET_MAP_MEM(access
, user_entry
->protection
);
2126 user_entry
->size
= map_size
;
2128 /* user_object pager and internal fields are not used */
2129 /* when the object field is filled in. */
2131 *size
= CAST_DOWN(vm_size_t
, (user_entry
->size
-
2132 user_entry
->data_offset
));
2133 *object_handle
= user_handle
;
2134 return KERN_SUCCESS
;
2137 if (permission
& MAP_MEM_VM_COPY
) {
2140 if (target_map
== VM_MAP_NULL
) {
2141 return KERN_INVALID_TASK
;
2144 map_end
= vm_map_round_page(offset
+ *size
, PAGE_MASK
);
2145 map_size
= map_end
- map_start
;
2146 if (use_data_addr
|| use_4K_compat
) {
2147 offset_in_page
= offset
- map_start
;
2149 offset_in_page
&= ~((signed)(0xFFF));
2154 kr
= vm_map_copyin_internal(target_map
,
2157 VM_MAP_COPYIN_ENTRY_LIST
,
2159 if (kr
!= KERN_SUCCESS
) {
2163 kr
= mach_memory_entry_allocate(&user_entry
, &user_handle
);
2164 if (kr
!= KERN_SUCCESS
) {
2165 vm_map_copy_discard(copy
);
2166 return KERN_FAILURE
;
2169 user_entry
->backing
.copy
= copy
;
2170 user_entry
->internal
= FALSE
;
2171 user_entry
->is_sub_map
= FALSE
;
2172 user_entry
->is_pager
= FALSE
;
2173 user_entry
->is_copy
= TRUE
;
2174 user_entry
->offset
= 0;
2175 user_entry
->protection
= protections
;
2176 user_entry
->size
= map_size
;
2177 user_entry
->data_offset
= offset_in_page
;
2179 *size
= CAST_DOWN(vm_size_t
, (user_entry
->size
-
2180 user_entry
->data_offset
));
2181 *object_handle
= user_handle
;
2182 return KERN_SUCCESS
;
2185 if (permission
& MAP_MEM_VM_SHARE
) {
2187 vm_prot_t cur_prot
, max_prot
;
2189 if (target_map
== VM_MAP_NULL
) {
2190 return KERN_INVALID_TASK
;
2193 map_end
= vm_map_round_page(offset
+ *size
, PAGE_MASK
);
2194 map_size
= map_end
- map_start
;
2195 if (use_data_addr
|| use_4K_compat
) {
2196 offset_in_page
= offset
- map_start
;
2198 offset_in_page
&= ~((signed)(0xFFF));
2203 cur_prot
= VM_PROT_ALL
;
2204 kr
= vm_map_copy_extract(target_map
,
2210 if (kr
!= KERN_SUCCESS
) {
2214 if (mask_protections
) {
2216 * We just want as much of "original_protections"
2217 * as we can get out of the actual "cur_prot".
2219 protections
&= cur_prot
;
2220 if (protections
== VM_PROT_NONE
) {
2221 /* no access at all: fail */
2222 vm_map_copy_discard(copy
);
2223 return KERN_PROTECTION_FAILURE
;
2227 * We want exactly "original_protections"
2228 * out of "cur_prot".
2230 if ((cur_prot
& protections
) != protections
) {
2231 vm_map_copy_discard(copy
);
2232 return KERN_PROTECTION_FAILURE
;
2236 kr
= mach_memory_entry_allocate(&user_entry
, &user_handle
);
2237 if (kr
!= KERN_SUCCESS
) {
2238 vm_map_copy_discard(copy
);
2239 return KERN_FAILURE
;
2242 user_entry
->backing
.copy
= copy
;
2243 user_entry
->internal
= FALSE
;
2244 user_entry
->is_sub_map
= FALSE
;
2245 user_entry
->is_pager
= FALSE
;
2246 user_entry
->is_copy
= TRUE
;
2247 user_entry
->offset
= 0;
2248 user_entry
->protection
= protections
;
2249 user_entry
->size
= map_size
;
2250 user_entry
->data_offset
= offset_in_page
;
2252 *size
= CAST_DOWN(vm_size_t
, (user_entry
->size
-
2253 user_entry
->data_offset
));
2254 *object_handle
= user_handle
;
2255 return KERN_SUCCESS
;
2258 if (parent_entry
== NULL
||
2259 (permission
& MAP_MEM_NAMED_REUSE
)) {
2261 map_end
= vm_map_round_page(offset
+ *size
, PAGE_MASK
);
2262 map_size
= map_end
- map_start
;
2263 if (use_data_addr
|| use_4K_compat
) {
2264 offset_in_page
= offset
- map_start
;
2266 offset_in_page
&= ~((signed)(0xFFF));
2271 /* Create a named object based on address range within the task map */
2272 /* Go find the object at given address */
2274 if (target_map
== VM_MAP_NULL
) {
2275 return KERN_INVALID_TASK
;
2279 protections
= original_protections
;
2280 vm_map_lock_read(target_map
);
2282 /* get the object associated with the target address */
2283 /* note we check the permission of the range against */
2284 /* that requested by the caller */
2286 kr
= vm_map_lookup_locked(&target_map
, map_start
,
2287 protections
| mask_protections
,
2288 OBJECT_LOCK_EXCLUSIVE
, &version
,
2289 &object
, &obj_off
, &prot
, &wired
,
2292 if (kr
!= KERN_SUCCESS
) {
2293 vm_map_unlock_read(target_map
);
2296 if (mask_protections
) {
2298 * The caller asked us to use the "protections" as
2299 * a mask, so restrict "protections" to what this
2300 * mapping actually allows.
2302 protections
&= prot
;
2305 if (((prot
& protections
) != protections
)
2306 || (object
== kernel_object
)) {
2307 kr
= KERN_INVALID_RIGHT
;
2308 vm_object_unlock(object
);
2309 vm_map_unlock_read(target_map
);
2310 if(real_map
!= target_map
)
2311 vm_map_unlock_read(real_map
);
2312 if(object
== kernel_object
) {
2313 printf("Warning: Attempt to create a named"
2314 " entry from the kernel_object\n");
	/* We have an object, now check to see if this object */
	/* is suitable.  If not, create a shadow and share that */

	/*
	 * We have to unlock the VM object to avoid deadlocking with
	 * a VM map lock (the lock ordering is map, then object), if we
	 * need to modify the VM map to create a shadow object.  Since
	 * we might release the VM map lock below anyway, we have
	 * to release the VM map lock now.
	 * XXX FBDP There must be a way to avoid this double lookup...
	 *
	 * Take an extra reference on the VM object to make sure it's
	 * not going to disappear.
	 */
2333 vm_object_reference_locked(object
); /* extra ref to hold obj */
2334 vm_object_unlock(object
);
2336 local_map
= original_map
;
2337 local_offset
= map_start
;
2338 if(target_map
!= local_map
) {
2339 vm_map_unlock_read(target_map
);
2340 if(real_map
!= target_map
)
2341 vm_map_unlock_read(real_map
);
2342 vm_map_lock_read(local_map
);
2343 target_map
= local_map
;
2344 real_map
= local_map
;
2347 if(!vm_map_lookup_entry(local_map
,
2348 local_offset
, &map_entry
)) {
2349 kr
= KERN_INVALID_ARGUMENT
;
2350 vm_map_unlock_read(target_map
);
2351 if(real_map
!= target_map
)
2352 vm_map_unlock_read(real_map
);
2353 vm_object_deallocate(object
); /* release extra ref */
2354 object
= VM_OBJECT_NULL
;
2357 iskernel
= (local_map
->pmap
== kernel_pmap
);
2358 if(!(map_entry
->is_sub_map
)) {
2359 if (VME_OBJECT(map_entry
) != object
) {
2360 kr
= KERN_INVALID_ARGUMENT
;
2361 vm_map_unlock_read(target_map
);
2362 if(real_map
!= target_map
)
2363 vm_map_unlock_read(real_map
);
2364 vm_object_deallocate(object
); /* release extra ref */
2365 object
= VM_OBJECT_NULL
;
2372 local_map
= VME_SUBMAP(map_entry
);
2374 vm_map_lock_read(local_map
);
2375 vm_map_unlock_read(tmap
);
2376 target_map
= local_map
;
2377 real_map
= local_map
;
2378 local_offset
= local_offset
- map_entry
->vme_start
;
2379 local_offset
+= VME_OFFSET(map_entry
);
2384 * We found the VM map entry, lock the VM object again.
2386 vm_object_lock(object
);
2387 if(map_entry
->wired_count
) {
2388 /* JMM - The check below should be reworked instead. */
2389 object
->true_share
= TRUE
;
2391 if (mask_protections
) {
2393 * The caller asked us to use the "protections" as
2394 * a mask, so restrict "protections" to what this
2395 * mapping actually allows.
2397 protections
&= map_entry
->max_protection
;
2399 if(((map_entry
->max_protection
) & protections
) != protections
) {
2400 kr
= KERN_INVALID_RIGHT
;
2401 vm_object_unlock(object
);
2402 vm_map_unlock_read(target_map
);
2403 if(real_map
!= target_map
)
2404 vm_map_unlock_read(real_map
);
2405 vm_object_deallocate(object
);
2406 object
= VM_OBJECT_NULL
;
2410 mappable_size
= fault_info
.hi_offset
- obj_off
;
2411 total_size
= map_entry
->vme_end
- map_entry
->vme_start
;
2412 if(map_size
> mappable_size
) {
2413 /* try to extend mappable size if the entries */
2414 /* following are from the same object and are */
2416 next_entry
= map_entry
->vme_next
;
2417 /* lets see if the next map entry is still */
2418 /* pointing at this object and is contiguous */
2419 while(map_size
> mappable_size
) {
2420 if ((VME_OBJECT(next_entry
) == object
) &&
2421 (next_entry
->vme_start
==
2422 next_entry
->vme_prev
->vme_end
) &&
2423 (VME_OFFSET(next_entry
) ==
2424 (VME_OFFSET(next_entry
->vme_prev
) +
2425 (next_entry
->vme_prev
->vme_end
-
2426 next_entry
->vme_prev
->vme_start
)))) {
2427 if (mask_protections
) {
2429 * The caller asked us to use
2430 * the "protections" as a mask,
2431 * so restrict "protections" to
2432 * what this mapping actually
2435 protections
&= next_entry
->max_protection
;
2437 if ((next_entry
->wired_count
) &&
2438 (map_entry
->wired_count
== 0)) {
2441 if(((next_entry
->max_protection
)
2442 & protections
) != protections
) {
2445 if (next_entry
->needs_copy
!=
2446 map_entry
->needs_copy
)
2448 mappable_size
+= next_entry
->vme_end
2449 - next_entry
->vme_start
;
2450 total_size
+= next_entry
->vme_end
2451 - next_entry
->vme_start
;
2452 next_entry
= next_entry
->vme_next
;
2460 /* vm_map_entry_should_cow_for_true_share() checks for malloc tags,
2461 * never true in kernel */
2462 if (!iskernel
&& vm_map_entry_should_cow_for_true_share(map_entry
) &&
2463 object
->vo_size
> map_size
&&
2466 * Set up the targeted range for copy-on-write to
2467 * limit the impact of "true_share"/"copy_delay" to
2468 * that range instead of the entire VM object...
2471 vm_object_unlock(object
);
2472 if (vm_map_lock_read_to_write(target_map
)) {
2473 vm_object_deallocate(object
);
2474 target_map
= original_map
;
2478 vm_map_clip_start(target_map
,
2480 vm_map_trunc_page(map_start
,
2481 VM_MAP_PAGE_MASK(target_map
)));
2482 vm_map_clip_end(target_map
,
2484 (vm_map_round_page(map_end
,
2485 VM_MAP_PAGE_MASK(target_map
))));
2486 force_shadow
= TRUE
;
2488 if ((map_entry
->vme_end
- offset
) < map_size
) {
2489 map_size
= map_entry
->vme_end
- map_start
;
2491 total_size
= map_entry
->vme_end
- map_entry
->vme_start
;
2493 vm_map_lock_write_to_read(target_map
);
2494 vm_object_lock(object
);
		if (object->internal) {
			/* vm_map_lookup_locked will create a shadow if   */
			/* needs_copy is set but does not check for the   */
			/* other two conditions shown. It is important to */
			/* set up an object which will not be pulled from */
			/* under us.  */

			if (force_shadow ||
			    ((map_entry->needs_copy ||
			      object->shadowed ||
			      (object->vo_size > total_size &&
			       (VME_OFFSET(map_entry) != 0 ||
				object->vo_size >
				vm_map_round_page(total_size,
						  VM_MAP_PAGE_MASK(target_map)))))
			     && !object->true_share)) {
				/*
				 * We have to unlock the VM object before
				 * trying to upgrade the VM map lock, to
				 * honor lock ordering (map then object).
				 * Otherwise, we would deadlock if another
				 * thread holds a read lock on the VM map and
				 * is trying to acquire the VM object's lock.
				 * We still hold an extra reference on the
				 * VM object, guaranteeing that it won't
				 * disappear.
				 */
				vm_object_unlock(object);

				if (vm_map_lock_read_to_write(target_map)) {
					/*
					 * We couldn't upgrade our VM map lock
					 * from "read" to "write" and we lost
					 * our "read" lock.
					 * Start all over again...
					 */
					vm_object_deallocate(object); /* extra ref */
					target_map = original_map;
					goto redo_lookup;
				}
#if 00
				vm_object_lock(object);
#endif

				/*
				 * JMM - We need to avoid coming here when the object
				 * is wired by anybody, not just the current map.  Why
				 * couldn't we use the standard vm_object_copy_quickly()
				 * approach here?
				 */

				/* create a shadow object */
				VME_OBJECT_SHADOW(map_entry, total_size);
				shadow_object = VME_OBJECT(map_entry);
#if 00
				vm_object_unlock(object);
#endif

				prot = map_entry->protection & ~VM_PROT_WRITE;

				if (override_nx(target_map,
						VME_ALIAS(map_entry))
				    && prot)
					prot |= VM_PROT_EXECUTE;

				vm_object_pmap_protect(
					object, VME_OFFSET(map_entry),
					total_size,
					((map_entry->is_shared
					  || target_map->mapped_in_other_pmaps)
					 ? PMAP_NULL :
					 target_map->pmap),
					map_entry->vme_start,
					prot);
				total_size -= (map_entry->vme_end
					       - map_entry->vme_start);
				next_entry = map_entry->vme_next;
				map_entry->needs_copy = FALSE;

				vm_object_lock(shadow_object);
				while (total_size) {
					assert((next_entry->wired_count == 0) ||
					       (map_entry->wired_count));

					if (VME_OBJECT(next_entry) == object) {
						vm_object_reference_locked(shadow_object);
						VME_OBJECT_SET(next_entry,
							       shadow_object);
						vm_object_deallocate(object);
						VME_OFFSET_SET(
							next_entry,
							(VME_OFFSET(next_entry->vme_prev) +
							 (next_entry->vme_prev->vme_end
							  - next_entry->vme_prev->vme_start)));
						next_entry->needs_copy = FALSE;
					} else {
						panic("mach_make_memory_entry_64:"
						      " map entries out of sync\n");
					}
					total_size -=
						next_entry->vme_end
						- next_entry->vme_start;
					next_entry = next_entry->vme_next;
				}

				/*
				 * Transfer our extra reference to the
				 * shadow object.
				 */
				vm_object_reference_locked(shadow_object);
				vm_object_deallocate(object); /* extra ref */
				object = shadow_object;

				obj_off = ((local_offset - map_entry->vme_start)
					   + VME_OFFSET(map_entry));

				vm_map_lock_write_to_read(target_map);
			}
		}
		/* note: in the future we can (if necessary) allow for  */
		/* memory object lists, this will better support        */
		/* fragmentation, but is it necessary?  The user should */
		/* be encouraged to create address space oriented       */
		/* shared objects from CLEAN memory regions which have  */
		/* a known and defined history.  i.e. no inheritance    */
		/* share, make this call before making the region the   */
		/* target of ipc's, etc.  The code above, protecting    */
		/* against delayed copy, etc. is mostly defensive.      */

		wimg_mode = object->wimg_bits;
		if (!(object->nophyscache)) {
			if (access == MAP_MEM_IO) {
				wimg_mode = VM_WIMG_IO;
			} else if (access == MAP_MEM_COPYBACK) {
				wimg_mode = VM_WIMG_USE_DEFAULT;
			} else if (access == MAP_MEM_INNERWBACK) {
				wimg_mode = VM_WIMG_INNERWBACK;
			} else if (access == MAP_MEM_WTHRU) {
				wimg_mode = VM_WIMG_WTHRU;
			} else if (access == MAP_MEM_WCOMB) {
				wimg_mode = VM_WIMG_WCOMB;
			}
		}

#if VM_OBJECT_TRACKING_OP_TRUESHARE
		if (!object->true_share &&
		    vm_object_tracking_inited) {
			void *bt[VM_OBJECT_TRACKING_BTDEPTH];
			int num = 0;

			num = OSBacktrace(bt,
					  VM_OBJECT_TRACKING_BTDEPTH);
			btlog_add_entry(vm_object_tracking_btlog,
					object,
					VM_OBJECT_TRACKING_OP_TRUESHARE,
					bt,
					num);
		}
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */

		vm_object_lock_assert_exclusive(object);
		object->true_share = TRUE;
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;

		/*
		 * The memory entry now points to this VM object and we
		 * need to hold a reference on the VM object.  Use the extra
		 * reference we took earlier to keep the object alive when we
		 * had to unlock it.
		 */

		vm_map_unlock_read(target_map);
		if (real_map != target_map)
			vm_map_unlock_read(real_map);

		if (object->wimg_bits != wimg_mode)
			vm_object_change_wimg_mode(object, wimg_mode);

		/* the size of mapped entry that overlaps with our region */
		/* which is targeted for share.                           */
		/* (entry_end - entry_start) -                            */
		/*                   offset of our beg addr within entry  */
		/* it corresponds to this:                                */

		if (map_size > mappable_size)
			map_size = mappable_size;
		if (permission & MAP_MEM_NAMED_REUSE) {
			/*
			 * Compare what we got with the "parent_entry".
			 * If they match, re-use the "parent_entry" instead
			 * of creating a new one.
			 */
			if (parent_entry != NULL &&
			    parent_entry->backing.object == object &&
			    parent_entry->internal == object->internal &&
			    parent_entry->is_sub_map == FALSE &&
			    parent_entry->is_pager == FALSE &&
			    parent_entry->offset == obj_off &&
			    parent_entry->protection == protections &&
			    parent_entry->size == map_size &&
			    ((!(use_data_addr || use_4K_compat) &&
			      (parent_entry->data_offset == 0)) ||
			     ((use_data_addr || use_4K_compat) &&
			      (parent_entry->data_offset == offset_in_page)))) {
				/*
				 * We have a match: re-use "parent_entry".
				 */
				/* release our extra reference on object */
				vm_object_unlock(object);
				vm_object_deallocate(object);
				/* parent_entry->ref_count++; XXX ? */
				/* Get an extra send-right on handle */
				ipc_port_copy_send(parent_handle);

				*size = CAST_DOWN(vm_size_t,
						  (parent_entry->size -
						   parent_entry->data_offset));
				*object_handle = parent_handle;
				return KERN_SUCCESS;
			} else {
				/*
				 * No match: we need to create a new entry.
				 * fall through...
				 */
			}
		}

		vm_object_unlock(object);
		if (mach_memory_entry_allocate(&user_entry, &user_handle)
		    != KERN_SUCCESS) {
			/* release our unused reference on the object */
			vm_object_deallocate(object);
			return KERN_FAILURE;
		}

		user_entry->backing.object = object;
		user_entry->internal = object->internal;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->offset = obj_off;
		user_entry->data_offset = offset_in_page;
		user_entry->protection = protections;
		SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
		user_entry->size = map_size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in.		      */

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	} else {
		/* The new object will be based on an existing named object */
		if (parent_entry == NULL) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}

		if (use_data_addr || use_4K_compat) {
			/*
			 * submaps and pagers should only be accessible from within
			 * the kernel, which shouldn't use the data address flag, so can fail here.
			 */
			if (parent_entry->is_pager || parent_entry->is_sub_map) {
				panic("Shouldn't be using data address with a parent entry that is a submap or pager.");
			}
			/*
			 * Account for offset to data in parent entry and
			 * compute our own offset to data.
			 */
			if ((offset + *size + parent_entry->data_offset) > parent_entry->size) {
				kr = KERN_INVALID_ARGUMENT;
				goto make_mem_done;
			}

			map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
			offset_in_page = (offset + parent_entry->data_offset) - map_start;
			if (use_4K_compat)
				offset_in_page &= ~((signed)(0xFFF));
			map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
			map_size = map_end - map_start;
		} else {
			map_end = vm_map_round_page(offset + *size, PAGE_MASK);
			map_size = map_end - map_start;

			if ((offset + map_size) > parent_entry->size) {
				kr = KERN_INVALID_ARGUMENT;
				goto make_mem_done;
			}
		}

		if (mask_protections) {
			/*
			 * The caller asked us to use the "protections" as
			 * a mask, so restrict "protections" to what this
			 * mapping actually allows.
			 */
			protections &= parent_entry->protection;
		}
		if ((protections & parent_entry->protection) != protections) {
			kr = KERN_PROTECTION_FAILURE;
			goto make_mem_done;
		}

		if (mach_memory_entry_allocate(&user_entry, &user_handle)
		    != KERN_SUCCESS) {
			kr = KERN_FAILURE;
			goto make_mem_done;
		}

		user_entry->size = map_size;
		user_entry->offset = parent_entry->offset + map_start;
		user_entry->data_offset = offset_in_page;
		user_entry->is_sub_map = parent_entry->is_sub_map;
		user_entry->is_pager = parent_entry->is_pager;
		user_entry->is_copy = parent_entry->is_copy;
		user_entry->internal = parent_entry->internal;
		user_entry->protection = protections;

		if (access != MAP_MEM_NOOP) {
			SET_MAP_MEM(access, user_entry->protection);
		}

		if (parent_entry->is_sub_map) {
			user_entry->backing.map = parent_entry->backing.map;
			vm_map_lock(user_entry->backing.map);
			user_entry->backing.map->ref_count++;
			vm_map_unlock(user_entry->backing.map);
		}
		else if (parent_entry->is_pager) {
			user_entry->backing.pager = parent_entry->backing.pager;
			/* JMM - don't we need a reference here? */
		} else {
			object = parent_entry->backing.object;
			assert(object != VM_OBJECT_NULL);
			user_entry->backing.object = object;
			/* we now point to this object, hold on */
			vm_object_lock(object);
			vm_object_reference_locked(object);
#if VM_OBJECT_TRACKING_OP_TRUESHARE
			if (!object->true_share &&
			    vm_object_tracking_inited) {
				void *bt[VM_OBJECT_TRACKING_BTDEPTH];
				int num = 0;

				num = OSBacktrace(bt,
						  VM_OBJECT_TRACKING_BTDEPTH);
				btlog_add_entry(vm_object_tracking_btlog,
						object,
						VM_OBJECT_TRACKING_OP_TRUESHARE,
						bt,
						num);
			}
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */

			object->true_share = TRUE;
			if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
				object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
			vm_object_unlock(object);
		}
		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}

make_mem_done:
	if (user_handle != IP_NULL) {
		/*
		 * Releasing "user_handle" causes the kernel object
		 * associated with it ("user_entry" here) to also be
		 * released and freed.
		 */
		mach_memory_entry_port_release(user_handle);
	}
	return kr;
}
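
/*
 * Illustrative sketch (not part of the exported interface): a hypothetical
 * in-kernel caller that names a range of "map" while treating the requested
 * protections as a mask (VM_PROT_IS_MASK) and allowing re-use of a previously
 * created entry via MAP_MEM_NAMED_REUSE.  The helper name and its error
 * handling are assumptions for illustration only.
 */
#if 0
static kern_return_t
example_name_range(
	vm_map_t		map,
	mach_vm_offset_t	offset,
	memory_object_size_t	*size,		/* in/out: rounded by the call */
	ipc_port_t		prev_entry,	/* IP_NULL or an earlier handle */
	ipc_port_t		*out_entry)
{
	vm_prot_t	perm;

	/* ask for at most read/write; the mask bit lets the call clamp
	 * "perm" to whatever the underlying mapping actually allows */
	perm = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_IS_MASK;
	if (prev_entry != IP_NULL)
		perm |= MAP_MEM_NAMED_REUSE;

	return mach_make_memory_entry_64(map, size,
					 (memory_object_offset_t)offset,
					 perm, out_entry, prev_entry);
}
#endif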
kern_return_t
_mach_make_memory_entry(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_size_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
				       (memory_object_offset_t)offset, permission, object_handle,
				       parent_entry);
	*size = mo_size;
	return kr;
}

kern_return_t
mach_make_memory_entry(
	vm_map_t		target_map,
	vm_size_t		*size,
	vm_offset_t		offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_size_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
				       (memory_object_offset_t)offset, permission, object_handle,
				       parent_entry);
	*size = CAST_DOWN(vm_size_t, mo_size);
	return kr;
}
/*
 *	task_wire
 *
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (must_wire)
		map->wiring_required = TRUE;
	else
		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
}
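
/*
 * Illustrative sketch: a hypothetical caller that forces all future
 * allocations in "map" to come up wired, performs one allocation, and then
 * restores the default.  mach_vm_allocate() is used purely as an example of
 * an allocation affected by wiring_required; the helper name is an assumption.
 */
#if 0
static kern_return_t
example_wire_all_future_allocations(vm_map_t map)
{
	kern_return_t		kr;
	mach_vm_address_t	addr = 0;

	kr = task_wire(map, TRUE);		/* set map->wiring_required */
	if (kr != KERN_SUCCESS)
		return kr;

	/* this allocation (and any later one) comes up user-wired */
	kr = mach_vm_allocate(map, &addr, PAGE_SIZE, VM_FLAGS_ANYWHERE);

	(void) task_wire(map, FALSE);		/* back to normal behavior */
	return kr;
}
#endif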
__private_extern__ kern_return_t
mach_memory_entry_allocate(
	vm_named_entry_t	*user_entry_p,
	ipc_port_t		*user_handle_p)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	ipc_port_t		previous;

	user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
	if (user_entry == NULL)
		return KERN_FAILURE;

	named_entry_lock_init(user_entry);

	user_handle = ipc_port_alloc_kernel();
	if (user_handle == IP_NULL) {
		kfree(user_entry, sizeof *user_entry);
		return KERN_FAILURE;
	}
	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	user_entry->backing.pager = NULL;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = FALSE;
	user_entry->is_copy = FALSE;
	user_entry->internal = FALSE;
	user_entry->size = 0;
	user_entry->offset = 0;
	user_entry->data_offset = 0;
	user_entry->protection = VM_PROT_NONE;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);

	*user_entry_p = user_entry;
	*user_handle_p = user_handle;

	return KERN_SUCCESS;
}
/*
 *	mach_memory_object_memory_entry_64
 *
 *	Create a named entry backed by the provided pager.
 *
 *	JMM - we need to hold a reference on the pager -
 *	and release it when the named entry is destroyed.
 */
kern_return_t
mach_memory_object_memory_entry_64(
	host_t			host,
	boolean_t		internal,
	vm_object_offset_t	size,
	vm_prot_t		permission,
	memory_object_t		pager,
	ipc_port_t		*entry_handle)
{
	unsigned int		access;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	if (host == HOST_NULL)
		return(KERN_INVALID_HOST);

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	user_entry->backing.pager = pager;
	user_entry->size = size;
	user_entry->offset = 0;
	user_entry->protection = permission & VM_PROT_ALL;
	access = GET_MAP_MEM(permission);
	SET_MAP_MEM(access, user_entry->protection);
	user_entry->internal = internal;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = TRUE;
	assert(user_entry->ref_count == 1);

	*entry_handle = user_handle;
	return KERN_SUCCESS;
}

kern_return_t
mach_memory_object_memory_entry(
	host_t		host,
	boolean_t	internal,
	vm_size_t	size,
	vm_prot_t	permission,
	memory_object_t	pager,
	ipc_port_t	*entry_handle)
{
	return mach_memory_object_memory_entry_64( host, internal,
		(vm_object_offset_t)size, permission, pager, entry_handle);
}
kern_return_t
mach_memory_entry_purgable_control(
	ipc_port_t	entry_port,
	vm_purgable_t	control,
	int		*state)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_object_t		object;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}
	if (control != VM_PURGABLE_SET_STATE &&
	    control != VM_PURGABLE_GET_STATE)
		return(KERN_INVALID_ARGUMENT);

	if (control == VM_PURGABLE_SET_STATE &&
	    (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
	     ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
		return(KERN_INVALID_ARGUMENT);

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_pager ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	/* check that named entry covers entire object ? */
	if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
		vm_object_unlock(object);
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	named_entry_unlock(mem_entry);

	kr = vm_object_purgable_control(object, control, state);

	vm_object_unlock(object);

	return kr;
}
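
/*
 * Illustrative sketch: making a purgeable named entry volatile and then
 * querying it through the routine above.  The entry is assumed to have been
 * created elsewhere (e.g. with MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE) and
 * to cover its backing object exactly, as required by the size check above.
 */
#if 0
static kern_return_t
example_volatile_then_query(ipc_port_t entry_port)
{
	kern_return_t	kr;
	int		state;

	state = VM_PURGABLE_VOLATILE;
	kr = mach_memory_entry_purgable_control(entry_port,
						VM_PURGABLE_SET_STATE, &state);
	if (kr != KERN_SUCCESS)
		return kr;

	/* "state" now holds what the call reported; ask for the current one */
	kr = mach_memory_entry_purgable_control(entry_port,
						VM_PURGABLE_GET_STATE, &state);
	return kr;
}
#endif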
kern_return_t
mach_memory_entry_get_page_counts(
	ipc_port_t	entry_port,
	unsigned int	*resident_page_count,
	unsigned int	*dirty_page_count)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_object_size_t	size;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_pager ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	offset = mem_entry->offset;
	size = mem_entry->size;

	named_entry_unlock(mem_entry);

	kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count);

	vm_object_unlock(object);

	return kr;
}
/*
 * mach_memory_entry_port_release:
 *
 * Release a send right on a named entry port.  This is the correct
 * way to destroy a named entry.  When the last right on the port is
 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
 */
void
mach_memory_entry_port_release(
	ipc_port_t	port)
{
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
	ipc_port_release_send(port);
}

/*
 * mach_destroy_memory_entry:
 *
 * Drops a reference on a memory entry and destroys the memory entry if
 * there are no more references on it.
 * NOTE: This routine should not be called to destroy a memory entry from the
 * kernel, as it will not release the Mach port associated with the memory
 * entry.  The proper way to destroy a memory entry in the kernel is to
 * call mach_memory_entry_port_release() to release the kernel's send-right on
 * the memory entry's port.  When the last send right is released, the memory
 * entry will be destroyed via ipc_kobject_destroy().
 */
void
mach_destroy_memory_entry(
	ipc_port_t	port)
{
	vm_named_entry_t	named_entry;
#if MACH_ASSERT
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
	named_entry = (vm_named_entry_t)port->ip_kobject;

	named_entry_lock(named_entry);
	named_entry->ref_count -= 1;

	if (named_entry->ref_count == 0) {
		if (named_entry->is_sub_map) {
			vm_map_deallocate(named_entry->backing.map);
		} else if (named_entry->is_pager) {
			/* JMM - need to drop reference on pager in that case */
		} else if (named_entry->is_copy) {
			vm_map_copy_discard(named_entry->backing.copy);
		} else {
			/* release the VM object we've been pointing to */
			vm_object_deallocate(named_entry->backing.object);
		}

		named_entry_unlock(named_entry);
		named_entry_lock_destroy(named_entry);

		kfree((void *) port->ip_kobject,
		      sizeof (struct vm_named_entry));
	} else
		named_entry_unlock(named_entry);
}
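
/*
 * Illustrative sketch of the in-kernel lifecycle described above: a named
 * entry created through mach_make_memory_entry_64() is torn down by dropping
 * the kernel's send right; mach_destroy_memory_entry() only runs indirectly,
 * via ipc_kobject_destroy(), once the final right goes away.  "offset" is
 * assumed to lie in an existing readable mapping of "map".
 */
#if 0
static void
example_create_then_destroy(vm_map_t map, mach_vm_offset_t offset)
{
	kern_return_t		kr;
	memory_object_size_t	size = PAGE_SIZE;
	ipc_port_t		entry = IP_NULL;

	kr = mach_make_memory_entry_64(map, &size,
				       (memory_object_offset_t)offset,
				       VM_PROT_READ, &entry, IP_NULL);
	if (kr != KERN_SUCCESS)
		return;

	/* ... use "entry" (e.g. map it elsewhere) ... */

	/* correct teardown from kernel code: release the send right */
	mach_memory_entry_port_release(entry);
}
#endif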
/* Allow manipulation of individual page state.  This is actually part of   */
/* the UPL regimen but takes place on the memory entry rather than on a UPL */
kern_return_t
mach_memory_entry_page_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags)
{
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	kern_return_t		kr;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_pager ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_reference(object);
	named_entry_unlock(mem_entry);

	kr = vm_object_page_op(object, offset, ops, phys_entry, flags);

	vm_object_deallocate(object);

	return kr;
}
/*
 * mach_memory_entry_range_op offers performance enhancement over
 * mach_memory_entry_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */

kern_return_t
mach_memory_entry_range_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset_beg,
	vm_object_offset_t	offset_end,
	int			ops,
	int			*range)
{
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	kern_return_t		kr;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_pager ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_reference(object);
	named_entry_unlock(mem_entry);

	kr = vm_object_range_op(object,
				offset_beg,
				offset_end,
				ops,
				(uint32_t *) range);

	vm_object_deallocate(object);

	return kr;
}
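
/*
 * Illustrative sketch of the trade-off described above: discarding the
 * resident pages of a range with a single range_op call instead of issuing
 * one page_op per page.  UPL_ROP_DUMP / UPL_POP_DUMP are the existing "ops"
 * encodings; the helper names and loop are assumptions for illustration.
 */
#if 0
static kern_return_t
example_dump_range(ipc_port_t entry_port, vm_object_offset_t start,
		   vm_object_offset_t end)
{
	int	range = 0;

	/* single call, no per-page state returned */
	return mach_memory_entry_range_op(entry_port, start, end,
					  UPL_ROP_DUMP, &range);
}

static kern_return_t
example_dump_range_page_by_page(ipc_port_t entry_port,
				vm_object_offset_t start,
				vm_object_offset_t end)
{
	kern_return_t		kr = KERN_SUCCESS;
	vm_object_offset_t	off;
	ppnum_t			pn = 0;
	int			flags = 0;

	/* comparable effect, but one entry lookup and one call per page */
	for (off = start; off < end && kr == KERN_SUCCESS; off += PAGE_SIZE)
		kr = mach_memory_entry_page_op(entry_port, off,
					       UPL_POP_DUMP, &pn, &flags);
	return kr;
}
#endif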
static void dp_control_port_init(void)
{
	lck_grp_init(&dynamic_pager_control_port_lock_group, "dp_control_port", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&dynamic_pager_control_port_lock, &dynamic_pager_control_port_lock_group, LCK_ATTR_NULL);
}

kern_return_t
set_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	control_port)
{
	ipc_port_t	old_port;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	lck_mtx_lock(&dynamic_pager_control_port_lock);
	old_port = dynamic_pager_control_port;
	dynamic_pager_control_port = control_port;
	lck_mtx_unlock(&dynamic_pager_control_port_lock);

	if (IP_VALID(old_port))
		ipc_port_release_send(old_port);

	return KERN_SUCCESS;
}

kern_return_t
get_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	*control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	lck_mtx_lock(&dynamic_pager_control_port_lock);
	*control_port = ipc_port_copy_send(dynamic_pager_control_port);
	lck_mtx_unlock(&dynamic_pager_control_port_lock);

	return KERN_SUCCESS;
}
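
/*
 * Illustrative sketch of the send-right discipline above: set_dp_control_port()
 * stores the caller's right for the new port and releases the right it held on
 * the old one, while get_dp_control_port() hands back a copied send right that
 * the caller must eventually release.  The host_priv argument is assumed to
 * have been validated elsewhere; the helper name is an assumption.
 */
#if 0
static void
example_swap_dp_control_port(host_priv_t host_priv, ipc_port_t new_port)
{
	ipc_port_t	cur = IP_NULL;

	(void) set_dp_control_port(host_priv, new_port);

	if (get_dp_control_port(host_priv, &cur) == KERN_SUCCESS &&
	    IP_VALID(cur)) {
		/* ... use "cur" ... */
		ipc_port_release_send(cur);	/* drop the copied right */
	}
}
#endif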
/* ******* Temporary Internal calls to UPL for BSD ***** */

extern int kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr);

extern int kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl);

extern int kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_abort(
	upl_t			upl,
	int			abort_type);

extern int kernel_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags);


kern_return_t
kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return vm_upl_map(map, upl, dst_addr);
}


kern_return_t
kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	return vm_upl_unmap(map, upl);
}

kern_return_t
kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count)
{
	kern_return_t	kr;

	kr = upl_commit(upl, pl, count);
	upl_deallocate(upl);
	return kr;
}


kern_return_t
kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count)
{
	boolean_t		finished = FALSE;
	kern_return_t		kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags)
{
	kern_return_t		kr;
	boolean_t		finished = FALSE;

	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
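
/*
 * Illustrative sketch: committing a sub-range of a UPL from BSD-side kernel
 * code and letting the wrapper free the UPL once the commit empties it (the
 * FREE_ON_EMPTY flag is rewritten to NOTIFY_EMPTY above).  The upl, page list
 * and range are assumed to come from an earlier ubc/VM call.
 */
#if 0
static kern_return_t
example_commit_and_free(upl_t upl, upl_offset_t offset, upl_size_t size,
			upl_page_info_array_t pl, mach_msg_type_number_t count)
{
	return kernel_upl_commit_range(upl, offset, size,
				       UPL_COMMIT_FREE_ON_EMPTY,
				       pl, count);
}
#endif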
/*
 * Now a kernel-private interface (for BootCache
 * use only).  Need a cleaner way to create an
 * empty vm_map() and return a handle to it.
 */

kern_return_t
vm_region_object_create(
	__unused vm_map_t	target_map,
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	vm_map_t	new_map;

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
				vm_map_round_page(size,
						  VM_MAP_PAGE_MASK(target_map)),
				TRUE);
	vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));

	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	assert(user_entry->ref_count == 1);

	*object_handle = user_handle;
	return KERN_SUCCESS;
}
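
/*
 * Illustrative sketch: creating one of these submap-backed handles and
 * releasing it again from kernel code.  In real use the port is returned to
 * the BootCache client, which can map the named entry into its own address
 * space; here only the create/destroy pairing is shown, and the helper name
 * is an assumption.
 */
#if 0
static kern_return_t
example_region_object(vm_map_t target_map, vm_size_t size)
{
	kern_return_t	kr;
	ipc_port_t	handle = IP_NULL;

	kr = vm_region_object_create(target_map, size, &handle);
	if (kr == KERN_SUCCESS)
		mach_memory_entry_port_release(handle);
	return kr;
}
#endif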
ppnum_t vm_map_get_phys_page(		/* forward */
	vm_map_t	map,
	vm_offset_t	offset);

ppnum_t
vm_map_get_phys_page(
	vm_map_t		map,
	vm_offset_t		addr)
{
	vm_object_offset_t	offset;
	vm_object_t		object;
	vm_map_offset_t		map_offset;
	vm_map_entry_t		entry;
	ppnum_t			phys_page = 0;

	map_offset = vm_map_trunc_page(addr, PAGE_MASK);

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, map_offset, &entry)) {

		if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return (ppnum_t) 0;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;
			vm_map_lock(VME_SUBMAP(entry));
			old_map = map;
			map = VME_SUBMAP(entry);
			map_offset = (VME_OFFSET(entry) +
				      (map_offset - entry->vme_start));
			vm_map_unlock(old_map);
			continue;
		}
		if (VME_OBJECT(entry)->phys_contiguous) {
			/* These are not standard pageable memory mappings */
			/* If they are not present in the object they will */
			/* have to be picked up from the pager through the */
			/* fault mechanism.  */
			if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
				/* need to call vm_fault */
				vm_map_unlock(map);
				vm_fault(map, map_offset, VM_PROT_NONE,
					 FALSE, THREAD_UNINT, NULL, 0);
				vm_map_lock(map);
				continue;
			}
			offset = (VME_OFFSET(entry) +
				  (map_offset - entry->vme_start));
			phys_page = (ppnum_t)
				((VME_OBJECT(entry)->vo_shadow_offset
				  + offset) >> PAGE_SHIFT);
			break;

		}
		offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
		object = VME_OBJECT(entry);
		vm_object_lock(object);
		while (TRUE) {
			vm_page_t dst_page = vm_page_lookup(object, offset);
			if (dst_page == VM_PAGE_NULL) {
				if (object->shadow) {
					vm_object_t old_object;
					vm_object_lock(object->shadow);
					old_object = object;
					offset = offset + object->vo_shadow_offset;
					object = object->shadow;
					vm_object_unlock(old_object);
				} else {
					vm_object_unlock(object);
					break;
				}
			} else {
				phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
				vm_object_unlock(object);
				break;
			}
		}
		break;

	}

	vm_map_unlock(map);
	return phys_page;
}
void
vm_user_init(void)
{
	dp_control_port_init();
}
kern_return_t kernel_object_iopl_request(	/* forward */
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags);

kern_return_t
kernel_object_iopl_request(
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags)
{
	vm_object_t		object;
	kern_return_t		ret;

	int			caller_flags;

	caller_flags = *flags;

	if (caller_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}

	/* a few checks to make sure user is obeying rules */
	if (*upl_size == 0) {
		if (offset >= named_entry->size)
			return(KERN_INVALID_RIGHT);
		*upl_size = (upl_size_t) (named_entry->size - offset);
		if (*upl_size != named_entry->size - offset)
			return KERN_INVALID_ARGUMENT;
	}
	if (caller_flags & UPL_COPYOUT_FROM) {
		if ((named_entry->protection & VM_PROT_READ)
		    != VM_PROT_READ) {
			return(KERN_INVALID_RIGHT);
		}
	} else {
		if ((named_entry->protection &
		     (VM_PROT_READ | VM_PROT_WRITE))
		    != (VM_PROT_READ | VM_PROT_WRITE)) {
			return(KERN_INVALID_RIGHT);
		}
	}
	if (named_entry->size < (offset + *upl_size))
		return(KERN_INVALID_ARGUMENT);

	/* the callers parameter offset is defined to be the */
	/* offset from beginning of named entry offset in object */
	offset = offset + named_entry->offset;

	if (named_entry->is_sub_map ||
	    named_entry->is_copy)
		return KERN_INVALID_ARGUMENT;

	named_entry_lock(named_entry);

	if (named_entry->is_pager) {
		object = vm_object_enter(named_entry->backing.pager,
					 named_entry->offset + named_entry->size,
					 named_entry->internal,
					 FALSE,
					 FALSE);
		if (object == VM_OBJECT_NULL) {
			named_entry_unlock(named_entry);
			return(KERN_INVALID_OBJECT);
		}

		/* JMM - drop reference on the pager here? */

		/* create an extra reference for the object */
		vm_object_lock(object);
		vm_object_reference_locked(object);
		named_entry->backing.object = object;
		named_entry->is_pager = FALSE;
		named_entry_unlock(named_entry);

		/* wait for object (if any) to be ready */
		if (!named_entry->internal) {
			while (!object->pager_ready) {
				vm_object_wait(object,
					       VM_OBJECT_EVENT_PAGER_READY,
					       THREAD_UNINT);
				vm_object_lock(object);
			}
		}
		vm_object_unlock(object);
	} else {
		/* This is the case where we are going to operate */
		/* on an already known object.  If the object is  */
		/* not ready it is internal.  An external         */
		/* object cannot be mapped until it is ready,     */
		/* we can therefore avoid the ready check         */
		/* in this case.  */
		object = named_entry->backing.object;
		vm_object_reference(object);
		named_entry_unlock(named_entry);
	}

	if (!object->private) {
		if (*upl_size > MAX_UPL_TRANSFER_BYTES)
			*upl_size = MAX_UPL_TRANSFER_BYTES;
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		} else {
			*flags = 0;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
				     offset,
				     *upl_size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     (upl_control_flags_t)(unsigned int)caller_flags);
	vm_object_deallocate(object);