/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can be either 32- or 64-bit, or
 * the kernel task can be 32- or 64-bit. mach_vm_allocate makes sense
 * everywhere, and is preferred for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32
 * platforms, the MIG glue should never call into vm_allocate directly, because
 * the calling task and the kernel_task are unlikely to use the same size
 * parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
vm_size_t	upl_offset_to_pagelist = 0;

ipc_port_t	dynamic_pager_control_port = NULL;
/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate(
	vm_map_t		map,
	mach_vm_offset_t	*addr,
	mach_vm_size_t		size,
	int			flags)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;
	boolean_t	anywhere;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain usable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0)
			map_addr += PAGE_SIZE;
	} else
		map_addr = vm_map_trunc_page(*addr);
	map_size = vm_map_round_page(size);
	if (map_size == 0)
		return(KERN_INVALID_ARGUMENT);

	result = vm_map_enter(
			map,
			&map_addr,
			map_size,
			(vm_map_offset_t)0,
			flags,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	*addr = map_addr;
	return(result);
}
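
/*
 * Illustrative sketch (not part of the original file): how a user-space
 * caller reaches the "wide" subsystem 4800 entry point above.  The calls,
 * headers and constants are the standard user-level Mach VM API; the error
 * handling shown is only an assumption about typical usage.
 */
#if 0	/* example only -- user-space code, never compiled here */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_allocate_one_page(mach_vm_address_t *out_addr)
{
	mach_vm_address_t addr = 0;
	kern_return_t kr;

	/* zero-fill memory, placed anywhere in the caller's own map */
	kr = mach_vm_allocate(mach_task_self(), &addr, vm_page_size,
			      VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	*out_addr = addr;
	return KERN_SUCCESS;
}
#endif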
/*
 *	vm_allocate -
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate(
	vm_map_t	map,
	vm_offset_t	*addr,
	vm_size_t	size,
	int		flags)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;
	boolean_t	anywhere;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain usable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0)
			map_addr += PAGE_SIZE;
	} else
		map_addr = vm_map_trunc_page(*addr);
	map_size = vm_map_round_page(size);
	if (map_size == 0)
		return(KERN_INVALID_ARGUMENT);

	result = vm_map_enter(
			map,
			&map_addr,
			map_size,
			(vm_map_offset_t)0,
			flags,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	*addr = CAST_DOWN(vm_offset_t, map_addr);
	return(result);
}
/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (mach_vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, vm_map_trunc_page(start),
			     vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
}
/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, vm_map_trunc_page(start),
			     vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
}
/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	given map.
 */
kern_return_t
mach_vm_inherit(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
			      new_inheritance));
}
/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_inherit(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
			      new_inheritance));
}
/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	given map.
 */
kern_return_t
mach_vm_protect(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_protect(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
			      new_protection,
			      set_maximum));
}
/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map. Addressability of the range limited
 *	to the same size as the kernel.
 */
kern_return_t
vm_protect(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size,
	boolean_t	set_maximum,
	vm_prot_t	new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_protect(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
			      new_protection,
			      set_maximum));
}
/*
 *	mach_vm_machine_attribute -
 *	Handle machine-specific attributes for a mapping, such
 *	as cachability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
	vm_map_t			map,
	mach_vm_address_t		addr,
	mach_vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(map,
					vm_map_trunc_page(addr),
					vm_map_round_page(addr+size),
					attribute,
					value);
}
/*
 *	vm_machine_attribute -
 *	Handle machine-specific attributes for a mapping, such
 *	as cachability, migrability, etc. Limited addressability
 *	(same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
	vm_map_t			map,
	vm_address_t			addr,
	vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(map,
					vm_map_trunc_page(addr),
					vm_map_round_page(addr+size),
					attribute,
					value);
}
/*
 *	mach_vm_read -
 *	Read/copy a range from one address space and return it to the caller.
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 *
 *	JMM - because of mach_msg_type_number_t, this call is limited to a
 *	single 4GB region at this time.
 */
kern_return_t
mach_vm_read(
	vm_map_t		map,
	mach_vm_address_t	addr,
	mach_vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if ((mach_msg_type_number_t) size != size)
		return KERN_INVALID_ARGUMENT;

	error = vm_map_copyin(map,
			(vm_map_address_t)addr,
			(vm_map_size_t)size,
			FALSE,	/* src_destroy */
			&ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
	}
	return(error);
}
/*
 *	vm_read -
 *	Read/copy a range from one address space and return it to the caller.
 *	Limited addressability (same range limits as for the native kernel map).
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 */
kern_return_t
vm_read(
	vm_map_t		map,
	vm_address_t		addr,
	vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size > (unsigned)(mach_msg_type_number_t) -1) {
		/*
		 * The kernel could handle a 64-bit "size" value, but
		 * it could not return the size of the data in "*data_size"
		 * without overflowing.
		 * Let's reject this "size" as invalid.
		 */
		return KERN_INVALID_ARGUMENT;
	}

	error = vm_map_copyin(map,
			(vm_map_address_t)addr,
			(vm_map_size_t)size,
			FALSE,	/* src_destroy */
			&ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
	}
	return(error);
}
/*
 *	mach_vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *	ourselves.
 */
kern_return_t
mach_vm_read_list(
	vm_map_t		map,
	mach_vm_read_entry_t	data_list,
	natural_t		count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for (i = 0; i < count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if (map_size != 0) {
			error = vm_map_copyin(map,
					map_addr,
					map_size,
					FALSE,	/* src_destroy */
					&copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
						current_task()->map,
						&map_addr,
						copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address = map_addr;
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return(error);
}
/*
 *	vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *	ourselves.
 *
 *	The source and destination ranges are limited to those
 *	that can be described with a vm_address_t (i.e. same
 *	size map as the kernel).
 *
 *	JMM - If the result of the copyout is an address range
 *	that cannot be described with a vm_address_t (i.e. the
 *	caller had a larger address space but used this call
 *	anyway), it will result in a truncated address being
 *	returned (and a likely confused caller).
 */
kern_return_t
vm_read_list(
	vm_map_t		map,
	vm_read_entry_t		data_list,
	natural_t		count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for (i = 0; i < count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if (map_size != 0) {
			error = vm_map_copyin(map,
					map_addr,
					map_size,
					FALSE,	/* src_destroy */
					&copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
						&map_addr,
						copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address =
						CAST_DOWN(vm_offset_t, map_addr);
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return(error);
}
/*
 *	mach_vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	In making an assumption that the current thread is local, it is
 *	no longer cluster-safe without a fully supportive local proxy
 *	thread/task (but we don't support clusters anymore so this is moot).
 */
kern_return_t
mach_vm_read_overwrite(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	mach_vm_address_t	data,
	mach_vm_size_t		*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
			      (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					      (vm_map_address_t)data,
					      copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return(error);
}
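
/*
 * Illustrative sketch (not part of the original file): the _overwrite
 * variant copies into a buffer the caller already owns, avoiding the
 * out-of-line allocation done by mach_vm_read.  The buffer and length
 * names are assumptions for the example.
 */
#if 0	/* example only -- user-space code, never compiled here */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_read_into_buffer(task_t task, mach_vm_address_t remote_addr,
			 void *local_buf, mach_vm_size_t len)
{
	mach_vm_size_t outsize = 0;

	/* copy "len" bytes from the remote task into local_buf */
	return mach_vm_read_overwrite(task, remote_addr, len,
				      (mach_vm_address_t)local_buf, &outsize);
}
#endif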
/*
 *	vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	This routine adds the additional limitation that the source and
 *	destination ranges must be describable with vm_address_t values
 *	(i.e. the same size address spaces as the kernel, or at least the
 *	ranges are in that first portion of the respective address spaces).
 */
kern_return_t
vm_read_overwrite(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_address_t	data,
	vm_size_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
			      (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					      (vm_map_address_t)data,
					      copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return(error);
}
/*
 *	mach_vm_write -
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 */
kern_return_t
mach_vm_write(
	vm_map_t			map,
	mach_vm_address_t		address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
}
/*
 *	vm_write -
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 *
 *	The addressability of the range of addresses to overwrite is
 *	limited by the use of a vm_address_t (same size as kernel map).
 *	Either the target map is also small, or the range is in the
 *	low addresses within it.
 */
kern_return_t
vm_write(
	vm_map_t			map,
	vm_address_t			address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
}
/*
 *	mach_vm_copy -
 *	Overwrite one range of the specified map with the contents of
 *	another range within that same map (i.e. both address ranges
 *	are in the same map).
 */
kern_return_t
mach_vm_copy(
	vm_map_t		map,
	mach_vm_address_t	source_address,
	mach_vm_size_t		size,
	mach_vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}
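
/*
 * Illustrative sketch (not part of the original file): duplicating one
 * range of a task's map over another range of the same map, as the
 * routine above describes.  Both ranges must already be mapped with
 * suitable protections; the names are assumptions for the example.
 */
#if 0	/* example only -- user-space code, never compiled here */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_copy_within_task(task_t task,
			 mach_vm_address_t src, mach_vm_address_t dst,
			 mach_vm_size_t size)
{
	/* copy "size" bytes from src to dst inside the same task's map */
	return mach_vm_copy(task, src, size, dst);
}
#endif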
kern_return_t
vm_copy(
	vm_map_t	map,
	vm_address_t	source_address,
	vm_size_t	size,
	vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}
/*
 *	mach_vm_map -
 *	Map some range of an object into an address space.
 *
 *	The object can be one of several types of objects:
 *		NULL - anonymous memory
 *		a named entry - a range within another address space
 *		                or a range within a memory object
 *		a whole memory object
 */
kern_return_t
mach_vm_map(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_MAP)
		return KERN_INVALID_ARGUMENT;

	return vm_map_enter_mem_object(target_map,
				       address,
				       initial_size,
				       mask,
				       flags,
				       port,
				       offset,
				       copy,
				       cur_protection,
				       max_protection,
				       inheritance);
}

/* legacy interface */
kern_return_t
vm_map_64(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	kern_return_t kr;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 port, offset, copy,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}
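
/*
 * Illustrative sketch (not part of the original file): using mach_vm_map
 * directly to get an anonymous (port == MACH_PORT_NULL) mapping with an
 * alignment constraint expressed through the "mask" argument.  The 1MB
 * alignment value is an arbitrary assumption for the example.
 */
#if 0	/* example only -- user-space code, never compiled here */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_map_aligned_anonymous(mach_vm_address_t *out_addr, mach_vm_size_t size)
{
	mach_vm_address_t addr = 0;
	kern_return_t kr;

	kr = mach_vm_map(mach_task_self(),
			 &addr,
			 size,
			 (mach_vm_offset_t)0x0FFFFF,	/* 1MB alignment mask */
			 VM_FLAGS_ANYWHERE,
			 MACH_PORT_NULL,		/* anonymous memory */
			 0,				/* offset */
			 FALSE,				/* copy */
			 VM_PROT_DEFAULT,
			 VM_PROT_ALL,
			 VM_INHERIT_DEFAULT);
	if (kr == KERN_SUCCESS)
		*out_addr = addr;
	return kr;
}
#endif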
/* temporary, until world build */
kern_return_t
vm_map(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_offset_t		offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	vm_object_offset_t obj_offset;
	kern_return_t kr;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;
	obj_offset = (vm_object_offset_t)offset;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 port, obj_offset, copy,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}
/*
 *	mach_vm_remap -
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 */
kern_return_t
mach_vm_remap(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	int			flags,
	vm_map_t		src_map,
	mach_vm_offset_t	memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
			  &map_addr,
			  size,
			  mask,
			  flags,
			  src_map,
			  memory_address,
			  copy,
			  cur_protection,
			  max_protection,
			  inheritance);
	*address = map_addr;
	return kr;
}
/*
 *	vm_remap -
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 *
 *	The addressability of the source and target address
 *	range is limited by the size of vm_address_t (in the
 *	kernel context).
 */
kern_return_t
vm_remap(
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags,
	vm_map_t	src_map,
	vm_offset_t	memory_address,
	boolean_t	copy,
	vm_prot_t	*cur_protection,
	vm_prot_t	*max_protection,
	vm_inherit_t	inheritance)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
			  &map_addr,
			  size,
			  mask,
			  flags,
			  src_map,
			  memory_address,
			  copy,
			  cur_protection,
			  max_protection,
			  inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}
/*
 * NOTE: these routines (and this file) will no longer require
 * mach_host_server.h when mach_vm_wire and vm_wire are changed to use
 * ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	mach_vm_wire
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire(
	host_priv_t		host_priv,
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, vm_map_trunc_page(start),
				 vm_map_round_page(start+size), access, TRUE);
	} else {
		rc = vm_map_unwire(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), TRUE);
	}
	return rc;
}
/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t		host_priv,
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if ((access & ~VM_PROT_ALL) || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		rc = KERN_SUCCESS;
	} else if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, vm_map_trunc_page(start),
				 vm_map_round_page(start+size), access, TRUE);
	} else {
		rc = vm_map_unwire(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), TRUE);
	}
	return rc;
}
/*
 *	mach_vm_msync -
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
kern_return_t
mach_vm_msync(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	vm_sync_t		sync_flags)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			(vm_map_size_t)size, sync_flags);
}
/*
 *	vm_msync -
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
kern_return_t
vm_msync(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_sync_t	sync_flags)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			(vm_map_size_t)size, sync_flags);
}
kern_return_t
vm_toggle_entry_reuse(int toggle, int *old_value)
{
	vm_map_t map = current_map();

	if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) {
		*old_value = map->disable_vmentry_reuse;
	} else if (toggle == VM_TOGGLE_SET) {
		vm_map_lock(map);
		map->disable_vmentry_reuse = TRUE;
		if (map->first_free == vm_map_to_entry(map)) {
			map->highest_entry_end = vm_map_min(map);
		} else {
			map->highest_entry_end = map->first_free->vme_end;
		}
		vm_map_unlock(map);
	} else if (toggle == VM_TOGGLE_CLEAR) {
		vm_map_lock(map);
		map->disable_vmentry_reuse = FALSE;
		vm_map_unlock(map);
	} else
		return KERN_INVALID_ARGUMENT;

	return KERN_SUCCESS;
}
/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
kern_return_t
mach_vm_behavior_set(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_behavior_t		new_behavior)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_behavior_set(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), new_behavior));
}
/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
kern_return_t
vm_behavior_set(
	vm_map_t		map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_behavior_t		new_behavior)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_behavior_set(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), new_behavior));
}
/*
 *	mach_vm_region -
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
mach_vm_region(
	vm_map_t		map,
	mach_vm_offset_t	*address,	/* IN/OUT */
	mach_vm_size_t		*size,		/* OUT */
	vm_region_flavor_t	flavor,		/* IN */
	vm_region_info_t	info,		/* OUT */
	mach_msg_type_number_t	*count,		/* IN/OUT */
	mach_port_t		*object_name)	/* OUT */
{
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = map_addr;
	*size = map_size;
	return kr;
}
/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
vm_region_64(
	vm_map_t		map,
	vm_offset_t		*address,	/* IN/OUT */
	vm_size_t		*size,		/* OUT */
	vm_region_flavor_t	flavor,		/* IN */
	vm_region_info_t	info,		/* OUT */
	mach_msg_type_number_t	*count,		/* IN/OUT */
	mach_port_t		*object_name)	/* OUT */
{
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_offset_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}
kern_return_t
vm_region(
	vm_map_t		map,
	vm_address_t		*address,	/* IN/OUT */
	vm_size_t		*size,		/* OUT */
	vm_region_flavor_t	flavor,		/* IN */
	vm_region_info_t	info,		/* OUT */
	mach_msg_type_number_t	*count,		/* IN/OUT */
	mach_port_t		*object_name)	/* OUT */
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
mach_vm_region_recurse(
	vm_map_t			map,
	mach_vm_address_t		*address,
	mach_vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			map,
			&map_addr,
			&map_size,
			depth,
			(vm_region_submap_info_64_t)info,
			infoCnt);

	*address = map_addr;
	*size = map_size;
	return kr;
}
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
vm_region_recurse_64(
	vm_map_t			map,
	vm_address_t			*address,
	vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_64_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			map,
			&map_addr,
			&map_size,
			depth,
			(vm_region_submap_info_64_t)info,
			infoCnt);

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}
kern_return_t
vm_region_recurse(
	vm_map_t			map,
	vm_offset_t			*address,	/* IN/OUT */
	vm_size_t			*size,		/* OUT */
	natural_t			*depth,		/* IN/OUT */
	vm_region_recurse_info_t	info32,		/* IN/OUT */
	mach_msg_type_number_t		*infoCnt)	/* IN/OUT */
{
	vm_region_submap_info_data_64_t	info64;
	vm_region_submap_info_t		info;
	vm_map_address_t		map_addr;
	vm_map_size_t			map_size;
	kern_return_t			kr;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
				      depth, &info64, infoCnt);

	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}
kern_return_t
mach_vm_purgable_control(
	vm_map_t		map,
	mach_vm_offset_t	address,
	vm_purgable_t		control,
	int			*state)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address),
				       control,
				       state);
}
kern_return_t
vm_purgable_control(
	vm_map_t		map,
	vm_offset_t		address,
	vm_purgable_t		control,
	int			*state)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address),
				       control,
				       state);
}
/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host priv port).  Set this variable to zero if
 *	you want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
kern_return_t
vm_allocate_cpm(
	host_priv_t		host_priv,
	vm_map_t		map,
	vm_address_t		*addr,
	vm_size_t		size,
	int			flags)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
		return KERN_INVALID_HOST;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*addr;
	map_size = (vm_map_size_t)size;

	kr = vm_map_enter_cpm(map,
			      &map_addr,
			      map_size,
			      flags);

	*addr = CAST_DOWN(vm_address_t, map_addr);
	return kr;
}
kern_return_t
mach_vm_page_query(
	vm_map_t		map,
	mach_vm_offset_t	offset,
	int			*disposition,
	int			*ref_count)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(map,
					  vm_map_trunc_page(offset),
					  disposition, ref_count);
}

kern_return_t
vm_map_page_query(
	vm_map_t	map,
	vm_offset_t	offset,
	int		*disposition,
	int		*ref_count)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(map,
					  vm_map_trunc_page(offset),
					  disposition, ref_count);
}
kern_return_t
mach_vm_page_info(
	vm_map_t		map,
	mach_vm_address_t	address,
	vm_page_info_flavor_t	flavor,
	vm_page_info_t		info,
	mach_msg_type_number_t	*count)
{
	kern_return_t	kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = vm_map_page_info(map, address, flavor, info, count);
	return kr;
}
/* map a (whole) upl into an address space */
kern_return_t
vm_upl_map(
	vm_map_t		map,
	upl_t			upl,
	vm_address_t		*dst_addr)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_upl(map, upl, &map_addr);
	*dst_addr = CAST_DOWN(vm_address_t, map_addr);
	return kr;
}

kern_return_t
vm_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return (vm_map_remove_upl(map, upl));
}
/* Retrieve a upl for an object underlying an address range in a map */
kern_return_t
vm_map_get_upl(
	vm_map_t		map,
	vm_map_offset_t		map_offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			*flags,
	int			force_data_sync)
{
	int		map_flags;
	kern_return_t	kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_flags = *flags & ~UPL_NOZEROFILL;
	if (force_data_sync)
		map_flags |= UPL_FORCE_DATA_SYNC;

	kr = vm_map_create_upl(map,
			       map_offset,
			       upl_size,
			       upl,
			       page_list,
			       count,
			       &map_flags);

	*flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
	return kr;
}
/*
 *	mach_make_memory_entry_64
 *
 *	Think of it as a two-stage vm_remap() operation.  First
 *	you get a handle.  Second, you map that handle in
 *	somewhere else.  Rather than doing it all at once (and
 *	without needing access to the other whole map).
 */
kern_return_t
mach_make_memory_entry_64(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_handle)
{
	vm_map_version_t	version;
	vm_named_entry_t	parent_entry;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	kern_return_t		kr;
	vm_map_t		real_map;

	/* needed for call to vm_map_lookup_locked */
	boolean_t		wired;
	vm_object_offset_t	obj_off;
	vm_prot_t		prot;
	struct vm_object_fault_info	fault_info;
	vm_object_t		object;
	vm_object_t		shadow_object;

	/* needed for direct map entry manipulation */
	vm_map_entry_t		map_entry;
	vm_map_entry_t		next_entry;
	vm_map_t		local_map;
	vm_map_t		original_map = target_map;
	vm_map_size_t		total_size;
	vm_map_size_t		map_size;
	vm_map_offset_t		map_offset;
	vm_map_offset_t		local_offset;
	vm_object_size_t	mappable_size;

	unsigned int		access;
	vm_prot_t		protections;
	vm_prot_t		original_protections, mask_protections;
	unsigned int		wimg_mode;

	boolean_t		force_shadow = FALSE;

	if (((permission & 0x00FF0000) &
	     ~(MAP_MEM_ONLY |
	       MAP_MEM_NAMED_CREATE |
	       MAP_MEM_PURGABLE |
	       MAP_MEM_NAMED_REUSE))) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		return KERN_INVALID_VALUE;
	}

	if (parent_handle != IP_NULL &&
	    ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
		parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
	} else {
		parent_entry = NULL;
	}

	original_protections = permission & VM_PROT_ALL;
	protections = original_protections;
	mask_protections = permission & VM_PROT_IS_MASK;
	access = GET_MAP_MEM(permission);

	user_handle = IP_NULL;

	map_offset = vm_map_trunc_page(offset);
	map_size = vm_map_round_page(*size);

	if (permission & MAP_MEM_ONLY) {
		boolean_t	parent_is_object;

		if (parent_entry == NULL) {
			return KERN_INVALID_ARGUMENT;
		}

		parent_is_object = !(parent_entry->is_sub_map || parent_entry->is_pager);
		object = parent_entry->backing.object;
		if (parent_is_object && object != VM_OBJECT_NULL)
			wimg_mode = object->wimg_bits;
		else
			wimg_mode = VM_WIMG_USE_DEFAULT;
		if ((access != GET_MAP_MEM(parent_entry->protection)) &&
		    !(parent_entry->protection & VM_PROT_WRITE)) {
			return KERN_INVALID_RIGHT;
		}
		if (access == MAP_MEM_IO) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_IO;
		} else if (access == MAP_MEM_COPYBACK) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_USE_DEFAULT;
		} else if (access == MAP_MEM_WTHRU) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_WTHRU;
		} else if (access == MAP_MEM_WCOMB) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_WCOMB;
		}
		if (parent_is_object && object &&
		    (access != MAP_MEM_NOOP) &&
		    (!(object->nophyscache))) {

			if (object->wimg_bits != wimg_mode) {
				vm_object_lock(object);
				vm_object_change_wimg_mode(object, wimg_mode);
				vm_object_unlock(object);
			}
		}
		*object_handle = IP_NULL;
		return KERN_SUCCESS;
	}

	if (permission & MAP_MEM_NAMED_CREATE) {
		kr = mach_memory_entry_allocate(&user_entry, &user_handle);
		if (kr != KERN_SUCCESS) {
			return KERN_FAILURE;
		}

		/*
		 * Force the creation of the VM object now.
		 */
		if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
			/*
			 * LP64todo - for now, we can only allocate 4GB-4096
			 * internal objects because the default pager can't
			 * page bigger ones.  Remove this when it can.
			 */
			kr = KERN_FAILURE;
			goto make_mem_done;
		}

		object = vm_object_allocate(map_size);
		assert(object != VM_OBJECT_NULL);

		if (permission & MAP_MEM_PURGABLE) {
			if (! (permission & VM_PROT_WRITE)) {
				/* if we can't write, we can't purge */
				vm_object_deallocate(object);
				kr = KERN_INVALID_ARGUMENT;
				goto make_mem_done;
			}
			object->purgable = VM_PURGABLE_NONVOLATILE;
		}

		/*
		 * The VM object is brand new and nobody else knows about it,
		 * so we don't need to lock it.
		 */

		wimg_mode = object->wimg_bits;
		if (access == MAP_MEM_IO) {
			wimg_mode = VM_WIMG_IO;
		} else if (access == MAP_MEM_COPYBACK) {
			wimg_mode = VM_WIMG_USE_DEFAULT;
		} else if (access == MAP_MEM_WTHRU) {
			wimg_mode = VM_WIMG_WTHRU;
		} else if (access == MAP_MEM_WCOMB) {
			wimg_mode = VM_WIMG_WCOMB;
		}
		if (access != MAP_MEM_NOOP) {
			object->wimg_bits = wimg_mode;
		}
		/* the object has no pages, so no WIMG bits to update here */

		/*
		 * We use this path when we want to make sure that
		 * nobody messes with the object (coalesce, for
		 * example) before we map it.
		 * We might want to use these objects for transposition via
		 * vm_object_transpose() too, so we don't want any copy or
		 * shadow objects either...
		 */
		object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

		user_entry->backing.object = object;
		user_entry->internal = TRUE;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->offset = 0;
		user_entry->protection = protections;
		SET_MAP_MEM(access, user_entry->protection);
		user_entry->size = map_size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in. */

		*size = CAST_DOWN(vm_size_t, map_size);
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}

	if (parent_entry == NULL ||
	    (permission & MAP_MEM_NAMED_REUSE)) {

		/* Create a named object based on address range within the task map */
		/* Go find the object at given address */

		if (target_map == VM_MAP_NULL) {
			return KERN_INVALID_TASK;
		}

redo_lookup:
		protections = original_protections;
		vm_map_lock_read(target_map);

		/* get the object associated with the target address */
		/* note we check the permission of the range against */
		/* that requested by the caller */

		kr = vm_map_lookup_locked(&target_map, map_offset,
					  protections | mask_protections,
					  OBJECT_LOCK_EXCLUSIVE, &version,
					  &object, &obj_off, &prot, &wired,
					  &fault_info,
					  &real_map);
		if (kr != KERN_SUCCESS) {
			vm_map_unlock_read(target_map);
			goto make_mem_done;
		}
		if (mask_protections) {
			/*
			 * The caller asked us to use the "protections" as
			 * a mask, so restrict "protections" to what this
			 * mapping actually allows.
			 */
			protections &= prot;
		}
		if (((prot & protections) != protections)
		    || (object == kernel_object)) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if (real_map != target_map)
				vm_map_unlock_read(real_map);
			if (object == kernel_object) {
				printf("Warning: Attempt to create a named"
					" entry from the kernel_object\n");
			}
			goto make_mem_done;
		}

		/* We have an object, now check to see if this object */
		/* is suitable.  If not, create a shadow and share that */

		/*
		 * We have to unlock the VM object to avoid deadlocking with
		 * a VM map lock (the lock ordering is map, the object), if we
		 * need to modify the VM map to create a shadow object.  Since
		 * we might release the VM map lock below anyway, we have
		 * to release the VM map lock now.
		 * XXX FBDP There must be a way to avoid this double lookup...
		 *
		 * Take an extra reference on the VM object to make sure it's
		 * not going to disappear.
		 */
		vm_object_reference_locked(object); /* extra ref to hold obj */
		vm_object_unlock(object);

		local_map = original_map;
		local_offset = map_offset;
		if (target_map != local_map) {
			vm_map_unlock_read(target_map);
			if (real_map != target_map)
				vm_map_unlock_read(real_map);
			vm_map_lock_read(local_map);
			target_map = local_map;
			real_map = local_map;
		}
		while (TRUE) {
			if (!vm_map_lookup_entry(local_map,
						 local_offset, &map_entry)) {
				kr = KERN_INVALID_ARGUMENT;
				vm_map_unlock_read(target_map);
				if (real_map != target_map)
					vm_map_unlock_read(real_map);
				vm_object_deallocate(object); /* release extra ref */
				object = VM_OBJECT_NULL;
				goto make_mem_done;
			}
			if (!(map_entry->is_sub_map)) {
				if (map_entry->object.vm_object != object) {
					kr = KERN_INVALID_ARGUMENT;
					vm_map_unlock_read(target_map);
					if (real_map != target_map)
						vm_map_unlock_read(real_map);
					vm_object_deallocate(object); /* release extra ref */
					object = VM_OBJECT_NULL;
					goto make_mem_done;
				}
				break;
			} else {
				vm_map_t	tmap;

				tmap = local_map;
				local_map = map_entry->object.sub_map;

				vm_map_lock_read(local_map);
				vm_map_unlock_read(tmap);
				target_map = local_map;
				real_map = local_map;
				local_offset = local_offset - map_entry->vme_start;
				local_offset += map_entry->offset;
			}
		}

		/*
		 * We found the VM map entry, lock the VM object again.
		 */
		vm_object_lock(object);
		if (map_entry->wired_count) {
			/* JMM - The check below should be reworked instead. */
			object->true_share = TRUE;
		}
		if (mask_protections) {
			/*
			 * The caller asked us to use the "protections" as
			 * a mask, so restrict "protections" to what this
			 * mapping actually allows.
			 */
			protections &= map_entry->max_protection;
		}
		if (((map_entry->max_protection) & protections) != protections) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if (real_map != target_map)
				vm_map_unlock_read(real_map);
			vm_object_deallocate(object);
			object = VM_OBJECT_NULL;
			goto make_mem_done;
		}

		mappable_size = fault_info.hi_offset - obj_off;
		total_size = map_entry->vme_end - map_entry->vme_start;
		if (map_size > mappable_size) {
			/* try to extend mappable size if the entries */
			/* following are from the same object and are */
			/* compatible */
			next_entry = map_entry->vme_next;
			/* lets see if the next map entry is still   */
			/* pointing at this object and is contiguous */
			while (map_size > mappable_size) {
				if ((next_entry->object.vm_object == object) &&
				    (next_entry->vme_start ==
				     next_entry->vme_prev->vme_end) &&
				    (next_entry->offset ==
				     next_entry->vme_prev->offset +
				     (next_entry->vme_prev->vme_end -
				      next_entry->vme_prev->vme_start))) {
					if (mask_protections) {
						/*
						 * The caller asked us to use
						 * the "protections" as a mask,
						 * so restrict "protections" to
						 * what this mapping actually
						 * allows.
						 */
						protections &= next_entry->max_protection;
					}
					if (((next_entry->max_protection)
					     & protections) != protections) {
						break;
					}
					if (next_entry->needs_copy !=
					    map_entry->needs_copy)
						break;
					mappable_size += next_entry->vme_end
						- next_entry->vme_start;
					total_size += next_entry->vme_end
						- next_entry->vme_start;
					next_entry = next_entry->vme_next;
				} else {
					break;
				}
			}
		}

#if !CONFIG_EMBEDDED
		if (vm_map_entry_should_cow_for_true_share(map_entry) &&
		    object->vo_size > map_size &&
		    map_size != 0) {
			/*
			 * Set up the targeted range for copy-on-write to
			 * limit the impact of "true_share"/"copy_delay" to
			 * that range instead of the entire VM object...
			 */
			vm_object_unlock(object);
			if (vm_map_lock_read_to_write(target_map)) {
				vm_object_deallocate(object);
				target_map = original_map;
				goto redo_lookup;
			}

			vm_map_clip_start(target_map, map_entry, vm_map_trunc_page(offset));
			vm_map_clip_end(target_map, map_entry, vm_map_round_page(offset) + map_size);
			force_shadow = TRUE;

			map_size = map_entry->vme_end - map_entry->vme_start;
			total_size = map_size;

			vm_map_lock_write_to_read(target_map);
			vm_object_lock(object);
		}
#endif /* !CONFIG_EMBEDDED */

		if (object->internal) {
			/* vm_map_lookup_locked will create a shadow if   */
			/* needs_copy is set but does not check for the   */
			/* other two conditions shown. It is important to */
			/* set up an object which will not be pulled from */
			/* under us.  */

			if (force_shadow ||
			    ((map_entry->needs_copy ||
			      object->shadowed ||
			      (object->vo_size > total_size)) &&
			     !object->true_share)) {
				/*
				 * We have to unlock the VM object before
				 * trying to upgrade the VM map lock, to
				 * honor lock ordering (map then object).
				 * Otherwise, we would deadlock if another
				 * thread holds a read lock on the VM map and
				 * is trying to acquire the VM object's lock.
				 * We still hold an extra reference on the
				 * VM object, guaranteeing that it won't
				 * disappear.
				 */
				vm_object_unlock(object);

				if (vm_map_lock_read_to_write(target_map)) {
					/*
					 * We couldn't upgrade our VM map lock
					 * from "read" to "write" and we lost
					 * our "read" lock.
					 * Start all over again...
					 */
					vm_object_deallocate(object); /* extra ref */
					target_map = original_map;
					goto redo_lookup;
				}
				vm_object_lock(object);

				/*
				 * JMM - We need to avoid coming here when the object
				 * is wired by anybody, not just the current map.  Why
				 * couldn't we use the standard vm_object_copy_quickly()
				 * approach here?
				 */

				/* create a shadow object */
				vm_object_shadow(&map_entry->object.vm_object,
						 &map_entry->offset, total_size);
				shadow_object = map_entry->object.vm_object;
				vm_object_unlock(object);

				prot = map_entry->protection & ~VM_PROT_WRITE;

				if (override_nx(target_map, map_entry->alias) && prot)
					prot |= VM_PROT_EXECUTE;

				vm_object_pmap_protect(
					object, map_entry->offset,
					total_size,
					((map_entry->is_shared
					  || target_map->mapped)
					 ? PMAP_NULL :
					 target_map->pmap),
					map_entry->vme_start,
					prot);
				total_size -= (map_entry->vme_end
					       - map_entry->vme_start);
				next_entry = map_entry->vme_next;
				map_entry->needs_copy = FALSE;

				vm_object_lock(shadow_object);
				while (total_size) {
					if (next_entry->object.vm_object == object) {
						vm_object_reference_locked(shadow_object);
						next_entry->object.vm_object
							= shadow_object;
						vm_object_deallocate(object);
						next_entry->offset
							= next_entry->vme_prev->offset +
							(next_entry->vme_prev->vme_end
							- next_entry->vme_prev->vme_start);
						next_entry->needs_copy = FALSE;
					} else {
						panic("mach_make_memory_entry_64:"
						      " map entries out of sync\n");
					}
					total_size -=
						next_entry->vme_end
						- next_entry->vme_start;
					next_entry = next_entry->vme_next;
				}

				/*
				 * Transfer our extra reference to the
				 * shadow object.
				 */
				vm_object_reference_locked(shadow_object);
				vm_object_deallocate(object); /* extra ref */
				object = shadow_object;

				obj_off = (local_offset - map_entry->vme_start)
					+ map_entry->offset;

				vm_map_lock_write_to_read(target_map);
			}
		}

		/* note: in the future we can (if necessary) allow for  */
		/* memory object lists, this will better support        */
		/* fragmentation, but is it necessary?  The user should */
		/* be encouraged to create address space oriented       */
		/* shared objects from CLEAN memory regions which have  */
		/* a known and defined history.  i.e. no inheritance    */
		/* share, make this call before making the region the   */
		/* target of ipc's, etc.  The code above, protecting    */
		/* against delayed copy, etc. is mostly defensive.      */

		wimg_mode = object->wimg_bits;
		if (!(object->nophyscache)) {
			if (access == MAP_MEM_IO) {
				wimg_mode = VM_WIMG_IO;
			} else if (access == MAP_MEM_COPYBACK) {
				wimg_mode = VM_WIMG_USE_DEFAULT;
			} else if (access == MAP_MEM_WTHRU) {
				wimg_mode = VM_WIMG_WTHRU;
			} else if (access == MAP_MEM_WCOMB) {
				wimg_mode = VM_WIMG_WCOMB;
			}
		}

		object->true_share = TRUE;
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;

		/*
		 * The memory entry now points to this VM object and we
		 * need to hold a reference on the VM object.  Use the extra
		 * reference we took earlier to keep the object alive when we
		 * had to unlock it.
		 */

		vm_map_unlock_read(target_map);
		if (real_map != target_map)
			vm_map_unlock_read(real_map);

		if (object->wimg_bits != wimg_mode)
			vm_object_change_wimg_mode(object, wimg_mode);

		/* the size of mapped entry that overlaps with our region */
		/* which is targeted for share.                           */
		/* (entry_end - entry_start) -                            */
		/* offset of our beg addr within entry                    */
		/* it corresponds to this:                                */

		if (map_size > mappable_size)
			map_size = mappable_size;

		if (permission & MAP_MEM_NAMED_REUSE) {
			/*
			 * Compare what we got with the "parent_entry".
			 * If they match, re-use the "parent_entry" instead
			 * of creating a new one.
			 */
			if (parent_entry != NULL &&
			    parent_entry->backing.object == object &&
			    parent_entry->internal == object->internal &&
			    parent_entry->is_sub_map == FALSE &&
			    parent_entry->is_pager == FALSE &&
			    parent_entry->offset == obj_off &&
			    parent_entry->protection == protections &&
			    parent_entry->size == map_size) {
				/*
				 * We have a match: re-use "parent_entry".
				 */
				/* release our extra reference on object */
				vm_object_unlock(object);
				vm_object_deallocate(object);
				/* parent_entry->ref_count++; XXX ? */
				/* Get an extra send-right on handle */
				ipc_port_copy_send(parent_handle);
				*object_handle = parent_handle;
				return KERN_SUCCESS;
			} else {
				/*
				 * No match: we need to create a new entry.
				 * fall through...
				 */
			}
		}

		vm_object_unlock(object);
		if (mach_memory_entry_allocate(&user_entry, &user_handle)
		    != KERN_SUCCESS) {
			/* release our unused reference on the object */
			vm_object_deallocate(object);
			return KERN_FAILURE;
		}

		user_entry->backing.object = object;
		user_entry->internal = object->internal;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->offset = obj_off;
		user_entry->protection = protections;
		SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
		user_entry->size = map_size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in. */

		*size = CAST_DOWN(vm_size_t, map_size);
		*object_handle = user_handle;
		return KERN_SUCCESS;

	} else {
		/* The new object will be based on an existing named object */

		if (parent_entry == NULL) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}
		if ((offset + map_size) > parent_entry->size) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}

		if (mask_protections) {
			/*
			 * The caller asked us to use the "protections" as
			 * a mask, so restrict "protections" to what this
			 * mapping actually allows.
			 */
			protections &= parent_entry->protection;
		}
		if ((protections & parent_entry->protection) != protections) {
			kr = KERN_PROTECTION_FAILURE;
			goto make_mem_done;
		}

		if (mach_memory_entry_allocate(&user_entry, &user_handle)
		    != KERN_SUCCESS) {
			kr = KERN_FAILURE;
			goto make_mem_done;
		}

		user_entry->size = map_size;
		user_entry->offset = parent_entry->offset + map_offset;
		user_entry->is_sub_map = parent_entry->is_sub_map;
		user_entry->is_pager = parent_entry->is_pager;
		user_entry->internal = parent_entry->internal;
		user_entry->protection = protections;

		if (access != MAP_MEM_NOOP) {
			SET_MAP_MEM(access, user_entry->protection);
		}

		if (parent_entry->is_sub_map) {
			user_entry->backing.map = parent_entry->backing.map;
			vm_map_lock(user_entry->backing.map);
			user_entry->backing.map->ref_count++;
			vm_map_unlock(user_entry->backing.map);
		}
		else if (parent_entry->is_pager) {
			user_entry->backing.pager = parent_entry->backing.pager;
			/* JMM - don't we need a reference here? */
		} else {
			object = parent_entry->backing.object;
			assert(object != VM_OBJECT_NULL);
			user_entry->backing.object = object;
			/* we now point to this object, hold on */
			vm_object_reference(object);
			vm_object_lock(object);
			object->true_share = TRUE;
			if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
				object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
			vm_object_unlock(object);
		}
		*size = CAST_DOWN(vm_size_t, map_size);
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}
2490 if (user_handle
!= IP_NULL
) {
2492 * Releasing "user_handle" causes the kernel object
2493 * associated with it ("user_entry" here) to also be
2494 * released and freed.
2496 mach_memory_entry_port_release(user_handle
);
kern_return_t
_mach_make_memory_entry(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_size_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
	    (memory_object_offset_t)offset, permission, object_handle,
	    parent_entry);
	*size = mo_size;
	return kr;
}
kern_return_t
mach_make_memory_entry(
	vm_map_t		target_map,
	vm_size_t		*size,
	vm_offset_t		offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_size_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
	    (memory_object_offset_t)offset, permission, object_handle,
	    parent_entry);
	*size = CAST_DOWN(vm_size_t, mo_size);
	return kr;
}
/*
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (must_wire)
		map->wiring_required = TRUE;
	else
		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
}
__private_extern__ kern_return_t
mach_memory_entry_allocate(
	vm_named_entry_t	*user_entry_p,
	ipc_port_t		*user_handle_p)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	ipc_port_t		previous;

	user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
	if (user_entry == NULL)
		return KERN_FAILURE;

	named_entry_lock_init(user_entry);

	user_handle = ipc_port_alloc_kernel();
	if (user_handle == IP_NULL) {
		kfree(user_entry, sizeof *user_entry);
		return KERN_FAILURE;
	}
	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	user_entry->backing.pager = NULL;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = FALSE;
	user_entry->internal = FALSE;
	user_entry->size = 0;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_NONE;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);

	*user_entry_p = user_entry;
	*user_handle_p = user_handle;

	return KERN_SUCCESS;
}
/*
 *	mach_memory_object_memory_entry_64
 *
 *	Create a named entry backed by the provided pager.
 *
 *	JMM - we need to hold a reference on the pager -
 *	and release it when the named entry is destroyed.
 */
kern_return_t
mach_memory_object_memory_entry_64(
	host_t			host,
	boolean_t		internal,
	vm_object_offset_t	size,
	vm_prot_t		permission,
	memory_object_t		pager,
	ipc_port_t		*entry_handle)
{
	unsigned int		access;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	if (host == HOST_NULL)
		return(KERN_INVALID_HOST);

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	user_entry->backing.pager = pager;
	user_entry->size = size;
	user_entry->offset = 0;
	user_entry->protection = permission & VM_PROT_ALL;
	access = GET_MAP_MEM(permission);
	SET_MAP_MEM(access, user_entry->protection);
	user_entry->internal = internal;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = TRUE;
	assert(user_entry->ref_count == 1);

	*entry_handle = user_handle;
	return KERN_SUCCESS;
}
kern_return_t
mach_memory_object_memory_entry(
	host_t		host,
	boolean_t	internal,
	vm_size_t	size,
	vm_prot_t	permission,
	memory_object_t	pager,
	ipc_port_t	*entry_handle)
{
	return mach_memory_object_memory_entry_64( host, internal,
		(vm_object_offset_t)size, permission, pager, entry_handle);
}
kern_return_t
mach_memory_entry_purgable_control(
	ipc_port_t	entry_port,
	vm_purgable_t	control,
	int		*state)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_object_t		object;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}
	if (control != VM_PURGABLE_SET_STATE &&
	    control != VM_PURGABLE_GET_STATE)
		return(KERN_INVALID_ARGUMENT);

	if (control == VM_PURGABLE_SET_STATE &&
	    (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
	     ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
		return(KERN_INVALID_ARGUMENT);

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map || mem_entry->is_pager) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	/* check that named entry covers entire object ? */
	if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
		vm_object_unlock(object);
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	named_entry_unlock(mem_entry);

	kr = vm_object_purgable_control(object, control, state);

	vm_object_unlock(object);

	return kr;
}
/*
 * mach_memory_entry_port_release:
 *
 * Release a send right on a named entry port.  This is the correct
 * way to destroy a named entry.  When the last right on the port is
 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
 */
void
mach_memory_entry_port_release(
	ipc_port_t	port)
{
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
	ipc_port_release_send(port);
}
/*
 * mach_destroy_memory_entry:
 *
 * Drops a reference on a memory entry and destroys the memory entry if
 * there are no more references on it.
 * NOTE: This routine should not be called to destroy a memory entry from the
 * kernel, as it will not release the Mach port associated with the memory
 * entry.  The proper way to destroy a memory entry in the kernel is to
 * call mach_memory_entry_port_release() to release the kernel's send-right on
 * the memory entry's port.  When the last send right is released, the memory
 * entry will be destroyed via ipc_kobject_destroy().
 */
void
mach_destroy_memory_entry(
	ipc_port_t	port)
{
	vm_named_entry_t	named_entry;
#if MACH_ASSERT
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
	named_entry = (vm_named_entry_t)port->ip_kobject;

	lck_mtx_lock(&(named_entry)->Lock);
	named_entry->ref_count -= 1;

	if (named_entry->ref_count == 0) {
		if (named_entry->is_sub_map) {
			vm_map_deallocate(named_entry->backing.map);
		} else if (!named_entry->is_pager) {
			/* release the memory object we've been pointing to */
			vm_object_deallocate(named_entry->backing.object);
		} /* else JMM - need to drop reference on pager in that case */

		lck_mtx_unlock(&(named_entry)->Lock);

		kfree((void *) port->ip_kobject,
		      sizeof (struct vm_named_entry));
	} else
		lck_mtx_unlock(&(named_entry)->Lock);
}
/* Allow manipulation of individual page state.  This is actually part of */
/* the UPL regimen but takes place on the memory entry rather than on a UPL */

kern_return_t
mach_memory_entry_page_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags)
{
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	kern_return_t		kr;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map || mem_entry->is_pager) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_reference(object);
	named_entry_unlock(mem_entry);

	kr = vm_object_page_op(object, offset, ops, phys_entry, flags);

	vm_object_deallocate(object);

	return kr;
}
/*
 * mach_memory_entry_range_op offers a performance enhancement over
 * mach_memory_entry_page_op for page_op functions which do not require
 * page-level state to be returned from the call.  Page_op was created to
 * provide a low-cost alternative to page manipulation via UPLs when only
 * a single page was involved.  The range_op call establishes the ability
 * in the _op family of functions to work on multiple pages where the lack
 * of page-level state handling allows the caller to avoid the overhead of
 * the UPL structures.
 */

kern_return_t
mach_memory_entry_range_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset_beg,
	vm_object_offset_t	offset_end,
	int			ops,
	int			*range)
{
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	kern_return_t		kr;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map || mem_entry->is_pager) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_reference(object);
	named_entry_unlock(mem_entry);

	kr = vm_object_range_op(object,
				offset_beg,
				offset_end,
				ops,
				(uint32_t *) range);

	vm_object_deallocate(object);

	return kr;
}
kern_return_t
set_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	if (IP_VALID(dynamic_pager_control_port))
		ipc_port_release_send(dynamic_pager_control_port);

	dynamic_pager_control_port = control_port;
	return KERN_SUCCESS;
}

kern_return_t
get_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	*control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	*control_port = ipc_port_copy_send(dynamic_pager_control_port);
	return KERN_SUCCESS;
}
/* ******* Temporary Internal calls to UPL for BSD ***** */

extern int kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr);

extern int kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl);

extern int kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_abort(
	upl_t			upl,
	int			abort_type);

extern int kernel_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags);
kern_return_t
kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return vm_upl_map(map, upl, dst_addr);
}

kern_return_t
kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	return vm_upl_unmap(map, upl);
}

kern_return_t
kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count)
{
	kern_return_t	kr;

	kr = upl_commit(upl, pl, count);
	upl_deallocate(upl);
	return kr;
}

kern_return_t
kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count)
{
	boolean_t		finished = FALSE;
	kern_return_t		kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}
kern_return_t
kernel_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags)
{
	kern_return_t		kr;
	boolean_t		finished = FALSE;

	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
/*
 * Now a kernel-private interface (for BootCache
 * use only).  Need a cleaner way to create an
 * empty vm_map() and return a handle to it.
 */

kern_return_t
vm_region_object_create(
	__unused vm_map_t	target_map,
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	vm_map_t	new_map;

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
				vm_map_round_page(size), TRUE);

	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	assert(user_entry->ref_count == 1);

	*object_handle = user_handle;
	return KERN_SUCCESS;
}
ppnum_t vm_map_get_phys_page(	/* forward */
	vm_map_t	map,
	vm_offset_t	offset);

ppnum_t
vm_map_get_phys_page(
	vm_map_t	map,
	vm_offset_t	addr)
{
	vm_object_offset_t	offset;
	vm_object_t		object;
	vm_map_offset_t		map_offset;
	vm_map_entry_t		entry;
	ppnum_t			phys_page = 0;

	map_offset = vm_map_trunc_page(addr);

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, map_offset, &entry)) {

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return (ppnum_t) 0;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;
			vm_map_lock(entry->object.sub_map);
			old_map = map;
			map = entry->object.sub_map;
			map_offset = entry->offset + (map_offset - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		if (entry->object.vm_object->phys_contiguous) {
			/* These are not standard pageable memory mappings */
			/* If they are not present in the object they will */
			/* have to be picked up from the pager through the */
			/* fault mechanism. */
			if (entry->object.vm_object->vo_shadow_offset == 0) {
				/* need to call vm_fault */
				vm_map_unlock(map);
				vm_fault(map, map_offset, VM_PROT_NONE,
					 FALSE, THREAD_UNINT, NULL, 0);
				vm_map_lock(map);
				continue;
			}
			offset = entry->offset + (map_offset - entry->vme_start);
			phys_page = (ppnum_t)
				((entry->object.vm_object->vo_shadow_offset
				  + offset) >> PAGE_SHIFT);
			break;
		}
		offset = entry->offset + (map_offset - entry->vme_start);
		object = entry->object.vm_object;
		vm_object_lock(object);
		while (TRUE) {
			vm_page_t dst_page = vm_page_lookup(object, offset);
			if (dst_page == VM_PAGE_NULL) {
				if (object->shadow) {
					vm_object_t old_object;
					vm_object_lock(object->shadow);
					old_object = object;
					offset = offset + object->vo_shadow_offset;
					object = object->shadow;
					vm_object_unlock(old_object);
				} else {
					vm_object_unlock(object);
					break;
				}
			} else {
				phys_page = (ppnum_t)(dst_page->phys_page);
				vm_object_unlock(object);
				break;
			}
		}
		break;
	}

	vm_map_unlock(map);
	return phys_page;
}
kern_return_t kernel_object_iopl_request(	/* forward */
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags);

kern_return_t
kernel_object_iopl_request(
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags)
{
	vm_object_t		object;
	kern_return_t		ret;
	int			caller_flags;

	caller_flags = *flags;

	if (caller_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}

	/* a few checks to make sure user is obeying rules */
	if (*upl_size == 0) {
		if (offset >= named_entry->size)
			return(KERN_INVALID_RIGHT);
		*upl_size = (upl_size_t) (named_entry->size - offset);
		if (*upl_size != named_entry->size - offset)
			return KERN_INVALID_ARGUMENT;
	}
	if (caller_flags & UPL_COPYOUT_FROM) {
		if ((named_entry->protection & VM_PROT_READ)
		    != VM_PROT_READ) {
			return(KERN_INVALID_RIGHT);
		}
	} else {
		if ((named_entry->protection &
		     (VM_PROT_READ | VM_PROT_WRITE))
		    != (VM_PROT_READ | VM_PROT_WRITE)) {
			return(KERN_INVALID_RIGHT);
		}
	}
	if (named_entry->size < (offset + *upl_size))
		return(KERN_INVALID_ARGUMENT);

	/* the callers parameter offset is defined to be the */
	/* offset from beginning of named entry offset in object */
	offset = offset + named_entry->offset;

	if (named_entry->is_sub_map)
		return (KERN_INVALID_ARGUMENT);

	named_entry_lock(named_entry);

	if (named_entry->is_pager) {
		object = vm_object_enter(named_entry->backing.pager,
					 named_entry->offset + named_entry->size,
					 named_entry->internal,
					 FALSE,
					 FALSE);
		if (object == VM_OBJECT_NULL) {
			named_entry_unlock(named_entry);
			return(KERN_INVALID_OBJECT);
		}

		/* JMM - drop reference on the pager here? */

		/* create an extra reference for the object */
		vm_object_lock(object);
		vm_object_reference_locked(object);
		named_entry->backing.object = object;
		named_entry->is_pager = FALSE;
		named_entry_unlock(named_entry);

		/* wait for object (if any) to be ready */
		if (!named_entry->internal) {
			while (!object->pager_ready) {
				vm_object_wait(object,
					       VM_OBJECT_EVENT_PAGER_READY,
					       THREAD_UNINT);
				vm_object_lock(object);
			}
		}
		vm_object_unlock(object);
	} else {
		/* This is the case where we are going to operate */
		/* on an already known object.  If the object is */
		/* not ready it is internal.  An external */
		/* object cannot be mapped until it is ready, */
		/* so we can therefore avoid the ready check. */

		object = named_entry->backing.object;
		vm_object_reference(object);
		named_entry_unlock(named_entry);
	}

	if (!object->private) {
		if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
			*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		} else {
			*flags = 0;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
				     offset,
				     *upl_size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     caller_flags);
	vm_object_deallocate(object);
	return ret;
}