/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can either be 32 or 64, or the
 * kernel task can be 32 or 64. mach_vm_allocate makes sense everywhere, and
 * is preferred for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32
 * platforms, the MIG glue should never call into vm_allocate directly, because
 * the calling task and kernel_task are unlikely to use the same size parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */
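/*
 * Illustrative user-space sketch (not part of this file, not compiled):
 * how a task calls the subsystem 4800 interface described above, here
 * mach_vm_allocate()/mach_vm_deallocate().  Assumes <mach/mach.h> and
 * <mach/mach_vm.h>; error handling is minimal.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>

static void
example_allocate(void)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t    size = 4 * 4096;
	kern_return_t     kr;

	/* VM_FLAGS_ANYWHERE lets the kernel pick the address. */
	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		printf("mach_vm_allocate: %s\n", mach_error_string(kr));
		return;
	}
	printf("allocated at 0x%llx\n", (unsigned long long)addr);
	(void) mach_vm_deallocate(mach_task_self(), addr, size);
}
#endif	/* example only */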
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
vm_size_t	upl_offset_to_pagelist = 0;

ipc_port_t	dynamic_pager_control_port = NULL;
/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate(
	vm_map_t		map,
	mach_vm_offset_t	*addr,
	mach_vm_size_t		size,
	int			flags)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;
	boolean_t	anywhere;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0)
		return(KERN_SUCCESS);

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain useable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0)
			map_addr += VM_MAP_PAGE_SIZE(map);
	} else
		map_addr = vm_map_trunc_page(*addr,
					     VM_MAP_PAGE_MASK(map));
	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(map));
	if (map_size == 0) {
		return(KERN_INVALID_ARGUMENT);
	}

	result = vm_map_enter(
			map,
			&map_addr,
			map_size,
			(vm_map_offset_t)0,
			flags,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	*addr = map_addr;
	return(result);
}
/*
 *	vm_allocate
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate(
	vm_map_t	map,
	vm_offset_t	*addr,
	vm_size_t	size,
	int		flags)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;
	boolean_t	anywhere;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0)
		return(KERN_SUCCESS);

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain useable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0)
			map_addr += VM_MAP_PAGE_SIZE(map);
	} else
		map_addr = vm_map_trunc_page(*addr,
					     VM_MAP_PAGE_MASK(map));
	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(map));
	if (map_size == 0) {
		return(KERN_INVALID_ARGUMENT);
	}

	result = vm_map_enter(
			map,
			&map_addr,
			map_size,
			(vm_map_offset_t)0,
			flags,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	*addr = CAST_DOWN(vm_offset_t, map_addr);
	return(result);
}
/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (mach_vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map,
			     vm_map_trunc_page(start,
					       VM_MAP_PAGE_MASK(map)),
			     vm_map_round_page(start+size,
					       VM_MAP_PAGE_MASK(map)),
			     VM_MAP_NO_FLAGS));
}
/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map,
			     vm_map_trunc_page(start,
					       VM_MAP_PAGE_MASK(map)),
			     vm_map_round_page(start+size,
					       VM_MAP_PAGE_MASK(map)),
			     VM_MAP_NO_FLAGS));
}
/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_inherit(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size,
						VM_MAP_PAGE_MASK(map)),
			      new_inheritance));
}
/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses).
 */
kern_return_t
vm_inherit(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size,
						VM_MAP_PAGE_MASK(map)),
			      new_inheritance));
}
/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_protect(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_protect(map,
			      vm_map_trunc_page(start,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size,
						VM_MAP_PAGE_MASK(map)),
			      new_protection,
			      set_maximum));
}
/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.  Addressability of the range limited
 *	to the same size as the kernel.
 */
kern_return_t
vm_protect(
	vm_map_t		map,
	vm_offset_t		start,
	vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_protect(map,
			      vm_map_trunc_page(start,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size,
						VM_MAP_PAGE_MASK(map)),
			      new_protection,
			      set_maximum));
}
/*
 * mach_vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
	vm_map_t			map,
	mach_vm_address_t		addr,
	mach_vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(
		map,
		vm_map_trunc_page(addr,
				  VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(addr+size,
				  VM_MAP_PAGE_MASK(map)),
		attribute,
		value);
}
/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.  Limited addressability
 * (same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
	vm_map_t			map,
	vm_address_t			addr,
	vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(
		map,
		vm_map_trunc_page(addr,
				  VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(addr+size,
				  VM_MAP_PAGE_MASK(map)),
		attribute,
		value);
}
/*
 * mach_vm_read -
 * Read/copy a range from one address space and return it to the caller.
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 *
 * JMM - because of mach_msg_type_number_t, this call is limited to a
 * single 4GB region at this time.
 */
kern_return_t
mach_vm_read(
	vm_map_t		map,
	mach_vm_address_t	addr,
	mach_vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if ((mach_msg_type_number_t) size != size)
		return KERN_INVALID_ARGUMENT;

	error = vm_map_copyin(map,
			(vm_map_address_t)addr,
			(vm_map_size_t)size,
			FALSE,	/* src_destroy */
			&ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
	}
	return(error);
}
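/*
 * Illustrative user-space sketch (not part of this file, not compiled):
 * reading a range with mach_vm_read().  The kernel hands the data back
 * as out-of-line memory that the caller must deallocate when done.
 * Assumes <mach/mach.h> and <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <string.h>

static int
example_read(mach_vm_address_t addr, mach_vm_size_t size,
	     void *buf, size_t buflen)
{
	vm_offset_t data = 0;
	mach_msg_type_number_t data_size = 0;
	kern_return_t kr;

	kr = mach_vm_read(mach_task_self(), addr, size, &data, &data_size);
	if (kr != KERN_SUCCESS)
		return -1;

	/* Copy out and release the out-of-line buffer MIG mapped for us. */
	memcpy(buf, (const void *)data, data_size < buflen ? data_size : buflen);
	(void) mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)data,
				  (mach_vm_size_t)data_size);
	return 0;
}
#endif	/* example only */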
/*
 * vm_read -
 * Read/copy a range from one address space and return it to the caller.
 * Limited addressability (same range limits as for the native kernel map).
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 */
kern_return_t
vm_read(
	vm_map_t		map,
	vm_address_t		addr,
	vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size > (unsigned)(mach_msg_type_number_t) -1) {
		/*
		 * The kernel could handle a 64-bit "size" value, but
		 * it could not return the size of the data in "*data_size"
		 * without overflowing.
		 * Let's reject this "size" as invalid.
		 */
		return KERN_INVALID_ARGUMENT;
	}

	error = vm_map_copyin(map,
			(vm_map_address_t)addr,
			(vm_map_size_t)size,
			FALSE,	/* src_destroy */
			&ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
	}
	return(error);
}
/*
 * mach_vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 */
kern_return_t
mach_vm_read_list(
	vm_map_t		map,
	mach_vm_read_entry_t	data_list,
	natural_t		count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if(map_size != 0) {
			error = vm_map_copyin(map,
					map_addr,
					map_size,
					FALSE,	/* src_destroy */
					&copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
						current_task()->map,
						&map_addr,
						copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address = map_addr;
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return(error);
}
/*
 * vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 *
 * The source and destination ranges are limited to those
 * that can be described with a vm_address_t (i.e. same
 * size map as the kernel).
 *
 * JMM - If the result of the copyout is an address range
 * that cannot be described with a vm_address_t (i.e. the
 * caller had a larger address space but used this call
 * anyway), it will result in a truncated address being
 * returned (and a likely confused caller).
 */
kern_return_t
vm_read_list(
	vm_map_t		map,
	vm_read_entry_t		data_list,
	natural_t		count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if(map_size != 0) {
			error = vm_map_copyin(map,
					map_addr,
					map_size,
					FALSE,	/* src_destroy */
					&copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
						&map_addr,
						copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address =
						CAST_DOWN(vm_offset_t, map_addr);
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return(error);
}
/*
 * mach_vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * In making an assumption that the current thread is local, it is
 * no longer cluster-safe without a fully supportive local proxy
 * thread/task (but we don't support clusters anymore so this is moot).
 */
kern_return_t
mach_vm_read_overwrite(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	mach_vm_address_t	data,
	mach_vm_size_t		*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
				(vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					(vm_map_address_t)data,
					copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return(error);
}
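/*
 * Illustrative user-space sketch (not part of this file, not compiled):
 * mach_vm_read_overwrite() copies directly into a buffer the caller
 * already owns, avoiding the out-of-line allocation that mach_vm_read()
 * performs.  Assumes <mach/mach.h> and <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_read_overwrite(mach_vm_address_t remote_addr, void *local_buf,
		       mach_vm_size_t len)
{
	mach_vm_size_t outsize = 0;
	kern_return_t kr;

	kr = mach_vm_read_overwrite(mach_task_self(), remote_addr, len,
				    (mach_vm_address_t)local_buf, &outsize);
	/* On success, outsize reports how many bytes were copied. */
	return kr;
}
#endif	/* example only */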
/*
 * vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * This routine adds the additional limitation that the source and
 * destination ranges must be describable with vm_address_t values
 * (i.e. the same size address spaces as the kernel, or at least the
 * ranges are in that first portion of the respective address spaces).
 */
kern_return_t
vm_read_overwrite(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_address_t	data,
	vm_size_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
				(vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					(vm_map_address_t)data,
					copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return(error);
}
/*
 * mach_vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 */
kern_return_t
mach_vm_write(
	vm_map_t			map,
	mach_vm_address_t		address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
}
/*
 * vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 *
 * The addressability of the range of addresses to overwrite is
 * limited by the use of a vm_address_t (same size as kernel map).
 * Either the target map is also small, or the range is in the
 * low addresses within it.
 */
kern_return_t
vm_write(
	vm_map_t			map,
	vm_address_t			address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
}
/*
 * mach_vm_copy -
 * Overwrite one range of the specified map with the contents of
 * another range within that same map (i.e. both address ranges
 * are "over there").
 */
kern_return_t
mach_vm_copy(
	vm_map_t		map,
	mach_vm_address_t	source_address,
	mach_vm_size_t		size,
	mach_vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}
kern_return_t
vm_copy(
	vm_map_t	map,
	vm_address_t	source_address,
	vm_size_t	size,
	vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}
/*
 * mach_vm_map -
 * Map some range of an object into an address space.
 *
 * The object can be one of several types of objects:
 *	NULL - anonymous memory
 *	a named entry - a range within another address space
 *	                or a range within a memory object
 *	a whole memory object
 */
kern_return_t
mach_vm_map(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	kern_return_t	kr;
	vm_map_offset_t	vmmaddr;

	vmmaddr = (vm_map_offset_t) *address;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_MAP)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_mem_object(target_map,
				     &vmmaddr,
				     initial_size,
				     mask,
				     flags,
				     port,
				     offset,
				     copy,
				     cur_protection,
				     max_protection,
				     inheritance);

	if (kr == KERN_SUCCESS)
		*address = vmmaddr;
	return kr;
}
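/*
 * Illustrative user-space sketch (not part of this file, not compiled):
 * mach_vm_map() with a null memory object behaves like an allocation,
 * but also lets the caller constrain alignment via "mask" and choose
 * protections/inheritance up front.  Assumes <mach/mach.h> and
 * <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_map_anonymous(mach_vm_address_t *out_addr, mach_vm_size_t size)
{
	*out_addr = 0;
	/* A mask of 0xFFFF requests 64KB alignment of the chosen address. */
	return mach_vm_map(mach_task_self(),
			   out_addr,
			   size,
			   (mach_vm_offset_t)0xFFFF,	/* alignment mask */
			   VM_FLAGS_ANYWHERE,
			   MACH_PORT_NULL,		/* anonymous memory */
			   0,				/* offset */
			   FALSE,			/* copy */
			   VM_PROT_READ | VM_PROT_WRITE,
			   VM_PROT_READ | VM_PROT_WRITE,
			   VM_INHERIT_DEFAULT);
}
#endif	/* example only */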
/* legacy interface */
kern_return_t
vm_map_64(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	kern_return_t kr;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 port, offset, copy,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}
/* temporary, until world build */
kern_return_t
vm_map(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_offset_t		offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	vm_object_offset_t obj_offset;
	kern_return_t kr;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;
	obj_offset = (vm_object_offset_t)offset;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 port, obj_offset, copy,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}
/*
 * mach_vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 */
kern_return_t
mach_vm_remap(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	int			flags,
	vm_map_t		src_map,
	mach_vm_offset_t	memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
			  &map_addr,
			  size,
			  mask,
			  flags,
			  src_map,
			  memory_address,
			  copy,
			  cur_protection,
			  max_protection,
			  inheritance);
	*address = map_addr;
	return kr;
}
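/*
 * Illustrative user-space sketch (not part of this file, not compiled):
 * mach_vm_remap() aliases an existing range at a new address, here
 * within the same task, sharing the underlying pages (copy == FALSE).
 * Assumes <mach/mach.h> and <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_remap_alias(mach_vm_address_t src, mach_vm_size_t size,
		    mach_vm_address_t *alias_out)
{
	vm_prot_t cur_prot, max_prot;

	*alias_out = 0;	/* let the kernel choose with VM_FLAGS_ANYWHERE */
	return mach_vm_remap(mach_task_self(), alias_out, size, 0,
			     VM_FLAGS_ANYWHERE,
			     mach_task_self(),	/* source task */
			     src,
			     FALSE,		/* share, don't copy */
			     &cur_prot, &max_prot,
			     VM_INHERIT_DEFAULT);
}
#endif	/* example only */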
/*
 * vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 *
 * The addressability of the source and target address
 * range is limited by the size of vm_address_t (in the
 * kernel context).
 */
kern_return_t
vm_remap(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	vm_map_t		src_map,
	vm_offset_t		memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
			  &map_addr,
			  size,
			  mask,
			  flags,
			  src_map,
			  memory_address,
			  copy,
			  cur_protection,
			  max_protection,
			  inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}
/*
 * NOTE: these routines (and this file) will no longer require mach_host_server.h
 * when mach_vm_wire and vm_wire are changed to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	mach_vm_wire
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire(
	host_priv_t		host_priv,
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map,
				 vm_map_trunc_page(start,
						   VM_MAP_PAGE_MASK(map)),
				 vm_map_round_page(start+size,
						   VM_MAP_PAGE_MASK(map)),
				 access | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_MLOCK),
				 TRUE);
	} else {
		rc = vm_map_unwire(map,
				   vm_map_trunc_page(start,
						     VM_MAP_PAGE_MASK(map)),
				   vm_map_round_page(start+size,
						     VM_MAP_PAGE_MASK(map)),
				   TRUE);
	}
	return rc;
}
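/*
 * Illustrative sketch (not part of this file, not compiled): wiring a
 * range from user space requires the privileged host port, so this is
 * normally only possible for sufficiently privileged callers.  Assumes
 * <mach/mach.h> and <mach/mach_vm.h>; "host_priv" is a host-priv port
 * the caller has already obtained.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_wire(mach_port_t host_priv, mach_vm_address_t addr,
	     mach_vm_size_t size)
{
	kern_return_t kr;

	/* Wire for read/write access... */
	kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
			  VM_PROT_READ | VM_PROT_WRITE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ...and later unwire by passing VM_PROT_NONE. */
	return mach_vm_wire(host_priv, mach_task_self(), addr, size,
			    VM_PROT_NONE);
}
#endif	/* example only */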
/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t		host_priv,
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if ((access & ~VM_PROT_ALL) || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		rc = KERN_SUCCESS;
	} else if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map,
				 vm_map_trunc_page(start,
						   VM_MAP_PAGE_MASK(map)),
				 vm_map_round_page(start+size,
						   VM_MAP_PAGE_MASK(map)),
				 access | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
				 TRUE);
	} else {
		rc = vm_map_unwire(map,
				   vm_map_trunc_page(start,
						     VM_MAP_PAGE_MASK(map)),
				   vm_map_round_page(start+size,
						     VM_MAP_PAGE_MASK(map)),
				   TRUE);
	}
	return rc;
}
/*
 *	mach_vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
kern_return_t
mach_vm_msync(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	vm_sync_t		sync_flags)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			(vm_map_size_t)size, sync_flags);
}
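/*
 * Illustrative user-space sketch (not part of this file, not compiled):
 * asking the kernel to push dirty pages of a mapped range back to its
 * pager, synchronously, and to report holes in the range.  Assumes
 * <mach/mach.h> and <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_msync(mach_vm_address_t addr, mach_vm_size_t size)
{
	/*
	 * VM_SYNC_SYNCHRONOUS: wait for the write-back to finish.
	 * VM_SYNC_CONTIGUOUS:  turn a hole in the range into
	 *                      KERN_INVALID_ADDRESS, per the comment above.
	 */
	return mach_vm_msync(mach_task_self(), addr, size,
			     VM_SYNC_SYNCHRONOUS | VM_SYNC_CONTIGUOUS);
}
#endif	/* example only */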
/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
kern_return_t
vm_msync(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_sync_t	sync_flags)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			(vm_map_size_t)size, sync_flags);
}
kern_return_t
vm_toggle_entry_reuse(int toggle, int *old_value)
{
	vm_map_t map = current_map();

	if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){
		*old_value = map->disable_vmentry_reuse;
	} else if(toggle == VM_TOGGLE_SET){
		vm_map_entry_t map_to_entry;

		vm_map_lock(map);
		vm_map_disable_hole_optimization(map);
		map->disable_vmentry_reuse = TRUE;
		__IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
		if (map->first_free == map_to_entry) {
			map->highest_entry_end = vm_map_min(map);
		} else {
			map->highest_entry_end = map->first_free->vme_end;
		}
		vm_map_unlock(map);
	} else if (toggle == VM_TOGGLE_CLEAR){
		vm_map_lock(map);
		map->disable_vmentry_reuse = FALSE;
		vm_map_unlock(map);
	} else
		return KERN_INVALID_ARGUMENT;

	return KERN_SUCCESS;
}
/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
kern_return_t
mach_vm_behavior_set(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_behavior_t		new_behavior)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_behavior_set(map,
				   vm_map_trunc_page(start,
						     VM_MAP_PAGE_MASK(map)),
				   vm_map_round_page(start+size,
						     VM_MAP_PAGE_MASK(map)),
				   new_behavior));
}
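/*
 * Illustrative user-space sketch (not part of this file, not compiled):
 * advising the VM system about the expected reference pattern of a
 * range, similar in spirit to madvise(2).  Assumes <mach/mach.h> and
 * <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_advise_sequential(mach_vm_address_t addr, mach_vm_size_t size)
{
	/* Hint that the range will be touched front-to-back once. */
	return mach_vm_behavior_set(mach_task_self(), addr, size,
				    VM_BEHAVIOR_SEQUENTIAL);
}
#endif	/* example only */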
/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
kern_return_t
vm_behavior_set(
	vm_map_t		map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_behavior_t		new_behavior)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_behavior_set(map,
				   vm_map_trunc_page(start,
						     VM_MAP_PAGE_MASK(map)),
				   vm_map_round_page(start+size,
						     VM_MAP_PAGE_MASK(map)),
				   new_behavior));
}
/*
 *	mach_vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
mach_vm_region(
	vm_map_t		 map,
	mach_vm_offset_t	*address,		/* IN/OUT */
	mach_vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = map_addr;
	*size = map_size;
	return kr;
}
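/*
 * Illustrative user-space sketch (not part of this file, not compiled):
 * walking the regions of a task with mach_vm_region() and the
 * VM_REGION_BASIC_INFO_64 flavor.  Assumes <mach/mach.h> and
 * <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>

static void
example_dump_regions(mach_port_t task)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t    size = 0;

	for (;;) {
		vm_region_basic_info_data_64_t info;
		mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
		mach_port_t object_name = MACH_PORT_NULL;

		if (mach_vm_region(task, &addr, &size,
				   VM_REGION_BASIC_INFO_64,
				   (vm_region_info_t)&info,
				   &count, &object_name) != KERN_SUCCESS)
			break;
		printf("0x%llx-0x%llx prot=%d\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size),
		       info.protection);
		addr += size;	/* advance past this region */
	}
}
#endif	/* example only */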
/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
vm_region_64(
	vm_map_t		 map,
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_offset_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}
kern_return_t
vm_region(
	vm_map_t		 map,
	vm_address_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 */
kern_return_t
mach_vm_region_recurse(
	vm_map_t			map,
	mach_vm_address_t		*address,
	mach_vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			map,
			&map_addr,
			&map_size,
			depth,
			(vm_region_submap_info_64_t)info,
			infoCnt);

	*address = map_addr;
	*size = map_size;
	return kr;
}
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 */
kern_return_t
vm_region_recurse_64(
	vm_map_t			map,
	vm_address_t			*address,
	vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_64_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			map,
			&map_addr,
			&map_size,
			depth,
			(vm_region_submap_info_64_t)info,
			infoCnt);

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}
kern_return_t
vm_region_recurse(
	vm_map_t			map,
	vm_offset_t			*address,	/* IN/OUT */
	vm_size_t			*size,		/* OUT */
	natural_t			*depth,		/* IN/OUT */
	vm_region_recurse_info_t	info32,		/* IN/OUT */
	mach_msg_type_number_t		*infoCnt)	/* IN/OUT */
{
	vm_region_submap_info_data_64_t info64;
	vm_region_submap_info_t info;
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr,&map_size,
				      depth, &info64, infoCnt);

	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}
kern_return_t
mach_vm_purgable_control(
	vm_map_t		map,
	mach_vm_offset_t	address,
	vm_purgable_t		control,
	int			*state)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address, PAGE_MASK),
				       control,
				       state);
}
kern_return_t
vm_purgable_control(
	vm_map_t		map,
	vm_offset_t		address,
	vm_purgable_t		control,
	int			*state)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address, PAGE_MASK),
				       control,
				       state);
}
/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host priv port).  Set this variable to zero if
 *	you want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
kern_return_t
vm_allocate_cpm(
	host_priv_t		host_priv,
	vm_map_t		map,
	vm_address_t		*addr,
	vm_size_t		size,
	int			flags)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
		return KERN_INVALID_HOST;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*addr;
	map_size = (vm_map_size_t)size;

	kr = vm_map_enter_cpm(map,
			      &map_addr,
			      map_size,
			      flags);

	*addr = CAST_DOWN(vm_address_t, map_addr);
	return kr;
}
kern_return_t
mach_vm_page_query(
	vm_map_t		map,
	mach_vm_offset_t	offset,
	int			*disposition,
	int			*ref_count)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(
		map,
		vm_map_trunc_page(offset, PAGE_MASK),
		disposition, ref_count);
}
kern_return_t
vm_map_page_query(
	vm_map_t	map,
	vm_offset_t	offset,
	int		*disposition,
	int		*ref_count)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(
		map,
		vm_map_trunc_page(offset, PAGE_MASK),
		disposition, ref_count);
}
kern_return_t
mach_vm_page_info(
	vm_map_t		map,
	mach_vm_address_t	address,
	vm_page_info_flavor_t	flavor,
	vm_page_info_t		info,
	mach_msg_type_number_t	*count)
{
	kern_return_t	kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = vm_map_page_info(map, address, flavor, info, count);
	return kr;
}
/* map a (whole) upl into an address space */
kern_return_t
vm_upl_map(
	vm_map_t		map,
	upl_t			upl,
	vm_address_t		*dst_addr)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_upl(map, upl, &map_addr);
	*dst_addr = CAST_DOWN(vm_address_t, map_addr);
	return kr;
}
kern_return_t
vm_upl_unmap(
	vm_map_t		map,
	upl_t			upl)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return (vm_map_remove_upl(map, upl));
}
/* Retrieve a upl for an object underlying an address range in a map */
kern_return_t
vm_map_get_upl(
	vm_map_t		map,
	vm_map_offset_t		map_offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	upl_control_flags_t	*flags,
	int			force_data_sync)
{
	upl_control_flags_t map_flags;
	kern_return_t	    kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_flags = *flags & ~UPL_NOZEROFILL;
	if (force_data_sync)
		map_flags |= UPL_FORCE_DATA_SYNC;

	kr = vm_map_create_upl(map,
			       map_offset,
			       upl_size,
			       upl,
			       page_list,
			       count,
			       &map_flags);

	*flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
	return kr;
}
/*
 * mach_make_memory_entry_64
 *
 * Think of it as a two-stage vm_remap() operation.  First
 * you get a handle.  Second, you map that handle somewhere
 * else, rather than doing it all at once (and without
 * needing access to the other whole map).
 */
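/*
 * Illustrative user-space sketch (not part of this file, not compiled):
 * the "two-stage vm_remap()" described above; first mint a named entry
 * for an existing range, then map that handle at a second address.
 * Assumes <mach/mach.h> and <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_share_range(mach_vm_address_t src, mach_vm_size_t len,
		    mach_vm_address_t *alias_out)
{
	memory_object_size_t size = len;
	mach_port_t handle = MACH_PORT_NULL;
	kern_return_t kr;

	/* Stage 1: get a handle on [src, src+len) in this task. */
	kr = mach_make_memory_entry_64(mach_task_self(), &size, src,
				       VM_PROT_READ | VM_PROT_WRITE,
				       &handle, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS)
		return kr;

	/* Stage 2: map that handle somewhere else (could be another task). */
	*alias_out = 0;
	kr = mach_vm_map(mach_task_self(), alias_out, size, 0,
			 VM_FLAGS_ANYWHERE, handle, 0, FALSE,
			 VM_PROT_READ | VM_PROT_WRITE,
			 VM_PROT_READ | VM_PROT_WRITE,
			 VM_INHERIT_NONE);
	mach_port_deallocate(mach_task_self(), handle);
	return kr;
}
#endif	/* example only */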
kern_return_t
mach_make_memory_entry_64(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_handle)
{
	vm_map_version_t	version;
	vm_named_entry_t	parent_entry;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	kern_return_t		kr;
	vm_map_t		real_map;

	/* needed for call to vm_map_lookup_locked */
	boolean_t		wired;
	boolean_t		iskernel;
	vm_object_offset_t	obj_off;
	vm_prot_t		prot;
	struct vm_object_fault_info	fault_info;
	vm_object_t		object;
	vm_object_t		shadow_object;

	/* needed for direct map entry manipulation */
	vm_map_entry_t		map_entry;
	vm_map_entry_t		next_entry;
	vm_map_t		local_map;
	vm_map_t		original_map = target_map;
	vm_map_size_t		total_size, map_size;
	vm_map_offset_t		map_start, map_end;
	vm_map_offset_t		local_offset;
	vm_object_size_t	mappable_size;

	/*
	 * Stash the offset in the page for use by vm_map_enter_mem_object()
	 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
	 */
	vm_object_offset_t	offset_in_page;

	unsigned int		access;
	vm_prot_t		protections;
	vm_prot_t		original_protections, mask_protections;
	unsigned int		wimg_mode;

	boolean_t		force_shadow = FALSE;
	boolean_t		use_data_addr;
	boolean_t		use_4K_compat;
	if (((permission & 0x00FF0000) &
	     ~(MAP_MEM_ONLY |
	       MAP_MEM_NAMED_CREATE |
	       MAP_MEM_PURGABLE |
	       MAP_MEM_NAMED_REUSE |
	       MAP_MEM_USE_DATA_ADDR |
	       MAP_MEM_VM_COPY |
	       MAP_MEM_4K_DATA_ADDR |
	       MAP_MEM_VM_SHARE))) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		return KERN_INVALID_VALUE;
	}

	if (parent_handle != IP_NULL &&
	    ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
		parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
	} else {
		parent_entry = NULL;
	}
	if (parent_entry && parent_entry->is_copy) {
		return KERN_INVALID_ARGUMENT;
	}

	original_protections = permission & VM_PROT_ALL;
	protections = original_protections;
	mask_protections = permission & VM_PROT_IS_MASK;
	access = GET_MAP_MEM(permission);
	use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0);
	use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0);

	user_handle = IP_NULL;
	user_entry = NULL;

	map_start = vm_map_trunc_page(offset, PAGE_MASK);
	if (permission & MAP_MEM_ONLY) {
		boolean_t	parent_is_object;

		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;

		if (use_data_addr || use_4K_compat || parent_entry == NULL) {
			return KERN_INVALID_ARGUMENT;
		}

		parent_is_object = !(parent_entry->is_sub_map ||
				     parent_entry->is_pager);
		object = parent_entry->backing.object;
		if(parent_is_object && object != VM_OBJECT_NULL)
			wimg_mode = object->wimg_bits;
		else
			wimg_mode = VM_WIMG_USE_DEFAULT;
		if((access != GET_MAP_MEM(parent_entry->protection)) &&
		   !(parent_entry->protection & VM_PROT_WRITE)) {
			return KERN_INVALID_RIGHT;
		}
		if(access == MAP_MEM_IO) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_IO;
		} else if (access == MAP_MEM_COPYBACK) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_USE_DEFAULT;
		} else if (access == MAP_MEM_INNERWBACK) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_INNERWBACK;
		} else if (access == MAP_MEM_WTHRU) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_WTHRU;
		} else if (access == MAP_MEM_WCOMB) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_WCOMB;
		}
		if (parent_is_object && object &&
		    (access != MAP_MEM_NOOP) &&
		    (!(object->nophyscache))) {

			if (object->wimg_bits != wimg_mode) {
				vm_object_lock(object);
				vm_object_change_wimg_mode(object, wimg_mode);
				vm_object_unlock(object);
			}
		}
		*object_handle = IP_NULL;
		return KERN_SUCCESS;
	} else if (permission & MAP_MEM_NAMED_CREATE) {
		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;

		if (use_data_addr || use_4K_compat) {
			return KERN_INVALID_ARGUMENT;
		}

		kr = mach_memory_entry_allocate(&user_entry, &user_handle);
		if (kr != KERN_SUCCESS) {
			return KERN_FAILURE;
		}

		/*
		 * Force the creation of the VM object now.
		 */
		if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
			/*
			 * LP64todo - for now, we can only allocate 4GB-4096
			 * internal objects because the default pager can't
			 * page bigger ones.  Remove this when it can.
			 */
			kr = KERN_FAILURE;
			goto make_mem_done;
		}

		object = vm_object_allocate(map_size);
		assert(object != VM_OBJECT_NULL);

		if (permission & MAP_MEM_PURGABLE) {
			if (! (permission & VM_PROT_WRITE)) {
				/* if we can't write, we can't purge */
				vm_object_deallocate(object);
				kr = KERN_INVALID_ARGUMENT;
				goto make_mem_done;
			}
			object->purgable = VM_PURGABLE_NONVOLATILE;
			assert(object->vo_purgeable_owner == NULL);
			assert(object->resident_page_count == 0);
			assert(object->wired_page_count == 0);
			vm_object_lock(object);
			vm_purgeable_nonvolatile_enqueue(object,
							 current_task());
			vm_object_unlock(object);
		}

		/*
		 * The VM object is brand new and nobody else knows about it,
		 * so we don't need to lock it.
		 */

		wimg_mode = object->wimg_bits;
		if (access == MAP_MEM_IO) {
			wimg_mode = VM_WIMG_IO;
		} else if (access == MAP_MEM_COPYBACK) {
			wimg_mode = VM_WIMG_USE_DEFAULT;
		} else if (access == MAP_MEM_INNERWBACK) {
			wimg_mode = VM_WIMG_INNERWBACK;
		} else if (access == MAP_MEM_WTHRU) {
			wimg_mode = VM_WIMG_WTHRU;
		} else if (access == MAP_MEM_WCOMB) {
			wimg_mode = VM_WIMG_WCOMB;
		}
		if (access != MAP_MEM_NOOP) {
			object->wimg_bits = wimg_mode;
		}
		/* the object has no pages, so no WIMG bits to update here */

		/*
		 * We use this path when we want to make sure that
		 * nobody messes with the object (coalesce, for
		 * example) before we map it.
		 * We might want to use these objects for transposition via
		 * vm_object_transpose() too, so we don't want any copy or
		 * shadow objects either...
		 */
		object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
		object->true_share = TRUE;

		user_entry->backing.object = object;
		user_entry->internal = TRUE;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->offset = 0;
		user_entry->data_offset = 0;
		user_entry->protection = protections;
		SET_MAP_MEM(access, user_entry->protection);
		user_entry->size = map_size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in. */

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}
	if (permission & MAP_MEM_VM_COPY) {
		vm_map_copy_t	copy;

		if (target_map == VM_MAP_NULL) {
			return KERN_INVALID_TASK;
		}

		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
		if (use_data_addr || use_4K_compat) {
			offset_in_page = offset - map_start;
			if (use_4K_compat)
				offset_in_page &= ~((signed)(0xFFF));
		} else {
			offset_in_page = 0;
		}

		kr = vm_map_copyin_internal(target_map,
					    map_start,
					    map_size,
					    VM_MAP_COPYIN_ENTRY_LIST,
					    &copy);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		kr = mach_memory_entry_allocate(&user_entry, &user_handle);
		if (kr != KERN_SUCCESS) {
			vm_map_copy_discard(copy);
			return KERN_FAILURE;
		}

		user_entry->backing.copy = copy;
		user_entry->internal = FALSE;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->is_copy = TRUE;
		user_entry->offset = 0;
		user_entry->protection = protections;
		user_entry->size = map_size;
		user_entry->data_offset = offset_in_page;

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}
	if (permission & MAP_MEM_VM_SHARE) {
		vm_map_copy_t	copy;
		vm_prot_t	cur_prot, max_prot;

		if (target_map == VM_MAP_NULL) {
			return KERN_INVALID_TASK;
		}

		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
		if (use_data_addr || use_4K_compat) {
			offset_in_page = offset - map_start;
			if (use_4K_compat)
				offset_in_page &= ~((signed)(0xFFF));
		} else {
			offset_in_page = 0;
		}

		kr = vm_map_copy_extract(target_map,
					 map_start,
					 map_size,
					 &copy,
					 &cur_prot,
					 &max_prot);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		if (mask_protections) {
			/*
			 * We just want as much of "original_protections"
			 * as we can get out of the actual "cur_prot".
			 */
			protections &= cur_prot;
			if (protections == VM_PROT_NONE) {
				/* no access at all: fail */
				vm_map_copy_discard(copy);
				return KERN_PROTECTION_FAILURE;
			}
		} else {
			/*
			 * We want exactly "original_protections"
			 * out of "cur_prot".
			 */
			if ((cur_prot & protections) != protections) {
				vm_map_copy_discard(copy);
				return KERN_PROTECTION_FAILURE;
			}
		}

		kr = mach_memory_entry_allocate(&user_entry, &user_handle);
		if (kr != KERN_SUCCESS) {
			vm_map_copy_discard(copy);
			return KERN_FAILURE;
		}

		user_entry->backing.copy = copy;
		user_entry->internal = FALSE;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->is_copy = TRUE;
		user_entry->offset = 0;
		user_entry->protection = protections;
		user_entry->size = map_size;
		user_entry->data_offset = offset_in_page;

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}
	if (parent_entry == NULL ||
	    (permission & MAP_MEM_NAMED_REUSE)) {

		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
		if (use_data_addr || use_4K_compat) {
			offset_in_page = offset - map_start;
			if (use_4K_compat)
				offset_in_page &= ~((signed)(0xFFF));
		} else {
			offset_in_page = 0;
		}

		/* Create a named object based on address range within the task map */
		/* Go find the object at given address */

		if (target_map == VM_MAP_NULL) {
			return KERN_INVALID_TASK;
		}

redo_lookup:
		protections = original_protections;
		vm_map_lock_read(target_map);

		/* get the object associated with the target address */
		/* note we check the permission of the range against */
		/* that requested by the caller */

		kr = vm_map_lookup_locked(&target_map, map_start,
					  protections | mask_protections,
					  OBJECT_LOCK_EXCLUSIVE, &version,
					  &object, &obj_off, &prot, &wired,
					  &fault_info,
					  &real_map);
		if (kr != KERN_SUCCESS) {
			vm_map_unlock_read(target_map);
			goto make_mem_done;
		}
		if (mask_protections) {
			/*
			 * The caller asked us to use the "protections" as
			 * a mask, so restrict "protections" to what this
			 * mapping actually allows.
			 */
			protections &= prot;
		}
		if (((prot & protections) != protections)
		    || (object == kernel_object)) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if(real_map != target_map)
				vm_map_unlock_read(real_map);
			if(object == kernel_object) {
				printf("Warning: Attempt to create a named"
					" entry from the kernel_object\n");
			}
			goto make_mem_done;
		}
		/* We have an object, now check to see if this object */
		/* is suitable.  If not, create a shadow and share that */

		/*
		 * We have to unlock the VM object to avoid deadlocking with
		 * a VM map lock (the lock ordering is map, the object), if we
		 * need to modify the VM map to create a shadow object.  Since
		 * we might release the VM map lock below anyway, we have
		 * to release the VM map lock now.
		 * XXX FBDP There must be a way to avoid this double lookup...
		 *
		 * Take an extra reference on the VM object to make sure it's
		 * not going to disappear.
		 */
		vm_object_reference_locked(object); /* extra ref to hold obj */
		vm_object_unlock(object);

		local_map = original_map;
		local_offset = map_start;
		if(target_map != local_map) {
			vm_map_unlock_read(target_map);
			if(real_map != target_map)
				vm_map_unlock_read(real_map);
			vm_map_lock_read(local_map);
			target_map = local_map;
			real_map = local_map;
		}
		while(TRUE) {
			if(!vm_map_lookup_entry(local_map,
						local_offset, &map_entry)) {
				kr = KERN_INVALID_ARGUMENT;
				vm_map_unlock_read(target_map);
				if(real_map != target_map)
					vm_map_unlock_read(real_map);
				vm_object_deallocate(object); /* release extra ref */
				object = VM_OBJECT_NULL;
				goto make_mem_done;
			}
			iskernel = (local_map->pmap == kernel_pmap);
			if(!(map_entry->is_sub_map)) {
				if (VME_OBJECT(map_entry) != object) {
					kr = KERN_INVALID_ARGUMENT;
					vm_map_unlock_read(target_map);
					if(real_map != target_map)
						vm_map_unlock_read(real_map);
					vm_object_deallocate(object); /* release extra ref */
					object = VM_OBJECT_NULL;
					goto make_mem_done;
				}
				break;
			} else {
				vm_map_t	tmap;

				tmap = local_map;
				local_map = VME_SUBMAP(map_entry);

				vm_map_lock_read(local_map);
				vm_map_unlock_read(tmap);
				target_map = local_map;
				real_map = local_map;
				local_offset = local_offset - map_entry->vme_start;
				local_offset += VME_OFFSET(map_entry);
			}
		}
		/*
		 * We found the VM map entry, lock the VM object again.
		 */
		vm_object_lock(object);
		if(map_entry->wired_count) {
			/* JMM - The check below should be reworked instead. */
			object->true_share = TRUE;
		}
		if (mask_protections) {
			/*
			 * The caller asked us to use the "protections" as
			 * a mask, so restrict "protections" to what this
			 * mapping actually allows.
			 */
			protections &= map_entry->max_protection;
		}
		if(((map_entry->max_protection) & protections) != protections) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if(real_map != target_map)
				vm_map_unlock_read(real_map);
			vm_object_deallocate(object);
			object = VM_OBJECT_NULL;
			goto make_mem_done;
		}
		mappable_size = fault_info.hi_offset - obj_off;
		total_size = map_entry->vme_end - map_entry->vme_start;
		if(map_size > mappable_size) {
			/* try to extend mappable size if the entries */
			/* following are from the same object and are */
			/* compatible */
			next_entry = map_entry->vme_next;
			/* lets see if the next map entry is still   */
			/* pointing at this object and is contiguous */
			while(map_size > mappable_size) {
				if ((VME_OBJECT(next_entry) == object) &&
				    (next_entry->vme_start ==
				     next_entry->vme_prev->vme_end) &&
				    (VME_OFFSET(next_entry) ==
				     (VME_OFFSET(next_entry->vme_prev) +
				      (next_entry->vme_prev->vme_end -
				       next_entry->vme_prev->vme_start)))) {
					if (mask_protections) {
						/*
						 * The caller asked us to use
						 * the "protections" as a mask,
						 * so restrict "protections" to
						 * what this mapping actually
						 * allows.
						 */
						protections &= next_entry->max_protection;
					}
					if ((next_entry->wired_count) &&
					    (map_entry->wired_count == 0)) {
						break;
					}
					if(((next_entry->max_protection)
						& protections) != protections) {
						break;
					}
					if (next_entry->needs_copy !=
					    map_entry->needs_copy)
						break;
					mappable_size += next_entry->vme_end
						- next_entry->vme_start;
					total_size += next_entry->vme_end
						- next_entry->vme_start;
					next_entry = next_entry->vme_next;
				} else {
					break;
				}
			}
		}
		/* vm_map_entry_should_cow_for_true_share() checks for malloc tags,
		 * never true in kernel */
		if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) &&
		    object->vo_size > map_size &&
		    map_size != 0) {
			/*
			 * Set up the targeted range for copy-on-write to
			 * limit the impact of "true_share"/"copy_delay" to
			 * that range instead of the entire VM object...
			 */

			vm_object_unlock(object);
			if (vm_map_lock_read_to_write(target_map)) {
				vm_object_deallocate(object);
				target_map = original_map;
				goto redo_lookup;
			}

			vm_map_clip_start(target_map,
					  map_entry,
					  vm_map_trunc_page(map_start,
							    VM_MAP_PAGE_MASK(target_map)));
			vm_map_clip_end(target_map,
					map_entry,
					(vm_map_round_page(map_end,
							   VM_MAP_PAGE_MASK(target_map))));
			force_shadow = TRUE;

			if ((map_entry->vme_end - offset) < map_size) {
				map_size = map_entry->vme_end - map_start;
			}
			total_size = map_entry->vme_end - map_entry->vme_start;

			vm_map_lock_write_to_read(target_map);
			vm_object_lock(object);
		}
		if (object->internal) {
			/* vm_map_lookup_locked will create a shadow if   */
			/* needs_copy is set but does not check for the   */
			/* other two conditions shown. It is important to */
			/* set up an object which will not be pulled from */
			/* under us.  */

			if (force_shadow ||
			    ((map_entry->needs_copy ||
			      object->shadowed ||
			      (object->vo_size > total_size &&
			       (VME_OFFSET(map_entry) != 0 ||
				object->vo_size >
				vm_map_round_page(total_size,
						  VM_MAP_PAGE_MASK(target_map)))))
			     && !object->true_share)) {
				/*
				 * We have to unlock the VM object before
				 * trying to upgrade the VM map lock, to
				 * honor lock ordering (map then object).
				 * Otherwise, we would deadlock if another
				 * thread holds a read lock on the VM map and
				 * is trying to acquire the VM object's lock.
				 * We still hold an extra reference on the
				 * VM object, guaranteeing that it won't
				 * disappear.
				 */
				vm_object_unlock(object);

				if (vm_map_lock_read_to_write(target_map)) {
					/*
					 * We couldn't upgrade our VM map lock
					 * from "read" to "write" and we lost
					 * our "read" lock.
					 * Start all over again...
					 */
					vm_object_deallocate(object); /* extra ref */
					target_map = original_map;
					goto redo_lookup;
				}
				vm_object_lock(object);
2503 * JMM - We need to avoid coming here when the object
2504 * is wired by anybody, not just the current map. Why
2505 * couldn't we use the standard vm_object_copy_quickly()
2509 /* create a shadow object */
2510 VME_OBJECT_SHADOW(map_entry
, total_size
);
2511 shadow_object
= VME_OBJECT(map_entry
);
2513 vm_object_unlock(object
);
2516 prot
= map_entry
->protection
& ~VM_PROT_WRITE
;
2518 if (override_nx(target_map
,
2519 VME_ALIAS(map_entry
))
2521 prot
|= VM_PROT_EXECUTE
;
2523 vm_object_pmap_protect(
2524 object
, VME_OFFSET(map_entry
),
2526 ((map_entry
->is_shared
2527 || target_map
->mapped_in_other_pmaps
)
2530 map_entry
->vme_start
,
2532 total_size
-= (map_entry
->vme_end
2533 - map_entry
->vme_start
);
2534 next_entry
= map_entry
->vme_next
;
2535 map_entry
->needs_copy
= FALSE
;
2537 vm_object_lock(shadow_object
);
2538 while (total_size
) {
2539 assert((next_entry
->wired_count
== 0) ||
2540 (map_entry
->wired_count
));
2542 if (VME_OBJECT(next_entry
) == object
) {
2543 vm_object_reference_locked(shadow_object
);
2544 VME_OBJECT_SET(next_entry
,
2546 vm_object_deallocate(object
);
2549 (VME_OFFSET(next_entry
->vme_prev
) +
2550 (next_entry
->vme_prev
->vme_end
2551 - next_entry
->vme_prev
->vme_start
)));
2552 next_entry
->needs_copy
= FALSE
;
2554 panic("mach_make_memory_entry_64:"
2555 " map entries out of sync\n");
2559 - next_entry
->vme_start
;
2560 next_entry
= next_entry
->vme_next
;
2564 * Transfer our extra reference to the
2567 vm_object_reference_locked(shadow_object
);
2568 vm_object_deallocate(object
); /* extra ref */
2569 object
= shadow_object
;
2571 obj_off
= ((local_offset
- map_entry
->vme_start
)
2572 + VME_OFFSET(map_entry
));
2574 vm_map_lock_write_to_read(target_map
);
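		/*
		 * "object" now refers to the VM object (possibly the newly
		 * created shadow object) that the named entry will point to,
		 * and "obj_off" is the offset of our range within it.
		 */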
		/* note: in the future we can (if necessary) allow for  */
		/* memory object lists, this will better support        */
		/* fragmentation, but is it necessary?  The user should */
		/* be encouraged to create address space oriented       */
		/* shared objects from CLEAN memory regions which have  */
		/* a known and defined history.  i.e. no inheritance    */
		/* share, make this call before making the region the   */
		/* target of ipc's, etc.  The code above, protecting    */
		/* against delayed copy, etc. is mostly defensive.      */

		wimg_mode = object->wimg_bits;
		if (!(object->nophyscache)) {
			if (access == MAP_MEM_IO) {
				wimg_mode = VM_WIMG_IO;
			} else if (access == MAP_MEM_COPYBACK) {
				wimg_mode = VM_WIMG_USE_DEFAULT;
			} else if (access == MAP_MEM_INNERWBACK) {
				wimg_mode = VM_WIMG_INNERWBACK;
			} else if (access == MAP_MEM_WTHRU) {
				wimg_mode = VM_WIMG_WTHRU;
			} else if (access == MAP_MEM_WCOMB) {
				wimg_mode = VM_WIMG_WCOMB;
			}
		}

#if VM_OBJECT_TRACKING_OP_TRUESHARE
		if (!object->true_share &&
		    vm_object_tracking_inited) {
			void *bt[VM_OBJECT_TRACKING_BTDEPTH];
			int num = 0;

			num = OSBacktrace(bt,
					  VM_OBJECT_TRACKING_BTDEPTH);
			btlog_add_entry(vm_object_tracking_btlog,
					object,
					VM_OBJECT_TRACKING_OP_TRUESHARE,
					bt,
					num);
		}
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */

		object->true_share = TRUE;
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;

		/*
		 * The memory entry now points to this VM object and we
		 * need to hold a reference on the VM object.  Use the extra
		 * reference we took earlier to keep the object alive when we
		 * had to release the VM map lock.
		 */

		vm_map_unlock_read(target_map);
		if (real_map != target_map)
			vm_map_unlock_read(real_map);

		if (object->wimg_bits != wimg_mode)
			vm_object_change_wimg_mode(object, wimg_mode);

		/* the size of mapped entry that overlaps with our region */
		/* which is targeted for share.                           */
		/* (entry_end - entry_start) -                            */
		/* offset of our beg addr within entry                    */
		/* it corresponds to this:                                */

		if (map_size > mappable_size)
			map_size = mappable_size;
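		/*
		 * With MAP_MEM_NAMED_REUSE the caller is offering to keep
		 * using "parent_entry" if it already describes exactly what
		 * we just looked up, which lets us hand back another send
		 * right on the existing handle instead of allocating a new
		 * named entry and port.
		 */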
		if (permission & MAP_MEM_NAMED_REUSE) {
			/*
			 * Compare what we got with the "parent_entry".
			 * If they match, re-use the "parent_entry" instead
			 * of creating a new one.
			 */
			if (parent_entry != NULL &&
			    parent_entry->backing.object == object &&
			    parent_entry->internal == object->internal &&
			    parent_entry->is_sub_map == FALSE &&
			    parent_entry->is_pager == FALSE &&
			    parent_entry->offset == obj_off &&
			    parent_entry->protection == protections &&
			    parent_entry->size == map_size &&
			    ((!(use_data_addr || use_4K_compat) &&
			      (parent_entry->data_offset == 0)) ||
			     ((use_data_addr || use_4K_compat) &&
			      (parent_entry->data_offset == offset_in_page)))) {
				/*
				 * We have a match: re-use "parent_entry".
				 */
				/* release our extra reference on object */
				vm_object_unlock(object);
				vm_object_deallocate(object);
				/* parent_entry->ref_count++; XXX ? */
				/* Get an extra send-right on handle */
				ipc_port_copy_send(parent_handle);

				*size = CAST_DOWN(vm_size_t,
						  (parent_entry->size -
						   parent_entry->data_offset));
				*object_handle = parent_handle;
				return KERN_SUCCESS;
			} else {
				/*
				 * No match: we need to create a new entry.
				 * fall through...
				 */
			}
		}

		vm_object_unlock(object);
		if (mach_memory_entry_allocate(&user_entry, &user_handle)
		    != KERN_SUCCESS) {
			/* release our unused reference on the object */
			vm_object_deallocate(object);
			return KERN_FAILURE;
		}

		user_entry->backing.object = object;
		user_entry->internal = object->internal;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->offset = obj_off;
		user_entry->data_offset = offset_in_page;
		user_entry->protection = protections;
		SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
		user_entry->size = map_size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in.		      */

		*size = CAST_DOWN(vm_size_t, (user_entry->size -
					      user_entry->data_offset));
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}
	/* The new object will be based on an existing named object */
	if (parent_entry == NULL) {
		kr = KERN_INVALID_ARGUMENT;
		goto make_mem_done;
	}

	if (use_data_addr || use_4K_compat) {
		/*
		 * submaps and pagers should only be accessible from within
		 * the kernel, which shouldn't use the data address flag, so we
		 * can fail here.
		 */
		if (parent_entry->is_pager || parent_entry->is_sub_map) {
			panic("Shouldn't be using data address with a parent entry that is a submap or pager.");
		}
		/*
		 * Account for offset to data in parent entry and
		 * compute our own offset to data.
		 */
		if ((offset + *size + parent_entry->data_offset) > parent_entry->size) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}

		map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
		offset_in_page = (offset + parent_entry->data_offset) - map_start;
		if (use_4K_compat)
			offset_in_page &= ~((signed)(0xFFF));
		map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
	} else {
		map_end = vm_map_round_page(offset + *size, PAGE_MASK);
		map_size = map_end - map_start;
		offset_in_page = 0;

		if ((offset + map_size) > parent_entry->size) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}
	}
	if (mask_protections) {
		/*
		 * The caller asked us to use the "protections" as
		 * a mask, so restrict "protections" to what this
		 * mapping actually allows.
		 */
		protections &= parent_entry->protection;
	}
	if ((protections & parent_entry->protection) != protections) {
		kr = KERN_PROTECTION_FAILURE;
		goto make_mem_done;
	}
	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		kr = KERN_FAILURE;
		goto make_mem_done;
	}

	user_entry->size = map_size;
	user_entry->offset = parent_entry->offset + map_start;
	user_entry->data_offset = offset_in_page;
	user_entry->is_sub_map = parent_entry->is_sub_map;
	user_entry->is_pager = parent_entry->is_pager;
	user_entry->is_copy = parent_entry->is_copy;
	user_entry->internal = parent_entry->internal;
	user_entry->protection = protections;

	if (access != MAP_MEM_NOOP) {
		SET_MAP_MEM(access, user_entry->protection);
	}
	if (parent_entry->is_sub_map) {
		user_entry->backing.map = parent_entry->backing.map;
		vm_map_lock(user_entry->backing.map);
		user_entry->backing.map->ref_count++;
		vm_map_unlock(user_entry->backing.map);
	}
	else if (parent_entry->is_pager) {
		user_entry->backing.pager = parent_entry->backing.pager;
		/* JMM - don't we need a reference here? */
	} else {
		object = parent_entry->backing.object;
		assert(object != VM_OBJECT_NULL);
		user_entry->backing.object = object;
		/* we now point to this object, hold on */
		vm_object_reference(object);
		vm_object_lock(object);
#if VM_OBJECT_TRACKING_OP_TRUESHARE
		if (!object->true_share &&
		    vm_object_tracking_inited) {
			void *bt[VM_OBJECT_TRACKING_BTDEPTH];
			int num = 0;

			num = OSBacktrace(bt,
					  VM_OBJECT_TRACKING_BTDEPTH);
			btlog_add_entry(vm_object_tracking_btlog,
					object,
					VM_OBJECT_TRACKING_OP_TRUESHARE,
					bt,
					num);
		}
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */

		object->true_share = TRUE;
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
		vm_object_unlock(object);
	}
	*size = CAST_DOWN(vm_size_t, (user_entry->size -
				      user_entry->data_offset));
	*object_handle = user_handle;
	return KERN_SUCCESS;
make_mem_done:
	if (user_handle != IP_NULL) {
		/*
		 * Releasing "user_handle" causes the kernel object
		 * associated with it ("user_entry" here) to also be
		 * released and freed.
		 */
		mach_memory_entry_port_release(user_handle);
	}
	return kr;
}
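/*
 * Typical user-level usage of mach_make_memory_entry_64() (illustrative
 * sketch only; the variable names are local to this example):
 *
 *	memory_object_size_t	len = length;
 *	mach_port_t		entry = MACH_PORT_NULL;
 *
 *	kr = mach_make_memory_entry_64(mach_task_self(), &len,
 *				       (memory_object_offset_t)addr,
 *				       VM_PROT_READ | VM_PROT_WRITE,
 *				       &entry, MACH_PORT_NULL);
 *
 * The returned send right can then be passed to another task and mapped
 * there with mach_vm_map(), or handed back in as "parent_entry" together
 * with MAP_MEM_NAMED_REUSE in "permission" to re-use the same entry.
 */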
kern_return_t
_mach_make_memory_entry(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_size_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
				       (memory_object_offset_t)offset, permission, object_handle,
				       parent_entry);
	*size = mo_size;
	return kr;
}
kern_return_t
mach_make_memory_entry(
	vm_map_t		target_map,
	vm_size_t		*size,
	vm_offset_t		offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_size_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
				       (memory_object_offset_t)offset, permission, object_handle,
				       parent_entry);
	*size = CAST_DOWN(vm_size_t, mo_size);
	return kr;
}
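/*
 * The two wrappers above exist for callers that use narrower size and
 * offset types: they widen through memory_object_size_t and
 * memory_object_offset_t, call mach_make_memory_entry_64(), and narrow the
 * resulting size again (with CAST_DOWN in the vm_size_t case).
 */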
/*
 *	task_wire
 *
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (must_wire)
		map->wiring_required = TRUE;
	else
		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
}
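/*
 * mach_memory_entry_allocate:
 *
 * Allocate an empty vm_named_entry and the kernel-owned ipc port that
 * represents it.  The port is set up with one send right (handed back to
 * the caller) plus a send-once right used for the no-senders notification,
 * so that dropping the last send right eventually destroys the entry via
 * mach_destroy_memory_entry().  Callers fill in the backing object, map or
 * pager and the size/offset/protection fields afterwards.
 */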
__private_extern__ kern_return_t
mach_memory_entry_allocate(
	vm_named_entry_t	*user_entry_p,
	ipc_port_t		*user_handle_p)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	ipc_port_t		previous;

	user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
	if (user_entry == NULL)
		return KERN_FAILURE;

	named_entry_lock_init(user_entry);

	user_handle = ipc_port_alloc_kernel();
	if (user_handle == IP_NULL) {
		kfree(user_entry, sizeof *user_entry);
		return KERN_FAILURE;
	}
	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	user_entry->backing.pager = NULL;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = FALSE;
	user_entry->is_copy = FALSE;
	user_entry->internal = FALSE;
	user_entry->size = 0;
	user_entry->offset = 0;
	user_entry->data_offset = 0;
	user_entry->protection = VM_PROT_NONE;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);

	*user_entry_p = user_entry;
	*user_handle_p = user_handle;

	return KERN_SUCCESS;
}
/*
 *	mach_memory_object_memory_entry_64
 *
 *	Create a named entry backed by the provided pager.
 *
 * JMM - we need to hold a reference on the pager -
 * and release it when the named entry is destroyed.
 */
kern_return_t
mach_memory_object_memory_entry_64(
	host_t			host,
	boolean_t		internal,
	vm_object_offset_t	size,
	vm_prot_t		permission,
	memory_object_t		pager,
	ipc_port_t		*entry_handle)
{
	unsigned int		access;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	if (host == HOST_NULL)
		return(KERN_INVALID_HOST);

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	user_entry->backing.pager = pager;
	user_entry->size = size;
	user_entry->offset = 0;
	user_entry->protection = permission & VM_PROT_ALL;
	access = GET_MAP_MEM(permission);
	SET_MAP_MEM(access, user_entry->protection);
	user_entry->internal = internal;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = TRUE;
	assert(user_entry->ref_count == 1);

	*entry_handle = user_handle;
	return KERN_SUCCESS;
}
kern_return_t
mach_memory_object_memory_entry(
	host_t		host,
	boolean_t	internal,
	vm_size_t	size,
	vm_prot_t	permission,
	memory_object_t	pager,
	ipc_port_t	*entry_handle)
{
	return mach_memory_object_memory_entry_64( host, internal,
		(vm_object_offset_t)size, permission, pager, entry_handle);
}
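/*
 * mach_memory_entry_purgable_control:
 *
 * Get or set the purgeable state of the VM object backing a named entry.
 * The entry must be backed by a VM object (not a submap, pager or copy)
 * and must cover the entire object.  Illustrative sketch of a caller
 * (names local to the example):
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *
 *	kr = mach_memory_entry_purgable_control(entry_port,
 *						VM_PURGABLE_SET_STATE,
 *						&state);
 */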
kern_return_t
mach_memory_entry_purgable_control(
	ipc_port_t	entry_port,
	vm_purgable_t	control,
	int		*state)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_object_t		object;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}
	if (control != VM_PURGABLE_SET_STATE &&
	    control != VM_PURGABLE_GET_STATE)
		return(KERN_INVALID_ARGUMENT);

	if (control == VM_PURGABLE_SET_STATE &&
	    (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
	     ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
		return(KERN_INVALID_ARGUMENT);

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_pager ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	/* check that named entry covers entire object ? */
	if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
		vm_object_unlock(object);
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	named_entry_unlock(mem_entry);

	kr = vm_object_purgable_control(object, control, state);

	vm_object_unlock(object);

	return kr;
}
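/*
 * mach_memory_entry_get_page_counts:
 *
 * Return the resident and dirty page counts for the portion of the VM
 * object covered by a named entry.
 */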
kern_return_t
mach_memory_entry_get_page_counts(
	ipc_port_t	entry_port,
	unsigned int	*resident_page_count,
	unsigned int	*dirty_page_count)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_object_size_t	size;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_pager ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	offset = mem_entry->offset;
	size = mem_entry->size;

	named_entry_unlock(mem_entry);

	kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count);

	vm_object_unlock(object);

	return kr;
}
/*
 * mach_memory_entry_port_release:
 *
 * Release a send right on a named entry port.  This is the correct
 * way to destroy a named entry.  When the last right on the port is
 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
 */
void
mach_memory_entry_port_release(
	ipc_port_t	port)
{
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
	ipc_port_release_send(port);
}
/*
 * mach_destroy_memory_entry:
 *
 * Drops a reference on a memory entry and destroys the memory entry if
 * there are no more references on it.
 * NOTE: This routine should not be called to destroy a memory entry from the
 * kernel, as it will not release the Mach port associated with the memory
 * entry.  The proper way to destroy a memory entry in the kernel is to
 * call mach_memory_entry_port_release() to release the kernel's send-right on
 * the memory entry's port.  When the last send right is released, the memory
 * entry will be destroyed via ipc_kobject_destroy().
 */
void
mach_destroy_memory_entry(
	ipc_port_t	port)
{
	vm_named_entry_t	named_entry;
#if MACH_ASSERT
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
	named_entry = (vm_named_entry_t)port->ip_kobject;

	named_entry_lock(named_entry);
	named_entry->ref_count -= 1;

	if (named_entry->ref_count == 0) {
		if (named_entry->is_sub_map) {
			vm_map_deallocate(named_entry->backing.map);
		} else if (named_entry->is_pager) {
			/* JMM - need to drop reference on pager in that case */
		} else if (named_entry->is_copy) {
			vm_map_copy_discard(named_entry->backing.copy);
		} else {
			/* release the VM object we've been pointing to */
			vm_object_deallocate(named_entry->backing.object);
		}

		named_entry_unlock(named_entry);
		named_entry_lock_destroy(named_entry);

		kfree((void *) port->ip_kobject,
		      sizeof (struct vm_named_entry));
	} else
		named_entry_unlock(named_entry);
}
/* Allow manipulation of individual page state.  This is actually part of   */
/* the UPL regimen but takes place on the memory entry rather than on a UPL */

kern_return_t
mach_memory_entry_page_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags)
{
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	kern_return_t		kr;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_pager ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_reference(object);
	named_entry_unlock(mem_entry);

	kr = vm_object_page_op(object, offset, ops, phys_entry, flags);

	vm_object_deallocate(object);

	return kr;
}
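/*
 * The "ops" and returned "*flags" values above are the UPL_POP_* page-op
 * flags interpreted by vm_object_page_op(); the named entry merely selects
 * which VM object the operation is applied to.
 */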
/*
 * mach_memory_entry_range_op offers performance enhancement over
 * mach_memory_entry_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */

kern_return_t
mach_memory_entry_range_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset_beg,
	vm_object_offset_t	offset_end,
	int			ops,
	int			*range)
{
	vm_named_entry_t	mem_entry;
	vm_object_t		object;
	kern_return_t		kr;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_pager ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_reference(object);
	named_entry_unlock(mem_entry);

	kr = vm_object_range_op(object,
				offset_beg,
				offset_end,
				ops,
				(uint32_t *) range);

	vm_object_deallocate(object);

	return kr;
}
kern_return_t
set_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	if (IP_VALID(dynamic_pager_control_port))
		ipc_port_release_send(dynamic_pager_control_port);

	dynamic_pager_control_port = control_port;
	return KERN_SUCCESS;
}

kern_return_t
get_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	*control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	*control_port = ipc_port_copy_send(dynamic_pager_control_port);
	return KERN_SUCCESS;
}
/* ******* Temporary Internal calls to UPL for BSD ***** */

extern int kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr);

extern int kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl);

extern int kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_abort(
	upl_t			upl,
	int			abort_type);

extern int kernel_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags);


kern_return_t
kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return vm_upl_map(map, upl, dst_addr);
}


kern_return_t
kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	return vm_upl_unmap(map, upl);
}

kern_return_t
kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count)
{
	kern_return_t	kr;

	kr = upl_commit(upl, pl, count);
	upl_deallocate(upl);
	return kr;
}


kern_return_t
kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count)
{
	boolean_t		finished = FALSE;
	kern_return_t		kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort_range(
	upl_t		upl,
	upl_offset_t	offset,
	upl_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	finished = FALSE;

	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort(
	upl_t		upl,
	int		abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
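/*
 * Note on the wrappers above: UPL_COMMIT_FREE_ON_EMPTY is translated into
 * UPL_COMMIT_NOTIFY_EMPTY so that the UPL can be deallocated here once the
 * commit or abort has emptied it, while kernel_upl_commit() and
 * kernel_upl_abort() always deallocate the UPL after the operation.
 */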
/*
 * Now a kernel-private interface (for BootCache
 * use only).  Need a cleaner way to create an
 * empty vm_map() and return a handle to it.
 */

kern_return_t
vm_region_object_create(
	__unused vm_map_t	target_map,
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	vm_map_t	new_map;

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
				vm_map_round_page(size,
						  VM_MAP_PAGE_MASK(target_map)),
				TRUE);
	vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));

	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	assert(user_entry->ref_count == 1);

	*object_handle = user_handle;
	return KERN_SUCCESS;
}
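/*
 * vm_map_get_phys_page:
 *
 * Translate a virtual address in "map" to the physical page number that
 * currently backs it, descending through submaps and object shadow chains.
 * Returns 0 if no page is resident at that address.
 */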
ppnum_t vm_map_get_phys_page(		/* forward */
	vm_map_t	map,
	vm_offset_t	offset);

ppnum_t
vm_map_get_phys_page(
	vm_map_t		map,
	vm_offset_t		addr)
{
	vm_object_offset_t	offset;
	vm_object_t		object;
	vm_map_offset_t		map_offset;
	vm_map_entry_t		entry;
	ppnum_t			phys_page = 0;

	map_offset = vm_map_trunc_page(addr, PAGE_MASK);

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, map_offset, &entry)) {

		if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return (ppnum_t) 0;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;
			vm_map_lock(VME_SUBMAP(entry));
			old_map = map;
			map = VME_SUBMAP(entry);
			map_offset = (VME_OFFSET(entry) +
				      (map_offset - entry->vme_start));
			vm_map_unlock(old_map);
			continue;
		}
		if (VME_OBJECT(entry)->phys_contiguous) {
			/* These are not standard pageable memory mappings */
			/* If they are not present in the object they will */
			/* have to be picked up from the pager through the */
			/* fault mechanism.  */
			if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
				/* need to call vm_fault */
				vm_map_unlock(map);
				vm_fault(map, map_offset, VM_PROT_NONE,
					 FALSE, THREAD_UNINT, NULL, 0);
				vm_map_lock(map);
				continue;
			}
			offset = (VME_OFFSET(entry) +
				  (map_offset - entry->vme_start));
			phys_page = (ppnum_t)
				((VME_OBJECT(entry)->vo_shadow_offset
				  + offset) >> PAGE_SHIFT);
			break;
		}
		offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
		object = VME_OBJECT(entry);
		vm_object_lock(object);
		while (TRUE) {
			vm_page_t dst_page = vm_page_lookup(object, offset);
			if (dst_page == VM_PAGE_NULL) {
				if (object->shadow) {
					vm_object_t old_object;
					vm_object_lock(object->shadow);
					old_object = object;
					offset = offset + object->vo_shadow_offset;
					object = object->shadow;
					vm_object_unlock(old_object);
				} else {
					vm_object_unlock(object);
					break;
				}
			} else {
				phys_page = (ppnum_t)(dst_page->phys_page);
				vm_object_unlock(object);
				break;
			}
		}
		break;
	}

	vm_map_unlock(map);
	return phys_page;
}
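/*
 * kernel_object_iopl_request:
 *
 * Build an I/O page list (UPL) directly against the VM object backing a
 * named entry, after validating the caller's flags, size and access
 * against the entry's protection.  The actual work is done by
 * vm_object_iopl_request().
 */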
kern_return_t kernel_object_iopl_request(	/* forward */
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags);

kern_return_t
kernel_object_iopl_request(
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags)
{
	vm_object_t		object;
	kern_return_t		ret;

	int			caller_flags;

	caller_flags = *flags;

	if (caller_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}

	/* a few checks to make sure user is obeying rules */
	if (*upl_size == 0) {
		if (offset >= named_entry->size)
			return(KERN_INVALID_RIGHT);
		*upl_size = (upl_size_t) (named_entry->size - offset);
		if (*upl_size != named_entry->size - offset)
			return KERN_INVALID_ARGUMENT;
	}
	if (caller_flags & UPL_COPYOUT_FROM) {
		if ((named_entry->protection & VM_PROT_READ)
		    != VM_PROT_READ) {
			return(KERN_INVALID_RIGHT);
		}
	} else {
		if ((named_entry->protection &
		     (VM_PROT_READ | VM_PROT_WRITE))
		    != (VM_PROT_READ | VM_PROT_WRITE)) {
			return(KERN_INVALID_RIGHT);
		}
	}
	if (named_entry->size < (offset + *upl_size))
		return(KERN_INVALID_ARGUMENT);

	/* the caller's parameter offset is defined to be the      */
	/* offset from beginning of named entry offset in object   */
	offset = offset + named_entry->offset;

	if (named_entry->is_sub_map ||
	    named_entry->is_copy)
		return KERN_INVALID_ARGUMENT;

	named_entry_lock(named_entry);

	if (named_entry->is_pager) {
		object = vm_object_enter(named_entry->backing.pager,
				named_entry->offset + named_entry->size,
				named_entry->internal,
				FALSE,
				FALSE);
		if (object == VM_OBJECT_NULL) {
			named_entry_unlock(named_entry);
			return(KERN_INVALID_OBJECT);
		}

		/* JMM - drop reference on the pager here? */

		/* create an extra reference for the object */
		vm_object_lock(object);
		vm_object_reference_locked(object);
		named_entry->backing.object = object;
		named_entry->is_pager = FALSE;
		named_entry_unlock(named_entry);

		/* wait for object (if any) to be ready */
		if (!named_entry->internal) {
			while (!object->pager_ready) {
				vm_object_wait(object,
					       VM_OBJECT_EVENT_PAGER_READY,
					       THREAD_UNINT);
				vm_object_lock(object);
			}
		}
		vm_object_unlock(object);

	} else {
		/* This is the case where we are going to operate */
		/* on an already known object.  If the object is  */
		/* not ready it is internal.  An external         */
		/* object cannot be mapped until it is ready,     */
		/* so we can avoid the ready check in this case.  */
		object = named_entry->backing.object;
		vm_object_reference(object);
		named_entry_unlock(named_entry);
	}

	if (!object->private) {
		if (*upl_size > MAX_UPL_TRANSFER_BYTES)
			*upl_size = MAX_UPL_TRANSFER_BYTES;
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		} else {
			*flags = 0;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
				     offset,
				     *upl_size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     (upl_control_flags_t)(unsigned int)caller_flags);
	vm_object_deallocate(object);
	return ret;
}