/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */

#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/shared_memory_server.h>
#include <mach/vm_map_server.h>
#include <vm/vm_shared_memory_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
vm_size_t	upl_offset_to_pagelist = 0;

ipc_port_t	dynamic_pager_control_port = NULL;
/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
	mach_vm_offset_t	*addr,

	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;
	kern_return_t		result;
	boolean_t		anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

		return(KERN_SUCCESS);

		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain usable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
			map_addr += PAGE_SIZE;
		map_addr = vm_map_trunc_page(*addr);
	map_size = vm_map_round_page(size);
		return(KERN_INVALID_ARGUMENT);

	result = vm_map_enter(
			(vm_object_offset_t)0,
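/*
 * Usage sketch (illustrative only; a hypothetical user-space caller, not
 * kernel code): exercising the mach_vm_allocate()/mach_vm_deallocate()
 * calls that these kernel routines back.  Error handling is reduced to a
 * single check for brevity.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t demo_allocate(void)
 *	{
 *		mach_vm_address_t addr = 0;	// with VM_FLAGS_ANYWHERE the kernel picks the address
 *		mach_vm_size_t    size = 4 * 4096;
 *		kern_return_t     kr;
 *
 *		kr = mach_vm_allocate(mach_task_self(), &addr, size,
 *				      VM_FLAGS_ANYWHERE);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *		// ... use the zero-filled pages at [addr, addr+size) ...
 *		return mach_vm_deallocate(mach_task_self(), addr, size);
 *	}
 */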
/*
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;
	kern_return_t		result;
	boolean_t		anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

		return(KERN_SUCCESS);

		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain usable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
			map_addr += PAGE_SIZE;
		map_addr = vm_map_trunc_page(*addr);
	map_size = vm_map_round_page(size);
		return(KERN_INVALID_ARGUMENT);

	result = vm_map_enter(
			(vm_object_offset_t)0,

	*addr = CAST_DOWN(vm_offset_t, map_addr);
/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
	mach_vm_offset_t	start,

	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (mach_vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, vm_map_trunc_page(start),
			     vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
/*
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
	register vm_map_t	map,

	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, vm_map_trunc_page(start),
			     vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
/*
 *	Sets the inheritance of the specified range in the
 */
	mach_vm_offset_t	start,
	vm_inherit_t		new_inheritance)

	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
/*
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses
 */
	register vm_map_t	map,
	vm_inherit_t		new_inheritance)

	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
/*
 *	Sets the protection of the specified range in the
 */
	mach_vm_offset_t	start,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)

	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
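/*
 * Usage sketch (illustrative only; a hypothetical user-space helper, not
 * kernel code): dropping write permission on a previously allocated range
 * via the user-space mach_vm_protect() call.  set_maximum == FALSE changes
 * only the current protection; passing TRUE would lower the maximum
 * protection instead.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t make_read_only(mach_vm_address_t addr, mach_vm_size_t size)
 *	{
 *		return mach_vm_protect(mach_task_self(), addr, size,
 *				       FALSE,		// set_maximum
 *				       VM_PROT_READ);	// new (current) protection
 *	}
 */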
/*
 *	Sets the protection of the specified range in the
 *	specified map. Addressability of the range limited
 *	to the same size as the kernel.
 */
	boolean_t		set_maximum,
	vm_prot_t		new_protection)

	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
/*
 *	mach_vm_machine_attribute -
 *	Handle machine-specific attributes for a mapping, such
 *	as cacheability, migrability, etc.
 */
mach_vm_machine_attribute(
	mach_vm_address_t		addr,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t*	value)		/* IN/OUT */

	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	return vm_map_machine_attribute(map,
					vm_map_trunc_page(addr),
					vm_map_round_page(addr+size),
/*
 *	vm_machine_attribute -
 *	Handle machine-specific attributes for a mapping, such
 *	as cacheability, migrability, etc. Limited addressability
 *	(same range limits as for the native kernel map).
 */
vm_machine_attribute(
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t*	value)		/* IN/OUT */

	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	return vm_map_machine_attribute(map,
					vm_map_trunc_page(addr),
					vm_map_round_page(addr+size),
/*
 *	Read/copy a range from one address space and return it to the caller.
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 *
 *	JMM - because of mach_msg_type_number_t, this call is limited to a
 *	single 4GB region at this time.
 */
	mach_vm_address_t	addr,
	mach_msg_type_number_t	*data_size)

	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map,
			      (vm_map_address_t)addr,
			      FALSE,	/* src_destroy */

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
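/*
 * Usage sketch (illustrative only; a hypothetical user-space helper, not
 * kernel code): reading a range out of a task's address space with the
 * user-space mach_vm_read() call.  The reply delivers the data as
 * out-of-line memory in the caller's map, so the buffer must be released
 * with mach_vm_deallocate() when done.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t dump_range(task_t task, mach_vm_address_t addr,
 *				 mach_vm_size_t size)
 *	{
 *		vm_offset_t data = 0;
 *		mach_msg_type_number_t count = 0;
 *		kern_return_t kr;
 *
 *		kr = mach_vm_read(task, addr, size, &data, &count);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *		// ... inspect count bytes at (const void *)data ...
 *		return mach_vm_deallocate(mach_task_self(), data, count);
 *	}
 */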
/*
 *	Read/copy a range from one address space and return it to the caller.
 *	Limited addressability (same range limits as for the native kernel map).
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 */
	mach_msg_type_number_t	*data_size)

	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map,
			      (vm_map_address_t)addr,
			      FALSE,	/* src_destroy */

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
/*
 *	mach_vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 */
	mach_vm_read_entry_t	data_list,

	mach_msg_type_number_t	i;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

			error = vm_map_copyin(map,
					      FALSE,	/* src_destroy */
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
				if (KERN_SUCCESS == error) {
					data_list[i].address = map_addr;
				vm_map_copy_discard(copy);
			data_list[i].address = (mach_vm_address_t)0;
			data_list[i].size = (mach_vm_size_t)0;
/*
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *
 *	The source and destination ranges are limited to those
 *	that can be described with a vm_address_t (i.e. same
 *	size map as the kernel).
 *
 *	JMM - If the result of the copyout is an address range
 *	that cannot be described with a vm_address_t (i.e. the
 *	caller had a larger address space but used this call
 *	anyway), it will result in a truncated address being
 *	returned (and a likely confused caller).
 */
	vm_read_entry_t	data_list,

	mach_msg_type_number_t	i;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

			error = vm_map_copyin(map,
					      FALSE,	/* src_destroy */
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
				if (KERN_SUCCESS == error) {
					data_list[i].address =
						CAST_DOWN(vm_offset_t, map_addr);
				vm_map_copy_discard(copy);
			data_list[i].address = (mach_vm_address_t)0;
			data_list[i].size = (mach_vm_size_t)0;
/*
 *	mach_vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *
 *	In making an assumption that the current thread is local, it is
 *	no longer cluster-safe without a fully supportive local proxy
 *	thread/task (but we don't support clusters anymore so this is moot).
 */
mach_vm_read_overwrite(
	mach_vm_address_t	address,
	mach_vm_address_t	data,
	mach_vm_size_t		*data_size)

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
			      (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					      (vm_map_address_t)data,
		if (KERN_SUCCESS == error) {
	vm_map_copy_discard(copy);
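/*
 * Usage sketch (illustrative only; a hypothetical user-space helper, not
 * kernel code): copying from a target task directly into a caller-supplied
 * buffer with the user-space mach_vm_read_overwrite() call, which avoids
 * the out-of-line allocation that mach_vm_read() performs.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t read_into(task_t task, mach_vm_address_t remote_addr,
 *				void *local_buf, mach_vm_size_t size)
 *	{
 *		mach_vm_size_t outsize = 0;
 *
 *		return mach_vm_read_overwrite(task, remote_addr, size,
 *					      (mach_vm_address_t)local_buf,
 *					      &outsize);
 *	}
 */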
/*
 *	vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *
 *	This routine adds the additional limitation that the source and
 *	destination ranges must be describable with vm_address_t values
 *	(i.e. the same size address spaces as the kernel, or at least
 *	the ranges are in that first portion of the respective address
 */
	vm_address_t	address,
	vm_size_t	*data_size)

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
			      (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					      (vm_map_address_t)data,
		if (KERN_SUCCESS == error) {
	vm_map_copy_discard(copy);
/*
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 */
	mach_vm_address_t		address,
	__unused mach_msg_type_number_t	size)

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
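/*
 * Usage sketch (illustrative only; a hypothetical user-space helper, not
 * kernel code): overwriting a range in a target task with data from the
 * current task via the user-space mach_vm_write() call.  The destination
 * pages must already exist and be writable in the target map.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t poke(task_t task, mach_vm_address_t remote_addr,
 *			   const void *local_buf, mach_msg_type_number_t len)
 *	{
 *		return mach_vm_write(task, remote_addr,
 *				     (vm_offset_t)local_buf, len);
 *	}
 */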
/*
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 *
 *	The addressability of the range of addresses to overwrite is
 *	limited by the use of a vm_address_t (same size as kernel map).
 *	Either the target map is also small, or the range is in the
 *	low addresses within it.
 */
	vm_address_t			address,
	__unused mach_msg_type_number_t	size)

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
/*
 *	Overwrite one range of the specified map with the contents of
 *	another range within that same map (i.e. both address ranges
 */
	mach_vm_address_t	source_address,
	mach_vm_address_t	dest_address)

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
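/*
 * Usage sketch (illustrative only; a hypothetical user-space helper, not
 * kernel code): duplicating one range of a task's address space over
 * another range in that same map with the user-space mach_vm_copy() call.
 * Both ranges must already be mapped.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t dup_range(mach_vm_address_t src, mach_vm_address_t dst,
 *				mach_vm_size_t size)
 *	{
 *		return mach_vm_copy(mach_task_self(), src, size, dst);
 *	}
 */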
	vm_address_t	source_address,
	vm_address_t	dest_address)

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
/*
 *	Map some range of an object into an address space.
 *
 *	The object can be one of several types of objects:
 *		NULL - anonymous memory
 *		a named entry - a range within another address space
 *		                or a range within a memory object
 *		a whole memory object
 */
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	vm_object_offset_t	offset,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)

	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	vm_object_size_t	size;
	kern_return_t		result;

	/*
	 * Check arguments for validity
	 */
	if ((target_map == VM_MAP_NULL) ||
	    (cur_protection & ~VM_PROT_ALL) ||
	    (max_protection & ~VM_PROT_ALL) ||
	    (inheritance > VM_INHERIT_LAST_VALID) ||
		return(KERN_INVALID_ARGUMENT);

	map_addr = vm_map_trunc_page(*address);
	map_size = vm_map_round_page(initial_size);
	size = vm_object_round_page(initial_size);
	/*
	 * Find the vm object (if any) corresponding to this port.
	 */
	if (!IP_VALID(port)) {
		object = VM_OBJECT_NULL;
	} else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
		vm_named_entry_t	named_entry;

		named_entry = (vm_named_entry_t)port->ip_kobject;
		/* a few checks to make sure user is obeying rules */
		if(offset >= named_entry->size)
			return(KERN_INVALID_RIGHT);
		size = named_entry->size - offset;
		if((named_entry->protection & max_protection) != max_protection)
			return(KERN_INVALID_RIGHT);
		if((named_entry->protection & cur_protection) != cur_protection)
			return(KERN_INVALID_RIGHT);
		if(named_entry->size < (offset + size))
			return(KERN_INVALID_ARGUMENT);

		/* the caller's parameter offset is defined to be the */
		/* offset from beginning of named entry offset in object */
		offset = offset + named_entry->offset;
		named_entry_lock(named_entry);
		if(named_entry->is_sub_map) {
			vm_map_entry_t	map_entry;

			named_entry_unlock(named_entry);
			vm_object_reference(vm_submap_object);
			if ((result = vm_map_enter(target_map,
				(vm_map_offset_t)mask, flags,
				cur_protection, max_protection, inheritance
				)) != KERN_SUCCESS) {
				vm_object_deallocate(vm_submap_object);
			VM_GET_FLAGS_ALIAS(flags, alias);
			if ((alias == VM_MEMORY_SHARED_PMAP) &&
				vm_map_submap(target_map, map_addr,
					      named_entry->backing.map,
					      (vm_map_offset_t)offset, TRUE);
				vm_map_submap(target_map, map_addr,
					      named_entry->backing.map,
					      (vm_map_offset_t)offset, FALSE);
				if(vm_map_lookup_entry(
					target_map, map_addr, &map_entry)) {
					map_entry->needs_copy = TRUE;
		} else if (named_entry->is_pager) {
			vm_prot_t	protections;
			unsigned int	wimg_mode;
			boolean_t	cache_attr;

			protections = named_entry->protection
			access = GET_MAP_MEM(named_entry->protection);

			object = vm_object_enter(
				named_entry->backing.pager,
				named_entry->internal,
			if (object == VM_OBJECT_NULL) {
				named_entry_unlock(named_entry);
				return(KERN_INVALID_OBJECT);
			/* JMM - drop reference on pager here */

			/* create an extra ref for the named entry */
			vm_object_lock(object);
			vm_object_reference_locked(object);
			named_entry->backing.object = object;
			named_entry->is_pager = FALSE;
			named_entry_unlock(named_entry);

			wimg_mode = object->wimg_bits;
			if(access == MAP_MEM_IO) {
				wimg_mode = VM_WIMG_IO;
			} else if (access == MAP_MEM_COPYBACK) {
				wimg_mode = VM_WIMG_USE_DEFAULT;
			} else if (access == MAP_MEM_WTHRU) {
				wimg_mode = VM_WIMG_WTHRU;
			} else if (access == MAP_MEM_WCOMB) {
				wimg_mode = VM_WIMG_WCOMB;
			if ((wimg_mode == VM_WIMG_IO)
			    || (wimg_mode == VM_WIMG_WCOMB))

			/* wait for object (if any) to be ready */
			if (!named_entry->internal) {
				while (!object->pager_ready) {
					vm_object_wait(object,
						VM_OBJECT_EVENT_PAGER_READY,
					vm_object_lock(object);
			if(object->wimg_bits != wimg_mode) {
				vm_object_paging_wait(object, THREAD_UNINT);

				object->wimg_bits = wimg_mode;
				queue_iterate(&object->memq, p, vm_page_t, listq) {
					if (!p->fictitious) {
						pmap_disconnect(p->phys_page);
						pmap_sync_page_attributes_phys(p->phys_page);
			object->true_share = TRUE;
			if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
				object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
			vm_object_unlock(object);

			/* This is the case where we are going to map */
			/* an already mapped object.  If the object is */
			/* not ready it is internal.  An external      */
			/* object cannot be mapped until it is ready   */
			/* we can therefore avoid the ready check      */
			object = named_entry->backing.object;
			assert(object != VM_OBJECT_NULL);
			named_entry_unlock(named_entry);
			vm_object_reference(object);
	} else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
		/*
		 * JMM - This is temporary until we unify named entries
		 * and raw memory objects.
		 *
		 * Detected fake ip_kotype for a memory object.  In
		 * this case, the port isn't really a port at all, but
		 * instead is just a raw memory object.
		 */
		if ((object = vm_object_enter((memory_object_t)port,
					size, FALSE, FALSE, FALSE))
			return(KERN_INVALID_OBJECT);

		/* wait for object (if any) to be ready */
		if (object != VM_OBJECT_NULL) {
			if(object == kernel_object) {
				printf("Warning: Attempt to map kernel object"
					" by a non-private kernel entity\n");
				return(KERN_INVALID_OBJECT);
			vm_object_lock(object);
			while (!object->pager_ready) {
				vm_object_wait(object,
					VM_OBJECT_EVENT_PAGER_READY,
				vm_object_lock(object);
			vm_object_unlock(object);
		return (KERN_INVALID_OBJECT);
	/*
	 *	Perform the copy if requested
	 */
		vm_object_t		new_object;
		vm_object_offset_t	new_offset;

		result = vm_object_copy_strategically(object, offset, size,
				&new_object, &new_offset,
		if (result == KERN_MEMORY_RESTART_COPY) {
			boolean_t src_needs_copy;

			/*
			 * We currently ignore src_needs_copy.
			 * This really is the issue of how to make
			 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
			 * non-kernel users to use. Solution forthcoming.
			 * In the meantime, since we don't allow non-kernel
			 * memory managers to specify symmetric copy,
			 * we won't run into problems here.
			 */
			new_object = object;
			new_offset = offset;
			success = vm_object_copy_quickly(&new_object,
			result = KERN_SUCCESS;

		/*
		 *	Throw away the reference to the
		 *	original object, as it won't be mapped.
		 */
		vm_object_deallocate(object);

		if (result != KERN_SUCCESS)

		object = new_object;
		offset = new_offset;

	if ((result = vm_map_enter(target_map,
				&map_addr, map_size,
				(vm_map_offset_t)mask,
				cur_protection, max_protection, inheritance
		vm_object_deallocate(object);
	*address = map_addr;
/* legacy interface */
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_object_offset_t	offset,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)

	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_address_t, map_addr);
/* temporary, until world build */
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)

	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	vm_object_offset_t obj_offset;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;
	obj_offset = (vm_object_offset_t)offset;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 port, obj_offset, copy,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_address_t, map_addr);
/*
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 */
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	mach_vm_offset_t	memory_address,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)

	vm_map_offset_t		map_addr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
	*address = map_addr;
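/*
 * Usage sketch (illustrative only; a hypothetical user-space helper, not
 * kernel code): remapping a range from another task into the caller with
 * the user-space mach_vm_remap() call.  With copy == FALSE the pages are
 * shared; the kernel reports the resulting protections through the two
 * out parameters.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t mirror(task_t src_task, mach_vm_address_t src_addr,
 *			     mach_vm_size_t size, mach_vm_address_t *local_addr)
 *	{
 *		vm_prot_t cur_prot, max_prot;
 *
 *		*local_addr = 0;
 *		return mach_vm_remap(mach_task_self(), local_addr, size,
 *				     0,			// alignment mask
 *				     VM_FLAGS_ANYWHERE,
 *				     src_task, src_addr,
 *				     FALSE,		// copy: share, don't copy
 *				     &cur_prot, &max_prot,
 *				     VM_INHERIT_SHARE);
 *	}
 */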
/*
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 *
 *	The addressability of the source and target address
 *	range is limited by the size of vm_address_t (in the
 */
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_offset_t		memory_address,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)

	vm_map_offset_t		map_addr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
	*address = CAST_DOWN(vm_offset_t, map_addr);
/*
 *	NOTE: this routine (and this file) will no longer require mach_host_server.h
 *	when mach_vm_wire and vm_wire are changed to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
	host_priv_t		host_priv,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL)
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, vm_map_trunc_page(start),
				 vm_map_round_page(start+size), access, TRUE);
		rc = vm_map_unwire(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), TRUE);
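/*
 * Usage sketch (illustrative only; a hypothetical user-space helper, not
 * kernel code): wiring and later unwiring a range with the user-space
 * mach_vm_wire() call.  The first argument must be the privileged host
 * port, so this only works for callers that can obtain it (e.g. root
 * processes); host_priv below is assumed to have been acquired already.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t pin_range(host_priv_t host_priv,
 *				mach_vm_address_t addr, mach_vm_size_t size)
 *	{
 *		kern_return_t kr;
 *
 *		kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
 *				  VM_PROT_READ | VM_PROT_WRITE);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *		// ... the pages now stay resident ...
 *		return mach_vm_wire(host_priv, mach_task_self(), addr, size,
 *				    VM_PROT_NONE);	// VM_PROT_NONE unwires
 *	}
 */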
/*
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
	host_priv_t		host_priv,
	register vm_map_t	map,

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if ((access & ~VM_PROT_ALL) || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	} else if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, vm_map_trunc_page(start),
				 vm_map_round_page(start+size), access, TRUE);
		rc = vm_map_unwire(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), TRUE);
/*
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	KERN_INVALID_TASK	Bad task parameter
 *	KERN_INVALID_ARGUMENT	both sync and async were specified.
 *	KERN_SUCCESS		The usual.
 *	KERN_INVALID_ADDRESS	There was a hole in the region.
 */
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	vm_sync_t		sync_flags)

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			    (vm_map_size_t)size, sync_flags);
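/*
 * Usage sketch (illustrative only; a hypothetical user-space helper, not
 * kernel code): synchronously flushing a mapped range back to its pager.
 * This assumes the mach_vm_msync() user stub is exported on the target
 * system; the narrower vm_msync() variant handled below takes the same
 * flags.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t flush_range(mach_vm_address_t addr, mach_vm_size_t size)
 *	{
 *		return mach_vm_msync(mach_task_self(), addr, size,
 *				     VM_SYNC_SYNCHRONOUS);
 *	}
 */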
/*
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	KERN_INVALID_TASK	Bad task parameter
 *	KERN_INVALID_ARGUMENT	both sync and async were specified.
 *	KERN_SUCCESS		The usual.
 *	KERN_INVALID_ADDRESS	There was a hole in the region.
 */
	vm_address_t	address,
	vm_sync_t	sync_flags)

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			    (vm_map_size_t)size, sync_flags);
/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
mach_vm_behavior_set(
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_behavior_t		new_behavior)

	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

		return KERN_SUCCESS;

	return(vm_map_behavior_set(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), new_behavior));
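/*
 * Usage sketch (illustrative only; a hypothetical user-space helper, not
 * kernel code): advising the VM system that a range will be touched
 * sequentially.  This assumes the mach_vm_behavior_set() user stub is
 * available; the legacy vm_behavior_set() handled below is equivalent for
 * small address spaces.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t advise_sequential(mach_vm_address_t addr,
 *					mach_vm_size_t size)
 *	{
 *		return mach_vm_behavior_set(mach_task_self(), addr, size,
 *					    VM_BEHAVIOR_SEQUENTIAL);
 *	}
 */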
/*
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 */
	vm_behavior_t		new_behavior)

	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

		return KERN_SUCCESS;

	return(vm_map_behavior_set(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), new_behavior));
/*
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
	mach_vm_offset_t	*address,		/* IN/OUT */
	mach_vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */

	vm_map_offset_t 	map_addr;
	vm_map_size_t 		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,

	*address = map_addr;
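/*
 * Usage sketch (illustrative only; a hypothetical user-space helper, not
 * kernel code): querying the region containing an address with the
 * user-space mach_vm_region() call and the VM_REGION_BASIC_INFO_64 flavor
 * handled above.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t region_of(task_t task, mach_vm_address_t *addr,
 *				mach_vm_size_t *size,
 *				vm_region_basic_info_data_64_t *info)
 *	{
 *		mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
 *		mach_port_t object_name = MACH_PORT_NULL;	// unused, but required
 *
 *		return mach_vm_region(task, addr, size,
 *				      VM_REGION_BASIC_INFO_64,
 *				      (vm_region_info_t)info,
 *				      &count, &object_name);
 *	}
 */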
/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */

	vm_map_offset_t 	map_addr;
	vm_map_size_t 		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,

	*address = CAST_DOWN(vm_offset_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	vm_address_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */

	vm_map_address_t 	map_addr;
	vm_map_size_t 		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 */
mach_vm_region_recurse(
	mach_vm_address_t		*address,
	mach_vm_size_t			*size,
	vm_region_recurse_info_t	info,
	mach_msg_type_number_t 		*infoCnt)

	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			(vm_region_submap_info_64_t)info,

	*address = map_addr;
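/*
 * Usage sketch (illustrative only; a hypothetical user-space helper, not
 * kernel code): enumerating the top-level regions of a task's address
 * space with the user-space mach_vm_region_recurse() call.  Passing a
 * larger nesting depth (and checking info.is_submap) would descend into
 * submaps as well.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	void walk_map(task_t task)
 *	{
 *		mach_vm_address_t addr = 0;
 *		mach_vm_size_t size = 0;
 *		vm_region_submap_info_data_64_t info;
 *		natural_t depth;
 *		mach_msg_type_number_t count;
 *
 *		for (;;) {
 *			depth = 0;	// stay at the top level
 *			count = VM_REGION_SUBMAP_INFO_COUNT_64;
 *			if (mach_vm_region_recurse(task, &addr, &size, &depth,
 *					(vm_region_recurse_info_t)&info,
 *					&count) != KERN_SUCCESS)
 *				break;
 *			// ... examine [addr, addr+size), info.protection, ...
 *			addr += size;
 *		}
 *	}
 */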
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 */
vm_region_recurse_64(
	vm_address_t			*address,
	vm_region_recurse_info_64_t	info,
	mach_msg_type_number_t 		*infoCnt)

	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			(vm_region_submap_info_64_t)info,

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	vm_offset_t		*address,	/* IN/OUT */
	vm_size_t		*size,		/* OUT */
	natural_t	 	*depth,		/* IN/OUT */
	vm_region_recurse_info_t	info32,	/* IN/OUT */
	mach_msg_type_number_t	*infoCnt)	/* IN/OUT */

	vm_region_submap_info_data_64_t info64;
	vm_region_submap_info_t info;
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr,&map_size,
				      depth, &info64, infoCnt);

	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
vm_purgable_control(
	vm_offset_t		address,
	vm_purgable_t		control,

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address),
/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host priv port).  Set this variable to zero if
 *	you want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
	host_priv_t		host_priv,

	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
		return KERN_INVALID_HOST;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*addr;
	map_size = (vm_map_size_t)size;

	kr = vm_map_enter_cpm(map,

	*addr = CAST_DOWN(vm_address_t, map_addr);
	mach_vm_offset_t	offset,

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_info(map,
				vm_map_trunc_page(offset),
				disposition, ref_count);

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_info(map,
				vm_map_trunc_page(offset),
				disposition, ref_count);
/* map a (whole) upl into an address space */
	vm_offset_t		*dst_addr)

	vm_map_offset_t		map_addr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_upl(map, upl, &map_addr);
	*dst_addr = CAST_DOWN(vm_offset_t, map_addr);

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return (vm_map_remove_upl(map, upl));
/* Retrieve a upl for an object underlying an address range in a map */
	vm_map_offset_t		map_offset,
	upl_size_t		*upl_size,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			force_data_sync)

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_flags = *flags & ~UPL_NOZEROFILL;
	if (force_data_sync)
		map_flags |= UPL_FORCE_DATA_SYNC;

	kr = vm_map_create_upl(map,

	*flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
__private_extern__ kern_return_t
mach_memory_entry_allocate(
	vm_named_entry_t	*user_entry_p,
	ipc_port_t		*user_handle_p);	/* forward */
/*
 *	mach_make_memory_entry_64
 *
 *	Think of it as a two-stage vm_remap() operation.  First
 *	you get a handle.  Second, you map that handle in
 *	somewhere else. Rather than doing it all at once (and
 *	without needing access to the other whole map).
 */
mach_make_memory_entry_64(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_handle)

	vm_map_version_t	version;
	vm_named_entry_t	parent_entry;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	/* needed for call to vm_map_lookup_locked */
	vm_object_offset_t	obj_off;
	vm_map_offset_t		lo_offset, hi_offset;
	vm_behavior_t		behavior;
	vm_object_t		shadow_object;

	/* needed for direct map entry manipulation */
	vm_map_entry_t		map_entry;
	vm_map_entry_t		next_entry;
	vm_map_t		original_map = target_map;
	vm_map_size_t		total_size;
	vm_map_size_t		map_size;
	vm_map_offset_t		map_offset;
	vm_map_offset_t		local_offset;
	vm_object_size_t	mappable_size;

	unsigned int		access;
	vm_prot_t		protections;
	unsigned int		wimg_mode;
	boolean_t		cache_attr = FALSE;

	if (((permission & 0x00FF0000) &
	     MAP_MEM_NAMED_CREATE |
	     MAP_MEM_NAMED_REUSE))) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		return KERN_INVALID_VALUE;

	if (parent_handle != IP_NULL &&
	    ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
		parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
		parent_entry = NULL;

	protections = permission & VM_PROT_ALL;
	access = GET_MAP_MEM(permission);

	user_handle = IP_NULL;

	map_offset = vm_map_trunc_page(offset);
	map_size = vm_map_round_page(*size);
	if (permission & MAP_MEM_ONLY) {
		boolean_t	parent_is_object;

		if (parent_entry == NULL) {
			return KERN_INVALID_ARGUMENT;

		parent_is_object = !(parent_entry->is_sub_map || parent_entry->is_pager);
		object = parent_entry->backing.object;
		if(parent_is_object && object != VM_OBJECT_NULL)
			wimg_mode = object->wimg_bits;
			wimg_mode = VM_WIMG_DEFAULT;
		if((access != GET_MAP_MEM(parent_entry->protection)) &&
		   !(parent_entry->protection & VM_PROT_WRITE)) {
			return KERN_INVALID_RIGHT;
		if(access == MAP_MEM_IO) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_IO;
		} else if (access == MAP_MEM_COPYBACK) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_DEFAULT;
		} else if (access == MAP_MEM_WTHRU) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_WTHRU;
		} else if (access == MAP_MEM_WCOMB) {
			SET_MAP_MEM(access, parent_entry->protection);
			wimg_mode = VM_WIMG_WCOMB;
		if(parent_is_object && object &&
		   (access != MAP_MEM_NOOP) &&
		   (!(object->nophyscache))) {
			if(object->wimg_bits != wimg_mode) {
				if ((wimg_mode == VM_WIMG_IO)
				    || (wimg_mode == VM_WIMG_WCOMB))
				vm_object_lock(object);
				vm_object_paging_wait(object, THREAD_UNINT);
				object->wimg_bits = wimg_mode;
				queue_iterate(&object->memq,
					      p, vm_page_t, listq) {
					if (!p->fictitious) {
						pmap_disconnect(p->phys_page);
						pmap_sync_page_attributes_phys(p->phys_page);
				vm_object_unlock(object);
		*object_handle = IP_NULL;
		return KERN_SUCCESS;
	if(permission & MAP_MEM_NAMED_CREATE) {
		kr = mach_memory_entry_allocate(&user_entry, &user_handle);
		if (kr != KERN_SUCCESS) {
			return KERN_FAILURE;

		/*
		 * Force the creation of the VM object now.
		 */
		if (map_size > (vm_map_size_t) VM_MAX_ADDRESS) {
			/*
			 * LP64todo - for now, we can only allocate 4GB
			 * internal objects because the default pager can't
			 * page bigger ones.  Remove this when it can.
			 */

		object = vm_object_allocate(map_size);
		assert(object != VM_OBJECT_NULL);

		if (permission & MAP_MEM_PURGABLE) {
			if (! (permission & VM_PROT_WRITE)) {
				/* if we can't write, we can't purge */
				vm_object_deallocate(object);
				kr = KERN_INVALID_ARGUMENT;
			object->purgable = VM_OBJECT_PURGABLE_NONVOLATILE;

		/*
		 * The VM object is brand new and nobody else knows about it,
		 * so we don't need to lock it.
		 */

		wimg_mode = object->wimg_bits;
		if (access == MAP_MEM_IO) {
			wimg_mode = VM_WIMG_IO;
		} else if (access == MAP_MEM_COPYBACK) {
			wimg_mode = VM_WIMG_DEFAULT;
		} else if (access == MAP_MEM_WTHRU) {
			wimg_mode = VM_WIMG_WTHRU;
		} else if (access == MAP_MEM_WCOMB) {
			wimg_mode = VM_WIMG_WCOMB;
		if (access != MAP_MEM_NOOP) {
			object->wimg_bits = wimg_mode;
		/* the object has no pages, so no WIMG bits to update here */

		/*
		 * We use this path when we want to make sure that
		 * nobody messes with the object (coalesce, for
		 * example) before we map it.
		 * We might want to use these objects for transposition via
		 * vm_object_transpose() too, so we don't want any copy or
		 * shadow objects either...
		 */
		object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

		user_entry->backing.object = object;
		user_entry->internal = TRUE;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->offset = 0;
		user_entry->protection = protections;
		SET_MAP_MEM(access, user_entry->protection);
		user_entry->size = map_size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in.		       */

		*size = CAST_DOWN(vm_size_t, map_size);
		*object_handle = user_handle;
		return KERN_SUCCESS;
	if (parent_entry == NULL ||
	    (permission & MAP_MEM_NAMED_REUSE)) {

		/* Create a named object based on address range within the task map */
		/* Go find the object at given address */

		vm_map_lock_read(target_map);

		/* get the object associated with the target address */
		/* note we check the permission of the range against */
		/* that requested by the caller */

		kr = vm_map_lookup_locked(&target_map, map_offset,
				protections, &version,
				&object, &obj_off, &prot, &wired, &behavior,
				&lo_offset, &hi_offset, &real_map);
		if (kr != KERN_SUCCESS) {
			vm_map_unlock_read(target_map);

		if (((prot & protections) != protections)
		    || (object == kernel_object)) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if(real_map != target_map)
				vm_map_unlock_read(real_map);
			if(object == kernel_object) {
				printf("Warning: Attempt to create a named"
					" entry from the kernel_object\n");

		/* We have an object, now check to see if this object */
		/* is suitable.  If not, create a shadow and share that */

		/*
		 * We have to unlock the VM object to avoid deadlocking with
		 * a VM map lock (the lock ordering is map, the object), if we
		 * need to modify the VM map to create a shadow object.  Since
		 * we might release the VM map lock below anyway, we have
		 * to release the VM map lock now.
		 * XXX FBDP There must be a way to avoid this double lookup...
		 *
		 * Take an extra reference on the VM object to make sure it's
		 * not going to disappear.
		 */
		vm_object_reference_locked(object); /* extra ref to hold obj */
		vm_object_unlock(object);

		local_map = original_map;
		local_offset = map_offset;
		if(target_map != local_map) {
			vm_map_unlock_read(target_map);
			if(real_map != target_map)
				vm_map_unlock_read(real_map);
			vm_map_lock_read(local_map);
			target_map = local_map;
			real_map = local_map;

		if(!vm_map_lookup_entry(local_map,
					local_offset, &map_entry)) {
			kr = KERN_INVALID_ARGUMENT;
			vm_map_unlock_read(target_map);
			if(real_map != target_map)
				vm_map_unlock_read(real_map);
			vm_object_deallocate(object); /* release extra ref */
			object = VM_OBJECT_NULL;

		if(!(map_entry->is_sub_map)) {
			if(map_entry->object.vm_object != object) {
				kr = KERN_INVALID_ARGUMENT;
				vm_map_unlock_read(target_map);
				if(real_map != target_map)
					vm_map_unlock_read(real_map);
				vm_object_deallocate(object); /* release extra ref */
				object = VM_OBJECT_NULL;

			local_map = map_entry->object.sub_map;

			vm_map_lock_read(local_map);
			vm_map_unlock_read(tmap);
			target_map = local_map;
			real_map = local_map;
			local_offset = local_offset - map_entry->vme_start;
			local_offset += map_entry->offset;
		/*
		 * We found the VM map entry, lock the VM object again.
		 */
		vm_object_lock(object);
		if(map_entry->wired_count) {
			/* JMM - The check below should be reworked instead. */
			object->true_share = TRUE;
		if(((map_entry->max_protection) & protections) != protections) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if(real_map != target_map)
				vm_map_unlock_read(real_map);
			vm_object_deallocate(object);
			object = VM_OBJECT_NULL;

		mappable_size = hi_offset - obj_off;
		total_size = map_entry->vme_end - map_entry->vme_start;
		if(map_size > mappable_size) {
			/* try to extend mappable size if the entries */
			/* following are from the same object and are */
			next_entry = map_entry->vme_next;
			/* lets see if the next map entry is still   */
			/* pointing at this object and is contiguous */
			while(map_size > mappable_size) {
				if((next_entry->object.vm_object == object) &&
				   (next_entry->vme_start ==
					next_entry->vme_prev->vme_end) &&
				   (next_entry->offset ==
					next_entry->vme_prev->offset +
					(next_entry->vme_prev->vme_end -
					 next_entry->vme_prev->vme_start))) {
					if(((next_entry->max_protection)
						& protections) != protections) {
					if (next_entry->needs_copy !=
					    map_entry->needs_copy)
					mappable_size += next_entry->vme_end
						- next_entry->vme_start;
					total_size += next_entry->vme_end
						- next_entry->vme_start;
					next_entry = next_entry->vme_next;

		if(object->internal) {
			/* vm_map_lookup_locked will create a shadow if   */
			/* needs_copy is set but does not check for the   */
			/* other two conditions shown. It is important to */
			/* set up an object which will not be pulled from */

			if ((map_entry->needs_copy || object->shadowed ||
			     (object->size > total_size))
					&& !object->true_share) {
				/*
				 * We have to unlock the VM object before
				 * trying to upgrade the VM map lock, to
				 * honor lock ordering (map then object).
				 * Otherwise, we would deadlock if another
				 * thread holds a read lock on the VM map and
				 * is trying to acquire the VM object's lock.
				 * We still hold an extra reference on the
				 * VM object, guaranteeing that it won't
				 */
				vm_object_unlock(object);

				if (vm_map_lock_read_to_write(target_map)) {
					/*
					 * We couldn't upgrade our VM map lock
					 * from "read" to "write" and we lost
					 *
					 * Start all over again...
					 */
					vm_object_deallocate(object); /* extra ref */
					target_map = original_map;
				vm_object_lock(object);

			/*
			 * JMM - We need to avoid coming here when the object
			 * is wired by anybody, not just the current map.  Why
			 * couldn't we use the standard vm_object_copy_quickly()
			 */

			/* create a shadow object */
			vm_object_shadow(&map_entry->object.vm_object,
					 &map_entry->offset, total_size);
			shadow_object = map_entry->object.vm_object;
			vm_object_unlock(object);

			vm_object_pmap_protect(
				object, map_entry->offset,
				((map_entry->is_shared
				  || target_map->mapped)
				map_entry->vme_start,
				map_entry->protection & ~VM_PROT_WRITE);
			total_size -= (map_entry->vme_end
					- map_entry->vme_start);
			next_entry = map_entry->vme_next;
			map_entry->needs_copy = FALSE;
			while (total_size) {
				if(next_entry->object.vm_object == object) {
					shadow_object->ref_count++;
					vm_object_res_reference(shadow_object);
					next_entry->object.vm_object
					vm_object_deallocate(object);
						= next_entry->vme_prev->offset +
						(next_entry->vme_prev->vme_end
						 - next_entry->vme_prev->vme_start);
					next_entry->needs_copy = FALSE;
					panic("mach_make_memory_entry_64:"
					      " map entries out of sync\n");
						- next_entry->vme_start;
				next_entry = next_entry->vme_next;

			/*
			 * Transfer our extra reference to the
			 */
			vm_object_reference_locked(shadow_object);
			vm_object_deallocate(object); /* extra ref */
			object = shadow_object;

			obj_off = (local_offset - map_entry->vme_start)
				  + map_entry->offset;

			vm_map_lock_write_to_read(target_map);
			vm_object_lock(object);
		/* note: in the future we can (if necessary) allow for  */
		/* memory object lists, this will better support        */
		/* fragmentation, but is it necessary?  The user should */
		/* be encouraged to create address space oriented       */
		/* shared objects from CLEAN memory regions which have  */
		/* a known and defined history.  i.e. no inheritance    */
		/* share, make this call before making the region the   */
		/* target of ipc's, etc.  The code above, protecting    */
		/* against delayed copy, etc. is mostly defensive.      */

		wimg_mode = object->wimg_bits;
		if(!(object->nophyscache)) {
			if(access == MAP_MEM_IO) {
				wimg_mode = VM_WIMG_IO;
			} else if (access == MAP_MEM_COPYBACK) {
				wimg_mode = VM_WIMG_USE_DEFAULT;
			} else if (access == MAP_MEM_WTHRU) {
				wimg_mode = VM_WIMG_WTHRU;
			} else if (access == MAP_MEM_WCOMB) {
				wimg_mode = VM_WIMG_WCOMB;

		object->true_share = TRUE;
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;

		/*
		 * The memory entry now points to this VM object and we
		 * need to hold a reference on the VM object.  Use the extra
		 * reference we took earlier to keep the object alive when we
		 */
		vm_map_unlock_read(target_map);
		if(real_map != target_map)
			vm_map_unlock_read(real_map);

		if(object->wimg_bits != wimg_mode) {
			vm_object_paging_wait(object, THREAD_UNINT);

			if ((wimg_mode == VM_WIMG_IO)
			    || (wimg_mode == VM_WIMG_WCOMB))

			queue_iterate(&object->memq,
				      p, vm_page_t, listq) {
				if (!p->fictitious) {
					pmap_disconnect(p->phys_page);
					pmap_sync_page_attributes_phys(p->phys_page);
			object->wimg_bits = wimg_mode;

		/* the size of mapped entry that overlaps with our region */
		/* which is targeted for share.                           */
		/* (entry_end - entry_start) -                            */
		/* offset of our beg addr within entry                    */
		/* it corresponds to this:                                */
2474 if (permission
& MAP_MEM_NAMED_REUSE
) {
2476 * Compare what we got with the "parent_entry".
2477 * If they match, re-use the "parent_entry" instead
2478 * of creating a new one.
2480 if (parent_entry
!= NULL
&&
2481 parent_entry
->backing
.object
== object
&&
2482 parent_entry
->internal
== object
->internal
&&
2483 parent_entry
->is_sub_map
== FALSE
&&
2484 parent_entry
->is_pager
== FALSE
&&
2485 parent_entry
->offset
== obj_off
&&
2486 parent_entry
->protection
== protections
&&
2487 parent_entry
->size
== map_size
) {
2489 * We have a match: re-use "parent_entry".
2491 /* release our extra reference on object */
2492 vm_object_unlock(object
);
2493 vm_object_deallocate(object
);
2494 /* parent_entry->ref_count++; XXX ? */
2495 /* Get an extra send-right on handle */
2496 ipc_port_copy_send(parent_handle
);
2497 *object_handle
= parent_handle
;
2498 return KERN_SUCCESS
;
			} else {
				/*
				 * No match: we need to create a new entry.
				 * fall through...
				 */
			}
		}

		vm_object_unlock(object);
		if (mach_memory_entry_allocate(&user_entry, &user_handle)
		    != KERN_SUCCESS) {
			/* release our unused reference on the object */
			vm_object_deallocate(object);
			return KERN_FAILURE;
		}
		user_entry->backing.object = object;
		user_entry->internal = object->internal;
		user_entry->is_sub_map = FALSE;
		user_entry->is_pager = FALSE;
		user_entry->offset = obj_off;
		user_entry->protection = permission;
		user_entry->size = map_size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in.		       */

		*size = CAST_DOWN(vm_size_t, map_size);
		*object_handle = user_handle;
		return KERN_SUCCESS;
	} else {
		/* The new object will be based on an existing named object */

		if (parent_entry == NULL) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}

		if((offset + map_size) > parent_entry->size) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}

		if((protections & parent_entry->protection) != protections) {
			kr = KERN_PROTECTION_FAILURE;
			goto make_mem_done;
		}
		if (mach_memory_entry_allocate(&user_entry, &user_handle)
		    != KERN_SUCCESS) {
			kr = KERN_FAILURE;
			goto make_mem_done;
		}

		user_entry->size = map_size;
		user_entry->offset = parent_entry->offset + map_offset;
		user_entry->is_sub_map = parent_entry->is_sub_map;
		user_entry->is_pager = parent_entry->is_pager;
		user_entry->internal = parent_entry->internal;
		user_entry->protection = protections;

		if(access != MAP_MEM_NOOP) {
			SET_MAP_MEM(access, user_entry->protection);
		}
		if(parent_entry->is_sub_map) {
			user_entry->backing.map = parent_entry->backing.map;
			vm_map_lock(user_entry->backing.map);
			user_entry->backing.map->ref_count++;
			vm_map_unlock(user_entry->backing.map);
		}
		else if (parent_entry->is_pager) {
			user_entry->backing.pager = parent_entry->backing.pager;
			/* JMM - don't we need a reference here? */
		} else {
			object = parent_entry->backing.object;
			assert(object != VM_OBJECT_NULL);
			user_entry->backing.object = object;
			/* we now point to this object, hold on */
			vm_object_reference(object);
			vm_object_lock(object);
			object->true_share = TRUE;
			if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
				object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
			vm_object_unlock(object);
		}
		*size = CAST_DOWN(vm_size_t, map_size);
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}
make_mem_done:
	if (user_handle != IP_NULL) {
		ipc_port_dealloc_kernel(user_handle);
	}
	if (user_entry != NULL) {
		kfree(user_entry, sizeof *user_entry);
	}
	return kr;
}
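/*
 * Illustrative sketch (not part of the original file, kept under
 * "#if 0" so it does not affect the build): how a hypothetical
 * in-kernel caller could fold one of the MAP_MEM_* cache modes handled
 * above into the "permission" argument so that
 * mach_make_memory_entry_64() selects VM_WIMG_WCOMB for the object.
 * The map, address, size and helper name are placeholders.
 */
#if 0
static kern_return_t
example_make_wcomb_entry(
	vm_map_t		map,	/* hypothetical source map */
	memory_object_offset_t	addr,	/* hypothetical base address */
	memory_object_size_t	*size,	/* in/out, rounded by the callee */
	ipc_port_t		*entryp)
{
	vm_prot_t	perm = VM_PROT_READ | VM_PROT_WRITE;

	/* fold the cache-mode request into the permission word */
	SET_MAP_MEM(MAP_MEM_WCOMB, perm);

	return mach_make_memory_entry_64(map, size, addr, perm,
					 entryp, IP_NULL);
}
#endif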
kern_return_t
_mach_make_memory_entry(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_offset_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_offset_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
			(memory_object_offset_t)offset, permission, object_handle,
			parent_entry);
	*size = mo_size;
	return kr;
}
kern_return_t
mach_make_memory_entry(
	vm_map_t		target_map,
	vm_size_t		*size,
	vm_offset_t		offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_offset_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_offset_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
			(memory_object_offset_t)offset, permission, object_handle,
			parent_entry);
	*size = CAST_DOWN(vm_size_t, mo_size);
	return kr;
}
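/*
 * Illustrative sketch (not part of the original file, kept under
 * "#if 0"): the usual pairing of mach_make_memory_entry() with
 * mach_vm_map() to alias a region of one map into another.  The maps
 * and the helper name are hypothetical, and mach_vm_map() is assumed
 * to have its MIG-exported signature from this file.
 */
#if 0
static kern_return_t
example_share_region(
	vm_map_t	src_map,
	vm_map_t	dst_map,
	vm_offset_t	src_addr,
	vm_size_t	*size)		/* in: requested, out: rounded */
{
	ipc_port_t		entry = IP_NULL;
	mach_vm_offset_t	dst_addr = 0;
	kern_return_t		kr;

	kr = mach_make_memory_entry(src_map, size, src_addr,
				    VM_PROT_READ | VM_PROT_WRITE,
				    &entry, IP_NULL);
	if (kr != KERN_SUCCESS)
		return kr;

	/* map the named entry anywhere in the destination map */
	kr = mach_vm_map(dst_map, &dst_addr, (mach_vm_size_t)*size, 0,
			 VM_FLAGS_ANYWHERE, entry, 0, FALSE,
			 VM_PROT_READ | VM_PROT_WRITE,
			 VM_PROT_READ | VM_PROT_WRITE,
			 VM_INHERIT_NONE);

	/* the mapping keeps its own reference; drop ours either way */
	mach_memory_entry_port_release(entry);
	return kr;
}
#endif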
/*
 *	task_wire
 *
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (must_wire)
		map->wiring_required = TRUE;
	else
		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
}
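/*
 * Illustrative sketch (not part of the original file, kept under
 * "#if 0"): once wiring_required is set, later allocations in the map
 * come back wired; clearing it restores the default pageable
 * behaviour.  The map and helper name are placeholders.
 */
#if 0
static void
example_wire_future_allocations(vm_map_t map)
{
	mach_vm_offset_t	addr = 0;

	(void) task_wire(map, TRUE);	/* wire everything from now on */
	(void) mach_vm_allocate(map, &addr, PAGE_SIZE, VM_FLAGS_ANYWHERE);
	(void) task_wire(map, FALSE);	/* back to pageable allocations */
}
#endif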
__private_extern__ kern_return_t
mach_memory_entry_allocate(
	vm_named_entry_t	*user_entry_p,
	ipc_port_t		*user_handle_p)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	ipc_port_t		previous;

	user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
	if (user_entry == NULL)
		return KERN_FAILURE;

	named_entry_lock_init(user_entry);

	user_handle = ipc_port_alloc_kernel();
	if (user_handle == IP_NULL) {
		kfree(user_entry, sizeof *user_entry);
		return KERN_FAILURE;
	}
	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	user_entry->backing.pager = NULL;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = FALSE;
	user_entry->size = 0;
	user_entry->internal = FALSE;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);

	*user_entry_p = user_entry;
	*user_handle_p = user_handle;

	return KERN_SUCCESS;
}
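/*
 * Illustrative sketch (not part of the original file, kept under
 * "#if 0"): the pattern the callers in this file follow -- allocate
 * the entry/port pair, fill in the backing union, and hand the send
 * right back.  The donated VM object and the helper name are
 * hypothetical.
 */
#if 0
static kern_return_t
example_wrap_object_in_named_entry(
	vm_object_t		object,		/* caller donates a reference */
	vm_object_size_t	size,
	ipc_port_t		*handlep)
{
	vm_named_entry_t	entry;
	ipc_port_t		handle;

	if (mach_memory_entry_allocate(&entry, &handle) != KERN_SUCCESS)
		return KERN_FAILURE;

	/* the allocator leaves is_sub_map/is_pager FALSE and ref_count 1 */
	entry->backing.object = object;
	entry->internal = object->internal;
	entry->size = size;
	entry->offset = 0;
	entry->protection = VM_PROT_DEFAULT;

	*handlep = handle;
	return KERN_SUCCESS;
}
#endif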
/*
 *	mach_memory_object_memory_entry_64
 *
 *	Create a named entry backed by the provided pager.
 *
 *	JMM - we need to hold a reference on the pager -
 *	and release it when the named entry is destroyed.
 */
kern_return_t
mach_memory_object_memory_entry_64(
	host_t			host,
	boolean_t		internal,
	vm_object_offset_t	size,
	vm_prot_t		permission,
	memory_object_t		pager,
	ipc_port_t		*entry_handle)
{
	unsigned int		access;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	if (host == HOST_NULL)
		return(KERN_INVALID_HOST);

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	user_entry->backing.pager = pager;
	user_entry->size = size;
	user_entry->offset = 0;
	user_entry->protection = permission & VM_PROT_ALL;
	access = GET_MAP_MEM(permission);
	SET_MAP_MEM(access, user_entry->protection);
	user_entry->internal = internal;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = TRUE;
	assert(user_entry->ref_count == 1);

	*entry_handle = user_handle;
	return KERN_SUCCESS;
}
kern_return_t
mach_memory_object_memory_entry(
	host_t		host,
	boolean_t	internal,
	vm_size_t	size,
	vm_prot_t	permission,
	memory_object_t	pager,
	ipc_port_t	*entry_handle)
{
	return mach_memory_object_memory_entry_64( host, internal,
		(vm_object_offset_t)size, permission, pager, entry_handle);
}
kern_return_t
mach_memory_entry_purgable_control(
	ipc_port_t	entry_port,
	vm_purgable_t	control,
	int		*state)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_object_t		object;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map || mem_entry->is_pager) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	/* check that named entry covers entire object ? */
	if (mem_entry->offset != 0 || object->size != mem_entry->size) {
		vm_object_unlock(object);
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	named_entry_unlock(mem_entry);

	kr = vm_object_purgable_control(object, control, state);

	vm_object_unlock(object);

	return kr;
}
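/*
 * Illustrative sketch (not part of the original file, kept under
 * "#if 0"): marking the object behind a named-entry port volatile and
 * reading the state back.  The port and helper name are placeholders;
 * the VM_PURGABLE_* constants are assumed to come from
 * <mach/vm_purgable.h>.
 */
#if 0
static kern_return_t
example_make_entry_volatile(ipc_port_t entry_port)
{
	int		state = VM_PURGABLE_VOLATILE;
	kern_return_t	kr;

	kr = mach_memory_entry_purgable_control(entry_port,
						VM_PURGABLE_SET_STATE,
						&state);
	if (kr != KERN_SUCCESS)
		return kr;

	/* state now reflects what the pageout path will honor */
	return mach_memory_entry_purgable_control(entry_port,
						  VM_PURGABLE_GET_STATE,
						  &state);
}
#endif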
/*
 * mach_memory_entry_port_release:
 *
 * Release a send right on a named entry port.  This is the correct
 * way to destroy a named entry.  When the last right on the port is
 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
 */
void
mach_memory_entry_port_release(
	ipc_port_t	port)
{
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
	ipc_port_release_send(port);
}
/*
 * mach_destroy_memory_entry:
 *
 * Drops a reference on a memory entry and destroys the memory entry if
 * there are no more references on it.
 * NOTE: This routine should not be called to destroy a memory entry from the
 * kernel, as it will not release the Mach port associated with the memory
 * entry.  The proper way to destroy a memory entry in the kernel is to
 * call mach_memory_entry_port_release() to release the kernel's send-right on
 * the memory entry's port.  When the last send right is released, the memory
 * entry will be destroyed via ipc_kobject_destroy().
 */
void
mach_destroy_memory_entry(
	ipc_port_t	port)
{
	vm_named_entry_t	named_entry;
#if MACH_ASSERT
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
	named_entry = (vm_named_entry_t)port->ip_kobject;
	mutex_lock(&(named_entry)->Lock);
	named_entry->ref_count -= 1;
	if(named_entry->ref_count == 0) {
		if (named_entry->is_sub_map) {
			vm_map_deallocate(named_entry->backing.map);
		} else if (!named_entry->is_pager) {
			/* release the memory object we've been pointing to */
			vm_object_deallocate(named_entry->backing.object);
		} /* else JMM - need to drop reference on pager in that case */

		mutex_unlock(&(named_entry)->Lock);

		kfree((void *) port->ip_kobject,
		      sizeof (struct vm_named_entry));
	} else
		mutex_unlock(&(named_entry)->Lock);
}
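/*
 * Illustrative sketch (not part of the original file, kept under
 * "#if 0"): in-kernel teardown of a named entry goes through the port,
 * as the NOTE above prescribes; calling mach_destroy_memory_entry()
 * directly would leak the port.  The port and helper name are
 * placeholders.
 */
#if 0
static void
example_drop_named_entry(ipc_port_t entry_port)
{
	/*
	 * Dropping the send right lets ipc_kobject_destroy() invoke
	 * mach_destroy_memory_entry() once the last right goes away.
	 */
	mach_memory_entry_port_release(entry_port);
}
#endif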
kern_return_t
set_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	if (IP_VALID(dynamic_pager_control_port))
		ipc_port_release_send(dynamic_pager_control_port);

	dynamic_pager_control_port = control_port;
	return KERN_SUCCESS;
}
kern_return_t
get_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	*control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	*control_port = ipc_port_copy_send(dynamic_pager_control_port);
	return KERN_SUCCESS;
}
/* ******* Temporary Internal calls to UPL for BSD ***** */

extern int kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr);

extern int kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl);

extern int kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_abort(
	upl_t			upl,
	int			abort_type);

extern int kernel_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags);
kern_return_t
kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return vm_upl_map(map, upl, dst_addr);
}


kern_return_t
kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	return vm_upl_unmap(map, upl);
}
kern_return_t
kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count)
{
	kern_return_t	kr;

	kr = upl_commit(upl, pl, count);
	upl_deallocate(upl);
	return kr;
}
kern_return_t
kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count)
{
	boolean_t		finished = FALSE;
	kern_return_t		kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}
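/*
 * Illustrative sketch (not part of the original file, kept under
 * "#if 0"): how a BSD-side caller might rely on
 * UPL_COMMIT_FREE_ON_EMPTY so the UPL is torn down automatically once
 * its last pages are committed.  The UPL, page list and helper name
 * are placeholders.
 */
#if 0
static kern_return_t
example_commit_and_free(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count)
{
	/*
	 * FREE_ON_EMPTY is rewritten to NOTIFY_EMPTY above, and the
	 * wrapper deallocates the UPL when upl_commit_range() reports
	 * that the commit emptied it.
	 */
	return kernel_upl_commit_range(upl, offset, size,
				       UPL_COMMIT_FREE_ON_EMPTY,
				       pl, count);
}
#endif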
kern_return_t
kernel_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags)
{
	kern_return_t		kr;
	boolean_t		finished = FALSE;

	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}
kern_return_t
kernel_upl_abort(
	upl_t			upl,
	int			abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
/*
 * Now a kernel-private interface (for BootCache
 * use only).  Need a cleaner way to create an
 * empty vm_map() and return a handle to it.
 */

kern_return_t
vm_region_object_create(
	__unused vm_map_t	target_map,
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	vm_map_t	new_map;

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
				vm_map_round_page(size), TRUE);

	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	assert(user_entry->ref_count == 1);

	*object_handle = user_handle;
	return KERN_SUCCESS;
}
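/*
 * Illustrative sketch (not part of the original file, kept under
 * "#if 0"): the BootCache-style use of the handle created above -- the
 * submap-backed named entry is mapped into a map with mach_vm_map()
 * (assumed to keep its MIG-exported signature) and then populated.
 * The map, size and helper name are placeholders.
 */
#if 0
static kern_return_t
example_create_scratch_region(
	vm_map_t		map,
	vm_size_t		size,
	mach_vm_offset_t	*addr)
{
	ipc_port_t	handle = IP_NULL;
	kern_return_t	kr;

	kr = vm_region_object_create(map, size, &handle);
	if (kr != KERN_SUCCESS)
		return kr;

	*addr = 0;
	kr = mach_vm_map(map, addr, (mach_vm_size_t)size, 0,
			 VM_FLAGS_ANYWHERE, handle, 0, FALSE,
			 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	/* the mapping holds its own reference to the named entry */
	mach_memory_entry_port_release(handle);
	return kr;
}
#endif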
ppnum_t vm_map_get_phys_page(		/* forward */
	vm_map_t	map,
	vm_offset_t	offset);

ppnum_t
vm_map_get_phys_page(
	vm_map_t		map,
	vm_offset_t		addr)
{
	vm_object_offset_t	offset;
	vm_object_t		object;
	vm_map_offset_t		map_offset;
	vm_map_entry_t		entry;
	ppnum_t			phys_page = 0;

	map_offset = vm_map_trunc_page(addr);

	vm_map_lock(map);

	while (vm_map_lookup_entry(map, map_offset, &entry)) {

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return (ppnum_t) 0;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			vm_map_lock(entry->object.sub_map);
			old_map = map;
			map = entry->object.sub_map;
			map_offset = entry->offset + (map_offset - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		if (entry->object.vm_object->phys_contiguous) {
			/* These are not standard pageable memory mappings */
			/* If they are not present in the object they will  */
			/* have to be picked up from the pager through the  */
			/* fault mechanism.  */
			if(entry->object.vm_object->shadow_offset == 0) {
				/* need to call vm_fault */
				vm_map_unlock(map);
				vm_fault(map, map_offset, VM_PROT_NONE,
					FALSE, THREAD_UNINT, NULL, 0);
				vm_map_lock(map);
				continue;
			}
			offset = entry->offset + (map_offset - entry->vme_start);
			phys_page = (ppnum_t)
				((entry->object.vm_object->shadow_offset
					+ offset) >> 12);
			break;

		}
		offset = entry->offset + (map_offset - entry->vme_start);
		object = entry->object.vm_object;
		vm_object_lock(object);
		while (TRUE) {
			vm_page_t dst_page = vm_page_lookup(object, offset);
			if(dst_page == VM_PAGE_NULL) {
				if(object->shadow) {
					vm_object_t old_object;

					vm_object_lock(object->shadow);
					old_object = object;
					offset = offset + object->shadow_offset;
					object = object->shadow;
					vm_object_unlock(old_object);
				} else {
					vm_object_unlock(object);
					break;
				}
			} else {
				phys_page = (ppnum_t)(dst_page->phys_page);
				vm_object_unlock(object);
				break;
			}
		}
		break;
	}

	vm_map_unlock(map);
	return phys_page;
}
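/*
 * Illustrative sketch (not part of the original file, kept under
 * "#if 0"): translating a mapped address to a physical byte address
 * with the helper above.  The map, address and helper name are
 * placeholders; PAGE_SHIFT and PAGE_MASK come from <mach/vm_param.h>.
 */
#if 0
static uint64_t
example_addr_to_phys(vm_map_t map, vm_offset_t addr)
{
	ppnum_t	pn = vm_map_get_phys_page(map, addr);

	if (pn == 0)
		return 0;	/* not resident or not mappable */

	/* page number to byte address, preserving the page offset */
	return (((uint64_t)pn) << PAGE_SHIFT) + (addr & PAGE_MASK);
}
#endif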
kern_return_t kernel_object_iopl_request(	/* forward */
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	vm_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags);

kern_return_t
kernel_object_iopl_request(
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	vm_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags)
{
	vm_object_t		object;
	kern_return_t		ret;

	int			caller_flags;

	caller_flags = *flags;

	if (caller_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}

	/* a few checks to make sure user is obeying rules */
	if(*upl_size == 0) {
		if(offset >= named_entry->size)
			return(KERN_INVALID_RIGHT);
		*upl_size = named_entry->size - offset;
	}
	if(caller_flags & UPL_COPYOUT_FROM) {
		if((named_entry->protection & VM_PROT_READ)
					!= VM_PROT_READ) {
			return(KERN_INVALID_RIGHT);
		}
	} else {
		if((named_entry->protection &
			(VM_PROT_READ | VM_PROT_WRITE))
			!= (VM_PROT_READ | VM_PROT_WRITE)) {
			return(KERN_INVALID_RIGHT);
		}
	}
	if(named_entry->size < (offset + *upl_size))
		return(KERN_INVALID_ARGUMENT);

	/* the callers parameter offset is defined to be the */
	/* offset from beginning of named entry offset in object */
	offset = offset + named_entry->offset;

	if(named_entry->is_sub_map)
		return (KERN_INVALID_ARGUMENT);

	named_entry_lock(named_entry);

	if (named_entry->is_pager) {
		object = vm_object_enter(named_entry->backing.pager,
				named_entry->offset + named_entry->size,
				named_entry->internal,
				FALSE,
				FALSE);
		if (object == VM_OBJECT_NULL) {
			named_entry_unlock(named_entry);
			return(KERN_INVALID_OBJECT);
		}

		/* JMM - drop reference on the pager here? */

		/* create an extra reference for the object */
		vm_object_lock(object);
		vm_object_reference_locked(object);
		named_entry->backing.object = object;
		named_entry->is_pager = FALSE;
		named_entry_unlock(named_entry);

		/* wait for object (if any) to be ready */
		if (!named_entry->internal) {
			while (!object->pager_ready) {
				vm_object_wait(object,
					VM_OBJECT_EVENT_PAGER_READY,
					THREAD_UNINT);
				vm_object_lock(object);
			}
		}
		vm_object_unlock(object);
	} else {
		/* This is the case where we are going to operate */
		/* on an already known object.  If the object is  */
		/* not ready it is internal.  An external object  */
		/* cannot be mapped until it is ready.  We can    */
		/* therefore avoid the ready check in this case.  */
		object = named_entry->backing.object;
		vm_object_reference(object);
		named_entry_unlock(named_entry);
	}

	if (!object->private) {
		if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
			*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		} else {
			*flags = 0;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
				     offset,
				     *upl_size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     caller_flags);
	vm_object_deallocate(object);