/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/shared_memory_server.h>
#include <mach/vm_map_server.h>
#include <vm/vm_shared_memory_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
vm_size_t	upl_offset_to_pagelist = 0;

ipc_port_t	dynamic_pager_control_port = NULL;
/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate(
    vm_map_t		map,
    mach_vm_offset_t	*addr,
    mach_vm_size_t	size,
    int			flags)
{
    vm_map_offset_t map_addr;
    vm_map_size_t	map_size;
    kern_return_t	result;
    boolean_t	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);
    if (size == 0) {
        *addr = 0;
        return(KERN_SUCCESS);
    }

    if (anywhere) {
        /*
         * No specific address requested, so start candidate address
         * search at the minimum address in the map.  However, if that
         * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
         * allocations of PAGEZERO to explicit requests since its
         * normal use is to catch dereferences of NULL and many
         * applications also treat pointers with a value of 0 as
         * special and suddenly having address 0 contain useable
         * memory would tend to confuse those applications.
         */
        map_addr = vm_map_min(map);
        if (map_addr == 0)
            map_addr += PAGE_SIZE;
    } else
        map_addr = vm_map_trunc_page(*addr);
    map_size = vm_map_round_page(size);
    if (map_size == 0)
        return(KERN_INVALID_ARGUMENT);

    result = vm_map_enter(
            map,
            &map_addr,
            map_size,
            (vm_object_offset_t)0,
            flags,
            VM_OBJECT_NULL,
            (vm_object_offset_t)0,
            FALSE,
            VM_PROT_DEFAULT,
            VM_PROT_ALL,
            VM_INHERIT_DEFAULT);

    *addr = map_addr;
    return(result);
}
/*
 *	vm_allocate
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate(
    vm_map_t	map,
    vm_offset_t	*addr,
    vm_size_t	size,
    int		flags)
{
    vm_map_offset_t map_addr;
    vm_map_size_t	map_size;
    kern_return_t	result;
    boolean_t	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);
    if (size == 0) {
        *addr = 0;
        return(KERN_SUCCESS);
    }

    if (anywhere) {
        /*
         * No specific address requested, so start candidate address
         * search at the minimum address in the map.  However, if that
         * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
         * allocations of PAGEZERO to explicit requests since its
         * normal use is to catch dereferences of NULL and many
         * applications also treat pointers with a value of 0 as
         * special and suddenly having address 0 contain useable
         * memory would tend to confuse those applications.
         */
        map_addr = vm_map_min(map);
        if (map_addr == 0)
            map_addr += PAGE_SIZE;
    } else
        map_addr = vm_map_trunc_page(*addr);
    map_size = vm_map_round_page(size);
    if (map_size == 0)
        return(KERN_INVALID_ARGUMENT);

    result = vm_map_enter(
            map,
            &map_addr,
            map_size,
            (vm_object_offset_t)0,
            flags,
            VM_OBJECT_NULL,
            (vm_object_offset_t)0,
            FALSE,
            VM_PROT_DEFAULT,
            VM_PROT_ALL,
            VM_INHERIT_DEFAULT);

    *addr = CAST_DOWN(vm_offset_t, map_addr);
    return(result);
}
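/*
 * Illustrative user-space usage (not part of this file, not compiled here):
 * a minimal sketch of the corresponding user-level calls, assuming the
 * standard <mach/mach.h> and <mach/mach_vm.h> interfaces generated from
 * mach_vm.defs.  Error handling is abbreviated.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>

static kern_return_t
allocate_example(void)
{
    mach_vm_address_t	addr = 0;		/* kernel picks the address */
    mach_vm_size_t	size = 16 * 4096;	/* rounded up to page size by the kernel */
    kern_return_t	kr;

    kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS)
        return kr;
    printf("allocated zero-filled memory at 0x%llx\n", (unsigned long long)addr);

    /* ... use the memory ... */

    /* release it again; see mach_vm_deallocate() below */
    return mach_vm_deallocate(mach_task_self(), addr, size);
}
#endif	/* example only */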
/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
    vm_map_t		map,
    mach_vm_offset_t	start,
    mach_vm_size_t	size)
{
    if ((map == VM_MAP_NULL) || (start + size < start))
        return(KERN_INVALID_ARGUMENT);

    if (size == (mach_vm_offset_t) 0)
        return(KERN_SUCCESS);

    return(vm_map_remove(map, vm_map_trunc_page(start),
                         vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
}
/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
    register vm_map_t	map,
    vm_offset_t		start,
    vm_size_t		size)
{
    if ((map == VM_MAP_NULL) || (start + size < start))
        return(KERN_INVALID_ARGUMENT);

    if (size == (vm_offset_t) 0)
        return(KERN_SUCCESS);

    return(vm_map_remove(map, vm_map_trunc_page(start),
                         vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
}
/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_inherit(
    vm_map_t		map,
    mach_vm_offset_t	start,
    mach_vm_size_t	size,
    vm_inherit_t	new_inheritance)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_inheritance > VM_INHERIT_LAST_VALID))
        return(KERN_INVALID_ARGUMENT);

    if (size == 0)
        return KERN_SUCCESS;

    return(vm_map_inherit(map,
                          vm_map_trunc_page(start),
                          vm_map_round_page(start+size),
                          new_inheritance));
}
/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_inherit(
    register vm_map_t	map,
    vm_offset_t		start,
    vm_size_t		size,
    vm_inherit_t	new_inheritance)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_inheritance > VM_INHERIT_LAST_VALID))
        return(KERN_INVALID_ARGUMENT);

    if (size == 0)
        return KERN_SUCCESS;

    return(vm_map_inherit(map,
                          vm_map_trunc_page(start),
                          vm_map_round_page(start+size),
                          new_inheritance));
}
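/*
 * Illustrative user-space usage (not part of this file, not compiled here):
 * marking a region so that it is not copied into children across fork(),
 * assuming <mach/mach_vm.h>.  The address/size values are placeholders.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
dont_inherit(mach_vm_address_t addr, mach_vm_size_t size)
{
    /* children created after this call see no mapping at [addr, addr+size) */
    return mach_vm_inherit(mach_task_self(), addr, size, VM_INHERIT_NONE);
}
#endif	/* example only */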
/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_protect(
    vm_map_t		map,
    mach_vm_offset_t	start,
    mach_vm_size_t	size,
    boolean_t		set_maximum,
    vm_prot_t		new_protection)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
        return(KERN_INVALID_ARGUMENT);

    if (size == 0)
        return KERN_SUCCESS;

    return(vm_map_protect(map,
                          vm_map_trunc_page(start),
                          vm_map_round_page(start+size),
                          new_protection,
                          set_maximum));
}
/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.  Addressability of the range limited
 *	to the same size as the kernel.
 */
kern_return_t
vm_protect(
    vm_map_t	map,
    vm_offset_t	start,
    vm_size_t	size,
    boolean_t	set_maximum,
    vm_prot_t	new_protection)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
        return(KERN_INVALID_ARGUMENT);

    if (size == 0)
        return KERN_SUCCESS;

    return(vm_map_protect(map,
                          vm_map_trunc_page(start),
                          vm_map_round_page(start+size),
                          new_protection,
                          set_maximum));
}
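/*
 * Illustrative user-space usage (not part of this file, not compiled here):
 * turning one page of an allocation into an inaccessible guard page,
 * assuming <mach/mach_vm.h>.  set_maximum is FALSE, so only the current
 * protection changes and it can be restored later.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
make_guard_page(mach_vm_address_t page_addr, mach_vm_size_t page_size)
{
    /* any access to this page now faults */
    return mach_vm_protect(mach_task_self(), page_addr, page_size,
                           FALSE /* set_maximum */, VM_PROT_NONE);
}
#endif	/* example only */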
/*
 * mach_vm_machine_attributes -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
    vm_map_t			map,
    mach_vm_address_t		addr,
    mach_vm_size_t		size,
    vm_machine_attribute_t	attribute,
    vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
    if ((map == VM_MAP_NULL) || (addr + size < addr))
        return(KERN_INVALID_ARGUMENT);

    if (size == 0)
        return KERN_SUCCESS;

    return vm_map_machine_attribute(map,
                    vm_map_trunc_page(addr),
                    vm_map_round_page(addr+size),
                    attribute,
                    value);
}
/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.  Limited addressability
 * (same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
    vm_map_t			map,
    vm_address_t		addr,
    vm_size_t			size,
    vm_machine_attribute_t	attribute,
    vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
    if ((map == VM_MAP_NULL) || (addr + size < addr))
        return(KERN_INVALID_ARGUMENT);

    if (size == 0)
        return KERN_SUCCESS;

    return vm_map_machine_attribute(map,
                    vm_map_trunc_page(addr),
                    vm_map_round_page(addr+size),
                    attribute,
                    value);
}
/*
 * mach_vm_read -
 * Read/copy a range from one address space and return it to the caller.
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 *
 * JMM - because of mach_msg_type_number_t, this call is limited to a
 * single 4GB region at this time.
 */
kern_return_t
mach_vm_read(
    vm_map_t		map,
    mach_vm_address_t	addr,
    mach_vm_size_t	size,
    pointer_t		*data,
    mach_msg_type_number_t	*data_size)
{
    kern_return_t	error;
    vm_map_copy_t	ipc_address;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    error = vm_map_copyin(map,
            (vm_map_address_t)addr,
            (vm_map_size_t)size,
            FALSE,	/* src_destroy */
            &ipc_address);

    if (KERN_SUCCESS == error) {
        *data = (pointer_t) ipc_address;
        *data_size = size;
    }
    return(error);
}
/*
 * vm_read -
 * Read/copy a range from one address space and return it to the caller.
 * Limited addressability (same range limits as for the native kernel map).
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 */
kern_return_t
vm_read(
    vm_map_t		map,
    vm_address_t	addr,
    vm_size_t		size,
    pointer_t		*data,
    mach_msg_type_number_t	*data_size)
{
    kern_return_t	error;
    vm_map_copy_t	ipc_address;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    error = vm_map_copyin(map,
            (vm_map_address_t)addr,
            (vm_map_size_t)size,
            FALSE,	/* src_destroy */
            &ipc_address);

    if (KERN_SUCCESS == error) {
        *data = (pointer_t) ipc_address;
        *data_size = size;
    }
    return(error);
}
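/*
 * Illustrative user-space usage (not part of this file, not compiled here):
 * copying a range out of a task with mach_vm_read(), assuming
 * <mach/mach_vm.h>.  The returned buffer is out-of-line memory in the
 * caller's address space and must be freed with mach_vm_deallocate().
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <string.h>

static kern_return_t
read_remote(task_t task, mach_vm_address_t addr, mach_vm_size_t size,
            void *out)
{
    vm_offset_t			data = 0;
    mach_msg_type_number_t	count = 0;
    kern_return_t		kr;

    kr = mach_vm_read(task, addr, size, &data, &count);
    if (kr != KERN_SUCCESS)
        return kr;

    memcpy(out, (void *)data, count);

    /* release the out-of-line copy made on our behalf */
    (void) mach_vm_deallocate(mach_task_self(),
                              (mach_vm_address_t)data, count);
    return KERN_SUCCESS;
}
#endif	/* example only */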
/*
 * mach_vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 */
kern_return_t
mach_vm_read_list(
    vm_map_t			map,
    mach_vm_read_entry_t	data_list,
    natural_t			count)
{
    mach_msg_type_number_t	i;
    kern_return_t	error;
    vm_map_copy_t	copy;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    error = KERN_SUCCESS;
    for(i=0; i<count; i++) {
        vm_map_address_t map_addr;
        vm_map_size_t map_size;

        map_addr = (vm_map_address_t)(data_list[i].address);
        map_size = (vm_map_size_t)(data_list[i].size);

        if(map_size != 0) {
            error = vm_map_copyin(map,
                    map_addr,
                    map_size,
                    FALSE,	/* src_destroy */
                    &copy);
            if (KERN_SUCCESS == error) {
                error = vm_map_copyout(
                        current_task()->map,
                        &map_addr,
                        copy);
                if (KERN_SUCCESS == error) {
                    data_list[i].address = map_addr;
                    continue;
                }
                vm_map_copy_discard(copy);
            }
        }
        data_list[i].address = (mach_vm_address_t)0;
        data_list[i].size = (mach_vm_size_t)0;
    }
    return(error);
}
/*
 * vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 *
 * The source and destination ranges are limited to those
 * that can be described with a vm_address_t (i.e. same
 * size map as the kernel).
 *
 * JMM - If the result of the copyout is an address range
 * that cannot be described with a vm_address_t (i.e. the
 * caller had a larger address space but used this call
 * anyway), it will result in a truncated address being
 * returned (and a likely confused caller).
 */
kern_return_t
vm_read_list(
    vm_map_t		map,
    vm_read_entry_t	data_list,
    natural_t		count)
{
    mach_msg_type_number_t	i;
    kern_return_t	error;
    vm_map_copy_t	copy;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    error = KERN_SUCCESS;
    for(i=0; i<count; i++) {
        vm_map_address_t map_addr;
        vm_map_size_t map_size;

        map_addr = (vm_map_address_t)(data_list[i].address);
        map_size = (vm_map_size_t)(data_list[i].size);

        if(map_size != 0) {
            error = vm_map_copyin(map,
                    map_addr,
                    map_size,
                    FALSE,	/* src_destroy */
                    &copy);
            if (KERN_SUCCESS == error) {
                error = vm_map_copyout(current_task()->map,
                        &map_addr,
                        copy);
                if (KERN_SUCCESS == error) {
                    data_list[i].address =
                        CAST_DOWN(vm_offset_t, map_addr);
                    continue;
                }
                vm_map_copy_discard(copy);
            }
        }
        data_list[i].address = (mach_vm_address_t)0;
        data_list[i].size = (mach_vm_size_t)0;
    }
    return(error);
}
/*
 * mach_vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * In making an assumption that the current thread is local, it is
 * no longer cluster-safe without a fully supportive local proxy
 * thread/task (but we don't support clusters anymore, so this is moot).
 */
kern_return_t
mach_vm_read_overwrite(
    vm_map_t		map,
    mach_vm_address_t	address,
    mach_vm_size_t	size,
    mach_vm_address_t	data,
    mach_vm_size_t	*data_size)
{
    kern_return_t	error;
    vm_map_copy_t	copy;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    error = vm_map_copyin(map, (vm_map_address_t)address,
                (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == error) {
        error = vm_map_copy_overwrite(current_thread()->map,
                    (vm_map_address_t)data,
                    copy, FALSE);
        if (KERN_SUCCESS == error) {
            *data_size = size;
            return error;
        }
        vm_map_copy_discard(copy);
    }
    return(error);
}
/*
 * vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * This routine adds the additional limitation that the source and
 * destination ranges must be describable with vm_address_t values
 * (i.e. the same size address spaces as the kernel, or at least the
 * ranges are in that first portion of the respective address spaces).
 */
kern_return_t
vm_read_overwrite(
    vm_map_t	map,
    vm_address_t	address,
    vm_size_t	size,
    vm_address_t	data,
    vm_size_t	*data_size)
{
    kern_return_t	error;
    vm_map_copy_t	copy;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    error = vm_map_copyin(map, (vm_map_address_t)address,
                (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == error) {
        error = vm_map_copy_overwrite(current_thread()->map,
                    (vm_map_address_t)data,
                    copy, FALSE);
        if (KERN_SUCCESS == error) {
            *data_size = size;
            return error;
        }
        vm_map_copy_discard(copy);
    }
    return(error);
}
/*
 * mach_vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 */
kern_return_t
mach_vm_write(
    vm_map_t			map,
    mach_vm_address_t		address,
    pointer_t			data,
    __unused mach_msg_type_number_t	size)
{
    if (map == VM_MAP_NULL)
        return KERN_INVALID_ARGUMENT;

    return vm_map_copy_overwrite(map, (vm_map_address_t)address,
        (vm_map_copy_t) data, FALSE /* interruptible XXX */);
}
/*
 * vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 *
 * The addressability of the range of addresses to overwrite is
 * limited by the use of a vm_address_t (same size as kernel map).
 * Either the target map is also small, or the range is in the
 * low addresses within it.
 */
kern_return_t
vm_write(
    vm_map_t			map,
    vm_address_t		address,
    pointer_t			data,
    __unused mach_msg_type_number_t	size)
{
    if (map == VM_MAP_NULL)
        return KERN_INVALID_ARGUMENT;

    return vm_map_copy_overwrite(map, (vm_map_address_t)address,
        (vm_map_copy_t) data, FALSE /* interruptible XXX */);
}
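/*
 * Illustrative user-space usage (not part of this file, not compiled here):
 * overwriting a (page-aligned, already mapped and writable) range in a
 * target task with a local buffer, assuming <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
write_remote(task_t task, mach_vm_address_t dst,
             const void *src, mach_msg_type_number_t len)
{
    /* the buffer is sent out-of-line by MIG and arrives here as a vm_map_copy_t */
    return mach_vm_write(task, dst, (vm_offset_t)src, len);
}
#endif	/* example only */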
/*
 * mach_vm_copy -
 * Overwrite one range of the specified map with the contents of
 * another range within that same map (i.e. both address ranges
 * are "over there").
 */
kern_return_t
mach_vm_copy(
    vm_map_t		map,
    mach_vm_address_t	source_address,
    mach_vm_size_t	size,
    mach_vm_address_t	dest_address)
{
    vm_map_copy_t copy;
    kern_return_t kr;

    if (map == VM_MAP_NULL)
        return KERN_INVALID_ARGUMENT;

    kr = vm_map_copyin(map, (vm_map_address_t)source_address,
               (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == kr) {
        kr = vm_map_copy_overwrite(map,
                (vm_map_address_t)dest_address,
                copy, FALSE /* interruptible XXX */);

        if (KERN_SUCCESS != kr)
            vm_map_copy_discard(copy);
    }
    return kr;
}
kern_return_t
vm_copy(
    vm_map_t	map,
    vm_address_t	source_address,
    vm_size_t	size,
    vm_address_t	dest_address)
{
    vm_map_copy_t copy;
    kern_return_t kr;

    if (map == VM_MAP_NULL)
        return KERN_INVALID_ARGUMENT;

    kr = vm_map_copyin(map, (vm_map_address_t)source_address,
               (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == kr) {
        kr = vm_map_copy_overwrite(map,
                (vm_map_address_t)dest_address,
                copy, FALSE /* interruptible XXX */);

        if (KERN_SUCCESS != kr)
            vm_map_copy_discard(copy);
    }
    return kr;
}
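/*
 * Illustrative user-space usage (not part of this file, not compiled here):
 * duplicating one mapped range over another within the same task,
 * assuming <mach/mach_vm.h>.  Both ranges must already be mapped and the
 * destination must be writable.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
copy_within_task(mach_vm_address_t src, mach_vm_address_t dst,
                 mach_vm_size_t size)
{
    return mach_vm_copy(mach_task_self(), src, size, dst);
}
#endif	/* example only */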
/*
 * mach_vm_map -
 * Map some range of an object into an address space.
 *
 * The object can be one of several types of objects:
 *	NULL - anonymous memory
 *	a named entry - a range within another address space
 *	                or a range within a memory object
 *	a whole memory object
 */
kern_return_t
mach_vm_map(
    vm_map_t		target_map,
    mach_vm_offset_t	*address,
    mach_vm_size_t	initial_size,
    mach_vm_offset_t	mask,
    int			flags,
    ipc_port_t		port,
    vm_object_offset_t	offset,
    boolean_t		copy,
    vm_prot_t		cur_protection,
    vm_prot_t		max_protection,
    vm_inherit_t	inheritance)
{
    vm_map_address_t	map_addr;
    vm_map_size_t	map_size;
    vm_object_t		object;
    vm_object_size_t	size;
    kern_return_t	result;

    /*
     * Check arguments for validity
     */
    if ((target_map == VM_MAP_NULL) ||
        (cur_protection & ~VM_PROT_ALL) ||
        (max_protection & ~VM_PROT_ALL) ||
        (inheritance > VM_INHERIT_LAST_VALID) ||
        initial_size == 0)
        return(KERN_INVALID_ARGUMENT);

    map_addr = vm_map_trunc_page(*address);
    map_size = vm_map_round_page(initial_size);
    size = vm_object_round_page(initial_size);

    /*
     * Find the vm object (if any) corresponding to this port.
     */
    if (!IP_VALID(port)) {
        object = VM_OBJECT_NULL;
        offset = 0;
        copy = FALSE;
    } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
        vm_named_entry_t	named_entry;

        named_entry = (vm_named_entry_t)port->ip_kobject;
        /* a few checks to make sure user is obeying rules */
        if(size == 0) {
            if(offset >= named_entry->size)
                return(KERN_INVALID_RIGHT);
            size = named_entry->size - offset;
        }
        if((named_entry->protection & max_protection) != max_protection)
            return(KERN_INVALID_RIGHT);
        if((named_entry->protection & cur_protection) != cur_protection)
            return(KERN_INVALID_RIGHT);
        if(named_entry->size < (offset + size))
            return(KERN_INVALID_ARGUMENT);

        /* the callers parameter offset is defined to be the */
        /* offset from beginning of named entry offset in object */
        offset = offset + named_entry->offset;

        named_entry_lock(named_entry);
        if(named_entry->is_sub_map) {
            vm_map_entry_t	map_entry;

            named_entry_unlock(named_entry);
            vm_object_reference(vm_submap_object);
            if ((result = vm_map_enter(target_map,
                    &map_addr, map_size,
                    (vm_map_offset_t)mask, flags,
                    vm_submap_object, 0,
                    FALSE,
                    cur_protection, max_protection, inheritance
                    )) != KERN_SUCCESS) {
                vm_object_deallocate(vm_submap_object);
            } else {
                char	alias;

                VM_GET_FLAGS_ALIAS(flags, alias);
                if ((alias == VM_MEMORY_SHARED_PMAP) &&
                    !copy) {
                    vm_map_submap(target_map, map_addr,
                        map_addr + map_size,
                        named_entry->backing.map,
                        (vm_map_offset_t)offset, TRUE);
                } else {
                    vm_map_submap(target_map, map_addr,
                        map_addr + map_size,
                        named_entry->backing.map,
                        (vm_map_offset_t)offset, FALSE);
                }
                if(copy) {
                    if(vm_map_lookup_entry(
                        target_map, map_addr, &map_entry)) {
                        map_entry->needs_copy = TRUE;
                    }
                }
                *address = map_addr;
            }
            return(result);

        } else if (named_entry->is_pager) {
            unsigned int	access;
            vm_prot_t	protections;
            unsigned int	wimg_mode;
            boolean_t	cache_attr;

            protections = named_entry->protection & VM_PROT_ALL;
            access = GET_MAP_MEM(named_entry->protection);

            object = vm_object_enter(
                named_entry->backing.pager,
                named_entry->size,
                named_entry->internal,
                FALSE,
                FALSE);
            if (object == VM_OBJECT_NULL) {
                named_entry_unlock(named_entry);
                return(KERN_INVALID_OBJECT);
            }

            /* JMM - drop reference on pager here */

            /* create an extra ref for the named entry */
            vm_object_lock(object);
            vm_object_reference_locked(object);
            named_entry->backing.object = object;
            named_entry->is_pager = FALSE;
            named_entry_unlock(named_entry);

            wimg_mode = object->wimg_bits;
            if(access == MAP_MEM_IO) {
                wimg_mode = VM_WIMG_IO;
            } else if (access == MAP_MEM_COPYBACK) {
                wimg_mode = VM_WIMG_USE_DEFAULT;
            } else if (access == MAP_MEM_WTHRU) {
                wimg_mode = VM_WIMG_WTHRU;
            } else if (access == MAP_MEM_WCOMB) {
                wimg_mode = VM_WIMG_WCOMB;
            }
            if ((wimg_mode == VM_WIMG_IO)
                || (wimg_mode == VM_WIMG_WCOMB))
                cache_attr = TRUE;
            else
                cache_attr = FALSE;

            /* wait for object (if any) to be ready */
            if (!named_entry->internal) {
                while (!object->pager_ready) {
                    vm_object_wait(object,
                        VM_OBJECT_EVENT_PAGER_READY,
                        THREAD_UNINT);
                    vm_object_lock(object);
                }
            }

            if(object->wimg_bits != wimg_mode) {
                vm_page_t p;

                vm_object_paging_wait(object, THREAD_UNINT);

                object->wimg_bits = wimg_mode;
                queue_iterate(&object->memq, p, vm_page_t, listq) {
                    if (!p->fictitious) {
                        pmap_disconnect(p->phys_page);
                        if (cache_attr)
                            pmap_sync_page_attributes_phys(p->phys_page);
                    }
                }
            }
            object->true_share = TRUE;
            if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
                object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
            vm_object_unlock(object);
        } else {
            /* This is the case where we are going to map */
            /* an already mapped object.  If the object is */
            /* not ready it is internal.  An external     */
            /* object cannot be mapped until it is ready  */
            /* we can therefore avoid the ready check     */
            /* in this case.  */
            object = named_entry->backing.object;
            assert(object != VM_OBJECT_NULL);
            named_entry_unlock(named_entry);
            vm_object_reference(object);
        }
    } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
        /*
         * JMM - This is temporary until we unify named entries
         * and raw memory objects.
         *
         * Detected fake ip_kotype for a memory object.  In
         * this case, the port isn't really a port at all, but
         * instead is just a raw memory object.
         */
        if ((object = vm_object_enter((memory_object_t)port,
            size, FALSE, FALSE, FALSE))
            == VM_OBJECT_NULL)
            return(KERN_INVALID_OBJECT);

        /* wait for object (if any) to be ready */
        if (object != VM_OBJECT_NULL) {
            if(object == kernel_object) {
                printf("Warning: Attempt to map kernel object"
                    " by a non-private kernel entity\n");
                return(KERN_INVALID_OBJECT);
            }
            vm_object_lock(object);
            while (!object->pager_ready) {
                vm_object_wait(object,
                    VM_OBJECT_EVENT_PAGER_READY,
                    THREAD_UNINT);
                vm_object_lock(object);
            }
            vm_object_unlock(object);
        }
    } else {
        return (KERN_INVALID_OBJECT);
    }

    /*
     *	Perform the copy if requested
     */
    if (copy) {
        vm_object_t		new_object;
        vm_object_offset_t	new_offset;

        result = vm_object_copy_strategically(object, offset, size,
                &new_object, &new_offset,
                &copy);

        if (result == KERN_MEMORY_RESTART_COPY) {
            boolean_t success;
            boolean_t src_needs_copy;

            /*
             * XXX
             * We currently ignore src_needs_copy.
             * This really is the issue of how to make
             * MEMORY_OBJECT_COPY_SYMMETRIC safe for
             * non-kernel users to use. Solution forthcoming.
             * In the meantime, since we don't allow non-kernel
             * memory managers to specify symmetric copy,
             * we won't run into problems here.
             */
            new_object = object;
            new_offset = offset;
            success = vm_object_copy_quickly(&new_object,
                             new_offset, size,
                             &src_needs_copy,
                             &copy);
            assert(success);
            result = KERN_SUCCESS;
        }
        /*
         *	Throw away the reference to the
         *	original object, as it won't be mapped.
         */
        vm_object_deallocate(object);

        if (result != KERN_SUCCESS)
            return (result);

        object = new_object;
        offset = new_offset;
    }

    if ((result = vm_map_enter(target_map,
                &map_addr, map_size,
                (vm_map_offset_t)mask,
                flags,
                object, offset,
                copy,
                cur_protection, max_protection, inheritance
                )) != KERN_SUCCESS)
        vm_object_deallocate(object);
    *address = map_addr;
    return(result);
}
/* legacy interface */
kern_return_t
vm_map_64(
    vm_map_t		target_map,
    vm_offset_t		*address,
    vm_size_t		size,
    vm_offset_t		mask,
    int			flags,
    ipc_port_t		port,
    vm_object_offset_t	offset,
    boolean_t		copy,
    vm_prot_t		cur_protection,
    vm_prot_t		max_protection,
    vm_inherit_t	inheritance)
{
    mach_vm_address_t map_addr;
    mach_vm_size_t map_size;
    mach_vm_offset_t map_mask;
    kern_return_t kr;

    map_addr = (mach_vm_address_t)*address;
    map_size = (mach_vm_size_t)size;
    map_mask = (mach_vm_offset_t)mask;

    kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
             port, offset, copy,
             cur_protection, max_protection, inheritance);
    *address = CAST_DOWN(vm_address_t, map_addr);
    return kr;
}
/* temporary, until world build */
kern_return_t
vm_map(
    vm_map_t		target_map,
    vm_offset_t		*address,
    vm_size_t		size,
    vm_offset_t		mask,
    int			flags,
    ipc_port_t		port,
    vm_offset_t		offset,
    boolean_t		copy,
    vm_prot_t		cur_protection,
    vm_prot_t		max_protection,
    vm_inherit_t	inheritance)
{
    mach_vm_address_t map_addr;
    mach_vm_size_t map_size;
    mach_vm_offset_t map_mask;
    vm_object_offset_t obj_offset;
    kern_return_t kr;

    map_addr = (mach_vm_address_t)*address;
    map_size = (mach_vm_size_t)size;
    map_mask = (mach_vm_offset_t)mask;
    obj_offset = (vm_object_offset_t)offset;

    kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
             port, obj_offset, copy,
             cur_protection, max_protection, inheritance);
    *address = CAST_DOWN(vm_address_t, map_addr);
    return kr;
}
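/*
 * Illustrative user-space usage (not part of this file, not compiled here):
 * an anonymous mach_vm_map() call, assuming <mach/mach_vm.h>.  Passing
 * MACH_PORT_NULL as the object is the "NULL - anonymous memory" case
 * described above; the mask requests 1MB alignment.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
map_anonymous_aligned(mach_vm_address_t *addr_out, mach_vm_size_t size)
{
    mach_vm_address_t addr = 0;
    kern_return_t kr;

    kr = mach_vm_map(mach_task_self(), &addr, size,
             0x0FFFFF,		/* alignment mask: 1MB */
             VM_FLAGS_ANYWHERE,
             MACH_PORT_NULL,	/* anonymous memory */
             0,			/* offset */
             FALSE,		/* copy */
             VM_PROT_DEFAULT, VM_PROT_ALL,
             VM_INHERIT_DEFAULT);
    if (kr == KERN_SUCCESS)
        *addr_out = addr;
    return kr;
}
#endif	/* example only */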
/*
 * mach_vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 */
kern_return_t
mach_vm_remap(
    vm_map_t		target_map,
    mach_vm_offset_t	*address,
    mach_vm_size_t	size,
    mach_vm_offset_t	mask,
    boolean_t		anywhere,
    vm_map_t		src_map,
    mach_vm_offset_t	memory_address,
    boolean_t		copy,
    vm_prot_t		*cur_protection,
    vm_prot_t		*max_protection,
    vm_inherit_t	inheritance)
{
    vm_map_offset_t	map_addr;
    kern_return_t	kr;

    if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_offset_t)*address;

    kr = vm_map_remap(target_map,
              &map_addr,
              size,
              mask,
              anywhere,
              src_map,
              memory_address,
              copy,
              cur_protection,
              max_protection,
              inheritance);
    *address = map_addr;
    return kr;
}
/*
 * vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 *
 * The addressability of the source and target address
 * range is limited by the size of vm_address_t (in the
 * kernel context).
 */
kern_return_t
vm_remap(
    vm_map_t		target_map,
    vm_offset_t		*address,
    vm_size_t		size,
    vm_offset_t		mask,
    boolean_t		anywhere,
    vm_map_t		src_map,
    vm_offset_t		memory_address,
    boolean_t		copy,
    vm_prot_t		*cur_protection,
    vm_prot_t		*max_protection,
    vm_inherit_t	inheritance)
{
    vm_map_offset_t	map_addr;
    kern_return_t	kr;

    if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_offset_t)*address;

    kr = vm_map_remap(target_map,
              &map_addr,
              size,
              mask,
              anywhere,
              src_map,
              memory_address,
              copy,
              cur_protection,
              max_protection,
              inheritance);
    *address = CAST_DOWN(vm_offset_t, map_addr);
    return kr;
}
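/*
 * Illustrative user-space usage (not part of this file, not compiled here):
 * creating a second mapping (an alias) of an existing range in the same
 * task with mach_vm_remap(), assuming <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
alias_range(mach_vm_address_t src, mach_vm_size_t size,
            mach_vm_address_t *alias_out)
{
    mach_vm_address_t	target = 0;
    vm_prot_t		cur_prot, max_prot;
    kern_return_t	kr;

    kr = mach_vm_remap(mach_task_self(), &target, size,
               0,			/* mask */
               VM_FLAGS_ANYWHERE,
               mach_task_self(),	/* source task */
               src,
               FALSE,			/* copy: share, don't copy */
               &cur_prot, &max_prot,
               VM_INHERIT_SHARE);
    if (kr == KERN_SUCCESS)
        *alias_out = target;
    return kr;
}
#endif	/* example only */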
/*
 * NOTE: these routines (and this file) will no longer require mach_host_server.h
 * when mach_vm_wire and vm_wire are changed to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	mach_vm_wire
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire(
    host_priv_t		host_priv,
    vm_map_t		map,
    mach_vm_offset_t	start,
    mach_vm_size_t	size,
    vm_prot_t		access)
{
    kern_return_t	rc;

    if (host_priv == HOST_PRIV_NULL)
        return KERN_INVALID_HOST;

    assert(host_priv == &realhost);

    if (map == VM_MAP_NULL)
        return KERN_INVALID_TASK;

    if (access & ~VM_PROT_ALL)
        return KERN_INVALID_ARGUMENT;

    if (access != VM_PROT_NONE) {
        rc = vm_map_wire(map, vm_map_trunc_page(start),
                 vm_map_round_page(start+size), access, TRUE);
    } else {
        rc = vm_map_unwire(map, vm_map_trunc_page(start),
                 vm_map_round_page(start+size), TRUE);
    }
    return rc;
}
/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
    host_priv_t		host_priv,
    register vm_map_t	map,
    vm_offset_t		start,
    vm_size_t		size,
    vm_prot_t		access)
{
    kern_return_t	rc;

    if (host_priv == HOST_PRIV_NULL)
        return KERN_INVALID_HOST;

    assert(host_priv == &realhost);

    if (map == VM_MAP_NULL)
        return KERN_INVALID_TASK;

    if ((access & ~VM_PROT_ALL) || (start + size < start))
        return KERN_INVALID_ARGUMENT;

    if (size == 0) {
        rc = KERN_SUCCESS;
    } else if (access != VM_PROT_NONE) {
        rc = vm_map_wire(map, vm_map_trunc_page(start),
                 vm_map_round_page(start+size), access, TRUE);
    } else {
        rc = vm_map_unwire(map, vm_map_trunc_page(start),
                 vm_map_round_page(start+size), TRUE);
    }
    return rc;
}
/*
 *	mach_vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
kern_return_t
mach_vm_msync(
    vm_map_t		map,
    mach_vm_address_t	address,
    mach_vm_size_t	size,
    vm_sync_t		sync_flags)
{
    if (map == VM_MAP_NULL)
        return(KERN_INVALID_TASK);

    return vm_map_msync(map, (vm_map_address_t)address,
            (vm_map_size_t)size, sync_flags);
}
/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */
kern_return_t
vm_msync(
    vm_map_t	map,
    vm_address_t	address,
    vm_size_t	size,
    vm_sync_t	sync_flags)
{
    if (map == VM_MAP_NULL)
        return(KERN_INVALID_TASK);

    return vm_map_msync(map, (vm_map_address_t)address,
            (vm_map_size_t)size, sync_flags);
}
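/*
 * Illustrative user-space usage (not part of this file, not compiled here):
 * asking the kernel to push dirty pages of a mapped range back to the
 * pager and wait for completion, assuming <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
flush_range(mach_vm_address_t addr, mach_vm_size_t size)
{
    /* synchronous flush; see the flag descriptions above */
    return mach_vm_msync(mach_task_self(), addr, size, VM_SYNC_SYNCHRONOUS);
}
#endif	/* example only */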
/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
kern_return_t
mach_vm_behavior_set(
    vm_map_t		map,
    mach_vm_offset_t	start,
    mach_vm_size_t	size,
    vm_behavior_t	new_behavior)
{
    if ((map == VM_MAP_NULL) || (start + size < start))
        return(KERN_INVALID_ARGUMENT);

    if (size == 0)
        return KERN_SUCCESS;

    return(vm_map_behavior_set(map, vm_map_trunc_page(start),
                   vm_map_round_page(start+size), new_behavior));
}
/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
kern_return_t
vm_behavior_set(
    vm_map_t		map,
    vm_offset_t		start,
    vm_size_t		size,
    vm_behavior_t	new_behavior)
{
    if ((map == VM_MAP_NULL) || (start + size < start))
        return(KERN_INVALID_ARGUMENT);

    if (size == 0)
        return KERN_SUCCESS;

    return(vm_map_behavior_set(map, vm_map_trunc_page(start),
                   vm_map_round_page(start+size), new_behavior));
}
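/*
 * Illustrative user-space usage (not part of this file, not compiled here):
 * hinting that a range will be touched sequentially so the pager can
 * read ahead more aggressively, assuming <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
hint_sequential(mach_vm_address_t addr, mach_vm_size_t size)
{
    return mach_vm_behavior_set(mach_task_self(), addr, size,
                                VM_BEHAVIOR_SEQUENTIAL);
}
#endif	/* example only */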
/*
 *	mach_vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map.  Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
mach_vm_region(
    vm_map_t		 map,
    mach_vm_offset_t	*address,		/* IN/OUT */
    mach_vm_size_t	*size,			/* OUT */
    vm_region_flavor_t	 flavor,		/* IN */
    vm_region_info_t	 info,			/* OUT */
    mach_msg_type_number_t	*count,		/* IN/OUT */
    mach_port_t		*object_name)		/* OUT */
{
    vm_map_offset_t	map_addr;
    vm_map_size_t	map_size;
    kern_return_t	kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_offset_t)*address;
    map_size = (vm_map_size_t)*size;

    /* legacy conversion */
    if (VM_REGION_BASIC_INFO == flavor)
        flavor = VM_REGION_BASIC_INFO_64;

    kr = vm_map_region(map,
               &map_addr, &map_size,
               flavor, info, count,
               object_name);

    *address = map_addr;
    *size = map_size;
    return kr;
}
/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map.  Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
vm_region_64(
    vm_map_t		 map,
    vm_offset_t		*address,		/* IN/OUT */
    vm_size_t		*size,			/* OUT */
    vm_region_flavor_t	 flavor,		/* IN */
    vm_region_info_t	 info,			/* OUT */
    mach_msg_type_number_t	*count,		/* IN/OUT */
    mach_port_t		*object_name)		/* OUT */
{
    vm_map_offset_t	map_addr;
    vm_map_size_t	map_size;
    kern_return_t	kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_offset_t)*address;
    map_size = (vm_map_size_t)*size;

    /* legacy conversion */
    if (VM_REGION_BASIC_INFO == flavor)
        flavor = VM_REGION_BASIC_INFO_64;

    kr = vm_map_region(map,
               &map_addr, &map_size,
               flavor, info, count,
               object_name);

    *address = CAST_DOWN(vm_offset_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
        return KERN_INVALID_ADDRESS;
    return kr;
}
kern_return_t
vm_region(
    vm_map_t		 map,
    vm_address_t	*address,		/* IN/OUT */
    vm_size_t		*size,			/* OUT */
    vm_region_flavor_t	 flavor,		/* IN */
    vm_region_info_t	 info,			/* OUT */
    mach_msg_type_number_t	*count,		/* IN/OUT */
    mach_port_t		*object_name)		/* OUT */
{
    vm_map_address_t	map_addr;
    vm_map_size_t	map_size;
    kern_return_t	kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;

    kr = vm_map_region(map,
               &map_addr, &map_size,
               flavor, info, count,
               object_name);

    *address = CAST_DOWN(vm_address_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
        return KERN_INVALID_ADDRESS;
    return kr;
}
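/*
 * Illustrative user-space usage (not part of this file, not compiled here):
 * walking a task's regions with mach_vm_region() and the
 * VM_REGION_BASIC_INFO_64 flavor, assuming <mach/mach_vm.h>.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>

static void
dump_regions(task_t task)
{
    mach_vm_address_t	addr = 0;
    mach_vm_size_t	size = 0;

    for (;;) {
        vm_region_basic_info_data_64_t	info;
        mach_msg_type_number_t		count = VM_REGION_BASIC_INFO_COUNT_64;
        mach_port_t			object_name = MACH_PORT_NULL;

        if (mach_vm_region(task, &addr, &size,
                           VM_REGION_BASIC_INFO_64,
                           (vm_region_info_t)&info,
                           &count, &object_name) != KERN_SUCCESS)
            break;	/* no more regions */

        printf("0x%llx-0x%llx prot=%d/%d\n",
               (unsigned long long)addr,
               (unsigned long long)(addr + size),
               info.protection, info.max_protection);

        addr += size;	/* continue the scan after this region */
    }
}
#endif	/* example only */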
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
mach_vm_region_recurse(
    vm_map_t			map,
    mach_vm_address_t		*address,
    mach_vm_size_t		*size,
    natural_t			*depth,
    vm_region_recurse_info_t	info,
    mach_msg_type_number_t	*infoCnt)
{
    vm_map_address_t	map_addr;
    vm_map_size_t	map_size;
    kern_return_t	kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;

    kr = vm_map_region_recurse_64(
            map,
            &map_addr,
            &map_size,
            depth,
            (vm_region_submap_info_64_t)info,
            infoCnt);

    *address = map_addr;
    *size = map_size;
    return kr;
}
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
vm_region_recurse_64(
    vm_map_t			map,
    vm_address_t		*address,
    vm_size_t			*size,
    natural_t			*depth,
    vm_region_recurse_info_64_t	info,
    mach_msg_type_number_t	*infoCnt)
{
    vm_map_address_t	map_addr;
    vm_map_size_t	map_size;
    kern_return_t	kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;

    kr = vm_map_region_recurse_64(
            map,
            &map_addr,
            &map_size,
            depth,
            (vm_region_submap_info_64_t)info,
            infoCnt);

    *address = CAST_DOWN(vm_address_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
        return KERN_INVALID_ADDRESS;
    return kr;
}
kern_return_t
vm_region_recurse(
    vm_map_t			map,
    vm_offset_t			*address,	/* IN/OUT */
    vm_size_t			*size,		/* OUT */
    natural_t			*depth,		/* IN/OUT */
    vm_region_recurse_info_t	info32,		/* IN/OUT */
    mach_msg_type_number_t	*infoCnt)	/* IN/OUT */
{
    vm_region_submap_info_data_64_t info64;
    vm_region_submap_info_t info;
    vm_map_address_t	map_addr;
    vm_map_size_t	map_size;
    kern_return_t	kr;

    if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;
    info = (vm_region_submap_info_t)info32;
    *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

    kr = vm_map_region_recurse_64(map, &map_addr,&map_size,
                      depth, &info64, infoCnt);

    info->protection = info64.protection;
    info->max_protection = info64.max_protection;
    info->inheritance = info64.inheritance;
    info->offset = (uint32_t)info64.offset; /* trouble-maker */
    info->user_tag = info64.user_tag;
    info->pages_resident = info64.pages_resident;
    info->pages_shared_now_private = info64.pages_shared_now_private;
    info->pages_swapped_out = info64.pages_swapped_out;
    info->pages_dirtied = info64.pages_dirtied;
    info->ref_count = info64.ref_count;
    info->shadow_depth = info64.shadow_depth;
    info->external_pager = info64.external_pager;
    info->share_mode = info64.share_mode;
    info->is_submap = info64.is_submap;
    info->behavior = info64.behavior;
    info->object_id = info64.object_id;
    info->user_wired_count = info64.user_wired_count;

    *address = CAST_DOWN(vm_address_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);
    *infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
        return KERN_INVALID_ADDRESS;
    return kr;
}
kern_return_t
vm_purgable_control(
    vm_map_t		map,
    vm_offset_t		address,
    vm_purgable_t	control,
    int			*state)
{
    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    return vm_map_purgable_control(map,
                       vm_map_trunc_page(address),
                       control,
                       state);
}
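/*
 * Illustrative user-space usage (not part of this file, not compiled here):
 * marking a purgable allocation volatile so the kernel may reclaim it
 * under memory pressure, assuming <mach/mach.h> and the VM_PURGABLE_*
 * constants.  The range must come from an allocation made with
 * VM_FLAGS_PURGABLE.
 */
#if 0	/* example only */
#include <mach/mach.h>

static kern_return_t
make_volatile(vm_address_t addr)
{
    int state = VM_PURGABLE_VOLATILE;

    /* on return, state holds the previous purgable state */
    return vm_purgable_control(mach_task_self(), addr,
                               VM_PURGABLE_SET_STATE, &state);
}
#endif	/* example only */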
/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host priv port).  Set this variable to zero if
 *	you want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
kern_return_t
vm_allocate_cpm(
    host_priv_t		host_priv,
    vm_map_t		map,
    vm_address_t	*addr,
    vm_size_t		size,
    int			flags)
{
    vm_map_address_t	map_addr;
    vm_map_size_t	map_size;
    kern_return_t	kr;

    if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
        return KERN_INVALID_HOST;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_address_t)*addr;
    map_size = (vm_map_size_t)size;

    kr = vm_map_enter_cpm(map,
                  &map_addr,
                  map_size,
                  flags);

    *addr = CAST_DOWN(vm_address_t, map_addr);
    return kr;
}
kern_return_t
mach_vm_page_query(
    vm_map_t		map,
    mach_vm_offset_t	offset,
    int			*disposition,
    int			*ref_count)
{
    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    return vm_map_page_info(map,
                vm_map_trunc_page(offset),
                disposition, ref_count);
}

kern_return_t
vm_map_page_query(
    vm_map_t		map,
    vm_offset_t		offset,
    int			*disposition,
    int			*ref_count)
{
    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    return vm_map_page_info(map,
                vm_map_trunc_page(offset),
                disposition, ref_count);
}
/* map a (whole) upl into an address space */
kern_return_t
vm_upl_map(
    vm_map_t		map,
    upl_t		upl,
    vm_offset_t		*dst_addr)
{
    vm_map_offset_t	map_addr;
    kern_return_t	kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    kr = vm_map_enter_upl(map, upl, &map_addr);
    *dst_addr = CAST_DOWN(vm_offset_t, map_addr);
    return kr;
}

kern_return_t
vm_upl_unmap(
    vm_map_t		map,
    upl_t		upl)
{
    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    return (vm_map_remove_upl(map, upl));
}
/* Retrieve a upl for an object underlying an address range in a map */

kern_return_t
vm_map_get_upl(
    vm_map_t		map,
    vm_map_offset_t	map_offset,
    upl_size_t		*upl_size,
    upl_t		*upl,
    upl_page_info_array_t	page_list,
    unsigned int	*count,
    int			*flags,
    int			force_data_sync)
{
    int		map_flags;
    kern_return_t	kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_flags = *flags & ~UPL_NOZEROFILL;
    if (force_data_sync)
        map_flags |= UPL_FORCE_DATA_SYNC;

    kr = vm_map_create_upl(map,
                   map_offset,
                   upl_size,
                   upl,
                   page_list,
                   count,
                   &map_flags);

    *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
    return kr;
}
__private_extern__ kern_return_t
mach_memory_entry_allocate(
    vm_named_entry_t	*user_entry_p,
    ipc_port_t		*user_handle_p);	/* forward */
1929 * mach_make_memory_entry_64
1931 * Think of it as a two-stage vm_remap() operation. First
1932 * you get a handle. Second, you get map that handle in
1933 * somewhere else. Rather than doing it all at once (and
1934 * without needing access to the other whole map).
1938 mach_make_memory_entry_64(
1939 vm_map_t target_map
,
1940 memory_object_size_t
*size
,
1941 memory_object_offset_t offset
,
1942 vm_prot_t permission
,
1943 ipc_port_t
*object_handle
,
1944 ipc_port_t parent_handle
)
1946 vm_map_version_t version
;
1947 vm_named_entry_t parent_entry
;
1948 vm_named_entry_t user_entry
;
1949 ipc_port_t user_handle
;
1953 /* needed for call to vm_map_lookup_locked */
1955 vm_object_offset_t obj_off
;
1957 vm_map_offset_t lo_offset
, hi_offset
;
1958 vm_behavior_t behavior
;
1960 vm_object_t shadow_object
;
1962 /* needed for direct map entry manipulation */
1963 vm_map_entry_t map_entry
;
1964 vm_map_entry_t next_entry
;
1966 vm_map_t original_map
= target_map
;
1967 vm_map_size_t total_size
;
1968 vm_map_size_t map_size
;
1969 vm_map_offset_t map_offset
;
1970 vm_map_offset_t local_offset
;
1971 vm_object_size_t mappable_size
;
1973 unsigned int access
;
1974 vm_prot_t protections
;
1975 unsigned int wimg_mode
;
1976 boolean_t cache_attr
= FALSE
;
1978 if (((permission
& 0x00FF0000) &
1980 MAP_MEM_NAMED_CREATE
|
1982 MAP_MEM_NAMED_REUSE
))) {
1984 * Unknown flag: reject for forward compatibility.
1986 return KERN_INVALID_VALUE
;
1989 if (parent_handle
!= IP_NULL
&&
1990 ip_kotype(parent_handle
) == IKOT_NAMED_ENTRY
) {
1991 parent_entry
= (vm_named_entry_t
) parent_handle
->ip_kobject
;
1993 parent_entry
= NULL
;
1996 protections
= permission
& VM_PROT_ALL
;
1997 access
= GET_MAP_MEM(permission
);
1999 user_handle
= IP_NULL
;
2002 map_offset
= vm_map_trunc_page(offset
);
2003 map_size
= vm_map_round_page(*size
);
2005 if (permission
& MAP_MEM_ONLY
) {
2006 boolean_t parent_is_object
;
2008 if (parent_entry
== NULL
) {
2009 return KERN_INVALID_ARGUMENT
;
2012 parent_is_object
= !(parent_entry
->is_sub_map
|| parent_entry
->is_pager
);
2013 object
= parent_entry
->backing
.object
;
2014 if(parent_is_object
&& object
!= VM_OBJECT_NULL
)
2015 wimg_mode
= object
->wimg_bits
;
2017 wimg_mode
= VM_WIMG_DEFAULT
;
2018 if((access
!= GET_MAP_MEM(parent_entry
->protection
)) &&
2019 !(parent_entry
->protection
& VM_PROT_WRITE
)) {
2020 return KERN_INVALID_RIGHT
;
2022 if(access
== MAP_MEM_IO
) {
2023 SET_MAP_MEM(access
, parent_entry
->protection
);
2024 wimg_mode
= VM_WIMG_IO
;
2025 } else if (access
== MAP_MEM_COPYBACK
) {
2026 SET_MAP_MEM(access
, parent_entry
->protection
);
2027 wimg_mode
= VM_WIMG_DEFAULT
;
2028 } else if (access
== MAP_MEM_WTHRU
) {
2029 SET_MAP_MEM(access
, parent_entry
->protection
);
2030 wimg_mode
= VM_WIMG_WTHRU
;
2031 } else if (access
== MAP_MEM_WCOMB
) {
2032 SET_MAP_MEM(access
, parent_entry
->protection
);
2033 wimg_mode
= VM_WIMG_WCOMB
;
2035 if(parent_is_object
&& object
&&
2036 (access
!= MAP_MEM_NOOP
) &&
2037 (!(object
->nophyscache
))) {
2038 if(object
->wimg_bits
!= wimg_mode
) {
2040 if ((wimg_mode
== VM_WIMG_IO
)
2041 || (wimg_mode
== VM_WIMG_WCOMB
))
2045 vm_object_lock(object
);
2046 vm_object_paging_wait(object
, THREAD_UNINT
);
2047 object
->wimg_bits
= wimg_mode
;
2048 queue_iterate(&object
->memq
,
2049 p
, vm_page_t
, listq
) {
2050 if (!p
->fictitious
) {
2051 pmap_disconnect(p
->phys_page
);
2053 pmap_sync_page_attributes_phys(p
->phys_page
);
2056 vm_object_unlock(object
);
2060 *object_handle
= IP_NULL
;
2061 return KERN_SUCCESS
;
2064 if(permission
& MAP_MEM_NAMED_CREATE
) {
2065 kr
= mach_memory_entry_allocate(&user_entry
, &user_handle
);
2066 if (kr
!= KERN_SUCCESS
) {
2067 return KERN_FAILURE
;
2071 * Force the creation of the VM object now.
2073 if (map_size
> (vm_map_size_t
) VM_MAX_ADDRESS
) {
2075 * LP64todo - for now, we can only allocate 4GB
2076 * internal objects because the default pager can't
2077 * page bigger ones. Remove this when it can.
2083 object
= vm_object_allocate(map_size
);
2084 assert(object
!= VM_OBJECT_NULL
);
2086 if (permission
& MAP_MEM_PURGABLE
) {
2087 if (! (permission
& VM_PROT_WRITE
)) {
2088 /* if we can't write, we can't purge */
2089 vm_object_deallocate(object
);
2090 kr
= KERN_INVALID_ARGUMENT
;
2093 object
->purgable
= VM_OBJECT_PURGABLE_NONVOLATILE
;
2097 * The VM object is brand new and nobody else knows about it,
2098 * so we don't need to lock it.
2101 wimg_mode
= object
->wimg_bits
;
2102 if (access
== MAP_MEM_IO
) {
2103 wimg_mode
= VM_WIMG_IO
;
2104 } else if (access
== MAP_MEM_COPYBACK
) {
2105 wimg_mode
= VM_WIMG_DEFAULT
;
2106 } else if (access
== MAP_MEM_WTHRU
) {
2107 wimg_mode
= VM_WIMG_WTHRU
;
2108 } else if (access
== MAP_MEM_WCOMB
) {
2109 wimg_mode
= VM_WIMG_WCOMB
;
2111 if (access
!= MAP_MEM_NOOP
) {
2112 object
->wimg_bits
= wimg_mode
;
2114 /* the object has no pages, so no WIMG bits to update here */
2118 * We use this path when we want to make sure that
2119 * nobody messes with the object (coalesce, for
2120 * example) before we map it.
2121 * We might want to use these objects for transposition via
2122 * vm_object_transpose() too, so we don't want any copy or
2123 * shadow objects either...
2125 object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
2127 user_entry
->backing
.object
= object
;
2128 user_entry
->internal
= TRUE
;
2129 user_entry
->is_sub_map
= FALSE
;
2130 user_entry
->is_pager
= FALSE
;
2131 user_entry
->offset
= 0;
2132 user_entry
->protection
= protections
;
2133 SET_MAP_MEM(access
, user_entry
->protection
);
2134 user_entry
->size
= map_size
;
2136 /* user_object pager and internal fields are not used */
2137 /* when the object field is filled in. */
2139 *size
= CAST_DOWN(vm_size_t
, map_size
);
2140 *object_handle
= user_handle
;
2141 return KERN_SUCCESS
;
2144 if (parent_entry
== NULL
||
2145 (permission
& MAP_MEM_NAMED_REUSE
)) {
2147 /* Create a named object based on address range within the task map */
2148 /* Go find the object at given address */
2151 vm_map_lock_read(target_map
);
2153 /* get the object associated with the target address */
2154 /* note we check the permission of the range against */
2155 /* that requested by the caller */
2157 kr
= vm_map_lookup_locked(&target_map
, map_offset
,
2158 protections
, &version
,
2159 &object
, &obj_off
, &prot
, &wired
, &behavior
,
2160 &lo_offset
, &hi_offset
, &real_map
);
2161 if (kr
!= KERN_SUCCESS
) {
2162 vm_map_unlock_read(target_map
);
2165 if (((prot
& protections
) != protections
)
2166 || (object
== kernel_object
)) {
2167 kr
= KERN_INVALID_RIGHT
;
2168 vm_object_unlock(object
);
2169 vm_map_unlock_read(target_map
);
2170 if(real_map
!= target_map
)
2171 vm_map_unlock_read(real_map
);
2172 if(object
== kernel_object
) {
2173 printf("Warning: Attempt to create a named"
2174 " entry from the kernel_object\n");
2179 /* We have an object, now check to see if this object */
2180 /* is suitable. If not, create a shadow and share that */
2183 * We have to unlock the VM object to avoid deadlocking with
2184 * a VM map lock (the lock ordering is map, the object), if we
2185 * need to modify the VM map to create a shadow object. Since
2186 * we might release the VM map lock below anyway, we have
2187 * to release the VM map lock now.
2188 * XXX FBDP There must be a way to avoid this double lookup...
2190 * Take an extra reference on the VM object to make sure it's
2191 * not going to disappear.
2193 vm_object_reference_locked(object
); /* extra ref to hold obj */
2194 vm_object_unlock(object
);
2196 local_map
= original_map
;
2197 local_offset
= map_offset
;
2198 if(target_map
!= local_map
) {
2199 vm_map_unlock_read(target_map
);
2200 if(real_map
!= target_map
)
2201 vm_map_unlock_read(real_map
);
2202 vm_map_lock_read(local_map
);
2203 target_map
= local_map
;
2204 real_map
= local_map
;
2207 if(!vm_map_lookup_entry(local_map
,
2208 local_offset
, &map_entry
)) {
2209 kr
= KERN_INVALID_ARGUMENT
;
2210 vm_map_unlock_read(target_map
);
2211 if(real_map
!= target_map
)
2212 vm_map_unlock_read(real_map
);
2213 vm_object_deallocate(object
); /* release extra ref */
2214 object
= VM_OBJECT_NULL
;
2217 if(!(map_entry
->is_sub_map
)) {
2218 if(map_entry
->object
.vm_object
!= object
) {
2219 kr
= KERN_INVALID_ARGUMENT
;
2220 vm_map_unlock_read(target_map
);
2221 if(real_map
!= target_map
)
2222 vm_map_unlock_read(real_map
);
2223 vm_object_deallocate(object
); /* release extra ref */
2224 object
= VM_OBJECT_NULL
;
2231 local_map
= map_entry
->object
.sub_map
;
2233 vm_map_lock_read(local_map
);
2234 vm_map_unlock_read(tmap
);
2235 target_map
= local_map
;
2236 real_map
= local_map
;
2237 local_offset
= local_offset
- map_entry
->vme_start
;
2238 local_offset
+= map_entry
->offset
;
2243 * We found the VM map entry, lock the VM object again.
2245 vm_object_lock(object
);
2246 if(map_entry
->wired_count
) {
2247 /* JMM - The check below should be reworked instead. */
2248 object
->true_share
= TRUE
;
2250 if(((map_entry
->max_protection
) & protections
) != protections
) {
2251 kr
= KERN_INVALID_RIGHT
;
2252 vm_object_unlock(object
);
2253 vm_map_unlock_read(target_map
);
2254 if(real_map
!= target_map
)
2255 vm_map_unlock_read(real_map
);
2256 vm_object_deallocate(object
);
2257 object
= VM_OBJECT_NULL
;
2261 mappable_size
= hi_offset
- obj_off
;
2262 total_size
= map_entry
->vme_end
- map_entry
->vme_start
;
2263 if(map_size
> mappable_size
) {
2264 /* try to extend mappable size if the entries */
2265 /* following are from the same object and are */
2267 next_entry
= map_entry
->vme_next
;
2268 /* lets see if the next map entry is still */
2269 /* pointing at this object and is contiguous */
2270 while(map_size
> mappable_size
) {
2271 if((next_entry
->object
.vm_object
== object
) &&
2272 (next_entry
->vme_start
==
2273 next_entry
->vme_prev
->vme_end
) &&
2274 (next_entry
->offset
==
2275 next_entry
->vme_prev
->offset
+
2276 (next_entry
->vme_prev
->vme_end
-
2277 next_entry
->vme_prev
->vme_start
))) {
2278 if(((next_entry
->max_protection
)
2279 & protections
) != protections
) {
2282 if (next_entry
->needs_copy
!=
2283 map_entry
->needs_copy
)
2285 mappable_size
+= next_entry
->vme_end
2286 - next_entry
->vme_start
;
2287 total_size
+= next_entry
->vme_end
2288 - next_entry
->vme_start
;
2289 next_entry
= next_entry
->vme_next
;
2297 if(object
->internal
) {
2298 /* vm_map_lookup_locked will create a shadow if */
2299 /* needs_copy is set but does not check for the */
2300 /* other two conditions shown. It is important to */
2301 /* set up an object which will not be pulled from */
2304 if ((map_entry
->needs_copy
|| object
->shadowed
||
2305 (object
->size
> total_size
))
2306 && !object
->true_share
) {
2308 * We have to unlock the VM object before
2309 * trying to upgrade the VM map lock, to
2310 * honor lock ordering (map then object).
2311 * Otherwise, we would deadlock if another
2312 * thread holds a read lock on the VM map and
2313 * is trying to acquire the VM object's lock.
2314 * We still hold an extra reference on the
2315 * VM object, guaranteeing that it won't
2318 vm_object_unlock(object
);
2320 if (vm_map_lock_read_to_write(target_map
)) {
2322 * We couldn't upgrade our VM map lock
2323 * from "read" to "write" and we lost
2325 * Start all over again...
2327 vm_object_deallocate(object
); /* extra ref */
2328 target_map
= original_map
;
2331 vm_object_lock(object
);
2334 * JMM - We need to avoid coming here when the object
2335 * is wired by anybody, not just the current map. Why
2336 * couldn't we use the standard vm_object_copy_quickly()
2340 /* create a shadow object */
2341 vm_object_shadow(&map_entry
->object
.vm_object
,
2342 &map_entry
->offset
, total_size
);
2343 shadow_object
= map_entry
->object
.vm_object
;
2344 vm_object_unlock(object
);
2346 vm_object_pmap_protect(
2347 object
, map_entry
->offset
,
2349 ((map_entry
->is_shared
2350 || target_map
->mapped
)
2353 map_entry
->vme_start
,
2354 map_entry
->protection
& ~VM_PROT_WRITE
);
2355 total_size
-= (map_entry
->vme_end
2356 - map_entry
->vme_start
);
2357 next_entry
= map_entry
->vme_next
;
2358 map_entry
->needs_copy
= FALSE
;
2359 while (total_size
) {
2360 if(next_entry
->object
.vm_object
== object
) {
2361 shadow_object
->ref_count
++;
2362 vm_object_res_reference(shadow_object
);
2363 next_entry
->object
.vm_object
2365 vm_object_deallocate(object
);
2367 = next_entry
->vme_prev
->offset
+
2368 (next_entry
->vme_prev
->vme_end
2369 - next_entry
->vme_prev
->vme_start
);
2370 next_entry
->needs_copy
= FALSE
;
2372 panic("mach_make_memory_entry_64:"
2373 " map entries out of sync\n");
2377 - next_entry
->vme_start
;
2378 next_entry
= next_entry
->vme_next
;
2382 * Transfer our extra reference to the
2385 vm_object_reference_locked(shadow_object
);
2386 vm_object_deallocate(object
); /* extra ref */
2387 object
= shadow_object
;
2389 obj_off
= (local_offset
- map_entry
->vme_start
)
2390 + map_entry
->offset
;
2392 vm_map_lock_write_to_read(target_map
);
2393 vm_object_lock(object
);
2398 /* note: in the future we can (if necessary) allow for */
2399 /* memory object lists, this will better support */
2400 /* fragmentation, but is it necessary? The user should */
2401 /* be encouraged to create address space oriented */
2402 /* shared objects from CLEAN memory regions which have */
2403 /* a known and defined history. i.e. no inheritence */
2404 /* share, make this call before making the region the */
2405 /* target of ipc's, etc. The code above, protecting */
2406 /* against delayed copy, etc. is mostly defensive. */
2408 wimg_mode
= object
->wimg_bits
;
2409 if(!(object
->nophyscache
)) {
2410 if(access
== MAP_MEM_IO
) {
2411 wimg_mode
= VM_WIMG_IO
;
2412 } else if (access
== MAP_MEM_COPYBACK
) {
2413 wimg_mode
= VM_WIMG_USE_DEFAULT
;
2414 } else if (access
== MAP_MEM_WTHRU
) {
2415 wimg_mode
= VM_WIMG_WTHRU
;
2416 } else if (access
== MAP_MEM_WCOMB
) {
2417 wimg_mode
= VM_WIMG_WCOMB
;
2421 object
->true_share
= TRUE
;
2422 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
)
2423 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
2426 * The memory entry now points to this VM object and we
2427 * need to hold a reference on the VM object. Use the extra
2428 * reference we took earlier to keep the object alive when we
2432 vm_map_unlock_read(target_map
);
2433 if(real_map
!= target_map
)
2434 vm_map_unlock_read(real_map
);
2436 if(object
->wimg_bits
!= wimg_mode
) {
2439 vm_object_paging_wait(object
, THREAD_UNINT
);
2441 if ((wimg_mode
== VM_WIMG_IO
)
2442 || (wimg_mode
== VM_WIMG_WCOMB
))
2447 queue_iterate(&object
->memq
,
2448 p
, vm_page_t
, listq
) {
2449 if (!p
->fictitious
) {
2450 pmap_disconnect(p
->phys_page
);
2452 pmap_sync_page_attributes_phys(p
->phys_page
);
2455 object
->wimg_bits
= wimg_mode
;
2458 /* the size of mapped entry that overlaps with our region */
2459 /* which is targeted for share. */
2460 /* (entry_end - entry_start) - */
2461 /* offset of our beg addr within entry */
2462 /* it corresponds to this: */
2464 if(map_size
> mappable_size
)
2465 map_size
= mappable_size
;
2467 if (permission
& MAP_MEM_NAMED_REUSE
) {
2469 * Compare what we got with the "parent_entry".
2470 * If they match, re-use the "parent_entry" instead
2471 * of creating a new one.
2473 if (parent_entry
!= NULL
&&
2474 parent_entry
->backing
.object
== object
&&
2475 parent_entry
->internal
== object
->internal
&&
2476 parent_entry
->is_sub_map
== FALSE
&&
2477 parent_entry
->is_pager
== FALSE
&&
2478 parent_entry
->offset
== obj_off
&&
2479 parent_entry
->protection
== protections
&&
2480 parent_entry
->size
== map_size
) {
2482 * We have a match: re-use "parent_entry".
2484 /* release our extra reference on object */
2485 vm_object_unlock(object
);
2486 vm_object_deallocate(object
);
2487 /* parent_entry->ref_count++; XXX ? */
2488 /* Get an extra send-right on handle */
2489 ipc_port_copy_send(parent_handle
);
2490 *object_handle
= parent_handle
;
2491 return KERN_SUCCESS
;
2494 * No match: we need to create a new entry.
	vm_object_unlock(object);
	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		/* release our unused reference on the object */
		vm_object_deallocate(object);
		return KERN_FAILURE;
	}

	user_entry->backing.object = object;
	user_entry->internal = object->internal;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = FALSE;
	user_entry->offset = obj_off;
	user_entry->protection = permission;
	user_entry->size = map_size;

	/* user_object pager and internal fields are not used */
	/* when the object field is filled in.                 */

	*size = CAST_DOWN(vm_size_t, map_size);
	*object_handle = user_handle;
	return KERN_SUCCESS;
	/* The new object will be based on an existing named object */

	if (parent_entry == NULL) {
		kr = KERN_INVALID_ARGUMENT;
		goto make_mem_done;
	}

	if((offset + map_size) > parent_entry->size) {
		kr = KERN_INVALID_ARGUMENT;
		goto make_mem_done;
	}

	if((protections & parent_entry->protection) != protections) {
		kr = KERN_PROTECTION_FAILURE;
		goto make_mem_done;
	}

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		kr = KERN_FAILURE;
		goto make_mem_done;
	}

	user_entry->size = map_size;
	user_entry->offset = parent_entry->offset + map_offset;
	user_entry->is_sub_map = parent_entry->is_sub_map;
	user_entry->is_pager = parent_entry->is_pager;
	user_entry->internal = parent_entry->internal;
	user_entry->protection = protections;

	if(access != MAP_MEM_NOOP) {
		SET_MAP_MEM(access, user_entry->protection);
	}
	if(parent_entry->is_sub_map) {
		user_entry->backing.map = parent_entry->backing.map;
		vm_map_lock(user_entry->backing.map);
		user_entry->backing.map->ref_count++;
		vm_map_unlock(user_entry->backing.map);
	}
	else if (parent_entry->is_pager) {
		user_entry->backing.pager = parent_entry->backing.pager;
		/* JMM - don't we need a reference here? */
	} else {
		object = parent_entry->backing.object;
		assert(object != VM_OBJECT_NULL);
		user_entry->backing.object = object;
		/* we now point to this object, hold on */
		vm_object_reference(object);
		vm_object_lock(object);
		object->true_share = TRUE;
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
		vm_object_unlock(object);
	}

	*size = CAST_DOWN(vm_size_t, map_size);
	*object_handle = user_handle;
	return KERN_SUCCESS;
make_mem_done:
	if (user_handle != IP_NULL) {
		ipc_port_dealloc_kernel(user_handle);
	}
	if (user_entry != NULL) {
		kfree(user_entry, sizeof *user_entry);
	}
	return kr;
}
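/*
 * Illustrative sketch (not part of the original source): the "CLEAN
 * memory regions" advice in the comments above amounts to allocating
 * fresh, never-shared memory and naming it before handing it to anyone
 * else.  The hypothetical in-kernel caller below is kept under #if 0 so
 * it is never built; it only shows the intended calling sequence.
 */
#if 0
static kern_return_t
example_named_entry_from_clean_region(
	vm_map_t	map,
	ipc_port_t	*entry_p)
{
	mach_vm_offset_t	addr = 0;
	memory_object_size_t	size = PAGE_SIZE;
	kern_return_t		kr;

	/* freshly allocated zero-fill memory: no inheritance, no prior ipc */
	kr = mach_vm_allocate(map, &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* name the region before it is ever shared or copied */
	kr = mach_make_memory_entry_64(map, &size,
				       (memory_object_offset_t)addr,
				       VM_PROT_READ | VM_PROT_WRITE,
				       entry_p, IP_NULL);
	if (kr != KERN_SUCCESS)
		(void) mach_vm_deallocate(map, addr, PAGE_SIZE);
	return kr;
}
#endif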
kern_return_t
_mach_make_memory_entry(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_offset_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_offset_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
			(memory_object_offset_t)offset, permission, object_handle,
			parent_entry);
	*size = mo_size;
	return kr;
}
kern_return_t
mach_make_memory_entry(
	vm_map_t		target_map,
	vm_size_t		*size,
	vm_offset_t		offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_offset_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_offset_t)*size;
	kr = mach_make_memory_entry_64(target_map, &mo_size,
			(memory_object_offset_t)offset, permission, object_handle,
			parent_entry);
	*size = CAST_DOWN(vm_size_t, mo_size);
	return kr;
}
/*
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (must_wire)
		map->wiring_required = TRUE;
	else
		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
}
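/*
 * Illustrative sketch (not part of the original source): with the
 * wiring_required flag set, a subsequent allocation on the same map
 * comes back wired, and stays wired until it is explicitly unwired
 * through the vm_wire interface.  The hypothetical caller below is
 * kept under #if 0 so it is never built.
 */
#if 0
static kern_return_t
example_wired_allocation(
	vm_map_t	map)
{
	mach_vm_offset_t	addr = 0;
	kern_return_t		kr;

	kr = task_wire(map, TRUE);	/* wire all future allocations */
	if (kr != KERN_SUCCESS)
		return kr;

	/* this allocation comes back wired because wiring_required is set */
	kr = mach_vm_allocate(map, &addr, PAGE_SIZE, VM_FLAGS_ANYWHERE);

	(void) task_wire(map, FALSE);	/* restore normal behavior */
	return kr;
}
#endif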
__private_extern__ kern_return_t
mach_memory_entry_allocate(
	vm_named_entry_t	*user_entry_p,
	ipc_port_t		*user_handle_p)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	ipc_port_t		previous;

	user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
	if (user_entry == NULL)
		return KERN_FAILURE;

	named_entry_lock_init(user_entry);

	user_handle = ipc_port_alloc_kernel();
	if (user_handle == IP_NULL) {
		kfree(user_entry, sizeof *user_entry);
		return KERN_FAILURE;
	}
	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	user_entry->backing.pager = NULL;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = FALSE;
	user_entry->size = 0;
	user_entry->internal = FALSE;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);

	*user_entry_p = user_entry;
	*user_handle_p = user_handle;

	return KERN_SUCCESS;
}
/*
 *	mach_memory_object_memory_entry_64
 *
 *	Create a named entry backed by the provided pager.
 *
 *	JMM - we need to hold a reference on the pager -
 *	and release it when the named entry is destroyed.
 */
kern_return_t
mach_memory_object_memory_entry_64(
	host_t			host,
	boolean_t		internal,
	vm_object_offset_t	size,
	vm_prot_t		permission,
	memory_object_t		pager,
	ipc_port_t		*entry_handle)
{
	unsigned int		access;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	if (host == HOST_NULL)
		return(KERN_INVALID_HOST);

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	user_entry->backing.pager = pager;
	user_entry->size = size;
	user_entry->offset = 0;
	user_entry->protection = permission & VM_PROT_ALL;
	access = GET_MAP_MEM(permission);
	SET_MAP_MEM(access, user_entry->protection);
	user_entry->internal = internal;
	user_entry->is_sub_map = FALSE;
	user_entry->is_pager = TRUE;
	assert(user_entry->ref_count == 1);

	*entry_handle = user_handle;
	return KERN_SUCCESS;
}
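/*
 * Illustrative sketch (not part of the original source): a caller that
 * already owns a memory_object_t pager can wrap it in a named entry and
 * hand the resulting port out for mapping.  The "pager" argument here
 * is hypothetical; the block is kept under #if 0 so it is never built.
 */
#if 0
static kern_return_t
example_entry_for_pager(
	memory_object_t		pager,
	vm_object_offset_t	size,
	ipc_port_t		*entry_p)
{
	/* external (not internal) pager, read/write mapping allowed */
	return mach_memory_object_memory_entry_64(host_self(), FALSE,
			size, VM_PROT_READ | VM_PROT_WRITE, pager, entry_p);
}
#endif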
kern_return_t
mach_memory_object_memory_entry(
	host_t		host,
	boolean_t	internal,
	vm_size_t	size,
	vm_prot_t	permission,
	memory_object_t	pager,
	ipc_port_t	*entry_handle)
{
	return mach_memory_object_memory_entry_64( host, internal,
		(vm_object_offset_t)size, permission, pager, entry_handle);
}
kern_return_t
mach_memory_entry_purgable_control(
	ipc_port_t	entry_port,
	vm_purgable_t	control,
	int		*state)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_object_t		object;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map || mem_entry->is_pager) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	object = mem_entry->backing.object;
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	/* check that named entry covers entire object ? */
	if (mem_entry->offset != 0 || object->size != mem_entry->size) {
		vm_object_unlock(object);
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	named_entry_unlock(mem_entry);

	kr = vm_object_purgable_control(object, control, state);

	vm_object_unlock(object);

	return kr;
}
/*
 * mach_memory_entry_port_release:
 *
 * Release a send right on a named entry port.  This is the correct
 * way to destroy a named entry.  When the last right on the port is
 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
 */
void
mach_memory_entry_port_release(
	ipc_port_t	port)
{
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
	ipc_port_release_send(port);
}
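/*
 * Illustrative sketch (not part of the original source): in-kernel
 * teardown of a named entry created above.  Dropping the send right is
 * all that is needed; the entry itself is destroyed later, by
 * mach_destroy_memory_entry(), once the last right goes away.  Kept
 * under #if 0 so it is never built.
 */
#if 0
static void
example_release_named_entry(
	ipc_port_t	entry)
{
	if (IP_VALID(entry))
		mach_memory_entry_port_release(entry);
}
#endif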
/*
 * mach_destroy_memory_entry:
 *
 * Drops a reference on a memory entry and destroys the memory entry if
 * there are no more references on it.
 * NOTE: This routine should not be called to destroy a memory entry from the
 * kernel, as it will not release the Mach port associated with the memory
 * entry.  The proper way to destroy a memory entry in the kernel is to
 * call mach_memory_entry_port_release() to release the kernel's send-right on
 * the memory entry's port.  When the last send right is released, the memory
 * entry will be destroyed via ipc_kobject_destroy().
 */
void
mach_destroy_memory_entry(
	ipc_port_t	port)
{
	vm_named_entry_t	named_entry;
#if MACH_ASSERT
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
	named_entry = (vm_named_entry_t)port->ip_kobject;
	mutex_lock(&(named_entry)->Lock);
	named_entry->ref_count -= 1;
	if(named_entry->ref_count == 0) {
		if (named_entry->is_sub_map) {
			vm_map_deallocate(named_entry->backing.map);
		} else if (!named_entry->is_pager) {
			/* release the memory object we've been pointing to */
			vm_object_deallocate(named_entry->backing.object);
		} /* else JMM - need to drop reference on pager in that case */

		mutex_unlock(&(named_entry)->Lock);

		kfree((void *) port->ip_kobject,
		      sizeof (struct vm_named_entry));
	} else
		mutex_unlock(&(named_entry)->Lock);
}
kern_return_t
set_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	if (IP_VALID(dynamic_pager_control_port))
		ipc_port_release_send(dynamic_pager_control_port);

	dynamic_pager_control_port = control_port;
	return KERN_SUCCESS;
}

kern_return_t
get_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	*control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	*control_port = ipc_port_copy_send(dynamic_pager_control_port);
	return KERN_SUCCESS;
}
/* ******* Temporary Internal calls to UPL for BSD ***** */

extern int kernel_upl_map(
	vm_map_t		map,
	upl_t			upl,
	vm_offset_t		*dst_addr);

extern int kernel_upl_unmap(
	vm_map_t		map,
	upl_t			upl);

extern int kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count);

extern int kernel_upl_abort(
	upl_t			upl,
	int			abort_type);

extern int kernel_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags);
kern_return_t
kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return vm_upl_map(map, upl, dst_addr);
}

kern_return_t
kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	return vm_upl_unmap(map, upl);
}

kern_return_t
kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count)
{
	kern_return_t	kr;

	kr = upl_commit(upl, pl, count);
	upl_deallocate(upl);
	return kr;
}

kern_return_t
kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count)
{
	boolean_t		finished = FALSE;
	kern_return_t		kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags)
{
	kern_return_t		kr;
	boolean_t		finished = FALSE;

	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort(
	upl_t			upl,
	int			abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
/*
 * Now a kernel-private interface (for BootCache
 * use only).  Need a cleaner way to create an
 * empty vm_map() and return a handle to it.
 */

kern_return_t
vm_region_object_create(
	__unused vm_map_t	target_map,
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	vm_map_t	new_map;

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
				vm_map_round_page(size), TRUE);

	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	assert(user_entry->ref_count == 1);

	*object_handle = user_handle;
	return KERN_SUCCESS;
}
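/*
 * Illustrative sketch (not part of the original source): a
 * BootCache-style caller obtains a handle on an empty submap of a given
 * size and later tears it down by dropping the port's send right, which
 * ends up in mach_destroy_memory_entry().  The names below are
 * hypothetical; the block is kept under #if 0 so it is never built.
 */
#if 0
static kern_return_t
example_region_object(
	vm_map_t	map,
	vm_size_t	size,
	ipc_port_t	*handle_p)
{
	kern_return_t	kr;

	kr = vm_region_object_create(map, size, handle_p);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use *handle_p ..., then release it: */
	/* mach_memory_entry_port_release(*handle_p); */
	return KERN_SUCCESS;
}
#endif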
ppnum_t vm_map_get_phys_page(	/* forward */
	vm_map_t	map,
	vm_offset_t	offset);

ppnum_t
vm_map_get_phys_page(
	vm_map_t	map,
	vm_offset_t	addr)
{
	vm_object_offset_t	offset;
	vm_object_t		object;
	vm_map_offset_t		map_offset;
	vm_map_entry_t		entry;
	ppnum_t			phys_page = 0;

	map_offset = vm_map_trunc_page(addr);

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, map_offset, &entry)) {

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return (ppnum_t) 0;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;
			vm_map_lock(entry->object.sub_map);
			old_map = map;
			map = entry->object.sub_map;
			map_offset = entry->offset + (map_offset - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		if (entry->object.vm_object->phys_contiguous) {
			/* These are not standard pageable memory mappings. */
			/* If they are not present in the object they will  */
			/* have to be picked up from the pager through the  */
			/* fault mechanism.                                  */
			if(entry->object.vm_object->shadow_offset == 0) {
				/* need to call vm_fault */
				vm_map_unlock(map);
				vm_fault(map, map_offset, VM_PROT_NONE,
					FALSE, THREAD_UNINT, NULL, 0);
				vm_map_lock(map);
				continue;
			}
			offset = entry->offset + (map_offset - entry->vme_start);
			phys_page = (ppnum_t)
				((entry->object.vm_object->shadow_offset
					+ offset) >> 12);
			break;

		}
		offset = entry->offset + (map_offset - entry->vme_start);
		object = entry->object.vm_object;
		vm_object_lock(object);
		while (TRUE) {
			vm_page_t dst_page = vm_page_lookup(object, offset);
			if(dst_page == VM_PAGE_NULL) {
				if(object->shadow) {
					vm_object_t old_object;
					vm_object_lock(object->shadow);
					old_object = object;
					offset = offset + object->shadow_offset;
					object = object->shadow;
					vm_object_unlock(old_object);
				} else {
					vm_object_unlock(object);
					break;
				}
			} else {
				phys_page = (ppnum_t)(dst_page->phys_page);
				vm_object_unlock(object);
				break;
			}
		}
		break;

	}

	vm_map_unlock(map);
	return phys_page;
}
kern_return_t kernel_object_iopl_request(	/* forward */
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	vm_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags);

kern_return_t
kernel_object_iopl_request(
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	vm_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags)
{
	vm_object_t		object;
	kern_return_t		ret;

	int			caller_flags;

	caller_flags = *flags;

	if (caller_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}

	/* a few checks to make sure user is obeying rules */
	if(*upl_size == 0) {
		if(offset >= named_entry->size)
			return(KERN_INVALID_RIGHT);
		*upl_size = named_entry->size - offset;
	}
	if(caller_flags & UPL_COPYOUT_FROM) {
		if((named_entry->protection & VM_PROT_READ)
					!= VM_PROT_READ) {
			return(KERN_INVALID_RIGHT);
		}
	} else {
		if((named_entry->protection &
			(VM_PROT_READ | VM_PROT_WRITE))
			!= (VM_PROT_READ | VM_PROT_WRITE)) {
			return(KERN_INVALID_RIGHT);
		}
	}
	if(named_entry->size < (offset + *upl_size))
		return(KERN_INVALID_ARGUMENT);

	/* the caller's parameter "offset" is defined to be the offset */
	/* from the beginning of the named entry's offset in the object */
	offset = offset + named_entry->offset;

	if(named_entry->is_sub_map)
		return (KERN_INVALID_ARGUMENT);

	named_entry_lock(named_entry);

	if (named_entry->is_pager) {
		object = vm_object_enter(named_entry->backing.pager,
				named_entry->offset + named_entry->size,
				named_entry->internal,
				FALSE,
				FALSE);
		if (object == VM_OBJECT_NULL) {
			named_entry_unlock(named_entry);
			return(KERN_INVALID_OBJECT);
		}

		/* JMM - drop reference on the pager here? */

		/* create an extra reference for the object */
		vm_object_lock(object);
		vm_object_reference_locked(object);
		named_entry->backing.object = object;
		named_entry->is_pager = FALSE;
		named_entry_unlock(named_entry);

		/* wait for object (if any) to be ready */
		if (!named_entry->internal) {
			while (!object->pager_ready) {
				vm_object_wait(object,
					       VM_OBJECT_EVENT_PAGER_READY,
					       THREAD_UNINT);
				vm_object_lock(object);
			}
		}
		vm_object_unlock(object);
	} else {
		/* This is the case where we are going to operate  */
		/* on an already known object.  If the object is   */
		/* not ready it is internal.  An external object   */
		/* cannot be mapped until it is ready, so we can   */
		/* avoid the ready check in this case.              */
		object = named_entry->backing.object;
		vm_object_reference(object);
		named_entry_unlock(named_entry);
	}

	if (!object->private) {
		if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
			*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		} else {
			*flags = 0;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
				     offset,
				     *upl_size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     caller_flags);
	vm_object_deallocate(object);
	return ret;
}