/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm32_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm32_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#ifdef VM32_SUPPORT

/*
 * See vm_user.c for the real implementation of all of these functions.
 * We call through to the mach_ "wide" versions of the routines, and trust
 * that the VM system verifies the arguments and only returns addresses
 * that are appropriate for the task's address space size.
 *
 * New VM call implementations should not be added here, because they would
 * be available only to 32-bit userspace clients.  Add them to vm_user.c
 * and the corresponding prototype to mach_vm.defs (subsystem 4800).
 */
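/*
 * Illustrative sketch only (not compiled): every wrapper in this file has
 * the same shape -- widen the 32-bit arguments, call the mach_vm_ "wide"
 * routine, then narrow any returned addresses or sizes with
 * CAST_DOWN_EXPLICIT.  The wrapper name and parameter list below are
 * assumptions for illustration, not a definition taken from this file:
 *
 *	kern_return_t
 *	vm32_example_allocate(vm_map_t map, vm32_offset_t *addr,
 *			      vm32_size_t size, int flags)
 *	{
 *		mach_vm_offset_t maddr = *addr;
 *		kern_return_t kr = mach_vm_allocate(map, &maddr, size, flags);
 *		*addr = CAST_DOWN_EXPLICIT(vm32_offset_t, maddr);
 *		return kr;
 *	}
 */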
	mach_vm_offset_t	maddr;
	kern_return_t		result;

	result = mach_vm_allocate(map, &maddr, size, flags);
	*addr = CAST_DOWN_EXPLICIT(vm32_offset_t, maddr);
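	/*
	 * The wrappers below first reject a null map and any range whose
	 * start + size wraps around the 32-bit address space (unsigned
	 * overflow) before calling through to the wide routine.
	 */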
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	return mach_vm_deallocate(map, start, size);
	vm_inherit_t		new_inheritance)

	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	return mach_vm_inherit(map, start, size, new_inheritance);
	boolean_t		set_maximum,
	vm_prot_t		new_protection)

	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	return mach_vm_protect(map, start, size, set_maximum, new_protection);
vm32_machine_attribute(
	vm_machine_attribute_t	attribute,
	vm_machine_attribute_val_t* value)		/* IN/OUT */

	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	return mach_vm_machine_attribute(map, addr, size, attribute, value);
	mach_msg_type_number_t	*data_size)

	return mach_vm_read(map, addr, size, data, data_size);
	vm32_read_entry_t	data_list,

	mach_vm_read_entry_t	mdata_list;
	mach_msg_type_number_t	i;
	kern_return_t		result;
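	/*
	 * Widen each 32-bit list entry into the wide format expected by
	 * mach_vm_read_list(), then narrow the results back down after the
	 * call so the caller sees 32-bit addresses and sizes.
	 */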
	for (i = 0; i < VM_MAP_ENTRY_MAX; i++) {
		mdata_list[i].address = data_list[i].address;
		mdata_list[i].size = data_list[i].size;
	}

	result = mach_vm_read_list(map, mdata_list, count);
	for (i = 0; i < VM_MAP_ENTRY_MAX; i++) {
		data_list[i].address = CAST_DOWN_EXPLICIT(vm32_address_t, mdata_list[i].address);
		data_list[i].size = CAST_DOWN_EXPLICIT(vm32_size_t, mdata_list[i].size);
	}
	vm32_address_t		address,
	vm32_size_t		*data_size)

	kern_return_t		result;
	mach_vm_size_t		mdata_size;

	mdata_size = *data_size;
	result = mach_vm_read_overwrite(map, address, size, data, &mdata_size);
	*data_size = CAST_DOWN_EXPLICIT(vm32_size_t, mdata_size);
	vm32_address_t		address,
	mach_msg_type_number_t	size)

	return mach_vm_write(map, address, data, size);
	vm32_address_t		source_address,
	vm32_address_t		dest_address)

	return mach_vm_copy(map, source_address, size, dest_address);
	vm32_offset_t		*address,
	vm_object_offset_t	offset,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)

	mach_vm_offset_t	maddress;
	kern_return_t		result;

	result = mach_vm_map(target_map, &maddress, size, mask,
			flags, port, offset, copy,
			cur_protection, max_protection, inheritance);
	*address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);
	vm32_offset_t		*address,
	vm32_offset_t		offset,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)

	return vm32_map_64(target_map, address, size, mask,
			flags, port, offset, copy,
			cur_protection, max_protection, inheritance);
	vm32_offset_t		*address,
	vm32_offset_t		memory_address,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)

	mach_vm_offset_t	maddress;
	kern_return_t		result;

	result = mach_vm_remap(target_map, &maddress, size, mask,
			anywhere, src_map, memory_address, copy,
			cur_protection, max_protection, inheritance);
	*address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);
	vm32_address_t		address,
	vm_sync_t		sync_flags)

	return mach_vm_msync(map, address, size, sync_flags);
	vm_behavior_t		new_behavior)

	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	return mach_vm_behavior_set(map, start, size, new_behavior);
	vm32_offset_t		*address,		/* IN/OUT */
	vm32_size_t		*size,			/* OUT */
	vm_region_flavor_t	flavor,			/* IN */
	vm_region_info_t	info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */

	mach_vm_offset_t	maddress;
	mach_vm_size_t		msize;
	kern_return_t		result;

	result = mach_vm_region(map, &maddress, &msize, flavor, info, count, object_name);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, msize);
	*address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);
	vm32_address_t		*address,		/* IN/OUT */
	vm32_size_t		*size,			/* OUT */
	vm_region_flavor_t	flavor,			/* IN */
	vm_region_info_t	info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */

	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	kr = vm_map_region(map,
			&map_addr, &map_size,

	*address = CAST_DOWN_EXPLICIT(vm32_address_t, map_addr);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, map_size);
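	/*
	 * The narrowed values above can silently drop high bits, so if the
	 * region found by the wide lookup extends past VM32_MAX_ADDRESS the
	 * call fails rather than returning a truncated range.
	 */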
	if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
vm32_region_recurse_64(
	vm32_address_t		*address,
	vm_region_recurse_info_64_t info,
	mach_msg_type_number_t	*infoCnt)

	mach_vm_address_t	maddress;
	mach_vm_size_t		msize;
	kern_return_t		result;

	result = mach_vm_region_recurse(map, &maddress, &msize, depth, info, infoCnt);
	*address = CAST_DOWN_EXPLICIT(vm32_address_t, maddress);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, msize);
	vm32_offset_t		*address,		/* IN/OUT */
	vm32_size_t		*size,			/* OUT */
	natural_t		*depth,			/* IN/OUT */
	vm_region_recurse_info_t info32,		/* IN/OUT */
	mach_msg_type_number_t	*infoCnt)		/* IN/OUT */

	vm_region_submap_info_data_64_t info64;
	vm_region_submap_info_t	info;
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
			depth, &info64, infoCnt);
	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
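	/*
	 * The (uint32_t) cast below keeps only the low 32 bits of
	 * info64.offset, a 64-bit object offset, so larger offsets are
	 * silently truncated here (hence the "trouble-maker" note).
	 */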
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;
	*address = CAST_DOWN_EXPLICIT(vm32_address_t, map_addr);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, map_size);
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
vm32_purgable_control(
	vm32_offset_t		address,
	vm_purgable_t		control,

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
			vm_map_trunc_page(address, PAGE_MASK),
	vm32_offset_t		offset,

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(
			vm_map_trunc_page(offset, PAGE_MASK),
vm32_make_memory_entry_64(
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_handle)

	// use the existing entrypoint
	return _mach_make_memory_entry(target_map, size, offset, permission, object_handle, parent_handle);
vm32_make_memory_entry(
	vm32_offset_t		offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)

	memory_object_size_t	mo_size;

	mo_size = (memory_object_size_t)*size;
	kr = _mach_make_memory_entry(target_map, &mo_size,
			(memory_object_offset_t)offset, permission, object_handle,
			parent_entry);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, mo_size);
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

		map->wiring_required = TRUE;

		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
#endif /* VM32_SUPPORT */