2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
59 * File: vm/vm32_user.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
62 * User-exported virtual memory functions.
67 #include <mach/boolean.h>
68 #include <mach/kern_return.h>
69 #include <mach/mach_types.h> /* to get vm_address_t */
70 #include <mach/memory_object.h>
71 #include <mach/std_types.h> /* to get pointer_t */
72 #include <mach/vm_attributes.h>
73 #include <mach/vm_param.h>
74 #include <mach/vm_statistics.h>
75 #include <mach/mach_syscalls.h>
77 #include <mach/host_priv_server.h>
78 #include <mach/mach_vm_server.h>
79 #include <mach/vm32_map_server.h>
81 #include <kern/host.h>
82 #include <kern/kalloc.h>
83 #include <kern/task.h>
84 #include <kern/misc_protos.h>
85 #include <vm/vm_fault.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_page.h>
89 #include <vm/memory_object.h>
90 #include <vm/vm_pageout.h>
91 #include <vm/vm_protos.h>
/*
 *	See vm_user.c for the real implementation of all of these functions.
 *	We call through to the mach_ "wide" versions of the routines, and trust
 *	that the VM system verifies the arguments and only returns addresses
 *	that are appropriate for the task's address space size.
 *
 *	New VM call implementations should not be added here, because they
 *	would be available only to 32-bit userspace clients. Add them to
 *	vm_user.c and the corresponding prototype to mach_vm.defs
 *	(subsystem 4800).
 */
113 mach_vm_offset_t maddr
;
114 kern_return_t result
;
117 result
= mach_vm_allocate_external(map
, &maddr
, size
, flags
);
118 *addr
= CAST_DOWN_EXPLICIT(vm32_offset_t
, maddr
);
129 if ((map
== VM_MAP_NULL
) || (start
+ size
< start
)) {
130 return KERN_INVALID_ARGUMENT
;
133 return mach_vm_deallocate(map
, start
, size
);
141 vm_inherit_t new_inheritance
)
143 if ((map
== VM_MAP_NULL
) || (start
+ size
< start
)) {
144 return KERN_INVALID_ARGUMENT
;
147 return mach_vm_inherit(map
, start
, size
, new_inheritance
);
155 boolean_t set_maximum
,
156 vm_prot_t new_protection
)
158 if ((map
== VM_MAP_NULL
) || (start
+ size
< start
)) {
159 return KERN_INVALID_ARGUMENT
;
162 return mach_vm_protect(map
, start
, size
, set_maximum
, new_protection
);
166 vm32_machine_attribute(
170 vm_machine_attribute_t attribute
,
171 vm_machine_attribute_val_t
* value
) /* IN/OUT */
173 if ((map
== VM_MAP_NULL
) || (addr
+ size
< addr
)) {
174 return KERN_INVALID_ARGUMENT
;
177 return mach_vm_machine_attribute(map
, addr
, size
, attribute
, value
);
186 mach_msg_type_number_t
*data_size
)
188 return mach_vm_read(map
, addr
, size
, data
, data_size
);
194 vm32_read_entry_t data_list
,
197 mach_vm_read_entry_t mdata_list
;
198 mach_msg_type_number_t i
;
199 kern_return_t result
;
201 for (i
= 0; i
< VM_MAP_ENTRY_MAX
; i
++) {
202 mdata_list
[i
].address
= data_list
[i
].address
;
203 mdata_list
[i
].size
= data_list
[i
].size
;
206 result
= mach_vm_read_list(map
, mdata_list
, count
);
208 for (i
= 0; i
< VM_MAP_ENTRY_MAX
; i
++) {
209 data_list
[i
].address
= CAST_DOWN_EXPLICIT(vm32_address_t
, mdata_list
[i
].address
);
210 data_list
[i
].size
= CAST_DOWN_EXPLICIT(vm32_size_t
, mdata_list
[i
].size
);
219 vm32_address_t address
,
222 vm32_size_t
*data_size
)
224 kern_return_t result
;
225 mach_vm_size_t mdata_size
;
227 mdata_size
= *data_size
;
228 result
= mach_vm_read_overwrite(map
, address
, size
, data
, &mdata_size
);
229 *data_size
= CAST_DOWN_EXPLICIT(vm32_size_t
, mdata_size
);
237 vm32_address_t address
,
239 mach_msg_type_number_t size
)
241 return mach_vm_write(map
, address
, data
, size
);
247 vm32_address_t source_address
,
249 vm32_address_t dest_address
)
251 return mach_vm_copy(map
, source_address
, size
, dest_address
);
257 vm32_offset_t
*address
,
262 vm_object_offset_t offset
,
264 vm_prot_t cur_protection
,
265 vm_prot_t max_protection
,
266 vm_inherit_t inheritance
)
268 mach_vm_offset_t maddress
;
269 kern_return_t result
;
272 result
= mach_vm_map_external(target_map
, &maddress
, size
, mask
,
273 flags
, port
, offset
, copy
,
274 cur_protection
, max_protection
, inheritance
);
275 *address
= CAST_DOWN_EXPLICIT(vm32_offset_t
, maddress
);
283 vm32_offset_t
*address
,
288 vm32_offset_t offset
,
290 vm_prot_t cur_protection
,
291 vm_prot_t max_protection
,
292 vm_inherit_t inheritance
)
294 return vm32_map_64(target_map
, address
, size
, mask
,
295 flags
, port
, offset
, copy
,
296 cur_protection
, max_protection
, inheritance
);
302 vm32_offset_t
*address
,
307 vm32_offset_t memory_address
,
309 vm_prot_t
*cur_protection
,
310 vm_prot_t
*max_protection
,
311 vm_inherit_t inheritance
)
313 mach_vm_offset_t maddress
;
314 kern_return_t result
;
317 result
= mach_vm_remap_external(target_map
, &maddress
, size
, mask
,
318 anywhere
, src_map
, memory_address
, copy
,
319 cur_protection
, max_protection
, inheritance
);
320 *address
= CAST_DOWN_EXPLICIT(vm32_offset_t
, maddress
);
328 vm32_address_t address
,
330 vm_sync_t sync_flags
)
332 return mach_vm_msync(map
, address
, size
, sync_flags
);
340 vm_behavior_t new_behavior
)
342 if ((map
== VM_MAP_NULL
) || (start
+ size
< start
)) {
343 return KERN_INVALID_ARGUMENT
;
346 return mach_vm_behavior_set(map
, start
, size
, new_behavior
);
352 vm32_offset_t
*address
, /* IN/OUT */
353 vm32_size_t
*size
, /* OUT */
354 vm_region_flavor_t flavor
, /* IN */
355 vm_region_info_t info
, /* OUT */
356 mach_msg_type_number_t
*count
, /* IN/OUT */
357 mach_port_t
*object_name
) /* OUT */
359 mach_vm_offset_t maddress
;
360 mach_vm_size_t msize
;
361 kern_return_t result
;
365 result
= mach_vm_region(map
, &maddress
, &msize
, flavor
, info
, count
, object_name
);
366 *size
= CAST_DOWN_EXPLICIT(vm32_size_t
, msize
);
367 *address
= CAST_DOWN_EXPLICIT(vm32_offset_t
, maddress
);
375 vm32_address_t
*address
, /* IN/OUT */
376 vm32_size_t
*size
, /* OUT */
377 vm_region_flavor_t flavor
, /* IN */
378 vm_region_info_t info
, /* OUT */
379 mach_msg_type_number_t
*count
, /* IN/OUT */
380 mach_port_t
*object_name
) /* OUT */
382 vm_map_address_t map_addr
;
383 vm_map_size_t map_size
;
386 if (VM_MAP_NULL
== map
) {
387 return KERN_INVALID_ARGUMENT
;
390 map_addr
= (vm_map_address_t
)*address
;
391 map_size
= (vm_map_size_t
)*size
;
393 kr
= vm_map_region(map
,
394 &map_addr
, &map_size
,
398 *address
= CAST_DOWN_EXPLICIT(vm32_address_t
, map_addr
);
399 *size
= CAST_DOWN_EXPLICIT(vm32_size_t
, map_size
);
401 if (KERN_SUCCESS
== kr
&& map_addr
+ map_size
> VM32_MAX_ADDRESS
) {
402 return KERN_INVALID_ADDRESS
;
408 vm32_region_recurse_64(
410 vm32_address_t
*address
,
413 vm_region_recurse_info_64_t info
,
414 mach_msg_type_number_t
*infoCnt
)
416 mach_vm_address_t maddress
;
417 mach_vm_size_t msize
;
418 kern_return_t result
;
422 result
= mach_vm_region_recurse(map
, &maddress
, &msize
, depth
, info
, infoCnt
);
423 *address
= CAST_DOWN_EXPLICIT(vm32_address_t
, maddress
);
424 *size
= CAST_DOWN_EXPLICIT(vm32_size_t
, msize
);
432 vm32_offset_t
*address
, /* IN/OUT */
433 vm32_size_t
*size
, /* OUT */
434 natural_t
*depth
, /* IN/OUT */
435 vm_region_recurse_info_t info32
, /* IN/OUT */
436 mach_msg_type_number_t
*infoCnt
) /* IN/OUT */
438 vm_region_submap_info_data_64_t info64
;
439 vm_region_submap_info_t info
;
440 vm_map_address_t map_addr
;
441 vm_map_size_t map_size
;
444 if (VM_MAP_NULL
== map
|| *infoCnt
< VM_REGION_SUBMAP_INFO_COUNT
) {
445 return KERN_INVALID_ARGUMENT
;
449 map_addr
= (vm_map_address_t
)*address
;
450 map_size
= (vm_map_size_t
)*size
;
451 info
= (vm_region_submap_info_t
)info32
;
452 *infoCnt
= VM_REGION_SUBMAP_INFO_COUNT_64
;
454 kr
= vm_map_region_recurse_64(map
, &map_addr
, &map_size
,
455 depth
, &info64
, infoCnt
);
457 info
->protection
= info64
.protection
;
458 info
->max_protection
= info64
.max_protection
;
459 info
->inheritance
= info64
.inheritance
;
460 info
->offset
= (uint32_t)info64
.offset
; /* trouble-maker */
461 info
->user_tag
= info64
.user_tag
;
462 info
->pages_resident
= info64
.pages_resident
;
463 info
->pages_shared_now_private
= info64
.pages_shared_now_private
;
464 info
->pages_swapped_out
= info64
.pages_swapped_out
;
465 info
->pages_dirtied
= info64
.pages_dirtied
;
466 info
->ref_count
= info64
.ref_count
;
467 info
->shadow_depth
= info64
.shadow_depth
;
468 info
->external_pager
= info64
.external_pager
;
469 info
->share_mode
= info64
.share_mode
;
470 info
->is_submap
= info64
.is_submap
;
471 info
->behavior
= info64
.behavior
;
472 info
->object_id
= info64
.object_id
;
473 info
->user_wired_count
= info64
.user_wired_count
;
475 *address
= CAST_DOWN_EXPLICIT(vm32_address_t
, map_addr
);
476 *size
= CAST_DOWN_EXPLICIT(vm32_size_t
, map_size
);
477 *infoCnt
= VM_REGION_SUBMAP_INFO_COUNT
;
479 if (KERN_SUCCESS
== kr
&& map_addr
+ map_size
> VM32_MAX_ADDRESS
) {
480 return KERN_INVALID_ADDRESS
;
486 vm32_purgable_control(
488 vm32_offset_t address
,
489 vm_purgable_t control
,
492 if (VM_MAP_NULL
== map
) {
493 return KERN_INVALID_ARGUMENT
;
496 return vm_map_purgable_control(map
,
497 vm_map_trunc_page(address
, PAGE_MASK
),
505 vm32_offset_t offset
,
509 if (VM_MAP_NULL
== map
) {
510 return KERN_INVALID_ARGUMENT
;
513 return vm_map_page_query_internal(
515 vm_map_trunc_page(offset
, PAGE_MASK
),
521 vm32_make_memory_entry_64(
523 memory_object_size_t
*size
,
524 memory_object_offset_t offset
,
525 vm_prot_t permission
,
526 ipc_port_t
*object_handle
,
527 ipc_port_t parent_handle
)
529 // use the existing entrypoint
530 return _mach_make_memory_entry(target_map
, size
, offset
, permission
, object_handle
, parent_handle
);
534 vm32_make_memory_entry(
537 vm32_offset_t offset
,
538 vm_prot_t permission
,
539 ipc_port_t
*object_handle
,
540 ipc_port_t parent_entry
)
542 memory_object_size_t mo_size
;
545 mo_size
= (memory_object_size_t
)*size
;
546 kr
= _mach_make_memory_entry(target_map
, &mo_size
,
547 (memory_object_offset_t
)offset
, permission
, object_handle
,
549 *size
= CAST_DOWN_EXPLICIT(vm32_size_t
, mo_size
);
558 if (map
== VM_MAP_NULL
) {
559 return KERN_INVALID_ARGUMENT
;
563 map
->wiring_required
= (must_wire
== TRUE
);
570 vm32__map_exec_lockdown(
573 if (map
== VM_MAP_NULL
) {
574 return KERN_INVALID_ARGUMENT
;
578 map
->map_disallow_new_exec
= TRUE
;
585 #endif /* VM32_SUPPORT */