2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
59 * File: vm/vm32_user.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
62 * User-exported virtual memory functions.
67 #include <mach/boolean.h>
68 #include <mach/kern_return.h>
69 #include <mach/mach_types.h> /* to get vm_address_t */
70 #include <mach/memory_object.h>
71 #include <mach/std_types.h> /* to get pointer_t */
72 #include <mach/vm_attributes.h>
73 #include <mach/vm_param.h>
74 #include <mach/vm_statistics.h>
75 #include <mach/mach_syscalls.h>
77 #include <mach/host_priv_server.h>
78 #include <mach/mach_vm_server.h>
79 #include <mach/vm32_map_server.h>
81 #include <kern/host.h>
82 #include <kern/task.h>
83 #include <kern/misc_protos.h>
84 #include <vm/vm_fault.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/memory_object.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_protos.h>
/*
 * See vm_user.c for the real implementation of all of these functions.
 * We call through to the mach_ "wide" versions of the routines, and trust
 * that the VM system verifies the arguments and only returns addresses that
 * are appropriate for the task's address space size.
 *
 * New VM call implementations should not be added here, because they would
 * be available only to 32-bit userspace clients. Add them to vm_user.c
 * and the corresponding prototype to mach_vm.defs (subsystem 4800).
 */
112 mach_vm_offset_t maddr
;
113 kern_return_t result
;
116 result
= mach_vm_allocate_external(map
, &maddr
, size
, flags
);
117 *addr
= CAST_DOWN_EXPLICIT(vm32_offset_t
, maddr
);
128 if ((map
== VM_MAP_NULL
) || (start
+ size
< start
)) {
129 return KERN_INVALID_ARGUMENT
;
132 return mach_vm_deallocate(map
, start
, size
);
140 vm_inherit_t new_inheritance
)
142 if ((map
== VM_MAP_NULL
) || (start
+ size
< start
)) {
143 return KERN_INVALID_ARGUMENT
;
146 return mach_vm_inherit(map
, start
, size
, new_inheritance
);
154 boolean_t set_maximum
,
155 vm_prot_t new_protection
)
157 if ((map
== VM_MAP_NULL
) || (start
+ size
< start
)) {
158 return KERN_INVALID_ARGUMENT
;
161 return mach_vm_protect(map
, start
, size
, set_maximum
, new_protection
);
165 vm32_machine_attribute(
169 vm_machine_attribute_t attribute
,
170 vm_machine_attribute_val_t
* value
) /* IN/OUT */
172 if ((map
== VM_MAP_NULL
) || (addr
+ size
< addr
)) {
173 return KERN_INVALID_ARGUMENT
;
176 return mach_vm_machine_attribute(map
, addr
, size
, attribute
, value
);
185 mach_msg_type_number_t
*data_size
)
187 return mach_vm_read(map
, addr
, size
, data
, data_size
);
193 vm32_read_entry_t data_list
,
196 mach_vm_read_entry_t mdata_list
;
197 mach_msg_type_number_t i
;
198 kern_return_t result
;
200 for (i
= 0; i
< VM_MAP_ENTRY_MAX
; i
++) {
201 mdata_list
[i
].address
= data_list
[i
].address
;
202 mdata_list
[i
].size
= data_list
[i
].size
;
205 result
= mach_vm_read_list(map
, mdata_list
, count
);
207 for (i
= 0; i
< VM_MAP_ENTRY_MAX
; i
++) {
208 data_list
[i
].address
= CAST_DOWN_EXPLICIT(vm32_address_t
, mdata_list
[i
].address
);
209 data_list
[i
].size
= CAST_DOWN_EXPLICIT(vm32_size_t
, mdata_list
[i
].size
);
218 vm32_address_t address
,
221 vm32_size_t
*data_size
)
223 kern_return_t result
;
224 mach_vm_size_t mdata_size
;
226 mdata_size
= *data_size
;
227 result
= mach_vm_read_overwrite(map
, address
, size
, data
, &mdata_size
);
228 *data_size
= CAST_DOWN_EXPLICIT(vm32_size_t
, mdata_size
);
236 vm32_address_t address
,
238 mach_msg_type_number_t size
)
240 return mach_vm_write(map
, address
, data
, size
);
246 vm32_address_t source_address
,
248 vm32_address_t dest_address
)
250 return mach_vm_copy(map
, source_address
, size
, dest_address
);
256 vm32_offset_t
*address
,
261 vm_object_offset_t offset
,
263 vm_prot_t cur_protection
,
264 vm_prot_t max_protection
,
265 vm_inherit_t inheritance
)
267 mach_vm_offset_t maddress
;
268 kern_return_t result
;
271 result
= mach_vm_map_external(target_map
, &maddress
, size
, mask
,
272 flags
, port
, offset
, copy
,
273 cur_protection
, max_protection
, inheritance
);
274 *address
= CAST_DOWN_EXPLICIT(vm32_offset_t
, maddress
);
282 vm32_offset_t
*address
,
287 vm32_offset_t offset
,
289 vm_prot_t cur_protection
,
290 vm_prot_t max_protection
,
291 vm_inherit_t inheritance
)
293 return vm32_map_64(target_map
, address
, size
, mask
,
294 flags
, port
, offset
, copy
,
295 cur_protection
, max_protection
, inheritance
);
301 vm32_offset_t
*address
,
306 vm32_offset_t memory_address
,
308 vm_prot_t
*cur_protection
,
309 vm_prot_t
*max_protection
,
310 vm_inherit_t inheritance
)
312 mach_vm_offset_t maddress
;
313 kern_return_t result
;
316 result
= mach_vm_remap_external(target_map
, &maddress
, size
, mask
,
317 anywhere
, src_map
, memory_address
, copy
,
318 cur_protection
, max_protection
, inheritance
);
319 *address
= CAST_DOWN_EXPLICIT(vm32_offset_t
, maddress
);
327 vm32_address_t address
,
329 vm_sync_t sync_flags
)
331 return mach_vm_msync(map
, address
, size
, sync_flags
);
339 vm_behavior_t new_behavior
)
341 if ((map
== VM_MAP_NULL
) || (start
+ size
< start
)) {
342 return KERN_INVALID_ARGUMENT
;
345 return mach_vm_behavior_set(map
, start
, size
, new_behavior
);
351 vm32_offset_t
*address
, /* IN/OUT */
352 vm32_size_t
*size
, /* OUT */
353 vm_region_flavor_t flavor
, /* IN */
354 vm_region_info_t info
, /* OUT */
355 mach_msg_type_number_t
*count
, /* IN/OUT */
356 mach_port_t
*object_name
) /* OUT */
358 mach_vm_offset_t maddress
;
359 mach_vm_size_t msize
;
360 kern_return_t result
;
364 result
= mach_vm_region(map
, &maddress
, &msize
, flavor
, info
, count
, object_name
);
365 *size
= CAST_DOWN_EXPLICIT(vm32_size_t
, msize
);
366 *address
= CAST_DOWN_EXPLICIT(vm32_offset_t
, maddress
);
374 vm32_address_t
*address
, /* IN/OUT */
375 vm32_size_t
*size
, /* OUT */
376 vm_region_flavor_t flavor
, /* IN */
377 vm_region_info_t info
, /* OUT */
378 mach_msg_type_number_t
*count
, /* IN/OUT */
379 mach_port_t
*object_name
) /* OUT */
381 vm_map_address_t map_addr
;
382 vm_map_size_t map_size
;
385 if (VM_MAP_NULL
== map
) {
386 return KERN_INVALID_ARGUMENT
;
389 map_addr
= (vm_map_address_t
)*address
;
390 map_size
= (vm_map_size_t
)*size
;
392 kr
= vm_map_region(map
,
393 &map_addr
, &map_size
,
397 *address
= CAST_DOWN_EXPLICIT(vm32_address_t
, map_addr
);
398 *size
= CAST_DOWN_EXPLICIT(vm32_size_t
, map_size
);
400 if (KERN_SUCCESS
== kr
&& map_addr
+ map_size
> VM32_MAX_ADDRESS
) {
401 return KERN_INVALID_ADDRESS
;
407 vm32_region_recurse_64(
409 vm32_address_t
*address
,
412 vm_region_recurse_info_64_t info
,
413 mach_msg_type_number_t
*infoCnt
)
415 mach_vm_address_t maddress
;
416 mach_vm_size_t msize
;
417 kern_return_t result
;
421 result
= mach_vm_region_recurse(map
, &maddress
, &msize
, depth
, info
, infoCnt
);
422 *address
= CAST_DOWN_EXPLICIT(vm32_address_t
, maddress
);
423 *size
= CAST_DOWN_EXPLICIT(vm32_size_t
, msize
);
431 vm32_offset_t
*address
, /* IN/OUT */
432 vm32_size_t
*size
, /* OUT */
433 natural_t
*depth
, /* IN/OUT */
434 vm_region_recurse_info_t info32
, /* IN/OUT */
435 mach_msg_type_number_t
*infoCnt
) /* IN/OUT */
437 vm_region_submap_info_data_64_t info64
;
438 vm_region_submap_info_t info
;
439 vm_map_address_t map_addr
;
440 vm_map_size_t map_size
;
443 if (VM_MAP_NULL
== map
|| *infoCnt
< VM_REGION_SUBMAP_INFO_COUNT
) {
444 return KERN_INVALID_ARGUMENT
;
448 map_addr
= (vm_map_address_t
)*address
;
449 map_size
= (vm_map_size_t
)*size
;
450 info
= (vm_region_submap_info_t
)info32
;
451 *infoCnt
= VM_REGION_SUBMAP_INFO_COUNT_64
;
453 kr
= vm_map_region_recurse_64(map
, &map_addr
, &map_size
,
454 depth
, &info64
, infoCnt
);
456 info
->protection
= info64
.protection
;
457 info
->max_protection
= info64
.max_protection
;
458 info
->inheritance
= info64
.inheritance
;
459 info
->offset
= (uint32_t)info64
.offset
; /* trouble-maker */
460 info
->user_tag
= info64
.user_tag
;
461 info
->pages_resident
= info64
.pages_resident
;
462 info
->pages_shared_now_private
= info64
.pages_shared_now_private
;
463 info
->pages_swapped_out
= info64
.pages_swapped_out
;
464 info
->pages_dirtied
= info64
.pages_dirtied
;
465 info
->ref_count
= info64
.ref_count
;
466 info
->shadow_depth
= info64
.shadow_depth
;
467 info
->external_pager
= info64
.external_pager
;
468 info
->share_mode
= info64
.share_mode
;
469 info
->is_submap
= info64
.is_submap
;
470 info
->behavior
= info64
.behavior
;
471 info
->object_id
= info64
.object_id
;
472 info
->user_wired_count
= info64
.user_wired_count
;
474 *address
= CAST_DOWN_EXPLICIT(vm32_address_t
, map_addr
);
475 *size
= CAST_DOWN_EXPLICIT(vm32_size_t
, map_size
);
476 *infoCnt
= VM_REGION_SUBMAP_INFO_COUNT
;
478 if (KERN_SUCCESS
== kr
&& map_addr
+ map_size
> VM32_MAX_ADDRESS
) {
479 return KERN_INVALID_ADDRESS
;
485 vm32_purgable_control(
487 vm32_offset_t address
,
488 vm_purgable_t control
,
491 if (VM_MAP_NULL
== map
) {
492 return KERN_INVALID_ARGUMENT
;
495 return vm_map_purgable_control(map
,
496 vm_map_trunc_page(address
, PAGE_MASK
),
504 vm32_offset_t offset
,
508 if (VM_MAP_NULL
== map
) {
509 return KERN_INVALID_ARGUMENT
;
512 return vm_map_page_query_internal(
514 vm_map_trunc_page(offset
, PAGE_MASK
),
520 vm32_make_memory_entry_64(
522 memory_object_size_t
*size
,
523 memory_object_offset_t offset
,
524 vm_prot_t permission
,
525 ipc_port_t
*object_handle
,
526 ipc_port_t parent_handle
)
528 // use the existing entrypoint
529 return _mach_make_memory_entry(target_map
, size
, offset
, permission
, object_handle
, parent_handle
);
533 vm32_make_memory_entry(
536 vm32_offset_t offset
,
537 vm_prot_t permission
,
538 ipc_port_t
*object_handle
,
539 ipc_port_t parent_entry
)
541 memory_object_size_t mo_size
;
544 mo_size
= (memory_object_size_t
)*size
;
545 kr
= _mach_make_memory_entry(target_map
, &mo_size
,
546 (memory_object_offset_t
)offset
, permission
, object_handle
,
548 *size
= CAST_DOWN_EXPLICIT(vm32_size_t
, mo_size
);
557 if (map
== VM_MAP_NULL
) {
558 return KERN_INVALID_ARGUMENT
;
562 map
->wiring_required
= (must_wire
== TRUE
);
569 vm32__map_exec_lockdown(
572 if (map
== VM_MAP_NULL
) {
573 return KERN_INVALID_ARGUMENT
;
577 map
->map_disallow_new_exec
= TRUE
;
584 #endif /* VM32_SUPPORT */