/* osfmk/vm/vm32_user.c — from the xnu-6153.11.26 source distribution */
1 /*
2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm32_user.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * User-exported virtual memory functions.
63 */
64
65 #include <debug.h>
66
67 #include <mach/boolean.h>
68 #include <mach/kern_return.h>
69 #include <mach/mach_types.h> /* to get vm_address_t */
70 #include <mach/memory_object.h>
71 #include <mach/std_types.h> /* to get pointer_t */
72 #include <mach/vm_attributes.h>
73 #include <mach/vm_param.h>
74 #include <mach/vm_statistics.h>
75 #include <mach/mach_syscalls.h>
76
77 #include <mach/host_priv_server.h>
78 #include <mach/mach_vm_server.h>
79 #include <mach/vm32_map_server.h>
80
81 #include <kern/host.h>
82 #include <kern/kalloc.h>
83 #include <kern/task.h>
84 #include <kern/misc_protos.h>
85 #include <vm/vm_fault.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_page.h>
89 #include <vm/memory_object.h>
90 #include <vm/vm_pageout.h>
91 #include <vm/vm_protos.h>
92
93 #ifdef VM32_SUPPORT
94
95 /*
96 * See vm_user.c for the real implementation of all of these functions.
97 * We call through to the mach_ "wide" versions of the routines, and trust
98 * that the VM system verifies the arguments and only returns address that
99 * are appropriate for the task's address space size.
100 *
101 * New VM call implementations should not be added here, because they would
102 * be available only to 32-bit userspace clients. Add them to vm_user.c
103 * and the corresponding prototype to mach_vm.defs (subsystem 4800).
104 */
105
106 kern_return_t
107 vm32_allocate(
108 vm_map_t map,
109 vm32_offset_t *addr,
110 vm32_size_t size,
111 int flags)
112 {
113 mach_vm_offset_t maddr;
114 kern_return_t result;
115
116 maddr = *addr;
117 result = mach_vm_allocate_external(map, &maddr, size, flags);
118 *addr = CAST_DOWN_EXPLICIT(vm32_offset_t, maddr);
119
120 return result;
121 }
122
123 kern_return_t
124 vm32_deallocate(
125 vm_map_t map,
126 vm32_offset_t start,
127 vm32_size_t size)
128 {
129 if ((map == VM_MAP_NULL) || (start + size < start)) {
130 return KERN_INVALID_ARGUMENT;
131 }
132
133 return mach_vm_deallocate(map, start, size);
134 }
135
136 kern_return_t
137 vm32_inherit(
138 vm_map_t map,
139 vm32_offset_t start,
140 vm32_size_t size,
141 vm_inherit_t new_inheritance)
142 {
143 if ((map == VM_MAP_NULL) || (start + size < start)) {
144 return KERN_INVALID_ARGUMENT;
145 }
146
147 return mach_vm_inherit(map, start, size, new_inheritance);
148 }
149
150 kern_return_t
151 vm32_protect(
152 vm_map_t map,
153 vm32_offset_t start,
154 vm32_size_t size,
155 boolean_t set_maximum,
156 vm_prot_t new_protection)
157 {
158 if ((map == VM_MAP_NULL) || (start + size < start)) {
159 return KERN_INVALID_ARGUMENT;
160 }
161
162 return mach_vm_protect(map, start, size, set_maximum, new_protection);
163 }
164
165 kern_return_t
166 vm32_machine_attribute(
167 vm_map_t map,
168 vm32_address_t addr,
169 vm32_size_t size,
170 vm_machine_attribute_t attribute,
171 vm_machine_attribute_val_t* value) /* IN/OUT */
172 {
173 if ((map == VM_MAP_NULL) || (addr + size < addr)) {
174 return KERN_INVALID_ARGUMENT;
175 }
176
177 return mach_vm_machine_attribute(map, addr, size, attribute, value);
178 }
179
180 kern_return_t
181 vm32_read(
182 vm_map_t map,
183 vm32_address_t addr,
184 vm32_size_t size,
185 pointer_t *data,
186 mach_msg_type_number_t *data_size)
187 {
188 return mach_vm_read(map, addr, size, data, data_size);
189 }
190
191 kern_return_t
192 vm32_read_list(
193 vm_map_t map,
194 vm32_read_entry_t data_list,
195 natural_t count)
196 {
197 mach_vm_read_entry_t mdata_list;
198 mach_msg_type_number_t i;
199 kern_return_t result;
200
201 for (i = 0; i < VM_MAP_ENTRY_MAX; i++) {
202 mdata_list[i].address = data_list[i].address;
203 mdata_list[i].size = data_list[i].size;
204 }
205
206 result = mach_vm_read_list(map, mdata_list, count);
207
208 for (i = 0; i < VM_MAP_ENTRY_MAX; i++) {
209 data_list[i].address = CAST_DOWN_EXPLICIT(vm32_address_t, mdata_list[i].address);
210 data_list[i].size = CAST_DOWN_EXPLICIT(vm32_size_t, mdata_list[i].size);
211 }
212
213 return result;
214 }
215
216 kern_return_t
217 vm32_read_overwrite(
218 vm_map_t map,
219 vm32_address_t address,
220 vm32_size_t size,
221 vm32_address_t data,
222 vm32_size_t *data_size)
223 {
224 kern_return_t result;
225 mach_vm_size_t mdata_size;
226
227 mdata_size = *data_size;
228 result = mach_vm_read_overwrite(map, address, size, data, &mdata_size);
229 *data_size = CAST_DOWN_EXPLICIT(vm32_size_t, mdata_size);
230
231 return result;
232 }
233
234 kern_return_t
235 vm32_write(
236 vm_map_t map,
237 vm32_address_t address,
238 pointer_t data,
239 mach_msg_type_number_t size)
240 {
241 return mach_vm_write(map, address, data, size);
242 }
243
244 kern_return_t
245 vm32_copy(
246 vm_map_t map,
247 vm32_address_t source_address,
248 vm32_size_t size,
249 vm32_address_t dest_address)
250 {
251 return mach_vm_copy(map, source_address, size, dest_address);
252 }
253
254 kern_return_t
255 vm32_map_64(
256 vm_map_t target_map,
257 vm32_offset_t *address,
258 vm32_size_t size,
259 vm32_offset_t mask,
260 int flags,
261 ipc_port_t port,
262 vm_object_offset_t offset,
263 boolean_t copy,
264 vm_prot_t cur_protection,
265 vm_prot_t max_protection,
266 vm_inherit_t inheritance)
267 {
268 mach_vm_offset_t maddress;
269 kern_return_t result;
270
271 maddress = *address;
272 result = mach_vm_map_external(target_map, &maddress, size, mask,
273 flags, port, offset, copy,
274 cur_protection, max_protection, inheritance);
275 *address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);
276
277 return result;
278 }
279
280 kern_return_t
281 vm32_map(
282 vm_map_t target_map,
283 vm32_offset_t *address,
284 vm32_size_t size,
285 vm32_offset_t mask,
286 int flags,
287 ipc_port_t port,
288 vm32_offset_t offset,
289 boolean_t copy,
290 vm_prot_t cur_protection,
291 vm_prot_t max_protection,
292 vm_inherit_t inheritance)
293 {
294 return vm32_map_64(target_map, address, size, mask,
295 flags, port, offset, copy,
296 cur_protection, max_protection, inheritance);
297 }
298
299 kern_return_t
300 vm32_remap(
301 vm_map_t target_map,
302 vm32_offset_t *address,
303 vm32_size_t size,
304 vm32_offset_t mask,
305 boolean_t anywhere,
306 vm_map_t src_map,
307 vm32_offset_t memory_address,
308 boolean_t copy,
309 vm_prot_t *cur_protection,
310 vm_prot_t *max_protection,
311 vm_inherit_t inheritance)
312 {
313 mach_vm_offset_t maddress;
314 kern_return_t result;
315
316 maddress = *address;
317 result = mach_vm_remap_external(target_map, &maddress, size, mask,
318 anywhere, src_map, memory_address, copy,
319 cur_protection, max_protection, inheritance);
320 *address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);
321
322 return result;
323 }
324
325 kern_return_t
326 vm32_msync(
327 vm_map_t map,
328 vm32_address_t address,
329 vm32_size_t size,
330 vm_sync_t sync_flags)
331 {
332 return mach_vm_msync(map, address, size, sync_flags);
333 }
334
335 kern_return_t
336 vm32_behavior_set(
337 vm_map_t map,
338 vm32_offset_t start,
339 vm32_size_t size,
340 vm_behavior_t new_behavior)
341 {
342 if ((map == VM_MAP_NULL) || (start + size < start)) {
343 return KERN_INVALID_ARGUMENT;
344 }
345
346 return mach_vm_behavior_set(map, start, size, new_behavior);
347 }
348
349 kern_return_t
350 vm32_region_64(
351 vm_map_t map,
352 vm32_offset_t *address, /* IN/OUT */
353 vm32_size_t *size, /* OUT */
354 vm_region_flavor_t flavor, /* IN */
355 vm_region_info_t info, /* OUT */
356 mach_msg_type_number_t *count, /* IN/OUT */
357 mach_port_t *object_name) /* OUT */
358 {
359 mach_vm_offset_t maddress;
360 mach_vm_size_t msize;
361 kern_return_t result;
362
363 maddress = *address;
364 msize = *size;
365 result = mach_vm_region(map, &maddress, &msize, flavor, info, count, object_name);
366 *size = CAST_DOWN_EXPLICIT(vm32_size_t, msize);
367 *address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);
368
369 return result;
370 }
371
372 kern_return_t
373 vm32_region(
374 vm_map_t map,
375 vm32_address_t *address, /* IN/OUT */
376 vm32_size_t *size, /* OUT */
377 vm_region_flavor_t flavor, /* IN */
378 vm_region_info_t info, /* OUT */
379 mach_msg_type_number_t *count, /* IN/OUT */
380 mach_port_t *object_name) /* OUT */
381 {
382 vm_map_address_t map_addr;
383 vm_map_size_t map_size;
384 kern_return_t kr;
385
386 if (VM_MAP_NULL == map) {
387 return KERN_INVALID_ARGUMENT;
388 }
389
390 map_addr = (vm_map_address_t)*address;
391 map_size = (vm_map_size_t)*size;
392
393 kr = vm_map_region(map,
394 &map_addr, &map_size,
395 flavor, info, count,
396 object_name);
397
398 *address = CAST_DOWN_EXPLICIT(vm32_address_t, map_addr);
399 *size = CAST_DOWN_EXPLICIT(vm32_size_t, map_size);
400
401 if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS) {
402 return KERN_INVALID_ADDRESS;
403 }
404 return kr;
405 }
406
407 kern_return_t
408 vm32_region_recurse_64(
409 vm_map_t map,
410 vm32_address_t *address,
411 vm32_size_t *size,
412 uint32_t *depth,
413 vm_region_recurse_info_64_t info,
414 mach_msg_type_number_t *infoCnt)
415 {
416 mach_vm_address_t maddress;
417 mach_vm_size_t msize;
418 kern_return_t result;
419
420 maddress = *address;
421 msize = *size;
422 result = mach_vm_region_recurse(map, &maddress, &msize, depth, info, infoCnt);
423 *address = CAST_DOWN_EXPLICIT(vm32_address_t, maddress);
424 *size = CAST_DOWN_EXPLICIT(vm32_size_t, msize);
425
426 return result;
427 }
428
/*
 * vm32_region_recurse
 *	Legacy submap-recursing region query.  The caller supplies a 32-bit
 *	vm_region_submap_info buffer; the wide vm_map_region_recurse_64()
 *	fills a local 64-bit info struct, whose fields are then demoted
 *	field-by-field into the caller's buffer.  *infoCnt is temporarily
 *	raised to the 64-bit count for the wide call and restored to the
 *	32-bit count before returning.  A region that ends beyond
 *	VM32_MAX_ADDRESS is reported as KERN_INVALID_ADDRESS since it is not
 *	representable to a 32-bit client.
 */
kern_return_t
vm32_region_recurse(
	vm_map_t map,
	vm32_offset_t *address, /* IN/OUT */
	vm32_size_t *size, /* OUT */
	natural_t *depth, /* IN/OUT */
	vm_region_recurse_info_t info32, /* IN/OUT */
	mach_msg_type_number_t *infoCnt) /* IN/OUT */
{
	vm_region_submap_info_data_64_t info64;
	vm_region_submap_info_t info;
	vm_map_address_t map_addr;
	vm_map_size_t map_size;
	kern_return_t kr;

	/* Caller must provide room for at least the full 32-bit info struct. */
	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}


	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	/* Advertise the 64-bit count for the wide call; restored below. */
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
	    depth, &info64, infoCnt);

	/*
	 * Demote the 64-bit info to the legacy layout.
	 * NOTE(review): fields are copied even when kr != KERN_SUCCESS, so
	 * info64 may be partially filled in that case — presumably MIG only
	 * copies the buffer out on success; verify against the server stub.
	 */
	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	/* 64-bit object offset truncated to 32 bits — known lossy demotion. */
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*address = CAST_DOWN_EXPLICIT(vm32_address_t, map_addr);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, map_size);
	/* Report the legacy (32-bit) count back to the caller. */
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	/* Region extends past the 32-bit address space: hide it. */
	if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS) {
		return KERN_INVALID_ADDRESS;
	}
	return kr;
}
484
485 kern_return_t
486 vm32_purgable_control(
487 vm_map_t map,
488 vm32_offset_t address,
489 vm_purgable_t control,
490 int *state)
491 {
492 if (VM_MAP_NULL == map) {
493 return KERN_INVALID_ARGUMENT;
494 }
495
496 return vm_map_purgable_control(map,
497 vm_map_trunc_page(address, PAGE_MASK),
498 control,
499 state);
500 }
501
502 kern_return_t
503 vm32_map_page_query(
504 vm_map_t map,
505 vm32_offset_t offset,
506 int *disposition,
507 int *ref_count)
508 {
509 if (VM_MAP_NULL == map) {
510 return KERN_INVALID_ARGUMENT;
511 }
512
513 return vm_map_page_query_internal(
514 map,
515 vm_map_trunc_page(offset, PAGE_MASK),
516 disposition,
517 ref_count);
518 }
519
520 kern_return_t
521 vm32_make_memory_entry_64(
522 vm_map_t target_map,
523 memory_object_size_t *size,
524 memory_object_offset_t offset,
525 vm_prot_t permission,
526 ipc_port_t *object_handle,
527 ipc_port_t parent_handle)
528 {
529 // use the existing entrypoint
530 return _mach_make_memory_entry(target_map, size, offset, permission, object_handle, parent_handle);
531 }
532
533 kern_return_t
534 vm32_make_memory_entry(
535 vm_map_t target_map,
536 vm32_size_t *size,
537 vm32_offset_t offset,
538 vm_prot_t permission,
539 ipc_port_t *object_handle,
540 ipc_port_t parent_entry)
541 {
542 memory_object_size_t mo_size;
543 kern_return_t kr;
544
545 mo_size = (memory_object_size_t)*size;
546 kr = _mach_make_memory_entry(target_map, &mo_size,
547 (memory_object_offset_t)offset, permission, object_handle,
548 parent_entry);
549 *size = CAST_DOWN_EXPLICIT(vm32_size_t, mo_size);
550 return kr;
551 }
552
553 kern_return_t
554 vm32__task_wire(
555 vm_map_t map,
556 boolean_t must_wire)
557 {
558 if (map == VM_MAP_NULL) {
559 return KERN_INVALID_ARGUMENT;
560 }
561
562 vm_map_lock(map);
563 map->wiring_required = (must_wire == TRUE);
564 vm_map_unlock(map);
565
566 return KERN_SUCCESS;
567 }
568
569 kern_return_t
570 vm32__map_exec_lockdown(
571 vm_map_t map)
572 {
573 if (map == VM_MAP_NULL) {
574 return KERN_INVALID_ARGUMENT;
575 }
576
577 vm_map_lock(map);
578 map->map_disallow_new_exec = TRUE;
579 vm_map_unlock(map);
580
581 return KERN_SUCCESS;
582 }
583
584
585 #endif /* VM32_SUPPORT */