]> git.saurik.com Git - apple/xnu.git/blame - osfmk/vm/vm_user.c
xnu-4903.270.47.tar.gz
[apple/xnu.git] / osfmk / vm / vm_user.c
CommitLineData
1c79356b 1/*
2d21ac55 2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
0a7de745 31/*
1c79356b
A
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
0a7de745 35 *
1c79356b
A
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
0a7de745 41 *
1c79356b
A
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
0a7de745 45 *
1c79356b 46 * Carnegie Mellon requests users of this software to return to
0a7de745 47 *
1c79356b
A
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
0a7de745 52 *
1c79356b
A
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: vm/vm_user.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
0a7de745 61 *
1c79356b
A
62 * User-exported virtual memory functions.
63 */
1c79356b 64
b0d623f7
A
65/*
66 * There are three implementations of the "XXX_allocate" functionality in
67 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
68 * (for a task with the same address space size, especially the current task),
69 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
70 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
71 * makes sense on platforms where a user task can either be 32 or 64, or the kernel
72 * task can be 32 or 64. mach_vm_allocate makes sense everywhere, and is preferred
73 * for new code.
74 *
75 * The entrypoints into the kernel are more complex. All platforms support a
76 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
77 * size types for the platform. On platforms that only support U32/K32,
78 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
79 * subsystem 3800 is used disambiguate the size of parameters, and they will
80 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32 platforms,
81 * the MIG glue should never call into vm_allocate directly, because the calling
82 * task and kernel_task are unlikely to use the same size parameters
83 *
84 * New VM call implementations should be added here and to mach_vm.defs
85 * (subsystem 4800), and use mach_vm_* "wide" types.
86 */
87
91447636
A
88#include <debug.h>
89
1c79356b
A
90#include <vm_cpm.h>
91#include <mach/boolean.h>
92#include <mach/kern_return.h>
0a7de745 93#include <mach/mach_types.h> /* to get vm_address_t */
1c79356b 94#include <mach/memory_object.h>
0a7de745 95#include <mach/std_types.h> /* to get pointer_t */
91447636 96#include <mach/upl.h>
1c79356b
A
97#include <mach/vm_attributes.h>
98#include <mach/vm_param.h>
99#include <mach/vm_statistics.h>
1c79356b 100#include <mach/mach_syscalls.h>
39037602 101#include <mach/sdt.h>
9bccf70c 102
91447636
A
103#include <mach/host_priv_server.h>
104#include <mach/mach_vm_server.h>
d9a64523 105#include <mach/memory_entry_server.h>
91447636 106#include <mach/vm_map_server.h>
1c79356b
A
107
108#include <kern/host.h>
91447636 109#include <kern/kalloc.h>
1c79356b
A
110#include <kern/task.h>
111#include <kern/misc_protos.h>
91447636 112#include <vm/vm_fault.h>
1c79356b
A
113#include <vm/vm_map.h>
114#include <vm/vm_object.h>
115#include <vm/vm_page.h>
116#include <vm/memory_object.h>
117#include <vm/vm_pageout.h>
91447636 118#include <vm/vm_protos.h>
fe8ab488 119#include <vm/vm_purgeable_internal.h>
d190cdc3 120#include <vm/vm_init.h>
1c79356b 121
5ba3f43e
A
122#include <san/kasan.h>
123
d9a64523
A
124#include <libkern/OSDebug.h>
125
1c79356b
A
126vm_size_t upl_offset_to_pagelist = 0;
127
0a7de745 128#if VM_CPM
1c79356b 129#include <vm/cpm.h>
0a7de745 130#endif /* VM_CPM */
1c79356b 131
1c79356b 132/*
91447636 133 * mach_vm_allocate allocates "zero fill" memory in the specfied
1c79356b
A
134 * map.
135 */
136kern_return_t
5ba3f43e 137mach_vm_allocate_external(
0a7de745
A
138 vm_map_t map,
139 mach_vm_offset_t *addr,
140 mach_vm_size_t size,
141 int flags)
5ba3f43e 142{
0a7de745 143 vm_tag_t tag;
5ba3f43e 144
0a7de745
A
145 VM_GET_FLAGS_ALIAS(flags, tag);
146 return mach_vm_allocate_kernel(map, addr, size, flags, tag);
5ba3f43e
A
147}
148
149kern_return_t
150mach_vm_allocate_kernel(
0a7de745
A
151 vm_map_t map,
152 mach_vm_offset_t *addr,
153 mach_vm_size_t size,
154 int flags,
5ba3f43e 155 vm_tag_t tag)
1c79356b 156{
91447636 157 vm_map_offset_t map_addr;
0a7de745
A
158 vm_map_size_t map_size;
159 kern_return_t result;
160 boolean_t anywhere;
2d21ac55
A
161
162 /* filter out any kernel-only flags */
0a7de745 163 if (flags & ~VM_FLAGS_USER_ALLOCATE) {
2d21ac55 164 return KERN_INVALID_ARGUMENT;
0a7de745 165 }
1c79356b 166
0a7de745
A
167 if (map == VM_MAP_NULL) {
168 return KERN_INVALID_ARGUMENT;
169 }
1c79356b
A
170 if (size == 0) {
171 *addr = 0;
0a7de745 172 return KERN_SUCCESS;
1c79356b
A
173 }
174
2d21ac55 175 anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
91447636
A
176 if (anywhere) {
177 /*
178 * No specific address requested, so start candidate address
179 * search at the minimum address in the map. However, if that
180 * minimum is 0, bump it up by PAGE_SIZE. We want to limit
181 * allocations of PAGEZERO to explicit requests since its
182 * normal use is to catch dereferences of NULL and many
183 * applications also treat pointers with a value of 0 as
184 * special and suddenly having address 0 contain useable
185 * memory would tend to confuse those applications.
186 */
187 map_addr = vm_map_min(map);
0a7de745 188 if (map_addr == 0) {
39236c6e 189 map_addr += VM_MAP_PAGE_SIZE(map);
0a7de745
A
190 }
191 } else {
39236c6e 192 map_addr = vm_map_trunc_page(*addr,
0a7de745
A
193 VM_MAP_PAGE_MASK(map));
194 }
39236c6e 195 map_size = vm_map_round_page(size,
0a7de745 196 VM_MAP_PAGE_MASK(map));
91447636 197 if (map_size == 0) {
0a7de745 198 return KERN_INVALID_ARGUMENT;
91447636
A
199 }
200
201 result = vm_map_enter(
0a7de745
A
202 map,
203 &map_addr,
204 map_size,
205 (vm_map_offset_t)0,
206 flags,
207 VM_MAP_KERNEL_FLAGS_NONE,
208 tag,
209 VM_OBJECT_NULL,
210 (vm_object_offset_t)0,
211 FALSE,
212 VM_PROT_DEFAULT,
213 VM_PROT_ALL,
214 VM_INHERIT_DEFAULT);
91447636
A
215
216 *addr = map_addr;
0a7de745 217 return result;
91447636
A
218}
219
220/*
0a7de745 221 * vm_allocate
91447636
A
222 * Legacy routine that allocates "zero fill" memory in the specfied
223 * map (which is limited to the same size as the kernel).
224 */
225kern_return_t
5ba3f43e 226vm_allocate_external(
0a7de745
A
227 vm_map_t map,
228 vm_offset_t *addr,
229 vm_size_t size,
230 int flags)
5ba3f43e
A
231{
232 vm_tag_t tag;
233
0a7de745
A
234 VM_GET_FLAGS_ALIAS(flags, tag);
235 return vm_allocate_kernel(map, addr, size, flags, tag);
5ba3f43e
A
236}
237
238kern_return_t
239vm_allocate_kernel(
0a7de745
A
240 vm_map_t map,
241 vm_offset_t *addr,
242 vm_size_t size,
5ba3f43e
A
243 int flags,
244 vm_tag_t tag)
91447636
A
245{
246 vm_map_offset_t map_addr;
0a7de745
A
247 vm_map_size_t map_size;
248 kern_return_t result;
249 boolean_t anywhere;
2d21ac55
A
250
251 /* filter out any kernel-only flags */
0a7de745 252 if (flags & ~VM_FLAGS_USER_ALLOCATE) {
2d21ac55 253 return KERN_INVALID_ARGUMENT;
0a7de745 254 }
91447636 255
0a7de745
A
256 if (map == VM_MAP_NULL) {
257 return KERN_INVALID_ARGUMENT;
258 }
1c79356b 259 if (size == 0) {
91447636 260 *addr = 0;
0a7de745 261 return KERN_SUCCESS;
91447636
A
262 }
263
2d21ac55 264 anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
91447636
A
265 if (anywhere) {
266 /*
267 * No specific address requested, so start candidate address
268 * search at the minimum address in the map. However, if that
269 * minimum is 0, bump it up by PAGE_SIZE. We want to limit
270 * allocations of PAGEZERO to explicit requests since its
271 * normal use is to catch dereferences of NULL and many
272 * applications also treat pointers with a value of 0 as
273 * special and suddenly having address 0 contain useable
274 * memory would tend to confuse those applications.
275 */
276 map_addr = vm_map_min(map);
0a7de745 277 if (map_addr == 0) {
39236c6e 278 map_addr += VM_MAP_PAGE_SIZE(map);
0a7de745
A
279 }
280 } else {
39236c6e 281 map_addr = vm_map_trunc_page(*addr,
0a7de745
A
282 VM_MAP_PAGE_MASK(map));
283 }
39236c6e 284 map_size = vm_map_round_page(size,
0a7de745 285 VM_MAP_PAGE_MASK(map));
91447636 286 if (map_size == 0) {
0a7de745 287 return KERN_INVALID_ARGUMENT;
1c79356b
A
288 }
289
290 result = vm_map_enter(
0a7de745
A
291 map,
292 &map_addr,
293 map_size,
294 (vm_map_offset_t)0,
295 flags,
296 VM_MAP_KERNEL_FLAGS_NONE,
297 tag,
298 VM_OBJECT_NULL,
299 (vm_object_offset_t)0,
300 FALSE,
301 VM_PROT_DEFAULT,
302 VM_PROT_ALL,
303 VM_INHERIT_DEFAULT);
1c79356b 304
5ba3f43e
A
305#if KASAN
306 if (result == KERN_SUCCESS && map->pmap == kernel_pmap) {
307 kasan_notify_address(map_addr, map_size);
308 }
309#endif
310
91447636 311 *addr = CAST_DOWN(vm_offset_t, map_addr);
0a7de745 312 return result;
1c79356b
A
313}
314
315/*
91447636
A
316 * mach_vm_deallocate -
317 * deallocates the specified range of addresses in the
1c79356b
A
318 * specified address map.
319 */
320kern_return_t
91447636 321mach_vm_deallocate(
0a7de745
A
322 vm_map_t map,
323 mach_vm_offset_t start,
324 mach_vm_size_t size)
91447636 325{
0a7de745
A
326 if ((map == VM_MAP_NULL) || (start + size < start)) {
327 return KERN_INVALID_ARGUMENT;
328 }
91447636 329
0a7de745
A
330 if (size == (mach_vm_offset_t) 0) {
331 return KERN_SUCCESS;
332 }
91447636 333
d9a64523 334 return vm_map_remove(map,
0a7de745
A
335 vm_map_trunc_page(start,
336 VM_MAP_PAGE_MASK(map)),
337 vm_map_round_page(start + size,
338 VM_MAP_PAGE_MASK(map)),
339 VM_MAP_REMOVE_NO_FLAGS);
91447636
A
340}
341
342/*
343 * vm_deallocate -
344 * deallocates the specified range of addresses in the
345 * specified address map (limited to addresses the same
346 * size as the kernel).
347 */
348kern_return_t
1c79356b 349vm_deallocate(
0a7de745
A
350 vm_map_t map,
351 vm_offset_t start,
352 vm_size_t size)
1c79356b 353{
0a7de745
A
354 if ((map == VM_MAP_NULL) || (start + size < start)) {
355 return KERN_INVALID_ARGUMENT;
356 }
1c79356b 357
0a7de745
A
358 if (size == (vm_offset_t) 0) {
359 return KERN_SUCCESS;
360 }
1c79356b 361
d9a64523 362 return vm_map_remove(map,
0a7de745
A
363 vm_map_trunc_page(start,
364 VM_MAP_PAGE_MASK(map)),
365 vm_map_round_page(start + size,
366 VM_MAP_PAGE_MASK(map)),
367 VM_MAP_REMOVE_NO_FLAGS);
1c79356b
A
368}
369
370/*
91447636
A
371 * mach_vm_inherit -
372 * Sets the inheritance of the specified range in the
1c79356b
A
373 * specified map.
374 */
375kern_return_t
91447636 376mach_vm_inherit(
0a7de745
A
377 vm_map_t map,
378 mach_vm_offset_t start,
379 mach_vm_size_t size,
380 vm_inherit_t new_inheritance)
91447636
A
381{
382 if ((map == VM_MAP_NULL) || (start + size < start) ||
0a7de745
A
383 (new_inheritance > VM_INHERIT_LAST_VALID)) {
384 return KERN_INVALID_ARGUMENT;
385 }
91447636 386
0a7de745 387 if (size == 0) {
91447636 388 return KERN_SUCCESS;
0a7de745 389 }
91447636 390
0a7de745
A
391 return vm_map_inherit(map,
392 vm_map_trunc_page(start,
393 VM_MAP_PAGE_MASK(map)),
394 vm_map_round_page(start + size,
395 VM_MAP_PAGE_MASK(map)),
396 new_inheritance);
91447636
A
397}
398
399/*
400 * vm_inherit -
401 * Sets the inheritance of the specified range in the
402 * specified map (range limited to addresses
403 */
404kern_return_t
1c79356b 405vm_inherit(
0a7de745
A
406 vm_map_t map,
407 vm_offset_t start,
408 vm_size_t size,
409 vm_inherit_t new_inheritance)
1c79356b 410{
91447636 411 if ((map == VM_MAP_NULL) || (start + size < start) ||
0a7de745
A
412 (new_inheritance > VM_INHERIT_LAST_VALID)) {
413 return KERN_INVALID_ARGUMENT;
414 }
1c79356b 415
0a7de745 416 if (size == 0) {
91447636 417 return KERN_SUCCESS;
0a7de745 418 }
91447636 419
0a7de745
A
420 return vm_map_inherit(map,
421 vm_map_trunc_page(start,
422 VM_MAP_PAGE_MASK(map)),
423 vm_map_round_page(start + size,
424 VM_MAP_PAGE_MASK(map)),
425 new_inheritance);
1c79356b
A
426}
427
428/*
91447636
A
429 * mach_vm_protect -
430 * Sets the protection of the specified range in the
1c79356b
A
431 * specified map.
432 */
433
91447636
A
434kern_return_t
435mach_vm_protect(
0a7de745
A
436 vm_map_t map,
437 mach_vm_offset_t start,
438 mach_vm_size_t size,
439 boolean_t set_maximum,
440 vm_prot_t new_protection)
91447636
A
441{
442 if ((map == VM_MAP_NULL) || (start + size < start) ||
0a7de745
A
443 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
444 return KERN_INVALID_ARGUMENT;
445 }
91447636 446
0a7de745 447 if (size == 0) {
91447636 448 return KERN_SUCCESS;
0a7de745 449 }
91447636 450
0a7de745
A
451 return vm_map_protect(map,
452 vm_map_trunc_page(start,
453 VM_MAP_PAGE_MASK(map)),
454 vm_map_round_page(start + size,
455 VM_MAP_PAGE_MASK(map)),
456 new_protection,
457 set_maximum);
91447636
A
458}
459
460/*
461 * vm_protect -
462 * Sets the protection of the specified range in the
463 * specified map. Addressability of the range limited
464 * to the same size as the kernel.
465 */
466
1c79356b
A
467kern_return_t
468vm_protect(
0a7de745
A
469 vm_map_t map,
470 vm_offset_t start,
471 vm_size_t size,
472 boolean_t set_maximum,
473 vm_prot_t new_protection)
1c79356b 474{
91447636 475 if ((map == VM_MAP_NULL) || (start + size < start) ||
0a7de745
A
476 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
477 return KERN_INVALID_ARGUMENT;
478 }
1c79356b 479
0a7de745 480 if (size == 0) {
91447636 481 return KERN_SUCCESS;
0a7de745 482 }
91447636 483
0a7de745
A
484 return vm_map_protect(map,
485 vm_map_trunc_page(start,
486 VM_MAP_PAGE_MASK(map)),
487 vm_map_round_page(start + size,
488 VM_MAP_PAGE_MASK(map)),
489 new_protection,
490 set_maximum);
1c79356b
A
491}
492
493/*
91447636 494 * mach_vm_machine_attributes -
1c79356b
A
495 * Handle machine-specific attributes for a mapping, such
496 * as cachability, migrability, etc.
497 */
498kern_return_t
91447636 499mach_vm_machine_attribute(
0a7de745
A
500 vm_map_t map,
501 mach_vm_address_t addr,
502 mach_vm_size_t size,
503 vm_machine_attribute_t attribute,
504 vm_machine_attribute_val_t* value) /* IN/OUT */
91447636 505{
0a7de745
A
506 if ((map == VM_MAP_NULL) || (addr + size < addr)) {
507 return KERN_INVALID_ARGUMENT;
508 }
91447636 509
0a7de745 510 if (size == 0) {
91447636 511 return KERN_SUCCESS;
0a7de745 512 }
91447636 513
39236c6e 514 return vm_map_machine_attribute(
0a7de745 515 map,
39236c6e 516 vm_map_trunc_page(addr,
0a7de745
A
517 VM_MAP_PAGE_MASK(map)),
518 vm_map_round_page(addr + size,
519 VM_MAP_PAGE_MASK(map)),
39236c6e
A
520 attribute,
521 value);
91447636
A
522}
523
524/*
525 * vm_machine_attribute -
526 * Handle machine-specific attributes for a mapping, such
527 * as cachability, migrability, etc. Limited addressability
528 * (same range limits as for the native kernel map).
529 */
530kern_return_t
1c79356b 531vm_machine_attribute(
0a7de745
A
532 vm_map_t map,
533 vm_address_t addr,
534 vm_size_t size,
535 vm_machine_attribute_t attribute,
536 vm_machine_attribute_val_t* value) /* IN/OUT */
1c79356b 537{
0a7de745
A
538 if ((map == VM_MAP_NULL) || (addr + size < addr)) {
539 return KERN_INVALID_ARGUMENT;
540 }
91447636 541
0a7de745 542 if (size == 0) {
91447636 543 return KERN_SUCCESS;
0a7de745 544 }
91447636 545
39236c6e 546 return vm_map_machine_attribute(
0a7de745 547 map,
39236c6e 548 vm_map_trunc_page(addr,
0a7de745
A
549 VM_MAP_PAGE_MASK(map)),
550 vm_map_round_page(addr + size,
551 VM_MAP_PAGE_MASK(map)),
39236c6e
A
552 attribute,
553 value);
91447636
A
554}
555
556/*
557 * mach_vm_read -
558 * Read/copy a range from one address space and return it to the caller.
559 *
560 * It is assumed that the address for the returned memory is selected by
561 * the IPC implementation as part of receiving the reply to this call.
562 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
563 * that gets returned.
0a7de745 564 *
91447636
A
565 * JMM - because of mach_msg_type_number_t, this call is limited to a
566 * single 4GB region at this time.
567 *
568 */
569kern_return_t
570mach_vm_read(
0a7de745
A
571 vm_map_t map,
572 mach_vm_address_t addr,
573 mach_vm_size_t size,
574 pointer_t *data,
575 mach_msg_type_number_t *data_size)
91447636 576{
0a7de745
A
577 kern_return_t error;
578 vm_map_copy_t ipc_address;
91447636 579
0a7de745
A
580 if (map == VM_MAP_NULL) {
581 return KERN_INVALID_ARGUMENT;
582 }
1c79356b 583
0a7de745 584 if ((mach_msg_type_number_t) size != size) {
b0d623f7 585 return KERN_INVALID_ARGUMENT;
0a7de745
A
586 }
587
91447636 588 error = vm_map_copyin(map,
0a7de745
A
589 (vm_map_address_t)addr,
590 (vm_map_size_t)size,
591 FALSE, /* src_destroy */
592 &ipc_address);
91447636
A
593
594 if (KERN_SUCCESS == error) {
595 *data = (pointer_t) ipc_address;
b0d623f7
A
596 *data_size = (mach_msg_type_number_t) size;
597 assert(*data_size == size);
91447636 598 }
0a7de745 599 return error;
1c79356b
A
600}
601
91447636
A
602/*
603 * vm_read -
604 * Read/copy a range from one address space and return it to the caller.
605 * Limited addressability (same range limits as for the native kernel map).
0a7de745 606 *
91447636
A
607 * It is assumed that the address for the returned memory is selected by
608 * the IPC implementation as part of receiving the reply to this call.
609 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
610 * that gets returned.
611 */
1c79356b
A
612kern_return_t
613vm_read(
0a7de745
A
614 vm_map_t map,
615 vm_address_t addr,
616 vm_size_t size,
617 pointer_t *data,
618 mach_msg_type_number_t *data_size)
1c79356b 619{
0a7de745
A
620 kern_return_t error;
621 vm_map_copy_t ipc_address;
1c79356b 622
0a7de745
A
623 if (map == VM_MAP_NULL) {
624 return KERN_INVALID_ARGUMENT;
625 }
1c79356b 626
d9a64523
A
627 mach_msg_type_number_t dsize;
628 if (os_convert_overflow(size, &dsize)) {
b0d623f7
A
629 /*
630 * The kernel could handle a 64-bit "size" value, but
631 * it could not return the size of the data in "*data_size"
632 * without overflowing.
633 * Let's reject this "size" as invalid.
634 */
635 return KERN_INVALID_ARGUMENT;
636 }
637
91447636 638 error = vm_map_copyin(map,
0a7de745
A
639 (vm_map_address_t)addr,
640 (vm_map_size_t)size,
641 FALSE, /* src_destroy */
642 &ipc_address);
91447636
A
643
644 if (KERN_SUCCESS == error) {
1c79356b 645 *data = (pointer_t) ipc_address;
d9a64523 646 *data_size = dsize;
b0d623f7 647 assert(*data_size == size);
1c79356b 648 }
0a7de745 649 return error;
1c79356b
A
650}
651
0a7de745 652/*
91447636
A
653 * mach_vm_read_list -
654 * Read/copy a list of address ranges from specified map.
655 *
656 * MIG does not know how to deal with a returned array of
657 * vm_map_copy_t structures, so we have to do the copyout
658 * manually here.
659 */
660kern_return_t
661mach_vm_read_list(
0a7de745
A
662 vm_map_t map,
663 mach_vm_read_entry_t data_list,
664 natural_t count)
91447636 665{
0a7de745
A
666 mach_msg_type_number_t i;
667 kern_return_t error;
668 vm_map_copy_t copy;
91447636 669
8ad349bb 670 if (map == VM_MAP_NULL ||
0a7de745
A
671 count > VM_MAP_ENTRY_MAX) {
672 return KERN_INVALID_ARGUMENT;
673 }
91447636
A
674
675 error = KERN_SUCCESS;
0a7de745 676 for (i = 0; i < count; i++) {
91447636
A
677 vm_map_address_t map_addr;
678 vm_map_size_t map_size;
679
680 map_addr = (vm_map_address_t)(data_list[i].address);
681 map_size = (vm_map_size_t)(data_list[i].size);
682
0a7de745 683 if (map_size != 0) {
91447636 684 error = vm_map_copyin(map,
0a7de745
A
685 map_addr,
686 map_size,
687 FALSE, /* src_destroy */
688 &copy);
91447636
A
689 if (KERN_SUCCESS == error) {
690 error = vm_map_copyout(
0a7de745
A
691 current_task()->map,
692 &map_addr,
693 copy);
91447636
A
694 if (KERN_SUCCESS == error) {
695 data_list[i].address = map_addr;
696 continue;
697 }
698 vm_map_copy_discard(copy);
699 }
700 }
701 data_list[i].address = (mach_vm_address_t)0;
702 data_list[i].size = (mach_vm_size_t)0;
703 }
0a7de745 704 return error;
91447636
A
705}
706
0a7de745 707/*
91447636
A
708 * vm_read_list -
709 * Read/copy a list of address ranges from specified map.
710 *
711 * MIG does not know how to deal with a returned array of
712 * vm_map_copy_t structures, so we have to do the copyout
713 * manually here.
714 *
715 * The source and destination ranges are limited to those
716 * that can be described with a vm_address_t (i.e. same
717 * size map as the kernel).
718 *
719 * JMM - If the result of the copyout is an address range
720 * that cannot be described with a vm_address_t (i.e. the
721 * caller had a larger address space but used this call
722 * anyway), it will result in a truncated address being
723 * returned (and a likely confused caller).
724 */
725
1c79356b
A
726kern_return_t
727vm_read_list(
0a7de745
A
728 vm_map_t map,
729 vm_read_entry_t data_list,
730 natural_t count)
1c79356b 731{
0a7de745
A
732 mach_msg_type_number_t i;
733 kern_return_t error;
734 vm_map_copy_t copy;
1c79356b 735
8ad349bb 736 if (map == VM_MAP_NULL ||
0a7de745
A
737 count > VM_MAP_ENTRY_MAX) {
738 return KERN_INVALID_ARGUMENT;
739 }
1c79356b 740
91447636 741 error = KERN_SUCCESS;
0a7de745 742 for (i = 0; i < count; i++) {
91447636
A
743 vm_map_address_t map_addr;
744 vm_map_size_t map_size;
745
746 map_addr = (vm_map_address_t)(data_list[i].address);
747 map_size = (vm_map_size_t)(data_list[i].size);
748
0a7de745 749 if (map_size != 0) {
91447636 750 error = vm_map_copyin(map,
0a7de745
A
751 map_addr,
752 map_size,
753 FALSE, /* src_destroy */
754 &copy);
91447636 755 if (KERN_SUCCESS == error) {
0a7de745
A
756 error = vm_map_copyout(current_task()->map,
757 &map_addr,
758 copy);
91447636
A
759 if (KERN_SUCCESS == error) {
760 data_list[i].address =
0a7de745 761 CAST_DOWN(vm_offset_t, map_addr);
91447636
A
762 continue;
763 }
764 vm_map_copy_discard(copy);
1c79356b
A
765 }
766 }
91447636
A
767 data_list[i].address = (mach_vm_address_t)0;
768 data_list[i].size = (mach_vm_size_t)0;
1c79356b 769 }
0a7de745 770 return error;
1c79356b
A
771}
772
773/*
91447636
A
774 * mach_vm_read_overwrite -
775 * Overwrite a range of the current map with data from the specified
776 * map/address range.
0a7de745 777 *
91447636
A
778 * In making an assumption that the current thread is local, it is
779 * no longer cluster-safe without a fully supportive local proxy
780 * thread/task (but we don't support cluster's anymore so this is moot).
1c79356b
A
781 */
782
1c79356b 783kern_return_t
91447636 784mach_vm_read_overwrite(
0a7de745
A
785 vm_map_t map,
786 mach_vm_address_t address,
787 mach_vm_size_t size,
788 mach_vm_address_t data,
789 mach_vm_size_t *data_size)
91447636 790{
0a7de745
A
791 kern_return_t error;
792 vm_map_copy_t copy;
1c79356b 793
0a7de745
A
794 if (map == VM_MAP_NULL) {
795 return KERN_INVALID_ARGUMENT;
796 }
1c79356b 797
91447636 798 error = vm_map_copyin(map, (vm_map_address_t)address,
0a7de745 799 (vm_map_size_t)size, FALSE, &copy);
91447636
A
800
801 if (KERN_SUCCESS == error) {
802 error = vm_map_copy_overwrite(current_thread()->map,
0a7de745
A
803 (vm_map_address_t)data,
804 copy, FALSE);
91447636
A
805 if (KERN_SUCCESS == error) {
806 *data_size = size;
807 return error;
1c79356b 808 }
91447636 809 vm_map_copy_discard(copy);
1c79356b 810 }
0a7de745 811 return error;
91447636
A
812}
813
814/*
815 * vm_read_overwrite -
816 * Overwrite a range of the current map with data from the specified
817 * map/address range.
0a7de745 818 *
91447636
A
819 * This routine adds the additional limitation that the source and
820 * destination ranges must be describable with vm_address_t values
821 * (i.e. the same size address spaces as the kernel, or at least the
822 * the ranges are in that first portion of the respective address
823 * spaces).
824 */
825
826kern_return_t
827vm_read_overwrite(
0a7de745
A
828 vm_map_t map,
829 vm_address_t address,
830 vm_size_t size,
831 vm_address_t data,
832 vm_size_t *data_size)
91447636 833{
0a7de745
A
834 kern_return_t error;
835 vm_map_copy_t copy;
91447636 836
0a7de745
A
837 if (map == VM_MAP_NULL) {
838 return KERN_INVALID_ARGUMENT;
839 }
91447636
A
840
841 error = vm_map_copyin(map, (vm_map_address_t)address,
0a7de745 842 (vm_map_size_t)size, FALSE, &copy);
91447636
A
843
844 if (KERN_SUCCESS == error) {
845 error = vm_map_copy_overwrite(current_thread()->map,
0a7de745
A
846 (vm_map_address_t)data,
847 copy, FALSE);
91447636
A
848 if (KERN_SUCCESS == error) {
849 *data_size = size;
850 return error;
1c79356b 851 }
91447636 852 vm_map_copy_discard(copy);
1c79356b 853 }
0a7de745 854 return error;
1c79356b
A
855}
856
857
91447636
A
858/*
859 * mach_vm_write -
860 * Overwrite the specified address range with the data provided
861 * (from the current map).
862 */
863kern_return_t
864mach_vm_write(
0a7de745
A
865 vm_map_t map,
866 mach_vm_address_t address,
867 pointer_t data,
868 __unused mach_msg_type_number_t size)
91447636 869{
0a7de745 870 if (map == VM_MAP_NULL) {
91447636 871 return KERN_INVALID_ARGUMENT;
0a7de745 872 }
1c79356b 873
91447636 874 return vm_map_copy_overwrite(map, (vm_map_address_t)address,
0a7de745 875 (vm_map_copy_t) data, FALSE /* interruptible XXX */);
91447636 876}
1c79356b 877
91447636
A
878/*
879 * vm_write -
880 * Overwrite the specified address range with the data provided
881 * (from the current map).
882 *
883 * The addressability of the range of addresses to overwrite is
884 * limited bu the use of a vm_address_t (same size as kernel map).
885 * Either the target map is also small, or the range is in the
886 * low addresses within it.
887 */
1c79356b
A
888kern_return_t
889vm_write(
0a7de745
A
890 vm_map_t map,
891 vm_address_t address,
892 pointer_t data,
893 __unused mach_msg_type_number_t size)
91447636 894{
0a7de745 895 if (map == VM_MAP_NULL) {
91447636 896 return KERN_INVALID_ARGUMENT;
0a7de745 897 }
91447636
A
898
899 return vm_map_copy_overwrite(map, (vm_map_address_t)address,
0a7de745 900 (vm_map_copy_t) data, FALSE /* interruptible XXX */);
91447636
A
901}
902
903/*
904 * mach_vm_copy -
905 * Overwrite one range of the specified map with the contents of
906 * another range within that same map (i.e. both address ranges
907 * are "over there").
908 */
909kern_return_t
910mach_vm_copy(
0a7de745
A
911 vm_map_t map,
912 mach_vm_address_t source_address,
913 mach_vm_size_t size,
914 mach_vm_address_t dest_address)
1c79356b 915{
91447636
A
916 vm_map_copy_t copy;
917 kern_return_t kr;
918
0a7de745 919 if (map == VM_MAP_NULL) {
1c79356b 920 return KERN_INVALID_ARGUMENT;
0a7de745 921 }
1c79356b 922
91447636 923 kr = vm_map_copyin(map, (vm_map_address_t)source_address,
0a7de745 924 (vm_map_size_t)size, FALSE, &copy);
91447636
A
925
926 if (KERN_SUCCESS == kr) {
927 kr = vm_map_copy_overwrite(map,
0a7de745
A
928 (vm_map_address_t)dest_address,
929 copy, FALSE /* interruptible XXX */);
91447636 930
0a7de745 931 if (KERN_SUCCESS != kr) {
91447636 932 vm_map_copy_discard(copy);
0a7de745 933 }
91447636
A
934 }
935 return kr;
1c79356b
A
936}
937
938kern_return_t
939vm_copy(
0a7de745
A
940 vm_map_t map,
941 vm_address_t source_address,
942 vm_size_t size,
943 vm_address_t dest_address)
1c79356b
A
944{
945 vm_map_copy_t copy;
946 kern_return_t kr;
947
0a7de745 948 if (map == VM_MAP_NULL) {
1c79356b 949 return KERN_INVALID_ARGUMENT;
0a7de745 950 }
1c79356b 951
91447636 952 kr = vm_map_copyin(map, (vm_map_address_t)source_address,
0a7de745 953 (vm_map_size_t)size, FALSE, &copy);
1c79356b 954
91447636
A
955 if (KERN_SUCCESS == kr) {
956 kr = vm_map_copy_overwrite(map,
0a7de745
A
957 (vm_map_address_t)dest_address,
958 copy, FALSE /* interruptible XXX */);
1c79356b 959
0a7de745 960 if (KERN_SUCCESS != kr) {
91447636 961 vm_map_copy_discard(copy);
0a7de745 962 }
91447636
A
963 }
964 return kr;
1c79356b
A
965}
966
967/*
91447636
A
968 * mach_vm_map -
969 * Map some range of an object into an address space.
970 *
971 * The object can be one of several types of objects:
972 * NULL - anonymous memory
973 * a named entry - a range within another address space
974 * or a range within a memory object
975 * a whole memory object
976 *
1c79356b
A
977 */
978kern_return_t
5ba3f43e 979mach_vm_map_external(
0a7de745
A
980 vm_map_t target_map,
981 mach_vm_offset_t *address,
982 mach_vm_size_t initial_size,
983 mach_vm_offset_t mask,
984 int flags,
985 ipc_port_t port,
986 vm_object_offset_t offset,
987 boolean_t copy,
988 vm_prot_t cur_protection,
989 vm_prot_t max_protection,
990 vm_inherit_t inheritance)
5ba3f43e
A
991{
992 vm_tag_t tag;
993
994 VM_GET_FLAGS_ALIAS(flags, tag);
0a7de745
A
995 return mach_vm_map_kernel(target_map, address, initial_size, mask,
996 flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
997 port, offset, copy,
998 cur_protection, max_protection,
999 inheritance);
5ba3f43e
A
1000}
1001
1002kern_return_t
1003mach_vm_map_kernel(
0a7de745
A
1004 vm_map_t target_map,
1005 mach_vm_offset_t *address,
1006 mach_vm_size_t initial_size,
1007 mach_vm_offset_t mask,
1008 int flags,
1009 vm_map_kernel_flags_t vmk_flags,
1010 vm_tag_t tag,
1011 ipc_port_t port,
1012 vm_object_offset_t offset,
1013 boolean_t copy,
1014 vm_prot_t cur_protection,
1015 vm_prot_t max_protection,
1016 vm_inherit_t inheritance)
1c79356b 1017{
0a7de745
A
1018 kern_return_t kr;
1019 vm_map_offset_t vmmaddr;
316670eb
A
1020
1021 vmmaddr = (vm_map_offset_t) *address;
1022
2d21ac55 1023 /* filter out any kernel-only flags */
0a7de745 1024 if (flags & ~VM_FLAGS_USER_MAP) {
2d21ac55 1025 return KERN_INVALID_ARGUMENT;
0a7de745 1026 }
1c79356b 1027
316670eb 1028 kr = vm_map_enter_mem_object(target_map,
0a7de745
A
1029 &vmmaddr,
1030 initial_size,
1031 mask,
1032 flags,
1033 vmk_flags,
1034 tag,
1035 port,
1036 offset,
1037 copy,
1038 cur_protection,
1039 max_protection,
1040 inheritance);
5ba3f43e
A
1041
1042#if KASAN
1043 if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) {
1044 kasan_notify_address(vmmaddr, initial_size);
1045 }
1046#endif
316670eb
A
1047
1048 *address = vmmaddr;
1049 return kr;
1c79356b
A
1050}
1051
91447636
A
1052
1053/* legacy interface */
1054kern_return_t
5ba3f43e 1055vm_map_64_external(
0a7de745
A
1056 vm_map_t target_map,
1057 vm_offset_t *address,
1058 vm_size_t size,
1059 vm_offset_t mask,
1060 int flags,
1061 ipc_port_t port,
1062 vm_object_offset_t offset,
1063 boolean_t copy,
1064 vm_prot_t cur_protection,
1065 vm_prot_t max_protection,
1066 vm_inherit_t inheritance)
5ba3f43e
A
1067{
1068 vm_tag_t tag;
1069
1070 VM_GET_FLAGS_ALIAS(flags, tag);
0a7de745
A
1071 return vm_map_64_kernel(target_map, address, size, mask,
1072 flags, VM_MAP_KERNEL_FLAGS_NONE,
1073 tag, port, offset, copy,
1074 cur_protection, max_protection,
1075 inheritance);
5ba3f43e
A
1076}
1077
1078kern_return_t
1079vm_map_64_kernel(
0a7de745
A
1080 vm_map_t target_map,
1081 vm_offset_t *address,
1082 vm_size_t size,
1083 vm_offset_t mask,
1084 int flags,
1085 vm_map_kernel_flags_t vmk_flags,
1086 vm_tag_t tag,
1087 ipc_port_t port,
1088 vm_object_offset_t offset,
1089 boolean_t copy,
1090 vm_prot_t cur_protection,
1091 vm_prot_t max_protection,
1092 vm_inherit_t inheritance)
91447636
A
1093{
1094 mach_vm_address_t map_addr;
1095 mach_vm_size_t map_size;
1096 mach_vm_offset_t map_mask;
1097 kern_return_t kr;
1098
1099 map_addr = (mach_vm_address_t)*address;
1100 map_size = (mach_vm_size_t)size;
1101 map_mask = (mach_vm_offset_t)mask;
1102
d9a64523 1103 kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
0a7de745
A
1104 flags, vmk_flags, tag,
1105 port, offset, copy,
1106 cur_protection, max_protection, inheritance);
b0d623f7 1107 *address = CAST_DOWN(vm_offset_t, map_addr);
91447636
A
1108 return kr;
1109}
1110
1c79356b 1111/* temporary, until world build */
55e303ae 1112kern_return_t
5ba3f43e 1113vm_map_external(
0a7de745
A
1114 vm_map_t target_map,
1115 vm_offset_t *address,
1116 vm_size_t size,
1117 vm_offset_t mask,
1118 int flags,
1119 ipc_port_t port,
1120 vm_offset_t offset,
1121 boolean_t copy,
1122 vm_prot_t cur_protection,
1123 vm_prot_t max_protection,
1124 vm_inherit_t inheritance)
5ba3f43e
A
1125{
1126 vm_tag_t tag;
1127
1128 VM_GET_FLAGS_ALIAS(flags, tag);
0a7de745
A
1129 return vm_map_kernel(target_map, address, size, mask,
1130 flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
1131 port, offset, copy,
1132 cur_protection, max_protection, inheritance);
5ba3f43e
A
1133}
1134
1135kern_return_t
1136vm_map_kernel(
0a7de745
A
1137 vm_map_t target_map,
1138 vm_offset_t *address,
1139 vm_size_t size,
1140 vm_offset_t mask,
1141 int flags,
1142 vm_map_kernel_flags_t vmk_flags,
1143 vm_tag_t tag,
1144 ipc_port_t port,
1145 vm_offset_t offset,
1146 boolean_t copy,
1147 vm_prot_t cur_protection,
1148 vm_prot_t max_protection,
1149 vm_inherit_t inheritance)
1c79356b 1150{
91447636
A
1151 mach_vm_address_t map_addr;
1152 mach_vm_size_t map_size;
1153 mach_vm_offset_t map_mask;
1154 vm_object_offset_t obj_offset;
1155 kern_return_t kr;
1156
1157 map_addr = (mach_vm_address_t)*address;
1158 map_size = (mach_vm_size_t)size;
1159 map_mask = (mach_vm_offset_t)mask;
1160 obj_offset = (vm_object_offset_t)offset;
1161
d9a64523 1162 kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
0a7de745
A
1163 flags, vmk_flags, tag,
1164 port, obj_offset, copy,
1165 cur_protection, max_protection, inheritance);
b0d623f7 1166 *address = CAST_DOWN(vm_offset_t, map_addr);
91447636
A
1167 return kr;
1168}
1169
1170/*
1171 * mach_vm_remap -
1172 * Remap a range of memory from one task into another,
1173 * to another address range within the same task, or
1174 * over top of itself (with altered permissions and/or
1175 * as an in-place copy of itself).
1176 */
5ba3f43e
A
1177kern_return_t
1178mach_vm_remap_external(
0a7de745
A
1179 vm_map_t target_map,
1180 mach_vm_offset_t *address,
1181 mach_vm_size_t size,
1182 mach_vm_offset_t mask,
1183 int flags,
1184 vm_map_t src_map,
1185 mach_vm_offset_t memory_address,
1186 boolean_t copy,
1187 vm_prot_t *cur_protection,
1188 vm_prot_t *max_protection,
1189 vm_inherit_t inheritance)
5ba3f43e
A
1190{
1191 vm_tag_t tag;
1192 VM_GET_FLAGS_ALIAS(flags, tag);
1193
0a7de745
A
1194 return mach_vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, memory_address,
1195 copy, cur_protection, max_protection, inheritance);
5ba3f43e 1196}
91447636
A
1197
1198kern_return_t
5ba3f43e 1199mach_vm_remap_kernel(
0a7de745
A
1200 vm_map_t target_map,
1201 mach_vm_offset_t *address,
1202 mach_vm_size_t size,
1203 mach_vm_offset_t mask,
1204 int flags,
1205 vm_tag_t tag,
1206 vm_map_t src_map,
1207 mach_vm_offset_t memory_address,
1208 boolean_t copy,
1209 vm_prot_t *cur_protection,
1210 vm_prot_t *max_protection,
1211 vm_inherit_t inheritance)
91447636 1212{
0a7de745
A
1213 vm_map_offset_t map_addr;
1214 kern_return_t kr;
91447636 1215
0a7de745 1216 if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
91447636 1217 return KERN_INVALID_ARGUMENT;
0a7de745 1218 }
91447636 1219
060df5ea 1220 /* filter out any kernel-only flags */
0a7de745 1221 if (flags & ~VM_FLAGS_USER_REMAP) {
060df5ea 1222 return KERN_INVALID_ARGUMENT;
0a7de745 1223 }
060df5ea 1224
91447636
A
1225 map_addr = (vm_map_offset_t)*address;
1226
1227 kr = vm_map_remap(target_map,
0a7de745
A
1228 &map_addr,
1229 size,
1230 mask,
1231 flags,
1232 VM_MAP_KERNEL_FLAGS_NONE,
1233 tag,
1234 src_map,
1235 memory_address,
1236 copy,
1237 cur_protection,
1238 max_protection,
1239 inheritance);
91447636
A
1240 *address = map_addr;
1241 return kr;
1c79356b
A
1242}
1243
91447636
A
1244/*
1245 * vm_remap -
1246 * Remap a range of memory from one task into another,
1247 * to another address range within the same task, or
1248 * over top of itself (with altered permissions and/or
1249 * as an in-place copy of itself).
1250 *
1251 * The addressability of the source and target address
1252 * range is limited by the size of vm_address_t (in the
1253 * kernel context).
1254 */
1255kern_return_t
5ba3f43e 1256vm_remap_external(
0a7de745
A
1257 vm_map_t target_map,
1258 vm_offset_t *address,
1259 vm_size_t size,
1260 vm_offset_t mask,
1261 int flags,
1262 vm_map_t src_map,
1263 vm_offset_t memory_address,
1264 boolean_t copy,
1265 vm_prot_t *cur_protection,
1266 vm_prot_t *max_protection,
1267 vm_inherit_t inheritance)
5ba3f43e
A
1268{
1269 vm_tag_t tag;
1270 VM_GET_FLAGS_ALIAS(flags, tag);
1271
0a7de745
A
1272 return vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map,
1273 memory_address, copy, cur_protection, max_protection, inheritance);
5ba3f43e
A
1274}
1275
1276kern_return_t
1277vm_remap_kernel(
0a7de745
A
1278 vm_map_t target_map,
1279 vm_offset_t *address,
1280 vm_size_t size,
1281 vm_offset_t mask,
1282 int flags,
1283 vm_tag_t tag,
1284 vm_map_t src_map,
1285 vm_offset_t memory_address,
1286 boolean_t copy,
1287 vm_prot_t *cur_protection,
1288 vm_prot_t *max_protection,
1289 vm_inherit_t inheritance)
91447636 1290{
0a7de745
A
1291 vm_map_offset_t map_addr;
1292 kern_return_t kr;
91447636 1293
0a7de745 1294 if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
91447636 1295 return KERN_INVALID_ARGUMENT;
0a7de745 1296 }
91447636 1297
060df5ea 1298 /* filter out any kernel-only flags */
0a7de745 1299 if (flags & ~VM_FLAGS_USER_REMAP) {
060df5ea 1300 return KERN_INVALID_ARGUMENT;
0a7de745 1301 }
060df5ea 1302
91447636
A
1303 map_addr = (vm_map_offset_t)*address;
1304
1305 kr = vm_map_remap(target_map,
0a7de745
A
1306 &map_addr,
1307 size,
1308 mask,
1309 flags,
1310 VM_MAP_KERNEL_FLAGS_NONE,
1311 tag,
1312 src_map,
1313 memory_address,
1314 copy,
1315 cur_protection,
1316 max_protection,
1317 inheritance);
91447636
A
1318 *address = CAST_DOWN(vm_offset_t, map_addr);
1319 return kr;
1320}
1c79356b
A
1321
1322/*
91447636
A
1323 * NOTE: these routine (and this file) will no longer require mach_host_server.h
1324 * when mach_vm_wire and vm_wire are changed to use ledgers.
1c79356b
A
1325 */
1326#include <mach/mach_host_server.h>
1327/*
91447636
A
1328 * mach_vm_wire
1329 * Specify that the range of the virtual address space
1330 * of the target task must not cause page faults for
1331 * the indicated accesses.
1332 *
1333 * [ To unwire the pages, specify VM_PROT_NONE. ]
1334 */
1335kern_return_t
5ba3f43e 1336mach_vm_wire_external(
0a7de745
A
1337 host_priv_t host_priv,
1338 vm_map_t map,
1339 mach_vm_offset_t start,
1340 mach_vm_size_t size,
1341 vm_prot_t access)
5ba3f43e 1342{
0a7de745 1343 return mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK);
5ba3f43e
A
1344}
1345
1346kern_return_t
1347mach_vm_wire_kernel(
0a7de745
A
1348 host_priv_t host_priv,
1349 vm_map_t map,
1350 mach_vm_offset_t start,
1351 mach_vm_size_t size,
1352 vm_prot_t access,
1353 vm_tag_t tag)
91447636 1354{
0a7de745 1355 kern_return_t rc;
91447636 1356
0a7de745 1357 if (host_priv == HOST_PRIV_NULL) {
91447636 1358 return KERN_INVALID_HOST;
0a7de745 1359 }
91447636
A
1360
1361 assert(host_priv == &realhost);
1362
0a7de745 1363 if (map == VM_MAP_NULL) {
91447636 1364 return KERN_INVALID_TASK;
0a7de745 1365 }
91447636 1366
0a7de745 1367 if (access & ~VM_PROT_ALL || (start + size < start)) {
91447636 1368 return KERN_INVALID_ARGUMENT;
0a7de745 1369 }
91447636
A
1370
1371 if (access != VM_PROT_NONE) {
5ba3f43e 1372 rc = vm_map_wire_kernel(map,
0a7de745
A
1373 vm_map_trunc_page(start,
1374 VM_MAP_PAGE_MASK(map)),
1375 vm_map_round_page(start + size,
1376 VM_MAP_PAGE_MASK(map)),
1377 access, tag,
1378 TRUE);
91447636 1379 } else {
39236c6e 1380 rc = vm_map_unwire(map,
0a7de745
A
1381 vm_map_trunc_page(start,
1382 VM_MAP_PAGE_MASK(map)),
1383 vm_map_round_page(start + size,
1384 VM_MAP_PAGE_MASK(map)),
1385 TRUE);
91447636
A
1386 }
1387 return rc;
1388}
1389
1390/*
1391 * vm_wire -
1c79356b
A
1392 * Specify that the range of the virtual address space
1393 * of the target task must not cause page faults for
1394 * the indicated accesses.
1395 *
1396 * [ To unwire the pages, specify VM_PROT_NONE. ]
1397 */
1398kern_return_t
1399vm_wire(
0a7de745
A
1400 host_priv_t host_priv,
1401 vm_map_t map,
1402 vm_offset_t start,
1403 vm_size_t size,
1404 vm_prot_t access)
1c79356b 1405{
0a7de745 1406 kern_return_t rc;
1c79356b 1407
0a7de745 1408 if (host_priv == HOST_PRIV_NULL) {
1c79356b 1409 return KERN_INVALID_HOST;
0a7de745 1410 }
1c79356b
A
1411
1412 assert(host_priv == &realhost);
1413
0a7de745 1414 if (map == VM_MAP_NULL) {
1c79356b 1415 return KERN_INVALID_TASK;
0a7de745 1416 }
1c79356b 1417
0a7de745 1418 if ((access & ~VM_PROT_ALL) || (start + size < start)) {
1c79356b 1419 return KERN_INVALID_ARGUMENT;
0a7de745 1420 }
1c79356b 1421
91447636
A
1422 if (size == 0) {
1423 rc = KERN_SUCCESS;
1424 } else if (access != VM_PROT_NONE) {
5ba3f43e 1425 rc = vm_map_wire_kernel(map,
0a7de745
A
1426 vm_map_trunc_page(start,
1427 VM_MAP_PAGE_MASK(map)),
1428 vm_map_round_page(start + size,
1429 VM_MAP_PAGE_MASK(map)),
1430 access, VM_KERN_MEMORY_OSFMK,
1431 TRUE);
1c79356b 1432 } else {
39236c6e 1433 rc = vm_map_unwire(map,
0a7de745
A
1434 vm_map_trunc_page(start,
1435 VM_MAP_PAGE_MASK(map)),
1436 vm_map_round_page(start + size,
1437 VM_MAP_PAGE_MASK(map)),
1438 TRUE);
1c79356b
A
1439 }
1440 return rc;
1441}
1442
1443/*
1444 * vm_msync
1445 *
1446 * Synchronises the memory range specified with its backing store
1447 * image by either flushing or cleaning the contents to the appropriate
91447636
A
1448 * memory manager.
1449 *
1450 * interpretation of sync_flags
1451 * VM_SYNC_INVALIDATE - discard pages, only return precious
1452 * pages to manager.
1453 *
1454 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
1455 * - discard pages, write dirty or precious
1456 * pages back to memory manager.
1457 *
1458 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
1459 * - write dirty or precious pages back to
1460 * the memory manager.
1461 *
1462 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
1463 * is a hole in the region, and we would
1464 * have returned KERN_SUCCESS, return
1465 * KERN_INVALID_ADDRESS instead.
1466 *
1467 * RETURNS
1468 * KERN_INVALID_TASK Bad task parameter
1469 * KERN_INVALID_ARGUMENT both sync and async were specified.
1470 * KERN_SUCCESS The usual.
1471 * KERN_INVALID_ADDRESS There was a hole in the region.
1472 */
1473
1474kern_return_t
1475mach_vm_msync(
0a7de745
A
1476 vm_map_t map,
1477 mach_vm_address_t address,
1478 mach_vm_size_t size,
1479 vm_sync_t sync_flags)
91447636 1480{
0a7de745
A
1481 if (map == VM_MAP_NULL) {
1482 return KERN_INVALID_TASK;
1483 }
91447636
A
1484
1485 return vm_map_msync(map, (vm_map_address_t)address,
0a7de745 1486 (vm_map_size_t)size, sync_flags);
91447636 1487}
0a7de745 1488
91447636
A
1489/*
1490 * vm_msync
1491 *
1492 * Synchronises the memory range specified with its backing store
1493 * image by either flushing or cleaning the contents to the appropriate
1494 * memory manager.
1c79356b
A
1495 *
1496 * interpretation of sync_flags
1497 * VM_SYNC_INVALIDATE - discard pages, only return precious
1498 * pages to manager.
1499 *
1500 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
1501 * - discard pages, write dirty or precious
1502 * pages back to memory manager.
1503 *
1504 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
1505 * - write dirty or precious pages back to
1506 * the memory manager.
1507 *
91447636
A
1508 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
1509 * is a hole in the region, and we would
1510 * have returned KERN_SUCCESS, return
1511 * KERN_INVALID_ADDRESS instead.
1512 *
1513 * The addressability of the range is limited to that which can
1514 * be described by a vm_address_t.
1c79356b
A
1515 *
1516 * RETURNS
1517 * KERN_INVALID_TASK Bad task parameter
1518 * KERN_INVALID_ARGUMENT both sync and async were specified.
1519 * KERN_SUCCESS The usual.
91447636 1520 * KERN_INVALID_ADDRESS There was a hole in the region.
1c79356b
A
1521 */
1522
1523kern_return_t
1524vm_msync(
0a7de745
A
1525 vm_map_t map,
1526 vm_address_t address,
1527 vm_size_t size,
1528 vm_sync_t sync_flags)
1c79356b 1529{
0a7de745
A
1530 if (map == VM_MAP_NULL) {
1531 return KERN_INVALID_TASK;
1532 }
1c79356b 1533
91447636 1534 return vm_map_msync(map, (vm_map_address_t)address,
0a7de745 1535 (vm_map_size_t)size, sync_flags);
91447636 1536}
1c79356b 1537
91447636 1538
6d2010ae
A
1539int
1540vm_toggle_entry_reuse(int toggle, int *old_value)
1541{
1542 vm_map_t map = current_map();
0a7de745 1543
39037602 1544 assert(!map->is_nested_map);
0a7de745 1545 if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) {
6d2010ae 1546 *old_value = map->disable_vmentry_reuse;
0a7de745 1547 } else if (toggle == VM_TOGGLE_SET) {
3e170ce0
A
1548 vm_map_entry_t map_to_entry;
1549
6d2010ae 1550 vm_map_lock(map);
3e170ce0 1551 vm_map_disable_hole_optimization(map);
6d2010ae 1552 map->disable_vmentry_reuse = TRUE;
3e170ce0
A
1553 __IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
1554 if (map->first_free == map_to_entry) {
6d2010ae
A
1555 map->highest_entry_end = vm_map_min(map);
1556 } else {
1557 map->highest_entry_end = map->first_free->vme_end;
1558 }
1559 vm_map_unlock(map);
0a7de745 1560 } else if (toggle == VM_TOGGLE_CLEAR) {
6d2010ae
A
1561 vm_map_lock(map);
1562 map->disable_vmentry_reuse = FALSE;
1563 vm_map_unlock(map);
0a7de745 1564 } else {
6d2010ae 1565 return KERN_INVALID_ARGUMENT;
0a7de745 1566 }
6d2010ae
A
1567
1568 return KERN_SUCCESS;
1569}
1570
91447636 1571/*
0a7de745 1572 * mach_vm_behavior_set
91447636
A
1573 *
1574 * Sets the paging behavior attribute for the specified range
1575 * in the specified map.
1576 *
1577 * This routine will fail with KERN_INVALID_ADDRESS if any address
1578 * in [start,start+size) is not a valid allocated memory region.
1579 */
0a7de745 1580kern_return_t
91447636 1581mach_vm_behavior_set(
0a7de745
A
1582 vm_map_t map,
1583 mach_vm_offset_t start,
1584 mach_vm_size_t size,
1585 vm_behavior_t new_behavior)
91447636 1586{
0a7de745 1587 vm_map_offset_t align_mask;
39037602 1588
0a7de745
A
1589 if ((map == VM_MAP_NULL) || (start + size < start)) {
1590 return KERN_INVALID_ARGUMENT;
1591 }
1c79356b 1592
0a7de745 1593 if (size == 0) {
91447636 1594 return KERN_SUCCESS;
0a7de745 1595 }
1c79356b 1596
39037602
A
1597 switch (new_behavior) {
1598 case VM_BEHAVIOR_REUSABLE:
1599 case VM_BEHAVIOR_REUSE:
1600 case VM_BEHAVIOR_CAN_REUSE:
1601 /*
1602 * Align to the hardware page size, to allow
1603 * malloc() to maximize the amount of re-usability,
1604 * even on systems with larger software page size.
1605 */
1606 align_mask = PAGE_MASK;
1607 break;
1608 default:
1609 align_mask = VM_MAP_PAGE_MASK(map);
1610 break;
1611 }
1612
1613 return vm_map_behavior_set(map,
0a7de745
A
1614 vm_map_trunc_page(start, align_mask),
1615 vm_map_round_page(start + size, align_mask),
1616 new_behavior);
91447636 1617}
1c79356b 1618
91447636 1619/*
0a7de745 1620 * vm_behavior_set
91447636
A
1621 *
1622 * Sets the paging behavior attribute for the specified range
1623 * in the specified map.
1624 *
1625 * This routine will fail with KERN_INVALID_ADDRESS if any address
1626 * in [start,start+size) is not a valid allocated memory region.
1627 *
1628 * This routine is potentially limited in addressibility by the
1629 * use of vm_offset_t (if the map provided is larger than the
1630 * kernel's).
1631 */
0a7de745 1632kern_return_t
91447636 1633vm_behavior_set(
0a7de745
A
1634 vm_map_t map,
1635 vm_offset_t start,
1636 vm_size_t size,
1637 vm_behavior_t new_behavior)
91447636 1638{
0a7de745 1639 if (start + size < start) {
39037602 1640 return KERN_INVALID_ARGUMENT;
0a7de745 1641 }
1c79356b 1642
39037602 1643 return mach_vm_behavior_set(map,
0a7de745
A
1644 (mach_vm_offset_t) start,
1645 (mach_vm_size_t) size,
1646 new_behavior);
91447636 1647}
1c79356b 1648
91447636
A
1649/*
1650 * mach_vm_region:
1651 *
1652 * User call to obtain information about a region in
1653 * a task's address map. Currently, only one flavor is
1654 * supported.
1655 *
1656 * XXX The reserved and behavior fields cannot be filled
1657 * in until the vm merge from the IK is completed, and
1658 * vm_reserve is implemented.
1659 *
1660 * XXX Dependency: syscall_vm_region() also supports only one flavor.
1661 */
1c79356b 1662
91447636
A
1663kern_return_t
1664mach_vm_region(
0a7de745
A
1665 vm_map_t map,
1666 mach_vm_offset_t *address, /* IN/OUT */
1667 mach_vm_size_t *size, /* OUT */
1668 vm_region_flavor_t flavor, /* IN */
1669 vm_region_info_t info, /* OUT */
1670 mach_msg_type_number_t *count, /* IN/OUT */
1671 mach_port_t *object_name) /* OUT */
91447636 1672{
0a7de745
A
1673 vm_map_offset_t map_addr;
1674 vm_map_size_t map_size;
1675 kern_return_t kr;
1c79356b 1676
0a7de745 1677 if (VM_MAP_NULL == map) {
91447636 1678 return KERN_INVALID_ARGUMENT;
0a7de745 1679 }
1c79356b 1680
91447636
A
1681 map_addr = (vm_map_offset_t)*address;
1682 map_size = (vm_map_size_t)*size;
1c79356b 1683
91447636 1684 /* legacy conversion */
0a7de745 1685 if (VM_REGION_BASIC_INFO == flavor) {
91447636 1686 flavor = VM_REGION_BASIC_INFO_64;
0a7de745 1687 }
1c79356b 1688
91447636 1689 kr = vm_map_region(map,
0a7de745
A
1690 &map_addr, &map_size,
1691 flavor, info, count,
1692 object_name);
1c79356b 1693
91447636
A
1694 *address = map_addr;
1695 *size = map_size;
1696 return kr;
1697}
1c79356b 1698
91447636
A
1699/*
1700 * vm_region_64 and vm_region:
1701 *
1702 * User call to obtain information about a region in
1703 * a task's address map. Currently, only one flavor is
1704 * supported.
1705 *
1706 * XXX The reserved and behavior fields cannot be filled
1707 * in until the vm merge from the IK is completed, and
1708 * vm_reserve is implemented.
1709 *
1710 * XXX Dependency: syscall_vm_region() also supports only one flavor.
1711 */
1c79356b 1712
91447636
A
1713kern_return_t
1714vm_region_64(
0a7de745
A
1715 vm_map_t map,
1716 vm_offset_t *address, /* IN/OUT */
1717 vm_size_t *size, /* OUT */
1718 vm_region_flavor_t flavor, /* IN */
1719 vm_region_info_t info, /* OUT */
1720 mach_msg_type_number_t *count, /* IN/OUT */
1721 mach_port_t *object_name) /* OUT */
91447636 1722{
0a7de745
A
1723 vm_map_offset_t map_addr;
1724 vm_map_size_t map_size;
1725 kern_return_t kr;
1c79356b 1726
0a7de745 1727 if (VM_MAP_NULL == map) {
91447636 1728 return KERN_INVALID_ARGUMENT;
0a7de745 1729 }
1c79356b 1730
91447636
A
1731 map_addr = (vm_map_offset_t)*address;
1732 map_size = (vm_map_size_t)*size;
1c79356b 1733
91447636 1734 /* legacy conversion */
0a7de745 1735 if (VM_REGION_BASIC_INFO == flavor) {
91447636 1736 flavor = VM_REGION_BASIC_INFO_64;
0a7de745 1737 }
1c79356b 1738
91447636 1739 kr = vm_map_region(map,
0a7de745
A
1740 &map_addr, &map_size,
1741 flavor, info, count,
1742 object_name);
1c79356b 1743
91447636
A
1744 *address = CAST_DOWN(vm_offset_t, map_addr);
1745 *size = CAST_DOWN(vm_size_t, map_size);
1c79356b 1746
0a7de745 1747 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
91447636 1748 return KERN_INVALID_ADDRESS;
0a7de745 1749 }
91447636
A
1750 return kr;
1751}
1c79356b 1752
91447636
A
1753kern_return_t
1754vm_region(
0a7de745
A
1755 vm_map_t map,
1756 vm_address_t *address, /* IN/OUT */
1757 vm_size_t *size, /* OUT */
1758 vm_region_flavor_t flavor, /* IN */
1759 vm_region_info_t info, /* OUT */
1760 mach_msg_type_number_t *count, /* IN/OUT */
1761 mach_port_t *object_name) /* OUT */
91447636 1762{
0a7de745
A
1763 vm_map_address_t map_addr;
1764 vm_map_size_t map_size;
1765 kern_return_t kr;
1c79356b 1766
0a7de745 1767 if (VM_MAP_NULL == map) {
91447636 1768 return KERN_INVALID_ARGUMENT;
0a7de745 1769 }
1c79356b 1770
91447636
A
1771 map_addr = (vm_map_address_t)*address;
1772 map_size = (vm_map_size_t)*size;
1c79356b 1773
91447636 1774 kr = vm_map_region(map,
0a7de745
A
1775 &map_addr, &map_size,
1776 flavor, info, count,
1777 object_name);
1c79356b 1778
91447636
A
1779 *address = CAST_DOWN(vm_address_t, map_addr);
1780 *size = CAST_DOWN(vm_size_t, map_size);
1c79356b 1781
0a7de745 1782 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
91447636 1783 return KERN_INVALID_ADDRESS;
0a7de745 1784 }
91447636
A
1785 return kr;
1786}
1c79356b
A
1787
1788/*
91447636
A
1789 * vm_region_recurse: A form of vm_region which follows the
1790 * submaps in a target map
1c79356b 1791 *
1c79356b
A
1792 */
1793kern_return_t
91447636 1794mach_vm_region_recurse(
0a7de745
A
1795 vm_map_t map,
1796 mach_vm_address_t *address,
1797 mach_vm_size_t *size,
1798 uint32_t *depth,
1799 vm_region_recurse_info_t info,
1800 mach_msg_type_number_t *infoCnt)
1c79356b 1801{
0a7de745
A
1802 vm_map_address_t map_addr;
1803 vm_map_size_t map_size;
1804 kern_return_t kr;
1c79356b 1805
0a7de745 1806 if (VM_MAP_NULL == map) {
91447636 1807 return KERN_INVALID_ARGUMENT;
0a7de745 1808 }
1c79356b 1809
91447636
A
1810 map_addr = (vm_map_address_t)*address;
1811 map_size = (vm_map_size_t)*size;
1812
1813 kr = vm_map_region_recurse_64(
0a7de745
A
1814 map,
1815 &map_addr,
1816 &map_size,
1817 depth,
1818 (vm_region_submap_info_64_t)info,
1819 infoCnt);
91447636
A
1820
1821 *address = map_addr;
1822 *size = map_size;
1823 return kr;
1c79356b
A
1824}
1825
1826/*
91447636
A
1827 * vm_region_recurse: A form of vm_region which follows the
1828 * submaps in a target map
1829 *
1c79356b 1830 */
91447636
A
1831kern_return_t
1832vm_region_recurse_64(
0a7de745
A
1833 vm_map_t map,
1834 vm_address_t *address,
1835 vm_size_t *size,
1836 uint32_t *depth,
1837 vm_region_recurse_info_64_t info,
1838 mach_msg_type_number_t *infoCnt)
1c79356b 1839{
0a7de745
A
1840 vm_map_address_t map_addr;
1841 vm_map_size_t map_size;
1842 kern_return_t kr;
91447636 1843
0a7de745 1844 if (VM_MAP_NULL == map) {
91447636 1845 return KERN_INVALID_ARGUMENT;
0a7de745 1846 }
91447636
A
1847
1848 map_addr = (vm_map_address_t)*address;
1849 map_size = (vm_map_size_t)*size;
1850
1851 kr = vm_map_region_recurse_64(
0a7de745
A
1852 map,
1853 &map_addr,
1854 &map_size,
1855 depth,
1856 (vm_region_submap_info_64_t)info,
1857 infoCnt);
1c79356b 1858
91447636
A
1859 *address = CAST_DOWN(vm_address_t, map_addr);
1860 *size = CAST_DOWN(vm_size_t, map_size);
1861
0a7de745 1862 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
91447636 1863 return KERN_INVALID_ADDRESS;
0a7de745 1864 }
91447636 1865 return kr;
1c79356b
A
1866}
1867
91447636
A
1868kern_return_t
1869vm_region_recurse(
0a7de745
A
1870 vm_map_t map,
1871 vm_offset_t *address, /* IN/OUT */
1872 vm_size_t *size, /* OUT */
1873 natural_t *depth, /* IN/OUT */
1874 vm_region_recurse_info_t info32, /* IN/OUT */
1875 mach_msg_type_number_t *infoCnt) /* IN/OUT */
91447636
A
1876{
1877 vm_region_submap_info_data_64_t info64;
1878 vm_region_submap_info_t info;
0a7de745
A
1879 vm_map_address_t map_addr;
1880 vm_map_size_t map_size;
1881 kern_return_t kr;
91447636 1882
0a7de745 1883 if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) {
91447636 1884 return KERN_INVALID_ARGUMENT;
0a7de745
A
1885 }
1886
91447636 1887
91447636
A
1888 map_addr = (vm_map_address_t)*address;
1889 map_size = (vm_map_size_t)*size;
1890 info = (vm_region_submap_info_t)info32;
1891 *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;
1892
0a7de745
A
1893 kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
1894 depth, &info64, infoCnt);
91447636
A
1895
1896 info->protection = info64.protection;
1897 info->max_protection = info64.max_protection;
1898 info->inheritance = info64.inheritance;
1899 info->offset = (uint32_t)info64.offset; /* trouble-maker */
0a7de745
A
1900 info->user_tag = info64.user_tag;
1901 info->pages_resident = info64.pages_resident;
1902 info->pages_shared_now_private = info64.pages_shared_now_private;
1903 info->pages_swapped_out = info64.pages_swapped_out;
1904 info->pages_dirtied = info64.pages_dirtied;
1905 info->ref_count = info64.ref_count;
1906 info->shadow_depth = info64.shadow_depth;
1907 info->external_pager = info64.external_pager;
1908 info->share_mode = info64.share_mode;
91447636
A
1909 info->is_submap = info64.is_submap;
1910 info->behavior = info64.behavior;
1911 info->object_id = info64.object_id;
0a7de745 1912 info->user_wired_count = info64.user_wired_count;
91447636
A
1913
1914 *address = CAST_DOWN(vm_address_t, map_addr);
1915 *size = CAST_DOWN(vm_size_t, map_size);
1916 *infoCnt = VM_REGION_SUBMAP_INFO_COUNT;
1917
0a7de745 1918 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
91447636 1919 return KERN_INVALID_ADDRESS;
0a7de745 1920 }
91447636
A
1921 return kr;
1922}
1923
2d21ac55
A
1924kern_return_t
1925mach_vm_purgable_control(
0a7de745
A
1926 vm_map_t map,
1927 mach_vm_offset_t address,
1928 vm_purgable_t control,
1929 int *state)
2d21ac55 1930{
0a7de745 1931 if (VM_MAP_NULL == map) {
2d21ac55 1932 return KERN_INVALID_ARGUMENT;
0a7de745 1933 }
2d21ac55 1934
5ba3f43e
A
1935 if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
1936 /* not allowed from user-space */
1937 return KERN_INVALID_ARGUMENT;
1938 }
1939
2d21ac55 1940 return vm_map_purgable_control(map,
0a7de745
A
1941 vm_map_trunc_page(address, PAGE_MASK),
1942 control,
1943 state);
2d21ac55
A
1944}
1945
91447636
A
1946kern_return_t
1947vm_purgable_control(
0a7de745
A
1948 vm_map_t map,
1949 vm_offset_t address,
1950 vm_purgable_t control,
1951 int *state)
91447636 1952{
0a7de745 1953 if (VM_MAP_NULL == map) {
91447636 1954 return KERN_INVALID_ARGUMENT;
0a7de745 1955 }
91447636 1956
5ba3f43e
A
1957 if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
1958 /* not allowed from user-space */
1959 return KERN_INVALID_ARGUMENT;
1960 }
1961
91447636 1962 return vm_map_purgable_control(map,
0a7de745
A
1963 vm_map_trunc_page(address, PAGE_MASK),
1964 control,
1965 state);
91447636 1966}
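/*
 * Illustrative user-space sketch (not part of vm_user.c): typical use of the
 * purgeable-control entry points above on a VM_FLAGS_PURGABLE allocation.
 * Assumes the standard <mach/mach.h> and <mach/mach_vm.h> interfaces; the
 * function name is made up and error handling is abbreviated.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
purgeable_demo(mach_vm_size_t size, mach_vm_address_t *out_addr)
{
	mach_vm_address_t addr = 0;
	int state;
	kern_return_t kr;

	/* Purgeable state can only be controlled on purgeable allocations. */
	kr = mach_vm_allocate(mach_task_self(), &addr, size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Mark the region volatile: the system may reclaim it under pressure. */
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Later: take it back and learn (via the returned old state) whether
	 * the contents were discarded while volatile. */
	state = VM_PURGABLE_NONVOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);
	if (kr == KERN_SUCCESS && state == VM_PURGABLE_EMPTY) {
		/* previous contents were reclaimed; caller must regenerate them */
	}
	*out_addr = addr;
	return kr;
}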
0a7de745 1967
1c79356b
A
1968
1969/*
1970 * Ordinarily, the right to allocate CPM is restricted
1971 * to privileged applications (those that can gain access
91447636
A
1972 * to the host priv port). Set this variable to zero if
1973 * you want to let any application allocate CPM.
1c79356b 1974 */
0a7de745 1975unsigned int vm_allocate_cpm_privileged = 0;
1c79356b
A
1976
1977/*
1978 * Allocate memory in the specified map, with the caveat that
1979 * the memory is physically contiguous. This call may fail
1980 * if the system can't find sufficient contiguous memory.
1981 * This call may cause or lead to heart-stopping amounts of
1982 * paging activity.
1983 *
1984 * Memory obtained from this call should be freed in the
1985 * normal way, viz., via vm_deallocate.
1986 */
1987kern_return_t
1988vm_allocate_cpm(
0a7de745
A
1989 host_priv_t host_priv,
1990 vm_map_t map,
1991 vm_address_t *addr,
1992 vm_size_t size,
1993 int flags)
1c79356b 1994{
0a7de745
A
1995 vm_map_address_t map_addr;
1996 vm_map_size_t map_size;
1997 kern_return_t kr;
1c79356b 1998
0a7de745 1999 if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) {
1c79356b 2000 return KERN_INVALID_HOST;
0a7de745 2001 }
1c79356b 2002
0a7de745 2003 if (VM_MAP_NULL == map) {
1c79356b 2004 return KERN_INVALID_ARGUMENT;
0a7de745 2005 }
1c79356b 2006
91447636
A
2007 map_addr = (vm_map_address_t)*addr;
2008 map_size = (vm_map_size_t)size;
1c79356b 2009
91447636 2010 kr = vm_map_enter_cpm(map,
0a7de745
A
2011 &map_addr,
2012 map_size,
2013 flags);
1c79356b 2014
91447636 2015 *addr = CAST_DOWN(vm_address_t, map_addr);
1c79356b
A
2016 return kr;
2017}
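/*
 * Illustrative sketch (not part of vm_user.c): how a privileged caller might
 * drive vm_allocate_cpm() as declared above. "host_priv" stands for the
 * host-privileged port (subject to vm_allocate_cpm_privileged); the function
 * name is made up and error handling is abbreviated.
 */
static kern_return_t
grab_contiguous(host_priv_t host_priv, vm_map_t map, vm_size_t size)
{
	vm_address_t addr = 0;
	kern_return_t kr;

	kr = vm_allocate_cpm(host_priv, map, &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return kr;      /* contiguous memory may simply not be available */
	}

	/* ... use the physically contiguous range at addr ... */

	/* Freed in the normal way, as the comment above notes. */
	return vm_deallocate(map, addr, size);
}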
2018
2019
91447636
A
2020kern_return_t
2021mach_vm_page_query(
0a7de745
A
2022 vm_map_t map,
2023 mach_vm_offset_t offset,
2024 int *disposition,
2025 int *ref_count)
91447636 2026{
0a7de745 2027 if (VM_MAP_NULL == map) {
91447636 2028 return KERN_INVALID_ARGUMENT;
0a7de745 2029 }
1c79356b 2030
39236c6e
A
2031 return vm_map_page_query_internal(
2032 map,
2033 vm_map_trunc_page(offset, PAGE_MASK),
2034 disposition, ref_count);
91447636 2035}
1c79356b
A
2036
2037kern_return_t
91447636 2038vm_map_page_query(
0a7de745
A
2039 vm_map_t map,
2040 vm_offset_t offset,
2041 int *disposition,
2042 int *ref_count)
1c79356b 2043{
0a7de745 2044 if (VM_MAP_NULL == map) {
91447636 2045 return KERN_INVALID_ARGUMENT;
0a7de745 2046 }
91447636 2047
39236c6e
A
2048 return vm_map_page_query_internal(
2049 map,
2050 vm_map_trunc_page(offset, PAGE_MASK),
2051 disposition, ref_count);
b0d623f7
A
2052}
2053
5ba3f43e
A
2054kern_return_t
2055mach_vm_page_range_query(
0a7de745
A
2056 vm_map_t map,
2057 mach_vm_offset_t address,
2058 mach_vm_size_t size,
2059 mach_vm_address_t dispositions_addr,
2060 mach_vm_size_t *dispositions_count)
5ba3f43e 2061{
0a7de745
A
2062 kern_return_t kr = KERN_SUCCESS;
2063 int num_pages = 0, i = 0;
2064 mach_vm_size_t curr_sz = 0, copy_sz = 0;
2065 mach_vm_size_t disp_buf_req_size = 0, disp_buf_total_size = 0;
2066 mach_msg_type_number_t count = 0;
5ba3f43e 2067
0a7de745
A
2068 void *info = NULL;
2069	void *local_disp = NULL;
2070 vm_map_size_t info_size = 0, local_disp_size = 0;
2071 mach_vm_offset_t start = 0, end = 0;
5ba3f43e
A
2072
2073 if (map == VM_MAP_NULL || dispositions_count == NULL) {
2074 return KERN_INVALID_ARGUMENT;
2075 }
2076
0a7de745 2077 disp_buf_req_size = (*dispositions_count * sizeof(int));
5ba3f43e
A
2078 start = mach_vm_trunc_page(address);
2079 end = mach_vm_round_page(address + size);
2080
2081 if (end < start) {
2082 return KERN_INVALID_ARGUMENT;
2083 }
2084
0a7de745
A
2085 if ((end - start) < size) {
2086 /*
2087 * Aligned size is less than unaligned size.
2088 */
2089 return KERN_INVALID_ARGUMENT;
2090 }
2091
5ba3f43e
A
2092 if (disp_buf_req_size == 0 || (end == start)) {
2093 return KERN_SUCCESS;
2094 }
2095
2096 /*
2097	 * For large requests, we will go through them
2098	 * one MAX_PAGE_RANGE_QUERY chunk at a time.
2099 */
2100
2101 curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY);
2102 num_pages = (int) (curr_sz >> PAGE_SHIFT);
2103
2104 info_size = num_pages * sizeof(vm_page_info_basic_data_t);
2105 info = kalloc(info_size);
2106
2107 if (info == NULL) {
2108 return KERN_RESOURCE_SHORTAGE;
2109 }
2110
2111 local_disp_size = num_pages * sizeof(int);
2112 local_disp = kalloc(local_disp_size);
2113
2114 if (local_disp == NULL) {
5ba3f43e
A
2115 kfree(info, info_size);
2116 info = NULL;
2117 return KERN_RESOURCE_SHORTAGE;
2118 }
2119
2120 while (size) {
5ba3f43e
A
2121 count = VM_PAGE_INFO_BASIC_COUNT;
2122 kr = vm_map_page_range_info_internal(
0a7de745
A
2123 map,
2124 start,
2125 mach_vm_round_page(start + curr_sz),
2126 VM_PAGE_INFO_BASIC,
2127 (vm_page_info_t) info,
2128 &count);
5ba3f43e
A
2129
2130 assert(kr == KERN_SUCCESS);
2131
2132 for (i = 0; i < num_pages; i++) {
5ba3f43e
A
2133 ((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition;
2134 }
2135
0a7de745 2136 copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */);
5ba3f43e
A
2137 kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz);
2138
2139 start += curr_sz;
2140 disp_buf_req_size -= copy_sz;
2141 disp_buf_total_size += copy_sz;
2142
2143 if (kr != 0) {
2144 break;
2145 }
2146
2147 if ((disp_buf_req_size == 0) || (curr_sz >= size)) {
5ba3f43e
A
2148 /*
2149			 * We might have inspected the full range, or
2150			 * even more than that, especially if the user passed
2151			 * in a non-page-aligned start/size and/or if we
2152 * descended into a submap. We are done here.
2153 */
2154
2155 size = 0;
5ba3f43e 2156 } else {
5ba3f43e
A
2157 dispositions_addr += copy_sz;
2158
2159 size -= curr_sz;
2160
2161 curr_sz = MIN(mach_vm_round_page(size), MAX_PAGE_RANGE_QUERY);
2162 num_pages = (int)(curr_sz >> PAGE_SHIFT);
2163 }
2164 }
2165
2166 *dispositions_count = disp_buf_total_size / sizeof(int);
2167
2168 kfree(local_disp, local_disp_size);
2169 local_disp = NULL;
2170
2171 kfree(info, info_size);
2172 info = NULL;
2173
2174 return kr;
2175}
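/*
 * Illustrative user-space sketch (not part of vm_user.c): querying page
 * dispositions for a range, assuming the mach_vm_page_range_query() MIG
 * routine is exported on this platform. The caller passes the address of an
 * int buffer plus its capacity in ints; on return the count reflects the
 * dispositions actually written (VM_PAGE_QUERY_* bits from the Mach headers).
 */
#include <stdint.h>
#include <stdlib.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
query_range(mach_vm_address_t start, mach_vm_size_t size)
{
	mach_vm_size_t count = (size + vm_page_size - 1) / vm_page_size;
	int *disp = calloc((size_t)count, sizeof(int));    /* one int per page */
	kern_return_t kr;

	if (disp == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}
	kr = mach_vm_page_range_query(mach_task_self(), start, size,
	    (mach_vm_address_t)(uintptr_t)disp, &count);
	if (kr == KERN_SUCCESS) {
		/* disp[0..count-1] now hold the per-page disposition bits */
	}
	free(disp);
	return kr;
}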
2176
b0d623f7
A
2177kern_return_t
2178mach_vm_page_info(
0a7de745
A
2179 vm_map_t map,
2180 mach_vm_address_t address,
2181 vm_page_info_flavor_t flavor,
2182 vm_page_info_t info,
2183 mach_msg_type_number_t *count)
b0d623f7 2184{
0a7de745 2185 kern_return_t kr;
b0d623f7
A
2186
2187 if (map == VM_MAP_NULL) {
2188 return KERN_INVALID_ARGUMENT;
2189 }
2190
2191 kr = vm_map_page_info(map, address, flavor, info, count);
2192 return kr;
1c79356b
A
2193}
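/*
 * Illustrative user-space sketch (not part of vm_user.c): the single-page
 * counterpart, using the VM_PAGE_INFO_BASIC flavor handled above. The
 * function name is made up.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
page_info_demo(mach_vm_address_t addr)
{
	vm_page_info_basic_data_t info;
	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
	kern_return_t kr;

	kr = mach_vm_page_info(mach_task_self(), addr, VM_PAGE_INFO_BASIC,
	    (vm_page_info_t)&info, &count);
	if (kr == KERN_SUCCESS) {
		/* inspect info.disposition, info.ref_count, info.object_id, ... */
	}
	return kr;
}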
2194
91447636 2195/* map a (whole) upl into an address space */
1c79356b 2196kern_return_t
91447636 2197vm_upl_map(
0a7de745
A
2198 vm_map_t map,
2199 upl_t upl,
2200 vm_address_t *dst_addr)
1c79356b 2201{
0a7de745
A
2202 vm_map_offset_t map_addr;
2203 kern_return_t kr;
1c79356b 2204
0a7de745 2205 if (VM_MAP_NULL == map) {
91447636 2206 return KERN_INVALID_ARGUMENT;
0a7de745 2207 }
1c79356b 2208
91447636 2209 kr = vm_map_enter_upl(map, upl, &map_addr);
b0d623f7 2210 *dst_addr = CAST_DOWN(vm_address_t, map_addr);
91447636
A
2211 return kr;
2212}
1c79356b 2213
91447636
A
2214kern_return_t
2215vm_upl_unmap(
0a7de745
A
2216 vm_map_t map,
2217 upl_t upl)
91447636 2218{
0a7de745 2219 if (VM_MAP_NULL == map) {
91447636 2220 return KERN_INVALID_ARGUMENT;
0a7de745 2221 }
1c79356b 2222
0a7de745 2223 return vm_map_remove_upl(map, upl);
91447636 2224}
1c79356b 2225
91447636
A
2226/* Retrieve a upl for an object underlying an address range in a map */
2227
2228kern_return_t
2229vm_map_get_upl(
0a7de745
A
2230 vm_map_t map,
2231 vm_map_offset_t map_offset,
2232 upl_size_t *upl_size,
2233 upl_t *upl,
2234 upl_page_info_array_t page_list,
2235 unsigned int *count,
2236 upl_control_flags_t *flags,
2237 vm_tag_t tag,
2238 int force_data_sync)
91447636 2239{
3e170ce0 2240 upl_control_flags_t map_flags;
0a7de745 2241 kern_return_t kr;
1c79356b 2242
0a7de745 2243 if (VM_MAP_NULL == map) {
91447636 2244 return KERN_INVALID_ARGUMENT;
0a7de745 2245 }
1c79356b 2246
91447636 2247 map_flags = *flags & ~UPL_NOZEROFILL;
0a7de745 2248 if (force_data_sync) {
91447636 2249 map_flags |= UPL_FORCE_DATA_SYNC;
0a7de745 2250 }
1c79356b 2251
91447636 2252 kr = vm_map_create_upl(map,
0a7de745
A
2253 map_offset,
2254 upl_size,
2255 upl,
2256 page_list,
2257 count,
2258 &map_flags,
2259 tag);
1c79356b 2260
91447636
A
2261 *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
2262 return kr;
1c79356b
A
2263}
2264
5ba3f43e
A
2265#if CONFIG_EMBEDDED
2266extern int proc_selfpid(void);
2267extern char *proc_name_address(void *p);
2268int cs_executable_mem_entry = 0;
2269int log_executable_mem_entry = 0;
2270#endif /* CONFIG_EMBEDDED */
39037602 2271
1c79356b 2272/*
91447636
A
2273 * mach_make_memory_entry_64
2274 *
2275 * Think of it as a two-stage vm_remap() operation: first
2276 * you get a handle, then you map that handle somewhere
2277 * else, rather than doing it all at once (and without
2278 * needing access to the whole other map).
1c79356b 2279 */
1c79356b
A
2280kern_return_t
2281mach_make_memory_entry_64(
0a7de745
A
2282 vm_map_t target_map,
2283 memory_object_size_t *size,
91447636 2284 memory_object_offset_t offset,
0a7de745
A
2285 vm_prot_t permission,
2286 ipc_port_t *object_handle,
2287 ipc_port_t parent_handle)
9d749ea3
A
2288{
2289 if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) {
2290 /*
2291 * Unknown flag: reject for forward compatibility.
2292 */
2293 return KERN_INVALID_VALUE;
2294 }
2295
2296 return mach_make_memory_entry_internal(target_map,
0a7de745
A
2297 size,
2298 offset,
2299 permission,
2300 object_handle,
2301 parent_handle);
9d749ea3
A
2302}
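/*
 * Illustrative user-space sketch (not part of vm_user.c): the "two-stage
 * vm_remap()" pattern described in the comment above. Stage 1 turns a range
 * of this task's address space into a named-entry handle; stage 2 maps that
 * handle again (here, back into the same task; any task holding the port
 * could do the same). Names are made up; error handling is abbreviated.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
share_range(mach_vm_address_t src, mach_vm_size_t len, mach_vm_address_t *dst)
{
	memory_object_size_t size = len;
	mach_port_t entry = MACH_PORT_NULL;
	kern_return_t kr;

	/* Stage 1: get a handle on the memory backing [src, src+len). */
	kr = mach_make_memory_entry_64(mach_task_self(), &size, src,
	    VM_PROT_READ | VM_PROT_WRITE, &entry, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Stage 2: map the handle somewhere else, sharing the pages. */
	*dst = 0;
	kr = mach_vm_map(mach_task_self(), dst, size, 0, VM_FLAGS_ANYWHERE,
	    entry, 0, FALSE /* copy */,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_NONE);

	mach_port_deallocate(mach_task_self(), entry);
	return kr;
}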
2303
9d749ea3
A
2304kern_return_t
2305mach_make_memory_entry_internal(
0a7de745
A
2306 vm_map_t target_map,
2307 memory_object_size_t *size,
9d749ea3 2308 memory_object_offset_t offset,
0a7de745
A
2309 vm_prot_t permission,
2310 ipc_port_t *object_handle,
2311 ipc_port_t parent_handle)
1c79356b 2312{
0a7de745
A
2313 vm_map_version_t version;
2314 vm_named_entry_t parent_entry;
2315 vm_named_entry_t user_entry;
2316 ipc_port_t user_handle;
2317 kern_return_t kr;
2318 vm_map_t real_map;
1c79356b
A
2319
2320 /* needed for call to vm_map_lookup_locked */
0a7de745
A
2321 boolean_t wired;
2322 boolean_t iskernel;
2323 vm_object_offset_t obj_off;
2324 vm_prot_t prot;
2325 struct vm_object_fault_info fault_info = {};
2326 vm_object_t object;
2327 vm_object_t shadow_object;
1c79356b
A
2328
2329 /* needed for direct map entry manipulation */
0a7de745
A
2330 vm_map_entry_t map_entry;
2331 vm_map_entry_t next_entry;
2332 vm_map_t local_map;
2333 vm_map_t original_map = target_map;
2334 vm_map_size_t total_size, map_size;
2335 vm_map_offset_t map_start, map_end;
2336 vm_map_offset_t local_offset;
2337 vm_object_size_t mappable_size;
2338
2339 /*
39236c6e
A
2340 * Stash the offset in the page for use by vm_map_enter_mem_object()
2341 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
2342 */
0a7de745 2343 vm_object_offset_t offset_in_page;
39236c6e 2344
0a7de745
A
2345 unsigned int access;
2346 vm_prot_t protections;
2347 vm_prot_t original_protections, mask_protections;
2348 unsigned int wimg_mode;
91447636 2349
0a7de745
A
2350 boolean_t force_shadow = FALSE;
2351 boolean_t use_data_addr;
2352 boolean_t use_4K_compat;
d9a64523 2353#if VM_NAMED_ENTRY_LIST
0a7de745 2354 int alias = -1;
d9a64523 2355#endif /* VM_NAMED_ENTRY_LIST */
e2d2fc5c 2356
9d749ea3 2357 if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) {
91447636
A
2358 /*
2359 * Unknown flag: reject for forward compatibility.
2360 */
2361 return KERN_INVALID_VALUE;
2362 }
2363
d9a64523 2364 if (IP_VALID(parent_handle) &&
91447636
A
2365 ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
2366 parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
2367 } else {
2368 parent_entry = NULL;
2369 }
55e303ae 2370
39236c6e
A
2371 if (parent_entry && parent_entry->is_copy) {
2372 return KERN_INVALID_ARGUMENT;
2373 }
2374
6d2010ae
A
2375 original_protections = permission & VM_PROT_ALL;
2376 protections = original_protections;
2377 mask_protections = permission & VM_PROT_IS_MASK;
55e303ae 2378 access = GET_MAP_MEM(permission);
39236c6e 2379 use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0);
3e170ce0 2380 use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0);
55e303ae 2381
91447636
A
2382 user_handle = IP_NULL;
2383 user_entry = NULL;
2384
3e170ce0 2385 map_start = vm_map_trunc_page(offset, PAGE_MASK);
1c79356b 2386
91447636 2387 if (permission & MAP_MEM_ONLY) {
0a7de745 2388 boolean_t parent_is_object;
55e303ae 2389
3e170ce0
A
2390 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2391 map_size = map_end - map_start;
0a7de745 2392
3e170ce0 2393 if (use_data_addr || use_4K_compat || parent_entry == NULL) {
55e303ae
A
2394 return KERN_INVALID_ARGUMENT;
2395 }
91447636 2396
5ba3f43e 2397 parent_is_object = !parent_entry->is_sub_map;
91447636 2398 object = parent_entry->backing.object;
0a7de745 2399 if (parent_is_object && object != VM_OBJECT_NULL) {
55e303ae 2400 wimg_mode = object->wimg_bits;
0a7de745 2401 } else {
6d2010ae 2402 wimg_mode = VM_WIMG_USE_DEFAULT;
0a7de745
A
2403 }
2404 if ((access != GET_MAP_MEM(parent_entry->protection)) &&
2405 !(parent_entry->protection & VM_PROT_WRITE)) {
55e303ae
A
2406 return KERN_INVALID_RIGHT;
2407 }
5ba3f43e 2408 vm_prot_to_wimg(access, &wimg_mode);
0a7de745 2409 if (access != MAP_MEM_NOOP) {
5ba3f43e 2410 SET_MAP_MEM(access, parent_entry->protection);
0a7de745 2411 }
6d2010ae 2412 if (parent_is_object && object &&
0a7de745
A
2413 (access != MAP_MEM_NOOP) &&
2414 (!(object->nophyscache))) {
6d2010ae
A
2415 if (object->wimg_bits != wimg_mode) {
2416 vm_object_lock(object);
2417 vm_object_change_wimg_mode(object, wimg_mode);
2418 vm_object_unlock(object);
55e303ae
A
2419 }
2420 }
0a7de745 2421 if (object_handle) {
91447636 2422 *object_handle = IP_NULL;
0a7de745 2423 }
55e303ae 2424 return KERN_SUCCESS;
39236c6e 2425 } else if (permission & MAP_MEM_NAMED_CREATE) {
3e170ce0
A
2426 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2427 map_size = map_end - map_start;
39236c6e 2428
3e170ce0 2429 if (use_data_addr || use_4K_compat) {
39236c6e
A
2430 return KERN_INVALID_ARGUMENT;
2431 }
55e303ae 2432
91447636
A
2433 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
2434 if (kr != KERN_SUCCESS) {
2435 return KERN_FAILURE;
2436 }
55e303ae 2437
91447636
A
2438 /*
2439 * Force the creation of the VM object now.
2440 */
b0d623f7 2441 if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
91447636 2442 /*
b0d623f7 2443 * LP64todo - for now, we can only allocate 4GB-4096
91447636
A
2444 * internal objects because the default pager can't
2445 * page bigger ones. Remove this when it can.
2446 */
2447 kr = KERN_FAILURE;
2448 goto make_mem_done;
2449 }
1c79356b 2450
91447636
A
2451 object = vm_object_allocate(map_size);
2452 assert(object != VM_OBJECT_NULL);
1c79356b 2453
91447636 2454 if (permission & MAP_MEM_PURGABLE) {
d9a64523
A
2455 task_t owner;
2456
0a7de745 2457 if (!(permission & VM_PROT_WRITE)) {
91447636
A
2458 /* if we can't write, we can't purge */
2459 vm_object_deallocate(object);
2460 kr = KERN_INVALID_ARGUMENT;
2461 goto make_mem_done;
2462 }
2d21ac55 2463 object->purgable = VM_PURGABLE_NONVOLATILE;
5ba3f43e
A
2464 if (permission & MAP_MEM_PURGABLE_KERNEL_ONLY) {
2465 object->purgeable_only_by_kernel = TRUE;
2466 }
d9a64523 2467 assert(object->vo_owner == NULL);
fe8ab488
A
2468 assert(object->resident_page_count == 0);
2469 assert(object->wired_page_count == 0);
2470 vm_object_lock(object);
d9a64523
A
2471 owner = current_task();
2472#if __arm64__
2473 if (owner->task_legacy_footprint) {
2474 /*
2475 * For ios11, we failed to account for
2476 * this memory. Keep doing that for
2477 * legacy apps (built before ios12),
2478 * for backwards compatibility's sake...
2479 */
2480 owner = kernel_task;
5ba3f43e 2481 }
d9a64523
A
2482#endif /* __arm64__ */
2483 vm_purgeable_nonvolatile_enqueue(object, owner);
2484 vm_object_unlock(object);
2485 }
2486
2487 if (permission & MAP_MEM_LEDGER_TAG_NETWORK) {
2488 /* make this object owned by the calling task */
2489 vm_object_lock(object);
2490 vm_object_ownership_change(
2491 object,
2492 VM_OBJECT_LEDGER_TAG_NETWORK,
2493 current_task(), /* new owner */
2494 FALSE); /* task_objq locked? */
fe8ab488 2495 vm_object_unlock(object);
91447636 2496 }
1c79356b 2497
39037602
A
2498#if CONFIG_SECLUDED_MEMORY
2499 if (secluded_for_iokit && /* global boot-arg */
2500 ((permission & MAP_MEM_GRAB_SECLUDED)
2501#if 11
0a7de745
A
2502 /* XXX FBDP for my testing only */
2503 || (secluded_for_fbdp && map_size == 97550336)
39037602 2504#endif
0a7de745 2505 )) {
39037602
A
2506#if 11
2507 if (!(permission & MAP_MEM_GRAB_SECLUDED) &&
2508 secluded_for_fbdp) {
2509 printf("FBDP: object %p size %lld can grab secluded\n", object, (uint64_t) map_size);
2510 }
2511#endif
2512 object->can_grab_secluded = TRUE;
2513 assert(!object->eligible_for_secluded);
2514 }
2515#endif /* CONFIG_SECLUDED_MEMORY */
2516
91447636
A
2517 /*
2518 * The VM object is brand new and nobody else knows about it,
2519 * so we don't need to lock it.
2520 */
1c79356b 2521
91447636 2522 wimg_mode = object->wimg_bits;
5ba3f43e 2523 vm_prot_to_wimg(access, &wimg_mode);
0a7de745
A
2524 if (access != MAP_MEM_NOOP) {
2525 object->wimg_bits = wimg_mode;
2526 }
5ba3f43e 2527
91447636 2528 /* the object has no pages, so no WIMG bits to update here */
1c79356b 2529
91447636
A
2530 /*
2531 * XXX
2532 * We use this path when we want to make sure that
2533 * nobody messes with the object (coalesce, for
2534 * example) before we map it.
2535 * We might want to use these objects for transposition via
2536 * vm_object_transpose() too, so we don't want any copy or
2537 * shadow objects either...
2538 */
2539 object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
fe8ab488 2540 object->true_share = TRUE;
1c79356b 2541
91447636
A
2542 user_entry->backing.object = object;
2543 user_entry->internal = TRUE;
2544 user_entry->is_sub_map = FALSE;
91447636 2545 user_entry->offset = 0;
39236c6e 2546 user_entry->data_offset = 0;
91447636
A
2547 user_entry->protection = protections;
2548 SET_MAP_MEM(access, user_entry->protection);
2549 user_entry->size = map_size;
55e303ae
A
2550
2551 /* user_object pager and internal fields are not used */
2552 /* when the object field is filled in. */
2553
3e170ce0 2554 *size = CAST_DOWN(vm_size_t, (user_entry->size -
0a7de745 2555 user_entry->data_offset));
55e303ae
A
2556 *object_handle = user_handle;
2557 return KERN_SUCCESS;
2558 }
2559
39236c6e 2560 if (permission & MAP_MEM_VM_COPY) {
0a7de745 2561 vm_map_copy_t copy;
39236c6e
A
2562
2563 if (target_map == VM_MAP_NULL) {
2564 return KERN_INVALID_TASK;
2565 }
2566
3e170ce0
A
2567 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2568 map_size = map_end - map_start;
2569 if (use_data_addr || use_4K_compat) {
2570 offset_in_page = offset - map_start;
0a7de745 2571 if (use_4K_compat) {
3e170ce0 2572 offset_in_page &= ~((signed)(0xFFF));
0a7de745 2573 }
39236c6e 2574 } else {
39236c6e
A
2575 offset_in_page = 0;
2576 }
2577
4bd07ac2 2578 kr = vm_map_copyin_internal(target_map,
0a7de745
A
2579 map_start,
2580 map_size,
2581 VM_MAP_COPYIN_ENTRY_LIST,
2582 &copy);
39236c6e
A
2583 if (kr != KERN_SUCCESS) {
2584 return kr;
2585 }
0a7de745 2586
39236c6e
A
2587 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
2588 if (kr != KERN_SUCCESS) {
2589 vm_map_copy_discard(copy);
2590 return KERN_FAILURE;
2591 }
2592
2593 user_entry->backing.copy = copy;
2594 user_entry->internal = FALSE;
2595 user_entry->is_sub_map = FALSE;
39236c6e
A
2596 user_entry->is_copy = TRUE;
2597 user_entry->offset = 0;
2598 user_entry->protection = protections;
2599 user_entry->size = map_size;
2600 user_entry->data_offset = offset_in_page;
2601
3e170ce0 2602 *size = CAST_DOWN(vm_size_t, (user_entry->size -
0a7de745 2603 user_entry->data_offset));
39236c6e
A
2604 *object_handle = user_handle;
2605 return KERN_SUCCESS;
2606 }
2607
2608 if (permission & MAP_MEM_VM_SHARE) {
0a7de745
A
2609 vm_map_copy_t copy;
2610 vm_prot_t cur_prot, max_prot;
39236c6e
A
2611
2612 if (target_map == VM_MAP_NULL) {
2613 return KERN_INVALID_TASK;
2614 }
2615
3e170ce0
A
2616 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2617 map_size = map_end - map_start;
2618 if (use_data_addr || use_4K_compat) {
2619 offset_in_page = offset - map_start;
0a7de745 2620 if (use_4K_compat) {
3e170ce0 2621 offset_in_page &= ~((signed)(0xFFF));
0a7de745 2622 }
39236c6e 2623 } else {
39236c6e
A
2624 offset_in_page = 0;
2625 }
2626
39037602 2627 cur_prot = VM_PROT_ALL;
39236c6e 2628 kr = vm_map_copy_extract(target_map,
0a7de745
A
2629 map_start,
2630 map_size,
2631 &copy,
2632 &cur_prot,
2633 &max_prot);
39236c6e
A
2634 if (kr != KERN_SUCCESS) {
2635 return kr;
2636 }
2637
2638 if (mask_protections) {
2639 /*
0a7de745 2640 * We just want as much of "original_protections"
39236c6e
A
2641 * as we can get out of the actual "cur_prot".
2642 */
2643 protections &= cur_prot;
2644 if (protections == VM_PROT_NONE) {
2645 /* no access at all: fail */
2646 vm_map_copy_discard(copy);
2647 return KERN_PROTECTION_FAILURE;
2648 }
2649 } else {
2650 /*
2651 * We want exactly "original_protections"
2652 * out of "cur_prot".
2653 */
2654 if ((cur_prot & protections) != protections) {
2655 vm_map_copy_discard(copy);
2656 return KERN_PROTECTION_FAILURE;
2657 }
2658 }
2659
2660 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
2661 if (kr != KERN_SUCCESS) {
2662 vm_map_copy_discard(copy);
2663 return KERN_FAILURE;
2664 }
2665
2666 user_entry->backing.copy = copy;
2667 user_entry->internal = FALSE;
2668 user_entry->is_sub_map = FALSE;
39236c6e
A
2669 user_entry->is_copy = TRUE;
2670 user_entry->offset = 0;
2671 user_entry->protection = protections;
2672 user_entry->size = map_size;
2673 user_entry->data_offset = offset_in_page;
2674
3e170ce0 2675 *size = CAST_DOWN(vm_size_t, (user_entry->size -
0a7de745 2676 user_entry->data_offset));
39236c6e
A
2677 *object_handle = user_handle;
2678 return KERN_SUCCESS;
2679 }
2680
91447636
A
2681 if (parent_entry == NULL ||
2682 (permission & MAP_MEM_NAMED_REUSE)) {
3e170ce0
A
2683 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2684 map_size = map_end - map_start;
2685 if (use_data_addr || use_4K_compat) {
2686 offset_in_page = offset - map_start;
0a7de745 2687 if (use_4K_compat) {
3e170ce0 2688 offset_in_page &= ~((signed)(0xFFF));
0a7de745 2689 }
39236c6e 2690 } else {
39236c6e
A
2691 offset_in_page = 0;
2692 }
2693
91447636
A
2694 /* Create a named object based on address range within the task map */
2695 /* Go find the object at given address */
1c79356b 2696
2d21ac55
A
2697 if (target_map == VM_MAP_NULL) {
2698 return KERN_INVALID_TASK;
2699 }
2700
91447636 2701redo_lookup:
6d2010ae 2702 protections = original_protections;
1c79356b
A
2703 vm_map_lock_read(target_map);
2704
2705 /* get the object associated with the target address */
2706 /* note we check the permission of the range against */
2707 /* that requested by the caller */
2708
0a7de745
A
2709 kr = vm_map_lookup_locked(&target_map, map_start,
2710 protections | mask_protections,
2711 OBJECT_LOCK_EXCLUSIVE, &version,
2712 &object, &obj_off, &prot, &wired,
2713 &fault_info,
2714 &real_map);
1c79356b
A
2715 if (kr != KERN_SUCCESS) {
2716 vm_map_unlock_read(target_map);
2717 goto make_mem_done;
2718 }
6d2010ae
A
2719 if (mask_protections) {
2720 /*
2721 * The caller asked us to use the "protections" as
2722 * a mask, so restrict "protections" to what this
2723 * mapping actually allows.
2724 */
2725 protections &= prot;
2726 }
5ba3f43e
A
2727#if CONFIG_EMBEDDED
2728 /*
2729 * Wiring would copy the pages to a shadow object.
2730 * The shadow object would not be code-signed so
2731 * attempting to execute code from these copied pages
2732 * would trigger a code-signing violation.
2733 */
2734 if (prot & VM_PROT_EXECUTE) {
2735 if (log_executable_mem_entry) {
2736 void *bsd_info;
2737 bsd_info = current_task()->bsd_info;
2738 printf("pid %d[%s] making memory entry out of "
0a7de745
A
2739 "executable range from 0x%llx to 0x%llx:"
2740 "might cause code-signing issues "
2741 "later\n",
2742 proc_selfpid(),
2743 (bsd_info != NULL
2744 ? proc_name_address(bsd_info)
2745 : "?"),
2746 (uint64_t) map_start,
2747 (uint64_t) map_end);
5ba3f43e
A
2748 }
2749 DTRACE_VM2(cs_executable_mem_entry,
0a7de745
A
2750 uint64_t, (uint64_t)map_start,
2751 uint64_t, (uint64_t)map_end);
5ba3f43e
A
2752 cs_executable_mem_entry++;
2753
2754#if 11
2755 /*
2756 * We don't know how the memory entry will be used.
2757 * It might never get wired and might not cause any
2758 * trouble, so let's not reject this request...
2759 */
2760#else /* 11 */
2761 kr = KERN_PROTECTION_FAILURE;
2762 vm_object_unlock(object);
2763 vm_map_unlock_read(target_map);
0a7de745 2764 if (real_map != target_map) {
5ba3f43e 2765 vm_map_unlock_read(real_map);
0a7de745 2766 }
5ba3f43e
A
2767 goto make_mem_done;
2768#endif /* 11 */
5ba3f43e
A
2769 }
2770#endif /* CONFIG_EMBEDDED */
39037602 2771
0a7de745 2772 if (((prot & protections) != protections)
39037602 2773 || (object == kernel_object)) {
1c79356b
A
2774 kr = KERN_INVALID_RIGHT;
2775 vm_object_unlock(object);
2776 vm_map_unlock_read(target_map);
0a7de745 2777 if (real_map != target_map) {
91447636 2778 vm_map_unlock_read(real_map);
0a7de745
A
2779 }
2780 if (object == kernel_object) {
9bccf70c 2781 printf("Warning: Attempt to create a named"
0a7de745 2782 " entry from the kernel_object\n");
9bccf70c 2783 }
1c79356b
A
2784 goto make_mem_done;
2785 }
2786
2787 /* We have an object, now check to see if this object */
2788 /* is suitable. If not, create a shadow and share that */
91447636
A
2789
2790 /*
2791 * We have to unlock the VM object to avoid deadlocking with
2792 * a VM map lock (the lock ordering is map, the object), if we
2793 * need to modify the VM map to create a shadow object. Since
2794 * we might release the VM map lock below anyway, we have
2795 * to release the VM map lock now.
2796 * XXX FBDP There must be a way to avoid this double lookup...
2797 *
2798 * Take an extra reference on the VM object to make sure it's
2799 * not going to disappear.
2800 */
2801 vm_object_reference_locked(object); /* extra ref to hold obj */
2802 vm_object_unlock(object);
2803
9bccf70c 2804 local_map = original_map;
3e170ce0 2805 local_offset = map_start;
0a7de745 2806 if (target_map != local_map) {
9bccf70c 2807 vm_map_unlock_read(target_map);
0a7de745 2808 if (real_map != target_map) {
91447636 2809 vm_map_unlock_read(real_map);
0a7de745 2810 }
9bccf70c
A
2811 vm_map_lock_read(local_map);
2812 target_map = local_map;
91447636 2813 real_map = local_map;
9bccf70c 2814 }
0a7de745
A
2815 while (TRUE) {
2816 if (!vm_map_lookup_entry(local_map,
2817 local_offset, &map_entry)) {
2818 kr = KERN_INVALID_ARGUMENT;
2819 vm_map_unlock_read(target_map);
2820 if (real_map != target_map) {
2821 vm_map_unlock_read(real_map);
2822 }
2823 vm_object_deallocate(object); /* release extra ref */
2824 object = VM_OBJECT_NULL;
2825 goto make_mem_done;
2826 }
2827 iskernel = (local_map->pmap == kernel_pmap);
2828 if (!(map_entry->is_sub_map)) {
2829 if (VME_OBJECT(map_entry) != object) {
2830 kr = KERN_INVALID_ARGUMENT;
2831 vm_map_unlock_read(target_map);
2832 if (real_map != target_map) {
2833 vm_map_unlock_read(real_map);
2834 }
2835 vm_object_deallocate(object); /* release extra ref */
2836 object = VM_OBJECT_NULL;
2837 goto make_mem_done;
2838 }
2839 break;
2840 } else {
2841 vm_map_t tmap;
2842 tmap = local_map;
2843 local_map = VME_SUBMAP(map_entry);
2844
2845 vm_map_lock_read(local_map);
2846 vm_map_unlock_read(tmap);
2847 target_map = local_map;
2848 real_map = local_map;
2849 local_offset = local_offset - map_entry->vme_start;
2850 local_offset += VME_OFFSET(map_entry);
2851 }
1c79356b 2852 }
91447636 2853
d9a64523
A
2854#if VM_NAMED_ENTRY_LIST
2855 alias = VME_ALIAS(map_entry);
2856#endif /* VM_NAMED_ENTRY_LIST */
2857
91447636
A
2858 /*
2859 * We found the VM map entry, lock the VM object again.
2860 */
2861 vm_object_lock(object);
0a7de745
A
2862 if (map_entry->wired_count) {
2863 /* JMM - The check below should be reworked instead. */
2864 object->true_share = TRUE;
2865 }
6d2010ae
A
2866 if (mask_protections) {
2867 /*
2868 * The caller asked us to use the "protections" as
2869 * a mask, so restrict "protections" to what this
2870 * mapping actually allows.
2871 */
2872 protections &= map_entry->max_protection;
2873 }
0a7de745
A
2874 if (((map_entry->max_protection) & protections) != protections) {
2875 kr = KERN_INVALID_RIGHT;
2876 vm_object_unlock(object);
2877 vm_map_unlock_read(target_map);
2878 if (real_map != target_map) {
91447636 2879 vm_map_unlock_read(real_map);
0a7de745
A
2880 }
2881 vm_object_deallocate(object);
2882 object = VM_OBJECT_NULL;
2883 goto make_mem_done;
1c79356b 2884 }
9bccf70c 2885
2d21ac55 2886 mappable_size = fault_info.hi_offset - obj_off;
9bccf70c 2887 total_size = map_entry->vme_end - map_entry->vme_start;
0a7de745 2888 if (map_size > mappable_size) {
9bccf70c
A
2889 /* try to extend mappable size if the entries */
2890 /* following are from the same object and are */
2891 /* compatible */
2892 next_entry = map_entry->vme_next;
2893 /* lets see if the next map entry is still */
2894 /* pointing at this object and is contiguous */
0a7de745 2895 while (map_size > mappable_size) {
3e170ce0 2896 if ((VME_OBJECT(next_entry) == object) &&
0a7de745
A
2897 (next_entry->vme_start ==
2898 next_entry->vme_prev->vme_end) &&
2899 (VME_OFFSET(next_entry) ==
2900 (VME_OFFSET(next_entry->vme_prev) +
2901 (next_entry->vme_prev->vme_end -
2902 next_entry->vme_prev->vme_start)))) {
6d2010ae
A
2903 if (mask_protections) {
2904 /*
2905 * The caller asked us to use
2906 * the "protections" as a mask,
2907 * so restrict "protections" to
2908 * what this mapping actually
2909 * allows.
2910 */
2911 protections &= next_entry->max_protection;
2912 }
316670eb
A
2913 if ((next_entry->wired_count) &&
2914 (map_entry->wired_count == 0)) {
2915 break;
2916 }
0a7de745
A
2917 if (((next_entry->max_protection)
2918 & protections) != protections) {
2919 break;
9bccf70c 2920 }
55e303ae 2921 if (next_entry->needs_copy !=
0a7de745 2922 map_entry->needs_copy) {
55e303ae 2923 break;
0a7de745 2924 }
9bccf70c 2925 mappable_size += next_entry->vme_end
0a7de745 2926 - next_entry->vme_start;
9bccf70c 2927 total_size += next_entry->vme_end
0a7de745 2928 - next_entry->vme_start;
9bccf70c
A
2929 next_entry = next_entry->vme_next;
2930 } else {
2931 break;
2932 }
9bccf70c
A
2933 }
2934 }
2935
3e170ce0 2936 /* vm_map_entry_should_cow_for_true_share() checks for malloc tags,
0a7de745 2937 * never true in kernel */
3e170ce0 2938 if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) &&
e2d2fc5c
A
2939 object->vo_size > map_size &&
2940 map_size != 0) {
2941 /*
2942 * Set up the targeted range for copy-on-write to
2943 * limit the impact of "true_share"/"copy_delay" to
2944 * that range instead of the entire VM object...
2945 */
0a7de745 2946
e2d2fc5c
A
2947 vm_object_unlock(object);
2948 if (vm_map_lock_read_to_write(target_map)) {
2949 vm_object_deallocate(object);
2950 target_map = original_map;
2951 goto redo_lookup;
2952 }
2953
39236c6e 2954 vm_map_clip_start(target_map,
0a7de745
A
2955 map_entry,
2956 vm_map_trunc_page(map_start,
2957 VM_MAP_PAGE_MASK(target_map)));
39236c6e 2958 vm_map_clip_end(target_map,
0a7de745
A
2959 map_entry,
2960 (vm_map_round_page(map_end,
2961 VM_MAP_PAGE_MASK(target_map))));
e2d2fc5c
A
2962 force_shadow = TRUE;
2963
fe8ab488 2964 if ((map_entry->vme_end - offset) < map_size) {
3e170ce0 2965 map_size = map_entry->vme_end - map_start;
fe8ab488
A
2966 }
2967 total_size = map_entry->vme_end - map_entry->vme_start;
e2d2fc5c
A
2968
2969 vm_map_lock_write_to_read(target_map);
2970 vm_object_lock(object);
2971 }
e2d2fc5c 2972
39236c6e 2973 if (object->internal) {
0a7de745
A
2974 /* vm_map_lookup_locked will create a shadow if */
2975 /* needs_copy is set but does not check for the */
2976 /* other two conditions shown. It is important to */
1c79356b
A
2977 /* set up an object which will not be pulled from */
2978 /* under us. */
2979
0a7de745
A
2980 if (force_shadow ||
2981 ((map_entry->needs_copy ||
2982 object->shadowed ||
2983 (object->vo_size > total_size &&
2984 (VME_OFFSET(map_entry) != 0 ||
2985 object->vo_size >
2986 vm_map_round_page(total_size,
2987 VM_MAP_PAGE_MASK(target_map)))))
2988 && !object->true_share
2989 && object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)) {
91447636
A
2990 /*
2991 * We have to unlock the VM object before
2992 * trying to upgrade the VM map lock, to
2993 * honor lock ordering (map then object).
2994 * Otherwise, we would deadlock if another
2995 * thread holds a read lock on the VM map and
2996 * is trying to acquire the VM object's lock.
2997 * We still hold an extra reference on the
2998 * VM object, guaranteeing that it won't
2999 * disappear.
3000 */
3001 vm_object_unlock(object);
3002
0a7de745 3003 if (vm_map_lock_read_to_write(target_map)) {
91447636
A
3004 /*
3005 * We couldn't upgrade our VM map lock
3006 * from "read" to "write" and we lost
3007 * our "read" lock.
3008 * Start all over again...
3009 */
3010 vm_object_deallocate(object); /* extra ref */
3011 target_map = original_map;
0a7de745
A
3012 goto redo_lookup;
3013 }
fe8ab488 3014#if 00
91447636 3015 vm_object_lock(object);
fe8ab488 3016#endif
1c79356b 3017
0a7de745 3018 /*
55e303ae
A
3019 * JMM - We need to avoid coming here when the object
3020 * is wired by anybody, not just the current map. Why
3021 * couldn't we use the standard vm_object_copy_quickly()
3022 * approach here?
3023 */
0a7de745
A
3024
3025 /* create a shadow object */
3e170ce0
A
3026 VME_OBJECT_SHADOW(map_entry, total_size);
3027 shadow_object = VME_OBJECT(map_entry);
fe8ab488 3028#if 00
9bccf70c 3029 vm_object_unlock(object);
fe8ab488 3030#endif
91447636 3031
0c530ab8 3032 prot = map_entry->protection & ~VM_PROT_WRITE;
2d21ac55 3033
3e170ce0 3034 if (override_nx(target_map,
0a7de745
A
3035 VME_ALIAS(map_entry))
3036 && prot) {
3037 prot |= VM_PROT_EXECUTE;
3038 }
2d21ac55 3039
9bccf70c 3040 vm_object_pmap_protect(
3e170ce0 3041 object, VME_OFFSET(map_entry),
9bccf70c 3042 total_size,
0a7de745
A
3043 ((map_entry->is_shared
3044 || target_map->mapped_in_other_pmaps)
3045 ? PMAP_NULL :
3046 target_map->pmap),
9bccf70c 3047 map_entry->vme_start,
0c530ab8 3048 prot);
0a7de745
A
3049 total_size -= (map_entry->vme_end
3050 - map_entry->vme_start);
9bccf70c
A
3051 next_entry = map_entry->vme_next;
3052 map_entry->needs_copy = FALSE;
2d21ac55
A
3053
3054 vm_object_lock(shadow_object);
9bccf70c 3055 while (total_size) {
0a7de745
A
3056 assert((next_entry->wired_count == 0) ||
3057 (map_entry->wired_count));
3058
3059 if (VME_OBJECT(next_entry) == object) {
3060 vm_object_reference_locked(shadow_object);
3061 VME_OBJECT_SET(next_entry,
3062 shadow_object);
3063 vm_object_deallocate(object);
3064 VME_OFFSET_SET(
3065 next_entry,
3066 (VME_OFFSET(next_entry->vme_prev) +
3067 (next_entry->vme_prev->vme_end
3068 - next_entry->vme_prev->vme_start)));
3069 next_entry->use_pmap = TRUE;
9bccf70c
A
3070 next_entry->needs_copy = FALSE;
3071 } else {
3072 panic("mach_make_memory_entry_64:"
0a7de745 3073 " map entries out of sync\n");
9bccf70c 3074 }
0a7de745
A
3075 total_size -=
3076 next_entry->vme_end
3077 - next_entry->vme_start;
9bccf70c
A
3078 next_entry = next_entry->vme_next;
3079 }
3080
91447636
A
3081 /*
3082 * Transfer our extra reference to the
3083 * shadow object.
3084 */
3085 vm_object_reference_locked(shadow_object);
3086 vm_object_deallocate(object); /* extra ref */
9bccf70c 3087 object = shadow_object;
91447636 3088
3e170ce0 3089 obj_off = ((local_offset - map_entry->vme_start)
0a7de745 3090 + VME_OFFSET(map_entry));
1c79356b 3091
91447636 3092 vm_map_lock_write_to_read(target_map);
0a7de745
A
3093 }
3094 }
1c79356b
A
3095
3096	/* note: in the future we can (if necessary) allow for */
3097	/* memory object lists; this would better support */
3098	/* fragmentation, but is it necessary? The user should */
3099	/* be encouraged to create address-space-oriented */
3100	/* shared objects from CLEAN memory regions which have */
3101	/* a known and defined history, i.e. no inheritance */
3102	/* sharing; make this call before making the region the */
3103	/* target of IPCs, etc. The code above, protecting */
3104	/* against delayed copy, etc., is mostly defensive. */
3105
55e303ae 3106 wimg_mode = object->wimg_bits;
0a7de745 3107 if (!(object->nophyscache)) {
5ba3f43e 3108 vm_prot_to_wimg(access, &wimg_mode);
0a7de745 3109 }
d7e50217 3110
fe8ab488
A
3111#if VM_OBJECT_TRACKING_OP_TRUESHARE
3112 if (!object->true_share &&
3113 vm_object_tracking_inited) {
3114 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
3115 int num = 0;
3116
3117 num = OSBacktrace(bt,
0a7de745 3118 VM_OBJECT_TRACKING_BTDEPTH);
fe8ab488 3119 btlog_add_entry(vm_object_tracking_btlog,
0a7de745
A
3120 object,
3121 VM_OBJECT_TRACKING_OP_TRUESHARE,
3122 bt,
3123 num);
fe8ab488
A
3124 }
3125#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
3126
39037602 3127 vm_object_lock_assert_exclusive(object);
de355530 3128 object->true_share = TRUE;
0a7de745 3129 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
55e303ae 3130 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
0a7de745 3131 }
55e303ae 3132
91447636
A
3133 /*
3134 * The memory entry now points to this VM object and we
3135 * need to hold a reference on the VM object. Use the extra
3136 * reference we took earlier to keep the object alive when we
3137 * had to unlock it.
3138 */
3139
55e303ae 3140 vm_map_unlock_read(target_map);
0a7de745 3141 if (real_map != target_map) {
91447636 3142 vm_map_unlock_read(real_map);
0a7de745 3143 }
55e303ae 3144
0a7de745 3145 if (object->wimg_bits != wimg_mode) {
6d2010ae 3146 vm_object_change_wimg_mode(object, wimg_mode);
0a7de745 3147 }
1c79356b
A
3148
3149 /* the size of mapped entry that overlaps with our region */
3150 /* which is targeted for share. */
3151 /* (entry_end - entry_start) - */
3152 /* offset of our beg addr within entry */
3153 /* it corresponds to this: */
3154
0a7de745 3155 if (map_size > mappable_size) {
91447636 3156 map_size = mappable_size;
0a7de745 3157 }
91447636
A
3158
3159 if (permission & MAP_MEM_NAMED_REUSE) {
3160 /*
3161 * Compare what we got with the "parent_entry".
3162 * If they match, re-use the "parent_entry" instead
3163 * of creating a new one.
3164 */
3165 if (parent_entry != NULL &&
3166 parent_entry->backing.object == object &&
3167 parent_entry->internal == object->internal &&
3168 parent_entry->is_sub_map == FALSE &&
91447636
A
3169 parent_entry->offset == obj_off &&
3170 parent_entry->protection == protections &&
39236c6e 3171 parent_entry->size == map_size &&
3e170ce0 3172 ((!(use_data_addr || use_4K_compat) &&
0a7de745
A
3173 (parent_entry->data_offset == 0)) ||
3174 ((use_data_addr || use_4K_compat) &&
3175 (parent_entry->data_offset == offset_in_page)))) {
91447636
A
3176 /*
3177 * We have a match: re-use "parent_entry".
3178 */
3179 /* release our extra reference on object */
3180 vm_object_unlock(object);
3181 vm_object_deallocate(object);
3182 /* parent_entry->ref_count++; XXX ? */
3183 /* Get an extra send-right on handle */
3184 ipc_port_copy_send(parent_handle);
fe8ab488 3185
3e170ce0 3186 *size = CAST_DOWN(vm_size_t,
0a7de745
A
3187 (parent_entry->size -
3188 parent_entry->data_offset));
91447636
A
3189 *object_handle = parent_handle;
3190 return KERN_SUCCESS;
3191 } else {
3192 /*
3193 * No match: we need to create a new entry.
3194 * fall through...
3195 */
3196 }
3197 }
3198
3199 vm_object_unlock(object);
3200 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3201 != KERN_SUCCESS) {
3202 /* release our unused reference on the object */
3203 vm_object_deallocate(object);
3204 return KERN_FAILURE;
3205 }
1c79356b 3206
91447636
A
3207 user_entry->backing.object = object;
3208 user_entry->internal = object->internal;
3209 user_entry->is_sub_map = FALSE;
91447636 3210 user_entry->offset = obj_off;
39236c6e 3211 user_entry->data_offset = offset_in_page;
6d2010ae
A
3212 user_entry->protection = protections;
3213 SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
91447636 3214 user_entry->size = map_size;
d9a64523
A
3215#if VM_NAMED_ENTRY_LIST
3216 user_entry->named_entry_alias = alias;
3217#endif /* VM_NAMED_ENTRY_LIST */
1c79356b
A
3218
3219 /* user_object pager and internal fields are not used */
3220 /* when the object field is filled in. */
3221
3e170ce0 3222 *size = CAST_DOWN(vm_size_t, (user_entry->size -
0a7de745 3223 user_entry->data_offset));
1c79356b 3224 *object_handle = user_handle;
1c79356b 3225 return KERN_SUCCESS;
91447636 3226 } else {
1c79356b 3227	/* The new object will be based on an existing named object */
91447636 3228 if (parent_entry == NULL) {
1c79356b
A
3229 kr = KERN_INVALID_ARGUMENT;
3230 goto make_mem_done;
3231 }
39236c6e 3232
3e170ce0 3233 if (use_data_addr || use_4K_compat) {
39236c6e
A
3234 /*
3235		 * Submaps and pagers should only be accessible from within
3236		 * the kernel, which shouldn't use the data address flag, so we can fail here.
3237 */
5ba3f43e
A
3238 if (parent_entry->is_sub_map) {
3239 panic("Shouldn't be using data address with a parent entry that is a submap.");
39236c6e
A
3240 }
3241 /*
3242 * Account for offset to data in parent entry and
3243 * compute our own offset to data.
3244 */
0a7de745 3245 if ((offset + *size + parent_entry->data_offset) > parent_entry->size) {
39236c6e
A
3246 kr = KERN_INVALID_ARGUMENT;
3247 goto make_mem_done;
3248 }
3249
3e170ce0
A
3250 map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
3251 offset_in_page = (offset + parent_entry->data_offset) - map_start;
0a7de745 3252 if (use_4K_compat) {
3e170ce0 3253 offset_in_page &= ~((signed)(0xFFF));
0a7de745 3254 }
3e170ce0
A
3255 map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
3256 map_size = map_end - map_start;
39236c6e 3257 } else {
3e170ce0
A
3258 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
3259 map_size = map_end - map_start;
39236c6e
A
3260 offset_in_page = 0;
3261
0a7de745 3262 if ((offset + map_size) > parent_entry->size) {
39236c6e
A
3263 kr = KERN_INVALID_ARGUMENT;
3264 goto make_mem_done;
3265 }
1c79356b
A
3266 }
3267
6d2010ae
A
3268 if (mask_protections) {
3269 /*
3270 * The caller asked us to use the "protections" as
3271 * a mask, so restrict "protections" to what this
3272 * mapping actually allows.
3273 */
3274 protections &= parent_entry->protection;
3275 }
0a7de745 3276 if ((protections & parent_entry->protection) != protections) {
91447636
A
3277 kr = KERN_PROTECTION_FAILURE;
3278 goto make_mem_done;
3279 }
3280
3281 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3282 != KERN_SUCCESS) {
3283 kr = KERN_FAILURE;
3284 goto make_mem_done;
55e303ae 3285 }
91447636
A
3286
3287 user_entry->size = map_size;
3e170ce0 3288 user_entry->offset = parent_entry->offset + map_start;
0a7de745 3289 user_entry->data_offset = offset_in_page;
91447636 3290 user_entry->is_sub_map = parent_entry->is_sub_map;
39236c6e 3291 user_entry->is_copy = parent_entry->is_copy;
91447636
A
3292 user_entry->internal = parent_entry->internal;
3293 user_entry->protection = protections;
3294
0a7de745
A
3295 if (access != MAP_MEM_NOOP) {
3296 SET_MAP_MEM(access, user_entry->protection);
1c79356b 3297 }
91447636 3298
0a7de745
A
3299 if (parent_entry->is_sub_map) {
3300 user_entry->backing.map = parent_entry->backing.map;
3301 vm_map_lock(user_entry->backing.map);
3302 user_entry->backing.map->map_refcnt++;
3303 vm_map_unlock(user_entry->backing.map);
91447636 3304 } else {
0a7de745
A
3305 object = parent_entry->backing.object;
3306 assert(object != VM_OBJECT_NULL);
3307 user_entry->backing.object = object;
3308 /* we now point to this object, hold on */
3309 vm_object_lock(object);
3310 vm_object_reference_locked(object);
fe8ab488 3311#if VM_OBJECT_TRACKING_OP_TRUESHARE
0a7de745
A
3312 if (!object->true_share &&
3313 vm_object_tracking_inited) {
3314 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
3315 int num = 0;
3316
3317 num = OSBacktrace(bt,
3318 VM_OBJECT_TRACKING_BTDEPTH);
3319 btlog_add_entry(vm_object_tracking_btlog,
3320 object,
3321 VM_OBJECT_TRACKING_OP_TRUESHARE,
3322 bt,
3323 num);
3324 }
fe8ab488
A
3325#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
3326
0a7de745
A
3327 object->true_share = TRUE;
3328 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
3329 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
3330 }
3331 vm_object_unlock(object);
1c79356b 3332 }
3e170ce0 3333 *size = CAST_DOWN(vm_size_t, (user_entry->size -
0a7de745 3334 user_entry->data_offset));
1c79356b
A
3335 *object_handle = user_handle;
3336 return KERN_SUCCESS;
3337 }
3338
1c79356b 3339make_mem_done:
91447636 3340 if (user_handle != IP_NULL) {
0b4c1975
A
3341 /*
3342 * Releasing "user_handle" causes the kernel object
3343 * associated with it ("user_entry" here) to also be
3344 * released and freed.
3345 */
3346 mach_memory_entry_port_release(user_handle);
91447636
A
3347 }
3348 return kr;
3349}
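/*
 * Illustrative user-space sketch (not part of vm_user.c): the
 * MAP_MEM_NAMED_CREATE path handled above can also be used to create fresh,
 * optionally purgeable, anonymous memory that is not mapped anywhere yet.
 * The MAP_MEM_* flags are the same ones tested in
 * mach_make_memory_entry_internal(); the function name is made up and the
 * header location of the flags may vary by SDK.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/memory_object_types.h>   /* MAP_MEM_* flags (location per SDK) */

static kern_return_t
make_purgeable_entry(memory_object_size_t size, mach_port_t *entry)
{
	/* offset is conventionally 0 here; no parent handle is used */
	return mach_make_memory_entry_64(mach_task_self(), &size, 0,
	    MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE |
	    VM_PROT_READ | VM_PROT_WRITE,
	    entry, MACH_PORT_NULL);
}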
3350
3351kern_return_t
3352_mach_make_memory_entry(
0a7de745
A
3353 vm_map_t target_map,
3354 memory_object_size_t *size,
3355 memory_object_offset_t offset,
3356 vm_prot_t permission,
3357 ipc_port_t *object_handle,
3358 ipc_port_t parent_entry)
91447636 3359{
0a7de745
A
3360 memory_object_size_t mo_size;
3361 kern_return_t kr;
3362
2d21ac55 3363 mo_size = (memory_object_size_t)*size;
0a7de745
A
3364 kr = mach_make_memory_entry_64(target_map, &mo_size,
3365 (memory_object_offset_t)offset, permission, object_handle,
3366 parent_entry);
91447636 3367 *size = mo_size;
1c79356b
A
3368 return kr;
3369}
3370
3371kern_return_t
3372mach_make_memory_entry(
0a7de745
A
3373 vm_map_t target_map,
3374 vm_size_t *size,
3375 vm_offset_t offset,
3376 vm_prot_t permission,
3377 ipc_port_t *object_handle,
3378 ipc_port_t parent_entry)
3379{
3380 memory_object_size_t mo_size;
3381 kern_return_t kr;
3382
2d21ac55 3383 mo_size = (memory_object_size_t)*size;
0a7de745
A
3384 kr = mach_make_memory_entry_64(target_map, &mo_size,
3385 (memory_object_offset_t)offset, permission, object_handle,
3386 parent_entry);
91447636 3387 *size = CAST_DOWN(vm_size_t, mo_size);
1c79356b
A
3388 return kr;
3389}
3390
3391/*
91447636
A
3392 * task_wire
3393 *
3394 *	task_wire operates on the map's wiring_required flag.
3395 *	Set or clear the map's wiring_required flag. This flag, if set,
3396 *	will cause all future virtual memory allocations in the map to be
3397 * this routine is done with the vm_wire interface.
1c79356b 3398 */
1c79356b 3399kern_return_t
91447636 3400task_wire(
0a7de745
A
3401 vm_map_t map,
3402 boolean_t must_wire)
91447636 3403{
0a7de745
A
3404 if (map == VM_MAP_NULL) {
3405 return KERN_INVALID_ARGUMENT;
3406 }
91447636 3407
d9a64523
A
3408 vm_map_lock(map);
3409 map->wiring_required = (must_wire == TRUE);
3410 vm_map_unlock(map);
91447636 3411
0a7de745 3412 return KERN_SUCCESS;
91447636
A
3413}
3414
a39ff7e2
A
3415kern_return_t
3416vm_map_exec_lockdown(
0a7de745 3417 vm_map_t map)
a39ff7e2 3418{
0a7de745
A
3419 if (map == VM_MAP_NULL) {
3420 return KERN_INVALID_ARGUMENT;
3421 }
a39ff7e2
A
3422
3423 vm_map_lock(map);
3424 map->map_disallow_new_exec = TRUE;
3425 vm_map_unlock(map);
3426
0a7de745 3427 return KERN_SUCCESS;
a39ff7e2
A
3428}
3429
d9a64523 3430#if VM_NAMED_ENTRY_LIST
0a7de745
A
3431queue_head_t vm_named_entry_list;
3432int vm_named_entry_count = 0;
3433lck_mtx_t vm_named_entry_list_lock_data;
3434lck_mtx_ext_t vm_named_entry_list_lock_data_ext;
d9a64523
A
3435#endif /* VM_NAMED_ENTRY_LIST */
3436
3437void vm_named_entry_init(void);
3438void
3439vm_named_entry_init(void)
3440{
3441#if VM_NAMED_ENTRY_LIST
3442 queue_init(&vm_named_entry_list);
3443 vm_named_entry_count = 0;
3444 lck_mtx_init_ext(&vm_named_entry_list_lock_data,
0a7de745
A
3445 &vm_named_entry_list_lock_data_ext,
3446 &vm_object_lck_grp,
3447 &vm_object_lck_attr);
d9a64523
A
3448#endif /* VM_NAMED_ENTRY_LIST */
3449}
3450
91447636
A
3451__private_extern__ kern_return_t
3452mach_memory_entry_allocate(
0a7de745
A
3453 vm_named_entry_t *user_entry_p,
3454 ipc_port_t *user_handle_p)
1c79356b 3455{
0a7de745
A
3456 vm_named_entry_t user_entry;
3457 ipc_port_t user_handle;
3458 ipc_port_t previous;
1c79356b 3459
91447636 3460 user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
0a7de745 3461 if (user_entry == NULL) {
1c79356b 3462 return KERN_FAILURE;
0a7de745
A
3463 }
3464 bzero(user_entry, sizeof(*user_entry));
1c79356b 3465
91447636 3466 named_entry_lock_init(user_entry);
1c79356b 3467
91447636
A
3468 user_handle = ipc_port_alloc_kernel();
3469 if (user_handle == IP_NULL) {
3470 kfree(user_entry, sizeof *user_entry);
3471 return KERN_FAILURE;
3472 }
1c79356b
A
3473 ip_lock(user_handle);
3474
3475 /* make a sonce right */
3476 user_handle->ip_sorights++;
3477 ip_reference(user_handle);
3478
1c79356b 3479 /* make a send right */
0a7de745
A
3480 user_handle->ip_mscount++;
3481 user_handle->ip_srights++;
3482 ip_reference(user_handle);
1c79356b
A
3483
3484 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
3485 /* nsrequest unlocks user_handle */
3486
5ba3f43e 3487 user_entry->backing.object = NULL;
91447636 3488 user_entry->is_sub_map = FALSE;
39236c6e 3489 user_entry->is_copy = FALSE;
91447636 3490 user_entry->internal = FALSE;
2d21ac55
A
3491 user_entry->size = 0;
3492 user_entry->offset = 0;
39236c6e 3493 user_entry->data_offset = 0;
2d21ac55 3494 user_entry->protection = VM_PROT_NONE;
91447636 3495 user_entry->ref_count = 1;
1c79356b 3496
91447636 3497 ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
0a7de745 3498 IKOT_NAMED_ENTRY);
1c79356b 3499
91447636
A
3500 *user_entry_p = user_entry;
3501 *user_handle_p = user_handle;
1c79356b 3502
d9a64523
A
3503#if VM_NAMED_ENTRY_LIST
3504 /* keep a loose (no reference) pointer to the Mach port, for debugging only */
3505 user_entry->named_entry_port = user_handle;
3506 /* backtrace at allocation time, for debugging only */
3507 OSBacktrace(&user_entry->named_entry_bt[0],
0a7de745 3508 NAMED_ENTRY_BT_DEPTH);
d9a64523
A
3509
3510 /* add this new named entry to the global list */
3511 lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
3512 queue_enter(&vm_named_entry_list, user_entry,
0a7de745 3513 vm_named_entry_t, named_entry_list);
d9a64523
A
3514 vm_named_entry_count++;
3515 lck_mtx_unlock(&vm_named_entry_list_lock_data);
3516#endif /* VM_NAMED_ENTRY_LIST */
3517
91447636
A
3518 return KERN_SUCCESS;
3519}
1c79356b 3520
91447636
A
3521/*
3522 * mach_memory_object_memory_entry_64
3523 *
3524 * Create a named entry backed by the provided pager.
3525 *
91447636
A
3526 */
3527kern_return_t
3528mach_memory_object_memory_entry_64(
0a7de745
A
3529 host_t host,
3530 boolean_t internal,
3531 vm_object_offset_t size,
3532 vm_prot_t permission,
3533 memory_object_t pager,
3534 ipc_port_t *entry_handle)
91447636 3535{
0a7de745
A
3536 unsigned int access;
3537 vm_named_entry_t user_entry;
3538 ipc_port_t user_handle;
3539 vm_object_t object;
91447636 3540
0a7de745
A
3541 if (host == HOST_NULL) {
3542 return KERN_INVALID_HOST;
3543 }
91447636 3544
5ba3f43e
A
3545 if (pager == MEMORY_OBJECT_NULL && internal) {
3546 object = vm_object_allocate(size);
5c9f4661
A
3547 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
3548 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
3549 }
5ba3f43e
A
3550 } else {
3551 object = memory_object_to_vm_object(pager);
3552 if (object != VM_OBJECT_NULL) {
3553 vm_object_reference(object);
3554 }
3555 }
3556 if (object == VM_OBJECT_NULL) {
3557 return KERN_INVALID_ARGUMENT;
3558 }
3559
91447636
A
3560 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3561 != KERN_SUCCESS) {
5ba3f43e 3562 vm_object_deallocate(object);
91447636
A
3563 return KERN_FAILURE;
3564 }
3565
91447636
A
3566 user_entry->size = size;
3567 user_entry->offset = 0;
3568 user_entry->protection = permission & VM_PROT_ALL;
3569 access = GET_MAP_MEM(permission);
3570 SET_MAP_MEM(access, user_entry->protection);
91447636 3571 user_entry->is_sub_map = FALSE;
91447636
A
3572 assert(user_entry->ref_count == 1);
3573
5ba3f43e
A
3574 user_entry->backing.object = object;
3575 user_entry->internal = object->internal;
3576 assert(object->internal == internal);
3577
91447636 3578 *entry_handle = user_handle;
1c79356b 3579 return KERN_SUCCESS;
5ba3f43e 3580}
91447636
A
3581
3582kern_return_t
3583mach_memory_object_memory_entry(
0a7de745
A
3584 host_t host,
3585 boolean_t internal,
3586 vm_size_t size,
3587 vm_prot_t permission,
3588 memory_object_t pager,
3589 ipc_port_t *entry_handle)
91447636 3590{
0a7de745
A
3591 return mach_memory_object_memory_entry_64( host, internal,
3592 (vm_object_offset_t)size, permission, pager, entry_handle);
91447636
A
3593}
3594
3595
3596kern_return_t
3597mach_memory_entry_purgable_control(
0a7de745
A
3598 ipc_port_t entry_port,
3599 vm_purgable_t control,
3600 int *state)
5ba3f43e
A
3601{
3602 if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
3603 /* not allowed from user-space */
3604 return KERN_INVALID_ARGUMENT;
3605 }
3606
3607 return memory_entry_purgeable_control_internal(entry_port, control, state);
3608}
3609
3610kern_return_t
3611memory_entry_purgeable_control_internal(
0a7de745
A
3612 ipc_port_t entry_port,
3613 vm_purgable_t control,
3614 int *state)
91447636 3615{
0a7de745
A
3616 kern_return_t kr;
3617 vm_named_entry_t mem_entry;
3618 vm_object_t object;
1c79356b 3619
d9a64523 3620 if (!IP_VALID(entry_port) ||
91447636
A
3621 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3622 return KERN_INVALID_ARGUMENT;
3623 }
2d21ac55 3624 if (control != VM_PURGABLE_SET_STATE &&
5ba3f43e 3625 control != VM_PURGABLE_GET_STATE &&
0a7de745
A
3626 control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
3627 return KERN_INVALID_ARGUMENT;
3628 }
2d21ac55 3629
5ba3f43e 3630 if ((control == VM_PURGABLE_SET_STATE ||
0a7de745 3631 control == VM_PURGABLE_SET_STATE_FROM_KERNEL) &&
b0d623f7 3632 (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
0a7de745
A
3633 ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) {
3634 return KERN_INVALID_ARGUMENT;
3635 }
1c79356b 3636
91447636 3637 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
1c79356b 3638
91447636 3639 named_entry_lock(mem_entry);
1c79356b 3640
39236c6e 3641 if (mem_entry->is_sub_map ||
39236c6e 3642 mem_entry->is_copy) {
91447636 3643 named_entry_unlock(mem_entry);
1c79356b
A
3644 return KERN_INVALID_ARGUMENT;
3645 }
91447636
A
3646
3647 object = mem_entry->backing.object;
3648 if (object == VM_OBJECT_NULL) {
3649 named_entry_unlock(mem_entry);
1c79356b
A
3650 return KERN_INVALID_ARGUMENT;
3651 }
91447636
A
3652
3653 vm_object_lock(object);
3654
3655 /* check that named entry covers entire object ? */
6d2010ae 3656 if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
91447636
A
3657 vm_object_unlock(object);
3658 named_entry_unlock(mem_entry);
3659 return KERN_INVALID_ARGUMENT;
1c79356b 3660 }
91447636
A
3661
3662 named_entry_unlock(mem_entry);
3663
3664 kr = vm_object_purgable_control(object, control, state);
3665
3666 vm_object_unlock(object);
3667
3668 return kr;
1c79356b
A
3669}
3670
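
/*
 * Illustrative sketch, not part of the original source: driving the
 * purgeable-control entry point from kernel code. Assumes the
 * VM_PURGABLE_* constants from mach/vm_purgable.h; the state argument is
 * both an input (requested state) and an output (previous/current state).
 */
#if 0
static kern_return_t
example_make_entry_volatile(ipc_port_t entry_port)
{
	kern_return_t   kr;
	int             state;

	state = VM_PURGABLE_VOLATILE;
	kr = memory_entry_purgeable_control_internal(entry_port,
	    VM_PURGABLE_SET_STATE_FROM_KERNEL, &state);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Read the current purgeable state back. */
	return memory_entry_purgeable_control_internal(entry_port,
	    VM_PURGABLE_GET_STATE, &state);
}
#endif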
d9a64523
A
3671kern_return_t
3672mach_memory_entry_access_tracking(
0a7de745
A
3673 ipc_port_t entry_port,
3674 int *access_tracking,
3675 uint32_t *access_tracking_reads,
3676 uint32_t *access_tracking_writes)
d9a64523
A
3677{
3678 return memory_entry_access_tracking_internal(entry_port,
0a7de745
A
3679 access_tracking,
3680 access_tracking_reads,
3681 access_tracking_writes);
d9a64523
A
3682}
3683
3684kern_return_t
3685memory_entry_access_tracking_internal(
0a7de745
A
3686 ipc_port_t entry_port,
3687 int *access_tracking,
3688 uint32_t *access_tracking_reads,
3689 uint32_t *access_tracking_writes)
d9a64523 3690{
0a7de745
A
3691 vm_named_entry_t mem_entry;
3692 vm_object_t object;
3693 kern_return_t kr;
d9a64523
A
3694
3695 if (!IP_VALID(entry_port) ||
3696 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3697 return KERN_INVALID_ARGUMENT;
3698 }
3699
3700 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
3701
3702 named_entry_lock(mem_entry);
3703
3704 if (mem_entry->is_sub_map ||
3705 mem_entry->is_copy) {
3706 named_entry_unlock(mem_entry);
3707 return KERN_INVALID_ARGUMENT;
3708 }
3709
3710 object = mem_entry->backing.object;
3711 if (object == VM_OBJECT_NULL) {
3712 named_entry_unlock(mem_entry);
3713 return KERN_INVALID_ARGUMENT;
3714 }
3715
3716#if VM_OBJECT_ACCESS_TRACKING
3717 vm_object_access_tracking(object,
0a7de745
A
3718 access_tracking,
3719 access_tracking_reads,
3720 access_tracking_writes);
d9a64523
A
3721 kr = KERN_SUCCESS;
3722#else /* VM_OBJECT_ACCESS_TRACKING */
3723 (void) access_tracking;
3724 (void) access_tracking_reads;
3725 (void) access_tracking_writes;
3726 kr = KERN_NOT_SUPPORTED;
3727#endif /* VM_OBJECT_ACCESS_TRACKING */
3728
3729 named_entry_unlock(mem_entry);
3730
3731 return kr;
3732}
3733
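
/*
 * Illustrative sketch, not part of the original source: arming access
 * tracking on a named entry and sampling the counters. On kernels built
 * without VM_OBJECT_ACCESS_TRACKING this simply returns
 * KERN_NOT_SUPPORTED, per the implementation above.
 */
#if 0
static void
example_sample_access_tracking(ipc_port_t entry_port)
{
	int             enable = 1;     /* request tracking on */
	uint32_t        reads = 0;
	uint32_t        writes = 0;

	if (memory_entry_access_tracking_internal(entry_port,
	    &enable, &reads, &writes) == KERN_SUCCESS) {
		printf("named entry access tracking: %u reads, %u writes\n",
		    reads, writes);
	}
}
#endif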
39236c6e
A
3734kern_return_t
3735mach_memory_entry_get_page_counts(
0a7de745
A
3736 ipc_port_t entry_port,
3737 unsigned int *resident_page_count,
3738 unsigned int *dirty_page_count)
39236c6e 3739{
0a7de745
A
3740 kern_return_t kr;
3741 vm_named_entry_t mem_entry;
3742 vm_object_t object;
3743 vm_object_offset_t offset;
3744 vm_object_size_t size;
39236c6e 3745
d9a64523 3746 if (!IP_VALID(entry_port) ||
39236c6e
A
3747 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3748 return KERN_INVALID_ARGUMENT;
3749 }
3750
3751 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
3752
3753 named_entry_lock(mem_entry);
3754
3755 if (mem_entry->is_sub_map ||
39236c6e
A
3756 mem_entry->is_copy) {
3757 named_entry_unlock(mem_entry);
3758 return KERN_INVALID_ARGUMENT;
3759 }
3760
3761 object = mem_entry->backing.object;
3762 if (object == VM_OBJECT_NULL) {
3763 named_entry_unlock(mem_entry);
3764 return KERN_INVALID_ARGUMENT;
3765 }
3766
3767 vm_object_lock(object);
3768
3769 offset = mem_entry->offset;
3770 size = mem_entry->size;
3771
3772 named_entry_unlock(mem_entry);
3773
3774 kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count);
3775
3776 vm_object_unlock(object);
3777
3778 return kr;
3779}
3780
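
/*
 * Illustrative sketch, not part of the original source: reporting the
 * resident and dirty page counts of the object backing a named entry.
 */
#if 0
static kern_return_t
example_report_page_counts(ipc_port_t entry_port)
{
	unsigned int    resident = 0;
	unsigned int    dirty = 0;
	kern_return_t   kr;

	kr = mach_memory_entry_get_page_counts(entry_port, &resident, &dirty);
	if (kr == KERN_SUCCESS) {
		printf("named entry: %u resident pages, %u dirty pages\n",
		    resident, dirty);
	}
	return kr;
}
#endif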
91447636
A
3781/*
3782 * mach_memory_entry_port_release:
3783 *
3784 * Release a send right on a named entry port. This is the correct
3785 * way to destroy a named entry. When the last right on the port is
3786 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
3787 */
3788void
3789mach_memory_entry_port_release(
0a7de745 3790 ipc_port_t port)
91447636
A
3791{
3792 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
3793 ipc_port_release_send(port);
3794}
1c79356b 3795
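
/*
 * Illustrative sketch, not part of the original source: the intended
 * lifecycle of a kernel-held named entry. It assumes the in-kernel
 * mach_make_memory_entry_64() defined earlier in this file; releasing the
 * send right is what eventually triggers mach_destroy_memory_entry() below.
 */
#if 0
static kern_return_t
example_named_entry_lifecycle(
	vm_map_t                map,
	memory_object_offset_t  offset,
	memory_object_size_t    size)
{
	ipc_port_t      entry = IP_NULL;
	kern_return_t   kr;

	kr = mach_make_memory_entry_64(map, &size, offset,
	    VM_PROT_READ | VM_PROT_WRITE, &entry, IP_NULL);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* ... map or query the entry here ... */

	/* Correct teardown path from kernel code: drop the send right. */
	mach_memory_entry_port_release(entry);
	return KERN_SUCCESS;
}
#endif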
91447636
A
3796/*
3797 * mach_destroy_memory_entry:
3798 *
3799 * Drops a reference on a memory entry and destroys the memory entry if
3800 * there are no more references on it.
3801 * NOTE: This routine should not be called to destroy a memory entry from the
3802 * kernel, as it will not release the Mach port associated with the memory
3803 * entry. The proper way to destroy a memory entry in the kernel is to
3804 * call mach_memory_entry_port_release() to release the kernel's send-right on
3805 * the memory entry's port. When the last send right is released, the memory
3806 * entry will be destroyed via ipc_kobject_destroy().
3807 */
1c79356b
A
3808void
3809mach_destroy_memory_entry(
0a7de745 3810 ipc_port_t port)
1c79356b 3811{
0a7de745 3812 vm_named_entry_t named_entry;
1c79356b
A
3813#if MACH_ASSERT
3814 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
3815#endif /* MACH_ASSERT */
3816 named_entry = (vm_named_entry_t)port->ip_kobject;
316670eb
A
3817
3818 named_entry_lock(named_entry);
91447636 3819 named_entry->ref_count -= 1;
316670eb 3820
0a7de745 3821 if (named_entry->ref_count == 0) {
91447636 3822 if (named_entry->is_sub_map) {
1c79356b 3823 vm_map_deallocate(named_entry->backing.map);
39236c6e
A
3824 } else if (named_entry->is_copy) {
3825 vm_map_copy_discard(named_entry->backing.copy);
3826 } else {
3827 /* release the VM object we've been pointing to */
91447636 3828 vm_object_deallocate(named_entry->backing.object);
39236c6e 3829 }
91447636 3830
316670eb
A
3831 named_entry_unlock(named_entry);
3832 named_entry_lock_destroy(named_entry);
91447636 3833
d9a64523
A
3834#if VM_NAMED_ENTRY_LIST
3835 lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
3836 queue_remove(&vm_named_entry_list, named_entry,
0a7de745 3837 vm_named_entry_t, named_entry_list);
d9a64523
A
3838 assert(vm_named_entry_count > 0);
3839 vm_named_entry_count--;
3840 lck_mtx_unlock(&vm_named_entry_list_lock_data);
3841#endif /* VM_NAMED_ENTRY_LIST */
3842
0a7de745
A
3843 kfree(port->ip_kobject,
3844 sizeof(struct vm_named_entry));
3845 } else {
316670eb 3846 named_entry_unlock(named_entry);
0a7de745 3847 }
1c79356b
A
3848}
3849
0c530ab8
A
3850/* Allow manipulation of individual page state. This is actually part of */
3851/* the UPL regimen but takes place on the memory entry rather than on a UPL */
3852
3853kern_return_t
3854mach_memory_entry_page_op(
0a7de745
A
3855 ipc_port_t entry_port,
3856 vm_object_offset_t offset,
3857 int ops,
3858 ppnum_t *phys_entry,
3859 int *flags)
0c530ab8 3860{
0a7de745
A
3861 vm_named_entry_t mem_entry;
3862 vm_object_t object;
3863 kern_return_t kr;
0c530ab8 3864
d9a64523 3865 if (!IP_VALID(entry_port) ||
0c530ab8
A
3866 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3867 return KERN_INVALID_ARGUMENT;
3868 }
3869
3870 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
3871
3872 named_entry_lock(mem_entry);
3873
39236c6e 3874 if (mem_entry->is_sub_map ||
39236c6e 3875 mem_entry->is_copy) {
0c530ab8
A
3876 named_entry_unlock(mem_entry);
3877 return KERN_INVALID_ARGUMENT;
3878 }
3879
3880 object = mem_entry->backing.object;
3881 if (object == VM_OBJECT_NULL) {
3882 named_entry_unlock(mem_entry);
3883 return KERN_INVALID_ARGUMENT;
3884 }
3885
3886 vm_object_reference(object);
3887 named_entry_unlock(mem_entry);
3888
3889 kr = vm_object_page_op(object, offset, ops, phys_entry, flags);
3890
0a7de745 3891 vm_object_deallocate(object);
0c530ab8
A
3892
3893 return kr;
3894}
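
/*
 * Illustrative sketch, not part of the original source: discarding a
 * single resident page of the object behind a named entry. Assumes the
 * UPL_POP_* operation flags from mach/memory_object_types.h and a
 * page-aligned offset.
 */
#if 0
static kern_return_t
example_dump_one_page(
	ipc_port_t              entry_port,
	vm_object_offset_t      page_aligned_offset)
{
	ppnum_t phys = 0;
	int     flags = 0;

	return mach_memory_entry_page_op(entry_port, page_aligned_offset,
	    UPL_POP_DUMP, &phys, &flags);
}
#endif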
3895
3896/*
0a7de745
A
3897 * mach_memory_entry_range_op offers a performance enhancement over
3898 * mach_memory_entry_page_op for operations which do not require per-page
3899 * state to be returned from the call. Page_op was created to provide
3900 * a low-cost alternative to page manipulation via UPLs when only a single
3901 * page was involved. The range_op call establishes the ability in the _op
0c530ab8
A
3902 * family of functions to work on multiple pages where the lack of page level
3903 * state handling allows the caller to avoid the overhead of the upl structures.
3904 */
3905
3906kern_return_t
3907mach_memory_entry_range_op(
0a7de745
A
3908 ipc_port_t entry_port,
3909 vm_object_offset_t offset_beg,
3910 vm_object_offset_t offset_end,
0c530ab8
A
3911 int ops,
3912 int *range)
3913{
0a7de745
A
3914 vm_named_entry_t mem_entry;
3915 vm_object_t object;
3916 kern_return_t kr;
0c530ab8 3917
d9a64523 3918 if (!IP_VALID(entry_port) ||
0c530ab8
A
3919 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3920 return KERN_INVALID_ARGUMENT;
3921 }
3922
3923 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
3924
3925 named_entry_lock(mem_entry);
3926
39236c6e 3927 if (mem_entry->is_sub_map ||
39236c6e 3928 mem_entry->is_copy) {
0c530ab8
A
3929 named_entry_unlock(mem_entry);
3930 return KERN_INVALID_ARGUMENT;
3931 }
3932
3933 object = mem_entry->backing.object;
3934 if (object == VM_OBJECT_NULL) {
3935 named_entry_unlock(mem_entry);
3936 return KERN_INVALID_ARGUMENT;
3937 }
3938
3939 vm_object_reference(object);
3940 named_entry_unlock(mem_entry);
3941
3942 kr = vm_object_range_op(object,
0a7de745
A
3943 offset_beg,
3944 offset_end,
3945 ops,
3946 (uint32_t *) range);
0c530ab8
A
3947
3948 vm_object_deallocate(object);
3949
3950 return kr;
3951}
1c79356b 3952
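
/*
 * Illustrative sketch, not part of the original source: querying how much
 * of a range is resident with a single range_op call instead of iterating
 * mach_memory_entry_page_op(). Assumes the UPL_ROP_* flags from
 * mach/memory_object_types.h.
 */
#if 0
static kern_return_t
example_resident_extent(
	ipc_port_t              entry_port,
	vm_object_offset_t      start,
	vm_object_offset_t      end,
	int                     *resident_bytes)
{
	/* UPL_ROP_PRESENT reports the extent, starting at 'start', that
	 * is currently resident. */
	return mach_memory_entry_range_op(entry_port, start, end,
	    UPL_ROP_PRESENT, resident_bytes);
}
#endif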
91447636 3953/* ******* Temporary Internal calls to UPL for BSD ***** */
1c79356b 3954
91447636
A
3955extern int kernel_upl_map(
3956 vm_map_t map,
3957 upl_t upl,
3958 vm_offset_t *dst_addr);
1c79356b 3959
91447636
A
3960extern int kernel_upl_unmap(
3961 vm_map_t map,
3962 upl_t upl);
150bd074 3963
91447636
A
3964extern int kernel_upl_commit(
3965 upl_t upl,
3966 upl_page_info_t *pl,
0a7de745 3967 mach_msg_type_number_t count);
1c79356b 3968
91447636
A
3969extern int kernel_upl_commit_range(
3970 upl_t upl,
3971 upl_offset_t offset,
0a7de745
A
3972 upl_size_t size,
3973 int flags,
3974 upl_page_info_array_t pl,
3975 mach_msg_type_number_t count);
1c79356b 3976
91447636
A
3977extern int kernel_upl_abort(
3978 upl_t upl,
3979 int abort_type);
1c79356b 3980
91447636
A
3981extern int kernel_upl_abort_range(
3982 upl_t upl,
3983 upl_offset_t offset,
3984 upl_size_t size,
3985 int abort_flags);
1c79356b 3986
1c79356b 3987
1c79356b
A
3988kern_return_t
3989kernel_upl_map(
0a7de745
A
3990 vm_map_t map,
3991 upl_t upl,
3992 vm_offset_t *dst_addr)
1c79356b 3993{
91447636 3994 return vm_upl_map(map, upl, dst_addr);
1c79356b
A
3995}
3996
3997
3998kern_return_t
3999kernel_upl_unmap(
0a7de745
A
4000 vm_map_t map,
4001 upl_t upl)
1c79356b 4002{
91447636 4003 return vm_upl_unmap(map, upl);
1c79356b
A
4004}
4005
4006kern_return_t
4007kernel_upl_commit(
91447636
A
4008 upl_t upl,
4009 upl_page_info_t *pl,
0b4e3aa0 4010 mach_msg_type_number_t count)
1c79356b 4011{
0a7de745 4012 kern_return_t kr;
0b4e3aa0
A
4013
4014 kr = upl_commit(upl, pl, count);
4015 upl_deallocate(upl);
1c79356b
A
4016 return kr;
4017}
4018
0b4e3aa0 4019
1c79356b
A
4020kern_return_t
4021kernel_upl_commit_range(
0a7de745
A
4022 upl_t upl,
4023 upl_offset_t offset,
4024 upl_size_t size,
4025 int flags,
0b4e3aa0
A
4026 upl_page_info_array_t pl,
4027 mach_msg_type_number_t count)
1c79356b 4028{
0a7de745
A
4029 boolean_t finished = FALSE;
4030 kern_return_t kr;
0b4e3aa0 4031
0a7de745 4032 if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
0b4e3aa0 4033 flags |= UPL_COMMIT_NOTIFY_EMPTY;
0a7de745 4034 }
0b4e3aa0 4035
593a1d5f
A
4036 if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
4037 return KERN_INVALID_ARGUMENT;
4038 }
4039
0b4e3aa0
A
4040 kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
4041
0a7de745 4042 if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished) {
0b4e3aa0 4043 upl_deallocate(upl);
0a7de745 4044 }
0b4e3aa0 4045
1c79356b
A
4046 return kr;
4047}
0a7de745 4048
1c79356b
A
4049kern_return_t
4050kernel_upl_abort_range(
0a7de745
A
4051 upl_t upl,
4052 upl_offset_t offset,
4053 upl_size_t size,
4054 int abort_flags)
1c79356b 4055{
0a7de745
A
4056 kern_return_t kr;
4057 boolean_t finished = FALSE;
1c79356b 4058
0a7de745 4059 if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY) {
0b4e3aa0 4060 abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
0a7de745 4061 }
1c79356b 4062
0b4e3aa0 4063 kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
1c79356b 4064
0a7de745 4065 if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished) {
0b4e3aa0 4066 upl_deallocate(upl);
0a7de745 4067 }
1c79356b 4068
0b4e3aa0 4069 return kr;
1c79356b
A
4070}
4071
1c79356b 4072kern_return_t
0b4e3aa0 4073kernel_upl_abort(
0a7de745
A
4074 upl_t upl,
4075 int abort_type)
1c79356b 4076{
0a7de745 4077 kern_return_t kr;
1c79356b 4078
0b4e3aa0
A
4079 kr = upl_abort(upl, abort_type);
4080 upl_deallocate(upl);
4081 return kr;
1c79356b
A
4082}
4083
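
/*
 * Illustrative sketch, not part of the original source: committing a
 * completed I/O range through the BSD-facing wrapper above and letting
 * UPL_COMMIT_FREE_ON_EMPTY (turned into UPL_COMMIT_NOTIFY_EMPTY by the
 * wrapper) deallocate the UPL once it is fully committed.
 */
#if 0
static kern_return_t
example_commit_io_range(
	upl_t                   upl,
	upl_offset_t            offset,
	upl_size_t              size,
	upl_page_info_array_t   pl,
	mach_msg_type_number_t  count)
{
	return kernel_upl_commit_range(upl, offset, size,
	    UPL_COMMIT_FREE_ON_EMPTY, pl, count);
}
#endif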
91447636
A
4084/*
4085 * Now a kernel-private interface (for BootCache
4086 * use only). Need a cleaner way to create an
4087 * empty vm_map() and return a handle to it.
4088 */
1c79356b
A
4089
4090kern_return_t
91447636 4091vm_region_object_create(
0a7de745
A
4092 __unused vm_map_t target_map,
4093 vm_size_t size,
4094 ipc_port_t *object_handle)
1c79356b 4095{
0a7de745
A
4096 vm_named_entry_t user_entry;
4097 ipc_port_t user_handle;
4098
4099 vm_map_t new_map;
1c79356b 4100
91447636
A
4101 if (mach_memory_entry_allocate(&user_entry, &user_handle)
4102 != KERN_SUCCESS) {
1c79356b 4103 return KERN_FAILURE;
91447636 4104 }
1c79356b 4105
91447636 4106 /* Create a named object based on a submap of specified size */
1c79356b 4107
91447636 4108 new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
0a7de745
A
4109 vm_map_round_page(size,
4110 VM_MAP_PAGE_MASK(target_map)),
4111 TRUE);
39236c6e 4112 vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));
1c79356b 4113
91447636
A
4114 user_entry->backing.map = new_map;
4115 user_entry->internal = TRUE;
4116 user_entry->is_sub_map = TRUE;
4117 user_entry->offset = 0;
4118 user_entry->protection = VM_PROT_ALL;
4119 user_entry->size = size;
4120 assert(user_entry->ref_count == 1);
1c79356b 4121
91447636 4122 *object_handle = user_handle;
1c79356b 4123 return KERN_SUCCESS;
55e303ae
A
4124}
4125
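
/*
 * Illustrative sketch, not part of the original source: obtaining a
 * BootCache-style handle to an empty submap of the requested size and
 * releasing it again through the named-entry port.
 */
#if 0
static kern_return_t
example_create_region_handle(
	vm_map_t        target_map,
	vm_size_t       size,
	ipc_port_t      *handle_out)
{
	kern_return_t   kr;

	kr = vm_region_object_create(target_map, size, handle_out);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* When done with the handle:
	 *     mach_memory_entry_port_release(*handle_out);
	 */
	return KERN_SUCCESS;
}
#endif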
0a7de745
A
4126ppnum_t vm_map_get_phys_page( /* forward */
4127 vm_map_t map,
4128 vm_offset_t offset);
91447636 4129
55e303ae 4130ppnum_t
1c79356b 4131vm_map_get_phys_page(
0a7de745
A
4132 vm_map_t map,
4133 vm_offset_t addr)
1c79356b 4134{
0a7de745
A
4135 vm_object_offset_t offset;
4136 vm_object_t object;
4137 vm_map_offset_t map_offset;
4138 vm_map_entry_t entry;
4139 ppnum_t phys_page = 0;
91447636 4140
39236c6e 4141 map_offset = vm_map_trunc_page(addr, PAGE_MASK);
1c79356b
A
4142
4143 vm_map_lock(map);
91447636 4144 while (vm_map_lookup_entry(map, map_offset, &entry)) {
3e170ce0 4145 if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
1c79356b 4146 vm_map_unlock(map);
91447636 4147 return (ppnum_t) 0;
1c79356b
A
4148 }
4149 if (entry->is_sub_map) {
0a7de745 4150 vm_map_t old_map;
3e170ce0 4151 vm_map_lock(VME_SUBMAP(entry));
1c79356b 4152 old_map = map;
3e170ce0
A
4153 map = VME_SUBMAP(entry);
4154 map_offset = (VME_OFFSET(entry) +
0a7de745 4155 (map_offset - entry->vme_start));
1c79356b
A
4156 vm_map_unlock(old_map);
4157 continue;
4158 }
3e170ce0 4159 if (VME_OBJECT(entry)->phys_contiguous) {
9bccf70c
A
4160 /* These are not standard pageable memory mappings */
4161 /* If they are not present in the object they will */
4162 /* have to be picked up from the pager through the */
4163 /* fault mechanism. */
3e170ce0 4164 if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
9bccf70c
A
4165 /* need to call vm_fault */
4166 vm_map_unlock(map);
0a7de745
A
4167 vm_fault(map, map_offset, VM_PROT_NONE,
4168 FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
4169 THREAD_UNINT, NULL, 0);
9bccf70c
A
4170 vm_map_lock(map);
4171 continue;
4172 }
3e170ce0 4173 offset = (VME_OFFSET(entry) +
0a7de745 4174 (map_offset - entry->vme_start));
55e303ae 4175 phys_page = (ppnum_t)
0a7de745
A
4176 ((VME_OBJECT(entry)->vo_shadow_offset
4177 + offset) >> PAGE_SHIFT);
9bccf70c 4178 break;
9bccf70c 4179 }
3e170ce0
A
4180 offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
4181 object = VME_OBJECT(entry);
1c79356b
A
4182 vm_object_lock(object);
4183 while (TRUE) {
0a7de745
A
4184 vm_page_t dst_page = vm_page_lookup(object, offset);
4185 if (dst_page == VM_PAGE_NULL) {
4186 if (object->shadow) {
1c79356b
A
4187 vm_object_t old_object;
4188 vm_object_lock(object->shadow);
4189 old_object = object;
6d2010ae 4190 offset = offset + object->vo_shadow_offset;
1c79356b
A
4191 object = object->shadow;
4192 vm_object_unlock(old_object);
4193 } else {
4194 vm_object_unlock(object);
4195 break;
4196 }
4197 } else {
39037602 4198 phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
1c79356b
A
4199 vm_object_unlock(object);
4200 break;
4201 }
4202 }
4203 break;
0a7de745 4204 }
1c79356b
A
4205
4206 vm_map_unlock(map);
55e303ae
A
4207 return phys_page;
4208}
4209
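
/*
 * Illustrative sketch, not part of the original source: translating a
 * kernel virtual address to its backing physical page number. A result
 * of 0 generally means no page is currently resident at that address.
 */
#if 0
static ppnum_t
example_kva_to_ppnum(vm_offset_t kva)
{
	return vm_map_get_phys_page(kernel_map, kva);
}
#endif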
3e170ce0 4210#if 0
0a7de745
A
4211kern_return_t kernel_object_iopl_request( /* forward */
4212 vm_named_entry_t named_entry,
4213 memory_object_offset_t offset,
4214 upl_size_t *upl_size,
4215 upl_t *upl_ptr,
4216 upl_page_info_array_t user_page_list,
4217 unsigned int *page_list_count,
4218 int *flags);
91447636 4219
55e303ae
A
4220kern_return_t
4221kernel_object_iopl_request(
0a7de745
A
4222 vm_named_entry_t named_entry,
4223 memory_object_offset_t offset,
4224 upl_size_t *upl_size,
4225 upl_t *upl_ptr,
4226 upl_page_info_array_t user_page_list,
4227 unsigned int *page_list_count,
4228 int *flags)
55e303ae 4229{
0a7de745
A
4230 vm_object_t object;
4231 kern_return_t ret;
55e303ae 4232
0a7de745 4233 int caller_flags;
55e303ae
A
4234
4235 caller_flags = *flags;
4236
91447636
A
4237 if (caller_flags & ~UPL_VALID_FLAGS) {
4238 /*
4239 * For forward compatibility's sake,
4240 * reject any unknown flag.
4241 */
4242 return KERN_INVALID_VALUE;
4243 }
4244
55e303ae 4245 /* a few checks to make sure user is obeying rules */
0a7de745
A
4246 if (*upl_size == 0) {
4247 if (offset >= named_entry->size) {
4248 return KERN_INVALID_RIGHT;
4249 }
b0d623f7 4250 *upl_size = (upl_size_t) (named_entry->size - offset);
0a7de745 4251 if (*upl_size != named_entry->size - offset) {
b0d623f7 4252 return KERN_INVALID_ARGUMENT;
0a7de745 4253 }
55e303ae 4254 }
0a7de745
A
4255 if (caller_flags & UPL_COPYOUT_FROM) {
4256 if ((named_entry->protection & VM_PROT_READ)
4257 != VM_PROT_READ) {
4258 return KERN_INVALID_RIGHT;
55e303ae
A
4259 }
4260 } else {
0a7de745
A
4261 if ((named_entry->protection &
4262 (VM_PROT_READ | VM_PROT_WRITE))
4263 != (VM_PROT_READ | VM_PROT_WRITE)) {
4264 return KERN_INVALID_RIGHT;
55e303ae
A
4265 }
4266 }
0a7de745
A
4267 if (named_entry->size < (offset + *upl_size)) {
4268 return KERN_INVALID_ARGUMENT;
4269 }
55e303ae
A
4270
4271 /* the caller's parameter offset is defined to be the */
4272 /* offset from the beginning of the named entry's range in the object */
4273 offset = offset + named_entry->offset;
4274
39236c6e 4275 if (named_entry->is_sub_map ||
0a7de745 4276 named_entry->is_copy) {
39236c6e 4277 return KERN_INVALID_ARGUMENT;
0a7de745
A
4278 }
4279
55e303ae
A
4280 named_entry_lock(named_entry);
4281
5ba3f43e
A
4282 /* This is the case where we are going to operate */
4283 /* on an already known object. If the object is */
4284 /* not ready it is internal. An external */
4285 /* object cannot be mapped until it is ready, */
4286 /* so we can avoid the ready check */
4287 /* in this case. */
4288 object = named_entry->backing.object;
4289 vm_object_reference(object);
4290 named_entry_unlock(named_entry);
55e303ae
A
4291
4292 if (!object->private) {
0a7de745 4293 if (*upl_size > MAX_UPL_TRANSFER_BYTES) {
fe8ab488 4294 *upl_size = MAX_UPL_TRANSFER_BYTES;
0a7de745 4295 }
55e303ae
A
4296 if (object->phys_contiguous) {
4297 *flags = UPL_PHYS_CONTIG;
4298 } else {
4299 *flags = 0;
4300 }
4301 } else {
4302 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
4303 }
4304
4305 ret = vm_object_iopl_request(object,
0a7de745
A
4306 offset,
4307 *upl_size,
4308 upl_ptr,
4309 user_page_list,
4310 page_list_count,
4311 (upl_control_flags_t)(unsigned int)caller_flags);
55e303ae
A
4312 vm_object_deallocate(object);
4313 return ret;
1c79356b 4314}
3e170ce0 4315#endif
5ba3f43e
A
4316
4317/*
4318 * These symbols are looked up at runtime by VMware and VirtualBox,
4319 * despite not being exported in the symbol sets.
4320 */
4321
4322#if defined(__x86_64__)
4323
4324kern_return_t
4325mach_vm_map(
0a7de745
A
4326 vm_map_t target_map,
4327 mach_vm_offset_t *address,
4328 mach_vm_size_t initial_size,
4329 mach_vm_offset_t mask,
4330 int flags,
4331 ipc_port_t port,
4332 vm_object_offset_t offset,
4333 boolean_t copy,
4334 vm_prot_t cur_protection,
4335 vm_prot_t max_protection,
4336 vm_inherit_t inheritance);
5ba3f43e
A
4337
4338kern_return_t
4339mach_vm_remap(
0a7de745
A
4340 vm_map_t target_map,
4341 mach_vm_offset_t *address,
4342 mach_vm_size_t size,
4343 mach_vm_offset_t mask,
4344 int flags,
4345 vm_map_t src_map,
4346 mach_vm_offset_t memory_address,
4347 boolean_t copy,
4348 vm_prot_t *cur_protection,
4349 vm_prot_t *max_protection,
4350 vm_inherit_t inheritance);
5ba3f43e
A
4351
4352kern_return_t
4353mach_vm_map(
0a7de745
A
4354 vm_map_t target_map,
4355 mach_vm_offset_t *address,
4356 mach_vm_size_t initial_size,
4357 mach_vm_offset_t mask,
4358 int flags,
4359 ipc_port_t port,
4360 vm_object_offset_t offset,
4361 boolean_t copy,
4362 vm_prot_t cur_protection,
4363 vm_prot_t max_protection,
4364 vm_inherit_t inheritance)
5ba3f43e 4365{
0a7de745
A
4366 return mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
4367 offset, copy, cur_protection, max_protection, inheritance);
5ba3f43e
A
4368}
4369
4370kern_return_t
4371mach_vm_remap(
0a7de745
A
4372 vm_map_t target_map,
4373 mach_vm_offset_t *address,
4374 mach_vm_size_t size,
4375 mach_vm_offset_t mask,
4376 int flags,
4377 vm_map_t src_map,
4378 mach_vm_offset_t memory_address,
4379 boolean_t copy,
4380 vm_prot_t *cur_protection,
4381 vm_prot_t *max_protection,
4382 vm_inherit_t inheritance)
5ba3f43e 4383{
0a7de745
A
4384 return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
4385 copy, cur_protection, max_protection, inheritance);
5ba3f43e
A
4386}
4387
4388kern_return_t
4389vm_map(
0a7de745
A
4390 vm_map_t target_map,
4391 vm_offset_t *address,
4392 vm_size_t size,
4393 vm_offset_t mask,
4394 int flags,
4395 ipc_port_t port,
4396 vm_offset_t offset,
4397 boolean_t copy,
4398 vm_prot_t cur_protection,
4399 vm_prot_t max_protection,
4400 vm_inherit_t inheritance);
5ba3f43e
A
4401
4402kern_return_t
4403vm_map(
0a7de745
A
4404 vm_map_t target_map,
4405 vm_offset_t *address,
4406 vm_size_t size,
4407 vm_offset_t mask,
4408 int flags,
4409 ipc_port_t port,
4410 vm_offset_t offset,
4411 boolean_t copy,
4412 vm_prot_t cur_protection,
4413 vm_prot_t max_protection,
4414 vm_inherit_t inheritance)
5ba3f43e
A
4415{
4416 vm_tag_t tag;
4417
4418 VM_GET_FLAGS_ALIAS(flags, tag);
d9a64523 4419 return vm_map_kernel(target_map, address, size, mask,
0a7de745
A
4420 flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
4421 port, offset, copy,
4422 cur_protection, max_protection, inheritance);
5ba3f43e
A
4423}
4424
4425#endif /* __x86_64__ */