/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can either be 32 or 64, or the kernel
 * task can be 32 or 64. mach_vm_allocate makes sense everywhere, and is preferred
 * for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32 platforms,
 * the MIG glue should never call into vm_allocate directly, because the calling
 * task and kernel_task are unlikely to use the same size parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */

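/*
 * A minimal user-space sketch of the preferred "wide" path (subsystem 4800),
 * assuming the caller uses the MIG-generated stubs declared in
 * <mach/mach_vm.h>; the same call works unchanged for 32-bit and 64-bit
 * callers:
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	mach_vm_address_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr, vm_page_size,
 *	    VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		... use [addr, addr + vm_page_size) ...
 *		(void) mach_vm_deallocate(mach_task_self(), addr, vm_page_size);
 *	}
 */
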
#include <debug.h>

#include <vm_cpm.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/upl.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/sdt.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_init.h>

#include <san/kasan.h>

vm_size_t        upl_offset_to_pagelist = 0;

#if	VM_CPM
#include <vm/cpm.h>
#endif	/* VM_CPM */

/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
133kern_return_t
5ba3f43e 134mach_vm_allocate_external(
91447636
A
135 vm_map_t map,
136 mach_vm_offset_t *addr,
137 mach_vm_size_t size,
1c79356b 138 int flags)
5ba3f43e
A
139{
140 vm_tag_t tag;
141
142 VM_GET_FLAGS_ALIAS(flags, tag);
143 return (mach_vm_allocate_kernel(map, addr, size, flags, tag));
144}
145
146kern_return_t
147mach_vm_allocate_kernel(
148 vm_map_t map,
149 mach_vm_offset_t *addr,
150 mach_vm_size_t size,
151 int flags,
152 vm_tag_t tag)
1c79356b 153{
91447636
A
154 vm_map_offset_t map_addr;
155 vm_map_size_t map_size;
1c79356b 156 kern_return_t result;
2d21ac55
A
157 boolean_t anywhere;
158
159 /* filter out any kernel-only flags */
160 if (flags & ~VM_FLAGS_USER_ALLOCATE)
161 return KERN_INVALID_ARGUMENT;
1c79356b
A
162
163 if (map == VM_MAP_NULL)
164 return(KERN_INVALID_ARGUMENT);
165 if (size == 0) {
166 *addr = 0;
167 return(KERN_SUCCESS);
168 }
169
2d21ac55 170 anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
91447636
A
171 if (anywhere) {
172 /*
173 * No specific address requested, so start candidate address
174 * search at the minimum address in the map. However, if that
175 * minimum is 0, bump it up by PAGE_SIZE. We want to limit
176 * allocations of PAGEZERO to explicit requests since its
177 * normal use is to catch dereferences of NULL and many
178 * applications also treat pointers with a value of 0 as
179 * special and suddenly having address 0 contain useable
180 * memory would tend to confuse those applications.
181 */
182 map_addr = vm_map_min(map);
183 if (map_addr == 0)
39236c6e 184 map_addr += VM_MAP_PAGE_SIZE(map);
91447636 185 } else
39236c6e
A
186 map_addr = vm_map_trunc_page(*addr,
187 VM_MAP_PAGE_MASK(map));
188 map_size = vm_map_round_page(size,
189 VM_MAP_PAGE_MASK(map));
91447636
A
190 if (map_size == 0) {
191 return(KERN_INVALID_ARGUMENT);
192 }
193
194 result = vm_map_enter(
195 map,
196 &map_addr,
197 map_size,
198 (vm_map_offset_t)0,
199 flags,
5ba3f43e
A
200 VM_MAP_KERNEL_FLAGS_NONE,
201 tag,
91447636
A
202 VM_OBJECT_NULL,
203 (vm_object_offset_t)0,
204 FALSE,
205 VM_PROT_DEFAULT,
206 VM_PROT_ALL,
207 VM_INHERIT_DEFAULT);
208
209 *addr = map_addr;
210 return(result);
211}
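
/*
 * The _external entry point pulls a vm_tag_t out of the high byte of the
 * user-supplied flags (VM_GET_FLAGS_ALIAS).  A user-space caller sets that
 * byte with VM_MAKE_TAG(); a sketch using one of the predefined tags from
 * <mach/vm_statistics.h>:
 *
 *	mach_vm_address_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr, vm_page_size,
 *	    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_MALLOC));
 *
 * The tag is then reported as the region's user_tag by the vm_region
 * family of calls (and by tools such as vmmap).
 */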
212
/*
 *	vm_allocate
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
218kern_return_t
5ba3f43e 219vm_allocate_external(
91447636
A
220 vm_map_t map,
221 vm_offset_t *addr,
222 vm_size_t size,
223 int flags)
5ba3f43e
A
224{
225 vm_tag_t tag;
226
227 VM_GET_FLAGS_ALIAS(flags, tag);
228 return (vm_allocate_kernel(map, addr, size, flags, tag));
229}
230
231kern_return_t
232vm_allocate_kernel(
233 vm_map_t map,
234 vm_offset_t *addr,
235 vm_size_t size,
236 int flags,
237 vm_tag_t tag)
91447636
A
238{
239 vm_map_offset_t map_addr;
240 vm_map_size_t map_size;
241 kern_return_t result;
2d21ac55
A
242 boolean_t anywhere;
243
244 /* filter out any kernel-only flags */
245 if (flags & ~VM_FLAGS_USER_ALLOCATE)
246 return KERN_INVALID_ARGUMENT;
91447636
A
247
248 if (map == VM_MAP_NULL)
249 return(KERN_INVALID_ARGUMENT);
1c79356b 250 if (size == 0) {
91447636
A
251 *addr = 0;
252 return(KERN_SUCCESS);
253 }
254
2d21ac55 255 anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
91447636
A
256 if (anywhere) {
257 /*
258 * No specific address requested, so start candidate address
259 * search at the minimum address in the map. However, if that
260 * minimum is 0, bump it up by PAGE_SIZE. We want to limit
261 * allocations of PAGEZERO to explicit requests since its
262 * normal use is to catch dereferences of NULL and many
263 * applications also treat pointers with a value of 0 as
264 * special and suddenly having address 0 contain useable
265 * memory would tend to confuse those applications.
266 */
267 map_addr = vm_map_min(map);
268 if (map_addr == 0)
39236c6e 269 map_addr += VM_MAP_PAGE_SIZE(map);
91447636 270 } else
39236c6e
A
271 map_addr = vm_map_trunc_page(*addr,
272 VM_MAP_PAGE_MASK(map));
273 map_size = vm_map_round_page(size,
274 VM_MAP_PAGE_MASK(map));
91447636 275 if (map_size == 0) {
1c79356b
A
276 return(KERN_INVALID_ARGUMENT);
277 }
278
279 result = vm_map_enter(
280 map,
91447636
A
281 &map_addr,
282 map_size,
283 (vm_map_offset_t)0,
1c79356b 284 flags,
5ba3f43e
A
285 VM_MAP_KERNEL_FLAGS_NONE,
286 tag,
1c79356b
A
287 VM_OBJECT_NULL,
288 (vm_object_offset_t)0,
289 FALSE,
290 VM_PROT_DEFAULT,
291 VM_PROT_ALL,
292 VM_INHERIT_DEFAULT);
293
5ba3f43e
A
294#if KASAN
295 if (result == KERN_SUCCESS && map->pmap == kernel_pmap) {
296 kasan_notify_address(map_addr, map_size);
297 }
298#endif
299
91447636 300 *addr = CAST_DOWN(vm_offset_t, map_addr);
1c79356b
A
301 return(result);
302}
303
304/*
91447636
A
305 * mach_vm_deallocate -
306 * deallocates the specified range of addresses in the
1c79356b
A
307 * specified address map.
308 */
309kern_return_t
91447636
A
310mach_vm_deallocate(
311 vm_map_t map,
312 mach_vm_offset_t start,
313 mach_vm_size_t size)
314{
315 if ((map == VM_MAP_NULL) || (start + size < start))
316 return(KERN_INVALID_ARGUMENT);
317
318 if (size == (mach_vm_offset_t) 0)
319 return(KERN_SUCCESS);
320
39236c6e
A
321 return(vm_map_remove(map,
322 vm_map_trunc_page(start,
323 VM_MAP_PAGE_MASK(map)),
324 vm_map_round_page(start+size,
325 VM_MAP_PAGE_MASK(map)),
326 VM_MAP_NO_FLAGS));
91447636
A
327}
328
329/*
330 * vm_deallocate -
331 * deallocates the specified range of addresses in the
332 * specified address map (limited to addresses the same
333 * size as the kernel).
334 */
335kern_return_t
1c79356b 336vm_deallocate(
39037602 337 vm_map_t map,
1c79356b
A
338 vm_offset_t start,
339 vm_size_t size)
340{
91447636 341 if ((map == VM_MAP_NULL) || (start + size < start))
1c79356b
A
342 return(KERN_INVALID_ARGUMENT);
343
344 if (size == (vm_offset_t) 0)
345 return(KERN_SUCCESS);
346
39236c6e
A
347 return(vm_map_remove(map,
348 vm_map_trunc_page(start,
349 VM_MAP_PAGE_MASK(map)),
350 vm_map_round_page(start+size,
351 VM_MAP_PAGE_MASK(map)),
352 VM_MAP_NO_FLAGS));
1c79356b
A
353}
354
355/*
91447636
A
356 * mach_vm_inherit -
357 * Sets the inheritance of the specified range in the
1c79356b
A
358 * specified map.
359 */
360kern_return_t
91447636
A
361mach_vm_inherit(
362 vm_map_t map,
363 mach_vm_offset_t start,
364 mach_vm_size_t size,
365 vm_inherit_t new_inheritance)
366{
367 if ((map == VM_MAP_NULL) || (start + size < start) ||
368 (new_inheritance > VM_INHERIT_LAST_VALID))
369 return(KERN_INVALID_ARGUMENT);
370
371 if (size == 0)
372 return KERN_SUCCESS;
373
374 return(vm_map_inherit(map,
39236c6e
A
375 vm_map_trunc_page(start,
376 VM_MAP_PAGE_MASK(map)),
377 vm_map_round_page(start+size,
378 VM_MAP_PAGE_MASK(map)),
91447636
A
379 new_inheritance));
380}
381
/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses the same
 *	size as the kernel).
 */
387kern_return_t
1c79356b 388vm_inherit(
39037602 389 vm_map_t map,
1c79356b
A
390 vm_offset_t start,
391 vm_size_t size,
392 vm_inherit_t new_inheritance)
393{
91447636
A
394 if ((map == VM_MAP_NULL) || (start + size < start) ||
395 (new_inheritance > VM_INHERIT_LAST_VALID))
1c79356b
A
396 return(KERN_INVALID_ARGUMENT);
397
91447636
A
398 if (size == 0)
399 return KERN_SUCCESS;
400
1c79356b 401 return(vm_map_inherit(map,
39236c6e
A
402 vm_map_trunc_page(start,
403 VM_MAP_PAGE_MASK(map)),
404 vm_map_round_page(start+size,
405 VM_MAP_PAGE_MASK(map)),
1c79356b
A
406 new_inheritance));
407}
408
409/*
91447636
A
410 * mach_vm_protect -
411 * Sets the protection of the specified range in the
1c79356b
A
412 * specified map.
413 */
414
91447636
A
415kern_return_t
416mach_vm_protect(
417 vm_map_t map,
418 mach_vm_offset_t start,
419 mach_vm_size_t size,
420 boolean_t set_maximum,
421 vm_prot_t new_protection)
422{
423 if ((map == VM_MAP_NULL) || (start + size < start) ||
424 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
425 return(KERN_INVALID_ARGUMENT);
426
427 if (size == 0)
428 return KERN_SUCCESS;
429
430 return(vm_map_protect(map,
39236c6e
A
431 vm_map_trunc_page(start,
432 VM_MAP_PAGE_MASK(map)),
433 vm_map_round_page(start+size,
434 VM_MAP_PAGE_MASK(map)),
91447636
A
435 new_protection,
436 set_maximum));
437}
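
/*
 * Typical user-space use is to drop or restore permissions on an existing
 * allocation, e.g. turning one page into a guard page.  A minimal sketch,
 * where "guard" is a page-aligned address the caller already owns:
 *
 *	kern_return_t kr;
 *
 *	kr = mach_vm_protect(mach_task_self(), guard, vm_page_size,
 *	    FALSE, VM_PROT_NONE);
 *
 * Passing set_maximum == TRUE instead lowers the maximum protection, which
 * cannot be raised again later.
 */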
438
439/*
440 * vm_protect -
441 * Sets the protection of the specified range in the
442 * specified map. Addressability of the range limited
443 * to the same size as the kernel.
444 */
445
1c79356b
A
446kern_return_t
447vm_protect(
91447636 448 vm_map_t map,
1c79356b
A
449 vm_offset_t start,
450 vm_size_t size,
451 boolean_t set_maximum,
452 vm_prot_t new_protection)
453{
91447636
A
454 if ((map == VM_MAP_NULL) || (start + size < start) ||
455 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
1c79356b
A
456 return(KERN_INVALID_ARGUMENT);
457
91447636
A
458 if (size == 0)
459 return KERN_SUCCESS;
460
1c79356b 461 return(vm_map_protect(map,
39236c6e
A
462 vm_map_trunc_page(start,
463 VM_MAP_PAGE_MASK(map)),
464 vm_map_round_page(start+size,
465 VM_MAP_PAGE_MASK(map)),
1c79356b
A
466 new_protection,
467 set_maximum));
468}
469
/*
 *	mach_vm_machine_attribute -
 *	Handle machine-specific attributes for a mapping, such
 *	as cacheability, migratability, etc.
 */
475kern_return_t
91447636
A
476mach_vm_machine_attribute(
477 vm_map_t map,
478 mach_vm_address_t addr,
479 mach_vm_size_t size,
480 vm_machine_attribute_t attribute,
481 vm_machine_attribute_val_t* value) /* IN/OUT */
482{
483 if ((map == VM_MAP_NULL) || (addr + size < addr))
484 return(KERN_INVALID_ARGUMENT);
485
486 if (size == 0)
487 return KERN_SUCCESS;
488
39236c6e
A
489 return vm_map_machine_attribute(
490 map,
491 vm_map_trunc_page(addr,
492 VM_MAP_PAGE_MASK(map)),
493 vm_map_round_page(addr+size,
494 VM_MAP_PAGE_MASK(map)),
495 attribute,
496 value);
91447636
A
497}
498
/*
 *	vm_machine_attribute -
 *	Handle machine-specific attributes for a mapping, such
 *	as cacheability, migratability, etc. Limited addressability
 *	(same range limits as for the native kernel map).
 */
505kern_return_t
1c79356b
A
506vm_machine_attribute(
507 vm_map_t map,
91447636 508 vm_address_t addr,
1c79356b
A
509 vm_size_t size,
510 vm_machine_attribute_t attribute,
511 vm_machine_attribute_val_t* value) /* IN/OUT */
512{
91447636
A
513 if ((map == VM_MAP_NULL) || (addr + size < addr))
514 return(KERN_INVALID_ARGUMENT);
515
516 if (size == 0)
517 return KERN_SUCCESS;
518
39236c6e
A
519 return vm_map_machine_attribute(
520 map,
521 vm_map_trunc_page(addr,
522 VM_MAP_PAGE_MASK(map)),
523 vm_map_round_page(addr+size,
524 VM_MAP_PAGE_MASK(map)),
525 attribute,
526 value);
91447636
A
527}
528
529/*
530 * mach_vm_read -
531 * Read/copy a range from one address space and return it to the caller.
532 *
533 * It is assumed that the address for the returned memory is selected by
534 * the IPC implementation as part of receiving the reply to this call.
535 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
536 * that gets returned.
537 *
538 * JMM - because of mach_msg_type_number_t, this call is limited to a
539 * single 4GB region at this time.
540 *
541 */
542kern_return_t
543mach_vm_read(
544 vm_map_t map,
545 mach_vm_address_t addr,
546 mach_vm_size_t size,
547 pointer_t *data,
548 mach_msg_type_number_t *data_size)
549{
550 kern_return_t error;
551 vm_map_copy_t ipc_address;
552
1c79356b
A
553 if (map == VM_MAP_NULL)
554 return(KERN_INVALID_ARGUMENT);
555
b0d623f7
A
556 if ((mach_msg_type_number_t) size != size)
557 return KERN_INVALID_ARGUMENT;
91447636
A
558
559 error = vm_map_copyin(map,
560 (vm_map_address_t)addr,
561 (vm_map_size_t)size,
562 FALSE, /* src_destroy */
563 &ipc_address);
564
565 if (KERN_SUCCESS == error) {
566 *data = (pointer_t) ipc_address;
b0d623f7
A
567 *data_size = (mach_msg_type_number_t) size;
568 assert(*data_size == size);
91447636
A
569 }
570 return(error);
1c79356b
A
571}
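
/*
 * A user-space sketch of reading another task's memory with this call; the
 * returned buffer is out-of-line memory in the caller's address space and
 * must be deallocated when done.  "task" is a send right to the target task
 * obtained elsewhere (e.g. task_for_pid()), and "remote_addr"/"remote_size"
 * are caller-provided:
 *
 *	vm_offset_t data = 0;
 *	mach_msg_type_number_t count = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_read(task, remote_addr, remote_size, &data, &count);
 *	if (kr == KERN_SUCCESS) {
 *		... inspect count bytes at data ...
 *		(void) mach_vm_deallocate(mach_task_self(),
 *		    (mach_vm_address_t)data, count);
 *	}
 */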
572
91447636
A
573/*
574 * vm_read -
575 * Read/copy a range from one address space and return it to the caller.
576 * Limited addressability (same range limits as for the native kernel map).
577 *
578 * It is assumed that the address for the returned memory is selected by
579 * the IPC implementation as part of receiving the reply to this call.
580 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
581 * that gets returned.
582 */
1c79356b
A
583kern_return_t
584vm_read(
585 vm_map_t map,
91447636 586 vm_address_t addr,
1c79356b
A
587 vm_size_t size,
588 pointer_t *data,
589 mach_msg_type_number_t *data_size)
590{
591 kern_return_t error;
592 vm_map_copy_t ipc_address;
593
594 if (map == VM_MAP_NULL)
595 return(KERN_INVALID_ARGUMENT);
596
b0d623f7
A
597 if (size > (unsigned)(mach_msg_type_number_t) -1) {
598 /*
599 * The kernel could handle a 64-bit "size" value, but
600 * it could not return the size of the data in "*data_size"
601 * without overflowing.
602 * Let's reject this "size" as invalid.
603 */
604 return KERN_INVALID_ARGUMENT;
605 }
606
91447636
A
607 error = vm_map_copyin(map,
608 (vm_map_address_t)addr,
609 (vm_map_size_t)size,
610 FALSE, /* src_destroy */
611 &ipc_address);
612
613 if (KERN_SUCCESS == error) {
1c79356b 614 *data = (pointer_t) ipc_address;
b0d623f7
A
615 *data_size = (mach_msg_type_number_t) size;
616 assert(*data_size == size);
1c79356b
A
617 }
618 return(error);
619}
620
91447636
A
621/*
622 * mach_vm_read_list -
623 * Read/copy a list of address ranges from specified map.
624 *
625 * MIG does not know how to deal with a returned array of
626 * vm_map_copy_t structures, so we have to do the copyout
627 * manually here.
628 */
629kern_return_t
630mach_vm_read_list(
631 vm_map_t map,
632 mach_vm_read_entry_t data_list,
633 natural_t count)
634{
635 mach_msg_type_number_t i;
636 kern_return_t error;
637 vm_map_copy_t copy;
638
8ad349bb
A
639 if (map == VM_MAP_NULL ||
640 count > VM_MAP_ENTRY_MAX)
91447636
A
641 return(KERN_INVALID_ARGUMENT);
642
643 error = KERN_SUCCESS;
644 for(i=0; i<count; i++) {
645 vm_map_address_t map_addr;
646 vm_map_size_t map_size;
647
648 map_addr = (vm_map_address_t)(data_list[i].address);
649 map_size = (vm_map_size_t)(data_list[i].size);
650
651 if(map_size != 0) {
652 error = vm_map_copyin(map,
653 map_addr,
654 map_size,
655 FALSE, /* src_destroy */
656 &copy);
657 if (KERN_SUCCESS == error) {
658 error = vm_map_copyout(
659 current_task()->map,
660 &map_addr,
661 copy);
662 if (KERN_SUCCESS == error) {
663 data_list[i].address = map_addr;
664 continue;
665 }
666 vm_map_copy_discard(copy);
667 }
668 }
669 data_list[i].address = (mach_vm_address_t)0;
670 data_list[i].size = (mach_vm_size_t)0;
671 }
672 return(error);
673}
674
675/*
676 * vm_read_list -
677 * Read/copy a list of address ranges from specified map.
678 *
679 * MIG does not know how to deal with a returned array of
680 * vm_map_copy_t structures, so we have to do the copyout
681 * manually here.
682 *
683 * The source and destination ranges are limited to those
684 * that can be described with a vm_address_t (i.e. same
685 * size map as the kernel).
686 *
687 * JMM - If the result of the copyout is an address range
688 * that cannot be described with a vm_address_t (i.e. the
689 * caller had a larger address space but used this call
690 * anyway), it will result in a truncated address being
691 * returned (and a likely confused caller).
692 */
693
1c79356b
A
694kern_return_t
695vm_read_list(
696 vm_map_t map,
91447636
A
697 vm_read_entry_t data_list,
698 natural_t count)
1c79356b
A
699{
700 mach_msg_type_number_t i;
701 kern_return_t error;
91447636 702 vm_map_copy_t copy;
1c79356b 703
8ad349bb
A
704 if (map == VM_MAP_NULL ||
705 count > VM_MAP_ENTRY_MAX)
1c79356b
A
706 return(KERN_INVALID_ARGUMENT);
707
91447636 708 error = KERN_SUCCESS;
1c79356b 709 for(i=0; i<count; i++) {
91447636
A
710 vm_map_address_t map_addr;
711 vm_map_size_t map_size;
712
713 map_addr = (vm_map_address_t)(data_list[i].address);
714 map_size = (vm_map_size_t)(data_list[i].size);
715
716 if(map_size != 0) {
717 error = vm_map_copyin(map,
718 map_addr,
719 map_size,
720 FALSE, /* src_destroy */
721 &copy);
722 if (KERN_SUCCESS == error) {
723 error = vm_map_copyout(current_task()->map,
724 &map_addr,
725 copy);
726 if (KERN_SUCCESS == error) {
727 data_list[i].address =
728 CAST_DOWN(vm_offset_t, map_addr);
729 continue;
730 }
731 vm_map_copy_discard(copy);
1c79356b
A
732 }
733 }
91447636
A
734 data_list[i].address = (mach_vm_address_t)0;
735 data_list[i].size = (mach_vm_size_t)0;
1c79356b
A
736 }
737 return(error);
738}
739
/*
 *	mach_vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	In making an assumption that the current thread is local, it is
 *	no longer cluster-safe without a fully supportive local proxy
 *	thread/task (but we don't support clusters anymore, so this is moot).
 */
749
1c79356b 750kern_return_t
91447636
A
751mach_vm_read_overwrite(
752 vm_map_t map,
753 mach_vm_address_t address,
754 mach_vm_size_t size,
755 mach_vm_address_t data,
756 mach_vm_size_t *data_size)
757{
758 kern_return_t error;
1c79356b
A
759 vm_map_copy_t copy;
760
761 if (map == VM_MAP_NULL)
762 return(KERN_INVALID_ARGUMENT);
763
91447636
A
764 error = vm_map_copyin(map, (vm_map_address_t)address,
765 (vm_map_size_t)size, FALSE, &copy);
766
767 if (KERN_SUCCESS == error) {
768 error = vm_map_copy_overwrite(current_thread()->map,
769 (vm_map_address_t)data,
770 copy, FALSE);
771 if (KERN_SUCCESS == error) {
772 *data_size = size;
773 return error;
1c79356b 774 }
91447636 775 vm_map_copy_discard(copy);
1c79356b 776 }
91447636
A
777 return(error);
778}
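
/*
 * Unlike mach_vm_read(), this variant copies into a buffer the caller
 * already owns, avoiding the out-of-line allocation.  A sketch, where
 * "task" and "remote_addr" are caller-provided as above:
 *
 *	char buf[4096];
 *	mach_vm_size_t outsize = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_read_overwrite(task, remote_addr, sizeof(buf),
 *	    (mach_vm_address_t)buf, &outsize);
 */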
779
/*
 *	vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	This routine adds the additional limitation that the source and
 *	destination ranges must be describable with vm_address_t values
 *	(i.e. the same size address spaces as the kernel, or at least
 *	the ranges are in that first portion of the respective address
 *	spaces).
 */
791
792kern_return_t
793vm_read_overwrite(
794 vm_map_t map,
795 vm_address_t address,
796 vm_size_t size,
797 vm_address_t data,
798 vm_size_t *data_size)
799{
800 kern_return_t error;
801 vm_map_copy_t copy;
802
803 if (map == VM_MAP_NULL)
804 return(KERN_INVALID_ARGUMENT);
805
806 error = vm_map_copyin(map, (vm_map_address_t)address,
807 (vm_map_size_t)size, FALSE, &copy);
808
809 if (KERN_SUCCESS == error) {
810 error = vm_map_copy_overwrite(current_thread()->map,
811 (vm_map_address_t)data,
812 copy, FALSE);
813 if (KERN_SUCCESS == error) {
814 *data_size = size;
815 return error;
1c79356b 816 }
91447636 817 vm_map_copy_discard(copy);
1c79356b 818 }
1c79356b
A
819 return(error);
820}
821
822
91447636
A
823/*
824 * mach_vm_write -
825 * Overwrite the specified address range with the data provided
826 * (from the current map).
827 */
828kern_return_t
829mach_vm_write(
830 vm_map_t map,
831 mach_vm_address_t address,
832 pointer_t data,
833 __unused mach_msg_type_number_t size)
834{
835 if (map == VM_MAP_NULL)
836 return KERN_INVALID_ARGUMENT;
1c79356b 837
91447636
A
838 return vm_map_copy_overwrite(map, (vm_map_address_t)address,
839 (vm_map_copy_t) data, FALSE /* interruptible XXX */);
840}
1c79356b 841
91447636
A
/*
 *	vm_write -
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 *
 *	The addressability of the range of addresses to overwrite is
 *	limited by the use of a vm_address_t (same size as kernel map).
 *	Either the target map is also small, or the range is in the
 *	low addresses within it.
 */
1c79356b
A
852kern_return_t
853vm_write(
91447636
A
854 vm_map_t map,
855 vm_address_t address,
856 pointer_t data,
857 __unused mach_msg_type_number_t size)
858{
859 if (map == VM_MAP_NULL)
860 return KERN_INVALID_ARGUMENT;
861
862 return vm_map_copy_overwrite(map, (vm_map_address_t)address,
863 (vm_map_copy_t) data, FALSE /* interruptible XXX */);
864}
865
866/*
867 * mach_vm_copy -
868 * Overwrite one range of the specified map with the contents of
869 * another range within that same map (i.e. both address ranges
870 * are "over there").
871 */
872kern_return_t
873mach_vm_copy(
1c79356b 874 vm_map_t map,
91447636
A
875 mach_vm_address_t source_address,
876 mach_vm_size_t size,
877 mach_vm_address_t dest_address)
1c79356b 878{
91447636
A
879 vm_map_copy_t copy;
880 kern_return_t kr;
881
1c79356b
A
882 if (map == VM_MAP_NULL)
883 return KERN_INVALID_ARGUMENT;
884
91447636
A
885 kr = vm_map_copyin(map, (vm_map_address_t)source_address,
886 (vm_map_size_t)size, FALSE, &copy);
887
888 if (KERN_SUCCESS == kr) {
889 kr = vm_map_copy_overwrite(map,
890 (vm_map_address_t)dest_address,
891 copy, FALSE /* interruptible XXX */);
892
893 if (KERN_SUCCESS != kr)
894 vm_map_copy_discard(copy);
895 }
896 return kr;
1c79356b
A
897}
898
899kern_return_t
900vm_copy(
901 vm_map_t map,
902 vm_address_t source_address,
903 vm_size_t size,
904 vm_address_t dest_address)
905{
906 vm_map_copy_t copy;
907 kern_return_t kr;
908
909 if (map == VM_MAP_NULL)
910 return KERN_INVALID_ARGUMENT;
911
91447636
A
912 kr = vm_map_copyin(map, (vm_map_address_t)source_address,
913 (vm_map_size_t)size, FALSE, &copy);
1c79356b 914
91447636
A
915 if (KERN_SUCCESS == kr) {
916 kr = vm_map_copy_overwrite(map,
917 (vm_map_address_t)dest_address,
918 copy, FALSE /* interruptible XXX */);
1c79356b 919
91447636
A
920 if (KERN_SUCCESS != kr)
921 vm_map_copy_discard(copy);
922 }
923 return kr;
1c79356b
A
924}
925
926/*
91447636
A
927 * mach_vm_map -
928 * Map some range of an object into an address space.
929 *
930 * The object can be one of several types of objects:
931 * NULL - anonymous memory
932 * a named entry - a range within another address space
933 * or a range within a memory object
934 * a whole memory object
935 *
1c79356b
A
936 */
937kern_return_t
5ba3f43e 938mach_vm_map_external(
1c79356b 939 vm_map_t target_map,
91447636
A
940 mach_vm_offset_t *address,
941 mach_vm_size_t initial_size,
942 mach_vm_offset_t mask,
1c79356b
A
943 int flags,
944 ipc_port_t port,
945 vm_object_offset_t offset,
946 boolean_t copy,
947 vm_prot_t cur_protection,
948 vm_prot_t max_protection,
949 vm_inherit_t inheritance)
5ba3f43e
A
950{
951 vm_tag_t tag;
952
953 VM_GET_FLAGS_ALIAS(flags, tag);
954 return (mach_vm_map_kernel(target_map, address, initial_size, mask, flags, tag, port,
955 offset, copy, cur_protection, max_protection, inheritance));
956}
957
958kern_return_t
959mach_vm_map_kernel(
960 vm_map_t target_map,
961 mach_vm_offset_t *address,
962 mach_vm_size_t initial_size,
963 mach_vm_offset_t mask,
964 int flags,
965 vm_tag_t tag,
966 ipc_port_t port,
967 vm_object_offset_t offset,
968 boolean_t copy,
969 vm_prot_t cur_protection,
970 vm_prot_t max_protection,
971 vm_inherit_t inheritance)
1c79356b 972{
316670eb
A
973 kern_return_t kr;
974 vm_map_offset_t vmmaddr;
975
976 vmmaddr = (vm_map_offset_t) *address;
977
2d21ac55
A
978 /* filter out any kernel-only flags */
979 if (flags & ~VM_FLAGS_USER_MAP)
980 return KERN_INVALID_ARGUMENT;
1c79356b 981
316670eb 982 kr = vm_map_enter_mem_object(target_map,
5ba3f43e
A
983 &vmmaddr,
984 initial_size,
985 mask,
986 flags,
987 VM_MAP_KERNEL_FLAGS_NONE,
988 tag,
989 port,
990 offset,
991 copy,
992 cur_protection,
993 max_protection,
994 inheritance);
995
996#if KASAN
997 if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) {
998 kasan_notify_address(vmmaddr, initial_size);
999 }
1000#endif
316670eb
A
1001
1002 *address = vmmaddr;
1003 return kr;
1c79356b
A
1004}
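
/*
 * From user space this path is most often reached with a named entry as
 * the backing object.  A sketch that maps one existing page a second time
 * in the same task, using only the public calls from <mach/mach.h> and
 * <mach/mach_vm.h>; "existing_addr" is a page the caller already owns:
 *
 *	memory_object_size_t entry_size = vm_page_size;
 *	mach_port_t entry = MACH_PORT_NULL;
 *	mach_vm_address_t mapped = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size,
 *	    existing_addr, VM_PROT_READ | VM_PROT_WRITE, &entry,
 *	    MACH_PORT_NULL);
 *	if (kr == KERN_SUCCESS) {
 *		kr = mach_vm_map(mach_task_self(), &mapped, entry_size, 0,
 *		    VM_FLAGS_ANYWHERE, entry, 0, FALSE,
 *		    VM_PROT_READ | VM_PROT_WRITE,
 *		    VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_DEFAULT);
 *	}
 */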
1005
91447636
A
1006
1007/* legacy interface */
1008kern_return_t
5ba3f43e 1009vm_map_64_external(
91447636
A
1010 vm_map_t target_map,
1011 vm_offset_t *address,
1012 vm_size_t size,
1013 vm_offset_t mask,
1014 int flags,
1015 ipc_port_t port,
1016 vm_object_offset_t offset,
1017 boolean_t copy,
1018 vm_prot_t cur_protection,
1019 vm_prot_t max_protection,
1020 vm_inherit_t inheritance)
5ba3f43e
A
1021{
1022 vm_tag_t tag;
1023
1024 VM_GET_FLAGS_ALIAS(flags, tag);
1025 return (vm_map_64_kernel(target_map, address, size, mask, flags, tag, port, offset,
1026 copy, cur_protection, max_protection, inheritance));
1027}
1028
1029kern_return_t
1030vm_map_64_kernel(
1031 vm_map_t target_map,
1032 vm_offset_t *address,
1033 vm_size_t size,
1034 vm_offset_t mask,
1035 int flags,
1036 vm_tag_t tag,
1037 ipc_port_t port,
1038 vm_object_offset_t offset,
1039 boolean_t copy,
1040 vm_prot_t cur_protection,
1041 vm_prot_t max_protection,
1042 vm_inherit_t inheritance)
91447636
A
1043{
1044 mach_vm_address_t map_addr;
1045 mach_vm_size_t map_size;
1046 mach_vm_offset_t map_mask;
1047 kern_return_t kr;
1048
1049 map_addr = (mach_vm_address_t)*address;
1050 map_size = (mach_vm_size_t)size;
1051 map_mask = (mach_vm_offset_t)mask;
1052
5ba3f43e 1053 kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask, flags, tag,
91447636
A
1054 port, offset, copy,
1055 cur_protection, max_protection, inheritance);
b0d623f7 1056 *address = CAST_DOWN(vm_offset_t, map_addr);
91447636
A
1057 return kr;
1058}
1059
1c79356b 1060/* temporary, until world build */
55e303ae 1061kern_return_t
5ba3f43e
A
1062vm_map_external(
1063 vm_map_t target_map,
1064 vm_offset_t *address,
1065 vm_size_t size,
1066 vm_offset_t mask,
1067 int flags,
1068 ipc_port_t port,
1069 vm_offset_t offset,
1070 boolean_t copy,
1071 vm_prot_t cur_protection,
1072 vm_prot_t max_protection,
1073 vm_inherit_t inheritance)
1074{
1075 vm_tag_t tag;
1076
1077 VM_GET_FLAGS_ALIAS(flags, tag);
1078 return (vm_map_kernel(target_map, address, size, mask, flags, tag, port, offset, copy, cur_protection, max_protection, inheritance));
1079}
1080
1081kern_return_t
1082vm_map_kernel(
1c79356b
A
1083 vm_map_t target_map,
1084 vm_offset_t *address,
1085 vm_size_t size,
1086 vm_offset_t mask,
1087 int flags,
5ba3f43e 1088 vm_tag_t tag,
1c79356b
A
1089 ipc_port_t port,
1090 vm_offset_t offset,
1091 boolean_t copy,
1092 vm_prot_t cur_protection,
1093 vm_prot_t max_protection,
1094 vm_inherit_t inheritance)
1095{
91447636
A
1096 mach_vm_address_t map_addr;
1097 mach_vm_size_t map_size;
1098 mach_vm_offset_t map_mask;
1099 vm_object_offset_t obj_offset;
1100 kern_return_t kr;
1101
1102 map_addr = (mach_vm_address_t)*address;
1103 map_size = (mach_vm_size_t)size;
1104 map_mask = (mach_vm_offset_t)mask;
1105 obj_offset = (vm_object_offset_t)offset;
1106
5ba3f43e 1107 kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask, flags, tag,
91447636
A
1108 port, obj_offset, copy,
1109 cur_protection, max_protection, inheritance);
b0d623f7 1110 *address = CAST_DOWN(vm_offset_t, map_addr);
91447636
A
1111 return kr;
1112}
1113
1114/*
1115 * mach_vm_remap -
1116 * Remap a range of memory from one task into another,
1117 * to another address range within the same task, or
1118 * over top of itself (with altered permissions and/or
1119 * as an in-place copy of itself).
1120 */
5ba3f43e
A
1121kern_return_t
1122mach_vm_remap_external(
1123 vm_map_t target_map,
1124 mach_vm_offset_t *address,
1125 mach_vm_size_t size,
1126 mach_vm_offset_t mask,
1127 int flags,
1128 vm_map_t src_map,
1129 mach_vm_offset_t memory_address,
1130 boolean_t copy,
1131 vm_prot_t *cur_protection,
1132 vm_prot_t *max_protection,
1133 vm_inherit_t inheritance)
1134{
1135 vm_tag_t tag;
1136 VM_GET_FLAGS_ALIAS(flags, tag);
1137
1138 return (mach_vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, memory_address,
1139 copy, cur_protection, max_protection, inheritance));
1140}
91447636
A
1141
1142kern_return_t
5ba3f43e 1143mach_vm_remap_kernel(
91447636
A
1144 vm_map_t target_map,
1145 mach_vm_offset_t *address,
1146 mach_vm_size_t size,
1147 mach_vm_offset_t mask,
060df5ea 1148 int flags,
5ba3f43e 1149 vm_tag_t tag,
91447636
A
1150 vm_map_t src_map,
1151 mach_vm_offset_t memory_address,
1152 boolean_t copy,
1153 vm_prot_t *cur_protection,
1154 vm_prot_t *max_protection,
1155 vm_inherit_t inheritance)
1156{
1157 vm_map_offset_t map_addr;
1158 kern_return_t kr;
1159
1160 if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
1161 return KERN_INVALID_ARGUMENT;
1162
060df5ea
A
1163 /* filter out any kernel-only flags */
1164 if (flags & ~VM_FLAGS_USER_REMAP)
1165 return KERN_INVALID_ARGUMENT;
1166
91447636
A
1167 map_addr = (vm_map_offset_t)*address;
1168
1169 kr = vm_map_remap(target_map,
1170 &map_addr,
1171 size,
1172 mask,
060df5ea 1173 flags,
5ba3f43e
A
1174 VM_MAP_KERNEL_FLAGS_NONE,
1175 tag,
91447636
A
1176 src_map,
1177 memory_address,
1178 copy,
1179 cur_protection,
1180 max_protection,
1181 inheritance);
1182 *address = map_addr;
1183 return kr;
1c79356b
A
1184}
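
/*
 * A user-space sketch that aliases an existing range at a new address in
 * the same task; copy == FALSE keeps both mappings backed by the same
 * pages.  "src_addr" and "len" are caller-provided:
 *
 *	mach_vm_address_t target = 0;
 *	vm_prot_t cur = VM_PROT_NONE, max = VM_PROT_NONE;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_remap(mach_task_self(), &target, len, 0,
 *	    VM_FLAGS_ANYWHERE, mach_task_self(), src_addr, FALSE,
 *	    &cur, &max, VM_INHERIT_DEFAULT);
 */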
1185
91447636
A
1186/*
1187 * vm_remap -
1188 * Remap a range of memory from one task into another,
1189 * to another address range within the same task, or
1190 * over top of itself (with altered permissions and/or
1191 * as an in-place copy of itself).
1192 *
1193 * The addressability of the source and target address
1194 * range is limited by the size of vm_address_t (in the
1195 * kernel context).
1196 */
1197kern_return_t
5ba3f43e 1198vm_remap_external(
91447636
A
1199 vm_map_t target_map,
1200 vm_offset_t *address,
1201 vm_size_t size,
1202 vm_offset_t mask,
060df5ea 1203 int flags,
91447636
A
1204 vm_map_t src_map,
1205 vm_offset_t memory_address,
1206 boolean_t copy,
1207 vm_prot_t *cur_protection,
1208 vm_prot_t *max_protection,
1209 vm_inherit_t inheritance)
5ba3f43e
A
1210{
1211 vm_tag_t tag;
1212 VM_GET_FLAGS_ALIAS(flags, tag);
1213
1214 return (vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map,
1215 memory_address, copy, cur_protection, max_protection, inheritance));
1216}
1217
1218kern_return_t
1219vm_remap_kernel(
1220 vm_map_t target_map,
1221 vm_offset_t *address,
1222 vm_size_t size,
1223 vm_offset_t mask,
1224 int flags,
1225 vm_tag_t tag,
1226 vm_map_t src_map,
1227 vm_offset_t memory_address,
1228 boolean_t copy,
1229 vm_prot_t *cur_protection,
1230 vm_prot_t *max_protection,
1231 vm_inherit_t inheritance)
91447636
A
1232{
1233 vm_map_offset_t map_addr;
1234 kern_return_t kr;
1235
1236 if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
1237 return KERN_INVALID_ARGUMENT;
1238
060df5ea
A
1239 /* filter out any kernel-only flags */
1240 if (flags & ~VM_FLAGS_USER_REMAP)
1241 return KERN_INVALID_ARGUMENT;
1242
91447636
A
1243 map_addr = (vm_map_offset_t)*address;
1244
1245 kr = vm_map_remap(target_map,
1246 &map_addr,
1247 size,
1248 mask,
060df5ea 1249 flags,
5ba3f43e
A
1250 VM_MAP_KERNEL_FLAGS_NONE,
1251 tag,
91447636
A
1252 src_map,
1253 memory_address,
1254 copy,
1255 cur_protection,
1256 max_protection,
1257 inheritance);
1258 *address = CAST_DOWN(vm_offset_t, map_addr);
1259 return kr;
1260}
1c79356b
A
1261
/*
 * NOTE: these routines (and this file) will no longer require mach_host_server.h
 * when mach_vm_wire and vm_wire are changed to use ledgers.
 */
1266#include <mach/mach_host_server.h>
1267/*
91447636
A
1268 * mach_vm_wire
1269 * Specify that the range of the virtual address space
1270 * of the target task must not cause page faults for
1271 * the indicated accesses.
1272 *
1273 * [ To unwire the pages, specify VM_PROT_NONE. ]
1274 */
1275kern_return_t
5ba3f43e 1276mach_vm_wire_external(
91447636
A
1277 host_priv_t host_priv,
1278 vm_map_t map,
1279 mach_vm_offset_t start,
1280 mach_vm_size_t size,
1281 vm_prot_t access)
5ba3f43e
A
1282{
1283 return (mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK));
1284}
1285
1286kern_return_t
1287mach_vm_wire_kernel(
1288 host_priv_t host_priv,
1289 vm_map_t map,
1290 mach_vm_offset_t start,
1291 mach_vm_size_t size,
1292 vm_prot_t access,
1293 vm_tag_t tag)
91447636
A
1294{
1295 kern_return_t rc;
1296
1297 if (host_priv == HOST_PRIV_NULL)
1298 return KERN_INVALID_HOST;
1299
1300 assert(host_priv == &realhost);
1301
1302 if (map == VM_MAP_NULL)
1303 return KERN_INVALID_TASK;
1304
b0d623f7 1305 if (access & ~VM_PROT_ALL || (start + size < start))
91447636
A
1306 return KERN_INVALID_ARGUMENT;
1307
1308 if (access != VM_PROT_NONE) {
5ba3f43e 1309 rc = vm_map_wire_kernel(map,
39236c6e
A
1310 vm_map_trunc_page(start,
1311 VM_MAP_PAGE_MASK(map)),
1312 vm_map_round_page(start+size,
1313 VM_MAP_PAGE_MASK(map)),
5ba3f43e 1314 access, tag,
39236c6e 1315 TRUE);
91447636 1316 } else {
39236c6e
A
1317 rc = vm_map_unwire(map,
1318 vm_map_trunc_page(start,
1319 VM_MAP_PAGE_MASK(map)),
1320 vm_map_round_page(start+size,
1321 VM_MAP_PAGE_MASK(map)),
1322 TRUE);
91447636
A
1323 }
1324 return rc;
1325}
1326
1327/*
1328 * vm_wire -
1c79356b
A
1329 * Specify that the range of the virtual address space
1330 * of the target task must not cause page faults for
1331 * the indicated accesses.
1332 *
1333 * [ To unwire the pages, specify VM_PROT_NONE. ]
1334 */
1335kern_return_t
1336vm_wire(
1337 host_priv_t host_priv,
39037602 1338 vm_map_t map,
1c79356b
A
1339 vm_offset_t start,
1340 vm_size_t size,
1341 vm_prot_t access)
1342{
1343 kern_return_t rc;
1344
1345 if (host_priv == HOST_PRIV_NULL)
1346 return KERN_INVALID_HOST;
1347
1348 assert(host_priv == &realhost);
1349
1350 if (map == VM_MAP_NULL)
1351 return KERN_INVALID_TASK;
1352
91447636 1353 if ((access & ~VM_PROT_ALL) || (start + size < start))
1c79356b
A
1354 return KERN_INVALID_ARGUMENT;
1355
91447636
A
1356 if (size == 0) {
1357 rc = KERN_SUCCESS;
1358 } else if (access != VM_PROT_NONE) {
5ba3f43e 1359 rc = vm_map_wire_kernel(map,
39236c6e
A
1360 vm_map_trunc_page(start,
1361 VM_MAP_PAGE_MASK(map)),
1362 vm_map_round_page(start+size,
1363 VM_MAP_PAGE_MASK(map)),
5ba3f43e 1364 access, VM_KERN_MEMORY_OSFMK,
39236c6e 1365 TRUE);
1c79356b 1366 } else {
39236c6e
A
1367 rc = vm_map_unwire(map,
1368 vm_map_trunc_page(start,
1369 VM_MAP_PAGE_MASK(map)),
1370 vm_map_round_page(start+size,
1371 VM_MAP_PAGE_MASK(map)),
1372 TRUE);
1c79356b
A
1373 }
1374 return rc;
1375}
1376
1377/*
1378 * vm_msync
1379 *
1380 * Synchronises the memory range specified with its backing store
1381 * image by either flushing or cleaning the contents to the appropriate
91447636
A
1382 * memory manager.
1383 *
1384 * interpretation of sync_flags
1385 * VM_SYNC_INVALIDATE - discard pages, only return precious
1386 * pages to manager.
1387 *
1388 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
1389 * - discard pages, write dirty or precious
1390 * pages back to memory manager.
1391 *
1392 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
1393 * - write dirty or precious pages back to
1394 * the memory manager.
1395 *
1396 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
1397 * is a hole in the region, and we would
1398 * have returned KERN_SUCCESS, return
1399 * KERN_INVALID_ADDRESS instead.
1400 *
1401 * RETURNS
1402 * KERN_INVALID_TASK Bad task parameter
1403 * KERN_INVALID_ARGUMENT both sync and async were specified.
1404 * KERN_SUCCESS The usual.
1405 * KERN_INVALID_ADDRESS There was a hole in the region.
1406 */
1407
1408kern_return_t
1409mach_vm_msync(
1410 vm_map_t map,
1411 mach_vm_address_t address,
1412 mach_vm_size_t size,
1413 vm_sync_t sync_flags)
1414{
1415
1416 if (map == VM_MAP_NULL)
1417 return(KERN_INVALID_TASK);
1418
1419 return vm_map_msync(map, (vm_map_address_t)address,
1420 (vm_map_size_t)size, sync_flags);
1421}
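
/*
 * A user-space sketch that synchronously pushes dirty pages of a mapped
 * region back to its pager; "addr" and "len" describe an existing mapping:
 *
 *	kern_return_t kr;
 *
 *	kr = mach_vm_msync(mach_task_self(), addr, len,
 *	    VM_SYNC_SYNCHRONOUS);
 *
 * Adding VM_SYNC_INVALIDATE also discards clean pages, and
 * VM_SYNC_CONTIGUOUS turns a hole in the range into KERN_INVALID_ADDRESS,
 * as documented above.
 */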
1422
1423/*
1424 * vm_msync
1425 *
1426 * Synchronises the memory range specified with its backing store
1427 * image by either flushing or cleaning the contents to the appropriate
1428 * memory manager.
1c79356b
A
1429 *
1430 * interpretation of sync_flags
1431 * VM_SYNC_INVALIDATE - discard pages, only return precious
1432 * pages to manager.
1433 *
1434 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
1435 * - discard pages, write dirty or precious
1436 * pages back to memory manager.
1437 *
1438 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
1439 * - write dirty or precious pages back to
1440 * the memory manager.
1441 *
91447636
A
1442 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
1443 * is a hole in the region, and we would
1444 * have returned KERN_SUCCESS, return
1445 * KERN_INVALID_ADDRESS instead.
1446 *
1447 * The addressability of the range is limited to that which can
1448 * be described by a vm_address_t.
1c79356b
A
1449 *
1450 * RETURNS
1451 * KERN_INVALID_TASK Bad task parameter
1452 * KERN_INVALID_ARGUMENT both sync and async were specified.
1453 * KERN_SUCCESS The usual.
91447636 1454 * KERN_INVALID_ADDRESS There was a hole in the region.
1c79356b
A
1455 */
1456
1457kern_return_t
1458vm_msync(
1459 vm_map_t map,
1460 vm_address_t address,
1461 vm_size_t size,
1462 vm_sync_t sync_flags)
1463{
1c79356b 1464
91447636
A
1465 if (map == VM_MAP_NULL)
1466 return(KERN_INVALID_TASK);
1c79356b 1467
91447636
A
1468 return vm_map_msync(map, (vm_map_address_t)address,
1469 (vm_map_size_t)size, sync_flags);
1470}
1c79356b 1471
91447636 1472
6d2010ae
A
1473int
1474vm_toggle_entry_reuse(int toggle, int *old_value)
1475{
1476 vm_map_t map = current_map();
1477
39037602 1478 assert(!map->is_nested_map);
6d2010ae
A
1479 if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){
1480 *old_value = map->disable_vmentry_reuse;
1481 } else if(toggle == VM_TOGGLE_SET){
3e170ce0
A
1482 vm_map_entry_t map_to_entry;
1483
6d2010ae 1484 vm_map_lock(map);
3e170ce0 1485 vm_map_disable_hole_optimization(map);
6d2010ae 1486 map->disable_vmentry_reuse = TRUE;
3e170ce0
A
1487 __IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
1488 if (map->first_free == map_to_entry) {
6d2010ae
A
1489 map->highest_entry_end = vm_map_min(map);
1490 } else {
1491 map->highest_entry_end = map->first_free->vme_end;
1492 }
1493 vm_map_unlock(map);
1494 } else if (toggle == VM_TOGGLE_CLEAR){
1495 vm_map_lock(map);
1496 map->disable_vmentry_reuse = FALSE;
1497 vm_map_unlock(map);
1498 } else
1499 return KERN_INVALID_ARGUMENT;
1500
1501 return KERN_SUCCESS;
1502}
1503
91447636
A
1504/*
1505 * mach_vm_behavior_set
1506 *
1507 * Sets the paging behavior attribute for the specified range
1508 * in the specified map.
1509 *
1510 * This routine will fail with KERN_INVALID_ADDRESS if any address
1511 * in [start,start+size) is not a valid allocated memory region.
1512 */
1513kern_return_t
1514mach_vm_behavior_set(
1515 vm_map_t map,
1516 mach_vm_offset_t start,
39037602 1517 mach_vm_size_t size,
91447636
A
1518 vm_behavior_t new_behavior)
1519{
39037602
A
1520 vm_map_offset_t align_mask;
1521
91447636
A
1522 if ((map == VM_MAP_NULL) || (start + size < start))
1523 return(KERN_INVALID_ARGUMENT);
1c79356b
A
1524
1525 if (size == 0)
91447636 1526 return KERN_SUCCESS;
1c79356b 1527
39037602
A
1528 switch (new_behavior) {
1529 case VM_BEHAVIOR_REUSABLE:
1530 case VM_BEHAVIOR_REUSE:
1531 case VM_BEHAVIOR_CAN_REUSE:
1532 /*
1533 * Align to the hardware page size, to allow
1534 * malloc() to maximize the amount of re-usability,
1535 * even on systems with larger software page size.
1536 */
1537 align_mask = PAGE_MASK;
1538 break;
1539 default:
1540 align_mask = VM_MAP_PAGE_MASK(map);
1541 break;
1542 }
1543
1544 return vm_map_behavior_set(map,
1545 vm_map_trunc_page(start, align_mask),
1546 vm_map_round_page(start+size, align_mask),
1547 new_behavior);
91447636 1548}
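
/*
 * The REUSABLE/REUSE/CAN_REUSE behaviors are what user-space allocators use
 * to hand idle pages back to the VM system without unmapping them.  A
 * sketch, where "block" and "block_size" are caller-provided and
 * hardware-page aligned: mark the block reusable when it goes idle, and
 * mark it REUSE again before touching it:
 *
 *	(void) mach_vm_behavior_set(mach_task_self(), block, block_size,
 *	    VM_BEHAVIOR_REUSABLE);
 *	...
 *	(void) mach_vm_behavior_set(mach_task_self(), block, block_size,
 *	    VM_BEHAVIOR_REUSE);
 */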
1c79356b 1549
91447636
A
/*
 * vm_behavior_set
 *
 * Sets the paging behavior attribute for the specified range
 * in the specified map.
 *
 * This routine will fail with KERN_INVALID_ADDRESS if any address
 * in [start,start+size) is not a valid allocated memory region.
 *
 * This routine is potentially limited in addressability by the
 * use of vm_offset_t (if the map provided is larger than the
 * kernel's).
 */
1563kern_return_t
1564vm_behavior_set(
1565 vm_map_t map,
1566 vm_offset_t start,
1567 vm_size_t size,
1568 vm_behavior_t new_behavior)
1569{
39037602
A
1570 if (start + size < start)
1571 return KERN_INVALID_ARGUMENT;
1c79356b 1572
39037602
A
1573 return mach_vm_behavior_set(map,
1574 (mach_vm_offset_t) start,
1575 (mach_vm_size_t) size,
1576 new_behavior);
91447636 1577}
1c79356b 1578
91447636
A
1579/*
1580 * mach_vm_region:
1581 *
1582 * User call to obtain information about a region in
1583 * a task's address map. Currently, only one flavor is
1584 * supported.
1585 *
1586 * XXX The reserved and behavior fields cannot be filled
1587 * in until the vm merge from the IK is completed, and
1588 * vm_reserve is implemented.
1589 *
1590 * XXX Dependency: syscall_vm_region() also supports only one flavor.
1591 */
1c79356b 1592
91447636
A
1593kern_return_t
1594mach_vm_region(
1595 vm_map_t map,
1596 mach_vm_offset_t *address, /* IN/OUT */
1597 mach_vm_size_t *size, /* OUT */
1598 vm_region_flavor_t flavor, /* IN */
1599 vm_region_info_t info, /* OUT */
1600 mach_msg_type_number_t *count, /* IN/OUT */
1601 mach_port_t *object_name) /* OUT */
1602{
1603 vm_map_offset_t map_addr;
1604 vm_map_size_t map_size;
1605 kern_return_t kr;
1c79356b 1606
91447636
A
1607 if (VM_MAP_NULL == map)
1608 return KERN_INVALID_ARGUMENT;
1c79356b 1609
91447636
A
1610 map_addr = (vm_map_offset_t)*address;
1611 map_size = (vm_map_size_t)*size;
1c79356b 1612
91447636
A
1613 /* legacy conversion */
1614 if (VM_REGION_BASIC_INFO == flavor)
1615 flavor = VM_REGION_BASIC_INFO_64;
1c79356b 1616
91447636
A
1617 kr = vm_map_region(map,
1618 &map_addr, &map_size,
1619 flavor, info, count,
1620 object_name);
1c79356b 1621
91447636
A
1622 *address = map_addr;
1623 *size = map_size;
1624 return kr;
1625}
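
/*
 * A user-space sketch of walking a task's regions with the basic flavor;
 * "task" is a send right to the target task:
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t size = 0;
 *	vm_region_basic_info_data_64_t info;
 *	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t object_name = MACH_PORT_NULL;
 *
 *	while (mach_vm_region(task, &addr, &size, VM_REGION_BASIC_INFO_64,
 *	    (vm_region_info_t)&info, &count, &object_name) == KERN_SUCCESS) {
 *		... addr, size, info.protection describe one region ...
 *		addr += size;
 *		count = VM_REGION_BASIC_INFO_COUNT_64;
 *	}
 */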
1c79356b 1626
91447636
A
1627/*
1628 * vm_region_64 and vm_region:
1629 *
1630 * User call to obtain information about a region in
1631 * a task's address map. Currently, only one flavor is
1632 * supported.
1633 *
1634 * XXX The reserved and behavior fields cannot be filled
1635 * in until the vm merge from the IK is completed, and
1636 * vm_reserve is implemented.
1637 *
1638 * XXX Dependency: syscall_vm_region() also supports only one flavor.
1639 */
1c79356b 1640
91447636
A
1641kern_return_t
1642vm_region_64(
1643 vm_map_t map,
1644 vm_offset_t *address, /* IN/OUT */
1645 vm_size_t *size, /* OUT */
1646 vm_region_flavor_t flavor, /* IN */
1647 vm_region_info_t info, /* OUT */
1648 mach_msg_type_number_t *count, /* IN/OUT */
1649 mach_port_t *object_name) /* OUT */
1650{
1651 vm_map_offset_t map_addr;
1652 vm_map_size_t map_size;
1653 kern_return_t kr;
1c79356b 1654
91447636
A
1655 if (VM_MAP_NULL == map)
1656 return KERN_INVALID_ARGUMENT;
1c79356b 1657
91447636
A
1658 map_addr = (vm_map_offset_t)*address;
1659 map_size = (vm_map_size_t)*size;
1c79356b 1660
91447636
A
1661 /* legacy conversion */
1662 if (VM_REGION_BASIC_INFO == flavor)
1663 flavor = VM_REGION_BASIC_INFO_64;
1c79356b 1664
91447636
A
1665 kr = vm_map_region(map,
1666 &map_addr, &map_size,
1667 flavor, info, count,
1668 object_name);
1c79356b 1669
91447636
A
1670 *address = CAST_DOWN(vm_offset_t, map_addr);
1671 *size = CAST_DOWN(vm_size_t, map_size);
1c79356b 1672
91447636
A
1673 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
1674 return KERN_INVALID_ADDRESS;
1675 return kr;
1676}
1c79356b 1677
91447636
A
1678kern_return_t
1679vm_region(
1680 vm_map_t map,
1681 vm_address_t *address, /* IN/OUT */
1682 vm_size_t *size, /* OUT */
1683 vm_region_flavor_t flavor, /* IN */
1684 vm_region_info_t info, /* OUT */
1685 mach_msg_type_number_t *count, /* IN/OUT */
1686 mach_port_t *object_name) /* OUT */
1687{
1688 vm_map_address_t map_addr;
1689 vm_map_size_t map_size;
1690 kern_return_t kr;
1c79356b 1691
91447636
A
1692 if (VM_MAP_NULL == map)
1693 return KERN_INVALID_ARGUMENT;
1c79356b 1694
91447636
A
1695 map_addr = (vm_map_address_t)*address;
1696 map_size = (vm_map_size_t)*size;
1c79356b 1697
91447636
A
1698 kr = vm_map_region(map,
1699 &map_addr, &map_size,
1700 flavor, info, count,
1701 object_name);
1c79356b 1702
91447636
A
1703 *address = CAST_DOWN(vm_address_t, map_addr);
1704 *size = CAST_DOWN(vm_size_t, map_size);
1c79356b 1705
91447636
A
1706 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
1707 return KERN_INVALID_ADDRESS;
1708 return kr;
1709}
1c79356b
A
1710
1711/*
91447636
A
1712 * vm_region_recurse: A form of vm_region which follows the
1713 * submaps in a target map
1c79356b 1714 *
1c79356b
A
1715 */
1716kern_return_t
91447636
A
1717mach_vm_region_recurse(
1718 vm_map_t map,
1719 mach_vm_address_t *address,
1720 mach_vm_size_t *size,
1721 uint32_t *depth,
1722 vm_region_recurse_info_t info,
1723 mach_msg_type_number_t *infoCnt)
1c79356b 1724{
91447636
A
1725 vm_map_address_t map_addr;
1726 vm_map_size_t map_size;
1727 kern_return_t kr;
1c79356b 1728
91447636
A
1729 if (VM_MAP_NULL == map)
1730 return KERN_INVALID_ARGUMENT;
1c79356b 1731
91447636
A
1732 map_addr = (vm_map_address_t)*address;
1733 map_size = (vm_map_size_t)*size;
1734
1735 kr = vm_map_region_recurse_64(
1736 map,
1737 &map_addr,
1738 &map_size,
1739 depth,
1740 (vm_region_submap_info_64_t)info,
1741 infoCnt);
1742
1743 *address = map_addr;
1744 *size = map_size;
1745 return kr;
1c79356b
A
1746}
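
/*
 * A user-space sketch that also descends into submaps (e.g. the shared
 * region); "depth" is both an input limit and an output reporting the
 * nesting level of the returned range:
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t size = 0;
 *	natural_t depth = 0;
 *	vm_region_submap_info_data_64_t info;
 *	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_region_recurse(mach_task_self(), &addr, &size, &depth,
 *	    (vm_region_recurse_info_t)&info, &count);
 */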
1747
1748/*
91447636
A
1749 * vm_region_recurse: A form of vm_region which follows the
1750 * submaps in a target map
1751 *
1c79356b 1752 */
91447636
A
1753kern_return_t
1754vm_region_recurse_64(
1755 vm_map_t map,
1756 vm_address_t *address,
1757 vm_size_t *size,
1758 uint32_t *depth,
1759 vm_region_recurse_info_64_t info,
1760 mach_msg_type_number_t *infoCnt)
1c79356b 1761{
91447636
A
1762 vm_map_address_t map_addr;
1763 vm_map_size_t map_size;
1764 kern_return_t kr;
1765
1766 if (VM_MAP_NULL == map)
1767 return KERN_INVALID_ARGUMENT;
1768
1769 map_addr = (vm_map_address_t)*address;
1770 map_size = (vm_map_size_t)*size;
1771
1772 kr = vm_map_region_recurse_64(
1773 map,
1774 &map_addr,
1775 &map_size,
1776 depth,
1777 (vm_region_submap_info_64_t)info,
1778 infoCnt);
1c79356b 1779
91447636
A
1780 *address = CAST_DOWN(vm_address_t, map_addr);
1781 *size = CAST_DOWN(vm_size_t, map_size);
1782
1783 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
1784 return KERN_INVALID_ADDRESS;
1785 return kr;
1c79356b
A
1786}
1787
91447636
A
1788kern_return_t
1789vm_region_recurse(
1790 vm_map_t map,
1791 vm_offset_t *address, /* IN/OUT */
1792 vm_size_t *size, /* OUT */
1793 natural_t *depth, /* IN/OUT */
1794 vm_region_recurse_info_t info32, /* IN/OUT */
1795 mach_msg_type_number_t *infoCnt) /* IN/OUT */
1796{
1797 vm_region_submap_info_data_64_t info64;
1798 vm_region_submap_info_t info;
1799 vm_map_address_t map_addr;
1800 vm_map_size_t map_size;
1801 kern_return_t kr;
1802
1803 if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
1804 return KERN_INVALID_ARGUMENT;
1805
1806
1807 map_addr = (vm_map_address_t)*address;
1808 map_size = (vm_map_size_t)*size;
1809 info = (vm_region_submap_info_t)info32;
1810 *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;
1811
1812 kr = vm_map_region_recurse_64(map, &map_addr,&map_size,
1813 depth, &info64, infoCnt);
1814
1815 info->protection = info64.protection;
1816 info->max_protection = info64.max_protection;
1817 info->inheritance = info64.inheritance;
1818 info->offset = (uint32_t)info64.offset; /* trouble-maker */
1819 info->user_tag = info64.user_tag;
1820 info->pages_resident = info64.pages_resident;
1821 info->pages_shared_now_private = info64.pages_shared_now_private;
1822 info->pages_swapped_out = info64.pages_swapped_out;
1823 info->pages_dirtied = info64.pages_dirtied;
1824 info->ref_count = info64.ref_count;
1825 info->shadow_depth = info64.shadow_depth;
1826 info->external_pager = info64.external_pager;
1827 info->share_mode = info64.share_mode;
1828 info->is_submap = info64.is_submap;
1829 info->behavior = info64.behavior;
1830 info->object_id = info64.object_id;
1831 info->user_wired_count = info64.user_wired_count;
1832
1833 *address = CAST_DOWN(vm_address_t, map_addr);
1834 *size = CAST_DOWN(vm_size_t, map_size);
1835 *infoCnt = VM_REGION_SUBMAP_INFO_COUNT;
1836
1837 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
1838 return KERN_INVALID_ADDRESS;
1839 return kr;
1840}
1841
2d21ac55
A
1842kern_return_t
1843mach_vm_purgable_control(
1844 vm_map_t map,
1845 mach_vm_offset_t address,
1846 vm_purgable_t control,
1847 int *state)
1848{
1849 if (VM_MAP_NULL == map)
1850 return KERN_INVALID_ARGUMENT;
1851
5ba3f43e
A
1852 if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
1853 /* not allowed from user-space */
1854 return KERN_INVALID_ARGUMENT;
1855 }
1856
2d21ac55 1857 return vm_map_purgable_control(map,
39236c6e 1858 vm_map_trunc_page(address, PAGE_MASK),
2d21ac55
A
1859 control,
1860 state);
1861}
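
/*
 * Purgeable state is normally managed from user space on memory allocated
 * with VM_FLAGS_PURGABLE.  A sketch that makes such an allocation volatile
 * and later checks whether it survived; "addr" was returned by a
 * mach_vm_allocate() call that included VM_FLAGS_PURGABLE:
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	...
 *	state = VM_PURGABLE_NONVOLATILE;
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	if (kr == KERN_SUCCESS && state == VM_PURGABLE_EMPTY) {
 *		... contents were reclaimed while volatile ...
 *	}
 */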
1862
91447636
A
1863kern_return_t
1864vm_purgable_control(
1865 vm_map_t map,
1866 vm_offset_t address,
1867 vm_purgable_t control,
1868 int *state)
1869{
1870 if (VM_MAP_NULL == map)
1871 return KERN_INVALID_ARGUMENT;
1872
5ba3f43e
A
1873 if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
1874 /* not allowed from user-space */
1875 return KERN_INVALID_ARGUMENT;
1876 }
1877
91447636 1878 return vm_map_purgable_control(map,
39236c6e 1879 vm_map_trunc_page(address, PAGE_MASK),
91447636
A
1880 control,
1881 state);
1882}
1883
1c79356b
A
1884
1885/*
1886 * Ordinarily, the right to allocate CPM is restricted
1887 * to privileged applications (those that can gain access
91447636
A
1888 * to the host priv port). Set this variable to zero if
1889 * you want to let any application allocate CPM.
1c79356b
A
1890 */
1891unsigned int vm_allocate_cpm_privileged = 0;
1892
1893/*
1894 * Allocate memory in the specified map, with the caveat that
1895 * the memory is physically contiguous. This call may fail
1896 * if the system can't find sufficient contiguous memory.
1897 * This call may cause or lead to heart-stopping amounts of
1898 * paging activity.
1899 *
1900 * Memory obtained from this call should be freed in the
1901 * normal way, viz., via vm_deallocate.
1902 */
1903kern_return_t
1904vm_allocate_cpm(
1905 host_priv_t host_priv,
91447636
A
1906 vm_map_t map,
1907 vm_address_t *addr,
1908 vm_size_t size,
1c79356b
A
1909 int flags)
1910{
91447636
A
1911 vm_map_address_t map_addr;
1912 vm_map_size_t map_size;
1c79356b 1913 kern_return_t kr;
1c79356b 1914
91447636 1915 if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
1c79356b
A
1916 return KERN_INVALID_HOST;
1917
91447636 1918 if (VM_MAP_NULL == map)
1c79356b 1919 return KERN_INVALID_ARGUMENT;
1c79356b 1920
91447636
A
1921 map_addr = (vm_map_address_t)*addr;
1922 map_size = (vm_map_size_t)size;
1c79356b 1923
91447636
A
1924 kr = vm_map_enter_cpm(map,
1925 &map_addr,
1926 map_size,
1927 flags);
1c79356b 1928
91447636 1929 *addr = CAST_DOWN(vm_address_t, map_addr);
1c79356b
A
1930 return kr;
1931}
1932
1933
91447636
A
1934kern_return_t
1935mach_vm_page_query(
1936 vm_map_t map,
1937 mach_vm_offset_t offset,
1938 int *disposition,
1939 int *ref_count)
1940{
1941 if (VM_MAP_NULL == map)
1942 return KERN_INVALID_ARGUMENT;
1c79356b 1943
39236c6e
A
1944 return vm_map_page_query_internal(
1945 map,
1946 vm_map_trunc_page(offset, PAGE_MASK),
1947 disposition, ref_count);
91447636 1948}
1c79356b
A
1949
1950kern_return_t
91447636
A
1951vm_map_page_query(
1952 vm_map_t map,
1953 vm_offset_t offset,
1954 int *disposition,
1955 int *ref_count)
1c79356b 1956{
91447636
A
1957 if (VM_MAP_NULL == map)
1958 return KERN_INVALID_ARGUMENT;
1959
39236c6e
A
1960 return vm_map_page_query_internal(
1961 map,
1962 vm_map_trunc_page(offset, PAGE_MASK),
1963 disposition, ref_count);
b0d623f7
A
1964}
1965
5ba3f43e
A
1966kern_return_t
1967mach_vm_page_range_query(
1968 vm_map_t map,
1969 mach_vm_offset_t address,
1970 mach_vm_size_t size,
1971 mach_vm_address_t dispositions_addr,
1972 mach_vm_size_t *dispositions_count)
1973{
1974 kern_return_t kr = KERN_SUCCESS;
1975 int num_pages = 0, i = 0;
1976 mach_vm_size_t curr_sz = 0, copy_sz = 0;
1977 mach_vm_size_t disp_buf_req_size = 0, disp_buf_total_size = 0;
1978 mach_msg_type_number_t count = 0;
1979
1980 void *info = NULL;
1981	void *local_disp = NULL;
1982 vm_map_size_t info_size = 0, local_disp_size = 0;
1983 mach_vm_offset_t start = 0, end = 0;
1984
1985 if (map == VM_MAP_NULL || dispositions_count == NULL) {
1986 return KERN_INVALID_ARGUMENT;
1987 }
1988
1989 disp_buf_req_size = ( *dispositions_count * sizeof(int));
1990 start = mach_vm_trunc_page(address);
1991 end = mach_vm_round_page(address + size);
1992
1993 if (end < start) {
1994 return KERN_INVALID_ARGUMENT;
1995 }
1996
1997 if (disp_buf_req_size == 0 || (end == start)) {
1998 return KERN_SUCCESS;
1999 }
2000
2001 /*
2002	 * For large requests, we will go through them one
2003	 * MAX_PAGE_RANGE_QUERY-sized chunk at a time.
2004 */
2005
2006 curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY);
2007 num_pages = (int) (curr_sz >> PAGE_SHIFT);
2008
2009 info_size = num_pages * sizeof(vm_page_info_basic_data_t);
2010 info = kalloc(info_size);
2011
2012 if (info == NULL) {
2013 return KERN_RESOURCE_SHORTAGE;
2014 }
2015
2016 local_disp_size = num_pages * sizeof(int);
2017 local_disp = kalloc(local_disp_size);
2018
2019 if (local_disp == NULL) {
2020
2021 kfree(info, info_size);
2022 info = NULL;
2023 return KERN_RESOURCE_SHORTAGE;
2024 }
2025
2026 while (size) {
2027
2028 count = VM_PAGE_INFO_BASIC_COUNT;
2029 kr = vm_map_page_range_info_internal(
2030 map,
2031 start,
2032 mach_vm_round_page(start + curr_sz),
2033 VM_PAGE_INFO_BASIC,
2034 (vm_page_info_t) info,
2035 &count);
2036
2037 assert(kr == KERN_SUCCESS);
2038
2039 for (i = 0; i < num_pages; i++) {
2040
2041 ((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition;
2042 }
2043
2044 copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int)/* an int per page */);
2045 kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz);
2046
2047 start += curr_sz;
2048 disp_buf_req_size -= copy_sz;
2049 disp_buf_total_size += copy_sz;
2050
2051 if (kr != 0) {
2052 break;
2053 }
2054
2055 if ((disp_buf_req_size == 0) || (curr_sz >= size)) {
2056
2057 /*
2058			 * We might have inspected the full range, or even
2059			 * more than it, especially if the user passed in a
2060			 * non-page-aligned start/size and/or if we
2061			 * descended into a submap. We are done here.
2062 */
2063
2064 size = 0;
2065
2066 } else {
2067
2068 dispositions_addr += copy_sz;
2069
2070 size -= curr_sz;
2071
2072 curr_sz = MIN(mach_vm_round_page(size), MAX_PAGE_RANGE_QUERY);
2073 num_pages = (int)(curr_sz >> PAGE_SHIFT);
2074 }
2075 }
2076
2077 *dispositions_count = disp_buf_total_size / sizeof(int);
2078
2079 kfree(local_disp, local_disp_size);
2080 local_disp = NULL;
2081
2082 kfree(info, info_size);
2083 info = NULL;
2084
2085 return kr;
2086}
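/*
 * Illustrative user-space sketch (not part of the original source): fetching
 * per-page dispositions for a range through the mach_vm_page_range_query()
 * routine above. One int per page is filled into a caller-supplied buffer and
 * *dispositions_count is trimmed to what was actually inspected. The
 * VM_PAGE_QUERY_* disposition bits are assumed to come from the Mach headers;
 * the helper name is hypothetical.
 */
#if 0 /* usage sketch only */
#include <stdint.h>
#include <stdlib.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

static unsigned int
count_resident_pages(mach_vm_address_t addr, mach_vm_size_t size)
{
	mach_vm_size_t count = size / vm_page_size;	/* one int per page */
	int *disps = calloc((size_t)count, sizeof(int));
	unsigned int resident = 0;
	kern_return_t kr;

	if (disps == NULL)
		return 0;

	kr = mach_vm_page_range_query(mach_task_self(), addr, size,
				      (mach_vm_address_t)(uintptr_t)disps,
				      &count);
	if (kr == KERN_SUCCESS) {
		for (mach_vm_size_t i = 0; i < count; i++) {
			if (disps[i] & VM_PAGE_QUERY_PAGE_PRESENT)
				resident++;
		}
	}
	free(disps);
	return resident;
}
#endif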
2087
b0d623f7
A
2088kern_return_t
2089mach_vm_page_info(
2090 vm_map_t map,
2091 mach_vm_address_t address,
2092 vm_page_info_flavor_t flavor,
2093 vm_page_info_t info,
2094 mach_msg_type_number_t *count)
2095{
2096 kern_return_t kr;
2097
2098 if (map == VM_MAP_NULL) {
2099 return KERN_INVALID_ARGUMENT;
2100 }
2101
2102 kr = vm_map_page_info(map, address, flavor, info, count);
2103 return kr;
1c79356b
A
2104}
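/*
 * Illustrative user-space sketch (not part of the original source): querying
 * the VM_PAGE_INFO_BASIC flavor for a single address through the
 * mach_vm_page_info() routine above. It assumes that routine is exported to
 * user space via MIG and that the vm_page_info_basic_data_t layout and
 * VM_PAGE_QUERY_* bits come from the Mach headers; the helper name is
 * hypothetical.
 */
#if 0 /* usage sketch only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static int
page_is_resident(mach_vm_address_t addr)
{
	vm_page_info_basic_data_t info;
	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
	kern_return_t kr;

	kr = mach_vm_page_info(mach_task_self(), addr, VM_PAGE_INFO_BASIC,
			       (vm_page_info_t)&info, &count);
	if (kr != KERN_SUCCESS)
		return -1;
	/* the disposition bits describe residency, dirtiness, etc. */
	return (info.disposition & VM_PAGE_QUERY_PAGE_PRESENT) ? 1 : 0;
}
#endif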
2105
91447636 2106/* map a (whole) upl into an address space */
1c79356b 2107kern_return_t
91447636
A
2108vm_upl_map(
2109 vm_map_t map,
2110 upl_t upl,
b0d623f7 2111 vm_address_t *dst_addr)
1c79356b 2112{
91447636 2113 vm_map_offset_t map_addr;
1c79356b
A
2114 kern_return_t kr;
2115
91447636
A
2116 if (VM_MAP_NULL == map)
2117 return KERN_INVALID_ARGUMENT;
1c79356b 2118
91447636 2119 kr = vm_map_enter_upl(map, upl, &map_addr);
b0d623f7 2120 *dst_addr = CAST_DOWN(vm_address_t, map_addr);
91447636
A
2121 return kr;
2122}
1c79356b 2123
91447636
A
2124kern_return_t
2125vm_upl_unmap(
2126 vm_map_t map,
2127 upl_t upl)
2128{
2129 if (VM_MAP_NULL == map)
2130 return KERN_INVALID_ARGUMENT;
1c79356b 2131
91447636
A
2132 return (vm_map_remove_upl(map, upl));
2133}
1c79356b 2134
91447636
A
2135/* Retrieve a upl for an object underlying an address range in a map */
2136
2137kern_return_t
2138vm_map_get_upl(
2139 vm_map_t map,
cc9f6e38 2140 vm_map_offset_t map_offset,
91447636
A
2141 upl_size_t *upl_size,
2142 upl_t *upl,
2143 upl_page_info_array_t page_list,
2144 unsigned int *count,
3e170ce0 2145 upl_control_flags_t *flags,
5ba3f43e 2146 vm_tag_t tag,
91447636
A
2147 int force_data_sync)
2148{
3e170ce0
A
2149 upl_control_flags_t map_flags;
2150 kern_return_t kr;
1c79356b 2151
91447636
A
2152 if (VM_MAP_NULL == map)
2153 return KERN_INVALID_ARGUMENT;
1c79356b 2154
91447636
A
2155 map_flags = *flags & ~UPL_NOZEROFILL;
2156 if (force_data_sync)
2157 map_flags |= UPL_FORCE_DATA_SYNC;
1c79356b 2158
91447636
A
2159 kr = vm_map_create_upl(map,
2160 map_offset,
2161 upl_size,
2162 upl,
2163 page_list,
2164 count,
5ba3f43e
A
2165 &map_flags,
2166 tag);
1c79356b 2167
91447636
A
2168 *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
2169 return kr;
1c79356b
A
2170}
2171
5ba3f43e
A
2172#if CONFIG_EMBEDDED
2173extern int proc_selfpid(void);
2174extern char *proc_name_address(void *p);
2175int cs_executable_mem_entry = 0;
2176int log_executable_mem_entry = 0;
2177#endif /* CONFIG_EMBEDDED */
39037602 2178
1c79356b 2179/*
91447636
A
2180 * mach_make_memory_entry_64
2181 *
2182 * Think of it as a two-stage vm_remap() operation: first
2183 * you get a handle, then you map that handle somewhere
2184 * else. This avoids doing it all at once (and needing
2185 * access to the whole of the other map). See the usage sketch after this function.
1c79356b 2186 */
1c79356b
A
2187kern_return_t
2188mach_make_memory_entry_64(
2189 vm_map_t target_map,
91447636
A
2190 memory_object_size_t *size,
2191 memory_object_offset_t offset,
1c79356b
A
2192 vm_prot_t permission,
2193 ipc_port_t *object_handle,
91447636 2194 ipc_port_t parent_handle)
9d749ea3
A
2195{
2196 if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) {
2197 /*
2198 * Unknown flag: reject for forward compatibility.
2199 */
2200 return KERN_INVALID_VALUE;
2201 }
2202
2203 return mach_make_memory_entry_internal(target_map,
2204 size,
2205 offset,
2206 permission,
2207 object_handle,
2208 parent_handle);
2209}
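/*
 * Illustrative user-space sketch (not part of the original source): the
 * two-stage pattern described in the comment above mach_make_memory_entry_64().
 * Stage one wraps a range of the current task in a named-entry handle; stage
 * two maps that handle into another task with mach_vm_map(). The helper name
 * and the read/write protections are assumptions for the example.
 */
#if 0 /* usage sketch only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
share_range_with(mach_port_t target_task, mach_vm_address_t addr,
		 mach_vm_size_t size, mach_vm_address_t *out_addr)
{
	memory_object_size_t entry_size = size;
	mach_port_t entry = MACH_PORT_NULL;
	kern_return_t kr;

	/* stage 1: get a handle for [addr, addr + size) in this task */
	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, addr,
				       VM_PROT_READ | VM_PROT_WRITE,
				       &entry, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS)
		return kr;

	/* stage 2: map that handle somewhere else */
	*out_addr = 0;
	kr = mach_vm_map(target_task, out_addr, entry_size, 0 /* mask */,
			 VM_FLAGS_ANYWHERE, entry, 0 /* offset */,
			 FALSE /* copy */,
			 VM_PROT_READ | VM_PROT_WRITE,
			 VM_PROT_READ | VM_PROT_WRITE,
			 VM_INHERIT_NONE);

	/* the mapping keeps its own reference; drop our send right */
	mach_port_deallocate(mach_task_self(), entry);
	return kr;
}
#endif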
2210
2211extern int pacified_purgeable_iokit;
2212
2213kern_return_t
2214mach_make_memory_entry_internal(
2215 vm_map_t target_map,
2216 memory_object_size_t *size,
2217 memory_object_offset_t offset,
2218 vm_prot_t permission,
2219 ipc_port_t *object_handle,
2220 ipc_port_t parent_handle)
1c79356b
A
2221{
2222 vm_map_version_t version;
91447636
A
2223 vm_named_entry_t parent_entry;
2224 vm_named_entry_t user_entry;
1c79356b 2225 ipc_port_t user_handle;
1c79356b 2226 kern_return_t kr;
91447636 2227 vm_map_t real_map;
1c79356b
A
2228
2229 /* needed for call to vm_map_lookup_locked */
91447636 2230 boolean_t wired;
3e170ce0 2231 boolean_t iskernel;
1c79356b 2232 vm_object_offset_t obj_off;
91447636 2233 vm_prot_t prot;
2d21ac55 2234 struct vm_object_fault_info fault_info;
91447636
A
2235 vm_object_t object;
2236 vm_object_t shadow_object;
1c79356b
A
2237
2238 /* needed for direct map entry manipulation */
2239 vm_map_entry_t map_entry;
9bccf70c 2240 vm_map_entry_t next_entry;
91447636
A
2241 vm_map_t local_map;
2242 vm_map_t original_map = target_map;
3e170ce0
A
2243 vm_map_size_t total_size, map_size;
2244 vm_map_offset_t map_start, map_end;
91447636 2245 vm_map_offset_t local_offset;
1c79356b 2246 vm_object_size_t mappable_size;
9bccf70c 2247
39236c6e
A
2248 /*
2249 * Stash the offset in the page for use by vm_map_enter_mem_object()
2250 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
2251 */
2252 vm_object_offset_t offset_in_page;
2253
91447636
A
2254 unsigned int access;
2255 vm_prot_t protections;
6d2010ae 2256 vm_prot_t original_protections, mask_protections;
91447636 2257 unsigned int wimg_mode;
91447636 2258
e2d2fc5c 2259 boolean_t force_shadow = FALSE;
39236c6e 2260 boolean_t use_data_addr;
3e170ce0 2261 boolean_t use_4K_compat;
e2d2fc5c 2262
9d749ea3 2263 if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) {
91447636
A
2264 /*
2265 * Unknown flag: reject for forward compatibility.
2266 */
2267 return KERN_INVALID_VALUE;
2268 }
2269
2270 if (parent_handle != IP_NULL &&
2271 ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
2272 parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
2273 } else {
2274 parent_entry = NULL;
2275 }
55e303ae 2276
39236c6e
A
2277 if (parent_entry && parent_entry->is_copy) {
2278 return KERN_INVALID_ARGUMENT;
2279 }
2280
6d2010ae
A
2281 original_protections = permission & VM_PROT_ALL;
2282 protections = original_protections;
2283 mask_protections = permission & VM_PROT_IS_MASK;
55e303ae 2284 access = GET_MAP_MEM(permission);
39236c6e 2285 use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0);
3e170ce0 2286 use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0);
55e303ae 2287
91447636
A
2288 user_handle = IP_NULL;
2289 user_entry = NULL;
2290
3e170ce0 2291 map_start = vm_map_trunc_page(offset, PAGE_MASK);
1c79356b 2292
91447636
A
2293 if (permission & MAP_MEM_ONLY) {
2294 boolean_t parent_is_object;
55e303ae 2295
3e170ce0
A
2296 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2297 map_size = map_end - map_start;
39236c6e 2298
3e170ce0 2299 if (use_data_addr || use_4K_compat || parent_entry == NULL) {
55e303ae
A
2300 return KERN_INVALID_ARGUMENT;
2301 }
91447636 2302
5ba3f43e 2303 parent_is_object = !parent_entry->is_sub_map;
91447636
A
2304 object = parent_entry->backing.object;
2305 if(parent_is_object && object != VM_OBJECT_NULL)
55e303ae 2306 wimg_mode = object->wimg_bits;
91447636 2307 else
6d2010ae 2308 wimg_mode = VM_WIMG_USE_DEFAULT;
91447636
A
2309 if((access != GET_MAP_MEM(parent_entry->protection)) &&
2310 !(parent_entry->protection & VM_PROT_WRITE)) {
55e303ae
A
2311 return KERN_INVALID_RIGHT;
2312 }
5ba3f43e
A
2313 vm_prot_to_wimg(access, &wimg_mode);
2314 if (access != MAP_MEM_NOOP)
2315 SET_MAP_MEM(access, parent_entry->protection);
6d2010ae 2316 if (parent_is_object && object &&
55e303ae
A
2317 (access != MAP_MEM_NOOP) &&
2318 (!(object->nophyscache))) {
6d2010ae
A
2319
2320 if (object->wimg_bits != wimg_mode) {
2321 vm_object_lock(object);
2322 vm_object_change_wimg_mode(object, wimg_mode);
2323 vm_object_unlock(object);
55e303ae
A
2324 }
2325 }
91447636
A
2326 if (object_handle)
2327 *object_handle = IP_NULL;
55e303ae 2328 return KERN_SUCCESS;
39236c6e 2329 } else if (permission & MAP_MEM_NAMED_CREATE) {
3e170ce0
A
2330 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2331 map_size = map_end - map_start;
39236c6e 2332
3e170ce0 2333 if (use_data_addr || use_4K_compat) {
39236c6e
A
2334 return KERN_INVALID_ARGUMENT;
2335 }
55e303ae 2336
91447636
A
2337 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
2338 if (kr != KERN_SUCCESS) {
2339 return KERN_FAILURE;
2340 }
55e303ae 2341
91447636
A
2342 /*
2343 * Force the creation of the VM object now.
2344 */
b0d623f7 2345 if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
91447636 2346 /*
b0d623f7 2347 * LP64todo - for now, we can only allocate 4GB-4096
91447636
A
2348 * internal objects because the default pager can't
2349 * page bigger ones. Remove this when it can.
2350 */
2351 kr = KERN_FAILURE;
2352 goto make_mem_done;
2353 }
1c79356b 2354
91447636
A
2355 object = vm_object_allocate(map_size);
2356 assert(object != VM_OBJECT_NULL);
1c79356b 2357
91447636
A
2358 if (permission & MAP_MEM_PURGABLE) {
2359 if (! (permission & VM_PROT_WRITE)) {
2360 /* if we can't write, we can't purge */
2361 vm_object_deallocate(object);
2362 kr = KERN_INVALID_ARGUMENT;
2363 goto make_mem_done;
2364 }
2d21ac55 2365 object->purgable = VM_PURGABLE_NONVOLATILE;
5ba3f43e
A
2366 if (permission & MAP_MEM_PURGABLE_KERNEL_ONLY) {
2367 object->purgeable_only_by_kernel = TRUE;
2368 }
fe8ab488
A
2369 assert(object->vo_purgeable_owner == NULL);
2370 assert(object->resident_page_count == 0);
2371 assert(object->wired_page_count == 0);
2372 vm_object_lock(object);
9d749ea3
A
2373 if (pacified_purgeable_iokit) {
2374 if (permission & MAP_MEM_LEDGER_TAG_NETWORK) {
2375 vm_purgeable_nonvolatile_enqueue(object,
2376 kernel_task);
2377 } else {
2378 vm_purgeable_nonvolatile_enqueue(object,
2379 current_task());
2380 }
5ba3f43e 2381 } else {
9d749ea3
A
2382 if (object->purgeable_only_by_kernel) {
2383 vm_purgeable_nonvolatile_enqueue(object,
2384 kernel_task);
2385 } else {
2386 vm_purgeable_nonvolatile_enqueue(object,
2387 current_task());
2388 }
5ba3f43e 2389 }
fe8ab488 2390 vm_object_unlock(object);
91447636 2391 }
1c79356b 2392
39037602
A
2393#if CONFIG_SECLUDED_MEMORY
2394 if (secluded_for_iokit && /* global boot-arg */
2395 ((permission & MAP_MEM_GRAB_SECLUDED)
2396#if 11
2397 /* XXX FBDP for my testing only */
2398 || (secluded_for_fbdp && map_size == 97550336)
2399#endif
2400 )) {
2401#if 11
2402 if (!(permission & MAP_MEM_GRAB_SECLUDED) &&
2403 secluded_for_fbdp) {
2404 printf("FBDP: object %p size %lld can grab secluded\n", object, (uint64_t) map_size);
2405 }
2406#endif
2407 object->can_grab_secluded = TRUE;
2408 assert(!object->eligible_for_secluded);
2409 }
2410#endif /* CONFIG_SECLUDED_MEMORY */
2411
91447636
A
2412 /*
2413 * The VM object is brand new and nobody else knows about it,
2414 * so we don't need to lock it.
2415 */
1c79356b 2416
91447636 2417 wimg_mode = object->wimg_bits;
5ba3f43e
A
2418 vm_prot_to_wimg(access, &wimg_mode);
2419 if (access != MAP_MEM_NOOP) {
2420 object->wimg_bits = wimg_mode;
2421 }
2422
91447636 2423 /* the object has no pages, so no WIMG bits to update here */
1c79356b 2424
91447636
A
2425 /*
2426 * XXX
2427 * We use this path when we want to make sure that
2428 * nobody messes with the object (coalesce, for
2429 * example) before we map it.
2430 * We might want to use these objects for transposition via
2431 * vm_object_transpose() too, so we don't want any copy or
2432 * shadow objects either...
2433 */
2434 object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
fe8ab488 2435 object->true_share = TRUE;
1c79356b 2436
91447636
A
2437 user_entry->backing.object = object;
2438 user_entry->internal = TRUE;
2439 user_entry->is_sub_map = FALSE;
91447636 2440 user_entry->offset = 0;
39236c6e 2441 user_entry->data_offset = 0;
91447636
A
2442 user_entry->protection = protections;
2443 SET_MAP_MEM(access, user_entry->protection);
2444 user_entry->size = map_size;
55e303ae
A
2445
2446 /* user_object pager and internal fields are not used */
2447 /* when the object field is filled in. */
2448
3e170ce0
A
2449 *size = CAST_DOWN(vm_size_t, (user_entry->size -
2450 user_entry->data_offset));
55e303ae
A
2451 *object_handle = user_handle;
2452 return KERN_SUCCESS;
2453 }
2454
39236c6e
A
2455 if (permission & MAP_MEM_VM_COPY) {
2456 vm_map_copy_t copy;
2457
2458 if (target_map == VM_MAP_NULL) {
2459 return KERN_INVALID_TASK;
2460 }
2461
3e170ce0
A
2462 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2463 map_size = map_end - map_start;
2464 if (use_data_addr || use_4K_compat) {
2465 offset_in_page = offset - map_start;
2466 if (use_4K_compat)
2467 offset_in_page &= ~((signed)(0xFFF));
39236c6e 2468 } else {
39236c6e
A
2469 offset_in_page = 0;
2470 }
2471
4bd07ac2
A
2472 kr = vm_map_copyin_internal(target_map,
2473 map_start,
2474 map_size,
2475 VM_MAP_COPYIN_ENTRY_LIST,
2476 &copy);
39236c6e
A
2477 if (kr != KERN_SUCCESS) {
2478 return kr;
2479 }
2480
2481 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
2482 if (kr != KERN_SUCCESS) {
2483 vm_map_copy_discard(copy);
2484 return KERN_FAILURE;
2485 }
2486
2487 user_entry->backing.copy = copy;
2488 user_entry->internal = FALSE;
2489 user_entry->is_sub_map = FALSE;
39236c6e
A
2490 user_entry->is_copy = TRUE;
2491 user_entry->offset = 0;
2492 user_entry->protection = protections;
2493 user_entry->size = map_size;
2494 user_entry->data_offset = offset_in_page;
2495
3e170ce0
A
2496 *size = CAST_DOWN(vm_size_t, (user_entry->size -
2497 user_entry->data_offset));
39236c6e
A
2498 *object_handle = user_handle;
2499 return KERN_SUCCESS;
2500 }
2501
2502 if (permission & MAP_MEM_VM_SHARE) {
2503 vm_map_copy_t copy;
2504 vm_prot_t cur_prot, max_prot;
2505
2506 if (target_map == VM_MAP_NULL) {
2507 return KERN_INVALID_TASK;
2508 }
2509
3e170ce0
A
2510 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2511 map_size = map_end - map_start;
2512 if (use_data_addr || use_4K_compat) {
2513 offset_in_page = offset - map_start;
2514 if (use_4K_compat)
2515 offset_in_page &= ~((signed)(0xFFF));
39236c6e 2516 } else {
39236c6e
A
2517 offset_in_page = 0;
2518 }
2519
39037602 2520 cur_prot = VM_PROT_ALL;
39236c6e 2521 kr = vm_map_copy_extract(target_map,
3e170ce0 2522 map_start,
39236c6e
A
2523 map_size,
2524 &copy,
2525 &cur_prot,
2526 &max_prot);
2527 if (kr != KERN_SUCCESS) {
2528 return kr;
2529 }
2530
2531 if (mask_protections) {
2532 /*
2533 * We just want as much of "original_protections"
2534 * as we can get out of the actual "cur_prot".
2535 */
2536 protections &= cur_prot;
2537 if (protections == VM_PROT_NONE) {
2538 /* no access at all: fail */
2539 vm_map_copy_discard(copy);
2540 return KERN_PROTECTION_FAILURE;
2541 }
2542 } else {
2543 /*
2544 * We want exactly "original_protections"
2545 * out of "cur_prot".
2546 */
2547 if ((cur_prot & protections) != protections) {
2548 vm_map_copy_discard(copy);
2549 return KERN_PROTECTION_FAILURE;
2550 }
2551 }
2552
2553 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
2554 if (kr != KERN_SUCCESS) {
2555 vm_map_copy_discard(copy);
2556 return KERN_FAILURE;
2557 }
2558
2559 user_entry->backing.copy = copy;
2560 user_entry->internal = FALSE;
2561 user_entry->is_sub_map = FALSE;
39236c6e
A
2562 user_entry->is_copy = TRUE;
2563 user_entry->offset = 0;
2564 user_entry->protection = protections;
2565 user_entry->size = map_size;
2566 user_entry->data_offset = offset_in_page;
2567
3e170ce0
A
2568 *size = CAST_DOWN(vm_size_t, (user_entry->size -
2569 user_entry->data_offset));
39236c6e
A
2570 *object_handle = user_handle;
2571 return KERN_SUCCESS;
2572 }
2573
91447636
A
2574 if (parent_entry == NULL ||
2575 (permission & MAP_MEM_NAMED_REUSE)) {
2576
3e170ce0
A
2577 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
2578 map_size = map_end - map_start;
2579 if (use_data_addr || use_4K_compat) {
2580 offset_in_page = offset - map_start;
2581 if (use_4K_compat)
2582 offset_in_page &= ~((signed)(0xFFF));
39236c6e 2583 } else {
39236c6e
A
2584 offset_in_page = 0;
2585 }
2586
91447636
A
2587 /* Create a named object based on address range within the task map */
2588 /* Go find the object at given address */
1c79356b 2589
2d21ac55
A
2590 if (target_map == VM_MAP_NULL) {
2591 return KERN_INVALID_TASK;
2592 }
2593
91447636 2594redo_lookup:
6d2010ae 2595 protections = original_protections;
1c79356b
A
2596 vm_map_lock_read(target_map);
2597
2598 /* get the object associated with the target address */
2599 /* note we check the permission of the range against */
2600 /* that requested by the caller */
2601
3e170ce0 2602 kr = vm_map_lookup_locked(&target_map, map_start,
6d2010ae
A
2603 protections | mask_protections,
2604 OBJECT_LOCK_EXCLUSIVE, &version,
2605 &object, &obj_off, &prot, &wired,
2606 &fault_info,
2607 &real_map);
1c79356b
A
2608 if (kr != KERN_SUCCESS) {
2609 vm_map_unlock_read(target_map);
2610 goto make_mem_done;
2611 }
6d2010ae
A
2612 if (mask_protections) {
2613 /*
2614 * The caller asked us to use the "protections" as
2615 * a mask, so restrict "protections" to what this
2616 * mapping actually allows.
2617 */
2618 protections &= prot;
2619 }
5ba3f43e
A
2620#if CONFIG_EMBEDDED
2621 /*
2622 * Wiring would copy the pages to a shadow object.
2623 * The shadow object would not be code-signed so
2624 * attempting to execute code from these copied pages
2625 * would trigger a code-signing violation.
2626 */
2627 if (prot & VM_PROT_EXECUTE) {
2628 if (log_executable_mem_entry) {
2629 void *bsd_info;
2630 bsd_info = current_task()->bsd_info;
2631 printf("pid %d[%s] making memory entry out of "
2632 "executable range from 0x%llx to 0x%llx:"
2633 "might cause code-signing issues "
2634 "later\n",
2635 proc_selfpid(),
2636 (bsd_info != NULL
2637 ? proc_name_address(bsd_info)
2638 : "?"),
2639 (uint64_t) map_start,
2640 (uint64_t) map_end);
2641 }
2642 DTRACE_VM2(cs_executable_mem_entry,
2643 uint64_t, (uint64_t)map_start,
2644 uint64_t, (uint64_t)map_end);
2645 cs_executable_mem_entry++;
2646
2647#if 11
2648 /*
2649 * We don't know how the memory entry will be used.
2650 * It might never get wired and might not cause any
2651 * trouble, so let's not reject this request...
2652 */
2653#else /* 11 */
2654 kr = KERN_PROTECTION_FAILURE;
2655 vm_object_unlock(object);
2656 vm_map_unlock_read(target_map);
2657 if(real_map != target_map)
2658 vm_map_unlock_read(real_map);
2659 goto make_mem_done;
2660#endif /* 11 */
2661
2662 }
2663#endif /* CONFIG_EMBEDDED */
39037602 2664
55e303ae 2665 if (((prot & protections) != protections)
39037602 2666 || (object == kernel_object)) {
1c79356b
A
2667 kr = KERN_INVALID_RIGHT;
2668 vm_object_unlock(object);
2669 vm_map_unlock_read(target_map);
91447636
A
2670 if(real_map != target_map)
2671 vm_map_unlock_read(real_map);
9bccf70c
A
2672 if(object == kernel_object) {
2673 printf("Warning: Attempt to create a named"
2674 " entry from the kernel_object\n");
2675 }
1c79356b
A
2676 goto make_mem_done;
2677 }
2678
2679 /* We have an object, now check to see if this object */
2680 /* is suitable. If not, create a shadow and share that */
91447636
A
2681
2682 /*
2683 * We have to unlock the VM object to avoid deadlocking with
2684 * a VM map lock (the lock ordering is map, the object), if we
2685 * need to modify the VM map to create a shadow object. Since
2686 * we might release the VM map lock below anyway, we have
2687 * to release the VM map lock now.
2688 * XXX FBDP There must be a way to avoid this double lookup...
2689 *
2690 * Take an extra reference on the VM object to make sure it's
2691 * not going to disappear.
2692 */
2693 vm_object_reference_locked(object); /* extra ref to hold obj */
2694 vm_object_unlock(object);
2695
9bccf70c 2696 local_map = original_map;
3e170ce0 2697 local_offset = map_start;
9bccf70c
A
2698 if(target_map != local_map) {
2699 vm_map_unlock_read(target_map);
91447636
A
2700 if(real_map != target_map)
2701 vm_map_unlock_read(real_map);
9bccf70c
A
2702 vm_map_lock_read(local_map);
2703 target_map = local_map;
91447636 2704 real_map = local_map;
9bccf70c 2705 }
1c79356b 2706 while(TRUE) {
9bccf70c
A
2707 if(!vm_map_lookup_entry(local_map,
2708 local_offset, &map_entry)) {
1c79356b 2709 kr = KERN_INVALID_ARGUMENT;
1c79356b 2710 vm_map_unlock_read(target_map);
91447636
A
2711 if(real_map != target_map)
2712 vm_map_unlock_read(real_map);
2713 vm_object_deallocate(object); /* release extra ref */
2714 object = VM_OBJECT_NULL;
1c79356b
A
2715 goto make_mem_done;
2716 }
3e170ce0 2717 iskernel = (local_map->pmap == kernel_pmap);
1c79356b 2718 if(!(map_entry->is_sub_map)) {
3e170ce0 2719 if (VME_OBJECT(map_entry) != object) {
1c79356b 2720 kr = KERN_INVALID_ARGUMENT;
1c79356b 2721 vm_map_unlock_read(target_map);
91447636
A
2722 if(real_map != target_map)
2723 vm_map_unlock_read(real_map);
2724 vm_object_deallocate(object); /* release extra ref */
2725 object = VM_OBJECT_NULL;
1c79356b
A
2726 goto make_mem_done;
2727 }
2728 break;
2729 } else {
9bccf70c
A
2730 vm_map_t tmap;
2731 tmap = local_map;
3e170ce0 2732 local_map = VME_SUBMAP(map_entry);
9bccf70c 2733
1c79356b 2734 vm_map_lock_read(local_map);
9bccf70c 2735 vm_map_unlock_read(tmap);
1c79356b 2736 target_map = local_map;
91447636 2737 real_map = local_map;
9bccf70c 2738 local_offset = local_offset - map_entry->vme_start;
3e170ce0 2739 local_offset += VME_OFFSET(map_entry);
1c79356b
A
2740 }
2741 }
91447636
A
2742
2743 /*
2744 * We found the VM map entry, lock the VM object again.
2745 */
2746 vm_object_lock(object);
2747 if(map_entry->wired_count) {
2748 /* JMM - The check below should be reworked instead. */
2749 object->true_share = TRUE;
2750 }
6d2010ae
A
2751 if (mask_protections) {
2752 /*
2753 * The caller asked us to use the "protections" as
2754 * a mask, so restrict "protections" to what this
2755 * mapping actually allows.
2756 */
2757 protections &= map_entry->max_protection;
2758 }
55e303ae 2759 if(((map_entry->max_protection) & protections) != protections) {
1c79356b
A
2760 kr = KERN_INVALID_RIGHT;
2761 vm_object_unlock(object);
2762 vm_map_unlock_read(target_map);
91447636
A
2763 if(real_map != target_map)
2764 vm_map_unlock_read(real_map);
2765 vm_object_deallocate(object);
2766 object = VM_OBJECT_NULL;
1c79356b
A
2767 goto make_mem_done;
2768 }
9bccf70c 2769
2d21ac55 2770 mappable_size = fault_info.hi_offset - obj_off;
9bccf70c 2771 total_size = map_entry->vme_end - map_entry->vme_start;
91447636 2772 if(map_size > mappable_size) {
9bccf70c
A
2773 /* try to extend mappable size if the entries */
2774 /* following are from the same object and are */
2775 /* compatible */
2776 next_entry = map_entry->vme_next;
2777 /* lets see if the next map entry is still */
2778 /* pointing at this object and is contiguous */
91447636 2779 while(map_size > mappable_size) {
3e170ce0
A
2780 if ((VME_OBJECT(next_entry) == object) &&
2781 (next_entry->vme_start ==
2782 next_entry->vme_prev->vme_end) &&
2783 (VME_OFFSET(next_entry) ==
2784 (VME_OFFSET(next_entry->vme_prev) +
2785 (next_entry->vme_prev->vme_end -
2786 next_entry->vme_prev->vme_start)))) {
6d2010ae
A
2787 if (mask_protections) {
2788 /*
2789 * The caller asked us to use
2790 * the "protections" as a mask,
2791 * so restrict "protections" to
2792 * what this mapping actually
2793 * allows.
2794 */
2795 protections &= next_entry->max_protection;
2796 }
316670eb
A
2797 if ((next_entry->wired_count) &&
2798 (map_entry->wired_count == 0)) {
2799 break;
2800 }
9bccf70c 2801 if(((next_entry->max_protection)
55e303ae 2802 & protections) != protections) {
9bccf70c
A
2803 break;
2804 }
55e303ae
A
2805 if (next_entry->needs_copy !=
2806 map_entry->needs_copy)
2807 break;
9bccf70c
A
2808 mappable_size += next_entry->vme_end
2809 - next_entry->vme_start;
2810 total_size += next_entry->vme_end
2811 - next_entry->vme_start;
2812 next_entry = next_entry->vme_next;
2813 } else {
2814 break;
2815 }
2816
2817 }
2818 }
2819
3e170ce0
A
2820 /* vm_map_entry_should_cow_for_true_share() checks for malloc tags,
2821 * never true in kernel */
2822 if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) &&
e2d2fc5c
A
2823 object->vo_size > map_size &&
2824 map_size != 0) {
2825 /*
2826 * Set up the targeted range for copy-on-write to
2827 * limit the impact of "true_share"/"copy_delay" to
2828 * that range instead of the entire VM object...
2829 */
2830
2831 vm_object_unlock(object);
2832 if (vm_map_lock_read_to_write(target_map)) {
2833 vm_object_deallocate(object);
2834 target_map = original_map;
2835 goto redo_lookup;
2836 }
2837
39236c6e
A
2838 vm_map_clip_start(target_map,
2839 map_entry,
3e170ce0 2840 vm_map_trunc_page(map_start,
39236c6e
A
2841 VM_MAP_PAGE_MASK(target_map)));
2842 vm_map_clip_end(target_map,
2843 map_entry,
3e170ce0 2844 (vm_map_round_page(map_end,
fe8ab488 2845 VM_MAP_PAGE_MASK(target_map))));
e2d2fc5c
A
2846 force_shadow = TRUE;
2847
fe8ab488 2848 if ((map_entry->vme_end - offset) < map_size) {
3e170ce0 2849 map_size = map_entry->vme_end - map_start;
fe8ab488
A
2850 }
2851 total_size = map_entry->vme_end - map_entry->vme_start;
e2d2fc5c
A
2852
2853 vm_map_lock_write_to_read(target_map);
2854 vm_object_lock(object);
2855 }
e2d2fc5c 2856
39236c6e 2857 if (object->internal) {
1c79356b
A
2858 /* vm_map_lookup_locked will create a shadow if */
2859 /* needs_copy is set but does not check for the */
2860 /* other two conditions shown. It is important to */
2861 /* set up an object which will not be pulled from */
2862 /* under us. */
2863
e2d2fc5c
A
2864 if (force_shadow ||
2865 ((map_entry->needs_copy ||
2866 object->shadowed ||
39236c6e 2867 (object->vo_size > total_size &&
3e170ce0 2868 (VME_OFFSET(map_entry) != 0 ||
39236c6e
A
2869 object->vo_size >
2870 vm_map_round_page(total_size,
2871 VM_MAP_PAGE_MASK(target_map)))))
2872 && !object->true_share)) {
91447636
A
2873 /*
2874 * We have to unlock the VM object before
2875 * trying to upgrade the VM map lock, to
2876 * honor lock ordering (map then object).
2877 * Otherwise, we would deadlock if another
2878 * thread holds a read lock on the VM map and
2879 * is trying to acquire the VM object's lock.
2880 * We still hold an extra reference on the
2881 * VM object, guaranteeing that it won't
2882 * disappear.
2883 */
2884 vm_object_unlock(object);
2885
1c79356b 2886 if (vm_map_lock_read_to_write(target_map)) {
91447636
A
2887 /*
2888 * We couldn't upgrade our VM map lock
2889 * from "read" to "write" and we lost
2890 * our "read" lock.
2891 * Start all over again...
2892 */
2893 vm_object_deallocate(object); /* extra ref */
2894 target_map = original_map;
1c79356b
A
2895 goto redo_lookup;
2896 }
fe8ab488 2897#if 00
91447636 2898 vm_object_lock(object);
fe8ab488 2899#endif
1c79356b 2900
55e303ae
A
2901 /*
2902 * JMM - We need to avoid coming here when the object
2903 * is wired by anybody, not just the current map. Why
2904 * couldn't we use the standard vm_object_copy_quickly()
2905 * approach here?
2906 */
2907
1c79356b 2908 /* create a shadow object */
3e170ce0
A
2909 VME_OBJECT_SHADOW(map_entry, total_size);
2910 shadow_object = VME_OBJECT(map_entry);
fe8ab488 2911#if 00
9bccf70c 2912 vm_object_unlock(object);
fe8ab488 2913#endif
91447636 2914
0c530ab8 2915 prot = map_entry->protection & ~VM_PROT_WRITE;
2d21ac55 2916
3e170ce0
A
2917 if (override_nx(target_map,
2918 VME_ALIAS(map_entry))
2919 && prot)
0c530ab8 2920 prot |= VM_PROT_EXECUTE;
2d21ac55 2921
9bccf70c 2922 vm_object_pmap_protect(
3e170ce0 2923 object, VME_OFFSET(map_entry),
9bccf70c
A
2924 total_size,
2925 ((map_entry->is_shared
316670eb 2926 || target_map->mapped_in_other_pmaps)
9bccf70c
A
2927 ? PMAP_NULL :
2928 target_map->pmap),
2929 map_entry->vme_start,
0c530ab8 2930 prot);
9bccf70c
A
2931 total_size -= (map_entry->vme_end
2932 - map_entry->vme_start);
2933 next_entry = map_entry->vme_next;
2934 map_entry->needs_copy = FALSE;
2d21ac55
A
2935
2936 vm_object_lock(shadow_object);
9bccf70c 2937 while (total_size) {
316670eb
A
2938 assert((next_entry->wired_count == 0) ||
2939 (map_entry->wired_count));
2940
3e170ce0 2941 if (VME_OBJECT(next_entry) == object) {
2d21ac55 2942 vm_object_reference_locked(shadow_object);
3e170ce0
A
2943 VME_OBJECT_SET(next_entry,
2944 shadow_object);
55e303ae 2945 vm_object_deallocate(object);
3e170ce0
A
2946 VME_OFFSET_SET(
2947 next_entry,
2948 (VME_OFFSET(next_entry->vme_prev) +
2949 (next_entry->vme_prev->vme_end
2950 - next_entry->vme_prev->vme_start)));
a39ff7e2 2951 next_entry->use_pmap = TRUE;
9bccf70c
A
2952 next_entry->needs_copy = FALSE;
2953 } else {
2954 panic("mach_make_memory_entry_64:"
2955 " map entries out of sync\n");
2956 }
2957 total_size -=
2958 next_entry->vme_end
2959 - next_entry->vme_start;
2960 next_entry = next_entry->vme_next;
2961 }
2962
91447636
A
2963 /*
2964 * Transfer our extra reference to the
2965 * shadow object.
2966 */
2967 vm_object_reference_locked(shadow_object);
2968 vm_object_deallocate(object); /* extra ref */
9bccf70c 2969 object = shadow_object;
91447636 2970
3e170ce0
A
2971 obj_off = ((local_offset - map_entry->vme_start)
2972 + VME_OFFSET(map_entry));
1c79356b 2973
91447636 2974 vm_map_lock_write_to_read(target_map);
1c79356b
A
2975 }
2976 }
2977
2978	/* note: in the future we could (if necessary) allow for */
2979	/* memory object lists; this would better support */
2980	/* fragmentation, but is it necessary? The user should */
2981	/* be encouraged to create address-space-oriented */
2982	/* shared objects from CLEAN memory regions which have */
2983	/* a known and defined history, i.e. no inheritance */
2984	/* sharing; make this call before making the region the */
2985	/* target of IPCs, etc. The code above, protecting */
2986	/* against delayed copy, etc., is mostly defensive. */
2987
55e303ae 2988 wimg_mode = object->wimg_bits;
5ba3f43e
A
2989 if(!(object->nophyscache))
2990 vm_prot_to_wimg(access, &wimg_mode);
d7e50217 2991
fe8ab488
A
2992#if VM_OBJECT_TRACKING_OP_TRUESHARE
2993 if (!object->true_share &&
2994 vm_object_tracking_inited) {
2995 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
2996 int num = 0;
2997
2998 num = OSBacktrace(bt,
2999 VM_OBJECT_TRACKING_BTDEPTH);
3000 btlog_add_entry(vm_object_tracking_btlog,
3001 object,
3002 VM_OBJECT_TRACKING_OP_TRUESHARE,
3003 bt,
3004 num);
3005 }
3006#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
3007
39037602 3008 vm_object_lock_assert_exclusive(object);
de355530 3009 object->true_share = TRUE;
55e303ae
A
3010 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
3011 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
3012
91447636
A
3013 /*
3014 * The memory entry now points to this VM object and we
3015 * need to hold a reference on the VM object. Use the extra
3016 * reference we took earlier to keep the object alive when we
3017 * had to unlock it.
3018 */
3019
55e303ae 3020 vm_map_unlock_read(target_map);
91447636
A
3021 if(real_map != target_map)
3022 vm_map_unlock_read(real_map);
55e303ae 3023
6d2010ae
A
3024 if (object->wimg_bits != wimg_mode)
3025 vm_object_change_wimg_mode(object, wimg_mode);
1c79356b
A
3026
3027	/* The size of the mapped entry that overlaps with our region, */
3028	/* which is targeted for sharing, is: */
3029	/* (entry_end - entry_start) - */
3030	/* offset of our beginning address within the entry; */
3031	/* it corresponds to this: */
3032
91447636
A
3033 if(map_size > mappable_size)
3034 map_size = mappable_size;
3035
3036 if (permission & MAP_MEM_NAMED_REUSE) {
3037 /*
3038 * Compare what we got with the "parent_entry".
3039 * If they match, re-use the "parent_entry" instead
3040 * of creating a new one.
3041 */
3042 if (parent_entry != NULL &&
3043 parent_entry->backing.object == object &&
3044 parent_entry->internal == object->internal &&
3045 parent_entry->is_sub_map == FALSE &&
91447636
A
3046 parent_entry->offset == obj_off &&
3047 parent_entry->protection == protections &&
39236c6e 3048 parent_entry->size == map_size &&
3e170ce0
A
3049 ((!(use_data_addr || use_4K_compat) &&
3050 (parent_entry->data_offset == 0)) ||
3051 ((use_data_addr || use_4K_compat) &&
3052 (parent_entry->data_offset == offset_in_page)))) {
91447636
A
3053 /*
3054 * We have a match: re-use "parent_entry".
3055 */
3056 /* release our extra reference on object */
3057 vm_object_unlock(object);
3058 vm_object_deallocate(object);
3059 /* parent_entry->ref_count++; XXX ? */
3060 /* Get an extra send-right on handle */
3061 ipc_port_copy_send(parent_handle);
fe8ab488 3062
3e170ce0
A
3063 *size = CAST_DOWN(vm_size_t,
3064 (parent_entry->size -
3065 parent_entry->data_offset));
91447636
A
3066 *object_handle = parent_handle;
3067 return KERN_SUCCESS;
3068 } else {
3069 /*
3070 * No match: we need to create a new entry.
3071 * fall through...
3072 */
3073 }
3074 }
3075
3076 vm_object_unlock(object);
3077 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3078 != KERN_SUCCESS) {
3079 /* release our unused reference on the object */
3080 vm_object_deallocate(object);
3081 return KERN_FAILURE;
3082 }
1c79356b 3083
91447636
A
3084 user_entry->backing.object = object;
3085 user_entry->internal = object->internal;
3086 user_entry->is_sub_map = FALSE;
91447636 3087 user_entry->offset = obj_off;
39236c6e 3088 user_entry->data_offset = offset_in_page;
6d2010ae
A
3089 user_entry->protection = protections;
3090 SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
91447636 3091 user_entry->size = map_size;
1c79356b
A
3092
3093 /* user_object pager and internal fields are not used */
3094 /* when the object field is filled in. */
3095
3e170ce0
A
3096 *size = CAST_DOWN(vm_size_t, (user_entry->size -
3097 user_entry->data_offset));
1c79356b 3098 *object_handle = user_handle;
1c79356b 3099 return KERN_SUCCESS;
1c79356b 3100
91447636 3101 } else {
1c79356b 3102	/* The new object will be based on an existing named object */
91447636 3103 if (parent_entry == NULL) {
1c79356b
A
3104 kr = KERN_INVALID_ARGUMENT;
3105 goto make_mem_done;
3106 }
39236c6e 3107
3e170ce0 3108 if (use_data_addr || use_4K_compat) {
39236c6e
A
3109 /*
3110		 * Submaps and pagers should only be accessible from within
3111		 * the kernel, which shouldn't be using the data address flag, so we can fail here.
3112 */
5ba3f43e
A
3113 if (parent_entry->is_sub_map) {
3114 panic("Shouldn't be using data address with a parent entry that is a submap.");
39236c6e
A
3115 }
3116 /*
3117 * Account for offset to data in parent entry and
3118 * compute our own offset to data.
3119 */
3120 if((offset + *size + parent_entry->data_offset) > parent_entry->size) {
3121 kr = KERN_INVALID_ARGUMENT;
3122 goto make_mem_done;
3123 }
3124
3e170ce0
A
3125 map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
3126 offset_in_page = (offset + parent_entry->data_offset) - map_start;
3127 if (use_4K_compat)
3128 offset_in_page &= ~((signed)(0xFFF));
3129 map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
3130 map_size = map_end - map_start;
39236c6e 3131 } else {
3e170ce0
A
3132 map_end = vm_map_round_page(offset + *size, PAGE_MASK);
3133 map_size = map_end - map_start;
39236c6e
A
3134 offset_in_page = 0;
3135
3136 if((offset + map_size) > parent_entry->size) {
3137 kr = KERN_INVALID_ARGUMENT;
3138 goto make_mem_done;
3139 }
1c79356b
A
3140 }
3141
6d2010ae
A
3142 if (mask_protections) {
3143 /*
3144 * The caller asked us to use the "protections" as
3145 * a mask, so restrict "protections" to what this
3146 * mapping actually allows.
3147 */
3148 protections &= parent_entry->protection;
3149 }
91447636
A
3150 if((protections & parent_entry->protection) != protections) {
3151 kr = KERN_PROTECTION_FAILURE;
3152 goto make_mem_done;
3153 }
3154
3155 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3156 != KERN_SUCCESS) {
3157 kr = KERN_FAILURE;
3158 goto make_mem_done;
55e303ae 3159 }
91447636
A
3160
3161 user_entry->size = map_size;
3e170ce0 3162 user_entry->offset = parent_entry->offset + map_start;
39236c6e 3163 user_entry->data_offset = offset_in_page;
91447636 3164 user_entry->is_sub_map = parent_entry->is_sub_map;
39236c6e 3165 user_entry->is_copy = parent_entry->is_copy;
91447636
A
3166 user_entry->internal = parent_entry->internal;
3167 user_entry->protection = protections;
3168
3169 if(access != MAP_MEM_NOOP) {
3170 SET_MAP_MEM(access, user_entry->protection);
1c79356b 3171 }
91447636
A
3172
3173 if(parent_entry->is_sub_map) {
3174 user_entry->backing.map = parent_entry->backing.map;
3175 vm_map_lock(user_entry->backing.map);
3176 user_entry->backing.map->ref_count++;
3177 vm_map_unlock(user_entry->backing.map);
91447636
A
3178 } else {
3179 object = parent_entry->backing.object;
3180 assert(object != VM_OBJECT_NULL);
3181 user_entry->backing.object = object;
3182 /* we now point to this object, hold on */
91447636 3183 vm_object_lock(object);
39037602 3184 vm_object_reference_locked(object);
fe8ab488
A
3185#if VM_OBJECT_TRACKING_OP_TRUESHARE
3186 if (!object->true_share &&
3187 vm_object_tracking_inited) {
3188 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
3189 int num = 0;
3190
3191 num = OSBacktrace(bt,
3192 VM_OBJECT_TRACKING_BTDEPTH);
3193 btlog_add_entry(vm_object_tracking_btlog,
3194 object,
3195 VM_OBJECT_TRACKING_OP_TRUESHARE,
3196 bt,
3197 num);
3198 }
3199#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
3200
91447636
A
3201 object->true_share = TRUE;
3202 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
3203 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
3204 vm_object_unlock(object);
1c79356b 3205 }
3e170ce0
A
3206 *size = CAST_DOWN(vm_size_t, (user_entry->size -
3207 user_entry->data_offset));
1c79356b
A
3208 *object_handle = user_handle;
3209 return KERN_SUCCESS;
3210 }
3211
1c79356b 3212make_mem_done:
91447636 3213 if (user_handle != IP_NULL) {
0b4c1975
A
3214 /*
3215 * Releasing "user_handle" causes the kernel object
3216 * associated with it ("user_entry" here) to also be
3217 * released and freed.
3218 */
3219 mach_memory_entry_port_release(user_handle);
91447636
A
3220 }
3221 return kr;
3222}
3223
3224kern_return_t
3225_mach_make_memory_entry(
3226 vm_map_t target_map,
3227 memory_object_size_t *size,
3228 memory_object_offset_t offset,
3229 vm_prot_t permission,
3230 ipc_port_t *object_handle,
3231 ipc_port_t parent_entry)
3232{
2d21ac55 3233 memory_object_size_t mo_size;
91447636
A
3234 kern_return_t kr;
3235
2d21ac55 3236 mo_size = (memory_object_size_t)*size;
91447636
A
3237 kr = mach_make_memory_entry_64(target_map, &mo_size,
3238 (memory_object_offset_t)offset, permission, object_handle,
3239 parent_entry);
3240 *size = mo_size;
1c79356b
A
3241 return kr;
3242}
3243
3244kern_return_t
3245mach_make_memory_entry(
3246 vm_map_t target_map,
3247 vm_size_t *size,
3248 vm_offset_t offset,
3249 vm_prot_t permission,
3250 ipc_port_t *object_handle,
3251 ipc_port_t parent_entry)
91447636 3252{
2d21ac55 3253 memory_object_size_t mo_size;
1c79356b
A
3254 kern_return_t kr;
3255
2d21ac55 3256 mo_size = (memory_object_size_t)*size;
91447636
A
3257 kr = mach_make_memory_entry_64(target_map, &mo_size,
3258 (memory_object_offset_t)offset, permission, object_handle,
1c79356b 3259 parent_entry);
91447636 3260 *size = CAST_DOWN(vm_size_t, mo_size);
1c79356b
A
3261 return kr;
3262}
3263
3264/*
91447636
A
3265 * task_wire
3266 *
3267 * Set or clear the map's wiring_required flag. This flag, if set,
3268 * will cause all future virtual memory allocations to allocate
3269 * user-wired memory. Unwiring pages wired down as a result of
3270 * this routine is done with the vm_wire interface.
1c79356b 3271 */
1c79356b 3272kern_return_t
91447636
A
3273task_wire(
3274 vm_map_t map,
3275 boolean_t must_wire)
3276{
3277 if (map == VM_MAP_NULL)
3278 return(KERN_INVALID_ARGUMENT);
3279
3280 if (must_wire)
3281 map->wiring_required = TRUE;
3282 else
3283 map->wiring_required = FALSE;
3284
3285 return(KERN_SUCCESS);
3286}
3287
a39ff7e2
A
3288kern_return_t
3289vm_map_exec_lockdown(
3290 vm_map_t map)
3291{
3292 if (map == VM_MAP_NULL)
3293 return(KERN_INVALID_ARGUMENT);
3294
3295 vm_map_lock(map);
3296 map->map_disallow_new_exec = TRUE;
3297 vm_map_unlock(map);
3298
3299 return(KERN_SUCCESS);
3300}
3301
91447636
A
3302__private_extern__ kern_return_t
3303mach_memory_entry_allocate(
3304 vm_named_entry_t *user_entry_p,
3305 ipc_port_t *user_handle_p)
1c79356b 3306{
91447636 3307 vm_named_entry_t user_entry;
1c79356b 3308 ipc_port_t user_handle;
91447636 3309 ipc_port_t previous;
1c79356b 3310
91447636
A
3311 user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
3312 if (user_entry == NULL)
1c79356b 3313 return KERN_FAILURE;
1c79356b 3314
91447636 3315 named_entry_lock_init(user_entry);
1c79356b 3316
91447636
A
3317 user_handle = ipc_port_alloc_kernel();
3318 if (user_handle == IP_NULL) {
3319 kfree(user_entry, sizeof *user_entry);
3320 return KERN_FAILURE;
3321 }
1c79356b
A
3322 ip_lock(user_handle);
3323
3324 /* make a sonce right */
3325 user_handle->ip_sorights++;
3326 ip_reference(user_handle);
3327
3328 user_handle->ip_destination = IP_NULL;
3329 user_handle->ip_receiver_name = MACH_PORT_NULL;
3330 user_handle->ip_receiver = ipc_space_kernel;
3331
3332 /* make a send right */
3333 user_handle->ip_mscount++;
3334 user_handle->ip_srights++;
3335 ip_reference(user_handle);
3336
3337 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
3338 /* nsrequest unlocks user_handle */
3339
5ba3f43e 3340 user_entry->backing.object = NULL;
91447636 3341 user_entry->is_sub_map = FALSE;
39236c6e 3342 user_entry->is_copy = FALSE;
91447636 3343 user_entry->internal = FALSE;
2d21ac55
A
3344 user_entry->size = 0;
3345 user_entry->offset = 0;
39236c6e 3346 user_entry->data_offset = 0;
2d21ac55 3347 user_entry->protection = VM_PROT_NONE;
91447636 3348 user_entry->ref_count = 1;
1c79356b 3349
91447636
A
3350 ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
3351 IKOT_NAMED_ENTRY);
1c79356b 3352
91447636
A
3353 *user_entry_p = user_entry;
3354 *user_handle_p = user_handle;
1c79356b 3355
91447636
A
3356 return KERN_SUCCESS;
3357}
1c79356b 3358
91447636
A
3359/*
3360 * mach_memory_object_memory_entry_64
3361 *
3362 * Create a named entry backed by the provided pager.
3363 *
91447636
A
3364 */
3365kern_return_t
3366mach_memory_object_memory_entry_64(
3367 host_t host,
3368 boolean_t internal,
3369 vm_object_offset_t size,
3370 vm_prot_t permission,
3371 memory_object_t pager,
3372 ipc_port_t *entry_handle)
3373{
3374 unsigned int access;
3375 vm_named_entry_t user_entry;
3376 ipc_port_t user_handle;
5ba3f43e 3377 vm_object_t object;
91447636
A
3378
3379 if (host == HOST_NULL)
3380 return(KERN_INVALID_HOST);
3381
5ba3f43e
A
3382 if (pager == MEMORY_OBJECT_NULL && internal) {
3383 object = vm_object_allocate(size);
5c9f4661
A
3384 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
3385 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
3386 }
5ba3f43e
A
3387 } else {
3388 object = memory_object_to_vm_object(pager);
3389 if (object != VM_OBJECT_NULL) {
3390 vm_object_reference(object);
3391 }
3392 }
3393 if (object == VM_OBJECT_NULL) {
3394 return KERN_INVALID_ARGUMENT;
3395 }
3396
91447636
A
3397 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3398 != KERN_SUCCESS) {
5ba3f43e 3399 vm_object_deallocate(object);
91447636
A
3400 return KERN_FAILURE;
3401 }
3402
91447636
A
3403 user_entry->size = size;
3404 user_entry->offset = 0;
3405 user_entry->protection = permission & VM_PROT_ALL;
3406 access = GET_MAP_MEM(permission);
3407 SET_MAP_MEM(access, user_entry->protection);
91447636 3408 user_entry->is_sub_map = FALSE;
91447636
A
3409 assert(user_entry->ref_count == 1);
3410
5ba3f43e
A
3411 user_entry->backing.object = object;
3412 user_entry->internal = object->internal;
3413 assert(object->internal == internal);
3414
91447636 3415 *entry_handle = user_handle;
1c79356b 3416 return KERN_SUCCESS;
5ba3f43e 3417}
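/*
 * Illustrative kernel-side sketch (not part of the original source): creating
 * a named entry backed by a freshly allocated internal VM object by passing
 * MEMORY_OBJECT_NULL for the pager, as the code above allows. The host port
 * and the 64KB size are assumptions supplied by the hypothetical caller.
 */
#if 0 /* usage sketch only */
static kern_return_t
make_anonymous_entry(host_t host, ipc_port_t *entry_out)
{
	/* no pager + internal == allocate a new anonymous object for the entry */
	return mach_memory_object_memory_entry_64(host,
						  TRUE,	/* internal */
						  (vm_object_offset_t)(64 * 1024),
						  VM_PROT_READ | VM_PROT_WRITE,
						  MEMORY_OBJECT_NULL,
						  entry_out);
}
#endif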
91447636
A
3418
3419kern_return_t
3420mach_memory_object_memory_entry(
3421 host_t host,
3422 boolean_t internal,
3423 vm_size_t size,
3424 vm_prot_t permission,
3425 memory_object_t pager,
3426 ipc_port_t *entry_handle)
3427{
3428 return mach_memory_object_memory_entry_64( host, internal,
3429 (vm_object_offset_t)size, permission, pager, entry_handle);
3430}
3431
3432
3433kern_return_t
3434mach_memory_entry_purgable_control(
3435 ipc_port_t entry_port,
3436 vm_purgable_t control,
3437 int *state)
5ba3f43e
A
3438{
3439 if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
3440 /* not allowed from user-space */
3441 return KERN_INVALID_ARGUMENT;
3442 }
3443
3444 return memory_entry_purgeable_control_internal(entry_port, control, state);
3445}
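/*
 * Illustrative sketch (not part of the original source): a caller that holds
 * a named-entry send right (for example one returned by
 * mach_make_memory_entry_64() with MAP_MEM_PURGABLE) driving the purgeable
 * state of the backing object through the routine above. The helper name is
 * hypothetical.
 */
#if 0 /* usage sketch only */
static kern_return_t
volatile_then_check(ipc_port_t entry_port, int *state_out)
{
	int state = VM_PURGABLE_VOLATILE;
	kern_return_t kr;

	/* mark the entry's object volatile... */
	kr = mach_memory_entry_purgable_control(entry_port,
						VM_PURGABLE_SET_STATE, &state);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ...and read the state back (it may already have been emptied) */
	return mach_memory_entry_purgable_control(entry_port,
						  VM_PURGABLE_GET_STATE,
						  state_out);
}
#endif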
3446
3447kern_return_t
3448memory_entry_purgeable_control_internal(
3449 ipc_port_t entry_port,
3450 vm_purgable_t control,
3451 int *state)
91447636
A
3452{
3453 kern_return_t kr;
3454 vm_named_entry_t mem_entry;
3455 vm_object_t object;
1c79356b 3456
91447636
A
3457 if (entry_port == IP_NULL ||
3458 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3459 return KERN_INVALID_ARGUMENT;
3460 }
2d21ac55 3461 if (control != VM_PURGABLE_SET_STATE &&
5ba3f43e
A
3462 control != VM_PURGABLE_GET_STATE &&
3463 control != VM_PURGABLE_SET_STATE_FROM_KERNEL)
2d21ac55
A
3464 return(KERN_INVALID_ARGUMENT);
3465
5ba3f43e
A
3466 if ((control == VM_PURGABLE_SET_STATE ||
3467 control == VM_PURGABLE_SET_STATE_FROM_KERNEL) &&
b0d623f7 3468 (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
2d21ac55
A
3469 ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
3470 return(KERN_INVALID_ARGUMENT);
1c79356b 3471
91447636 3472 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
1c79356b 3473
91447636 3474 named_entry_lock(mem_entry);
1c79356b 3475
39236c6e 3476 if (mem_entry->is_sub_map ||
39236c6e 3477 mem_entry->is_copy) {
91447636 3478 named_entry_unlock(mem_entry);
1c79356b
A
3479 return KERN_INVALID_ARGUMENT;
3480 }
91447636
A
3481
3482 object = mem_entry->backing.object;
3483 if (object == VM_OBJECT_NULL) {
3484 named_entry_unlock(mem_entry);
1c79356b
A
3485 return KERN_INVALID_ARGUMENT;
3486 }
91447636
A
3487
3488 vm_object_lock(object);
3489
3490 /* check that named entry covers entire object ? */
6d2010ae 3491 if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
91447636
A
3492 vm_object_unlock(object);
3493 named_entry_unlock(mem_entry);
3494 return KERN_INVALID_ARGUMENT;
1c79356b 3495 }
91447636
A
3496
3497 named_entry_unlock(mem_entry);
3498
3499 kr = vm_object_purgable_control(object, control, state);
3500
3501 vm_object_unlock(object);
3502
3503 return kr;
1c79356b
A
3504}
3505
39236c6e
A
3506kern_return_t
3507mach_memory_entry_get_page_counts(
3508 ipc_port_t entry_port,
3509 unsigned int *resident_page_count,
3510 unsigned int *dirty_page_count)
3511{
3512 kern_return_t kr;
3513 vm_named_entry_t mem_entry;
3514 vm_object_t object;
3515 vm_object_offset_t offset;
3516 vm_object_size_t size;
3517
3518 if (entry_port == IP_NULL ||
3519 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3520 return KERN_INVALID_ARGUMENT;
3521 }
3522
3523 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
3524
3525 named_entry_lock(mem_entry);
3526
3527 if (mem_entry->is_sub_map ||
39236c6e
A
3528 mem_entry->is_copy) {
3529 named_entry_unlock(mem_entry);
3530 return KERN_INVALID_ARGUMENT;
3531 }
3532
3533 object = mem_entry->backing.object;
3534 if (object == VM_OBJECT_NULL) {
3535 named_entry_unlock(mem_entry);
3536 return KERN_INVALID_ARGUMENT;
3537 }
3538
3539 vm_object_lock(object);
3540
3541 offset = mem_entry->offset;
3542 size = mem_entry->size;
3543
3544 named_entry_unlock(mem_entry);
3545
3546 kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count);
3547
3548 vm_object_unlock(object);
3549
3550 return kr;
3551}
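/*
 * Illustrative sketch (not part of the original source): reading the resident
 * and dirty page counts of the object behind a named entry with the routine
 * above. The helper name is hypothetical; either out-parameter may be NULL if
 * the caller only wants one of the counts.
 */
#if 0 /* usage sketch only */
static kern_return_t
entry_page_counts(ipc_port_t entry_port,
		  unsigned int *resident, unsigned int *dirty)
{
	return mach_memory_entry_get_page_counts(entry_port, resident, dirty);
}
#endif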
3552
91447636
A
3553/*
3554 * mach_memory_entry_port_release:
3555 *
3556 * Release a send right on a named entry port. This is the correct
3557 * way to destroy a named entry. When the last right on the port is
3558 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
3559 */
3560void
3561mach_memory_entry_port_release(
3562 ipc_port_t port)
3563{
3564 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
3565 ipc_port_release_send(port);
3566}
1c79356b 3567
91447636
A
3568/*
3569 * mach_destroy_memory_entry:
3570 *
3571 * Drops a reference on a memory entry and destroys the memory entry if
3572 * there are no more references on it.
3573 * NOTE: This routine should not be called to destroy a memory entry from the
3574 * kernel, as it will not release the Mach port associated with the memory
3575 * entry. The proper way to destroy a memory entry in the kernel is to
3576 * call mach_memory_entry_port_release() to release the kernel's send right on
3577 * the memory entry's port. When the last send right is released, the memory
3578 * entry will be destroyed via ipc_kobject_destroy().
3579 */
1c79356b
A
3580void
3581mach_destroy_memory_entry(
3582 ipc_port_t port)
3583{
3584 vm_named_entry_t named_entry;
3585#if MACH_ASSERT
3586 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
3587#endif /* MACH_ASSERT */
3588 named_entry = (vm_named_entry_t)port->ip_kobject;
316670eb
A
3589
3590 named_entry_lock(named_entry);
91447636 3591 named_entry->ref_count -= 1;
316670eb 3592
1c79356b 3593 if(named_entry->ref_count == 0) {
91447636 3594 if (named_entry->is_sub_map) {
1c79356b 3595 vm_map_deallocate(named_entry->backing.map);
39236c6e
A
3596 } else if (named_entry->is_copy) {
3597 vm_map_copy_discard(named_entry->backing.copy);
3598 } else {
3599 /* release the VM object we've been pointing to */
91447636 3600 vm_object_deallocate(named_entry->backing.object);
39236c6e 3601 }
91447636 3602
316670eb
A
3603 named_entry_unlock(named_entry);
3604 named_entry_lock_destroy(named_entry);
91447636
A
3605
3606 kfree((void *) port->ip_kobject,
3607 sizeof (struct vm_named_entry));
1c79356b 3608 } else
316670eb 3609 named_entry_unlock(named_entry);
1c79356b
A
3610}
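/*
 * Illustrative kernel-side sketch (not part of the original source): tearing
 * down a named entry the way the comment above prescribes -- by dropping the
 * kernel's send right rather than calling mach_destroy_memory_entry()
 * directly. The helper name is hypothetical.
 */
#if 0 /* usage sketch only */
static void
drop_named_entry(ipc_port_t entry_port)
{
	/* releasing the last send right triggers ipc_kobject_destroy(), */
	/* which in turn calls mach_destroy_memory_entry() for us */
	mach_memory_entry_port_release(entry_port);
}
#endif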
3611
0c530ab8
A
3612/* Allow manipulation of individual page state. This is actually part of */
3613/* the UPL regimen but takes place on the memory entry rather than on a UPL */
3614
3615kern_return_t
3616mach_memory_entry_page_op(
3617 ipc_port_t entry_port,
3618 vm_object_offset_t offset,
3619 int ops,
3620 ppnum_t *phys_entry,
3621 int *flags)
3622{
3623 vm_named_entry_t mem_entry;
3624 vm_object_t object;
3625 kern_return_t kr;
3626
3627 if (entry_port == IP_NULL ||
3628 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3629 return KERN_INVALID_ARGUMENT;
3630 }
3631
3632 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
3633
3634 named_entry_lock(mem_entry);
3635
39236c6e 3636 if (mem_entry->is_sub_map ||
39236c6e 3637 mem_entry->is_copy) {
0c530ab8
A
3638 named_entry_unlock(mem_entry);
3639 return KERN_INVALID_ARGUMENT;
3640 }
3641
3642 object = mem_entry->backing.object;
3643 if (object == VM_OBJECT_NULL) {
3644 named_entry_unlock(mem_entry);
3645 return KERN_INVALID_ARGUMENT;
3646 }
3647
3648 vm_object_reference(object);
3649 named_entry_unlock(mem_entry);
3650
3651 kr = vm_object_page_op(object, offset, ops, phys_entry, flags);
3652
3653 vm_object_deallocate(object);
3654
3655 return kr;
3656}
3657
3658/*
3659 * mach_memory_entry_range_op offers a performance enhancement over
3660 * mach_memory_entry_page_op for page_op functions which do not require
3661 * page-level state to be returned from the call. Page_op was created to
3662 * provide a low-cost alternative to page manipulation via UPLs when only a
3663 * single page was involved. The range_op call extends the _op family of
3664 * functions to work on multiple pages, where the lack of page-level state
3665 * handling allows the caller to avoid the overhead of the UPL structures.
3666 */
3667
3668kern_return_t
3669mach_memory_entry_range_op(
3670 ipc_port_t entry_port,
3671 vm_object_offset_t offset_beg,
3672 vm_object_offset_t offset_end,
3673 int ops,
3674 int *range)
3675{
3676 vm_named_entry_t mem_entry;
3677 vm_object_t object;
3678 kern_return_t kr;
3679
3680 if (entry_port == IP_NULL ||
3681 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
3682 return KERN_INVALID_ARGUMENT;
3683 }
3684
3685 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
3686
3687 named_entry_lock(mem_entry);
3688
39236c6e 3689 if (mem_entry->is_sub_map ||
39236c6e 3690 mem_entry->is_copy) {
0c530ab8
A
3691 named_entry_unlock(mem_entry);
3692 return KERN_INVALID_ARGUMENT;
3693 }
3694
3695 object = mem_entry->backing.object;
3696 if (object == VM_OBJECT_NULL) {
3697 named_entry_unlock(mem_entry);
3698 return KERN_INVALID_ARGUMENT;
3699 }
3700
3701 vm_object_reference(object);
3702 named_entry_unlock(mem_entry);
3703
3704 kr = vm_object_range_op(object,
3705 offset_beg,
3706 offset_end,
3707 ops,
b0d623f7 3708 (uint32_t *) range);
0c530ab8
A
3709
3710 vm_object_deallocate(object);
3711
3712 return kr;
3713}
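/*
 * Illustrative kernel-side sketch (not part of the original source): probing a
 * span of a named entry with the range_op interface instead of building a UPL.
 * UPL_ROP_PRESENT is assumed to be one of the ops understood by
 * vm_object_range_op(); the helper name is hypothetical.
 */
#if 0 /* usage sketch only */
static kern_return_t
probe_entry_range(ipc_port_t entry_port, vm_object_offset_t len)
{
	int range = 0;

	/* "range" reports how much of [0, len) the op applied to */
	return mach_memory_entry_range_op(entry_port,
					  0,		/* offset_beg */
					  len,		/* offset_end */
					  UPL_ROP_PRESENT, /* assumed op */
					  &range);
}
#endif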
1c79356b 3714
91447636 3715/* ******* Temporary Internal calls to UPL for BSD ***** */
1c79356b 3716
91447636
A
3717extern int kernel_upl_map(
3718 vm_map_t map,
3719 upl_t upl,
3720 vm_offset_t *dst_addr);
1c79356b 3721
91447636
A
3722extern int kernel_upl_unmap(
3723 vm_map_t map,
3724 upl_t upl);
150bd074 3725
91447636
A
3726extern int kernel_upl_commit(
3727 upl_t upl,
3728 upl_page_info_t *pl,
3729 mach_msg_type_number_t count);
1c79356b 3730
91447636
A
3731extern int kernel_upl_commit_range(
3732 upl_t upl,
3733 upl_offset_t offset,
3734 upl_size_t size,
3735 int flags,
3736 upl_page_info_array_t pl,
3737 mach_msg_type_number_t count);
1c79356b 3738
91447636
A
3739extern int kernel_upl_abort(
3740 upl_t upl,
3741 int abort_type);
1c79356b 3742
91447636
A
3743extern int kernel_upl_abort_range(
3744 upl_t upl,
3745 upl_offset_t offset,
3746 upl_size_t size,
3747 int abort_flags);
1c79356b 3748
1c79356b 3749
1c79356b
A
3750kern_return_t
3751kernel_upl_map(
3752 vm_map_t map,
3753 upl_t upl,
3754 vm_offset_t *dst_addr)
3755{
91447636 3756 return vm_upl_map(map, upl, dst_addr);
1c79356b
A
3757}
3758
3759
3760kern_return_t
3761kernel_upl_unmap(
3762 vm_map_t map,
0b4e3aa0 3763 upl_t upl)
1c79356b 3764{
91447636 3765 return vm_upl_unmap(map, upl);
1c79356b
A
3766}
3767
3768kern_return_t
3769kernel_upl_commit(
91447636
A
3770 upl_t upl,
3771 upl_page_info_t *pl,
0b4e3aa0 3772 mach_msg_type_number_t count)
1c79356b 3773{
0b4e3aa0
A
3774 kern_return_t kr;
3775
3776 kr = upl_commit(upl, pl, count);
3777 upl_deallocate(upl);
1c79356b
A
3778 return kr;
3779}
3780
0b4e3aa0 3781
1c79356b
A
3782kern_return_t
3783kernel_upl_commit_range(
3784 upl_t upl,
91447636
A
3785 upl_offset_t offset,
3786 upl_size_t size,
1c79356b 3787 int flags,
0b4e3aa0
A
3788 upl_page_info_array_t pl,
3789 mach_msg_type_number_t count)
1c79356b 3790{
0b4e3aa0
A
3791 boolean_t finished = FALSE;
3792 kern_return_t kr;
3793
3794 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
3795 flags |= UPL_COMMIT_NOTIFY_EMPTY;
3796
593a1d5f
A
3797 if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
3798 return KERN_INVALID_ARGUMENT;
3799 }
3800
0b4e3aa0
A
3801 kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
3802
3803 if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
3804 upl_deallocate(upl);
3805
1c79356b
A
3806 return kr;
3807}
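
/*
 * Hedged sketch (not in the original source) of the BSD-side pattern these
 * wrappers serve: commit only the sub-range of a UPL that has been filled
 * and pass UPL_COMMIT_FREE_ON_EMPTY so the wrapper deallocates the UPL once
 * the final commit drains it.  The helper name and its parameters are
 * hypothetical caller state.
 */
static void
commit_filled_portion(
	upl_t			upl,
	upl_offset_t		done_off,
	upl_size_t		done_len,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	pl_count)
{
	(void) kernel_upl_commit_range(upl, done_off, done_len,
				       UPL_COMMIT_FREE_ON_EMPTY,
				       pl, pl_count);
	/*
	 * No explicit upl_deallocate() here: FREE_ON_EMPTY implies
	 * NOTIFY_EMPTY above, so the wrapper frees the UPL when this
	 * commit empties it.
	 */
}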
3808
3809kern_return_t
3810kernel_upl_abort_range(
0b4e3aa0 3811 upl_t upl,
91447636
A
3812 upl_offset_t offset,
3813 upl_size_t size,
0b4e3aa0 3814 int abort_flags)
1c79356b 3815{
0b4e3aa0
A
3816 kern_return_t kr;
3817 boolean_t finished = FALSE;
1c79356b 3818
0b4e3aa0
A
3819 if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
3820 abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
1c79356b 3821
0b4e3aa0 3822 kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
1c79356b 3823
0b4e3aa0
A
3824 if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
3825 upl_deallocate(upl);
1c79356b 3826
0b4e3aa0 3827 return kr;
1c79356b
A
3828}
3829
1c79356b 3830kern_return_t
0b4e3aa0
A
3831kernel_upl_abort(
3832 upl_t upl,
3833 int abort_type)
1c79356b 3834{
0b4e3aa0 3835 kern_return_t kr;
1c79356b 3836
0b4e3aa0
A
3837 kr = upl_abort(upl, abort_type);
3838 upl_deallocate(upl);
3839 return kr;
1c79356b
A
3840}
3841
91447636
A
3842/*
3843 * Now a kernel-private interface (for BootCache
3844 * use only). Need a cleaner way to create an
3845 * empty vm_map() and return a handle to it.
3846 */
1c79356b
A
3847
3848kern_return_t
91447636
A
3849vm_region_object_create(
3850 __unused vm_map_t target_map,
3851 vm_size_t size,
3852 ipc_port_t *object_handle)
1c79356b 3853{
91447636
A
3854 vm_named_entry_t user_entry;
3855 ipc_port_t user_handle;
1c79356b 3856
91447636 3857 vm_map_t new_map;
1c79356b 3858
91447636
A
3859 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3860 != KERN_SUCCESS) {
1c79356b 3861 return KERN_FAILURE;
91447636 3862 }
1c79356b 3863
91447636 3864 /* Create a named object based on a submap of specified size */
1c79356b 3865
91447636 3866 new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
39236c6e
A
3867 vm_map_round_page(size,
3868 VM_MAP_PAGE_MASK(target_map)),
3869 TRUE);
3870 vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));
1c79356b 3871
91447636
A
3872 user_entry->backing.map = new_map;
3873 user_entry->internal = TRUE;
3874 user_entry->is_sub_map = TRUE;
3875 user_entry->offset = 0;
3876 user_entry->protection = VM_PROT_ALL;
3877 user_entry->size = size;
3878 assert(user_entry->ref_count == 1);
1c79356b 3879
91447636 3880 *object_handle = user_handle;
1c79356b 3881 return KERN_SUCCESS;
1c79356b 3882
55e303ae
A
3883}
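
/*
 * Hedged sketch (not in the original file): one plausible way a
 * kernel-internal client such as BootCache could consume the handle --
 * create a submap-backed named entry of `region_size` bytes and then map
 * it into kernel_map through mach_vm_map() (or its kernel-internal
 * equivalent).  The helper name and the mapping parameters are
 * illustrative assumptions, not the actual BootCache code.
 */
static kern_return_t
create_and_map_region(
	vm_size_t		region_size,
	mach_vm_offset_t	*addr_out)
{
	ipc_port_t	handle;
	kern_return_t	kr;

	kr = vm_region_object_create(kernel_map, region_size, &handle);
	if (kr != KERN_SUCCESS)
		return kr;

	*addr_out = 0;
	kr = mach_vm_map(kernel_map, addr_out, (mach_vm_size_t) region_size,
			 0 /* mask */, VM_FLAGS_ANYWHERE,
			 handle, 0 /* offset */, FALSE /* copy */,
			 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	return kr;
}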
3884
91447636
A
3885ppnum_t vm_map_get_phys_page( /* forward */
3886 vm_map_t map,
3887 vm_offset_t offset);
3888
55e303ae 3889ppnum_t
1c79356b 3890vm_map_get_phys_page(
91447636
A
3891 vm_map_t map,
3892 vm_offset_t addr)
1c79356b 3893{
91447636
A
3894 vm_object_offset_t offset;
3895 vm_object_t object;
3896 vm_map_offset_t map_offset;
3897 vm_map_entry_t entry;
3898 ppnum_t phys_page = 0;
3899
39236c6e 3900 map_offset = vm_map_trunc_page(addr, PAGE_MASK);
1c79356b
A
3901
3902 vm_map_lock(map);
91447636 3903 while (vm_map_lookup_entry(map, map_offset, &entry)) {
1c79356b 3904
3e170ce0 3905 if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
1c79356b 3906 vm_map_unlock(map);
91447636 3907 return (ppnum_t) 0;
1c79356b
A
3908 }
3909 if (entry->is_sub_map) {
3910 vm_map_t old_map;
3e170ce0 3911 vm_map_lock(VME_SUBMAP(entry));
1c79356b 3912 old_map = map;
3e170ce0
A
3913 map = VME_SUBMAP(entry);
3914 map_offset = (VME_OFFSET(entry) +
3915 (map_offset - entry->vme_start));
1c79356b
A
3916 vm_map_unlock(old_map);
3917 continue;
3918 }
3e170ce0 3919 if (VME_OBJECT(entry)->phys_contiguous) {
9bccf70c
A
3920 /* These are not standard pageable memory mappings. */
 3921 /* If they are not present in the object, they will */
 3922 /* have to be picked up from the pager through the */
 3923 /* fault mechanism. */
3e170ce0 3924 if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
9bccf70c
A
3925 /* need to call vm_fault */
3926 vm_map_unlock(map);
91447636 3927 vm_fault(map, map_offset, VM_PROT_NONE,
5ba3f43e
A
3928 FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
3929 THREAD_UNINT, NULL, 0);
9bccf70c
A
3930 vm_map_lock(map);
3931 continue;
3932 }
3e170ce0
A
3933 offset = (VME_OFFSET(entry) +
3934 (map_offset - entry->vme_start));
55e303ae 3935 phys_page = (ppnum_t)
3e170ce0
A
3936 ((VME_OBJECT(entry)->vo_shadow_offset
3937 + offset) >> PAGE_SHIFT);
9bccf70c
A
3938 break;
3939
3940 }
3e170ce0
A
3941 offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
3942 object = VME_OBJECT(entry);
1c79356b
A
3943 vm_object_lock(object);
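		/*
		 * Walk the object's shadow chain until a resident page is
		 * found or the chain ends (in which case 0 is returned).
		 */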
3944 while (TRUE) {
3945 vm_page_t dst_page = vm_page_lookup(object,offset);
3946 if(dst_page == VM_PAGE_NULL) {
3947 if(object->shadow) {
3948 vm_object_t old_object;
3949 vm_object_lock(object->shadow);
3950 old_object = object;
6d2010ae 3951 offset = offset + object->vo_shadow_offset;
1c79356b
A
3952 object = object->shadow;
3953 vm_object_unlock(old_object);
3954 } else {
3955 vm_object_unlock(object);
3956 break;
3957 }
3958 } else {
39037602 3959 phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
1c79356b
A
3960 vm_object_unlock(object);
3961 break;
3962 }
3963 }
3964 break;
3965
3966 }
3967
3968 vm_map_unlock(map);
55e303ae
A
3969 return phys_page;
3970}
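
/*
 * Hedged sketch (not in the original file): a debug-style helper that uses
 * vm_map_get_phys_page() to turn a mapped kernel virtual address into a
 * physical address.  The helper name is hypothetical; PAGE_SHIFT and
 * PAGE_MASK are the usual machine vm_param definitions.  A return of 0
 * means the page was not resident (or genuinely maps physical page 0).
 */
static uint64_t
kva_to_phys(
	vm_offset_t	kva)
{
	ppnum_t	pn;

	pn = vm_map_get_phys_page(kernel_map, kva);
	if (pn == 0)
		return 0;
	/* physical page number -> byte address, plus the offset in the page */
	return (((uint64_t) pn) << PAGE_SHIFT) + (kva & PAGE_MASK);
}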
3971
3e170ce0 3972#if 0
91447636
A
3973kern_return_t kernel_object_iopl_request( /* forward */
3974 vm_named_entry_t named_entry,
3975 memory_object_offset_t offset,
b0d623f7 3976 upl_size_t *upl_size,
91447636
A
3977 upl_t *upl_ptr,
3978 upl_page_info_array_t user_page_list,
3979 unsigned int *page_list_count,
3980 int *flags);
3981
55e303ae
A
3982kern_return_t
3983kernel_object_iopl_request(
3984 vm_named_entry_t named_entry,
3985 memory_object_offset_t offset,
b0d623f7 3986 upl_size_t *upl_size,
55e303ae
A
3987 upl_t *upl_ptr,
3988 upl_page_info_array_t user_page_list,
3989 unsigned int *page_list_count,
3990 int *flags)
3991{
3992 vm_object_t object;
3993 kern_return_t ret;
3994
3995 int caller_flags;
3996
3997 caller_flags = *flags;
3998
91447636
A
3999 if (caller_flags & ~UPL_VALID_FLAGS) {
4000 /*
4001 * For forward compatibility's sake,
4002 * reject any unknown flag.
4003 */
4004 return KERN_INVALID_VALUE;
4005 }
4006
55e303ae
A
4007 /* a few checks to make sure user is obeying rules */
4008 if(*upl_size == 0) {
4009 if(offset >= named_entry->size)
4010 return(KERN_INVALID_RIGHT);
b0d623f7
A
4011 *upl_size = (upl_size_t) (named_entry->size - offset);
4012 if (*upl_size != named_entry->size - offset)
4013 return KERN_INVALID_ARGUMENT;
55e303ae
A
4014 }
4015 if(caller_flags & UPL_COPYOUT_FROM) {
4016 if((named_entry->protection & VM_PROT_READ)
4017 != VM_PROT_READ) {
4018 return(KERN_INVALID_RIGHT);
4019 }
4020 } else {
4021 if((named_entry->protection &
4022 (VM_PROT_READ | VM_PROT_WRITE))
4023 != (VM_PROT_READ | VM_PROT_WRITE)) {
4024 return(KERN_INVALID_RIGHT);
4025 }
4026 }
4027 if(named_entry->size < (offset + *upl_size))
4028 return(KERN_INVALID_ARGUMENT);
4029
4030 /* the caller's offset parameter is defined to be the offset */
 4031 /* from the beginning of the named entry's offset in the object */
4032 offset = offset + named_entry->offset;
4033
39236c6e
A
4034 if (named_entry->is_sub_map ||
4035 named_entry->is_copy)
4036 return KERN_INVALID_ARGUMENT;
55e303ae
A
4037
4038 named_entry_lock(named_entry);
4039
5ba3f43e
A
4040 /* This is the case where we are going to operate */
 4041 /* on an already known object. If the object is */
 4042 /* not ready, it is internal. An external */
 4043 /* object cannot be mapped until it is ready, */
 4044 /* so we can skip the ready check */
 4045 /* in this case. */
4046 object = named_entry->backing.object;
4047 vm_object_reference(object);
4048 named_entry_unlock(named_entry);
55e303ae
A
4049
4050 if (!object->private) {
fe8ab488
A
4051 if (*upl_size > MAX_UPL_TRANSFER_BYTES)
4052 *upl_size = MAX_UPL_TRANSFER_BYTES;
55e303ae
A
4053 if (object->phys_contiguous) {
4054 *flags = UPL_PHYS_CONTIG;
4055 } else {
4056 *flags = 0;
4057 }
4058 } else {
4059 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
4060 }
4061
4062 ret = vm_object_iopl_request(object,
4063 offset,
4064 *upl_size,
4065 upl_ptr,
4066 user_page_list,
4067 page_list_count,
3e170ce0 4068 (upl_control_flags_t)(unsigned int)caller_flags);
55e303ae
A
4069 vm_object_deallocate(object);
4070 return ret;
1c79356b 4071}
3e170ce0 4072#endif
5ba3f43e
A
4073
4074/*
4075 * These symbols are looked up at runtime by VMware and VirtualBox,
4076 * despite not being exported in the symbol sets.
4077 */
4078
4079#if defined(__x86_64__)
4080
4081kern_return_t
4082mach_vm_map(
4083 vm_map_t target_map,
4084 mach_vm_offset_t *address,
4085 mach_vm_size_t initial_size,
4086 mach_vm_offset_t mask,
4087 int flags,
4088 ipc_port_t port,
4089 vm_object_offset_t offset,
4090 boolean_t copy,
4091 vm_prot_t cur_protection,
4092 vm_prot_t max_protection,
4093 vm_inherit_t inheritance);
4094
4095kern_return_t
4096mach_vm_remap(
4097 vm_map_t target_map,
4098 mach_vm_offset_t *address,
4099 mach_vm_size_t size,
4100 mach_vm_offset_t mask,
4101 int flags,
4102 vm_map_t src_map,
4103 mach_vm_offset_t memory_address,
4104 boolean_t copy,
4105 vm_prot_t *cur_protection,
4106 vm_prot_t *max_protection,
4107 vm_inherit_t inheritance);
4108
4109kern_return_t
4110mach_vm_map(
4111 vm_map_t target_map,
4112 mach_vm_offset_t *address,
4113 mach_vm_size_t initial_size,
4114 mach_vm_offset_t mask,
4115 int flags,
4116 ipc_port_t port,
4117 vm_object_offset_t offset,
4118 boolean_t copy,
4119 vm_prot_t cur_protection,
4120 vm_prot_t max_protection,
4121 vm_inherit_t inheritance)
4122{
4123 return (mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
4124 offset, copy, cur_protection, max_protection, inheritance));
4125}
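
/*
 * Hedged sketch (not in the original file): the sort of call a third-party
 * VMM could make through this re-exported symbol -- carve out `size` bytes
 * of anonymous, pageable memory anywhere in `task_map`.  The helper and its
 * parameters are illustrative; passing IP_NULL as the backing port requests
 * anonymous memory.
 */
static kern_return_t
vmm_alloc_guest_range(
	vm_map_t		task_map,
	mach_vm_size_t		size,
	mach_vm_offset_t	*addr_out)
{
	*addr_out = 0;
	return mach_vm_map(task_map, addr_out, size,
			   0 /* mask */, VM_FLAGS_ANYWHERE,
			   IP_NULL, 0 /* offset */, FALSE /* copy */,
			   VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}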
4126
4127kern_return_t
4128mach_vm_remap(
4129 vm_map_t target_map,
4130 mach_vm_offset_t *address,
4131 mach_vm_size_t size,
4132 mach_vm_offset_t mask,
4133 int flags,
4134 vm_map_t src_map,
4135 mach_vm_offset_t memory_address,
4136 boolean_t copy,
4137 vm_prot_t *cur_protection,
4138 vm_prot_t *max_protection,
4139 vm_inherit_t inheritance)
4140{
4141 return (mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
4142 copy, cur_protection, max_protection, inheritance));
4143}
4144
4145kern_return_t
4146vm_map(
4147 vm_map_t target_map,
4148 vm_offset_t *address,
4149 vm_size_t size,
4150 vm_offset_t mask,
4151 int flags,
4152 ipc_port_t port,
4153 vm_offset_t offset,
4154 boolean_t copy,
4155 vm_prot_t cur_protection,
4156 vm_prot_t max_protection,
4157 vm_inherit_t inheritance);
4158
4159kern_return_t
4160vm_map(
4161 vm_map_t target_map,
4162 vm_offset_t *address,
4163 vm_size_t size,
4164 vm_offset_t mask,
4165 int flags,
4166 ipc_port_t port,
4167 vm_offset_t offset,
4168 boolean_t copy,
4169 vm_prot_t cur_protection,
4170 vm_prot_t max_protection,
4171 vm_inherit_t inheritance)
4172{
4173 vm_tag_t tag;
4174
4175 VM_GET_FLAGS_ALIAS(flags, tag);
4176 return (vm_map_kernel(target_map, address, size, mask, flags, tag, port, offset, copy, cur_protection, max_protection, inheritance));
4177}
4178
4179#endif /* __x86_64__ */