/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

#include <debug.h>

#include <vm_cpm.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/upl.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

vm_size_t	upl_offset_to_pagelist = 0;

#if VM_CPM
#include <vm/cpm.h>
#endif	/* VM_CPM */

ipc_port_t	dynamic_pager_control_port = NULL;

/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate(
	vm_map_t		map,
	mach_vm_offset_t	*addr,
	mach_vm_size_t		size,
	int			flags)
{
	vm_map_offset_t map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;
	boolean_t	anywhere;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain useable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0)
			map_addr += PAGE_SIZE;
	} else
		map_addr = vm_map_trunc_page(*addr);
	map_size = vm_map_round_page(size);
	if (map_size == 0) {
		return(KERN_INVALID_ARGUMENT);
	}

	result = vm_map_enter(
			map,
			&map_addr,
			map_size,
			(vm_map_offset_t)0,
			flags,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	*addr = map_addr;
	return(result);
}
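
/*
 * Usage sketch (illustrative only, not part of the kernel build): a task
 * would typically allocate and later release anonymous zero-fill memory
 * in its own map through the user-level interfaces in <mach/mach_vm.h>,
 * which end up in the routine above:
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t    size = 4096;
 *	kern_return_t     kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS)
 *		kr = mach_vm_deallocate(mach_task_self(), addr, size);
 */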

/*
 *	vm_allocate
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate(
	vm_map_t	map,
	vm_offset_t	*addr,
	vm_size_t	size,
	int		flags)
{
	vm_map_offset_t map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;
	boolean_t	anywhere;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain useable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0)
			map_addr += PAGE_SIZE;
	} else
		map_addr = vm_map_trunc_page(*addr);
	map_size = vm_map_round_page(size);
	if (map_size == 0) {
		return(KERN_INVALID_ARGUMENT);
	}

	result = vm_map_enter(
			map,
			&map_addr,
			map_size,
			(vm_map_offset_t)0,
			flags,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	*addr = CAST_DOWN(vm_offset_t, map_addr);
	return(result);
}

/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (mach_vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, vm_map_trunc_page(start),
			     vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
}

/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, vm_map_trunc_page(start),
			     vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
}

/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_inherit(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
			      new_inheritance));
}

/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses the same size
 *	as the kernel).
 */
kern_return_t
vm_inherit(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
			      new_inheritance));
}

/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.
 */

kern_return_t
mach_vm_protect(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_protect(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
			      new_protection,
			      set_maximum));
}
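
/*
 * Usage sketch (illustrative only): making an already-mapped range
 * read-only from user space, assuming <mach/mach_vm.h>.  Passing TRUE
 * for set_maximum would instead lower the maximum protection, which
 * can never be raised again for that range:
 *
 *	kern_return_t kr;
 *
 *	kr = mach_vm_protect(mach_task_self(), addr, size,
 *			     FALSE, VM_PROT_READ);
 */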

/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map. Addressability of the range limited
 *	to the same size as the kernel.
 */

kern_return_t
vm_protect(
	vm_map_t		map,
	vm_offset_t		start,
	vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_protect(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
			      new_protection,
			      set_maximum));
}

/*
 *	mach_vm_machine_attribute -
 *	Handle machine-specific attributes for a mapping, such
 *	as cachability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
	vm_map_t			map,
	mach_vm_address_t		addr,
	mach_vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(map,
					vm_map_trunc_page(addr),
					vm_map_round_page(addr+size),
					attribute,
					value);
}

/*
 *	vm_machine_attribute -
 *	Handle machine-specific attributes for a mapping, such
 *	as cachability, migrability, etc. Limited addressability
 *	(same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
	vm_map_t			map,
	vm_address_t			addr,
	vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(map,
					vm_map_trunc_page(addr),
					vm_map_round_page(addr+size),
					attribute,
					value);
}

/*
 *	mach_vm_read -
 *	Read/copy a range from one address space and return it to the caller.
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 *
 *	JMM - because of mach_msg_type_number_t, this call is limited to a
 *	single 4GB region at this time.
 *
 */
kern_return_t
mach_vm_read(
	vm_map_t		map,
	mach_vm_address_t	addr,
	mach_vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);


	error = vm_map_copyin(map,
			(vm_map_address_t)addr,
			(vm_map_size_t)size,
			FALSE,	/* src_destroy */
			&ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = size;
	}
	return(error);
}
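
/*
 * Usage sketch (illustrative only): reading another task's memory from
 * user space, assuming a send right "target" for that task was obtained
 * elsewhere (for example via the privileged task_for_pid()).  MIG hands
 * the data back as newly mapped pages in the caller's address space,
 * which the caller should eventually deallocate:
 *
 *	vm_offset_t data;
 *	mach_msg_type_number_t data_cnt;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_read(target, remote_addr, remote_size, &data, &data_cnt);
 *	if (kr == KERN_SUCCESS)
 *		kr = mach_vm_deallocate(mach_task_self(), data, data_cnt);
 */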

/*
 *	vm_read -
 *	Read/copy a range from one address space and return it to the caller.
 *	Limited addressability (same range limits as for the native kernel map).
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 */
kern_return_t
vm_read(
	vm_map_t		map,
	vm_address_t		addr,
	vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map,
			(vm_map_address_t)addr,
			(vm_map_size_t)size,
			FALSE,	/* src_destroy */
			&ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = size;
	}
	return(error);
}

/*
 *	mach_vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *	manually here.
 */
kern_return_t
mach_vm_read_list(
	vm_map_t		map,
	mach_vm_read_entry_t	data_list,
	natural_t		count)
{
	mach_msg_type_number_t	i;
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if(map_size != 0) {
			error = vm_map_copyin(map,
					map_addr,
					map_size,
					FALSE,	/* src_destroy */
					&copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
						current_task()->map,
						&map_addr,
						copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address = map_addr;
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return(error);
}

/*
 *	vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *	manually here.
 *
 *	The source and destination ranges are limited to those
 *	that can be described with a vm_address_t (i.e. same
 *	size map as the kernel).
 *
 *	JMM - If the result of the copyout is an address range
 *	that cannot be described with a vm_address_t (i.e. the
 *	caller had a larger address space but used this call
 *	anyway), it will result in a truncated address being
 *	returned (and a likely confused caller).
 */

kern_return_t
vm_read_list(
	vm_map_t	map,
	vm_read_entry_t	data_list,
	natural_t	count)
{
	mach_msg_type_number_t	i;
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if(map_size != 0) {
			error = vm_map_copyin(map,
					map_addr,
					map_size,
					FALSE,	/* src_destroy */
					&copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
						&map_addr,
						copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address =
						CAST_DOWN(vm_offset_t, map_addr);
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return(error);
}

/*
 *	mach_vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	In making an assumption that the current thread is local, it is
 *	no longer cluster-safe without a fully supportive local proxy
 *	thread/task (but we don't support clusters anymore so this is moot).
 */

kern_return_t
mach_vm_read_overwrite(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	mach_vm_address_t	data,
	mach_vm_size_t		*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
				(vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					(vm_map_address_t)data,
					copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return(error);
}

/*
 *	vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	This routine adds the additional limitation that the source and
 *	destination ranges must be describable with vm_address_t values
 *	(i.e. the same size address spaces as the kernel, or at least
 *	the ranges are in that first portion of the respective address
 *	spaces).
 */

kern_return_t
vm_read_overwrite(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_address_t	data,
	vm_size_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
				(vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					(vm_map_address_t)data,
					copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return(error);
}


/*
 *	mach_vm_write -
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 */
kern_return_t
mach_vm_write(
	vm_map_t			map,
	mach_vm_address_t		address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
}
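
/*
 * Usage sketch (illustrative only): overwriting a writable range in a
 * target task with a local buffer.  At the user level the data/size pair
 * is an ordinary buffer; MIG converts it into the vm_map_copy_t that the
 * kernel routine above receives:
 *
 *	char buf[] = "hello";
 *	kern_return_t kr;
 *
 *	kr = mach_vm_write(target, remote_addr,
 *			   (vm_offset_t)buf, sizeof(buf));
 */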

/*
 *	vm_write -
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 *
 *	The addressability of the range of addresses to overwrite is
 *	limited by the use of a vm_address_t (same size as kernel map).
 *	Either the target map is also small, or the range is in the
 *	low addresses within it.
 */
kern_return_t
vm_write(
	vm_map_t			map,
	vm_address_t			address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
}

/*
 *	mach_vm_copy -
 *	Overwrite one range of the specified map with the contents of
 *	another range within that same map (i.e. both address ranges
 *	are "over there").
 */
kern_return_t
mach_vm_copy(
	vm_map_t		map,
	mach_vm_address_t	source_address,
	mach_vm_size_t		size,
	mach_vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}

kern_return_t
vm_copy(
	vm_map_t	map,
	vm_address_t	source_address,
	vm_size_t	size,
	vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}

/*
 *	mach_vm_map -
 *	Map some range of an object into an address space.
 *
 *	The object can be one of several types of objects:
 *		NULL - anonymous memory
 *		a named entry - a range within another address space
 *		                or a range within a memory object
 *		a whole memory object
 *
 */
kern_return_t
mach_vm_map(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_MAP)
		return KERN_INVALID_ARGUMENT;

	return vm_map_enter_mem_object(target_map,
				       address,
				       initial_size,
				       mask,
				       flags,
				       port,
				       offset,
				       copy,
				       cur_protection,
				       max_protection,
				       inheritance);
}


/* legacy interface */
kern_return_t
vm_map_64(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	kern_return_t kr;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 port, offset, copy,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_address_t, map_addr);
	return kr;
}

/* temporary, until world build */
kern_return_t
vm_map(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_offset_t		offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	vm_object_offset_t obj_offset;
	kern_return_t kr;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;
	obj_offset = (vm_object_offset_t)offset;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 port, obj_offset, copy,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_address_t, map_addr);
	return kr;
}

/*
 *	mach_vm_remap -
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 */

kern_return_t
mach_vm_remap(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	boolean_t		anywhere,
	vm_map_t		src_map,
	mach_vm_offset_t	memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
			  &map_addr,
			  size,
			  mask,
			  anywhere,
			  src_map,
			  memory_address,
			  copy,
			  cur_protection,
			  max_protection,
			  inheritance);
	*address = map_addr;
	return kr;
}
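
/*
 * Usage sketch (illustrative only): creating a second, shared mapping of
 * an existing range within the same task, matching the parameter list of
 * the routine above (TRUE selects "anywhere" placement, FALSE for copy
 * means the new range shares, rather than copies, the original pages).
 * The effective current/maximum protections are returned to the caller:
 *
 *	mach_vm_address_t new_addr = 0;
 *	vm_prot_t cur_prot, max_prot;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_remap(mach_task_self(), &new_addr, size, 0,
 *			   TRUE, mach_task_self(), addr, FALSE,
 *			   &cur_prot, &max_prot, VM_INHERIT_NONE);
 */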

/*
 *	vm_remap -
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 *
 *	The addressability of the source and target address
 *	range is limited by the size of vm_address_t (in the
 *	kernel context).
 */
kern_return_t
vm_remap(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	boolean_t		anywhere,
	vm_map_t		src_map,
	vm_offset_t		memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
			  &map_addr,
			  size,
			  mask,
			  anywhere,
			  src_map,
			  memory_address,
			  copy,
			  cur_protection,
			  max_protection,
			  inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}

/*
 * NOTE: these routines (and this file) will no longer require mach_host_server.h
 * when mach_vm_wire and vm_wire are changed to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	mach_vm_wire
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire(
	host_priv_t		host_priv,
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL)
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, vm_map_trunc_page(start),
				 vm_map_round_page(start+size), access, TRUE);
	} else {
		rc = vm_map_unwire(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), TRUE);
	}
	return rc;
}
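
/*
 * Usage sketch (illustrative only): wiring and later unwiring a range
 * from user space.  This assumes "host_priv" already holds the privileged
 * host port, which is normally only obtainable by root, so unprivileged
 * callers will get KERN_INVALID_HOST:
 *
 *	kern_return_t kr;
 *
 *	kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
 *			  VM_PROT_READ | VM_PROT_WRITE);
 *	if (kr == KERN_SUCCESS)
 *		kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
 *				  VM_PROT_NONE);
 */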

/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t		host_priv,
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if ((access & ~VM_PROT_ALL) || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		rc = KERN_SUCCESS;
	} else if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, vm_map_trunc_page(start),
				 vm_map_round_page(start+size), access, TRUE);
	} else {
		rc = vm_map_unwire(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), TRUE);
	}
	return rc;
}

/*
 *	mach_vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */

kern_return_t
mach_vm_msync(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	vm_sync_t		sync_flags)
{

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			(vm_map_size_t)size, sync_flags);
}
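
/*
 * Usage sketch (illustrative only): synchronously flushing any dirty
 * pages of a mapped-file region back to its pager, from user space:
 *
 *	kern_return_t kr;
 *
 *	kr = mach_vm_msync(mach_task_self(), addr, size,
 *			   VM_SYNC_SYNCHRONOUS);
 */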

/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */

kern_return_t
vm_msync(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_sync_t	sync_flags)
{

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			(vm_map_size_t)size, sync_flags);
}


/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
kern_return_t
mach_vm_behavior_set(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_behavior_t		new_behavior)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_behavior_set(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), new_behavior));
}

/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
kern_return_t
vm_behavior_set(
	vm_map_t		map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_behavior_t		new_behavior)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_behavior_set(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), new_behavior));
}

/*
 *	mach_vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
mach_vm_region(
	vm_map_t		 map,
	mach_vm_offset_t	*address,		/* IN/OUT */
	mach_vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	vm_map_offset_t 	map_addr;
	vm_map_size_t 		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = map_addr;
	*size = map_size;
	return kr;
}
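
/*
 * Usage sketch (illustrative only): looking up the region containing
 * (or following) a given address and reading its basic attributes:
 *
 *	mach_vm_address_t r_addr = addr;
 *	mach_vm_size_t r_size;
 *	vm_region_basic_info_data_64_t basic;
 *	mach_msg_type_number_t cnt = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t obj_name;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_region(mach_task_self(), &r_addr, &r_size,
 *			    VM_REGION_BASIC_INFO_64,
 *			    (vm_region_info_t)&basic, &cnt, &obj_name);
 */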

/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
vm_region_64(
	vm_map_t		 map,
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	vm_map_offset_t 	map_addr;
	vm_map_size_t 		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_offset_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

kern_return_t
vm_region(
	vm_map_t		 map,
	vm_address_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	vm_map_address_t 	map_addr;
	vm_map_size_t 		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
mach_vm_region_recurse(
	vm_map_t			map,
	mach_vm_address_t		*address,
	mach_vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_t	info,
	mach_msg_type_number_t 		*infoCnt)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			map,
			&map_addr,
			&map_size,
			depth,
			(vm_region_submap_info_64_t)info,
			infoCnt);

	*address = map_addr;
	*size = map_size;
	return kr;
}

/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
vm_region_recurse_64(
	vm_map_t			map,
	vm_address_t			*address,
	vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_64_t	info,
	mach_msg_type_number_t 		*infoCnt)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			map,
			&map_addr,
			&map_size,
			depth,
			(vm_region_submap_info_64_t)info,
			infoCnt);

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

kern_return_t
vm_region_recurse(
	vm_map_t			map,
	vm_offset_t			*address,	/* IN/OUT */
	vm_size_t			*size,		/* OUT */
	natural_t	 		*depth,		/* IN/OUT */
	vm_region_recurse_info_t	info32,		/* IN/OUT */
	mach_msg_type_number_t		*infoCnt)	/* IN/OUT */
{
	vm_region_submap_info_data_64_t info64;
	vm_region_submap_info_t info;
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
		return KERN_INVALID_ARGUMENT;


	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
				      depth, &info64, infoCnt);

	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

kern_return_t
mach_vm_purgable_control(
	vm_map_t		map,
	mach_vm_offset_t	address,
	vm_purgable_t		control,
	int			*state)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address),
				       control,
				       state);
}
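
/*
 * Usage sketch (illustrative only): marking a purgeable allocation (one
 * created with VM_FLAGS_PURGABLE) volatile so the system may reclaim its
 * pages under pressure, then reclaiming it for reuse and checking whether
 * the contents survived:
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *				      VM_PURGABLE_SET_STATE, &state);
 *
 *	state = VM_PURGABLE_NONVOLATILE;
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *				      VM_PURGABLE_SET_STATE, &state);
 *	On return, state is VM_PURGABLE_EMPTY if the pages were discarded.
 */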

kern_return_t
vm_purgable_control(
	vm_map_t		map,
	vm_offset_t		address,
	vm_purgable_t		control,
	int			*state)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address),
				       control,
				       state);
}


/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host priv port).  Set this variable to zero if
 *	you want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
kern_return_t
vm_allocate_cpm(
	host_priv_t		host_priv,
	vm_map_t		map,
	vm_address_t		*addr,
	vm_size_t		size,
	int			flags)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
		return KERN_INVALID_HOST;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*addr;
	map_size = (vm_map_size_t)size;

	kr = vm_map_enter_cpm(map,
			      &map_addr,
			      map_size,
			      flags);

	*addr = CAST_DOWN(vm_address_t, map_addr);
	return kr;
}


kern_return_t
mach_vm_page_query(
	vm_map_t		map,
	mach_vm_offset_t	offset,
	int			*disposition,
	int			*ref_count)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_info(map,
				vm_map_trunc_page(offset),
				disposition, ref_count);
}
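
/*
 * Usage sketch (illustrative only, assuming the user-level stub for this
 * routine is available from the mach_vm MIG interfaces on the target
 * release): querying whether a single page of the caller's address space
 * is resident and how it is referenced.  The disposition bits (such as
 * VM_PAGE_QUERY_PAGE_PRESENT) describe the page's state:
 *
 *	integer_t disposition, ref_count;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_page_query(mach_task_self(), addr,
 *				&disposition, &ref_count);
 */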
kern_return_t
vm_map_page_query(
	vm_map_t		map,
	vm_offset_t		offset,
	int			*disposition,
	int			*ref_count)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_info(map,
				vm_map_trunc_page(offset),
				disposition, ref_count);
}

/* map a (whole) upl into an address space */
kern_return_t
vm_upl_map(
	vm_map_t		map,
	upl_t			upl,
	vm_offset_t		*dst_addr)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_upl(map, upl, &map_addr);
	*dst_addr = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}

kern_return_t
vm_upl_unmap(
	vm_map_t		map,
	upl_t			upl)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return (vm_map_remove_upl(map, upl));
}

/* Retrieve a upl for an object underlying an address range in a map */

kern_return_t
vm_map_get_upl(
	vm_map_t		map,
	vm_map_offset_t		map_offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			*flags,
	int			force_data_sync)
{
	int		map_flags;
	kern_return_t	kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_flags = *flags & ~UPL_NOZEROFILL;
	if (force_data_sync)
		map_flags |= UPL_FORCE_DATA_SYNC;

	kr = vm_map_create_upl(map,
			       map_offset,
			       upl_size,
			       upl,
			       page_list,
			       count,
			       &map_flags);

	*flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
	return kr;
}
1697
1c79356b 1698/*
91447636
A
1699 * mach_make_memory_entry_64
1700 *
1701 * Think of it as a two-stage vm_remap() operation. First
1702 * you get a handle. Second, you get map that handle in
1703 * somewhere else. Rather than doing it all at once (and
1704 * without needing access to the other whole map).
1c79356b
A
1705 */
1706
1707kern_return_t
1708mach_make_memory_entry_64(
1709 vm_map_t target_map,
91447636
A
1710 memory_object_size_t *size,
1711 memory_object_offset_t offset,
1c79356b
A
1712 vm_prot_t permission,
1713 ipc_port_t *object_handle,
91447636 1714 ipc_port_t parent_handle)
1c79356b
A
1715{
1716 vm_map_version_t version;
91447636
A
1717 vm_named_entry_t parent_entry;
1718 vm_named_entry_t user_entry;
1c79356b 1719 ipc_port_t user_handle;
1c79356b 1720 kern_return_t kr;
91447636 1721 vm_map_t real_map;
1c79356b
A
1722
1723 /* needed for call to vm_map_lookup_locked */
91447636 1724 boolean_t wired;
1c79356b 1725 vm_object_offset_t obj_off;
91447636 1726 vm_prot_t prot;
2d21ac55 1727 struct vm_object_fault_info fault_info;
91447636
A
1728 vm_object_t object;
1729 vm_object_t shadow_object;
1c79356b
A
1730
1731 /* needed for direct map entry manipulation */
1732 vm_map_entry_t map_entry;
9bccf70c 1733 vm_map_entry_t next_entry;
91447636
A
1734 vm_map_t local_map;
1735 vm_map_t original_map = target_map;
1736 vm_map_size_t total_size;
1737 vm_map_size_t map_size;
1738 vm_map_offset_t map_offset;
1739 vm_map_offset_t local_offset;
1c79356b 1740 vm_object_size_t mappable_size;
9bccf70c 1741
91447636
A
1742 unsigned int access;
1743 vm_prot_t protections;
1744 unsigned int wimg_mode;
1745 boolean_t cache_attr = FALSE;
1746
1747 if (((permission & 0x00FF0000) &
1748 ~(MAP_MEM_ONLY |
1749 MAP_MEM_NAMED_CREATE |
1750 MAP_MEM_PURGABLE |
1751 MAP_MEM_NAMED_REUSE))) {
1752 /*
1753 * Unknown flag: reject for forward compatibility.
1754 */
1755 return KERN_INVALID_VALUE;
1756 }
1757
1758 if (parent_handle != IP_NULL &&
1759 ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
1760 parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
1761 } else {
1762 parent_entry = NULL;
1763 }
55e303ae
A
1764
1765 protections = permission & VM_PROT_ALL;
1766 access = GET_MAP_MEM(permission);
1767
91447636
A
1768 user_handle = IP_NULL;
1769 user_entry = NULL;
1770
1771 map_offset = vm_map_trunc_page(offset);
1772 map_size = vm_map_round_page(*size);
1c79356b 1773
91447636
A
1774 if (permission & MAP_MEM_ONLY) {
1775 boolean_t parent_is_object;
55e303ae 1776
91447636 1777 if (parent_entry == NULL) {
55e303ae
A
1778 return KERN_INVALID_ARGUMENT;
1779 }
91447636
A
1780
1781 parent_is_object = !(parent_entry->is_sub_map || parent_entry->is_pager);
1782 object = parent_entry->backing.object;
1783 if(parent_is_object && object != VM_OBJECT_NULL)
55e303ae 1784 wimg_mode = object->wimg_bits;
91447636
A
1785 else
1786 wimg_mode = VM_WIMG_DEFAULT;
1787 if((access != GET_MAP_MEM(parent_entry->protection)) &&
1788 !(parent_entry->protection & VM_PROT_WRITE)) {
55e303ae
A
1789 return KERN_INVALID_RIGHT;
1790 }
1791 if(access == MAP_MEM_IO) {
91447636 1792 SET_MAP_MEM(access, parent_entry->protection);
55e303ae
A
1793 wimg_mode = VM_WIMG_IO;
1794 } else if (access == MAP_MEM_COPYBACK) {
91447636 1795 SET_MAP_MEM(access, parent_entry->protection);
55e303ae
A
1796 wimg_mode = VM_WIMG_DEFAULT;
1797 } else if (access == MAP_MEM_WTHRU) {
91447636 1798 SET_MAP_MEM(access, parent_entry->protection);
55e303ae
A
1799 wimg_mode = VM_WIMG_WTHRU;
1800 } else if (access == MAP_MEM_WCOMB) {
91447636 1801 SET_MAP_MEM(access, parent_entry->protection);
55e303ae
A
1802 wimg_mode = VM_WIMG_WCOMB;
1803 }
91447636 1804 if(parent_is_object && object &&
55e303ae
A
1805 (access != MAP_MEM_NOOP) &&
1806 (!(object->nophyscache))) {
1807 if(object->wimg_bits != wimg_mode) {
1808 vm_page_t p;
1809 if ((wimg_mode == VM_WIMG_IO)
1810 || (wimg_mode == VM_WIMG_WCOMB))
1811 cache_attr = TRUE;
1812 else
1813 cache_attr = FALSE;
1814 vm_object_lock(object);
91447636 1815 vm_object_paging_wait(object, THREAD_UNINT);
55e303ae
A
1816 object->wimg_bits = wimg_mode;
1817 queue_iterate(&object->memq,
1818 p, vm_page_t, listq) {
1819 if (!p->fictitious) {
2d21ac55
A
1820 if (p->pmapped)
1821 pmap_disconnect(p->phys_page);
91447636
A
1822 if (cache_attr)
1823 pmap_sync_page_attributes_phys(p->phys_page);
55e303ae
A
1824 }
1825 }
1826 vm_object_unlock(object);
1827 }
1828 }
91447636
A
1829 if (object_handle)
1830 *object_handle = IP_NULL;
55e303ae
A
1831 return KERN_SUCCESS;
1832 }
1833
91447636
A
1834 if(permission & MAP_MEM_NAMED_CREATE) {
1835 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
1836 if (kr != KERN_SUCCESS) {
1837 return KERN_FAILURE;
1838 }
55e303ae 1839
91447636
A
1840 /*
1841 * Force the creation of the VM object now.
1842 */
1843 if (map_size > (vm_map_size_t) VM_MAX_ADDRESS) {
1844 /*
1845 * LP64todo - for now, we can only allocate 4GB
1846 * internal objects because the default pager can't
1847 * page bigger ones. Remove this when it can.
1848 */
1849 kr = KERN_FAILURE;
1850 goto make_mem_done;
1851 }
1c79356b 1852
91447636
A
1853 object = vm_object_allocate(map_size);
1854 assert(object != VM_OBJECT_NULL);
1c79356b 1855
91447636
A
1856 if (permission & MAP_MEM_PURGABLE) {
1857 if (! (permission & VM_PROT_WRITE)) {
1858 /* if we can't write, we can't purge */
1859 vm_object_deallocate(object);
1860 kr = KERN_INVALID_ARGUMENT;
1861 goto make_mem_done;
1862 }
2d21ac55 1863 object->purgable = VM_PURGABLE_NONVOLATILE;
91447636 1864 }
1c79356b 1865
91447636
A
1866 /*
1867 * The VM object is brand new and nobody else knows about it,
1868 * so we don't need to lock it.
1869 */
1c79356b 1870
91447636
A
1871 wimg_mode = object->wimg_bits;
1872 if (access == MAP_MEM_IO) {
1873 wimg_mode = VM_WIMG_IO;
1874 } else if (access == MAP_MEM_COPYBACK) {
1875 wimg_mode = VM_WIMG_DEFAULT;
1876 } else if (access == MAP_MEM_WTHRU) {
1877 wimg_mode = VM_WIMG_WTHRU;
1878 } else if (access == MAP_MEM_WCOMB) {
1879 wimg_mode = VM_WIMG_WCOMB;
1880 }
1881 if (access != MAP_MEM_NOOP) {
1882 object->wimg_bits = wimg_mode;
1883 }
1884 /* the object has no pages, so no WIMG bits to update here */
1c79356b 1885
91447636
A
1886 /*
1887 * XXX
1888 * We use this path when we want to make sure that
1889 * nobody messes with the object (coalesce, for
1890 * example) before we map it.
1891 * We might want to use these objects for transposition via
1892 * vm_object_transpose() too, so we don't want any copy or
1893 * shadow objects either...
1894 */
1895 object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
1c79356b 1896
91447636
A
1897 user_entry->backing.object = object;
1898 user_entry->internal = TRUE;
1899 user_entry->is_sub_map = FALSE;
1900 user_entry->is_pager = FALSE;
1901 user_entry->offset = 0;
1902 user_entry->protection = protections;
1903 SET_MAP_MEM(access, user_entry->protection);
1904 user_entry->size = map_size;
55e303ae
A
1905
1906 /* user_object pager and internal fields are not used */
1907 /* when the object field is filled in. */
1908
91447636 1909 *size = CAST_DOWN(vm_size_t, map_size);
55e303ae
A
1910 *object_handle = user_handle;
1911 return KERN_SUCCESS;
1912 }
1913
91447636
A
1914 if (parent_entry == NULL ||
1915 (permission & MAP_MEM_NAMED_REUSE)) {
1916
1917 /* Create a named object based on address range within the task map */
1918 /* Go find the object at given address */
1c79356b 1919
2d21ac55
A
1920 if (target_map == VM_MAP_NULL) {
1921 return KERN_INVALID_TASK;
1922 }
1923
91447636 1924redo_lookup:
1c79356b
A
1925 vm_map_lock_read(target_map);
1926
1927 /* get the object associated with the target address */
1928 /* note we check the permission of the range against */
1929 /* that requested by the caller */
1930
91447636 1931 kr = vm_map_lookup_locked(&target_map, map_offset,
2d21ac55
A
1932 protections, OBJECT_LOCK_EXCLUSIVE, &version,
1933 &object, &obj_off, &prot, &wired,
1934 &fault_info,
1935 &real_map);
1c79356b
A
1936 if (kr != KERN_SUCCESS) {
1937 vm_map_unlock_read(target_map);
1938 goto make_mem_done;
1939 }
55e303ae 1940 if (((prot & protections) != protections)
9bccf70c 1941 || (object == kernel_object)) {
1c79356b
A
1942 kr = KERN_INVALID_RIGHT;
1943 vm_object_unlock(object);
1944 vm_map_unlock_read(target_map);
91447636
A
1945 if(real_map != target_map)
1946 vm_map_unlock_read(real_map);
9bccf70c
A
1947 if(object == kernel_object) {
1948 printf("Warning: Attempt to create a named"
1949 " entry from the kernel_object\n");
1950 }
1c79356b
A
1951 goto make_mem_done;
1952 }
1953
1954 /* We have an object, now check to see if this object */
1955 /* is suitable. If not, create a shadow and share that */
91447636
A
1956
1957 /*
1958 * We have to unlock the VM object to avoid deadlocking with
1959 * a VM map lock (the lock ordering is map, the object), if we
1960 * need to modify the VM map to create a shadow object. Since
1961 * we might release the VM map lock below anyway, we have
1962 * to release the VM map lock now.
1963 * XXX FBDP There must be a way to avoid this double lookup...
1964 *
1965 * Take an extra reference on the VM object to make sure it's
1966 * not going to disappear.
1967 */
1968 vm_object_reference_locked(object); /* extra ref to hold obj */
1969 vm_object_unlock(object);
1970
9bccf70c 1971 local_map = original_map;
91447636 1972 local_offset = map_offset;
9bccf70c
A
1973 if(target_map != local_map) {
1974 vm_map_unlock_read(target_map);
91447636
A
1975 if(real_map != target_map)
1976 vm_map_unlock_read(real_map);
9bccf70c
A
1977 vm_map_lock_read(local_map);
1978 target_map = local_map;
91447636 1979 real_map = local_map;
9bccf70c 1980 }
1c79356b 1981 while(TRUE) {
9bccf70c
A
1982 if(!vm_map_lookup_entry(local_map,
1983 local_offset, &map_entry)) {
1c79356b 1984 kr = KERN_INVALID_ARGUMENT;
1c79356b 1985 vm_map_unlock_read(target_map);
91447636
A
1986 if(real_map != target_map)
1987 vm_map_unlock_read(real_map);
1988 vm_object_deallocate(object); /* release extra ref */
1989 object = VM_OBJECT_NULL;
1c79356b
A
1990 goto make_mem_done;
1991 }
1992 if(!(map_entry->is_sub_map)) {
1993 if(map_entry->object.vm_object != object) {
1994 kr = KERN_INVALID_ARGUMENT;
1c79356b 1995 vm_map_unlock_read(target_map);
91447636
A
1996 if(real_map != target_map)
1997 vm_map_unlock_read(real_map);
1998 vm_object_deallocate(object); /* release extra ref */
1999 object = VM_OBJECT_NULL;
1c79356b
A
2000 goto make_mem_done;
2001 }
2002 break;
2003 } else {
9bccf70c
A
2004 vm_map_t tmap;
2005 tmap = local_map;
1c79356b 2006 local_map = map_entry->object.sub_map;
9bccf70c 2007
1c79356b 2008 vm_map_lock_read(local_map);
9bccf70c 2009 vm_map_unlock_read(tmap);
1c79356b 2010 target_map = local_map;
91447636 2011 real_map = local_map;
9bccf70c
A
2012 local_offset = local_offset - map_entry->vme_start;
2013 local_offset += map_entry->offset;
1c79356b
A
2014 }
2015 }
91447636
A
2016
2017 /*
2018 * We found the VM map entry, lock the VM object again.
2019 */
2020 vm_object_lock(object);
2021 if(map_entry->wired_count) {
2022 /* JMM - The check below should be reworked instead. */
2023 object->true_share = TRUE;
2024 }
55e303ae 2025 if(((map_entry->max_protection) & protections) != protections) {
1c79356b
A
2026 kr = KERN_INVALID_RIGHT;
2027 vm_object_unlock(object);
2028 vm_map_unlock_read(target_map);
91447636
A
2029 if(real_map != target_map)
2030 vm_map_unlock_read(real_map);
2031 vm_object_deallocate(object);
2032 object = VM_OBJECT_NULL;
1c79356b
A
2033 goto make_mem_done;
2034 }
9bccf70c 2035
2d21ac55 2036 mappable_size = fault_info.hi_offset - obj_off;
9bccf70c 2037 total_size = map_entry->vme_end - map_entry->vme_start;
91447636 2038 if(map_size > mappable_size) {
9bccf70c
A
2039 /* try to extend mappable size if the entries */
2040 /* following are from the same object and are */
2041 /* compatible */
2042 next_entry = map_entry->vme_next;
 2043 /* let's see if the next map entry is still */
2044 /* pointing at this object and is contiguous */
91447636 2045 while(map_size > mappable_size) {
9bccf70c
A
2046 if((next_entry->object.vm_object == object) &&
2047 (next_entry->vme_start ==
2048 next_entry->vme_prev->vme_end) &&
2049 (next_entry->offset ==
2050 next_entry->vme_prev->offset +
2051 (next_entry->vme_prev->vme_end -
2052 next_entry->vme_prev->vme_start))) {
2053 if(((next_entry->max_protection)
55e303ae 2054 & protections) != protections) {
9bccf70c
A
2055 break;
2056 }
55e303ae
A
2057 if (next_entry->needs_copy !=
2058 map_entry->needs_copy)
2059 break;
9bccf70c
A
2060 mappable_size += next_entry->vme_end
2061 - next_entry->vme_start;
2062 total_size += next_entry->vme_end
2063 - next_entry->vme_start;
2064 next_entry = next_entry->vme_next;
2065 } else {
2066 break;
2067 }
2068
2069 }
2070 }
2071
1c79356b
A
2072 if(object->internal) {
2073 /* vm_map_lookup_locked will create a shadow if */
2074 /* needs_copy is set but does not check for the */
2075 /* other two conditions shown. It is important to */
2076 /* set up an object which will not be pulled from */
2077 /* under us. */
2078
0b4e3aa0 2079 if ((map_entry->needs_copy || object->shadowed ||
9bccf70c
A
2080 (object->size > total_size))
2081 && !object->true_share) {
91447636
A
2082 /*
2083 * We have to unlock the VM object before
2084 * trying to upgrade the VM map lock, to
2085 * honor lock ordering (map then object).
2086 * Otherwise, we would deadlock if another
2087 * thread holds a read lock on the VM map and
2088 * is trying to acquire the VM object's lock.
2089 * We still hold an extra reference on the
2090 * VM object, guaranteeing that it won't
2091 * disappear.
2092 */
2093 vm_object_unlock(object);
2094
1c79356b 2095 if (vm_map_lock_read_to_write(target_map)) {
91447636
A
2096 /*
2097 * We couldn't upgrade our VM map lock
2098 * from "read" to "write" and we lost
2099 * our "read" lock.
2100 * Start all over again...
2101 */
2102 vm_object_deallocate(object); /* extra ref */
2103 target_map = original_map;
1c79356b
A
2104 goto redo_lookup;
2105 }
91447636 2106 vm_object_lock(object);
1c79356b 2107
55e303ae
A
2108 /*
2109 * JMM - We need to avoid coming here when the object
2110 * is wired by anybody, not just the current map. Why
2111 * couldn't we use the standard vm_object_copy_quickly()
2112 * approach here?
2113 */
2114
1c79356b 2115 /* create a shadow object */
9bccf70c
A
2116 vm_object_shadow(&map_entry->object.vm_object,
2117 &map_entry->offset, total_size);
2118 shadow_object = map_entry->object.vm_object;
2119 vm_object_unlock(object);
91447636 2120
0c530ab8 2121 prot = map_entry->protection & ~VM_PROT_WRITE;
2d21ac55
A
2122
2123 if (override_nx(target_map, map_entry->alias) && prot)
0c530ab8 2124 prot |= VM_PROT_EXECUTE;
2d21ac55 2125
9bccf70c
A
2126 vm_object_pmap_protect(
2127 object, map_entry->offset,
2128 total_size,
2129 ((map_entry->is_shared
2130 || target_map->mapped)
2131 ? PMAP_NULL :
2132 target_map->pmap),
2133 map_entry->vme_start,
0c530ab8 2134 prot);
9bccf70c
A
2135 total_size -= (map_entry->vme_end
2136 - map_entry->vme_start);
2137 next_entry = map_entry->vme_next;
2138 map_entry->needs_copy = FALSE;
2d21ac55
A
2139
2140 vm_object_lock(shadow_object);
9bccf70c
A
2141 while (total_size) {
2142 if(next_entry->object.vm_object == object) {
2d21ac55 2143 vm_object_reference_locked(shadow_object);
9bccf70c
A
2144 next_entry->object.vm_object
2145 = shadow_object;
55e303ae 2146 vm_object_deallocate(object);
9bccf70c
A
2147 next_entry->offset
2148 = next_entry->vme_prev->offset +
2149 (next_entry->vme_prev->vme_end
2150 - next_entry->vme_prev->vme_start);
2151 next_entry->needs_copy = FALSE;
2152 } else {
2153 panic("mach_make_memory_entry_64:"
2154 " map entries out of sync\n");
2155 }
2156 total_size -=
2157 next_entry->vme_end
2158 - next_entry->vme_start;
2159 next_entry = next_entry->vme_next;
2160 }
2161
91447636
A
2162 /*
2163 * Transfer our extra reference to the
2164 * shadow object.
2165 */
2166 vm_object_reference_locked(shadow_object);
2167 vm_object_deallocate(object); /* extra ref */
9bccf70c 2168 object = shadow_object;
91447636 2169
9bccf70c
A
2170 obj_off = (local_offset - map_entry->vme_start)
2171 + map_entry->offset;
1c79356b 2172
91447636 2173 vm_map_lock_write_to_read(target_map);
1c79356b
A
2174 }
2175 }
2176
2177 /* note: in the future we can (if necessary) allow for */
2178 /* memory object lists, this will better support */
2179 /* fragmentation, but is it necessary? The user should */
2180 /* be encouraged to create address space oriented */
2181 /* shared objects from CLEAN memory regions which have */
 2182 /* a known and defined history, i.e. no inheritance */
 2183 /* sharing; make this call before making the region the */
2184 /* target of ipc's, etc. The code above, protecting */
2185 /* against delayed copy, etc. is mostly defensive. */
2186
55e303ae
A
2187 wimg_mode = object->wimg_bits;
2188 if(!(object->nophyscache)) {
2189 if(access == MAP_MEM_IO) {
2190 wimg_mode = VM_WIMG_IO;
2191 } else if (access == MAP_MEM_COPYBACK) {
2192 wimg_mode = VM_WIMG_USE_DEFAULT;
2193 } else if (access == MAP_MEM_WTHRU) {
2194 wimg_mode = VM_WIMG_WTHRU;
2195 } else if (access == MAP_MEM_WCOMB) {
2196 wimg_mode = VM_WIMG_WCOMB;
2197 }
2198 }
d7e50217 2199
de355530 2200 object->true_share = TRUE;
55e303ae
A
2201 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
2202 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
2203
91447636
A
2204 /*
2205 * The memory entry now points to this VM object and we
2206 * need to hold a reference on the VM object. Use the extra
2207 * reference we took earlier to keep the object alive when we
2208 * had to unlock it.
2209 */
2210
55e303ae 2211 vm_map_unlock_read(target_map);
91447636
A
2212 if(real_map != target_map)
2213 vm_map_unlock_read(real_map);
55e303ae
A
2214
2215 if(object->wimg_bits != wimg_mode) {
2216 vm_page_t p;
2217
2218 vm_object_paging_wait(object, THREAD_UNINT);
2219
91447636
A
2220 if ((wimg_mode == VM_WIMG_IO)
2221 || (wimg_mode == VM_WIMG_WCOMB))
2222 cache_attr = TRUE;
2223 else
2224 cache_attr = FALSE;
2225
55e303ae
A
2226 queue_iterate(&object->memq,
2227 p, vm_page_t, listq) {
2228 if (!p->fictitious) {
2d21ac55
A
2229 if (p->pmapped)
2230 pmap_disconnect(p->phys_page);
91447636
A
2231 if (cache_attr)
2232 pmap_sync_page_attributes_phys(p->phys_page);
55e303ae
A
2233 }
2234 }
2235 object->wimg_bits = wimg_mode;
2236 }
1c79356b
A
2237
 2238 /* The size of the mapped entry that overlaps with our region */
 2239 /* (the region targeted for sharing) corresponds to: */
 2240 /* (entry_end - entry_start) - */
 2241 /* (offset of our beginning address within the entry). */
 2242 /* Clamp the requested size to that mappable size: */
2243
91447636
A
2244 if(map_size > mappable_size)
2245 map_size = mappable_size;
2246
2247 if (permission & MAP_MEM_NAMED_REUSE) {
2248 /*
2249 * Compare what we got with the "parent_entry".
2250 * If they match, re-use the "parent_entry" instead
2251 * of creating a new one.
2252 */
2253 if (parent_entry != NULL &&
2254 parent_entry->backing.object == object &&
2255 parent_entry->internal == object->internal &&
2256 parent_entry->is_sub_map == FALSE &&
2257 parent_entry->is_pager == FALSE &&
2258 parent_entry->offset == obj_off &&
2259 parent_entry->protection == protections &&
2260 parent_entry->size == map_size) {
2261 /*
2262 * We have a match: re-use "parent_entry".
2263 */
2264 /* release our extra reference on object */
2265 vm_object_unlock(object);
2266 vm_object_deallocate(object);
2267 /* parent_entry->ref_count++; XXX ? */
2268 /* Get an extra send-right on handle */
2269 ipc_port_copy_send(parent_handle);
2270 *object_handle = parent_handle;
2271 return KERN_SUCCESS;
2272 } else {
2273 /*
2274 * No match: we need to create a new entry.
2275 * fall through...
2276 */
2277 }
2278 }
2279
2280 vm_object_unlock(object);
2281 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2282 != KERN_SUCCESS) {
2283 /* release our unused reference on the object */
2284 vm_object_deallocate(object);
2285 return KERN_FAILURE;
2286 }
1c79356b 2287
91447636
A
2288 user_entry->backing.object = object;
2289 user_entry->internal = object->internal;
2290 user_entry->is_sub_map = FALSE;
2291 user_entry->is_pager = FALSE;
2292 user_entry->offset = obj_off;
2293 user_entry->protection = permission;
2294 user_entry->size = map_size;
1c79356b
A
2295
2296 /* user_object pager and internal fields are not used */
2297 /* when the object field is filled in. */
2298
91447636 2299 *size = CAST_DOWN(vm_size_t, map_size);
1c79356b 2300 *object_handle = user_handle;
1c79356b 2301 return KERN_SUCCESS;
1c79356b 2302
91447636 2303 } else {
1c79356b 2304 /* The new object will be based on an existing named object */
91447636
A
2305
2306 if (parent_entry == NULL) {
1c79356b
A
2307 kr = KERN_INVALID_ARGUMENT;
2308 goto make_mem_done;
2309 }
91447636 2310 if((offset + map_size) > parent_entry->size) {
1c79356b
A
2311 kr = KERN_INVALID_ARGUMENT;
2312 goto make_mem_done;
2313 }
2314
91447636
A
2315 if((protections & parent_entry->protection) != protections) {
2316 kr = KERN_PROTECTION_FAILURE;
2317 goto make_mem_done;
2318 }
2319
2320 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2321 != KERN_SUCCESS) {
2322 kr = KERN_FAILURE;
2323 goto make_mem_done;
55e303ae 2324 }
91447636
A
2325
2326 user_entry->size = map_size;
2327 user_entry->offset = parent_entry->offset + map_offset;
2328 user_entry->is_sub_map = parent_entry->is_sub_map;
2329 user_entry->is_pager = parent_entry->is_pager;
2330 user_entry->internal = parent_entry->internal;
2331 user_entry->protection = protections;
2332
2333 if(access != MAP_MEM_NOOP) {
2334 SET_MAP_MEM(access, user_entry->protection);
1c79356b 2335 }
91447636
A
2336
2337 if(parent_entry->is_sub_map) {
2338 user_entry->backing.map = parent_entry->backing.map;
2339 vm_map_lock(user_entry->backing.map);
2340 user_entry->backing.map->ref_count++;
2341 vm_map_unlock(user_entry->backing.map);
1c79356b 2342 }
91447636
A
2343 else if (parent_entry->is_pager) {
2344 user_entry->backing.pager = parent_entry->backing.pager;
2345 /* JMM - don't we need a reference here? */
2346 } else {
2347 object = parent_entry->backing.object;
2348 assert(object != VM_OBJECT_NULL);
2349 user_entry->backing.object = object;
2350 /* we now point to this object, hold on */
2351 vm_object_reference(object);
2352 vm_object_lock(object);
2353 object->true_share = TRUE;
2354 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
2355 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
2356 vm_object_unlock(object);
1c79356b 2357 }
91447636 2358 *size = CAST_DOWN(vm_size_t, map_size);
1c79356b
A
2359 *object_handle = user_handle;
2360 return KERN_SUCCESS;
2361 }
2362
1c79356b 2363make_mem_done:
91447636
A
2364 if (user_handle != IP_NULL) {
2365 ipc_port_dealloc_kernel(user_handle);
2366 }
2367 if (user_entry != NULL) {
2368 kfree(user_entry, sizeof *user_entry);
2369 }
2370 return kr;
2371}
2372
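/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * the more common use of mach_make_memory_entry_64() is the path above that
 * wraps an existing address range in a named entry so it can be mapped
 * again, in this task or in any task that receives the port over IPC.  A
 * hedged user-level sketch (hypothetical helper, standard prototypes from
 * <mach/mach_vm.h> assumed):
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	static kern_return_t
 *	share_existing_range(mach_vm_address_t base, mach_vm_size_t len,
 *	    mach_port_t *entry_out)
 *	{
 *		memory_object_size_t size = len;
 *		kern_return_t kr;
 *
 *		// Per the comment in the function above, do this while the
 *		// region still has a known and defined history -- before it
 *		// becomes the target of inheritance sharing, IPC, etc.
 *		kr = mach_make_memory_entry_64(mach_task_self(), &size,
 *		    (memory_object_offset_t)base,
 *		    VM_PROT_READ | VM_PROT_WRITE,
 *		    entry_out, MACH_PORT_NULL);
 *		if (kr == KERN_SUCCESS && size < len) {
 *			// Entry covers less than we asked for (the kernel
 *			// clamps to the mappable size); treat as an error.
 *			mach_port_deallocate(mach_task_self(), *entry_out);
 *			return KERN_INVALID_ADDRESS;
 *		}
 *		return kr;
 *	}
 *
 * The returned port can then be handed to mach_vm_map() in any task that
 * holds a send right for it.
 */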
2373kern_return_t
2374_mach_make_memory_entry(
2375 vm_map_t target_map,
2376 memory_object_size_t *size,
2377 memory_object_offset_t offset,
2378 vm_prot_t permission,
2379 ipc_port_t *object_handle,
2380 ipc_port_t parent_entry)
2381{
2d21ac55 2382 memory_object_size_t mo_size;
91447636
A
2383 kern_return_t kr;
2384
2d21ac55 2385 mo_size = (memory_object_size_t)*size;
91447636
A
2386 kr = mach_make_memory_entry_64(target_map, &mo_size,
2387 (memory_object_offset_t)offset, permission, object_handle,
2388 parent_entry);
2389 *size = mo_size;
1c79356b
A
2390 return kr;
2391}
2392
2393kern_return_t
2394mach_make_memory_entry(
2395 vm_map_t target_map,
2396 vm_size_t *size,
2397 vm_offset_t offset,
2398 vm_prot_t permission,
2399 ipc_port_t *object_handle,
2400 ipc_port_t parent_entry)
91447636 2401{
2d21ac55 2402 memory_object_size_t mo_size;
1c79356b
A
2403 kern_return_t kr;
2404
2d21ac55 2405 mo_size = (memory_object_size_t)*size;
91447636
A
2406 kr = mach_make_memory_entry_64(target_map, &mo_size,
2407 (memory_object_offset_t)offset, permission, object_handle,
1c79356b 2408 parent_entry);
91447636 2409 *size = CAST_DOWN(vm_size_t, mo_size);
1c79356b
A
2410 return kr;
2411}
2412
2413/*
91447636
A
2414 * task_wire
2415 *
2416 * Set or clear the map's wiring_required flag. This flag, if set,
2417 * will cause all future virtual memory allocation to allocate
2418 * user wired memory. Unwiring pages wired down as a result of
2419 * this routine is done with the vm_wire interface.
1c79356b 2420 */
1c79356b 2421kern_return_t
91447636
A
2422task_wire(
2423 vm_map_t map,
2424 boolean_t must_wire)
2425{
2426 if (map == VM_MAP_NULL)
2427 return(KERN_INVALID_ARGUMENT);
2428
2429 if (must_wire)
2430 map->wiring_required = TRUE;
2431 else
2432 map->wiring_required = FALSE;
2433
2434 return(KERN_SUCCESS);
2435}
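/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * task_wire() only flips the map-wide wiring_required flag; it wires
 * nothing that is already mapped.  Reading the routine together with its
 * comment, an in-kernel caller (map, addr, size and kr stand for the
 * caller's own variables) would see roughly:
 *
 *	task_wire(map, TRUE);		// subsequent allocations come in wired
 *	kr = vm_allocate(map, &addr, size, VM_FLAGS_ANYWHERE);
 *	task_wire(map, FALSE);		// new allocations pageable again;
 *					// existing wirings are untouched and
 *					// are released via the vm_wire interface
 */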
2436
2437__private_extern__ kern_return_t
2438mach_memory_entry_allocate(
2439 vm_named_entry_t *user_entry_p,
2440 ipc_port_t *user_handle_p)
1c79356b 2441{
91447636 2442 vm_named_entry_t user_entry;
1c79356b 2443 ipc_port_t user_handle;
91447636 2444 ipc_port_t previous;
1c79356b 2445
91447636
A
2446 user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
2447 if (user_entry == NULL)
1c79356b 2448 return KERN_FAILURE;
1c79356b 2449
91447636 2450 named_entry_lock_init(user_entry);
1c79356b 2451
91447636
A
2452 user_handle = ipc_port_alloc_kernel();
2453 if (user_handle == IP_NULL) {
2454 kfree(user_entry, sizeof *user_entry);
2455 return KERN_FAILURE;
2456 }
1c79356b
A
2457 ip_lock(user_handle);
2458
2459 /* make a sonce right */
2460 user_handle->ip_sorights++;
2461 ip_reference(user_handle);
2462
2463 user_handle->ip_destination = IP_NULL;
2464 user_handle->ip_receiver_name = MACH_PORT_NULL;
2465 user_handle->ip_receiver = ipc_space_kernel;
2466
2467 /* make a send right */
2468 user_handle->ip_mscount++;
2469 user_handle->ip_srights++;
2470 ip_reference(user_handle);
2471
2472 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
2473 /* nsrequest unlocks user_handle */
2474
91447636
A
2475 user_entry->backing.pager = NULL;
2476 user_entry->is_sub_map = FALSE;
2477 user_entry->is_pager = FALSE;
91447636 2478 user_entry->internal = FALSE;
2d21ac55
A
2479 user_entry->size = 0;
2480 user_entry->offset = 0;
2481 user_entry->protection = VM_PROT_NONE;
91447636 2482 user_entry->ref_count = 1;
1c79356b 2483
91447636
A
2484 ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
2485 IKOT_NAMED_ENTRY);
1c79356b 2486
91447636
A
2487 *user_entry_p = user_entry;
2488 *user_handle_p = user_handle;
1c79356b 2489
91447636
A
2490 return KERN_SUCCESS;
2491}
1c79356b 2492
91447636
A
2493/*
2494 * mach_memory_object_memory_entry_64
2495 *
2496 * Create a named entry backed by the provided pager.
2497 *
2498 * JMM - we need to hold a reference on the pager -
2499 * and release it when the named entry is destroyed.
2500 */
2501kern_return_t
2502mach_memory_object_memory_entry_64(
2503 host_t host,
2504 boolean_t internal,
2505 vm_object_offset_t size,
2506 vm_prot_t permission,
2507 memory_object_t pager,
2508 ipc_port_t *entry_handle)
2509{
2510 unsigned int access;
2511 vm_named_entry_t user_entry;
2512 ipc_port_t user_handle;
2513
2514 if (host == HOST_NULL)
2515 return(KERN_INVALID_HOST);
2516
2517 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2518 != KERN_SUCCESS) {
2519 return KERN_FAILURE;
2520 }
2521
2522 user_entry->backing.pager = pager;
2523 user_entry->size = size;
2524 user_entry->offset = 0;
2525 user_entry->protection = permission & VM_PROT_ALL;
2526 access = GET_MAP_MEM(permission);
2527 SET_MAP_MEM(access, user_entry->protection);
2528 user_entry->internal = internal;
2529 user_entry->is_sub_map = FALSE;
2530 user_entry->is_pager = TRUE;
2531 assert(user_entry->ref_count == 1);
2532
2533 *entry_handle = user_handle;
1c79356b 2534 return KERN_SUCCESS;
91447636
A
2535}
2536
2537kern_return_t
2538mach_memory_object_memory_entry(
2539 host_t host,
2540 boolean_t internal,
2541 vm_size_t size,
2542 vm_prot_t permission,
2543 memory_object_t pager,
2544 ipc_port_t *entry_handle)
2545{
2546 return mach_memory_object_memory_entry_64( host, internal,
2547 (vm_object_offset_t)size, permission, pager, entry_handle);
2548}
2549
2550
2551kern_return_t
2552mach_memory_entry_purgable_control(
2553 ipc_port_t entry_port,
2554 vm_purgable_t control,
2555 int *state)
2556{
2557 kern_return_t kr;
2558 vm_named_entry_t mem_entry;
2559 vm_object_t object;
1c79356b 2560
91447636
A
2561 if (entry_port == IP_NULL ||
2562 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
2563 return KERN_INVALID_ARGUMENT;
2564 }
2d21ac55
A
2565 if (control != VM_PURGABLE_SET_STATE &&
2566 control != VM_PURGABLE_GET_STATE)
2567 return(KERN_INVALID_ARGUMENT);
2568
2569 if (control == VM_PURGABLE_SET_STATE &&
2570 (((*state & ~(VM_PURGABLE_STATE_MASK|VM_VOLATILE_ORDER_MASK|VM_PURGABLE_ORDERING_MASK|VM_PURGABLE_BEHAVIOR_MASK|VM_VOLATILE_GROUP_MASK)) != 0) ||
2571 ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
2572 return(KERN_INVALID_ARGUMENT);
1c79356b 2573
91447636 2574 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
1c79356b 2575
91447636 2576 named_entry_lock(mem_entry);
1c79356b 2577
91447636
A
2578 if (mem_entry->is_sub_map || mem_entry->is_pager) {
2579 named_entry_unlock(mem_entry);
1c79356b
A
2580 return KERN_INVALID_ARGUMENT;
2581 }
91447636
A
2582
2583 object = mem_entry->backing.object;
2584 if (object == VM_OBJECT_NULL) {
2585 named_entry_unlock(mem_entry);
1c79356b
A
2586 return KERN_INVALID_ARGUMENT;
2587 }
91447636
A
2588
2589 vm_object_lock(object);
2590
2591 /* check that named entry covers entire object ? */
2592 if (mem_entry->offset != 0 || object->size != mem_entry->size) {
2593 vm_object_unlock(object);
2594 named_entry_unlock(mem_entry);
2595 return KERN_INVALID_ARGUMENT;
1c79356b 2596 }
91447636
A
2597
2598 named_entry_unlock(mem_entry);
2599
2600 kr = vm_object_purgable_control(object, control, state);
2601
2602 vm_object_unlock(object);
2603
2604 return kr;
1c79356b
A
2605}
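/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * the state machine driven here is the same one user code reaches with
 * vm_purgable_control() on a VM_FLAGS_PURGABLE allocation; this routine is
 * the named-entry flavor and additionally requires that the entry cover
 * the whole object.  A hedged user-level sketch of the underlying
 * transitions (hypothetical helper, prototypes from <mach/mach.h> assumed):
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *	#include <mach/vm_purgable.h>
 *
 *	static void
 *	purgable_example(mach_vm_size_t len)
 *	{
 *		mach_vm_address_t addr = 0;
 *		int state;
 *
 *		(void) mach_vm_allocate(mach_task_self(), &addr, len,
 *		    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
 *
 *		// Allow the VM to reclaim the pages under memory pressure.
 *		state = VM_PURGABLE_VOLATILE;
 *		(void) vm_purgable_control(mach_task_self(),
 *		    (vm_address_t)addr, VM_PURGABLE_SET_STATE, &state);
 *
 *		// Take it back; the returned (old) state says whether the
 *		// contents survived.
 *		state = VM_PURGABLE_NONVOLATILE;
 *		(void) vm_purgable_control(mach_task_self(),
 *		    (vm_address_t)addr, VM_PURGABLE_SET_STATE, &state);
 *		if (state == VM_PURGABLE_EMPTY) {
 *			// pages were purged; regenerate the data before use
 *		}
 *	}
 */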
2606
91447636
A
2607/*
2608 * mach_memory_entry_port_release:
2609 *
2610 * Release a send right on a named entry port. This is the correct
2611 * way to destroy a named entry. When the last right on the port is
2612 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
2613 */
2614void
2615mach_memory_entry_port_release(
2616 ipc_port_t port)
2617{
2618 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
2619 ipc_port_release_send(port);
2620}
1c79356b 2621
91447636
A
2622/*
2623 * mach_destroy_memory_entry:
2624 *
2625 * Drops a reference on a memory entry and destroys the memory entry if
2626 * there are no more references on it.
2627 * NOTE: This routine should not be called to destroy a memory entry from the
2628 * kernel, as it will not release the Mach port associated with the memory
2629 * entry. The proper way to destroy a memory entry in the kernel is to
 2630 * call mach_memory_entry_port_release() to release the kernel's send-right on
2631 * the memory entry's port. When the last send right is released, the memory
2632 * entry will be destroyed via ipc_kobject_destroy().
2633 */
1c79356b
A
2634void
2635mach_destroy_memory_entry(
2636 ipc_port_t port)
2637{
2638 vm_named_entry_t named_entry;
2639#if MACH_ASSERT
2640 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
2641#endif /* MACH_ASSERT */
2642 named_entry = (vm_named_entry_t)port->ip_kobject;
2643 mutex_lock(&(named_entry)->Lock);
91447636 2644 named_entry->ref_count -= 1;
1c79356b 2645 if(named_entry->ref_count == 0) {
91447636 2646 if (named_entry->is_sub_map) {
1c79356b 2647 vm_map_deallocate(named_entry->backing.map);
91447636
A
2648 } else if (!named_entry->is_pager) {
2649 /* release the memory object we've been pointing to */
2650 vm_object_deallocate(named_entry->backing.object);
2651 } /* else JMM - need to drop reference on pager in that case */
2652
2653 mutex_unlock(&(named_entry)->Lock);
2654
2655 kfree((void *) port->ip_kobject,
2656 sizeof (struct vm_named_entry));
1c79356b
A
2657 } else
2658 mutex_unlock(&(named_entry)->Lock);
2659}
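/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * per the comment above, kernel code that created a named entry should
 * drop its send right rather than call mach_destroy_memory_entry()
 * directly, e.g.:
 *
 *	// entry_handle came from mach_memory_entry_allocate() or
 *	// mach_make_memory_entry_64(); releasing the last send right lets
 *	// ipc_kobject_destroy() call mach_destroy_memory_entry() for us.
 *	mach_memory_entry_port_release(entry_handle);
 */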
2660
0c530ab8
A
2661/* Allow manipulation of individual page state. This is actually part of */
2662/* the UPL regimen but takes place on the memory entry rather than on a UPL */
2663
2664kern_return_t
2665mach_memory_entry_page_op(
2666 ipc_port_t entry_port,
2667 vm_object_offset_t offset,
2668 int ops,
2669 ppnum_t *phys_entry,
2670 int *flags)
2671{
2672 vm_named_entry_t mem_entry;
2673 vm_object_t object;
2674 kern_return_t kr;
2675
2676 if (entry_port == IP_NULL ||
2677 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
2678 return KERN_INVALID_ARGUMENT;
2679 }
2680
2681 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
2682
2683 named_entry_lock(mem_entry);
2684
2685 if (mem_entry->is_sub_map || mem_entry->is_pager) {
2686 named_entry_unlock(mem_entry);
2687 return KERN_INVALID_ARGUMENT;
2688 }
2689
2690 object = mem_entry->backing.object;
2691 if (object == VM_OBJECT_NULL) {
2692 named_entry_unlock(mem_entry);
2693 return KERN_INVALID_ARGUMENT;
2694 }
2695
2696 vm_object_reference(object);
2697 named_entry_unlock(mem_entry);
2698
2699 kr = vm_object_page_op(object, offset, ops, phys_entry, flags);
2700
2701 vm_object_deallocate(object);
2702
2703 return kr;
2704}
2705
2706/*
 2707 * mach_memory_entry_range_op offers a performance enhancement over
 2708 * mach_memory_entry_page_op for operations which do not require page-level
 2709 * state to be returned from the call. Page_op was created to provide
 2710 * a low-cost alternative to page manipulation via UPLs when only a single
 2711 * page was involved. The range_op call extends the _op
 2712 * family of functions to work on multiple pages; because no page-level
 2713 * state is handled, the caller avoids the overhead of the UPL structures.
2714 */
2715
2716kern_return_t
2717mach_memory_entry_range_op(
2718 ipc_port_t entry_port,
2719 vm_object_offset_t offset_beg,
2720 vm_object_offset_t offset_end,
2721 int ops,
2722 int *range)
2723{
2724 vm_named_entry_t mem_entry;
2725 vm_object_t object;
2726 kern_return_t kr;
2727
2728 if (entry_port == IP_NULL ||
2729 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
2730 return KERN_INVALID_ARGUMENT;
2731 }
2732
2733 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
2734
2735 named_entry_lock(mem_entry);
2736
2737 if (mem_entry->is_sub_map || mem_entry->is_pager) {
2738 named_entry_unlock(mem_entry);
2739 return KERN_INVALID_ARGUMENT;
2740 }
2741
2742 object = mem_entry->backing.object;
2743 if (object == VM_OBJECT_NULL) {
2744 named_entry_unlock(mem_entry);
2745 return KERN_INVALID_ARGUMENT;
2746 }
2747
2748 vm_object_reference(object);
2749 named_entry_unlock(mem_entry);
2750
2751 kr = vm_object_range_op(object,
2752 offset_beg,
2753 offset_end,
2754 ops,
2755 range);
2756
2757 vm_object_deallocate(object);
2758
2759 return kr;
2760}
1c79356b 2761
1c79356b
A
2762
2763kern_return_t
2764set_dp_control_port(
2765 host_priv_t host_priv,
2766 ipc_port_t control_port)
2767{
2768 if (host_priv == HOST_PRIV_NULL)
2769 return (KERN_INVALID_HOST);
0b4e3aa0
A
2770
2771 if (IP_VALID(dynamic_pager_control_port))
2772 ipc_port_release_send(dynamic_pager_control_port);
2773
1c79356b
A
2774 dynamic_pager_control_port = control_port;
2775 return KERN_SUCCESS;
2776}
2777
2778kern_return_t
2779get_dp_control_port(
2780 host_priv_t host_priv,
2781 ipc_port_t *control_port)
2782{
2783 if (host_priv == HOST_PRIV_NULL)
2784 return (KERN_INVALID_HOST);
0b4e3aa0
A
2785
2786 *control_port = ipc_port_copy_send(dynamic_pager_control_port);
1c79356b
A
2787 return KERN_SUCCESS;
2788
2789}
2790
91447636 2791/* ******* Temporary Internal calls to UPL for BSD ***** */
1c79356b 2792
91447636
A
2793extern int kernel_upl_map(
2794 vm_map_t map,
2795 upl_t upl,
2796 vm_offset_t *dst_addr);
1c79356b 2797
91447636
A
2798extern int kernel_upl_unmap(
2799 vm_map_t map,
2800 upl_t upl);
150bd074 2801
91447636
A
2802extern int kernel_upl_commit(
2803 upl_t upl,
2804 upl_page_info_t *pl,
2805 mach_msg_type_number_t count);
1c79356b 2806
91447636
A
2807extern int kernel_upl_commit_range(
2808 upl_t upl,
2809 upl_offset_t offset,
2810 upl_size_t size,
2811 int flags,
2812 upl_page_info_array_t pl,
2813 mach_msg_type_number_t count);
1c79356b 2814
91447636
A
2815extern int kernel_upl_abort(
2816 upl_t upl,
2817 int abort_type);
1c79356b 2818
91447636
A
2819extern int kernel_upl_abort_range(
2820 upl_t upl,
2821 upl_offset_t offset,
2822 upl_size_t size,
2823 int abort_flags);
1c79356b 2824
1c79356b 2825
1c79356b
A
2826kern_return_t
2827kernel_upl_map(
2828 vm_map_t map,
2829 upl_t upl,
2830 vm_offset_t *dst_addr)
2831{
91447636 2832 return vm_upl_map(map, upl, dst_addr);
1c79356b
A
2833}
2834
2835
2836kern_return_t
2837kernel_upl_unmap(
2838 vm_map_t map,
0b4e3aa0 2839 upl_t upl)
1c79356b 2840{
91447636 2841 return vm_upl_unmap(map, upl);
1c79356b
A
2842}
2843
2844kern_return_t
2845kernel_upl_commit(
91447636
A
2846 upl_t upl,
2847 upl_page_info_t *pl,
0b4e3aa0 2848 mach_msg_type_number_t count)
1c79356b 2849{
0b4e3aa0
A
2850 kern_return_t kr;
2851
2852 kr = upl_commit(upl, pl, count);
2853 upl_deallocate(upl);
1c79356b
A
2854 return kr;
2855}
2856
0b4e3aa0 2857
1c79356b
A
2858kern_return_t
2859kernel_upl_commit_range(
2860 upl_t upl,
91447636
A
2861 upl_offset_t offset,
2862 upl_size_t size,
1c79356b 2863 int flags,
0b4e3aa0
A
2864 upl_page_info_array_t pl,
2865 mach_msg_type_number_t count)
1c79356b 2866{
0b4e3aa0
A
2867 boolean_t finished = FALSE;
2868 kern_return_t kr;
2869
2870 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
2871 flags |= UPL_COMMIT_NOTIFY_EMPTY;
2872
2873 kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
2874
2875 if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
2876 upl_deallocate(upl);
2877
1c79356b
A
2878 return kr;
2879}
2880
2881kern_return_t
2882kernel_upl_abort_range(
0b4e3aa0 2883 upl_t upl,
91447636
A
2884 upl_offset_t offset,
2885 upl_size_t size,
0b4e3aa0 2886 int abort_flags)
1c79356b 2887{
0b4e3aa0
A
2888 kern_return_t kr;
2889 boolean_t finished = FALSE;
1c79356b 2890
0b4e3aa0
A
2891 if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
2892 abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
1c79356b 2893
0b4e3aa0 2894 kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
1c79356b 2895
0b4e3aa0
A
2896 if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
2897 upl_deallocate(upl);
1c79356b 2898
0b4e3aa0 2899 return kr;
1c79356b
A
2900}
2901
1c79356b 2902kern_return_t
0b4e3aa0
A
2903kernel_upl_abort(
2904 upl_t upl,
2905 int abort_type)
1c79356b 2906{
0b4e3aa0 2907 kern_return_t kr;
1c79356b 2908
0b4e3aa0
A
2909 kr = upl_abort(upl, abort_type);
2910 upl_deallocate(upl);
2911 return kr;
1c79356b
A
2912}
2913
91447636
A
2914/*
2915 * Now a kernel-private interface (for BootCache
2916 * use only). Need a cleaner way to create an
2917 * empty vm_map() and return a handle to it.
2918 */
1c79356b
A
2919
2920kern_return_t
91447636
A
2921vm_region_object_create(
2922 __unused vm_map_t target_map,
2923 vm_size_t size,
2924 ipc_port_t *object_handle)
1c79356b 2925{
91447636
A
2926 vm_named_entry_t user_entry;
2927 ipc_port_t user_handle;
1c79356b 2928
91447636 2929 vm_map_t new_map;
1c79356b 2930
91447636
A
2931 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2932 != KERN_SUCCESS) {
1c79356b 2933 return KERN_FAILURE;
91447636 2934 }
1c79356b 2935
91447636 2936 /* Create a named object based on a submap of specified size */
1c79356b 2937
91447636
A
2938 new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
2939 vm_map_round_page(size), TRUE);
1c79356b 2940
91447636
A
2941 user_entry->backing.map = new_map;
2942 user_entry->internal = TRUE;
2943 user_entry->is_sub_map = TRUE;
2944 user_entry->offset = 0;
2945 user_entry->protection = VM_PROT_ALL;
2946 user_entry->size = size;
2947 assert(user_entry->ref_count == 1);
1c79356b 2948
91447636 2949 *object_handle = user_handle;
1c79356b 2950 return KERN_SUCCESS;
1c79356b 2951
55e303ae
A
2952}
2953
91447636
A
2954ppnum_t vm_map_get_phys_page( /* forward */
2955 vm_map_t map,
2956 vm_offset_t offset);
2957
55e303ae 2958ppnum_t
1c79356b 2959vm_map_get_phys_page(
91447636
A
2960 vm_map_t map,
2961 vm_offset_t addr)
1c79356b 2962{
91447636
A
2963 vm_object_offset_t offset;
2964 vm_object_t object;
2965 vm_map_offset_t map_offset;
2966 vm_map_entry_t entry;
2967 ppnum_t phys_page = 0;
2968
2969 map_offset = vm_map_trunc_page(addr);
1c79356b
A
2970
2971 vm_map_lock(map);
91447636 2972 while (vm_map_lookup_entry(map, map_offset, &entry)) {
1c79356b
A
2973
2974 if (entry->object.vm_object == VM_OBJECT_NULL) {
2975 vm_map_unlock(map);
91447636 2976 return (ppnum_t) 0;
1c79356b
A
2977 }
2978 if (entry->is_sub_map) {
2979 vm_map_t old_map;
2980 vm_map_lock(entry->object.sub_map);
2981 old_map = map;
2982 map = entry->object.sub_map;
91447636 2983 map_offset = entry->offset + (map_offset - entry->vme_start);
1c79356b
A
2984 vm_map_unlock(old_map);
2985 continue;
2986 }
9bccf70c
A
2987 if (entry->object.vm_object->phys_contiguous) {
2988 /* These are not standard pageable memory mappings */
2989 /* If they are not present in the object they will */
2990 /* have to be picked up from the pager through the */
2991 /* fault mechanism. */
2992 if(entry->object.vm_object->shadow_offset == 0) {
2993 /* need to call vm_fault */
2994 vm_map_unlock(map);
91447636 2995 vm_fault(map, map_offset, VM_PROT_NONE,
9bccf70c
A
2996 FALSE, THREAD_UNINT, NULL, 0);
2997 vm_map_lock(map);
2998 continue;
2999 }
91447636 3000 offset = entry->offset + (map_offset - entry->vme_start);
55e303ae
A
3001 phys_page = (ppnum_t)
3002 ((entry->object.vm_object->shadow_offset
3003 + offset) >> 12);
9bccf70c
A
3004 break;
3005
3006 }
91447636 3007 offset = entry->offset + (map_offset - entry->vme_start);
1c79356b
A
3008 object = entry->object.vm_object;
3009 vm_object_lock(object);
3010 while (TRUE) {
3011 vm_page_t dst_page = vm_page_lookup(object,offset);
3012 if(dst_page == VM_PAGE_NULL) {
3013 if(object->shadow) {
3014 vm_object_t old_object;
3015 vm_object_lock(object->shadow);
3016 old_object = object;
3017 offset = offset + object->shadow_offset;
3018 object = object->shadow;
3019 vm_object_unlock(old_object);
3020 } else {
3021 vm_object_unlock(object);
3022 break;
3023 }
3024 } else {
55e303ae 3025 phys_page = (ppnum_t)(dst_page->phys_page);
1c79356b
A
3026 vm_object_unlock(object);
3027 break;
3028 }
3029 }
3030 break;
3031
3032 }
3033
3034 vm_map_unlock(map);
55e303ae
A
3035 return phys_page;
3036}
3037
3038
3039
91447636
A
3040kern_return_t kernel_object_iopl_request( /* forward */
3041 vm_named_entry_t named_entry,
3042 memory_object_offset_t offset,
3043 vm_size_t *upl_size,
3044 upl_t *upl_ptr,
3045 upl_page_info_array_t user_page_list,
3046 unsigned int *page_list_count,
3047 int *flags);
3048
55e303ae
A
3049kern_return_t
3050kernel_object_iopl_request(
3051 vm_named_entry_t named_entry,
3052 memory_object_offset_t offset,
3053 vm_size_t *upl_size,
3054 upl_t *upl_ptr,
3055 upl_page_info_array_t user_page_list,
3056 unsigned int *page_list_count,
3057 int *flags)
3058{
3059 vm_object_t object;
3060 kern_return_t ret;
3061
3062 int caller_flags;
3063
3064 caller_flags = *flags;
3065
91447636
A
3066 if (caller_flags & ~UPL_VALID_FLAGS) {
3067 /*
3068 * For forward compatibility's sake,
3069 * reject any unknown flag.
3070 */
3071 return KERN_INVALID_VALUE;
3072 }
3073
55e303ae
A
3074 /* a few checks to make sure user is obeying rules */
3075 if(*upl_size == 0) {
3076 if(offset >= named_entry->size)
3077 return(KERN_INVALID_RIGHT);
3078 *upl_size = named_entry->size - offset;
3079 }
3080 if(caller_flags & UPL_COPYOUT_FROM) {
3081 if((named_entry->protection & VM_PROT_READ)
3082 != VM_PROT_READ) {
3083 return(KERN_INVALID_RIGHT);
3084 }
3085 } else {
3086 if((named_entry->protection &
3087 (VM_PROT_READ | VM_PROT_WRITE))
3088 != (VM_PROT_READ | VM_PROT_WRITE)) {
3089 return(KERN_INVALID_RIGHT);
3090 }
3091 }
3092 if(named_entry->size < (offset + *upl_size))
3093 return(KERN_INVALID_ARGUMENT);
3094
 3095 /* the caller's offset parameter is defined relative to the */
 3096 /* start of the named entry; convert it to an object offset */
3097 offset = offset + named_entry->offset;
3098
3099 if(named_entry->is_sub_map)
3100 return (KERN_INVALID_ARGUMENT);
3101
3102 named_entry_lock(named_entry);
3103
91447636 3104 if (named_entry->is_pager) {
55e303ae
A
3105 object = vm_object_enter(named_entry->backing.pager,
3106 named_entry->offset + named_entry->size,
3107 named_entry->internal,
3108 FALSE,
3109 FALSE);
3110 if (object == VM_OBJECT_NULL) {
3111 named_entry_unlock(named_entry);
3112 return(KERN_INVALID_OBJECT);
3113 }
55e303ae 3114
91447636
A
3115 /* JMM - drop reference on the pager here? */
3116
3117 /* create an extra reference for the object */
3118 vm_object_lock(object);
55e303ae 3119 vm_object_reference_locked(object);
91447636
A
3120 named_entry->backing.object = object;
3121 named_entry->is_pager = FALSE;
55e303ae
A
3122 named_entry_unlock(named_entry);
3123
3124 /* wait for object (if any) to be ready */
91447636
A
3125 if (!named_entry->internal) {
3126 while (!object->pager_ready) {
3127 vm_object_wait(object,
3128 VM_OBJECT_EVENT_PAGER_READY,
3129 THREAD_UNINT);
3130 vm_object_lock(object);
3131 }
55e303ae
A
3132 }
3133 vm_object_unlock(object);
91447636
A
3134
3135 } else {
3136 /* This is the case where we are going to operate */
 3137 /* on an already known object. If the object is */
3138 /* not ready it is internal. An external */
3139 /* object cannot be mapped until it is ready */
3140 /* we can therefore avoid the ready check */
3141 /* in this case. */
3142 object = named_entry->backing.object;
3143 vm_object_reference(object);
3144 named_entry_unlock(named_entry);
55e303ae
A
3145 }
3146
3147 if (!object->private) {
3148 if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
3149 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
3150 if (object->phys_contiguous) {
3151 *flags = UPL_PHYS_CONTIG;
3152 } else {
3153 *flags = 0;
3154 }
3155 } else {
3156 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
3157 }
3158
3159 ret = vm_object_iopl_request(object,
3160 offset,
3161 *upl_size,
3162 upl_ptr,
3163 user_page_list,
3164 page_list_count,
3165 caller_flags);
3166 vm_object_deallocate(object);
3167 return ret;
1c79356b 3168}
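/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * a hedged picture of how an in-kernel client might drive
 * kernel_object_iopl_request() against a named-entry port and then retire
 * the UPL with upl_commit()/upl_deallocate(), as the wrappers earlier in
 * this file do.  The helper name, the stack-allocated MAX_UPL_TRANSFER
 * page list and the flag choice are assumptions, not a description of any
 * existing caller.
 *
 *	static kern_return_t
 *	iopl_example(ipc_port_t entry_port, memory_object_offset_t offset,
 *	    vm_size_t size)
 *	{
 *		upl_t upl;
 *		upl_page_info_t pl[MAX_UPL_TRANSFER];
 *		unsigned int count = MAX_UPL_TRANSFER;
 *		int flags = UPL_COPYOUT_FROM;	// we only read the pages
 *		vm_named_entry_t ne;
 *		kern_return_t kr;
 *
 *		ne = (vm_named_entry_t) entry_port->ip_kobject;
 *		kr = kernel_object_iopl_request(ne, offset, &size, &upl,
 *		    pl, &count, &flags);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *
 *		// On return, flags carries hints such as UPL_PHYS_CONTIG or
 *		// UPL_DEV_MEMORY and pl[0..count-1] describes the wired pages.
 *		// ... perform the I/O here ...
 *
 *		(void) upl_commit(upl, pl, count);
 *		upl_deallocate(upl);
 *		return KERN_SUCCESS;
 *	}
 */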