git.saurik.com Git - apple/xnu.git/blame - osfmk/vm/vm_user.c (xnu-792.21.3)
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
1c79356b 3 *
8f6c56a5 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
8ad349bb 24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: vm/vm_user.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * User-exported virtual memory functions.
63 */
1c79356b 64
65#include <debug.h>
66
67#include <vm_cpm.h>
68#include <mach/boolean.h>
69#include <mach/kern_return.h>
70#include <mach/mach_types.h> /* to get vm_address_t */
71#include <mach/memory_object.h>
72#include <mach/std_types.h> /* to get pointer_t */
91447636 73#include <mach/upl.h>
74#include <mach/vm_attributes.h>
75#include <mach/vm_param.h>
76#include <mach/vm_statistics.h>
1c79356b 77#include <mach/mach_syscalls.h>
9bccf70c 78
79#include <mach/host_priv_server.h>
80#include <mach/mach_vm_server.h>
1c79356b 81#include <mach/shared_memory_server.h>
91447636 82#include <mach/vm_map_server.h>
9bccf70c 83#include <vm/vm_shared_memory_server.h>
84
85#include <kern/host.h>
91447636 86#include <kern/kalloc.h>
87#include <kern/task.h>
88#include <kern/misc_protos.h>
91447636 89#include <vm/vm_fault.h>
90#include <vm/vm_map.h>
91#include <vm/vm_object.h>
92#include <vm/vm_page.h>
93#include <vm/memory_object.h>
94#include <vm/vm_pageout.h>
91447636 95#include <vm/vm_protos.h>
96
97vm_size_t upl_offset_to_pagelist = 0;
98
99#if VM_CPM
100#include <vm/cpm.h>
101#endif /* VM_CPM */
102
103ipc_port_t dynamic_pager_control_port=NULL;
104
105/*
91447636 106 * mach_vm_allocate allocates "zero fill" memory in the specified
 107 * map.
108 */
109kern_return_t
110mach_vm_allocate(
111 vm_map_t map,
112 mach_vm_offset_t *addr,
113 mach_vm_size_t size,
114 int flags)
115{
116 vm_map_offset_t map_addr;
117 vm_map_size_t map_size;
1c79356b 118 kern_return_t result;
91447636 119 boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
120
121 if (map == VM_MAP_NULL)
122 return(KERN_INVALID_ARGUMENT);
123 if (size == 0) {
124 *addr = 0;
125 return(KERN_SUCCESS);
126 }
127
128 if (anywhere) {
129 /*
130 * No specific address requested, so start candidate address
131 * search at the minimum address in the map. However, if that
132 * minimum is 0, bump it up by PAGE_SIZE. We want to limit
133 * allocations of PAGEZERO to explicit requests since its
134 * normal use is to catch dereferences of NULL and many
135 * applications also treat pointers with a value of 0 as
 136 * special and suddenly having address 0 contain usable
137 * memory would tend to confuse those applications.
138 */
139 map_addr = vm_map_min(map);
140 if (map_addr == 0)
141 map_addr += PAGE_SIZE;
142 } else
143 map_addr = vm_map_trunc_page(*addr);
144 map_size = vm_map_round_page(size);
145 if (map_size == 0) {
146 return(KERN_INVALID_ARGUMENT);
147 }
148
149 result = vm_map_enter(
150 map,
151 &map_addr,
152 map_size,
153 (vm_map_offset_t)0,
154 flags,
155 VM_OBJECT_NULL,
156 (vm_object_offset_t)0,
157 FALSE,
158 VM_PROT_DEFAULT,
159 VM_PROT_ALL,
160 VM_INHERIT_DEFAULT);
161
162 *addr = map_addr;
163 return(result);
164}
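/*
 * Illustrative usage sketch (not part of the original file): from user
 * space this routine is normally reached through the MIG-generated stub
 * declared in <mach/mach_vm.h>, with the caller's own task as the target
 * map.
 *
 *	mach_vm_address_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr,
 *	    (mach_vm_size_t)(16 * 4096), VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		// addr now names 16 zero-filled, page-aligned pages
 *	}
 */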
165
166/*
167 * vm_allocate
 168 * Legacy routine that allocates "zero fill" memory in the specified
169 * map (which is limited to the same size as the kernel).
170 */
171kern_return_t
172vm_allocate(
173 vm_map_t map,
174 vm_offset_t *addr,
175 vm_size_t size,
176 int flags)
177{
178 vm_map_offset_t map_addr;
179 vm_map_size_t map_size;
180 kern_return_t result;
181 boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
182
183 if (map == VM_MAP_NULL)
184 return(KERN_INVALID_ARGUMENT);
1c79356b 185 if (size == 0) {
186 *addr = 0;
187 return(KERN_SUCCESS);
188 }
189
190 if (anywhere) {
191 /*
192 * No specific address requested, so start candidate address
193 * search at the minimum address in the map. However, if that
194 * minimum is 0, bump it up by PAGE_SIZE. We want to limit
195 * allocations of PAGEZERO to explicit requests since its
196 * normal use is to catch dereferences of NULL and many
197 * applications also treat pointers with a value of 0 as
 198 * special and suddenly having address 0 contain usable
199 * memory would tend to confuse those applications.
200 */
201 map_addr = vm_map_min(map);
202 if (map_addr == 0)
203 map_addr += PAGE_SIZE;
204 } else
205 map_addr = vm_map_trunc_page(*addr);
206 map_size = vm_map_round_page(size);
207 if (map_size == 0) {
208 return(KERN_INVALID_ARGUMENT);
209 }
210
211 result = vm_map_enter(
212 map,
213 &map_addr,
214 map_size,
215 (vm_map_offset_t)0,
216 flags,
217 VM_OBJECT_NULL,
218 (vm_object_offset_t)0,
219 FALSE,
220 VM_PROT_DEFAULT,
221 VM_PROT_ALL,
222 VM_INHERIT_DEFAULT);
223
91447636 224 *addr = CAST_DOWN(vm_offset_t, map_addr);
225 return(result);
226}
227
228/*
229 * mach_vm_deallocate -
230 * deallocates the specified range of addresses in the
231 * specified address map.
232 */
233kern_return_t
234mach_vm_deallocate(
235 vm_map_t map,
236 mach_vm_offset_t start,
237 mach_vm_size_t size)
238{
239 if ((map == VM_MAP_NULL) || (start + size < start))
240 return(KERN_INVALID_ARGUMENT);
241
242 if (size == (mach_vm_offset_t) 0)
243 return(KERN_SUCCESS);
244
245 return(vm_map_remove(map, vm_map_trunc_page(start),
246 vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
247}
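/*
 * Illustrative usage sketch (not part of the original file): releasing a
 * range obtained earlier with mach_vm_allocate(). The kernel rounds the
 * range to page boundaries, so a partial-page request removes the whole
 * containing page.
 *
 *	kr = mach_vm_deallocate(mach_task_self(), addr,
 *	    (mach_vm_size_t)(16 * 4096));
 */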
248
249/*
250 * vm_deallocate -
251 * deallocates the specified range of addresses in the
252 * specified address map (limited to addresses the same
253 * size as the kernel).
254 */
255kern_return_t
256vm_deallocate(
257 register vm_map_t map,
258 vm_offset_t start,
259 vm_size_t size)
260{
91447636 261 if ((map == VM_MAP_NULL) || (start + size < start))
262 return(KERN_INVALID_ARGUMENT);
263
264 if (size == (vm_offset_t) 0)
265 return(KERN_SUCCESS);
266
267 return(vm_map_remove(map, vm_map_trunc_page(start),
268 vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
269}
270
271/*
272 * mach_vm_inherit -
273 * Sets the inheritance of the specified range in the
274 * specified map.
275 */
276kern_return_t
277mach_vm_inherit(
278 vm_map_t map,
279 mach_vm_offset_t start,
280 mach_vm_size_t size,
281 vm_inherit_t new_inheritance)
282{
283 if ((map == VM_MAP_NULL) || (start + size < start) ||
284 (new_inheritance > VM_INHERIT_LAST_VALID))
285 return(KERN_INVALID_ARGUMENT);
286
287 if (size == 0)
288 return KERN_SUCCESS;
289
290 return(vm_map_inherit(map,
291 vm_map_trunc_page(start),
292 vm_map_round_page(start+size),
293 new_inheritance));
294}
295
296/*
297 * vm_inherit -
298 * Sets the inheritance of the specified range in the
 299 * specified map (range limited to addresses the same size as the kernel).
 300 */
301kern_return_t
302vm_inherit(
303 register vm_map_t map,
304 vm_offset_t start,
305 vm_size_t size,
306 vm_inherit_t new_inheritance)
307{
308 if ((map == VM_MAP_NULL) || (start + size < start) ||
309 (new_inheritance > VM_INHERIT_LAST_VALID))
310 return(KERN_INVALID_ARGUMENT);
311
312 if (size == 0)
313 return KERN_SUCCESS;
314
1c79356b 315 return(vm_map_inherit(map,
316 vm_map_trunc_page(start),
317 vm_map_round_page(start+size),
318 new_inheritance));
319}
320
321/*
322 * mach_vm_protect -
323 * Sets the protection of the specified range in the
324 * specified map.
325 */
326
327kern_return_t
328mach_vm_protect(
329 vm_map_t map,
330 mach_vm_offset_t start,
331 mach_vm_size_t size,
332 boolean_t set_maximum,
333 vm_prot_t new_protection)
334{
335 if ((map == VM_MAP_NULL) || (start + size < start) ||
336 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
337 return(KERN_INVALID_ARGUMENT);
338
339 if (size == 0)
340 return KERN_SUCCESS;
341
342 return(vm_map_protect(map,
343 vm_map_trunc_page(start),
344 vm_map_round_page(start+size),
345 new_protection,
346 set_maximum));
347}
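/*
 * Illustrative usage sketch (not part of the original file): dropping a
 * previously allocated region to read-only. With set_maximum == TRUE the
 * call lowers the maximum protection instead, which can never be raised
 * again for that range.
 *
 *	kr = mach_vm_protect(mach_task_self(), addr, (mach_vm_size_t)4096,
 *	    FALSE, VM_PROT_READ);
 */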
348
349/*
350 * vm_protect -
351 * Sets the protection of the specified range in the
352 * specified map. Addressability of the range limited
353 * to the same size as the kernel.
354 */
355
356kern_return_t
357vm_protect(
91447636 358 vm_map_t map,
359 vm_offset_t start,
360 vm_size_t size,
361 boolean_t set_maximum,
362 vm_prot_t new_protection)
363{
364 if ((map == VM_MAP_NULL) || (start + size < start) ||
365 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
366 return(KERN_INVALID_ARGUMENT);
367
368 if (size == 0)
369 return KERN_SUCCESS;
370
1c79356b 371 return(vm_map_protect(map,
372 vm_map_trunc_page(start),
373 vm_map_round_page(start+size),
374 new_protection,
375 set_maximum));
376}
377
378/*
91447636 379 * mach_vm_machine_attribute -
380 * Handle machine-specific attributes for a mapping, such
 381 * as cacheability, migratability, etc.
382 */
383kern_return_t
384mach_vm_machine_attribute(
385 vm_map_t map,
386 mach_vm_address_t addr,
387 mach_vm_size_t size,
388 vm_machine_attribute_t attribute,
389 vm_machine_attribute_val_t* value) /* IN/OUT */
390{
391 if ((map == VM_MAP_NULL) || (addr + size < addr))
392 return(KERN_INVALID_ARGUMENT);
393
394 if (size == 0)
395 return KERN_SUCCESS;
396
397 return vm_map_machine_attribute(map,
398 vm_map_trunc_page(addr),
399 vm_map_round_page(addr+size),
400 attribute,
401 value);
402}
403
404/*
405 * vm_machine_attribute -
406 * Handle machine-specific attributes for a mapping, such
 407 * as cacheability, migratability, etc. Limited addressability
408 * (same range limits as for the native kernel map).
409 */
410kern_return_t
411vm_machine_attribute(
412 vm_map_t map,
91447636 413 vm_address_t addr,
414 vm_size_t size,
415 vm_machine_attribute_t attribute,
416 vm_machine_attribute_val_t* value) /* IN/OUT */
417{
418 if ((map == VM_MAP_NULL) || (addr + size < addr))
419 return(KERN_INVALID_ARGUMENT);
420
421 if (size == 0)
422 return KERN_SUCCESS;
423
424 return vm_map_machine_attribute(map,
425 vm_map_trunc_page(addr),
426 vm_map_round_page(addr+size),
427 attribute,
428 value);
429}
430
431/*
432 * mach_vm_read -
433 * Read/copy a range from one address space and return it to the caller.
434 *
435 * It is assumed that the address for the returned memory is selected by
436 * the IPC implementation as part of receiving the reply to this call.
437 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
438 * that gets returned.
439 *
440 * JMM - because of mach_msg_type_number_t, this call is limited to a
441 * single 4GB region at this time.
442 *
443 */
444kern_return_t
445mach_vm_read(
446 vm_map_t map,
447 mach_vm_address_t addr,
448 mach_vm_size_t size,
449 pointer_t *data,
450 mach_msg_type_number_t *data_size)
451{
452 kern_return_t error;
453 vm_map_copy_t ipc_address;
454
455 if (map == VM_MAP_NULL)
456 return(KERN_INVALID_ARGUMENT);
457
458
459 error = vm_map_copyin(map,
460 (vm_map_address_t)addr,
461 (vm_map_size_t)size,
462 FALSE, /* src_destroy */
463 &ipc_address);
464
465 if (KERN_SUCCESS == error) {
466 *data = (pointer_t) ipc_address;
467 *data_size = size;
468 }
469 return(error);
470}
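/*
 * Illustrative usage sketch (not part of the original file); target_task
 * and remote_addr are placeholders for a task port and address obtained
 * elsewhere. The MIG reply maps the copied data into the caller's address
 * space, so the caller should deallocate it when done.
 *
 *	pointer_t buf;
 *	mach_msg_type_number_t buf_size;
 *
 *	kr = mach_vm_read(target_task, remote_addr, (mach_vm_size_t)4096,
 *	    &buf, &buf_size);
 *	if (kr == KERN_SUCCESS) {
 *		// ... inspect buf[0 .. buf_size-1] ...
 *		(void) mach_vm_deallocate(mach_task_self(),
 *		    (mach_vm_address_t)buf, buf_size);
 *	}
 */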
471
472/*
473 * vm_read -
474 * Read/copy a range from one address space and return it to the caller.
475 * Limited addressability (same range limits as for the native kernel map).
476 *
477 * It is assumed that the address for the returned memory is selected by
478 * the IPC implementation as part of receiving the reply to this call.
479 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
480 * that gets returned.
481 */
482kern_return_t
483vm_read(
484 vm_map_t map,
91447636 485 vm_address_t addr,
486 vm_size_t size,
487 pointer_t *data,
488 mach_msg_type_number_t *data_size)
489{
490 kern_return_t error;
491 vm_map_copy_t ipc_address;
492
493 if (map == VM_MAP_NULL)
494 return(KERN_INVALID_ARGUMENT);
495
496 error = vm_map_copyin(map,
497 (vm_map_address_t)addr,
498 (vm_map_size_t)size,
499 FALSE, /* src_destroy */
500 &ipc_address);
501
502 if (KERN_SUCCESS == error) {
503 *data = (pointer_t) ipc_address;
504 *data_size = size;
505 }
506 return(error);
507}
508
509/*
510 * mach_vm_read_list -
511 * Read/copy a list of address ranges from specified map.
512 *
513 * MIG does not know how to deal with a returned array of
514 * vm_map_copy_t structures, so we have to do the copyout
515 * manually here.
516 */
517kern_return_t
518mach_vm_read_list(
519 vm_map_t map,
520 mach_vm_read_entry_t data_list,
521 natural_t count)
522{
523 mach_msg_type_number_t i;
524 kern_return_t error;
525 vm_map_copy_t copy;
526
527 if (map == VM_MAP_NULL ||
528 count > VM_MAP_ENTRY_MAX)
529 return(KERN_INVALID_ARGUMENT);
530
531 error = KERN_SUCCESS;
532 for(i=0; i<count; i++) {
533 vm_map_address_t map_addr;
534 vm_map_size_t map_size;
535
536 map_addr = (vm_map_address_t)(data_list[i].address);
537 map_size = (vm_map_size_t)(data_list[i].size);
538
539 if(map_size != 0) {
540 error = vm_map_copyin(map,
541 map_addr,
542 map_size,
543 FALSE, /* src_destroy */
544 &copy);
545 if (KERN_SUCCESS == error) {
546 error = vm_map_copyout(
547 current_task()->map,
548 &map_addr,
549 copy);
550 if (KERN_SUCCESS == error) {
551 data_list[i].address = map_addr;
552 continue;
553 }
554 vm_map_copy_discard(copy);
555 }
556 }
557 data_list[i].address = (mach_vm_address_t)0;
558 data_list[i].size = (mach_vm_size_t)0;
559 }
560 return(error);
561}
562
563/*
564 * vm_read_list -
565 * Read/copy a list of address ranges from specified map.
566 *
567 * MIG does not know how to deal with a returned array of
568 * vm_map_copy_t structures, so we have to do the copyout
569 * manually here.
570 *
571 * The source and destination ranges are limited to those
572 * that can be described with a vm_address_t (i.e. same
573 * size map as the kernel).
574 *
575 * JMM - If the result of the copyout is an address range
576 * that cannot be described with a vm_address_t (i.e. the
577 * caller had a larger address space but used this call
578 * anyway), it will result in a truncated address being
579 * returned (and a likely confused caller).
580 */
581
582kern_return_t
583vm_read_list(
584 vm_map_t map,
585 vm_read_entry_t data_list,
586 natural_t count)
587{
588 mach_msg_type_number_t i;
589 kern_return_t error;
91447636 590 vm_map_copy_t copy;
1c79356b 591
592 if (map == VM_MAP_NULL ||
593 count > VM_MAP_ENTRY_MAX)
594 return(KERN_INVALID_ARGUMENT);
595
91447636 596 error = KERN_SUCCESS;
1c79356b 597 for(i=0; i<count; i++) {
598 vm_map_address_t map_addr;
599 vm_map_size_t map_size;
600
601 map_addr = (vm_map_address_t)(data_list[i].address);
602 map_size = (vm_map_size_t)(data_list[i].size);
603
604 if(map_size != 0) {
605 error = vm_map_copyin(map,
606 map_addr,
607 map_size,
608 FALSE, /* src_destroy */
609 &copy);
610 if (KERN_SUCCESS == error) {
611 error = vm_map_copyout(current_task()->map,
612 &map_addr,
613 copy);
614 if (KERN_SUCCESS == error) {
615 data_list[i].address =
616 CAST_DOWN(vm_offset_t, map_addr);
617 continue;
618 }
619 vm_map_copy_discard(copy);
620 }
621 }
622 data_list[i].address = (mach_vm_address_t)0;
623 data_list[i].size = (mach_vm_size_t)0;
624 }
625 return(error);
626}
627
628/*
629 * mach_vm_read_overwrite -
630 * Overwrite a range of the current map with data from the specified
631 * map/address range.
632 *
633 * In making an assumption that the current thread is local, it is
634 * no longer cluster-safe without a fully supportive local proxy
 635 * thread/task (but we don't support clusters anymore, so this is moot).
636 */
637
1c79356b 638kern_return_t
639mach_vm_read_overwrite(
640 vm_map_t map,
641 mach_vm_address_t address,
642 mach_vm_size_t size,
643 mach_vm_address_t data,
644 mach_vm_size_t *data_size)
645{
646 kern_return_t error;
647 vm_map_copy_t copy;
648
649 if (map == VM_MAP_NULL)
650 return(KERN_INVALID_ARGUMENT);
651
652 error = vm_map_copyin(map, (vm_map_address_t)address,
653 (vm_map_size_t)size, FALSE, &copy);
654
655 if (KERN_SUCCESS == error) {
656 error = vm_map_copy_overwrite(current_thread()->map,
657 (vm_map_address_t)data,
658 copy, FALSE);
659 if (KERN_SUCCESS == error) {
660 *data_size = size;
661 return error;
1c79356b 662 }
91447636 663 vm_map_copy_discard(copy);
1c79356b 664 }
665 return(error);
666}
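/*
 * Illustrative usage sketch (not part of the original file); target_task
 * and remote_addr are placeholders. Unlike mach_vm_read(), the overwrite
 * form copies into a buffer the caller already owns, so no new mapping
 * appears in the caller's address space.
 *
 *	char local_buf[4096];
 *	mach_vm_size_t nread;
 *
 *	kr = mach_vm_read_overwrite(target_task, remote_addr,
 *	    (mach_vm_size_t)sizeof(local_buf),
 *	    (mach_vm_address_t)local_buf, &nread);
 */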
667
668/*
669 * vm_read_overwrite -
670 * Overwrite a range of the current map with data from the specified
671 * map/address range.
672 *
673 * This routine adds the additional limitation that the source and
674 * destination ranges must be describable with vm_address_t values
 675 * (i.e. the same size address spaces as the kernel, or at least
 676 * the ranges are in that first portion of the respective address
677 * spaces).
678 */
679
680kern_return_t
681vm_read_overwrite(
682 vm_map_t map,
683 vm_address_t address,
684 vm_size_t size,
685 vm_address_t data,
686 vm_size_t *data_size)
687{
688 kern_return_t error;
689 vm_map_copy_t copy;
690
691 if (map == VM_MAP_NULL)
692 return(KERN_INVALID_ARGUMENT);
693
694 error = vm_map_copyin(map, (vm_map_address_t)address,
695 (vm_map_size_t)size, FALSE, &copy);
696
697 if (KERN_SUCCESS == error) {
698 error = vm_map_copy_overwrite(current_thread()->map,
699 (vm_map_address_t)data,
700 copy, FALSE);
701 if (KERN_SUCCESS == error) {
702 *data_size = size;
703 return error;
1c79356b 704 }
91447636 705 vm_map_copy_discard(copy);
1c79356b 706 }
1c79356b
A
707 return(error);
708}
709
710
711/*
712 * mach_vm_write -
713 * Overwrite the specified address range with the data provided
714 * (from the current map).
715 */
716kern_return_t
717mach_vm_write(
718 vm_map_t map,
719 mach_vm_address_t address,
720 pointer_t data,
721 __unused mach_msg_type_number_t size)
722{
723 if (map == VM_MAP_NULL)
724 return KERN_INVALID_ARGUMENT;
1c79356b 725
726 return vm_map_copy_overwrite(map, (vm_map_address_t)address,
727 (vm_map_copy_t) data, FALSE /* interruptible XXX */);
728}
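/*
 * Illustrative usage sketch (not part of the original file); target_task,
 * remote_addr and local_buf are placeholders. From user space the
 * data/size pair travels as an out-of-line MIG buffer; by the time it
 * reaches this routine it has become the vm_map_copy_t consumed above.
 *
 *	kr = mach_vm_write(target_task, remote_addr, (pointer_t)local_buf,
 *	    (mach_msg_type_number_t)sizeof(local_buf));
 */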
1c79356b 729
730/*
731 * vm_write -
732 * Overwrite the specified address range with the data provided
733 * (from the current map).
734 *
735 * The addressability of the range of addresses to overwrite is
 736 * limited by the use of a vm_address_t (same size as kernel map).
737 * Either the target map is also small, or the range is in the
738 * low addresses within it.
739 */
740kern_return_t
741vm_write(
742 vm_map_t map,
743 vm_address_t address,
744 pointer_t data,
745 __unused mach_msg_type_number_t size)
746{
747 if (map == VM_MAP_NULL)
748 return KERN_INVALID_ARGUMENT;
749
750 return vm_map_copy_overwrite(map, (vm_map_address_t)address,
751 (vm_map_copy_t) data, FALSE /* interruptible XXX */);
752}
753
754/*
755 * mach_vm_copy -
756 * Overwrite one range of the specified map with the contents of
757 * another range within that same map (i.e. both address ranges
758 * are "over there").
759 */
760kern_return_t
761mach_vm_copy(
1c79356b 762 vm_map_t map,
91447636
A
763 mach_vm_address_t source_address,
764 mach_vm_size_t size,
765 mach_vm_address_t dest_address)
1c79356b 766{
91447636
A
767 vm_map_copy_t copy;
768 kern_return_t kr;
769
1c79356b
A
770 if (map == VM_MAP_NULL)
771 return KERN_INVALID_ARGUMENT;
772
91447636
A
773 kr = vm_map_copyin(map, (vm_map_address_t)source_address,
774 (vm_map_size_t)size, FALSE, &copy);
775
776 if (KERN_SUCCESS == kr) {
777 kr = vm_map_copy_overwrite(map,
778 (vm_map_address_t)dest_address,
779 copy, FALSE /* interruptible XXX */);
780
781 if (KERN_SUCCESS != kr)
782 vm_map_copy_discard(copy);
783 }
784 return kr;
1c79356b
A
785}
786
787kern_return_t
788vm_copy(
789 vm_map_t map,
790 vm_address_t source_address,
791 vm_size_t size,
792 vm_address_t dest_address)
793{
794 vm_map_copy_t copy;
795 kern_return_t kr;
796
797 if (map == VM_MAP_NULL)
798 return KERN_INVALID_ARGUMENT;
799
91447636
A
800 kr = vm_map_copyin(map, (vm_map_address_t)source_address,
801 (vm_map_size_t)size, FALSE, &copy);
1c79356b 802
91447636
A
803 if (KERN_SUCCESS == kr) {
804 kr = vm_map_copy_overwrite(map,
805 (vm_map_address_t)dest_address,
806 copy, FALSE /* interruptible XXX */);
1c79356b 807
808 if (KERN_SUCCESS != kr)
809 vm_map_copy_discard(copy);
810 }
811 return kr;
812}
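/*
 * Illustrative usage sketch (not part of the original file); target_task,
 * src_addr and dst_addr are placeholders. Both ranges live in the target
 * map, so no data is staged in the caller's own address space.
 *
 *	kr = mach_vm_copy(target_task, src_addr, (mach_vm_size_t)4096,
 *	    dst_addr);
 */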
813
814/*
91447636
A
815 * mach_vm_map -
816 * Map some range of an object into an address space.
817 *
818 * The object can be one of several types of objects:
819 * NULL - anonymous memory
820 * a named entry - a range within another address space
821 * or a range within a memory object
822 * a whole memory object
823 *
1c79356b
A
824 */
825kern_return_t
91447636 826mach_vm_map(
1c79356b 827 vm_map_t target_map,
91447636
A
828 mach_vm_offset_t *address,
829 mach_vm_size_t initial_size,
830 mach_vm_offset_t mask,
1c79356b
A
831 int flags,
832 ipc_port_t port,
833 vm_object_offset_t offset,
834 boolean_t copy,
835 vm_prot_t cur_protection,
836 vm_prot_t max_protection,
837 vm_inherit_t inheritance)
838{
91447636
A
839 vm_map_address_t map_addr;
840 vm_map_size_t map_size;
1c79356b 841 vm_object_t object;
91447636 842 vm_object_size_t size;
1c79356b
A
843 kern_return_t result;
844
845 /*
846 * Check arguments for validity
847 */
848 if ((target_map == VM_MAP_NULL) ||
849 (cur_protection & ~VM_PROT_ALL) ||
850 (max_protection & ~VM_PROT_ALL) ||
851 (inheritance > VM_INHERIT_LAST_VALID) ||
91447636 852 initial_size == 0)
1c79356b
A
853 return(KERN_INVALID_ARGUMENT);
854
91447636
A
855 map_addr = vm_map_trunc_page(*address);
856 map_size = vm_map_round_page(initial_size);
857 size = vm_object_round_page(initial_size);
858
1c79356b
A
859 /*
860 * Find the vm object (if any) corresponding to this port.
861 */
862 if (!IP_VALID(port)) {
863 object = VM_OBJECT_NULL;
864 offset = 0;
865 copy = FALSE;
866 } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
867 vm_named_entry_t named_entry;
868
869 named_entry = (vm_named_entry_t)port->ip_kobject;
870 /* a few checks to make sure user is obeying rules */
871 if(size == 0) {
872 if(offset >= named_entry->size)
873 return(KERN_INVALID_RIGHT);
874 size = named_entry->size - offset;
875 }
876 if((named_entry->protection & max_protection) != max_protection)
877 return(KERN_INVALID_RIGHT);
878 if((named_entry->protection & cur_protection) != cur_protection)
879 return(KERN_INVALID_RIGHT);
880 if(named_entry->size < (offset + size))
881 return(KERN_INVALID_ARGUMENT);
882
 883 /* the caller's parameter offset is defined to be the */
884 /* offset from beginning of named entry offset in object */
885 offset = offset + named_entry->offset;
886
887 named_entry_lock(named_entry);
888 if(named_entry->is_sub_map) {
889 vm_map_entry_t map_entry;
890
891 named_entry_unlock(named_entry);
1c79356b
A
892 vm_object_reference(vm_submap_object);
893 if ((result = vm_map_enter(target_map,
91447636
A
894 &map_addr, map_size,
895 (vm_map_offset_t)mask, flags,
1c79356b
A
896 vm_submap_object, 0,
897 FALSE,
898 cur_protection, max_protection, inheritance
899 )) != KERN_SUCCESS) {
900 vm_object_deallocate(vm_submap_object);
901 } else {
902 char alias;
903
904 VM_GET_FLAGS_ALIAS(flags, alias);
905 if ((alias == VM_MEMORY_SHARED_PMAP) &&
906 !copy) {
91447636
A
907 vm_map_submap(target_map, map_addr,
908 map_addr + map_size,
1c79356b 909 named_entry->backing.map,
91447636 910 (vm_map_offset_t)offset, TRUE);
1c79356b 911 } else {
91447636
A
912 vm_map_submap(target_map, map_addr,
913 map_addr + map_size,
1c79356b 914 named_entry->backing.map,
91447636 915 (vm_map_offset_t)offset, FALSE);
1c79356b
A
916 }
917 if(copy) {
918 if(vm_map_lookup_entry(
91447636 919 target_map, map_addr, &map_entry)) {
1c79356b
A
920 map_entry->needs_copy = TRUE;
921 }
922 }
91447636 923 *address = map_addr;
1c79356b
A
924 }
925 return(result);
926
91447636 927 } else if (named_entry->is_pager) {
55e303ae
A
928 unsigned int access;
929 vm_prot_t protections;
930 unsigned int wimg_mode;
91447636 931 boolean_t cache_attr;
55e303ae
A
932
933 protections = named_entry->protection
934 & VM_PROT_ALL;
935 access = GET_MAP_MEM(named_entry->protection);
936
937 object = vm_object_enter(
938 named_entry->backing.pager,
939 named_entry->size,
940 named_entry->internal,
941 FALSE,
942 FALSE);
1c79356b
A
943 if (object == VM_OBJECT_NULL) {
944 named_entry_unlock(named_entry);
945 return(KERN_INVALID_OBJECT);
946 }
55e303ae 947
91447636 948 /* JMM - drop reference on pager here */
55e303ae
A
949
950 /* create an extra ref for the named entry */
91447636 951 vm_object_lock(object);
55e303ae 952 vm_object_reference_locked(object);
91447636
A
953 named_entry->backing.object = object;
954 named_entry->is_pager = FALSE;
1c79356b 955 named_entry_unlock(named_entry);
55e303ae
A
956
957 wimg_mode = object->wimg_bits;
958 if(access == MAP_MEM_IO) {
959 wimg_mode = VM_WIMG_IO;
960 } else if (access == MAP_MEM_COPYBACK) {
961 wimg_mode = VM_WIMG_USE_DEFAULT;
962 } else if (access == MAP_MEM_WTHRU) {
963 wimg_mode = VM_WIMG_WTHRU;
964 } else if (access == MAP_MEM_WCOMB) {
965 wimg_mode = VM_WIMG_WCOMB;
966 }
967 if ((wimg_mode == VM_WIMG_IO)
968 || (wimg_mode == VM_WIMG_WCOMB))
969 cache_attr = TRUE;
970 else
971 cache_attr = FALSE;
972
91447636
A
973 /* wait for object (if any) to be ready */
974 if (!named_entry->internal) {
1c79356b
A
975 while (!object->pager_ready) {
976 vm_object_wait(object,
91447636
A
977 VM_OBJECT_EVENT_PAGER_READY,
978 THREAD_UNINT);
1c79356b
A
979 vm_object_lock(object);
980 }
1c79356b 981 }
91447636 982
55e303ae
A
983 if(object->wimg_bits != wimg_mode) {
984 vm_page_t p;
985
986 vm_object_paging_wait(object, THREAD_UNINT);
987
988 object->wimg_bits = wimg_mode;
989 queue_iterate(&object->memq, p, vm_page_t, listq) {
990 if (!p->fictitious) {
91447636
A
991 pmap_disconnect(p->phys_page);
992 if (cache_attr)
993 pmap_sync_page_attributes_phys(p->phys_page);
55e303ae
A
994 }
995 }
996 }
997 object->true_share = TRUE;
998 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
999 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1000 vm_object_unlock(object);
91447636
A
1001 } else {
1002 /* This is the case where we are going to map */
1003 /* an already mapped object. If the object is */
1004 /* not ready it is internal. An external */
 1005 /* object cannot be mapped until it is ready, */
 1006 /* so we can avoid the ready check */
1007 /* in this case. */
1008 object = named_entry->backing.object;
1009 assert(object != VM_OBJECT_NULL);
1010 named_entry_unlock(named_entry);
1011 vm_object_reference(object);
1c79356b 1012 }
0b4e3aa0
A
1013 } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
1014 /*
1015 * JMM - This is temporary until we unify named entries
1016 * and raw memory objects.
1017 *
1018 * Detected fake ip_kotype for a memory object. In
1019 * this case, the port isn't really a port at all, but
1020 * instead is just a raw memory object.
1021 */
1022
1023 if ((object = vm_object_enter((memory_object_t)port,
1024 size, FALSE, FALSE, FALSE))
1c79356b
A
1025 == VM_OBJECT_NULL)
1026 return(KERN_INVALID_OBJECT);
1027
1028 /* wait for object (if any) to be ready */
1029 if (object != VM_OBJECT_NULL) {
9bccf70c
A
1030 if(object == kernel_object) {
1031 printf("Warning: Attempt to map kernel object"
1032 " by a non-private kernel entity\n");
1033 return(KERN_INVALID_OBJECT);
1034 }
1c79356b
A
1035 vm_object_lock(object);
1036 while (!object->pager_ready) {
1037 vm_object_wait(object,
1038 VM_OBJECT_EVENT_PAGER_READY,
1039 THREAD_UNINT);
1040 vm_object_lock(object);
1041 }
1042 vm_object_unlock(object);
1043 }
0b4e3aa0
A
1044 } else {
1045 return (KERN_INVALID_OBJECT);
1c79356b
A
1046 }
1047
1c79356b
A
1048 /*
1049 * Perform the copy if requested
1050 */
1051
1052 if (copy) {
1053 vm_object_t new_object;
1054 vm_object_offset_t new_offset;
1055
1056 result = vm_object_copy_strategically(object, offset, size,
1057 &new_object, &new_offset,
1058 &copy);
1059
1060
1061 if (result == KERN_MEMORY_RESTART_COPY) {
1062 boolean_t success;
1063 boolean_t src_needs_copy;
1064
1065 /*
1066 * XXX
1067 * We currently ignore src_needs_copy.
1068 * This really is the issue of how to make
1069 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
1070 * non-kernel users to use. Solution forthcoming.
1071 * In the meantime, since we don't allow non-kernel
1072 * memory managers to specify symmetric copy,
1073 * we won't run into problems here.
1074 */
1075 new_object = object;
1076 new_offset = offset;
1077 success = vm_object_copy_quickly(&new_object,
1078 new_offset, size,
1079 &src_needs_copy,
1080 &copy);
1081 assert(success);
1082 result = KERN_SUCCESS;
1083 }
1084 /*
1085 * Throw away the reference to the
1086 * original object, as it won't be mapped.
1087 */
1088
1089 vm_object_deallocate(object);
1090
1091 if (result != KERN_SUCCESS)
1092 return (result);
1093
1094 object = new_object;
1095 offset = new_offset;
1096 }
1097
1098 if ((result = vm_map_enter(target_map,
91447636
A
1099 &map_addr, map_size,
1100 (vm_map_offset_t)mask,
1101 flags,
1102 object, offset,
1103 copy,
1104 cur_protection, max_protection, inheritance
1105 )) != KERN_SUCCESS)
1106 vm_object_deallocate(object);
1107 *address = map_addr;
1108 return(result);
1109}
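/*
 * Illustrative usage sketch (not part of the original file); mem_entry
 * and region_size are placeholders for a named-entry port and size, e.g.
 * obtained from mach_make_memory_entry_64() later in this file. This maps
 * the handle into the caller's own map with copy semantics, read-only.
 *
 *	mach_vm_address_t map_at = 0;
 *
 *	kr = mach_vm_map(mach_task_self(), &map_at, region_size, 0,
 *	    VM_FLAGS_ANYWHERE, mem_entry, 0, TRUE,
 *	    VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
 */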
1110
1111
1112/* legacy interface */
1113kern_return_t
1114vm_map_64(
1115 vm_map_t target_map,
1116 vm_offset_t *address,
1117 vm_size_t size,
1118 vm_offset_t mask,
1119 int flags,
1120 ipc_port_t port,
1121 vm_object_offset_t offset,
1122 boolean_t copy,
1123 vm_prot_t cur_protection,
1124 vm_prot_t max_protection,
1125 vm_inherit_t inheritance)
1126{
1127 mach_vm_address_t map_addr;
1128 mach_vm_size_t map_size;
1129 mach_vm_offset_t map_mask;
1130 kern_return_t kr;
1131
1132 map_addr = (mach_vm_address_t)*address;
1133 map_size = (mach_vm_size_t)size;
1134 map_mask = (mach_vm_offset_t)mask;
1135
1136 kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
1137 port, offset, copy,
1138 cur_protection, max_protection, inheritance);
1139 *address = CAST_DOWN(vm_address_t, map_addr);
1140 return kr;
1141}
1142
1c79356b 1143/* temporary, until world build */
55e303ae 1144kern_return_t
1c79356b
A
1145vm_map(
1146 vm_map_t target_map,
1147 vm_offset_t *address,
1148 vm_size_t size,
1149 vm_offset_t mask,
1150 int flags,
1151 ipc_port_t port,
1152 vm_offset_t offset,
1153 boolean_t copy,
1154 vm_prot_t cur_protection,
1155 vm_prot_t max_protection,
1156 vm_inherit_t inheritance)
1157{
91447636
A
1158 mach_vm_address_t map_addr;
1159 mach_vm_size_t map_size;
1160 mach_vm_offset_t map_mask;
1161 vm_object_offset_t obj_offset;
1162 kern_return_t kr;
1163
1164 map_addr = (mach_vm_address_t)*address;
1165 map_size = (mach_vm_size_t)size;
1166 map_mask = (mach_vm_offset_t)mask;
1167 obj_offset = (vm_object_offset_t)offset;
1168
1169 kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
1170 port, obj_offset, copy,
1171 cur_protection, max_protection, inheritance);
1172 *address = CAST_DOWN(vm_address_t, map_addr);
1173 return kr;
1174}
1175
1176/*
1177 * mach_vm_remap -
1178 * Remap a range of memory from one task into another,
1179 * to another address range within the same task, or
1180 * over top of itself (with altered permissions and/or
1181 * as an in-place copy of itself).
1182 */
1183
1184kern_return_t
1185mach_vm_remap(
1186 vm_map_t target_map,
1187 mach_vm_offset_t *address,
1188 mach_vm_size_t size,
1189 mach_vm_offset_t mask,
1190 boolean_t anywhere,
1191 vm_map_t src_map,
1192 mach_vm_offset_t memory_address,
1193 boolean_t copy,
1194 vm_prot_t *cur_protection,
1195 vm_prot_t *max_protection,
1196 vm_inherit_t inheritance)
1197{
1198 vm_map_offset_t map_addr;
1199 kern_return_t kr;
1200
1201 if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
1202 return KERN_INVALID_ARGUMENT;
1203
1204 map_addr = (vm_map_offset_t)*address;
1205
1206 kr = vm_map_remap(target_map,
1207 &map_addr,
1208 size,
1209 mask,
1210 anywhere,
1211 src_map,
1212 memory_address,
1213 copy,
1214 cur_protection,
1215 max_protection,
1216 inheritance);
1217 *address = map_addr;
1218 return kr;
1219}
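/*
 * Illustrative usage sketch (not part of the original file); target_task,
 * remote_addr and size are placeholders. The TRUE argument requests
 * "anywhere" placement; with copy == FALSE the new mapping aliases the
 * same pages, and the effective protections are returned to the caller.
 *
 *	mach_vm_address_t local_addr = 0;
 *	vm_prot_t cur_prot, max_prot;
 *
 *	kr = mach_vm_remap(mach_task_self(), &local_addr, size, 0, TRUE,
 *	    target_task, remote_addr, FALSE, &cur_prot, &max_prot,
 *	    VM_INHERIT_NONE);
 */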
1220
1221/*
1222 * vm_remap -
1223 * Remap a range of memory from one task into another,
1224 * to another address range within the same task, or
1225 * over top of itself (with altered permissions and/or
1226 * as an in-place copy of itself).
1227 *
1228 * The addressability of the source and target address
1229 * range is limited by the size of vm_address_t (in the
1230 * kernel context).
1231 */
1232kern_return_t
1233vm_remap(
1234 vm_map_t target_map,
1235 vm_offset_t *address,
1236 vm_size_t size,
1237 vm_offset_t mask,
1238 boolean_t anywhere,
1239 vm_map_t src_map,
1240 vm_offset_t memory_address,
1241 boolean_t copy,
1242 vm_prot_t *cur_protection,
1243 vm_prot_t *max_protection,
1244 vm_inherit_t inheritance)
1245{
1246 vm_map_offset_t map_addr;
1247 kern_return_t kr;
1248
1249 if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
1250 return KERN_INVALID_ARGUMENT;
1251
1252 map_addr = (vm_map_offset_t)*address;
1253
1254 kr = vm_map_remap(target_map,
1255 &map_addr,
1256 size,
1257 mask,
1258 anywhere,
1259 src_map,
1260 memory_address,
1261 copy,
1262 cur_protection,
1263 max_protection,
1264 inheritance);
1265 *address = CAST_DOWN(vm_offset_t, map_addr);
1266 return kr;
1267}
1c79356b
A
1268
1269/*
 1270 * NOTE: these routines (and this file) will no longer require mach_host_server.h
1271 * when mach_vm_wire and vm_wire are changed to use ledgers.
1272 */
1273#include <mach/mach_host_server.h>
1274/*
91447636
A
1275 * mach_vm_wire
1276 * Specify that the range of the virtual address space
1277 * of the target task must not cause page faults for
1278 * the indicated accesses.
1279 *
1280 * [ To unwire the pages, specify VM_PROT_NONE. ]
1281 */
1282kern_return_t
1283mach_vm_wire(
1284 host_priv_t host_priv,
1285 vm_map_t map,
1286 mach_vm_offset_t start,
1287 mach_vm_size_t size,
1288 vm_prot_t access)
1289{
1290 kern_return_t rc;
1291
1292 if (host_priv == HOST_PRIV_NULL)
1293 return KERN_INVALID_HOST;
1294
1295 assert(host_priv == &realhost);
1296
1297 if (map == VM_MAP_NULL)
1298 return KERN_INVALID_TASK;
1299
1300 if (access & ~VM_PROT_ALL)
1301 return KERN_INVALID_ARGUMENT;
1302
1303 if (access != VM_PROT_NONE) {
1304 rc = vm_map_wire(map, vm_map_trunc_page(start),
1305 vm_map_round_page(start+size), access, TRUE);
1306 } else {
1307 rc = vm_map_unwire(map, vm_map_trunc_page(start),
1308 vm_map_round_page(start+size), TRUE);
1309 }
1310 return rc;
1311}
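/*
 * Illustrative usage sketch (not part of the original file); host_priv is
 * a placeholder for the host privilege port, which only suitably
 * privileged callers can obtain. Passing VM_PROT_NONE as the access
 * argument unwires the range again.
 *
 *	kr = mach_vm_wire(host_priv, mach_task_self(), addr,
 *	    (mach_vm_size_t)4096, VM_PROT_READ | VM_PROT_WRITE);
 */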
1312
1313/*
1314 * vm_wire -
1315 * Specify that the range of the virtual address space
1316 * of the target task must not cause page faults for
1317 * the indicated accesses.
1318 *
1319 * [ To unwire the pages, specify VM_PROT_NONE. ]
1320 */
1321kern_return_t
1322vm_wire(
1323 host_priv_t host_priv,
1324 register vm_map_t map,
1325 vm_offset_t start,
1326 vm_size_t size,
1327 vm_prot_t access)
1328{
1329 kern_return_t rc;
1330
1331 if (host_priv == HOST_PRIV_NULL)
1332 return KERN_INVALID_HOST;
1333
1334 assert(host_priv == &realhost);
1335
1336 if (map == VM_MAP_NULL)
1337 return KERN_INVALID_TASK;
1338
91447636 1339 if ((access & ~VM_PROT_ALL) || (start + size < start))
1c79356b
A
1340 return KERN_INVALID_ARGUMENT;
1341
91447636
A
1342 if (size == 0) {
1343 rc = KERN_SUCCESS;
1344 } else if (access != VM_PROT_NONE) {
1345 rc = vm_map_wire(map, vm_map_trunc_page(start),
1346 vm_map_round_page(start+size), access, TRUE);
1c79356b 1347 } else {
91447636
A
1348 rc = vm_map_unwire(map, vm_map_trunc_page(start),
1349 vm_map_round_page(start+size), TRUE);
1c79356b
A
1350 }
1351 return rc;
1352}
1353
1354/*
1355 * vm_msync
1356 *
1357 * Synchronises the memory range specified with its backing store
1358 * image by either flushing or cleaning the contents to the appropriate
91447636
A
1359 * memory manager.
1360 *
1361 * interpretation of sync_flags
1362 * VM_SYNC_INVALIDATE - discard pages, only return precious
1363 * pages to manager.
1364 *
1365 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
1366 * - discard pages, write dirty or precious
1367 * pages back to memory manager.
1368 *
1369 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
1370 * - write dirty or precious pages back to
1371 * the memory manager.
1372 *
1373 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
1374 * is a hole in the region, and we would
1375 * have returned KERN_SUCCESS, return
1376 * KERN_INVALID_ADDRESS instead.
1377 *
1378 * RETURNS
1379 * KERN_INVALID_TASK Bad task parameter
1380 * KERN_INVALID_ARGUMENT both sync and async were specified.
1381 * KERN_SUCCESS The usual.
1382 * KERN_INVALID_ADDRESS There was a hole in the region.
1383 */
1384
1385kern_return_t
1386mach_vm_msync(
1387 vm_map_t map,
1388 mach_vm_address_t address,
1389 mach_vm_size_t size,
1390 vm_sync_t sync_flags)
1391{
1392
1393 if (map == VM_MAP_NULL)
1394 return(KERN_INVALID_TASK);
1395
1396 return vm_map_msync(map, (vm_map_address_t)address,
1397 (vm_map_size_t)size, sync_flags);
1398}
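/*
 * Illustrative usage sketch (not part of the original file); file_addr
 * and file_size are placeholders for a mapped-file region. This pushes
 * any dirty pages back to the pager and waits for the I/O to complete.
 *
 *	kr = mach_vm_msync(mach_task_self(), file_addr, file_size,
 *	    VM_SYNC_SYNCHRONOUS);
 */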
1399
1400/*
1401 * vm_msync
1402 *
1403 * Synchronises the memory range specified with its backing store
1404 * image by either flushing or cleaning the contents to the appropriate
1405 * memory manager.
1c79356b
A
1406 *
1407 * interpretation of sync_flags
1408 * VM_SYNC_INVALIDATE - discard pages, only return precious
1409 * pages to manager.
1410 *
1411 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
1412 * - discard pages, write dirty or precious
1413 * pages back to memory manager.
1414 *
1415 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
1416 * - write dirty or precious pages back to
1417 * the memory manager.
1418 *
91447636
A
1419 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
1420 * is a hole in the region, and we would
1421 * have returned KERN_SUCCESS, return
1422 * KERN_INVALID_ADDRESS instead.
1423 *
1424 * The addressability of the range is limited to that which can
1425 * be described by a vm_address_t.
1c79356b
A
1426 *
1427 * RETURNS
1428 * KERN_INVALID_TASK Bad task parameter
1429 * KERN_INVALID_ARGUMENT both sync and async were specified.
1430 * KERN_SUCCESS The usual.
91447636 1431 * KERN_INVALID_ADDRESS There was a hole in the region.
1c79356b
A
1432 */
1433
1434kern_return_t
1435vm_msync(
1436 vm_map_t map,
1437 vm_address_t address,
1438 vm_size_t size,
1439 vm_sync_t sync_flags)
1440{
1c79356b 1441
91447636
A
1442 if (map == VM_MAP_NULL)
1443 return(KERN_INVALID_TASK);
1c79356b 1444
91447636
A
1445 return vm_map_msync(map, (vm_map_address_t)address,
1446 (vm_map_size_t)size, sync_flags);
1447}
1c79356b 1448
91447636
A
1449
1450/*
1451 * mach_vm_behavior_set
1452 *
1453 * Sets the paging behavior attribute for the specified range
1454 * in the specified map.
1455 *
1456 * This routine will fail with KERN_INVALID_ADDRESS if any address
1457 * in [start,start+size) is not a valid allocated memory region.
1458 */
1459kern_return_t
1460mach_vm_behavior_set(
1461 vm_map_t map,
1462 mach_vm_offset_t start,
1463 mach_vm_size_t size,
1464 vm_behavior_t new_behavior)
1465{
1466 if ((map == VM_MAP_NULL) || (start + size < start))
1467 return(KERN_INVALID_ARGUMENT);
1c79356b
A
1468
1469 if (size == 0)
91447636 1470 return KERN_SUCCESS;
1c79356b 1471
91447636
A
1472 return(vm_map_behavior_set(map, vm_map_trunc_page(start),
1473 vm_map_round_page(start+size), new_behavior));
1474}
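/*
 * Illustrative usage sketch (not part of the original file); addr and
 * size are placeholders. Declaring sequential access lets the paging
 * code favor read-ahead and early reclamation of pages the caller has
 * already passed over.
 *
 *	kr = mach_vm_behavior_set(mach_task_self(), addr, size,
 *	    VM_BEHAVIOR_SEQUENTIAL);
 */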
1c79356b 1475
1476/*
1477 * vm_behavior_set
1478 *
1479 * Sets the paging behavior attribute for the specified range
1480 * in the specified map.
1481 *
1482 * This routine will fail with KERN_INVALID_ADDRESS if any address
1483 * in [start,start+size) is not a valid allocated memory region.
1484 *
 1485 * This routine is potentially limited in addressability by the
1486 * use of vm_offset_t (if the map provided is larger than the
1487 * kernel's).
1488 */
1489kern_return_t
1490vm_behavior_set(
1491 vm_map_t map,
1492 vm_offset_t start,
1493 vm_size_t size,
1494 vm_behavior_t new_behavior)
1495{
1496 if ((map == VM_MAP_NULL) || (start + size < start))
1497 return(KERN_INVALID_ARGUMENT);
1c79356b 1498
91447636
A
1499 if (size == 0)
1500 return KERN_SUCCESS;
1c79356b 1501
91447636
A
1502 return(vm_map_behavior_set(map, vm_map_trunc_page(start),
1503 vm_map_round_page(start+size), new_behavior));
1504}
1c79356b 1505
91447636
A
1506/*
1507 * mach_vm_region:
1508 *
1509 * User call to obtain information about a region in
1510 * a task's address map. Currently, only one flavor is
1511 * supported.
1512 *
1513 * XXX The reserved and behavior fields cannot be filled
1514 * in until the vm merge from the IK is completed, and
1515 * vm_reserve is implemented.
1516 *
1517 * XXX Dependency: syscall_vm_region() also supports only one flavor.
1518 */
1c79356b 1519
91447636
A
1520kern_return_t
1521mach_vm_region(
1522 vm_map_t map,
1523 mach_vm_offset_t *address, /* IN/OUT */
1524 mach_vm_size_t *size, /* OUT */
1525 vm_region_flavor_t flavor, /* IN */
1526 vm_region_info_t info, /* OUT */
1527 mach_msg_type_number_t *count, /* IN/OUT */
1528 mach_port_t *object_name) /* OUT */
1529{
1530 vm_map_offset_t map_addr;
1531 vm_map_size_t map_size;
1532 kern_return_t kr;
1c79356b 1533
91447636
A
1534 if (VM_MAP_NULL == map)
1535 return KERN_INVALID_ARGUMENT;
1c79356b 1536
91447636
A
1537 map_addr = (vm_map_offset_t)*address;
1538 map_size = (vm_map_size_t)*size;
1c79356b 1539
91447636
A
1540 /* legacy conversion */
1541 if (VM_REGION_BASIC_INFO == flavor)
1542 flavor = VM_REGION_BASIC_INFO_64;
1c79356b 1543
91447636
A
1544 kr = vm_map_region(map,
1545 &map_addr, &map_size,
1546 flavor, info, count,
1547 object_name);
1c79356b 1548
91447636
A
1549 *address = map_addr;
1550 *size = map_size;
1551 return kr;
1552}
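/*
 * Illustrative usage sketch (not part of the original file): finding the
 * first region at or above a starting address in the caller's own map.
 * On return the address and size describe the entry that was found.
 *
 *	vm_region_basic_info_data_64_t binfo;
 *	mach_msg_type_number_t cnt = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t obj_name = MACH_PORT_NULL;
 *	mach_vm_address_t raddr = 0;
 *	mach_vm_size_t rsize = 0;
 *
 *	kr = mach_vm_region(mach_task_self(), &raddr, &rsize,
 *	    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&binfo, &cnt,
 *	    &obj_name);
 */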
1c79356b 1553
1554/*
1555 * vm_region_64 and vm_region:
1556 *
1557 * User call to obtain information about a region in
1558 * a task's address map. Currently, only one flavor is
1559 * supported.
1560 *
1561 * XXX The reserved and behavior fields cannot be filled
1562 * in until the vm merge from the IK is completed, and
1563 * vm_reserve is implemented.
1564 *
1565 * XXX Dependency: syscall_vm_region() also supports only one flavor.
1566 */
1c79356b 1567
91447636
A
1568kern_return_t
1569vm_region_64(
1570 vm_map_t map,
1571 vm_offset_t *address, /* IN/OUT */
1572 vm_size_t *size, /* OUT */
1573 vm_region_flavor_t flavor, /* IN */
1574 vm_region_info_t info, /* OUT */
1575 mach_msg_type_number_t *count, /* IN/OUT */
1576 mach_port_t *object_name) /* OUT */
1577{
1578 vm_map_offset_t map_addr;
1579 vm_map_size_t map_size;
1580 kern_return_t kr;
1c79356b 1581
91447636
A
1582 if (VM_MAP_NULL == map)
1583 return KERN_INVALID_ARGUMENT;
1c79356b 1584
91447636
A
1585 map_addr = (vm_map_offset_t)*address;
1586 map_size = (vm_map_size_t)*size;
1c79356b 1587
91447636
A
1588 /* legacy conversion */
1589 if (VM_REGION_BASIC_INFO == flavor)
1590 flavor = VM_REGION_BASIC_INFO_64;
1c79356b 1591
91447636
A
1592 kr = vm_map_region(map,
1593 &map_addr, &map_size,
1594 flavor, info, count,
1595 object_name);
1c79356b 1596
91447636
A
1597 *address = CAST_DOWN(vm_offset_t, map_addr);
1598 *size = CAST_DOWN(vm_size_t, map_size);
1c79356b 1599
91447636
A
1600 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
1601 return KERN_INVALID_ADDRESS;
1602 return kr;
1603}
1c79356b 1604
91447636
A
1605kern_return_t
1606vm_region(
1607 vm_map_t map,
1608 vm_address_t *address, /* IN/OUT */
1609 vm_size_t *size, /* OUT */
1610 vm_region_flavor_t flavor, /* IN */
1611 vm_region_info_t info, /* OUT */
1612 mach_msg_type_number_t *count, /* IN/OUT */
1613 mach_port_t *object_name) /* OUT */
1614{
1615 vm_map_address_t map_addr;
1616 vm_map_size_t map_size;
1617 kern_return_t kr;
1c79356b 1618
91447636
A
1619 if (VM_MAP_NULL == map)
1620 return KERN_INVALID_ARGUMENT;
1c79356b 1621
91447636
A
1622 map_addr = (vm_map_address_t)*address;
1623 map_size = (vm_map_size_t)*size;
1c79356b 1624
91447636
A
1625 kr = vm_map_region(map,
1626 &map_addr, &map_size,
1627 flavor, info, count,
1628 object_name);
1c79356b 1629
91447636
A
1630 *address = CAST_DOWN(vm_address_t, map_addr);
1631 *size = CAST_DOWN(vm_size_t, map_size);
1c79356b 1632
91447636
A
1633 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
1634 return KERN_INVALID_ADDRESS;
1635 return kr;
1636}
1c79356b
A
1637
1638/*
91447636
A
 1639 * mach_vm_region_recurse: A form of mach_vm_region which follows the
1640 * submaps in a target map
1c79356b 1641 *
1c79356b
A
1642 */
1643kern_return_t
91447636
A
1644mach_vm_region_recurse(
1645 vm_map_t map,
1646 mach_vm_address_t *address,
1647 mach_vm_size_t *size,
1648 uint32_t *depth,
1649 vm_region_recurse_info_t info,
1650 mach_msg_type_number_t *infoCnt)
1c79356b 1651{
91447636
A
1652 vm_map_address_t map_addr;
1653 vm_map_size_t map_size;
1654 kern_return_t kr;
1c79356b 1655
91447636
A
1656 if (VM_MAP_NULL == map)
1657 return KERN_INVALID_ARGUMENT;
1c79356b 1658
91447636
A
1659 map_addr = (vm_map_address_t)*address;
1660 map_size = (vm_map_size_t)*size;
1661
1662 kr = vm_map_region_recurse_64(
1663 map,
1664 &map_addr,
1665 &map_size,
1666 depth,
1667 (vm_region_submap_info_64_t)info,
1668 infoCnt);
1669
1670 *address = map_addr;
1671 *size = map_size;
1672 return kr;
1c79356b
A
1673}
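/*
 * Illustrative usage sketch (not part of the original file): the
 * recursing form descends into submaps up to the depth passed in, and
 * reports back the depth at which the returned entry was found.
 *
 *	vm_region_submap_info_data_64_t sinfo;
 *	mach_msg_type_number_t cnt = VM_REGION_SUBMAP_INFO_COUNT_64;
 *	uint32_t depth = 1;
 *	mach_vm_address_t raddr = 0;
 *	mach_vm_size_t rsize = 0;
 *
 *	kr = mach_vm_region_recurse(mach_task_self(), &raddr, &rsize,
 *	    &depth, (vm_region_recurse_info_t)&sinfo, &cnt);
 */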
1674
1675/*
1676 * vm_region_recurse: A form of vm_region which follows the
1677 * submaps in a target map
1678 *
1c79356b 1679 */
91447636
A
1680kern_return_t
1681vm_region_recurse_64(
1682 vm_map_t map,
1683 vm_address_t *address,
1684 vm_size_t *size,
1685 uint32_t *depth,
1686 vm_region_recurse_info_64_t info,
1687 mach_msg_type_number_t *infoCnt)
1c79356b 1688{
91447636
A
1689 vm_map_address_t map_addr;
1690 vm_map_size_t map_size;
1691 kern_return_t kr;
1692
1693 if (VM_MAP_NULL == map)
1694 return KERN_INVALID_ARGUMENT;
1695
1696 map_addr = (vm_map_address_t)*address;
1697 map_size = (vm_map_size_t)*size;
1698
1699 kr = vm_map_region_recurse_64(
1700 map,
1701 &map_addr,
1702 &map_size,
1703 depth,
1704 (vm_region_submap_info_64_t)info,
1705 infoCnt);
1c79356b 1706
91447636
A
1707 *address = CAST_DOWN(vm_address_t, map_addr);
1708 *size = CAST_DOWN(vm_size_t, map_size);
1709
1710 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
1711 return KERN_INVALID_ADDRESS;
1712 return kr;
1c79356b
A
1713}
1714
91447636
A
1715kern_return_t
1716vm_region_recurse(
1717 vm_map_t map,
1718 vm_offset_t *address, /* IN/OUT */
1719 vm_size_t *size, /* OUT */
1720 natural_t *depth, /* IN/OUT */
1721 vm_region_recurse_info_t info32, /* IN/OUT */
1722 mach_msg_type_number_t *infoCnt) /* IN/OUT */
1723{
1724 vm_region_submap_info_data_64_t info64;
1725 vm_region_submap_info_t info;
1726 vm_map_address_t map_addr;
1727 vm_map_size_t map_size;
1728 kern_return_t kr;
1729
1730 if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
1731 return KERN_INVALID_ARGUMENT;
1732
1733
1734 map_addr = (vm_map_address_t)*address;
1735 map_size = (vm_map_size_t)*size;
1736 info = (vm_region_submap_info_t)info32;
1737 *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;
1738
1739 kr = vm_map_region_recurse_64(map, &map_addr,&map_size,
1740 depth, &info64, infoCnt);
1741
1742 info->protection = info64.protection;
1743 info->max_protection = info64.max_protection;
1744 info->inheritance = info64.inheritance;
1745 info->offset = (uint32_t)info64.offset; /* trouble-maker */
1746 info->user_tag = info64.user_tag;
1747 info->pages_resident = info64.pages_resident;
1748 info->pages_shared_now_private = info64.pages_shared_now_private;
1749 info->pages_swapped_out = info64.pages_swapped_out;
1750 info->pages_dirtied = info64.pages_dirtied;
1751 info->ref_count = info64.ref_count;
1752 info->shadow_depth = info64.shadow_depth;
1753 info->external_pager = info64.external_pager;
1754 info->share_mode = info64.share_mode;
1755 info->is_submap = info64.is_submap;
1756 info->behavior = info64.behavior;
1757 info->object_id = info64.object_id;
1758 info->user_wired_count = info64.user_wired_count;
1759
1760 *address = CAST_DOWN(vm_address_t, map_addr);
1761 *size = CAST_DOWN(vm_size_t, map_size);
1762 *infoCnt = VM_REGION_SUBMAP_INFO_COUNT;
1763
1764 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
1765 return KERN_INVALID_ADDRESS;
1766 return kr;
1767}
1768
1769kern_return_t
1770vm_purgable_control(
1771 vm_map_t map,
1772 vm_offset_t address,
1773 vm_purgable_t control,
1774 int *state)
1775{
1776 if (VM_MAP_NULL == map)
1777 return KERN_INVALID_ARGUMENT;
1778
1779 return vm_map_purgable_control(map,
1780 vm_map_trunc_page(address),
1781 control,
1782 state);
1783}
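/*
 * Illustrative usage sketch (not part of the original file), assuming the
 * range at addr was allocated purgeable: marking it volatile lets the
 * system discard its pages under memory pressure; a later
 * VM_PURGABLE_GET_STATE reports whether the contents were purged.
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kr = vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 */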
1784
1785
1786/*
1787 * Ordinarily, the right to allocate CPM is restricted
1788 * to privileged applications (those that can gain access
91447636
A
1789 * to the host priv port). Set this variable to zero if
1790 * you want to let any application allocate CPM.
1c79356b
A
1791 */
1792unsigned int vm_allocate_cpm_privileged = 0;
1793
1794/*
1795 * Allocate memory in the specified map, with the caveat that
1796 * the memory is physically contiguous. This call may fail
1797 * if the system can't find sufficient contiguous memory.
1798 * This call may cause or lead to heart-stopping amounts of
1799 * paging activity.
1800 *
1801 * Memory obtained from this call should be freed in the
1802 * normal way, viz., via vm_deallocate.
1803 */
1804kern_return_t
1805vm_allocate_cpm(
1806 host_priv_t host_priv,
91447636
A
1807 vm_map_t map,
1808 vm_address_t *addr,
1809 vm_size_t size,
1c79356b
A
1810 int flags)
1811{
91447636
A
1812 vm_map_address_t map_addr;
1813 vm_map_size_t map_size;
1c79356b 1814 kern_return_t kr;
1c79356b 1815
91447636 1816 if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
1c79356b
A
1817 return KERN_INVALID_HOST;
1818
91447636 1819 if (VM_MAP_NULL == map)
1c79356b 1820 return KERN_INVALID_ARGUMENT;
1c79356b 1821
91447636
A
1822 map_addr = (vm_map_address_t)*addr;
1823 map_size = (vm_map_size_t)size;
1c79356b 1824
91447636
A
1825 kr = vm_map_enter_cpm(map,
1826 &map_addr,
1827 map_size,
1828 flags);
1c79356b 1829
91447636 1830 *addr = CAST_DOWN(vm_address_t, map_addr);
1c79356b
A
1831 return kr;
1832}
1833
1834
91447636
A
1835kern_return_t
1836mach_vm_page_query(
1837 vm_map_t map,
1838 mach_vm_offset_t offset,
1839 int *disposition,
1840 int *ref_count)
1841{
1842 if (VM_MAP_NULL == map)
1843 return KERN_INVALID_ARGUMENT;
1c79356b 1844
91447636
A
1845 return vm_map_page_info(map,
1846 vm_map_trunc_page(offset),
1847 disposition, ref_count);
1848}
1c79356b
A
1849
1850kern_return_t
91447636
A
1851vm_map_page_query(
1852 vm_map_t map,
1853 vm_offset_t offset,
1854 int *disposition,
1855 int *ref_count)
1c79356b 1856{
91447636
A
1857 if (VM_MAP_NULL == map)
1858 return KERN_INVALID_ARGUMENT;
1859
1860 return vm_map_page_info(map,
1861 vm_map_trunc_page(offset),
1862 disposition, ref_count);
1c79356b
A
1863}
1864
91447636 1865/* map a (whole) upl into an address space */
1c79356b 1866kern_return_t
91447636
A
1867vm_upl_map(
1868 vm_map_t map,
1869 upl_t upl,
1870 vm_offset_t *dst_addr)
1c79356b 1871{
91447636 1872 vm_map_offset_t map_addr;
1c79356b
A
1873 kern_return_t kr;
1874
91447636
A
1875 if (VM_MAP_NULL == map)
1876 return KERN_INVALID_ARGUMENT;
1c79356b 1877
91447636
A
1878 kr = vm_map_enter_upl(map, upl, &map_addr);
1879 *dst_addr = CAST_DOWN(vm_offset_t, map_addr);
1880 return kr;
1881}
1c79356b 1882
91447636
A
1883kern_return_t
1884vm_upl_unmap(
1885 vm_map_t map,
1886 upl_t upl)
1887{
1888 if (VM_MAP_NULL == map)
1889 return KERN_INVALID_ARGUMENT;
1c79356b 1890
91447636
A
1891 return (vm_map_remove_upl(map, upl));
1892}
1c79356b 1893
91447636
A
1894/* Retrieve a upl for an object underlying an address range in a map */
1895
1896kern_return_t
1897vm_map_get_upl(
1898 vm_map_t map,
cc9f6e38 1899 vm_map_offset_t map_offset,
91447636
A
1900 upl_size_t *upl_size,
1901 upl_t *upl,
1902 upl_page_info_array_t page_list,
1903 unsigned int *count,
1904 int *flags,
1905 int force_data_sync)
1906{
91447636
A
1907 int map_flags;
1908 kern_return_t kr;
1c79356b 1909
91447636
A
1910 if (VM_MAP_NULL == map)
1911 return KERN_INVALID_ARGUMENT;
1c79356b 1912
91447636
A
1913 map_flags = *flags & ~UPL_NOZEROFILL;
1914 if (force_data_sync)
1915 map_flags |= UPL_FORCE_DATA_SYNC;
1c79356b 1916
91447636
A
1917 kr = vm_map_create_upl(map,
1918 map_offset,
1919 upl_size,
1920 upl,
1921 page_list,
1922 count,
1923 &map_flags);
1c79356b 1924
91447636
A
1925 *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
1926 return kr;
1c79356b
A
1927}
1928
1929
91447636
A
1930__private_extern__ kern_return_t
1931mach_memory_entry_allocate(
1932 vm_named_entry_t *user_entry_p,
1933 ipc_port_t *user_handle_p); /* forward */
1c79356b
A
1934
1935/*
91447636
A
1936 * mach_make_memory_entry_64
1937 *
1938 * Think of it as a two-stage vm_remap() operation. First
 1939 * you get a handle. Second, you map that handle
 1940 * somewhere else, rather than doing it all at once (and
1941 * without needing access to the other whole map).
1942 */
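/*
 * Illustrative usage sketch (not part of the original file); source_addr
 * is a placeholder. Stage one creates a handle covering part of the
 * caller's address space; stage two, possibly in another task that
 * received the port, maps the handle with mach_vm_map().
 *
 *	memory_object_size_t entry_size = 4096;
 *	mach_port_t mem_entry = MACH_PORT_NULL;
 *
 *	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size,
 *	    source_addr, VM_PROT_READ | VM_PROT_WRITE, &mem_entry,
 *	    MACH_PORT_NULL);
 */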
1943
1944kern_return_t
1945mach_make_memory_entry_64(
1946 vm_map_t target_map,
91447636
A
1947 memory_object_size_t *size,
1948 memory_object_offset_t offset,
1c79356b
A
1949 vm_prot_t permission,
1950 ipc_port_t *object_handle,
91447636 1951 ipc_port_t parent_handle)
1c79356b
A
1952{
1953 vm_map_version_t version;
91447636
A
1954 vm_named_entry_t parent_entry;
1955 vm_named_entry_t user_entry;
1c79356b 1956 ipc_port_t user_handle;
1c79356b 1957 kern_return_t kr;
91447636 1958 vm_map_t real_map;
1c79356b
A
1959
1960 /* needed for call to vm_map_lookup_locked */
91447636 1961 boolean_t wired;
1c79356b 1962 vm_object_offset_t obj_off;
91447636
A
1963 vm_prot_t prot;
1964 vm_map_offset_t lo_offset, hi_offset;
1c79356b 1965 vm_behavior_t behavior;
91447636
A
1966 vm_object_t object;
1967 vm_object_t shadow_object;
1c79356b
A
1968
1969 /* needed for direct map entry manipulation */
1970 vm_map_entry_t map_entry;
9bccf70c 1971 vm_map_entry_t next_entry;
91447636
A
1972 vm_map_t local_map;
1973 vm_map_t original_map = target_map;
1974 vm_map_size_t total_size;
1975 vm_map_size_t map_size;
1976 vm_map_offset_t map_offset;
1977 vm_map_offset_t local_offset;
1c79356b 1978 vm_object_size_t mappable_size;
9bccf70c 1979
91447636
A
1980 unsigned int access;
1981 vm_prot_t protections;
1982 unsigned int wimg_mode;
1983 boolean_t cache_attr = FALSE;
1984
1985 if (((permission & 0x00FF0000) &
1986 ~(MAP_MEM_ONLY |
1987 MAP_MEM_NAMED_CREATE |
1988 MAP_MEM_PURGABLE |
1989 MAP_MEM_NAMED_REUSE))) {
1990 /*
1991 * Unknown flag: reject for forward compatibility.
1992 */
1993 return KERN_INVALID_VALUE;
1994 }
1995
1996 if (parent_handle != IP_NULL &&
1997 ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
1998 parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
1999 } else {
2000 parent_entry = NULL;
2001 }
55e303ae
A
2002
2003 protections = permission & VM_PROT_ALL;
2004 access = GET_MAP_MEM(permission);
2005
91447636
A
2006 user_handle = IP_NULL;
2007 user_entry = NULL;
2008
2009 map_offset = vm_map_trunc_page(offset);
2010 map_size = vm_map_round_page(*size);
1c79356b 2011
91447636
A
2012 if (permission & MAP_MEM_ONLY) {
2013 boolean_t parent_is_object;
55e303ae 2014
91447636 2015 if (parent_entry == NULL) {
55e303ae
A
2016 return KERN_INVALID_ARGUMENT;
2017 }
91447636
A
2018
2019 parent_is_object = !(parent_entry->is_sub_map || parent_entry->is_pager);
2020 object = parent_entry->backing.object;
2021 if(parent_is_object && object != VM_OBJECT_NULL)
55e303ae 2022 wimg_mode = object->wimg_bits;
91447636
A
2023 else
2024 wimg_mode = VM_WIMG_DEFAULT;
2025 if((access != GET_MAP_MEM(parent_entry->protection)) &&
2026 !(parent_entry->protection & VM_PROT_WRITE)) {
55e303ae
A
2027 return KERN_INVALID_RIGHT;
2028 }
2029 if(access == MAP_MEM_IO) {
91447636 2030 SET_MAP_MEM(access, parent_entry->protection);
55e303ae
A
2031 wimg_mode = VM_WIMG_IO;
2032 } else if (access == MAP_MEM_COPYBACK) {
91447636 2033 SET_MAP_MEM(access, parent_entry->protection);
55e303ae
A
2034 wimg_mode = VM_WIMG_DEFAULT;
2035 } else if (access == MAP_MEM_WTHRU) {
91447636 2036 SET_MAP_MEM(access, parent_entry->protection);
55e303ae
A
2037 wimg_mode = VM_WIMG_WTHRU;
2038 } else if (access == MAP_MEM_WCOMB) {
91447636 2039 SET_MAP_MEM(access, parent_entry->protection);
55e303ae
A
2040 wimg_mode = VM_WIMG_WCOMB;
2041 }
91447636 2042 if(parent_is_object && object &&
55e303ae
A
2043 (access != MAP_MEM_NOOP) &&
2044 (!(object->nophyscache))) {
2045 if(object->wimg_bits != wimg_mode) {
2046 vm_page_t p;
2047 if ((wimg_mode == VM_WIMG_IO)
2048 || (wimg_mode == VM_WIMG_WCOMB))
2049 cache_attr = TRUE;
2050 else
2051 cache_attr = FALSE;
2052 vm_object_lock(object);
91447636 2053 vm_object_paging_wait(object, THREAD_UNINT);
55e303ae
A
2054 object->wimg_bits = wimg_mode;
2055 queue_iterate(&object->memq,
2056 p, vm_page_t, listq) {
2057 if (!p->fictitious) {
91447636
A
2058 pmap_disconnect(p->phys_page);
2059 if (cache_attr)
2060 pmap_sync_page_attributes_phys(p->phys_page);
55e303ae
A
2061 }
2062 }
2063 vm_object_unlock(object);
2064 }
2065 }
91447636
A
2066 if (object_handle)
2067 *object_handle = IP_NULL;
55e303ae
A
2068 return KERN_SUCCESS;
2069 }
2070
91447636
A
2071 if(permission & MAP_MEM_NAMED_CREATE) {
2072 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
2073 if (kr != KERN_SUCCESS) {
2074 return KERN_FAILURE;
2075 }
55e303ae 2076
91447636
A
2077 /*
2078 * Force the creation of the VM object now.
2079 */
2080 if (map_size > (vm_map_size_t) VM_MAX_ADDRESS) {
2081 /*
2082 * LP64todo - for now, we can only allocate 4GB
2083 * internal objects because the default pager can't
2084 * page bigger ones. Remove this when it can.
2085 */
2086 kr = KERN_FAILURE;
2087 goto make_mem_done;
2088 }
1c79356b 2089
91447636
A
2090 object = vm_object_allocate(map_size);
2091 assert(object != VM_OBJECT_NULL);
1c79356b 2092
91447636
A
2093 if (permission & MAP_MEM_PURGABLE) {
2094 if (! (permission & VM_PROT_WRITE)) {
2095 /* if we can't write, we can't purge */
2096 vm_object_deallocate(object);
2097 kr = KERN_INVALID_ARGUMENT;
2098 goto make_mem_done;
2099 }
2100 object->purgable = VM_OBJECT_PURGABLE_NONVOLATILE;
2101 }
1c79356b 2102
91447636
A
2103 /*
2104 * The VM object is brand new and nobody else knows about it,
2105 * so we don't need to lock it.
2106 */
1c79356b 2107
91447636
A
2108 wimg_mode = object->wimg_bits;
2109 if (access == MAP_MEM_IO) {
2110 wimg_mode = VM_WIMG_IO;
2111 } else if (access == MAP_MEM_COPYBACK) {
2112 wimg_mode = VM_WIMG_DEFAULT;
2113 } else if (access == MAP_MEM_WTHRU) {
2114 wimg_mode = VM_WIMG_WTHRU;
2115 } else if (access == MAP_MEM_WCOMB) {
2116 wimg_mode = VM_WIMG_WCOMB;
2117 }
2118 if (access != MAP_MEM_NOOP) {
2119 object->wimg_bits = wimg_mode;
2120 }
2121 /* the object has no pages, so no WIMG bits to update here */
1c79356b 2122
91447636
A
2123 /*
2124 * XXX
2125 * We use this path when we want to make sure that
2126 * nobody messes with the object (coalesce, for
2127 * example) before we map it.
2128 * We might want to use these objects for transposition via
2129 * vm_object_transpose() too, so we don't want any copy or
2130 * shadow objects either...
2131 */
2132 object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
1c79356b 2133
91447636
A
2134 user_entry->backing.object = object;
2135 user_entry->internal = TRUE;
2136 user_entry->is_sub_map = FALSE;
2137 user_entry->is_pager = FALSE;
2138 user_entry->offset = 0;
2139 user_entry->protection = protections;
2140 SET_MAP_MEM(access, user_entry->protection);
2141 user_entry->size = map_size;
55e303ae
A
2142
2143 /* user_object pager and internal fields are not used */
2144 /* when the object field is filled in. */
2145
91447636 2146 *size = CAST_DOWN(vm_size_t, map_size);
55e303ae
A
2147 *object_handle = user_handle;
2148 return KERN_SUCCESS;
2149 }
2150
91447636
A
2151 if (parent_entry == NULL ||
2152 (permission & MAP_MEM_NAMED_REUSE)) {
2153
2154 /* Create a named object based on address range within the task map */
2155 /* Go find the object at given address */
1c79356b 2156
91447636 2157redo_lookup:
1c79356b
A
2158 vm_map_lock_read(target_map);
2159
2160 /* get the object associated with the target address */
2161 /* note we check the permission of the range against */
2162 /* that requested by the caller */
2163
91447636 2164 kr = vm_map_lookup_locked(&target_map, map_offset,
55e303ae 2165 protections, &version,
1c79356b 2166 &object, &obj_off, &prot, &wired, &behavior,
91447636 2167 &lo_offset, &hi_offset, &real_map);
1c79356b
A
2168 if (kr != KERN_SUCCESS) {
2169 vm_map_unlock_read(target_map);
2170 goto make_mem_done;
2171 }
55e303ae 2172 if (((prot & protections) != protections)
9bccf70c 2173 || (object == kernel_object)) {
1c79356b
A
2174 kr = KERN_INVALID_RIGHT;
2175 vm_object_unlock(object);
2176 vm_map_unlock_read(target_map);
91447636
A
2177 if(real_map != target_map)
2178 vm_map_unlock_read(real_map);
9bccf70c
A
2179 if(object == kernel_object) {
2180 printf("Warning: Attempt to create a named"
2181 " entry from the kernel_object\n");
2182 }
1c79356b
A
2183 goto make_mem_done;
2184 }
2185
2186 /* We have an object, now check to see if this object */
2187 /* is suitable. If not, create a shadow and share that */
91447636
A
2188
2189 /*
2190 * We have to unlock the VM object to avoid deadlocking with
2191 * a VM map lock (the lock ordering is map, the object), if we
2192 * need to modify the VM map to create a shadow object. Since
2193 * we might release the VM map lock below anyway, we have
2194 * to release the VM map lock now.
2195 * XXX FBDP There must be a way to avoid this double lookup...
2196 *
2197 * Take an extra reference on the VM object to make sure it's
2198 * not going to disappear.
2199 */
2200 vm_object_reference_locked(object); /* extra ref to hold obj */
2201 vm_object_unlock(object);
2202
9bccf70c 2203 local_map = original_map;
91447636 2204 local_offset = map_offset;
9bccf70c
A
2205 if(target_map != local_map) {
2206 vm_map_unlock_read(target_map);
91447636
A
2207 if(real_map != target_map)
2208 vm_map_unlock_read(real_map);
9bccf70c
A
2209 vm_map_lock_read(local_map);
2210 target_map = local_map;
91447636 2211 real_map = local_map;
9bccf70c 2212 }
1c79356b 2213 while(TRUE) {
9bccf70c
A
2214 if(!vm_map_lookup_entry(local_map,
2215 local_offset, &map_entry)) {
1c79356b 2216 kr = KERN_INVALID_ARGUMENT;
1c79356b 2217 vm_map_unlock_read(target_map);
91447636
A
2218 if(real_map != target_map)
2219 vm_map_unlock_read(real_map);
2220 vm_object_deallocate(object); /* release extra ref */
2221 object = VM_OBJECT_NULL;
1c79356b
A
2222 goto make_mem_done;
2223 }
2224 if(!(map_entry->is_sub_map)) {
2225 if(map_entry->object.vm_object != object) {
2226 kr = KERN_INVALID_ARGUMENT;
1c79356b 2227 vm_map_unlock_read(target_map);
91447636
A
2228 if(real_map != target_map)
2229 vm_map_unlock_read(real_map);
2230 vm_object_deallocate(object); /* release extra ref */
2231 object = VM_OBJECT_NULL;
1c79356b
A
2232 goto make_mem_done;
2233 }
2234 break;
2235 } else {
9bccf70c
A
2236 vm_map_t tmap;
2237 tmap = local_map;
1c79356b 2238 local_map = map_entry->object.sub_map;
9bccf70c 2239
1c79356b 2240 vm_map_lock_read(local_map);
9bccf70c 2241 vm_map_unlock_read(tmap);
1c79356b 2242 target_map = local_map;
91447636 2243 real_map = local_map;
9bccf70c
A
2244 local_offset = local_offset - map_entry->vme_start;
2245 local_offset += map_entry->offset;
1c79356b
A
2246 }
2247 }
91447636
A
2248
2249 /*
2250 * We found the VM map entry, lock the VM object again.
2251 */
2252 vm_object_lock(object);
2253 if(map_entry->wired_count) {
2254 /* JMM - The check below should be reworked instead. */
2255 object->true_share = TRUE;
2256 }
55e303ae 2257 if(((map_entry->max_protection) & protections) != protections) {
1c79356b
A
2258 kr = KERN_INVALID_RIGHT;
2259 vm_object_unlock(object);
2260 vm_map_unlock_read(target_map);
91447636
A
2261 if(real_map != target_map)
2262 vm_map_unlock_read(real_map);
2263 vm_object_deallocate(object);
2264 object = VM_OBJECT_NULL;
1c79356b
A
2265 goto make_mem_done;
2266 }
9bccf70c
A
2267
2268 mappable_size = hi_offset - obj_off;
2269 total_size = map_entry->vme_end - map_entry->vme_start;
91447636 2270 if(map_size > mappable_size) {
9bccf70c
A
2271 /* try to extend mappable size if the entries */
2272 /* following are from the same object and are */
2273 /* compatible */
2274 next_entry = map_entry->vme_next;
2275 /* let's see if the next map entry is still */
2276 /* pointing at this object and is contiguous */
91447636 2277 while(map_size > mappable_size) {
9bccf70c
A
2278 if((next_entry->object.vm_object == object) &&
2279 (next_entry->vme_start ==
2280 next_entry->vme_prev->vme_end) &&
2281 (next_entry->offset ==
2282 next_entry->vme_prev->offset +
2283 (next_entry->vme_prev->vme_end -
2284 next_entry->vme_prev->vme_start))) {
2285 if(((next_entry->max_protection)
55e303ae 2286 & protections) != protections) {
9bccf70c
A
2287 break;
2288 }
55e303ae
A
2289 if (next_entry->needs_copy !=
2290 map_entry->needs_copy)
2291 break;
9bccf70c
A
2292 mappable_size += next_entry->vme_end
2293 - next_entry->vme_start;
2294 total_size += next_entry->vme_end
2295 - next_entry->vme_start;
2296 next_entry = next_entry->vme_next;
2297 } else {
2298 break;
2299 }
2300
2301 }
2302 }
2303
1c79356b
A
2304 if(object->internal) {
2305 /* vm_map_lookup_locked will create a shadow if */
2306 /* needs_copy is set but does not check for the */
2307 /* other two conditions shown. It is important to */
2308 /* set up an object which will not be pulled from */
2309 /* under us. */
2310
0b4e3aa0 2311 if ((map_entry->needs_copy || object->shadowed ||
9bccf70c
A
2312 (object->size > total_size))
2313 && !object->true_share) {
91447636
A
2314 /*
2315 * We have to unlock the VM object before
2316 * trying to upgrade the VM map lock, to
2317 * honor lock ordering (map then object).
2318 * Otherwise, we would deadlock if another
2319 * thread holds a read lock on the VM map and
2320 * is trying to acquire the VM object's lock.
2321 * We still hold an extra reference on the
2322 * VM object, guaranteeing that it won't
2323 * disappear.
2324 */
2325 vm_object_unlock(object);
2326
1c79356b 2327 if (vm_map_lock_read_to_write(target_map)) {
91447636
A
2328 /*
2329 * We couldn't upgrade our VM map lock
2330 * from "read" to "write" and we lost
2331 * our "read" lock.
2332 * Start all over again...
2333 */
2334 vm_object_deallocate(object); /* extra ref */
2335 target_map = original_map;
1c79356b
A
2336 goto redo_lookup;
2337 }
91447636 2338 vm_object_lock(object);
1c79356b 2339
55e303ae
A
2340 /*
2341 * JMM - We need to avoid coming here when the object
2342 * is wired by anybody, not just the current map. Why
2343 * couldn't we use the standard vm_object_copy_quickly()
2344 * approach here?
2345 */
2346
1c79356b 2347 /* create a shadow object */
9bccf70c
A
2348 vm_object_shadow(&map_entry->object.vm_object,
2349 &map_entry->offset, total_size);
2350 shadow_object = map_entry->object.vm_object;
2351 vm_object_unlock(object);
91447636 2352
9bccf70c
A
2353 vm_object_pmap_protect(
2354 object, map_entry->offset,
2355 total_size,
2356 ((map_entry->is_shared
2357 || target_map->mapped)
2358 ? PMAP_NULL :
2359 target_map->pmap),
2360 map_entry->vme_start,
21362eb3 2361 map_entry->protection & ~VM_PROT_WRITE);
9bccf70c
A
2362 total_size -= (map_entry->vme_end
2363 - map_entry->vme_start);
2364 next_entry = map_entry->vme_next;
2365 map_entry->needs_copy = FALSE;
2366 while (total_size) {
2367 if(next_entry->object.vm_object == object) {
55e303ae
A
2368 shadow_object->ref_count++;
2369 vm_object_res_reference(shadow_object);
9bccf70c
A
2370 next_entry->object.vm_object
2371 = shadow_object;
55e303ae 2372 vm_object_deallocate(object);
9bccf70c
A
2373 next_entry->offset
2374 = next_entry->vme_prev->offset +
2375 (next_entry->vme_prev->vme_end
2376 - next_entry->vme_prev->vme_start);
2377 next_entry->needs_copy = FALSE;
2378 } else {
2379 panic("mach_make_memory_entry_64:"
2380 " map entries out of sync\n");
2381 }
2382 total_size -=
2383 next_entry->vme_end
2384 - next_entry->vme_start;
2385 next_entry = next_entry->vme_next;
2386 }
2387
91447636
A
2388 /*
2389 * Transfer our extra reference to the
2390 * shadow object.
2391 */
2392 vm_object_reference_locked(shadow_object);
2393 vm_object_deallocate(object); /* extra ref */
9bccf70c 2394 object = shadow_object;
91447636 2395
9bccf70c
A
2396 obj_off = (local_offset - map_entry->vme_start)
2397 + map_entry->offset;
1c79356b 2398
91447636
A
2399 vm_map_lock_write_to_read(target_map);
2400 vm_object_lock(object);
1c79356b
A
2401
2402 }
2403 }
2404
2405 /* Note: in the future we could (if necessary) allow for */
2406 /* lists of memory objects; that would better support */
2407 /* fragmentation, but is it necessary? The user should */
2408 /* be encouraged to create address-space-oriented shared */
2409 /* objects from CLEAN memory regions which have a known */
2410 /* and defined history, i.e. no inheritance sharing; make */
2411 /* this call before making the region the target of IPCs, */
2412 /* etc. The code above, protecting against delayed copy */
2413 /* and the like, is mostly defensive. */
2414
55e303ae
A
2415 wimg_mode = object->wimg_bits;
2416 if(!(object->nophyscache)) {
2417 if(access == MAP_MEM_IO) {
2418 wimg_mode = VM_WIMG_IO;
2419 } else if (access == MAP_MEM_COPYBACK) {
2420 wimg_mode = VM_WIMG_USE_DEFAULT;
2421 } else if (access == MAP_MEM_WTHRU) {
2422 wimg_mode = VM_WIMG_WTHRU;
2423 } else if (access == MAP_MEM_WCOMB) {
2424 wimg_mode = VM_WIMG_WCOMB;
2425 }
2426 }
d7e50217 2427
de355530 2428 object->true_share = TRUE;
55e303ae
A
2429 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
2430 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
2431
91447636
A
2432 /*
2433 * The memory entry now points to this VM object and we
2434 * need to hold a reference on the VM object. Use the extra
2435 * reference we took earlier to keep the object alive when we
2436 * had to unlock it.
2437 */
2438
55e303ae 2439 vm_map_unlock_read(target_map);
91447636
A
2440 if(real_map != target_map)
2441 vm_map_unlock_read(real_map);
55e303ae
A
2442
2443 if(object->wimg_bits != wimg_mode) {
2444 vm_page_t p;
2445
2446 vm_object_paging_wait(object, THREAD_UNINT);
2447
91447636
A
2448 if ((wimg_mode == VM_WIMG_IO)
2449 || (wimg_mode == VM_WIMG_WCOMB))
2450 cache_attr = TRUE;
2451 else
2452 cache_attr = FALSE;
2453
55e303ae
A
2454 queue_iterate(&object->memq,
2455 p, vm_page_t, listq) {
2456 if (!p->fictitious) {
91447636
A
2457 pmap_disconnect(p->phys_page);
2458 if (cache_attr)
2459 pmap_sync_page_attributes_phys(p->phys_page);
55e303ae
A
2460 }
2461 }
2462 object->wimg_bits = wimg_mode;
2463 }
1c79356b
A
2464
2465 /* the size of the mapped entry that overlaps with the */
2466 /* region targeted for sharing, i.e. */
2467 /* (entry_end - entry_start) - */
2468 /* (offset of our starting address within the entry); */
2469 /* clamp the requested size to it: */
2470
91447636
A
2471 if(map_size > mappable_size)
2472 map_size = mappable_size;
2473
2474 if (permission & MAP_MEM_NAMED_REUSE) {
2475 /*
2476 * Compare what we got with the "parent_entry".
2477 * If they match, re-use the "parent_entry" instead
2478 * of creating a new one.
2479 */
2480 if (parent_entry != NULL &&
2481 parent_entry->backing.object == object &&
2482 parent_entry->internal == object->internal &&
2483 parent_entry->is_sub_map == FALSE &&
2484 parent_entry->is_pager == FALSE &&
2485 parent_entry->offset == obj_off &&
2486 parent_entry->protection == protections &&
2487 parent_entry->size == map_size) {
2488 /*
2489 * We have a match: re-use "parent_entry".
2490 */
2491 /* release our extra reference on object */
2492 vm_object_unlock(object);
2493 vm_object_deallocate(object);
2494 /* parent_entry->ref_count++; XXX ? */
2495 /* Get an extra send-right on handle */
2496 ipc_port_copy_send(parent_handle);
2497 *object_handle = parent_handle;
2498 return KERN_SUCCESS;
2499 } else {
2500 /*
2501 * No match: we need to create a new entry.
2502 * fall through...
2503 */
2504 }
2505 }
2506
2507 vm_object_unlock(object);
2508 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2509 != KERN_SUCCESS) {
2510 /* release our unused reference on the object */
2511 vm_object_deallocate(object);
2512 return KERN_FAILURE;
2513 }
1c79356b 2514
91447636
A
2515 user_entry->backing.object = object;
2516 user_entry->internal = object->internal;
2517 user_entry->is_sub_map = FALSE;
2518 user_entry->is_pager = FALSE;
2519 user_entry->offset = obj_off;
2520 user_entry->protection = permission;
2521 user_entry->size = map_size;
1c79356b
A
2522
2523 /* user_object pager and internal fields are not used */
2524 /* when the object field is filled in. */
2525
91447636 2526 *size = CAST_DOWN(vm_size_t, map_size);
1c79356b 2527 *object_handle = user_handle;
1c79356b 2528 return KERN_SUCCESS;
1c79356b 2529
91447636 2530 } else {
1c79356b 2531 /* The new object will be based on an existing named object */
91447636
A
2532
2533 if (parent_entry == NULL) {
1c79356b
A
2534 kr = KERN_INVALID_ARGUMENT;
2535 goto make_mem_done;
2536 }
91447636 2537 if((offset + map_size) > parent_entry->size) {
1c79356b
A
2538 kr = KERN_INVALID_ARGUMENT;
2539 goto make_mem_done;
2540 }
2541
91447636
A
2542 if((protections & parent_entry->protection) != protections) {
2543 kr = KERN_PROTECTION_FAILURE;
2544 goto make_mem_done;
2545 }
2546
2547 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2548 != KERN_SUCCESS) {
2549 kr = KERN_FAILURE;
2550 goto make_mem_done;
55e303ae 2551 }
91447636
A
2552
2553 user_entry->size = map_size;
2554 user_entry->offset = parent_entry->offset + map_offset;
2555 user_entry->is_sub_map = parent_entry->is_sub_map;
2556 user_entry->is_pager = parent_entry->is_pager;
2557 user_entry->internal = parent_entry->internal;
2558 user_entry->protection = protections;
2559
2560 if(access != MAP_MEM_NOOP) {
2561 SET_MAP_MEM(access, user_entry->protection);
1c79356b 2562 }
91447636
A
2563
2564 if(parent_entry->is_sub_map) {
2565 user_entry->backing.map = parent_entry->backing.map;
2566 vm_map_lock(user_entry->backing.map);
2567 user_entry->backing.map->ref_count++;
2568 vm_map_unlock(user_entry->backing.map);
1c79356b 2569 }
91447636
A
2570 else if (parent_entry->is_pager) {
2571 user_entry->backing.pager = parent_entry->backing.pager;
2572 /* JMM - don't we need a reference here? */
2573 } else {
2574 object = parent_entry->backing.object;
2575 assert(object != VM_OBJECT_NULL);
2576 user_entry->backing.object = object;
2577 /* we now point to this object, hold on */
2578 vm_object_reference(object);
2579 vm_object_lock(object);
2580 object->true_share = TRUE;
2581 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
2582 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
2583 vm_object_unlock(object);
1c79356b 2584 }
91447636 2585 *size = CAST_DOWN(vm_size_t, map_size);
1c79356b
A
2586 *object_handle = user_handle;
2587 return KERN_SUCCESS;
2588 }
2589
1c79356b 2590make_mem_done:
91447636
A
2591 if (user_handle != IP_NULL) {
2592 ipc_port_dealloc_kernel(user_handle);
2593 }
2594 if (user_entry != NULL) {
2595 kfree(user_entry, sizeof *user_entry);
2596 }
2597 return kr;
2598}
2599
2600kern_return_t
2601_mach_make_memory_entry(
2602 vm_map_t target_map,
2603 memory_object_size_t *size,
2604 memory_object_offset_t offset,
2605 vm_prot_t permission,
2606 ipc_port_t *object_handle,
2607 ipc_port_t parent_entry)
2608{
2609 memory_object_offset_t mo_size;
2610 kern_return_t kr;
2611
2612 mo_size = (memory_object_offset_t)*size;
2613 kr = mach_make_memory_entry_64(target_map, &mo_size,
2614 (memory_object_offset_t)offset, permission, object_handle,
2615 parent_entry);
2616 *size = mo_size;
1c79356b
A
2617 return kr;
2618}
2619
2620kern_return_t
2621mach_make_memory_entry(
2622 vm_map_t target_map,
2623 vm_size_t *size,
2624 vm_offset_t offset,
2625 vm_prot_t permission,
2626 ipc_port_t *object_handle,
2627 ipc_port_t parent_entry)
91447636
A
2628{
2629 memory_object_offset_t mo_size;
1c79356b
A
2630 kern_return_t kr;
2631
91447636
A
2632 mo_size = (memory_object_offset_t)*size;
2633 kr = mach_make_memory_entry_64(target_map, &mo_size,
2634 (memory_object_offset_t)offset, permission, object_handle,
1c79356b 2635 parent_entry);
91447636 2636 *size = CAST_DOWN(vm_size_t, mo_size);
1c79356b
A
2637 return kr;
2638}
2639
2640/*
91447636
A
2641 * task_wire
2642 *
2643 * Set or clear the map's wiring_required flag. This flag, if set,
2644 * will cause all future virtual memory allocation to allocate
2645 * user wired memory. Unwiring pages wired down as a result of
2646 * this routine is done with the vm_wire interface.
1c79356b 2647 */
1c79356b 2648kern_return_t
91447636
A
2649task_wire(
2650 vm_map_t map,
2651 boolean_t must_wire)
2652{
2653 if (map == VM_MAP_NULL)
2654 return(KERN_INVALID_ARGUMENT);
2655
2656 if (must_wire)
2657 map->wiring_required = TRUE;
2658 else
2659 map->wiring_required = FALSE;
2660
2661 return(KERN_SUCCESS);
2662}
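/*
 * Illustrative sketch (not part of the original file): with
 * wiring_required set, subsequent allocations in the map come back wired;
 * clearing the flag restores normal pageable allocation.  Uses
 * vm_allocate() from earlier in this file; current_map() as the target
 * and the helper name are assumptions.
 */
#if 0
static kern_return_t
example_wired_scratch(vm_offset_t *addr, vm_size_t size)
{
	vm_map_t	map = current_map();
	kern_return_t	kr;

	task_wire(map, TRUE);			/* future allocations wired */
	kr = vm_allocate(map, addr, size, VM_FLAGS_ANYWHERE);
	task_wire(map, FALSE);			/* back to pageable */

	return kr;
}
#endif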
2663
2664__private_extern__ kern_return_t
2665mach_memory_entry_allocate(
2666 vm_named_entry_t *user_entry_p,
2667 ipc_port_t *user_handle_p)
1c79356b 2668{
91447636 2669 vm_named_entry_t user_entry;
1c79356b 2670 ipc_port_t user_handle;
91447636 2671 ipc_port_t previous;
1c79356b 2672
91447636
A
2673 user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
2674 if (user_entry == NULL)
1c79356b 2675 return KERN_FAILURE;
1c79356b 2676
91447636 2677 named_entry_lock_init(user_entry);
1c79356b 2678
91447636
A
2679 user_handle = ipc_port_alloc_kernel();
2680 if (user_handle == IP_NULL) {
2681 kfree(user_entry, sizeof *user_entry);
2682 return KERN_FAILURE;
2683 }
1c79356b
A
2684 ip_lock(user_handle);
2685
2686 /* make a sonce right */
2687 user_handle->ip_sorights++;
2688 ip_reference(user_handle);
2689
2690 user_handle->ip_destination = IP_NULL;
2691 user_handle->ip_receiver_name = MACH_PORT_NULL;
2692 user_handle->ip_receiver = ipc_space_kernel;
2693
2694 /* make a send right */
2695 user_handle->ip_mscount++;
2696 user_handle->ip_srights++;
2697 ip_reference(user_handle);
2698
2699 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
2700 /* nsrequest unlocks user_handle */
2701
91447636
A
2702 user_entry->backing.pager = NULL;
2703 user_entry->is_sub_map = FALSE;
2704 user_entry->is_pager = FALSE;
2705 user_entry->size = 0;
2706 user_entry->internal = FALSE;
2707 user_entry->ref_count = 1;
1c79356b 2708
91447636
A
2709 ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
2710 IKOT_NAMED_ENTRY);
1c79356b 2711
91447636
A
2712 *user_entry_p = user_entry;
2713 *user_handle_p = user_handle;
1c79356b 2714
91447636
A
2715 return KERN_SUCCESS;
2716}
1c79356b 2717
91447636
A
2718/*
2719 * mach_memory_object_memory_entry_64
2720 *
2721 * Create a named entry backed by the provided pager.
2722 *
2723 * JMM - we need to hold a reference on the pager -
2724 * and release it when the named entry is destroyed.
2725 */
2726kern_return_t
2727mach_memory_object_memory_entry_64(
2728 host_t host,
2729 boolean_t internal,
2730 vm_object_offset_t size,
2731 vm_prot_t permission,
2732 memory_object_t pager,
2733 ipc_port_t *entry_handle)
2734{
2735 unsigned int access;
2736 vm_named_entry_t user_entry;
2737 ipc_port_t user_handle;
2738
2739 if (host == HOST_NULL)
2740 return(KERN_INVALID_HOST);
2741
2742 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2743 != KERN_SUCCESS) {
2744 return KERN_FAILURE;
2745 }
2746
2747 user_entry->backing.pager = pager;
2748 user_entry->size = size;
2749 user_entry->offset = 0;
2750 user_entry->protection = permission & VM_PROT_ALL;
2751 access = GET_MAP_MEM(permission);
2752 SET_MAP_MEM(access, user_entry->protection);
2753 user_entry->internal = internal;
2754 user_entry->is_sub_map = FALSE;
2755 user_entry->is_pager = TRUE;
2756 assert(user_entry->ref_count == 1);
2757
2758 *entry_handle = user_handle;
1c79356b 2759 return KERN_SUCCESS;
91447636
A
2760}
2761
2762kern_return_t
2763mach_memory_object_memory_entry(
2764 host_t host,
2765 boolean_t internal,
2766 vm_size_t size,
2767 vm_prot_t permission,
2768 memory_object_t pager,
2769 ipc_port_t *entry_handle)
2770{
2771 return mach_memory_object_memory_entry_64( host, internal,
2772 (vm_object_offset_t)size, permission, pager, entry_handle);
2773}
2774
2775
2776kern_return_t
2777mach_memory_entry_purgable_control(
2778 ipc_port_t entry_port,
2779 vm_purgable_t control,
2780 int *state)
2781{
2782 kern_return_t kr;
2783 vm_named_entry_t mem_entry;
2784 vm_object_t object;
1c79356b 2785
91447636
A
2786 if (entry_port == IP_NULL ||
2787 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
2788 return KERN_INVALID_ARGUMENT;
2789 }
1c79356b 2790
91447636 2791 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
1c79356b 2792
91447636 2793 named_entry_lock(mem_entry);
1c79356b 2794
91447636
A
2795 if (mem_entry->is_sub_map || mem_entry->is_pager) {
2796 named_entry_unlock(mem_entry);
1c79356b
A
2797 return KERN_INVALID_ARGUMENT;
2798 }
91447636
A
2799
2800 object = mem_entry->backing.object;
2801 if (object == VM_OBJECT_NULL) {
2802 named_entry_unlock(mem_entry);
1c79356b
A
2803 return KERN_INVALID_ARGUMENT;
2804 }
91447636
A
2805
2806 vm_object_lock(object);
2807
2808 /* check that named entry covers entire object ? */
2809 if (mem_entry->offset != 0 || object->size != mem_entry->size) {
2810 vm_object_unlock(object);
2811 named_entry_unlock(mem_entry);
2812 return KERN_INVALID_ARGUMENT;
1c79356b 2813 }
91447636
A
2814
2815 named_entry_unlock(mem_entry);
2816
2817 kr = vm_object_purgable_control(object, control, state);
2818
2819 vm_object_unlock(object);
2820
2821 return kr;
1c79356b
A
2822}
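/*
 * Illustrative sketch (not part of the original file): marking a
 * purgeable memory entry volatile and later checking whether it was
 * emptied.  Assumes the entry was created with MAP_MEM_PURGABLE and that
 * the VM_PURGABLE_* constants come from <mach/vm_purgable.h>; the helper
 * name is hypothetical.
 */
#if 0
static boolean_t
example_make_volatile(ipc_port_t entry_port)
{
	int	state;

	state = VM_PURGABLE_VOLATILE;
	if (mach_memory_entry_purgable_control(entry_port,
	    VM_PURGABLE_SET_STATE, &state) != KERN_SUCCESS)
		return FALSE;

	/* ... some time later, see whether the pages were reclaimed ... */
	if (mach_memory_entry_purgable_control(entry_port,
	    VM_PURGABLE_GET_STATE, &state) != KERN_SUCCESS)
		return FALSE;

	return (state == VM_PURGABLE_EMPTY);
}
#endif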
2823
91447636
A
2824/*
2825 * mach_memory_entry_port_release:
2826 *
2827 * Release a send right on a named entry port. This is the correct
2828 * way to destroy a named entry. When the last right on the port is
2829 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
2830 */
2831void
2832mach_memory_entry_port_release(
2833 ipc_port_t port)
2834{
2835 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
2836 ipc_port_release_send(port);
2837}
1c79356b 2838
91447636
A
2839/*
2840 * mach_destroy_memory_entry:
2841 *
2842 * Drops a reference on a memory entry and destroys the memory entry if
2843 * there are no more references on it.
2844 * NOTE: This routine should not be called to destroy a memory entry from the
2845 * kernel, as it will not release the Mach port associated with the memory
2846 * entry. The proper way to destroy a memory entry in the kernel is to
2847 * call mach_memory_entry_port_release() to release the kernel's send-right on
2848 * the memory entry's port. When the last send right is released, the memory
2849 * entry will be destroyed via ipc_kobject_destroy().
2850 */
1c79356b
A
2851void
2852mach_destroy_memory_entry(
2853 ipc_port_t port)
2854{
2855 vm_named_entry_t named_entry;
2856#if MACH_ASSERT
2857 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
2858#endif /* MACH_ASSERT */
2859 named_entry = (vm_named_entry_t)port->ip_kobject;
2860 mutex_lock(&(named_entry)->Lock);
91447636 2861 named_entry->ref_count -= 1;
1c79356b 2862 if(named_entry->ref_count == 0) {
91447636 2863 if (named_entry->is_sub_map) {
1c79356b 2864 vm_map_deallocate(named_entry->backing.map);
91447636
A
2865 } else if (!named_entry->is_pager) {
2866 /* release the memory object we've been pointing to */
2867 vm_object_deallocate(named_entry->backing.object);
2868 } /* else JMM - need to drop reference on pager in that case */
2869
2870 mutex_unlock(&(named_entry)->Lock);
2871
2872 kfree((void *) port->ip_kobject,
2873 sizeof (struct vm_named_entry));
1c79356b
A
2874 } else
2875 mutex_unlock(&(named_entry)->Lock);
2876}
2877
2878
1c79356b
A
2879
2880kern_return_t
2881set_dp_control_port(
2882 host_priv_t host_priv,
2883 ipc_port_t control_port)
2884{
2885 if (host_priv == HOST_PRIV_NULL)
2886 return (KERN_INVALID_HOST);
0b4e3aa0
A
2887
2888 if (IP_VALID(dynamic_pager_control_port))
2889 ipc_port_release_send(dynamic_pager_control_port);
2890
1c79356b
A
2891 dynamic_pager_control_port = control_port;
2892 return KERN_SUCCESS;
2893}
2894
2895kern_return_t
2896get_dp_control_port(
2897 host_priv_t host_priv,
2898 ipc_port_t *control_port)
2899{
2900 if (host_priv == HOST_PRIV_NULL)
2901 return (KERN_INVALID_HOST);
0b4e3aa0
A
2902
2903 *control_port = ipc_port_copy_send(dynamic_pager_control_port);
1c79356b
A
2904 return KERN_SUCCESS;
2905
2906}
2907
91447636 2908/* ******* Temporary Internal calls to UPL for BSD ***** */
1c79356b 2909
91447636
A
2910extern int kernel_upl_map(
2911 vm_map_t map,
2912 upl_t upl,
2913 vm_offset_t *dst_addr);
1c79356b 2914
91447636
A
2915extern int kernel_upl_unmap(
2916 vm_map_t map,
2917 upl_t upl);
150bd074 2918
91447636
A
2919extern int kernel_upl_commit(
2920 upl_t upl,
2921 upl_page_info_t *pl,
2922 mach_msg_type_number_t count);
1c79356b 2923
91447636
A
2924extern int kernel_upl_commit_range(
2925 upl_t upl,
2926 upl_offset_t offset,
2927 upl_size_t size,
2928 int flags,
2929 upl_page_info_array_t pl,
2930 mach_msg_type_number_t count);
1c79356b 2931
91447636
A
2932extern int kernel_upl_abort(
2933 upl_t upl,
2934 int abort_type);
1c79356b 2935
91447636
A
2936extern int kernel_upl_abort_range(
2937 upl_t upl,
2938 upl_offset_t offset,
2939 upl_size_t size,
2940 int abort_flags);
1c79356b 2941
1c79356b 2942
1c79356b
A
2943kern_return_t
2944kernel_upl_map(
2945 vm_map_t map,
2946 upl_t upl,
2947 vm_offset_t *dst_addr)
2948{
91447636 2949 return vm_upl_map(map, upl, dst_addr);
1c79356b
A
2950}
2951
2952
2953kern_return_t
2954kernel_upl_unmap(
2955 vm_map_t map,
0b4e3aa0 2956 upl_t upl)
1c79356b 2957{
91447636 2958 return vm_upl_unmap(map, upl);
1c79356b
A
2959}
2960
2961kern_return_t
2962kernel_upl_commit(
91447636
A
2963 upl_t upl,
2964 upl_page_info_t *pl,
0b4e3aa0 2965 mach_msg_type_number_t count)
1c79356b 2966{
0b4e3aa0
A
2967 kern_return_t kr;
2968
2969 kr = upl_commit(upl, pl, count);
2970 upl_deallocate(upl);
1c79356b
A
2971 return kr;
2972}
2973
0b4e3aa0 2974
1c79356b
A
2975kern_return_t
2976kernel_upl_commit_range(
2977 upl_t upl,
91447636
A
2978 upl_offset_t offset,
2979 upl_size_t size,
1c79356b 2980 int flags,
0b4e3aa0
A
2981 upl_page_info_array_t pl,
2982 mach_msg_type_number_t count)
1c79356b 2983{
0b4e3aa0
A
2984 boolean_t finished = FALSE;
2985 kern_return_t kr;
2986
2987 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
2988 flags |= UPL_COMMIT_NOTIFY_EMPTY;
2989
2990 kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
2991
2992 if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
2993 upl_deallocate(upl);
2994
1c79356b
A
2995 return kr;
2996}
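/*
 * Illustrative sketch (not part of the original file): committing a UPL
 * in page-sized chunks and letting UPL_COMMIT_FREE_ON_EMPTY (handled
 * above) deallocate the UPL once the final range is committed, so the
 * caller never calls upl_deallocate() itself.  Chunk size, flag choice
 * and the helper name are only examples.
 */
#if 0
static kern_return_t
example_commit_in_chunks(upl_t upl, upl_page_info_array_t pl,
			 mach_msg_type_number_t count, upl_size_t total)
{
	upl_offset_t	offset;
	kern_return_t	kr = KERN_SUCCESS;

	for (offset = 0; offset < total; offset += PAGE_SIZE) {
		kr = kernel_upl_commit_range(upl, offset, PAGE_SIZE,
					     UPL_COMMIT_FREE_ON_EMPTY,
					     pl, count);
		if (kr != KERN_SUCCESS)
			break;
	}
	return kr;
}
#endif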
2997
2998kern_return_t
2999kernel_upl_abort_range(
0b4e3aa0 3000 upl_t upl,
91447636
A
3001 upl_offset_t offset,
3002 upl_size_t size,
0b4e3aa0 3003 int abort_flags)
1c79356b 3004{
0b4e3aa0
A
3005 kern_return_t kr;
3006 boolean_t finished = FALSE;
1c79356b 3007
0b4e3aa0
A
3008 if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
3009 abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
1c79356b 3010
0b4e3aa0 3011 kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
1c79356b 3012
0b4e3aa0
A
3013 if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
3014 upl_deallocate(upl);
1c79356b 3015
0b4e3aa0 3016 return kr;
1c79356b
A
3017}
3018
1c79356b 3019kern_return_t
0b4e3aa0
A
3020kernel_upl_abort(
3021 upl_t upl,
3022 int abort_type)
1c79356b 3023{
0b4e3aa0 3024 kern_return_t kr;
1c79356b 3025
0b4e3aa0
A
3026 kr = upl_abort(upl, abort_type);
3027 upl_deallocate(upl);
3028 return kr;
1c79356b
A
3029}
3030
91447636
A
3031/*
3032 * Now a kernel-private interface (for BootCache
3033 * use only). Need a cleaner way to create an
3034 * empty vm_map() and return a handle to it.
3035 */
1c79356b
A
3036
3037kern_return_t
91447636
A
3038vm_region_object_create(
3039 __unused vm_map_t target_map,
3040 vm_size_t size,
3041 ipc_port_t *object_handle)
1c79356b 3042{
91447636
A
3043 vm_named_entry_t user_entry;
3044 ipc_port_t user_handle;
1c79356b 3045
91447636 3046 vm_map_t new_map;
1c79356b 3047
91447636
A
3048 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3049 != KERN_SUCCESS) {
1c79356b 3050 return KERN_FAILURE;
91447636 3051 }
1c79356b 3052
91447636 3053 /* Create a named object based on a submap of specified size */
1c79356b 3054
91447636
A
3055 new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
3056 vm_map_round_page(size), TRUE);
1c79356b 3057
91447636
A
3058 user_entry->backing.map = new_map;
3059 user_entry->internal = TRUE;
3060 user_entry->is_sub_map = TRUE;
3061 user_entry->offset = 0;
3062 user_entry->protection = VM_PROT_ALL;
3063 user_entry->size = size;
3064 assert(user_entry->ref_count == 1);
1c79356b 3065
91447636 3066 *object_handle = user_handle;
1c79356b 3067 return KERN_SUCCESS;
1c79356b 3068
55e303ae
A
3069}
3070
91447636
A
3071ppnum_t vm_map_get_phys_page( /* forward */
3072 vm_map_t map,
3073 vm_offset_t offset);
3074
55e303ae 3075ppnum_t
1c79356b 3076vm_map_get_phys_page(
91447636
A
3077 vm_map_t map,
3078 vm_offset_t addr)
1c79356b 3079{
91447636
A
3080 vm_object_offset_t offset;
3081 vm_object_t object;
3082 vm_map_offset_t map_offset;
3083 vm_map_entry_t entry;
3084 ppnum_t phys_page = 0;
3085
3086 map_offset = vm_map_trunc_page(addr);
1c79356b
A
3087
3088 vm_map_lock(map);
91447636 3089 while (vm_map_lookup_entry(map, map_offset, &entry)) {
1c79356b
A
3090
3091 if (entry->object.vm_object == VM_OBJECT_NULL) {
3092 vm_map_unlock(map);
91447636 3093 return (ppnum_t) 0;
1c79356b
A
3094 }
3095 if (entry->is_sub_map) {
3096 vm_map_t old_map;
3097 vm_map_lock(entry->object.sub_map);
3098 old_map = map;
3099 map = entry->object.sub_map;
91447636 3100 map_offset = entry->offset + (map_offset - entry->vme_start);
1c79356b
A
3101 vm_map_unlock(old_map);
3102 continue;
3103 }
9bccf70c
A
3104 if (entry->object.vm_object->phys_contiguous) {
3105 /* These are not standard pageable memory mappings */
3106 /* If they are not present in the object they will */
3107 /* have to be picked up from the pager through the */
3108 /* fault mechanism. */
3109 if(entry->object.vm_object->shadow_offset == 0) {
3110 /* need to call vm_fault */
3111 vm_map_unlock(map);
91447636 3112 vm_fault(map, map_offset, VM_PROT_NONE,
9bccf70c
A
3113 FALSE, THREAD_UNINT, NULL, 0);
3114 vm_map_lock(map);
3115 continue;
3116 }
91447636 3117 offset = entry->offset + (map_offset - entry->vme_start);
55e303ae
A
3118 phys_page = (ppnum_t)
3119 ((entry->object.vm_object->shadow_offset
3120 + offset) >> 12);
9bccf70c
A
3121 break;
3122
3123 }
91447636 3124 offset = entry->offset + (map_offset - entry->vme_start);
1c79356b
A
3125 object = entry->object.vm_object;
3126 vm_object_lock(object);
3127 while (TRUE) {
3128 vm_page_t dst_page = vm_page_lookup(object,offset);
3129 if(dst_page == VM_PAGE_NULL) {
3130 if(object->shadow) {
3131 vm_object_t old_object;
3132 vm_object_lock(object->shadow);
3133 old_object = object;
3134 offset = offset + object->shadow_offset;
3135 object = object->shadow;
3136 vm_object_unlock(old_object);
3137 } else {
3138 vm_object_unlock(object);
3139 break;
3140 }
3141 } else {
55e303ae 3142 phys_page = (ppnum_t)(dst_page->phys_page);
1c79356b
A
3143 vm_object_unlock(object);
3144 break;
3145 }
3146 }
3147 break;
3148
3149 }
3150
3151 vm_map_unlock(map);
55e303ae
A
3152 return phys_page;
3153}
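/*
 * Illustrative sketch (not part of the original file): translating a
 * kernel virtual address to a physical page number with
 * vm_map_get_phys_page() above.  A result of 0 means no physical page
 * currently backs the address; kernel_map is assumed to be visible here.
 */
#if 0
static ppnum_t
example_kva_to_ppnum(vm_offset_t kva)
{
	return vm_map_get_phys_page(kernel_map, kva);
}
#endif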
3154
3155
3156
91447636
A
3157kern_return_t kernel_object_iopl_request( /* forward */
3158 vm_named_entry_t named_entry,
3159 memory_object_offset_t offset,
3160 vm_size_t *upl_size,
3161 upl_t *upl_ptr,
3162 upl_page_info_array_t user_page_list,
3163 unsigned int *page_list_count,
3164 int *flags);
3165
55e303ae
A
3166kern_return_t
3167kernel_object_iopl_request(
3168 vm_named_entry_t named_entry,
3169 memory_object_offset_t offset,
3170 vm_size_t *upl_size,
3171 upl_t *upl_ptr,
3172 upl_page_info_array_t user_page_list,
3173 unsigned int *page_list_count,
3174 int *flags)
3175{
3176 vm_object_t object;
3177 kern_return_t ret;
3178
3179 int caller_flags;
3180
3181 caller_flags = *flags;
3182
91447636
A
3183 if (caller_flags & ~UPL_VALID_FLAGS) {
3184 /*
3185 * For forward compatibility's sake,
3186 * reject any unknown flag.
3187 */
3188 return KERN_INVALID_VALUE;
3189 }
3190
55e303ae
A
3191 /* a few checks to make sure user is obeying rules */
3192 if(*upl_size == 0) {
3193 if(offset >= named_entry->size)
3194 return(KERN_INVALID_RIGHT);
3195 *upl_size = named_entry->size - offset;
3196 }
3197 if(caller_flags & UPL_COPYOUT_FROM) {
3198 if((named_entry->protection & VM_PROT_READ)
3199 != VM_PROT_READ) {
3200 return(KERN_INVALID_RIGHT);
3201 }
3202 } else {
3203 if((named_entry->protection &
3204 (VM_PROT_READ | VM_PROT_WRITE))
3205 != (VM_PROT_READ | VM_PROT_WRITE)) {
3206 return(KERN_INVALID_RIGHT);
3207 }
3208 }
3209 if(named_entry->size < (offset + *upl_size))
3210 return(KERN_INVALID_ARGUMENT);
3211
3212 /* the caller's offset parameter is the offset from the start */
3213 /* of the named entry; convert it to an offset in the object */
3214 offset = offset + named_entry->offset;
3215
3216 if(named_entry->is_sub_map)
3217 return (KERN_INVALID_ARGUMENT);
3218
3219 named_entry_lock(named_entry);
3220
91447636 3221 if (named_entry->is_pager) {
55e303ae
A
3222 object = vm_object_enter(named_entry->backing.pager,
3223 named_entry->offset + named_entry->size,
3224 named_entry->internal,
3225 FALSE,
3226 FALSE);
3227 if (object == VM_OBJECT_NULL) {
3228 named_entry_unlock(named_entry);
3229 return(KERN_INVALID_OBJECT);
3230 }
55e303ae 3231
91447636
A
3232 /* JMM - drop reference on the pager here? */
3233
3234 /* create an extra reference for the object */
3235 vm_object_lock(object);
55e303ae 3236 vm_object_reference_locked(object);
91447636
A
3237 named_entry->backing.object = object;
3238 named_entry->is_pager = FALSE;
55e303ae
A
3239 named_entry_unlock(named_entry);
3240
3241 /* wait for object (if any) to be ready */
91447636
A
3242 if (!named_entry->internal) {
3243 while (!object->pager_ready) {
3244 vm_object_wait(object,
3245 VM_OBJECT_EVENT_PAGER_READY,
3246 THREAD_UNINT);
3247 vm_object_lock(object);
3248 }
55e303ae
A
3249 }
3250 vm_object_unlock(object);
91447636
A
3251
3252 } else {
3253 /* This is the case where we are going to operate */
3254 /* on an already known object. If the object is */
3255 /* not ready it is internal. An external */
3256 /* object cannot be mapped until it is ready */
3257 /* we can therefore avoid the ready check */
3258 /* in this case. */
3259 object = named_entry->backing.object;
3260 vm_object_reference(object);
3261 named_entry_unlock(named_entry);
55e303ae
A
3262 }
3263
3264 if (!object->private) {
3265 if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
3266 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
3267 if (object->phys_contiguous) {
3268 *flags = UPL_PHYS_CONTIG;
3269 } else {
3270 *flags = 0;
3271 }
3272 } else {
3273 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
3274 }
3275
3276 ret = vm_object_iopl_request(object,
3277 offset,
3278 *upl_size,
3279 upl_ptr,
3280 user_page_list,
3281 page_list_count,
3282 caller_flags);
3283 vm_object_deallocate(object);
3284 return ret;
1c79356b 3285}