1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_user.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * User-exported virtual memory functions.
63 */
64
65 /*
66 * There are three implementations of the "XXX_allocate" functionality in
67 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
68 * (for a task with the same address space size, especially the current task),
69 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
70 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
71 * makes sense on platforms where a user task can either be 32 or 64, or the kernel
72 * task can be 32 or 64. mach_vm_allocate makes sense everywhere, and is preferred
73 * for new code.
74 *
75 * The entrypoints into the kernel are more complex. All platforms support a
76 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
77 * size types for the platform. On platforms that only support U32/K32,
78 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
  79 * subsystem 3800 is used to disambiguate the size of parameters, and they will
80 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32 platforms,
81 * the MIG glue should never call into vm_allocate directly, because the calling
  82 * task and kernel_task are unlikely to use the same size parameters.
83 *
84 * New VM call implementations should be added here and to mach_vm.defs
85 * (subsystem 4800), and use mach_vm_* "wide" types.
86 */
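/*
 * Illustrative user-space sketch (not part of this file's build): a caller
 * going through the MIG-generated mach_vm_* stubs in <mach/mach_vm.h>
 * (subsystem 4800), as recommended above. Variable names are examples only.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t    size = 16 * 4096;
 *	kern_return_t     kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		// pages come back zero-filled; use them, then release the range
 *		kr = mach_vm_deallocate(mach_task_self(), addr, size);
 *	}
 */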
87
88 #include <debug.h>
89
90 #include <vm_cpm.h>
91 #include <mach/boolean.h>
92 #include <mach/kern_return.h>
93 #include <mach/mach_types.h> /* to get vm_address_t */
94 #include <mach/memory_object.h>
95 #include <mach/std_types.h> /* to get pointer_t */
96 #include <mach/upl.h>
97 #include <mach/vm_attributes.h>
98 #include <mach/vm_param.h>
99 #include <mach/vm_statistics.h>
100 #include <mach/mach_syscalls.h>
101
102 #include <mach/host_priv_server.h>
103 #include <mach/mach_vm_server.h>
104 #include <mach/vm_map_server.h>
105
106 #include <kern/host.h>
107 #include <kern/kalloc.h>
108 #include <kern/task.h>
109 #include <kern/misc_protos.h>
110 #include <vm/vm_fault.h>
111 #include <vm/vm_map.h>
112 #include <vm/vm_object.h>
113 #include <vm/vm_page.h>
114 #include <vm/memory_object.h>
115 #include <vm/vm_pageout.h>
116 #include <vm/vm_protos.h>
117
118 vm_size_t upl_offset_to_pagelist = 0;
119
120 #if VM_CPM
121 #include <vm/cpm.h>
122 #endif /* VM_CPM */
123
 124 ipc_port_t dynamic_pager_control_port = NULL;
125
126 /*
 127 *	mach_vm_allocate allocates "zero fill" memory in the specified
128 * map.
129 */
130 kern_return_t
131 mach_vm_allocate(
132 vm_map_t map,
133 mach_vm_offset_t *addr,
134 mach_vm_size_t size,
135 int flags)
136 {
137 vm_map_offset_t map_addr;
138 vm_map_size_t map_size;
139 kern_return_t result;
140 boolean_t anywhere;
141
142 /* filter out any kernel-only flags */
143 if (flags & ~VM_FLAGS_USER_ALLOCATE)
144 return KERN_INVALID_ARGUMENT;
145
146 if (map == VM_MAP_NULL)
147 return(KERN_INVALID_ARGUMENT);
148 if (size == 0) {
149 *addr = 0;
150 return(KERN_SUCCESS);
151 }
152
153 anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
154 if (anywhere) {
155 /*
156 * No specific address requested, so start candidate address
157 * search at the minimum address in the map. However, if that
158 * minimum is 0, bump it up by PAGE_SIZE. We want to limit
159 * allocations of PAGEZERO to explicit requests since its
160 * normal use is to catch dereferences of NULL and many
161 * applications also treat pointers with a value of 0 as
162 * special and suddenly having address 0 contain useable
163 * memory would tend to confuse those applications.
164 */
165 map_addr = vm_map_min(map);
166 if (map_addr == 0)
167 map_addr += PAGE_SIZE;
168 } else
169 map_addr = vm_map_trunc_page(*addr);
170 map_size = vm_map_round_page(size);
171 if (map_size == 0) {
172 return(KERN_INVALID_ARGUMENT);
173 }
174
175 result = vm_map_enter(
176 map,
177 &map_addr,
178 map_size,
179 (vm_map_offset_t)0,
180 flags,
181 VM_OBJECT_NULL,
182 (vm_object_offset_t)0,
183 FALSE,
184 VM_PROT_DEFAULT,
185 VM_PROT_ALL,
186 VM_INHERIT_DEFAULT);
187
188 *addr = map_addr;
189 return(result);
190 }
191
192 /*
193 * vm_allocate
 194 *	Legacy routine that allocates "zero fill" memory in the specified
195 * map (which is limited to the same size as the kernel).
196 */
197 kern_return_t
198 vm_allocate(
199 vm_map_t map,
200 vm_offset_t *addr,
201 vm_size_t size,
202 int flags)
203 {
204 vm_map_offset_t map_addr;
205 vm_map_size_t map_size;
206 kern_return_t result;
207 boolean_t anywhere;
208
209 /* filter out any kernel-only flags */
210 if (flags & ~VM_FLAGS_USER_ALLOCATE)
211 return KERN_INVALID_ARGUMENT;
212
213 if (map == VM_MAP_NULL)
214 return(KERN_INVALID_ARGUMENT);
215 if (size == 0) {
216 *addr = 0;
217 return(KERN_SUCCESS);
218 }
219
220 anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
221 if (anywhere) {
222 /*
223 * No specific address requested, so start candidate address
224 * search at the minimum address in the map. However, if that
225 * minimum is 0, bump it up by PAGE_SIZE. We want to limit
226 * allocations of PAGEZERO to explicit requests since its
227 * normal use is to catch dereferences of NULL and many
228 * applications also treat pointers with a value of 0 as
229 * special and suddenly having address 0 contain useable
230 * memory would tend to confuse those applications.
231 */
232 map_addr = vm_map_min(map);
233 if (map_addr == 0)
234 map_addr += PAGE_SIZE;
235 } else
236 map_addr = vm_map_trunc_page(*addr);
237 map_size = vm_map_round_page(size);
238 if (map_size == 0) {
239 return(KERN_INVALID_ARGUMENT);
240 }
241
242 result = vm_map_enter(
243 map,
244 &map_addr,
245 map_size,
246 (vm_map_offset_t)0,
247 flags,
248 VM_OBJECT_NULL,
249 (vm_object_offset_t)0,
250 FALSE,
251 VM_PROT_DEFAULT,
252 VM_PROT_ALL,
253 VM_INHERIT_DEFAULT);
254
255 *addr = CAST_DOWN(vm_offset_t, map_addr);
256 return(result);
257 }
258
259 /*
260 * mach_vm_deallocate -
261 * deallocates the specified range of addresses in the
262 * specified address map.
263 */
264 kern_return_t
265 mach_vm_deallocate(
266 vm_map_t map,
267 mach_vm_offset_t start,
268 mach_vm_size_t size)
269 {
270 if ((map == VM_MAP_NULL) || (start + size < start))
271 return(KERN_INVALID_ARGUMENT);
272
273 if (size == (mach_vm_offset_t) 0)
274 return(KERN_SUCCESS);
275
276 return(vm_map_remove(map, vm_map_trunc_page(start),
277 vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
278 }
279
280 /*
281 * vm_deallocate -
282 * deallocates the specified range of addresses in the
283 * specified address map (limited to addresses the same
284 * size as the kernel).
285 */
286 kern_return_t
287 vm_deallocate(
288 register vm_map_t map,
289 vm_offset_t start,
290 vm_size_t size)
291 {
292 if ((map == VM_MAP_NULL) || (start + size < start))
293 return(KERN_INVALID_ARGUMENT);
294
295 if (size == (vm_offset_t) 0)
296 return(KERN_SUCCESS);
297
298 return(vm_map_remove(map, vm_map_trunc_page(start),
299 vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
300 }
301
302 /*
303 * mach_vm_inherit -
304 * Sets the inheritance of the specified range in the
305 * specified map.
306 */
307 kern_return_t
308 mach_vm_inherit(
309 vm_map_t map,
310 mach_vm_offset_t start,
311 mach_vm_size_t size,
312 vm_inherit_t new_inheritance)
313 {
314 if ((map == VM_MAP_NULL) || (start + size < start) ||
315 (new_inheritance > VM_INHERIT_LAST_VALID))
316 return(KERN_INVALID_ARGUMENT);
317
318 if (size == 0)
319 return KERN_SUCCESS;
320
321 return(vm_map_inherit(map,
322 vm_map_trunc_page(start),
323 vm_map_round_page(start+size),
324 new_inheritance));
325 }
326
327 /*
328 * vm_inherit -
329 * Sets the inheritance of the specified range in the
330 * specified map (range limited to addresses
331 */
332 kern_return_t
333 vm_inherit(
334 register vm_map_t map,
335 vm_offset_t start,
336 vm_size_t size,
337 vm_inherit_t new_inheritance)
338 {
339 if ((map == VM_MAP_NULL) || (start + size < start) ||
340 (new_inheritance > VM_INHERIT_LAST_VALID))
341 return(KERN_INVALID_ARGUMENT);
342
343 if (size == 0)
344 return KERN_SUCCESS;
345
346 return(vm_map_inherit(map,
347 vm_map_trunc_page(start),
348 vm_map_round_page(start+size),
349 new_inheritance));
350 }
351
352 /*
353 * mach_vm_protect -
354 * Sets the protection of the specified range in the
355 * specified map.
356 */
357
358 kern_return_t
359 mach_vm_protect(
360 vm_map_t map,
361 mach_vm_offset_t start,
362 mach_vm_size_t size,
363 boolean_t set_maximum,
364 vm_prot_t new_protection)
365 {
366 if ((map == VM_MAP_NULL) || (start + size < start) ||
367 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
368 return(KERN_INVALID_ARGUMENT);
369
370 if (size == 0)
371 return KERN_SUCCESS;
372
373 return(vm_map_protect(map,
374 vm_map_trunc_page(start),
375 vm_map_round_page(start+size),
376 new_protection,
377 set_maximum));
378 }
379
380 /*
381 * vm_protect -
382 * Sets the protection of the specified range in the
383 * specified map. Addressability of the range limited
384 * to the same size as the kernel.
385 */
386
387 kern_return_t
388 vm_protect(
389 vm_map_t map,
390 vm_offset_t start,
391 vm_size_t size,
392 boolean_t set_maximum,
393 vm_prot_t new_protection)
394 {
395 if ((map == VM_MAP_NULL) || (start + size < start) ||
396 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
397 return(KERN_INVALID_ARGUMENT);
398
399 if (size == 0)
400 return KERN_SUCCESS;
401
402 return(vm_map_protect(map,
403 vm_map_trunc_page(start),
404 vm_map_round_page(start+size),
405 new_protection,
406 set_maximum));
407 }
408
409 /*
410 * mach_vm_machine_attributes -
411 * Handle machine-specific attributes for a mapping, such
412 * as cachability, migrability, etc.
413 */
414 kern_return_t
415 mach_vm_machine_attribute(
416 vm_map_t map,
417 mach_vm_address_t addr,
418 mach_vm_size_t size,
419 vm_machine_attribute_t attribute,
420 vm_machine_attribute_val_t* value) /* IN/OUT */
421 {
422 if ((map == VM_MAP_NULL) || (addr + size < addr))
423 return(KERN_INVALID_ARGUMENT);
424
425 if (size == 0)
426 return KERN_SUCCESS;
427
428 return vm_map_machine_attribute(map,
429 vm_map_trunc_page(addr),
430 vm_map_round_page(addr+size),
431 attribute,
432 value);
433 }
434
435 /*
436 * vm_machine_attribute -
437 * Handle machine-specific attributes for a mapping, such
438 * as cachability, migrability, etc. Limited addressability
439 * (same range limits as for the native kernel map).
440 */
441 kern_return_t
442 vm_machine_attribute(
443 vm_map_t map,
444 vm_address_t addr,
445 vm_size_t size,
446 vm_machine_attribute_t attribute,
447 vm_machine_attribute_val_t* value) /* IN/OUT */
448 {
449 if ((map == VM_MAP_NULL) || (addr + size < addr))
450 return(KERN_INVALID_ARGUMENT);
451
452 if (size == 0)
453 return KERN_SUCCESS;
454
455 return vm_map_machine_attribute(map,
456 vm_map_trunc_page(addr),
457 vm_map_round_page(addr+size),
458 attribute,
459 value);
460 }
461
462 /*
463 * mach_vm_read -
464 * Read/copy a range from one address space and return it to the caller.
465 *
466 * It is assumed that the address for the returned memory is selected by
467 * the IPC implementation as part of receiving the reply to this call.
468 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
469 * that gets returned.
470 *
471 * JMM - because of mach_msg_type_number_t, this call is limited to a
472 * single 4GB region at this time.
473 *
474 */
475 kern_return_t
476 mach_vm_read(
477 vm_map_t map,
478 mach_vm_address_t addr,
479 mach_vm_size_t size,
480 pointer_t *data,
481 mach_msg_type_number_t *data_size)
482 {
483 kern_return_t error;
484 vm_map_copy_t ipc_address;
485
486 if (map == VM_MAP_NULL)
487 return(KERN_INVALID_ARGUMENT);
488
489 if ((mach_msg_type_number_t) size != size)
490 return KERN_INVALID_ARGUMENT;
491
492 error = vm_map_copyin(map,
493 (vm_map_address_t)addr,
494 (vm_map_size_t)size,
495 FALSE, /* src_destroy */
496 &ipc_address);
497
498 if (KERN_SUCCESS == error) {
499 *data = (pointer_t) ipc_address;
500 *data_size = (mach_msg_type_number_t) size;
501 assert(*data_size == size);
502 }
503 return(error);
504 }
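/*
 * Illustrative user-space sketch (assumptions: "target" is a task port the
 * caller already holds, and the addresses are example values). The
 * out-of-line buffer returned by the MIG stub lives in the caller's address
 * space and should be released with mach_vm_deallocate() when done.
 *
 *	vm_offset_t data = 0;
 *	mach_msg_type_number_t data_cnt = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_read(target, remote_addr, remote_size, &data, &data_cnt);
 *	if (kr == KERN_SUCCESS) {
 *		// inspect data_cnt bytes at (void *)data ...
 *		mach_vm_deallocate(mach_task_self(), data, data_cnt);
 *	}
 */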
505
506 /*
507 * vm_read -
508 * Read/copy a range from one address space and return it to the caller.
509 * Limited addressability (same range limits as for the native kernel map).
510 *
511 * It is assumed that the address for the returned memory is selected by
512 * the IPC implementation as part of receiving the reply to this call.
513 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
514 * that gets returned.
515 */
516 kern_return_t
517 vm_read(
518 vm_map_t map,
519 vm_address_t addr,
520 vm_size_t size,
521 pointer_t *data,
522 mach_msg_type_number_t *data_size)
523 {
524 kern_return_t error;
525 vm_map_copy_t ipc_address;
526
527 if (map == VM_MAP_NULL)
528 return(KERN_INVALID_ARGUMENT);
529
530 if (size > (unsigned)(mach_msg_type_number_t) -1) {
531 /*
532 * The kernel could handle a 64-bit "size" value, but
533 * it could not return the size of the data in "*data_size"
534 * without overflowing.
535 * Let's reject this "size" as invalid.
536 */
537 return KERN_INVALID_ARGUMENT;
538 }
539
540 error = vm_map_copyin(map,
541 (vm_map_address_t)addr,
542 (vm_map_size_t)size,
543 FALSE, /* src_destroy */
544 &ipc_address);
545
546 if (KERN_SUCCESS == error) {
547 *data = (pointer_t) ipc_address;
548 *data_size = (mach_msg_type_number_t) size;
549 assert(*data_size == size);
550 }
551 return(error);
552 }
553
554 /*
555 * mach_vm_read_list -
556 * Read/copy a list of address ranges from specified map.
557 *
558 * MIG does not know how to deal with a returned array of
559 * vm_map_copy_t structures, so we have to do the copyout
560 * manually here.
561 */
562 kern_return_t
563 mach_vm_read_list(
564 vm_map_t map,
565 mach_vm_read_entry_t data_list,
566 natural_t count)
567 {
568 mach_msg_type_number_t i;
569 kern_return_t error;
570 vm_map_copy_t copy;
571
572 if (map == VM_MAP_NULL ||
573 count > VM_MAP_ENTRY_MAX)
574 return(KERN_INVALID_ARGUMENT);
575
576 error = KERN_SUCCESS;
577 for(i=0; i<count; i++) {
578 vm_map_address_t map_addr;
579 vm_map_size_t map_size;
580
581 map_addr = (vm_map_address_t)(data_list[i].address);
582 map_size = (vm_map_size_t)(data_list[i].size);
583
584 if(map_size != 0) {
585 error = vm_map_copyin(map,
586 map_addr,
587 map_size,
588 FALSE, /* src_destroy */
589 &copy);
590 if (KERN_SUCCESS == error) {
591 error = vm_map_copyout(
592 current_task()->map,
593 &map_addr,
594 copy);
595 if (KERN_SUCCESS == error) {
596 data_list[i].address = map_addr;
597 continue;
598 }
599 vm_map_copy_discard(copy);
600 }
601 }
602 data_list[i].address = (mach_vm_address_t)0;
603 data_list[i].size = (mach_vm_size_t)0;
604 }
605 return(error);
606 }
607
608 /*
609 * vm_read_list -
610 * Read/copy a list of address ranges from specified map.
611 *
612 * MIG does not know how to deal with a returned array of
613 * vm_map_copy_t structures, so we have to do the copyout
614 * manually here.
615 *
616 * The source and destination ranges are limited to those
617 * that can be described with a vm_address_t (i.e. same
618 * size map as the kernel).
619 *
620 * JMM - If the result of the copyout is an address range
621 * that cannot be described with a vm_address_t (i.e. the
622 * caller had a larger address space but used this call
623 * anyway), it will result in a truncated address being
624 * returned (and a likely confused caller).
625 */
626
627 kern_return_t
628 vm_read_list(
629 vm_map_t map,
630 vm_read_entry_t data_list,
631 natural_t count)
632 {
633 mach_msg_type_number_t i;
634 kern_return_t error;
635 vm_map_copy_t copy;
636
637 if (map == VM_MAP_NULL ||
638 count > VM_MAP_ENTRY_MAX)
639 return(KERN_INVALID_ARGUMENT);
640
641 error = KERN_SUCCESS;
642 for(i=0; i<count; i++) {
643 vm_map_address_t map_addr;
644 vm_map_size_t map_size;
645
646 map_addr = (vm_map_address_t)(data_list[i].address);
647 map_size = (vm_map_size_t)(data_list[i].size);
648
649 if(map_size != 0) {
650 error = vm_map_copyin(map,
651 map_addr,
652 map_size,
653 FALSE, /* src_destroy */
654 &copy);
655 if (KERN_SUCCESS == error) {
656 error = vm_map_copyout(current_task()->map,
657 &map_addr,
658 copy);
659 if (KERN_SUCCESS == error) {
660 data_list[i].address =
661 CAST_DOWN(vm_offset_t, map_addr);
662 continue;
663 }
664 vm_map_copy_discard(copy);
665 }
666 }
667 data_list[i].address = (mach_vm_address_t)0;
668 data_list[i].size = (mach_vm_size_t)0;
669 }
670 return(error);
671 }
672
673 /*
674 * mach_vm_read_overwrite -
675 * Overwrite a range of the current map with data from the specified
676 * map/address range.
677 *
678 * In making an assumption that the current thread is local, it is
679 * no longer cluster-safe without a fully supportive local proxy
 680 *	thread/task (but we don't support clusters anymore so this is moot).
681 */
682
683 kern_return_t
684 mach_vm_read_overwrite(
685 vm_map_t map,
686 mach_vm_address_t address,
687 mach_vm_size_t size,
688 mach_vm_address_t data,
689 mach_vm_size_t *data_size)
690 {
691 kern_return_t error;
692 vm_map_copy_t copy;
693
694 if (map == VM_MAP_NULL)
695 return(KERN_INVALID_ARGUMENT);
696
697 error = vm_map_copyin(map, (vm_map_address_t)address,
698 (vm_map_size_t)size, FALSE, &copy);
699
700 if (KERN_SUCCESS == error) {
701 error = vm_map_copy_overwrite(current_thread()->map,
702 (vm_map_address_t)data,
703 copy, FALSE);
704 if (KERN_SUCCESS == error) {
705 *data_size = size;
706 return error;
707 }
708 vm_map_copy_discard(copy);
709 }
710 return(error);
711 }
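/*
 * Illustrative user-space sketch: unlike mach_vm_read(), the caller supplies
 * the destination buffer, so no out-of-line memory has to be deallocated
 * afterwards ("target" and "remote_addr" are example values).
 *
 *	char buf[256];
 *	mach_vm_size_t copied = 0;
 *
 *	kern_return_t kr = mach_vm_read_overwrite(target,
 *	    remote_addr, sizeof(buf),
 *	    (mach_vm_address_t)(uintptr_t)buf, &copied);
 *	// on success, "copied" bytes of the remote range are now in buf
 */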
712
713 /*
714 * vm_read_overwrite -
715 * Overwrite a range of the current map with data from the specified
716 * map/address range.
717 *
718 * This routine adds the additional limitation that the source and
719 * destination ranges must be describable with vm_address_t values
 720 *	(i.e. the same size address spaces as the kernel, or at least
 721 *	the ranges are in that first portion of the respective address
722 * spaces).
723 */
724
725 kern_return_t
726 vm_read_overwrite(
727 vm_map_t map,
728 vm_address_t address,
729 vm_size_t size,
730 vm_address_t data,
731 vm_size_t *data_size)
732 {
733 kern_return_t error;
734 vm_map_copy_t copy;
735
736 if (map == VM_MAP_NULL)
737 return(KERN_INVALID_ARGUMENT);
738
739 error = vm_map_copyin(map, (vm_map_address_t)address,
740 (vm_map_size_t)size, FALSE, &copy);
741
742 if (KERN_SUCCESS == error) {
743 error = vm_map_copy_overwrite(current_thread()->map,
744 (vm_map_address_t)data,
745 copy, FALSE);
746 if (KERN_SUCCESS == error) {
747 *data_size = size;
748 return error;
749 }
750 vm_map_copy_discard(copy);
751 }
752 return(error);
753 }
754
755
756 /*
757 * mach_vm_write -
758 * Overwrite the specified address range with the data provided
759 * (from the current map).
760 */
761 kern_return_t
762 mach_vm_write(
763 vm_map_t map,
764 mach_vm_address_t address,
765 pointer_t data,
766 __unused mach_msg_type_number_t size)
767 {
768 if (map == VM_MAP_NULL)
769 return KERN_INVALID_ARGUMENT;
770
771 return vm_map_copy_overwrite(map, (vm_map_address_t)address,
772 (vm_map_copy_t) data, FALSE /* interruptible XXX */);
773 }
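/*
 * Illustrative user-space sketch: the MIG stub sends the caller's buffer
 * out-of-line, and the kernel side above receives it as a vm_map_copy_t in
 * "data" ("target" and "remote_addr" are example values).
 *
 *	uint32_t value = 0x1234;
 *
 *	kern_return_t kr = mach_vm_write(target, remote_addr,
 *	    (vm_offset_t)(uintptr_t)&value, sizeof(value));
 */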
774
775 /*
776 * vm_write -
777 * Overwrite the specified address range with the data provided
778 * (from the current map).
779 *
780 * The addressability of the range of addresses to overwrite is
 781 *	limited by the use of a vm_address_t (same size as kernel map).
782 * Either the target map is also small, or the range is in the
783 * low addresses within it.
784 */
785 kern_return_t
786 vm_write(
787 vm_map_t map,
788 vm_address_t address,
789 pointer_t data,
790 __unused mach_msg_type_number_t size)
791 {
792 if (map == VM_MAP_NULL)
793 return KERN_INVALID_ARGUMENT;
794
795 return vm_map_copy_overwrite(map, (vm_map_address_t)address,
796 (vm_map_copy_t) data, FALSE /* interruptible XXX */);
797 }
798
799 /*
800 * mach_vm_copy -
801 * Overwrite one range of the specified map with the contents of
802 * another range within that same map (i.e. both address ranges
803 * are "over there").
804 */
805 kern_return_t
806 mach_vm_copy(
807 vm_map_t map,
808 mach_vm_address_t source_address,
809 mach_vm_size_t size,
810 mach_vm_address_t dest_address)
811 {
812 vm_map_copy_t copy;
813 kern_return_t kr;
814
815 if (map == VM_MAP_NULL)
816 return KERN_INVALID_ARGUMENT;
817
818 kr = vm_map_copyin(map, (vm_map_address_t)source_address,
819 (vm_map_size_t)size, FALSE, &copy);
820
821 if (KERN_SUCCESS == kr) {
822 kr = vm_map_copy_overwrite(map,
823 (vm_map_address_t)dest_address,
824 copy, FALSE /* interruptible XXX */);
825
826 if (KERN_SUCCESS != kr)
827 vm_map_copy_discard(copy);
828 }
829 return kr;
830 }
831
832 kern_return_t
833 vm_copy(
834 vm_map_t map,
835 vm_address_t source_address,
836 vm_size_t size,
837 vm_address_t dest_address)
838 {
839 vm_map_copy_t copy;
840 kern_return_t kr;
841
842 if (map == VM_MAP_NULL)
843 return KERN_INVALID_ARGUMENT;
844
845 kr = vm_map_copyin(map, (vm_map_address_t)source_address,
846 (vm_map_size_t)size, FALSE, &copy);
847
848 if (KERN_SUCCESS == kr) {
849 kr = vm_map_copy_overwrite(map,
850 (vm_map_address_t)dest_address,
851 copy, FALSE /* interruptible XXX */);
852
853 if (KERN_SUCCESS != kr)
854 vm_map_copy_discard(copy);
855 }
856 return kr;
857 }
858
859 /*
860 * mach_vm_map -
861 * Map some range of an object into an address space.
862 *
863 * The object can be one of several types of objects:
864 * NULL - anonymous memory
865 * a named entry - a range within another address space
866 * or a range within a memory object
867 * a whole memory object
868 *
869 */
870 kern_return_t
871 mach_vm_map(
872 vm_map_t target_map,
873 mach_vm_offset_t *address,
874 mach_vm_size_t initial_size,
875 mach_vm_offset_t mask,
876 int flags,
877 ipc_port_t port,
878 vm_object_offset_t offset,
879 boolean_t copy,
880 vm_prot_t cur_protection,
881 vm_prot_t max_protection,
882 vm_inherit_t inheritance)
883 {
884 kern_return_t kr;
885 vm_map_offset_t vmmaddr;
886
887 vmmaddr = (vm_map_offset_t) *address;
888
889 /* filter out any kernel-only flags */
890 if (flags & ~VM_FLAGS_USER_MAP)
891 return KERN_INVALID_ARGUMENT;
892
893 kr = vm_map_enter_mem_object(target_map,
894 &vmmaddr,
895 initial_size,
896 mask,
897 flags,
898 port,
899 offset,
900 copy,
901 cur_protection,
902 max_protection,
903 inheritance);
904
905 *address = vmmaddr;
906 return kr;
907 }
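/*
 * Illustrative user-space sketch: mapping a named-entry port (or
 * MACH_PORT_NULL for fresh anonymous memory) into the caller's own task.
 * "mem_entry_port" and "size" are example values; see
 * mach_make_memory_entry_64() below for how such a port is created.
 *
 *	mach_vm_address_t addr = 0;
 *
 *	kern_return_t kr = mach_vm_map(mach_task_self(), &addr, size,
 *	    0,                    // mask: no alignment constraint
 *	    VM_FLAGS_ANYWHERE,
 *	    mem_entry_port,
 *	    0,                    // offset within the entry
 *	    FALSE,                // copy: share, don't copy
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_INHERIT_DEFAULT);
 */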
908
909
910 /* legacy interface */
911 kern_return_t
912 vm_map_64(
913 vm_map_t target_map,
914 vm_offset_t *address,
915 vm_size_t size,
916 vm_offset_t mask,
917 int flags,
918 ipc_port_t port,
919 vm_object_offset_t offset,
920 boolean_t copy,
921 vm_prot_t cur_protection,
922 vm_prot_t max_protection,
923 vm_inherit_t inheritance)
924 {
925 mach_vm_address_t map_addr;
926 mach_vm_size_t map_size;
927 mach_vm_offset_t map_mask;
928 kern_return_t kr;
929
930 map_addr = (mach_vm_address_t)*address;
931 map_size = (mach_vm_size_t)size;
932 map_mask = (mach_vm_offset_t)mask;
933
934 kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
935 port, offset, copy,
936 cur_protection, max_protection, inheritance);
937 *address = CAST_DOWN(vm_offset_t, map_addr);
938 return kr;
939 }
940
941 /* temporary, until world build */
942 kern_return_t
943 vm_map(
944 vm_map_t target_map,
945 vm_offset_t *address,
946 vm_size_t size,
947 vm_offset_t mask,
948 int flags,
949 ipc_port_t port,
950 vm_offset_t offset,
951 boolean_t copy,
952 vm_prot_t cur_protection,
953 vm_prot_t max_protection,
954 vm_inherit_t inheritance)
955 {
956 mach_vm_address_t map_addr;
957 mach_vm_size_t map_size;
958 mach_vm_offset_t map_mask;
959 vm_object_offset_t obj_offset;
960 kern_return_t kr;
961
962 map_addr = (mach_vm_address_t)*address;
963 map_size = (mach_vm_size_t)size;
964 map_mask = (mach_vm_offset_t)mask;
965 obj_offset = (vm_object_offset_t)offset;
966
967 kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
968 port, obj_offset, copy,
969 cur_protection, max_protection, inheritance);
970 *address = CAST_DOWN(vm_offset_t, map_addr);
971 return kr;
972 }
973
974 /*
975 * mach_vm_remap -
976 * Remap a range of memory from one task into another,
977 * to another address range within the same task, or
978 * over top of itself (with altered permissions and/or
979 * as an in-place copy of itself).
980 */
981
982 kern_return_t
983 mach_vm_remap(
984 vm_map_t target_map,
985 mach_vm_offset_t *address,
986 mach_vm_size_t size,
987 mach_vm_offset_t mask,
988 int flags,
989 vm_map_t src_map,
990 mach_vm_offset_t memory_address,
991 boolean_t copy,
992 vm_prot_t *cur_protection,
993 vm_prot_t *max_protection,
994 vm_inherit_t inheritance)
995 {
996 vm_map_offset_t map_addr;
997 kern_return_t kr;
998
999 if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
1000 return KERN_INVALID_ARGUMENT;
1001
1002 /* filter out any kernel-only flags */
1003 if (flags & ~VM_FLAGS_USER_REMAP)
1004 return KERN_INVALID_ARGUMENT;
1005
1006 map_addr = (vm_map_offset_t)*address;
1007
1008 kr = vm_map_remap(target_map,
1009 &map_addr,
1010 size,
1011 mask,
1012 flags,
1013 src_map,
1014 memory_address,
1015 copy,
1016 cur_protection,
1017 max_protection,
1018 inheritance);
1019 *address = map_addr;
1020 return kr;
1021 }
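/*
 * Illustrative user-space sketch: pulling a range from another task into
 * the caller's map, sharing the underlying pages ("src_task" and "src_addr"
 * are example values the caller already holds).
 *
 *	mach_vm_address_t local_addr = 0;
 *	vm_prot_t cur_prot, max_prot;
 *
 *	kern_return_t kr = mach_vm_remap(mach_task_self(), &local_addr, size,
 *	    0, VM_FLAGS_ANYWHERE,
 *	    src_task, src_addr,
 *	    FALSE,                // share rather than copy
 *	    &cur_prot, &max_prot,
 *	    VM_INHERIT_NONE);
 */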
1022
1023 /*
1024 * vm_remap -
1025 * Remap a range of memory from one task into another,
1026 * to another address range within the same task, or
1027 * over top of itself (with altered permissions and/or
1028 * as an in-place copy of itself).
1029 *
1030 * The addressability of the source and target address
1031 * range is limited by the size of vm_address_t (in the
1032 * kernel context).
1033 */
1034 kern_return_t
1035 vm_remap(
1036 vm_map_t target_map,
1037 vm_offset_t *address,
1038 vm_size_t size,
1039 vm_offset_t mask,
1040 int flags,
1041 vm_map_t src_map,
1042 vm_offset_t memory_address,
1043 boolean_t copy,
1044 vm_prot_t *cur_protection,
1045 vm_prot_t *max_protection,
1046 vm_inherit_t inheritance)
1047 {
1048 vm_map_offset_t map_addr;
1049 kern_return_t kr;
1050
1051 if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
1052 return KERN_INVALID_ARGUMENT;
1053
1054 /* filter out any kernel-only flags */
1055 if (flags & ~VM_FLAGS_USER_REMAP)
1056 return KERN_INVALID_ARGUMENT;
1057
1058 map_addr = (vm_map_offset_t)*address;
1059
1060 kr = vm_map_remap(target_map,
1061 &map_addr,
1062 size,
1063 mask,
1064 flags,
1065 src_map,
1066 memory_address,
1067 copy,
1068 cur_protection,
1069 max_protection,
1070 inheritance);
1071 *address = CAST_DOWN(vm_offset_t, map_addr);
1072 return kr;
1073 }
1074
1075 /*
1076 * NOTE: these routines (and this file) will no longer require mach_host_server.h
1077 * when mach_vm_wire and vm_wire are changed to use ledgers.
1078 */
1079 #include <mach/mach_host_server.h>
1080 /*
1081 * mach_vm_wire
1082 * Specify that the range of the virtual address space
1083 * of the target task must not cause page faults for
1084 * the indicated accesses.
1085 *
1086 * [ To unwire the pages, specify VM_PROT_NONE. ]
1087 */
1088 kern_return_t
1089 mach_vm_wire(
1090 host_priv_t host_priv,
1091 vm_map_t map,
1092 mach_vm_offset_t start,
1093 mach_vm_size_t size,
1094 vm_prot_t access)
1095 {
1096 kern_return_t rc;
1097
1098 if (host_priv == HOST_PRIV_NULL)
1099 return KERN_INVALID_HOST;
1100
1101 assert(host_priv == &realhost);
1102
1103 if (map == VM_MAP_NULL)
1104 return KERN_INVALID_TASK;
1105
1106 if (access & ~VM_PROT_ALL || (start + size < start))
1107 return KERN_INVALID_ARGUMENT;
1108
1109 if (access != VM_PROT_NONE) {
1110 rc = vm_map_wire(map, vm_map_trunc_page(start),
1111 vm_map_round_page(start+size), access, TRUE);
1112 } else {
1113 rc = vm_map_unwire(map, vm_map_trunc_page(start),
1114 vm_map_round_page(start+size), TRUE);
1115 }
1116 return rc;
1117 }
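/*
 * Illustrative user-space sketch, assuming the caller already holds the
 * host privilege port in "host_priv" (only privileged callers can);
 * "addr" and "size" are example values.
 *
 *	host_priv_t host_priv = ...;        // obtained elsewhere; privileged
 *	kern_return_t kr;
 *
 *	kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
 *	    VM_PROT_READ | VM_PROT_WRITE);  // wire the range
 *	...
 *	kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
 *	    VM_PROT_NONE);                  // VM_PROT_NONE unwires it
 */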
1118
1119 /*
1120 * vm_wire -
1121 * Specify that the range of the virtual address space
1122 * of the target task must not cause page faults for
1123 * the indicated accesses.
1124 *
1125 * [ To unwire the pages, specify VM_PROT_NONE. ]
1126 */
1127 kern_return_t
1128 vm_wire(
1129 host_priv_t host_priv,
1130 register vm_map_t map,
1131 vm_offset_t start,
1132 vm_size_t size,
1133 vm_prot_t access)
1134 {
1135 kern_return_t rc;
1136
1137 if (host_priv == HOST_PRIV_NULL)
1138 return KERN_INVALID_HOST;
1139
1140 assert(host_priv == &realhost);
1141
1142 if (map == VM_MAP_NULL)
1143 return KERN_INVALID_TASK;
1144
1145 if ((access & ~VM_PROT_ALL) || (start + size < start))
1146 return KERN_INVALID_ARGUMENT;
1147
1148 if (size == 0) {
1149 rc = KERN_SUCCESS;
1150 } else if (access != VM_PROT_NONE) {
1151 rc = vm_map_wire(map, vm_map_trunc_page(start),
1152 vm_map_round_page(start+size), access, TRUE);
1153 } else {
1154 rc = vm_map_unwire(map, vm_map_trunc_page(start),
1155 vm_map_round_page(start+size), TRUE);
1156 }
1157 return rc;
1158 }
1159
1160 /*
1161 * vm_msync
1162 *
1163 * Synchronises the memory range specified with its backing store
1164 * image by either flushing or cleaning the contents to the appropriate
1165 * memory manager.
1166 *
1167 * interpretation of sync_flags
1168 * VM_SYNC_INVALIDATE - discard pages, only return precious
1169 * pages to manager.
1170 *
1171 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
1172 * - discard pages, write dirty or precious
1173 * pages back to memory manager.
1174 *
1175 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
1176 * - write dirty or precious pages back to
1177 * the memory manager.
1178 *
1179 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
1180 * is a hole in the region, and we would
1181 * have returned KERN_SUCCESS, return
1182 * KERN_INVALID_ADDRESS instead.
1183 *
1184 * RETURNS
1185 * KERN_INVALID_TASK Bad task parameter
1186 * KERN_INVALID_ARGUMENT both sync and async were specified.
1187 * KERN_SUCCESS The usual.
1188 * KERN_INVALID_ADDRESS There was a hole in the region.
1189 */
1190
1191 kern_return_t
1192 mach_vm_msync(
1193 vm_map_t map,
1194 mach_vm_address_t address,
1195 mach_vm_size_t size,
1196 vm_sync_t sync_flags)
1197 {
1198
1199 if (map == VM_MAP_NULL)
1200 return(KERN_INVALID_TASK);
1201
1202 return vm_map_msync(map, (vm_map_address_t)address,
1203 (vm_map_size_t)size, sync_flags);
1204 }
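/*
 * Illustrative user-space sketch: synchronously flushing dirty pages of a
 * mapped file region back to its pager ("addr" and "size" are example
 * values covering an existing mapping).
 *
 *	kern_return_t kr = mach_vm_msync(mach_task_self(), addr, size,
 *	    VM_SYNC_SYNCHRONOUS);
 *
 * Passing VM_SYNC_INVALIDATE | VM_SYNC_SYNCHRONOUS would additionally
 * discard the resident pages after they have been pushed out.
 */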
1205
1206 /*
1207 * vm_msync
1208 *
1209 * Synchronises the memory range specified with its backing store
1210 * image by either flushing or cleaning the contents to the appropriate
1211 * memory manager.
1212 *
1213 * interpretation of sync_flags
1214 * VM_SYNC_INVALIDATE - discard pages, only return precious
1215 * pages to manager.
1216 *
1217 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
1218 * - discard pages, write dirty or precious
1219 * pages back to memory manager.
1220 *
1221 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
1222 * - write dirty or precious pages back to
1223 * the memory manager.
1224 *
1225 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
1226 * is a hole in the region, and we would
1227 * have returned KERN_SUCCESS, return
1228 * KERN_INVALID_ADDRESS instead.
1229 *
1230 * The addressability of the range is limited to that which can
1231 * be described by a vm_address_t.
1232 *
1233 * RETURNS
1234 * KERN_INVALID_TASK Bad task parameter
1235 * KERN_INVALID_ARGUMENT both sync and async were specified.
1236 * KERN_SUCCESS The usual.
1237 * KERN_INVALID_ADDRESS There was a hole in the region.
1238 */
1239
1240 kern_return_t
1241 vm_msync(
1242 vm_map_t map,
1243 vm_address_t address,
1244 vm_size_t size,
1245 vm_sync_t sync_flags)
1246 {
1247
1248 if (map == VM_MAP_NULL)
1249 return(KERN_INVALID_TASK);
1250
1251 return vm_map_msync(map, (vm_map_address_t)address,
1252 (vm_map_size_t)size, sync_flags);
1253 }
1254
1255
1256 int
1257 vm_toggle_entry_reuse(int toggle, int *old_value)
1258 {
1259 vm_map_t map = current_map();
1260
1261 if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){
1262 *old_value = map->disable_vmentry_reuse;
1263 } else if(toggle == VM_TOGGLE_SET){
1264 vm_map_lock(map);
1265 map->disable_vmentry_reuse = TRUE;
1266 if (map->first_free == vm_map_to_entry(map)) {
1267 map->highest_entry_end = vm_map_min(map);
1268 } else {
1269 map->highest_entry_end = map->first_free->vme_end;
1270 }
1271 vm_map_unlock(map);
1272 } else if (toggle == VM_TOGGLE_CLEAR){
1273 vm_map_lock(map);
1274 map->disable_vmentry_reuse = FALSE;
1275 vm_map_unlock(map);
1276 } else
1277 return KERN_INVALID_ARGUMENT;
1278
1279 return KERN_SUCCESS;
1280 }
1281
1282 /*
1283 * mach_vm_behavior_set
1284 *
1285 * Sets the paging behavior attribute for the specified range
1286 * in the specified map.
1287 *
1288 * This routine will fail with KERN_INVALID_ADDRESS if any address
1289 * in [start,start+size) is not a valid allocated memory region.
1290 */
1291 kern_return_t
1292 mach_vm_behavior_set(
1293 vm_map_t map,
1294 mach_vm_offset_t start,
1295 mach_vm_size_t size,
1296 vm_behavior_t new_behavior)
1297 {
1298 if ((map == VM_MAP_NULL) || (start + size < start))
1299 return(KERN_INVALID_ARGUMENT);
1300
1301 if (size == 0)
1302 return KERN_SUCCESS;
1303
1304 return(vm_map_behavior_set(map, vm_map_trunc_page(start),
1305 vm_map_round_page(start+size), new_behavior));
1306 }
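/*
 * Illustrative user-space sketch: hinting that a range will be touched
 * sequentially, similar in spirit to madvise(MADV_SEQUENTIAL); "addr" and
 * "size" are example values and must lie within an existing allocation.
 *
 *	kern_return_t kr = mach_vm_behavior_set(mach_task_self(), addr, size,
 *	    VM_BEHAVIOR_SEQUENTIAL);
 */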
1307
1308 /*
1309 * vm_behavior_set
1310 *
1311 * Sets the paging behavior attribute for the specified range
1312 * in the specified map.
1313 *
1314 * This routine will fail with KERN_INVALID_ADDRESS if any address
1315 * in [start,start+size) is not a valid allocated memory region.
1316 *
1317 * This routine is potentially limited in addressability by the
1318 * use of vm_offset_t (if the map provided is larger than the
1319 * kernel's).
1320 */
1321 kern_return_t
1322 vm_behavior_set(
1323 vm_map_t map,
1324 vm_offset_t start,
1325 vm_size_t size,
1326 vm_behavior_t new_behavior)
1327 {
1328 if ((map == VM_MAP_NULL) || (start + size < start))
1329 return(KERN_INVALID_ARGUMENT);
1330
1331 if (size == 0)
1332 return KERN_SUCCESS;
1333
1334 return(vm_map_behavior_set(map, vm_map_trunc_page(start),
1335 vm_map_round_page(start+size), new_behavior));
1336 }
1337
1338 /*
1339 * mach_vm_region:
1340 *
1341 * User call to obtain information about a region in
1342 * a task's address map. Currently, only one flavor is
1343 * supported.
1344 *
1345 * XXX The reserved and behavior fields cannot be filled
1346 * in until the vm merge from the IK is completed, and
1347 * vm_reserve is implemented.
1348 *
1349 * XXX Dependency: syscall_vm_region() also supports only one flavor.
1350 */
1351
1352 kern_return_t
1353 mach_vm_region(
1354 vm_map_t map,
1355 mach_vm_offset_t *address, /* IN/OUT */
1356 mach_vm_size_t *size, /* OUT */
1357 vm_region_flavor_t flavor, /* IN */
1358 vm_region_info_t info, /* OUT */
1359 mach_msg_type_number_t *count, /* IN/OUT */
1360 mach_port_t *object_name) /* OUT */
1361 {
1362 vm_map_offset_t map_addr;
1363 vm_map_size_t map_size;
1364 kern_return_t kr;
1365
1366 if (VM_MAP_NULL == map)
1367 return KERN_INVALID_ARGUMENT;
1368
1369 map_addr = (vm_map_offset_t)*address;
1370 map_size = (vm_map_size_t)*size;
1371
1372 /* legacy conversion */
1373 if (VM_REGION_BASIC_INFO == flavor)
1374 flavor = VM_REGION_BASIC_INFO_64;
1375
1376 kr = vm_map_region(map,
1377 &map_addr, &map_size,
1378 flavor, info, count,
1379 object_name);
1380
1381 *address = map_addr;
1382 *size = map_size;
1383 return kr;
1384 }
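/*
 * Illustrative user-space sketch: looking up the first region at or after
 * "probe_addr" in the caller's own map (names are example values).
 *
 *	mach_vm_address_t probe_addr = 1;
 *	mach_vm_size_t region_size = 0;
 *	vm_region_basic_info_data_64_t info;
 *	mach_msg_type_number_t info_cnt = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t object_name = MACH_PORT_NULL;
 *
 *	kern_return_t kr = mach_vm_region(mach_task_self(), &probe_addr,
 *	    &region_size, VM_REGION_BASIC_INFO_64,
 *	    (vm_region_info_t)&info, &info_cnt, &object_name);
 *	// on success, probe_addr/region_size describe the region and
 *	// info.protection holds its current protection
 */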
1385
1386 /*
1387 * vm_region_64 and vm_region:
1388 *
1389 * User call to obtain information about a region in
1390 * a task's address map. Currently, only one flavor is
1391 * supported.
1392 *
1393 * XXX The reserved and behavior fields cannot be filled
1394 * in until the vm merge from the IK is completed, and
1395 * vm_reserve is implemented.
1396 *
1397 * XXX Dependency: syscall_vm_region() also supports only one flavor.
1398 */
1399
1400 kern_return_t
1401 vm_region_64(
1402 vm_map_t map,
1403 vm_offset_t *address, /* IN/OUT */
1404 vm_size_t *size, /* OUT */
1405 vm_region_flavor_t flavor, /* IN */
1406 vm_region_info_t info, /* OUT */
1407 mach_msg_type_number_t *count, /* IN/OUT */
1408 mach_port_t *object_name) /* OUT */
1409 {
1410 vm_map_offset_t map_addr;
1411 vm_map_size_t map_size;
1412 kern_return_t kr;
1413
1414 if (VM_MAP_NULL == map)
1415 return KERN_INVALID_ARGUMENT;
1416
1417 map_addr = (vm_map_offset_t)*address;
1418 map_size = (vm_map_size_t)*size;
1419
1420 /* legacy conversion */
1421 if (VM_REGION_BASIC_INFO == flavor)
1422 flavor = VM_REGION_BASIC_INFO_64;
1423
1424 kr = vm_map_region(map,
1425 &map_addr, &map_size,
1426 flavor, info, count,
1427 object_name);
1428
1429 *address = CAST_DOWN(vm_offset_t, map_addr);
1430 *size = CAST_DOWN(vm_size_t, map_size);
1431
1432 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
1433 return KERN_INVALID_ADDRESS;
1434 return kr;
1435 }
1436
1437 kern_return_t
1438 vm_region(
1439 vm_map_t map,
1440 vm_address_t *address, /* IN/OUT */
1441 vm_size_t *size, /* OUT */
1442 vm_region_flavor_t flavor, /* IN */
1443 vm_region_info_t info, /* OUT */
1444 mach_msg_type_number_t *count, /* IN/OUT */
1445 mach_port_t *object_name) /* OUT */
1446 {
1447 vm_map_address_t map_addr;
1448 vm_map_size_t map_size;
1449 kern_return_t kr;
1450
1451 if (VM_MAP_NULL == map)
1452 return KERN_INVALID_ARGUMENT;
1453
1454 map_addr = (vm_map_address_t)*address;
1455 map_size = (vm_map_size_t)*size;
1456
1457 kr = vm_map_region(map,
1458 &map_addr, &map_size,
1459 flavor, info, count,
1460 object_name);
1461
1462 *address = CAST_DOWN(vm_address_t, map_addr);
1463 *size = CAST_DOWN(vm_size_t, map_size);
1464
1465 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
1466 return KERN_INVALID_ADDRESS;
1467 return kr;
1468 }
1469
1470 /*
1471 *	mach_vm_region_recurse: A form of mach_vm_region which follows the
1472 * submaps in a target map
1473 *
1474 */
1475 kern_return_t
1476 mach_vm_region_recurse(
1477 vm_map_t map,
1478 mach_vm_address_t *address,
1479 mach_vm_size_t *size,
1480 uint32_t *depth,
1481 vm_region_recurse_info_t info,
1482 mach_msg_type_number_t *infoCnt)
1483 {
1484 vm_map_address_t map_addr;
1485 vm_map_size_t map_size;
1486 kern_return_t kr;
1487
1488 if (VM_MAP_NULL == map)
1489 return KERN_INVALID_ARGUMENT;
1490
1491 map_addr = (vm_map_address_t)*address;
1492 map_size = (vm_map_size_t)*size;
1493
1494 kr = vm_map_region_recurse_64(
1495 map,
1496 &map_addr,
1497 &map_size,
1498 depth,
1499 (vm_region_submap_info_64_t)info,
1500 infoCnt);
1501
1502 *address = map_addr;
1503 *size = map_size;
1504 return kr;
1505 }
1506
1507 /*
1508 *	vm_region_recurse_64: A form of vm_region which follows the
1509 * submaps in a target map
1510 *
1511 */
1512 kern_return_t
1513 vm_region_recurse_64(
1514 vm_map_t map,
1515 vm_address_t *address,
1516 vm_size_t *size,
1517 uint32_t *depth,
1518 vm_region_recurse_info_64_t info,
1519 mach_msg_type_number_t *infoCnt)
1520 {
1521 vm_map_address_t map_addr;
1522 vm_map_size_t map_size;
1523 kern_return_t kr;
1524
1525 if (VM_MAP_NULL == map)
1526 return KERN_INVALID_ARGUMENT;
1527
1528 map_addr = (vm_map_address_t)*address;
1529 map_size = (vm_map_size_t)*size;
1530
1531 kr = vm_map_region_recurse_64(
1532 map,
1533 &map_addr,
1534 &map_size,
1535 depth,
1536 (vm_region_submap_info_64_t)info,
1537 infoCnt);
1538
1539 *address = CAST_DOWN(vm_address_t, map_addr);
1540 *size = CAST_DOWN(vm_size_t, map_size);
1541
1542 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
1543 return KERN_INVALID_ADDRESS;
1544 return kr;
1545 }
1546
1547 kern_return_t
1548 vm_region_recurse(
1549 vm_map_t map,
1550 vm_offset_t *address, /* IN/OUT */
1551 vm_size_t *size, /* OUT */
1552 natural_t *depth, /* IN/OUT */
1553 vm_region_recurse_info_t info32, /* IN/OUT */
1554 mach_msg_type_number_t *infoCnt) /* IN/OUT */
1555 {
1556 vm_region_submap_info_data_64_t info64;
1557 vm_region_submap_info_t info;
1558 vm_map_address_t map_addr;
1559 vm_map_size_t map_size;
1560 kern_return_t kr;
1561
1562 if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
1563 return KERN_INVALID_ARGUMENT;
1564
1565
1566 map_addr = (vm_map_address_t)*address;
1567 map_size = (vm_map_size_t)*size;
1568 info = (vm_region_submap_info_t)info32;
1569 *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;
1570
1571 kr = vm_map_region_recurse_64(map, &map_addr,&map_size,
1572 depth, &info64, infoCnt);
1573
1574 info->protection = info64.protection;
1575 info->max_protection = info64.max_protection;
1576 info->inheritance = info64.inheritance;
1577 info->offset = (uint32_t)info64.offset; /* trouble-maker */
1578 info->user_tag = info64.user_tag;
1579 info->pages_resident = info64.pages_resident;
1580 info->pages_shared_now_private = info64.pages_shared_now_private;
1581 info->pages_swapped_out = info64.pages_swapped_out;
1582 info->pages_dirtied = info64.pages_dirtied;
1583 info->ref_count = info64.ref_count;
1584 info->shadow_depth = info64.shadow_depth;
1585 info->external_pager = info64.external_pager;
1586 info->share_mode = info64.share_mode;
1587 info->is_submap = info64.is_submap;
1588 info->behavior = info64.behavior;
1589 info->object_id = info64.object_id;
1590 info->user_wired_count = info64.user_wired_count;
1591
1592 *address = CAST_DOWN(vm_address_t, map_addr);
1593 *size = CAST_DOWN(vm_size_t, map_size);
1594 *infoCnt = VM_REGION_SUBMAP_INFO_COUNT;
1595
1596 if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
1597 return KERN_INVALID_ADDRESS;
1598 return kr;
1599 }
1600
1601 kern_return_t
1602 mach_vm_purgable_control(
1603 vm_map_t map,
1604 mach_vm_offset_t address,
1605 vm_purgable_t control,
1606 int *state)
1607 {
1608 if (VM_MAP_NULL == map)
1609 return KERN_INVALID_ARGUMENT;
1610
1611 return vm_map_purgable_control(map,
1612 vm_map_trunc_page(address),
1613 control,
1614 state);
1615 }
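/*
 * Illustrative user-space sketch: marking a purgeable allocation volatile so
 * the system may reclaim it under pressure, then checking whether it was
 * emptied ("addr" is an example value inside a purgeable region, e.g. one
 * allocated with VM_FLAGS_PURGABLE).
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kern_return_t kr = mach_vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *
 *	state = 0;
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_GET_STATE, &state);
 *	// state is VM_PURGABLE_EMPTY if the contents were reclaimed
 */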
1616
1617 kern_return_t
1618 vm_purgable_control(
1619 vm_map_t map,
1620 vm_offset_t address,
1621 vm_purgable_t control,
1622 int *state)
1623 {
1624 if (VM_MAP_NULL == map)
1625 return KERN_INVALID_ARGUMENT;
1626
1627 return vm_map_purgable_control(map,
1628 vm_map_trunc_page(address),
1629 control,
1630 state);
1631 }
1632
1633
1634 /*
1635 * Ordinarily, the right to allocate CPM is restricted
1636 * to privileged applications (those that can gain access
1637 * to the host priv port). Set this variable to zero if
1638 * you want to let any application allocate CPM.
1639 */
1640 unsigned int vm_allocate_cpm_privileged = 0;
1641
1642 /*
1643 * Allocate memory in the specified map, with the caveat that
1644 * the memory is physically contiguous. This call may fail
1645 * if the system can't find sufficient contiguous memory.
1646 * This call may cause or lead to heart-stopping amounts of
1647 * paging activity.
1648 *
1649 * Memory obtained from this call should be freed in the
1650 * normal way, viz., via vm_deallocate.
1651 */
1652 kern_return_t
1653 vm_allocate_cpm(
1654 host_priv_t host_priv,
1655 vm_map_t map,
1656 vm_address_t *addr,
1657 vm_size_t size,
1658 int flags)
1659 {
1660 vm_map_address_t map_addr;
1661 vm_map_size_t map_size;
1662 kern_return_t kr;
1663
1664 if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
1665 return KERN_INVALID_HOST;
1666
1667 if (VM_MAP_NULL == map)
1668 return KERN_INVALID_ARGUMENT;
1669
1670 map_addr = (vm_map_address_t)*addr;
1671 map_size = (vm_map_size_t)size;
1672
1673 kr = vm_map_enter_cpm(map,
1674 &map_addr,
1675 map_size,
1676 flags);
1677
1678 *addr = CAST_DOWN(vm_address_t, map_addr);
1679 return kr;
1680 }
1681
1682
1683 kern_return_t
1684 mach_vm_page_query(
1685 vm_map_t map,
1686 mach_vm_offset_t offset,
1687 int *disposition,
1688 int *ref_count)
1689 {
1690 if (VM_MAP_NULL == map)
1691 return KERN_INVALID_ARGUMENT;
1692
1693 return vm_map_page_query_internal(map,
1694 vm_map_trunc_page(offset),
1695 disposition, ref_count);
1696 }
1697
1698 kern_return_t
1699 vm_map_page_query(
1700 vm_map_t map,
1701 vm_offset_t offset,
1702 int *disposition,
1703 int *ref_count)
1704 {
1705 if (VM_MAP_NULL == map)
1706 return KERN_INVALID_ARGUMENT;
1707
1708 return vm_map_page_query_internal(map,
1709 vm_map_trunc_page(offset),
1710 disposition, ref_count);
1711 }
1712
1713 kern_return_t
1714 mach_vm_page_info(
1715 vm_map_t map,
1716 mach_vm_address_t address,
1717 vm_page_info_flavor_t flavor,
1718 vm_page_info_t info,
1719 mach_msg_type_number_t *count)
1720 {
1721 kern_return_t kr;
1722
1723 if (map == VM_MAP_NULL) {
1724 return KERN_INVALID_ARGUMENT;
1725 }
1726
1727 kr = vm_map_page_info(map, address, flavor, info, count);
1728 return kr;
1729 }
1730
1731 /* map a (whole) upl into an address space */
1732 kern_return_t
1733 vm_upl_map(
1734 vm_map_t map,
1735 upl_t upl,
1736 vm_address_t *dst_addr)
1737 {
1738 vm_map_offset_t map_addr;
1739 kern_return_t kr;
1740
1741 if (VM_MAP_NULL == map)
1742 return KERN_INVALID_ARGUMENT;
1743
1744 kr = vm_map_enter_upl(map, upl, &map_addr);
1745 *dst_addr = CAST_DOWN(vm_address_t, map_addr);
1746 return kr;
1747 }
1748
1749 kern_return_t
1750 vm_upl_unmap(
1751 vm_map_t map,
1752 upl_t upl)
1753 {
1754 if (VM_MAP_NULL == map)
1755 return KERN_INVALID_ARGUMENT;
1756
1757 return (vm_map_remove_upl(map, upl));
1758 }
1759
1760 /* Retrieve a upl for an object underlying an address range in a map */
1761
1762 kern_return_t
1763 vm_map_get_upl(
1764 vm_map_t map,
1765 vm_map_offset_t map_offset,
1766 upl_size_t *upl_size,
1767 upl_t *upl,
1768 upl_page_info_array_t page_list,
1769 unsigned int *count,
1770 int *flags,
1771 int force_data_sync)
1772 {
1773 int map_flags;
1774 kern_return_t kr;
1775
1776 if (VM_MAP_NULL == map)
1777 return KERN_INVALID_ARGUMENT;
1778
1779 map_flags = *flags & ~UPL_NOZEROFILL;
1780 if (force_data_sync)
1781 map_flags |= UPL_FORCE_DATA_SYNC;
1782
1783 kr = vm_map_create_upl(map,
1784 map_offset,
1785 upl_size,
1786 upl,
1787 page_list,
1788 count,
1789 &map_flags);
1790
1791 *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
1792 return kr;
1793 }
1794
1795 /*
1796 * mach_make_memory_entry_64
1797 *
1798 * Think of it as a two-stage vm_remap() operation. First
1799 *	you get a handle.  Second, you map that handle somewhere
1800 *	else.  Rather than doing it all at once (and
1801 * without needing access to the other whole map).
1802 */
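/*
 * Illustrative user-space sketch of the two-stage pattern: create a named
 * entry backed by fresh anonymous memory, then map it (the entry port can
 * also be sent to another task and mapped there). Names are examples.
 *
 *	memory_object_size_t entry_size = 4096;
 *	mach_port_t entry_port = MACH_PORT_NULL;
 *	kern_return_t kr;
 *
 *	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, 0,
 *	    MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
 *	    &entry_port, MACH_PORT_NULL);
 *
 *	mach_vm_address_t addr = 0;
 *	if (kr == KERN_SUCCESS) {
 *		kr = mach_vm_map(mach_task_self(), &addr, entry_size, 0,
 *		    VM_FLAGS_ANYWHERE, entry_port, 0, FALSE,
 *		    VM_PROT_READ | VM_PROT_WRITE,
 *		    VM_PROT_READ | VM_PROT_WRITE,
 *		    VM_INHERIT_DEFAULT);
 *	}
 */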
1803
1804 kern_return_t
1805 mach_make_memory_entry_64(
1806 vm_map_t target_map,
1807 memory_object_size_t *size,
1808 memory_object_offset_t offset,
1809 vm_prot_t permission,
1810 ipc_port_t *object_handle,
1811 ipc_port_t parent_handle)
1812 {
1813 vm_map_version_t version;
1814 vm_named_entry_t parent_entry;
1815 vm_named_entry_t user_entry;
1816 ipc_port_t user_handle;
1817 kern_return_t kr;
1818 vm_map_t real_map;
1819
1820 /* needed for call to vm_map_lookup_locked */
1821 boolean_t wired;
1822 vm_object_offset_t obj_off;
1823 vm_prot_t prot;
1824 struct vm_object_fault_info fault_info;
1825 vm_object_t object;
1826 vm_object_t shadow_object;
1827
1828 /* needed for direct map entry manipulation */
1829 vm_map_entry_t map_entry;
1830 vm_map_entry_t next_entry;
1831 vm_map_t local_map;
1832 vm_map_t original_map = target_map;
1833 vm_map_size_t total_size;
1834 vm_map_size_t map_size;
1835 vm_map_offset_t map_offset;
1836 vm_map_offset_t local_offset;
1837 vm_object_size_t mappable_size;
1838
1839 unsigned int access;
1840 vm_prot_t protections;
1841 vm_prot_t original_protections, mask_protections;
1842 unsigned int wimg_mode;
1843
1844 boolean_t force_shadow = FALSE;
1845
1846 if (((permission & 0x00FF0000) &
1847 ~(MAP_MEM_ONLY |
1848 MAP_MEM_NAMED_CREATE |
1849 MAP_MEM_PURGABLE |
1850 MAP_MEM_NAMED_REUSE))) {
1851 /*
1852 * Unknown flag: reject for forward compatibility.
1853 */
1854 return KERN_INVALID_VALUE;
1855 }
1856
1857 if (parent_handle != IP_NULL &&
1858 ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
1859 parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
1860 } else {
1861 parent_entry = NULL;
1862 }
1863
1864 original_protections = permission & VM_PROT_ALL;
1865 protections = original_protections;
1866 mask_protections = permission & VM_PROT_IS_MASK;
1867 access = GET_MAP_MEM(permission);
1868
1869 user_handle = IP_NULL;
1870 user_entry = NULL;
1871
1872 map_offset = vm_map_trunc_page(offset);
1873 map_size = vm_map_round_page(*size);
1874
1875 if (permission & MAP_MEM_ONLY) {
1876 boolean_t parent_is_object;
1877
1878 if (parent_entry == NULL) {
1879 return KERN_INVALID_ARGUMENT;
1880 }
1881
1882 parent_is_object = !(parent_entry->is_sub_map || parent_entry->is_pager);
1883 object = parent_entry->backing.object;
1884 if(parent_is_object && object != VM_OBJECT_NULL)
1885 wimg_mode = object->wimg_bits;
1886 else
1887 wimg_mode = VM_WIMG_USE_DEFAULT;
1888 if((access != GET_MAP_MEM(parent_entry->protection)) &&
1889 !(parent_entry->protection & VM_PROT_WRITE)) {
1890 return KERN_INVALID_RIGHT;
1891 }
1892 if(access == MAP_MEM_IO) {
1893 SET_MAP_MEM(access, parent_entry->protection);
1894 wimg_mode = VM_WIMG_IO;
1895 } else if (access == MAP_MEM_COPYBACK) {
1896 SET_MAP_MEM(access, parent_entry->protection);
1897 wimg_mode = VM_WIMG_USE_DEFAULT;
1898 } else if (access == MAP_MEM_INNERWBACK) {
1899 SET_MAP_MEM(access, parent_entry->protection);
1900 wimg_mode = VM_WIMG_INNERWBACK;
1901 } else if (access == MAP_MEM_WTHRU) {
1902 SET_MAP_MEM(access, parent_entry->protection);
1903 wimg_mode = VM_WIMG_WTHRU;
1904 } else if (access == MAP_MEM_WCOMB) {
1905 SET_MAP_MEM(access, parent_entry->protection);
1906 wimg_mode = VM_WIMG_WCOMB;
1907 }
1908 if (parent_is_object && object &&
1909 (access != MAP_MEM_NOOP) &&
1910 (!(object->nophyscache))) {
1911
1912 if (object->wimg_bits != wimg_mode) {
1913 vm_object_lock(object);
1914 vm_object_change_wimg_mode(object, wimg_mode);
1915 vm_object_unlock(object);
1916 }
1917 }
1918 if (object_handle)
1919 *object_handle = IP_NULL;
1920 return KERN_SUCCESS;
1921 }
1922
1923 if(permission & MAP_MEM_NAMED_CREATE) {
1924 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
1925 if (kr != KERN_SUCCESS) {
1926 return KERN_FAILURE;
1927 }
1928
1929 /*
1930 * Force the creation of the VM object now.
1931 */
1932 if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
1933 /*
1934 * LP64todo - for now, we can only allocate 4GB-4096
1935 * internal objects because the default pager can't
1936 * page bigger ones. Remove this when it can.
1937 */
1938 kr = KERN_FAILURE;
1939 goto make_mem_done;
1940 }
1941
1942 object = vm_object_allocate(map_size);
1943 assert(object != VM_OBJECT_NULL);
1944
1945 if (permission & MAP_MEM_PURGABLE) {
1946 if (! (permission & VM_PROT_WRITE)) {
1947 /* if we can't write, we can't purge */
1948 vm_object_deallocate(object);
1949 kr = KERN_INVALID_ARGUMENT;
1950 goto make_mem_done;
1951 }
1952 object->purgable = VM_PURGABLE_NONVOLATILE;
1953 }
1954
1955 /*
1956 * The VM object is brand new and nobody else knows about it,
1957 * so we don't need to lock it.
1958 */
1959
1960 wimg_mode = object->wimg_bits;
1961 if (access == MAP_MEM_IO) {
1962 wimg_mode = VM_WIMG_IO;
1963 } else if (access == MAP_MEM_COPYBACK) {
1964 wimg_mode = VM_WIMG_USE_DEFAULT;
1965 } else if (access == MAP_MEM_INNERWBACK) {
1966 wimg_mode = VM_WIMG_INNERWBACK;
1967 } else if (access == MAP_MEM_WTHRU) {
1968 wimg_mode = VM_WIMG_WTHRU;
1969 } else if (access == MAP_MEM_WCOMB) {
1970 wimg_mode = VM_WIMG_WCOMB;
1971 }
1972 if (access != MAP_MEM_NOOP) {
1973 object->wimg_bits = wimg_mode;
1974 }
1975 /* the object has no pages, so no WIMG bits to update here */
1976
1977 /*
1978 * XXX
1979 * We use this path when we want to make sure that
1980 * nobody messes with the object (coalesce, for
1981 * example) before we map it.
1982 * We might want to use these objects for transposition via
1983 * vm_object_transpose() too, so we don't want any copy or
1984 * shadow objects either...
1985 */
1986 object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
1987
1988 user_entry->backing.object = object;
1989 user_entry->internal = TRUE;
1990 user_entry->is_sub_map = FALSE;
1991 user_entry->is_pager = FALSE;
1992 user_entry->offset = 0;
1993 user_entry->protection = protections;
1994 SET_MAP_MEM(access, user_entry->protection);
1995 user_entry->size = map_size;
1996
1997 /* user_object pager and internal fields are not used */
1998 /* when the object field is filled in. */
1999
2000 *size = CAST_DOWN(vm_size_t, map_size);
2001 *object_handle = user_handle;
2002 return KERN_SUCCESS;
2003 }
2004
2005 if (parent_entry == NULL ||
2006 (permission & MAP_MEM_NAMED_REUSE)) {
2007
2008 /* Create a named object based on address range within the task map */
2009 /* Go find the object at given address */
2010
2011 if (target_map == VM_MAP_NULL) {
2012 return KERN_INVALID_TASK;
2013 }
2014
2015 redo_lookup:
2016 protections = original_protections;
2017 vm_map_lock_read(target_map);
2018
2019 /* get the object associated with the target address */
2020 /* note we check the permission of the range against */
2021 /* that requested by the caller */
2022
2023 kr = vm_map_lookup_locked(&target_map, map_offset,
2024 protections | mask_protections,
2025 OBJECT_LOCK_EXCLUSIVE, &version,
2026 &object, &obj_off, &prot, &wired,
2027 &fault_info,
2028 &real_map);
2029 if (kr != KERN_SUCCESS) {
2030 vm_map_unlock_read(target_map);
2031 goto make_mem_done;
2032 }
2033 if (mask_protections) {
2034 /*
2035 * The caller asked us to use the "protections" as
2036 * a mask, so restrict "protections" to what this
2037 * mapping actually allows.
2038 */
2039 protections &= prot;
2040 }
2041 if (((prot & protections) != protections)
2042 || (object == kernel_object)) {
2043 kr = KERN_INVALID_RIGHT;
2044 vm_object_unlock(object);
2045 vm_map_unlock_read(target_map);
2046 if(real_map != target_map)
2047 vm_map_unlock_read(real_map);
2048 if(object == kernel_object) {
2049 printf("Warning: Attempt to create a named"
2050 " entry from the kernel_object\n");
2051 }
2052 goto make_mem_done;
2053 }
2054
2055 /* We have an object, now check to see if this object */
2056 /* is suitable. If not, create a shadow and share that */
2057
2058 /*
2059 * We have to unlock the VM object to avoid deadlocking with
2060 * a VM map lock (the lock ordering is map, the object), if we
2061 * need to modify the VM map to create a shadow object. Since
2062 * we might release the VM map lock below anyway, we have
2063 * to release the VM map lock now.
2064 * XXX FBDP There must be a way to avoid this double lookup...
2065 *
2066 * Take an extra reference on the VM object to make sure it's
2067 * not going to disappear.
2068 */
2069 vm_object_reference_locked(object); /* extra ref to hold obj */
2070 vm_object_unlock(object);
2071
2072 local_map = original_map;
2073 local_offset = map_offset;
2074 if(target_map != local_map) {
2075 vm_map_unlock_read(target_map);
2076 if(real_map != target_map)
2077 vm_map_unlock_read(real_map);
2078 vm_map_lock_read(local_map);
2079 target_map = local_map;
2080 real_map = local_map;
2081 }
2082 while(TRUE) {
2083 if(!vm_map_lookup_entry(local_map,
2084 local_offset, &map_entry)) {
2085 kr = KERN_INVALID_ARGUMENT;
2086 vm_map_unlock_read(target_map);
2087 if(real_map != target_map)
2088 vm_map_unlock_read(real_map);
2089 vm_object_deallocate(object); /* release extra ref */
2090 object = VM_OBJECT_NULL;
2091 goto make_mem_done;
2092 }
2093 if(!(map_entry->is_sub_map)) {
2094 if(map_entry->object.vm_object != object) {
2095 kr = KERN_INVALID_ARGUMENT;
2096 vm_map_unlock_read(target_map);
2097 if(real_map != target_map)
2098 vm_map_unlock_read(real_map);
2099 vm_object_deallocate(object); /* release extra ref */
2100 object = VM_OBJECT_NULL;
2101 goto make_mem_done;
2102 }
2103 break;
2104 } else {
2105 vm_map_t tmap;
2106 tmap = local_map;
2107 local_map = map_entry->object.sub_map;
2108
2109 vm_map_lock_read(local_map);
2110 vm_map_unlock_read(tmap);
2111 target_map = local_map;
2112 real_map = local_map;
2113 local_offset = local_offset - map_entry->vme_start;
2114 local_offset += map_entry->offset;
2115 }
2116 }
2117
2118 /*
2119 * We found the VM map entry, lock the VM object again.
2120 */
2121 vm_object_lock(object);
2122 if(map_entry->wired_count) {
2123 /* JMM - The check below should be reworked instead. */
2124 object->true_share = TRUE;
2125 }
2126 if (mask_protections) {
2127 /*
2128 * The caller asked us to use the "protections" as
2129 * a mask, so restrict "protections" to what this
2130 * mapping actually allows.
2131 */
2132 protections &= map_entry->max_protection;
2133 }
2134 if(((map_entry->max_protection) & protections) != protections) {
2135 kr = KERN_INVALID_RIGHT;
2136 vm_object_unlock(object);
2137 vm_map_unlock_read(target_map);
2138 if(real_map != target_map)
2139 vm_map_unlock_read(real_map);
2140 vm_object_deallocate(object);
2141 object = VM_OBJECT_NULL;
2142 goto make_mem_done;
2143 }
2144
2145 mappable_size = fault_info.hi_offset - obj_off;
2146 total_size = map_entry->vme_end - map_entry->vme_start;
2147 if(map_size > mappable_size) {
2148 /* try to extend mappable size if the entries */
2149 /* following are from the same object and are */
2150 /* compatible */
2151 next_entry = map_entry->vme_next;
2152 /* let's see if the next map entry is still */
2153 /* pointing at this object and is contiguous */
2154 while(map_size > mappable_size) {
2155 if((next_entry->object.vm_object == object) &&
2156 (next_entry->vme_start ==
2157 next_entry->vme_prev->vme_end) &&
2158 (next_entry->offset ==
2159 next_entry->vme_prev->offset +
2160 (next_entry->vme_prev->vme_end -
2161 next_entry->vme_prev->vme_start))) {
2162 if (mask_protections) {
2163 /*
2164 * The caller asked us to use
2165 * the "protections" as a mask,
2166 * so restrict "protections" to
2167 * what this mapping actually
2168 * allows.
2169 */
2170 protections &= next_entry->max_protection;
2171 }
2172 if ((next_entry->wired_count) &&
2173 (map_entry->wired_count == 0)) {
2174 break;
2175 }
2176 if(((next_entry->max_protection)
2177 & protections) != protections) {
2178 break;
2179 }
2180 if (next_entry->needs_copy !=
2181 map_entry->needs_copy)
2182 break;
2183 mappable_size += next_entry->vme_end
2184 - next_entry->vme_start;
2185 total_size += next_entry->vme_end
2186 - next_entry->vme_start;
2187 next_entry = next_entry->vme_next;
2188 } else {
2189 break;
2190 }
2191
2192 }
2193 }
2194
2195 #if !CONFIG_EMBEDDED
2196 if (vm_map_entry_should_cow_for_true_share(map_entry) &&
2197 object->vo_size > map_size &&
2198 map_size != 0) {
2199 /*
2200 * Set up the targeted range for copy-on-write to
2201 * limit the impact of "true_share"/"copy_delay" to
2202 * that range instead of the entire VM object...
2203 */
2204
2205 vm_object_unlock(object);
2206 if (vm_map_lock_read_to_write(target_map)) {
2207 vm_object_deallocate(object);
2208 target_map = original_map;
2209 goto redo_lookup;
2210 }
2211
2212 vm_map_clip_start(target_map, map_entry, vm_map_trunc_page(offset));
2213 vm_map_clip_end(target_map, map_entry, vm_map_round_page(offset) + map_size);
2214 force_shadow = TRUE;
2215
2216 map_size = map_entry->vme_end - map_entry->vme_start;
2217 total_size = map_size;
2218
2219 vm_map_lock_write_to_read(target_map);
2220 vm_object_lock(object);
2221 }
2222 #endif /* !CONFIG_EMBEDDED */
2223
2224 if(object->internal) {
2225 /* vm_map_lookup_locked will create a shadow if */
2226 /* needs_copy is set but does not check for the */
2227 /* other two conditions shown. It is important to */
2228 /* set up an object which will not be pulled from */
2229 /* under us. */
2230
2231 if (force_shadow ||
2232 ((map_entry->needs_copy ||
2233 object->shadowed ||
2234 (object->vo_size > total_size)) &&
2235 !object->true_share)) {
2236 /*
2237 * We have to unlock the VM object before
2238 * trying to upgrade the VM map lock, to
2239 * honor lock ordering (map then object).
2240 * Otherwise, we would deadlock if another
2241 * thread holds a read lock on the VM map and
2242 * is trying to acquire the VM object's lock.
2243 * We still hold an extra reference on the
2244 * VM object, guaranteeing that it won't
2245 * disappear.
2246 */
2247 vm_object_unlock(object);
2248
2249 if (vm_map_lock_read_to_write(target_map)) {
2250 /*
2251 * We couldn't upgrade our VM map lock
2252 * from "read" to "write" and we lost
2253 * our "read" lock.
2254 * Start all over again...
2255 */
2256 vm_object_deallocate(object); /* extra ref */
2257 target_map = original_map;
2258 goto redo_lookup;
2259 }
2260 vm_object_lock(object);
2261
2262 /*
2263 * JMM - We need to avoid coming here when the object
2264 * is wired by anybody, not just the current map. Why
2265 * couldn't we use the standard vm_object_copy_quickly()
2266 * approach here?
2267 */
2268
2269 /* create a shadow object */
2270 vm_object_shadow(&map_entry->object.vm_object,
2271 &map_entry->offset, total_size);
2272 shadow_object = map_entry->object.vm_object;
2273 vm_object_unlock(object);
2274
2275 prot = map_entry->protection & ~VM_PROT_WRITE;
2276
2277 if (override_nx(target_map, map_entry->alias) && prot)
2278 prot |= VM_PROT_EXECUTE;
2279
2280 vm_object_pmap_protect(
2281 object, map_entry->offset,
2282 total_size,
2283 ((map_entry->is_shared
2284 || target_map->mapped_in_other_pmaps)
2285 ? PMAP_NULL :
2286 target_map->pmap),
2287 map_entry->vme_start,
2288 prot);
2289 total_size -= (map_entry->vme_end
2290 - map_entry->vme_start);
2291 next_entry = map_entry->vme_next;
2292 map_entry->needs_copy = FALSE;
2293
2294 vm_object_lock(shadow_object);
2295 while (total_size) {
2296 assert((next_entry->wired_count == 0) ||
2297 (map_entry->wired_count));
2298
2299 if(next_entry->object.vm_object == object) {
2300 vm_object_reference_locked(shadow_object);
2301 next_entry->object.vm_object
2302 = shadow_object;
2303 vm_object_deallocate(object);
2304 next_entry->offset
2305 = next_entry->vme_prev->offset +
2306 (next_entry->vme_prev->vme_end
2307 - next_entry->vme_prev->vme_start);
2308 next_entry->needs_copy = FALSE;
2309 } else {
2310 panic("mach_make_memory_entry_64:"
2311 " map entries out of sync\n");
2312 }
2313 total_size -=
2314 next_entry->vme_end
2315 - next_entry->vme_start;
2316 next_entry = next_entry->vme_next;
2317 }
2318
2319 /*
2320 * Transfer our extra reference to the
2321 * shadow object.
2322 */
2323 vm_object_reference_locked(shadow_object);
2324 vm_object_deallocate(object); /* extra ref */
2325 object = shadow_object;
2326
2327 obj_off = (local_offset - map_entry->vme_start)
2328 + map_entry->offset;
2329
2330 vm_map_lock_write_to_read(target_map);
2331 }
2332 }
2333
2334 /* Note: in the future we could (if necessary) allow for */
2335 /* lists of memory objects, which would better support */
2336 /* fragmentation, but is it necessary? Callers should */
2337 /* be encouraged to create address-space-oriented shared */
2338 /* objects from CLEAN memory regions with a known and */
2339 /* well-defined history (i.e. no inheritance sharing), and */
2340 /* to make this call before making the region the target */
2341 /* of IPCs, etc. The code above, protecting against */
2342 /* delayed copies and the like, is mostly defensive. */
2343
2344 wimg_mode = object->wimg_bits;
2345 if(!(object->nophyscache)) {
2346 if(access == MAP_MEM_IO) {
2347 wimg_mode = VM_WIMG_IO;
2348 } else if (access == MAP_MEM_COPYBACK) {
2349 wimg_mode = VM_WIMG_USE_DEFAULT;
2350 } else if (access == MAP_MEM_INNERWBACK) {
2351 wimg_mode = VM_WIMG_INNERWBACK;
2352 } else if (access == MAP_MEM_WTHRU) {
2353 wimg_mode = VM_WIMG_WTHRU;
2354 } else if (access == MAP_MEM_WCOMB) {
2355 wimg_mode = VM_WIMG_WCOMB;
2356 }
2357 }
2358
2359 object->true_share = TRUE;
2360 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
2361 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
2362
2363 /*
2364 * The memory entry now points to this VM object and we
2365 * need to hold a reference on the VM object. Use the extra
2366 * reference we took earlier to keep the object alive when we
2367 * had to unlock it.
2368 */
2369
2370 vm_map_unlock_read(target_map);
2371 if(real_map != target_map)
2372 vm_map_unlock_read(real_map);
2373
2374 if (object->wimg_bits != wimg_mode)
2375 vm_object_change_wimg_mode(object, wimg_mode);
2376
2377 /* Clip "map_size" to the size of the mapped entry that */
2378 /* overlaps the region targeted for sharing, which is */
2379 /* (entry_end - entry_start) - */
2380 /* (offset of our starting address within the entry); */
2381 /* it corresponds to this: */
2382
2383 if(map_size > mappable_size)
2384 map_size = mappable_size;
2385
2386 if (permission & MAP_MEM_NAMED_REUSE) {
2387 /*
2388 * Compare what we got with the "parent_entry".
2389 * If they match, re-use the "parent_entry" instead
2390 * of creating a new one.
2391 */
2392 if (parent_entry != NULL &&
2393 parent_entry->backing.object == object &&
2394 parent_entry->internal == object->internal &&
2395 parent_entry->is_sub_map == FALSE &&
2396 parent_entry->is_pager == FALSE &&
2397 parent_entry->offset == obj_off &&
2398 parent_entry->protection == protections &&
2399 parent_entry->size == map_size) {
2400 /*
2401 * We have a match: re-use "parent_entry".
2402 */
2403 /* release our extra reference on object */
2404 vm_object_unlock(object);
2405 vm_object_deallocate(object);
2406 /* parent_entry->ref_count++; XXX ? */
2407 /* Get an extra send-right on handle */
2408 ipc_port_copy_send(parent_handle);
2409 *object_handle = parent_handle;
2410 return KERN_SUCCESS;
2411 } else {
2412 /*
2413 * No match: we need to create a new entry.
2414 * fall through...
2415 */
2416 }
2417 }
2418
2419 vm_object_unlock(object);
2420 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2421 != KERN_SUCCESS) {
2422 /* release our unused reference on the object */
2423 vm_object_deallocate(object);
2424 return KERN_FAILURE;
2425 }
2426
2427 user_entry->backing.object = object;
2428 user_entry->internal = object->internal;
2429 user_entry->is_sub_map = FALSE;
2430 user_entry->is_pager = FALSE;
2431 user_entry->offset = obj_off;
2432 user_entry->protection = protections;
2433 SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
2434 user_entry->size = map_size;
2435
2436 /* user_object pager and internal fields are not used */
2437 /* when the object field is filled in. */
2438
2439 *size = CAST_DOWN(vm_size_t, map_size);
2440 *object_handle = user_handle;
2441 return KERN_SUCCESS;
2442
2443 } else {
2444 /* The new object will be based on an existing named object */
2445
2446 if (parent_entry == NULL) {
2447 kr = KERN_INVALID_ARGUMENT;
2448 goto make_mem_done;
2449 }
2450 if((offset + map_size) > parent_entry->size) {
2451 kr = KERN_INVALID_ARGUMENT;
2452 goto make_mem_done;
2453 }
2454
2455 if (mask_protections) {
2456 /*
2457 * The caller asked us to use the "protections" as
2458 * a mask, so restrict "protections" to what this
2459 * mapping actually allows.
2460 */
2461 protections &= parent_entry->protection;
2462 }
2463 if((protections & parent_entry->protection) != protections) {
2464 kr = KERN_PROTECTION_FAILURE;
2465 goto make_mem_done;
2466 }
2467
2468 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2469 != KERN_SUCCESS) {
2470 kr = KERN_FAILURE;
2471 goto make_mem_done;
2472 }
2473
2474 user_entry->size = map_size;
2475 user_entry->offset = parent_entry->offset + map_offset;
2476 user_entry->is_sub_map = parent_entry->is_sub_map;
2477 user_entry->is_pager = parent_entry->is_pager;
2478 user_entry->internal = parent_entry->internal;
2479 user_entry->protection = protections;
2480
2481 if(access != MAP_MEM_NOOP) {
2482 SET_MAP_MEM(access, user_entry->protection);
2483 }
2484
2485 if(parent_entry->is_sub_map) {
2486 user_entry->backing.map = parent_entry->backing.map;
2487 vm_map_lock(user_entry->backing.map);
2488 user_entry->backing.map->ref_count++;
2489 vm_map_unlock(user_entry->backing.map);
2490 }
2491 else if (parent_entry->is_pager) {
2492 user_entry->backing.pager = parent_entry->backing.pager;
2493 /* JMM - don't we need a reference here? */
2494 } else {
2495 object = parent_entry->backing.object;
2496 assert(object != VM_OBJECT_NULL);
2497 user_entry->backing.object = object;
2498 /* we now point to this object, hold on */
2499 vm_object_reference(object);
2500 vm_object_lock(object);
2501 object->true_share = TRUE;
2502 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
2503 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
2504 vm_object_unlock(object);
2505 }
2506 *size = CAST_DOWN(vm_size_t, map_size);
2507 *object_handle = user_handle;
2508 return KERN_SUCCESS;
2509 }
2510
2511 make_mem_done:
2512 if (user_handle != IP_NULL) {
2513 /*
2514 * Releasing "user_handle" causes the kernel object
2515 * associated with it ("user_entry" here) to also be
2516 * released and freed.
2517 */
2518 mach_memory_entry_port_release(user_handle);
2519 }
2520 return kr;
2521 }
2522
2523 kern_return_t
2524 _mach_make_memory_entry(
2525 vm_map_t target_map,
2526 memory_object_size_t *size,
2527 memory_object_offset_t offset,
2528 vm_prot_t permission,
2529 ipc_port_t *object_handle,
2530 ipc_port_t parent_entry)
2531 {
2532 memory_object_size_t mo_size;
2533 kern_return_t kr;
2534
2535 mo_size = (memory_object_size_t)*size;
2536 kr = mach_make_memory_entry_64(target_map, &mo_size,
2537 (memory_object_offset_t)offset, permission, object_handle,
2538 parent_entry);
2539 *size = mo_size;
2540 return kr;
2541 }
2542
2543 kern_return_t
2544 mach_make_memory_entry(
2545 vm_map_t target_map,
2546 vm_size_t *size,
2547 vm_offset_t offset,
2548 vm_prot_t permission,
2549 ipc_port_t *object_handle,
2550 ipc_port_t parent_entry)
2551 {
2552 memory_object_size_t mo_size;
2553 kern_return_t kr;
2554
2555 mo_size = (memory_object_size_t)*size;
2556 kr = mach_make_memory_entry_64(target_map, &mo_size,
2557 (memory_object_offset_t)offset, permission, object_handle,
2558 parent_entry);
2559 *size = CAST_DOWN(vm_size_t, mo_size);
2560 return kr;
2561 }
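
/*
 * Illustrative user-space sketch (not part of this file): the typical
 * consumer of mach_make_memory_entry_64() creates a named entry over an
 * existing range and then maps that entry a second time, yielding two
 * virtual aliases of the same VM object.  Error handling is elided and
 * the headers (<mach/mach.h>, <mach/mach_vm.h>) are an assumption of a
 * user-level build.
 *
 *	mach_vm_address_t	addr = 0, alias = 0;
 *	memory_object_size_t	size = vm_page_size;
 *	mach_port_t		entry = MACH_PORT_NULL;
 *	kern_return_t		kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr, size,
 *			      VM_FLAGS_ANYWHERE);
 *	kr = mach_make_memory_entry_64(mach_task_self(), &size, addr,
 *				       VM_PROT_READ | VM_PROT_WRITE,
 *				       &entry, MACH_PORT_NULL);
 *	kr = mach_vm_map(mach_task_self(), &alias, size, 0,
 *			 VM_FLAGS_ANYWHERE, entry, 0, FALSE,
 *			 VM_PROT_READ | VM_PROT_WRITE,
 *			 VM_PROT_READ | VM_PROT_WRITE,
 *			 VM_INHERIT_NONE);
 *
 * Writes through "addr" are then visible at "alias".  Dropping the send
 * right with mach_port_deallocate(mach_task_self(), entry) destroys the
 * named entry once nobody else holds a right on it.
 */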
2562
2563 /*
2564 * task_wire
2565 *
2566 * Set or clear the map's wiring_required flag. This flag, if set,
2567 * will cause all future virtual memory allocations to allocate
2568 * user-wired memory. Pages wired down as a result of this
2569 * routine are unwired with the vm_wire interface.
2570 */
2571 kern_return_t
2572 task_wire(
2573 vm_map_t map,
2574 boolean_t must_wire)
2575 {
2576 if (map == VM_MAP_NULL)
2577 return(KERN_INVALID_ARGUMENT);
2578
2579 if (must_wire)
2580 map->wiring_required = TRUE;
2581 else
2582 map->wiring_required = FALSE;
2583
2584 return(KERN_SUCCESS);
2585 }
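
/*
 * Minimal in-kernel sketch (hypothetical caller): turn the flag on for
 * the current task's map, perform allocations that must come in wired,
 * then restore the default behaviour.
 *
 *	(void) task_wire(current_map(), TRUE);
 *	... allocate ...
 *	(void) task_wire(current_map(), FALSE);
 */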
2586
2587 __private_extern__ kern_return_t
2588 mach_memory_entry_allocate(
2589 vm_named_entry_t *user_entry_p,
2590 ipc_port_t *user_handle_p)
2591 {
2592 vm_named_entry_t user_entry;
2593 ipc_port_t user_handle;
2594 ipc_port_t previous;
2595
2596 user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
2597 if (user_entry == NULL)
2598 return KERN_FAILURE;
2599
2600 named_entry_lock_init(user_entry);
2601
2602 user_handle = ipc_port_alloc_kernel();
2603 if (user_handle == IP_NULL) {
2604 kfree(user_entry, sizeof *user_entry);
2605 return KERN_FAILURE;
2606 }
2607 ip_lock(user_handle);
2608
2609 /* make a sonce right */
2610 user_handle->ip_sorights++;
2611 ip_reference(user_handle);
2612
2613 user_handle->ip_destination = IP_NULL;
2614 user_handle->ip_receiver_name = MACH_PORT_NULL;
2615 user_handle->ip_receiver = ipc_space_kernel;
2616
2617 /* make a send right */
2618 user_handle->ip_mscount++;
2619 user_handle->ip_srights++;
2620 ip_reference(user_handle);
2621
2622 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
2623 /* nsrequest unlocks user_handle */
2624
2625 user_entry->backing.pager = NULL;
2626 user_entry->is_sub_map = FALSE;
2627 user_entry->is_pager = FALSE;
2628 user_entry->internal = FALSE;
2629 user_entry->size = 0;
2630 user_entry->offset = 0;
2631 user_entry->protection = VM_PROT_NONE;
2632 user_entry->ref_count = 1;
2633
2634 ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
2635 IKOT_NAMED_ENTRY);
2636
2637 *user_entry_p = user_entry;
2638 *user_handle_p = user_handle;
2639
2640 return KERN_SUCCESS;
2641 }
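
/*
 * Typical in-kernel use of this helper, mirroring the callers earlier
 * in this file (sketch only): allocate the entry/port pair, point the
 * entry at a VM object the caller already holds a reference on, and
 * return the port to user space.
 *
 *	vm_named_entry_t	user_entry;
 *	ipc_port_t		user_handle;
 *
 *	if (mach_memory_entry_allocate(&user_entry, &user_handle)
 *	    != KERN_SUCCESS)
 *		return KERN_FAILURE;
 *	user_entry->backing.object = object;	(reference is donated)
 *	user_entry->internal = object->internal;
 *	user_entry->is_sub_map = FALSE;
 *	user_entry->is_pager = FALSE;
 *	user_entry->offset = 0;
 *	user_entry->protection = VM_PROT_DEFAULT;
 *	user_entry->size = size;
 *	*object_handle = user_handle;
 */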
2642
2643 /*
2644 * mach_memory_object_memory_entry_64
2645 *
2646 * Create a named entry backed by the provided pager.
2647 *
2648 * JMM - we need to hold a reference on the pager -
2649 * and release it when the named entry is destroyed.
2650 */
2651 kern_return_t
2652 mach_memory_object_memory_entry_64(
2653 host_t host,
2654 boolean_t internal,
2655 vm_object_offset_t size,
2656 vm_prot_t permission,
2657 memory_object_t pager,
2658 ipc_port_t *entry_handle)
2659 {
2660 unsigned int access;
2661 vm_named_entry_t user_entry;
2662 ipc_port_t user_handle;
2663
2664 if (host == HOST_NULL)
2665 return(KERN_INVALID_HOST);
2666
2667 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2668 != KERN_SUCCESS) {
2669 return KERN_FAILURE;
2670 }
2671
2672 user_entry->backing.pager = pager;
2673 user_entry->size = size;
2674 user_entry->offset = 0;
2675 user_entry->protection = permission & VM_PROT_ALL;
2676 access = GET_MAP_MEM(permission);
2677 SET_MAP_MEM(access, user_entry->protection);
2678 user_entry->internal = internal;
2679 user_entry->is_sub_map = FALSE;
2680 user_entry->is_pager = TRUE;
2681 assert(user_entry->ref_count == 1);
2682
2683 *entry_handle = user_handle;
2684 return KERN_SUCCESS;
2685 }
2686
2687 kern_return_t
2688 mach_memory_object_memory_entry(
2689 host_t host,
2690 boolean_t internal,
2691 vm_size_t size,
2692 vm_prot_t permission,
2693 memory_object_t pager,
2694 ipc_port_t *entry_handle)
2695 {
2696 return mach_memory_object_memory_entry_64( host, internal,
2697 (vm_object_offset_t)size, permission, pager, entry_handle);
2698 }
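
/*
 * Illustrative in-kernel sketch (the helper name is made up): wrap a
 * pager obtained elsewhere, e.g. a device pager, in a named entry so it
 * can be handed out and later mapped.  As the JMM note above says, the
 * entry does not currently take its own reference on the pager, so the
 * caller must keep the pager alive for the lifetime of the entry.
 *
 *	static kern_return_t
 *	make_pager_entry(host_t host, memory_object_t pager,
 *			 vm_object_offset_t size, ipc_port_t *entryp)
 *	{
 *		return mach_memory_object_memory_entry_64(host, FALSE,
 *		    size, VM_PROT_READ | VM_PROT_WRITE, pager, entryp);
 *	}
 */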
2699
2700
2701 kern_return_t
2702 mach_memory_entry_purgable_control(
2703 ipc_port_t entry_port,
2704 vm_purgable_t control,
2705 int *state)
2706 {
2707 kern_return_t kr;
2708 vm_named_entry_t mem_entry;
2709 vm_object_t object;
2710
2711 if (entry_port == IP_NULL ||
2712 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
2713 return KERN_INVALID_ARGUMENT;
2714 }
2715 if (control != VM_PURGABLE_SET_STATE &&
2716 control != VM_PURGABLE_GET_STATE)
2717 return(KERN_INVALID_ARGUMENT);
2718
2719 if (control == VM_PURGABLE_SET_STATE &&
2720 (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
2721 ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
2722 return(KERN_INVALID_ARGUMENT);
2723
2724 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
2725
2726 named_entry_lock(mem_entry);
2727
2728 if (mem_entry->is_sub_map || mem_entry->is_pager) {
2729 named_entry_unlock(mem_entry);
2730 return KERN_INVALID_ARGUMENT;
2731 }
2732
2733 object = mem_entry->backing.object;
2734 if (object == VM_OBJECT_NULL) {
2735 named_entry_unlock(mem_entry);
2736 return KERN_INVALID_ARGUMENT;
2737 }
2738
2739 vm_object_lock(object);
2740
2741 /* check that named entry covers entire object ? */
2742 if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
2743 vm_object_unlock(object);
2744 named_entry_unlock(mem_entry);
2745 return KERN_INVALID_ARGUMENT;
2746 }
2747
2748 named_entry_unlock(mem_entry);
2749
2750 kr = vm_object_purgable_control(object, control, state);
2751
2752 vm_object_unlock(object);
2753
2754 return kr;
2755 }
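
/*
 * Illustrative sketch: given a named-entry port whose backing object is
 * purgeable and fully covered by the entry (the requirement checked
 * above), mark the object volatile and later query its state.  The
 * VM_PURGABLE_* constants come from <mach/vm_purgable.h>; how the entry
 * was created (e.g. with MAP_MEM_PURGABLE) is up to the caller.
 *
 *	int		state = VM_PURGABLE_VOLATILE;
 *	kern_return_t	kr;
 *
 *	kr = mach_memory_entry_purgable_control(entry_port,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	...
 *	state = 0;
 *	kr = mach_memory_entry_purgable_control(entry_port,
 *	    VM_PURGABLE_GET_STATE, &state);
 */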
2756
2757 /*
2758 * mach_memory_entry_port_release:
2759 *
2760 * Release a send right on a named entry port. This is the correct
2761 * way to destroy a named entry. When the last right on the port is
2762 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
2763 */
2764 void
2765 mach_memory_entry_port_release(
2766 ipc_port_t port)
2767 {
2768 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
2769 ipc_port_release_send(port);
2770 }
2771
2772 /*
2773 * mach_destroy_memory_entry:
2774 *
2775 * Drops a reference on a memory entry and destroys the memory entry if
2776 * there are no more references on it.
2777 * NOTE: This routine should not be called to destroy a memory entry from the
2778 * kernel, as it will not release the Mach port associated with the memory
2779 * entry. The proper way to destroy a memory entry in the kernel is to
2780 * call mach_memory_entry_port_release() to release the kernel's send right on
2781 * the memory entry's port. When the last send right is released, the memory
2782 * entry will be destroyed via ipc_kobject_destroy().
2783 */
2784 void
2785 mach_destroy_memory_entry(
2786 ipc_port_t port)
2787 {
2788 vm_named_entry_t named_entry;
2789 #if MACH_ASSERT
2790 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
2791 #endif /* MACH_ASSERT */
2792 named_entry = (vm_named_entry_t)port->ip_kobject;
2793
2794 named_entry_lock(named_entry);
2795 named_entry->ref_count -= 1;
2796
2797 if(named_entry->ref_count == 0) {
2798 if (named_entry->is_sub_map) {
2799 vm_map_deallocate(named_entry->backing.map);
2800 } else if (!named_entry->is_pager) {
2801 /* release the memory object we've been pointing to */
2802 vm_object_deallocate(named_entry->backing.object);
2803 } /* else JMM - need to drop reference on pager in that case */
2804
2805 named_entry_unlock(named_entry);
2806 named_entry_lock_destroy(named_entry);
2807
2808 kfree((void *) port->ip_kobject,
2809 sizeof (struct vm_named_entry));
2810 } else
2811 named_entry_unlock(named_entry);
2812 }
2813
2814 /* Allow manipulation of individual page state. This is actually part of */
2815 /* the UPL regimen but takes place on the memory entry rather than on a UPL */
2816
2817 kern_return_t
2818 mach_memory_entry_page_op(
2819 ipc_port_t entry_port,
2820 vm_object_offset_t offset,
2821 int ops,
2822 ppnum_t *phys_entry,
2823 int *flags)
2824 {
2825 vm_named_entry_t mem_entry;
2826 vm_object_t object;
2827 kern_return_t kr;
2828
2829 if (entry_port == IP_NULL ||
2830 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
2831 return KERN_INVALID_ARGUMENT;
2832 }
2833
2834 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
2835
2836 named_entry_lock(mem_entry);
2837
2838 if (mem_entry->is_sub_map || mem_entry->is_pager) {
2839 named_entry_unlock(mem_entry);
2840 return KERN_INVALID_ARGUMENT;
2841 }
2842
2843 object = mem_entry->backing.object;
2844 if (object == VM_OBJECT_NULL) {
2845 named_entry_unlock(mem_entry);
2846 return KERN_INVALID_ARGUMENT;
2847 }
2848
2849 vm_object_reference(object);
2850 named_entry_unlock(mem_entry);
2851
2852 kr = vm_object_page_op(object, offset, ops, phys_entry, flags);
2853
2854 vm_object_deallocate(object);
2855
2856 return kr;
2857 }
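
/*
 * Illustrative sketch: discard one resident page of the entry's backing
 * object without building a UPL.  UPL_POP_DUMP is from
 * <mach/memory_object_types.h>; passing NULL for phys_entry/flags is an
 * assumption that the caller does not need the page's physical number
 * or state bits back.
 *
 *	kern_return_t	kr;
 *
 *	kr = mach_memory_entry_page_op(entry_port,
 *	    (vm_object_offset_t)0, UPL_POP_DUMP, NULL, NULL);
 */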
2858
2859 /*
2860 * mach_memory_entry_range_op offers a performance enhancement over
2861 * mach_memory_entry_page_op for page_op functions which do not require
2862 * page-level state to be returned from the call. Page_op was created to
2863 * provide a low-cost alternative to page manipulation via UPLs when only
2864 * a single page was involved. The range_op call extends the _op family
2865 * of functions to work on multiple pages; because no page-level state is
2866 * handed back, the caller avoids the overhead of the upl structures.
2867 */
2868
2869 kern_return_t
2870 mach_memory_entry_range_op(
2871 ipc_port_t entry_port,
2872 vm_object_offset_t offset_beg,
2873 vm_object_offset_t offset_end,
2874 int ops,
2875 int *range)
2876 {
2877 vm_named_entry_t mem_entry;
2878 vm_object_t object;
2879 kern_return_t kr;
2880
2881 if (entry_port == IP_NULL ||
2882 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
2883 return KERN_INVALID_ARGUMENT;
2884 }
2885
2886 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
2887
2888 named_entry_lock(mem_entry);
2889
2890 if (mem_entry->is_sub_map || mem_entry->is_pager) {
2891 named_entry_unlock(mem_entry);
2892 return KERN_INVALID_ARGUMENT;
2893 }
2894
2895 object = mem_entry->backing.object;
2896 if (object == VM_OBJECT_NULL) {
2897 named_entry_unlock(mem_entry);
2898 return KERN_INVALID_ARGUMENT;
2899 }
2900
2901 vm_object_reference(object);
2902 named_entry_unlock(mem_entry);
2903
2904 kr = vm_object_range_op(object,
2905 offset_beg,
2906 offset_end,
2907 ops,
2908 (uint32_t *) range);
2909
2910 vm_object_deallocate(object);
2911
2912 return kr;
2913 }
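
/*
 * Illustrative sketch: toss every resident page in the first megabyte
 * of the entry's object with a single call instead of one page_op per
 * page.  UPL_ROP_DUMP is from <mach/memory_object_types.h>; "range"
 * reports how far the operation actually got.
 *
 *	int		range = 0;
 *	kern_return_t	kr;
 *
 *	kr = mach_memory_entry_range_op(entry_port, 0, 1024 * 1024,
 *	    UPL_ROP_DUMP, &range);
 */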
2914
2915
2916 kern_return_t
2917 set_dp_control_port(
2918 host_priv_t host_priv,
2919 ipc_port_t control_port)
2920 {
2921 if (host_priv == HOST_PRIV_NULL)
2922 return (KERN_INVALID_HOST);
2923
2924 if (IP_VALID(dynamic_pager_control_port))
2925 ipc_port_release_send(dynamic_pager_control_port);
2926
2927 dynamic_pager_control_port = control_port;
2928 return KERN_SUCCESS;
2929 }
2930
2931 kern_return_t
2932 get_dp_control_port(
2933 host_priv_t host_priv,
2934 ipc_port_t *control_port)
2935 {
2936 if (host_priv == HOST_PRIV_NULL)
2937 return (KERN_INVALID_HOST);
2938
2939 *control_port = ipc_port_copy_send(dynamic_pager_control_port);
2940 return KERN_SUCCESS;
2941
2942 }
2943
2944 /* ******* Temporary Internal calls to UPL for BSD ***** */
2945
2946 extern int kernel_upl_map(
2947 vm_map_t map,
2948 upl_t upl,
2949 vm_offset_t *dst_addr);
2950
2951 extern int kernel_upl_unmap(
2952 vm_map_t map,
2953 upl_t upl);
2954
2955 extern int kernel_upl_commit(
2956 upl_t upl,
2957 upl_page_info_t *pl,
2958 mach_msg_type_number_t count);
2959
2960 extern int kernel_upl_commit_range(
2961 upl_t upl,
2962 upl_offset_t offset,
2963 upl_size_t size,
2964 int flags,
2965 upl_page_info_array_t pl,
2966 mach_msg_type_number_t count);
2967
2968 extern int kernel_upl_abort(
2969 upl_t upl,
2970 int abort_type);
2971
2972 extern int kernel_upl_abort_range(
2973 upl_t upl,
2974 upl_offset_t offset,
2975 upl_size_t size,
2976 int abort_flags);
2977
2978
2979 kern_return_t
2980 kernel_upl_map(
2981 vm_map_t map,
2982 upl_t upl,
2983 vm_offset_t *dst_addr)
2984 {
2985 return vm_upl_map(map, upl, dst_addr);
2986 }
2987
2988
2989 kern_return_t
2990 kernel_upl_unmap(
2991 vm_map_t map,
2992 upl_t upl)
2993 {
2994 return vm_upl_unmap(map, upl);
2995 }
2996
2997 kern_return_t
2998 kernel_upl_commit(
2999 upl_t upl,
3000 upl_page_info_t *pl,
3001 mach_msg_type_number_t count)
3002 {
3003 kern_return_t kr;
3004
3005 kr = upl_commit(upl, pl, count);
3006 upl_deallocate(upl);
3007 return kr;
3008 }
3009
3010
3011 kern_return_t
3012 kernel_upl_commit_range(
3013 upl_t upl,
3014 upl_offset_t offset,
3015 upl_size_t size,
3016 int flags,
3017 upl_page_info_array_t pl,
3018 mach_msg_type_number_t count)
3019 {
3020 boolean_t finished = FALSE;
3021 kern_return_t kr;
3022
3023 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
3024 flags |= UPL_COMMIT_NOTIFY_EMPTY;
3025
3026 if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
3027 return KERN_INVALID_ARGUMENT;
3028 }
3029
3030 kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
3031
3032 if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
3033 upl_deallocate(upl);
3034
3035 return kr;
3036 }
3037
3038 kern_return_t
3039 kernel_upl_abort_range(
3040 upl_t upl,
3041 upl_offset_t offset,
3042 upl_size_t size,
3043 int abort_flags)
3044 {
3045 kern_return_t kr;
3046 boolean_t finished = FALSE;
3047
3048 if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
3049 abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
3050
3051 kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
3052
3053 if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
3054 upl_deallocate(upl);
3055
3056 return kr;
3057 }
3058
3059 kern_return_t
3060 kernel_upl_abort(
3061 upl_t upl,
3062 int abort_type)
3063 {
3064 kern_return_t kr;
3065
3066 kr = upl_abort(upl, abort_type);
3067 upl_deallocate(upl);
3068 return kr;
3069 }
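
/*
 * Illustrative BSD-side sketch (the helper name is made up): given a
 * upl obtained elsewhere, e.g. through the UBC layer, map it into the
 * kernel, operate on the pages, then commit with
 * UPL_COMMIT_FREE_ON_EMPTY so the upl is torn down once it is fully
 * committed.  The page list and count mirror what the UBC wrappers
 * pass and are assumptions here.
 *
 *	static void
 *	commit_whole_upl(upl_t upl, upl_size_t size)
 *	{
 *		vm_offset_t	kva;
 *
 *		if (kernel_upl_map(kernel_map, upl, &kva) == KERN_SUCCESS) {
 *			... operate on the pages at kva ...
 *			(void) kernel_upl_unmap(kernel_map, upl);
 *		}
 *		(void) kernel_upl_commit_range(upl, 0, size,
 *		    UPL_COMMIT_FREE_ON_EMPTY,
 *		    UPL_GET_INTERNAL_PAGE_LIST(upl), MAX_UPL_TRANSFER);
 *	}
 */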
3070
3071 /*
3072 * Now a kernel-private interface (for BootCache
3073 * use only). Need a cleaner way to create an
3074 * empty vm_map() and return a handle to it.
3075 */
3076
3077 kern_return_t
3078 vm_region_object_create(
3079 __unused vm_map_t target_map,
3080 vm_size_t size,
3081 ipc_port_t *object_handle)
3082 {
3083 vm_named_entry_t user_entry;
3084 ipc_port_t user_handle;
3085
3086 vm_map_t new_map;
3087
3088 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3089 != KERN_SUCCESS) {
3090 return KERN_FAILURE;
3091 }
3092
3093 /* Create a named object based on a submap of specified size */
3094
3095 new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
3096 vm_map_round_page(size), TRUE);
3097
3098 user_entry->backing.map = new_map;
3099 user_entry->internal = TRUE;
3100 user_entry->is_sub_map = TRUE;
3101 user_entry->offset = 0;
3102 user_entry->protection = VM_PROT_ALL;
3103 user_entry->size = size;
3104 assert(user_entry->ref_count == 1);
3105
3106 *object_handle = user_handle;
3107 return KERN_SUCCESS;
3108
3109 }
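
/*
 * Illustrative sketch of the kernel-private use described above: create
 * a handle backed by a fresh, empty submap of the requested size.  The
 * returned port is a named entry whose backing is the submap; the
 * target_map argument is currently unused.
 *
 *	ipc_port_t	handle = IP_NULL;
 *	kern_return_t	kr;
 *
 *	kr = vm_region_object_create(kernel_map, (vm_size_t)(128 << 20),
 *	    &handle);
 */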
3110
3111 ppnum_t vm_map_get_phys_page( /* forward */
3112 vm_map_t map,
3113 vm_offset_t offset);
3114
3115 ppnum_t
3116 vm_map_get_phys_page(
3117 vm_map_t map,
3118 vm_offset_t addr)
3119 {
3120 vm_object_offset_t offset;
3121 vm_object_t object;
3122 vm_map_offset_t map_offset;
3123 vm_map_entry_t entry;
3124 ppnum_t phys_page = 0;
3125
3126 map_offset = vm_map_trunc_page(addr);
3127
3128 vm_map_lock(map);
3129 while (vm_map_lookup_entry(map, map_offset, &entry)) {
3130
3131 if (entry->object.vm_object == VM_OBJECT_NULL) {
3132 vm_map_unlock(map);
3133 return (ppnum_t) 0;
3134 }
3135 if (entry->is_sub_map) {
3136 vm_map_t old_map;
3137 vm_map_lock(entry->object.sub_map);
3138 old_map = map;
3139 map = entry->object.sub_map;
3140 map_offset = entry->offset + (map_offset - entry->vme_start);
3141 vm_map_unlock(old_map);
3142 continue;
3143 }
3144 if (entry->object.vm_object->phys_contiguous) {
3145 /* These are not standard pageable memory mappings */
3146 /* If they are not present in the object they will */
3147 /* have to be picked up from the pager through the */
3148 /* fault mechanism. */
3149 if(entry->object.vm_object->vo_shadow_offset == 0) {
3150 /* need to call vm_fault */
3151 vm_map_unlock(map);
3152 vm_fault(map, map_offset, VM_PROT_NONE,
3153 FALSE, THREAD_UNINT, NULL, 0);
3154 vm_map_lock(map);
3155 continue;
3156 }
3157 offset = entry->offset + (map_offset - entry->vme_start);
3158 phys_page = (ppnum_t)
3159 ((entry->object.vm_object->vo_shadow_offset
3160 + offset) >> 12);
3161 break;
3162
3163 }
3164 offset = entry->offset + (map_offset - entry->vme_start);
3165 object = entry->object.vm_object;
3166 vm_object_lock(object);
3167 while (TRUE) {
3168 vm_page_t dst_page = vm_page_lookup(object,offset);
3169 if(dst_page == VM_PAGE_NULL) {
3170 if(object->shadow) {
3171 vm_object_t old_object;
3172 vm_object_lock(object->shadow);
3173 old_object = object;
3174 offset = offset + object->vo_shadow_offset;
3175 object = object->shadow;
3176 vm_object_unlock(old_object);
3177 } else {
3178 vm_object_unlock(object);
3179 break;
3180 }
3181 } else {
3182 phys_page = (ppnum_t)(dst_page->phys_page);
3183 vm_object_unlock(object);
3184 break;
3185 }
3186 }
3187 break;
3188
3189 }
3190
3191 vm_map_unlock(map);
3192 return phys_page;
3193 }
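
/*
 * Illustrative sketch: translate a mapped kernel virtual address into a
 * physical address with the helper above (ptoa_64() converts a page
 * number to a byte address).
 *
 *	ppnum_t		pn;
 *	addr64_t	pa = 0;
 *
 *	pn = vm_map_get_phys_page(kernel_map, (vm_offset_t)kva);
 *	if (pn != 0)
 *		pa = ptoa_64(pn) + ((vm_offset_t)kva & PAGE_MASK);
 */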
3194
3195
3196
3197 kern_return_t kernel_object_iopl_request( /* forward */
3198 vm_named_entry_t named_entry,
3199 memory_object_offset_t offset,
3200 upl_size_t *upl_size,
3201 upl_t *upl_ptr,
3202 upl_page_info_array_t user_page_list,
3203 unsigned int *page_list_count,
3204 int *flags);
3205
3206 kern_return_t
3207 kernel_object_iopl_request(
3208 vm_named_entry_t named_entry,
3209 memory_object_offset_t offset,
3210 upl_size_t *upl_size,
3211 upl_t *upl_ptr,
3212 upl_page_info_array_t user_page_list,
3213 unsigned int *page_list_count,
3214 int *flags)
3215 {
3216 vm_object_t object;
3217 kern_return_t ret;
3218
3219 int caller_flags;
3220
3221 caller_flags = *flags;
3222
3223 if (caller_flags & ~UPL_VALID_FLAGS) {
3224 /*
3225 * For forward compatibility's sake,
3226 * reject any unknown flag.
3227 */
3228 return KERN_INVALID_VALUE;
3229 }
3230
3231 /* a few checks to make sure the user is obeying the rules */
3232 if(*upl_size == 0) {
3233 if(offset >= named_entry->size)
3234 return(KERN_INVALID_RIGHT);
3235 *upl_size = (upl_size_t) (named_entry->size - offset);
3236 if (*upl_size != named_entry->size - offset)
3237 return KERN_INVALID_ARGUMENT;
3238 }
3239 if(caller_flags & UPL_COPYOUT_FROM) {
3240 if((named_entry->protection & VM_PROT_READ)
3241 != VM_PROT_READ) {
3242 return(KERN_INVALID_RIGHT);
3243 }
3244 } else {
3245 if((named_entry->protection &
3246 (VM_PROT_READ | VM_PROT_WRITE))
3247 != (VM_PROT_READ | VM_PROT_WRITE)) {
3248 return(KERN_INVALID_RIGHT);
3249 }
3250 }
3251 if(named_entry->size < (offset + *upl_size))
3252 return(KERN_INVALID_ARGUMENT);
3253
3254 /* the caller's "offset" parameter is relative to the start */
3255 /* of the named entry; convert it to an offset in the object */
3256 offset = offset + named_entry->offset;
3257
3258 if(named_entry->is_sub_map)
3259 return (KERN_INVALID_ARGUMENT);
3260
3261 named_entry_lock(named_entry);
3262
3263 if (named_entry->is_pager) {
3264 object = vm_object_enter(named_entry->backing.pager,
3265 named_entry->offset + named_entry->size,
3266 named_entry->internal,
3267 FALSE,
3268 FALSE);
3269 if (object == VM_OBJECT_NULL) {
3270 named_entry_unlock(named_entry);
3271 return(KERN_INVALID_OBJECT);
3272 }
3273
3274 /* JMM - drop reference on the pager here? */
3275
3276 /* create an extra reference for the object */
3277 vm_object_lock(object);
3278 vm_object_reference_locked(object);
3279 named_entry->backing.object = object;
3280 named_entry->is_pager = FALSE;
3281 named_entry_unlock(named_entry);
3282
3283 /* wait for object (if any) to be ready */
3284 if (!named_entry->internal) {
3285 while (!object->pager_ready) {
3286 vm_object_wait(object,
3287 VM_OBJECT_EVENT_PAGER_READY,
3288 THREAD_UNINT);
3289 vm_object_lock(object);
3290 }
3291 }
3292 vm_object_unlock(object);
3293
3294 } else {
3295 /* This is the case where we are going to operate */
3296 /* on an already-known object. If the object is */
3297 /* not ready, it must be internal; an external */
3298 /* object cannot be mapped until it is ready, */
3299 /* so we can skip the ready check in */
3300 /* this case. */
3301 object = named_entry->backing.object;
3302 vm_object_reference(object);
3303 named_entry_unlock(named_entry);
3304 }
3305
3306 if (!object->private) {
3307 if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
3308 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
3309 if (object->phys_contiguous) {
3310 *flags = UPL_PHYS_CONTIG;
3311 } else {
3312 *flags = 0;
3313 }
3314 } else {
3315 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
3316 }
3317
3318 ret = vm_object_iopl_request(object,
3319 offset,
3320 *upl_size,
3321 upl_ptr,
3322 user_page_list,
3323 page_list_count,
3324 caller_flags);
3325 vm_object_deallocate(object);
3326 return ret;
3327 }
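
/*
 * Illustrative sketch (hypothetical caller): wire the first chunk of a
 * named entry for a read-style I/O and release it afterwards.  The flag
 * choice (UPL_COPYOUT_FROM for a source buffer) and the on-stack page
 * list are assumptions, not requirements of this interface.
 *
 *	upl_t			upl = NULL;
 *	upl_size_t		upl_size = 0;		(0 = whole entry)
 *	upl_page_info_t		pl[MAX_UPL_TRANSFER];
 *	unsigned int		count = MAX_UPL_TRANSFER;
 *	int			flags = UPL_COPYOUT_FROM;
 *	kern_return_t		ret;
 *
 *	ret = kernel_object_iopl_request(named_entry, 0, &upl_size,
 *	    &upl, pl, &count, &flags);
 *	if (ret == KERN_SUCCESS) {
 *		... perform the I/O against the wired pages ...
 *		upl_abort(upl, 0);
 *		upl_deallocate(upl);
 *	}
 */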