/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

#include <debug.h>

#include <vm_cpm.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/upl.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/shared_memory_server.h>
#include <mach/vm_map_server.h>
#include <vm/vm_shared_memory_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

vm_size_t	upl_offset_to_pagelist = 0;

#if	VM_CPM
#include <vm/cpm.h>
#endif	/* VM_CPM */

ipc_port_t	dynamic_pager_control_port = NULL;

/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate(
	vm_map_t		map,
	mach_vm_offset_t	*addr,
	mach_vm_size_t		size,
	int			flags)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;
	boolean_t	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain usable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0)
			map_addr += PAGE_SIZE;
	} else
		map_addr = vm_map_trunc_page(*addr);
	map_size = vm_map_round_page(size);
	if (map_size == 0) {
		return(KERN_INVALID_ARGUMENT);
	}

	result = vm_map_enter(
			map,
			&map_addr,
			map_size,
			(vm_map_offset_t)0,
			flags,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	*addr = map_addr;
	return(result);
}

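/*
 * Usage sketch (illustrative only, compiled out): the same semantics are
 * reachable from user space through the MIG stubs assumed to be declared
 * in <mach/mach_vm.h>; this is not part of the kernel build.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
allocate_and_free_page(void)
{
	mach_vm_address_t addr = 0;		/* let the kernel pick the address */
	mach_vm_size_t    size = vm_page_size;	/* one page */
	kern_return_t     kr;

	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use the zero-filled memory at addr ... */

	return mach_vm_deallocate(mach_task_self(), addr, size);
}
#endif
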
/*
 *	vm_allocate
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate(
	vm_map_t	map,
	vm_offset_t	*addr,
	vm_size_t	size,
	int		flags)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;
	boolean_t	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain usable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0)
			map_addr += PAGE_SIZE;
	} else
		map_addr = vm_map_trunc_page(*addr);
	map_size = vm_map_round_page(size);
	if (map_size == 0) {
		return(KERN_INVALID_ARGUMENT);
	}

	result = vm_map_enter(
			map,
			&map_addr,
			map_size,
			(vm_map_offset_t)0,
			flags,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	*addr = CAST_DOWN(vm_offset_t, map_addr);
	return(result);
}

/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (mach_vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, vm_map_trunc_page(start),
			     vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
}

/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, vm_map_trunc_page(start),
			     vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
}

/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_inherit(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
			      new_inheritance));
}

/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_inherit(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
			      new_inheritance));
}

/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.
 */

kern_return_t
mach_vm_protect(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_protect(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
			      new_protection,
			      set_maximum));
}

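/*
 * Usage sketch (illustrative only, compiled out; assumes the user-space
 * stubs in <mach/mach_vm.h>): drops write permission on one page of the
 * caller's own address space.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
make_page_read_only(mach_vm_address_t page)
{
	/* set_maximum == FALSE: change only the current protection */
	return mach_vm_protect(mach_task_self(), page, vm_page_size,
			       FALSE, VM_PROT_READ);
}
#endif
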
/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map. Addressability of the range limited
 *	to the same size as the kernel.
 */

kern_return_t
vm_protect(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size,
	boolean_t	set_maximum,
	vm_prot_t	new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_protect(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start+size),
			      new_protection,
			      set_maximum));
}

/*
 * mach_vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
	vm_map_t			map,
	mach_vm_address_t		addr,
	mach_vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(map,
				vm_map_trunc_page(addr),
				vm_map_round_page(addr+size),
				attribute,
				value);
}

/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc. Limited addressability
 * (same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
	vm_map_t			map,
	vm_address_t			addr,
	vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(map,
				vm_map_trunc_page(addr),
				vm_map_round_page(addr+size),
				attribute,
				value);
}

/*
 * mach_vm_read -
 * Read/copy a range from one address space and return it to the caller.
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 *
 * JMM - because of mach_msg_type_number_t, this call is limited to a
 * single 4GB region at this time.
 *
 */
kern_return_t
mach_vm_read(
	vm_map_t		map,
	mach_vm_address_t	addr,
	mach_vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);


	error = vm_map_copyin(map,
			(vm_map_address_t)addr,
			(vm_map_size_t)size,
			FALSE,	/* src_destroy */
			&ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = size;
	}
	return(error);
}

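/*
 * Usage sketch (illustrative only, compiled out; assumes the user-space
 * stubs in <mach/mach_vm.h> and a send right to the target task): the
 * reply message carries the copied data, which the caller later releases
 * with mach_vm_deallocate().
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
peek_remote(task_t target, mach_vm_address_t addr, mach_vm_size_t len)
{
	vm_offset_t            data  = 0;
	mach_msg_type_number_t count = 0;
	kern_return_t          kr;

	kr = mach_vm_read(target, addr, len, &data, &count);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... inspect the `count' bytes now mapped at `data' ... */

	return mach_vm_deallocate(mach_task_self(), data, count);
}
#endif
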
/*
 * vm_read -
 * Read/copy a range from one address space and return it to the caller.
 * Limited addressability (same range limits as for the native kernel map).
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 */
kern_return_t
vm_read(
	vm_map_t		map,
	vm_address_t		addr,
	vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map,
			(vm_map_address_t)addr,
			(vm_map_size_t)size,
			FALSE,	/* src_destroy */
			&ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = size;
	}
	return(error);
}

/*
 * mach_vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 */
kern_return_t
mach_vm_read_list(
	vm_map_t		map,
	mach_vm_read_entry_t	data_list,
	natural_t		count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if(map_size != 0) {
			error = vm_map_copyin(map,
					map_addr,
					map_size,
					FALSE,	/* src_destroy */
					&copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
						current_task()->map,
						&map_addr,
						copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address = map_addr;
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return(error);
}

/*
 * vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 *
 * The source and destination ranges are limited to those
 * that can be described with a vm_address_t (i.e. same
 * size map as the kernel).
 *
 * JMM - If the result of the copyout is an address range
 * that cannot be described with a vm_address_t (i.e. the
 * caller had a larger address space but used this call
 * anyway), it will result in a truncated address being
 * returned (and a likely confused caller).
 */

kern_return_t
vm_read_list(
	vm_map_t	map,
	vm_read_entry_t	data_list,
	natural_t	count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if(map_size != 0) {
			error = vm_map_copyin(map,
					map_addr,
					map_size,
					FALSE,	/* src_destroy */
					&copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
						&map_addr,
						copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address =
						CAST_DOWN(vm_offset_t, map_addr);
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return(error);
}

/*
 * mach_vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * In making an assumption that the current thread is local, it is
 * no longer cluster-safe without a fully supportive local proxy
 * thread/task (but we don't support clusters anymore so this is moot).
 */

kern_return_t
mach_vm_read_overwrite(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	mach_vm_address_t	data,
	mach_vm_size_t		*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
				(vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					(vm_map_address_t)data,
					copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return(error);
}

/*
 * vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * This routine adds the additional limitation that the source and
 * destination ranges must be describable with vm_address_t values
 * (i.e. the same size address spaces as the kernel, or at least the
 * ranges are in that first portion of the respective address spaces).
 */

kern_return_t
vm_read_overwrite(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_address_t	data,
	vm_size_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
				(vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					(vm_map_address_t)data,
					copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return(error);
}

/*
 * mach_vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 */
kern_return_t
mach_vm_write(
	vm_map_t			map,
	mach_vm_address_t		address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
}

/*
 * vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 *
 * The addressability of the range of addresses to overwrite is
 * limited by the use of a vm_address_t (same size as kernel map).
 * Either the target map is also small, or the range is in the
 * low addresses within it.
 */
kern_return_t
vm_write(
	vm_map_t			map,
	vm_address_t			address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
}

/*
 * mach_vm_copy -
 * Overwrite one range of the specified map with the contents of
 * another range within that same map (i.e. both address ranges
 * are "over there").
 */
kern_return_t
mach_vm_copy(
	vm_map_t		map,
	mach_vm_address_t	source_address,
	mach_vm_size_t		size,
	mach_vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}

kern_return_t
vm_copy(
	vm_map_t	map,
	vm_address_t	source_address,
	vm_size_t	size,
	vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}

/*
 * mach_vm_map -
 * Map some range of an object into an address space.
 *
 * The object can be one of several types of objects:
 *	NULL - anonymous memory
 *	a named entry - a range within another address space
 *	                or a range within a memory object
 *	a whole memory object
 *
 */
kern_return_t
mach_vm_map(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	vm_object_t		object;
	vm_object_size_t	size;
	kern_return_t		result;

	/*
	 * Check arguments for validity
	 */
	if ((target_map == VM_MAP_NULL) ||
	    (cur_protection & ~VM_PROT_ALL) ||
	    (max_protection & ~VM_PROT_ALL) ||
	    (inheritance > VM_INHERIT_LAST_VALID) ||
	    initial_size == 0)
		return(KERN_INVALID_ARGUMENT);

	map_addr = vm_map_trunc_page(*address);
	map_size = vm_map_round_page(initial_size);
	size = vm_object_round_page(initial_size);

	/*
	 * Find the vm object (if any) corresponding to this port.
	 */
	if (!IP_VALID(port)) {
		object = VM_OBJECT_NULL;
		offset = 0;
		copy = FALSE;
	} else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
		vm_named_entry_t	named_entry;

		named_entry = (vm_named_entry_t)port->ip_kobject;
		/* a few checks to make sure user is obeying rules */
		if(size == 0) {
			if(offset >= named_entry->size)
				return(KERN_INVALID_RIGHT);
			size = named_entry->size - offset;
		}
		if((named_entry->protection & max_protection) != max_protection)
			return(KERN_INVALID_RIGHT);
		if((named_entry->protection & cur_protection) != cur_protection)
			return(KERN_INVALID_RIGHT);
		if(named_entry->size < (offset + size))
			return(KERN_INVALID_ARGUMENT);

		/* the callers parameter offset is defined to be the */
		/* offset from beginning of named entry offset in object */
		offset = offset + named_entry->offset;

		named_entry_lock(named_entry);
		if(named_entry->is_sub_map) {
			vm_map_entry_t	map_entry;

			named_entry_unlock(named_entry);
			vm_object_reference(vm_submap_object);
			if ((result = vm_map_enter(target_map,
				&map_addr, map_size,
				(vm_map_offset_t)mask, flags,
				vm_submap_object, 0,
				FALSE,
				cur_protection, max_protection, inheritance
				)) != KERN_SUCCESS) {
				vm_object_deallocate(vm_submap_object);
			} else {
				char	alias;

				VM_GET_FLAGS_ALIAS(flags, alias);
				if ((alias == VM_MEMORY_SHARED_PMAP) &&
				    !copy) {
					vm_map_submap(target_map, map_addr,
						map_addr + map_size,
						named_entry->backing.map,
						(vm_map_offset_t)offset, TRUE);
				} else {
					vm_map_submap(target_map, map_addr,
						map_addr + map_size,
						named_entry->backing.map,
						(vm_map_offset_t)offset, FALSE);
				}
				if(copy) {
					if(vm_map_lookup_entry(
						target_map, map_addr, &map_entry)) {
						map_entry->needs_copy = TRUE;
					}
				}
				*address = map_addr;
			}
			return(result);

		} else if (named_entry->is_pager) {
			unsigned int	access;
			vm_prot_t	protections;
			unsigned int	wimg_mode;
			boolean_t	cache_attr;

			protections = named_entry->protection
					& VM_PROT_ALL;
			access = GET_MAP_MEM(named_entry->protection);

			object = vm_object_enter(
				named_entry->backing.pager,
				named_entry->size,
				named_entry->internal,
				FALSE,
				FALSE);
			if (object == VM_OBJECT_NULL) {
				named_entry_unlock(named_entry);
				return(KERN_INVALID_OBJECT);
			}

			/* JMM - drop reference on pager here */

			/* create an extra ref for the named entry */
			vm_object_lock(object);
			vm_object_reference_locked(object);
			named_entry->backing.object = object;
			named_entry->is_pager = FALSE;
			named_entry_unlock(named_entry);

			wimg_mode = object->wimg_bits;
			if(access == MAP_MEM_IO) {
				wimg_mode = VM_WIMG_IO;
			} else if (access == MAP_MEM_COPYBACK) {
				wimg_mode = VM_WIMG_USE_DEFAULT;
			} else if (access == MAP_MEM_WTHRU) {
				wimg_mode = VM_WIMG_WTHRU;
			} else if (access == MAP_MEM_WCOMB) {
				wimg_mode = VM_WIMG_WCOMB;
			}
			if ((wimg_mode == VM_WIMG_IO)
			    || (wimg_mode == VM_WIMG_WCOMB))
				cache_attr = TRUE;
			else
				cache_attr = FALSE;

			/* wait for object (if any) to be ready */
			if (!named_entry->internal) {
				while (!object->pager_ready) {
					vm_object_wait(object,
						VM_OBJECT_EVENT_PAGER_READY,
						THREAD_UNINT);
					vm_object_lock(object);
				}
			}

			if(object->wimg_bits != wimg_mode) {
				vm_page_t p;

				vm_object_paging_wait(object, THREAD_UNINT);

				object->wimg_bits = wimg_mode;
				queue_iterate(&object->memq, p, vm_page_t, listq) {
					if (!p->fictitious) {
						pmap_disconnect(p->phys_page);
						if (cache_attr)
							pmap_sync_page_attributes_phys(p->phys_page);
					}
				}
			}
			object->true_share = TRUE;
			if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
				object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
			vm_object_unlock(object);
		} else {
			/* This is the case where we are going to map */
			/* an already mapped object.  If the object is */
			/* not ready it is internal.  An external     */
			/* object cannot be mapped until it is ready  */
			/* we can therefore avoid the ready check     */
			/* in this case.  */
			object = named_entry->backing.object;
			assert(object != VM_OBJECT_NULL);
			named_entry_unlock(named_entry);
			vm_object_reference(object);
		}
	} else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
		/*
		 * JMM - This is temporary until we unify named entries
		 * and raw memory objects.
		 *
		 * Detected fake ip_kotype for a memory object.  In
		 * this case, the port isn't really a port at all, but
		 * instead is just a raw memory object.
		 */

		if ((object = vm_object_enter((memory_object_t)port,
					      size, FALSE, FALSE, FALSE))
			== VM_OBJECT_NULL)
			return(KERN_INVALID_OBJECT);

		/* wait for object (if any) to be ready */
		if (object != VM_OBJECT_NULL) {
			if(object == kernel_object) {
				printf("Warning: Attempt to map kernel object"
					" by a non-private kernel entity\n");
				return(KERN_INVALID_OBJECT);
			}
			vm_object_lock(object);
			while (!object->pager_ready) {
				vm_object_wait(object,
					VM_OBJECT_EVENT_PAGER_READY,
					THREAD_UNINT);
				vm_object_lock(object);
			}
			vm_object_unlock(object);
		}
	} else {
		return (KERN_INVALID_OBJECT);
	}

	/*
	 *	Perform the copy if requested
	 */

	if (copy) {
		vm_object_t		new_object;
		vm_object_offset_t	new_offset;

		result = vm_object_copy_strategically(object, offset, size,
				&new_object, &new_offset,
				&copy);


		if (result == KERN_MEMORY_RESTART_COPY) {
			boolean_t success;
			boolean_t src_needs_copy;

			/*
			 * XXX
			 * We currently ignore src_needs_copy.
			 * This really is the issue of how to make
			 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
			 * non-kernel users to use. Solution forthcoming.
			 * In the meantime, since we don't allow non-kernel
			 * memory managers to specify symmetric copy,
			 * we won't run into problems here.
			 */
			new_object = object;
			new_offset = offset;
			success = vm_object_copy_quickly(&new_object,
							 new_offset, size,
							 &src_needs_copy,
							 &copy);
			assert(success);
			result = KERN_SUCCESS;
		}
		/*
		 *	Throw away the reference to the
		 *	original object, as it won't be mapped.
		 */

		vm_object_deallocate(object);

		if (result != KERN_SUCCESS)
			return (result);

		object = new_object;
		offset = new_offset;
	}

	if ((result = vm_map_enter(target_map,
				&map_addr, map_size,
				(vm_map_offset_t)mask,
				flags,
				object, offset,
				copy,
				cur_protection, max_protection, inheritance
				)) != KERN_SUCCESS)
		vm_object_deallocate(object);
	*address = map_addr;
	return(result);
}

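/*
 * Usage sketch (illustrative only, compiled out; assumes the user-space
 * stubs in <mach/mach_vm.h>): passing MACH_PORT_NULL for the port maps
 * fresh anonymous zero-fill memory, like mach_vm_allocate() but with an
 * alignment mask and explicit protections/inheritance.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
map_anonymous_aligned(mach_vm_address_t *addr, mach_vm_size_t size)
{
	*addr = 0;
	return mach_vm_map(mach_task_self(),
			   addr,
			   size,
			   (mach_vm_offset_t)0xFFFF,	/* 64KB alignment mask */
			   VM_FLAGS_ANYWHERE,
			   MACH_PORT_NULL,		/* no object => anonymous */
			   0,				/* offset */
			   FALSE,			/* copy */
			   VM_PROT_DEFAULT,
			   VM_PROT_ALL,
			   VM_INHERIT_DEFAULT);
}
#endif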

/* legacy interface */
kern_return_t
vm_map_64(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	kern_return_t kr;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 port, offset, copy,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_address_t, map_addr);
	return kr;
}

/* temporary, until world build */
kern_return_t
vm_map(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_offset_t		offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	vm_object_offset_t obj_offset;
	kern_return_t kr;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;
	obj_offset = (vm_object_offset_t)offset;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 port, obj_offset, copy,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_address_t, map_addr);
	return kr;
}

/*
 * mach_vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 */

kern_return_t
mach_vm_remap(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	boolean_t		anywhere,
	vm_map_t		src_map,
	mach_vm_offset_t	memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
			  &map_addr,
			  size,
			  mask,
			  anywhere,
			  src_map,
			  memory_address,
			  copy,
			  cur_protection,
			  max_protection,
			  inheritance);
	*address = map_addr;
	return kr;
}

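/*
 * Usage sketch (illustrative only, compiled out; assumes the user-space
 * stubs in <mach/mach_vm.h>): creates a second, shared mapping of an
 * existing range in the caller's own address space (copy == FALSE).  The
 * protection arguments are returned, not supplied.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
alias_range(mach_vm_address_t src, mach_vm_size_t size,
	    mach_vm_address_t *alias)
{
	vm_prot_t cur_prot, max_prot;

	*alias = 0;
	return mach_vm_remap(mach_task_self(), alias, size, 0,
			     TRUE,			/* anywhere */
			     mach_task_self(), src,
			     FALSE,			/* share, don't copy */
			     &cur_prot, &max_prot,
			     VM_INHERIT_DEFAULT);
}
#endif
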
/*
 * vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 *
 * The addressability of the source and target address
 * range is limited by the size of vm_address_t (in the
 * kernel context).
 */
kern_return_t
vm_remap(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	boolean_t		anywhere,
	vm_map_t		src_map,
	vm_offset_t		memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
			  &map_addr,
			  size,
			  mask,
			  anywhere,
			  src_map,
			  memory_address,
			  copy,
			  cur_protection,
			  max_protection,
			  inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}

/*
 * NOTE: these routines (and this file) will no longer require mach_host_server.h
 * when mach_vm_wire and vm_wire are changed to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	mach_vm_wire
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire(
	host_priv_t		host_priv,
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL)
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, vm_map_trunc_page(start),
				 vm_map_round_page(start+size), access, TRUE);
	} else {
		rc = vm_map_unwire(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), TRUE);
	}
	return rc;
}

/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t		host_priv,
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if ((access & ~VM_PROT_ALL) || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		rc = KERN_SUCCESS;
	} else if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, vm_map_trunc_page(start),
				 vm_map_round_page(start+size), access, TRUE);
	} else {
		rc = vm_map_unwire(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), TRUE);
	}
	return rc;
}

/*
 *	mach_vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */

kern_return_t
mach_vm_msync(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	vm_sync_t		sync_flags)
{

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			(vm_map_size_t)size, sync_flags);
}

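/*
 * Usage sketch (illustrative only, compiled out; assumes the user-space
 * stubs in <mach/mach_vm.h>): synchronously pushes any dirty or precious
 * pages in the range back to their memory manager before returning.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
flush_range(mach_vm_address_t addr, mach_vm_size_t size)
{
	return mach_vm_msync(mach_task_self(), addr, size,
			     VM_SYNC_SYNCHRONOUS);
}
#endif
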
/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */

kern_return_t
vm_msync(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_sync_t	sync_flags)
{

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			(vm_map_size_t)size, sync_flags);
}


/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
kern_return_t
mach_vm_behavior_set(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_behavior_t		new_behavior)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_behavior_set(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), new_behavior));
}

/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
kern_return_t
vm_behavior_set(
	vm_map_t		map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_behavior_t		new_behavior)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_behavior_set(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), new_behavior));
}

/*
 *	mach_vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
mach_vm_region(
	vm_map_t		 map,
	mach_vm_offset_t	*address,		/* IN/OUT */
	mach_vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = map_addr;
	*size = map_size;
	return kr;
}

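/*
 * Usage sketch (illustrative only, compiled out; assumes the user-space
 * stubs in <mach/mach_vm.h>): looks up the region containing or following
 * `addr' and reports its current protection.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
query_region(mach_vm_address_t addr, vm_prot_t *prot_out)
{
	mach_vm_address_t              region_addr = addr;
	mach_vm_size_t                 region_size = 0;
	vm_region_basic_info_data_64_t info;
	mach_msg_type_number_t         count = VM_REGION_BASIC_INFO_COUNT_64;
	mach_port_t                    object_name = MACH_PORT_NULL;
	kern_return_t                  kr;

	kr = mach_vm_region(mach_task_self(), &region_addr, &region_size,
			    VM_REGION_BASIC_INFO_64,
			    (vm_region_info_t)&info, &count, &object_name);
	if (kr == KERN_SUCCESS)
		*prot_out = info.protection;
	return kr;
}
#endif
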
/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
vm_region_64(
	vm_map_t		 map,
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	vm_map_offset_t		map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_offset_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

kern_return_t
vm_region(
	vm_map_t		 map,
	vm_address_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
mach_vm_region_recurse(
	vm_map_t			map,
	mach_vm_address_t		*address,
	mach_vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			map,
			&map_addr,
			&map_size,
			depth,
			(vm_region_submap_info_64_t)info,
			infoCnt);

	*address = map_addr;
	*size = map_size;
	return kr;
}

/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
vm_region_recurse_64(
	vm_map_t			map,
	vm_address_t			*address,
	vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_64_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			map,
			&map_addr,
			&map_size,
			depth,
			(vm_region_submap_info_64_t)info,
			infoCnt);

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

kern_return_t
vm_region_recurse(
	vm_map_t			map,
	vm_offset_t			*address,	/* IN/OUT */
	vm_size_t			*size,		/* OUT */
	natural_t			*depth,		/* IN/OUT */
	vm_region_recurse_info_t	info32,		/* IN/OUT */
	mach_msg_type_number_t		*infoCnt)	/* IN/OUT */
{
	vm_region_submap_info_data_64_t	info64;
	vm_region_submap_info_t		info;
	vm_map_address_t		map_addr;
	vm_map_size_t			map_size;
	kern_return_t			kr;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
		return KERN_INVALID_ARGUMENT;


	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
				      depth, &info64, infoCnt);

	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

kern_return_t
vm_purgable_control(
	vm_map_t		map,
	vm_offset_t		address,
	vm_purgable_t		control,
	int			*state)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address),
				       control,
				       state);
}


/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host priv port).  Set this variable to zero if
 *	you want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
kern_return_t
vm_allocate_cpm(
	host_priv_t		host_priv,
	vm_map_t		map,
	vm_address_t		*addr,
	vm_size_t		size,
	int			flags)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
		return KERN_INVALID_HOST;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*addr;
	map_size = (vm_map_size_t)size;

	kr = vm_map_enter_cpm(map,
			      &map_addr,
			      map_size,
			      flags);

	*addr = CAST_DOWN(vm_address_t, map_addr);
	return kr;
}

kern_return_t
mach_vm_page_query(
	vm_map_t		map,
	mach_vm_offset_t	offset,
	int			*disposition,
	int			*ref_count)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_info(map,
				vm_map_trunc_page(offset),
				disposition, ref_count);
}

kern_return_t
vm_map_page_query(
	vm_map_t		map,
	vm_offset_t		offset,
	int			*disposition,
	int			*ref_count)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_info(map,
				vm_map_trunc_page(offset),
				disposition, ref_count);
}

/* map a (whole) upl into an address space */
kern_return_t
vm_upl_map(
	vm_map_t		map,
	upl_t			upl,
	vm_offset_t		*dst_addr)
{
	vm_map_offset_t		map_addr;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_upl(map, upl, &map_addr);
	*dst_addr = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}

kern_return_t
vm_upl_unmap(
	vm_map_t		map,
	upl_t			upl)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return (vm_map_remove_upl(map, upl));
}

/* Retrieve a upl for an object underlying an address range in a map */

kern_return_t
vm_map_get_upl(
	vm_map_t		map,
	vm_map_offset_t		map_offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			*flags,
	int			force_data_sync)
{
	int		map_flags;
	kern_return_t	kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_flags = *flags & ~UPL_NOZEROFILL;
	if (force_data_sync)
		map_flags |= UPL_FORCE_DATA_SYNC;

	kr = vm_map_create_upl(map,
			       map_offset,
			       upl_size,
			       upl,
			       page_list,
			       count,
			       &map_flags);

	*flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
	return kr;
}


__private_extern__ kern_return_t
mach_memory_entry_allocate(
	vm_named_entry_t	*user_entry_p,
	ipc_port_t		*user_handle_p);	/* forward */

/*
 *	mach_make_memory_entry_64
 *
 *	Think of it as a two-stage vm_remap() operation.  First
 *	you get a handle.  Second, you map that handle
 *	somewhere else.  Rather than doing it all at once (and
 *	without needing access to the other whole map).
 */

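/*
 * Usage sketch of the two-stage pattern described above (illustrative
 * only, compiled out; assumes the user-space stubs in <mach/mach.h> and
 * <mach/mach_vm.h>): stage one wraps an existing range in a named-entry
 * handle, stage two maps that handle, here back into the caller's task.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
share_range_with_self(mach_vm_address_t src, mach_vm_size_t size,
		      mach_vm_address_t *dst)
{
	memory_object_size_t	entry_size = size;
	mach_port_t		handle = MACH_PORT_NULL;
	kern_return_t		kr;

	/* stage one: get a handle on the existing range */
	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, src,
				       VM_PROT_READ | VM_PROT_WRITE,
				       &handle, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS)
		return kr;

	/* stage two: map the handle somewhere else */
	*dst = 0;
	kr = mach_vm_map(mach_task_self(), dst, entry_size, 0,
			 VM_FLAGS_ANYWHERE, handle, 0, FALSE,
			 VM_PROT_READ | VM_PROT_WRITE,
			 VM_PROT_READ | VM_PROT_WRITE,
			 VM_INHERIT_DEFAULT);

	mach_port_deallocate(mach_task_self(), handle);
	return kr;
}
#endif
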
kern_return_t
mach_make_memory_entry_64(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_handle)
{
	vm_map_version_t	version;
	vm_named_entry_t	parent_entry;
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	kern_return_t		kr;
	vm_map_t		real_map;

	/* needed for call to vm_map_lookup_locked */
	boolean_t		wired;
	vm_object_offset_t	obj_off;
	vm_prot_t		prot;
	vm_map_offset_t		lo_offset, hi_offset;
	vm_behavior_t		behavior;
	vm_object_t		object;
	vm_object_t		shadow_object;

	/* needed for direct map entry manipulation */
	vm_map_entry_t		map_entry;
	vm_map_entry_t		next_entry;
	vm_map_t		local_map;
	vm_map_t		original_map = target_map;
	vm_map_size_t		total_size;
	vm_map_size_t		map_size;
	vm_map_offset_t		map_offset;
	vm_map_offset_t		local_offset;
	vm_object_size_t	mappable_size;

	unsigned int		access;
	vm_prot_t		protections;
	unsigned int		wimg_mode;
	boolean_t		cache_attr = FALSE;

	if (((permission & 0x00FF0000) &
	     ~(MAP_MEM_ONLY |
	       MAP_MEM_NAMED_CREATE |
	       MAP_MEM_PURGABLE |
	       MAP_MEM_NAMED_REUSE))) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		return KERN_INVALID_VALUE;
	}

	if (parent_handle != IP_NULL &&
	    ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
		parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
	} else {
		parent_entry = NULL;
	}

	protections = permission & VM_PROT_ALL;
	access = GET_MAP_MEM(permission);

	user_handle = IP_NULL;
	user_entry = NULL;

	map_offset = vm_map_trunc_page(offset);
	map_size = vm_map_round_page(*size);

91447636
A
2004 if (permission & MAP_MEM_ONLY) {
2005 boolean_t parent_is_object;
55e303ae 2006
91447636 2007 if (parent_entry == NULL) {
55e303ae
A
2008 return KERN_INVALID_ARGUMENT;
2009 }
91447636
A
2010
2011 parent_is_object = !(parent_entry->is_sub_map || parent_entry->is_pager);
2012 object = parent_entry->backing.object;
2013 if(parent_is_object && object != VM_OBJECT_NULL)
55e303ae 2014 wimg_mode = object->wimg_bits;
91447636
A
2015 else
2016 wimg_mode = VM_WIMG_DEFAULT;
2017 if((access != GET_MAP_MEM(parent_entry->protection)) &&
2018 !(parent_entry->protection & VM_PROT_WRITE)) {
55e303ae
A
2019 return KERN_INVALID_RIGHT;
2020 }
2021 if(access == MAP_MEM_IO) {
91447636 2022 SET_MAP_MEM(access, parent_entry->protection);
55e303ae
A
2023 wimg_mode = VM_WIMG_IO;
2024 } else if (access == MAP_MEM_COPYBACK) {
91447636 2025 SET_MAP_MEM(access, parent_entry->protection);
55e303ae
A
2026 wimg_mode = VM_WIMG_DEFAULT;
2027 } else if (access == MAP_MEM_WTHRU) {
91447636 2028 SET_MAP_MEM(access, parent_entry->protection);
55e303ae
A
2029 wimg_mode = VM_WIMG_WTHRU;
2030 } else if (access == MAP_MEM_WCOMB) {
91447636 2031 SET_MAP_MEM(access, parent_entry->protection);
55e303ae
A
2032 wimg_mode = VM_WIMG_WCOMB;
2033 }
91447636 2034 if(parent_is_object && object &&
55e303ae
A
2035 (access != MAP_MEM_NOOP) &&
2036 (!(object->nophyscache))) {
2037 if(object->wimg_bits != wimg_mode) {
2038 vm_page_t p;
2039 if ((wimg_mode == VM_WIMG_IO)
2040 || (wimg_mode == VM_WIMG_WCOMB))
2041 cache_attr = TRUE;
2042 else
2043 cache_attr = FALSE;
2044 vm_object_lock(object);
91447636 2045 vm_object_paging_wait(object, THREAD_UNINT);
55e303ae
A
2046 object->wimg_bits = wimg_mode;
2047 queue_iterate(&object->memq,
2048 p, vm_page_t, listq) {
2049 if (!p->fictitious) {
91447636
A
2050 pmap_disconnect(p->phys_page);
2051 if (cache_attr)
2052 pmap_sync_page_attributes_phys(p->phys_page);
55e303ae
A
2053 }
2054 }
2055 vm_object_unlock(object);
2056 }
2057 }
91447636
A
2058 if (object_handle)
2059 *object_handle = IP_NULL;
55e303ae
A
2060 return KERN_SUCCESS;
2061 }
2062
91447636
A
2063 if(permission & MAP_MEM_NAMED_CREATE) {
2064 kr = mach_memory_entry_allocate(&user_entry, &user_handle);
2065 if (kr != KERN_SUCCESS) {
2066 return KERN_FAILURE;
2067 }
55e303ae 2068
91447636
A
2069 /*
2070 * Force the creation of the VM object now.
2071 */
2072 if (map_size > (vm_map_size_t) VM_MAX_ADDRESS) {
2073 /*
2074 * LP64todo - for now, we can only allocate 4GB
2075 * internal objects because the default pager can't
2076 * page bigger ones. Remove this when it can.
2077 */
2078 kr = KERN_FAILURE;
2079 goto make_mem_done;
2080 }
1c79356b 2081
91447636
A
2082 object = vm_object_allocate(map_size);
2083 assert(object != VM_OBJECT_NULL);
1c79356b 2084
91447636
A
2085 if (permission & MAP_MEM_PURGABLE) {
2086 if (! (permission & VM_PROT_WRITE)) {
2087 /* if we can't write, we can't purge */
2088 vm_object_deallocate(object);
2089 kr = KERN_INVALID_ARGUMENT;
2090 goto make_mem_done;
2091 }
2092 object->purgable = VM_OBJECT_PURGABLE_NONVOLATILE;
2093 }
1c79356b 2094
91447636
A
2095 /*
2096 * The VM object is brand new and nobody else knows about it,
2097 * so we don't need to lock it.
2098 */
1c79356b 2099
91447636
A
2100 wimg_mode = object->wimg_bits;
2101 if (access == MAP_MEM_IO) {
2102 wimg_mode = VM_WIMG_IO;
2103 } else if (access == MAP_MEM_COPYBACK) {
2104 wimg_mode = VM_WIMG_DEFAULT;
2105 } else if (access == MAP_MEM_WTHRU) {
2106 wimg_mode = VM_WIMG_WTHRU;
2107 } else if (access == MAP_MEM_WCOMB) {
2108 wimg_mode = VM_WIMG_WCOMB;
2109 }
2110 if (access != MAP_MEM_NOOP) {
2111 object->wimg_bits = wimg_mode;
2112 }
2113 /* the object has no pages, so no WIMG bits to update here */
1c79356b 2114
91447636
A
2115 /*
2116 * XXX
2117 * We use this path when we want to make sure that
2118 * nobody messes with the object (coalesce, for
2119 * example) before we map it.
2120 * We might want to use these objects for transposition via
2121 * vm_object_transpose() too, so we don't want any copy or
2122 * shadow objects either...
2123 */
2124 object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
1c79356b 2125
91447636
A
2126 user_entry->backing.object = object;
2127 user_entry->internal = TRUE;
2128 user_entry->is_sub_map = FALSE;
2129 user_entry->is_pager = FALSE;
2130 user_entry->offset = 0;
2131 user_entry->protection = protections;
2132 SET_MAP_MEM(access, user_entry->protection);
2133 user_entry->size = map_size;
55e303ae
A
2134
2135 /* user_object pager and internal fields are not used */
2136 /* when the object field is filled in. */
2137
91447636 2138 *size = CAST_DOWN(vm_size_t, map_size);
55e303ae
A
2139 *object_handle = user_handle;
2140 return KERN_SUCCESS;
2141 }
2142
91447636
A
2143 if (parent_entry == NULL ||
2144 (permission & MAP_MEM_NAMED_REUSE)) {
2145
2146 /* Create a named object based on address range within the task map */
2147 /* Go find the object at given address */
1c79356b 2148
91447636 2149redo_lookup:
1c79356b
A
2150 vm_map_lock_read(target_map);
2151
2152 /* get the object associated with the target address */
2153 /* note we check the permission of the range against */
2154 /* that requested by the caller */
2155
91447636 2156 kr = vm_map_lookup_locked(&target_map, map_offset,
55e303ae 2157 protections, &version,
1c79356b 2158 &object, &obj_off, &prot, &wired, &behavior,
91447636 2159 &lo_offset, &hi_offset, &real_map);
1c79356b
A
2160 if (kr != KERN_SUCCESS) {
2161 vm_map_unlock_read(target_map);
2162 goto make_mem_done;
2163 }
55e303ae 2164 if (((prot & protections) != protections)
9bccf70c 2165 || (object == kernel_object)) {
1c79356b
A
2166 kr = KERN_INVALID_RIGHT;
2167 vm_object_unlock(object);
2168 vm_map_unlock_read(target_map);
91447636
A
2169 if(real_map != target_map)
2170 vm_map_unlock_read(real_map);
9bccf70c
A
2171 if(object == kernel_object) {
2172 printf("Warning: Attempt to create a named"
2173 " entry from the kernel_object\n");
2174 }
1c79356b
A
2175 goto make_mem_done;
2176 }
2177
2178 /* We have an object, now check to see if this object */
2179 /* is suitable. If not, create a shadow and share that */
91447636
A
2180
2181 /*
2182 * We have to unlock the VM object to avoid deadlocking with
2183 * a VM map lock (the lock ordering is map, the object), if we
2184 * need to modify the VM map to create a shadow object. Since
2185 * we might release the VM map lock below anyway, we have
2186 * to release the VM map lock now.
2187 * XXX FBDP There must be a way to avoid this double lookup...
2188 *
2189 * Take an extra reference on the VM object to make sure it's
2190 * not going to disappear.
2191 */
2192 vm_object_reference_locked(object); /* extra ref to hold obj */
2193 vm_object_unlock(object);
2194
9bccf70c 2195 local_map = original_map;
91447636 2196 local_offset = map_offset;
9bccf70c
A
2197 if(target_map != local_map) {
2198 vm_map_unlock_read(target_map);
91447636
A
2199 if(real_map != target_map)
2200 vm_map_unlock_read(real_map);
9bccf70c
A
2201 vm_map_lock_read(local_map);
2202 target_map = local_map;
91447636 2203 real_map = local_map;
9bccf70c 2204 }
1c79356b 2205 while(TRUE) {
9bccf70c
A
2206 if(!vm_map_lookup_entry(local_map,
2207 local_offset, &map_entry)) {
1c79356b 2208 kr = KERN_INVALID_ARGUMENT;
1c79356b 2209 vm_map_unlock_read(target_map);
91447636
A
2210 if(real_map != target_map)
2211 vm_map_unlock_read(real_map);
2212 vm_object_deallocate(object); /* release extra ref */
2213 object = VM_OBJECT_NULL;
1c79356b
A
2214 goto make_mem_done;
2215 }
2216 if(!(map_entry->is_sub_map)) {
2217 if(map_entry->object.vm_object != object) {
2218 kr = KERN_INVALID_ARGUMENT;
1c79356b 2219 vm_map_unlock_read(target_map);
91447636
A
2220 if(real_map != target_map)
2221 vm_map_unlock_read(real_map);
2222 vm_object_deallocate(object); /* release extra ref */
2223 object = VM_OBJECT_NULL;
1c79356b
A
2224 goto make_mem_done;
2225 }
2226 break;
2227 } else {
9bccf70c
A
2228 vm_map_t tmap;
2229 tmap = local_map;
1c79356b 2230 local_map = map_entry->object.sub_map;
9bccf70c 2231
1c79356b 2232 vm_map_lock_read(local_map);
9bccf70c 2233 vm_map_unlock_read(tmap);
1c79356b 2234 target_map = local_map;
91447636 2235 real_map = local_map;
9bccf70c
A
2236 local_offset = local_offset - map_entry->vme_start;
2237 local_offset += map_entry->offset;
1c79356b
A
2238 }
2239 }
91447636
A
2240
2241 /*
2242 * We found the VM map entry, lock the VM object again.
2243 */
2244 vm_object_lock(object);
2245 if(map_entry->wired_count) {
2246 /* JMM - The check below should be reworked instead. */
2247 object->true_share = TRUE;
2248 }
55e303ae 2249 if(((map_entry->max_protection) & protections) != protections) {
1c79356b
A
2250 kr = KERN_INVALID_RIGHT;
2251 vm_object_unlock(object);
2252 vm_map_unlock_read(target_map);
91447636
A
2253 if(real_map != target_map)
2254 vm_map_unlock_read(real_map);
2255 vm_object_deallocate(object);
2256 object = VM_OBJECT_NULL;
1c79356b
A
2257 goto make_mem_done;
2258 }
9bccf70c
A
2259
2260 mappable_size = hi_offset - obj_off;
2261 total_size = map_entry->vme_end - map_entry->vme_start;
91447636 2262 if(map_size > mappable_size) {
9bccf70c
A
2263 /* try to extend mappable size if the entries */
2264 /* following are from the same object and are */
2265 /* compatible */
2266 next_entry = map_entry->vme_next;
 2267 /* let's see if the next map entry is still */
2268 /* pointing at this object and is contiguous */
91447636 2269 while(map_size > mappable_size) {
9bccf70c
A
2270 if((next_entry->object.vm_object == object) &&
2271 (next_entry->vme_start ==
2272 next_entry->vme_prev->vme_end) &&
2273 (next_entry->offset ==
2274 next_entry->vme_prev->offset +
2275 (next_entry->vme_prev->vme_end -
2276 next_entry->vme_prev->vme_start))) {
2277 if(((next_entry->max_protection)
55e303ae 2278 & protections) != protections) {
9bccf70c
A
2279 break;
2280 }
55e303ae
A
2281 if (next_entry->needs_copy !=
2282 map_entry->needs_copy)
2283 break;
9bccf70c
A
2284 mappable_size += next_entry->vme_end
2285 - next_entry->vme_start;
2286 total_size += next_entry->vme_end
2287 - next_entry->vme_start;
2288 next_entry = next_entry->vme_next;
2289 } else {
2290 break;
2291 }
2292
2293 }
2294 }
2295
1c79356b
A
2296 if(object->internal) {
2297 /* vm_map_lookup_locked will create a shadow if */
2298 /* needs_copy is set but does not check for the */
2299 /* other two conditions shown. It is important to */
2300 /* set up an object which will not be pulled from */
2301 /* under us. */
2302
0b4e3aa0 2303 if ((map_entry->needs_copy || object->shadowed ||
9bccf70c
A
2304 (object->size > total_size))
2305 && !object->true_share) {
91447636
A
2306 /*
2307 * We have to unlock the VM object before
2308 * trying to upgrade the VM map lock, to
2309 * honor lock ordering (map then object).
2310 * Otherwise, we would deadlock if another
2311 * thread holds a read lock on the VM map and
2312 * is trying to acquire the VM object's lock.
2313 * We still hold an extra reference on the
2314 * VM object, guaranteeing that it won't
2315 * disappear.
2316 */
2317 vm_object_unlock(object);
2318
1c79356b 2319 if (vm_map_lock_read_to_write(target_map)) {
91447636
A
2320 /*
2321 * We couldn't upgrade our VM map lock
2322 * from "read" to "write" and we lost
2323 * our "read" lock.
2324 * Start all over again...
2325 */
2326 vm_object_deallocate(object); /* extra ref */
2327 target_map = original_map;
1c79356b
A
2328 goto redo_lookup;
2329 }
91447636 2330 vm_object_lock(object);
1c79356b 2331
55e303ae
A
2332 /*
2333 * JMM - We need to avoid coming here when the object
2334 * is wired by anybody, not just the current map. Why
2335 * couldn't we use the standard vm_object_copy_quickly()
2336 * approach here?
2337 */
2338
1c79356b 2339 /* create a shadow object */
9bccf70c
A
2340 vm_object_shadow(&map_entry->object.vm_object,
2341 &map_entry->offset, total_size);
2342 shadow_object = map_entry->object.vm_object;
2343 vm_object_unlock(object);
91447636 2344
c0fea474
A
2345 prot = map_entry->protection & ~VM_PROT_WRITE;
2346#ifdef STACK_ONLY_NX
2347 if (map_entry->alias != VM_MEMORY_STACK && prot)
2348 prot |= VM_PROT_EXECUTE;
2349#endif
9bccf70c
A
2350 vm_object_pmap_protect(
2351 object, map_entry->offset,
2352 total_size,
2353 ((map_entry->is_shared
2354 || target_map->mapped)
2355 ? PMAP_NULL :
2356 target_map->pmap),
2357 map_entry->vme_start,
c0fea474 2358 prot);
9bccf70c
A
2359 total_size -= (map_entry->vme_end
2360 - map_entry->vme_start);
2361 next_entry = map_entry->vme_next;
2362 map_entry->needs_copy = FALSE;
2363 while (total_size) {
2364 if(next_entry->object.vm_object == object) {
55e303ae
A
2365 shadow_object->ref_count++;
2366 vm_object_res_reference(shadow_object);
9bccf70c
A
2367 next_entry->object.vm_object
2368 = shadow_object;
55e303ae 2369 vm_object_deallocate(object);
9bccf70c
A
2370 next_entry->offset
2371 = next_entry->vme_prev->offset +
2372 (next_entry->vme_prev->vme_end
2373 - next_entry->vme_prev->vme_start);
2374 next_entry->needs_copy = FALSE;
2375 } else {
2376 panic("mach_make_memory_entry_64:"
2377 " map entries out of sync\n");
2378 }
2379 total_size -=
2380 next_entry->vme_end
2381 - next_entry->vme_start;
2382 next_entry = next_entry->vme_next;
2383 }
2384
91447636
A
2385 /*
2386 * Transfer our extra reference to the
2387 * shadow object.
2388 */
2389 vm_object_reference_locked(shadow_object);
2390 vm_object_deallocate(object); /* extra ref */
9bccf70c 2391 object = shadow_object;
91447636 2392
9bccf70c
A
2393 obj_off = (local_offset - map_entry->vme_start)
2394 + map_entry->offset;
1c79356b 2395
91447636
A
2396 vm_map_lock_write_to_read(target_map);
2397 vm_object_lock(object);
1c79356b
A
2398
2399 }
2400 }
2401
 2402 /* note: in the future we can (if necessary) allow for */
 2403 /* memory object lists; this will better support */
 2404 /* fragmentation, but is it necessary? The user should */
 2405 /* be encouraged to create address-space-oriented */
 2406 /* shared objects from CLEAN memory regions which have */
 2407 /* a known and defined history, i.e. no inheritance */
 2408 /* share; make this call before making the region the */
 2409 /* target of IPCs, etc. The code above, protecting */
 2410 /* against delayed copy, etc., is mostly defensive. */
2411
55e303ae
A
2412 wimg_mode = object->wimg_bits;
2413 if(!(object->nophyscache)) {
2414 if(access == MAP_MEM_IO) {
2415 wimg_mode = VM_WIMG_IO;
2416 } else if (access == MAP_MEM_COPYBACK) {
2417 wimg_mode = VM_WIMG_USE_DEFAULT;
2418 } else if (access == MAP_MEM_WTHRU) {
2419 wimg_mode = VM_WIMG_WTHRU;
2420 } else if (access == MAP_MEM_WCOMB) {
2421 wimg_mode = VM_WIMG_WCOMB;
2422 }
2423 }
d7e50217 2424
de355530 2425 object->true_share = TRUE;
55e303ae
A
2426 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
2427 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
2428
91447636
A
2429 /*
2430 * The memory entry now points to this VM object and we
2431 * need to hold a reference on the VM object. Use the extra
2432 * reference we took earlier to keep the object alive when we
2433 * had to unlock it.
2434 */
2435
55e303ae 2436 vm_map_unlock_read(target_map);
91447636
A
2437 if(real_map != target_map)
2438 vm_map_unlock_read(real_map);
55e303ae
A
2439
2440 if(object->wimg_bits != wimg_mode) {
2441 vm_page_t p;
2442
2443 vm_object_paging_wait(object, THREAD_UNINT);
2444
91447636
A
2445 if ((wimg_mode == VM_WIMG_IO)
2446 || (wimg_mode == VM_WIMG_WCOMB))
2447 cache_attr = TRUE;
2448 else
2449 cache_attr = FALSE;
2450
55e303ae
A
2451 queue_iterate(&object->memq,
2452 p, vm_page_t, listq) {
2453 if (!p->fictitious) {
91447636
A
2454 pmap_disconnect(p->phys_page);
2455 if (cache_attr)
2456 pmap_sync_page_attributes_phys(p->phys_page);
55e303ae
A
2457 }
2458 }
2459 object->wimg_bits = wimg_mode;
2460 }
1c79356b
A
2461
 2462 /* the size of the mapped entry that overlaps with our region, */
 2463 /* which is targeted for sharing: */
 2464 /* (entry_end - entry_start) - */
 2465 /* offset of our beginning addr within the entry; */
 2466 /* it corresponds to this: */
2467
91447636
A
2468 if(map_size > mappable_size)
2469 map_size = mappable_size;
2470
2471 if (permission & MAP_MEM_NAMED_REUSE) {
2472 /*
2473 * Compare what we got with the "parent_entry".
2474 * If they match, re-use the "parent_entry" instead
2475 * of creating a new one.
2476 */
2477 if (parent_entry != NULL &&
2478 parent_entry->backing.object == object &&
2479 parent_entry->internal == object->internal &&
2480 parent_entry->is_sub_map == FALSE &&
2481 parent_entry->is_pager == FALSE &&
2482 parent_entry->offset == obj_off &&
2483 parent_entry->protection == protections &&
2484 parent_entry->size == map_size) {
2485 /*
2486 * We have a match: re-use "parent_entry".
2487 */
2488 /* release our extra reference on object */
2489 vm_object_unlock(object);
2490 vm_object_deallocate(object);
2491 /* parent_entry->ref_count++; XXX ? */
2492 /* Get an extra send-right on handle */
2493 ipc_port_copy_send(parent_handle);
2494 *object_handle = parent_handle;
2495 return KERN_SUCCESS;
2496 } else {
2497 /*
2498 * No match: we need to create a new entry.
2499 * fall through...
2500 */
2501 }
2502 }
2503
2504 vm_object_unlock(object);
2505 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2506 != KERN_SUCCESS) {
2507 /* release our unused reference on the object */
2508 vm_object_deallocate(object);
2509 return KERN_FAILURE;
2510 }
1c79356b 2511
91447636
A
2512 user_entry->backing.object = object;
2513 user_entry->internal = object->internal;
2514 user_entry->is_sub_map = FALSE;
2515 user_entry->is_pager = FALSE;
2516 user_entry->offset = obj_off;
2517 user_entry->protection = permission;
2518 user_entry->size = map_size;
1c79356b
A
2519
2520 /* user_object pager and internal fields are not used */
2521 /* when the object field is filled in. */
2522
91447636 2523 *size = CAST_DOWN(vm_size_t, map_size);
1c79356b 2524 *object_handle = user_handle;
1c79356b 2525 return KERN_SUCCESS;
1c79356b 2526
91447636 2527 } else {
1c79356b 2528 /* The new object will be based on an existing named object */
91447636
A
2529
2530 if (parent_entry == NULL) {
1c79356b
A
2531 kr = KERN_INVALID_ARGUMENT;
2532 goto make_mem_done;
2533 }
91447636 2534 if((offset + map_size) > parent_entry->size) {
1c79356b
A
2535 kr = KERN_INVALID_ARGUMENT;
2536 goto make_mem_done;
2537 }
2538
91447636
A
2539 if((protections & parent_entry->protection) != protections) {
2540 kr = KERN_PROTECTION_FAILURE;
2541 goto make_mem_done;
2542 }
2543
2544 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2545 != KERN_SUCCESS) {
2546 kr = KERN_FAILURE;
2547 goto make_mem_done;
55e303ae 2548 }
91447636
A
2549
2550 user_entry->size = map_size;
2551 user_entry->offset = parent_entry->offset + map_offset;
2552 user_entry->is_sub_map = parent_entry->is_sub_map;
2553 user_entry->is_pager = parent_entry->is_pager;
2554 user_entry->internal = parent_entry->internal;
2555 user_entry->protection = protections;
2556
2557 if(access != MAP_MEM_NOOP) {
2558 SET_MAP_MEM(access, user_entry->protection);
1c79356b 2559 }
91447636
A
2560
2561 if(parent_entry->is_sub_map) {
2562 user_entry->backing.map = parent_entry->backing.map;
2563 vm_map_lock(user_entry->backing.map);
2564 user_entry->backing.map->ref_count++;
2565 vm_map_unlock(user_entry->backing.map);
1c79356b 2566 }
91447636
A
2567 else if (parent_entry->is_pager) {
2568 user_entry->backing.pager = parent_entry->backing.pager;
2569 /* JMM - don't we need a reference here? */
2570 } else {
2571 object = parent_entry->backing.object;
2572 assert(object != VM_OBJECT_NULL);
2573 user_entry->backing.object = object;
2574 /* we now point to this object, hold on */
2575 vm_object_reference(object);
2576 vm_object_lock(object);
2577 object->true_share = TRUE;
2578 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
2579 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
2580 vm_object_unlock(object);
1c79356b 2581 }
91447636 2582 *size = CAST_DOWN(vm_size_t, map_size);
1c79356b
A
2583 *object_handle = user_handle;
2584 return KERN_SUCCESS;
2585 }
2586
1c79356b 2587make_mem_done:
91447636
A
2588 if (user_handle != IP_NULL) {
2589 ipc_port_dealloc_kernel(user_handle);
2590 }
2591 if (user_entry != NULL) {
2592 kfree(user_entry, sizeof *user_entry);
2593 }
2594 return kr;
2595}
2596
2597kern_return_t
2598_mach_make_memory_entry(
2599 vm_map_t target_map,
2600 memory_object_size_t *size,
2601 memory_object_offset_t offset,
2602 vm_prot_t permission,
2603 ipc_port_t *object_handle,
2604 ipc_port_t parent_entry)
2605{
2606 memory_object_offset_t mo_size;
2607 kern_return_t kr;
2608
2609 mo_size = (memory_object_offset_t)*size;
2610 kr = mach_make_memory_entry_64(target_map, &mo_size,
2611 (memory_object_offset_t)offset, permission, object_handle,
2612 parent_entry);
2613 *size = mo_size;
1c79356b
A
2614 return kr;
2615}
2616
2617kern_return_t
2618mach_make_memory_entry(
2619 vm_map_t target_map,
2620 vm_size_t *size,
2621 vm_offset_t offset,
2622 vm_prot_t permission,
2623 ipc_port_t *object_handle,
2624 ipc_port_t parent_entry)
91447636
A
2625{
2626 memory_object_offset_t mo_size;
1c79356b
A
2627 kern_return_t kr;
2628
91447636
A
2629 mo_size = (memory_object_offset_t)*size;
2630 kr = mach_make_memory_entry_64(target_map, &mo_size,
2631 (memory_object_offset_t)offset, permission, object_handle,
1c79356b 2632 parent_entry);
91447636 2633 *size = CAST_DOWN(vm_size_t, mo_size);
1c79356b
A
2634 return kr;
2635}
2636
2637/*
91447636
A
2638 * task_wire
2639 *
2640 * Set or clear the map's wiring_required flag. This flag, if set,
2641 * will cause all future virtual memory allocation to allocate
2642 * user wired memory. Unwiring pages wired down as a result of
2643 * this routine is done with the vm_wire interface.
1c79356b 2644 */
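/*
 * A hedged illustration of the flag's effect, from a user-level caller's
 * point of view; it assumes the standard task_wire() MIG stub and that a
 * host_priv port is available for the eventual vm_wire() unwiring:
 *
 *	kr = task_wire(mach_task_self(), TRUE);	   // future allocations wired
 *	kr = vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
 *	kr = vm_wire(host_priv, mach_task_self(), addr, size, VM_PROT_NONE);
 *						   // VM_PROT_NONE unwires
 *	kr = task_wire(mach_task_self(), FALSE);   // restore normal behavior
 */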
1c79356b 2645kern_return_t
91447636
A
2646task_wire(
2647 vm_map_t map,
2648 boolean_t must_wire)
2649{
2650 if (map == VM_MAP_NULL)
2651 return(KERN_INVALID_ARGUMENT);
2652
2653 if (must_wire)
2654 map->wiring_required = TRUE;
2655 else
2656 map->wiring_required = FALSE;
2657
2658 return(KERN_SUCCESS);
2659}
2660
2661__private_extern__ kern_return_t
2662mach_memory_entry_allocate(
2663 vm_named_entry_t *user_entry_p,
2664 ipc_port_t *user_handle_p)
1c79356b 2665{
91447636 2666 vm_named_entry_t user_entry;
1c79356b 2667 ipc_port_t user_handle;
91447636 2668 ipc_port_t previous;
1c79356b 2669
91447636
A
2670 user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
2671 if (user_entry == NULL)
1c79356b 2672 return KERN_FAILURE;
1c79356b 2673
91447636 2674 named_entry_lock_init(user_entry);
1c79356b 2675
91447636
A
2676 user_handle = ipc_port_alloc_kernel();
2677 if (user_handle == IP_NULL) {
2678 kfree(user_entry, sizeof *user_entry);
2679 return KERN_FAILURE;
2680 }
1c79356b
A
2681 ip_lock(user_handle);
2682
2683 /* make a sonce right */
2684 user_handle->ip_sorights++;
2685 ip_reference(user_handle);
2686
2687 user_handle->ip_destination = IP_NULL;
2688 user_handle->ip_receiver_name = MACH_PORT_NULL;
2689 user_handle->ip_receiver = ipc_space_kernel;
2690
2691 /* make a send right */
2692 user_handle->ip_mscount++;
2693 user_handle->ip_srights++;
2694 ip_reference(user_handle);
2695
2696 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
2697 /* nsrequest unlocks user_handle */
2698
91447636
A
2699 user_entry->backing.pager = NULL;
2700 user_entry->is_sub_map = FALSE;
2701 user_entry->is_pager = FALSE;
2702 user_entry->size = 0;
2703 user_entry->internal = FALSE;
2704 user_entry->ref_count = 1;
1c79356b 2705
91447636
A
2706 ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
2707 IKOT_NAMED_ENTRY);
1c79356b 2708
91447636
A
2709 *user_entry_p = user_entry;
2710 *user_handle_p = user_handle;
1c79356b 2711
91447636
A
2712 return KERN_SUCCESS;
2713}
1c79356b 2714
91447636
A
2715/*
2716 * mach_memory_object_memory_entry_64
2717 *
2718 * Create a named entry backed by the provided pager.
2719 *
2720 * JMM - we need to hold a reference on the pager -
2721 * and release it when the named entry is destroyed.
2722 */
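/*
 * A hedged kernel-side sketch; "pager" is assumed to be a memory_object_t
 * obtained elsewhere (for example from a device pager), and the resulting
 * handle can be mapped like any other named entry:
 *
 *	ipc_port_t	entry = IP_NULL;
 *	kern_return_t	kr;
 *
 *	kr = mach_memory_object_memory_entry_64(host_priv_self(),
 *			FALSE,			// external, pager-backed
 *			(vm_object_offset_t) size,
 *			VM_PROT_READ | VM_PROT_WRITE,
 *			pager, &entry);
 */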
2723kern_return_t
2724mach_memory_object_memory_entry_64(
2725 host_t host,
2726 boolean_t internal,
2727 vm_object_offset_t size,
2728 vm_prot_t permission,
2729 memory_object_t pager,
2730 ipc_port_t *entry_handle)
2731{
2732 unsigned int access;
2733 vm_named_entry_t user_entry;
2734 ipc_port_t user_handle;
2735
2736 if (host == HOST_NULL)
2737 return(KERN_INVALID_HOST);
2738
2739 if (mach_memory_entry_allocate(&user_entry, &user_handle)
2740 != KERN_SUCCESS) {
2741 return KERN_FAILURE;
2742 }
2743
2744 user_entry->backing.pager = pager;
2745 user_entry->size = size;
2746 user_entry->offset = 0;
2747 user_entry->protection = permission & VM_PROT_ALL;
2748 access = GET_MAP_MEM(permission);
2749 SET_MAP_MEM(access, user_entry->protection);
2750 user_entry->internal = internal;
2751 user_entry->is_sub_map = FALSE;
2752 user_entry->is_pager = TRUE;
2753 assert(user_entry->ref_count == 1);
2754
2755 *entry_handle = user_handle;
1c79356b 2756 return KERN_SUCCESS;
91447636
A
2757}
2758
2759kern_return_t
2760mach_memory_object_memory_entry(
2761 host_t host,
2762 boolean_t internal,
2763 vm_size_t size,
2764 vm_prot_t permission,
2765 memory_object_t pager,
2766 ipc_port_t *entry_handle)
2767{
2768 return mach_memory_object_memory_entry_64( host, internal,
2769 (vm_object_offset_t)size, permission, pager, entry_handle);
2770}
2771
2772
2773kern_return_t
2774mach_memory_entry_purgable_control(
2775 ipc_port_t entry_port,
2776 vm_purgable_t control,
2777 int *state)
2778{
2779 kern_return_t kr;
2780 vm_named_entry_t mem_entry;
2781 vm_object_t object;
1c79356b 2782
91447636
A
2783 if (entry_port == IP_NULL ||
2784 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
2785 return KERN_INVALID_ARGUMENT;
2786 }
1c79356b 2787
91447636 2788 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
1c79356b 2789
91447636 2790 named_entry_lock(mem_entry);
1c79356b 2791
91447636
A
2792 if (mem_entry->is_sub_map || mem_entry->is_pager) {
2793 named_entry_unlock(mem_entry);
1c79356b
A
2794 return KERN_INVALID_ARGUMENT;
2795 }
91447636
A
2796
2797 object = mem_entry->backing.object;
2798 if (object == VM_OBJECT_NULL) {
2799 named_entry_unlock(mem_entry);
1c79356b
A
2800 return KERN_INVALID_ARGUMENT;
2801 }
91447636
A
2802
2803 vm_object_lock(object);
2804
2805 /* check that named entry covers entire object ? */
2806 if (mem_entry->offset != 0 || object->size != mem_entry->size) {
2807 vm_object_unlock(object);
2808 named_entry_unlock(mem_entry);
2809 return KERN_INVALID_ARGUMENT;
1c79356b 2810 }
91447636
A
2811
2812 named_entry_unlock(mem_entry);
2813
2814 kr = vm_object_purgable_control(object, control, state);
2815
2816 vm_object_unlock(object);
2817
2818 return kr;
1c79356b
A
2819}
2820
91447636
A
2821/*
2822 * mach_memory_entry_port_release:
2823 *
2824 * Release a send right on a named entry port. This is the correct
2825 * way to destroy a named entry. When the last right on the port is
2826 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
2827 */
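/*
 * A short sketch of the intended teardown; "entry_port" is assumed to be
 * a send right the kernel obtained earlier (e.g. from
 * mach_make_memory_entry_64()):
 *
 *	mach_memory_entry_port_release(entry_port);
 *	entry_port = IP_NULL;
 *
 * A user-space holder of the same right would instead call
 * mach_port_deallocate(mach_task_self(), name).
 */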
2828void
2829mach_memory_entry_port_release(
2830 ipc_port_t port)
2831{
2832 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
2833 ipc_port_release_send(port);
2834}
1c79356b 2835
91447636
A
2836/*
2837 * mach_destroy_memory_entry:
2838 *
2839 * Drops a reference on a memory entry and destroys the memory entry if
2840 * there are no more references on it.
2841 * NOTE: This routine should not be called to destroy a memory entry from the
2842 * kernel, as it will not release the Mach port associated with the memory
2843 * entry. The proper way to destroy a memory entry in the kernel is to
 2844 * call mach_memory_entry_port_release() to release the kernel's send right on
2845 * the memory entry's port. When the last send right is released, the memory
2846 * entry will be destroyed via ipc_kobject_destroy().
2847 */
1c79356b
A
2848void
2849mach_destroy_memory_entry(
2850 ipc_port_t port)
2851{
2852 vm_named_entry_t named_entry;
2853#if MACH_ASSERT
2854 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
2855#endif /* MACH_ASSERT */
2856 named_entry = (vm_named_entry_t)port->ip_kobject;
2857 mutex_lock(&(named_entry)->Lock);
91447636 2858 named_entry->ref_count -= 1;
1c79356b 2859 if(named_entry->ref_count == 0) {
91447636 2860 if (named_entry->is_sub_map) {
1c79356b 2861 vm_map_deallocate(named_entry->backing.map);
91447636
A
2862 } else if (!named_entry->is_pager) {
2863 /* release the memory object we've been pointing to */
2864 vm_object_deallocate(named_entry->backing.object);
2865 } /* else JMM - need to drop reference on pager in that case */
2866
2867 mutex_unlock(&(named_entry)->Lock);
2868
2869 kfree((void *) port->ip_kobject,
2870 sizeof (struct vm_named_entry));
1c79356b
A
2871 } else
2872 mutex_unlock(&(named_entry)->Lock);
2873}
2874
c0fea474
A
2875/* Allow manipulation of individual page state. This is actually part of */
2876/* the UPL regimen but takes place on the memory entry rather than on a UPL */
2877
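/*
 * A minimal kernel-side sketch; it assumes "entry_port" names a valid,
 * object-backed memory entry and uses the UPL_POP_* page-op flags
 * (here UPL_POP_DUMP) for "ops":
 *
 *	ppnum_t		phys = 0;
 *	int		pg_flags = 0;
 *	kern_return_t	kr;
 *
 *	kr = mach_memory_entry_page_op(entry_port,
 *			(vm_object_offset_t) 0,	   // first page of the entry
 *			UPL_POP_DUMP,		   // discard the page if resident
 *			&phys, &pg_flags);
 */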
2878kern_return_t
2879mach_memory_entry_page_op(
2880 ipc_port_t entry_port,
2881 vm_object_offset_t offset,
2882 int ops,
2883 ppnum_t *phys_entry,
2884 int *flags)
2885{
2886 vm_named_entry_t mem_entry;
2887 vm_object_t object;
2888 kern_return_t kr;
2889
2890 if (entry_port == IP_NULL ||
2891 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
2892 return KERN_INVALID_ARGUMENT;
2893 }
2894
2895 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
2896
2897 named_entry_lock(mem_entry);
2898
2899 if (mem_entry->is_sub_map || mem_entry->is_pager) {
2900 named_entry_unlock(mem_entry);
2901 return KERN_INVALID_ARGUMENT;
2902 }
2903
2904 object = mem_entry->backing.object;
2905 if (object == VM_OBJECT_NULL) {
2906 named_entry_unlock(mem_entry);
2907 return KERN_INVALID_ARGUMENT;
2908 }
2909
2910 vm_object_reference(object);
2911 named_entry_unlock(mem_entry);
2912
2913 kr = vm_object_page_op(object, offset, ops, phys_entry, flags);
2914
2915 vm_object_deallocate(object);
2916
2917 return kr;
2918}
2919
2920 /*
 2921 * mach_memory_entry_range_op offers a performance enhancement over
 2922 * mach_memory_entry_page_op for operations that do not require per-page
 2923 * state to be returned from the call. Page_op was created to provide
 2924 * a low-cost alternative to page manipulation via UPLs when only a single
 2925 * page was involved. The range_op call extends the _op family of
 2926 * functions to work on multiple pages, where the lack of per-page
 2927 * state handling allows the caller to avoid the overhead of the UPL structures.
2928 */
2929
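/*
 * A hedged sketch of the batched form described above; it assumes the
 * same kind of object-backed entry as the page_op sketch and uses the
 * UPL_ROP_* range-op flags (here UPL_ROP_DUMP):
 *
 *	int		range = 0;
 *	kern_return_t	kr;
 *
 *	kr = mach_memory_entry_range_op(entry_port,
 *			0,			// offset_beg: start of the entry
 *			16 * PAGE_SIZE_64,	// offset_end: first sixteen pages
 *			UPL_ROP_DUMP,		// discard any resident pages
 *			&range);		// see vm_object_range_op for how
 *						// this is filled in on return
 */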
2930kern_return_t
2931mach_memory_entry_range_op(
2932 ipc_port_t entry_port,
2933 vm_object_offset_t offset_beg,
2934 vm_object_offset_t offset_end,
2935 int ops,
2936 int *range)
2937{
2938 vm_named_entry_t mem_entry;
2939 vm_object_t object;
2940 kern_return_t kr;
2941
2942 if (entry_port == IP_NULL ||
2943 ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
2944 return KERN_INVALID_ARGUMENT;
2945 }
2946
2947 mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
2948
2949 named_entry_lock(mem_entry);
2950
2951 if (mem_entry->is_sub_map || mem_entry->is_pager) {
2952 named_entry_unlock(mem_entry);
2953 return KERN_INVALID_ARGUMENT;
2954 }
2955
2956 object = mem_entry->backing.object;
2957 if (object == VM_OBJECT_NULL) {
2958 named_entry_unlock(mem_entry);
2959 return KERN_INVALID_ARGUMENT;
2960 }
2961
2962 vm_object_reference(object);
2963 named_entry_unlock(mem_entry);
2964
2965 kr = vm_object_range_op(object,
2966 offset_beg,
2967 offset_end,
2968 ops,
2969 range);
2970
2971 vm_object_deallocate(object);
2972
2973 return kr;
2974}
1c79356b 2975
1c79356b
A
2976
2977kern_return_t
2978set_dp_control_port(
2979 host_priv_t host_priv,
2980 ipc_port_t control_port)
2981{
2982 if (host_priv == HOST_PRIV_NULL)
2983 return (KERN_INVALID_HOST);
0b4e3aa0
A
2984
2985 if (IP_VALID(dynamic_pager_control_port))
2986 ipc_port_release_send(dynamic_pager_control_port);
2987
1c79356b
A
2988 dynamic_pager_control_port = control_port;
2989 return KERN_SUCCESS;
2990}
2991
2992kern_return_t
2993get_dp_control_port(
2994 host_priv_t host_priv,
2995 ipc_port_t *control_port)
2996{
2997 if (host_priv == HOST_PRIV_NULL)
2998 return (KERN_INVALID_HOST);
0b4e3aa0
A
2999
3000 *control_port = ipc_port_copy_send(dynamic_pager_control_port);
1c79356b
A
3001 return KERN_SUCCESS;
3002
3003}
3004
91447636 3005/* ******* Temporary Internal calls to UPL for BSD ***** */
1c79356b 3006
91447636
A
3007extern int kernel_upl_map(
3008 vm_map_t map,
3009 upl_t upl,
3010 vm_offset_t *dst_addr);
1c79356b 3011
91447636
A
3012extern int kernel_upl_unmap(
3013 vm_map_t map,
3014 upl_t upl);
150bd074 3015
91447636
A
3016extern int kernel_upl_commit(
3017 upl_t upl,
3018 upl_page_info_t *pl,
3019 mach_msg_type_number_t count);
1c79356b 3020
91447636
A
3021extern int kernel_upl_commit_range(
3022 upl_t upl,
3023 upl_offset_t offset,
3024 upl_size_t size,
3025 int flags,
3026 upl_page_info_array_t pl,
3027 mach_msg_type_number_t count);
1c79356b 3028
91447636
A
3029extern int kernel_upl_abort(
3030 upl_t upl,
3031 int abort_type);
1c79356b 3032
91447636
A
3033extern int kernel_upl_abort_range(
3034 upl_t upl,
3035 upl_offset_t offset,
3036 upl_size_t size,
3037 int abort_flags);
1c79356b 3038
1c79356b 3039
1c79356b
A
3040kern_return_t
3041kernel_upl_map(
3042 vm_map_t map,
3043 upl_t upl,
3044 vm_offset_t *dst_addr)
3045{
91447636 3046 return vm_upl_map(map, upl, dst_addr);
1c79356b
A
3047}
3048
3049
3050kern_return_t
3051kernel_upl_unmap(
3052 vm_map_t map,
0b4e3aa0 3053 upl_t upl)
1c79356b 3054{
91447636 3055 return vm_upl_unmap(map, upl);
1c79356b
A
3056}
3057
3058kern_return_t
3059kernel_upl_commit(
91447636
A
3060 upl_t upl,
3061 upl_page_info_t *pl,
0b4e3aa0 3062 mach_msg_type_number_t count)
1c79356b 3063{
0b4e3aa0
A
3064 kern_return_t kr;
3065
3066 kr = upl_commit(upl, pl, count);
3067 upl_deallocate(upl);
1c79356b
A
3068 return kr;
3069}
3070
0b4e3aa0 3071
1c79356b
A
3072kern_return_t
3073kernel_upl_commit_range(
3074 upl_t upl,
91447636
A
3075 upl_offset_t offset,
3076 upl_size_t size,
1c79356b 3077 int flags,
0b4e3aa0
A
3078 upl_page_info_array_t pl,
3079 mach_msg_type_number_t count)
1c79356b 3080{
0b4e3aa0
A
3081 boolean_t finished = FALSE;
3082 kern_return_t kr;
3083
3084 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
3085 flags |= UPL_COMMIT_NOTIFY_EMPTY;
3086
3087 kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
3088
3089 if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
3090 upl_deallocate(upl);
3091
1c79356b
A
3092 return kr;
3093}
3094
3095kern_return_t
3096kernel_upl_abort_range(
0b4e3aa0 3097 upl_t upl,
91447636
A
3098 upl_offset_t offset,
3099 upl_size_t size,
0b4e3aa0 3100 int abort_flags)
1c79356b 3101{
0b4e3aa0
A
3102 kern_return_t kr;
3103 boolean_t finished = FALSE;
1c79356b 3104
0b4e3aa0
A
3105 if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
3106 abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
1c79356b 3107
0b4e3aa0 3108 kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
1c79356b 3109
0b4e3aa0
A
3110 if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
3111 upl_deallocate(upl);
1c79356b 3112
0b4e3aa0 3113 return kr;
1c79356b
A
3114}
3115
1c79356b 3116kern_return_t
0b4e3aa0
A
3117kernel_upl_abort(
3118 upl_t upl,
3119 int abort_type)
1c79356b 3120{
0b4e3aa0 3121 kern_return_t kr;
1c79356b 3122
0b4e3aa0
A
3123 kr = upl_abort(upl, abort_type);
3124 upl_deallocate(upl);
3125 return kr;
1c79356b
A
3126}
3127
91447636
A
3128/*
3129 * Now a kernel-private interface (for BootCache
3130 * use only). Need a cleaner way to create an
3131 * empty vm_map() and return a handle to it.
3132 */
1c79356b
A
3133
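/*
 * A hedged sketch of the intended use; the returned handle behaves like
 * any other named entry, so a kernel client such as the BootCache can
 * subsequently map it into whatever map it needs:
 *
 *	ipc_port_t	handle = IP_NULL;
 *	kern_return_t	kr;
 *
 *	kr = vm_region_object_create(kernel_map, round_page(len), &handle);
 */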
3134kern_return_t
91447636
A
3135vm_region_object_create(
3136 __unused vm_map_t target_map,
3137 vm_size_t size,
3138 ipc_port_t *object_handle)
1c79356b 3139{
91447636
A
3140 vm_named_entry_t user_entry;
3141 ipc_port_t user_handle;
1c79356b 3142
91447636 3143 vm_map_t new_map;
1c79356b 3144
91447636
A
3145 if (mach_memory_entry_allocate(&user_entry, &user_handle)
3146 != KERN_SUCCESS) {
1c79356b 3147 return KERN_FAILURE;
91447636 3148 }
1c79356b 3149
91447636 3150 /* Create a named object based on a submap of specified size */
1c79356b 3151
91447636
A
3152 new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
3153 vm_map_round_page(size), TRUE);
1c79356b 3154
91447636
A
3155 user_entry->backing.map = new_map;
3156 user_entry->internal = TRUE;
3157 user_entry->is_sub_map = TRUE;
3158 user_entry->offset = 0;
3159 user_entry->protection = VM_PROT_ALL;
3160 user_entry->size = size;
3161 assert(user_entry->ref_count == 1);
1c79356b 3162
91447636 3163 *object_handle = user_handle;
1c79356b 3164 return KERN_SUCCESS;
1c79356b 3165
55e303ae
A
3166}
3167
91447636
A
3168ppnum_t vm_map_get_phys_page( /* forward */
3169 vm_map_t map,
3170 vm_offset_t offset);
3171
55e303ae 3172ppnum_t
1c79356b 3173vm_map_get_phys_page(
91447636
A
3174 vm_map_t map,
3175 vm_offset_t addr)
1c79356b 3176{
91447636
A
3177 vm_object_offset_t offset;
3178 vm_object_t object;
3179 vm_map_offset_t map_offset;
3180 vm_map_entry_t entry;
3181 ppnum_t phys_page = 0;
3182
3183 map_offset = vm_map_trunc_page(addr);
1c79356b
A
3184
3185 vm_map_lock(map);
91447636 3186 while (vm_map_lookup_entry(map, map_offset, &entry)) {
1c79356b
A
3187
3188 if (entry->object.vm_object == VM_OBJECT_NULL) {
3189 vm_map_unlock(map);
91447636 3190 return (ppnum_t) 0;
1c79356b
A
3191 }
3192 if (entry->is_sub_map) {
3193 vm_map_t old_map;
3194 vm_map_lock(entry->object.sub_map);
3195 old_map = map;
3196 map = entry->object.sub_map;
91447636 3197 map_offset = entry->offset + (map_offset - entry->vme_start);
1c79356b
A
3198 vm_map_unlock(old_map);
3199 continue;
3200 }
9bccf70c
A
3201 if (entry->object.vm_object->phys_contiguous) {
3202 /* These are not standard pageable memory mappings */
3203 /* If they are not present in the object they will */
3204 /* have to be picked up from the pager through the */
3205 /* fault mechanism. */
3206 if(entry->object.vm_object->shadow_offset == 0) {
3207 /* need to call vm_fault */
3208 vm_map_unlock(map);
91447636 3209 vm_fault(map, map_offset, VM_PROT_NONE,
9bccf70c
A
3210 FALSE, THREAD_UNINT, NULL, 0);
3211 vm_map_lock(map);
3212 continue;
3213 }
91447636 3214 offset = entry->offset + (map_offset - entry->vme_start);
55e303ae
A
3215 phys_page = (ppnum_t)
3216 ((entry->object.vm_object->shadow_offset
3217 + offset) >> 12);
9bccf70c
A
3218 break;
3219
3220 }
91447636 3221 offset = entry->offset + (map_offset - entry->vme_start);
1c79356b
A
3222 object = entry->object.vm_object;
3223 vm_object_lock(object);
3224 while (TRUE) {
3225 vm_page_t dst_page = vm_page_lookup(object,offset);
3226 if(dst_page == VM_PAGE_NULL) {
3227 if(object->shadow) {
3228 vm_object_t old_object;
3229 vm_object_lock(object->shadow);
3230 old_object = object;
3231 offset = offset + object->shadow_offset;
3232 object = object->shadow;
3233 vm_object_unlock(old_object);
3234 } else {
3235 vm_object_unlock(object);
3236 break;
3237 }
3238 } else {
55e303ae 3239 phys_page = (ppnum_t)(dst_page->phys_page);
1c79356b
A
3240 vm_object_unlock(object);
3241 break;
3242 }
3243 }
3244 break;
3245
3246 }
3247
3248 vm_map_unlock(map);
55e303ae
A
3249 return phys_page;
3250}
3251
3252
3253
91447636
A
3254kern_return_t kernel_object_iopl_request( /* forward */
3255 vm_named_entry_t named_entry,
3256 memory_object_offset_t offset,
3257 vm_size_t *upl_size,
3258 upl_t *upl_ptr,
3259 upl_page_info_array_t user_page_list,
3260 unsigned int *page_list_count,
3261 int *flags);
3262
55e303ae
A
3263kern_return_t
3264kernel_object_iopl_request(
3265 vm_named_entry_t named_entry,
3266 memory_object_offset_t offset,
3267 vm_size_t *upl_size,
3268 upl_t *upl_ptr,
3269 upl_page_info_array_t user_page_list,
3270 unsigned int *page_list_count,
3271 int *flags)
3272{
3273 vm_object_t object;
3274 kern_return_t ret;
3275
3276 int caller_flags;
3277
3278 caller_flags = *flags;
3279
91447636
A
3280 if (caller_flags & ~UPL_VALID_FLAGS) {
3281 /*
3282 * For forward compatibility's sake,
3283 * reject any unknown flag.
3284 */
3285 return KERN_INVALID_VALUE;
3286 }
3287
55e303ae
A
3288 /* a few checks to make sure user is obeying rules */
3289 if(*upl_size == 0) {
3290 if(offset >= named_entry->size)
3291 return(KERN_INVALID_RIGHT);
3292 *upl_size = named_entry->size - offset;
3293 }
3294 if(caller_flags & UPL_COPYOUT_FROM) {
3295 if((named_entry->protection & VM_PROT_READ)
3296 != VM_PROT_READ) {
3297 return(KERN_INVALID_RIGHT);
3298 }
3299 } else {
3300 if((named_entry->protection &
3301 (VM_PROT_READ | VM_PROT_WRITE))
3302 != (VM_PROT_READ | VM_PROT_WRITE)) {
3303 return(KERN_INVALID_RIGHT);
3304 }
3305 }
3306 if(named_entry->size < (offset + *upl_size))
3307 return(KERN_INVALID_ARGUMENT);
3308
 3309 /* the caller's parameter offset is defined to be the */
 3310 /* offset from the beginning of the named entry's offset in the object */
3311 offset = offset + named_entry->offset;
3312
3313 if(named_entry->is_sub_map)
3314 return (KERN_INVALID_ARGUMENT);
3315
3316 named_entry_lock(named_entry);
3317
91447636 3318 if (named_entry->is_pager) {
55e303ae
A
3319 object = vm_object_enter(named_entry->backing.pager,
3320 named_entry->offset + named_entry->size,
3321 named_entry->internal,
3322 FALSE,
3323 FALSE);
3324 if (object == VM_OBJECT_NULL) {
3325 named_entry_unlock(named_entry);
3326 return(KERN_INVALID_OBJECT);
3327 }
55e303ae 3328
91447636
A
3329 /* JMM - drop reference on the pager here? */
3330
3331 /* create an extra reference for the object */
3332 vm_object_lock(object);
55e303ae 3333 vm_object_reference_locked(object);
91447636
A
3334 named_entry->backing.object = object;
3335 named_entry->is_pager = FALSE;
55e303ae
A
3336 named_entry_unlock(named_entry);
3337
3338 /* wait for object (if any) to be ready */
91447636
A
3339 if (!named_entry->internal) {
3340 while (!object->pager_ready) {
3341 vm_object_wait(object,
3342 VM_OBJECT_EVENT_PAGER_READY,
3343 THREAD_UNINT);
3344 vm_object_lock(object);
3345 }
55e303ae
A
3346 }
3347 vm_object_unlock(object);
91447636
A
3348
3349 } else {
 3350 /* This is the case where we are going to operate */
 3351 /* on an already known object. If the object is */
 3352 /* not ready, it is internal. An external */
 3353 /* object cannot be mapped until it is ready, */
 3354 /* so we can avoid the ready check */
 3355 /* in this case. */
3356 object = named_entry->backing.object;
3357 vm_object_reference(object);
3358 named_entry_unlock(named_entry);
55e303ae
A
3359 }
3360
3361 if (!object->private) {
3362 if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
3363 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
3364 if (object->phys_contiguous) {
3365 *flags = UPL_PHYS_CONTIG;
3366 } else {
3367 *flags = 0;
3368 }
3369 } else {
3370 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
3371 }
3372
3373 ret = vm_object_iopl_request(object,
3374 offset,
3375 *upl_size,
3376 upl_ptr,
3377 user_page_list,
3378 page_list_count,
3379 caller_flags);
3380 vm_object_deallocate(object);
3381 return ret;
1c79356b 3382}