1/*
2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52/*
53 * File: vm/vm_user.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young
55 *
56 * User-exported virtual memory functions.
57 */
58
59#include <vm_cpm.h>
60#include <mach/boolean.h>
61#include <mach/kern_return.h>
62#include <mach/mach_types.h> /* to get vm_address_t */
63#include <mach/memory_object.h>
64#include <mach/std_types.h> /* to get pointer_t */
65#include <mach/vm_attributes.h>
66#include <mach/vm_param.h>
67#include <mach/vm_statistics.h>
68#include <mach/vm_map_server.h>
69#include <mach/mach_syscalls.h>
70
71#include <mach/shared_memory_server.h>
72#include <vm/vm_shared_memory_server.h>
73
74#include <kern/host.h>
75#include <kern/task.h>
76#include <kern/misc_protos.h>
77#include <vm/vm_map.h>
78#include <vm/vm_object.h>
79#include <vm/vm_page.h>
80#include <vm/memory_object.h>
81#include <vm/vm_pageout.h>
82
83__private_extern__ load_struct_t *
84lsf_remove_regions_mappings_lock(
85 shared_region_mapping_t region,
86 shared_region_task_mappings_t sm_info,
87 int need_lock);
88
89
90vm_size_t upl_offset_to_pagelist = 0;
91
92#if VM_CPM
93#include <vm/cpm.h>
94#endif /* VM_CPM */
95
96ipc_port_t dynamic_pager_control_port=NULL;
97
98/*
99 * vm_allocate allocates "zero fill" memory in the specified
100 * map.
101 */
102kern_return_t
103vm_allocate(
104 register vm_map_t map,
105 register vm_offset_t *addr,
106 register vm_size_t size,
107 int flags)
108{
109 kern_return_t result;
110 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
111
112 if (map == VM_MAP_NULL)
113 return(KERN_INVALID_ARGUMENT);
114 if (size == 0) {
115 *addr = 0;
116 return(KERN_SUCCESS);
117 }
118
119 if (anywhere)
120 *addr = vm_map_min(map);
121 else
122 *addr = trunc_page_32(*addr);
123 size = round_page_32(size);
124 if (size == 0) {
125 return(KERN_INVALID_ARGUMENT);
126 }
127
128 result = vm_map_enter(
129 map,
130 addr,
131 size,
132 (vm_offset_t)0,
133 flags,
134 VM_OBJECT_NULL,
135 (vm_object_offset_t)0,
136 FALSE,
137 VM_PROT_DEFAULT,
138 VM_PROT_ALL,
139 VM_INHERIT_DEFAULT);
140
141 return(result);
142}
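/*
 * Illustrative usage sketch (hypothetical, not from this file), assuming the
 * user-side MIG stubs in <mach/mach.h> and a caller working on its own map:
 *
 *	vm_address_t addr = 0;
 *	vm_size_t    len  = 4 * vm_page_size;
 *	kern_return_t kr  = vm_allocate(mach_task_self(), &addr, len,
 *	                                VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS)
 *		kr = vm_deallocate(mach_task_self(), addr, len);
 */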
143
144/*
145 * vm_deallocate deallocates the specified range of addresses in the
146 * specified address map.
147 */
148kern_return_t
149vm_deallocate(
150 register vm_map_t map,
151 vm_offset_t start,
152 vm_size_t size)
153{
154 if (map == VM_MAP_NULL)
155 return(KERN_INVALID_ARGUMENT);
156
157 if (size == (vm_offset_t) 0)
158 return(KERN_SUCCESS);
159
160 return(vm_map_remove(map, trunc_page_32(start),
161 round_page_32(start+size), VM_MAP_NO_FLAGS));
162}
163
164/*
165 * vm_inherit sets the inheritance of the specified range in the
166 * specified map.
167 */
168kern_return_t
169vm_inherit(
170 register vm_map_t map,
171 vm_offset_t start,
172 vm_size_t size,
173 vm_inherit_t new_inheritance)
174{
175 if (map == VM_MAP_NULL)
176 return(KERN_INVALID_ARGUMENT);
177
178 if (new_inheritance > VM_INHERIT_LAST_VALID)
179 return(KERN_INVALID_ARGUMENT);
180
181 return(vm_map_inherit(map,
182 trunc_page_32(start),
183 round_page_32(start+size),
184 new_inheritance));
185}
186
187/*
188 * vm_protect sets the protection of the specified range in the
189 * specified map.
190 */
191
192kern_return_t
193vm_protect(
194 register vm_map_t map,
195 vm_offset_t start,
196 vm_size_t size,
197 boolean_t set_maximum,
198 vm_prot_t new_protection)
199{
200 if ((map == VM_MAP_NULL) ||
201 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
202 return(KERN_INVALID_ARGUMENT);
203
204 return(vm_map_protect(map,
205 trunc_page_32(start),
206 round_page_32(start+size),
207 new_protection,
208 set_maximum));
209}
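/*
 * Illustrative sketch, assuming the user-side stub: drop the current
 * protection of an existing region to read-only while leaving the maximum
 * protection alone (set_maximum == FALSE).
 *
 *	kr = vm_protect(mach_task_self(), addr, len, FALSE, VM_PROT_READ);
 */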
210
211/*
212 * Handle machine-specific attributes for a mapping, such
213 * as cachability, migrability, etc.
214 */
215kern_return_t
216vm_machine_attribute(
217 vm_map_t map,
218 vm_address_t address,
219 vm_size_t size,
220 vm_machine_attribute_t attribute,
221 vm_machine_attribute_val_t* value) /* IN/OUT */
222{
223 if (map == VM_MAP_NULL)
224 return(KERN_INVALID_ARGUMENT);
225
226 return vm_map_machine_attribute(map, address, size, attribute, value);
227}
228
229kern_return_t
230vm_read(
231 vm_map_t map,
232 vm_address_t address,
233 vm_size_t size,
234 pointer_t *data,
235 mach_msg_type_number_t *data_size)
236{
237 kern_return_t error;
238 vm_map_copy_t ipc_address;
239
240 if (map == VM_MAP_NULL)
241 return(KERN_INVALID_ARGUMENT);
242
243 if ((error = vm_map_copyin(map,
244 address,
245 size,
246 FALSE, /* src_destroy */
247 &ipc_address)) == KERN_SUCCESS) {
248 *data = (pointer_t) ipc_address;
249 *data_size = size;
250 }
251 return(error);
252}
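/*
 * Illustrative sketch, assuming the user-side stub: vm_read() returns the
 * data out-of-line as a fresh copy in the caller's map, so the caller must
 * deallocate it when done ("consume" below is a hypothetical helper).
 *
 *	pointer_t data;
 *	mach_msg_type_number_t cnt;
 *	if (vm_read(target_task, addr, len, &data, &cnt) == KERN_SUCCESS) {
 *		consume((void *)data, cnt);
 *		vm_deallocate(mach_task_self(), (vm_address_t)data, cnt);
 *	}
 */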
253
254kern_return_t
255vm_read_list(
256 vm_map_t map,
257 vm_read_entry_t data_list,
258 mach_msg_type_number_t count)
259{
260 mach_msg_type_number_t i;
261 kern_return_t error;
262 vm_map_copy_t ipc_address;
263
264 if (map == VM_MAP_NULL)
265 return(KERN_INVALID_ARGUMENT);
266
267 for(i=0; i<count; i++) {
268 error = vm_map_copyin(map,
269 data_list[i].address,
270 data_list[i].size,
271 FALSE, /* src_destroy */
272 &ipc_address);
273 if(error != KERN_SUCCESS) {
274 data_list[i].address = (vm_address_t)0;
275 data_list[i].size = (vm_size_t)0;
276 break;
277 }
278 if(data_list[i].size != 0) {
279 error = vm_map_copyout(current_task()->map,
280 &(data_list[i].address),
281 (vm_map_copy_t) ipc_address);
282 if(error != KERN_SUCCESS) {
283 data_list[i].address = (vm_address_t)0;
284 data_list[i].size = (vm_size_t)0;
285 break;
286 }
287 }
288 }
289 return(error);
290}
291
292/*
293 * This routine reads from the specified map and overwrites part of the current
294 * activation's map. Because it assumes the current thread is local, it is
295 * no longer cluster-safe without a fully supportive local proxy thread/task
296 * (but we don't support clusters anymore, so this is moot).
297 */
298
299#define VM_OVERWRITE_SMALL 512
300
301kern_return_t
302vm_read_overwrite(
303 vm_map_t map,
304 vm_address_t address,
305 vm_size_t size,
306 vm_address_t data,
307 vm_size_t *data_size)
308{
309 struct {
310 long align;
311 char buf[VM_OVERWRITE_SMALL];
312 } inbuf;
313 vm_map_t oldmap;
314 kern_return_t error = KERN_SUCCESS;
315 vm_map_copy_t copy;
316
317 if (map == VM_MAP_NULL)
318 return(KERN_INVALID_ARGUMENT);
319
320 if (size <= VM_OVERWRITE_SMALL) {
321 if(vm_map_read_user(map, (vm_offset_t)address,
322 (vm_offset_t)&inbuf, size)) {
323 error = KERN_INVALID_ADDRESS;
324 } else {
325 if(vm_map_write_user(current_map(),
326 (vm_offset_t)&inbuf, (vm_offset_t)data, size))
327 error = KERN_INVALID_ADDRESS;
328 }
329 }
330 else {
331 if ((error = vm_map_copyin(map,
332 address,
333 size,
334 FALSE, /* src_destroy */
335 &copy)) == KERN_SUCCESS) {
336 if ((error = vm_map_copy_overwrite(
337 current_act()->map,
338 data,
339 copy,
340 FALSE)) == KERN_SUCCESS) {
341 }
342 else {
343 vm_map_copy_discard(copy);
344 }
345 }
346 }
347 *data_size = size;
348 return(error);
349}
350
351
352
353
354/*ARGSUSED*/
355kern_return_t
356vm_write(
357 vm_map_t map,
358 vm_address_t address,
359 vm_offset_t data,
360 mach_msg_type_number_t size)
361{
362 if (map == VM_MAP_NULL)
363 return KERN_INVALID_ARGUMENT;
364
365 return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
366 FALSE /* interruptible XXX */);
367}
368
369kern_return_t
370vm_copy(
371 vm_map_t map,
372 vm_address_t source_address,
373 vm_size_t size,
374 vm_address_t dest_address)
375{
376 vm_map_copy_t copy;
377 kern_return_t kr;
378
379 if (map == VM_MAP_NULL)
380 return KERN_INVALID_ARGUMENT;
381
382 kr = vm_map_copyin(map, source_address, size,
383 FALSE, &copy);
384 if (kr != KERN_SUCCESS)
385 return kr;
386
387 kr = vm_map_copy_overwrite(map, dest_address, copy,
388 FALSE /* interruptible XXX */);
389 if (kr != KERN_SUCCESS) {
390 vm_map_copy_discard(copy);
391 return kr;
392 }
393
394 return KERN_SUCCESS;
395}
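/*
 * Illustrative sketch, assuming the user-side stub: vm_copy() duplicates a
 * page-aligned range into another already-allocated range of the same map.
 *
 *	kr = vm_copy(mach_task_self(), src_addr, len, dst_addr);
 */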
396
397/*
398 * Routine: vm_map
399 */
400kern_return_t
401vm_map_64(
402 vm_map_t target_map,
403 vm_offset_t *address,
404 vm_size_t initial_size,
405 vm_offset_t mask,
406 int flags,
407 ipc_port_t port,
408 vm_object_offset_t offset,
409 boolean_t copy,
410 vm_prot_t cur_protection,
411 vm_prot_t max_protection,
412 vm_inherit_t inheritance)
413{
414 register
415 vm_object_t object;
416 vm_prot_t prot;
417 vm_object_size_t size = (vm_object_size_t)initial_size;
418 kern_return_t result;
419
420 /*
421 * Check arguments for validity
422 */
423 if ((target_map == VM_MAP_NULL) ||
424 (cur_protection & ~VM_PROT_ALL) ||
425 (max_protection & ~VM_PROT_ALL) ||
426 (inheritance > VM_INHERIT_LAST_VALID) ||
427 size == 0)
428 return(KERN_INVALID_ARGUMENT);
429
430 /*
431 * Find the vm object (if any) corresponding to this port.
432 */
433 if (!IP_VALID(port)) {
434 object = VM_OBJECT_NULL;
435 offset = 0;
436 copy = FALSE;
437 } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
438 vm_named_entry_t named_entry;
439
440 named_entry = (vm_named_entry_t)port->ip_kobject;
441 /* a few checks to make sure user is obeying rules */
442 if(size == 0) {
443 if(offset >= named_entry->size)
444 return(KERN_INVALID_RIGHT);
445 size = named_entry->size - offset;
446 }
447 if((named_entry->protection & max_protection) != max_protection)
448 return(KERN_INVALID_RIGHT);
449 if((named_entry->protection & cur_protection) != cur_protection)
450 return(KERN_INVALID_RIGHT);
451 if(named_entry->size < (offset + size))
452 return(KERN_INVALID_ARGUMENT);
453
454 /* the callers parameter offset is defined to be the */
455 /* offset from beginning of named entry offset in object */
456 offset = offset + named_entry->offset;
457
458 named_entry_lock(named_entry);
459 if(named_entry->is_sub_map) {
460 vm_map_entry_t map_entry;
461
462 named_entry_unlock(named_entry);
463 *address = trunc_page_32(*address);
464 size = round_page_64(size);
465 vm_object_reference(vm_submap_object);
466 if ((result = vm_map_enter(target_map,
467 address, size, mask, flags,
468 vm_submap_object, 0,
469 FALSE,
470 cur_protection, max_protection, inheritance
471 )) != KERN_SUCCESS) {
472 vm_object_deallocate(vm_submap_object);
473 } else {
474 char alias;
475
476 VM_GET_FLAGS_ALIAS(flags, alias);
477 if ((alias == VM_MEMORY_SHARED_PMAP) &&
478 !copy) {
479 vm_map_submap(target_map, *address,
480 (*address) + size,
481 named_entry->backing.map,
482 (vm_offset_t)offset, TRUE);
483 } else {
484 vm_map_submap(target_map, *address,
485 (*address) + size,
486 named_entry->backing.map,
487 (vm_offset_t)offset, FALSE);
488 }
489 if(copy) {
490 if(vm_map_lookup_entry(
491 target_map, *address, &map_entry)) {
492 map_entry->needs_copy = TRUE;
493 }
494 }
495 }
496 return(result);
497
498 } else if(named_entry->object) {
499 /* This is the case where we are going to map */
500 /* an already mapped object. If the object is */
501 /* not ready it is internal. An external */
502 /* object cannot be mapped until it is ready */
503 /* we can therefore avoid the ready check */
504 /* in this case. */
505 named_entry_unlock(named_entry);
506 vm_object_reference(named_entry->object);
507 object = named_entry->object;
508 } else {
509 unsigned int access;
510 vm_prot_t protections;
511 unsigned int wimg_mode;
512 boolean_t cache_attr;
513
514 protections = named_entry->protection
515 & VM_PROT_ALL;
516 access = GET_MAP_MEM(named_entry->protection);
517
518 object = vm_object_enter(
519 named_entry->backing.pager,
520 named_entry->size,
521 named_entry->internal,
522 FALSE,
523 FALSE);
524 if (object == VM_OBJECT_NULL) {
525 named_entry_unlock(named_entry);
526 return(KERN_INVALID_OBJECT);
527 }
528
529 vm_object_lock(object);
530
531 /* create an extra ref for the named entry */
532 vm_object_reference_locked(object);
533 named_entry->object = object;
534 named_entry_unlock(named_entry);
535
536 wimg_mode = object->wimg_bits;
537 if(access == MAP_MEM_IO) {
538 wimg_mode = VM_WIMG_IO;
539 } else if (access == MAP_MEM_COPYBACK) {
540 wimg_mode = VM_WIMG_USE_DEFAULT;
541 } else if (access == MAP_MEM_WTHRU) {
542 wimg_mode = VM_WIMG_WTHRU;
543 } else if (access == MAP_MEM_WCOMB) {
544 wimg_mode = VM_WIMG_WCOMB;
545 }
546 if ((wimg_mode == VM_WIMG_IO)
547 || (wimg_mode == VM_WIMG_WCOMB))
548 cache_attr = TRUE;
549 else
550 cache_attr = FALSE;
551
552 if (named_entry->backing.pager) {
553 /* wait for object (if any) to be ready */
554 while (!object->pager_ready) {
555 vm_object_wait(object,
556 VM_OBJECT_EVENT_PAGER_READY,
557 THREAD_UNINT);
558 vm_object_lock(object);
559 }
560 }
561 if(object->wimg_bits != wimg_mode) {
562 vm_page_t p;
563
564 vm_object_paging_wait(object, THREAD_UNINT);
565
566 object->wimg_bits = wimg_mode;
567 queue_iterate(&object->memq, p, vm_page_t, listq) {
568 if (!p->fictitious) {
569 pmap_page_protect(
570 p->phys_page,
571 VM_PROT_NONE);
572 if(cache_attr)
573 pmap_sync_caches_phys(
574 p->phys_page);
575 }
576 }
577 }
578 object->true_share = TRUE;
579 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
580 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
581 vm_object_unlock(object);
582 }
583 } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
584 /*
585 * JMM - This is temporary until we unify named entries
586 * and raw memory objects.
587 *
588 * Detected fake ip_kotype for a memory object. In
589 * this case, the port isn't really a port at all, but
590 * instead is just a raw memory object.
591 */
592
593 if ((object = vm_object_enter((memory_object_t)port,
594 size, FALSE, FALSE, FALSE))
595 == VM_OBJECT_NULL)
596 return(KERN_INVALID_OBJECT);
597
598 /* wait for object (if any) to be ready */
599 if (object != VM_OBJECT_NULL) {
600 if(object == kernel_object) {
601 printf("Warning: Attempt to map kernel object"
602 " by a non-private kernel entity\n");
603 return(KERN_INVALID_OBJECT);
604 }
605 vm_object_lock(object);
606 while (!object->pager_ready) {
607 vm_object_wait(object,
608 VM_OBJECT_EVENT_PAGER_READY,
609 THREAD_UNINT);
610 vm_object_lock(object);
611 }
612 vm_object_unlock(object);
613 }
614 } else {
615 return (KERN_INVALID_OBJECT);
616 }
617
618 *address = trunc_page_32(*address);
619 size = round_page_64(size);
620
621 /*
622 * Perform the copy if requested
623 */
624
625 if (copy) {
626 vm_object_t new_object;
627 vm_object_offset_t new_offset;
628
629 result = vm_object_copy_strategically(object, offset, size,
630 &new_object, &new_offset,
631 &copy);
632
633
634 if (result == KERN_MEMORY_RESTART_COPY) {
635 boolean_t success;
636 boolean_t src_needs_copy;
637
638 /*
639 * XXX
640 * We currently ignore src_needs_copy.
641 * This really is the issue of how to make
642 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
643 * non-kernel users to use. Solution forthcoming.
644 * In the meantime, since we don't allow non-kernel
645 * memory managers to specify symmetric copy,
646 * we won't run into problems here.
647 */
648 new_object = object;
649 new_offset = offset;
650 success = vm_object_copy_quickly(&new_object,
651 new_offset, size,
652 &src_needs_copy,
653 &copy);
654 assert(success);
655 result = KERN_SUCCESS;
656 }
657 /*
658 * Throw away the reference to the
659 * original object, as it won't be mapped.
660 */
661
662 vm_object_deallocate(object);
663
664 if (result != KERN_SUCCESS)
665 return (result);
666
667 object = new_object;
668 offset = new_offset;
669 }
670
671 if ((result = vm_map_enter(target_map,
672 address, size, mask, flags,
673 object, offset,
674 copy,
675 cur_protection, max_protection, inheritance
676 )) != KERN_SUCCESS)
677 vm_object_deallocate(object);
678 return(result);
679}
680
681/* temporary, until world build */
682kern_return_t
683vm_map(
684 vm_map_t target_map,
685 vm_offset_t *address,
686 vm_size_t size,
687 vm_offset_t mask,
688 int flags,
689 ipc_port_t port,
690 vm_offset_t offset,
691 boolean_t copy,
692 vm_prot_t cur_protection,
693 vm_prot_t max_protection,
694 vm_inherit_t inheritance)
695{
696 return vm_map_64(target_map, address, size, mask, flags,
697 port, (vm_object_offset_t)offset, copy,
698 cur_protection, max_protection, inheritance);
699}
700
701
702/*
703 * NOTE: this routine (and this file) will no longer require mach_host_server.h
704 * when vm_wire is changed to use ledgers.
705 */
706#include <mach/mach_host_server.h>
707/*
708 * Specify that the range of the virtual address space
709 * of the target task must not cause page faults for
710 * the indicated accesses.
711 *
712 * [ To unwire the pages, specify VM_PROT_NONE. ]
713 */
714kern_return_t
715vm_wire(
716 host_priv_t host_priv,
717 register vm_map_t map,
718 vm_offset_t start,
719 vm_size_t size,
720 vm_prot_t access)
721{
722 kern_return_t rc;
723
724 if (host_priv == HOST_PRIV_NULL)
725 return KERN_INVALID_HOST;
726
727 assert(host_priv == &realhost);
728
729 if (map == VM_MAP_NULL)
730 return KERN_INVALID_TASK;
731
732 if (access & ~VM_PROT_ALL)
733 return KERN_INVALID_ARGUMENT;
734
735 if (access != VM_PROT_NONE) {
736 rc = vm_map_wire(map, trunc_page_32(start),
737 round_page_32(start+size), access, TRUE);
738 } else {
739 rc = vm_map_unwire(map, trunc_page_32(start),
740 round_page_32(start+size), TRUE);
741 }
742 return rc;
743}
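/*
 * Illustrative sketch (hypothetical): wire a buffer for read/write and later
 * unwire it by passing VM_PROT_NONE, as described above. The privileged host
 * port must be obtained separately by the caller.
 *
 *	kr = vm_wire(host_priv, mach_task_self(), addr, len,
 *	             VM_PROT_READ | VM_PROT_WRITE);
 *	...
 *	kr = vm_wire(host_priv, mach_task_self(), addr, len, VM_PROT_NONE);
 */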
744
745/*
746 * vm_msync
747 *
748 * Synchronises the memory range specified with its backing store
749 * image by either flushing or cleaning the contents to the appropriate
750 * memory manager engaging in a memory object synchronize dialog with
751 * the manager. The client doesn't return until the manager issues
752 * m_o_s_completed message. MIG Magically converts user task parameter
753 * to the task's address map.
754 *
755 * interpretation of sync_flags
756 * VM_SYNC_INVALIDATE - discard pages, only return precious
757 * pages to manager.
758 *
759 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
760 * - discard pages, write dirty or precious
761 * pages back to memory manager.
762 *
763 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
764 * - write dirty or precious pages back to
765 * the memory manager.
766 *
767 * NOTE
768 * The memory object attributes have not yet been implemented; this
769 * function will have to deal with the invalidate attribute.
770 *
771 * RETURNS
772 * KERN_INVALID_TASK Bad task parameter
773 * KERN_INVALID_ARGUMENT both sync and async were specified.
774 * KERN_SUCCESS The usual.
775 */
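/*
 * Illustrative sketch, assuming the user-side stub: the flag combinations
 * described above correspond to calls such as
 *
 *	vm_msync(map, addr, len, VM_SYNC_SYNCHRONOUS);
 *		write dirty/precious pages back and wait for the pager
 *
 *	vm_msync(map, addr, len, VM_SYNC_INVALIDATE | VM_SYNC_ASYNCHRONOUS);
 *		push dirty/precious pages back, then discard the range
 */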
776
777kern_return_t
778vm_msync(
779 vm_map_t map,
780 vm_address_t address,
781 vm_size_t size,
782 vm_sync_t sync_flags)
783{
784 msync_req_t msr;
785 msync_req_t new_msr;
786 queue_chain_t req_q; /* queue of requests for this msync */
787 vm_map_entry_t entry;
788 vm_size_t amount_left;
789 vm_object_offset_t offset;
790 boolean_t do_sync_req;
791 boolean_t modifiable;
792
793
794 if ((sync_flags & VM_SYNC_ASYNCHRONOUS) &&
795 (sync_flags & VM_SYNC_SYNCHRONOUS))
796 return(KERN_INVALID_ARGUMENT);
797
798 /*
799 * align address and size on page boundaries
800 */
801 size = round_page_32(address + size) - trunc_page_32(address);
802 address = trunc_page_32(address);
803
804 if (map == VM_MAP_NULL)
805 return(KERN_INVALID_TASK);
806
807 if (size == 0)
808 return(KERN_SUCCESS);
809
810 queue_init(&req_q);
811 amount_left = size;
812
813 while (amount_left > 0) {
814 vm_size_t flush_size;
815 vm_object_t object;
816
817 vm_map_lock(map);
818 if (!vm_map_lookup_entry(map, address, &entry)) {
819 vm_size_t skip;
820
821 /*
822 * hole in the address map.
823 */
824
825 /*
826 * Check for empty map.
827 */
828 if (entry == vm_map_to_entry(map) &&
829 entry->vme_next == entry) {
830 vm_map_unlock(map);
831 break;
832 }
833 /*
834 * Check that we don't wrap and that
835 * we have at least one real map entry.
836 */
837 if ((map->hdr.nentries == 0) ||
838 (entry->vme_next->vme_start < address)) {
839 vm_map_unlock(map);
840 break;
841 }
842 /*
843 * Move up to the next entry if needed
844 */
845 skip = (entry->vme_next->vme_start - address);
846 if (skip >= amount_left)
847 amount_left = 0;
848 else
849 amount_left -= skip;
850 address = entry->vme_next->vme_start;
851 vm_map_unlock(map);
852 continue;
853 }
854
855 offset = address - entry->vme_start;
856
857 /*
858 * do we have more to flush than is contained in this
859 * entry ?
860 */
861 if (amount_left + entry->vme_start + offset > entry->vme_end) {
862 flush_size = entry->vme_end -
863 (entry->vme_start + offset);
864 } else {
865 flush_size = amount_left;
866 }
867 amount_left -= flush_size;
868 address += flush_size;
869
870 if (entry->is_sub_map == TRUE) {
871 vm_map_t local_map;
872 vm_offset_t local_offset;
873
874 local_map = entry->object.sub_map;
875 local_offset = entry->offset;
876 vm_map_unlock(map);
877 vm_msync(
878 local_map,
879 local_offset,
880 flush_size,
881 sync_flags);
882 continue;
883 }
884 object = entry->object.vm_object;
885
886 /*
887 * We can't sync this object if the object has not been
888 * created yet
889 */
890 if (object == VM_OBJECT_NULL) {
891 vm_map_unlock(map);
892 continue;
893 }
894 offset += entry->offset;
895 modifiable = (entry->protection & VM_PROT_WRITE)
896 != VM_PROT_NONE;
897
898 vm_object_lock(object);
899
900 if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) {
901 boolean_t kill_pages = 0;
902
903 if (sync_flags & VM_SYNC_KILLPAGES) {
904 if (object->ref_count == 1 && !entry->needs_copy && !object->shadow)
905 kill_pages = 1;
906 else
907 kill_pages = -1;
908 }
909 if (kill_pages != -1)
910 vm_object_deactivate_pages(object, offset,
911 (vm_object_size_t)flush_size, kill_pages);
912 vm_object_unlock(object);
913 vm_map_unlock(map);
914 continue;
915 }
916 /*
917 * We can't sync this object if there isn't a pager.
918 * Don't bother to sync internal objects, since there can't
919 * be any "permanent" storage for these objects anyway.
920 */
921 if ((object->pager == MEMORY_OBJECT_NULL) ||
922 (object->internal) || (object->private)) {
923 vm_object_unlock(object);
924 vm_map_unlock(map);
925 continue;
926 }
927 /*
928 * keep reference on the object until syncing is done
929 */
930 assert(object->ref_count > 0);
931 object->ref_count++;
932 vm_object_res_reference(object);
933 vm_object_unlock(object);
934
935 vm_map_unlock(map);
936
937 do_sync_req = vm_object_sync(object,
938 offset,
939 flush_size,
940 sync_flags & VM_SYNC_INVALIDATE,
941 (modifiable &&
942 (sync_flags & VM_SYNC_SYNCHRONOUS ||
943 sync_flags & VM_SYNC_ASYNCHRONOUS)));
944
945 /*
946 * only send a m_o_s if we returned pages or if the entry
947 * is writable (ie dirty pages may have already been sent back)
948 */
949 if (!do_sync_req && !modifiable) {
950 vm_object_deallocate(object);
951 continue;
952 }
953 msync_req_alloc(new_msr);
954
955 vm_object_lock(object);
956 offset += object->paging_offset;
957
958 new_msr->offset = offset;
959 new_msr->length = flush_size;
960 new_msr->object = object;
961 new_msr->flag = VM_MSYNC_SYNCHRONIZING;
962re_iterate:
963 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
964 /*
965 * need to check for overlapping entry, if found, wait
966 * on overlapping msr to be done, then reiterate
967 */
968 msr_lock(msr);
969 if (msr->flag == VM_MSYNC_SYNCHRONIZING &&
970 ((offset >= msr->offset &&
971 offset < (msr->offset + msr->length)) ||
972 (msr->offset >= offset &&
973 msr->offset < (offset + flush_size))))
974 {
975 assert_wait((event_t) msr,THREAD_INTERRUPTIBLE);
976 msr_unlock(msr);
977 vm_object_unlock(object);
978 thread_block((void (*)(void))0);
979 vm_object_lock(object);
980 goto re_iterate;
981 }
982 msr_unlock(msr);
983 }/* queue_iterate */
984
985 queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q);
986 vm_object_unlock(object);
987
988 queue_enter(&req_q, new_msr, msync_req_t, req_q);
989
990 (void) memory_object_synchronize(
991 object->pager,
992 offset,
993 flush_size,
994 sync_flags);
995 }/* while */
996
997 /*
998 * wait for memory_object_synchronize_completed messages from pager(s)
999 */
1000
1001 while (!queue_empty(&req_q)) {
1002 msr = (msync_req_t)queue_first(&req_q);
1003 msr_lock(msr);
1004 while(msr->flag != VM_MSYNC_DONE) {
1005 assert_wait((event_t) msr, THREAD_INTERRUPTIBLE);
1006 msr_unlock(msr);
1007 thread_block((void (*)(void))0);
1008 msr_lock(msr);
1009 }/* while */
1010 queue_remove(&req_q, msr, msync_req_t, req_q);
1011 msr_unlock(msr);
1012 vm_object_deallocate(msr->object);
1013 msync_req_free(msr);
1014 }/* queue_iterate */
1015
1016 return(KERN_SUCCESS);
1017}/* vm_msync */
1018
1019
1020/*
1021 * task_wire
1022 *
1023 * Set or clear the map's wiring_required flag. This flag, if set,
1024 * will cause all future virtual memory allocation to allocate
1025 * user wired memory. Unwiring pages wired down as a result of
1026 * this routine is done with the vm_wire interface.
1027 */
1028kern_return_t
1029task_wire(
1030 vm_map_t map,
1031 boolean_t must_wire)
1032{
1033 if (map == VM_MAP_NULL)
1034 return(KERN_INVALID_ARGUMENT);
1035
1036 if (must_wire)
1037 map->wiring_required = TRUE;
1038 else
1039 map->wiring_required = FALSE;
1040
1041 return(KERN_SUCCESS);
1042}
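/*
 * Illustrative sketch (hypothetical): with wiring_required set, every later
 * allocation in the map comes back user-wired and is released through the
 * vm_wire() interface, as noted above.
 *
 *	task_wire(map, TRUE);
 *	...
 *	task_wire(map, FALSE);
 */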
1043
1044/*
1045 * vm_behavior_set sets the paging behavior attribute for the
1046 * specified range in the specified map. This routine will fail
1047 * with KERN_INVALID_ADDRESS if any address in [start,start+size)
1048 * is not a valid allocated or reserved memory region.
1049 */
1050kern_return_t
1051vm_behavior_set(
1052 vm_map_t map,
1053 vm_offset_t start,
1054 vm_size_t size,
1055 vm_behavior_t new_behavior)
1056{
1057 if (map == VM_MAP_NULL)
1058 return(KERN_INVALID_ARGUMENT);
1059
1060 return(vm_map_behavior_set(map, trunc_page_32(start),
1061 round_page_32(start+size), new_behavior));
1062}
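/*
 * Illustrative sketch, assuming the user-side stub: hint that a mapping will
 * be touched strictly front to back so the pager can favour read-ahead.
 *
 *	kr = vm_behavior_set(mach_task_self(), addr, len,
 *	                     VM_BEHAVIOR_SEQUENTIAL);
 */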
1063
1064#if VM_CPM
1065/*
1066 * Control whether the kernel will permit use of
1067 * vm_allocate_cpm at all.
1068 */
1069unsigned int vm_allocate_cpm_enabled = 1;
1070
1071/*
1072 * Ordinarily, the right to allocate CPM is restricted
1073 * to privileged applications (those that can gain access
1074 * to the host port). Set this variable to zero if you
1075 * want to let any application allocate CPM.
1076 */
1077unsigned int vm_allocate_cpm_privileged = 0;
1078
1079/*
1080 * Allocate memory in the specified map, with the caveat that
1081 * the memory is physically contiguous. This call may fail
1082 * if the system can't find sufficient contiguous memory.
1083 * This call may cause or lead to heart-stopping amounts of
1084 * paging activity.
1085 *
1086 * Memory obtained from this call should be freed in the
1087 * normal way, viz., via vm_deallocate.
1088 */
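/*
 * Illustrative sketch (hypothetical caller holding the privileged host
 * port): request a physically contiguous buffer and free it with the
 * ordinary vm_deallocate(), as noted above.
 *
 *	vm_offset_t cpm_addr = 0;
 *	kr = vm_allocate_cpm(host_priv, map, &cpm_addr, len, VM_FLAGS_ANYWHERE);
 *	...
 *	vm_deallocate(map, cpm_addr, len);
 */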
1089kern_return_t
1090vm_allocate_cpm(
1091 host_priv_t host_priv,
1092 register vm_map_t map,
1093 register vm_offset_t *addr,
1094 register vm_size_t size,
1095 int flags)
1096{
1097 vm_object_t cpm_obj;
1098 pmap_t pmap;
1099 vm_page_t m, pages;
1100 kern_return_t kr;
1101 vm_offset_t va, start, end, offset;
1102#if MACH_ASSERT
1103 extern vm_offset_t avail_start, avail_end;
1104 vm_offset_t prev_addr;
1105#endif /* MACH_ASSERT */
1106
1107 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
1108
1109 if (!vm_allocate_cpm_enabled)
1110 return KERN_FAILURE;
1111
1112 if (vm_allocate_cpm_privileged && host_priv == HOST_PRIV_NULL)
1113 return KERN_INVALID_HOST;
1114
1115 if (map == VM_MAP_NULL)
1116 return KERN_INVALID_ARGUMENT;
1117
1118 assert(host_priv == &realhost);
1119
1120 if (size == 0) {
1121 *addr = 0;
1122 return KERN_SUCCESS;
1123 }
1124
1125 if (anywhere)
1126 *addr = vm_map_min(map);
1127 else
1128 *addr = trunc_page_32(*addr);
1129 size = round_page_32(size);
1130
1131 if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS)
1132 return kr;
1133
1134 cpm_obj = vm_object_allocate(size);
1135 assert(cpm_obj != VM_OBJECT_NULL);
1136 assert(cpm_obj->internal);
1137 assert(cpm_obj->size == size);
1138 assert(cpm_obj->can_persist == FALSE);
1139 assert(cpm_obj->pager_created == FALSE);
1140 assert(cpm_obj->pageout == FALSE);
1141 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1142
1143 /*
1144 * Insert pages into object.
1145 */
1146
1147 vm_object_lock(cpm_obj);
1148 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1149 m = pages;
1150 pages = NEXT_PAGE(m);
1151
1152 assert(!m->gobbled);
1153 assert(!m->wanted);
1154 assert(!m->pageout);
1155 assert(!m->tabled);
1156 assert(m->busy);
1157 assert(m->phys_page>=avail_start && m->phys_page<=avail_end);
1158
1159 m->busy = FALSE;
1160 vm_page_insert(m, cpm_obj, offset);
1161 }
1162 assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
1163 vm_object_unlock(cpm_obj);
1164
1165 /*
1166 * Hang onto a reference on the object in case a
1167 * multi-threaded application for some reason decides
1168 * to deallocate the portion of the address space into
1169 * which we will insert this object.
1170 *
1171 * Unfortunately, we must insert the object now before
1172 * we can talk to the pmap module about which addresses
1173 * must be wired down. Hence, the race with a multi-
1174 * threaded app.
1175 */
1176 vm_object_reference(cpm_obj);
1177
1178 /*
1179 * Insert object into map.
1180 */
1181
1182 kr = vm_map_enter(
1183 map,
1184 addr,
1185 size,
1186 (vm_offset_t)0,
1187 flags,
1188 cpm_obj,
1189 (vm_object_offset_t)0,
1190 FALSE,
1191 VM_PROT_ALL,
1192 VM_PROT_ALL,
1193 VM_INHERIT_DEFAULT);
1194
1195 if (kr != KERN_SUCCESS) {
1196 /*
1197 * A CPM object doesn't have can_persist set,
1198 * so all we have to do is deallocate it to
1199 * free up these pages.
1200 */
1201 assert(cpm_obj->pager_created == FALSE);
1202 assert(cpm_obj->can_persist == FALSE);
1203 assert(cpm_obj->pageout == FALSE);
1204 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1205 vm_object_deallocate(cpm_obj); /* kill acquired ref */
1206 vm_object_deallocate(cpm_obj); /* kill creation ref */
1207 }
1208
1209 /*
1210 * Inform the physical mapping system that the
1211 * range of addresses may not fault, so that
1212 * page tables and such can be locked down as well.
1213 */
1214 start = *addr;
1215 end = start + size;
1216 pmap = vm_map_pmap(map);
1217 pmap_pageable(pmap, start, end, FALSE);
1218
1219 /*
1220 * Enter each page into the pmap, to avoid faults.
1221 * Note that this loop could be coded more efficiently,
1222 * if the need arose, rather than looking up each page
1223 * again.
1224 */
1225 for (offset = 0, va = start; offset < size;
1226 va += PAGE_SIZE, offset += PAGE_SIZE) {
1227 vm_object_lock(cpm_obj);
1228 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1229 vm_object_unlock(cpm_obj);
1230 assert(m != VM_PAGE_NULL);
1231 PMAP_ENTER(pmap, va, m, VM_PROT_ALL,
1232 ((unsigned int)(m->object->wimg_bits)) & VM_WIMG_MASK,
1233 TRUE);
1234 }
1235
1236#if MACH_ASSERT
1237 /*
1238 * Verify ordering in address space.
1239 */
1240 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1241 vm_object_lock(cpm_obj);
1242 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1243 vm_object_unlock(cpm_obj);
1244 if (m == VM_PAGE_NULL)
1245 panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
1246 cpm_obj, offset);
1247 assert(m->tabled);
1248 assert(!m->busy);
1249 assert(!m->wanted);
1250 assert(!m->fictitious);
1251 assert(!m->private);
1252 assert(!m->absent);
1253 assert(!m->error);
1254 assert(!m->cleaning);
1255 assert(!m->precious);
1256 assert(!m->clustered);
1257 if (offset != 0) {
1258 if (m->phys_page != prev_addr + 1) {
1259 printf("start 0x%x end 0x%x va 0x%x\n",
1260 start, end, va);
1261 printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
1262 printf("m 0x%x prev_address 0x%x\n", m,
1263 prev_addr);
1264 panic("vm_allocate_cpm: pages not contig!");
1265 }
1266 }
1267 prev_addr = m->phys_page;
1268 }
1269#endif /* MACH_ASSERT */
1270
1271 vm_object_deallocate(cpm_obj); /* kill extra ref */
1272
1273 return kr;
1274}
1275
1276
1277#else /* VM_CPM */
1278
1279/*
1280 * Interface is defined in all cases, but unless the kernel
1281 * is built explicitly for this option, the interface does
1282 * nothing.
1283 */
1284
1285kern_return_t
1286vm_allocate_cpm(
1287 host_priv_t host_priv,
1288 register vm_map_t map,
1289 register vm_offset_t *addr,
1290 register vm_size_t size,
1291 int flags)
1292{
1293 return KERN_FAILURE;
1294}
1295
1296/*
1297 */
1298kern_return_t
1299mach_memory_object_memory_entry_64(
1300 host_t host,
1301 boolean_t internal,
1302 vm_object_offset_t size,
1303 vm_prot_t permission,
1304 memory_object_t pager,
1305 ipc_port_t *entry_handle)
1306{
1307 unsigned int access;
1308 vm_named_entry_t user_object;
1309 ipc_port_t user_handle;
1310 ipc_port_t previous;
1311 kern_return_t kr;
1312
1313 if (host == HOST_NULL)
1314 return(KERN_INVALID_HOST);
1315
1316 user_object = (vm_named_entry_t)
1317 kalloc(sizeof (struct vm_named_entry));
1318 if(user_object == NULL)
1319 return KERN_FAILURE;
1320 named_entry_lock_init(user_object);
1321 user_handle = ipc_port_alloc_kernel();
1322 ip_lock(user_handle);
1323
1324 /* make a sonce right */
1325 user_handle->ip_sorights++;
1326 ip_reference(user_handle);
1327
1328 user_handle->ip_destination = IP_NULL;
1329 user_handle->ip_receiver_name = MACH_PORT_NULL;
1330 user_handle->ip_receiver = ipc_space_kernel;
1331
1332 /* make a send right */
1333 user_handle->ip_mscount++;
1334 user_handle->ip_srights++;
1335 ip_reference(user_handle);
1336
1337 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1338 /* nsrequest unlocks user_handle */
1339
1340 user_object->object = NULL;
1341 user_object->size = size;
1342 user_object->offset = 0;
1343 user_object->backing.pager = pager;
1344 user_object->protection = permission & VM_PROT_ALL;
1345 access = GET_MAP_MEM(permission);
1346 SET_MAP_MEM(access, user_object->protection);
1347 user_object->internal = internal;
1348 user_object->is_sub_map = FALSE;
1349 user_object->ref_count = 1;
1350
1351 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1352 IKOT_NAMED_ENTRY);
1353 *entry_handle = user_handle;
1354 return KERN_SUCCESS;
1355}
1356
1357kern_return_t
1358mach_memory_object_memory_entry(
1359 host_t host,
1360 boolean_t internal,
1361 vm_size_t size,
1362 vm_prot_t permission,
1363 memory_object_t pager,
1364 ipc_port_t *entry_handle)
1365{
1366 return mach_memory_object_memory_entry_64( host, internal,
1367 (vm_object_offset_t)size, permission, pager, entry_handle);
1368}
1369
1370
1371
1372/*
1373 */
1374
1375kern_return_t
1376mach_make_memory_entry_64(
1377 vm_map_t target_map,
1378 vm_object_size_t *size,
1379 vm_object_offset_t offset,
1380 vm_prot_t permission,
1381 ipc_port_t *object_handle,
1382 ipc_port_t parent_entry)
1383{
1384 vm_map_version_t version;
1385 vm_named_entry_t user_object;
1386 ipc_port_t user_handle;
1387 ipc_port_t previous;
1388 kern_return_t kr;
1389 vm_map_t pmap_map;
1390
1391 /* needed for call to vm_map_lookup_locked */
1392 boolean_t wired;
1393 vm_object_offset_t obj_off;
1394 vm_prot_t prot;
1395 vm_object_offset_t lo_offset, hi_offset;
1396 vm_behavior_t behavior;
1397 vm_object_t object;
1398 vm_object_t shadow_object;
1399
1400 /* needed for direct map entry manipulation */
1401 vm_map_entry_t map_entry;
1402 vm_map_entry_t next_entry;
1403 vm_map_t local_map;
1404 vm_map_t original_map = target_map;
1405 vm_offset_t local_offset;
1406 vm_object_size_t mappable_size;
1407 vm_object_size_t total_size;
1408
1409 unsigned int access;
1410 vm_prot_t protections;
1411 unsigned int wimg_mode;
1412 boolean_t cache_attr;
1413
1414 protections = permission & VM_PROT_ALL;
1415 access = GET_MAP_MEM(permission);
1416
1417
1418 offset = trunc_page_64(offset);
1419 *size = round_page_64(*size);
1420
1421 if((parent_entry != NULL)
1422 && (permission & MAP_MEM_ONLY)) {
1423 vm_named_entry_t parent_object;
1424 if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
1425 return KERN_INVALID_ARGUMENT;
1426 }
1427 parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
1428 object = parent_object->object;
1429 if(object != VM_OBJECT_NULL)
1430 wimg_mode = object->wimg_bits;
1431 if((access != GET_MAP_MEM(parent_object->protection)) &&
1432 !(parent_object->protection & VM_PROT_WRITE)) {
1433 return KERN_INVALID_RIGHT;
1434 }
1435 if(access == MAP_MEM_IO) {
1436 SET_MAP_MEM(access, parent_object->protection);
1437 wimg_mode = VM_WIMG_IO;
1438 } else if (access == MAP_MEM_COPYBACK) {
1439 SET_MAP_MEM(access, parent_object->protection);
1440 wimg_mode = VM_WIMG_DEFAULT;
1441 } else if (access == MAP_MEM_WTHRU) {
1442 SET_MAP_MEM(access, parent_object->protection);
1443 wimg_mode = VM_WIMG_WTHRU;
1444 } else if (access == MAP_MEM_WCOMB) {
1445 SET_MAP_MEM(access, parent_object->protection);
1446 wimg_mode = VM_WIMG_WCOMB;
1447 }
1448 if(object &&
1449 (access != MAP_MEM_NOOP) &&
1450 (!(object->nophyscache))) {
1451 if(object->wimg_bits != wimg_mode) {
1452 vm_page_t p;
1453 if ((wimg_mode == VM_WIMG_IO)
1454 || (wimg_mode == VM_WIMG_WCOMB))
1455 cache_attr = TRUE;
1456 else
1457 cache_attr = FALSE;
1458 vm_object_lock(object);
1459 while(object->paging_in_progress) {
1460 vm_object_unlock(object);
1461 vm_object_wait(object,
1462 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
1463 THREAD_UNINT);
1464 vm_object_lock(object);
1465 }
1466 object->wimg_bits = wimg_mode;
1467 queue_iterate(&object->memq,
1468 p, vm_page_t, listq) {
1469 if (!p->fictitious) {
1470 pmap_page_protect(
1471 p->phys_page,
1472 VM_PROT_NONE);
1473 if(cache_attr)
1474 pmap_sync_caches_phys(
1475 p->phys_page);
1476 }
1477 }
1478 vm_object_unlock(object);
1479 }
1480 }
1481 return KERN_SUCCESS;
1482 }
1483
1484 if(permission & MAP_MEM_ONLY) {
1485 return KERN_INVALID_ARGUMENT;
1486 }
1487
1488 user_object = (vm_named_entry_t)
1489 kalloc(sizeof (struct vm_named_entry));
1490 if(user_object == NULL)
1491 return KERN_FAILURE;
1492 named_entry_lock_init(user_object);
1493 user_handle = ipc_port_alloc_kernel();
1494 ip_lock(user_handle);
1495
1496 /* make a sonce right */
1497 user_handle->ip_sorights++;
1498 ip_reference(user_handle);
1499
1500 user_handle->ip_destination = IP_NULL;
1501 user_handle->ip_receiver_name = MACH_PORT_NULL;
1502 user_handle->ip_receiver = ipc_space_kernel;
1503
1504 /* make a send right */
1505 user_handle->ip_mscount++;
1506 user_handle->ip_srights++;
1507 ip_reference(user_handle);
1508
1509 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1510 /* nsrequest unlocks user_handle */
1511
1512 user_object->backing.pager = NULL;
1513 user_object->ref_count = 1;
1514
1515 if(permission & MAP_MEM_NAMED_CREATE) {
1516 user_object->object = NULL;
1517 user_object->internal = TRUE;
1518 user_object->is_sub_map = FALSE;
1519 user_object->offset = 0;
1520 user_object->protection = protections;
1521 SET_MAP_MEM(access, user_object->protection);
1522 user_object->size = *size;
1523
1524 /* user_object pager and internal fields are not used */
1525 /* when the object field is filled in. */
1526
1527 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1528 IKOT_NAMED_ENTRY);
1529 *object_handle = user_handle;
1530 return KERN_SUCCESS;
1531 }
1532
1533 if(parent_entry == NULL) {
1534 /* Create a named object based on address range within the task map */
1535 /* Go find the object at given address */
1536
1537 vm_map_lock_read(target_map);
1538
1539 /* get the object associated with the target address */
1540 /* note we check the permission of the range against */
1541 /* that requested by the caller */
1542
1543 kr = vm_map_lookup_locked(&target_map, offset,
1544 protections, &version,
1545 &object, &obj_off, &prot, &wired, &behavior,
1546 &lo_offset, &hi_offset, &pmap_map);
1547 if (kr != KERN_SUCCESS) {
1548 vm_map_unlock_read(target_map);
1549 goto make_mem_done;
1550 }
1551 if (((prot & protections) != protections)
1552 || (object == kernel_object)) {
1553 kr = KERN_INVALID_RIGHT;
1554 vm_object_unlock(object);
1555 vm_map_unlock_read(target_map);
1556 if(pmap_map != target_map)
1557 vm_map_unlock_read(pmap_map);
1558 if(object == kernel_object) {
1559 printf("Warning: Attempt to create a named"
1560 " entry from the kernel_object\n");
1561 }
1562 goto make_mem_done;
1563 }
1564
1565 /* We have an object, now check to see if this object */
1566 /* is suitable. If not, create a shadow and share that */
1567
1568redo_lookup:
1569 local_map = original_map;
1570 local_offset = offset;
1571 if(target_map != local_map) {
1572 vm_map_unlock_read(target_map);
1573 if(pmap_map != target_map)
1574 vm_map_unlock_read(pmap_map);
1575 vm_map_lock_read(local_map);
1576 target_map = local_map;
1577 pmap_map = local_map;
1578 }
1579 while(TRUE) {
1580 if(!vm_map_lookup_entry(local_map,
1581 local_offset, &map_entry)) {
1582 kr = KERN_INVALID_ARGUMENT;
1583 vm_object_unlock(object);
1584 vm_map_unlock_read(target_map);
1585 if(pmap_map != target_map)
1586 vm_map_unlock_read(pmap_map);
1587 goto make_mem_done;
1588 }
1589 if(!(map_entry->is_sub_map)) {
1590 if(map_entry->object.vm_object != object) {
1591 kr = KERN_INVALID_ARGUMENT;
1592 vm_object_unlock(object);
1593 vm_map_unlock_read(target_map);
1594 if(pmap_map != target_map)
1595 vm_map_unlock_read(pmap_map);
1596 goto make_mem_done;
1597 }
1598 if(map_entry->wired_count) {
1599 /* JMM - The check below should be reworked instead. */
1600 object->true_share = TRUE;
1601 }
1602 break;
1603 } else {
1604 vm_map_t tmap;
1605 tmap = local_map;
1606 local_map = map_entry->object.sub_map;
1607
1608 vm_map_lock_read(local_map);
1609 vm_map_unlock_read(tmap);
1610 target_map = local_map;
1611 pmap_map = local_map;
1612 local_offset = local_offset - map_entry->vme_start;
1613 local_offset += map_entry->offset;
1614 }
1615 }
1616 if(((map_entry->max_protection) & protections) != protections) {
1617 kr = KERN_INVALID_RIGHT;
1618 vm_object_unlock(object);
1619 vm_map_unlock_read(target_map);
1620 if(pmap_map != target_map)
1621 vm_map_unlock_read(pmap_map);
1622 goto make_mem_done;
1623 }
1624
1625 mappable_size = hi_offset - obj_off;
1626 total_size = map_entry->vme_end - map_entry->vme_start;
1627 if(*size > mappable_size) {
1628 /* try to extend mappable size if the entries */
1629 /* following are from the same object and are */
1630 /* compatible */
1631 next_entry = map_entry->vme_next;
1632 /* lets see if the next map entry is still */
1633 /* pointing at this object and is contiguous */
1634 while(*size > mappable_size) {
1635 if((next_entry->object.vm_object == object) &&
1636 (next_entry->vme_start ==
1637 next_entry->vme_prev->vme_end) &&
1638 (next_entry->offset ==
1639 next_entry->vme_prev->offset +
1640 (next_entry->vme_prev->vme_end -
1641 next_entry->vme_prev->vme_start))) {
1642 if(((next_entry->max_protection)
1643 & protections) != protections) {
1644 break;
1645 }
1646 if (next_entry->needs_copy !=
1647 map_entry->needs_copy)
1648 break;
1649 mappable_size += next_entry->vme_end
1650 - next_entry->vme_start;
1651 total_size += next_entry->vme_end
1652 - next_entry->vme_start;
1653 next_entry = next_entry->vme_next;
1654 } else {
1655 break;
1656 }
1657
1658 }
1659 }
1660
1661 if(object->internal) {
1662 /* vm_map_lookup_locked will create a shadow if */
1663 /* needs_copy is set but does not check for the */
1664 /* other two conditions shown. It is important to */
1665 /* set up an object which will not be pulled from */
1666 /* under us. */
1667
1668 if ((map_entry->needs_copy || object->shadowed ||
1669 (object->size > total_size))
1670 && !object->true_share) {
1671 if (vm_map_lock_read_to_write(target_map)) {
1672 vm_map_lock_read(target_map);
1673 goto redo_lookup;
1674 }
1675
1676 /*
1677 * JMM - We need to avoid coming here when the object
1678 * is wired by anybody, not just the current map. Why
1679 * couldn't we use the standard vm_object_copy_quickly()
1680 * approach here?
1681 */
1682
1683 /* create a shadow object */
1684 vm_object_shadow(&map_entry->object.vm_object,
1685 &map_entry->offset, total_size);
1686 shadow_object = map_entry->object.vm_object;
1687 vm_object_unlock(object);
1688 vm_object_pmap_protect(
1689 object, map_entry->offset,
1690 total_size,
1691 ((map_entry->is_shared
1692 || target_map->mapped)
1693 ? PMAP_NULL :
1694 target_map->pmap),
1695 map_entry->vme_start,
1696 map_entry->protection & ~VM_PROT_WRITE);
1697 total_size -= (map_entry->vme_end
1698 - map_entry->vme_start);
1699 next_entry = map_entry->vme_next;
1700 map_entry->needs_copy = FALSE;
1701 while (total_size) {
1702 if(next_entry->object.vm_object == object) {
1703 shadow_object->ref_count++;
1704 vm_object_res_reference(shadow_object);
1705 next_entry->object.vm_object
1706 = shadow_object;
1707 vm_object_deallocate(object);
1708 next_entry->offset
1709 = next_entry->vme_prev->offset +
1710 (next_entry->vme_prev->vme_end
1711 - next_entry->vme_prev->vme_start);
1712 next_entry->needs_copy = FALSE;
1713 } else {
1714 panic("mach_make_memory_entry_64:"
1715 " map entries out of sync\n");
1716 }
1717 total_size -=
1718 next_entry->vme_end
1719 - next_entry->vme_start;
1720 next_entry = next_entry->vme_next;
1721 }
1722
1723 object = shadow_object;
1724 vm_object_lock(object);
1725 obj_off = (local_offset - map_entry->vme_start)
1726 + map_entry->offset;
1727 vm_map_lock_write_to_read(target_map);
1728
1729
1730 }
1731 }
1732
1733 /* note: in the future we can (if necessary) allow for */
1734 /* memory object lists, this will better support */
1735 /* fragmentation, but is it necessary? The user should */
1736 /* be encouraged to create address space oriented */
1737 /* shared objects from CLEAN memory regions which have */
1738 * a known and defined history. i.e. no inheritance
1739 /* share, make this call before making the region the */
1740 /* target of ipc's, etc. The code above, protecting */
1741 /* against delayed copy, etc. is mostly defensive. */
1742
1743 wimg_mode = object->wimg_bits;
1744 if(!(object->nophyscache)) {
1745 if(access == MAP_MEM_IO) {
1746 wimg_mode = VM_WIMG_IO;
1747 } else if (access == MAP_MEM_COPYBACK) {
1748 wimg_mode = VM_WIMG_USE_DEFAULT;
1749 } else if (access == MAP_MEM_WTHRU) {
1750 wimg_mode = VM_WIMG_WTHRU;
1751 } else if (access == MAP_MEM_WCOMB) {
1752 wimg_mode = VM_WIMG_WCOMB;
1753 }
1754 }
1755
1756 object->true_share = TRUE;
1757 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
1758 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1759
1760 /* we now point to this object, hold on to it */
1761 vm_object_reference_locked(object);
1762 vm_map_unlock_read(target_map);
1763 if(pmap_map != target_map)
1764 vm_map_unlock_read(pmap_map);
1765
1766 if(object->wimg_bits != wimg_mode) {
1767 vm_page_t p;
1768
1769 vm_object_paging_wait(object, THREAD_UNINT);
1770
1771 queue_iterate(&object->memq,
1772 p, vm_page_t, listq) {
1773 if (!p->fictitious) {
1774 pmap_page_protect(
1775 p->phys_page,
1776 VM_PROT_NONE);
1777 if(cache_attr)
1778 pmap_sync_caches_phys(
1779 p->phys_page);
1780 }
1781 }
1782 object->wimg_bits = wimg_mode;
1783 }
1784 user_object->object = object;
1785 user_object->internal = object->internal;
1786 user_object->is_sub_map = FALSE;
1787 user_object->offset = obj_off;
1788 user_object->protection = permission;
1789
1790 /* the size of mapped entry that overlaps with our region */
1791 /* which is targeted for share. */
1792 /* (entry_end - entry_start) - */
1793 /* offset of our beg addr within entry */
1794 /* it corresponds to this: */
1795
1796 if(*size > mappable_size)
1797 *size = mappable_size;
1798
1799 user_object->size = *size;
1800
1801 /* user_object pager and internal fields are not used */
1802 /* when the object field is filled in. */
1803
1804 vm_object_unlock(object);
1805 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1806 IKOT_NAMED_ENTRY);
1807 *object_handle = user_handle;
1808 return KERN_SUCCESS;
1809 } else {
1810
1811 vm_named_entry_t parent_object;
1812
1813 /* The new object will be based on an existing named object */
1814 if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
1815 kr = KERN_INVALID_ARGUMENT;
1816 goto make_mem_done;
1817 }
1818 parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
1819 if((offset + *size) > parent_object->size) {
1820 kr = KERN_INVALID_ARGUMENT;
1821 goto make_mem_done;
1822 }
1823
1824 user_object->object = parent_object->object;
1825 user_object->size = *size;
1826 user_object->offset = parent_object->offset + offset;
1827 user_object->protection = parent_object->protection;
1828 user_object->protection &= ~VM_PROT_ALL;
1829 user_object->protection = permission & VM_PROT_ALL;
1830 if(access != MAP_MEM_NOOP) {
1831 SET_MAP_MEM(access, user_object->protection);
1832 }
1833 if(parent_object->is_sub_map) {
1834 user_object->backing.map = parent_object->backing.map;
1835 vm_map_lock(user_object->backing.map);
1836 user_object->backing.map->ref_count++;
1837 vm_map_unlock(user_object->backing.map);
1838 }
1839 else {
1840 user_object->backing.pager = parent_object->backing.pager;
1841 }
1842 user_object->internal = parent_object->internal;
1843 user_object->is_sub_map = parent_object->is_sub_map;
1844
1845 if(parent_object->object != NULL) {
1846 /* we now point to this object, hold on */
1847 vm_object_reference(parent_object->object);
1848 vm_object_lock(parent_object->object);
1849 parent_object->object->true_share = TRUE;
1850 if (parent_object->object->copy_strategy ==
1851 MEMORY_OBJECT_COPY_SYMMETRIC)
1852 parent_object->object->copy_strategy =
1853 MEMORY_OBJECT_COPY_DELAY;
1854 vm_object_unlock(parent_object->object);
1855 }
1856 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1857 IKOT_NAMED_ENTRY);
1858 *object_handle = user_handle;
1859 return KERN_SUCCESS;
1860 }
1861
1862
1863
1864make_mem_done:
1865 ipc_port_dealloc_kernel(user_handle);
1866 kfree((vm_offset_t)user_object, sizeof (struct vm_named_entry));
1867 return kr;
1868}
1869
1870kern_return_t
1871mach_make_memory_entry(
1872 vm_map_t target_map,
1873 vm_size_t *size,
1874 vm_offset_t offset,
1875 vm_prot_t permission,
1876 ipc_port_t *object_handle,
1877 ipc_port_t parent_entry)
1878{
1879 vm_object_offset_t size_64;
1880 kern_return_t kr;
1881
1882 size_64 = (vm_object_offset_t)*size;
1883 kr = mach_make_memory_entry_64(target_map, &size_64,
1884 (vm_object_offset_t)offset, permission, object_handle,
1885 parent_entry);
1886 *size = (vm_size_t)size_64;
1887 return kr;
1888}
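/*
 * Illustrative sketch, assuming the user-side stubs: carve a named entry out
 * of an existing range and map it into another task with vm_map(), so both
 * tasks share the same backing object.
 *
 *	mach_port_t   entry;
 *	vm_size_t     entry_size = len;
 *	vm_address_t  mapped_addr = 0;
 *	kr = mach_make_memory_entry(mach_task_self(), &entry_size, addr,
 *	                            VM_PROT_READ | VM_PROT_WRITE,
 *	                            &entry, MACH_PORT_NULL);
 *	kr = vm_map(other_task, &mapped_addr, entry_size, 0, VM_FLAGS_ANYWHERE,
 *	            entry, 0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
 *	            VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
 */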
1889
1890/*
1891 */
1892
1893kern_return_t
1894vm_region_object_create(
1895 vm_map_t target_map,
1896 vm_size_t size,
1897 ipc_port_t *object_handle)
1898{
1899 vm_named_entry_t user_object;
1900 ipc_port_t user_handle;
1901 kern_return_t kr;
1902
1903 ipc_port_t previous;
1904 vm_map_t new_map;
1905
1906 user_object = (vm_named_entry_t)
1907 kalloc(sizeof (struct vm_named_entry));
1908 if(user_object == NULL) {
1909 return KERN_FAILURE;
1910 }
1911 named_entry_lock_init(user_object);
1912 user_handle = ipc_port_alloc_kernel();
1913
1914
1915 ip_lock(user_handle);
1916
1917 /* make a sonce right */
1918 user_handle->ip_sorights++;
1919 ip_reference(user_handle);
1920
1921 user_handle->ip_destination = IP_NULL;
1922 user_handle->ip_receiver_name = MACH_PORT_NULL;
1923 user_handle->ip_receiver = ipc_space_kernel;
1924
1925 /* make a send right */
1926 user_handle->ip_mscount++;
1927 user_handle->ip_srights++;
1928 ip_reference(user_handle);
1929
1930 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1931 /* nsrequest unlocks user_handle */
1932
1933 /* Create a named object based on a submap of specified size */
1934
1935 new_map = vm_map_create(0, 0, size, TRUE);
1936 user_object->backing.map = new_map;
1937
1938
1939 user_object->object = VM_OBJECT_NULL;
1940 user_object->internal = TRUE;
1941 user_object->is_sub_map = TRUE;
1942 user_object->offset = 0;
1943 user_object->protection = VM_PROT_ALL;
1944 user_object->size = size;
1945 user_object->ref_count = 1;
1946
1947 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1948 IKOT_NAMED_ENTRY);
1949 *object_handle = user_handle;
1950 return KERN_SUCCESS;
1951
1952}
1953
1954/* For a given range, check all map entries. If the entry corresponds to */
1955/* the old vm_region/map provided on the call, replace it with the */
1956/* corresponding range in the new vm_region/map */
1957kern_return_t vm_map_region_replace(
1958 vm_map_t target_map,
1959 ipc_port_t old_region,
1960 ipc_port_t new_region,
1961 vm_offset_t start,
1962 vm_offset_t end)
1963{
1964 vm_named_entry_t old_object;
1965 vm_named_entry_t new_object;
1966 vm_map_t old_submap;
1967 vm_map_t new_submap;
1968 vm_offset_t addr;
1969 vm_map_entry_t entry;
1970 int nested_pmap = 0;
1971
1972
1973 vm_map_lock(target_map);
1974 old_object = (vm_named_entry_t)old_region->ip_kobject;
1975 new_object = (vm_named_entry_t)new_region->ip_kobject;
1976 if((!old_object->is_sub_map) || (!new_object->is_sub_map)) {
1977 vm_map_unlock(target_map);
1978 return KERN_INVALID_ARGUMENT;
1979 }
1980 old_submap = (vm_map_t)old_object->backing.map;
1981 new_submap = (vm_map_t)new_object->backing.map;
1982 vm_map_lock(old_submap);
1983 if((old_submap->min_offset != new_submap->min_offset) ||
1984 (old_submap->max_offset != new_submap->max_offset)) {
1985 vm_map_unlock(old_submap);
1986 vm_map_unlock(target_map);
1987 return KERN_INVALID_ARGUMENT;
1988 }
1989 if(!vm_map_lookup_entry(target_map, start, &entry)) {
1990 /* if the src is not contained, the entry precedes */
1991 /* our range */
1992 addr = entry->vme_start;
1993 if(entry == vm_map_to_entry(target_map)) {
1994 vm_map_unlock(old_submap);
1995 vm_map_unlock(target_map);
1996 return KERN_SUCCESS;
1997 }
55e303ae
A
1998 }
1999 if ((entry->use_pmap) &&
2000 (new_submap->pmap == NULL)) {
2001 new_submap->pmap = pmap_create((vm_size_t) 0);
2002 if(new_submap->pmap == PMAP_NULL) {
2003 vm_map_unlock(old_submap);
2004 vm_map_unlock(target_map);
2005 return(KERN_NO_SPACE);
2006 }
1c79356b
A
2007 }
2008 addr = entry->vme_start;
2009 vm_map_reference(old_submap);
2010 while((entry != vm_map_to_entry(target_map)) &&
2011 (entry->vme_start < end)) {
2012 if((entry->is_sub_map) &&
2013 (entry->object.sub_map == old_submap)) {
1c79356b 2014 if(entry->use_pmap) {
55e303ae 2015 if((start & 0x0fffffff) ||
1c79356b
A
2016 ((end - start) != 0x10000000)) {
2017 vm_map_unlock(old_submap);
9bccf70c 2018 vm_map_deallocate(old_submap);
1c79356b
A
2019 vm_map_unlock(target_map);
2020 return KERN_INVALID_ARGUMENT;
2021 }
2022 nested_pmap = 1;
2023 }
9bccf70c 2024 entry->object.sub_map = new_submap;
1c79356b
A
2025 vm_map_reference(new_submap);
2026 vm_map_deallocate(old_submap);
2027 }
2028 entry = entry->vme_next;
2029 addr = entry->vme_start;
2030 }
2031 if(nested_pmap) {
2032#ifndef i386
55e303ae 2033 pmap_unnest(target_map->pmap, (addr64_t)start);
9bccf70c
A
2034 if(target_map->mapped) {
2035 vm_map_submap_pmap_clean(target_map,
2036 start, end, old_submap, 0);
2037 }
1c79356b 2038 pmap_nest(target_map->pmap, new_submap->pmap,
55e303ae
A
2039 (addr64_t)start, (addr64_t)start,
2040 (addr64_t)(end - start));
2041#endif /* i386 */
1c79356b 2042 } else {
9bccf70c
A
2043 vm_map_submap_pmap_clean(target_map,
2044 start, end, old_submap, 0);
1c79356b
A
2045 }
2046 vm_map_unlock(old_submap);
9bccf70c 2047 vm_map_deallocate(old_submap);
1c79356b
A
2048 vm_map_unlock(target_map);
2049 return KERN_SUCCESS;
2050}
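/*
 * Example (illustrative sketch; "task_map", "old_handle", "new_handle"
 * and "region_base" are placeholders): both handles must be submap-backed
 * named entries whose submaps span identical offsets, and when the
 * affected entries nest their pmap the routine only accepts a
 * 256MB-aligned, 256MB-sized window, per the checks above.
 *
 *	kern_return_t	kr;
 *
 *	kr = vm_map_region_replace(task_map, old_handle, new_handle,
 *			region_base, region_base + 0x10000000);
 */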
2051
2052
2053void
2054mach_destroy_memory_entry(
2055 ipc_port_t port)
2056{
2057 vm_named_entry_t named_entry;
2058#if MACH_ASSERT
2059 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
2060#endif /* MACH_ASSERT */
2061 named_entry = (vm_named_entry_t)port->ip_kobject;
2062 mutex_lock(&(named_entry)->Lock);
2063 named_entry->ref_count-=1;
2064 if(named_entry->ref_count == 0) {
2065 if(named_entry->object) {
2066 /* release the memory object we've been pointing to */
2067 vm_object_deallocate(named_entry->object);
2068 }
2069 if(named_entry->is_sub_map) {
2070 vm_map_deallocate(named_entry->backing.map);
2071 }
2072 kfree((vm_offset_t)port->ip_kobject,
2073 sizeof (struct vm_named_entry));
2074 } else
2075 mutex_unlock(&(named_entry)->Lock);
2076}
2077
2078
2079kern_return_t
2080vm_map_page_query(
2081 vm_map_t target_map,
2082 vm_offset_t offset,
2083 int *disposition,
2084 int *ref_count)
2085{
2086 vm_map_entry_t map_entry;
2087 vm_object_t object;
2088 vm_page_t m;
2089
2090restart_page_query:
2091 *disposition = 0;
2092 *ref_count = 0;
2093 vm_map_lock(target_map);
2094 if(!vm_map_lookup_entry(target_map, offset, &map_entry)) {
2095 vm_map_unlock(target_map);
2096 return KERN_FAILURE;
2097 }
2098 offset -= map_entry->vme_start; /* adjust to offset within entry */
2099 offset += map_entry->offset; /* adjust to target object offset */
2100 if(map_entry->object.vm_object != VM_OBJECT_NULL) {
2101 if(!map_entry->is_sub_map) {
2102 object = map_entry->object.vm_object;
2103 } else {
2104 vm_map_unlock(target_map);
2105 target_map = map_entry->object.sub_map;
2106 goto restart_page_query;
2107 }
2108 } else {
2109 vm_map_unlock(target_map);
2110 return KERN_FAILURE;
2111 }
2112 vm_object_lock(object);
2113 vm_map_unlock(target_map);
2114 while(TRUE) {
2115 m = vm_page_lookup(object, offset);
2116 if (m != VM_PAGE_NULL) {
2117 *disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
2118 break;
2119 } else {
2120 if(object->shadow) {
2121 offset += object->shadow_offset;
2122 vm_object_unlock(object);
2123 object = object->shadow;
2124 vm_object_lock(object);
2125 continue;
2126 }
2127 vm_object_unlock(object);
2128 return KERN_FAILURE;
2129 }
2130 }
2131
2132	/* The ref_count is not strictly accurate: it measures the number */
2133	/* of entities holding a reference on the object; they may not be */
2134	/* mapping the object, or may not be mapping the section holding */
2135	/* the target page. It is still a ballpark figure and, though an */
2136	/* overcount, it picks up the copy-on-write cases. */
2137
2138	/* We could also get a picture of page sharing from pmap_attributes, */
2139	/* but this would undercount, as only faulted-in mappings would */
2140	/* show up. */
2141
2142 *ref_count = object->ref_count;
2143
2144 if (m->fictitious) {
2145 *disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
2146 vm_object_unlock(object);
2147 return KERN_SUCCESS;
2148 }
2149
2150 if (m->dirty)
2151 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
55e303ae 2152 else if(pmap_is_modified(m->phys_page))
1c79356b
A
2153 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
2154
2155 if (m->reference)
2156 *disposition |= VM_PAGE_QUERY_PAGE_REF;
55e303ae 2157 else if(pmap_is_referenced(m->phys_page))
1c79356b
A
2158 *disposition |= VM_PAGE_QUERY_PAGE_REF;
2159
2160 vm_object_unlock(object);
2161 return KERN_SUCCESS;
2162
2163}
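/*
 * Example (illustrative sketch; "map" and "addr" are placeholders): a
 * caller that wants to know whether a page is resident and modified can
 * use the routine above as follows.
 *
 *	int		disposition, ref_count;
 *	kern_return_t	kr;
 *
 *	kr = vm_map_page_query(map, trunc_page(addr),
 *			&disposition, &ref_count);
 *	if (kr == KERN_SUCCESS &&
 *	    (disposition & VM_PAGE_QUERY_PAGE_PRESENT) &&
 *	    (disposition & VM_PAGE_QUERY_PAGE_DIRTY)) {
 *		... the page is resident and has been modified ...
 *	}
 */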
2164
2165kern_return_t
2166set_dp_control_port(
2167 host_priv_t host_priv,
2168 ipc_port_t control_port)
2169{
2170 if (host_priv == HOST_PRIV_NULL)
2171 return (KERN_INVALID_HOST);
0b4e3aa0
A
2172
2173 if (IP_VALID(dynamic_pager_control_port))
2174 ipc_port_release_send(dynamic_pager_control_port);
2175
1c79356b
A
2176 dynamic_pager_control_port = control_port;
2177 return KERN_SUCCESS;
2178}
2179
2180kern_return_t
2181get_dp_control_port(
2182 host_priv_t host_priv,
2183 ipc_port_t *control_port)
2184{
2185 if (host_priv == HOST_PRIV_NULL)
2186 return (KERN_INVALID_HOST);
0b4e3aa0
A
2187
2188 *control_port = ipc_port_copy_send(dynamic_pager_control_port);
1c79356b
A
2189 return KERN_SUCCESS;
2190
2191}
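/*
 * Example (illustrative sketch; "host_priv" and "pager_port" are
 * placeholders): the dynamic pager registers its control port through
 * the privileged host port; later readers receive their own send right,
 * which they are responsible for releasing.
 *
 *	ipc_port_t	current;
 *	kern_return_t	kr;
 *
 *	kr = set_dp_control_port(host_priv, pager_port);
 *	...
 *	kr = get_dp_control_port(host_priv, &current);
 *	...
 *	ipc_port_release_send(current);
 */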
2192
1c79356b
A
2193
2194/* Retrieve a UPL for an object underlying an address range in a map */
2195
2196kern_return_t
2197vm_map_get_upl(
0b4e3aa0
A
2198 vm_map_t map,
2199 vm_address_t offset,
2200 vm_size_t *upl_size,
2201 upl_t *upl,
2202 upl_page_info_array_t page_list,
2203 unsigned int *count,
2204 int *flags,
2205 int force_data_sync)
1c79356b
A
2206{
2207 vm_map_entry_t entry;
2208 int caller_flags;
150bd074
A
2209 int sync_cow_data = FALSE;
2210 vm_object_t local_object;
2211 vm_offset_t local_offset;
2212 vm_offset_t local_start;
2213 kern_return_t ret;
1c79356b
A
2214
2215 caller_flags = *flags;
150bd074
A
2216 if (!(caller_flags & UPL_COPYOUT_FROM)) {
2217 sync_cow_data = TRUE;
2218 }
1c79356b
A
2219 if(upl == NULL)
2220 return KERN_INVALID_ARGUMENT;
0b4e3aa0
A
2221
2222
1c79356b
A
2223REDISCOVER_ENTRY:
2224 vm_map_lock(map);
2225 if (vm_map_lookup_entry(map, offset, &entry)) {
0b4e3aa0
A
2226 if (entry->object.vm_object == VM_OBJECT_NULL ||
2227 !entry->object.vm_object->phys_contiguous) {
2228 if((*upl_size/page_size) > MAX_UPL_TRANSFER) {
2229 *upl_size = MAX_UPL_TRANSFER * page_size;
2230 }
2231 }
1c79356b
A
2232 if((entry->vme_end - offset) < *upl_size) {
2233 *upl_size = entry->vme_end - offset;
2234 }
0b4e3aa0
A
2235 if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
2236 if (entry->object.vm_object == VM_OBJECT_NULL) {
2237 *flags = 0;
2238 } else if (entry->object.vm_object->private) {
2239 *flags = UPL_DEV_MEMORY;
2240 if (entry->object.vm_object->phys_contiguous) {
2241 *flags |= UPL_PHYS_CONTIG;
2242 }
2243 } else {
2244 *flags = 0;
2245 }
2246 vm_map_unlock(map);
2247 return KERN_SUCCESS;
2248 }
1c79356b
A
2249 /*
2250 * Create an object if necessary.
2251 */
2252 if (entry->object.vm_object == VM_OBJECT_NULL) {
2253 entry->object.vm_object = vm_object_allocate(
2254 (vm_size_t)(entry->vme_end - entry->vme_start));
2255 entry->offset = 0;
2256 }
2257 if (!(caller_flags & UPL_COPYOUT_FROM)) {
55e303ae
A
2258 if (!(entry->protection & VM_PROT_WRITE)) {
2259 vm_map_unlock(map);
2260 return KERN_PROTECTION_FAILURE;
2261 }
0b4e3aa0 2262 if (entry->needs_copy) {
1c79356b
A
2263 vm_map_t local_map;
2264 vm_object_t object;
2265 vm_object_offset_t offset_hi;
2266 vm_object_offset_t offset_lo;
2267 vm_object_offset_t new_offset;
2268 vm_prot_t prot;
2269 boolean_t wired;
2270 vm_behavior_t behavior;
2271 vm_map_version_t version;
2272 vm_map_t pmap_map;
2273
2274 local_map = map;
2275 vm_map_lock_write_to_read(map);
2276 if(vm_map_lookup_locked(&local_map,
2277 offset, VM_PROT_WRITE,
2278 &version, &object,
2279 &new_offset, &prot, &wired,
2280 &behavior, &offset_lo,
2281 &offset_hi, &pmap_map)) {
2282 vm_map_unlock(local_map);
2283 return KERN_FAILURE;
2284 }
2285 if (pmap_map != map) {
2286 vm_map_unlock(pmap_map);
2287 }
2288 vm_object_unlock(object);
2289 vm_map_unlock(local_map);
2290
2291 goto REDISCOVER_ENTRY;
2292 }
2293 }
2294 if (entry->is_sub_map) {
150bd074
A
2295 vm_map_t submap;
2296
2297 submap = entry->object.sub_map;
2298 local_start = entry->vme_start;
2299 local_offset = entry->offset;
2300 vm_map_reference(submap);
1c79356b 2301 vm_map_unlock(map);
150bd074
A
2302
2303 ret = (vm_map_get_upl(submap,
2304 local_offset + (offset - local_start),
1c79356b
A
2305 upl_size, upl, page_list, count,
2306 flags, force_data_sync));
150bd074
A
2307
2308 vm_map_deallocate(submap);
2309 return ret;
1c79356b
A
2310 }
2311
150bd074 2312 if (sync_cow_data) {
0b4e3aa0
A
2313 if (entry->object.vm_object->shadow
2314 || entry->object.vm_object->copy) {
150bd074
A
2315 int flags;
2316
2317 local_object = entry->object.vm_object;
2318 local_start = entry->vme_start;
2319 local_offset = entry->offset;
2320 vm_object_reference(local_object);
1c79356b
A
2321 vm_map_unlock(map);
2322
150bd074 2323 if(local_object->copy == NULL) {
1c79356b
A
2324 flags = MEMORY_OBJECT_DATA_SYNC;
2325 } else {
2326 flags = MEMORY_OBJECT_COPY_SYNC;
2327 }
150bd074 2328
0b4e3aa0
A
2329 if (entry->object.vm_object->shadow &&
2330 entry->object.vm_object->copy) {
2331 vm_object_lock_request(
2332 local_object->shadow,
2333 (vm_object_offset_t)
2334 ((offset - local_start) +
2335 local_offset) +
55e303ae 2336 local_object->shadow_offset,
0b4e3aa0
A
2337 *upl_size, FALSE,
2338 MEMORY_OBJECT_DATA_SYNC,
2339 VM_PROT_NO_CHANGE);
2340 }
150bd074 2341 sync_cow_data = FALSE;
0b4e3aa0 2342 vm_object_deallocate(local_object);
150bd074 2343 goto REDISCOVER_ENTRY;
1c79356b
A
2344 }
2345 }
2346
2347 if (force_data_sync) {
150bd074
A
2348
2349 local_object = entry->object.vm_object;
2350 local_start = entry->vme_start;
2351 local_offset = entry->offset;
2352 vm_object_reference(local_object);
1c79356b 2353 vm_map_unlock(map);
1c79356b 2354
0b4e3aa0
A
2355 vm_object_lock_request(
2356 local_object,
2357 (vm_object_offset_t)
55e303ae 2358 ((offset - local_start) + local_offset),
150bd074
A
2359 (vm_object_size_t)*upl_size, FALSE,
2360 MEMORY_OBJECT_DATA_SYNC,
0b4e3aa0 2361 VM_PROT_NO_CHANGE);
150bd074 2362 force_data_sync = FALSE;
0b4e3aa0 2363 vm_object_deallocate(local_object);
150bd074 2364 goto REDISCOVER_ENTRY;
1c79356b
A
2365 }
2366
2367 if(!(entry->object.vm_object->private)) {
2368 if(*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
2369 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
2370 if(entry->object.vm_object->phys_contiguous) {
2371 *flags = UPL_PHYS_CONTIG;
2372 } else {
2373 *flags = 0;
2374 }
2375 } else {
2376 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
2377 }
150bd074
A
2378 local_object = entry->object.vm_object;
2379 local_offset = entry->offset;
2380 local_start = entry->vme_start;
2381 vm_object_reference(local_object);
1c79356b 2382 vm_map_unlock(map);
55e303ae
A
2383 if(caller_flags & UPL_SET_IO_WIRE) {
2384 ret = (vm_object_iopl_request(local_object,
2385 (vm_object_offset_t)
2386 ((offset - local_start)
2387 + local_offset),
2388 *upl_size,
2389 upl,
2390 page_list,
2391 count,
2392 caller_flags));
2393 } else {
2394 ret = (vm_object_upl_request(local_object,
2395 (vm_object_offset_t)
2396 ((offset - local_start)
2397 + local_offset),
2398 *upl_size,
2399 upl,
2400 page_list,
2401 count,
2402 caller_flags));
2403 }
150bd074
A
2404 vm_object_deallocate(local_object);
2405 return(ret);
1c79356b
A
2406 }
2407
2408 vm_map_unlock(map);
2409 return(KERN_FAILURE);
2410
2411}
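/*
 * Example (illustrative sketch; "map", "uaddr" and "io_size" are
 * placeholders): an in-kernel client typically grabs a UPL covering a
 * user buffer before starting I/O, then commits it when the transfer
 * completes.
 *
 *	upl_t			upl;
 *	upl_page_info_t		pl[MAX_UPL_TRANSFER];
 *	unsigned int		count = MAX_UPL_TRANSFER;
 *	vm_size_t		upl_size = io_size;
 *	int			flags = UPL_COPYOUT_FROM;
 *	kern_return_t		kr;
 *
 *	kr = vm_map_get_upl(map, trunc_page(uaddr), &upl_size, &upl,
 *			pl, &count, &flags, FALSE);
 *	if (kr == KERN_SUCCESS) {
 *		... perform the I/O against the pages described by pl ...
 *		upl_commit(upl, pl, count);
 *		upl_deallocate(upl);
 *	}
 */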
2412
1c79356b
A
2413/* ******* Temporary Internal calls to UPL for BSD ***** */
2414kern_return_t
2415kernel_upl_map(
2416 vm_map_t map,
2417 upl_t upl,
2418 vm_offset_t *dst_addr)
2419{
0b4e3aa0 2420 return (vm_upl_map(map, upl, dst_addr));
1c79356b
A
2421}
2422
2423
2424kern_return_t
2425kernel_upl_unmap(
2426 vm_map_t map,
0b4e3aa0 2427 upl_t upl)
1c79356b 2428{
0b4e3aa0 2429 return(vm_upl_unmap(map, upl));
1c79356b
A
2430}
2431
2432kern_return_t
2433kernel_upl_commit(
2434 upl_t upl,
0b4e3aa0
A
2435 upl_page_info_t *pl,
2436 mach_msg_type_number_t count)
1c79356b 2437{
0b4e3aa0
A
2438 kern_return_t kr;
2439
2440 kr = upl_commit(upl, pl, count);
2441 upl_deallocate(upl);
1c79356b
A
2442 return kr;
2443}
2444
0b4e3aa0 2445
1c79356b
A
2446kern_return_t
2447kernel_upl_commit_range(
2448 upl_t upl,
2449 vm_offset_t offset,
2450 vm_size_t size,
2451 int flags,
0b4e3aa0
A
2452 upl_page_info_array_t pl,
2453 mach_msg_type_number_t count)
1c79356b 2454{
0b4e3aa0
A
2455 boolean_t finished = FALSE;
2456 kern_return_t kr;
2457
2458 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
2459 flags |= UPL_COMMIT_NOTIFY_EMPTY;
2460
2461 kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
2462
2463 if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
2464 upl_deallocate(upl);
2465
1c79356b
A
2466 return kr;
2467}
2468
2469kern_return_t
2470kernel_upl_abort_range(
0b4e3aa0
A
2471 upl_t upl,
2472 vm_offset_t offset,
2473 vm_size_t size,
2474 int abort_flags)
1c79356b 2475{
0b4e3aa0
A
2476 kern_return_t kr;
2477 boolean_t finished = FALSE;
1c79356b 2478
0b4e3aa0
A
2479 if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
2480 abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
1c79356b 2481
0b4e3aa0 2482 kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
1c79356b 2483
0b4e3aa0
A
2484 if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
2485 upl_deallocate(upl);
1c79356b 2486
0b4e3aa0 2487 return kr;
1c79356b
A
2488}
2489
1c79356b 2490kern_return_t
0b4e3aa0
A
2491kernel_upl_abort(
2492 upl_t upl,
2493 int abort_type)
1c79356b 2494{
0b4e3aa0 2495 kern_return_t kr;
1c79356b 2496
0b4e3aa0
A
2497 kr = upl_abort(upl, abort_type);
2498 upl_deallocate(upl);
2499 return kr;
1c79356b
A
2500}
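/*
 * Example (illustrative sketch; "upl", "pl", "count" and "io_error" are
 * placeholders carried over from wherever the UPL was created): the BSD
 * wrappers above pair the commit or abort with deallocation of the UPL,
 * so a consumer makes a single call per completed transfer.
 *
 *	if (io_error)
 *		kernel_upl_abort(upl, UPL_ABORT_ERROR);
 *	else
 *		kernel_upl_commit(upl, pl, count);
 */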
2501
1c79356b
A
2502
2503kern_return_t
2504vm_get_shared_region(
2505 task_t task,
2506 shared_region_mapping_t *shared_region)
2507{
2508 *shared_region = (shared_region_mapping_t) task->system_shared_region;
2509 return KERN_SUCCESS;
2510}
2511
2512kern_return_t
2513vm_set_shared_region(
2514 task_t task,
2515 shared_region_mapping_t shared_region)
2516{
2517 task->system_shared_region = (vm_offset_t) shared_region;
2518 return KERN_SUCCESS;
2519}
2520
2521kern_return_t
2522shared_region_mapping_info(
2523 shared_region_mapping_t shared_region,
2524 ipc_port_t *text_region,
2525 vm_size_t *text_size,
2526 ipc_port_t *data_region,
2527 vm_size_t *data_size,
2528 vm_offset_t *region_mappings,
2529 vm_offset_t *client_base,
2530 vm_offset_t *alt_base,
2531 vm_offset_t *alt_next,
55e303ae
A
2532 unsigned int *fs_base,
2533 unsigned int *system,
1c79356b
A
2534 int *flags,
2535 shared_region_mapping_t *next)
2536{
2537 shared_region_mapping_lock(shared_region);
2538
2539 *text_region = shared_region->text_region;
2540 *text_size = shared_region->text_size;
2541 *data_region = shared_region->data_region;
2542 *data_size = shared_region->data_size;
2543 *region_mappings = shared_region->region_mappings;
2544 *client_base = shared_region->client_base;
2545 *alt_base = shared_region->alternate_base;
2546 *alt_next = shared_region->alternate_next;
2547 *flags = shared_region->flags;
55e303ae
A
2548 *fs_base = shared_region->fs_base;
2549 *system = shared_region->system;
1c79356b
A
2550 *next = shared_region->next;
2551
2552 shared_region_mapping_unlock(shared_region);
2553}
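/*
 * Example (illustrative sketch; "region" is a placeholder): the accessor
 * above copies every field out under the region lock.
 *
 *	ipc_port_t		text_region, data_region;
 *	vm_size_t		text_size, data_size;
 *	vm_offset_t		mappings, client_base, alt_base, alt_next;
 *	unsigned int		fs_base, system;
 *	int			flags;
 *	shared_region_mapping_t	next;
 *
 *	shared_region_mapping_info(region, &text_region, &text_size,
 *			&data_region, &data_size, &mappings,
 *			&client_base, &alt_base, &alt_next,
 *			&fs_base, &system, &flags, &next);
 */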
2554
2555kern_return_t
2556shared_region_object_chain_attach(
2557 shared_region_mapping_t target_region,
2558 shared_region_mapping_t object_chain_region)
2559{
2560 shared_region_object_chain_t object_ele;
2561
2562 if(target_region->object_chain)
2563 return KERN_FAILURE;
2564 object_ele = (shared_region_object_chain_t)
2565 kalloc(sizeof (struct shared_region_object_chain));
2566 shared_region_mapping_lock(object_chain_region);
2567 target_region->object_chain = object_ele;
2568 object_ele->object_chain_region = object_chain_region;
2569 object_ele->next = object_chain_region->object_chain;
2570 object_ele->depth = object_chain_region->depth;
2571 object_chain_region->depth++;
2572 target_region->alternate_next = object_chain_region->alternate_next;
2573 shared_region_mapping_unlock(object_chain_region);
2574 return KERN_SUCCESS;
2575}
2576
2577kern_return_t
2578shared_region_mapping_create(
2579 ipc_port_t text_region,
2580 vm_size_t text_size,
2581 ipc_port_t data_region,
2582 vm_size_t data_size,
2583 vm_offset_t region_mappings,
2584 vm_offset_t client_base,
2585 shared_region_mapping_t *shared_region,
2586 vm_offset_t alt_base,
2587 vm_offset_t alt_next)
2588{
2589 *shared_region = (shared_region_mapping_t)
2590 kalloc(sizeof (struct shared_region_mapping));
2591 if(*shared_region == NULL)
2592 return KERN_FAILURE;
2593 shared_region_mapping_lock_init((*shared_region));
2594 (*shared_region)->text_region = text_region;
2595 (*shared_region)->text_size = text_size;
55e303ae
A
2596 (*shared_region)->fs_base = ENV_DEFAULT_ROOT;
2597 (*shared_region)->system = machine_slot[cpu_number()].cpu_type;
1c79356b
A
2598 (*shared_region)->data_region = data_region;
2599 (*shared_region)->data_size = data_size;
2600 (*shared_region)->region_mappings = region_mappings;
2601 (*shared_region)->client_base = client_base;
2602 (*shared_region)->ref_count = 1;
2603 (*shared_region)->next = NULL;
2604 (*shared_region)->object_chain = NULL;
2605 (*shared_region)->self = *shared_region;
2606 (*shared_region)->flags = 0;
2607 (*shared_region)->depth = 0;
55e303ae 2608 (*shared_region)->default_env_list = NULL;
1c79356b
A
2609 (*shared_region)->alternate_base = alt_base;
2610 (*shared_region)->alternate_next = alt_next;
2611 return KERN_SUCCESS;
2612}
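/*
 * Example (illustrative sketch; "client_base_addr" and "mapping_table"
 * are placeholders): a region descriptor can be assembled from two
 * submap-backed named entries, one for text and one for data; fs_base
 * and system are filled in by the routine itself.
 *
 *	ipc_port_t		text_handle, data_handle;
 *	shared_region_mapping_t	region;
 *	kern_return_t		kr;
 *
 *	kr = vm_region_object_create(kernel_map, 0x10000000, &text_handle);
 *	if (kr == KERN_SUCCESS)
 *		kr = vm_region_object_create(kernel_map, 0x10000000,
 *				&data_handle);
 *	if (kr == KERN_SUCCESS)
 *		kr = shared_region_mapping_create(text_handle, 0x10000000,
 *				data_handle, 0x10000000, mapping_table,
 *				client_base_addr, &region, 0, 0);
 */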
2613
2614kern_return_t
2615shared_region_mapping_set_alt_next(
2616 shared_region_mapping_t shared_region,
2617 vm_offset_t alt_next)
2618{
2619 shared_region->alternate_next = alt_next;
2620 return KERN_SUCCESS;
2621}
2622
2623kern_return_t
2624shared_region_mapping_ref(
2625 shared_region_mapping_t shared_region)
2626{
2627 if(shared_region == NULL)
2628 return KERN_SUCCESS;
9bccf70c 2629 hw_atomic_add(&shared_region->ref_count, 1);
1c79356b
A
2630 return KERN_SUCCESS;
2631}
2632
55e303ae
A
2633__private_extern__ kern_return_t
2634shared_region_mapping_dealloc_lock(
2635 shared_region_mapping_t shared_region,
2636 int need_lock)
1c79356b
A
2637{
2638 struct shared_region_task_mappings sm_info;
9bccf70c 2639 shared_region_mapping_t next = NULL;
55e303ae 2640 int ref_count;
9bccf70c
A
2641
2642 while (shared_region) {
55e303ae
A
2643 if ((ref_count =
2644 hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
9bccf70c
A
2645 shared_region_mapping_lock(shared_region);
2646
2647 sm_info.text_region = shared_region->text_region;
2648 sm_info.text_size = shared_region->text_size;
2649 sm_info.data_region = shared_region->data_region;
2650 sm_info.data_size = shared_region->data_size;
2651 sm_info.region_mappings = shared_region->region_mappings;
2652 sm_info.client_base = shared_region->client_base;
2653 sm_info.alternate_base = shared_region->alternate_base;
2654 sm_info.alternate_next = shared_region->alternate_next;
2655 sm_info.flags = shared_region->flags;
2656 sm_info.self = (vm_offset_t)shared_region;
2657
55e303ae
A
2658 if(shared_region->region_mappings) {
2659 lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_lock);
2660 }
2661 if(((vm_named_entry_t)
2662 (shared_region->text_region->ip_kobject))
2663 ->backing.map->pmap) {
2664 pmap_remove(((vm_named_entry_t)
9bccf70c
A
2665 (shared_region->text_region->ip_kobject))
2666 ->backing.map->pmap,
2667 sm_info.client_base,
2668 sm_info.client_base + sm_info.text_size);
55e303ae 2669 }
9bccf70c 2670 ipc_port_release_send(shared_region->text_region);
55e303ae
A
2671 if(shared_region->data_region)
2672 ipc_port_release_send(shared_region->data_region);
9bccf70c
A
2673 if (shared_region->object_chain) {
2674 next = shared_region->object_chain->object_chain_region;
2675 kfree((vm_offset_t)shared_region->object_chain,
2676 sizeof (struct shared_region_object_chain));
2677 } else {
2678 next = NULL;
2679 }
2680 shared_region_mapping_unlock(shared_region);
2681 kfree((vm_offset_t)shared_region,
1c79356b 2682 sizeof (struct shared_region_mapping));
9bccf70c
A
2683 shared_region = next;
2684 } else {
55e303ae
A
2685 /* Stale indicates that a system region is no */
2686 /* longer in the default environment list. */
2687 if((ref_count == 1) &&
2688 (shared_region->flags & SHARED_REGION_SYSTEM)
2689 && (shared_region->flags & ~SHARED_REGION_STALE)) {
2690 remove_default_shared_region_lock(shared_region,need_lock);
2691 }
9bccf70c
A
2692 break;
2693 }
1c79356b 2694 }
1c79356b
A
2695 return KERN_SUCCESS;
2696}
2697
55e303ae
A
2698/*
2699 * Stub function; always indicates that the lock needs to be taken in the
2700 * call to lsf_remove_regions_mappings_lock().
2701 */
2702kern_return_t
2703shared_region_mapping_dealloc(
2704 shared_region_mapping_t shared_region)
2705{
2706 return shared_region_mapping_dealloc_lock(shared_region, 1);
2707}
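/*
 * Example (illustrative sketch; "region" is a placeholder): references
 * taken with shared_region_mapping_ref() are dropped with
 * shared_region_mapping_dealloc(); the final release tears the region
 * down as shown in shared_region_mapping_dealloc_lock() above.
 *
 *	shared_region_mapping_ref(region);
 *	... use the region ...
 *	shared_region_mapping_dealloc(region);
 */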
2708
2709ppnum_t
1c79356b
A
2710vm_map_get_phys_page(
2711 vm_map_t map,
2712 vm_offset_t offset)
2713{
2714 vm_map_entry_t entry;
2715 int ops;
2716 int flags;
55e303ae 2717 ppnum_t phys_page = 0;
1c79356b
A
2718 vm_object_t object;
2719
2720 vm_map_lock(map);
2721 while (vm_map_lookup_entry(map, offset, &entry)) {
2722
2723 if (entry->object.vm_object == VM_OBJECT_NULL) {
2724 vm_map_unlock(map);
2725 return (vm_offset_t) 0;
2726 }
2727 if (entry->is_sub_map) {
2728 vm_map_t old_map;
2729 vm_map_lock(entry->object.sub_map);
2730 old_map = map;
2731 map = entry->object.sub_map;
2732 offset = entry->offset + (offset - entry->vme_start);
2733 vm_map_unlock(old_map);
2734 continue;
2735 }
9bccf70c
A
2736 if (entry->object.vm_object->phys_contiguous) {
2737			/* These are not standard pageable memory mappings. */
2738			/* If they are not present in the object, they will */
2739			/* have to be picked up from the pager through the */
2740			/* fault mechanism. */
2741 if(entry->object.vm_object->shadow_offset == 0) {
2742 /* need to call vm_fault */
2743 vm_map_unlock(map);
2744 vm_fault(map, offset, VM_PROT_NONE,
2745 FALSE, THREAD_UNINT, NULL, 0);
2746 vm_map_lock(map);
2747 continue;
2748 }
2749 offset = entry->offset + (offset - entry->vme_start);
55e303ae
A
2750 phys_page = (ppnum_t)
2751 ((entry->object.vm_object->shadow_offset
2752 + offset) >> 12);
9bccf70c
A
2753 break;
2754
2755 }
1c79356b
A
2756 offset = entry->offset + (offset - entry->vme_start);
2757 object = entry->object.vm_object;
2758 vm_object_lock(object);
2759 while (TRUE) {
2760 vm_page_t dst_page = vm_page_lookup(object,offset);
2761 if(dst_page == VM_PAGE_NULL) {
2762 if(object->shadow) {
2763 vm_object_t old_object;
2764 vm_object_lock(object->shadow);
2765 old_object = object;
2766 offset = offset + object->shadow_offset;
2767 object = object->shadow;
2768 vm_object_unlock(old_object);
2769 } else {
2770 vm_object_unlock(object);
2771 break;
2772 }
2773 } else {
55e303ae 2774 phys_page = (ppnum_t)(dst_page->phys_page);
1c79356b
A
2775 vm_object_unlock(object);
2776 break;
2777 }
2778 }
2779 break;
2780
2781 }
2782
2783 vm_map_unlock(map);
55e303ae
A
2784 return phys_page;
2785}
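/*
 * Example (illustrative sketch; "map" and "vaddr" are placeholders): the
 * routine above returns a physical page number, not a byte address, and
 * returns 0 when nothing is resident.
 *
 *	ppnum_t	pn;
 *
 *	pn = vm_map_get_phys_page(map, trunc_page(vaddr));
 *	if (pn != 0) {
 *		... the physical byte address is ...
 *		... ((addr64_t)pn << PAGE_SHIFT) + (vaddr & PAGE_MASK) ...
 *	}
 */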
2786
2787
2788
2789kern_return_t
2790kernel_object_iopl_request(
2791 vm_named_entry_t named_entry,
2792 memory_object_offset_t offset,
2793 vm_size_t *upl_size,
2794 upl_t *upl_ptr,
2795 upl_page_info_array_t user_page_list,
2796 unsigned int *page_list_count,
2797 int *flags)
2798{
2799 vm_object_t object;
2800 kern_return_t ret;
2801
2802 int caller_flags;
2803
2804 caller_flags = *flags;
2805
2806	/* a few checks to make sure the caller is obeying the rules */
2807 if(*upl_size == 0) {
2808 if(offset >= named_entry->size)
2809 return(KERN_INVALID_RIGHT);
2810 *upl_size = named_entry->size - offset;
2811 }
2812 if(caller_flags & UPL_COPYOUT_FROM) {
2813 if((named_entry->protection & VM_PROT_READ)
2814 != VM_PROT_READ) {
2815 return(KERN_INVALID_RIGHT);
2816 }
2817 } else {
2818 if((named_entry->protection &
2819 (VM_PROT_READ | VM_PROT_WRITE))
2820 != (VM_PROT_READ | VM_PROT_WRITE)) {
2821 return(KERN_INVALID_RIGHT);
2822 }
2823 }
2824 if(named_entry->size < (offset + *upl_size))
2825 return(KERN_INVALID_ARGUMENT);
2826
2827	/* the caller's offset parameter is relative to the start of the */
2828	/* named entry; convert it to an offset within the backing object */
2829 offset = offset + named_entry->offset;
2830
2831 if(named_entry->is_sub_map)
2832 return (KERN_INVALID_ARGUMENT);
2833
2834 named_entry_lock(named_entry);
2835
2836 if(named_entry->object) {
2837		/* This is the case where we are going to map */
2838		/* an already mapped object. If the object is */
2839		/* not ready, it is internal. An external */
2840		/* object cannot be mapped until it is ready, */
2841		/* so we can avoid the ready check */
2842		/* in this case. */
2843 vm_object_reference(named_entry->object);
2844 object = named_entry->object;
2845 named_entry_unlock(named_entry);
2846 } else {
2847 object = vm_object_enter(named_entry->backing.pager,
2848 named_entry->offset + named_entry->size,
2849 named_entry->internal,
2850 FALSE,
2851 FALSE);
2852 if (object == VM_OBJECT_NULL) {
2853 named_entry_unlock(named_entry);
2854 return(KERN_INVALID_OBJECT);
2855 }
2856 vm_object_lock(object);
2857
2858 /* create an extra reference for the named entry */
2859 vm_object_reference_locked(object);
2860 named_entry->object = object;
2861 named_entry_unlock(named_entry);
2862
2863 /* wait for object (if any) to be ready */
2864 while (!object->pager_ready) {
2865 vm_object_wait(object,
2866 VM_OBJECT_EVENT_PAGER_READY,
2867 THREAD_UNINT);
2868 vm_object_lock(object);
2869 }
2870 vm_object_unlock(object);
2871 }
2872
2873 if (!object->private) {
2874 if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
2875 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
2876 if (object->phys_contiguous) {
2877 *flags = UPL_PHYS_CONTIG;
2878 } else {
2879 *flags = 0;
2880 }
2881 } else {
2882 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
2883 }
2884
2885 ret = vm_object_iopl_request(object,
2886 offset,
2887 *upl_size,
2888 upl_ptr,
2889 user_page_list,
2890 page_list_count,
2891 caller_flags);
2892 vm_object_deallocate(object);
2893 return ret;
1c79356b 2894}
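/*
 * Example (illustrative sketch; "entry_port" and "io_size" are
 * placeholders): a driver can build an I/O page list directly against
 * the object behind a named entry.  UPL_COPYOUT_FROM requires that the
 * entry carry at least VM_PROT_READ, per the checks above.
 *
 *	vm_named_entry_t	named_entry;
 *	vm_size_t		size = io_size;
 *	upl_t			upl;
 *	upl_page_info_t		pl[MAX_UPL_TRANSFER];
 *	unsigned int		count = MAX_UPL_TRANSFER;
 *	int			flags = UPL_COPYOUT_FROM;
 *	kern_return_t		kr;
 *
 *	named_entry = (vm_named_entry_t)entry_port->ip_kobject;
 *	kr = kernel_object_iopl_request(named_entry, 0, &size, &upl,
 *			pl, &count, &flags);
 */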
55e303ae 2895
1c79356b 2896#endif /* VM_CPM */