[apple/xnu.git] / osfmk / vm / vm_user.c (xnu-517.3.7)
1c79356b 1/*
55e303ae 2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
43866e37 6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
1c79356b 7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * @OSF_COPYRIGHT@
27 */
28/*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53/*
54 */
55/*
56 * File: vm/vm_user.c
57 * Author: Avadis Tevanian, Jr., Michael Wayne Young
58 *
59 * User-exported virtual memory functions.
60 */
61
62#include <vm_cpm.h>
63#include <mach/boolean.h>
64#include <mach/kern_return.h>
65#include <mach/mach_types.h> /* to get vm_address_t */
66#include <mach/memory_object.h>
67#include <mach/std_types.h> /* to get pointer_t */
68#include <mach/vm_attributes.h>
69#include <mach/vm_param.h>
70#include <mach/vm_statistics.h>
71#include <mach/vm_map_server.h>
72#include <mach/mach_syscalls.h>
9bccf70c 73
1c79356b 74#include <mach/shared_memory_server.h>
9bccf70c 75#include <vm/vm_shared_memory_server.h>
76
77#include <kern/host.h>
78#include <kern/task.h>
79#include <kern/misc_protos.h>
80#include <vm/vm_map.h>
81#include <vm/vm_object.h>
82#include <vm/vm_page.h>
83#include <vm/memory_object.h>
84#include <vm/vm_pageout.h>
85
86__private_extern__ load_struct_t *
87lsf_remove_regions_mappings_lock(
88 shared_region_mapping_t region,
89 shared_region_task_mappings_t sm_info,
90 int need_lock);
91
92
93vm_size_t upl_offset_to_pagelist = 0;
94
95#if VM_CPM
96#include <vm/cpm.h>
97#endif /* VM_CPM */
98
99ipc_port_t dynamic_pager_control_port=NULL;
100
101/*
 102 * vm_allocate allocates "zero fill" memory in the specified
103 * map.
104 */
105kern_return_t
106vm_allocate(
107 register vm_map_t map,
108 register vm_offset_t *addr,
109 register vm_size_t size,
110 int flags)
111{
112 kern_return_t result;
113 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
114
115 if (map == VM_MAP_NULL)
116 return(KERN_INVALID_ARGUMENT);
117 if (size == 0) {
118 *addr = 0;
119 return(KERN_SUCCESS);
120 }
121
122 if (anywhere)
123 *addr = vm_map_min(map);
124 else
125 *addr = trunc_page_32(*addr);
126 size = round_page_32(size);
127 if (size == 0) {
128 return(KERN_INVALID_ARGUMENT);
129 }
130
131 result = vm_map_enter(
132 map,
133 addr,
134 size,
135 (vm_offset_t)0,
136 flags,
137 VM_OBJECT_NULL,
138 (vm_object_offset_t)0,
139 FALSE,
140 VM_PROT_DEFAULT,
141 VM_PROT_ALL,
142 VM_INHERIT_DEFAULT);
143
144 return(result);
145}
146
147/*
148 * vm_deallocate deallocates the specified range of addresses in the
149 * specified address map.
150 */
151kern_return_t
152vm_deallocate(
153 register vm_map_t map,
154 vm_offset_t start,
155 vm_size_t size)
156{
157 if (map == VM_MAP_NULL)
158 return(KERN_INVALID_ARGUMENT);
159
160 if (size == (vm_offset_t) 0)
161 return(KERN_SUCCESS);
162
163 return(vm_map_remove(map, trunc_page_32(start),
164 round_page_32(start+size), VM_MAP_NO_FLAGS));
165}
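/*
 * Editor's note: illustrative sketch, not part of the original source.
 * It shows how a user task would typically drive the vm_allocate()/
 * vm_deallocate() pair defined above through the MIG-generated user stubs;
 * the buffer name and length below are hypothetical.
 */
#if 0	/* user-level usage sketch, never compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
example_alloc_free(void)
{
	vm_address_t	buf = 0;
	vm_size_t	len = 4 * vm_page_size;	/* hypothetical length */
	kern_return_t	kr;

	/* MIG converts the task port into the task's vm_map for the kernel */
	kr = vm_allocate(mach_task_self(), &buf, len, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use the zero-filled pages at buf ... */

	return vm_deallocate(mach_task_self(), buf, len);
}
#endif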
166
167/*
168 * vm_inherit sets the inheritance of the specified range in the
169 * specified map.
170 */
171kern_return_t
172vm_inherit(
173 register vm_map_t map,
174 vm_offset_t start,
175 vm_size_t size,
176 vm_inherit_t new_inheritance)
177{
178 if (map == VM_MAP_NULL)
179 return(KERN_INVALID_ARGUMENT);
180
181 if (new_inheritance > VM_INHERIT_LAST_VALID)
182 return(KERN_INVALID_ARGUMENT);
183
184 return(vm_map_inherit(map,
185 trunc_page_32(start),
186 round_page_32(start+size),
187 new_inheritance));
188}
189
190/*
191 * vm_protect sets the protection of the specified range in the
192 * specified map.
193 */
194
195kern_return_t
196vm_protect(
197 register vm_map_t map,
198 vm_offset_t start,
199 vm_size_t size,
200 boolean_t set_maximum,
201 vm_prot_t new_protection)
202{
203 if ((map == VM_MAP_NULL) ||
204 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
205 return(KERN_INVALID_ARGUMENT);
206
207 return(vm_map_protect(map,
208 trunc_page_32(start),
209 round_page_32(start+size),
210 new_protection,
211 set_maximum));
212}
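/*
 * Editor's note: illustrative sketch, not part of the original source.
 * A typical user-level use of the vm_protect() routine above: drop write
 * access on a range while leaving set_maximum FALSE so the change can be
 * reverted later. The start/len parameters are hypothetical.
 */
#if 0	/* user-level usage sketch, never compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
example_make_readonly(vm_address_t start, vm_size_t len)
{
	/* only the current protection changes; the maximum stays as it was */
	return vm_protect(mach_task_self(), start, len,
			  FALSE /* set_maximum */, VM_PROT_READ);
}
#endif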
213
214/*
215 * Handle machine-specific attributes for a mapping, such
 216 * as cacheability, migratability, etc.
217 */
218kern_return_t
219vm_machine_attribute(
220 vm_map_t map,
221 vm_address_t address,
222 vm_size_t size,
223 vm_machine_attribute_t attribute,
224 vm_machine_attribute_val_t* value) /* IN/OUT */
225{
226 if (map == VM_MAP_NULL)
227 return(KERN_INVALID_ARGUMENT);
228
229 return vm_map_machine_attribute(map, address, size, attribute, value);
230}
231
232kern_return_t
233vm_read(
234 vm_map_t map,
235 vm_address_t address,
236 vm_size_t size,
237 pointer_t *data,
238 mach_msg_type_number_t *data_size)
239{
240 kern_return_t error;
241 vm_map_copy_t ipc_address;
242
243 if (map == VM_MAP_NULL)
244 return(KERN_INVALID_ARGUMENT);
245
246 if ((error = vm_map_copyin(map,
247 address,
248 size,
249 FALSE, /* src_destroy */
250 &ipc_address)) == KERN_SUCCESS) {
251 *data = (pointer_t) ipc_address;
252 *data_size = size;
253 }
254 return(error);
255}
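/*
 * Editor's note: illustrative sketch, not part of the original source.
 * vm_read() above hands the copied bytes back as out-of-line data, so the
 * caller ends up with a fresh region in its own map and must vm_deallocate()
 * it when finished. The target task port is assumed to be a send right the
 * caller already holds.
 */
#if 0	/* user-level usage sketch, never compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
example_peek(task_t target, vm_address_t addr, vm_size_t len)
{
	pointer_t		data;
	mach_msg_type_number_t	count;
	kern_return_t		kr;

	kr = vm_read(target, addr, len, &data, &count);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... inspect count bytes at data ... */

	return vm_deallocate(mach_task_self(), (vm_address_t)data, count);
}
#endif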
256
257kern_return_t
258vm_read_list(
259 vm_map_t map,
260 vm_read_entry_t data_list,
261 mach_msg_type_number_t count)
262{
263 mach_msg_type_number_t i;
264 kern_return_t error;
265 vm_map_copy_t ipc_address;
266
267 if (map == VM_MAP_NULL)
268 return(KERN_INVALID_ARGUMENT);
269
270 for(i=0; i<count; i++) {
271 error = vm_map_copyin(map,
272 data_list[i].address,
273 data_list[i].size,
274 FALSE, /* src_destroy */
275 &ipc_address);
276 if(error != KERN_SUCCESS) {
277 data_list[i].address = (vm_address_t)0;
278 data_list[i].size = (vm_size_t)0;
279 break;
280 }
281 if(data_list[i].size != 0) {
282 error = vm_map_copyout(current_task()->map,
283 &(data_list[i].address),
284 (vm_map_copy_t) ipc_address);
285 if(error != KERN_SUCCESS) {
286 data_list[i].address = (vm_address_t)0;
287 data_list[i].size = (vm_size_t)0;
288 break;
289 }
290 }
291 }
292 return(error);
293}
294
295/*
296 * This routine reads from the specified map and overwrites part of the current
 297 * activation's map. Because it assumes the current thread is local, it is no
 298 * longer cluster-safe without a fully supportive local proxy thread/task
 299 * (but we don't support clusters anymore, so this is moot).
300 */
301
302#define VM_OVERWRITE_SMALL 512
303
304kern_return_t
305vm_read_overwrite(
306 vm_map_t map,
307 vm_address_t address,
308 vm_size_t size,
309 vm_address_t data,
310 vm_size_t *data_size)
311{
312 struct {
313 long align;
314 char buf[VM_OVERWRITE_SMALL];
315 } inbuf;
316 vm_map_t oldmap;
317 kern_return_t error = KERN_SUCCESS;
318 vm_map_copy_t copy;
319
320 if (map == VM_MAP_NULL)
321 return(KERN_INVALID_ARGUMENT);
322
323 if (size <= VM_OVERWRITE_SMALL) {
324 if(vm_map_read_user(map, (vm_offset_t)address,
325 (vm_offset_t)&inbuf, size)) {
326 error = KERN_INVALID_ADDRESS;
327 } else {
328 if(vm_map_write_user(current_map(),
329 (vm_offset_t)&inbuf, (vm_offset_t)data, size))
330 error = KERN_INVALID_ADDRESS;
331 }
332 }
333 else {
334 if ((error = vm_map_copyin(map,
335 address,
336 size,
337 FALSE, /* src_destroy */
338 &copy)) == KERN_SUCCESS) {
339 if ((error = vm_map_copy_overwrite(
340 current_act()->map,
341 data,
342 copy,
343 FALSE)) == KERN_SUCCESS) {
344 }
345 else {
346 vm_map_copy_discard(copy);
347 }
348 }
349 }
350 *data_size = size;
351 return(error);
352}
353
354
355
356
357/*ARGSUSED*/
358kern_return_t
359vm_write(
360 vm_map_t map,
361 vm_address_t address,
362 vm_offset_t data,
363 mach_msg_type_number_t size)
364{
365 if (map == VM_MAP_NULL)
366 return KERN_INVALID_ARGUMENT;
367
368 return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
369 FALSE /* interruptible XXX */);
370}
371
372kern_return_t
373vm_copy(
374 vm_map_t map,
375 vm_address_t source_address,
376 vm_size_t size,
377 vm_address_t dest_address)
378{
379 vm_map_copy_t copy;
380 kern_return_t kr;
381
382 if (map == VM_MAP_NULL)
383 return KERN_INVALID_ARGUMENT;
384
385 kr = vm_map_copyin(map, source_address, size,
386 FALSE, &copy);
387 if (kr != KERN_SUCCESS)
388 return kr;
389
390 kr = vm_map_copy_overwrite(map, dest_address, copy,
391 FALSE /* interruptible XXX */);
392 if (kr != KERN_SUCCESS) {
393 vm_map_copy_discard(copy);
394 return kr;
395 }
396
397 return KERN_SUCCESS;
398}
399
400/*
401 * Routine: vm_map
402 */
403kern_return_t
404vm_map_64(
405 vm_map_t target_map,
406 vm_offset_t *address,
407 vm_size_t initial_size,
408 vm_offset_t mask,
409 int flags,
410 ipc_port_t port,
411 vm_object_offset_t offset,
412 boolean_t copy,
413 vm_prot_t cur_protection,
414 vm_prot_t max_protection,
415 vm_inherit_t inheritance)
416{
417 register
418 vm_object_t object;
419 vm_prot_t prot;
420 vm_object_size_t size = (vm_object_size_t)initial_size;
421 kern_return_t result;
422
423 /*
424 * Check arguments for validity
425 */
426 if ((target_map == VM_MAP_NULL) ||
427 (cur_protection & ~VM_PROT_ALL) ||
428 (max_protection & ~VM_PROT_ALL) ||
429 (inheritance > VM_INHERIT_LAST_VALID) ||
430 size == 0)
431 return(KERN_INVALID_ARGUMENT);
432
433 /*
434 * Find the vm object (if any) corresponding to this port.
435 */
436 if (!IP_VALID(port)) {
437 object = VM_OBJECT_NULL;
438 offset = 0;
439 copy = FALSE;
440 } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
441 vm_named_entry_t named_entry;
442
443 named_entry = (vm_named_entry_t)port->ip_kobject;
444 /* a few checks to make sure user is obeying rules */
445 if(size == 0) {
446 if(offset >= named_entry->size)
447 return(KERN_INVALID_RIGHT);
448 size = named_entry->size - offset;
449 }
450 if((named_entry->protection & max_protection) != max_protection)
451 return(KERN_INVALID_RIGHT);
452 if((named_entry->protection & cur_protection) != cur_protection)
453 return(KERN_INVALID_RIGHT);
454 if(named_entry->size < (offset + size))
455 return(KERN_INVALID_ARGUMENT);
456
 457 /* the caller's parameter offset is relative to the start of */
 458 /* the named entry; convert it to an offset in the object */
459 offset = offset + named_entry->offset;
460
461 named_entry_lock(named_entry);
462 if(named_entry->is_sub_map) {
463 vm_map_entry_t map_entry;
464
465 named_entry_unlock(named_entry);
466 *address = trunc_page_32(*address);
467 size = round_page_64(size);
468 vm_object_reference(vm_submap_object);
469 if ((result = vm_map_enter(target_map,
470 address, size, mask, flags,
471 vm_submap_object, 0,
472 FALSE,
473 cur_protection, max_protection, inheritance
474 )) != KERN_SUCCESS) {
475 vm_object_deallocate(vm_submap_object);
476 } else {
477 char alias;
478
479 VM_GET_FLAGS_ALIAS(flags, alias);
480 if ((alias == VM_MEMORY_SHARED_PMAP) &&
481 !copy) {
482 vm_map_submap(target_map, *address,
483 (*address) + size,
484 named_entry->backing.map,
485 (vm_offset_t)offset, TRUE);
486 } else {
487 vm_map_submap(target_map, *address,
488 (*address) + size,
489 named_entry->backing.map,
490 (vm_offset_t)offset, FALSE);
491 }
492 if(copy) {
493 if(vm_map_lookup_entry(
494 target_map, *address, &map_entry)) {
495 map_entry->needs_copy = TRUE;
496 }
497 }
498 }
499 return(result);
500
501 } else if(named_entry->object) {
502 /* This is the case where we are going to map */
503 /* an already mapped object. If the object is */
 504 /* not ready, it is internal. An external */
 505 /* object cannot be mapped until it is ready, */
 506 /* so we can avoid the ready check */
 507 /* in this case. */
508 named_entry_unlock(named_entry);
509 vm_object_reference(named_entry->object);
510 object = named_entry->object;
511 } else {
512 unsigned int access;
513 vm_prot_t protections;
514 unsigned int wimg_mode;
515 boolean_t cache_attr;
516
517 protections = named_entry->protection
518 & VM_PROT_ALL;
519 access = GET_MAP_MEM(named_entry->protection);
520
521 object = vm_object_enter(
522 named_entry->backing.pager,
523 named_entry->size,
524 named_entry->internal,
525 FALSE,
526 FALSE);
527 if (object == VM_OBJECT_NULL) {
528 named_entry_unlock(named_entry);
529 return(KERN_INVALID_OBJECT);
530 }
531
532 vm_object_lock(object);
533
534 /* create an extra ref for the named entry */
535 vm_object_reference_locked(object);
536 named_entry->object = object;
537 named_entry_unlock(named_entry);
538
539 wimg_mode = object->wimg_bits;
540 if(access == MAP_MEM_IO) {
541 wimg_mode = VM_WIMG_IO;
542 } else if (access == MAP_MEM_COPYBACK) {
543 wimg_mode = VM_WIMG_USE_DEFAULT;
544 } else if (access == MAP_MEM_WTHRU) {
545 wimg_mode = VM_WIMG_WTHRU;
546 } else if (access == MAP_MEM_WCOMB) {
547 wimg_mode = VM_WIMG_WCOMB;
548 }
549 if ((wimg_mode == VM_WIMG_IO)
550 || (wimg_mode == VM_WIMG_WCOMB))
551 cache_attr = TRUE;
552 else
553 cache_attr = FALSE;
554
555 if (named_entry->backing.pager) {
556 /* wait for object (if any) to be ready */
557 while (!object->pager_ready) {
558 vm_object_wait(object,
559 VM_OBJECT_EVENT_PAGER_READY,
560 THREAD_UNINT);
561 vm_object_lock(object);
562 }
1c79356b 563 }
564 if(object->wimg_bits != wimg_mode) {
565 vm_page_t p;
566
567 vm_object_paging_wait(object, THREAD_UNINT);
568
569 object->wimg_bits = wimg_mode;
570 queue_iterate(&object->memq, p, vm_page_t, listq) {
571 if (!p->fictitious) {
572 pmap_page_protect(
573 p->phys_page,
574 VM_PROT_NONE);
575 if(cache_attr)
576 pmap_sync_caches_phys(
577 p->phys_page);
578 }
579 }
580 }
581 object->true_share = TRUE;
582 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
583 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
584 vm_object_unlock(object);
1c79356b 585 }
586 } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
587 /*
588 * JMM - This is temporary until we unify named entries
589 * and raw memory objects.
590 *
591 * Detected fake ip_kotype for a memory object. In
592 * this case, the port isn't really a port at all, but
593 * instead is just a raw memory object.
594 */
595
596 if ((object = vm_object_enter((memory_object_t)port,
597 size, FALSE, FALSE, FALSE))
598 == VM_OBJECT_NULL)
599 return(KERN_INVALID_OBJECT);
600
601 /* wait for object (if any) to be ready */
602 if (object != VM_OBJECT_NULL) {
603 if(object == kernel_object) {
604 printf("Warning: Attempt to map kernel object"
605 " by a non-private kernel entity\n");
606 return(KERN_INVALID_OBJECT);
607 }
608 vm_object_lock(object);
609 while (!object->pager_ready) {
610 vm_object_wait(object,
611 VM_OBJECT_EVENT_PAGER_READY,
612 THREAD_UNINT);
613 vm_object_lock(object);
614 }
615 vm_object_unlock(object);
616 }
617 } else {
618 return (KERN_INVALID_OBJECT);
619 }
620
621 *address = trunc_page_32(*address);
622 size = round_page_64(size);
623
624 /*
625 * Perform the copy if requested
626 */
627
628 if (copy) {
629 vm_object_t new_object;
630 vm_object_offset_t new_offset;
631
632 result = vm_object_copy_strategically(object, offset, size,
633 &new_object, &new_offset,
634 &copy);
635
636
637 if (result == KERN_MEMORY_RESTART_COPY) {
638 boolean_t success;
639 boolean_t src_needs_copy;
640
641 /*
642 * XXX
643 * We currently ignore src_needs_copy.
644 * This really is the issue of how to make
645 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
646 * non-kernel users to use. Solution forthcoming.
647 * In the meantime, since we don't allow non-kernel
648 * memory managers to specify symmetric copy,
649 * we won't run into problems here.
650 */
651 new_object = object;
652 new_offset = offset;
653 success = vm_object_copy_quickly(&new_object,
654 new_offset, size,
655 &src_needs_copy,
656 &copy);
657 assert(success);
658 result = KERN_SUCCESS;
659 }
660 /*
661 * Throw away the reference to the
662 * original object, as it won't be mapped.
663 */
664
665 vm_object_deallocate(object);
666
667 if (result != KERN_SUCCESS)
668 return (result);
669
670 object = new_object;
671 offset = new_offset;
672 }
673
674 if ((result = vm_map_enter(target_map,
675 address, size, mask, flags,
676 object, offset,
677 copy,
678 cur_protection, max_protection, inheritance
679 )) != KERN_SUCCESS)
680 vm_object_deallocate(object);
681 return(result);
682}
683
684/* temporary, until world build */
55e303ae 685kern_return_t
686vm_map(
687 vm_map_t target_map,
688 vm_offset_t *address,
689 vm_size_t size,
690 vm_offset_t mask,
691 int flags,
692 ipc_port_t port,
693 vm_offset_t offset,
694 boolean_t copy,
695 vm_prot_t cur_protection,
696 vm_prot_t max_protection,
697 vm_inherit_t inheritance)
698{
55e303ae 699 return vm_map_64(target_map, address, size, mask, flags,
700 port, (vm_object_offset_t)offset, copy,
701 cur_protection, max_protection, inheritance);
702}
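/*
 * Editor's note: illustrative sketch, not part of the original source.
 * As the !IP_VALID(port) branch of vm_map_64() above shows, passing
 * MACH_PORT_NULL for the memory object yields anonymous zero-fill memory,
 * so vm_map() can act as a vm_allocate() with explicit protections and
 * inheritance. Names below are hypothetical.
 */
#if 0	/* user-level usage sketch, never compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
example_map_anonymous(vm_address_t *addr, vm_size_t len)
{
	*addr = 0;
	return vm_map(mach_task_self(), addr, len,
		      0 /* mask */, VM_FLAGS_ANYWHERE,
		      MACH_PORT_NULL, 0 /* offset */, FALSE /* copy */,
		      VM_PROT_READ | VM_PROT_WRITE,	/* cur_protection */
		      VM_PROT_ALL,			/* max_protection */
		      VM_INHERIT_DEFAULT);
}
#endif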
703
704
705/*
706 * NOTE: this routine (and this file) will no longer require mach_host_server.h
707 * when vm_wire is changed to use ledgers.
708 */
709#include <mach/mach_host_server.h>
710/*
711 * Specify that the range of the virtual address space
712 * of the target task must not cause page faults for
713 * the indicated accesses.
714 *
715 * [ To unwire the pages, specify VM_PROT_NONE. ]
716 */
717kern_return_t
718vm_wire(
719 host_priv_t host_priv,
720 register vm_map_t map,
721 vm_offset_t start,
722 vm_size_t size,
723 vm_prot_t access)
724{
725 kern_return_t rc;
726
727 if (host_priv == HOST_PRIV_NULL)
728 return KERN_INVALID_HOST;
729
730 assert(host_priv == &realhost);
731
732 if (map == VM_MAP_NULL)
733 return KERN_INVALID_TASK;
734
735 if (access & ~VM_PROT_ALL)
736 return KERN_INVALID_ARGUMENT;
737
738 if (access != VM_PROT_NONE) {
739 rc = vm_map_wire(map, trunc_page_32(start),
740 round_page_32(start+size), access, TRUE);
1c79356b 741 } else {
742 rc = vm_map_unwire(map, trunc_page_32(start),
743 round_page_32(start+size), TRUE);
744 }
745 return rc;
746}
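/*
 * Editor's note: illustrative sketch, not part of the original source.
 * vm_wire() takes the privileged host port, so only a caller that already
 * holds host_priv (obtained by privileged means outside this sketch) can
 * wire memory; passing VM_PROT_NONE for the access unwires the same range.
 */
#if 0	/* user-level usage sketch, never compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
example_wire(host_priv_t host_priv, vm_address_t start, vm_size_t len)
{
	/* wire the range so read/write accesses never fault */
	return vm_wire(host_priv, mach_task_self(), start, len,
		       VM_PROT_READ | VM_PROT_WRITE);
}
#endif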
747
748/*
749 * vm_msync
750 *
751 * Synchronises the memory range specified with its backing store
752 * image by either flushing or cleaning the contents to the appropriate
753 * memory manager engaging in a memory object synchronize dialog with
754 * the manager. The client doesn't return until the manager issues
755 * m_o_s_completed message. MIG Magically converts user task parameter
756 * to the task's address map.
757 *
758 * interpretation of sync_flags
759 * VM_SYNC_INVALIDATE - discard pages, only return precious
760 * pages to manager.
761 *
762 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
763 * - discard pages, write dirty or precious
764 * pages back to memory manager.
765 *
766 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
767 * - write dirty or precious pages back to
768 * the memory manager.
769 *
770 * NOTE
 771 * The memory object attributes have not yet been implemented; this
 772 * function will have to deal with the invalidate attribute.
773 *
774 * RETURNS
775 * KERN_INVALID_TASK Bad task parameter
776 * KERN_INVALID_ARGUMENT both sync and async were specified.
777 * KERN_SUCCESS The usual.
778 */
779
780kern_return_t
781vm_msync(
782 vm_map_t map,
783 vm_address_t address,
784 vm_size_t size,
785 vm_sync_t sync_flags)
786{
787 msync_req_t msr;
788 msync_req_t new_msr;
789 queue_chain_t req_q; /* queue of requests for this msync */
790 vm_map_entry_t entry;
791 vm_size_t amount_left;
792 vm_object_offset_t offset;
793 boolean_t do_sync_req;
794 boolean_t modifiable;
795
796
797 if ((sync_flags & VM_SYNC_ASYNCHRONOUS) &&
798 (sync_flags & VM_SYNC_SYNCHRONOUS))
799 return(KERN_INVALID_ARGUMENT);
800
801 /*
802 * align address and size on page boundaries
803 */
804 size = round_page_32(address + size) - trunc_page_32(address);
805 address = trunc_page_32(address);
806
807 if (map == VM_MAP_NULL)
808 return(KERN_INVALID_TASK);
809
810 if (size == 0)
811 return(KERN_SUCCESS);
812
813 queue_init(&req_q);
814 amount_left = size;
815
816 while (amount_left > 0) {
817 vm_size_t flush_size;
818 vm_object_t object;
819
820 vm_map_lock(map);
821 if (!vm_map_lookup_entry(map, address, &entry)) {
822 vm_size_t skip;
823
824 /*
825 * hole in the address map.
826 */
827
828 /*
829 * Check for empty map.
830 */
831 if (entry == vm_map_to_entry(map) &&
832 entry->vme_next == entry) {
833 vm_map_unlock(map);
834 break;
835 }
836 /*
837 * Check that we don't wrap and that
838 * we have at least one real map entry.
839 */
840 if ((map->hdr.nentries == 0) ||
841 (entry->vme_next->vme_start < address)) {
842 vm_map_unlock(map);
843 break;
844 }
845 /*
846 * Move up to the next entry if needed
847 */
848 skip = (entry->vme_next->vme_start - address);
849 if (skip >= amount_left)
850 amount_left = 0;
851 else
852 amount_left -= skip;
853 address = entry->vme_next->vme_start;
854 vm_map_unlock(map);
855 continue;
856 }
857
858 offset = address - entry->vme_start;
859
860 /*
861 * do we have more to flush than is contained in this
862 * entry ?
863 */
864 if (amount_left + entry->vme_start + offset > entry->vme_end) {
865 flush_size = entry->vme_end -
866 (entry->vme_start + offset);
867 } else {
868 flush_size = amount_left;
869 }
870 amount_left -= flush_size;
871 address += flush_size;
872
873 if (entry->is_sub_map == TRUE) {
874 vm_map_t local_map;
875 vm_offset_t local_offset;
876
877 local_map = entry->object.sub_map;
878 local_offset = entry->offset;
879 vm_map_unlock(map);
880 vm_msync(
881 local_map,
882 local_offset,
883 flush_size,
884 sync_flags);
885 continue;
886 }
887 object = entry->object.vm_object;
888
889 /*
890 * We can't sync this object if the object has not been
891 * created yet
892 */
893 if (object == VM_OBJECT_NULL) {
894 vm_map_unlock(map);
895 continue;
896 }
897 offset += entry->offset;
898 modifiable = (entry->protection & VM_PROT_WRITE)
899 != VM_PROT_NONE;
900
901 vm_object_lock(object);
902
903 if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) {
904 boolean_t kill_pages = 0;
905
906 if (sync_flags & VM_SYNC_KILLPAGES) {
907 if (object->ref_count == 1 && !entry->needs_copy && !object->shadow)
908 kill_pages = 1;
909 else
910 kill_pages = -1;
911 }
912 if (kill_pages != -1)
0b4e3aa0 913 vm_object_deactivate_pages(object, offset,
914 (vm_object_size_t)flush_size, kill_pages);
915 vm_object_unlock(object);
916 vm_map_unlock(map);
917 continue;
918 }
919 /*
920 * We can't sync this object if there isn't a pager.
921 * Don't bother to sync internal objects, since there can't
922 * be any "permanent" storage for these objects anyway.
923 */
924 if ((object->pager == MEMORY_OBJECT_NULL) ||
925 (object->internal) || (object->private)) {
926 vm_object_unlock(object);
927 vm_map_unlock(map);
928 continue;
929 }
930 /*
931 * keep reference on the object until syncing is done
932 */
933 assert(object->ref_count > 0);
934 object->ref_count++;
935 vm_object_res_reference(object);
936 vm_object_unlock(object);
937
938 vm_map_unlock(map);
939
0b4e3aa0 940 do_sync_req = vm_object_sync(object,
941 offset,
942 flush_size,
943 sync_flags & VM_SYNC_INVALIDATE,
944 (modifiable &&
945 (sync_flags & VM_SYNC_SYNCHRONOUS ||
946 sync_flags & VM_SYNC_ASYNCHRONOUS)));
947
948 /*
 949 * only send an m_o_s if we returned pages or if the entry
 950 * is writable (i.e. dirty pages may have already been sent back)
951 */
952 if (!do_sync_req && !modifiable) {
953 vm_object_deallocate(object);
954 continue;
955 }
956 msync_req_alloc(new_msr);
957
958 vm_object_lock(object);
959 offset += object->paging_offset;
960
961 new_msr->offset = offset;
962 new_msr->length = flush_size;
963 new_msr->object = object;
964 new_msr->flag = VM_MSYNC_SYNCHRONIZING;
965re_iterate:
966 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
967 /*
968 * need to check for overlapping entry, if found, wait
969 * on overlapping msr to be done, then reiterate
970 */
971 msr_lock(msr);
972 if (msr->flag == VM_MSYNC_SYNCHRONIZING &&
973 ((offset >= msr->offset &&
974 offset < (msr->offset + msr->length)) ||
975 (msr->offset >= offset &&
976 msr->offset < (offset + flush_size))))
977 {
978 assert_wait((event_t) msr,THREAD_INTERRUPTIBLE);
979 msr_unlock(msr);
980 vm_object_unlock(object);
981 thread_block((void (*)(void))0);
982 vm_object_lock(object);
983 goto re_iterate;
984 }
985 msr_unlock(msr);
986 }/* queue_iterate */
987
988 queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q);
989 vm_object_unlock(object);
990
991 queue_enter(&req_q, new_msr, msync_req_t, req_q);
992
993 (void) memory_object_synchronize(
994 object->pager,
995 offset,
996 flush_size,
997 sync_flags);
998 }/* while */
999
1000 /*
 1001 * wait for memory_object_synchronize_completed messages from pager(s)
1002 */
1003
1004 while (!queue_empty(&req_q)) {
1005 msr = (msync_req_t)queue_first(&req_q);
1006 msr_lock(msr);
1007 while(msr->flag != VM_MSYNC_DONE) {
1008 assert_wait((event_t) msr, THREAD_INTERRUPTIBLE);
1009 msr_unlock(msr);
1010 thread_block((void (*)(void))0);
1011 msr_lock(msr);
1012 }/* while */
1013 queue_remove(&req_q, msr, msync_req_t, req_q);
1014 msr_unlock(msr);
1015 vm_object_deallocate(msr->object);
1016 msync_req_free(msr);
1017 }/* queue_iterate */
1018
1019 return(KERN_SUCCESS);
1020}/* vm_msync */
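/*
 * Editor's note: illustrative sketch, not part of the original source.
 * A typical user-level call of the vm_msync() routine above: push dirty
 * pages of a mapped region back to their pager and wait for the
 * m_o_s_completed handshake to finish. The start/len values are hypothetical.
 */
#if 0	/* user-level usage sketch, never compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
example_flush(vm_address_t start, vm_size_t len)
{
	/* synchronous flush; or in VM_SYNC_INVALIDATE to discard clean pages */
	return vm_msync(mach_task_self(), start, len, VM_SYNC_SYNCHRONOUS);
}
#endif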
1021
1022
1023/*
1024 * task_wire
1025 *
1026 * Set or clear the map's wiring_required flag. This flag, if set,
1027 * will cause all future virtual memory allocation to allocate
1028 * user wired memory. Unwiring pages wired down as a result of
1029 * this routine is done with the vm_wire interface.
1030 */
1031kern_return_t
1032task_wire(
1033 vm_map_t map,
1034 boolean_t must_wire)
1035{
1036 if (map == VM_MAP_NULL)
1037 return(KERN_INVALID_ARGUMENT);
1038
1039 if (must_wire)
1040 map->wiring_required = TRUE;
1041 else
1042 map->wiring_required = FALSE;
1043
1044 return(KERN_SUCCESS);
1045}
1046
1047/*
1048 * vm_behavior_set sets the paging behavior attribute for the
1049 * specified range in the specified map. This routine will fail
1050 * with KERN_INVALID_ADDRESS if any address in [start,start+size)
1051 * is not a valid allocated or reserved memory region.
1052 */
1053kern_return_t
1054vm_behavior_set(
1055 vm_map_t map,
1056 vm_offset_t start,
1057 vm_size_t size,
1058 vm_behavior_t new_behavior)
1059{
1060 if (map == VM_MAP_NULL)
1061 return(KERN_INVALID_ARGUMENT);
1062
1063 return(vm_map_behavior_set(map, trunc_page_32(start),
1064 round_page_32(start+size), new_behavior));
1065}
1066
1067#if VM_CPM
1068/*
1069 * Control whether the kernel will permit use of
1070 * vm_allocate_cpm at all.
1071 */
1072unsigned int vm_allocate_cpm_enabled = 1;
1073
1074/*
1075 * Ordinarily, the right to allocate CPM is restricted
1076 * to privileged applications (those that can gain access
1077 * to the host port). Set this variable to zero if you
1078 * want to let any application allocate CPM.
1079 */
1080unsigned int vm_allocate_cpm_privileged = 0;
1081
1082/*
1083 * Allocate memory in the specified map, with the caveat that
1084 * the memory is physically contiguous. This call may fail
1085 * if the system can't find sufficient contiguous memory.
1086 * This call may cause or lead to heart-stopping amounts of
1087 * paging activity.
1088 *
1089 * Memory obtained from this call should be freed in the
1090 * normal way, viz., via vm_deallocate.
1091 */
1092kern_return_t
1093vm_allocate_cpm(
1094 host_priv_t host_priv,
1095 register vm_map_t map,
1096 register vm_offset_t *addr,
1097 register vm_size_t size,
1098 int flags)
1099{
1100 vm_object_t cpm_obj;
1101 pmap_t pmap;
1102 vm_page_t m, pages;
1103 kern_return_t kr;
1104 vm_offset_t va, start, end, offset;
1105#if MACH_ASSERT
1106 extern vm_offset_t avail_start, avail_end;
1107 vm_offset_t prev_addr;
1108#endif /* MACH_ASSERT */
1109
1110 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
1111
1112 if (!vm_allocate_cpm_enabled)
1113 return KERN_FAILURE;
1114
1115 if (vm_allocate_cpm_privileged && host_priv == HOST_PRIV_NULL)
1116 return KERN_INVALID_HOST;
1117
1118 if (map == VM_MAP_NULL)
1119 return KERN_INVALID_ARGUMENT;
1120
1121 assert(host_priv == &realhost);
1122
1123 if (size == 0) {
1124 *addr = 0;
1125 return KERN_SUCCESS;
1126 }
1127
1128 if (anywhere)
1129 *addr = vm_map_min(map);
1130 else
1131 *addr = trunc_page_32(*addr);
1132 size = round_page_32(size);
1133
1134 if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS)
1135 return kr;
1136
1137 cpm_obj = vm_object_allocate(size);
1138 assert(cpm_obj != VM_OBJECT_NULL);
1139 assert(cpm_obj->internal);
1140 assert(cpm_obj->size == size);
1141 assert(cpm_obj->can_persist == FALSE);
1142 assert(cpm_obj->pager_created == FALSE);
1143 assert(cpm_obj->pageout == FALSE);
1144 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1145
1146 /*
1147 * Insert pages into object.
1148 */
1149
1150 vm_object_lock(cpm_obj);
1151 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1152 m = pages;
1153 pages = NEXT_PAGE(m);
1154
1155 assert(!m->gobbled);
1156 assert(!m->wanted);
1157 assert(!m->pageout);
1158 assert(!m->tabled);
1159 assert(m->busy);
55e303ae 1160 assert(m->phys_page>=avail_start && m->phys_page<=avail_end);
1161
1162 m->busy = FALSE;
1163 vm_page_insert(m, cpm_obj, offset);
1164 }
1165 assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
1166 vm_object_unlock(cpm_obj);
1167
1168 /*
1169 * Hang onto a reference on the object in case a
1170 * multi-threaded application for some reason decides
1171 * to deallocate the portion of the address space into
1172 * which we will insert this object.
1173 *
1174 * Unfortunately, we must insert the object now before
1175 * we can talk to the pmap module about which addresses
1176 * must be wired down. Hence, the race with a multi-
1177 * threaded app.
1178 */
1179 vm_object_reference(cpm_obj);
1180
1181 /*
1182 * Insert object into map.
1183 */
1184
1185 kr = vm_map_enter(
1186 map,
1187 addr,
1188 size,
1189 (vm_offset_t)0,
1190 flags,
1191 cpm_obj,
1192 (vm_object_offset_t)0,
1193 FALSE,
1194 VM_PROT_ALL,
1195 VM_PROT_ALL,
1196 VM_INHERIT_DEFAULT);
1197
1198 if (kr != KERN_SUCCESS) {
1199 /*
1200 * A CPM object doesn't have can_persist set,
1201 * so all we have to do is deallocate it to
1202 * free up these pages.
1203 */
1204 assert(cpm_obj->pager_created == FALSE);
1205 assert(cpm_obj->can_persist == FALSE);
1206 assert(cpm_obj->pageout == FALSE);
1207 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1208 vm_object_deallocate(cpm_obj); /* kill acquired ref */
1209 vm_object_deallocate(cpm_obj); /* kill creation ref */
1210 }
1211
1212 /*
1213 * Inform the physical mapping system that the
1214 * range of addresses may not fault, so that
1215 * page tables and such can be locked down as well.
1216 */
1217 start = *addr;
1218 end = start + size;
1219 pmap = vm_map_pmap(map);
1220 pmap_pageable(pmap, start, end, FALSE);
1221
1222 /*
1223 * Enter each page into the pmap, to avoid faults.
1224 * Note that this loop could be coded more efficiently,
1225 * if the need arose, rather than looking up each page
1226 * again.
1227 */
1228 for (offset = 0, va = start; offset < size;
1229 va += PAGE_SIZE, offset += PAGE_SIZE) {
1230 vm_object_lock(cpm_obj);
1231 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1232 vm_object_unlock(cpm_obj);
1233 assert(m != VM_PAGE_NULL);
9bccf70c 1234 PMAP_ENTER(pmap, va, m, VM_PROT_ALL,
1235 ((unsigned int)(m->object->wimg_bits)) & VM_WIMG_MASK,
1236 TRUE);
1237 }
1238
1239#if MACH_ASSERT
1240 /*
1241 * Verify ordering in address space.
1242 */
1243 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1244 vm_object_lock(cpm_obj);
1245 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1246 vm_object_unlock(cpm_obj);
1247 if (m == VM_PAGE_NULL)
1248 panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
1249 cpm_obj, offset);
1250 assert(m->tabled);
1251 assert(!m->busy);
1252 assert(!m->wanted);
1253 assert(!m->fictitious);
1254 assert(!m->private);
1255 assert(!m->absent);
1256 assert(!m->error);
1257 assert(!m->cleaning);
1258 assert(!m->precious);
1259 assert(!m->clustered);
1260 if (offset != 0) {
55e303ae 1261 if (m->phys_page != prev_addr + 1) {
1262 printf("start 0x%x end 0x%x va 0x%x\n",
1263 start, end, va);
1264 printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
1265 printf("m 0x%x prev_address 0x%x\n", m,
1266 prev_addr);
1267 panic("vm_allocate_cpm: pages not contig!");
1268 }
1269 }
55e303ae 1270 prev_addr = m->phys_page;
1271 }
1272#endif /* MACH_ASSERT */
1273
1274 vm_object_deallocate(cpm_obj); /* kill extra ref */
1275
1276 return kr;
1277}
1278
1279
1280#else /* VM_CPM */
1281
1282/*
1283 * Interface is defined in all cases, but unless the kernel
1284 * is built explicitly for this option, the interface does
1285 * nothing.
1286 */
1287
1288kern_return_t
1289vm_allocate_cpm(
1290 host_priv_t host_priv,
1291 register vm_map_t map,
1292 register vm_offset_t *addr,
1293 register vm_size_t size,
1294 int flags)
1295{
1296 return KERN_FAILURE;
1297}
1298
1299/*
1300 */
1301kern_return_t
1302mach_memory_object_memory_entry_64(
1303 host_t host,
1304 boolean_t internal,
1305 vm_object_offset_t size,
1306 vm_prot_t permission,
0b4e3aa0 1307 memory_object_t pager,
1308 ipc_port_t *entry_handle)
1309{
55e303ae 1310 unsigned int access;
1311 vm_named_entry_t user_object;
1312 ipc_port_t user_handle;
1313 ipc_port_t previous;
1314 kern_return_t kr;
1315
1316 if (host == HOST_NULL)
1317 return(KERN_INVALID_HOST);
1318
1319 user_object = (vm_named_entry_t)
1320 kalloc(sizeof (struct vm_named_entry));
1321 if(user_object == NULL)
1322 return KERN_FAILURE;
1323 named_entry_lock_init(user_object);
1324 user_handle = ipc_port_alloc_kernel();
1325 ip_lock(user_handle);
1326
1327 /* make a sonce right */
1328 user_handle->ip_sorights++;
1329 ip_reference(user_handle);
1330
1331 user_handle->ip_destination = IP_NULL;
1332 user_handle->ip_receiver_name = MACH_PORT_NULL;
1333 user_handle->ip_receiver = ipc_space_kernel;
1334
1335 /* make a send right */
1336 user_handle->ip_mscount++;
1337 user_handle->ip_srights++;
1338 ip_reference(user_handle);
1339
1340 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1341 /* nsrequest unlocks user_handle */
1342
1343 user_object->object = NULL;
1344 user_object->size = size;
1345 user_object->offset = 0;
1346 user_object->backing.pager = pager;
1347 user_object->protection = permission & VM_PROT_ALL;
1348 access = GET_MAP_MEM(permission);
1349 SET_MAP_MEM(access, user_object->protection);
1350 user_object->internal = internal;
1351 user_object->is_sub_map = FALSE;
1352 user_object->ref_count = 1;
1353
1354 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1355 IKOT_NAMED_ENTRY);
1356 *entry_handle = user_handle;
1357 return KERN_SUCCESS;
1358}
1359
1360kern_return_t
1361mach_memory_object_memory_entry(
1362 host_t host,
1363 boolean_t internal,
1364 vm_size_t size,
1365 vm_prot_t permission,
0b4e3aa0 1366 memory_object_t pager,
1367 ipc_port_t *entry_handle)
1368{
1369 return mach_memory_object_memory_entry_64( host, internal,
1370 (vm_object_offset_t)size, permission, pager, entry_handle);
1371}
1372
1373
1374
1375/*
1376 */
1377
1378kern_return_t
1379mach_make_memory_entry_64(
1380 vm_map_t target_map,
1381 vm_object_size_t *size,
1382 vm_object_offset_t offset,
1383 vm_prot_t permission,
1384 ipc_port_t *object_handle,
1385 ipc_port_t parent_entry)
1386{
1387 vm_map_version_t version;
1388 vm_named_entry_t user_object;
1389 ipc_port_t user_handle;
1390 ipc_port_t previous;
1391 kern_return_t kr;
1392 vm_map_t pmap_map;
1393
1394 /* needed for call to vm_map_lookup_locked */
9bccf70c 1395 boolean_t wired;
1c79356b 1396 vm_object_offset_t obj_off;
9bccf70c 1397 vm_prot_t prot;
1398 vm_object_offset_t lo_offset, hi_offset;
1399 vm_behavior_t behavior;
1400 vm_object_t object;
1401 vm_object_t shadow_object;
1402
1403 /* needed for direct map entry manipulation */
1404 vm_map_entry_t map_entry;
1405 vm_map_entry_t next_entry;
1406 vm_map_t local_map;
1407 vm_map_t original_map = target_map;
1408 vm_offset_t local_offset;
1c79356b 1409 vm_object_size_t mappable_size;
1410 vm_object_size_t total_size;
1411
1412 unsigned int access;
1413 vm_prot_t protections;
1414 unsigned int wimg_mode;
1415 boolean_t cache_attr;
1416
1417 protections = permission & VM_PROT_ALL;
1418 access = GET_MAP_MEM(permission);
1419
1c79356b 1420
1421 offset = trunc_page_64(offset);
1422 *size = round_page_64(*size);
1423
1424 if((parent_entry != NULL)
1425 && (permission & MAP_MEM_ONLY)) {
1426 vm_named_entry_t parent_object;
1427 if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
1428 return KERN_INVALID_ARGUMENT;
1429 }
1430 parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
1431 object = parent_object->object;
1432 if(object != VM_OBJECT_NULL)
1433 wimg_mode = object->wimg_bits;
1434 if((access != GET_MAP_MEM(parent_object->protection)) &&
1435 !(parent_object->protection & VM_PROT_WRITE)) {
1436 return KERN_INVALID_RIGHT;
1437 }
1438 if(access == MAP_MEM_IO) {
1439 SET_MAP_MEM(access, parent_object->protection);
1440 wimg_mode = VM_WIMG_IO;
1441 } else if (access == MAP_MEM_COPYBACK) {
1442 SET_MAP_MEM(access, parent_object->protection);
1443 wimg_mode = VM_WIMG_DEFAULT;
1444 } else if (access == MAP_MEM_WTHRU) {
1445 SET_MAP_MEM(access, parent_object->protection);
1446 wimg_mode = VM_WIMG_WTHRU;
1447 } else if (access == MAP_MEM_WCOMB) {
1448 SET_MAP_MEM(access, parent_object->protection);
1449 wimg_mode = VM_WIMG_WCOMB;
1450 }
1451 if(object &&
1452 (access != MAP_MEM_NOOP) &&
1453 (!(object->nophyscache))) {
1454 if(object->wimg_bits != wimg_mode) {
1455 vm_page_t p;
1456 if ((wimg_mode == VM_WIMG_IO)
1457 || (wimg_mode == VM_WIMG_WCOMB))
1458 cache_attr = TRUE;
1459 else
1460 cache_attr = FALSE;
1461 vm_object_lock(object);
1462 while(object->paging_in_progress) {
1463 vm_object_unlock(object);
1464 vm_object_wait(object,
1465 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
1466 THREAD_UNINT);
1467 vm_object_lock(object);
1468 }
1469 object->wimg_bits = wimg_mode;
1470 queue_iterate(&object->memq,
1471 p, vm_page_t, listq) {
1472 if (!p->fictitious) {
1473 pmap_page_protect(
1474 p->phys_page,
1475 VM_PROT_NONE);
1476 if(cache_attr)
1477 pmap_sync_caches_phys(
1478 p->phys_page);
1479 }
1480 }
1481 vm_object_unlock(object);
1482 }
1483 }
1484 return KERN_SUCCESS;
1485 }
1486
1487 if(permission & MAP_MEM_ONLY) {
1488 return KERN_INVALID_ARGUMENT;
1489 }
1490
1491 user_object = (vm_named_entry_t)
1492 kalloc(sizeof (struct vm_named_entry));
1493 if(user_object == NULL)
1494 return KERN_FAILURE;
1495 named_entry_lock_init(user_object);
1496 user_handle = ipc_port_alloc_kernel();
1497 ip_lock(user_handle);
1498
1499 /* make a sonce right */
1500 user_handle->ip_sorights++;
1501 ip_reference(user_handle);
1502
1503 user_handle->ip_destination = IP_NULL;
1504 user_handle->ip_receiver_name = MACH_PORT_NULL;
1505 user_handle->ip_receiver = ipc_space_kernel;
1506
1507 /* make a send right */
1508 user_handle->ip_mscount++;
1509 user_handle->ip_srights++;
1510 ip_reference(user_handle);
1511
1512 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1513 /* nsrequest unlocks user_handle */
1514
1515 user_object->backing.pager = NULL;
1516 user_object->ref_count = 1;
1517
1518 if(permission & MAP_MEM_NAMED_CREATE) {
1519 user_object->object = NULL;
1520 user_object->internal = TRUE;
1521 user_object->is_sub_map = FALSE;
1522 user_object->offset = 0;
1523 user_object->protection = protections;
1524 SET_MAP_MEM(access, user_object->protection);
1525 user_object->size = *size;
1526
1527 /* user_object pager and internal fields are not used */
1528 /* when the object field is filled in. */
1529
1530 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1531 IKOT_NAMED_ENTRY);
1532 *object_handle = user_handle;
1533 return KERN_SUCCESS;
1534 }
1535
1536 if(parent_entry == NULL) {
1537 /* Create a named object based on address range within the task map */
1538 /* Go find the object at given address */
1539
1540 vm_map_lock_read(target_map);
1541
1542 /* get the object associated with the target address */
1543 /* note we check the permission of the range against */
1544 /* that requested by the caller */
1545
1546 kr = vm_map_lookup_locked(&target_map, offset,
55e303ae 1547 protections, &version,
1548 &object, &obj_off, &prot, &wired, &behavior,
1549 &lo_offset, &hi_offset, &pmap_map);
1550 if (kr != KERN_SUCCESS) {
1551 vm_map_unlock_read(target_map);
1552 goto make_mem_done;
1553 }
55e303ae 1554 if (((prot & protections) != protections)
9bccf70c 1555 || (object == kernel_object)) {
1556 kr = KERN_INVALID_RIGHT;
1557 vm_object_unlock(object);
1558 vm_map_unlock_read(target_map);
1559 if(pmap_map != target_map)
1560 vm_map_unlock_read(pmap_map);
1561 if(object == kernel_object) {
1562 printf("Warning: Attempt to create a named"
1563 " entry from the kernel_object\n");
1564 }
1565 goto make_mem_done;
1566 }
1567
1568 /* We have an object, now check to see if this object */
1569 /* is suitable. If not, create a shadow and share that */
1570
1c79356b 1571redo_lookup:
1572 local_map = original_map;
1573 local_offset = offset;
1574 if(target_map != local_map) {
1575 vm_map_unlock_read(target_map);
1576 if(pmap_map != target_map)
1577 vm_map_unlock_read(pmap_map);
1578 vm_map_lock_read(local_map);
1579 target_map = local_map;
1580 pmap_map = local_map;
1581 }
1c79356b 1582 while(TRUE) {
1583 if(!vm_map_lookup_entry(local_map,
1584 local_offset, &map_entry)) {
1585 kr = KERN_INVALID_ARGUMENT;
1586 vm_object_unlock(object);
1587 vm_map_unlock_read(target_map);
1588 if(pmap_map != target_map)
1589 vm_map_unlock_read(pmap_map);
1590 goto make_mem_done;
1591 }
1592 if(!(map_entry->is_sub_map)) {
1593 if(map_entry->object.vm_object != object) {
1594 kr = KERN_INVALID_ARGUMENT;
1595 vm_object_unlock(object);
1596 vm_map_unlock_read(target_map);
1597 if(pmap_map != target_map)
1598 vm_map_unlock_read(pmap_map);
1599 goto make_mem_done;
1600 }
9bccf70c 1601 if(map_entry->wired_count) {
55e303ae 1602 /* JMM - The check below should be reworked instead. */
1603 object->true_share = TRUE;
1604 }
1605 break;
1606 } else {
1607 vm_map_t tmap;
1608 tmap = local_map;
1c79356b 1609 local_map = map_entry->object.sub_map;
9bccf70c 1610
1c79356b 1611 vm_map_lock_read(local_map);
9bccf70c 1612 vm_map_unlock_read(tmap);
1c79356b 1613 target_map = local_map;
1614 pmap_map = local_map;
1615 local_offset = local_offset - map_entry->vme_start;
1616 local_offset += map_entry->offset;
1617 }
1618 }
55e303ae 1619 if(((map_entry->max_protection) & protections) != protections) {
1620 kr = KERN_INVALID_RIGHT;
1621 vm_object_unlock(object);
1622 vm_map_unlock_read(target_map);
1623 if(pmap_map != target_map)
1624 vm_map_unlock_read(pmap_map);
1625 goto make_mem_done;
1626 }
1627
1628 mappable_size = hi_offset - obj_off;
1629 total_size = map_entry->vme_end - map_entry->vme_start;
1630 if(*size > mappable_size) {
1631 /* try to extend mappable size if the entries */
1632 /* following are from the same object and are */
1633 /* compatible */
1634 next_entry = map_entry->vme_next;
 1635 /* let's see if the next map entry is still */
1636 /* pointing at this object and is contiguous */
1637 while(*size > mappable_size) {
1638 if((next_entry->object.vm_object == object) &&
1639 (next_entry->vme_start ==
1640 next_entry->vme_prev->vme_end) &&
1641 (next_entry->offset ==
1642 next_entry->vme_prev->offset +
1643 (next_entry->vme_prev->vme_end -
1644 next_entry->vme_prev->vme_start))) {
1645 if(((next_entry->max_protection)
55e303ae 1646 & protections) != protections) {
1647 break;
1648 }
1649 if (next_entry->needs_copy !=
1650 map_entry->needs_copy)
1651 break;
1652 mappable_size += next_entry->vme_end
1653 - next_entry->vme_start;
1654 total_size += next_entry->vme_end
1655 - next_entry->vme_start;
1656 next_entry = next_entry->vme_next;
1657 } else {
1658 break;
1659 }
1660
1661 }
1662 }
1663
1664 if(object->internal) {
1665 /* vm_map_lookup_locked will create a shadow if */
1666 /* needs_copy is set but does not check for the */
1667 /* other two conditions shown. It is important to */
1668 /* set up an object which will not be pulled from */
1669 /* under us. */
1670
0b4e3aa0 1671 if ((map_entry->needs_copy || object->shadowed ||
1672 (object->size > total_size))
1673 && !object->true_share) {
1674 if (vm_map_lock_read_to_write(target_map)) {
1675 vm_map_lock_read(target_map);
1676 goto redo_lookup;
1677 }
1678
1679 /*
1680 * JMM - We need to avoid coming here when the object
1681 * is wired by anybody, not just the current map. Why
1682 * couldn't we use the standard vm_object_copy_quickly()
1683 * approach here?
1684 */
1685
1c79356b 1686 /* create a shadow object */
1687 vm_object_shadow(&map_entry->object.vm_object,
1688 &map_entry->offset, total_size);
1689 shadow_object = map_entry->object.vm_object;
1690 vm_object_unlock(object);
1691 vm_object_pmap_protect(
1692 object, map_entry->offset,
1693 total_size,
1694 ((map_entry->is_shared
1695 || target_map->mapped)
1696 ? PMAP_NULL :
1697 target_map->pmap),
1698 map_entry->vme_start,
1699 map_entry->protection & ~VM_PROT_WRITE);
1700 total_size -= (map_entry->vme_end
1701 - map_entry->vme_start);
1702 next_entry = map_entry->vme_next;
1703 map_entry->needs_copy = FALSE;
1704 while (total_size) {
1705 if(next_entry->object.vm_object == object) {
1706 shadow_object->ref_count++;
1707 vm_object_res_reference(shadow_object);
1708 next_entry->object.vm_object
1709 = shadow_object;
55e303ae 1710 vm_object_deallocate(object);
1711 next_entry->offset
1712 = next_entry->vme_prev->offset +
1713 (next_entry->vme_prev->vme_end
1714 - next_entry->vme_prev->vme_start);
1715 next_entry->needs_copy = FALSE;
1716 } else {
1717 panic("mach_make_memory_entry_64:"
1718 " map entries out of sync\n");
1719 }
1720 total_size -=
1721 next_entry->vme_end
1722 - next_entry->vme_start;
1723 next_entry = next_entry->vme_next;
1724 }
1725
1726 object = shadow_object;
1727 vm_object_lock(object);
1728 obj_off = (local_offset - map_entry->vme_start)
1729 + map_entry->offset;
1730 vm_map_lock_write_to_read(target_map);
1c79356b 1731
1732
1733 }
1734 }
1735
1736 /* note: in the future we can (if necessary) allow for */
1737 /* memory object lists, this will better support */
1738 /* fragmentation, but is it necessary? The user should */
1739 /* be encouraged to create address space oriented */
1740 /* shared objects from CLEAN memory regions which have */
 1741 /* a known and defined history, i.e. no inheritance */
1742 /* share, make this call before making the region the */
1743 /* target of ipc's, etc. The code above, protecting */
1744 /* against delayed copy, etc. is mostly defensive. */
1745
1746 wimg_mode = object->wimg_bits;
1747 if(!(object->nophyscache)) {
1748 if(access == MAP_MEM_IO) {
1749 wimg_mode = VM_WIMG_IO;
1750 } else if (access == MAP_MEM_COPYBACK) {
1751 wimg_mode = VM_WIMG_USE_DEFAULT;
1752 } else if (access == MAP_MEM_WTHRU) {
1753 wimg_mode = VM_WIMG_WTHRU;
1754 } else if (access == MAP_MEM_WCOMB) {
1755 wimg_mode = VM_WIMG_WCOMB;
1756 }
1757 }
d7e50217 1758
de355530 1759 object->true_share = TRUE;
1760 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
1761 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1762
1763 /* we now point to this object, hold on to it */
1764 vm_object_reference_locked(object);
1765 vm_map_unlock_read(target_map);
1766 if(pmap_map != target_map)
1767 vm_map_unlock_read(pmap_map);
1768
1769 if(object->wimg_bits != wimg_mode) {
1770 vm_page_t p;
1771
1772 vm_object_paging_wait(object, THREAD_UNINT);
1773
1774 queue_iterate(&object->memq,
1775 p, vm_page_t, listq) {
1776 if (!p->fictitious) {
1777 pmap_page_protect(
1778 p->phys_page,
1779 VM_PROT_NONE);
1780 if(cache_attr)
1781 pmap_sync_caches_phys(
1782 p->phys_page);
1783 }
1784 }
1785 object->wimg_bits = wimg_mode;
1786 }
1787 user_object->object = object;
1788 user_object->internal = object->internal;
1789 user_object->is_sub_map = FALSE;
1790 user_object->offset = obj_off;
1791 user_object->protection = permission;
1792
1793 /* the size of mapped entry that overlaps with our region */
1794 /* which is targeted for share. */
1795 /* (entry_end - entry_start) - */
1796 /* offset of our beg addr within entry */
1797 /* it corresponds to this: */
1798
1799 if(*size > mappable_size)
1800 *size = mappable_size;
1801
1802 user_object->size = *size;
1803
1804 /* user_object pager and internal fields are not used */
1805 /* when the object field is filled in. */
1806
1807 vm_object_unlock(object);
1808 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1809 IKOT_NAMED_ENTRY);
1c79356b 1810 *object_handle = user_handle;
1811 return KERN_SUCCESS;
1812 } else {
1813
1814 vm_named_entry_t parent_object;
1815
 1816 /* The new object will be based on an existing named object */
1817 if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
1818 kr = KERN_INVALID_ARGUMENT;
1819 goto make_mem_done;
1820 }
1821 parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
1822 if((offset + *size) > parent_object->size) {
1823 kr = KERN_INVALID_ARGUMENT;
1824 goto make_mem_done;
1825 }
1826
1827 user_object->object = parent_object->object;
1828 user_object->size = *size;
1829 user_object->offset = parent_object->offset + offset;
1830 user_object->protection = parent_object->protection;
1831 user_object->protection &= ~VM_PROT_ALL;
1832 user_object->protection = permission & VM_PROT_ALL;
1833 if(access != MAP_MEM_NOOP) {
1834 SET_MAP_MEM(access, user_object->protection);
1835 }
1836 if(parent_object->is_sub_map) {
1837 user_object->backing.map = parent_object->backing.map;
1838 vm_map_lock(user_object->backing.map);
1839 user_object->backing.map->ref_count++;
1840 vm_map_unlock(user_object->backing.map);
1841 }
1842 else {
1843 user_object->backing.pager = parent_object->backing.pager;
1844 }
1845 user_object->internal = parent_object->internal;
1846 user_object->is_sub_map = parent_object->is_sub_map;
1847
1848 if(parent_object->object != NULL) {
1849 /* we now point to this object, hold on */
1850 vm_object_reference(parent_object->object);
1851 vm_object_lock(parent_object->object);
1852 parent_object->object->true_share = TRUE;
1853 if (parent_object->object->copy_strategy ==
1854 MEMORY_OBJECT_COPY_SYMMETRIC)
1855 parent_object->object->copy_strategy =
1856 MEMORY_OBJECT_COPY_DELAY;
1857 vm_object_unlock(parent_object->object);
1858 }
1859 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1860 IKOT_NAMED_ENTRY);
1861 *object_handle = user_handle;
1862 return KERN_SUCCESS;
1863 }
1864
1865
1866
1867make_mem_done:
1868 ipc_port_dealloc_kernel(user_handle);
1869 kfree((vm_offset_t)user_object, sizeof (struct vm_named_entry));
1870 return kr;
1871}
1872
1873kern_return_t
1874mach_make_memory_entry(
1875 vm_map_t target_map,
1876 vm_size_t *size,
1877 vm_offset_t offset,
1878 vm_prot_t permission,
1879 ipc_port_t *object_handle,
1880 ipc_port_t parent_entry)
1881{
1882 vm_object_offset_t size_64;
1883 kern_return_t kr;
1884
1885 size_64 = (vm_object_offset_t)*size;
1886 kr = mach_make_memory_entry_64(target_map, &size_64,
1887 (vm_object_offset_t)offset, permission, object_handle,
1888 parent_entry);
1889 *size = (vm_size_t)size_64;
1890 return kr;
1891}
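/*
 * Editor's note: illustrative sketch, not part of the original source.
 * The usual pairing of mach_make_memory_entry() with vm_map(): wrap a range
 * of the current task in a named entry, then map that entry into another
 * task so both share the same pages. The child_task port and sizes below
 * are hypothetical.
 */
#if 0	/* user-level usage sketch, never compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
example_share(task_t child_task, vm_address_t src, vm_size_t len,
	      vm_address_t *child_addr)
{
	mach_port_t	mem_entry;
	vm_size_t	entry_size = len;
	kern_return_t	kr;

	kr = mach_make_memory_entry(mach_task_self(), &entry_size, src,
				    VM_PROT_READ | VM_PROT_WRITE,
				    &mem_entry, MACH_PORT_NULL /* parent */);
	if (kr != KERN_SUCCESS)
		return kr;

	*child_addr = 0;
	kr = vm_map(child_task, child_addr, entry_size, 0, VM_FLAGS_ANYWHERE,
		    mem_entry, 0, FALSE /* copy */,
		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
		    VM_INHERIT_NONE);

	(void) mach_port_deallocate(mach_task_self(), mem_entry);
	return kr;
}
#endif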
1892
1893/*
1894 */
1895
1896kern_return_t
1897vm_region_object_create(
1898 vm_map_t target_map,
1899 vm_size_t size,
1900 ipc_port_t *object_handle)
1901{
1902 vm_named_entry_t user_object;
1903 ipc_port_t user_handle;
1904 kern_return_t kr;
1905
1906 ipc_port_t previous;
1907 vm_map_t new_map;
1908
1909 user_object = (vm_named_entry_t)
1910 kalloc(sizeof (struct vm_named_entry));
1911 if(user_object == NULL) {
1912 return KERN_FAILURE;
1913 }
1914 named_entry_lock_init(user_object);
1915 user_handle = ipc_port_alloc_kernel();
1916
1917
1918 ip_lock(user_handle);
1919
1920 /* make a sonce right */
1921 user_handle->ip_sorights++;
1922 ip_reference(user_handle);
1923
1924 user_handle->ip_destination = IP_NULL;
1925 user_handle->ip_receiver_name = MACH_PORT_NULL;
1926 user_handle->ip_receiver = ipc_space_kernel;
1927
1928 /* make a send right */
1929 user_handle->ip_mscount++;
1930 user_handle->ip_srights++;
1931 ip_reference(user_handle);
1932
1933 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1934 /* nsrequest unlocks user_handle */
1935
1936 /* Create a named object based on a submap of specified size */
1937
55e303ae 1938 new_map = vm_map_create(0, 0, size, TRUE);
1c79356b
A
1939 user_object->backing.map = new_map;
1940
1941
1942 user_object->object = VM_OBJECT_NULL;
1943 user_object->internal = TRUE;
1944 user_object->is_sub_map = TRUE;
1945 user_object->offset = 0;
1946 user_object->protection = VM_PROT_ALL;
1947 user_object->size = size;
1948 user_object->ref_count = 1;
1949
1950 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1951 IKOT_NAMED_ENTRY);
1952 *object_handle = user_handle;
1953 return KERN_SUCCESS;
1954
1955}
1956
 1957/* For a given range, check all map entries. If the entry corresponds to */
1958/* the old vm_region/map provided on the call, replace it with the */
1959/* corresponding range in the new vm_region/map */
1960kern_return_t vm_map_region_replace(
1961 vm_map_t target_map,
1962 ipc_port_t old_region,
1963 ipc_port_t new_region,
1964 vm_offset_t start,
1965 vm_offset_t end)
1966{
1967 vm_named_entry_t old_object;
1968 vm_named_entry_t new_object;
1969 vm_map_t old_submap;
1970 vm_map_t new_submap;
1971 vm_offset_t addr;
1972 vm_map_entry_t entry;
1973 int nested_pmap = 0;
1974
1975
1976 vm_map_lock(target_map);
1977 old_object = (vm_named_entry_t)old_region->ip_kobject;
1978 new_object = (vm_named_entry_t)new_region->ip_kobject;
1979 if((!old_object->is_sub_map) || (!new_object->is_sub_map)) {
1980 vm_map_unlock(target_map);
1981 return KERN_INVALID_ARGUMENT;
1982 }
1983 old_submap = (vm_map_t)old_object->backing.map;
1984 new_submap = (vm_map_t)new_object->backing.map;
1985 vm_map_lock(old_submap);
1986 if((old_submap->min_offset != new_submap->min_offset) ||
1987 (old_submap->max_offset != new_submap->max_offset)) {
1988 vm_map_unlock(old_submap);
1989 vm_map_unlock(target_map);
1990 return KERN_INVALID_ARGUMENT;
1991 }
1992 if(!vm_map_lookup_entry(target_map, start, &entry)) {
 1993 /* if the src is not contained, the entry precedes */
1994 /* our range */
1995 addr = entry->vme_start;
1996 if(entry == vm_map_to_entry(target_map)) {
1997 vm_map_unlock(old_submap);
1998 vm_map_unlock(target_map);
1999 return KERN_SUCCESS;
2000 }
55e303ae
A
2001 }
2002 if ((entry->use_pmap) &&
2003 (new_submap->pmap == NULL)) {
2004 new_submap->pmap = pmap_create((vm_size_t) 0);
2005 if(new_submap->pmap == PMAP_NULL) {
2006 vm_map_unlock(old_submap);
2007 vm_map_unlock(target_map);
2008 return(KERN_NO_SPACE);
2009 }
1c79356b
A
2010 }
2011 addr = entry->vme_start;
2012 vm_map_reference(old_submap);
2013 while((entry != vm_map_to_entry(target_map)) &&
2014 (entry->vme_start < end)) {
2015 if((entry->is_sub_map) &&
2016 (entry->object.sub_map == old_submap)) {
1c79356b 2017 if(entry->use_pmap) {
55e303ae 2018 if((start & 0x0fffffff) ||
1c79356b
A
2019 ((end - start) != 0x10000000)) {
2020 vm_map_unlock(old_submap);
9bccf70c 2021 vm_map_deallocate(old_submap);
1c79356b
A
2022 vm_map_unlock(target_map);
2023 return KERN_INVALID_ARGUMENT;
2024 }
2025 nested_pmap = 1;
2026 }
9bccf70c 2027 entry->object.sub_map = new_submap;
1c79356b
A
2028 vm_map_reference(new_submap);
2029 vm_map_deallocate(old_submap);
2030 }
2031 entry = entry->vme_next;
2032 addr = entry->vme_start;
2033 }
2034 if(nested_pmap) {
2035#ifndef i386
55e303ae 2036 pmap_unnest(target_map->pmap, (addr64_t)start);
9bccf70c
A
2037 if(target_map->mapped) {
2038 vm_map_submap_pmap_clean(target_map,
2039 start, end, old_submap, 0);
2040 }
1c79356b 2041 pmap_nest(target_map->pmap, new_submap->pmap,
55e303ae
A
2042 (addr64_t)start, (addr64_t)start,
2043 (addr64_t)(end - start));
2044#endif /* i386 */
1c79356b 2045 } else {
9bccf70c
A
2046 vm_map_submap_pmap_clean(target_map,
2047 start, end, old_submap, 0);
1c79356b
A
2048 }
2049 vm_map_unlock(old_submap);
9bccf70c 2050 vm_map_deallocate(old_submap);
1c79356b
A
2051 vm_map_unlock(target_map);
2052 return KERN_SUCCESS;
2053}
2054
2055
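/*
 * mach_destroy_memory_entry:
 *
 * Drop one reference on the named entry behind a memory-entry port.  When
 * the last reference goes away, the backing VM object or submap is
 * released and the named entry itself is freed.  Invoked when the kernel
 * object attached to the port is being torn down.
 */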
void
mach_destroy_memory_entry(
    ipc_port_t      port)
{
    vm_named_entry_t    named_entry;
#if MACH_ASSERT
    assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
    named_entry = (vm_named_entry_t)port->ip_kobject;
    mutex_lock(&(named_entry)->Lock);
    named_entry->ref_count -= 1;
    if(named_entry->ref_count == 0) {
        if(named_entry->object) {
            /* release the memory object we've been pointing to */
            vm_object_deallocate(named_entry->object);
        }
        if(named_entry->is_sub_map) {
            vm_map_deallocate(named_entry->backing.map);
        }
        kfree((vm_offset_t)port->ip_kobject,
                sizeof (struct vm_named_entry));
    } else
        mutex_unlock(&(named_entry)->Lock);
}

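/*
 * vm_map_page_query:
 *
 * Report the disposition (present, fictitious, dirty, referenced) of the
 * page backing "offset" in target_map, along with an approximate
 * reference count on the backing object; see the caveats in the body.
 *
 * Illustrative (hypothetical) use -- "addr", "disp" and "refs" are
 * made-up names:
 *
 *      int disp, refs;
 *      if (vm_map_page_query(current_map(), addr, &disp, &refs)
 *                  == KERN_SUCCESS &&
 *          (disp & VM_PAGE_QUERY_PAGE_PRESENT)) {
 *          ... the page is resident ...
 *      }
 */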
kern_return_t
vm_map_page_query(
    vm_map_t        target_map,
    vm_offset_t     offset,
    int             *disposition,
    int             *ref_count)
{
    vm_map_entry_t  map_entry;
    vm_object_t     object;
    vm_page_t       m;

restart_page_query:
    *disposition = 0;
    *ref_count = 0;
    vm_map_lock(target_map);
    if(!vm_map_lookup_entry(target_map, offset, &map_entry)) {
        vm_map_unlock(target_map);
        return KERN_FAILURE;
    }
    offset -= map_entry->vme_start;  /* adjust to offset within entry */
    offset += map_entry->offset;     /* adjust to target object offset */
    if(map_entry->object.vm_object != VM_OBJECT_NULL) {
        if(!map_entry->is_sub_map) {
            object = map_entry->object.vm_object;
        } else {
            vm_map_unlock(target_map);
            target_map = map_entry->object.sub_map;
            goto restart_page_query;
        }
    } else {
        vm_map_unlock(target_map);
        return KERN_FAILURE;
    }
    vm_object_lock(object);
    vm_map_unlock(target_map);
    while(TRUE) {
        m = vm_page_lookup(object, offset);
        if (m != VM_PAGE_NULL) {
            *disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
            break;
        } else {
            if(object->shadow) {
                offset += object->shadow_offset;
                vm_object_unlock(object);
                object = object->shadow;
                vm_object_lock(object);
                continue;
            }
            vm_object_unlock(object);
            return KERN_FAILURE;
        }
    }

    /* The ref_count is not strictly accurate: it counts the entities    */
    /* holding a reference on the object; they may not be mapping the    */
    /* object, or may not be mapping the section holding the target      */
    /* page.  It is still a ballpark number and, though an overcount,    */
    /* it picks up the copy-on-write cases.                              */

    /* We could also get a picture of page sharing from pmap_attributes, */
    /* but that would under-count, since only faulted-in mappings would  */
    /* show up.                                                          */

    *ref_count = object->ref_count;

    if (m->fictitious) {
        *disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
        vm_object_unlock(object);
        return KERN_SUCCESS;
    }

    if (m->dirty)
        *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
    else if(pmap_is_modified(m->phys_page))
        *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;

    if (m->reference)
        *disposition |= VM_PAGE_QUERY_PAGE_REF;
    else if(pmap_is_referenced(m->phys_page))
        *disposition |= VM_PAGE_QUERY_PAGE_REF;

    vm_object_unlock(object);
    return KERN_SUCCESS;
}

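/*
 * set_dp_control_port / get_dp_control_port:
 *
 * Register and retrieve the dynamic pager's control port.  Both calls
 * require the host-privileged port; set_dp_control_port releases any
 * previously registered send right before installing the new one.
 */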
kern_return_t
set_dp_control_port(
    host_priv_t     host_priv,
    ipc_port_t      control_port)
{
    if (host_priv == HOST_PRIV_NULL)
        return (KERN_INVALID_HOST);

    if (IP_VALID(dynamic_pager_control_port))
        ipc_port_release_send(dynamic_pager_control_port);

    dynamic_pager_control_port = control_port;
    return KERN_SUCCESS;
}

kern_return_t
get_dp_control_port(
    host_priv_t     host_priv,
    ipc_port_t      *control_port)
{
    if (host_priv == HOST_PRIV_NULL)
        return (KERN_INVALID_HOST);

    *control_port = ipc_port_copy_send(dynamic_pager_control_port);
    return KERN_SUCCESS;
}

/* Retrieve a upl for an object underlying an address range in a map */

kern_return_t
vm_map_get_upl(
    vm_map_t                map,
    vm_address_t            offset,
    vm_size_t               *upl_size,
    upl_t                   *upl,
    upl_page_info_array_t   page_list,
    unsigned int            *count,
    int                     *flags,
    int                     force_data_sync)
{
    vm_map_entry_t  entry;
    int             caller_flags;
    int             sync_cow_data = FALSE;
    vm_object_t     local_object;
    vm_offset_t     local_offset;
    vm_offset_t     local_start;
    kern_return_t   ret;

    caller_flags = *flags;
    if (!(caller_flags & UPL_COPYOUT_FROM)) {
        sync_cow_data = TRUE;
    }
    if(upl == NULL)
        return KERN_INVALID_ARGUMENT;

REDISCOVER_ENTRY:
    vm_map_lock(map);
    if (vm_map_lookup_entry(map, offset, &entry)) {
        if (entry->object.vm_object == VM_OBJECT_NULL ||
                !entry->object.vm_object->phys_contiguous) {
            if((*upl_size/page_size) > MAX_UPL_TRANSFER) {
                *upl_size = MAX_UPL_TRANSFER * page_size;
            }
        }
        if((entry->vme_end - offset) < *upl_size) {
            *upl_size = entry->vme_end - offset;
        }
        if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
            if (entry->object.vm_object == VM_OBJECT_NULL) {
                *flags = 0;
            } else if (entry->object.vm_object->private) {
                *flags = UPL_DEV_MEMORY;
                if (entry->object.vm_object->phys_contiguous) {
                    *flags |= UPL_PHYS_CONTIG;
                }
            } else {
                *flags = 0;
            }
            vm_map_unlock(map);
            return KERN_SUCCESS;
        }
        /*
         *      Create an object if necessary.
         */
        if (entry->object.vm_object == VM_OBJECT_NULL) {
            entry->object.vm_object = vm_object_allocate(
                (vm_size_t)(entry->vme_end - entry->vme_start));
            entry->offset = 0;
        }
        if (!(caller_flags & UPL_COPYOUT_FROM)) {
            if (!(entry->protection & VM_PROT_WRITE)) {
                vm_map_unlock(map);
                return KERN_PROTECTION_FAILURE;
            }
            if (entry->needs_copy) {
                vm_map_t            local_map;
                vm_object_t         object;
                vm_object_offset_t  offset_hi;
                vm_object_offset_t  offset_lo;
                vm_object_offset_t  new_offset;
                vm_prot_t           prot;
                boolean_t           wired;
                vm_behavior_t       behavior;
                vm_map_version_t    version;
                vm_map_t            pmap_map;

                local_map = map;
                vm_map_lock_write_to_read(map);
                if(vm_map_lookup_locked(&local_map,
                        offset, VM_PROT_WRITE,
                        &version, &object,
                        &new_offset, &prot, &wired,
                        &behavior, &offset_lo,
                        &offset_hi, &pmap_map)) {
                    vm_map_unlock(local_map);
                    return KERN_FAILURE;
                }
                if (pmap_map != map) {
                    vm_map_unlock(pmap_map);
                }
                vm_object_unlock(object);
                vm_map_unlock(local_map);

                goto REDISCOVER_ENTRY;
            }
        }
        if (entry->is_sub_map) {
            vm_map_t    submap;

            submap = entry->object.sub_map;
            local_start = entry->vme_start;
            local_offset = entry->offset;
            vm_map_reference(submap);
            vm_map_unlock(map);

            ret = (vm_map_get_upl(submap,
                    local_offset + (offset - local_start),
                    upl_size, upl, page_list, count,
                    flags, force_data_sync));

            vm_map_deallocate(submap);
            return ret;
        }

        if (sync_cow_data) {
            if (entry->object.vm_object->shadow
                    || entry->object.vm_object->copy) {
                int     flags;

                local_object = entry->object.vm_object;
                local_start = entry->vme_start;
                local_offset = entry->offset;
                vm_object_reference(local_object);
                vm_map_unlock(map);

                if(local_object->copy == NULL) {
                    flags = MEMORY_OBJECT_DATA_SYNC;
                } else {
                    flags = MEMORY_OBJECT_COPY_SYNC;
                }

                if (entry->object.vm_object->shadow &&
                        entry->object.vm_object->copy) {
                    vm_object_lock_request(
                        local_object->shadow,
                        (vm_object_offset_t)
                        ((offset - local_start) +
                         local_offset) +
                        local_object->shadow_offset,
                        *upl_size, FALSE,
                        MEMORY_OBJECT_DATA_SYNC,
                        VM_PROT_NO_CHANGE);
                }
                sync_cow_data = FALSE;
                vm_object_deallocate(local_object);
                goto REDISCOVER_ENTRY;
            }
        }

        if (force_data_sync) {

            local_object = entry->object.vm_object;
            local_start = entry->vme_start;
            local_offset = entry->offset;
            vm_object_reference(local_object);
            vm_map_unlock(map);

            vm_object_lock_request(
                local_object,
                (vm_object_offset_t)
                ((offset - local_start) + local_offset),
                (vm_object_size_t)*upl_size, FALSE,
                MEMORY_OBJECT_DATA_SYNC,
                VM_PROT_NO_CHANGE);
            force_data_sync = FALSE;
            vm_object_deallocate(local_object);
            goto REDISCOVER_ENTRY;
        }

        if(!(entry->object.vm_object->private)) {
            if(*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
                *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
            if(entry->object.vm_object->phys_contiguous) {
                *flags = UPL_PHYS_CONTIG;
            } else {
                *flags = 0;
            }
        } else {
            *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
        }
        local_object = entry->object.vm_object;
        local_offset = entry->offset;
        local_start = entry->vme_start;
        vm_object_reference(local_object);
        vm_map_unlock(map);
        if(caller_flags & UPL_SET_IO_WIRE) {
            ret = (vm_object_iopl_request(local_object,
                    (vm_object_offset_t)
                    ((offset - local_start) + local_offset),
                    *upl_size,
                    upl,
                    page_list,
                    count,
                    caller_flags));
        } else {
            ret = (vm_object_upl_request(local_object,
                    (vm_object_offset_t)
                    ((offset - local_start) + local_offset),
                    *upl_size,
                    upl,
                    page_list,
                    count,
                    caller_flags));
        }
        vm_object_deallocate(local_object);
        return(ret);
    }

    vm_map_unlock(map);
    return(KERN_FAILURE);
}

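/*
 * Illustrative (hypothetical) in-kernel use of vm_map_get_upl() -- the
 * variable names are made up and error handling is elided:
 *
 *      upl_t           upl;
 *      upl_page_info_t pl[MAX_UPL_TRANSFER];
 *      unsigned int    count = MAX_UPL_TRANSFER;
 *      vm_size_t       io_size = size;
 *      int             flags = UPL_COPYOUT_FROM;
 *
 *      kr = vm_map_get_upl(map, addr, &io_size, &upl, pl, &count,
 *              &flags, FALSE);
 *      ... perform the I/O against the pages described by the UPL ...
 *      upl_commit(upl, pl, count);
 *      upl_deallocate(upl);
 */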
/* ******* Temporary Internal calls to UPL for BSD ***** */
kern_return_t
kernel_upl_map(
    vm_map_t        map,
    upl_t           upl,
    vm_offset_t     *dst_addr)
{
    return (vm_upl_map(map, upl, dst_addr));
}

kern_return_t
kernel_upl_unmap(
    vm_map_t        map,
    upl_t           upl)
{
    return(vm_upl_unmap(map, upl));
}

kern_return_t
kernel_upl_commit(
    upl_t                   upl,
    upl_page_info_t         *pl,
    mach_msg_type_number_t  count)
{
    kern_return_t   kr;

    kr = upl_commit(upl, pl, count);
    upl_deallocate(upl);
    return kr;
}

kern_return_t
kernel_upl_commit_range(
    upl_t                   upl,
    vm_offset_t             offset,
    vm_size_t               size,
    int                     flags,
    upl_page_info_array_t   pl,
    mach_msg_type_number_t  count)
{
    boolean_t       finished = FALSE;
    kern_return_t   kr;

    if (flags & UPL_COMMIT_FREE_ON_EMPTY)
        flags |= UPL_COMMIT_NOTIFY_EMPTY;

    kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

    if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
        upl_deallocate(upl);

    return kr;
}

kern_return_t
kernel_upl_abort_range(
    upl_t           upl,
    vm_offset_t     offset,
    vm_size_t       size,
    int             abort_flags)
{
    kern_return_t   kr;
    boolean_t       finished = FALSE;

    if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
        abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

    kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

    if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
        upl_deallocate(upl);

    return kr;
}

kern_return_t
kernel_upl_abort(
    upl_t           upl,
    int             abort_type)
{
    kern_return_t   kr;

    kr = upl_abort(upl, abort_type);
    upl_deallocate(upl);
    return kr;
}

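/*
 * vm_get_shared_region / vm_set_shared_region:
 *
 * Read and replace the shared_region_mapping_t recorded in a task as its
 * system shared region (the split-library region mapped into each
 * process).  These routines only swap the pointer; reference counting is
 * left to the callers.
 */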
kern_return_t
vm_get_shared_region(
    task_t                  task,
    shared_region_mapping_t *shared_region)
{
    *shared_region = (shared_region_mapping_t) task->system_shared_region;
    return KERN_SUCCESS;
}

kern_return_t
vm_set_shared_region(
    task_t                  task,
    shared_region_mapping_t shared_region)
{
    task->system_shared_region = (vm_offset_t) shared_region;
    return KERN_SUCCESS;
}

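/*
 * shared_region_mapping_info:
 *
 * Copy out the fields of a shared_region_mapping_t (regions, sizes,
 * bases, flags, and the link to the next region) under the region lock.
 */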
kern_return_t
shared_region_mapping_info(
    shared_region_mapping_t shared_region,
    ipc_port_t              *text_region,
    vm_size_t               *text_size,
    ipc_port_t              *data_region,
    vm_size_t               *data_size,
    vm_offset_t             *region_mappings,
    vm_offset_t             *client_base,
    vm_offset_t             *alt_base,
    vm_offset_t             *alt_next,
    unsigned int            *fs_base,
    unsigned int            *system,
    int                     *flags,
    shared_region_mapping_t *next)
{
    shared_region_mapping_lock(shared_region);

    *text_region = shared_region->text_region;
    *text_size = shared_region->text_size;
    *data_region = shared_region->data_region;
    *data_size = shared_region->data_size;
    *region_mappings = shared_region->region_mappings;
    *client_base = shared_region->client_base;
    *alt_base = shared_region->alternate_base;
    *alt_next = shared_region->alternate_next;
    *flags = shared_region->flags;
    *fs_base = shared_region->fs_base;
    *system = shared_region->system;
    *next = shared_region->next;

    shared_region_mapping_unlock(shared_region);

    return KERN_SUCCESS;
}

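/*
 * shared_region_object_chain_attach:
 *
 * Link target_region to object_chain_region by allocating a chain element
 * that records the attached region, its existing chain, and its depth.
 * Fails if target_region already has an object chain.
 */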
kern_return_t
shared_region_object_chain_attach(
    shared_region_mapping_t target_region,
    shared_region_mapping_t object_chain_region)
{
    shared_region_object_chain_t    object_ele;

    if(target_region->object_chain)
        return KERN_FAILURE;
    object_ele = (shared_region_object_chain_t)
            kalloc(sizeof (struct shared_region_object_chain));
    shared_region_mapping_lock(object_chain_region);
    target_region->object_chain = object_ele;
    object_ele->object_chain_region = object_chain_region;
    object_ele->next = object_chain_region->object_chain;
    object_ele->depth = object_chain_region->depth;
    object_chain_region->depth++;
    target_region->alternate_next = object_chain_region->alternate_next;
    shared_region_mapping_unlock(object_chain_region);
    return KERN_SUCCESS;
}

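/*
 * shared_region_mapping_create:
 *
 * Allocate and initialize a shared_region_mapping_t describing a text and
 * data region pair.  The new region starts with one reference, an empty
 * object chain, and fs_base/system taken from the current environment.
 */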
kern_return_t
shared_region_mapping_create(
    ipc_port_t              text_region,
    vm_size_t               text_size,
    ipc_port_t              data_region,
    vm_size_t               data_size,
    vm_offset_t             region_mappings,
    vm_offset_t             client_base,
    shared_region_mapping_t *shared_region,
    vm_offset_t             alt_base,
    vm_offset_t             alt_next)
{
    *shared_region = (shared_region_mapping_t)
            kalloc(sizeof (struct shared_region_mapping));
    if(*shared_region == NULL)
        return KERN_FAILURE;
    shared_region_mapping_lock_init((*shared_region));
    (*shared_region)->text_region = text_region;
    (*shared_region)->text_size = text_size;
    (*shared_region)->fs_base = ENV_DEFAULT_ROOT;
    (*shared_region)->system = machine_slot[cpu_number()].cpu_type;
    (*shared_region)->data_region = data_region;
    (*shared_region)->data_size = data_size;
    (*shared_region)->region_mappings = region_mappings;
    (*shared_region)->client_base = client_base;
    (*shared_region)->ref_count = 1;
    (*shared_region)->next = NULL;
    (*shared_region)->object_chain = NULL;
    (*shared_region)->self = *shared_region;
    (*shared_region)->flags = 0;
    (*shared_region)->depth = 0;
    (*shared_region)->default_env_list = NULL;
    (*shared_region)->alternate_base = alt_base;
    (*shared_region)->alternate_next = alt_next;
    return KERN_SUCCESS;
}

kern_return_t
shared_region_mapping_set_alt_next(
    shared_region_mapping_t shared_region,
    vm_offset_t             alt_next)
{
    shared_region->alternate_next = alt_next;
    return KERN_SUCCESS;
}

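/*
 * shared_region_mapping_ref / shared_region_mapping_dealloc_lock:
 *
 * Atomically take and drop references on a shared region.  When the last
 * reference is dropped, the region's mappings are removed, its text/data
 * ports are released, and the structure is freed; the walk then continues
 * down the object chain so that chained regions are released as well.
 */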
kern_return_t
shared_region_mapping_ref(
    shared_region_mapping_t shared_region)
{
    if(shared_region == NULL)
        return KERN_SUCCESS;
    hw_atomic_add(&shared_region->ref_count, 1);
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
shared_region_mapping_dealloc_lock(
    shared_region_mapping_t shared_region,
    int need_lock)
{
    struct shared_region_task_mappings sm_info;
    shared_region_mapping_t next = NULL;
    int ref_count;

    while (shared_region) {
        if ((ref_count =
                hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
            shared_region_mapping_lock(shared_region);

            sm_info.text_region = shared_region->text_region;
            sm_info.text_size = shared_region->text_size;
            sm_info.data_region = shared_region->data_region;
            sm_info.data_size = shared_region->data_size;
            sm_info.region_mappings = shared_region->region_mappings;
            sm_info.client_base = shared_region->client_base;
            sm_info.alternate_base = shared_region->alternate_base;
            sm_info.alternate_next = shared_region->alternate_next;
            sm_info.flags = shared_region->flags;
            sm_info.self = (vm_offset_t)shared_region;

            if(shared_region->region_mappings) {
                lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_lock);
            }
            if(((vm_named_entry_t)
                    (shared_region->text_region->ip_kobject))
                    ->backing.map->pmap) {
                pmap_remove(((vm_named_entry_t)
                    (shared_region->text_region->ip_kobject))
                    ->backing.map->pmap,
                    sm_info.client_base,
                    sm_info.client_base + sm_info.text_size);
            }
            ipc_port_release_send(shared_region->text_region);
            if(shared_region->data_region)
                ipc_port_release_send(shared_region->data_region);
            if (shared_region->object_chain) {
                next = shared_region->object_chain->object_chain_region;
                kfree((vm_offset_t)shared_region->object_chain,
                    sizeof (struct shared_region_object_chain));
            } else {
                next = NULL;
            }
            shared_region_mapping_unlock(shared_region);
            kfree((vm_offset_t)shared_region,
                sizeof (struct shared_region_mapping));
            shared_region = next;
        } else {
            /* Stale indicates that a system region is no */
            /* longer in the default environment list.    */
            if((ref_count == 1) &&
                    (shared_region->flags & SHARED_REGION_SYSTEM)
                    && (shared_region->flags & ~SHARED_REGION_STALE)) {
                remove_default_shared_region_lock(shared_region,need_lock);
            }
            break;
        }
    }
    return KERN_SUCCESS;
}

/*
 * Stub function; always indicates that the lock needs to be taken in the
 * call to lsf_remove_regions_mappings_lock().
 */
kern_return_t
shared_region_mapping_dealloc(
    shared_region_mapping_t shared_region)
{
    return shared_region_mapping_dealloc_lock(shared_region, 1);
}

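/*
 * vm_map_get_phys_page:
 *
 * Translate a virtual offset in a map to the physical page number that
 * currently backs it, walking submaps and shadow chains as needed.
 * Returns 0 if no physical page is resident at that offset.
 */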
ppnum_t
vm_map_get_phys_page(
    vm_map_t        map,
    vm_offset_t     offset)
{
    vm_map_entry_t  entry;
    int             ops;
    int             flags;
    ppnum_t         phys_page = 0;
    vm_object_t     object;

    vm_map_lock(map);
    while (vm_map_lookup_entry(map, offset, &entry)) {

        if (entry->object.vm_object == VM_OBJECT_NULL) {
            vm_map_unlock(map);
            return (ppnum_t) 0;
        }
        if (entry->is_sub_map) {
            vm_map_t    old_map;
            vm_map_lock(entry->object.sub_map);
            old_map = map;
            map = entry->object.sub_map;
            offset = entry->offset + (offset - entry->vme_start);
            vm_map_unlock(old_map);
            continue;
        }
        if (entry->object.vm_object->phys_contiguous) {
            /* These are not standard pageable memory mappings. */
            /* If they are not present in the object they will  */
            /* have to be picked up from the pager through the  */
            /* fault mechanism.                                  */
            if(entry->object.vm_object->shadow_offset == 0) {
                /* need to call vm_fault */
                vm_map_unlock(map);
                vm_fault(map, offset, VM_PROT_NONE,
                    FALSE, THREAD_UNINT, NULL, 0);
                vm_map_lock(map);
                continue;
            }
            offset = entry->offset + (offset - entry->vme_start);
            phys_page = (ppnum_t)
                ((entry->object.vm_object->shadow_offset
                        + offset) >> 12);
            break;

        }
        offset = entry->offset + (offset - entry->vme_start);
        object = entry->object.vm_object;
        vm_object_lock(object);
        while (TRUE) {
            vm_page_t dst_page = vm_page_lookup(object,offset);
            if(dst_page == VM_PAGE_NULL) {
                if(object->shadow) {
                    vm_object_t old_object;
                    vm_object_lock(object->shadow);
                    old_object = object;
                    offset = offset + object->shadow_offset;
                    object = object->shadow;
                    vm_object_unlock(old_object);
                } else {
                    vm_object_unlock(object);
                    break;
                }
            } else {
                phys_page = (ppnum_t)(dst_page->phys_page);
                vm_object_unlock(object);
                break;
            }
        }
        break;

    }

    vm_map_unlock(map);
    return phys_page;
}

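/*
 * kernel_object_iopl_request:
 *
 * Build an I/O UPL directly against the object behind a named entry,
 * after validating that the caller's rights on the entry cover the
 * requested access and that the requested range fits within it.
 */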
kern_return_t
kernel_object_iopl_request(
    vm_named_entry_t        named_entry,
    memory_object_offset_t  offset,
    vm_size_t               *upl_size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     *flags)
{
    vm_object_t     object;
    kern_return_t   ret;

    int             caller_flags;

    caller_flags = *flags;

    /* a few checks to make sure user is obeying rules */
    if(*upl_size == 0) {
        if(offset >= named_entry->size)
            return(KERN_INVALID_RIGHT);
        *upl_size = named_entry->size - offset;
    }
    if(caller_flags & UPL_COPYOUT_FROM) {
        if((named_entry->protection & VM_PROT_READ)
                != VM_PROT_READ) {
            return(KERN_INVALID_RIGHT);
        }
    } else {
        if((named_entry->protection &
                (VM_PROT_READ | VM_PROT_WRITE))
                != (VM_PROT_READ | VM_PROT_WRITE)) {
            return(KERN_INVALID_RIGHT);
        }
    }
    if(named_entry->size < (offset + *upl_size))
        return(KERN_INVALID_ARGUMENT);

    /* The caller's parameter "offset" is defined to be the   */
    /* offset from the beginning of the named entry; convert  */
    /* it into an offset within the backing object.           */
    offset = offset + named_entry->offset;

    if(named_entry->is_sub_map)
        return (KERN_INVALID_ARGUMENT);

    named_entry_lock(named_entry);

    if(named_entry->object) {
        /* This is the case where we are going to map an      */
        /* already mapped object.  If the object is not       */
        /* ready, it is internal: an external object cannot   */
        /* be mapped until it is ready, so we can avoid the   */
        /* ready check in this case.                          */
        vm_object_reference(named_entry->object);
        object = named_entry->object;
        named_entry_unlock(named_entry);
    } else {
        object = vm_object_enter(named_entry->backing.pager,
                named_entry->offset + named_entry->size,
                named_entry->internal,
                FALSE,
                FALSE);
        if (object == VM_OBJECT_NULL) {
            named_entry_unlock(named_entry);
            return(KERN_INVALID_OBJECT);
        }
        vm_object_lock(object);

        /* create an extra reference for the named entry */
        vm_object_reference_locked(object);
        named_entry->object = object;
        named_entry_unlock(named_entry);

        /* wait for object (if any) to be ready */
        while (!object->pager_ready) {
            vm_object_wait(object,
                VM_OBJECT_EVENT_PAGER_READY,
                THREAD_UNINT);
            vm_object_lock(object);
        }
        vm_object_unlock(object);
    }

    if (!object->private) {
        if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
            *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
        if (object->phys_contiguous) {
            *flags = UPL_PHYS_CONTIG;
        } else {
            *flags = 0;
        }
    } else {
        *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
    }

    ret = vm_object_iopl_request(object,
                offset,
                *upl_size,
                upl_ptr,
                user_page_list,
                page_list_count,
                caller_flags);
    vm_object_deallocate(object);
    return ret;
}

#endif /* VM_CPM */