1c79356b 1/*
0b4e3aa0 2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
1c79356b 11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52/*
53 * File: vm/vm_user.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young
55 *
56 * User-exported virtual memory functions.
57 */
58
59#include <vm_cpm.h>
60#include <mach/boolean.h>
61#include <mach/kern_return.h>
62#include <mach/mach_types.h> /* to get vm_address_t */
63#include <mach/memory_object.h>
64#include <mach/std_types.h> /* to get pointer_t */
65#include <mach/vm_attributes.h>
66#include <mach/vm_param.h>
67#include <mach/vm_statistics.h>
68#include <mach/vm_map_server.h>
69#include <mach/mach_syscalls.h>
9bccf70c 70
1c79356b 71#include <mach/shared_memory_server.h>
9bccf70c 72#include <vm/vm_shared_memory_server.h>
73
74#include <kern/host.h>
75#include <kern/task.h>
76#include <kern/misc_protos.h>
77#include <vm/vm_map.h>
78#include <vm/vm_object.h>
79#include <vm/vm_page.h>
80#include <vm/memory_object.h>
81#include <vm/vm_pageout.h>
82
83
84
85vm_size_t upl_offset_to_pagelist = 0;
86
87#if VM_CPM
88#include <vm/cpm.h>
89#endif /* VM_CPM */
90
91ipc_port_t dynamic_pager_control_port=NULL;
92
93/*
 94 * vm_allocate allocates "zero fill" memory in the specified
95 * map.
96 */
97kern_return_t
98vm_allocate(
99 register vm_map_t map,
100 register vm_offset_t *addr,
101 register vm_size_t size,
102 int flags)
103{
104 kern_return_t result;
105 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
106
107 if (map == VM_MAP_NULL)
108 return(KERN_INVALID_ARGUMENT);
109 if (size == 0) {
110 *addr = 0;
111 return(KERN_SUCCESS);
112 }
113
114 if (anywhere)
115 *addr = vm_map_min(map);
116 else
117 *addr = trunc_page(*addr);
118 size = round_page(size);
119 if (size == 0) {
120 return(KERN_INVALID_ARGUMENT);
121 }
122
123 result = vm_map_enter(
124 map,
125 addr,
126 size,
127 (vm_offset_t)0,
128 flags,
129 VM_OBJECT_NULL,
130 (vm_object_offset_t)0,
131 FALSE,
132 VM_PROT_DEFAULT,
133 VM_PROT_ALL,
134 VM_INHERIT_DEFAULT);
135
136 return(result);
137}
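/*
 * Illustrative sketch (not part of this file): a typical user-space
 * caller pairs vm_allocate with vm_deallocate.  "addr", "len" and "kr"
 * are assumed caller locals; mach_task_self() names the caller's own
 * map and vm_page_size is the exported page size.
 *
 *	vm_address_t	addr = 0;
 *	vm_size_t	len = 4 * vm_page_size;
 *	kern_return_t	kr;
 *
 *	kr = vm_allocate(mach_task_self(), &addr, len, VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		memset((void *)addr, 0xA5, len);
 *		(void) vm_deallocate(mach_task_self(), addr, len);
 *	}
 */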
138
139/*
140 * vm_deallocate deallocates the specified range of addresses in the
141 * specified address map.
142 */
143kern_return_t
144vm_deallocate(
145 register vm_map_t map,
146 vm_offset_t start,
147 vm_size_t size)
148{
149 if (map == VM_MAP_NULL)
150 return(KERN_INVALID_ARGUMENT);
151
152 if (size == (vm_offset_t) 0)
153 return(KERN_SUCCESS);
154
155 return(vm_map_remove(map, trunc_page(start),
156 round_page(start+size), VM_MAP_NO_FLAGS));
157}
158
159/*
160 * vm_inherit sets the inheritance of the specified range in the
161 * specified map.
162 */
163kern_return_t
164vm_inherit(
165 register vm_map_t map,
166 vm_offset_t start,
167 vm_size_t size,
168 vm_inherit_t new_inheritance)
169{
170 if (map == VM_MAP_NULL)
171 return(KERN_INVALID_ARGUMENT);
172
173 if (new_inheritance > VM_INHERIT_LAST_VALID)
174 return(KERN_INVALID_ARGUMENT);
175
176 return(vm_map_inherit(map,
177 trunc_page(start),
178 round_page(start+size),
179 new_inheritance));
180}
181
182/*
183 * vm_protect sets the protection of the specified range in the
184 * specified map.
185 */
186
187kern_return_t
188vm_protect(
189 register vm_map_t map,
190 vm_offset_t start,
191 vm_size_t size,
192 boolean_t set_maximum,
193 vm_prot_t new_protection)
194{
195 if ((map == VM_MAP_NULL) ||
196 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
197 return(KERN_INVALID_ARGUMENT);
198
199 return(vm_map_protect(map,
200 trunc_page(start),
201 round_page(start+size),
202 new_protection,
203 set_maximum));
204}
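/*
 * Illustrative sketch (not part of this file): making an existing
 * region read-only from user space.  "addr", "len" and "kr" are
 * assumed caller locals.  Passing set_maximum = FALSE changes only the
 * current protection; TRUE would also lower the maximum protection,
 * which can never be raised again for that range.
 *
 *	kr = vm_protect(mach_task_self(), addr, len,
 *			FALSE, VM_PROT_READ);
 */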
205
206/*
207 * Handle machine-specific attributes for a mapping, such
 208 * as cacheability, migratability, etc.
209 */
210kern_return_t
211vm_machine_attribute(
212 vm_map_t map,
213 vm_address_t address,
214 vm_size_t size,
215 vm_machine_attribute_t attribute,
216 vm_machine_attribute_val_t* value) /* IN/OUT */
217{
218 if (map == VM_MAP_NULL)
219 return(KERN_INVALID_ARGUMENT);
220
221 return vm_map_machine_attribute(map, address, size, attribute, value);
222}
223
224kern_return_t
225vm_read(
226 vm_map_t map,
227 vm_address_t address,
228 vm_size_t size,
229 pointer_t *data,
230 mach_msg_type_number_t *data_size)
231{
232 kern_return_t error;
233 vm_map_copy_t ipc_address;
234
235 if (map == VM_MAP_NULL)
236 return(KERN_INVALID_ARGUMENT);
237
238 if ((error = vm_map_copyin(map,
239 address,
240 size,
241 FALSE, /* src_destroy */
242 &ipc_address)) == KERN_SUCCESS) {
243 *data = (pointer_t) ipc_address;
244 *data_size = size;
245 }
246 return(error);
247}
248
249kern_return_t
250vm_read_list(
251 vm_map_t map,
252 vm_read_entry_t data_list,
253 mach_msg_type_number_t count)
254{
255 mach_msg_type_number_t i;
256 kern_return_t error;
257 vm_map_copy_t ipc_address;
258
259 if (map == VM_MAP_NULL)
260 return(KERN_INVALID_ARGUMENT);
261
262 for(i=0; i<count; i++) {
263 error = vm_map_copyin(map,
264 data_list[i].address,
265 data_list[i].size,
266 FALSE, /* src_destroy */
267 &ipc_address);
268 if(error != KERN_SUCCESS) {
269 data_list[i].address = (vm_address_t)0;
270 data_list[i].size = (vm_size_t)0;
271 break;
272 }
273 if(data_list[i].size != 0) {
274 error = vm_map_copyout(current_task()->map,
275 &(data_list[i].address),
276 (vm_map_copy_t) ipc_address);
277 if(error != KERN_SUCCESS) {
278 data_list[i].address = (vm_address_t)0;
279 data_list[i].size = (vm_size_t)0;
280 break;
281 }
282 }
283 }
284 return(error);
285}
286
287/*
288 * This routine reads from the specified map and overwrites part of the current
 289 * activation's map. Because it assumes that the current thread is local,
 290 * it is no longer cluster-safe without a fully supportive local proxy thread/
 291 * task (but we don't support clusters anymore, so this is moot).
292 */
293
294#define VM_OVERWRITE_SMALL 512
295
296kern_return_t
297vm_read_overwrite(
298 vm_map_t map,
299 vm_address_t address,
300 vm_size_t size,
301 vm_address_t data,
302 vm_size_t *data_size)
303{
304 struct {
305 long align;
306 char buf[VM_OVERWRITE_SMALL];
307 } inbuf;
308 vm_map_t oldmap;
309 kern_return_t error = KERN_SUCCESS;
310 vm_map_copy_t copy;
311
312 if (map == VM_MAP_NULL)
313 return(KERN_INVALID_ARGUMENT);
314
315 if (size <= VM_OVERWRITE_SMALL) {
316 if(vm_map_read_user(map, (vm_offset_t)address,
317 (vm_offset_t)&inbuf, size)) {
318 error = KERN_INVALID_ADDRESS;
319 } else {
320 if(vm_map_write_user(current_map(),
321 (vm_offset_t)&inbuf, (vm_offset_t)data, size))
322 error = KERN_INVALID_ADDRESS;
323 }
324 }
325 else {
326 if ((error = vm_map_copyin(map,
327 address,
328 size,
329 FALSE, /* src_destroy */
330 &copy)) == KERN_SUCCESS) {
331 if ((error = vm_map_copy_overwrite(
332 current_act()->map,
333 data,
334 copy,
335 FALSE)) == KERN_SUCCESS) {
336 }
337 else {
338 vm_map_copy_discard(copy);
339 }
340 }
341 }
342 *data_size = size;
343 return(error);
344}
345
346
347
348
349/*ARGSUSED*/
350kern_return_t
351vm_write(
352 vm_map_t map,
353 vm_address_t address,
354 vm_offset_t data,
355 mach_msg_type_number_t size)
356{
357 if (map == VM_MAP_NULL)
358 return KERN_INVALID_ARGUMENT;
359
360 return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
361 FALSE /* interruptible XXX */);
362}
363
364kern_return_t
365vm_copy(
366 vm_map_t map,
367 vm_address_t source_address,
368 vm_size_t size,
369 vm_address_t dest_address)
370{
371 vm_map_copy_t copy;
372 kern_return_t kr;
373
374 if (map == VM_MAP_NULL)
375 return KERN_INVALID_ARGUMENT;
376
377 kr = vm_map_copyin(map, source_address, size,
378 FALSE, &copy);
379 if (kr != KERN_SUCCESS)
380 return kr;
381
382 kr = vm_map_copy_overwrite(map, dest_address, copy,
383 FALSE /* interruptible XXX */);
384 if (kr != KERN_SUCCESS) {
385 vm_map_copy_discard(copy);
386 return kr;
387 }
388
389 return KERN_SUCCESS;
390}
391
392/*
393 * Routine: vm_map
394 */
395kern_return_t
396vm_map_64(
397 vm_map_t target_map,
398 vm_offset_t *address,
399 vm_size_t initial_size,
400 vm_offset_t mask,
401 int flags,
402 ipc_port_t port,
403 vm_object_offset_t offset,
404 boolean_t copy,
405 vm_prot_t cur_protection,
406 vm_prot_t max_protection,
407 vm_inherit_t inheritance)
408{
409 register
410 vm_object_t object;
411 vm_prot_t prot;
412 vm_object_size_t size = (vm_object_size_t)initial_size;
413 kern_return_t result;
414
415 /*
416 * Check arguments for validity
417 */
418 if ((target_map == VM_MAP_NULL) ||
419 (cur_protection & ~VM_PROT_ALL) ||
420 (max_protection & ~VM_PROT_ALL) ||
421 (inheritance > VM_INHERIT_LAST_VALID) ||
422 size == 0)
423 return(KERN_INVALID_ARGUMENT);
424
425 /*
426 * Find the vm object (if any) corresponding to this port.
427 */
428 if (!IP_VALID(port)) {
429 object = VM_OBJECT_NULL;
430 offset = 0;
431 copy = FALSE;
432 } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
433 vm_named_entry_t named_entry;
434
435 named_entry = (vm_named_entry_t)port->ip_kobject;
436 /* a few checks to make sure user is obeying rules */
437 if(size == 0) {
438 if(offset >= named_entry->size)
439 return(KERN_INVALID_RIGHT);
440 size = named_entry->size - offset;
441 }
442 if((named_entry->protection & max_protection) != max_protection)
443 return(KERN_INVALID_RIGHT);
444 if((named_entry->protection & cur_protection) != cur_protection)
445 return(KERN_INVALID_RIGHT);
446 if(named_entry->size < (offset + size))
447 return(KERN_INVALID_ARGUMENT);
448
 449 	/* the caller's parameter offset is defined to be the */
 450 	/* offset from the beginning of the named entry's offset in the object */
451 offset = offset + named_entry->offset;
452
453 named_entry_lock(named_entry);
454 if(named_entry->is_sub_map) {
455 vm_map_entry_t map_entry;
456
457 named_entry_unlock(named_entry);
458 *address = trunc_page(*address);
459 size = round_page(size);
460 vm_object_reference(vm_submap_object);
461 if ((result = vm_map_enter(target_map,
462 address, size, mask, flags,
463 vm_submap_object, 0,
464 FALSE,
465 cur_protection, max_protection, inheritance
466 )) != KERN_SUCCESS) {
467 vm_object_deallocate(vm_submap_object);
468 } else {
469 char alias;
470
471 VM_GET_FLAGS_ALIAS(flags, alias);
472 if ((alias == VM_MEMORY_SHARED_PMAP) &&
473 !copy) {
474 vm_map_submap(target_map, *address,
475 (*address) + size,
476 named_entry->backing.map,
477 (vm_offset_t)offset, TRUE);
478 } else {
479 vm_map_submap(target_map, *address,
480 (*address) + size,
481 named_entry->backing.map,
482 (vm_offset_t)offset, FALSE);
483 }
484 if(copy) {
485 if(vm_map_lookup_entry(
486 target_map, *address, &map_entry)) {
487 map_entry->needs_copy = TRUE;
488 }
489 }
490 }
491 return(result);
492
493 } else if(named_entry->object) {
494 /* This is the case where we are going to map */
495 /* an already mapped object. If the object is */
 496 		/* not ready, it is internal. An external */
 497 		/* object cannot be mapped until it is ready, */
 498 		/* so we can avoid the ready check */
499 /* in this case. */
500 named_entry_unlock(named_entry);
501 vm_object_reference(named_entry->object);
502 object = named_entry->object;
503 } else {
504 object = vm_object_enter(named_entry->backing.pager,
505 named_entry->size,
506 named_entry->internal,
507 FALSE,
508 FALSE);
509 if (object == VM_OBJECT_NULL) {
510 named_entry_unlock(named_entry);
511 return(KERN_INVALID_OBJECT);
512 }
de355530 513 object->true_share = TRUE;
514 named_entry->object = object;
515 named_entry_unlock(named_entry);
516 /* create an extra reference for the named entry */
517 vm_object_reference(named_entry->object);
518 /* wait for object (if any) to be ready */
519 if (object != VM_OBJECT_NULL) {
520 vm_object_lock(object);
521 while (!object->pager_ready) {
522 vm_object_wait(object,
523 VM_OBJECT_EVENT_PAGER_READY,
524 THREAD_UNINT);
525 vm_object_lock(object);
526 }
de355530 527 vm_object_unlock(object);
528 }
529 }
530 } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
531 /*
532 * JMM - This is temporary until we unify named entries
533 * and raw memory objects.
534 *
535 * Detected fake ip_kotype for a memory object. In
536 * this case, the port isn't really a port at all, but
537 * instead is just a raw memory object.
538 */
539
540 if ((object = vm_object_enter((memory_object_t)port,
541 size, FALSE, FALSE, FALSE))
542 == VM_OBJECT_NULL)
543 return(KERN_INVALID_OBJECT);
544
545 /* wait for object (if any) to be ready */
546 if (object != VM_OBJECT_NULL) {
547 if(object == kernel_object) {
548 printf("Warning: Attempt to map kernel object"
549 " by a non-private kernel entity\n");
550 return(KERN_INVALID_OBJECT);
551 }
552 vm_object_lock(object);
553 while (!object->pager_ready) {
554 vm_object_wait(object,
555 VM_OBJECT_EVENT_PAGER_READY,
556 THREAD_UNINT);
557 vm_object_lock(object);
558 }
559 vm_object_unlock(object);
560 }
561 } else {
562 return (KERN_INVALID_OBJECT);
563 }
564
565 *address = trunc_page(*address);
566 size = round_page(size);
567
568 /*
569 * Perform the copy if requested
570 */
571
572 if (copy) {
573 vm_object_t new_object;
574 vm_object_offset_t new_offset;
575
576 result = vm_object_copy_strategically(object, offset, size,
577 &new_object, &new_offset,
578 &copy);
579
580
581 if (result == KERN_MEMORY_RESTART_COPY) {
582 boolean_t success;
583 boolean_t src_needs_copy;
584
585 /*
586 * XXX
587 * We currently ignore src_needs_copy.
588 * This really is the issue of how to make
589 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
590 * non-kernel users to use. Solution forthcoming.
591 * In the meantime, since we don't allow non-kernel
592 * memory managers to specify symmetric copy,
593 * we won't run into problems here.
594 */
595 new_object = object;
596 new_offset = offset;
597 success = vm_object_copy_quickly(&new_object,
598 new_offset, size,
599 &src_needs_copy,
600 &copy);
601 assert(success);
602 result = KERN_SUCCESS;
603 }
604 /*
605 * Throw away the reference to the
606 * original object, as it won't be mapped.
607 */
608
609 vm_object_deallocate(object);
610
611 if (result != KERN_SUCCESS)
612 return (result);
613
614 object = new_object;
615 offset = new_offset;
616 }
617
618 if ((result = vm_map_enter(target_map,
619 address, size, mask, flags,
620 object, offset,
621 copy,
622 cur_protection, max_protection, inheritance
623 )) != KERN_SUCCESS)
624 vm_object_deallocate(object);
625 return(result);
626}
627
628/* temporary, until world build */
 629kern_return_t
vm_map(
630 vm_map_t target_map,
631 vm_offset_t *address,
632 vm_size_t size,
633 vm_offset_t mask,
634 int flags,
635 ipc_port_t port,
636 vm_offset_t offset,
637 boolean_t copy,
638 vm_prot_t cur_protection,
639 vm_prot_t max_protection,
640 vm_inherit_t inheritance)
641{
de355530 642 	return vm_map_64(target_map, address, size, mask, flags,
643 port, (vm_object_offset_t)offset, copy,
644 cur_protection, max_protection, inheritance);
645}
646
647
648/*
649 * NOTE: this routine (and this file) will no longer require mach_host_server.h
650 * when vm_wire is changed to use ledgers.
651 */
652#include <mach/mach_host_server.h>
653/*
654 * Specify that the range of the virtual address space
655 * of the target task must not cause page faults for
656 * the indicated accesses.
657 *
658 * [ To unwire the pages, specify VM_PROT_NONE. ]
659 */
660kern_return_t
661vm_wire(
662 host_priv_t host_priv,
663 register vm_map_t map,
664 vm_offset_t start,
665 vm_size_t size,
666 vm_prot_t access)
667{
668 kern_return_t rc;
669
670 if (host_priv == HOST_PRIV_NULL)
671 return KERN_INVALID_HOST;
672
673 assert(host_priv == &realhost);
674
675 if (map == VM_MAP_NULL)
676 return KERN_INVALID_TASK;
677
678 if (access & ~VM_PROT_ALL)
679 return KERN_INVALID_ARGUMENT;
680
681 if (access != VM_PROT_NONE) {
682 rc = vm_map_wire(map, trunc_page(start),
683 round_page(start+size), access, TRUE);
1c79356b 684 } else {
685 rc = vm_map_unwire(map, trunc_page(start),
686 round_page(start+size), TRUE);
687 }
688 return rc;
689}
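/*
 * Illustrative sketch (not part of this file): wiring and later
 * unwiring a range.  "host_priv" is assumed to be the privileged host
 * port, which only privileged tasks hold; everyone else gets
 * KERN_INVALID_HOST.  "addr", "len" and "kr" are assumed caller locals.
 *
 *	kr = vm_wire(host_priv, mach_task_self(), addr, len,
 *		     VM_PROT_READ | VM_PROT_WRITE);
 *	...
 *	kr = vm_wire(host_priv, mach_task_self(), addr, len,
 *		     VM_PROT_NONE);
 */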
690
691/*
692 * vm_msync
693 *
 694 *	Synchronizes the specified memory range with its backing store
 695 *	image by either flushing or cleaning the contents to the appropriate
 696 *	memory manager, engaging in a memory object synchronize dialog with
 697 *	the manager. The client doesn't return until the manager issues an
 698 *	m_o_s_completed message. MIG magically converts the user task parameter
 699 *	to the task's address map.
700 *
701 * interpretation of sync_flags
702 * VM_SYNC_INVALIDATE - discard pages, only return precious
703 * pages to manager.
704 *
705 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
706 * - discard pages, write dirty or precious
707 * pages back to memory manager.
708 *
709 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
710 * - write dirty or precious pages back to
711 * the memory manager.
712 *
713 * NOTE
714 * The memory object attributes have not yet been implemented, this
715 * function will have to deal with the invalidate attribute
716 *
717 * RETURNS
718 * KERN_INVALID_TASK Bad task parameter
719 * KERN_INVALID_ARGUMENT both sync and async were specified.
720 * KERN_SUCCESS The usual.
721 */
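/*
 * Illustrative sketch (not part of this file) of the flag combinations
 * described above; "addr", "len" and "kr" are assumed caller locals.
 *
 *	Write dirty pages back to the pager and wait for completion:
 *		kr = vm_msync(mach_task_self(), addr, len,
 *			      VM_SYNC_SYNCHRONOUS);
 *
 *	Write dirty pages back, then discard the resident pages:
 *		kr = vm_msync(mach_task_self(), addr, len,
 *			      VM_SYNC_SYNCHRONOUS | VM_SYNC_INVALIDATE);
 */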
722
723kern_return_t
724vm_msync(
725 vm_map_t map,
726 vm_address_t address,
727 vm_size_t size,
728 vm_sync_t sync_flags)
729{
730 msync_req_t msr;
731 msync_req_t new_msr;
732 queue_chain_t req_q; /* queue of requests for this msync */
733 vm_map_entry_t entry;
734 vm_size_t amount_left;
735 vm_object_offset_t offset;
736 boolean_t do_sync_req;
737 boolean_t modifiable;
738
739
740 if ((sync_flags & VM_SYNC_ASYNCHRONOUS) &&
741 (sync_flags & VM_SYNC_SYNCHRONOUS))
742 return(KERN_INVALID_ARGUMENT);
743
744 /*
745 * align address and size on page boundaries
746 */
747 size = round_page(address + size) - trunc_page(address);
748 address = trunc_page(address);
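	/*
	 * Worked example, assuming a 4K page size: address 0x5123 and
	 * size 0x100 cover bytes 0x5123..0x5222, so the two lines above
	 * yield size = round_page(0x5223) - trunc_page(0x5123)
	 *            = 0x6000 - 0x5000 = 0x1000
	 * and address = 0x5000, i.e. exactly the one page containing
	 * the original range.
	 */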
749
750 if (map == VM_MAP_NULL)
751 return(KERN_INVALID_TASK);
752
753 if (size == 0)
754 return(KERN_SUCCESS);
755
756 queue_init(&req_q);
757 amount_left = size;
758
759 while (amount_left > 0) {
760 vm_size_t flush_size;
761 vm_object_t object;
762
763 vm_map_lock(map);
764 if (!vm_map_lookup_entry(map, address, &entry)) {
765 vm_size_t skip;
766
767 /*
768 * hole in the address map.
769 */
770
771 /*
772 * Check for empty map.
773 */
774 if (entry == vm_map_to_entry(map) &&
775 entry->vme_next == entry) {
776 vm_map_unlock(map);
777 break;
778 }
779 /*
780 * Check that we don't wrap and that
781 * we have at least one real map entry.
782 */
783 if ((map->hdr.nentries == 0) ||
784 (entry->vme_next->vme_start < address)) {
785 vm_map_unlock(map);
786 break;
787 }
788 /*
789 * Move up to the next entry if needed
790 */
791 skip = (entry->vme_next->vme_start - address);
792 if (skip >= amount_left)
793 amount_left = 0;
794 else
795 amount_left -= skip;
796 address = entry->vme_next->vme_start;
797 vm_map_unlock(map);
798 continue;
799 }
800
801 offset = address - entry->vme_start;
802
803 /*
804 * do we have more to flush than is contained in this
 805 	 * entry?
806 */
807 if (amount_left + entry->vme_start + offset > entry->vme_end) {
808 flush_size = entry->vme_end -
809 (entry->vme_start + offset);
810 } else {
811 flush_size = amount_left;
812 }
813 amount_left -= flush_size;
814 address += flush_size;
815
816 if (entry->is_sub_map == TRUE) {
817 vm_map_t local_map;
818 vm_offset_t local_offset;
819
820 local_map = entry->object.sub_map;
821 local_offset = entry->offset;
822 vm_map_unlock(map);
823 vm_msync(
824 local_map,
825 local_offset,
826 flush_size,
827 sync_flags);
828 continue;
829 }
830 object = entry->object.vm_object;
831
832 /*
833 * We can't sync this object if the object has not been
834 * created yet
835 */
836 if (object == VM_OBJECT_NULL) {
837 vm_map_unlock(map);
838 continue;
839 }
840 offset += entry->offset;
841 modifiable = (entry->protection & VM_PROT_WRITE)
842 != VM_PROT_NONE;
843
844 vm_object_lock(object);
845
846 if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) {
847 boolean_t kill_pages = 0;
848
849 if (sync_flags & VM_SYNC_KILLPAGES) {
850 if (object->ref_count == 1 && !entry->needs_copy && !object->shadow)
851 kill_pages = 1;
852 else
853 kill_pages = -1;
854 }
855 if (kill_pages != -1)
0b4e3aa0 856 vm_object_deactivate_pages(object, offset,
857 (vm_object_size_t)flush_size, kill_pages);
858 vm_object_unlock(object);
859 vm_map_unlock(map);
860 continue;
861 }
862 /*
863 * We can't sync this object if there isn't a pager.
864 * Don't bother to sync internal objects, since there can't
865 * be any "permanent" storage for these objects anyway.
866 */
867 if ((object->pager == MEMORY_OBJECT_NULL) ||
868 (object->internal) || (object->private)) {
869 vm_object_unlock(object);
870 vm_map_unlock(map);
871 continue;
872 }
873 /*
874 * keep reference on the object until syncing is done
875 */
876 assert(object->ref_count > 0);
877 object->ref_count++;
878 vm_object_res_reference(object);
879 vm_object_unlock(object);
880
881 vm_map_unlock(map);
882
0b4e3aa0 883 do_sync_req = vm_object_sync(object,
884 offset,
885 flush_size,
886 sync_flags & VM_SYNC_INVALIDATE,
887 (modifiable &&
888 (sync_flags & VM_SYNC_SYNCHRONOUS ||
889 sync_flags & VM_SYNC_ASYNCHRONOUS)));
890
891 /*
 892 		 * only send an m_o_s if we returned pages or if the entry
 893 		 * is writable (i.e. dirty pages may have already been sent back)
894 */
895 if (!do_sync_req && !modifiable) {
896 vm_object_deallocate(object);
897 continue;
898 }
899 msync_req_alloc(new_msr);
900
901 vm_object_lock(object);
902 offset += object->paging_offset;
903
904 new_msr->offset = offset;
905 new_msr->length = flush_size;
906 new_msr->object = object;
907 new_msr->flag = VM_MSYNC_SYNCHRONIZING;
908re_iterate:
909 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
910 /*
911 * need to check for overlapping entry, if found, wait
912 * on overlapping msr to be done, then reiterate
913 */
914 msr_lock(msr);
915 if (msr->flag == VM_MSYNC_SYNCHRONIZING &&
916 ((offset >= msr->offset &&
917 offset < (msr->offset + msr->length)) ||
918 (msr->offset >= offset &&
919 msr->offset < (offset + flush_size))))
920 {
921 assert_wait((event_t) msr,THREAD_INTERRUPTIBLE);
922 msr_unlock(msr);
923 vm_object_unlock(object);
924 thread_block((void (*)(void))0);
925 vm_object_lock(object);
926 goto re_iterate;
927 }
928 msr_unlock(msr);
929 }/* queue_iterate */
930
931 queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q);
932 vm_object_unlock(object);
933
934 queue_enter(&req_q, new_msr, msync_req_t, req_q);
935
936 (void) memory_object_synchronize(
937 object->pager,
938 offset,
939 flush_size,
940 sync_flags);
941 }/* while */
942
943 /*
 944 	 * wait for memory_object_synchronize_completed messages from pager(s)
945 */
946
947 while (!queue_empty(&req_q)) {
948 msr = (msync_req_t)queue_first(&req_q);
949 msr_lock(msr);
950 while(msr->flag != VM_MSYNC_DONE) {
951 assert_wait((event_t) msr, THREAD_INTERRUPTIBLE);
952 msr_unlock(msr);
953 thread_block((void (*)(void))0);
954 msr_lock(msr);
955 }/* while */
956 queue_remove(&req_q, msr, msync_req_t, req_q);
957 msr_unlock(msr);
958 vm_object_deallocate(msr->object);
959 msync_req_free(msr);
960 }/* queue_iterate */
961
962 return(KERN_SUCCESS);
963}/* vm_msync */
964
965
966/*
967 * task_wire
968 *
969 * Set or clear the map's wiring_required flag. This flag, if set,
970 * will cause all future virtual memory allocation to allocate
971 * user wired memory. Unwiring pages wired down as a result of
972 * this routine is done with the vm_wire interface.
973 */
974kern_return_t
975task_wire(
976 vm_map_t map,
977 boolean_t must_wire)
978{
979 if (map == VM_MAP_NULL)
980 return(KERN_INVALID_ARGUMENT);
981
982 if (must_wire)
983 map->wiring_required = TRUE;
984 else
985 map->wiring_required = FALSE;
986
987 return(KERN_SUCCESS);
988}
989
990/*
991 * vm_behavior_set sets the paging behavior attribute for the
992 * specified range in the specified map. This routine will fail
993 * with KERN_INVALID_ADDRESS if any address in [start,start+size)
994 * is not a valid allocated or reserved memory region.
995 */
996kern_return_t
997vm_behavior_set(
998 vm_map_t map,
999 vm_offset_t start,
1000 vm_size_t size,
1001 vm_behavior_t new_behavior)
1002{
1003 if (map == VM_MAP_NULL)
1004 return(KERN_INVALID_ARGUMENT);
1005
1006 return(vm_map_behavior_set(map, trunc_page(start),
1007 round_page(start+size), new_behavior));
1008}
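/*
 * Illustrative sketch (not part of this file): advising the kernel that
 * a mapping will be scanned sequentially, then restoring the default
 * behavior; "addr", "len" and "kr" are assumed caller locals.
 *
 *	kr = vm_behavior_set(mach_task_self(), addr, len,
 *			     VM_BEHAVIOR_SEQUENTIAL);
 *	...
 *	kr = vm_behavior_set(mach_task_self(), addr, len,
 *			     VM_BEHAVIOR_DEFAULT);
 */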
1009
1010#if VM_CPM
1011/*
1012 * Control whether the kernel will permit use of
1013 * vm_allocate_cpm at all.
1014 */
1015unsigned int vm_allocate_cpm_enabled = 1;
1016
1017/*
1018 * Ordinarily, the right to allocate CPM is restricted
1019 * to privileged applications (those that can gain access
1020 * to the host port). Set this variable to zero if you
1021 * want to let any application allocate CPM.
1022 */
1023unsigned int vm_allocate_cpm_privileged = 0;
1024
1025/*
1026 * Allocate memory in the specified map, with the caveat that
1027 * the memory is physically contiguous. This call may fail
1028 * if the system can't find sufficient contiguous memory.
1029 * This call may cause or lead to heart-stopping amounts of
1030 * paging activity.
1031 *
1032 * Memory obtained from this call should be freed in the
1033 * normal way, viz., via vm_deallocate.
1034 */
1035kern_return_t
1036vm_allocate_cpm(
1037 host_priv_t host_priv,
1038 register vm_map_t map,
1039 register vm_offset_t *addr,
1040 register vm_size_t size,
1041 int flags)
1042{
1043 vm_object_t cpm_obj;
1044 pmap_t pmap;
1045 vm_page_t m, pages;
1046 kern_return_t kr;
1047 vm_offset_t va, start, end, offset;
1048#if MACH_ASSERT
1049 extern vm_offset_t avail_start, avail_end;
1050 vm_offset_t prev_addr;
1051#endif /* MACH_ASSERT */
1052
1053 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
1054
1055 if (!vm_allocate_cpm_enabled)
1056 return KERN_FAILURE;
1057
1058 if (vm_allocate_cpm_privileged && host_priv == HOST_PRIV_NULL)
1059 return KERN_INVALID_HOST;
1060
1061 if (map == VM_MAP_NULL)
1062 return KERN_INVALID_ARGUMENT;
1063
1064 assert(host_priv == &realhost);
1065
1066 if (size == 0) {
1067 *addr = 0;
1068 return KERN_SUCCESS;
1069 }
1070
1071 if (anywhere)
1072 *addr = vm_map_min(map);
1073 else
1074 *addr = trunc_page(*addr);
1075 size = round_page(size);
1076
1077 if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS)
1078 return kr;
1079
1080 cpm_obj = vm_object_allocate(size);
1081 assert(cpm_obj != VM_OBJECT_NULL);
1082 assert(cpm_obj->internal);
1083 assert(cpm_obj->size == size);
1084 assert(cpm_obj->can_persist == FALSE);
1085 assert(cpm_obj->pager_created == FALSE);
1086 assert(cpm_obj->pageout == FALSE);
1087 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1088
1089 /*
1090 * Insert pages into object.
1091 */
1092
1093 vm_object_lock(cpm_obj);
1094 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1095 m = pages;
1096 pages = NEXT_PAGE(m);
1097
1098 assert(!m->gobbled);
1099 assert(!m->wanted);
1100 assert(!m->pageout);
1101 assert(!m->tabled);
1102 assert(m->busy);
de355530 1103 assert(m->phys_addr>=avail_start && m->phys_addr<=avail_end);
1104
1105 m->busy = FALSE;
1106 vm_page_insert(m, cpm_obj, offset);
1107 }
1108 assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
1109 vm_object_unlock(cpm_obj);
1110
1111 /*
1112 * Hang onto a reference on the object in case a
1113 * multi-threaded application for some reason decides
1114 * to deallocate the portion of the address space into
1115 * which we will insert this object.
1116 *
1117 * Unfortunately, we must insert the object now before
1118 * we can talk to the pmap module about which addresses
1119 * must be wired down. Hence, the race with a multi-
1120 * threaded app.
1121 */
1122 vm_object_reference(cpm_obj);
1123
1124 /*
1125 * Insert object into map.
1126 */
1127
1128 kr = vm_map_enter(
1129 map,
1130 addr,
1131 size,
1132 (vm_offset_t)0,
1133 flags,
1134 cpm_obj,
1135 (vm_object_offset_t)0,
1136 FALSE,
1137 VM_PROT_ALL,
1138 VM_PROT_ALL,
1139 VM_INHERIT_DEFAULT);
1140
1141 if (kr != KERN_SUCCESS) {
1142 /*
1143 * A CPM object doesn't have can_persist set,
1144 * so all we have to do is deallocate it to
1145 * free up these pages.
1146 */
1147 assert(cpm_obj->pager_created == FALSE);
1148 assert(cpm_obj->can_persist == FALSE);
1149 assert(cpm_obj->pageout == FALSE);
1150 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1151 vm_object_deallocate(cpm_obj); /* kill acquired ref */
1152 vm_object_deallocate(cpm_obj); /* kill creation ref */
1153 }
1154
1155 /*
1156 * Inform the physical mapping system that the
1157 * range of addresses may not fault, so that
1158 * page tables and such can be locked down as well.
1159 */
1160 start = *addr;
1161 end = start + size;
1162 pmap = vm_map_pmap(map);
1163 pmap_pageable(pmap, start, end, FALSE);
1164
1165 /*
1166 * Enter each page into the pmap, to avoid faults.
1167 * Note that this loop could be coded more efficiently,
1168 * if the need arose, rather than looking up each page
1169 * again.
1170 */
1171 for (offset = 0, va = start; offset < size;
1172 va += PAGE_SIZE, offset += PAGE_SIZE) {
1173 vm_object_lock(cpm_obj);
1174 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1175 vm_object_unlock(cpm_obj);
1176 assert(m != VM_PAGE_NULL);
9bccf70c 1177 PMAP_ENTER(pmap, va, m, VM_PROT_ALL,
de355530 1178 VM_WIMG_USE_DEFAULT, TRUE);
1179 }
1180
1181#if MACH_ASSERT
1182 /*
1183 * Verify ordering in address space.
1184 */
1185 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1186 vm_object_lock(cpm_obj);
1187 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1188 vm_object_unlock(cpm_obj);
1189 if (m == VM_PAGE_NULL)
1190 panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
1191 cpm_obj, offset);
1192 assert(m->tabled);
1193 assert(!m->busy);
1194 assert(!m->wanted);
1195 assert(!m->fictitious);
1196 assert(!m->private);
1197 assert(!m->absent);
1198 assert(!m->error);
1199 assert(!m->cleaning);
1200 assert(!m->precious);
1201 assert(!m->clustered);
1202 if (offset != 0) {
de355530 1203 if (m->phys_addr != prev_addr + PAGE_SIZE) {
1204 printf("start 0x%x end 0x%x va 0x%x\n",
1205 start, end, va);
1206 printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
1207 printf("m 0x%x prev_address 0x%x\n", m,
1208 prev_addr);
1209 panic("vm_allocate_cpm: pages not contig!");
1210 }
1211 }
de355530 1212 prev_addr = m->phys_addr;
1213 }
1214#endif /* MACH_ASSERT */
1215
1216 vm_object_deallocate(cpm_obj); /* kill extra ref */
1217
1218 return kr;
1219}
1220
1221
1222#else /* VM_CPM */
1223
1224/*
1225 * Interface is defined in all cases, but unless the kernel
1226 * is built explicitly for this option, the interface does
1227 * nothing.
1228 */
1229
1230kern_return_t
1231vm_allocate_cpm(
1232 host_priv_t host_priv,
1233 register vm_map_t map,
1234 register vm_offset_t *addr,
1235 register vm_size_t size,
1236 int flags)
1237{
1238 return KERN_FAILURE;
1239}
1240
1241/*
1242 */
1243kern_return_t
1244mach_memory_object_memory_entry_64(
1245 host_t host,
1246 boolean_t internal,
1247 vm_object_offset_t size,
1248 vm_prot_t permission,
0b4e3aa0 1249 memory_object_t pager,
1250 ipc_port_t *entry_handle)
1251{
1252 vm_named_entry_t user_object;
1253 ipc_port_t user_handle;
1254 ipc_port_t previous;
1255 kern_return_t kr;
1256
1257 if (host == HOST_NULL)
1258 return(KERN_INVALID_HOST);
1259
1260 user_object = (vm_named_entry_t)
1261 kalloc(sizeof (struct vm_named_entry));
1262 if(user_object == NULL)
1263 return KERN_FAILURE;
1264 named_entry_lock_init(user_object);
1265 user_handle = ipc_port_alloc_kernel();
1266 ip_lock(user_handle);
1267
1268 /* make a sonce right */
1269 user_handle->ip_sorights++;
1270 ip_reference(user_handle);
1271
1272 user_handle->ip_destination = IP_NULL;
1273 user_handle->ip_receiver_name = MACH_PORT_NULL;
1274 user_handle->ip_receiver = ipc_space_kernel;
1275
1276 /* make a send right */
1277 user_handle->ip_mscount++;
1278 user_handle->ip_srights++;
1279 ip_reference(user_handle);
1280
1281 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1282 /* nsrequest unlocks user_handle */
1283
1284 user_object->object = NULL;
1285 user_object->size = size;
1286 user_object->offset = 0;
1287 user_object->backing.pager = pager;
de355530 1288 user_object->protection = permission;
1289 user_object->internal = internal;
1290 user_object->is_sub_map = FALSE;
1291 user_object->ref_count = 1;
1292
1293 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1294 IKOT_NAMED_ENTRY);
1295 *entry_handle = user_handle;
1296 return KERN_SUCCESS;
1297}
1298
1299kern_return_t
1300mach_memory_object_memory_entry(
1301 host_t host,
1302 boolean_t internal,
1303 vm_size_t size,
1304 vm_prot_t permission,
0b4e3aa0 1305 memory_object_t pager,
1306 ipc_port_t *entry_handle)
1307{
1308 return mach_memory_object_memory_entry_64( host, internal,
1309 (vm_object_offset_t)size, permission, pager, entry_handle);
1310}
1311
1312
1313
1314/*
1315 */
1316
1317kern_return_t
1318mach_make_memory_entry_64(
1319 vm_map_t target_map,
1320 vm_object_size_t *size,
1321 vm_object_offset_t offset,
1322 vm_prot_t permission,
1323 ipc_port_t *object_handle,
1324 ipc_port_t parent_entry)
1325{
1326 vm_map_version_t version;
1327 vm_named_entry_t user_object;
1328 ipc_port_t user_handle;
1329 ipc_port_t previous;
1330 kern_return_t kr;
1331 vm_map_t pmap_map;
1332
1333 /* needed for call to vm_map_lookup_locked */
9bccf70c 1334 boolean_t wired;
1c79356b 1335 vm_object_offset_t obj_off;
9bccf70c 1336 vm_prot_t prot;
1337 vm_object_offset_t lo_offset, hi_offset;
1338 vm_behavior_t behavior;
1339 vm_object_t object;
1340 vm_object_t shadow_object;
1341
1342 /* needed for direct map entry manipulation */
1343 vm_map_entry_t map_entry;
1344 vm_map_entry_t next_entry;
1345 vm_map_t local_map;
1346 vm_map_t original_map = target_map;
1347 vm_offset_t local_offset;
1c79356b 1348 vm_object_size_t mappable_size;
1349 vm_object_size_t total_size;
1350
1c79356b 1351
1352 offset = trunc_page_64(offset);
1353 *size = round_page_64(*size);
de355530 1354
1355 user_object = (vm_named_entry_t)
1356 kalloc(sizeof (struct vm_named_entry));
1357 if(user_object == NULL)
1358 return KERN_FAILURE;
1359 named_entry_lock_init(user_object);
1360 user_handle = ipc_port_alloc_kernel();
1361 ip_lock(user_handle);
1362
1363 /* make a sonce right */
1364 user_handle->ip_sorights++;
1365 ip_reference(user_handle);
1366
1367 user_handle->ip_destination = IP_NULL;
1368 user_handle->ip_receiver_name = MACH_PORT_NULL;
1369 user_handle->ip_receiver = ipc_space_kernel;
1370
1371 /* make a send right */
1372 user_handle->ip_mscount++;
1373 user_handle->ip_srights++;
1374 ip_reference(user_handle);
1375
1376 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1377 /* nsrequest unlocks user_handle */
1378
1379 user_object->backing.pager = NULL;
1380 user_object->ref_count = 1;
1381
1382 if(parent_entry == NULL) {
1383 /* Create a named object based on address range within the task map */
1384 /* Go find the object at given address */
1385
de355530 1386 permission &= VM_PROT_ALL;
1387 vm_map_lock_read(target_map);
1388
1389 /* get the object associated with the target address */
1390 /* note we check the permission of the range against */
1391 /* that requested by the caller */
1392
1393 kr = vm_map_lookup_locked(&target_map, offset,
de355530 1394 permission, &version,
1395 &object, &obj_off, &prot, &wired, &behavior,
1396 &lo_offset, &hi_offset, &pmap_map);
1397 if (kr != KERN_SUCCESS) {
1398 vm_map_unlock_read(target_map);
1399 goto make_mem_done;
1400 }
de355530 1401 if (((prot & permission) != permission)
9bccf70c 1402 || (object == kernel_object)) {
1403 kr = KERN_INVALID_RIGHT;
1404 vm_object_unlock(object);
1405 vm_map_unlock_read(target_map);
1406 if(pmap_map != target_map)
1407 vm_map_unlock_read(pmap_map);
1408 if(object == kernel_object) {
1409 printf("Warning: Attempt to create a named"
1410 " entry from the kernel_object\n");
1411 }
1412 goto make_mem_done;
1413 }
1414
1415 /* We have an object, now check to see if this object */
1416 /* is suitable. If not, create a shadow and share that */
1417
1c79356b 1418redo_lookup:
1419 local_map = original_map;
1420 local_offset = offset;
1421 if(target_map != local_map) {
1422 vm_map_unlock_read(target_map);
1423 if(pmap_map != target_map)
1424 vm_map_unlock_read(pmap_map);
1425 vm_map_lock_read(local_map);
1426 target_map = local_map;
1427 pmap_map = local_map;
1428 }
1c79356b 1429 while(TRUE) {
1430 if(!vm_map_lookup_entry(local_map,
1431 local_offset, &map_entry)) {
1432 kr = KERN_INVALID_ARGUMENT;
1433 vm_object_unlock(object);
1434 vm_map_unlock_read(target_map);
1435 if(pmap_map != target_map)
1436 vm_map_unlock_read(pmap_map);
1437 goto make_mem_done;
1438 }
1439 if(!(map_entry->is_sub_map)) {
1440 if(map_entry->object.vm_object != object) {
1441 kr = KERN_INVALID_ARGUMENT;
1442 vm_object_unlock(object);
1443 vm_map_unlock_read(target_map);
1444 if(pmap_map != target_map)
1445 vm_map_unlock_read(pmap_map);
1446 goto make_mem_done;
1447 }
1448 if(map_entry->wired_count) {
1449 object->true_share = TRUE;
1450 }
1451 break;
1452 } else {
1453 vm_map_t tmap;
1454 tmap = local_map;
1c79356b 1455 local_map = map_entry->object.sub_map;
9bccf70c 1456
1c79356b 1457 vm_map_lock_read(local_map);
9bccf70c 1458 vm_map_unlock_read(tmap);
1c79356b 1459 target_map = local_map;
1460 pmap_map = local_map;
1461 local_offset = local_offset - map_entry->vme_start;
1462 local_offset += map_entry->offset;
1463 }
1464 }
de355530 1465 if(((map_entry->max_protection) & permission) != permission) {
1466 kr = KERN_INVALID_RIGHT;
1467 vm_object_unlock(object);
1468 vm_map_unlock_read(target_map);
1469 if(pmap_map != target_map)
1470 vm_map_unlock_read(pmap_map);
1471 goto make_mem_done;
1472 }
1473
1474 mappable_size = hi_offset - obj_off;
1475 total_size = map_entry->vme_end - map_entry->vme_start;
1476 if(*size > mappable_size) {
1477 /* try to extend mappable size if the entries */
1478 /* following are from the same object and are */
1479 /* compatible */
1480 next_entry = map_entry->vme_next;
1481 /* lets see if the next map entry is still */
1482 /* pointing at this object and is contiguous */
1483 while(*size > mappable_size) {
1484 if((next_entry->object.vm_object == object) &&
1485 (next_entry->vme_start ==
1486 next_entry->vme_prev->vme_end) &&
1487 (next_entry->offset ==
1488 next_entry->vme_prev->offset +
1489 (next_entry->vme_prev->vme_end -
1490 next_entry->vme_prev->vme_start))) {
1491 if(((next_entry->max_protection)
de355530 1492 & permission) != permission) {
1493 break;
1494 }
1495 mappable_size += next_entry->vme_end
1496 - next_entry->vme_start;
1497 total_size += next_entry->vme_end
1498 - next_entry->vme_start;
1499 next_entry = next_entry->vme_next;
1500 } else {
1501 break;
1502 }
1503
1504 }
1505 }
1506
1507 if(object->internal) {
1508 /* vm_map_lookup_locked will create a shadow if */
1509 /* needs_copy is set but does not check for the */
1510 /* other two conditions shown. It is important to */
1511 /* set up an object which will not be pulled from */
1512 /* under us. */
1513
0b4e3aa0 1514 if ((map_entry->needs_copy || object->shadowed ||
1515 (object->size > total_size))
1516 && !object->true_share) {
1517 if (vm_map_lock_read_to_write(target_map)) {
1518 vm_map_lock_read(target_map);
1519 goto redo_lookup;
1520 }
1521
de355530 1522
1c79356b 1523 /* create a shadow object */
1524 vm_object_shadow(&map_entry->object.vm_object,
1525 &map_entry->offset, total_size);
1526 shadow_object = map_entry->object.vm_object;
1527 vm_object_unlock(object);
1528 vm_object_pmap_protect(
1529 object, map_entry->offset,
1530 total_size,
1531 ((map_entry->is_shared
1532 || target_map->mapped)
1533 ? PMAP_NULL :
1534 target_map->pmap),
1535 map_entry->vme_start,
1536 map_entry->protection & ~VM_PROT_WRITE);
1537 total_size -= (map_entry->vme_end
1538 - map_entry->vme_start);
1539 next_entry = map_entry->vme_next;
1540 map_entry->needs_copy = FALSE;
1541 while (total_size) {
1542 if(next_entry->object.vm_object == object) {
1543 next_entry->object.vm_object
1544 = shadow_object;
1545 next_entry->offset
1546 = next_entry->vme_prev->offset +
1547 (next_entry->vme_prev->vme_end
1548 - next_entry->vme_prev->vme_start);
1549 next_entry->needs_copy = FALSE;
1550 } else {
1551 panic("mach_make_memory_entry_64:"
1552 " map entries out of sync\n");
1553 }
1554 total_size -=
1555 next_entry->vme_end
1556 - next_entry->vme_start;
1557 next_entry = next_entry->vme_next;
1558 }
1559
1560 object = shadow_object;
1561 vm_object_lock(object);
1562 obj_off = (local_offset - map_entry->vme_start)
1563 + map_entry->offset;
1564 vm_map_lock_write_to_read(target_map);
1c79356b 1565
1566
1567 }
1568 }
1569
 1570 	/* note: in the future we can (if necessary) allow for */
 1571 	/* memory object lists; this will better support */
 1572 	/* fragmentation, but is it necessary? The user should */
 1573 	/* be encouraged to create address-space-oriented */
 1574 	/* shared objects from CLEAN memory regions which have */
 1575 	/* a known and defined history, i.e. no inheritance */
 1576 	/* sharing; make this call before making the region the */
 1577 	/* target of ipc's, etc. The code above, protecting */
 1578 	/* against delayed copy, etc., is mostly defensive. */
1579
1c79356b 1580
d7e50217 1581
de355530 1582 object->true_share = TRUE;
1583 user_object->object = object;
1584 user_object->internal = object->internal;
1585 user_object->is_sub_map = FALSE;
1586 user_object->offset = obj_off;
1587 user_object->protection = permission;
1588
1589 /* the size of mapped entry that overlaps with our region */
1590 /* which is targeted for share. */
1591 /* (entry_end - entry_start) - */
1592 /* offset of our beg addr within entry */
1593 /* it corresponds to this: */
1594
1595 if(*size > mappable_size)
1596 *size = mappable_size;
1597
1598 user_object->size = *size;
1599
1600 /* user_object pager and internal fields are not used */
1601 /* when the object field is filled in. */
1602
1603 object->ref_count++; /* we now point to this object, hold on */
1604 vm_object_res_reference(object);
1605 vm_object_unlock(object);
1606 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1607 IKOT_NAMED_ENTRY);
1c79356b 1608 *object_handle = user_handle;
1609 vm_map_unlock_read(target_map);
1610 if(pmap_map != target_map)
1611 vm_map_unlock_read(pmap_map);
1612 return KERN_SUCCESS;
1613 } else {
1614
1615 vm_named_entry_t parent_object;
1616
 1617 	/* The new object will be based on an existing named object */
1618 if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
1619 kr = KERN_INVALID_ARGUMENT;
1620 goto make_mem_done;
1621 }
1622 parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
 1623 	   if((permission & parent_object->protection) != permission) {
1624 kr = KERN_INVALID_ARGUMENT;
1625 goto make_mem_done;
1626 }
1627 if((offset + *size) > parent_object->size) {
1628 kr = KERN_INVALID_ARGUMENT;
1629 goto make_mem_done;
1630 }
1631
1632 user_object->object = parent_object->object;
1633 user_object->size = *size;
1634 user_object->offset = parent_object->offset + offset;
de355530 1635 user_object->protection = permission;
1636 if(parent_object->is_sub_map) {
1637 user_object->backing.map = parent_object->backing.map;
1638 vm_map_lock(user_object->backing.map);
1639 user_object->backing.map->ref_count++;
1640 vm_map_unlock(user_object->backing.map);
1641 }
1642 else {
1643 user_object->backing.pager = parent_object->backing.pager;
1644 }
1645 user_object->internal = parent_object->internal;
1646 user_object->is_sub_map = parent_object->is_sub_map;
1647
1648 if(parent_object->object != NULL) {
1649 /* we now point to this object, hold on */
1650 vm_object_reference(parent_object->object);
1651 vm_object_lock(parent_object->object);
1652 parent_object->object->true_share = TRUE;
1653 vm_object_unlock(parent_object->object);
1654 }
1655 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1656 IKOT_NAMED_ENTRY);
1657 *object_handle = user_handle;
1658 return KERN_SUCCESS;
1659 }
1660
1661
1662
1663make_mem_done:
1664 ipc_port_dealloc_kernel(user_handle);
1665 kfree((vm_offset_t)user_object, sizeof (struct vm_named_entry));
1666 return kr;
1667}
1668
1669kern_return_t
1670mach_make_memory_entry(
1671 vm_map_t target_map,
1672 vm_size_t *size,
1673 vm_offset_t offset,
1674 vm_prot_t permission,
1675 ipc_port_t *object_handle,
1676 ipc_port_t parent_entry)
1677{
1678 vm_object_offset_t size_64;
1679 kern_return_t kr;
1680
1681 size_64 = (vm_object_offset_t)*size;
1682 kr = mach_make_memory_entry_64(target_map, &size_64,
1683 (vm_object_offset_t)offset, permission, object_handle,
1684 parent_entry);
1685 *size = (vm_size_t)size_64;
1686 return kr;
1687}
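/*
 * Illustrative sketch (not part of this file): the usual user-space
 * sharing pattern built on these entry points.  "addr", "len" and "kr"
 * are assumed caller locals and "child_task" is a hypothetical task
 * port already held by the caller.
 *
 *	mach_port_t	entry = MACH_PORT_NULL;
 *	vm_size_t	share_size = len;
 *	vm_address_t	child_addr = 0;
 *
 *	kr = mach_make_memory_entry(mach_task_self(), &share_size, addr,
 *				    VM_PROT_READ | VM_PROT_WRITE,
 *				    &entry, MACH_PORT_NULL);
 *	if (kr == KERN_SUCCESS)
 *		kr = vm_map(child_task, &child_addr, share_size, 0,
 *			    VM_FLAGS_ANYWHERE, entry, 0, FALSE,
 *			    VM_PROT_READ | VM_PROT_WRITE,
 *			    VM_PROT_READ | VM_PROT_WRITE,
 *			    VM_INHERIT_NONE);
 */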
1688
1689/*
1690 */
1691
1692kern_return_t
1693vm_region_object_create(
1694 vm_map_t target_map,
1695 vm_size_t size,
1696 ipc_port_t *object_handle)
1697{
1698 vm_named_entry_t user_object;
1699 ipc_port_t user_handle;
1700 kern_return_t kr;
1701
de355530 1702 pmap_t new_pmap = pmap_create((vm_size_t) 0);
1703 ipc_port_t previous;
1704 vm_map_t new_map;
1705
1706 if(new_pmap == PMAP_NULL)
1707 return KERN_FAILURE;
1708 user_object = (vm_named_entry_t)
1709 kalloc(sizeof (struct vm_named_entry));
1710 if(user_object == NULL) {
de355530 1711 pmap_destroy(new_pmap);
1712 return KERN_FAILURE;
1713 }
1714 named_entry_lock_init(user_object);
1715 user_handle = ipc_port_alloc_kernel();
1716
1717
1718 ip_lock(user_handle);
1719
1720 /* make a sonce right */
1721 user_handle->ip_sorights++;
1722 ip_reference(user_handle);
1723
1724 user_handle->ip_destination = IP_NULL;
1725 user_handle->ip_receiver_name = MACH_PORT_NULL;
1726 user_handle->ip_receiver = ipc_space_kernel;
1727
1728 /* make a send right */
1729 user_handle->ip_mscount++;
1730 user_handle->ip_srights++;
1731 ip_reference(user_handle);
1732
1733 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1734 /* nsrequest unlocks user_handle */
1735
1736 /* Create a named object based on a submap of specified size */
1737
de355530 1738 new_map = vm_map_create(new_pmap, 0, size, TRUE);
1739 user_object->backing.map = new_map;
1740
1741
1742 user_object->object = VM_OBJECT_NULL;
1743 user_object->internal = TRUE;
1744 user_object->is_sub_map = TRUE;
1745 user_object->offset = 0;
1746 user_object->protection = VM_PROT_ALL;
1747 user_object->size = size;
1748 user_object->ref_count = 1;
1749
1750 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1751 IKOT_NAMED_ENTRY);
1752 *object_handle = user_handle;
1753 return KERN_SUCCESS;
1754
1755}
1756
 1757/* For a given range, check all map entries. If the entry corresponds to */
1758/* the old vm_region/map provided on the call, replace it with the */
1759/* corresponding range in the new vm_region/map */
1760kern_return_t vm_map_region_replace(
1761 vm_map_t target_map,
1762 ipc_port_t old_region,
1763 ipc_port_t new_region,
1764 vm_offset_t start,
1765 vm_offset_t end)
1766{
1767 vm_named_entry_t old_object;
1768 vm_named_entry_t new_object;
1769 vm_map_t old_submap;
1770 vm_map_t new_submap;
1771 vm_offset_t addr;
1772 vm_map_entry_t entry;
1773 int nested_pmap = 0;
1774
1775
1776 vm_map_lock(target_map);
1777 old_object = (vm_named_entry_t)old_region->ip_kobject;
1778 new_object = (vm_named_entry_t)new_region->ip_kobject;
1779 if((!old_object->is_sub_map) || (!new_object->is_sub_map)) {
1780 vm_map_unlock(target_map);
1781 return KERN_INVALID_ARGUMENT;
1782 }
1783 old_submap = (vm_map_t)old_object->backing.map;
1784 new_submap = (vm_map_t)new_object->backing.map;
1785 vm_map_lock(old_submap);
1786 if((old_submap->min_offset != new_submap->min_offset) ||
1787 (old_submap->max_offset != new_submap->max_offset)) {
1788 vm_map_unlock(old_submap);
1789 vm_map_unlock(target_map);
1790 return KERN_INVALID_ARGUMENT;
1791 }
1792 if(!vm_map_lookup_entry(target_map, start, &entry)) {
 1793 		/* if the src is not contained, the entry precedes */
1794 /* our range */
1795 addr = entry->vme_start;
1796 if(entry == vm_map_to_entry(target_map)) {
1797 vm_map_unlock(old_submap);
1798 vm_map_unlock(target_map);
1799 return KERN_SUCCESS;
1800 }
de355530 1801 vm_map_lookup_entry(target_map, addr, &entry);
1802 }
1803 addr = entry->vme_start;
1804 vm_map_reference(old_submap);
1805 while((entry != vm_map_to_entry(target_map)) &&
1806 (entry->vme_start < end)) {
1807 if((entry->is_sub_map) &&
1808 (entry->object.sub_map == old_submap)) {
1c79356b 1809 if(entry->use_pmap) {
de355530 1810 if((start & 0xfffffff) ||
1811 ((end - start) != 0x10000000)) {
1812 vm_map_unlock(old_submap);
9bccf70c 1813 vm_map_deallocate(old_submap);
1814 vm_map_unlock(target_map);
1815 return KERN_INVALID_ARGUMENT;
1816 }
1817 nested_pmap = 1;
1818 }
9bccf70c 1819 entry->object.sub_map = new_submap;
1820 vm_map_reference(new_submap);
1821 vm_map_deallocate(old_submap);
1822 }
1823 entry = entry->vme_next;
1824 addr = entry->vme_start;
1825 }
1826 if(nested_pmap) {
1827#ifndef i386
de355530 1828 pmap_unnest(target_map->pmap, start, end - start);
1829 if(target_map->mapped) {
1830 vm_map_submap_pmap_clean(target_map,
1831 start, end, old_submap, 0);
1832 }
1c79356b 1833 pmap_nest(target_map->pmap, new_submap->pmap,
1834 start, end - start);
 1835#endif /* i386 */
1c79356b 1836 } else {
1837 vm_map_submap_pmap_clean(target_map,
1838 start, end, old_submap, 0);
1839 }
1840 vm_map_unlock(old_submap);
9bccf70c 1841 vm_map_deallocate(old_submap);
1842 vm_map_unlock(target_map);
1843 return KERN_SUCCESS;
1844}
1845
1846
1847void
1848mach_destroy_memory_entry(
1849 ipc_port_t port)
1850{
1851 vm_named_entry_t named_entry;
1852#if MACH_ASSERT
1853 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
1854#endif /* MACH_ASSERT */
1855 named_entry = (vm_named_entry_t)port->ip_kobject;
1856 mutex_lock(&(named_entry)->Lock);
1857 named_entry->ref_count-=1;
1858 if(named_entry->ref_count == 0) {
1859 if(named_entry->object) {
1860 /* release the memory object we've been pointing to */
1861 vm_object_deallocate(named_entry->object);
1862 }
1863 if(named_entry->is_sub_map) {
1864 vm_map_deallocate(named_entry->backing.map);
1865 }
1866 kfree((vm_offset_t)port->ip_kobject,
1867 sizeof (struct vm_named_entry));
1868 } else
1869 mutex_unlock(&(named_entry)->Lock);
1870}
1871
1872
1873kern_return_t
1874vm_map_page_query(
1875 vm_map_t target_map,
1876 vm_offset_t offset,
1877 int *disposition,
1878 int *ref_count)
1879{
1880 vm_map_entry_t map_entry;
1881 vm_object_t object;
1882 vm_page_t m;
1883
1884restart_page_query:
1885 *disposition = 0;
1886 *ref_count = 0;
1887 vm_map_lock(target_map);
1888 if(!vm_map_lookup_entry(target_map, offset, &map_entry)) {
1889 vm_map_unlock(target_map);
1890 return KERN_FAILURE;
1891 }
1892 offset -= map_entry->vme_start; /* adjust to offset within entry */
1893 offset += map_entry->offset; /* adjust to target object offset */
1894 if(map_entry->object.vm_object != VM_OBJECT_NULL) {
1895 if(!map_entry->is_sub_map) {
1896 object = map_entry->object.vm_object;
1897 } else {
1898 vm_map_unlock(target_map);
1899 target_map = map_entry->object.sub_map;
1900 goto restart_page_query;
1901 }
1902 } else {
1903 vm_map_unlock(target_map);
1904 return KERN_FAILURE;
1905 }
1906 vm_object_lock(object);
1907 vm_map_unlock(target_map);
1908 while(TRUE) {
1909 m = vm_page_lookup(object, offset);
1910 if (m != VM_PAGE_NULL) {
1911 *disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
1912 break;
1913 } else {
1914 if(object->shadow) {
1915 offset += object->shadow_offset;
1916 vm_object_unlock(object);
1917 object = object->shadow;
1918 vm_object_lock(object);
1919 continue;
1920 }
1921 vm_object_unlock(object);
1922 return KERN_FAILURE;
1923 }
1924 }
1925
 1926 	/* The ref_count is not strictly accurate; it measures the number */
 1927 	/* of entities holding a ref on the object. They may not be mapping */
 1928 	/* the object or may not be mapping the section holding the */
 1929 	/* target page, but it's still a ballpark number, and though an */
 1930 	/* over-count, it picks up the copy-on-write cases */
1931
1932 /* We could also get a picture of page sharing from pmap_attributes */
1933 /* but this would under count as only faulted-in mappings would */
1934 /* show up. */
1935
1936 *ref_count = object->ref_count;
1937
1938 if (m->fictitious) {
1939 *disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
1940 vm_object_unlock(object);
1941 return KERN_SUCCESS;
1942 }
1943
1944 if (m->dirty)
1945 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
de355530 1946 else if(pmap_is_modified(m->phys_addr))
1947 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
1948
1949 if (m->reference)
1950 *disposition |= VM_PAGE_QUERY_PAGE_REF;
de355530 1951 else if(pmap_is_referenced(m->phys_addr))
1952 *disposition |= VM_PAGE_QUERY_PAGE_REF;
1953
1954 vm_object_unlock(object);
1955 return KERN_SUCCESS;
1956
1957}
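/*
 * Illustrative sketch (not part of this file): checking whether the
 * page backing an address is resident and dirty.  "target_map", "addr"
 * and "kr" are assumed caller variables; the disposition bits are the
 * VM_PAGE_QUERY_* values tested above.
 *
 *	int disp = 0, refs = 0;
 *
 *	kr = vm_map_page_query(target_map, addr, &disp, &refs);
 *	if (kr == KERN_SUCCESS &&
 *	    (disp & VM_PAGE_QUERY_PAGE_PRESENT) &&
 *	    (disp & VM_PAGE_QUERY_PAGE_DIRTY)) {
 *		the page is resident and has been modified
 *	}
 */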
1958
1959kern_return_t
1960set_dp_control_port(
1961 host_priv_t host_priv,
1962 ipc_port_t control_port)
1963{
1964 if (host_priv == HOST_PRIV_NULL)
1965 return (KERN_INVALID_HOST);
1966
1967 if (IP_VALID(dynamic_pager_control_port))
1968 ipc_port_release_send(dynamic_pager_control_port);
1969
1970 dynamic_pager_control_port = control_port;
1971 return KERN_SUCCESS;
1972}
1973
1974kern_return_t
1975get_dp_control_port(
1976 host_priv_t host_priv,
1977 ipc_port_t *control_port)
1978{
1979 if (host_priv == HOST_PRIV_NULL)
1980 return (KERN_INVALID_HOST);
1981
1982 *control_port = ipc_port_copy_send(dynamic_pager_control_port);
1983 return KERN_SUCCESS;
1984
1985}
1986
1987
1988/* Retrieve a upl for an object underlying an address range in a map */
1989
1990kern_return_t
1991vm_map_get_upl(
1992 vm_map_t map,
1993 vm_address_t offset,
1994 vm_size_t *upl_size,
1995 upl_t *upl,
1996 upl_page_info_array_t page_list,
1997 unsigned int *count,
1998 int *flags,
1999 int force_data_sync)
2000{
2001 vm_map_entry_t entry;
2002 int caller_flags;
2003 int sync_cow_data = FALSE;
2004 vm_object_t local_object;
2005 vm_offset_t local_offset;
2006 vm_offset_t local_start;
2007 kern_return_t ret;
1c79356b
A
2008
2009 caller_flags = *flags;
150bd074
A
2010 if (!(caller_flags & UPL_COPYOUT_FROM)) {
2011 sync_cow_data = TRUE;
2012 }
1c79356b
A
2013 if(upl == NULL)
2014 return KERN_INVALID_ARGUMENT;
0b4e3aa0
A
2015
2016
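	/*
	 * Look up the map entry covering "offset".  If the entry still
	 * needs a copy, is backed by a submap, or requires a copy/data
	 * sync, resolve that case and retry the lookup from the top.
	 */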
1c79356b
A
2017REDISCOVER_ENTRY:
2018 vm_map_lock(map);
2019 if (vm_map_lookup_entry(map, offset, &entry)) {
0b4e3aa0
A
2020 if (entry->object.vm_object == VM_OBJECT_NULL ||
2021 !entry->object.vm_object->phys_contiguous) {
2022 if((*upl_size/page_size) > MAX_UPL_TRANSFER) {
2023 *upl_size = MAX_UPL_TRANSFER * page_size;
2024 }
2025 }
1c79356b
A
2026 if((entry->vme_end - offset) < *upl_size) {
2027 *upl_size = entry->vme_end - offset;
2028 }
0b4e3aa0
A
2029 if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
2030 if (entry->object.vm_object == VM_OBJECT_NULL) {
2031 *flags = 0;
2032 } else if (entry->object.vm_object->private) {
2033 *flags = UPL_DEV_MEMORY;
2034 if (entry->object.vm_object->phys_contiguous) {
2035 *flags |= UPL_PHYS_CONTIG;
2036 }
2037 } else {
2038 *flags = 0;
2039 }
2040 vm_map_unlock(map);
2041 return KERN_SUCCESS;
2042 }
1c79356b
A
2043 /*
2044 * Create an object if necessary.
2045 */
2046 if (entry->object.vm_object == VM_OBJECT_NULL) {
2047 entry->object.vm_object = vm_object_allocate(
2048 (vm_size_t)(entry->vme_end - entry->vme_start));
2049 entry->offset = 0;
2050 }
2051 if (!(caller_flags & UPL_COPYOUT_FROM)) {
0b4e3aa0 2052 if (entry->needs_copy) {
1c79356b
A
2053 vm_map_t local_map;
2054 vm_object_t object;
2055 vm_object_offset_t offset_hi;
2056 vm_object_offset_t offset_lo;
2057 vm_object_offset_t new_offset;
2058 vm_prot_t prot;
2059 boolean_t wired;
2060 vm_behavior_t behavior;
2061 vm_map_version_t version;
2062 vm_map_t pmap_map;
2063
2064 local_map = map;
2065 vm_map_lock_write_to_read(map);
2066 if(vm_map_lookup_locked(&local_map,
2067 offset, VM_PROT_WRITE,
2068 &version, &object,
2069 &new_offset, &prot, &wired,
2070 &behavior, &offset_lo,
2071 &offset_hi, &pmap_map)) {
2072 vm_map_unlock(local_map);
2073 return KERN_FAILURE;
2074 }
2075 if (pmap_map != map) {
2076 vm_map_unlock(pmap_map);
2077 }
2078 vm_object_unlock(object);
2079 vm_map_unlock(local_map);
2080
2081 goto REDISCOVER_ENTRY;
2082 }
2083 }
2084 if (entry->is_sub_map) {
150bd074
A
2085 vm_map_t submap;
2086
2087 submap = entry->object.sub_map;
2088 local_start = entry->vme_start;
2089 local_offset = entry->offset;
2090 vm_map_reference(submap);
1c79356b 2091 vm_map_unlock(map);
150bd074
A
2092
2093 ret = (vm_map_get_upl(submap,
2094 local_offset + (offset - local_start),
1c79356b
A
2095 upl_size, upl, page_list, count,
2096 flags, force_data_sync));
150bd074
A
2097
2098 vm_map_deallocate(submap);
2099 return ret;
1c79356b
A
2100 }
2101
150bd074 2102 if (sync_cow_data) {
0b4e3aa0
A
2103 if (entry->object.vm_object->shadow
2104 || entry->object.vm_object->copy) {
150bd074
A
2105 int flags;
2106
2107 local_object = entry->object.vm_object;
2108 local_start = entry->vme_start;
2109 local_offset = entry->offset;
2110 vm_object_reference(local_object);
1c79356b
A
2111 vm_map_unlock(map);
2112
150bd074 2113 if(local_object->copy == NULL) {
1c79356b
A
2114 flags = MEMORY_OBJECT_DATA_SYNC;
2115 } else {
2116 flags = MEMORY_OBJECT_COPY_SYNC;
2117 }
150bd074
A
2118
2119 if((local_object->paging_offset) &&
2120 (local_object->pager == 0)) {
2121 /*
 2122 				 * Do a little clean-up for our unorthodox
 2123 				 * entry into a pager call from a non-pager
 2124 				 * context.  Normally the pager code assumes
 2125 				 * that the object it has been called with
 2126 				 * has a backing pager, and so does not
 2127 				 * bother to check the pager field before
 2128 				 * relying on the paging_offset.
2129 */
2130 vm_object_lock(local_object);
2131 if (local_object->pager == 0) {
2132 local_object->paging_offset = 0;
2133 }
2134 vm_object_unlock(local_object);
2135 }
1c79356b 2136
0b4e3aa0
A
2137 if (entry->object.vm_object->shadow &&
2138 entry->object.vm_object->copy) {
2139 vm_object_lock_request(
2140 local_object->shadow,
2141 (vm_object_offset_t)
2142 ((offset - local_start) +
2143 local_offset) +
2144 local_object->shadow_offset +
2145 local_object->paging_offset,
2146 *upl_size, FALSE,
2147 MEMORY_OBJECT_DATA_SYNC,
2148 VM_PROT_NO_CHANGE);
2149 }
150bd074 2150 sync_cow_data = FALSE;
0b4e3aa0 2151 vm_object_deallocate(local_object);
150bd074 2152 goto REDISCOVER_ENTRY;
1c79356b
A
2153 }
2154 }
2155
2156 if (force_data_sync) {
150bd074
A
2157
2158 local_object = entry->object.vm_object;
2159 local_start = entry->vme_start;
2160 local_offset = entry->offset;
2161 vm_object_reference(local_object);
1c79356b 2162 vm_map_unlock(map);
1c79356b 2163
150bd074
A
2164 if((local_object->paging_offset) &&
2165 (local_object->pager == 0)) {
2166 /*
 2167 				 * Do a little clean-up for our unorthodox
 2168 				 * entry into a pager call from a non-pager
 2169 				 * context.  Normally the pager code assumes
 2170 				 * that the object it has been called with
 2171 				 * has a backing pager, and so does not
 2172 				 * bother to check the pager field before
 2173 				 * relying on the paging_offset.
2174 */
2175 vm_object_lock(local_object);
2176 if (local_object->pager == 0) {
2177 local_object->paging_offset = 0;
2178 }
2179 vm_object_unlock(local_object);
2180 }
2181
0b4e3aa0
A
2182 vm_object_lock_request(
2183 local_object,
2184 (vm_object_offset_t)
2185 ((offset - local_start) + local_offset) +
2186 local_object->paging_offset,
150bd074
A
2187 (vm_object_size_t)*upl_size, FALSE,
2188 MEMORY_OBJECT_DATA_SYNC,
0b4e3aa0 2189 VM_PROT_NO_CHANGE);
150bd074 2190 force_data_sync = FALSE;
0b4e3aa0 2191 vm_object_deallocate(local_object);
150bd074 2192 goto REDISCOVER_ENTRY;
1c79356b
A
2193 }
2194
2195 if(!(entry->object.vm_object->private)) {
2196 if(*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
2197 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
2198 if(entry->object.vm_object->phys_contiguous) {
2199 *flags = UPL_PHYS_CONTIG;
2200 } else {
2201 *flags = 0;
2202 }
2203 } else {
2204 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
2205 }
150bd074
A
2206 local_object = entry->object.vm_object;
2207 local_offset = entry->offset;
2208 local_start = entry->vme_start;
2209 vm_object_reference(local_object);
1c79356b 2210 vm_map_unlock(map);
de355530
A
2211 ret = (vm_object_upl_request(local_object,
2212 (vm_object_offset_t)
2213 ((offset - local_start) + local_offset),
2214 *upl_size,
2215 upl,
2216 page_list,
2217 count,
2218 caller_flags));
150bd074
A
2219 vm_object_deallocate(local_object);
2220 return(ret);
1c79356b
A
2221 }
2222
2223 vm_map_unlock(map);
2224 return(KERN_FAILURE);
2225
2226}
2227
1c79356b
A
2228/* ******* Temporary Internal calls to UPL for BSD ***** */
2229kern_return_t
2230kernel_upl_map(
2231 vm_map_t map,
2232 upl_t upl,
2233 vm_offset_t *dst_addr)
2234{
0b4e3aa0 2235 return (vm_upl_map(map, upl, dst_addr));
1c79356b
A
2236}
2237
2238
2239kern_return_t
2240kernel_upl_unmap(
2241 vm_map_t map,
0b4e3aa0 2242 upl_t upl)
1c79356b 2243{
0b4e3aa0 2244 return(vm_upl_unmap(map, upl));
1c79356b
A
2245}
2246
2247kern_return_t
2248kernel_upl_commit(
2249 upl_t upl,
0b4e3aa0
A
2250 upl_page_info_t *pl,
2251 mach_msg_type_number_t count)
1c79356b 2252{
0b4e3aa0
A
2253 kern_return_t kr;
2254
2255 kr = upl_commit(upl, pl, count);
2256 upl_deallocate(upl);
1c79356b
A
2257 return kr;
2258}
2259
0b4e3aa0 2260
1c79356b
A
2261kern_return_t
2262kernel_upl_commit_range(
2263 upl_t upl,
2264 vm_offset_t offset,
2265 vm_size_t size,
2266 int flags,
0b4e3aa0
A
2267 upl_page_info_array_t pl,
2268 mach_msg_type_number_t count)
1c79356b 2269{
0b4e3aa0
A
2270 boolean_t finished = FALSE;
2271 kern_return_t kr;
2272
2273 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
2274 flags |= UPL_COMMIT_NOTIFY_EMPTY;
2275
2276 kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
2277
2278 if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
2279 upl_deallocate(upl);
2280
1c79356b
A
2281 return kr;
2282}
2283
2284kern_return_t
2285kernel_upl_abort_range(
0b4e3aa0
A
2286 upl_t upl,
2287 vm_offset_t offset,
2288 vm_size_t size,
2289 int abort_flags)
1c79356b 2290{
0b4e3aa0
A
2291 kern_return_t kr;
2292 boolean_t finished = FALSE;
1c79356b 2293
0b4e3aa0
A
2294 if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
2295 abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
1c79356b 2296
0b4e3aa0 2297 kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
1c79356b 2298
0b4e3aa0
A
2299 if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
2300 upl_deallocate(upl);
1c79356b 2301
0b4e3aa0 2302 return kr;
1c79356b
A
2303}
2304
1c79356b 2305kern_return_t
0b4e3aa0
A
2306kernel_upl_abort(
2307 upl_t upl,
2308 int abort_type)
1c79356b 2309{
0b4e3aa0 2310 kern_return_t kr;
1c79356b 2311
0b4e3aa0
A
2312 kr = upl_abort(upl, abort_type);
2313 upl_deallocate(upl);
2314 return kr;
1c79356b
A
2315}
2316
1c79356b
A
2317
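/*
 * vm_get_shared_region and vm_set_shared_region read and set the
 * system shared region associated with a task.
 */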
2318kern_return_t
2319vm_get_shared_region(
2320 task_t task,
2321 shared_region_mapping_t *shared_region)
2322{
2323 *shared_region = (shared_region_mapping_t) task->system_shared_region;
2324 return KERN_SUCCESS;
2325}
2326
2327kern_return_t
2328vm_set_shared_region(
2329 task_t task,
2330 shared_region_mapping_t shared_region)
2331{
2332 task->system_shared_region = (vm_offset_t) shared_region;
2333 return KERN_SUCCESS;
2334}
2335
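/*
 * shared_region_mapping_info copies the fields of a shared region
 * mapping out to the caller while holding the region lock.
 */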
2336kern_return_t
2337shared_region_mapping_info(
2338 shared_region_mapping_t shared_region,
2339 ipc_port_t *text_region,
2340 vm_size_t *text_size,
2341 ipc_port_t *data_region,
2342 vm_size_t *data_size,
2343 vm_offset_t *region_mappings,
2344 vm_offset_t *client_base,
2345 vm_offset_t *alt_base,
2346 vm_offset_t *alt_next,
2347 int *flags,
2348 shared_region_mapping_t *next)
2349{
2350 shared_region_mapping_lock(shared_region);
2351
2352 *text_region = shared_region->text_region;
2353 *text_size = shared_region->text_size;
2354 *data_region = shared_region->data_region;
2355 *data_size = shared_region->data_size;
2356 *region_mappings = shared_region->region_mappings;
2357 *client_base = shared_region->client_base;
2358 *alt_base = shared_region->alternate_base;
2359 *alt_next = shared_region->alternate_next;
2360 *flags = shared_region->flags;
2361 *next = shared_region->next;
2362
 2363 	shared_region_mapping_unlock(shared_region);
 	return KERN_SUCCESS;
 2364 }
2365
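/*
 * shared_region_object_chain_attach links target_region to
 * object_chain_region through a newly allocated chain element.
 * Fails if target_region already has an object chain.
 */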
2366kern_return_t
2367shared_region_object_chain_attach(
2368 shared_region_mapping_t target_region,
2369 shared_region_mapping_t object_chain_region)
2370{
2371 shared_region_object_chain_t object_ele;
2372
2373 if(target_region->object_chain)
2374 return KERN_FAILURE;
2375 object_ele = (shared_region_object_chain_t)
2376 kalloc(sizeof (struct shared_region_object_chain));
2377 shared_region_mapping_lock(object_chain_region);
2378 target_region->object_chain = object_ele;
2379 object_ele->object_chain_region = object_chain_region;
2380 object_ele->next = object_chain_region->object_chain;
2381 object_ele->depth = object_chain_region->depth;
2382 object_chain_region->depth++;
2383 target_region->alternate_next = object_chain_region->alternate_next;
2384 shared_region_mapping_unlock(object_chain_region);
2385 return KERN_SUCCESS;
2386}
2387
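/*
 * shared_region_mapping_create allocates and initializes a new
 * shared region mapping with a single reference.
 */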
2388kern_return_t
2389shared_region_mapping_create(
2390 ipc_port_t text_region,
2391 vm_size_t text_size,
2392 ipc_port_t data_region,
2393 vm_size_t data_size,
2394 vm_offset_t region_mappings,
2395 vm_offset_t client_base,
2396 shared_region_mapping_t *shared_region,
2397 vm_offset_t alt_base,
2398 vm_offset_t alt_next)
2399{
2400 *shared_region = (shared_region_mapping_t)
2401 kalloc(sizeof (struct shared_region_mapping));
2402 if(*shared_region == NULL)
2403 return KERN_FAILURE;
2404 shared_region_mapping_lock_init((*shared_region));
2405 (*shared_region)->text_region = text_region;
2406 (*shared_region)->text_size = text_size;
2407 (*shared_region)->data_region = data_region;
2408 (*shared_region)->data_size = data_size;
2409 (*shared_region)->region_mappings = region_mappings;
2410 (*shared_region)->client_base = client_base;
2411 (*shared_region)->ref_count = 1;
2412 (*shared_region)->next = NULL;
2413 (*shared_region)->object_chain = NULL;
2414 (*shared_region)->self = *shared_region;
2415 (*shared_region)->flags = 0;
2416 (*shared_region)->depth = 0;
2417 (*shared_region)->alternate_base = alt_base;
2418 (*shared_region)->alternate_next = alt_next;
2419 return KERN_SUCCESS;
2420}
2421
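/*
 * shared_region_mapping_set_alt_next updates the region's
 * alternate_next address.
 */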
2422kern_return_t
2423shared_region_mapping_set_alt_next(
2424 shared_region_mapping_t shared_region,
2425 vm_offset_t alt_next)
2426{
2427 shared_region->alternate_next = alt_next;
2428 return KERN_SUCCESS;
2429}
2430
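/*
 * shared_region_mapping_ref takes an additional reference on the
 * shared region, if one is supplied.
 */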
2431kern_return_t
2432shared_region_mapping_ref(
2433 shared_region_mapping_t shared_region)
2434{
2435 if(shared_region == NULL)
2436 return KERN_SUCCESS;
9bccf70c 2437 hw_atomic_add(&shared_region->ref_count, 1);
1c79356b
A
2438 return KERN_SUCCESS;
2439}
2440
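/*
 * shared_region_mapping_dealloc drops a reference on the shared
 * region.  When the count reaches zero, the region's load mappings
 * are removed, its pmap entries are cleared, its text and data
 * region ports are released, and the walk continues down the
 * object chain.
 */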
2441kern_return_t
2442shared_region_mapping_dealloc(
2443 shared_region_mapping_t shared_region)
2444{
2445 struct shared_region_task_mappings sm_info;
9bccf70c
A
2446 shared_region_mapping_t next = NULL;
2447
2448 while (shared_region) {
de355530 2449 if (hw_atomic_sub(&shared_region->ref_count, 1) == 0) {
9bccf70c
A
2450 shared_region_mapping_lock(shared_region);
2451
2452 sm_info.text_region = shared_region->text_region;
2453 sm_info.text_size = shared_region->text_size;
2454 sm_info.data_region = shared_region->data_region;
2455 sm_info.data_size = shared_region->data_size;
2456 sm_info.region_mappings = shared_region->region_mappings;
2457 sm_info.client_base = shared_region->client_base;
2458 sm_info.alternate_base = shared_region->alternate_base;
2459 sm_info.alternate_next = shared_region->alternate_next;
2460 sm_info.flags = shared_region->flags;
2461 sm_info.self = (vm_offset_t)shared_region;
2462
de355530
A
2463 lsf_remove_regions_mappings(shared_region, &sm_info);
2464 pmap_remove(((vm_named_entry_t)
9bccf70c
A
2465 (shared_region->text_region->ip_kobject))
2466 ->backing.map->pmap,
2467 sm_info.client_base,
2468 sm_info.client_base + sm_info.text_size);
2469 ipc_port_release_send(shared_region->text_region);
de355530 2470 ipc_port_release_send(shared_region->data_region);
9bccf70c
A
2471 if (shared_region->object_chain) {
2472 next = shared_region->object_chain->object_chain_region;
2473 kfree((vm_offset_t)shared_region->object_chain,
2474 sizeof (struct shared_region_object_chain));
2475 } else {
2476 next = NULL;
2477 }
2478 shared_region_mapping_unlock(shared_region);
2479 kfree((vm_offset_t)shared_region,
1c79356b 2480 sizeof (struct shared_region_mapping));
9bccf70c
A
2481 shared_region = next;
2482 } else {
2483 break;
2484 }
1c79356b 2485 }
1c79356b
A
2486 return KERN_SUCCESS;
2487}
2488
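/*
 * vm_map_get_phys_page walks the map (descending through submaps
 * and object shadow chains) to find the physical address backing
 * "offset".  Returns 0 if no page is resident.
 */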
de355530 2489vm_offset_t
1c79356b
A
2490vm_map_get_phys_page(
2491 vm_map_t map,
2492 vm_offset_t offset)
2493{
2494 vm_map_entry_t entry;
2495 int ops;
2496 int flags;
de355530 2497 vm_offset_t phys_addr = 0;
1c79356b
A
2498 vm_object_t object;
2499
2500 vm_map_lock(map);
2501 while (vm_map_lookup_entry(map, offset, &entry)) {
2502
2503 if (entry->object.vm_object == VM_OBJECT_NULL) {
2504 vm_map_unlock(map);
2505 return (vm_offset_t) 0;
2506 }
2507 if (entry->is_sub_map) {
2508 vm_map_t old_map;
2509 vm_map_lock(entry->object.sub_map);
2510 old_map = map;
2511 map = entry->object.sub_map;
2512 offset = entry->offset + (offset - entry->vme_start);
2513 vm_map_unlock(old_map);
2514 continue;
2515 }
9bccf70c
A
2516 if (entry->object.vm_object->phys_contiguous) {
 2517 			/* These are not standard pageable memory mappings. */
 2518 			/* If they are not present in the object, they will */
 2519 			/* have to be picked up from the pager through the */
 2520 			/* fault mechanism.  */
2521 if(entry->object.vm_object->shadow_offset == 0) {
2522 /* need to call vm_fault */
2523 vm_map_unlock(map);
2524 vm_fault(map, offset, VM_PROT_NONE,
2525 FALSE, THREAD_UNINT, NULL, 0);
2526 vm_map_lock(map);
2527 continue;
2528 }
2529 offset = entry->offset + (offset - entry->vme_start);
de355530 2530 phys_addr = entry->object.vm_object->shadow_offset + offset;
9bccf70c
A
2531 break;
2532
2533 }
1c79356b
A
2534 offset = entry->offset + (offset - entry->vme_start);
2535 object = entry->object.vm_object;
2536 vm_object_lock(object);
2537 while (TRUE) {
2538 vm_page_t dst_page = vm_page_lookup(object,offset);
2539 if(dst_page == VM_PAGE_NULL) {
2540 if(object->shadow) {
2541 vm_object_t old_object;
2542 vm_object_lock(object->shadow);
2543 old_object = object;
2544 offset = offset + object->shadow_offset;
2545 object = object->shadow;
2546 vm_object_unlock(old_object);
2547 } else {
2548 vm_object_unlock(object);
2549 break;
2550 }
2551 } else {
de355530 2552 phys_addr = dst_page->phys_addr;
1c79356b
A
2553 vm_object_unlock(object);
2554 break;
2555 }
2556 }
2557 break;
2558
2559 }
2560
2561 vm_map_unlock(map);
de355530 2562 return phys_addr;
1c79356b
A
2563}
2564#endif /* VM_CPM */