apple/xnu.git blame: osfmk/vm/vm_user.c (xnu-344.49)
1c79356b 1/*
0b4e3aa0 2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
43866e37 6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
1c79356b 7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * @OSF_COPYRIGHT@
27 */
28/*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53/*
54 */
55/*
56 * File: vm/vm_user.c
57 * Author: Avadis Tevanian, Jr., Michael Wayne Young
58 *
59 * User-exported virtual memory functions.
60 */
61
62#include <vm_cpm.h>
63#include <mach/boolean.h>
64#include <mach/kern_return.h>
65#include <mach/mach_types.h> /* to get vm_address_t */
66#include <mach/memory_object.h>
67#include <mach/std_types.h> /* to get pointer_t */
68#include <mach/vm_attributes.h>
69#include <mach/vm_param.h>
70#include <mach/vm_statistics.h>
71#include <mach/vm_map_server.h>
72#include <mach/mach_syscalls.h>
9bccf70c 73
1c79356b 74#include <mach/shared_memory_server.h>
9bccf70c 75#include <vm/vm_shared_memory_server.h>
76
77#include <kern/host.h>
78#include <kern/task.h>
79#include <kern/misc_protos.h>
80#include <vm/vm_map.h>
81#include <vm/vm_object.h>
82#include <vm/vm_page.h>
83#include <vm/memory_object.h>
84#include <vm/vm_pageout.h>
85
86
87
88vm_size_t upl_offset_to_pagelist = 0;
89
90#if VM_CPM
91#include <vm/cpm.h>
92#endif /* VM_CPM */
93
94ipc_port_t dynamic_pager_control_port=NULL;
95
96/*
97 * vm_allocate allocates "zero fill" memory in the specified
98 * map.
99 */
100kern_return_t
101vm_allocate(
102 register vm_map_t map,
103 register vm_offset_t *addr,
104 register vm_size_t size,
105 int flags)
106{
107 kern_return_t result;
108 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
109
110 if (map == VM_MAP_NULL)
111 return(KERN_INVALID_ARGUMENT);
112 if (size == 0) {
113 *addr = 0;
114 return(KERN_SUCCESS);
115 }
116
117 if (anywhere)
118 *addr = vm_map_min(map);
119 else
120 *addr = trunc_page(*addr);
121 size = round_page(size);
122 if (size == 0) {
123 return(KERN_INVALID_ARGUMENT);
124 }
125
126 result = vm_map_enter(
127 map,
128 addr,
129 size,
130 (vm_offset_t)0,
131 flags,
132 VM_OBJECT_NULL,
133 (vm_object_offset_t)0,
134 FALSE,
135 VM_PROT_DEFAULT,
136 VM_PROT_ALL,
137 VM_INHERIT_DEFAULT);
138
139 return(result);
140}
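/*
 * [Editorial note -- illustrative sketch, not part of the original file.]
 * The kernel entry point above is what the user-level vm_allocate() call
 * ultimately reaches (MIG converts the task port into the task's vm_map_t).
 * A minimal user-space sketch of the allocate/deallocate pair, assuming
 * only <mach/mach.h>:
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
scratch_buffer_example(void)
{
	vm_address_t	addr = 0;
	vm_size_t	size = 4 * vm_page_size;	/* rounded to pages anyway */
	kern_return_t	kr;

	/* Ask for zero-filled pages anywhere in our own address space. */
	kr = vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	((char *)addr)[0] = 1;		/* pages are demand-zero */

	/* Return the range to the map when done. */
	return vm_deallocate(mach_task_self(), addr, size);
}
#endif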
141
142/*
143 * vm_deallocate deallocates the specified range of addresses in the
144 * specified address map.
145 */
146kern_return_t
147vm_deallocate(
148 register vm_map_t map,
149 vm_offset_t start,
150 vm_size_t size)
151{
152 if (map == VM_MAP_NULL)
153 return(KERN_INVALID_ARGUMENT);
154
155 if (size == (vm_offset_t) 0)
156 return(KERN_SUCCESS);
157
158 return(vm_map_remove(map, trunc_page(start),
159 round_page(start+size), VM_MAP_NO_FLAGS));
160}
161
162/*
163 * vm_inherit sets the inheritance of the specified range in the
164 * specified map.
165 */
166kern_return_t
167vm_inherit(
168 register vm_map_t map,
169 vm_offset_t start,
170 vm_size_t size,
171 vm_inherit_t new_inheritance)
172{
173 if (map == VM_MAP_NULL)
174 return(KERN_INVALID_ARGUMENT);
175
176 if (new_inheritance > VM_INHERIT_LAST_VALID)
177 return(KERN_INVALID_ARGUMENT);
178
179 return(vm_map_inherit(map,
180 trunc_page(start),
181 round_page(start+size),
182 new_inheritance));
183}
184
185/*
186 * vm_protect sets the protection of the specified range in the
187 * specified map.
188 */
189
190kern_return_t
191vm_protect(
192 register vm_map_t map,
193 vm_offset_t start,
194 vm_size_t size,
195 boolean_t set_maximum,
196 vm_prot_t new_protection)
197{
198 if ((map == VM_MAP_NULL) ||
199 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
200 return(KERN_INVALID_ARGUMENT);
201
202 return(vm_map_protect(map,
203 trunc_page(start),
204 round_page(start+size),
205 new_protection,
206 set_maximum));
207}
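/*
 * [Editorial note -- illustrative sketch, not part of the original file.]
 * vm_protect() above distinguishes the current and maximum protection via
 * set_maximum.  A hedged user-space sketch that drops write access on a
 * previously allocated region (assumes <mach/mach.h>):
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
make_read_only(vm_address_t addr, vm_size_t size)
{
	/* set_maximum == FALSE: change only the current protection. */
	return vm_protect(mach_task_self(), addr, size,
			  FALSE, VM_PROT_READ);
}
#endif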
208
209/*
210 * Handle machine-specific attributes for a mapping, such
211 * as cacheability, migrability, etc.
212 */
213kern_return_t
214vm_machine_attribute(
215 vm_map_t map,
216 vm_address_t address,
217 vm_size_t size,
218 vm_machine_attribute_t attribute,
219 vm_machine_attribute_val_t* value) /* IN/OUT */
220{
221 if (map == VM_MAP_NULL)
222 return(KERN_INVALID_ARGUMENT);
223
224 return vm_map_machine_attribute(map, address, size, attribute, value);
225}
226
227kern_return_t
228vm_read(
229 vm_map_t map,
230 vm_address_t address,
231 vm_size_t size,
232 pointer_t *data,
233 mach_msg_type_number_t *data_size)
234{
235 kern_return_t error;
236 vm_map_copy_t ipc_address;
237
238 if (map == VM_MAP_NULL)
239 return(KERN_INVALID_ARGUMENT);
240
241 if ((error = vm_map_copyin(map,
242 address,
243 size,
244 FALSE, /* src_destroy */
245 &ipc_address)) == KERN_SUCCESS) {
246 *data = (pointer_t) ipc_address;
247 *data_size = size;
248 }
249 return(error);
250}
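/*
 * [Editorial note -- illustrative sketch, not part of the original file.]
 * vm_read() hands the caller a vm_map_copy_t through MIG; at user level the
 * data shows up as newly mapped pages in the caller's address space, which
 * the caller must vm_deallocate() when finished.  Sketch, assuming a task
 * port for the target obtained elsewhere:
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
peek_remote(task_t target, vm_address_t remote_addr, vm_size_t len)
{
	vm_offset_t		data = 0;
	mach_msg_type_number_t	count = 0;
	kern_return_t		kr;

	kr = vm_read(target, remote_addr, len, &data, &count);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... inspect the 'count' bytes now mapped at 'data' ... */

	return vm_deallocate(mach_task_self(), data, count);
}
#endif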
251
252kern_return_t
253vm_read_list(
254 vm_map_t map,
255 vm_read_entry_t data_list,
256 mach_msg_type_number_t count)
257{
258 mach_msg_type_number_t i;
259 kern_return_t error;
260 vm_map_copy_t ipc_address;
261
262 if (map == VM_MAP_NULL)
263 return(KERN_INVALID_ARGUMENT);
264
265 for(i=0; i<count; i++) {
266 error = vm_map_copyin(map,
267 data_list[i].address,
268 data_list[i].size,
269 FALSE, /* src_destroy */
270 &ipc_address);
271 if(error != KERN_SUCCESS) {
272 data_list[i].address = (vm_address_t)0;
273 data_list[i].size = (vm_size_t)0;
274 break;
275 }
276 if(data_list[i].size != 0) {
277 error = vm_map_copyout(current_task()->map,
278 &(data_list[i].address),
279 (vm_map_copy_t) ipc_address);
280 if(error != KERN_SUCCESS) {
281 data_list[i].address = (vm_address_t)0;
282 data_list[i].size = (vm_size_t)0;
283 break;
284 }
285 }
286 }
287 return(error);
288}
289
290/*
291 * This routine reads from the specified map and overwrites part of the current
292 * activation's map. Because it assumes the current thread is local, it is
293 * no longer cluster-safe without a fully supportive local proxy thread/
294 * task (but we no longer support clusters, so this is moot).
295 */
296
297#define VM_OVERWRITE_SMALL 512
298
299kern_return_t
300vm_read_overwrite(
301 vm_map_t map,
302 vm_address_t address,
303 vm_size_t size,
304 vm_address_t data,
305 vm_size_t *data_size)
306{
307 struct {
308 long align;
309 char buf[VM_OVERWRITE_SMALL];
310 } inbuf;
311 vm_map_t oldmap;
312 kern_return_t error = KERN_SUCCESS;
313 vm_map_copy_t copy;
314
315 if (map == VM_MAP_NULL)
316 return(KERN_INVALID_ARGUMENT);
317
318 if (size <= VM_OVERWRITE_SMALL) {
319 if(vm_map_read_user(map, (vm_offset_t)address,
320 (vm_offset_t)&inbuf, size)) {
321 error = KERN_INVALID_ADDRESS;
322 } else {
323 if(vm_map_write_user(current_map(),
324 (vm_offset_t)&inbuf, (vm_offset_t)data, size))
325 error = KERN_INVALID_ADDRESS;
326 }
327 }
328 else {
329 if ((error = vm_map_copyin(map,
330 address,
331 size,
332 FALSE, /* src_destroy */
333 &copy)) == KERN_SUCCESS) {
334 if ((error = vm_map_copy_overwrite(
335 current_act()->map,
336 data,
337 copy,
338 FALSE)) == KERN_SUCCESS) {
339 }
340 else {
341 vm_map_copy_discard(copy);
342 }
343 }
344 }
345 *data_size = size;
346 return(error);
347}
348
349
350
351
352/*ARGSUSED*/
353kern_return_t
354vm_write(
355 vm_map_t map,
356 vm_address_t address,
357 vm_offset_t data,
358 mach_msg_type_number_t size)
359{
360 if (map == VM_MAP_NULL)
361 return KERN_INVALID_ARGUMENT;
362
363 return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
364 FALSE /* interruptible XXX */);
365}
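/*
 * [Editorial note -- illustrative sketch, not part of the original file.]
 * At user level the 'data' argument is simply the address of a local buffer;
 * MIG ships it as out-of-line data, which arrives here as a vm_map_copy_t.
 * Hedged sketch of writing a local buffer into another task's map:
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
poke_remote(task_t target, vm_address_t remote_addr,
	    void *local_buf, mach_msg_type_number_t len)
{
	return vm_write(target, remote_addr,
			(vm_offset_t)local_buf, len);
}
#endif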
366
367kern_return_t
368vm_copy(
369 vm_map_t map,
370 vm_address_t source_address,
371 vm_size_t size,
372 vm_address_t dest_address)
373{
374 vm_map_copy_t copy;
375 kern_return_t kr;
376
377 if (map == VM_MAP_NULL)
378 return KERN_INVALID_ARGUMENT;
379
380 kr = vm_map_copyin(map, source_address, size,
381 FALSE, &copy);
382 if (kr != KERN_SUCCESS)
383 return kr;
384
385 kr = vm_map_copy_overwrite(map, dest_address, copy,
386 FALSE /* interruptible XXX */);
387 if (kr != KERN_SUCCESS) {
388 vm_map_copy_discard(copy);
389 return kr;
390 }
391
392 return KERN_SUCCESS;
393}
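/*
 * [Editorial note -- illustrative sketch, not part of the original file.]
 * vm_copy() is a copyin plus overwrite within one map; both ranges must
 * already be mapped and the destination must be writable.  Minimal sketch:
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
duplicate_region(vm_address_t src, vm_address_t dst, vm_size_t size)
{
	/* 'size' should be page-aligned; the kernel path moves the data
	 * via a vm_map_copy_t rather than touching it word by word. */
	return vm_copy(mach_task_self(), src, size, dst);
}
#endif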
394
395/*
396 * Routine: vm_map
397 */
398kern_return_t
399vm_map_64(
400 vm_map_t target_map,
401 vm_offset_t *address,
402 vm_size_t initial_size,
403 vm_offset_t mask,
404 int flags,
405 ipc_port_t port,
406 vm_object_offset_t offset,
407 boolean_t copy,
408 vm_prot_t cur_protection,
409 vm_prot_t max_protection,
410 vm_inherit_t inheritance)
411{
412 register
413 vm_object_t object;
414 vm_prot_t prot;
415 vm_object_size_t size = (vm_object_size_t)initial_size;
416 kern_return_t result;
417
418 /*
419 * Check arguments for validity
420 */
421 if ((target_map == VM_MAP_NULL) ||
422 (cur_protection & ~VM_PROT_ALL) ||
423 (max_protection & ~VM_PROT_ALL) ||
424 (inheritance > VM_INHERIT_LAST_VALID) ||
425 size == 0)
426 return(KERN_INVALID_ARGUMENT);
427
428 /*
429 * Find the vm object (if any) corresponding to this port.
430 */
431 if (!IP_VALID(port)) {
432 object = VM_OBJECT_NULL;
433 offset = 0;
434 copy = FALSE;
435 } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
436 vm_named_entry_t named_entry;
437
438 named_entry = (vm_named_entry_t)port->ip_kobject;
439 /* a few checks to make sure user is obeying rules */
440 if(size == 0) {
441 if(offset >= named_entry->size)
442 return(KERN_INVALID_RIGHT);
443 size = named_entry->size - offset;
444 }
445 if((named_entry->protection & max_protection) != max_protection)
446 return(KERN_INVALID_RIGHT);
447 if((named_entry->protection & cur_protection) != cur_protection)
448 return(KERN_INVALID_RIGHT);
449 if(named_entry->size < (offset + size))
450 return(KERN_INVALID_ARGUMENT);
451
452		/* the caller's parameter offset is defined to be the */
453		/* offset from the beginning of the named entry's offset in the object */
454 offset = offset + named_entry->offset;
455
456 named_entry_lock(named_entry);
457 if(named_entry->is_sub_map) {
458 vm_map_entry_t map_entry;
459
460 named_entry_unlock(named_entry);
461 *address = trunc_page(*address);
462 size = round_page(size);
463 vm_object_reference(vm_submap_object);
464 if ((result = vm_map_enter(target_map,
465 address, size, mask, flags,
466 vm_submap_object, 0,
467 FALSE,
468 cur_protection, max_protection, inheritance
469 )) != KERN_SUCCESS) {
470 vm_object_deallocate(vm_submap_object);
471 } else {
472 char alias;
473
474 VM_GET_FLAGS_ALIAS(flags, alias);
475 if ((alias == VM_MEMORY_SHARED_PMAP) &&
476 !copy) {
477 vm_map_submap(target_map, *address,
478 (*address) + size,
479 named_entry->backing.map,
480 (vm_offset_t)offset, TRUE);
481 } else {
482 vm_map_submap(target_map, *address,
483 (*address) + size,
484 named_entry->backing.map,
485 (vm_offset_t)offset, FALSE);
486 }
487 if(copy) {
488 if(vm_map_lookup_entry(
489 target_map, *address, &map_entry)) {
490 map_entry->needs_copy = TRUE;
491 }
492 }
493 }
494 return(result);
495
496 } else if(named_entry->object) {
497 /* This is the case where we are going to map */
498 /* an already mapped object. If the object is */
499 /* not ready it is internal. An external */
500			/* object cannot be mapped until it is ready, */
501			/* so we can avoid the ready check */
502 /* in this case. */
503 named_entry_unlock(named_entry);
504 vm_object_reference(named_entry->object);
505 object = named_entry->object;
506 } else {
507 object = vm_object_enter(named_entry->backing.pager,
508 named_entry->size,
509 named_entry->internal,
510 FALSE,
511 FALSE);
512 if (object == VM_OBJECT_NULL) {
513 named_entry_unlock(named_entry);
514 return(KERN_INVALID_OBJECT);
515 }
de355530 516 object->true_share = TRUE;
517 named_entry->object = object;
518 named_entry_unlock(named_entry);
519 /* create an extra reference for the named entry */
520 vm_object_reference(named_entry->object);
521 /* wait for object (if any) to be ready */
522 if (object != VM_OBJECT_NULL) {
523 vm_object_lock(object);
524 while (!object->pager_ready) {
525 vm_object_wait(object,
526 VM_OBJECT_EVENT_PAGER_READY,
527 THREAD_UNINT);
528 vm_object_lock(object);
529 }
de355530 530 vm_object_unlock(object);
531 }
532 }
533 } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
534 /*
535 * JMM - This is temporary until we unify named entries
536 * and raw memory objects.
537 *
538 * Detected fake ip_kotype for a memory object. In
539 * this case, the port isn't really a port at all, but
540 * instead is just a raw memory object.
541 */
542
543 if ((object = vm_object_enter((memory_object_t)port,
544 size, FALSE, FALSE, FALSE))
545 == VM_OBJECT_NULL)
546 return(KERN_INVALID_OBJECT);
547
548 /* wait for object (if any) to be ready */
549 if (object != VM_OBJECT_NULL) {
550 if(object == kernel_object) {
551 printf("Warning: Attempt to map kernel object"
552 " by a non-private kernel entity\n");
553 return(KERN_INVALID_OBJECT);
554 }
555 vm_object_lock(object);
556 while (!object->pager_ready) {
557 vm_object_wait(object,
558 VM_OBJECT_EVENT_PAGER_READY,
559 THREAD_UNINT);
560 vm_object_lock(object);
561 }
562 vm_object_unlock(object);
563 }
564 } else {
565 return (KERN_INVALID_OBJECT);
566 }
567
568 *address = trunc_page(*address);
569 size = round_page(size);
570
571 /*
572 * Perform the copy if requested
573 */
574
575 if (copy) {
576 vm_object_t new_object;
577 vm_object_offset_t new_offset;
578
579 result = vm_object_copy_strategically(object, offset, size,
580 &new_object, &new_offset,
581 &copy);
582
583
584 if (result == KERN_MEMORY_RESTART_COPY) {
585 boolean_t success;
586 boolean_t src_needs_copy;
587
588 /*
589 * XXX
590 * We currently ignore src_needs_copy.
591 * This really is the issue of how to make
592 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
593 * non-kernel users to use. Solution forthcoming.
594 * In the meantime, since we don't allow non-kernel
595 * memory managers to specify symmetric copy,
596 * we won't run into problems here.
597 */
598 new_object = object;
599 new_offset = offset;
600 success = vm_object_copy_quickly(&new_object,
601 new_offset, size,
602 &src_needs_copy,
603 &copy);
604 assert(success);
605 result = KERN_SUCCESS;
606 }
607 /*
608 * Throw away the reference to the
609 * original object, as it won't be mapped.
610 */
611
612 vm_object_deallocate(object);
613
614 if (result != KERN_SUCCESS)
615 return (result);
616
617 object = new_object;
618 offset = new_offset;
619 }
620
621 if ((result = vm_map_enter(target_map,
622 address, size, mask, flags,
623 object, offset,
624 copy,
625 cur_protection, max_protection, inheritance
626 )) != KERN_SUCCESS)
627 vm_object_deallocate(object);
628 return(result);
629}
630
631/* temporary, until world build */
632kern_return_t vm_map(
633 vm_map_t target_map,
634 vm_offset_t *address,
635 vm_size_t size,
636 vm_offset_t mask,
637 int flags,
638 ipc_port_t port,
639 vm_offset_t offset,
640 boolean_t copy,
641 vm_prot_t cur_protection,
642 vm_prot_t max_protection,
643 vm_inherit_t inheritance)
644{
de355530 645	return vm_map_64(target_map, address, size, mask, flags,
646 port, (vm_object_offset_t)offset, copy,
647 cur_protection, max_protection, inheritance);
648}
649
650
651/*
652 * NOTE: this routine (and this file) will no longer require mach_host_server.h
653 * when vm_wire is changed to use ledgers.
654 */
655#include <mach/mach_host_server.h>
656/*
657 * Specify that the range of the virtual address space
658 * of the target task must not cause page faults for
659 * the indicated accesses.
660 *
661 * [ To unwire the pages, specify VM_PROT_NONE. ]
662 */
663kern_return_t
664vm_wire(
665 host_priv_t host_priv,
666 register vm_map_t map,
667 vm_offset_t start,
668 vm_size_t size,
669 vm_prot_t access)
670{
671 kern_return_t rc;
672
673 if (host_priv == HOST_PRIV_NULL)
674 return KERN_INVALID_HOST;
675
676 assert(host_priv == &realhost);
677
678 if (map == VM_MAP_NULL)
679 return KERN_INVALID_TASK;
680
681 if (access & ~VM_PROT_ALL)
682 return KERN_INVALID_ARGUMENT;
683
684 if (access != VM_PROT_NONE) {
685 rc = vm_map_wire(map, trunc_page(start),
686 round_page(start+size), access, TRUE);
1c79356b 687 } else {
688 rc = vm_map_unwire(map, trunc_page(start),
689 round_page(start+size), TRUE);
690 }
691 return rc;
692}
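/*
 * [Editorial note -- illustrative sketch, not part of the original file.]
 * vm_wire() demands the privileged host port, so ordinary tasks cannot wire
 * memory this way; passing VM_PROT_NONE unwires.  Hedged sketch (obtaining
 * host_priv is assumed to have happened elsewhere and requires privilege):
 */
#if 0	/* user-space usage sketch, privileged caller assumed */
#include <mach/mach.h>

static kern_return_t
wire_then_unwire(host_priv_t host_priv, vm_address_t addr, vm_size_t size)
{
	kern_return_t kr;

	kr = vm_wire(host_priv, mach_task_self(), addr, size, VM_PROT_READ);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... the range can no longer take page faults ... */

	return vm_wire(host_priv, mach_task_self(), addr, size, VM_PROT_NONE);
}
#endif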
693
694/*
695 * vm_msync
696 *
697 *	Synchronizes the specified memory range with its backing store
698 *	image by either flushing or cleaning the contents to the appropriate
699 *	memory manager, engaging in a memory object synchronize dialog with
700 *	the manager. The client does not return until the manager issues a
701 *	memory_object_synchronize_completed message. MIG magically converts
702 *	the user task parameter to the task's address map.
703 *
704 * interpretation of sync_flags
705 * VM_SYNC_INVALIDATE - discard pages, only return precious
706 * pages to manager.
707 *
708 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
709 * - discard pages, write dirty or precious
710 * pages back to memory manager.
711 *
712 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
713 * - write dirty or precious pages back to
714 * the memory manager.
715 *
716 * NOTE
717 *	The memory object attributes have not yet been implemented; this
718 *	function will have to deal with the invalidate attribute.
719 *
720 * RETURNS
721 * KERN_INVALID_TASK Bad task parameter
722 * KERN_INVALID_ARGUMENT both sync and async were specified.
723 * KERN_SUCCESS The usual.
724 */
725
726kern_return_t
727vm_msync(
728 vm_map_t map,
729 vm_address_t address,
730 vm_size_t size,
731 vm_sync_t sync_flags)
732{
733 msync_req_t msr;
734 msync_req_t new_msr;
735 queue_chain_t req_q; /* queue of requests for this msync */
736 vm_map_entry_t entry;
737 vm_size_t amount_left;
738 vm_object_offset_t offset;
739 boolean_t do_sync_req;
740 boolean_t modifiable;
741
742
743 if ((sync_flags & VM_SYNC_ASYNCHRONOUS) &&
744 (sync_flags & VM_SYNC_SYNCHRONOUS))
745 return(KERN_INVALID_ARGUMENT);
746
747 /*
748 * align address and size on page boundaries
749 */
750 size = round_page(address + size) - trunc_page(address);
751 address = trunc_page(address);
752
753 if (map == VM_MAP_NULL)
754 return(KERN_INVALID_TASK);
755
756 if (size == 0)
757 return(KERN_SUCCESS);
758
759 queue_init(&req_q);
760 amount_left = size;
761
762 while (amount_left > 0) {
763 vm_size_t flush_size;
764 vm_object_t object;
765
766 vm_map_lock(map);
767 if (!vm_map_lookup_entry(map, address, &entry)) {
768 vm_size_t skip;
769
770 /*
771 * hole in the address map.
772 */
773
774 /*
775 * Check for empty map.
776 */
777 if (entry == vm_map_to_entry(map) &&
778 entry->vme_next == entry) {
779 vm_map_unlock(map);
780 break;
781 }
782 /*
783 * Check that we don't wrap and that
784 * we have at least one real map entry.
785 */
786 if ((map->hdr.nentries == 0) ||
787 (entry->vme_next->vme_start < address)) {
788 vm_map_unlock(map);
789 break;
790 }
791 /*
792 * Move up to the next entry if needed
793 */
794 skip = (entry->vme_next->vme_start - address);
795 if (skip >= amount_left)
796 amount_left = 0;
797 else
798 amount_left -= skip;
799 address = entry->vme_next->vme_start;
800 vm_map_unlock(map);
801 continue;
802 }
803
804 offset = address - entry->vme_start;
805
806 /*
807 * do we have more to flush than is contained in this
808 * entry ?
809 */
810 if (amount_left + entry->vme_start + offset > entry->vme_end) {
811 flush_size = entry->vme_end -
812 (entry->vme_start + offset);
813 } else {
814 flush_size = amount_left;
815 }
816 amount_left -= flush_size;
817 address += flush_size;
818
819 if (entry->is_sub_map == TRUE) {
820 vm_map_t local_map;
821 vm_offset_t local_offset;
822
823 local_map = entry->object.sub_map;
824 local_offset = entry->offset;
825 vm_map_unlock(map);
826 vm_msync(
827 local_map,
828 local_offset,
829 flush_size,
830 sync_flags);
831 continue;
832 }
833 object = entry->object.vm_object;
834
835 /*
836 * We can't sync this object if the object has not been
837 * created yet
838 */
839 if (object == VM_OBJECT_NULL) {
840 vm_map_unlock(map);
841 continue;
842 }
843 offset += entry->offset;
844 modifiable = (entry->protection & VM_PROT_WRITE)
845 != VM_PROT_NONE;
846
847 vm_object_lock(object);
848
849 if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) {
850 boolean_t kill_pages = 0;
851
852 if (sync_flags & VM_SYNC_KILLPAGES) {
853 if (object->ref_count == 1 && !entry->needs_copy && !object->shadow)
854 kill_pages = 1;
855 else
856 kill_pages = -1;
857 }
858 if (kill_pages != -1)
0b4e3aa0 859 vm_object_deactivate_pages(object, offset,
860 (vm_object_size_t)flush_size, kill_pages);
861 vm_object_unlock(object);
862 vm_map_unlock(map);
863 continue;
864 }
865 /*
866 * We can't sync this object if there isn't a pager.
867 * Don't bother to sync internal objects, since there can't
868 * be any "permanent" storage for these objects anyway.
869 */
870 if ((object->pager == MEMORY_OBJECT_NULL) ||
871 (object->internal) || (object->private)) {
872 vm_object_unlock(object);
873 vm_map_unlock(map);
874 continue;
875 }
876 /*
877 * keep reference on the object until syncing is done
878 */
879 assert(object->ref_count > 0);
880 object->ref_count++;
881 vm_object_res_reference(object);
882 vm_object_unlock(object);
883
884 vm_map_unlock(map);
885
0b4e3aa0 886 do_sync_req = vm_object_sync(object,
887 offset,
888 flush_size,
889 sync_flags & VM_SYNC_INVALIDATE,
890 (modifiable &&
891 (sync_flags & VM_SYNC_SYNCHRONOUS ||
892 sync_flags & VM_SYNC_ASYNCHRONOUS)));
893
894 /*
895 * only send a m_o_s if we returned pages or if the entry
896 * is writable (ie dirty pages may have already been sent back)
897 */
898 if (!do_sync_req && !modifiable) {
899 vm_object_deallocate(object);
900 continue;
901 }
902 msync_req_alloc(new_msr);
903
904 vm_object_lock(object);
905 offset += object->paging_offset;
906
907 new_msr->offset = offset;
908 new_msr->length = flush_size;
909 new_msr->object = object;
910 new_msr->flag = VM_MSYNC_SYNCHRONIZING;
911re_iterate:
912 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
913 /*
914 * need to check for overlapping entry, if found, wait
915 * on overlapping msr to be done, then reiterate
916 */
917 msr_lock(msr);
918 if (msr->flag == VM_MSYNC_SYNCHRONIZING &&
919 ((offset >= msr->offset &&
920 offset < (msr->offset + msr->length)) ||
921 (msr->offset >= offset &&
922 msr->offset < (offset + flush_size))))
923 {
924 assert_wait((event_t) msr,THREAD_INTERRUPTIBLE);
925 msr_unlock(msr);
926 vm_object_unlock(object);
927 thread_block((void (*)(void))0);
928 vm_object_lock(object);
929 goto re_iterate;
930 }
931 msr_unlock(msr);
932 }/* queue_iterate */
933
934 queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q);
935 vm_object_unlock(object);
936
937 queue_enter(&req_q, new_msr, msync_req_t, req_q);
938
939 (void) memory_object_synchronize(
940 object->pager,
941 offset,
942 flush_size,
943 sync_flags);
944 }/* while */
945
946 /*
947 * wait for memory_object_synchronize_completed messages from pager(s)
948 */
949
950 while (!queue_empty(&req_q)) {
951 msr = (msync_req_t)queue_first(&req_q);
952 msr_lock(msr);
953 while(msr->flag != VM_MSYNC_DONE) {
954 assert_wait((event_t) msr, THREAD_INTERRUPTIBLE);
955 msr_unlock(msr);
956 thread_block((void (*)(void))0);
957 msr_lock(msr);
958 }/* while */
959 queue_remove(&req_q, msr, msync_req_t, req_q);
960 msr_unlock(msr);
961 vm_object_deallocate(msr->object);
962 msync_req_free(msr);
963	}/* while */
964
965 return(KERN_SUCCESS);
966}/* vm_msync */
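/*
 * [Editorial note -- illustrative sketch, not part of the original file.]
 * The flag combinations documented above translate directly into the
 * user-level call.  Sketch of a synchronous flush of a mapped region,
 * assuming only <mach/mach.h>:
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
flush_mapping(vm_address_t addr, vm_size_t size)
{
	/* Push dirty/precious pages to the pager and wait for completion. */
	return vm_msync(mach_task_self(), addr, size, VM_SYNC_SYNCHRONOUS);
}
#endif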
967
968
969/*
970 * task_wire
971 *
972 * Set or clear the map's wiring_required flag. This flag, if set,
973 * will cause all future virtual memory allocation to allocate
974 * user wired memory. Unwiring pages wired down as a result of
975 * this routine is done with the vm_wire interface.
976 */
977kern_return_t
978task_wire(
979 vm_map_t map,
980 boolean_t must_wire)
981{
982 if (map == VM_MAP_NULL)
983 return(KERN_INVALID_ARGUMENT);
984
985 if (must_wire)
986 map->wiring_required = TRUE;
987 else
988 map->wiring_required = FALSE;
989
990 return(KERN_SUCCESS);
991}
992
993/*
994 * vm_behavior_set sets the paging behavior attribute for the
995 * specified range in the specified map. This routine will fail
996 * with KERN_INVALID_ADDRESS if any address in [start,start+size)
997 * is not a valid allocated or reserved memory region.
998 */
999kern_return_t
1000vm_behavior_set(
1001 vm_map_t map,
1002 vm_offset_t start,
1003 vm_size_t size,
1004 vm_behavior_t new_behavior)
1005{
1006 if (map == VM_MAP_NULL)
1007 return(KERN_INVALID_ARGUMENT);
1008
1009 return(vm_map_behavior_set(map, trunc_page(start),
1010 round_page(start+size), new_behavior));
1011}
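/*
 * [Editorial note -- illustrative sketch, not part of the original file.]
 * Reference-pattern hints feed the paging code; a hedged user-space sketch
 * advising the kernel that a region will be read sequentially:
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
advise_sequential(vm_address_t addr, vm_size_t size)
{
	return vm_behavior_set(mach_task_self(), addr, size,
			       VM_BEHAVIOR_SEQUENTIAL);
}
#endif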
1012
1013#if VM_CPM
1014/*
1015 * Control whether the kernel will permit use of
1016 * vm_allocate_cpm at all.
1017 */
1018unsigned int vm_allocate_cpm_enabled = 1;
1019
1020/*
1021 * Ordinarily, the right to allocate CPM is restricted
1022 * to privileged applications (those that can gain access
1023 * to the host port). Set this variable to zero if you
1024 * want to let any application allocate CPM.
1025 */
1026unsigned int vm_allocate_cpm_privileged = 0;
1027
1028/*
1029 * Allocate memory in the specified map, with the caveat that
1030 * the memory is physically contiguous. This call may fail
1031 * if the system can't find sufficient contiguous memory.
1032 * This call may cause or lead to heart-stopping amounts of
1033 * paging activity.
1034 *
1035 * Memory obtained from this call should be freed in the
1036 * normal way, viz., via vm_deallocate.
1037 */
1038kern_return_t
1039vm_allocate_cpm(
1040 host_priv_t host_priv,
1041 register vm_map_t map,
1042 register vm_offset_t *addr,
1043 register vm_size_t size,
1044 int flags)
1045{
1046 vm_object_t cpm_obj;
1047 pmap_t pmap;
1048 vm_page_t m, pages;
1049 kern_return_t kr;
1050 vm_offset_t va, start, end, offset;
1051#if MACH_ASSERT
1052 extern vm_offset_t avail_start, avail_end;
1053 vm_offset_t prev_addr;
1054#endif /* MACH_ASSERT */
1055
1056 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
1057
1058 if (!vm_allocate_cpm_enabled)
1059 return KERN_FAILURE;
1060
1061 if (vm_allocate_cpm_privileged && host_priv == HOST_PRIV_NULL)
1062 return KERN_INVALID_HOST;
1063
1064 if (map == VM_MAP_NULL)
1065 return KERN_INVALID_ARGUMENT;
1066
1067 assert(host_priv == &realhost);
1068
1069 if (size == 0) {
1070 *addr = 0;
1071 return KERN_SUCCESS;
1072 }
1073
1074 if (anywhere)
1075 *addr = vm_map_min(map);
1076 else
1077 *addr = trunc_page(*addr);
1078 size = round_page(size);
1079
1080 if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS)
1081 return kr;
1082
1083 cpm_obj = vm_object_allocate(size);
1084 assert(cpm_obj != VM_OBJECT_NULL);
1085 assert(cpm_obj->internal);
1086 assert(cpm_obj->size == size);
1087 assert(cpm_obj->can_persist == FALSE);
1088 assert(cpm_obj->pager_created == FALSE);
1089 assert(cpm_obj->pageout == FALSE);
1090 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1091
1092 /*
1093 * Insert pages into object.
1094 */
1095
1096 vm_object_lock(cpm_obj);
1097 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1098 m = pages;
1099 pages = NEXT_PAGE(m);
1100
1101 assert(!m->gobbled);
1102 assert(!m->wanted);
1103 assert(!m->pageout);
1104 assert(!m->tabled);
1105 assert(m->busy);
de355530 1106 assert(m->phys_addr>=avail_start && m->phys_addr<=avail_end);
1107
1108 m->busy = FALSE;
1109 vm_page_insert(m, cpm_obj, offset);
1110 }
1111 assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
1112 vm_object_unlock(cpm_obj);
1113
1114 /*
1115 * Hang onto a reference on the object in case a
1116 * multi-threaded application for some reason decides
1117 * to deallocate the portion of the address space into
1118 * which we will insert this object.
1119 *
1120 * Unfortunately, we must insert the object now before
1121 * we can talk to the pmap module about which addresses
1122 * must be wired down. Hence, the race with a multi-
1123 * threaded app.
1124 */
1125 vm_object_reference(cpm_obj);
1126
1127 /*
1128 * Insert object into map.
1129 */
1130
1131 kr = vm_map_enter(
1132 map,
1133 addr,
1134 size,
1135 (vm_offset_t)0,
1136 flags,
1137 cpm_obj,
1138 (vm_object_offset_t)0,
1139 FALSE,
1140 VM_PROT_ALL,
1141 VM_PROT_ALL,
1142 VM_INHERIT_DEFAULT);
1143
1144 if (kr != KERN_SUCCESS) {
1145 /*
1146 * A CPM object doesn't have can_persist set,
1147 * so all we have to do is deallocate it to
1148 * free up these pages.
1149 */
1150 assert(cpm_obj->pager_created == FALSE);
1151 assert(cpm_obj->can_persist == FALSE);
1152 assert(cpm_obj->pageout == FALSE);
1153 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1154 vm_object_deallocate(cpm_obj); /* kill acquired ref */
1155 vm_object_deallocate(cpm_obj); /* kill creation ref */
1156 }
1157
1158 /*
1159 * Inform the physical mapping system that the
1160 * range of addresses may not fault, so that
1161 * page tables and such can be locked down as well.
1162 */
1163 start = *addr;
1164 end = start + size;
1165 pmap = vm_map_pmap(map);
1166 pmap_pageable(pmap, start, end, FALSE);
1167
1168 /*
1169 * Enter each page into the pmap, to avoid faults.
1170 * Note that this loop could be coded more efficiently,
1171 * if the need arose, rather than looking up each page
1172 * again.
1173 */
1174 for (offset = 0, va = start; offset < size;
1175 va += PAGE_SIZE, offset += PAGE_SIZE) {
1176 vm_object_lock(cpm_obj);
1177 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1178 vm_object_unlock(cpm_obj);
1179 assert(m != VM_PAGE_NULL);
9bccf70c 1180 PMAP_ENTER(pmap, va, m, VM_PROT_ALL,
de355530 1181 VM_WIMG_USE_DEFAULT, TRUE);
1182 }
1183
1184#if MACH_ASSERT
1185 /*
1186 * Verify ordering in address space.
1187 */
1188 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1189 vm_object_lock(cpm_obj);
1190 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1191 vm_object_unlock(cpm_obj);
1192 if (m == VM_PAGE_NULL)
1193 panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
1194 cpm_obj, offset);
1195 assert(m->tabled);
1196 assert(!m->busy);
1197 assert(!m->wanted);
1198 assert(!m->fictitious);
1199 assert(!m->private);
1200 assert(!m->absent);
1201 assert(!m->error);
1202 assert(!m->cleaning);
1203 assert(!m->precious);
1204 assert(!m->clustered);
1205 if (offset != 0) {
de355530 1206 if (m->phys_addr != prev_addr + PAGE_SIZE) {
1207 printf("start 0x%x end 0x%x va 0x%x\n",
1208 start, end, va);
1209 printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
1210 printf("m 0x%x prev_address 0x%x\n", m,
1211 prev_addr);
1212 panic("vm_allocate_cpm: pages not contig!");
1213 }
1214 }
de355530 1215 prev_addr = m->phys_addr;
1216 }
1217#endif /* MACH_ASSERT */
1218
1219 vm_object_deallocate(cpm_obj); /* kill extra ref */
1220
1221 return kr;
1222}
1223
1224
1225#else /* VM_CPM */
1226
1227/*
1228 * Interface is defined in all cases, but unless the kernel
1229 * is built explicitly for this option, the interface does
1230 * nothing.
1231 */
1232
1233kern_return_t
1234vm_allocate_cpm(
1235 host_priv_t host_priv,
1236 register vm_map_t map,
1237 register vm_offset_t *addr,
1238 register vm_size_t size,
1239 int flags)
1240{
1241 return KERN_FAILURE;
1242}
1243
1244/*
1245 */
1246kern_return_t
1247mach_memory_object_memory_entry_64(
1248 host_t host,
1249 boolean_t internal,
1250 vm_object_offset_t size,
1251 vm_prot_t permission,
0b4e3aa0 1252 memory_object_t pager,
1253 ipc_port_t *entry_handle)
1254{
1255 vm_named_entry_t user_object;
1256 ipc_port_t user_handle;
1257 ipc_port_t previous;
1258 kern_return_t kr;
1259
1260 if (host == HOST_NULL)
1261 return(KERN_INVALID_HOST);
1262
1263 user_object = (vm_named_entry_t)
1264 kalloc(sizeof (struct vm_named_entry));
1265 if(user_object == NULL)
1266 return KERN_FAILURE;
1267 named_entry_lock_init(user_object);
1268 user_handle = ipc_port_alloc_kernel();
1269 ip_lock(user_handle);
1270
1271 /* make a sonce right */
1272 user_handle->ip_sorights++;
1273 ip_reference(user_handle);
1274
1275 user_handle->ip_destination = IP_NULL;
1276 user_handle->ip_receiver_name = MACH_PORT_NULL;
1277 user_handle->ip_receiver = ipc_space_kernel;
1278
1279 /* make a send right */
1280 user_handle->ip_mscount++;
1281 user_handle->ip_srights++;
1282 ip_reference(user_handle);
1283
1284 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1285 /* nsrequest unlocks user_handle */
1286
1287 user_object->object = NULL;
1288 user_object->size = size;
1289 user_object->offset = 0;
1290 user_object->backing.pager = pager;
de355530 1291 user_object->protection = permission;
1292 user_object->internal = internal;
1293 user_object->is_sub_map = FALSE;
1294 user_object->ref_count = 1;
1295
1296 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1297 IKOT_NAMED_ENTRY);
1298 *entry_handle = user_handle;
1299 return KERN_SUCCESS;
1300}
1301
1302kern_return_t
1303mach_memory_object_memory_entry(
1304 host_t host,
1305 boolean_t internal,
1306 vm_size_t size,
1307 vm_prot_t permission,
0b4e3aa0 1308 memory_object_t pager,
1309 ipc_port_t *entry_handle)
1310{
1311 return mach_memory_object_memory_entry_64( host, internal,
1312 (vm_object_offset_t)size, permission, pager, entry_handle);
1313}
1314
1315
1316
1317/*
1318 */
1319
1320kern_return_t
1321mach_make_memory_entry_64(
1322 vm_map_t target_map,
1323 vm_object_size_t *size,
1324 vm_object_offset_t offset,
1325 vm_prot_t permission,
1326 ipc_port_t *object_handle,
1327 ipc_port_t parent_entry)
1328{
1329 vm_map_version_t version;
1330 vm_named_entry_t user_object;
1331 ipc_port_t user_handle;
1332 ipc_port_t previous;
1333 kern_return_t kr;
1334 vm_map_t pmap_map;
1335
1336 /* needed for call to vm_map_lookup_locked */
9bccf70c 1337 boolean_t wired;
1c79356b 1338 vm_object_offset_t obj_off;
9bccf70c 1339 vm_prot_t prot;
1340 vm_object_offset_t lo_offset, hi_offset;
1341 vm_behavior_t behavior;
1342 vm_object_t object;
1343 vm_object_t shadow_object;
1344
1345 /* needed for direct map entry manipulation */
1346 vm_map_entry_t map_entry;
1347 vm_map_entry_t next_entry;
1348 vm_map_t local_map;
1349 vm_map_t original_map = target_map;
1350 vm_offset_t local_offset;
1c79356b 1351 vm_object_size_t mappable_size;
1352 vm_object_size_t total_size;
1353
1c79356b 1354
1355 offset = trunc_page_64(offset);
1356 *size = round_page_64(*size);
de355530 1357
1358 user_object = (vm_named_entry_t)
1359 kalloc(sizeof (struct vm_named_entry));
1360 if(user_object == NULL)
1361 return KERN_FAILURE;
1362 named_entry_lock_init(user_object);
1363 user_handle = ipc_port_alloc_kernel();
1364 ip_lock(user_handle);
1365
1366 /* make a sonce right */
1367 user_handle->ip_sorights++;
1368 ip_reference(user_handle);
1369
1370 user_handle->ip_destination = IP_NULL;
1371 user_handle->ip_receiver_name = MACH_PORT_NULL;
1372 user_handle->ip_receiver = ipc_space_kernel;
1373
1374 /* make a send right */
1375 user_handle->ip_mscount++;
1376 user_handle->ip_srights++;
1377 ip_reference(user_handle);
1378
1379 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1380 /* nsrequest unlocks user_handle */
1381
1382 user_object->backing.pager = NULL;
1383 user_object->ref_count = 1;
1384
1385 if(parent_entry == NULL) {
1386 /* Create a named object based on address range within the task map */
1387 /* Go find the object at given address */
1388
de355530 1389 permission &= VM_PROT_ALL;
1390 vm_map_lock_read(target_map);
1391
1392 /* get the object associated with the target address */
1393 /* note we check the permission of the range against */
1394 /* that requested by the caller */
1395
1396 kr = vm_map_lookup_locked(&target_map, offset,
de355530 1397 permission, &version,
1398 &object, &obj_off, &prot, &wired, &behavior,
1399 &lo_offset, &hi_offset, &pmap_map);
1400 if (kr != KERN_SUCCESS) {
1401 vm_map_unlock_read(target_map);
1402 goto make_mem_done;
1403 }
de355530 1404 if (((prot & permission) != permission)
9bccf70c 1405 || (object == kernel_object)) {
1406 kr = KERN_INVALID_RIGHT;
1407 vm_object_unlock(object);
1408 vm_map_unlock_read(target_map);
1409 if(pmap_map != target_map)
1410 vm_map_unlock_read(pmap_map);
1411 if(object == kernel_object) {
1412 printf("Warning: Attempt to create a named"
1413 " entry from the kernel_object\n");
1414 }
1415 goto make_mem_done;
1416 }
1417
1418 /* We have an object, now check to see if this object */
1419 /* is suitable. If not, create a shadow and share that */
1420
1c79356b 1421redo_lookup:
1422 local_map = original_map;
1423 local_offset = offset;
1424 if(target_map != local_map) {
1425 vm_map_unlock_read(target_map);
1426 if(pmap_map != target_map)
1427 vm_map_unlock_read(pmap_map);
1428 vm_map_lock_read(local_map);
1429 target_map = local_map;
1430 pmap_map = local_map;
1431 }
1c79356b 1432 while(TRUE) {
1433 if(!vm_map_lookup_entry(local_map,
1434 local_offset, &map_entry)) {
1435 kr = KERN_INVALID_ARGUMENT;
1436 vm_object_unlock(object);
1437 vm_map_unlock_read(target_map);
1438 if(pmap_map != target_map)
1439 vm_map_unlock_read(pmap_map);
1440 goto make_mem_done;
1441 }
1442 if(!(map_entry->is_sub_map)) {
1443 if(map_entry->object.vm_object != object) {
1444 kr = KERN_INVALID_ARGUMENT;
1445 vm_object_unlock(object);
1446 vm_map_unlock_read(target_map);
1447 if(pmap_map != target_map)
1448 vm_map_unlock_read(pmap_map);
1449 goto make_mem_done;
1450 }
1451 if(map_entry->wired_count) {
1452 object->true_share = TRUE;
1453 }
1454 break;
1455 } else {
1456 vm_map_t tmap;
1457 tmap = local_map;
1c79356b 1458 local_map = map_entry->object.sub_map;
9bccf70c 1459
1c79356b 1460 vm_map_lock_read(local_map);
9bccf70c 1461 vm_map_unlock_read(tmap);
1c79356b 1462 target_map = local_map;
1463 pmap_map = local_map;
1464 local_offset = local_offset - map_entry->vme_start;
1465 local_offset += map_entry->offset;
1466 }
1467 }
de355530 1468 if(((map_entry->max_protection) & permission) != permission) {
1469 kr = KERN_INVALID_RIGHT;
1470 vm_object_unlock(object);
1471 vm_map_unlock_read(target_map);
1472 if(pmap_map != target_map)
1473 vm_map_unlock_read(pmap_map);
1474 goto make_mem_done;
1475 }
1476
1477 mappable_size = hi_offset - obj_off;
1478 total_size = map_entry->vme_end - map_entry->vme_start;
1479 if(*size > mappable_size) {
1480 /* try to extend mappable size if the entries */
1481 /* following are from the same object and are */
1482 /* compatible */
1483 next_entry = map_entry->vme_next;
1484 /* lets see if the next map entry is still */
1485 /* pointing at this object and is contiguous */
1486 while(*size > mappable_size) {
1487 if((next_entry->object.vm_object == object) &&
1488 (next_entry->vme_start ==
1489 next_entry->vme_prev->vme_end) &&
1490 (next_entry->offset ==
1491 next_entry->vme_prev->offset +
1492 (next_entry->vme_prev->vme_end -
1493 next_entry->vme_prev->vme_start))) {
1494 if(((next_entry->max_protection)
de355530 1495 & permission) != permission) {
1496 break;
1497 }
1498 mappable_size += next_entry->vme_end
1499 - next_entry->vme_start;
1500 total_size += next_entry->vme_end
1501 - next_entry->vme_start;
1502 next_entry = next_entry->vme_next;
1503 } else {
1504 break;
1505 }
1506
1507 }
1508 }
1509
1510 if(object->internal) {
1511 /* vm_map_lookup_locked will create a shadow if */
1512 /* needs_copy is set but does not check for the */
1513 /* other two conditions shown. It is important to */
1514 /* set up an object which will not be pulled from */
1515 /* under us. */
1516
0b4e3aa0 1517 if ((map_entry->needs_copy || object->shadowed ||
1518 (object->size > total_size))
1519 && !object->true_share) {
1520 if (vm_map_lock_read_to_write(target_map)) {
1521 vm_map_lock_read(target_map);
1522 goto redo_lookup;
1523 }
1524
de355530 1525
1c79356b 1526 /* create a shadow object */
1527 vm_object_shadow(&map_entry->object.vm_object,
1528 &map_entry->offset, total_size);
1529 shadow_object = map_entry->object.vm_object;
1530 vm_object_unlock(object);
1531 vm_object_pmap_protect(
1532 object, map_entry->offset,
1533 total_size,
1534 ((map_entry->is_shared
1535 || target_map->mapped)
1536 ? PMAP_NULL :
1537 target_map->pmap),
1538 map_entry->vme_start,
1539 map_entry->protection & ~VM_PROT_WRITE);
1540 total_size -= (map_entry->vme_end
1541 - map_entry->vme_start);
1542 next_entry = map_entry->vme_next;
1543 map_entry->needs_copy = FALSE;
1544 while (total_size) {
1545 if(next_entry->object.vm_object == object) {
1546 next_entry->object.vm_object
1547 = shadow_object;
1548 next_entry->offset
1549 = next_entry->vme_prev->offset +
1550 (next_entry->vme_prev->vme_end
1551 - next_entry->vme_prev->vme_start);
1552 next_entry->needs_copy = FALSE;
1553 } else {
1554 panic("mach_make_memory_entry_64:"
1555 " map entries out of sync\n");
1556 }
1557 total_size -=
1558 next_entry->vme_end
1559 - next_entry->vme_start;
1560 next_entry = next_entry->vme_next;
1561 }
1562
1563 object = shadow_object;
1564 vm_object_lock(object);
1565 obj_off = (local_offset - map_entry->vme_start)
1566 + map_entry->offset;
1567 vm_map_lock_write_to_read(target_map);
1c79356b 1568
1569
1570 }
1571 }
1572
1573 /* note: in the future we can (if necessary) allow for */
1574	/* memory object lists; this will better support */
1575	/* fragmentation, but is it necessary? The user should */
1576	/* be encouraged to create address-space-oriented */
1577	/* shared objects from CLEAN memory regions which have */
1578	/* a known and defined history, i.e. no inheritance */
1579	/* sharing; make this call before making the region the */
1580 /* target of ipc's, etc. The code above, protecting */
1581 /* against delayed copy, etc. is mostly defensive. */
1582
1c79356b 1583
d7e50217 1584
de355530 1585 object->true_share = TRUE;
1586 user_object->object = object;
1587 user_object->internal = object->internal;
1588 user_object->is_sub_map = FALSE;
1589 user_object->offset = obj_off;
1590 user_object->protection = permission;
1591
1592 /* the size of mapped entry that overlaps with our region */
1593 /* which is targeted for share. */
1594 /* (entry_end - entry_start) - */
1595 /* offset of our beg addr within entry */
1596 /* it corresponds to this: */
1597
1598 if(*size > mappable_size)
1599 *size = mappable_size;
1600
1601 user_object->size = *size;
1602
1603 /* user_object pager and internal fields are not used */
1604 /* when the object field is filled in. */
1605
1606 object->ref_count++; /* we now point to this object, hold on */
1607 vm_object_res_reference(object);
1608 vm_object_unlock(object);
1609 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1610 IKOT_NAMED_ENTRY);
1c79356b 1611 *object_handle = user_handle;
1612 vm_map_unlock_read(target_map);
1613 if(pmap_map != target_map)
1614 vm_map_unlock_read(pmap_map);
1615 return KERN_SUCCESS;
1616 } else {
1617
1618 vm_named_entry_t parent_object;
1619
1620		/* The new object will be based on an existing named object */
1621 if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
1622 kr = KERN_INVALID_ARGUMENT;
1623 goto make_mem_done;
1624 }
1625 parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
1626 if(permission & parent_object->protection != permission) {
1627 kr = KERN_INVALID_ARGUMENT;
1628 goto make_mem_done;
1629 }
1630 if((offset + *size) > parent_object->size) {
1631 kr = KERN_INVALID_ARGUMENT;
1632 goto make_mem_done;
1633 }
1634
1635 user_object->object = parent_object->object;
1636 user_object->size = *size;
1637 user_object->offset = parent_object->offset + offset;
de355530 1638 user_object->protection = permission;
1639 if(parent_object->is_sub_map) {
1640 user_object->backing.map = parent_object->backing.map;
1641 vm_map_lock(user_object->backing.map);
1642 user_object->backing.map->ref_count++;
1643 vm_map_unlock(user_object->backing.map);
1644 }
1645 else {
1646 user_object->backing.pager = parent_object->backing.pager;
1647 }
1648 user_object->internal = parent_object->internal;
1649 user_object->is_sub_map = parent_object->is_sub_map;
1650
1651 if(parent_object->object != NULL) {
1652 /* we now point to this object, hold on */
1653 vm_object_reference(parent_object->object);
1654 vm_object_lock(parent_object->object);
1655 parent_object->object->true_share = TRUE;
1656 vm_object_unlock(parent_object->object);
1657 }
1658 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1659 IKOT_NAMED_ENTRY);
1660 *object_handle = user_handle;
1661 return KERN_SUCCESS;
1662 }
1663
1664
1665
1666make_mem_done:
1667 ipc_port_dealloc_kernel(user_handle);
1668 kfree((vm_offset_t)user_object, sizeof (struct vm_named_entry));
1669 return kr;
1670}
1671
1672kern_return_t
1673mach_make_memory_entry(
1674 vm_map_t target_map,
1675 vm_size_t *size,
1676 vm_offset_t offset,
1677 vm_prot_t permission,
1678 ipc_port_t *object_handle,
1679 ipc_port_t parent_entry)
1680{
1681 vm_object_offset_t size_64;
1682 kern_return_t kr;
1683
1684 size_64 = (vm_object_offset_t)*size;
1685 kr = mach_make_memory_entry_64(target_map, &size_64,
1686 (vm_object_offset_t)offset, permission, object_handle,
1687 parent_entry);
1688 *size = (vm_size_t)size_64;
1689 return kr;
1690}
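/*
 * [Editorial note -- illustrative sketch, not part of the original file.]
 * The usual client pattern is: carve a named entry out of an existing
 * region with mach_make_memory_entry(), hand the resulting send right to
 * another task, and let that task map it with vm_map().  Hedged sketch of
 * the two halves, assuming the port is transferred out of band:
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
export_region(vm_address_t addr, vm_size_t len, mach_port_t *handle)
{
	vm_size_t size = len;

	/* 'size' is rounded to pages and updated to the mappable size. */
	return mach_make_memory_entry(mach_task_self(), &size, addr,
				      VM_PROT_READ | VM_PROT_WRITE,
				      handle, MACH_PORT_NULL);
}

static kern_return_t
import_region(mach_port_t handle, vm_size_t len, vm_address_t *addr)
{
	*addr = 0;
	/* copy == FALSE gives a shared mapping backed by the same object. */
	return vm_map(mach_task_self(), addr, len, 0, VM_FLAGS_ANYWHERE,
		      handle, 0, FALSE,
		      VM_PROT_READ | VM_PROT_WRITE,
		      VM_PROT_READ | VM_PROT_WRITE,
		      VM_INHERIT_NONE);
}
#endif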
1691
1692/*
1693 */
1694
1695kern_return_t
1696vm_region_object_create(
1697 vm_map_t target_map,
1698 vm_size_t size,
1699 ipc_port_t *object_handle)
1700{
1701 vm_named_entry_t user_object;
1702 ipc_port_t user_handle;
1703 kern_return_t kr;
1704
de355530 1705 pmap_t new_pmap = pmap_create((vm_size_t) 0);
1706 ipc_port_t previous;
1707 vm_map_t new_map;
1708
1709 if(new_pmap == PMAP_NULL)
1710 return KERN_FAILURE;
1711 user_object = (vm_named_entry_t)
1712 kalloc(sizeof (struct vm_named_entry));
1713 if(user_object == NULL) {
de355530 1714 pmap_destroy(new_pmap);
1715 return KERN_FAILURE;
1716 }
1717 named_entry_lock_init(user_object);
1718 user_handle = ipc_port_alloc_kernel();
1719
1720
1721 ip_lock(user_handle);
1722
1723 /* make a sonce right */
1724 user_handle->ip_sorights++;
1725 ip_reference(user_handle);
1726
1727 user_handle->ip_destination = IP_NULL;
1728 user_handle->ip_receiver_name = MACH_PORT_NULL;
1729 user_handle->ip_receiver = ipc_space_kernel;
1730
1731 /* make a send right */
1732 user_handle->ip_mscount++;
1733 user_handle->ip_srights++;
1734 ip_reference(user_handle);
1735
1736 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1737 /* nsrequest unlocks user_handle */
1738
1739 /* Create a named object based on a submap of specified size */
1740
de355530 1741 new_map = vm_map_create(new_pmap, 0, size, TRUE);
1742 user_object->backing.map = new_map;
1743
1744
1745 user_object->object = VM_OBJECT_NULL;
1746 user_object->internal = TRUE;
1747 user_object->is_sub_map = TRUE;
1748 user_object->offset = 0;
1749 user_object->protection = VM_PROT_ALL;
1750 user_object->size = size;
1751 user_object->ref_count = 1;
1752
1753 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1754 IKOT_NAMED_ENTRY);
1755 *object_handle = user_handle;
1756 return KERN_SUCCESS;
1757
1758}
1759
1760/* For a given range, check all map entries. If the entry corresponds to */
1761/* the old vm_region/map provided on the call, replace it with the */
1762/* corresponding range in the new vm_region/map */
1763kern_return_t vm_map_region_replace(
1764 vm_map_t target_map,
1765 ipc_port_t old_region,
1766 ipc_port_t new_region,
1767 vm_offset_t start,
1768 vm_offset_t end)
1769{
1770 vm_named_entry_t old_object;
1771 vm_named_entry_t new_object;
1772 vm_map_t old_submap;
1773 vm_map_t new_submap;
1774 vm_offset_t addr;
1775 vm_map_entry_t entry;
1776 int nested_pmap = 0;
1777
1778
1779 vm_map_lock(target_map);
1780 old_object = (vm_named_entry_t)old_region->ip_kobject;
1781 new_object = (vm_named_entry_t)new_region->ip_kobject;
1782 if((!old_object->is_sub_map) || (!new_object->is_sub_map)) {
1783 vm_map_unlock(target_map);
1784 return KERN_INVALID_ARGUMENT;
1785 }
1786 old_submap = (vm_map_t)old_object->backing.map;
1787 new_submap = (vm_map_t)new_object->backing.map;
1788 vm_map_lock(old_submap);
1789 if((old_submap->min_offset != new_submap->min_offset) ||
1790 (old_submap->max_offset != new_submap->max_offset)) {
1791 vm_map_unlock(old_submap);
1792 vm_map_unlock(target_map);
1793 return KERN_INVALID_ARGUMENT;
1794 }
1795 if(!vm_map_lookup_entry(target_map, start, &entry)) {
1796		/* if the src is not contained, the entry precedes */
1797 /* our range */
1798 addr = entry->vme_start;
1799 if(entry == vm_map_to_entry(target_map)) {
1800 vm_map_unlock(old_submap);
1801 vm_map_unlock(target_map);
1802 return KERN_SUCCESS;
1803 }
de355530 1804 vm_map_lookup_entry(target_map, addr, &entry);
1805 }
1806 addr = entry->vme_start;
1807 vm_map_reference(old_submap);
1808 while((entry != vm_map_to_entry(target_map)) &&
1809 (entry->vme_start < end)) {
1810 if((entry->is_sub_map) &&
1811 (entry->object.sub_map == old_submap)) {
1c79356b 1812 if(entry->use_pmap) {
de355530 1813 if((start & 0xfffffff) ||
1814 ((end - start) != 0x10000000)) {
1815 vm_map_unlock(old_submap);
9bccf70c 1816 vm_map_deallocate(old_submap);
1817 vm_map_unlock(target_map);
1818 return KERN_INVALID_ARGUMENT;
1819 }
1820 nested_pmap = 1;
1821 }
9bccf70c 1822 entry->object.sub_map = new_submap;
1823 vm_map_reference(new_submap);
1824 vm_map_deallocate(old_submap);
1825 }
1826 entry = entry->vme_next;
1827 addr = entry->vme_start;
1828 }
1829 if(nested_pmap) {
1830#ifndef i386
de355530 1831 pmap_unnest(target_map->pmap, start, end - start);
1832 if(target_map->mapped) {
1833 vm_map_submap_pmap_clean(target_map,
1834 start, end, old_submap, 0);
1835 }
1c79356b 1836 pmap_nest(target_map->pmap, new_submap->pmap,
1837 start, end - start);
1838#endif /* i386 */
1c79356b 1839 } else {
1840 vm_map_submap_pmap_clean(target_map,
1841 start, end, old_submap, 0);
1842 }
1843 vm_map_unlock(old_submap);
9bccf70c 1844 vm_map_deallocate(old_submap);
1845 vm_map_unlock(target_map);
1846 return KERN_SUCCESS;
1847}
1848
1849
1850void
1851mach_destroy_memory_entry(
1852 ipc_port_t port)
1853{
1854 vm_named_entry_t named_entry;
1855#if MACH_ASSERT
1856 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
1857#endif /* MACH_ASSERT */
1858 named_entry = (vm_named_entry_t)port->ip_kobject;
1859 mutex_lock(&(named_entry)->Lock);
1860 named_entry->ref_count-=1;
1861 if(named_entry->ref_count == 0) {
1862 if(named_entry->object) {
1863 /* release the memory object we've been pointing to */
1864 vm_object_deallocate(named_entry->object);
1865 }
1866 if(named_entry->is_sub_map) {
1867 vm_map_deallocate(named_entry->backing.map);
1868 }
1869 kfree((vm_offset_t)port->ip_kobject,
1870 sizeof (struct vm_named_entry));
1871 } else
1872 mutex_unlock(&(named_entry)->Lock);
1873}
1874
1875
1876kern_return_t
1877vm_map_page_query(
1878 vm_map_t target_map,
1879 vm_offset_t offset,
1880 int *disposition,
1881 int *ref_count)
1882{
1883 vm_map_entry_t map_entry;
1884 vm_object_t object;
1885 vm_page_t m;
1886
1887restart_page_query:
1888 *disposition = 0;
1889 *ref_count = 0;
1890 vm_map_lock(target_map);
1891 if(!vm_map_lookup_entry(target_map, offset, &map_entry)) {
1892 vm_map_unlock(target_map);
1893 return KERN_FAILURE;
1894 }
1895 offset -= map_entry->vme_start; /* adjust to offset within entry */
1896 offset += map_entry->offset; /* adjust to target object offset */
1897 if(map_entry->object.vm_object != VM_OBJECT_NULL) {
1898 if(!map_entry->is_sub_map) {
1899 object = map_entry->object.vm_object;
1900 } else {
1901 vm_map_unlock(target_map);
1902 target_map = map_entry->object.sub_map;
1903 goto restart_page_query;
1904 }
1905 } else {
1906 vm_map_unlock(target_map);
1907 return KERN_FAILURE;
1908 }
1909 vm_object_lock(object);
1910 vm_map_unlock(target_map);
1911 while(TRUE) {
1912 m = vm_page_lookup(object, offset);
1913 if (m != VM_PAGE_NULL) {
1914 *disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
1915 break;
1916 } else {
1917 if(object->shadow) {
1918 offset += object->shadow_offset;
1919 vm_object_unlock(object);
1920 object = object->shadow;
1921 vm_object_lock(object);
1922 continue;
1923 }
1924 vm_object_unlock(object);
1925 return KERN_FAILURE;
1926 }
1927 }
1928
1929	/* The ref_count is not strictly accurate; it measures the number */
1930	/* of entities holding a ref on the object, and they may not be mapping */
1931	/* the object or may not be mapping the section holding the */
1932	/* target page, but it's still a ballpark number and, though an over- */
1933	/* count, it picks up the copy-on-write cases */
1934
1935 /* We could also get a picture of page sharing from pmap_attributes */
1936 /* but this would under count as only faulted-in mappings would */
1937 /* show up. */
1938
1939 *ref_count = object->ref_count;
1940
1941 if (m->fictitious) {
1942 *disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
1943 vm_object_unlock(object);
1944 return KERN_SUCCESS;
1945 }
1946
1947 if (m->dirty)
1948 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
de355530 1949 else if(pmap_is_modified(m->phys_addr))
1950 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
1951
1952 if (m->reference)
1953 *disposition |= VM_PAGE_QUERY_PAGE_REF;
de355530 1954 else if(pmap_is_referenced(m->phys_addr))
1955 *disposition |= VM_PAGE_QUERY_PAGE_REF;
1956
1957 vm_object_unlock(object);
1958 return KERN_SUCCESS;
1959
1960}
1961
1962kern_return_t
1963set_dp_control_port(
1964 host_priv_t host_priv,
1965 ipc_port_t control_port)
1966{
1967 if (host_priv == HOST_PRIV_NULL)
1968 return (KERN_INVALID_HOST);
1969
1970 if (IP_VALID(dynamic_pager_control_port))
1971 ipc_port_release_send(dynamic_pager_control_port);
1972
1973 dynamic_pager_control_port = control_port;
1974 return KERN_SUCCESS;
1975}
1976
1977kern_return_t
1978get_dp_control_port(
1979 host_priv_t host_priv,
1980 ipc_port_t *control_port)
1981{
1982 if (host_priv == HOST_PRIV_NULL)
1983 return (KERN_INVALID_HOST);
1984
1985 *control_port = ipc_port_copy_send(dynamic_pager_control_port);
1986 return KERN_SUCCESS;
1987
1988}
1989
1990
1991/* Retrieve a upl for an object underlying an address range in a map */
1992
1993kern_return_t
1994vm_map_get_upl(
1995 vm_map_t map,
1996 vm_address_t offset,
1997 vm_size_t *upl_size,
1998 upl_t *upl,
1999 upl_page_info_array_t page_list,
2000 unsigned int *count,
2001 int *flags,
2002 int force_data_sync)
2003{
2004 vm_map_entry_t entry;
2005 int caller_flags;
2006 int sync_cow_data = FALSE;
2007 vm_object_t local_object;
2008 vm_offset_t local_offset;
2009 vm_offset_t local_start;
2010 kern_return_t ret;
2011
2012 caller_flags = *flags;
2013 if (!(caller_flags & UPL_COPYOUT_FROM)) {
2014 sync_cow_data = TRUE;
2015 }
2016 if(upl == NULL)
2017 return KERN_INVALID_ARGUMENT;
2018
2019
2020REDISCOVER_ENTRY:
2021 vm_map_lock(map);
2022 if (vm_map_lookup_entry(map, offset, &entry)) {
2023 if (entry->object.vm_object == VM_OBJECT_NULL ||
2024 !entry->object.vm_object->phys_contiguous) {
2025 if((*upl_size/page_size) > MAX_UPL_TRANSFER) {
2026 *upl_size = MAX_UPL_TRANSFER * page_size;
2027 }
2028 }
2029 if((entry->vme_end - offset) < *upl_size) {
2030 *upl_size = entry->vme_end - offset;
2031 }
2032 if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
2033 if (entry->object.vm_object == VM_OBJECT_NULL) {
2034 *flags = 0;
2035 } else if (entry->object.vm_object->private) {
2036 *flags = UPL_DEV_MEMORY;
2037 if (entry->object.vm_object->phys_contiguous) {
2038 *flags |= UPL_PHYS_CONTIG;
2039 }
2040 } else {
2041 *flags = 0;
2042 }
2043 vm_map_unlock(map);
2044 return KERN_SUCCESS;
2045 }
2046 /*
2047 * Create an object if necessary.
2048 */
2049 if (entry->object.vm_object == VM_OBJECT_NULL) {
2050 entry->object.vm_object = vm_object_allocate(
2051 (vm_size_t)(entry->vme_end - entry->vme_start));
2052 entry->offset = 0;
2053 }
2054 if (!(caller_flags & UPL_COPYOUT_FROM)) {
0b4e3aa0 2055 if (entry->needs_copy) {
2056 vm_map_t local_map;
2057 vm_object_t object;
2058 vm_object_offset_t offset_hi;
2059 vm_object_offset_t offset_lo;
2060 vm_object_offset_t new_offset;
2061 vm_prot_t prot;
2062 boolean_t wired;
2063 vm_behavior_t behavior;
2064 vm_map_version_t version;
2065 vm_map_t pmap_map;
2066
2067 local_map = map;
2068 vm_map_lock_write_to_read(map);
2069 if(vm_map_lookup_locked(&local_map,
2070 offset, VM_PROT_WRITE,
2071 &version, &object,
2072 &new_offset, &prot, &wired,
2073 &behavior, &offset_lo,
2074 &offset_hi, &pmap_map)) {
2075 vm_map_unlock(local_map);
2076 return KERN_FAILURE;
2077 }
2078 if (pmap_map != map) {
2079 vm_map_unlock(pmap_map);
2080 }
2081 vm_object_unlock(object);
2082 vm_map_unlock(local_map);
2083
2084 goto REDISCOVER_ENTRY;
2085 }
2086 }
2087 if (entry->is_sub_map) {
2088 vm_map_t submap;
2089
2090 submap = entry->object.sub_map;
2091 local_start = entry->vme_start;
2092 local_offset = entry->offset;
2093 vm_map_reference(submap);
1c79356b 2094 vm_map_unlock(map);
2095
2096 ret = (vm_map_get_upl(submap,
2097 local_offset + (offset - local_start),
2098 upl_size, upl, page_list, count,
2099 flags, force_data_sync));
2100
2101 vm_map_deallocate(submap);
2102 return ret;
2103 }
2104
150bd074 2105 if (sync_cow_data) {
2106 if (entry->object.vm_object->shadow
2107 || entry->object.vm_object->copy) {
2108 int flags;
2109
2110 local_object = entry->object.vm_object;
2111 local_start = entry->vme_start;
2112 local_offset = entry->offset;
2113 vm_object_reference(local_object);
2114 vm_map_unlock(map);
2115
150bd074 2116 if(local_object->copy == NULL) {
2117 flags = MEMORY_OBJECT_DATA_SYNC;
2118 } else {
2119 flags = MEMORY_OBJECT_COPY_SYNC;
2120 }
2121
2122 if((local_object->paging_offset) &&
2123 (local_object->pager == 0)) {
 2124				/*
 2125				 * Do a little clean-up for our unorthodox
 2126				 * entry into a pager call from a non-pager
 2127				 * context.  Normally the pager code
 2128				 * assumes that an object it has been called
 2129				 * with has a backing pager, and so does
 2130				 * not bother to check the pager field
 2131				 * before relying on the paging_offset.
 2132				 */
2133 vm_object_lock(local_object);
2134 if (local_object->pager == 0) {
2135 local_object->paging_offset = 0;
2136 }
2137 vm_object_unlock(local_object);
2138 }
1c79356b 2139
2140 if (entry->object.vm_object->shadow &&
2141 entry->object.vm_object->copy) {
2142 vm_object_lock_request(
2143 local_object->shadow,
2144 (vm_object_offset_t)
2145 ((offset - local_start) +
2146 local_offset) +
2147 local_object->shadow_offset +
2148 local_object->paging_offset,
2149 *upl_size, FALSE,
2150 MEMORY_OBJECT_DATA_SYNC,
2151 VM_PROT_NO_CHANGE);
2152 }
150bd074 2153 sync_cow_data = FALSE;
0b4e3aa0 2154 vm_object_deallocate(local_object);
150bd074 2155 goto REDISCOVER_ENTRY;
2156 }
2157 }
2158
2159 if (force_data_sync) {
2160
2161 local_object = entry->object.vm_object;
2162 local_start = entry->vme_start;
2163 local_offset = entry->offset;
2164 vm_object_reference(local_object);
1c79356b 2165 vm_map_unlock(map);
1c79356b 2166
2167 if((local_object->paging_offset) &&
2168 (local_object->pager == 0)) {
 2169			/*
 2170			 * Do a little clean-up for our unorthodox
 2171			 * entry into a pager call from a non-pager
 2172			 * context.  Normally the pager code
 2173			 * assumes that an object it has been called
 2174			 * with has a backing pager, and so does
 2175			 * not bother to check the pager field
 2176			 * before relying on the paging_offset.
 2177			 */
2178 vm_object_lock(local_object);
2179 if (local_object->pager == 0) {
2180 local_object->paging_offset = 0;
2181 }
2182 vm_object_unlock(local_object);
2183 }
2184
2185 vm_object_lock_request(
2186 local_object,
2187 (vm_object_offset_t)
2188 ((offset - local_start) + local_offset) +
2189 local_object->paging_offset,
2190 (vm_object_size_t)*upl_size, FALSE,
2191 MEMORY_OBJECT_DATA_SYNC,
0b4e3aa0 2192 VM_PROT_NO_CHANGE);
150bd074 2193 force_data_sync = FALSE;
0b4e3aa0 2194 vm_object_deallocate(local_object);
150bd074 2195 goto REDISCOVER_ENTRY;
2196 }
2197
2198 if(!(entry->object.vm_object->private)) {
2199 if(*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
2200 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
2201 if(entry->object.vm_object->phys_contiguous) {
2202 *flags = UPL_PHYS_CONTIG;
2203 } else {
2204 *flags = 0;
2205 }
2206 } else {
2207 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
2208 }
2209 local_object = entry->object.vm_object;
2210 local_offset = entry->offset;
2211 local_start = entry->vme_start;
2212 vm_object_reference(local_object);
1c79356b 2213 vm_map_unlock(map);
2214 ret = (vm_object_upl_request(local_object,
2215 (vm_object_offset_t)
2216 ((offset - local_start) + local_offset),
2217 *upl_size,
2218 upl,
2219 page_list,
2220 count,
2221 caller_flags));
2222 vm_object_deallocate(local_object);
2223 return(ret);
2224 }
2225
2226 vm_map_unlock(map);
2227 return(KERN_FAILURE);
2228
2229}
2230
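/*
 * A minimal sketch, not part of the original file, of a kernel-internal
 * call to vm_map_get_upl() as defined above.  "map", "addr" and "size"
 * are hypothetical caller variables and error handling is elided.
 */
#if 0	/* illustrative only */
	upl_t			upl;
	upl_page_info_t		pl[MAX_UPL_TRANSFER];
	unsigned int		count = MAX_UPL_TRANSFER;
	vm_size_t		upl_size = size;
	int			flags = UPL_COPYOUT_FROM;	/* pages only read */
	kern_return_t		kr;

	kr = vm_map_get_upl(map, trunc_page(addr), &upl_size, &upl,
			pl, &count, &flags, FALSE /* force_data_sync */);
	if (kr == KERN_SUCCESS) {
		/* upl_size may have been clipped to the map entry, and */
		/* flags now describes the backing object (for example  */
		/* UPL_DEV_MEMORY or UPL_PHYS_CONTIG)                   */
		/* ... use the UPL, then commit or abort it ...         */
	}
#endif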
2231/* ******* Temporary Internal calls to UPL for BSD ***** */
2232kern_return_t
2233kernel_upl_map(
2234 vm_map_t map,
2235 upl_t upl,
2236 vm_offset_t *dst_addr)
2237{
0b4e3aa0 2238 return (vm_upl_map(map, upl, dst_addr));
2239}
2240
2241
2242kern_return_t
2243kernel_upl_unmap(
2244 vm_map_t map,
0b4e3aa0 2245 upl_t upl)
1c79356b 2246{
0b4e3aa0 2247 return(vm_upl_unmap(map, upl));
2248}
2249
2250kern_return_t
2251kernel_upl_commit(
2252 upl_t upl,
2253 upl_page_info_t *pl,
2254 mach_msg_type_number_t count)
1c79356b 2255{
2256 kern_return_t kr;
2257
2258 kr = upl_commit(upl, pl, count);
2259 upl_deallocate(upl);
2260 return kr;
2261}
2262
0b4e3aa0 2263
2264kern_return_t
2265kernel_upl_commit_range(
2266 upl_t upl,
2267 vm_offset_t offset,
2268 vm_size_t size,
2269 int flags,
2270 upl_page_info_array_t pl,
2271 mach_msg_type_number_t count)
1c79356b 2272{
2273 boolean_t finished = FALSE;
2274 kern_return_t kr;
2275
2276 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
2277 flags |= UPL_COMMIT_NOTIFY_EMPTY;
2278
2279 kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
2280
2281 if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
2282 upl_deallocate(upl);
2283
2284 return kr;
2285}
2286
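/*
 * Sketch only, not in the original source: the BSD-facing wrapper above
 * is normally driven from I/O completion.  Passing UPL_COMMIT_FREE_ON_EMPTY
 * lets the wrapper turn it into UPL_COMMIT_NOTIFY_EMPTY and deallocate the
 * UPL once the final range has been committed.  "io_offset", "io_size"
 * and "pl" are hypothetical caller variables.
 */
#if 0	/* illustrative only */
	(void) kernel_upl_commit_range(upl, io_offset, io_size,
			UPL_COMMIT_FREE_ON_EMPTY,
			pl, MAX_UPL_TRANSFER);
#endif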
2287kern_return_t
2288kernel_upl_abort_range(
2289 upl_t upl,
2290 vm_offset_t offset,
2291 vm_size_t size,
2292 int abort_flags)
1c79356b 2293{
2294 kern_return_t kr;
2295 boolean_t finished = FALSE;
1c79356b 2296
2297 if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
2298 abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
1c79356b 2299
0b4e3aa0 2300 kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
1c79356b 2301
2302 if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
2303 upl_deallocate(upl);
1c79356b 2304
0b4e3aa0 2305 return kr;
2306}
2307
1c79356b 2308kern_return_t
2309kernel_upl_abort(
2310 upl_t upl,
2311 int abort_type)
1c79356b 2312{
0b4e3aa0 2313 kern_return_t kr;
1c79356b 2314
2315 kr = upl_abort(upl, abort_type);
2316 upl_deallocate(upl);
2317 return kr;
2318}
2319
2320
2321kern_return_t
2322vm_get_shared_region(
2323 task_t task,
2324 shared_region_mapping_t *shared_region)
2325{
2326 *shared_region = (shared_region_mapping_t) task->system_shared_region;
2327 return KERN_SUCCESS;
2328}
2329
2330kern_return_t
2331vm_set_shared_region(
2332 task_t task,
2333 shared_region_mapping_t shared_region)
2334{
2335 task->system_shared_region = (vm_offset_t) shared_region;
2336 return KERN_SUCCESS;
2337}
2338
2339kern_return_t
2340shared_region_mapping_info(
2341 shared_region_mapping_t shared_region,
2342 ipc_port_t *text_region,
2343 vm_size_t *text_size,
2344 ipc_port_t *data_region,
2345 vm_size_t *data_size,
2346 vm_offset_t *region_mappings,
2347 vm_offset_t *client_base,
2348 vm_offset_t *alt_base,
2349 vm_offset_t *alt_next,
2350 int *flags,
2351 shared_region_mapping_t *next)
2352{
2353 shared_region_mapping_lock(shared_region);
2354
2355 *text_region = shared_region->text_region;
2356 *text_size = shared_region->text_size;
2357 *data_region = shared_region->data_region;
2358 *data_size = shared_region->data_size;
2359 *region_mappings = shared_region->region_mappings;
2360 *client_base = shared_region->client_base;
2361 *alt_base = shared_region->alternate_base;
2362 *alt_next = shared_region->alternate_next;
2363 *flags = shared_region->flags;
2364 *next = shared_region->next;
2365
 2366	shared_region_mapping_unlock(shared_region);
	return KERN_SUCCESS;
 2367}
2368
2369kern_return_t
2370shared_region_object_chain_attach(
2371 shared_region_mapping_t target_region,
2372 shared_region_mapping_t object_chain_region)
2373{
2374 shared_region_object_chain_t object_ele;
2375
2376 if(target_region->object_chain)
2377 return KERN_FAILURE;
2378 object_ele = (shared_region_object_chain_t)
 2379			kalloc(sizeof (struct shared_region_object_chain));
	if (object_ele == NULL)
		return KERN_FAILURE;
 2380	shared_region_mapping_lock(object_chain_region);
2381 target_region->object_chain = object_ele;
2382 object_ele->object_chain_region = object_chain_region;
2383 object_ele->next = object_chain_region->object_chain;
2384 object_ele->depth = object_chain_region->depth;
2385 object_chain_region->depth++;
2386 target_region->alternate_next = object_chain_region->alternate_next;
2387 shared_region_mapping_unlock(object_chain_region);
2388 return KERN_SUCCESS;
2389}
2390
2391kern_return_t
2392shared_region_mapping_create(
2393 ipc_port_t text_region,
2394 vm_size_t text_size,
2395 ipc_port_t data_region,
2396 vm_size_t data_size,
2397 vm_offset_t region_mappings,
2398 vm_offset_t client_base,
2399 shared_region_mapping_t *shared_region,
2400 vm_offset_t alt_base,
2401 vm_offset_t alt_next)
2402{
2403 *shared_region = (shared_region_mapping_t)
2404 kalloc(sizeof (struct shared_region_mapping));
2405 if(*shared_region == NULL)
2406 return KERN_FAILURE;
2407 shared_region_mapping_lock_init((*shared_region));
2408 (*shared_region)->text_region = text_region;
2409 (*shared_region)->text_size = text_size;
2410 (*shared_region)->data_region = data_region;
2411 (*shared_region)->data_size = data_size;
2412 (*shared_region)->region_mappings = region_mappings;
2413 (*shared_region)->client_base = client_base;
2414 (*shared_region)->ref_count = 1;
2415 (*shared_region)->next = NULL;
2416 (*shared_region)->object_chain = NULL;
2417 (*shared_region)->self = *shared_region;
2418 (*shared_region)->flags = 0;
2419 (*shared_region)->depth = 0;
2420 (*shared_region)->alternate_base = alt_base;
2421 (*shared_region)->alternate_next = alt_next;
2422 return KERN_SUCCESS;
2423}
2424
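/*
 * A hedged sketch, not part of the original file, of the typical lifetime
 * of a shared_region_mapping built from the primitives in this section:
 * create it, install it on a task, and balance references.  The port,
 * size and base variables are hypothetical placeholders.
 */
#if 0	/* illustrative only */
	shared_region_mapping_t	region;

	if (shared_region_mapping_create(text_port, text_size,
			data_port, data_size, mapping_table, client_base,
			&region, alt_base, alt_next) == KERN_SUCCESS) {
		shared_region_mapping_ref(region);	/* reference for the task */
		vm_set_shared_region(task, region);
		/* ... */
		shared_region_mapping_dealloc(region);	/* last release tears it down */
	}
#endif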
2425kern_return_t
2426shared_region_mapping_set_alt_next(
2427 shared_region_mapping_t shared_region,
2428 vm_offset_t alt_next)
2429{
2430 shared_region->alternate_next = alt_next;
2431 return KERN_SUCCESS;
2432}
2433
2434kern_return_t
2435shared_region_mapping_ref(
2436 shared_region_mapping_t shared_region)
2437{
2438 if(shared_region == NULL)
2439 return KERN_SUCCESS;
9bccf70c 2440 hw_atomic_add(&shared_region->ref_count, 1);
2441 return KERN_SUCCESS;
2442}
2443
2444kern_return_t
2445shared_region_mapping_dealloc(
2446 shared_region_mapping_t shared_region)
2447{
2448 struct shared_region_task_mappings sm_info;
2449 shared_region_mapping_t next = NULL;
2450
2451 while (shared_region) {
de355530 2452 if (hw_atomic_sub(&shared_region->ref_count, 1) == 0) {
2453 shared_region_mapping_lock(shared_region);
2454
2455 sm_info.text_region = shared_region->text_region;
2456 sm_info.text_size = shared_region->text_size;
2457 sm_info.data_region = shared_region->data_region;
2458 sm_info.data_size = shared_region->data_size;
2459 sm_info.region_mappings = shared_region->region_mappings;
2460 sm_info.client_base = shared_region->client_base;
2461 sm_info.alternate_base = shared_region->alternate_base;
2462 sm_info.alternate_next = shared_region->alternate_next;
2463 sm_info.flags = shared_region->flags;
2464 sm_info.self = (vm_offset_t)shared_region;
2465
2466 lsf_remove_regions_mappings(shared_region, &sm_info);
2467 pmap_remove(((vm_named_entry_t)
2468 (shared_region->text_region->ip_kobject))
2469 ->backing.map->pmap,
2470 sm_info.client_base,
2471 sm_info.client_base + sm_info.text_size);
2472 ipc_port_release_send(shared_region->text_region);
de355530 2473 ipc_port_release_send(shared_region->data_region);
2474 if (shared_region->object_chain) {
2475 next = shared_region->object_chain->object_chain_region;
2476 kfree((vm_offset_t)shared_region->object_chain,
2477 sizeof (struct shared_region_object_chain));
2478 } else {
2479 next = NULL;
2480 }
2481 shared_region_mapping_unlock(shared_region);
2482 kfree((vm_offset_t)shared_region,
1c79356b 2483 sizeof (struct shared_region_mapping));
2484 shared_region = next;
2485 } else {
2486 break;
2487 }
1c79356b 2488 }
2489 return KERN_SUCCESS;
2490}
2491
de355530 2492vm_offset_t
2493vm_map_get_phys_page(
2494 vm_map_t map,
2495 vm_offset_t offset)
2496{
2497 vm_map_entry_t entry;
2498 int ops;
2499 int flags;
de355530 2500 vm_offset_t phys_addr = 0;
2501 vm_object_t object;
2502
2503 vm_map_lock(map);
2504 while (vm_map_lookup_entry(map, offset, &entry)) {
2505
2506 if (entry->object.vm_object == VM_OBJECT_NULL) {
2507 vm_map_unlock(map);
2508 return (vm_offset_t) 0;
2509 }
2510 if (entry->is_sub_map) {
2511 vm_map_t old_map;
2512 vm_map_lock(entry->object.sub_map);
2513 old_map = map;
2514 map = entry->object.sub_map;
2515 offset = entry->offset + (offset - entry->vme_start);
2516 vm_map_unlock(old_map);
2517 continue;
2518 }
2519 if (entry->object.vm_object->phys_contiguous) {
 2520			/* These are not standard pageable memory mappings. */
 2521			/* If they are not present in the object, they will */
2522 /* have to be picked up from the pager through the */
2523 /* fault mechanism. */
2524 if(entry->object.vm_object->shadow_offset == 0) {
2525 /* need to call vm_fault */
2526 vm_map_unlock(map);
2527 vm_fault(map, offset, VM_PROT_NONE,
2528 FALSE, THREAD_UNINT, NULL, 0);
2529 vm_map_lock(map);
2530 continue;
2531 }
2532 offset = entry->offset + (offset - entry->vme_start);
de355530 2533 phys_addr = entry->object.vm_object->shadow_offset + offset;
2534 break;
2535
2536 }
2537 offset = entry->offset + (offset - entry->vme_start);
2538 object = entry->object.vm_object;
2539 vm_object_lock(object);
2540 while (TRUE) {
2541 vm_page_t dst_page = vm_page_lookup(object,offset);
2542 if(dst_page == VM_PAGE_NULL) {
2543 if(object->shadow) {
2544 vm_object_t old_object;
2545 vm_object_lock(object->shadow);
2546 old_object = object;
2547 offset = offset + object->shadow_offset;
2548 object = object->shadow;
2549 vm_object_unlock(old_object);
2550 } else {
2551 vm_object_unlock(object);
2552 break;
2553 }
2554 } else {
de355530 2555 phys_addr = dst_page->phys_addr;
2556 vm_object_unlock(object);
2557 break;
2558 }
2559 }
2560 break;
2561
2562 }
2563
2564 vm_map_unlock(map);
de355530 2565 return phys_addr;
2566}
2567#endif /* VM_CPM */
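/*
 * Editorial sketch, not in the original source: vm_map_get_phys_page()
 * walks submaps and shadow chains to translate a mapped offset into a
 * physical address, returning 0 when no page is resident.  "vaddr" is a
 * hypothetical caller variable; kernel_map is the kernel's own vm_map.
 */
#if 0	/* illustrative only */
	vm_offset_t	phys;

	phys = vm_map_get_phys_page(kernel_map, trunc_page(vaddr));
	if (phys == 0) {
		/* nothing resident at that offset; the caller would have */
		/* to fault the page in (or go to the pager) first        */
	}
#endif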