osfmk/vm/vm_user.c (xnu-344.21.73)
1 /*
2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55 /*
56 * File: vm/vm_user.c
57 * Author: Avadis Tevanian, Jr., Michael Wayne Young
58 *
59 * User-exported virtual memory functions.
60 */
61
62 #include <vm_cpm.h>
63 #include <mach/boolean.h>
64 #include <mach/kern_return.h>
65 #include <mach/mach_types.h> /* to get vm_address_t */
66 #include <mach/memory_object.h>
67 #include <mach/std_types.h> /* to get pointer_t */
68 #include <mach/vm_attributes.h>
69 #include <mach/vm_param.h>
70 #include <mach/vm_statistics.h>
71 #include <mach/vm_map_server.h>
72 #include <mach/mach_syscalls.h>
73
74 #include <mach/shared_memory_server.h>
75 #include <vm/vm_shared_memory_server.h>
76
77 #include <kern/host.h>
78 #include <kern/task.h>
79 #include <kern/misc_protos.h>
80 #include <vm/vm_map.h>
81 #include <vm/vm_object.h>
82 #include <vm/vm_page.h>
83 #include <vm/memory_object.h>
84 #include <vm/vm_pageout.h>
85
86
87
88 vm_size_t upl_offset_to_pagelist = 0;
89
90 #if VM_CPM
91 #include <vm/cpm.h>
92 #endif /* VM_CPM */
93
94 ipc_port_t dynamic_pager_control_port=NULL;
95
96 /*
97 * vm_allocate allocates "zero fill" memory in the specified
98 * map.
99 */
100 kern_return_t
101 vm_allocate(
102 register vm_map_t map,
103 register vm_offset_t *addr,
104 register vm_size_t size,
105 int flags)
106 {
107 kern_return_t result;
108 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
109
110 if (map == VM_MAP_NULL)
111 return(KERN_INVALID_ARGUMENT);
112 if (size == 0) {
113 *addr = 0;
114 return(KERN_SUCCESS);
115 }
116
117 if (anywhere)
118 *addr = vm_map_min(map);
119 else
120 *addr = trunc_page_32(*addr);
121 size = round_page_32(size);
122 if (size == 0) {
123 return(KERN_INVALID_ARGUMENT);
124 }
125
126 result = vm_map_enter(
127 map,
128 addr,
129 size,
130 (vm_offset_t)0,
131 flags,
132 VM_OBJECT_NULL,
133 (vm_object_offset_t)0,
134 FALSE,
135 VM_PROT_DEFAULT,
136 VM_PROT_ALL,
137 VM_INHERIT_DEFAULT);
138
139 return(result);
140 }
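/*
 * A minimal illustrative sketch of how a kernel-side caller might use
 * vm_allocate()/vm_deallocate() above; the helper name is hypothetical
 * and the map is assumed to come from current_map().  Compiled out.
 */
#if 0
static kern_return_t
example_zero_fill_scratch(void)
{
	vm_map_t	map = current_map();
	vm_offset_t	addr = 0;
	vm_size_t	size = 4 * PAGE_SIZE;
	kern_return_t	kr;

	/* let the map pick an address; the pages come back zero filled */
	kr = vm_allocate(map, &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use the scratch pages at addr ... */

	/* release the whole range when done */
	return vm_deallocate(map, addr, size);
}
#endif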
141
142 /*
143 * vm_deallocate deallocates the specified range of addresses in the
144 * specified address map.
145 */
146 kern_return_t
147 vm_deallocate(
148 register vm_map_t map,
149 vm_offset_t start,
150 vm_size_t size)
151 {
152 if (map == VM_MAP_NULL)
153 return(KERN_INVALID_ARGUMENT);
154
155 if (size == (vm_offset_t) 0)
156 return(KERN_SUCCESS);
157
158 return(vm_map_remove(map, trunc_page_32(start),
159 round_page_32(start+size), VM_MAP_NO_FLAGS));
160 }
161
162 /*
163 * vm_inherit sets the inheritance of the specified range in the
164 * specified map.
165 */
166 kern_return_t
167 vm_inherit(
168 register vm_map_t map,
169 vm_offset_t start,
170 vm_size_t size,
171 vm_inherit_t new_inheritance)
172 {
173 if (map == VM_MAP_NULL)
174 return(KERN_INVALID_ARGUMENT);
175
176 if (new_inheritance > VM_INHERIT_LAST_VALID)
177 return(KERN_INVALID_ARGUMENT);
178
179 return(vm_map_inherit(map,
180 trunc_page_32(start),
181 round_page_32(start+size),
182 new_inheritance));
183 }
184
185 /*
186 * vm_protect sets the protection of the specified range in the
187 * specified map.
188 */
189
190 kern_return_t
191 vm_protect(
192 register vm_map_t map,
193 vm_offset_t start,
194 vm_size_t size,
195 boolean_t set_maximum,
196 vm_prot_t new_protection)
197 {
198 if ((map == VM_MAP_NULL) ||
199 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
200 return(KERN_INVALID_ARGUMENT);
201
202 return(vm_map_protect(map,
203 trunc_page_32(start),
204 round_page_32(start+size),
205 new_protection,
206 set_maximum));
207 }
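/*
 * Illustrative sketch of the set_maximum distinction in vm_protect():
 * first clamp the maximum protection of a range, then lower its current
 * protection.  The helper name and arguments are hypothetical.
 * Compiled out.
 */
#if 0
static kern_return_t
example_make_readonly(vm_map_t map, vm_offset_t start, vm_size_t size)
{
	kern_return_t	kr;

	/* set_maximum == TRUE: writes can never be re-enabled on this range */
	kr = vm_protect(map, start, size, TRUE, VM_PROT_READ);
	if (kr != KERN_SUCCESS)
		return kr;

	/* set_maximum == FALSE: drop the current protection to read-only */
	return vm_protect(map, start, size, FALSE, VM_PROT_READ);
}
#endif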
208
209 /*
210 * Handle machine-specific attributes for a mapping, such
211 * as cacheability, migratability, etc.
212 */
213 kern_return_t
214 vm_machine_attribute(
215 vm_map_t map,
216 vm_address_t address,
217 vm_size_t size,
218 vm_machine_attribute_t attribute,
219 vm_machine_attribute_val_t* value) /* IN/OUT */
220 {
221 if (map == VM_MAP_NULL)
222 return(KERN_INVALID_ARGUMENT);
223
224 return vm_map_machine_attribute(map, address, size, attribute, value);
225 }
226
227 kern_return_t
228 vm_read(
229 vm_map_t map,
230 vm_address_t address,
231 vm_size_t size,
232 pointer_t *data,
233 mach_msg_type_number_t *data_size)
234 {
235 kern_return_t error;
236 vm_map_copy_t ipc_address;
237
238 if (map == VM_MAP_NULL)
239 return(KERN_INVALID_ARGUMENT);
240
241 if ((error = vm_map_copyin(map,
242 address,
243 size,
244 FALSE, /* src_destroy */
245 &ipc_address)) == KERN_SUCCESS) {
246 *data = (pointer_t) ipc_address;
247 *data_size = size;
248 }
249 return(error);
250 }
251
252 kern_return_t
253 vm_read_list(
254 vm_map_t map,
255 vm_read_entry_t data_list,
256 mach_msg_type_number_t count)
257 {
258 mach_msg_type_number_t i;
259 kern_return_t error = KERN_SUCCESS; /* covers the count == 0 case */
260 vm_map_copy_t ipc_address;
261
262 if (map == VM_MAP_NULL)
263 return(KERN_INVALID_ARGUMENT);
264
265 for(i=0; i<count; i++) {
266 error = vm_map_copyin(map,
267 data_list[i].address,
268 data_list[i].size,
269 FALSE, /* src_destroy */
270 &ipc_address);
271 if(error != KERN_SUCCESS) {
272 data_list[i].address = (vm_address_t)0;
273 data_list[i].size = (vm_size_t)0;
274 break;
275 }
276 if(data_list[i].size != 0) {
277 error = vm_map_copyout(current_task()->map,
278 &(data_list[i].address),
279 (vm_map_copy_t) ipc_address);
280 if(error != KERN_SUCCESS) {
281 data_list[i].address = (vm_address_t)0;
282 data_list[i].size = (vm_size_t)0;
283 break;
284 }
285 }
286 }
287 return(error);
288 }
289
290 /*
291 * This routine reads from the specified map and overwrites part of the current
292 * activation's map.  By assuming that the current thread is local, it is
293 * no longer cluster-safe without a fully supportive local proxy thread/
294 * task (but we don't support clusters anymore, so this is moot).
295 */
296
297 #define VM_OVERWRITE_SMALL 512
298
299 kern_return_t
300 vm_read_overwrite(
301 vm_map_t map,
302 vm_address_t address,
303 vm_size_t size,
304 vm_address_t data,
305 vm_size_t *data_size)
306 {
307 struct {
308 long align;
309 char buf[VM_OVERWRITE_SMALL];
310 } inbuf;
311 vm_map_t oldmap;
312 kern_return_t error = KERN_SUCCESS;
313 vm_map_copy_t copy;
314
315 if (map == VM_MAP_NULL)
316 return(KERN_INVALID_ARGUMENT);
317
318 if (size <= VM_OVERWRITE_SMALL) {
319 if(vm_map_read_user(map, (vm_offset_t)address,
320 (vm_offset_t)&inbuf, size)) {
321 error = KERN_INVALID_ADDRESS;
322 } else {
323 if(vm_map_write_user(current_map(),
324 (vm_offset_t)&inbuf, (vm_offset_t)data, size))
325 error = KERN_INVALID_ADDRESS;
326 }
327 }
328 else {
329 if ((error = vm_map_copyin(map,
330 address,
331 size,
332 FALSE, /* src_destroy */
333 &copy)) == KERN_SUCCESS) {
334 if ((error = vm_map_copy_overwrite(
335 current_act()->map,
336 data,
337 copy,
338 FALSE)) == KERN_SUCCESS) {
339 }
340 else {
341 vm_map_copy_discard(copy);
342 }
343 }
344 }
345 *data_size = size;
346 return(error);
347 }
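/*
 * Illustrative sketch of vm_read_overwrite(): copy bytes from another
 * task's map into a buffer that already exists in the caller's own map.
 * The helper name and parameters are hypothetical.  Compiled out.
 */
#if 0
static kern_return_t
example_peek_remote(vm_map_t src_map, vm_address_t src_addr,
		    vm_address_t dst_addr, vm_size_t len)
{
	vm_size_t	copied = 0;

	/* requests up to VM_OVERWRITE_SMALL bytes go through a stack buffer, */
	/* larger ones through vm_map_copyin()/vm_map_copy_overwrite()        */
	return vm_read_overwrite(src_map, src_addr, len, dst_addr, &copied);
}
#endif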
348
349
350
351
352 /*ARGSUSED*/
353 kern_return_t
354 vm_write(
355 vm_map_t map,
356 vm_address_t address,
357 vm_offset_t data,
358 mach_msg_type_number_t size)
359 {
360 if (map == VM_MAP_NULL)
361 return KERN_INVALID_ARGUMENT;
362
363 return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
364 FALSE /* interruptible XXX */);
365 }
366
367 kern_return_t
368 vm_copy(
369 vm_map_t map,
370 vm_address_t source_address,
371 vm_size_t size,
372 vm_address_t dest_address)
373 {
374 vm_map_copy_t copy;
375 kern_return_t kr;
376
377 if (map == VM_MAP_NULL)
378 return KERN_INVALID_ARGUMENT;
379
380 kr = vm_map_copyin(map, source_address, size,
381 FALSE, &copy);
382 if (kr != KERN_SUCCESS)
383 return kr;
384
385 kr = vm_map_copy_overwrite(map, dest_address, copy,
386 FALSE /* interruptible XXX */);
387 if (kr != KERN_SUCCESS) {
388 vm_map_copy_discard(copy);
389 return kr;
390 }
391
392 return KERN_SUCCESS;
393 }
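/*
 * Illustrative sketch of vm_copy(): duplicate a page-aligned range within
 * a single map; the destination must already be allocated and writable,
 * since the copy is overwritten onto existing memory.  The helper name is
 * hypothetical.  Compiled out.
 */
#if 0
static kern_return_t
example_duplicate_range(vm_map_t map, vm_address_t src,
			vm_address_t dst, vm_size_t len)
{
	return vm_copy(map, src, len, dst);
}
#endif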
394
395 /*
396 * Routine: vm_map
397 */
398 kern_return_t
399 vm_map_64(
400 vm_map_t target_map,
401 vm_offset_t *address,
402 vm_size_t initial_size,
403 vm_offset_t mask,
404 int flags,
405 ipc_port_t port,
406 vm_object_offset_t offset,
407 boolean_t copy,
408 vm_prot_t cur_protection,
409 vm_prot_t max_protection,
410 vm_inherit_t inheritance)
411 {
412 register
413 vm_object_t object;
414 vm_prot_t prot;
415 vm_object_size_t size = (vm_object_size_t)initial_size;
416 kern_return_t result;
417
418 /*
419 * Check arguments for validity
420 */
421 if ((target_map == VM_MAP_NULL) ||
422 (cur_protection & ~VM_PROT_ALL) ||
423 (max_protection & ~VM_PROT_ALL) ||
424 (inheritance > VM_INHERIT_LAST_VALID) ||
425 size == 0)
426 return(KERN_INVALID_ARGUMENT);
427
428 /*
429 * Find the vm object (if any) corresponding to this port.
430 */
431 if (!IP_VALID(port)) {
432 object = VM_OBJECT_NULL;
433 offset = 0;
434 copy = FALSE;
435 } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
436 vm_named_entry_t named_entry;
437
438 named_entry = (vm_named_entry_t)port->ip_kobject;
439 /* a few checks to make sure user is obeying rules */
440 if(size == 0) {
441 if(offset >= named_entry->size)
442 return(KERN_INVALID_RIGHT);
443 size = named_entry->size - offset;
444 }
445 if((named_entry->protection & max_protection) != max_protection)
446 return(KERN_INVALID_RIGHT);
447 if((named_entry->protection & cur_protection) != cur_protection)
448 return(KERN_INVALID_RIGHT);
449 if(named_entry->size < (offset + size))
450 return(KERN_INVALID_ARGUMENT);
451
452 /* the caller's parameter offset is defined to be the */
453 /* offset from the beginning of the named entry's offset in the object */
454 offset = offset + named_entry->offset;
455
456 named_entry_lock(named_entry);
457 if(named_entry->is_sub_map) {
458 vm_map_entry_t map_entry;
459
460 named_entry_unlock(named_entry);
461 *address = trunc_page_32(*address);
462 size = round_page_64(size);
463 vm_object_reference(vm_submap_object);
464 if ((result = vm_map_enter(target_map,
465 address, size, mask, flags,
466 vm_submap_object, 0,
467 FALSE,
468 cur_protection, max_protection, inheritance
469 )) != KERN_SUCCESS) {
470 vm_object_deallocate(vm_submap_object);
471 } else {
472 char alias;
473
474 VM_GET_FLAGS_ALIAS(flags, alias);
475 if ((alias == VM_MEMORY_SHARED_PMAP) &&
476 !copy) {
477 vm_map_submap(target_map, *address,
478 (*address) + size,
479 named_entry->backing.map,
480 (vm_offset_t)offset, TRUE);
481 } else {
482 vm_map_submap(target_map, *address,
483 (*address) + size,
484 named_entry->backing.map,
485 (vm_offset_t)offset, FALSE);
486 }
487 if(copy) {
488 if(vm_map_lookup_entry(
489 target_map, *address, &map_entry)) {
490 map_entry->needs_copy = TRUE;
491 }
492 }
493 }
494 return(result);
495
496 } else if(named_entry->object) {
497 /* This is the case where we are going to map */
498 /* an already mapped object. If the object is */
499 /* not ready, it is internal. An external */
500 /* object cannot be mapped until it is ready, */
501 /* so we can avoid the ready check */
502 /* in this case. */
503 named_entry_unlock(named_entry);
504 vm_object_reference(named_entry->object);
505 object = named_entry->object;
506 } else {
507 unsigned int access;
508 vm_prot_t protections;
509 unsigned int wimg_mode;
510 boolean_t cache_attr;
511
512 protections = named_entry->protection
513 & VM_PROT_ALL;
514 access = GET_MAP_MEM(named_entry->protection);
515
516 object = vm_object_enter(
517 named_entry->backing.pager,
518 named_entry->size,
519 named_entry->internal,
520 FALSE,
521 FALSE);
522 if (object == VM_OBJECT_NULL) {
523 named_entry_unlock(named_entry);
524 return(KERN_INVALID_OBJECT);
525 }
526
527 vm_object_lock(object);
528
529 /* create an extra ref for the named entry */
530 vm_object_reference_locked(object);
531 named_entry->object = object;
532 named_entry_unlock(named_entry);
533
534 wimg_mode = object->wimg_bits;
535 if(access == MAP_MEM_IO) {
536 wimg_mode = VM_WIMG_IO;
537 } else if (access == MAP_MEM_COPYBACK) {
538 wimg_mode = VM_WIMG_USE_DEFAULT;
539 } else if (access == MAP_MEM_WTHRU) {
540 wimg_mode = VM_WIMG_WTHRU;
541 } else if (access == MAP_MEM_WCOMB) {
542 wimg_mode = VM_WIMG_WCOMB;
543 }
544 if ((wimg_mode == VM_WIMG_IO)
545 || (wimg_mode == VM_WIMG_WCOMB))
546 cache_attr = TRUE;
547 else
548 cache_attr = FALSE;
549
550 if (named_entry->backing.pager) {
551 /* wait for object (if any) to be ready */
552 while (!object->pager_ready) {
553 vm_object_wait(object,
554 VM_OBJECT_EVENT_PAGER_READY,
555 THREAD_UNINT);
556 vm_object_lock(object);
557 }
558 }
559 if(object->wimg_bits != wimg_mode) {
560 vm_page_t p;
561
562 vm_object_paging_wait(object, THREAD_UNINT);
563
564 object->wimg_bits = wimg_mode;
565 queue_iterate(&object->memq, p, vm_page_t, listq) {
566 if (!p->fictitious) {
567 pmap_page_protect(
568 p->phys_page,
569 VM_PROT_NONE);
570 if(cache_attr)
571 pmap_sync_caches_phys(
572 p->phys_page);
573 }
574 }
575 }
576 object->true_share = TRUE;
577 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
578 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
579 vm_object_unlock(object);
580 }
581 } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
582 /*
583 * JMM - This is temporary until we unify named entries
584 * and raw memory objects.
585 *
586 * Detected fake ip_kotype for a memory object. In
587 * this case, the port isn't really a port at all, but
588 * instead is just a raw memory object.
589 */
590
591 if ((object = vm_object_enter((memory_object_t)port,
592 size, FALSE, FALSE, FALSE))
593 == VM_OBJECT_NULL)
594 return(KERN_INVALID_OBJECT);
595
596 /* wait for object (if any) to be ready */
597 if (object != VM_OBJECT_NULL) {
598 if(object == kernel_object) {
599 printf("Warning: Attempt to map kernel object"
600 " by a non-private kernel entity\n");
601 return(KERN_INVALID_OBJECT);
602 }
603 vm_object_lock(object);
604 while (!object->pager_ready) {
605 vm_object_wait(object,
606 VM_OBJECT_EVENT_PAGER_READY,
607 THREAD_UNINT);
608 vm_object_lock(object);
609 }
610 vm_object_unlock(object);
611 }
612 } else {
613 return (KERN_INVALID_OBJECT);
614 }
615
616 *address = trunc_page_32(*address);
617 size = round_page_64(size);
618
619 /*
620 * Perform the copy if requested
621 */
622
623 if (copy) {
624 vm_object_t new_object;
625 vm_object_offset_t new_offset;
626
627 result = vm_object_copy_strategically(object, offset, size,
628 &new_object, &new_offset,
629 &copy);
630
631
632 if (result == KERN_MEMORY_RESTART_COPY) {
633 boolean_t success;
634 boolean_t src_needs_copy;
635
636 /*
637 * XXX
638 * We currently ignore src_needs_copy.
639 * This really is the issue of how to make
640 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
641 * non-kernel users to use. Solution forthcoming.
642 * In the meantime, since we don't allow non-kernel
643 * memory managers to specify symmetric copy,
644 * we won't run into problems here.
645 */
646 new_object = object;
647 new_offset = offset;
648 success = vm_object_copy_quickly(&new_object,
649 new_offset, size,
650 &src_needs_copy,
651 &copy);
652 assert(success);
653 result = KERN_SUCCESS;
654 }
655 /*
656 * Throw away the reference to the
657 * original object, as it won't be mapped.
658 */
659
660 vm_object_deallocate(object);
661
662 if (result != KERN_SUCCESS)
663 return (result);
664
665 object = new_object;
666 offset = new_offset;
667 }
668
669 if ((result = vm_map_enter(target_map,
670 address, size, mask, flags,
671 object, offset,
672 copy,
673 cur_protection, max_protection, inheritance
674 )) != KERN_SUCCESS)
675 vm_object_deallocate(object);
676 return(result);
677 }
678
679 /* temporary, until world build */
680 kern_return_t
681 vm_map(
682 vm_map_t target_map,
683 vm_offset_t *address,
684 vm_size_t size,
685 vm_offset_t mask,
686 int flags,
687 ipc_port_t port,
688 vm_offset_t offset,
689 boolean_t copy,
690 vm_prot_t cur_protection,
691 vm_prot_t max_protection,
692 vm_inherit_t inheritance)
693 {
694 return vm_map_64(target_map, address, size, mask, flags,
695 port, (vm_object_offset_t)offset, copy,
696 cur_protection, max_protection, inheritance);
697 }
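/*
 * Illustrative sketch of mapping a named-entry port with vm_map(); the
 * handle would typically come from mach_make_memory_entry_64() further
 * below.  The helper name and parameters are hypothetical.  Compiled out.
 */
#if 0
static kern_return_t
example_map_named_entry(vm_map_t target, ipc_port_t entry_handle,
			vm_size_t len, vm_offset_t *out_addr)
{
	*out_addr = 0;
	return vm_map(target,
		      out_addr,			/* kernel picks the address */
		      len,
		      0,			/* no alignment mask */
		      VM_FLAGS_ANYWHERE,
		      entry_handle,		/* named entry (or memory object) port */
		      0,			/* offset within the entry */
		      FALSE,			/* share, don't copy */
		      VM_PROT_READ | VM_PROT_WRITE,
		      VM_PROT_READ | VM_PROT_WRITE,
		      VM_INHERIT_DEFAULT);
}
#endif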
698
699
700 /*
701 * NOTE: this routine (and this file) will no longer require mach_host_server.h
702 * when vm_wire is changed to use ledgers.
703 */
704 #include <mach/mach_host_server.h>
705 /*
706 * Specify that the range of the virtual address space
707 * of the target task must not cause page faults for
708 * the indicated accesses.
709 *
710 * [ To unwire the pages, specify VM_PROT_NONE. ]
711 */
712 kern_return_t
713 vm_wire(
714 host_priv_t host_priv,
715 register vm_map_t map,
716 vm_offset_t start,
717 vm_size_t size,
718 vm_prot_t access)
719 {
720 kern_return_t rc;
721
722 if (host_priv == HOST_PRIV_NULL)
723 return KERN_INVALID_HOST;
724
725 assert(host_priv == &realhost);
726
727 if (map == VM_MAP_NULL)
728 return KERN_INVALID_TASK;
729
730 if (access & ~VM_PROT_ALL)
731 return KERN_INVALID_ARGUMENT;
732
733 if (access != VM_PROT_NONE) {
734 rc = vm_map_wire(map, trunc_page_32(start),
735 round_page_32(start+size), access, TRUE);
736 } else {
737 rc = vm_map_unwire(map, trunc_page_32(start),
738 round_page_32(start+size), TRUE);
739 }
740 return rc;
741 }
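/*
 * Illustrative sketch of wiring and later unwiring a range with vm_wire().
 * It assumes the caller may use the kernel's own host-privileged port,
 * &realhost; passing VM_PROT_NONE for the access argument unwires.  The
 * helper name is hypothetical.  Compiled out.
 */
#if 0
static kern_return_t
example_wire_buffer(vm_map_t map, vm_offset_t start, vm_size_t len)
{
	kern_return_t	kr;

	/* fault the pages in and keep them resident for read/write */
	kr = vm_wire(&realhost, map, start, len,
		     VM_PROT_READ | VM_PROT_WRITE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... do work that cannot tolerate page faults ... */

	/* VM_PROT_NONE means "unwire" */
	return vm_wire(&realhost, map, start, len, VM_PROT_NONE);
}
#endif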
742
743 /*
744 * vm_msync
745 *
746 * Synchronises the specified memory range with its backing store
747 * image by either flushing or cleaning the contents to the appropriate
748 * memory manager, engaging in a memory object synchronize dialog with
749 * the manager. The client doesn't return until the manager issues an
750 * m_o_s_completed message. MIG magically converts the user task parameter
751 * to the task's address map.
752 *
753 * interpretation of sync_flags
754 * VM_SYNC_INVALIDATE - discard pages, only return precious
755 * pages to manager.
756 *
757 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
758 * - discard pages, write dirty or precious
759 * pages back to memory manager.
760 *
761 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
762 * - write dirty or precious pages back to
763 * the memory manager.
764 *
765 * NOTE
766 * The memory object attributes have not yet been implemented; this
767 * function will have to deal with the invalidate attribute.
768 *
769 * RETURNS
770 * KERN_INVALID_TASK Bad task parameter
771 * KERN_INVALID_ARGUMENT both sync and async were specified.
772 * KERN_SUCCESS The usual.
773 */
774
775 kern_return_t
776 vm_msync(
777 vm_map_t map,
778 vm_address_t address,
779 vm_size_t size,
780 vm_sync_t sync_flags)
781 {
782 msync_req_t msr;
783 msync_req_t new_msr;
784 queue_chain_t req_q; /* queue of requests for this msync */
785 vm_map_entry_t entry;
786 vm_size_t amount_left;
787 vm_object_offset_t offset;
788 boolean_t do_sync_req;
789 boolean_t modifiable;
790
791
792 if ((sync_flags & VM_SYNC_ASYNCHRONOUS) &&
793 (sync_flags & VM_SYNC_SYNCHRONOUS))
794 return(KERN_INVALID_ARGUMENT);
795
796 /*
797 * align address and size on page boundaries
798 */
799 size = round_page_32(address + size) - trunc_page_32(address);
800 address = trunc_page_32(address);
801
802 if (map == VM_MAP_NULL)
803 return(KERN_INVALID_TASK);
804
805 if (size == 0)
806 return(KERN_SUCCESS);
807
808 queue_init(&req_q);
809 amount_left = size;
810
811 while (amount_left > 0) {
812 vm_size_t flush_size;
813 vm_object_t object;
814
815 vm_map_lock(map);
816 if (!vm_map_lookup_entry(map, address, &entry)) {
817 vm_size_t skip;
818
819 /*
820 * hole in the address map.
821 */
822
823 /*
824 * Check for empty map.
825 */
826 if (entry == vm_map_to_entry(map) &&
827 entry->vme_next == entry) {
828 vm_map_unlock(map);
829 break;
830 }
831 /*
832 * Check that we don't wrap and that
833 * we have at least one real map entry.
834 */
835 if ((map->hdr.nentries == 0) ||
836 (entry->vme_next->vme_start < address)) {
837 vm_map_unlock(map);
838 break;
839 }
840 /*
841 * Move up to the next entry if needed
842 */
843 skip = (entry->vme_next->vme_start - address);
844 if (skip >= amount_left)
845 amount_left = 0;
846 else
847 amount_left -= skip;
848 address = entry->vme_next->vme_start;
849 vm_map_unlock(map);
850 continue;
851 }
852
853 offset = address - entry->vme_start;
854
855 /*
856 * do we have more to flush than is contained in this
857 * entry ?
858 */
859 if (amount_left + entry->vme_start + offset > entry->vme_end) {
860 flush_size = entry->vme_end -
861 (entry->vme_start + offset);
862 } else {
863 flush_size = amount_left;
864 }
865 amount_left -= flush_size;
866 address += flush_size;
867
868 if (entry->is_sub_map == TRUE) {
869 vm_map_t local_map;
870 vm_offset_t local_offset;
871
872 local_map = entry->object.sub_map;
873 local_offset = entry->offset;
874 vm_map_unlock(map);
875 vm_msync(
876 local_map,
877 local_offset,
878 flush_size,
879 sync_flags);
880 continue;
881 }
882 object = entry->object.vm_object;
883
884 /*
885 * We can't sync this object if the object has not been
886 * created yet
887 */
888 if (object == VM_OBJECT_NULL) {
889 vm_map_unlock(map);
890 continue;
891 }
892 offset += entry->offset;
893 modifiable = (entry->protection & VM_PROT_WRITE)
894 != VM_PROT_NONE;
895
896 vm_object_lock(object);
897
898 if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) {
899 boolean_t kill_pages = 0;
900
901 if (sync_flags & VM_SYNC_KILLPAGES) {
902 if (object->ref_count == 1 && !entry->needs_copy && !object->shadow)
903 kill_pages = 1;
904 else
905 kill_pages = -1;
906 }
907 if (kill_pages != -1)
908 vm_object_deactivate_pages(object, offset,
909 (vm_object_size_t)flush_size, kill_pages);
910 vm_object_unlock(object);
911 vm_map_unlock(map);
912 continue;
913 }
914 /*
915 * We can't sync this object if there isn't a pager.
916 * Don't bother to sync internal objects, since there can't
917 * be any "permanent" storage for these objects anyway.
918 */
919 if ((object->pager == MEMORY_OBJECT_NULL) ||
920 (object->internal) || (object->private)) {
921 vm_object_unlock(object);
922 vm_map_unlock(map);
923 continue;
924 }
925 /*
926 * keep reference on the object until syncing is done
927 */
928 assert(object->ref_count > 0);
929 object->ref_count++;
930 vm_object_res_reference(object);
931 vm_object_unlock(object);
932
933 vm_map_unlock(map);
934
935 do_sync_req = vm_object_sync(object,
936 offset,
937 flush_size,
938 sync_flags & VM_SYNC_INVALIDATE,
939 (modifiable &&
940 (sync_flags & VM_SYNC_SYNCHRONOUS ||
941 sync_flags & VM_SYNC_ASYNCHRONOUS)));
942
943 /*
944 * only send an m_o_s if we returned pages or if the entry
945 * is writable (i.e. dirty pages may already have been sent back)
946 */
947 if (!do_sync_req && !modifiable) {
948 vm_object_deallocate(object);
949 continue;
950 }
951 msync_req_alloc(new_msr);
952
953 vm_object_lock(object);
954 offset += object->paging_offset;
955
956 new_msr->offset = offset;
957 new_msr->length = flush_size;
958 new_msr->object = object;
959 new_msr->flag = VM_MSYNC_SYNCHRONIZING;
960 re_iterate:
961 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
962 /*
963 * need to check for overlapping entry, if found, wait
964 * on overlapping msr to be done, then reiterate
965 */
966 msr_lock(msr);
967 if (msr->flag == VM_MSYNC_SYNCHRONIZING &&
968 ((offset >= msr->offset &&
969 offset < (msr->offset + msr->length)) ||
970 (msr->offset >= offset &&
971 msr->offset < (offset + flush_size))))
972 {
973 assert_wait((event_t) msr,THREAD_INTERRUPTIBLE);
974 msr_unlock(msr);
975 vm_object_unlock(object);
976 thread_block((void (*)(void))0);
977 vm_object_lock(object);
978 goto re_iterate;
979 }
980 msr_unlock(msr);
981 }/* queue_iterate */
982
983 queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q);
984 vm_object_unlock(object);
985
986 queue_enter(&req_q, new_msr, msync_req_t, req_q);
987
988 (void) memory_object_synchronize(
989 object->pager,
990 offset,
991 flush_size,
992 sync_flags);
993 }/* while */
994
995 /*
996 * wait for memory_object_synchronize_completed messages from pager(s)
997 */
998
999 while (!queue_empty(&req_q)) {
1000 msr = (msync_req_t)queue_first(&req_q);
1001 msr_lock(msr);
1002 while(msr->flag != VM_MSYNC_DONE) {
1003 assert_wait((event_t) msr, THREAD_INTERRUPTIBLE);
1004 msr_unlock(msr);
1005 thread_block((void (*)(void))0);
1006 msr_lock(msr);
1007 }/* while */
1008 queue_remove(&req_q, msr, msync_req_t, req_q);
1009 msr_unlock(msr);
1010 vm_object_deallocate(msr->object);
1011 msync_req_free(msr);
1012 }/* queue_iterate */
1013
1014 return(KERN_SUCCESS);
1015 }/* vm_msync */
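/*
 * Illustrative sketch of the sync_flags combinations documented above:
 * a synchronous flush of dirty/precious pages, then an invalidate that
 * discards pages while returning only precious ones to the manager.
 * The helper name and parameters are hypothetical.  Compiled out.
 */
#if 0
static void
example_msync_modes(vm_map_t map, vm_address_t addr, vm_size_t len)
{
	/* write dirty or precious pages back; wait for m_o_s_completed */
	(void) vm_msync(map, addr, len, VM_SYNC_SYNCHRONOUS);

	/* discard pages, only returning precious pages to the manager */
	(void) vm_msync(map, addr, len, VM_SYNC_INVALIDATE);
}
#endif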
1016
1017
1018 /*
1019 * task_wire
1020 *
1021 * Set or clear the map's wiring_required flag. This flag, if set,
1022 * will cause all future virtual memory allocation to allocate
1023 * user wired memory. Unwiring pages wired down as a result of
1024 * this routine is done with the vm_wire interface.
1025 */
1026 kern_return_t
1027 task_wire(
1028 vm_map_t map,
1029 boolean_t must_wire)
1030 {
1031 if (map == VM_MAP_NULL)
1032 return(KERN_INVALID_ARGUMENT);
1033
1034 if (must_wire)
1035 map->wiring_required = TRUE;
1036 else
1037 map->wiring_required = FALSE;
1038
1039 return(KERN_SUCCESS);
1040 }
1041
1042 /*
1043 * vm_behavior_set sets the paging behavior attribute for the
1044 * specified range in the specified map. This routine will fail
1045 * with KERN_INVALID_ADDRESS if any address in [start,start+size)
1046 * is not a valid allocated or reserved memory region.
1047 */
1048 kern_return_t
1049 vm_behavior_set(
1050 vm_map_t map,
1051 vm_offset_t start,
1052 vm_size_t size,
1053 vm_behavior_t new_behavior)
1054 {
1055 if (map == VM_MAP_NULL)
1056 return(KERN_INVALID_ARGUMENT);
1057
1058 return(vm_map_behavior_set(map, trunc_page_32(start),
1059 round_page_32(start+size), new_behavior));
1060 }
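/*
 * Illustrative sketch of vm_behavior_set(): hint that a range will be
 * touched sequentially so the paging machinery can favor read-ahead,
 * then restore the default behavior.  The helper name is hypothetical.
 * Compiled out.
 */
#if 0
static void
example_sequential_hint(vm_map_t map, vm_offset_t start, vm_size_t len)
{
	(void) vm_behavior_set(map, start, len, VM_BEHAVIOR_SEQUENTIAL);
	/* ... stream through the range ... */
	(void) vm_behavior_set(map, start, len, VM_BEHAVIOR_DEFAULT);
}
#endif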
1061
1062 #if VM_CPM
1063 /*
1064 * Control whether the kernel will permit use of
1065 * vm_allocate_cpm at all.
1066 */
1067 unsigned int vm_allocate_cpm_enabled = 1;
1068
1069 /*
1070 * Ordinarily, the right to allocate CPM is restricted
1071 * to privileged applications (those that can gain access
1072 * to the host port). Set this variable to zero if you
1073 * want to let any application allocate CPM.
1074 */
1075 unsigned int vm_allocate_cpm_privileged = 0;
1076
1077 /*
1078 * Allocate memory in the specified map, with the caveat that
1079 * the memory is physically contiguous. This call may fail
1080 * if the system can't find sufficient contiguous memory.
1081 * This call may cause or lead to heart-stopping amounts of
1082 * paging activity.
1083 *
1084 * Memory obtained from this call should be freed in the
1085 * normal way, viz., via vm_deallocate.
1086 */
1087 kern_return_t
1088 vm_allocate_cpm(
1089 host_priv_t host_priv,
1090 register vm_map_t map,
1091 register vm_offset_t *addr,
1092 register vm_size_t size,
1093 int flags)
1094 {
1095 vm_object_t cpm_obj;
1096 pmap_t pmap;
1097 vm_page_t m, pages;
1098 kern_return_t kr;
1099 vm_offset_t va, start, end, offset;
1100 #if MACH_ASSERT
1101 extern vm_offset_t avail_start, avail_end;
1102 vm_offset_t prev_addr;
1103 #endif /* MACH_ASSERT */
1104
1105 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
1106
1107 if (!vm_allocate_cpm_enabled)
1108 return KERN_FAILURE;
1109
1110 if (vm_allocate_cpm_privileged && host_priv == HOST_PRIV_NULL)
1111 return KERN_INVALID_HOST;
1112
1113 if (map == VM_MAP_NULL)
1114 return KERN_INVALID_ARGUMENT;
1115
1116 assert(host_priv == &realhost);
1117
1118 if (size == 0) {
1119 *addr = 0;
1120 return KERN_SUCCESS;
1121 }
1122
1123 if (anywhere)
1124 *addr = vm_map_min(map);
1125 else
1126 *addr = trunc_page_32(*addr);
1127 size = round_page_32(size);
1128
1129 if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS)
1130 return kr;
1131
1132 cpm_obj = vm_object_allocate(size);
1133 assert(cpm_obj != VM_OBJECT_NULL);
1134 assert(cpm_obj->internal);
1135 assert(cpm_obj->size == size);
1136 assert(cpm_obj->can_persist == FALSE);
1137 assert(cpm_obj->pager_created == FALSE);
1138 assert(cpm_obj->pageout == FALSE);
1139 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1140
1141 /*
1142 * Insert pages into object.
1143 */
1144
1145 vm_object_lock(cpm_obj);
1146 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1147 m = pages;
1148 pages = NEXT_PAGE(m);
1149
1150 assert(!m->gobbled);
1151 assert(!m->wanted);
1152 assert(!m->pageout);
1153 assert(!m->tabled);
1154 assert(m->busy);
1155 assert(m->phys_page>=avail_start && m->phys_page<=avail_end);
1156
1157 m->busy = FALSE;
1158 vm_page_insert(m, cpm_obj, offset);
1159 }
1160 assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
1161 vm_object_unlock(cpm_obj);
1162
1163 /*
1164 * Hang onto a reference on the object in case a
1165 * multi-threaded application for some reason decides
1166 * to deallocate the portion of the address space into
1167 * which we will insert this object.
1168 *
1169 * Unfortunately, we must insert the object now before
1170 * we can talk to the pmap module about which addresses
1171 * must be wired down. Hence, the race with a multi-
1172 * threaded app.
1173 */
1174 vm_object_reference(cpm_obj);
1175
1176 /*
1177 * Insert object into map.
1178 */
1179
1180 kr = vm_map_enter(
1181 map,
1182 addr,
1183 size,
1184 (vm_offset_t)0,
1185 flags,
1186 cpm_obj,
1187 (vm_object_offset_t)0,
1188 FALSE,
1189 VM_PROT_ALL,
1190 VM_PROT_ALL,
1191 VM_INHERIT_DEFAULT);
1192
1193 if (kr != KERN_SUCCESS) {
1194 /*
1195 * A CPM object doesn't have can_persist set,
1196 * so all we have to do is deallocate it to
1197 * free up these pages.
1198 */
1199 assert(cpm_obj->pager_created == FALSE);
1200 assert(cpm_obj->can_persist == FALSE);
1201 assert(cpm_obj->pageout == FALSE);
1202 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1203 vm_object_deallocate(cpm_obj); /* kill acquired ref */
1204 vm_object_deallocate(cpm_obj); /* kill creation ref */
1205 }
1206
1207 /*
1208 * Inform the physical mapping system that the
1209 * range of addresses may not fault, so that
1210 * page tables and such can be locked down as well.
1211 */
1212 start = *addr;
1213 end = start + size;
1214 pmap = vm_map_pmap(map);
1215 pmap_pageable(pmap, start, end, FALSE);
1216
1217 /*
1218 * Enter each page into the pmap, to avoid faults.
1219 * Note that this loop could be coded more efficiently,
1220 * if the need arose, rather than looking up each page
1221 * again.
1222 */
1223 for (offset = 0, va = start; offset < size;
1224 va += PAGE_SIZE, offset += PAGE_SIZE) {
1225 vm_object_lock(cpm_obj);
1226 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1227 vm_object_unlock(cpm_obj);
1228 assert(m != VM_PAGE_NULL);
1229 PMAP_ENTER(pmap, va, m, VM_PROT_ALL,
1230 ((unsigned int)(m->object->wimg_bits)) & VM_WIMG_MASK,
1231 TRUE);
1232 }
1233
1234 #if MACH_ASSERT
1235 /*
1236 * Verify ordering in address space.
1237 */
1238 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1239 vm_object_lock(cpm_obj);
1240 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1241 vm_object_unlock(cpm_obj);
1242 if (m == VM_PAGE_NULL)
1243 panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
1244 cpm_obj, offset);
1245 assert(m->tabled);
1246 assert(!m->busy);
1247 assert(!m->wanted);
1248 assert(!m->fictitious);
1249 assert(!m->private);
1250 assert(!m->absent);
1251 assert(!m->error);
1252 assert(!m->cleaning);
1253 assert(!m->precious);
1254 assert(!m->clustered);
1255 if (offset != 0) {
1256 if (m->phys_page != prev_addr + 1) {
1257 printf("start 0x%x end 0x%x va 0x%x\n",
1258 start, end, va);
1259 printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
1260 printf("m 0x%x prev_address 0x%x\n", m,
1261 prev_addr);
1262 panic("vm_allocate_cpm: pages not contig!");
1263 }
1264 }
1265 prev_addr = m->phys_page;
1266 }
1267 #endif /* MACH_ASSERT */
1268
1269 vm_object_deallocate(cpm_obj); /* kill extra ref */
1270
1271 return kr;
1272 }
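/*
 * Illustrative sketch of a privileged caller asking for physically
 * contiguous memory; the result is freed with vm_deallocate() like any
 * other allocation.  Uses &realhost as the host-privileged port; the
 * helper name is hypothetical.  Compiled out.
 */
#if 0
static kern_return_t
example_contig_alloc(vm_map_t map, vm_size_t len, vm_offset_t *out_addr)
{
	*out_addr = 0;
	return vm_allocate_cpm(&realhost, map, out_addr, len,
			       VM_FLAGS_ANYWHERE);
}
#endif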
1273
1274
1275 #else /* VM_CPM */
1276
1277 /*
1278 * Interface is defined in all cases, but unless the kernel
1279 * is built explicitly for this option, the interface does
1280 * nothing.
1281 */
1282
1283 kern_return_t
1284 vm_allocate_cpm(
1285 host_priv_t host_priv,
1286 register vm_map_t map,
1287 register vm_offset_t *addr,
1288 register vm_size_t size,
1289 int flags)
1290 {
1291 return KERN_FAILURE;
1292 }
1293
1294 /*
1295 */
1296 kern_return_t
1297 mach_memory_object_memory_entry_64(
1298 host_t host,
1299 boolean_t internal,
1300 vm_object_offset_t size,
1301 vm_prot_t permission,
1302 memory_object_t pager,
1303 ipc_port_t *entry_handle)
1304 {
1305 unsigned int access;
1306 vm_named_entry_t user_object;
1307 ipc_port_t user_handle;
1308 ipc_port_t previous;
1309 kern_return_t kr;
1310
1311 if (host == HOST_NULL)
1312 return(KERN_INVALID_HOST);
1313
1314 user_object = (vm_named_entry_t)
1315 kalloc(sizeof (struct vm_named_entry));
1316 if(user_object == NULL)
1317 return KERN_FAILURE;
1318 named_entry_lock_init(user_object);
1319 user_handle = ipc_port_alloc_kernel();
1320 ip_lock(user_handle);
1321
1322 /* make a sonce right */
1323 user_handle->ip_sorights++;
1324 ip_reference(user_handle);
1325
1326 user_handle->ip_destination = IP_NULL;
1327 user_handle->ip_receiver_name = MACH_PORT_NULL;
1328 user_handle->ip_receiver = ipc_space_kernel;
1329
1330 /* make a send right */
1331 user_handle->ip_mscount++;
1332 user_handle->ip_srights++;
1333 ip_reference(user_handle);
1334
1335 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1336 /* nsrequest unlocks user_handle */
1337
1338 user_object->object = NULL;
1339 user_object->size = size;
1340 user_object->offset = 0;
1341 user_object->backing.pager = pager;
1342 user_object->protection = permission & VM_PROT_ALL;
1343 access = GET_MAP_MEM(permission);
1344 SET_MAP_MEM(access, user_object->protection);
1345 user_object->internal = internal;
1346 user_object->is_sub_map = FALSE;
1347 user_object->ref_count = 1;
1348
1349 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1350 IKOT_NAMED_ENTRY);
1351 *entry_handle = user_handle;
1352 return KERN_SUCCESS;
1353 }
1354
1355 kern_return_t
1356 mach_memory_object_memory_entry(
1357 host_t host,
1358 boolean_t internal,
1359 vm_size_t size,
1360 vm_prot_t permission,
1361 memory_object_t pager,
1362 ipc_port_t *entry_handle)
1363 {
1364 return mach_memory_object_memory_entry_64( host, internal,
1365 (vm_object_offset_t)size, permission, pager, entry_handle);
1366 }
1367
1368
1369
1370 /*
1371 */
1372
1373 kern_return_t
1374 mach_make_memory_entry_64(
1375 vm_map_t target_map,
1376 vm_object_size_t *size,
1377 vm_object_offset_t offset,
1378 vm_prot_t permission,
1379 ipc_port_t *object_handle,
1380 ipc_port_t parent_entry)
1381 {
1382 vm_map_version_t version;
1383 vm_named_entry_t user_object;
1384 ipc_port_t user_handle;
1385 ipc_port_t previous;
1386 kern_return_t kr;
1387 vm_map_t pmap_map;
1388
1389 /* needed for call to vm_map_lookup_locked */
1390 boolean_t wired;
1391 vm_object_offset_t obj_off;
1392 vm_prot_t prot;
1393 vm_object_offset_t lo_offset, hi_offset;
1394 vm_behavior_t behavior;
1395 vm_object_t object;
1396 vm_object_t shadow_object;
1397
1398 /* needed for direct map entry manipulation */
1399 vm_map_entry_t map_entry;
1400 vm_map_entry_t next_entry;
1401 vm_map_t local_map;
1402 vm_map_t original_map = target_map;
1403 vm_offset_t local_offset;
1404 vm_object_size_t mappable_size;
1405 vm_object_size_t total_size;
1406
1407 unsigned int access;
1408 vm_prot_t protections;
1409 unsigned int wimg_mode;
1410 boolean_t cache_attr;
1411
1412 protections = permission & VM_PROT_ALL;
1413 access = GET_MAP_MEM(permission);
1414
1415
1416 offset = trunc_page_64(offset);
1417 *size = round_page_64(*size);
1418
1419 if((parent_entry != NULL)
1420 && (permission & MAP_MEM_ONLY)) {
1421 vm_named_entry_t parent_object;
1422 if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
1423 return KERN_INVALID_ARGUMENT;
1424 }
1425 parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
1426 object = parent_object->object;
1427 if(object != VM_OBJECT_NULL)
1428 wimg_mode = object->wimg_bits;
1429 if((access != GET_MAP_MEM(parent_object->protection)) &&
1430 !(parent_object->protection & VM_PROT_WRITE)) {
1431 return KERN_INVALID_RIGHT;
1432 }
1433 if(access == MAP_MEM_IO) {
1434 SET_MAP_MEM(access, parent_object->protection);
1435 wimg_mode = VM_WIMG_IO;
1436 } else if (access == MAP_MEM_COPYBACK) {
1437 SET_MAP_MEM(access, parent_object->protection);
1438 wimg_mode = VM_WIMG_DEFAULT;
1439 } else if (access == MAP_MEM_WTHRU) {
1440 SET_MAP_MEM(access, parent_object->protection);
1441 wimg_mode = VM_WIMG_WTHRU;
1442 } else if (access == MAP_MEM_WCOMB) {
1443 SET_MAP_MEM(access, parent_object->protection);
1444 wimg_mode = VM_WIMG_WCOMB;
1445 }
1446 if(object &&
1447 (access != MAP_MEM_NOOP) &&
1448 (!(object->nophyscache))) {
1449 if(object->wimg_bits != wimg_mode) {
1450 vm_page_t p;
1451 if ((wimg_mode == VM_WIMG_IO)
1452 || (wimg_mode == VM_WIMG_WCOMB))
1453 cache_attr = TRUE;
1454 else
1455 cache_attr = FALSE;
1456 vm_object_lock(object);
1457 while(object->paging_in_progress) {
1458 vm_object_unlock(object);
1459 vm_object_wait(object,
1460 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
1461 THREAD_UNINT);
1462 vm_object_lock(object);
1463 }
1464 object->wimg_bits = wimg_mode;
1465 queue_iterate(&object->memq,
1466 p, vm_page_t, listq) {
1467 if (!p->fictitious) {
1468 pmap_page_protect(
1469 p->phys_page,
1470 VM_PROT_NONE);
1471 if(cache_attr)
1472 pmap_sync_caches_phys(
1473 p->phys_page);
1474 }
1475 }
1476 vm_object_unlock(object);
1477 }
1478 }
1479 return KERN_SUCCESS;
1480 }
1481
1482 if(permission & MAP_MEM_ONLY) {
1483 return KERN_INVALID_ARGUMENT;
1484 }
1485
1486 user_object = (vm_named_entry_t)
1487 kalloc(sizeof (struct vm_named_entry));
1488 if(user_object == NULL)
1489 return KERN_FAILURE;
1490 named_entry_lock_init(user_object);
1491 user_handle = ipc_port_alloc_kernel();
1492 ip_lock(user_handle);
1493
1494 /* make a sonce right */
1495 user_handle->ip_sorights++;
1496 ip_reference(user_handle);
1497
1498 user_handle->ip_destination = IP_NULL;
1499 user_handle->ip_receiver_name = MACH_PORT_NULL;
1500 user_handle->ip_receiver = ipc_space_kernel;
1501
1502 /* make a send right */
1503 user_handle->ip_mscount++;
1504 user_handle->ip_srights++;
1505 ip_reference(user_handle);
1506
1507 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1508 /* nsrequest unlocks user_handle */
1509
1510 user_object->backing.pager = NULL;
1511 user_object->ref_count = 1;
1512
1513 if(permission & MAP_MEM_NAMED_CREATE) {
1514 user_object->object = NULL;
1515 user_object->internal = TRUE;
1516 user_object->is_sub_map = FALSE;
1517 user_object->offset = 0;
1518 user_object->protection = protections;
1519 SET_MAP_MEM(access, user_object->protection);
1520 user_object->size = *size;
1521
1522 /* user_object pager and internal fields are not used */
1523 /* when the object field is filled in. */
1524
1525 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1526 IKOT_NAMED_ENTRY);
1527 *object_handle = user_handle;
1528 return KERN_SUCCESS;
1529 }
1530
1531 if(parent_entry == NULL) {
1532 /* Create a named object based on address range within the task map */
1533 /* Go find the object at given address */
1534
1535 vm_map_lock_read(target_map);
1536
1537 /* get the object associated with the target address */
1538 /* note we check the permission of the range against */
1539 /* that requested by the caller */
1540
1541 kr = vm_map_lookup_locked(&target_map, offset,
1542 protections, &version,
1543 &object, &obj_off, &prot, &wired, &behavior,
1544 &lo_offset, &hi_offset, &pmap_map);
1545 if (kr != KERN_SUCCESS) {
1546 vm_map_unlock_read(target_map);
1547 goto make_mem_done;
1548 }
1549 if (((prot & protections) != protections)
1550 || (object == kernel_object)) {
1551 kr = KERN_INVALID_RIGHT;
1552 vm_object_unlock(object);
1553 vm_map_unlock_read(target_map);
1554 if(pmap_map != target_map)
1555 vm_map_unlock_read(pmap_map);
1556 if(object == kernel_object) {
1557 printf("Warning: Attempt to create a named"
1558 " entry from the kernel_object\n");
1559 }
1560 goto make_mem_done;
1561 }
1562
1563 /* We have an object, now check to see if this object */
1564 /* is suitable. If not, create a shadow and share that */
1565
1566 redo_lookup:
1567 local_map = original_map;
1568 local_offset = offset;
1569 if(target_map != local_map) {
1570 vm_map_unlock_read(target_map);
1571 if(pmap_map != target_map)
1572 vm_map_unlock_read(pmap_map);
1573 vm_map_lock_read(local_map);
1574 target_map = local_map;
1575 pmap_map = local_map;
1576 }
1577 while(TRUE) {
1578 if(!vm_map_lookup_entry(local_map,
1579 local_offset, &map_entry)) {
1580 kr = KERN_INVALID_ARGUMENT;
1581 vm_object_unlock(object);
1582 vm_map_unlock_read(target_map);
1583 if(pmap_map != target_map)
1584 vm_map_unlock_read(pmap_map);
1585 goto make_mem_done;
1586 }
1587 if(!(map_entry->is_sub_map)) {
1588 if(map_entry->object.vm_object != object) {
1589 kr = KERN_INVALID_ARGUMENT;
1590 vm_object_unlock(object);
1591 vm_map_unlock_read(target_map);
1592 if(pmap_map != target_map)
1593 vm_map_unlock_read(pmap_map);
1594 goto make_mem_done;
1595 }
1596 if(map_entry->wired_count) {
1597 /* JMM - The check below should be reworked instead. */
1598 object->true_share = TRUE;
1599 }
1600 break;
1601 } else {
1602 vm_map_t tmap;
1603 tmap = local_map;
1604 local_map = map_entry->object.sub_map;
1605
1606 vm_map_lock_read(local_map);
1607 vm_map_unlock_read(tmap);
1608 target_map = local_map;
1609 pmap_map = local_map;
1610 local_offset = local_offset - map_entry->vme_start;
1611 local_offset += map_entry->offset;
1612 }
1613 }
1614 if(((map_entry->max_protection) & protections) != protections) {
1615 kr = KERN_INVALID_RIGHT;
1616 vm_object_unlock(object);
1617 vm_map_unlock_read(target_map);
1618 if(pmap_map != target_map)
1619 vm_map_unlock_read(pmap_map);
1620 goto make_mem_done;
1621 }
1622
1623 mappable_size = hi_offset - obj_off;
1624 total_size = map_entry->vme_end - map_entry->vme_start;
1625 if(*size > mappable_size) {
1626 /* try to extend mappable size if the entries */
1627 /* following are from the same object and are */
1628 /* compatible */
1629 next_entry = map_entry->vme_next;
1630 /* let's see if the next map entry is still */
1631 /* pointing at this object and is contiguous */
1632 while(*size > mappable_size) {
1633 if((next_entry->object.vm_object == object) &&
1634 (next_entry->vme_start ==
1635 next_entry->vme_prev->vme_end) &&
1636 (next_entry->offset ==
1637 next_entry->vme_prev->offset +
1638 (next_entry->vme_prev->vme_end -
1639 next_entry->vme_prev->vme_start))) {
1640 if(((next_entry->max_protection)
1641 & protections) != protections) {
1642 break;
1643 }
1644 if (next_entry->needs_copy !=
1645 map_entry->needs_copy)
1646 break;
1647 mappable_size += next_entry->vme_end
1648 - next_entry->vme_start;
1649 total_size += next_entry->vme_end
1650 - next_entry->vme_start;
1651 next_entry = next_entry->vme_next;
1652 } else {
1653 break;
1654 }
1655
1656 }
1657 }
1658
1659 if(object->internal) {
1660 /* vm_map_lookup_locked will create a shadow if */
1661 /* needs_copy is set but does not check for the */
1662 /* other two conditions shown. It is important to */
1663 /* set up an object which will not be pulled from */
1664 /* under us. */
1665
1666 if ((map_entry->needs_copy || object->shadowed ||
1667 (object->size > total_size))
1668 && !object->true_share) {
1669 if (vm_map_lock_read_to_write(target_map)) {
1670 vm_map_lock_read(target_map);
1671 goto redo_lookup;
1672 }
1673
1674 /*
1675 * JMM - We need to avoid coming here when the object
1676 * is wired by anybody, not just the current map. Why
1677 * couldn't we use the standard vm_object_copy_quickly()
1678 * approach here?
1679 */
1680
1681 /* create a shadow object */
1682 vm_object_shadow(&map_entry->object.vm_object,
1683 &map_entry->offset, total_size);
1684 shadow_object = map_entry->object.vm_object;
1685 vm_object_unlock(object);
1686 vm_object_pmap_protect(
1687 object, map_entry->offset,
1688 total_size,
1689 ((map_entry->is_shared
1690 || target_map->mapped)
1691 ? PMAP_NULL :
1692 target_map->pmap),
1693 map_entry->vme_start,
1694 map_entry->protection & ~VM_PROT_WRITE);
1695 total_size -= (map_entry->vme_end
1696 - map_entry->vme_start);
1697 next_entry = map_entry->vme_next;
1698 map_entry->needs_copy = FALSE;
1699 while (total_size) {
1700 if(next_entry->object.vm_object == object) {
1701 next_entry->object.vm_object
1702 = shadow_object;
1703 next_entry->offset
1704 = next_entry->vme_prev->offset +
1705 (next_entry->vme_prev->vme_end
1706 - next_entry->vme_prev->vme_start);
1707 next_entry->needs_copy = FALSE;
1708 } else {
1709 panic("mach_make_memory_entry_64:"
1710 " map entries out of sync\n");
1711 }
1712 total_size -=
1713 next_entry->vme_end
1714 - next_entry->vme_start;
1715 next_entry = next_entry->vme_next;
1716 }
1717
1718 object = shadow_object;
1719 vm_object_lock(object);
1720 obj_off = (local_offset - map_entry->vme_start)
1721 + map_entry->offset;
1722 vm_map_lock_write_to_read(target_map);
1723
1724
1725 }
1726 }
1727
1728 /* note: in the future we can (if necessary) allow for */
1729 /* memory object lists; this will better support */
1730 /* fragmentation, but is it necessary? The user should */
1731 /* be encouraged to create address-space-oriented */
1732 /* shared objects from CLEAN memory regions which have */
1733 /* a known and defined history, i.e. no inheritance */
1734 /* sharing, and make this call before making the region */
1735 /* the target of ipc's, etc. The code above, protecting */
1736 /* against delayed copy, etc., is mostly defensive. */
1737
1738 wimg_mode = object->wimg_bits;
1739 if(!(object->nophyscache)) {
1740 if(access == MAP_MEM_IO) {
1741 wimg_mode = VM_WIMG_IO;
1742 } else if (access == MAP_MEM_COPYBACK) {
1743 wimg_mode = VM_WIMG_USE_DEFAULT;
1744 } else if (access == MAP_MEM_WTHRU) {
1745 wimg_mode = VM_WIMG_WTHRU;
1746 } else if (access == MAP_MEM_WCOMB) {
1747 wimg_mode = VM_WIMG_WCOMB;
1748 }
1749 }
1750
1751 object->true_share = TRUE;
1752 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
1753 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1754
1755 /* we now point to this object, hold on to it */
1756 vm_object_reference_locked(object);
1757 vm_map_unlock_read(target_map);
1758 if(pmap_map != target_map)
1759 vm_map_unlock_read(pmap_map);
1760
1761 if(object->wimg_bits != wimg_mode) {
1762 vm_page_t p;
1763
1764 vm_object_paging_wait(object, THREAD_UNINT);
1765
1766 queue_iterate(&object->memq,
1767 p, vm_page_t, listq) {
1768 if (!p->fictitious) {
1769 pmap_page_protect(
1770 p->phys_page,
1771 VM_PROT_NONE);
1772 if(cache_attr)
1773 pmap_sync_caches_phys(
1774 p->phys_page);
1775 }
1776 }
1777 object->wimg_bits = wimg_mode;
1778 }
1779 user_object->object = object;
1780 user_object->internal = object->internal;
1781 user_object->is_sub_map = FALSE;
1782 user_object->offset = obj_off;
1783 user_object->protection = permission;
1784
1785 /* the size of the mapped entry that overlaps with the region */
1786 /* targeted for sharing, i.e. */
1787 /* (entry_end - entry_start) - */
1788 /* offset of our beginning address within the entry; */
1789 /* it corresponds to this: */
1790
1791 if(*size > mappable_size)
1792 *size = mappable_size;
1793
1794 user_object->size = *size;
1795
1796 /* user_object pager and internal fields are not used */
1797 /* when the object field is filled in. */
1798
1799 vm_object_unlock(object);
1800 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1801 IKOT_NAMED_ENTRY);
1802 *object_handle = user_handle;
1803 return KERN_SUCCESS;
1804 } else {
1805
1806 vm_named_entry_t parent_object;
1807
1808 /* The new object will be based on an existing named object */
1809 if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
1810 kr = KERN_INVALID_ARGUMENT;
1811 goto make_mem_done;
1812 }
1813 parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
1814 if((offset + *size) > parent_object->size) {
1815 kr = KERN_INVALID_ARGUMENT;
1816 goto make_mem_done;
1817 }
1818
1819 user_object->object = parent_object->object;
1820 user_object->size = *size;
1821 user_object->offset = parent_object->offset + offset;
1822 user_object->protection = parent_object->protection;
1823 user_object->protection &= ~VM_PROT_ALL;
1824 user_object->protection = permission & VM_PROT_ALL;
1825 if(access != MAP_MEM_NOOP) {
1826 SET_MAP_MEM(access, user_object->protection);
1827 }
1828 if(parent_object->is_sub_map) {
1829 user_object->backing.map = parent_object->backing.map;
1830 vm_map_lock(user_object->backing.map);
1831 user_object->backing.map->ref_count++;
1832 vm_map_unlock(user_object->backing.map);
1833 }
1834 else {
1835 user_object->backing.pager = parent_object->backing.pager;
1836 }
1837 user_object->internal = parent_object->internal;
1838 user_object->is_sub_map = parent_object->is_sub_map;
1839
1840 if(parent_object->object != NULL) {
1841 /* we now point to this object, hold on */
1842 vm_object_reference(parent_object->object);
1843 vm_object_lock(parent_object->object);
1844 parent_object->object->true_share = TRUE;
1845 if (parent_object->object->copy_strategy ==
1846 MEMORY_OBJECT_COPY_SYMMETRIC)
1847 parent_object->object->copy_strategy =
1848 MEMORY_OBJECT_COPY_DELAY;
1849 vm_object_unlock(parent_object->object);
1850 }
1851 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1852 IKOT_NAMED_ENTRY);
1853 *object_handle = user_handle;
1854 return KERN_SUCCESS;
1855 }
1856
1857
1858
1859 make_mem_done:
1860 ipc_port_dealloc_kernel(user_handle);
1861 kfree((vm_offset_t)user_object, sizeof (struct vm_named_entry));
1862 return kr;
1863 }
1864
1865 kern_return_t
1866 mach_make_memory_entry(
1867 vm_map_t target_map,
1868 vm_size_t *size,
1869 vm_offset_t offset,
1870 vm_prot_t permission,
1871 ipc_port_t *object_handle,
1872 ipc_port_t parent_entry)
1873 {
1874 vm_object_offset_t size_64;
1875 kern_return_t kr;
1876
1877 size_64 = (vm_object_offset_t)*size;
1878 kr = mach_make_memory_entry_64(target_map, &size_64,
1879 (vm_object_offset_t)offset, permission, object_handle,
1880 parent_entry);
1881 *size = (vm_size_t)size_64;
1882 return kr;
1883 }
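/*
 * Illustrative sketch of the usual sharing sequence: carve a named entry
 * out of an existing range with mach_make_memory_entry(), then map that
 * handle into another map with vm_map().  The helper name and parameters
 * are hypothetical.  Compiled out.
 */
#if 0
static kern_return_t
example_share_range(vm_map_t src_map, vm_offset_t src_addr,
		    vm_map_t dst_map, vm_size_t len,
		    vm_offset_t *dst_addr)
{
	ipc_port_t	handle = IP_NULL;
	vm_size_t	size = len;
	kern_return_t	kr;

	kr = mach_make_memory_entry(src_map, &size, src_addr,
				    VM_PROT_READ | VM_PROT_WRITE,
				    &handle, IP_NULL);
	if (kr != KERN_SUCCESS)
		return kr;

	*dst_addr = 0;
	return vm_map(dst_map, dst_addr, size, 0, VM_FLAGS_ANYWHERE,
		      handle, 0, FALSE,
		      VM_PROT_READ | VM_PROT_WRITE,
		      VM_PROT_READ | VM_PROT_WRITE,
		      VM_INHERIT_SHARE);
}
#endif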
1884
1885 /*
1886 */
1887
1888 kern_return_t
1889 vm_region_object_create(
1890 vm_map_t target_map,
1891 vm_size_t size,
1892 ipc_port_t *object_handle)
1893 {
1894 vm_named_entry_t user_object;
1895 ipc_port_t user_handle;
1896 kern_return_t kr;
1897
1898 ipc_port_t previous;
1899 vm_map_t new_map;
1900
1901 user_object = (vm_named_entry_t)
1902 kalloc(sizeof (struct vm_named_entry));
1903 if(user_object == NULL) {
1904 return KERN_FAILURE;
1905 }
1906 named_entry_lock_init(user_object);
1907 user_handle = ipc_port_alloc_kernel();
1908
1909
1910 ip_lock(user_handle);
1911
1912 /* make a sonce right */
1913 user_handle->ip_sorights++;
1914 ip_reference(user_handle);
1915
1916 user_handle->ip_destination = IP_NULL;
1917 user_handle->ip_receiver_name = MACH_PORT_NULL;
1918 user_handle->ip_receiver = ipc_space_kernel;
1919
1920 /* make a send right */
1921 user_handle->ip_mscount++;
1922 user_handle->ip_srights++;
1923 ip_reference(user_handle);
1924
1925 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1926 /* nsrequest unlocks user_handle */
1927
1928 /* Create a named object based on a submap of specified size */
1929
1930 new_map = vm_map_create(0, 0, size, TRUE);
1931 user_object->backing.map = new_map;
1932
1933
1934 user_object->object = VM_OBJECT_NULL;
1935 user_object->internal = TRUE;
1936 user_object->is_sub_map = TRUE;
1937 user_object->offset = 0;
1938 user_object->protection = VM_PROT_ALL;
1939 user_object->size = size;
1940 user_object->ref_count = 1;
1941
1942 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1943 IKOT_NAMED_ENTRY);
1944 *object_handle = user_handle;
1945 return KERN_SUCCESS;
1946
1947 }
1948
1949 /* For a given range, check all map entries. If the entry corresponds to */
1950 /* the old vm_region/map provided on the call, replace it with the */
1951 /* corresponding range in the new vm_region/map */
1952 kern_return_t vm_map_region_replace(
1953 vm_map_t target_map,
1954 ipc_port_t old_region,
1955 ipc_port_t new_region,
1956 vm_offset_t start,
1957 vm_offset_t end)
1958 {
1959 vm_named_entry_t old_object;
1960 vm_named_entry_t new_object;
1961 vm_map_t old_submap;
1962 vm_map_t new_submap;
1963 vm_offset_t addr;
1964 vm_map_entry_t entry;
1965 int nested_pmap = 0;
1966
1967
1968 vm_map_lock(target_map);
1969 old_object = (vm_named_entry_t)old_region->ip_kobject;
1970 new_object = (vm_named_entry_t)new_region->ip_kobject;
1971 if((!old_object->is_sub_map) || (!new_object->is_sub_map)) {
1972 vm_map_unlock(target_map);
1973 return KERN_INVALID_ARGUMENT;
1974 }
1975 old_submap = (vm_map_t)old_object->backing.map;
1976 new_submap = (vm_map_t)new_object->backing.map;
1977 vm_map_lock(old_submap);
1978 if((old_submap->min_offset != new_submap->min_offset) ||
1979 (old_submap->max_offset != new_submap->max_offset)) {
1980 vm_map_unlock(old_submap);
1981 vm_map_unlock(target_map);
1982 return KERN_INVALID_ARGUMENT;
1983 }
1984 if(!vm_map_lookup_entry(target_map, start, &entry)) {
1985 /* if the src is not contained, the entry precedes */
1986 /* our range */
1987 addr = entry->vme_start;
1988 if(entry == vm_map_to_entry(target_map)) {
1989 vm_map_unlock(old_submap);
1990 vm_map_unlock(target_map);
1991 return KERN_SUCCESS;
1992 }
1993 }
1994 if ((entry->use_pmap) &&
1995 (new_submap->pmap == NULL)) {
1996 new_submap->pmap = pmap_create((vm_size_t) 0);
1997 if(new_submap->pmap == PMAP_NULL) {
1998 vm_map_unlock(old_submap);
1999 vm_map_unlock(target_map);
2000 return(KERN_NO_SPACE);
2001 }
2002 }
2003 addr = entry->vme_start;
2004 vm_map_reference(old_submap);
2005 while((entry != vm_map_to_entry(target_map)) &&
2006 (entry->vme_start < end)) {
2007 if((entry->is_sub_map) &&
2008 (entry->object.sub_map == old_submap)) {
2009 if(entry->use_pmap) {
2010 if((start & 0x0fffffff) ||
2011 ((end - start) != 0x10000000)) {
2012 vm_map_unlock(old_submap);
2013 vm_map_deallocate(old_submap);
2014 vm_map_unlock(target_map);
2015 return KERN_INVALID_ARGUMENT;
2016 }
2017 nested_pmap = 1;
2018 }
2019 entry->object.sub_map = new_submap;
2020 vm_map_reference(new_submap);
2021 vm_map_deallocate(old_submap);
2022 }
2023 entry = entry->vme_next;
2024 addr = entry->vme_start;
2025 }
2026 if(nested_pmap) {
2027 #ifndef i386
2028 pmap_unnest(target_map->pmap, (addr64_t)start);
2029 if(target_map->mapped) {
2030 vm_map_submap_pmap_clean(target_map,
2031 start, end, old_submap, 0);
2032 }
2033 pmap_nest(target_map->pmap, new_submap->pmap,
2034 (addr64_t)start, (addr64_t)start,
2035 (addr64_t)(end - start));
2036 #endif /* i386 */
2037 } else {
2038 vm_map_submap_pmap_clean(target_map,
2039 start, end, old_submap, 0);
2040 }
2041 vm_map_unlock(old_submap);
2042 vm_map_deallocate(old_submap);
2043 vm_map_unlock(target_map);
2044 return KERN_SUCCESS;
2045 }
2046
2047
2048 void
2049 mach_destroy_memory_entry(
2050 ipc_port_t port)
2051 {
2052 vm_named_entry_t named_entry;
2053 #if MACH_ASSERT
2054 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
2055 #endif /* MACH_ASSERT */
2056 named_entry = (vm_named_entry_t)port->ip_kobject;
2057 mutex_lock(&(named_entry)->Lock);
2058 named_entry->ref_count-=1;
2059 if(named_entry->ref_count == 0) {
2060 if(named_entry->object) {
2061 /* release the memory object we've been pointing to */
2062 vm_object_deallocate(named_entry->object);
2063 }
2064 if(named_entry->is_sub_map) {
2065 vm_map_deallocate(named_entry->backing.map);
2066 }
2067 kfree((vm_offset_t)port->ip_kobject,
2068 sizeof (struct vm_named_entry));
2069 } else
2070 mutex_unlock(&(named_entry)->Lock);
2071 }
2072
2073
2074 kern_return_t
2075 vm_map_page_query(
2076 vm_map_t target_map,
2077 vm_offset_t offset,
2078 int *disposition,
2079 int *ref_count)
2080 {
2081 vm_map_entry_t map_entry;
2082 vm_object_t object;
2083 vm_page_t m;
2084
2085 restart_page_query:
2086 *disposition = 0;
2087 *ref_count = 0;
2088 vm_map_lock(target_map);
2089 if(!vm_map_lookup_entry(target_map, offset, &map_entry)) {
2090 vm_map_unlock(target_map);
2091 return KERN_FAILURE;
2092 }
2093 offset -= map_entry->vme_start; /* adjust to offset within entry */
2094 offset += map_entry->offset; /* adjust to target object offset */
2095 if(map_entry->object.vm_object != VM_OBJECT_NULL) {
2096 if(!map_entry->is_sub_map) {
2097 object = map_entry->object.vm_object;
2098 } else {
2099 vm_map_unlock(target_map);
2100 target_map = map_entry->object.sub_map;
2101 goto restart_page_query;
2102 }
2103 } else {
2104 vm_map_unlock(target_map);
2105 return KERN_FAILURE;
2106 }
2107 vm_object_lock(object);
2108 vm_map_unlock(target_map);
2109 while(TRUE) {
2110 m = vm_page_lookup(object, offset);
2111 if (m != VM_PAGE_NULL) {
2112 *disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
2113 break;
2114 } else {
2115 if(object->shadow) {
2116 offset += object->shadow_offset;
2117 vm_object_unlock(object);
2118 object = object->shadow;
2119 vm_object_lock(object);
2120 continue;
2121 }
2122 vm_object_unlock(object);
2123 return KERN_FAILURE;
2124 }
2125 }
2126
2127 /* The ref_count is not strictly accurate; it measures the number */
2128 /* of entities holding a reference on the object.  They may not be */
2129 /* mapping the object, or may not be mapping the section holding the */
2130 /* target page, but it is still a ballpark number and, though an */
2131 /* overcount, it picks up the copy-on-write cases. */
2132
2133 /* We could also get a picture of page sharing from pmap_attributes, */
2134 /* but this would undercount, as only faulted-in mappings would */
2135 /* show up. */
2136
2137 *ref_count = object->ref_count;
2138
2139 if (m->fictitious) {
2140 *disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
2141 vm_object_unlock(object);
2142 return KERN_SUCCESS;
2143 }
2144
2145 if (m->dirty)
2146 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
2147 else if(pmap_is_modified(m->phys_page))
2148 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
2149
2150 if (m->reference)
2151 *disposition |= VM_PAGE_QUERY_PAGE_REF;
2152 else if(pmap_is_referenced(m->phys_page))
2153 *disposition |= VM_PAGE_QUERY_PAGE_REF;
2154
2155 vm_object_unlock(object);
2156 return KERN_SUCCESS;
2157
2158 }
2159
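/*
 * Caller-side sketch for vm_map_page_query() above (illustrative only;
 * "some_map" and "some_offset" are placeholders).  Only the signature
 * and the VM_PAGE_QUERY_* bits set above are assumed.
 *
 *	int		disposition, ref_count;
 *	kern_return_t	kr;
 *
 *	kr = vm_map_page_query(some_map, some_offset,
 *				&disposition, &ref_count);
 *	if ((kr == KERN_SUCCESS) &&
 *	    (disposition & VM_PAGE_QUERY_PAGE_PRESENT) &&
 *	    (disposition & VM_PAGE_QUERY_PAGE_DIRTY)) {
 *		the page is resident and dirty; ref_count is only the
 *		ballpark sharing figure described above
 *	}
 */
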
2160 kern_return_t
2161 set_dp_control_port(
2162 host_priv_t host_priv,
2163 ipc_port_t control_port)
2164 {
2165 if (host_priv == HOST_PRIV_NULL)
2166 return (KERN_INVALID_HOST);
2167
2168 if (IP_VALID(dynamic_pager_control_port))
2169 ipc_port_release_send(dynamic_pager_control_port);
2170
2171 dynamic_pager_control_port = control_port;
2172 return KERN_SUCCESS;
2173 }
2174
2175 kern_return_t
2176 get_dp_control_port(
2177 host_priv_t host_priv,
2178 ipc_port_t *control_port)
2179 {
2180 if (host_priv == HOST_PRIV_NULL)
2181 return (KERN_INVALID_HOST);
2182
2183 *control_port = ipc_port_copy_send(dynamic_pager_control_port);
2184 return KERN_SUCCESS;
2185
2186 }
2187
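/*
 * Illustrative pairing of the two routines above (a sketch only;
 * host_priv_self() is assumed here for the privileged host port).
 * set_dp_control_port() stores the caller's send right, releasing any
 * previous one; get_dp_control_port() hands back a copied send right
 * that the caller must release when done.
 *
 *	ipc_port_t	port;
 *
 *	if ((get_dp_control_port(host_priv_self(), &port)
 *					== KERN_SUCCESS) &&
 *	    IP_VALID(port)) {
 *		... talk to the dynamic pager ...
 *		ipc_port_release_send(port);
 *	}
 */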
2188
2189 /* Retrieve a UPL for the object underlying an address range in a map */
2190
2191 kern_return_t
2192 vm_map_get_upl(
2193 vm_map_t map,
2194 vm_address_t offset,
2195 vm_size_t *upl_size,
2196 upl_t *upl,
2197 upl_page_info_array_t page_list,
2198 unsigned int *count,
2199 int *flags,
2200 int force_data_sync)
2201 {
2202 vm_map_entry_t entry;
2203 int caller_flags;
2204 int sync_cow_data = FALSE;
2205 vm_object_t local_object;
2206 vm_offset_t local_offset;
2207 vm_offset_t local_start;
2208 kern_return_t ret;
2209
2210 caller_flags = *flags;
2211 if (!(caller_flags & UPL_COPYOUT_FROM)) {
2212 sync_cow_data = TRUE;
2213 }
2214 if(upl == NULL)
2215 return KERN_INVALID_ARGUMENT;
2216
2217
2218 REDISCOVER_ENTRY:
2219 vm_map_lock(map);
2220 if (vm_map_lookup_entry(map, offset, &entry)) {
2221 if (entry->object.vm_object == VM_OBJECT_NULL ||
2222 !entry->object.vm_object->phys_contiguous) {
2223 if((*upl_size/page_size) > MAX_UPL_TRANSFER) {
2224 *upl_size = MAX_UPL_TRANSFER * page_size;
2225 }
2226 }
2227 if((entry->vme_end - offset) < *upl_size) {
2228 *upl_size = entry->vme_end - offset;
2229 }
2230 if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
2231 if (entry->object.vm_object == VM_OBJECT_NULL) {
2232 *flags = 0;
2233 } else if (entry->object.vm_object->private) {
2234 *flags = UPL_DEV_MEMORY;
2235 if (entry->object.vm_object->phys_contiguous) {
2236 *flags |= UPL_PHYS_CONTIG;
2237 }
2238 } else {
2239 *flags = 0;
2240 }
2241 vm_map_unlock(map);
2242 return KERN_SUCCESS;
2243 }
2244 /*
2245 * Create an object if necessary.
2246 */
2247 if (entry->object.vm_object == VM_OBJECT_NULL) {
2248 entry->object.vm_object = vm_object_allocate(
2249 (vm_size_t)(entry->vme_end - entry->vme_start));
2250 entry->offset = 0;
2251 }
2252 if (!(caller_flags & UPL_COPYOUT_FROM)) {
2253 if (!(entry->protection & VM_PROT_WRITE)) {
2254 vm_map_unlock(map);
2255 return KERN_PROTECTION_FAILURE;
2256 }
2257 if (entry->needs_copy) {
2258 vm_map_t local_map;
2259 vm_object_t object;
2260 vm_object_offset_t offset_hi;
2261 vm_object_offset_t offset_lo;
2262 vm_object_offset_t new_offset;
2263 vm_prot_t prot;
2264 boolean_t wired;
2265 vm_behavior_t behavior;
2266 vm_map_version_t version;
2267 vm_map_t pmap_map;
2268
2269 local_map = map;
2270 vm_map_lock_write_to_read(map);
2271 if(vm_map_lookup_locked(&local_map,
2272 offset, VM_PROT_WRITE,
2273 &version, &object,
2274 &new_offset, &prot, &wired,
2275 &behavior, &offset_lo,
2276 &offset_hi, &pmap_map)) {
2277 vm_map_unlock(local_map);
2278 return KERN_FAILURE;
2279 }
2280 if (pmap_map != map) {
2281 vm_map_unlock(pmap_map);
2282 }
2283 vm_object_unlock(object);
2284 vm_map_unlock(local_map);
2285
2286 goto REDISCOVER_ENTRY;
2287 }
2288 }
2289 if (entry->is_sub_map) {
2290 vm_map_t submap;
2291
2292 submap = entry->object.sub_map;
2293 local_start = entry->vme_start;
2294 local_offset = entry->offset;
2295 vm_map_reference(submap);
2296 vm_map_unlock(map);
2297
2298 ret = (vm_map_get_upl(submap,
2299 local_offset + (offset - local_start),
2300 upl_size, upl, page_list, count,
2301 flags, force_data_sync));
2302
2303 vm_map_deallocate(submap);
2304 return ret;
2305 }
2306
2307 if (sync_cow_data) {
2308 if (entry->object.vm_object->shadow
2309 || entry->object.vm_object->copy) {
2310 int flags;
2311
2312 local_object = entry->object.vm_object;
2313 local_start = entry->vme_start;
2314 local_offset = entry->offset;
2315 vm_object_reference(local_object);
2316 vm_map_unlock(map);
2317
2318 if(local_object->copy == NULL) {
2319 flags = MEMORY_OBJECT_DATA_SYNC;
2320 } else {
2321 flags = MEMORY_OBJECT_COPY_SYNC;
2322 }
2323
2324 if((local_object->paging_offset) &&
2325 (local_object->pager == 0)) {
2326 /*
2327 * Do a little clean-up for our unorthodox
2328 * entry into a pager call from a non-pager
2329 * context.  Normally the pager code
2330 * assumes that an object it has been called
2331 * with has a backing pager, and so does
2332 * not bother to check the pager field
2333 * before relying on the paging_offset.
2334 */
2335 vm_object_lock(local_object);
2336 if (local_object->pager == 0) {
2337 local_object->paging_offset = 0;
2338 }
2339 vm_object_unlock(local_object);
2340 }
2341
2342 if (entry->object.vm_object->shadow &&
2343 entry->object.vm_object->copy) {
2344 vm_object_lock_request(
2345 local_object->shadow,
2346 (vm_object_offset_t)
2347 ((offset - local_start) +
2348 local_offset) +
2349 local_object->shadow_offset +
2350 local_object->paging_offset,
2351 *upl_size, FALSE,
2352 MEMORY_OBJECT_DATA_SYNC,
2353 VM_PROT_NO_CHANGE);
2354 }
2355 sync_cow_data = FALSE;
2356 vm_object_deallocate(local_object);
2357 goto REDISCOVER_ENTRY;
2358 }
2359 }
2360
2361 if (force_data_sync) {
2362
2363 local_object = entry->object.vm_object;
2364 local_start = entry->vme_start;
2365 local_offset = entry->offset;
2366 vm_object_reference(local_object);
2367 vm_map_unlock(map);
2368
2369 if((local_object->paging_offset) &&
2370 (local_object->pager == 0)) {
2371 /*
2372 * Do a little clean-up for our unorthodox
2373 * entry into a pager call from a non-pager
2374 * context.  Normally the pager code
2375 * assumes that an object it has been called
2376 * with has a backing pager, and so does
2377 * not bother to check the pager field
2378 * before relying on the paging_offset.
2379 */
2380 vm_object_lock(local_object);
2381 if (local_object->pager == 0) {
2382 local_object->paging_offset = 0;
2383 }
2384 vm_object_unlock(local_object);
2385 }
2386
2387 vm_object_lock_request(
2388 local_object,
2389 (vm_object_offset_t)
2390 ((offset - local_start) + local_offset) +
2391 local_object->paging_offset,
2392 (vm_object_size_t)*upl_size, FALSE,
2393 MEMORY_OBJECT_DATA_SYNC,
2394 VM_PROT_NO_CHANGE);
2395 force_data_sync = FALSE;
2396 vm_object_deallocate(local_object);
2397 goto REDISCOVER_ENTRY;
2398 }
2399
2400 if(!(entry->object.vm_object->private)) {
2401 if(*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
2402 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
2403 if(entry->object.vm_object->phys_contiguous) {
2404 *flags = UPL_PHYS_CONTIG;
2405 } else {
2406 *flags = 0;
2407 }
2408 } else {
2409 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
2410 }
2411 local_object = entry->object.vm_object;
2412 local_offset = entry->offset;
2413 local_start = entry->vme_start;
2414 vm_object_reference(local_object);
2415 vm_map_unlock(map);
2416 if(caller_flags & UPL_SET_IO_WIRE) {
2417 ret = (vm_object_iopl_request(local_object,
2418 (vm_object_offset_t)
2419 ((offset - local_start)
2420 + local_offset),
2421 *upl_size,
2422 upl,
2423 page_list,
2424 count,
2425 caller_flags));
2426 } else {
2427 ret = (vm_object_upl_request(local_object,
2428 (vm_object_offset_t)
2429 ((offset - local_start)
2430 + local_offset),
2431 *upl_size,
2432 upl,
2433 page_list,
2434 count,
2435 caller_flags));
2436 }
2437 vm_object_deallocate(local_object);
2438 return(ret);
2439 }
2440
2441 vm_map_unlock(map);
2442 return(KERN_FAILURE);
2443
2444 }
2445
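/*
 * Usage sketch for vm_map_get_upl() above (illustrative; "map" and
 * "addr" are placeholders, and trunc_page() is assumed from
 * mach/vm_param.h).  upl_size and flags are in/out parameters: the
 * size may be clipped to the entry or to MAX_UPL_TRANSFER pages, and
 * flags returns the UPL_* characteristics of the underlying object.
 *
 *	upl_t			upl;
 *	upl_page_info_t		pl[MAX_UPL_TRANSFER];
 *	unsigned int		count = MAX_UPL_TRANSFER;
 *	vm_size_t		upl_size = PAGE_SIZE;
 *	int			flags = UPL_COPYOUT_FROM;
 *	kern_return_t		kr;
 *
 *	kr = vm_map_get_upl(map, trunc_page(addr), &upl_size, &upl,
 *			pl, &count, &flags, FALSE);
 *
 * On success the caller eventually commits or aborts the UPL, for
 * example through the kernel_upl_* wrappers below.
 */
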
2446 /* ******* Temporary Internal calls to UPL for BSD ***** */
2447 kern_return_t
2448 kernel_upl_map(
2449 vm_map_t map,
2450 upl_t upl,
2451 vm_offset_t *dst_addr)
2452 {
2453 return (vm_upl_map(map, upl, dst_addr));
2454 }
2455
2456
2457 kern_return_t
2458 kernel_upl_unmap(
2459 vm_map_t map,
2460 upl_t upl)
2461 {
2462 return(vm_upl_unmap(map, upl));
2463 }
2464
2465 kern_return_t
2466 kernel_upl_commit(
2467 upl_t upl,
2468 upl_page_info_t *pl,
2469 mach_msg_type_number_t count)
2470 {
2471 kern_return_t kr;
2472
2473 kr = upl_commit(upl, pl, count);
2474 upl_deallocate(upl);
2475 return kr;
2476 }
2477
2478
2479 kern_return_t
2480 kernel_upl_commit_range(
2481 upl_t upl,
2482 vm_offset_t offset,
2483 vm_size_t size,
2484 int flags,
2485 upl_page_info_array_t pl,
2486 mach_msg_type_number_t count)
2487 {
2488 boolean_t finished = FALSE;
2489 kern_return_t kr;
2490
2491 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
2492 flags |= UPL_COMMIT_NOTIFY_EMPTY;
2493
2494 kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
2495
2496 if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
2497 upl_deallocate(upl);
2498
2499 return kr;
2500 }
2501
2502 kern_return_t
2503 kernel_upl_abort_range(
2504 upl_t upl,
2505 vm_offset_t offset,
2506 vm_size_t size,
2507 int abort_flags)
2508 {
2509 kern_return_t kr;
2510 boolean_t finished = FALSE;
2511
2512 if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
2513 abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
2514
2515 kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
2516
2517 if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
2518 upl_deallocate(upl);
2519
2520 return kr;
2521 }
2522
2523 kern_return_t
2524 kernel_upl_abort(
2525 upl_t upl,
2526 int abort_type)
2527 {
2528 kern_return_t kr;
2529
2530 kr = upl_abort(upl, abort_type);
2531 upl_deallocate(upl);
2532 return kr;
2533 }
2534
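/*
 * Typical lifecycle for the wrappers above (a sketch only; kernel_map,
 * pl and count are assumed to come from the caller's earlier UPL
 * setup).  kernel_upl_commit() and kernel_upl_abort() fold the
 * upl_deallocate() into the completion step.
 *
 *	vm_offset_t	dst;
 *
 *	if (kernel_upl_map(kernel_map, upl, &dst) == KERN_SUCCESS) {
 *		... read or write the pages mapped at dst ...
 *		kernel_upl_unmap(kernel_map, upl);
 *	}
 *	kernel_upl_commit(upl, pl, count);
 *
 * or kernel_upl_abort()/kernel_upl_abort_range() on failure.
 */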
2535
2536 kern_return_t
2537 vm_get_shared_region(
2538 task_t task,
2539 shared_region_mapping_t *shared_region)
2540 {
2541 *shared_region = (shared_region_mapping_t) task->system_shared_region;
2542 return KERN_SUCCESS;
2543 }
2544
2545 kern_return_t
2546 vm_set_shared_region(
2547 task_t task,
2548 shared_region_mapping_t shared_region)
2549 {
2550 task->system_shared_region = (vm_offset_t) shared_region;
2551 return KERN_SUCCESS;
2552 }
2553
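/*
 * Illustrative pairing for the accessors above (a sketch only; "task"
 * and "other_task" are placeholders).  Neither routine manipulates
 * reference counts, so a caller that publishes a region to another
 * task takes its own reference first.
 *
 *	shared_region_mapping_t	sr;
 *
 *	vm_get_shared_region(task, &sr);
 *	shared_region_mapping_ref(sr);
 *	vm_set_shared_region(other_task, sr);
 */
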
2554 kern_return_t
2555 shared_region_mapping_info(
2556 shared_region_mapping_t shared_region,
2557 ipc_port_t *text_region,
2558 vm_size_t *text_size,
2559 ipc_port_t *data_region,
2560 vm_size_t *data_size,
2561 vm_offset_t *region_mappings,
2562 vm_offset_t *client_base,
2563 vm_offset_t *alt_base,
2564 vm_offset_t *alt_next,
2565 unsigned int *fs_base,
2566 unsigned int *system,
2567 int *flags,
2568 shared_region_mapping_t *next)
2569 {
2570 shared_region_mapping_lock(shared_region);
2571
2572 *text_region = shared_region->text_region;
2573 *text_size = shared_region->text_size;
2574 *data_region = shared_region->data_region;
2575 *data_size = shared_region->data_size;
2576 *region_mappings = shared_region->region_mappings;
2577 *client_base = shared_region->client_base;
2578 *alt_base = shared_region->alternate_base;
2579 *alt_next = shared_region->alternate_next;
2580 *flags = shared_region->flags;
2581 *fs_base = shared_region->fs_base;
2582 *system = shared_region->system;
2583 *next = shared_region->next;
2584
2585 shared_region_mapping_unlock(shared_region);
	return KERN_SUCCESS;
2586 }
2587
2588 kern_return_t
2589 shared_region_object_chain_attach(
2590 shared_region_mapping_t target_region,
2591 shared_region_mapping_t object_chain_region)
2592 {
2593 shared_region_object_chain_t object_ele;
2594
2595 if(target_region->object_chain)
2596 return KERN_FAILURE;
2597 object_ele = (shared_region_object_chain_t)
2598 kalloc(sizeof (struct shared_region_object_chain));
	if(object_ele == NULL)
		return KERN_FAILURE;
2599 shared_region_mapping_lock(object_chain_region);
2600 target_region->object_chain = object_ele;
2601 object_ele->object_chain_region = object_chain_region;
2602 object_ele->next = object_chain_region->object_chain;
2603 object_ele->depth = object_chain_region->depth;
2604 object_chain_region->depth++;
2605 target_region->alternate_next = object_chain_region->alternate_next;
2606 shared_region_mapping_unlock(object_chain_region);
2607 return KERN_SUCCESS;
2608 }
2609
2610 kern_return_t
2611 shared_region_mapping_create(
2612 ipc_port_t text_region,
2613 vm_size_t text_size,
2614 ipc_port_t data_region,
2615 vm_size_t data_size,
2616 vm_offset_t region_mappings,
2617 vm_offset_t client_base,
2618 shared_region_mapping_t *shared_region,
2619 vm_offset_t alt_base,
2620 vm_offset_t alt_next)
2621 {
2622 *shared_region = (shared_region_mapping_t)
2623 kalloc(sizeof (struct shared_region_mapping));
2624 if(*shared_region == NULL)
2625 return KERN_FAILURE;
2626 shared_region_mapping_lock_init((*shared_region));
2627 (*shared_region)->text_region = text_region;
2628 (*shared_region)->text_size = text_size;
2629 (*shared_region)->fs_base = ENV_DEFAULT_ROOT;
2630 (*shared_region)->system = ENV_DEFAULT_SYSTEM;
2631 (*shared_region)->data_region = data_region;
2632 (*shared_region)->data_size = data_size;
2633 (*shared_region)->region_mappings = region_mappings;
2634 (*shared_region)->client_base = client_base;
2635 (*shared_region)->ref_count = 1;
2636 (*shared_region)->next = NULL;
2637 (*shared_region)->object_chain = NULL;
2638 (*shared_region)->self = *shared_region;
2639 (*shared_region)->flags = 0;
2640 (*shared_region)->depth = 0;
2641 (*shared_region)->default_env_list = NULL;
2642 (*shared_region)->alternate_base = alt_base;
2643 (*shared_region)->alternate_next = alt_next;
2644 return KERN_SUCCESS;
2645 }
2646
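/*
 * Creation sketch for the routine above (illustrative; the region
 * ports, sizes and base addresses are placeholders).  The new mapping
 * starts with ref_count 1 and the ENV_DEFAULT_* fs_base and system
 * values; additional users take references with
 * shared_region_mapping_ref(), and the final
 * shared_region_mapping_dealloc() tears the mapping down.
 *
 *	shared_region_mapping_t	new_region;
 *	kern_return_t		kr;
 *
 *	kr = shared_region_mapping_create(text_port, text_size,
 *			data_port, data_size, region_mappings,
 *			client_base, &new_region, alt_base, alt_next);
 */
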
2647 kern_return_t
2648 shared_region_mapping_set_alt_next(
2649 shared_region_mapping_t shared_region,
2650 vm_offset_t alt_next)
2651 {
2652 shared_region->alternate_next = alt_next;
2653 return KERN_SUCCESS;
2654 }
2655
2656 kern_return_t
2657 shared_region_mapping_ref(
2658 shared_region_mapping_t shared_region)
2659 {
2660 if(shared_region == NULL)
2661 return KERN_SUCCESS;
2662 hw_atomic_add(&shared_region->ref_count, 1);
2663 return KERN_SUCCESS;
2664 }
2665
2666 kern_return_t
2667 shared_region_mapping_dealloc(
2668 shared_region_mapping_t shared_region)
2669 {
2670 struct shared_region_task_mappings sm_info;
2671 shared_region_mapping_t next = NULL;
2672 int ref_count;
2673
2674 while (shared_region) {
2675 if ((ref_count =
2676 hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
2677 shared_region_mapping_lock(shared_region);
2678
2679 sm_info.text_region = shared_region->text_region;
2680 sm_info.text_size = shared_region->text_size;
2681 sm_info.data_region = shared_region->data_region;
2682 sm_info.data_size = shared_region->data_size;
2683 sm_info.region_mappings = shared_region->region_mappings;
2684 sm_info.client_base = shared_region->client_base;
2685 sm_info.alternate_base = shared_region->alternate_base;
2686 sm_info.alternate_next = shared_region->alternate_next;
2687 sm_info.flags = shared_region->flags;
2688 sm_info.self = (vm_offset_t)shared_region;
2689
2690 if(shared_region->region_mappings) {
2691 lsf_remove_regions_mappings(shared_region, &sm_info);
2692 }
2693 if(((vm_named_entry_t)
2694 (shared_region->text_region->ip_kobject))
2695 ->backing.map->pmap) {
2696 pmap_remove(((vm_named_entry_t)
2697 (shared_region->text_region->ip_kobject))
2698 ->backing.map->pmap,
2699 sm_info.client_base,
2700 sm_info.client_base + sm_info.text_size);
2701 }
2702 ipc_port_release_send(shared_region->text_region);
2703 if(shared_region->data_region)
2704 ipc_port_release_send(shared_region->data_region);
2705 if (shared_region->object_chain) {
2706 next = shared_region->object_chain->object_chain_region;
2707 kfree((vm_offset_t)shared_region->object_chain,
2708 sizeof (struct shared_region_object_chain));
2709 } else {
2710 next = NULL;
2711 }
2712 shared_region_mapping_unlock(shared_region);
2713 kfree((vm_offset_t)shared_region,
2714 sizeof (struct shared_region_mapping));
2715 shared_region = next;
2716 } else {
2717 /* Stale indicates that a system region is no */
2718 /* longer in the default environment list. */
2719 if((ref_count == 1) &&
2720 (shared_region->flags & SHARED_REGION_SYSTEM)
2721 && !(shared_region->flags & SHARED_REGION_STALE)) {
2722 remove_default_shared_region(shared_region);
2723 }
2724 break;
2725 }
2726 }
2727 return KERN_SUCCESS;
2728 }
2729
2730 ppnum_t
2731 vm_map_get_phys_page(
2732 vm_map_t map,
2733 vm_offset_t offset)
2734 {
2735 vm_map_entry_t entry;
2736 int ops;
2737 int flags;
2738 ppnum_t phys_page = 0;
2739 vm_object_t object;
2740
2741 vm_map_lock(map);
2742 while (vm_map_lookup_entry(map, offset, &entry)) {
2743
2744 if (entry->object.vm_object == VM_OBJECT_NULL) {
2745 vm_map_unlock(map);
2746 return (ppnum_t) 0;
2747 }
2748 if (entry->is_sub_map) {
2749 vm_map_t old_map;
2750 vm_map_lock(entry->object.sub_map);
2751 old_map = map;
2752 map = entry->object.sub_map;
2753 offset = entry->offset + (offset - entry->vme_start);
2754 vm_map_unlock(old_map);
2755 continue;
2756 }
2757 if (entry->object.vm_object->phys_contiguous) {
2758 /* These are not standard pageable memory mappings */
2759 /* If they are not present in the object they will */
2760 /* have to be picked up from the pager through the */
2761 /* fault mechanism. */
2762 if(entry->object.vm_object->shadow_offset == 0) {
2763 /* need to call vm_fault */
2764 vm_map_unlock(map);
2765 vm_fault(map, offset, VM_PROT_NONE,
2766 FALSE, THREAD_UNINT, NULL, 0);
2767 vm_map_lock(map);
2768 continue;
2769 }
2770 offset = entry->offset + (offset - entry->vme_start);
2771 phys_page = (ppnum_t)
2772 ((entry->object.vm_object->shadow_offset
2773 + offset) >> 12);
2774 break;
2775
2776 }
2777 offset = entry->offset + (offset - entry->vme_start);
2778 object = entry->object.vm_object;
2779 vm_object_lock(object);
2780 while (TRUE) {
2781 vm_page_t dst_page = vm_page_lookup(object,offset);
2782 if(dst_page == VM_PAGE_NULL) {
2783 if(object->shadow) {
2784 vm_object_t old_object;
2785 vm_object_lock(object->shadow);
2786 old_object = object;
2787 offset = offset + object->shadow_offset;
2788 object = object->shadow;
2789 vm_object_unlock(old_object);
2790 } else {
2791 vm_object_unlock(object);
2792 break;
2793 }
2794 } else {
2795 phys_page = (ppnum_t)(dst_page->phys_page);
2796 vm_object_unlock(object);
2797 break;
2798 }
2799 }
2800 break;
2801
2802 }
2803
2804 vm_map_unlock(map);
2805 return phys_page;
2806 }
2807
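/*
 * Illustrative call for the routine above ("map" and "addr" are
 * placeholders; trunc_page() is assumed from mach/vm_param.h).  A
 * return of 0 means no physical page is currently resident for the
 * address.
 *
 *	ppnum_t	pn;
 *
 *	pn = vm_map_get_phys_page(map, trunc_page(addr));
 *	if (pn != 0) {
 *		... pn is the physical page number backing addr ...
 *	}
 */
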
2808 kern_return_t
2809 kernel_object_iopl_request(
2810 vm_named_entry_t named_entry,
2811 memory_object_offset_t offset,
2812 vm_size_t size,
2813 upl_t *upl_ptr,
2814 upl_page_info_array_t user_page_list,
2815 unsigned int *page_list_count,
2816 int cntrl_flags)
2817 {
2818 vm_object_t object;
2819 kern_return_t ret;
2820
2821
2822 /* a few checks to make sure the user is obeying the rules */
2823 if(size == 0) {
2824 if(offset >= named_entry->size)
2825 return(KERN_INVALID_RIGHT);
2826 size = named_entry->size - offset;
2827 }
2828 if(cntrl_flags & UPL_COPYOUT_FROM) {
2829 if((named_entry->protection & VM_PROT_READ)
2830 != VM_PROT_READ) {
2831 return(KERN_INVALID_RIGHT);
2832 }
2833 } else {
2834 if((named_entry->protection &
2835 (VM_PROT_READ | VM_PROT_WRITE))
2836 != (VM_PROT_READ | VM_PROT_WRITE)) {
2837 return(KERN_INVALID_RIGHT);
2838 }
2839 }
2840 if(named_entry->size < (offset + size))
2841 return(KERN_INVALID_ARGUMENT);
2842
2843 /* the caller's parameter offset is defined to be relative to the */
2844 /* start of the named entry; convert it to an offset in the object */
2845 offset = offset + named_entry->offset;
2846
2847 if(named_entry->is_sub_map)
2848 return (KERN_INVALID_ARGUMENT);
2849
2850 named_entry_lock(named_entry);
2851
2852 if(named_entry->object) {
2853 /* This is the case where we are going to map */
2854 /* an already mapped object.  If the object is */
2855 /* not ready, it is internal.  An external */
2856 /* object cannot be mapped until it is ready, */
2857 /* so we can avoid the ready check in this */
2858 /* case. */
2859 vm_object_reference(named_entry->object);
2860 object = named_entry->object;
2861 named_entry_unlock(named_entry);
2862 } else {
2863 object = vm_object_enter(named_entry->backing.pager,
2864 named_entry->size,
2865 named_entry->internal,
2866 FALSE,
2867 FALSE);
2868 if (object == VM_OBJECT_NULL) {
2869 named_entry_unlock(named_entry);
2870 return(KERN_INVALID_OBJECT);
2871 }
2872 vm_object_lock(object);
2873
2874 /* create an extra reference for the named entry */
2875 vm_object_reference_locked(object);
2876 named_entry->object = object;
2877 named_entry_unlock(named_entry);
2878
2879 /* wait for object (if any) to be ready */
2880 while (!object->pager_ready) {
2881 vm_object_wait(object,
2882 VM_OBJECT_EVENT_PAGER_READY,
2883 THREAD_UNINT);
2884 vm_object_lock(object);
2885 }
2886 vm_object_unlock(object);
2887 }
2888
2889 ret = vm_object_iopl_request(object,
2890 offset,
2891 size,
2892 upl_ptr,
2893 user_page_list,
2894 page_list_count,
2895 cntrl_flags);
2896 vm_object_deallocate(object);
2897 return ret;
2898 }
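
/*
 * Usage sketch for kernel_object_iopl_request() above (illustrative;
 * "named_entry" is a placeholder).  The named entry must not be a
 * submap, the offset is relative to the start of the named entry, and
 * the entry's protection must allow the requested direction (read for
 * UPL_COPYOUT_FROM, read/write otherwise), as checked above.
 *
 *	upl_t			upl;
 *	upl_page_info_t		pl[MAX_UPL_TRANSFER];
 *	unsigned int		count = MAX_UPL_TRANSFER;
 *	kern_return_t		kr;
 *
 *	kr = kernel_object_iopl_request(named_entry, 0, PAGE_SIZE,
 *			&upl, pl, &count, UPL_COPYOUT_FROM);
 */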
2899 #endif /* VM_CPM */