1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: vm/vm_user.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young
55 *
56 * User-exported virtual memory functions.
57 */
58 #ifdef MACH_BSD
59 /* remove after component interface available */
60 extern int vnode_pager_workaround;
61 #endif
62
63 #include <vm_cpm.h>
64 #include <mach/boolean.h>
65 #include <mach/kern_return.h>
66 #include <mach/mach_types.h> /* to get vm_address_t */
67 #include <mach/memory_object.h>
68 #include <mach/std_types.h> /* to get pointer_t */
69 #include <mach/vm_attributes.h>
70 #include <mach/vm_param.h>
71 #include <mach/vm_statistics.h>
72 #include <mach/vm_map_server.h>
73 #include <mach/mach_syscalls.h>
74 #include <mach/shared_memory_server.h>
75
76 #include <kern/host.h>
77 #include <kern/task.h>
78 #include <kern/misc_protos.h>
79 #include <vm/vm_map.h>
80 #include <vm/vm_object.h>
81 #include <vm/vm_page.h>
82 #include <vm/memory_object.h>
83 #include <vm/vm_pageout.h>
84
85
86
87 vm_size_t upl_offset_to_pagelist = 0;
88
89 #if VM_CPM
90 #include <vm/cpm.h>
91 #endif /* VM_CPM */
92
93 ipc_port_t dynamic_pager_control_port=NULL;
94
95 /*
  96  *      vm_allocate allocates "zero fill" memory in the specified
97 * map.
98 */
99 kern_return_t
100 vm_allocate(
101 register vm_map_t map,
102 register vm_offset_t *addr,
103 register vm_size_t size,
104 int flags)
105 {
106 kern_return_t result;
107 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
108
109 if (map == VM_MAP_NULL)
110 return(KERN_INVALID_ARGUMENT);
111 if (size == 0) {
112 *addr = 0;
113 return(KERN_SUCCESS);
114 }
115
116 if (anywhere)
117 *addr = vm_map_min(map);
118 else
119 *addr = trunc_page(*addr);
120 size = round_page(size);
121 if (size == 0) {
122 return(KERN_INVALID_ARGUMENT);
123 }
124
125 result = vm_map_enter(
126 map,
127 addr,
128 size,
129 (vm_offset_t)0,
130 flags,
131 VM_OBJECT_NULL,
132 (vm_object_offset_t)0,
133 FALSE,
134 VM_PROT_DEFAULT,
135 VM_PROT_ALL,
136 VM_INHERIT_DEFAULT);
137
138 return(result);
139 }
140
141 /*
142 * vm_deallocate deallocates the specified range of addresses in the
143 * specified address map.
144 */
145 kern_return_t
146 vm_deallocate(
147 register vm_map_t map,
148 vm_offset_t start,
149 vm_size_t size)
150 {
151 if (map == VM_MAP_NULL)
152 return(KERN_INVALID_ARGUMENT);
153
154 if (size == (vm_offset_t) 0)
155 return(KERN_SUCCESS);
156
157 return(vm_map_remove(map, trunc_page(start),
158 round_page(start+size), VM_MAP_NO_FLAGS));
159 }
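/*
 *	Illustrative sketch (not part of this file): the user-space view of
 *	the two routines above, as invoked through the MIG-generated stubs.
 *	The buffer name and size below are assumptions, not anything defined
 *	here.
 *
 *		vm_address_t	buf = 0;
 *		kern_return_t	kr;
 *
 *		kr = vm_allocate(mach_task_self(), &buf,
 *				 4 * vm_page_size, VM_FLAGS_ANYWHERE);
 *		if (kr == KERN_SUCCESS) {
 *			... use the zero-filled pages at buf ...
 *			(void) vm_deallocate(mach_task_self(), buf,
 *					     4 * vm_page_size);
 *		}
 */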
160
161 /*
162 * vm_inherit sets the inheritance of the specified range in the
163 * specified map.
164 */
165 kern_return_t
166 vm_inherit(
167 register vm_map_t map,
168 vm_offset_t start,
169 vm_size_t size,
170 vm_inherit_t new_inheritance)
171 {
172 if (map == VM_MAP_NULL)
173 return(KERN_INVALID_ARGUMENT);
174
175 if (new_inheritance > VM_INHERIT_LAST_VALID)
176 return(KERN_INVALID_ARGUMENT);
177
178 return(vm_map_inherit(map,
179 trunc_page(start),
180 round_page(start+size),
181 new_inheritance));
182 }
183
184 /*
185 * vm_protect sets the protection of the specified range in the
186 * specified map.
187 */
188
189 kern_return_t
190 vm_protect(
191 register vm_map_t map,
192 vm_offset_t start,
193 vm_size_t size,
194 boolean_t set_maximum,
195 vm_prot_t new_protection)
196 {
197 if ((map == VM_MAP_NULL) ||
198 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
199 return(KERN_INVALID_ARGUMENT);
200
201 return(vm_map_protect(map,
202 trunc_page(start),
203 round_page(start+size),
204 new_protection,
205 set_maximum));
206 }
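/*
 *	Illustrative sketch (not part of this file): making an assumed,
 *	previously allocated range "buf" read-only.  Passing TRUE for
 *	set_maximum would instead lower the maximum protection, which
 *	cannot be raised again.
 *
 *		kr = vm_protect(mach_task_self(), buf, 4 * vm_page_size,
 *				FALSE, VM_PROT_READ);
 */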
207
208 /*
209 * Handle machine-specific attributes for a mapping, such
 210  *      as cacheability, migratability, etc.
211 */
212 kern_return_t
213 vm_machine_attribute(
214 vm_map_t map,
215 vm_address_t address,
216 vm_size_t size,
217 vm_machine_attribute_t attribute,
218 vm_machine_attribute_val_t* value) /* IN/OUT */
219 {
220 if (map == VM_MAP_NULL)
221 return(KERN_INVALID_ARGUMENT);
222
223 return vm_map_machine_attribute(map, address, size, attribute, value);
224 }
225
226 kern_return_t
227 vm_read(
228 vm_map_t map,
229 vm_address_t address,
230 vm_size_t size,
231 pointer_t *data,
232 mach_msg_type_number_t *data_size)
233 {
234 kern_return_t error;
235 vm_map_copy_t ipc_address;
236
237 if (map == VM_MAP_NULL)
238 return(KERN_INVALID_ARGUMENT);
239
240 if ((error = vm_map_copyin(map,
241 address,
242 size,
243 FALSE, /* src_destroy */
244 &ipc_address)) == KERN_SUCCESS) {
245 *data = (pointer_t) ipc_address;
246 *data_size = size;
247 }
248 return(error);
249 }
250
251 kern_return_t
252 vm_read_list(
253 vm_map_t map,
254 vm_read_entry_t data_list,
255 mach_msg_type_number_t count)
256 {
257 mach_msg_type_number_t i;
258 kern_return_t error;
259 vm_map_copy_t ipc_address;
260
261 if (map == VM_MAP_NULL)
262 return(KERN_INVALID_ARGUMENT);
263
264 for(i=0; i<count; i++) {
265 error = vm_map_copyin(map,
266 data_list[i].address,
267 data_list[i].size,
268 FALSE, /* src_destroy */
269 &ipc_address);
270 if(error != KERN_SUCCESS) {
271 data_list[i].address = (vm_address_t)0;
272 data_list[i].size = (vm_size_t)0;
273 break;
274 }
275 if(data_list[i].size != 0) {
276 error = vm_map_copyout(current_task()->map,
277 &(data_list[i].address),
278 (vm_map_copy_t) ipc_address);
279 if(error != KERN_SUCCESS) {
280 data_list[i].address = (vm_address_t)0;
281 data_list[i].size = (vm_size_t)0;
282 break;
283 }
284 }
285 }
286 return(error);
287 }
288
289 /*
290 * This routine reads from the specified map and overwrites part of the current
 291  * activation's map.  Because it assumes that the current thread is local,
 292  * it is no longer cluster-safe without a fully supportive local proxy thread/
 293  * task (but we don't support clusters anymore, so this is moot).
294 */
295
296 #define VM_OVERWRITE_SMALL 512
297
298 kern_return_t
299 vm_read_overwrite(
300 vm_map_t map,
301 vm_address_t address,
302 vm_size_t size,
303 vm_address_t data,
304 vm_size_t *data_size)
305 {
306 struct {
307 long align;
308 char buf[VM_OVERWRITE_SMALL];
309 } inbuf;
310 vm_map_t oldmap;
311 kern_return_t error = KERN_SUCCESS;
312 vm_map_copy_t copy;
313
314 if (map == VM_MAP_NULL)
315 return(KERN_INVALID_ARGUMENT);
316
317 if (size <= VM_OVERWRITE_SMALL) {
318 if(vm_map_read_user(map, (vm_offset_t)address,
319 (vm_offset_t)&inbuf, size)) {
320 error = KERN_INVALID_ADDRESS;
321 } else {
322 if(vm_map_write_user(current_map(),
323 (vm_offset_t)&inbuf, (vm_offset_t)data, size))
324 error = KERN_INVALID_ADDRESS;
325 }
326 }
327 else {
328 if ((error = vm_map_copyin(map,
329 address,
330 size,
331 FALSE, /* src_destroy */
332 &copy)) == KERN_SUCCESS) {
333 if ((error = vm_map_copy_overwrite(
334 current_act()->map,
335 data,
336 copy,
337 FALSE)) == KERN_SUCCESS) {
338 }
339 else {
340 vm_map_copy_discard(copy);
341 }
342 }
343 }
344 *data_size = size;
345 return(error);
346 }
347
348
349
350
351 /*ARGSUSED*/
352 kern_return_t
353 vm_write(
354 vm_map_t map,
355 vm_address_t address,
356 vm_offset_t data,
357 mach_msg_type_number_t size)
358 {
359 if (map == VM_MAP_NULL)
360 return KERN_INVALID_ARGUMENT;
361
362 return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
363 FALSE /* interruptible XXX */);
364 }
365
366 kern_return_t
367 vm_copy(
368 vm_map_t map,
369 vm_address_t source_address,
370 vm_size_t size,
371 vm_address_t dest_address)
372 {
373 vm_map_copy_t copy;
374 kern_return_t kr;
375
376 if (map == VM_MAP_NULL)
377 return KERN_INVALID_ARGUMENT;
378
379 kr = vm_map_copyin(map, source_address, size,
380 FALSE, &copy);
381 if (kr != KERN_SUCCESS)
382 return kr;
383
384 kr = vm_map_copy_overwrite(map, dest_address, copy,
385 FALSE /* interruptible XXX */);
386 if (kr != KERN_SUCCESS) {
387 vm_map_copy_discard(copy);
388 return kr;
389 }
390
391 return KERN_SUCCESS;
392 }
393
394 /*
395 * Routine: vm_map
396 */
397 kern_return_t
398 vm_map_64(
399 vm_map_t target_map,
400 vm_offset_t *address,
401 vm_size_t initial_size,
402 vm_offset_t mask,
403 int flags,
404 ipc_port_t port,
405 vm_object_offset_t offset,
406 boolean_t copy,
407 vm_prot_t cur_protection,
408 vm_prot_t max_protection,
409 vm_inherit_t inheritance)
410 {
411 register
412 vm_object_t object;
413 vm_prot_t prot;
414 vm_object_size_t size = (vm_object_size_t)initial_size;
415 kern_return_t result;
416
417 /*
418 * Check arguments for validity
419 */
420 if ((target_map == VM_MAP_NULL) ||
421 (cur_protection & ~VM_PROT_ALL) ||
422 (max_protection & ~VM_PROT_ALL) ||
423 (inheritance > VM_INHERIT_LAST_VALID) ||
424 size == 0)
425 return(KERN_INVALID_ARGUMENT);
426
427 /*
428 * Find the vm object (if any) corresponding to this port.
429 */
430 if (!IP_VALID(port)) {
431 object = VM_OBJECT_NULL;
432 offset = 0;
433 copy = FALSE;
434 } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
435 vm_named_entry_t named_entry;
436
437 named_entry = (vm_named_entry_t)port->ip_kobject;
438 /* a few checks to make sure user is obeying rules */
439 if(size == 0) {
440 if(offset >= named_entry->size)
441 return(KERN_INVALID_RIGHT);
442 size = named_entry->size - offset;
443 }
444 if((named_entry->protection & max_protection) != max_protection)
445 return(KERN_INVALID_RIGHT);
446 if((named_entry->protection & cur_protection) != cur_protection)
447 return(KERN_INVALID_RIGHT);
448 if(named_entry->size < (offset + size))
449 return(KERN_INVALID_ARGUMENT);
450
 451                 /* the caller's parameter offset is defined to be the */
 452                 /* offset from the beginning of the named entry offset in the object */
453 offset = offset + named_entry->offset;
454
455 named_entry_lock(named_entry);
456 if(named_entry->is_sub_map) {
457 vm_map_entry_t map_entry;
458
459 named_entry_unlock(named_entry);
460 *address = trunc_page(*address);
461 size = round_page(size);
462 vm_object_reference(vm_submap_object);
463 if ((result = vm_map_enter(target_map,
464 address, size, mask, flags,
465 vm_submap_object, 0,
466 FALSE,
467 cur_protection, max_protection, inheritance
468 )) != KERN_SUCCESS) {
469 vm_object_deallocate(vm_submap_object);
470 } else {
471 char alias;
472
473 VM_GET_FLAGS_ALIAS(flags, alias);
474 if ((alias == VM_MEMORY_SHARED_PMAP) &&
475 !copy) {
476 vm_map_submap(target_map, *address,
477 (*address) + size,
478 named_entry->backing.map,
479 (vm_offset_t)offset, TRUE);
480 } else {
481 vm_map_submap(target_map, *address,
482 (*address) + size,
483 named_entry->backing.map,
484 (vm_offset_t)offset, FALSE);
485 }
486 if(copy) {
487 if(vm_map_lookup_entry(
488 target_map, *address, &map_entry)) {
489 map_entry->needs_copy = TRUE;
490 }
491 }
492 }
493 return(result);
494
495 } else if(named_entry->object) {
496 /* This is the case where we are going to map */
497 /* an already mapped object. If the object is */
498 /* not ready it is internal. An external */
 499                         /* object cannot be mapped until it is ready, */
 500                         /* so we can avoid the ready check            */
501 /* in this case. */
502 named_entry_unlock(named_entry);
503 vm_object_reference(named_entry->object);
504 object = named_entry->object;
505 } else {
506 object = vm_object_enter(named_entry->backing.pager,
507 named_entry->size,
508 named_entry->internal,
509 FALSE,
510 FALSE);
511 if (object == VM_OBJECT_NULL) {
512 named_entry_unlock(named_entry);
513 return(KERN_INVALID_OBJECT);
514 }
515 named_entry->object = object;
516 named_entry_unlock(named_entry);
517 /* create an extra reference for the named entry */
518 vm_object_reference(named_entry->object);
519 /* wait for object (if any) to be ready */
520 if (object != VM_OBJECT_NULL) {
521 vm_object_lock(object);
522 while (!object->pager_ready) {
523 vm_object_wait(object,
524 VM_OBJECT_EVENT_PAGER_READY,
525 THREAD_UNINT);
526 vm_object_lock(object);
527 }
528 vm_object_unlock(object);
529 }
530 }
531 } else {
532 if ((object = vm_object_enter(port, size, FALSE, FALSE, FALSE))
533 == VM_OBJECT_NULL)
534 return(KERN_INVALID_OBJECT);
535
536 /* wait for object (if any) to be ready */
537 if (object != VM_OBJECT_NULL) {
538 vm_object_lock(object);
539 while (!object->pager_ready) {
540 vm_object_wait(object,
541 VM_OBJECT_EVENT_PAGER_READY,
542 THREAD_UNINT);
543 vm_object_lock(object);
544 }
545 vm_object_unlock(object);
546 }
547 }
548
549 *address = trunc_page(*address);
550 size = round_page(size);
551
552 /*
553 * Perform the copy if requested
554 */
555
556 if (copy) {
557 vm_object_t new_object;
558 vm_object_offset_t new_offset;
559
560 result = vm_object_copy_strategically(object, offset, size,
561 &new_object, &new_offset,
562 &copy);
563
564
565 if (result == KERN_MEMORY_RESTART_COPY) {
566 boolean_t success;
567 boolean_t src_needs_copy;
568
569 /*
570 * XXX
571 * We currently ignore src_needs_copy.
572 * This really is the issue of how to make
573 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
574 * non-kernel users to use. Solution forthcoming.
575 * In the meantime, since we don't allow non-kernel
576 * memory managers to specify symmetric copy,
577 * we won't run into problems here.
578 */
579 new_object = object;
580 new_offset = offset;
581 success = vm_object_copy_quickly(&new_object,
582 new_offset, size,
583 &src_needs_copy,
584 &copy);
585 assert(success);
586 result = KERN_SUCCESS;
587 }
588 /*
589 * Throw away the reference to the
590 * original object, as it won't be mapped.
591 */
592
593 vm_object_deallocate(object);
594
595 if (result != KERN_SUCCESS)
596 return (result);
597
598 object = new_object;
599 offset = new_offset;
600 }
601
602 if ((result = vm_map_enter(target_map,
603 address, size, mask, flags,
604 object, offset,
605 copy,
606 cur_protection, max_protection, inheritance
607 )) != KERN_SUCCESS)
608 vm_object_deallocate(object);
609 return(result);
610 }
611
612 /* temporary, until world build */
 613 kern_return_t vm_map(
614 vm_map_t target_map,
615 vm_offset_t *address,
616 vm_size_t size,
617 vm_offset_t mask,
618 int flags,
619 ipc_port_t port,
620 vm_offset_t offset,
621 boolean_t copy,
622 vm_prot_t cur_protection,
623 vm_prot_t max_protection,
624 vm_inherit_t inheritance)
625 {
 626         return vm_map_64(target_map, address, size, mask, flags, 
627 port, (vm_object_offset_t)offset, copy,
628 cur_protection, max_protection, inheritance);
629 }
630
631
632 /*
633 * NOTE: this routine (and this file) will no longer require mach_host_server.h
634 * when vm_wire is changed to use ledgers.
635 */
636 #include <mach/mach_host_server.h>
637 /*
638 * Specify that the range of the virtual address space
639 * of the target task must not cause page faults for
640 * the indicated accesses.
641 *
642 * [ To unwire the pages, specify VM_PROT_NONE. ]
643 */
644 kern_return_t
645 vm_wire(
646 host_priv_t host_priv,
647 register vm_map_t map,
648 vm_offset_t start,
649 vm_size_t size,
650 vm_prot_t access)
651 {
652 kern_return_t rc;
653
654 if (host_priv == HOST_PRIV_NULL)
655 return KERN_INVALID_HOST;
656
657 assert(host_priv == &realhost);
658
659 if (map == VM_MAP_NULL)
660 return KERN_INVALID_TASK;
661
662 if (access & ~VM_PROT_ALL)
663 return KERN_INVALID_ARGUMENT;
664
665 if (access != VM_PROT_NONE) {
666 rc = vm_map_wire(map, trunc_page(start),
667 round_page(start+size), access, TRUE);
668 } else {
669 rc = vm_map_unwire(map, trunc_page(start),
670 round_page(start+size), TRUE);
671 }
672 return rc;
673 }
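/*
 *	Illustrative sketch (not part of this file): wiring and later
 *	unwiring an assumed range "buf".  The host_priv port is privileged;
 *	how the caller obtains it is not shown here.
 *
 *		kr = vm_wire(host_priv, mach_task_self(), buf,
 *			     4 * vm_page_size, VM_PROT_READ | VM_PROT_WRITE);
 *		...
 *		kr = vm_wire(host_priv, mach_task_self(), buf,
 *			     4 * vm_page_size, VM_PROT_NONE);
 */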
674
675 /*
676 * vm_msync
677 *
 678  *      Synchronises the specified memory range with its backing store
 679  *      image by either flushing or cleaning the contents to the appropriate
 680  *      memory manager, engaging in a memory object synchronize dialog with
 681  *      the manager.  The client doesn't return until the manager issues an
 682  *      m_o_s_completed message.  MIG magically converts the user task parameter
 683  *      to the task's address map.
684 *
685 * interpretation of sync_flags
686 * VM_SYNC_INVALIDATE - discard pages, only return precious
687 * pages to manager.
688 *
689 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
690 * - discard pages, write dirty or precious
691 * pages back to memory manager.
692 *
693 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
694 * - write dirty or precious pages back to
695 * the memory manager.
696 *
697 * NOTE
 698  *      The memory object attributes have not yet been implemented; this 
 699  *      function will have to deal with the invalidate attribute.
700 *
701 * RETURNS
702 * KERN_INVALID_TASK Bad task parameter
703 * KERN_INVALID_ARGUMENT both sync and async were specified.
704 * KERN_SUCCESS The usual.
705 */
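/*
 *	Illustrative sketch (not part of this file) of the flag combinations
 *	described above, as issued from user space on an assumed range
 *	[addr, addr+len):
 *
 *		(write dirty/precious pages back and wait for completion)
 *		kr = vm_msync(mach_task_self(), addr, len,
 *			      VM_SYNC_SYNCHRONOUS);
 *
 *		(write back, then discard the range's pages)
 *		kr = vm_msync(mach_task_self(), addr, len,
 *			      VM_SYNC_SYNCHRONOUS | VM_SYNC_INVALIDATE);
 */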
706
707 kern_return_t
708 vm_msync(
709 vm_map_t map,
710 vm_address_t address,
711 vm_size_t size,
712 vm_sync_t sync_flags)
713 {
714 msync_req_t msr;
715 msync_req_t new_msr;
716 queue_chain_t req_q; /* queue of requests for this msync */
717 vm_map_entry_t entry;
718 vm_size_t amount_left;
719 vm_object_offset_t offset;
720 boolean_t do_sync_req;
721 boolean_t modifiable;
722
723
724 if ((sync_flags & VM_SYNC_ASYNCHRONOUS) &&
725 (sync_flags & VM_SYNC_SYNCHRONOUS))
726 return(KERN_INVALID_ARGUMENT);
727
728 /*
729 * align address and size on page boundaries
730 */
731 size = round_page(address + size) - trunc_page(address);
732 address = trunc_page(address);
733
734 if (map == VM_MAP_NULL)
735 return(KERN_INVALID_TASK);
736
737 if (size == 0)
738 return(KERN_SUCCESS);
739
740 queue_init(&req_q);
741 amount_left = size;
742
743 while (amount_left > 0) {
744 vm_size_t flush_size;
745 vm_object_t object;
746
747 vm_map_lock(map);
748 if (!vm_map_lookup_entry(map, address, &entry)) {
749 vm_size_t skip;
750
751 /*
752 * hole in the address map.
753 */
754
755 /*
756 * Check for empty map.
757 */
758 if (entry == vm_map_to_entry(map) &&
759 entry->vme_next == entry) {
760 vm_map_unlock(map);
761 break;
762 }
763 /*
764 * Check that we don't wrap and that
765 * we have at least one real map entry.
766 */
767 if ((map->hdr.nentries == 0) ||
768 (entry->vme_next->vme_start < address)) {
769 vm_map_unlock(map);
770 break;
771 }
772 /*
773 * Move up to the next entry if needed
774 */
775 skip = (entry->vme_next->vme_start - address);
776 if (skip >= amount_left)
777 amount_left = 0;
778 else
779 amount_left -= skip;
780 address = entry->vme_next->vme_start;
781 vm_map_unlock(map);
782 continue;
783 }
784
785 offset = address - entry->vme_start;
786
787 /*
788 * do we have more to flush than is contained in this
 789          * entry?
790 */
791 if (amount_left + entry->vme_start + offset > entry->vme_end) {
792 flush_size = entry->vme_end -
793 (entry->vme_start + offset);
794 } else {
795 flush_size = amount_left;
796 }
797 amount_left -= flush_size;
798 address += flush_size;
799
800 if (entry->is_sub_map == TRUE) {
801 vm_map_t local_map;
802 vm_offset_t local_offset;
803
804 local_map = entry->object.sub_map;
805 local_offset = entry->offset;
806 vm_map_unlock(map);
807 vm_msync(
808 local_map,
809 local_offset,
810 flush_size,
811 sync_flags);
812 continue;
813 }
814 object = entry->object.vm_object;
815
816 /*
817 * We can't sync this object if the object has not been
818 * created yet
819 */
820 if (object == VM_OBJECT_NULL) {
821 vm_map_unlock(map);
822 continue;
823 }
824 offset += entry->offset;
825 modifiable = (entry->protection & VM_PROT_WRITE)
826 != VM_PROT_NONE;
827
828 vm_object_lock(object);
829
830 if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) {
831 boolean_t kill_pages = 0;
832
833 if (sync_flags & VM_SYNC_KILLPAGES) {
834 if (object->ref_count == 1 && !entry->needs_copy && !object->shadow)
835 kill_pages = 1;
836 else
837 kill_pages = -1;
838 }
839 if (kill_pages != -1)
840 memory_object_deactivate_pages(object, offset,
841 (vm_object_size_t)flush_size, kill_pages);
842 vm_object_unlock(object);
843 vm_map_unlock(map);
844 continue;
845 }
846 /*
847 * We can't sync this object if there isn't a pager.
848 * Don't bother to sync internal objects, since there can't
849 * be any "permanent" storage for these objects anyway.
850 */
851 if ((object->pager == IP_NULL) || (object->internal) ||
852 (object->private)) {
853 vm_object_unlock(object);
854 vm_map_unlock(map);
855 continue;
856 }
857 /*
858 * keep reference on the object until syncing is done
859 */
860 assert(object->ref_count > 0);
861 object->ref_count++;
862 vm_object_res_reference(object);
863 vm_object_unlock(object);
864
865 vm_map_unlock(map);
866
867 do_sync_req = memory_object_sync(object,
868 offset,
869 flush_size,
870 sync_flags & VM_SYNC_INVALIDATE,
871 (modifiable &&
872 (sync_flags & VM_SYNC_SYNCHRONOUS ||
873 sync_flags & VM_SYNC_ASYNCHRONOUS)));
874
875 /*
 876          * only send an m_o_s if we returned pages or if the entry
 877          * is writable (i.e. dirty pages may have already been sent back)
878 */
879 if (!do_sync_req && !modifiable) {
880 vm_object_deallocate(object);
881 continue;
882 }
883 msync_req_alloc(new_msr);
884
885 vm_object_lock(object);
886 offset += object->paging_offset;
887
888 new_msr->offset = offset;
889 new_msr->length = flush_size;
890 new_msr->object = object;
891 new_msr->flag = VM_MSYNC_SYNCHRONIZING;
892 re_iterate:
893 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
894 /*
895 * need to check for overlapping entry, if found, wait
896 * on overlapping msr to be done, then reiterate
897 */
898 msr_lock(msr);
899 if (msr->flag == VM_MSYNC_SYNCHRONIZING &&
900 ((offset >= msr->offset &&
901 offset < (msr->offset + msr->length)) ||
902 (msr->offset >= offset &&
903 msr->offset < (offset + flush_size))))
904 {
905 assert_wait((event_t) msr,THREAD_INTERRUPTIBLE);
906 msr_unlock(msr);
907 vm_object_unlock(object);
908 thread_block((void (*)(void))0);
909 vm_object_lock(object);
910 goto re_iterate;
911 }
912 msr_unlock(msr);
913 }/* queue_iterate */
914
915 queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q);
916 vm_object_unlock(object);
917
918 queue_enter(&req_q, new_msr, msync_req_t, req_q);
919
920 #ifdef MACH_BSD
921 if(((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) ==
922 ((rpc_subsystem_t) &vnode_pager_workaround)) {
923 (void) vnode_pager_synchronize(
924 object->pager,
925 object->pager_request,
926 offset,
927 flush_size,
928 sync_flags);
929 } else {
930 (void) memory_object_synchronize(
931 object->pager,
932 object->pager_request,
933 offset,
934 flush_size,
935 sync_flags);
936 }
937 #else
938 (void) memory_object_synchronize(
939 object->pager,
940 object->pager_request,
941 offset,
942 flush_size,
943 sync_flags);
944 #endif
945 }/* while */
946
947 /*
 948  * wait for memory_object_synchronize_completed messages from pager(s)
949 */
950
951 while (!queue_empty(&req_q)) {
952 msr = (msync_req_t)queue_first(&req_q);
953 msr_lock(msr);
954 while(msr->flag != VM_MSYNC_DONE) {
955 assert_wait((event_t) msr, THREAD_INTERRUPTIBLE);
956 msr_unlock(msr);
957 thread_block((void (*)(void))0);
958 msr_lock(msr);
959 }/* while */
960 queue_remove(&req_q, msr, msync_req_t, req_q);
961 msr_unlock(msr);
962 vm_object_deallocate(msr->object);
963 msync_req_free(msr);
964 }/* queue_iterate */
965
966 return(KERN_SUCCESS);
967 }/* vm_msync */
968
969
970 /*
971 * task_wire
972 *
973 * Set or clear the map's wiring_required flag. This flag, if set,
974 * will cause all future virtual memory allocation to allocate
975 * user wired memory. Unwiring pages wired down as a result of
976 * this routine is done with the vm_wire interface.
977 */
978 kern_return_t
979 task_wire(
980 vm_map_t map,
981 boolean_t must_wire)
982 {
983 if (map == VM_MAP_NULL)
984 return(KERN_INVALID_ARGUMENT);
985
986 if (must_wire)
987 map->wiring_required = TRUE;
988 else
989 map->wiring_required = FALSE;
990
991 return(KERN_SUCCESS);
992 }
993
994 /*
995 * vm_behavior_set sets the paging behavior attribute for the
996 * specified range in the specified map. This routine will fail
997 * with KERN_INVALID_ADDRESS if any address in [start,start+size)
998 * is not a valid allocated or reserved memory region.
999 */
1000 kern_return_t
1001 vm_behavior_set(
1002 vm_map_t map,
1003 vm_offset_t start,
1004 vm_size_t size,
1005 vm_behavior_t new_behavior)
1006 {
1007 if (map == VM_MAP_NULL)
1008 return(KERN_INVALID_ARGUMENT);
1009
1010 return(vm_map_behavior_set(map, trunc_page(start),
1011 round_page(start+size), new_behavior));
1012 }
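/*
 *	Illustrative sketch (not part of this file): hinting that an assumed
 *	mapped range [addr, addr+len) will be touched sequentially, in the
 *	spirit of madvise(MADV_SEQUENTIAL):
 *
 *		kr = vm_behavior_set(mach_task_self(), addr, len,
 *				     VM_BEHAVIOR_SEQUENTIAL);
 */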
1013
1014 #if VM_CPM
1015 /*
1016 * Control whether the kernel will permit use of
1017 * vm_allocate_cpm at all.
1018 */
1019 unsigned int vm_allocate_cpm_enabled = 1;
1020
1021 /*
1022 * Ordinarily, the right to allocate CPM is restricted
1023 * to privileged applications (those that can gain access
1024 * to the host port). Set this variable to zero if you
1025 * want to let any application allocate CPM.
1026 */
1027 unsigned int vm_allocate_cpm_privileged = 0;
1028
1029 /*
1030 * Allocate memory in the specified map, with the caveat that
1031 * the memory is physically contiguous. This call may fail
1032 * if the system can't find sufficient contiguous memory.
1033 * This call may cause or lead to heart-stopping amounts of
1034 * paging activity.
1035 *
1036 * Memory obtained from this call should be freed in the
1037 * normal way, viz., via vm_deallocate.
1038 */
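/*
 *	Illustrative sketch (not part of this file): a caller holding the
 *	privileged host port asking for physically contiguous memory and
 *	releasing it like any other allocation.  The buffer name and size
 *	are assumptions.
 *
 *		vm_offset_t	dma_buf = 0;
 *
 *		kr = vm_allocate_cpm(host_priv, mach_task_self(), &dma_buf,
 *				     16 * PAGE_SIZE, VM_FLAGS_ANYWHERE);
 *		...
 *		(void) vm_deallocate(mach_task_self(), dma_buf,
 *				     16 * PAGE_SIZE);
 */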
1039 kern_return_t
1040 vm_allocate_cpm(
1041 host_priv_t host_priv,
1042 register vm_map_t map,
1043 register vm_offset_t *addr,
1044 register vm_size_t size,
1045 int flags)
1046 {
1047 vm_object_t cpm_obj;
1048 pmap_t pmap;
1049 vm_page_t m, pages;
1050 kern_return_t kr;
1051 vm_offset_t va, start, end, offset;
1052 #if MACH_ASSERT
1053 extern vm_offset_t avail_start, avail_end;
1054 vm_offset_t prev_addr;
1055 #endif /* MACH_ASSERT */
1056
1057 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
1058
1059 if (!vm_allocate_cpm_enabled)
1060 return KERN_FAILURE;
1061
1062 if (vm_allocate_cpm_privileged && host_priv == HOST_PRIV_NULL)
1063 return KERN_INVALID_HOST;
1064
1065 if (map == VM_MAP_NULL)
1066 return KERN_INVALID_ARGUMENT;
1067
1068 assert(host_priv == &realhost);
1069
1070 if (size == 0) {
1071 *addr = 0;
1072 return KERN_SUCCESS;
1073 }
1074
1075 if (anywhere)
1076 *addr = vm_map_min(map);
1077 else
1078 *addr = trunc_page(*addr);
1079 size = round_page(size);
1080
1081 if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS)
1082 return kr;
1083
1084 cpm_obj = vm_object_allocate(size);
1085 assert(cpm_obj != VM_OBJECT_NULL);
1086 assert(cpm_obj->internal);
1087 assert(cpm_obj->size == size);
1088 assert(cpm_obj->can_persist == FALSE);
1089 assert(cpm_obj->pager_created == FALSE);
1090 assert(cpm_obj->pageout == FALSE);
1091 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1092
1093 /*
1094 * Insert pages into object.
1095 */
1096
1097 vm_object_lock(cpm_obj);
1098 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1099 m = pages;
1100 pages = NEXT_PAGE(m);
1101
1102 assert(!m->gobbled);
1103 assert(!m->wanted);
1104 assert(!m->pageout);
1105 assert(!m->tabled);
1106 assert(m->busy);
1107 assert(m->phys_addr>=avail_start && m->phys_addr<=avail_end);
1108
1109 m->busy = FALSE;
1110 vm_page_insert(m, cpm_obj, offset);
1111 }
1112 assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
1113 vm_object_unlock(cpm_obj);
1114
1115 /*
1116 * Hang onto a reference on the object in case a
1117 * multi-threaded application for some reason decides
1118 * to deallocate the portion of the address space into
1119 * which we will insert this object.
1120 *
1121 * Unfortunately, we must insert the object now before
1122 * we can talk to the pmap module about which addresses
1123 * must be wired down. Hence, the race with a multi-
1124 * threaded app.
1125 */
1126 vm_object_reference(cpm_obj);
1127
1128 /*
1129 * Insert object into map.
1130 */
1131
1132 kr = vm_map_enter(
1133 map,
1134 addr,
1135 size,
1136 (vm_offset_t)0,
1137 flags,
1138 cpm_obj,
1139 (vm_object_offset_t)0,
1140 FALSE,
1141 VM_PROT_ALL,
1142 VM_PROT_ALL,
1143 VM_INHERIT_DEFAULT);
1144
1145 if (kr != KERN_SUCCESS) {
1146 /*
1147 * A CPM object doesn't have can_persist set,
1148 * so all we have to do is deallocate it to
1149 * free up these pages.
1150 */
1151 assert(cpm_obj->pager_created == FALSE);
1152 assert(cpm_obj->can_persist == FALSE);
1153 assert(cpm_obj->pageout == FALSE);
1154 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1155 vm_object_deallocate(cpm_obj); /* kill acquired ref */
1156 vm_object_deallocate(cpm_obj); /* kill creation ref */
1157 }
1158
1159 /*
1160 * Inform the physical mapping system that the
1161 * range of addresses may not fault, so that
1162 * page tables and such can be locked down as well.
1163 */
1164 start = *addr;
1165 end = start + size;
1166 pmap = vm_map_pmap(map);
1167 pmap_pageable(pmap, start, end, FALSE);
1168
1169 /*
1170 * Enter each page into the pmap, to avoid faults.
1171 * Note that this loop could be coded more efficiently,
1172 * if the need arose, rather than looking up each page
1173 * again.
1174 */
1175 for (offset = 0, va = start; offset < size;
1176 va += PAGE_SIZE, offset += PAGE_SIZE) {
1177 vm_object_lock(cpm_obj);
1178 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1179 vm_object_unlock(cpm_obj);
1180 assert(m != VM_PAGE_NULL);
1181 PMAP_ENTER(pmap, va, m, VM_PROT_ALL, TRUE);
1182 }
1183
1184 #if MACH_ASSERT
1185 /*
1186 * Verify ordering in address space.
1187 */
1188 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1189 vm_object_lock(cpm_obj);
1190 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1191 vm_object_unlock(cpm_obj);
1192 if (m == VM_PAGE_NULL)
1193 panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
1194 cpm_obj, offset);
1195 assert(m->tabled);
1196 assert(!m->busy);
1197 assert(!m->wanted);
1198 assert(!m->fictitious);
1199 assert(!m->private);
1200 assert(!m->absent);
1201 assert(!m->error);
1202 assert(!m->cleaning);
1203 assert(!m->precious);
1204 assert(!m->clustered);
1205 if (offset != 0) {
1206 if (m->phys_addr != prev_addr + PAGE_SIZE) {
1207 printf("start 0x%x end 0x%x va 0x%x\n",
1208 start, end, va);
1209 printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
1210 printf("m 0x%x prev_address 0x%x\n", m,
1211 prev_addr);
1212 panic("vm_allocate_cpm: pages not contig!");
1213 }
1214 }
1215 prev_addr = m->phys_addr;
1216 }
1217 #endif /* MACH_ASSERT */
1218
1219 vm_object_deallocate(cpm_obj); /* kill extra ref */
1220
1221 return kr;
1222 }
1223
1224
1225 #else /* VM_CPM */
1226
1227 /*
1228 * Interface is defined in all cases, but unless the kernel
1229 * is built explicitly for this option, the interface does
1230 * nothing.
1231 */
1232
1233 kern_return_t
1234 vm_allocate_cpm(
1235 host_priv_t host_priv,
1236 register vm_map_t map,
1237 register vm_offset_t *addr,
1238 register vm_size_t size,
1239 int flags)
1240 {
1241 return KERN_FAILURE;
1242 }
1243
1244 /*
1245 */
1246 kern_return_t
1247 mach_memory_object_memory_entry_64(
1248 host_t host,
1249 boolean_t internal,
1250 vm_object_offset_t size,
1251 vm_prot_t permission,
1252 ipc_port_t pager,
1253 ipc_port_t *entry_handle)
1254 {
1255 vm_named_entry_t user_object;
1256 ipc_port_t user_handle;
1257 ipc_port_t previous;
1258 kern_return_t kr;
1259
1260 if (host == HOST_NULL)
1261 return(KERN_INVALID_HOST);
1262
1263 user_object = (vm_named_entry_t)
1264 kalloc(sizeof (struct vm_named_entry));
1265 if(user_object == NULL)
1266 return KERN_FAILURE;
1267 named_entry_lock_init(user_object);
1268 user_handle = ipc_port_alloc_kernel();
1269 ip_lock(user_handle);
1270
1271 /* make a sonce right */
1272 user_handle->ip_sorights++;
1273 ip_reference(user_handle);
1274
1275 user_handle->ip_destination = IP_NULL;
1276 user_handle->ip_receiver_name = MACH_PORT_NULL;
1277 user_handle->ip_receiver = ipc_space_kernel;
1278
1279 /* make a send right */
1280 user_handle->ip_mscount++;
1281 user_handle->ip_srights++;
1282 ip_reference(user_handle);
1283
1284 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1285 /* nsrequest unlocks user_handle */
1286
1287 user_object->object = NULL;
1288 user_object->size = size;
1289 user_object->offset = 0;
1290 user_object->backing.pager = pager;
1291 user_object->protection = permission;
1292 user_object->internal = internal;
1293 user_object->is_sub_map = FALSE;
1294 user_object->ref_count = 1;
1295
1296 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1297 IKOT_NAMED_ENTRY);
1298 *entry_handle = user_handle;
1299 return KERN_SUCCESS;
1300 }
1301
1302 kern_return_t
1303 mach_memory_object_memory_entry(
1304 host_t host,
1305 boolean_t internal,
1306 vm_size_t size,
1307 vm_prot_t permission,
1308 ipc_port_t pager,
1309 ipc_port_t *entry_handle)
1310 {
1311 return mach_memory_object_memory_entry_64( host, internal,
1312 (vm_object_offset_t)size, permission, pager, entry_handle);
1313 }
1314
1315
1316
1317 /*
1318 */
1319
1320 kern_return_t
1321 mach_make_memory_entry_64(
1322 vm_map_t target_map,
1323 vm_object_size_t *size,
1324 vm_object_offset_t offset,
1325 vm_prot_t permission,
1326 ipc_port_t *object_handle,
1327 ipc_port_t parent_entry)
1328 {
1329 vm_map_version_t version;
1330 vm_named_entry_t user_object;
1331 ipc_port_t user_handle;
1332 ipc_port_t previous;
1333 kern_return_t kr;
1334 vm_map_t pmap_map;
1335
1336 /* needed for call to vm_map_lookup_locked */
1337 boolean_t wired;
1338 vm_object_offset_t obj_off;
1339 vm_prot_t prot;
1340 vm_object_offset_t lo_offset, hi_offset;
1341 vm_behavior_t behavior;
1342 vm_object_t object;
1343
1344 /* needed for direct map entry manipulation */
1345 vm_map_entry_t map_entry;
1346 vm_map_t local_map;
1347 vm_object_size_t mappable_size;
1348
1349
1350 user_object = (vm_named_entry_t)
1351 kalloc(sizeof (struct vm_named_entry));
1352 if(user_object == NULL)
1353 return KERN_FAILURE;
1354 named_entry_lock_init(user_object);
1355 user_handle = ipc_port_alloc_kernel();
1356 ip_lock(user_handle);
1357
1358 /* make a sonce right */
1359 user_handle->ip_sorights++;
1360 ip_reference(user_handle);
1361
1362 user_handle->ip_destination = IP_NULL;
1363 user_handle->ip_receiver_name = MACH_PORT_NULL;
1364 user_handle->ip_receiver = ipc_space_kernel;
1365
1366 /* make a send right */
1367 user_handle->ip_mscount++;
1368 user_handle->ip_srights++;
1369 ip_reference(user_handle);
1370
1371 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1372 /* nsrequest unlocks user_handle */
1373
1374 user_object->backing.pager = NULL;
1375 user_object->ref_count = 1;
1376
1377 if(parent_entry == NULL) {
1378 /* Create a named object based on address range within the task map */
1379 /* Go find the object at given address */
1380
1381 permission &= VM_PROT_ALL;
1382 vm_map_lock_read(target_map);
1383
1384 /* get the object associated with the target address */
1385 /* note we check the permission of the range against */
1386 /* that requested by the caller */
1387
1388 kr = vm_map_lookup_locked(&target_map, offset,
1389 permission, &version,
1390 &object, &obj_off, &prot, &wired, &behavior,
1391 &lo_offset, &hi_offset, &pmap_map);
1392 if (kr != KERN_SUCCESS) {
1393 vm_map_unlock_read(target_map);
1394 goto make_mem_done;
1395 }
1396 if ((prot & permission) != permission) {
1397 kr = KERN_INVALID_RIGHT;
1398 vm_object_unlock(object);
1399 vm_map_unlock_read(target_map);
1400 if(pmap_map != target_map)
1401 vm_map_unlock_read(pmap_map);
1402 goto make_mem_done;
1403 }
1404
1405 /* We have an object, now check to see if this object */
1406 /* is suitable. If not, create a shadow and share that */
1407
1408 local_map = target_map;
1409 redo_lookup:
1410 while(TRUE) {
1411 if(!vm_map_lookup_entry(local_map, offset, &map_entry)) {
1412 kr = KERN_INVALID_ARGUMENT;
1413 vm_object_unlock(object);
1414 vm_map_unlock_read(target_map);
1415 if(pmap_map != target_map)
1416 vm_map_unlock_read(pmap_map);
1417 goto make_mem_done;
1418 }
1419 if(!(map_entry->is_sub_map)) {
1420 if(map_entry->object.vm_object != object) {
1421 kr = KERN_INVALID_ARGUMENT;
1422 vm_object_unlock(object);
1423 vm_map_unlock_read(target_map);
1424 if(pmap_map != target_map)
1425 vm_map_unlock_read(pmap_map);
1426 goto make_mem_done;
1427 }
1428 break;
1429 } else {
1430 local_map = map_entry->object.sub_map;
1431 vm_map_lock_read(local_map);
1432 vm_map_unlock_read(target_map);
1433 if(pmap_map != target_map)
1434 vm_map_unlock_read(pmap_map);
1435 target_map = local_map;
1436 }
1437 }
1438 if(((map_entry->max_protection) & permission) != permission) {
1439 kr = KERN_INVALID_RIGHT;
1440 vm_object_unlock(object);
1441 vm_map_unlock_read(target_map);
1442 if(pmap_map != target_map)
1443 vm_map_unlock_read(pmap_map);
1444 goto make_mem_done;
1445 }
1446 if(object->internal) {
1447 /* vm_map_lookup_locked will create a shadow if */
1448 /* needs_copy is set but does not check for the */
1449 /* other two conditions shown. It is important to */
1450 /* set up an object which will not be pulled from */
1451 /* under us. */
1452
1453 if (map_entry->needs_copy || object->shadowed ||
1454 (object->size >
1455 ((vm_object_size_t)map_entry->vme_end -
1456 map_entry->vme_start))) {
1457 if (vm_map_lock_read_to_write(target_map)) {
1458 vm_map_lock_read(target_map);
1459 goto redo_lookup;
1460 }
1461
1462
1463 /* create a shadow object */
1464
1465 vm_object_shadow(&map_entry->object.vm_object,
1466 &map_entry->offset,
1467 (map_entry->vme_end
1468 - map_entry->vme_start));
1469 map_entry->needs_copy = FALSE;
1470 vm_object_unlock(object);
1471 object = map_entry->object.vm_object;
1472 vm_object_lock(object);
1473 object->size = map_entry->vme_end
1474 - map_entry->vme_start;
1475 obj_off = (offset - map_entry->vme_start) +
1476 map_entry->offset;
1477 lo_offset = map_entry->offset;
1478 hi_offset = (map_entry->vme_end -
1479 map_entry->vme_start) +
1480 map_entry->offset;
1481
1482 vm_map_lock_write_to_read(target_map);
1483
1484 }
1485 }
1486
1487 /* note: in the future we can (if necessary) allow for */
1488 /* memory object lists, this will better support */
1489 /* fragmentation, but is it necessary? The user should */
1490 /* be encouraged to create address space oriented */
1491 /* shared objects from CLEAN memory regions which have */
1492         /* a known and defined history. i.e. no inheritance           */
1493 /* share, make this call before making the region the */
1494 /* target of ipc's, etc. The code above, protecting */
1495 /* against delayed copy, etc. is mostly defensive. */
1496
1497
1498
1499 object->true_share = TRUE;
1500 user_object->object = object;
1501 user_object->internal = object->internal;
1502 user_object->is_sub_map = FALSE;
1503 user_object->offset = obj_off;
1504 user_object->protection = permission;
1505
1506 /* the size of mapped entry that overlaps with our region */
1507 /* which is targeted for share. */
1508 /* (entry_end - entry_start) - */
1509 /* offset of our beg addr within entry */
1510 /* it corresponds to this: */
1511
1512 mappable_size = hi_offset - obj_off;
1513 if(*size > mappable_size)
1514 *size = mappable_size;
1515
1516 user_object->size = *size;
1517
1518 /* user_object pager and internal fields are not used */
1519 /* when the object field is filled in. */
1520
1521 object->ref_count++; /* we now point to this object, hold on */
1522 vm_object_res_reference(object);
1523 vm_object_unlock(object);
1524 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1525 IKOT_NAMED_ENTRY);
1526 *size = user_object->size;
1527 *object_handle = user_handle;
1528 vm_map_unlock_read(target_map);
1529 if(pmap_map != target_map)
1530 vm_map_unlock_read(pmap_map);
1531 return KERN_SUCCESS;
1532 } else {
1533
1534 vm_named_entry_t parent_object;
1535
1536            /* The new object will be based on an existing named object */
1537 if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
1538 kr = KERN_INVALID_ARGUMENT;
1539 goto make_mem_done;
1540 }
1541 parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
1542            if((permission & parent_object->protection) != permission) {
1543 kr = KERN_INVALID_ARGUMENT;
1544 goto make_mem_done;
1545 }
1546 if((offset + *size) > parent_object->size) {
1547 kr = KERN_INVALID_ARGUMENT;
1548 goto make_mem_done;
1549 }
1550
1551 user_object->object = parent_object->object;
1552 user_object->size = *size;
1553 user_object->offset = parent_object->offset + offset;
1554 user_object->protection = permission;
1555 if(parent_object->is_sub_map) {
1556 user_object->backing.map = parent_object->backing.map;
1557 vm_map_lock(user_object->backing.map);
1558 user_object->backing.map->ref_count++;
1559 vm_map_unlock(user_object->backing.map);
1560 }
1561 else {
1562 user_object->backing.pager = parent_object->backing.pager;
1563 }
1564 user_object->internal = parent_object->internal;
1565 user_object->is_sub_map = parent_object->is_sub_map;
1566
1567 if(parent_object->object != NULL) {
1568 /* we now point to this object, hold on */
1569 vm_object_reference(parent_object->object);
1570 vm_object_lock(parent_object->object);
1571 parent_object->object->true_share = TRUE;
1572 vm_object_unlock(parent_object->object);
1573 }
1574 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1575 IKOT_NAMED_ENTRY);
1576 *object_handle = user_handle;
1577 return KERN_SUCCESS;
1578 }
1579
1580
1581
1582 make_mem_done:
1583 ipc_port_dealloc_kernel(user_handle);
1584 kfree((vm_offset_t)user_object, sizeof (struct vm_named_entry));
1585 return kr;
1586 }
1587
1588 kern_return_t
1589 mach_make_memory_entry(
1590 vm_map_t target_map,
1591 vm_size_t *size,
1592 vm_offset_t offset,
1593 vm_prot_t permission,
1594 ipc_port_t *object_handle,
1595 ipc_port_t parent_entry)
1596 {
1597 vm_object_offset_t size_64;
1598 kern_return_t kr;
1599
1600 size_64 = (vm_object_offset_t)*size;
1601 kr = mach_make_memory_entry_64(target_map, &size_64,
1602 (vm_object_offset_t)offset, permission, object_handle,
1603 parent_entry);
1604 *size = (vm_size_t)size_64;
1605 return kr;
1606 }
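/*
 *	Illustrative sketch (not part of this file): how these entry points
 *	are typically combined from user space to share a range with another
 *	task.  "local_addr" and "remote_task" (a send right to the other
 *	task's control port) are assumptions.
 *
 *		vm_size_t	share_size = 4 * vm_page_size;
 *		mach_port_t	entry = MACH_PORT_NULL;
 *		vm_address_t	remote_addr = 0;
 *
 *		kr = mach_make_memory_entry(mach_task_self(), &share_size,
 *					    local_addr,
 *					    VM_PROT_READ | VM_PROT_WRITE,
 *					    &entry, MACH_PORT_NULL);
 *
 *		kr = vm_map(remote_task, &remote_addr, share_size, 0,
 *			    VM_FLAGS_ANYWHERE, entry, 0,
 *			    FALSE,			(share, do not copy)
 *			    VM_PROT_READ | VM_PROT_WRITE,
 *			    VM_PROT_READ | VM_PROT_WRITE,
 *			    VM_INHERIT_NONE);
 */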
1607
1608 /*
1609 */
1610
1611 kern_return_t
1612 vm_region_object_create(
1613 vm_map_t target_map,
1614 vm_size_t size,
1615 ipc_port_t *object_handle)
1616 {
1617 vm_named_entry_t user_object;
1618 ipc_port_t user_handle;
1619 kern_return_t kr;
1620
1621 pmap_t new_pmap = pmap_create((vm_size_t) 0);
1622 ipc_port_t previous;
1623 vm_map_t new_map;
1624
1625 if(new_pmap == PMAP_NULL)
1626 return KERN_FAILURE;
1627 user_object = (vm_named_entry_t)
1628 kalloc(sizeof (struct vm_named_entry));
1629 if(user_object == NULL) {
1630 pmap_destroy(new_pmap);
1631 return KERN_FAILURE;
1632 }
1633 named_entry_lock_init(user_object);
1634 user_handle = ipc_port_alloc_kernel();
1635
1636
1637 ip_lock(user_handle);
1638
1639 /* make a sonce right */
1640 user_handle->ip_sorights++;
1641 ip_reference(user_handle);
1642
1643 user_handle->ip_destination = IP_NULL;
1644 user_handle->ip_receiver_name = MACH_PORT_NULL;
1645 user_handle->ip_receiver = ipc_space_kernel;
1646
1647 /* make a send right */
1648 user_handle->ip_mscount++;
1649 user_handle->ip_srights++;
1650 ip_reference(user_handle);
1651
1652 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1653 /* nsrequest unlocks user_handle */
1654
1655 /* Create a named object based on a submap of specified size */
1656
1657 new_map = vm_map_create(new_pmap, 0, size, TRUE);
1658 user_object->backing.map = new_map;
1659
1660
1661 user_object->object = VM_OBJECT_NULL;
1662 user_object->internal = TRUE;
1663 user_object->is_sub_map = TRUE;
1664 user_object->offset = 0;
1665 user_object->protection = VM_PROT_ALL;
1666 user_object->size = size;
1667 user_object->ref_count = 1;
1668
1669 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1670 IKOT_NAMED_ENTRY);
1671 *object_handle = user_handle;
1672 return KERN_SUCCESS;
1673
1674 }
1675
1676 /* For a given range, check all map entries.  If the entry corresponds to */
1677 /* the old vm_region/map provided on the call, replace it with the */
1678 /* corresponding range in the new vm_region/map */
1679 kern_return_t vm_map_region_replace(
1680 vm_map_t target_map,
1681 ipc_port_t old_region,
1682 ipc_port_t new_region,
1683 vm_offset_t start,
1684 vm_offset_t end)
1685 {
1686 vm_named_entry_t old_object;
1687 vm_named_entry_t new_object;
1688 vm_map_t old_submap;
1689 vm_map_t new_submap;
1690 vm_offset_t addr;
1691 vm_map_entry_t entry;
1692 int nested_pmap = 0;
1693
1694
1695 vm_map_lock(target_map);
1696 old_object = (vm_named_entry_t)old_region->ip_kobject;
1697 new_object = (vm_named_entry_t)new_region->ip_kobject;
1698 if((!old_object->is_sub_map) || (!new_object->is_sub_map)) {
1699 vm_map_unlock(target_map);
1700 return KERN_INVALID_ARGUMENT;
1701 }
1702 old_submap = (vm_map_t)old_object->backing.map;
1703 new_submap = (vm_map_t)new_object->backing.map;
1704 vm_map_lock(old_submap);
1705 if((old_submap->min_offset != new_submap->min_offset) ||
1706 (old_submap->max_offset != new_submap->max_offset)) {
1707 vm_map_unlock(old_submap);
1708 vm_map_unlock(target_map);
1709 return KERN_INVALID_ARGUMENT;
1710 }
1711 if(!vm_map_lookup_entry(target_map, start, &entry)) {
1712            /* if the src is not contained, the entry precedes */
1713 /* our range */
1714 addr = entry->vme_start;
1715 if(entry == vm_map_to_entry(target_map)) {
1716 vm_map_unlock(old_submap);
1717 vm_map_unlock(target_map);
1718 return KERN_SUCCESS;
1719 }
1720 vm_map_lookup_entry(target_map, addr, &entry);
1721 }
1722 addr = entry->vme_start;
1723 vm_map_reference(old_submap);
1724 while((entry != vm_map_to_entry(target_map)) &&
1725 (entry->vme_start < end)) {
1726 if((entry->is_sub_map) &&
1727 (entry->object.sub_map == old_submap)) {
1728 entry->object.sub_map = new_submap;
1729 if(entry->use_pmap) {
1730 if((start & 0xfffffff) ||
1731 ((end - start) != 0x10000000)) {
1732 vm_map_unlock(old_submap);
1733 vm_map_unlock(target_map);
1734 return KERN_INVALID_ARGUMENT;
1735 }
1736 nested_pmap = 1;
1737 }
1738 vm_map_reference(new_submap);
1739 vm_map_deallocate(old_submap);
1740 }
1741 entry = entry->vme_next;
1742 addr = entry->vme_start;
1743 }
1744 if(nested_pmap) {
1745 #ifndef i386
1746 pmap_unnest(target_map->pmap, start, end - start);
1747 pmap_nest(target_map->pmap, new_submap->pmap,
1748 start, end - start);
1749 #endif /* i386 */
1750 } else {
1751 pmap_remove(target_map->pmap, start, end);
1752 }
1753 vm_map_unlock(old_submap);
1754 vm_map_unlock(target_map);
1755 return KERN_SUCCESS;
1756 }
1757
1758
1759 void
1760 mach_destroy_memory_entry(
1761 ipc_port_t port)
1762 {
1763 vm_named_entry_t named_entry;
1764 #if MACH_ASSERT
1765 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
1766 #endif /* MACH_ASSERT */
1767 named_entry = (vm_named_entry_t)port->ip_kobject;
1768 mutex_lock(&(named_entry)->Lock);
1769 named_entry->ref_count-=1;
1770 if(named_entry->ref_count == 0) {
1771 if(named_entry->object) {
1772 /* release the memory object we've been pointing to */
1773 vm_object_deallocate(named_entry->object);
1774 }
1775 if(named_entry->is_sub_map) {
1776 vm_map_deallocate(named_entry->backing.map);
1777 }
1778 kfree((vm_offset_t)port->ip_kobject,
1779 sizeof (struct vm_named_entry));
1780 } else
1781 mutex_unlock(&(named_entry)->Lock);
1782 }
1783
1784
1785 kern_return_t
1786 vm_map_page_query(
1787 vm_map_t target_map,
1788 vm_offset_t offset,
1789 int *disposition,
1790 int *ref_count)
1791 {
1792 vm_map_entry_t map_entry;
1793 vm_object_t object;
1794 vm_page_t m;
1795
1796 restart_page_query:
1797 *disposition = 0;
1798 *ref_count = 0;
1799 vm_map_lock(target_map);
1800 if(!vm_map_lookup_entry(target_map, offset, &map_entry)) {
1801 vm_map_unlock(target_map);
1802 return KERN_FAILURE;
1803 }
1804 offset -= map_entry->vme_start; /* adjust to offset within entry */
1805 offset += map_entry->offset; /* adjust to target object offset */
1806 if(map_entry->object.vm_object != VM_OBJECT_NULL) {
1807 if(!map_entry->is_sub_map) {
1808 object = map_entry->object.vm_object;
1809 } else {
1810 vm_map_unlock(target_map);
1811 target_map = map_entry->object.sub_map;
1812 goto restart_page_query;
1813 }
1814 } else {
1815 vm_map_unlock(target_map);
1816 return KERN_FAILURE;
1817 }
1818 vm_object_lock(object);
1819 vm_map_unlock(target_map);
1820 while(TRUE) {
1821 m = vm_page_lookup(object, offset);
1822 if (m != VM_PAGE_NULL) {
1823 *disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
1824 break;
1825 } else {
1826 if(object->shadow) {
1827 offset += object->shadow_offset;
1828 vm_object_unlock(object);
1829 object = object->shadow;
1830 vm_object_lock(object);
1831 continue;
1832 }
1833 vm_object_unlock(object);
1834 return KERN_FAILURE;
1835 }
1836 }
1837
1838 /* The ref_count is not strictly accurate; it measures the number   */
1839 /* of entities holding a ref on the object.  They may not be mapping */
1840 /* the object or may not be mapping the section holding the          */
1841 /* target page, but it's still a ballpark number and, though an over-*/
1842 /* count, it picks up the copy-on-write cases.                       */
1843
1844 /* We could also get a picture of page sharing from pmap_attributes */
1845 /* but this would undercount, as only faulted-in mappings would     */
1846 /* show up. */
1847
1848 *ref_count = object->ref_count;
1849
1850 if (m->fictitious) {
1851 *disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
1852 vm_object_unlock(object);
1853 return KERN_SUCCESS;
1854 }
1855
1856 if (m->dirty)
1857 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
1858 else if(pmap_is_modified(m->phys_addr))
1859 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
1860
1861 if (m->reference)
1862 *disposition |= VM_PAGE_QUERY_PAGE_REF;
1863 else if(pmap_is_referenced(m->phys_addr))
1864 *disposition |= VM_PAGE_QUERY_PAGE_REF;
1865
1866 vm_object_unlock(object);
1867 return KERN_SUCCESS;
1868
1869 }
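/*
 *	Illustrative sketch (not part of this file, and assuming this routine
 *	is reachable from user space via its MIG stub): interpreting the
 *	returned disposition bits for an assumed address "addr":
 *
 *		int disp = 0, refs = 0;
 *
 *		kr = vm_map_page_query(mach_task_self(), addr, &disp, &refs);
 *		if (kr == KERN_SUCCESS &&
 *		    (disp & VM_PAGE_QUERY_PAGE_PRESENT) &&
 *		    (disp & VM_PAGE_QUERY_PAGE_DIRTY)) {
 *			... the page is resident and has been modified ...
 *		}
 */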
1870
1871 kern_return_t
1872 set_dp_control_port(
1873 host_priv_t host_priv,
1874 ipc_port_t control_port)
1875 {
1876 if (host_priv == HOST_PRIV_NULL)
1877 return (KERN_INVALID_HOST);
1878 dynamic_pager_control_port = control_port;
1879 return KERN_SUCCESS;
1880 }
1881
1882 kern_return_t
1883 get_dp_control_port(
1884 host_priv_t host_priv,
1885 ipc_port_t *control_port)
1886 {
1887 if (host_priv == HOST_PRIV_NULL)
1888 return (KERN_INVALID_HOST);
1889 *control_port = dynamic_pager_control_port;
1890 return KERN_SUCCESS;
1891
1892 }
1893
1894 void
1895 mach_destroy_upl(
1896 ipc_port_t port)
1897 {
1898 upl_t upl;
1899 #if MACH_ASSERT
1900 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
1901 #endif /* MACH_ASSERT */
1902 upl = (upl_t)port->ip_kobject;
1903 mutex_lock(&(upl)->Lock);
1904 upl->ref_count-=1;
1905 if(upl->ref_count == 0) {
1906 mutex_unlock(&(upl)->Lock);
1907 uc_upl_abort(upl, UPL_ABORT_ERROR);
1908 } else
1909 mutex_unlock(&(upl)->Lock);
1910 }
1911
1912 /* Retrieve a upl for an object underlying an address range in a map */
1913
1914 kern_return_t
1915 vm_map_get_upl(
1916 vm_map_t map,
1917 vm_offset_t offset,
1918 vm_size_t *upl_size,
1919 upl_t *upl,
1920 upl_page_info_t **page_list,
1921 int *count,
1922 int *flags,
1923 int force_data_sync)
1924 {
1925 vm_map_entry_t entry;
1926 int caller_flags;
1927 int sync_cow_data = FALSE;
1928 vm_object_t local_object;
1929 vm_offset_t local_offset;
1930 vm_offset_t local_start;
1931 kern_return_t ret;
1932
1933 caller_flags = *flags;
1934 if (!(caller_flags & UPL_COPYOUT_FROM)) {
1935 sync_cow_data = TRUE;
1936 }
1937 if(upl == NULL)
1938 return KERN_INVALID_ARGUMENT;
1939 REDISCOVER_ENTRY:
1940 vm_map_lock(map);
1941 if (vm_map_lookup_entry(map, offset, &entry)) {
1942 if((entry->vme_end - offset) < *upl_size) {
1943 *upl_size = entry->vme_end - offset;
1944 }
1945 /*
1946 * Create an object if necessary.
1947 */
1948 if (entry->object.vm_object == VM_OBJECT_NULL) {
1949 entry->object.vm_object = vm_object_allocate(
1950 (vm_size_t)(entry->vme_end - entry->vme_start));
1951 entry->offset = 0;
1952 }
1953 if (!(caller_flags & UPL_COPYOUT_FROM)) {
1954 if (entry->needs_copy
1955 || entry->object.vm_object->copy) {
1956 vm_map_t local_map;
1957 vm_object_t object;
1958 vm_object_offset_t offset_hi;
1959 vm_object_offset_t offset_lo;
1960 vm_object_offset_t new_offset;
1961 vm_prot_t prot;
1962 boolean_t wired;
1963 vm_behavior_t behavior;
1964 vm_map_version_t version;
1965 vm_map_t pmap_map;
1966
1967 local_map = map;
1968 vm_map_lock_write_to_read(map);
1969 if(vm_map_lookup_locked(&local_map,
1970 offset, VM_PROT_WRITE,
1971 &version, &object,
1972 &new_offset, &prot, &wired,
1973 &behavior, &offset_lo,
1974 &offset_hi, &pmap_map)) {
1975 vm_map_unlock(local_map);
1976 return KERN_FAILURE;
1977 }
1978 if (pmap_map != map) {
1979 vm_map_unlock(pmap_map);
1980 }
1981 vm_object_unlock(object);
1982 vm_map_unlock(local_map);
1983
1984 goto REDISCOVER_ENTRY;
1985 }
1986 }
1987 if (entry->is_sub_map) {
1988 vm_map_t submap;
1989
1990 submap = entry->object.sub_map;
1991 local_start = entry->vme_start;
1992 local_offset = entry->offset;
1993 vm_map_reference(submap);
1994 vm_map_unlock(map);
1995
1996 ret = (vm_map_get_upl(submap,
1997 local_offset + (offset - local_start),
1998 upl_size, upl, page_list, count,
1999 flags, force_data_sync));
2000
2001 vm_map_deallocate(submap);
2002 return ret;
2003 }
2004
2005 if (sync_cow_data) {
2006 if (entry->object.vm_object->shadow) {
2007 int flags;
2008
2009 local_object = entry->object.vm_object;
2010 local_start = entry->vme_start;
2011 local_offset = entry->offset;
2012 vm_object_reference(local_object);
2013 vm_map_unlock(map);
2014
2015 if(local_object->copy == NULL) {
2016 flags = MEMORY_OBJECT_DATA_SYNC;
2017 } else {
2018 flags = MEMORY_OBJECT_COPY_SYNC;
2019 }
2020
2021 if((local_object->paging_offset) &&
2022 (local_object->pager == 0)) {
2023 /*
2024 * do a little clean-up for our unorthodox
2025 * entry into a pager call from a non-pager
2026 * context. Normally the pager code
2027 * assumes that an object it has been called
2028 * with has a backing pager and so does
2029 * not bother to check the pager field
2030 * before relying on the paging_offset
2031 */
2032 vm_object_lock(local_object);
2033 if (local_object->pager == 0) {
2034 local_object->paging_offset = 0;
2035 }
2036 vm_object_unlock(local_object);
2037 }
2038
2039 memory_object_lock_request(
2040 local_object, ((offset - local_start)
2041 + local_offset) +
2042 local_object->paging_offset,
2043 (vm_object_size_t)*upl_size, FALSE,
2044 flags,
2045 VM_PROT_NO_CHANGE, NULL, 0);
2046 sync_cow_data = FALSE;
2047 goto REDISCOVER_ENTRY;
2048 }
2049 }
2050
2051 if (force_data_sync) {
2052
2053 local_object = entry->object.vm_object;
2054 local_start = entry->vme_start;
2055 local_offset = entry->offset;
2056 vm_object_reference(local_object);
2057 vm_map_unlock(map);
2058
2059 if((local_object->paging_offset) &&
2060 (local_object->pager == 0)) {
2061 /*
2062 * do a little clean-up for our unorthodox
2063 * entry into a pager call from a non-pager
2064 * context. Normally the pager code
2065 * assumes that an object it has been called
2066 * with has a backing pager and so does
2067 * not bother to check the pager field
2068 * before relying on the paging_offset
2069 */
2070 vm_object_lock(local_object);
2071 if (local_object->pager == 0) {
2072 local_object->paging_offset = 0;
2073 }
2074 vm_object_unlock(local_object);
2075 }
2076
2077 memory_object_lock_request(
2078 local_object, ((offset - local_start)
2079 + local_offset) +
2080 local_object->paging_offset,
2081 (vm_object_size_t)*upl_size, FALSE,
2082 MEMORY_OBJECT_DATA_SYNC,
2083 VM_PROT_NO_CHANGE,
2084 NULL, 0);
2085 force_data_sync = FALSE;
2086 goto REDISCOVER_ENTRY;
2087 }
2088
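/*
 * For ordinary objects, clip the request to MAX_UPL_TRANSFER pages
 * and report whether the backing object is physically contiguous;
 * private objects are tagged as device memory.
 */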
2089 if(!(entry->object.vm_object->private)) {
2090 if(*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
2091 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
2092 if(entry->object.vm_object->phys_contiguous) {
2093 *flags = UPL_PHYS_CONTIG;
2094 } else {
2095 *flags = 0;
2096 }
2097 } else {
2098 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
2099 }
2100 local_object = entry->object.vm_object;
2101 local_offset = entry->offset;
2102 local_start = entry->vme_start;
2103 vm_object_reference(local_object);
2104 vm_map_unlock(map);
2105 ret = (vm_fault_list_request(local_object,
2106 ((offset - local_start) + local_offset),
2107 *upl_size,
2108 upl,
2109 page_list,
2110 *count,
2111 caller_flags));
2112 vm_object_deallocate(local_object);
2113 return(ret);
2114 }
2115
2116 vm_map_unlock(map);
2117 return(KERN_FAILURE);
2118
2119 }
2120
2121
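/*
 * vm_object_upl_request:
 *
 * Build a UPL (page list) for the given range of the object via
 * vm_fault_list_request, then wrap the resulting upl_t in a kernel
 * IPC port of type IKOT_UPL and hand a send right back through *upl.
 */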
2122 kern_return_t
2123 vm_object_upl_request(
2124 vm_object_t object,
2125 vm_object_offset_t offset,
2126 vm_size_t size,
2127 ipc_port_t *upl,
2128 upl_page_info_t *page_list,
2129 mach_msg_type_number_t *count,
2130 int cntrl_flags)
2131 {
2132 upl_t upl_object;
2133 ipc_port_t upl_port;
2134 ipc_port_t previous;
2135 upl_page_info_t *pl;
2136 kern_return_t kr;
2137
2138 pl = page_list;
2139 kr = vm_fault_list_request(object, offset, size, &upl_object,
2140 &pl, *count, cntrl_flags);
2141
2142
2143 if(kr != KERN_SUCCESS) {
2144 *upl = MACH_PORT_NULL;
2145 return KERN_FAILURE;
2146 }
2147
2148 upl_port = ipc_port_alloc_kernel();
2149
2150
2151 ip_lock(upl_port);
2152
2153 /* make a sonce right */
2154 upl_port->ip_sorights++;
2155 ip_reference(upl_port);
2156
2157 upl_port->ip_destination = IP_NULL;
2158 upl_port->ip_receiver_name = MACH_PORT_NULL;
2159 upl_port->ip_receiver = ipc_space_kernel;
2160
2161 /* make a send right */
2162 upl_port->ip_mscount++;
2163 upl_port->ip_srights++;
2164 ip_reference(upl_port);
2165
2166 ipc_port_nsrequest(upl_port, 1, upl_port, &previous);
2167 /* nsrequest unlocks upl_port */
2168
2169 /* associate the UPL with the port as a kernel object (IKOT_UPL) */
2170
2171
2172 ipc_kobject_set(upl_port, (ipc_kobject_t) upl_object, IKOT_UPL);
2173 *upl = upl_port;
2174 return KERN_SUCCESS;
2175 }
2176
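/*
 * vm_pager_upl_request:
 *
 * Same as vm_object_upl_request, but goes through
 * upl_system_list_request with an additional super_size argument
 * (presumably an upper bound for clustering); the result is wrapped
 * in an IKOT_UPL port in the same way.
 */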
2177 kern_return_t
2178 vm_pager_upl_request(
2179 vm_object_t object,
2180 vm_object_offset_t offset,
2181 vm_size_t size,
2182 vm_size_t super_size,
2183 ipc_port_t *upl,
2184 upl_page_info_t *page_list,
2185 mach_msg_type_number_t *count,
2186 int cntrl_flags)
2187 {
2188 upl_t upl_object;
2189 ipc_port_t upl_port;
2190 ipc_port_t previous;
2191 upl_page_info_t *pl;
2192 kern_return_t kr;
2193
2194 pl = page_list;
2195 kr = upl_system_list_request(object, offset, size, super_size,
2196 &upl_object, &pl, *count, cntrl_flags);
2197
2198 if(kr != KERN_SUCCESS) {
2199 *upl = MACH_PORT_NULL;
2200 return KERN_FAILURE;
2201 }
2202
2203
2204 upl_port = ipc_port_alloc_kernel();
2205
2206
2207 ip_lock(upl_port);
2208
2209 /* make a sonce right */
2210 upl_port->ip_sorights++;
2211 ip_reference(upl_port);
2212
2213 upl_port->ip_destination = IP_NULL;
2214 upl_port->ip_receiver_name = MACH_PORT_NULL;
2215 upl_port->ip_receiver = ipc_space_kernel;
2216
2217 /* make a send right */
2218 upl_port->ip_mscount++;
2219 upl_port->ip_srights++;
2220 ip_reference(upl_port);
2221
2222 ipc_port_nsrequest(upl_port, 1, upl_port, &previous);
2223 /* nsrequest unlocks upl_port */
2224
2225 /* associate the UPL with the port as a kernel object (IKOT_UPL) */
2226
2227
2228 ipc_kobject_set(upl_port, (ipc_kobject_t) upl_object, IKOT_UPL);
2229 *upl = upl_port;
2230 return KERN_SUCCESS;
2231 }
2232
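/*
 * vm_upl_map / vm_upl_unmap:
 *
 * Translate an IKOT_UPL port back into its upl_t and enter (or
 * remove) the UPL's pages in the caller's map via uc_upl_map and
 * uc_upl_un_map.
 */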
2233 kern_return_t
2234 vm_upl_map(
2235 vm_map_t map,
2236 ipc_port_t upl_port,
2237 vm_offset_t *dst_addr)
2238 {
2239 upl_t upl;
2240 kern_return_t kr;
2241
2242 if (!IP_VALID(upl_port)) {
2243 return KERN_INVALID_ARGUMENT;
2244 } else if (ip_kotype(upl_port) == IKOT_UPL) {
2245 upl = (upl_t)upl_port->ip_kobject;
2246 upl_lock(upl);
2247 kr = uc_upl_map(map, upl, dst_addr);
2248 upl_unlock(upl);
2249 return kr;
2250 } else {
2251 return KERN_FAILURE;
2252 }
2253 }
2254
2255
2256 kern_return_t
2257 vm_upl_unmap(
2258 vm_map_t map,
2259 ipc_port_t upl_port)
2260 {
2261 upl_t upl;
2262 kern_return_t kr;
2263
2264 if (!IP_VALID(upl_port)) {
2265 return KERN_INVALID_ARGUMENT;
2266 } else if (ip_kotype(upl_port) == IKOT_UPL) {
2267 upl = (upl_t)upl_port->ip_kobject;
2268 upl_lock(upl);
2269 kr = uc_upl_un_map(map, upl);
2270 upl_unlock(upl);
2271 return kr;
2272 } else {
2273 return KERN_FAILURE;
2274 }
2275 }
2276
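/*
 * The vm_upl_commit/commit_range/abort_range/abort routines below
 * are thin wrappers that take the UPL lock, forward to the
 * corresponding uc_upl_* operation and unlock; the commit variants
 * pass the page list through only when the caller supplied a
 * non-zero count.
 */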
2277 kern_return_t
2278 vm_upl_commit(
2279 upl_t upl,
2280 upl_page_list_ptr_t page_list,
2281 mach_msg_type_number_t count)
2282 {
2283 kern_return_t kr;
2284 upl_lock(upl);
2285 if(count) {
2286 kr = uc_upl_commit(upl, (upl_page_info_t *)page_list);
2287 } else {
2288 kr = uc_upl_commit(upl, (upl_page_info_t *) NULL);
2289 }
2290 upl_unlock(upl);
2291 return kr;
2292 }
2293
2294 kern_return_t
2295 vm_upl_commit_range(
2296 upl_t upl,
2297 vm_offset_t offset,
2298 vm_size_t size,
2299 upl_page_list_ptr_t page_list,
2300 int flags,
2301 mach_msg_type_number_t count)
2302 {
2303 kern_return_t kr;
2304 upl_lock(upl);
2305 if(count) {
2306 kr = uc_upl_commit_range(upl, offset, size, flags,
2307 (upl_page_info_t *)page_list);
2308 } else {
2309 kr = uc_upl_commit_range(upl, offset, size, flags,
2310 (upl_page_info_t *) NULL);
2311 }
2312 upl_unlock(upl);
2313 return kr;
2314 }
2315
2316 kern_return_t
2317 vm_upl_abort_range(
2318 upl_t upl,
2319 vm_offset_t offset,
2320 vm_size_t size,
2321 int abort_flags)
2322 {
2323 kern_return_t kr;
2324 upl_lock(upl);
2325 kr = uc_upl_abort_range(upl, offset, size, abort_flags);
2326 upl_unlock(upl);
2327 return kr;
2328 }
2329
2330 kern_return_t
2331 vm_upl_abort(
2332 upl_t upl,
2333 int abort_type)
2334 {
2335 kern_return_t kr;
2336 upl_lock(upl);
2337 kr = uc_upl_abort(upl, abort_type);
2338 upl_unlock(upl);
2339 return kr;
2340 }
2341
2342 /* ******* Temporary Internal calls to UPL for BSD ***** */
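/*
 * Unlike the port-based interfaces above, these take a upl_t
 * directly.  kernel_upl_map takes an extra reference that the
 * matching kernel_upl_unmap drops; the commit and abort routines
 * bump the reference count around the uc_upl_* call and deallocate
 * the UPL, rather than just unlocking it, once the count falls back
 * to one; this appears to be how the BSD side retires a UPL it is
 * done with.
 */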
2343 kern_return_t
2344 kernel_upl_map(
2345 vm_map_t map,
2346 upl_t upl,
2347 vm_offset_t *dst_addr)
2348 {
2349 kern_return_t kr;
2350
2351 upl_lock(upl);
2352 kr = uc_upl_map(map, upl, dst_addr);
2353 if(kr == KERN_SUCCESS) {
2354 upl->ref_count += 1;
2355 }
2356 upl_unlock(upl);
2357 return kr;
2358 }
2359
2360
2361 kern_return_t
2362 kernel_upl_unmap(
2363 vm_map_t map,
2364 upl_t upl)
2365 {
2366 kern_return_t kr;
2367
2368 upl_lock(upl);
2369 kr = uc_upl_un_map(map, upl);
2370 if(kr == KERN_SUCCESS) {
2371 if(upl->ref_count == 1) {
2372 upl_dealloc(upl);
2373 } else {
2374 upl->ref_count -= 1;
2375 upl_unlock(upl);
2376 }
2377 } else {
2378 upl_unlock(upl);
2379 }
2380 return kr;
2381 }
2382
2383 kern_return_t
2384 kernel_upl_commit(
2385 upl_t upl,
2386 upl_page_list_ptr_t page_list,
2387 mach_msg_type_number_t count)
2388 {
2389 kern_return_t kr;
2390 upl_lock(upl);
2391 upl->ref_count += 1;
2392 if(count) {
2393 kr = uc_upl_commit(upl, (upl_page_info_t *)page_list);
2394 } else {
2395 kr = uc_upl_commit(upl, (upl_page_info_t *) NULL);
2396 }
2397 if(upl->ref_count == 1) {
2398 upl_dealloc(upl);
2399 } else {
2400 upl->ref_count -= 1;
2401 upl_unlock(upl);
2402 }
2403 return kr;
2404 }
2405
2406 kern_return_t
2407 kernel_upl_commit_range(
2408 upl_t upl,
2409 vm_offset_t offset,
2410 vm_size_t size,
2411 int flags,
2412 upl_page_list_ptr_t page_list,
2413 mach_msg_type_number_t count)
2414 {
2415 kern_return_t kr;
2416 upl_lock(upl);
2417 upl->ref_count += 1;
2418 if(count) {
2419 kr = uc_upl_commit_range(upl, offset, size, flags,
2420 (upl_page_info_t *)page_list);
2421 } else {
2422 kr = uc_upl_commit_range(upl, offset, size, flags,
2423 (upl_page_info_t *) NULL);
2424 }
2425 if(upl->ref_count == 1) {
2426 upl_dealloc(upl);
2427 } else {
2428 upl->ref_count -= 1;
2429 upl_unlock(upl);
2430 }
2431 return kr;
2432 }
2433
2434 kern_return_t
2435 kernel_upl_abort_range(
2436 upl_t upl,
2437 vm_offset_t offset,
2438 vm_size_t size,
2439 int abort_flags)
2440 {
2441 kern_return_t kr;
2442 upl_lock(upl);
2443 upl->ref_count += 1;
2444 kr = uc_upl_abort_range(upl, offset, size, abort_flags);
2445 if(upl->ref_count == 1) {
2446 upl_dealloc(upl);
2447 } else {
2448 upl->ref_count -= 1;
2449 upl_unlock(upl);
2450 }
2451 return kr;
2452 }
2453
2454 kern_return_t
2455 kernel_upl_abort(
2456 upl_t upl,
2457 int abort_type)
2458 {
2459 kern_return_t kr;
2460 upl_lock(upl);
2461 upl->ref_count += 1;
2462 kr = uc_upl_abort(upl, abort_type);
2463 if(upl->ref_count == 1) {
2464 upl_dealloc(upl);
2465 } else {
2466 upl->ref_count -= 1;
2467 upl_unlock(upl);
2468 }
2469 return kr;
2470 }
2471
2472
2473
2474 /* code snippet from vm_map */
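/*
 * vm_object_create_nomap:
 *
 * Wrapper around memory_object_create_named that discards the
 * returned object pointer, exposing only the kern_return_t result.
 */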
2475 kern_return_t
2476 vm_object_create_nomap(ipc_port_t port, vm_object_size_t size)
2477 {
2478 vm_object_t object_ptr;
2479 return memory_object_create_named(port, size, &object_ptr);
2480 }
2481
2482
2483 /*
2484 * Temporary interface to overcome old style ipc artifacts, and allow
2485 * ubc to call this routine directly. Will disappear with new RPC
2486 * component architecture.
2487 * NOTE: call to memory_object_destroy removes the vm_object's association
2488 * with its abstract memory object and hence the named flag is set to false.
2489 */
2490 kern_return_t
2491 memory_object_destroy_named(
2492 vm_object_t object,
2493 kern_return_t reason)
2494 {
2495 vm_object_lock(object);
2496 if(object->named == FALSE) {
2497 panic("memory_object_destroy_named called by party which doesn't hold right");
2498 }
2499 object->ref_count++;
2500 vm_object_res_reference(object);
2501 vm_object_unlock(object);
2502 return (memory_object_destroy(object, reason));
2503 }
2504
2505 /*
2506 * Temporary interface to overcome old style ipc artifacts, and allow
2507 * ubc to call this routine directly. Will disappear with new RPC
2508 * component architecture.
2509 * Note: No change is made in the named flag.
2510 */
2511 kern_return_t
2512 memory_object_lock_request_named(
2513 vm_object_t object,
2514 vm_object_offset_t offset,
2515 vm_object_size_t size,
2516 memory_object_return_t should_return,
2517 boolean_t should_flush,
2518 vm_prot_t prot,
2519 ipc_port_t reply_to)
2520 {
2521 vm_object_lock(object);
2522 if(object->named == FALSE) {
2523 panic("memory_object_lock_request_named called by party which doesn't hold right");
2524 }
2525 object->ref_count++;
2526 vm_object_res_reference(object);
2527 vm_object_unlock(object);
2528 return (memory_object_lock_request(object,
2529 offset, size, should_return, should_flush, prot,
2530 reply_to, 0));
2531 }
2532
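/*
 * Same temporary pattern as above: verify that the caller holds the
 * named right, take an extra reference, and forward to
 * memory_object_change_attributes.
 */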
2533 kern_return_t
2534 memory_object_change_attributes_named(
2535 vm_object_t object,
2536 memory_object_flavor_t flavor,
2537 memory_object_info_t attributes,
2538 mach_msg_type_number_t count,
2539 ipc_port_t reply_to,
2540 mach_msg_type_name_t reply_to_type)
2541 {
2542 vm_object_lock(object);
2543 if(object->named == FALSE) {
2544 panic("memory_object_change_attributes_named called by party which doesn't hold right");
2545 }
2546 object->ref_count++;
2547 vm_object_res_reference(object);
2548 vm_object_unlock(object);
2549 return (memory_object_change_attributes(object,
2550 flavor, attributes, count, reply_to, reply_to_type));
2551 }
2552
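/*
 * vm_get_shared_region / vm_set_shared_region:
 *
 * Accessors for the task's system_shared_region field, cast between
 * shared_region_mapping_t and the vm_offset_t the task structure
 * actually stores.
 */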
2553 kern_return_t
2554 vm_get_shared_region(
2555 task_t task,
2556 shared_region_mapping_t *shared_region)
2557 {
2558 *shared_region = (shared_region_mapping_t) task->system_shared_region;
2559 return KERN_SUCCESS;
2560 }
2561
2562 kern_return_t
2563 vm_set_shared_region(
2564 task_t task,
2565 shared_region_mapping_t shared_region)
2566 {
2567 task->system_shared_region = (vm_offset_t) shared_region;
2568 return KERN_SUCCESS;
2569 }
2570
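/*
 * shared_region_mapping_info:
 *
 * Return a snapshot of the shared region descriptor's fields, taken
 * while holding the region's mapping lock.
 */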
2571 kern_return_t
2572 shared_region_mapping_info(
2573 shared_region_mapping_t shared_region,
2574 ipc_port_t *text_region,
2575 vm_size_t *text_size,
2576 ipc_port_t *data_region,
2577 vm_size_t *data_size,
2578 vm_offset_t *region_mappings,
2579 vm_offset_t *client_base,
2580 vm_offset_t *alt_base,
2581 vm_offset_t *alt_next,
2582 int *flags,
2583 shared_region_mapping_t *next)
2584 {
2585 shared_region_mapping_lock(shared_region);
2586
2587 *text_region = shared_region->text_region;
2588 *text_size = shared_region->text_size;
2589 *data_region = shared_region->data_region;
2590 *data_size = shared_region->data_size;
2591 *region_mappings = shared_region->region_mappings;
2592 *client_base = shared_region->client_base;
2593 *alt_base = shared_region->alternate_base;
2594 *alt_next = shared_region->alternate_next;
2595 *flags = shared_region->flags;
2596 *next = shared_region->next;
2597
2598 shared_region_mapping_unlock(shared_region);
return KERN_SUCCESS;
2599 }
2600
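/*
 * shared_region_object_chain_attach:
 *
 * Link target_region to object_chain_region through a newly
 * allocated chain element recording the donor region, its existing
 * chain and its depth; the donor's depth is bumped and the target
 * inherits the donor's alternate_next.  Fails if the target already
 * has an object chain.
 */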
2601 kern_return_t
2602 shared_region_object_chain_attach(
2603 shared_region_mapping_t target_region,
2604 shared_region_mapping_t object_chain_region)
2605 {
2606 shared_region_object_chain_t object_ele;
2607
2608 if(target_region->object_chain)
2609 return KERN_FAILURE;
2610 object_ele = (shared_region_object_chain_t)
2611 kalloc(sizeof (struct shared_region_object_chain));
if(object_ele == NULL)
return KERN_FAILURE;
2612 shared_region_mapping_lock(object_chain_region);
2613 target_region->object_chain = object_ele;
2614 object_ele->object_chain_region = object_chain_region;
2615 object_ele->next = object_chain_region->object_chain;
2616 object_ele->depth = object_chain_region->depth;
2617 object_chain_region->depth++;
2618 target_region->alternate_next = object_chain_region->alternate_next;
2619 shared_region_mapping_unlock(object_chain_region);
2620 return KERN_SUCCESS;
2621 }
2622
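/*
 * shared_region_mapping_create:
 *
 * Allocate and initialize a shared_region_mapping descriptor with a
 * single reference and no object chain.
 */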
2623 kern_return_t
2624 shared_region_mapping_create(
2625 ipc_port_t text_region,
2626 vm_size_t text_size,
2627 ipc_port_t data_region,
2628 vm_size_t data_size,
2629 vm_offset_t region_mappings,
2630 vm_offset_t client_base,
2631 shared_region_mapping_t *shared_region,
2632 vm_offset_t alt_base,
2633 vm_offset_t alt_next)
2634 {
2635 *shared_region = (shared_region_mapping_t)
2636 kalloc(sizeof (struct shared_region_mapping));
2637 if(*shared_region == NULL)
2638 return KERN_FAILURE;
2639 shared_region_mapping_lock_init((*shared_region));
2640 (*shared_region)->text_region = text_region;
2641 (*shared_region)->text_size = text_size;
2642 (*shared_region)->data_region = data_region;
2643 (*shared_region)->data_size = data_size;
2644 (*shared_region)->region_mappings = region_mappings;
2645 (*shared_region)->client_base = client_base;
2646 (*shared_region)->ref_count = 1;
2647 (*shared_region)->next = NULL;
2648 (*shared_region)->object_chain = NULL;
2649 (*shared_region)->self = *shared_region;
2650 (*shared_region)->flags = 0;
2651 (*shared_region)->depth = 0;
2652 (*shared_region)->alternate_base = alt_base;
2653 (*shared_region)->alternate_next = alt_next;
2654 return KERN_SUCCESS;
2655 }
2656
2657 kern_return_t
2658 shared_region_mapping_set_alt_next(
2659 shared_region_mapping_t shared_region,
2660 vm_offset_t alt_next)
2661 {
2662 shared_region->alternate_next = alt_next;
2663 return KERN_SUCCESS;
2664 }
2665
2666 kern_return_t
2667 shared_region_mapping_ref(
2668 shared_region_mapping_t shared_region)
2669 {
2670 if(shared_region == NULL)
2671 return KERN_SUCCESS;
2672 shared_region_mapping_lock(shared_region);
2673 shared_region->ref_count++;
2674 shared_region_mapping_unlock(shared_region);
2675 return KERN_SUCCESS;
2676 }
2677
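/*
 * shared_region_mapping_dealloc:
 *
 * Drop a reference on the shared region.  When the last reference
 * goes away, tear the region down: remove its load-shared-file
 * mappings, strip the client text range from the backing map's
 * pmap, release the text and data region ports, and free the object
 * chain element and the descriptor itself.
 */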
2678 kern_return_t
2679 shared_region_mapping_dealloc(
2680 shared_region_mapping_t shared_region)
2681 {
2682 struct shared_region_task_mappings sm_info;
2683 shared_region_mapping_t next;
2684
2685 if(shared_region == NULL)
2686 return KERN_SUCCESS;
2687 shared_region_mapping_lock(shared_region);
2688
2689 if((--shared_region->ref_count) == 0) {
2690
2691 sm_info.text_region = shared_region->text_region;
2692 sm_info.text_size = shared_region->text_size;
2693 sm_info.data_region = shared_region->data_region;
2694 sm_info.data_size = shared_region->data_size;
2695 sm_info.region_mappings = shared_region->region_mappings;
2696 sm_info.client_base = shared_region->client_base;
2697 sm_info.alternate_base = shared_region->alternate_base;
2698 sm_info.alternate_next = shared_region->alternate_next;
2699 sm_info.flags = shared_region->flags;
2700 sm_info.self = (vm_offset_t)shared_region;
2701
2702 lsf_remove_regions_mappings(shared_region, &sm_info);
2703 pmap_remove(((vm_named_entry_t)
2704 (shared_region->text_region->ip_kobject))
2705 ->backing.map->pmap,
2706 sm_info.client_base,
2707 sm_info.client_base + sm_info.text_size);
2708 ipc_port_release_send(shared_region->text_region);
2709 ipc_port_release_send(shared_region->data_region);
2710 if(shared_region->object_chain) {
2711 shared_region_mapping_dealloc(
2712 shared_region->object_chain->object_chain_region);
2713 kfree((vm_offset_t)shared_region->object_chain,
2714 sizeof (struct shared_region_object_chain));
2715 }
2716 kfree((vm_offset_t)shared_region,
2717 sizeof (struct shared_region_mapping));
2718 return KERN_SUCCESS;
2719 }
2720 shared_region_mapping_unlock(shared_region);
2721 return KERN_SUCCESS;
2722 }
2723
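/*
 * vm_map_get_phys_page:
 *
 * Translate a map offset to the physical address of the resident
 * page backing it, descending through submaps and shadow chains as
 * needed.  Returns 0 if no page is resident at that offset.
 */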
2724 vm_offset_t
2725 vm_map_get_phys_page(
2726 vm_map_t map,
2727 vm_offset_t offset)
2728 {
2729 vm_map_entry_t entry;
2730 int ops;
2731 int flags;
2732 vm_offset_t phys_addr = 0;
2733 vm_object_t object;
2734
2735 vm_map_lock(map);
2736 while (vm_map_lookup_entry(map, offset, &entry)) {
2737
2738 if (entry->object.vm_object == VM_OBJECT_NULL) {
2739 vm_map_unlock(map);
2740 return (vm_offset_t) 0;
2741 }
2742 if (entry->is_sub_map) {
2743 vm_map_t old_map;
2744 vm_map_lock(entry->object.sub_map);
2745 old_map = map;
2746 map = entry->object.sub_map;
2747 offset = entry->offset + (offset - entry->vme_start);
2748 vm_map_unlock(old_map);
2749 continue;
2750 }
2751 offset = entry->offset + (offset - entry->vme_start);
2752 object = entry->object.vm_object;
2753 vm_object_lock(object);
2754 while (TRUE) {
2755 vm_page_t dst_page = vm_page_lookup(object,offset);
2756 if(dst_page == VM_PAGE_NULL) {
2757 if(object->shadow) {
2758 vm_object_t old_object;
2759 vm_object_lock(object->shadow);
2760 old_object = object;
2761 offset = offset + object->shadow_offset;
2762 object = object->shadow;
2763 vm_object_unlock(old_object);
2764 } else {
2765 vm_object_unlock(object);
2766 break;
2767 }
2768 } else {
2769 phys_addr = dst_page->phys_addr;
2770 vm_object_unlock(object);
2771 break;
2772 }
2773 }
2774 break;
2775
2776 }
2777
2778 vm_map_unlock(map);
2779 return phys_addr;
2780 }
2781 #endif /* VM_CPM */