1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: vm/vm_user.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young
55 *
56 * User-exported virtual memory functions.
57 */
58 #ifdef MACH_BSD
59 /* remove after component interface available */
60 extern int vnode_pager_workaround;
61 #endif
62
63 #include <vm_cpm.h>
64 #include <mach/boolean.h>
65 #include <mach/kern_return.h>
66 #include <mach/mach_types.h> /* to get vm_address_t */
67 #include <mach/memory_object.h>
68 #include <mach/std_types.h> /* to get pointer_t */
69 #include <mach/vm_attributes.h>
70 #include <mach/vm_param.h>
71 #include <mach/vm_statistics.h>
72 #include <mach/vm_map_server.h>
73 #include <mach/mach_syscalls.h>
74 #include <mach/shared_memory_server.h>
75
76 #include <kern/host.h>
77 #include <kern/task.h>
78 #include <kern/misc_protos.h>
79 #include <vm/vm_map.h>
80 #include <vm/vm_object.h>
81 #include <vm/vm_page.h>
82 #include <vm/memory_object.h>
83 #include <vm/vm_pageout.h>
84
85
86
87 vm_size_t upl_offset_to_pagelist = 0;
88
89 #if VM_CPM
90 #include <vm/cpm.h>
91 #endif /* VM_CPM */
92
93 ipc_port_t dynamic_pager_control_port=NULL;
94
95 /*
96 * vm_allocate allocates "zero fill" memory in the specified
97 * map.
98 */
99 kern_return_t
100 vm_allocate(
101 register vm_map_t map,
102 register vm_offset_t *addr,
103 register vm_size_t size,
104 int flags)
105 {
106 kern_return_t result;
107 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
108
109 if (map == VM_MAP_NULL)
110 return(KERN_INVALID_ARGUMENT);
111 if (size == 0) {
112 *addr = 0;
113 return(KERN_SUCCESS);
114 }
115
116 if (anywhere)
117 *addr = vm_map_min(map);
118 else
119 *addr = trunc_page(*addr);
120 size = round_page(size);
121 if (size == 0) {
122 return(KERN_INVALID_ARGUMENT);
123 }
124
125 result = vm_map_enter(
126 map,
127 addr,
128 size,
129 (vm_offset_t)0,
130 flags,
131 VM_OBJECT_NULL,
132 (vm_object_offset_t)0,
133 FALSE,
134 VM_PROT_DEFAULT,
135 VM_PROT_ALL,
136 VM_INHERIT_DEFAULT);
137
138 return(result);
139 }
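
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a kernel caller asking for three zero-filled pages anywhere in a
 * map, paired with the matching vm_deallocate.  The helper name is
 * hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_scratch_pages(vm_map_t map)
{
	vm_offset_t	addr = 0;	/* ignored with VM_FLAGS_ANYWHERE */
	vm_size_t	size = 3 * PAGE_SIZE;
	kern_return_t	kr;

	kr = vm_allocate(map, &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use [addr, addr + size) ... */

	return vm_deallocate(map, addr, size);
}
#endif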
140
141 /*
142 * vm_deallocate deallocates the specified range of addresses in the
143 * specified address map.
144 */
145 kern_return_t
146 vm_deallocate(
147 register vm_map_t map,
148 vm_offset_t start,
149 vm_size_t size)
150 {
151 if (map == VM_MAP_NULL)
152 return(KERN_INVALID_ARGUMENT);
153
154 if (size == (vm_offset_t) 0)
155 return(KERN_SUCCESS);
156
157 return(vm_map_remove(map, trunc_page(start),
158 round_page(start+size), VM_MAP_NO_FLAGS));
159 }
160
161 /*
162 * vm_inherit sets the inheritance of the specified range in the
163 * specified map.
164 */
165 kern_return_t
166 vm_inherit(
167 register vm_map_t map,
168 vm_offset_t start,
169 vm_size_t size,
170 vm_inherit_t new_inheritance)
171 {
172 if (map == VM_MAP_NULL)
173 return(KERN_INVALID_ARGUMENT);
174
175 if (new_inheritance > VM_INHERIT_LAST_VALID)
176 return(KERN_INVALID_ARGUMENT);
177
178 return(vm_map_inherit(map,
179 trunc_page(start),
180 round_page(start+size),
181 new_inheritance));
182 }
183
184 /*
185 * vm_protect sets the protection of the specified range in the
186 * specified map.
187 */
188
189 kern_return_t
190 vm_protect(
191 register vm_map_t map,
192 vm_offset_t start,
193 vm_size_t size,
194 boolean_t set_maximum,
195 vm_prot_t new_protection)
196 {
197 if ((map == VM_MAP_NULL) ||
198 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
199 return(KERN_INVALID_ARGUMENT);
200
201 return(vm_map_protect(map,
202 trunc_page(start),
203 round_page(start+size),
204 new_protection,
205 set_maximum));
206 }
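
/*
 * Illustrative sketch (editorial addition): dropping a region to read-only.
 * With set_maximum == TRUE the call would lower the maximum protection
 * instead, which can never be raised again.  Helper name is hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_make_readonly(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	return vm_protect(map, addr, size,
			  FALSE,		/* current, not maximum */
			  VM_PROT_READ);
}
#endif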
207
208 /*
209 * Handle machine-specific attributes for a mapping, such
210 * as cacheability, migratability, etc.
211 */
212 kern_return_t
213 vm_machine_attribute(
214 vm_map_t map,
215 vm_address_t address,
216 vm_size_t size,
217 vm_machine_attribute_t attribute,
218 vm_machine_attribute_val_t* value) /* IN/OUT */
219 {
220 if (map == VM_MAP_NULL)
221 return(KERN_INVALID_ARGUMENT);
222
223 return vm_map_machine_attribute(map, address, size, attribute, value);
224 }
225
226 kern_return_t
227 vm_read(
228 vm_map_t map,
229 vm_address_t address,
230 vm_size_t size,
231 pointer_t *data,
232 mach_msg_type_number_t *data_size)
233 {
234 kern_return_t error;
235 vm_map_copy_t ipc_address;
236
237 if (map == VM_MAP_NULL)
238 return(KERN_INVALID_ARGUMENT);
239
240 if ((error = vm_map_copyin(map,
241 address,
242 size,
243 FALSE, /* src_destroy */
244 &ipc_address)) == KERN_SUCCESS) {
245 *data = (pointer_t) ipc_address;
246 *data_size = size;
247 }
248 return(error);
249 }
250
251 kern_return_t
252 vm_read_list(
253 vm_map_t map,
254 vm_read_entry_t data_list,
255 mach_msg_type_number_t count)
256 {
257 mach_msg_type_number_t i;
258 kern_return_t error = KERN_SUCCESS;
259 vm_map_copy_t ipc_address;
260
261 if (map == VM_MAP_NULL)
262 return(KERN_INVALID_ARGUMENT);
263
264 for(i=0; i<count; i++) {
265 error = vm_map_copyin(map,
266 data_list[i].address,
267 data_list[i].size,
268 FALSE, /* src_destroy */
269 &ipc_address);
270 if(error != KERN_SUCCESS) {
271 data_list[i].address = (vm_address_t)0;
272 data_list[i].size = (vm_size_t)0;
273 break;
274 }
275 if(data_list[i].size != 0) {
276 error = vm_map_copyout(current_task()->map,
277 &(data_list[i].address),
278 (vm_map_copy_t) ipc_address);
279 if(error != KERN_SUCCESS) {
280 data_list[i].address = (vm_address_t)0;
281 data_list[i].size = (vm_size_t)0;
282 break;
283 }
284 }
285 }
286 return(error);
287 }
288
289 /*
290 * This routine reads from the specified map and overwrites part of the current
291 * activation's map. Because it assumes the current thread is local, it is
292 * no longer cluster-safe without a fully supportive local proxy thread/
293 * task (but we no longer support clusters, so this is moot).
294 */
295
296 #define VM_OVERWRITE_SMALL 512
297
298 kern_return_t
299 vm_read_overwrite(
300 vm_map_t map,
301 vm_address_t address,
302 vm_size_t size,
303 vm_address_t data,
304 vm_size_t *data_size)
305 {
306 struct {
307 long align;
308 char buf[VM_OVERWRITE_SMALL];
309 } inbuf;
310 vm_map_t oldmap;
311 kern_return_t error = KERN_SUCCESS;
312 vm_map_copy_t copy;
313
314 if (map == VM_MAP_NULL)
315 return(KERN_INVALID_ARGUMENT);
316
317 if (size <= VM_OVERWRITE_SMALL) {
318 if(vm_map_read_user(map, (vm_offset_t)address,
319 (vm_offset_t)&inbuf, size)) {
320 error = KERN_INVALID_ADDRESS;
321 } else {
322 if(vm_map_write_user(current_map(),
323 (vm_offset_t)&inbuf, (vm_offset_t)data, size))
324 error = KERN_INVALID_ADDRESS;
325 }
326 }
327 else {
328 if ((error = vm_map_copyin(map,
329 address,
330 size,
331 FALSE, /* src_destroy */
332 &copy)) == KERN_SUCCESS) {
333 if ((error = vm_map_copy_overwrite(
334 current_act()->map,
335 data,
336 copy,
337 FALSE)) == KERN_SUCCESS) {
338 }
339 else {
340 vm_map_copy_discard(copy);
341 }
342 }
343 }
344 *data_size = size;
345 return(error);
346 }
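
/*
 * Illustrative sketch (editorial addition): peeking at a small amount of
 * memory in another task's map.  "local_buf" must be an address in the
 * current activation's map, per the routine above; requests of at most
 * VM_OVERWRITE_SMALL bytes take the stack-buffer fast path.  Helper name
 * is hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_peek_remote(vm_map_t remote_map, vm_address_t remote_addr,
		    vm_address_t local_buf, vm_size_t len)
{
	vm_size_t	out_size;

	return vm_read_overwrite(remote_map, remote_addr, len,
				 local_buf, &out_size);
}
#endif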
347
348
349
350
351 /*ARGSUSED*/
352 kern_return_t
353 vm_write(
354 vm_map_t map,
355 vm_address_t address,
356 vm_offset_t data,
357 mach_msg_type_number_t size)
358 {
359 if (map == VM_MAP_NULL)
360 return KERN_INVALID_ARGUMENT;
361
362 return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
363 FALSE /* interruptible XXX */);
364 }
365
366 kern_return_t
367 vm_copy(
368 vm_map_t map,
369 vm_address_t source_address,
370 vm_size_t size,
371 vm_address_t dest_address)
372 {
373 vm_map_copy_t copy;
374 kern_return_t kr;
375
376 if (map == VM_MAP_NULL)
377 return KERN_INVALID_ARGUMENT;
378
379 kr = vm_map_copyin(map, source_address, size,
380 FALSE, &copy);
381 if (kr != KERN_SUCCESS)
382 return kr;
383
384 kr = vm_map_copy_overwrite(map, dest_address, copy,
385 FALSE /* interruptible XXX */);
386 if (kr != KERN_SUCCESS) {
387 vm_map_copy_discard(copy);
388 return kr;
389 }
390
391 return KERN_SUCCESS;
392 }
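
/*
 * Illustrative sketch (editorial addition): duplicating a range within one
 * map.  The destination must already be mapped and writable, since
 * vm_map_copy_overwrite writes over existing pages.  Helper name is
 * hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_dup_range(vm_map_t map, vm_address_t src,
		  vm_address_t dst, vm_size_t len)
{
	return vm_copy(map, src, len, dst);
}
#endif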
393
394 /*
395 * Routine: vm_map_64
396 */
397 kern_return_t
398 vm_map_64(
399 vm_map_t target_map,
400 vm_offset_t *address,
401 vm_size_t initial_size,
402 vm_offset_t mask,
403 int flags,
404 ipc_port_t port,
405 vm_object_offset_t offset,
406 boolean_t copy,
407 vm_prot_t cur_protection,
408 vm_prot_t max_protection,
409 vm_inherit_t inheritance)
410 {
411 register
412 vm_object_t object;
413 vm_prot_t prot;
414 vm_object_size_t size = (vm_object_size_t)initial_size;
415 kern_return_t result;
416
417 /*
418 * Check arguments for validity
419 */
420 if ((target_map == VM_MAP_NULL) ||
421 (cur_protection & ~VM_PROT_ALL) ||
422 (max_protection & ~VM_PROT_ALL) ||
423 (inheritance > VM_INHERIT_LAST_VALID) ||
424 size == 0)
425 return(KERN_INVALID_ARGUMENT);
426
427 /*
428 * Find the vm object (if any) corresponding to this port.
429 */
430 if (!IP_VALID(port)) {
431 object = VM_OBJECT_NULL;
432 offset = 0;
433 copy = FALSE;
434 } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
435 vm_named_entry_t named_entry;
436
437 named_entry = (vm_named_entry_t)port->ip_kobject;
438 /* a few checks to make sure user is obeying rules */
439 if(size == 0) {
440 if(offset >= named_entry->size)
441 return(KERN_INVALID_RIGHT);
442 size = named_entry->size - offset;
443 }
444 if((named_entry->protection & max_protection) != max_protection)
445 return(KERN_INVALID_RIGHT);
446 if((named_entry->protection & cur_protection) != cur_protection)
447 return(KERN_INVALID_RIGHT);
448 if(named_entry->size < (offset + size))
449 return(KERN_INVALID_ARGUMENT);
450
451 /* the caller's offset parameter is relative to the start of the */
452 /* named entry; convert it to an offset within the backing object */
453 offset = offset + named_entry->offset;
454
455 named_entry_lock(named_entry);
456 if(named_entry->is_sub_map) {
457 vm_map_entry_t map_entry;
458
459 named_entry_unlock(named_entry);
460 *address = trunc_page(*address);
461 size = round_page(size);
462 vm_object_reference(vm_submap_object);
463 if ((result = vm_map_enter(target_map,
464 address, size, mask, flags,
465 vm_submap_object, 0,
466 FALSE,
467 cur_protection, max_protection, inheritance
468 )) != KERN_SUCCESS) {
469 vm_object_deallocate(vm_submap_object);
470 } else {
471 char alias;
472
473 VM_GET_FLAGS_ALIAS(flags, alias);
474 if ((alias == VM_MEMORY_SHARED_PMAP) &&
475 !copy) {
476 vm_map_submap(target_map, *address,
477 (*address) + size,
478 named_entry->backing.map,
479 (vm_offset_t)offset, TRUE);
480 } else {
481 vm_map_submap(target_map, *address,
482 (*address) + size,
483 named_entry->backing.map,
484 (vm_offset_t)offset, FALSE);
485 }
486 if(copy) {
487 if(vm_map_lookup_entry(
488 target_map, *address, &map_entry)) {
489 map_entry->needs_copy = TRUE;
490 }
491 }
492 }
493 return(result);
494
495 } else if(named_entry->object) {
496 /* This is the case where we are going to map */
497 /* an already mapped object. If the object is */
498 /* not ready, it is internal. An external */
499 /* object cannot be mapped until it is ready, */
500 /* so we can safely skip the ready check */
501 /* in this case. */
502 named_entry_unlock(named_entry);
503 vm_object_reference(named_entry->object);
504 object = named_entry->object;
505 } else {
506 object = vm_object_enter(named_entry->backing.pager,
507 named_entry->size,
508 named_entry->internal,
509 FALSE,
510 FALSE);
511 if (object == VM_OBJECT_NULL) {
512 named_entry_unlock(named_entry);
513 return(KERN_INVALID_OBJECT);
514 }
515 named_entry->object = object;
516 named_entry_unlock(named_entry);
517 /* create an extra reference for the named entry */
518 vm_object_reference(named_entry->object);
519 /* wait for object (if any) to be ready */
520 if (object != VM_OBJECT_NULL) {
521 vm_object_lock(object);
522 while (!object->pager_ready) {
523 vm_object_wait(object,
524 VM_OBJECT_EVENT_PAGER_READY,
525 THREAD_UNINT);
526 vm_object_lock(object);
527 }
528 vm_object_unlock(object);
529 }
530 }
531 } else {
532 if ((object = vm_object_enter(port, size, FALSE, FALSE, FALSE))
533 == VM_OBJECT_NULL)
534 return(KERN_INVALID_OBJECT);
535
536 /* wait for object (if any) to be ready */
537 if (object != VM_OBJECT_NULL) {
538 vm_object_lock(object);
539 while (!object->pager_ready) {
540 vm_object_wait(object,
541 VM_OBJECT_EVENT_PAGER_READY,
542 THREAD_UNINT);
543 vm_object_lock(object);
544 }
545 vm_object_unlock(object);
546 }
547 }
548
549 *address = trunc_page(*address);
550 size = round_page(size);
551
552 /*
553 * Perform the copy if requested
554 */
555
556 if (copy) {
557 vm_object_t new_object;
558 vm_object_offset_t new_offset;
559
560 result = vm_object_copy_strategically(object, offset, size,
561 &new_object, &new_offset,
562 &copy);
563
564
565 if (result == KERN_MEMORY_RESTART_COPY) {
566 boolean_t success;
567 boolean_t src_needs_copy;
568
569 /*
570 * XXX
571 * We currently ignore src_needs_copy.
572 * This really is the issue of how to make
573 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
574 * non-kernel users to use. Solution forthcoming.
575 * In the meantime, since we don't allow non-kernel
576 * memory managers to specify symmetric copy,
577 * we won't run into problems here.
578 */
579 new_object = object;
580 new_offset = offset;
581 success = vm_object_copy_quickly(&new_object,
582 new_offset, size,
583 &src_needs_copy,
584 &copy);
585 assert(success);
586 result = KERN_SUCCESS;
587 }
588 /*
589 * Throw away the reference to the
590 * original object, as it won't be mapped.
591 */
592
593 vm_object_deallocate(object);
594
595 if (result != KERN_SUCCESS)
596 return (result);
597
598 object = new_object;
599 offset = new_offset;
600 }
601
602 if ((result = vm_map_enter(target_map,
603 address, size, mask, flags,
604 object, offset,
605 copy,
606 cur_protection, max_protection, inheritance
607 )) != KERN_SUCCESS)
608 vm_object_deallocate(object);
609 return(result);
610 }
611
612 /* temporary, until world build */
613 kern_return_t vm_map(
614 vm_map_t target_map,
615 vm_offset_t *address,
616 vm_size_t size,
617 vm_offset_t mask,
618 int flags,
619 ipc_port_t port,
620 vm_offset_t offset,
621 boolean_t copy,
622 vm_prot_t cur_protection,
623 vm_prot_t max_protection,
624 vm_inherit_t inheritance)
625 {
626 return vm_map_64(target_map, address, size, mask, flags,
627 port, (vm_object_offset_t)offset, copy,
628 cur_protection, max_protection, inheritance);
629 }
630
631
632 /*
633 * NOTE: this routine (and this file) will no longer require mach_host_server.h
634 * when vm_wire is changed to use ledgers.
635 */
636 #include <mach/mach_host_server.h>
637 /*
638 * Specify that the range of the virtual address space
639 * of the target task must not cause page faults for
640 * the indicated accesses.
641 *
642 * [ To unwire the pages, specify VM_PROT_NONE. ]
643 */
644 kern_return_t
645 vm_wire(
646 host_priv_t host_priv,
647 register vm_map_t map,
648 vm_offset_t start,
649 vm_size_t size,
650 vm_prot_t access)
651 {
652 kern_return_t rc;
653
654 if (host_priv == HOST_PRIV_NULL)
655 return KERN_INVALID_HOST;
656
657 assert(host_priv == &realhost);
658
659 if (map == VM_MAP_NULL)
660 return KERN_INVALID_TASK;
661
662 if (access & ~VM_PROT_ALL)
663 return KERN_INVALID_ARGUMENT;
664
665 if (access != VM_PROT_NONE) {
666 rc = vm_map_wire(map, trunc_page(start),
667 round_page(start+size), access, TRUE);
668 } else {
669 rc = vm_map_unwire(map, trunc_page(start),
670 round_page(start+size), TRUE);
671 }
672 return rc;
673 }
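
/*
 * Illustrative sketch (editorial addition): wiring a buffer so it cannot
 * fault during I/O, then unwiring it with VM_PROT_NONE as described above.
 * Helper name is hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_pin_buffer(host_priv_t host_priv, vm_map_t map,
		   vm_offset_t addr, vm_size_t size)
{
	kern_return_t	kr;

	kr = vm_wire(host_priv, map, addr, size,
		     VM_PROT_READ | VM_PROT_WRITE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... work that must not take page faults ... */

	return vm_wire(host_priv, map, addr, size, VM_PROT_NONE);
}
#endif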
674
675 /*
676 * vm_msync
677 *
678 * Synchronises the memory range specified with its backing store
679 * image by either flushing or cleaning the contents to the appropriate
680 * memory manager, engaging in a memory object synchronize dialog with
681 * the manager. The client doesn't return until the manager issues the
682 * m_o_s_completed message. MIG magically converts the user task
683 * parameter to the task's address map.
684 *
685 * interpretation of sync_flags
686 * VM_SYNC_INVALIDATE - discard pages, only return precious
687 * pages to manager.
688 *
689 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
690 * - discard pages, write dirty or precious
691 * pages back to memory manager.
692 *
693 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
694 * - write dirty or precious pages back to
695 * the memory manager.
696 *
697 * NOTE
698 * The memory object attributes have not yet been implemented; this
699 * function will have to deal with the invalidate attribute.
700 *
701 * RETURNS
702 * KERN_INVALID_TASK Bad task parameter
703 * KERN_INVALID_ARGUMENT both sync and async were specified.
704 * KERN_SUCCESS The usual.
705 */
706
707 kern_return_t
708 vm_msync(
709 vm_map_t map,
710 vm_address_t address,
711 vm_size_t size,
712 vm_sync_t sync_flags)
713 {
714 msync_req_t msr;
715 msync_req_t new_msr;
716 queue_chain_t req_q; /* queue of requests for this msync */
717 vm_map_entry_t entry;
718 vm_size_t amount_left;
719 vm_object_offset_t offset;
720 boolean_t do_sync_req;
721 boolean_t modifiable;
722
723
724 if ((sync_flags & VM_SYNC_ASYNCHRONOUS) &&
725 (sync_flags & VM_SYNC_SYNCHRONOUS))
726 return(KERN_INVALID_ARGUMENT);
727
728 /*
729 * align address and size on page boundaries
730 */
731 size = round_page(address + size) - trunc_page(address);
732 address = trunc_page(address);
733
734 if (map == VM_MAP_NULL)
735 return(KERN_INVALID_TASK);
736
737 if (size == 0)
738 return(KERN_SUCCESS);
739
740 queue_init(&req_q);
741 amount_left = size;
742
743 while (amount_left > 0) {
744 vm_size_t flush_size;
745 vm_object_t object;
746
747 vm_map_lock(map);
748 if (!vm_map_lookup_entry(map, address, &entry)) {
749 vm_size_t skip;
750
751 /*
752 * hole in the address map.
753 */
754
755 /*
756 * Check for empty map.
757 */
758 if (entry == vm_map_to_entry(map) &&
759 entry->vme_next == entry) {
760 vm_map_unlock(map);
761 break;
762 }
763 /*
764 * Check that we don't wrap and that
765 * we have at least one real map entry.
766 */
767 if ((map->hdr.nentries == 0) ||
768 (entry->vme_next->vme_start < address)) {
769 vm_map_unlock(map);
770 break;
771 }
772 /*
773 * Move up to the next entry if needed
774 */
775 skip = (entry->vme_next->vme_start - address);
776 if (skip >= amount_left)
777 amount_left = 0;
778 else
779 amount_left -= skip;
780 address = entry->vme_next->vme_start;
781 vm_map_unlock(map);
782 continue;
783 }
784
785 offset = address - entry->vme_start;
786
787 /*
788 * do we have more to flush than is contained in this
789 * entry ?
790 */
791 if (amount_left + entry->vme_start + offset > entry->vme_end) {
792 flush_size = entry->vme_end -
793 (entry->vme_start + offset);
794 } else {
795 flush_size = amount_left;
796 }
797 amount_left -= flush_size;
798 address += flush_size;
799
800 if (entry->is_sub_map == TRUE) {
801 vm_map_t local_map;
802 vm_offset_t local_offset;
803
804 local_map = entry->object.sub_map;
805 local_offset = entry->offset;
806 vm_map_unlock(map);
807 vm_msync(
808 local_map,
809 local_offset,
810 flush_size,
811 sync_flags);
812 continue;
813 }
814 object = entry->object.vm_object;
815
816 /*
817 * We can't sync this object if the object has not been
818 * created yet
819 */
820 if (object == VM_OBJECT_NULL) {
821 vm_map_unlock(map);
822 continue;
823 }
824 offset += entry->offset;
825 modifiable = (entry->protection & VM_PROT_WRITE)
826 != VM_PROT_NONE;
827
828 vm_object_lock(object);
829
830 if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) {
831 boolean_t kill_pages = 0;
832
833 if (sync_flags & VM_SYNC_KILLPAGES) {
834 if (object->ref_count == 1 && !entry->needs_copy && !object->shadow)
835 kill_pages = 1;
836 else
837 kill_pages = -1;
838 }
839 if (kill_pages != -1)
840 memory_object_deactivate_pages(object, offset,
841 (vm_object_size_t)flush_size, kill_pages);
842 vm_object_unlock(object);
843 vm_map_unlock(map);
844 continue;
845 }
846 /*
847 * We can't sync this object if there isn't a pager.
848 * Don't bother to sync internal objects, since there can't
849 * be any "permanent" storage for these objects anyway.
850 */
851 if ((object->pager == IP_NULL) || (object->internal) ||
852 (object->private)) {
853 vm_object_unlock(object);
854 vm_map_unlock(map);
855 continue;
856 }
857 /*
858 * keep reference on the object until syncing is done
859 */
860 assert(object->ref_count > 0);
861 object->ref_count++;
862 vm_object_res_reference(object);
863 vm_object_unlock(object);
864
865 vm_map_unlock(map);
866
867 do_sync_req = memory_object_sync(object,
868 offset,
869 flush_size,
870 sync_flags & VM_SYNC_INVALIDATE,
871 (modifiable &&
872 (sync_flags & VM_SYNC_SYNCHRONOUS ||
873 sync_flags & VM_SYNC_ASYNCHRONOUS)));
874
875 /*
876 * only send an m_o_s if we returned pages or if the entry
877 * is writable (i.e. dirty pages may have already been sent back)
878 */
879 if (!do_sync_req && !modifiable) {
880 vm_object_deallocate(object);
881 continue;
882 }
883 msync_req_alloc(new_msr);
884
885 vm_object_lock(object);
886 offset += object->paging_offset;
887
888 new_msr->offset = offset;
889 new_msr->length = flush_size;
890 new_msr->object = object;
891 new_msr->flag = VM_MSYNC_SYNCHRONIZING;
892 re_iterate:
893 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
894 /*
895 * need to check for overlapping entry, if found, wait
896 * on overlapping msr to be done, then reiterate
897 */
898 msr_lock(msr);
899 if (msr->flag == VM_MSYNC_SYNCHRONIZING &&
900 ((offset >= msr->offset &&
901 offset < (msr->offset + msr->length)) ||
902 (msr->offset >= offset &&
903 msr->offset < (offset + flush_size))))
904 {
905 assert_wait((event_t) msr,THREAD_INTERRUPTIBLE);
906 msr_unlock(msr);
907 vm_object_unlock(object);
908 thread_block((void (*)(void))0);
909 vm_object_lock(object);
910 goto re_iterate;
911 }
912 msr_unlock(msr);
913 }/* queue_iterate */
914
915 queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q);
916 vm_object_unlock(object);
917
918 queue_enter(&req_q, new_msr, msync_req_t, req_q);
919
920 #ifdef MACH_BSD
921 if(((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) ==
922 ((rpc_subsystem_t) &vnode_pager_workaround)) {
923 (void) vnode_pager_synchronize(
924 object->pager,
925 object->pager_request,
926 offset,
927 flush_size,
928 sync_flags);
929 } else {
930 (void) memory_object_synchronize(
931 object->pager,
932 object->pager_request,
933 offset,
934 flush_size,
935 sync_flags);
936 }
937 #else
938 (void) memory_object_synchronize(
939 object->pager,
940 object->pager_request,
941 offset,
942 flush_size,
943 sync_flags);
944 #endif
945 }/* while */
946
947 /*
948 * wait for memory_object_synchronize_completed messages from pager(s)
949 */
950
951 while (!queue_empty(&req_q)) {
952 msr = (msync_req_t)queue_first(&req_q);
953 msr_lock(msr);
954 while(msr->flag != VM_MSYNC_DONE) {
955 assert_wait((event_t) msr, THREAD_INTERRUPTIBLE);
956 msr_unlock(msr);
957 thread_block((void (*)(void))0);
958 msr_lock(msr);
959 }/* while */
960 queue_remove(&req_q, msr, msync_req_t, req_q);
961 msr_unlock(msr);
962 vm_object_deallocate(msr->object);
963 msync_req_free(msr);
964 }/* while */
965
966 return(KERN_SUCCESS);
967 }/* vm_msync */
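
/*
 * Illustrative sketch (editorial addition): a synchronous flush of dirty
 * and precious pages back to the memory manager, per the sync_flags table
 * above.  Helper name is hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_flush_range(vm_map_t map, vm_address_t addr, vm_size_t len)
{
	/* blocks until the manager's m_o_s_completed reply arrives */
	return vm_msync(map, addr, len, VM_SYNC_SYNCHRONOUS);
}
#endif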
968
969
970 /*
971 * task_wire
972 *
973 * Set or clear the map's wiring_required flag. This flag, if set,
974 * will cause all future virtual memory allocation to allocate
975 * user wired memory. Unwiring pages wired down as a result of
976 * this routine is done with the vm_wire interface.
977 */
978 kern_return_t
979 task_wire(
980 vm_map_t map,
981 boolean_t must_wire)
982 {
983 if (map == VM_MAP_NULL)
984 return(KERN_INVALID_ARGUMENT);
985
986 if (must_wire)
987 map->wiring_required = TRUE;
988 else
989 map->wiring_required = FALSE;
990
991 return(KERN_SUCCESS);
992 }
993
994 /*
995 * vm_behavior_set sets the paging behavior attribute for the
996 * specified range in the specified map. This routine will fail
997 * with KERN_INVALID_ADDRESS if any address in [start,start+size)
998 * is not a valid allocated or reserved memory region.
999 */
1000 kern_return_t
1001 vm_behavior_set(
1002 vm_map_t map,
1003 vm_offset_t start,
1004 vm_size_t size,
1005 vm_behavior_t new_behavior)
1006 {
1007 if (map == VM_MAP_NULL)
1008 return(KERN_INVALID_ARGUMENT);
1009
1010 return(vm_map_behavior_set(map, trunc_page(start),
1011 round_page(start+size), new_behavior));
1012 }
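
/*
 * Illustrative sketch (editorial addition): hinting that a range will be
 * touched in ascending order so the fault path can read ahead.  Helper
 * name is hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_hint_sequential(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
	return vm_behavior_set(map, addr, len, VM_BEHAVIOR_SEQUENTIAL);
}
#endif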
1013
1014 #if VM_CPM
1015 /*
1016 * Control whether the kernel will permit use of
1017 * vm_allocate_cpm at all.
1018 */
1019 unsigned int vm_allocate_cpm_enabled = 1;
1020
1021 /*
1022 * Ordinarily, the right to allocate CPM is restricted
1023 * to privileged applications (those that can gain access
1024 * to the host port). Set this variable to zero if you
1025 * want to let any application allocate CPM.
1026 */
1027 unsigned int vm_allocate_cpm_privileged = 0;
1028
1029 /*
1030 * Allocate memory in the specified map, with the caveat that
1031 * the memory is physically contiguous. This call may fail
1032 * if the system can't find sufficient contiguous memory.
1033 * This call may cause or lead to heart-stopping amounts of
1034 * paging activity.
1035 *
1036 * Memory obtained from this call should be freed in the
1037 * normal way, viz., via vm_deallocate.
1038 */
1039 kern_return_t
1040 vm_allocate_cpm(
1041 host_priv_t host_priv,
1042 register vm_map_t map,
1043 register vm_offset_t *addr,
1044 register vm_size_t size,
1045 int flags)
1046 {
1047 vm_object_t cpm_obj;
1048 pmap_t pmap;
1049 vm_page_t m, pages;
1050 kern_return_t kr;
1051 vm_offset_t va, start, end, offset;
1052 #if MACH_ASSERT
1053 extern vm_offset_t avail_start, avail_end;
1054 vm_offset_t prev_addr;
1055 #endif /* MACH_ASSERT */
1056
1057 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
1058
1059 if (!vm_allocate_cpm_enabled)
1060 return KERN_FAILURE;
1061
1062 if (vm_allocate_cpm_privileged && host_priv == HOST_PRIV_NULL)
1063 return KERN_INVALID_HOST;
1064
1065 if (map == VM_MAP_NULL)
1066 return KERN_INVALID_ARGUMENT;
1067
1068 assert(host_priv == &realhost);
1069
1070 if (size == 0) {
1071 *addr = 0;
1072 return KERN_SUCCESS;
1073 }
1074
1075 if (anywhere)
1076 *addr = vm_map_min(map);
1077 else
1078 *addr = trunc_page(*addr);
1079 size = round_page(size);
1080
1081 if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS)
1082 return kr;
1083
1084 cpm_obj = vm_object_allocate(size);
1085 assert(cpm_obj != VM_OBJECT_NULL);
1086 assert(cpm_obj->internal);
1087 assert(cpm_obj->size == size);
1088 assert(cpm_obj->can_persist == FALSE);
1089 assert(cpm_obj->pager_created == FALSE);
1090 assert(cpm_obj->pageout == FALSE);
1091 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1092
1093 /*
1094 * Insert pages into object.
1095 */
1096
1097 vm_object_lock(cpm_obj);
1098 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1099 m = pages;
1100 pages = NEXT_PAGE(m);
1101
1102 assert(!m->gobbled);
1103 assert(!m->wanted);
1104 assert(!m->pageout);
1105 assert(!m->tabled);
1106 assert(m->busy);
1107 assert(m->phys_addr>=avail_start && m->phys_addr<=avail_end);
1108
1109 m->busy = FALSE;
1110 vm_page_insert(m, cpm_obj, offset);
1111 }
1112 assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
1113 vm_object_unlock(cpm_obj);
1114
1115 /*
1116 * Hang onto a reference on the object in case a
1117 * multi-threaded application for some reason decides
1118 * to deallocate the portion of the address space into
1119 * which we will insert this object.
1120 *
1121 * Unfortunately, we must insert the object now before
1122 * we can talk to the pmap module about which addresses
1123 * must be wired down. Hence, the race with a multi-
1124 * threaded app.
1125 */
1126 vm_object_reference(cpm_obj);
1127
1128 /*
1129 * Insert object into map.
1130 */
1131
1132 kr = vm_map_enter(
1133 map,
1134 addr,
1135 size,
1136 (vm_offset_t)0,
1137 flags,
1138 cpm_obj,
1139 (vm_object_offset_t)0,
1140 FALSE,
1141 VM_PROT_ALL,
1142 VM_PROT_ALL,
1143 VM_INHERIT_DEFAULT);
1144
1145 if (kr != KERN_SUCCESS) {
1146 /*
1147 * A CPM object doesn't have can_persist set,
1148 * so all we have to do is deallocate it to
1149 * free up these pages.
1150 */
1151 assert(cpm_obj->pager_created == FALSE);
1152 assert(cpm_obj->can_persist == FALSE);
1153 assert(cpm_obj->pageout == FALSE);
1154 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1155 vm_object_deallocate(cpm_obj); /* kill acquired ref */
1156 vm_object_deallocate(cpm_obj); /* kill creation ref */
	return kr;	/* both refs are gone; don't touch cpm_obj or *addr below */
1157 }
1158
1159 /*
1160 * Inform the physical mapping system that the
1161 * range of addresses may not fault, so that
1162 * page tables and such can be locked down as well.
1163 */
1164 start = *addr;
1165 end = start + size;
1166 pmap = vm_map_pmap(map);
1167 pmap_pageable(pmap, start, end, FALSE);
1168
1169 /*
1170 * Enter each page into the pmap, to avoid faults.
1171 * Note that this loop could be coded more efficiently,
1172 * if the need arose, rather than looking up each page
1173 * again.
1174 */
1175 for (offset = 0, va = start; offset < size;
1176 va += PAGE_SIZE, offset += PAGE_SIZE) {
1177 vm_object_lock(cpm_obj);
1178 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1179 vm_object_unlock(cpm_obj);
1180 assert(m != VM_PAGE_NULL);
1181 PMAP_ENTER(pmap, va, m, VM_PROT_ALL, TRUE);
1182 }
1183
1184 #if MACH_ASSERT
1185 /*
1186 * Verify ordering in address space.
1187 */
1188 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1189 vm_object_lock(cpm_obj);
1190 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1191 vm_object_unlock(cpm_obj);
1192 if (m == VM_PAGE_NULL)
1193 panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
1194 cpm_obj, offset);
1195 assert(m->tabled);
1196 assert(!m->busy);
1197 assert(!m->wanted);
1198 assert(!m->fictitious);
1199 assert(!m->private);
1200 assert(!m->absent);
1201 assert(!m->error);
1202 assert(!m->cleaning);
1203 assert(!m->precious);
1204 assert(!m->clustered);
1205 if (offset != 0) {
1206 if (m->phys_addr != prev_addr + PAGE_SIZE) {
1207 printf("start 0x%x end 0x%x va 0x%x\n",
1208 start, end, va);
1209 printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
1210 printf("m 0x%x prev_address 0x%x\n", m,
1211 prev_addr);
1212 panic("vm_allocate_cpm: pages not contig!");
1213 }
1214 }
1215 prev_addr = m->phys_addr;
1216 }
1217 #endif /* MACH_ASSERT */
1218
1219 vm_object_deallocate(cpm_obj); /* kill extra ref */
1220
1221 return kr;
1222 }
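
/*
 * Illustrative sketch (editorial addition): requesting a physically
 * contiguous region and releasing it the normal way, as the comment above
 * prescribes.  Helper name is hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_contig_alloc(host_priv_t host_priv, vm_map_t map,
		     vm_offset_t *addr, vm_size_t size)
{
	kern_return_t	kr;

	*addr = 0;
	kr = vm_allocate_cpm(host_priv, map, addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;	/* e.g. no contiguous memory available */

	/* ... set up DMA, etc. ... */

	return vm_deallocate(map, *addr, size);
}
#endif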
1223
1224
1225 #else /* VM_CPM */
1226
1227 /*
1228 * Interface is defined in all cases, but unless the kernel
1229 * is built explicitly for this option, the interface does
1230 * nothing.
1231 */
1232
1233 kern_return_t
1234 vm_allocate_cpm(
1235 host_priv_t host_priv,
1236 register vm_map_t map,
1237 register vm_offset_t *addr,
1238 register vm_size_t size,
1239 int flags)
1240 {
1241 return KERN_FAILURE;
1242 }
1243
1244 /*
1245 */
1246 kern_return_t
1247 mach_memory_object_memory_entry_64(
1248 host_t host,
1249 boolean_t internal,
1250 vm_object_offset_t size,
1251 vm_prot_t permission,
1252 ipc_port_t pager,
1253 ipc_port_t *entry_handle)
1254 {
1255 vm_named_entry_t user_object;
1256 ipc_port_t user_handle;
1257 ipc_port_t previous;
1258 kern_return_t kr;
1259
1260 if (host == HOST_NULL)
1261 return(KERN_INVALID_HOST);
1262
1263 user_object = (vm_named_entry_t)
1264 kalloc(sizeof (struct vm_named_entry));
1265 if(user_object == NULL)
1266 return KERN_FAILURE;
1267 named_entry_lock_init(user_object);
1268 user_handle = ipc_port_alloc_kernel();
1269 ip_lock(user_handle);
1270
1271 /* make a sonce right */
1272 user_handle->ip_sorights++;
1273 ip_reference(user_handle);
1274
1275 user_handle->ip_destination = IP_NULL;
1276 user_handle->ip_receiver_name = MACH_PORT_NULL;
1277 user_handle->ip_receiver = ipc_space_kernel;
1278
1279 /* make a send right */
1280 user_handle->ip_mscount++;
1281 user_handle->ip_srights++;
1282 ip_reference(user_handle);
1283
1284 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1285 /* nsrequest unlocks user_handle */
1286
1287 user_object->object = NULL;
1288 user_object->size = size;
1289 user_object->offset = 0;
1290 user_object->backing.pager = pager;
1291 user_object->protection = permission;
1292 user_object->internal = internal;
1293 user_object->is_sub_map = FALSE;
1294 user_object->ref_count = 1;
1295
1296 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1297 IKOT_NAMED_ENTRY);
1298 *entry_handle = user_handle;
1299 return KERN_SUCCESS;
1300 }
1301
1302 kern_return_t
1303 mach_memory_object_memory_entry(
1304 host_t host,
1305 boolean_t internal,
1306 vm_size_t size,
1307 vm_prot_t permission,
1308 ipc_port_t pager,
1309 ipc_port_t *entry_handle)
1310 {
1311 return mach_memory_object_memory_entry_64( host, internal,
1312 (vm_object_offset_t)size, permission, pager, entry_handle);
1313 }
1314
1315
1316
1317 /*
1318 */
1319
1320 kern_return_t
1321 mach_make_memory_entry_64(
1322 vm_map_t target_map,
1323 vm_object_size_t *size,
1324 vm_object_offset_t offset,
1325 vm_prot_t permission,
1326 ipc_port_t *object_handle,
1327 ipc_port_t parent_entry)
1328 {
1329 vm_map_version_t version;
1330 vm_named_entry_t user_object;
1331 ipc_port_t user_handle;
1332 ipc_port_t previous;
1333 kern_return_t kr;
1334 vm_map_t pmap_map;
1335
1336 /* needed for call to vm_map_lookup_locked */
1337 boolean_t wired;
1338 vm_object_offset_t obj_off;
1339 vm_prot_t prot;
1340 vm_object_offset_t lo_offset, hi_offset;
1341 vm_behavior_t behavior;
1342 vm_object_t object;
1343
1344 /* needed for direct map entry manipulation */
1345 vm_map_entry_t map_entry;
1346 vm_map_t local_map;
1347 vm_object_size_t mappable_size;
1348
1349
1350 user_object = (vm_named_entry_t)
1351 kalloc(sizeof (struct vm_named_entry));
1352 if(user_object == NULL)
1353 return KERN_FAILURE;
1354 named_entry_lock_init(user_object);
1355 user_handle = ipc_port_alloc_kernel();
1356 ip_lock(user_handle);
1357
1358 /* make a sonce right */
1359 user_handle->ip_sorights++;
1360 ip_reference(user_handle);
1361
1362 user_handle->ip_destination = IP_NULL;
1363 user_handle->ip_receiver_name = MACH_PORT_NULL;
1364 user_handle->ip_receiver = ipc_space_kernel;
1365
1366 /* make a send right */
1367 user_handle->ip_mscount++;
1368 user_handle->ip_srights++;
1369 ip_reference(user_handle);
1370
1371 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1372 /* nsrequest unlocks user_handle */
1373
1374 user_object->backing.pager = NULL;
1375 user_object->ref_count = 1;
1376
1377 if(parent_entry == NULL) {
1378 /* Create a named object based on address range within the task map */
1379 /* Go find the object at given address */
1380
1381 permission &= VM_PROT_ALL;
1382 vm_map_lock_read(target_map);
1383
1384 /* get the object associated with the target address */
1385 /* note we check the permission of the range against */
1386 /* that requested by the caller */
1387
1388 kr = vm_map_lookup_locked(&target_map, offset,
1389 permission, &version,
1390 &object, &obj_off, &prot, &wired, &behavior,
1391 &lo_offset, &hi_offset, &pmap_map);
1392 if (kr != KERN_SUCCESS) {
1393 vm_map_unlock_read(target_map);
1394 goto make_mem_done;
1395 }
1396 if ((prot & permission) != permission) {
1397 kr = KERN_INVALID_RIGHT;
1398 vm_object_unlock(object);
1399 vm_map_unlock_read(target_map);
1400 if(pmap_map != target_map)
1401 vm_map_unlock_read(pmap_map);
1402 goto make_mem_done;
1403 }
1404
1405 /* We have an object, now check to see if this object */
1406 /* is suitable. If not, create a shadow and share that */
1407
1408 local_map = target_map;
1409 redo_lookup:
1410 while(TRUE) {
1411 if(!vm_map_lookup_entry(local_map, offset, &map_entry)) {
1412 kr = KERN_INVALID_ARGUMENT;
1413 vm_object_unlock(object);
1414 vm_map_unlock_read(target_map);
1415 if(pmap_map != target_map)
1416 vm_map_unlock_read(pmap_map);
1417 goto make_mem_done;
1418 }
1419 if(!(map_entry->is_sub_map)) {
1420 if(map_entry->object.vm_object != object) {
1421 kr = KERN_INVALID_ARGUMENT;
1422 vm_object_unlock(object);
1423 vm_map_unlock_read(target_map);
1424 if(pmap_map != target_map)
1425 vm_map_unlock_read(pmap_map);
1426 goto make_mem_done;
1427 }
1428 break;
1429 } else {
1430 local_map = map_entry->object.sub_map;
1431 vm_map_lock_read(local_map);
1432 vm_map_unlock_read(target_map);
1433 if(pmap_map != target_map)
1434 vm_map_unlock_read(pmap_map);
1435 target_map = local_map;
1436 }
1437 }
1438 if(((map_entry->max_protection) & permission) != permission) {
1439 kr = KERN_INVALID_RIGHT;
1440 vm_object_unlock(object);
1441 vm_map_unlock_read(target_map);
1442 if(pmap_map != target_map)
1443 vm_map_unlock_read(pmap_map);
1444 goto make_mem_done;
1445 }
1446 if(object->internal) {
1447 /* vm_map_lookup_locked will create a shadow if */
1448 /* needs_copy is set but does not check for the */
1449 /* other two conditions shown. It is important to */
1450 /* set up an object which will not be pulled from */
1451 /* under us. */
1452
1453 if (map_entry->needs_copy || object->shadowed ||
1454 (object->size >
1455 ((vm_object_size_t)map_entry->vme_end -
1456 map_entry->vme_start))) {
1457 if (vm_map_lock_read_to_write(target_map)) {
1458 vm_map_lock_read(target_map);
1459 goto redo_lookup;
1460 }
1461
1462
1463 /* create a shadow object */
1464
1465 vm_object_shadow(&map_entry->object.vm_object,
1466 &map_entry->offset,
1467 (map_entry->vme_end
1468 - map_entry->vme_start));
1469 map_entry->needs_copy = FALSE;
1470 vm_object_unlock(object);
1471 object = map_entry->object.vm_object;
1472 vm_object_lock(object);
1473 object->size = map_entry->vme_end
1474 - map_entry->vme_start;
1475 obj_off = (offset - map_entry->vme_start) +
1476 map_entry->offset;
1477 lo_offset = map_entry->offset;
1478 hi_offset = (map_entry->vme_end -
1479 map_entry->vme_start) +
1480 map_entry->offset;
1481
1482 vm_map_lock_write_to_read(target_map);
1483
1484 }
1485 }
1486
1487 /* note: in the future we can (if necessary) allow for */
1488 /* memory object lists; this would better support */
1489 /* fragmentation, but is it necessary? The user should */
1490 /* be encouraged to create address-space-oriented */
1491 /* shared objects from CLEAN memory regions which have */
1492 /* a known and defined history, i.e. no inheritance */
1493 /* share; make this call before making the region the */
1494 /* target of ipc's, etc. The code above, protecting */
1495 /* against delayed copy, etc., is mostly defensive. */
1496
1497
1498
1499 object->true_share = TRUE;
1500 user_object->object = object;
1501 user_object->internal = object->internal;
1502 user_object->is_sub_map = FALSE;
1503 user_object->offset = obj_off;
1504 user_object->protection = permission;
1505
1506 /* the size of the mapped entry that overlaps with our region, */
1507 /* which is targeted for sharing, is: */
1508 /* (entry_end - entry_start) - */
1509 /* (offset of our beginning address within the entry), */
1510 /* which corresponds to this: */
1511
1512 mappable_size = hi_offset - obj_off;
1513 if(*size > mappable_size)
1514 *size = mappable_size;
1515
1516 user_object->size = *size;
1517
1518 /* user_object pager and internal fields are not used */
1519 /* when the object field is filled in. */
1520
1521 object->ref_count++; /* we now point to this object, hold on */
1522 vm_object_res_reference(object);
1523 vm_object_unlock(object);
1524 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1525 IKOT_NAMED_ENTRY);
1526 *size = user_object->size;
1527 *object_handle = user_handle;
1528 vm_map_unlock_read(target_map);
1529 if(pmap_map != target_map)
1530 vm_map_unlock_read(pmap_map);
1531 return KERN_SUCCESS;
1532 } else {
1533
1534 vm_named_entry_t parent_object;
1535
1536 /* The new object will be based on an existing named object */
1537 if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
1538 kr = KERN_INVALID_ARGUMENT;
1539 goto make_mem_done;
1540 }
1541 parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
1542 if((permission & parent_object->protection) != permission) {
1543 kr = KERN_INVALID_ARGUMENT;
1544 goto make_mem_done;
1545 }
1546 if((offset + *size) > parent_object->size) {
1547 kr = KERN_INVALID_ARGUMENT;
1548 goto make_mem_done;
1549 }
1550
1551 user_object->object = parent_object->object;
1552 user_object->size = *size;
1553 user_object->offset = parent_object->offset + offset;
1554 user_object->protection = permission;
1555 if(parent_object->is_sub_map) {
1556 user_object->backing.map = parent_object->backing.map;
1557 vm_map_lock(user_object->backing.map);
1558 user_object->backing.map->ref_count++;
1559 vm_map_unlock(user_object->backing.map);
1560 }
1561 else {
1562 user_object->backing.pager = parent_object->backing.pager;
1563 }
1564 user_object->internal = parent_object->internal;
1565 user_object->is_sub_map = parent_object->is_sub_map;
1566
1567 if(parent_object->object != NULL) {
1568 /* we now point to this object, hold on */
1569 vm_object_reference(parent_object->object);
1570 vm_object_lock(parent_object->object);
1571 parent_object->object->true_share = TRUE;
1572 vm_object_unlock(parent_object->object);
1573 }
1574 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1575 IKOT_NAMED_ENTRY);
1576 *object_handle = user_handle;
1577 return KERN_SUCCESS;
1578 }
1579
1580
1581
1582 make_mem_done:
1583 ipc_port_dealloc_kernel(user_handle);
1584 kfree((vm_offset_t)user_object, sizeof (struct vm_named_entry));
1585 return kr;
1586 }
1587
1588 kern_return_t
1589 mach_make_memory_entry(
1590 vm_map_t target_map,
1591 vm_size_t *size,
1592 vm_offset_t offset,
1593 vm_prot_t permission,
1594 ipc_port_t *object_handle,
1595 ipc_port_t parent_entry)
1596 {
1597 vm_object_offset_t size_64;
1598 kern_return_t kr;
1599
1600 size_64 = (vm_object_offset_t)*size;
1601 kr = mach_make_memory_entry_64(target_map, &size_64,
1602 (vm_object_offset_t)offset, permission, object_handle,
1603 parent_entry);
1604 *size = (vm_size_t)size_64;
1605 return kr;
1606 }
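
/*
 * Illustrative sketch (editorial addition): the usual sharing pattern --
 * wrap a range of one map in a named entry, then map that entry into a
 * second map with copy == FALSE so both maps reference the same pages.
 * Helper name is hypothetical; note that *size may be clipped by
 * mach_make_memory_entry and that the caller still holds a send right on
 * the returned handle.
 */
#if 0	/* example only */
static kern_return_t
example_share_range(vm_map_t src_map, vm_offset_t src_addr,
		    vm_map_t dst_map, vm_offset_t *dst_addr,
		    vm_size_t size)
{
	ipc_port_t	handle;
	kern_return_t	kr;

	kr = mach_make_memory_entry(src_map, &size, src_addr,
				    VM_PROT_READ | VM_PROT_WRITE,
				    &handle, IP_NULL);
	if (kr != KERN_SUCCESS)
		return kr;

	*dst_addr = 0;
	return vm_map_64(dst_map, dst_addr, size, 0, VM_FLAGS_ANYWHERE,
			 handle, 0, FALSE,
			 VM_PROT_READ | VM_PROT_WRITE,
			 VM_PROT_READ | VM_PROT_WRITE,
			 VM_INHERIT_NONE);
}
#endif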
1607
1608 /*
1609 */
1610
1611 kern_return_t
1612 vm_region_object_create(
1613 vm_map_t target_map,
1614 vm_size_t size,
1615 ipc_port_t *object_handle)
1616 {
1617 vm_named_entry_t user_object;
1618 ipc_port_t user_handle;
1619 kern_return_t kr;
1620
1621 pmap_t new_pmap = pmap_create((vm_size_t) 0);
1622 ipc_port_t previous;
1623 vm_map_t new_map;
1624
1625 if(new_pmap == PMAP_NULL)
1626 return KERN_FAILURE;
1627 user_object = (vm_named_entry_t)
1628 kalloc(sizeof (struct vm_named_entry));
1629 if(user_object == NULL) {
1630 pmap_destroy(new_pmap);
1631 return KERN_FAILURE;
1632 }
1633 named_entry_lock_init(user_object);
1634 user_handle = ipc_port_alloc_kernel();
1635
1636
1637 ip_lock(user_handle);
1638
1639 /* make a sonce right */
1640 user_handle->ip_sorights++;
1641 ip_reference(user_handle);
1642
1643 user_handle->ip_destination = IP_NULL;
1644 user_handle->ip_receiver_name = MACH_PORT_NULL;
1645 user_handle->ip_receiver = ipc_space_kernel;
1646
1647 /* make a send right */
1648 user_handle->ip_mscount++;
1649 user_handle->ip_srights++;
1650 ip_reference(user_handle);
1651
1652 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1653 /* nsrequest unlocks user_handle */
1654
1655 /* Create a named object based on a submap of specified size */
1656
1657 new_map = vm_map_create(new_pmap, 0, size, TRUE);
1658 user_object->backing.map = new_map;
1659
1660
1661 user_object->object = VM_OBJECT_NULL;
1662 user_object->internal = TRUE;
1663 user_object->is_sub_map = TRUE;
1664 user_object->offset = 0;
1665 user_object->protection = VM_PROT_ALL;
1666 user_object->size = size;
1667 user_object->ref_count = 1;
1668
1669 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1670 IKOT_NAMED_ENTRY);
1671 *object_handle = user_handle;
1672 return KERN_SUCCESS;
1673
1674 }
1675
1676 /* For a given range, check all map entries. If the entry corresponds to */
1677 /* the old vm_region/map provided on the call, replace it with the */
1678 /* corresponding range in the new vm_region/map */
1679 kern_return_t vm_map_region_replace(
1680 vm_map_t target_map,
1681 ipc_port_t old_region,
1682 ipc_port_t new_region,
1683 vm_offset_t start,
1684 vm_offset_t end)
1685 {
1686 vm_named_entry_t old_object;
1687 vm_named_entry_t new_object;
1688 vm_map_t old_submap;
1689 vm_map_t new_submap;
1690 vm_offset_t addr;
1691 vm_map_entry_t entry;
1692 int nested_pmap = 0;
1693
1694
1695 vm_map_lock(target_map);
1696 old_object = (vm_named_entry_t)old_region->ip_kobject;
1697 new_object = (vm_named_entry_t)new_region->ip_kobject;
1698 if((!old_object->is_sub_map) || (!new_object->is_sub_map)) {
1699 vm_map_unlock(target_map);
1700 return KERN_INVALID_ARGUMENT;
1701 }
1702 old_submap = (vm_map_t)old_object->backing.map;
1703 new_submap = (vm_map_t)new_object->backing.map;
1704 vm_map_lock(old_submap);
1705 if((old_submap->min_offset != new_submap->min_offset) ||
1706 (old_submap->max_offset != new_submap->max_offset)) {
1707 vm_map_unlock(old_submap);
1708 vm_map_unlock(target_map);
1709 return KERN_INVALID_ARGUMENT;
1710 }
1711 if(!vm_map_lookup_entry(target_map, start, &entry)) {
1712 /* if the src is not contained, the entry precedes */
1713 /* our range */
1714 addr = entry->vme_start;
1715 if(entry == vm_map_to_entry(target_map)) {
1716 vm_map_unlock(old_submap);
1717 vm_map_unlock(target_map);
1718 return KERN_SUCCESS;
1719 }
1720 vm_map_lookup_entry(target_map, addr, &entry);
1721 }
1722 addr = entry->vme_start;
1723 vm_map_reference(old_submap);
1724 while((entry != vm_map_to_entry(target_map)) &&
1725 (entry->vme_start < end)) {
1726 if((entry->is_sub_map) &&
1727 (entry->object.sub_map == old_submap)) {
1728 entry->object.sub_map = new_submap;
1729 if(entry->use_pmap) {
1730 if((start & 0xfffffff) ||
1731 ((end - start) != 0x10000000)) {
1732 vm_map_unlock(old_submap);
1733 vm_map_unlock(target_map);
1734 return KERN_INVALID_ARGUMENT;
1735 }
1736 nested_pmap = 1;
1737 }
1738 vm_map_reference(new_submap);
1739 vm_map_deallocate(old_submap);
1740 }
1741 entry = entry->vme_next;
1742 addr = entry->vme_start;
1743 }
1744 if(nested_pmap) {
1745 #ifndef i386
1746 pmap_unnest(target_map->pmap, start, end - start);
1747 pmap_nest(target_map->pmap, new_submap->pmap,
1748 start, end - start);
1749 #endif /* i386 */
1750 } else {
1751 pmap_remove(target_map->pmap, start, end);
1752 }
1753 vm_map_unlock(old_submap);
1754 vm_map_unlock(target_map);
1755 return KERN_SUCCESS;
1756 }
1757
1758
1759 void
1760 mach_destroy_memory_entry(
1761 ipc_port_t port)
1762 {
1763 vm_named_entry_t named_entry;
1764 #if MACH_ASSERT
1765 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
1766 #endif /* MACH_ASSERT */
1767 named_entry = (vm_named_entry_t)port->ip_kobject;
1768 mutex_lock(&(named_entry)->Lock);
1769 named_entry->ref_count-=1;
1770 if(named_entry->ref_count == 0) {
1771 if(named_entry->object) {
1772 /* release the memory object we've been pointing to */
1773 vm_object_deallocate(named_entry->object);
1774 }
1775 if(named_entry->is_sub_map) {
1776 vm_map_deallocate(named_entry->backing.map);
1777 }
1778 kfree((vm_offset_t)port->ip_kobject,
1779 sizeof (struct vm_named_entry));
1780 } else
1781 mutex_unlock(&(named_entry)->Lock);
1782 }
1783
1784
1785 kern_return_t
1786 vm_map_page_query(
1787 vm_map_t target_map,
1788 vm_offset_t offset,
1789 int *disposition,
1790 int *ref_count)
1791 {
1792 vm_map_entry_t map_entry;
1793 vm_object_t object;
1794 vm_page_t m;
1795
1796 restart_page_query:
1797 *disposition = 0;
1798 *ref_count = 0;
1799 vm_map_lock(target_map);
1800 if(!vm_map_lookup_entry(target_map, offset, &map_entry)) {
1801 vm_map_unlock(target_map);
1802 return KERN_FAILURE;
1803 }
1804 offset -= map_entry->vme_start; /* adjust to offset within entry */
1805 offset += map_entry->offset; /* adjust to target object offset */
1806 if(map_entry->object.vm_object != VM_OBJECT_NULL) {
1807 if(!map_entry->is_sub_map) {
1808 object = map_entry->object.vm_object;
1809 } else {
1810 vm_map_unlock(target_map);
1811 target_map = map_entry->object.sub_map;
1812 goto restart_page_query;
1813 }
1814 } else {
1815 vm_map_unlock(target_map);
1816 return KERN_FAILURE;
1817 }
1818 vm_object_lock(object);
1819 vm_map_unlock(target_map);
1820 while(TRUE) {
1821 m = vm_page_lookup(object, offset);
1822 if (m != VM_PAGE_NULL) {
1823 *disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
1824 break;
1825 } else {
1826 if(object->shadow) {
1827 offset += object->shadow_offset;
1828 vm_object_unlock(object);
1829 object = object->shadow;
1830 vm_object_lock(object);
1831 continue;
1832 }
1833 vm_object_unlock(object);
1834 return KERN_FAILURE;
1835 }
1836 }
1837
1838 /* The ref_count is not strictly accurate; it measures the number */
1839 /* of entities holding a ref on the object. They may not be mapping */
1840 /* the object, or may not be mapping the section holding the */
1841 /* target page, but it is still a ballpark number and, though an */
1842 /* overcount, it picks up the copy-on-write cases. */
1843
1844 /* We could also get a picture of page sharing from pmap_attributes, */
1845 /* but this would undercount since only faulted-in mappings would */
1846 /* show up. */
1847
1848 *ref_count = object->ref_count;
1849
1850 if (m->fictitious) {
1851 *disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
1852 vm_object_unlock(object);
1853 return KERN_SUCCESS;
1854 }
1855
1856 if (m->dirty)
1857 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
1858 else if(pmap_is_modified(m->phys_addr))
1859 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
1860
1861 if (m->reference)
1862 *disposition |= VM_PAGE_QUERY_PAGE_REF;
1863 else if(pmap_is_referenced(m->phys_addr))
1864 *disposition |= VM_PAGE_QUERY_PAGE_REF;
1865
1866 vm_object_unlock(object);
1867 return KERN_SUCCESS;
1868
1869 }
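
/*
 * Illustrative sketch (editorial addition): asking whether the page behind
 * an address is resident and dirty.  Helper name is hypothetical.
 */
#if 0	/* example only */
static boolean_t
example_page_is_dirty(vm_map_t map, vm_offset_t addr)
{
	int	disposition;
	int	ref_count;

	if (vm_map_page_query(map, trunc_page(addr),
			      &disposition, &ref_count) != KERN_SUCCESS)
		return FALSE;

	return (disposition & VM_PAGE_QUERY_PAGE_PRESENT) &&
	       (disposition & VM_PAGE_QUERY_PAGE_DIRTY);
}
#endif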
1870
1871 kern_return_t
1872 set_dp_control_port(
1873 host_priv_t host_priv,
1874 ipc_port_t control_port)
1875 {
1876 if (host_priv == HOST_PRIV_NULL)
1877 return (KERN_INVALID_HOST);
1878 dynamic_pager_control_port = control_port;
1879 return KERN_SUCCESS;
1880 }
1881
1882 kern_return_t
1883 get_dp_control_port(
1884 host_priv_t host_priv,
1885 ipc_port_t *control_port)
1886 {
1887 if (host_priv == HOST_PRIV_NULL)
1888 return (KERN_INVALID_HOST);
1889 *control_port = dynamic_pager_control_port;
1890 return KERN_SUCCESS;
1891
1892 }
1893
1894 void
1895 mach_destroy_upl(
1896 ipc_port_t port)
1897 {
1898 upl_t upl;
1899 #if MACH_ASSERT
1900 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
1901 #endif /* MACH_ASSERT */
1902 upl = (upl_t)port->ip_kobject;
1903 mutex_lock(&(upl)->Lock);
1904 upl->ref_count-=1;
1905 if(upl->ref_count == 0) {
1906 mutex_unlock(&(upl)->Lock);
1907 uc_upl_abort(upl, UPL_ABORT_ERROR);
1908 } else
1909 mutex_unlock(&(upl)->Lock);
1910 }
1911
1912 /* Retrieve a upl for an object underlying an address range in a map */
1913
1914 kern_return_t
1915 vm_map_get_upl(
1916 vm_map_t map,
1917 vm_offset_t offset,
1918 vm_size_t *upl_size,
1919 upl_t *upl,
1920 upl_page_info_t **page_list,
1921 int *count,
1922 int *flags,
1923 int force_data_sync)
1924 {
1925 vm_map_entry_t entry;
1926 int caller_flags;
1927
1928 caller_flags = *flags;
1929 if(upl == NULL)
1930 return KERN_INVALID_ARGUMENT;
1931 REDISCOVER_ENTRY:
1932 vm_map_lock(map);
1933 if (vm_map_lookup_entry(map, offset, &entry)) {
1934 if((entry->vme_end - offset) < *upl_size) {
1935 *upl_size = entry->vme_end - offset;
1936 }
1937 /*
1938 * Create an object if necessary.
1939 */
1940 if (entry->object.vm_object == VM_OBJECT_NULL) {
1941 entry->object.vm_object = vm_object_allocate(
1942 (vm_size_t)(entry->vme_end - entry->vme_start));
1943 entry->offset = 0;
1944 }
1945 if (!(caller_flags & UPL_COPYOUT_FROM)) {
1946 if (entry->needs_copy
1947 || entry->object.vm_object->copy) {
1948 vm_map_t local_map;
1949 vm_object_t object;
1950 vm_object_offset_t offset_hi;
1951 vm_object_offset_t offset_lo;
1952 vm_object_offset_t new_offset;
1953 vm_prot_t prot;
1954 boolean_t wired;
1955 vm_behavior_t behavior;
1956 vm_map_version_t version;
1957 vm_map_t pmap_map;
1958
1959 local_map = map;
1960 vm_map_lock_write_to_read(map);
1961 if(vm_map_lookup_locked(&local_map,
1962 offset, VM_PROT_WRITE,
1963 &version, &object,
1964 &new_offset, &prot, &wired,
1965 &behavior, &offset_lo,
1966 &offset_hi, &pmap_map)) {
1967 vm_map_unlock(local_map);
1968 return KERN_FAILURE;
1969 }
1970 if (pmap_map != map) {
1971 vm_map_unlock(pmap_map);
1972 }
1973 vm_object_unlock(object);
1974 vm_map_unlock(local_map);
1975
1976 goto REDISCOVER_ENTRY;
1977 }
1978 }
1979 if (entry->is_sub_map) {
1980 vm_map_unlock(map);
1981 return (vm_map_get_upl(entry->object.sub_map,
1982 entry->offset + (offset - entry->vme_start),
1983 upl_size, upl, page_list, count,
1984 flags, force_data_sync));
1985 }
1986
1987 if (!(caller_flags & UPL_COPYOUT_FROM)) {
1988 if (entry->object.vm_object->shadow) {
1989 int flags;
1990 vm_map_unlock(map);
1991
1992 vm_object_reference(entry->object.vm_object);
1993 if(entry->object.vm_object->copy == NULL) {
1994 flags = MEMORY_OBJECT_DATA_SYNC;
1995 } else {
1996 flags = MEMORY_OBJECT_COPY_SYNC;
1997 }
1998
1999 memory_object_lock_request(
2000 entry->object.vm_object,
2001 (offset - entry->vme_start)
2002 + entry->offset,
2003 (vm_object_size_t)*upl_size, FALSE,
2004 flags,
2005 VM_PROT_NO_CHANGE, NULL, 0);
2006 vm_map_lock(map);
2007 }
2008 }
2009
2010 if (force_data_sync) {
2011 vm_map_unlock(map);
2012 vm_object_reference(entry->object.vm_object);
2013
2014 memory_object_lock_request(
2015 entry->object.vm_object,
2016 (offset - entry->vme_start)
2017 + entry->offset,
2018 (vm_object_size_t)*upl_size, FALSE,
2019 MEMORY_OBJECT_DATA_SYNC,
2020 VM_PROT_NO_CHANGE,
2021 NULL, 0);
2022 vm_map_lock(map);
2023 }
2024
2025 if(!(entry->object.vm_object->private)) {
2026 if(*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
2027 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
2028 if(entry->object.vm_object->phys_contiguous) {
2029 *flags = UPL_PHYS_CONTIG;
2030 } else {
2031 *flags = 0;
2032 }
2033 } else {
2034 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
2035 }
2036 vm_map_unlock(map);
2037 return(vm_fault_list_request(entry->object.vm_object,
2038 ((offset - entry->vme_start) + entry->offset),
2039 *upl_size,
2040 upl,
2041 page_list,
2042 *count,
2043 caller_flags));
2044 }
2045
2046 vm_map_unlock(map);
2047 return(KERN_FAILURE);
2048
2049 }
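
/*
 * Illustrative sketch (editorial addition): fetching a UPL that covers (at
 * most) the map entry containing "addr".  The flags word is both an input
 * (UPL_COPYOUT_FROM: pages will only be read) and an output (UPL_PHYS_CONTIG,
 * UPL_DEV_MEMORY); *len may come back clipped.  Helper name is hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_grab_upl(vm_map_t map, vm_offset_t addr, vm_size_t *len,
		 upl_t *upl, upl_page_info_t **pl, int *pl_count)
{
	int	flags = UPL_COPYOUT_FROM;

	return vm_map_get_upl(map, trunc_page(addr), len, upl, pl,
			      pl_count, &flags, FALSE);
}
#endif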
2050
2051
2052 kern_return_t
2053 vm_object_upl_request(
2054 vm_object_t object,
2055 vm_object_offset_t offset,
2056 vm_size_t size,
2057 ipc_port_t *upl,
2058 upl_page_info_t *page_list,
2059 mach_msg_type_number_t *count,
2060 int cntrl_flags)
2061 {
2062 upl_t upl_object;
2063 ipc_port_t upl_port;
2064 ipc_port_t previous;
2065 upl_page_info_t *pl;
2066 kern_return_t kr;
2067
2068 pl = page_list;
2069 kr = vm_fault_list_request(object, offset, size, &upl_object,
2070 &pl, *count, cntrl_flags);
2071
2072
2073 if(kr != KERN_SUCCESS) {
2074 *upl = MACH_PORT_NULL;
2075 return KERN_FAILURE;
2076 }
2077
2078 upl_port = ipc_port_alloc_kernel();
2079
2080
2081 ip_lock(upl_port);
2082
2083 /* make a sonce right */
2084 upl_port->ip_sorights++;
2085 ip_reference(upl_port);
2086
2087 upl_port->ip_destination = IP_NULL;
2088 upl_port->ip_receiver_name = MACH_PORT_NULL;
2089 upl_port->ip_receiver = ipc_space_kernel;
2090
2091 /* make a send right */
2092 upl_port->ip_mscount++;
2093 upl_port->ip_srights++;
2094 ip_reference(upl_port);
2095
2096 ipc_port_nsrequest(upl_port, 1, upl_port, &previous);
2097 /* nsrequest unlocks user_handle */
2098
2099 /* attach the UPL to the kernel port and hand it back to the caller as a send right */
2100
2101
2102 ipc_kobject_set(upl_port, (ipc_kobject_t) upl_object, IKOT_UPL);
2103 *upl = upl_port;
2104 return KERN_SUCCESS;
2105 }
2106
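/*
 * vm_pager_upl_request:  same as vm_object_upl_request, except that the
 * UPL is built through upl_system_list_request, which takes an extra
 * super_size argument for larger (clustered) requests.
 */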
2107 kern_return_t
2108 vm_pager_upl_request(
2109 vm_object_t object,
2110 vm_object_offset_t offset,
2111 vm_size_t size,
2112 vm_size_t super_size,
2113 ipc_port_t *upl,
2114 upl_page_info_t *page_list,
2115 mach_msg_type_number_t *count,
2116 int cntrl_flags)
2117 {
2118 upl_t upl_object;
2119 ipc_port_t upl_port;
2120 ipc_port_t previous;
2121 upl_page_info_t *pl;
2122 kern_return_t kr;
2123
2124 pl = page_list;
2125 kr = upl_system_list_request(object, offset, size, super_size,
2126 &upl_object, &pl, *count, cntrl_flags);
2127
2128 if(kr != KERN_SUCCESS) {
2129 *upl = MACH_PORT_NULL;
2130 return KERN_FAILURE;
2131 }
2132
2133
2134 upl_port = ipc_port_alloc_kernel();
2135
2136
2137 ip_lock(upl_port);
2138
2139 /* make a send-once (sonce) right */
2140 upl_port->ip_sorights++;
2141 ip_reference(upl_port);
2142
2143 upl_port->ip_destination = IP_NULL;
2144 upl_port->ip_receiver_name = MACH_PORT_NULL;
2145 upl_port->ip_receiver = ipc_space_kernel;
2146
2147 /* make a send right */
2148 upl_port->ip_mscount++;
2149 upl_port->ip_srights++;
2150 ip_reference(upl_port);
2151
2152 ipc_port_nsrequest(upl_port, 1, upl_port, &previous);
2153 /* nsrequest unlocks user_handle */
2154
2155 /* attach the UPL to the kernel port and hand it back to the caller as a send right */
2156
2157
2158 ipc_kobject_set(upl_port, (ipc_kobject_t) upl_object, IKOT_UPL);
2159 *upl = upl_port;
2160 return KERN_SUCCESS;
2161 }
2162
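/*
 * vm_upl_map:  convert a UPL port back into its upl_t and map the UPL's
 * pages into the given map, returning the chosen address in *dst_addr.
 */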
2163 kern_return_t
2164 vm_upl_map(
2165 vm_map_t map,
2166 ipc_port_t upl_port,
2167 vm_offset_t *dst_addr)
2168 {
2169 upl_t upl;
2170 kern_return_t kr;
2171
2172 if (!IP_VALID(upl_port)) {
2173 return KERN_INVALID_ARGUMENT;
2174 } else if (ip_kotype(upl_port) == IKOT_UPL) {
2175 upl = (upl_t)upl_port->ip_kobject;
2176 upl_lock(upl);
2177 kr = uc_upl_map(map, upl, dst_addr);
2178 upl_unlock(upl);
2179 return kr;
2180 } else {
2181 return KERN_FAILURE;
2182 }
2183 }
2184
2185
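/*
 * vm_upl_unmap:  remove a mapping previously established by vm_upl_map
 * from the given map.
 */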
2186 kern_return_t
2187 vm_upl_unmap(
2188 vm_map_t map,
2189 ipc_port_t upl_port)
2190 {
2191 upl_t upl;
2192 kern_return_t kr;
2193
2194 if (!IP_VALID(upl_port)) {
2195 return KERN_INVALID_ARGUMENT;
2196 } else if (ip_kotype(upl_port) == IKOT_UPL) {
2197 upl = (upl_t)upl_port->ip_kobject;
2198 upl_lock(upl);
2199 kr = uc_upl_un_map(map, upl);
2200 upl_unlock(upl);
2201 return kr;
2202 } else {
2203 return KERN_FAILURE;
2204 }
2205 }
2206
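/*
 * vm_upl_commit:  commit all pages of the UPL, optionally using the
 * supplied page list to describe their state.
 */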
2207 kern_return_t
2208 vm_upl_commit(
2209 upl_t upl,
2210 upl_page_list_ptr_t page_list,
2211 mach_msg_type_number_t count)
2212 {
2213 kern_return_t kr;
2214 upl_lock(upl);
2215 if(count) {
2216 kr = uc_upl_commit(upl, (upl_page_info_t *)page_list);
2217 } else {
2218 kr = uc_upl_commit(upl, (upl_page_info_t *) NULL);
2219 }
2220 upl_unlock(upl);
2221 return kr;
2222 }
2223
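/*
 * vm_upl_commit_range:  commit only the portion of the UPL described by
 * offset and size.
 */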
2224 kern_return_t
2225 vm_upl_commit_range(
2226 upl_t upl,
2227 vm_offset_t offset,
2228 vm_size_t size,
2229 upl_page_list_ptr_t page_list,
2230 int flags,
2231 mach_msg_type_number_t count)
2232 {
2233 kern_return_t kr;
2234 upl_lock(upl);
2235 if(count) {
2236 kr = uc_upl_commit_range(upl, offset, size, flags,
2237 (upl_page_info_t *)page_list);
2238 } else {
2239 kr = uc_upl_commit_range(upl, offset, size, flags,
2240 (upl_page_info_t *) NULL);
2241 }
2242 upl_unlock(upl);
2243 return kr;
2244 }
2245
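/*
 * vm_upl_abort_range:  abort (discard) the portion of the UPL described
 * by offset and size.
 */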
2246 kern_return_t
2247 vm_upl_abort_range(
2248 upl_t upl,
2249 vm_offset_t offset,
2250 vm_size_t size,
2251 int abort_flags)
2252 {
2253 kern_return_t kr;
2254 upl_lock(upl);
2255 kr = uc_upl_abort_range(upl, offset, size, abort_flags);
2256 upl_unlock(upl);
2257 return kr;
2258 }
2259
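/*
 * vm_upl_abort:  abort the entire UPL.
 */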
2260 kern_return_t
2261 vm_upl_abort(
2262 upl_t upl,
2263 int abort_type)
2264 {
2265 kern_return_t kr;
2266 upl_lock(upl);
2267 kr = uc_upl_abort(upl, abort_type);
2268 upl_unlock(upl);
2269 return kr;
2270 }
2271
2272 /* ******* Temporary Internal calls to UPL for BSD ***** */
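/*
 * Unlike the vm_upl_* entry points above, these variants manage the UPL
 * reference count themselves:  kernel_upl_map takes a reference for the
 * mapping, which kernel_upl_unmap drops again, and the commit/abort
 * calls hold a reference across the operation, deallocating the UPL
 * when the last reference goes away.
 */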
2273 kern_return_t
2274 kernel_upl_map(
2275 vm_map_t map,
2276 upl_t upl,
2277 vm_offset_t *dst_addr)
2278 {
2279 kern_return_t kr;
2280
2281 upl_lock(upl);
2282 kr = uc_upl_map(map, upl, dst_addr);
2283 if(kr == KERN_SUCCESS) {
2284 upl->ref_count += 1;
2285 }
2286 upl_unlock(upl);
2287 return kr;
2288 }
2289
2290
2291 kern_return_t
2292 kernel_upl_unmap(
2293 vm_map_t map,
2294 upl_t upl)
2295 {
2296 kern_return_t kr;
2297
2298 upl_lock(upl);
2299 kr = uc_upl_un_map(map, upl);
2300 if(kr == KERN_SUCCESS) {
2301 if(upl->ref_count == 1) {
2302 upl_dealloc(upl);
2303 } else {
2304 upl->ref_count -= 1;
2305 upl_unlock(upl);
2306 }
2307 } else {
2308 upl_unlock(upl);
2309 }
2310 return kr;
2311 }
2312
2313 kern_return_t
2314 kernel_upl_commit(
2315 upl_t upl,
2316 upl_page_list_ptr_t page_list,
2317 mach_msg_type_number_t count)
2318 {
2319 kern_return_t kr;
2320 upl_lock(upl);
2321 upl->ref_count += 1;
2322 if(count) {
2323 kr = uc_upl_commit(upl, (upl_page_info_t *)page_list);
2324 } else {
2325 kr = uc_upl_commit(upl, (upl_page_info_t *) NULL);
2326 }
2327 if(upl->ref_count == 1) {
2328 upl_dealloc(upl);
2329 } else {
2330 upl->ref_count -= 1;
2331 upl_unlock(upl);
2332 }
2333 return kr;
2334 }
2335
2336 kern_return_t
2337 kernel_upl_commit_range(
2338 upl_t upl,
2339 vm_offset_t offset,
2340 vm_size_t size,
2341 int flags,
2342 upl_page_list_ptr_t page_list,
2343 mach_msg_type_number_t count)
2344 {
2345 kern_return_t kr;
2346 upl_lock(upl);
2347 upl->ref_count += 1;
2348 if(count) {
2349 kr = uc_upl_commit_range(upl, offset, size, flags,
2350 (upl_page_info_t *)page_list);
2351 } else {
2352 kr = uc_upl_commit_range(upl, offset, size, flags,
2353 (upl_page_info_t *) NULL);
2354 }
2355 if(upl->ref_count == 1) {
2356 upl_dealloc(upl);
2357 } else {
2358 upl->ref_count -= 1;
2359 upl_unlock(upl);
2360 }
2361 return kr;
2362 }
2363
2364 kern_return_t
2365 kernel_upl_abort_range(
2366 upl_t upl,
2367 vm_offset_t offset,
2368 vm_size_t size,
2369 int abort_flags)
2370 {
2371 kern_return_t kr;
2372 upl_lock(upl);
2373 upl->ref_count += 1;
2374 kr = uc_upl_abort_range(upl, offset, size, abort_flags);
2375 if(upl->ref_count == 1) {
2376 upl_dealloc(upl);
2377 } else {
2378 upl->ref_count -= 1;
2379 upl_unlock(upl);
2380 }
2381 return kr;
2382 }
2383
2384 kern_return_t
2385 kernel_upl_abort(
2386 upl_t upl,
2387 int abort_type)
2388 {
2389 kern_return_t kr;
2390 upl_lock(upl);
2391 upl->ref_count += 1;
2392 kr = uc_upl_abort(upl, abort_type);
2393 if(upl->ref_count == 1) {
2394 upl_dealloc(upl);
2395 } else {
2396 upl->ref_count -= 1;
2397 upl_unlock(upl);
2398 }
2399 return kr;
2400 }
2401
2402
2403
2404 /* code snippet from vm_map */
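/*
 * vm_object_create_nomap:  create a named memory object of the given
 * size without mapping it; the returned vm_object handle is discarded.
 */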
2405 kern_return_t
2406 vm_object_create_nomap(ipc_port_t port, vm_object_size_t size)
2407 {
2408 vm_object_t object_ptr;
2409 return memory_object_create_named(port, size, &object_ptr);
2410 }
2411
2412
2413 /*
2414 * Temporary interface to overcome old style ipc artifacts, and allow
2415 * ubc to call this routine directly. Will disappear with new RPC
2416 * component architecture.
2417 * NOTE: call to memory_object_destroy removes the vm_object's association
2418 * with its abstract memory object and hence the named flag is set to false.
2419 */
2420 kern_return_t
2421 memory_object_destroy_named(
2422 vm_object_t object,
2423 kern_return_t reason)
2424 {
2425 vm_object_lock(object);
2426 if(object->named == FALSE) {
2427 panic("memory_object_destroy_named called by party which doesn't hold right");
2428 }
2429 object->ref_count++;
2430 vm_object_res_reference(object);
2431 vm_object_unlock(object);
2432 return (memory_object_destroy(object, reason));
2433 }
2434
2435 /*
2436 * Temporary interface to overcome old style ipc artifacts, and allow
2437 * ubc to call this routine directly. Will disappear with new RPC
2438 * component architecture.
2439 * Note: No change is made in the named flag.
2440 */
2441 kern_return_t
2442 memory_object_lock_request_named(
2443 vm_object_t object,
2444 vm_object_offset_t offset,
2445 vm_object_size_t size,
2446 memory_object_return_t should_return,
2447 boolean_t should_flush,
2448 vm_prot_t prot,
2449 ipc_port_t reply_to)
2450 {
2451 vm_object_lock(object);
2452 if(object->named == FALSE) {
2453 panic("memory_object_lock_request_named called by party which doesn't hold right");
2454 }
2455 object->ref_count++;
2456 vm_object_res_reference(object);
2457 vm_object_unlock(object);
2458 return (memory_object_lock_request(object,
2459 offset, size, should_return, should_flush, prot,
2460 reply_to, 0));
2461 }
2462
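/*
 * Same temporary interface as the two routines above:  takes an extra
 * reference on a named object and forwards to
 * memory_object_change_attributes.  No change is made in the named flag.
 */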
2463 kern_return_t
2464 memory_object_change_attributes_named(
2465 vm_object_t object,
2466 memory_object_flavor_t flavor,
2467 memory_object_info_t attributes,
2468 mach_msg_type_number_t count,
2469 ipc_port_t reply_to,
2470 mach_msg_type_name_t reply_to_type)
2471 {
2472 vm_object_lock(object);
2473 if(object->named == FALSE) {
2474 panic("memory_object_lock_request_named called by party which doesn't hold right");
2475 }
2476 object->ref_count++;
2477 vm_object_res_reference(object);
2478 vm_object_unlock(object);
2479 return (memory_object_change_attributes(object,
2480 flavor, attributes, count, reply_to, reply_to_type));
2481 }
2482
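/*
 * vm_get_shared_region/vm_set_shared_region:  read and replace the
 * shared region mapping associated with a task.
 */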
2483 kern_return_t
2484 vm_get_shared_region(
2485 task_t task,
2486 shared_region_mapping_t *shared_region)
2487 {
2488 *shared_region = (shared_region_mapping_t) task->system_shared_region;
2489 return KERN_SUCCESS;
2490 }
2491
2492 kern_return_t
2493 vm_set_shared_region(
2494 task_t task,
2495 shared_region_mapping_t shared_region)
2496 {
2497 task->system_shared_region = (vm_offset_t) shared_region;
2498 return KERN_SUCCESS;
2499 }
2500
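/*
 * shared_region_mapping_info:  return a snapshot of the fields of a
 * shared region mapping, taken under the mapping lock.
 */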
2501 kern_return_t
2502 shared_region_mapping_info(
2503 shared_region_mapping_t shared_region,
2504 ipc_port_t *text_region,
2505 vm_size_t *text_size,
2506 ipc_port_t *data_region,
2507 vm_size_t *data_size,
2508 vm_offset_t *region_mappings,
2509 vm_offset_t *client_base,
2510 vm_offset_t *alt_base,
2511 vm_offset_t *alt_next,
2512 int *flags,
2513 shared_region_mapping_t *next)
2514 {
2515 shared_region_mapping_lock(shared_region);
2516
2517 *text_region = shared_region->text_region;
2518 *text_size = shared_region->text_size;
2519 *data_region = shared_region->data_region;
2520 *data_size = shared_region->data_size;
2521 *region_mappings = shared_region->region_mappings;
2522 *client_base = shared_region->client_base;
2523 *alt_base = shared_region->alternate_base;
2524 *alt_next = shared_region->alternate_next;
2525 *flags = shared_region->flags;
2526 *next = shared_region->next;
2527
2528 shared_region_mapping_unlock(shared_region);
return KERN_SUCCESS;
2529 }
2530
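/*
 * shared_region_object_chain_attach:  link target_region onto the
 * object chain headed by object_chain_region; fails if the target
 * already has an object chain attached.
 */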
2531 kern_return_t
2532 shared_region_object_chain_attach(
2533 shared_region_mapping_t target_region,
2534 shared_region_mapping_t object_chain_region)
2535 {
2536 shared_region_object_chain_t object_ele;
2537
2538 if(target_region->object_chain)
2539 return KERN_FAILURE;
2540 object_ele = (shared_region_object_chain_t)
2541 kalloc(sizeof (struct shared_region_object_chain));
if(object_ele == NULL)
return KERN_FAILURE;
2542 shared_region_mapping_lock(object_chain_region);
2543 target_region->object_chain = object_ele;
2544 object_ele->object_chain_region = object_chain_region;
2545 object_ele->next = object_chain_region->object_chain;
2546 object_ele->depth = object_chain_region->depth;
2547 object_chain_region->depth++;
2548 target_region->alternate_next = object_chain_region->alternate_next;
2549 shared_region_mapping_unlock(object_chain_region);
2550 return KERN_SUCCESS;
2551 }
2552
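/*
 * shared_region_mapping_create:  allocate and initialize a new shared
 * region mapping with a single reference.
 *
 * A rough sketch of how a region might be installed for a task
 * (illustrative only; text_port, data_port and friends are made-up
 * caller-side names):
 *
 *	shared_region_mapping_t region;
 *
 *	shared_region_mapping_create(text_port, text_size,
 *			data_port, data_size, mappings, base,
 *			&region, alt_base, alt_next);
 *	vm_set_shared_region(task, region);
 */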
2553 kern_return_t
2554 shared_region_mapping_create(
2555 ipc_port_t text_region,
2556 vm_size_t text_size,
2557 ipc_port_t data_region,
2558 vm_size_t data_size,
2559 vm_offset_t region_mappings,
2560 vm_offset_t client_base,
2561 shared_region_mapping_t *shared_region,
2562 vm_offset_t alt_base,
2563 vm_offset_t alt_next)
2564 {
2565 *shared_region = (shared_region_mapping_t)
2566 kalloc(sizeof (struct shared_region_mapping));
2567 if(*shared_region == NULL)
2568 return KERN_FAILURE;
2569 shared_region_mapping_lock_init((*shared_region));
2570 (*shared_region)->text_region = text_region;
2571 (*shared_region)->text_size = text_size;
2572 (*shared_region)->data_region = data_region;
2573 (*shared_region)->data_size = data_size;
2574 (*shared_region)->region_mappings = region_mappings;
2575 (*shared_region)->client_base = client_base;
2576 (*shared_region)->ref_count = 1;
2577 (*shared_region)->next = NULL;
2578 (*shared_region)->object_chain = NULL;
2579 (*shared_region)->self = *shared_region;
2580 (*shared_region)->flags = 0;
2581 (*shared_region)->depth = 0;
2582 (*shared_region)->alternate_base = alt_base;
2583 (*shared_region)->alternate_next = alt_next;
2584 return KERN_SUCCESS;
2585 }
2586
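/*
 * shared_region_mapping_set_alt_next:  update the mapping's
 * alternate_next field; note that no locking is done here.
 */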
2587 kern_return_t
2588 shared_region_mapping_set_alt_next(
2589 shared_region_mapping_t shared_region,
2590 vm_offset_t alt_next)
2591 {
2592 shared_region->alternate_next = alt_next;
2593 return KERN_SUCCESS;
2594 }
2595
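/*
 * shared_region_mapping_ref/shared_region_mapping_dealloc:  reference
 * counting for shared region mappings.  Dropping the last reference
 * tears down the region's mappings, releases its text and data region
 * ports and frees the structure (and any attached object chain).
 */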
2596 kern_return_t
2597 shared_region_mapping_ref(
2598 shared_region_mapping_t shared_region)
2599 {
2600 if(shared_region == NULL)
2601 return KERN_SUCCESS;
2602 shared_region_mapping_lock(shared_region);
2603 shared_region->ref_count++;
2604 shared_region_mapping_unlock(shared_region);
2605 return KERN_SUCCESS;
2606 }
2607
2608 kern_return_t
2609 shared_region_mapping_dealloc(
2610 shared_region_mapping_t shared_region)
2611 {
2612 struct shared_region_task_mappings sm_info;
2613 shared_region_mapping_t next;
2614
2615 if(shared_region == NULL)
2616 return KERN_SUCCESS;
2617 shared_region_mapping_lock(shared_region);
2618
2619 if((--shared_region->ref_count) == 0) {
2620
2621 sm_info.text_region = shared_region->text_region;
2622 sm_info.text_size = shared_region->text_size;
2623 sm_info.data_region = shared_region->data_region;
2624 sm_info.data_size = shared_region->data_size;
2625 sm_info.region_mappings = shared_region->region_mappings;
2626 sm_info.client_base = shared_region->client_base;
2627 sm_info.alternate_base = shared_region->alternate_base;
2628 sm_info.alternate_next = shared_region->alternate_next;
2629 sm_info.flags = shared_region->flags;
2630 sm_info.self = shared_region;
2631
2632 lsf_remove_regions_mappings(shared_region, &sm_info);
2633 pmap_remove(((vm_named_entry_t)
2634 (shared_region->text_region->ip_kobject))
2635 ->backing.map->pmap,
2636 sm_info.client_base,
2637 sm_info.client_base + sm_info.text_size);
2638 ipc_port_release_send(shared_region->text_region);
2639 ipc_port_release_send(shared_region->data_region);
2640 if(shared_region->object_chain) {
2641 shared_region_mapping_dealloc(
2642 shared_region->object_chain->object_chain_region);
2643 kfree((vm_offset_t)shared_region->object_chain,
2644 sizeof (struct shared_region_object_chain));
2645 }
2646 kfree((vm_offset_t)shared_region,
2647 sizeof (struct shared_region_mapping));
2648 return KERN_SUCCESS;
2649 }
2650 shared_region_mapping_unlock(shared_region);
2651 return KERN_SUCCESS;
2652 }
2653
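/*
 * vm_map_get_phys_page:  walk the map (following submaps and shadow
 * chains) to find the physical address backing the given offset, or
 * return 0 if no page is resident there.
 *
 * A minimal usage sketch (illustrative only; "vaddr" is a made-up
 * caller-side variable):
 *
 *	vm_offset_t phys;
 *
 *	phys = vm_map_get_phys_page(kernel_map, trunc_page(vaddr));
 *	if (phys == 0)
 *		... no resident page at vaddr ...
 */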
2654 vm_offset_t
2655 vm_map_get_phys_page(
2656 vm_map_t map,
2657 vm_offset_t offset)
2658 {
2659 vm_map_entry_t entry;
2660 int ops;
2661 int flags;
2662 vm_offset_t phys_addr = 0;
2663 vm_object_t object;
2664
2665 vm_map_lock(map);
2666 while (vm_map_lookup_entry(map, offset, &entry)) {
2667
2668 if (entry->object.vm_object == VM_OBJECT_NULL) {
2669 vm_map_unlock(map);
2670 return (vm_offset_t) 0;
2671 }
2672 if (entry->is_sub_map) {
2673 vm_map_t old_map;
2674 vm_map_lock(entry->object.sub_map);
2675 old_map = map;
2676 map = entry->object.sub_map;
2677 offset = entry->offset + (offset - entry->vme_start);
2678 vm_map_unlock(old_map);
2679 continue;
2680 }
2681 offset = entry->offset + (offset - entry->vme_start);
2682 object = entry->object.vm_object;
2683 vm_object_lock(object);
2684 while (TRUE) {
2685 vm_page_t dst_page = vm_page_lookup(object,offset);
2686 if(dst_page == VM_PAGE_NULL) {
2687 if(object->shadow) {
2688 vm_object_t old_object;
2689 vm_object_lock(object->shadow);
2690 old_object = object;
2691 offset = offset + object->shadow_offset;
2692 object = object->shadow;
2693 vm_object_unlock(old_object);
2694 } else {
2695 vm_object_unlock(object);
2696 break;
2697 }
2698 } else {
2699 phys_addr = dst_page->phys_addr;
2700 vm_object_unlock(object);
2701 break;
2702 }
2703 }
2704 break;
2705
2706 }
2707
2708 vm_map_unlock(map);
2709 return phys_addr;
2710 }
2711 #endif /* VM_CPM */