1c79356b 1/*
0b4e3aa0 2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52/*
53 * File: vm/vm_user.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young
55 *
56 * User-exported virtual memory functions.
57 */
58
59#include <vm_cpm.h>
60#include <mach/boolean.h>
61#include <mach/kern_return.h>
62#include <mach/mach_types.h> /* to get vm_address_t */
63#include <mach/memory_object.h>
64#include <mach/std_types.h> /* to get pointer_t */
65#include <mach/vm_attributes.h>
66#include <mach/vm_param.h>
67#include <mach/vm_statistics.h>
68#include <mach/vm_map_server.h>
69#include <mach/mach_syscalls.h>
70#include <mach/shared_memory_server.h>
71
72#include <kern/host.h>
73#include <kern/task.h>
74#include <kern/misc_protos.h>
75#include <vm/vm_map.h>
76#include <vm/vm_object.h>
77#include <vm/vm_page.h>
78#include <vm/memory_object.h>
79#include <vm/vm_pageout.h>
80
81
82
83vm_size_t upl_offset_to_pagelist = 0;
84
85#if VM_CPM
86#include <vm/cpm.h>
87#endif /* VM_CPM */
88
89ipc_port_t dynamic_pager_control_port=NULL;
90
91/*
 92 * vm_allocate allocates "zero fill" memory in the specified
93 * map.
94 */
95kern_return_t
96vm_allocate(
97 register vm_map_t map,
98 register vm_offset_t *addr,
99 register vm_size_t size,
100 int flags)
101{
102 kern_return_t result;
103 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
104
105 if (map == VM_MAP_NULL)
106 return(KERN_INVALID_ARGUMENT);
107 if (size == 0) {
108 *addr = 0;
109 return(KERN_SUCCESS);
110 }
111
112 if (anywhere)
113 *addr = vm_map_min(map);
114 else
115 *addr = trunc_page(*addr);
116 size = round_page(size);
117 if (size == 0) {
118 return(KERN_INVALID_ARGUMENT);
119 }
120
121 result = vm_map_enter(
122 map,
123 addr,
124 size,
125 (vm_offset_t)0,
126 flags,
127 VM_OBJECT_NULL,
128 (vm_object_offset_t)0,
129 FALSE,
130 VM_PROT_DEFAULT,
131 VM_PROT_ALL,
132 VM_INHERIT_DEFAULT);
133
134 return(result);
135}
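/*
 * Illustrative only (not part of the original source): a minimal
 * user-level sketch of the vm_allocate/vm_deallocate pair whose kernel
 * side is implemented above.  It assumes the standard user-space Mach
 * headers (<mach/mach.h>); the names "buf" and "len" are placeholders.
 *
 *	vm_address_t buf = 0;
 *	vm_size_t len = 4 * vm_page_size;
 *	kern_return_t kr;
 *
 *	kr = vm_allocate(mach_task_self(), &buf, len, VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		... use the zero-filled pages at buf ...
 *		(void) vm_deallocate(mach_task_self(), buf, len);
 *	}
 */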
136
137/*
138 * vm_deallocate deallocates the specified range of addresses in the
139 * specified address map.
140 */
141kern_return_t
142vm_deallocate(
143 register vm_map_t map,
144 vm_offset_t start,
145 vm_size_t size)
146{
147 if (map == VM_MAP_NULL)
148 return(KERN_INVALID_ARGUMENT);
149
150 if (size == (vm_offset_t) 0)
151 return(KERN_SUCCESS);
152
153 return(vm_map_remove(map, trunc_page(start),
154 round_page(start+size), VM_MAP_NO_FLAGS));
155}
156
157/*
158 * vm_inherit sets the inheritance of the specified range in the
159 * specified map.
160 */
161kern_return_t
162vm_inherit(
163 register vm_map_t map,
164 vm_offset_t start,
165 vm_size_t size,
166 vm_inherit_t new_inheritance)
167{
168 if (map == VM_MAP_NULL)
169 return(KERN_INVALID_ARGUMENT);
170
171 if (new_inheritance > VM_INHERIT_LAST_VALID)
172 return(KERN_INVALID_ARGUMENT);
173
174 return(vm_map_inherit(map,
175 trunc_page(start),
176 round_page(start+size),
177 new_inheritance));
178}
179
180/*
181 * vm_protect sets the protection of the specified range in the
182 * specified map.
183 */
184
185kern_return_t
186vm_protect(
187 register vm_map_t map,
188 vm_offset_t start,
189 vm_size_t size,
190 boolean_t set_maximum,
191 vm_prot_t new_protection)
192{
193 if ((map == VM_MAP_NULL) ||
194 (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
195 return(KERN_INVALID_ARGUMENT);
196
197 return(vm_map_protect(map,
198 trunc_page(start),
199 round_page(start+size),
200 new_protection,
201 set_maximum));
202}
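/*
 * Illustrative only: a hedged user-level sketch of vm_protect(), here
 * turning one page of an existing allocation into a guard page.  The
 * names "buf" and "kr" are placeholders for a previously allocated
 * region; set_maximum is FALSE so only the current protection changes.
 *
 *	kr = vm_protect(mach_task_self(), buf, vm_page_size,
 *			FALSE, VM_PROT_NONE);
 */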
203
204/*
205 * Handle machine-specific attributes for a mapping, such
206 * as cachability, migrability, etc.
207 */
208kern_return_t
209vm_machine_attribute(
210 vm_map_t map,
211 vm_address_t address,
212 vm_size_t size,
213 vm_machine_attribute_t attribute,
214 vm_machine_attribute_val_t* value) /* IN/OUT */
215{
216 if (map == VM_MAP_NULL)
217 return(KERN_INVALID_ARGUMENT);
218
219 return vm_map_machine_attribute(map, address, size, attribute, value);
220}
221
222kern_return_t
223vm_read(
224 vm_map_t map,
225 vm_address_t address,
226 vm_size_t size,
227 pointer_t *data,
228 mach_msg_type_number_t *data_size)
229{
230 kern_return_t error;
231 vm_map_copy_t ipc_address;
232
233 if (map == VM_MAP_NULL)
234 return(KERN_INVALID_ARGUMENT);
235
236 if ((error = vm_map_copyin(map,
237 address,
238 size,
239 FALSE, /* src_destroy */
240 &ipc_address)) == KERN_SUCCESS) {
241 *data = (pointer_t) ipc_address;
242 *data_size = size;
243 }
244 return(error);
245}
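/*
 * Usage note (illustrative, not in the original source): vm_read()
 * hands the data back out-of-line as a vm_map_copy_t, which MIG maps
 * into the caller's address space, so a user-level caller receives a
 * fresh buffer and must vm_deallocate() it when done.  A hedged sketch,
 * where "task", "remote_addr" and "remote_size" are placeholders:
 *
 *	vm_offset_t data = 0;
 *	mach_msg_type_number_t data_count = 0;
 *
 *	kr = vm_read(task, remote_addr, remote_size, &data, &data_count);
 *	if (kr == KERN_SUCCESS) {
 *		... inspect data_count bytes at data ...
 *		(void) vm_deallocate(mach_task_self(), data, data_count);
 *	}
 */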
246
247kern_return_t
248vm_read_list(
249 vm_map_t map,
250 vm_read_entry_t data_list,
251 mach_msg_type_number_t count)
252{
253 mach_msg_type_number_t i;
254 kern_return_t error;
255 vm_map_copy_t ipc_address;
256
257 if (map == VM_MAP_NULL)
258 return(KERN_INVALID_ARGUMENT);
259
260 for(i=0; i<count; i++) {
261 error = vm_map_copyin(map,
262 data_list[i].address,
263 data_list[i].size,
264 FALSE, /* src_destroy */
265 &ipc_address);
266 if(error != KERN_SUCCESS) {
267 data_list[i].address = (vm_address_t)0;
268 data_list[i].size = (vm_size_t)0;
269 break;
270 }
271 if(data_list[i].size != 0) {
272 error = vm_map_copyout(current_task()->map,
273 &(data_list[i].address),
274 (vm_map_copy_t) ipc_address);
275 if(error != KERN_SUCCESS) {
276 data_list[i].address = (vm_address_t)0;
277 data_list[i].size = (vm_size_t)0;
278 break;
279 }
280 }
281 }
282 return(error);
283}
284
285/*
286 * This routine reads from the specified map and overwrites part of the current
 287 * activation's map. Because it assumes that the current thread is local,
 288 * it is no longer cluster-safe without a fully supportive local proxy thread/
 289 * task (but we don't support clusters anymore, so this is moot).
290 */
291
292#define VM_OVERWRITE_SMALL 512
293
294kern_return_t
295vm_read_overwrite(
296 vm_map_t map,
297 vm_address_t address,
298 vm_size_t size,
299 vm_address_t data,
300 vm_size_t *data_size)
301{
302 struct {
303 long align;
304 char buf[VM_OVERWRITE_SMALL];
305 } inbuf;
306 vm_map_t oldmap;
307 kern_return_t error = KERN_SUCCESS;
308 vm_map_copy_t copy;
309
310 if (map == VM_MAP_NULL)
311 return(KERN_INVALID_ARGUMENT);
312
313 if (size <= VM_OVERWRITE_SMALL) {
314 if(vm_map_read_user(map, (vm_offset_t)address,
315 (vm_offset_t)&inbuf, size)) {
316 error = KERN_INVALID_ADDRESS;
317 } else {
318 if(vm_map_write_user(current_map(),
319 (vm_offset_t)&inbuf, (vm_offset_t)data, size))
320 error = KERN_INVALID_ADDRESS;
321 }
322 }
323 else {
324 if ((error = vm_map_copyin(map,
325 address,
326 size,
327 FALSE, /* src_destroy */
328 &copy)) == KERN_SUCCESS) {
329 if ((error = vm_map_copy_overwrite(
330 current_act()->map,
331 data,
332 copy,
333 FALSE)) == KERN_SUCCESS) {
334 }
335 else {
336 vm_map_copy_discard(copy);
337 }
338 }
339 }
340 *data_size = size;
341 return(error);
342}
343
344
345
346
347/*ARGSUSED*/
348kern_return_t
349vm_write(
350 vm_map_t map,
351 vm_address_t address,
352 vm_offset_t data,
353 mach_msg_type_number_t size)
354{
355 if (map == VM_MAP_NULL)
356 return KERN_INVALID_ARGUMENT;
357
358 return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
359 FALSE /* interruptible XXX */);
360}
361
362kern_return_t
363vm_copy(
364 vm_map_t map,
365 vm_address_t source_address,
366 vm_size_t size,
367 vm_address_t dest_address)
368{
369 vm_map_copy_t copy;
370 kern_return_t kr;
371
372 if (map == VM_MAP_NULL)
373 return KERN_INVALID_ARGUMENT;
374
375 kr = vm_map_copyin(map, source_address, size,
376 FALSE, &copy);
377 if (kr != KERN_SUCCESS)
378 return kr;
379
380 kr = vm_map_copy_overwrite(map, dest_address, copy,
381 FALSE /* interruptible XXX */);
382 if (kr != KERN_SUCCESS) {
383 vm_map_copy_discard(copy);
384 return kr;
385 }
386
387 return KERN_SUCCESS;
388}
389
390/*
391 * Routine: vm_map
392 */
393kern_return_t
394vm_map_64(
395 vm_map_t target_map,
396 vm_offset_t *address,
397 vm_size_t initial_size,
398 vm_offset_t mask,
399 int flags,
400 ipc_port_t port,
401 vm_object_offset_t offset,
402 boolean_t copy,
403 vm_prot_t cur_protection,
404 vm_prot_t max_protection,
405 vm_inherit_t inheritance)
406{
407 register
408 vm_object_t object;
409 vm_prot_t prot;
410 vm_object_size_t size = (vm_object_size_t)initial_size;
411 kern_return_t result;
412
413 /*
414 * Check arguments for validity
415 */
416 if ((target_map == VM_MAP_NULL) ||
417 (cur_protection & ~VM_PROT_ALL) ||
418 (max_protection & ~VM_PROT_ALL) ||
419 (inheritance > VM_INHERIT_LAST_VALID) ||
420 size == 0)
421 return(KERN_INVALID_ARGUMENT);
422
423 /*
424 * Find the vm object (if any) corresponding to this port.
425 */
426 if (!IP_VALID(port)) {
427 object = VM_OBJECT_NULL;
428 offset = 0;
429 copy = FALSE;
430 } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
431 vm_named_entry_t named_entry;
432
433 named_entry = (vm_named_entry_t)port->ip_kobject;
434 /* a few checks to make sure user is obeying rules */
435 if(size == 0) {
436 if(offset >= named_entry->size)
437 return(KERN_INVALID_RIGHT);
438 size = named_entry->size - offset;
439 }
440 if((named_entry->protection & max_protection) != max_protection)
441 return(KERN_INVALID_RIGHT);
442 if((named_entry->protection & cur_protection) != cur_protection)
443 return(KERN_INVALID_RIGHT);
444 if(named_entry->size < (offset + size))
445 return(KERN_INVALID_ARGUMENT);
446
 447 /* the caller's offset parameter is defined relative to the */
 448 /* start of the named entry; convert it to an offset in the object */
449 offset = offset + named_entry->offset;
450
451 named_entry_lock(named_entry);
452 if(named_entry->is_sub_map) {
453 vm_map_entry_t map_entry;
454
455 named_entry_unlock(named_entry);
456 *address = trunc_page(*address);
457 size = round_page(size);
458 vm_object_reference(vm_submap_object);
459 if ((result = vm_map_enter(target_map,
460 address, size, mask, flags,
461 vm_submap_object, 0,
462 FALSE,
463 cur_protection, max_protection, inheritance
464 )) != KERN_SUCCESS) {
465 vm_object_deallocate(vm_submap_object);
466 } else {
467 char alias;
468
469 VM_GET_FLAGS_ALIAS(flags, alias);
470 if ((alias == VM_MEMORY_SHARED_PMAP) &&
471 !copy) {
472 vm_map_submap(target_map, *address,
473 (*address) + size,
474 named_entry->backing.map,
475 (vm_offset_t)offset, TRUE);
476 } else {
477 vm_map_submap(target_map, *address,
478 (*address) + size,
479 named_entry->backing.map,
480 (vm_offset_t)offset, FALSE);
481 }
482 if(copy) {
483 if(vm_map_lookup_entry(
484 target_map, *address, &map_entry)) {
485 map_entry->needs_copy = TRUE;
486 }
487 }
488 }
489 return(result);
490
491 } else if(named_entry->object) {
492 /* This is the case where we are going to map */
493 /* an already mapped object. If the object is */
 494 /* not ready, it is internal. An external */
 495 /* object cannot be mapped until it is ready, */
 496 /* so we can avoid the ready check */
497 /* in this case. */
498 named_entry_unlock(named_entry);
499 vm_object_reference(named_entry->object);
500 object = named_entry->object;
501 } else {
502 object = vm_object_enter(named_entry->backing.pager,
503 named_entry->size,
504 named_entry->internal,
505 FALSE,
506 FALSE);
507 if (object == VM_OBJECT_NULL) {
508 named_entry_unlock(named_entry);
509 return(KERN_INVALID_OBJECT);
510 }
0b4e3aa0 511 object->true_share = TRUE;
512 named_entry->object = object;
513 named_entry_unlock(named_entry);
514 /* create an extra reference for the named entry */
515 vm_object_reference(named_entry->object);
516 /* wait for object (if any) to be ready */
517 if (object != VM_OBJECT_NULL) {
518 vm_object_lock(object);
519 while (!object->pager_ready) {
520 vm_object_wait(object,
521 VM_OBJECT_EVENT_PAGER_READY,
522 THREAD_UNINT);
523 vm_object_lock(object);
524 }
525 vm_object_unlock(object);
526 }
527 }
528 } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
529 /*
530 * JMM - This is temporary until we unify named entries
531 * and raw memory objects.
532 *
533 * Detected fake ip_kotype for a memory object. In
534 * this case, the port isn't really a port at all, but
535 * instead is just a raw memory object.
536 */
537
538 if ((object = vm_object_enter((memory_object_t)port,
539 size, FALSE, FALSE, FALSE))
540 == VM_OBJECT_NULL)
541 return(KERN_INVALID_OBJECT);
542
543 /* wait for object (if any) to be ready */
544 if (object != VM_OBJECT_NULL) {
545 vm_object_lock(object);
546 while (!object->pager_ready) {
547 vm_object_wait(object,
548 VM_OBJECT_EVENT_PAGER_READY,
549 THREAD_UNINT);
550 vm_object_lock(object);
551 }
552 vm_object_unlock(object);
553 }
554 } else {
555 return (KERN_INVALID_OBJECT);
556 }
557
558 *address = trunc_page(*address);
559 size = round_page(size);
560
561 /*
562 * Perform the copy if requested
563 */
564
565 if (copy) {
566 vm_object_t new_object;
567 vm_object_offset_t new_offset;
568
569 result = vm_object_copy_strategically(object, offset, size,
570 &new_object, &new_offset,
571 &copy);
572
573
574 if (result == KERN_MEMORY_RESTART_COPY) {
575 boolean_t success;
576 boolean_t src_needs_copy;
577
578 /*
579 * XXX
580 * We currently ignore src_needs_copy.
581 * This really is the issue of how to make
582 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
583 * non-kernel users to use. Solution forthcoming.
584 * In the meantime, since we don't allow non-kernel
585 * memory managers to specify symmetric copy,
586 * we won't run into problems here.
587 */
588 new_object = object;
589 new_offset = offset;
590 success = vm_object_copy_quickly(&new_object,
591 new_offset, size,
592 &src_needs_copy,
593 &copy);
594 assert(success);
595 result = KERN_SUCCESS;
596 }
597 /*
598 * Throw away the reference to the
599 * original object, as it won't be mapped.
600 */
601
602 vm_object_deallocate(object);
603
604 if (result != KERN_SUCCESS)
605 return (result);
606
607 object = new_object;
608 offset = new_offset;
609 }
610
611 if ((result = vm_map_enter(target_map,
612 address, size, mask, flags,
613 object, offset,
614 copy,
615 cur_protection, max_protection, inheritance
616 )) != KERN_SUCCESS)
617 vm_object_deallocate(object);
618 return(result);
619}
620
621/* temporary, until world build */
 622kern_return_t vm_map(
623 vm_map_t target_map,
624 vm_offset_t *address,
625 vm_size_t size,
626 vm_offset_t mask,
627 int flags,
628 ipc_port_t port,
629 vm_offset_t offset,
630 boolean_t copy,
631 vm_prot_t cur_protection,
632 vm_prot_t max_protection,
633 vm_inherit_t inheritance)
634{
 635	return vm_map_64(target_map, address, size, mask, flags, 
636 port, (vm_object_offset_t)offset, copy,
637 cur_protection, max_protection, inheritance);
638}
639
640
641/*
642 * NOTE: this routine (and this file) will no longer require mach_host_server.h
643 * when vm_wire is changed to use ledgers.
644 */
645#include <mach/mach_host_server.h>
646/*
647 * Specify that the range of the virtual address space
648 * of the target task must not cause page faults for
649 * the indicated accesses.
650 *
651 * [ To unwire the pages, specify VM_PROT_NONE. ]
652 */
653kern_return_t
654vm_wire(
655 host_priv_t host_priv,
656 register vm_map_t map,
657 vm_offset_t start,
658 vm_size_t size,
659 vm_prot_t access)
660{
661 kern_return_t rc;
662
663 if (host_priv == HOST_PRIV_NULL)
664 return KERN_INVALID_HOST;
665
666 assert(host_priv == &realhost);
667
668 if (map == VM_MAP_NULL)
669 return KERN_INVALID_TASK;
670
671 if (access & ~VM_PROT_ALL)
672 return KERN_INVALID_ARGUMENT;
673
674 if (access != VM_PROT_NONE) {
675 rc = vm_map_wire(map, trunc_page(start),
676 round_page(start+size), access, TRUE);
677 } else {
678 rc = vm_map_unwire(map, trunc_page(start),
679 round_page(start+size), TRUE);
680 }
681 return rc;
682}
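/*
 * Illustrative only (not part of the original source): vm_wire() is
 * exported through mach_host_server and takes the host privilege port,
 * so in practice only privileged callers can use it.  A hedged sketch,
 * where "host_priv", "task", "addr" and "size" are assumed to have been
 * obtained elsewhere; passing VM_PROT_NONE for the access argument
 * unwires the range again.
 *
 *	kr = vm_wire(host_priv, task, addr, size,
 *		     VM_PROT_READ | VM_PROT_WRITE);
 *	...
 *	kr = vm_wire(host_priv, task, addr, size, VM_PROT_NONE);
 */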
683
684/*
685 * vm_msync
686 *
 687 * Synchronises the specified memory range with its backing store
 688 * image by either flushing or cleaning the contents to the appropriate
 689 * memory manager, engaging in a memory object synchronize dialog with
 690 * that manager. The client doesn't return until the manager issues an
 691 * m_o_s_completed message. MIG magically converts the user task parameter
 692 * to the task's address map.
693 *
694 * interpretation of sync_flags
695 * VM_SYNC_INVALIDATE - discard pages, only return precious
696 * pages to manager.
697 *
698 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
699 * - discard pages, write dirty or precious
700 * pages back to memory manager.
701 *
702 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
703 * - write dirty or precious pages back to
704 * the memory manager.
705 *
706 * NOTE
 707 * The memory object attributes have not yet been implemented; this
 708 * function will eventually have to deal with the invalidate attribute.
709 *
710 * RETURNS
711 * KERN_INVALID_TASK Bad task parameter
712 * KERN_INVALID_ARGUMENT both sync and async were specified.
713 * KERN_SUCCESS The usual.
714 */
715
716kern_return_t
717vm_msync(
718 vm_map_t map,
719 vm_address_t address,
720 vm_size_t size,
721 vm_sync_t sync_flags)
722{
723 msync_req_t msr;
724 msync_req_t new_msr;
725 queue_chain_t req_q; /* queue of requests for this msync */
726 vm_map_entry_t entry;
727 vm_size_t amount_left;
728 vm_object_offset_t offset;
729 boolean_t do_sync_req;
730 boolean_t modifiable;
731
732
733 if ((sync_flags & VM_SYNC_ASYNCHRONOUS) &&
734 (sync_flags & VM_SYNC_SYNCHRONOUS))
735 return(KERN_INVALID_ARGUMENT);
736
737 /*
738 * align address and size on page boundaries
739 */
740 size = round_page(address + size) - trunc_page(address);
741 address = trunc_page(address);
742
743 if (map == VM_MAP_NULL)
744 return(KERN_INVALID_TASK);
745
746 if (size == 0)
747 return(KERN_SUCCESS);
748
749 queue_init(&req_q);
750 amount_left = size;
751
752 while (amount_left > 0) {
753 vm_size_t flush_size;
754 vm_object_t object;
755
756 vm_map_lock(map);
757 if (!vm_map_lookup_entry(map, address, &entry)) {
758 vm_size_t skip;
759
760 /*
761 * hole in the address map.
762 */
763
764 /*
765 * Check for empty map.
766 */
767 if (entry == vm_map_to_entry(map) &&
768 entry->vme_next == entry) {
769 vm_map_unlock(map);
770 break;
771 }
772 /*
773 * Check that we don't wrap and that
774 * we have at least one real map entry.
775 */
776 if ((map->hdr.nentries == 0) ||
777 (entry->vme_next->vme_start < address)) {
778 vm_map_unlock(map);
779 break;
780 }
781 /*
782 * Move up to the next entry if needed
783 */
784 skip = (entry->vme_next->vme_start - address);
785 if (skip >= amount_left)
786 amount_left = 0;
787 else
788 amount_left -= skip;
789 address = entry->vme_next->vme_start;
790 vm_map_unlock(map);
791 continue;
792 }
793
794 offset = address - entry->vme_start;
795
796 /*
797 * do we have more to flush than is contained in this
798 * entry ?
799 */
800 if (amount_left + entry->vme_start + offset > entry->vme_end) {
801 flush_size = entry->vme_end -
802 (entry->vme_start + offset);
803 } else {
804 flush_size = amount_left;
805 }
806 amount_left -= flush_size;
807 address += flush_size;
808
809 if (entry->is_sub_map == TRUE) {
810 vm_map_t local_map;
811 vm_offset_t local_offset;
812
813 local_map = entry->object.sub_map;
814 local_offset = entry->offset;
815 vm_map_unlock(map);
816 vm_msync(
817 local_map,
818 local_offset,
819 flush_size,
820 sync_flags);
821 continue;
822 }
823 object = entry->object.vm_object;
824
825 /*
826 * We can't sync this object if the object has not been
827 * created yet
828 */
829 if (object == VM_OBJECT_NULL) {
830 vm_map_unlock(map);
831 continue;
832 }
833 offset += entry->offset;
834 modifiable = (entry->protection & VM_PROT_WRITE)
835 != VM_PROT_NONE;
836
837 vm_object_lock(object);
838
839 if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) {
840 boolean_t kill_pages = 0;
841
842 if (sync_flags & VM_SYNC_KILLPAGES) {
843 if (object->ref_count == 1 && !entry->needs_copy && !object->shadow)
844 kill_pages = 1;
845 else
846 kill_pages = -1;
847 }
848 if (kill_pages != -1)
0b4e3aa0 849 vm_object_deactivate_pages(object, offset,
850 (vm_object_size_t)flush_size, kill_pages);
851 vm_object_unlock(object);
852 vm_map_unlock(map);
853 continue;
854 }
855 /*
856 * We can't sync this object if there isn't a pager.
857 * Don't bother to sync internal objects, since there can't
858 * be any "permanent" storage for these objects anyway.
859 */
860 if ((object->pager == MEMORY_OBJECT_NULL) ||
861 (object->internal) || (object->private)) {
862 vm_object_unlock(object);
863 vm_map_unlock(map);
864 continue;
865 }
866 /*
867 * keep reference on the object until syncing is done
868 */
869 assert(object->ref_count > 0);
870 object->ref_count++;
871 vm_object_res_reference(object);
872 vm_object_unlock(object);
873
874 vm_map_unlock(map);
875
0b4e3aa0 876 do_sync_req = vm_object_sync(object,
877 offset,
878 flush_size,
879 sync_flags & VM_SYNC_INVALIDATE,
880 (modifiable &&
881 (sync_flags & VM_SYNC_SYNCHRONOUS ||
882 sync_flags & VM_SYNC_ASYNCHRONOUS)));
883
884 /*
885 * only send a m_o_s if we returned pages or if the entry
886 * is writable (ie dirty pages may have already been sent back)
887 */
888 if (!do_sync_req && !modifiable) {
889 vm_object_deallocate(object);
890 continue;
891 }
892 msync_req_alloc(new_msr);
893
894 vm_object_lock(object);
895 offset += object->paging_offset;
896
897 new_msr->offset = offset;
898 new_msr->length = flush_size;
899 new_msr->object = object;
900 new_msr->flag = VM_MSYNC_SYNCHRONIZING;
901re_iterate:
902 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
903 /*
904 * need to check for overlapping entry, if found, wait
905 * on overlapping msr to be done, then reiterate
906 */
907 msr_lock(msr);
908 if (msr->flag == VM_MSYNC_SYNCHRONIZING &&
909 ((offset >= msr->offset &&
910 offset < (msr->offset + msr->length)) ||
911 (msr->offset >= offset &&
912 msr->offset < (offset + flush_size))))
913 {
914 assert_wait((event_t) msr,THREAD_INTERRUPTIBLE);
915 msr_unlock(msr);
916 vm_object_unlock(object);
917 thread_block((void (*)(void))0);
918 vm_object_lock(object);
919 goto re_iterate;
920 }
921 msr_unlock(msr);
922 }/* queue_iterate */
923
924 queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q);
925 vm_object_unlock(object);
926
927 queue_enter(&req_q, new_msr, msync_req_t, req_q);
928
929 (void) memory_object_synchronize(
930 object->pager,
931 offset,
932 flush_size,
933 sync_flags);
934 }/* while */
935
936 /*
 937 * wait for memory_object_synchronize_completed messages from pager(s)
938 */
939
940 while (!queue_empty(&req_q)) {
941 msr = (msync_req_t)queue_first(&req_q);
942 msr_lock(msr);
943 while(msr->flag != VM_MSYNC_DONE) {
944 assert_wait((event_t) msr, THREAD_INTERRUPTIBLE);
945 msr_unlock(msr);
946 thread_block((void (*)(void))0);
947 msr_lock(msr);
948 }/* while */
949 queue_remove(&req_q, msr, msync_req_t, req_q);
950 msr_unlock(msr);
951 vm_object_deallocate(msr->object);
952 msync_req_free(msr);
953 }/* queue_iterate */
954
955 return(KERN_SUCCESS);
956}/* vm_msync */
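/*
 * Illustrative only: a hedged user-level sketch of the sync_flags
 * combinations described in the block comment above vm_msync().  The
 * region "addr"/"len" is a placeholder and is assumed to be backed by an
 * external memory manager (purely anonymous memory has nothing to sync).
 *
 *	Write dirty/precious pages back and wait for completion:
 *		kr = vm_msync(mach_task_self(), addr, len,
 *			      VM_SYNC_SYNCHRONOUS);
 *
 *	Write back and then discard the cached pages:
 *		kr = vm_msync(mach_task_self(), addr, len,
 *			      VM_SYNC_SYNCHRONOUS | VM_SYNC_INVALIDATE);
 */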
957
958
959/*
960 * task_wire
961 *
962 * Set or clear the map's wiring_required flag. This flag, if set,
963 * will cause all future virtual memory allocation to allocate
964 * user wired memory. Unwiring pages wired down as a result of
965 * this routine is done with the vm_wire interface.
966 */
967kern_return_t
968task_wire(
969 vm_map_t map,
970 boolean_t must_wire)
971{
972 if (map == VM_MAP_NULL)
973 return(KERN_INVALID_ARGUMENT);
974
975 if (must_wire)
976 map->wiring_required = TRUE;
977 else
978 map->wiring_required = FALSE;
979
980 return(KERN_SUCCESS);
981}
982
983/*
984 * vm_behavior_set sets the paging behavior attribute for the
985 * specified range in the specified map. This routine will fail
986 * with KERN_INVALID_ADDRESS if any address in [start,start+size)
987 * is not a valid allocated or reserved memory region.
988 */
989kern_return_t
990vm_behavior_set(
991 vm_map_t map,
992 vm_offset_t start,
993 vm_size_t size,
994 vm_behavior_t new_behavior)
995{
996 if (map == VM_MAP_NULL)
997 return(KERN_INVALID_ARGUMENT);
998
999 return(vm_map_behavior_set(map, trunc_page(start),
1000 round_page(start+size), new_behavior));
1001}
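/*
 * Illustrative only: vm_behavior_set() is the Mach-level analogue of
 * madvise().  A hedged user-level sketch advising sequential access for
 * a placeholder region "addr"/"len" (constants from <mach/vm_behavior.h>):
 *
 *	kr = vm_behavior_set(mach_task_self(), addr, len,
 *			     VM_BEHAVIOR_SEQUENTIAL);
 */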
1002
1003#if VM_CPM
1004/*
1005 * Control whether the kernel will permit use of
1006 * vm_allocate_cpm at all.
1007 */
1008unsigned int vm_allocate_cpm_enabled = 1;
1009
1010/*
1011 * Ordinarily, the right to allocate CPM is restricted
1012 * to privileged applications (those that can gain access
1013 * to the host port). Set this variable to zero if you
1014 * want to let any application allocate CPM.
1015 */
1016unsigned int vm_allocate_cpm_privileged = 0;
1017
1018/*
1019 * Allocate memory in the specified map, with the caveat that
1020 * the memory is physically contiguous. This call may fail
1021 * if the system can't find sufficient contiguous memory.
1022 * This call may cause or lead to heart-stopping amounts of
1023 * paging activity.
1024 *
1025 * Memory obtained from this call should be freed in the
1026 * normal way, viz., via vm_deallocate.
1027 */
1028kern_return_t
1029vm_allocate_cpm(
1030 host_priv_t host_priv,
1031 register vm_map_t map,
1032 register vm_offset_t *addr,
1033 register vm_size_t size,
1034 int flags)
1035{
1036 vm_object_t cpm_obj;
1037 pmap_t pmap;
1038 vm_page_t m, pages;
1039 kern_return_t kr;
1040 vm_offset_t va, start, end, offset;
1041#if MACH_ASSERT
1042 extern vm_offset_t avail_start, avail_end;
1043 vm_offset_t prev_addr;
1044#endif /* MACH_ASSERT */
1045
1046 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
1047
1048 if (!vm_allocate_cpm_enabled)
1049 return KERN_FAILURE;
1050
1051 if (vm_allocate_cpm_privileged && host_priv == HOST_PRIV_NULL)
1052 return KERN_INVALID_HOST;
1053
1054 if (map == VM_MAP_NULL)
1055 return KERN_INVALID_ARGUMENT;
1056
1057 assert(host_priv == &realhost);
1058
1059 if (size == 0) {
1060 *addr = 0;
1061 return KERN_SUCCESS;
1062 }
1063
1064 if (anywhere)
1065 *addr = vm_map_min(map);
1066 else
1067 *addr = trunc_page(*addr);
1068 size = round_page(size);
1069
1070 if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS)
1071 return kr;
1072
1073 cpm_obj = vm_object_allocate(size);
1074 assert(cpm_obj != VM_OBJECT_NULL);
1075 assert(cpm_obj->internal);
1076 assert(cpm_obj->size == size);
1077 assert(cpm_obj->can_persist == FALSE);
1078 assert(cpm_obj->pager_created == FALSE);
1079 assert(cpm_obj->pageout == FALSE);
1080 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1081
1082 /*
1083 * Insert pages into object.
1084 */
1085
1086 vm_object_lock(cpm_obj);
1087 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1088 m = pages;
1089 pages = NEXT_PAGE(m);
1090
1091 assert(!m->gobbled);
1092 assert(!m->wanted);
1093 assert(!m->pageout);
1094 assert(!m->tabled);
1095 assert(m->busy);
1096 assert(m->phys_addr>=avail_start && m->phys_addr<=avail_end);
1097
1098 m->busy = FALSE;
1099 vm_page_insert(m, cpm_obj, offset);
1100 }
1101 assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
1102 vm_object_unlock(cpm_obj);
1103
1104 /*
1105 * Hang onto a reference on the object in case a
1106 * multi-threaded application for some reason decides
1107 * to deallocate the portion of the address space into
1108 * which we will insert this object.
1109 *
1110 * Unfortunately, we must insert the object now before
1111 * we can talk to the pmap module about which addresses
1112 * must be wired down. Hence, the race with a multi-
1113 * threaded app.
1114 */
1115 vm_object_reference(cpm_obj);
1116
1117 /*
1118 * Insert object into map.
1119 */
1120
1121 kr = vm_map_enter(
1122 map,
1123 addr,
1124 size,
1125 (vm_offset_t)0,
1126 flags,
1127 cpm_obj,
1128 (vm_object_offset_t)0,
1129 FALSE,
1130 VM_PROT_ALL,
1131 VM_PROT_ALL,
1132 VM_INHERIT_DEFAULT);
1133
1134 if (kr != KERN_SUCCESS) {
1135 /*
1136 * A CPM object doesn't have can_persist set,
1137 * so all we have to do is deallocate it to
1138 * free up these pages.
1139 */
1140 assert(cpm_obj->pager_created == FALSE);
1141 assert(cpm_obj->can_persist == FALSE);
1142 assert(cpm_obj->pageout == FALSE);
1143 assert(cpm_obj->shadow == VM_OBJECT_NULL);
1144 vm_object_deallocate(cpm_obj); /* kill acquired ref */
1145 vm_object_deallocate(cpm_obj); /* kill creation ref */
1146 }
1147
1148 /*
1149 * Inform the physical mapping system that the
1150 * range of addresses may not fault, so that
1151 * page tables and such can be locked down as well.
1152 */
1153 start = *addr;
1154 end = start + size;
1155 pmap = vm_map_pmap(map);
1156 pmap_pageable(pmap, start, end, FALSE);
1157
1158 /*
1159 * Enter each page into the pmap, to avoid faults.
1160 * Note that this loop could be coded more efficiently,
1161 * if the need arose, rather than looking up each page
1162 * again.
1163 */
1164 for (offset = 0, va = start; offset < size;
1165 va += PAGE_SIZE, offset += PAGE_SIZE) {
1166 vm_object_lock(cpm_obj);
1167 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1168 vm_object_unlock(cpm_obj);
1169 assert(m != VM_PAGE_NULL);
1170 PMAP_ENTER(pmap, va, m, VM_PROT_ALL, TRUE);
1171 }
1172
1173#if MACH_ASSERT
1174 /*
1175 * Verify ordering in address space.
1176 */
1177 for (offset = 0; offset < size; offset += PAGE_SIZE) {
1178 vm_object_lock(cpm_obj);
1179 m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
1180 vm_object_unlock(cpm_obj);
1181 if (m == VM_PAGE_NULL)
1182 panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
1183 cpm_obj, offset);
1184 assert(m->tabled);
1185 assert(!m->busy);
1186 assert(!m->wanted);
1187 assert(!m->fictitious);
1188 assert(!m->private);
1189 assert(!m->absent);
1190 assert(!m->error);
1191 assert(!m->cleaning);
1192 assert(!m->precious);
1193 assert(!m->clustered);
1194 if (offset != 0) {
1195 if (m->phys_addr != prev_addr + PAGE_SIZE) {
1196 printf("start 0x%x end 0x%x va 0x%x\n",
1197 start, end, va);
1198 printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
1199 printf("m 0x%x prev_address 0x%x\n", m,
1200 prev_addr);
1201 panic("vm_allocate_cpm: pages not contig!");
1202 }
1203 }
1204 prev_addr = m->phys_addr;
1205 }
1206#endif /* MACH_ASSERT */
1207
1208 vm_object_deallocate(cpm_obj); /* kill extra ref */
1209
1210 return kr;
1211}
1212
1213
1214#else /* VM_CPM */
1215
1216/*
1217 * Interface is defined in all cases, but unless the kernel
1218 * is built explicitly for this option, the interface does
1219 * nothing.
1220 */
1221
1222kern_return_t
1223vm_allocate_cpm(
1224 host_priv_t host_priv,
1225 register vm_map_t map,
1226 register vm_offset_t *addr,
1227 register vm_size_t size,
1228 int flags)
1229{
1230 return KERN_FAILURE;
1231}
1232
1233/*
1234 */
1235kern_return_t
1236mach_memory_object_memory_entry_64(
1237 host_t host,
1238 boolean_t internal,
1239 vm_object_offset_t size,
1240 vm_prot_t permission,
0b4e3aa0 1241 memory_object_t pager,
1242 ipc_port_t *entry_handle)
1243{
1244 vm_named_entry_t user_object;
1245 ipc_port_t user_handle;
1246 ipc_port_t previous;
1247 kern_return_t kr;
1248
1249 if (host == HOST_NULL)
1250 return(KERN_INVALID_HOST);
1251
1252 user_object = (vm_named_entry_t)
1253 kalloc(sizeof (struct vm_named_entry));
1254 if(user_object == NULL)
1255 return KERN_FAILURE;
1256 named_entry_lock_init(user_object);
1257 user_handle = ipc_port_alloc_kernel();
1258 ip_lock(user_handle);
1259
1260 /* make a sonce right */
1261 user_handle->ip_sorights++;
1262 ip_reference(user_handle);
1263
1264 user_handle->ip_destination = IP_NULL;
1265 user_handle->ip_receiver_name = MACH_PORT_NULL;
1266 user_handle->ip_receiver = ipc_space_kernel;
1267
1268 /* make a send right */
1269 user_handle->ip_mscount++;
1270 user_handle->ip_srights++;
1271 ip_reference(user_handle);
1272
1273 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1274 /* nsrequest unlocks user_handle */
1275
1276 user_object->object = NULL;
1277 user_object->size = size;
1278 user_object->offset = 0;
1279 user_object->backing.pager = pager;
1280 user_object->protection = permission;
1281 user_object->internal = internal;
1282 user_object->is_sub_map = FALSE;
1283 user_object->ref_count = 1;
1284
1285 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1286 IKOT_NAMED_ENTRY);
1287 *entry_handle = user_handle;
1288 return KERN_SUCCESS;
1289}
1290
1291kern_return_t
1292mach_memory_object_memory_entry(
1293 host_t host,
1294 boolean_t internal,
1295 vm_size_t size,
1296 vm_prot_t permission,
0b4e3aa0 1297 memory_object_t pager,
1298 ipc_port_t *entry_handle)
1299{
1300 return mach_memory_object_memory_entry_64( host, internal,
1301 (vm_object_offset_t)size, permission, pager, entry_handle);
1302}
1303
1304
1305
1306/*
1307 */
1308
1309kern_return_t
1310mach_make_memory_entry_64(
1311 vm_map_t target_map,
1312 vm_object_size_t *size,
1313 vm_object_offset_t offset,
1314 vm_prot_t permission,
1315 ipc_port_t *object_handle,
1316 ipc_port_t parent_entry)
1317{
1318 vm_map_version_t version;
1319 vm_named_entry_t user_object;
1320 ipc_port_t user_handle;
1321 ipc_port_t previous;
1322 kern_return_t kr;
1323 vm_map_t pmap_map;
1324
1325 /* needed for call to vm_map_lookup_locked */
1326 boolean_t wired;
1327 vm_object_offset_t obj_off;
1328 vm_prot_t prot;
1329 vm_object_offset_t lo_offset, hi_offset;
1330 vm_behavior_t behavior;
1331 vm_object_t object;
1332
1333 /* needed for direct map entry manipulation */
1334 vm_map_entry_t map_entry;
1335 vm_map_t local_map;
1336 vm_object_size_t mappable_size;
1337
1338
1339 user_object = (vm_named_entry_t)
1340 kalloc(sizeof (struct vm_named_entry));
1341 if(user_object == NULL)
1342 return KERN_FAILURE;
1343 named_entry_lock_init(user_object);
1344 user_handle = ipc_port_alloc_kernel();
1345 ip_lock(user_handle);
1346
1347 /* make a sonce right */
1348 user_handle->ip_sorights++;
1349 ip_reference(user_handle);
1350
1351 user_handle->ip_destination = IP_NULL;
1352 user_handle->ip_receiver_name = MACH_PORT_NULL;
1353 user_handle->ip_receiver = ipc_space_kernel;
1354
1355 /* make a send right */
1356 user_handle->ip_mscount++;
1357 user_handle->ip_srights++;
1358 ip_reference(user_handle);
1359
1360 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1361 /* nsrequest unlocks user_handle */
1362
1363 user_object->backing.pager = NULL;
1364 user_object->ref_count = 1;
1365
1366 if(parent_entry == NULL) {
1367 /* Create a named object based on address range within the task map */
1368 /* Go find the object at given address */
1369
1370 permission &= VM_PROT_ALL;
1371 vm_map_lock_read(target_map);
1372
1373 /* get the object associated with the target address */
1374 /* note we check the permission of the range against */
1375 /* that requested by the caller */
1376
1377 kr = vm_map_lookup_locked(&target_map, offset,
1378 permission, &version,
1379 &object, &obj_off, &prot, &wired, &behavior,
1380 &lo_offset, &hi_offset, &pmap_map);
1381 if (kr != KERN_SUCCESS) {
1382 vm_map_unlock_read(target_map);
1383 goto make_mem_done;
1384 }
1385 if ((prot & permission) != permission) {
1386 kr = KERN_INVALID_RIGHT;
1387 vm_object_unlock(object);
1388 vm_map_unlock_read(target_map);
1389 if(pmap_map != target_map)
1390 vm_map_unlock_read(pmap_map);
1391 goto make_mem_done;
1392 }
1393
1394 /* We have an object, now check to see if this object */
1395 /* is suitable. If not, create a shadow and share that */
1396
1397 local_map = target_map;
1398redo_lookup:
1399 while(TRUE) {
1400 if(!vm_map_lookup_entry(local_map, offset, &map_entry)) {
1401 kr = KERN_INVALID_ARGUMENT;
1402 vm_object_unlock(object);
1403 vm_map_unlock_read(target_map);
1404 if(pmap_map != target_map)
1405 vm_map_unlock_read(pmap_map);
1406 goto make_mem_done;
1407 }
1408 if(!(map_entry->is_sub_map)) {
1409 if(map_entry->object.vm_object != object) {
1410 kr = KERN_INVALID_ARGUMENT;
1411 vm_object_unlock(object);
1412 vm_map_unlock_read(target_map);
1413 if(pmap_map != target_map)
1414 vm_map_unlock_read(pmap_map);
1415 goto make_mem_done;
1416 }
1417 break;
1418 } else {
1419 local_map = map_entry->object.sub_map;
1420 vm_map_lock_read(local_map);
1421 vm_map_unlock_read(target_map);
1422 if(pmap_map != target_map)
1423 vm_map_unlock_read(pmap_map);
1424 target_map = local_map;
1425 }
1426 }
1427 if(((map_entry->max_protection) & permission) != permission) {
1428 kr = KERN_INVALID_RIGHT;
1429 vm_object_unlock(object);
1430 vm_map_unlock_read(target_map);
1431 if(pmap_map != target_map)
1432 vm_map_unlock_read(pmap_map);
1433 goto make_mem_done;
1434 }
1435 if(object->internal) {
1436 /* vm_map_lookup_locked will create a shadow if */
1437 /* needs_copy is set but does not check for the */
1438 /* other two conditions shown. It is important to */
1439 /* set up an object which will not be pulled from */
1440 /* under us. */
1441
0b4e3aa0 1442 if ((map_entry->needs_copy || object->shadowed ||
1443 (object->size >
1444 ((vm_object_size_t)map_entry->vme_end -
1445 map_entry->vme_start)))
1446 && !object->true_share) {
1447 if (vm_map_lock_read_to_write(target_map)) {
1448 vm_map_lock_read(target_map);
1449 goto redo_lookup;
1450 }
1451
1452
1453 /* create a shadow object */
1454
1455 vm_object_shadow(&map_entry->object.vm_object,
1456 &map_entry->offset,
1457 (map_entry->vme_end
1458 - map_entry->vme_start));
1459 map_entry->needs_copy = FALSE;
1460 vm_object_unlock(object);
1461 object = map_entry->object.vm_object;
1462 vm_object_lock(object);
1463 object->size = map_entry->vme_end
1464 - map_entry->vme_start;
1465 obj_off = (offset - map_entry->vme_start) +
1466 map_entry->offset;
1467 lo_offset = map_entry->offset;
1468 hi_offset = (map_entry->vme_end -
1469 map_entry->vme_start) +
1470 map_entry->offset;
1471
1472 vm_map_lock_write_to_read(target_map);
1473
1474 }
1475 }
1476
 1477	 	/* note: in the future we can (if necessary) allow for  */
 1478		/* memory object lists; this will better support        */
 1479		/* fragmentation, but is it necessary?  The user should */
 1480		/* be encouraged to create address space oriented       */
 1481		/* shared objects from CLEAN memory regions which have  */
 1482		/* a known and defined history, i.e. no inheritance     */
 1483		/* share; make this call before making the region the   */
 1484		/* target of ipc's, etc.  The code above, protecting    */
 1485		/* against delayed copy, etc., is mostly defensive.     */
1486
1487
1488
1489 object->true_share = TRUE;
1490 user_object->object = object;
1491 user_object->internal = object->internal;
1492 user_object->is_sub_map = FALSE;
1493 user_object->offset = obj_off;
1494 user_object->protection = permission;
1495
1496 /* the size of mapped entry that overlaps with our region */
1497 /* which is targeted for share. */
1498 /* (entry_end - entry_start) - */
1499 /* offset of our beg addr within entry */
1500 /* it corresponds to this: */
1501
1502 mappable_size = hi_offset - obj_off;
1503 if(*size > mappable_size)
1504 *size = mappable_size;
1505
1506 user_object->size = *size;
1507
1508 /* user_object pager and internal fields are not used */
1509 /* when the object field is filled in. */
1510
1511 object->ref_count++; /* we now point to this object, hold on */
1512 vm_object_res_reference(object);
1513 vm_object_unlock(object);
1514 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1515 IKOT_NAMED_ENTRY);
1516 *size = user_object->size;
1517 *object_handle = user_handle;
1518 vm_map_unlock_read(target_map);
1519 if(pmap_map != target_map)
1520 vm_map_unlock_read(pmap_map);
1521 return KERN_SUCCESS;
1522 } else {
1523
1524 vm_named_entry_t parent_object;
1525
 1526		/* The new object will be based on an existing named object */
1527 if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
1528 kr = KERN_INVALID_ARGUMENT;
1529 goto make_mem_done;
1530 }
1531 parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
 1532		if((permission & parent_object->protection) != permission) {
1533 kr = KERN_INVALID_ARGUMENT;
1534 goto make_mem_done;
1535 }
1536 if((offset + *size) > parent_object->size) {
1537 kr = KERN_INVALID_ARGUMENT;
1538 goto make_mem_done;
1539 }
1540
1541 user_object->object = parent_object->object;
1542 user_object->size = *size;
1543 user_object->offset = parent_object->offset + offset;
1544 user_object->protection = permission;
1545 if(parent_object->is_sub_map) {
1546 user_object->backing.map = parent_object->backing.map;
1547 vm_map_lock(user_object->backing.map);
1548 user_object->backing.map->ref_count++;
1549 vm_map_unlock(user_object->backing.map);
1550 }
1551 else {
1552 user_object->backing.pager = parent_object->backing.pager;
1553 }
1554 user_object->internal = parent_object->internal;
1555 user_object->is_sub_map = parent_object->is_sub_map;
1556
1557 if(parent_object->object != NULL) {
1558 /* we now point to this object, hold on */
1559 vm_object_reference(parent_object->object);
1560 vm_object_lock(parent_object->object);
1561 parent_object->object->true_share = TRUE;
1562 vm_object_unlock(parent_object->object);
1563 }
1564 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1565 IKOT_NAMED_ENTRY);
1566 *object_handle = user_handle;
1567 return KERN_SUCCESS;
1568 }
1569
1570
1571
1572make_mem_done:
1573 ipc_port_dealloc_kernel(user_handle);
1574 kfree((vm_offset_t)user_object, sizeof (struct vm_named_entry));
1575 return kr;
1576}
1577
1578kern_return_t
1579mach_make_memory_entry(
1580 vm_map_t target_map,
1581 vm_size_t *size,
1582 vm_offset_t offset,
1583 vm_prot_t permission,
1584 ipc_port_t *object_handle,
1585 ipc_port_t parent_entry)
1586{
1587 vm_object_offset_t size_64;
1588 kern_return_t kr;
1589
1590 size_64 = (vm_object_offset_t)*size;
1591 kr = mach_make_memory_entry_64(target_map, &size_64,
1592 (vm_object_offset_t)offset, permission, object_handle,
1593 parent_entry);
1594 *size = (vm_size_t)size_64;
1595 return kr;
1596}
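/*
 * Illustrative only (not part of the original source): the usual
 * user-level pattern built on mach_make_memory_entry_64() is to wrap an
 * existing region in a named entry and then vm_map() that handle again,
 * either in the same task or in another task whose map port you hold.
 * A hedged sketch; "base", "share_size" and "target_task" are
 * placeholders:
 *
 *	mach_port_t	 handle = MACH_PORT_NULL;
 *	vm_object_size_t entry_size = share_size;
 *	vm_address_t	 mapped = 0;
 *
 *	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size,
 *			(vm_object_offset_t)base,
 *			VM_PROT_READ | VM_PROT_WRITE,
 *			&handle, MACH_PORT_NULL);
 *	if (kr == KERN_SUCCESS)
 *		kr = vm_map(target_task, &mapped, entry_size, 0,
 *			    VM_FLAGS_ANYWHERE, handle, 0, FALSE,
 *			    VM_PROT_READ | VM_PROT_WRITE,
 *			    VM_PROT_READ | VM_PROT_WRITE,
 *			    VM_INHERIT_SHARE);
 */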
1597
1598/*
1599 */
1600
1601kern_return_t
1602vm_region_object_create(
1603 vm_map_t target_map,
1604 vm_size_t size,
1605 ipc_port_t *object_handle)
1606{
1607 vm_named_entry_t user_object;
1608 ipc_port_t user_handle;
1609 kern_return_t kr;
1610
1611 pmap_t new_pmap = pmap_create((vm_size_t) 0);
1612 ipc_port_t previous;
1613 vm_map_t new_map;
1614
1615 if(new_pmap == PMAP_NULL)
1616 return KERN_FAILURE;
1617 user_object = (vm_named_entry_t)
1618 kalloc(sizeof (struct vm_named_entry));
1619 if(user_object == NULL) {
1620 pmap_destroy(new_pmap);
1621 return KERN_FAILURE;
1622 }
1623 named_entry_lock_init(user_object);
1624 user_handle = ipc_port_alloc_kernel();
1625
1626
1627 ip_lock(user_handle);
1628
1629 /* make a sonce right */
1630 user_handle->ip_sorights++;
1631 ip_reference(user_handle);
1632
1633 user_handle->ip_destination = IP_NULL;
1634 user_handle->ip_receiver_name = MACH_PORT_NULL;
1635 user_handle->ip_receiver = ipc_space_kernel;
1636
1637 /* make a send right */
1638 user_handle->ip_mscount++;
1639 user_handle->ip_srights++;
1640 ip_reference(user_handle);
1641
1642 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
1643 /* nsrequest unlocks user_handle */
1644
1645 /* Create a named object based on a submap of specified size */
1646
1647 new_map = vm_map_create(new_pmap, 0, size, TRUE);
1648 user_object->backing.map = new_map;
1649
1650
1651 user_object->object = VM_OBJECT_NULL;
1652 user_object->internal = TRUE;
1653 user_object->is_sub_map = TRUE;
1654 user_object->offset = 0;
1655 user_object->protection = VM_PROT_ALL;
1656 user_object->size = size;
1657 user_object->ref_count = 1;
1658
1659 ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
1660 IKOT_NAMED_ENTRY);
1661 *object_handle = user_handle;
1662 return KERN_SUCCESS;
1663
1664}
1665
 1666/* For a given range, check all map entries.  If the entry corresponds to */
1667/* the old vm_region/map provided on the call, replace it with the */
1668/* corresponding range in the new vm_region/map */
1669kern_return_t vm_map_region_replace(
1670 vm_map_t target_map,
1671 ipc_port_t old_region,
1672 ipc_port_t new_region,
1673 vm_offset_t start,
1674 vm_offset_t end)
1675{
1676 vm_named_entry_t old_object;
1677 vm_named_entry_t new_object;
1678 vm_map_t old_submap;
1679 vm_map_t new_submap;
1680 vm_offset_t addr;
1681 vm_map_entry_t entry;
1682 int nested_pmap = 0;
1683
1684
1685 vm_map_lock(target_map);
1686 old_object = (vm_named_entry_t)old_region->ip_kobject;
1687 new_object = (vm_named_entry_t)new_region->ip_kobject;
1688 if((!old_object->is_sub_map) || (!new_object->is_sub_map)) {
1689 vm_map_unlock(target_map);
1690 return KERN_INVALID_ARGUMENT;
1691 }
1692 old_submap = (vm_map_t)old_object->backing.map;
1693 new_submap = (vm_map_t)new_object->backing.map;
1694 vm_map_lock(old_submap);
1695 if((old_submap->min_offset != new_submap->min_offset) ||
1696 (old_submap->max_offset != new_submap->max_offset)) {
1697 vm_map_unlock(old_submap);
1698 vm_map_unlock(target_map);
1699 return KERN_INVALID_ARGUMENT;
1700 }
1701 if(!vm_map_lookup_entry(target_map, start, &entry)) {
 1702		/* if the src is not contained, the entry precedes */
1703 /* our range */
1704 addr = entry->vme_start;
1705 if(entry == vm_map_to_entry(target_map)) {
1706 vm_map_unlock(old_submap);
1707 vm_map_unlock(target_map);
1708 return KERN_SUCCESS;
1709 }
1710 vm_map_lookup_entry(target_map, addr, &entry);
1711 }
1712 addr = entry->vme_start;
1713 vm_map_reference(old_submap);
1714 while((entry != vm_map_to_entry(target_map)) &&
1715 (entry->vme_start < end)) {
1716 if((entry->is_sub_map) &&
1717 (entry->object.sub_map == old_submap)) {
1718 entry->object.sub_map = new_submap;
1719 if(entry->use_pmap) {
1720 if((start & 0xfffffff) ||
1721 ((end - start) != 0x10000000)) {
1722 vm_map_unlock(old_submap);
1723 vm_map_unlock(target_map);
1724 return KERN_INVALID_ARGUMENT;
1725 }
1726 nested_pmap = 1;
1727 }
1728 vm_map_reference(new_submap);
1729 vm_map_deallocate(old_submap);
1730 }
1731 entry = entry->vme_next;
1732 addr = entry->vme_start;
1733 }
1734 if(nested_pmap) {
1735#ifndef i386
1736 pmap_unnest(target_map->pmap, start, end - start);
1737 pmap_nest(target_map->pmap, new_submap->pmap,
1738 start, end - start);
 1739#endif /* i386 */
1740 } else {
1741 pmap_remove(target_map->pmap, start, end);
1742 }
1743 vm_map_unlock(old_submap);
1744 vm_map_unlock(target_map);
1745 return KERN_SUCCESS;
1746}
1747
1748
1749void
1750mach_destroy_memory_entry(
1751 ipc_port_t port)
1752{
1753 vm_named_entry_t named_entry;
1754#if MACH_ASSERT
1755 assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
1756#endif /* MACH_ASSERT */
1757 named_entry = (vm_named_entry_t)port->ip_kobject;
1758 mutex_lock(&(named_entry)->Lock);
1759 named_entry->ref_count-=1;
1760 if(named_entry->ref_count == 0) {
1761 if(named_entry->object) {
1762 /* release the memory object we've been pointing to */
1763 vm_object_deallocate(named_entry->object);
1764 }
1765 if(named_entry->is_sub_map) {
1766 vm_map_deallocate(named_entry->backing.map);
1767 }
1768 kfree((vm_offset_t)port->ip_kobject,
1769 sizeof (struct vm_named_entry));
1770 } else
1771 mutex_unlock(&(named_entry)->Lock);
1772}
1773
1774
1775kern_return_t
1776vm_map_page_query(
1777 vm_map_t target_map,
1778 vm_offset_t offset,
1779 int *disposition,
1780 int *ref_count)
1781{
1782 vm_map_entry_t map_entry;
1783 vm_object_t object;
1784 vm_page_t m;
1785
1786restart_page_query:
1787 *disposition = 0;
1788 *ref_count = 0;
1789 vm_map_lock(target_map);
1790 if(!vm_map_lookup_entry(target_map, offset, &map_entry)) {
1791 vm_map_unlock(target_map);
1792 return KERN_FAILURE;
1793 }
1794 offset -= map_entry->vme_start; /* adjust to offset within entry */
1795 offset += map_entry->offset; /* adjust to target object offset */
1796 if(map_entry->object.vm_object != VM_OBJECT_NULL) {
1797 if(!map_entry->is_sub_map) {
1798 object = map_entry->object.vm_object;
1799 } else {
1800 vm_map_unlock(target_map);
1801 target_map = map_entry->object.sub_map;
1802 goto restart_page_query;
1803 }
1804 } else {
1805 vm_map_unlock(target_map);
1806 return KERN_FAILURE;
1807 }
1808 vm_object_lock(object);
1809 vm_map_unlock(target_map);
1810 while(TRUE) {
1811 m = vm_page_lookup(object, offset);
1812 if (m != VM_PAGE_NULL) {
1813 *disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
1814 break;
1815 } else {
1816 if(object->shadow) {
1817 offset += object->shadow_offset;
1818 vm_object_unlock(object);
1819 object = object->shadow;
1820 vm_object_lock(object);
1821 continue;
1822 }
1823 vm_object_unlock(object);
1824 return KERN_FAILURE;
1825 }
1826 }
1827
 1828	/* The ref_count is not strictly accurate; it measures the number   */
 1829	/* of entities holding a ref on the object.  They may not be mapping */
 1830	/* the object, or may not be mapping the section holding the        */
 1831	/* target page, but it's still a ballpark number and, though an over-*/
 1832	/* count, it picks up the copy-on-write cases.                      */
1833
1834 /* We could also get a picture of page sharing from pmap_attributes */
 1835	/* but this would undercount, as only faulted-in mappings would     */
1836 /* show up. */
1837
1838 *ref_count = object->ref_count;
1839
1840 if (m->fictitious) {
1841 *disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
1842 vm_object_unlock(object);
1843 return KERN_SUCCESS;
1844 }
1845
1846 if (m->dirty)
1847 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
1848 else if(pmap_is_modified(m->phys_addr))
1849 *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
1850
1851 if (m->reference)
1852 *disposition |= VM_PAGE_QUERY_PAGE_REF;
1853 else if(pmap_is_referenced(m->phys_addr))
1854 *disposition |= VM_PAGE_QUERY_PAGE_REF;
1855
1856 vm_object_unlock(object);
1857 return KERN_SUCCESS;
1858
1859}
1860
1861kern_return_t
1862set_dp_control_port(
1863 host_priv_t host_priv,
1864 ipc_port_t control_port)
1865{
1866 if (host_priv == HOST_PRIV_NULL)
1867 return (KERN_INVALID_HOST);
1868
1869 if (IP_VALID(dynamic_pager_control_port))
1870 ipc_port_release_send(dynamic_pager_control_port);
1871
1872 dynamic_pager_control_port = control_port;
1873 return KERN_SUCCESS;
1874}
1875
1876kern_return_t
1877get_dp_control_port(
1878 host_priv_t host_priv,
1879 ipc_port_t *control_port)
1880{
1881 if (host_priv == HOST_PRIV_NULL)
1882 return (KERN_INVALID_HOST);
1883
1884 *control_port = ipc_port_copy_send(dynamic_pager_control_port);
1885 return KERN_SUCCESS;
1886
1887}
1888
1889
1890/* Retrieve a upl for an object underlying an address range in a map */
1891
1892kern_return_t
1893vm_map_get_upl(
1894 vm_map_t map,
1895 vm_address_t offset,
1896 vm_size_t *upl_size,
1897 upl_t *upl,
1898 upl_page_info_array_t page_list,
1899 unsigned int *count,
1900 int *flags,
1901 int force_data_sync)
1902{
1903 vm_map_entry_t entry;
1904 int caller_flags;
1905 int sync_cow_data = FALSE;
1906 vm_object_t local_object;
1907 vm_offset_t local_offset;
1908 vm_offset_t local_start;
1909 kern_return_t ret;
1910
1911 caller_flags = *flags;
1912 if (!(caller_flags & UPL_COPYOUT_FROM)) {
1913 sync_cow_data = TRUE;
1914 }
1915 if(upl == NULL)
1916 return KERN_INVALID_ARGUMENT;
1917
1918
1919REDISCOVER_ENTRY:
1920 vm_map_lock(map);
1921 if (vm_map_lookup_entry(map, offset, &entry)) {
1922 if (entry->object.vm_object == VM_OBJECT_NULL ||
1923 !entry->object.vm_object->phys_contiguous) {
1924 if((*upl_size/page_size) > MAX_UPL_TRANSFER) {
1925 *upl_size = MAX_UPL_TRANSFER * page_size;
1926 }
1927 }
1928 if((entry->vme_end - offset) < *upl_size) {
1929 *upl_size = entry->vme_end - offset;
1930 }
1931 if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
1932 if (entry->object.vm_object == VM_OBJECT_NULL) {
1933 *flags = 0;
1934 } else if (entry->object.vm_object->private) {
1935 *flags = UPL_DEV_MEMORY;
1936 if (entry->object.vm_object->phys_contiguous) {
1937 *flags |= UPL_PHYS_CONTIG;
1938 }
1939 } else {
1940 *flags = 0;
1941 }
1942 vm_map_unlock(map);
1943 return KERN_SUCCESS;
1944 }
1945 /*
1946 * Create an object if necessary.
1947 */
1948 if (entry->object.vm_object == VM_OBJECT_NULL) {
1949 entry->object.vm_object = vm_object_allocate(
1950 (vm_size_t)(entry->vme_end - entry->vme_start));
1951 entry->offset = 0;
1952 }
1953 if (!(caller_flags & UPL_COPYOUT_FROM)) {
0b4e3aa0 1954 if (entry->needs_copy) {
1955 vm_map_t local_map;
1956 vm_object_t object;
1957 vm_object_offset_t offset_hi;
1958 vm_object_offset_t offset_lo;
1959 vm_object_offset_t new_offset;
1960 vm_prot_t prot;
1961 boolean_t wired;
1962 vm_behavior_t behavior;
1963 vm_map_version_t version;
1964 vm_map_t pmap_map;
1965
1966 local_map = map;
1967 vm_map_lock_write_to_read(map);
1968 if(vm_map_lookup_locked(&local_map,
1969 offset, VM_PROT_WRITE,
1970 &version, &object,
1971 &new_offset, &prot, &wired,
1972 &behavior, &offset_lo,
1973 &offset_hi, &pmap_map)) {
1974 vm_map_unlock(local_map);
1975 return KERN_FAILURE;
1976 }
1977 if (pmap_map != map) {
1978 vm_map_unlock(pmap_map);
1979 }
1980 vm_object_unlock(object);
1981 vm_map_unlock(local_map);
1982
1983 goto REDISCOVER_ENTRY;
1984 }
1985 }
1986 if (entry->is_sub_map) {
1987 vm_map_t submap;
1988
1989 submap = entry->object.sub_map;
1990 local_start = entry->vme_start;
1991 local_offset = entry->offset;
1992 vm_map_reference(submap);
1c79356b 1993 vm_map_unlock(map);
1994
1995 ret = (vm_map_get_upl(submap,
1996 local_offset + (offset - local_start),
1997 upl_size, upl, page_list, count,
1998 flags, force_data_sync));
1999
2000 vm_map_deallocate(submap);
2001 return ret;
2002 }
2003
150bd074 2004 if (sync_cow_data) {
2005 if (entry->object.vm_object->shadow
2006 || entry->object.vm_object->copy) {
2007 int flags;
2008
2009 local_object = entry->object.vm_object;
2010 local_start = entry->vme_start;
2011 local_offset = entry->offset;
2012 vm_object_reference(local_object);
2013 vm_map_unlock(map);
2014
150bd074 2015 if(local_object->copy == NULL) {
2016 flags = MEMORY_OBJECT_DATA_SYNC;
2017 } else {
2018 flags = MEMORY_OBJECT_COPY_SYNC;
2019 }
2020
2021 if((local_object->paging_offset) &&
2022 (local_object->pager == 0)) {
2023 /*
2024 * do a little clean-up for our unorthodox
2025 * entry into a pager call from a non-pager
2026 * context. Normally the pager code
2027 * assumes that an object it has been called
2028 * with has a backing pager and so does
2029 * not bother to check the pager field
2030 * before relying on the paging_offset
2031 */
2032 vm_object_lock(local_object);
2033 if (local_object->pager == 0) {
2034 local_object->paging_offset = 0;
2035 }
2036 vm_object_unlock(local_object);
2037 }
1c79356b 2038
2039 if (entry->object.vm_object->shadow &&
2040 entry->object.vm_object->copy) {
2041 vm_object_lock_request(
2042 local_object->shadow,
2043 (vm_object_offset_t)
2044 ((offset - local_start) +
2045 local_offset) +
2046 local_object->shadow_offset +
2047 local_object->paging_offset,
2048 *upl_size, FALSE,
2049 MEMORY_OBJECT_DATA_SYNC,
2050 VM_PROT_NO_CHANGE);
2051 }
2052			sync_cow_data = FALSE;
2053			vm_object_deallocate(local_object);
2054			goto REDISCOVER_ENTRY;
2055 }
2056 }
2057
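	/*
	 * force_data_sync: issue a MEMORY_OBJECT_DATA_SYNC lock request for
	 * the requested range before the UPL is constructed, then retry
	 * the entry.
	 */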
2058 if (force_data_sync) {
2059
2060 local_object = entry->object.vm_object;
2061 local_start = entry->vme_start;
2062 local_offset = entry->offset;
2063 vm_object_reference(local_object);
2064		vm_map_unlock(map);
2065
2066 if((local_object->paging_offset) &&
2067 (local_object->pager == 0)) {
2068		/*
2069		 * Do a little clean-up for our unorthodox
2070		 * entry into a pager call from a non-pager
2071		 * context.  Normally the pager code
2072		 * assumes that an object it has been called
2073		 * with has a backing pager and so does
2074		 * not bother to check the pager field
2075		 * before relying on the paging_offset.
2076		 */
2077 vm_object_lock(local_object);
2078 if (local_object->pager == 0) {
2079 local_object->paging_offset = 0;
2080 }
2081 vm_object_unlock(local_object);
2082 }
2083
2084 vm_object_lock_request(
2085 local_object,
2086 (vm_object_offset_t)
2087 ((offset - local_start) + local_offset) +
2088 local_object->paging_offset,
2089 (vm_object_size_t)*upl_size, FALSE,
2090 MEMORY_OBJECT_DATA_SYNC,
2091			VM_PROT_NO_CHANGE);
2092		force_data_sync = FALSE;
2093		vm_object_deallocate(local_object);
2094		goto REDISCOVER_ENTRY;
2095 }
2096
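	/*
	 * Common case: for non-device objects, clamp the request to the
	 * maximum UPL transfer size and report physical contiguity in
	 * *flags; for device memory, report UPL_DEV_MEMORY.  Then issue the
	 * UPL request against the object with the map unlocked.
	 */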
2097 if(!(entry->object.vm_object->private)) {
2098 if(*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
2099 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
2100 if(entry->object.vm_object->phys_contiguous) {
2101 *flags = UPL_PHYS_CONTIG;
2102 } else {
2103 *flags = 0;
2104 }
2105 } else {
2106 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
2107 }
2108 local_object = entry->object.vm_object;
2109 local_offset = entry->offset;
2110 local_start = entry->vme_start;
2111 vm_object_reference(local_object);
2112	vm_map_unlock(map);
2113 ret = (vm_object_upl_request(local_object,
2114 (vm_object_offset_t)
2115 ((offset - local_start) + local_offset),
2116 *upl_size,
2117 upl,
2118 page_list,
2119			count,
2120			caller_flags));
2121 vm_object_deallocate(local_object);
2122 return(ret);
2123 }
2124
2125 vm_map_unlock(map);
2126 return(KERN_FAILURE);
2127
2128}
2129
2130/* ******* Temporary Internal calls to UPL for BSD ***** */
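/*
 * kernel_upl_map / kernel_upl_unmap: thin wrappers that enter a UPL's
 * pages into, or remove them from, the given map on behalf of BSD.
 */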
2131kern_return_t
2132kernel_upl_map(
2133 vm_map_t map,
2134 upl_t upl,
2135 vm_offset_t *dst_addr)
2136{
2137	return (vm_upl_map(map, upl, dst_addr));
2138}
2139
2140
2141kern_return_t
2142kernel_upl_unmap(
2143 vm_map_t map,
2144	upl_t			upl)
2145{
2146	return(vm_upl_unmap(map, upl));
2147}
2148
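/*
 * kernel_upl_commit: commit every page in the UPL, then drop the
 * kernel's reference to the UPL.
 */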
2149kern_return_t
2150kernel_upl_commit(
2151 upl_t upl,
2152 upl_page_info_t *pl,
2153 mach_msg_type_number_t count)
2154{
2155 kern_return_t kr;
2156
2157 kr = upl_commit(upl, pl, count);
2158 upl_deallocate(upl);
1c79356b
A
2159 return kr;
2160}
2161
2162
2163kern_return_t
2164kernel_upl_commit_range(
2165 upl_t upl,
2166 vm_offset_t offset,
2167 vm_size_t size,
2168 int flags,
2169 upl_page_info_array_t pl,
2170 mach_msg_type_number_t count)
2171{
2172 boolean_t finished = FALSE;
2173 kern_return_t kr;
2174
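	/*
	 * UPL_COMMIT_FREE_ON_EMPTY means the caller expects the UPL to be
	 * released once it is fully committed, so ask for the empty
	 * notification and deallocate the UPL here when the commit reports
	 * that it finished.
	 */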
2175 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
2176 flags |= UPL_COMMIT_NOTIFY_EMPTY;
2177
2178 kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
2179
2180 if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
2181 upl_deallocate(upl);
2182
2183 return kr;
2184}
2185
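/*
 * kernel_upl_abort_range: abort a range of the UPL, honoring the same
 * free-on-empty convention as kernel_upl_commit_range above.
 */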
2186kern_return_t
2187kernel_upl_abort_range(
2188 upl_t upl,
2189 vm_offset_t offset,
2190 vm_size_t size,
2191 int abort_flags)
2192{
2193 kern_return_t kr;
2194 boolean_t finished = FALSE;
2195
2196 if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
2197 abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
2198
2199	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
2200
2201 if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
2202 upl_deallocate(upl);
2203
2204	return kr;
2205}
2206
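/*
 * kernel_upl_abort: abort the entire UPL and drop the kernel's
 * reference to it.
 */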
2207kern_return_t
2208kernel_upl_abort(
2209 upl_t upl,
2210 int abort_type)
2211{
2212	kern_return_t	kr;
2213
2214 kr = upl_abort(upl, abort_type);
2215 upl_deallocate(upl);
2216 return kr;
2217}
2218
2219
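/*
 * vm_get_shared_region / vm_set_shared_region: accessors for the
 * task's system shared region, which is stored in the task structure
 * as a vm_offset_t and cast to and from shared_region_mapping_t.
 */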
2220kern_return_t
2221vm_get_shared_region(
2222 task_t task,
2223 shared_region_mapping_t *shared_region)
2224{
2225 *shared_region = (shared_region_mapping_t) task->system_shared_region;
2226 return KERN_SUCCESS;
2227}
2228
2229kern_return_t
2230vm_set_shared_region(
2231 task_t task,
2232 shared_region_mapping_t shared_region)
2233{
2234 task->system_shared_region = (vm_offset_t) shared_region;
2235 return KERN_SUCCESS;
2236}
2237
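/*
 * shared_region_mapping_info: copy out every field of the shared
 * region mapping under its lock.
 */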
2238kern_return_t
2239shared_region_mapping_info(
2240 shared_region_mapping_t shared_region,
2241 ipc_port_t *text_region,
2242 vm_size_t *text_size,
2243 ipc_port_t *data_region,
2244 vm_size_t *data_size,
2245 vm_offset_t *region_mappings,
2246 vm_offset_t *client_base,
2247 vm_offset_t *alt_base,
2248 vm_offset_t *alt_next,
2249 int *flags,
2250 shared_region_mapping_t *next)
2251{
2252 shared_region_mapping_lock(shared_region);
2253
2254 *text_region = shared_region->text_region;
2255 *text_size = shared_region->text_size;
2256 *data_region = shared_region->data_region;
2257 *data_size = shared_region->data_size;
2258 *region_mappings = shared_region->region_mappings;
2259 *client_base = shared_region->client_base;
2260 *alt_base = shared_region->alternate_base;
2261 *alt_next = shared_region->alternate_next;
2262 *flags = shared_region->flags;
2263 *next = shared_region->next;
2264
2265 shared_region_mapping_unlock(shared_region);
	return KERN_SUCCESS;
2266}
2267
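/*
 * shared_region_object_chain_attach: link target_region onto the chain
 * anchored at object_chain_region; fails if the target already has an
 * object chain attached.
 */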
2268kern_return_t
2269shared_region_object_chain_attach(
2270 shared_region_mapping_t target_region,
2271 shared_region_mapping_t object_chain_region)
2272{
2273 shared_region_object_chain_t object_ele;
2274
2275 if(target_region->object_chain)
2276 return KERN_FAILURE;
2277 object_ele = (shared_region_object_chain_t)
2278 kalloc(sizeof (struct shared_region_object_chain));
2279 shared_region_mapping_lock(object_chain_region);
2280 target_region->object_chain = object_ele;
2281 object_ele->object_chain_region = object_chain_region;
2282 object_ele->next = object_chain_region->object_chain;
2283 object_ele->depth = object_chain_region->depth;
2284 object_chain_region->depth++;
2285 target_region->alternate_next = object_chain_region->alternate_next;
2286 shared_region_mapping_unlock(object_chain_region);
2287 return KERN_SUCCESS;
2288}
2289
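/*
 * shared_region_mapping_create: allocate and initialize a new
 * shared_region_mapping carrying a single reference.
 */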
2290kern_return_t
2291shared_region_mapping_create(
2292 ipc_port_t text_region,
2293 vm_size_t text_size,
2294 ipc_port_t data_region,
2295 vm_size_t data_size,
2296 vm_offset_t region_mappings,
2297 vm_offset_t client_base,
2298 shared_region_mapping_t *shared_region,
2299 vm_offset_t alt_base,
2300 vm_offset_t alt_next)
2301{
2302 *shared_region = (shared_region_mapping_t)
2303 kalloc(sizeof (struct shared_region_mapping));
2304 if(*shared_region == NULL)
2305 return KERN_FAILURE;
2306 shared_region_mapping_lock_init((*shared_region));
2307 (*shared_region)->text_region = text_region;
2308 (*shared_region)->text_size = text_size;
2309 (*shared_region)->data_region = data_region;
2310 (*shared_region)->data_size = data_size;
2311 (*shared_region)->region_mappings = region_mappings;
2312 (*shared_region)->client_base = client_base;
2313 (*shared_region)->ref_count = 1;
2314 (*shared_region)->next = NULL;
2315 (*shared_region)->object_chain = NULL;
2316 (*shared_region)->self = *shared_region;
2317 (*shared_region)->flags = 0;
2318 (*shared_region)->depth = 0;
2319 (*shared_region)->alternate_base = alt_base;
2320 (*shared_region)->alternate_next = alt_next;
2321 return KERN_SUCCESS;
2322}
2323
2324kern_return_t
2325shared_region_mapping_set_alt_next(
2326 shared_region_mapping_t shared_region,
2327 vm_offset_t alt_next)
2328{
2329 shared_region->alternate_next = alt_next;
2330 return KERN_SUCCESS;
2331}
2332
2333kern_return_t
2334shared_region_mapping_ref(
2335 shared_region_mapping_t shared_region)
2336{
2337 if(shared_region == NULL)
2338 return KERN_SUCCESS;
2339 shared_region_mapping_lock(shared_region);
2340 shared_region->ref_count++;
2341 shared_region_mapping_unlock(shared_region);
2342 return KERN_SUCCESS;
2343}
2344
2345kern_return_t
2346shared_region_mapping_dealloc(
2347 shared_region_mapping_t shared_region)
2348{
2349 struct shared_region_task_mappings sm_info;
2350 shared_region_mapping_t next;
2351
2352 if(shared_region == NULL)
2353 return KERN_SUCCESS;
2354 shared_region_mapping_lock(shared_region);
2355
2356 if((--shared_region->ref_count) == 0) {
2357
2358 sm_info.text_region = shared_region->text_region;
2359 sm_info.text_size = shared_region->text_size;
2360 sm_info.data_region = shared_region->data_region;
2361 sm_info.data_size = shared_region->data_size;
2362 sm_info.region_mappings = shared_region->region_mappings;
2363 sm_info.client_base = shared_region->client_base;
2364 sm_info.alternate_base = shared_region->alternate_base;
2365 sm_info.alternate_next = shared_region->alternate_next;
2366 sm_info.flags = shared_region->flags;
2367		sm_info.self = (vm_offset_t)shared_region;
2368
2369 lsf_remove_regions_mappings(shared_region, &sm_info);
2370 pmap_remove(((vm_named_entry_t)
2371 (shared_region->text_region->ip_kobject))
2372 ->backing.map->pmap,
2373 sm_info.client_base,
2374 sm_info.client_base + sm_info.text_size);
2375 ipc_port_release_send(shared_region->text_region);
2376 ipc_port_release_send(shared_region->data_region);
2377 if(shared_region->object_chain) {
2378 shared_region_mapping_dealloc(
2379 shared_region->object_chain->object_chain_region);
2380 kfree((vm_offset_t)shared_region->object_chain,
2381 sizeof (struct shared_region_object_chain));
2382 }
2383 kfree((vm_offset_t)shared_region,
2384 sizeof (struct shared_region_mapping));
2385 return KERN_SUCCESS;
2386 }
2387 shared_region_mapping_unlock(shared_region);
2388 return KERN_SUCCESS;
2389}
2390
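/*
 * vm_map_get_phys_page: translate a map offset to the physical address
 * of its resident backing page, descending through submaps and the
 * object shadow chain.  Returns 0 if no page is resident.
 */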
2391vm_offset_t
2392vm_map_get_phys_page(
2393 vm_map_t map,
2394 vm_offset_t offset)
2395{
2396 vm_map_entry_t entry;
2397 int ops;
2398 int flags;
2399 vm_offset_t phys_addr = 0;
2400 vm_object_t object;
2401
2402 vm_map_lock(map);
2403 while (vm_map_lookup_entry(map, offset, &entry)) {
2404
2405 if (entry->object.vm_object == VM_OBJECT_NULL) {
2406 vm_map_unlock(map);
2407 return (vm_offset_t) 0;
2408 }
2409 if (entry->is_sub_map) {
2410 vm_map_t old_map;
2411 vm_map_lock(entry->object.sub_map);
2412 old_map = map;
2413 map = entry->object.sub_map;
2414 offset = entry->offset + (offset - entry->vme_start);
2415 vm_map_unlock(old_map);
2416 continue;
2417 }
2418 offset = entry->offset + (offset - entry->vme_start);
2419 object = entry->object.vm_object;
2420 vm_object_lock(object);
2421 while (TRUE) {
2422 vm_page_t dst_page = vm_page_lookup(object,offset);
2423 if(dst_page == VM_PAGE_NULL) {
2424 if(object->shadow) {
2425 vm_object_t old_object;
2426 vm_object_lock(object->shadow);
2427 old_object = object;
2428 offset = offset + object->shadow_offset;
2429 object = object->shadow;
2430 vm_object_unlock(old_object);
2431 } else {
2432 vm_object_unlock(object);
2433 break;
2434 }
2435 } else {
2436 phys_addr = dst_page->phys_addr;
2437 vm_object_unlock(object);
2438 break;
2439 }
2440 }
2441 break;
2442
2443 }
2444
2445 vm_map_unlock(map);
2446 return phys_addr;
2447}
2448#endif /* VM_CPM */