/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/vm_map_server.h>
#include <mach/mach_syscalls.h>
#include <mach/shared_memory_server.h>

#include <kern/host.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
vm_size_t	upl_offset_to_pagelist = 0;

ipc_port_t	dynamic_pager_control_port = NULL;
/*
 *	vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
vm_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addr,
	register vm_size_t	size,
	int			flags)
{
	kern_return_t	result;
	boolean_t	anywhere = VM_FLAGS_ANYWHERE & flags;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	if (anywhere)
		*addr = vm_map_min(map);
	else
		*addr = trunc_page(*addr);
	size = round_page(size);
	if (size == 0) {
		return(KERN_INVALID_ARGUMENT);
	}

	result = vm_map_enter(
			map,
			addr,
			size,
			(vm_offset_t)0,
			flags,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	return(result);
}
/*
 *	vm_deallocate deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
vm_deallocate(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, trunc_page(start),
			     round_page(start+size), VM_MAP_NO_FLAGS));
}
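
/*
 * Example (illustrative sketch, compiled out): an in-kernel caller
 * allocating and releasing a page-aligned scratch buffer in the current
 * task's map with the two routines above.  The helper name and the
 * four-page size are hypothetical; error handling is abbreviated.
 */
#if 0
static kern_return_t
example_scratch_buffer(void)
{
	vm_map_t	map = current_map();
	vm_offset_t	addr = 0;
	vm_size_t	size = 4 * PAGE_SIZE;
	kern_return_t	kr;

	kr = vm_allocate(map, &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use the zero-filled memory at [addr, addr+size) ... */

	return vm_deallocate(map, addr, size);
}
#endif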
/*
 *	vm_inherit sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
vm_inherit(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (new_inheritance > VM_INHERIT_LAST_VALID)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map,
			      trunc_page(start),
			      round_page(start+size),
			      new_inheritance));
}
/*
 *	vm_protect sets the protection of the specified range in the
 *	specified map.
 */
kern_return_t
vm_protect(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) ||
			(new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map,
			      trunc_page(start),
			      round_page(start+size),
			      new_protection,
			      set_maximum));
}
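
/*
 * Example (illustrative sketch, compiled out): making a range of the
 * current map read-only and keeping it out of child address spaces with
 * the two routines above.  The helper name is hypothetical.
 */
#if 0
static kern_return_t
example_seal_region(vm_offset_t start, vm_size_t size)
{
	vm_map_t	map = current_map();
	kern_return_t	kr;

	/* lower the current protection only (set_maximum == FALSE) */
	kr = vm_protect(map, start, size, FALSE, VM_PROT_READ);
	if (kr != KERN_SUCCESS)
		return kr;

	/* do not hand this range to tasks created from this one */
	return vm_inherit(map, start, size, VM_INHERIT_NONE);
}
#endif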
/*
 * Handle machine-specific attributes for a mapping, such
 * as cacheability, migratability, etc.
 */
kern_return_t
vm_machine_attribute(
	vm_map_t		map,
	vm_address_t		address,
	vm_size_t		size,
	vm_machine_attribute_t	attribute,
	vm_machine_attribute_val_t* value)		/* IN/OUT */
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	return vm_map_machine_attribute(map, address, size, attribute, value);
}
kern_return_t
vm_read(
	vm_map_t		map,
	vm_address_t		address,
	vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if ((error = vm_map_copyin(map,
				address,
				size,
				FALSE,	/* src_destroy */
				&ipc_address)) == KERN_SUCCESS) {
		*data = (pointer_t) ipc_address;
		*data_size = size;
	}
	return(error);
}
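
/*
 * Example (illustrative sketch, compiled out): copying one page out of
 * another task's map with vm_read and discarding the resulting copy
 * object.  The source map and address are hypothetical parameters.
 */
#if 0
static kern_return_t
example_peek_remote(vm_map_t remote_map, vm_address_t remote_addr)
{
	pointer_t		data;
	mach_msg_type_number_t	data_size;
	kern_return_t		kr;

	kr = vm_read(remote_map, remote_addr, PAGE_SIZE, &data, &data_size);
	if (kr != KERN_SUCCESS)
		return kr;

	/* for an in-kernel caller, "data" is really a vm_map_copy_t */
	vm_map_copy_discard((vm_map_copy_t) data);
	return KERN_SUCCESS;
}
#endif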
kern_return_t
vm_read_list(
	vm_map_t		map,
	vm_read_entry_t		data_list,
	mach_msg_type_number_t	count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	for(i=0; i<count; i++) {
		error = vm_map_copyin(map,
				data_list[i].address,
				data_list[i].size,
				FALSE,	/* src_destroy */
				&ipc_address);
		if(error != KERN_SUCCESS) {
			data_list[i].address = (vm_address_t)0;
			data_list[i].size = (vm_size_t)0;
			break;
		}
		if(data_list[i].size != 0) {
			error = vm_map_copyout(current_task()->map,
					&(data_list[i].address),
					(vm_map_copy_t) ipc_address);
			if(error != KERN_SUCCESS) {
				data_list[i].address = (vm_address_t)0;
				data_list[i].size = (vm_size_t)0;
				break;
			}
		}
	}
	return(error);
}
/*
 * This routine reads from the specified map and overwrites part of the current
 * activation's map.  In making an assumption that the current thread is local,
 * it is no longer cluster-safe without a fully supportive local proxy thread/
 * task (but we don't support clusters anymore so this is moot).
 */

#define VM_OVERWRITE_SMALL 512

kern_return_t
vm_read_overwrite(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_address_t	data,
	vm_size_t	*data_size)
{
	struct {
		long	align;
		char	buf[VM_OVERWRITE_SMALL];
	} inbuf;
	kern_return_t	error = KERN_SUCCESS;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size <= VM_OVERWRITE_SMALL) {
		if(vm_map_read_user(map, (vm_offset_t)address,
					(vm_offset_t)&inbuf, size)) {
			error = KERN_INVALID_ADDRESS;
		} else {
			if(vm_map_write_user(current_map(),
				(vm_offset_t)&inbuf, (vm_offset_t)data, size))
				error = KERN_INVALID_ADDRESS;
		}
	} else {
		if ((error = vm_map_copyin(map,
					address,
					size,
					FALSE,	/* src_destroy */
					&copy)) == KERN_SUCCESS) {
			if ((error = vm_map_copy_overwrite(
					current_act()->map,
					data,
					copy,
					FALSE)) == KERN_SUCCESS) {
			} else {
				vm_map_copy_discard(copy);
			}
		}
	}
	*data_size = size;
	return(error);
}
kern_return_t
vm_write(
	vm_map_t		map,
	vm_address_t		address,
	vm_offset_t		data,
	mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
				     FALSE /* interruptible XXX */);
}
kern_return_t
vm_copy(
	vm_map_t	map,
	vm_address_t	source_address,
	vm_size_t	size,
	vm_address_t	dest_address)
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, source_address, size,
			   FALSE, &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copy_overwrite(map, dest_address, copy,
				   FALSE /* interruptible XXX */);
	if (kr != KERN_SUCCESS) {
		vm_map_copy_discard(copy);
		return kr;
	}

	return KERN_SUCCESS;
}
kern_return_t
vm_map_64(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		initial_size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	register vm_object_t	object;
	vm_object_size_t	size = (vm_object_size_t)initial_size;
	kern_return_t		result;

	/*
	 * Check arguments for validity
	 */
	if ((target_map == VM_MAP_NULL) ||
		(cur_protection & ~VM_PROT_ALL) ||
		(max_protection & ~VM_PROT_ALL) ||
		(inheritance > VM_INHERIT_LAST_VALID) ||
		(size == 0))
		return(KERN_INVALID_ARGUMENT);

	/*
	 * Find the vm object (if any) corresponding to this port.
	 */
	if (!IP_VALID(port)) {
		object = VM_OBJECT_NULL;
		offset = 0;
		copy = FALSE;
	} else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
		vm_named_entry_t	named_entry;

		named_entry = (vm_named_entry_t)port->ip_kobject;
		/* a few checks to make sure user is obeying rules */
		if(size == 0) {
			if(offset >= named_entry->size)
				return(KERN_INVALID_RIGHT);
			size = named_entry->size - offset;
		}
		if((named_entry->protection & max_protection) != max_protection)
			return(KERN_INVALID_RIGHT);
		if((named_entry->protection & cur_protection) != cur_protection)
			return(KERN_INVALID_RIGHT);
		if(named_entry->size < (offset + size))
			return(KERN_INVALID_ARGUMENT);

		/* the callers parameter offset is defined to be the */
		/* offset from beginning of named entry offset in object */
		offset = offset + named_entry->offset;

		named_entry_lock(named_entry);
		if(named_entry->is_sub_map) {
			vm_map_entry_t	map_entry;

			named_entry_unlock(named_entry);
			*address = trunc_page(*address);
			size = round_page(size);
			vm_object_reference(vm_submap_object);
			if ((result = vm_map_enter(target_map,
				address, size, mask, flags,
				vm_submap_object, 0,
				FALSE,
				cur_protection, max_protection, inheritance
				)) != KERN_SUCCESS) {
				vm_object_deallocate(vm_submap_object);
			} else {
				char	alias;

				VM_GET_FLAGS_ALIAS(flags, alias);
				if ((alias == VM_MEMORY_SHARED_PMAP) &&
					!copy) {
					vm_map_submap(target_map, *address,
						(*address) + size,
						named_entry->backing.map,
						(vm_offset_t)offset, TRUE);
				} else {
					vm_map_submap(target_map, *address,
						(*address) + size,
						named_entry->backing.map,
						(vm_offset_t)offset, FALSE);
				}
				if(copy) {
					if(vm_map_lookup_entry(
					   target_map, *address, &map_entry)) {
						map_entry->needs_copy = TRUE;
					}
				}
			}
			return(result);

		} else if(named_entry->object) {
			/* This is the case where we are going to map */
			/* an already mapped object.  If the object is */
			/* not ready it is internal.  An external     */
			/* object cannot be mapped until it is ready  */
			/* we can therefore avoid the ready check     */
			/* in this case.  */
			named_entry_unlock(named_entry);
			vm_object_reference(named_entry->object);
			object = named_entry->object;
		} else {
			object = vm_object_enter(named_entry->backing.pager,
					named_entry->size,
					named_entry->internal,
					FALSE,
					FALSE);
			if (object == VM_OBJECT_NULL) {
				named_entry_unlock(named_entry);
				return(KERN_INVALID_OBJECT);
			}
			object->true_share = TRUE;
			named_entry->object = object;
			named_entry_unlock(named_entry);
			/* create an extra reference for the named entry */
			vm_object_reference(named_entry->object);
			/* wait for object (if any) to be ready */
			if (object != VM_OBJECT_NULL) {
				vm_object_lock(object);
				while (!object->pager_ready) {
					vm_object_wait(object,
						VM_OBJECT_EVENT_PAGER_READY,
						THREAD_UNINT);
					vm_object_lock(object);
				}
				vm_object_unlock(object);
			}
		}
	} else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
		/*
		 * JMM - This is temporary until we unify named entries
		 * and raw memory objects.
		 *
		 * Detected fake ip_kotype for a memory object.  In
		 * this case, the port isn't really a port at all, but
		 * instead is just a raw memory object.
		 */
		if ((object = vm_object_enter((memory_object_t)port,
					size, FALSE, FALSE, FALSE))
				== VM_OBJECT_NULL)
			return(KERN_INVALID_OBJECT);

		/* wait for object (if any) to be ready */
		if (object != VM_OBJECT_NULL) {
			vm_object_lock(object);
			while (!object->pager_ready) {
				vm_object_wait(object,
					VM_OBJECT_EVENT_PAGER_READY,
					THREAD_UNINT);
				vm_object_lock(object);
			}
			vm_object_unlock(object);
		}
	} else {
		return (KERN_INVALID_OBJECT);
	}

	*address = trunc_page(*address);
	size = round_page(size);

	/*
	 *	Perform the copy if requested
	 */
	if (copy) {
		vm_object_t		new_object;
		vm_object_offset_t	new_offset;

		result = vm_object_copy_strategically(object, offset, size,
				&new_object, &new_offset,
				&copy);

		if (result == KERN_MEMORY_RESTART_COPY) {
			boolean_t success;
			boolean_t src_needs_copy;

			/*
			 * We currently ignore src_needs_copy.
			 * This really is the issue of how to make
			 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
			 * non-kernel users to use. Solution forthcoming.
			 * In the meantime, since we don't allow non-kernel
			 * memory managers to specify symmetric copy,
			 * we won't run into problems here.
			 */
			new_object = object;
			new_offset = offset;
			success = vm_object_copy_quickly(&new_object,
							 new_offset, size,
							 &src_needs_copy,
							 &copy);
			assert(success);
			result = KERN_SUCCESS;
		}
		/*
		 *	Throw away the reference to the
		 *	original object, as it won't be mapped.
		 */
		vm_object_deallocate(object);

		if (result != KERN_SUCCESS)
			return (result);

		object = new_object;
		offset = new_offset;
	}

	if ((result = vm_map_enter(target_map,
				address, size, mask, flags,
				object, offset,
				copy,
				cur_protection, max_protection, inheritance
				)) != KERN_SUCCESS)
		vm_object_deallocate(object);
	return(result);
}

/* temporary, until world build */
kern_return_t
vm_map(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_offset_t		offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	return vm_map_64(target_map, address, size, mask, flags,
			port, (vm_object_offset_t)offset, copy,
			cur_protection, max_protection, inheritance);
}
/*
 * NOTE: this routine (and this file) will no longer require mach_host_server.h
 * when vm_wire is changed to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t		host_priv,
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL)
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, trunc_page(start),
				 round_page(start+size), access, TRUE);
	} else {
		rc = vm_map_unwire(map, trunc_page(start),
				   round_page(start+size), TRUE);
	}
	return rc;
}
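
/*
 * Example (illustrative sketch, compiled out): wiring a buffer in the
 * current map for the duration of an I/O and unwiring it afterwards.
 * vm_wire needs the privileged host port; in-kernel code can pass
 * &realhost.  The helper name is hypothetical.
 */
#if 0
static kern_return_t
example_wire_for_io(vm_offset_t start, vm_size_t size)
{
	vm_map_t	map = current_map();
	kern_return_t	kr;

	kr = vm_wire(&realhost, map, start, size,
		     VM_PROT_READ | VM_PROT_WRITE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... perform the I/O against the wired range ... */

	/* VM_PROT_NONE requests an unwire of the same range */
	return vm_wire(&realhost, map, start, size, VM_PROT_NONE);
}
#endif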
/*
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager engaging in a memory object synchronize dialog with
 *	the manager.  The client doesn't return until the manager issues
 *	m_o_s_completed message.  MIG Magically converts user task parameter
 *	to the task's address map.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	NOTE
 *	The memory object attributes have not yet been implemented, this
 *	function will have to deal with the invalidate attribute
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 */
kern_return_t
vm_msync(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_sync_t	sync_flags)
{
	msync_req_t		msr;
	msync_req_t		new_msr;
	queue_chain_t		req_q;	/* queue of requests for this msync */
	vm_map_entry_t		entry;
	vm_size_t		amount_left;
	vm_object_offset_t	offset;
	boolean_t		do_sync_req;
	boolean_t		modifiable;

	if ((sync_flags & VM_SYNC_ASYNCHRONOUS) &&
	    (sync_flags & VM_SYNC_SYNCHRONOUS))
		return(KERN_INVALID_ARGUMENT);

	/*
	 * align address and size on page boundaries
	 */
	size = round_page(address + size) - trunc_page(address);
	address = trunc_page(address);

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	if (size == 0)
		return(KERN_SUCCESS);

	queue_init(&req_q);
	amount_left = size;

	while (amount_left > 0) {
		vm_size_t	flush_size;
		vm_object_t	object;

		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, address, &entry)) {
			vm_size_t	skip;

			/*
			 * hole in the address map.
			 */

			/*
			 * Check for empty map.
			 */
			if (entry == vm_map_to_entry(map) &&
			    entry->vme_next == entry) {
				vm_map_unlock(map);
				break;
			}
			/*
			 * Check that we don't wrap and that
			 * we have at least one real map entry.
			 */
			if ((map->hdr.nentries == 0) ||
			    (entry->vme_next->vme_start < address)) {
				vm_map_unlock(map);
				break;
			}
			/*
			 * Move up to the next entry if needed
			 */
			skip = (entry->vme_next->vme_start - address);
			if (skip >= amount_left)
				amount_left = 0;
			else
				amount_left -= skip;
			address = entry->vme_next->vme_start;
			vm_map_unlock(map);
			continue;
		}

		offset = address - entry->vme_start;

		/*
		 * do we have more to flush than is contained in this
		 * entry ?
		 */
		if (amount_left + entry->vme_start + offset > entry->vme_end) {
			flush_size = entry->vme_end -
					(entry->vme_start + offset);
		} else {
			flush_size = amount_left;
		}
		amount_left -= flush_size;
		address += flush_size;

		if (entry->is_sub_map == TRUE) {
			vm_map_t	local_map;
			vm_offset_t	local_offset;

			local_map = entry->object.sub_map;
			local_offset = entry->offset;
			vm_map_unlock(map);
			vm_msync(
				local_map,
				local_offset,
				flush_size,
				sync_flags);
			continue;
		}
		object = entry->object.vm_object;

		/*
		 * We can't sync this object if the object has not been
		 * created yet
		 */
		if (object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			continue;
		}
		offset += entry->offset;
		modifiable = (entry->protection & VM_PROT_WRITE)
				!= VM_PROT_NONE;

		vm_object_lock(object);

		if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) {
			boolean_t kill_pages = 0;

			if (sync_flags & VM_SYNC_KILLPAGES) {
				if (object->ref_count == 1 && !entry->needs_copy && !object->shadow)
					kill_pages = 1;
				else
					kill_pages = -1;
			}
			if (kill_pages != -1)
				vm_object_deactivate_pages(object, offset,
					(vm_object_size_t)flush_size, kill_pages);
			vm_object_unlock(object);
			vm_map_unlock(map);
			continue;
		}
		/*
		 * We can't sync this object if there isn't a pager.
		 * Don't bother to sync internal objects, since there can't
		 * be any "permanent" storage for these objects anyway.
		 */
		if ((object->pager == MEMORY_OBJECT_NULL) ||
		    (object->internal) || (object->private)) {
			vm_object_unlock(object);
			vm_map_unlock(map);
			continue;
		}
		/*
		 * keep reference on the object until syncing is done
		 */
		assert(object->ref_count > 0);
		object->ref_count++;
		vm_object_res_reference(object);
		vm_object_unlock(object);

		vm_map_unlock(map);

		do_sync_req = vm_object_sync(object,
					offset,
					flush_size,
					sync_flags & VM_SYNC_INVALIDATE,
					(modifiable &&
					(sync_flags & VM_SYNC_SYNCHRONOUS ||
					 sync_flags & VM_SYNC_ASYNCHRONOUS)));

		/*
		 * only send a m_o_s if we returned pages or if the entry
		 * is writable (ie dirty pages may have already been sent back)
		 */
		if (!do_sync_req && !modifiable) {
			vm_object_deallocate(object);
			continue;
		}
		msync_req_alloc(new_msr);

		vm_object_lock(object);
		offset += object->paging_offset;

		new_msr->offset = offset;
		new_msr->length = flush_size;
		new_msr->object = object;
		new_msr->flag = VM_MSYNC_SYNCHRONIZING;
re_iterate:
		queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
			/*
			 * need to check for overlapping entry, if found, wait
			 * on overlapping msr to be done, then reiterate
			 */
			msr_lock(msr);
			if (msr->flag == VM_MSYNC_SYNCHRONIZING &&
			    ((offset >= msr->offset &&
			      offset < (msr->offset + msr->length)) ||
			     (msr->offset >= offset &&
			      msr->offset < (offset + flush_size)))) {
				assert_wait((event_t) msr,THREAD_INTERRUPTIBLE);
				msr_unlock(msr);
				vm_object_unlock(object);
				thread_block((void (*)(void))0);
				vm_object_lock(object);
				goto re_iterate;
			}
			msr_unlock(msr);
		}/* queue_iterate */

		queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q);
		vm_object_unlock(object);

		queue_enter(&req_q, new_msr, msync_req_t, req_q);

		(void) memory_object_synchronize(
				object->pager,
				object->pager_request,
				offset,
				flush_size,
				sync_flags);
	}/* while */

	/*
	 * wait for memory_object_synchronize_completed messages from pager(s)
	 */
	while (!queue_empty(&req_q)) {
		msr = (msync_req_t)queue_first(&req_q);
		msr_lock(msr);
		while(msr->flag != VM_MSYNC_DONE) {
			assert_wait((event_t) msr, THREAD_INTERRUPTIBLE);
			msr_unlock(msr);
			thread_block((void (*)(void))0);
			msr_lock(msr);
		}/* while */
		msr_unlock(msr);
		queue_remove(&req_q, msr, msync_req_t, req_q);

		vm_object_deallocate(msr->object);
		msync_req_free(msr);
	}/* while */

	return(KERN_SUCCESS);
}/* vm_msync */
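
/*
 * Example (illustrative sketch, compiled out): the sync_flags combinations
 * described in the comment above, applied to a pager-backed mapping in the
 * current map.  The address/length values are hypothetical placeholders.
 */
#if 0
static void
example_msync_flags(vm_address_t addr, vm_size_t len)
{
	vm_map_t map = current_map();

	/* write dirty/precious pages back and wait for the pager */
	(void) vm_msync(map, addr, len, VM_SYNC_SYNCHRONOUS);

	/* start the write-back but do not wait for completion */
	(void) vm_msync(map, addr, len, VM_SYNC_ASYNCHRONOUS);

	/* flush and discard: write dirty pages back, then toss the range */
	(void) vm_msync(map, addr, len,
			VM_SYNC_INVALIDATE | VM_SYNC_SYNCHRONOUS);

	/*
	 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS together is rejected
	 * with KERN_INVALID_ARGUMENT, as noted above.
	 */
}
#endif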
/*
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (must_wire)
		map->wiring_required = TRUE;
	else
		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
}
/*
 *	vm_behavior_set sets the paging behavior attribute for the
 *	specified range in the specified map. This routine will fail
 *	with KERN_INVALID_ADDRESS if any address in [start,start+size)
 *	is not a valid allocated or reserved memory region.
 */
kern_return_t
vm_behavior_set(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size,
	vm_behavior_t	new_behavior)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_behavior_set(map, trunc_page(start),
				   round_page(start+size), new_behavior));
}
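
/*
 * Example (illustrative sketch, compiled out): advising the VM system that
 * a large mapping will be streamed through once, then restoring the default
 * behavior.  The range is a hypothetical placeholder.
 */
#if 0
static void
example_sequential_hint(vm_offset_t start, vm_size_t size)
{
	vm_map_t map = current_map();

	/* encourage aggressive read-ahead and early reuse of pages */
	(void) vm_behavior_set(map, start, size, VM_BEHAVIOR_SEQUENTIAL);

	/* ... stream through the mapping ... */

	(void) vm_behavior_set(map, start, size, VM_BEHAVIOR_DEFAULT);
}
#endif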
#if	VM_CPM
/*
 *	Control whether the kernel will permit use of
 *	vm_allocate_cpm at all.
 */
unsigned int	vm_allocate_cpm_enabled = 1;

/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host port).  Set this variable to zero if you
 *	want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
kern_return_t
vm_allocate_cpm(
	host_priv_t		host_priv,
	register vm_map_t	map,
	register vm_offset_t	*addr,
	register vm_size_t	size,
	int			flags)
{
	vm_object_t		cpm_obj;
	pmap_t			pmap;
	vm_page_t		pages, m;
	kern_return_t		kr;
	vm_offset_t		va, start, end, offset;
#if	MACH_ASSERT
	extern vm_offset_t	avail_start, avail_end;
	vm_offset_t		prev_addr;
#endif	/* MACH_ASSERT */

	boolean_t		anywhere = VM_FLAGS_ANYWHERE & flags;

	if (!vm_allocate_cpm_enabled)
		return KERN_FAILURE;

	if (vm_allocate_cpm_privileged && host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	assert(host_priv == &realhost);

	if (size == 0) {
		*addr = 0;
		return KERN_SUCCESS;
	}

	if (anywhere)
		*addr = vm_map_min(map);
	else
		*addr = trunc_page(*addr);
	size = round_page(size);

	if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS)
		return kr;

	cpm_obj = vm_object_allocate(size);
	assert(cpm_obj != VM_OBJECT_NULL);
	assert(cpm_obj->internal);
	assert(cpm_obj->size == size);
	assert(cpm_obj->can_persist == FALSE);
	assert(cpm_obj->pager_created == FALSE);
	assert(cpm_obj->pageout == FALSE);
	assert(cpm_obj->shadow == VM_OBJECT_NULL);

	/*
	 *	Insert pages into object.
	 */
	vm_object_lock(cpm_obj);
	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);

		assert(!m->gobbled);
		assert(!m->pageout);
		assert(m->phys_addr>=avail_start && m->phys_addr<=avail_end);

		vm_page_insert(m, cpm_obj, offset);
	}
	assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
	vm_object_unlock(cpm_obj);

	/*
	 *	Hang onto a reference on the object in case a
	 *	multi-threaded application for some reason decides
	 *	to deallocate the portion of the address space into
	 *	which we will insert this object.
	 *
	 *	Unfortunately, we must insert the object now before
	 *	we can talk to the pmap module about which addresses
	 *	must be wired down.  Hence, the race with a multi-
	 *	threaded app.
	 */
	vm_object_reference(cpm_obj);

	/*
	 *	Insert object into map.
	 */
	kr = vm_map_enter(
		map,
		addr,
		size,
		(vm_offset_t)0,		/* mask */
		flags,
		cpm_obj,
		(vm_object_offset_t)0,
		FALSE,
		VM_PROT_ALL,
		VM_PROT_ALL,
		VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS) {
		/*
		 *	A CPM object doesn't have can_persist set,
		 *	so all we have to do is deallocate it to
		 *	free up these pages.
		 */
		assert(cpm_obj->pager_created == FALSE);
		assert(cpm_obj->can_persist == FALSE);
		assert(cpm_obj->pageout == FALSE);
		assert(cpm_obj->shadow == VM_OBJECT_NULL);
		vm_object_deallocate(cpm_obj); /* kill acquired ref */
		vm_object_deallocate(cpm_obj); /* kill creation ref */
		return kr;
	}

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */
	start = *addr;
	end = start + size;
	pmap = vm_map_pmap(map);
	pmap_pageable(pmap, start, end, FALSE);

	/*
	 *	Enter each page into the pmap, to avoid faults.
	 *	Note that this loop could be coded more efficiently,
	 *	if the need arose, rather than looking up each page
	 *	again.
	 */
	for (offset = 0, va = start; offset < size;
	     va += PAGE_SIZE, offset += PAGE_SIZE) {
		vm_object_lock(cpm_obj);
		m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
		vm_object_unlock(cpm_obj);
		assert(m != VM_PAGE_NULL);
		PMAP_ENTER(pmap, va, m, VM_PROT_ALL, TRUE);
	}

#if	MACH_ASSERT
	/*
	 *	Verify ordering in address space.
	 */
	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		vm_object_lock(cpm_obj);
		m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
		vm_object_unlock(cpm_obj);
		if (m == VM_PAGE_NULL)
			panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
			      cpm_obj, offset);
		assert(!m->fictitious);
		assert(!m->private);
		assert(!m->cleaning);
		assert(!m->precious);
		assert(!m->clustered);
		if (offset != 0 && m->phys_addr != prev_addr + PAGE_SIZE) {
			printf("start 0x%x end 0x%x va 0x%x\n",
			       start, end, va);
			printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
			printf("m 0x%x prev_address 0x%x\n", m,
			       prev_addr);
			panic("vm_allocate_cpm: pages not contig!");
		}
		prev_addr = m->phys_addr;
	}
#endif	/* MACH_ASSERT */

	vm_object_deallocate(cpm_obj); /* kill extra ref */

	return kr;
}
#else	/* VM_CPM */

/*
 *	Interface is defined in all cases, but unless the kernel
 *	is built explicitly for this option, the interface does
 *	nothing.
 */
kern_return_t
vm_allocate_cpm(
	host_priv_t		host_priv,
	register vm_map_t	map,
	register vm_offset_t	*addr,
	register vm_size_t	size,
	int			flags)
{
	return KERN_FAILURE;
}

#endif	/* VM_CPM */
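
/*
 * Example (illustrative sketch, compiled out): obtaining a physically
 * contiguous buffer, e.g. for a hypothetical DMA engine, on a kernel built
 * with VM_CPM.  As noted above, the memory is returned with vm_deallocate,
 * and the call simply fails on non-VM_CPM kernels.
 */
#if 0
static kern_return_t
example_contiguous_buffer(vm_offset_t *addr, vm_size_t size)
{
	vm_map_t	map = current_map();
	kern_return_t	kr;

	*addr = 0;
	kr = vm_allocate_cpm(&realhost, map, addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;	/* KERN_FAILURE when VM_CPM is not built in */

	/* ... hand [*addr, *addr+size) to the device, then ... */

	return vm_deallocate(map, *addr, size);
}
#endif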
kern_return_t
mach_memory_object_memory_entry_64(
	host_t			host,
	boolean_t		internal,
	vm_object_offset_t	size,
	vm_prot_t		permission,
	memory_object_t		pager,
	ipc_port_t		*entry_handle)
{
	vm_named_entry_t	user_object;
	ipc_port_t		user_handle;
	ipc_port_t		previous;

	if (host == HOST_NULL)
		return(KERN_INVALID_HOST);

	user_object = (vm_named_entry_t)
			kalloc(sizeof (struct vm_named_entry));
	if(user_object == NULL)
		return KERN_FAILURE;
	named_entry_lock_init(user_object);
	user_handle = ipc_port_alloc_kernel();
	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	user_object->object = NULL;
	user_object->size = size;
	user_object->offset = 0;
	user_object->backing.pager = pager;
	user_object->protection = permission;
	user_object->internal = internal;
	user_object->is_sub_map = FALSE;
	user_object->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
							IKOT_NAMED_ENTRY);
	*entry_handle = user_handle;
	return KERN_SUCCESS;
}
kern_return_t
mach_memory_object_memory_entry(
	host_t		host,
	boolean_t	internal,
	vm_size_t	size,
	vm_prot_t	permission,
	memory_object_t	pager,
	ipc_port_t	*entry_handle)
{
	return mach_memory_object_memory_entry_64( host, internal,
		(vm_object_offset_t)size, permission, pager, entry_handle);
}
kern_return_t
mach_make_memory_entry_64(
	vm_map_t		target_map,
	vm_object_size_t	*size,
	vm_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	vm_map_version_t	version;
	vm_named_entry_t	user_object;
	ipc_port_t		user_handle;
	ipc_port_t		previous;
	kern_return_t		kr;
	vm_map_t		pmap_map;

	/* needed for call to vm_map_lookup_locked */
	boolean_t		wired;
	vm_object_offset_t	obj_off;
	vm_prot_t		prot;
	vm_object_offset_t	lo_offset, hi_offset;
	vm_behavior_t		behavior;
	vm_object_t		object;

	/* needed for direct map entry manipulation */
	vm_map_entry_t		map_entry;
	vm_map_t		local_map;
	vm_object_size_t	mappable_size;

	user_object = (vm_named_entry_t)
			kalloc(sizeof (struct vm_named_entry));
	if(user_object == NULL)
		return KERN_FAILURE;
	named_entry_lock_init(user_object);
	user_handle = ipc_port_alloc_kernel();
	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	user_object->backing.pager = NULL;
	user_object->ref_count = 1;

	if(parent_entry == NULL) {
	/* Create a named object based on address range within the task map */
	/* Go find the object at given address */

		permission &= VM_PROT_ALL;
		vm_map_lock_read(target_map);

		/* get the object associated with the target address */
		/* note we check the permission of the range against */
		/* that requested by the caller */

		kr = vm_map_lookup_locked(&target_map, offset,
				permission, &version,
				&object, &obj_off, &prot, &wired, &behavior,
				&lo_offset, &hi_offset, &pmap_map);
		if (kr != KERN_SUCCESS) {
			vm_map_unlock_read(target_map);
			goto make_mem_done;
		}
		if ((prot & permission) != permission) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if(pmap_map != target_map)
				vm_map_unlock_read(pmap_map);
			goto make_mem_done;
		}

		/* We have an object, now check to see if this object */
		/* is suitable.  If not, create a shadow and share that */

		local_map = target_map;
redo_lookup:
		if(!vm_map_lookup_entry(local_map, offset, &map_entry)) {
			kr = KERN_INVALID_ARGUMENT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if(pmap_map != target_map)
				vm_map_unlock_read(pmap_map);
			goto make_mem_done;
		}
		if(!(map_entry->is_sub_map)) {
			if(map_entry->object.vm_object != object) {
				kr = KERN_INVALID_ARGUMENT;
				vm_object_unlock(object);
				vm_map_unlock_read(target_map);
				if(pmap_map != target_map)
					vm_map_unlock_read(pmap_map);
				goto make_mem_done;
			}
		} else {
			local_map = map_entry->object.sub_map;
			vm_map_lock_read(local_map);
			vm_map_unlock_read(target_map);
			if(pmap_map != target_map)
				vm_map_unlock_read(pmap_map);
			target_map = local_map;
			goto redo_lookup;
		}
		if(((map_entry->max_protection) & permission) != permission) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if(pmap_map != target_map)
				vm_map_unlock_read(pmap_map);
			goto make_mem_done;
		}
		if(object->internal) {
			/* vm_map_lookup_locked will create a shadow if   */
			/* needs_copy is set but does not check for the   */
			/* other two conditions shown. It is important to */
			/* set up an object which will not be pulled from */
			/* under us.  */

			if ((map_entry->needs_copy || object->shadowed ||
				(object->size >
					((vm_object_size_t)map_entry->vme_end -
					map_entry->vme_start)))
					&& !object->true_share) {
				if (vm_map_lock_read_to_write(target_map)) {
					vm_map_lock_read(target_map);
					goto redo_lookup;
				}

				/* create a shadow object */

				vm_object_shadow(&map_entry->object.vm_object,
					&map_entry->offset,
					(map_entry->vme_end
					 - map_entry->vme_start));
				map_entry->needs_copy = FALSE;
				vm_object_unlock(object);
				object = map_entry->object.vm_object;
				vm_object_lock(object);
				object->size = map_entry->vme_end
						- map_entry->vme_start;
				obj_off = (offset - map_entry->vme_start) +
						map_entry->offset;
				lo_offset = map_entry->offset;
				hi_offset = (map_entry->vme_end -
						map_entry->vme_start) +
						map_entry->offset;

				vm_map_lock_write_to_read(target_map);
			}
		}

		/* note: in the future we can (if necessary) allow for  */
		/* memory object lists, this will better support        */
		/* fragmentation, but is it necessary?  The user should */
		/* be encouraged to create address space oriented       */
		/* shared objects from CLEAN memory regions which have  */
		/* a known and defined history.  i.e. no inheritance    */
		/* share, make this call before making the region the   */
		/* target of ipc's, etc.  The code above, protecting    */
		/* against delayed copy, etc. is mostly defensive.      */

		object->true_share = TRUE;
		user_object->object = object;
		user_object->internal = object->internal;
		user_object->is_sub_map = FALSE;
		user_object->offset = obj_off;
		user_object->protection = permission;

		/* the size of mapped entry that overlaps with our region */
		/* which is targeted for share.                           */
		/* (entry_end - entry_start) -                            */
		/*                   offset of our beg addr within entry  */
		/* it corresponds to this:                                */

		mappable_size = hi_offset - obj_off;
		if(*size > mappable_size)
			*size = mappable_size;

		user_object->size = *size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in.		       */

		object->ref_count++; /* we now point to this object, hold on */
		vm_object_res_reference(object);
		vm_object_unlock(object);
		ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
							IKOT_NAMED_ENTRY);
		*size = user_object->size;
		*object_handle = user_handle;
		vm_map_unlock_read(target_map);
		if(pmap_map != target_map)
			vm_map_unlock_read(pmap_map);
		return KERN_SUCCESS;
	} else {
		vm_named_entry_t	parent_object;

		/* The new object will be based on an existing named object */
		if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}
		parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
		if((permission & parent_object->protection) != permission) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}
		if((offset + *size) > parent_object->size) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}

		user_object->object = parent_object->object;
		user_object->size = *size;
		user_object->offset = parent_object->offset + offset;
		user_object->protection = permission;
		if(parent_object->is_sub_map) {
			user_object->backing.map = parent_object->backing.map;
			vm_map_lock(user_object->backing.map);
			user_object->backing.map->ref_count++;
			vm_map_unlock(user_object->backing.map);
		} else {
			user_object->backing.pager = parent_object->backing.pager;
		}
		user_object->internal = parent_object->internal;
		user_object->is_sub_map = parent_object->is_sub_map;

		if(parent_object->object != NULL) {
			/* we now point to this object, hold on */
			vm_object_reference(parent_object->object);
			vm_object_lock(parent_object->object);
			parent_object->object->true_share = TRUE;
			vm_object_unlock(parent_object->object);
		}
		ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
							IKOT_NAMED_ENTRY);
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}

make_mem_done:
	ipc_port_dealloc_kernel(user_handle);
	kfree((vm_offset_t)user_object, sizeof (struct vm_named_entry));
	return kr;
}
kern_return_t
mach_make_memory_entry(
	vm_map_t	target_map,
	vm_size_t	*size,
	vm_offset_t	offset,
	vm_prot_t	permission,
	ipc_port_t	*object_handle,
	ipc_port_t	parent_entry)
{
	vm_object_offset_t	size_64;
	kern_return_t		kr;

	size_64 = (vm_object_offset_t)*size;
	kr = mach_make_memory_entry_64(target_map, &size_64,
			(vm_object_offset_t)offset, permission, object_handle,
			parent_entry);
	*size = (vm_size_t)size_64;
	return kr;
}
kern_return_t
vm_region_object_create(
	vm_map_t	target_map,
	vm_size_t	size,
	ipc_port_t	*object_handle)
{
	vm_named_entry_t	user_object;
	ipc_port_t		user_handle;
	ipc_port_t		previous;
	vm_map_t		new_map;

	pmap_t	new_pmap = pmap_create((vm_size_t) 0);

	if(new_pmap == PMAP_NULL)
		return KERN_FAILURE;
	user_object = (vm_named_entry_t)
			kalloc(sizeof (struct vm_named_entry));
	if(user_object == NULL) {
		pmap_destroy(new_pmap);
		return KERN_FAILURE;
	}
	named_entry_lock_init(user_object);
	user_handle = ipc_port_alloc_kernel();

	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(new_pmap, 0, size, TRUE);
	user_object->backing.map = new_map;

	user_object->object = VM_OBJECT_NULL;
	user_object->internal = TRUE;
	user_object->is_sub_map = TRUE;
	user_object->offset = 0;
	user_object->protection = VM_PROT_ALL;
	user_object->size = size;
	user_object->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
							IKOT_NAMED_ENTRY);
	*object_handle = user_handle;
	return KERN_SUCCESS;
}
/* For a given range, check all map entries.  If the entry corresponds to */
/* the old vm_region/map provided on the call, replace it with the        */
/* corresponding range in the new vm_region/map */
kern_return_t vm_map_region_replace(
	vm_map_t	target_map,
	ipc_port_t	old_region,
	ipc_port_t	new_region,
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_named_entry_t	old_object;
	vm_named_entry_t	new_object;
	vm_map_t		old_submap;
	vm_map_t		new_submap;
	vm_offset_t		addr;
	vm_map_entry_t		entry;
	int			nested_pmap = 0;

	vm_map_lock(target_map);
	old_object = (vm_named_entry_t)old_region->ip_kobject;
	new_object = (vm_named_entry_t)new_region->ip_kobject;
	if((!old_object->is_sub_map) || (!new_object->is_sub_map)) {
		vm_map_unlock(target_map);
		return KERN_INVALID_ARGUMENT;
	}
	old_submap = (vm_map_t)old_object->backing.map;
	new_submap = (vm_map_t)new_object->backing.map;
	vm_map_lock(old_submap);
	if((old_submap->min_offset != new_submap->min_offset) ||
			(old_submap->max_offset != new_submap->max_offset)) {
		vm_map_unlock(old_submap);
		vm_map_unlock(target_map);
		return KERN_INVALID_ARGUMENT;
	}
	if(!vm_map_lookup_entry(target_map, start, &entry)) {
		/* if the src is not contained, the entry precedes */
		/* our range */
		addr = entry->vme_start;
		if(entry == vm_map_to_entry(target_map)) {
			vm_map_unlock(old_submap);
			vm_map_unlock(target_map);
			return KERN_SUCCESS;
		}
		vm_map_lookup_entry(target_map, addr, &entry);
	}
	addr = entry->vme_start;
	vm_map_reference(old_submap);
	while((entry != vm_map_to_entry(target_map)) &&
	      (entry->vme_start < end)) {
		if((entry->is_sub_map) &&
			(entry->object.sub_map == old_submap)) {
			entry->object.sub_map = new_submap;
			if(entry->use_pmap) {
				if((start & 0xfffffff) ||
					((end - start) != 0x10000000)) {
					vm_map_unlock(old_submap);
					vm_map_unlock(target_map);
					return KERN_INVALID_ARGUMENT;
				}
				nested_pmap = 1;
			}
			vm_map_reference(new_submap);
			vm_map_deallocate(old_submap);
		}
		entry = entry->vme_next;
		addr = entry->vme_start;
	}
	if(nested_pmap) {
		pmap_unnest(target_map->pmap, start, end - start);
		pmap_nest(target_map->pmap, new_submap->pmap,
						start, end - start);
	} else {
		pmap_remove(target_map->pmap, start, end);
	}
	vm_map_unlock(old_submap);
	vm_map_unlock(target_map);
	return KERN_SUCCESS;
}
void
mach_destroy_memory_entry(
	ipc_port_t	port)
{
	vm_named_entry_t	named_entry;
#if MACH_ASSERT
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
	named_entry = (vm_named_entry_t)port->ip_kobject;
	mutex_lock(&(named_entry)->Lock);
	named_entry->ref_count -= 1;
	if(named_entry->ref_count == 0) {
		if(named_entry->object) {
			/* release the memory object we've been pointing to */
			vm_object_deallocate(named_entry->object);
		}
		if(named_entry->is_sub_map) {
			vm_map_deallocate(named_entry->backing.map);
		}
		kfree((vm_offset_t)port->ip_kobject,
				sizeof (struct vm_named_entry));
	} else
		mutex_unlock(&(named_entry)->Lock);
}
kern_return_t
vm_map_page_query(
	vm_map_t	target_map,
	vm_offset_t	offset,
	int		*disposition,
	int		*ref_count)
{
	vm_map_entry_t	map_entry;
	vm_object_t	object;
	vm_page_t	m;

restart_page_query:
	*disposition = 0;
	*ref_count = 0;
	vm_map_lock(target_map);
	if(!vm_map_lookup_entry(target_map, offset, &map_entry)) {
		vm_map_unlock(target_map);
		return KERN_FAILURE;
	}
	offset -= map_entry->vme_start;	/* adjust to offset within entry */
	offset += map_entry->offset;	/* adjust to target object offset */
	if(map_entry->object.vm_object != VM_OBJECT_NULL) {
		if(!map_entry->is_sub_map) {
			object = map_entry->object.vm_object;
		} else {
			vm_map_unlock(target_map);
			target_map = map_entry->object.sub_map;
			goto restart_page_query;
		}
	} else {
		vm_map_unlock(target_map);
		return KERN_FAILURE;
	}
	vm_object_lock(object);
	vm_map_unlock(target_map);
	while(TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			*disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
			break;
		} else {
			if(object->shadow) {
				offset += object->shadow_offset;
				vm_object_unlock(object);
				object = object->shadow;
				vm_object_lock(object);
				continue;
			}
			vm_object_unlock(object);
			return KERN_FAILURE;
		}
	}

	/* The ref_count is not strictly accurate, it measures the number   */
	/* of entities holding a ref on the object, they may not be mapping */
	/* the object or may not be mapping the section holding the         */
	/* target page but its still a ball park number and though an over- */
	/* count, it picks up the copy-on-write cases                       */

	/* We could also get a picture of page sharing from pmap_attributes */
	/* but this would under count as only faulted-in mappings would     */
	/* show up.							     */

	*ref_count = object->ref_count;

	if (m->fictitious) {
		*disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
		vm_object_unlock(object);
		return KERN_SUCCESS;
	}

	if (m->dirty)
		*disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
	else if(pmap_is_modified(m->phys_addr))
		*disposition |= VM_PAGE_QUERY_PAGE_DIRTY;

	if (m->reference)
		*disposition |= VM_PAGE_QUERY_PAGE_REF;
	else if(pmap_is_referenced(m->phys_addr))
		*disposition |= VM_PAGE_QUERY_PAGE_REF;

	vm_object_unlock(object);
	return KERN_SUCCESS;
}
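
/*
 * Example (illustrative sketch, compiled out): interpreting the disposition
 * bits returned by vm_map_page_query for one page of the current map.
 */
#if 0
static void
example_page_state(vm_offset_t addr)
{
	int disposition, ref_count;

	if (vm_map_page_query(current_map(), trunc_page(addr),
			      &disposition, &ref_count) != KERN_SUCCESS)
		return;

	if (disposition & VM_PAGE_QUERY_PAGE_PRESENT) {
		/* resident; ref_count is a ballpark sharing estimate */
		if (disposition & VM_PAGE_QUERY_PAGE_DIRTY) {
			/* modified since it was paged in */
		}
		if (disposition & VM_PAGE_QUERY_PAGE_REF) {
			/* recently referenced */
		}
	}
}
#endif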
kern_return_t
set_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	if (IP_VALID(dynamic_pager_control_port))
		ipc_port_release_send(dynamic_pager_control_port);

	dynamic_pager_control_port = control_port;
	return KERN_SUCCESS;
}
kern_return_t
get_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	*control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	*control_port = ipc_port_copy_send(dynamic_pager_control_port);
	return KERN_SUCCESS;
}
/* Retrieve a upl for an object underlying an address range in a map */

kern_return_t
vm_map_get_upl(
	vm_map_t		map,
	vm_address_t		offset,
	vm_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			*flags,
	int			force_data_sync)
{
	vm_map_entry_t	entry;
	int		caller_flags;
	int		sync_cow_data = FALSE;
	vm_object_t	local_object;
	vm_offset_t	local_offset;
	vm_offset_t	local_start;
	kern_return_t	ret;

	caller_flags = *flags;
	if (!(caller_flags & UPL_COPYOUT_FROM)) {
		sync_cow_data = TRUE;
	}
	if(upl == NULL)
		return KERN_INVALID_ARGUMENT;

REDISCOVER_ENTRY:
	vm_map_lock(map);
	if (vm_map_lookup_entry(map, offset, &entry)) {
		if (entry->object.vm_object == VM_OBJECT_NULL ||
			!entry->object.vm_object->phys_contiguous) {
			if((*upl_size/page_size) > MAX_UPL_TRANSFER) {
				*upl_size = MAX_UPL_TRANSFER * page_size;
			}
		}
		if((entry->vme_end - offset) < *upl_size) {
			*upl_size = entry->vme_end - offset;
		}
		if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
			if (entry->object.vm_object == VM_OBJECT_NULL) {
				*flags = 0;
			} else if (entry->object.vm_object->private) {
				*flags = UPL_DEV_MEMORY;
				if (entry->object.vm_object->phys_contiguous) {
					*flags |= UPL_PHYS_CONTIG;
				}
			} else {
				*flags = 0;
			}
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		/*
		 *      Create an object if necessary.
		 */
		if (entry->object.vm_object == VM_OBJECT_NULL) {
			entry->object.vm_object = vm_object_allocate(
				(vm_size_t)(entry->vme_end - entry->vme_start));
			entry->offset = 0;
		}
		if (!(caller_flags & UPL_COPYOUT_FROM)) {
			if (entry->needs_copy) {
				vm_map_t		local_map;
				vm_object_t		object;
				vm_object_offset_t	offset_hi;
				vm_object_offset_t	offset_lo;
				vm_object_offset_t	new_offset;
				vm_prot_t		prot;
				boolean_t		wired;
				vm_behavior_t		behavior;
				vm_map_version_t	version;
				vm_map_t		pmap_map;

				local_map = map;
				vm_map_lock_write_to_read(map);
				if(vm_map_lookup_locked(&local_map,
					offset, VM_PROT_WRITE,
					&version, &object,
					&new_offset, &prot, &wired,
					&behavior, &offset_lo,
					&offset_hi, &pmap_map)) {
					vm_map_unlock(local_map);
					return KERN_FAILURE;
				}
				if (pmap_map != map) {
					vm_map_unlock(pmap_map);
				}
				vm_object_unlock(object);
				vm_map_unlock(local_map);

				goto REDISCOVER_ENTRY;
			}
		}
		if (entry->is_sub_map) {
			vm_map_t	submap;

			submap = entry->object.sub_map;
			local_start = entry->vme_start;
			local_offset = entry->offset;
			vm_map_reference(submap);
			vm_map_unlock(map);

			ret = (vm_map_get_upl(submap,
				local_offset + (offset - local_start),
				upl_size, upl, page_list, count,
				flags, force_data_sync));

			vm_map_deallocate(submap);
			return ret;
		}

		if (sync_cow_data) {
			if (entry->object.vm_object->shadow
				    || entry->object.vm_object->copy) {
				int	flags;

				local_object = entry->object.vm_object;
				local_start = entry->vme_start;
				local_offset = entry->offset;
				vm_object_reference(local_object);
				vm_map_unlock(map);

				if(local_object->copy == NULL) {
					flags = MEMORY_OBJECT_DATA_SYNC;
				} else {
					flags = MEMORY_OBJECT_COPY_SYNC;
				}

				if((local_object->paging_offset) &&
						(local_object->pager == 0)) {
					/*
					 * do a little clean-up for our unorthodox
					 * entry into a pager call from a non-pager
					 * context.  Normally the pager code
					 * assumes that an object it has been called
					 * with has a backing pager and so does
					 * not bother to check the pager field
					 * before relying on the paging_offset
					 */
					vm_object_lock(local_object);
					if (local_object->pager == 0) {
						local_object->paging_offset = 0;
					}
					vm_object_unlock(local_object);
				}

				if (entry->object.vm_object->shadow &&
					   entry->object.vm_object->copy) {
					vm_object_lock_request(
						local_object->shadow,
						(vm_object_offset_t)
						((offset - local_start) +
						 local_offset) +
						local_object->shadow_offset +
						local_object->paging_offset,
						*upl_size, FALSE,
						MEMORY_OBJECT_DATA_SYNC,
						VM_PROT_NO_CHANGE);
				}
				sync_cow_data = FALSE;
				vm_object_deallocate(local_object);
				goto REDISCOVER_ENTRY;
			}
		}

		if (force_data_sync) {
			local_object = entry->object.vm_object;
			local_start = entry->vme_start;
			local_offset = entry->offset;
			vm_object_reference(local_object);
			vm_map_unlock(map);

			if((local_object->paging_offset) &&
					(local_object->pager == 0)) {
				/*
				 * do a little clean-up for our unorthodox
				 * entry into a pager call from a non-pager
				 * context.  Normally the pager code
				 * assumes that an object it has been called
				 * with has a backing pager and so does
				 * not bother to check the pager field
				 * before relying on the paging_offset
				 */
				vm_object_lock(local_object);
				if (local_object->pager == 0) {
					local_object->paging_offset = 0;
				}
				vm_object_unlock(local_object);
			}

			vm_object_lock_request(
				local_object,
				(vm_object_offset_t)
				((offset - local_start) + local_offset) +
				local_object->paging_offset,
				(vm_object_size_t)*upl_size, FALSE,
				MEMORY_OBJECT_DATA_SYNC,
				VM_PROT_NO_CHANGE);
			force_data_sync = FALSE;
			vm_object_deallocate(local_object);
			goto REDISCOVER_ENTRY;
		}

		if(!(entry->object.vm_object->private)) {
			if(*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
				*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
			if(entry->object.vm_object->phys_contiguous) {
				*flags = UPL_PHYS_CONTIG;
			} else {
				*flags = 0;
			}
		} else {
			*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
		}
		local_object = entry->object.vm_object;
		local_offset = entry->offset;
		local_start = entry->vme_start;
		vm_object_reference(local_object);
		vm_map_unlock(map);
		ret = (vm_object_upl_request(local_object,
			(vm_object_offset_t)
			((offset - local_start) + local_offset),
			*upl_size,
			upl,
			page_list,
			count,
			caller_flags));
		vm_object_deallocate(local_object);
		return(ret);
	}

	vm_map_unlock(map);
	return(KERN_FAILURE);
}
/* ******* Temporary Internal calls to UPL for BSD ***** */
kern_return_t
kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(map, upl, dst_addr));
}

kern_return_t
kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	return(vm_upl_unmap(map, upl));
}

kern_return_t
kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count)
{
	kern_return_t	kr;

	kr = upl_commit(upl, pl, count);
	upl_deallocate(upl);
	return kr;
}

kern_return_t
kernel_upl_commit_range(
	upl_t			upl,
	vm_offset_t		offset,
	vm_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count)
{
	boolean_t	finished = FALSE;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	finished = FALSE;

	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	return KERN_SUCCESS;
}

kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	task->system_shared_region = (vm_offset_t) shared_region;
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	int			*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);
	return KERN_SUCCESS;
}
kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t	target_region,
	shared_region_mapping_t	object_chain_region)
{
	shared_region_object_chain_t	object_ele;

	if(target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next)
{
	*shared_region = (shared_region_mapping_t)
			kalloc(sizeof (struct shared_region_mapping));
	if(*shared_region == NULL)
		return KERN_FAILURE;
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_set_alt_next(
	shared_region_mapping_t	shared_region,
	vm_offset_t		alt_next)
{
	shared_region->alternate_next = alt_next;
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	if(shared_region == NULL)
		return KERN_SUCCESS;
	shared_region_mapping_lock(shared_region);
	shared_region->ref_count++;
	shared_region_mapping_unlock(shared_region);
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	struct shared_region_task_mappings	sm_info;
	shared_region_mapping_t			next;

	if(shared_region == NULL)
		return KERN_SUCCESS;
	shared_region_mapping_lock(shared_region);

	if((--shared_region->ref_count) == 0) {

		sm_info.text_region = shared_region->text_region;
		sm_info.text_size = shared_region->text_size;
		sm_info.data_region = shared_region->data_region;
		sm_info.data_size = shared_region->data_size;
		sm_info.region_mappings = shared_region->region_mappings;
		sm_info.client_base = shared_region->client_base;
		sm_info.alternate_base = shared_region->alternate_base;
		sm_info.alternate_next = shared_region->alternate_next;
		sm_info.flags = shared_region->flags;
		sm_info.self = (vm_offset_t)shared_region;

		lsf_remove_regions_mappings(shared_region, &sm_info);
		pmap_remove(((vm_named_entry_t)
			(shared_region->text_region->ip_kobject))
					->backing.map->pmap,
			sm_info.client_base,
			sm_info.client_base + sm_info.text_size);
		ipc_port_release_send(shared_region->text_region);
		ipc_port_release_send(shared_region->data_region);
		if(shared_region->object_chain) {
			shared_region_mapping_dealloc(
			     shared_region->object_chain->object_chain_region);
			kfree((vm_offset_t)shared_region->object_chain,
				sizeof (struct shared_region_object_chain));
		}
		kfree((vm_offset_t)shared_region,
				sizeof (struct shared_region_mapping));
		return KERN_SUCCESS;
	}
	shared_region_mapping_unlock(shared_region);
	return KERN_SUCCESS;
}
vm_offset_t
vm_map_get_phys_page(
	vm_map_t	map,
	vm_offset_t	offset)
{
	vm_map_entry_t	entry;
	vm_object_t	object;
	vm_offset_t	phys_addr = 0;

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, offset, &entry)) {

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return (vm_offset_t) 0;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			vm_map_lock(entry->object.sub_map);
			old_map = map;
			map = entry->object.sub_map;
			offset = entry->offset + (offset - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		offset = entry->offset + (offset - entry->vme_start);
		object = entry->object.vm_object;
		vm_object_lock(object);
		while (TRUE) {
			vm_page_t	dst_page = vm_page_lookup(object,offset);

			if(dst_page == VM_PAGE_NULL) {
				if(object->shadow) {
					vm_object_t	old_object;

					vm_object_lock(object->shadow);
					old_object = object;
					offset = offset + object->shadow_offset;
					object = object->shadow;
					vm_object_unlock(old_object);
				} else {
					vm_object_unlock(object);
					break;
				}
			} else {
				phys_addr = dst_page->phys_addr;
				vm_object_unlock(object);
				break;
			}
		}
		break;
	}

	vm_map_unlock(map);
	return phys_addr;
}