/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/* remove after component interface available */
extern int vnode_pager_workaround;
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/vm_map_server.h>
#include <mach/mach_syscalls.h>
#include <mach/shared_memory_server.h>

#include <kern/host.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
vm_size_t	upl_offset_to_pagelist = 0;

ipc_port_t	dynamic_pager_control_port = NULL;
/*
 *	vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
vm_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addr,
	register vm_size_t	size,
	int			flags)
{
	kern_return_t	result;
	boolean_t	anywhere = VM_FLAGS_ANYWHERE & flags;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	if (anywhere)
		*addr = vm_map_min(map);
	else
		*addr = trunc_page(*addr);
	size = round_page(size);
	if (size == 0)
		return(KERN_INVALID_ARGUMENT);

	result = vm_map_enter(
			map,
			addr,
			size,
			(vm_offset_t)0,
			anywhere,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	return(result);
}
/*
 *	vm_deallocate deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
vm_deallocate(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, trunc_page(start),
			     round_page(start+size), VM_MAP_NO_FLAGS));
}
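/*
 * Illustrative sketch (not compiled into the kernel): a user-space client
 * would normally pair vm_allocate() with vm_deallocate() on its own task
 * port.  The buffer size and helper name below are assumptions made for
 * the example only.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
example_scratch_buffer(void)
{
	vm_address_t	addr = 0;
	vm_size_t	size = 4 * vm_page_size;	/* assumed size */
	kern_return_t	kr;

	/* ask for zero-filled, page-aligned memory anywhere in the map */
	kr = vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use the memory ... */

	/* the kernel trunc/rounds the range to page boundaries for us */
	return vm_deallocate(mach_task_self(), addr, size);
}
#endif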
/*
 *	vm_inherit sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
vm_inherit(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (new_inheritance > VM_INHERIT_LAST_VALID)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map,
			      trunc_page(start),
			      round_page(start+size),
			      new_inheritance));
}
/*
 *	vm_protect sets the protection of the specified range in the
 *	specified map.
 */
kern_return_t
vm_protect(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map,
			      trunc_page(start),
			      round_page(start+size),
			      new_protection,
			      set_maximum));
}
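/*
 * Illustrative sketch (not compiled into the kernel): making an existing
 * region read-only from user space with vm_protect().  The addr/size
 * values and helper name are assumptions for the example.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
example_make_read_only(vm_address_t addr, vm_size_t size)
{
	/* FALSE: change only the current protection; TRUE would
	 * irreversibly lower the maximum protection as well. */
	return vm_protect(mach_task_self(), addr, size,
			  FALSE /* set_maximum */, VM_PROT_READ);
}
#endif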
/*
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.
 */
kern_return_t
vm_machine_attribute(
	vm_map_t			map,
	vm_address_t			address,
	vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	return vm_map_machine_attribute(map, address, size, attribute, value);
}
kern_return_t
vm_read(
	vm_map_t		map,
	vm_address_t		address,
	vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if ((error = vm_map_copyin(map,
				address,
				size,
				FALSE,	/* src_destroy */
				&ipc_address)) == KERN_SUCCESS) {
		*data = (pointer_t) ipc_address;
		*data_size = size;
	}
	return(error);
}
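/*
 * Illustrative sketch (not compiled into the kernel): vm_read() returns
 * the data out-of-line; the caller receives a new mapping in its own
 * address space and should vm_deallocate() it when done.  The target task
 * port and remote address/size are assumptions for the example.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
example_peek_remote(vm_map_t target_task, vm_address_t remote_addr,
		    vm_size_t remote_size)
{
	vm_offset_t		data;
	mach_msg_type_number_t	data_count;
	kern_return_t		kr;

	kr = vm_read(target_task, remote_addr, remote_size,
		     &data, &data_count);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... inspect the copied bytes at (const void *)data ... */

	/* the copy lives in our map until we deallocate it */
	return vm_deallocate(mach_task_self(), data, data_count);
}
#endif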
kern_return_t
vm_read_list(
	vm_map_t		map,
	vm_read_entry_t		data_list,
	mach_msg_type_number_t	count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error = KERN_SUCCESS;
	vm_map_copy_t		ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	for(i=0; i<count; i++) {
		error = vm_map_copyin(map,
				data_list[i].address,
				data_list[i].size,
				FALSE,	/* src_destroy */
				&ipc_address);
		if(error != KERN_SUCCESS) {
			data_list[i].address = (vm_address_t)0;
			data_list[i].size = (vm_size_t)0;
			break;
		}
		if(data_list[i].size != 0) {
			error = vm_map_copyout(current_task()->map,
					&(data_list[i].address),
					(vm_map_copy_t) ipc_address);
			if(error != KERN_SUCCESS) {
				data_list[i].address = (vm_address_t)0;
				data_list[i].size = (vm_size_t)0;
				break;
			}
		}
	}
	return(error);
}
/*
 *	This routine reads from the specified map and overwrites part of the
 *	current activation's map.  Because it assumes that the current thread
 *	is local, it is no longer cluster-safe without a fully supportive
 *	local proxy thread/task (but we don't support clusters anymore, so
 *	this is moot).
 */
#define VM_OVERWRITE_SMALL 512
kern_return_t
vm_read_overwrite(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_address_t	data,
	vm_size_t	*data_size)
{
	vm_map_copy_t	copy;
	char		buf[VM_OVERWRITE_SMALL];
	kern_return_t	error = KERN_SUCCESS;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size <= VM_OVERWRITE_SMALL) {
		if(vm_map_read_user(map, (vm_offset_t)address,
				    (vm_offset_t)&buf, size)) {
			error = KERN_INVALID_ADDRESS;
		} else {
			if(vm_map_write_user(current_map(),
					     (vm_offset_t)&buf, (vm_offset_t)data, size))
				error = KERN_INVALID_ADDRESS;
		}
	} else {
		if ((error = vm_map_copyin(map,
					address,
					size,
					FALSE,	/* src_destroy */
					&copy)) == KERN_SUCCESS) {
			if ((error = vm_map_copy_overwrite(
					current_map(),
					data,
					copy,
					FALSE)) != KERN_SUCCESS) {
				vm_map_copy_discard(copy);
			}
		}
	}
	*data_size = size;
	return(error);
}
kern_return_t
vm_write(
	vm_map_t		map,
	vm_address_t		address,
	pointer_t		data,
	mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
				     FALSE /* interruptible XXX */);
}
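/*
 * Illustrative sketch (not compiled into the kernel): writing bytes that
 * already live in the caller's address space into a (possibly different)
 * task's map with vm_write().  The target task port, addresses, and
 * length are assumptions for the example.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
example_poke_remote(vm_map_t target_task, vm_address_t remote_addr,
		    vm_offset_t local_buf, mach_msg_type_number_t len)
{
	/* copies "len" bytes from local_buf into the target task's map */
	return vm_write(target_task, remote_addr, local_buf, len);
}
#endif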
kern_return_t
vm_copy(
	vm_map_t	map,
	vm_address_t	source_address,
	vm_size_t	size,
	vm_address_t	dest_address)
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, source_address, size,
			   FALSE /* src_destroy */, &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copy_overwrite(map, dest_address, copy,
				   FALSE /* interruptible XXX */);
	if (kr != KERN_SUCCESS) {
		vm_map_copy_discard(copy);
		return kr;
	}

	return KERN_SUCCESS;
}
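/*
 * Illustrative sketch (not compiled into the kernel): vm_copy() duplicates
 * a range within a single task's address space; both ranges are assumed to
 * already be allocated and page-aligned.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
example_duplicate_range(vm_address_t src_addr, vm_size_t len,
			vm_address_t dst_addr)
{
	/* both ranges are in the calling task and must already exist */
	return vm_copy(mach_task_self(), src_addr, len, dst_addr);
}
#endif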
400 vm_offset_t
*address
,
401 vm_size_t initial_size
,
405 vm_object_offset_t offset
,
407 vm_prot_t cur_protection
,
408 vm_prot_t max_protection
,
409 vm_inherit_t inheritance
)
414 vm_object_size_t size
= (vm_object_size_t
)initial_size
;
415 kern_return_t result
;
418 * Check arguments for validity
420 if ((target_map
== VM_MAP_NULL
) ||
421 (cur_protection
& ~VM_PROT_ALL
) ||
422 (max_protection
& ~VM_PROT_ALL
) ||
423 (inheritance
> VM_INHERIT_LAST_VALID
) ||
425 return(KERN_INVALID_ARGUMENT
);
428 * Find the vm object (if any) corresponding to this port.
430 if (!IP_VALID(port
)) {
431 object
= VM_OBJECT_NULL
;
434 } else if (ip_kotype(port
) == IKOT_NAMED_ENTRY
) {
435 vm_named_entry_t named_entry
;
437 named_entry
= (vm_named_entry_t
)port
->ip_kobject
;
438 /* a few checks to make sure user is obeying rules */
440 if(offset
>= named_entry
->size
)
441 return(KERN_INVALID_RIGHT
);
442 size
= named_entry
->size
- offset
;
444 if((named_entry
->protection
& max_protection
) != max_protection
)
445 return(KERN_INVALID_RIGHT
);
446 if((named_entry
->protection
& cur_protection
) != cur_protection
)
447 return(KERN_INVALID_RIGHT
);
448 if(named_entry
->size
< (offset
+ size
))
449 return(KERN_INVALID_ARGUMENT
);
		/* the caller's parameter offset is relative to the start of */
		/* the named entry; convert it to an offset within the backing object */
453 offset
= offset
+ named_entry
->offset
;
455 named_entry_lock(named_entry
);
456 if(named_entry
->is_sub_map
) {
457 vm_map_entry_t map_entry
;
459 named_entry_unlock(named_entry
);
460 *address
= trunc_page(*address
);
461 size
= round_page(size
);
462 vm_object_reference(vm_submap_object
);
463 if ((result
= vm_map_enter(target_map
,
464 address
, size
, mask
, flags
,
467 cur_protection
, max_protection
, inheritance
468 )) != KERN_SUCCESS
) {
469 vm_object_deallocate(vm_submap_object
);
473 VM_GET_FLAGS_ALIAS(flags
, alias
);
474 if ((alias
== VM_MEMORY_SHARED_PMAP
) &&
476 vm_map_submap(target_map
, *address
,
478 named_entry
->backing
.map
,
479 (vm_offset_t
)offset
, TRUE
);
481 vm_map_submap(target_map
, *address
,
483 named_entry
->backing
.map
,
484 (vm_offset_t
)offset
, FALSE
);
487 if(vm_map_lookup_entry(
488 target_map
, *address
, &map_entry
)) {
489 map_entry
->needs_copy
= TRUE
;
495 } else if(named_entry
->object
) {
			/* This is the case where we are going to map an already */
			/* mapped object.  If the object is not ready it must be */
			/* internal; an external object cannot be mapped until it */
			/* is ready, so the ready check can be skipped here. */
502 named_entry_unlock(named_entry
);
503 vm_object_reference(named_entry
->object
);
504 object
= named_entry
->object
;
506 object
= vm_object_enter(named_entry
->backing
.pager
,
508 named_entry
->internal
,
511 if (object
== VM_OBJECT_NULL
) {
512 named_entry_unlock(named_entry
);
513 return(KERN_INVALID_OBJECT
);
515 named_entry
->object
= object
;
516 named_entry_unlock(named_entry
);
517 /* create an extra reference for the named entry */
518 vm_object_reference(named_entry
->object
);
519 /* wait for object (if any) to be ready */
520 if (object
!= VM_OBJECT_NULL
) {
521 vm_object_lock(object
);
522 while (!object
->pager_ready
) {
523 vm_object_wait(object
,
524 VM_OBJECT_EVENT_PAGER_READY
,
526 vm_object_lock(object
);
528 vm_object_unlock(object
);
532 if ((object
= vm_object_enter(port
, size
, FALSE
, FALSE
, FALSE
))
534 return(KERN_INVALID_OBJECT
);
536 /* wait for object (if any) to be ready */
537 if (object
!= VM_OBJECT_NULL
) {
538 vm_object_lock(object
);
539 while (!object
->pager_ready
) {
540 vm_object_wait(object
,
541 VM_OBJECT_EVENT_PAGER_READY
,
543 vm_object_lock(object
);
545 vm_object_unlock(object
);
549 *address
= trunc_page(*address
);
550 size
= round_page(size
);
553 * Perform the copy if requested
557 vm_object_t new_object
;
558 vm_object_offset_t new_offset
;
560 result
= vm_object_copy_strategically(object
, offset
, size
,
561 &new_object
, &new_offset
,
565 if (result
== KERN_MEMORY_RESTART_COPY
) {
567 boolean_t src_needs_copy
;
571 * We currently ignore src_needs_copy.
572 * This really is the issue of how to make
573 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
574 * non-kernel users to use. Solution forthcoming.
575 * In the meantime, since we don't allow non-kernel
576 * memory managers to specify symmetric copy,
577 * we won't run into problems here.
581 success
= vm_object_copy_quickly(&new_object
,
586 result
= KERN_SUCCESS
;
589 * Throw away the reference to the
590 * original object, as it won't be mapped.
593 vm_object_deallocate(object
);
595 if (result
!= KERN_SUCCESS
)
602 if ((result
= vm_map_enter(target_map
,
603 address
, size
, mask
, flags
,
606 cur_protection
, max_protection
, inheritance
608 vm_object_deallocate(object
);
612 /* temporary, until world build */
615 vm_offset_t
*address
,
622 vm_prot_t cur_protection
,
623 vm_prot_t max_protection
,
624 vm_inherit_t inheritance
)
626 vm_map_64(target_map
, address
, size
, mask
, flags
,
627 port
, (vm_object_offset_t
)offset
, copy
,
628 cur_protection
, max_protection
, inheritance
);
/*
 *	NOTE: this routine (and this file) will no longer require mach_host_server.h
 *	when vm_wire is changed to use ledgers.
 */
#include <mach/mach_host_server.h>

/*
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t		host_priv,
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL)
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, trunc_page(start),
				 round_page(start+size), access, TRUE);
	} else {
		rc = vm_map_unwire(map, trunc_page(start),
				   round_page(start+size), TRUE);
	}
	return rc;
}
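/*
 * Illustrative sketch (not compiled into the kernel): wiring and later
 * unwiring a range through vm_wire().  host_priv is the privileged host
 * port, which only suitably privileged callers hold; it is assumed to be
 * available as a parameter here.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
example_wire_then_unwire(host_priv_t host_priv,
			 vm_address_t addr, vm_size_t size)
{
	kern_return_t kr;

	/* wire the range for read/write access (no faults afterwards) */
	kr = vm_wire(host_priv, mach_task_self(), addr, size,
		     VM_PROT_READ | VM_PROT_WRITE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... touch the memory without risking a fault ... */

	/* unwire by asking for VM_PROT_NONE */
	return vm_wire(host_priv, mach_task_self(), addr, size,
		       VM_PROT_NONE);
}
#endif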
/*
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager, engaging in a memory object synchronize dialog with
 *	the manager.  The client does not return until the manager issues
 *	an m_o_s_completed message.  MIG magically converts the user task
 *	parameter to the task's address map.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	The memory object attributes have not yet been implemented; this
 *	function will have to deal with the invalidate attribute.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 */
710 vm_address_t address
,
712 vm_sync_t sync_flags
)
716 queue_chain_t req_q
; /* queue of requests for this msync */
717 vm_map_entry_t entry
;
718 vm_size_t amount_left
;
719 vm_object_offset_t offset
;
720 boolean_t do_sync_req
;
721 boolean_t modifiable
;
724 if ((sync_flags
& VM_SYNC_ASYNCHRONOUS
) &&
725 (sync_flags
& VM_SYNC_SYNCHRONOUS
))
726 return(KERN_INVALID_ARGUMENT
);
729 * align address and size on page boundaries
731 size
= round_page(address
+ size
) - trunc_page(address
);
732 address
= trunc_page(address
);
734 if (map
== VM_MAP_NULL
)
735 return(KERN_INVALID_TASK
);
738 return(KERN_SUCCESS
);
743 while (amount_left
> 0) {
744 vm_size_t flush_size
;
748 if (!vm_map_lookup_entry(map
, address
, &entry
)) {
752 * hole in the address map.
756 * Check for empty map.
758 if (entry
== vm_map_to_entry(map
) &&
759 entry
->vme_next
== entry
) {
764 * Check that we don't wrap and that
765 * we have at least one real map entry.
767 if ((map
->hdr
.nentries
== 0) ||
768 (entry
->vme_next
->vme_start
< address
)) {
773 * Move up to the next entry if needed
775 skip
= (entry
->vme_next
->vme_start
- address
);
776 if (skip
>= amount_left
)
780 address
= entry
->vme_next
->vme_start
;
785 offset
= address
- entry
->vme_start
;
		/* do we have more to flush than is contained in this entry? */
791 if (amount_left
+ entry
->vme_start
+ offset
> entry
->vme_end
) {
792 flush_size
= entry
->vme_end
-
793 (entry
->vme_start
+ offset
);
795 flush_size
= amount_left
;
797 amount_left
-= flush_size
;
798 address
+= flush_size
;
800 if (entry
->is_sub_map
== TRUE
) {
802 vm_offset_t local_offset
;
804 local_map
= entry
->object
.sub_map
;
805 local_offset
= entry
->offset
;
814 object
= entry
->object
.vm_object
;
		/* We can't sync this object if the object has not been created yet. */
820 if (object
== VM_OBJECT_NULL
) {
824 offset
+= entry
->offset
;
825 modifiable
= (entry
->protection
& VM_PROT_WRITE
)
828 vm_object_lock(object
);
830 if (sync_flags
& (VM_SYNC_KILLPAGES
| VM_SYNC_DEACTIVATE
)) {
831 boolean_t kill_pages
= 0;
833 if (sync_flags
& VM_SYNC_KILLPAGES
) {
834 if (object
->ref_count
== 1 && !entry
->needs_copy
&& !object
->shadow
)
839 if (kill_pages
!= -1)
840 memory_object_deactivate_pages(object
, offset
,
841 (vm_object_size_t
)flush_size
, kill_pages
);
842 vm_object_unlock(object
);
847 * We can't sync this object if there isn't a pager.
848 * Don't bother to sync internal objects, since there can't
849 * be any "permanent" storage for these objects anyway.
851 if ((object
->pager
== IP_NULL
) || (object
->internal
) ||
853 vm_object_unlock(object
);
858 * keep reference on the object until syncing is done
860 assert(object
->ref_count
> 0);
862 vm_object_res_reference(object
);
863 vm_object_unlock(object
);
867 do_sync_req
= memory_object_sync(object
,
870 sync_flags
& VM_SYNC_INVALIDATE
,
872 (sync_flags
& VM_SYNC_SYNCHRONOUS
||
873 sync_flags
& VM_SYNC_ASYNCHRONOUS
)));
		/*
		 * only send a m_o_s if we returned pages or if the entry
		 * is writable (i.e. dirty pages may have already been sent back)
		 */
879 if (!do_sync_req
&& !modifiable
) {
880 vm_object_deallocate(object
);
883 msync_req_alloc(new_msr
);
885 vm_object_lock(object
);
886 offset
+= object
->paging_offset
;
888 new_msr
->offset
= offset
;
889 new_msr
->length
= flush_size
;
890 new_msr
->object
= object
;
891 new_msr
->flag
= VM_MSYNC_SYNCHRONIZING
;
893 queue_iterate(&object
->msr_q
, msr
, msync_req_t
, msr_q
) {
895 * need to check for overlapping entry, if found, wait
896 * on overlapping msr to be done, then reiterate
899 if (msr
->flag
== VM_MSYNC_SYNCHRONIZING
&&
900 ((offset
>= msr
->offset
&&
901 offset
< (msr
->offset
+ msr
->length
)) ||
902 (msr
->offset
>= offset
&&
903 msr
->offset
< (offset
+ flush_size
))))
905 assert_wait((event_t
) msr
,THREAD_INTERRUPTIBLE
);
907 vm_object_unlock(object
);
908 thread_block((void (*)(void))0);
909 vm_object_lock(object
);
915 queue_enter(&object
->msr_q
, new_msr
, msync_req_t
, msr_q
);
916 vm_object_unlock(object
);
918 queue_enter(&req_q
, new_msr
, msync_req_t
, req_q
);
921 if(((rpc_subsystem_t
)pager_mux_hash_lookup(object
->pager
)) ==
922 ((rpc_subsystem_t
) &vnode_pager_workaround
)) {
923 (void) vnode_pager_synchronize(
925 object
->pager_request
,
930 (void) memory_object_synchronize(
932 object
->pager_request
,
938 (void) memory_object_synchronize(
940 object
->pager_request
,
	/* wait for memory_object_synchronize_completed messages from pager(s) */
951 while (!queue_empty(&req_q
)) {
952 msr
= (msync_req_t
)queue_first(&req_q
);
954 while(msr
->flag
!= VM_MSYNC_DONE
) {
955 assert_wait((event_t
) msr
, THREAD_INTERRUPTIBLE
);
957 thread_block((void (*)(void))0);
960 queue_remove(&req_q
, msr
, msync_req_t
, req_q
);
962 vm_object_deallocate(msr
->object
);
966 return(KERN_SUCCESS
);
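/*
 * Illustrative sketch (not compiled into the kernel): a typical msync call
 * from user space, flushing dirty pages synchronously and discarding the
 * resident copies.  Note that the routine above rejects VM_SYNC_SYNCHRONOUS
 * combined with VM_SYNC_ASYNCHRONOUS.  The addr/size values are assumptions.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
example_flush_range(vm_address_t addr, vm_size_t size)
{
	/* write back dirty/precious pages and drop the resident copies */
	return vm_msync(mach_task_self(), addr, size,
			VM_SYNC_INVALIDATE | VM_SYNC_SYNCHRONOUS);
}
#endif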
/*
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (must_wire)
		map->wiring_required = TRUE;
	else
		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
}
/*
 *	vm_behavior_set sets the paging behavior attribute for the
 *	specified range in the specified map.  This routine will fail
 *	with KERN_INVALID_ADDRESS if any address in [start,start+size)
 *	is not a valid allocated or reserved memory region.
 */
kern_return_t
vm_behavior_set(
	vm_map_t		map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_behavior_t		new_behavior)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_behavior_set(map, trunc_page(start),
				   round_page(start+size), new_behavior));
}
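/*
 * Illustrative sketch (not compiled into the kernel): advising the kernel
 * that a mapped region will be read sequentially, which biases the
 * page-ahead/page-behind decisions for that range.  The addr/size values
 * are assumptions for the example.
 */
#if 0	/* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t
example_advise_sequential(vm_address_t addr, vm_size_t size)
{
	/* hint that the range will be touched in ascending order */
	return vm_behavior_set(mach_task_self(), addr, size,
			       VM_BEHAVIOR_SEQUENTIAL);
}
#endif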
/*
 *	Control whether the kernel will permit use of
 *	vm_allocate_cpm at all.
 */
unsigned int	vm_allocate_cpm_enabled = 1;

/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host port).  Set this variable to zero if you
 *	want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;
/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
1041 host_priv_t host_priv
,
1042 register vm_map_t map
,
1043 register vm_offset_t
*addr
,
1044 register vm_size_t size
,
1047 vm_object_t cpm_obj
;
1051 vm_offset_t va
, start
, end
, offset
;
1053 extern vm_offset_t avail_start
, avail_end
;
1054 vm_offset_t prev_addr
;
1055 #endif /* MACH_ASSERT */
1057 boolean_t anywhere
= VM_FLAGS_ANYWHERE
& flags
;
1059 if (!vm_allocate_cpm_enabled
)
1060 return KERN_FAILURE
;
1062 if (vm_allocate_cpm_privileged
&& host_priv
== HOST_PRIV_NULL
)
1063 return KERN_INVALID_HOST
;
1065 if (map
== VM_MAP_NULL
)
1066 return KERN_INVALID_ARGUMENT
;
1068 assert(host_priv
== &realhost
);
1072 return KERN_SUCCESS
;
1076 *addr
= vm_map_min(map
);
1078 *addr
= trunc_page(*addr
);
1079 size
= round_page(size
);
1081 if ((kr
= cpm_allocate(size
, &pages
, TRUE
)) != KERN_SUCCESS
)
1084 cpm_obj
= vm_object_allocate(size
);
1085 assert(cpm_obj
!= VM_OBJECT_NULL
);
1086 assert(cpm_obj
->internal
);
1087 assert(cpm_obj
->size
== size
);
1088 assert(cpm_obj
->can_persist
== FALSE
);
1089 assert(cpm_obj
->pager_created
== FALSE
);
1090 assert(cpm_obj
->pageout
== FALSE
);
1091 assert(cpm_obj
->shadow
== VM_OBJECT_NULL
);
1094 * Insert pages into object.
1097 vm_object_lock(cpm_obj
);
1098 for (offset
= 0; offset
< size
; offset
+= PAGE_SIZE
) {
1100 pages
= NEXT_PAGE(m
);
1102 assert(!m
->gobbled
);
1104 assert(!m
->pageout
);
1107 assert(m
->phys_addr
>=avail_start
&& m
->phys_addr
<=avail_end
);
1110 vm_page_insert(m
, cpm_obj
, offset
);
1112 assert(cpm_obj
->resident_page_count
== size
/ PAGE_SIZE
);
1113 vm_object_unlock(cpm_obj
);
1116 * Hang onto a reference on the object in case a
1117 * multi-threaded application for some reason decides
1118 * to deallocate the portion of the address space into
1119 * which we will insert this object.
1121 * Unfortunately, we must insert the object now before
1122 * we can talk to the pmap module about which addresses
1123 * must be wired down. Hence, the race with a multi-
1126 vm_object_reference(cpm_obj
);
1129 * Insert object into map.
1139 (vm_object_offset_t
)0,
1143 VM_INHERIT_DEFAULT
);
1145 if (kr
!= KERN_SUCCESS
) {
1147 * A CPM object doesn't have can_persist set,
1148 * so all we have to do is deallocate it to
1149 * free up these pages.
1151 assert(cpm_obj
->pager_created
== FALSE
);
1152 assert(cpm_obj
->can_persist
== FALSE
);
1153 assert(cpm_obj
->pageout
== FALSE
);
1154 assert(cpm_obj
->shadow
== VM_OBJECT_NULL
);
1155 vm_object_deallocate(cpm_obj
); /* kill acquired ref */
1156 vm_object_deallocate(cpm_obj
); /* kill creation ref */
1160 * Inform the physical mapping system that the
1161 * range of addresses may not fault, so that
1162 * page tables and such can be locked down as well.
1166 pmap
= vm_map_pmap(map
);
1167 pmap_pageable(pmap
, start
, end
, FALSE
);
1170 * Enter each page into the pmap, to avoid faults.
1171 * Note that this loop could be coded more efficiently,
1172 * if the need arose, rather than looking up each page
1175 for (offset
= 0, va
= start
; offset
< size
;
1176 va
+= PAGE_SIZE
, offset
+= PAGE_SIZE
) {
1177 vm_object_lock(cpm_obj
);
1178 m
= vm_page_lookup(cpm_obj
, (vm_object_offset_t
)offset
);
1179 vm_object_unlock(cpm_obj
);
1180 assert(m
!= VM_PAGE_NULL
);
1181 PMAP_ENTER(pmap
, va
, m
, VM_PROT_ALL
, TRUE
);
1186 * Verify ordering in address space.
1188 for (offset
= 0; offset
< size
; offset
+= PAGE_SIZE
) {
1189 vm_object_lock(cpm_obj
);
1190 m
= vm_page_lookup(cpm_obj
, (vm_object_offset_t
)offset
);
1191 vm_object_unlock(cpm_obj
);
1192 if (m
== VM_PAGE_NULL
)
1193 panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
1198 assert(!m
->fictitious
);
1199 assert(!m
->private);
1202 assert(!m
->cleaning
);
1203 assert(!m
->precious
);
1204 assert(!m
->clustered
);
1206 if (m
->phys_addr
!= prev_addr
+ PAGE_SIZE
) {
1207 printf("start 0x%x end 0x%x va 0x%x\n",
1209 printf("obj 0x%x off 0x%x\n", cpm_obj
, offset
);
1210 printf("m 0x%x prev_address 0x%x\n", m
,
1212 panic("vm_allocate_cpm: pages not contig!");
1215 prev_addr
= m
->phys_addr
;
1217 #endif /* MACH_ASSERT */
1219 vm_object_deallocate(cpm_obj
); /* kill extra ref */
1228 * Interface is defined in all cases, but unless the kernel
1229 * is built explicitly for this option, the interface does
1235 host_priv_t host_priv
,
1236 register vm_map_t map
,
1237 register vm_offset_t
*addr
,
1238 register vm_size_t size
,
1241 return KERN_FAILURE
;
1247 mach_memory_object_memory_entry_64(
1250 vm_object_offset_t size
,
1251 vm_prot_t permission
,
1253 ipc_port_t
*entry_handle
)
1255 vm_named_entry_t user_object
;
1256 ipc_port_t user_handle
;
1257 ipc_port_t previous
;
1260 if (host
== HOST_NULL
)
1261 return(KERN_INVALID_HOST
);
1263 user_object
= (vm_named_entry_t
)
1264 kalloc(sizeof (struct vm_named_entry
));
1265 if(user_object
== NULL
)
1266 return KERN_FAILURE
;
1267 named_entry_lock_init(user_object
);
1268 user_handle
= ipc_port_alloc_kernel();
1269 ip_lock(user_handle
);
1271 /* make a sonce right */
1272 user_handle
->ip_sorights
++;
1273 ip_reference(user_handle
);
1275 user_handle
->ip_destination
= IP_NULL
;
1276 user_handle
->ip_receiver_name
= MACH_PORT_NULL
;
1277 user_handle
->ip_receiver
= ipc_space_kernel
;
1279 /* make a send right */
1280 user_handle
->ip_mscount
++;
1281 user_handle
->ip_srights
++;
1282 ip_reference(user_handle
);
1284 ipc_port_nsrequest(user_handle
, 1, user_handle
, &previous
);
1285 /* nsrequest unlocks user_handle */
1287 user_object
->object
= NULL
;
1288 user_object
->size
= size
;
1289 user_object
->offset
= 0;
1290 user_object
->backing
.pager
= pager
;
1291 user_object
->protection
= permission
;
1292 user_object
->internal
= internal
;
1293 user_object
->is_sub_map
= FALSE
;
1294 user_object
->ref_count
= 1;
1296 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1298 *entry_handle
= user_handle
;
1299 return KERN_SUCCESS
;
1303 mach_memory_object_memory_entry(
1307 vm_prot_t permission
,
1309 ipc_port_t
*entry_handle
)
1311 return mach_memory_object_memory_entry_64( host
, internal
,
1312 (vm_object_offset_t
)size
, permission
, pager
, entry_handle
);
1321 mach_make_memory_entry_64(
1322 vm_map_t target_map
,
1323 vm_object_size_t
*size
,
1324 vm_object_offset_t offset
,
1325 vm_prot_t permission
,
1326 ipc_port_t
*object_handle
,
1327 ipc_port_t parent_entry
)
1329 vm_map_version_t version
;
1330 vm_named_entry_t user_object
;
1331 ipc_port_t user_handle
;
1332 ipc_port_t previous
;
1336 /* needed for call to vm_map_lookup_locked */
1338 vm_object_offset_t obj_off
;
1340 vm_object_offset_t lo_offset
, hi_offset
;
1341 vm_behavior_t behavior
;
1344 /* needed for direct map entry manipulation */
1345 vm_map_entry_t map_entry
;
1347 vm_object_size_t mappable_size
;
1350 user_object
= (vm_named_entry_t
)
1351 kalloc(sizeof (struct vm_named_entry
));
1352 if(user_object
== NULL
)
1353 return KERN_FAILURE
;
1354 named_entry_lock_init(user_object
);
1355 user_handle
= ipc_port_alloc_kernel();
1356 ip_lock(user_handle
);
1358 /* make a sonce right */
1359 user_handle
->ip_sorights
++;
1360 ip_reference(user_handle
);
1362 user_handle
->ip_destination
= IP_NULL
;
1363 user_handle
->ip_receiver_name
= MACH_PORT_NULL
;
1364 user_handle
->ip_receiver
= ipc_space_kernel
;
1366 /* make a send right */
1367 user_handle
->ip_mscount
++;
1368 user_handle
->ip_srights
++;
1369 ip_reference(user_handle
);
1371 ipc_port_nsrequest(user_handle
, 1, user_handle
, &previous
);
1372 /* nsrequest unlocks user_handle */
1374 user_object
->backing
.pager
= NULL
;
1375 user_object
->ref_count
= 1;
1377 if(parent_entry
== NULL
) {
1378 /* Create a named object based on address range within the task map */
1379 /* Go find the object at given address */
1381 permission
&= VM_PROT_ALL
;
1382 vm_map_lock_read(target_map
);
1384 /* get the object associated with the target address */
1385 /* note we check the permission of the range against */
1386 /* that requested by the caller */
1388 kr
= vm_map_lookup_locked(&target_map
, offset
,
1389 permission
, &version
,
1390 &object
, &obj_off
, &prot
, &wired
, &behavior
,
1391 &lo_offset
, &hi_offset
, &pmap_map
);
1392 if (kr
!= KERN_SUCCESS
) {
1393 vm_map_unlock_read(target_map
);
1396 if ((prot
& permission
) != permission
) {
1397 kr
= KERN_INVALID_RIGHT
;
1398 vm_object_unlock(object
);
1399 vm_map_unlock_read(target_map
);
1400 if(pmap_map
!= target_map
)
1401 vm_map_unlock_read(pmap_map
);
1405 /* We have an object, now check to see if this object */
1406 /* is suitable. If not, create a shadow and share that */
1408 local_map
= target_map
;
1411 if(!vm_map_lookup_entry(local_map
, offset
, &map_entry
)) {
1412 kr
= KERN_INVALID_ARGUMENT
;
1413 vm_object_unlock(object
);
1414 vm_map_unlock_read(target_map
);
1415 if(pmap_map
!= target_map
)
1416 vm_map_unlock_read(pmap_map
);
1419 if(!(map_entry
->is_sub_map
)) {
1420 if(map_entry
->object
.vm_object
!= object
) {
1421 kr
= KERN_INVALID_ARGUMENT
;
1422 vm_object_unlock(object
);
1423 vm_map_unlock_read(target_map
);
1424 if(pmap_map
!= target_map
)
1425 vm_map_unlock_read(pmap_map
);
1430 local_map
= map_entry
->object
.sub_map
;
1431 vm_map_lock_read(local_map
);
1432 vm_map_unlock_read(target_map
);
1433 if(pmap_map
!= target_map
)
1434 vm_map_unlock_read(pmap_map
);
1435 target_map
= local_map
;
1438 if(((map_entry
->max_protection
) & permission
) != permission
) {
1439 kr
= KERN_INVALID_RIGHT
;
1440 vm_object_unlock(object
);
1441 vm_map_unlock_read(target_map
);
1442 if(pmap_map
!= target_map
)
1443 vm_map_unlock_read(pmap_map
);
1446 if(object
->internal
) {
1447 /* vm_map_lookup_locked will create a shadow if */
1448 /* needs_copy is set but does not check for the */
1449 /* other two conditions shown. It is important to */
1450 /* set up an object which will not be pulled from */
1453 if (map_entry
->needs_copy
|| object
->shadowed
||
1455 ((vm_object_size_t
)map_entry
->vme_end
-
1456 map_entry
->vme_start
))) {
1457 if (vm_map_lock_read_to_write(target_map
)) {
1458 vm_map_lock_read(target_map
);
1463 /* create a shadow object */
1465 vm_object_shadow(&map_entry
->object
.vm_object
,
1468 - map_entry
->vme_start
));
1469 map_entry
->needs_copy
= FALSE
;
1470 vm_object_unlock(object
);
1471 object
= map_entry
->object
.vm_object
;
1472 vm_object_lock(object
);
1473 object
->size
= map_entry
->vme_end
1474 - map_entry
->vme_start
;
1475 obj_off
= (offset
- map_entry
->vme_start
) +
1477 lo_offset
= map_entry
->offset
;
1478 hi_offset
= (map_entry
->vme_end
-
1479 map_entry
->vme_start
) +
1482 vm_map_lock_write_to_read(target_map
);
		/* note: in the future we can (if necessary) allow for */
		/* memory object lists; this will better support */
		/* fragmentation, but is it necessary?  The user should */
		/* be encouraged to create address-space-oriented */
		/* shared objects from CLEAN memory regions which have */
		/* a known and defined history, i.e. no inheritance */
		/* share; make this call before making the region the */
		/* target of IPCs, etc.  The code above, protecting */
		/* against delayed copy, etc., is mostly defensive. */
1499 object
->true_share
= TRUE
;
1500 user_object
->object
= object
;
1501 user_object
->internal
= object
->internal
;
1502 user_object
->is_sub_map
= FALSE
;
1503 user_object
->offset
= obj_off
;
1504 user_object
->protection
= permission
;
1506 /* the size of mapped entry that overlaps with our region */
1507 /* which is targeted for share. */
1508 /* (entry_end - entry_start) - */
1509 /* offset of our beg addr within entry */
1510 /* it corresponds to this: */
1512 mappable_size
= hi_offset
- obj_off
;
1513 if(*size
> mappable_size
)
1514 *size
= mappable_size
;
1516 user_object
->size
= *size
;
1518 /* user_object pager and internal fields are not used */
1519 /* when the object field is filled in. */
1521 object
->ref_count
++; /* we now point to this object, hold on */
1522 vm_object_res_reference(object
);
1523 vm_object_unlock(object
);
1524 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1526 *size
= user_object
->size
;
1527 *object_handle
= user_handle
;
1528 vm_map_unlock_read(target_map
);
1529 if(pmap_map
!= target_map
)
1530 vm_map_unlock_read(pmap_map
);
1531 return KERN_SUCCESS
;
1534 vm_named_entry_t parent_object
;
		/* The new object will be based on an existing named object */
1537 if(ip_kotype(parent_entry
) != IKOT_NAMED_ENTRY
) {
1538 kr
= KERN_INVALID_ARGUMENT
;
1541 parent_object
= (vm_named_entry_t
)parent_entry
->ip_kobject
;
		if((permission & parent_object->protection) != permission) {
1543 kr
= KERN_INVALID_ARGUMENT
;
1546 if((offset
+ *size
) > parent_object
->size
) {
1547 kr
= KERN_INVALID_ARGUMENT
;
1551 user_object
->object
= parent_object
->object
;
1552 user_object
->size
= *size
;
1553 user_object
->offset
= parent_object
->offset
+ offset
;
1554 user_object
->protection
= permission
;
1555 if(parent_object
->is_sub_map
) {
1556 user_object
->backing
.map
= parent_object
->backing
.map
;
1557 vm_map_lock(user_object
->backing
.map
);
1558 user_object
->backing
.map
->ref_count
++;
1559 vm_map_unlock(user_object
->backing
.map
);
1562 user_object
->backing
.pager
= parent_object
->backing
.pager
;
1564 user_object
->internal
= parent_object
->internal
;
1565 user_object
->is_sub_map
= parent_object
->is_sub_map
;
1567 if(parent_object
->object
!= NULL
) {
1568 /* we now point to this object, hold on */
1569 vm_object_reference(parent_object
->object
);
1570 vm_object_lock(parent_object
->object
);
1571 parent_object
->object
->true_share
= TRUE
;
1572 vm_object_unlock(parent_object
->object
);
1574 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1576 *object_handle
= user_handle
;
1577 return KERN_SUCCESS
;
1583 ipc_port_dealloc_kernel(user_handle
);
1584 kfree((vm_offset_t
)user_object
, sizeof (struct vm_named_entry
));
1589 mach_make_memory_entry(
1590 vm_map_t target_map
,
1593 vm_prot_t permission
,
1594 ipc_port_t
*object_handle
,
1595 ipc_port_t parent_entry
)
1597 vm_object_offset_t size_64
;
1600 size_64
= (vm_object_offset_t
)*size
;
1601 kr
= mach_make_memory_entry_64(target_map
, &size_64
,
1602 (vm_object_offset_t
)offset
, permission
, object_handle
,
1604 *size
= (vm_size_t
)size_64
;
1612 vm_region_object_create(
1613 vm_map_t target_map
,
1615 ipc_port_t
*object_handle
)
1617 vm_named_entry_t user_object
;
1618 ipc_port_t user_handle
;
1621 pmap_t new_pmap
= pmap_create((vm_size_t
) 0);
1622 ipc_port_t previous
;
1625 if(new_pmap
== PMAP_NULL
)
1626 return KERN_FAILURE
;
1627 user_object
= (vm_named_entry_t
)
1628 kalloc(sizeof (struct vm_named_entry
));
1629 if(user_object
== NULL
) {
1630 pmap_destroy(new_pmap
);
1631 return KERN_FAILURE
;
1633 named_entry_lock_init(user_object
);
1634 user_handle
= ipc_port_alloc_kernel();
1637 ip_lock(user_handle
);
1639 /* make a sonce right */
1640 user_handle
->ip_sorights
++;
1641 ip_reference(user_handle
);
1643 user_handle
->ip_destination
= IP_NULL
;
1644 user_handle
->ip_receiver_name
= MACH_PORT_NULL
;
1645 user_handle
->ip_receiver
= ipc_space_kernel
;
1647 /* make a send right */
1648 user_handle
->ip_mscount
++;
1649 user_handle
->ip_srights
++;
1650 ip_reference(user_handle
);
1652 ipc_port_nsrequest(user_handle
, 1, user_handle
, &previous
);
1653 /* nsrequest unlocks user_handle */
1655 /* Create a named object based on a submap of specified size */
1657 new_map
= vm_map_create(new_pmap
, 0, size
, TRUE
);
1658 user_object
->backing
.map
= new_map
;
1661 user_object
->object
= VM_OBJECT_NULL
;
1662 user_object
->internal
= TRUE
;
1663 user_object
->is_sub_map
= TRUE
;
1664 user_object
->offset
= 0;
1665 user_object
->protection
= VM_PROT_ALL
;
1666 user_object
->size
= size
;
1667 user_object
->ref_count
= 1;
1669 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1671 *object_handle
= user_handle
;
1672 return KERN_SUCCESS
;
/* For a given range, check all map entries.  If the entry corresponds to */
/* the old vm_region/map provided on the call, replace it with the */
/* corresponding range in the new vm_region/map */
1679 kern_return_t
vm_map_region_replace(
1680 vm_map_t target_map
,
1681 ipc_port_t old_region
,
1682 ipc_port_t new_region
,
1686 vm_named_entry_t old_object
;
1687 vm_named_entry_t new_object
;
1688 vm_map_t old_submap
;
1689 vm_map_t new_submap
;
1691 vm_map_entry_t entry
;
1692 int nested_pmap
= 0;
1695 vm_map_lock(target_map
);
1696 old_object
= (vm_named_entry_t
)old_region
->ip_kobject
;
1697 new_object
= (vm_named_entry_t
)new_region
->ip_kobject
;
1698 if((!old_object
->is_sub_map
) || (!new_object
->is_sub_map
)) {
1699 vm_map_unlock(target_map
);
1700 return KERN_INVALID_ARGUMENT
;
1702 old_submap
= (vm_map_t
)old_object
->backing
.map
;
1703 new_submap
= (vm_map_t
)new_object
->backing
.map
;
1704 vm_map_lock(old_submap
);
1705 if((old_submap
->min_offset
!= new_submap
->min_offset
) ||
1706 (old_submap
->max_offset
!= new_submap
->max_offset
)) {
1707 vm_map_unlock(old_submap
);
1708 vm_map_unlock(target_map
);
1709 return KERN_INVALID_ARGUMENT
;
1711 if(!vm_map_lookup_entry(target_map
, start
, &entry
)) {
		/* if the src is not contained, the entry precedes */
1714 addr
= entry
->vme_start
;
1715 if(entry
== vm_map_to_entry(target_map
)) {
1716 vm_map_unlock(old_submap
);
1717 vm_map_unlock(target_map
);
1718 return KERN_SUCCESS
;
1720 vm_map_lookup_entry(target_map
, addr
, &entry
);
1722 addr
= entry
->vme_start
;
1723 vm_map_reference(old_submap
);
1724 while((entry
!= vm_map_to_entry(target_map
)) &&
1725 (entry
->vme_start
< end
)) {
1726 if((entry
->is_sub_map
) &&
1727 (entry
->object
.sub_map
== old_submap
)) {
1728 entry
->object
.sub_map
= new_submap
;
1729 if(entry
->use_pmap
) {
1730 if((start
& 0xfffffff) ||
1731 ((end
- start
) != 0x10000000)) {
1732 vm_map_unlock(old_submap
);
1733 vm_map_unlock(target_map
);
1734 return KERN_INVALID_ARGUMENT
;
1738 vm_map_reference(new_submap
);
1739 vm_map_deallocate(old_submap
);
1741 entry
= entry
->vme_next
;
1742 addr
= entry
->vme_start
;
1746 pmap_unnest(target_map
->pmap
, start
, end
- start
);
1747 pmap_nest(target_map
->pmap
, new_submap
->pmap
,
1748 start
, end
- start
);
1751 pmap_remove(target_map
->pmap
, start
, end
);
1753 vm_map_unlock(old_submap
);
1754 vm_map_unlock(target_map
);
1755 return KERN_SUCCESS
;
1760 mach_destroy_memory_entry(
1763 vm_named_entry_t named_entry
;
1765 assert(ip_kotype(port
) == IKOT_NAMED_ENTRY
);
1766 #endif /* MACH_ASSERT */
1767 named_entry
= (vm_named_entry_t
)port
->ip_kobject
;
1768 mutex_lock(&(named_entry
)->Lock
);
1769 named_entry
->ref_count
-=1;
1770 if(named_entry
->ref_count
== 0) {
1771 if(named_entry
->object
) {
1772 /* release the memory object we've been pointing to */
1773 vm_object_deallocate(named_entry
->object
);
1775 if(named_entry
->is_sub_map
) {
1776 vm_map_deallocate(named_entry
->backing
.map
);
1778 kfree((vm_offset_t
)port
->ip_kobject
,
1779 sizeof (struct vm_named_entry
));
1781 mutex_unlock(&(named_entry
)->Lock
);
1787 vm_map_t target_map
,
1792 vm_map_entry_t map_entry
;
1799 vm_map_lock(target_map
);
1800 if(!vm_map_lookup_entry(target_map
, offset
, &map_entry
)) {
1801 vm_map_unlock(target_map
);
1802 return KERN_FAILURE
;
1804 offset
-= map_entry
->vme_start
; /* adjust to offset within entry */
1805 offset
+= map_entry
->offset
; /* adjust to target object offset */
1806 if(map_entry
->object
.vm_object
!= VM_OBJECT_NULL
) {
1807 if(!map_entry
->is_sub_map
) {
1808 object
= map_entry
->object
.vm_object
;
1810 vm_map_unlock(target_map
);
1811 target_map
= map_entry
->object
.sub_map
;
1812 goto restart_page_query
;
1815 vm_map_unlock(target_map
);
1816 return KERN_FAILURE
;
1818 vm_object_lock(object
);
1819 vm_map_unlock(target_map
);
1821 m
= vm_page_lookup(object
, offset
);
1822 if (m
!= VM_PAGE_NULL
) {
1823 *disposition
|= VM_PAGE_QUERY_PAGE_PRESENT
;
1826 if(object
->shadow
) {
1827 offset
+= object
->shadow_offset
;
1828 vm_object_unlock(object
);
1829 object
= object
->shadow
;
1830 vm_object_lock(object
);
1833 vm_object_unlock(object
);
1834 return KERN_FAILURE
;
	/* The ref_count is not strictly accurate: it measures the number */
	/* of entities holding a ref on the object; they may not be mapping */
	/* the object or may not be mapping the section holding the */
	/* target page, but it's still a ballpark number, and though an over- */
	/* count, it picks up the copy-on-write cases. */

	/* We could also get a picture of page sharing from pmap_attributes, */
	/* but this would undercount, as only faulted-in mappings would */
	/* show up. */
1848 *ref_count
= object
->ref_count
;
1850 if (m
->fictitious
) {
1851 *disposition
|= VM_PAGE_QUERY_PAGE_FICTITIOUS
;
1852 vm_object_unlock(object
);
1853 return KERN_SUCCESS
;
1857 *disposition
|= VM_PAGE_QUERY_PAGE_DIRTY
;
1858 else if(pmap_is_modified(m
->phys_addr
))
1859 *disposition
|= VM_PAGE_QUERY_PAGE_DIRTY
;
1862 *disposition
|= VM_PAGE_QUERY_PAGE_REF
;
1863 else if(pmap_is_referenced(m
->phys_addr
))
1864 *disposition
|= VM_PAGE_QUERY_PAGE_REF
;
1866 vm_object_unlock(object
);
1867 return KERN_SUCCESS
;
kern_return_t
set_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);
	dynamic_pager_control_port = control_port;
	return KERN_SUCCESS;
}
kern_return_t
get_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	*control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);
	*control_port = dynamic_pager_control_port;
	return KERN_SUCCESS;
}
1900 assert(ip_kotype(port
) == IKOT_NAMED_ENTRY
);
1901 #endif /* MACH_ASSERT */
1902 upl
= (upl_t
)port
->ip_kobject
;
1903 mutex_lock(&(upl
)->Lock
);
1905 if(upl
->ref_count
== 0) {
1906 mutex_unlock(&(upl
)->Lock
);
1907 uc_upl_abort(upl
, UPL_ABORT_ERROR
);
1909 mutex_unlock(&(upl
)->Lock
);
1912 /* Retrieve a upl for an object underlying an address range in a map */
1918 vm_size_t
*upl_size
,
1920 upl_page_info_t
**page_list
,
1923 int force_data_sync
)
1925 vm_map_entry_t entry
;
1927 int sync_cow_data
= FALSE
;
1928 vm_object_t local_object
;
1929 vm_offset_t local_offset
;
1930 vm_offset_t local_start
;
1933 caller_flags
= *flags
;
1934 if (!(caller_flags
& UPL_COPYOUT_FROM
)) {
1935 sync_cow_data
= TRUE
;
1938 return KERN_INVALID_ARGUMENT
;
1941 if (vm_map_lookup_entry(map
, offset
, &entry
)) {
1942 if((entry
->vme_end
- offset
) < *upl_size
) {
1943 *upl_size
= entry
->vme_end
- offset
;
1946 * Create an object if necessary.
1948 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
1949 entry
->object
.vm_object
= vm_object_allocate(
1950 (vm_size_t
)(entry
->vme_end
- entry
->vme_start
));
1953 if (!(caller_flags
& UPL_COPYOUT_FROM
)) {
1954 if (entry
->needs_copy
1955 || entry
->object
.vm_object
->copy
) {
1958 vm_object_offset_t offset_hi
;
1959 vm_object_offset_t offset_lo
;
1960 vm_object_offset_t new_offset
;
1963 vm_behavior_t behavior
;
1964 vm_map_version_t version
;
1968 vm_map_lock_write_to_read(map
);
1969 if(vm_map_lookup_locked(&local_map
,
1970 offset
, VM_PROT_WRITE
,
1972 &new_offset
, &prot
, &wired
,
1973 &behavior
, &offset_lo
,
1974 &offset_hi
, &pmap_map
)) {
1975 vm_map_unlock(local_map
);
1976 return KERN_FAILURE
;
1978 if (pmap_map
!= map
) {
1979 vm_map_unlock(pmap_map
);
1981 vm_object_unlock(object
);
1982 vm_map_unlock(local_map
);
1984 goto REDISCOVER_ENTRY
;
1987 if (entry
->is_sub_map
) {
1990 submap
= entry
->object
.sub_map
;
1991 local_start
= entry
->vme_start
;
1992 local_offset
= entry
->offset
;
1993 vm_map_reference(submap
);
1996 ret
= (vm_map_get_upl(submap
,
1997 local_offset
+ (offset
- local_start
),
1998 upl_size
, upl
, page_list
, count
,
1999 flags
, force_data_sync
));
2001 vm_map_deallocate(submap
);
2005 if (sync_cow_data
) {
2006 if (entry
->object
.vm_object
->shadow
) {
2009 local_object
= entry
->object
.vm_object
;
2010 local_start
= entry
->vme_start
;
2011 local_offset
= entry
->offset
;
2012 vm_object_reference(local_object
);
2015 if(local_object
->copy
== NULL
) {
2016 flags
= MEMORY_OBJECT_DATA_SYNC
;
2018 flags
= MEMORY_OBJECT_COPY_SYNC
;
2021 if((local_object
->paging_offset
) &&
2022 (local_object
->pager
== 0)) {
2024 * do a little clean-up for our unorthodox
2025 * entry into a pager call from a non-pager
2026 * context. Normally the pager code
2027 * assumes that an object it has been called
2028 * with has a backing pager and so does
2029 * not bother to check the pager field
2030 * before relying on the paging_offset
2032 vm_object_lock(local_object
);
2033 if (local_object
->pager
== 0) {
2034 local_object
->paging_offset
= 0;
2036 vm_object_unlock(local_object
);
2039 memory_object_lock_request(
2040 local_object
, ((offset
- local_start
)
2042 local_object
->paging_offset
,
2043 (vm_object_size_t
)*upl_size
, FALSE
,
2045 VM_PROT_NO_CHANGE
, NULL
, 0);
2046 sync_cow_data
= FALSE
;
2047 goto REDISCOVER_ENTRY
;
2051 if (force_data_sync
) {
2053 local_object
= entry
->object
.vm_object
;
2054 local_start
= entry
->vme_start
;
2055 local_offset
= entry
->offset
;
2056 vm_object_reference(local_object
);
2059 if((local_object
->paging_offset
) &&
2060 (local_object
->pager
== 0)) {
2062 * do a little clean-up for our unorthodox
2063 * entry into a pager call from a non-pager
2064 * context. Normally the pager code
2065 * assumes that an object it has been called
2066 * with has a backing pager and so does
2067 * not bother to check the pager field
2068 * before relying on the paging_offset
2070 vm_object_lock(local_object
);
2071 if (local_object
->pager
== 0) {
2072 local_object
->paging_offset
= 0;
2074 vm_object_unlock(local_object
);
2077 memory_object_lock_request(
2078 local_object
, ((offset
- local_start
)
2080 local_object
->paging_offset
,
2081 (vm_object_size_t
)*upl_size
, FALSE
,
2082 MEMORY_OBJECT_DATA_SYNC
,
2085 force_data_sync
= FALSE
;
2086 goto REDISCOVER_ENTRY
;
2089 if(!(entry
->object
.vm_object
->private)) {
2090 if(*upl_size
> (MAX_UPL_TRANSFER
*PAGE_SIZE
))
2091 *upl_size
= (MAX_UPL_TRANSFER
*PAGE_SIZE
);
2092 if(entry
->object
.vm_object
->phys_contiguous
) {
2093 *flags
= UPL_PHYS_CONTIG
;
2098 *flags
= UPL_DEV_MEMORY
| UPL_PHYS_CONTIG
;
2100 local_object
= entry
->object
.vm_object
;
2101 local_offset
= entry
->offset
;
2102 local_start
= entry
->vme_start
;
2103 vm_object_reference(local_object
);
2105 ret
= (vm_fault_list_request(local_object
,
2106 ((offset
- local_start
) + local_offset
),
2112 vm_object_deallocate(local_object
);
2117 return(KERN_FAILURE
);
2123 vm_object_upl_request(
2125 vm_object_offset_t offset
,
2128 upl_page_info_t
*page_list
,
2129 mach_msg_type_number_t
*count
,
2133 ipc_port_t upl_port
;
2134 ipc_port_t previous
;
2135 upl_page_info_t
*pl
;
2139 kr
= vm_fault_list_request(object
, offset
, size
, &upl_object
,
2140 &pl
, *count
, cntrl_flags
);
2143 if(kr
!= KERN_SUCCESS
) {
2144 *upl
= MACH_PORT_NULL
;
2145 return KERN_FAILURE
;
2148 upl_port
= ipc_port_alloc_kernel();
2153 /* make a sonce right */
2154 upl_port
->ip_sorights
++;
2155 ip_reference(upl_port
);
2157 upl_port
->ip_destination
= IP_NULL
;
2158 upl_port
->ip_receiver_name
= MACH_PORT_NULL
;
2159 upl_port
->ip_receiver
= ipc_space_kernel
;
2161 /* make a send right */
2162 upl_port
->ip_mscount
++;
2163 upl_port
->ip_srights
++;
2164 ip_reference(upl_port
);
2166 ipc_port_nsrequest(upl_port
, 1, upl_port
, &previous
);
2167 /* nsrequest unlocks user_handle */
2169 /* Create a named object based on a submap of specified size */
2172 ipc_kobject_set(upl_port
, (ipc_kobject_t
) upl_object
, IKOT_UPL
);
2174 return KERN_SUCCESS
;
2178 vm_pager_upl_request(
2180 vm_object_offset_t offset
,
2182 vm_size_t super_size
,
2184 upl_page_info_t
*page_list
,
2185 mach_msg_type_number_t
*count
,
2189 ipc_port_t upl_port
;
2190 ipc_port_t previous
;
2191 upl_page_info_t
*pl
;
2195 kr
= upl_system_list_request(object
, offset
, size
, super_size
,
2196 &upl_object
, &pl
, *count
, cntrl_flags
);
2198 if(kr
!= KERN_SUCCESS
) {
2199 *upl
= MACH_PORT_NULL
;
2200 return KERN_FAILURE
;
2204 upl_port
= ipc_port_alloc_kernel();
2209 /* make a sonce right */
2210 upl_port
->ip_sorights
++;
2211 ip_reference(upl_port
);
2213 upl_port
->ip_destination
= IP_NULL
;
2214 upl_port
->ip_receiver_name
= MACH_PORT_NULL
;
2215 upl_port
->ip_receiver
= ipc_space_kernel
;
2217 /* make a send right */
2218 upl_port
->ip_mscount
++;
2219 upl_port
->ip_srights
++;
2220 ip_reference(upl_port
);
2222 ipc_port_nsrequest(upl_port
, 1, upl_port
, &previous
);
2223 /* nsrequest unlocks user_handle */
2225 /* Create a named object based on a submap of specified size */
2228 ipc_kobject_set(upl_port
, (ipc_kobject_t
) upl_object
, IKOT_UPL
);
2230 return KERN_SUCCESS
;
2236 ipc_port_t upl_port
,
2237 vm_offset_t
*dst_addr
)
2242 if (!IP_VALID(upl_port
)) {
2243 return KERN_INVALID_ARGUMENT
;
2244 } else if (ip_kotype(upl_port
) == IKOT_UPL
) {
2246 upl
= (upl_t
)upl_port
->ip_kobject
;
2247 kr
= uc_upl_map(map
, upl
, dst_addr
);
2251 return KERN_FAILURE
;
2259 ipc_port_t upl_port
)
2264 if (!IP_VALID(upl_port
)) {
2265 return KERN_INVALID_ARGUMENT
;
2266 } else if (ip_kotype(upl_port
) == IKOT_UPL
) {
2268 upl
= (upl_t
)upl_port
->ip_kobject
;
2269 kr
= uc_upl_un_map(map
, upl
);
2273 return KERN_FAILURE
;
2280 upl_page_list_ptr_t page_list
,
2281 mach_msg_type_number_t count
)
2286 kr
= uc_upl_commit(upl
, (upl_page_info_t
*)page_list
);
2288 kr
= uc_upl_commit(upl
, (upl_page_info_t
*) NULL
);
2295 vm_upl_commit_range(
2299 upl_page_list_ptr_t page_list
,
2301 mach_msg_type_number_t count
)
2306 kr
= uc_upl_commit_range(upl
, offset
, size
, flags
,
2307 (upl_page_info_t
*)page_list
);
2309 kr
= uc_upl_commit_range(upl
, offset
, size
, flags
,
2310 (upl_page_info_t
*) NULL
);
2325 kr
= uc_upl_abort_range(upl
, offset
, size
, abort_flags
);
2337 kr
= uc_upl_abort(upl
, abort_type
);
2342 /* ******* Temporary Internal calls to UPL for BSD ***** */
2347 vm_offset_t
*dst_addr
)
2352 kr
= uc_upl_map(map
, upl
, dst_addr
);
2353 if(kr
== KERN_SUCCESS
) {
2354 upl
->ref_count
+= 1;
2369 kr
= uc_upl_un_map(map
, upl
);
2370 if(kr
== KERN_SUCCESS
) {
2371 if(upl
->ref_count
== 1) {
2374 upl
->ref_count
-= 1;
2386 upl_page_list_ptr_t page_list
,
2387 mach_msg_type_number_t count
)
2391 upl
->ref_count
+= 1;
2393 kr
= uc_upl_commit(upl
, (upl_page_info_t
*)page_list
);
2395 kr
= uc_upl_commit(upl
, (upl_page_info_t
*) NULL
);
2397 if(upl
->ref_count
== 1) {
2400 upl
->ref_count
-= 1;
2407 kernel_upl_commit_range(
2412 upl_page_list_ptr_t page_list
,
2413 mach_msg_type_number_t count
)
2417 upl
->ref_count
+= 1;
2419 kr
= uc_upl_commit_range(upl
, offset
, size
, flags
,
2420 (upl_page_info_t
*)page_list
);
2422 kr
= uc_upl_commit_range(upl
, offset
, size
, flags
,
2423 (upl_page_info_t
*) NULL
);
2425 if(upl
->ref_count
== 1) {
2428 upl
->ref_count
-= 1;
2435 kernel_upl_abort_range(
2443 upl
->ref_count
+= 1;
2444 kr
= uc_upl_abort_range(upl
, offset
, size
, abort_flags
);
2445 if(upl
->ref_count
== 1) {
2448 upl
->ref_count
-= 1;
2461 upl
->ref_count
+= 1;
2462 kr
= uc_upl_abort(upl
, abort_type
);
2463 if(upl
->ref_count
== 1) {
2466 upl
->ref_count
-= 1;
2474 /* code snippet from vm_map */
2476 vm_object_create_nomap(ipc_port_t port
, vm_object_size_t size
)
2478 vm_object_t object_ptr
;
2479 return memory_object_create_named(port
, size
, &object_ptr
);
2484 * Temporary interface to overcome old style ipc artifacts, and allow
2485 * ubc to call this routine directly. Will disappear with new RPC
2486 * component architecture.
2487 * NOTE: call to memory_object_destroy removes the vm_object's association
2488 * with its abstract memory object and hence the named flag is set to false.
2491 memory_object_destroy_named(
2493 kern_return_t reason
)
2495 vm_object_lock(object
);
2496 if(object
->named
== FALSE
) {
2497 panic("memory_object_destroy_named called by party which doesn't hold right");
2499 object
->ref_count
++;
2500 vm_object_res_reference(object
);
2501 vm_object_unlock(object
);
2502 return (memory_object_destroy(object
, reason
));
2506 * Temporary interface to overcome old style ipc artifacts, and allow
2507 * ubc to call this routine directly. Will disappear with new RPC
2508 * component architecture.
2509 * Note: No change is made in the named flag.
2512 memory_object_lock_request_named(
2514 vm_object_offset_t offset
,
2515 vm_object_size_t size
,
2516 memory_object_return_t should_return
,
2517 boolean_t should_flush
,
2519 ipc_port_t reply_to
)
2521 vm_object_lock(object
);
2522 if(object
->named
== FALSE
) {
2523 panic("memory_object_lock_request_named called by party which doesn't hold right");
2525 object
->ref_count
++;
2526 vm_object_res_reference(object
);
2527 vm_object_unlock(object
);
2528 return (memory_object_lock_request(object
,
2529 offset
, size
, should_return
, should_flush
, prot
,
2534 memory_object_change_attributes_named(
2536 memory_object_flavor_t flavor
,
2537 memory_object_info_t attributes
,
2538 mach_msg_type_number_t count
,
2539 ipc_port_t reply_to
,
2540 mach_msg_type_name_t reply_to_type
)
2542 vm_object_lock(object
);
2543 if(object
->named
== FALSE
) {
2544 panic("memory_object_lock_request_named called by party which doesn't hold right");
2546 object
->ref_count
++;
2547 vm_object_res_reference(object
);
2548 vm_object_unlock(object
);
2549 return (memory_object_change_attributes(object
,
2550 flavor
, attributes
, count
, reply_to
, reply_to_type
));
kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	return KERN_SUCCESS;
}

kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	task->system_shared_region = (vm_offset_t) shared_region;
	return KERN_SUCCESS;
}
2572 shared_region_mapping_info(
2573 shared_region_mapping_t shared_region
,
2574 ipc_port_t
*text_region
,
2575 vm_size_t
*text_size
,
2576 ipc_port_t
*data_region
,
2577 vm_size_t
*data_size
,
2578 vm_offset_t
*region_mappings
,
2579 vm_offset_t
*client_base
,
2580 vm_offset_t
*alt_base
,
2581 vm_offset_t
*alt_next
,
2583 shared_region_mapping_t
*next
)
2585 shared_region_mapping_lock(shared_region
);
2587 *text_region
= shared_region
->text_region
;
2588 *text_size
= shared_region
->text_size
;
2589 *data_region
= shared_region
->data_region
;
2590 *data_size
= shared_region
->data_size
;
2591 *region_mappings
= shared_region
->region_mappings
;
2592 *client_base
= shared_region
->client_base
;
2593 *alt_base
= shared_region
->alternate_base
;
2594 *alt_next
= shared_region
->alternate_next
;
2595 *flags
= shared_region
->flags
;
2596 *next
= shared_region
->next
;
2598 shared_region_mapping_unlock(shared_region
);
2602 shared_region_object_chain_attach(
2603 shared_region_mapping_t target_region
,
2604 shared_region_mapping_t object_chain_region
)
2606 shared_region_object_chain_t object_ele
;
2608 if(target_region
->object_chain
)
2609 return KERN_FAILURE
;
2610 object_ele
= (shared_region_object_chain_t
)
2611 kalloc(sizeof (struct shared_region_object_chain
));
2612 shared_region_mapping_lock(object_chain_region
);
2613 target_region
->object_chain
= object_ele
;
2614 object_ele
->object_chain_region
= object_chain_region
;
2615 object_ele
->next
= object_chain_region
->object_chain
;
2616 object_ele
->depth
= object_chain_region
->depth
;
2617 object_chain_region
->depth
++;
2618 target_region
->alternate_next
= object_chain_region
->alternate_next
;
2619 shared_region_mapping_unlock(object_chain_region
);
2620 return KERN_SUCCESS
;
2624 shared_region_mapping_create(
2625 ipc_port_t text_region
,
2626 vm_size_t text_size
,
2627 ipc_port_t data_region
,
2628 vm_size_t data_size
,
2629 vm_offset_t region_mappings
,
2630 vm_offset_t client_base
,
2631 shared_region_mapping_t
*shared_region
,
2632 vm_offset_t alt_base
,
2633 vm_offset_t alt_next
)
2635 *shared_region
= (shared_region_mapping_t
)
2636 kalloc(sizeof (struct shared_region_mapping
));
2637 if(*shared_region
== NULL
)
2638 return KERN_FAILURE
;
2639 shared_region_mapping_lock_init((*shared_region
));
2640 (*shared_region
)->text_region
= text_region
;
2641 (*shared_region
)->text_size
= text_size
;
2642 (*shared_region
)->data_region
= data_region
;
2643 (*shared_region
)->data_size
= data_size
;
2644 (*shared_region
)->region_mappings
= region_mappings
;
2645 (*shared_region
)->client_base
= client_base
;
2646 (*shared_region
)->ref_count
= 1;
2647 (*shared_region
)->next
= NULL
;
2648 (*shared_region
)->object_chain
= NULL
;
2649 (*shared_region
)->self
= *shared_region
;
2650 (*shared_region
)->flags
= 0;
2651 (*shared_region
)->depth
= 0;
2652 (*shared_region
)->alternate_base
= alt_base
;
2653 (*shared_region
)->alternate_next
= alt_next
;
2654 return KERN_SUCCESS
;
kern_return_t
shared_region_mapping_set_alt_next(
	shared_region_mapping_t	shared_region,
	vm_offset_t		alt_next)
{
	shared_region->alternate_next = alt_next;
	return KERN_SUCCESS;
}

kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	if(shared_region == NULL)
		return KERN_SUCCESS;
	shared_region_mapping_lock(shared_region);
	shared_region->ref_count++;
	shared_region_mapping_unlock(shared_region);
	return KERN_SUCCESS;
}
2679 shared_region_mapping_dealloc(
2680 shared_region_mapping_t shared_region
)
2682 struct shared_region_task_mappings sm_info
;
2683 shared_region_mapping_t next
;
2685 if(shared_region
== NULL
)
2686 return KERN_SUCCESS
;
2687 shared_region_mapping_lock(shared_region
);
2689 if((--shared_region
->ref_count
) == 0) {
2691 sm_info
.text_region
= shared_region
->text_region
;
2692 sm_info
.text_size
= shared_region
->text_size
;
2693 sm_info
.data_region
= shared_region
->data_region
;
2694 sm_info
.data_size
= shared_region
->data_size
;
2695 sm_info
.region_mappings
= shared_region
->region_mappings
;
2696 sm_info
.client_base
= shared_region
->client_base
;
2697 sm_info
.alternate_base
= shared_region
->alternate_base
;
2698 sm_info
.alternate_next
= shared_region
->alternate_next
;
2699 sm_info
.flags
= shared_region
->flags
;
2700 sm_info
.self
= (vm_offset_t
)shared_region
;
2702 lsf_remove_regions_mappings(shared_region
, &sm_info
);
2703 pmap_remove(((vm_named_entry_t
)
2704 (shared_region
->text_region
->ip_kobject
))
2705 ->backing
.map
->pmap
,
2706 sm_info
.client_base
,
2707 sm_info
.client_base
+ sm_info
.text_size
);
2708 ipc_port_release_send(shared_region
->text_region
);
2709 ipc_port_release_send(shared_region
->data_region
);
2710 if(shared_region
->object_chain
) {
2711 shared_region_mapping_dealloc(
2712 shared_region
->object_chain
->object_chain_region
);
2713 kfree((vm_offset_t
)shared_region
->object_chain
,
2714 sizeof (struct shared_region_object_chain
));
2716 kfree((vm_offset_t
)shared_region
,
2717 sizeof (struct shared_region_mapping
));
2718 return KERN_SUCCESS
;
2720 shared_region_mapping_unlock(shared_region
);
2721 return KERN_SUCCESS
;
2725 vm_map_get_phys_page(
2729 vm_map_entry_t entry
;
2732 vm_offset_t phys_addr
= 0;
2736 while (vm_map_lookup_entry(map
, offset
, &entry
)) {
2738 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
2740 return (vm_offset_t
) 0;
2742 if (entry
->is_sub_map
) {
2744 vm_map_lock(entry
->object
.sub_map
);
2746 map
= entry
->object
.sub_map
;
2747 offset
= entry
->offset
+ (offset
- entry
->vme_start
);
2748 vm_map_unlock(old_map
);
2751 offset
= entry
->offset
+ (offset
- entry
->vme_start
);
2752 object
= entry
->object
.vm_object
;
2753 vm_object_lock(object
);
2755 vm_page_t dst_page
= vm_page_lookup(object
,offset
);
2756 if(dst_page
== VM_PAGE_NULL
) {
2757 if(object
->shadow
) {
2758 vm_object_t old_object
;
2759 vm_object_lock(object
->shadow
);
2760 old_object
= object
;
2761 offset
= offset
+ object
->shadow_offset
;
2762 object
= object
->shadow
;
2763 vm_object_unlock(old_object
);
2765 vm_object_unlock(object
);
2769 phys_addr
= dst_page
->phys_addr
;
2770 vm_object_unlock(object
);