/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/vm_map_server.h>
#include <mach/mach_syscalls.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <kern/host.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>

vm_size_t	upl_offset_to_pagelist = 0;

ipc_port_t	dynamic_pager_control_port = NULL;
/*
 *	vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
vm_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addr,
	register vm_size_t	size,
	int			flags)
{
	kern_return_t	result;
	boolean_t	anywhere = VM_FLAGS_ANYWHERE & flags;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	if (anywhere)
		*addr = vm_map_min(map);
	else
		*addr = trunc_page(*addr);
	size = round_page(size);
	if (size == 0) {
		return(KERN_INVALID_ARGUMENT);
	}

	result = vm_map_enter(
			map,
			addr,
			size,
			(vm_offset_t)0,
			flags,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	return(result);
}
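/*
 * Illustrative user-space sketch (not compiled here): how the MIG/trap
 * counterpart of the vm_allocate routine above (and the vm_deallocate
 * routine defined below) is typically driven from a task.  Assumes the
 * standard <mach/mach.h> user interfaces; the buffer size is hypothetical
 * and the final parameter may be a plain "anywhere" boolean on older
 * interface versions.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

int
main(void)
{
	vm_address_t	addr = 0;
	vm_size_t	size = 4 * vm_page_size;
	kern_return_t	kr;

	/* "zero fill" anywhere allocation in the caller's own map */
	kr = vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		printf("vm_allocate: %s\n", mach_error_string(kr));
		return 1;
	}
	((char *)addr)[0] = 1;	/* pages are zero filled on first touch */

	kr = vm_deallocate(mach_task_self(), addr, size);
	return (kr == KERN_SUCCESS) ? 0 : 1;
}
#endif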
/*
 *	vm_deallocate deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
vm_deallocate(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, trunc_page(start),
			     round_page(start+size), VM_MAP_NO_FLAGS));
}
/*
 *	vm_inherit sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
vm_inherit(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (new_inheritance > VM_INHERIT_LAST_VALID)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map,
			      trunc_page(start),
			      round_page(start+size),
			      new_inheritance));
}
/*
 *	vm_protect sets the protection of the specified range in the
 *	specified map.
 */
kern_return_t
vm_protect(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map,
			      trunc_page(start),
			      round_page(start+size),
			      new_protection,
			      set_maximum));
}
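/*
 * Illustrative user-space sketch (not compiled here): combining the
 * vm_protect and vm_inherit interfaces above to make a region read-only
 * and keep it out of any child created by fork().  The region address and
 * size are hypothetical.
 */
#if 0	/* example only */
#include <mach/mach.h>

static kern_return_t
seal_region(vm_address_t addr, vm_size_t size)
{
	kern_return_t kr;

	/* drop write access; set_maximum == FALSE leaves the maximum alone */
	kr = vm_protect(mach_task_self(), addr, size, FALSE, VM_PROT_READ);
	if (kr != KERN_SUCCESS)
		return kr;

	/* do not propagate the region across task inheritance (fork) */
	return vm_inherit(mach_task_self(), addr, size, VM_INHERIT_NONE);
}
#endif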
/*
 *	Handle machine-specific attributes for a mapping, such
 *	as cachability, migrability, etc.
 */
kern_return_t
vm_machine_attribute(
	vm_map_t			map,
	vm_address_t			address,
	vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	return vm_map_machine_attribute(map, address, size, attribute, value);
}
kern_return_t
vm_read(
	vm_map_t		map,
	vm_address_t		address,
	vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if ((error = vm_map_copyin(map,
				address,
				size,
				FALSE,	/* src_destroy */
				&ipc_address)) == KERN_SUCCESS) {
		*data = (pointer_t) ipc_address;
		*data_size = size;
	}
	return(error);
}
kern_return_t
vm_read_list(
	vm_map_t		map,
	vm_read_entry_t		data_list,
	mach_msg_type_number_t	count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	for (i = 0; i < count; i++) {
		error = vm_map_copyin(map,
				data_list[i].address,
				data_list[i].size,
				FALSE,	/* src_destroy */
				&ipc_address);
		if (error != KERN_SUCCESS) {
			data_list[i].address = (vm_address_t)0;
			data_list[i].size = (vm_size_t)0;
			break;
		} else {
			if (data_list[i].size != 0) {
				error = vm_map_copyout(current_task()->map,
						&(data_list[i].address),
						(vm_map_copy_t) ipc_address);
				if (error != KERN_SUCCESS) {
					data_list[i].address = (vm_address_t)0;
					data_list[i].size = (vm_size_t)0;
					break;
				}
			}
		}
	}
	return(error);
}
/*
 *	This routine reads from the specified map and overwrites part of the current
 *	activation's map.  In making an assumption that the current thread is local,
 *	it is no longer cluster-safe without a fully supportive local proxy thread/
 *	task (but we don't support clusters anymore so this is moot).
 */

#define VM_OVERWRITE_SMALL 512

kern_return_t
vm_read_overwrite(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_address_t	data,
	vm_size_t	*data_size)
{
	vm_map_copy_t	copy;
	char		inbuf[VM_OVERWRITE_SMALL];
	kern_return_t	error = KERN_SUCCESS;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size <= VM_OVERWRITE_SMALL) {
		if (vm_map_read_user(map, (vm_offset_t)address,
				(vm_offset_t)&inbuf, size)) {
			error = KERN_INVALID_ADDRESS;
		} else {
			if (vm_map_write_user(current_map(),
					(vm_offset_t)&inbuf, (vm_offset_t)data, size))
				error = KERN_INVALID_ADDRESS;
		}
	} else {
		if ((error = vm_map_copyin(map,
					address,
					size,
					FALSE,	/* src_destroy */
					&copy)) == KERN_SUCCESS) {
			if ((error = vm_map_copy_overwrite(
					current_act()->map,
					data,
					copy,
					FALSE)) == KERN_SUCCESS) {
			} else {
				vm_map_copy_discard(copy);
			}
		}
	}
	*data_size = size;
	return(error);
}
kern_return_t
vm_write(
	vm_map_t		map,
	vm_address_t		address,
	vm_offset_t		data,
	mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
				     FALSE /* interruptible XXX */);
}
kern_return_t
vm_copy(
	vm_map_t	map,
	vm_address_t	source_address,
	vm_size_t	size,
	vm_address_t	dest_address)
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, source_address, size,
			   FALSE, &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copy_overwrite(map, dest_address, copy,
				   FALSE /* interruptible XXX */);
	if (kr != KERN_SUCCESS) {
		vm_map_copy_discard(copy);
		return kr;
	}

	return KERN_SUCCESS;
}
kern_return_t
vm_map_64(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		initial_size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	register vm_object_t	object;
	vm_object_size_t	size = (vm_object_size_t)initial_size;
	kern_return_t		result;

	/*
	 * Check arguments for validity
	 */
	if ((target_map == VM_MAP_NULL) ||
		(cur_protection & ~VM_PROT_ALL) ||
		(max_protection & ~VM_PROT_ALL) ||
		(inheritance > VM_INHERIT_LAST_VALID) ||
		size == 0)
		return(KERN_INVALID_ARGUMENT);

	/*
	 * Find the vm object (if any) corresponding to this port.
	 */
	if (!IP_VALID(port)) {
		object = VM_OBJECT_NULL;
		offset = 0;
		copy = FALSE;
	} else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
		vm_named_entry_t	named_entry;

		named_entry = (vm_named_entry_t)port->ip_kobject;
		/* a few checks to make sure user is obeying rules */
		if (offset >= named_entry->size)
			return(KERN_INVALID_RIGHT);
		size = named_entry->size - offset;
		if ((named_entry->protection & max_protection) != max_protection)
			return(KERN_INVALID_RIGHT);
		if ((named_entry->protection & cur_protection) != cur_protection)
			return(KERN_INVALID_RIGHT);
		if (named_entry->size < (offset + size))
			return(KERN_INVALID_ARGUMENT);

		/* the callers parameter offset is defined to be the */
		/* offset from beginning of named entry offset in object */
		offset = offset + named_entry->offset;

		named_entry_lock(named_entry);
		if (named_entry->is_sub_map) {
			vm_map_entry_t	map_entry;

			named_entry_unlock(named_entry);
			*address = trunc_page(*address);
			size = round_page(size);
			vm_object_reference(vm_submap_object);
			if ((result = vm_map_enter(target_map,
				address, size, mask, flags,
				vm_submap_object, 0, FALSE,
				cur_protection, max_protection, inheritance
				)) != KERN_SUCCESS) {
				vm_object_deallocate(vm_submap_object);
			} else {
				char	alias;

				VM_GET_FLAGS_ALIAS(flags, alias);
				if ((alias == VM_MEMORY_SHARED_PMAP) &&
					!copy) {
					vm_map_submap(target_map, *address,
						*address + size,
						named_entry->backing.map,
						(vm_offset_t)offset, TRUE);
				} else {
					vm_map_submap(target_map, *address,
						*address + size,
						named_entry->backing.map,
						(vm_offset_t)offset, FALSE);
				}
				if (copy) {
					if (vm_map_lookup_entry(
					   target_map, *address, &map_entry)) {
						map_entry->needs_copy = TRUE;
					}
				}
			}
			return(result);
		} else if (named_entry->object) {
			/* This is the case where we are going to map */
			/* an already mapped object.  If the object is */
			/* not ready it is internal.  An external     */
			/* object cannot be mapped until it is ready  */
			/* we can therefore avoid the ready check     */
			named_entry_unlock(named_entry);
			vm_object_reference(named_entry->object);
			object = named_entry->object;
		} else {
			object = vm_object_enter(named_entry->backing.pager,
					named_entry->size,
					named_entry->internal,
					FALSE,
					FALSE);
			if (object == VM_OBJECT_NULL) {
				named_entry_unlock(named_entry);
				return(KERN_INVALID_OBJECT);
			}
			object->true_share = TRUE;
			named_entry->object = object;
			named_entry_unlock(named_entry);
			/* create an extra reference for the named entry */
			vm_object_reference(named_entry->object);
			/* wait for object (if any) to be ready */
			if (object != VM_OBJECT_NULL) {
				vm_object_lock(object);
				while (!object->pager_ready) {
					vm_object_wait(object,
						VM_OBJECT_EVENT_PAGER_READY,
						THREAD_UNINT);
					vm_object_lock(object);
				}
				vm_object_unlock(object);
			}
		}
	} else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
		/*
		 * JMM - This is temporary until we unify named entries
		 * and raw memory objects.
		 *
		 * Detected fake ip_kotype for a memory object.  In
		 * this case, the port isn't really a port at all, but
		 * instead is just a raw memory object.
		 */
		if ((object = vm_object_enter((memory_object_t)port,
			size, FALSE, FALSE, FALSE))
			== VM_OBJECT_NULL)
			return(KERN_INVALID_OBJECT);

		/* wait for object (if any) to be ready */
		if (object != VM_OBJECT_NULL) {
			if (object == kernel_object) {
				printf("Warning: Attempt to map kernel object"
					" by a non-private kernel entity\n");
				return(KERN_INVALID_OBJECT);
			}
			vm_object_lock(object);
			while (!object->pager_ready) {
				vm_object_wait(object,
					VM_OBJECT_EVENT_PAGER_READY,
					THREAD_UNINT);
				vm_object_lock(object);
			}
			vm_object_unlock(object);
		}
	} else {
		return (KERN_INVALID_OBJECT);
	}

	*address = trunc_page(*address);
	size = round_page(size);

	/*
	 * Perform the copy if requested
	 */
	if (copy) {
		vm_object_t		new_object;
		vm_object_offset_t	new_offset;

		result = vm_object_copy_strategically(object, offset, size,
				&new_object, &new_offset,
				&copy);

		if (result == KERN_MEMORY_RESTART_COPY) {
			boolean_t success;
			boolean_t src_needs_copy;

			/*
			 * XXX
			 * We currently ignore src_needs_copy.
			 * This really is the issue of how to make
			 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
			 * non-kernel users to use. Solution forthcoming.
			 * In the meantime, since we don't allow non-kernel
			 * memory managers to specify symmetric copy,
			 * we won't run into problems here.
			 */
			new_object = object;
			new_offset = offset;
			success = vm_object_copy_quickly(&new_object,
							 new_offset, size,
							 &src_needs_copy,
							 &copy);
			assert(success);
			result = KERN_SUCCESS;
		}
		/*
		 * Throw away the reference to the
		 * original object, as it won't be mapped.
		 */

		vm_object_deallocate(object);

		if (result != KERN_SUCCESS)
			return (result);

		object = new_object;
		offset = new_offset;
	}

	if ((result = vm_map_enter(target_map,
				address, size, mask, flags,
				object, offset,
				copy,
				cur_protection, max_protection, inheritance
				)) != KERN_SUCCESS)
		vm_object_deallocate(object);
	return(result);
}
/* temporary, until world build */
kern_return_t
vm_map(
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags,
	ipc_port_t	port,
	vm_offset_t	offset,
	boolean_t	copy,
	vm_prot_t	cur_protection,
	vm_prot_t	max_protection,
	vm_inherit_t	inheritance)
{
	return vm_map_64(target_map, address, size, mask, flags,
			port, (vm_object_offset_t)offset, copy,
			cur_protection, max_protection, inheritance);
}
/*
 * NOTE: this routine (and this file) will no longer require mach_host_server.h
 * when vm_wire is changed to use ledgers.
 */
#include <mach/mach_host_server.h>

/*
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t		host_priv,
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL)
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, trunc_page(start),
				 round_page(start+size), access, TRUE);
	} else {
		rc = vm_map_unwire(map, trunc_page(start),
				   round_page(start+size), TRUE);
	}
	return rc;
}
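/*
 * Illustrative user-space sketch (not compiled here): wiring and unwiring a
 * range through the vm_wire interface above.  Assumes the caller already
 * holds a send right to the host privilege port; how that right is obtained
 * is outside the scope of this sketch.
 */
#if 0	/* example only */
#include <mach/mach.h>

static kern_return_t
wire_then_unwire(mach_port_t host_priv, vm_address_t addr, vm_size_t size)
{
	kern_return_t kr;

	kr = vm_wire(host_priv, mach_task_self(), addr, size,
		     VM_PROT_READ | VM_PROT_WRITE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* VM_PROT_NONE undoes the wiring, as noted in the comment above */
	return vm_wire(host_priv, mach_task_self(), addr, size, VM_PROT_NONE);
}
#endif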
/*
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager engaging in a memory object synchronize dialog with
 *	the manager.  The client doesn't return until the manager issues
 *	m_o_s_completed message.  MIG Magically converts user task parameter
 *	to the task's address map.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	NOTE
 *	  The memory object attributes have not yet been implemented, this
 *	  function will have to deal with the invalidate attribute
 *
 *	RETURNS
 *	  KERN_INVALID_TASK		Bad task parameter
 *	  KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	  KERN_SUCCESS			The usual.
 */
kern_return_t
vm_msync(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_sync_t	sync_flags)
{
	msync_req_t		msr;
	msync_req_t		new_msr;
	queue_chain_t		req_q;	/* queue of requests for this msync */
	vm_map_entry_t		entry;
	vm_size_t		amount_left;
	vm_object_offset_t	offset;
	boolean_t		do_sync_req;
	boolean_t		modifiable;

	if ((sync_flags & VM_SYNC_ASYNCHRONOUS) &&
	    (sync_flags & VM_SYNC_SYNCHRONOUS))
		return(KERN_INVALID_ARGUMENT);

	/*
	 * align address and size on page boundaries
	 */
	size = round_page(address + size) - trunc_page(address);
	address = trunc_page(address);

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	if (size == 0)
		return(KERN_SUCCESS);

	queue_init(&req_q);
	amount_left = size;

	while (amount_left > 0) {
		vm_size_t	flush_size;
		vm_object_t	object;

		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, address, &entry)) {
			vm_size_t	skip;

			/*
			 * hole in the address map.
			 */

			/*
			 * Check for empty map.
			 */
			if (entry == vm_map_to_entry(map) &&
			    entry->vme_next == entry) {
				vm_map_unlock(map);
				break;
			}
			/*
			 * Check that we don't wrap and that
			 * we have at least one real map entry.
			 */
			if ((map->hdr.nentries == 0) ||
			    (entry->vme_next->vme_start < address)) {
				vm_map_unlock(map);
				break;
			}
			/*
			 * Move up to the next entry if needed
			 */
			skip = (entry->vme_next->vme_start - address);
			if (skip >= amount_left)
				amount_left = 0;
			else
				amount_left -= skip;
			address = entry->vme_next->vme_start;
			vm_map_unlock(map);
			continue;
		}

		offset = address - entry->vme_start;

		/*
		 * do we have more to flush than is contained in this
		 * entry ?
		 */
		if (amount_left + entry->vme_start + offset > entry->vme_end) {
			flush_size = entry->vme_end -
					 (entry->vme_start + offset);
		} else {
			flush_size = amount_left;
		}
		amount_left -= flush_size;
		address += flush_size;

		if (entry->is_sub_map == TRUE) {
			vm_map_t	local_map;
			vm_offset_t	local_offset;

			local_map = entry->object.sub_map;
			local_offset = entry->offset;
			vm_map_unlock(map);
			vm_msync(local_map, local_offset,
				 flush_size, sync_flags);
			continue;
		}
		object = entry->object.vm_object;

		/*
		 * We can't sync this object if the object has not been
		 * created yet
		 */
		if (object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			continue;
		}
		offset += entry->offset;
		modifiable = (entry->protection & VM_PROT_WRITE)
				!= VM_PROT_NONE;

		vm_object_lock(object);

		if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) {
			boolean_t kill_pages = 0;

			if (sync_flags & VM_SYNC_KILLPAGES) {
				if (object->ref_count == 1 && !entry->needs_copy && !object->shadow)
					kill_pages = 1;
				else
					kill_pages = -1;
			}
			if (kill_pages != -1)
				vm_object_deactivate_pages(object, offset,
					(vm_object_size_t)flush_size, kill_pages);
			vm_object_unlock(object);
			vm_map_unlock(map);
			continue;
		}
		/*
		 * We can't sync this object if there isn't a pager.
		 * Don't bother to sync internal objects, since there can't
		 * be any "permanent" storage for these objects anyway.
		 */
		if ((object->pager == MEMORY_OBJECT_NULL) ||
		    (object->internal) || (object->private)) {
			vm_object_unlock(object);
			vm_map_unlock(map);
			continue;
		}
		/*
		 * keep reference on the object until syncing is done
		 */
		assert(object->ref_count > 0);
		object->ref_count++;
		vm_object_res_reference(object);
		vm_object_unlock(object);

		vm_map_unlock(map);

		do_sync_req = vm_object_sync(object,
					offset,
					flush_size,
					sync_flags & VM_SYNC_INVALIDATE,
					(modifiable &&
					(sync_flags & VM_SYNC_SYNCHRONOUS ||
					 sync_flags & VM_SYNC_ASYNCHRONOUS)));

		/*
		 * only send a m_o_s if we returned pages or if the entry
		 * is writable (ie dirty pages may have already been sent back)
		 */
		if (!do_sync_req && !modifiable) {
			vm_object_deallocate(object);
			continue;
		}
		msync_req_alloc(new_msr);

		vm_object_lock(object);
		offset += object->paging_offset;

		new_msr->offset = offset;
		new_msr->length = flush_size;
		new_msr->object = object;
		new_msr->flag = VM_MSYNC_SYNCHRONIZING;
re_iterate:
		queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
			/*
			 * need to check for overlapping entry, if found, wait
			 * on overlapping msr to be done, then reiterate
			 */
			msr_lock(msr);
			if (msr->flag == VM_MSYNC_SYNCHRONIZING &&
			    ((offset >= msr->offset &&
			      offset < (msr->offset + msr->length)) ||
			     (msr->offset >= offset &&
			      msr->offset < (offset + flush_size))))
			{
				assert_wait((event_t) msr,THREAD_INTERRUPTIBLE);
				msr_unlock(msr);
				vm_object_unlock(object);
				thread_block((void (*)(void))0);
				vm_object_lock(object);
				goto re_iterate;
			}
			msr_unlock(msr);
		}

		queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q);
		vm_object_unlock(object);

		queue_enter(&req_q, new_msr, msync_req_t, req_q);

		(void) memory_object_synchronize(
				object->pager,
				object->pager_request,
				offset,
				flush_size,
				sync_flags);
	}/* while */

	/*
	 * wait for memory_object_synchronize_completed messages from pager(s)
	 */
	while (!queue_empty(&req_q)) {
		msr = (msync_req_t)queue_first(&req_q);
		msr_lock(msr);
		while (msr->flag != VM_MSYNC_DONE) {
			assert_wait((event_t) msr, THREAD_INTERRUPTIBLE);
			msr_unlock(msr);
			thread_block((void (*)(void))0);
			msr_lock(msr);
		}/* while */
		msr_unlock(msr);
		queue_remove(&req_q, msr, msync_req_t, req_q);
		vm_object_deallocate(msr->object);
		msync_req_free(msr);
	}/* queue_iterate */

	return(KERN_SUCCESS);
}/* vm_msync */
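/*
 * Illustrative user-space sketch (not compiled here): driving the
 * sync_flags combinations documented above through the vm_msync MIG
 * interface.  The address/length values are hypothetical.
 */
#if 0	/* example only */
#include <mach/mach.h>

static kern_return_t
flush_region(vm_address_t addr, vm_size_t len)
{
	kern_return_t kr;

	/* write dirty/precious pages back and wait for the pager to finish */
	kr = vm_msync(mach_task_self(), addr, len, VM_SYNC_SYNCHRONOUS);
	if (kr != KERN_SUCCESS)
		return kr;

	/* then toss the resident pages; only precious pages go back */
	return vm_msync(mach_task_self(), addr, len, VM_SYNC_INVALIDATE);
}
#endif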
/*
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (must_wire)
		map->wiring_required = TRUE;
	else
		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
}
/*
 *	vm_behavior_set sets the paging behavior attribute for the
 *	specified range in the specified map. This routine will fail
 *	with KERN_INVALID_ADDRESS if any address in [start,start+size)
 *	is not a valid allocated or reserved memory region.
 */
kern_return_t
vm_behavior_set(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size,
	vm_behavior_t	new_behavior)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_behavior_set(map, trunc_page(start),
				   round_page(start+size), new_behavior));
}
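/*
 * Illustrative user-space sketch (not compiled here): advising the kernel
 * about an access pattern with the vm_behavior_set interface above, in the
 * spirit of madvise().  The mapping parameters are hypothetical.
 */
#if 0	/* example only */
#include <mach/mach.h>

static kern_return_t
mark_sequential(vm_address_t addr, vm_size_t size)
{
	/* pages behind the read point become better eviction candidates */
	return vm_behavior_set(mach_task_self(), addr, size,
			       VM_BEHAVIOR_SEQUENTIAL);
}
#endif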
#if	VM_CPM
/*
 *	Control whether the kernel will permit use of
 *	vm_allocate_cpm at all.
 */
unsigned int	vm_allocate_cpm_enabled = 1;

/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host port).  Set this variable to zero if you
 *	want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
kern_return_t
vm_allocate_cpm(
	host_priv_t		host_priv,
	register vm_map_t	map,
	register vm_offset_t	*addr,
	register vm_size_t	size,
	int			flags)
{
	vm_object_t		cpm_obj;
	pmap_t			pmap;
	vm_page_t		m, pages;
	kern_return_t		kr;
	vm_offset_t		va, start, end, offset;
#if	MACH_ASSERT
	extern vm_offset_t	avail_start, avail_end;
	vm_offset_t		prev_addr;
#endif	/* MACH_ASSERT */

	boolean_t		anywhere = VM_FLAGS_ANYWHERE & flags;

	if (!vm_allocate_cpm_enabled)
		return KERN_FAILURE;

	if (vm_allocate_cpm_privileged && host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	assert(host_priv == &realhost);

	if (size == 0) {
		*addr = 0;
		return KERN_SUCCESS;
	}

	if (anywhere)
		*addr = vm_map_min(map);
	else
		*addr = trunc_page(*addr);
	size = round_page(size);

	if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS)
		return kr;

	cpm_obj = vm_object_allocate(size);
	assert(cpm_obj != VM_OBJECT_NULL);
	assert(cpm_obj->internal);
	assert(cpm_obj->size == size);
	assert(cpm_obj->can_persist == FALSE);
	assert(cpm_obj->pager_created == FALSE);
	assert(cpm_obj->pageout == FALSE);
	assert(cpm_obj->shadow == VM_OBJECT_NULL);

	/*
	 *	Insert pages into object.
	 */

	vm_object_lock(cpm_obj);
	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);

		assert(!m->gobbled);
		assert(!m->pageout);
		assert(m->phys_addr >= avail_start && m->phys_addr <= avail_end);

		vm_page_insert(m, cpm_obj, offset);
	}
	assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
	vm_object_unlock(cpm_obj);

	/*
	 *	Hang onto a reference on the object in case a
	 *	multi-threaded application for some reason decides
	 *	to deallocate the portion of the address space into
	 *	which we will insert this object.
	 *
	 *	Unfortunately, we must insert the object now before
	 *	we can talk to the pmap module about which addresses
	 *	must be wired down.  Hence, the race with a multi-
	 *	threaded app.
	 */
	vm_object_reference(cpm_obj);

	/*
	 *	Insert object into map.
	 */

	kr = vm_map_enter(
			map,
			addr,
			size,
			(vm_offset_t)0,		/* mask */
			flags,
			cpm_obj,
			(vm_object_offset_t)0,
			FALSE,			/* copy */
			VM_PROT_ALL,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS) {
		/*
		 *	A CPM object doesn't have can_persist set,
		 *	so all we have to do is deallocate it to
		 *	free up these pages.
		 */
		assert(cpm_obj->pager_created == FALSE);
		assert(cpm_obj->can_persist == FALSE);
		assert(cpm_obj->pageout == FALSE);
		assert(cpm_obj->shadow == VM_OBJECT_NULL);
		vm_object_deallocate(cpm_obj); /* kill acquired ref */
		vm_object_deallocate(cpm_obj); /* kill creation ref */
		return kr;
	}

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */
	start = *addr;
	end = start + size;
	pmap = vm_map_pmap(map);
	pmap_pageable(pmap, start, end, FALSE);

	/*
	 *	Enter each page into the pmap, to avoid faults.
	 *	Note that this loop could be coded more efficiently,
	 *	if the need arose, rather than looking up each page
	 *	again.
	 */
	for (offset = 0, va = start; offset < size;
	     va += PAGE_SIZE, offset += PAGE_SIZE) {
		vm_object_lock(cpm_obj);
		m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
		vm_object_unlock(cpm_obj);
		assert(m != VM_PAGE_NULL);
		PMAP_ENTER(pmap, va, m, VM_PROT_ALL,
			VM_WIMG_USE_DEFAULT, TRUE);
	}

#if	MACH_ASSERT
	/*
	 *	Verify ordering in address space.
	 */
	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		vm_object_lock(cpm_obj);
		m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
		vm_object_unlock(cpm_obj);
		if (m == VM_PAGE_NULL)
			panic("vm_allocate_cpm:  obj 0x%x off 0x%x no page",
			      cpm_obj, offset);
		assert(!m->fictitious);
		assert(!m->private);
		assert(!m->cleaning);
		assert(!m->precious);
		assert(!m->clustered);
		if (offset != 0) {
			if (m->phys_addr != prev_addr + PAGE_SIZE) {
				printf("start 0x%x end 0x%x va 0x%x\n",
				       start, end, va);
				printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
				printf("m 0x%x prev_address 0x%x\n", m,
				       prev_addr);
				panic("vm_allocate_cpm:  pages not contig!");
			}
		}
		prev_addr = m->phys_addr;
	}
#endif	/* MACH_ASSERT */

	vm_object_deallocate(cpm_obj); /* kill extra ref */
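	return kr;
}

/*
 * Illustrative sketch (not compiled here): how a privileged in-kernel client
 * might obtain and release a physically contiguous buffer through
 * vm_allocate_cpm above.  The map and size are hypothetical.
 */
#if 0	/* example only */
static kern_return_t
grab_contig_buffer(vm_map_t map, vm_offset_t *addrp, vm_size_t size)
{
	kern_return_t kr;

	kr = vm_allocate_cpm(&realhost, map, addrp, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use the buffer; it stays physically contiguous ... */

	/* freed the normal way, as the comment above says */
	return vm_deallocate(map, *addrp, size);
}
#endif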
#else	/* VM_CPM */

/*
 *	Interface is defined in all cases, but unless the kernel
 *	is built explicitly for this option, the interface does
 *	nothing.
 */
kern_return_t
vm_allocate_cpm(
	host_priv_t		host_priv,
	register vm_map_t	map,
	register vm_offset_t	*addr,
	register vm_size_t	size,
	int			flags)
{
	return KERN_FAILURE;
}
#endif	/* VM_CPM */
kern_return_t
mach_memory_object_memory_entry_64(
	host_t			host,
	boolean_t		internal,
	vm_object_offset_t	size,
	vm_prot_t		permission,
	memory_object_t		pager,
	ipc_port_t		*entry_handle)
{
	vm_named_entry_t	user_object;
	ipc_port_t		user_handle;
	ipc_port_t		previous;

	if (host == HOST_NULL)
		return(KERN_INVALID_HOST);

	user_object = (vm_named_entry_t)
			kalloc(sizeof (struct vm_named_entry));
	if (user_object == NULL)
		return KERN_FAILURE;
	named_entry_lock_init(user_object);
	user_handle = ipc_port_alloc_kernel();
	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	user_object->object = NULL;
	user_object->size = size;
	user_object->offset = 0;
	user_object->backing.pager = pager;
	user_object->protection = permission;
	user_object->internal = internal;
	user_object->is_sub_map = FALSE;
	user_object->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
			IKOT_NAMED_ENTRY);
	*entry_handle = user_handle;
	return KERN_SUCCESS;
}
kern_return_t
mach_memory_object_memory_entry(
	host_t		host,
	boolean_t	internal,
	vm_size_t	size,
	vm_prot_t	permission,
	memory_object_t	pager,
	ipc_port_t	*entry_handle)
{
	return mach_memory_object_memory_entry_64( host, internal,
		(vm_object_offset_t)size, permission, pager, entry_handle);
}
kern_return_t
mach_make_memory_entry_64(
	vm_map_t		target_map,
	vm_object_size_t	*size,
	vm_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	vm_map_version_t	version;
	vm_named_entry_t	user_object;
	ipc_port_t		user_handle;
	ipc_port_t		previous;
	kern_return_t		kr;
	vm_map_t		pmap_map;

	/* needed for call to vm_map_lookup_locked */
	boolean_t		wired;
	vm_object_offset_t	obj_off;
	vm_prot_t		prot;
	vm_object_offset_t	lo_offset, hi_offset;
	vm_behavior_t		behavior;
	vm_object_t		object;
	vm_object_t		shadow_object;

	/* needed for direct map entry manipulation */
	vm_map_entry_t		map_entry;
	vm_map_entry_t		next_entry;
	vm_map_t		local_map;
	vm_map_t		original_map = target_map;
	vm_offset_t		local_offset;
	vm_object_size_t	mappable_size;
	vm_object_size_t	total_size;

	offset = trunc_page_64(offset);
	*size = round_page_64(*size);

	user_object = (vm_named_entry_t)
			kalloc(sizeof (struct vm_named_entry));
	if (user_object == NULL)
		return KERN_FAILURE;
	named_entry_lock_init(user_object);
	user_handle = ipc_port_alloc_kernel();
	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	user_object->backing.pager = NULL;
	user_object->ref_count = 1;

	if (parent_entry == NULL) {
		/* Create a named object based on address range within the task map */
		/* Go find the object at given address */

		permission &= VM_PROT_ALL;
		vm_map_lock_read(target_map);

		/* get the object associated with the target address */
		/* note we check the permission of the range against */
		/* that requested by the caller */

		kr = vm_map_lookup_locked(&target_map, offset,
				permission, &version,
				&object, &obj_off, &prot, &wired, &behavior,
				&lo_offset, &hi_offset, &pmap_map);
		if (kr != KERN_SUCCESS) {
			vm_map_unlock_read(target_map);
			goto make_mem_done;
		}
		if (((prot & permission) != permission)
					|| (object == kernel_object)) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if (pmap_map != target_map)
				vm_map_unlock_read(pmap_map);
			if (object == kernel_object) {
				printf("Warning: Attempt to create a named"
					" entry from the kernel_object\n");
			}
			goto make_mem_done;
		}

		/* We have an object, now check to see if this object */
		/* is suitable.  If not, create a shadow and share that */

redo_lookup:
		local_map = original_map;
		local_offset = offset;
		if (target_map != local_map) {
			vm_map_unlock_read(target_map);
			if (pmap_map != target_map)
				vm_map_unlock_read(pmap_map);
			vm_map_lock_read(local_map);
			target_map = local_map;
			pmap_map = local_map;
		}
		while (TRUE) {
			if (!vm_map_lookup_entry(local_map,
					local_offset, &map_entry)) {
				kr = KERN_INVALID_ARGUMENT;
				vm_object_unlock(object);
				vm_map_unlock_read(target_map);
				if (pmap_map != target_map)
					vm_map_unlock_read(pmap_map);
				goto make_mem_done;
			}
			if (!(map_entry->is_sub_map)) {
				if (map_entry->object.vm_object != object) {
					kr = KERN_INVALID_ARGUMENT;
					vm_object_unlock(object);
					vm_map_unlock_read(target_map);
					if (pmap_map != target_map)
						vm_map_unlock_read(pmap_map);
					goto make_mem_done;
				}
				if (map_entry->wired_count) {
					object->true_share = TRUE;
				}
				break;
			} else {
				vm_map_t	tmap;

				tmap = local_map;
				local_map = map_entry->object.sub_map;

				vm_map_lock_read(local_map);
				vm_map_unlock_read(tmap);
				target_map = local_map;
				pmap_map = local_map;
				local_offset = local_offset - map_entry->vme_start;
				local_offset += map_entry->offset;
			}
		}
		if (((map_entry->max_protection) & permission) != permission) {
			kr = KERN_INVALID_RIGHT;
			vm_object_unlock(object);
			vm_map_unlock_read(target_map);
			if (pmap_map != target_map)
				vm_map_unlock_read(pmap_map);
			goto make_mem_done;
		}

		mappable_size = hi_offset - obj_off;
		total_size = map_entry->vme_end - map_entry->vme_start;
		if (*size > mappable_size) {
			/* try to extend mappable size if the entries */
			/* following are from the same object and are */
			/* compatible */
			next_entry = map_entry->vme_next;
			/* lets see if the next map entry is still   */
			/* pointing at this object and is contiguous */
			while (*size > mappable_size) {
				if ((next_entry->object.vm_object == object) &&
					(next_entry->vme_start ==
						next_entry->vme_prev->vme_end) &&
					(next_entry->offset ==
						next_entry->vme_prev->offset +
						(next_entry->vme_prev->vme_end -
						next_entry->vme_prev->vme_start))) {
					if (((next_entry->max_protection)
						& permission) != permission) {
						break;
					}
					mappable_size += next_entry->vme_end
						- next_entry->vme_start;
					total_size += next_entry->vme_end
						- next_entry->vme_start;
					next_entry = next_entry->vme_next;
				} else {
					break;
				}
			}
		}

		if (object->internal) {
			/* vm_map_lookup_locked will create a shadow if   */
			/* needs_copy is set but does not check for the   */
			/* other two conditions shown. It is important to */
			/* set up an object which will not be pulled from */
			/* under us.  */

			if ((map_entry->needs_copy || object->shadowed ||
				(object->size > total_size))
				&& !object->true_share) {
				if (vm_map_lock_read_to_write(target_map)) {
					vm_map_lock_read(target_map);
					goto redo_lookup;
				}

				/* create a shadow object */
				vm_object_shadow(&map_entry->object.vm_object,
						&map_entry->offset, total_size);
				shadow_object = map_entry->object.vm_object;
				vm_object_unlock(object);
				vm_object_pmap_protect(
					object, map_entry->offset,
					total_size,
					((map_entry->is_shared
						|| target_map->mapped)
							? PMAP_NULL :
							target_map->pmap),
					map_entry->vme_start,
					map_entry->protection & ~VM_PROT_WRITE);
				total_size -= (map_entry->vme_end
						- map_entry->vme_start);
				next_entry = map_entry->vme_next;
				map_entry->needs_copy = FALSE;
				while (total_size) {
					if (next_entry->object.vm_object == object) {
						next_entry->object.vm_object
							= shadow_object;
						next_entry->offset
							= next_entry->vme_prev->offset +
							(next_entry->vme_prev->vme_end
							- next_entry->vme_prev->vme_start);
						next_entry->needs_copy = FALSE;
					} else {
						panic("mach_make_memory_entry_64:"
							" map entries out of sync\n");
					}
					total_size -= next_entry->vme_end
							- next_entry->vme_start;
					next_entry = next_entry->vme_next;
				}

				object = shadow_object;
				vm_object_lock(object);
				obj_off = (local_offset - map_entry->vme_start)
							+ map_entry->offset;
				vm_map_lock_write_to_read(target_map);
			}
		}

		/* note: in the future we can (if necessary) allow for  */
		/* memory object lists, this will better support        */
		/* fragmentation, but is it necessary?  The user should */
		/* be encouraged to create address space oriented       */
		/* shared objects from CLEAN memory regions which have  */
		/* a known and defined history.  i.e. no inheritence    */
		/* share, make this call before making the region the   */
		/* target of ipc's, etc.  The code above, protecting    */
		/* against delayed copy, etc. is mostly defensive.      */

		object->true_share = TRUE;
		user_object->object = object;
		user_object->internal = object->internal;
		user_object->is_sub_map = FALSE;
		user_object->offset = obj_off;
		user_object->protection = permission;

		/* the size of mapped entry that overlaps with our region */
		/* which is targeted for share.                           */
		/* (entry_end - entry_start) -                            */
		/* offset of our beg addr within entry                    */
		/* it corresponds to this:                                */

		if (*size > mappable_size)
			*size = mappable_size;

		user_object->size = *size;

		/* user_object pager and internal fields are not used */
		/* when the object field is filled in.                */

		object->ref_count++; /* we now point to this object, hold on */
		vm_object_res_reference(object);
		vm_object_unlock(object);
		ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
						IKOT_NAMED_ENTRY);
		*object_handle = user_handle;
		vm_map_unlock_read(target_map);
		if (pmap_map != target_map)
			vm_map_unlock_read(pmap_map);
		return KERN_SUCCESS;
	} else {
		vm_named_entry_t	parent_object;

		/* The new object will be based on an existing named object */
		if (ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}
		parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
		if ((permission & parent_object->protection) != permission) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}
		if ((offset + *size) > parent_object->size) {
			kr = KERN_INVALID_ARGUMENT;
			goto make_mem_done;
		}

		user_object->object = parent_object->object;
		user_object->size = *size;
		user_object->offset = parent_object->offset + offset;
		user_object->protection = permission;
		if (parent_object->is_sub_map) {
			user_object->backing.map = parent_object->backing.map;
			vm_map_lock(user_object->backing.map);
			user_object->backing.map->ref_count++;
			vm_map_unlock(user_object->backing.map);
		} else {
			user_object->backing.pager = parent_object->backing.pager;
		}
		user_object->internal = parent_object->internal;
		user_object->is_sub_map = parent_object->is_sub_map;

		if (parent_object->object != NULL) {
			/* we now point to this object, hold on */
			vm_object_reference(parent_object->object);
			vm_object_lock(parent_object->object);
			parent_object->object->true_share = TRUE;
			vm_object_unlock(parent_object->object);
		}
		ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
						IKOT_NAMED_ENTRY);
		*object_handle = user_handle;
		return KERN_SUCCESS;
	}

make_mem_done:
	ipc_port_dealloc_kernel(user_handle);
	kfree((vm_offset_t)user_object, sizeof (struct vm_named_entry));
	return kr;
}
kern_return_t
mach_make_memory_entry(
	vm_map_t	target_map,
	vm_size_t	*size,
	vm_offset_t	offset,
	vm_prot_t	permission,
	ipc_port_t	*object_handle,
	ipc_port_t	parent_entry)
{
	vm_object_offset_t	size_64;
	kern_return_t		kr;

	size_64 = (vm_object_offset_t)*size;
	kr = mach_make_memory_entry_64(target_map, &size_64,
			(vm_object_offset_t)offset, permission, object_handle,
			parent_entry);
	*size = (vm_size_t)size_64;
	return kr;
}
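/*
 * Illustrative user-space sketch (not compiled here): the common pairing of
 * mach_make_memory_entry above with vm_map to share an existing range with
 * another task.  `other_task` and the region parameters are hypothetical.
 */
#if 0	/* example only */
#include <mach/mach.h>

static kern_return_t
share_region(task_t other_task, vm_address_t addr, vm_size_t size,
	     vm_address_t *remote_addr)
{
	mach_port_t	entry = MACH_PORT_NULL;
	kern_return_t	kr;

	/* wrap [addr, addr+size) of our map in a named-entry port */
	kr = mach_make_memory_entry(mach_task_self(), &size, addr,
			VM_PROT_READ | VM_PROT_WRITE, &entry, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS)
		return kr;

	/* map the named entry anywhere in the other task, shared */
	*remote_addr = 0;
	kr = vm_map(other_task, remote_addr, size, 0, TRUE,
		    entry, 0, FALSE,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_INHERIT_SHARE);
	mach_port_deallocate(mach_task_self(), entry);
	return kr;
}
#endif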
kern_return_t
vm_region_object_create(
	vm_map_t	target_map,
	vm_size_t	size,
	ipc_port_t	*object_handle)
{
	vm_named_entry_t	user_object;
	ipc_port_t		user_handle;

	pmap_t		new_pmap = pmap_create((vm_size_t) 0);
	ipc_port_t	previous;
	vm_map_t	new_map;

	if (new_pmap == PMAP_NULL)
		return KERN_FAILURE;
	user_object = (vm_named_entry_t)
			kalloc(sizeof (struct vm_named_entry));
	if (user_object == NULL) {
		pmap_destroy(new_pmap);
		return KERN_FAILURE;
	}
	named_entry_lock_init(user_object);
	user_handle = ipc_port_alloc_kernel();

	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(new_pmap, 0, size, TRUE);
	user_object->backing.map = new_map;

	user_object->object = VM_OBJECT_NULL;
	user_object->internal = TRUE;
	user_object->is_sub_map = TRUE;
	user_object->offset = 0;
	user_object->protection = VM_PROT_ALL;
	user_object->size = size;
	user_object->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
			IKOT_NAMED_ENTRY);
	*object_handle = user_handle;
	return KERN_SUCCESS;
}
/* For a given range, check all map entries.  If the entry corresponds to */
/* the old vm_region/map provided on the call, replace it with the        */
/* corresponding range in the new vm_region/map                           */
kern_return_t
vm_map_region_replace(
	vm_map_t	target_map,
	ipc_port_t	old_region,
	ipc_port_t	new_region,
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_named_entry_t	old_object;
	vm_named_entry_t	new_object;
	vm_map_t		old_submap;
	vm_map_t		new_submap;
	vm_offset_t		addr;
	vm_map_entry_t		entry;
	int			nested_pmap = 0;

	vm_map_lock(target_map);
	old_object = (vm_named_entry_t)old_region->ip_kobject;
	new_object = (vm_named_entry_t)new_region->ip_kobject;
	if ((!old_object->is_sub_map) || (!new_object->is_sub_map)) {
		vm_map_unlock(target_map);
		return KERN_INVALID_ARGUMENT;
	}
	old_submap = (vm_map_t)old_object->backing.map;
	new_submap = (vm_map_t)new_object->backing.map;
	vm_map_lock(old_submap);
	if ((old_submap->min_offset != new_submap->min_offset) ||
	    (old_submap->max_offset != new_submap->max_offset)) {
		vm_map_unlock(old_submap);
		vm_map_unlock(target_map);
		return KERN_INVALID_ARGUMENT;
	}
	if (!vm_map_lookup_entry(target_map, start, &entry)) {
		/* if the src is not contained, the entry precedes */
		/* our range */
		addr = entry->vme_start;
		if (entry == vm_map_to_entry(target_map)) {
			vm_map_unlock(old_submap);
			vm_map_unlock(target_map);
			return KERN_SUCCESS;
		}
		vm_map_lookup_entry(target_map, addr, &entry);
	}
	addr = entry->vme_start;
	vm_map_reference(old_submap);
	while ((entry != vm_map_to_entry(target_map)) &&
	       (entry->vme_start < end)) {
		if ((entry->is_sub_map) &&
		    (entry->object.sub_map == old_submap)) {
			if (entry->use_pmap) {
				if ((start & 0xfffffff) ||
				    ((end - start) != 0x10000000)) {
					vm_map_unlock(old_submap);
					vm_map_deallocate(old_submap);
					vm_map_unlock(target_map);
					return KERN_INVALID_ARGUMENT;
				}
				nested_pmap = 1;
			}
			entry->object.sub_map = new_submap;
			vm_map_reference(new_submap);
			vm_map_deallocate(old_submap);
		}
		entry = entry->vme_next;
		addr = entry->vme_start;
	}
	if (nested_pmap) {
		pmap_unnest(target_map->pmap, start, end - start);
		if (target_map->mapped) {
			vm_map_submap_pmap_clean(target_map,
				start, end, old_submap, 0);
		}
		pmap_nest(target_map->pmap, new_submap->pmap,
			start, end - start);
	} else {
		vm_map_submap_pmap_clean(target_map,
			start, end, old_submap, 0);
	}
	vm_map_unlock(old_submap);
	vm_map_deallocate(old_submap);
	vm_map_unlock(target_map);
	return KERN_SUCCESS;
}
void
mach_destroy_memory_entry(
	ipc_port_t	port)
{
	vm_named_entry_t	named_entry;
#if MACH_ASSERT
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
	named_entry = (vm_named_entry_t)port->ip_kobject;
	mutex_lock(&(named_entry)->Lock);
	named_entry->ref_count -= 1;
	if (named_entry->ref_count == 0) {
		if (named_entry->object) {
			/* release the memory object we've been pointing to */
			vm_object_deallocate(named_entry->object);
		}
		if (named_entry->is_sub_map) {
			vm_map_deallocate(named_entry->backing.map);
		}
		kfree((vm_offset_t)port->ip_kobject,
				sizeof (struct vm_named_entry));
	} else
		mutex_unlock(&(named_entry)->Lock);
}
kern_return_t
vm_map_page_query(
	vm_map_t	target_map,
	vm_offset_t	offset,
	int		*disposition,
	int		*ref_count)
{
	vm_map_entry_t	map_entry;
	vm_object_t	object;
	vm_page_t	m;

	*disposition = 0;
	*ref_count = 0;

restart_page_query:
	vm_map_lock(target_map);
	if (!vm_map_lookup_entry(target_map, offset, &map_entry)) {
		vm_map_unlock(target_map);
		return KERN_FAILURE;
	}
	offset -= map_entry->vme_start;	/* adjust to offset within entry */
	offset += map_entry->offset;	/* adjust to target object offset */
	if (map_entry->object.vm_object != VM_OBJECT_NULL) {
		if (!map_entry->is_sub_map) {
			object = map_entry->object.vm_object;
		} else {
			vm_map_unlock(target_map);
			target_map = map_entry->object.sub_map;
			goto restart_page_query;
		}
	} else {
		vm_map_unlock(target_map);
		return KERN_FAILURE;
	}
	vm_object_lock(object);
	vm_map_unlock(target_map);
	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			*disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
			break;
		} else {
			if (object->shadow) {
				offset += object->shadow_offset;
				vm_object_unlock(object);
				object = object->shadow;
				vm_object_lock(object);
			} else {
				vm_object_unlock(object);
				return KERN_FAILURE;
			}
		}
	}

	/* The ref_count is not strictly accurate, it measures the number   */
	/* of entities holding a ref on the object, they may not be mapping */
	/* the object or may not be mapping the section holding the         */
	/* target page but it's still a ball park number and though an over-*/
	/* count, it picks up the copy-on-write cases                       */

	/* We could also get a picture of page sharing from pmap_attributes */
	/* but this would under count as only faulted-in mappings would     */
	/* be counted.                                                      */

	*ref_count = object->ref_count;

	if (m->fictitious) {
		*disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
		vm_object_unlock(object);
		return KERN_SUCCESS;
	}

	if (m->dirty)
		*disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
	else if (pmap_is_modified(m->phys_addr))
		*disposition |= VM_PAGE_QUERY_PAGE_DIRTY;

	if (m->reference)
		*disposition |= VM_PAGE_QUERY_PAGE_REF;
	else if (pmap_is_referenced(m->phys_addr))
		*disposition |= VM_PAGE_QUERY_PAGE_REF;

	vm_object_unlock(object);
	return KERN_SUCCESS;
}
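/*
 * Illustrative user-space sketch (not compiled here): probing residency
 * through the MIG-exported counterpart of vm_map_page_query above,
 * assuming it is exported for this configuration.  `addr` is hypothetical.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <stdio.h>

static void
report_page(vm_address_t addr)
{
	integer_t	disposition = 0;
	integer_t	ref_count = 0;

	if (vm_map_page_query(mach_task_self(), addr,
			      &disposition, &ref_count) == KERN_SUCCESS) {
		printf("present=%d dirty=%d refcnt=%d\n",
		       !!(disposition & VM_PAGE_QUERY_PAGE_PRESENT),
		       !!(disposition & VM_PAGE_QUERY_PAGE_DIRTY),
		       (int)ref_count);
	}
}
#endif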
kern_return_t
set_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	if (IP_VALID(dynamic_pager_control_port))
		ipc_port_release_send(dynamic_pager_control_port);

	dynamic_pager_control_port = control_port;
	return KERN_SUCCESS;
}

kern_return_t
get_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	*control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	*control_port = ipc_port_copy_send(dynamic_pager_control_port);
	return KERN_SUCCESS;
}
/* Retrieve a upl for an object underlying an address range in a map */

kern_return_t
vm_map_get_upl(
	vm_map_t		map,
	vm_address_t		offset,
	vm_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			*flags,
	int			force_data_sync)
{
	vm_map_entry_t	entry;
	int		caller_flags;
	int		sync_cow_data = FALSE;
	vm_object_t	local_object;
	vm_offset_t	local_offset;
	vm_offset_t	local_start;
	kern_return_t	ret;

	caller_flags = *flags;
	if (!(caller_flags & UPL_COPYOUT_FROM)) {
		sync_cow_data = TRUE;
	}
	if (upl == NULL)
		return KERN_INVALID_ARGUMENT;

REDISCOVER_ENTRY:
	vm_map_lock(map);
	if (vm_map_lookup_entry(map, offset, &entry)) {
		if (entry->object.vm_object == VM_OBJECT_NULL ||
			!entry->object.vm_object->phys_contiguous) {
			if ((*upl_size/page_size) > MAX_UPL_TRANSFER) {
				*upl_size = MAX_UPL_TRANSFER * page_size;
			}
		}
		if ((entry->vme_end - offset) < *upl_size) {
			*upl_size = entry->vme_end - offset;
		}
		if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
			if (entry->object.vm_object == VM_OBJECT_NULL) {
				*flags = 0;
			} else if (entry->object.vm_object->private) {
				*flags = UPL_DEV_MEMORY;
				if (entry->object.vm_object->phys_contiguous) {
					*flags |= UPL_PHYS_CONTIG;
				}
			} else {
				*flags = 0;
			}
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		/*
		 *	Create an object if necessary.
		 */
		if (entry->object.vm_object == VM_OBJECT_NULL) {
			entry->object.vm_object = vm_object_allocate(
				(vm_size_t)(entry->vme_end - entry->vme_start));
			entry->offset = 0;
		}
		if (!(caller_flags & UPL_COPYOUT_FROM)) {
			if (entry->needs_copy) {
				vm_map_t		local_map;
				vm_object_t		object;
				vm_object_offset_t	offset_hi;
				vm_object_offset_t	offset_lo;
				vm_object_offset_t	new_offset;
				vm_prot_t		prot;
				boolean_t		wired;
				vm_behavior_t		behavior;
				vm_map_version_t	version;
				vm_map_t		pmap_map;

				local_map = map;
				vm_map_lock_write_to_read(map);
				if (vm_map_lookup_locked(&local_map,
					offset, VM_PROT_WRITE,
					&version, &object,
					&new_offset, &prot, &wired,
					&behavior, &offset_lo,
					&offset_hi, &pmap_map)) {
					vm_map_unlock(local_map);
					return KERN_FAILURE;
				}
				if (pmap_map != map) {
					vm_map_unlock(pmap_map);
				}
				vm_object_unlock(object);
				vm_map_unlock(local_map);

				goto REDISCOVER_ENTRY;
			}
		}
		if (entry->is_sub_map) {
			vm_map_t	submap;

			submap = entry->object.sub_map;
			local_start = entry->vme_start;
			local_offset = entry->offset;
			vm_map_reference(submap);
			vm_map_unlock(map);

			ret = (vm_map_get_upl(submap,
				local_offset + (offset - local_start),
				upl_size, upl, page_list, count,
				flags, force_data_sync));

			vm_map_deallocate(submap);
			return ret;
		}

		if (sync_cow_data) {
			if (entry->object.vm_object->shadow
				    || entry->object.vm_object->copy) {
				int	flags;

				local_object = entry->object.vm_object;
				local_start = entry->vme_start;
				local_offset = entry->offset;
				vm_object_reference(local_object);
				vm_map_unlock(map);

				if (local_object->copy == NULL) {
					flags = MEMORY_OBJECT_DATA_SYNC;
				} else {
					flags = MEMORY_OBJECT_COPY_SYNC;
				}

				if ((local_object->paging_offset) &&
						(local_object->pager == 0)) {
					/*
					 * do a little clean-up for our unorthodox
					 * entry into a pager call from a non-pager
					 * context.  Normally the pager code
					 * assumes that an object it has been called
					 * with has a backing pager and so does
					 * not bother to check the pager field
					 * before relying on the paging_offset
					 */
					vm_object_lock(local_object);
					if (local_object->pager == 0) {
						local_object->paging_offset = 0;
					}
					vm_object_unlock(local_object);
				}

				if (entry->object.vm_object->shadow &&
					   entry->object.vm_object->copy) {
					vm_object_lock_request(
						local_object->shadow,
						(vm_object_offset_t)
						((offset - local_start) +
						 local_offset) +
						local_object->shadow_offset +
						local_object->paging_offset,
						*upl_size, FALSE,
						MEMORY_OBJECT_DATA_SYNC,
						VM_PROT_NO_CHANGE);
				}
				sync_cow_data = FALSE;
				vm_object_deallocate(local_object);
				goto REDISCOVER_ENTRY;
			}
		}

		if (force_data_sync) {
			local_object = entry->object.vm_object;
			local_start = entry->vme_start;
			local_offset = entry->offset;
			vm_object_reference(local_object);
			vm_map_unlock(map);

			if ((local_object->paging_offset) &&
					(local_object->pager == 0)) {
				/*
				 * do a little clean-up for our unorthodox
				 * entry into a pager call from a non-pager
				 * context.  Normally the pager code
				 * assumes that an object it has been called
				 * with has a backing pager and so does
				 * not bother to check the pager field
				 * before relying on the paging_offset
				 */
				vm_object_lock(local_object);
				if (local_object->pager == 0) {
					local_object->paging_offset = 0;
				}
				vm_object_unlock(local_object);
			}

			vm_object_lock_request(
				local_object,
				(vm_object_offset_t)
				((offset - local_start) + local_offset) +
				local_object->paging_offset,
				(vm_object_size_t)*upl_size, FALSE,
				MEMORY_OBJECT_DATA_SYNC,
				VM_PROT_NO_CHANGE);
			force_data_sync = FALSE;
			vm_object_deallocate(local_object);
			goto REDISCOVER_ENTRY;
		}

		if (!(entry->object.vm_object->private)) {
			if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
				*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
			if (entry->object.vm_object->phys_contiguous) {
				*flags = UPL_PHYS_CONTIG;
			} else {
				*flags = 0;
			}
		} else {
			*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
		}
		local_object = entry->object.vm_object;
		local_offset = entry->offset;
		local_start = entry->vme_start;
		vm_object_reference(local_object);
		vm_map_unlock(map);
		ret = (vm_object_upl_request(local_object,
			(vm_object_offset_t)
			((offset - local_start) + local_offset),
			*upl_size,
			upl,
			page_list,
			count,
			caller_flags));
		vm_object_deallocate(local_object);
		return(ret);
	}

	vm_map_unlock(map);
	return(KERN_FAILURE);
}
/* ******* Temporary Internal calls to UPL for BSD ***** */
kern_return_t
kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(map, upl, dst_addr));
}

kern_return_t
kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	return(vm_upl_unmap(map, upl));
}

kern_return_t
kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count)
{
	kern_return_t	kr;

	kr = upl_commit(upl, pl, count);
	upl_deallocate(upl);
	return kr;
}

kern_return_t
kernel_upl_commit_range(
	upl_t			upl,
	vm_offset_t		offset,
	vm_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count)
{
	boolean_t	finished = FALSE;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	finished = FALSE;

	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
kernel_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	return KERN_SUCCESS;
}

kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	task->system_shared_region = (vm_offset_t) shared_region;
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	int			*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);

	return KERN_SUCCESS;
}
kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t	target_region,
	shared_region_mapping_t	object_chain_region)
{
	shared_region_object_chain_t	object_ele;

	if (target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
			kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next)
{
	*shared_region = (shared_region_mapping_t)
			kalloc(sizeof (struct shared_region_mapping));
	if (*shared_region == NULL)
		return KERN_FAILURE;
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_set_alt_next(
	shared_region_mapping_t	shared_region,
	vm_offset_t		alt_next)
{
	shared_region->alternate_next = alt_next;
	return KERN_SUCCESS;
}

kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	if (shared_region == NULL)
		return KERN_SUCCESS;
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	struct shared_region_task_mappings	sm_info;
	shared_region_mapping_t			next = NULL;

	while (shared_region) {
		if (hw_atomic_sub(&shared_region->ref_count, 1) == 0) {
			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			lsf_remove_regions_mappings(shared_region, &sm_info);
			pmap_remove(((vm_named_entry_t)
				(shared_region->text_region->ip_kobject))
						->backing.map->pmap,
				sm_info.client_base,
				sm_info.client_base + sm_info.text_size);
			ipc_port_release_send(shared_region->text_region);
			ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree((vm_offset_t)shared_region->object_chain,
					sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			kfree((vm_offset_t)shared_region,
				sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			break;
		}
	}
	return KERN_SUCCESS;
}
vm_offset_t
vm_map_get_phys_page(
	vm_map_t	map,
	vm_offset_t	offset)
{
	vm_map_entry_t	entry;
	vm_offset_t	phys_addr = 0;
	vm_object_t	object;

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, offset, &entry)) {

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return (vm_offset_t) 0;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			vm_map_lock(entry->object.sub_map);
			old_map = map;
			map = entry->object.sub_map;
			offset = entry->offset + (offset - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		if (entry->object.vm_object->phys_contiguous) {
			/* These are not standard pageable memory mappings */
			/* If they are not present in the object they will */
			/* have to be picked up from the pager through the */
			/* fault mechanism.  */
			if (entry->object.vm_object->shadow_offset == 0) {
				/* need to call vm_fault */
				vm_map_unlock(map);
				vm_fault(map, offset, VM_PROT_NONE,
					FALSE, THREAD_UNINT, NULL, 0);
				vm_map_lock(map);
				continue;
			}
			offset = entry->offset + (offset - entry->vme_start);
			phys_addr = entry->object.vm_object->shadow_offset + offset;
			break;
		}
		offset = entry->offset + (offset - entry->vme_start);
		object = entry->object.vm_object;
		vm_object_lock(object);
		while (TRUE) {
			vm_page_t dst_page = vm_page_lookup(object, offset);

			if (dst_page == VM_PAGE_NULL) {
				if (object->shadow) {
					vm_object_t	old_object;

					vm_object_lock(object->shadow);
					old_object = object;
					offset = offset + object->shadow_offset;
					object = object->shadow;
					vm_object_unlock(old_object);
				} else {
					vm_object_unlock(object);
					break;
				}
			} else {
				phys_addr = dst_page->phys_addr;
				vm_object_unlock(object);
				break;
			}
		}
		break;
	}

	vm_map_unlock(map);
	return phys_addr;
}