/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/vm_map_server.h>
#include <mach/mach_syscalls.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <kern/host.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info,

vm_size_t	upl_offset_to_pagelist = 0;

ipc_port_t	dynamic_pager_control_port = NULL;
/*
 *	vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
	register vm_map_t	map,
	register vm_offset_t	*addr,
	register vm_size_t	size,

	kern_return_t	result;
	boolean_t	anywhere = VM_FLAGS_ANYWHERE & flags;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

		return(KERN_SUCCESS);

		*addr = vm_map_min(map);

	*addr = trunc_page_32(*addr);
	size = round_page_32(size);

		return(KERN_INVALID_ARGUMENT);

	result = vm_map_enter(
			(vm_object_offset_t)0,
/*
 *	vm_deallocate deallocates the specified range of addresses in the
 *	specified address map.
 */
	register vm_map_t	map,

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, trunc_page_32(start),
			     round_page_32(start+size), VM_MAP_NO_FLAGS));
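/*
 * Illustrative sketch (not compiled into this file): how a user task might
 * exercise vm_allocate/vm_protect/vm_deallocate through the Mach interfaces
 * declared in <mach/mach.h>.  The function name, size and protection values
 * below are placeholders for illustration only.
 */
#if 0	/* example only */
#include <mach/mach.h>

static kern_return_t
example_scratch_buffer(void)
{
	vm_address_t	addr = 0;
	vm_size_t	size = 4 * vm_page_size;
	kern_return_t	kr;

	/* allocate zero-filled, pageable memory anywhere in our own map */
	kr = vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* drop write permission on the first page (current protection only) */
	kr = vm_protect(mach_task_self(), addr, vm_page_size,
			FALSE, VM_PROT_READ);

	/* release the whole range when done */
	(void) vm_deallocate(mach_task_self(), addr, size);
	return kr;
}
#endif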
/*
 *	vm_inherit sets the inheritance of the specified range in the
 *	specified map.
 */
	register vm_map_t	map,
	vm_inherit_t		new_inheritance)

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (new_inheritance > VM_INHERIT_LAST_VALID)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map,
			      trunc_page_32(start),
			      round_page_32(start+size),
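/*
 * Illustrative sketch (not compiled into this file): marking a range so it
 * is not duplicated into children across fork(), using the inheritance
 * attribute handled above.  The address range is a placeholder.
 */
#if 0	/* example only */
#include <mach/mach.h>

kern_return_t
example_dont_inherit(vm_address_t addr, vm_size_t len)
{
	return vm_inherit(mach_task_self(), addr, len, VM_INHERIT_NONE);
}
#endif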
/*
 *	vm_protect sets the protection of the specified range in the
 *	specified map.
 */
	register vm_map_t	map,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)

	if ((map == VM_MAP_NULL) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map,
			      trunc_page_32(start),
			      round_page_32(start+size),
/*
 *	Handle machine-specific attributes for a mapping, such
 *	as cachability, migrability, etc.
 */
vm_machine_attribute(
	vm_address_t			address,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	return vm_map_machine_attribute(map, address, size, attribute, value);
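/*
 * Illustrative sketch (not compiled into this file): using the
 * machine-attribute hook above from user space to flush caches over a range,
 * e.g. after generating code at runtime.  MATTR_CACHE and
 * MATTR_VAL_CACHE_FLUSH come from <mach/vm_attributes.h>; the range and the
 * function name are placeholders.
 */
#if 0	/* example only */
#include <mach/mach.h>

kern_return_t
example_flush_caches(vm_address_t addr, vm_size_t len)
{
	vm_machine_attribute_val_t value = MATTR_VAL_CACHE_FLUSH;

	return vm_machine_attribute(mach_task_self(), addr, len,
				    MATTR_CACHE, &value);
}
#endif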
	vm_address_t		address,
	mach_msg_type_number_t	*data_size)

	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if ((error = vm_map_copyin(map,
			FALSE,	/* src_destroy */
			&ipc_address)) == KERN_SUCCESS) {
		*data = (pointer_t) ipc_address;
	vm_read_entry_t		data_list,
	mach_msg_type_number_t	count)

	mach_msg_type_number_t	i;
	vm_map_copy_t		ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	for(i=0; i<count; i++) {
		error = vm_map_copyin(map,
				data_list[i].address,
				FALSE,	/* src_destroy */

		if(error != KERN_SUCCESS) {
			data_list[i].address = (vm_address_t)0;
			data_list[i].size = (vm_size_t)0;

		if(data_list[i].size != 0) {
			error = vm_map_copyout(current_task()->map,
					&(data_list[i].address),
					(vm_map_copy_t) ipc_address);
			if(error != KERN_SUCCESS) {
				data_list[i].address = (vm_address_t)0;
				data_list[i].size = (vm_size_t)0;
/*
 *	This routine reads from the specified map and overwrites part of the
 *	current activation's map.  Because it assumes the current thread is
 *	local, it is no longer cluster-safe without a fully supportive local
 *	proxy thread/task (but we don't support clusters anymore, so this is
 *	moot).
 */

#define VM_OVERWRITE_SMALL 512

	vm_address_t	address,
	vm_size_t	*data_size)

	char	buf[VM_OVERWRITE_SMALL];

	kern_return_t	error = KERN_SUCCESS;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size <= VM_OVERWRITE_SMALL) {
		if(vm_map_read_user(map, (vm_offset_t)address,
				(vm_offset_t)&inbuf, size)) {
			error = KERN_INVALID_ADDRESS;

		if(vm_map_write_user(current_map(),
				(vm_offset_t)&inbuf, (vm_offset_t)data, size))
			error = KERN_INVALID_ADDRESS;

	if ((error = vm_map_copyin(map,
			FALSE,	/* src_destroy */
			&copy)) == KERN_SUCCESS) {
		if ((error = vm_map_copy_overwrite(
				FALSE)) == KERN_SUCCESS) {

		vm_map_copy_discard(copy);
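/*
 * Illustrative sketch (not compiled into this file): vm_read_overwrite copies
 * from a target task directly into a caller-supplied buffer, avoiding the
 * out-of-line copy that plain vm_read performs.  The task port, addresses,
 * and function name are placeholders.
 */
#if 0	/* example only */
#include <mach/mach.h>

kern_return_t
example_peek(task_t target, vm_address_t remote_addr, void *buf, vm_size_t len)
{
	vm_size_t	outsize = 0;

	/* small reads take the VM_OVERWRITE_SMALL stack-buffer path above */
	return vm_read_overwrite(target, remote_addr, len,
				 (vm_address_t)buf, &outsize);
}
#endif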
	vm_address_t		address,
	mach_msg_type_number_t	size)

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
				     FALSE /* interruptible XXX */);
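/*
 * Illustrative sketch (not compiled into this file): the usual user-level
 * pairing of vm_read and vm_write.  vm_read hands back out-of-line data that
 * the caller must vm_deallocate; vm_write pushes a buffer into an existing,
 * writable range of the target map.  Names below are placeholders.
 */
#if 0	/* example only */
#include <mach/mach.h>

kern_return_t
example_copy_page(task_t target, vm_address_t src, vm_address_t dst)
{
	vm_offset_t		data;
	mach_msg_type_number_t	count;
	kern_return_t		kr;

	kr = vm_read(target, src, vm_page_size, &data, &count);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_write(target, dst, data, count);

	/* the out-of-line buffer lives in our map until we release it */
	(void) vm_deallocate(mach_task_self(), data, count);
	return kr;
}
#endif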
	vm_address_t	source_address,
	vm_address_t	dest_address)

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, source_address, size,

	if (kr != KERN_SUCCESS)

	kr = vm_map_copy_overwrite(map, dest_address, copy,
				   FALSE /* interruptible XXX */);
	if (kr != KERN_SUCCESS) {
		vm_map_copy_discard(copy);
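/*
 * Illustrative sketch (not compiled into this file): vm_copy performs a
 * map-level copy between two already-mapped ranges of the same task,
 * typically yielding copy-on-write sharing rather than touching every byte.
 * The addresses and function name are placeholders.
 */
#if 0	/* example only */
#include <mach/mach.h>

kern_return_t
example_duplicate(vm_address_t src, vm_address_t dst, vm_size_t len)
{
	/* both ranges must exist already; dst must be writable */
	return vm_copy(mach_task_self(), src, len, dst);
}
#endif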
403 vm_offset_t
*address
,
404 vm_size_t initial_size
,
408 vm_object_offset_t offset
,
410 vm_prot_t cur_protection
,
411 vm_prot_t max_protection
,
412 vm_inherit_t inheritance
)
417 vm_object_size_t size
= (vm_object_size_t
)initial_size
;
418 kern_return_t result
;
421 * Check arguments for validity
423 if ((target_map
== VM_MAP_NULL
) ||
424 (cur_protection
& ~VM_PROT_ALL
) ||
425 (max_protection
& ~VM_PROT_ALL
) ||
426 (inheritance
> VM_INHERIT_LAST_VALID
) ||
428 return(KERN_INVALID_ARGUMENT
);
431 * Find the vm object (if any) corresponding to this port.
433 if (!IP_VALID(port
)) {
434 object
= VM_OBJECT_NULL
;
437 } else if (ip_kotype(port
) == IKOT_NAMED_ENTRY
) {
438 vm_named_entry_t named_entry
;
440 named_entry
= (vm_named_entry_t
)port
->ip_kobject
;
441 /* a few checks to make sure user is obeying rules */
443 if(offset
>= named_entry
->size
)
444 return(KERN_INVALID_RIGHT
);
445 size
= named_entry
->size
- offset
;
447 if((named_entry
->protection
& max_protection
) != max_protection
)
448 return(KERN_INVALID_RIGHT
);
449 if((named_entry
->protection
& cur_protection
) != cur_protection
)
450 return(KERN_INVALID_RIGHT
);
451 if(named_entry
->size
< (offset
+ size
))
452 return(KERN_INVALID_ARGUMENT
);
454 /* the callers parameter offset is defined to be the */
455 /* offset from beginning of named entry offset in object */
456 offset
= offset
+ named_entry
->offset
;
458 named_entry_lock(named_entry
);
459 if(named_entry
->is_sub_map
) {
460 vm_map_entry_t map_entry
;
462 named_entry_unlock(named_entry
);
463 *address
= trunc_page_32(*address
);
464 size
= round_page_64(size
);
465 vm_object_reference(vm_submap_object
);
466 if ((result
= vm_map_enter(target_map
,
467 address
, size
, mask
, flags
,
470 cur_protection
, max_protection
, inheritance
471 )) != KERN_SUCCESS
) {
472 vm_object_deallocate(vm_submap_object
);
476 VM_GET_FLAGS_ALIAS(flags
, alias
);
477 if ((alias
== VM_MEMORY_SHARED_PMAP
) &&
479 vm_map_submap(target_map
, *address
,
481 named_entry
->backing
.map
,
482 (vm_offset_t
)offset
, TRUE
);
484 vm_map_submap(target_map
, *address
,
486 named_entry
->backing
.map
,
487 (vm_offset_t
)offset
, FALSE
);
490 if(vm_map_lookup_entry(
491 target_map
, *address
, &map_entry
)) {
492 map_entry
->needs_copy
= TRUE
;
498 } else if(named_entry
->object
) {
			/*
			 * This is the case where we are going to map an
			 * already mapped object.  If the object is not
			 * ready, it is internal.  An external object cannot
			 * be mapped until it is ready; we can therefore
			 * avoid the ready check here.
			 */
505 named_entry_unlock(named_entry
);
506 vm_object_reference(named_entry
->object
);
507 object
= named_entry
->object
;
510 vm_prot_t protections
;
511 unsigned int wimg_mode
;
512 boolean_t cache_attr
;
514 protections
= named_entry
->protection
516 access
= GET_MAP_MEM(named_entry
->protection
);
518 object
= vm_object_enter(
519 named_entry
->backing
.pager
,
521 named_entry
->internal
,
524 if (object
== VM_OBJECT_NULL
) {
525 named_entry_unlock(named_entry
);
526 return(KERN_INVALID_OBJECT
);
529 vm_object_lock(object
);
531 /* create an extra ref for the named entry */
532 vm_object_reference_locked(object
);
533 named_entry
->object
= object
;
534 named_entry_unlock(named_entry
);
536 wimg_mode
= object
->wimg_bits
;
537 if(access
== MAP_MEM_IO
) {
538 wimg_mode
= VM_WIMG_IO
;
539 } else if (access
== MAP_MEM_COPYBACK
) {
540 wimg_mode
= VM_WIMG_USE_DEFAULT
;
541 } else if (access
== MAP_MEM_WTHRU
) {
542 wimg_mode
= VM_WIMG_WTHRU
;
543 } else if (access
== MAP_MEM_WCOMB
) {
544 wimg_mode
= VM_WIMG_WCOMB
;
546 if ((wimg_mode
== VM_WIMG_IO
)
547 || (wimg_mode
== VM_WIMG_WCOMB
))
552 if (named_entry
->backing
.pager
) {
553 /* wait for object (if any) to be ready */
554 while (!object
->pager_ready
) {
555 vm_object_wait(object
,
556 VM_OBJECT_EVENT_PAGER_READY
,
558 vm_object_lock(object
);
561 if(object
->wimg_bits
!= wimg_mode
) {
564 vm_object_paging_wait(object
, THREAD_UNINT
);
566 object
->wimg_bits
= wimg_mode
;
567 queue_iterate(&object
->memq
, p
, vm_page_t
, listq
) {
568 if (!p
->fictitious
) {
573 pmap_sync_caches_phys(
578 object
->true_share
= TRUE
;
579 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
)
580 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
581 vm_object_unlock(object
);
583 } else if (ip_kotype(port
) == IKOT_MEMORY_OBJECT
) {
		/*
		 * JMM - This is temporary until we unify named entries
		 * and raw memory objects.
		 *
		 * Detected fake ip_kotype for a memory object.  In
		 * this case, the port isn't really a port at all, but
		 * instead is just a raw memory object.
		 */
593 if ((object
= vm_object_enter((memory_object_t
)port
,
594 size
, FALSE
, FALSE
, FALSE
))
596 return(KERN_INVALID_OBJECT
);
598 /* wait for object (if any) to be ready */
599 if (object
!= VM_OBJECT_NULL
) {
600 if(object
== kernel_object
) {
601 printf("Warning: Attempt to map kernel object"
602 " by a non-private kernel entity\n");
603 return(KERN_INVALID_OBJECT
);
605 vm_object_lock(object
);
606 while (!object
->pager_ready
) {
607 vm_object_wait(object
,
608 VM_OBJECT_EVENT_PAGER_READY
,
610 vm_object_lock(object
);
612 vm_object_unlock(object
);
615 return (KERN_INVALID_OBJECT
);
618 *address
= trunc_page_32(*address
);
619 size
= round_page_64(size
);
622 * Perform the copy if requested
626 vm_object_t new_object
;
627 vm_object_offset_t new_offset
;
629 result
= vm_object_copy_strategically(object
, offset
, size
,
630 &new_object
, &new_offset
,
634 if (result
== KERN_MEMORY_RESTART_COPY
) {
636 boolean_t src_needs_copy
;
640 * We currently ignore src_needs_copy.
641 * This really is the issue of how to make
642 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
643 * non-kernel users to use. Solution forthcoming.
644 * In the meantime, since we don't allow non-kernel
645 * memory managers to specify symmetric copy,
646 * we won't run into problems here.
650 success
= vm_object_copy_quickly(&new_object
,
655 result
= KERN_SUCCESS
;
658 * Throw away the reference to the
659 * original object, as it won't be mapped.
662 vm_object_deallocate(object
);
664 if (result
!= KERN_SUCCESS
)
671 if ((result
= vm_map_enter(target_map
,
672 address
, size
, mask
, flags
,
675 cur_protection
, max_protection
, inheritance
677 vm_object_deallocate(object
);
681 /* temporary, until world build */
685 vm_offset_t
*address
,
692 vm_prot_t cur_protection
,
693 vm_prot_t max_protection
,
694 vm_inherit_t inheritance
)
696 return vm_map_64(target_map
, address
, size
, mask
, flags
,
697 port
, (vm_object_offset_t
)offset
, copy
,
698 cur_protection
, max_protection
, inheritance
);
/*
 * NOTE: this routine (and this file) will no longer require mach_host_server.h
 * when vm_wire is changed to use ledgers.
 */
#include <mach/mach_host_server.h>

/*
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
716 host_priv_t host_priv
,
717 register vm_map_t map
,
724 if (host_priv
== HOST_PRIV_NULL
)
725 return KERN_INVALID_HOST
;
727 assert(host_priv
== &realhost
);
729 if (map
== VM_MAP_NULL
)
730 return KERN_INVALID_TASK
;
732 if (access
& ~VM_PROT_ALL
)
733 return KERN_INVALID_ARGUMENT
;
735 if (access
!= VM_PROT_NONE
) {
736 rc
= vm_map_wire(map
, trunc_page_32(start
),
737 round_page_32(start
+size
), access
, TRUE
);
739 rc
= vm_map_unwire(map
, trunc_page_32(start
),
740 round_page_32(start
+size
), TRUE
);
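/*
 * Illustrative sketch (not compiled into this file): wiring and unwiring a
 * range from user space.  vm_wire takes the privileged host port, so this
 * only works for a caller that has obtained host_priv rights (e.g. root).
 * The addresses and function name are placeholders.
 */
#if 0	/* example only */
#include <mach/mach.h>

kern_return_t
example_wire_range(host_priv_t host_priv, vm_address_t addr, vm_size_t len)
{
	kern_return_t	kr;

	/* fault the pages in and keep them resident */
	kr = vm_wire(host_priv, mach_task_self(), addr, len, VM_PROT_READ);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use the wired range ... */

	/* VM_PROT_NONE undoes the wiring, per the comment above */
	return vm_wire(host_priv, mach_task_self(), addr, len, VM_PROT_NONE);
}
#endif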
/*
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager, engaging in a memory object synchronize dialog with
 *	the manager.  The client doesn't return until the manager issues
 *	the m_o_s_completed message.  MIG magically converts the user task
 *	parameter to the task's address map.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to the memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	The memory object attributes have not yet been implemented; this
 *	function will have to deal with the invalidate attribute.
 *
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 */
780 vm_address_t address
,
782 vm_sync_t sync_flags
)
786 queue_chain_t req_q
; /* queue of requests for this msync */
787 vm_map_entry_t entry
;
788 vm_size_t amount_left
;
789 vm_object_offset_t offset
;
790 boolean_t do_sync_req
;
791 boolean_t modifiable
;
794 if ((sync_flags
& VM_SYNC_ASYNCHRONOUS
) &&
795 (sync_flags
& VM_SYNC_SYNCHRONOUS
))
796 return(KERN_INVALID_ARGUMENT
);
799 * align address and size on page boundaries
801 size
= round_page_32(address
+ size
) - trunc_page_32(address
);
802 address
= trunc_page_32(address
);
804 if (map
== VM_MAP_NULL
)
805 return(KERN_INVALID_TASK
);
808 return(KERN_SUCCESS
);
813 while (amount_left
> 0) {
814 vm_size_t flush_size
;
818 if (!vm_map_lookup_entry(map
, address
, &entry
)) {
822 * hole in the address map.
826 * Check for empty map.
828 if (entry
== vm_map_to_entry(map
) &&
829 entry
->vme_next
== entry
) {
834 * Check that we don't wrap and that
835 * we have at least one real map entry.
837 if ((map
->hdr
.nentries
== 0) ||
838 (entry
->vme_next
->vme_start
< address
)) {
843 * Move up to the next entry if needed
845 skip
= (entry
->vme_next
->vme_start
- address
);
846 if (skip
>= amount_left
)
850 address
= entry
->vme_next
->vme_start
;
855 offset
= address
- entry
->vme_start
;
858 * do we have more to flush than is contained in this
861 if (amount_left
+ entry
->vme_start
+ offset
> entry
->vme_end
) {
862 flush_size
= entry
->vme_end
-
863 (entry
->vme_start
+ offset
);
865 flush_size
= amount_left
;
867 amount_left
-= flush_size
;
868 address
+= flush_size
;
870 if (entry
->is_sub_map
== TRUE
) {
872 vm_offset_t local_offset
;
874 local_map
= entry
->object
.sub_map
;
875 local_offset
= entry
->offset
;
884 object
= entry
->object
.vm_object
;
887 * We can't sync this object if the object has not been
890 if (object
== VM_OBJECT_NULL
) {
894 offset
+= entry
->offset
;
895 modifiable
= (entry
->protection
& VM_PROT_WRITE
)
898 vm_object_lock(object
);
900 if (sync_flags
& (VM_SYNC_KILLPAGES
| VM_SYNC_DEACTIVATE
)) {
901 boolean_t kill_pages
= 0;
903 if (sync_flags
& VM_SYNC_KILLPAGES
) {
904 if (object
->ref_count
== 1 && !entry
->needs_copy
&& !object
->shadow
)
909 if (kill_pages
!= -1)
910 vm_object_deactivate_pages(object
, offset
,
911 (vm_object_size_t
)flush_size
, kill_pages
);
912 vm_object_unlock(object
);
		/*
		 * We can't sync this object if there isn't a pager.
		 * Don't bother to sync internal objects, since there can't
		 * be any "permanent" storage for these objects anyway.
		 */
921 if ((object
->pager
== MEMORY_OBJECT_NULL
) ||
922 (object
->internal
) || (object
->private)) {
923 vm_object_unlock(object
);
928 * keep reference on the object until syncing is done
930 assert(object
->ref_count
> 0);
932 vm_object_res_reference(object
);
933 vm_object_unlock(object
);
937 do_sync_req
= vm_object_sync(object
,
940 sync_flags
& VM_SYNC_INVALIDATE
,
942 (sync_flags
& VM_SYNC_SYNCHRONOUS
||
943 sync_flags
& VM_SYNC_ASYNCHRONOUS
)));
		/*
		 * Only send a m_o_s if we returned pages or if the entry
		 * is writable (i.e. dirty pages may have already been sent back).
		 */
949 if (!do_sync_req
&& !modifiable
) {
950 vm_object_deallocate(object
);
953 msync_req_alloc(new_msr
);
955 vm_object_lock(object
);
956 offset
+= object
->paging_offset
;
958 new_msr
->offset
= offset
;
959 new_msr
->length
= flush_size
;
960 new_msr
->object
= object
;
961 new_msr
->flag
= VM_MSYNC_SYNCHRONIZING
;
963 queue_iterate(&object
->msr_q
, msr
, msync_req_t
, msr_q
) {
965 * need to check for overlapping entry, if found, wait
966 * on overlapping msr to be done, then reiterate
969 if (msr
->flag
== VM_MSYNC_SYNCHRONIZING
&&
970 ((offset
>= msr
->offset
&&
971 offset
< (msr
->offset
+ msr
->length
)) ||
972 (msr
->offset
>= offset
&&
973 msr
->offset
< (offset
+ flush_size
))))
975 assert_wait((event_t
) msr
,THREAD_INTERRUPTIBLE
);
977 vm_object_unlock(object
);
978 thread_block((void (*)(void))0);
979 vm_object_lock(object
);
985 queue_enter(&object
->msr_q
, new_msr
, msync_req_t
, msr_q
);
986 vm_object_unlock(object
);
988 queue_enter(&req_q
, new_msr
, msync_req_t
, req_q
);
990 (void) memory_object_synchronize(
	 * wait for memory_object_synchronize_completed messages from pager(s)
1001 while (!queue_empty(&req_q
)) {
1002 msr
= (msync_req_t
)queue_first(&req_q
);
1004 while(msr
->flag
!= VM_MSYNC_DONE
) {
1005 assert_wait((event_t
) msr
, THREAD_INTERRUPTIBLE
);
1007 thread_block((void (*)(void))0);
1010 queue_remove(&req_q
, msr
, msync_req_t
, req_q
);
1012 vm_object_deallocate(msr
->object
);
1013 msync_req_free(msr
);
1014 }/* queue_iterate */
1016 return(KERN_SUCCESS
);
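/*
 * Illustrative sketch (not compiled into this file): a user-level msync of a
 * mapped region through this interface.  The flag combination below asks for
 * dirty or precious pages to be pushed to the pager and then discarded, as
 * described in the comment block above vm_msync.  The range is a placeholder.
 */
#if 0	/* example only */
#include <mach/mach.h>

kern_return_t
example_flush_region(vm_address_t addr, vm_size_t len)
{
	/* write back and invalidate; returns after the pager acknowledges */
	return vm_msync(mach_task_self(), addr, len,
			VM_SYNC_INVALIDATE | VM_SYNC_SYNCHRONOUS);
}
#endif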
/*
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
1031 boolean_t must_wire
)
1033 if (map
== VM_MAP_NULL
)
1034 return(KERN_INVALID_ARGUMENT
);
1037 map
->wiring_required
= TRUE
;
1039 map
->wiring_required
= FALSE
;
1041 return(KERN_SUCCESS
);
/*
 *	vm_behavior_set sets the paging behavior attribute for the
 *	specified range in the specified map.  This routine will fail
 *	with KERN_INVALID_ADDRESS if any address in [start,start+size)
 *	is not a valid allocated or reserved memory region.
 */
1055 vm_behavior_t new_behavior
)
1057 if (map
== VM_MAP_NULL
)
1058 return(KERN_INVALID_ARGUMENT
);
1060 return(vm_map_behavior_set(map
, trunc_page_32(start
),
1061 round_page_32(start
+size
), new_behavior
));
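/*
 * Illustrative sketch (not compiled into this file): advising the VM system
 * about an access pattern with vm_behavior_set.  VM_BEHAVIOR_SEQUENTIAL is
 * one of the standard behaviors; the range and function name are
 * placeholders.
 */
#if 0	/* example only */
#include <mach/mach.h>

kern_return_t
example_advise_sequential(vm_address_t addr, vm_size_t len)
{
	return vm_behavior_set(mach_task_self(), addr, len,
			       VM_BEHAVIOR_SEQUENTIAL);
}
#endif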
/*
 *	Control whether the kernel will permit use of
 *	vm_allocate_cpm at all.
 */
unsigned int	vm_allocate_cpm_enabled = 1;

/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host port).  Set this variable to zero if you
 *	want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
1091 host_priv_t host_priv
,
1092 register vm_map_t map
,
1093 register vm_offset_t
*addr
,
1094 register vm_size_t size
,
1097 vm_object_t cpm_obj
;
1101 vm_offset_t va
, start
, end
, offset
;
1103 extern vm_offset_t avail_start
, avail_end
;
1104 vm_offset_t prev_addr
;
1105 #endif /* MACH_ASSERT */
1107 boolean_t anywhere
= VM_FLAGS_ANYWHERE
& flags
;
1109 if (!vm_allocate_cpm_enabled
)
1110 return KERN_FAILURE
;
1112 if (vm_allocate_cpm_privileged
&& host_priv
== HOST_PRIV_NULL
)
1113 return KERN_INVALID_HOST
;
1115 if (map
== VM_MAP_NULL
)
1116 return KERN_INVALID_ARGUMENT
;
1118 assert(host_priv
== &realhost
);
1122 return KERN_SUCCESS
;
1126 *addr
= vm_map_min(map
);
1128 *addr
= trunc_page_32(*addr
);
1129 size
= round_page_32(size
);
1131 if ((kr
= cpm_allocate(size
, &pages
, TRUE
)) != KERN_SUCCESS
)
1134 cpm_obj
= vm_object_allocate(size
);
1135 assert(cpm_obj
!= VM_OBJECT_NULL
);
1136 assert(cpm_obj
->internal
);
1137 assert(cpm_obj
->size
== size
);
1138 assert(cpm_obj
->can_persist
== FALSE
);
1139 assert(cpm_obj
->pager_created
== FALSE
);
1140 assert(cpm_obj
->pageout
== FALSE
);
1141 assert(cpm_obj
->shadow
== VM_OBJECT_NULL
);
1144 * Insert pages into object.
1147 vm_object_lock(cpm_obj
);
1148 for (offset
= 0; offset
< size
; offset
+= PAGE_SIZE
) {
1150 pages
= NEXT_PAGE(m
);
1152 assert(!m
->gobbled
);
1154 assert(!m
->pageout
);
1157 assert(m
->phys_page
>=avail_start
&& m
->phys_page
<=avail_end
);
1160 vm_page_insert(m
, cpm_obj
, offset
);
1162 assert(cpm_obj
->resident_page_count
== size
/ PAGE_SIZE
);
1163 vm_object_unlock(cpm_obj
);
	/*
	 *	Hang onto a reference on the object in case a
	 *	multi-threaded application for some reason decides
	 *	to deallocate the portion of the address space into
	 *	which we will insert this object.
	 *
	 *	Unfortunately, we must insert the object now before
	 *	we can talk to the pmap module about which addresses
	 *	must be wired down.  Hence, the race with a multi-
1176 vm_object_reference(cpm_obj
);
1179 * Insert object into map.
1189 (vm_object_offset_t
)0,
1193 VM_INHERIT_DEFAULT
);
1195 if (kr
!= KERN_SUCCESS
) {
1197 * A CPM object doesn't have can_persist set,
1198 * so all we have to do is deallocate it to
1199 * free up these pages.
1201 assert(cpm_obj
->pager_created
== FALSE
);
1202 assert(cpm_obj
->can_persist
== FALSE
);
1203 assert(cpm_obj
->pageout
== FALSE
);
1204 assert(cpm_obj
->shadow
== VM_OBJECT_NULL
);
1205 vm_object_deallocate(cpm_obj
); /* kill acquired ref */
1206 vm_object_deallocate(cpm_obj
); /* kill creation ref */
1210 * Inform the physical mapping system that the
1211 * range of addresses may not fault, so that
1212 * page tables and such can be locked down as well.
1216 pmap
= vm_map_pmap(map
);
1217 pmap_pageable(pmap
, start
, end
, FALSE
);
1220 * Enter each page into the pmap, to avoid faults.
1221 * Note that this loop could be coded more efficiently,
1222 * if the need arose, rather than looking up each page
1225 for (offset
= 0, va
= start
; offset
< size
;
1226 va
+= PAGE_SIZE
, offset
+= PAGE_SIZE
) {
1227 vm_object_lock(cpm_obj
);
1228 m
= vm_page_lookup(cpm_obj
, (vm_object_offset_t
)offset
);
1229 vm_object_unlock(cpm_obj
);
1230 assert(m
!= VM_PAGE_NULL
);
1231 PMAP_ENTER(pmap
, va
, m
, VM_PROT_ALL
,
1232 ((unsigned int)(m
->object
->wimg_bits
)) & VM_WIMG_MASK
,
1238 * Verify ordering in address space.
1240 for (offset
= 0; offset
< size
; offset
+= PAGE_SIZE
) {
1241 vm_object_lock(cpm_obj
);
1242 m
= vm_page_lookup(cpm_obj
, (vm_object_offset_t
)offset
);
1243 vm_object_unlock(cpm_obj
);
1244 if (m
== VM_PAGE_NULL
)
1245 panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
1250 assert(!m
->fictitious
);
1251 assert(!m
->private);
1254 assert(!m
->cleaning
);
1255 assert(!m
->precious
);
1256 assert(!m
->clustered
);
1258 if (m
->phys_page
!= prev_addr
+ 1) {
1259 printf("start 0x%x end 0x%x va 0x%x\n",
1261 printf("obj 0x%x off 0x%x\n", cpm_obj
, offset
);
1262 printf("m 0x%x prev_address 0x%x\n", m
,
1264 panic("vm_allocate_cpm: pages not contig!");
1267 prev_addr
= m
->phys_page
;
1269 #endif /* MACH_ASSERT */
1271 vm_object_deallocate(cpm_obj
); /* kill extra ref */
1280 * Interface is defined in all cases, but unless the kernel
1281 * is built explicitly for this option, the interface does
1287 host_priv_t host_priv
,
1288 register vm_map_t map
,
1289 register vm_offset_t
*addr
,
1290 register vm_size_t size
,
1293 return KERN_FAILURE
;
1299 mach_memory_object_memory_entry_64(
1302 vm_object_offset_t size
,
1303 vm_prot_t permission
,
1304 memory_object_t pager
,
1305 ipc_port_t
*entry_handle
)
1307 unsigned int access
;
1308 vm_named_entry_t user_object
;
1309 ipc_port_t user_handle
;
1310 ipc_port_t previous
;
1313 if (host
== HOST_NULL
)
1314 return(KERN_INVALID_HOST
);
1316 user_object
= (vm_named_entry_t
)
1317 kalloc(sizeof (struct vm_named_entry
));
1318 if(user_object
== NULL
)
1319 return KERN_FAILURE
;
1320 named_entry_lock_init(user_object
);
1321 user_handle
= ipc_port_alloc_kernel();
1322 ip_lock(user_handle
);
1324 /* make a sonce right */
1325 user_handle
->ip_sorights
++;
1326 ip_reference(user_handle
);
1328 user_handle
->ip_destination
= IP_NULL
;
1329 user_handle
->ip_receiver_name
= MACH_PORT_NULL
;
1330 user_handle
->ip_receiver
= ipc_space_kernel
;
1332 /* make a send right */
1333 user_handle
->ip_mscount
++;
1334 user_handle
->ip_srights
++;
1335 ip_reference(user_handle
);
1337 ipc_port_nsrequest(user_handle
, 1, user_handle
, &previous
);
1338 /* nsrequest unlocks user_handle */
1340 user_object
->object
= NULL
;
1341 user_object
->size
= size
;
1342 user_object
->offset
= 0;
1343 user_object
->backing
.pager
= pager
;
1344 user_object
->protection
= permission
& VM_PROT_ALL
;
1345 access
= GET_MAP_MEM(permission
);
1346 SET_MAP_MEM(access
, user_object
->protection
);
1347 user_object
->internal
= internal
;
1348 user_object
->is_sub_map
= FALSE
;
1349 user_object
->ref_count
= 1;
1351 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1353 *entry_handle
= user_handle
;
1354 return KERN_SUCCESS
;
1358 mach_memory_object_memory_entry(
1362 vm_prot_t permission
,
1363 memory_object_t pager
,
1364 ipc_port_t
*entry_handle
)
1366 return mach_memory_object_memory_entry_64( host
, internal
,
1367 (vm_object_offset_t
)size
, permission
, pager
, entry_handle
);
1376 mach_make_memory_entry_64(
1377 vm_map_t target_map
,
1378 vm_object_size_t
*size
,
1379 vm_object_offset_t offset
,
1380 vm_prot_t permission
,
1381 ipc_port_t
*object_handle
,
1382 ipc_port_t parent_entry
)
1384 vm_map_version_t version
;
1385 vm_named_entry_t user_object
;
1386 ipc_port_t user_handle
;
1387 ipc_port_t previous
;
1391 /* needed for call to vm_map_lookup_locked */
1393 vm_object_offset_t obj_off
;
1395 vm_object_offset_t lo_offset
, hi_offset
;
1396 vm_behavior_t behavior
;
1398 vm_object_t shadow_object
;
1400 /* needed for direct map entry manipulation */
1401 vm_map_entry_t map_entry
;
1402 vm_map_entry_t next_entry
;
1404 vm_map_t original_map
= target_map
;
1405 vm_offset_t local_offset
;
1406 vm_object_size_t mappable_size
;
1407 vm_object_size_t total_size
;
1409 unsigned int access
;
1410 vm_prot_t protections
;
1411 unsigned int wimg_mode
;
1412 boolean_t cache_attr
;
1414 protections
= permission
& VM_PROT_ALL
;
1415 access
= GET_MAP_MEM(permission
);
1418 offset
= trunc_page_64(offset
);
1419 *size
= round_page_64(*size
);
1421 if((parent_entry
!= NULL
)
1422 && (permission
& MAP_MEM_ONLY
)) {
1423 vm_named_entry_t parent_object
;
1424 if(ip_kotype(parent_entry
) != IKOT_NAMED_ENTRY
) {
1425 return KERN_INVALID_ARGUMENT
;
1427 parent_object
= (vm_named_entry_t
)parent_entry
->ip_kobject
;
1428 object
= parent_object
->object
;
1429 if(object
!= VM_OBJECT_NULL
)
1430 wimg_mode
= object
->wimg_bits
;
1431 if((access
!= GET_MAP_MEM(parent_object
->protection
)) &&
1432 !(parent_object
->protection
& VM_PROT_WRITE
)) {
1433 return KERN_INVALID_RIGHT
;
1435 if(access
== MAP_MEM_IO
) {
1436 SET_MAP_MEM(access
, parent_object
->protection
);
1437 wimg_mode
= VM_WIMG_IO
;
1438 } else if (access
== MAP_MEM_COPYBACK
) {
1439 SET_MAP_MEM(access
, parent_object
->protection
);
1440 wimg_mode
= VM_WIMG_DEFAULT
;
1441 } else if (access
== MAP_MEM_WTHRU
) {
1442 SET_MAP_MEM(access
, parent_object
->protection
);
1443 wimg_mode
= VM_WIMG_WTHRU
;
1444 } else if (access
== MAP_MEM_WCOMB
) {
1445 SET_MAP_MEM(access
, parent_object
->protection
);
1446 wimg_mode
= VM_WIMG_WCOMB
;
1449 (access
!= MAP_MEM_NOOP
) &&
1450 (!(object
->nophyscache
))) {
1451 if(object
->wimg_bits
!= wimg_mode
) {
1453 if ((wimg_mode
== VM_WIMG_IO
)
1454 || (wimg_mode
== VM_WIMG_WCOMB
))
1458 vm_object_lock(object
);
1459 while(object
->paging_in_progress
) {
1460 vm_object_unlock(object
);
1461 vm_object_wait(object
,
1462 VM_OBJECT_EVENT_PAGING_IN_PROGRESS
,
1464 vm_object_lock(object
);
1466 object
->wimg_bits
= wimg_mode
;
1467 queue_iterate(&object
->memq
,
1468 p
, vm_page_t
, listq
) {
1469 if (!p
->fictitious
) {
1474 pmap_sync_caches_phys(
1478 vm_object_unlock(object
);
1481 return KERN_SUCCESS
;
1484 if(permission
& MAP_MEM_ONLY
) {
1485 return KERN_INVALID_ARGUMENT
;
1488 user_object
= (vm_named_entry_t
)
1489 kalloc(sizeof (struct vm_named_entry
));
1490 if(user_object
== NULL
)
1491 return KERN_FAILURE
;
1492 named_entry_lock_init(user_object
);
1493 user_handle
= ipc_port_alloc_kernel();
1494 ip_lock(user_handle
);
1496 /* make a sonce right */
1497 user_handle
->ip_sorights
++;
1498 ip_reference(user_handle
);
1500 user_handle
->ip_destination
= IP_NULL
;
1501 user_handle
->ip_receiver_name
= MACH_PORT_NULL
;
1502 user_handle
->ip_receiver
= ipc_space_kernel
;
1504 /* make a send right */
1505 user_handle
->ip_mscount
++;
1506 user_handle
->ip_srights
++;
1507 ip_reference(user_handle
);
1509 ipc_port_nsrequest(user_handle
, 1, user_handle
, &previous
);
1510 /* nsrequest unlocks user_handle */
1512 user_object
->backing
.pager
= NULL
;
1513 user_object
->ref_count
= 1;
1515 if(permission
& MAP_MEM_NAMED_CREATE
) {
1516 user_object
->object
= NULL
;
1517 user_object
->internal
= TRUE
;
1518 user_object
->is_sub_map
= FALSE
;
1519 user_object
->offset
= 0;
1520 user_object
->protection
= protections
;
1521 SET_MAP_MEM(access
, user_object
->protection
);
1522 user_object
->size
= *size
;
1524 /* user_object pager and internal fields are not used */
1525 /* when the object field is filled in. */
1527 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1529 *object_handle
= user_handle
;
1530 return KERN_SUCCESS
;
1533 if(parent_entry
== NULL
) {
1534 /* Create a named object based on address range within the task map */
1535 /* Go find the object at given address */
1537 vm_map_lock_read(target_map
);
1539 /* get the object associated with the target address */
1540 /* note we check the permission of the range against */
1541 /* that requested by the caller */
1543 kr
= vm_map_lookup_locked(&target_map
, offset
,
1544 protections
, &version
,
1545 &object
, &obj_off
, &prot
, &wired
, &behavior
,
1546 &lo_offset
, &hi_offset
, &pmap_map
);
1547 if (kr
!= KERN_SUCCESS
) {
1548 vm_map_unlock_read(target_map
);
1551 if (((prot
& protections
) != protections
)
1552 || (object
== kernel_object
)) {
1553 kr
= KERN_INVALID_RIGHT
;
1554 vm_object_unlock(object
);
1555 vm_map_unlock_read(target_map
);
1556 if(pmap_map
!= target_map
)
1557 vm_map_unlock_read(pmap_map
);
1558 if(object
== kernel_object
) {
1559 printf("Warning: Attempt to create a named"
1560 " entry from the kernel_object\n");
1565 /* We have an object, now check to see if this object */
1566 /* is suitable. If not, create a shadow and share that */
1569 local_map
= original_map
;
1570 local_offset
= offset
;
1571 if(target_map
!= local_map
) {
1572 vm_map_unlock_read(target_map
);
1573 if(pmap_map
!= target_map
)
1574 vm_map_unlock_read(pmap_map
);
1575 vm_map_lock_read(local_map
);
1576 target_map
= local_map
;
1577 pmap_map
= local_map
;
1580 if(!vm_map_lookup_entry(local_map
,
1581 local_offset
, &map_entry
)) {
1582 kr
= KERN_INVALID_ARGUMENT
;
1583 vm_object_unlock(object
);
1584 vm_map_unlock_read(target_map
);
1585 if(pmap_map
!= target_map
)
1586 vm_map_unlock_read(pmap_map
);
1589 if(!(map_entry
->is_sub_map
)) {
1590 if(map_entry
->object
.vm_object
!= object
) {
1591 kr
= KERN_INVALID_ARGUMENT
;
1592 vm_object_unlock(object
);
1593 vm_map_unlock_read(target_map
);
1594 if(pmap_map
!= target_map
)
1595 vm_map_unlock_read(pmap_map
);
1598 if(map_entry
->wired_count
) {
1599 /* JMM - The check below should be reworked instead. */
1600 object
->true_share
= TRUE
;
1606 local_map
= map_entry
->object
.sub_map
;
1608 vm_map_lock_read(local_map
);
1609 vm_map_unlock_read(tmap
);
1610 target_map
= local_map
;
1611 pmap_map
= local_map
;
1612 local_offset
= local_offset
- map_entry
->vme_start
;
1613 local_offset
+= map_entry
->offset
;
1616 if(((map_entry
->max_protection
) & protections
) != protections
) {
1617 kr
= KERN_INVALID_RIGHT
;
1618 vm_object_unlock(object
);
1619 vm_map_unlock_read(target_map
);
1620 if(pmap_map
!= target_map
)
1621 vm_map_unlock_read(pmap_map
);
1625 mappable_size
= hi_offset
- obj_off
;
1626 total_size
= map_entry
->vme_end
- map_entry
->vme_start
;
1627 if(*size
> mappable_size
) {
1628 /* try to extend mappable size if the entries */
1629 /* following are from the same object and are */
1631 next_entry
= map_entry
->vme_next
;
1632 /* lets see if the next map entry is still */
1633 /* pointing at this object and is contiguous */
1634 while(*size
> mappable_size
) {
1635 if((next_entry
->object
.vm_object
== object
) &&
1636 (next_entry
->vme_start
==
1637 next_entry
->vme_prev
->vme_end
) &&
1638 (next_entry
->offset
==
1639 next_entry
->vme_prev
->offset
+
1640 (next_entry
->vme_prev
->vme_end
-
1641 next_entry
->vme_prev
->vme_start
))) {
1642 if(((next_entry
->max_protection
)
1643 & protections
) != protections
) {
1646 if (next_entry
->needs_copy
!=
1647 map_entry
->needs_copy
)
1649 mappable_size
+= next_entry
->vme_end
1650 - next_entry
->vme_start
;
1651 total_size
+= next_entry
->vme_end
1652 - next_entry
->vme_start
;
1653 next_entry
= next_entry
->vme_next
;
1661 if(object
->internal
) {
1662 /* vm_map_lookup_locked will create a shadow if */
1663 /* needs_copy is set but does not check for the */
1664 /* other two conditions shown. It is important to */
1665 /* set up an object which will not be pulled from */
1668 if ((map_entry
->needs_copy
|| object
->shadowed
||
1669 (object
->size
> total_size
))
1670 && !object
->true_share
) {
1671 if (vm_map_lock_read_to_write(target_map
)) {
1672 vm_map_lock_read(target_map
);
1677 * JMM - We need to avoid coming here when the object
1678 * is wired by anybody, not just the current map. Why
1679 * couldn't we use the standard vm_object_copy_quickly()
1683 /* create a shadow object */
1684 vm_object_shadow(&map_entry
->object
.vm_object
,
1685 &map_entry
->offset
, total_size
);
1686 shadow_object
= map_entry
->object
.vm_object
;
1687 vm_object_unlock(object
);
1688 vm_object_pmap_protect(
1689 object
, map_entry
->offset
,
1691 ((map_entry
->is_shared
1692 || target_map
->mapped
)
1695 map_entry
->vme_start
,
1696 map_entry
->protection
& ~VM_PROT_WRITE
);
1697 total_size
-= (map_entry
->vme_end
1698 - map_entry
->vme_start
);
1699 next_entry
= map_entry
->vme_next
;
1700 map_entry
->needs_copy
= FALSE
;
1701 while (total_size
) {
1702 if(next_entry
->object
.vm_object
== object
) {
1703 shadow_object
->ref_count
++;
1704 vm_object_res_reference(shadow_object
);
1705 next_entry
->object
.vm_object
1707 vm_object_deallocate(object
);
1709 = next_entry
->vme_prev
->offset
+
1710 (next_entry
->vme_prev
->vme_end
1711 - next_entry
->vme_prev
->vme_start
);
1712 next_entry
->needs_copy
= FALSE
;
1714 panic("mach_make_memory_entry_64:"
1715 " map entries out of sync\n");
1719 - next_entry
->vme_start
;
1720 next_entry
= next_entry
->vme_next
;
1723 object
= shadow_object
;
1724 vm_object_lock(object
);
1725 obj_off
= (local_offset
- map_entry
->vme_start
)
1726 + map_entry
->offset
;
1727 vm_map_lock_write_to_read(target_map
);
	/* note: in the future we can (if necessary) allow for  */
	/* memory object lists; this will better support        */
	/* fragmentation, but is it necessary?  The user should */
	/* be encouraged to create address space oriented       */
	/* shared objects from CLEAN memory regions which have  */
	/* a known and defined history, i.e. no inheritance     */
	/* share; make this call before making the region the   */
	/* target of ipc's, etc.  The code above, protecting    */
	/* against delayed copy, etc. is mostly defensive.      */
1743 wimg_mode
= object
->wimg_bits
;
1744 if(!(object
->nophyscache
)) {
1745 if(access
== MAP_MEM_IO
) {
1746 wimg_mode
= VM_WIMG_IO
;
1747 } else if (access
== MAP_MEM_COPYBACK
) {
1748 wimg_mode
= VM_WIMG_USE_DEFAULT
;
1749 } else if (access
== MAP_MEM_WTHRU
) {
1750 wimg_mode
= VM_WIMG_WTHRU
;
1751 } else if (access
== MAP_MEM_WCOMB
) {
1752 wimg_mode
= VM_WIMG_WCOMB
;
1756 object
->true_share
= TRUE
;
1757 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
)
1758 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
1760 /* we now point to this object, hold on to it */
1761 vm_object_reference_locked(object
);
1762 vm_map_unlock_read(target_map
);
1763 if(pmap_map
!= target_map
)
1764 vm_map_unlock_read(pmap_map
);
1766 if(object
->wimg_bits
!= wimg_mode
) {
1769 vm_object_paging_wait(object
, THREAD_UNINT
);
1771 queue_iterate(&object
->memq
,
1772 p
, vm_page_t
, listq
) {
1773 if (!p
->fictitious
) {
1778 pmap_sync_caches_phys(
1782 object
->wimg_bits
= wimg_mode
;
1784 user_object
->object
= object
;
1785 user_object
->internal
= object
->internal
;
1786 user_object
->is_sub_map
= FALSE
;
1787 user_object
->offset
= obj_off
;
1788 user_object
->protection
= permission
;
1790 /* the size of mapped entry that overlaps with our region */
1791 /* which is targeted for share. */
1792 /* (entry_end - entry_start) - */
1793 /* offset of our beg addr within entry */
1794 /* it corresponds to this: */
1796 if(*size
> mappable_size
)
1797 *size
= mappable_size
;
1799 user_object
->size
= *size
;
1801 /* user_object pager and internal fields are not used */
1802 /* when the object field is filled in. */
1804 vm_object_unlock(object
);
1805 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1807 *object_handle
= user_handle
;
1808 return KERN_SUCCESS
;
1811 vm_named_entry_t parent_object
;
		/* The new object will be based on an existing named object */
1814 if(ip_kotype(parent_entry
) != IKOT_NAMED_ENTRY
) {
1815 kr
= KERN_INVALID_ARGUMENT
;
1818 parent_object
= (vm_named_entry_t
)parent_entry
->ip_kobject
;
1819 if((offset
+ *size
) > parent_object
->size
) {
1820 kr
= KERN_INVALID_ARGUMENT
;
1824 user_object
->object
= parent_object
->object
;
1825 user_object
->size
= *size
;
1826 user_object
->offset
= parent_object
->offset
+ offset
;
1827 user_object
->protection
= parent_object
->protection
;
1828 user_object
->protection
&= ~VM_PROT_ALL
;
1829 user_object
->protection
= permission
& VM_PROT_ALL
;
1830 if(access
!= MAP_MEM_NOOP
) {
1831 SET_MAP_MEM(access
, user_object
->protection
);
1833 if(parent_object
->is_sub_map
) {
1834 user_object
->backing
.map
= parent_object
->backing
.map
;
1835 vm_map_lock(user_object
->backing
.map
);
1836 user_object
->backing
.map
->ref_count
++;
1837 vm_map_unlock(user_object
->backing
.map
);
1840 user_object
->backing
.pager
= parent_object
->backing
.pager
;
1842 user_object
->internal
= parent_object
->internal
;
1843 user_object
->is_sub_map
= parent_object
->is_sub_map
;
1845 if(parent_object
->object
!= NULL
) {
1846 /* we now point to this object, hold on */
1847 vm_object_reference(parent_object
->object
);
1848 vm_object_lock(parent_object
->object
);
1849 parent_object
->object
->true_share
= TRUE
;
1850 if (parent_object
->object
->copy_strategy
==
1851 MEMORY_OBJECT_COPY_SYMMETRIC
)
1852 parent_object
->object
->copy_strategy
=
1853 MEMORY_OBJECT_COPY_DELAY
;
1854 vm_object_unlock(parent_object
->object
);
1856 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1858 *object_handle
= user_handle
;
1859 return KERN_SUCCESS
;
1865 ipc_port_dealloc_kernel(user_handle
);
1866 kfree((vm_offset_t
)user_object
, sizeof (struct vm_named_entry
));
1871 mach_make_memory_entry(
1872 vm_map_t target_map
,
1875 vm_prot_t permission
,
1876 ipc_port_t
*object_handle
,
1877 ipc_port_t parent_entry
)
1879 vm_object_offset_t size_64
;
1882 size_64
= (vm_object_offset_t
)*size
;
1883 kr
= mach_make_memory_entry_64(target_map
, &size_64
,
1884 (vm_object_offset_t
)offset
, permission
, object_handle
,
1886 *size
= (vm_size_t
)size_64
;
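/*
 * Illustrative sketch (not compiled into this file): the common user-level
 * pattern these routines serve -- wrap a region in a named entry with
 * mach_make_memory_entry, then vm_map that handle to create a second, shared
 * mapping.  Sizes, protections, and names are placeholders.
 */
#if 0	/* example only */
#include <mach/mach.h>

kern_return_t
example_share_region(vm_address_t addr, vm_size_t len, vm_address_t *alias)
{
	mach_port_t	handle = MACH_PORT_NULL;
	vm_size_t	size = len;
	kern_return_t	kr;

	kr = mach_make_memory_entry(mach_task_self(), &size, addr,
				    VM_PROT_READ | VM_PROT_WRITE,
				    &handle, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS)
		return kr;

	*alias = 0;
	kr = vm_map(mach_task_self(), alias, size, 0, VM_FLAGS_ANYWHERE,
		    handle, 0, FALSE /* copy */,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_INHERIT_SHARE);

	(void) mach_port_deallocate(mach_task_self(), handle);
	return kr;
}
#endif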
1894 vm_region_object_create(
1895 vm_map_t target_map
,
1897 ipc_port_t
*object_handle
)
1899 vm_named_entry_t user_object
;
1900 ipc_port_t user_handle
;
1903 ipc_port_t previous
;
1906 user_object
= (vm_named_entry_t
)
1907 kalloc(sizeof (struct vm_named_entry
));
1908 if(user_object
== NULL
) {
1909 return KERN_FAILURE
;
1911 named_entry_lock_init(user_object
);
1912 user_handle
= ipc_port_alloc_kernel();
1915 ip_lock(user_handle
);
1917 /* make a sonce right */
1918 user_handle
->ip_sorights
++;
1919 ip_reference(user_handle
);
1921 user_handle
->ip_destination
= IP_NULL
;
1922 user_handle
->ip_receiver_name
= MACH_PORT_NULL
;
1923 user_handle
->ip_receiver
= ipc_space_kernel
;
1925 /* make a send right */
1926 user_handle
->ip_mscount
++;
1927 user_handle
->ip_srights
++;
1928 ip_reference(user_handle
);
1930 ipc_port_nsrequest(user_handle
, 1, user_handle
, &previous
);
1931 /* nsrequest unlocks user_handle */
1933 /* Create a named object based on a submap of specified size */
1935 new_map
= vm_map_create(0, 0, size
, TRUE
);
1936 user_object
->backing
.map
= new_map
;
1939 user_object
->object
= VM_OBJECT_NULL
;
1940 user_object
->internal
= TRUE
;
1941 user_object
->is_sub_map
= TRUE
;
1942 user_object
->offset
= 0;
1943 user_object
->protection
= VM_PROT_ALL
;
1944 user_object
->size
= size
;
1945 user_object
->ref_count
= 1;
1947 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1949 *object_handle
= user_handle
;
1950 return KERN_SUCCESS
;
/* For a given range, check all map entries.  If the entry corresponds to */
/* the old vm_region/map provided on the call, replace it with the        */
/* corresponding range in the new vm_region/map.                          */
kern_return_t
vm_map_region_replace(
vm_map_region_replace(
1958 vm_map_t target_map
,
1959 ipc_port_t old_region
,
1960 ipc_port_t new_region
,
1964 vm_named_entry_t old_object
;
1965 vm_named_entry_t new_object
;
1966 vm_map_t old_submap
;
1967 vm_map_t new_submap
;
1969 vm_map_entry_t entry
;
1970 int nested_pmap
= 0;
1973 vm_map_lock(target_map
);
1974 old_object
= (vm_named_entry_t
)old_region
->ip_kobject
;
1975 new_object
= (vm_named_entry_t
)new_region
->ip_kobject
;
1976 if((!old_object
->is_sub_map
) || (!new_object
->is_sub_map
)) {
1977 vm_map_unlock(target_map
);
1978 return KERN_INVALID_ARGUMENT
;
1980 old_submap
= (vm_map_t
)old_object
->backing
.map
;
1981 new_submap
= (vm_map_t
)new_object
->backing
.map
;
1982 vm_map_lock(old_submap
);
1983 if((old_submap
->min_offset
!= new_submap
->min_offset
) ||
1984 (old_submap
->max_offset
!= new_submap
->max_offset
)) {
1985 vm_map_unlock(old_submap
);
1986 vm_map_unlock(target_map
);
1987 return KERN_INVALID_ARGUMENT
;
1989 if(!vm_map_lookup_entry(target_map
, start
, &entry
)) {
		/* if the src is not contained, the entry precedes */
1992 addr
= entry
->vme_start
;
1993 if(entry
== vm_map_to_entry(target_map
)) {
1994 vm_map_unlock(old_submap
);
1995 vm_map_unlock(target_map
);
1996 return KERN_SUCCESS
;
1999 if ((entry
->use_pmap
) &&
2000 (new_submap
->pmap
== NULL
)) {
2001 new_submap
->pmap
= pmap_create((vm_size_t
) 0);
2002 if(new_submap
->pmap
== PMAP_NULL
) {
2003 vm_map_unlock(old_submap
);
2004 vm_map_unlock(target_map
);
2005 return(KERN_NO_SPACE
);
2008 addr
= entry
->vme_start
;
2009 vm_map_reference(old_submap
);
2010 while((entry
!= vm_map_to_entry(target_map
)) &&
2011 (entry
->vme_start
< end
)) {
2012 if((entry
->is_sub_map
) &&
2013 (entry
->object
.sub_map
== old_submap
)) {
2014 if(entry
->use_pmap
) {
2015 if((start
& 0x0fffffff) ||
2016 ((end
- start
) != 0x10000000)) {
2017 vm_map_unlock(old_submap
);
2018 vm_map_deallocate(old_submap
);
2019 vm_map_unlock(target_map
);
2020 return KERN_INVALID_ARGUMENT
;
2024 entry
->object
.sub_map
= new_submap
;
2025 vm_map_reference(new_submap
);
2026 vm_map_deallocate(old_submap
);
2028 entry
= entry
->vme_next
;
2029 addr
= entry
->vme_start
;
2033 pmap_unnest(target_map
->pmap
, (addr64_t
)start
);
2034 if(target_map
->mapped
) {
2035 vm_map_submap_pmap_clean(target_map
,
2036 start
, end
, old_submap
, 0);
2038 pmap_nest(target_map
->pmap
, new_submap
->pmap
,
2039 (addr64_t
)start
, (addr64_t
)start
,
2040 (addr64_t
)(end
- start
));
2043 vm_map_submap_pmap_clean(target_map
,
2044 start
, end
, old_submap
, 0);
2046 vm_map_unlock(old_submap
);
2047 vm_map_deallocate(old_submap
);
2048 vm_map_unlock(target_map
);
2049 return KERN_SUCCESS
;
2054 mach_destroy_memory_entry(
2057 vm_named_entry_t named_entry
;
2059 assert(ip_kotype(port
) == IKOT_NAMED_ENTRY
);
2060 #endif /* MACH_ASSERT */
2061 named_entry
= (vm_named_entry_t
)port
->ip_kobject
;
2062 mutex_lock(&(named_entry
)->Lock
);
2063 named_entry
->ref_count
-=1;
2064 if(named_entry
->ref_count
== 0) {
2065 if(named_entry
->object
) {
2066 /* release the memory object we've been pointing to */
2067 vm_object_deallocate(named_entry
->object
);
2069 if(named_entry
->is_sub_map
) {
2070 vm_map_deallocate(named_entry
->backing
.map
);
2072 kfree((vm_offset_t
)port
->ip_kobject
,
2073 sizeof (struct vm_named_entry
));
2075 mutex_unlock(&(named_entry
)->Lock
);
2081 vm_map_t target_map
,
2086 vm_map_entry_t map_entry
;
2093 vm_map_lock(target_map
);
2094 if(!vm_map_lookup_entry(target_map
, offset
, &map_entry
)) {
2095 vm_map_unlock(target_map
);
2096 return KERN_FAILURE
;
2098 offset
-= map_entry
->vme_start
; /* adjust to offset within entry */
2099 offset
+= map_entry
->offset
; /* adjust to target object offset */
2100 if(map_entry
->object
.vm_object
!= VM_OBJECT_NULL
) {
2101 if(!map_entry
->is_sub_map
) {
2102 object
= map_entry
->object
.vm_object
;
2104 vm_map_unlock(target_map
);
2105 target_map
= map_entry
->object
.sub_map
;
2106 goto restart_page_query
;
2109 vm_map_unlock(target_map
);
2110 return KERN_FAILURE
;
2112 vm_object_lock(object
);
2113 vm_map_unlock(target_map
);
2115 m
= vm_page_lookup(object
, offset
);
2116 if (m
!= VM_PAGE_NULL
) {
2117 *disposition
|= VM_PAGE_QUERY_PAGE_PRESENT
;
2120 if(object
->shadow
) {
2121 offset
+= object
->shadow_offset
;
2122 vm_object_unlock(object
);
2123 object
= object
->shadow
;
2124 vm_object_lock(object
);
2127 vm_object_unlock(object
);
2128 return KERN_FAILURE
;
	/* The ref_count is not strictly accurate; it measures the number    */
	/* of entities holding a ref on the object.  They may not be mapping */
	/* the object or may not be mapping the section holding the          */
	/* target page, but it's still a ballpark number, and though an      */
	/* over-count, it picks up the copy-on-write cases.                  */

	/* We could also get a picture of page sharing from pmap_attributes, */
	/* but this would undercount, as only faulted-in mappings would      */
2142 *ref_count
= object
->ref_count
;
2144 if (m
->fictitious
) {
2145 *disposition
|= VM_PAGE_QUERY_PAGE_FICTITIOUS
;
2146 vm_object_unlock(object
);
2147 return KERN_SUCCESS
;
2151 *disposition
|= VM_PAGE_QUERY_PAGE_DIRTY
;
2152 else if(pmap_is_modified(m
->phys_page
))
2153 *disposition
|= VM_PAGE_QUERY_PAGE_DIRTY
;
2156 *disposition
|= VM_PAGE_QUERY_PAGE_REF
;
2157 else if(pmap_is_referenced(m
->phys_page
))
2158 *disposition
|= VM_PAGE_QUERY_PAGE_REF
;
2160 vm_object_unlock(object
);
2161 return KERN_SUCCESS
;
2166 set_dp_control_port(
2167 host_priv_t host_priv
,
2168 ipc_port_t control_port
)
2170 if (host_priv
== HOST_PRIV_NULL
)
2171 return (KERN_INVALID_HOST
);
2173 if (IP_VALID(dynamic_pager_control_port
))
2174 ipc_port_release_send(dynamic_pager_control_port
);
2176 dynamic_pager_control_port
= control_port
;
2177 return KERN_SUCCESS
;
2181 get_dp_control_port(
2182 host_priv_t host_priv
,
2183 ipc_port_t
*control_port
)
2185 if (host_priv
== HOST_PRIV_NULL
)
2186 return (KERN_INVALID_HOST
);
2188 *control_port
= ipc_port_copy_send(dynamic_pager_control_port
);
2189 return KERN_SUCCESS
;
/* Retrieve a UPL for an object underlying an address range in a map */
2199 vm_address_t offset
,
2200 vm_size_t
*upl_size
,
2202 upl_page_info_array_t page_list
,
2203 unsigned int *count
,
2205 int force_data_sync
)
2207 vm_map_entry_t entry
;
2209 int sync_cow_data
= FALSE
;
2210 vm_object_t local_object
;
2211 vm_offset_t local_offset
;
2212 vm_offset_t local_start
;
2215 caller_flags
= *flags
;
2216 if (!(caller_flags
& UPL_COPYOUT_FROM
)) {
2217 sync_cow_data
= TRUE
;
2220 return KERN_INVALID_ARGUMENT
;
2225 if (vm_map_lookup_entry(map
, offset
, &entry
)) {
2226 if (entry
->object
.vm_object
== VM_OBJECT_NULL
||
2227 !entry
->object
.vm_object
->phys_contiguous
) {
2228 if((*upl_size
/page_size
) > MAX_UPL_TRANSFER
) {
2229 *upl_size
= MAX_UPL_TRANSFER
* page_size
;
2232 if((entry
->vme_end
- offset
) < *upl_size
) {
2233 *upl_size
= entry
->vme_end
- offset
;
2235 if (caller_flags
& UPL_QUERY_OBJECT_TYPE
) {
2236 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
2238 } else if (entry
->object
.vm_object
->private) {
2239 *flags
= UPL_DEV_MEMORY
;
2240 if (entry
->object
.vm_object
->phys_contiguous
) {
2241 *flags
|= UPL_PHYS_CONTIG
;
2247 return KERN_SUCCESS
;
2250 * Create an object if necessary.
2252 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
2253 entry
->object
.vm_object
= vm_object_allocate(
2254 (vm_size_t
)(entry
->vme_end
- entry
->vme_start
));
2257 if (!(caller_flags
& UPL_COPYOUT_FROM
)) {
2258 if (!(entry
->protection
& VM_PROT_WRITE
)) {
2260 return KERN_PROTECTION_FAILURE
;
2262 if (entry
->needs_copy
) {
2265 vm_object_offset_t offset_hi
;
2266 vm_object_offset_t offset_lo
;
2267 vm_object_offset_t new_offset
;
2270 vm_behavior_t behavior
;
2271 vm_map_version_t version
;
2275 vm_map_lock_write_to_read(map
);
2276 if(vm_map_lookup_locked(&local_map
,
2277 offset
, VM_PROT_WRITE
,
2279 &new_offset
, &prot
, &wired
,
2280 &behavior
, &offset_lo
,
2281 &offset_hi
, &pmap_map
)) {
2282 vm_map_unlock(local_map
);
2283 return KERN_FAILURE
;
2285 if (pmap_map
!= map
) {
2286 vm_map_unlock(pmap_map
);
2288 vm_object_unlock(object
);
2289 vm_map_unlock(local_map
);
2291 goto REDISCOVER_ENTRY
;
2294 if (entry
->is_sub_map
) {
2297 submap
= entry
->object
.sub_map
;
2298 local_start
= entry
->vme_start
;
2299 local_offset
= entry
->offset
;
2300 vm_map_reference(submap
);
2303 ret
= (vm_map_get_upl(submap
,
2304 local_offset
+ (offset
- local_start
),
2305 upl_size
, upl
, page_list
, count
,
2306 flags
, force_data_sync
));
2308 vm_map_deallocate(submap
);
2312 if (sync_cow_data
) {
2313 if (entry
->object
.vm_object
->shadow
2314 || entry
->object
.vm_object
->copy
) {
2317 local_object
= entry
->object
.vm_object
;
2318 local_start
= entry
->vme_start
;
2319 local_offset
= entry
->offset
;
2320 vm_object_reference(local_object
);
2323 if(local_object
->copy
== NULL
) {
2324 flags
= MEMORY_OBJECT_DATA_SYNC
;
2326 flags
= MEMORY_OBJECT_COPY_SYNC
;
2329 if (entry
->object
.vm_object
->shadow
&&
2330 entry
->object
.vm_object
->copy
) {
2331 vm_object_lock_request(
2332 local_object
->shadow
,
2333 (vm_object_offset_t
)
2334 ((offset
- local_start
) +
2336 local_object
->shadow_offset
,
2338 MEMORY_OBJECT_DATA_SYNC
,
2341 sync_cow_data
= FALSE
;
2342 vm_object_deallocate(local_object
);
2343 goto REDISCOVER_ENTRY
;
2347 if (force_data_sync
) {
2349 local_object
= entry
->object
.vm_object
;
2350 local_start
= entry
->vme_start
;
2351 local_offset
= entry
->offset
;
2352 vm_object_reference(local_object
);
2355 vm_object_lock_request(
2357 (vm_object_offset_t
)
2358 ((offset
- local_start
) + local_offset
),
2359 (vm_object_size_t
)*upl_size
, FALSE
,
2360 MEMORY_OBJECT_DATA_SYNC
,
2362 force_data_sync
= FALSE
;
2363 vm_object_deallocate(local_object
);
2364 goto REDISCOVER_ENTRY
;
2367 if(!(entry
->object
.vm_object
->private)) {
2368 if(*upl_size
> (MAX_UPL_TRANSFER
*PAGE_SIZE
))
2369 *upl_size
= (MAX_UPL_TRANSFER
*PAGE_SIZE
);
2370 if(entry
->object
.vm_object
->phys_contiguous
) {
2371 *flags
= UPL_PHYS_CONTIG
;
2376 *flags
= UPL_DEV_MEMORY
| UPL_PHYS_CONTIG
;
2378 local_object
= entry
->object
.vm_object
;
2379 local_offset
= entry
->offset
;
2380 local_start
= entry
->vme_start
;
2381 vm_object_reference(local_object
);
2383 if(caller_flags
& UPL_SET_IO_WIRE
) {
2384 ret
= (vm_object_iopl_request(local_object
,
2385 (vm_object_offset_t
)
2386 ((offset
- local_start
)
2394 ret
= (vm_object_upl_request(local_object
,
2395 (vm_object_offset_t
)
2396 ((offset
- local_start
)
2404 vm_object_deallocate(local_object
);
2409 return(KERN_FAILURE
);
/* ******* Temporary Internal calls to UPL for BSD ***** */
2418 vm_offset_t
*dst_addr
)
2420 return (vm_upl_map(map
, upl
, dst_addr
));
2429 return(vm_upl_unmap(map
, upl
));
2435 upl_page_info_t
*pl
,
2436 mach_msg_type_number_t count
)
2440 kr
= upl_commit(upl
, pl
, count
);
2441 upl_deallocate(upl
);
2447 kernel_upl_commit_range(
2452 upl_page_info_array_t pl
,
2453 mach_msg_type_number_t count
)
2455 boolean_t finished
= FALSE
;
2458 if (flags
& UPL_COMMIT_FREE_ON_EMPTY
)
2459 flags
|= UPL_COMMIT_NOTIFY_EMPTY
;
2461 kr
= upl_commit_range(upl
, offset
, size
, flags
, pl
, count
, &finished
);
2463 if ((flags
& UPL_COMMIT_NOTIFY_EMPTY
) && finished
)
2464 upl_deallocate(upl
);
2470 kernel_upl_abort_range(
2477 boolean_t finished
= FALSE
;
2479 if (abort_flags
& UPL_COMMIT_FREE_ON_EMPTY
)
2480 abort_flags
|= UPL_COMMIT_NOTIFY_EMPTY
;
2482 kr
= upl_abort_range(upl
, offset
, size
, abort_flags
, &finished
);
2484 if ((abort_flags
& UPL_COMMIT_FREE_ON_EMPTY
) && finished
)
2485 upl_deallocate(upl
);
2497 kr
= upl_abort(upl
, abort_type
);
2498 upl_deallocate(upl
);
kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	return KERN_SUCCESS;
}

kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	task->system_shared_region = (vm_offset_t) shared_region;
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	unsigned int		*fs_base,
	unsigned int		*system,
	vm_offset_t		*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*fs_base = shared_region->fs_base;
	*system = shared_region->system;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);

	return KERN_SUCCESS;
}
kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t		target_region,
	shared_region_mapping_t		object_chain_region)
{
	shared_region_object_chain_t	object_ele;

	if (target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next)
{
	*shared_region = (shared_region_mapping_t)
		kalloc(sizeof (struct shared_region_mapping));
	if (*shared_region == NULL)
		return KERN_FAILURE;
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->fs_base = ENV_DEFAULT_ROOT;
	(*shared_region)->system = machine_slot[cpu_number()].cpu_type;
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->default_env_list = NULL;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	return KERN_SUCCESS;
}
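/*
 * Usage sketch (illustrative only): creating a shared-region descriptor
 * for an already-built text/data pair of named-entry ports.  The port
 * and size variables, the zero region_mappings and the zero alternate
 * base/next are assumptions for the example.
 */
#if 0	/* sketch only */
static shared_region_mapping_t
example_create_region(ipc_port_t text_port, vm_size_t text_size,
		      ipc_port_t data_port, vm_size_t data_size,
		      vm_offset_t client_base)
{
	shared_region_mapping_t	region;

	if (shared_region_mapping_create(text_port, text_size,
					 data_port, data_size,
					 0,		/* region_mappings */
					 client_base,
					 &region,
					 0, 0 /* alt_base, alt_next */)
							!= KERN_SUCCESS)
		return NULL;
	/* region starts with ref_count == 1 */
	return region;
}
#endif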
kern_return_t
shared_region_mapping_set_alt_next(
	shared_region_mapping_t	shared_region,
	vm_offset_t		alt_next)
{
	shared_region->alternate_next = alt_next;
	return KERN_SUCCESS;
}

kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	if (shared_region == NULL)
		return KERN_SUCCESS;
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_lock)
{
	struct shared_region_task_mappings	sm_info;
	shared_region_mapping_t			next = NULL;
	int					ref_count;

	while (shared_region) {
		if ((ref_count =
			hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {

			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			if (shared_region->region_mappings) {
				lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_lock);
			}
			if (((vm_named_entry_t)
				(shared_region->text_region->ip_kobject))
				->backing.map->pmap) {
			    pmap_remove(((vm_named_entry_t)
				(shared_region->text_region->ip_kobject))
				->backing.map->pmap,
				sm_info.client_base,
				sm_info.client_base + sm_info.text_size);
			}
			ipc_port_release_send(shared_region->text_region);
			if (shared_region->data_region)
				ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree((vm_offset_t)shared_region->object_chain,
					sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			kfree((vm_offset_t)shared_region,
				sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			/* Stale indicates that a system region is no */
			/* longer in the default environment list.    */
			if ((ref_count == 1) &&
			    (shared_region->flags & SHARED_REGION_SYSTEM)
			    && (shared_region->flags & ~SHARED_REGION_STALE)) {
				remove_default_shared_region_lock(shared_region, need_lock);
			}
			break;
		}
	}

	return KERN_SUCCESS;
}

/*
 * Stub function; always indicates that the lock needs to be taken in the
 * call to lsf_remove_regions_mappings_lock().
 */
kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	return shared_region_mapping_dealloc_lock(shared_region, 1);
}
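/*
 * Usage sketch (illustrative only): reference counting a region while a
 * task uses it.  shared_region_mapping_ref() bumps ref_count atomically;
 * shared_region_mapping_dealloc() drops it and tears the region (and any
 * chained regions) down once the count reaches zero.  The helper name
 * and the bare set/dealloc pairing are assumptions for the example.
 */
#if 0	/* sketch only */
static void
example_region_lifecycle(task_t task, shared_region_mapping_t region)
{
	shared_region_mapping_ref(region);		/* task holds a ref */
	vm_set_shared_region(task, region);

	/* ... task runs against the shared region ... */

	shared_region_mapping_dealloc(region);		/* drop the ref */
}
#endif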
ppnum_t
vm_map_get_phys_page(
	vm_map_t	map,
	vm_offset_t	offset)
{
	vm_map_entry_t	entry;
	ppnum_t		phys_page = 0;
	vm_object_t	object;

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, offset, &entry)) {

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return (vm_offset_t) 0;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			vm_map_lock(entry->object.sub_map);
			old_map = map;
			map = entry->object.sub_map;
			offset = entry->offset + (offset - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		if (entry->object.vm_object->phys_contiguous) {
			/* These are not standard pageable memory mappings */
			/* If they are not present in the object they will */
			/* have to be picked up from the pager through the */
			/* fault mechanism.  */
			if (entry->object.vm_object->shadow_offset == 0) {
				/* need to call vm_fault */
				vm_map_unlock(map);
				vm_fault(map, offset, VM_PROT_NONE,
					FALSE, THREAD_UNINT, NULL, 0);
				vm_map_lock(map);
				continue;
			}
			offset = entry->offset + (offset - entry->vme_start);
			phys_page = (ppnum_t)
				((entry->object.vm_object->shadow_offset
							+ offset) >> 12);
			break;
		}
		offset = entry->offset + (offset - entry->vme_start);
		object = entry->object.vm_object;
		vm_object_lock(object);
		while (TRUE) {
			vm_page_t dst_page = vm_page_lookup(object, offset);
			if (dst_page == VM_PAGE_NULL) {
				if (object->shadow) {
					vm_object_t old_object;

					vm_object_lock(object->shadow);
					old_object = object;
					offset = offset + object->shadow_offset;
					object = object->shadow;
					vm_object_unlock(old_object);
				} else {
					vm_object_unlock(object);
					break;
				}
			} else {
				phys_page = (ppnum_t)(dst_page->phys_page);
				vm_object_unlock(object);
				break;
			}
		}
		break;
	}

	vm_map_unlock(map);
	return phys_page;
}
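/*
 * Usage sketch (illustrative only): translating a virtual address in a
 * map to a physical page number.  A zero result means no page was
 * found resident (or the entry had no backing object).  The helper
 * name is an assumption for the example.
 */
#if 0	/* sketch only */
static ppnum_t
example_phys_lookup(vm_map_t map, vm_offset_t addr)
{
	ppnum_t	pnum;

	pnum = vm_map_get_phys_page(map, trunc_page_32(addr));
	if (pnum == 0) {
		/* not resident: a caller would have to fault it in first */
	}
	return pnum;
}
#endif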
kern_return_t
kernel_object_iopl_request(
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	vm_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags)
{
	vm_object_t		object;
	kern_return_t		ret;
	int			caller_flags;

	caller_flags = *flags;

	/* a few checks to make sure user is obeying rules */
	if (*upl_size == 0) {
		if (offset >= named_entry->size)
			return(KERN_INVALID_RIGHT);
		*upl_size = named_entry->size - offset;
	}
	if (caller_flags & UPL_COPYOUT_FROM) {
		if ((named_entry->protection & VM_PROT_READ)
					!= VM_PROT_READ) {
			return(KERN_INVALID_RIGHT);
		}
	} else {
		if ((named_entry->protection &
			(VM_PROT_READ | VM_PROT_WRITE))
			!= (VM_PROT_READ | VM_PROT_WRITE)) {
			return(KERN_INVALID_RIGHT);
		}
	}
	if (named_entry->size < (offset + *upl_size))
		return(KERN_INVALID_ARGUMENT);

	/* the callers parameter offset is defined to be the */
	/* offset from beginning of named entry offset in object */
	offset = offset + named_entry->offset;

	if (named_entry->is_sub_map)
		return (KERN_INVALID_ARGUMENT);

	named_entry_lock(named_entry);

	if (named_entry->object) {
		/* This is the case where we are going to map */
		/* an already mapped object.  If the object is */
		/* not ready it is internal.  An external      */
		/* object cannot be mapped until it is ready,  */
		/* we can therefore avoid the ready check      */
		/* in this case.  */
		vm_object_reference(named_entry->object);
		object = named_entry->object;
		named_entry_unlock(named_entry);
	} else {
		object = vm_object_enter(named_entry->backing.pager,
				named_entry->offset + named_entry->size,
				named_entry->internal,
				FALSE,
				FALSE);
		if (object == VM_OBJECT_NULL) {
			named_entry_unlock(named_entry);
			return(KERN_INVALID_OBJECT);
		}
		vm_object_lock(object);

		/* create an extra reference for the named entry */
		vm_object_reference_locked(object);
		named_entry->object = object;
		named_entry_unlock(named_entry);

		/* wait for object (if any) to be ready */
		while (!object->pager_ready) {
			vm_object_wait(object,
					VM_OBJECT_EVENT_PAGER_READY,
					THREAD_UNINT);
			vm_object_lock(object);
		}
		vm_object_unlock(object);
	}

	if (!object->private) {
		if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
			*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		} else {
			*flags = 0;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
				     offset,
				     *upl_size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     caller_flags);
	vm_object_deallocate(object);