/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/vm_map_server.h>
#include <mach/mach_syscalls.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <kern/host.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
vm_size_t	upl_offset_to_pagelist = 0;

ipc_port_t	dynamic_pager_control_port = NULL;
/*
 *	vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
vm_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addr,
	register vm_size_t	size,
	int			flags)
{
	kern_return_t	result;
	boolean_t	anywhere = VM_FLAGS_ANYWHERE & flags;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	if (anywhere)
		*addr = vm_map_min(map);
	else
		*addr = trunc_page_32(*addr);
	size = round_page_32(size);
	if (size == 0)
		return(KERN_INVALID_ARGUMENT);

	result = vm_map_enter(
			map, addr, size, (vm_offset_t)0, flags,
			VM_OBJECT_NULL, (vm_object_offset_t)0, FALSE,
			VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	return(result);
}
/*
 *	vm_deallocate deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
vm_deallocate(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, trunc_page_32(start),
			     round_page_32(start+size), VM_MAP_NO_FLAGS));
}
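
/*
 * Illustrative sketch (not part of the original file): how a user-space
 * client typically pairs the vm_allocate()/vm_deallocate() calls whose
 * kernel-side halves appear above.  The helper name and sizes are
 * hypothetical; the block is guarded with #if 0 and kept as an example only.
 */
#if 0	/* user-space usage example only */
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

static void
example_alloc_and_free(void)
{
	vm_address_t	addr = 0;
	vm_size_t	size = 4 * vm_page_size;	/* page multiples */
	kern_return_t	kr;

	/* VM_FLAGS_ANYWHERE lets the kernel pick the address (anywhere above) */
	kr = vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		printf("vm_allocate: %s\n", mach_error_string(kr));
		return;
	}

	/* ... use the zero-filled region at addr ... */

	/* the kernel rounds the range to page boundaries, as shown above */
	kr = vm_deallocate(mach_task_self(), addr, size);
	if (kr != KERN_SUCCESS)
		printf("vm_deallocate: %s\n", mach_error_string(kr));
}
#endif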
/*
 *	vm_inherit sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
vm_inherit(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (new_inheritance > VM_INHERIT_LAST_VALID)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map,
			      trunc_page_32(start),
			      round_page_32(start+size),
			      new_inheritance));
}
/*
 *	vm_protect sets the protection of the specified range in the
 *	specified map.
 */
kern_return_t
vm_protect(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map,
			      trunc_page_32(start),
			      round_page_32(start+size),
			      new_protection,
			      set_maximum));
}
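
/*
 * Illustrative sketch (not part of the original file): vm_protect() as a
 * user-space caller sees it, e.g. making a buffer read-only and writable
 * again.  set_maximum == FALSE changes only the current protection; the
 * helper names are hypothetical.
 */
#if 0	/* user-space usage example only */
#include <mach/mach.h>

static kern_return_t
example_make_readonly(vm_address_t addr, vm_size_t size)
{
	/* current protection only; VM_PROT_READ must fit within max_protection */
	return vm_protect(mach_task_self(), addr, size,
			  FALSE /* set_maximum */, VM_PROT_READ);
}

static kern_return_t
example_make_writable(vm_address_t addr, vm_size_t size)
{
	return vm_protect(mach_task_self(), addr, size,
			  FALSE, VM_PROT_READ | VM_PROT_WRITE);
}
#endif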
/*
 *	Handle machine-specific attributes for a mapping, such
 *	as cachability, migrability, etc.
 */
kern_return_t
vm_machine_attribute(
	vm_map_t		map,
	vm_address_t		address,
	vm_size_t		size,
	vm_machine_attribute_t	attribute,
	vm_machine_attribute_val_t* value)	/* IN/OUT */
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	return vm_map_machine_attribute(map, address, size, attribute, value);
}
kern_return_t
vm_read(
	vm_map_t		map,
	vm_address_t		address,
	vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if ((error = vm_map_copyin(map,
				address,
				size,
				FALSE,	/* src_destroy */
				&ipc_address)) == KERN_SUCCESS) {
		*data = (pointer_t) ipc_address;
		*data_size = size;
	}
	return(error);
}
kern_return_t
vm_read_list(
	vm_map_t		map,
	vm_read_entry_t		data_list,
	mach_msg_type_number_t	count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	for(i=0; i<count; i++) {
		error = vm_map_copyin(map,
				data_list[i].address,
				data_list[i].size,
				FALSE,	/* src_destroy */
				&ipc_address);
		if(error != KERN_SUCCESS) {
			data_list[i].address = (vm_address_t)0;
			data_list[i].size = (vm_size_t)0;
			break;
		}
		if(data_list[i].size != 0) {
			error = vm_map_copyout(current_task()->map,
						&(data_list[i].address),
						(vm_map_copy_t) ipc_address);
			if(error != KERN_SUCCESS) {
				data_list[i].address = (vm_address_t)0;
				data_list[i].size = (vm_size_t)0;
				break;
			}
		}
	}
	return(error);
}
/*
 *	This routine reads from the specified map and overwrites part of the current
 *	activation's map.  In making an assumption that the current thread is local,
 *	it is no longer cluster-safe without a fully supportive local proxy thread/
 *	task (but we don't support clusters anymore so this is moot).
 */

#define VM_OVERWRITE_SMALL 512

kern_return_t
vm_read_overwrite(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_address_t	data,
	vm_size_t	*data_size)
{
	char		buf[VM_OVERWRITE_SMALL];
	kern_return_t	error = KERN_SUCCESS;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size <= VM_OVERWRITE_SMALL) {
		if(vm_map_read_user(map, (vm_offset_t)address,
				(vm_offset_t)&inbuf, size)) {
			error = KERN_INVALID_ADDRESS;
		} else {
			if(vm_map_write_user(current_map(),
					(vm_offset_t)&inbuf, (vm_offset_t)data, size))
				error = KERN_INVALID_ADDRESS;
		}
	} else {
		if ((error = vm_map_copyin(map,
					address,
					size,
					FALSE,	/* src_destroy */
					&copy)) == KERN_SUCCESS) {
			if ((error = vm_map_copy_overwrite(
					current_act()->map,
					data,
					copy,
					FALSE)) == KERN_SUCCESS) {
			} else {
				vm_map_copy_discard(copy);
			}
		}
	}
	return(error);
}
kern_return_t
vm_write(
	vm_map_t		map,
	vm_address_t		address,
	vm_offset_t		data,
	mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
				     FALSE /* interruptible XXX */);
}
kern_return_t
vm_copy(
	vm_map_t	map,
	vm_address_t	source_address,
	vm_size_t	size,
	vm_address_t	dest_address)
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, source_address, size,
			   FALSE, &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copy_overwrite(map, dest_address, copy,
				   FALSE /* interruptible XXX */);
	if (kr != KERN_SUCCESS) {
		vm_map_copy_discard(copy);
		return kr;
	}

	return KERN_SUCCESS;
}
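
/*
 * Illustrative sketch (not part of the original file): vm_copy() from user
 * space copies page-aligned data between two already-allocated ranges of the
 * same task.  The wrapper name is hypothetical.
 */
#if 0	/* user-space usage example only */
#include <mach/mach.h>

static kern_return_t
example_vm_copy(vm_address_t src, vm_address_t dst, vm_size_t size)
{
	/* both ranges are assumed page aligned; size covers whole pages */
	return vm_copy(mach_task_self(), src, size, dst);
}
#endif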
401 vm_offset_t
*address
,
402 vm_size_t initial_size
,
406 vm_object_offset_t offset
,
408 vm_prot_t cur_protection
,
409 vm_prot_t max_protection
,
410 vm_inherit_t inheritance
)
415 vm_object_size_t size
= (vm_object_size_t
)initial_size
;
416 kern_return_t result
;
419 * Check arguments for validity
421 if ((target_map
== VM_MAP_NULL
) ||
422 (cur_protection
& ~VM_PROT_ALL
) ||
423 (max_protection
& ~VM_PROT_ALL
) ||
424 (inheritance
> VM_INHERIT_LAST_VALID
) ||
426 return(KERN_INVALID_ARGUMENT
);
429 * Find the vm object (if any) corresponding to this port.
431 if (!IP_VALID(port
)) {
432 object
= VM_OBJECT_NULL
;
435 } else if (ip_kotype(port
) == IKOT_NAMED_ENTRY
) {
436 vm_named_entry_t named_entry
;
438 named_entry
= (vm_named_entry_t
)port
->ip_kobject
;
439 /* a few checks to make sure user is obeying rules */
441 if(offset
>= named_entry
->size
)
442 return(KERN_INVALID_RIGHT
);
443 size
= named_entry
->size
- offset
;
445 if((named_entry
->protection
& max_protection
) != max_protection
)
446 return(KERN_INVALID_RIGHT
);
447 if((named_entry
->protection
& cur_protection
) != cur_protection
)
448 return(KERN_INVALID_RIGHT
);
449 if(named_entry
->size
< (offset
+ size
))
450 return(KERN_INVALID_ARGUMENT
);
452 /* the callers parameter offset is defined to be the */
453 /* offset from beginning of named entry offset in object */
454 offset
= offset
+ named_entry
->offset
;
456 named_entry_lock(named_entry
);
457 if(named_entry
->is_sub_map
) {
458 vm_map_entry_t map_entry
;
460 named_entry_unlock(named_entry
);
461 *address
= trunc_page_32(*address
);
462 size
= round_page_64(size
);
463 vm_object_reference(vm_submap_object
);
464 if ((result
= vm_map_enter(target_map
,
465 address
, size
, mask
, flags
,
468 cur_protection
, max_protection
, inheritance
469 )) != KERN_SUCCESS
) {
470 vm_object_deallocate(vm_submap_object
);
474 VM_GET_FLAGS_ALIAS(flags
, alias
);
475 if ((alias
== VM_MEMORY_SHARED_PMAP
) &&
477 vm_map_submap(target_map
, *address
,
479 named_entry
->backing
.map
,
480 (vm_offset_t
)offset
, TRUE
);
482 vm_map_submap(target_map
, *address
,
484 named_entry
->backing
.map
,
485 (vm_offset_t
)offset
, FALSE
);
488 if(vm_map_lookup_entry(
489 target_map
, *address
, &map_entry
)) {
490 map_entry
->needs_copy
= TRUE
;
496 } else if(named_entry
->object
) {
497 /* This is the case where we are going to map */
498 /* an already mapped object. If the object is */
499 /* not ready it is internal. An external */
500 /* object cannot be mapped until it is ready */
501 /* we can therefore avoid the ready check */
503 named_entry_unlock(named_entry
);
504 vm_object_reference(named_entry
->object
);
505 object
= named_entry
->object
;
508 vm_prot_t protections
;
509 unsigned int wimg_mode
;
510 boolean_t cache_attr
;
512 protections
= named_entry
->protection
514 access
= GET_MAP_MEM(named_entry
->protection
);
516 object
= vm_object_enter(
517 named_entry
->backing
.pager
,
519 named_entry
->internal
,
522 if (object
== VM_OBJECT_NULL
) {
523 named_entry_unlock(named_entry
);
524 return(KERN_INVALID_OBJECT
);
527 vm_object_lock(object
);
529 /* create an extra ref for the named entry */
530 vm_object_reference_locked(object
);
531 named_entry
->object
= object
;
532 named_entry_unlock(named_entry
);
534 wimg_mode
= object
->wimg_bits
;
535 if(access
== MAP_MEM_IO
) {
536 wimg_mode
= VM_WIMG_IO
;
537 } else if (access
== MAP_MEM_COPYBACK
) {
538 wimg_mode
= VM_WIMG_USE_DEFAULT
;
539 } else if (access
== MAP_MEM_WTHRU
) {
540 wimg_mode
= VM_WIMG_WTHRU
;
541 } else if (access
== MAP_MEM_WCOMB
) {
542 wimg_mode
= VM_WIMG_WCOMB
;
544 if ((wimg_mode
== VM_WIMG_IO
)
545 || (wimg_mode
== VM_WIMG_WCOMB
))
550 if (named_entry
->backing
.pager
) {
551 /* wait for object (if any) to be ready */
552 while (!object
->pager_ready
) {
553 vm_object_wait(object
,
554 VM_OBJECT_EVENT_PAGER_READY
,
556 vm_object_lock(object
);
559 if(object
->wimg_bits
!= wimg_mode
) {
562 vm_object_paging_wait(object
, THREAD_UNINT
);
564 object
->wimg_bits
= wimg_mode
;
565 queue_iterate(&object
->memq
, p
, vm_page_t
, listq
) {
566 if (!p
->fictitious
) {
571 pmap_sync_caches_phys(
576 object
->true_share
= TRUE
;
577 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
)
578 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
579 vm_object_unlock(object
);
581 } else if (ip_kotype(port
) == IKOT_MEMORY_OBJECT
) {
583 * JMM - This is temporary until we unify named entries
584 * and raw memory objects.
586 * Detected fake ip_kotype for a memory object. In
587 * this case, the port isn't really a port at all, but
588 * instead is just a raw memory object.
591 if ((object
= vm_object_enter((memory_object_t
)port
,
592 size
, FALSE
, FALSE
, FALSE
))
594 return(KERN_INVALID_OBJECT
);
596 /* wait for object (if any) to be ready */
597 if (object
!= VM_OBJECT_NULL
) {
598 if(object
== kernel_object
) {
599 printf("Warning: Attempt to map kernel object"
600 " by a non-private kernel entity\n");
601 return(KERN_INVALID_OBJECT
);
603 vm_object_lock(object
);
604 while (!object
->pager_ready
) {
605 vm_object_wait(object
,
606 VM_OBJECT_EVENT_PAGER_READY
,
608 vm_object_lock(object
);
610 vm_object_unlock(object
);
613 return (KERN_INVALID_OBJECT
);
616 *address
= trunc_page_32(*address
);
617 size
= round_page_64(size
);
620 * Perform the copy if requested
624 vm_object_t new_object
;
625 vm_object_offset_t new_offset
;
627 result
= vm_object_copy_strategically(object
, offset
, size
,
628 &new_object
, &new_offset
,
632 if (result
== KERN_MEMORY_RESTART_COPY
) {
634 boolean_t src_needs_copy
;
638 * We currently ignore src_needs_copy.
639 * This really is the issue of how to make
640 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
641 * non-kernel users to use. Solution forthcoming.
642 * In the meantime, since we don't allow non-kernel
643 * memory managers to specify symmetric copy,
644 * we won't run into problems here.
648 success
= vm_object_copy_quickly(&new_object
,
653 result
= KERN_SUCCESS
;
656 * Throw away the reference to the
657 * original object, as it won't be mapped.
660 vm_object_deallocate(object
);
662 if (result
!= KERN_SUCCESS
)
669 if ((result
= vm_map_enter(target_map
,
670 address
, size
, mask
, flags
,
673 cur_protection
, max_protection
, inheritance
675 vm_object_deallocate(object
);
679 /* temporary, until world build */
683 vm_offset_t
*address
,
690 vm_prot_t cur_protection
,
691 vm_prot_t max_protection
,
692 vm_inherit_t inheritance
)
694 return vm_map_64(target_map
, address
, size
, mask
, flags
,
695 port
, (vm_object_offset_t
)offset
, copy
,
696 cur_protection
, max_protection
, inheritance
);
/*
 *	NOTE: this routine (and this file) will no longer require mach_host_server.h
 *	when vm_wire is changed to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t		host_priv,
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL)
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, trunc_page_32(start),
				 round_page_32(start+size), access, TRUE);
	} else {
		rc = vm_map_unwire(map, trunc_page_32(start),
				   round_page_32(start+size), TRUE);
	}
	return rc;
}
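
/*
 * Illustrative sketch (not part of the original file): vm_wire() requires the
 * privileged host port, so only privileged callers can wire memory.  Passing
 * VM_PROT_NONE unwires, as the comment above notes.  How host_priv is obtained
 * is outside this sketch; the helper name is hypothetical.
 */
#if 0	/* user-space usage example only */
#include <mach/mach.h>

static kern_return_t
example_wire_range(mach_port_t host_priv, vm_address_t addr, vm_size_t size)
{
	kern_return_t	kr;

	kr = vm_wire(host_priv, mach_task_self(), addr, size,
		     VM_PROT_READ | VM_PROT_WRITE);	/* wire */
	if (kr == KERN_SUCCESS)
		kr = vm_wire(host_priv, mach_task_self(), addr, size,
			     VM_PROT_NONE);		/* unwire */
	return kr;
}
#endif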
/*
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager engaging in a memory object synchronize dialog with
 *	the manager.  The client doesn't return until the manager issues
 *	m_o_s_completed message.  MIG Magically converts user task parameter
 *	to the task's address map.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	NOTE
 *	The memory object attributes have not yet been implemented, this
 *	function will have to deal with the invalidate attribute
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 */
778 vm_address_t address
,
780 vm_sync_t sync_flags
)
784 queue_chain_t req_q
; /* queue of requests for this msync */
785 vm_map_entry_t entry
;
786 vm_size_t amount_left
;
787 vm_object_offset_t offset
;
788 boolean_t do_sync_req
;
789 boolean_t modifiable
;
792 if ((sync_flags
& VM_SYNC_ASYNCHRONOUS
) &&
793 (sync_flags
& VM_SYNC_SYNCHRONOUS
))
794 return(KERN_INVALID_ARGUMENT
);
797 * align address and size on page boundaries
799 size
= round_page_32(address
+ size
) - trunc_page_32(address
);
800 address
= trunc_page_32(address
);
802 if (map
== VM_MAP_NULL
)
803 return(KERN_INVALID_TASK
);
806 return(KERN_SUCCESS
);
811 while (amount_left
> 0) {
812 vm_size_t flush_size
;
816 if (!vm_map_lookup_entry(map
, address
, &entry
)) {
820 * hole in the address map.
824 * Check for empty map.
826 if (entry
== vm_map_to_entry(map
) &&
827 entry
->vme_next
== entry
) {
832 * Check that we don't wrap and that
833 * we have at least one real map entry.
835 if ((map
->hdr
.nentries
== 0) ||
836 (entry
->vme_next
->vme_start
< address
)) {
841 * Move up to the next entry if needed
843 skip
= (entry
->vme_next
->vme_start
- address
);
844 if (skip
>= amount_left
)
848 address
= entry
->vme_next
->vme_start
;
853 offset
= address
- entry
->vme_start
;
856 * do we have more to flush than is contained in this
859 if (amount_left
+ entry
->vme_start
+ offset
> entry
->vme_end
) {
860 flush_size
= entry
->vme_end
-
861 (entry
->vme_start
+ offset
);
863 flush_size
= amount_left
;
865 amount_left
-= flush_size
;
866 address
+= flush_size
;
868 if (entry
->is_sub_map
== TRUE
) {
870 vm_offset_t local_offset
;
872 local_map
= entry
->object
.sub_map
;
873 local_offset
= entry
->offset
;
882 object
= entry
->object
.vm_object
;
885 * We can't sync this object if the object has not been
888 if (object
== VM_OBJECT_NULL
) {
892 offset
+= entry
->offset
;
893 modifiable
= (entry
->protection
& VM_PROT_WRITE
)
896 vm_object_lock(object
);
898 if (sync_flags
& (VM_SYNC_KILLPAGES
| VM_SYNC_DEACTIVATE
)) {
899 boolean_t kill_pages
= 0;
901 if (sync_flags
& VM_SYNC_KILLPAGES
) {
902 if (object
->ref_count
== 1 && !entry
->needs_copy
&& !object
->shadow
)
907 if (kill_pages
!= -1)
908 vm_object_deactivate_pages(object
, offset
,
909 (vm_object_size_t
)flush_size
, kill_pages
);
910 vm_object_unlock(object
);
915 * We can't sync this object if there isn't a pager.
916 * Don't bother to sync internal objects, since there can't
917 * be any "permanent" storage for these objects anyway.
919 if ((object
->pager
== MEMORY_OBJECT_NULL
) ||
920 (object
->internal
) || (object
->private)) {
921 vm_object_unlock(object
);
926 * keep reference on the object until syncing is done
928 assert(object
->ref_count
> 0);
930 vm_object_res_reference(object
);
931 vm_object_unlock(object
);
935 do_sync_req
= vm_object_sync(object
,
938 sync_flags
& VM_SYNC_INVALIDATE
,
940 (sync_flags
& VM_SYNC_SYNCHRONOUS
||
941 sync_flags
& VM_SYNC_ASYNCHRONOUS
)));
944 * only send a m_o_s if we returned pages or if the entry
945 * is writable (ie dirty pages may have already been sent back)
947 if (!do_sync_req
&& !modifiable
) {
948 vm_object_deallocate(object
);
951 msync_req_alloc(new_msr
);
953 vm_object_lock(object
);
954 offset
+= object
->paging_offset
;
956 new_msr
->offset
= offset
;
957 new_msr
->length
= flush_size
;
958 new_msr
->object
= object
;
959 new_msr
->flag
= VM_MSYNC_SYNCHRONIZING
;
961 queue_iterate(&object
->msr_q
, msr
, msync_req_t
, msr_q
) {
963 * need to check for overlapping entry, if found, wait
964 * on overlapping msr to be done, then reiterate
967 if (msr
->flag
== VM_MSYNC_SYNCHRONIZING
&&
968 ((offset
>= msr
->offset
&&
969 offset
< (msr
->offset
+ msr
->length
)) ||
970 (msr
->offset
>= offset
&&
971 msr
->offset
< (offset
+ flush_size
))))
973 assert_wait((event_t
) msr
,THREAD_INTERRUPTIBLE
);
975 vm_object_unlock(object
);
976 thread_block((void (*)(void))0);
977 vm_object_lock(object
);
983 queue_enter(&object
->msr_q
, new_msr
, msync_req_t
, msr_q
);
984 vm_object_unlock(object
);
986 queue_enter(&req_q
, new_msr
, msync_req_t
, req_q
);
988 (void) memory_object_synchronize(
	/*
	 * wait for memory_object_synchronize_completed messages from pager(s)
	 */
999 while (!queue_empty(&req_q
)) {
1000 msr
= (msync_req_t
)queue_first(&req_q
);
1002 while(msr
->flag
!= VM_MSYNC_DONE
) {
1003 assert_wait((event_t
) msr
, THREAD_INTERRUPTIBLE
);
1005 thread_block((void (*)(void))0);
1008 queue_remove(&req_q
, msr
, msync_req_t
, req_q
);
1010 vm_object_deallocate(msr
->object
);
1011 msync_req_free(msr
);
1012 }/* queue_iterate */
1014 return(KERN_SUCCESS
);
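
/*
 * Illustrative sketch (not part of the original file): the sync_flags
 * combinations described in the comment block above vm_msync, as a user-space
 * caller would pass them.  VM_SYNC_SYNCHRONOUS and VM_SYNC_ASYNCHRONOUS are
 * mutually exclusive, which is why the routine returns KERN_INVALID_ARGUMENT
 * when both are set.  Helper names are hypothetical.
 */
#if 0	/* user-space usage example only */
#include <mach/mach.h>

static kern_return_t
example_flush_to_pager(vm_address_t addr, vm_size_t size)
{
	/* write dirty/precious pages back and wait for the pager to finish */
	return vm_msync(mach_task_self(), addr, size, VM_SYNC_SYNCHRONOUS);
}

static kern_return_t
example_invalidate(vm_address_t addr, vm_size_t size)
{
	/* discard clean pages; only precious pages go back to the manager */
	return vm_msync(mach_task_self(), addr, size, VM_SYNC_INVALIDATE);
}
#endif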
/*
 *	task_wire
 *
 *	Set or clear the map's wiring_required flag.  This flag, if set,
 *	will cause all future virtual memory allocation to allocate
 *	user wired memory.  Unwiring pages wired down as a result of
 *	this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (must_wire)
		map->wiring_required = TRUE;
	else
		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
}
/*
 *	vm_behavior_set sets the paging behavior attribute for the
 *	specified range in the specified map. This routine will fail
 *	with KERN_INVALID_ADDRESS if any address in [start,start+size)
 *	is not a valid allocated or reserved memory region.
 */
kern_return_t
vm_behavior_set(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size,
	vm_behavior_t	new_behavior)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_behavior_set(map, trunc_page_32(start),
				   round_page_32(start+size), new_behavior));
}
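
/*
 * Illustrative sketch (not part of the original file): advising the VM system
 * about an access pattern with vm_behavior_set(), similar in spirit to
 * madvise().  The range must lie in allocated or reserved memory, as noted
 * above.  The helper name is hypothetical.
 */
#if 0	/* user-space usage example only */
#include <mach/mach.h>

static kern_return_t
example_advise_sequential(vm_address_t addr, vm_size_t size)
{
	/* hint that the range will be touched front to back */
	return vm_behavior_set(mach_task_self(), addr, size,
			       VM_BEHAVIOR_SEQUENTIAL);
}
#endif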
/*
 *	Control whether the kernel will permit use of
 *	vm_allocate_cpm at all.
 */
unsigned int	vm_allocate_cpm_enabled = 1;

/*
 *	Ordinarily, the right to allocate CPM is restricted
 *	to privileged applications (those that can gain access
 *	to the host port).  Set this variable to zero if you
 *	want to let any application allocate CPM.
 */
unsigned int	vm_allocate_cpm_privileged = 0;

/*
 *	Allocate memory in the specified map, with the caveat that
 *	the memory is physically contiguous.  This call may fail
 *	if the system can't find sufficient contiguous memory.
 *	This call may cause or lead to heart-stopping amounts of
 *	paging activity.
 *
 *	Memory obtained from this call should be freed in the
 *	normal way, viz., via vm_deallocate.
 */
1089 host_priv_t host_priv
,
1090 register vm_map_t map
,
1091 register vm_offset_t
*addr
,
1092 register vm_size_t size
,
1095 vm_object_t cpm_obj
;
1099 vm_offset_t va
, start
, end
, offset
;
1101 extern vm_offset_t avail_start
, avail_end
;
1102 vm_offset_t prev_addr
;
1103 #endif /* MACH_ASSERT */
1105 boolean_t anywhere
= VM_FLAGS_ANYWHERE
& flags
;
1107 if (!vm_allocate_cpm_enabled
)
1108 return KERN_FAILURE
;
1110 if (vm_allocate_cpm_privileged
&& host_priv
== HOST_PRIV_NULL
)
1111 return KERN_INVALID_HOST
;
1113 if (map
== VM_MAP_NULL
)
1114 return KERN_INVALID_ARGUMENT
;
1116 assert(host_priv
== &realhost
);
1120 return KERN_SUCCESS
;
1124 *addr
= vm_map_min(map
);
1126 *addr
= trunc_page_32(*addr
);
1127 size
= round_page_32(size
);
1129 if ((kr
= cpm_allocate(size
, &pages
, TRUE
)) != KERN_SUCCESS
)
1132 cpm_obj
= vm_object_allocate(size
);
1133 assert(cpm_obj
!= VM_OBJECT_NULL
);
1134 assert(cpm_obj
->internal
);
1135 assert(cpm_obj
->size
== size
);
1136 assert(cpm_obj
->can_persist
== FALSE
);
1137 assert(cpm_obj
->pager_created
== FALSE
);
1138 assert(cpm_obj
->pageout
== FALSE
);
1139 assert(cpm_obj
->shadow
== VM_OBJECT_NULL
);
1142 * Insert pages into object.
1145 vm_object_lock(cpm_obj
);
1146 for (offset
= 0; offset
< size
; offset
+= PAGE_SIZE
) {
1148 pages
= NEXT_PAGE(m
);
1150 assert(!m
->gobbled
);
1152 assert(!m
->pageout
);
1155 assert(m
->phys_page
>=avail_start
&& m
->phys_page
<=avail_end
);
1158 vm_page_insert(m
, cpm_obj
, offset
);
1160 assert(cpm_obj
->resident_page_count
== size
/ PAGE_SIZE
);
1161 vm_object_unlock(cpm_obj
);
1164 * Hang onto a reference on the object in case a
1165 * multi-threaded application for some reason decides
1166 * to deallocate the portion of the address space into
1167 * which we will insert this object.
1169 * Unfortunately, we must insert the object now before
1170 * we can talk to the pmap module about which addresses
1171 * must be wired down. Hence, the race with a multi-
1174 vm_object_reference(cpm_obj
);
1177 * Insert object into map.
1187 (vm_object_offset_t
)0,
1191 VM_INHERIT_DEFAULT
);
1193 if (kr
!= KERN_SUCCESS
) {
1195 * A CPM object doesn't have can_persist set,
1196 * so all we have to do is deallocate it to
1197 * free up these pages.
1199 assert(cpm_obj
->pager_created
== FALSE
);
1200 assert(cpm_obj
->can_persist
== FALSE
);
1201 assert(cpm_obj
->pageout
== FALSE
);
1202 assert(cpm_obj
->shadow
== VM_OBJECT_NULL
);
1203 vm_object_deallocate(cpm_obj
); /* kill acquired ref */
1204 vm_object_deallocate(cpm_obj
); /* kill creation ref */
1208 * Inform the physical mapping system that the
1209 * range of addresses may not fault, so that
1210 * page tables and such can be locked down as well.
1214 pmap
= vm_map_pmap(map
);
1215 pmap_pageable(pmap
, start
, end
, FALSE
);
1218 * Enter each page into the pmap, to avoid faults.
1219 * Note that this loop could be coded more efficiently,
1220 * if the need arose, rather than looking up each page
1223 for (offset
= 0, va
= start
; offset
< size
;
1224 va
+= PAGE_SIZE
, offset
+= PAGE_SIZE
) {
1225 vm_object_lock(cpm_obj
);
1226 m
= vm_page_lookup(cpm_obj
, (vm_object_offset_t
)offset
);
1227 vm_object_unlock(cpm_obj
);
1228 assert(m
!= VM_PAGE_NULL
);
1229 PMAP_ENTER(pmap
, va
, m
, VM_PROT_ALL
,
1230 ((unsigned int)(m
->object
->wimg_bits
)) & VM_WIMG_MASK
,
1236 * Verify ordering in address space.
1238 for (offset
= 0; offset
< size
; offset
+= PAGE_SIZE
) {
1239 vm_object_lock(cpm_obj
);
1240 m
= vm_page_lookup(cpm_obj
, (vm_object_offset_t
)offset
);
1241 vm_object_unlock(cpm_obj
);
1242 if (m
== VM_PAGE_NULL
)
1243 panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
1248 assert(!m
->fictitious
);
1249 assert(!m
->private);
1252 assert(!m
->cleaning
);
1253 assert(!m
->precious
);
1254 assert(!m
->clustered
);
1256 if (m
->phys_page
!= prev_addr
+ 1) {
1257 printf("start 0x%x end 0x%x va 0x%x\n",
1259 printf("obj 0x%x off 0x%x\n", cpm_obj
, offset
);
1260 printf("m 0x%x prev_address 0x%x\n", m
,
1262 panic("vm_allocate_cpm: pages not contig!");
1265 prev_addr
= m
->phys_page
;
1267 #endif /* MACH_ASSERT */
1269 vm_object_deallocate(cpm_obj
); /* kill extra ref */
/*
 *	Interface is defined in all cases, but unless the kernel
 *	is built explicitly for this option, the interface does
 *	nothing.
 */
kern_return_t
vm_allocate_cpm(
	host_priv_t		host_priv,
	register vm_map_t	map,
	register vm_offset_t	*addr,
	register vm_size_t	size,
	int			flags)
{
	return KERN_FAILURE;
}
1297 mach_memory_object_memory_entry_64(
1300 vm_object_offset_t size
,
1301 vm_prot_t permission
,
1302 memory_object_t pager
,
1303 ipc_port_t
*entry_handle
)
1305 unsigned int access
;
1306 vm_named_entry_t user_object
;
1307 ipc_port_t user_handle
;
1308 ipc_port_t previous
;
1311 if (host
== HOST_NULL
)
1312 return(KERN_INVALID_HOST
);
1314 user_object
= (vm_named_entry_t
)
1315 kalloc(sizeof (struct vm_named_entry
));
1316 if(user_object
== NULL
)
1317 return KERN_FAILURE
;
1318 named_entry_lock_init(user_object
);
1319 user_handle
= ipc_port_alloc_kernel();
1320 ip_lock(user_handle
);
1322 /* make a sonce right */
1323 user_handle
->ip_sorights
++;
1324 ip_reference(user_handle
);
1326 user_handle
->ip_destination
= IP_NULL
;
1327 user_handle
->ip_receiver_name
= MACH_PORT_NULL
;
1328 user_handle
->ip_receiver
= ipc_space_kernel
;
1330 /* make a send right */
1331 user_handle
->ip_mscount
++;
1332 user_handle
->ip_srights
++;
1333 ip_reference(user_handle
);
1335 ipc_port_nsrequest(user_handle
, 1, user_handle
, &previous
);
1336 /* nsrequest unlocks user_handle */
1338 user_object
->object
= NULL
;
1339 user_object
->size
= size
;
1340 user_object
->offset
= 0;
1341 user_object
->backing
.pager
= pager
;
1342 user_object
->protection
= permission
& VM_PROT_ALL
;
1343 access
= GET_MAP_MEM(permission
);
1344 SET_MAP_MEM(access
, user_object
->protection
);
1345 user_object
->internal
= internal
;
1346 user_object
->is_sub_map
= FALSE
;
1347 user_object
->ref_count
= 1;
1349 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1351 *entry_handle
= user_handle
;
1352 return KERN_SUCCESS
;
1356 mach_memory_object_memory_entry(
1360 vm_prot_t permission
,
1361 memory_object_t pager
,
1362 ipc_port_t
*entry_handle
)
1364 return mach_memory_object_memory_entry_64( host
, internal
,
1365 (vm_object_offset_t
)size
, permission
, pager
, entry_handle
);
1374 mach_make_memory_entry_64(
1375 vm_map_t target_map
,
1376 vm_object_size_t
*size
,
1377 vm_object_offset_t offset
,
1378 vm_prot_t permission
,
1379 ipc_port_t
*object_handle
,
1380 ipc_port_t parent_entry
)
1382 vm_map_version_t version
;
1383 vm_named_entry_t user_object
;
1384 ipc_port_t user_handle
;
1385 ipc_port_t previous
;
1389 /* needed for call to vm_map_lookup_locked */
1391 vm_object_offset_t obj_off
;
1393 vm_object_offset_t lo_offset
, hi_offset
;
1394 vm_behavior_t behavior
;
1396 vm_object_t shadow_object
;
1398 /* needed for direct map entry manipulation */
1399 vm_map_entry_t map_entry
;
1400 vm_map_entry_t next_entry
;
1402 vm_map_t original_map
= target_map
;
1403 vm_offset_t local_offset
;
1404 vm_object_size_t mappable_size
;
1405 vm_object_size_t total_size
;
1407 unsigned int access
;
1408 vm_prot_t protections
;
1409 unsigned int wimg_mode
;
1410 boolean_t cache_attr
;
1412 protections
= permission
& VM_PROT_ALL
;
1413 access
= GET_MAP_MEM(permission
);
1416 offset
= trunc_page_64(offset
);
1417 *size
= round_page_64(*size
);
1419 if((parent_entry
!= NULL
)
1420 && (permission
& MAP_MEM_ONLY
)) {
1421 vm_named_entry_t parent_object
;
1422 if(ip_kotype(parent_entry
) != IKOT_NAMED_ENTRY
) {
1423 return KERN_INVALID_ARGUMENT
;
1425 parent_object
= (vm_named_entry_t
)parent_entry
->ip_kobject
;
1426 object
= parent_object
->object
;
1427 if(object
!= VM_OBJECT_NULL
)
1428 wimg_mode
= object
->wimg_bits
;
1429 if((access
!= GET_MAP_MEM(parent_object
->protection
)) &&
1430 !(parent_object
->protection
& VM_PROT_WRITE
)) {
1431 return KERN_INVALID_RIGHT
;
1433 if(access
== MAP_MEM_IO
) {
1434 SET_MAP_MEM(access
, parent_object
->protection
);
1435 wimg_mode
= VM_WIMG_IO
;
1436 } else if (access
== MAP_MEM_COPYBACK
) {
1437 SET_MAP_MEM(access
, parent_object
->protection
);
1438 wimg_mode
= VM_WIMG_DEFAULT
;
1439 } else if (access
== MAP_MEM_WTHRU
) {
1440 SET_MAP_MEM(access
, parent_object
->protection
);
1441 wimg_mode
= VM_WIMG_WTHRU
;
1442 } else if (access
== MAP_MEM_WCOMB
) {
1443 SET_MAP_MEM(access
, parent_object
->protection
);
1444 wimg_mode
= VM_WIMG_WCOMB
;
1447 (access
!= MAP_MEM_NOOP
) &&
1448 (!(object
->nophyscache
))) {
1449 if(object
->wimg_bits
!= wimg_mode
) {
1451 if ((wimg_mode
== VM_WIMG_IO
)
1452 || (wimg_mode
== VM_WIMG_WCOMB
))
1456 vm_object_lock(object
);
1457 while(object
->paging_in_progress
) {
1458 vm_object_unlock(object
);
1459 vm_object_wait(object
,
1460 VM_OBJECT_EVENT_PAGING_IN_PROGRESS
,
1462 vm_object_lock(object
);
1464 object
->wimg_bits
= wimg_mode
;
1465 queue_iterate(&object
->memq
,
1466 p
, vm_page_t
, listq
) {
1467 if (!p
->fictitious
) {
1472 pmap_sync_caches_phys(
1476 vm_object_unlock(object
);
1479 return KERN_SUCCESS
;
1482 if(permission
& MAP_MEM_ONLY
) {
1483 return KERN_INVALID_ARGUMENT
;
1486 user_object
= (vm_named_entry_t
)
1487 kalloc(sizeof (struct vm_named_entry
));
1488 if(user_object
== NULL
)
1489 return KERN_FAILURE
;
1490 named_entry_lock_init(user_object
);
1491 user_handle
= ipc_port_alloc_kernel();
1492 ip_lock(user_handle
);
1494 /* make a sonce right */
1495 user_handle
->ip_sorights
++;
1496 ip_reference(user_handle
);
1498 user_handle
->ip_destination
= IP_NULL
;
1499 user_handle
->ip_receiver_name
= MACH_PORT_NULL
;
1500 user_handle
->ip_receiver
= ipc_space_kernel
;
1502 /* make a send right */
1503 user_handle
->ip_mscount
++;
1504 user_handle
->ip_srights
++;
1505 ip_reference(user_handle
);
1507 ipc_port_nsrequest(user_handle
, 1, user_handle
, &previous
);
1508 /* nsrequest unlocks user_handle */
1510 user_object
->backing
.pager
= NULL
;
1511 user_object
->ref_count
= 1;
1513 if(permission
& MAP_MEM_NAMED_CREATE
) {
1514 user_object
->object
= NULL
;
1515 user_object
->internal
= TRUE
;
1516 user_object
->is_sub_map
= FALSE
;
1517 user_object
->offset
= 0;
1518 user_object
->protection
= protections
;
1519 SET_MAP_MEM(access
, user_object
->protection
);
1520 user_object
->size
= *size
;
1522 /* user_object pager and internal fields are not used */
1523 /* when the object field is filled in. */
1525 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1527 *object_handle
= user_handle
;
1528 return KERN_SUCCESS
;
1531 if(parent_entry
== NULL
) {
1532 /* Create a named object based on address range within the task map */
1533 /* Go find the object at given address */
1535 vm_map_lock_read(target_map
);
1537 /* get the object associated with the target address */
1538 /* note we check the permission of the range against */
1539 /* that requested by the caller */
1541 kr
= vm_map_lookup_locked(&target_map
, offset
,
1542 protections
, &version
,
1543 &object
, &obj_off
, &prot
, &wired
, &behavior
,
1544 &lo_offset
, &hi_offset
, &pmap_map
);
1545 if (kr
!= KERN_SUCCESS
) {
1546 vm_map_unlock_read(target_map
);
1549 if (((prot
& protections
) != protections
)
1550 || (object
== kernel_object
)) {
1551 kr
= KERN_INVALID_RIGHT
;
1552 vm_object_unlock(object
);
1553 vm_map_unlock_read(target_map
);
1554 if(pmap_map
!= target_map
)
1555 vm_map_unlock_read(pmap_map
);
1556 if(object
== kernel_object
) {
1557 printf("Warning: Attempt to create a named"
1558 " entry from the kernel_object\n");
1563 /* We have an object, now check to see if this object */
1564 /* is suitable. If not, create a shadow and share that */
1567 local_map
= original_map
;
1568 local_offset
= offset
;
1569 if(target_map
!= local_map
) {
1570 vm_map_unlock_read(target_map
);
1571 if(pmap_map
!= target_map
)
1572 vm_map_unlock_read(pmap_map
);
1573 vm_map_lock_read(local_map
);
1574 target_map
= local_map
;
1575 pmap_map
= local_map
;
1578 if(!vm_map_lookup_entry(local_map
,
1579 local_offset
, &map_entry
)) {
1580 kr
= KERN_INVALID_ARGUMENT
;
1581 vm_object_unlock(object
);
1582 vm_map_unlock_read(target_map
);
1583 if(pmap_map
!= target_map
)
1584 vm_map_unlock_read(pmap_map
);
1587 if(!(map_entry
->is_sub_map
)) {
1588 if(map_entry
->object
.vm_object
!= object
) {
1589 kr
= KERN_INVALID_ARGUMENT
;
1590 vm_object_unlock(object
);
1591 vm_map_unlock_read(target_map
);
1592 if(pmap_map
!= target_map
)
1593 vm_map_unlock_read(pmap_map
);
1596 if(map_entry
->wired_count
) {
1597 /* JMM - The check below should be reworked instead. */
1598 object
->true_share
= TRUE
;
1604 local_map
= map_entry
->object
.sub_map
;
1606 vm_map_lock_read(local_map
);
1607 vm_map_unlock_read(tmap
);
1608 target_map
= local_map
;
1609 pmap_map
= local_map
;
1610 local_offset
= local_offset
- map_entry
->vme_start
;
1611 local_offset
+= map_entry
->offset
;
1614 if(((map_entry
->max_protection
) & protections
) != protections
) {
1615 kr
= KERN_INVALID_RIGHT
;
1616 vm_object_unlock(object
);
1617 vm_map_unlock_read(target_map
);
1618 if(pmap_map
!= target_map
)
1619 vm_map_unlock_read(pmap_map
);
1623 mappable_size
= hi_offset
- obj_off
;
1624 total_size
= map_entry
->vme_end
- map_entry
->vme_start
;
1625 if(*size
> mappable_size
) {
1626 /* try to extend mappable size if the entries */
1627 /* following are from the same object and are */
1629 next_entry
= map_entry
->vme_next
;
1630 /* lets see if the next map entry is still */
1631 /* pointing at this object and is contiguous */
1632 while(*size
> mappable_size
) {
1633 if((next_entry
->object
.vm_object
== object
) &&
1634 (next_entry
->vme_start
==
1635 next_entry
->vme_prev
->vme_end
) &&
1636 (next_entry
->offset
==
1637 next_entry
->vme_prev
->offset
+
1638 (next_entry
->vme_prev
->vme_end
-
1639 next_entry
->vme_prev
->vme_start
))) {
1640 if(((next_entry
->max_protection
)
1641 & protections
) != protections
) {
1644 if (next_entry
->needs_copy
!=
1645 map_entry
->needs_copy
)
1647 mappable_size
+= next_entry
->vme_end
1648 - next_entry
->vme_start
;
1649 total_size
+= next_entry
->vme_end
1650 - next_entry
->vme_start
;
1651 next_entry
= next_entry
->vme_next
;
1659 if(object
->internal
) {
1660 /* vm_map_lookup_locked will create a shadow if */
1661 /* needs_copy is set but does not check for the */
1662 /* other two conditions shown. It is important to */
1663 /* set up an object which will not be pulled from */
1666 if ((map_entry
->needs_copy
|| object
->shadowed
||
1667 (object
->size
> total_size
))
1668 && !object
->true_share
) {
1669 if (vm_map_lock_read_to_write(target_map
)) {
1670 vm_map_lock_read(target_map
);
1675 * JMM - We need to avoid coming here when the object
1676 * is wired by anybody, not just the current map. Why
1677 * couldn't we use the standard vm_object_copy_quickly()
1681 /* create a shadow object */
1682 vm_object_shadow(&map_entry
->object
.vm_object
,
1683 &map_entry
->offset
, total_size
);
1684 shadow_object
= map_entry
->object
.vm_object
;
1685 vm_object_unlock(object
);
1686 vm_object_pmap_protect(
1687 object
, map_entry
->offset
,
1689 ((map_entry
->is_shared
1690 || target_map
->mapped
)
1693 map_entry
->vme_start
,
1694 map_entry
->protection
& ~VM_PROT_WRITE
);
1695 total_size
-= (map_entry
->vme_end
1696 - map_entry
->vme_start
);
1697 next_entry
= map_entry
->vme_next
;
1698 map_entry
->needs_copy
= FALSE
;
1699 while (total_size
) {
1700 if(next_entry
->object
.vm_object
== object
) {
1701 next_entry
->object
.vm_object
1704 = next_entry
->vme_prev
->offset
+
1705 (next_entry
->vme_prev
->vme_end
1706 - next_entry
->vme_prev
->vme_start
);
1707 next_entry
->needs_copy
= FALSE
;
1709 panic("mach_make_memory_entry_64:"
1710 " map entries out of sync\n");
1714 - next_entry
->vme_start
;
1715 next_entry
= next_entry
->vme_next
;
1718 object
= shadow_object
;
1719 vm_object_lock(object
);
1720 obj_off
= (local_offset
- map_entry
->vme_start
)
1721 + map_entry
->offset
;
1722 vm_map_lock_write_to_read(target_map
);
1728 /* note: in the future we can (if necessary) allow for */
1729 /* memory object lists, this will better support */
1730 /* fragmentation, but is it necessary? The user should */
1731 /* be encouraged to create address space oriented */
1732 /* shared objects from CLEAN memory regions which have */
1733 /* a known and defined history. i.e. no inheritence */
1734 /* share, make this call before making the region the */
1735 /* target of ipc's, etc. The code above, protecting */
1736 /* against delayed copy, etc. is mostly defensive. */
1738 wimg_mode
= object
->wimg_bits
;
1739 if(!(object
->nophyscache
)) {
1740 if(access
== MAP_MEM_IO
) {
1741 wimg_mode
= VM_WIMG_IO
;
1742 } else if (access
== MAP_MEM_COPYBACK
) {
1743 wimg_mode
= VM_WIMG_USE_DEFAULT
;
1744 } else if (access
== MAP_MEM_WTHRU
) {
1745 wimg_mode
= VM_WIMG_WTHRU
;
1746 } else if (access
== MAP_MEM_WCOMB
) {
1747 wimg_mode
= VM_WIMG_WCOMB
;
1751 object
->true_share
= TRUE
;
1752 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
)
1753 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
1755 /* we now point to this object, hold on to it */
1756 vm_object_reference_locked(object
);
1757 vm_map_unlock_read(target_map
);
1758 if(pmap_map
!= target_map
)
1759 vm_map_unlock_read(pmap_map
);
1761 if(object
->wimg_bits
!= wimg_mode
) {
1764 vm_object_paging_wait(object
, THREAD_UNINT
);
1766 queue_iterate(&object
->memq
,
1767 p
, vm_page_t
, listq
) {
1768 if (!p
->fictitious
) {
1773 pmap_sync_caches_phys(
1777 object
->wimg_bits
= wimg_mode
;
1779 user_object
->object
= object
;
1780 user_object
->internal
= object
->internal
;
1781 user_object
->is_sub_map
= FALSE
;
1782 user_object
->offset
= obj_off
;
1783 user_object
->protection
= permission
;
1785 /* the size of mapped entry that overlaps with our region */
1786 /* which is targeted for share. */
1787 /* (entry_end - entry_start) - */
1788 /* offset of our beg addr within entry */
1789 /* it corresponds to this: */
1791 if(*size
> mappable_size
)
1792 *size
= mappable_size
;
1794 user_object
->size
= *size
;
1796 /* user_object pager and internal fields are not used */
1797 /* when the object field is filled in. */
1799 vm_object_unlock(object
);
1800 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1802 *object_handle
= user_handle
;
1803 return KERN_SUCCESS
;
1806 vm_named_entry_t parent_object
;
1808 /* The new object will be base on an existing named object */
1809 if(ip_kotype(parent_entry
) != IKOT_NAMED_ENTRY
) {
1810 kr
= KERN_INVALID_ARGUMENT
;
1813 parent_object
= (vm_named_entry_t
)parent_entry
->ip_kobject
;
1814 if((offset
+ *size
) > parent_object
->size
) {
1815 kr
= KERN_INVALID_ARGUMENT
;
1819 user_object
->object
= parent_object
->object
;
1820 user_object
->size
= *size
;
1821 user_object
->offset
= parent_object
->offset
+ offset
;
1822 user_object
->protection
= parent_object
->protection
;
1823 user_object
->protection
&= ~VM_PROT_ALL
;
1824 user_object
->protection
= permission
& VM_PROT_ALL
;
1825 if(access
!= MAP_MEM_NOOP
) {
1826 SET_MAP_MEM(access
, user_object
->protection
);
1828 if(parent_object
->is_sub_map
) {
1829 user_object
->backing
.map
= parent_object
->backing
.map
;
1830 vm_map_lock(user_object
->backing
.map
);
1831 user_object
->backing
.map
->ref_count
++;
1832 vm_map_unlock(user_object
->backing
.map
);
1835 user_object
->backing
.pager
= parent_object
->backing
.pager
;
1837 user_object
->internal
= parent_object
->internal
;
1838 user_object
->is_sub_map
= parent_object
->is_sub_map
;
1840 if(parent_object
->object
!= NULL
) {
1841 /* we now point to this object, hold on */
1842 vm_object_reference(parent_object
->object
);
1843 vm_object_lock(parent_object
->object
);
1844 parent_object
->object
->true_share
= TRUE
;
1845 if (parent_object
->object
->copy_strategy
==
1846 MEMORY_OBJECT_COPY_SYMMETRIC
)
1847 parent_object
->object
->copy_strategy
=
1848 MEMORY_OBJECT_COPY_DELAY
;
1849 vm_object_unlock(parent_object
->object
);
1851 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1853 *object_handle
= user_handle
;
1854 return KERN_SUCCESS
;
1860 ipc_port_dealloc_kernel(user_handle
);
1861 kfree((vm_offset_t
)user_object
, sizeof (struct vm_named_entry
));
1866 mach_make_memory_entry(
1867 vm_map_t target_map
,
1870 vm_prot_t permission
,
1871 ipc_port_t
*object_handle
,
1872 ipc_port_t parent_entry
)
1874 vm_object_offset_t size_64
;
1877 size_64
= (vm_object_offset_t
)*size
;
1878 kr
= mach_make_memory_entry_64(target_map
, &size_64
,
1879 (vm_object_offset_t
)offset
, permission
, object_handle
,
1881 *size
= (vm_size_t
)size_64
;
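
/*
 * Illustrative sketch (not part of the original file): the classic pairing of
 * mach_make_memory_entry() and vm_map() to share anonymous memory.  One task
 * wraps a region it allocated in a named entry, hands the port to another
 * task (transfer not shown), and the receiver maps it.  Helper names are
 * hypothetical.
 */
#if 0	/* user-space usage example only */
#include <mach/mach.h>

static kern_return_t
example_share_region(vm_address_t addr, vm_size_t size, mach_port_t *entry_out)
{
	vm_size_t	entry_size = size;

	/* wrap [addr, addr+size) in a named entry with read/write permission */
	return mach_make_memory_entry(mach_task_self(), &entry_size, addr,
				      VM_PROT_READ | VM_PROT_WRITE,
				      entry_out, MACH_PORT_NULL);
}

static kern_return_t
example_map_region(mach_port_t entry, vm_size_t size, vm_address_t *addr_out)
{
	*addr_out = 0;
	/* map the named entry into this task at a kernel-chosen address */
	return vm_map(mach_task_self(), addr_out, size, 0, VM_FLAGS_ANYWHERE,
		      entry, 0, FALSE /* copy */,
		      VM_PROT_READ | VM_PROT_WRITE,
		      VM_PROT_READ | VM_PROT_WRITE,
		      VM_INHERIT_NONE);
}
#endif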
1889 vm_region_object_create(
1890 vm_map_t target_map
,
1892 ipc_port_t
*object_handle
)
1894 vm_named_entry_t user_object
;
1895 ipc_port_t user_handle
;
1898 ipc_port_t previous
;
1901 user_object
= (vm_named_entry_t
)
1902 kalloc(sizeof (struct vm_named_entry
));
1903 if(user_object
== NULL
) {
1904 return KERN_FAILURE
;
1906 named_entry_lock_init(user_object
);
1907 user_handle
= ipc_port_alloc_kernel();
1910 ip_lock(user_handle
);
1912 /* make a sonce right */
1913 user_handle
->ip_sorights
++;
1914 ip_reference(user_handle
);
1916 user_handle
->ip_destination
= IP_NULL
;
1917 user_handle
->ip_receiver_name
= MACH_PORT_NULL
;
1918 user_handle
->ip_receiver
= ipc_space_kernel
;
1920 /* make a send right */
1921 user_handle
->ip_mscount
++;
1922 user_handle
->ip_srights
++;
1923 ip_reference(user_handle
);
1925 ipc_port_nsrequest(user_handle
, 1, user_handle
, &previous
);
1926 /* nsrequest unlocks user_handle */
1928 /* Create a named object based on a submap of specified size */
1930 new_map
= vm_map_create(0, 0, size
, TRUE
);
1931 user_object
->backing
.map
= new_map
;
1934 user_object
->object
= VM_OBJECT_NULL
;
1935 user_object
->internal
= TRUE
;
1936 user_object
->is_sub_map
= TRUE
;
1937 user_object
->offset
= 0;
1938 user_object
->protection
= VM_PROT_ALL
;
1939 user_object
->size
= size
;
1940 user_object
->ref_count
= 1;
1942 ipc_kobject_set(user_handle
, (ipc_kobject_t
) user_object
,
1944 *object_handle
= user_handle
;
1945 return KERN_SUCCESS
;
/* For a given range, check all map entries.  If the entry corresponds to */
/* the old vm_region/map provided on the call, replace it with the        */
/* corresponding range in the new vm_region/map                           */
1952 kern_return_t
vm_map_region_replace(
1953 vm_map_t target_map
,
1954 ipc_port_t old_region
,
1955 ipc_port_t new_region
,
1959 vm_named_entry_t old_object
;
1960 vm_named_entry_t new_object
;
1961 vm_map_t old_submap
;
1962 vm_map_t new_submap
;
1964 vm_map_entry_t entry
;
1965 int nested_pmap
= 0;
1968 vm_map_lock(target_map
);
1969 old_object
= (vm_named_entry_t
)old_region
->ip_kobject
;
1970 new_object
= (vm_named_entry_t
)new_region
->ip_kobject
;
1971 if((!old_object
->is_sub_map
) || (!new_object
->is_sub_map
)) {
1972 vm_map_unlock(target_map
);
1973 return KERN_INVALID_ARGUMENT
;
1975 old_submap
= (vm_map_t
)old_object
->backing
.map
;
1976 new_submap
= (vm_map_t
)new_object
->backing
.map
;
1977 vm_map_lock(old_submap
);
1978 if((old_submap
->min_offset
!= new_submap
->min_offset
) ||
1979 (old_submap
->max_offset
!= new_submap
->max_offset
)) {
1980 vm_map_unlock(old_submap
);
1981 vm_map_unlock(target_map
);
1982 return KERN_INVALID_ARGUMENT
;
1984 if(!vm_map_lookup_entry(target_map
, start
, &entry
)) {
		/* if the src is not contained, the entry precedes */
1987 addr
= entry
->vme_start
;
1988 if(entry
== vm_map_to_entry(target_map
)) {
1989 vm_map_unlock(old_submap
);
1990 vm_map_unlock(target_map
);
1991 return KERN_SUCCESS
;
1994 if ((entry
->use_pmap
) &&
1995 (new_submap
->pmap
== NULL
)) {
1996 new_submap
->pmap
= pmap_create((vm_size_t
) 0);
1997 if(new_submap
->pmap
== PMAP_NULL
) {
1998 vm_map_unlock(old_submap
);
1999 vm_map_unlock(target_map
);
2000 return(KERN_NO_SPACE
);
2003 addr
= entry
->vme_start
;
2004 vm_map_reference(old_submap
);
2005 while((entry
!= vm_map_to_entry(target_map
)) &&
2006 (entry
->vme_start
< end
)) {
2007 if((entry
->is_sub_map
) &&
2008 (entry
->object
.sub_map
== old_submap
)) {
2009 if(entry
->use_pmap
) {
2010 if((start
& 0x0fffffff) ||
2011 ((end
- start
) != 0x10000000)) {
2012 vm_map_unlock(old_submap
);
2013 vm_map_deallocate(old_submap
);
2014 vm_map_unlock(target_map
);
2015 return KERN_INVALID_ARGUMENT
;
2019 entry
->object
.sub_map
= new_submap
;
2020 vm_map_reference(new_submap
);
2021 vm_map_deallocate(old_submap
);
2023 entry
= entry
->vme_next
;
2024 addr
= entry
->vme_start
;
2028 pmap_unnest(target_map
->pmap
, (addr64_t
)start
);
2029 if(target_map
->mapped
) {
2030 vm_map_submap_pmap_clean(target_map
,
2031 start
, end
, old_submap
, 0);
2033 pmap_nest(target_map
->pmap
, new_submap
->pmap
,
2034 (addr64_t
)start
, (addr64_t
)start
,
2035 (addr64_t
)(end
- start
));
2038 vm_map_submap_pmap_clean(target_map
,
2039 start
, end
, old_submap
, 0);
2041 vm_map_unlock(old_submap
);
2042 vm_map_deallocate(old_submap
);
2043 vm_map_unlock(target_map
);
2044 return KERN_SUCCESS
;
void
mach_destroy_memory_entry(
	ipc_port_t	port)
{
	vm_named_entry_t	named_entry;
#if MACH_ASSERT
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
	named_entry = (vm_named_entry_t)port->ip_kobject;
	mutex_lock(&(named_entry)->Lock);
	named_entry->ref_count -= 1;
	if(named_entry->ref_count == 0) {
		if(named_entry->object) {
			/* release the memory object we've been pointing to */
			vm_object_deallocate(named_entry->object);
		}
		if(named_entry->is_sub_map) {
			vm_map_deallocate(named_entry->backing.map);
		}
		kfree((vm_offset_t)port->ip_kobject,
				sizeof (struct vm_named_entry));
	} else
		mutex_unlock(&(named_entry)->Lock);
}
2076 vm_map_t target_map
,
2081 vm_map_entry_t map_entry
;
2088 vm_map_lock(target_map
);
2089 if(!vm_map_lookup_entry(target_map
, offset
, &map_entry
)) {
2090 vm_map_unlock(target_map
);
2091 return KERN_FAILURE
;
2093 offset
-= map_entry
->vme_start
; /* adjust to offset within entry */
2094 offset
+= map_entry
->offset
; /* adjust to target object offset */
2095 if(map_entry
->object
.vm_object
!= VM_OBJECT_NULL
) {
2096 if(!map_entry
->is_sub_map
) {
2097 object
= map_entry
->object
.vm_object
;
2099 vm_map_unlock(target_map
);
2100 target_map
= map_entry
->object
.sub_map
;
2101 goto restart_page_query
;
2104 vm_map_unlock(target_map
);
2105 return KERN_FAILURE
;
2107 vm_object_lock(object
);
2108 vm_map_unlock(target_map
);
2110 m
= vm_page_lookup(object
, offset
);
2111 if (m
!= VM_PAGE_NULL
) {
2112 *disposition
|= VM_PAGE_QUERY_PAGE_PRESENT
;
2115 if(object
->shadow
) {
2116 offset
+= object
->shadow_offset
;
2117 vm_object_unlock(object
);
2118 object
= object
->shadow
;
2119 vm_object_lock(object
);
2122 vm_object_unlock(object
);
2123 return KERN_FAILURE
;
	/* The ref_count is not strictly accurate, it measures the number   */
	/* of entities holding a ref on the object, they may not be mapping */
	/* the object or may not be mapping the section holding the         */
	/* target page but it's still a ball park number and though an over-*/
	/* count, it picks up the copy-on-write cases                        */

	/* We could also get a picture of page sharing from pmap_attributes */
	/* but this would under count as only faulted-in mappings would     */
	/* show up.                                                          */
2137 *ref_count
= object
->ref_count
;
2139 if (m
->fictitious
) {
2140 *disposition
|= VM_PAGE_QUERY_PAGE_FICTITIOUS
;
2141 vm_object_unlock(object
);
2142 return KERN_SUCCESS
;
2146 *disposition
|= VM_PAGE_QUERY_PAGE_DIRTY
;
2147 else if(pmap_is_modified(m
->phys_page
))
2148 *disposition
|= VM_PAGE_QUERY_PAGE_DIRTY
;
2151 *disposition
|= VM_PAGE_QUERY_PAGE_REF
;
2152 else if(pmap_is_referenced(m
->phys_page
))
2153 *disposition
|= VM_PAGE_QUERY_PAGE_REF
;
2155 vm_object_unlock(object
);
2156 return KERN_SUCCESS
;
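
/*
 * Illustrative sketch (not part of the original file): interpreting the
 * disposition bits that vm_map_page_query() reports for a single page.  The
 * routine is exported through the vm_map MIG interface; the wrapper name and
 * output format here are hypothetical.
 */
#if 0	/* user-space usage example only */
#include <mach/mach.h>
#include <stdio.h>

static void
example_query_page(vm_address_t addr)
{
	integer_t	disposition = 0;
	integer_t	ref_count = 0;

	if (vm_map_page_query(mach_task_self(), addr,
			      &disposition, &ref_count) != KERN_SUCCESS)
		return;

	printf("page 0x%lx: %sresident, %sdirty, refs=%d\n",
	       (unsigned long)addr,
	       (disposition & VM_PAGE_QUERY_PAGE_PRESENT) ? "" : "not ",
	       (disposition & VM_PAGE_QUERY_PAGE_DIRTY) ? "" : "not ",
	       (int)ref_count);
}
#endif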
kern_return_t
set_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	if (IP_VALID(dynamic_pager_control_port))
		ipc_port_release_send(dynamic_pager_control_port);

	dynamic_pager_control_port = control_port;
	return KERN_SUCCESS;
}

kern_return_t
get_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	*control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	*control_port = ipc_port_copy_send(dynamic_pager_control_port);
	return KERN_SUCCESS;
}
2189 /* Retrieve a upl for an object underlying an address range in a map */
2194 vm_address_t offset
,
2195 vm_size_t
*upl_size
,
2197 upl_page_info_array_t page_list
,
2198 unsigned int *count
,
2200 int force_data_sync
)
2202 vm_map_entry_t entry
;
2204 int sync_cow_data
= FALSE
;
2205 vm_object_t local_object
;
2206 vm_offset_t local_offset
;
2207 vm_offset_t local_start
;
2210 caller_flags
= *flags
;
2211 if (!(caller_flags
& UPL_COPYOUT_FROM
)) {
2212 sync_cow_data
= TRUE
;
2215 return KERN_INVALID_ARGUMENT
;
2220 if (vm_map_lookup_entry(map
, offset
, &entry
)) {
2221 if (entry
->object
.vm_object
== VM_OBJECT_NULL
||
2222 !entry
->object
.vm_object
->phys_contiguous
) {
2223 if((*upl_size
/page_size
) > MAX_UPL_TRANSFER
) {
2224 *upl_size
= MAX_UPL_TRANSFER
* page_size
;
2227 if((entry
->vme_end
- offset
) < *upl_size
) {
2228 *upl_size
= entry
->vme_end
- offset
;
2230 if (caller_flags
& UPL_QUERY_OBJECT_TYPE
) {
2231 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
2233 } else if (entry
->object
.vm_object
->private) {
2234 *flags
= UPL_DEV_MEMORY
;
2235 if (entry
->object
.vm_object
->phys_contiguous
) {
2236 *flags
|= UPL_PHYS_CONTIG
;
2242 return KERN_SUCCESS
;
2245 * Create an object if necessary.
2247 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
2248 entry
->object
.vm_object
= vm_object_allocate(
2249 (vm_size_t
)(entry
->vme_end
- entry
->vme_start
));
2252 if (!(caller_flags
& UPL_COPYOUT_FROM
)) {
2253 if (!(entry
->protection
& VM_PROT_WRITE
)) {
2255 return KERN_PROTECTION_FAILURE
;
2257 if (entry
->needs_copy
) {
2260 vm_object_offset_t offset_hi
;
2261 vm_object_offset_t offset_lo
;
2262 vm_object_offset_t new_offset
;
2265 vm_behavior_t behavior
;
2266 vm_map_version_t version
;
2270 vm_map_lock_write_to_read(map
);
2271 if(vm_map_lookup_locked(&local_map
,
2272 offset
, VM_PROT_WRITE
,
2274 &new_offset
, &prot
, &wired
,
2275 &behavior
, &offset_lo
,
2276 &offset_hi
, &pmap_map
)) {
2277 vm_map_unlock(local_map
);
2278 return KERN_FAILURE
;
2280 if (pmap_map
!= map
) {
2281 vm_map_unlock(pmap_map
);
2283 vm_object_unlock(object
);
2284 vm_map_unlock(local_map
);
2286 goto REDISCOVER_ENTRY
;
2289 if (entry
->is_sub_map
) {
2292 submap
= entry
->object
.sub_map
;
2293 local_start
= entry
->vme_start
;
2294 local_offset
= entry
->offset
;
2295 vm_map_reference(submap
);
2298 ret
= (vm_map_get_upl(submap
,
2299 local_offset
+ (offset
- local_start
),
2300 upl_size
, upl
, page_list
, count
,
2301 flags
, force_data_sync
));
2303 vm_map_deallocate(submap
);
2307 if (sync_cow_data
) {
2308 if (entry
->object
.vm_object
->shadow
2309 || entry
->object
.vm_object
->copy
) {
2312 local_object
= entry
->object
.vm_object
;
2313 local_start
= entry
->vme_start
;
2314 local_offset
= entry
->offset
;
2315 vm_object_reference(local_object
);
2318 if(local_object
->copy
== NULL
) {
2319 flags
= MEMORY_OBJECT_DATA_SYNC
;
2321 flags
= MEMORY_OBJECT_COPY_SYNC
;
2324 if((local_object
->paging_offset
) &&
2325 (local_object
->pager
== 0)) {
2327 * do a little clean-up for our unorthodox
2328 * entry into a pager call from a non-pager
2329 * context. Normally the pager code
2330 * assumes that an object it has been called
2331 * with has a backing pager and so does
2332 * not bother to check the pager field
2333 * before relying on the paging_offset
2335 vm_object_lock(local_object
);
2336 if (local_object
->pager
== 0) {
2337 local_object
->paging_offset
= 0;
2339 vm_object_unlock(local_object
);
2342 if (entry
->object
.vm_object
->shadow
&&
2343 entry
->object
.vm_object
->copy
) {
2344 vm_object_lock_request(
2345 local_object
->shadow
,
2346 (vm_object_offset_t
)
2347 ((offset
- local_start
) +
2349 local_object
->shadow_offset
+
2350 local_object
->paging_offset
,
2352 MEMORY_OBJECT_DATA_SYNC
,
2355 sync_cow_data
= FALSE
;
2356 vm_object_deallocate(local_object
);
2357 goto REDISCOVER_ENTRY
;
2361 if (force_data_sync
) {
2363 local_object
= entry
->object
.vm_object
;
2364 local_start
= entry
->vme_start
;
2365 local_offset
= entry
->offset
;
2366 vm_object_reference(local_object
);
2369 if((local_object
->paging_offset
) &&
2370 (local_object
->pager
== 0)) {
2372 * do a little clean-up for our unorthodox
2373 * entry into a pager call from a non-pager
2374 * context. Normally the pager code
2375 * assumes that an object it has been called
2376 * with has a backing pager and so does
2377 * not bother to check the pager field
2378 * before relying on the paging_offset
2380 vm_object_lock(local_object
);
2381 if (local_object
->pager
== 0) {
2382 local_object
->paging_offset
= 0;
2384 vm_object_unlock(local_object
);
2387 vm_object_lock_request(
2389 (vm_object_offset_t
)
2390 ((offset
- local_start
) + local_offset
) +
2391 local_object
->paging_offset
,
2392 (vm_object_size_t
)*upl_size
, FALSE
,
2393 MEMORY_OBJECT_DATA_SYNC
,
2395 force_data_sync
= FALSE
;
2396 vm_object_deallocate(local_object
);
2397 goto REDISCOVER_ENTRY
;
2400 if(!(entry
->object
.vm_object
->private)) {
2401 if(*upl_size
> (MAX_UPL_TRANSFER
*PAGE_SIZE
))
2402 *upl_size
= (MAX_UPL_TRANSFER
*PAGE_SIZE
);
2403 if(entry
->object
.vm_object
->phys_contiguous
) {
2404 *flags
= UPL_PHYS_CONTIG
;
2409 *flags
= UPL_DEV_MEMORY
| UPL_PHYS_CONTIG
;
2411 local_object
= entry
->object
.vm_object
;
2412 local_offset
= entry
->offset
;
2413 local_start
= entry
->vme_start
;
2414 vm_object_reference(local_object
);
2416 if(caller_flags
& UPL_SET_IO_WIRE
) {
2417 ret
= (vm_object_iopl_request(local_object
,
2418 (vm_object_offset_t
)
2419 ((offset
- local_start
)
2427 ret
= (vm_object_upl_request(local_object
,
2428 (vm_object_offset_t
)
2429 ((offset
- local_start
)
2437 vm_object_deallocate(local_object
);
2442 return(KERN_FAILURE
);
2446 /* ******* Temporary Internal calls to UPL for BSD ***** */
2451 vm_offset_t
*dst_addr
)
2453 return (vm_upl_map(map
, upl
, dst_addr
));
2462 return(vm_upl_unmap(map
, upl
));
kern_return_t
kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count)
{
	kern_return_t	kr;

	kr = upl_commit(upl, pl, count);
	upl_deallocate(upl);
	return kr;
}
kern_return_t
kernel_upl_commit_range(
	upl_t			upl,
	vm_offset_t		offset,
	vm_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count)
{
	boolean_t		finished = FALSE;
	kern_return_t		kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}
kern_return_t
kernel_upl_abort_range(
	upl_t			upl,
	vm_offset_t		offset,
	vm_size_t		size,
	int			abort_flags)
{
	kern_return_t		kr;
	boolean_t		finished = FALSE;

	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
		upl_deallocate(upl);

	return kr;
}
kern_return_t
kernel_upl_abort(
	upl_t			upl,
	int			abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	return KERN_SUCCESS;
}
kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	task->system_shared_region = (vm_offset_t) shared_region;
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	unsigned int		*fs_base,
	unsigned int		*system,
	int			*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*fs_base = shared_region->fs_base;
	*system = shared_region->system;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);

	return KERN_SUCCESS;
}
kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t	target_region,
	shared_region_mapping_t	object_chain_region)
{
	shared_region_object_chain_t	object_ele;

	if(target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next)
{
	*shared_region = (shared_region_mapping_t)
		kalloc(sizeof (struct shared_region_mapping));
	if(*shared_region == NULL)
		return KERN_FAILURE;
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->fs_base = ENV_DEFAULT_ROOT;
	(*shared_region)->system = ENV_DEFAULT_SYSTEM;
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->default_env_list = NULL;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_set_alt_next(
	shared_region_mapping_t	shared_region,
	vm_offset_t		alt_next)
{
	shared_region->alternate_next = alt_next;
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	if(shared_region == NULL)
		return KERN_SUCCESS;
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	struct shared_region_task_mappings	sm_info;
	shared_region_mapping_t			next = NULL;
	int					ref_count;

	while (shared_region) {
		if ((ref_count =
		    hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			if(shared_region->region_mappings) {
				lsf_remove_regions_mappings(shared_region, &sm_info);
			}
			if(((vm_named_entry_t)
			    (shared_region->text_region->ip_kobject))
			    ->backing.map->pmap) {
				pmap_remove(((vm_named_entry_t)
				    (shared_region->text_region->ip_kobject))
				    ->backing.map->pmap,
				    sm_info.client_base,
				    sm_info.client_base + sm_info.text_size);
			}
			ipc_port_release_send(shared_region->text_region);
			if(shared_region->data_region)
				ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree((vm_offset_t)shared_region->object_chain,
					sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			kfree((vm_offset_t)shared_region,
				sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			/* Stale indicates that a system region is no */
			/* longer in the default environment list.    */
			if((ref_count == 1) &&
			    (shared_region->flags & SHARED_REGION_SYSTEM)
			    && (shared_region->flags & ~SHARED_REGION_STALE)) {
				remove_default_shared_region(shared_region);
			}
			break;
		}
	}
	return KERN_SUCCESS;
}
ppnum_t
vm_map_get_phys_page(
	vm_map_t	map,
	vm_offset_t	offset)
{
	vm_map_entry_t	entry;
	vm_object_t	object;
	ppnum_t		phys_page = 0;

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, offset, &entry)) {

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return (vm_offset_t) 0;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			vm_map_lock(entry->object.sub_map);
			old_map = map;
			map = entry->object.sub_map;
			offset = entry->offset + (offset - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		if (entry->object.vm_object->phys_contiguous) {
			/* These are not standard pageable memory mappings */
			/* If they are not present in the object they will */
			/* have to be picked up from the pager through the */
			/* fault mechanism.  */
			if(entry->object.vm_object->shadow_offset == 0) {
				/* need to call vm_fault */
				vm_map_unlock(map);
				vm_fault(map, offset, VM_PROT_NONE,
					FALSE, THREAD_UNINT, NULL, 0);
				vm_map_lock(map);
				continue;
			}
			offset = entry->offset + (offset - entry->vme_start);
			phys_page = (ppnum_t)
				((entry->object.vm_object->shadow_offset
							+ offset) >> 12);
			break;
		}

		offset = entry->offset + (offset - entry->vme_start);
		object = entry->object.vm_object;
		vm_object_lock(object);
		while (TRUE) {
			vm_page_t dst_page = vm_page_lookup(object, offset);

			if(dst_page == VM_PAGE_NULL) {
				if(object->shadow) {
					vm_object_t	old_object;

					vm_object_lock(object->shadow);
					old_object = object;
					offset = offset + object->shadow_offset;
					object = object->shadow;
					vm_object_unlock(old_object);
				} else {
					vm_object_unlock(object);
					break;
				}
			} else {
				phys_page = (ppnum_t)(dst_page->phys_page);
				vm_object_unlock(object);
				break;
			}
		}
		break;
	}

	vm_map_unlock(map);
	return phys_page;
}
kern_return_t
kernel_object_iopl_request(
	vm_named_entry_t	named_entry,
	memory_object_offset_t	offset,
	vm_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	vm_object_t		object;
	kern_return_t		ret;

	/* a few checks to make sure user is obeying rules */
	if(size == 0) {
		if(offset >= named_entry->size)
			return(KERN_INVALID_RIGHT);
		size = named_entry->size - offset;
	}
	if(cntrl_flags & UPL_COPYOUT_FROM) {
		if((named_entry->protection & VM_PROT_READ)
					!= VM_PROT_READ) {
			return(KERN_INVALID_RIGHT);
		}
	} else {
		if((named_entry->protection &
			(VM_PROT_READ | VM_PROT_WRITE))
			!= (VM_PROT_READ | VM_PROT_WRITE)) {
			return(KERN_INVALID_RIGHT);
		}
	}
	if(named_entry->size < (offset + size))
		return(KERN_INVALID_ARGUMENT);

	/* the callers parameter offset is defined to be the */
	/* offset from beginning of named entry offset in object */
	offset = offset + named_entry->offset;

	if(named_entry->is_sub_map)
		return (KERN_INVALID_ARGUMENT);

	named_entry_lock(named_entry);

	if(named_entry->object) {
		/* This is the case where we are going to map */
		/* an already mapped object.  If the object is */
		/* not ready it is internal.  An external      */
		/* object cannot be mapped until it is ready   */
		/* we can therefore avoid the ready check      */
		/* in this case.  */
		vm_object_reference(named_entry->object);
		object = named_entry->object;
		named_entry_unlock(named_entry);
	} else {
		object = vm_object_enter(named_entry->backing.pager,
				named_entry->offset + named_entry->size,
				named_entry->internal,
				FALSE,
				FALSE);
		if (object == VM_OBJECT_NULL) {
			named_entry_unlock(named_entry);
			return(KERN_INVALID_OBJECT);
		}
		vm_object_lock(object);

		/* create an extra reference for the named entry */
		vm_object_reference_locked(object);
		named_entry->object = object;
		named_entry_unlock(named_entry);

		/* wait for object (if any) to be ready */
		while (!object->pager_ready) {
			vm_object_wait(object,
				VM_OBJECT_EVENT_PAGER_READY,
				THREAD_UNINT);
			vm_object_lock(object);
		}
		vm_object_unlock(object);
	}

	ret = vm_object_iopl_request(object,
				offset,
				size,
				upl_ptr,
				user_page_list,
				page_list_count,
				cntrl_flags);
	vm_object_deallocate(object);
	return ret;
}