2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
23 * @APPLE_LICENSE_HEADER_END@
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
43 * Carnegie Mellon requests users of this software to return to
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
57 * Author: Avadis Tevanian, Jr., Michael Wayne Young
60 * Virtual memory mapping module.
64 #include <task_swapper.h>
65 #include <mach_assert.h>
67 #include <mach/kern_return.h>
68 #include <mach/port.h>
69 #include <mach/vm_attributes.h>
70 #include <mach/vm_param.h>
71 #include <mach/vm_behavior.h>
72 #include <kern/assert.h>
73 #include <kern/counters.h>
74 #include <kern/zalloc.h>
75 #include <vm/vm_init.h>
76 #include <vm/vm_fault.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_object.h>
79 #include <vm/vm_page.h>
80 #include <vm/vm_kern.h>
81 #include <ipc/ipc_port.h>
82 #include <kern/sched_prim.h>
83 #include <kern/misc_protos.h>
84 #include <mach/vm_map_server.h>
85 #include <mach/mach_host_server.h>
87 #include <machine/db_machdep.h>
90 /* Internal prototypes */

92 extern boolean_t vm_map_range_check(
96		vm_map_entry_t	*entry);

98 extern vm_map_entry_t _vm_map_entry_create(
99		struct vm_map_header	*map_header);

101 extern void _vm_map_entry_dispose(
102		struct vm_map_header	*map_header,
103		vm_map_entry_t		entry);

105 extern void vm_map_pmap_enter(
108		vm_offset_t		end_addr,
110		vm_object_offset_t	offset,
111		vm_prot_t		protection);

113 extern void _vm_map_clip_end(
114		struct vm_map_header	*map_header,
115		vm_map_entry_t		entry,

118 extern void vm_map_entry_delete(
120		vm_map_entry_t		entry);

122 extern kern_return_t vm_map_delete(

128 extern void vm_map_copy_steal_pages(

131 extern kern_return_t vm_map_copy_overwrite_unaligned(
133		vm_map_entry_t		entry,

137 extern kern_return_t vm_map_copy_overwrite_aligned(
139		vm_map_entry_t		tmp_entry,

144 extern kern_return_t vm_map_copyin_kernel_buffer(
146		vm_offset_t		src_addr,
148		boolean_t		src_destroy,
149		vm_map_copy_t		*copy_result);	/* OUT */

151 extern kern_return_t vm_map_copyout_kernel_buffer(
153		vm_offset_t		*addr,	/* IN/OUT */
155		boolean_t		overwrite);

157 extern void vm_map_fork_share(
159		vm_map_entry_t		old_entry,

162 extern boolean_t vm_map_fork_copy(
164		vm_map_entry_t		*old_entry_p,

167 extern kern_return_t vm_remap_range_allocate(
169		vm_offset_t		*address,	/* IN/OUT */
173		vm_map_entry_t		*map_entry);	/* OUT */

175 extern void _vm_map_clip_start(
176		struct vm_map_header	*map_header,
177		vm_map_entry_t		entry,

180 void vm_region_top_walk(
181		vm_map_entry_t		entry,
182		vm_region_top_info_t	top);

185		vm_map_entry_t		entry,
186		vm_region_extended_info_t extended,
187		vm_object_offset_t	offset,
193 * Macros to copy a vm_map_entry. We must be careful to correctly
194 * manage the wired page count. vm_map_entry_copy() creates a new
195 * map entry to the same memory - the wired count in the new entry
196 * must be set to zero. vm_map_entry_copy_full() creates a new
197 * entry that is identical to the old entry. This preserves the
198 * wire count; it's used for map splitting and zone changing in
 * vm_map_copyout.
201 #define vm_map_entry_copy(NEW,OLD) \
204 (NEW)->is_shared = FALSE; \
205 (NEW)->needs_wakeup = FALSE; \
206 (NEW)->in_transition = FALSE; \
207 (NEW)->wired_count = 0; \
208 (NEW)->user_wired_count = 0; \
211 #define vm_map_entry_copy_full(NEW,OLD) (*(NEW) = *(OLD))
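
/*
 *	Illustrative sketch (not part of the original source): the practical
 *	difference between the two copy macros above.  The helper name is
 *	hypothetical.
 */
#if 0	/* example only */
static void
example_duplicate_entry(vm_map_entry_t dst, vm_map_entry_t src)
{
	/* New mapping of the same memory: wired counts are reset. */
	vm_map_entry_copy(dst, src);
	assert(dst->wired_count == 0 && dst->user_wired_count == 0);

	/* Bit-for-bit duplicate: wired counts are preserved. */
	vm_map_entry_copy_full(dst, src);
	assert(dst->wired_count == src->wired_count);
}
#endif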
214 * Virtual memory maps provide for the mapping, protection,
215 * and sharing of virtual memory objects. In addition,
216 * this module provides for an efficient virtual copy of
217 * memory from one map to another.
219 * Synchronization is required prior to most operations.
221 * Maps consist of an ordered doubly-linked list of simple
222 * entries; a single hint is used to speed up lookups.
224 * Sharing maps have been deleted from this version of Mach.
225 * All shared objects are now mapped directly into the respective
226 * maps. This requires a change in the copy on write strategy;
227 * the asymmetric (delayed) strategy is used for shared temporary
228 * objects instead of the symmetric (shadow) strategy. All maps
229 * are now "top level" maps (either task map, kernel map or submap
230 * of the kernel map).
232 * Since portions of maps are specified by start/end addresses,
233 * which may not align with existing map entries, all
234 * routines merely "clip" entries to these start/end values.
235 * [That is, an entry is split into two, bordering at a
236 * start or end value.] Note that these clippings may not
237 * always be necessary (as the two resulting entries are then
238 * not changed); however, the clipping is done for convenience.
239 * No attempt is currently made to "glue back together" two
242 * The symmetric (shadow) copy strategy implements virtual copy
243 * by copying VM object references from one map to
244 * another, and then marking both regions as copy-on-write.
245 * It is important to note that only one writeable reference
246 * to a VM object region exists in any map when this strategy
247 * is used -- this means that shadow object creation can be
248 * delayed until a write operation occurs. The asymmetric (delayed)
249 * strategy allows multiple maps to have writeable references to
250 * the same region of a vm object, and hence cannot delay creating
251 * its copy objects. See vm_object_copy_quickly() in vm_object.c.
252 * Copying of permanent objects is completely different; see
253 * vm_object_copy_strategically() in vm_object.c.
256 zone_t		vm_map_zone;		/* zone for vm_map structures */
257 zone_t		vm_map_entry_zone;	/* zone for vm_map_entry structures */
258 zone_t		vm_map_kentry_zone;	/* zone for kernel entry structures */
259 zone_t		vm_map_copy_zone;	/* zone for vm_map_copy structures */
263 * Placeholder object for submap operations. This object is dropped
264 * into the range by a call to vm_map_find, and removed when
265 * vm_map_submap creates the submap.
268 vm_object_t	vm_submap_object;
273 * Initialize the vm_map module. Must be called before
274 * any other vm_map routines.
276 * Map and entry structures are allocated from zones -- we must
277 * initialize those zones.
279 * There are three zones of interest:
281 * vm_map_zone: used to allocate maps.
282 * vm_map_entry_zone: used to allocate map entries.
283 * vm_map_kentry_zone: used to allocate map entries for the kernel.
285 * The kernel allocates map entries from a special zone that is initially
286 * "crammed" with memory. It would be difficult (perhaps impossible) for
287 * the kernel to allocate more memory to an entry zone when it became
288 * empty since the very act of allocating memory implies the creation
292 vm_offset_t	map_data;
293 vm_size_t	map_data_size;
294 vm_offset_t	kentry_data;
295 vm_size_t	kentry_data_size;
296 int		kentry_count = 2048;		/* to init kentry_data_size */
298 #define NO_COALESCE_LIMIT (1024 * 128)
301 * Threshold for aggressive (eager) page map entering for vm copyout
302 * operations. Any copyout larger will NOT be aggressively entered.
304 vm_size_t	vm_map_aggressive_enter_max;	/* set by bootstrap */
310	vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 40*1024,

313	vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
314			1024*1024, PAGE_SIZE*5,
315			"non-kernel map entries");

317	vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
318			kentry_data_size, kentry_data_size,
319			"kernel map entries");

321	vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
322			16*1024, PAGE_SIZE, "map copies");
325 * Cram the map and kentry zones with initial data.
326 * Set kentry_zone non-collectible to aid zone_gc().
328	zone_change(vm_map_zone, Z_COLLECT, FALSE);
329	zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE);
330	zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE);
331	zcram(vm_map_zone, map_data, map_data_size);
332	zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
339	map_data_size = round_page_32(10 * sizeof(struct vm_map));
340	map_data = pmap_steal_memory(map_data_size);
344 * Limiting worst case: vm_map_kentry_zone needs to map each "available"
345 * physical page (i.e. that beyond the kernel image and page tables)
346 * individually; we guess at most one entry per eight pages in the
347 * real world. This works out to roughly .1 of 1% of physical memory,
348 * or roughly 1900 entries (64K) for a 64M machine with 4K pages.
351	kentry_count = pmap_free_pages() / 8;
	kentry_data_size =
355		round_page_32(kentry_count * sizeof(struct vm_map_entry));
356	kentry_data = pmap_steal_memory(kentry_data_size);
362 * Creates and returns a new empty VM map with
363 * the given physical map structure, and having
364 * the given lower and upper address bounds.
373	register vm_map_t	result;

375	result = (vm_map_t) zalloc(vm_map_zone);
376	if (result == VM_MAP_NULL)
377		panic("vm_map_create");

379	vm_map_first_entry(result) = vm_map_to_entry(result);
380	vm_map_last_entry(result)  = vm_map_to_entry(result);
381	result->hdr.nentries = 0;
382	result->hdr.entries_pageable = pageable;

385	result->ref_count = 1;
387	result->res_count = 1;
388	result->sw_state = MAP_SW_IN;
389 #endif	/* TASK_SWAPPER */

391	result->min_offset = min;
392	result->max_offset = max;
393	result->wiring_required = FALSE;
394	result->no_zero_fill = FALSE;
395	result->mapped = FALSE;
396	result->wait_for_space = FALSE;
397	result->first_free = vm_map_to_entry(result);
398	result->hint = vm_map_to_entry(result);
399	vm_map_lock_init(result);
400	mutex_init(&result->s_lock, ETAP_VM_RESULT);
406 * vm_map_entry_create: [ internal use only ]
408 * Allocates a VM map entry for insertion in the
409 * given map (or map copy). No fields are filled.
411 #define vm_map_entry_create(map) \
412 _vm_map_entry_create(&(map)->hdr)
414 #define vm_map_copy_entry_create(copy) \
415 _vm_map_entry_create(&(copy)->cpy_hdr)
418 _vm_map_entry_create(
419	register struct vm_map_header	*map_header)
421	register zone_t		zone;
422	register vm_map_entry_t	entry;

424	if (map_header->entries_pageable)
425		zone = vm_map_entry_zone;
	else
427		zone = vm_map_kentry_zone;

429	entry = (vm_map_entry_t) zalloc(zone);
430	if (entry == VM_MAP_ENTRY_NULL)
431		panic("vm_map_entry_create");
437 * vm_map_entry_dispose: [ internal use only ]
439 * Inverse of vm_map_entry_create.
441 #define vm_map_entry_dispose(map, entry) \
443 if((entry) == (map)->first_free) \
444 (map)->first_free = vm_map_to_entry(map); \
445 if((entry) == (map)->hint) \
446 (map)->hint = vm_map_to_entry(map); \
447 _vm_map_entry_dispose(&(map)->hdr, (entry)); \
450 #define vm_map_copy_entry_dispose(map, entry) \
451 _vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
454 _vm_map_entry_dispose(
455	register struct vm_map_header	*map_header,
456	register vm_map_entry_t		entry)
458	register zone_t		zone;

460	if (map_header->entries_pageable)
461		zone = vm_map_entry_zone;
	else
463		zone = vm_map_kentry_zone;

465	zfree(zone, (vm_offset_t) entry);
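
/*
 *	Usage sketch (illustrative only; the helper name is hypothetical):
 *	map entries are always allocated and freed through the map-level
 *	macros above so that the pageable vs. kernel entry zone choice
 *	stays consistent with the map's header.
 */
#if 0	/* example only */
static void
example_entry_lifecycle(vm_map_t map)
{
	vm_map_entry_t	entry;

	entry = vm_map_entry_create(map);	/* zone chosen from map->hdr */
	/* ... fill in and link the entry, or abandon it: ... */
	vm_map_entry_dispose(map, entry);
}
#endif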
468 boolean_t first_free_is_valid(vm_map_t map);	/* forward */
469 boolean_t first_free_check = FALSE;
474	vm_map_entry_t	entry, next;

476	if (!first_free_check)

479	entry = vm_map_to_entry(map);
480	next = entry->vme_next;
481	while (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_end) ||
482	       (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_start) &&
483		next != vm_map_to_entry(map))) {
485		next = entry->vme_next;
486		if (entry == vm_map_to_entry(map))
489	if (map->first_free != entry) {
490		printf("Bad first_free for map 0x%x: 0x%x should be 0x%x\n",
491			map, map->first_free, entry);
500 * Updates the map->first_free pointer to the
501 * entry immediately before the first hole in the map.
502 * The map should be locked.
504 #define UPDATE_FIRST_FREE(map, new_first_free) \
507 vm_map_entry_t UFF_first_free; \
508 vm_map_entry_t UFF_next_entry; \
510 UFF_first_free = (new_first_free); \
511 UFF_next_entry = UFF_first_free->vme_next; \
512 while (trunc_page_32(UFF_next_entry->vme_start) == \
513 trunc_page_32(UFF_first_free->vme_end) || \
514 (trunc_page_32(UFF_next_entry->vme_start) == \
515 trunc_page_32(UFF_first_free->vme_start) && \
516 UFF_next_entry != vm_map_to_entry(UFF_map))) { \
517 UFF_first_free = UFF_next_entry; \
518 UFF_next_entry = UFF_first_free->vme_next; \
519 if (UFF_first_free == vm_map_to_entry(UFF_map)) \
522 UFF_map->first_free = UFF_first_free; \
523 assert(first_free_is_valid(UFF_map)); \
527 * vm_map_entry_{un,}link:
529 * Insert/remove entries from maps (or map copies).
531 #define vm_map_entry_link(map, after_where, entry) \
534 vm_map_entry_t VMEL_entry; \
536 VMEL_entry = (entry); \
537 _vm_map_entry_link(&VMEL_map->hdr, after_where, VMEL_entry); \
538 UPDATE_FIRST_FREE(VMEL_map, VMEL_map->first_free); \
542 #define vm_map_copy_entry_link(copy, after_where, entry) \
543 _vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry))
545 #define _vm_map_entry_link(hdr, after_where, entry) \
548 (entry)->vme_prev = (after_where); \
549 (entry)->vme_next = (after_where)->vme_next; \
550 (entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
553 #define vm_map_entry_unlink(map, entry) \
556 vm_map_entry_t VMEU_entry; \
557 vm_map_entry_t VMEU_first_free; \
559 VMEU_entry = (entry); \
560 if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) \
561 VMEU_first_free = VMEU_entry->vme_prev; \
563 VMEU_first_free = VMEU_map->first_free; \
564 _vm_map_entry_unlink(&VMEU_map->hdr, VMEU_entry); \
565 UPDATE_FIRST_FREE(VMEU_map, VMEU_first_free); \
568 #define vm_map_copy_entry_unlink(copy, entry) \
569 _vm_map_entry_unlink(&(copy)->cpy_hdr, (entry))
571 #define _vm_map_entry_unlink(hdr, entry) \
574 (entry)->vme_next->vme_prev = (entry)->vme_prev; \
575 (entry)->vme_prev->vme_next = (entry)->vme_next; \
578 #if MACH_ASSERT && TASK_SWAPPER
580 * vm_map_res_reference:
582 * Adds another valid residence count to the given map.
584 * Map is locked so this function can be called from
588 void vm_map_res_reference(register vm_map_t map
)
590 /* assert map is locked */
591 assert(map
->res_count
>= 0);
592 assert(map
->ref_count
>= map
->res_count
);
593 if (map
->res_count
== 0) {
594 mutex_unlock(&map
->s_lock
);
597 mutex_lock(&map
->s_lock
);
605 * vm_map_reference_swap:
607 * Adds valid reference and residence counts to the given map.
609 * The map may not be in memory (i.e. zero residence count).
612 void vm_map_reference_swap(register vm_map_t map
)
614 assert(map
!= VM_MAP_NULL
);
615 mutex_lock(&map
->s_lock
);
616 assert(map
->res_count
>= 0);
617 assert(map
->ref_count
>= map
->res_count
);
619 vm_map_res_reference(map
);
620 mutex_unlock(&map
->s_lock
);
624 * vm_map_res_deallocate:
626 * Decrement residence count on a map; possibly causing swapout.
628 * The map must be in memory (i.e. non-zero residence count).
630 * The map is locked, so this function is callable from vm_map_deallocate.
633 void vm_map_res_deallocate(register vm_map_t map
)
635 assert(map
->res_count
> 0);
636 if (--map
->res_count
== 0) {
637 mutex_unlock(&map
->s_lock
);
641 mutex_lock(&map
->s_lock
);
643 assert(map
->ref_count
>= map
->res_count
);
645 #endif /* MACH_ASSERT && TASK_SWAPPER */
650 * Actually destroy a map.
654 register vm_map_t map
)
657 (void) vm_map_delete(map
, map
->min_offset
,
658 map
->max_offset
, VM_MAP_NO_FLAGS
);
662 pmap_destroy(map
->pmap
);
664 zfree(vm_map_zone
, (vm_offset_t
) map
);
669 * vm_map_swapin/vm_map_swapout
671 * Swap a map in and out, either referencing or releasing its resources.
672 * These functions are internal use only; however, they must be exported
673 * because they may be called from macros, which are exported.
675 * In the case of swapout, there could be races on the residence count,
676 * so if the residence count is up, we return, assuming that a
677 * vm_map_deallocate() call in the near future will bring us back.
680 * -- We use the map write lock for synchronization among races.
681 * -- The map write lock, and not the simple s_lock, protects the
682 * swap state of the map.
683 * -- If a map entry is a share map, then we hold both locks, in
684 * hierarchical order.
686 * Synchronization Notes:
687 * 1) If a vm_map_swapin() call happens while swapout in progress, it
688 * will block on the map lock and proceed when swapout is through.
689 * 2) A vm_map_reference() call at this time is illegal, and will
690 * cause a panic. vm_map_reference() is only allowed on resident
691 * maps, since it refuses to block.
692 * 3) A vm_map_swapin() call during a swapin will block, and
693 * proceed when the first swapin is done, turning into a nop.
694 * This is the reason the res_count is not incremented until
695 * after the swapin is complete.
696 * 4) There is a timing hole after the checks of the res_count, before
697 * the map lock is taken, during which a swapin may get the lock
698 * before a swapout about to happen. If this happens, the swapin
699 * will detect the state and increment the reference count, causing
700 * the swapout to be a nop, thereby delaying it until a later
701 * vm_map_deallocate. If the swapout gets the lock first, then
702 * the swapin will simply block until the swapout is done, and
705 * Because vm_map_swapin() is potentially an expensive operation, it
706 * should be used with caution.
709 * 1) A map with a residence count of zero is either swapped, or
711 * 2) A map with a non-zero residence count is either resident,
712 * or being swapped in.
715 int vm_map_swap_enable
= 1;
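
/*
 *	Illustrative sketch (not part of the original source): the intended
 *	pairing of the swap-aware reference calls under TASK_SWAPPER.  The
 *	helper name is hypothetical.
 */
#if 0	/* example only */
static void
example_swap_reference(vm_map_t map)
{
	vm_map_reference_swap(map);	/* raises both ref_count and res_count */
	/* ... the map is guaranteed resident while we use it here ... */
	vm_map_deallocate(map);		/* drops the counts again */
}
#endif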
717 void vm_map_swapin (vm_map_t map
)
719 register vm_map_entry_t entry
;
721 if (!vm_map_swap_enable
) /* debug */
726 * First deal with various races.
728 if (map
->sw_state
== MAP_SW_IN
)
730 * we raced with swapout and won. Returning will incr.
731 * the res_count, turning the swapout into a nop.
736 * The residence count must be zero. If we raced with another
737 * swapin, the state would have been IN; if we raced with a
738 * swapout (after another competing swapin), we must have lost
739 * the race to get here (see above comment), in which case
740 * res_count is still 0.
742 assert(map
->res_count
== 0);
745 * There are no intermediate states of a map going out or
746 * coming in, since the map is locked during the transition.
748 assert(map
->sw_state
== MAP_SW_OUT
);
751 * We now operate upon each map entry. If the entry is a sub-
752 * or share-map, we call vm_map_res_reference upon it.
753 * If the entry is an object, we call vm_object_res_reference
754 * (this may iterate through the shadow chain).
755 * Note that we hold the map locked the entire time,
756 * even if we get back here via a recursive call in
757 * vm_map_res_reference.
759 entry
= vm_map_first_entry(map
);
761 while (entry
!= vm_map_to_entry(map
)) {
762 if (entry
->object
.vm_object
!= VM_OBJECT_NULL
) {
763 if (entry
->is_sub_map
) {
764 vm_map_t lmap
= entry
->object
.sub_map
;
765 mutex_lock(&lmap
->s_lock
);
766 vm_map_res_reference(lmap
);
767 mutex_unlock(&lmap
->s_lock
);
769 vm_object_t object
= entry
->object
.vm_object
;
770 vm_object_lock(object
);
772 * This call may iterate through the
775 vm_object_res_reference(object
);
776 vm_object_unlock(object
);
779 entry
= entry
->vme_next
;
781 assert(map
->sw_state
== MAP_SW_OUT
);
782 map
->sw_state
= MAP_SW_IN
;
785 void vm_map_swapout(vm_map_t map
)
787 register vm_map_entry_t entry
;
791 * First deal with various races.
792 * If we raced with a swapin and lost, the residence count
793 * will have been incremented to 1, and we simply return.
795 mutex_lock(&map
->s_lock
);
796 if (map
->res_count
!= 0) {
797 mutex_unlock(&map
->s_lock
);
800 mutex_unlock(&map
->s_lock
);
803 * There are no intermediate states of a map going out or
804 * coming in, since the map is locked during the transition.
806 assert(map
->sw_state
== MAP_SW_IN
);
808 if (!vm_map_swap_enable
)
812 * We now operate upon each map entry. If the entry is a sub-
813 * or share-map, we call vm_map_res_deallocate upon it.
814 * If the entry is an object, we call vm_object_res_deallocate
815 * (this may iterate through the shadow chain).
816 * Note that we hold the map locked the entire time,
817 * even if we get back here via a recursive call in
818 * vm_map_res_deallocate.
820 entry
= vm_map_first_entry(map
);
822 while (entry
!= vm_map_to_entry(map
)) {
823 if (entry
->object
.vm_object
!= VM_OBJECT_NULL
) {
824 if (entry
->is_sub_map
) {
825 vm_map_t lmap
= entry
->object
.sub_map
;
826 mutex_lock(&lmap
->s_lock
);
827 vm_map_res_deallocate(lmap
);
828 mutex_unlock(&lmap
->s_lock
);
830 vm_object_t object
= entry
->object
.vm_object
;
831 vm_object_lock(object
);
833 * This call may take a long time,
834 * since it could actively push
835 * out pages (if we implement it
838 vm_object_res_deallocate(object
);
839 vm_object_unlock(object
);
842 entry
= entry
->vme_next
;
844 assert(map
->sw_state
== MAP_SW_IN
);
845 map
->sw_state
= MAP_SW_OUT
;
848 #endif /* TASK_SWAPPER */
854 * Saves the specified entry as the hint for
855 * future lookups. Performs necessary interlocks.
857 #define SAVE_HINT(map,value) \
858 mutex_lock(&(map)->s_lock); \
859 (map)->hint = (value); \
860 mutex_unlock(&(map)->s_lock);
863 * vm_map_lookup_entry: [ internal use only ]
865 * Finds the map entry containing (or
866 * immediately preceding) the specified address
867 * in the given map; the entry is returned
868 * in the "entry" parameter. The boolean
869 * result indicates whether the address is
870 * actually contained in the map.
874 register vm_map_t map
,
875 register vm_offset_t address
,
876 vm_map_entry_t
*entry
) /* OUT */
878 register vm_map_entry_t cur
;
879 register vm_map_entry_t last
;
882 * Start looking either from the head of the
883 * list, or from the hint.
886 mutex_lock(&map
->s_lock
);
888 mutex_unlock(&map
->s_lock
);
890 if (cur
== vm_map_to_entry(map
))
893 if (address
>= cur
->vme_start
) {
895 * Go from hint to end of list.
897 * But first, make a quick check to see if
898 * we are already looking at the entry we
899 * want (which is usually the case).
900 * Note also that we don't need to save the hint
901 * here... it is the same hint (unless we are
902 * at the header, in which case the hint didn't
903 * buy us anything anyway).
905 last
= vm_map_to_entry(map
);
906 if ((cur
!= last
) && (cur
->vme_end
> address
)) {
913 * Go from start to hint, *inclusively*
915 last
= cur
->vme_next
;
916 cur
= vm_map_first_entry(map
);
923 while (cur
!= last
) {
924 if (cur
->vme_end
> address
) {
925 if (address
>= cur
->vme_start
) {
927 * Save this lookup for future
939 *entry
= cur
->vme_prev
;
940 SAVE_HINT(map
, *entry
);
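
/*
 *	Usage sketch (illustrative only; the helper name is hypothetical):
 *	a typical caller holds the map lock, asks for the entry covering an
 *	address, and treats a FALSE return as "address falls in a hole"
 *	(in which case *entry is the entry preceding the hole).
 */
#if 0	/* example only */
static boolean_t
example_address_is_mapped(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t	entry;
	boolean_t	mapped;

	vm_map_lock_read(map);
	mapped = vm_map_lookup_entry(map, addr, &entry);
	vm_map_unlock_read(map);
	return mapped;
}
#endif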
945 * Routine: vm_map_find_space
947 * Allocate a range in the specified virtual address map,
948 * returning the entry allocated for that range.
949 * Used by kmem_alloc, etc.
951 * The map must NOT be locked. It will be returned locked
952 * on KERN_SUCCESS, unlocked on failure.
954 * If an entry is allocated, the object/offset fields
955 * are initialized to zero.
959 register vm_map_t map
,
960 vm_offset_t
*address
, /* OUT */
963 vm_map_entry_t
*o_entry
) /* OUT */
965 register vm_map_entry_t entry
, new_entry
;
966 register vm_offset_t start
;
967 register vm_offset_t end
;
969 new_entry
= vm_map_entry_create(map
);
972 * Look for the first possible address; if there's already
973 * something at this address, we have to start after it.
978 assert(first_free_is_valid(map
));
979 if ((entry
= map
->first_free
) == vm_map_to_entry(map
))
980 start
= map
->min_offset
;
982 start
= entry
->vme_end
;
985 * In any case, the "entry" always precedes
986 * the proposed new region throughout the loop:
990 register vm_map_entry_t next
;
993 * Find the end of the proposed new region.
994 * Be sure we didn't go beyond the end, or
995 * wrap around the address.
998 end
= ((start
+ mask
) & ~mask
);
1000 vm_map_entry_dispose(map
, new_entry
);
1002 return(KERN_NO_SPACE
);
1007 if ((end
> map
->max_offset
) || (end
< start
)) {
1008 vm_map_entry_dispose(map
, new_entry
);
1010 return(KERN_NO_SPACE
);
1014 * If there are no more entries, we must win.
1017 next
= entry
->vme_next
;
1018 if (next
== vm_map_to_entry(map
))
1022 * If there is another entry, it must be
1023 * after the end of the potential new region.
1026 if (next
->vme_start
>= end
)
1030 * Didn't fit -- move to the next entry.
1034 start
= entry
->vme_end
;
1039 * "start" and "end" should define the endpoints of the
1040 * available new range, and
1041 * "entry" should refer to the region before the new
1044 * the map should be locked.
1049 new_entry
->vme_start
= start
;
1050 new_entry
->vme_end
= end
;
1051 assert(page_aligned(new_entry
->vme_start
));
1052 assert(page_aligned(new_entry
->vme_end
));
1054 new_entry
->is_shared
= FALSE
;
1055 new_entry
->is_sub_map
= FALSE
;
1056 new_entry
->use_pmap
= FALSE
;
1057 new_entry
->object
.vm_object
= VM_OBJECT_NULL
;
1058 new_entry
->offset
= (vm_object_offset_t
) 0;
1060 new_entry
->needs_copy
= FALSE
;
1062 new_entry
->inheritance
= VM_INHERIT_DEFAULT
;
1063 new_entry
->protection
= VM_PROT_DEFAULT
;
1064 new_entry
->max_protection
= VM_PROT_ALL
;
1065 new_entry
->behavior
= VM_BEHAVIOR_DEFAULT
;
1066 new_entry
->wired_count
= 0;
1067 new_entry
->user_wired_count
= 0;
1069 new_entry
->in_transition
= FALSE
;
1070 new_entry
->needs_wakeup
= FALSE
;
1073 * Insert the new entry into the list
1076 vm_map_entry_link(map
, entry
, new_entry
);
1081 * Update the lookup hint
1083 SAVE_HINT(map
, new_entry
);
1085 *o_entry
= new_entry
;
1086 return(KERN_SUCCESS
);
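
/*
 *	Usage sketch (illustrative only; the helper name is hypothetical):
 *	this is roughly how kmem-style allocators use vm_map_find_space().
 *	On success the map comes back locked; the caller fills in the
 *	object/offset of the returned entry and unlocks.
 */
#if 0	/* example only */
static kern_return_t
example_reserve_range(vm_map_t map, vm_size_t size, vm_offset_t *addrp)
{
	vm_map_entry_t	entry;
	kern_return_t	kr;

	kr = vm_map_find_space(map, addrp, size, (vm_offset_t) 0, &entry);
	if (kr != KERN_SUCCESS)
		return kr;			/* map is left unlocked */

	entry->object.vm_object = vm_object_allocate(size);
	entry->offset = (vm_object_offset_t) 0;
	vm_map_unlock(map);			/* locked on KERN_SUCCESS */
	return KERN_SUCCESS;
}
#endif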
1089 int vm_map_pmap_enter_print
= FALSE
;
1090 int vm_map_pmap_enter_enable
= FALSE
;
1093 * Routine: vm_map_pmap_enter
1096 * Force pages from the specified object to be entered into
1097 * the pmap at the specified address if they are present.
1098 * As soon as a page is not found in the object, the scan ends.
1103 * In/out conditions:
1104 * The source map should not be locked on entry.
1109 register vm_offset_t addr
,
1110 register vm_offset_t end_addr
,
1111 register vm_object_t object
,
1112 vm_object_offset_t offset
,
1113 vm_prot_t protection
)
1115 unsigned int cache_attr
;
1120 while (addr
< end_addr
) {
1121 register vm_page_t m
;
1123 vm_object_lock(object
);
1124 vm_object_paging_begin(object
);
1126 m
= vm_page_lookup(object
, offset
);
1127 if (m
== VM_PAGE_NULL
|| m
->busy
||
1128 (m
->unusual
&& ( m
->error
|| m
->restart
|| m
->absent
||
1129 protection
& m
->page_lock
))) {
1131 vm_object_paging_end(object
);
1132 vm_object_unlock(object
);
1136 assert(!m
->fictitious
); /* XXX is this possible ??? */
1138 if (vm_map_pmap_enter_print
) {
1139 printf("vm_map_pmap_enter:");
1140 printf("map: %x, addr: %x, object: %x, offset: %x\n",
1141 map
, addr
, object
, offset
);
1145 if (m
->no_isync
== TRUE
) {
1146 pmap_sync_caches_phys(m
->phys_page
);
1147 m
->no_isync
= FALSE
;
1150 cache_attr
= ((unsigned int)object
->wimg_bits
) & VM_WIMG_MASK
;
1151 vm_object_unlock(object
);
1153 PMAP_ENTER(map
->pmap
, addr
, m
,
1154 protection
, cache_attr
, FALSE
);
1156 vm_object_lock(object
);
1158 PAGE_WAKEUP_DONE(m
);
1159 vm_page_lock_queues();
1160 if (!m
->active
&& !m
->inactive
)
1161 vm_page_activate(m
);
1162 vm_page_unlock_queues();
1163 vm_object_paging_end(object
);
1164 vm_object_unlock(object
);
1166 offset
+= PAGE_SIZE_64
;
1172 * Routine: vm_map_enter
1175 * Allocate a range in the specified virtual address map.
1176 * The resulting range will refer to memory defined by
1177 * the given memory object and offset into that object.
1179 * Arguments are as defined in the vm_map call.
1183 register vm_map_t map
,
1184 vm_offset_t
*address
, /* IN/OUT */
1189 vm_object_offset_t offset
,
1190 boolean_t needs_copy
,
1191 vm_prot_t cur_protection
,
1192 vm_prot_t max_protection
,
1193 vm_inherit_t inheritance
)
1195 vm_map_entry_t entry
;
1196 register vm_offset_t start
;
1197 register vm_offset_t end
;
1198 kern_return_t result
= KERN_SUCCESS
;
1200 boolean_t anywhere
= VM_FLAGS_ANYWHERE
& flags
;
1203 VM_GET_FLAGS_ALIAS(flags
, alias
);
1205 #define RETURN(value) { result = value; goto BailOut; }
1207 assert(page_aligned(*address
));
1208 assert(page_aligned(size
));
1217 * Calculate the first possible address.
1220 if (start
< map
->min_offset
)
1221 start
= map
->min_offset
;
1222 if (start
> map
->max_offset
)
1223 RETURN(KERN_NO_SPACE
);
1226 * Look for the first possible address;
1227 * if there's already something at this
1228 * address, we have to start after it.
1231 assert(first_free_is_valid(map
));
1232 if (start
== map
->min_offset
) {
1233 if ((entry
= map
->first_free
) != vm_map_to_entry(map
))
1234 start
= entry
->vme_end
;
1236 vm_map_entry_t tmp_entry
;
1237 if (vm_map_lookup_entry(map
, start
, &tmp_entry
))
1238 start
= tmp_entry
->vme_end
;
1243 * In any case, the "entry" always precedes
1244 * the proposed new region throughout the
1249 register vm_map_entry_t next
;
1252 * Find the end of the proposed new region.
1253 * Be sure we didn't go beyond the end, or
1254 * wrap around the address.
1257 end
= ((start
+ mask
) & ~mask
);
1259 RETURN(KERN_NO_SPACE
);
1263 if ((end
> map
->max_offset
) || (end
< start
)) {
1264 if (map
->wait_for_space
) {
1265 if (size
<= (map
->max_offset
-
1267 assert_wait((event_t
)map
,
1270 thread_block((void (*)(void))0);
1274 RETURN(KERN_NO_SPACE
);
1278 * If there are no more entries, we must win.
1281 next
= entry
->vme_next
;
1282 if (next
== vm_map_to_entry(map
))
1286 * If there is another entry, it must be
1287 * after the end of the potential new region.
1290 if (next
->vme_start
>= end
)
1294 * Didn't fit -- move to the next entry.
1298 start
= entry
->vme_end
;
1302 vm_map_entry_t temp_entry
;
1306 * the address doesn't itself violate
1307 * the mask requirement.
1311 if ((start
& mask
) != 0)
1312 RETURN(KERN_NO_SPACE
);
1315 * ... the address is within bounds
1320 if ((start
< map
->min_offset
) ||
1321 (end
> map
->max_offset
) ||
1323 RETURN(KERN_INVALID_ADDRESS
);
1327 * ... the starting address isn't allocated
1330 if (vm_map_lookup_entry(map
, start
, &temp_entry
))
1331 RETURN(KERN_NO_SPACE
);
1336 * ... the next region doesn't overlap the
1340 if ((entry
->vme_next
!= vm_map_to_entry(map
)) &&
1341 (entry
->vme_next
->vme_start
< end
))
1342 RETURN(KERN_NO_SPACE
);
1347 * "start" and "end" should define the endpoints of the
1348 * available new range, and
1349 * "entry" should refer to the region before the new
1352 * the map should be locked.
1356 * See whether we can avoid creating a new entry (and object) by
1357 * extending one of our neighbors. [So far, we only attempt to
1358 * extend from below.]
1361 if ((object
== VM_OBJECT_NULL
) &&
1362 (entry
!= vm_map_to_entry(map
)) &&
1363 (entry
->vme_end
== start
) &&
1364 (!entry
->is_shared
) &&
1365 (!entry
->is_sub_map
) &&
1366 (entry
->alias
== alias
) &&
1367 (entry
->inheritance
== inheritance
) &&
1368 (entry
->protection
== cur_protection
) &&
1369 (entry
->max_protection
== max_protection
) &&
1370 (entry
->behavior
== VM_BEHAVIOR_DEFAULT
) &&
1371 (entry
->in_transition
== 0) &&
1372 ((entry
->vme_end
- entry
->vme_start
) + size
< NO_COALESCE_LIMIT
) &&
1373 (entry
->wired_count
== 0)) { /* implies user_wired_count == 0 */
1374 if (vm_object_coalesce(entry
->object
.vm_object
,
1377 (vm_object_offset_t
) 0,
1378 (vm_size_t
)(entry
->vme_end
- entry
->vme_start
),
1379 (vm_size_t
)(end
- entry
->vme_end
))) {
1382 * Coalesced the two objects - can extend
1383 * the previous map entry to include the
1386 map
->size
+= (end
- entry
->vme_end
);
1387 entry
->vme_end
= end
;
1388 UPDATE_FIRST_FREE(map
, map
->first_free
);
1389 RETURN(KERN_SUCCESS
);
1394 * Create a new entry
1398 register vm_map_entry_t new_entry
;
1400 new_entry
= vm_map_entry_insert(map
, entry
, start
, end
, object
,
1401 offset
, needs_copy
, FALSE
, FALSE
,
1402 cur_protection
, max_protection
,
1403 VM_BEHAVIOR_DEFAULT
, inheritance
, 0);
1404 new_entry
->alias
= alias
;
1407 /* Wire down the new entry if the user
1408 * requested all new map entries be wired.
1410 if (map
->wiring_required
) {
1411 result
= vm_map_wire(map
, start
, end
,
1412 new_entry
->protection
, TRUE
);
1416 if ((object
!= VM_OBJECT_NULL
) &&
1417 (vm_map_pmap_enter_enable
) &&
1420 (size
< (128*1024))) {
1421 vm_map_pmap_enter(map
, start
, end
,
1422 object
, offset
, cur_protection
);
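
/*
 *	Usage sketch (illustrative only; the helper name is hypothetical):
 *	mapping "size" bytes of an object anywhere in a task's map with the
 *	default protections, the common way callers drive vm_map_enter().
 */
#if 0	/* example only */
static kern_return_t
example_map_object(vm_map_t map, vm_object_t object, vm_size_t size,
		   vm_offset_t *addrp)
{
	*addrp = 0;
	return vm_map_enter(map, addrp, size,
			    (vm_offset_t) 0,			/* mask */
			    VM_FLAGS_ANYWHERE,
			    object, (vm_object_offset_t) 0,
			    FALSE,				/* needs_copy */
			    VM_PROT_DEFAULT, VM_PROT_ALL,
			    VM_INHERIT_DEFAULT);
}
#endif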
1436 * vm_map_clip_start: [ internal use only ]
1438 * Asserts that the given entry begins at or after
1439 * the specified address; if necessary,
1440 * it splits the entry into two.
1443 #define vm_map_clip_start(map, entry, startaddr) \
1445 vm_map_t VMCS_map; \
1446 vm_map_entry_t VMCS_entry; \
1447 vm_offset_t VMCS_startaddr; \
1449 VMCS_entry = (entry); \
1450 VMCS_startaddr = (startaddr); \
1451 if (VMCS_startaddr > VMCS_entry->vme_start) { \
1452 if(entry->use_pmap) { \
1453 vm_offset_t pmap_base_addr; \
1455 pmap_base_addr = 0xF0000000 & entry->vme_start; \
1456 pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
1457 entry->use_pmap = FALSE; \
1458 } else if(entry->object.vm_object \
1459 && !entry->is_sub_map \
1460 && entry->object.vm_object->phys_contiguous) { \
1461 pmap_remove(map->pmap, \
1462 (addr64_t)(entry->vme_start), \
1463 (addr64_t)(entry->vme_end)); \
1465 _vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
1467 UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
1470 #define vm_map_clip_start(map, entry, startaddr) \
1472 vm_map_t VMCS_map; \
1473 vm_map_entry_t VMCS_entry; \
1474 vm_offset_t VMCS_startaddr; \
1476 VMCS_entry = (entry); \
1477 VMCS_startaddr = (startaddr); \
1478 if (VMCS_startaddr > VMCS_entry->vme_start) { \
1479 _vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
1481 UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
1485 #define vm_map_copy_clip_start(copy, entry, startaddr) \
1487 if ((startaddr) > (entry)->vme_start) \
1488 _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
1492 * This routine is called only when it is known that
1493 * the entry must be split.
1497 register struct vm_map_header
*map_header
,
1498 register vm_map_entry_t entry
,
1499 register vm_offset_t start
)
1501 register vm_map_entry_t new_entry
;
1504 * Split off the front portion --
1505 * note that we must insert the new
1506 * entry BEFORE this one, so that
1507 * this entry has the specified starting
1511 new_entry
= _vm_map_entry_create(map_header
);
1512 vm_map_entry_copy_full(new_entry
, entry
);
1514 new_entry
->vme_end
= start
;
1515 entry
->offset
+= (start
- entry
->vme_start
);
1516 entry
->vme_start
= start
;
1518 _vm_map_entry_link(map_header
, entry
->vme_prev
, new_entry
);
1520 if (entry
->is_sub_map
)
1521 vm_map_reference(new_entry
->object
.sub_map
);
1523 vm_object_reference(new_entry
->object
.vm_object
);
1528 * vm_map_clip_end: [ internal use only ]
1530 * Asserts that the given entry ends at or before
1531 * the specified address; if necessary,
1532 * it splits the entry into two.
1535 #define vm_map_clip_end(map, entry, endaddr) \
1537 vm_map_t VMCE_map; \
1538 vm_map_entry_t VMCE_entry; \
1539 vm_offset_t VMCE_endaddr; \
1541 VMCE_entry = (entry); \
1542 VMCE_endaddr = (endaddr); \
1543 if (VMCE_endaddr < VMCE_entry->vme_end) { \
1544 if(entry->use_pmap) { \
1545 vm_offset_t pmap_base_addr; \
1547 pmap_base_addr = 0xF0000000 & entry->vme_start; \
1548 pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
1549 entry->use_pmap = FALSE; \
1550 } else if(entry->object.vm_object \
1551 && !entry->is_sub_map \
1552 && entry->object.vm_object->phys_contiguous) { \
1553 pmap_remove(map->pmap, \
1554 (addr64_t)(entry->vme_start), \
1555 (addr64_t)(entry->vme_end)); \
1557 _vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
1559 UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \
1562 #define vm_map_clip_end(map, entry, endaddr) \
1564 vm_map_t VMCE_map; \
1565 vm_map_entry_t VMCE_entry; \
1566 vm_offset_t VMCE_endaddr; \
1568 VMCE_entry = (entry); \
1569 VMCE_endaddr = (endaddr); \
1570 if (VMCE_endaddr < VMCE_entry->vme_end) { \
1571 _vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
1573 UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \
1577 #define vm_map_copy_clip_end(copy, entry, endaddr) \
1579 if ((endaddr) < (entry)->vme_end) \
1580 _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
1584 * This routine is called only when it is known that
1585 * the entry must be split.
1589 register struct vm_map_header
*map_header
,
1590 register vm_map_entry_t entry
,
1591 register vm_offset_t end
)
1593 register vm_map_entry_t new_entry
;
1596 * Create a new entry and insert it
1597 * AFTER the specified entry
1600 new_entry
= _vm_map_entry_create(map_header
);
1601 vm_map_entry_copy_full(new_entry
, entry
);
1603 new_entry
->vme_start
= entry
->vme_end
= end
;
1604 new_entry
->offset
+= (end
- entry
->vme_start
);
1606 _vm_map_entry_link(map_header
, entry
, new_entry
);
1608 if (entry
->is_sub_map
)
1609 vm_map_reference(new_entry
->object
.sub_map
);
1611 vm_object_reference(new_entry
->object
.vm_object
);
1616 * VM_MAP_RANGE_CHECK: [ internal use only ]
1618 * Asserts that the starting and ending region
1619 * addresses fall within the valid range of the map.
1621 #define VM_MAP_RANGE_CHECK(map, start, end) \
1623 if (start < vm_map_min(map)) \
1624 start = vm_map_min(map); \
1625 if (end > vm_map_max(map)) \
1626 end = vm_map_max(map); \
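
/*
 *	Illustrative sketch (not part of the original source): the clipping
 *	pattern used throughout this file -- bound the request to the map,
 *	then split the covering entries exactly at the requested edges.
 */
#if 0	/* example only */
	VM_MAP_RANGE_CHECK(map, start, end);
	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);
	}
#endif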
1632 * vm_map_range_check: [ internal use only ]
1634 * Check that the region defined by the specified start and
1635 * end addresses is wholly contained within a single map
1636 * entry or set of adjacent map entries of the specified map,
1637 * i.e. the specified region contains no unmapped space.
1638 * If any or all of the region is unmapped, FALSE is returned.
1639 * Otherwise, TRUE is returned and if the output argument 'entry'
1640 * is not NULL it points to the map entry containing the start
1643 * The map is locked for reading on entry and is left locked.
1647 register vm_map_t map
,
1648 register vm_offset_t start
,
1649 register vm_offset_t end
,
1650 vm_map_entry_t
*entry
)
1653 register vm_offset_t prev
;
1656 * Basic sanity checks first
1658 if (start
< vm_map_min(map
) || end
> vm_map_max(map
) || start
> end
)
1662 * Check first if the region starts within a valid
1663 * mapping for the map.
1665 if (!vm_map_lookup_entry(map
, start
, &cur
))
1669 * Optimize for the case that the region is contained
1670 * in a single map entry.
1672 if (entry
!= (vm_map_entry_t
*) NULL
)
1674 if (end
<= cur
->vme_end
)
1678 * If the region is not wholly contained within a
1679 * single entry, walk the entries looking for holes.
1681 prev
= cur
->vme_end
;
1682 cur
= cur
->vme_next
;
1683 while ((cur
!= vm_map_to_entry(map
)) && (prev
== cur
->vme_start
)) {
1684 if (end
<= cur
->vme_end
)
1686 prev
= cur
->vme_end
;
1687 cur
= cur
->vme_next
;
1693 * vm_map_submap: [ kernel use only ]
1695 * Mark the given range as handled by a subordinate map.
1697 * This range must have been created with vm_map_find using
1698 * the vm_submap_object, and no other operations may have been
1699 * performed on this range prior to calling vm_map_submap.
1701 * Only a limited number of operations can be performed
1702 * within this range after calling vm_map_submap:
1704 * [Don't try vm_map_copyin!]
1706 * To remove a submapping, one must first remove the
1707 * range from the superior map, and then destroy the
1708 * submap (if desired). [Better yet, don't try it.]
1712 register vm_map_t map
,
1713 register vm_offset_t start
,
1714 register vm_offset_t end
,
1719 vm_map_entry_t entry
;
1720 register kern_return_t result
= KERN_INVALID_ARGUMENT
;
1721 register vm_object_t object
;
1725 submap
->mapped
= TRUE
;
1727 VM_MAP_RANGE_CHECK(map
, start
, end
);
1729 if (vm_map_lookup_entry(map
, start
, &entry
)) {
1730 vm_map_clip_start(map
, entry
, start
);
1733 entry
= entry
->vme_next
;
1735 if(entry
== vm_map_to_entry(map
)) {
1737 return KERN_INVALID_ARGUMENT
;
1740 vm_map_clip_end(map
, entry
, end
);
1742 if ((entry
->vme_start
== start
) && (entry
->vme_end
== end
) &&
1743 (!entry
->is_sub_map
) &&
1744 ((object
= entry
->object
.vm_object
) == vm_submap_object
) &&
1745 (object
->resident_page_count
== 0) &&
1746 (object
->copy
== VM_OBJECT_NULL
) &&
1747 (object
->shadow
== VM_OBJECT_NULL
) &&
1748 (!object
->pager_created
)) {
1749 entry
->offset
= (vm_object_offset_t
)offset
;
1750 entry
->object
.vm_object
= VM_OBJECT_NULL
;
1751 vm_object_deallocate(object
);
1752 entry
->is_sub_map
= TRUE
;
1753 entry
->object
.sub_map
= submap
;
1754 vm_map_reference(submap
);
1756 if ((use_pmap
) && (offset
== 0)) {
1757 /* nest if platform code will allow */
1758 if(submap
->pmap
== NULL
) {
1759 submap
->pmap
= pmap_create((vm_size_t
) 0);
1760 if(submap
->pmap
== PMAP_NULL
) {
1761 return(KERN_NO_SPACE
);
1764 result
= pmap_nest(map
->pmap
, (entry
->object
.sub_map
)->pmap
,
1765 (addr64_t
)start
, (addr64_t
)start
, (uint64_t)(end
- start
));
1767 panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result
);
1768 entry
->use_pmap
= TRUE
;
1772 pmap_remove(map
->pmap
, (addr64_t
)start
, (addr64_t
)end
);
1774 result
= KERN_SUCCESS
;
1784 * Sets the protection of the specified address
1785 * region in the target map. If "set_max" is
1786 * specified, the maximum protection is to be set;
1787 * otherwise, only the current protection is affected.
1791 register vm_map_t map
,
1792 register vm_offset_t start
,
1793 register vm_offset_t end
,
1794 register vm_prot_t new_prot
,
1795 register boolean_t set_max
)
1797 register vm_map_entry_t current
;
1798 register vm_offset_t prev
;
1799 vm_map_entry_t entry
;
1804 "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d",
1805 (integer_t
)map
, start
, end
, new_prot
, set_max
);
1810 * Lookup the entry. If it doesn't start in a valid
1811 * entry, return an error. Remember if we need to
1812 * clip the entry. We don't do it here because we don't
1813 * want to make any changes until we've scanned the
1814 * entire range below for address and protection
1817 if (!(clip
= vm_map_lookup_entry(map
, start
, &entry
))) {
1819 return(KERN_INVALID_ADDRESS
);
1823 * Make a first pass to check for protection and address
1828 prev
= current
->vme_start
;
1829 while ((current
!= vm_map_to_entry(map
)) &&
1830 (current
->vme_start
< end
)) {
1833 * If there is a hole, return an error.
1835 if (current
->vme_start
!= prev
) {
1837 return(KERN_INVALID_ADDRESS
);
1840 new_max
= current
->max_protection
;
1841 if(new_prot
& VM_PROT_COPY
) {
1842 new_max
|= VM_PROT_WRITE
;
1843 if ((new_prot
& (new_max
| VM_PROT_COPY
)) != new_prot
) {
1845 return(KERN_PROTECTION_FAILURE
);
1848 if ((new_prot
& new_max
) != new_prot
) {
1850 return(KERN_PROTECTION_FAILURE
);
1854 prev
= current
->vme_end
;
1855 current
= current
->vme_next
;
1859 return(KERN_INVALID_ADDRESS
);
1863 * Go back and fix up protections.
1864 * Clip to start here if the range starts within
1870 vm_map_clip_start(map
, entry
, start
);
1872 while ((current
!= vm_map_to_entry(map
)) &&
1873 (current
->vme_start
< end
)) {
1877 vm_map_clip_end(map
, current
, end
);
1879 old_prot
= current
->protection
;
1881 if(new_prot
& VM_PROT_COPY
) {
1882 /* caller is asking specifically to copy the */
1883 /* mapped data, this implies that max protection */
1884 /* will include write. Caller must be prepared */
1885 /* for loss of shared memory communication in the */
1886 /* target area after taking this step */
1887 current
->needs_copy
= TRUE
;
1888 current
->max_protection
|= VM_PROT_WRITE
;
1892 current
->protection
=
1893 (current
->max_protection
=
1894 new_prot
& ~VM_PROT_COPY
) &
1897 current
->protection
= new_prot
& ~VM_PROT_COPY
;
1900 * Update physical map if necessary.
1901 * If the request is to turn off write protection,
1902 * we won't do it for real (in pmap). This is because
1903 * it would cause copy-on-write to fail. We've already
1904 * set the new protection in the map, so if a
1905 * write-protect fault occurred, it will be fixed up
1906 * properly, COW or not.
1908 /* the 256M hack for existing hardware limitations */
1909 if (current
->protection
!= old_prot
) {
1910 if(current
->is_sub_map
&& current
->use_pmap
) {
1911 vm_offset_t pmap_base_addr
;
1912 vm_offset_t pmap_end_addr
;
1913 vm_map_entry_t local_entry
;
1915 pmap_base_addr
= 0xF0000000 & current
->vme_start
;
1916 pmap_end_addr
= (pmap_base_addr
+ 0x10000000) - 1;
1918 if(!vm_map_lookup_entry(map
,
1919 pmap_base_addr
, &local_entry
))
1920 panic("vm_map_protect: nested pmap area is missing");
1921 while ((local_entry
!= vm_map_to_entry(map
)) &&
1922 (local_entry
->vme_start
< pmap_end_addr
)) {
1923 local_entry
->use_pmap
= FALSE
;
1924 local_entry
= local_entry
->vme_next
;
1926 pmap_unnest(map
->pmap
, (addr64_t
)pmap_base_addr
);
1929 if (!(current
->protection
& VM_PROT_WRITE
)) {
1930 /* Look one level in: we support nested pmaps */
1931 /* from mapped submaps which are direct entries */
1933 if(current
->is_sub_map
&& current
->use_pmap
) {
1934 pmap_protect(current
->object
.sub_map
->pmap
,
1937 current
->protection
);
1939 pmap_protect(map
->pmap
, current
->vme_start
,
1941 current
->protection
);
1945 current
= current
->vme_next
;
1949 return(KERN_SUCCESS
);
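
/*
 *	Usage sketch (illustrative only; the helper name is hypothetical):
 *	dropping a range to read-only.  With set_max FALSE only the current
 *	protection changes; requesting bits outside max_protection fails
 *	with KERN_PROTECTION_FAILURE.
 */
#if 0	/* example only */
static kern_return_t
example_make_readonly(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
}
#endif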
1955 * Sets the inheritance of the specified address
1956 * range in the target map. Inheritance
1957 * affects how the map will be shared with
1958 * child maps at the time of vm_map_fork.
1962 register vm_map_t map
,
1963 register vm_offset_t start
,
1964 register vm_offset_t end
,
1965 register vm_inherit_t new_inheritance
)
1967 register vm_map_entry_t entry
;
1968 vm_map_entry_t temp_entry
;
1972 VM_MAP_RANGE_CHECK(map
, start
, end
);
1974 if (vm_map_lookup_entry(map
, start
, &temp_entry
)) {
1976 vm_map_clip_start(map
, entry
, start
);
1979 temp_entry
= temp_entry
->vme_next
;
1983 /* first check entire range for submaps which can't support the */
1984 /* given inheritance. */
1985 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
1986 if(entry
->is_sub_map
) {
1987 if(new_inheritance
== VM_INHERIT_COPY
)
1988 return(KERN_INVALID_ARGUMENT
);
1991 entry
= entry
->vme_next
;
1996 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
1997 vm_map_clip_end(map
, entry
, end
);
1999 entry
->inheritance
= new_inheritance
;
2001 entry
= entry
->vme_next
;
2005 return(KERN_SUCCESS
);
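
/*
 *	Usage sketch (illustrative only; the helper name is hypothetical):
 *	marking a range so that children created by vm_map_fork() share it
 *	with the parent instead of receiving a copy.
 */
#if 0	/* example only */
static kern_return_t
example_share_with_children(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
}
#endif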
2011 * Sets the pageability of the specified address range in the
2012 * target map as wired. Regions specified as not pageable require
2013 * locked-down physical memory and physical page maps. The
2014 * access_type variable indicates types of accesses that must not
2015 * generate page faults. This is checked against protection of
2016 * memory being locked-down.
2018 * The map must not be locked, but a reference must remain to the
2019 * map throughout the call.
2023 register vm_map_t map
,
2024 register vm_offset_t start
,
2025 register vm_offset_t end
,
2026 register vm_prot_t access_type
,
2027 boolean_t user_wire
,
2029 vm_offset_t pmap_addr
)
2031 register vm_map_entry_t entry
;
2032 struct vm_map_entry
*first_entry
, tmp_entry
;
2034 register vm_offset_t s
,e
;
2036 boolean_t need_wakeup
;
2037 boolean_t main_map
= FALSE
;
2038 wait_interrupt_t interruptible_state
;
2039 thread_t cur_thread
;
2040 unsigned int last_timestamp
;
2044 if(map_pmap
== NULL
)
2046 last_timestamp
= map
->timestamp
;
2048 VM_MAP_RANGE_CHECK(map
, start
, end
);
2049 assert(page_aligned(start
));
2050 assert(page_aligned(end
));
2052 /* We wired what the caller asked for, zero pages */
2054 return KERN_SUCCESS
;
2057 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
2058 entry
= first_entry
;
2059 /* vm_map_clip_start will be done later. */
2061 /* Start address is not in map */
2063 return(KERN_INVALID_ADDRESS
);
2067 need_wakeup
= FALSE
;
2068 cur_thread
= current_thread();
2069 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
2071 * If another thread is wiring/unwiring this entry then
2072 * block after informing other thread to wake us up.
2074 if (entry
->in_transition
) {
2075 wait_result_t wait_result
;
2078 * We have not clipped the entry. Make sure that
2079 * the start address is in range so that the lookup
2080 * below will succeed.
2082 s
= entry
->vme_start
< start
? start
: entry
->vme_start
;
2084 entry
->needs_wakeup
= TRUE
;
2087 * wake up anybody waiting on entries that we have
2091 vm_map_entry_wakeup(map
);
2092 need_wakeup
= FALSE
;
2095 * User wiring is interruptible
2097 wait_result
= vm_map_entry_wait(map
,
2098 (user_wire
) ? THREAD_ABORTSAFE
:
2100 if (user_wire
&& wait_result
== THREAD_INTERRUPTED
) {
2102 * undo the wirings we have done so far
2103 * We do not clear the needs_wakeup flag,
2104 * because we cannot tell if we were the
2108 vm_map_unwire(map
, start
, s
, user_wire
);
2109 return(KERN_FAILURE
);
2113 * Cannot avoid a lookup here. reset timestamp.
2115 last_timestamp
= map
->timestamp
;
2118 * The entry could have been clipped, look it up again.
2119 * Worst that can happen is that it may not exist anymore.
2121 if (!vm_map_lookup_entry(map
, s
, &first_entry
)) {
2123 panic("vm_map_wire: re-lookup failed");
2126 * User: undo everything up to the previous
2127 * entry. let vm_map_unwire worry about
2128 * checking the validity of the range.
2131 vm_map_unwire(map
, start
, s
, user_wire
);
2132 return(KERN_FAILURE
);
2134 entry
= first_entry
;
2138 if(entry
->is_sub_map
) {
2139 vm_offset_t sub_start
;
2140 vm_offset_t sub_end
;
2141 vm_offset_t local_start
;
2142 vm_offset_t local_end
;
2145 vm_map_clip_start(map
, entry
, start
);
2146 vm_map_clip_end(map
, entry
, end
);
2148 sub_start
= entry
->offset
;
2149 sub_end
= entry
->vme_end
- entry
->vme_start
;
2150 sub_end
+= entry
->offset
;
2152 local_end
= entry
->vme_end
;
2153 if(map_pmap
== NULL
) {
2154 if(entry
->use_pmap
) {
2155 pmap
= entry
->object
.sub_map
->pmap
;
2156 /* ppc implementation requires that */
2157 /* submaps pmap address ranges line */
2158 /* up with parent map */
2160 pmap_addr
= sub_start
;
2167 if (entry
->wired_count
) {
2168 if (entry
->wired_count
2170 panic("vm_map_wire: too many wirings");
2173 entry
->user_wired_count
2174 >= MAX_WIRE_COUNT
) {
2176 vm_map_unwire(map
, start
,
2177 entry
->vme_start
, user_wire
);
2178 return(KERN_FAILURE
);
2181 entry
->user_wired_count
++;
2183 (entry
->user_wired_count
== 0))
2184 entry
->wired_count
++;
2185 entry
= entry
->vme_next
;
2190 vm_object_offset_t offset_hi
;
2191 vm_object_offset_t offset_lo
;
2192 vm_object_offset_t offset
;
2195 vm_behavior_t behavior
;
2196 vm_map_entry_t local_entry
;
2197 vm_map_version_t version
;
2198 vm_map_t lookup_map
;
2200 /* call vm_map_lookup_locked to */
2201 /* cause any needs copy to be */
2203 local_start
= entry
->vme_start
;
2205 vm_map_lock_write_to_read(map
);
2206 if(vm_map_lookup_locked(
2207 &lookup_map
, local_start
,
2210 &offset
, &prot
, &wired
,
2211 &behavior
, &offset_lo
,
2212 &offset_hi
, &pmap_map
)) {
2214 vm_map_unlock(lookup_map
);
2215 vm_map_unwire(map
, start
,
2216 entry
->vme_start
, user_wire
);
2217 return(KERN_FAILURE
);
2219 if(pmap_map
!= lookup_map
)
2220 vm_map_unlock(pmap_map
);
2221 vm_map_unlock_read(lookup_map
);
2223 vm_object_unlock(object
);
2225 if (!vm_map_lookup_entry(map
,
2226 local_start
, &local_entry
)) {
2228 vm_map_unwire(map
, start
,
2229 entry
->vme_start
, user_wire
);
2230 return(KERN_FAILURE
);
2232 /* did we have a change of type? */
2233 if (!local_entry
->is_sub_map
) {
2234 last_timestamp
= map
->timestamp
;
2237 entry
= local_entry
;
2239 entry
->user_wired_count
++;
2241 (entry
->user_wired_count
== 1))
2242 entry
->wired_count
++;
2244 entry
->in_transition
= TRUE
;
2247 rc
= vm_map_wire_nested(
2248 entry
->object
.sub_map
,
2251 user_wire
, pmap
, pmap_addr
);
2255 local_start
= entry
->vme_start
;
2257 entry
->user_wired_count
++;
2259 (entry
->user_wired_count
== 1))
2260 entry
->wired_count
++;
2262 rc
= vm_map_wire_nested(entry
->object
.sub_map
,
2265 user_wire
, pmap
, pmap_addr
);
2268 s
= entry
->vme_start
;
2272 * Find the entry again. It could have been clipped
2273 * after we unlocked the map.
2275 if (!vm_map_lookup_entry(map
, local_start
,
2277 panic("vm_map_wire: re-lookup failed");
2278 entry
= first_entry
;
2280 last_timestamp
= map
->timestamp
;
2281 while ((entry
!= vm_map_to_entry(map
)) &&
2282 (entry
->vme_start
< e
)) {
2283 assert(entry
->in_transition
);
2284 entry
->in_transition
= FALSE
;
2285 if (entry
->needs_wakeup
) {
2286 entry
->needs_wakeup
= FALSE
;
2289 if (rc
!= KERN_SUCCESS
) {/* from vm_*_wire */
2291 entry
->user_wired_count
--;
2293 (entry
->user_wired_count
== 0))
2294 entry
->wired_count
--;
2296 entry
= entry
->vme_next
;
2298 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2301 vm_map_entry_wakeup(map
);
2303 * undo everything up to the previous entry.
2305 (void)vm_map_unwire(map
, start
, s
, user_wire
);
2312 * If this entry is already wired then increment
2313 * the appropriate wire reference count.
2315 if (entry
->wired_count
) {
2316 /* sanity check: wired_count is a short */
2317 if (entry
->wired_count
>= MAX_WIRE_COUNT
)
2318 panic("vm_map_wire: too many wirings");
2321 entry
->user_wired_count
>= MAX_WIRE_COUNT
) {
2323 vm_map_unwire(map
, start
,
2324 entry
->vme_start
, user_wire
);
2325 return(KERN_FAILURE
);
2328 * entry is already wired down, get our reference
2329 * after clipping to our range.
2331 vm_map_clip_start(map
, entry
, start
);
2332 vm_map_clip_end(map
, entry
, end
);
2334 entry
->user_wired_count
++;
2335 if ((!user_wire
) || (entry
->user_wired_count
== 1))
2336 entry
->wired_count
++;
2338 entry
= entry
->vme_next
;
2343 * Unwired entry or wire request transmitted via submap
2348 * Perform actions of vm_map_lookup that need the write
2349 * lock on the map: create a shadow object for a
2350 * copy-on-write region, or an object for a zero-fill
2353 size
= entry
->vme_end
- entry
->vme_start
;
2355 * If wiring a copy-on-write page, we need to copy it now
2356 * even if we're only (currently) requesting read access.
2357 * This is aggressive, but once it's wired we can't move it.
2359 if (entry
->needs_copy
) {
2360 vm_object_shadow(&entry
->object
.vm_object
,
2361 &entry
->offset
, size
);
2362 entry
->needs_copy
= FALSE
;
2363 } else if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
2364 entry
->object
.vm_object
= vm_object_allocate(size
);
2365 entry
->offset
= (vm_object_offset_t
)0;
2368 vm_map_clip_start(map
, entry
, start
);
2369 vm_map_clip_end(map
, entry
, end
);
2371 s
= entry
->vme_start
;
2375 * Check for holes and protection mismatch.
2376 * Holes: Next entry should be contiguous unless this
2377 * is the end of the region.
2378 * Protection: Access requested must be allowed, unless
2379 * wiring is by protection class
2381 if ((((entry
->vme_end
< end
) &&
2382 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2383 (entry
->vme_next
->vme_start
> entry
->vme_end
))) ||
2384 ((entry
->protection
& access_type
) != access_type
))) {
2386 * Found a hole or protection problem.
2387 * Unwire the region we wired so far.
2389 if (start
!= entry
->vme_start
) {
2391 vm_map_unwire(map
, start
, s
, user_wire
);
2395 return((entry
->protection
&access_type
) != access_type
?
2396 KERN_PROTECTION_FAILURE
: KERN_INVALID_ADDRESS
);
2399 assert(entry
->wired_count
== 0 && entry
->user_wired_count
== 0);
2402 entry
->user_wired_count
++;
2403 if ((!user_wire
) || (entry
->user_wired_count
== 1))
2404 entry
->wired_count
++;
2406 entry
->in_transition
= TRUE
;
2409 * This entry might get split once we unlock the map.
2410 * In vm_fault_wire(), we need the current range as
2411 * defined by this entry. In order for this to work
2412 * along with a simultaneous clip operation, we make a
2413 * temporary copy of this entry and use that for the
2414 * wiring. Note that the underlying objects do not
2415 * change during a clip.
2420 * The in_transition state guarantees that the entry
2421 * (or entries for this range, if split occurred) will be
2422 * there when the map lock is acquired for the second time.
2426 if (!user_wire
&& cur_thread
!= THREAD_NULL
)
2427 interruptible_state
= thread_interrupt_level(THREAD_UNINT
);
2430 rc
= vm_fault_wire(map
,
2431 &tmp_entry
, map_pmap
, pmap_addr
);
2433 rc
= vm_fault_wire(map
,
2434 &tmp_entry
, map
->pmap
,
2435 tmp_entry
.vme_start
);
2437 if (!user_wire
&& cur_thread
!= THREAD_NULL
)
2438 thread_interrupt_level(interruptible_state
);
2442 if (last_timestamp
+1 != map
->timestamp
) {
2444 * Find the entry again. It could have been clipped
2445 * after we unlocked the map.
2447 if (!vm_map_lookup_entry(map
, tmp_entry
.vme_start
,
2449 panic("vm_map_wire: re-lookup failed");
2451 entry
= first_entry
;
2454 last_timestamp
= map
->timestamp
;
2456 while ((entry
!= vm_map_to_entry(map
)) &&
2457 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2458 assert(entry
->in_transition
);
2459 entry
->in_transition
= FALSE
;
2460 if (entry
->needs_wakeup
) {
2461 entry
->needs_wakeup
= FALSE
;
2464 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2466 entry
->user_wired_count
--;
2468 (entry
->user_wired_count
== 0))
2469 entry
->wired_count
--;
2471 entry
= entry
->vme_next
;
2474 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2477 vm_map_entry_wakeup(map
);
2479 * undo everything up to the previous entry.
2481 (void)vm_map_unwire(map
, start
, s
, user_wire
);
	} /* end while loop through map entries */

	/*
	 * wake up anybody waiting on entries we wired.
	 */
	if (need_wakeup)
		vm_map_entry_wakeup(map);

	return(KERN_SUCCESS);

}
kern_return_t
vm_map_wire(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	access_type,
	boolean_t		user_wire)
{

	kern_return_t	kret;

#ifdef ppc
	/*
	 * the calls to mapping_prealloc and mapping_relpre
	 * (along with the VM_MAP_RANGE_CHECK to ensure a
	 * reasonable range was passed in) are
	 * currently necessary because
	 * we haven't enabled kernel pre-emption
	 * and/or the pmap_enter cannot purge and re-use
	 * existing mappings
	 */
	VM_MAP_RANGE_CHECK(map, start, end);
	mapping_prealloc(end - start);
#endif
	kret = vm_map_wire_nested(map, start, end, access_type, 
				  user_wire, (pmap_t)NULL, 0);
#ifdef ppc
	mapping_relpre();
#endif
	return kret;
}
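
/*
 * Illustrative sketch (not part of the original source): how a caller
 * might pin a page-aligned range with vm_map_wire() and release it with
 * vm_map_unwire().  The helper and variable names below are hypothetical;
 * only the vm_map_wire/vm_map_unwire calls and the meaning of user_wire
 * come from this file.  user_wire is TRUE here, so the wiring counts
 * against the caller's user_wired_count rather than the kernel wiring.
 */
#if 0	/* example only -- not compiled */
static kern_return_t
example_wire_user_range(
	vm_map_t	map,		/* e.g. current_map() */
	vm_offset_t	start,		/* page aligned */
	vm_size_t	size)		/* page aligned */
{
	kern_return_t	kr;

	kr = vm_map_wire(map, start, start + size,
			 VM_PROT_READ | VM_PROT_WRITE,	/* access to fault in */
			 TRUE);				/* user_wire */
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... touch the memory, start I/O, etc. ... */

	/* every successful wire must be balanced by an unwire */
	(void) vm_map_unwire(map, start, start + size, TRUE);
	return KERN_SUCCESS;
}
#endif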
/*
 *	vm_map_unwire:
 *
 *	Sets the pageability of the specified address range in the target
 *	as pageable.  Regions specified must have been wired previously.
 *
 *	The map must not be locked, but a reference must remain to the map
 *	throughout the call.
 *
 *	Kernel will panic on failures.  User unwire ignores holes and
 *	unwired and intransition entries to avoid losing memory by leaving
 *	it unwired.
 */
kern_return_t
vm_map_unwire_nested(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	boolean_t		user_wire,
	pmap_t			map_pmap,
	vm_offset_t		pmap_addr)
{
	register vm_map_entry_t	entry;
	struct vm_map_entry	*first_entry, tmp_entry;
	boolean_t		need_wakeup;
	boolean_t		main_map = FALSE;
	unsigned int		last_timestamp;

	vm_map_lock(map);
	if(map_pmap == NULL)
		main_map = TRUE;
	last_timestamp = map->timestamp;

	VM_MAP_RANGE_CHECK(map, start, end);
	assert(page_aligned(start));
	assert(page_aligned(end));

	if (vm_map_lookup_entry(map, start, &first_entry)) {
		entry = first_entry;
		/* vm_map_clip_start will be done later. */
	}
	else {
		/* Start address is not in map. */
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	need_wakeup = FALSE;
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
2578 if (entry
->in_transition
) {
2581 * Another thread is wiring down this entry. Note
2582 * that if it is not for the other thread we would
2583 * be unwiring an unwired entry. This is not
2584 * permitted. If we wait, we will be unwiring memory
2588 * Another thread is unwiring this entry. We did not
2589 * have a reference to it, because if we did, this
2590 * entry will not be getting unwired now.
2593 panic("vm_map_unwire: in_transition entry");
2595 entry
= entry
->vme_next
;
2599 if(entry
->is_sub_map
) {
2600 vm_offset_t sub_start
;
2601 vm_offset_t sub_end
;
2602 vm_offset_t local_end
;
2606 vm_map_clip_start(map
, entry
, start
);
2607 vm_map_clip_end(map
, entry
, end
);
2609 sub_start
= entry
->offset
;
2610 sub_end
= entry
->vme_end
- entry
->vme_start
;
2611 sub_end
+= entry
->offset
;
2612 local_end
= entry
->vme_end
;
2613 if(map_pmap
== NULL
) {
2614 if(entry
->use_pmap
) {
2615 pmap
= entry
->object
.sub_map
->pmap
;
2616 pmap_addr
= sub_start
;
2621 if (entry
->wired_count
== 0 ||
2622 (user_wire
&& entry
->user_wired_count
== 0)) {
2624 panic("vm_map_unwire: entry is unwired");
2625 entry
= entry
->vme_next
;
2631 * Holes: Next entry should be contiguous unless
2632 * this is the end of the region.
2634 if (((entry
->vme_end
< end
) &&
2635 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2636 (entry
->vme_next
->vme_start
2637 > entry
->vme_end
)))) {
2639 panic("vm_map_unwire: non-contiguous region");
2641 entry = entry->vme_next;
2646 if (!user_wire
|| (--entry
->user_wired_count
== 0))
2647 entry
->wired_count
--;
2649 if (entry
->wired_count
!= 0) {
2650 entry
= entry
->vme_next
;
2654 entry
->in_transition
= TRUE
;
2655 tmp_entry
= *entry
;/* see comment in vm_map_wire() */
2658 * We can unlock the map now. The in_transition state
2659 * guarantees existence of the entry.
2662 vm_map_unwire_nested(entry
->object
.sub_map
,
2663 sub_start
, sub_end
, user_wire
, pmap
, pmap_addr
);
2666 if (last_timestamp
+1 != map
->timestamp
) {
2668 * Find the entry again. It could have been
2669 * clipped or deleted after we unlocked the map.
2671 if (!vm_map_lookup_entry(map
,
2672 tmp_entry
.vme_start
,
2675 panic("vm_map_unwire: re-lookup failed");
2676 entry
= first_entry
->vme_next
;
2678 entry
= first_entry
;
2680 last_timestamp
= map
->timestamp
;
2683 * clear transition bit for all constituent entries
2684 * that were in the original entry (saved in
2685 * tmp_entry). Also check for waiters.
2687 while ((entry
!= vm_map_to_entry(map
)) &&
2688 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2689 assert(entry
->in_transition
);
2690 entry
->in_transition
= FALSE
;
2691 if (entry
->needs_wakeup
) {
2692 entry
->needs_wakeup
= FALSE
;
2695 entry
= entry
->vme_next
;
2700 vm_map_unwire_nested(entry
->object
.sub_map
,
2701 sub_start
, sub_end
, user_wire
, pmap
, pmap_addr
);
2704 if (last_timestamp
+1 != map
->timestamp
) {
2706 * Find the entry again. It could have been
2707 * clipped or deleted after we unlocked the map.
2709 if (!vm_map_lookup_entry(map
,
2710 tmp_entry
.vme_start
,
2713 panic("vm_map_unwire: re-lookup failed");
2714 entry
= first_entry
->vme_next
;
2716 entry
= first_entry
;
2718 last_timestamp
= map
->timestamp
;
2723 if ((entry
->wired_count
== 0) ||
2724 (user_wire
&& entry
->user_wired_count
== 0)) {
2726 panic("vm_map_unwire: entry is unwired");
2728 entry
= entry
->vme_next
;
2732 assert(entry
->wired_count
> 0 &&
2733 (!user_wire
|| entry
->user_wired_count
> 0));
2735 vm_map_clip_start(map
, entry
, start
);
2736 vm_map_clip_end(map
, entry
, end
);
2740 * Holes: Next entry should be contiguous unless
2741 * this is the end of the region.
2743 if (((entry
->vme_end
< end
) &&
2744 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2745 (entry
->vme_next
->vme_start
> entry
->vme_end
)))) {
2748 panic("vm_map_unwire: non-contiguous region");
2749 entry
= entry
->vme_next
;
2753 if (!user_wire
|| (--entry
->user_wired_count
== 0))
2754 entry
->wired_count
--;
2756 if (entry
->wired_count
!= 0) {
2757 entry
= entry
->vme_next
;
2761 entry
->in_transition
= TRUE
;
2762 tmp_entry
= *entry
; /* see comment in vm_map_wire() */
2765 * We can unlock the map now. The in_transition state
2766 * guarantees existence of the entry.
2770 vm_fault_unwire(map
,
2771 &tmp_entry
, FALSE
, map_pmap
, pmap_addr
);
2773 vm_fault_unwire(map
,
2774 &tmp_entry
, FALSE
, map
->pmap
,
2775 tmp_entry
.vme_start
);
2779 if (last_timestamp
+1 != map
->timestamp
) {
2781 * Find the entry again. It could have been clipped
2782 * or deleted after we unlocked the map.
2784 if (!vm_map_lookup_entry(map
, tmp_entry
.vme_start
,
2787 panic("vm_map_unwire: re-lookup failed");
2788 entry
= first_entry
->vme_next
;
2790 entry
= first_entry
;
2792 last_timestamp
= map
->timestamp
;
2795 * clear transition bit for all constituent entries that
2796 * were in the original entry (saved in tmp_entry). Also
2797 * check for waiters.
2799 while ((entry
!= vm_map_to_entry(map
)) &&
2800 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2801 assert(entry
->in_transition
);
2802 entry
->in_transition
= FALSE
;
2803 if (entry
->needs_wakeup
) {
2804 entry
->needs_wakeup
= FALSE
;
2807 entry
= entry
->vme_next
;
	vm_map_unlock(map);

	/*
	 * wake up anybody waiting on entries that we have unwired.
	 */
	if (need_wakeup)
		vm_map_entry_wakeup(map);
	return(KERN_SUCCESS);

}
kern_return_t
vm_map_unwire(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	boolean_t		user_wire)
{
	return vm_map_unwire_nested(map, start, end, 
		user_wire, (pmap_t)NULL, 0);
}
/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
void
vm_map_entry_delete(
	register vm_map_t	map,
	register vm_map_entry_t	entry)
{
	register vm_offset_t	s, e;
	register vm_object_t	object;
	register vm_map_t	submap;
	extern vm_object_t	kernel_object;

	s = entry->vme_start;
	e = entry->vme_end;
	assert(page_aligned(s));
	assert(page_aligned(e));
	assert(entry->wired_count == 0);
	assert(entry->user_wired_count == 0);

	if (entry->is_sub_map) {
		object = NULL;
		submap = entry->object.sub_map;
	} else {
		submap = NULL;
		object = entry->object.vm_object;
	}

	vm_map_entry_unlink(map, entry);
	map->size -= e - s;

	vm_map_entry_dispose(map, entry);

	vm_map_unlock(map);
	/*
	 *	Deallocate the object only after removing all
	 *	pmap entries pointing to its pages.
	 */
	if (submap)
		vm_map_deallocate(submap);
	else
		vm_object_deallocate(object);

}
2880 vm_map_submap_pmap_clean(
2887 vm_offset_t submap_start
;
2888 vm_offset_t submap_end
;
2890 vm_size_t remove_size
;
2891 vm_map_entry_t entry
;
2893 submap_end
= offset
+ (end
- start
);
2894 submap_start
= offset
;
2895 if(vm_map_lookup_entry(sub_map
, offset
, &entry
)) {
2897 remove_size
= (entry
->vme_end
- entry
->vme_start
);
2898 if(offset
> entry
->vme_start
)
2899 remove_size
-= offset
- entry
->vme_start
;
2902 if(submap_end
< entry
->vme_end
) {
2904 entry
->vme_end
- submap_end
;
2906 if(entry
->is_sub_map
) {
2907 vm_map_submap_pmap_clean(
2910 start
+ remove_size
,
2911 entry
->object
.sub_map
,
2915 if((map
->mapped
) && (map
->ref_count
)
2916 && (entry
->object
.vm_object
!= NULL
)) {
2917 vm_object_pmap_protect(
2918 entry
->object
.vm_object
,
2925 pmap_remove(map
->pmap
,
2927 (addr64_t
)(start
+ remove_size
));
2932 entry
= entry
->vme_next
;
2934 while((entry
!= vm_map_to_entry(sub_map
))
2935 && (entry
->vme_start
< submap_end
)) {
2936 remove_size
= (entry
->vme_end
- entry
->vme_start
);
2937 if(submap_end
< entry
->vme_end
) {
2938 remove_size
-= entry
->vme_end
- submap_end
;
2940 if(entry
->is_sub_map
) {
2941 vm_map_submap_pmap_clean(
2943 (start
+ entry
->vme_start
) - offset
,
2944 ((start
+ entry
->vme_start
) - offset
) + remove_size
,
2945 entry
->object
.sub_map
,
2948 if((map
->mapped
) && (map
->ref_count
)
2949 && (entry
->object
.vm_object
!= NULL
)) {
2950 vm_object_pmap_protect(
2951 entry
->object
.vm_object
,
2958 pmap_remove(map
->pmap
,
2959 (addr64_t
)((start
+ entry
->vme_start
)
2961 (addr64_t
)(((start
+ entry
->vme_start
)
2962 - offset
) + remove_size
));
2965 entry
= entry
->vme_next
;
/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target map.
 *	Removes all user wirings. Unwires one kernel wiring if
 *	VM_MAP_REMOVE_KUNWIRE is set.  Waits for kernel wirings to go
 *	away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set.  Sleeps
 *	interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set.
 *
 *	This routine is called with map locked and leaves map locked.
 */
2983 register vm_map_t map
,
2985 register vm_offset_t end
,
2988 vm_map_entry_t entry
, next
;
2989 struct vm_map_entry
*first_entry
, tmp_entry
;
2990 register vm_offset_t s
, e
;
2991 register vm_object_t object
;
2992 boolean_t need_wakeup
;
2993 unsigned int last_timestamp
= ~0; /* unlikely value */
2995 extern vm_map_t kernel_map
;
2997 interruptible
= (flags
& VM_MAP_REMOVE_INTERRUPTIBLE
) ?
2998 THREAD_ABORTSAFE
: THREAD_UNINT
;
3001 * All our DMA I/O operations in IOKit are currently done by
3002 * wiring through the map entries of the task requesting the I/O.
3003 * Because of this, we must always wait for kernel wirings
3004 * to go away on the entries before deleting them.
3006 * Any caller who wants to actually remove a kernel wiring
3007 * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to
3008 * properly remove one wiring instead of blasting through
3011 flags
|= VM_MAP_REMOVE_WAIT_FOR_KWIRE
;
3014 * Find the start of the region, and clip it
3016 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
3017 entry
= first_entry
;
3018 vm_map_clip_start(map
, entry
, start
);
3021 * Fix the lookup hint now, rather than each
3022 * time through the loop.
3024 SAVE_HINT(map
, entry
->vme_prev
);
3026 entry
= first_entry
->vme_next
;
3029 need_wakeup
= FALSE
;
3031 * Step through all entries in this region
3033 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
3035 vm_map_clip_end(map
, entry
, end
);
3036 if (entry
->in_transition
) {
3037 wait_result_t wait_result
;
3040 * Another thread is wiring/unwiring this entry.
3041 * Let the other thread know we are waiting.
3043 s
= entry
->vme_start
;
3044 entry
->needs_wakeup
= TRUE
;
3047 * wake up anybody waiting on entries that we have
3048 * already unwired/deleted.
3051 vm_map_entry_wakeup(map
);
3052 need_wakeup
= FALSE
;
3055 wait_result
= vm_map_entry_wait(map
, interruptible
);
3057 if (interruptible
&&
3058 wait_result
== THREAD_INTERRUPTED
) {
3060 * We do not clear the needs_wakeup flag,
3061 * since we cannot tell if we were the only one.
3064 return KERN_ABORTED
;
3068 * The entry could have been clipped or it
3069 * may not exist anymore. Look it up again.
3071 if (!vm_map_lookup_entry(map
, s
, &first_entry
)) {
3072 assert((map
!= kernel_map
) &&
3073 (!entry
->is_sub_map
));
3075 * User: use the next entry
3077 entry
= first_entry
->vme_next
;
3079 entry
= first_entry
;
3080 SAVE_HINT(map
, entry
->vme_prev
);
3082 last_timestamp
= map
->timestamp
;
3084 } /* end in_transition */
3086 if (entry
->wired_count
) {
3088 * Remove a kernel wiring if requested or if
3089 * there are user wirings.
3091 if ((flags
& VM_MAP_REMOVE_KUNWIRE
) ||
3092 (entry
->user_wired_count
> 0))
3093 entry
->wired_count
--;
3095 /* remove all user wire references */
3096 entry
->user_wired_count
= 0;
3098 if (entry
->wired_count
!= 0) {
3099 assert((map
!= kernel_map
) &&
3100 (!entry
->is_sub_map
));
3102 * Cannot continue. Typical case is when
3103 * a user thread has physical io pending on
3104 * on this page. Either wait for the
3105 * kernel wiring to go away or return an
3108 if (flags
& VM_MAP_REMOVE_WAIT_FOR_KWIRE
) {
3109 wait_result_t wait_result
;
3111 s
= entry
->vme_start
;
3112 entry
->needs_wakeup
= TRUE
;
3113 wait_result
= vm_map_entry_wait(map
,
3116 if (interruptible
&&
3117 wait_result
== THREAD_INTERRUPTED
) {
3119 * We do not clear the
3120 * needs_wakeup flag, since we
3121 * cannot tell if we were the
3125 return KERN_ABORTED
;
3129 * The entry could have been clipped or
3130 * it may not exist anymore. Look it
3133 if (!vm_map_lookup_entry(map
, s
,
3135 assert((map
!= kernel_map
) &&
3136 (!entry
->is_sub_map
));
3138 * User: use the next entry
3140 entry
= first_entry
->vme_next
;
3142 entry
= first_entry
;
3143 SAVE_HINT(map
, entry
->vme_prev
);
3145 last_timestamp
= map
->timestamp
;
3149 return KERN_FAILURE
;
3153 entry
->in_transition
= TRUE
;
3155 * copy current entry. see comment in vm_map_wire()
3158 s
= entry
->vme_start
;
3162 * We can unlock the map now. The in_transition
3163 * state guarantees existence of the entry.
3166 vm_fault_unwire(map
, &tmp_entry
,
3167 tmp_entry
.object
.vm_object
== kernel_object
,
3168 map
->pmap
, tmp_entry
.vme_start
);
3171 if (last_timestamp
+1 != map
->timestamp
) {
3173 * Find the entry again. It could have
3174 * been clipped after we unlocked the map.
3176 if (!vm_map_lookup_entry(map
, s
, &first_entry
)){
3177 assert((map
!= kernel_map
) &&
3178 (!entry
->is_sub_map
));
3179 first_entry
= first_entry
->vme_next
;
3181 SAVE_HINT(map
, entry
->vme_prev
);
3184 SAVE_HINT(map
, entry
->vme_prev
);
3185 first_entry
= entry
;
3188 last_timestamp
= map
->timestamp
;
3190 entry
= first_entry
;
3191 while ((entry
!= vm_map_to_entry(map
)) &&
3192 (entry
->vme_start
< tmp_entry
.vme_end
)) {
3193 assert(entry
->in_transition
);
3194 entry
->in_transition
= FALSE
;
3195 if (entry
->needs_wakeup
) {
3196 entry
->needs_wakeup
= FALSE
;
3199 entry
= entry
->vme_next
;
3202 * We have unwired the entry(s). Go back and
3205 entry
= first_entry
;
3209 /* entry is unwired */
3210 assert(entry
->wired_count
== 0);
3211 assert(entry
->user_wired_count
== 0);
3213 if ((!entry
->is_sub_map
&&
3214 entry
->object
.vm_object
!= kernel_object
) ||
3215 entry
->is_sub_map
) {
3216 if(entry
->is_sub_map
) {
3217 if(entry
->use_pmap
) {
3219 pmap_unnest(map
->pmap
, (addr64_t
)entry
->vme_start
);
3221 if((map
->mapped
) && (map
->ref_count
)) {
3222 /* clean up parent map/maps */
3223 vm_map_submap_pmap_clean(
3224 map
, entry
->vme_start
,
3226 entry
->object
.sub_map
,
3230 vm_map_submap_pmap_clean(
3231 map
, entry
->vme_start
, entry
->vme_end
,
3232 entry
->object
.sub_map
,
3236 if((map
->mapped
) && (map
->ref_count
)) {
3237 vm_object_pmap_protect(
3238 entry
->object
.vm_object
,
3240 entry
->vme_end
- entry
->vme_start
,
3245 pmap_remove(map
->pmap
,
3246 (addr64_t
)(entry
->vme_start
),
3247 (addr64_t
)(entry
->vme_end
));
3252 next
= entry
->vme_next
;
3253 s
= next
->vme_start
;
3254 last_timestamp
= map
->timestamp
;
3255 vm_map_entry_delete(map
, entry
);
3256 /* vm_map_entry_delete unlocks the map */
3260 if(entry
== vm_map_to_entry(map
)) {
3263 if (last_timestamp
+1 != map
->timestamp
) {
3265 * we are responsible for deleting everything
3266 * from the given space, if someone has interfered
3267 * we pick up where we left off, back fills should
3268 * be all right for anyone except map_delete and
3269 * we have to assume that the task has been fully
3270 * disabled before we get here
3272 if (!vm_map_lookup_entry(map
, s
, &entry
)){
3273 entry
= entry
->vme_next
;
3275 SAVE_HINT(map
, entry
->vme_prev
);
3278 * others can not only allocate behind us, we can
3279 * also see coalesce while we don't have the map lock
3281 if(entry
== vm_map_to_entry(map
)) {
3284 vm_map_clip_start(map
, entry
, s
);
3286 last_timestamp
= map
->timestamp
;
	if (map->wait_for_space)
		thread_wakeup((event_t) map);
	/*
	 * wake up anybody waiting on entries that we have already deleted.
	 */
	if (need_wakeup)
		vm_map_entry_wakeup(map);

	return KERN_SUCCESS;
}
/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
kern_return_t
vm_map_remove(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register boolean_t	flags)
{
	register kern_return_t	result;
	boolean_t	funnel_set = FALSE;
	funnel_t	*curflock;
	thread_t	cur_thread;

	cur_thread = current_thread();

	if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
		funnel_set = TRUE;
		curflock = cur_thread->funnel_lock;
		thread_funnel_set( curflock , FALSE);
	}
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end, flags);
	vm_map_unlock(map);
	if (funnel_set) {
		thread_funnel_set( curflock, TRUE);
		funnel_set = FALSE;
	}
	return(result);
}
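
/*
 * Illustrative sketch (not part of the original source): calling the
 * exported vm_map_remove() with the removal flags described above.  The
 * helper name is hypothetical; VM_MAP_NO_FLAGS and VM_MAP_REMOVE_KUNWIRE
 * come from vm_map.h.  A kernel caller that wired the range itself passes
 * VM_MAP_REMOVE_KUNWIRE so its one kernel wiring is dropped on the way out.
 */
#if 0	/* example only -- not compiled */
static kern_return_t
example_remove_range(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end,
	boolean_t	kernel_wired)
{
	boolean_t	flags = VM_MAP_NO_FLAGS;

	if (kernel_wired)
		flags |= VM_MAP_REMOVE_KUNWIRE;	/* unwire our own kernel wiring */

	return vm_map_remove(map, trunc_page_32(start),
			     round_page_32(end), flags);
}
#endif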
/*
 *	Routine:	vm_map_copy_discard
 *
 *	Description:
 *		Dispose of a map copy object (returned by
 *		vm_map_copyin).
 */
void
vm_map_copy_discard(
	vm_map_copy_t	copy)
{
	TR_DECL("vm_map_copy_discard");

/*	tr3("enter: copy 0x%x type %d", copy, copy->type);*/

	if (copy == VM_MAP_COPY_NULL)
		return;

	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		while (vm_map_copy_first_entry(copy) !=
					vm_map_copy_to_entry(copy)) {
			vm_map_entry_t	entry = vm_map_copy_first_entry(copy);

			vm_map_copy_entry_unlink(copy, entry);
			vm_object_deallocate(entry->object.vm_object);
			vm_map_copy_entry_dispose(copy, entry);
		}
		break;
	case VM_MAP_COPY_OBJECT:
		vm_object_deallocate(copy->cpy_object);
		break;
	case VM_MAP_COPY_KERNEL_BUFFER:

		/*
		 * The vm_map_copy_t and possibly the data buffer were
		 * allocated by a single call to kalloc(), i.e. the
		 * vm_map_copy_t was not allocated out of the zone.
		 */
		kfree((vm_offset_t) copy, copy->cpy_kalloc_size);
		return;
	}
	zfree(vm_map_copy_zone, (vm_offset_t) copy);
}
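
/*
 * Illustrative sketch (not part of the original source): the usual
 * ownership rule for vm_map_copy_t objects.  vm_map_copyin() hands back a
 * copy object; if the consumer (here vm_map_copyout()) fails, the caller
 * still owns the copy and must vm_map_copy_discard() it.  Names other
 * than the vm_map_* calls are hypothetical.
 */
#if 0	/* example only -- not compiled */
static kern_return_t
example_move_range(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	len,
	vm_map_t	dst_map,
	vm_offset_t	*dst_addr)	/* OUT */
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	kr = vm_map_copyin(src_map, src_addr, len,
			   FALSE,	/* don't destroy the source */
			   &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copyout(dst_map, dst_addr, copy);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);	/* copyout did not consume it */
	return kr;
}
#endif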
/*
 *	Routine:	vm_map_copy_copy
 *
 *	Description:
 *			Move the information in a map copy object to
 *			a new map copy object, leaving the old one
 *			empty.
 *
 *			This is used by kernel routines that need
 *			to look at out-of-line data (in copyin form)
 *			before deciding whether to return SUCCESS.
 *			If the routine returns FAILURE, the original
 *			copy object will be deallocated; therefore,
 *			these routines must make a copy of the copy
 *			object and leave the original empty so that
 *			deallocation will not fail.
 */
vm_map_copy_t
vm_map_copy_copy(
	vm_map_copy_t	copy)
{
	vm_map_copy_t	new_copy;

	if (copy == VM_MAP_COPY_NULL)
		return VM_MAP_COPY_NULL;

	/*
	 * Allocate a new copy object, and copy the information
	 * from the old one into it.
	 */

	new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	*new_copy = *copy;

	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
		/*
		 * The links in the entry chain must be
		 * changed to point to the new copy object.
		 */
		vm_map_copy_first_entry(copy)->vme_prev
			= vm_map_copy_to_entry(new_copy);
		vm_map_copy_last_entry(copy)->vme_next
			= vm_map_copy_to_entry(new_copy);
	}

	/*
	 * Change the old copy object into one that contains
	 * nothing to be deallocated.
	 */
	copy->type = VM_MAP_COPY_OBJECT;
	copy->cpy_object = VM_OBJECT_NULL;

	/*
	 * Return the new object.
	 */
	return new_copy;
}
3441 vm_map_overwrite_submap_recurse(
3443 vm_offset_t dst_addr
,
3446 vm_offset_t dst_end
;
3447 vm_map_entry_t tmp_entry
;
3448 vm_map_entry_t entry
;
3449 kern_return_t result
;
3450 boolean_t encountered_sub_map
= FALSE
;
3455 * Verify that the destination is all writeable
3456 * initially. We have to trunc the destination
3457 * address and round the copy size or we'll end up
3458 * splitting entries in strange ways.
3461 dst_end
= round_page_32(dst_addr
+ dst_size
);
3462 vm_map_lock(dst_map
);
3465 if (!vm_map_lookup_entry(dst_map
, dst_addr
, &tmp_entry
)) {
3466 vm_map_unlock(dst_map
);
3467 return(KERN_INVALID_ADDRESS
);
3470 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page_32(dst_addr
));
3472 for (entry
= tmp_entry
;;) {
3473 vm_map_entry_t next
;
3475 next
= entry
->vme_next
;
3476 while(entry
->is_sub_map
) {
3477 vm_offset_t sub_start
;
3478 vm_offset_t sub_end
;
3479 vm_offset_t local_end
;
3481 if (entry
->in_transition
) {
3483 * Say that we are waiting, and wait for entry.
3485 entry
->needs_wakeup
= TRUE
;
3486 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3491 encountered_sub_map
= TRUE
;
3492 sub_start
= entry
->offset
;
3494 if(entry
->vme_end
< dst_end
)
3495 sub_end
= entry
->vme_end
;
3498 sub_end
-= entry
->vme_start
;
3499 sub_end
+= entry
->offset
;
3500 local_end
= entry
->vme_end
;
3501 vm_map_unlock(dst_map
);
3503 result
= vm_map_overwrite_submap_recurse(
3504 entry
->object
.sub_map
,
3506 sub_end
- sub_start
);
3508 if(result
!= KERN_SUCCESS
)
3510 if (dst_end
<= entry
->vme_end
)
3511 return KERN_SUCCESS
;
3512 vm_map_lock(dst_map
);
3513 if(!vm_map_lookup_entry(dst_map
, local_end
,
3515 vm_map_unlock(dst_map
);
3516 return(KERN_INVALID_ADDRESS
);
3519 next
= entry
->vme_next
;
3522 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
3523 vm_map_unlock(dst_map
);
3524 return(KERN_PROTECTION_FAILURE
);
3528 * If the entry is in transition, we must wait
3529 * for it to exit that state. Anything could happen
3530 * when we unlock the map, so start over.
3532 if (entry
->in_transition
) {
3535 * Say that we are waiting, and wait for entry.
3537 entry
->needs_wakeup
= TRUE
;
3538 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3544 * our range is contained completely within this map entry
3546 if (dst_end
<= entry
->vme_end
) {
3547 vm_map_unlock(dst_map
);
3548 return KERN_SUCCESS
;
3551 * check that range specified is contiguous region
3553 if ((next
== vm_map_to_entry(dst_map
)) ||
3554 (next
->vme_start
!= entry
->vme_end
)) {
3555 vm_map_unlock(dst_map
);
3556 return(KERN_INVALID_ADDRESS
);
3560 * Check for permanent objects in the destination.
3562 if ((entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
3563 ((!entry
->object
.vm_object
->internal
) ||
3564 (entry
->object
.vm_object
->true_share
))) {
3565 if(encountered_sub_map
) {
3566 vm_map_unlock(dst_map
);
3567 return(KERN_FAILURE
);
3574 vm_map_unlock(dst_map
);
3575 return(KERN_SUCCESS
);
/*
 *	Routine:	vm_map_copy_overwrite
 *
 *	Description:
 *		Copy the memory described by the map copy
 *		object (copy; returned by vm_map_copyin) onto
 *		the specified destination region (dst_map, dst_addr).
 *		The destination must be writeable.
 *
 *		Unlike vm_map_copyout, this routine actually
 *		writes over previously-mapped memory.  If the
 *		previous mapping was to a permanent (user-supplied)
 *		memory object, it is preserved.
 *
 *		The attributes (protection and inheritance) of the
 *		destination region are preserved.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 *
 *	Implementation notes:
 *		To overwrite aligned temporary virtual memory, it is
 *		sufficient to remove the previous mapping and insert
 *		the new copy.  This replacement is done either on
 *		the whole region (if no permanent virtual memory
 *		objects are embedded in the destination region) or
 *		in individual map entries.
 *
 *		To overwrite permanent virtual memory, it is necessary
 *		to copy each page, as the external memory management
 *		interface currently does not provide any optimizations.
 *
 *		Unaligned memory also has to be copied.  It is possible
 *		to use 'vm_trickery' to copy the aligned data.  This is
 *		not done but not hard to implement.
 *
 *		Once a page of permanent memory has been overwritten,
 *		it is impossible to interrupt this function; otherwise,
 *		the call would be neither atomic nor location-independent.
 *		The kernel-state portion of a user thread must be
 *		interruptible.
 *
 *		It may be expensive to forward all requests that might
 *		overwrite permanent memory (vm_write, vm_copy) to
 *		uninterruptible kernel threads.  This routine may be
 *		called by interruptible threads; however, success is
 *		not guaranteed -- if the request cannot be performed
 *		atomically and interruptibly, an error indication is
 *		returned.
 */
kern_return_t
vm_map_copy_overwrite_nested(
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_map_copy_t	copy,
	boolean_t	interruptible,
	pmap_t		pmap)
{
	vm_offset_t	dst_end;
	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;
	kern_return_t	kr;
	boolean_t	aligned = TRUE;
	boolean_t	contains_permanent_objects = FALSE;
	boolean_t	encountered_sub_map = FALSE;
	vm_offset_t	base_addr;
	vm_size_t	copy_size;
	vm_size_t	total_size;
3650 * Check for null copy object.
3653 if (copy
== VM_MAP_COPY_NULL
)
3654 return(KERN_SUCCESS
);
3657 * Check for special kernel buffer allocated
3658 * by new_ipc_kmsg_copyin.
3661 if (copy
->type
== VM_MAP_COPY_KERNEL_BUFFER
) {
3662 return(vm_map_copyout_kernel_buffer(
3668 * Only works for entry lists at the moment. Will
3669 * support page lists later.
3672 assert(copy
->type
== VM_MAP_COPY_ENTRY_LIST
);
3674 if (copy
->size
== 0) {
3675 vm_map_copy_discard(copy
);
3676 return(KERN_SUCCESS
);
3680 * Verify that the destination is all writeable
3681 * initially. We have to trunc the destination
3682 * address and round the copy size or we'll end up
3683 * splitting entries in strange ways.
3686 if (!page_aligned(copy
->size
) ||
3687 !page_aligned (copy
->offset
) ||
3688 !page_aligned (dst_addr
))
3691 dst_end
= round_page_32(dst_addr
+ copy
->size
);
3693 dst_end
= dst_addr
+ copy
->size
;
3696 vm_map_lock(dst_map
);
3699 if (!vm_map_lookup_entry(dst_map
, dst_addr
, &tmp_entry
)) {
3700 vm_map_unlock(dst_map
);
3701 return(KERN_INVALID_ADDRESS
);
3703 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page_32(dst_addr
));
3704 for (entry
= tmp_entry
;;) {
3705 vm_map_entry_t next
= entry
->vme_next
;
3707 while(entry
->is_sub_map
) {
3708 vm_offset_t sub_start
;
3709 vm_offset_t sub_end
;
3710 vm_offset_t local_end
;
3712 if (entry
->in_transition
) {
3715 * Say that we are waiting, and wait for entry.
3717 entry
->needs_wakeup
= TRUE
;
3718 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3723 local_end
= entry
->vme_end
;
3724 if (!(entry
->needs_copy
)) {
3725 /* if needs_copy we are a COW submap */
3726 /* in such a case we just replace so */
3727 /* there is no need for the follow- */
3729 encountered_sub_map
= TRUE
;
3730 sub_start
= entry
->offset
;
3732 if(entry
->vme_end
< dst_end
)
3733 sub_end
= entry
->vme_end
;
3736 sub_end
-= entry
->vme_start
;
3737 sub_end
+= entry
->offset
;
3738 vm_map_unlock(dst_map
);
3740 kr
= vm_map_overwrite_submap_recurse(
3741 entry
->object
.sub_map
,
3743 sub_end
- sub_start
);
3744 if(kr
!= KERN_SUCCESS
)
3746 vm_map_lock(dst_map
);
3749 if (dst_end
<= entry
->vme_end
)
3750 goto start_overwrite
;
3751 if(!vm_map_lookup_entry(dst_map
, local_end
,
3753 vm_map_unlock(dst_map
);
3754 return(KERN_INVALID_ADDRESS
);
3756 next
= entry
->vme_next
;
3759 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
3760 vm_map_unlock(dst_map
);
3761 return(KERN_PROTECTION_FAILURE
);
3765 * If the entry is in transition, we must wait
3766 * for it to exit that state. Anything could happen
3767 * when we unlock the map, so start over.
3769 if (entry
->in_transition
) {
3772 * Say that we are waiting, and wait for entry.
3774 entry
->needs_wakeup
= TRUE
;
3775 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3781 * our range is contained completely within this map entry
3783 if (dst_end
<= entry
->vme_end
)
3786 * check that range specified is contiguous region
3788 if ((next
== vm_map_to_entry(dst_map
)) ||
3789 (next
->vme_start
!= entry
->vme_end
)) {
3790 vm_map_unlock(dst_map
);
3791 return(KERN_INVALID_ADDRESS
);
3796 * Check for permanent objects in the destination.
3798 if ((entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
3799 ((!entry
->object
.vm_object
->internal
) ||
3800 (entry
->object
.vm_object
->true_share
))) {
3801 contains_permanent_objects
= TRUE
;
3809 * If there are permanent objects in the destination, then
3810 * the copy cannot be interrupted.
3813 if (interruptible
&& contains_permanent_objects
) {
3814 vm_map_unlock(dst_map
);
3815 return(KERN_FAILURE
); /* XXX */
3820 * Make a second pass, overwriting the data
3821 * At the beginning of each loop iteration,
3822 * the next entry to be overwritten is "tmp_entry"
3823 * (initially, the value returned from the lookup above),
3824 * and the starting address expected in that entry
3828 total_size
= copy
->size
;
3829 if(encountered_sub_map
) {
3831 /* re-calculate tmp_entry since we've had the map */
3833 if (!vm_map_lookup_entry( dst_map
, dst_addr
, &tmp_entry
)) {
3834 vm_map_unlock(dst_map
);
3835 return(KERN_INVALID_ADDRESS
);
3838 copy_size
= copy
->size
;
3841 base_addr
= dst_addr
;
3843 /* deconstruct the copy object and do in parts */
3844 /* only in sub_map, interruptable case */
3845 vm_map_entry_t copy_entry
;
3846 vm_map_entry_t previous_prev
;
3847 vm_map_entry_t next_copy
;
3849 int remaining_entries
;
3852 for (entry
= tmp_entry
; copy_size
== 0;) {
3853 vm_map_entry_t next
;
3855 next
= entry
->vme_next
;
3857 /* tmp_entry and base address are moved along */
3858 /* each time we encounter a sub-map. Otherwise */
3859 /* entry can outpace tmp_entry, and the copy_size */
3860 /* may reflect the distance between them */
3861 /* if the current entry is found to be in transition */
3862 /* we will start over at the beginning or the last */
3863 /* encounter of a submap as dictated by base_addr */
3864 /* we will zero copy_size accordingly. */
3865 if (entry
->in_transition
) {
3867 * Say that we are waiting, and wait for entry.
3869 entry
->needs_wakeup
= TRUE
;
3870 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3872 if(!vm_map_lookup_entry(dst_map
, base_addr
,
3874 vm_map_unlock(dst_map
);
3875 return(KERN_INVALID_ADDRESS
);
3881 if(entry
->is_sub_map
) {
3882 vm_offset_t sub_start
;
3883 vm_offset_t sub_end
;
3884 vm_offset_t local_end
;
3886 if (entry
->needs_copy
) {
3887 /* if this is a COW submap */
3888 /* just back the range with a */
3889 /* anonymous entry */
3890 if(entry
->vme_end
< dst_end
)
3891 sub_end
= entry
->vme_end
;
3894 if(entry
->vme_start
< base_addr
)
3895 sub_start
= base_addr
;
3897 sub_start
= entry
->vme_start
;
3899 dst_map
, entry
, sub_end
);
3901 dst_map
, entry
, sub_start
);
3902 entry
->is_sub_map
= FALSE
;
3904 entry
->object
.sub_map
);
3905 entry
->object
.sub_map
= NULL
;
3906 entry
->is_shared
= FALSE
;
3907 entry
->needs_copy
= FALSE
;
3909 entry
->protection
= VM_PROT_ALL
;
3910 entry
->max_protection
= VM_PROT_ALL
;
3911 entry
->wired_count
= 0;
3912 entry
->user_wired_count
= 0;
3913 if(entry
->inheritance
3914 == VM_INHERIT_SHARE
)
3915 entry
->inheritance
= VM_INHERIT_COPY
;
3918 /* first take care of any non-sub_map */
3919 /* entries to send */
3920 if(base_addr
< entry
->vme_start
) {
3923 entry
->vme_start
- base_addr
;
3926 sub_start
= entry
->offset
;
3928 if(entry
->vme_end
< dst_end
)
3929 sub_end
= entry
->vme_end
;
3932 sub_end
-= entry
->vme_start
;
3933 sub_end
+= entry
->offset
;
3934 local_end
= entry
->vme_end
;
3935 vm_map_unlock(dst_map
);
3936 copy_size
= sub_end
- sub_start
;
3938 /* adjust the copy object */
3939 if (total_size
> copy_size
) {
3940 vm_size_t local_size
= 0;
3941 vm_size_t entry_size
;
3944 new_offset
= copy
->offset
;
3945 copy_entry
= vm_map_copy_first_entry(copy
);
3947 vm_map_copy_to_entry(copy
)){
3948 entry_size
= copy_entry
->vme_end
-
3949 copy_entry
->vme_start
;
3950 if((local_size
< copy_size
) &&
3951 ((local_size
+ entry_size
)
3953 vm_map_copy_clip_end(copy
,
3955 copy_entry
->vme_start
+
3956 (copy_size
- local_size
));
3957 entry_size
= copy_entry
->vme_end
-
3958 copy_entry
->vme_start
;
3959 local_size
+= entry_size
;
3960 new_offset
+= entry_size
;
3962 if(local_size
>= copy_size
) {
3963 next_copy
= copy_entry
->vme_next
;
3964 copy_entry
->vme_next
=
3965 vm_map_copy_to_entry(copy
);
3967 copy
->cpy_hdr
.links
.prev
;
3968 copy
->cpy_hdr
.links
.prev
= copy_entry
;
3969 copy
->size
= copy_size
;
3971 copy
->cpy_hdr
.nentries
;
3972 remaining_entries
-= nentries
;
3973 copy
->cpy_hdr
.nentries
= nentries
;
3976 local_size
+= entry_size
;
3977 new_offset
+= entry_size
;
3980 copy_entry
= copy_entry
->vme_next
;
3984 if((entry
->use_pmap
) && (pmap
== NULL
)) {
3985 kr
= vm_map_copy_overwrite_nested(
3986 entry
->object
.sub_map
,
3990 entry
->object
.sub_map
->pmap
);
3991 } else if (pmap
!= NULL
) {
3992 kr
= vm_map_copy_overwrite_nested(
3993 entry
->object
.sub_map
,
3996 interruptible
, pmap
);
3998 kr
= vm_map_copy_overwrite_nested(
3999 entry
->object
.sub_map
,
4005 if(kr
!= KERN_SUCCESS
) {
4006 if(next_copy
!= NULL
) {
4007 copy
->cpy_hdr
.nentries
+=
4009 copy
->cpy_hdr
.links
.prev
->vme_next
=
4011 copy
->cpy_hdr
.links
.prev
4013 copy
->size
= total_size
;
4017 if (dst_end
<= local_end
) {
4018 return(KERN_SUCCESS
);
4020 /* otherwise copy no longer exists, it was */
4021 /* destroyed after successful copy_overwrite */
4022 copy
= (vm_map_copy_t
)
4023 zalloc(vm_map_copy_zone
);
4024 vm_map_copy_first_entry(copy
) =
4025 vm_map_copy_last_entry(copy
) =
4026 vm_map_copy_to_entry(copy
);
4027 copy
->type
= VM_MAP_COPY_ENTRY_LIST
;
4028 copy
->offset
= new_offset
;
4030 total_size
-= copy_size
;
4032 /* put back remainder of copy in container */
4033 if(next_copy
!= NULL
) {
4034 copy
->cpy_hdr
.nentries
= remaining_entries
;
4035 copy
->cpy_hdr
.links
.next
= next_copy
;
4036 copy
->cpy_hdr
.links
.prev
= previous_prev
;
4037 copy
->size
= total_size
;
4038 next_copy
->vme_prev
=
4039 vm_map_copy_to_entry(copy
);
4042 base_addr
= local_end
;
4043 vm_map_lock(dst_map
);
4044 if(!vm_map_lookup_entry(dst_map
,
4045 local_end
, &tmp_entry
)) {
4046 vm_map_unlock(dst_map
);
4047 return(KERN_INVALID_ADDRESS
);
4052 if (dst_end
<= entry
->vme_end
) {
4053 copy_size
= dst_end
- base_addr
;
4057 if ((next
== vm_map_to_entry(dst_map
)) ||
4058 (next
->vme_start
!= entry
->vme_end
)) {
4059 vm_map_unlock(dst_map
);
4060 return(KERN_INVALID_ADDRESS
);
4069 /* adjust the copy object */
4070 if (total_size
> copy_size
) {
4071 vm_size_t local_size
= 0;
4072 vm_size_t entry_size
;
4074 new_offset
= copy
->offset
;
4075 copy_entry
= vm_map_copy_first_entry(copy
);
4076 while(copy_entry
!= vm_map_copy_to_entry(copy
)) {
4077 entry_size
= copy_entry
->vme_end
-
4078 copy_entry
->vme_start
;
4079 if((local_size
< copy_size
) &&
4080 ((local_size
+ entry_size
)
4082 vm_map_copy_clip_end(copy
, copy_entry
,
4083 copy_entry
->vme_start
+
4084 (copy_size
- local_size
));
4085 entry_size
= copy_entry
->vme_end
-
4086 copy_entry
->vme_start
;
4087 local_size
+= entry_size
;
4088 new_offset
+= entry_size
;
4090 if(local_size
>= copy_size
) {
4091 next_copy
= copy_entry
->vme_next
;
4092 copy_entry
->vme_next
=
4093 vm_map_copy_to_entry(copy
);
4095 copy
->cpy_hdr
.links
.prev
;
4096 copy
->cpy_hdr
.links
.prev
= copy_entry
;
4097 copy
->size
= copy_size
;
4099 copy
->cpy_hdr
.nentries
;
4100 remaining_entries
-= nentries
;
4101 copy
->cpy_hdr
.nentries
= nentries
;
4104 local_size
+= entry_size
;
4105 new_offset
+= entry_size
;
4108 copy_entry
= copy_entry
->vme_next
;
4118 local_pmap
= dst_map
->pmap
;
4120 if ((kr
= vm_map_copy_overwrite_aligned(
4121 dst_map
, tmp_entry
, copy
,
4122 base_addr
, local_pmap
)) != KERN_SUCCESS
) {
4123 if(next_copy
!= NULL
) {
4124 copy
->cpy_hdr
.nentries
+=
4126 copy
->cpy_hdr
.links
.prev
->vme_next
=
4128 copy
->cpy_hdr
.links
.prev
=
4130 copy
->size
+= copy_size
;
4134 vm_map_unlock(dst_map
);
4139 * if the copy and dst address are misaligned but the same
4140 * offset within the page we can copy_not_aligned the
4141 * misaligned parts and copy aligned the rest. If they are
4142 * aligned but len is unaligned we simply need to copy
4143 * the end bit unaligned. We'll need to split the misaligned
4144 * bits of the region in this case !
4146 /* ALWAYS UNLOCKS THE dst_map MAP */
4147 if ((kr
= vm_map_copy_overwrite_unaligned( dst_map
,
4148 tmp_entry
, copy
, base_addr
)) != KERN_SUCCESS
) {
4149 if(next_copy
!= NULL
) {
4150 copy
->cpy_hdr
.nentries
+=
4152 copy
->cpy_hdr
.links
.prev
->vme_next
=
4154 copy
->cpy_hdr
.links
.prev
=
4156 copy
->size
+= copy_size
;
4161 total_size
-= copy_size
;
4164 base_addr
+= copy_size
;
4166 copy
->offset
= new_offset
;
4167 if(next_copy
!= NULL
) {
4168 copy
->cpy_hdr
.nentries
= remaining_entries
;
4169 copy
->cpy_hdr
.links
.next
= next_copy
;
4170 copy
->cpy_hdr
.links
.prev
= previous_prev
;
4171 next_copy
->vme_prev
= vm_map_copy_to_entry(copy
);
4172 copy
->size
= total_size
;
4174 vm_map_lock(dst_map
);
4176 if (!vm_map_lookup_entry(dst_map
,
4177 base_addr
, &tmp_entry
)) {
4178 vm_map_unlock(dst_map
);
4179 return(KERN_INVALID_ADDRESS
);
4181 if (tmp_entry
->in_transition
) {
4182 entry
->needs_wakeup
= TRUE
;
4183 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
4188 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page_32(base_addr
));
4194 * Throw away the vm_map_copy object
4196 vm_map_copy_discard(copy
);
4198 return(KERN_SUCCESS
);
4199 }/* vm_map_copy_overwrite */
kern_return_t
vm_map_copy_overwrite(
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_map_copy_t	copy,
	boolean_t	interruptible)
{
	return vm_map_copy_overwrite_nested(
		dst_map, dst_addr, copy, interruptible, (pmap_t) NULL);
}
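
/*
 * Illustrative sketch (not part of the original source): overwriting an
 * existing, writeable destination range in place with
 * vm_map_copy_overwrite(), as opposed to allocating new space with
 * vm_map_copyout().  On success the copy object is consumed; on failure
 * it still belongs to the caller.  Names other than the vm_map_* calls
 * are hypothetical.
 */
#if 0	/* example only -- not compiled */
static kern_return_t
example_overwrite_range(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_size_t	len)
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copy_overwrite(dst_map, dst_addr, copy,
				   TRUE);	/* interruptible */
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);	/* not consumed on failure */
	return kr;
}
#endif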
/*
 *	Routine:	vm_map_copy_overwrite_unaligned
 *
 *	Description:
 *		Physically copy unaligned data
 *
 *	Implementation:
 *		Unaligned parts of pages have to be physically copied.  We use
 *		a modified form of vm_fault_copy (which understands non-aligned
 *		page offsets and sizes) to do the copy.  We attempt to copy as
 *		much memory in one go as possible, however vm_fault_copy copies
 *		within 1 memory object so we have to find the smaller of "amount left",
 *		"source object data size" and "target object data size".  With
 *		unaligned data we don't need to split regions, therefore the source
 *		(copy) object should be one map entry, the target range may be split
 *		over multiple map entries however.  In any event we are pessimistic
 *		about these assumptions.
 *
 *	Assumptions:
 *		dst_map is locked on entry and is returned locked on success,
 *		unlocked on error.
 */
4237 vm_map_copy_overwrite_unaligned(
4239 vm_map_entry_t entry
,
4243 vm_map_entry_t copy_entry
= vm_map_copy_first_entry(copy
);
4244 vm_map_version_t version
;
4245 vm_object_t dst_object
;
4246 vm_object_offset_t dst_offset
;
4247 vm_object_offset_t src_offset
;
4248 vm_object_offset_t entry_offset
;
4249 vm_offset_t entry_end
;
4254 kern_return_t kr
= KERN_SUCCESS
;
4256 vm_map_lock_write_to_read(dst_map
);
4258 src_offset
= copy
->offset
- trunc_page_64(copy
->offset
);
4259 amount_left
= copy
->size
;
4261 * unaligned so we never clipped this entry, we need the offset into
4262 * the vm_object not just the data.
4264 while (amount_left
> 0) {
4266 if (entry
== vm_map_to_entry(dst_map
)) {
4267 vm_map_unlock_read(dst_map
);
4268 return KERN_INVALID_ADDRESS
;
4271 /* "start" must be within the current map entry */
4272 assert ((start
>=entry
->vme_start
) && (start
<entry
->vme_end
));
4274 dst_offset
= start
- entry
->vme_start
;
4276 dst_size
= entry
->vme_end
- start
;
4278 src_size
= copy_entry
->vme_end
-
4279 (copy_entry
->vme_start
+ src_offset
);
4281 if (dst_size
< src_size
) {
4283 * we can only copy dst_size bytes before
4284 * we have to get the next destination entry
4286 copy_size
= dst_size
;
4289 * we can only copy src_size bytes before
4290 * we have to get the next source copy entry
4292 copy_size
= src_size
;
4295 if (copy_size
> amount_left
) {
4296 copy_size
= amount_left
;
4299 * Entry needs copy, create a shadow shadow object for
4300 * Copy on write region.
4302 if (entry
->needs_copy
&&
4303 ((entry
->protection
& VM_PROT_WRITE
) != 0))
4305 if (vm_map_lock_read_to_write(dst_map
)) {
4306 vm_map_lock_read(dst_map
);
4309 vm_object_shadow(&entry
->object
.vm_object
,
4311 (vm_size_t
)(entry
->vme_end
4312 - entry
->vme_start
));
4313 entry
->needs_copy
= FALSE
;
4314 vm_map_lock_write_to_read(dst_map
);
4316 dst_object
= entry
->object
.vm_object
;
4318 * unlike with the virtual (aligned) copy we're going
4319 * to fault on it therefore we need a target object.
4321 if (dst_object
== VM_OBJECT_NULL
) {
4322 if (vm_map_lock_read_to_write(dst_map
)) {
4323 vm_map_lock_read(dst_map
);
4326 dst_object
= vm_object_allocate((vm_size_t
)
4327 entry
->vme_end
- entry
->vme_start
);
4328 entry
->object
.vm_object
= dst_object
;
4330 vm_map_lock_write_to_read(dst_map
);
4333 * Take an object reference and unlock map. The "entry" may
4334 * disappear or change when the map is unlocked.
4336 vm_object_reference(dst_object
);
4337 version
.main_timestamp
= dst_map
->timestamp
;
4338 entry_offset
= entry
->offset
;
4339 entry_end
= entry
->vme_end
;
4340 vm_map_unlock_read(dst_map
);
4342 * Copy as much as possible in one pass
4345 copy_entry
->object
.vm_object
,
4346 copy_entry
->offset
+ src_offset
,
4349 entry_offset
+ dst_offset
,
4355 src_offset
+= copy_size
;
4356 amount_left
-= copy_size
;
4358 * Release the object reference
4360 vm_object_deallocate(dst_object
);
4362 * If a hard error occurred, return it now
4364 if (kr
!= KERN_SUCCESS
)
4367 if ((copy_entry
->vme_start
+ src_offset
) == copy_entry
->vme_end
4368 || amount_left
== 0)
4371 * all done with this copy entry, dispose.
4373 vm_map_copy_entry_unlink(copy
, copy_entry
);
4374 vm_object_deallocate(copy_entry
->object
.vm_object
);
4375 vm_map_copy_entry_dispose(copy
, copy_entry
);
4377 if ((copy_entry
= vm_map_copy_first_entry(copy
))
4378 == vm_map_copy_to_entry(copy
) && amount_left
) {
4380 * not finished copying but run out of source
4382 return KERN_INVALID_ADDRESS
;
4387 if (amount_left
== 0)
4388 return KERN_SUCCESS
;
4390 vm_map_lock_read(dst_map
);
4391 if (version
.main_timestamp
== dst_map
->timestamp
) {
4392 if (start
== entry_end
) {
4394 * destination region is split. Use the version
4395 * information to avoid a lookup in the normal
4398 entry
= entry
->vme_next
;
4400 * should be contiguous. Fail if we encounter
4401 * a hole in the destination.
4403 if (start
!= entry
->vme_start
) {
4404 vm_map_unlock_read(dst_map
);
4405 return KERN_INVALID_ADDRESS
;
4410 * Map version check failed.
4411 * we must lookup the entry because somebody
4412 * might have changed the map behind our backs.
4415 if (!vm_map_lookup_entry(dst_map
, start
, &entry
))
4417 vm_map_unlock_read(dst_map
);
4418 return KERN_INVALID_ADDRESS
;
4424 vm_map_unlock_read(dst_map
);
4426 return KERN_SUCCESS
;
4427 }/* vm_map_copy_overwrite_unaligned */
/*
 *	Routine:	vm_map_copy_overwrite_aligned
 *
 *	Description:
 *		Does all the vm_trickery possible for whole pages.
 *
 *	Implementation:
 *
 *		If there are no permanent objects in the destination,
 *		and the source and destination map entry zones match,
 *		and the destination map entry is not shared,
 *		then the map entries can be deleted and replaced
 *		with those from the copy.  The following code is the
 *		basic idea of what to do, but there are lots of annoying
 *		little details about getting protection and inheritance
 *		right.  Should add protection, inheritance, and sharing checks
 *		to the above pass and make sure that no wiring is involved.
 */
4449 vm_map_copy_overwrite_aligned(
4451 vm_map_entry_t tmp_entry
,
4457 vm_map_entry_t copy_entry
;
4458 vm_size_t copy_size
;
4460 vm_map_entry_t entry
;
4462 while ((copy_entry
= vm_map_copy_first_entry(copy
))
4463 != vm_map_copy_to_entry(copy
))
4465 copy_size
= (copy_entry
->vme_end
- copy_entry
->vme_start
);
4468 if (entry
== vm_map_to_entry(dst_map
)) {
4469 vm_map_unlock(dst_map
);
4470 return KERN_INVALID_ADDRESS
;
4472 size
= (entry
->vme_end
- entry
->vme_start
);
4474 * Make sure that no holes popped up in the
4475 * address map, and that the protection is
4476 * still valid, in case the map was unlocked
4480 if ((entry
->vme_start
!= start
) || ((entry
->is_sub_map
)
4481 && !entry
->needs_copy
)) {
4482 vm_map_unlock(dst_map
);
4483 return(KERN_INVALID_ADDRESS
);
4485 assert(entry
!= vm_map_to_entry(dst_map
));
4488 * Check protection again
4491 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
4492 vm_map_unlock(dst_map
);
4493 return(KERN_PROTECTION_FAILURE
);
4497 * Adjust to source size first
4500 if (copy_size
< size
) {
4501 vm_map_clip_end(dst_map
, entry
, entry
->vme_start
+ copy_size
);
4506 * Adjust to destination size
4509 if (size
< copy_size
) {
4510 vm_map_copy_clip_end(copy
, copy_entry
,
4511 copy_entry
->vme_start
+ size
);
4515 assert((entry
->vme_end
- entry
->vme_start
) == size
);
4516 assert((tmp_entry
->vme_end
- tmp_entry
->vme_start
) == size
);
4517 assert((copy_entry
->vme_end
- copy_entry
->vme_start
) == size
);
4520 * If the destination contains temporary unshared memory,
4521 * we can perform the copy by throwing it away and
4522 * installing the source data.
4525 object
= entry
->object
.vm_object
;
4526 if ((!entry
->is_shared
&&
4527 ((object
== VM_OBJECT_NULL
) ||
4528 (object
->internal
&& !object
->true_share
))) ||
4529 entry
->needs_copy
) {
4530 vm_object_t old_object
= entry
->object
.vm_object
;
4531 vm_object_offset_t old_offset
= entry
->offset
;
4532 vm_object_offset_t offset
;
4535 * Ensure that the source and destination aren't
4538 if (old_object
== copy_entry
->object
.vm_object
&&
4539 old_offset
== copy_entry
->offset
) {
4540 vm_map_copy_entry_unlink(copy
, copy_entry
);
4541 vm_map_copy_entry_dispose(copy
, copy_entry
);
4543 if (old_object
!= VM_OBJECT_NULL
)
4544 vm_object_deallocate(old_object
);
4546 start
= tmp_entry
->vme_end
;
4547 tmp_entry
= tmp_entry
->vme_next
;
4551 if (old_object
!= VM_OBJECT_NULL
) {
4552 if(entry
->is_sub_map
) {
4553 if(entry
->use_pmap
) {
4555 pmap_unnest(dst_map
->pmap
,
4558 - entry
->vme_start
);
4560 if(dst_map
->mapped
) {
4561 /* clean up parent */
4563 vm_map_submap_pmap_clean(
4564 dst_map
, entry
->vme_start
,
4566 entry
->object
.sub_map
,
4570 vm_map_submap_pmap_clean(
4571 dst_map
, entry
->vme_start
,
4573 entry
->object
.sub_map
,
4577 entry
->object
.sub_map
);
4579 if(dst_map
->mapped
) {
4580 vm_object_pmap_protect(
4581 entry
->object
.vm_object
,
4589 pmap_remove(dst_map
->pmap
,
4590 (addr64_t
)(entry
->vme_start
),
4591 (addr64_t
)(entry
->vme_end
));
4593 vm_object_deallocate(old_object
);
4597 entry
->is_sub_map
= FALSE
;
4598 entry
->object
= copy_entry
->object
;
4599 object
= entry
->object
.vm_object
;
4600 entry
->needs_copy
= copy_entry
->needs_copy
;
4601 entry
->wired_count
= 0;
4602 entry
->user_wired_count
= 0;
4603 offset
= entry
->offset
= copy_entry
->offset
;
4605 vm_map_copy_entry_unlink(copy
, copy_entry
);
4606 vm_map_copy_entry_dispose(copy
, copy_entry
);
4607 #if BAD_OPTIMIZATION
4609 * if we turn this optimization back on
4610 * we need to revisit our use of pmap mappings
4611 * large copies will cause us to run out and panic
4612 * this optimization only saved on average 2 us per page if ALL
4613 * the pages in the source were currently mapped
4614 * and ALL the pages in the dest were touched, if there were fewer
4615 * than 2/3 of the pages touched, this optimization actually cost more cycles
4619 * Try to aggressively enter physical mappings
4620 * (but avoid uninstantiated objects)
4622 if (object
!= VM_OBJECT_NULL
) {
4623 vm_offset_t va
= entry
->vme_start
;
4625 while (va
< entry
->vme_end
) {
4626 register vm_page_t m
;
4630 * Look for the page in the top object
4632 prot
= entry
->protection
;
4633 vm_object_lock(object
);
4634 vm_object_paging_begin(object
);
4636 if ((m
= vm_page_lookup(object
,offset
)) !=
4637 VM_PAGE_NULL
&& !m
->busy
&&
4639 (!m
->unusual
|| (!m
->error
&&
4640 !m
->restart
&& !m
->absent
&&
4641 (prot
& m
->page_lock
) == 0))) {
4644 vm_object_unlock(object
);
4647 * Honor COW obligations
4649 if (entry
->needs_copy
)
4650 prot
&= ~VM_PROT_WRITE
;
4651 /* It is our policy to require */
4652 /* explicit sync from anyone */
4653 /* writing code and then */
4654 /* a pc to execute it. */
4657 PMAP_ENTER(pmap
, va
, m
, prot
,
4659 (m
->object
->wimg_bits
))
4663 vm_object_lock(object
);
4664 vm_page_lock_queues();
4665 if (!m
->active
&& !m
->inactive
)
4666 vm_page_activate(m
);
4667 vm_page_unlock_queues();
4668 PAGE_WAKEUP_DONE(m
);
4670 vm_object_paging_end(object
);
4671 vm_object_unlock(object
);
4673 offset
+= PAGE_SIZE_64
;
4675 } /* end while (va < entry->vme_end) */
4676 } /* end if (object) */
4679 * Set up for the next iteration. The map
4680 * has not been unlocked, so the next
4681 * address should be at the end of this
4682 * entry, and the next map entry should be
4683 * the one following it.
4686 start
= tmp_entry
->vme_end
;
4687 tmp_entry
= tmp_entry
->vme_next
;
4689 vm_map_version_t version
;
4690 vm_object_t dst_object
= entry
->object
.vm_object
;
4691 vm_object_offset_t dst_offset
= entry
->offset
;
4695 * Take an object reference, and record
4696 * the map version information so that the
4697 * map can be safely unlocked.
4700 vm_object_reference(dst_object
);
4702 /* account for unlock bumping up timestamp */
4703 version
.main_timestamp
= dst_map
->timestamp
+ 1;
4705 vm_map_unlock(dst_map
);
4708 * Copy as much as possible in one pass
4713 copy_entry
->object
.vm_object
,
4723 * Release the object reference
4726 vm_object_deallocate(dst_object
);
4729 * If a hard error occurred, return it now
4732 if (r
!= KERN_SUCCESS
)
4735 if (copy_size
!= 0) {
4737 * Dispose of the copied region
4740 vm_map_copy_clip_end(copy
, copy_entry
,
4741 copy_entry
->vme_start
+ copy_size
);
4742 vm_map_copy_entry_unlink(copy
, copy_entry
);
4743 vm_object_deallocate(copy_entry
->object
.vm_object
);
4744 vm_map_copy_entry_dispose(copy
, copy_entry
);
4748 * Pick up in the destination map where we left off.
4750 * Use the version information to avoid a lookup
4751 * in the normal case.
4755 vm_map_lock(dst_map
);
4756 if (version
.main_timestamp
== dst_map
->timestamp
) {
4757 /* We can safely use saved tmp_entry value */
4759 vm_map_clip_end(dst_map
, tmp_entry
, start
);
4760 tmp_entry
= tmp_entry
->vme_next
;
4762 /* Must do lookup of tmp_entry */
4764 if (!vm_map_lookup_entry(dst_map
, start
, &tmp_entry
)) {
4765 vm_map_unlock(dst_map
);
4766 return(KERN_INVALID_ADDRESS
);
4768 vm_map_clip_start(dst_map
, tmp_entry
, start
);
4773 return(KERN_SUCCESS
);
4774 }/* vm_map_copy_overwrite_aligned */
/*
 *	Routine:	vm_map_copyin_kernel_buffer
 *
 *	Description:
 *		Copy in data to a kernel buffer from space in the
 *		source map. The original space may be optionally
 *		deallocated.
 *
 *		If successful, returns a new copy object.
 */
kern_return_t
vm_map_copyin_kernel_buffer(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	len,
	boolean_t	src_destroy,
	vm_map_copy_t	*copy_result)
{
	boolean_t	flags;
	vm_map_copy_t	copy;
	vm_size_t	kalloc_size = sizeof(struct vm_map_copy) + len;

	copy = (vm_map_copy_t) kalloc(kalloc_size);
	if (copy == VM_MAP_COPY_NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}
	copy->type = VM_MAP_COPY_KERNEL_BUFFER;
	copy->size = len;
	copy->offset = 0;
	copy->cpy_kdata = (vm_offset_t) (copy + 1);
	copy->cpy_kalloc_size = kalloc_size;

	if (src_map == kernel_map) {
		bcopy((char *)src_addr, (char *)copy->cpy_kdata, len);
		flags = VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_WAIT_FOR_KWIRE |
			VM_MAP_REMOVE_INTERRUPTIBLE;
	} else {
		kern_return_t	kr;
		kr = copyinmap(src_map, src_addr, copy->cpy_kdata, len);
		if (kr != KERN_SUCCESS) {
			kfree((vm_offset_t)copy, kalloc_size);
			return kr;
		}
		flags = VM_MAP_REMOVE_WAIT_FOR_KWIRE |
			VM_MAP_REMOVE_INTERRUPTIBLE;
	}
	if (src_destroy) {
		(void) vm_map_remove(src_map, trunc_page_32(src_addr), 
				     round_page_32(src_addr + len),
				     flags);
	}
	*copy_result = copy;
	return KERN_SUCCESS;
}
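
/*
 * Illustrative sketch (not part of the original source): what the
 * kernel-buffer copy path produces.  For small amounts of data the copy
 * object header and the payload live in one kalloc() block, with
 * cpy_kdata pointing just past the header, so the payload can be read
 * directly before the copy is handed on or discarded.  The helper name
 * and the "peek" use are hypothetical.
 */
#if 0	/* example only -- not compiled */
static kern_return_t
example_peek_user_bytes(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	len,
	char		*out)		/* kernel buffer, at least len bytes */
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	kr = vm_map_copyin_kernel_buffer(src_map, src_addr, len,
					 FALSE,		/* keep the source */
					 &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	assert(copy->type == VM_MAP_COPY_KERNEL_BUFFER);
	bcopy((char *)copy->cpy_kdata, out, len);

	vm_map_copy_discard(copy);	/* frees the kalloc()ed block */
	return KERN_SUCCESS;
}
#endif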
/*
 *	Routine:	vm_map_copyout_kernel_buffer
 *
 *	Description:
 *		Copy out data from a kernel buffer into space in the
 *		destination map. The space may be optionally dynamically
 *		allocated.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 */
4843 vm_map_copyout_kernel_buffer(
4845 vm_offset_t
*addr
, /* IN/OUT */
4847 boolean_t overwrite
)
4849 kern_return_t kr
= KERN_SUCCESS
;
4850 thread_act_t thr_act
= current_act();
4855 * Allocate space in the target map for the data
4858 kr
= vm_map_enter(map
,
4860 round_page_32(copy
->size
),
4864 (vm_object_offset_t
) 0,
4868 VM_INHERIT_DEFAULT
);
4869 if (kr
!= KERN_SUCCESS
)
4874 * Copyout the data from the kernel buffer to the target map.
4876 if (thr_act
->map
== map
) {
4879 * If the target map is the current map, just do
4882 if (copyout((char *)copy
->cpy_kdata
, (char *)*addr
,
4884 return(KERN_INVALID_ADDRESS
);
4891 * If the target map is another map, assume the
4892 * target's address space identity for the duration
4895 vm_map_reference(map
);
4896 oldmap
= vm_map_switch(map
);
4898 if (copyout((char *)copy
->cpy_kdata
, (char *)*addr
,
4900 return(KERN_INVALID_ADDRESS
);
4903 (void) vm_map_switch(oldmap
);
4904 vm_map_deallocate(map
);
4907 kfree((vm_offset_t
)copy
, copy
->cpy_kalloc_size
);
/*
 *	Macro:		vm_map_copy_insert
 *
 *	Description:
 *		Link a copy chain ("copy") into a map at the
 *		specified location (after "where").
 *	Side effects:
 *		The copy chain is destroyed.
 *	Warning:
 *		The arguments are evaluated multiple times.
 */
#define	vm_map_copy_insert(map, where, copy)				\
MACRO_BEGIN								\
	vm_map_t VMCI_map;						\
	vm_map_entry_t VMCI_where;					\
	vm_map_copy_t VMCI_copy;					\
	VMCI_map = (map);						\
	VMCI_where = (where);						\
	VMCI_copy = (copy);						\
	((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
		->vme_next = (VMCI_where->vme_next);			\
	((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy))	\
		->vme_prev = VMCI_where;				\
	VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries;		\
	UPDATE_FIRST_FREE(VMCI_map, VMCI_map->first_free);		\
	zfree(vm_map_copy_zone, (vm_offset_t) VMCI_copy);		\
MACRO_END
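/*
 * A user-space sketch of the pointer surgery in vm_map_copy_insert(): splice
 * a whole donated chain of doubly linked nodes into a destination ring right
 * after "where".  struct dnode and splice_after() are hypothetical; only the
 * linkage pattern mirrors the macro above.
 */
#if 0	/* illustrative sketch only; not part of the kernel build */
struct dnode {
	struct dnode *next;
	struct dnode *prev;
};

/*
 * Insert the chain (first .. last) after 'where'.  After the splice the
 * donated chain belongs to the destination list, as with the copy object.
 */
static void
splice_after(struct dnode *where, struct dnode *first, struct dnode *last)
{
	last->next        = where->next;	/* chain tail -> old successor */
	where->next->prev = last;
	where->next       = first;		/* 'where' -> chain head */
	first->prev       = where;
}
#endif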
4941 * Routine: vm_map_copyout
4944 * Copy out a copy chain ("copy") into newly-allocated
4945 * space in the destination map.
4947 * If successful, consumes the copy object.
4948 * Otherwise, the caller is responsible for it.
4952 register vm_map_t dst_map
,
4953 vm_offset_t
*dst_addr
, /* OUT */
4954 register vm_map_copy_t copy
)
4957 vm_size_t adjustment
;
4959 vm_object_offset_t vm_copy_start
;
4960 vm_map_entry_t last
;
4962 vm_map_entry_t entry
;
4965 * Check for null copy object.
4968 if (copy
== VM_MAP_COPY_NULL
) {
4970 return(KERN_SUCCESS
);
4974 * Check for special copy object, created
4975 * by vm_map_copyin_object.
4978 if (copy
->type
== VM_MAP_COPY_OBJECT
) {
4979 vm_object_t object
= copy
->cpy_object
;
4981 vm_object_offset_t offset
;
4983 offset
= trunc_page_64(copy
->offset
);
4984 size
= round_page_32(copy
->size
+
4985 (vm_size_t
)(copy
->offset
- offset
));
4987 kr
= vm_map_enter(dst_map
, dst_addr
, size
,
4988 (vm_offset_t
) 0, TRUE
,
4989 object
, offset
, FALSE
,
4990 VM_PROT_DEFAULT
, VM_PROT_ALL
,
4991 VM_INHERIT_DEFAULT
);
4992 if (kr
!= KERN_SUCCESS
)
4994 /* Account for non-pagealigned copy object */
4995 *dst_addr
+= (vm_offset_t
)(copy
->offset
- offset
);
4996 zfree(vm_map_copy_zone
, (vm_offset_t
) copy
);
4997 return(KERN_SUCCESS
);
5001 * Check for special kernel buffer allocated
5002 * by new_ipc_kmsg_copyin.
5005 if (copy
->type
== VM_MAP_COPY_KERNEL_BUFFER
) {
5006 return(vm_map_copyout_kernel_buffer(dst_map
, dst_addr
,
5011 * Find space for the data
5014 vm_copy_start
= trunc_page_64(copy
->offset
);
5015 size
= round_page_32((vm_size_t
)copy
->offset
+ copy
->size
)
5020 vm_map_lock(dst_map
);
5021 assert(first_free_is_valid(dst_map
));
5022 start
= ((last
= dst_map
->first_free
) == vm_map_to_entry(dst_map
)) ?
5023 vm_map_min(dst_map
) : last
->vme_end
;
5026 vm_map_entry_t next
= last
->vme_next
;
5027 vm_offset_t end
= start
+ size
;
5029 if ((end
> dst_map
->max_offset
) || (end
< start
)) {
5030 if (dst_map
->wait_for_space
) {
5031 if (size
<= (dst_map
->max_offset
- dst_map
->min_offset
)) {
5032 assert_wait((event_t
) dst_map
,
5033 THREAD_INTERRUPTIBLE
);
5034 vm_map_unlock(dst_map
);
5035 thread_block((void (*)(void))0);
5039 vm_map_unlock(dst_map
);
5040 return(KERN_NO_SPACE
);
5043 if ((next
== vm_map_to_entry(dst_map
)) ||
5044 (next
->vme_start
>= end
))
5048 start
= last
->vme_end
;
5052 * Since we're going to just drop the map
5053 * entries from the copy into the destination
5054 * map, they must come from the same pool.
5057 if (copy
->cpy_hdr
.entries_pageable
!= dst_map
->hdr
.entries_pageable
) {
5059 * Mismatches occur when dealing with the default
5063 vm_map_entry_t next
, new;
5066 * Find the zone that the copies were allocated from
5068 old_zone
= (copy
->cpy_hdr
.entries_pageable
)
5070 : vm_map_kentry_zone
;
5071 entry
= vm_map_copy_first_entry(copy
);
5074 * Reinitialize the copy so that vm_map_copy_entry_link
5077 copy
->cpy_hdr
.nentries
= 0;
5078 copy
->cpy_hdr
.entries_pageable
= dst_map
->hdr
.entries_pageable
;
5079 vm_map_copy_first_entry(copy
) =
5080 vm_map_copy_last_entry(copy
) =
5081 vm_map_copy_to_entry(copy
);
5086 while (entry
!= vm_map_copy_to_entry(copy
)) {
5087 new = vm_map_copy_entry_create(copy
);
5088 vm_map_entry_copy_full(new, entry
);
5089 new->use_pmap
= FALSE
; /* clr address space specifics */
5090 vm_map_copy_entry_link(copy
,
5091 vm_map_copy_last_entry(copy
),
5093 next
= entry
->vme_next
;
5094 zfree(old_zone
, (vm_offset_t
) entry
);
5100 * Adjust the addresses in the copy chain, and
5101 * reset the region attributes.
5104 adjustment
= start
- vm_copy_start
;
5105 for (entry
= vm_map_copy_first_entry(copy
);
5106 entry
!= vm_map_copy_to_entry(copy
);
5107 entry
= entry
->vme_next
) {
5108 entry
->vme_start
+= adjustment
;
5109 entry
->vme_end
+= adjustment
;
5111 entry
->inheritance
= VM_INHERIT_DEFAULT
;
5112 entry
->protection
= VM_PROT_DEFAULT
;
5113 entry
->max_protection
= VM_PROT_ALL
;
5114 entry
->behavior
= VM_BEHAVIOR_DEFAULT
;
5117 * If the entry is now wired,
5118 * map the pages into the destination map.
5120 if (entry
->wired_count
!= 0) {
5121 register vm_offset_t va
;
5122 vm_object_offset_t offset
;
5123 register vm_object_t object
;
5125 object
= entry
->object
.vm_object
;
5126 offset
= entry
->offset
;
5127 va
= entry
->vme_start
;
5129 pmap_pageable(dst_map
->pmap
,
5134 while (va
< entry
->vme_end
) {
5135 register vm_page_t m
;
5138 * Look up the page in the object.
5139 * Assert that the page will be found in the
5142 * the object was newly created by
5143 * vm_object_copy_slowly, and has
5144 * copies of all of the pages from
5147 * the object was moved from the old
5148 * map entry; because the old map
5149 * entry was wired, all of the pages
5150 * were in the top-level object.
5151 * (XXX not true if we wire pages for
5154 vm_object_lock(object
);
5155 vm_object_paging_begin(object
);
5157 m
= vm_page_lookup(object
, offset
);
5158 if (m
== VM_PAGE_NULL
|| m
->wire_count
== 0 ||
5160 panic("vm_map_copyout: wiring 0x%x", m
);
5163 vm_object_unlock(object
);
5165 PMAP_ENTER(dst_map
->pmap
, va
, m
, entry
->protection
,
5167 (m
->object
->wimg_bits
))
5171 vm_object_lock(object
);
5172 PAGE_WAKEUP_DONE(m
);
5173 /* the page is wired, so we don't have to activate */
5174 vm_object_paging_end(object
);
5175 vm_object_unlock(object
);
5177 offset
+= PAGE_SIZE_64
;
5181 else if (size
<= vm_map_aggressive_enter_max
) {
5183 register vm_offset_t va
;
5184 vm_object_offset_t offset
;
5185 register vm_object_t object
;
5188 object
= entry
->object
.vm_object
;
5189 if (object
!= VM_OBJECT_NULL
) {
5191 offset
= entry
->offset
;
5192 va
= entry
->vme_start
;
5193 while (va
< entry
->vme_end
) {
5194 register vm_page_t m
;
5197 * Look up the page in the object.
5198 * Assert that the page will be found
5199 * in the top object if at all...
5201 vm_object_lock(object
);
5202 vm_object_paging_begin(object
);
5204 if (((m
= vm_page_lookup(object
,
5207 !m
->busy
&& !m
->fictitious
&&
5208 !m
->absent
&& !m
->error
) {
5210 vm_object_unlock(object
);
5212 /* honor cow obligations */
5213 prot
= entry
->protection
;
5214 if (entry
->needs_copy
)
5215 prot
&= ~VM_PROT_WRITE
;
5217 PMAP_ENTER(dst_map
->pmap
, va
,
5220 (m
->object
->wimg_bits
))
5224 vm_object_lock(object
);
5225 vm_page_lock_queues();
5226 if (!m
->active
&& !m
->inactive
)
5227 vm_page_activate(m
);
5228 vm_page_unlock_queues();
5229 PAGE_WAKEUP_DONE(m
);
5231 vm_object_paging_end(object
);
5232 vm_object_unlock(object
);
5234 offset
+= PAGE_SIZE_64
;
5242 * Correct the page alignment for the result
5245 *dst_addr
= start
+ (copy
->offset
- vm_copy_start
);
5248 * Update the hints and the map size
5251 SAVE_HINT(dst_map
, vm_map_copy_last_entry(copy
));
5253 dst_map
->size
+= size
;
5259 vm_map_copy_insert(dst_map
, last
, copy
);
5261 vm_map_unlock(dst_map
);
5264 * XXX If wiring_required, call vm_map_pageable
5267 return(KERN_SUCCESS
);
5270 boolean_t vm_map_aggressive_enter
; /* not used yet */
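/*
 * A sketch of the first-fit search vm_map_copyout() performs above: walk the
 * sorted region list starting at the first free spot and stop at the first
 * gap large enough for "size".  The range[]/find_gap() names are hypothetical
 * and the array stands in for the map's linked entry list.
 */
#if 0	/* illustrative sketch only; not part of the kernel build */
#include <stddef.h>
#include <stdint.h>

struct range { uint32_t start, end; };	/* an allocated [start, end) region */

/*
 * Return a start address for 'size' bytes, or 0 if no gap fits
 * (assumes map_min > 0, so 0 can serve as the failure value).
 */
static uint32_t
find_gap(const struct range *sorted, size_t n, uint32_t map_min,
	 uint32_t map_max, uint32_t size)
{
	uint32_t start = (n == 0) ? map_min : sorted[0].end;

	for (size_t i = 1; ; i++) {
		uint32_t end = start + size;
		if (end > map_max || end < start)
			return 0;		/* no space, or wrapped around */
		if (i >= n || sorted[i].start >= end)
			return start;		/* gap is big enough */
		start = sorted[i].end;		/* skip to the next hole */
	}
}
#endif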
5274 * Routine: vm_map_copyin
5277 * Copy the specified region (src_addr, len) from the
5278 * source address space (src_map), possibly removing
5279 * the region from the source address space (src_destroy).
5282 * A vm_map_copy_t object (copy_result), suitable for
5283 * insertion into another address space (using vm_map_copyout),
5284 * copying over another address space region (using
5285 * vm_map_copy_overwrite). If the copy is unused, it
5286 * should be destroyed (using vm_map_copy_discard).
5288 * In/out conditions:
5289 * The source map should not be locked on entry.
5292 typedef struct submap_map
{
5293 vm_map_t parent_map
;
5294 vm_offset_t base_start
;
5295 vm_offset_t base_end
;
5296 struct submap_map
*next
;
5300 vm_map_copyin_common(
5302 vm_offset_t src_addr
,
5304 boolean_t src_destroy
,
5305 boolean_t src_volatile
,
5306 vm_map_copy_t
*copy_result
, /* OUT */
5307 boolean_t use_maxprot
)
5309 extern int msg_ool_size_small
;
5311 vm_map_entry_t tmp_entry
; /* Result of last map lookup --
5312 * in multi-level lookup, this
5313 * entry contains the actual
5317 vm_map_entry_t new_entry
= VM_MAP_ENTRY_NULL
; /* Map entry for copy */
5319 vm_offset_t src_start
; /* Start of current entry --
5320 * where copy is taking place now
5322 vm_offset_t src_end
; /* End of entire region to be
5324 vm_offset_t base_start
; /* submap fields to save offsets */
5325 /* in original map */
5326 vm_offset_t base_end
;
5327 vm_map_t base_map
=src_map
;
5328 vm_map_entry_t base_entry
;
5329 boolean_t map_share
=FALSE
;
5330 submap_map_t
*parent_maps
= NULL
;
5333 vm_map_copy_t copy
; /* Resulting copy */
5334 vm_offset_t copy_addr
;
5337 * Check for copies of zero bytes.
5341 *copy_result
= VM_MAP_COPY_NULL
;
5342 return(KERN_SUCCESS
);
5346 * If the copy is sufficiently small, use a kernel buffer instead
5347 * of making a virtual copy. The theory being that the cost of
5348 * setting up VM (and taking C-O-W faults) dominates the copy costs
5349 * for small regions.
5351 if ((len
< msg_ool_size_small
) && !use_maxprot
)
5352 return vm_map_copyin_kernel_buffer(src_map
, src_addr
, len
,
5353 src_destroy
, copy_result
);
5356 * Compute start and end of region
5359 src_start
= trunc_page_32(src_addr
);
5360 src_end
= round_page_32(src_addr
+ len
);
5362 XPR(XPR_VM_MAP
, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t
)src_map
, src_addr
, len
, src_destroy
, 0);
5365 * Check that the end address doesn't overflow
5368 if (src_end
<= src_start
)
5369 if ((src_end
< src_start
) || (src_start
!= 0))
5370 return(KERN_INVALID_ADDRESS
);
5373 * Allocate a header element for the list.
5375 * Use the start and end in the header to
5376 * remember the endpoints prior to rounding.
5379 copy
= (vm_map_copy_t
) zalloc(vm_map_copy_zone
);
5380 vm_map_copy_first_entry(copy
) =
5381 vm_map_copy_last_entry(copy
) = vm_map_copy_to_entry(copy
);
5382 copy
->type
= VM_MAP_COPY_ENTRY_LIST
;
5383 copy
->cpy_hdr
.nentries
= 0;
5384 copy
->cpy_hdr
.entries_pageable
= TRUE
;
5386 copy
->offset
= src_addr
;
5389 new_entry
= vm_map_copy_entry_create(copy
);
5393 vm_map_unlock(src_map); \
5394 if(src_map != base_map) \
5395 vm_map_deallocate(src_map); \
5396 if (new_entry != VM_MAP_ENTRY_NULL) \
5397 vm_map_copy_entry_dispose(copy,new_entry); \
5398 vm_map_copy_discard(copy); \
5400 submap_map_t *ptr; \
5402 for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { \
5403 parent_maps=parent_maps->next; \
5404 if (ptr->parent_map != base_map) \
5405 vm_map_deallocate(ptr->parent_map); \
5406 kfree((vm_offset_t)ptr, sizeof(submap_map_t)); \
5413 * Find the beginning of the region.
5416 vm_map_lock(src_map
);
5418 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
))
5419 RETURN(KERN_INVALID_ADDRESS
);
5420 if(!tmp_entry
->is_sub_map
) {
5421 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5423 /* set for later submap fix-up */
5424 copy_addr
= src_start
;
5427 * Go through entries until we get to the end.
5432 vm_map_entry_t src_entry
= tmp_entry
; /* Top-level entry */
5433 vm_size_t src_size
; /* Size of source
5434 * map entry (in both
5439 vm_object_t src_object
; /* Object to copy */
5440 vm_object_offset_t src_offset
;
5442 boolean_t src_needs_copy
; /* Should source map
5444 * for copy-on-write?
5447 boolean_t new_entry_needs_copy
; /* Will new entry be COW? */
5449 boolean_t was_wired
; /* Was source wired? */
5450 vm_map_version_t version
; /* Version before locks
5451 * dropped to make copy
5453 kern_return_t result
; /* Return value from
5454 * copy_strategically.
5456 while(tmp_entry
->is_sub_map
) {
5457 vm_size_t submap_len
;
5460 ptr
= (submap_map_t
*)kalloc(sizeof(submap_map_t
));
5461 ptr
->next
= parent_maps
;
5463 ptr
->parent_map
= src_map
;
5464 ptr
->base_start
= src_start
;
5465 ptr
->base_end
= src_end
;
5466 submap_len
= tmp_entry
->vme_end
- src_start
;
5467 if(submap_len
> (src_end
-src_start
))
5468 submap_len
= src_end
-src_start
;
5469 ptr
->base_start
+= submap_len
;
5471 src_start
-= tmp_entry
->vme_start
;
5472 src_start
+= tmp_entry
->offset
;
5473 src_end
= src_start
+ submap_len
;
5474 src_map
= tmp_entry
->object
.sub_map
;
5475 vm_map_lock(src_map
);
5476 /* keep an outstanding reference for all maps in */
5477 /* the parents tree except the base map */
5478 vm_map_reference(src_map
);
5479 vm_map_unlock(ptr
->parent_map
);
5480 if (!vm_map_lookup_entry(
5481 src_map
, src_start
, &tmp_entry
))
5482 RETURN(KERN_INVALID_ADDRESS
);
5484 if(!tmp_entry
->is_sub_map
)
5485 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5486 src_entry
= tmp_entry
;
5488 if ((tmp_entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
5489 (tmp_entry
->object
.vm_object
->phys_contiguous
)) {
/* This is not supported for now. In future */
5491 /* we will need to detect the phys_contig */
5492 /* condition and then upgrade copy_slowly */
5493 /* to do physical copy from the device mem */
5494 /* based object. We can piggy-back off of */
5495 /* the was wired boolean to set-up the */
5496 /* proper handling */
5497 RETURN(KERN_PROTECTION_FAILURE
);
5500 * Create a new address map entry to hold the result.
5501 * Fill in the fields from the appropriate source entries.
5502 * We must unlock the source map to do this if we need
5503 * to allocate a map entry.
5505 if (new_entry
== VM_MAP_ENTRY_NULL
) {
5506 version
.main_timestamp
= src_map
->timestamp
;
5507 vm_map_unlock(src_map
);
5509 new_entry
= vm_map_copy_entry_create(copy
);
5511 vm_map_lock(src_map
);
5512 if ((version
.main_timestamp
+ 1) != src_map
->timestamp
) {
5513 if (!vm_map_lookup_entry(src_map
, src_start
,
5515 RETURN(KERN_INVALID_ADDRESS
);
5517 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5518 continue; /* restart w/ new tmp_entry */
5523 * Verify that the region can be read.
5525 if (((src_entry
->protection
& VM_PROT_READ
) == VM_PROT_NONE
&&
5527 (src_entry
->max_protection
& VM_PROT_READ
) == 0)
5528 RETURN(KERN_PROTECTION_FAILURE
);
5531 * Clip against the endpoints of the entire region.
5534 vm_map_clip_end(src_map
, src_entry
, src_end
);
5536 src_size
= src_entry
->vme_end
- src_start
;
5537 src_object
= src_entry
->object
.vm_object
;
5538 src_offset
= src_entry
->offset
;
5539 was_wired
= (src_entry
->wired_count
!= 0);
5541 vm_map_entry_copy(new_entry
, src_entry
);
5542 new_entry
->use_pmap
= FALSE
; /* clr address space specifics */
5545 * Attempt non-blocking copy-on-write optimizations.
5549 (src_object
== VM_OBJECT_NULL
||
5550 (src_object
->internal
&& !src_object
->true_share
5553 * If we are destroying the source, and the object
5554 * is internal, we can move the object reference
5555 * from the source to the copy. The copy is
5556 * copy-on-write only if the source is.
5557 * We make another reference to the object, because
5558 * destroying the source entry will deallocate it.
5560 vm_object_reference(src_object
);
5563 * Copy is always unwired. vm_map_copy_entry
5564 * set its wired count to zero.
5567 goto CopySuccessful
;
5572 XPR(XPR_VM_MAP
, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
5573 src_object
, new_entry
, new_entry
->object
.vm_object
,
5575 if ((src_object
== VM_OBJECT_NULL
||
5576 (!was_wired
&& !map_share
&& !tmp_entry
->is_shared
)) &&
5577 vm_object_copy_quickly(
5578 &new_entry
->object
.vm_object
,
5582 &new_entry_needs_copy
)) {
5584 new_entry
->needs_copy
= new_entry_needs_copy
;
5587 * Handle copy-on-write obligations
5590 if (src_needs_copy
&& !tmp_entry
->needs_copy
) {
5591 vm_object_pmap_protect(
5595 (src_entry
->is_shared
?
5598 src_entry
->vme_start
,
5599 src_entry
->protection
&
5601 tmp_entry
->needs_copy
= TRUE
;
5605 * The map has never been unlocked, so it's safe
5606 * to move to the next entry rather than doing
5610 goto CopySuccessful
;
5614 * Take an object reference, so that we may
5615 * release the map lock(s).
5618 assert(src_object
!= VM_OBJECT_NULL
);
5619 vm_object_reference(src_object
);
5622 * Record the timestamp for later verification.
5626 version
.main_timestamp
= src_map
->timestamp
;
5627 vm_map_unlock(src_map
); /* Increments timestamp once! */
5635 vm_object_lock(src_object
);
5636 result
= vm_object_copy_slowly(
5641 &new_entry
->object
.vm_object
);
5642 new_entry
->offset
= 0;
5643 new_entry
->needs_copy
= FALSE
;
5646 else if (src_object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
&&
5647 (tmp_entry
->is_shared
|| map_share
)) {
5648 vm_object_t new_object
;
5650 vm_object_lock(src_object
);
5651 new_object
= vm_object_copy_delayed(
5655 if (new_object
== VM_OBJECT_NULL
)
5658 new_entry
->object
.vm_object
= new_object
;
5659 new_entry
->needs_copy
= TRUE
;
5660 result
= KERN_SUCCESS
;
5663 result
= vm_object_copy_strategically(src_object
,
5666 &new_entry
->object
.vm_object
,
5668 &new_entry_needs_copy
);
5670 new_entry
->needs_copy
= new_entry_needs_copy
;
5673 if (result
!= KERN_SUCCESS
&&
5674 result
!= KERN_MEMORY_RESTART_COPY
) {
5675 vm_map_lock(src_map
);
5680 * Throw away the extra reference
5683 vm_object_deallocate(src_object
);
5686 * Verify that the map has not substantially
5687 * changed while the copy was being made.
5690 vm_map_lock(src_map
);
5692 if ((version
.main_timestamp
+ 1) == src_map
->timestamp
)
5693 goto VerificationSuccessful
;
5696 * Simple version comparison failed.
5698 * Retry the lookup and verify that the
5699 * same object/offset are still present.
5701 * [Note: a memory manager that colludes with
5702 * the calling task can detect that we have
5703 * cheated. While the map was unlocked, the
5704 * mapping could have been changed and restored.]
5707 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
)) {
5708 RETURN(KERN_INVALID_ADDRESS
);
5711 src_entry
= tmp_entry
;
5712 vm_map_clip_start(src_map
, src_entry
, src_start
);
5714 if ((src_entry
->protection
& VM_PROT_READ
== VM_PROT_NONE
&&
5716 src_entry
->max_protection
& VM_PROT_READ
== 0)
5717 goto VerificationFailed
;
5719 if (src_entry
->vme_end
< new_entry
->vme_end
)
5720 src_size
= (new_entry
->vme_end
= src_entry
->vme_end
) - src_start
;
5722 if ((src_entry
->object
.vm_object
!= src_object
) ||
5723 (src_entry
->offset
!= src_offset
) ) {
5726 * Verification failed.
5728 * Start over with this top-level entry.
5731 VerificationFailed
: ;
5733 vm_object_deallocate(new_entry
->object
.vm_object
);
5734 tmp_entry
= src_entry
;
5739 * Verification succeeded.
5742 VerificationSuccessful
: ;
5744 if (result
== KERN_MEMORY_RESTART_COPY
)
5754 * Link in the new copy entry.
5757 vm_map_copy_entry_link(copy
, vm_map_copy_last_entry(copy
),
5761 * Determine whether the entire region
5764 src_start
= new_entry
->vme_end
;
5765 new_entry
= VM_MAP_ENTRY_NULL
;
5766 while ((src_start
>= src_end
) && (src_end
!= 0)) {
5767 if (src_map
!= base_map
) {
5771 assert(ptr
!= NULL
);
5772 parent_maps
= parent_maps
->next
;
5773 vm_map_unlock(src_map
);
5774 vm_map_deallocate(src_map
);
5775 vm_map_lock(ptr
->parent_map
);
5776 src_map
= ptr
->parent_map
;
5777 src_start
= ptr
->base_start
;
5778 src_end
= ptr
->base_end
;
5779 if ((src_end
> src_start
) &&
5780 !vm_map_lookup_entry(
5781 src_map
, src_start
, &tmp_entry
))
5782 RETURN(KERN_INVALID_ADDRESS
);
5783 kfree((vm_offset_t
)ptr
, sizeof(submap_map_t
));
5784 if(parent_maps
== NULL
)
5786 src_entry
= tmp_entry
->vme_prev
;
5790 if ((src_start
>= src_end
) && (src_end
!= 0))
5794 * Verify that there are no gaps in the region
5797 tmp_entry
= src_entry
->vme_next
;
5798 if ((tmp_entry
->vme_start
!= src_start
) ||
5799 (tmp_entry
== vm_map_to_entry(src_map
)))
5800 RETURN(KERN_INVALID_ADDRESS
);
5804 * If the source should be destroyed, do it now, since the
5805 * copy was successful.
5808 (void) vm_map_delete(src_map
,
5809 trunc_page_32(src_addr
),
5811 (src_map
== kernel_map
) ?
5812 VM_MAP_REMOVE_KUNWIRE
:
5816 vm_map_unlock(src_map
);
5818 /* Fix-up start and end points in copy. This is necessary */
5819 /* when the various entries in the copy object were picked */
5820 /* up from different sub-maps */
5822 tmp_entry
= vm_map_copy_first_entry(copy
);
5823 while (tmp_entry
!= vm_map_copy_to_entry(copy
)) {
5824 tmp_entry
->vme_end
= copy_addr
+
5825 (tmp_entry
->vme_end
- tmp_entry
->vme_start
);
5826 tmp_entry
->vme_start
= copy_addr
;
5827 copy_addr
+= tmp_entry
->vme_end
- tmp_entry
->vme_start
;
5828 tmp_entry
= (struct vm_map_entry
*)tmp_entry
->vme_next
;
5831 *copy_result
= copy
;
5832 return(KERN_SUCCESS
);
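/*
 * A sketch of the submap bookkeeping used by vm_map_copyin_common() above:
 * each time the copy descends into a submap, the parent map and the base
 * range are pushed on a small singly linked stack, and they are popped again
 * once the submap has been fully traversed.  The frame/push/pop names are
 * hypothetical.
 */
#if 0	/* illustrative sketch only; not part of the kernel build */
#include <stdlib.h>

struct frame {
	void         *parent_map;	/* map we descended from */
	unsigned long base_start;	/* where to resume in the parent */
	unsigned long base_end;
	struct frame *next;
};

static int
push(struct frame **top, void *parent, unsigned long start, unsigned long end)
{
	struct frame *f = malloc(sizeof(*f));
	if (f == NULL)
		return -1;
	f->parent_map = parent;
	f->base_start = start;
	f->base_end   = end;
	f->next       = *top;		/* new frame becomes the stack top */
	*top = f;
	return 0;
}

static struct frame *
pop(struct frame **top)
{
	struct frame *f = *top;		/* caller frees after restoring state */
	if (f != NULL)
		*top = f->next;
	return f;
}
#endif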
/*
 *	vm_map_copyin_object:
 *
 *	Create a copy object from an object.
 *	Our caller donates an object reference.
 */
kern_return_t
vm_map_copyin_object(
	vm_object_t		object,
	vm_object_offset_t	offset,		/* offset of region in object */
	vm_object_size_t	size,		/* size of region in object */
	vm_map_copy_t		*copy_result)	/* OUT */
{
	vm_map_copy_t	copy;			/* Resulting copy */

	/*
	 *	We drop the object into a special copy object
	 *	that contains the object directly.
	 */

	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	copy->type = VM_MAP_COPY_OBJECT;
	copy->cpy_object = object;
	copy->cpy_index = 0;
	copy->offset = offset;
	copy->size = size;

	*copy_result = copy;
	return(KERN_SUCCESS);
}
5872 vm_map_entry_t old_entry
,
5876 vm_map_entry_t new_entry
;
5877 kern_return_t result
;
5880 * New sharing code. New map entry
5881 * references original object. Internal
5882 * objects use asynchronous copy algorithm for
5883 * future copies. First make sure we have
5884 * the right object. If we need a shadow,
5885 * or someone else already has one, then
5886 * make a new shadow and share it.
5889 object
= old_entry
->object
.vm_object
;
5890 if (old_entry
->is_sub_map
) {
5891 assert(old_entry
->wired_count
== 0);
5893 if(old_entry
->use_pmap
) {
5894 result
= pmap_nest(new_map
->pmap
,
5895 (old_entry
->object
.sub_map
)->pmap
,
5896 (addr64_t
)old_entry
->vme_start
,
5897 (addr64_t
)old_entry
->vme_start
,
5898 (uint64_t)(old_entry
->vme_end
- old_entry
->vme_start
));
5900 panic("vm_map_fork_share: pmap_nest failed!");
5903 } else if (object
== VM_OBJECT_NULL
) {
5904 object
= vm_object_allocate((vm_size_t
)(old_entry
->vme_end
-
5905 old_entry
->vme_start
));
5906 old_entry
->offset
= 0;
5907 old_entry
->object
.vm_object
= object
;
5908 assert(!old_entry
->needs_copy
);
5909 } else if (object
->copy_strategy
!=
5910 MEMORY_OBJECT_COPY_SYMMETRIC
) {
5913 * We are already using an asymmetric
5914 * copy, and therefore we already have
5918 assert(! old_entry
->needs_copy
);
5920 else if (old_entry
->needs_copy
|| /* case 1 */
5921 object
->shadowed
|| /* case 2 */
5922 (!object
->true_share
&& /* case 3 */
5923 !old_entry
->is_shared
&&
5925 (vm_size_t
)(old_entry
->vme_end
-
5926 old_entry
->vme_start
)))) {
5929 * We need to create a shadow.
5930 * There are three cases here.
5931 * In the first case, we need to
5932 * complete a deferred symmetrical
5933 * copy that we participated in.
5934 * In the second and third cases,
5935 * we need to create the shadow so
5936 * that changes that we make to the
5937 * object do not interfere with
5938 * any symmetrical copies which
* have occurred (case 2) or which
5940 * might occur (case 3).
5942 * The first case is when we had
5943 * deferred shadow object creation
5944 * via the entry->needs_copy mechanism.
5945 * This mechanism only works when
5946 * only one entry points to the source
5947 * object, and we are about to create
5948 * a second entry pointing to the
5949 * same object. The problem is that
5950 * there is no way of mapping from
5951 * an object to the entries pointing
5952 * to it. (Deferred shadow creation
* works with one entry because it occurs
5954 * at fault time, and we walk from the
5955 * entry to the object when handling
5958 * The second case is when the object
5959 * to be shared has already been copied
5960 * with a symmetric copy, but we point
5961 * directly to the object without
5962 * needs_copy set in our entry. (This
5963 * can happen because different ranges
5964 * of an object can be pointed to by
5965 * different entries. In particular,
5966 * a single entry pointing to an object
5967 * can be split by a call to vm_inherit,
5968 * which, combined with task_create, can
5969 * result in the different entries
5970 * having different needs_copy values.)
5971 * The shadowed flag in the object allows
5972 * us to detect this case. The problem
5973 * with this case is that if this object
5974 * has or will have shadows, then we
5975 * must not perform an asymmetric copy
5976 * of this object, since such a copy
5977 * allows the object to be changed, which
5978 * will break the previous symmetrical
5979 * copies (which rely upon the object
5980 * not changing). In a sense, the shadowed
5981 * flag says "don't change this object".
5982 * We fix this by creating a shadow
5983 * object for this object, and sharing
5984 * that. This works because we are free
5985 * to change the shadow object (and thus
5986 * to use an asymmetric copy strategy);
5987 * this is also semantically correct,
5988 * since this object is temporary, and
5989 * therefore a copy of the object is
5990 * as good as the object itself. (This
5991 * is not true for permanent objects,
5992 * since the pager needs to see changes,
5993 * which won't happen if the changes
5994 * are made to a copy.)
5996 * The third case is when the object
5997 * to be shared has parts sticking
5998 * outside of the entry we're working
5999 * with, and thus may in the future
6000 * be subject to a symmetrical copy.
6001 * (This is a preemptive version of
6005 assert(!(object
->shadowed
&& old_entry
->is_shared
));
6006 vm_object_shadow(&old_entry
->object
.vm_object
,
6008 (vm_size_t
) (old_entry
->vme_end
-
6009 old_entry
->vme_start
));
6012 * If we're making a shadow for other than
6013 * copy on write reasons, then we have
6014 * to remove write permission.
6017 if (!old_entry
->needs_copy
&&
6018 (old_entry
->protection
& VM_PROT_WRITE
)) {
6019 if(old_map
->mapped
) {
6020 vm_object_pmap_protect(
6021 old_entry
->object
.vm_object
,
6023 (old_entry
->vme_end
-
6024 old_entry
->vme_start
),
6026 old_entry
->vme_start
,
6027 old_entry
->protection
& ~VM_PROT_WRITE
);
6029 pmap_protect(old_map
->pmap
,
6030 old_entry
->vme_start
,
6032 old_entry
->protection
& ~VM_PROT_WRITE
);
6036 old_entry
->needs_copy
= FALSE
;
6037 object
= old_entry
->object
.vm_object
;
6041 * If object was using a symmetric copy strategy,
6042 * change its copy strategy to the default
6043 * asymmetric copy strategy, which is copy_delay
6044 * in the non-norma case and copy_call in the
6045 * norma case. Bump the reference count for the
6049 if(old_entry
->is_sub_map
) {
6050 vm_map_lock(old_entry
->object
.sub_map
);
6051 vm_map_reference(old_entry
->object
.sub_map
);
6052 vm_map_unlock(old_entry
->object
.sub_map
);
6054 vm_object_lock(object
);
6055 object
->ref_count
++;
6056 vm_object_res_reference(object
);
6057 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) {
6058 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
6060 vm_object_unlock(object
);
6064 * Clone the entry, using object ref from above.
6065 * Mark both entries as shared.
6068 new_entry
= vm_map_entry_create(new_map
);
6069 vm_map_entry_copy(new_entry
, old_entry
);
6070 old_entry
->is_shared
= TRUE
;
6071 new_entry
->is_shared
= TRUE
;
6074 * Insert the entry into the new map -- we
6075 * know we're inserting at the end of the new
6079 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
), new_entry
);
6082 * Update the physical map
6085 if (old_entry
->is_sub_map
) {
6086 /* Bill Angell pmap support goes here */
6088 pmap_copy(new_map
->pmap
, old_map
->pmap
, new_entry
->vme_start
,
6089 old_entry
->vme_end
- old_entry
->vme_start
,
6090 old_entry
->vme_start
);
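/*
 * A condensed sketch of the three-way test discussed in the long comment in
 * vm_map_fork_share() above: a shadow object is created when a deferred
 * symmetric copy is pending (case 1), when the object has already been
 * shadowed (case 2), or when an unshared temporary object only partially
 * covered by this entry may still be copied symmetrically (case 3).  The
 * struct fields below are hypothetical simplifications.
 */
#if 0	/* illustrative sketch only; not part of the kernel build */
#include <stdbool.h>
#include <stddef.h>

struct sk_object { bool shadowed; bool true_share; size_t size; };
struct sk_entry  {
	bool needs_copy;
	bool is_shared;
	size_t len;			/* vme_end - vme_start */
	struct sk_object *object;
};

static bool
needs_shadow_for_share(const struct sk_entry *e)
{
	return e->needs_copy					/* case 1 */
	    || e->object->shadowed				/* case 2 */
	    || (!e->object->true_share && !e->is_shared		/* case 3 */
		&& e->object->size > e->len);
}
#endif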
boolean_t
vm_map_fork_copy(
	vm_map_t	old_map,
	vm_map_entry_t	*old_entry_p,
	vm_map_t	new_map)
{
	vm_map_entry_t old_entry = *old_entry_p;
	vm_size_t entry_size = old_entry->vme_end - old_entry->vme_start;
	vm_offset_t start = old_entry->vme_start;
	vm_map_copy_t copy;
	vm_map_entry_t last = vm_map_last_entry(new_map);

	vm_map_unlock(old_map);
	/*
	 *	Use maxprot version of copyin because we
	 *	care about whether this memory can ever
	 *	be accessed, not just whether it's accessible
	 *	right now.
	 */
	if (vm_map_copyin_maxprot(old_map, start, entry_size, FALSE, &copy)
	    != KERN_SUCCESS) {
		/*
		 *	The map might have changed while it
		 *	was unlocked, check it again.  Skip
		 *	any blank space or permanently
		 *	unreadable region.
		 */
		vm_map_lock(old_map);
		if (!vm_map_lookup_entry(old_map, start, &last) ||
		    (last->max_protection & VM_PROT_READ) == VM_PROT_NONE) {
			last = last->vme_next;
		}
		*old_entry_p = last;

		/*
		 * XXX	For some error returns, want to
		 * XXX	skip to the next element.  Note
		 *	that INVALID_ADDRESS and
		 *	PROTECTION_FAILURE are handled above.
		 */

		return FALSE;
	}

	/*
	 *	Insert the copy into the new map
	 */

	vm_map_copy_insert(new_map, last, copy);

	/*
	 *	Pick up the traversal at the end of
	 *	the copied region.
	 */

	vm_map_lock(old_map);
	start += entry_size;
	if (!vm_map_lookup_entry(old_map, start, &last)) {
		last = last->vme_next;
	} else {
		vm_map_clip_start(old_map, last, start);
	}
	*old_entry_p = last;

	return TRUE;
}
/*
 *	vm_map_fork:
 *
 *	Create and return a new map based on the old
 *	map, according to the inheritance values on the
 *	regions in that map.
 *
 *	The source map must not be locked.
 */
vm_map_t
vm_map_fork(
	vm_map_t	old_map)
{
	pmap_t		new_pmap = pmap_create((vm_size_t) 0);
	vm_map_t	new_map;
	vm_map_entry_t	old_entry;
	vm_size_t	new_size = 0, entry_size;
	vm_map_entry_t	new_entry;
	boolean_t	src_needs_copy;
	boolean_t	new_entry_needs_copy;

	vm_map_reference_swap(old_map);
	vm_map_lock(old_map);

	new_map = vm_map_create(new_pmap,
			old_map->min_offset,
			old_map->max_offset,
			old_map->hdr.entries_pageable);

	for (
	    old_entry = vm_map_first_entry(old_map);
	    old_entry != vm_map_to_entry(old_map);
	    ) {

		entry_size = old_entry->vme_end - old_entry->vme_start;

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			vm_map_fork_share(old_map, old_entry, new_map);
			new_size += entry_size;
			break;

		case VM_INHERIT_COPY:

			/*
			 *	Inline the copy_quickly case;
			 *	upon failure, fall back on call
			 *	to vm_map_fork_copy.
			 */

			if (old_entry->is_sub_map)
				break;
			if ((old_entry->wired_count != 0) ||
			    ((old_entry->object.vm_object != NULL) &&
			     (old_entry->object.vm_object->true_share))) {
				goto slow_vm_map_fork_copy;
			}

			new_entry = vm_map_entry_create(new_map);
			vm_map_entry_copy(new_entry, old_entry);
			/* clear address space specifics */
			new_entry->use_pmap = FALSE;

			if (! vm_object_copy_quickly(
					&new_entry->object.vm_object,
					old_entry->offset,
					(old_entry->vme_end -
						old_entry->vme_start),
					&src_needs_copy,
					&new_entry_needs_copy)) {
				vm_map_entry_dispose(new_map, new_entry);
				goto slow_vm_map_fork_copy;
			}

			/*
			 *	Handle copy-on-write obligations
			 */

			if (src_needs_copy && !old_entry->needs_copy) {
				vm_object_pmap_protect(
					old_entry->object.vm_object,
					old_entry->offset,
					(old_entry->vme_end -
						old_entry->vme_start),
					((old_entry->is_shared
						|| old_map->mapped)
						? PMAP_NULL :
						old_map->pmap),
					old_entry->vme_start,
					old_entry->protection & ~VM_PROT_WRITE);

				old_entry->needs_copy = TRUE;
			}
			new_entry->needs_copy = new_entry_needs_copy;

			/*
			 *	Insert the entry at the end
			 *	of the map.
			 */

			vm_map_entry_link(new_map, vm_map_last_entry(new_map),
					  new_entry);
			new_size += entry_size;
			break;

		slow_vm_map_fork_copy:
			if (vm_map_fork_copy(old_map, &old_entry, new_map)) {
				new_size += entry_size;
			}
			continue;
		}
		old_entry = old_entry->vme_next;
	}

	new_map->size = new_size;
	vm_map_unlock(old_map);
	vm_map_deallocate(old_map);

	return(new_map);
}
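/*
 * A compact sketch of the per-entry policy applied by vm_map_fork() above:
 * VM_INHERIT_NONE entries are skipped, VM_INHERIT_SHARE entries are shared
 * with the child, and VM_INHERIT_COPY entries are copied (copy-on-write when
 * possible).  The enum and fork_action() are hypothetical stand-ins.
 */
#if 0	/* illustrative sketch only; not part of the kernel build */
enum sk_inherit { SK_INHERIT_NONE, SK_INHERIT_SHARE, SK_INHERIT_COPY };

enum sk_action { SK_SKIP, SK_SHARE, SK_COPY };

static enum sk_action
fork_action(enum sk_inherit inh)
{
	switch (inh) {
	case SK_INHERIT_NONE:
		return SK_SKIP;		/* child gets nothing for this range */
	case SK_INHERIT_SHARE:
		return SK_SHARE;	/* parent and child share the object */
	case SK_INHERIT_COPY:
	default:
		return SK_COPY;		/* child gets a (usually COW) copy */
	}
}
#endif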
6288 * vm_map_lookup_locked:
6290 * Finds the VM object, offset, and
6291 * protection for a given virtual address in the
6292 * specified map, assuming a page fault of the
6295 * Returns the (object, offset, protection) for
6296 * this address, whether it is wired down, and whether
6297 * this map has the only reference to the data in question.
6298 * In order to later verify this lookup, a "version"
6301 * The map MUST be locked by the caller and WILL be
6302 * locked on exit. In order to guarantee the
6303 * existence of the returned object, it is returned
6306 * If a lookup is requested with "write protection"
6307 * specified, the map may be changed to perform virtual
6308 * copying operations, although the data referenced will
6312 vm_map_lookup_locked(
6313 vm_map_t
*var_map
, /* IN/OUT */
6314 register vm_offset_t vaddr
,
6315 register vm_prot_t fault_type
,
6316 vm_map_version_t
*out_version
, /* OUT */
6317 vm_object_t
*object
, /* OUT */
6318 vm_object_offset_t
*offset
, /* OUT */
6319 vm_prot_t
*out_prot
, /* OUT */
6320 boolean_t
*wired
, /* OUT */
6321 int *behavior
, /* OUT */
6322 vm_object_offset_t
*lo_offset
, /* OUT */
6323 vm_object_offset_t
*hi_offset
, /* OUT */
6326 vm_map_entry_t entry
;
6327 register vm_map_t map
= *var_map
;
6328 vm_map_t old_map
= *var_map
;
6329 vm_map_t cow_sub_map_parent
= VM_MAP_NULL
;
6330 vm_offset_t cow_parent_vaddr
;
6331 vm_offset_t old_start
;
6332 vm_offset_t old_end
;
6333 register vm_prot_t prot
;
6339 * If the map has an interesting hint, try it before calling
6340 * full blown lookup routine.
6343 mutex_lock(&map
->s_lock
);
6345 mutex_unlock(&map
->s_lock
);
6347 if ((entry
== vm_map_to_entry(map
)) ||
6348 (vaddr
< entry
->vme_start
) || (vaddr
>= entry
->vme_end
)) {
6349 vm_map_entry_t tmp_entry
;
6352 * Entry was either not a valid hint, or the vaddr
6353 * was not contained in the entry, so do a full lookup.
6355 if (!vm_map_lookup_entry(map
, vaddr
, &tmp_entry
)) {
6356 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
))
6357 vm_map_unlock(cow_sub_map_parent
);
6358 if((*pmap_map
!= map
)
6359 && (*pmap_map
!= cow_sub_map_parent
))
6360 vm_map_unlock(*pmap_map
);
6361 return KERN_INVALID_ADDRESS
;
6366 if(map
== old_map
) {
6367 old_start
= entry
->vme_start
;
6368 old_end
= entry
->vme_end
;
6372 * Handle submaps. Drop lock on upper map, submap is
6377 if (entry
->is_sub_map
) {
6378 vm_offset_t local_vaddr
;
6379 vm_offset_t end_delta
;
6380 vm_offset_t start_delta
;
6381 vm_offset_t object_start_delta
;
6382 vm_map_entry_t submap_entry
;
6383 boolean_t mapped_needs_copy
=FALSE
;
6385 local_vaddr
= vaddr
;
6387 if ((!entry
->needs_copy
) && (entry
->use_pmap
)) {
6388 /* if pmap_map equals map we unlock below */
6389 if ((*pmap_map
!= map
) &&
6390 (*pmap_map
!= cow_sub_map_parent
))
6391 vm_map_unlock(*pmap_map
);
6392 *pmap_map
= entry
->object
.sub_map
;
6395 if(entry
->needs_copy
) {
6396 if (!mapped_needs_copy
) {
6397 if (vm_map_lock_read_to_write(map
)) {
6398 vm_map_lock_read(map
);
6399 if(*pmap_map
== entry
->object
.sub_map
)
6403 vm_map_lock_read(entry
->object
.sub_map
);
6404 cow_sub_map_parent
= map
;
6405 /* reset base to map before cow object */
6406 /* this is the map which will accept */
6407 /* the new cow object */
6408 old_start
= entry
->vme_start
;
6409 old_end
= entry
->vme_end
;
6410 cow_parent_vaddr
= vaddr
;
6411 mapped_needs_copy
= TRUE
;
6413 vm_map_lock_read(entry
->object
.sub_map
);
6414 if((cow_sub_map_parent
!= map
) &&
6419 vm_map_lock_read(entry
->object
.sub_map
);
6420 /* leave map locked if it is a target */
6421 /* cow sub_map above otherwise, just */
6422 /* follow the maps down to the object */
6423 /* here we unlock knowing we are not */
6424 /* revisiting the map. */
6425 if((*pmap_map
!= map
) && (map
!= cow_sub_map_parent
))
6426 vm_map_unlock_read(map
);
6429 *var_map
= map
= entry
->object
.sub_map
;
6431 /* calculate the offset in the submap for vaddr */
6432 local_vaddr
= (local_vaddr
- entry
->vme_start
) + entry
->offset
;
6435 if(!vm_map_lookup_entry(map
, local_vaddr
, &submap_entry
)) {
6436 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
)){
6437 vm_map_unlock(cow_sub_map_parent
);
6439 if((*pmap_map
!= map
)
6440 && (*pmap_map
!= cow_sub_map_parent
)) {
6441 vm_map_unlock(*pmap_map
);
6444 return KERN_INVALID_ADDRESS
;
6446 /* find the attenuated shadow of the underlying object */
6447 /* on our target map */
6449 /* in english the submap object may extend beyond the */
6450 /* region mapped by the entry or, may only fill a portion */
6451 /* of it. For our purposes, we only care if the object */
6452 /* doesn't fill. In this case the area which will */
6453 /* ultimately be clipped in the top map will only need */
6454 /* to be as big as the portion of the underlying entry */
6455 /* which is mapped */
6456 start_delta
= submap_entry
->vme_start
> entry
->offset
?
6457 submap_entry
->vme_start
- entry
->offset
: 0;
6460 (entry
->offset
+ start_delta
+ (old_end
- old_start
)) <=
6461 submap_entry
->vme_end
?
6462 0 : (entry
->offset
+
6463 (old_end
- old_start
))
6464 - submap_entry
->vme_end
;
6466 old_start
+= start_delta
;
6467 old_end
-= end_delta
;
6469 if(submap_entry
->is_sub_map
) {
6470 entry
= submap_entry
;
6471 vaddr
= local_vaddr
;
6472 goto submap_recurse
;
6475 if(((fault_type
& VM_PROT_WRITE
) && cow_sub_map_parent
)) {
6477 vm_object_t copy_object
;
6478 vm_offset_t local_start
;
6479 vm_offset_t local_end
;
6480 boolean_t copied_slowly
= FALSE
;
6482 if (vm_map_lock_read_to_write(map
)) {
6483 vm_map_lock_read(map
);
6484 old_start
-= start_delta
;
6485 old_end
+= end_delta
;
6490 if (submap_entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6491 submap_entry
->object
.vm_object
=
6494 (submap_entry
->vme_end
6495 - submap_entry
->vme_start
));
6496 submap_entry
->offset
= 0;
6498 local_start
= local_vaddr
-
6499 (cow_parent_vaddr
- old_start
);
6500 local_end
= local_vaddr
+
6501 (old_end
- cow_parent_vaddr
);
6502 vm_map_clip_start(map
, submap_entry
, local_start
);
6503 vm_map_clip_end(map
, submap_entry
, local_end
);
6505 /* This is the COW case, lets connect */
6506 /* an entry in our space to the underlying */
6507 /* object in the submap, bypassing the */
6511 if(submap_entry
->wired_count
!= 0) {
6513 submap_entry
->object
.vm_object
);
6514 vm_object_copy_slowly(
6515 submap_entry
->object
.vm_object
,
6516 submap_entry
->offset
,
6517 submap_entry
->vme_end
-
6518 submap_entry
->vme_start
,
6521 copied_slowly
= TRUE
;
6524 /* set up shadow object */
6525 copy_object
= submap_entry
->object
.vm_object
;
6526 vm_object_reference(copy_object
);
6527 submap_entry
->object
.vm_object
->shadowed
= TRUE
;
6528 submap_entry
->needs_copy
= TRUE
;
6529 vm_object_pmap_protect(
6530 submap_entry
->object
.vm_object
,
6531 submap_entry
->offset
,
6532 submap_entry
->vme_end
-
6533 submap_entry
->vme_start
,
6534 (submap_entry
->is_shared
6536 PMAP_NULL
: map
->pmap
,
6537 submap_entry
->vme_start
,
6538 submap_entry
->protection
&
/* This works differently from the */
6544 /* normal submap case. We go back */
6545 /* to the parent of the cow map and*/
6546 /* clip out the target portion of */
6547 /* the sub_map, substituting the */
6548 /* new copy object, */
6551 local_start
= old_start
;
6552 local_end
= old_end
;
6553 map
= cow_sub_map_parent
;
6554 *var_map
= cow_sub_map_parent
;
6555 vaddr
= cow_parent_vaddr
;
6556 cow_sub_map_parent
= NULL
;
6558 if(!vm_map_lookup_entry(map
,
6560 vm_object_deallocate(
6562 vm_map_lock_write_to_read(map
);
6563 return KERN_INVALID_ADDRESS
;
6566 /* clip out the portion of space */
6567 /* mapped by the sub map which */
6568 /* corresponds to the underlying */
6570 vm_map_clip_start(map
, entry
, local_start
);
6571 vm_map_clip_end(map
, entry
, local_end
);
6574 /* substitute copy object for */
6575 /* shared map entry */
6576 vm_map_deallocate(entry
->object
.sub_map
);
6577 entry
->is_sub_map
= FALSE
;
6578 entry
->object
.vm_object
= copy_object
;
6580 entry
->protection
|= VM_PROT_WRITE
;
6581 entry
->max_protection
|= VM_PROT_WRITE
;
6584 entry
->needs_copy
= FALSE
;
6585 entry
->is_shared
= FALSE
;
6587 entry
->offset
= submap_entry
->offset
;
6588 entry
->needs_copy
= TRUE
;
6589 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6590 entry
->inheritance
= VM_INHERIT_COPY
;
6592 entry
->is_shared
= TRUE
;
6594 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6595 entry
->inheritance
= VM_INHERIT_COPY
;
6597 vm_map_lock_write_to_read(map
);
6599 if((cow_sub_map_parent
)
6600 && (cow_sub_map_parent
!= *pmap_map
)
6601 && (cow_sub_map_parent
!= map
)) {
6602 vm_map_unlock(cow_sub_map_parent
);
6604 entry
= submap_entry
;
6605 vaddr
= local_vaddr
;
6610 * Check whether this task is allowed to have
6614 prot
= entry
->protection
;
6615 if ((fault_type
& (prot
)) != fault_type
) {
6616 if (*pmap_map
!= map
) {
6617 vm_map_unlock(*pmap_map
);
6620 return KERN_PROTECTION_FAILURE
;
6624 * If this page is not pageable, we have to get
6625 * it for all possible accesses.
6628 if (*wired
= (entry
->wired_count
!= 0))
6629 prot
= fault_type
= entry
->protection
;
6632 * If the entry was copy-on-write, we either ...
6635 if (entry
->needs_copy
) {
6637 * If we want to write the page, we may as well
6638 * handle that now since we've got the map locked.
6640 * If we don't need to write the page, we just
6641 * demote the permissions allowed.
6644 if (fault_type
& VM_PROT_WRITE
|| *wired
) {
6646 * Make a new object, and place it in the
6647 * object chain. Note that no new references
6648 * have appeared -- one just moved from the
6649 * map to the new object.
6652 if (vm_map_lock_read_to_write(map
)) {
6653 vm_map_lock_read(map
);
6656 vm_object_shadow(&entry
->object
.vm_object
,
6658 (vm_size_t
) (entry
->vme_end
-
6661 entry
->object
.vm_object
->shadowed
= TRUE
;
6662 entry
->needs_copy
= FALSE
;
6663 vm_map_lock_write_to_read(map
);
6667 * We're attempting to read a copy-on-write
6668 * page -- don't allow writes.
6671 prot
&= (~VM_PROT_WRITE
);
6676 * Create an object if necessary.
6678 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6680 if (vm_map_lock_read_to_write(map
)) {
6681 vm_map_lock_read(map
);
6685 entry
->object
.vm_object
= vm_object_allocate(
6686 (vm_size_t
)(entry
->vme_end
- entry
->vme_start
));
6688 vm_map_lock_write_to_read(map
);
6692 * Return the object/offset from this entry. If the entry
6693 * was copy-on-write or empty, it has been fixed up. Also
6694 * return the protection.
6697 *offset
= (vaddr
- entry
->vme_start
) + entry
->offset
;
6698 *object
= entry
->object
.vm_object
;
6700 *behavior
= entry
->behavior
;
6701 *lo_offset
= entry
->offset
;
6702 *hi_offset
= (entry
->vme_end
- entry
->vme_start
) + entry
->offset
;
6705 * Lock the object to prevent it from disappearing
6708 vm_object_lock(*object
);
6711 * Save the version number
6714 out_version
->main_timestamp
= map
->timestamp
;
6716 return KERN_SUCCESS
;
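/*
 * A sketch of the "try the hint first" pattern at the top of
 * vm_map_lookup_locked() above: check the cached entry before paying for a
 * full lookup, and refresh the cache when the full lookup is taken.  The toy
 * types and lookup_full() are hypothetical.
 */
#if 0	/* illustrative sketch only; not part of the kernel build */
#include <stddef.h>
#include <stdint.h>

struct sk_range { uint32_t start, end; };

struct sk_rmap {
	struct sk_range *hint;		/* last entry that matched */
	struct sk_range *entries;
	size_t           nentries;
};

static struct sk_range *
lookup_full(struct sk_rmap *m, uint32_t addr)
{
	for (size_t i = 0; i < m->nentries; i++)
		if (addr >= m->entries[i].start && addr < m->entries[i].end)
			return &m->entries[i];
	return NULL;
}

static struct sk_range *
lookup_hinted(struct sk_rmap *m, uint32_t addr)
{
	struct sk_range *e = m->hint;

	if (e != NULL && addr >= e->start && addr < e->end)
		return e;			/* hint hit: no search needed */

	e = lookup_full(m, addr);		/* hint miss: full lookup */
	if (e != NULL)
		m->hint = e;			/* remember for next time */
	return e;
}
#endif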
/*
 *	vm_map_verify:
 *
 *	Verifies that the map in question has not changed
 *	since the given version.  If successful, the map
 *	will not change until vm_map_verify_done() is called.
 */
boolean_t
vm_map_verify(
	register vm_map_t		map,
	register vm_map_version_t	*version)	/* REF */
{
	boolean_t	result;

	vm_map_lock_read(map);
	result = (map->timestamp == version->main_timestamp);

	if (!result)
		vm_map_unlock_read(map);

	return(result);
}

/*
 *	vm_map_verify_done:
 *
 *	Releases locks acquired by a vm_map_verify.
 *
 *	This is now a macro in vm/vm_map.h.  It does a
 *	vm_map_unlock_read on the map.
 */
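/*
 * A sketch of how vm_map_verify()/vm_map_verify_done() are meant to be used:
 * record the version while the map is locked, drop the lock to do slow work,
 * then re-verify and retry from the top if the timestamp moved.  The toy
 * types and stub functions below are hypothetical.
 */
#if 0	/* illustrative sketch only; not part of the kernel build */
#include <stdbool.h>

struct sk_vmap { unsigned long timestamp; };

static void sk_lock_read(struct sk_vmap *m)   { (void)m; /* stub */ }
static void sk_unlock_read(struct sk_vmap *m) { (void)m; /* stub */ }
static void do_slow_work(void)                { /* stub */ }

static void
verified_operation(struct sk_vmap *m)
{
	for (;;) {
		sk_lock_read(m);
		unsigned long version = m->timestamp;	/* snapshot */
		sk_unlock_read(m);

		do_slow_work();				/* map may change here */

		sk_lock_read(m);			/* like vm_map_verify() */
		bool still_valid = (m->timestamp == version);
		if (still_valid) {
			/* use the results, then "vm_map_verify_done()" */
			sk_unlock_read(m);
			return;
		}
		sk_unlock_read(m);			/* changed: try again */
	}
}
#endif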
6756 * User call to obtain information about a region in
6757 * a task's address map. Currently, only one flavor is
6760 * XXX The reserved and behavior fields cannot be filled
6761 * in until the vm merge from the IK is completed, and
6762 * vm_reserve is implemented.
6764 * XXX Dependency: syscall_vm_region() also supports only one flavor.
6770 vm_offset_t
*address
, /* IN/OUT */
6771 vm_size_t
*size
, /* OUT */
6772 vm_region_flavor_t flavor
, /* IN */
6773 vm_region_info_t info
, /* OUT */
6774 mach_msg_type_number_t
*count
, /* IN/OUT */
6775 ipc_port_t
*object_name
) /* OUT */
6777 vm_map_entry_t tmp_entry
;
6779 vm_map_entry_t entry
;
6782 vm_region_basic_info_t basic
;
6783 vm_region_extended_info_t extended
;
6784 vm_region_top_info_t top
;
6786 if (map
== VM_MAP_NULL
)
6787 return(KERN_INVALID_ARGUMENT
);
6791 case VM_REGION_BASIC_INFO
:
6793 if (*count
< VM_REGION_BASIC_INFO_COUNT
)
6794 return(KERN_INVALID_ARGUMENT
);
6796 basic
= (vm_region_basic_info_t
) info
;
6797 *count
= VM_REGION_BASIC_INFO_COUNT
;
6799 vm_map_lock_read(map
);
6802 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6803 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6804 vm_map_unlock_read(map
);
6805 return(KERN_INVALID_ADDRESS
);
6811 start
= entry
->vme_start
;
6813 basic
->offset
= entry
->offset
;
6814 basic
->protection
= entry
->protection
;
6815 basic
->inheritance
= entry
->inheritance
;
6816 basic
->max_protection
= entry
->max_protection
;
6817 basic
->behavior
= entry
->behavior
;
6818 basic
->user_wired_count
= entry
->user_wired_count
;
6819 basic
->reserved
= entry
->is_sub_map
;
6821 *size
= (entry
->vme_end
- start
);
6823 if (object_name
) *object_name
= IP_NULL
;
6824 if (entry
->is_sub_map
) {
6825 basic
->shared
= FALSE
;
6827 basic
->shared
= entry
->is_shared
;
6830 vm_map_unlock_read(map
);
6831 return(KERN_SUCCESS
);
6833 case VM_REGION_EXTENDED_INFO
:
6836 if (*count
< VM_REGION_EXTENDED_INFO_COUNT
)
6837 return(KERN_INVALID_ARGUMENT
);
6839 extended
= (vm_region_extended_info_t
) info
;
6840 *count
= VM_REGION_EXTENDED_INFO_COUNT
;
6842 vm_map_lock_read(map
);
6845 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6846 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6847 vm_map_unlock_read(map
);
6848 return(KERN_INVALID_ADDRESS
);
6853 start
= entry
->vme_start
;
6855 extended
->protection
= entry
->protection
;
6856 extended
->user_tag
= entry
->alias
;
6857 extended
->pages_resident
= 0;
6858 extended
->pages_swapped_out
= 0;
6859 extended
->pages_shared_now_private
= 0;
6860 extended
->pages_dirtied
= 0;
6861 extended
->external_pager
= 0;
6862 extended
->shadow_depth
= 0;
6864 vm_region_walk(entry
, extended
, entry
->offset
, entry
->vme_end
- start
, map
, start
);
6866 if (extended
->external_pager
&& extended
->ref_count
== 2 && extended
->share_mode
== SM_SHARED
)
6867 extended
->share_mode
= SM_PRIVATE
;
6870 *object_name
= IP_NULL
;
6872 *size
= (entry
->vme_end
- start
);
6874 vm_map_unlock_read(map
);
6875 return(KERN_SUCCESS
);
6877 case VM_REGION_TOP_INFO
:
6880 if (*count
< VM_REGION_TOP_INFO_COUNT
)
6881 return(KERN_INVALID_ARGUMENT
);
6883 top
= (vm_region_top_info_t
) info
;
6884 *count
= VM_REGION_TOP_INFO_COUNT
;
6886 vm_map_lock_read(map
);
6889 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6890 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6891 vm_map_unlock_read(map
);
6892 return(KERN_INVALID_ADDRESS
);
6898 start
= entry
->vme_start
;
6900 top
->private_pages_resident
= 0;
6901 top
->shared_pages_resident
= 0;
6903 vm_region_top_walk(entry
, top
);
6906 *object_name
= IP_NULL
;
6908 *size
= (entry
->vme_end
- start
);
6910 vm_map_unlock_read(map
);
6911 return(KERN_SUCCESS
);
6914 return(KERN_INVALID_ARGUMENT
);
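/*
 * A small user-space sketch of calling this interface through the standard
 * Mach stub (assuming <mach/mach.h> and the user-level vm_region() routine):
 * ask for VM_REGION_BASIC_INFO on the first region at or above 'addr' in the
 * caller's own task.
 */
#if 0	/* illustrative sketch only; not part of the kernel build */
#include <mach/mach.h>
#include <stdio.h>

int
main(void)
{
	vm_address_t addr = 0;
	vm_size_t size = 0;
	vm_region_basic_info_data_t info;
	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT;
	mach_port_t object_name = MACH_PORT_NULL;

	kern_return_t kr = vm_region(mach_task_self(), &addr, &size,
				     VM_REGION_BASIC_INFO,
				     (vm_region_info_t)&info,
				     &count, &object_name);
	if (kr != KERN_SUCCESS) {
		printf("vm_region failed: %d\n", kr);
		return 1;
	}
	printf("region at 0x%lx, size 0x%lx, prot 0x%x\n",
	       (unsigned long)addr, (unsigned long)size, info.protection);
	return 0;
}
#endif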
6919 * vm_region_recurse: A form of vm_region which follows the
6920 * submaps in a target map
6927 vm_offset_t
*address
, /* IN/OUT */
6928 vm_size_t
*size
, /* OUT */
6929 natural_t
*nesting_depth
, /* IN/OUT */
6930 vm_region_recurse_info_t info
, /* IN/OUT */
6931 mach_msg_type_number_t
*count
) /* IN/OUT */
6933 vm_map_entry_t tmp_entry
;
6935 vm_map_entry_t entry
;
6939 unsigned int recurse_count
;
6942 vm_map_entry_t base_entry
;
6943 vm_offset_t base_next
;
6944 vm_offset_t base_addr
;
6945 vm_offset_t baddr_start_delta
;
6946 vm_region_submap_info_t submap_info
;
6947 vm_region_extended_info_data_t extended
;
6949 if (map
== VM_MAP_NULL
)
6950 return(KERN_INVALID_ARGUMENT
);
6952 submap_info
= (vm_region_submap_info_t
) info
;
6953 *count
= VM_REGION_SUBMAP_INFO_COUNT
;
6955 if (*count
< VM_REGION_SUBMAP_INFO_COUNT
)
6956 return(KERN_INVALID_ARGUMENT
);
6960 recurse_count
= *nesting_depth
;
6962 LOOKUP_NEXT_BASE_ENTRY
:
6963 vm_map_lock_read(map
);
6964 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6965 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6966 vm_map_unlock_read(map
);
6967 return(KERN_INVALID_ADDRESS
);
6972 *size
= entry
->vme_end
- entry
->vme_start
;
6973 start
= entry
->vme_start
;
6975 baddr_start_delta
= *address
- start
;
6976 base_next
= entry
->vme_end
;
6979 while(entry
->is_sub_map
&& recurse_count
) {
6981 vm_map_lock_read(entry
->object
.sub_map
);
6984 if(entry
== base_entry
) {
6985 start
= entry
->offset
;
6986 start
+= *address
- entry
->vme_start
;
6989 submap
= entry
->object
.sub_map
;
6990 vm_map_unlock_read(map
);
6993 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6994 if ((entry
= tmp_entry
->vme_next
)
6995 == vm_map_to_entry(map
)) {
6996 vm_map_unlock_read(map
);
7001 goto LOOKUP_NEXT_BASE_ENTRY
;
7007 if(start
<= entry
->vme_start
) {
7008 vm_offset_t old_start
= start
;
7009 if(baddr_start_delta
) {
7010 base_addr
+= (baddr_start_delta
);
7011 *size
-= baddr_start_delta
;
7012 baddr_start_delta
= 0;
7015 (base_addr
+= (entry
->vme_start
- start
))) {
7016 vm_map_unlock_read(map
);
7021 goto LOOKUP_NEXT_BASE_ENTRY
;
7023 *size
-= entry
->vme_start
- start
;
7024 if (*size
> (entry
->vme_end
- entry
->vme_start
)) {
7025 *size
= entry
->vme_end
- entry
->vme_start
;
7029 if(baddr_start_delta
) {
7030 if((start
- entry
->vme_start
)
7031 < baddr_start_delta
) {
7032 base_addr
+= start
- entry
->vme_start
;
7033 *size
-= start
- entry
->vme_start
;
7035 base_addr
+= baddr_start_delta
;
7036 *size
+= baddr_start_delta
;
7038 baddr_start_delta
= 0;
7040 base_addr
+= entry
->vme_start
;
7041 if(base_addr
>= base_next
) {
7042 vm_map_unlock_read(map
);
7047 goto LOOKUP_NEXT_BASE_ENTRY
;
7049 if (*size
> (entry
->vme_end
- start
))
7050 *size
= entry
->vme_end
- start
;
7052 start
= entry
->vme_start
- start
;
7055 start
+= entry
->offset
;
7058 *nesting_depth
-= recurse_count
;
7059 if(entry
!= base_entry
) {
7060 start
= entry
->vme_start
+ (start
- entry
->offset
);
7064 submap_info
->user_tag
= entry
->alias
;
7065 submap_info
->offset
= entry
->offset
;
7066 submap_info
->protection
= entry
->protection
;
7067 submap_info
->inheritance
= entry
->inheritance
;
7068 submap_info
->max_protection
= entry
->max_protection
;
7069 submap_info
->behavior
= entry
->behavior
;
7070 submap_info
->user_wired_count
= entry
->user_wired_count
;
7071 submap_info
->is_submap
= entry
->is_sub_map
;
7072 submap_info
->object_id
= (vm_offset_t
)entry
->object
.vm_object
;
7073 *address
= base_addr
;
7076 extended
.pages_resident
= 0;
7077 extended
.pages_swapped_out
= 0;
7078 extended
.pages_shared_now_private
= 0;
7079 extended
.pages_dirtied
= 0;
7080 extended
.external_pager
= 0;
7081 extended
.shadow_depth
= 0;
7083 if(!entry
->is_sub_map
) {
7084 vm_region_walk(entry
, &extended
, entry
->offset
,
7085 entry
->vme_end
- start
, map
, start
);
7086 submap_info
->share_mode
= extended
.share_mode
;
7087 if (extended
.external_pager
&& extended
.ref_count
== 2
7088 && extended
.share_mode
== SM_SHARED
)
7089 submap_info
->share_mode
= SM_PRIVATE
;
7090 submap_info
->ref_count
= extended
.ref_count
;
7093 submap_info
->share_mode
= SM_TRUESHARED
;
7095 submap_info
->share_mode
= SM_PRIVATE
;
7096 submap_info
->ref_count
= entry
->object
.sub_map
->ref_count
;
7099 submap_info
->pages_resident
= extended
.pages_resident
;
7100 submap_info
->pages_swapped_out
= extended
.pages_swapped_out
;
7101 submap_info
->pages_shared_now_private
=
7102 extended
.pages_shared_now_private
;
7103 submap_info
->pages_dirtied
= extended
.pages_dirtied
;
7104 submap_info
->external_pager
= extended
.external_pager
;
7105 submap_info
->shadow_depth
= extended
.shadow_depth
;
7107 vm_map_unlock_read(map
);
7108 return(KERN_SUCCESS
);
7112 * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
7113 * Goes away after regular vm_region_recurse function migrates to
7115 * vm_region_recurse: A form of vm_region which follows the
7116 * submaps in a target map
7121 vm_region_recurse_64(
7123 vm_offset_t
*address
, /* IN/OUT */
7124 vm_size_t
*size
, /* OUT */
7125 natural_t
*nesting_depth
, /* IN/OUT */
7126 vm_region_recurse_info_t info
, /* IN/OUT */
7127 mach_msg_type_number_t
*count
) /* IN/OUT */
7129 vm_map_entry_t tmp_entry
;
7131 vm_map_entry_t entry
;
7135 unsigned int recurse_count
;
7138 vm_map_entry_t base_entry
;
7139 vm_offset_t base_next
;
7140 vm_offset_t base_addr
;
7141 vm_offset_t baddr_start_delta
;
7142 vm_region_submap_info_64_t submap_info
;
7143 vm_region_extended_info_data_t extended
;
7145 if (map
== VM_MAP_NULL
)
7146 return(KERN_INVALID_ARGUMENT
);
7148 submap_info
= (vm_region_submap_info_64_t
) info
;
7149 *count
= VM_REGION_SUBMAP_INFO_COUNT
;
7151 if (*count
< VM_REGION_SUBMAP_INFO_COUNT
)
7152 return(KERN_INVALID_ARGUMENT
);
7156 recurse_count
= *nesting_depth
;
7158 LOOKUP_NEXT_BASE_ENTRY
:
7159 vm_map_lock_read(map
);
7160 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7161 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
            vm_map_unlock_read(map);
            return(KERN_INVALID_ADDRESS);
        }
    }
    *size = entry->vme_end - entry->vme_start;
    start = entry->vme_start;

    baddr_start_delta = *address - start;
    base_next = entry->vme_end;

    while(entry->is_sub_map && recurse_count) {
        recurse_count--;
        vm_map_lock_read(entry->object.sub_map);

        if(entry == base_entry) {
            start = entry->offset;
            start += *address - entry->vme_start;
        }

        submap = entry->object.sub_map;
        vm_map_unlock_read(map);
        map = submap;

        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next)
                    == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                goto LOOKUP_NEXT_BASE_ENTRY;
            }
        }

        if(start <= entry->vme_start) {
            vm_offset_t old_start = start;
            if(baddr_start_delta) {
                base_addr += (baddr_start_delta);
                *size -= baddr_start_delta;
                baddr_start_delta = 0;
            }
            if(base_next <=
                (base_addr += (entry->vme_start - start))) {
                vm_map_unlock_read(map);
                goto LOOKUP_NEXT_BASE_ENTRY;
            }
            *size -= entry->vme_start - start;
            if (*size > (entry->vme_end - entry->vme_start)) {
                *size = entry->vme_end - entry->vme_start;
            }
        } else {
            if(baddr_start_delta) {
                if((start - entry->vme_start)
                        < baddr_start_delta) {
                    base_addr += start - entry->vme_start;
                    *size -= start - entry->vme_start;
                } else {
                    base_addr += baddr_start_delta;
                    *size += baddr_start_delta;
                }
                baddr_start_delta = 0;
            }
            base_addr += entry->vme_start;
            if(base_addr >= base_next) {
                vm_map_unlock_read(map);
                goto LOOKUP_NEXT_BASE_ENTRY;
            }
            if (*size > (entry->vme_end - start))
                *size = entry->vme_end - start;

            start = entry->vme_start - start;
        }

        start += entry->offset;
    }
    *nesting_depth -= recurse_count;
    if(entry != base_entry) {
        start = entry->vme_start + (start - entry->offset);
    }

    submap_info->user_tag = entry->alias;
    submap_info->offset = entry->offset;
    submap_info->protection = entry->protection;
    submap_info->inheritance = entry->inheritance;
    submap_info->max_protection = entry->max_protection;
    submap_info->behavior = entry->behavior;
    submap_info->user_wired_count = entry->user_wired_count;
    submap_info->is_submap = entry->is_sub_map;
    submap_info->object_id = (vm_offset_t)entry->object.vm_object;
    *address = base_addr;

    extended.pages_resident = 0;
    extended.pages_swapped_out = 0;
    extended.pages_shared_now_private = 0;
    extended.pages_dirtied = 0;
    extended.external_pager = 0;
    extended.shadow_depth = 0;

    if(!entry->is_sub_map) {
        vm_region_walk(entry, &extended, entry->offset,
                entry->vme_end - start, map, start);
        submap_info->share_mode = extended.share_mode;
        if (extended.external_pager && extended.ref_count == 2
                    && extended.share_mode == SM_SHARED)
            submap_info->share_mode = SM_PRIVATE;
        submap_info->ref_count = extended.ref_count;
    } else {
        if(entry->use_pmap)
            submap_info->share_mode = SM_TRUESHARED;
        else
            submap_info->share_mode = SM_PRIVATE;
        submap_info->ref_count = entry->object.sub_map->ref_count;
    }

    submap_info->pages_resident = extended.pages_resident;
    submap_info->pages_swapped_out = extended.pages_swapped_out;
    submap_info->pages_shared_now_private =
                extended.pages_shared_now_private;
    submap_info->pages_dirtied = extended.pages_dirtied;
    submap_info->external_pager = extended.external_pager;
    submap_info->shadow_depth = extended.shadow_depth;

    vm_map_unlock_read(map);
    return(KERN_SUCCESS);
}
/*
 *	TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 *	Goes away after regular vm_region function migrates to
 *	64 bits
 *	TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 */

kern_return_t
vm_region_64(
    vm_map_t                 map,
    vm_offset_t             *address,      /* IN/OUT */
    vm_size_t               *size,         /* OUT */
    vm_region_flavor_t       flavor,       /* IN */
    vm_region_info_t         info,         /* OUT */
    mach_msg_type_number_t  *count,        /* IN/OUT */
    ipc_port_t              *object_name)  /* OUT */
{
    vm_map_entry_t              tmp_entry;
    vm_map_entry_t              entry;
    vm_offset_t                 start;
    vm_region_basic_info_64_t   basic;
    vm_region_extended_info_t   extended;
    vm_region_top_info_t        top;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    switch (flavor) {

    case VM_REGION_BASIC_INFO:
    {
        if (*count < VM_REGION_BASIC_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        basic = (vm_region_basic_info_64_t) info;
        *count = VM_REGION_BASIC_INFO_COUNT;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }

        start = entry->vme_start;

        basic->offset = entry->offset;
        basic->protection = entry->protection;
        basic->inheritance = entry->inheritance;
        basic->max_protection = entry->max_protection;
        basic->behavior = entry->behavior;
        basic->user_wired_count = entry->user_wired_count;
        basic->reserved = entry->is_sub_map;
        *address = start;
        *size = (entry->vme_end - start);

        if (object_name) *object_name = IP_NULL;
        if (entry->is_sub_map) {
            basic->shared = FALSE;
        } else {
            basic->shared = entry->is_shared;
        }

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }
    case VM_REGION_EXTENDED_INFO:
    {
        if (*count < VM_REGION_EXTENDED_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        extended = (vm_region_extended_info_t) info;
        *count = VM_REGION_EXTENDED_INFO_COUNT;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }
        start = entry->vme_start;

        extended->protection = entry->protection;
        extended->user_tag = entry->alias;
        extended->pages_resident = 0;
        extended->pages_swapped_out = 0;
        extended->pages_shared_now_private = 0;
        extended->pages_dirtied = 0;
        extended->external_pager = 0;
        extended->shadow_depth = 0;

        vm_region_walk(entry, extended, entry->offset, entry->vme_end - start, map, start);

        if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
            extended->share_mode = SM_PRIVATE;

        if (object_name)
            *object_name = IP_NULL;
        *address = start;
        *size = (entry->vme_end - start);

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }
    case VM_REGION_TOP_INFO:
    {
        if (*count < VM_REGION_TOP_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        top = (vm_region_top_info_t) info;
        *count = VM_REGION_TOP_INFO_COUNT;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }
        start = entry->vme_start;

        top->private_pages_resident = 0;
        top->shared_pages_resident = 0;

        vm_region_top_walk(entry, top);

        if (object_name)
            *object_name = IP_NULL;
        *address = start;
        *size = (entry->vme_end - start);

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }
    default:
        return(KERN_INVALID_ARGUMENT);
    }
}
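
/*
 * Illustrative sketch only (not part of the original source): one way a
 * kernel-resident caller might drive the VM_REGION_BASIC_INFO flavor of
 * vm_region_64() above to walk a map region by region.  The helper name
 * and the loop are hypothetical; the parameters are the ones declared in
 * vm_region_64() above.
 */
#if 0
static void
example_walk_basic_regions(vm_map_t map)
{
    vm_offset_t                     address = 0;
    vm_size_t                       size;
    vm_region_basic_info_data_64_t  info;
    mach_msg_type_number_t          count;
    ipc_port_t                      object_name;

    for (;;) {
        count = VM_REGION_BASIC_INFO_COUNT;
        if (vm_region_64(map, &address, &size, VM_REGION_BASIC_INFO,
                         (vm_region_info_t)&info, &count,
                         &object_name) != KERN_SUCCESS)
            break;              /* no entry at or above this address */
        /* address/size now describe one region; step past it */
        address += size;
    }
}
#endif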
void
vm_region_top_walk(
    vm_map_entry_t          entry,
    vm_region_top_info_t    top)
{
    register struct vm_object *obj, *tmp_obj;
    register int    ref_count;

    if (entry->object.vm_object == 0 || entry->is_sub_map) {
        top->share_mode = SM_EMPTY;
        top->ref_count = 0;
        top->obj_id = 0;
        return;
    }

    obj = entry->object.vm_object;

    vm_object_lock(obj);

    if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
        ref_count--;

    if (obj->shadow) {
        if (ref_count == 1)
            top->private_pages_resident = obj->resident_page_count;
        else
            top->shared_pages_resident = obj->resident_page_count;
        top->ref_count = ref_count;
        top->share_mode = SM_COW;

        while (tmp_obj = obj->shadow) {
            vm_object_lock(tmp_obj);
            vm_object_unlock(obj);
            obj = tmp_obj;

            if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
                ref_count--;

            top->shared_pages_resident += obj->resident_page_count;
            top->ref_count += ref_count - 1;
        }
    } else {
        if (entry->needs_copy) {
            top->share_mode = SM_COW;
            top->shared_pages_resident = obj->resident_page_count;
        } else {
            if (ref_count == 1 ||
               (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
                top->share_mode = SM_PRIVATE;
                top->private_pages_resident = obj->resident_page_count;
            } else {
                top->share_mode = SM_SHARED;
                top->shared_pages_resident = obj->resident_page_count;
            }
        }
        top->ref_count = ref_count;
    }
    top->obj_id = (int)obj;

    vm_object_unlock(obj);
}
void
vm_region_walk(
    vm_map_entry_t              entry,
    vm_region_extended_info_t   extended,
    vm_object_offset_t          offset,
    vm_offset_t                 range,
    vm_map_t                    map,
    vm_offset_t                 va)
{
    register struct vm_object   *obj, *tmp_obj;
    register vm_offset_t        last_offset;
    register int                i;
    register int                ref_count;
    void vm_region_look_for_page();

    if ((entry->object.vm_object == 0) ||
            (entry->is_sub_map) ||
            (entry->object.vm_object->phys_contiguous)) {
        extended->share_mode = SM_EMPTY;
        extended->ref_count = 0;
        return;
    }

    obj = entry->object.vm_object;

    vm_object_lock(obj);

    if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
        ref_count--;

    for (last_offset = offset + range; offset < last_offset; offset += PAGE_SIZE_64, va += PAGE_SIZE)
        vm_region_look_for_page(obj, extended, offset, ref_count, 0, map, va);

    if (extended->shadow_depth || entry->needs_copy)
        extended->share_mode = SM_COW;
    else {
        if (ref_count == 1)
            extended->share_mode = SM_PRIVATE;
        else {
            if (obj->true_share)
                extended->share_mode = SM_TRUESHARED;
            else
                extended->share_mode = SM_SHARED;
        }
    }
    extended->ref_count = ref_count - extended->shadow_depth;

    for (i = 0; i < extended->shadow_depth; i++) {
        if ((tmp_obj = obj->shadow) == 0)
            break;
        vm_object_lock(tmp_obj);
        vm_object_unlock(obj);

        if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress)
            ref_count--;

        extended->ref_count += ref_count;
        obj = tmp_obj;
    }
    vm_object_unlock(obj);

    if (extended->share_mode == SM_SHARED) {
        register vm_map_entry_t cur;
        register vm_map_entry_t last;
        int                     my_refs;

        obj = entry->object.vm_object;
        last = vm_map_to_entry(map);
        my_refs = 0;

        if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
            ref_count--;

        for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
            my_refs += vm_region_count_obj_refs(cur, obj);

        if (my_refs == ref_count)
            extended->share_mode = SM_PRIVATE_ALIASED;
        else if (my_refs > 1)
            extended->share_mode = SM_SHARED_ALIASED;
    }
}
/* object is locked on entry and locked on return */

void
vm_region_look_for_page(
    vm_object_t                 object,
    vm_region_extended_info_t   extended,
    vm_object_offset_t          offset,
    int                         max_refcnt,
    int                         depth,
    vm_map_t                    map,
    vm_offset_t                 va)
{
    register vm_page_t      p;
    register vm_object_t    shadow;
    register int            ref_count;
    vm_object_t             caller_object;

    shadow = object->shadow;
    caller_object = object;

    while (TRUE) {

        if ( !(object->pager_trusted) && !(object->internal))
            extended->external_pager = 1;

        if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
            if (shadow && (max_refcnt == 1))
                extended->pages_shared_now_private++;

            if (!p->fictitious &&
                (p->dirty || pmap_is_modified(p->phys_page)))
                extended->pages_dirtied++;
            extended->pages_resident++;

            if(object != caller_object)
                vm_object_unlock(object);

            return;
        }
        if (object->existence_map) {
            if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) {

                extended->pages_swapped_out++;

                if(object != caller_object)
                    vm_object_unlock(object);

                return;
            }
        }
        if (shadow) {
            vm_object_lock(shadow);

            if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
                ref_count--;

            if (++depth > extended->shadow_depth)
                extended->shadow_depth = depth;

            if (ref_count > max_refcnt)
                max_refcnt = ref_count;

            if(object != caller_object)
                vm_object_unlock(object);

            object = shadow;
            shadow = object->shadow;
            offset = offset + object->shadow_offset;
            continue;
        }
        if(object != caller_object)
            vm_object_unlock(object);
        break;
    }
}
int
vm_region_count_obj_refs(
    vm_map_entry_t  entry,
    vm_object_t     object)
{
    register int            ref_count;
    register vm_object_t    chk_obj;
    register vm_object_t    tmp_obj;

    if (entry->object.vm_object == 0)
        return(0);

    if (entry->is_sub_map)
        return(0);

    ref_count = 0;

    chk_obj = entry->object.vm_object;
    vm_object_lock(chk_obj);

    while (chk_obj) {
        if (chk_obj == object)
            ref_count++;
        if (tmp_obj = chk_obj->shadow)
            vm_object_lock(tmp_obj);
        vm_object_unlock(chk_obj);

        chk_obj = tmp_obj;
    }
    return(ref_count);
}
/*
 *	Routine:	vm_map_simplify
 *
 *	Description:
 *		Attempt to simplify the map representation in
 *		the vicinity of the given starting address.
 *	Note:
 *		This routine is intended primarily to keep the
 *		kernel maps more compact -- they generally don't
 *		benefit from the "expand a map entry" technology
 *		at allocation time because the adjacent entry
 *		is often wired down.
 */
void
vm_map_simplify(
    vm_map_t    map,
    vm_offset_t start)
{
    vm_map_entry_t  this_entry;
    vm_map_entry_t  prev_entry;
    vm_map_entry_t  next_entry;

    vm_map_lock(map);
    if (
        (vm_map_lookup_entry(map, start, &this_entry)) &&
        ((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) &&

        (prev_entry->vme_end == this_entry->vme_start) &&

        (prev_entry->is_shared == FALSE) &&
        (prev_entry->is_sub_map == FALSE) &&

        (this_entry->is_shared == FALSE) &&
        (this_entry->is_sub_map == FALSE) &&

        (prev_entry->inheritance == this_entry->inheritance) &&
        (prev_entry->protection == this_entry->protection) &&
        (prev_entry->max_protection == this_entry->max_protection) &&
        (prev_entry->behavior == this_entry->behavior) &&
        (prev_entry->wired_count == this_entry->wired_count) &&
        (prev_entry->user_wired_count == this_entry->user_wired_count)&&
        (prev_entry->in_transition == FALSE) &&
        (this_entry->in_transition == FALSE) &&

        (prev_entry->needs_copy == this_entry->needs_copy) &&

        (prev_entry->object.vm_object == this_entry->object.vm_object)&&
        ((prev_entry->offset +
          (prev_entry->vme_end - prev_entry->vme_start))
             == this_entry->offset)
    ) {
        SAVE_HINT(map, prev_entry);
        vm_map_entry_unlink(map, this_entry);
        prev_entry->vme_end = this_entry->vme_end;
        UPDATE_FIRST_FREE(map, map->first_free);
        vm_object_deallocate(this_entry->object.vm_object);
        vm_map_entry_dispose(map, this_entry);
        counter(c_vm_map_simplified_lower++);
    }
    if (
        (vm_map_lookup_entry(map, start, &this_entry)) &&
        ((next_entry = this_entry->vme_next) != vm_map_to_entry(map)) &&

        (next_entry->vme_start == this_entry->vme_end) &&

        (next_entry->is_shared == FALSE) &&
        (next_entry->is_sub_map == FALSE) &&

        (next_entry->is_shared == FALSE) &&
        (next_entry->is_sub_map == FALSE) &&

        (next_entry->inheritance == this_entry->inheritance) &&
        (next_entry->protection == this_entry->protection) &&
        (next_entry->max_protection == this_entry->max_protection) &&
        (next_entry->behavior == this_entry->behavior) &&
        (next_entry->wired_count == this_entry->wired_count) &&
        (next_entry->user_wired_count == this_entry->user_wired_count)&&
        (this_entry->in_transition == FALSE) &&
        (next_entry->in_transition == FALSE) &&

        (next_entry->needs_copy == this_entry->needs_copy) &&

        (next_entry->object.vm_object == this_entry->object.vm_object)&&
        ((this_entry->offset +
          (this_entry->vme_end - this_entry->vme_start))
             == next_entry->offset)
    ) {
        vm_map_entry_unlink(map, next_entry);
        this_entry->vme_end = next_entry->vme_end;
        UPDATE_FIRST_FREE(map, map->first_free);
        vm_object_deallocate(next_entry->object.vm_object);
        vm_map_entry_dispose(map, next_entry);
        counter(c_vm_map_simplified_upper++);
    }
    counter(c_vm_map_simplify_called++);
    vm_map_unlock(map);
}
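
/*
 * Illustrative sketch only (not part of the original source): after a
 * caller has created or resized adjacent kernel-map entries, it can ask
 * the map to coalesce around a boundary address.  The wrapper below is
 * hypothetical; it assumes only the vm_map_simplify(map, start) form
 * shown above, which takes and drops the map lock itself.
 */
#if 0
static void
example_coalesce_around(vm_map_t map, vm_offset_t boundary)
{
    /* Merge the entry containing "boundary" with compatible neighbors. */
    vm_map_simplify(map, boundary);
}
#endif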
/*
 *	Routine:	vm_map_machine_attribute
 *	Purpose:
 *		Provide machine-specific attributes to mappings,
 *		such as cachability etc. for machines that provide
 *		them.  NUMA architectures and machines with big/strange
 *		caches will use this.
 *	Note:
 *		Responsibilities for locking and checking are handled here,
 *		everything else in the pmap module. If any non-volatile
 *		information must be kept, the pmap module should handle
 *		it itself. [This assumes that attributes do not
 *		need to be inherited, which seems ok to me]
 */
kern_return_t
vm_map_machine_attribute(
    vm_map_t                     map,
    vm_offset_t                  address,
    vm_size_t                    size,
    vm_machine_attribute_t       attribute,
    vm_machine_attribute_val_t  *value)     /* IN/OUT */
{
    kern_return_t   ret;
    vm_size_t       sync_size;
    vm_offset_t     start;
    vm_map_entry_t  entry;

    if (address < vm_map_min(map) ||
        (address + size) > vm_map_max(map))
        return KERN_INVALID_ADDRESS;

    vm_map_lock(map);

    if (attribute != MATTR_CACHE) {
        /* If we don't have to find physical addresses, we */
        /* don't have to do an explicit traversal here.    */
        ret = pmap_attribute(map->pmap,
                address, size, attribute, value);
        vm_map_unlock(map);
        return ret;
    }

    /* Get the starting address */
    start = trunc_page_32(address);
    /* Figure how much memory we need to flush (in page increments) */
    sync_size = round_page_32(start + size) - start;

    ret = KERN_SUCCESS;     /* Assume it all worked */

    while(sync_size) {
        if (vm_map_lookup_entry(map, start, &entry)) {
            vm_size_t   sub_size;
            if((entry->vme_end - start) > sync_size) {
                sub_size = sync_size;
                sync_size = 0;
            } else {
                sub_size = entry->vme_end - start;
                sync_size -= sub_size;
            }
            if(entry->is_sub_map) {
                vm_map_machine_attribute(
                    entry->object.sub_map,
                    (start - entry->vme_start)
                            + entry->offset,
                    sub_size,
                    attribute, value);
            } else {
                if(entry->object.vm_object) {
                    vm_page_t           m;
                    vm_object_t         object;
                    vm_object_t         base_object;
                    vm_object_offset_t  offset;
                    vm_object_offset_t  base_offset;
                    vm_size_t           range;

                    range = sub_size;
                    offset = (start - entry->vme_start)
                            + entry->offset;
                    base_offset = offset;
                    object = entry->object.vm_object;
                    base_object = object;

                    while(range) {
                        m = vm_page_lookup(object, offset);
                        if(m && !m->fictitious) {
                            ret =
                                pmap_attribute_cache_sync(
                                    m->phys_page,
                                    PAGE_SIZE,
                                    attribute, value);
                        } else if (object->shadow) {
                            offset = offset +
                                object->shadow_offset;
                            object = object->shadow;
                            continue;
                        }
                        range -= PAGE_SIZE;
                        /* Bump to the next page */
                        base_offset += PAGE_SIZE;
                        offset = base_offset;
                        object = base_object;
                    }
                }
            }
            start += sub_size;
        } else {
            vm_map_unlock(map);
            return KERN_FAILURE;
        }
    }

    vm_map_unlock(map);

    return ret;
}
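
/*
 * Illustrative sketch only (not part of the original source): a caller
 * that wants the explicit object/page traversal above would pass
 * MATTR_CACHE; any other attribute is forwarded straight to
 * pmap_attribute().  The helper name is hypothetical, and the argument
 * order follows the vm_map_machine_attribute() declaration above.
 */
#if 0
static kern_return_t
example_flush_cache_range(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
    vm_machine_attribute_val_t value = MATTR_VAL_CACHE_FLUSH;

    return vm_map_machine_attribute(map, addr, len, MATTR_CACHE, &value);
}
#endif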
/*
 *	vm_map_behavior_set:
 *
 *	Sets the paging reference behavior of the specified address
 *	range in the target map.  Paging reference behavior affects
 *	how pagein operations resulting from faults on the map will be
 *	clustered.
 */
kern_return_t
vm_map_behavior_set(
    vm_map_t        map,
    vm_offset_t     start,
    vm_offset_t     end,
    vm_behavior_t   new_behavior)
{
    register vm_map_entry_t entry;
    vm_map_entry_t          temp_entry;

    XPR(XPR_VM_MAP,
        "vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d",
        (integer_t)map, start, end, new_behavior, 0);

    switch (new_behavior) {
    case VM_BEHAVIOR_DEFAULT:
    case VM_BEHAVIOR_RANDOM:
    case VM_BEHAVIOR_SEQUENTIAL:
    case VM_BEHAVIOR_RSEQNTL:
        break;
    case VM_BEHAVIOR_WILLNEED:
    case VM_BEHAVIOR_DONTNEED:
        new_behavior = VM_BEHAVIOR_DEFAULT;
        break;
    default:
        return(KERN_INVALID_ARGUMENT);
    }

    vm_map_lock(map);

    /*
     *	The entire address range must be valid for the map.
     *	Note that vm_map_range_check() does a
     *	vm_map_lookup_entry() internally and returns the
     *	entry containing the start of the address range if
     *	the entire range is valid.
     */
    if (vm_map_range_check(map, start, end, &temp_entry)) {
        entry = temp_entry;
        vm_map_clip_start(map, entry, start);
    } else {
        vm_map_unlock(map);
        return(KERN_INVALID_ADDRESS);
    }

    while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
        vm_map_clip_end(map, entry, end);

        entry->behavior = new_behavior;

        entry = entry->vme_next;
    }

    vm_map_unlock(map);
    return(KERN_SUCCESS);
}
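
/*
 * Illustrative sketch only (not part of the original source): marking a
 * range for sequential access so subsequent faults cluster page-ins in
 * ascending order.  The wrapper is hypothetical; the constant is one of
 * those accepted by the switch in vm_map_behavior_set() above.
 */
#if 0
static void
example_mark_sequential(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
    (void) vm_map_behavior_set(map, start, end, VM_BEHAVIOR_SEQUENTIAL);
}
#endif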
#include <mach_kdb.h>
#if MACH_KDB
#include <ddb/db_output.h>
#include <vm/vm_print.h>

#define printf  db_printf

/*
 * Forward declarations for internal functions.
 */
extern void vm_map_links_print(
        struct vm_map_links *links);

extern void vm_map_header_print(
        struct vm_map_header *header);

extern void vm_map_entry_print(
        vm_map_entry_t entry);

extern void vm_follow_entry(
        vm_map_entry_t entry);

extern void vm_follow_map(
        vm_map_t map);

/*
 *	vm_map_links_print:	[ debug ]
 */
void
vm_map_links_print(
    struct vm_map_links *links)
{
    iprintf("prev = %08X  next = %08X  start = %08X  end = %08X\n",
            links->prev,
            links->next,
            links->start,
            links->end);
}

/*
 *	vm_map_header_print:	[ debug ]
 */
void
vm_map_header_print(
    struct vm_map_header *header)
{
    vm_map_links_print(&header->links);
    iprintf("nentries = %08X, %sentries_pageable\n",
            header->nentries,
            (header->entries_pageable ? "" : "!"));
}

/*
 *	vm_follow_entry:	[ debug ]
 */
void
vm_follow_entry(
    vm_map_entry_t entry)
{
    extern int db_indent;
    int shadows;

    iprintf("map entry %08X\n", entry);

    db_indent += 2;

    shadows = vm_follow_object(entry->object.vm_object);
    iprintf("Total objects : %d\n", shadows);

    db_indent -= 2;
}

/*
 *	vm_map_entry_print:	[ debug ]
 */
void
vm_map_entry_print(
    register vm_map_entry_t entry)
{
    extern int db_indent;
    static char *inheritance_name[4] = { "share", "copy", "none", "?"};
    static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };

    iprintf("map entry %08X\n", entry);

    db_indent += 2;

    vm_map_links_print(&entry->links);

    iprintf("start = %08X  end = %08X, prot=%x/%x/%s\n",
            entry->vme_start,
            entry->vme_end,
            entry->protection,
            entry->max_protection,
            inheritance_name[(entry->inheritance & 0x3)]);

    iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n",
            behavior_name[(entry->behavior & 0x3)],
            entry->wired_count,
            entry->user_wired_count);

    iprintf("%sin_transition, %sneeds_wakeup\n",
            (entry->in_transition ? "" : "!"),
            (entry->needs_wakeup ? "" : "!"));

    if (entry->is_sub_map) {
        iprintf("submap = %08X - offset=%08X\n",
                entry->object.sub_map,
                entry->offset);
    } else {
        iprintf("object=%08X, offset=%08X, ",
                entry->object.vm_object,
                entry->offset);
        printf("%sis_shared, %sneeds_copy\n",
               (entry->is_shared ? "" : "!"),
               (entry->needs_copy ? "" : "!"));
    }

    db_indent -= 2;
}

/*
 *	vm_follow_map:	[ debug ]
 */
void
vm_follow_map(
    vm_map_t map)
{
    register vm_map_entry_t entry;
    extern int db_indent;

    iprintf("task map %08X\n", map);

    db_indent += 2;

    for (entry = vm_map_first_entry(map);
         entry && entry != vm_map_to_entry(map);
         entry = entry->vme_next) {
        vm_follow_entry(entry);
    }

    db_indent -= 2;
}

/*
 *	vm_map_print:	[ debug ]
 */
void
vm_map_print(
    db_addr_t inmap)
{
    register vm_map_entry_t entry;
    vm_map_t map;
#if TASK_SWAPPER
    char *swstate;
#endif /* TASK_SWAPPER */
    extern int db_indent;

    map = (vm_map_t)inmap;    /* Make sure we have the right type */

    iprintf("task map %08X\n", map);

    db_indent += 2;

    vm_map_header_print(&map->hdr);

    iprintf("pmap = %08X, size = %08X, ref = %d, hint = %08X, first_free = %08X\n",
            map->pmap,
            map->size,
            map->ref_count,
            map->hint,
            map->first_free);

    iprintf("%swait_for_space, %swiring_required, timestamp = %d\n",
            (map->wait_for_space ? "" : "!"),
            (map->wiring_required ? "" : "!"),
            map->timestamp);

#if TASK_SWAPPER
    switch (map->sw_state) {
    case MAP_SW_IN:
        swstate = "SW_IN";
        break;
    case MAP_SW_OUT:
        swstate = "SW_OUT";
        break;
    default:
        swstate = "????";
        break;
    }
    iprintf("res = %d, sw_state = %s\n", map->res_count, swstate);
#endif /* TASK_SWAPPER */

    for (entry = vm_map_first_entry(map);
         entry && entry != vm_map_to_entry(map);
         entry = entry->vme_next) {
        vm_map_entry_print(entry);
    }

    db_indent -= 2;
}

/*
 *	Routine:	vm_map_copy_print
 *	Purpose:
 *		Pretty-print a copy object for ddb.
 */
void
vm_map_copy_print(
    db_addr_t incopy)
{
    extern int db_indent;
    vm_map_copy_t copy;
    vm_map_entry_t entry;

    copy = (vm_map_copy_t)incopy;    /* Make sure we have the right type */

    printf("copy object 0x%x\n", copy);

    db_indent += 2;

    iprintf("type=%d", copy->type);
    switch (copy->type) {
    case VM_MAP_COPY_ENTRY_LIST:
        printf("[entry_list]");
        break;

    case VM_MAP_COPY_OBJECT:
        printf("[object]");
        break;

    case VM_MAP_COPY_KERNEL_BUFFER:
        printf("[kernel_buffer]");
        break;

    default:
        printf("[bad type]");
        break;
    }
    printf(", offset=0x%x", copy->offset);
    printf(", size=0x%x\n", copy->size);

    switch (copy->type) {
    case VM_MAP_COPY_ENTRY_LIST:
        vm_map_header_print(&copy->cpy_hdr);
        for (entry = vm_map_copy_first_entry(copy);
             entry && entry != vm_map_copy_to_entry(copy);
             entry = entry->vme_next) {
            vm_map_entry_print(entry);
        }
        break;

    case VM_MAP_COPY_OBJECT:
        iprintf("object=0x%x\n", copy->cpy_object);
        break;

    case VM_MAP_COPY_KERNEL_BUFFER:
        iprintf("kernel buffer=0x%x", copy->cpy_kdata);
        printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);
        break;
    }

    db_indent -= 2;
}

/*
 *	db_vm_map_total_size(map)	[ debug ]
 *
 *	return the total virtual size (in bytes) of the map
 */
vm_size_t
db_vm_map_total_size(
    db_addr_t inmap)
{
    vm_map_entry_t  entry;
    vm_map_t        map;
    vm_size_t       total;

    map = (vm_map_t)inmap;    /* Make sure we have the right type */

    total = 0;
    for (entry = vm_map_first_entry(map);
         entry != vm_map_to_entry(map);
         entry = entry->vme_next) {
        total += entry->vme_end - entry->vme_start;
    }

    return total;
}

#endif /* MACH_KDB */
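
/*
 * Illustrative sketch only (not part of the original source): how the
 * MACH_KDB helpers above might be driven from debugger context.  The
 * wrapper is hypothetical, and the db_addr_t argument form matches the
 * reconstruction of vm_map_print()/db_vm_map_total_size() above.
 */
#if 0
static void
example_show_current_map(void)
{
    vm_map_t map = current_map();

    vm_map_print((db_addr_t)map);
    iprintf("total mapped size = 0x%X\n",
            db_vm_map_total_size((db_addr_t)map));
}
#endif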
/*
 *	Routine:	vm_map_entry_insert
 *
 *	Description:	This routine inserts a new vm_entry in a locked map.
 */
vm_map_entry_t
vm_map_entry_insert(
    vm_map_t            map,
    vm_map_entry_t      insp_entry,
    vm_offset_t         start,
    vm_offset_t         end,
    vm_object_t         object,
    vm_object_offset_t  offset,
    boolean_t           needs_copy,
    boolean_t           is_shared,
    boolean_t           in_transition,
    vm_prot_t           cur_protection,
    vm_prot_t           max_protection,
    vm_behavior_t       behavior,
    vm_inherit_t        inheritance,
    unsigned            wired_count)
{
    vm_map_entry_t  new_entry;

    assert(insp_entry != (vm_map_entry_t)0);

    new_entry = vm_map_entry_create(map);

    new_entry->vme_start = start;
    new_entry->vme_end = end;
    assert(page_aligned(new_entry->vme_start));
    assert(page_aligned(new_entry->vme_end));

    new_entry->object.vm_object = object;
    new_entry->offset = offset;
    new_entry->is_shared = is_shared;
    new_entry->is_sub_map = FALSE;
    new_entry->needs_copy = needs_copy;
    new_entry->in_transition = in_transition;
    new_entry->needs_wakeup = FALSE;
    new_entry->inheritance = inheritance;
    new_entry->protection = cur_protection;
    new_entry->max_protection = max_protection;
    new_entry->behavior = behavior;
    new_entry->wired_count = wired_count;
    new_entry->user_wired_count = 0;
    new_entry->use_pmap = FALSE;

    /*
     *	Insert the new entry into the list.
     */

    vm_map_entry_link(map, insp_entry, new_entry);
    map->size += end - start;

    /*
     *	Update the free space hint and the lookup hint.
     */

    SAVE_HINT(map, new_entry);
    return new_entry;
}
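
/*
 * Illustrative sketch only (not part of the original source): inserting a
 * fresh entry behind a known entry of a map.  The helper and the chosen
 * protection/inheritance values are hypothetical; the argument order is
 * the one declared for vm_map_entry_insert() above, which expects the map
 * to be locked by the caller.
 */
#if 0
static void
example_insert_entry(
    vm_map_t        map,
    vm_map_entry_t  after,      /* entry to insert behind */
    vm_offset_t     start,
    vm_offset_t     end,
    vm_object_t     object)
{
    vm_map_entry_t  new_entry;

    vm_map_lock(map);
    new_entry = vm_map_entry_insert(map, after, start, end,
                object, (vm_object_offset_t)0,
                FALSE,                  /* needs_copy */
                FALSE,                  /* is_shared */
                FALSE,                  /* in_transition */
                VM_PROT_DEFAULT, VM_PROT_ALL,
                VM_BEHAVIOR_DEFAULT, VM_INHERIT_DEFAULT,
                0);                     /* wired_count */
    vm_map_unlock(map);
    (void) new_entry;
}
#endif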
/*
 *	Routine:	vm_remap_extract
 *
 *	Description:	This routine returns a vm_entry list from a map.
 */
kern_return_t
vm_remap_extract(
    vm_map_t                map,
    vm_offset_t             addr,
    vm_size_t               size,
    boolean_t               copy,
    struct vm_map_header    *map_header,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    /* What, no behavior? */
    vm_inherit_t            inheritance,
    boolean_t               pageable)
{
    kern_return_t       result;
    vm_size_t           mapped_size;
    vm_size_t           tmp_size;
    vm_map_entry_t      src_entry;      /* result of last map lookup */
    vm_map_entry_t      new_entry;
    vm_object_offset_t  offset;
    vm_offset_t         map_address;
    vm_offset_t         src_start;      /* start of entry to map */
    vm_offset_t         src_end;        /* end of region to be mapped */
    vm_object_t         object;
    vm_map_version_t    version;
    boolean_t           src_needs_copy;
    boolean_t           new_entry_needs_copy;

    assert(map != VM_MAP_NULL);
    assert(size != 0 && size == round_page_32(size));
    assert(inheritance == VM_INHERIT_NONE ||
           inheritance == VM_INHERIT_COPY ||
           inheritance == VM_INHERIT_SHARE);

    /*
     *	Compute start and end of region.
     */
    src_start = trunc_page_32(addr);
    src_end = round_page_32(src_start + size);

    /*
     *	Initialize map_header.
     */
    map_header->links.next = (struct vm_map_entry *)&map_header->links;
    map_header->links.prev = (struct vm_map_entry *)&map_header->links;
    map_header->nentries = 0;
    map_header->entries_pageable = pageable;

    *cur_protection = VM_PROT_ALL;
    *max_protection = VM_PROT_ALL;

    map_address = 0;
    mapped_size = 0;
    result = KERN_SUCCESS;

    /*
     *	The specified source virtual space might correspond to
     *	multiple map entries, need to loop on them.
     */
    vm_map_lock(map);
    while (mapped_size != size) {
        vm_size_t   entry_size;

        /*
         *	Find the beginning of the region.
         */
        if (! vm_map_lookup_entry(map, src_start, &src_entry)) {
            result = KERN_INVALID_ADDRESS;
            break;
        }

        if (src_start < src_entry->vme_start ||
            (mapped_size && src_start != src_entry->vme_start)) {
            result = KERN_INVALID_ADDRESS;
            break;
        }

        if(src_entry->is_sub_map) {
            result = KERN_INVALID_ADDRESS;
            break;
        }

        tmp_size = size - mapped_size;
        if (src_end > src_entry->vme_end)
            tmp_size -= (src_end - src_entry->vme_end);

        entry_size = (vm_size_t)(src_entry->vme_end -
                                 src_entry->vme_start);

        if(src_entry->is_sub_map) {
            vm_map_reference(src_entry->object.sub_map);
        } else {
            object = src_entry->object.vm_object;

            if (object == VM_OBJECT_NULL) {
                object = vm_object_allocate(entry_size);
                src_entry->offset = 0;
                src_entry->object.vm_object = object;
            } else if (object->copy_strategy !=
                       MEMORY_OBJECT_COPY_SYMMETRIC) {
                /*
                 *	We are already using an asymmetric
                 *	copy, and therefore we already have
                 *	the right object.
                 */
                assert(!src_entry->needs_copy);
            } else if (src_entry->needs_copy || object->shadowed ||
                       (object->internal && !object->true_share &&
                        !src_entry->is_shared &&
                        object->size > entry_size)) {

                vm_object_shadow(&src_entry->object.vm_object,
                                 &src_entry->offset,
                                 entry_size);

                if (!src_entry->needs_copy &&
                    (src_entry->protection & VM_PROT_WRITE)) {
                    if(map->mapped) {
                        vm_object_pmap_protect(
                            src_entry->object.vm_object,
                            src_entry->offset,
                            entry_size,
                            PMAP_NULL,
                            src_entry->vme_start,
                            src_entry->protection &
                                    ~VM_PROT_WRITE);
                    } else {
                        pmap_protect(vm_map_pmap(map),
                            src_entry->vme_start,
                            src_entry->vme_end,
                            src_entry->protection &
                                    ~VM_PROT_WRITE);
                    }
                }

                object = src_entry->object.vm_object;
                src_entry->needs_copy = FALSE;
            }

            vm_object_lock(object);
            object->ref_count++;        /* object ref. for new entry */
            VM_OBJ_RES_INCR(object);
            if (object->copy_strategy ==
                    MEMORY_OBJECT_COPY_SYMMETRIC) {
                object->copy_strategy =
                    MEMORY_OBJECT_COPY_DELAY;
            }
            vm_object_unlock(object);
        }

        offset = src_entry->offset + (src_start - src_entry->vme_start);

        new_entry = _vm_map_entry_create(map_header);
        vm_map_entry_copy(new_entry, src_entry);
        new_entry->use_pmap = FALSE;    /* clr address space specifics */

        new_entry->vme_start = map_address;
        new_entry->vme_end = map_address + tmp_size;
        new_entry->inheritance = inheritance;
        new_entry->offset = offset;

        /*
         *	The new region has to be copied now if required.
         */
    RestartCopy:
        if (!copy) {
            src_entry->is_shared = TRUE;
            new_entry->is_shared = TRUE;
            if (!(new_entry->is_sub_map))
                new_entry->needs_copy = FALSE;

        } else if (src_entry->is_sub_map) {
            /* make this a COW sub_map if not already */
            new_entry->needs_copy = TRUE;
        } else if (src_entry->wired_count == 0 &&
            vm_object_copy_quickly(&new_entry->object.vm_object,
                                   offset,
                                   (new_entry->vme_end -
                                    new_entry->vme_start),
                                   &src_needs_copy,
                                   &new_entry_needs_copy)) {

            new_entry->needs_copy = new_entry_needs_copy;
            new_entry->is_shared = FALSE;

            /*
             *	Handle copy_on_write semantics.
             */
            if (src_needs_copy && !src_entry->needs_copy) {
                vm_object_pmap_protect(object,
                                       offset,
                                       entry_size,
                                       ((src_entry->is_shared
                                         || map->mapped) ?
                                        PMAP_NULL : map->pmap),
                                       src_entry->vme_start,
                                       src_entry->protection &
                                               ~VM_PROT_WRITE);

                src_entry->needs_copy = TRUE;
            }
            /*
             *	Throw away the old object reference of the new entry.
             */
            vm_object_deallocate(object);

        } else {
            new_entry->is_shared = FALSE;

            /*
             *	The map can be safely unlocked since we
             *	already hold a reference on the object.
             *
             *	Record the timestamp of the map for later
             *	verification, and unlock the map.
             */
            version.main_timestamp = map->timestamp;
            vm_map_unlock(map);     /* Increments timestamp once! */

            if (src_entry->wired_count > 0) {
                vm_object_lock(object);
                result = vm_object_copy_slowly(
                                object,
                                offset,
                                entry_size,
                                THREAD_UNINT,
                                &new_entry->object.vm_object);

                new_entry->offset = 0;
                new_entry->needs_copy = FALSE;
            } else {
                result = vm_object_copy_strategically(
                                object,
                                offset,
                                entry_size,
                                &new_entry->object.vm_object,
                                &new_entry->offset,
                                &new_entry_needs_copy);

                new_entry->needs_copy = new_entry_needs_copy;
            }

            /*
             *	Throw away the old object reference of the new entry.
             */
            vm_object_deallocate(object);

            if (result != KERN_SUCCESS &&
                result != KERN_MEMORY_RESTART_COPY) {
                _vm_map_entry_dispose(map_header, new_entry);
                vm_map_lock(map);
                break;
            }

            /*
             *	Verify that the map has not substantially
             *	changed while the copy was being made.
             */

            vm_map_lock(map);
            if (version.main_timestamp + 1 != map->timestamp) {
                /*
                 *	Simple version comparison failed.
                 *
                 *	Retry the lookup and verify that the
                 *	same object/offset are still present.
                 */
                vm_object_deallocate(new_entry->
                                     object.vm_object);
                _vm_map_entry_dispose(map_header, new_entry);
                if (result == KERN_MEMORY_RESTART_COPY)
                    result = KERN_SUCCESS;
                continue;
            }

            if (result == KERN_MEMORY_RESTART_COPY) {
                vm_object_reference(object);
                goto RestartCopy;
            }
        }

        _vm_map_entry_link(map_header,
                           map_header->links.prev, new_entry);

        *cur_protection &= src_entry->protection;
        *max_protection &= src_entry->max_protection;

        map_address += tmp_size;
        mapped_size += tmp_size;
        src_start += tmp_size;

    } /* end while */

    vm_map_unlock(map);
    if (result != KERN_SUCCESS) {
        /*
         *	Free all allocated elements.
         */
        for (src_entry = map_header->links.next;
             src_entry != (struct vm_map_entry *)&map_header->links;
             src_entry = new_entry) {
            new_entry = src_entry->vme_next;
            _vm_map_entry_unlink(map_header, src_entry);
            vm_object_deallocate(src_entry->object.vm_object);
            _vm_map_entry_dispose(map_header, src_entry);
        }
    }
    return result;
}
/*
 *	vm_map_remap:
 *
 *	Map portion of a task's address space.
 *	Mapped region must not overlap more than
 *	one vm memory object. Protections and
 *	inheritance attributes remain the same
 *	as in the original task and are out parameters.
 *	Source and Target task can be identical
 *	Other attributes are identical as for vm_map()
 */
kern_return_t
vm_map_remap(
    vm_map_t        target_map,
    vm_offset_t     *address,
    vm_size_t       size,
    vm_offset_t     mask,
    boolean_t       anywhere,
    vm_map_t        src_map,
    vm_offset_t     memory_address,
    boolean_t       copy,
    vm_prot_t       *cur_protection,
    vm_prot_t       *max_protection,
    vm_inherit_t    inheritance)
{
    kern_return_t           result;
    vm_map_entry_t          entry;
    vm_map_entry_t          insp_entry;
    vm_map_entry_t          new_entry;
    struct vm_map_header    map_header;

    if (target_map == VM_MAP_NULL)
        return KERN_INVALID_ARGUMENT;

    switch (inheritance) {
    case VM_INHERIT_NONE:
    case VM_INHERIT_COPY:
    case VM_INHERIT_SHARE:
        if (size != 0 && src_map != VM_MAP_NULL)
            break;
        /*FALL THRU*/
    default:
        return KERN_INVALID_ARGUMENT;
    }

    size = round_page_32(size);

    result = vm_remap_extract(src_map, memory_address,
                              size, copy, &map_header,
                              cur_protection,
                              max_protection,
                              inheritance,
                              target_map->hdr.entries_pageable);

    if (result != KERN_SUCCESS) {
        return result;
    }

    /*
     *	Allocate/check a range of free virtual address
     *	space for the target
     */
    *address = trunc_page_32(*address);
    vm_map_lock(target_map);
    result = vm_remap_range_allocate(target_map, address, size,
                                     mask, anywhere, &insp_entry);

    for (entry = map_header.links.next;
         entry != (struct vm_map_entry *)&map_header.links;
         entry = new_entry) {
        new_entry = entry->vme_next;
        _vm_map_entry_unlink(&map_header, entry);
        if (result == KERN_SUCCESS) {
            entry->vme_start += *address;
            entry->vme_end += *address;
            vm_map_entry_link(target_map, insp_entry, entry);
            insp_entry = entry;
        } else {
            if (!entry->is_sub_map) {
                vm_object_deallocate(entry->object.vm_object);
            } else {
                vm_map_deallocate(entry->object.sub_map);
            }
            _vm_map_entry_dispose(&map_header, entry);
        }
    }

    if (result == KERN_SUCCESS) {
        target_map->size += size;
        SAVE_HINT(target_map, insp_entry);
    }
    vm_map_unlock(target_map);

    if (result == KERN_SUCCESS && target_map->wiring_required)
        result = vm_map_wire(target_map, *address,
                             *address + size, *cur_protection, TRUE);
    return result;
}
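
/*
 * Illustrative sketch only (not part of the original source): sharing a
 * page-aligned range of one task's map into another map at an address of
 * the kernel's choosing.  The helper is hypothetical; the argument order
 * is the vm_map_remap() declaration above, and copy=FALSE shares the
 * source pages instead of copying them.
 */
#if 0
static kern_return_t
example_share_range(
    vm_map_t    dst_map,
    vm_map_t    src_map,
    vm_offset_t src_addr,
    vm_size_t   len,
    vm_offset_t *dst_addr)      /* OUT */
{
    vm_prot_t   cur_prot, max_prot;

    *dst_addr = 0;
    return vm_map_remap(dst_map, dst_addr, len,
                        (vm_offset_t)0,     /* mask: no alignment demand */
                        TRUE,               /* anywhere */
                        src_map, src_addr,
                        FALSE,              /* copy */
                        &cur_prot, &max_prot,
                        VM_INHERIT_SHARE);
}
#endif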
/*
 *	Routine:	vm_remap_range_allocate
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		returns the address and the map entry just before the allocated
 *		range
 *
 *	Map must be locked.
 */

kern_return_t
vm_remap_range_allocate(
    vm_map_t        map,
    vm_offset_t     *address,   /* IN/OUT */
    vm_size_t       size,
    vm_offset_t     mask,
    boolean_t       anywhere,
    vm_map_entry_t  *map_entry) /* OUT */
{
    register vm_map_entry_t entry;
    register vm_offset_t    start;
    register vm_offset_t    end;
    kern_return_t           result = KERN_SUCCESS;

 StartAgain: ;

    start = *address;

    if (anywhere)
    {
        /*
         *	Calculate the first possible address.
         */

        if (start < map->min_offset)
            start = map->min_offset;
        if (start > map->max_offset)
            return(KERN_NO_SPACE);

        /*
         *	Look for the first possible address;
         *	if there's already something at this
         *	address, we have to start after it.
         */

        assert(first_free_is_valid(map));
        if (start == map->min_offset) {
            if ((entry = map->first_free) != vm_map_to_entry(map))
                start = entry->vme_end;
        } else {
            vm_map_entry_t  tmp_entry;
            if (vm_map_lookup_entry(map, start, &tmp_entry))
                start = tmp_entry->vme_end;
            entry = tmp_entry;
        }

        /*
         *	In any case, the "entry" always precedes
         *	the proposed new region throughout the
         *	loop:
         */

        while (TRUE) {
            register vm_map_entry_t next;

            /*
             *	Find the end of the proposed new region.
             *	Be sure we didn't go beyond the end, or
             *	wrap around the address.
             */

            end = ((start + mask) & ~mask);
            if (end < start)
                return(KERN_NO_SPACE);
            start = end;
            end += size;

            if ((end > map->max_offset) || (end < start)) {
                if (map->wait_for_space) {
                    if (size <= (map->max_offset -
                                 map->min_offset)) {
                        assert_wait((event_t) map, THREAD_INTERRUPTIBLE);
                        vm_map_unlock(map);
                        thread_block((void (*)(void))0);
                        vm_map_lock(map);
                        goto StartAgain;
                    }
                }

                return(KERN_NO_SPACE);
            }

            /*
             *	If there are no more entries, we must win.
             */

            next = entry->vme_next;
            if (next == vm_map_to_entry(map))
                break;

            /*
             *	If there is another entry, it must be
             *	after the end of the potential new region.
             */

            if (next->vme_start >= end)
                break;

            /*
             *	Didn't fit -- move to the next entry.
             */

            entry = next;
            start = entry->vme_end;
        }
        *address = start;
    } else {
        vm_map_entry_t  temp_entry;

        /*
         *	Verify that:
         *		the address doesn't itself violate
         *		the mask requirement.
         */

        if ((start & mask) != 0)
            return(KERN_NO_SPACE);

        /*
         *	...	the address is within bounds
         */

        end = start + size;

        if ((start < map->min_offset) ||
            (end > map->max_offset) ||
            (start >= end)) {
            return(KERN_INVALID_ADDRESS);
        }

        /*
         *	...	the starting address isn't allocated
         */

        if (vm_map_lookup_entry(map, start, &temp_entry))
            return(KERN_NO_SPACE);

        entry = temp_entry;

        /*
         *	...	the next region doesn't overlap the
         *		end point.
         */

        if ((entry->vme_next != vm_map_to_entry(map)) &&
            (entry->vme_next->vme_start < end))
            return(KERN_NO_SPACE);
    }
    *map_entry = entry;
    return(KERN_SUCCESS);
}
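
/*
 * Illustrative sketch only (not part of the original source): probing a
 * locked map for a free range the way vm_map_remap() does.  The helper is
 * hypothetical; argument order follows the vm_remap_range_allocate()
 * declaration above, which expects the map already locked and returns the
 * entry just before the chosen range.
 */
#if 0
static kern_return_t
example_probe_for_space(
    vm_map_t    map,
    vm_size_t   len,
    vm_offset_t *addr)          /* IN/OUT */
{
    vm_map_entry_t  insert_after;
    kern_return_t   kr;

    vm_map_lock(map);
    kr = vm_remap_range_allocate(map, addr, len,
                                 (vm_offset_t)0,    /* mask */
                                 TRUE,              /* anywhere */
                                 &insert_after);
    vm_map_unlock(map);
    return kr;
}
#endif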
/*
 *	vm_map_switch:
 *
 *	Set the address map for the current thr_act to the specified map
 */
vm_map_t
vm_map_switch(
    vm_map_t    map)
{
    int             mycpu;
    thread_act_t    thr_act = current_act();
    vm_map_t        oldmap = thr_act->map;

    mp_disable_preemption();
    mycpu = cpu_number();

    /*
     *	Deactivate the current map and activate the requested map
     */
    PMAP_SWITCH_USER(thr_act, map, mycpu);

    mp_enable_preemption();
    return(oldmap);
}
/*
 *	Routine:	vm_map_write_user
 *
 *	Description:
 *		Copy out data from a kernel space into space in the
 *		destination map. The space must already exist in the
 *		destination map.
 *	NOTE:  This routine should only be called by threads
 *	which can block on a page fault. i.e. kernel mode user
 *	threads.
 *
 */
kern_return_t
vm_map_write_user(
    vm_map_t        map,
    vm_offset_t     src_addr,
    vm_offset_t     dst_addr,
    vm_size_t       size)
{
    thread_act_t    thr_act = current_act();
    kern_return_t   kr = KERN_SUCCESS;

    if(thr_act->map == map) {
        if (copyout((char *)src_addr, (char *)dst_addr, size)) {
            kr = KERN_INVALID_ADDRESS;
        }
    } else {
        vm_map_t    oldmap;

        /* take on the identity of the target map while doing */
        /* the transfer */

        vm_map_reference(map);
        oldmap = vm_map_switch(map);
        if (copyout((char *)src_addr, (char *)dst_addr, size)) {
            kr = KERN_INVALID_ADDRESS;
        }
        vm_map_switch(oldmap);
        vm_map_deallocate(map);
    }
    return kr;
}
/*
 *	Routine:	vm_map_read_user
 *
 *	Description:
 *		Copy in data from a user space source map into the
 *		kernel map. The space must already exist in the
 *		kernel map.
 *	NOTE:  This routine should only be called by threads
 *	which can block on a page fault. i.e. kernel mode user
 *	threads.
 *
 */
kern_return_t
vm_map_read_user(
    vm_map_t        map,
    vm_offset_t     src_addr,
    vm_offset_t     dst_addr,
    vm_size_t       size)
{
    thread_act_t    thr_act = current_act();
    kern_return_t   kr = KERN_SUCCESS;

    if(thr_act->map == map) {
        if (copyin((char *)src_addr, (char *)dst_addr, size)) {
            kr = KERN_INVALID_ADDRESS;
        }
    } else {
        vm_map_t    oldmap;

        /* take on the identity of the target map while doing */
        /* the transfer */

        vm_map_reference(map);
        oldmap = vm_map_switch(map);
        if (copyin((char *)src_addr, (char *)dst_addr, size)) {
            kr = KERN_INVALID_ADDRESS;
        }
        vm_map_switch(oldmap);
        vm_map_deallocate(map);
    }
    return kr;
}
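
/*
 * Illustrative sketch only (not part of the original source): moving bytes
 * between the kernel and an arbitrary task map with the two routines
 * above.  The helpers are hypothetical; both calls temporarily switch to
 * the target map when it is not the caller's current map.
 */
#if 0
static kern_return_t
example_copy_to_task(vm_map_t task_map, void *kbuf,
                     vm_offset_t uaddr, vm_size_t len)
{
    return vm_map_write_user(task_map, (vm_offset_t)kbuf, uaddr, len);
}

static kern_return_t
example_copy_from_task(vm_map_t task_map, vm_offset_t uaddr,
                       void *kbuf, vm_size_t len)
{
    return vm_map_read_user(task_map, uaddr, (vm_offset_t)kbuf, len);
}
#endif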
/* Takes existing source and destination sub-maps and clones the contents of */
/* the source map */

kern_return_t
vm_region_clone(
    ipc_port_t  src_region,
    ipc_port_t  dst_region)
{
    vm_named_entry_t    src_object;
    vm_named_entry_t    dst_object;
    vm_map_t            src_map;
    vm_map_t            dst_map;
    vm_offset_t         addr;
    vm_offset_t         max_off;
    vm_map_entry_t      entry;
    vm_map_entry_t      new_entry;
    vm_map_entry_t      insert_point;

    src_object = (vm_named_entry_t)src_region->ip_kobject;
    dst_object = (vm_named_entry_t)dst_region->ip_kobject;
    if((!src_object->is_sub_map) || (!dst_object->is_sub_map)) {
        return KERN_INVALID_ARGUMENT;
    }
    src_map = (vm_map_t)src_object->backing.map;
    dst_map = (vm_map_t)dst_object->backing.map;
    /* destination map is assumed to be unavailable to any other */
    /* activity.  i.e. it is new */
    vm_map_lock(src_map);
    if((src_map->min_offset != dst_map->min_offset)
            || (src_map->max_offset != dst_map->max_offset)) {
        vm_map_unlock(src_map);
        return KERN_INVALID_ARGUMENT;
    }
    addr = src_map->min_offset;
    vm_map_lookup_entry(dst_map, addr, &entry);
    if(entry == vm_map_to_entry(dst_map)) {
        entry = entry->vme_next;
    }
    if(entry == vm_map_to_entry(dst_map)) {
        max_off = src_map->max_offset;
    } else {
        max_off = entry->vme_start;
    }
    vm_map_lookup_entry(src_map, addr, &entry);
    if(entry == vm_map_to_entry(src_map)) {
        entry = entry->vme_next;
    }
    vm_map_lookup_entry(dst_map, addr, &insert_point);
    while((entry != vm_map_to_entry(src_map)) &&
          (entry->vme_end <= max_off)) {
        addr = entry->vme_start;
        new_entry = vm_map_entry_create(dst_map);
        vm_map_entry_copy(new_entry, entry);
        vm_map_entry_link(dst_map, insert_point, new_entry);
        insert_point = new_entry;
        if (entry->object.vm_object != VM_OBJECT_NULL) {
            if (new_entry->is_sub_map) {
                vm_map_reference(new_entry->object.sub_map);
            } else {
                vm_object_reference(
                    new_entry->object.vm_object);
            }
        }
        dst_map->size += new_entry->vme_end - new_entry->vme_start;
        entry = entry->vme_next;
    }
    vm_map_unlock(src_map);
    return KERN_SUCCESS;
}
/*
 * Export routines to other components for the things we access locally through
 * macros.
 */
#undef current_map
vm_map_t
current_map(void)
{
    return (current_map_fast());
}
/*
 *	vm_map_check_protection:
 *
 *	Assert that the target map allows the specified
 *	privilege on the entire address region given.
 *	The entire region must be allocated.
 */
boolean_t vm_map_check_protection(map, start, end, protection)
    register vm_map_t       map;
    register vm_offset_t    start;
    register vm_offset_t    end;
    register vm_prot_t      protection;
{
    register vm_map_entry_t entry;
    vm_map_entry_t          tmp_entry;

    vm_map_lock(map);

    if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
    {
        vm_map_unlock(map);
        return (FALSE);
    }

    if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
        vm_map_unlock(map);
        return(FALSE);
    }

    entry = tmp_entry;

    while (start < end) {
        if (entry == vm_map_to_entry(map)) {
            vm_map_unlock(map);
            return(FALSE);
        }

        /*
         *	No holes allowed!
         */

        if (start < entry->vme_start) {
            vm_map_unlock(map);
            return(FALSE);
        }

        /*
         * Check protection associated with entry.
         */

        if ((entry->protection & protection) != protection) {
            vm_map_unlock(map);
            return(FALSE);
        }

        /* go to next entry */

        start = entry->vme_end;
        entry = entry->vme_next;
    }
    vm_map_unlock(map);
    return(TRUE);
}
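
/*
 * Illustrative sketch only (not part of the original source): verifying
 * that an entire range is mapped with at least read/write access before
 * operating on it.  The wrapper is hypothetical; vm_map_check_protection()
 * above returns FALSE on any hole or insufficient protection.
 */
#if 0
static boolean_t
example_range_is_writable(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
    return vm_map_check_protection(map, start, end,
                                   VM_PROT_READ | VM_PROT_WRITE);
}
#endif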
/*
 * This routine is obsolete, but included for backward
 * compatibility for older drivers.
 */
void
kernel_vm_map_reference(
    vm_map_t map)
{
    vm_map_reference(map);
}
/*
 *	vm_map_reference:
 *
 *	Most code internal to the osfmk will go through a
 *	macro defining this.  This is always here for the
 *	use of other kernel components.
 */
#undef vm_map_reference
void
vm_map_reference(
    register vm_map_t   map)
{
    if (map == VM_MAP_NULL)
        return;

    mutex_lock(&map->s_lock);
#if TASK_SWAPPER
    assert(map->res_count > 0);
    assert(map->ref_count >= map->res_count);
    map->res_count++;
#endif
    map->ref_count++;
    mutex_unlock(&map->s_lock);
}
/*
 *	vm_map_deallocate:
 *
 *	Removes a reference from the specified map,
 *	destroying it if no references remain.
 *	The map should not be locked.
 */
void
vm_map_deallocate(
    register vm_map_t   map)
{
    unsigned int        ref;

    if (map == VM_MAP_NULL)
        return;

    mutex_lock(&map->s_lock);
    ref = --map->ref_count;
    if (ref > 0) {
        vm_map_res_deallocate(map);
        mutex_unlock(&map->s_lock);
        return;
    }
    assert(map->ref_count == 0);
    mutex_unlock(&map->s_lock);

#if TASK_SWAPPER
    /*
     * The map residence count isn't decremented here because
     * the vm_map_delete below will traverse the entire map,
     * deleting entries, and the residence counts on objects
     * and sharing maps will go away then.
     */
#endif

    vm_map_destroy(map);
}
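
/*
 * Illustrative sketch only (not part of the original source): the usual
 * pairing of the reference routines above.  Each vm_map_reference() must
 * be balanced by a vm_map_deallocate(); the map is destroyed when the
 * last reference is dropped.  The helper is hypothetical.
 */
#if 0
static void
example_hold_and_release(vm_map_t map)
{
    vm_map_reference(map);      /* keep the map alive while we use it */
    /* ... use the map ... */
    vm_map_deallocate(map);     /* drop our reference */
}
#endif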