/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory mapping module.
 */
#include <task_swapper.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <mach/vm_statistics.h>
#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/zalloc.h>
#include <vm/vm_init.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_port.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <mach/vm_map_server.h>
#include <mach/mach_host_server.h>
#include <machine/db_machdep.h>
/* Internal prototypes
 */
extern boolean_t	vm_map_range_check(
				vm_map_entry_t	*entry);

extern vm_map_entry_t	_vm_map_entry_create(
				struct vm_map_header	*map_header);

extern void		_vm_map_entry_dispose(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry);

extern void		vm_map_pmap_enter(
				vm_offset_t		end_addr,
				vm_object_offset_t	offset,
				vm_prot_t		protection);

extern void		_vm_map_clip_end(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry,

extern void		vm_map_entry_delete(
				vm_map_entry_t		entry);

extern kern_return_t	vm_map_delete(

extern void		vm_map_copy_steal_pages(

extern kern_return_t	vm_map_copy_overwrite_unaligned(
				vm_map_entry_t		entry,

extern kern_return_t	vm_map_copy_overwrite_aligned(
				vm_map_entry_t		tmp_entry,

extern kern_return_t	vm_map_copyin_kernel_buffer(
				vm_offset_t		src_addr,
				boolean_t		src_destroy,
				vm_map_copy_t		*copy_result);	/* OUT */

extern kern_return_t	vm_map_copyout_kernel_buffer(
				vm_offset_t		*addr,		/* IN/OUT */
				boolean_t		overwrite);

extern void		vm_map_fork_share(
				vm_map_entry_t		old_entry,

extern boolean_t	vm_map_fork_copy(
				vm_map_entry_t		*old_entry_p,

extern kern_return_t	vm_remap_range_allocate(
				vm_offset_t		*address,	/* IN/OUT */
				vm_map_entry_t		*map_entry);	/* OUT */

extern void		_vm_map_clip_start(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry,

void vm_region_top_walk(
	vm_map_entry_t			entry,
	vm_region_top_info_t		top);

	vm_map_entry_t			entry,
	vm_region_extended_info_t	extended,
	vm_object_offset_t		offset,
/*
 *	Macros to copy a vm_map_entry. We must be careful to correctly
 *	manage the wired page count. vm_map_entry_copy() creates a new
 *	map entry to the same memory - the wired count in the new entry
 *	must be set to zero. vm_map_entry_copy_full() creates a new
 *	entry that is identical to the old entry.  This preserves the
 *	wire count; it's used for map splitting and zone changing in
 *	vm_map_copyout.
 */
#define vm_map_entry_copy(NEW,OLD) \
MACRO_BEGIN                                     \
                *(NEW) = *(OLD);                \
                (NEW)->is_shared = FALSE;	\
                (NEW)->needs_wakeup = FALSE;	\
                (NEW)->in_transition = FALSE;	\
                (NEW)->wired_count = 0;		\
                (NEW)->user_wired_count = 0;	\
MACRO_END

#define vm_map_entry_copy_full(NEW,OLD)		(*(NEW) = *(OLD))
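
/*
 * Editor's illustrative sketch (not part of the original source): the
 * difference between the two copy macros matters when duplicating an entry
 * that is currently wired.  Assuming "src" points at a wired entry:
 *
 *	struct vm_map_entry dst;
 *	vm_map_entry_copy(&dst, src);       ... dst.wired_count == 0 and
 *	                                        dst.user_wired_count == 0
 *	vm_map_entry_copy_full(&dst, src);  ... dst is a bit-for-bit copy,
 *	                                        wire counts preserved
 */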
/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	Sharing maps have been deleted from this version of Mach.
 *	All shared objects are now mapped directly into the respective
 *	maps.  This requires a change in the copy on write strategy;
 *	the asymmetric (delayed) strategy is used for shared temporary
 *	objects instead of the symmetric (shadow) strategy.  All maps
 *	are now "top level" maps (either task map, kernel map or submap
 *	of the kernel map).
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	adjacent entries.
 *
 *	The symmetric (shadow) copy strategy implements virtual copy
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map when this strategy
 *	is used -- this means that shadow object creation can be
 *	delayed until a write operation occurs.  The asymmetric (delayed)
 *	strategy allows multiple maps to have writeable references to
 *	the same region of a vm object, and hence cannot delay creating
 *	its copy objects.  See vm_object_copy_quickly() in vm_object.c.
 *	Copying of permanent objects is completely different; see
 *	vm_object_copy_strategically() in vm_object.c.
 */
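
/*
 * Editor's worked example (not part of the original source): clipping in
 * practice.  If an entry covers [0x1000, 0x4000) and an operation targets
 * [0x2000, 0x3000), vm_map_clip_start() splits it into [0x1000, 0x2000) and
 * [0x2000, 0x4000); vm_map_clip_end() then splits the latter into
 * [0x2000, 0x3000) and [0x3000, 0x4000), and the operation applies only to
 * the middle entry.
 */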
zone_t		vm_map_zone;		/* zone for vm_map structures */
zone_t		vm_map_entry_zone;	/* zone for vm_map_entry structures */
zone_t		vm_map_kentry_zone;	/* zone for kernel entry structures */
zone_t		vm_map_copy_zone;	/* zone for vm_map_copy structures */

/*
 *	Placeholder object for submap operations.  This object is dropped
 *	into the range by a call to vm_map_find, and removed when
 *	vm_map_submap creates the submap.
 */
vm_object_t	vm_submap_object;
/*
 *	vm_map_init:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from zones -- we must
 *	initialize those zones.
 *
 *	There are three zones of interest:
 *
 *	vm_map_zone:		used to allocate maps.
 *	vm_map_entry_zone:	used to allocate map entries.
 *	vm_map_kentry_zone:	used to allocate map entries for the kernel.
 *
 *	The kernel allocates map entries from a special zone that is initially
 *	"crammed" with memory.  It would be difficult (perhaps impossible) for
 *	the kernel to allocate more memory to an entry zone when it became
 *	empty since the very act of allocating memory implies the creation
 *	of a new entry.
 */

vm_offset_t	map_data;
vm_size_t	map_data_size;
vm_offset_t	kentry_data;
vm_size_t	kentry_data_size;
int		kentry_count = 2048;		/* to init kentry_data_size */
#define NO_COALESCE_LIMIT	(1024 * 128)

/*
 *	Threshold for aggressive (eager) page map entering for vm copyout
 *	operations.  Any copyout larger will NOT be aggressively entered.
 */
vm_size_t	vm_map_aggressive_enter_max;	/* set by bootstrap */

/* Skip acquiring locks if we're in the midst of a kernel core dump */
extern unsigned int	not_in_kdp;
	vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 40*1024,

	vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
				  1024*1024, PAGE_SIZE*5,
				  "non-kernel map entries");

	vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
				   kentry_data_size, kentry_data_size,
				   "kernel map entries");

	vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
				 16*1024, PAGE_SIZE, "map copies");

	/*
	 *	Cram the map and kentry zones with initial data.
	 *	Set kentry_zone non-collectible to aid zone_gc().
	 */
	zone_change(vm_map_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE);
	zcram(vm_map_zone, map_data, map_data_size);
	zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);

	/* Bootstrap: steal the initial memory for the map and kentry zones. */
	map_data_size = round_page_32(10 * sizeof(struct vm_map));
	map_data = pmap_steal_memory(map_data_size);

	/*
	 * Limiting worst case: vm_map_kentry_zone needs to map each "available"
	 * physical page (i.e. that beyond the kernel image and page tables)
	 * individually; we guess at most one entry per eight pages in the
	 * real world. This works out to roughly .1 of 1% of physical memory,
	 * or roughly 1900 entries (64K) for a 64M machine with 4K pages.
	 */
	kentry_count = pmap_free_pages() / 8;

	kentry_data_size =
		round_page_32(kentry_count * sizeof(struct vm_map_entry));
	kentry_data = pmap_steal_memory(kentry_data_size);
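
/*
 * Editor's worked example (not in the original source), using the guess
 * above: a 64MB machine with 4KB pages has 16384 physical pages; one entry
 * per eight pages gives 16384 / 8 = 2048 map entries, consistent with the
 * "roughly 1900" figure once the kernel image and page tables are excluded.
 * With a vm_map_entry on the order of 32 bytes (machine-dependent), 2048
 * entries round up to about 64KB of stolen memory for kentry_data.
 */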
/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
	register vm_map_t	result;

	result = (vm_map_t) zalloc(vm_map_zone);
	if (result == VM_MAP_NULL)
		panic("vm_map_create");

	vm_map_first_entry(result) = vm_map_to_entry(result);
	vm_map_last_entry(result)  = vm_map_to_entry(result);
	result->hdr.nentries = 0;
	result->hdr.entries_pageable = pageable;

	result->ref_count = 1;
#if	TASK_SWAPPER
	result->res_count = 1;
	result->sw_state = MAP_SW_IN;
#endif	/* TASK_SWAPPER */
	result->min_offset = min;
	result->max_offset = max;
	result->wiring_required = FALSE;
	result->no_zero_fill = FALSE;
	result->mapped = FALSE;
	result->wait_for_space = FALSE;
	result->first_free = vm_map_to_entry(result);
	result->hint = vm_map_to_entry(result);
	vm_map_lock_init(result);
	mutex_init(&result->s_lock, ETAP_VM_RESULT);
/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion in the
 *	given map (or map copy).  No fields are filled.
 */
#define	vm_map_entry_create(map) \
	    _vm_map_entry_create(&(map)->hdr)

#define	vm_map_copy_entry_create(copy) \
	    _vm_map_entry_create(&(copy)->cpy_hdr)

_vm_map_entry_create(
	register struct vm_map_header	*map_header)
{
	register zone_t		zone;
	register vm_map_entry_t	entry;

	if (map_header->entries_pageable)
		zone = vm_map_entry_zone;
	else
		zone = vm_map_kentry_zone;

	entry = (vm_map_entry_t) zalloc(zone);
	if (entry == VM_MAP_ENTRY_NULL)
		panic("vm_map_entry_create");
/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
#define	vm_map_entry_dispose(map, entry)			\
MACRO_BEGIN							\
	if((entry) == (map)->first_free)			\
		(map)->first_free = vm_map_to_entry(map);	\
	if((entry) == (map)->hint)				\
		(map)->hint = vm_map_to_entry(map);		\
	_vm_map_entry_dispose(&(map)->hdr, (entry));		\
MACRO_END

#define	vm_map_copy_entry_dispose(map, entry) \
	_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))

_vm_map_entry_dispose(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry)
{
	register zone_t		zone;

	if (map_header->entries_pageable)
		zone = vm_map_entry_zone;
	else
		zone = vm_map_kentry_zone;

	zfree(zone, (vm_offset_t) entry);
boolean_t	first_free_is_valid(vm_map_t map);	/* forward */
boolean_t	first_free_check = FALSE;

	vm_map_entry_t	entry, next;

	if (!first_free_check)
		return TRUE;

	entry = vm_map_to_entry(map);
	next = entry->vme_next;
	while (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_end) ||
	       (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_start) &&
		next != vm_map_to_entry(map))) {
		entry = next;
		next = entry->vme_next;
		if (entry == vm_map_to_entry(map))
			break;
	}
	if (map->first_free != entry) {
		printf("Bad first_free for map 0x%x: 0x%x should be 0x%x\n",
		       map, map->first_free, entry);
/*
 *	UPDATE_FIRST_FREE:
 *
 *	Updates the map->first_free pointer to the
 *	entry immediately before the first hole in the map.
 *	The map should be locked.
 */
#define UPDATE_FIRST_FREE(map, new_first_free) 				\
MACRO_BEGIN								\
	vm_map_t	UFF_map; 					\
	vm_map_entry_t	UFF_first_free; 				\
	vm_map_entry_t	UFF_next_entry; 				\
	UFF_map = (map); 						\
	UFF_first_free = (new_first_free); 				\
	UFF_next_entry = UFF_first_free->vme_next; 			\
	while (trunc_page_32(UFF_next_entry->vme_start) == 		\
	       trunc_page_32(UFF_first_free->vme_end) || 		\
	       (trunc_page_32(UFF_next_entry->vme_start) == 		\
		trunc_page_32(UFF_first_free->vme_start) && 		\
		UFF_next_entry != vm_map_to_entry(UFF_map))) { 		\
		UFF_first_free = UFF_next_entry; 			\
		UFF_next_entry = UFF_first_free->vme_next; 		\
		if (UFF_first_free == vm_map_to_entry(UFF_map)) 	\
			break; 						\
	} 								\
	UFF_map->first_free = UFF_first_free; 				\
	assert(first_free_is_valid(UFF_map)); 				\
MACRO_END
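
/*
 * Editor's illustration (not part of the original source): given entries
 * [0x1000,0x2000) -> [0x2000,0x3000) -> [0x5000,0x6000), invoking
 * UPDATE_FIRST_FREE(map, vm_map_first_entry(map)) walks forward while the
 * next entry starts on the page where the current one ends, stops at the
 * gap before 0x5000, and leaves map->first_free pointing at the
 * [0x2000,0x3000) entry, i.e. the entry immediately before the first hole.
 */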
/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps (or map copies).
 */
#define vm_map_entry_link(map, after_where, entry)			\
MACRO_BEGIN								\
	vm_map_t VMEL_map; 						\
	vm_map_entry_t VMEL_entry; 					\
	VMEL_map = (map); 						\
	VMEL_entry = (entry); 						\
	_vm_map_entry_link(&VMEL_map->hdr, after_where, VMEL_entry); 	\
	UPDATE_FIRST_FREE(VMEL_map, VMEL_map->first_free); 		\
MACRO_END

#define vm_map_copy_entry_link(copy, after_where, entry)		\
	_vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry))

#define _vm_map_entry_link(hdr, after_where, entry)			\
MACRO_BEGIN								\
	(entry)->vme_prev = (after_where); 				\
	(entry)->vme_next = (after_where)->vme_next; 			\
	(entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
MACRO_END

#define vm_map_entry_unlink(map, entry)					\
MACRO_BEGIN								\
	vm_map_t VMEU_map; 						\
	vm_map_entry_t VMEU_entry; 					\
	vm_map_entry_t VMEU_first_free; 				\
	VMEU_map = (map); 						\
	VMEU_entry = (entry); 						\
	if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) 	\
		VMEU_first_free = VMEU_entry->vme_prev; 		\
	else								\
		VMEU_first_free = VMEU_map->first_free; 		\
	_vm_map_entry_unlink(&VMEU_map->hdr, VMEU_entry); 		\
	UPDATE_FIRST_FREE(VMEU_map, VMEU_first_free); 			\
MACRO_END

#define vm_map_copy_entry_unlink(copy, entry)				\
	_vm_map_entry_unlink(&(copy)->cpy_hdr, (entry))

#define _vm_map_entry_unlink(hdr, entry)				\
MACRO_BEGIN								\
	(entry)->vme_next->vme_prev = (entry)->vme_prev; 		\
	(entry)->vme_prev->vme_next = (entry)->vme_next; 		\
MACRO_END
#if	MACH_ASSERT && TASK_SWAPPER
/*
 *	vm_map_res_reference:
 *
 *	Adds another valid residence count to the given map.
 *
 *	Map is locked so this function can be called from
 *	vm_map_swapin.
 */
void vm_map_res_reference(register vm_map_t map)
{
	/* assert map is locked */
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	if (map->res_count == 0) {
		mutex_unlock(&map->s_lock);

		mutex_lock(&map->s_lock);

/*
 *	vm_map_reference_swap:
 *
 *	Adds valid reference and residence counts to the given map.
 *
 *	The map may not be in memory (i.e. zero residence count).
 */
void vm_map_reference_swap(register vm_map_t map)
{
	assert(map != VM_MAP_NULL);
	mutex_lock(&map->s_lock);
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);

	vm_map_res_reference(map);
	mutex_unlock(&map->s_lock);
}

/*
 *	vm_map_res_deallocate:
 *
 *	Decrement residence count on a map; possibly causing swapout.
 *
 *	The map must be in memory (i.e. non-zero residence count).
 *
 *	The map is locked, so this function is callable from vm_map_deallocate.
 */
void vm_map_res_deallocate(register vm_map_t map)
{
	assert(map->res_count > 0);
	if (--map->res_count == 0) {
		mutex_unlock(&map->s_lock);

		mutex_lock(&map->s_lock);
	}
	assert(map->ref_count >= map->res_count);
}
#endif	/* MACH_ASSERT && TASK_SWAPPER */
/*
 *	vm_map_destroy:
 *
 *	Actually destroy a map.
 */
	register vm_map_t	map)
{
	(void) vm_map_delete(map, map->min_offset,
			     map->max_offset, VM_MAP_NO_FLAGS);

	pmap_destroy(map->pmap);

	zfree(vm_map_zone, (vm_offset_t) map);
/*
 *	vm_map_swapin/vm_map_swapout
 *
 *	Swap a map in and out, either referencing or releasing its resources.
 *	These functions are internal use only; however, they must be exported
 *	because they may be called from macros, which are exported.
 *
 *	In the case of swapout, there could be races on the residence count,
 *	so if the residence count is up, we return, assuming that a
 *	vm_map_deallocate() call in the near future will bring us back.
 *
 *	Locking:
 *	-- We use the map write lock for synchronization among races.
 *	-- The map write lock, and not the simple s_lock, protects the
 *	   swap state of the map.
 *	-- If a map entry is a share map, then we hold both locks, in
 *	   hierarchical order.
 *
 *	Synchronization Notes:
 *	1) If a vm_map_swapin() call happens while swapout in progress, it
 *	will block on the map lock and proceed when swapout is through.
 *	2) A vm_map_reference() call at this time is illegal, and will
 *	cause a panic.  vm_map_reference() is only allowed on resident
 *	maps, since it refuses to block.
 *	3) A vm_map_swapin() call during a swapin will block, and
 *	proceed when the first swapin is done, turning into a nop.
 *	This is the reason the res_count is not incremented until
 *	after the swapin is complete.
 *	4) There is a timing hole after the checks of the res_count, before
 *	the map lock is taken, during which a swapin may get the lock
 *	before a swapout about to happen.  If this happens, the swapin
 *	will detect the state and increment the reference count, causing
 *	the swapout to be a nop, thereby delaying it until a later
 *	vm_map_deallocate.  If the swapout gets the lock first, then
 *	the swapin will simply block until the swapout is done, and
 *	then proceed.
 *
 *	Because vm_map_swapin() is potentially an expensive operation, it
 *	should be used with caution.
 *
 *	Invariants:
 *	1) A map with a residence count of zero is either swapped, or
 *	   being swapped.
 *	2) A map with a non-zero residence count is either resident,
 *	   or being swapped in.
 */

int vm_map_swap_enable = 1;
void vm_map_swapin (vm_map_t map)
{
	register vm_map_entry_t entry;

	if (!vm_map_swap_enable)	/* debug */
		return;

	/*
	 * First deal with various races.
	 */
	if (map->sw_state == MAP_SW_IN)
		/*
		 * we raced with swapout and won.  Returning will incr.
		 * the res_count, turning the swapout into a nop.
		 */
		return;

	/*
	 * The residence count must be zero.  If we raced with another
	 * swapin, the state would have been IN; if we raced with a
	 * swapout (after another competing swapin), we must have lost
	 * the race to get here (see above comment), in which case
	 * res_count is still 0.
	 */
	assert(map->res_count == 0);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_OUT);

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_reference upon it.
	 * If the entry is an object, we call vm_object_res_reference
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_reference.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_reference(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may iterate through the
				 * shadow chain.
				 */
				vm_object_res_reference(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_OUT);
	map->sw_state = MAP_SW_IN;
void vm_map_swapout(vm_map_t map)
{
	register vm_map_entry_t entry;

	/*
	 * First deal with various races.
	 * If we raced with a swapin and lost, the residence count
	 * will have been incremented to 1, and we simply return.
	 */
	mutex_lock(&map->s_lock);
	if (map->res_count != 0) {
		mutex_unlock(&map->s_lock);
		return;
	}
	mutex_unlock(&map->s_lock);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_IN);

	if (!vm_map_swap_enable)
		return;

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_deallocate upon it.
	 * If the entry is an object, we call vm_object_res_deallocate
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_deallocate.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_deallocate(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may take a long time,
				 * since it could actively push
				 * out pages (if we implement it
				 * that way).
				 */
				vm_object_res_deallocate(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_IN);
	map->sw_state = MAP_SW_OUT;

#endif	/* TASK_SWAPPER */
/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.  Performs necessary interlocks.
 */
#define	SAVE_HINT(map,value) \
MACRO_BEGIN \
		mutex_lock(&(map)->s_lock); \
		(map)->hint = (value); \
		mutex_unlock(&(map)->s_lock); \
MACRO_END
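
/*
 * Editor's usage sketch (not part of the original source): the interlock is
 * the simple s_lock, so concurrent lookups never observe a torn hint update.
 * A typical caller, with the map already locked, looks like:
 *
 *	if (vm_map_lookup_entry(map, addr, &entry))
 *		SAVE_HINT(map, entry);
 */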
/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	register vm_map_t	map,
	register vm_offset_t	address,
	vm_map_entry_t		*entry)		/* OUT */
{
	register vm_map_entry_t	cur;
	register vm_map_entry_t	last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */
	mutex_lock(&map->s_lock);
	cur = map->hint;
	mutex_unlock(&map->s_lock);

	if (cur == vm_map_to_entry(map))

	if (address >= cur->vme_start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = vm_map_to_entry(map);
		if ((cur != last) && (cur->vme_end > address)) {

	} else {
		/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->vme_next;
		cur = vm_map_first_entry(map);
	}

	while (cur != last) {
		if (cur->vme_end > address) {
			if (address >= cur->vme_start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */

	*entry = cur->vme_prev;
	SAVE_HINT(map, *entry);
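
/*
 * Editor's usage sketch (not part of the original source), assuming the
 * caller already holds the map lock:
 *
 *	vm_map_entry_t	entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies inside "entry" ...
 *	} else {
 *		... addr is unmapped; "entry" precedes the hole ...
 *	}
 */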
/*
 *	Routine:	vm_map_find_space
 *	Purpose:
 *		Allocate a range in the specified virtual address map,
 *		returning the entry allocated for that range.
 *		Used by kmem_alloc, etc.
 *
 *		The map must NOT be locked.  It will be returned locked
 *		on KERN_SUCCESS, unlocked on failure.
 *
 *		If an entry is allocated, the object/offset fields
 *		are initialized to zero.
 */
kern_return_t
vm_map_find_space(
	register vm_map_t	map,
	vm_offset_t		*address,	/* OUT */
	vm_map_entry_t		*o_entry)	/* OUT */
{
	register vm_map_entry_t	entry, new_entry;
	register vm_offset_t	start;
	register vm_offset_t	end;

	new_entry = vm_map_entry_create(map);

	/*
	 *	Look for the first possible address; if there's already
	 *	something at this address, we have to start after it.
	 */

	assert(first_free_is_valid(map));
	if ((entry = map->first_free) == vm_map_to_entry(map))
		start = map->min_offset;
	else
		start = entry->vme_end;

	/*
	 *	In any case, the "entry" always precedes
	 *	the proposed new region throughout the loop:
	 */

	while (TRUE) {
		register vm_map_entry_t	next;

		/*
		 *	Find the end of the proposed new region.
		 *	Be sure we didn't go beyond the end, or
		 *	wrap around the address.
		 */

		end = ((start + mask) & ~mask);
		if (end < start) {
			vm_map_entry_dispose(map, new_entry);
			return(KERN_NO_SPACE);
		}

		if ((end > map->max_offset) || (end < start)) {
			vm_map_entry_dispose(map, new_entry);
			return(KERN_NO_SPACE);
		}

		/*
		 *	If there are no more entries, we must win.
		 */

		next = entry->vme_next;
		if (next == vm_map_to_entry(map))
			break;

		/*
		 *	If there is another entry, it must be
		 *	after the end of the potential new region.
		 */

		if (next->vme_start >= end)
			break;

		/*
		 *	Didn't fit -- move to the next entry.
		 */

		entry = next;
		start = entry->vme_end;
	}

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->is_shared = FALSE;
	new_entry->is_sub_map = FALSE;
	new_entry->use_pmap = FALSE;
	new_entry->object.vm_object = VM_OBJECT_NULL;
	new_entry->offset = (vm_object_offset_t) 0;

	new_entry->needs_copy = FALSE;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = VM_PROT_DEFAULT;
	new_entry->max_protection = VM_PROT_ALL;
	new_entry->behavior = VM_BEHAVIOR_DEFAULT;
	new_entry->wired_count = 0;
	new_entry->user_wired_count = 0;

	new_entry->in_transition = FALSE;
	new_entry->needs_wakeup = FALSE;

	/*
	 *	Insert the new entry into the list
	 */

	vm_map_entry_link(map, entry, new_entry);

	/*
	 *	Update the lookup hint
	 */
	SAVE_HINT(map, new_entry);

	*o_entry = new_entry;
	return(KERN_SUCCESS);
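
/*
 * Editor's usage sketch (not part of the original source): a kmem_alloc-style
 * caller, assuming the elided parameters include an allocation size and an
 * alignment mask:
 *
 *	vm_offset_t	addr;
 *	vm_map_entry_t	entry;
 *
 *	if (vm_map_find_space(kernel_map, &addr, size, 0, &entry)
 *			!= KERN_SUCCESS)
 *		return KERN_NO_SPACE;
 *	entry->object.vm_object = object;	... map comes back locked ...
 *	vm_map_unlock(kernel_map);
 */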
int vm_map_pmap_enter_print = FALSE;
int vm_map_pmap_enter_enable = FALSE;

/*
 *	Routine:	vm_map_pmap_enter
 *
 *	Description:
 *		Force pages from the specified object to be entered into
 *		the pmap at the specified address if they are present.
 *		As soon as a page is not found in the object, the scan ends.
 *
 *	In/out conditions:
 *		The source map should not be locked on entry.
 */
void
vm_map_pmap_enter(
	register vm_offset_t	addr,
	register vm_offset_t	end_addr,
	register vm_object_t	object,
	vm_object_offset_t	offset,
	vm_prot_t		protection)
{
	unsigned int		cache_attr;

	while (addr < end_addr) {
		register vm_page_t	m;

		vm_object_lock(object);
		vm_object_paging_begin(object);

		m = vm_page_lookup(object, offset);
		if (m == VM_PAGE_NULL || m->busy ||
		    (m->unusual && ( m->error || m->restart || m->absent ||
				    protection & m->page_lock))) {

			vm_object_paging_end(object);
			vm_object_unlock(object);
			return;
		}

		assert(!m->fictitious);	/* XXX is this possible ??? */

		if (vm_map_pmap_enter_print) {
			printf("vm_map_pmap_enter:");
			printf("map: %x, addr: %x, object: %x, offset: %x\n",
				map, addr, object, offset);
		}

		if (m->no_isync == TRUE) {
			pmap_sync_caches_phys(m->phys_page);
			m->no_isync = FALSE;
		}

		cache_attr = ((unsigned int)object->wimg_bits) & VM_WIMG_MASK;
		vm_object_unlock(object);

		PMAP_ENTER(map->pmap, addr, m,
			   protection, cache_attr, FALSE);

		vm_object_lock(object);

		PAGE_WAKEUP_DONE(m);
		vm_page_lock_queues();
		if (!m->active && !m->inactive)
			vm_page_activate(m);
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		vm_object_unlock(object);

		offset += PAGE_SIZE_64;
		addr += PAGE_SIZE;
/*
 *	Routine:	vm_map_enter
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		The resulting range will refer to memory defined by
 *		the given memory object and offset into that object.
 *
 *		Arguments are as defined in the vm_map call.
 */
kern_return_t
vm_map_enter(
	register vm_map_t	map,
	vm_offset_t		*address,	/* IN/OUT */
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_entry_t		entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	kern_return_t		result = KERN_SUCCESS;

	boolean_t		anywhere = VM_FLAGS_ANYWHERE & flags;

	VM_GET_FLAGS_ALIAS(flags, alias);

#define	RETURN(value)	{ result = value; goto BailOut; }

	assert(page_aligned(*address));
	assert(page_aligned(size));

		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset)
			RETURN(KERN_NO_SPACE);

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		assert(first_free_is_valid(map));
		if (start == map->min_offset) {
			if ((entry = map->first_free) != vm_map_to_entry(map))
				start = entry->vme_end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->vme_end;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = ((start + mask) & ~mask);
			if (end < start)
				RETURN(KERN_NO_SPACE);

			if ((end > map->max_offset) || (end < start)) {
				if (map->wait_for_space) {
					if (size <= (map->max_offset -

						assert_wait((event_t)map,

						thread_block((void (*)(void))0);

				}
				RETURN(KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->vme_next;
			if (next == vm_map_to_entry(map))
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->vme_start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->vme_end;
		}

		vm_map_entry_t		temp_entry;

		/*
		 *	...the address doesn't itself violate
		 *	the mask requirement.
		 */

		if ((start & mask) != 0)
			RETURN(KERN_NO_SPACE);

		/*
		 *	...the address is within bounds
		 */

		if ((start < map->min_offset) ||
		    (end > map->max_offset) ||
		    (start >= end)) {
			RETURN(KERN_INVALID_ADDRESS);
		}

		/*
		 *	...the starting address isn't allocated
		 */

		if (vm_map_lookup_entry(map, start, &temp_entry))
			RETURN(KERN_NO_SPACE);

		/*
		 *	...the next region doesn't overlap the
		 *	end point.
		 */

		if ((entry->vme_next != vm_map_to_entry(map)) &&
		    (entry->vme_next->vme_start < end))
			RETURN(KERN_NO_SPACE);

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */

	/*
	 *	See whether we can avoid creating a new entry (and object) by
	 *	extending one of our neighbors.  [So far, we only attempt to
	 *	extend from below.]
	 */

	if ((object == VM_OBJECT_NULL) &&
	    (entry != vm_map_to_entry(map)) &&
	    (entry->vme_end == start) &&
	    (!entry->is_shared) &&
	    (!entry->is_sub_map) &&
	    (entry->alias == alias) &&
	    (entry->inheritance == inheritance) &&
	    (entry->protection == cur_protection) &&
	    (entry->max_protection == max_protection) &&
	    (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
	    (entry->in_transition == 0) &&
	    ((alias == VM_MEMORY_REALLOC) ||
	     ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT)) &&
	    (entry->wired_count == 0)) { /* implies user_wired_count == 0 */
		if (vm_object_coalesce(entry->object.vm_object,

				(vm_object_offset_t) 0,
				(vm_size_t)(entry->vme_end - entry->vme_start),
				(vm_size_t)(end - entry->vme_end))) {

			/*
			 *	Coalesced the two objects - can extend
			 *	the previous map entry to include the
			 *	new range.
			 */
			map->size += (end - entry->vme_end);
			entry->vme_end = end;
			UPDATE_FIRST_FREE(map, map->first_free);
			RETURN(KERN_SUCCESS);
		}
	}

	/*
	 *	Create a new entry
	 */

	{
		register vm_map_entry_t	new_entry;

		new_entry = vm_map_entry_insert(map, entry, start, end, object,
					offset, needs_copy, FALSE, FALSE,
					cur_protection, max_protection,
					VM_BEHAVIOR_DEFAULT, inheritance, 0);
		new_entry->alias = alias;

		/* Wire down the new entry if the user
		 * requested all new map entries be wired.
		 */
		if (map->wiring_required) {
			result = vm_map_wire(map, start, end,
					new_entry->protection, TRUE);
		}

		if ((object != VM_OBJECT_NULL) &&
		    (vm_map_pmap_enter_enable) &&
		    (size < (128*1024))) {
			vm_map_pmap_enter(map, start, end,
					object, offset, cur_protection);
		}
	}
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef i386
#define vm_map_clip_start(map, entry, startaddr) \
MACRO_BEGIN \
	vm_map_t VMCS_map; \
	vm_map_entry_t VMCS_entry; \
	vm_offset_t VMCS_startaddr; \
	VMCS_map = (map); \
	VMCS_entry = (entry); \
	VMCS_startaddr = (startaddr); \
	if (VMCS_startaddr > VMCS_entry->vme_start) { \
		if(entry->use_pmap) { \
			vm_offset_t	pmap_base_addr; \
			\
			pmap_base_addr = 0xF0000000 & entry->vme_start; \
			pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
			entry->use_pmap = FALSE; \
		} else if(entry->object.vm_object \
			&& !entry->is_sub_map \
			&& entry->object.vm_object->phys_contiguous) { \
			pmap_remove(map->pmap, \
				(addr64_t)(entry->vme_start), \
				(addr64_t)(entry->vme_end)); \
		} \
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	} \
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
MACRO_END
#else
#define vm_map_clip_start(map, entry, startaddr) \
MACRO_BEGIN \
	vm_map_t VMCS_map; \
	vm_map_entry_t VMCS_entry; \
	vm_offset_t VMCS_startaddr; \
	VMCS_map = (map); \
	VMCS_entry = (entry); \
	VMCS_startaddr = (startaddr); \
	if (VMCS_startaddr > VMCS_entry->vme_start) { \
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	} \
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
MACRO_END
#endif

#define vm_map_copy_clip_start(copy, entry, startaddr) \
	MACRO_BEGIN \
	if ((startaddr) > (entry)->vme_start) \
		_vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
	MACRO_END

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_start(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		start)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Split off the front portion --
	 *	note that we must insert the new
	 *	entry BEFORE this one, so that
	 *	this entry has the specified starting
	 *	address.
	 */

	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_end = start;
	entry->offset += (start - entry->vme_start);
	entry->vme_start = start;

	_vm_map_entry_link(map_header, entry->vme_prev, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef i386
#define vm_map_clip_end(map, entry, endaddr) \
MACRO_BEGIN \
	vm_map_t VMCE_map; \
	vm_map_entry_t VMCE_entry; \
	vm_offset_t VMCE_endaddr; \
	VMCE_map = (map); \
	VMCE_entry = (entry); \
	VMCE_endaddr = (endaddr); \
	if (VMCE_endaddr < VMCE_entry->vme_end) { \
		if(entry->use_pmap) { \
			vm_offset_t	pmap_base_addr; \
			\
			pmap_base_addr = 0xF0000000 & entry->vme_start; \
			pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
			entry->use_pmap = FALSE; \
		} else if(entry->object.vm_object \
			&& !entry->is_sub_map \
			&& entry->object.vm_object->phys_contiguous) { \
			pmap_remove(map->pmap, \
				(addr64_t)(entry->vme_start), \
				(addr64_t)(entry->vme_end)); \
		} \
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	} \
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \
MACRO_END
#else
#define vm_map_clip_end(map, entry, endaddr) \
MACRO_BEGIN \
	vm_map_t VMCE_map; \
	vm_map_entry_t VMCE_entry; \
	vm_offset_t VMCE_endaddr; \
	VMCE_map = (map); \
	VMCE_entry = (entry); \
	VMCE_endaddr = (endaddr); \
	if (VMCE_endaddr < VMCE_entry->vme_end) { \
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	} \
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \
MACRO_END
#endif

#define vm_map_copy_clip_end(copy, entry, endaddr) \
	MACRO_BEGIN \
	if ((endaddr) < (entry)->vme_end) \
		_vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
	MACRO_END
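
/*
 * Editor's usage sketch (not part of the original source): clipping is always
 * bracketed by a locked map, e.g.
 *
 *	vm_map_lock(map);
 *	if (vm_map_lookup_entry(map, start, &entry)) {
 *		vm_map_clip_start(map, entry, start);
 *		vm_map_clip_end(map, entry, end);
 *		... operate on the exactly-covering entry ...
 *	}
 *	vm_map_unlock(map);
 *
 * On the nested-pmap configuration above, the clip macros also tear down any
 * 256MB pmap nesting (pmap_unnest) or physically contiguous mapping
 * (pmap_remove) that would otherwise straddle the split point.
 */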
/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_end(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		end)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */

	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_start = entry->vme_end = end;
	new_entry->offset += (end - entry->vme_start);

	_vm_map_entry_link(map_header, entry, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)	\
MACRO_BEGIN					\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
MACRO_END
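
/*
 * Editor's note (illustration, not part of the original source):
 * VM_MAP_RANGE_CHECK only clamps the requested range to the map's bounds,
 * it does not reject it.  For a map bounded by [VM_MIN_ADDRESS,
 * VM_MAX_ADDRESS), a request of (0, 0xFFFFFFFF) is silently trimmed to the
 * map bounds, so callers that need a hard failure must still validate the
 * addresses themselves.
 */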
/*
 *	vm_map_range_check:	[ internal use only ]
 *
 *	Check that the region defined by the specified start and
 *	end addresses is wholly contained within a single map
 *	entry or set of adjacent map entries of the specified map,
 *	i.e. the specified region contains no unmapped space.
 *	If any or all of the region is unmapped, FALSE is returned.
 *	Otherwise, TRUE is returned and if the output argument 'entry'
 *	is not NULL it points to the map entry containing the start
 *	of the region.
 *
 *	The map is locked for reading on entry and is left locked.
 */
boolean_t
vm_map_range_check(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_entry_t		*entry)
{
	vm_map_entry_t		cur;
	register vm_offset_t	prev;

	/*
	 * 	Basic sanity checks first
	 */
	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
		return (FALSE);

	/*
	 * 	Check first if the region starts within a valid
	 *	mapping for the map.
	 */
	if (!vm_map_lookup_entry(map, start, &cur))
		return (FALSE);

	/*
	 *	Optimize for the case that the region is contained
	 *	in a single map entry.
	 */
	if (entry != (vm_map_entry_t *) NULL)
		*entry = cur;
	if (end <= cur->vme_end)
		return (TRUE);

	/*
	 * 	If the region is not wholly contained within a
	 * 	single entry, walk the entries looking for holes.
	 */
	prev = cur->vme_end;
	cur = cur->vme_next;
	while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) {
		if (end <= cur->vme_end)
			return (TRUE);
		prev = cur->vme_end;
		cur = cur->vme_next;
	}
	return (FALSE);
/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find using
 *	the vm_submap_object, and no other operations may have been
 *	performed on this range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *	[Don't try vm_map_copyin!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
kern_return_t
vm_map_submap(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,

	vm_map_entry_t		entry;
	register kern_return_t	result = KERN_INVALID_ARGUMENT;
	register vm_object_t	object;

	submap->mapped = TRUE;

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = entry->vme_next;

	if(entry == vm_map_to_entry(map)) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_clip_end(map, entry, end);

	if ((entry->vme_start == start) && (entry->vme_end == end) &&
	    (!entry->is_sub_map) &&
	    ((object = entry->object.vm_object) == vm_submap_object) &&
	    (object->resident_page_count == 0) &&
	    (object->copy == VM_OBJECT_NULL) &&
	    (object->shadow == VM_OBJECT_NULL) &&
	    (!object->pager_created)) {
		entry->offset = (vm_object_offset_t)offset;
		entry->object.vm_object = VM_OBJECT_NULL;
		vm_object_deallocate(object);
		entry->is_sub_map = TRUE;
		entry->object.sub_map = submap;
		vm_map_reference(submap);

		if ((use_pmap) && (offset == 0)) {
			/* nest if platform code will allow */
			if(submap->pmap == NULL) {
				submap->pmap = pmap_create((vm_size_t) 0);
				if(submap->pmap == PMAP_NULL) {
					return(KERN_NO_SPACE);
				}
			}
			result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap,
					(addr64_t)start, (addr64_t)start, (uint64_t)(end - start));
			if(result)
				panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result);
			entry->use_pmap = TRUE;
		}

		pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end);

		result = KERN_SUCCESS;
/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
kern_return_t
vm_map_protect(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	new_prot,
	register boolean_t	set_max)
{
	register vm_map_entry_t	current;
	register vm_offset_t	prev;
	vm_map_entry_t		entry;

	XPR(XPR_VM_MAP,
	    "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d",
	    (integer_t)map, start, end, new_prot, set_max);

	/*
	 * 	Lookup the entry.  If it doesn't start in a valid
	 *	entry, return an error.  Remember if we need to
	 *	clip the entry.  We don't do it here because we don't
	 *	want to make any changes until we've scanned the
	 *	entire range below for address and protection
	 *	violations.
	 */
	if (!(clip = vm_map_lookup_entry(map, start, &entry))) {
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 *	Make a first pass to check for protection and address
	 *	violations.
	 */

	current = entry;
	prev = current->vme_start;
	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		/*
		 * If there is a hole, return an error.
		 */
		if (current->vme_start != prev) {
			return(KERN_INVALID_ADDRESS);
		}

		new_max = current->max_protection;
		if(new_prot & VM_PROT_COPY) {
			new_max |= VM_PROT_WRITE;
			if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) {
				return(KERN_PROTECTION_FAILURE);
			}
		} else {
			if ((new_prot & new_max) != new_prot) {
				return(KERN_PROTECTION_FAILURE);
			}
		}

		prev = current->vme_end;
		current = current->vme_next;
	}
	if (end > prev) {
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 *	Go back and fix up protections.
	 *	Clip to start here if the range starts within
	 *	the entry.
	 */

	vm_map_clip_start(map, entry, start);

	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;

		if(new_prot & VM_PROT_COPY) {
			/* caller is asking specifically to copy the      */
			/* mapped data, this implies that max protection  */
			/* will include write.  Caller must be prepared   */
			/* for loss of shared memory communication in the */
			/* target area after taking this step */
			current->needs_copy = TRUE;
			current->max_protection |= VM_PROT_WRITE;
		}

		if (set_max)
			current->protection =
				(current->max_protection =
					new_prot & ~VM_PROT_COPY) &
				old_prot;
		else
			current->protection = new_prot & ~VM_PROT_COPY;

		/*
		 *	Update physical map if necessary.
		 *	If the request is to turn off write protection,
		 *	we won't do it for real (in pmap). This is because
		 *	it would cause copy-on-write to fail.  We've already
		 *	set, the new protection in the map, so if a
		 *	write-protect fault occurred, it will be fixed up
		 *	properly, COW or not.
		 */
		/* the 256M hack for existing hardware limitations */
		if (current->protection != old_prot) {
			if(current->is_sub_map && current->use_pmap) {
				vm_offset_t	pmap_base_addr;
				vm_offset_t	pmap_end_addr;
				vm_map_entry_t	local_entry;

				pmap_base_addr = 0xF0000000 & current->vme_start;
				pmap_end_addr = (pmap_base_addr + 0x10000000) - 1;
				if(!vm_map_lookup_entry(map,
						pmap_base_addr, &local_entry))
					panic("vm_map_protect: nested pmap area is missing");
				while ((local_entry != vm_map_to_entry(map)) &&
				       (local_entry->vme_start < pmap_end_addr)) {
					local_entry->use_pmap = FALSE;
					local_entry = local_entry->vme_next;
				}
				pmap_unnest(map->pmap, (addr64_t)pmap_base_addr);
			}
			if (!(current->protection & VM_PROT_WRITE)) {
				/* Look one level in; we support nested pmaps */
				/* from mapped submaps which are direct entries */
				/* in our map */
				if(current->is_sub_map && current->use_pmap) {
					pmap_protect(current->object.sub_map->pmap,

						     current->protection);
				} else {
					pmap_protect(map->pmap, current->vme_start,

						     current->protection);
				}
			}
		}
		current = current->vme_next;
	}

	return(KERN_SUCCESS);
/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
kern_return_t
vm_map_inherit(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_inherit_t	new_inheritance)
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {

		vm_map_clip_start(map, entry, start);
	}
	else {
		temp_entry = temp_entry->vme_next;
	}

	/* first check entire range for submaps which can't support the */
	/* given inheritance. */
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			if(new_inheritance == VM_INHERIT_COPY)
				return(KERN_INVALID_ARGUMENT);
		}
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->vme_next;
	}

	return(KERN_SUCCESS);
/*
 *	vm_map_wire:
 *
 *	Sets the pageability of the specified address range in the
 *	target map as wired.  Regions specified as not pageable require
 *	locked-down physical memory and physical page maps.  The
 *	access_type variable indicates types of accesses that must not
 *	generate page faults.  This is checked against protection of
 *	memory being locked-down.
 *
 *	The map must not be locked, but a reference must remain to the
 *	map throughout the call.
 */
kern_return_t
vm_map_wire_nested(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	access_type,
	boolean_t		user_wire,
	vm_offset_t		pmap_addr)
{
	register vm_map_entry_t	entry;
	struct vm_map_entry	*first_entry, tmp_entry;
	register vm_offset_t	s,e;
	boolean_t		need_wakeup;
	boolean_t		main_map = FALSE;
	wait_interrupt_t	interruptible_state;
	thread_t		cur_thread;
	unsigned int		last_timestamp;

	if(map_pmap == NULL)

	last_timestamp = map->timestamp;

	VM_MAP_RANGE_CHECK(map, start, end);
	assert(page_aligned(start));
	assert(page_aligned(end));

		/* We wired what the caller asked for, zero pages */
		return KERN_SUCCESS;

	if (vm_map_lookup_entry(map, start, &first_entry)) {
		entry = first_entry;
		/* vm_map_clip_start will be done later. */
	} else {
		/* Start address is not in map */
		return(KERN_INVALID_ADDRESS);
	}

	need_wakeup = FALSE;
	cur_thread = current_thread();
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		/*
		 * If another thread is wiring/unwiring this entry then
		 * block after informing other thread to wake us up.
		 */
		if (entry->in_transition) {
			wait_result_t wait_result;

			/*
			 * We have not clipped the entry.  Make sure that
			 * the start address is in range so that the lookup
			 * below will succeed.
			 */
			s = entry->vme_start < start? start: entry->vme_start;

			entry->needs_wakeup = TRUE;

			/*
			 * wake up anybody waiting on entries that we have
			 * already wired.
			 */
			vm_map_entry_wakeup(map);
			need_wakeup = FALSE;

			/*
			 * User wiring is interruptible
			 */
			wait_result = vm_map_entry_wait(map,
					(user_wire) ? THREAD_ABORTSAFE :

			if (user_wire && wait_result == THREAD_INTERRUPTED) {
				/*
				 * undo the wirings we have done so far
				 * We do not clear the needs_wakeup flag,
				 * because we cannot tell if we were the
				 * only one waiting.
				 */
				vm_map_unwire(map, start, s, user_wire);
				return(KERN_FAILURE);
			}

			/*
			 * Cannot avoid a lookup here. reset timestamp.
			 */
			last_timestamp = map->timestamp;

			/*
			 * The entry could have been clipped, look it up again.
			 * Worst that can happen is, it may not exist anymore.
			 */
			if (!vm_map_lookup_entry(map, s, &first_entry)) {
				if (!user_wire)
					panic("vm_map_wire: re-lookup failed");

				/*
				 * User: undo everything up to the previous
				 * entry.  let vm_map_unwire worry about
				 * checking the validity of the range.
				 */
				vm_map_unwire(map, start, s, user_wire);
				return(KERN_FAILURE);
			}
			entry = first_entry;
			continue;
		}

		if(entry->is_sub_map) {
			vm_offset_t	sub_start;
			vm_offset_t	sub_end;
			vm_offset_t	local_start;
			vm_offset_t	local_end;

			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);

			sub_start = entry->offset;
			sub_end = entry->vme_end - entry->vme_start;
			sub_end += entry->offset;

			local_end = entry->vme_end;
			if(map_pmap == NULL) {
				if(entry->use_pmap) {
					pmap = entry->object.sub_map->pmap;
					/* ppc implementation requires that */
					/* submaps pmap address ranges line */
					/* up with parent map */
					pmap_addr = sub_start;
				}

				if (entry->wired_count) {
					if (entry->wired_count >= MAX_WIRE_COUNT)
						panic("vm_map_wire: too many wirings");

					    entry->user_wired_count
						>= MAX_WIRE_COUNT) {
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}

						entry->user_wired_count++;
					if ((!user_wire) ||
					    (entry->user_wired_count == 0))
						entry->wired_count++;
					entry = entry->vme_next;
;
2200 vm_object_offset_t offset_lo
;
2201 vm_object_offset_t offset
;
2204 vm_behavior_t behavior
;
2205 vm_map_entry_t local_entry
;
2206 vm_map_version_t version
;
2207 vm_map_t lookup_map
;
2209 /* call vm_map_lookup_locked to */
2210 /* cause any needs copy to be */
2212 local_start
= entry
->vme_start
;
2214 vm_map_lock_write_to_read(map
);
2215 if(vm_map_lookup_locked(
2216 &lookup_map
, local_start
,
2219 &offset
, &prot
, &wired
,
2220 &behavior
, &offset_lo
,
2221 &offset_hi
, &pmap_map
)) {
2223 vm_map_unlock(lookup_map
);
2224 vm_map_unwire(map
, start
,
2225 entry
->vme_start
, user_wire
);
2226 return(KERN_FAILURE
);
2228 if(pmap_map
!= lookup_map
)
2229 vm_map_unlock(pmap_map
);
2230 vm_map_unlock_read(lookup_map
);
2232 vm_object_unlock(object
);
2234 if (!vm_map_lookup_entry(map
,
2235 local_start
, &local_entry
)) {
2237 vm_map_unwire(map
, start
,
2238 entry
->vme_start
, user_wire
);
2239 return(KERN_FAILURE
);
2241 /* did we have a change of type? */
2242 if (!local_entry
->is_sub_map
) {
2243 last_timestamp
= map
->timestamp
;
2246 entry
= local_entry
;
2248 entry
->user_wired_count
++;
2250 (entry
->user_wired_count
== 1))
2251 entry
->wired_count
++;
2253 entry
->in_transition
= TRUE
;
2256 rc
= vm_map_wire_nested(
2257 entry
->object
.sub_map
,
2260 user_wire
, pmap
, pmap_addr
);
2264 local_start
= entry
->vme_start
;
2266 entry
->user_wired_count
++;
2268 (entry
->user_wired_count
== 1))
2269 entry
->wired_count
++;
2271 rc
= vm_map_wire_nested(entry
->object
.sub_map
,
2274 user_wire
, map_pmap
, pmap_addr
);
2277 s
= entry
->vme_start
;
2281 * Find the entry again. It could have been clipped
2282 * after we unlocked the map.
2284 if (!vm_map_lookup_entry(map
, local_start
,
2286 panic("vm_map_wire: re-lookup failed");
2287 entry
= first_entry
;
2289 last_timestamp
= map
->timestamp
;
2290 while ((entry
!= vm_map_to_entry(map
)) &&
2291 (entry
->vme_start
< e
)) {
2292 assert(entry
->in_transition
);
2293 entry
->in_transition
= FALSE
;
2294 if (entry
->needs_wakeup
) {
2295 entry
->needs_wakeup
= FALSE
;
2298 if (rc
!= KERN_SUCCESS
) {/* from vm_*_wire */
2300 entry
->user_wired_count
--;
2302 (entry
->user_wired_count
== 0))
2303 entry
->wired_count
--;
2305 entry
= entry
->vme_next
;
2307 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2310 vm_map_entry_wakeup(map
);
2312 * undo everything upto the previous entry.
2314 (void)vm_map_unwire(map
, start
, s
, user_wire
);
2321 * If this entry is already wired then increment
2322 * the appropriate wire reference count.
2324 if (entry
->wired_count
) {
2325 /* sanity check: wired_count is a short */
2326 if (entry
->wired_count
>= MAX_WIRE_COUNT
)
2327 panic("vm_map_wire: too many wirings");
2330 entry
->user_wired_count
>= MAX_WIRE_COUNT
) {
2332 vm_map_unwire(map
, start
,
2333 entry
->vme_start
, user_wire
);
2334 return(KERN_FAILURE
);
2337 * entry is already wired down, get our reference
2338 * after clipping to our range.
2340 vm_map_clip_start(map
, entry
, start
);
2341 vm_map_clip_end(map
, entry
, end
);
2343 entry
->user_wired_count
++;
2344 if ((!user_wire
) || (entry
->user_wired_count
== 1))
2345 entry
->wired_count
++;
2347 entry
= entry
->vme_next
;
2352 * Unwired entry or wire request transmitted via submap
2357 * Perform actions of vm_map_lookup that need the write
2358 * lock on the map: create a shadow object for a
2359 * copy-on-write region, or an object for a zero-fill
2362 size
= entry
->vme_end
- entry
->vme_start
;
2364 * If wiring a copy-on-write page, we need to copy it now
2365 * even if we're only (currently) requesting read access.
2366 * This is aggressive, but once it's wired we can't move it.
2368 if (entry
->needs_copy
) {
2369 vm_object_shadow(&entry
->object
.vm_object
,
2370 &entry
->offset
, size
);
2371 entry
->needs_copy
= FALSE
;
2372 } else if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
2373 entry
->object
.vm_object
= vm_object_allocate(size
);
2374 entry
->offset
= (vm_object_offset_t
)0;
2377 vm_map_clip_start(map
, entry
, start
);
2378 vm_map_clip_end(map
, entry
, end
);
2380 s
= entry
->vme_start
;
2384 * Check for holes and protection mismatch.
2385 * Holes: Next entry should be contiguous unless this
2386 * is the end of the region.
2387 * Protection: Access requested must be allowed, unless
2388 * wiring is by protection class
2390 if ((((entry
->vme_end
< end
) &&
2391 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2392 (entry
->vme_next
->vme_start
> entry
->vme_end
))) ||
2393 ((entry
->protection
& access_type
) != access_type
))) {
2395 * Found a hole or protection problem.
2396 * Unwire the region we wired so far.
2398 if (start
!= entry
->vme_start
) {
2400 vm_map_unwire(map
, start
, s
, user_wire
);
2404 return((entry
->protection
&access_type
) != access_type
?
2405 KERN_PROTECTION_FAILURE
: KERN_INVALID_ADDRESS
);
2408 assert(entry
->wired_count
== 0 && entry
->user_wired_count
== 0);
2411 entry
->user_wired_count
++;
2412 if ((!user_wire
) || (entry
->user_wired_count
== 1))
2413 entry
->wired_count
++;
2415 entry
->in_transition
= TRUE
;
2418 * This entry might get split once we unlock the map.
2419 * In vm_fault_wire(), we need the current range as
2420 * defined by this entry. In order for this to work
2421 * along with a simultaneous clip operation, we make a
2422 * temporary copy of this entry and use that for the
2423 * wiring. Note that the underlying objects do not
2424 * change during a clip.
		 * The in_transition state guarantees that the entry
		 * (or entries for this range, if a split occurred) will be
		 * there when the map lock is acquired for the second time.
2435 if (!user_wire
&& cur_thread
!= THREAD_NULL
)
2436 interruptible_state
= thread_interrupt_level(THREAD_UNINT
);
2439 rc
= vm_fault_wire(map
,
2440 &tmp_entry
, map_pmap
, pmap_addr
);
2442 rc
= vm_fault_wire(map
,
2443 &tmp_entry
, map
->pmap
,
2444 tmp_entry
.vme_start
);
2446 if (!user_wire
&& cur_thread
!= THREAD_NULL
)
2447 thread_interrupt_level(interruptible_state
);
2451 if (last_timestamp
+1 != map
->timestamp
) {
2453 * Find the entry again. It could have been clipped
2454 * after we unlocked the map.
2456 if (!vm_map_lookup_entry(map
, tmp_entry
.vme_start
,
2458 panic("vm_map_wire: re-lookup failed");
2460 entry
= first_entry
;
2463 last_timestamp
= map
->timestamp
;
2465 while ((entry
!= vm_map_to_entry(map
)) &&
2466 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2467 assert(entry
->in_transition
);
2468 entry
->in_transition
= FALSE
;
2469 if (entry
->needs_wakeup
) {
2470 entry
->needs_wakeup
= FALSE
;
2473 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2475 entry
->user_wired_count
--;
2477 (entry
->user_wired_count
== 0))
2478 entry
->wired_count
--;
2480 entry
= entry
->vme_next
;
2483 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2486 vm_map_entry_wakeup(map
);
		 * undo everything up to the previous entry.
2490 (void)vm_map_unwire(map
, start
, s
, user_wire
);
2493 } /* end while loop through map entries */
2497 * wake up anybody waiting on entries we wired.
2500 vm_map_entry_wakeup(map
);
2502 return(KERN_SUCCESS
);
}

kern_return_t
vm_map_wire(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	access_type,
	boolean_t		user_wire)
{
	kern_return_t	kret;

	/*
	 * the calls to mapping_prealloc and mapping_relpre
	 * (along with the VM_MAP_RANGE_CHECK to ensure a
	 * reasonable range was passed in) are
	 * currently necessary because
	 * we haven't enabled kernel pre-emption
	 * and/or the pmap_enter cannot purge and re-use
	 * existing mappings
	 */
	VM_MAP_RANGE_CHECK(map, start, end);
	mapping_prealloc(end - start);

	kret = vm_map_wire_nested(map, start, end, access_type,
				  user_wire, (pmap_t)NULL, 0);

	mapping_relpre();

	return kret;
}
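
/*
 * Illustrative sketch (not part of the original source): a kernel client
 * that temporarily wires a user range for device I/O might pair the two
 * exported calls as follows, assuming "map" is a valid map reference and
 * [start, end) is a page-aligned range within it (user_wire is FALSE for
 * kernel-initiated wirings):
 *
 *	kern_return_t kr;
 *
 *	kr = vm_map_wire(map, start, end,
 *			 VM_PROT_READ | VM_PROT_WRITE, FALSE);
 *	if (kr == KERN_SUCCESS) {
 *		... perform the I/O against the wired range ...
 *		(void) vm_map_unwire(map, start, end, FALSE);
 *	}
 */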
/*
 *	vm_map_unwire:
 *
 *	Sets the pageability of the specified address range in the target
 *	as pageable.  Regions specified must have been wired previously.
 *
 *	The map must not be locked, but a reference must remain to the map
 *	throughout the call.
 *
 *	Kernel will panic on failures.  User unwire ignores holes and
 *	unwired and in-transition entries to avoid losing memory by leaving
 *	it unwired.
 */
kern_return_t
vm_map_unwire_nested(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	boolean_t		user_wire,
	pmap_t			map_pmap,
	vm_offset_t		pmap_addr)
{
	register vm_map_entry_t	entry;
	struct vm_map_entry	*first_entry, tmp_entry;
	boolean_t		need_wakeup;
	boolean_t		main_map = FALSE;
	unsigned int		last_timestamp;

	if(map_pmap == NULL)
		main_map = TRUE;
	last_timestamp = map->timestamp;
2571 VM_MAP_RANGE_CHECK(map
, start
, end
);
2572 assert(page_aligned(start
));
2573 assert(page_aligned(end
));
2575 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
2576 entry
= first_entry
;
2577 /* vm_map_clip_start will be done later. */
2580 /* Start address is not in map. */
2582 return(KERN_INVALID_ADDRESS
);
2585 need_wakeup
= FALSE
;
2586 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
2587 if (entry
->in_transition
) {
			 * Another thread is wiring down this entry.  Note
			 * that if it is not for the other thread we would
			 * be unwiring an unwired entry.  This is not
			 * permitted.  If we wait, we will be unwiring memory
			 * we did not wire.
			 *
			 * Another thread is unwiring this entry.  We did not
			 * have a reference to it, because if we did, this
			 * entry will not be getting unwired now.
2602 panic("vm_map_unwire: in_transition entry");
2604 entry
= entry
->vme_next
;
2608 if(entry
->is_sub_map
) {
2609 vm_offset_t sub_start
;
2610 vm_offset_t sub_end
;
2611 vm_offset_t local_end
;
2615 vm_map_clip_start(map
, entry
, start
);
2616 vm_map_clip_end(map
, entry
, end
);
2618 sub_start
= entry
->offset
;
2619 sub_end
= entry
->vme_end
- entry
->vme_start
;
2620 sub_end
+= entry
->offset
;
2621 local_end
= entry
->vme_end
;
2622 if(map_pmap
== NULL
) {
2623 if(entry
->use_pmap
) {
2624 pmap
= entry
->object
.sub_map
->pmap
;
2625 pmap_addr
= sub_start
;
2630 if (entry
->wired_count
== 0 ||
2631 (user_wire
&& entry
->user_wired_count
== 0)) {
2633 panic("vm_map_unwire: entry is unwired");
2634 entry
= entry
->vme_next
;
2640 * Holes: Next entry should be contiguous unless
2641 * this is the end of the region.
2643 if (((entry
->vme_end
< end
) &&
2644 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2645 (entry
->vme_next
->vme_start
2646 > entry
->vme_end
)))) {
2648 panic("vm_map_unwire: non-contiguous region");
2650 entry = entry->vme_next;
2655 if (!user_wire
|| (--entry
->user_wired_count
== 0))
2656 entry
->wired_count
--;
2658 if (entry
->wired_count
!= 0) {
2659 entry
= entry
->vme_next
;
2663 entry
->in_transition
= TRUE
;
2664 tmp_entry
= *entry
;/* see comment in vm_map_wire() */
2667 * We can unlock the map now. The in_transition state
		 * guarantees existence of the entry.
2671 vm_map_unwire_nested(entry
->object
.sub_map
,
2672 sub_start
, sub_end
, user_wire
, pmap
, pmap_addr
);
2675 if (last_timestamp
+1 != map
->timestamp
) {
2677 * Find the entry again. It could have been
2678 * clipped or deleted after we unlocked the map.
2680 if (!vm_map_lookup_entry(map
,
2681 tmp_entry
.vme_start
,
2684 panic("vm_map_unwire: re-lookup failed");
2685 entry
= first_entry
->vme_next
;
2687 entry
= first_entry
;
2689 last_timestamp
= map
->timestamp
;
2692 * clear transition bit for all constituent entries
2693 * that were in the original entry (saved in
2694 * tmp_entry). Also check for waiters.
2696 while ((entry
!= vm_map_to_entry(map
)) &&
2697 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2698 assert(entry
->in_transition
);
2699 entry
->in_transition
= FALSE
;
2700 if (entry
->needs_wakeup
) {
2701 entry
->needs_wakeup
= FALSE
;
2704 entry
= entry
->vme_next
;
2709 vm_map_unwire_nested(entry
->object
.sub_map
,
2710 sub_start
, sub_end
, user_wire
, map_pmap
,
2714 if (last_timestamp
+1 != map
->timestamp
) {
2716 * Find the entry again. It could have been
2717 * clipped or deleted after we unlocked the map.
2719 if (!vm_map_lookup_entry(map
,
2720 tmp_entry
.vme_start
,
2723 panic("vm_map_unwire: re-lookup failed");
2724 entry
= first_entry
->vme_next
;
2726 entry
= first_entry
;
2728 last_timestamp
= map
->timestamp
;
2733 if ((entry
->wired_count
== 0) ||
2734 (user_wire
&& entry
->user_wired_count
== 0)) {
2736 panic("vm_map_unwire: entry is unwired");
2738 entry
= entry
->vme_next
;
2742 assert(entry
->wired_count
> 0 &&
2743 (!user_wire
|| entry
->user_wired_count
> 0));
2745 vm_map_clip_start(map
, entry
, start
);
2746 vm_map_clip_end(map
, entry
, end
);
2750 * Holes: Next entry should be contiguous unless
2751 * this is the end of the region.
2753 if (((entry
->vme_end
< end
) &&
2754 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2755 (entry
->vme_next
->vme_start
> entry
->vme_end
)))) {
2758 panic("vm_map_unwire: non-contiguous region");
2759 entry
= entry
->vme_next
;
2763 if (!user_wire
|| (--entry
->user_wired_count
== 0))
2764 entry
->wired_count
--;
2766 if (entry
->wired_count
!= 0) {
2767 entry
= entry
->vme_next
;
2771 entry
->in_transition
= TRUE
;
2772 tmp_entry
= *entry
; /* see comment in vm_map_wire() */
2775 * We can unlock the map now. The in_transition state
		 * guarantees existence of the entry.
2780 vm_fault_unwire(map
,
2781 &tmp_entry
, FALSE
, map_pmap
, pmap_addr
);
2783 vm_fault_unwire(map
,
2784 &tmp_entry
, FALSE
, map
->pmap
,
2785 tmp_entry
.vme_start
);
2789 if (last_timestamp
+1 != map
->timestamp
) {
2791 * Find the entry again. It could have been clipped
2792 * or deleted after we unlocked the map.
2794 if (!vm_map_lookup_entry(map
, tmp_entry
.vme_start
,
2797 panic("vm_map_unwire: re-lookup failed");
2798 entry
= first_entry
->vme_next
;
2800 entry
= first_entry
;
2802 last_timestamp
= map
->timestamp
;
2805 * clear transition bit for all constituent entries that
2806 * were in the original entry (saved in tmp_entry). Also
2807 * check for waiters.
2809 while ((entry
!= vm_map_to_entry(map
)) &&
2810 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2811 assert(entry
->in_transition
);
2812 entry
->in_transition
= FALSE
;
2813 if (entry
->needs_wakeup
) {
2814 entry
->needs_wakeup
= FALSE
;
2817 entry
= entry
->vme_next
;
2822 * wake up anybody waiting on entries that we have unwired.
2825 vm_map_entry_wakeup(map
);
2826 return(KERN_SUCCESS
);
}

kern_return_t
vm_map_unwire(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	boolean_t		user_wire)
{
	return vm_map_unwire_nested(map, start, end,
		user_wire, (pmap_t)NULL, 0);
}
/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
void
vm_map_entry_delete(
	register vm_map_t	map,
	register vm_map_entry_t	entry)
{
	register vm_offset_t	s, e;
	register vm_object_t	object;
	register vm_map_t	submap;
	extern vm_object_t	kernel_object;

	s = entry->vme_start;
	e = entry->vme_end;
	assert(page_aligned(s));
	assert(page_aligned(e));
	assert(entry->wired_count == 0);
	assert(entry->user_wired_count == 0);

	if (entry->is_sub_map) {
		object = NULL;
		submap = entry->object.sub_map;
	} else {
		submap = NULL;
		object = entry->object.vm_object;
	}

	vm_map_entry_unlink(map, entry);
	map->size -= e - s;

	vm_map_entry_dispose(map, entry);

	vm_map_unlock(map);
	/*
	 *	Deallocate the object only after removing all
	 *	pmap entries pointing to its pages.
	 */
	if (submap)
		vm_map_deallocate(submap);
	else
		vm_object_deallocate(object);
}

void
vm_map_submap_pmap_clean(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end,
	vm_map_t	sub_map,
	vm_offset_t	offset)
{
	vm_offset_t	submap_start;
	vm_offset_t	submap_end;
	vm_size_t	remove_size;
	vm_map_entry_t	entry;
2903 submap_end
= offset
+ (end
- start
);
2904 submap_start
= offset
;
2905 if(vm_map_lookup_entry(sub_map
, offset
, &entry
)) {
2907 remove_size
= (entry
->vme_end
- entry
->vme_start
);
2908 if(offset
> entry
->vme_start
)
2909 remove_size
-= offset
- entry
->vme_start
;
2912 if(submap_end
< entry
->vme_end
) {
2914 entry
->vme_end
- submap_end
;
2916 if(entry
->is_sub_map
) {
2917 vm_map_submap_pmap_clean(
2920 start
+ remove_size
,
2921 entry
->object
.sub_map
,
2925 if((map
->mapped
) && (map
->ref_count
)
2926 && (entry
->object
.vm_object
!= NULL
)) {
2927 vm_object_pmap_protect(
2928 entry
->object
.vm_object
,
2935 pmap_remove(map
->pmap
,
2937 (addr64_t
)(start
+ remove_size
));
2942 entry
= entry
->vme_next
;
2944 while((entry
!= vm_map_to_entry(sub_map
))
2945 && (entry
->vme_start
< submap_end
)) {
2946 remove_size
= (entry
->vme_end
- entry
->vme_start
);
2947 if(submap_end
< entry
->vme_end
) {
2948 remove_size
-= entry
->vme_end
- submap_end
;
2950 if(entry
->is_sub_map
) {
2951 vm_map_submap_pmap_clean(
2953 (start
+ entry
->vme_start
) - offset
,
2954 ((start
+ entry
->vme_start
) - offset
) + remove_size
,
2955 entry
->object
.sub_map
,
2958 if((map
->mapped
) && (map
->ref_count
)
2959 && (entry
->object
.vm_object
!= NULL
)) {
2960 vm_object_pmap_protect(
2961 entry
->object
.vm_object
,
2968 pmap_remove(map
->pmap
,
2969 (addr64_t
)((start
+ entry
->vme_start
)
2971 (addr64_t
)(((start
+ entry
->vme_start
)
2972 - offset
) + remove_size
));
2975 entry
= entry
->vme_next
;
2981 * vm_map_delete: [ internal use only ]
2983 * Deallocates the given address range from the target map.
2984 * Removes all user wirings. Unwires one kernel wiring if
2985 * VM_MAP_REMOVE_KUNWIRE is set. Waits for kernel wirings to go
2986 * away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set. Sleeps
2987 * interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set.
2989 * This routine is called with map locked and leaves map locked.
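 */

/*
 * Hedged sketch (not part of the original source): the VM_MAP_REMOVE_* flags
 * above are assumed to combine as a bit mask, as they do in the callers seen
 * later in this file.  A caller tearing down a range it had kernel-wired,
 * and willing to sleep interruptibly for remaining kernel wirings, might use
 * the exported form:
 *
 *	(void) vm_map_remove(map, start, end,
 *			     VM_MAP_REMOVE_KUNWIRE |
 *			     VM_MAP_REMOVE_WAIT_FOR_KWIRE |
 *			     VM_MAP_REMOVE_INTERRUPTIBLE);
 */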
2993 register vm_map_t map
,
2995 register vm_offset_t end
,
2998 vm_map_entry_t entry
, next
;
2999 struct vm_map_entry
*first_entry
, tmp_entry
;
3000 register vm_offset_t s
, e
;
3001 register vm_object_t object
;
3002 boolean_t need_wakeup
;
3003 unsigned int last_timestamp
= ~0; /* unlikely value */
3005 extern vm_map_t kernel_map
;
3007 interruptible
= (flags
& VM_MAP_REMOVE_INTERRUPTIBLE
) ?
3008 THREAD_ABORTSAFE
: THREAD_UNINT
;
3011 * All our DMA I/O operations in IOKit are currently done by
3012 * wiring through the map entries of the task requesting the I/O.
3013 * Because of this, we must always wait for kernel wirings
3014 * to go away on the entries before deleting them.
3016 * Any caller who wants to actually remove a kernel wiring
3017 * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to
	 * properly remove one wiring instead of blasting through
	 * them all.
	 */
	flags |= VM_MAP_REMOVE_WAIT_FOR_KWIRE;
3024 * Find the start of the region, and clip it
3026 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
3027 entry
= first_entry
;
3028 vm_map_clip_start(map
, entry
, start
);
3031 * Fix the lookup hint now, rather than each
3032 * time through the loop.
3034 SAVE_HINT(map
, entry
->vme_prev
);
3036 entry
= first_entry
->vme_next
;
3039 need_wakeup
= FALSE
;
3041 * Step through all entries in this region
3043 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
3045 vm_map_clip_end(map
, entry
, end
);
3046 if (entry
->in_transition
) {
3047 wait_result_t wait_result
;
3050 * Another thread is wiring/unwiring this entry.
3051 * Let the other thread know we are waiting.
3053 s
= entry
->vme_start
;
3054 entry
->needs_wakeup
= TRUE
;
3057 * wake up anybody waiting on entries that we have
3058 * already unwired/deleted.
3061 vm_map_entry_wakeup(map
);
3062 need_wakeup
= FALSE
;
3065 wait_result
= vm_map_entry_wait(map
, interruptible
);
3067 if (interruptible
&&
3068 wait_result
== THREAD_INTERRUPTED
) {
3070 * We do not clear the needs_wakeup flag,
3071 * since we cannot tell if we were the only one.
3074 return KERN_ABORTED
;
3078 * The entry could have been clipped or it
3079 * may not exist anymore. Look it up again.
3081 if (!vm_map_lookup_entry(map
, s
, &first_entry
)) {
3082 assert((map
!= kernel_map
) &&
3083 (!entry
->is_sub_map
));
3085 * User: use the next entry
3087 entry
= first_entry
->vme_next
;
3089 entry
= first_entry
;
3090 SAVE_HINT(map
, entry
->vme_prev
);
3092 last_timestamp
= map
->timestamp
;
3094 } /* end in_transition */
3096 if (entry
->wired_count
) {
3098 * Remove a kernel wiring if requested or if
3099 * there are user wirings.
3101 if ((flags
& VM_MAP_REMOVE_KUNWIRE
) ||
3102 (entry
->user_wired_count
> 0))
3103 entry
->wired_count
--;
3105 /* remove all user wire references */
3106 entry
->user_wired_count
= 0;
3108 if (entry
->wired_count
!= 0) {
3109 assert((map
!= kernel_map
) &&
3110 (!entry
->is_sub_map
));
3112 * Cannot continue. Typical case is when
3113 * a user thread has physical io pending on
3114 * on this page. Either wait for the
3115 * kernel wiring to go away or return an
3118 if (flags
& VM_MAP_REMOVE_WAIT_FOR_KWIRE
) {
3119 wait_result_t wait_result
;
3121 s
= entry
->vme_start
;
3122 entry
->needs_wakeup
= TRUE
;
3123 wait_result
= vm_map_entry_wait(map
,
3126 if (interruptible
&&
3127 wait_result
== THREAD_INTERRUPTED
) {
3129 * We do not clear the
3130 * needs_wakeup flag, since we
3131 * cannot tell if we were the
3135 return KERN_ABORTED
;
3139 * The entry could have been clipped or
3140 * it may not exist anymore. Look it
3143 if (!vm_map_lookup_entry(map
, s
,
3145 assert((map
!= kernel_map
) &&
3146 (!entry
->is_sub_map
));
3148 * User: use the next entry
3150 entry
= first_entry
->vme_next
;
3152 entry
= first_entry
;
3153 SAVE_HINT(map
, entry
->vme_prev
);
3155 last_timestamp
= map
->timestamp
;
3159 return KERN_FAILURE
;
3163 entry
->in_transition
= TRUE
;
3165 * copy current entry. see comment in vm_map_wire()
3168 s
= entry
->vme_start
;
3172 * We can unlock the map now. The in_transition
			 * state guarantees existence of the entry.
3176 vm_fault_unwire(map
, &tmp_entry
,
3177 tmp_entry
.object
.vm_object
== kernel_object
,
3178 map
->pmap
, tmp_entry
.vme_start
);
3181 if (last_timestamp
+1 != map
->timestamp
) {
3183 * Find the entry again. It could have
3184 * been clipped after we unlocked the map.
3186 if (!vm_map_lookup_entry(map
, s
, &first_entry
)){
3187 assert((map
!= kernel_map
) &&
3188 (!entry
->is_sub_map
));
3189 first_entry
= first_entry
->vme_next
;
3191 SAVE_HINT(map
, entry
->vme_prev
);
3194 SAVE_HINT(map
, entry
->vme_prev
);
3195 first_entry
= entry
;
3198 last_timestamp
= map
->timestamp
;
3200 entry
= first_entry
;
3201 while ((entry
!= vm_map_to_entry(map
)) &&
3202 (entry
->vme_start
< tmp_entry
.vme_end
)) {
3203 assert(entry
->in_transition
);
3204 entry
->in_transition
= FALSE
;
3205 if (entry
->needs_wakeup
) {
3206 entry
->needs_wakeup
= FALSE
;
3209 entry
= entry
->vme_next
;
3212 * We have unwired the entry(s). Go back and
3215 entry
= first_entry
;
3219 /* entry is unwired */
3220 assert(entry
->wired_count
== 0);
3221 assert(entry
->user_wired_count
== 0);
3223 if ((!entry
->is_sub_map
&&
3224 entry
->object
.vm_object
!= kernel_object
) ||
3225 entry
->is_sub_map
) {
3226 if(entry
->is_sub_map
) {
3227 if(entry
->use_pmap
) {
3229 pmap_unnest(map
->pmap
, (addr64_t
)entry
->vme_start
);
3231 if((map
->mapped
) && (map
->ref_count
)) {
3232 /* clean up parent map/maps */
3233 vm_map_submap_pmap_clean(
3234 map
, entry
->vme_start
,
3236 entry
->object
.sub_map
,
3240 vm_map_submap_pmap_clean(
3241 map
, entry
->vme_start
, entry
->vme_end
,
3242 entry
->object
.sub_map
,
3246 object
= entry
->object
.vm_object
;
3247 if((map
->mapped
) && (map
->ref_count
)) {
3248 vm_object_pmap_protect(
3249 object
, entry
->offset
,
3250 entry
->vme_end
- entry
->vme_start
,
3254 } else if(object
!= NULL
) {
3255 if ((object
->shadow
!= NULL
) ||
3256 (object
->phys_contiguous
) ||
3257 (object
->resident_page_count
>
3258 atop((entry
->vme_end
- entry
->vme_start
)/4))) {
3259 pmap_remove(map
->pmap
,
3260 (addr64_t
)(entry
->vme_start
),
3261 (addr64_t
)(entry
->vme_end
));
3264 vm_object_offset_t start_off
;
3265 vm_object_offset_t end_off
;
3266 start_off
= entry
->offset
;
3267 end_off
= start_off
+
3268 (entry
->vme_end
- entry
->vme_start
);
3269 vm_object_lock(object
);
3270 queue_iterate(&object
->memq
,
3271 p
, vm_page_t
, listq
) {
3272 if ((!p
->fictitious
) &&
3273 (p
->offset
>= start_off
) &&
3274 (p
->offset
< end_off
)) {
3276 start
= entry
->vme_start
;
3277 start
+= p
->offset
- start_off
;
3283 vm_object_unlock(object
);
3289 next
= entry
->vme_next
;
3290 s
= next
->vme_start
;
3291 last_timestamp
= map
->timestamp
;
3292 vm_map_entry_delete(map
, entry
);
3293 /* vm_map_entry_delete unlocks the map */
3297 if(entry
== vm_map_to_entry(map
)) {
3300 if (last_timestamp
+1 != map
->timestamp
) {
			 * we are responsible for deleting everything
			 * from the given space; if someone has interfered,
			 * we pick up where we left off.  Back fills should
			 * be all right for anyone except map_delete, and
			 * we have to assume that the task has been fully
			 * disabled before we get here
3309 if (!vm_map_lookup_entry(map
, s
, &entry
)){
3310 entry
= entry
->vme_next
;
3312 SAVE_HINT(map
, entry
->vme_prev
);
			 * others can not only allocate behind us, we can
			 * also see coalescing while we don't hold the map lock
3318 if(entry
== vm_map_to_entry(map
)) {
3321 vm_map_clip_start(map
, entry
, s
);
3323 last_timestamp
= map
->timestamp
;
3326 if (map
->wait_for_space
)
3327 thread_wakeup((event_t
) map
);
3329 * wake up anybody waiting on entries that we have already deleted.
3332 vm_map_entry_wakeup(map
);
3334 return KERN_SUCCESS
;
}

/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
kern_return_t
vm_map_remove(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register boolean_t	flags)
{
	register kern_return_t	result;
	boolean_t		funnel_set = FALSE;
	funnel_t		*curflock;
	thread_t		cur_thread;

	cur_thread = current_thread();

	if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
		funnel_set = TRUE;
		curflock = cur_thread->funnel_lock;
		thread_funnel_set( curflock , FALSE);
	}
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end, flags);
	vm_map_unlock(map);
	if (funnel_set) {
		thread_funnel_set( curflock, TRUE);
		funnel_set = FALSE;
	}

	return(result);
}
/*
 *	Routine:	vm_map_copy_discard
 *
 *	Description:
 *		Dispose of a map copy object (returned by
 *		vm_map_copyin).
 */
void
vm_map_copy_discard(
	vm_map_copy_t	copy)
{
	TR_DECL("vm_map_copy_discard");

/*	tr3("enter: copy 0x%x type %d", copy, copy->type);*/

	if (copy == VM_MAP_COPY_NULL)
		return;

	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		while (vm_map_copy_first_entry(copy) !=
					vm_map_copy_to_entry(copy)) {
			vm_map_entry_t	entry = vm_map_copy_first_entry(copy);

			vm_map_copy_entry_unlink(copy, entry);
			vm_object_deallocate(entry->object.vm_object);
			vm_map_copy_entry_dispose(copy, entry);
		}
		break;
	case VM_MAP_COPY_OBJECT:
		vm_object_deallocate(copy->cpy_object);
		break;
	case VM_MAP_COPY_KERNEL_BUFFER:

		/*
		 * The vm_map_copy_t and possibly the data buffer were
		 * allocated by a single call to kalloc(), i.e. the
		 * vm_map_copy_t was not allocated out of the zone.
		 */
		kfree((vm_offset_t) copy, copy->cpy_kalloc_size);
		return;
	}
	zfree(vm_map_copy_zone, (vm_offset_t) copy);
}
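
/*
 * Hedged usage sketch (not part of the original source), assuming the usual
 * vm_map_copyin(src_map, src_addr, len, src_destroy, &copy) entry point: a
 * caller owns the copy object until some routine consumes it, so a failed
 * hand-off must be followed by an explicit discard:
 *
 *	vm_map_copy_t	copy;
 *	vm_offset_t	dst_addr;
 *
 *	if (vm_map_copyin(src_map, src_addr, len, FALSE, &copy) == KERN_SUCCESS &&
 *	    vm_map_copyout(dst_map, &dst_addr, copy) != KERN_SUCCESS) {
 *		vm_map_copy_discard(copy);	(copyout did not consume it)
 *	}
 */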
/*
 *	Routine:	vm_map_copy_copy
 *
 *	Description:
 *			Move the information in a map copy object to
 *			a new map copy object, leaving the old one
 *			empty.
 *
 *			This is used by kernel routines that need
 *			to look at out-of-line data (in copyin form)
 *			before deciding whether to return SUCCESS.
 *			If the routine returns FAILURE, the original
 *			copy object will be deallocated; therefore,
 *			these routines must make a copy of the copy
 *			object and leave the original empty so that
 *			deallocation will not fail.
 */
vm_map_copy_t
vm_map_copy_copy(
	vm_map_copy_t	copy)
{
	vm_map_copy_t	new_copy;

	if (copy == VM_MAP_COPY_NULL)
		return VM_MAP_COPY_NULL;

	/*
	 * Allocate a new copy object, and copy the information
	 * from the old one into it.
	 */

	new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	*new_copy = *copy;

	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
		/*
		 * The links in the entry chain must be
		 * changed to point to the new copy object.
		 */
		vm_map_copy_first_entry(copy)->vme_prev
			= vm_map_copy_to_entry(new_copy);
		vm_map_copy_last_entry(copy)->vme_next
			= vm_map_copy_to_entry(new_copy);
	}

	/*
	 * Change the old copy object into one that contains
	 * nothing to be deallocated.
	 */
	copy->type = VM_MAP_COPY_OBJECT;
	copy->cpy_object = VM_OBJECT_NULL;

	/*
	 * Return the new object.
	 */
	return new_copy;
}
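
/*
 * Hedged sketch (not part of the original source) of the pattern described
 * above: a routine that must inspect out-of-line data but may still fail
 * works on a transferred copy, so the original can be safely deallocated by
 * its caller afterwards:
 *
 *	vm_map_copy_t	working = vm_map_copy_copy(copy);
 *
 *	... examine the entries of "working" ...
 *	if (not_acceptable)
 *		return KERN_FAILURE;	(the now-empty "copy" can be
 *					 discarded without losing data)
 */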
3478 vm_map_overwrite_submap_recurse(
3480 vm_offset_t dst_addr
,
3483 vm_offset_t dst_end
;
3484 vm_map_entry_t tmp_entry
;
3485 vm_map_entry_t entry
;
3486 kern_return_t result
;
3487 boolean_t encountered_sub_map
= FALSE
;
3492 * Verify that the destination is all writeable
3493 * initially. We have to trunc the destination
3494 * address and round the copy size or we'll end up
3495 * splitting entries in strange ways.
3498 dst_end
= round_page_32(dst_addr
+ dst_size
);
3499 vm_map_lock(dst_map
);
3502 if (!vm_map_lookup_entry(dst_map
, dst_addr
, &tmp_entry
)) {
3503 vm_map_unlock(dst_map
);
3504 return(KERN_INVALID_ADDRESS
);
3507 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page_32(dst_addr
));
3509 for (entry
= tmp_entry
;;) {
3510 vm_map_entry_t next
;
3512 next
= entry
->vme_next
;
3513 while(entry
->is_sub_map
) {
3514 vm_offset_t sub_start
;
3515 vm_offset_t sub_end
;
3516 vm_offset_t local_end
;
3518 if (entry
->in_transition
) {
3520 * Say that we are waiting, and wait for entry.
3522 entry
->needs_wakeup
= TRUE
;
3523 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3528 encountered_sub_map
= TRUE
;
3529 sub_start
= entry
->offset
;
3531 if(entry
->vme_end
< dst_end
)
3532 sub_end
= entry
->vme_end
;
3535 sub_end
-= entry
->vme_start
;
3536 sub_end
+= entry
->offset
;
3537 local_end
= entry
->vme_end
;
3538 vm_map_unlock(dst_map
);
3540 result
= vm_map_overwrite_submap_recurse(
3541 entry
->object
.sub_map
,
3543 sub_end
- sub_start
);
3545 if(result
!= KERN_SUCCESS
)
3547 if (dst_end
<= entry
->vme_end
)
3548 return KERN_SUCCESS
;
3549 vm_map_lock(dst_map
);
3550 if(!vm_map_lookup_entry(dst_map
, local_end
,
3552 vm_map_unlock(dst_map
);
3553 return(KERN_INVALID_ADDRESS
);
3556 next
= entry
->vme_next
;
3559 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
3560 vm_map_unlock(dst_map
);
3561 return(KERN_PROTECTION_FAILURE
);
3565 * If the entry is in transition, we must wait
3566 * for it to exit that state. Anything could happen
3567 * when we unlock the map, so start over.
3569 if (entry
->in_transition
) {
3572 * Say that we are waiting, and wait for entry.
3574 entry
->needs_wakeup
= TRUE
;
3575 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3581 * our range is contained completely within this map entry
3583 if (dst_end
<= entry
->vme_end
) {
3584 vm_map_unlock(dst_map
);
3585 return KERN_SUCCESS
;
3588 * check that range specified is contiguous region
3590 if ((next
== vm_map_to_entry(dst_map
)) ||
3591 (next
->vme_start
!= entry
->vme_end
)) {
3592 vm_map_unlock(dst_map
);
3593 return(KERN_INVALID_ADDRESS
);
3597 * Check for permanent objects in the destination.
3599 if ((entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
3600 ((!entry
->object
.vm_object
->internal
) ||
3601 (entry
->object
.vm_object
->true_share
))) {
3602 if(encountered_sub_map
) {
3603 vm_map_unlock(dst_map
);
3604 return(KERN_FAILURE
);
3611 vm_map_unlock(dst_map
);
3612 return(KERN_SUCCESS
);
3616 * Routine: vm_map_copy_overwrite
3619 * Copy the memory described by the map copy
3620 * object (copy; returned by vm_map_copyin) onto
3621 * the specified destination region (dst_map, dst_addr).
3622 * The destination must be writeable.
3624 * Unlike vm_map_copyout, this routine actually
3625 * writes over previously-mapped memory. If the
3626 * previous mapping was to a permanent (user-supplied)
3627 * memory object, it is preserved.
3629 * The attributes (protection and inheritance) of the
3630 * destination region are preserved.
3632 * If successful, consumes the copy object.
3633 * Otherwise, the caller is responsible for it.
3635 * Implementation notes:
3636 * To overwrite aligned temporary virtual memory, it is
3637 * sufficient to remove the previous mapping and insert
3638 * the new copy. This replacement is done either on
3639 * the whole region (if no permanent virtual memory
3640 * objects are embedded in the destination region) or
3641 * in individual map entries.
 *		To overwrite permanent virtual memory, it is necessary
3644 * to copy each page, as the external memory management
3645 * interface currently does not provide any optimizations.
3647 * Unaligned memory also has to be copied. It is possible
3648 * to use 'vm_trickery' to copy the aligned data. This is
3649 * not done but not hard to implement.
 *		Once a page of permanent memory has been overwritten,
 *		it is impossible to interrupt this function; otherwise,
 *		the call would be neither atomic nor location-independent.
 *		The kernel-state portion of a user thread must be
 *		interruptible.
 *
 *		It may be expensive to forward all requests that might
 *		overwrite permanent memory (vm_write, vm_copy) to
 *		uninterruptible kernel threads.  This routine may be
 *		called by interruptible threads; however, success is
 *		not guaranteed -- if the request cannot be performed
 *		atomically and interruptibly, an error indication is
 *		returned.
 */
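
/*
 * Hedged sketch (not part of the original source): a typical overwrite of an
 * existing writeable mapping chains the copy primitives roughly as follows
 * ("src_map", "dst_map" and the addresses are assumed valid; dst_addr should
 * be page-aligned to take the aligned path described above):
 *
 *	vm_map_copy_t	copy;
 *	kern_return_t	kr;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr == KERN_SUCCESS) {
 *		kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, FALSE);
 *		if (kr != KERN_SUCCESS)
 *			vm_map_copy_discard(copy);
 *	}
 */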
3667 vm_map_copy_overwrite_nested(
3669 vm_offset_t dst_addr
,
3671 boolean_t interruptible
,
3674 vm_offset_t dst_end
;
3675 vm_map_entry_t tmp_entry
;
3676 vm_map_entry_t entry
;
3678 boolean_t aligned
= TRUE
;
3679 boolean_t contains_permanent_objects
= FALSE
;
3680 boolean_t encountered_sub_map
= FALSE
;
3681 vm_offset_t base_addr
;
3682 vm_size_t copy_size
;
3683 vm_size_t total_size
;
3687 * Check for null copy object.
3690 if (copy
== VM_MAP_COPY_NULL
)
3691 return(KERN_SUCCESS
);
3694 * Check for special kernel buffer allocated
3695 * by new_ipc_kmsg_copyin.
3698 if (copy
->type
== VM_MAP_COPY_KERNEL_BUFFER
) {
3699 return(vm_map_copyout_kernel_buffer(
3705 * Only works for entry lists at the moment. Will
3706 * support page lists later.
3709 assert(copy
->type
== VM_MAP_COPY_ENTRY_LIST
);
3711 if (copy
->size
== 0) {
3712 vm_map_copy_discard(copy
);
3713 return(KERN_SUCCESS
);
3717 * Verify that the destination is all writeable
3718 * initially. We have to trunc the destination
3719 * address and round the copy size or we'll end up
3720 * splitting entries in strange ways.
3723 if (!page_aligned(copy
->size
) ||
3724 !page_aligned (copy
->offset
) ||
3725 !page_aligned (dst_addr
))
3728 dst_end
= round_page_32(dst_addr
+ copy
->size
);
3730 dst_end
= dst_addr
+ copy
->size
;
3733 vm_map_lock(dst_map
);
3736 if (!vm_map_lookup_entry(dst_map
, dst_addr
, &tmp_entry
)) {
3737 vm_map_unlock(dst_map
);
3738 return(KERN_INVALID_ADDRESS
);
3740 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page_32(dst_addr
));
3741 for (entry
= tmp_entry
;;) {
3742 vm_map_entry_t next
= entry
->vme_next
;
3744 while(entry
->is_sub_map
) {
3745 vm_offset_t sub_start
;
3746 vm_offset_t sub_end
;
3747 vm_offset_t local_end
;
3749 if (entry
->in_transition
) {
3752 * Say that we are waiting, and wait for entry.
3754 entry
->needs_wakeup
= TRUE
;
3755 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3760 local_end
= entry
->vme_end
;
3761 if (!(entry
->needs_copy
)) {
3762 /* if needs_copy we are a COW submap */
3763 /* in such a case we just replace so */
3764 /* there is no need for the follow- */
3766 encountered_sub_map
= TRUE
;
3767 sub_start
= entry
->offset
;
3769 if(entry
->vme_end
< dst_end
)
3770 sub_end
= entry
->vme_end
;
3773 sub_end
-= entry
->vme_start
;
3774 sub_end
+= entry
->offset
;
3775 vm_map_unlock(dst_map
);
3777 kr
= vm_map_overwrite_submap_recurse(
3778 entry
->object
.sub_map
,
3780 sub_end
- sub_start
);
3781 if(kr
!= KERN_SUCCESS
)
3783 vm_map_lock(dst_map
);
3786 if (dst_end
<= entry
->vme_end
)
3787 goto start_overwrite
;
3788 if(!vm_map_lookup_entry(dst_map
, local_end
,
3790 vm_map_unlock(dst_map
);
3791 return(KERN_INVALID_ADDRESS
);
3793 next
= entry
->vme_next
;
3796 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
3797 vm_map_unlock(dst_map
);
3798 return(KERN_PROTECTION_FAILURE
);
3802 * If the entry is in transition, we must wait
3803 * for it to exit that state. Anything could happen
3804 * when we unlock the map, so start over.
3806 if (entry
->in_transition
) {
3809 * Say that we are waiting, and wait for entry.
3811 entry
->needs_wakeup
= TRUE
;
3812 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3818 * our range is contained completely within this map entry
3820 if (dst_end
<= entry
->vme_end
)
3823 * check that range specified is contiguous region
3825 if ((next
== vm_map_to_entry(dst_map
)) ||
3826 (next
->vme_start
!= entry
->vme_end
)) {
3827 vm_map_unlock(dst_map
);
3828 return(KERN_INVALID_ADDRESS
);
3833 * Check for permanent objects in the destination.
3835 if ((entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
3836 ((!entry
->object
.vm_object
->internal
) ||
3837 (entry
->object
.vm_object
->true_share
))) {
3838 contains_permanent_objects
= TRUE
;
3846 * If there are permanent objects in the destination, then
3847 * the copy cannot be interrupted.
3850 if (interruptible
&& contains_permanent_objects
) {
3851 vm_map_unlock(dst_map
);
3852 return(KERN_FAILURE
); /* XXX */
3857 * Make a second pass, overwriting the data
3858 * At the beginning of each loop iteration,
3859 * the next entry to be overwritten is "tmp_entry"
3860 * (initially, the value returned from the lookup above),
3861 * and the starting address expected in that entry
3865 total_size
= copy
->size
;
3866 if(encountered_sub_map
) {
3868 /* re-calculate tmp_entry since we've had the map */
3870 if (!vm_map_lookup_entry( dst_map
, dst_addr
, &tmp_entry
)) {
3871 vm_map_unlock(dst_map
);
3872 return(KERN_INVALID_ADDRESS
);
3875 copy_size
= copy
->size
;
3878 base_addr
= dst_addr
;
3880 /* deconstruct the copy object and do in parts */
3881 /* only in sub_map, interruptable case */
3882 vm_map_entry_t copy_entry
;
3883 vm_map_entry_t previous_prev
;
3884 vm_map_entry_t next_copy
;
3886 int remaining_entries
;
3889 for (entry
= tmp_entry
; copy_size
== 0;) {
3890 vm_map_entry_t next
;
3892 next
= entry
->vme_next
;
3894 /* tmp_entry and base address are moved along */
3895 /* each time we encounter a sub-map. Otherwise */
			/* entry can outpace tmp_entry, and the copy_size */
3897 /* may reflect the distance between them */
3898 /* if the current entry is found to be in transition */
3899 /* we will start over at the beginning or the last */
3900 /* encounter of a submap as dictated by base_addr */
3901 /* we will zero copy_size accordingly. */
3902 if (entry
->in_transition
) {
3904 * Say that we are waiting, and wait for entry.
3906 entry
->needs_wakeup
= TRUE
;
3907 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3909 if(!vm_map_lookup_entry(dst_map
, base_addr
,
3911 vm_map_unlock(dst_map
);
3912 return(KERN_INVALID_ADDRESS
);
3918 if(entry
->is_sub_map
) {
3919 vm_offset_t sub_start
;
3920 vm_offset_t sub_end
;
3921 vm_offset_t local_end
;
3923 if (entry
->needs_copy
) {
3924 /* if this is a COW submap */
3925 /* just back the range with a */
3926 /* anonymous entry */
3927 if(entry
->vme_end
< dst_end
)
3928 sub_end
= entry
->vme_end
;
3931 if(entry
->vme_start
< base_addr
)
3932 sub_start
= base_addr
;
3934 sub_start
= entry
->vme_start
;
3936 dst_map
, entry
, sub_end
);
3938 dst_map
, entry
, sub_start
);
3939 entry
->is_sub_map
= FALSE
;
3941 entry
->object
.sub_map
);
3942 entry
->object
.sub_map
= NULL
;
3943 entry
->is_shared
= FALSE
;
3944 entry
->needs_copy
= FALSE
;
3946 entry
->protection
= VM_PROT_ALL
;
3947 entry
->max_protection
= VM_PROT_ALL
;
3948 entry
->wired_count
= 0;
3949 entry
->user_wired_count
= 0;
3950 if(entry
->inheritance
3951 == VM_INHERIT_SHARE
)
3952 entry
->inheritance
= VM_INHERIT_COPY
;
3955 /* first take care of any non-sub_map */
3956 /* entries to send */
3957 if(base_addr
< entry
->vme_start
) {
3960 entry
->vme_start
- base_addr
;
3963 sub_start
= entry
->offset
;
3965 if(entry
->vme_end
< dst_end
)
3966 sub_end
= entry
->vme_end
;
3969 sub_end
-= entry
->vme_start
;
3970 sub_end
+= entry
->offset
;
3971 local_end
= entry
->vme_end
;
3972 vm_map_unlock(dst_map
);
3973 copy_size
= sub_end
- sub_start
;
3975 /* adjust the copy object */
3976 if (total_size
> copy_size
) {
3977 vm_size_t local_size
= 0;
3978 vm_size_t entry_size
;
3981 new_offset
= copy
->offset
;
3982 copy_entry
= vm_map_copy_first_entry(copy
);
3984 vm_map_copy_to_entry(copy
)){
3985 entry_size
= copy_entry
->vme_end
-
3986 copy_entry
->vme_start
;
3987 if((local_size
< copy_size
) &&
3988 ((local_size
+ entry_size
)
3990 vm_map_copy_clip_end(copy
,
3992 copy_entry
->vme_start
+
3993 (copy_size
- local_size
));
3994 entry_size
= copy_entry
->vme_end
-
3995 copy_entry
->vme_start
;
3996 local_size
+= entry_size
;
3997 new_offset
+= entry_size
;
3999 if(local_size
>= copy_size
) {
4000 next_copy
= copy_entry
->vme_next
;
4001 copy_entry
->vme_next
=
4002 vm_map_copy_to_entry(copy
);
4004 copy
->cpy_hdr
.links
.prev
;
4005 copy
->cpy_hdr
.links
.prev
= copy_entry
;
4006 copy
->size
= copy_size
;
4008 copy
->cpy_hdr
.nentries
;
4009 remaining_entries
-= nentries
;
4010 copy
->cpy_hdr
.nentries
= nentries
;
4013 local_size
+= entry_size
;
4014 new_offset
+= entry_size
;
4017 copy_entry
= copy_entry
->vme_next
;
4021 if((entry
->use_pmap
) && (pmap
== NULL
)) {
4022 kr
= vm_map_copy_overwrite_nested(
4023 entry
->object
.sub_map
,
4027 entry
->object
.sub_map
->pmap
);
4028 } else if (pmap
!= NULL
) {
4029 kr
= vm_map_copy_overwrite_nested(
4030 entry
->object
.sub_map
,
4033 interruptible
, pmap
);
4035 kr
= vm_map_copy_overwrite_nested(
4036 entry
->object
.sub_map
,
4042 if(kr
!= KERN_SUCCESS
) {
4043 if(next_copy
!= NULL
) {
4044 copy
->cpy_hdr
.nentries
+=
4046 copy
->cpy_hdr
.links
.prev
->vme_next
=
4048 copy
->cpy_hdr
.links
.prev
4050 copy
->size
= total_size
;
4054 if (dst_end
<= local_end
) {
4055 return(KERN_SUCCESS
);
4057 /* otherwise copy no longer exists, it was */
4058 /* destroyed after successful copy_overwrite */
4059 copy
= (vm_map_copy_t
)
4060 zalloc(vm_map_copy_zone
);
4061 vm_map_copy_first_entry(copy
) =
4062 vm_map_copy_last_entry(copy
) =
4063 vm_map_copy_to_entry(copy
);
4064 copy
->type
= VM_MAP_COPY_ENTRY_LIST
;
4065 copy
->offset
= new_offset
;
4067 total_size
-= copy_size
;
4069 /* put back remainder of copy in container */
4070 if(next_copy
!= NULL
) {
4071 copy
->cpy_hdr
.nentries
= remaining_entries
;
4072 copy
->cpy_hdr
.links
.next
= next_copy
;
4073 copy
->cpy_hdr
.links
.prev
= previous_prev
;
4074 copy
->size
= total_size
;
4075 next_copy
->vme_prev
=
4076 vm_map_copy_to_entry(copy
);
4079 base_addr
= local_end
;
4080 vm_map_lock(dst_map
);
4081 if(!vm_map_lookup_entry(dst_map
,
4082 local_end
, &tmp_entry
)) {
4083 vm_map_unlock(dst_map
);
4084 return(KERN_INVALID_ADDRESS
);
4089 if (dst_end
<= entry
->vme_end
) {
4090 copy_size
= dst_end
- base_addr
;
4094 if ((next
== vm_map_to_entry(dst_map
)) ||
4095 (next
->vme_start
!= entry
->vme_end
)) {
4096 vm_map_unlock(dst_map
);
4097 return(KERN_INVALID_ADDRESS
);
4106 /* adjust the copy object */
4107 if (total_size
> copy_size
) {
4108 vm_size_t local_size
= 0;
4109 vm_size_t entry_size
;
4111 new_offset
= copy
->offset
;
4112 copy_entry
= vm_map_copy_first_entry(copy
);
4113 while(copy_entry
!= vm_map_copy_to_entry(copy
)) {
4114 entry_size
= copy_entry
->vme_end
-
4115 copy_entry
->vme_start
;
4116 if((local_size
< copy_size
) &&
4117 ((local_size
+ entry_size
)
4119 vm_map_copy_clip_end(copy
, copy_entry
,
4120 copy_entry
->vme_start
+
4121 (copy_size
- local_size
));
4122 entry_size
= copy_entry
->vme_end
-
4123 copy_entry
->vme_start
;
4124 local_size
+= entry_size
;
4125 new_offset
+= entry_size
;
4127 if(local_size
>= copy_size
) {
4128 next_copy
= copy_entry
->vme_next
;
4129 copy_entry
->vme_next
=
4130 vm_map_copy_to_entry(copy
);
4132 copy
->cpy_hdr
.links
.prev
;
4133 copy
->cpy_hdr
.links
.prev
= copy_entry
;
4134 copy
->size
= copy_size
;
4136 copy
->cpy_hdr
.nentries
;
4137 remaining_entries
-= nentries
;
4138 copy
->cpy_hdr
.nentries
= nentries
;
4141 local_size
+= entry_size
;
4142 new_offset
+= entry_size
;
4145 copy_entry
= copy_entry
->vme_next
;
4155 local_pmap
= dst_map
->pmap
;
4157 if ((kr
= vm_map_copy_overwrite_aligned(
4158 dst_map
, tmp_entry
, copy
,
4159 base_addr
, local_pmap
)) != KERN_SUCCESS
) {
4160 if(next_copy
!= NULL
) {
4161 copy
->cpy_hdr
.nentries
+=
4163 copy
->cpy_hdr
.links
.prev
->vme_next
=
4165 copy
->cpy_hdr
.links
.prev
=
4167 copy
->size
+= copy_size
;
4171 vm_map_unlock(dst_map
);
4176 * if the copy and dst address are misaligned but the same
4177 * offset within the page we can copy_not_aligned the
4178 * misaligned parts and copy aligned the rest. If they are
4179 * aligned but len is unaligned we simply need to copy
4180 * the end bit unaligned. We'll need to split the misaligned
4181 * bits of the region in this case !
4183 /* ALWAYS UNLOCKS THE dst_map MAP */
4184 if ((kr
= vm_map_copy_overwrite_unaligned( dst_map
,
4185 tmp_entry
, copy
, base_addr
)) != KERN_SUCCESS
) {
4186 if(next_copy
!= NULL
) {
4187 copy
->cpy_hdr
.nentries
+=
4189 copy
->cpy_hdr
.links
.prev
->vme_next
=
4191 copy
->cpy_hdr
.links
.prev
=
4193 copy
->size
+= copy_size
;
4198 total_size
-= copy_size
;
4201 base_addr
+= copy_size
;
4203 copy
->offset
= new_offset
;
4204 if(next_copy
!= NULL
) {
4205 copy
->cpy_hdr
.nentries
= remaining_entries
;
4206 copy
->cpy_hdr
.links
.next
= next_copy
;
4207 copy
->cpy_hdr
.links
.prev
= previous_prev
;
4208 next_copy
->vme_prev
= vm_map_copy_to_entry(copy
);
4209 copy
->size
= total_size
;
4211 vm_map_lock(dst_map
);
4213 if (!vm_map_lookup_entry(dst_map
,
4214 base_addr
, &tmp_entry
)) {
4215 vm_map_unlock(dst_map
);
4216 return(KERN_INVALID_ADDRESS
);
4218 if (tmp_entry
->in_transition
) {
4219 entry
->needs_wakeup
= TRUE
;
4220 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
4225 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page_32(base_addr
));
4231 * Throw away the vm_map_copy object
4233 vm_map_copy_discard(copy
);
4235 return(KERN_SUCCESS
);
4236 }/* vm_map_copy_overwrite */
kern_return_t
vm_map_copy_overwrite(
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_map_copy_t	copy,
	boolean_t	interruptible)
{
	return vm_map_copy_overwrite_nested(
			dst_map, dst_addr, copy, interruptible, (pmap_t) NULL);
}
/*
 *	Routine:	vm_map_copy_overwrite_unaligned
 *
 *	Description:
 *		Physically copy unaligned data
 *
 *	Implementation:
 *		Unaligned parts of pages have to be physically copied.  We use
 *		a modified form of vm_fault_copy (which understands non-aligned
 *		page offsets and sizes) to do the copy.  We attempt to copy as
 *		much memory in one go as possible, however vm_fault_copy copies
 *		within one memory object, so we have to find the smaller of
 *		"amount left", "source object data size" and "target object
 *		data size".  With unaligned data we don't need to split regions,
 *		therefore the source (copy) object should be one map entry; the
 *		target range may be split over multiple map entries, however.
 *		In any event we are pessimistic about these assumptions.
 *
 *	Assumptions:
 *		dst_map is locked on entry and is returned locked on success,
 *		unlocked on error.
 */
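
/*
 * Hedged illustration (not part of the original source): the amount copied
 * per iteration below is effectively the minimum of the three quantities
 * named above, i.e. roughly
 *
 *	copy_size = MIN(amount_left, MIN(src_size, dst_size));
 *
 * where src_size and dst_size are the bytes remaining in the current source
 * copy entry and the current destination map entry, respectively.
 */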
4274 vm_map_copy_overwrite_unaligned(
4276 vm_map_entry_t entry
,
4280 vm_map_entry_t copy_entry
= vm_map_copy_first_entry(copy
);
4281 vm_map_version_t version
;
4282 vm_object_t dst_object
;
4283 vm_object_offset_t dst_offset
;
4284 vm_object_offset_t src_offset
;
4285 vm_object_offset_t entry_offset
;
4286 vm_offset_t entry_end
;
4291 kern_return_t kr
= KERN_SUCCESS
;
4293 vm_map_lock_write_to_read(dst_map
);
4295 src_offset
= copy
->offset
- trunc_page_64(copy
->offset
);
4296 amount_left
= copy
->size
;
4298 * unaligned so we never clipped this entry, we need the offset into
4299 * the vm_object not just the data.
4301 while (amount_left
> 0) {
4303 if (entry
== vm_map_to_entry(dst_map
)) {
4304 vm_map_unlock_read(dst_map
);
4305 return KERN_INVALID_ADDRESS
;
4308 /* "start" must be within the current map entry */
4309 assert ((start
>=entry
->vme_start
) && (start
<entry
->vme_end
));
4311 dst_offset
= start
- entry
->vme_start
;
4313 dst_size
= entry
->vme_end
- start
;
4315 src_size
= copy_entry
->vme_end
-
4316 (copy_entry
->vme_start
+ src_offset
);
4318 if (dst_size
< src_size
) {
4320 * we can only copy dst_size bytes before
4321 * we have to get the next destination entry
4323 copy_size
= dst_size
;
4326 * we can only copy src_size bytes before
4327 * we have to get the next source copy entry
4329 copy_size
= src_size
;
4332 if (copy_size
> amount_left
) {
4333 copy_size
= amount_left
;
 *	Entry needs copy; create a shadow object for the
 *	copy-on-write region.
4339 if (entry
->needs_copy
&&
4340 ((entry
->protection
& VM_PROT_WRITE
) != 0))
4342 if (vm_map_lock_read_to_write(dst_map
)) {
4343 vm_map_lock_read(dst_map
);
4346 vm_object_shadow(&entry
->object
.vm_object
,
4348 (vm_size_t
)(entry
->vme_end
4349 - entry
->vme_start
));
4350 entry
->needs_copy
= FALSE
;
4351 vm_map_lock_write_to_read(dst_map
);
4353 dst_object
= entry
->object
.vm_object
;
4355 * unlike with the virtual (aligned) copy we're going
4356 * to fault on it therefore we need a target object.
4358 if (dst_object
== VM_OBJECT_NULL
) {
4359 if (vm_map_lock_read_to_write(dst_map
)) {
4360 vm_map_lock_read(dst_map
);
4363 dst_object
= vm_object_allocate((vm_size_t
)
4364 entry
->vme_end
- entry
->vme_start
);
4365 entry
->object
.vm_object
= dst_object
;
4367 vm_map_lock_write_to_read(dst_map
);
4370 * Take an object reference and unlock map. The "entry" may
4371 * disappear or change when the map is unlocked.
4373 vm_object_reference(dst_object
);
4374 version
.main_timestamp
= dst_map
->timestamp
;
4375 entry_offset
= entry
->offset
;
4376 entry_end
= entry
->vme_end
;
4377 vm_map_unlock_read(dst_map
);
4379 * Copy as much as possible in one pass
4382 copy_entry
->object
.vm_object
,
4383 copy_entry
->offset
+ src_offset
,
4386 entry_offset
+ dst_offset
,
4392 src_offset
+= copy_size
;
4393 amount_left
-= copy_size
;
4395 * Release the object reference
4397 vm_object_deallocate(dst_object
);
4399 * If a hard error occurred, return it now
4401 if (kr
!= KERN_SUCCESS
)
4404 if ((copy_entry
->vme_start
+ src_offset
) == copy_entry
->vme_end
4405 || amount_left
== 0)
4408 * all done with this copy entry, dispose.
4410 vm_map_copy_entry_unlink(copy
, copy_entry
);
4411 vm_object_deallocate(copy_entry
->object
.vm_object
);
4412 vm_map_copy_entry_dispose(copy
, copy_entry
);
4414 if ((copy_entry
= vm_map_copy_first_entry(copy
))
4415 == vm_map_copy_to_entry(copy
) && amount_left
) {
4417 * not finished copying but run out of source
4419 return KERN_INVALID_ADDRESS
;
4424 if (amount_left
== 0)
4425 return KERN_SUCCESS
;
4427 vm_map_lock_read(dst_map
);
4428 if (version
.main_timestamp
== dst_map
->timestamp
) {
4429 if (start
== entry_end
) {
4431 * destination region is split. Use the version
4432 * information to avoid a lookup in the normal
4435 entry
= entry
->vme_next
;
4437 * should be contiguous. Fail if we encounter
4438 * a hole in the destination.
4440 if (start
!= entry
->vme_start
) {
4441 vm_map_unlock_read(dst_map
);
4442 return KERN_INVALID_ADDRESS
;
4447 * Map version check failed.
4448 * we must lookup the entry because somebody
4449 * might have changed the map behind our backs.
4452 if (!vm_map_lookup_entry(dst_map
, start
, &entry
))
4454 vm_map_unlock_read(dst_map
);
4455 return KERN_INVALID_ADDRESS
;
4461 vm_map_unlock_read(dst_map
);
4463 return KERN_SUCCESS
;
4464 }/* vm_map_copy_overwrite_unaligned */
4467 * Routine: vm_map_copy_overwrite_aligned
4470 * Does all the vm_trickery possible for whole pages.
4474 * If there are no permanent objects in the destination,
4475 * and the source and destination map entry zones match,
4476 * and the destination map entry is not shared,
4477 * then the map entries can be deleted and replaced
4478 * with those from the copy. The following code is the
4479 * basic idea of what to do, but there are lots of annoying
4480 * little details about getting protection and inheritance
4481 * right. Should add protection, inheritance, and sharing checks
4482 * to the above pass and make sure that no wiring is involved.
4486 vm_map_copy_overwrite_aligned(
4488 vm_map_entry_t tmp_entry
,
4494 vm_map_entry_t copy_entry
;
4495 vm_size_t copy_size
;
4497 vm_map_entry_t entry
;
4499 while ((copy_entry
= vm_map_copy_first_entry(copy
))
4500 != vm_map_copy_to_entry(copy
))
4502 copy_size
= (copy_entry
->vme_end
- copy_entry
->vme_start
);
4505 if (entry
== vm_map_to_entry(dst_map
)) {
4506 vm_map_unlock(dst_map
);
4507 return KERN_INVALID_ADDRESS
;
4509 size
= (entry
->vme_end
- entry
->vme_start
);
4511 * Make sure that no holes popped up in the
4512 * address map, and that the protection is
4513 * still valid, in case the map was unlocked
4517 if ((entry
->vme_start
!= start
) || ((entry
->is_sub_map
)
4518 && !entry
->needs_copy
)) {
4519 vm_map_unlock(dst_map
);
4520 return(KERN_INVALID_ADDRESS
);
4522 assert(entry
!= vm_map_to_entry(dst_map
));
4525 * Check protection again
4528 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
4529 vm_map_unlock(dst_map
);
4530 return(KERN_PROTECTION_FAILURE
);
4534 * Adjust to source size first
4537 if (copy_size
< size
) {
4538 vm_map_clip_end(dst_map
, entry
, entry
->vme_start
+ copy_size
);
4543 * Adjust to destination size
4546 if (size
< copy_size
) {
4547 vm_map_copy_clip_end(copy
, copy_entry
,
4548 copy_entry
->vme_start
+ size
);
4552 assert((entry
->vme_end
- entry
->vme_start
) == size
);
4553 assert((tmp_entry
->vme_end
- tmp_entry
->vme_start
) == size
);
4554 assert((copy_entry
->vme_end
- copy_entry
->vme_start
) == size
);
4557 * If the destination contains temporary unshared memory,
4558 * we can perform the copy by throwing it away and
4559 * installing the source data.
4562 object
= entry
->object
.vm_object
;
4563 if ((!entry
->is_shared
&&
4564 ((object
== VM_OBJECT_NULL
) ||
4565 (object
->internal
&& !object
->true_share
))) ||
4566 entry
->needs_copy
) {
4567 vm_object_t old_object
= entry
->object
.vm_object
;
4568 vm_object_offset_t old_offset
= entry
->offset
;
4569 vm_object_offset_t offset
;
4572 * Ensure that the source and destination aren't
4575 if (old_object
== copy_entry
->object
.vm_object
&&
4576 old_offset
== copy_entry
->offset
) {
4577 vm_map_copy_entry_unlink(copy
, copy_entry
);
4578 vm_map_copy_entry_dispose(copy
, copy_entry
);
4580 if (old_object
!= VM_OBJECT_NULL
)
4581 vm_object_deallocate(old_object
);
4583 start
= tmp_entry
->vme_end
;
4584 tmp_entry
= tmp_entry
->vme_next
;
4588 if (old_object
!= VM_OBJECT_NULL
) {
4589 if(entry
->is_sub_map
) {
4590 if(entry
->use_pmap
) {
4592 pmap_unnest(dst_map
->pmap
,
4595 - entry
->vme_start
);
4597 if(dst_map
->mapped
) {
4598 /* clean up parent */
4600 vm_map_submap_pmap_clean(
4601 dst_map
, entry
->vme_start
,
4603 entry
->object
.sub_map
,
4607 vm_map_submap_pmap_clean(
4608 dst_map
, entry
->vme_start
,
4610 entry
->object
.sub_map
,
4614 entry
->object
.sub_map
);
4616 if(dst_map
->mapped
) {
4617 vm_object_pmap_protect(
4618 entry
->object
.vm_object
,
4626 pmap_remove(dst_map
->pmap
,
4627 (addr64_t
)(entry
->vme_start
),
4628 (addr64_t
)(entry
->vme_end
));
4630 vm_object_deallocate(old_object
);
4634 entry
->is_sub_map
= FALSE
;
4635 entry
->object
= copy_entry
->object
;
4636 object
= entry
->object
.vm_object
;
4637 entry
->needs_copy
= copy_entry
->needs_copy
;
4638 entry
->wired_count
= 0;
4639 entry
->user_wired_count
= 0;
4640 offset
= entry
->offset
= copy_entry
->offset
;
4642 vm_map_copy_entry_unlink(copy
, copy_entry
);
4643 vm_map_copy_entry_dispose(copy
, copy_entry
);
4644 #if BAD_OPTIMIZATION
4646 * if we turn this optimization back on
4647 * we need to revisit our use of pmap mappings
4648 * large copies will cause us to run out and panic
4649 * this optimization only saved on average 2 us per page if ALL
4650 * the pages in the source were currently mapped
4651 * and ALL the pages in the dest were touched, if there were fewer
4652 * than 2/3 of the pages touched, this optimization actually cost more cycles
4656 * Try to aggressively enter physical mappings
4657 * (but avoid uninstantiated objects)
4659 if (object
!= VM_OBJECT_NULL
) {
4660 vm_offset_t va
= entry
->vme_start
;
4662 while (va
< entry
->vme_end
) {
4663 register vm_page_t m
;
4667 * Look for the page in the top object
4669 prot
= entry
->protection
;
4670 vm_object_lock(object
);
4671 vm_object_paging_begin(object
);
4673 if ((m
= vm_page_lookup(object
,offset
)) !=
4674 VM_PAGE_NULL
&& !m
->busy
&&
4676 (!m
->unusual
|| (!m
->error
&&
4677 !m
->restart
&& !m
->absent
&&
4678 (prot
& m
->page_lock
) == 0))) {
4681 vm_object_unlock(object
);
4684 * Honor COW obligations
4686 if (entry
->needs_copy
)
4687 prot
&= ~VM_PROT_WRITE
;
4688 /* It is our policy to require */
4689 /* explicit sync from anyone */
4690 /* writing code and then */
4691 /* a pc to execute it. */
4694 PMAP_ENTER(pmap
, va
, m
, prot
,
4696 (m
->object
->wimg_bits
))
4700 vm_object_lock(object
);
4701 vm_page_lock_queues();
4702 if (!m
->active
&& !m
->inactive
)
4703 vm_page_activate(m
);
4704 vm_page_unlock_queues();
4705 PAGE_WAKEUP_DONE(m
);
4707 vm_object_paging_end(object
);
4708 vm_object_unlock(object
);
4710 offset
+= PAGE_SIZE_64
;
4712 } /* end while (va < entry->vme_end) */
4713 } /* end if (object) */
4716 * Set up for the next iteration. The map
4717 * has not been unlocked, so the next
4718 * address should be at the end of this
4719 * entry, and the next map entry should be
4720 * the one following it.
4723 start
= tmp_entry
->vme_end
;
4724 tmp_entry
= tmp_entry
->vme_next
;
4726 vm_map_version_t version
;
4727 vm_object_t dst_object
= entry
->object
.vm_object
;
4728 vm_object_offset_t dst_offset
= entry
->offset
;
4732 * Take an object reference, and record
4733 * the map version information so that the
4734 * map can be safely unlocked.
4737 vm_object_reference(dst_object
);
4739 /* account for unlock bumping up timestamp */
4740 version
.main_timestamp
= dst_map
->timestamp
+ 1;
4742 vm_map_unlock(dst_map
);
4745 * Copy as much as possible in one pass
4750 copy_entry
->object
.vm_object
,
4760 * Release the object reference
4763 vm_object_deallocate(dst_object
);
4766 * If a hard error occurred, return it now
4769 if (r
!= KERN_SUCCESS
)
4772 if (copy_size
!= 0) {
4774 * Dispose of the copied region
4777 vm_map_copy_clip_end(copy
, copy_entry
,
4778 copy_entry
->vme_start
+ copy_size
);
4779 vm_map_copy_entry_unlink(copy
, copy_entry
);
4780 vm_object_deallocate(copy_entry
->object
.vm_object
);
4781 vm_map_copy_entry_dispose(copy
, copy_entry
);
4785 * Pick up in the destination map where we left off.
4787 * Use the version information to avoid a lookup
4788 * in the normal case.
4792 vm_map_lock(dst_map
);
4793 if (version
.main_timestamp
== dst_map
->timestamp
) {
4794 /* We can safely use saved tmp_entry value */
4796 vm_map_clip_end(dst_map
, tmp_entry
, start
);
4797 tmp_entry
= tmp_entry
->vme_next
;
4799 /* Must do lookup of tmp_entry */
4801 if (!vm_map_lookup_entry(dst_map
, start
, &tmp_entry
)) {
4802 vm_map_unlock(dst_map
);
4803 return(KERN_INVALID_ADDRESS
);
4805 vm_map_clip_start(dst_map
, tmp_entry
, start
);
4810 return(KERN_SUCCESS
);
4811 }/* vm_map_copy_overwrite_aligned */
/*
 *	Routine:	vm_map_copyin_kernel_buffer
 *
 *	Description:
 *		Copy in data to a kernel buffer from space in the
 *		source map.  The original space may be optionally
 *		deallocated.
 *
 *		If successful, returns a new copy object.
 */
kern_return_t
vm_map_copyin_kernel_buffer(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	len,
	boolean_t	src_destroy,
	vm_map_copy_t	*copy_result)
{
	kern_return_t	kr;
	int		flags;
	vm_map_copy_t	copy;
	vm_size_t	kalloc_size = sizeof(struct vm_map_copy) + len;

	copy = (vm_map_copy_t) kalloc(kalloc_size);
	if (copy == VM_MAP_COPY_NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}
	copy->type = VM_MAP_COPY_KERNEL_BUFFER;
	copy->size = len;
	copy->offset = 0;
	copy->cpy_kdata = (vm_offset_t) (copy + 1);
	copy->cpy_kalloc_size = kalloc_size;

	if (src_map == kernel_map) {
		bcopy((char *)src_addr, (char *)copy->cpy_kdata, len);
		flags = VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_WAIT_FOR_KWIRE |
			VM_MAP_REMOVE_INTERRUPTIBLE;
	} else {
		kr = copyinmap(src_map, src_addr, copy->cpy_kdata, len);
		if (kr != KERN_SUCCESS) {
			kfree((vm_offset_t)copy, kalloc_size);
			return kr;
		}
		flags = VM_MAP_REMOVE_WAIT_FOR_KWIRE |
			VM_MAP_REMOVE_INTERRUPTIBLE;
	}
	if (src_destroy) {
		(void) vm_map_remove(src_map, trunc_page_32(src_addr),
				     round_page_32(src_addr + len),
				     flags);
	}
	*copy_result = copy;
	return KERN_SUCCESS;
}
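
/*
 * Hedged illustration (not part of the original source): the kernel-buffer
 * copy object and its data share one kalloc allocation, with the data placed
 * immediately after the header:
 *
 *	copy                     copy->cpy_kdata == (vm_offset_t)(copy + 1)
 *	|                        |
 *	+------------------------+----------------------------+
 *	| struct vm_map_copy     | len bytes of copied data   |
 *	+------------------------+----------------------------+
 *	|<----------- copy->cpy_kalloc_size ----------------->|
 */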
/*
 *	Routine:	vm_map_copyout_kernel_buffer
 *
 *	Description:
 *		Copy out data from a kernel buffer into space in the
 *		destination map.  The space may be optionally dynamically
 *		allocated.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 */
kern_return_t
vm_map_copyout_kernel_buffer(
	vm_map_t	map,
	vm_offset_t	*addr,	/* IN/OUT */
	vm_map_copy_t	copy,
	boolean_t	overwrite)
{
4886 kern_return_t kr = KERN_SUCCESS;
4887 thread_act_t thr_act = current_act();
4892 * Allocate space in the target map for the data
4895 kr = vm_map_enter(map,
4897 round_page_32(copy->size),
4901 (vm_object_offset_t) 0,
4905 VM_INHERIT_DEFAULT);
4906 if (kr != KERN_SUCCESS)
4911 * Copyout the data from the kernel buffer to the target map.
4913 if (thr_act->map == map) {
4916 * If the target map is the current map, just do
4919 if (copyout((char *)copy->cpy_kdata, (char *)*addr,
4921 return(KERN_INVALID_ADDRESS);
4928 * If the target map is another map, assume the
4929 * target's address space identity for the duration
4932 vm_map_reference(map);
4933 oldmap = vm_map_switch(map);
4935 if (copyout((char *)copy->cpy_kdata, (char *)*addr,
4937 return(KERN_INVALID_ADDRESS);
4940 (void) vm_map_switch(oldmap);
4941 vm_map_deallocate(map);
4944 kfree((vm_offset_t)copy, copy->cpy_kalloc_size);
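/*
 *	Illustrative sketch (not part of the original source): the
 *	address-space switch used above when the target map is not the
 *	current map.  The map is referenced so it cannot disappear,
 *	vm_map_switch() temporarily adopts it so copyout() resolves user
 *	addresses in the target task, and the previous map is restored
 *	afterwards.  The "example_" name is hypothetical.
 */
#if 0	/* sketch only -- not compiled */
static kern_return_t
example_copyout_to_other_map(
	vm_map_t	map,		/* target map, not the current one */
	vm_offset_t	kdata,		/* kernel buffer */
	vm_offset_t	uaddr,		/* user address in "map" */
	vm_size_t	size)
{
	vm_map_t	oldmap;
	kern_return_t	kr = KERN_SUCCESS;

	vm_map_reference(map);		/* keep the target map alive */
	oldmap = vm_map_switch(map);	/* adopt its address space */
	if (copyout((char *)kdata, (char *)uaddr, size))
		kr = KERN_INVALID_ADDRESS;
	(void) vm_map_switch(oldmap);	/* restore our own map */
	vm_map_deallocate(map);
	return kr;
}
#endif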
4950 * Macro: vm_map_copy_insert
4953 * Link a copy chain ("copy") into a map at the
4954 * specified location (after "where").
4956 * The copy chain is destroyed.
4958 * The arguments are evaluated multiple times.
4960 #define vm_map_copy_insert(map, where, copy) \
4962 vm_map_t VMCI_map; \
4963 vm_map_entry_t VMCI_where; \
4964 vm_map_copy_t VMCI_copy; \
4966 VMCI_where = (where); \
4967 VMCI_copy = (copy); \
4968 ((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
4969 ->vme_next = (VMCI_where->vme_next); \
4970 ((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy)) \
4971 ->vme_prev = VMCI_where; \
4972 VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries; \
4973 UPDATE_FIRST_FREE(VMCI_map, VMCI_map->first_free); \
4974 zfree(vm_map_copy_zone, (vm_offset_t) VMCI_copy); \
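/*
 *	Illustrative usage sketch (not part of the original source):
 *	splice an entry-list copy into a map after a known entry.  The
 *	copy object itself is consumed -- its entries now belong to the
 *	map and its zone element is freed -- so "copy" must not be used
 *	again afterwards.
 */
#if 0	/* sketch only -- not compiled */
	vm_map_copy_insert(dst_map, where_entry, copy);
	/* "where_entry" (hypothetical name) is the entry the chain is
	 * linked after; dst_map's entry count already reflects it */
#endif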
4978 * Routine: vm_map_copyout
4981 * Copy out a copy chain ("copy") into newly-allocated
4982 * space in the destination map.
4984 * If successful, consumes the copy object.
4985 * Otherwise, the caller is responsible for it.
4989 register vm_map_t dst_map
,
4990 vm_offset_t
*dst_addr
, /* OUT */
4991 register vm_map_copy_t copy
)
4994 vm_size_t adjustment
;
4996 vm_object_offset_t vm_copy_start
;
4997 vm_map_entry_t last
;
4999 vm_map_entry_t entry
;
5002 * Check for null copy object.
5005 if (copy
== VM_MAP_COPY_NULL
) {
5007 return(KERN_SUCCESS
);
5011 * Check for special copy object, created
5012 * by vm_map_copyin_object.
5015 if (copy
->type
== VM_MAP_COPY_OBJECT
) {
5016 vm_object_t object
= copy
->cpy_object
;
5018 vm_object_offset_t offset
;
5020 offset
= trunc_page_64(copy
->offset
);
5021 size
= round_page_32(copy
->size
+
5022 (vm_size_t
)(copy
->offset
- offset
));
5024 kr
= vm_map_enter(dst_map
, dst_addr
, size
,
5025 (vm_offset_t
) 0, TRUE
,
5026 object
, offset
, FALSE
,
5027 VM_PROT_DEFAULT
, VM_PROT_ALL
,
5028 VM_INHERIT_DEFAULT
);
5029 if (kr
!= KERN_SUCCESS
)
5031 /* Account for non-pagealigned copy object */
5032 *dst_addr
+= (vm_offset_t
)(copy
->offset
- offset
);
5033 zfree(vm_map_copy_zone
, (vm_offset_t
) copy
);
5034 return(KERN_SUCCESS
);
5038 * Check for special kernel buffer allocated
5039 * by new_ipc_kmsg_copyin.
5042 if (copy
->type
== VM_MAP_COPY_KERNEL_BUFFER
) {
5043 return(vm_map_copyout_kernel_buffer(dst_map
, dst_addr
,
5048 * Find space for the data
5051 vm_copy_start
= trunc_page_64(copy
->offset
);
5052 size
= round_page_32((vm_size_t
)copy
->offset
+ copy
->size
)
5057 vm_map_lock(dst_map
);
5058 assert(first_free_is_valid(dst_map
));
5059 start
= ((last
= dst_map
->first_free
) == vm_map_to_entry(dst_map
)) ?
5060 vm_map_min(dst_map
) : last
->vme_end
;
5063 vm_map_entry_t next
= last
->vme_next
;
5064 vm_offset_t end
= start
+ size
;
5066 if ((end
> dst_map
->max_offset
) || (end
< start
)) {
5067 if (dst_map
->wait_for_space
) {
5068 if (size
<= (dst_map
->max_offset
- dst_map
->min_offset
)) {
5069 assert_wait((event_t
) dst_map
,
5070 THREAD_INTERRUPTIBLE
);
5071 vm_map_unlock(dst_map
);
5072 thread_block((void (*)(void))0);
5076 vm_map_unlock(dst_map
);
5077 return(KERN_NO_SPACE
);
5080 if ((next
== vm_map_to_entry(dst_map
)) ||
5081 (next
->vme_start
>= end
))
5085 start
= last
->vme_end
;
5089 * Since we're going to just drop the map
5090 * entries from the copy into the destination
5091 * map, they must come from the same pool.
5094 if (copy
->cpy_hdr
.entries_pageable
!= dst_map
->hdr
.entries_pageable
) {
5096 * Mismatches occur when dealing with the default
5100 vm_map_entry_t next
, new;
5103 * Find the zone that the copies were allocated from
5105 old_zone
= (copy
->cpy_hdr
.entries_pageable
)
5107 : vm_map_kentry_zone
;
5108 entry
= vm_map_copy_first_entry(copy
);
5111 * Reinitialize the copy so that vm_map_copy_entry_link
5114 copy
->cpy_hdr
.nentries
= 0;
5115 copy
->cpy_hdr
.entries_pageable
= dst_map
->hdr
.entries_pageable
;
5116 vm_map_copy_first_entry(copy
) =
5117 vm_map_copy_last_entry(copy
) =
5118 vm_map_copy_to_entry(copy
);
5123 while (entry
!= vm_map_copy_to_entry(copy
)) {
5124 new = vm_map_copy_entry_create(copy
);
5125 vm_map_entry_copy_full(new, entry
);
5126 new->use_pmap
= FALSE
; /* clr address space specifics */
5127 vm_map_copy_entry_link(copy
,
5128 vm_map_copy_last_entry(copy
),
5130 next
= entry
->vme_next
;
5131 zfree(old_zone
, (vm_offset_t
) entry
);
5137 * Adjust the addresses in the copy chain, and
5138 * reset the region attributes.
5141 adjustment
= start
- vm_copy_start
;
5142 for (entry
= vm_map_copy_first_entry(copy
);
5143 entry
!= vm_map_copy_to_entry(copy
);
5144 entry
= entry
->vme_next
) {
5145 entry
->vme_start
+= adjustment
;
5146 entry
->vme_end
+= adjustment
;
5148 entry
->inheritance
= VM_INHERIT_DEFAULT
;
5149 entry
->protection
= VM_PROT_DEFAULT
;
5150 entry
->max_protection
= VM_PROT_ALL
;
5151 entry
->behavior
= VM_BEHAVIOR_DEFAULT
;
5154 * If the entry is now wired,
5155 * map the pages into the destination map.
5157 if (entry
->wired_count
!= 0) {
5158 register vm_offset_t va
;
5159 vm_object_offset_t offset
;
5160 register vm_object_t object
;
5162 object
= entry
->object
.vm_object
;
5163 offset
= entry
->offset
;
5164 va
= entry
->vme_start
;
5166 pmap_pageable(dst_map
->pmap
,
5171 while (va
< entry
->vme_end
) {
5172 register vm_page_t m
;
5175 * Look up the page in the object.
5176 * Assert that the page will be found in the
5179 * the object was newly created by
5180 * vm_object_copy_slowly, and has
5181 * copies of all of the pages from
5184 * the object was moved from the old
5185 * map entry; because the old map
5186 * entry was wired, all of the pages
5187 * were in the top-level object.
5188 * (XXX not true if we wire pages for
5191 vm_object_lock(object
);
5192 vm_object_paging_begin(object
);
5194 m
= vm_page_lookup(object
, offset
);
5195 if (m
== VM_PAGE_NULL
|| m
->wire_count
== 0 ||
5197 panic("vm_map_copyout: wiring 0x%x", m
);
5200 vm_object_unlock(object
);
5202 PMAP_ENTER(dst_map
->pmap
, va
, m
, entry
->protection
,
5204 (m
->object
->wimg_bits
))
5208 vm_object_lock(object
);
5209 PAGE_WAKEUP_DONE(m
);
5210 /* the page is wired, so we don't have to activate */
5211 vm_object_paging_end(object
);
5212 vm_object_unlock(object
);
5214 offset
+= PAGE_SIZE_64
;
5218 else if (size
<= vm_map_aggressive_enter_max
) {
5220 register vm_offset_t va
;
5221 vm_object_offset_t offset
;
5222 register vm_object_t object
;
5225 object
= entry
->object
.vm_object
;
5226 if (object
!= VM_OBJECT_NULL
) {
5228 offset
= entry
->offset
;
5229 va
= entry
->vme_start
;
5230 while (va
< entry
->vme_end
) {
5231 register vm_page_t m
;
5234 * Look up the page in the object.
5235 * Assert that the page will be found
5236 * in the top object if at all...
5238 vm_object_lock(object
);
5239 vm_object_paging_begin(object
);
5241 if (((m
= vm_page_lookup(object
,
5244 !m
->busy
&& !m
->fictitious
&&
5245 !m
->absent
&& !m
->error
) {
5247 vm_object_unlock(object
);
5249 /* honor cow obligations */
5250 prot
= entry
->protection
;
5251 if (entry
->needs_copy
)
5252 prot
&= ~VM_PROT_WRITE
;
5254 PMAP_ENTER(dst_map
->pmap
, va
,
5257 (m
->object
->wimg_bits
))
5261 vm_object_lock(object
);
5262 vm_page_lock_queues();
5263 if (!m
->active
&& !m
->inactive
)
5264 vm_page_activate(m
);
5265 vm_page_unlock_queues();
5266 PAGE_WAKEUP_DONE(m
);
5268 vm_object_paging_end(object
);
5269 vm_object_unlock(object
);
5271 offset
+= PAGE_SIZE_64
;
5279 * Correct the page alignment for the result
5282 *dst_addr
= start
+ (copy
->offset
- vm_copy_start
);
5285 * Update the hints and the map size
5288 SAVE_HINT(dst_map
, vm_map_copy_last_entry(copy
));
5290 dst_map
->size
+= size
;
5296 vm_map_copy_insert(dst_map
, last
, copy
);
5298 vm_map_unlock(dst_map
);
5301 * XXX If wiring_required, call vm_map_pageable
5304 return(KERN_SUCCESS
);
5307 boolean_t vm_map_aggressive_enter
; /* not used yet */
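/*
 *	Illustrative sketch (not part of the original source): the usual
 *	pairing of vm_map_copyin() (the wrapper in vm_map.h around
 *	vm_map_copyin_common() below) with vm_map_copyout(), as used for
 *	out-of-line IPC data: capture a region from one map, then drop
 *	it into freshly allocated space in another.  If copyout fails,
 *	the copy object is still the caller's to discard.  The
 *	"example_" name is hypothetical.
 */
#if 0	/* sketch only -- not compiled */
static kern_return_t
example_move_region(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	len,
	vm_map_t	dst_map,
	vm_offset_t	*dst_addr)	/* OUT */
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	kr = vm_map_copyin(src_map, src_addr, len,
			   FALSE,	/* don't destroy the source */
			   &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copyout(dst_map, dst_addr, copy);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);	/* not consumed on failure */
	return kr;
}
#endif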
5311 * Routine: vm_map_copyin
5314 * Copy the specified region (src_addr, len) from the
5315 * source address space (src_map), possibly removing
5316 * the region from the source address space (src_destroy).
5319 * A vm_map_copy_t object (copy_result), suitable for
5320 * insertion into another address space (using vm_map_copyout),
5321 * copying over another address space region (using
5322 * vm_map_copy_overwrite). If the copy is unused, it
5323 * should be destroyed (using vm_map_copy_discard).
5325 * In/out conditions:
5326 * The source map should not be locked on entry.
5329 typedef struct submap_map
{
5330 vm_map_t parent_map
;
5331 vm_offset_t base_start
;
5332 vm_offset_t base_end
;
5333 struct submap_map
*next
;
5337 vm_map_copyin_common(
5339 vm_offset_t src_addr
,
5341 boolean_t src_destroy
,
5342 boolean_t src_volatile
,
5343 vm_map_copy_t
*copy_result
, /* OUT */
5344 boolean_t use_maxprot
)
5346 extern int msg_ool_size_small
;
5348 vm_map_entry_t tmp_entry
; /* Result of last map lookup --
5349 * in multi-level lookup, this
5350 * entry contains the actual
5354 vm_map_entry_t new_entry
= VM_MAP_ENTRY_NULL
; /* Map entry for copy */
5356 vm_offset_t src_start
; /* Start of current entry --
5357 * where copy is taking place now
5359 vm_offset_t src_end
; /* End of entire region to be
5361 vm_offset_t base_start
; /* submap fields to save offsets */
5362 /* in original map */
5363 vm_offset_t base_end
;
5364 vm_map_t base_map
=src_map
;
5365 vm_map_entry_t base_entry
;
5366 boolean_t map_share
=FALSE
;
5367 submap_map_t
*parent_maps
= NULL
;
5370 vm_map_copy_t copy
; /* Resulting copy */
5371 vm_offset_t copy_addr
;
5374 * Check for copies of zero bytes.
5378 *copy_result
= VM_MAP_COPY_NULL
;
5379 return(KERN_SUCCESS
);
5383 * Check that the end address doesn't overflow
5385 src_end
= src_addr
+ len
;
5386 if (src_end
< src_addr
)
5387 return KERN_INVALID_ADDRESS
;
5390 * If the copy is sufficiently small, use a kernel buffer instead
5391 * of making a virtual copy. The theory being that the cost of
5392 * setting up VM (and taking C-O-W faults) dominates the copy costs
5393 * for small regions.
5395 if ((len
< msg_ool_size_small
) && !use_maxprot
)
5396 return vm_map_copyin_kernel_buffer(src_map
, src_addr
, len
,
5397 src_destroy
, copy_result
);
5400 * Compute (page aligned) start and end of region
5402 src_start
= trunc_page_32(src_addr
);
5403 src_end
= round_page_32(src_end
);
5405 XPR(XPR_VM_MAP
, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t
)src_map
, src_addr
, len
, src_destroy
, 0);
5408 * Allocate a header element for the list.
5410 * Use the start and end in the header to
5411 * remember the endpoints prior to rounding.
5414 copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
5415 vm_map_copy_first_entry(copy) =
5416 vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
5417 copy->type = VM_MAP_COPY_ENTRY_LIST;
5418 copy->cpy_hdr.nentries = 0;
5419 copy->cpy_hdr.entries_pageable = TRUE;
5421 copy->offset = src_addr;
5424 new_entry = vm_map_copy_entry_create(copy);
5428 vm_map_unlock(src_map); \
5429 if(src_map != base_map) \
5430 vm_map_deallocate(src_map); \
5431 if (new_entry != VM_MAP_ENTRY_NULL) \
5432 vm_map_copy_entry_dispose(copy,new_entry); \
5433 vm_map_copy_discard(copy); \
5435 submap_map_t *ptr; \
5437 for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { \
5438 parent_maps=parent_maps->next; \
5439 if (ptr->parent_map != base_map) \
5440 vm_map_deallocate(ptr->parent_map); \
5441 kfree((vm_offset_t)ptr, sizeof(submap_map_t)); \
5448 * Find the beginning of the region.
5451 vm_map_lock(src_map
);
5453 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
))
5454 RETURN(KERN_INVALID_ADDRESS
);
5455 if(!tmp_entry
->is_sub_map
) {
5456 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5458 /* set for later submap fix-up */
5459 copy_addr
= src_start
;
5462 * Go through entries until we get to the end.
5467 vm_map_entry_t src_entry
= tmp_entry
; /* Top-level entry */
5468 vm_size_t src_size
; /* Size of source
5469 * map entry (in both
5474 vm_object_t src_object
; /* Object to copy */
5475 vm_object_offset_t src_offset
;
5477 boolean_t src_needs_copy
; /* Should source map
5479 * for copy-on-write?
5482 boolean_t new_entry_needs_copy
; /* Will new entry be COW? */
5484 boolean_t was_wired
; /* Was source wired? */
5485 vm_map_version_t version
; /* Version before locks
5486 * dropped to make copy
5488 kern_return_t result
; /* Return value from
5489 * copy_strategically.
5491 while(tmp_entry
->is_sub_map
) {
5492 vm_size_t submap_len
;
5495 ptr
= (submap_map_t
*)kalloc(sizeof(submap_map_t
));
5496 ptr
->next
= parent_maps
;
5498 ptr
->parent_map
= src_map
;
5499 ptr
->base_start
= src_start
;
5500 ptr
->base_end
= src_end
;
5501 submap_len
= tmp_entry
->vme_end
- src_start
;
5502 if(submap_len
> (src_end
-src_start
))
5503 submap_len
= src_end
-src_start
;
5504 ptr
->base_start
+= submap_len
;
5506 src_start
-= tmp_entry
->vme_start
;
5507 src_start
+= tmp_entry
->offset
;
5508 src_end
= src_start
+ submap_len
;
5509 src_map
= tmp_entry
->object
.sub_map
;
5510 vm_map_lock(src_map
);
5511 /* keep an outstanding reference for all maps in */
5512 /* the tree of parent maps except the base map */
5513 vm_map_reference(src_map
);
5514 vm_map_unlock(ptr
->parent_map
);
5515 if (!vm_map_lookup_entry(
5516 src_map
, src_start
, &tmp_entry
))
5517 RETURN(KERN_INVALID_ADDRESS
);
5519 if(!tmp_entry
->is_sub_map
)
5520 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5521 src_entry
= tmp_entry
;
5523 if ((tmp_entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
5524 (tmp_entry
->object
.vm_object
->phys_contiguous
)) {
5525 /* This is not supported for now. In the future */
5526 /* we will need to detect the phys_contig */
5527 /* condition and then upgrade copy_slowly */
5528 /* to do physical copy from the device mem- */
5529 /* based object. We can piggy-back off of */
5530 /* the was_wired boolean to set up the */
5531 /* proper handling */
5532 RETURN(KERN_PROTECTION_FAILURE
);
5535 * Create a new address map entry to hold the result.
5536 * Fill in the fields from the appropriate source entries.
5537 * We must unlock the source map to do this if we need
5538 * to allocate a map entry.
5540 if (new_entry
== VM_MAP_ENTRY_NULL
) {
5541 version
.main_timestamp
= src_map
->timestamp
;
5542 vm_map_unlock(src_map
);
5544 new_entry
= vm_map_copy_entry_create(copy
);
5546 vm_map_lock(src_map
);
5547 if ((version
.main_timestamp
+ 1) != src_map
->timestamp
) {
5548 if (!vm_map_lookup_entry(src_map
, src_start
,
5550 RETURN(KERN_INVALID_ADDRESS
);
5552 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5553 continue; /* restart w/ new tmp_entry */
5558 * Verify that the region can be read.
5560 if (((src_entry
->protection
& VM_PROT_READ
) == VM_PROT_NONE
&&
5562 (src_entry
->max_protection
& VM_PROT_READ
) == 0)
5563 RETURN(KERN_PROTECTION_FAILURE
);
5566 * Clip against the endpoints of the entire region.
5569 vm_map_clip_end(src_map
, src_entry
, src_end
);
5571 src_size
= src_entry
->vme_end
- src_start
;
5572 src_object
= src_entry
->object
.vm_object
;
5573 src_offset
= src_entry
->offset
;
5574 was_wired
= (src_entry
->wired_count
!= 0);
5576 vm_map_entry_copy(new_entry
, src_entry
);
5577 new_entry
->use_pmap
= FALSE
; /* clr address space specifics */
5580 * Attempt non-blocking copy-on-write optimizations.
5584 (src_object
== VM_OBJECT_NULL
||
5585 (src_object
->internal
&& !src_object
->true_share
5588 * If we are destroying the source, and the object
5589 * is internal, we can move the object reference
5590 * from the source to the copy. The copy is
5591 * copy-on-write only if the source is.
5592 * We make another reference to the object, because
5593 * destroying the source entry will deallocate it.
5595 vm_object_reference(src_object
);
5598 * Copy is always unwired. vm_map_copy_entry
5599 * sets its wired count to zero.
5602 goto CopySuccessful
;
5607 XPR(XPR_VM_MAP
, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
5608 src_object
, new_entry
, new_entry
->object
.vm_object
,
5610 if ((src_object
== VM_OBJECT_NULL
||
5611 (!was_wired
&& !map_share
&& !tmp_entry
->is_shared
)) &&
5612 vm_object_copy_quickly(
5613 &new_entry
->object
.vm_object
,
5617 &new_entry_needs_copy
)) {
5619 new_entry
->needs_copy
= new_entry_needs_copy
;
5622 * Handle copy-on-write obligations
5625 if (src_needs_copy
&& !tmp_entry
->needs_copy
) {
5626 vm_object_pmap_protect(
5630 (src_entry
->is_shared
?
5633 src_entry
->vme_start
,
5634 src_entry
->protection
&
5636 tmp_entry
->needs_copy
= TRUE
;
5640 * The map has never been unlocked, so it's safe
5641 * to move to the next entry rather than doing
5645 goto CopySuccessful
;
5649 * Take an object reference, so that we may
5650 * release the map lock(s).
5653 assert(src_object
!= VM_OBJECT_NULL
);
5654 vm_object_reference(src_object
);
5657 * Record the timestamp for later verification.
5661 version
.main_timestamp
= src_map
->timestamp
;
5662 vm_map_unlock(src_map
); /* Increments timestamp once! */
5670 vm_object_lock(src_object
);
5671 result
= vm_object_copy_slowly(
5676 &new_entry
->object
.vm_object
);
5677 new_entry
->offset
= 0;
5678 new_entry
->needs_copy
= FALSE
;
5681 else if (src_object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
&&
5682 (tmp_entry
->is_shared
|| map_share
)) {
5683 vm_object_t new_object
;
5685 vm_object_lock(src_object
);
5686 new_object
= vm_object_copy_delayed(
5690 if (new_object
== VM_OBJECT_NULL
)
5693 new_entry
->object
.vm_object
= new_object
;
5694 new_entry
->needs_copy
= TRUE
;
5695 result
= KERN_SUCCESS
;
5698 result
= vm_object_copy_strategically(src_object
,
5701 &new_entry
->object
.vm_object
,
5703 &new_entry_needs_copy
);
5705 new_entry
->needs_copy
= new_entry_needs_copy
;
5708 if (result
!= KERN_SUCCESS
&&
5709 result
!= KERN_MEMORY_RESTART_COPY
) {
5710 vm_map_lock(src_map
);
5715 * Throw away the extra reference
5718 vm_object_deallocate(src_object
);
5721 * Verify that the map has not substantially
5722 * changed while the copy was being made.
5725 vm_map_lock(src_map
);
5727 if ((version
.main_timestamp
+ 1) == src_map
->timestamp
)
5728 goto VerificationSuccessful
;
5731 * Simple version comparison failed.
5733 * Retry the lookup and verify that the
5734 * same object/offset are still present.
5736 * [Note: a memory manager that colludes with
5737 * the calling task can detect that we have
5738 * cheated. While the map was unlocked, the
5739 * mapping could have been changed and restored.]
5742 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
)) {
5743 RETURN(KERN_INVALID_ADDRESS
);
5746 src_entry
= tmp_entry
;
5747 vm_map_clip_start(src_map
, src_entry
, src_start
);
5749 if ((src_entry
->protection
& VM_PROT_READ
== VM_PROT_NONE
&&
5751 src_entry
->max_protection
& VM_PROT_READ
== 0)
5752 goto VerificationFailed
;
5754 if (src_entry
->vme_end
< new_entry
->vme_end
)
5755 src_size
= (new_entry
->vme_end
= src_entry
->vme_end
) - src_start
;
5757 if ((src_entry
->object
.vm_object
!= src_object
) ||
5758 (src_entry
->offset
!= src_offset
) ) {
5761 * Verification failed.
5763 * Start over with this top-level entry.
5766 VerificationFailed
: ;
5768 vm_object_deallocate(new_entry
->object
.vm_object
);
5769 tmp_entry
= src_entry
;
5774 * Verification succeeded.
5777 VerificationSuccessful
: ;
5779 if (result
== KERN_MEMORY_RESTART_COPY
)
5789 * Link in the new copy entry.
5792 vm_map_copy_entry_link(copy
, vm_map_copy_last_entry(copy
),
5796 * Determine whether the entire region
5799 src_start
= new_entry
->vme_end
;
5800 new_entry
= VM_MAP_ENTRY_NULL
;
5801 while ((src_start
>= src_end
) && (src_end
!= 0)) {
5802 if (src_map
!= base_map
) {
5806 assert(ptr
!= NULL
);
5807 parent_maps
= parent_maps
->next
;
5808 vm_map_unlock(src_map
);
5809 vm_map_deallocate(src_map
);
5810 vm_map_lock(ptr
->parent_map
);
5811 src_map
= ptr
->parent_map
;
5812 src_start
= ptr
->base_start
;
5813 src_end
= ptr
->base_end
;
5814 if ((src_end
> src_start
) &&
5815 !vm_map_lookup_entry(
5816 src_map
, src_start
, &tmp_entry
))
5817 RETURN(KERN_INVALID_ADDRESS
);
5818 kfree((vm_offset_t
)ptr
, sizeof(submap_map_t
));
5819 if(parent_maps
== NULL
)
5821 src_entry
= tmp_entry
->vme_prev
;
5825 if ((src_start
>= src_end
) && (src_end
!= 0))
5829 * Verify that there are no gaps in the region
5832 tmp_entry
= src_entry
->vme_next
;
5833 if ((tmp_entry
->vme_start
!= src_start
) ||
5834 (tmp_entry
== vm_map_to_entry(src_map
)))
5835 RETURN(KERN_INVALID_ADDRESS
);
5839 * If the source should be destroyed, do it now, since the
5840 * copy was successful.
5843 (void) vm_map_delete(src_map
,
5844 trunc_page_32(src_addr
),
5846 (src_map
== kernel_map
) ?
5847 VM_MAP_REMOVE_KUNWIRE
:
5851 vm_map_unlock(src_map
);
5853 /* Fix-up start and end points in copy. This is necessary */
5854 /* when the various entries in the copy object were picked */
5855 /* up from different sub-maps */
5857 tmp_entry = vm_map_copy_first_entry(copy);
5858 while (tmp_entry != vm_map_copy_to_entry(copy)) {
5859 tmp_entry->vme_end = copy_addr +
5860 (tmp_entry->vme_end - tmp_entry->vme_start);
5861 tmp_entry->vme_start = copy_addr;
5862 copy_addr += tmp_entry->vme_end - tmp_entry->vme_start;
5863 tmp_entry = (struct vm_map_entry *)tmp_entry->vme_next;
5866 *copy_result = copy;
5867 return(KERN_SUCCESS);
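/*
 *	Illustrative sketch (not part of the original source): how the
 *	two common entry points map onto vm_map_copyin_common() above.
 *	The ordinary copy requires read permission on the source; the
 *	maxprot variant (used by vm_map_fork_copy below) only requires
 *	that the region could ever be made readable.  The real wrapper
 *	definitions live in vm_map.h; only the flag positions are shown.
 */
#if 0	/* sketch only -- not compiled */
	/* ordinary copy: src_volatile FALSE, use_maxprot FALSE */
	kr = vm_map_copyin_common(src_map, src_addr, len,
				  src_destroy, FALSE, &copy, FALSE);

	/* maxprot copy: same, but use_maxprot TRUE */
	kr = vm_map_copyin_common(src_map, src_addr, len,
				  src_destroy, FALSE, &copy, TRUE);
#endif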
5873 * vm_map_copyin_object:
5875 * Create a copy object from an object.
5876 * Our caller donates an object reference.
5880 vm_map_copyin_object(
5882 vm_object_offset_t offset
, /* offset of region in object */
5883 vm_object_size_t size
, /* size of region in object */
5884 vm_map_copy_t
*copy_result
) /* OUT */
5886 vm_map_copy_t copy
; /* Resulting copy */
5889 * We drop the object into a special copy object
5890 * that contains the object directly.
5893 copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
5894 copy->type = VM_MAP_COPY_OBJECT;
5895 copy->cpy_object = object;
5896 copy->cpy_index = 0;
5897 copy->offset = offset;
5900 *copy_result = copy;
5901 return(KERN_SUCCESS);
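/*
 *	Illustrative sketch (not part of the original source): building a
 *	copy object straight from a VM object.  The caller donates one
 *	object reference, which the copy object holds until it is either
 *	copied out or discarded.  Names other than the routines above
 *	are hypothetical.
 */
#if 0	/* sketch only -- not compiled */
	vm_map_copy_t	copy;

	vm_object_reference(object);		/* reference donated below */
	(void) vm_map_copyin_object(object, 0, size, &copy);
	kr = vm_map_copyout(dst_map, &dst_addr, copy);
	/* on failure the caller still owns the copy and may discard it */
#endif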
5907 vm_map_entry_t old_entry
,
5911 vm_map_entry_t new_entry
;
5912 kern_return_t result
;
5915 * New sharing code. New map entry
5916 * references original object. Internal
5917 * objects use asynchronous copy algorithm for
5918 * future copies. First make sure we have
5919 * the right object. If we need a shadow,
5920 * or someone else already has one, then
5921 * make a new shadow and share it.
5924 object = old_entry->object.vm_object;
5925 if (old_entry->is_sub_map) {
5926 assert(old_entry->wired_count == 0);
5928 if(old_entry->use_pmap) {
5929 result = pmap_nest(new_map->pmap,
5930 (old_entry->object.sub_map)->pmap,
5931 (addr64_t)old_entry->vme_start,
5932 (addr64_t)old_entry->vme_start,
5933 (uint64_t)(old_entry->vme_end - old_entry->vme_start));
5935 panic("vm_map_fork_share: pmap_nest failed!");
5938 } else if (object
== VM_OBJECT_NULL
) {
5939 object
= vm_object_allocate((vm_size_t
)(old_entry
->vme_end
-
5940 old_entry
->vme_start
));
5941 old_entry
->offset
= 0;
5942 old_entry
->object
.vm_object
= object
;
5943 assert(!old_entry
->needs_copy
);
5944 } else if (object
->copy_strategy
!=
5945 MEMORY_OBJECT_COPY_SYMMETRIC
) {
5948 * We are already using an asymmetric
5949 * copy, and therefore we already have
5953 assert(! old_entry
->needs_copy
);
5955 else if (old_entry
->needs_copy
|| /* case 1 */
5956 object
->shadowed
|| /* case 2 */
5957 (!object
->true_share
&& /* case 3 */
5958 !old_entry
->is_shared
&&
5960 (vm_size_t
)(old_entry
->vme_end
-
5961 old_entry
->vme_start
)))) {
5964 * We need to create a shadow.
5965 * There are three cases here.
5966 * In the first case, we need to
5967 * complete a deferred symmetrical
5968 * copy that we participated in.
5969 * In the second and third cases,
5970 * we need to create the shadow so
5971 * that changes that we make to the
5972 * object do not interfere with
5973 * any symmetrical copies which
5974 * have occurred (case 2) or which
5975 * might occur (case 3).
5977 * The first case is when we had
5978 * deferred shadow object creation
5979 * via the entry->needs_copy mechanism.
5980 * This mechanism only works when
5981 * only one entry points to the source
5982 * object, and we are about to create
5983 * a second entry pointing to the
5984 * same object. The problem is that
5985 * there is no way of mapping from
5986 * an object to the entries pointing
5987 * to it. (Deferred shadow creation
5988 * works with one entry because it occurs
5989 * at fault time, and we walk from the
5990 * entry to the object when handling
5993 * The second case is when the object
5994 * to be shared has already been copied
5995 * with a symmetric copy, but we point
5996 * directly to the object without
5997 * needs_copy set in our entry. (This
5998 * can happen because different ranges
5999 * of an object can be pointed to by
6000 * different entries. In particular,
6001 * a single entry pointing to an object
6002 * can be split by a call to vm_inherit,
6003 * which, combined with task_create, can
6004 * result in the different entries
6005 * having different needs_copy values.)
6006 * The shadowed flag in the object allows
6007 * us to detect this case. The problem
6008 * with this case is that if this object
6009 * has or will have shadows, then we
6010 * must not perform an asymmetric copy
6011 * of this object, since such a copy
6012 * allows the object to be changed, which
6013 * will break the previous symmetrical
6014 * copies (which rely upon the object
6015 * not changing). In a sense, the shadowed
6016 * flag says "don't change this object".
6017 * We fix this by creating a shadow
6018 * object for this object, and sharing
6019 * that. This works because we are free
6020 * to change the shadow object (and thus
6021 * to use an asymmetric copy strategy);
6022 * this is also semantically correct,
6023 * since this object is temporary, and
6024 * therefore a copy of the object is
6025 * as good as the object itself. (This
6026 * is not true for permanent objects,
6027 * since the pager needs to see changes,
6028 * which won't happen if the changes
6029 * are made to a copy.)
6031 * The third case is when the object
6032 * to be shared has parts sticking
6033 * outside of the entry we're working
6034 * with, and thus may in the future
6035 * be subject to a symmetrical copy.
6036 * (This is a preemptive version of
6040 assert(!(object
->shadowed
&& old_entry
->is_shared
));
6041 vm_object_shadow(&old_entry
->object
.vm_object
,
6043 (vm_size_t
) (old_entry
->vme_end
-
6044 old_entry
->vme_start
));
6047 * If we're making a shadow for other than
6048 * copy on write reasons, then we have
6049 * to remove write permission.
6052 if (!old_entry
->needs_copy
&&
6053 (old_entry
->protection
& VM_PROT_WRITE
)) {
6054 if(old_map
->mapped
) {
6055 vm_object_pmap_protect(
6056 old_entry
->object
.vm_object
,
6058 (old_entry
->vme_end
-
6059 old_entry
->vme_start
),
6061 old_entry
->vme_start
,
6062 old_entry
->protection
& ~VM_PROT_WRITE
);
6064 pmap_protect(old_map
->pmap
,
6065 old_entry
->vme_start
,
6067 old_entry
->protection
& ~VM_PROT_WRITE
);
6071 old_entry
->needs_copy
= FALSE
;
6072 object
= old_entry
->object
.vm_object
;
6076 * If object was using a symmetric copy strategy,
6077 * change its copy strategy to the default
6078 * asymmetric copy strategy, which is copy_delay
6079 * in the non-norma case and copy_call in the
6080 * norma case. Bump the reference count for the
6084 if(old_entry
->is_sub_map
) {
6085 vm_map_lock(old_entry
->object
.sub_map
);
6086 vm_map_reference(old_entry
->object
.sub_map
);
6087 vm_map_unlock(old_entry
->object
.sub_map
);
6089 vm_object_lock(object
);
6090 object
->ref_count
++;
6091 vm_object_res_reference(object
);
6092 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) {
6093 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
6095 vm_object_unlock(object
);
6099 * Clone the entry, using object ref from above.
6100 * Mark both entries as shared.
6103 new_entry
= vm_map_entry_create(new_map
);
6104 vm_map_entry_copy(new_entry
, old_entry
);
6105 old_entry
->is_shared
= TRUE
;
6106 new_entry
->is_shared
= TRUE
;
6109 * Insert the entry into the new map -- we
6110 * know we're inserting at the end of the new
6114 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
), new_entry
);
6117 * Update the physical map
6120 if (old_entry
->is_sub_map
) {
6121 /* Bill Angell pmap support goes here */
6123 pmap_copy(new_map
->pmap
, old_map
->pmap
, new_entry
->vme_start
,
6124 old_entry
->vme_end
- old_entry
->vme_start
,
6125 old_entry
->vme_start
);
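/*
 *	Illustrative sketch (not part of the original source): the
 *	per-entry inheritance value decides how vm_map_fork() treats a
 *	range -- VM_INHERIT_SHARE goes through vm_map_fork_share()
 *	above, VM_INHERIT_COPY through the copy path, and
 *	VM_INHERIT_NONE is left out of the child entirely.  The
 *	vm_map_inherit() call shown is the usual way to set the value on
 *	a range and is assumed from the rest of this module.
 */
#if 0	/* sketch only -- not compiled */
	/* share this range with maps created by vm_map_fork() */
	(void) vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
#endif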
6132 vm_map_entry_t
*old_entry_p
,
6135 vm_map_entry_t old_entry
= *old_entry_p
;
6136 vm_size_t entry_size
= old_entry
->vme_end
- old_entry
->vme_start
;
6137 vm_offset_t start
= old_entry
->vme_start
;
6139 vm_map_entry_t last
= vm_map_last_entry(new_map
);
6141 vm_map_unlock(old_map
);
6143 * Use maxprot version of copyin because we
6144 * care about whether this memory can ever
6145 * be accessed, not just whether it's accessible
6148 if (vm_map_copyin_maxprot(old_map
, start
, entry_size
, FALSE
, ©
)
6151 * The map might have changed while it
6152 * was unlocked, check it again. Skip
6153 * any blank space or permanently
6154 * unreadable region.
6156 vm_map_lock(old_map
);
6157 if (!vm_map_lookup_entry(old_map
, start
, &last
) ||
6158 (last
->max_protection
& VM_PROT_READ
) == VM_PROT_NONE
) {
6159 last
= last
->vme_next
;
6161 *old_entry_p
= last
;
6164 * XXX For some error returns, want to
6165 * XXX skip to the next element. Note
6166 * that INVALID_ADDRESS and
6167 * PROTECTION_FAILURE are handled above.
6174 * Insert the copy into the new map
6177 vm_map_copy_insert(new_map
, last
, copy
);
6180 * Pick up the traversal at the end of
6181 * the copied region.
6184 vm_map_lock(old_map
);
6185 start
+= entry_size
;
6186 if (! vm_map_lookup_entry(old_map
, start
, &last
)) {
6187 last
= last
->vme_next
;
6189 vm_map_clip_start(old_map
, last
, start
);
6191 *old_entry_p
= last
;
6199 * Create and return a new map based on the old
6200 * map, according to the inheritance values on the
6201 * regions in that map.
6203 * The source map must not be locked.
6209 pmap_t new_pmap
= pmap_create((vm_size_t
) 0);
6211 vm_map_entry_t old_entry
;
6212 vm_size_t new_size
= 0, entry_size
;
6213 vm_map_entry_t new_entry
;
6214 boolean_t src_needs_copy
;
6215 boolean_t new_entry_needs_copy
;
6217 vm_map_reference_swap(old_map
);
6218 vm_map_lock(old_map
);
6220 new_map
= vm_map_create(new_pmap
,
6221 old_map
->min_offset
,
6222 old_map
->max_offset
,
6223 old_map
->hdr
.entries_pageable
);
6226 old_entry
= vm_map_first_entry(old_map
);
6227 old_entry
!= vm_map_to_entry(old_map
);
6230 entry_size
= old_entry
->vme_end
- old_entry
->vme_start
;
6232 switch (old_entry
->inheritance
) {
6233 case VM_INHERIT_NONE
:
6236 case VM_INHERIT_SHARE
:
6237 vm_map_fork_share(old_map
, old_entry
, new_map
);
6238 new_size
+= entry_size
;
6241 case VM_INHERIT_COPY
:
6244 * Inline the copy_quickly case;
6245 * upon failure, fall back on call
6246 * to vm_map_fork_copy.
6249 if(old_entry
->is_sub_map
)
6251 if ((old_entry
->wired_count
!= 0) ||
6252 ((old_entry
->object
.vm_object
!= NULL
) &&
6253 (old_entry
->object
.vm_object
->true_share
))) {
6254 goto slow_vm_map_fork_copy
;
6257 new_entry
= vm_map_entry_create(new_map
);
6258 vm_map_entry_copy(new_entry
, old_entry
);
6259 /* clear address space specifics */
6260 new_entry
->use_pmap
= FALSE
;
6262 if (! vm_object_copy_quickly(
6263 &new_entry
->object
.vm_object
,
6265 (old_entry
->vme_end
-
6266 old_entry
->vme_start
),
6268 &new_entry_needs_copy
)) {
6269 vm_map_entry_dispose(new_map
, new_entry
);
6270 goto slow_vm_map_fork_copy
;
6274 * Handle copy-on-write obligations
6277 if (src_needs_copy
&& !old_entry
->needs_copy
) {
6278 vm_object_pmap_protect(
6279 old_entry
->object
.vm_object
,
6281 (old_entry
->vme_end
-
6282 old_entry
->vme_start
),
6283 ((old_entry
->is_shared
6287 old_entry
->vme_start
,
6288 old_entry
->protection
& ~VM_PROT_WRITE
);
6290 old_entry
->needs_copy
= TRUE
;
6292 new_entry
->needs_copy
= new_entry_needs_copy
;
6295 * Insert the entry at the end
6299 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
),
6301 new_size
+= entry_size
;
6304 slow_vm_map_fork_copy
:
6305 if (vm_map_fork_copy(old_map
, &old_entry
, new_map
)) {
6306 new_size
+= entry_size
;
6310 old_entry
= old_entry
->vme_next
;
6313 new_map
->size
= new_size
;
6314 vm_map_unlock(old_map
);
6315 vm_map_deallocate(old_map
);
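/*
 *	Illustrative sketch (not part of the original source): task
 *	creation with inheritance clones the parent's address space by
 *	forking its map.  vm_map_fork() takes a swap reference on the
 *	old map for the duration of the walk, builds a new map with its
 *	own pmap, and returns it.  The caller shown is hypothetical.
 */
#if 0	/* sketch only -- not compiled */
	vm_map_t	child_map;

	child_map = vm_map_fork(parent_task->map);
#endif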
6322 * vm_map_lookup_locked:
6324 * Finds the VM object, offset, and
6325 * protection for a given virtual address in the
6326 * specified map, assuming a page fault of the
6329 * Returns the (object, offset, protection) for
6330 * this address, whether it is wired down, and whether
6331 * this map has the only reference to the data in question.
6332 * In order to later verify this lookup, a "version"
6335 * The map MUST be locked by the caller and WILL be
6336 * locked on exit. In order to guarantee the
6337 * existence of the returned object, it is returned
6340 * If a lookup is requested with "write protection"
6341 * specified, the map may be changed to perform virtual
6342 * copying operations, although the data referenced will
6346 vm_map_lookup_locked(
6347 vm_map_t
*var_map
, /* IN/OUT */
6348 register vm_offset_t vaddr
,
6349 register vm_prot_t fault_type
,
6350 vm_map_version_t
*out_version
, /* OUT */
6351 vm_object_t
*object
, /* OUT */
6352 vm_object_offset_t
*offset
, /* OUT */
6353 vm_prot_t
*out_prot
, /* OUT */
6354 boolean_t
*wired
, /* OUT */
6355 int *behavior
, /* OUT */
6356 vm_object_offset_t
*lo_offset
, /* OUT */
6357 vm_object_offset_t
*hi_offset
, /* OUT */
6360 vm_map_entry_t entry
;
6361 register vm_map_t map
= *var_map
;
6362 vm_map_t old_map
= *var_map
;
6363 vm_map_t cow_sub_map_parent
= VM_MAP_NULL
;
6364 vm_offset_t cow_parent_vaddr
;
6365 vm_offset_t old_start
;
6366 vm_offset_t old_end
;
6367 register vm_prot_t prot
;
6373 * If the map has an interesting hint, try it before calling
6374 * full blown lookup routine.
6377 mutex_lock(&map
->s_lock
);
6379 mutex_unlock(&map
->s_lock
);
6381 if ((entry
== vm_map_to_entry(map
)) ||
6382 (vaddr
< entry
->vme_start
) || (vaddr
>= entry
->vme_end
)) {
6383 vm_map_entry_t tmp_entry
;
6386 * Entry was either not a valid hint, or the vaddr
6387 * was not contained in the entry, so do a full lookup.
6389 if (!vm_map_lookup_entry(map
, vaddr
, &tmp_entry
)) {
6390 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
))
6391 vm_map_unlock(cow_sub_map_parent
);
6392 if((*pmap_map
!= map
)
6393 && (*pmap_map
!= cow_sub_map_parent
))
6394 vm_map_unlock(*pmap_map
);
6395 return KERN_INVALID_ADDRESS
;
6400 if(map
== old_map
) {
6401 old_start
= entry
->vme_start
;
6402 old_end
= entry
->vme_end
;
6406 * Handle submaps. Drop lock on upper map, submap is
6411 if (entry
->is_sub_map
) {
6412 vm_offset_t local_vaddr
;
6413 vm_offset_t end_delta
;
6414 vm_offset_t start_delta
;
6415 vm_offset_t object_start_delta
;
6416 vm_map_entry_t submap_entry
;
6417 boolean_t mapped_needs_copy
=FALSE
;
6419 local_vaddr
= vaddr
;
6421 if ((!entry
->needs_copy
) && (entry
->use_pmap
)) {
6422 /* if pmap_map equals map we unlock below */
6423 if ((*pmap_map
!= map
) &&
6424 (*pmap_map
!= cow_sub_map_parent
))
6425 vm_map_unlock(*pmap_map
);
6426 *pmap_map
= entry
->object
.sub_map
;
6429 if(entry
->needs_copy
) {
6430 if (!mapped_needs_copy
) {
6431 if (vm_map_lock_read_to_write(map
)) {
6432 vm_map_lock_read(map
);
6433 if(*pmap_map
== entry
->object
.sub_map
)
6437 vm_map_lock_read(entry
->object
.sub_map
);
6438 cow_sub_map_parent
= map
;
6439 /* reset base to map before cow object */
6440 /* this is the map which will accept */
6441 /* the new cow object */
6442 old_start
= entry
->vme_start
;
6443 old_end
= entry
->vme_end
;
6444 cow_parent_vaddr
= vaddr
;
6445 mapped_needs_copy
= TRUE
;
6447 vm_map_lock_read(entry
->object
.sub_map
);
6448 if((cow_sub_map_parent
!= map
) &&
6453 vm_map_lock_read(entry
->object
.sub_map
);
6454 /* leave map locked if it is a target */
6455 /* cow sub_map above otherwise, just */
6456 /* follow the maps down to the object */
6457 /* here we unlock knowing we are not */
6458 /* revisiting the map. */
6459 if((*pmap_map
!= map
) && (map
!= cow_sub_map_parent
))
6460 vm_map_unlock_read(map
);
6463 *var_map
= map
= entry
->object
.sub_map
;
6465 /* calculate the offset in the submap for vaddr */
6466 local_vaddr
= (local_vaddr
- entry
->vme_start
) + entry
->offset
;
6469 if(!vm_map_lookup_entry(map
, local_vaddr
, &submap_entry
)) {
6470 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
)){
6471 vm_map_unlock(cow_sub_map_parent
);
6473 if((*pmap_map
!= map
)
6474 && (*pmap_map
!= cow_sub_map_parent
)) {
6475 vm_map_unlock(*pmap_map
);
6478 return KERN_INVALID_ADDRESS
;
6480 /* find the attenuated shadow of the underlying object */
6481 /* on our target map */
6483 /* in English the submap object may extend beyond the */
6484 /* region mapped by the entry, or may only fill a portion */
6485 /* of it. For our purposes, we only care if the object */
6486 /* doesn't fill. In this case the area which will */
6487 /* ultimately be clipped in the top map will only need */
6488 /* to be as big as the portion of the underlying entry */
6489 /* which is mapped */
6490 start_delta
= submap_entry
->vme_start
> entry
->offset
?
6491 submap_entry
->vme_start
- entry
->offset
: 0;
6494 (entry
->offset
+ start_delta
+ (old_end
- old_start
)) <=
6495 submap_entry
->vme_end
?
6496 0 : (entry
->offset
+
6497 (old_end
- old_start
))
6498 - submap_entry
->vme_end
;
6500 old_start
+= start_delta
;
6501 old_end
-= end_delta
;
6503 if(submap_entry
->is_sub_map
) {
6504 entry
= submap_entry
;
6505 vaddr
= local_vaddr
;
6506 goto submap_recurse
;
6509 if(((fault_type
& VM_PROT_WRITE
) && cow_sub_map_parent
)) {
6511 vm_object_t copy_object
;
6512 vm_offset_t local_start
;
6513 vm_offset_t local_end
;
6514 boolean_t copied_slowly
= FALSE
;
6516 if (vm_map_lock_read_to_write(map
)) {
6517 vm_map_lock_read(map
);
6518 old_start
-= start_delta
;
6519 old_end
+= end_delta
;
6524 if (submap_entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6525 submap_entry
->object
.vm_object
=
6528 (submap_entry
->vme_end
6529 - submap_entry
->vme_start
));
6530 submap_entry
->offset
= 0;
6532 local_start
= local_vaddr
-
6533 (cow_parent_vaddr
- old_start
);
6534 local_end
= local_vaddr
+
6535 (old_end
- cow_parent_vaddr
);
6536 vm_map_clip_start(map
, submap_entry
, local_start
);
6537 vm_map_clip_end(map
, submap_entry
, local_end
);
6539 /* This is the COW case, lets connect */
6540 /* an entry in our space to the underlying */
6541 /* object in the submap, bypassing the */
6545 if(submap_entry
->wired_count
!= 0) {
6547 submap_entry
->object
.vm_object
);
6548 vm_object_copy_slowly(
6549 submap_entry
->object
.vm_object
,
6550 submap_entry
->offset
,
6551 submap_entry
->vme_end
-
6552 submap_entry
->vme_start
,
6555 copied_slowly
= TRUE
;
6558 /* set up shadow object */
6559 copy_object
= submap_entry
->object
.vm_object
;
6560 vm_object_reference(copy_object
);
6561 submap_entry
->object
.vm_object
->shadowed
= TRUE
;
6562 submap_entry
->needs_copy
= TRUE
;
6563 vm_object_pmap_protect(
6564 submap_entry
->object
.vm_object
,
6565 submap_entry
->offset
,
6566 submap_entry
->vme_end
-
6567 submap_entry
->vme_start
,
6568 (submap_entry
->is_shared
6570 PMAP_NULL
: map
->pmap
,
6571 submap_entry
->vme_start
,
6572 submap_entry
->protection
&
6577 /* This works differently from the */
6578 /* normal submap case. We go back */
6579 /* to the parent of the cow map and*/
6580 /* clip out the target portion of */
6581 /* the sub_map, substituting the */
6582 /* new copy object, */
6585 local_start
= old_start
;
6586 local_end
= old_end
;
6587 map
= cow_sub_map_parent
;
6588 *var_map
= cow_sub_map_parent
;
6589 vaddr
= cow_parent_vaddr
;
6590 cow_sub_map_parent
= NULL
;
6592 if(!vm_map_lookup_entry(map
,
6594 vm_object_deallocate(
6596 vm_map_lock_write_to_read(map
);
6597 return KERN_INVALID_ADDRESS
;
6600 /* clip out the portion of space */
6601 /* mapped by the sub map which */
6602 /* corresponds to the underlying */
6604 vm_map_clip_start(map
, entry
, local_start
);
6605 vm_map_clip_end(map
, entry
, local_end
);
6608 /* substitute copy object for */
6609 /* shared map entry */
6610 vm_map_deallocate(entry
->object
.sub_map
);
6611 entry
->is_sub_map
= FALSE
;
6612 entry
->object
.vm_object
= copy_object
;
6614 entry
->protection
|= VM_PROT_WRITE
;
6615 entry
->max_protection
|= VM_PROT_WRITE
;
6618 entry
->needs_copy
= FALSE
;
6619 entry
->is_shared
= FALSE
;
6621 entry
->offset
= submap_entry
->offset
;
6622 entry
->needs_copy
= TRUE
;
6623 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6624 entry
->inheritance
= VM_INHERIT_COPY
;
6626 entry
->is_shared
= TRUE
;
6628 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6629 entry
->inheritance
= VM_INHERIT_COPY
;
6631 vm_map_lock_write_to_read(map
);
6633 if((cow_sub_map_parent
)
6634 && (cow_sub_map_parent
!= *pmap_map
)
6635 && (cow_sub_map_parent
!= map
)) {
6636 vm_map_unlock(cow_sub_map_parent
);
6638 entry
= submap_entry
;
6639 vaddr
= local_vaddr
;
6644 * Check whether this task is allowed to have
6648 prot
= entry
->protection
;
6649 if ((fault_type
& (prot
)) != fault_type
) {
6650 if (*pmap_map
!= map
) {
6651 vm_map_unlock(*pmap_map
);
6654 return KERN_PROTECTION_FAILURE
;
6658 * If this page is not pageable, we have to get
6659 * it for all possible accesses.
6662 if (*wired
= (entry
->wired_count
!= 0))
6663 prot
= fault_type
= entry
->protection
;
6666 * If the entry was copy-on-write, we either ...
6669 if (entry
->needs_copy
) {
6671 * If we want to write the page, we may as well
6672 * handle that now since we've got the map locked.
6674 * If we don't need to write the page, we just
6675 * demote the permissions allowed.
6678 if (fault_type
& VM_PROT_WRITE
|| *wired
) {
6680 * Make a new object, and place it in the
6681 * object chain. Note that no new references
6682 * have appeared -- one just moved from the
6683 * map to the new object.
6686 if (vm_map_lock_read_to_write(map
)) {
6687 vm_map_lock_read(map
);
6690 vm_object_shadow(&entry
->object
.vm_object
,
6692 (vm_size_t
) (entry
->vme_end
-
6695 entry
->object
.vm_object
->shadowed
= TRUE
;
6696 entry
->needs_copy
= FALSE
;
6697 vm_map_lock_write_to_read(map
);
6701 * We're attempting to read a copy-on-write
6702 * page -- don't allow writes.
6705 prot
&= (~VM_PROT_WRITE
);
6710 * Create an object if necessary.
6712 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6714 if (vm_map_lock_read_to_write(map
)) {
6715 vm_map_lock_read(map
);
6719 entry
->object
.vm_object
= vm_object_allocate(
6720 (vm_size_t
)(entry
->vme_end
- entry
->vme_start
));
6722 vm_map_lock_write_to_read(map
);
6726 * Return the object/offset from this entry. If the entry
6727 * was copy-on-write or empty, it has been fixed up. Also
6728 * return the protection.
6731 *offset
= (vaddr
- entry
->vme_start
) + entry
->offset
;
6732 *object
= entry
->object
.vm_object
;
6734 *behavior
= entry
->behavior
;
6735 *lo_offset
= entry
->offset
;
6736 *hi_offset
= (entry
->vme_end
- entry
->vme_start
) + entry
->offset
;
6739 * Lock the object to prevent it from disappearing
6742 vm_object_lock(*object
);
6745 * Save the version number
6748 out_version
->main_timestamp
= map
->timestamp
;
6750 return KERN_SUCCESS
;
6757 * Verifies that the map in question has not changed
6758 * since the given version. If successful, the map
6759 * will not change until vm_map_verify_done() is called.
6763 register vm_map_t map
,
6764 register vm_map_version_t
*version
) /* REF */
6768 vm_map_lock_read(map
);
6769 result
= (map
->timestamp
== version
->main_timestamp
);
6772 vm_map_unlock_read(map
);
6778 * vm_map_verify_done:
6780 * Releases locks acquired by a vm_map_verify.
6782 * This is now a macro in vm/vm_map.h. It does a
6783 * vm_map_unlock_read on the map.
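/*
 *	Illustrative sketch (not part of the original source): the
 *	lookup / verify protocol used by the fault path.  The map is
 *	read-locked, vm_map_lookup_locked() hands back the object,
 *	offset and protection plus a version stamp (and returns the
 *	object locked), the map lock can then be dropped while the fault
 *	is resolved, and vm_map_verify() later confirms the map is
 *	unchanged before the result is installed; vm_map_verify_done()
 *	releases the read lock vm_map_verify() left held.  Variables
 *	other than the routines named are hypothetical.
 */
#if 0	/* sketch only -- not compiled */
	vm_map_version_t	version;
	vm_object_t		object;
	vm_object_offset_t	offset, lo_offset, hi_offset;
	vm_prot_t		prot;
	boolean_t		wired;
	int			behavior;
	vm_map_t		pmap_map;

	vm_map_lock_read(map);
	kr = vm_map_lookup_locked(&map, vaddr, fault_type, &version,
				  &object, &offset, &prot, &wired,
				  &behavior, &lo_offset, &hi_offset,
				  &pmap_map);

	/* ... drop the map lock, resolve the fault against
	 *     (object, offset), then before committing: ... */

	if (vm_map_verify(map, &version)) {
		/* map unchanged; still read-locked until: */
		vm_map_verify_done(map, &version);
	} else {
		/* map changed while unlocked: retry the lookup */
	}
#endif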
6790 * User call to obtain information about a region in
6791 * a task's address map. Currently, only one flavor is
6794 * XXX The reserved and behavior fields cannot be filled
6795 * in until the vm merge from the IK is completed, and
6796 * vm_reserve is implemented.
6798 * XXX Dependency: syscall_vm_region() also supports only one flavor.
6804 vm_offset_t
*address
, /* IN/OUT */
6805 vm_size_t
*size
, /* OUT */
6806 vm_region_flavor_t flavor
, /* IN */
6807 vm_region_info_t info
, /* OUT */
6808 mach_msg_type_number_t
*count
, /* IN/OUT */
6809 ipc_port_t
*object_name
) /* OUT */
6811 vm_map_entry_t tmp_entry
;
6813 vm_map_entry_t entry
;
6816 vm_region_basic_info_t basic
;
6817 vm_region_extended_info_t extended
;
6818 vm_region_top_info_t top
;
6820 if (map
== VM_MAP_NULL
)
6821 return(KERN_INVALID_ARGUMENT
);
6825 case VM_REGION_BASIC_INFO
:
6827 if (*count
< VM_REGION_BASIC_INFO_COUNT
)
6828 return(KERN_INVALID_ARGUMENT
);
6830 basic
= (vm_region_basic_info_t
) info
;
6831 *count
= VM_REGION_BASIC_INFO_COUNT
;
6833 vm_map_lock_read(map
);
6836 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6837 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6838 vm_map_unlock_read(map
);
6839 return(KERN_INVALID_ADDRESS
);
6845 start
= entry
->vme_start
;
6847 basic
->offset
= entry
->offset
;
6848 basic
->protection
= entry
->protection
;
6849 basic
->inheritance
= entry
->inheritance
;
6850 basic
->max_protection
= entry
->max_protection
;
6851 basic
->behavior
= entry
->behavior
;
6852 basic
->user_wired_count
= entry
->user_wired_count
;
6853 basic
->reserved
= entry
->is_sub_map
;
6855 *size
= (entry
->vme_end
- start
);
6857 if (object_name
) *object_name
= IP_NULL
;
6858 if (entry
->is_sub_map
) {
6859 basic
->shared
= FALSE
;
6861 basic
->shared
= entry
->is_shared
;
6864 vm_map_unlock_read(map
);
6865 return(KERN_SUCCESS
);
6867 case VM_REGION_EXTENDED_INFO
:
6870 if (*count
< VM_REGION_EXTENDED_INFO_COUNT
)
6871 return(KERN_INVALID_ARGUMENT
);
6873 extended
= (vm_region_extended_info_t
) info
;
6874 *count
= VM_REGION_EXTENDED_INFO_COUNT
;
6876 vm_map_lock_read(map
);
6879 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6880 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6881 vm_map_unlock_read(map
);
6882 return(KERN_INVALID_ADDRESS
);
6887 start
= entry
->vme_start
;
6889 extended
->protection
= entry
->protection
;
6890 extended
->user_tag
= entry
->alias
;
6891 extended
->pages_resident
= 0;
6892 extended
->pages_swapped_out
= 0;
6893 extended
->pages_shared_now_private
= 0;
6894 extended
->pages_dirtied
= 0;
6895 extended
->external_pager
= 0;
6896 extended
->shadow_depth
= 0;
6898 vm_region_walk(entry
, extended
, entry
->offset
, entry
->vme_end
- start
, map
, start
);
6900 if (extended
->external_pager
&& extended
->ref_count
== 2 && extended
->share_mode
== SM_SHARED
)
6901 extended
->share_mode
= SM_PRIVATE
;
6904 *object_name
= IP_NULL
;
6906 *size
= (entry
->vme_end
- start
);
6908 vm_map_unlock_read(map
);
6909 return(KERN_SUCCESS
);
6911 case VM_REGION_TOP_INFO
:
6914 if (*count
< VM_REGION_TOP_INFO_COUNT
)
6915 return(KERN_INVALID_ARGUMENT
);
6917 top
= (vm_region_top_info_t
) info
;
6918 *count
= VM_REGION_TOP_INFO_COUNT
;
6920 vm_map_lock_read(map
);
6923 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6924 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6925 vm_map_unlock_read(map
);
6926 return(KERN_INVALID_ADDRESS
);
6932 start
= entry
->vme_start
;
6934 top
->private_pages_resident
= 0;
6935 top
->shared_pages_resident
= 0;
6937 vm_region_top_walk(entry
, top
);
6940 *object_name
= IP_NULL
;
6942 *size
= (entry
->vme_end
- start
);
6944 vm_map_unlock_read(map
);
6945 return(KERN_SUCCESS
);
6948 return(KERN_INVALID_ARGUMENT
);
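/*
 *	Illustrative sketch (not part of the original source): querying a
 *	region with the VM_REGION_BASIC_INFO flavor.  "address" is moved
 *	to the start of the containing entry, "size" returns that
 *	entry's extent, and "count" is both the capacity of the info
 *	buffer and the number of fields filled in.  Local names are
 *	hypothetical.
 */
#if 0	/* sketch only -- not compiled */
	vm_offset_t			address = some_addr;	/* hypothetical */
	vm_size_t			size;
	vm_region_basic_info_data_t	info;
	mach_msg_type_number_t		count = VM_REGION_BASIC_INFO_COUNT;
	ipc_port_t			object_name;

	kr = vm_region(map, &address, &size, VM_REGION_BASIC_INFO,
		       (vm_region_info_t) &info, &count, &object_name);
#endif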
6953 * vm_region_recurse: A form of vm_region which follows the
6954 * submaps in a target map
6961 vm_offset_t
*address
, /* IN/OUT */
6962 vm_size_t
*size
, /* OUT */
6963 natural_t
*nesting_depth
, /* IN/OUT */
6964 vm_region_recurse_info_t info
, /* IN/OUT */
6965 mach_msg_type_number_t
*count
) /* IN/OUT */
6967 vm_map_entry_t tmp_entry
;
6969 vm_map_entry_t entry
;
6973 unsigned int recurse_count
;
6976 vm_map_entry_t base_entry
;
6977 vm_offset_t base_next
;
6978 vm_offset_t base_addr
;
6979 vm_offset_t baddr_start_delta
;
6980 vm_region_submap_info_t submap_info
;
6981 vm_region_extended_info_data_t extended
;
6983 if (map
== VM_MAP_NULL
)
6984 return(KERN_INVALID_ARGUMENT
);
6986 submap_info
= (vm_region_submap_info_t
) info
;
6987 *count
= VM_REGION_SUBMAP_INFO_COUNT
;
6989 if (*count
< VM_REGION_SUBMAP_INFO_COUNT
)
6990 return(KERN_INVALID_ARGUMENT
);
6994 recurse_count
= *nesting_depth
;
6996 LOOKUP_NEXT_BASE_ENTRY
:
6997 vm_map_lock_read(map
);
6998 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6999 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
7000 vm_map_unlock_read(map
);
7001 return(KERN_INVALID_ADDRESS
);
7006 *size
= entry
->vme_end
- entry
->vme_start
;
7007 start
= entry
->vme_start
;
7009 baddr_start_delta
= *address
- start
;
7010 base_next
= entry
->vme_end
;
7013 while(entry
->is_sub_map
&& recurse_count
) {
7015 vm_map_lock_read(entry
->object
.sub_map
);
7018 if(entry
== base_entry
) {
7019 start
= entry
->offset
;
7020 start
+= *address
- entry
->vme_start
;
7023 submap
= entry
->object
.sub_map
;
7024 vm_map_unlock_read(map
);
7027 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7028 if ((entry
= tmp_entry
->vme_next
)
7029 == vm_map_to_entry(map
)) {
7030 vm_map_unlock_read(map
);
7035 goto LOOKUP_NEXT_BASE_ENTRY
;
7041 if(start
<= entry
->vme_start
) {
7042 vm_offset_t old_start
= start
;
7043 if(baddr_start_delta
) {
7044 base_addr
+= (baddr_start_delta
);
7045 *size
-= baddr_start_delta
;
7046 baddr_start_delta
= 0;
7049 (base_addr
+= (entry
->vme_start
- start
))) {
7050 vm_map_unlock_read(map
);
7055 goto LOOKUP_NEXT_BASE_ENTRY
;
7057 *size
-= entry
->vme_start
- start
;
7058 if (*size
> (entry
->vme_end
- entry
->vme_start
)) {
7059 *size
= entry
->vme_end
- entry
->vme_start
;
7063 if(baddr_start_delta
) {
7064 if((start
- entry
->vme_start
)
7065 < baddr_start_delta
) {
7066 base_addr
+= start
- entry
->vme_start
;
7067 *size
-= start
- entry
->vme_start
;
7069 base_addr
+= baddr_start_delta
;
7070 *size
+= baddr_start_delta
;
7072 baddr_start_delta
= 0;
7074 base_addr
+= entry
->vme_start
;
7075 if(base_addr
>= base_next
) {
7076 vm_map_unlock_read(map
);
7081 goto LOOKUP_NEXT_BASE_ENTRY
;
7083 if (*size
> (entry
->vme_end
- start
))
7084 *size
= entry
->vme_end
- start
;
7086 start
= entry
->vme_start
- start
;
7089 start
+= entry
->offset
;
7092 *nesting_depth
-= recurse_count
;
7093 if(entry
!= base_entry
) {
7094 start
= entry
->vme_start
+ (start
- entry
->offset
);
7098 submap_info
->user_tag
= entry
->alias
;
7099 submap_info
->offset
= entry
->offset
;
7100 submap_info
->protection
= entry
->protection
;
7101 submap_info
->inheritance
= entry
->inheritance
;
7102 submap_info
->max_protection
= entry
->max_protection
;
7103 submap_info
->behavior
= entry
->behavior
;
7104 submap_info
->user_wired_count
= entry
->user_wired_count
;
7105 submap_info
->is_submap
= entry
->is_sub_map
;
7106 submap_info
->object_id
= (vm_offset_t
)entry
->object
.vm_object
;
7107 *address
= base_addr
;
7110 extended
.pages_resident
= 0;
7111 extended
.pages_swapped_out
= 0;
7112 extended
.pages_shared_now_private
= 0;
7113 extended
.pages_dirtied
= 0;
7114 extended
.external_pager
= 0;
7115 extended
.shadow_depth
= 0;
7117 if(!entry
->is_sub_map
) {
7118 vm_region_walk(entry
, &extended
, entry
->offset
,
7119 entry
->vme_end
- start
, map
, start
);
7120 submap_info
->share_mode
= extended
.share_mode
;
7121 if (extended
.external_pager
&& extended
.ref_count
== 2
7122 && extended
.share_mode
== SM_SHARED
)
7123 submap_info
->share_mode
= SM_PRIVATE
;
7124 submap_info
->ref_count
= extended
.ref_count
;
7127 submap_info
->share_mode
= SM_TRUESHARED
;
7129 submap_info
->share_mode
= SM_PRIVATE
;
7130 submap_info
->ref_count
= entry
->object
.sub_map
->ref_count
;
7133 submap_info
->pages_resident
= extended
.pages_resident
;
7134 submap_info
->pages_swapped_out
= extended
.pages_swapped_out
;
7135 submap_info
->pages_shared_now_private
=
7136 extended
.pages_shared_now_private
;
7137 submap_info
->pages_dirtied
= extended
.pages_dirtied
;
7138 submap_info
->external_pager
= extended
.external_pager
;
7139 submap_info
->shadow_depth
= extended
.shadow_depth
;
7141 vm_map_unlock_read(map
);
7142 return(KERN_SUCCESS
);
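/*
 *	Illustrative sketch (not part of the original source): walking an
 *	address space with vm_region_recurse().  nesting_depth is
 *	IN/OUT: it bounds how far the routine descends into submaps and
 *	comes back reporting how deep the returned entry actually was.
 *	Local names and the starting address are hypothetical.
 */
#if 0	/* sketch only -- not compiled */
	vm_offset_t			addr = VM_MIN_ADDRESS;
	vm_size_t			size;
	natural_t			depth;
	vm_region_submap_info_data_t	info;
	mach_msg_type_number_t		count;

	for (;;) {
		depth = 1;	/* descend at most one submap level */
		count = VM_REGION_SUBMAP_INFO_COUNT;
		if (vm_region_recurse(map, &addr, &size, &depth,
				      (vm_region_recurse_info_t) &info,
				      &count) != KERN_SUCCESS)
			break;
		/* ... examine info for [addr, addr + size) ... */
		addr += size;
	}
#endif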
7146 * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
7147 * Goes away after regular vm_region_recurse function migrates to
7149 * vm_region_recurse: A form of vm_region which follows the
7150 * submaps in a target map
7155 vm_region_recurse_64(
7157 vm_offset_t
*address
, /* IN/OUT */
7158 vm_size_t
*size
, /* OUT */
7159 natural_t
*nesting_depth
    ,                                       /* IN/OUT */
    vm_region_recurse_info_t    info,       /* IN/OUT */
    mach_msg_type_number_t      *count)     /* IN/OUT */
    vm_map_entry_t  tmp_entry;
    vm_map_entry_t  entry;
    unsigned int    recurse_count;
    vm_map_entry_t  base_entry;
    vm_offset_t     base_next;
    vm_offset_t     base_addr;
    vm_offset_t     baddr_start_delta;
    vm_region_submap_info_64_t      submap_info;
    vm_region_extended_info_data_t  extended;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    submap_info = (vm_region_submap_info_64_t) info;
    *count = VM_REGION_SUBMAP_INFO_COUNT;

    if (*count < VM_REGION_SUBMAP_INFO_COUNT)
        return(KERN_INVALID_ARGUMENT);

    recurse_count = *nesting_depth;

LOOKUP_NEXT_BASE_ENTRY:
    vm_map_lock_read(map);
    if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
        if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
            vm_map_unlock_read(map);
            return(KERN_INVALID_ADDRESS);

    *size = entry->vme_end - entry->vme_start;
    start = entry->vme_start;
    baddr_start_delta = *address - start;
    base_next = entry->vme_end;

    while(entry->is_sub_map && recurse_count) {
        vm_map_lock_read(entry->object.sub_map);
        if(entry == base_entry) {
            start = entry->offset;
            start += *address - entry->vme_start;

        submap = entry->object.sub_map;
        vm_map_unlock_read(map);

        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                goto LOOKUP_NEXT_BASE_ENTRY;

        if(start <= entry->vme_start) {
            vm_offset_t old_start = start;
            if(baddr_start_delta) {
                base_addr += (baddr_start_delta);
                *size -= baddr_start_delta;
                baddr_start_delta = 0;
                (base_addr += (entry->vme_start - start))) {
                vm_map_unlock_read(map);
                goto LOOKUP_NEXT_BASE_ENTRY;
            *size -= entry->vme_start - start;
            if (*size > (entry->vme_end - entry->vme_start)) {
                *size = entry->vme_end - entry->vme_start;
            if(baddr_start_delta) {
                if((start - entry->vme_start) < baddr_start_delta) {
                    base_addr += start - entry->vme_start;
                    *size -= start - entry->vme_start;
                    base_addr += baddr_start_delta;
                    *size += baddr_start_delta;
                baddr_start_delta = 0;
            base_addr += entry->vme_start;
            if(base_addr >= base_next) {
                vm_map_unlock_read(map);
                goto LOOKUP_NEXT_BASE_ENTRY;
            if (*size > (entry->vme_end - start))
                *size = entry->vme_end - start;
            start = entry->vme_start - start;
        start += entry->offset;

    *nesting_depth -= recurse_count;
    if(entry != base_entry) {
        start = entry->vme_start + (start - entry->offset);

    submap_info->user_tag = entry->alias;
    submap_info->offset = entry->offset;
    submap_info->protection = entry->protection;
    submap_info->inheritance = entry->inheritance;
    submap_info->max_protection = entry->max_protection;
    submap_info->behavior = entry->behavior;
    submap_info->user_wired_count = entry->user_wired_count;
    submap_info->is_submap = entry->is_sub_map;
    submap_info->object_id = (vm_offset_t)entry->object.vm_object;
    *address = base_addr;

    extended.pages_resident = 0;
    extended.pages_swapped_out = 0;
    extended.pages_shared_now_private = 0;
    extended.pages_dirtied = 0;
    extended.external_pager = 0;
    extended.shadow_depth = 0;

    if(!entry->is_sub_map) {
        vm_region_walk(entry, &extended, entry->offset,
                       entry->vme_end - start, map, start);
        submap_info->share_mode = extended.share_mode;
        if (extended.external_pager && extended.ref_count == 2
                    && extended.share_mode == SM_SHARED)
            submap_info->share_mode = SM_PRIVATE;
        submap_info->ref_count = extended.ref_count;
        submap_info->share_mode = SM_TRUESHARED;
        submap_info->share_mode = SM_PRIVATE;
        submap_info->ref_count = entry->object.sub_map->ref_count;

    submap_info->pages_resident = extended.pages_resident;
    submap_info->pages_swapped_out = extended.pages_swapped_out;
    submap_info->pages_shared_now_private =
                    extended.pages_shared_now_private;
    submap_info->pages_dirtied = extended.pages_dirtied;
    submap_info->external_pager = extended.external_pager;
    submap_info->shadow_depth = extended.shadow_depth;

    vm_map_unlock_read(map);
    return(KERN_SUCCESS);
/*
 *  TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 *  Goes away after regular vm_region function migrates to
 *  64 bits.
 */
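/*
 *  Illustrative sketch (not from the original source): how a user-level
 *  client typically drives this interface with the VM_REGION_BASIC_INFO
 *  flavor.  The variable names below are hypothetical.
 *
 *      kern_return_t                   kr;
 *      vm_address_t                    addr = 0;
 *      vm_size_t                       len;
 *      vm_region_basic_info_data_t     info;
 *      mach_msg_type_number_t          count = VM_REGION_BASIC_INFO_COUNT;
 *      mach_port_t                     obj_name;
 *
 *      kr = vm_region(mach_task_self(), &addr, &len, VM_REGION_BASIC_INFO,
 *                     (vm_region_info_t)&info, &count, &obj_name);
 *
 *  On KERN_SUCCESS, addr and len describe the first region at or above the
 *  requested address, and info carries its protection, inheritance and
 *  sharing state.
 */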
    vm_offset_t             *address,       /* IN/OUT */
    vm_size_t               *size,          /* OUT */
    vm_region_flavor_t       flavor,        /* IN */
    vm_region_info_t         info,          /* OUT */
    mach_msg_type_number_t  *count,         /* IN/OUT */
    ipc_port_t              *object_name)   /* OUT */
    vm_map_entry_t      tmp_entry;
    vm_map_entry_t      entry;
    vm_region_basic_info_64_t   basic;
    vm_region_extended_info_t   extended;
    vm_region_top_info_t        top;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    case VM_REGION_BASIC_INFO:
        if (*count < VM_REGION_BASIC_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        basic = (vm_region_basic_info_64_t) info;
        *count = VM_REGION_BASIC_INFO_COUNT;

        vm_map_lock_read(map);

        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);

        start = entry->vme_start;

        basic->offset = entry->offset;
        basic->protection = entry->protection;
        basic->inheritance = entry->inheritance;
        basic->max_protection = entry->max_protection;
        basic->behavior = entry->behavior;
        basic->user_wired_count = entry->user_wired_count;
        basic->reserved = entry->is_sub_map;
        *size = (entry->vme_end - start);

        if (object_name) *object_name = IP_NULL;
        if (entry->is_sub_map) {
            basic->shared = FALSE;
            basic->shared = entry->is_shared;

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);

    case VM_REGION_EXTENDED_INFO:
        if (*count < VM_REGION_EXTENDED_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        extended = (vm_region_extended_info_t) info;
        *count = VM_REGION_EXTENDED_INFO_COUNT;

        vm_map_lock_read(map);

        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);

        start = entry->vme_start;

        extended->protection = entry->protection;
        extended->user_tag = entry->alias;
        extended->pages_resident = 0;
        extended->pages_swapped_out = 0;
        extended->pages_shared_now_private = 0;
        extended->pages_dirtied = 0;
        extended->external_pager = 0;
        extended->shadow_depth = 0;

        vm_region_walk(entry, extended, entry->offset,
                       entry->vme_end - start, map, start);

        if (extended->external_pager && extended->ref_count == 2 &&
            extended->share_mode == SM_SHARED)
            extended->share_mode = SM_PRIVATE;

        *object_name = IP_NULL;
        *size = (entry->vme_end - start);

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);

    case VM_REGION_TOP_INFO:
        if (*count < VM_REGION_TOP_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        top = (vm_region_top_info_t) info;
        *count = VM_REGION_TOP_INFO_COUNT;

        vm_map_lock_read(map);

        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);

        start = entry->vme_start;

        top->private_pages_resident = 0;
        top->shared_pages_resident = 0;

        vm_region_top_walk(entry, top);

        *object_name = IP_NULL;
        *size = (entry->vme_end - start);

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);

    return(KERN_INVALID_ARGUMENT);
    vm_map_entry_t          entry,
    vm_region_top_info_t    top)
    register struct vm_object *obj, *tmp_obj;
    register int    ref_count;

    if (entry->object.vm_object == 0 || entry->is_sub_map) {
        top->share_mode = SM_EMPTY;

    obj = entry->object.vm_object;

    vm_object_lock(obj);

    if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)

        top->private_pages_resident = obj->resident_page_count;
        top->shared_pages_resident = obj->resident_page_count;
        top->ref_count = ref_count;
        top->share_mode = SM_COW;

        while (tmp_obj = obj->shadow) {
            vm_object_lock(tmp_obj);
            vm_object_unlock(obj);

            if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)

            top->shared_pages_resident += obj->resident_page_count;
            top->ref_count += ref_count - 1;

        if (entry->needs_copy) {
            top->share_mode = SM_COW;
            top->shared_pages_resident = obj->resident_page_count;

        if (ref_count == 1 ||
            (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
            top->share_mode = SM_PRIVATE;
            top->private_pages_resident = obj->resident_page_count;

            top->share_mode = SM_SHARED;
            top->shared_pages_resident = obj->resident_page_count;

        top->ref_count = ref_count;

    top->obj_id = (int)obj;

    vm_object_unlock(obj);
    vm_map_entry_t              entry,
    vm_region_extended_info_t   extended,
    vm_object_offset_t          offset,
    register struct vm_object *obj, *tmp_obj;
    register vm_offset_t    last_offset;
    register int            ref_count;
    void vm_region_look_for_page();

    if ((entry->object.vm_object == 0) ||
        (entry->is_sub_map) ||
        (entry->object.vm_object->phys_contiguous)) {
        extended->share_mode = SM_EMPTY;
        extended->ref_count = 0;

    obj = entry->object.vm_object;

    vm_object_lock(obj);

    if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)

    for (last_offset = offset + range; offset < last_offset;
         offset += PAGE_SIZE_64, va += PAGE_SIZE)
        vm_region_look_for_page(obj, extended, offset, ref_count, 0, map, va);

    if (extended->shadow_depth || entry->needs_copy)
        extended->share_mode = SM_COW;

        extended->share_mode = SM_PRIVATE;
        if (obj->true_share)
            extended->share_mode = SM_TRUESHARED;
            extended->share_mode = SM_SHARED;

    extended->ref_count = ref_count - extended->shadow_depth;

    for (i = 0; i < extended->shadow_depth; i++) {
        if ((tmp_obj = obj->shadow) == 0)

        vm_object_lock(tmp_obj);
        vm_object_unlock(obj);

        if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress)

        extended->ref_count += ref_count;

    vm_object_unlock(obj);

    if (extended->share_mode == SM_SHARED) {
        register vm_map_entry_t cur;
        register vm_map_entry_t last;

        obj = entry->object.vm_object;
        last = vm_map_to_entry(map);

        if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)

        for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
            my_refs += vm_region_count_obj_refs(cur, obj);

        if (my_refs == ref_count)
            extended->share_mode = SM_PRIVATE_ALIASED;
        else if (my_refs > 1)
            extended->share_mode = SM_SHARED_ALIASED;
/* object is locked on entry and locked on return */
vm_region_look_for_page(
    vm_region_extended_info_t   extended,
    vm_object_offset_t          offset,
    register vm_page_t      p;
    register vm_object_t    shadow;
    register int            ref_count;
    vm_object_t             caller_object;

    shadow = object->shadow;
    caller_object = object;

        if ( !(object->pager_trusted) && !(object->internal))
            extended->external_pager = 1;

        if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
            if (shadow && (max_refcnt == 1))
                extended->pages_shared_now_private++;

            if (!p->fictitious &&
                (p->dirty || pmap_is_modified(p->phys_page)))
                extended->pages_dirtied++;
            extended->pages_resident++;

            if(object != caller_object)
                vm_object_unlock(object);

        if (object->existence_map) {
            if (vm_external_state_get(object->existence_map, offset)
                                        == VM_EXTERNAL_STATE_EXISTS) {
                extended->pages_swapped_out++;

                if(object != caller_object)
                    vm_object_unlock(object);

            vm_object_lock(shadow);

            if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)

            if (++depth > extended->shadow_depth)
                extended->shadow_depth = depth;

            if (ref_count > max_refcnt)
                max_refcnt = ref_count;

            if(object != caller_object)
                vm_object_unlock(object);

            shadow = object->shadow;
            offset = offset + object->shadow_offset;

        if(object != caller_object)
            vm_object_unlock(object);
vm_region_count_obj_refs(
    vm_map_entry_t  entry,
    register int            ref_count;
    register vm_object_t    chk_obj;
    register vm_object_t    tmp_obj;

    if (entry->object.vm_object == 0)

    if (entry->is_sub_map)

    chk_obj = entry->object.vm_object;
    vm_object_lock(chk_obj);

        if (chk_obj == object)

        if (tmp_obj = chk_obj->shadow)
            vm_object_lock(tmp_obj);
        vm_object_unlock(chk_obj);
/*
 *  Routine:    vm_map_simplify
 *
 *      Attempt to simplify the map representation in
 *      the vicinity of the given starting address.
 *
 *      This routine is intended primarily to keep the
 *      kernel maps more compact -- they generally don't
 *      benefit from the "expand a map entry" technology
 *      at allocation time because the adjacent entry
 *      is often wired down.
 */
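/*
 *  Illustrative sketch (not from the original source): after a sequence of
 *  kernel-map operations that may have split entries around "addr", a caller
 *  can give the map a chance to re-coalesce them.  "kmap" and "addr" are
 *  hypothetical names.
 *
 *      vm_map_simplify(kmap, addr);
 *
 *  If the entries on either side of addr are compatible (same object,
 *  protections, wiring and so on, as tested below), they are merged into one.
 */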
    vm_map_entry_t  this_entry;
    vm_map_entry_t  prev_entry;
    vm_map_entry_t  next_entry;

        (vm_map_lookup_entry(map, start, &this_entry)) &&
        ((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) &&

        (prev_entry->vme_end == this_entry->vme_start) &&

        (prev_entry->is_shared == FALSE) &&
        (prev_entry->is_sub_map == FALSE) &&

        (this_entry->is_shared == FALSE) &&
        (this_entry->is_sub_map == FALSE) &&

        (prev_entry->inheritance == this_entry->inheritance) &&
        (prev_entry->protection == this_entry->protection) &&
        (prev_entry->max_protection == this_entry->max_protection) &&
        (prev_entry->behavior == this_entry->behavior) &&
        (prev_entry->wired_count == this_entry->wired_count) &&
        (prev_entry->user_wired_count == this_entry->user_wired_count) &&
        (prev_entry->in_transition == FALSE) &&
        (this_entry->in_transition == FALSE) &&

        (prev_entry->needs_copy == this_entry->needs_copy) &&

        (prev_entry->object.vm_object == this_entry->object.vm_object) &&
        ((prev_entry->offset +
          (prev_entry->vme_end - prev_entry->vme_start))
                == this_entry->offset)

        SAVE_HINT(map, prev_entry);
        vm_map_entry_unlink(map, this_entry);
        prev_entry->vme_end = this_entry->vme_end;
        UPDATE_FIRST_FREE(map, map->first_free);
        vm_object_deallocate(this_entry->object.vm_object);
        vm_map_entry_dispose(map, this_entry);
        counter(c_vm_map_simplified_lower++);

        (vm_map_lookup_entry(map, start, &this_entry)) &&
        ((next_entry = this_entry->vme_next) != vm_map_to_entry(map)) &&

        (next_entry->vme_start == this_entry->vme_end) &&

        (next_entry->is_shared == FALSE) &&
        (next_entry->is_sub_map == FALSE) &&

        (next_entry->is_shared == FALSE) &&
        (next_entry->is_sub_map == FALSE) &&

        (next_entry->inheritance == this_entry->inheritance) &&
        (next_entry->protection == this_entry->protection) &&
        (next_entry->max_protection == this_entry->max_protection) &&
        (next_entry->behavior == this_entry->behavior) &&
        (next_entry->wired_count == this_entry->wired_count) &&
        (next_entry->user_wired_count == this_entry->user_wired_count) &&
        (this_entry->in_transition == FALSE) &&
        (next_entry->in_transition == FALSE) &&

        (next_entry->needs_copy == this_entry->needs_copy) &&

        (next_entry->object.vm_object == this_entry->object.vm_object) &&
        ((this_entry->offset +
          (this_entry->vme_end - this_entry->vme_start))
                == next_entry->offset)

        vm_map_entry_unlink(map, next_entry);
        this_entry->vme_end = next_entry->vme_end;
        UPDATE_FIRST_FREE(map, map->first_free);
        vm_object_deallocate(next_entry->object.vm_object);
        vm_map_entry_dispose(map, next_entry);
        counter(c_vm_map_simplified_upper++);

    counter(c_vm_map_simplify_called++);
/*
 *  Routine:    vm_map_machine_attribute
 *
 *      Provide machine-specific attributes to mappings,
 *      such as cachability etc. for machines that provide
 *      them.  NUMA architectures and machines with big/strange
 *      caches will use this.
 *
 *      Responsibilities for locking and checking are handled here,
 *      everything else in the pmap module.  If any non-volatile
 *      information must be kept, the pmap module should handle
 *      it itself.  [This assumes that attributes do not
 *      need to be inherited, which seems ok to me]
 */
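/*
 *  Illustrative sketch (not from the original source): flushing the data
 *  cache for a buffer before handing it to a non-coherent DMA engine.
 *  "map", "buf" and "len" are hypothetical names.
 *
 *      vm_machine_attribute_val_t  val = MATTR_VAL_DCACHE_FLUSH;
 *      kern_return_t               kr;
 *
 *      kr = vm_map_machine_attribute(map, buf, len, MATTR_CACHE, &val);
 *
 *  MATTR_CACHE requests are resolved page by page below; any other attribute
 *  is handed straight to pmap_attribute().
 */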
vm_map_machine_attribute(
    vm_offset_t                 address,
    vm_machine_attribute_t      attribute,
    vm_machine_attribute_val_t* value)      /* IN/OUT */
    vm_size_t       sync_size;
    vm_map_entry_t  entry;

    if (address < vm_map_min(map) ||
        (address + size) > vm_map_max(map))
        return KERN_INVALID_ADDRESS;

    if (attribute != MATTR_CACHE) {
        /* If we don't have to find physical addresses, we */
        /* don't have to do an explicit traversal here. */
        ret = pmap_attribute(map->pmap,
                             address, size, attribute, value);

    /* Get the starting address */
    start = trunc_page_32(address);
    /* Figure how much memory we need to flush (in page increments) */
    sync_size = round_page_32(start + size) - start;

    ret = KERN_SUCCESS;         /* Assume it all worked */

    if (vm_map_lookup_entry(map, start, &entry)) {
        if((entry->vme_end - start) > sync_size) {
            sub_size = sync_size;
            sub_size = entry->vme_end - start;
            sync_size -= sub_size;

        if(entry->is_sub_map) {
            vm_map_machine_attribute(
                entry->object.sub_map,
                (start - entry->vme_start)

            if(entry->object.vm_object) {
                vm_object_t             base_object;
                vm_object_offset_t      offset;
                vm_object_offset_t      base_offset;

                offset = (start - entry->vme_start)
                base_offset = offset;
                object = entry->object.vm_object;
                base_object = object;

                    if(m && !m->fictitious) {
                        pmap_attribute_cache_sync(
                    } else if (object->shadow) {
                        object->shadow_offset;
                        object = object->shadow;

                    /* Bump to the next page */
                    base_offset += PAGE_SIZE;
                    offset = base_offset;
                    object = base_object;

    return KERN_FAILURE;
/*
 *  vm_map_behavior_set:
 *
 *  Sets the paging reference behavior of the specified address
 *  range in the target map.  Paging reference behavior affects
 *  how pagein operations resulting from faults on the map will be
 *  clustered.
 */
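/*
 *  Illustrative sketch (not from the original source): advising the VM system
 *  that a mapped file will be read strictly front to back, so faults on it
 *  should cluster ahead.  "map", "addr" and "len" are hypothetical names.
 *
 *      kern_return_t kr;
 *
 *      kr = vm_map_behavior_set(map, addr, addr + len,
 *                               VM_BEHAVIOR_SEQUENTIAL);
 *
 *  This is the kernel-side counterpart of user-level advice such as madvise().
 */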
vm_map_behavior_set(
    vm_behavior_t   new_behavior)
    register vm_map_entry_t entry;
    vm_map_entry_t          temp_entry;

        "vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d",
        (integer_t)map, start, end, new_behavior, 0);

    switch (new_behavior) {
    case VM_BEHAVIOR_DEFAULT:
    case VM_BEHAVIOR_RANDOM:
    case VM_BEHAVIOR_SEQUENTIAL:
    case VM_BEHAVIOR_RSEQNTL:
    case VM_BEHAVIOR_WILLNEED:
    case VM_BEHAVIOR_DONTNEED:
        new_behavior = VM_BEHAVIOR_DEFAULT;
        return(KERN_INVALID_ARGUMENT);

    /*
     *  The entire address range must be valid for the map.
     *  Note that vm_map_range_check() does a
     *  vm_map_lookup_entry() internally and returns the
     *  entry containing the start of the address range if
     *  the entire range is valid.
     */
    if (vm_map_range_check(map, start, end, &temp_entry)) {
        vm_map_clip_start(map, entry, start);
        return(KERN_INVALID_ADDRESS);

    while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
        vm_map_clip_end(map, entry, end);
        entry->behavior = new_behavior;
        entry = entry->vme_next;

    return(KERN_SUCCESS);
#include <mach_kdb.h>
#include <ddb/db_output.h>
#include <vm/vm_print.h>

#define printf  db_printf

/*
 * Forward declarations for internal functions.
 */
extern void vm_map_links_print(
                struct vm_map_links     *links);

extern void vm_map_header_print(
                struct vm_map_header    *header);

extern void vm_map_entry_print(
                vm_map_entry_t          entry);

extern void vm_follow_entry(
                vm_map_entry_t          entry);

extern void vm_follow_map(

/*
 *  vm_map_links_print: [ debug ]
 */
    struct vm_map_links *links)
    iprintf("prev = %08X next = %08X start = %08X end = %08X\n",

/*
 *  vm_map_header_print:    [ debug ]
 */
vm_map_header_print(
    struct vm_map_header *header)
    vm_map_links_print(&header->links);
    iprintf("nentries = %08X, %sentries_pageable\n",
            (header->entries_pageable ? "" : "!"));

/*
 *  vm_follow_entry:    [ debug ]
 */
    vm_map_entry_t entry)
    extern int db_indent;

    iprintf("map entry %08X\n", entry);

    shadows = vm_follow_object(entry->object.vm_object);
    iprintf("Total objects : %d\n",shadows);

/*
 *  vm_map_entry_print: [ debug ]
 */
    register vm_map_entry_t entry)
    extern int db_indent;
    static char *inheritance_name[4] = { "share", "copy", "none", "?"};
    static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };

    iprintf("map entry %08X\n", entry);

    vm_map_links_print(&entry->links);

    iprintf("start = %08X end = %08X, prot=%x/%x/%s\n",
            entry->max_protection,
            inheritance_name[(entry->inheritance & 0x3)]);

    iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n",
            behavior_name[(entry->behavior & 0x3)],
            entry->user_wired_count);
    iprintf("%sin_transition, %sneeds_wakeup\n",
            (entry->in_transition ? "" : "!"),
            (entry->needs_wakeup ? "" : "!"));

    if (entry->is_sub_map) {
        iprintf("submap = %08X - offset=%08X\n",
                entry->object.sub_map,
        iprintf("object=%08X, offset=%08X, ",
                entry->object.vm_object,
        printf("%sis_shared, %sneeds_copy\n",
               (entry->is_shared ? "" : "!"),
               (entry->needs_copy ? "" : "!"));

/*
 *  vm_follow_map:  [ debug ]
 */
    register vm_map_entry_t entry;
    extern int db_indent;

    iprintf("task map %08X\n", map);

    for (entry = vm_map_first_entry(map);
         entry && entry != vm_map_to_entry(map);
         entry = entry->vme_next) {
        vm_follow_entry(entry);

/*
 *  vm_map_print:   [ debug ]
 */
    register vm_map_entry_t entry;
    extern int db_indent;

    map = (vm_map_t)inmap;      /* Make sure we have the right type */

    iprintf("task map %08X\n", map);

    vm_map_header_print(&map->hdr);

    iprintf("pmap = %08X, size = %08X, ref = %d, hint = %08X, first_free = %08X\n",

    iprintf("%swait_for_space, %swiring_required, timestamp = %d\n",
            (map->wait_for_space ? "" : "!"),
            (map->wiring_required ? "" : "!"),

    switch (map->sw_state) {

    iprintf("res = %d, sw_state = %s\n", map->res_count, swstate);
#endif  /* TASK_SWAPPER */

    for (entry = vm_map_first_entry(map);
         entry && entry != vm_map_to_entry(map);
         entry = entry->vme_next) {
        vm_map_entry_print(entry);

/*
 *  Routine:    vm_map_copy_print
 *      Pretty-print a copy object for ddb.
 */
    extern int db_indent;
    vm_map_entry_t entry;

    copy = (vm_map_copy_t)incopy;   /* Make sure we have the right type */

    printf("copy object 0x%x\n", copy);

    iprintf("type=%d", copy->type);
    switch (copy->type) {
    case VM_MAP_COPY_ENTRY_LIST:
        printf("[entry_list]");

    case VM_MAP_COPY_OBJECT:

    case VM_MAP_COPY_KERNEL_BUFFER:
        printf("[kernel_buffer]");

        printf("[bad type]");

    printf(", offset=0x%x", copy->offset);
    printf(", size=0x%x\n", copy->size);

    switch (copy->type) {
    case VM_MAP_COPY_ENTRY_LIST:
        vm_map_header_print(&copy->cpy_hdr);
        for (entry = vm_map_copy_first_entry(copy);
             entry && entry != vm_map_copy_to_entry(copy);
             entry = entry->vme_next) {
            vm_map_entry_print(entry);

    case VM_MAP_COPY_OBJECT:
        iprintf("object=0x%x\n", copy->cpy_object);

    case VM_MAP_COPY_KERNEL_BUFFER:
        iprintf("kernel buffer=0x%x", copy->cpy_kdata);
        printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);

/*
 *  db_vm_map_total_size(map)   [ debug ]
 *
 *      return the total virtual size (in bytes) of the map
 */
db_vm_map_total_size(
    vm_map_entry_t  entry;

    map = (vm_map_t)inmap;      /* Make sure we have the right type */

    for (entry = vm_map_first_entry(map);
         entry != vm_map_to_entry(map);
         entry = entry->vme_next) {
        total += entry->vme_end - entry->vme_start;

#endif  /* MACH_KDB */
/*
 *  Routine:    vm_map_entry_insert
 *
 *  Description:    This routine inserts a new vm_entry in a locked map.
 */
vm_map_entry_insert(
    vm_map_entry_t      insp_entry,
    vm_object_offset_t  offset,
    boolean_t           needs_copy,
    boolean_t           is_shared,
    boolean_t           in_transition,
    vm_prot_t           cur_protection,
    vm_prot_t           max_protection,
    vm_behavior_t       behavior,
    vm_inherit_t        inheritance,
    unsigned            wired_count)
    vm_map_entry_t  new_entry;

    assert(insp_entry != (vm_map_entry_t)0);

    new_entry = vm_map_entry_create(map);

    new_entry->vme_start = start;
    new_entry->vme_end = end;
    assert(page_aligned(new_entry->vme_start));
    assert(page_aligned(new_entry->vme_end));

    new_entry->object.vm_object = object;
    new_entry->offset = offset;
    new_entry->is_shared = is_shared;
    new_entry->is_sub_map = FALSE;
    new_entry->needs_copy = needs_copy;
    new_entry->in_transition = in_transition;
    new_entry->needs_wakeup = FALSE;
    new_entry->inheritance = inheritance;
    new_entry->protection = cur_protection;
    new_entry->max_protection = max_protection;
    new_entry->behavior = behavior;
    new_entry->wired_count = wired_count;
    new_entry->user_wired_count = 0;
    new_entry->use_pmap = FALSE;

    /*
     *  Insert the new entry into the list.
     */
    vm_map_entry_link(map, insp_entry, new_entry);
    map->size += end - start;

    /*
     *  Update the free space hint and the lookup hint.
     */
    SAVE_HINT(map, new_entry);
/*
 *  Routine:    vm_remap_extract
 *
 *  Description:    This routine returns a vm_entry list from a map.
 */
    struct vm_map_header    *map_header,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    /* What, no behavior? */
    vm_inherit_t            inheritance,
    kern_return_t       result;
    vm_size_t           mapped_size;
    vm_map_entry_t      src_entry;      /* result of last map lookup */
    vm_map_entry_t      new_entry;
    vm_object_offset_t  offset;
    vm_offset_t         map_address;
    vm_offset_t         src_start;      /* start of entry to map */
    vm_offset_t         src_end;        /* end of region to be mapped */
    vm_map_version_t    version;
    boolean_t           src_needs_copy;
    boolean_t           new_entry_needs_copy;

    assert(map != VM_MAP_NULL);
    assert(size != 0 && size == round_page_32(size));
    assert(inheritance == VM_INHERIT_NONE ||
           inheritance == VM_INHERIT_COPY ||
           inheritance == VM_INHERIT_SHARE);

    /*
     *  Compute start and end of region.
     */
    src_start = trunc_page_32(addr);
    src_end = round_page_32(src_start + size);

    /*
     *  Initialize map_header.
     */
    map_header->links.next = (struct vm_map_entry *)&map_header->links;
    map_header->links.prev = (struct vm_map_entry *)&map_header->links;
    map_header->nentries = 0;
    map_header->entries_pageable = pageable;

    *cur_protection = VM_PROT_ALL;
    *max_protection = VM_PROT_ALL;

    result = KERN_SUCCESS;

    /*
     *  The specified source virtual space might correspond to
     *  multiple map entries, need to loop on them.
     */
    while (mapped_size != size) {
        vm_size_t   entry_size;

        /*
         *  Find the beginning of the region.
         */
        if (! vm_map_lookup_entry(map, src_start, &src_entry)) {
            result = KERN_INVALID_ADDRESS;

        if (src_start < src_entry->vme_start ||
            (mapped_size && src_start != src_entry->vme_start)) {
            result = KERN_INVALID_ADDRESS;

        if(src_entry->is_sub_map) {
            result = KERN_INVALID_ADDRESS;

        tmp_size = size - mapped_size;
        if (src_end > src_entry->vme_end)
            tmp_size -= (src_end - src_entry->vme_end);

        entry_size = (vm_size_t)(src_entry->vme_end -
                                 src_entry->vme_start);

        if(src_entry->is_sub_map) {
            vm_map_reference(src_entry->object.sub_map);

            object = src_entry->object.vm_object;

            if (object == VM_OBJECT_NULL) {
                object = vm_object_allocate(entry_size);
                src_entry->offset = 0;
                src_entry->object.vm_object = object;
            } else if (object->copy_strategy !=
                       MEMORY_OBJECT_COPY_SYMMETRIC) {
                /*
                 *  We are already using an asymmetric
                 *  copy, and therefore we already have
                 */
                assert(!src_entry->needs_copy);
            } else if (src_entry->needs_copy || object->shadowed ||
                       (object->internal && !object->true_share &&
                        !src_entry->is_shared &&
                        object->size > entry_size)) {

                vm_object_shadow(&src_entry->object.vm_object,

                if (!src_entry->needs_copy &&
                    (src_entry->protection & VM_PROT_WRITE)) {
                        vm_object_pmap_protect(
                            src_entry->object.vm_object,
                            src_entry->vme_start,
                            src_entry->protection &
                        pmap_protect(vm_map_pmap(map),
                                     src_entry->vme_start,
                                     src_entry->protection &

                object = src_entry->object.vm_object;
                src_entry->needs_copy = FALSE;

            vm_object_lock(object);
            object->ref_count++;            /* object ref. for new entry */
            VM_OBJ_RES_INCR(object);
            if (object->copy_strategy ==
                MEMORY_OBJECT_COPY_SYMMETRIC) {
                object->copy_strategy =
                    MEMORY_OBJECT_COPY_DELAY;
            vm_object_unlock(object);

        offset = src_entry->offset + (src_start - src_entry->vme_start);

        new_entry = _vm_map_entry_create(map_header);
        vm_map_entry_copy(new_entry, src_entry);
        new_entry->use_pmap = FALSE;    /* clr address space specifics */

        new_entry->vme_start = map_address;
        new_entry->vme_end = map_address + tmp_size;
        new_entry->inheritance = inheritance;
        new_entry->offset = offset;

        /*
         *  The new region has to be copied now if required.
         */
            src_entry->is_shared = TRUE;
            new_entry->is_shared = TRUE;
            if (!(new_entry->is_sub_map))
                new_entry->needs_copy = FALSE;

        } else if (src_entry->is_sub_map) {
            /* make this a COW sub_map if not already */
            new_entry->needs_copy = TRUE;
        } else if (src_entry->wired_count == 0 &&
                   vm_object_copy_quickly(&new_entry->object.vm_object,
                                          (new_entry->vme_end -
                                           new_entry->vme_start),
                                          &new_entry_needs_copy)) {

            new_entry->needs_copy = new_entry_needs_copy;
            new_entry->is_shared = FALSE;

            /*
             *  Handle copy_on_write semantics.
             */
            if (src_needs_copy && !src_entry->needs_copy) {
                vm_object_pmap_protect(object,
                                       ((src_entry->is_shared
                                         PMAP_NULL : map->pmap),
                                       src_entry->vme_start,
                                       src_entry->protection &

                src_entry->needs_copy = TRUE;

            /*
             *  Throw away the old object reference of the new entry.
             */
            vm_object_deallocate(object);

            new_entry->is_shared = FALSE;

            /*
             *  The map can be safely unlocked since we
             *  already hold a reference on the object.
             *
             *  Record the timestamp of the map for later
             *  verification, and unlock the map.
             */
            version.main_timestamp = map->timestamp;
            vm_map_unlock(map);     /* Increments timestamp once! */

            if (src_entry->wired_count > 0) {
                vm_object_lock(object);
                result = vm_object_copy_slowly(
                            &new_entry->object.vm_object);

                new_entry->offset = 0;
                new_entry->needs_copy = FALSE;

                result = vm_object_copy_strategically(
                            &new_entry->object.vm_object,
                            &new_entry_needs_copy);

                new_entry->needs_copy = new_entry_needs_copy;

            /*
             *  Throw away the old object reference of the new entry.
             */
            vm_object_deallocate(object);

            if (result != KERN_SUCCESS &&
                result != KERN_MEMORY_RESTART_COPY) {
                _vm_map_entry_dispose(map_header, new_entry);

            /*
             *  Verify that the map has not substantially
             *  changed while the copy was being made.
             */
            if (version.main_timestamp + 1 != map->timestamp) {
                /*
                 *  Simple version comparison failed.
                 *
                 *  Retry the lookup and verify that the
                 *  same object/offset are still present.
                 */
                vm_object_deallocate(new_entry->object.vm_object);
                _vm_map_entry_dispose(map_header, new_entry);
                if (result == KERN_MEMORY_RESTART_COPY)
                    result = KERN_SUCCESS;

            if (result == KERN_MEMORY_RESTART_COPY) {
                vm_object_reference(object);

        _vm_map_entry_link(map_header,
                           map_header->links.prev, new_entry);

        *cur_protection &= src_entry->protection;
        *max_protection &= src_entry->max_protection;

        map_address += tmp_size;
        mapped_size += tmp_size;
        src_start += tmp_size;

    if (result != KERN_SUCCESS) {
        /*
         *  Free all allocated elements.
         */
        for (src_entry = map_header->links.next;
             src_entry != (struct vm_map_entry *)&map_header->links;
             src_entry = new_entry) {
            new_entry = src_entry->vme_next;
            _vm_map_entry_unlink(map_header, src_entry);
            vm_object_deallocate(src_entry->object.vm_object);
            _vm_map_entry_dispose(map_header, src_entry);
/*
 *  Map portion of a task's address space.
 *  Mapped region must not overlap more than
 *  one vm memory object.  Protections and
 *  inheritance attributes remain the same
 *  as in the original task and are out parameters.
 *  Source and Target task can be identical.
 *  Other attributes are the same as for vm_map().
 */
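/*
 *  Illustrative sketch (not from the original source): mirroring a page-
 *  aligned region of another task's map into the current task, sharing rather
 *  than copying the underlying memory.  All variable names are hypothetical.
 *
 *      vm_offset_t     target_addr = 0;
 *      vm_prot_t       cur_prot, max_prot;
 *      kern_return_t   kr;
 *
 *      kr = vm_remap(current_map(), &target_addr, len, 0, TRUE,
 *                    src_map, src_addr, FALSE,     -- copy == FALSE: share
 *                    &cur_prot, &max_prot, VM_INHERIT_SHARE);
 *
 *  On success, target_addr receives the chosen address and cur_prot/max_prot
 *  are narrowed to what the source mapping allows.
 */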
    vm_map_t        target_map,
    vm_offset_t     *address,
    vm_offset_t     memory_address,
    vm_prot_t       *cur_protection,
    vm_prot_t       *max_protection,
    vm_inherit_t    inheritance)
    kern_return_t   result;
    vm_map_entry_t  entry;
    vm_map_entry_t  insp_entry;
    vm_map_entry_t  new_entry;
    struct vm_map_header    map_header;

    if (target_map == VM_MAP_NULL)
        return KERN_INVALID_ARGUMENT;

    switch (inheritance) {
    case VM_INHERIT_NONE:
    case VM_INHERIT_COPY:
    case VM_INHERIT_SHARE:
        if (size != 0 && src_map != VM_MAP_NULL)
        return KERN_INVALID_ARGUMENT;

    size = round_page_32(size);

    result = vm_remap_extract(src_map, memory_address,
                              size, copy, &map_header,

    if (result != KERN_SUCCESS) {

    /*
     *  Allocate/check a range of free virtual address
     *  space for the target
     */
    *address = trunc_page_32(*address);
    vm_map_lock(target_map);
    result = vm_remap_range_allocate(target_map, address, size,
                                     mask, anywhere, &insp_entry);

    for (entry = map_header.links.next;
         entry != (struct vm_map_entry *)&map_header.links;
         entry = new_entry) {
        new_entry = entry->vme_next;
        _vm_map_entry_unlink(&map_header, entry);
        if (result == KERN_SUCCESS) {
            entry->vme_start += *address;
            entry->vme_end += *address;
            vm_map_entry_link(target_map, insp_entry, entry);

            if (!entry->is_sub_map) {
                vm_object_deallocate(entry->object.vm_object);
                vm_map_deallocate(entry->object.sub_map);
            _vm_map_entry_dispose(&map_header, entry);

    if (result == KERN_SUCCESS) {
        target_map->size += size;
        SAVE_HINT(target_map, insp_entry);
    vm_map_unlock(target_map);

    if (result == KERN_SUCCESS && target_map->wiring_required)
        result = vm_map_wire(target_map, *address,
                             *address + size, *cur_protection, TRUE);
/*
 *  Routine:    vm_remap_range_allocate
 *
 *      Allocate a range in the specified virtual address map.
 *      Returns the address and the map entry just before the allocated
 *      range.
 *
 *      Map must be locked.
 */
vm_remap_range_allocate(
    vm_offset_t     *address,       /* IN/OUT */
    vm_map_entry_t  *map_entry)     /* OUT */
    register vm_map_entry_t entry;
    register vm_offset_t    start;
    register vm_offset_t    end;
    kern_return_t           result = KERN_SUCCESS;

        /*
         *  Calculate the first possible address.
         */
        if (start < map->min_offset)
            start = map->min_offset;
        if (start > map->max_offset)
            return(KERN_NO_SPACE);

        /*
         *  Look for the first possible address;
         *  if there's already something at this
         *  address, we have to start after it.
         */
        assert(first_free_is_valid(map));
        if (start == map->min_offset) {
            if ((entry = map->first_free) != vm_map_to_entry(map))
                start = entry->vme_end;
            vm_map_entry_t tmp_entry;
            if (vm_map_lookup_entry(map, start, &tmp_entry))
                start = tmp_entry->vme_end;

        /*
         *  In any case, the "entry" always precedes
         *  the proposed new region throughout the
         */
            register vm_map_entry_t next;

            /*
             *  Find the end of the proposed new region.
             *  Be sure we didn't go beyond the end, or
             *  wrap around the address.
             */
            end = ((start + mask) & ~mask);
                return(KERN_NO_SPACE);

            if ((end > map->max_offset) || (end < start)) {
                if (map->wait_for_space) {
                    if (size <= (map->max_offset -
                        assert_wait((event_t) map, THREAD_INTERRUPTIBLE);
                        thread_block((void (*)(void))0);

                return(KERN_NO_SPACE);

            /*
             *  If there are no more entries, we must win.
             */
            next = entry->vme_next;
            if (next == vm_map_to_entry(map))

            /*
             *  If there is another entry, it must be
             *  after the end of the potential new region.
             */
            if (next->vme_start >= end)

            /*
             *  Didn't fit -- move to the next entry.
             */
            start = entry->vme_end;

        vm_map_entry_t temp_entry;

        /*
         *  the address doesn't itself violate
         *  the mask requirement.
         */
        if ((start & mask) != 0)
            return(KERN_NO_SPACE);

        /*
         *  ...  the address is within bounds
         */
        if ((start < map->min_offset) ||
            (end > map->max_offset) ||
            return(KERN_INVALID_ADDRESS);

        /*
         *  ...  the starting address isn't allocated
         */
        if (vm_map_lookup_entry(map, start, &temp_entry))
            return(KERN_NO_SPACE);

        /*
         *  ...  the next region doesn't overlap the
         */
        if ((entry->vme_next != vm_map_to_entry(map)) &&
            (entry->vme_next->vme_start < end))
            return(KERN_NO_SPACE);

    return(KERN_SUCCESS);
/*
 *  Set the address map for the current thr_act to the specified map
 */
    thread_act_t    thr_act = current_act();
    vm_map_t        oldmap = thr_act->map;

    mp_disable_preemption();
    mycpu = cpu_number();

    /*
     *  Deactivate the current map and activate the requested map
     */
    PMAP_SWITCH_USER(thr_act, map, mycpu);

    mp_enable_preemption();
/*
 *  Routine:    vm_map_write_user
 *
 *      Copy out data from a kernel space into space in the
 *      destination map.  The space must already exist in the
 *      destination map.
 *      NOTE:  This routine should only be called by threads
 *      which can block on a page fault.  i.e. kernel mode user
 *      threads.
 */
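/*
 *  Illustrative sketch (not from the original source): copying a kernel
 *  buffer out to a user address in some task's map from a context that is
 *  allowed to fault.  All names are hypothetical.
 *
 *      kern_return_t kr;
 *
 *      kr = vm_map_write_user(task_map, (vm_offset_t)kbuf, user_addr, len);
 *      if (kr != KERN_SUCCESS)
 *          return kr;
 *
 *  If task_map is not the caller's current map, the routine below temporarily
 *  switches to it around the copyout().
 */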
    vm_offset_t     src_addr,
    vm_offset_t     dst_addr,
    thread_act_t    thr_act = current_act();
    kern_return_t   kr = KERN_SUCCESS;

    if(thr_act->map == map) {
        if (copyout((char *)src_addr, (char *)dst_addr, size)) {
            kr = KERN_INVALID_ADDRESS;

        /* take on the identity of the target map while doing */

        vm_map_reference(map);
        oldmap = vm_map_switch(map);
        if (copyout((char *)src_addr, (char *)dst_addr, size)) {
            kr = KERN_INVALID_ADDRESS;
        vm_map_switch(oldmap);
        vm_map_deallocate(map);
/*
 *  Routine:    vm_map_read_user
 *
 *      Copy in data from a user space source map into the
 *      kernel map.  The space must already exist in the
 *      kernel map.
 *      NOTE:  This routine should only be called by threads
 *      which can block on a page fault.  i.e. kernel mode user
 *      threads.
 */
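/*
 *  Illustrative sketch (not from the original source): the inverse of the
 *  vm_map_write_user() example above -- pulling user data from a task's map
 *  into a kernel buffer.  Names are hypothetical.
 *
 *      kr = vm_map_read_user(task_map, user_addr, (vm_offset_t)kbuf, len);
 */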
    vm_offset_t     src_addr,
    vm_offset_t     dst_addr,
    thread_act_t    thr_act = current_act();
    kern_return_t   kr = KERN_SUCCESS;

    if(thr_act->map == map) {
        if (copyin((char *)src_addr, (char *)dst_addr, size)) {
            kr = KERN_INVALID_ADDRESS;

        /* take on the identity of the target map while doing */

        vm_map_reference(map);
        oldmap = vm_map_switch(map);
        if (copyin((char *)src_addr, (char *)dst_addr, size)) {
            kr = KERN_INVALID_ADDRESS;
        vm_map_switch(oldmap);
        vm_map_deallocate(map);
/* Takes existing source and destination sub-maps and clones the contents of */
/* the source map */
    ipc_port_t  src_region,
    ipc_port_t  dst_region)
    vm_named_entry_t    src_object;
    vm_named_entry_t    dst_object;
    vm_offset_t         max_off;
    vm_map_entry_t      entry;
    vm_map_entry_t      new_entry;
    vm_map_entry_t      insert_point;

    src_object = (vm_named_entry_t)src_region->ip_kobject;
    dst_object = (vm_named_entry_t)dst_region->ip_kobject;
    if((!src_object->is_sub_map) || (!dst_object->is_sub_map)) {
        return KERN_INVALID_ARGUMENT;
    src_map = (vm_map_t)src_object->backing.map;
    dst_map = (vm_map_t)dst_object->backing.map;
    /* destination map is assumed to be unavailable to any other */
    /* activity. i.e. it is new */
    vm_map_lock(src_map);
    if((src_map->min_offset != dst_map->min_offset)
       || (src_map->max_offset != dst_map->max_offset)) {
        vm_map_unlock(src_map);
        return KERN_INVALID_ARGUMENT;
    addr = src_map->min_offset;
    vm_map_lookup_entry(dst_map, addr, &entry);
    if(entry == vm_map_to_entry(dst_map)) {
        entry = entry->vme_next;
    if(entry == vm_map_to_entry(dst_map)) {
        max_off = src_map->max_offset;
        max_off = entry->vme_start;
    vm_map_lookup_entry(src_map, addr, &entry);
    if(entry == vm_map_to_entry(src_map)) {
        entry = entry->vme_next;
    vm_map_lookup_entry(dst_map, addr, &insert_point);
    while((entry != vm_map_to_entry(src_map)) &&
          (entry->vme_end <= max_off)) {
        addr = entry->vme_start;
        new_entry = vm_map_entry_create(dst_map);
        vm_map_entry_copy(new_entry, entry);
        vm_map_entry_link(dst_map, insert_point, new_entry);
        insert_point = new_entry;
        if (entry->object.vm_object != VM_OBJECT_NULL) {
            if (new_entry->is_sub_map) {
                vm_map_reference(new_entry->object.sub_map);
                vm_object_reference(
                    new_entry->object.vm_object);
        dst_map->size += new_entry->vme_end - new_entry->vme_start;
        entry = entry->vme_next;
    vm_map_unlock(src_map);
    return KERN_SUCCESS;
/*
 * Export routines to other components for the things we access locally through
 * macros.
 */
    return (current_map_fast());
/*
 *  vm_map_check_protection:
 *
 *  Assert that the target map allows the specified
 *  privilege on the entire address region given.
 *  The entire region must be allocated.
 */
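/*
 *  Illustrative sketch (not from the original source): verifying that a
 *  user-supplied buffer is mapped read/write before starting a transfer into
 *  it.  "map", "addr" and "len" are hypothetical names.
 *
 *      if (!vm_map_check_protection(map, trunc_page_32(addr),
 *                                   round_page_32(addr + len),
 *                                   VM_PROT_READ | VM_PROT_WRITE))
 *          return KERN_PROTECTION_FAILURE;
 */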
boolean_t vm_map_check_protection(map, start, end, protection)
    register vm_map_t       map;
    register vm_offset_t    start;
    register vm_offset_t    end;
    register vm_prot_t      protection;
    register vm_map_entry_t entry;
    vm_map_entry_t          tmp_entry;

    if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)

    if (!vm_map_lookup_entry(map, start, &tmp_entry)) {

    while (start < end) {
        if (entry == vm_map_to_entry(map)) {

        if (start < entry->vme_start) {

        /*
         *  Check protection associated with entry.
         */
        if ((entry->protection & protection) != protection) {

        /* go to next entry */
        start = entry->vme_end;
        entry = entry->vme_next;
/*
 *  This routine is obsolete, but included for backward
 *  compatibility for older drivers.
 */
kernel_vm_map_reference(
    vm_map_reference(map);
/*
 *  Most code internal to the osfmk will go through a
 *  macro defining this.  This is always here for the
 *  use of other kernel components.
 */
#undef vm_map_reference
    register vm_map_t   map)
    if (map == VM_MAP_NULL)

    mutex_lock(&map->s_lock);
    assert(map->res_count > 0);
    assert(map->ref_count >= map->res_count);
    mutex_unlock(&map->s_lock);
/*
 *  vm_map_deallocate:
 *
 *  Removes a reference from the specified map,
 *  destroying it if no references remain.
 *  The map should not be locked.
 */
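/*
 *  Illustrative sketch (not from the original source): the usual pairing with
 *  vm_map_reference() when a component holds on to a map across an operation
 *  that may outlive its original owner.  "map" is a hypothetical variable.
 *
 *      vm_map_reference(map);      -- keep the map alive
 *      ... use map ...
 *      vm_map_deallocate(map);     -- may destroy it if this was the last ref
 */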
    register vm_map_t   map)
    if (map == VM_MAP_NULL)

    mutex_lock(&map->s_lock);
    ref = --map->ref_count;
        vm_map_res_deallocate(map);
        mutex_unlock(&map->s_lock);

    assert(map->ref_count == 0);
    mutex_unlock(&map->s_lock);

    /*
     *  The map residence count isn't decremented here because
     *  the vm_map_delete below will traverse the entire map,
     *  deleting entries, and the residence counts on objects
     *  and sharing maps will go away then.
     */

    vm_map_destroy(map);