/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory mapping module.
 */
#include <task_swapper.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <mach/vm_statistics.h>
#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/zalloc.h>
#include <vm/vm_init.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_port.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <mach/vm_map_server.h>
#include <mach/mach_host_server.h>
#include <machine/db_machdep.h>
/* Internal prototypes
 */
extern boolean_t	vm_map_range_check(
				vm_map_t map, vm_offset_t start,
				vm_offset_t end, vm_map_entry_t *entry);

extern vm_map_entry_t	_vm_map_entry_create(
				struct vm_map_header *map_header);

extern void		_vm_map_entry_dispose(
				struct vm_map_header *map_header,
				vm_map_entry_t entry);

extern void		vm_map_pmap_enter(
				vm_map_t map, vm_offset_t addr,
				vm_offset_t end_addr, vm_object_t object,
				vm_object_offset_t offset,
				vm_prot_t protection);

extern void		_vm_map_clip_end(
				struct vm_map_header *map_header,
				vm_map_entry_t entry, vm_offset_t end);

extern void		vm_map_entry_delete(
				vm_map_t map, vm_map_entry_t entry);

extern kern_return_t	vm_map_delete(
				vm_map_t map, vm_offset_t start,
				vm_offset_t end, int flags);

extern void		vm_map_copy_steal_pages(
				vm_map_copy_t copy);

extern kern_return_t	vm_map_copy_overwrite_unaligned(
				vm_map_t dst_map, vm_map_entry_t entry,
				vm_map_copy_t copy, vm_offset_t start);

extern kern_return_t	vm_map_copy_overwrite_aligned(
				vm_map_t dst_map, vm_map_entry_t tmp_entry,
				vm_map_copy_t copy, vm_offset_t start,
				pmap_t pmap);

extern kern_return_t	vm_map_copyin_kernel_buffer(
				vm_map_t src_map, vm_offset_t src_addr,
				vm_size_t len, boolean_t src_destroy,
				vm_map_copy_t *copy_result);	/* OUT */

extern kern_return_t	vm_map_copyout_kernel_buffer(
				vm_map_t map, vm_offset_t *addr, /* IN/OUT */
				vm_map_copy_t copy, boolean_t overwrite);

extern void		vm_map_fork_share(
				vm_map_t old_map, vm_map_entry_t old_entry,
				vm_map_t new_map);

extern boolean_t	vm_map_fork_copy(
				vm_map_t old_map, vm_map_entry_t *old_entry_p,
				vm_map_t new_map);

extern kern_return_t	vm_remap_range_allocate(
				vm_map_t map, vm_offset_t *address, /* IN/OUT */
				vm_size_t size, vm_offset_t mask,
				boolean_t anywhere,
				vm_map_entry_t *map_entry);	/* OUT */

extern void		_vm_map_clip_start(
				struct vm_map_header *map_header,
				vm_map_entry_t entry, vm_offset_t start);

void			vm_region_top_walk(
				vm_map_entry_t entry,
				vm_region_top_info_t top);

void			vm_region_walk(
				vm_map_entry_t entry,
				vm_region_extended_info_t extended,
				vm_object_offset_t offset, vm_offset_t range,
				vm_map_t map, vm_offset_t va);
/*
 *	Macros to copy a vm_map_entry.  We must be careful to correctly
 *	manage the wired page count.  vm_map_entry_copy() creates a new
 *	map entry to the same memory -- the wired count in the new entry
 *	must be set to zero.  vm_map_entry_copy_full() creates a new
 *	entry that is identical to the old entry.  This preserves the
 *	wire count; it's used for map splitting and zone changing in
 *	vm_map_copyout.
 */
#define vm_map_entry_copy(NEW,OLD) \
MACRO_BEGIN				\
	*(NEW) = *(OLD);		\
	(NEW)->is_shared = FALSE;	\
	(NEW)->needs_wakeup = FALSE;	\
	(NEW)->in_transition = FALSE;	\
	(NEW)->wired_count = 0;		\
	(NEW)->user_wired_count = 0;	\
MACRO_END

#define vm_map_entry_copy_full(NEW,OLD)	(*(NEW) = *(OLD))
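
/*
 * Illustrative sketch (not part of the original source): the two copy
 * macros above differ only in how they treat wiring state.  Assuming a
 * hypothetical wired source entry "old", a map split wants the full
 * copy so the new half keeps its wire count, while a fresh mapping of
 * the same memory must start unwired:
 *
 *	struct vm_map_entry split_half, fresh;
 *
 *	vm_map_entry_copy_full(&split_half, old);  // wire counts preserved
 *	vm_map_entry_copy(&fresh, old);            // wire counts forced to 0
 *	assert(split_half.wired_count == old->wired_count);
 *	assert(fresh.wired_count == 0 && fresh.user_wired_count == 0);
 */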
/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	Sharing maps have been deleted from this version of Mach.
 *	All shared objects are now mapped directly into the respective
 *	maps.  This requires a change in the copy on write strategy;
 *	the asymmetric (delayed) strategy is used for shared temporary
 *	objects instead of the symmetric (shadow) strategy.  All maps
 *	are now "top level" maps (either task map, kernel map or submap
 *	of the kernel map).
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	entries that used to be a single entry.
 *
 *	The symmetric (shadow) copy strategy implements virtual copy
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map when this strategy
 *	is used -- this means that shadow object creation can be
 *	delayed until a write operation occurs.  The asymmetric (delayed)
 *	strategy allows multiple maps to have writeable references to
 *	the same region of a VM object, and hence cannot delay creating
 *	its copy objects.  See vm_object_copy_quickly() in vm_object.c.
 *	Copying of permanent objects is completely different; see
 *	vm_object_copy_strategically() in vm_object.c.
 */
zone_t		vm_map_zone;		/* zone for vm_map structures */
zone_t		vm_map_entry_zone;	/* zone for vm_map_entry structures */
zone_t		vm_map_kentry_zone;	/* zone for kernel entry structures */
zone_t		vm_map_copy_zone;	/* zone for vm_map_copy structures */

/*
 *	Placeholder object for submap operations.  This object is dropped
 *	into the range by a call to vm_map_find, and removed when
 *	vm_map_submap creates the submap.
 */
vm_object_t	vm_submap_object;

/*
 *	vm_map_init:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from zones -- we must
 *	initialize those zones.
 *
 *	There are three zones of interest:
 *
 *	vm_map_zone:		used to allocate maps.
 *	vm_map_entry_zone:	used to allocate map entries.
 *	vm_map_kentry_zone:	used to allocate map entries for the kernel.
 *
 *	The kernel allocates map entries from a special zone that is initially
 *	"crammed" with memory.  It would be difficult (perhaps impossible) for
 *	the kernel to allocate more memory to an entry zone when it became
 *	empty since the very act of allocating memory implies the creation
 *	of a new entry.
 */
vm_offset_t	map_data;
vm_size_t	map_data_size;
vm_offset_t	kentry_data;
vm_size_t	kentry_data_size;
int		kentry_count = 2048;	/* to init kentry_data_size */

#define NO_COALESCE_LIMIT	(1024 * 128)

/*
 * Threshold for aggressive (eager) page map entering for vm copyout
 * operations.  Any copyout larger will NOT be aggressively entered.
 */
vm_size_t	vm_map_aggressive_enter_max;	/* set by bootstrap */

/* Skip acquiring locks if we're in the midst of a kernel core dump */
extern unsigned int not_in_kdp;
void
vm_map_init(
	void)
{
	vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 40*1024,
			    PAGE_SIZE, "maps");
	vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
				  1024*1024, PAGE_SIZE*5,
				  "non-kernel map entries");
	vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
				   kentry_data_size, kentry_data_size,
				   "kernel map entries");

	vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
				 16*1024, PAGE_SIZE, "map copies");

	/*
	 *	Cram the map and kentry zones with initial data.
	 *	Set kentry_zone non-collectible to aid zone_gc().
	 */
	zone_change(vm_map_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE);
	zcram(vm_map_zone, map_data, map_data_size);
	zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
}
void
vm_map_steal_memory(
	void)
{
	map_data_size = round_page_32(10 * sizeof(struct vm_map));
	map_data = pmap_steal_memory(map_data_size);

	/*
	 *	Limiting worst case: vm_map_kentry_zone needs to map each "available"
	 *	physical page (i.e. that beyond the kernel image and page tables)
	 *	individually; we guess at most one entry per eight pages in the
	 *	real world.  This works out to roughly .1 of 1% of physical memory,
	 *	or roughly 1900 entries (64K) for a 64M machine with 4K pages.
	 */
	kentry_count = pmap_free_pages() / 8;

	kentry_data_size =
		round_page_32(kentry_count * sizeof(struct vm_map_entry));
	kentry_data = pmap_steal_memory(kentry_data_size);
}
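
/*
 * Worked example of the sizing heuristic above (illustrative note, not
 * part of the original source): with 4K pages, a 64MB machine has
 * 64MB / 4KB = 16384 physical pages, so kentry_count = 16384 / 8 = 2048
 * before the pages claimed by the kernel image and page tables are
 * subtracted -- which is why the comment quotes "roughly 1900 entries"
 * and about 64K of stolen memory once that count is multiplied by
 * sizeof(struct vm_map_entry) and rounded up to a page boundary.
 */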
/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(
	pmap_t		pmap,
	vm_offset_t	min,
	vm_offset_t	max,
	boolean_t	pageable)
{
	register vm_map_t	result;

	result = (vm_map_t) zalloc(vm_map_zone);
	if (result == VM_MAP_NULL)
		panic("vm_map_create");

	vm_map_first_entry(result) = vm_map_to_entry(result);
	vm_map_last_entry(result) = vm_map_to_entry(result);
	result->hdr.nentries = 0;
	result->hdr.entries_pageable = pageable;

	result->size = 0;
	result->ref_count = 1;
#if	TASK_SWAPPER
	result->res_count = 1;
	result->sw_state = MAP_SW_IN;
#endif	/* TASK_SWAPPER */
	result->pmap = pmap;
	result->min_offset = min;
	result->max_offset = max;
	result->wiring_required = FALSE;
	result->no_zero_fill = FALSE;
	result->mapped = FALSE;
	result->wait_for_space = FALSE;
	result->first_free = vm_map_to_entry(result);
	result->hint = vm_map_to_entry(result);
	vm_map_lock_init(result);
	mutex_init(&result->s_lock, ETAP_VM_RESULT);

	return(result);
}
/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion in the
 *	given map (or map copy).  No fields are filled.
 */
#define	vm_map_entry_create(map) \
	_vm_map_entry_create(&(map)->hdr)

#define	vm_map_copy_entry_create(copy) \
	_vm_map_entry_create(&(copy)->cpy_hdr)

vm_map_entry_t
_vm_map_entry_create(
	register struct vm_map_header	*map_header)
{
	register zone_t		zone;
	register vm_map_entry_t	entry;

	if (map_header->entries_pageable)
		zone = vm_map_entry_zone;
	else
		zone = vm_map_kentry_zone;

	entry = (vm_map_entry_t) zalloc(zone);
	if (entry == VM_MAP_ENTRY_NULL)
		panic("vm_map_entry_create");

	return(entry);
}
/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
#define	vm_map_entry_dispose(map, entry)			\
MACRO_BEGIN							\
	if((entry) == (map)->first_free)			\
		(map)->first_free = vm_map_to_entry(map);	\
	if((entry) == (map)->hint)				\
		(map)->hint = vm_map_to_entry(map);		\
	_vm_map_entry_dispose(&(map)->hdr, (entry));		\
MACRO_END

#define	vm_map_copy_entry_dispose(copy, entry) \
	_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))

void
_vm_map_entry_dispose(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry)
{
	register zone_t		zone;

	if (map_header->entries_pageable)
		zone = vm_map_entry_zone;
	else
		zone = vm_map_kentry_zone;

	zfree(zone, (vm_offset_t) entry);
}
boolean_t first_free_is_valid(vm_map_t map);	/* forward */
boolean_t first_free_check = FALSE;

boolean_t
first_free_is_valid(
	vm_map_t	map)
{
	vm_map_entry_t	entry, next;

	if (!first_free_check)
		return(TRUE);

	entry = vm_map_to_entry(map);
	next = entry->vme_next;
	while (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_end) ||
	       (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_start) &&
		next != vm_map_to_entry(map))) {
		entry = next;
		next = entry->vme_next;
		if (entry == vm_map_to_entry(map))
			break;
	}
	if (map->first_free != entry) {
		printf("Bad first_free for map 0x%x: 0x%x should be 0x%x\n",
		       map, map->first_free, entry);
		return(FALSE);
	}
	return(TRUE);
}
/*
 *	UPDATE_FIRST_FREE:
 *
 *	Updates the map->first_free pointer to the
 *	entry immediately before the first hole in the map.
 *	The map should be locked.
 */
#define UPDATE_FIRST_FREE(map, new_first_free) 			\
MACRO_BEGIN							\
	vm_map_t	UFF_map; 				\
	vm_map_entry_t	UFF_first_free; 			\
	vm_map_entry_t	UFF_next_entry;				\
								\
	UFF_map = (map);					\
	UFF_first_free = (new_first_free);			\
	UFF_next_entry = UFF_first_free->vme_next;		\
	while (trunc_page_32(UFF_next_entry->vme_start) ==	\
	       trunc_page_32(UFF_first_free->vme_end) ||	\
	       (trunc_page_32(UFF_next_entry->vme_start) ==	\
		trunc_page_32(UFF_first_free->vme_start) &&	\
		UFF_next_entry != vm_map_to_entry(UFF_map))) {	\
		UFF_first_free = UFF_next_entry;		\
		UFF_next_entry = UFF_first_free->vme_next;	\
		if (UFF_first_free == vm_map_to_entry(UFF_map))	\
			break;					\
	}							\
	UFF_map->first_free = UFF_first_free;			\
	assert(first_free_is_valid(UFF_map));			\
MACRO_END
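
/*
 * Illustrative note (not part of the original source): after
 * UPDATE_FIRST_FREE runs, map->first_free names the entry immediately
 * before the first hole, so a typical allocator loop can start there:
 *
 *	if ((entry = map->first_free) == vm_map_to_entry(map))
 *		start = map->min_offset;    // hole at the very bottom
 *	else
 *		start = entry->vme_end;     // first hole follows this entry
 *
 * This is exactly the pattern vm_map_find_space() and vm_map_enter()
 * use below.
 */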
/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps (or map copies).
 */
#define vm_map_entry_link(map, after_where, entry)		\
MACRO_BEGIN							\
	vm_map_t	VMEL_map;				\
	vm_map_entry_t	VMEL_entry;				\
								\
	VMEL_map = (map);					\
	VMEL_entry = (entry);					\
								\
	_vm_map_entry_link(&VMEL_map->hdr, after_where, VMEL_entry); \
	UPDATE_FIRST_FREE(VMEL_map, VMEL_map->first_free);	\
MACRO_END

#define vm_map_copy_entry_link(copy, after_where, entry)	\
	_vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry))

#define _vm_map_entry_link(hdr, after_where, entry)		\
	MACRO_BEGIN						\
	(hdr)->nentries++;					\
	(entry)->vme_prev = (after_where);			\
	(entry)->vme_next = (after_where)->vme_next;		\
	(entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
	MACRO_END

#define vm_map_entry_unlink(map, entry)				\
MACRO_BEGIN							\
	vm_map_t	VMEU_map;				\
	vm_map_entry_t	VMEU_entry;				\
	vm_map_entry_t	VMEU_first_free;			\
								\
	VMEU_map = (map);					\
	VMEU_entry = (entry);					\
	if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start)	\
		VMEU_first_free = VMEU_entry->vme_prev;		\
	else							\
		VMEU_first_free = VMEU_map->first_free;		\
	_vm_map_entry_unlink(&VMEU_map->hdr, VMEU_entry);	\
	UPDATE_FIRST_FREE(VMEU_map, VMEU_first_free);		\
MACRO_END

#define vm_map_copy_entry_unlink(copy, entry)			\
	_vm_map_entry_unlink(&(copy)->cpy_hdr, (entry))

#define _vm_map_entry_unlink(hdr, entry)			\
	MACRO_BEGIN						\
	(hdr)->nentries--;					\
	(entry)->vme_next->vme_prev = (entry)->vme_prev;	\
	(entry)->vme_prev->vme_next = (entry)->vme_next;	\
	MACRO_END
#if	MACH_ASSERT && TASK_SWAPPER
/*
 *	vm_map_res_reference:
 *
 *	Adds another valid residence count to the given map.
 *
 *	Map is locked so this function can be called from
 *	vm_map_swapin.
 */
void vm_map_res_reference(register vm_map_t map)
{
	/* assert map is locked */
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	if (map->res_count == 0) {
		mutex_unlock(&map->s_lock);
		vm_map_lock(map);
		vm_map_swapin(map);
		mutex_lock(&map->s_lock);
		++map->res_count;
		vm_map_unlock(map);
	} else
		++map->res_count;
}

/*
 *	vm_map_reference_swap:
 *
 *	Adds valid reference and residence counts to the given map.
 *
 *	The map may not be in memory (i.e. zero residence count).
 */
void vm_map_reference_swap(register vm_map_t map)
{
	assert(map != VM_MAP_NULL);
	mutex_lock(&map->s_lock);
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	map->ref_count++;
	vm_map_res_reference(map);
	mutex_unlock(&map->s_lock);
}

/*
 *	vm_map_res_deallocate:
 *
 *	Decrement residence count on a map; possibly causing swapout.
 *
 *	The map must be in memory (i.e. non-zero residence count).
 *
 *	The map is locked, so this function is callable from vm_map_deallocate.
 */
void vm_map_res_deallocate(register vm_map_t map)
{
	assert(map->res_count > 0);
	if (--map->res_count == 0) {
		mutex_unlock(&map->s_lock);
		vm_map_lock(map);
		vm_map_swapout(map);
		vm_map_unlock(map);
		mutex_lock(&map->s_lock);
	}
	assert(map->ref_count >= map->res_count);
}
#endif	/* MACH_ASSERT && TASK_SWAPPER */
/*
 *	vm_map_destroy:
 *
 *	Actually destroy a map.
 */
void
vm_map_destroy(
	register vm_map_t	map)
{
	vm_map_lock(map);
	(void) vm_map_delete(map, map->min_offset,
			     map->max_offset, VM_MAP_NO_FLAGS);
	vm_map_unlock(map);

	pmap_destroy(map->pmap);

	zfree(vm_map_zone, (vm_offset_t) map);
}
#if	TASK_SWAPPER
/*
 *	vm_map_swapin/vm_map_swapout
 *
 *	Swap a map in and out, either referencing or releasing its resources.
 *	These functions are internal use only; however, they must be exported
 *	because they may be called from macros, which are exported.
 *
 *	In the case of swapout, there could be races on the residence count,
 *	so if the residence count is up, we return, assuming that a
 *	vm_map_deallocate() call in the near future will bring us back.
 *
 *	Locking:
 *	-- We use the map write lock for synchronization among races.
 *	-- The map write lock, and not the simple s_lock, protects the
 *	   swap state of the map.
 *	-- If a map entry is a share map, then we hold both locks, in
 *	   hierarchical order.
 *
 *	Synchronization Notes:
 *	1) If a vm_map_swapin() call happens while swapout in progress, it
 *	   will block on the map lock and proceed when swapout is through.
 *	2) A vm_map_reference() call at this time is illegal, and will
 *	   cause a panic.  vm_map_reference() is only allowed on resident
 *	   maps, since it refuses to block.
 *	3) A vm_map_swapin() call during a swapin will block, and
 *	   proceed when the first swapin is done, turning into a nop.
 *	   This is the reason the res_count is not incremented until
 *	   after the swapin is complete.
 *	4) There is a timing hole after the checks of the res_count, before
 *	   the map lock is taken, during which a swapin may get the lock
 *	   before a swapout about to happen.  If this happens, the swapin
 *	   will detect the state and increment the reference count, causing
 *	   the swapout to be a nop, thereby delaying it until a later
 *	   vm_map_deallocate.  If the swapout gets the lock first, then
 *	   the swapin will simply block until the swapout is done, and
 *	   then proceed.
 *
 *	Because vm_map_swapin() is potentially an expensive operation, it
 *	should be used with caution.
 *
 *	Invariants:
 *	1) A map with a residence count of zero is either swapped, or
 *	   being swapped out.
 *	2) A map with a non-zero residence count is either resident,
 *	   or being swapped in.
 */

int vm_map_swap_enable = 1;
void vm_map_swapin (vm_map_t map)
{
	register vm_map_entry_t entry;

	if (!vm_map_swap_enable)	/* debug */
		return;

	/*
	 * Map is locked.
	 * First deal with various races.
	 */
	if (map->sw_state == MAP_SW_IN)
		/*
		 * we raced with swapout and won.  Returning will incr.
		 * the res_count, turning the swapout into a nop.
		 */
		return;

	/*
	 * The residence count must be zero.  If we raced with another
	 * swapin, the state would have been IN; if we raced with a
	 * swapout (after another competing swapin), we must have lost
	 * the race to get here (see above comment), in which case
	 * res_count is still 0.
	 */
	assert(map->res_count == 0);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_OUT);

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_reference upon it.
	 * If the entry is an object, we call vm_object_res_reference
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_reference.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_reference(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may iterate through the
				 * shadow chain.
				 */
				vm_object_res_reference(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_OUT);
	map->sw_state = MAP_SW_IN;
}
void vm_map_swapout(vm_map_t map)
{
	register vm_map_entry_t entry;

	/*
	 * Map is locked.
	 * First deal with various races.
	 * If we raced with a swapin and lost, the residence count
	 * will have been incremented to 1, and we simply return.
	 */
	mutex_lock(&map->s_lock);
	if (map->res_count != 0) {
		mutex_unlock(&map->s_lock);
		return;
	}
	mutex_unlock(&map->s_lock);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_IN);

	if (!vm_map_swap_enable)
		return;

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_deallocate upon it.
	 * If the entry is an object, we call vm_object_res_deallocate
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_deallocate.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_deallocate(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may take a long time,
				 * since it could actively push
				 * out pages (if we implement it
				 * that way).
				 */
				vm_object_res_deallocate(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_IN);
	map->sw_state = MAP_SW_OUT;
}

#endif	/* TASK_SWAPPER */
/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.  Performs necessary interlocks.
 */
#define	SAVE_HINT(map,value) \
MACRO_BEGIN \
		mutex_lock(&(map)->s_lock); \
		(map)->hint = (value); \
		mutex_unlock(&(map)->s_lock); \
MACRO_END
/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	register vm_map_t	map,
	register vm_offset_t	address,
	vm_map_entry_t		*entry)		/* OUT */
{
	register vm_map_entry_t	cur;
	register vm_map_entry_t	last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */
	mutex_lock(&map->s_lock);
	cur = map->hint;
	mutex_unlock(&map->s_lock);

	if (cur == vm_map_to_entry(map)) {
		/*
		 *	The hint was not a valid entry; start
		 *	from the beginning of the list.
		 */
		cur = vm_map_first_entry(map);
	} else {
		if (address >= cur->vme_start) {
			/*
			 *	Go from hint to end of list.
			 *
			 *	But first, make a quick check to see if
			 *	we are already looking at the entry we
			 *	want (which is usually the case).
			 *	Note also that we don't need to save the hint
			 *	here... it is the same hint (unless we are
			 *	at the header, in which case the hint didn't
			 *	buy us anything anyway).
			 */
			last = vm_map_to_entry(map);
			if ((cur != last) && (cur->vme_end > address)) {
				*entry = cur;
				return(TRUE);
			}
		} else {
			/*
			 *	Go from start to hint, *inclusively*
			 */
			last = cur->vme_next;
			cur = vm_map_first_entry(map);
		}
	}

	/*
	 *	Search linearly
	 */
	while (cur != last) {
		if (cur->vme_end > address) {
			if (address >= cur->vme_start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */
				*entry = cur;
				SAVE_HINT(map, cur);
				return(TRUE);
			}
			break;
		}
		cur = cur->vme_next;
	}
	*entry = cur->vme_prev;
	SAVE_HINT(map, *entry);
	return(FALSE);
}
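
/*
 * Illustrative sketch (not part of the original source): callers treat
 * the boolean result as "address is mapped" and the OUT entry as the
 * place to resume walking, e.g. to test whether [start, end) is backed
 * by a single entry:
 *
 *	vm_map_entry_t	entry;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, start, &entry) &&
 *	    end <= entry->vme_end) {
 *		// whole range lies inside one entry
 *	}
 *	vm_map_unlock_read(map);
 *
 * vm_map_range_check() below generalizes this check across adjacent
 * entries.
 */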
/*
 *	Routine:	vm_map_find_space
 *	Purpose:
 *		Allocate a range in the specified virtual address map,
 *		returning the entry allocated for that range.
 *		Used by kmem_alloc, etc.
 *
 *		The map must NOT be locked.  It will be returned locked
 *		on KERN_SUCCESS, unlocked on failure.
 *
 *		If an entry is allocated, the object/offset fields
 *		are initialized to zero.
 */
kern_return_t
vm_map_find_space(
	register vm_map_t	map,
	vm_offset_t		*address,	/* OUT */
	vm_size_t		size,
	vm_offset_t		mask,
	vm_map_entry_t		*o_entry)	/* OUT */
{
	register vm_map_entry_t	entry, new_entry;
	register vm_offset_t	start;
	register vm_offset_t	end;

	new_entry = vm_map_entry_create(map);

	/*
	 *	Look for the first possible address; if there's already
	 *	something at this address, we have to start after it.
	 */

	vm_map_lock(map);

	assert(first_free_is_valid(map));
	if ((entry = map->first_free) == vm_map_to_entry(map))
		start = map->min_offset;
	else
		start = entry->vme_end;

	/*
	 *	In any case, the "entry" always precedes
	 *	the proposed new region throughout the loop:
	 */

	while (TRUE) {
		register vm_map_entry_t	next;

		/*
		 *	Find the end of the proposed new region.
		 *	Be sure we didn't go beyond the end, or
		 *	wrap around the address.
		 */

		end = ((start + mask) & ~mask);
		if (end < start) {
			vm_map_entry_dispose(map, new_entry);
			vm_map_unlock(map);
			return(KERN_NO_SPACE);
		}
		start = end;
		end += size;

		if ((end > map->max_offset) || (end < start)) {
			vm_map_entry_dispose(map, new_entry);
			vm_map_unlock(map);
			return(KERN_NO_SPACE);
		}

		/*
		 *	If there are no more entries, we must win.
		 */

		next = entry->vme_next;
		if (next == vm_map_to_entry(map))
			break;

		/*
		 *	If there is another entry, it must be
		 *	after the end of the potential new region.
		 */

		if (next->vme_start >= end)
			break;

		/*
		 *	Didn't fit -- move to the next entry.
		 */

		entry = next;
		start = entry->vme_end;
	}

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */

	*address = start;

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->is_shared = FALSE;
	new_entry->is_sub_map = FALSE;
	new_entry->use_pmap = FALSE;
	new_entry->object.vm_object = VM_OBJECT_NULL;
	new_entry->offset = (vm_object_offset_t) 0;

	new_entry->needs_copy = FALSE;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = VM_PROT_DEFAULT;
	new_entry->max_protection = VM_PROT_ALL;
	new_entry->behavior = VM_BEHAVIOR_DEFAULT;
	new_entry->wired_count = 0;
	new_entry->user_wired_count = 0;

	new_entry->in_transition = FALSE;
	new_entry->needs_wakeup = FALSE;

	/*
	 *	Insert the new entry into the list
	 */

	vm_map_entry_link(map, entry, new_entry);

	map->size += size;

	/*
	 *	Update the lookup hint
	 */
	SAVE_HINT(map, new_entry);

	*o_entry = new_entry;
	return(KERN_SUCCESS);
}
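
/*
 * Illustrative sketch (not part of the original source): a kernel
 * allocator in the style of kmem_alloc() would use this routine roughly
 * as follows -- the map comes back locked on success, so the caller
 * fills in the entry and then unlocks:
 *
 *	vm_offset_t	addr;
 *	vm_map_entry_t	entry;
 *	kern_return_t	kr;
 *
 *	kr = vm_map_find_space(kernel_map, &addr, size,
 *			       (vm_offset_t) 0, &entry);
 *	if (kr == KERN_SUCCESS) {
 *		entry->object.vm_object = object;   // caller's object
 *		entry->offset = 0;
 *		vm_map_unlock(kernel_map);
 *	}
 *
 * The object/offset assignment shown here is a simplification of what
 * the real kmem path does; it is meant only to show the locking contract.
 */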
int vm_map_pmap_enter_print = FALSE;
int vm_map_pmap_enter_enable = FALSE;

/*
 *	Routine:	vm_map_pmap_enter
 *
 *	Description:
 *		Force pages from the specified object to be entered into
 *		the pmap at the specified address if they are present.
 *		As soon as a page is not found in the object, the scan ends.
 *
 *	In/out conditions:
 *		The source map should not be locked on entry.
 */
void
vm_map_pmap_enter(
	vm_map_t		map,
	register vm_offset_t	addr,
	register vm_offset_t	end_addr,
	register vm_object_t	object,
	vm_object_offset_t	offset,
	vm_prot_t		protection)
{
	unsigned int		cache_attr;

	while (addr < end_addr) {
		register vm_page_t	m;

		vm_object_lock(object);
		vm_object_paging_begin(object);

		m = vm_page_lookup(object, offset);
		if (m == VM_PAGE_NULL || m->busy ||
		    (m->unusual && (m->error || m->restart || m->absent ||
				    protection & m->page_lock))) {

			vm_object_paging_end(object);
			vm_object_unlock(object);
			return;
		}

		assert(!m->fictitious);	/* XXX is this possible ??? */

		if (vm_map_pmap_enter_print) {
			printf("vm_map_pmap_enter:");
			printf("map: %x, addr: %x, object: %x, offset: %x\n",
			       map, addr, object, offset);
		}
		m->busy = TRUE;

		if (m->no_isync == TRUE) {
			pmap_sync_caches_phys(m->phys_page);
			m->no_isync = FALSE;
		}

		cache_attr = ((unsigned int)object->wimg_bits) & VM_WIMG_MASK;
		vm_object_unlock(object);

		PMAP_ENTER(map->pmap, addr, m,
			   protection, cache_attr, FALSE);

		vm_object_lock(object);

		PAGE_WAKEUP_DONE(m);
		vm_page_lock_queues();
		if (!m->active && !m->inactive)
			vm_page_activate(m);
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		vm_object_unlock(object);

		offset += PAGE_SIZE_64;
		addr += PAGE_SIZE;
	}
}
/*
 *	Routine:	vm_map_enter
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		The resulting range will refer to memory defined by
 *		the given memory object and offset into that object.
 *
 *		Arguments are as defined in the vm_map call.
 */
kern_return_t
vm_map_enter(
	register vm_map_t	map,
	vm_offset_t		*address,	/* IN/OUT */
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_entry_t		entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	kern_return_t		result = KERN_SUCCESS;

	boolean_t		anywhere = VM_FLAGS_ANYWHERE & flags;
	char			alias;

	VM_GET_FLAGS_ALIAS(flags, alias);

#define	RETURN(value)	{ result = value; goto BailOut; }

	assert(page_aligned(*address));
	assert(page_aligned(size));
 StartAgain: ;

	start = *address;

	if (anywhere) {
		vm_map_lock(map);
		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset)
			RETURN(KERN_NO_SPACE);

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		assert(first_free_is_valid(map));
		if (start == map->min_offset) {
			if ((entry = map->first_free) != vm_map_to_entry(map))
				start = entry->vme_end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->vme_end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = ((start + mask) & ~mask);
			if (end < start)
				RETURN(KERN_NO_SPACE);
			start = end;
			end += size;

			if ((end > map->max_offset) || (end < start)) {
				if (map->wait_for_space) {
					if (size <= (map->max_offset -
						     map->min_offset)) {
						assert_wait((event_t)map,
							    THREAD_ABORTSAFE);
						vm_map_unlock(map);
						thread_block((void (*)(void))0);
						goto StartAgain;
					}
				}
				RETURN(KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->vme_next;
			if (next == vm_map_to_entry(map))
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->vme_start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->vme_end;
		}
		*address = start;
	} else {
		vm_map_entry_t		temp_entry;

		/*
		 *	Verify that:
		 *		the address doesn't itself violate
		 *		the mask requirement.
		 */

		vm_map_lock(map);
		if ((start & mask) != 0)
			RETURN(KERN_NO_SPACE);

		/*
		 *	...	the address is within bounds
		 */

		end = start + size;

		if ((start < map->min_offset) ||
		    (end > map->max_offset) ||
		    (start >= end)) {
			RETURN(KERN_INVALID_ADDRESS);
		}

		/*
		 *	...	the starting address isn't allocated
		 */

		if (vm_map_lookup_entry(map, start, &temp_entry))
			RETURN(KERN_NO_SPACE);

		entry = temp_entry;

		/*
		 *	...	the next region doesn't overlap the
		 *		end point.
		 */

		if ((entry->vme_next != vm_map_to_entry(map)) &&
		    (entry->vme_next->vme_start < end))
			RETURN(KERN_NO_SPACE);
	}

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */
	/*
	 *	See whether we can avoid creating a new entry (and object) by
	 *	extending one of our neighbors.  [So far, we only attempt to
	 *	extend from below.]
	 */

	if ((object == VM_OBJECT_NULL) &&
	    (entry != vm_map_to_entry(map)) &&
	    (entry->vme_end == start) &&
	    (!entry->is_shared) &&
	    (!entry->is_sub_map) &&
	    (entry->alias == alias) &&
	    (entry->inheritance == inheritance) &&
	    (entry->protection == cur_protection) &&
	    (entry->max_protection == max_protection) &&
	    (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
	    (entry->in_transition == 0) &&
	    ((alias == VM_MEMORY_REALLOC) ||
	     ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT)) &&
	    (entry->wired_count == 0)) { /* implies user_wired_count == 0 */
		if (vm_object_coalesce(entry->object.vm_object,
				VM_OBJECT_NULL,
				entry->offset,
				(vm_object_offset_t) 0,
				(vm_size_t)(entry->vme_end - entry->vme_start),
				(vm_size_t)(end - entry->vme_end))) {

			/*
			 *	Coalesced the two objects - can extend
			 *	the previous map entry to include the
			 *	new range.
			 */
			map->size += (end - entry->vme_end);
			entry->vme_end = end;
			UPDATE_FIRST_FREE(map, map->first_free);
			RETURN(KERN_SUCCESS);
		}
	}
	/*
	 *	Create a new entry
	 */

	{
		register vm_map_entry_t	new_entry;

		new_entry = vm_map_entry_insert(map, entry, start, end, object,
						offset, needs_copy, FALSE, FALSE,
						cur_protection, max_protection,
						VM_BEHAVIOR_DEFAULT, inheritance, 0);
		new_entry->alias = alias;
		vm_map_unlock(map);

		/*	Wire down the new entry if the user
		 *	requested all new map entries be wired.
		 */
		if (map->wiring_required) {
			result = vm_map_wire(map, start, end,
					     new_entry->protection, TRUE);
			return(result);
		}

		if ((object != VM_OBJECT_NULL) &&
		    (vm_map_pmap_enter_enable) &&
		    (!anywhere) &&
		    (!needs_copy) &&
		    (size < (128*1024))) {
			vm_map_pmap_enter(map, start, end,
					  object, offset, cur_protection);
		}
	}

 BailOut: ;
	return(result);

#undef	RETURN
}
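
/*
 * Illustrative sketch (not part of the original source): the classic
 * anonymous-memory caller passes a null object and lets the kernel pick
 * the address, roughly what vm_allocate() does on top of this routine:
 *
 *	vm_offset_t	addr = 0;
 *	kern_return_t	kr;
 *
 *	kr = vm_map_enter(map, &addr, size,
 *			  (vm_offset_t) 0,		// mask
 *			  VM_FLAGS_ANYWHERE,
 *			  VM_OBJECT_NULL, (vm_object_offset_t) 0,
 *			  FALSE,			// needs_copy
 *			  VM_PROT_DEFAULT, VM_PROT_ALL,
 *			  VM_INHERIT_DEFAULT);
 *
 * Treat the call shape as a sketch of the argument order rather than a
 * verbatim caller from the rest of the kernel.
 */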
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef i386
#define vm_map_clip_start(map, entry, startaddr) \
MACRO_BEGIN \
	vm_map_t VMCS_map; \
	vm_map_entry_t VMCS_entry; \
	vm_offset_t VMCS_startaddr; \
	VMCS_map = (map); \
	VMCS_entry = (entry); \
	VMCS_startaddr = (startaddr); \
	if (VMCS_startaddr > VMCS_entry->vme_start) { \
		if(entry->use_pmap) { \
			vm_offset_t	pmap_base_addr; \
			\
			pmap_base_addr = 0xF0000000 & entry->vme_start; \
			pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
			entry->use_pmap = FALSE; \
		} else if(entry->object.vm_object \
			&& !entry->is_sub_map \
			&& entry->object.vm_object->phys_contiguous) { \
			pmap_remove(map->pmap, \
				(addr64_t)(entry->vme_start), \
				(addr64_t)(entry->vme_end)); \
		} \
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	} \
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
MACRO_END
#else
#define vm_map_clip_start(map, entry, startaddr) \
MACRO_BEGIN \
	vm_map_t VMCS_map; \
	vm_map_entry_t VMCS_entry; \
	vm_offset_t VMCS_startaddr; \
	VMCS_map = (map); \
	VMCS_entry = (entry); \
	VMCS_startaddr = (startaddr); \
	if (VMCS_startaddr > VMCS_entry->vme_start) { \
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	} \
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
MACRO_END
#endif

#define vm_map_copy_clip_start(copy, entry, startaddr) \
	MACRO_BEGIN \
	if ((startaddr) > (entry)->vme_start) \
		_vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
	MACRO_END
/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_start(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		start)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Split off the front portion --
	 *	note that we must insert the new
	 *	entry BEFORE this one, so that
	 *	this entry has the specified starting
	 *	address.
	 */

	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_end = start;
	entry->offset += (start - entry->vme_start);
	entry->vme_start = start;

	_vm_map_entry_link(map_header, entry->vme_prev, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef i386
#define vm_map_clip_end(map, entry, endaddr) \
MACRO_BEGIN \
	vm_map_t VMCE_map; \
	vm_map_entry_t VMCE_entry; \
	vm_offset_t VMCE_endaddr; \
	VMCE_map = (map); \
	VMCE_entry = (entry); \
	VMCE_endaddr = (endaddr); \
	if (VMCE_endaddr < VMCE_entry->vme_end) { \
		if(entry->use_pmap) { \
			vm_offset_t	pmap_base_addr; \
			\
			pmap_base_addr = 0xF0000000 & entry->vme_start; \
			pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
			entry->use_pmap = FALSE; \
		} else if(entry->object.vm_object \
			&& !entry->is_sub_map \
			&& entry->object.vm_object->phys_contiguous) { \
			pmap_remove(map->pmap, \
				(addr64_t)(entry->vme_start), \
				(addr64_t)(entry->vme_end)); \
		} \
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	} \
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \
MACRO_END
#else
#define vm_map_clip_end(map, entry, endaddr) \
MACRO_BEGIN \
	vm_map_t VMCE_map; \
	vm_map_entry_t VMCE_entry; \
	vm_offset_t VMCE_endaddr; \
	VMCE_map = (map); \
	VMCE_entry = (entry); \
	VMCE_endaddr = (endaddr); \
	if (VMCE_endaddr < VMCE_entry->vme_end) { \
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	} \
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \
MACRO_END
#endif

#define vm_map_copy_clip_end(copy, entry, endaddr) \
	MACRO_BEGIN \
	if ((endaddr) < (entry)->vme_end) \
		_vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
	MACRO_END
/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_end(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		end)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */

	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_start = entry->vme_end = end;
	new_entry->offset += (end - entry->vme_start);

	_vm_map_entry_link(map_header, entry, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
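
/*
 * Illustrative sketch (not part of the original source): the clip
 * macros are normally used as a pair right after a lookup, so that an
 * operation on [start, end) sees entries that begin and end exactly on
 * those addresses -- the same pattern vm_map_submap(), vm_map_protect()
 * and vm_map_wire() follow below:
 *
 *	vm_map_entry_t	entry;
 *
 *	vm_map_lock(map);
 *	if (vm_map_lookup_entry(map, start, &entry)) {
 *		vm_map_clip_start(map, entry, start);
 *		vm_map_clip_end(map, entry, end);
 *		// operate on the clipped entry (or walk vme_next)
 *	}
 *	vm_map_unlock(map);
 */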
/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)	\
	{					\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
	}
/*
 *	vm_map_range_check:	[ internal use only ]
 *
 *	Check that the region defined by the specified start and
 *	end addresses is wholly contained within a single map
 *	entry or set of adjacent map entries of the specified map,
 *	i.e. the specified region contains no unmapped space.
 *	If any or all of the region is unmapped, FALSE is returned.
 *	Otherwise, TRUE is returned and if the output argument 'entry'
 *	is not NULL it points to the map entry containing the start
 *	of the region.
 *
 *	The map is locked for reading on entry and is left locked.
 */
boolean_t
vm_map_range_check(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_entry_t		*entry)
{
	vm_map_entry_t		cur;
	register vm_offset_t	prev;

	/*
	 *	Basic sanity checks first
	 */
	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
		return (FALSE);

	/*
	 *	Check first if the region starts within a valid
	 *	mapping for the map.
	 */
	if (!vm_map_lookup_entry(map, start, &cur))
		return (FALSE);

	/*
	 *	Optimize for the case that the region is contained
	 *	in a single map entry.
	 */
	if (entry != (vm_map_entry_t *) NULL)
		*entry = cur;
	if (end <= cur->vme_end)
		return (TRUE);

	/*
	 *	If the region is not wholly contained within a
	 *	single entry, walk the entries looking for holes.
	 */
	prev = cur->vme_end;
	cur = cur->vme_next;
	while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) {
		if (end <= cur->vme_end)
			return (TRUE);
		prev = cur->vme_end;
		cur = cur->vme_next;
	}
	return (FALSE);
}
/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find using
 *	the vm_submap_object, and no other operations may have been
 *	performed on this range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copyin!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
kern_return_t
vm_map_submap(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_t		submap,
	vm_offset_t		offset,
	boolean_t		use_pmap)
{
	vm_map_entry_t		entry;
	register kern_return_t	result = KERN_INVALID_ARGUMENT;
	register vm_object_t	object;

	vm_map_lock(map);

	submap->mapped = TRUE;

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = entry->vme_next;

	if(entry == vm_map_to_entry(map)) {
		vm_map_unlock(map);
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_clip_end(map, entry, end);

	if ((entry->vme_start == start) && (entry->vme_end == end) &&
	    (!entry->is_sub_map) &&
	    ((object = entry->object.vm_object) == vm_submap_object) &&
	    (object->resident_page_count == 0) &&
	    (object->copy == VM_OBJECT_NULL) &&
	    (object->shadow == VM_OBJECT_NULL) &&
	    (!object->pager_created)) {
		entry->offset = (vm_object_offset_t)offset;
		entry->object.vm_object = VM_OBJECT_NULL;
		vm_object_deallocate(object);
		entry->is_sub_map = TRUE;
		entry->object.sub_map = submap;
		vm_map_reference(submap);

		if ((use_pmap) && (offset == 0)) {
			/* nest if platform code will allow */
			if(submap->pmap == NULL) {
				submap->pmap = pmap_create((vm_size_t) 0);
				if(submap->pmap == PMAP_NULL) {
					vm_map_unlock(map);
					return(KERN_NO_SPACE);
				}
			}
			result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap,
					   (addr64_t)start, (addr64_t)start,
					   (uint64_t)(end - start));
			if(result)
				panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result);
			entry->use_pmap = TRUE;
		}

		pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end);

		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return(result);
}
/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
kern_return_t
vm_map_protect(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	new_prot,
	register boolean_t	set_max)
{
	register vm_map_entry_t	current;
	register vm_offset_t	prev;
	vm_map_entry_t		entry;
	vm_prot_t		new_max;
	boolean_t		clip;

	XPR(XPR_VM_MAP,
	    "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d",
	    (integer_t)map, start, end, new_prot, set_max);

	vm_map_lock(map);

	/*
	 *	Lookup the entry.  If it doesn't start in a valid
	 *	entry, return an error.  Remember if we need to
	 *	clip the entry.  We don't do it here because we don't
	 *	want to make any changes until we've scanned the
	 *	entire range below for address and protection
	 *	violations.
	 */
	if (!(clip = vm_map_lookup_entry(map, start, &entry))) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 *	Make a first pass to check for protection and address
	 *	violations.
	 */

	current = entry;
	prev = current->vme_start;
	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		/*
		 *	If there is a hole, return an error.
		 */
		if (current->vme_start != prev) {
			vm_map_unlock(map);
			return(KERN_INVALID_ADDRESS);
		}

		new_max = current->max_protection;
		if(new_prot & VM_PROT_COPY) {
			new_max |= VM_PROT_WRITE;
			if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) {
				vm_map_unlock(map);
				return(KERN_PROTECTION_FAILURE);
			}
		} else {
			if ((new_prot & new_max) != new_prot) {
				vm_map_unlock(map);
				return(KERN_PROTECTION_FAILURE);
			}
		}

		prev = current->vme_end;
		current = current->vme_next;
	}
	if (end > prev) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}
	/*
	 *	Go back and fix up protections.
	 *	Clip to start here if the range starts within
	 *	the entry.
	 */

	current = entry;
	if (clip) {
		vm_map_clip_start(map, entry, start);
	}
	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		vm_prot_t	old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;

		if(new_prot & VM_PROT_COPY) {
			/* caller is asking specifically to copy the      */
			/* mapped data, this implies that max protection  */
			/* will include write.  Caller must be prepared   */
			/* for loss of shared memory communication in the */
			/* target area after taking this step */
			current->needs_copy = TRUE;
			current->max_protection |= VM_PROT_WRITE;
		}

		if (set_max)
			current->protection =
				(current->max_protection =
				 new_prot & ~VM_PROT_COPY) &
				old_prot;
		else
			current->protection = new_prot & ~VM_PROT_COPY;

		/*
		 *	Update physical map if necessary.
		 *	If the request is to turn off write protection,
		 *	we won't do it for real (in pmap).  This is because
		 *	it would cause copy-on-write to fail.  We've already
		 *	set the new protection in the map, so if a
		 *	write-protect fault occurred, it will be fixed up
		 *	properly, COW or not.
		 */
		/* the 256M hack for existing hardware limitations */
		if (current->protection != old_prot) {
			if(current->is_sub_map && current->use_pmap) {
				vm_offset_t	pmap_base_addr;
				vm_offset_t	pmap_end_addr;
				vm_map_entry_t	local_entry;

				pmap_base_addr = 0xF0000000 & current->vme_start;
				pmap_end_addr = (pmap_base_addr + 0x10000000) - 1;

				if(!vm_map_lookup_entry(map,
						pmap_base_addr, &local_entry))
					panic("vm_map_protect: nested pmap area is missing");
				while ((local_entry != vm_map_to_entry(map)) &&
				       (local_entry->vme_start < pmap_end_addr)) {
					local_entry->use_pmap = FALSE;
					local_entry = local_entry->vme_next;
				}
				pmap_unnest(map->pmap, (addr64_t)pmap_base_addr);
			}
			if (!(current->protection & VM_PROT_WRITE)) {
				/* Look one level in: we support nested pmaps */
				/* from mapped submaps which are direct entries */
				/* in our map */
				if(current->is_sub_map && current->use_pmap) {
					pmap_protect(current->object.sub_map->pmap,
						     current->vme_start,
						     current->vme_end,
						     current->protection);
				} else {
					pmap_protect(map->pmap, current->vme_start,
						     current->vme_end,
						     current->protection);
				}
			}
		}
		current = current->vme_next;
	}

	/* coalesce the map entries, if possible */
	current = entry;
	while (current != vm_map_to_entry(map) &&
	       current->vme_start <= end) {
		vm_map_simplify_entry(map, current);
		current = current->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
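
/*
 * Illustrative sketch (not part of the original source): a caller that
 * wants to drop write permission on a page-aligned range, without
 * touching the maximum protection, would do roughly:
 *
 *	kern_return_t kr;
 *
 *	kr = vm_map_protect(map, trunc_page_32(addr),
 *			    round_page_32(addr + len),
 *			    VM_PROT_READ, FALSE);	// set_max == FALSE
 *	if (kr == KERN_PROTECTION_FAILURE)
 *		;	// request exceeded max_protection somewhere in range
 *
 * trunc_page_32/round_page_32 are used here only because they already
 * appear in this file; the rounding itself is the caller's concern.
 */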
/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
kern_return_t
vm_map_inherit(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_inherit_t	new_inheritance)
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	}
	else {
		temp_entry = temp_entry->vme_next;
		entry = temp_entry;
	}

	/* first check entire range for submaps which can't support the */
	/* given inheritance. */
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			if(new_inheritance == VM_INHERIT_COPY) {
				vm_map_unlock(map);
				return(KERN_INVALID_ARGUMENT);
			}
		}
		entry = entry->vme_next;
	}

	entry = temp_entry;

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
/*
 *	vm_map_wire:
 *
 *	Sets the pageability of the specified address range in the
 *	target map as wired.  Regions specified as not pageable require
 *	locked-down physical memory and physical page maps.  The
 *	access_type variable indicates types of accesses that must not
 *	generate page faults.  This is checked against protection of
 *	memory being locked-down.
 *
 *	The map must not be locked, but a reference must remain to the
 *	map throughout the call.
 */
kern_return_t
vm_map_wire_nested(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	access_type,
	boolean_t		user_wire,
	pmap_t			map_pmap,
	vm_offset_t		pmap_addr)
{
	register vm_map_entry_t	entry;
	struct vm_map_entry	*first_entry, tmp_entry;
	vm_map_t		pmap_map;
	register vm_offset_t	s, e;
	kern_return_t		rc;
	boolean_t		need_wakeup;
	boolean_t		main_map = FALSE;
	wait_interrupt_t	interruptible_state;
	thread_t		cur_thread;
	unsigned int		last_timestamp;
	vm_size_t		size;

	vm_map_lock(map);
	if(map_pmap == NULL)
		main_map = TRUE;
	last_timestamp = map->timestamp;

	VM_MAP_RANGE_CHECK(map, start, end);
	assert(page_aligned(start));
	assert(page_aligned(end));
	if (start == end) {
		/* We wired what the caller asked for, zero pages */
		vm_map_unlock(map);
		return KERN_SUCCESS;
	}

	if (vm_map_lookup_entry(map, start, &first_entry)) {
		entry = first_entry;
		/* vm_map_clip_start will be done later. */
	} else {
		/* Start address is not in map */
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}
	need_wakeup = FALSE;
	cur_thread = current_thread();
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		/*
		 * If another thread is wiring/unwiring this entry then
		 * block after informing other thread to wake us up.
		 */
		if (entry->in_transition) {
			wait_result_t wait_result;

			/*
			 * We have not clipped the entry.  Make sure that
			 * the start address is in range so that the lookup
			 * below will succeed.
			 */
			s = entry->vme_start < start ? start : entry->vme_start;

			entry->needs_wakeup = TRUE;

			/*
			 * wake up anybody waiting on entries that we have
			 * already wired.
			 */
			if (need_wakeup) {
				vm_map_entry_wakeup(map);
				need_wakeup = FALSE;
			}
			/*
			 * User wiring is interruptible
			 */
			wait_result = vm_map_entry_wait(map,
					(user_wire) ? THREAD_ABORTSAFE :
						      THREAD_UNINT);
			if (user_wire && wait_result == THREAD_INTERRUPTED) {
				/*
				 * undo the wirings we have done so far
				 * We do not clear the needs_wakeup flag,
				 * because we cannot tell if we were the
				 * only one waiting.
				 */
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
				return(KERN_FAILURE);
			}

			/*
			 * Cannot avoid a lookup here.  Reset timestamp.
			 */
			last_timestamp = map->timestamp;

			/*
			 * The entry could have been clipped, look it up again.
			 * The worst that can happen is that it may not exist
			 * anymore.
			 */
			if (!vm_map_lookup_entry(map, s, &first_entry)) {
				if (!user_wire)
					panic("vm_map_wire: re-lookup failed");

				/*
				 * User: undo everything up to the previous
				 * entry.  Let vm_map_unwire worry about
				 * checking the validity of the range.
				 */
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
				return(KERN_FAILURE);
			}
			entry = first_entry;
			continue;
		}
		if(entry->is_sub_map) {
			vm_offset_t	sub_start;
			vm_offset_t	sub_end;
			vm_offset_t	local_start;
			vm_offset_t	local_end;
			pmap_t		pmap;

			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);

			sub_start = entry->offset;
			sub_end = entry->vme_end - entry->vme_start;
			sub_end += entry->offset;
			local_end = entry->vme_end;
			if(map_pmap == NULL) {
				if(entry->use_pmap) {
					pmap = entry->object.sub_map->pmap;
					/* ppc implementation requires that */
					/* submaps pmap address ranges line */
					/* up with parent map */
					pmap_addr = sub_start;
				} else {
					pmap = map->pmap;
					pmap_addr = start;
				}
				if (entry->wired_count) {
					if (entry->wired_count
					    >= MAX_WIRE_COUNT)
						panic("vm_map_wire: too many wirings");

					if (user_wire &&
					    entry->user_wired_count
					    >= MAX_WIRE_COUNT) {
						vm_map_unlock(map);
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					if (user_wire)
						entry->user_wired_count++;
					if ((!user_wire) ||
					    (entry->user_wired_count == 0))
						entry->wired_count++;
					entry = entry->vme_next;
					continue;

				} else {
					vm_object_t		object;
					vm_object_offset_t	offset_hi;
					vm_object_offset_t	offset_lo;
					vm_object_offset_t	offset;
					vm_prot_t		prot;
					boolean_t		wired;
					vm_behavior_t		behavior;
					vm_map_entry_t		local_entry;
					vm_map_version_t	version;
					vm_map_t		lookup_map;

					/* call vm_map_lookup_locked to */
					/* cause any needs copy to be   */
					/* evaluated */
					local_start = entry->vme_start;
					lookup_map = map;
					vm_map_lock_write_to_read(map);
					if(vm_map_lookup_locked(
						&lookup_map, local_start,
						access_type,
						&version, &object,
						&offset, &prot, &wired,
						&behavior, &offset_lo,
						&offset_hi, &pmap_map)) {

						vm_map_unlock(lookup_map);
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					if(pmap_map != lookup_map)
						vm_map_unlock(pmap_map);
					vm_map_unlock_read(lookup_map);
					vm_map_lock(map);
					vm_object_unlock(object);

					if (!vm_map_lookup_entry(map,
						local_start, &local_entry)) {
						vm_map_unlock(map);
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					/* did we have a change of type? */
					if (!local_entry->is_sub_map) {
						last_timestamp = map->timestamp;
						continue;
					}
					entry = local_entry;
					if (user_wire)
						entry->user_wired_count++;
					if ((!user_wire) ||
					    (entry->user_wired_count == 1))
						entry->wired_count++;

					entry->in_transition = TRUE;

					vm_map_unlock(map);
					rc = vm_map_wire_nested(
						entry->object.sub_map,
						sub_start, sub_end,
						access_type,
						user_wire, pmap, pmap_addr);
					vm_map_lock(map);
				}
			} else {
				local_start = entry->vme_start;
				if (user_wire)
					entry->user_wired_count++;
				if ((!user_wire) ||
				    (entry->user_wired_count == 1))
					entry->wired_count++;
				vm_map_unlock(map);
				rc = vm_map_wire_nested(entry->object.sub_map,
						sub_start, sub_end,
						access_type,
						user_wire, map_pmap, pmap_addr);
				vm_map_lock(map);
			}
			s = entry->vme_start;
			e = entry->vme_end;
			/*
			 * Find the entry again.  It could have been clipped
			 * after we unlocked the map.
			 */
			if (!vm_map_lookup_entry(map, local_start,
						 &first_entry))
				panic("vm_map_wire: re-lookup failed");
			entry = first_entry;

			last_timestamp = map->timestamp;
			while ((entry != vm_map_to_entry(map)) &&
			       (entry->vme_start < e)) {
				assert(entry->in_transition);
				entry->in_transition = FALSE;
				if (entry->needs_wakeup) {
					entry->needs_wakeup = FALSE;
					need_wakeup = TRUE;
				}
				if (rc != KERN_SUCCESS) {	/* from vm_*_wire */
					if (user_wire)
						entry->user_wired_count--;
					if ((!user_wire) ||
					    (entry->user_wired_count == 0))
						entry->wired_count--;
				}
				entry = entry->vme_next;
			}
			if (rc != KERN_SUCCESS) {		/* from vm_*_wire */
				vm_map_unlock(map);
				if (need_wakeup)
					vm_map_entry_wakeup(map);
				/*
				 * undo everything up to the previous entry.
				 */
				(void)vm_map_unwire(map, start, s, user_wire);
				return rc;
			}
			continue;
		}
		/*
		 *	If this entry is already wired then increment
		 *	the appropriate wire reference count.
		 */
		if (entry->wired_count) {
			/* sanity check: wired_count is a short */
			if (entry->wired_count >= MAX_WIRE_COUNT)
				panic("vm_map_wire: too many wirings");

			if (user_wire &&
			    entry->user_wired_count >= MAX_WIRE_COUNT) {
				vm_map_unlock(map);
				vm_map_unwire(map, start,
					entry->vme_start, user_wire);
				return(KERN_FAILURE);
			}
			/*
			 *	entry is already wired down, get our reference
			 *	after clipping to our range.
			 */
			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);
			if (user_wire)
				entry->user_wired_count++;
			if ((!user_wire) || (entry->user_wired_count == 1))
				entry->wired_count++;

			entry = entry->vme_next;
			continue;
		}
		/*
		 *	Unwired entry or wire request transmitted via submap
		 */

		/*
		 *	Perform actions of vm_map_lookup that need the write
		 *	lock on the map: create a shadow object for a
		 *	copy-on-write region, or an object for a zero-fill
		 *	region.
		 */
		size = entry->vme_end - entry->vme_start;
		/*
		 * If wiring a copy-on-write page, we need to copy it now
		 * even if we're only (currently) requesting read access.
		 * This is aggressive, but once it's wired we can't move it.
		 */
		if (entry->needs_copy) {
			vm_object_shadow(&entry->object.vm_object,
					 &entry->offset, size);
			entry->needs_copy = FALSE;
		} else if (entry->object.vm_object == VM_OBJECT_NULL) {
			entry->object.vm_object = vm_object_allocate(size);
			entry->offset = (vm_object_offset_t)0;
		}

		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);

		s = entry->vme_start;
		e = entry->vme_end;
		/*
		 * Check for holes and protection mismatch.
		 * Holes: Next entry should be contiguous unless this
		 *	  is the end of the region.
		 * Protection: Access requested must be allowed, unless
		 *	  wiring is by protection class
		 */
		if ((((entry->vme_end < end) &&
		      ((entry->vme_next == vm_map_to_entry(map)) ||
		       (entry->vme_next->vme_start > entry->vme_end))) ||
		     ((entry->protection & access_type) != access_type))) {
			/*
			 *	Found a hole or protection problem.
			 *	Unwire the region we wired so far.
			 */
			if (start != entry->vme_start) {
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
			} else {
				vm_map_unlock(map);
			}
			return((entry->protection & access_type) != access_type ?
				KERN_PROTECTION_FAILURE : KERN_INVALID_ADDRESS);
		}

		assert(entry->wired_count == 0 && entry->user_wired_count == 0);

		if (user_wire)
			entry->user_wired_count++;
		if ((!user_wire) || (entry->user_wired_count == 1))
			entry->wired_count++;

		entry->in_transition = TRUE;
2423 * This entry might get split once we unlock the map.
2424 * In vm_fault_wire(), we need the current range as
2425 * defined by this entry. In order for this to work
2426 * along with a simultaneous clip operation, we make a
2427 * temporary copy of this entry and use that for the
2428 * wiring. Note that the underlying objects do not
2429 * change during a clip.
2434 * The in_transition state guarentees that the entry
2435 * (or entries for this range, if split occured) will be
2436 * there when the map lock is acquired for the second time.
2440 if (!user_wire
&& cur_thread
!= THREAD_NULL
)
2441 interruptible_state
= thread_interrupt_level(THREAD_UNINT
);
2444 rc
= vm_fault_wire(map
,
2445 &tmp_entry
, map_pmap
, pmap_addr
);
2447 rc
= vm_fault_wire(map
,
2448 &tmp_entry
, map
->pmap
,
2449 tmp_entry
.vme_start
);
2451 if (!user_wire
&& cur_thread
!= THREAD_NULL
)
2452 thread_interrupt_level(interruptible_state
);
2456 if (last_timestamp
+1 != map
->timestamp
) {
2458 * Find the entry again. It could have been clipped
2459 * after we unlocked the map.
2461 if (!vm_map_lookup_entry(map
, tmp_entry
.vme_start
,
2463 panic("vm_map_wire: re-lookup failed");
2465 entry
= first_entry
;
2468 last_timestamp
= map
->timestamp
;
2470 while ((entry
!= vm_map_to_entry(map
)) &&
2471 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2472 assert(entry
->in_transition
);
2473 entry
->in_transition
= FALSE
;
2474 if (entry
->needs_wakeup
) {
2475 entry
->needs_wakeup
= FALSE
;
2478 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2480 entry
->user_wired_count
--;
2482 (entry
->user_wired_count
== 0))
2483 entry
->wired_count
--;
2485 entry
= entry
->vme_next
;
2488 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2491 vm_map_entry_wakeup(map
);
2493 * undo everything upto the previous entry.
2495 (void)vm_map_unwire(map
, start
, s
, user_wire
);
2498 } /* end while loop through map entries */
2502 * wake up anybody waiting on entries we wired.
2505 vm_map_entry_wakeup(map
);
2507 return(KERN_SUCCESS
);
kern_return_t
vm_map_wire(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	access_type,
	boolean_t		user_wire)
{
	kern_return_t	kret;

#ifdef ppc
	/*
	 * the calls to mapping_prealloc and mapping_relpre
	 * (along with the VM_MAP_RANGE_CHECK to insure a
	 * resonable range was passed in) are
	 * currently necessary because
	 * we haven't enabled kernel pre-emption
	 * and/or the pmap_enter cannot purge and re-use
	 * existing mappings
	 */
	VM_MAP_RANGE_CHECK(map, start, end);
	mapping_prealloc(end - start);
#endif
	kret = vm_map_wire_nested(map, start, end, access_type,
				  user_wire, (pmap_t)NULL, 0);
#ifdef ppc
	mapping_relpre();
#endif
	return kret;
}
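/*
 * Illustrative sketch (added commentary, not from the original source):
 * a kernel caller would typically pair vm_map_wire() with a later
 * vm_map_unwire() over the same page-aligned range, e.g.
 *
 *	kr = vm_map_wire(map, trunc_page_32(addr),
 *			 round_page_32(addr + len),
 *			 VM_PROT_READ | VM_PROT_WRITE, FALSE);
 *	...
 *	kr = vm_map_unwire(map, trunc_page_32(addr),
 *			   round_page_32(addr + len), FALSE);
 *
 * The FALSE argument selects a kernel wiring (wired_count) rather than
 * a user wiring (user_wired_count).
 */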
/*
 *	vm_map_unwire:
 *
 *	Sets the pageability of the specified address range in the target
 *	map as pageable.  Regions specified must have been wired previously.
 *
 *	The map must not be locked, but a reference must remain to the map
 *	throughout the call.
 *
 *	Kernel will panic on failures.  User unwire ignores holes and
 *	unwired and intransition entries to avoid losing memory by leaving
 *	it unwired.
 */
kern_return_t
vm_map_unwire_nested(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	boolean_t		user_wire,
	pmap_t			map_pmap,
	vm_offset_t		pmap_addr)
{
	register vm_map_entry_t	entry;
	struct vm_map_entry	*first_entry, tmp_entry;
	boolean_t		need_wakeup;
	boolean_t		main_map = FALSE;
	unsigned int		last_timestamp;

	vm_map_lock(map);
	if(map_pmap == NULL)
		main_map = TRUE;
	last_timestamp = map->timestamp;

	VM_MAP_RANGE_CHECK(map, start, end);
	assert(page_aligned(start));
	assert(page_aligned(end));

	if (vm_map_lookup_entry(map, start, &first_entry)) {
		entry = first_entry;
		/* vm_map_clip_start will be done later. */
	}
	else {
		/*	Start address is not in map. */
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	need_wakeup = FALSE;
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->in_transition) {
			/*
			 * 1)
			 * Another thread is wiring down this entry. Note
			 * that if it is not for the other thread we would
			 * be unwiring an unwired entry.  This is not
			 * permitted.  If we wait, we will be unwiring memory
			 * we did not wire.
			 *
			 * 2)
			 * Another thread is unwiring this entry.  We did not
			 * have a reference to it, because if we did, this
			 * entry will not be getting unwired now.
			 */
			if (!user_wire)
				panic("vm_map_unwire: in_transition entry");

			entry = entry->vme_next;
			continue;
		}

		if(entry->is_sub_map) {
			vm_offset_t	sub_start;
			vm_offset_t	sub_end;
			vm_offset_t	local_end;
			pmap_t		pmap;

			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);

			sub_start = entry->offset;
			sub_end = entry->vme_end - entry->vme_start;
			sub_end += entry->offset;
			local_end = entry->vme_end;
			if(map_pmap == NULL) {
				if(entry->use_pmap) {
					pmap = entry->object.sub_map->pmap;
					pmap_addr = sub_start;
				} else {
					pmap = map->pmap;
					pmap_addr = start;
				}
				if (entry->wired_count == 0 ||
				    (user_wire && entry->user_wired_count == 0)) {
					if (!user_wire)
						panic("vm_map_unwire: entry is unwired");
					entry = entry->vme_next;
					continue;
				}

				/*
				 * Check for holes
				 * Holes: Next entry should be contiguous unless
				 * this is the end of the region.
				 */
				if (((entry->vme_end < end) &&
				    ((entry->vme_next == vm_map_to_entry(map)) ||
				     (entry->vme_next->vme_start
						> entry->vme_end)))) {
					if (!user_wire)
						panic("vm_map_unwire: non-contiguous region");
					entry = entry->vme_next;
					continue;
				}

				if (!user_wire || (--entry->user_wired_count == 0))
					entry->wired_count--;

				if (entry->wired_count != 0) {
					entry = entry->vme_next;
					continue;
				}

				entry->in_transition = TRUE;
				tmp_entry = *entry;/* see comment in vm_map_wire() */

				/*
				 * We can unlock the map now. The in_transition state
				 * guarantees existance of the entry.
				 */
				vm_map_unlock(map);
				vm_map_unwire_nested(entry->object.sub_map,
					sub_start, sub_end, user_wire, pmap, pmap_addr);
				vm_map_lock(map);

				if (last_timestamp+1 != map->timestamp) {
					/*
					 * Find the entry again.  It could have been
					 * clipped or deleted after we unlocked the map.
					 */
					if (!vm_map_lookup_entry(map,
						tmp_entry.vme_start,
								&first_entry)) {
						if (!user_wire)
							panic("vm_map_unwire: re-lookup failed");
						entry = first_entry->vme_next;
					} else
						entry = first_entry;
				}
				last_timestamp = map->timestamp;

				/*
				 * clear transition bit for all constituent entries
				 * that were in the original entry (saved in
				 * tmp_entry).  Also check for waiters.
				 */
				while ((entry != vm_map_to_entry(map)) &&
				       (entry->vme_start < tmp_entry.vme_end)) {
					assert(entry->in_transition);
					entry->in_transition = FALSE;
					if (entry->needs_wakeup) {
						entry->needs_wakeup = FALSE;
						need_wakeup = TRUE;
					}
					entry = entry->vme_next;
				}
				continue;
			} else {
				vm_map_unlock(map);
				vm_map_unwire_nested(entry->object.sub_map,
					sub_start, sub_end, user_wire, map_pmap,
					pmap_addr);
				vm_map_lock(map);

				if (last_timestamp+1 != map->timestamp) {
					/*
					 * Find the entry again.  It could have been
					 * clipped or deleted after we unlocked the map.
					 */
					if (!vm_map_lookup_entry(map,
						tmp_entry.vme_start,
								&first_entry)) {
						if (!user_wire)
							panic("vm_map_unwire: re-lookup failed");
						entry = first_entry->vme_next;
					} else
						entry = first_entry;
				}
				last_timestamp = map->timestamp;
			}
			continue;
		}


		if ((entry->wired_count == 0) ||
		    (user_wire && entry->user_wired_count == 0)) {
			if (!user_wire)
				panic("vm_map_unwire: entry is unwired");

			entry = entry->vme_next;
			continue;
		}

		assert(entry->wired_count > 0 &&
			(!user_wire || entry->user_wired_count > 0));

		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);

		/*
		 * Check for holes
		 * Holes: Next entry should be contiguous unless
		 *	  this is the end of the region.
		 */
		if (((entry->vme_end < end) &&
		    ((entry->vme_next == vm_map_to_entry(map)) ||
		     (entry->vme_next->vme_start > entry->vme_end)))) {

			if (!user_wire)
				panic("vm_map_unwire: non-contiguous region");
			entry = entry->vme_next;
			continue;
		}

		if (!user_wire || (--entry->user_wired_count == 0))
			entry->wired_count--;

		if (entry->wired_count != 0) {
			entry = entry->vme_next;
			continue;
		}

		entry->in_transition = TRUE;
		tmp_entry = *entry;	/* see comment in vm_map_wire() */

		/*
		 * We can unlock the map now. The in_transition state
		 * guarantees existance of the entry.
		 */
		vm_map_unlock(map);
		if(map_pmap) {
			vm_fault_unwire(map,
				&tmp_entry, FALSE, map_pmap, pmap_addr);
		} else {
			vm_fault_unwire(map,
				&tmp_entry, FALSE, map->pmap,
				tmp_entry.vme_start);
		}
		vm_map_lock(map);

		if (last_timestamp+1 != map->timestamp) {
			/*
			 * Find the entry again.  It could have been clipped
			 * or deleted after we unlocked the map.
			 */
			if (!vm_map_lookup_entry(map, tmp_entry.vme_start,
							&first_entry)) {
				if (!user_wire)
					panic("vm_map_unwire: re-lookup failed");
				entry = first_entry->vme_next;
			} else
				entry = first_entry;
		}
		last_timestamp = map->timestamp;

		/*
		 * clear transition bit for all constituent entries that
		 * were in the original entry (saved in tmp_entry).  Also
		 * check for waiters.
		 */
		while ((entry != vm_map_to_entry(map)) &&
		       (entry->vme_start < tmp_entry.vme_end)) {
			assert(entry->in_transition);
			entry->in_transition = FALSE;
			if (entry->needs_wakeup) {
				entry->needs_wakeup = FALSE;
				need_wakeup = TRUE;
			}
			entry = entry->vme_next;
		}
	}
	vm_map_unlock(map);
	/*
	 * wake up anybody waiting on entries that we have unwired.
	 */
	if (need_wakeup)
		vm_map_entry_wakeup(map);
	return(KERN_SUCCESS);
}
kern_return_t
vm_map_unwire(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	boolean_t		user_wire)
{
	return vm_map_unwire_nested(map, start, end,
					user_wire, (pmap_t)NULL, 0);
}
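/*
 * Note (added commentary): user wirings are reference counted in
 * user_wired_count, and only the 1 -> 0 transition of that count drops
 * the underlying wired_count; kernel wirings manipulate wired_count
 * directly.  This mirrors the increment logic in vm_map_wire_nested()
 * above.
 */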
/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
void
vm_map_entry_delete(
	register vm_map_t	map,
	register vm_map_entry_t	entry)
{
	register vm_offset_t	s, e;
	register vm_object_t	object;
	register vm_map_t	submap;
	extern vm_object_t	kernel_object;

	s = entry->vme_start;
	e = entry->vme_end;
	assert(page_aligned(s));
	assert(page_aligned(e));
	assert(entry->wired_count == 0);
	assert(entry->user_wired_count == 0);

	if (entry->is_sub_map) {
		object = NULL;
		submap = entry->object.sub_map;
	} else {
		submap = NULL;
		object = entry->object.vm_object;
	}

	vm_map_entry_unlink(map, entry);
	map->size -= e - s;

	vm_map_entry_dispose(map, entry);

	vm_map_unlock(map);
	/*
	 *	Deallocate the object only after removing all
	 *	pmap entries pointing to its pages.
	 */
	if (submap)
		vm_map_deallocate(submap);
	else
		vm_object_deallocate(object);
}
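/*
 * Note (added commentary): vm_map_entry_delete() is called with the map
 * locked and returns with it unlocked, so callers such as vm_map_delete()
 * below must re-take the map lock immediately after it returns.
 */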
void
vm_map_submap_pmap_clean(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end,
	vm_map_t	sub_map,
	vm_offset_t	offset)
{
	vm_offset_t	submap_start;
	vm_offset_t	submap_end;
	vm_size_t	remove_size;
	vm_map_entry_t	entry;

	submap_end = offset + (end - start);
	submap_start = offset;
	if(vm_map_lookup_entry(sub_map, offset, &entry)) {

		remove_size = (entry->vme_end - entry->vme_start);
		if(offset > entry->vme_start)
			remove_size -= offset - entry->vme_start;


		if(submap_end < entry->vme_end) {
			remove_size -=
				entry->vme_end - submap_end;
		}
		if(entry->is_sub_map) {
			vm_map_submap_pmap_clean(
				sub_map,
				start,
				start + remove_size,
				entry->object.sub_map,
				entry->offset);
		} else {

			if((map->mapped) && (map->ref_count)
				&& (entry->object.vm_object != NULL)) {
				vm_object_pmap_protect(
					entry->object.vm_object,
					entry->offset,
					remove_size,
					PMAP_NULL,
					entry->vme_start,
					VM_PROT_NONE);
			} else {
				pmap_remove(map->pmap,
					(addr64_t)start,
					(addr64_t)(start + remove_size));
			}
		}
	}

	entry = entry->vme_next;

	while((entry != vm_map_to_entry(sub_map))
		&& (entry->vme_start < submap_end)) {
		remove_size = (entry->vme_end - entry->vme_start);
		if(submap_end < entry->vme_end) {
			remove_size -= entry->vme_end - submap_end;
		}
		if(entry->is_sub_map) {
			vm_map_submap_pmap_clean(
				sub_map,
				(start + entry->vme_start) - offset,
				((start + entry->vme_start) - offset) + remove_size,
				entry->object.sub_map,
				entry->offset);
		} else {
			if((map->mapped) && (map->ref_count)
				&& (entry->object.vm_object != NULL)) {
				vm_object_pmap_protect(
					entry->object.vm_object,
					entry->offset,
					remove_size,
					PMAP_NULL,
					entry->vme_start,
					VM_PROT_NONE);
			} else {
				pmap_remove(map->pmap,
					(addr64_t)((start + entry->vme_start)
						- offset),
					(addr64_t)(((start + entry->vme_start)
						- offset) + remove_size));
			}
		}
		entry = entry->vme_next;
	}
	return;
}
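/*
 * Note (added commentary): the (start + entry->vme_start) - offset
 * arithmetic above translates a submap entry's address back into the
 * parent map's address space before removing the parent's pmap mappings;
 * "offset" is where the submap range begins and "start" is the parent
 * address corresponding to it.
 */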
/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target map.
 *	Removes all user wirings. Unwires one kernel wiring if
 *	VM_MAP_REMOVE_KUNWIRE is set.  Waits for kernel wirings to go
 *	away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set.  Sleeps
 *	interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set.
 *
 *	This routine is called with map locked and leaves map locked.
 */
kern_return_t
vm_map_delete(
	register vm_map_t	map,
	vm_offset_t		start,
	register vm_offset_t	end,
	int			flags)
{
	vm_map_entry_t		entry, next;
	struct	 vm_map_entry	*first_entry, tmp_entry;
	register vm_offset_t	s, e;
	register vm_object_t	object;
	boolean_t		need_wakeup;
	unsigned int		last_timestamp = ~0; /* unlikely value */
	int			interruptible;
	extern vm_map_t		kernel_map;

	interruptible = (flags & VM_MAP_REMOVE_INTERRUPTIBLE) ?
			THREAD_ABORTSAFE : THREAD_UNINT;

	/*
	 * All our DMA I/O operations in IOKit are currently done by
	 * wiring through the map entries of the task requesting the I/O.
	 * Because of this, we must always wait for kernel wirings
	 * to go away on the entries before deleting them.
	 *
	 * Any caller who wants to actually remove a kernel wiring
	 * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to
	 * properly remove one wiring instead of blasting through
	 * them all.
	 */
	flags |= VM_MAP_REMOVE_WAIT_FOR_KWIRE;

	/*
	 *	Find the start of the region, and clip it
	 */
	if (vm_map_lookup_entry(map, start, &first_entry)) {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 *	Fix the lookup hint now, rather than each
		 *	time through the loop.
		 */
		SAVE_HINT(map, entry->vme_prev);
	} else {
		entry = first_entry->vme_next;
	}

	need_wakeup = FALSE;
	/*
	 *	Step through all entries in this region
	 */
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {

		vm_map_clip_end(map, entry, end);
		if (entry->in_transition) {
			wait_result_t wait_result;

			/*
			 * Another thread is wiring/unwiring this entry.
			 * Let the other thread know we are waiting.
			 */
			s = entry->vme_start;
			entry->needs_wakeup = TRUE;

			/*
			 * wake up anybody waiting on entries that we have
			 * already unwired/deleted.
			 */
			if (need_wakeup) {
				vm_map_entry_wakeup(map);
				need_wakeup = FALSE;
			}

			wait_result = vm_map_entry_wait(map, interruptible);

			if (interruptible &&
			    wait_result == THREAD_INTERRUPTED) {
				/*
				 * We do not clear the needs_wakeup flag,
				 * since we cannot tell if we were the only one.
				 */
				vm_map_unlock(map);
				return KERN_ABORTED;
			}

			/*
			 * The entry could have been clipped or it
			 * may not exist anymore.  Look it up again.
			 */
			if (!vm_map_lookup_entry(map, s, &first_entry)) {
				assert((map != kernel_map) &&
				       (!entry->is_sub_map));
				/*
				 * User: use the next entry
				 */
				entry = first_entry->vme_next;
			} else {
				entry = first_entry;
				SAVE_HINT(map, entry->vme_prev);
			}
			last_timestamp = map->timestamp;
			continue;
		} /* end in_transition */

		if (entry->wired_count) {
			/*
			 * 	Remove a kernel wiring if requested or if
			 *	there are user wirings.
			 */
			if ((flags & VM_MAP_REMOVE_KUNWIRE) ||
			    (entry->user_wired_count > 0))
				entry->wired_count--;

			/* remove all user wire references */
			entry->user_wired_count = 0;

			if (entry->wired_count != 0) {
				assert((map != kernel_map) &&
				       (!entry->is_sub_map));
				/*
				 * Cannot continue.  Typical case is when
				 * a user thread has physical io pending on
				 * on this page.  Either wait for the
				 * kernel wiring to go away or return an
				 * error.
				 */
				if (flags & VM_MAP_REMOVE_WAIT_FOR_KWIRE) {
					wait_result_t wait_result;

					s = entry->vme_start;
					entry->needs_wakeup = TRUE;
					wait_result = vm_map_entry_wait(map,
							interruptible);

					if (interruptible &&
					    wait_result == THREAD_INTERRUPTED) {
						/*
						 * We do not clear the
						 * needs_wakeup flag, since we
						 * cannot tell if we were the
						 * only one.
						 */
						vm_map_unlock(map);
						return KERN_ABORTED;
					}

					/*
					 * The entry could have been clipped or
					 * it may not exist anymore.  Look it
					 * up again.
					 */
					if (!vm_map_lookup_entry(map, s,
							&first_entry)) {
						assert((map != kernel_map) &&
						       (!entry->is_sub_map));
						/*
						 * User: use the next entry
						 */
						entry = first_entry->vme_next;
					} else {
						entry = first_entry;
						SAVE_HINT(map, entry->vme_prev);
					}
					last_timestamp = map->timestamp;
					continue;
				}
				else {
					return KERN_FAILURE;
				}
			}

			entry->in_transition = TRUE;
			/*
			 * copy current entry.  see comment in vm_map_wire()
			 */
			tmp_entry = *entry;
			s = entry->vme_start;
			e = entry->vme_end;

			/*
			 * We can unlock the map now. The in_transition
			 * state guarentees existance of the entry.
			 */
			vm_map_unlock(map);
			vm_fault_unwire(map, &tmp_entry,
				tmp_entry.object.vm_object == kernel_object,
				map->pmap, tmp_entry.vme_start);
			vm_map_lock(map);

			if (last_timestamp+1 != map->timestamp) {
				/*
				 * Find the entry again.  It could have
				 * been clipped after we unlocked the map.
				 */
				if (!vm_map_lookup_entry(map, s, &first_entry)){
					assert((map != kernel_map) &&
					       (!entry->is_sub_map));
					first_entry = first_entry->vme_next;
				} else {
					SAVE_HINT(map, entry->vme_prev);
				}
			} else {
				SAVE_HINT(map, entry->vme_prev);
				first_entry = entry;
			}

			last_timestamp = map->timestamp;

			entry = first_entry;
			while ((entry != vm_map_to_entry(map)) &&
			       (entry->vme_start < tmp_entry.vme_end)) {
				assert(entry->in_transition);
				entry->in_transition = FALSE;
				if (entry->needs_wakeup) {
					entry->needs_wakeup = FALSE;
					need_wakeup = TRUE;
				}
				entry = entry->vme_next;
			}
			/*
			 * We have unwired the entry(s).  Go back and
			 * delete them.
			 */
			entry = first_entry;
			continue;
		}

		/* entry is unwired */
		assert(entry->wired_count == 0);
		assert(entry->user_wired_count == 0);

		if ((!entry->is_sub_map &&
		    entry->object.vm_object != kernel_object) ||
		    entry->is_sub_map) {
			if(entry->is_sub_map) {
			   if(entry->use_pmap) {
				pmap_unnest(map->pmap, (addr64_t)entry->vme_start);
				if((map->mapped) && (map->ref_count)) {
					/* clean up parent map/maps */
					vm_map_submap_pmap_clean(
						map, entry->vme_start,
						entry->vme_end,
						entry->object.sub_map,
						entry->offset);
				}
			   } else {
				vm_map_submap_pmap_clean(
					map, entry->vme_start, entry->vme_end,
					entry->object.sub_map,
					entry->offset);
			   }
			} else {
			   object = entry->object.vm_object;
			   if((map->mapped) && (map->ref_count)) {
				vm_object_pmap_protect(
					object, entry->offset,
					entry->vme_end - entry->vme_start,
					PMAP_NULL,
					entry->vme_start,
					VM_PROT_NONE);
			   } else if(object != NULL) {
				if ((object->shadow != NULL) ||
					(object->phys_contiguous) ||
					(object->resident_page_count >
					atop((entry->vme_end - entry->vme_start)/4))) {
					pmap_remove(map->pmap,
						(addr64_t)(entry->vme_start),
						(addr64_t)(entry->vme_end));
				} else {
					vm_page_t p;
					vm_object_offset_t start_off;
					vm_object_offset_t end_off;
					start_off = entry->offset;
					end_off = start_off +
						(entry->vme_end - entry->vme_start);
					vm_object_lock(object);
					queue_iterate(&object->memq,
						p, vm_page_t, listq) {
					   if ((!p->fictitious) &&
						(p->offset >= start_off) &&
						(p->offset < end_off)) {
						vm_offset_t start;
						start = entry->vme_start;
						start += p->offset - start_off;
						pmap_remove(
							map->pmap, start,
							start + PAGE_SIZE);
					   }
					}
					vm_object_unlock(object);
				}
			   }
			}
		}

		next = entry->vme_next;
		s = next->vme_start;
		last_timestamp = map->timestamp;
		vm_map_entry_delete(map, entry);
		/* vm_map_entry_delete unlocks the map */
		vm_map_lock(map);
		entry = next;

		if(entry == vm_map_to_entry(map)) {
			break;
		}
		if (last_timestamp+1 != map->timestamp) {
			/*
			 * we are responsible for deleting everything
			 * from the give space, if someone has interfered
			 * we pick up where we left off, back fills should
			 * be all right for anyone except map_delete and
			 * we have to assume that the task has been fully
			 * disabled before we get here
			 */
			if (!vm_map_lookup_entry(map, s, &entry)){
				entry = entry->vme_next;
			} else {
				SAVE_HINT(map, entry->vme_prev);
			}
			/*
			 * others can not only allocate behind us, we can
			 * also see coalesce while we don't have the map lock
			 */
			if(entry == vm_map_to_entry(map)) {
				break;
			}
			vm_map_clip_start(map, entry, s);
		}
		last_timestamp = map->timestamp;
	}

	if (map->wait_for_space)
		thread_wakeup((event_t) map);
	/*
	 * wake up anybody waiting on entries that we have already deleted.
	 */
	if (need_wakeup)
		vm_map_entry_wakeup(map);

	return KERN_SUCCESS;
}
/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
kern_return_t
vm_map_remove(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register boolean_t	flags)
{
	register kern_return_t	result;
	boolean_t	funnel_set = FALSE;
	funnel_t	*curflock;
	thread_t	cur_thread;

	cur_thread = current_thread();

	if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
		funnel_set = TRUE;
		curflock = cur_thread->funnel_lock;
		thread_funnel_set( curflock , FALSE);
	}
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end, flags);
	vm_map_unlock(map);
	if (funnel_set) {
		thread_funnel_set( curflock, TRUE);
		funnel_set = FALSE;
	}
	return(result);
}
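/*
 * Illustrative sketch (added commentary, not from the original source):
 * a typical caller deallocates a previously established, page-aligned
 * mapping with
 *
 *	kr = vm_map_remove(map, trunc_page_32(addr),
 *			   round_page_32(addr + size), VM_MAP_NO_FLAGS);
 *
 * passing VM_MAP_REMOVE_KUNWIRE when it also owns a kernel wiring on the
 * range, as vm_map_copyin_kernel_buffer() does below.
 */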
/*
 *	Routine:	vm_map_copy_discard
 *
 *	Description:
 *		Dispose of a map copy object (returned by
 *		vm_map_copyin).
 */
void
vm_map_copy_discard(
	vm_map_copy_t	copy)
{
	TR_DECL("vm_map_copy_discard");

/*	tr3("enter: copy 0x%x type %d", copy, copy->type);*/

	if (copy == VM_MAP_COPY_NULL)
		return;

	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		while (vm_map_copy_first_entry(copy) !=
					vm_map_copy_to_entry(copy)) {
			vm_map_entry_t	entry = vm_map_copy_first_entry(copy);

			vm_map_copy_entry_unlink(copy, entry);
			vm_object_deallocate(entry->object.vm_object);
			vm_map_copy_entry_dispose(copy, entry);
		}
		break;
	case VM_MAP_COPY_OBJECT:
		vm_object_deallocate(copy->cpy_object);
		break;
	case VM_MAP_COPY_KERNEL_BUFFER:

		/*
		 * The vm_map_copy_t and possibly the data buffer were
		 * allocated by a single call to kalloc(), i.e. the
		 * vm_map_copy_t was not allocated out of the zone.
		 */
		kfree((vm_offset_t) copy, copy->cpy_kalloc_size);
		return;
	}
	zfree(vm_map_copy_zone, (vm_offset_t) copy);
}
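/*
 * Note (added commentary): only the ENTRY_LIST and OBJECT cases fall
 * through to the zfree() of the copy header; the KERNEL_BUFFER case
 * returns early because the header and data were obtained from a single
 * kalloc() and are released together by the kfree() above.
 */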
/*
 *	Routine:	vm_map_copy_copy
 *
 *	Description:
 *			Move the information in a map copy object to
 *			a new map copy object, leaving the old one
 *			empty.
 *
 *			This is used by kernel routines that need
 *			to look at out-of-line data (in copyin form)
 *			before deciding whether to return SUCCESS.
 *			If the routine returns FAILURE, the original
 *			copy object will be deallocated; therefore,
 *			these routines must make a copy of the copy
 *			object and leave the original empty so that
 *			deallocation will not fail.
 */
vm_map_copy_t
vm_map_copy_copy(
	vm_map_copy_t	copy)
{
	vm_map_copy_t	new_copy;

	if (copy == VM_MAP_COPY_NULL)
		return VM_MAP_COPY_NULL;

	/*
	 * Allocate a new copy object, and copy the information
	 * from the old one into it.
	 */

	new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	*new_copy = *copy;

	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
		/*
		 * The links in the entry chain must be
		 * changed to point to the new copy object.
		 */
		vm_map_copy_first_entry(copy)->vme_prev
			= vm_map_copy_to_entry(new_copy);
		vm_map_copy_last_entry(copy)->vme_next
			= vm_map_copy_to_entry(new_copy);
	}

	/*
	 * Change the old copy object into one that contains
	 * nothing to be deallocated.
	 */
	copy->type = VM_MAP_COPY_OBJECT;
	copy->cpy_object = VM_OBJECT_NULL;

	/*
	 * Return the new object.
	 */
	return new_copy;
}
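/*
 * Illustrative sketch (added commentary, not from the original source):
 * a server that must inspect out-of-line data before committing to
 * success would do
 *
 *	vm_map_copy_t saved = vm_map_copy_copy(copy);
 *	... examine the data ...
 *	if (failure)
 *		vm_map_copy_discard(saved);
 *
 * so that the IPC layer's deallocation of the original (now-empty) copy
 * object cannot free the entries the server still holds.
 */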
kern_return_t
vm_map_overwrite_submap_recurse(
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_size_t	dst_size)
{
	vm_offset_t	dst_end;
	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;
	kern_return_t	result;
	boolean_t	encountered_sub_map = FALSE;


	/*
	 *	Verify that the destination is all writeable
	 *	initially.  We have to trunc the destination
	 *	address and round the copy size or we'll end up
	 *	splitting entries in strange ways.
	 */

	dst_end = round_page_32(dst_addr + dst_size);
	vm_map_lock(dst_map);

start_pass_1:
	if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
		vm_map_unlock(dst_map);
		return(KERN_INVALID_ADDRESS);
	}

	vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(dst_addr));

	for (entry = tmp_entry;;) {
		vm_map_entry_t	next;

		next = entry->vme_next;
		while(entry->is_sub_map) {
			vm_offset_t	sub_start;
			vm_offset_t	sub_end;
			vm_offset_t	local_end;

			if (entry->in_transition) {
				/*
				 * Say that we are waiting, and wait for entry.
				 */
				entry->needs_wakeup = TRUE;
				vm_map_entry_wait(dst_map, THREAD_UNINT);

				goto start_pass_1;
			}

			encountered_sub_map = TRUE;
			sub_start = entry->offset;

			if(entry->vme_end < dst_end)
				sub_end = entry->vme_end;
			else
				sub_end = dst_end;
			sub_end -= entry->vme_start;
			sub_end += entry->offset;
			local_end = entry->vme_end;
			vm_map_unlock(dst_map);

			result = vm_map_overwrite_submap_recurse(
				entry->object.sub_map,
				sub_start,
				sub_end - sub_start);

			if(result != KERN_SUCCESS)
				return result;
			if (dst_end <= entry->vme_end)
				return KERN_SUCCESS;
			vm_map_lock(dst_map);
			if(!vm_map_lookup_entry(dst_map, local_end,
						&tmp_entry)) {
				vm_map_unlock(dst_map);
				return(KERN_INVALID_ADDRESS);
			}
			entry = tmp_entry;
			next = entry->vme_next;
		}

		if ( ! (entry->protection & VM_PROT_WRITE)) {
			vm_map_unlock(dst_map);
			return(KERN_PROTECTION_FAILURE);
		}

		/*
		 *	If the entry is in transition, we must wait
		 *	for it to exit that state.  Anything could happen
		 *	when we unlock the map, so start over.
		 */
		if (entry->in_transition) {

			/*
			 * Say that we are waiting, and wait for entry.
			 */
			entry->needs_wakeup = TRUE;
			vm_map_entry_wait(dst_map, THREAD_UNINT);

			goto start_pass_1;
		}

/*
 *		our range is contained completely within this map entry
 */
		if (dst_end <= entry->vme_end) {
			vm_map_unlock(dst_map);
			return KERN_SUCCESS;
		}
/*
 *		check that range specified is contiguous region
 */
		if ((next == vm_map_to_entry(dst_map)) ||
		    (next->vme_start != entry->vme_end)) {
			vm_map_unlock(dst_map);
			return(KERN_INVALID_ADDRESS);
		}

		/*
		 *	Check for permanent objects in the destination.
		 */
		if ((entry->object.vm_object != VM_OBJECT_NULL) &&
			   ((!entry->object.vm_object->internal) ||
			   (entry->object.vm_object->true_share))) {
			if(encountered_sub_map) {
				vm_map_unlock(dst_map);
				return(KERN_FAILURE);
			}
		}


		entry = next;
	}/* for */
	vm_map_unlock(dst_map);
	return(KERN_SUCCESS);
}
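/*
 * Note (added commentary): this routine only validates that the
 * destination range inside a submap is writeable and contiguous; the
 * actual overwrite is performed later by vm_map_copy_overwrite_nested(),
 * which recurses into the submap with the submap's own pmap.
 */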
/*
 *	Routine:	vm_map_copy_overwrite
 *
 *	Description:
 *		Copy the memory described by the map copy
 *		object (copy; returned by vm_map_copyin) onto
 *		the specified destination region (dst_map, dst_addr).
 *		The destination must be writeable.
 *
 *		Unlike vm_map_copyout, this routine actually
 *		writes over previously-mapped memory.  If the
 *		previous mapping was to a permanent (user-supplied)
 *		memory object, it is preserved.
 *
 *		The attributes (protection and inheritance) of the
 *		destination region are preserved.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 *
 *	Implementation notes:
 *		To overwrite aligned temporary virtual memory, it is
 *		sufficient to remove the previous mapping and insert
 *		the new copy.  This replacement is done either on
 *		the whole region (if no permanent virtual memory
 *		objects are embedded in the destination region) or
 *		in individual map entries.
 *
 *		To overwrite permanent virtual memory, it is necessary
 *		to copy each page, as the external memory management
 *		interface currently does not provide any optimizations.
 *
 *		Unaligned memory also has to be copied.  It is possible
 *		to use 'vm_trickery' to copy the aligned data.  This is
 *		not done but not hard to implement.
 *
 *		Once a page of permanent memory has been overwritten,
 *		it is impossible to interrupt this function; otherwise,
 *		the call would be neither atomic nor location-independent.
 *		The kernel-state portion of a user thread must be
 *		interruptible.
 *
 *		It may be expensive to forward all requests that might
 *		overwrite permanent memory (vm_write, vm_copy) to
 *		uninterruptible kernel threads.  This routine may be
 *		called by interruptible threads; however, success is
 *		not guaranteed -- if the request cannot be performed
 *		atomically and interruptibly, an error indication is
 *		returned.
 */
kern_return_t
vm_map_copy_overwrite_nested(
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_map_copy_t	copy,
	boolean_t	interruptible,
	pmap_t		pmap)
{
	vm_offset_t	dst_end;
	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;
	kern_return_t	kr;
	boolean_t	aligned = TRUE;
	boolean_t	contains_permanent_objects = FALSE;
	boolean_t	encountered_sub_map = FALSE;
	vm_offset_t	base_addr;
	vm_size_t	copy_size;
	vm_size_t	total_size;


	/*
	 *	Check for null copy object.
	 */

	if (copy == VM_MAP_COPY_NULL)
		return(KERN_SUCCESS);

	/*
	 *	Check for special kernel buffer allocated
	 *	by new_ipc_kmsg_copyin.
	 */

	if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
		return(vm_map_copyout_kernel_buffer(
					dst_map, &dst_addr,
					copy, TRUE));
	}

	/*
	 *      Only works for entry lists at the moment.  Will
	 *	support page lists later.
	 */

	assert(copy->type == VM_MAP_COPY_ENTRY_LIST);

	if (copy->size == 0) {
		vm_map_copy_discard(copy);
		return(KERN_SUCCESS);
	}

	/*
	 *	Verify that the destination is all writeable
	 *	initially.  We have to trunc the destination
	 *	address and round the copy size or we'll end up
	 *	splitting entries in strange ways.
	 */

	if (!page_aligned(copy->size) ||
		!page_aligned (copy->offset) ||
		!page_aligned (dst_addr))
	{
		aligned = FALSE;
		dst_end = round_page_32(dst_addr + copy->size);
	} else {
		dst_end = dst_addr + copy->size;
	}

	vm_map_lock(dst_map);

start_pass_1:
	if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
		vm_map_unlock(dst_map);
		return(KERN_INVALID_ADDRESS);
	}
	vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(dst_addr));
	for (entry = tmp_entry;;) {
		vm_map_entry_t	next = entry->vme_next;

		while(entry->is_sub_map) {
			vm_offset_t	sub_start;
			vm_offset_t	sub_end;
			vm_offset_t	local_end;

			if (entry->in_transition) {

				/*
				 * Say that we are waiting, and wait for entry.
				 */
				entry->needs_wakeup = TRUE;
				vm_map_entry_wait(dst_map, THREAD_UNINT);

				goto start_pass_1;
			}

			local_end = entry->vme_end;
			if (!(entry->needs_copy)) {
				/* if needs_copy we are a COW submap */
				/* in such a case we just replace so */
				/* there is no need for the follow-  */
				/* ing check.                        */
				encountered_sub_map = TRUE;
				sub_start = entry->offset;

				if(entry->vme_end < dst_end)
					sub_end = entry->vme_end;
				else
					sub_end = dst_end;
				sub_end -= entry->vme_start;
				sub_end += entry->offset;
				vm_map_unlock(dst_map);

				kr = vm_map_overwrite_submap_recurse(
					entry->object.sub_map,
					sub_start,
					sub_end - sub_start);
				if(kr != KERN_SUCCESS)
					return kr;
				vm_map_lock(dst_map);
			}

			if (dst_end <= entry->vme_end)
				goto start_overwrite;
			if(!vm_map_lookup_entry(dst_map, local_end,
						&entry)) {
				vm_map_unlock(dst_map);
				return(KERN_INVALID_ADDRESS);
			}
			next = entry->vme_next;
		}

		if ( ! (entry->protection & VM_PROT_WRITE)) {
			vm_map_unlock(dst_map);
			return(KERN_PROTECTION_FAILURE);
		}

		/*
		 *	If the entry is in transition, we must wait
		 *	for it to exit that state.  Anything could happen
		 *	when we unlock the map, so start over.
		 */
		if (entry->in_transition) {

			/*
			 * Say that we are waiting, and wait for entry.
			 */
			entry->needs_wakeup = TRUE;
			vm_map_entry_wait(dst_map, THREAD_UNINT);

			goto start_pass_1;
		}

/*
 *		our range is contained completely within this map entry
 */
		if (dst_end <= entry->vme_end)
			break;
/*
 *		check that range specified is contiguous region
 */
		if ((next == vm_map_to_entry(dst_map)) ||
		    (next->vme_start != entry->vme_end)) {
			vm_map_unlock(dst_map);
			return(KERN_INVALID_ADDRESS);
		}


		/*
		 *	Check for permanent objects in the destination.
		 */
		if ((entry->object.vm_object != VM_OBJECT_NULL) &&
			   ((!entry->object.vm_object->internal) ||
			   (entry->object.vm_object->true_share))) {
			contains_permanent_objects = TRUE;
		}

		entry = next;
	}/* for */

start_overwrite:
	/*
	 *	If there are permanent objects in the destination, then
	 *	the copy cannot be interrupted.
	 */

	if (interruptible && contains_permanent_objects) {
		vm_map_unlock(dst_map);
		return(KERN_FAILURE);	/* XXX */
	}

	/*
	 *
	 *	Make a second pass, overwriting the data
	 *	At the beginning of each loop iteration,
	 *	the next entry to be overwritten is "tmp_entry"
	 *	(initially, the value returned from the lookup above),
	 *	and the starting address expected in that entry
	 *	is "start".
	 */

	total_size = copy->size;
	if(encountered_sub_map) {
		copy_size = 0;
		/* re-calculate tmp_entry since we've had the map */
		/* unlocked */
		if (!vm_map_lookup_entry( dst_map, dst_addr, &tmp_entry)) {
			vm_map_unlock(dst_map);
			return(KERN_INVALID_ADDRESS);
		}
	} else {
		copy_size = copy->size;
	}

	base_addr = dst_addr;
	while(TRUE) {
		/* deconstruct the copy object and do in parts */
		/* only in sub_map, interruptable case */
		vm_map_entry_t	copy_entry;
		vm_map_entry_t	previous_prev;
		vm_map_entry_t	next_copy;
		int		nentries;
		int		remaining_entries;
		int		new_offset;

		for (entry = tmp_entry; copy_size == 0;) {
			vm_map_entry_t	next;

			next = entry->vme_next;

			/* tmp_entry and base address are moved along */
			/* each time we encounter a sub-map.  Otherwise */
			/* entry can outpase tmp_entry, and the copy_size */
			/* may reflect the distance between them */
			/* if the current entry is found to be in transition */
			/* we will start over at the beginning or the last */
			/* encounter of a submap as dictated by base_addr */
			/* we will zero copy_size accordingly. */
			if (entry->in_transition) {
				/*
				 * Say that we are waiting, and wait for entry.
				 */
				entry->needs_wakeup = TRUE;
				vm_map_entry_wait(dst_map, THREAD_UNINT);

				if(!vm_map_lookup_entry(dst_map, base_addr,
							&tmp_entry)) {
					vm_map_unlock(dst_map);
					return(KERN_INVALID_ADDRESS);
				}
				copy_size = 0;
				entry = tmp_entry;
				continue;
			}
			if(entry->is_sub_map) {
				vm_offset_t	sub_start;
				vm_offset_t	sub_end;
				vm_offset_t	local_end;

				if (entry->needs_copy) {
					/* if this is a COW submap */
					/* just back the range with a */
					/* anonymous entry */
					if(entry->vme_end < dst_end)
						sub_end = entry->vme_end;
					else
						sub_end = dst_end;
					if(entry->vme_start < base_addr)
						sub_start = base_addr;
					else
						sub_start = entry->vme_start;
					vm_map_clip_end(
						dst_map, entry, sub_end);
					vm_map_clip_start(
						dst_map, entry, sub_start);
					entry->is_sub_map = FALSE;
					vm_map_deallocate(
						entry->object.sub_map);
					entry->object.sub_map = NULL;
					entry->is_shared = FALSE;
					entry->needs_copy = FALSE;
					entry->offset = 0;
					entry->protection = VM_PROT_ALL;
					entry->max_protection = VM_PROT_ALL;
					entry->wired_count = 0;
					entry->user_wired_count = 0;
					if(entry->inheritance
						== VM_INHERIT_SHARE)
					   entry->inheritance = VM_INHERIT_COPY;
					continue;
				}
				/* first take care of any non-sub_map */
				/* entries to send */
				if(base_addr < entry->vme_start) {
					/* stuff to send */
					copy_size =
						entry->vme_start - base_addr;
					break;
				}
				sub_start = entry->offset;

				if(entry->vme_end < dst_end)
					sub_end = entry->vme_end;
				else
					sub_end = dst_end;
				sub_end -= entry->vme_start;
				sub_end += entry->offset;
				local_end = entry->vme_end;
				vm_map_unlock(dst_map);
				copy_size = sub_end - sub_start;

				/* adjust the copy object */
				if (total_size > copy_size) {
					vm_size_t local_size = 0;
					vm_size_t entry_size;

					nentries = 1;
					new_offset = copy->offset;
					copy_entry = vm_map_copy_first_entry(copy);
					while(copy_entry !=
						vm_map_copy_to_entry(copy)){
						entry_size = copy_entry->vme_end -
							copy_entry->vme_start;
						if((local_size < copy_size) &&
							((local_size + entry_size)
							>= copy_size)) {
							vm_map_copy_clip_end(copy,
								copy_entry,
								copy_entry->vme_start +
								(copy_size - local_size));
							entry_size = copy_entry->vme_end -
								copy_entry->vme_start;
							local_size += entry_size;
							new_offset += entry_size;
						}
						if(local_size >= copy_size) {
							next_copy = copy_entry->vme_next;
							copy_entry->vme_next =
								vm_map_copy_to_entry(copy);
							previous_prev =
								copy->cpy_hdr.links.prev;
							copy->cpy_hdr.links.prev = copy_entry;
							copy->size = copy_size;
							remaining_entries =
								copy->cpy_hdr.nentries;
							remaining_entries -= nentries;
							copy->cpy_hdr.nentries = nentries;
							break;
						} else {
							local_size += entry_size;
							new_offset += entry_size;
							nentries++;
						}
						copy_entry = copy_entry->vme_next;
					}
				}

				if((entry->use_pmap) && (pmap == NULL)) {
					kr = vm_map_copy_overwrite_nested(
						entry->object.sub_map,
						sub_start,
						copy,
						interruptible,
						entry->object.sub_map->pmap);
				} else if (pmap != NULL) {
					kr = vm_map_copy_overwrite_nested(
						entry->object.sub_map,
						sub_start,
						copy,
						interruptible, pmap);
				} else {
					kr = vm_map_copy_overwrite_nested(
						entry->object.sub_map,
						sub_start,
						copy,
						interruptible,
						dst_map->pmap);
				}
				if(kr != KERN_SUCCESS) {
					if(next_copy != NULL) {
						copy->cpy_hdr.nentries +=
							remaining_entries;
						copy->cpy_hdr.links.prev->vme_next =
							next_copy;
						copy->cpy_hdr.links.prev
							= previous_prev;
						copy->size = total_size;
					}
					return kr;
				}
				if (dst_end <= local_end) {
					return(KERN_SUCCESS);
				}
				/* otherwise copy no longer exists, it was */
				/* destroyed after successful copy_overwrite */
				copy = (vm_map_copy_t)
					zalloc(vm_map_copy_zone);
				vm_map_copy_first_entry(copy) =
					vm_map_copy_last_entry(copy) =
					vm_map_copy_to_entry(copy);
				copy->type = VM_MAP_COPY_ENTRY_LIST;
				copy->offset = new_offset;

				total_size -= copy_size;
				copy_size = 0;
				/* put back remainder of copy in container */
				if(next_copy != NULL) {
					copy->cpy_hdr.nentries = remaining_entries;
					copy->cpy_hdr.links.next = next_copy;
					copy->cpy_hdr.links.prev = previous_prev;
					copy->size = total_size;
					next_copy->vme_prev =
						vm_map_copy_to_entry(copy);
					next_copy = NULL;
				}
				base_addr = local_end;
				vm_map_lock(dst_map);
				if(!vm_map_lookup_entry(dst_map,
						local_end, &tmp_entry)) {
					vm_map_unlock(dst_map);
					return(KERN_INVALID_ADDRESS);
				}
				entry = tmp_entry;
				continue;
			}
			if (dst_end <= entry->vme_end) {
				copy_size = dst_end - base_addr;
				break;
			}

			if ((next == vm_map_to_entry(dst_map)) ||
			    (next->vme_start != entry->vme_end)) {
				vm_map_unlock(dst_map);
				return(KERN_INVALID_ADDRESS);
			}

			entry = next;
		}/* for */

		next_copy = NULL;
		nentries = 1;

		/* adjust the copy object */
		if (total_size > copy_size) {
			vm_size_t local_size = 0;
			vm_size_t entry_size;

			new_offset = copy->offset;
			copy_entry = vm_map_copy_first_entry(copy);
			while(copy_entry != vm_map_copy_to_entry(copy)) {
				entry_size = copy_entry->vme_end -
					copy_entry->vme_start;
				if((local_size < copy_size) &&
					((local_size + entry_size)
					>= copy_size)) {
					vm_map_copy_clip_end(copy, copy_entry,
						copy_entry->vme_start +
						(copy_size - local_size));
					entry_size = copy_entry->vme_end -
						copy_entry->vme_start;
					local_size += entry_size;
					new_offset += entry_size;
				}
				if(local_size >= copy_size) {
					next_copy = copy_entry->vme_next;
					copy_entry->vme_next =
						vm_map_copy_to_entry(copy);
					previous_prev =
						copy->cpy_hdr.links.prev;
					copy->cpy_hdr.links.prev = copy_entry;
					copy->size = copy_size;
					remaining_entries =
						copy->cpy_hdr.nentries;
					remaining_entries -= nentries;
					copy->cpy_hdr.nentries = nentries;
					break;
				} else {
					local_size += entry_size;
					new_offset += entry_size;
					nentries++;
				}
				copy_entry = copy_entry->vme_next;
			}
		}

		if (aligned) {
			pmap_t	local_pmap;

			if(pmap)
				local_pmap = pmap;
			else
				local_pmap = dst_map->pmap;

			if ((kr =  vm_map_copy_overwrite_aligned(
				dst_map, tmp_entry, copy,
				base_addr, local_pmap)) != KERN_SUCCESS) {
				if(next_copy != NULL) {
					copy->cpy_hdr.nentries +=
						remaining_entries;
					copy->cpy_hdr.links.prev->vme_next =
						next_copy;
					copy->cpy_hdr.links.prev =
						previous_prev;
					copy->size += copy_size;
				}
				return kr;
			}
			vm_map_unlock(dst_map);
		} else {
			/*
			 * Performance gain:
			 *
			 * if the copy and dst address are misaligned but the same
			 * offset within the page we can copy_not_aligned the
			 * misaligned parts and copy aligned the rest.  If they are
			 * aligned but len is unaligned we simply need to copy
			 * the end bit unaligned.  We'll need to split the misaligned
			 * bits of the region in this case !
			 */
			/* ALWAYS UNLOCKS THE dst_map MAP */
			if ((kr =  vm_map_copy_overwrite_unaligned( dst_map,
				tmp_entry, copy, base_addr)) != KERN_SUCCESS) {
				if(next_copy != NULL) {
					copy->cpy_hdr.nentries +=
						remaining_entries;
					copy->cpy_hdr.links.prev->vme_next =
						next_copy;
					copy->cpy_hdr.links.prev =
						previous_prev;
					copy->size += copy_size;
				}
				return kr;
			}
		}
		total_size -= copy_size;
		if(total_size == 0)
			break;
		base_addr += copy_size;
		copy_size = 0;
		copy->offset = new_offset;
		if(next_copy != NULL) {
			copy->cpy_hdr.nentries = remaining_entries;
			copy->cpy_hdr.links.next = next_copy;
			copy->cpy_hdr.links.prev = previous_prev;
			next_copy->vme_prev = vm_map_copy_to_entry(copy);
			copy->size = total_size;
		}
		vm_map_lock(dst_map);
		while(TRUE) {
			if (!vm_map_lookup_entry(dst_map,
						base_addr, &tmp_entry)) {
				vm_map_unlock(dst_map);
				return(KERN_INVALID_ADDRESS);
			}
			if (tmp_entry->in_transition) {
				entry->needs_wakeup = TRUE;
				vm_map_entry_wait(dst_map, THREAD_UNINT);
			} else {
				break;
			}
		}
		vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(base_addr));

	}/* while */

	/*
	 *	Throw away the vm_map_copy object
	 */
	vm_map_copy_discard(copy);

	return(KERN_SUCCESS);
}/* vm_map_copy_overwrite */
kern_return_t
vm_map_copy_overwrite(
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_map_copy_t	copy,
	boolean_t	interruptible)
{
	return vm_map_copy_overwrite_nested(
			dst_map, dst_addr, copy, interruptible, (pmap_t) NULL);
}
/*
 *	Routine: vm_map_copy_overwrite_unaligned
 *
 *	Decription:
 *	Physically copy unaligned data
 *
 *	Implementation:
 *	Unaligned parts of pages have to be physically copied.  We use
 *	a modified form of vm_fault_copy (which understands none-aligned
 *	page offsets and sizes) to do the copy.  We attempt to copy as
 *	much memory in one go as possibly, however vm_fault_copy copies
 *	within 1 memory object so we have to find the smaller of "amount left"
 *	"source object data size" and "target object data size".  With
 *	unaligned data we don't need to split regions, therefore the source
 *	(copy) object should be one map entry, the target range may be split
 *	over multiple map entries however.  In any event we are pessimistic
 *	about these assumptions.
 *
 *	Assumptions:
 *	dst_map is locked on entry and is return locked on success,
 *	unlocked on error.
 */

kern_return_t
vm_map_copy_overwrite_unaligned(
	vm_map_t	dst_map,
	vm_map_entry_t	entry,
	vm_map_copy_t	copy,
	vm_offset_t	start)
{
	vm_map_entry_t		copy_entry = vm_map_copy_first_entry(copy);
	vm_map_version_t	version;
	vm_object_t		dst_object;
	vm_object_offset_t	dst_offset;
	vm_object_offset_t	src_offset;
	vm_object_offset_t	entry_offset;
	vm_offset_t		entry_end;
	vm_size_t		src_size,
				dst_size,
				copy_size,
				amount_left;
	kern_return_t		kr = KERN_SUCCESS;

	vm_map_lock_write_to_read(dst_map);

	src_offset = copy->offset - trunc_page_64(copy->offset);
	amount_left = copy->size;
/*
 *	unaligned so we never clipped this entry, we need the offset into
 *	the vm_object not just the data.
 */
	while (amount_left > 0) {

		if (entry == vm_map_to_entry(dst_map)) {
			vm_map_unlock_read(dst_map);
			return KERN_INVALID_ADDRESS;
		}

		/* "start" must be within the current map entry */
		assert ((start>=entry->vme_start) && (start<entry->vme_end));

		dst_offset = start - entry->vme_start;

		dst_size = entry->vme_end - start;

		src_size = copy_entry->vme_end -
			(copy_entry->vme_start + src_offset);

		if (dst_size < src_size) {
/*
 *			we can only copy dst_size bytes before
 *			we have to get the next destination entry
 */
			copy_size = dst_size;
		} else {
/*
 *			we can only copy src_size bytes before
 *			we have to get the next source copy entry
 */
			copy_size = src_size;
		}

		if (copy_size > amount_left) {
			copy_size = amount_left;
		}
/*
 *		Entry needs copy, create a shadow shadow object for
 *		Copy on write region.
 */
		if (entry->needs_copy &&
			((entry->protection & VM_PROT_WRITE) != 0))
		{
			if (vm_map_lock_read_to_write(dst_map)) {
				vm_map_lock_read(dst_map);
				goto RetryLookup;
			}
			vm_object_shadow(&entry->object.vm_object,
					&entry->offset,
					(vm_size_t)(entry->vme_end
						- entry->vme_start));
			entry->needs_copy = FALSE;
			vm_map_lock_write_to_read(dst_map);
		}
		dst_object = entry->object.vm_object;
/*
 *		unlike with the virtual (aligned) copy we're going
 *		to fault on it therefore we need a target object.
 */
		if (dst_object == VM_OBJECT_NULL) {
			if (vm_map_lock_read_to_write(dst_map)) {
				vm_map_lock_read(dst_map);
				goto RetryLookup;
			}
			dst_object = vm_object_allocate((vm_size_t)
					entry->vme_end - entry->vme_start);
			entry->object.vm_object = dst_object;
			entry->offset = 0;
			vm_map_lock_write_to_read(dst_map);
		}
/*
 *		Take an object reference and unlock map. The "entry" may
 *		disappear or change when the map is unlocked.
 */
		vm_object_reference(dst_object);
		version.main_timestamp = dst_map->timestamp;
		entry_offset = entry->offset;
		entry_end = entry->vme_end;
		vm_map_unlock_read(dst_map);
/*
 *		Copy as much as possible in one pass
 */
		kr = vm_fault_copy(
			copy_entry->object.vm_object,
			copy_entry->offset + src_offset,
			&copy_size,
			dst_object,
			entry_offset + dst_offset,
			dst_map,
			&version,
			THREAD_UNINT );

		start += copy_size;
		src_offset += copy_size;
		amount_left -= copy_size;
/*
 *		Release the object reference
 */
		vm_object_deallocate(dst_object);
/*
 *		If a hard error occurred, return it now
 */
		if (kr != KERN_SUCCESS)
			return kr;

		if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end
			|| amount_left == 0)
		{
/*
 *			all done with this copy entry, dispose.
 */
			vm_map_copy_entry_unlink(copy, copy_entry);
			vm_object_deallocate(copy_entry->object.vm_object);
			vm_map_copy_entry_dispose(copy, copy_entry);

			if ((copy_entry = vm_map_copy_first_entry(copy))
				== vm_map_copy_to_entry(copy) && amount_left) {
/*
 *				not finished copying but run out of source
 */
				return KERN_INVALID_ADDRESS;
			}
			src_offset = 0;
		}

		if (amount_left == 0)
			return KERN_SUCCESS;

		vm_map_lock_read(dst_map);
		if (version.main_timestamp == dst_map->timestamp) {
			if (start == entry_end) {
/*
 *				destination region is split.  Use the version
 *				information to avoid a lookup in the normal
 *				case.
 */
				entry = entry->vme_next;
/*
 *				should be contiguous. Fail if we encounter
 *				a hole in the destination.
 */
				if (start != entry->vme_start) {
					vm_map_unlock_read(dst_map);
					return KERN_INVALID_ADDRESS ;
				}
			}
		} else {
/*
 *			Map version check failed.
 *			we must lookup the entry because somebody
 *			might have changed the map behind our backs.
 */
RetryLookup:
			if (!vm_map_lookup_entry(dst_map, start, &entry))
			{
				vm_map_unlock_read(dst_map);
				return KERN_INVALID_ADDRESS ;
			}
		}
	}/* while */

	vm_map_unlock_read(dst_map);

	return KERN_SUCCESS;
}/* vm_map_copy_overwrite_unaligned */
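/*
 * Note (added commentary): vm_fault_copy() is what actually moves the
 * unaligned bytes here; the surrounding loop only walks the destination
 * entries and the source copy entries, shrinking copy_size to the
 * smallest of "amount left", the remaining source entry and the
 * remaining destination entry, as described in the header comment.
 */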
/*
 *	Routine: vm_map_copy_overwrite_aligned
 *
 *	Description:
 *	Does all the vm_trickery possible for whole pages.
 *
 *	Implementation:
 *
 *	If there are no permanent objects in the destination,
 *	and the source and destination map entry zones match,
 *	and the destination map entry is not shared,
 *	then the map entries can be deleted and replaced
 *	with those from the copy.  The following code is the
 *	basic idea of what to do, but there are lots of annoying
 *	little details about getting protection and inheritance
 *	right.  Should add protection, inheritance, and sharing checks
 *	to the above pass and make sure that no wiring is involved.
 */

kern_return_t
vm_map_copy_overwrite_aligned(
	vm_map_t	dst_map,
	vm_map_entry_t	tmp_entry,
	vm_map_copy_t	copy,
	vm_offset_t	start,
	pmap_t		pmap)
{
	vm_object_t	object;
	vm_map_entry_t	copy_entry;
	vm_size_t	copy_size;
	vm_size_t	size;
	vm_map_entry_t	entry;

	while ((copy_entry = vm_map_copy_first_entry(copy))
		!= vm_map_copy_to_entry(copy))
	{
		copy_size = (copy_entry->vme_end - copy_entry->vme_start);

		entry = tmp_entry;
		if (entry == vm_map_to_entry(dst_map)) {
			vm_map_unlock(dst_map);
			return KERN_INVALID_ADDRESS;
		}
		size = (entry->vme_end - entry->vme_start);
		/*
		 *	Make sure that no holes popped up in the
		 *	address map, and that the protection is
		 *	still valid, in case the map was unlocked
		 *	earlier.
		 */

		if ((entry->vme_start != start) || ((entry->is_sub_map)
			&& !entry->needs_copy)) {
			vm_map_unlock(dst_map);
			return(KERN_INVALID_ADDRESS);
		}
		assert(entry != vm_map_to_entry(dst_map));

		/*
		 *	Check protection again
		 */

		if ( ! (entry->protection & VM_PROT_WRITE)) {
			vm_map_unlock(dst_map);
			return(KERN_PROTECTION_FAILURE);
		}

		/*
		 *	Adjust to source size first
		 */

		if (copy_size < size) {
			vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size);
			size = copy_size;
		}

		/*
		 *	Adjust to destination size
		 */

		if (size < copy_size) {
			vm_map_copy_clip_end(copy, copy_entry,
				copy_entry->vme_start + size);
			copy_size = size;
		}

		assert((entry->vme_end - entry->vme_start) == size);
		assert((tmp_entry->vme_end - tmp_entry->vme_start) == size);
		assert((copy_entry->vme_end - copy_entry->vme_start) == size);

		/*
		 *	If the destination contains temporary unshared memory,
		 *	we can perform the copy by throwing it away and
		 *	installing the source data.
		 */

		object = entry->object.vm_object;
		if ((!entry->is_shared &&
			((object == VM_OBJECT_NULL) ||
			(object->internal && !object->true_share))) ||
			entry->needs_copy) {
			vm_object_t	old_object = entry->object.vm_object;
			vm_object_offset_t	old_offset = entry->offset;
			vm_object_offset_t	offset;

			/*
			 * Ensure that the source and destination aren't
			 * identical
			 */
			if (old_object == copy_entry->object.vm_object &&
			    old_offset == copy_entry->offset) {
				vm_map_copy_entry_unlink(copy, copy_entry);
				vm_map_copy_entry_dispose(copy, copy_entry);

				if (old_object != VM_OBJECT_NULL)
					vm_object_deallocate(old_object);

				start = tmp_entry->vme_end;
				tmp_entry = tmp_entry->vme_next;
				continue;
			}

			if (old_object != VM_OBJECT_NULL) {
				if(entry->is_sub_map) {
				   if(entry->use_pmap) {
					pmap_unnest(dst_map->pmap,
						entry->vme_start,
						entry->vme_end
						 - entry->vme_start);
					if(dst_map->mapped) {
					   /* clean up parent */
					   /* map/maps */
					   vm_map_submap_pmap_clean(
						dst_map, entry->vme_start,
						entry->vme_end,
						entry->object.sub_map,
						entry->offset);
					}
				   } else {
					vm_map_submap_pmap_clean(
						dst_map, entry->vme_start,
						entry->vme_end,
						entry->object.sub_map,
						entry->offset);
				   }
				   vm_map_deallocate(
					entry->object.sub_map);
				} else {
				   if(dst_map->mapped) {
					vm_object_pmap_protect(
						entry->object.vm_object,
						entry->offset,
						entry->vme_end
							- entry->vme_start,
						PMAP_NULL,
						entry->vme_start,
						VM_PROT_NONE);
				   } else {
					pmap_remove(dst_map->pmap,
					   (addr64_t)(entry->vme_start),
					   (addr64_t)(entry->vme_end));
				   }
				   vm_object_deallocate(old_object);
				}
			}

			entry->is_sub_map = FALSE;
			entry->object = copy_entry->object;
			object = entry->object.vm_object;
			entry->needs_copy = copy_entry->needs_copy;
			entry->wired_count = 0;
			entry->user_wired_count = 0;
			offset = entry->offset = copy_entry->offset;

			vm_map_copy_entry_unlink(copy, copy_entry);
			vm_map_copy_entry_dispose(copy, copy_entry);
#if BAD_OPTIMIZATION
			/*
			 * if we turn this optimization back on
			 * we need to revisit our use of pmap mappings
			 * large copies will cause us to run out and panic
			 * this optimization only saved on average 2 us per page if ALL
			 * the pages in the source were currently mapped
			 * and ALL the pages in the dest were touched, if there were fewer
			 * than 2/3 of the pages touched, this optimization actually cost more cycles
			 */

			/*
			 *	Try to aggressively enter physical mappings
			 *	(but avoid uninstantiated objects)
			 */
			if (object != VM_OBJECT_NULL) {
			    vm_offset_t	va = entry->vme_start;

			    while (va < entry->vme_end) {
				register vm_page_t	m;
				vm_prot_t	prot;

				/*
				 *	Look for the page in the top object
				 */
				prot = entry->protection;
				vm_object_lock(object);
				vm_object_paging_begin(object);

				if ((m = vm_page_lookup(object,offset)) !=
				    VM_PAGE_NULL && !m->busy &&
				    !m->fictitious &&
				    (!m->unusual || (!m->error &&
					!m->restart && !m->absent &&
					 (prot & m->page_lock) == 0))) {

					m->busy = TRUE;
					vm_object_unlock(object);

					/*
					 *	Honor COW obligations
					 */
					if (entry->needs_copy)
						prot &= ~VM_PROT_WRITE;
					/* It is our policy to require */
					/* explicit sync from anyone   */
					/* writing code and then       */
					/* a pc to execute it.         */
					/* No isync here */

					PMAP_ENTER(pmap, va, m, prot,
						((unsigned int)
							(m->object->wimg_bits))
							& VM_WIMG_MASK,
						FALSE);

					vm_object_lock(object);
					vm_page_lock_queues();
					if (!m->active && !m->inactive)
						vm_page_activate(m);
					vm_page_unlock_queues();
					PAGE_WAKEUP_DONE(m);
				}
				vm_object_paging_end(object);
				vm_object_unlock(object);

				offset += PAGE_SIZE_64;
				va += PAGE_SIZE;
			    } /* end while (va < entry->vme_end) */
			} /* end if (object) */
#endif
			/*
			 *	Set up for the next iteration.  The map
			 *	has not been unlocked, so the next
			 *	address should be at the end of this
			 *	entry, and the next map entry should be
			 *	the one following it.
			 */

			start = tmp_entry->vme_end;
			tmp_entry = tmp_entry->vme_next;
		} else {
			vm_map_version_t	version;
			vm_object_t		dst_object = entry->object.vm_object;
			vm_object_offset_t	dst_offset = entry->offset;
			kern_return_t		r;

			/*
			 *	Take an object reference, and record
			 *	the map version information so that the
			 *	map can be safely unlocked.
			 */

			vm_object_reference(dst_object);

			/* account for unlock bumping up timestamp */
			version.main_timestamp = dst_map->timestamp + 1;

			vm_map_unlock(dst_map);

			/*
			 *	Copy as much as possible in one pass
			 */

			copy_size = size;
			r = vm_fault_copy(
					copy_entry->object.vm_object,
					copy_entry->offset,
					&copy_size,
					dst_object,
					dst_offset,
					dst_map,
					&version,
					THREAD_UNINT );

			/*
			 *	Release the object reference
			 */

			vm_object_deallocate(dst_object);

			/*
			 *	If a hard error occurred, return it now
			 */

			if (r != KERN_SUCCESS)
				return(r);

			if (copy_size != 0) {
				/*
				 *	Dispose of the copied region
				 */

				vm_map_copy_clip_end(copy, copy_entry,
					copy_entry->vme_start + copy_size);
				vm_map_copy_entry_unlink(copy, copy_entry);
				vm_object_deallocate(copy_entry->object.vm_object);
				vm_map_copy_entry_dispose(copy, copy_entry);
			}

			/*
			 *	Pick up in the destination map where we left off.
			 *
			 *	Use the version information to avoid a lookup
			 *	in the normal case.
			 */

			start += copy_size;
			vm_map_lock(dst_map);
			if (version.main_timestamp == dst_map->timestamp) {
				/* We can safely use saved tmp_entry value */

				vm_map_clip_end(dst_map, tmp_entry, start);
				tmp_entry = tmp_entry->vme_next;
			} else {
				/* Must do lookup of tmp_entry */

				if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) {
					vm_map_unlock(dst_map);
					return(KERN_INVALID_ADDRESS);
				}
				vm_map_clip_start(dst_map, tmp_entry, start);
			}
		}
	}/* while */

	return(KERN_SUCCESS);
}/* vm_map_copy_overwrite_aligned */
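/*
 * Note (added commentary): the fast path above swaps whole backing
 * objects into the destination entries (a virtual copy); the slow path
 * in the else branch falls back to vm_fault_copy() when the destination
 * is shared or backed by a permanent object and the data must be copied
 * page by page.
 */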
/*
 *	Routine:	vm_map_copyin_kernel_buffer
 *
 *	Description:
 *		Copy in data to a kernel buffer from space in the
 *		source map. The original space may be otpionally
 *		deallocated.
 *
 *		If successful, returns a new copy object.
 */
kern_return_t
vm_map_copyin_kernel_buffer(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	len,
	boolean_t	src_destroy,
	vm_map_copy_t	*copy_result)
{
	boolean_t flags;
	vm_map_copy_t copy;
	vm_size_t kalloc_size = sizeof(struct vm_map_copy) + len;

	copy = (vm_map_copy_t) kalloc(kalloc_size);
	if (copy == VM_MAP_COPY_NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}
	copy->type = VM_MAP_COPY_KERNEL_BUFFER;
	copy->size = len;
	copy->offset = 0;
	copy->cpy_kdata = (vm_offset_t) (copy + 1);
	copy->cpy_kalloc_size = kalloc_size;

	if (src_map == kernel_map) {
		bcopy((char *)src_addr, (char *)copy->cpy_kdata, len);
		flags = VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_WAIT_FOR_KWIRE |
			VM_MAP_REMOVE_INTERRUPTIBLE;
	} else {
		kern_return_t kr;
		kr = copyinmap(src_map, src_addr, copy->cpy_kdata, len);
		if (kr != KERN_SUCCESS) {
			kfree((vm_offset_t)copy, kalloc_size);
			return kr;
		}
		flags = VM_MAP_REMOVE_WAIT_FOR_KWIRE |
			VM_MAP_REMOVE_INTERRUPTIBLE;
	}
	if (src_destroy) {
		(void) vm_map_remove(src_map, trunc_page_32(src_addr),
				     round_page_32(src_addr + len),
				     flags);
	}
	*copy_result = copy;
	return KERN_SUCCESS;
}
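/*
 * Note (added commentary): because the data lands in a kalloc'ed buffer
 * adjacent to the vm_map_copy header, VM_MAP_COPY_KERNEL_BUFFER copies
 * are freed with a single kfree() in vm_map_copy_discard() rather than
 * through the copy zone.
 */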
/*
 *	Routine:	vm_map_copyout_kernel_buffer
 *
 *	Description:
 *		Copy out data from a kernel buffer into space in the
 *		destination map.  The space may be optionally dynamically
 *		allocated.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 */
kern_return_t
vm_map_copyout_kernel_buffer(
	vm_map_t	map,
	vm_offset_t	*addr,	/* IN/OUT */
	vm_map_copy_t	copy,
	boolean_t	overwrite)
{
	kern_return_t	kr = KERN_SUCCESS;
	thread_act_t	thr_act = current_act();

	if (!overwrite) {

		/*
		 *	Allocate space in the target map for the data
		 */
		*addr = 0;
		kr = vm_map_enter(map,
				  addr,
				  round_page_32(copy->size),
				  (vm_offset_t) 0,
				  TRUE,
				  VM_OBJECT_NULL,
				  (vm_object_offset_t) 0,
				  FALSE,
				  VM_PROT_DEFAULT,
				  VM_PROT_ALL,
				  VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS)
			return(kr);
	}

	/*
	 *	Copyout the data from the kernel buffer to the target map.
	 */
	if (thr_act->map == map) {

		/*
		 * If the target map is the current map, just do
		 * the copy.
		 */
		if (copyout((char *)copy->cpy_kdata, (char *)*addr,
				copy->size)) {
			return(KERN_INVALID_ADDRESS);
		}
	}
	else {
		vm_map_t oldmap;

		/*
		 * If the target map is another map, assume the
		 * target's address space identity for the duration
		 * of the copy.
		 */
		vm_map_reference(map);
		oldmap = vm_map_switch(map);

		if (copyout((char *)copy->cpy_kdata, (char *)*addr,
				copy->size)) {
			return(KERN_INVALID_ADDRESS);
		}

		(void) vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}

	kfree((vm_offset_t)copy, copy->cpy_kalloc_size);

	return(kr);
}
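
/*
 * Illustrative sketch (not part of the build): the address-space-switch
 * idiom used above.  copyout() always targets the current map, so writing
 * into some other map means temporarily adopting that map's identity and
 * restoring the old one afterwards.  "map" is the hypothetical target.
 */
#if 0
	vm_map_t	oldmap;

	vm_map_reference(map);			/* keep the map alive while switched */
	oldmap = vm_map_switch(map);		/* become "map" for copyout() */
	/* ... copyout() into *addr ... */
	(void) vm_map_switch(oldmap);		/* restore the original identity */
	vm_map_deallocate(map);			/* drop the extra reference */
#endif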
/*
 *	Macro:		vm_map_copy_insert
 *
 *	Description:
 *		Link a copy chain ("copy") into a map at the
 *		specified location (after "where").
 *	Side effects:
 *		The copy chain is destroyed.
 *	Warning:
 *		The arguments are evaluated multiple times.
 */
#define	vm_map_copy_insert(map, where, copy)				\
MACRO_BEGIN								\
	vm_map_t VMCI_map;						\
	vm_map_entry_t VMCI_where;					\
	vm_map_copy_t VMCI_copy;					\
	VMCI_map = (map);						\
	VMCI_where = (where);						\
	VMCI_copy = (copy);						\
	((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
		->vme_next = (VMCI_where->vme_next);			\
	((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy))	\
		->vme_prev = VMCI_where;				\
	VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries;		\
	UPDATE_FIRST_FREE(VMCI_map, VMCI_map->first_free);		\
	zfree(vm_map_copy_zone, (vm_offset_t) VMCI_copy);		\
MACRO_END
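
/*
 * Illustrative sketch (not part of the build): what vm_map_copy_insert()
 * amounts to once the macro locals are substituted -- splice the copy's
 * doubly-linked entry chain into the map's entry list right after "where",
 * credit the entry count, and free the now-empty copy header.  "map",
 * "where" and "copy" are hypothetical locals.
 */
#if 0
	vm_map_entry_t	after = where->vme_next;

	vm_map_copy_last_entry(copy)->vme_next  = after;
	after->vme_prev                         = vm_map_copy_last_entry(copy);
	where->vme_next                         = vm_map_copy_first_entry(copy);
	vm_map_copy_first_entry(copy)->vme_prev = where;

	map->hdr.nentries += copy->cpy_hdr.nentries;
	UPDATE_FIRST_FREE(map, map->first_free);
	zfree(vm_map_copy_zone, (vm_offset_t) copy);
#endif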
/*
 *	Routine:	vm_map_copyout
 *
 *	Description:
 *		Copy out a copy chain ("copy") into newly-allocated
 *		space in the destination map.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 */
kern_return_t
vm_map_copyout(
	register vm_map_t	dst_map,
	vm_offset_t		*dst_addr,	/* OUT */
	register vm_map_copy_t	copy)
{
4999 vm_size_t adjustment
;
5001 vm_object_offset_t vm_copy_start
;
5002 vm_map_entry_t last
;
5004 vm_map_entry_t entry
;
5007 * Check for null copy object.
5010 if (copy
== VM_MAP_COPY_NULL
) {
5012 return(KERN_SUCCESS
);
5016 * Check for special copy object, created
5017 * by vm_map_copyin_object.
5020 if (copy
->type
== VM_MAP_COPY_OBJECT
) {
5021 vm_object_t object
= copy
->cpy_object
;
5023 vm_object_offset_t offset
;
5025 offset
= trunc_page_64(copy
->offset
);
5026 size
= round_page_32(copy
->size
+
5027 (vm_size_t
)(copy
->offset
- offset
));
5029 kr
= vm_map_enter(dst_map
, dst_addr
, size
,
5030 (vm_offset_t
) 0, TRUE
,
5031 object
, offset
, FALSE
,
5032 VM_PROT_DEFAULT
, VM_PROT_ALL
,
5033 VM_INHERIT_DEFAULT
);
5034 if (kr
!= KERN_SUCCESS
)
5036 /* Account for non-pagealigned copy object */
5037 *dst_addr
+= (vm_offset_t
)(copy
->offset
- offset
);
5038 zfree(vm_map_copy_zone
, (vm_offset_t
) copy
);
5039 return(KERN_SUCCESS
);
5043 * Check for special kernel buffer allocated
5044 * by new_ipc_kmsg_copyin.
5047 if (copy
->type
== VM_MAP_COPY_KERNEL_BUFFER
) {
5048 return(vm_map_copyout_kernel_buffer(dst_map
, dst_addr
,
5053 * Find space for the data
5056 vm_copy_start
= trunc_page_64(copy
->offset
);
5057 size
= round_page_32((vm_size_t
)copy
->offset
+ copy
->size
)
5062 vm_map_lock(dst_map
);
5063 assert(first_free_is_valid(dst_map
));
5064 start
= ((last
= dst_map
->first_free
) == vm_map_to_entry(dst_map
)) ?
5065 vm_map_min(dst_map
) : last
->vme_end
;
5068 vm_map_entry_t next
= last
->vme_next
;
5069 vm_offset_t end
= start
+ size
;
5071 if ((end
> dst_map
->max_offset
) || (end
< start
)) {
5072 if (dst_map
->wait_for_space
) {
5073 if (size
<= (dst_map
->max_offset
- dst_map
->min_offset
)) {
5074 assert_wait((event_t
) dst_map
,
5075 THREAD_INTERRUPTIBLE
);
5076 vm_map_unlock(dst_map
);
5077 thread_block((void (*)(void))0);
5081 vm_map_unlock(dst_map
);
5082 return(KERN_NO_SPACE
);
5085 if ((next
== vm_map_to_entry(dst_map
)) ||
5086 (next
->vme_start
>= end
))
5090 start
= last
->vme_end
;
5094 * Since we're going to just drop the map
5095 * entries from the copy into the destination
5096 * map, they must come from the same pool.
5099 if (copy
->cpy_hdr
.entries_pageable
!= dst_map
->hdr
.entries_pageable
) {
5101 * Mismatches occur when dealing with the default
5105 vm_map_entry_t next
, new;
5108 * Find the zone that the copies were allocated from
5110 old_zone
= (copy
->cpy_hdr
.entries_pageable
)
5112 : vm_map_kentry_zone
;
5113 entry
= vm_map_copy_first_entry(copy
);
5116 * Reinitialize the copy so that vm_map_copy_entry_link
5119 copy
->cpy_hdr
.nentries
= 0;
5120 copy
->cpy_hdr
.entries_pageable
= dst_map
->hdr
.entries_pageable
;
5121 vm_map_copy_first_entry(copy
) =
5122 vm_map_copy_last_entry(copy
) =
5123 vm_map_copy_to_entry(copy
);
5128 while (entry
!= vm_map_copy_to_entry(copy
)) {
5129 new = vm_map_copy_entry_create(copy
);
5130 vm_map_entry_copy_full(new, entry
);
5131 new->use_pmap
= FALSE
; /* clr address space specifics */
5132 vm_map_copy_entry_link(copy
,
5133 vm_map_copy_last_entry(copy
),
5135 next
= entry
->vme_next
;
5136 zfree(old_zone
, (vm_offset_t
) entry
);
5142 * Adjust the addresses in the copy chain, and
5143 * reset the region attributes.
5146 adjustment
= start
- vm_copy_start
;
5147 for (entry
= vm_map_copy_first_entry(copy
);
5148 entry
!= vm_map_copy_to_entry(copy
);
5149 entry
= entry
->vme_next
) {
5150 entry
->vme_start
+= adjustment
;
5151 entry
->vme_end
+= adjustment
;
5153 entry
->inheritance
= VM_INHERIT_DEFAULT
;
5154 entry
->protection
= VM_PROT_DEFAULT
;
5155 entry
->max_protection
= VM_PROT_ALL
;
5156 entry
->behavior
= VM_BEHAVIOR_DEFAULT
;
5159 * If the entry is now wired,
5160 * map the pages into the destination map.
5162 if (entry
->wired_count
!= 0) {
5163 register vm_offset_t va
;
5164 vm_object_offset_t offset
;
5165 register vm_object_t object
;
5167 object
= entry
->object
.vm_object
;
5168 offset
= entry
->offset
;
5169 va
= entry
->vme_start
;
5171 pmap_pageable(dst_map
->pmap
,
5176 while (va
< entry
->vme_end
) {
5177 register vm_page_t m
;
5180 * Look up the page in the object.
5181 * Assert that the page will be found in the
5184 * the object was newly created by
5185 * vm_object_copy_slowly, and has
5186 * copies of all of the pages from
5189 * the object was moved from the old
5190 * map entry; because the old map
5191 * entry was wired, all of the pages
5192 * were in the top-level object.
5193 * (XXX not true if we wire pages for
5196 vm_object_lock(object
);
5197 vm_object_paging_begin(object
);
5199 m
= vm_page_lookup(object
, offset
);
5200 if (m
== VM_PAGE_NULL
|| m
->wire_count
== 0 ||
5202 panic("vm_map_copyout: wiring 0x%x", m
);
5205 vm_object_unlock(object
);
5207 PMAP_ENTER(dst_map
->pmap
, va
, m
, entry
->protection
,
5209 (m
->object
->wimg_bits
))
5213 vm_object_lock(object
);
5214 PAGE_WAKEUP_DONE(m
);
5215 /* the page is wired, so we don't have to activate */
5216 vm_object_paging_end(object
);
5217 vm_object_unlock(object
);
5219 offset
+= PAGE_SIZE_64
;
5223 else if (size
<= vm_map_aggressive_enter_max
) {
5225 register vm_offset_t va
;
5226 vm_object_offset_t offset
;
5227 register vm_object_t object
;
5230 object
= entry
->object
.vm_object
;
5231 if (object
!= VM_OBJECT_NULL
) {
5233 offset
= entry
->offset
;
5234 va
= entry
->vme_start
;
5235 while (va
< entry
->vme_end
) {
5236 register vm_page_t m
;
5239 * Look up the page in the object.
5240 * Assert that the page will be found
5241 * in the top object if at all...
5243 vm_object_lock(object
);
5244 vm_object_paging_begin(object
);
5246 if (((m
= vm_page_lookup(object
,
5249 !m
->busy
&& !m
->fictitious
&&
5250 !m
->absent
&& !m
->error
) {
5252 vm_object_unlock(object
);
5254 /* honor cow obligations */
5255 prot
= entry
->protection
;
5256 if (entry
->needs_copy
)
5257 prot
&= ~VM_PROT_WRITE
;
5259 PMAP_ENTER(dst_map
->pmap
, va
,
5262 (m
->object
->wimg_bits
))
5266 vm_object_lock(object
);
5267 vm_page_lock_queues();
5268 if (!m
->active
&& !m
->inactive
)
5269 vm_page_activate(m
);
5270 vm_page_unlock_queues();
5271 PAGE_WAKEUP_DONE(m
);
5273 vm_object_paging_end(object
);
5274 vm_object_unlock(object
);
5276 offset
+= PAGE_SIZE_64
;
5284 * Correct the page alignment for the result
5287 *dst_addr
= start
+ (copy
->offset
- vm_copy_start
);
5290 * Update the hints and the map size
5293 SAVE_HINT(dst_map
, vm_map_copy_last_entry(copy
));
5295 dst_map
->size
+= size
;
5301 vm_map_copy_insert(dst_map
, last
, copy
);
5303 vm_map_unlock(dst_map
);
5306 * XXX If wiring_required, call vm_map_pageable
5309 return(KERN_SUCCESS
);
5312 boolean_t vm_map_aggressive_enter
; /* not used yet */
/*
 *	Routine:	vm_map_copyin
 *
 *	Description:
 *		Copy the specified region (src_addr, len) from the
 *		source address space (src_map), possibly removing
 *		the region from the source address space (src_destroy).
 *
 *	Returns:
 *		A vm_map_copy_t object (copy_result), suitable for
 *		insertion into another address space (using vm_map_copyout),
 *		copying over another address space region (using
 *		vm_map_copy_overwrite).  If the copy is unused, it
 *		should be destroyed (using vm_map_copy_discard).
 *
 *	In/out conditions:
 *		The source map should not be locked on entry.
 */
5334 typedef struct submap_map
{
5335 vm_map_t parent_map
;
5336 vm_offset_t base_start
;
5337 vm_offset_t base_end
;
5338 struct submap_map
*next
;
5342 vm_map_copyin_common(
5344 vm_offset_t src_addr
,
5346 boolean_t src_destroy
,
5347 boolean_t src_volatile
,
5348 vm_map_copy_t
*copy_result
, /* OUT */
5349 boolean_t use_maxprot
)
5351 extern int msg_ool_size_small
;
5353 vm_map_entry_t tmp_entry
; /* Result of last map lookup --
5354 * in multi-level lookup, this
5355 * entry contains the actual
5359 vm_map_entry_t new_entry
= VM_MAP_ENTRY_NULL
; /* Map entry for copy */
5361 vm_offset_t src_start
; /* Start of current entry --
5362 * where copy is taking place now
5364 vm_offset_t src_end
; /* End of entire region to be
5366 vm_offset_t base_start
; /* submap fields to save offsets */
5367 /* in original map */
5368 vm_offset_t base_end
;
5369 vm_map_t base_map
=src_map
;
5370 vm_map_entry_t base_entry
;
5371 boolean_t map_share
=FALSE
;
5372 submap_map_t
*parent_maps
= NULL
;
5375 vm_map_copy_t copy
; /* Resulting copy */
5376 vm_offset_t copy_addr
;
5379 * Check for copies of zero bytes.
5383 *copy_result
= VM_MAP_COPY_NULL
;
5384 return(KERN_SUCCESS
);
5388 * Check that the end address doesn't overflow
5390 src_end
= src_addr
+ len
;
5391 if (src_end
< src_addr
)
5392 return KERN_INVALID_ADDRESS
;
5395 * If the copy is sufficiently small, use a kernel buffer instead
5396 * of making a virtual copy. The theory being that the cost of
5397 * setting up VM (and taking C-O-W faults) dominates the copy costs
5398 * for small regions.
5400 if ((len
< msg_ool_size_small
) && !use_maxprot
)
5401 return vm_map_copyin_kernel_buffer(src_map
, src_addr
, len
,
5402 src_destroy
, copy_result
);
5405 * Compute (page aligned) start and end of region
5407 src_start
= trunc_page_32(src_addr
);
5408 src_end
= round_page_32(src_end
);
5410 XPR(XPR_VM_MAP
, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t
)src_map
, src_addr
, len
, src_destroy
, 0);
5413 * Allocate a header element for the list.
5415 * Use the start and end in the header to
5416 * remember the endpoints prior to rounding.
5419 copy
= (vm_map_copy_t
) zalloc(vm_map_copy_zone
);
5420 vm_map_copy_first_entry(copy
) =
5421 vm_map_copy_last_entry(copy
) = vm_map_copy_to_entry(copy
);
5422 copy
->type
= VM_MAP_COPY_ENTRY_LIST
;
5423 copy
->cpy_hdr
.nentries
= 0;
5424 copy
->cpy_hdr
.entries_pageable
= TRUE
;
5426 copy
->offset
= src_addr
;
5429 new_entry
= vm_map_copy_entry_create(copy
);
5433 vm_map_unlock(src_map); \
5434 if(src_map != base_map) \
5435 vm_map_deallocate(src_map); \
5436 if (new_entry != VM_MAP_ENTRY_NULL) \
5437 vm_map_copy_entry_dispose(copy,new_entry); \
5438 vm_map_copy_discard(copy); \
5440 submap_map_t *ptr; \
5442 for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { \
5443 parent_maps=parent_maps->next; \
5444 if (ptr->parent_map != base_map) \
5445 vm_map_deallocate(ptr->parent_map); \
5446 kfree((vm_offset_t)ptr, sizeof(submap_map_t)); \
5453 * Find the beginning of the region.
5456 vm_map_lock(src_map
);
5458 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
))
5459 RETURN(KERN_INVALID_ADDRESS
);
5460 if(!tmp_entry
->is_sub_map
) {
5461 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5463 /* set for later submap fix-up */
5464 copy_addr
= src_start
;
5467 * Go through entries until we get to the end.
5472 vm_map_entry_t src_entry
= tmp_entry
; /* Top-level entry */
5473 vm_size_t src_size
; /* Size of source
5474 * map entry (in both
5479 vm_object_t src_object
; /* Object to copy */
5480 vm_object_offset_t src_offset
;
5482 boolean_t src_needs_copy
; /* Should source map
5484 * for copy-on-write?
5487 boolean_t new_entry_needs_copy
; /* Will new entry be COW? */
5489 boolean_t was_wired
; /* Was source wired? */
5490 vm_map_version_t version
; /* Version before locks
5491 * dropped to make copy
5493 kern_return_t result
; /* Return value from
5494 * copy_strategically.
5496 while(tmp_entry
->is_sub_map
) {
5497 vm_size_t submap_len
;
5500 ptr
= (submap_map_t
*)kalloc(sizeof(submap_map_t
));
5501 ptr
->next
= parent_maps
;
5503 ptr
->parent_map
= src_map
;
5504 ptr
->base_start
= src_start
;
5505 ptr
->base_end
= src_end
;
5506 submap_len
= tmp_entry
->vme_end
- src_start
;
5507 if(submap_len
> (src_end
-src_start
))
5508 submap_len
= src_end
-src_start
;
5509 ptr
->base_start
+= submap_len
;
5511 src_start
-= tmp_entry
->vme_start
;
5512 src_start
+= tmp_entry
->offset
;
5513 src_end
= src_start
+ submap_len
;
5514 src_map
= tmp_entry
->object
.sub_map
;
5515 vm_map_lock(src_map
);
5516 /* keep an outstanding reference for all maps in */
5517 /* the parents tree except the base map */
5518 vm_map_reference(src_map
);
5519 vm_map_unlock(ptr
->parent_map
);
5520 if (!vm_map_lookup_entry(
5521 src_map
, src_start
, &tmp_entry
))
5522 RETURN(KERN_INVALID_ADDRESS
);
5524 if(!tmp_entry
->is_sub_map
)
5525 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5526 src_entry
= tmp_entry
;
5528 if ((tmp_entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
5529 (tmp_entry
->object
.vm_object
->phys_contiguous
)) {
			/* This is not supported for now.  In the future */
			/* we will need to detect the phys_contig */
			/* condition and then upgrade copy_slowly */
			/* to do a physical copy from the device-memory- */
			/* based object.  We can piggy-back off of the */
			/* was_wired boolean to set up the proper handling. */
5537 RETURN(KERN_PROTECTION_FAILURE
);
5540 * Create a new address map entry to hold the result.
5541 * Fill in the fields from the appropriate source entries.
5542 * We must unlock the source map to do this if we need
5543 * to allocate a map entry.
5545 if (new_entry
== VM_MAP_ENTRY_NULL
) {
5546 version
.main_timestamp
= src_map
->timestamp
;
5547 vm_map_unlock(src_map
);
5549 new_entry
= vm_map_copy_entry_create(copy
);
5551 vm_map_lock(src_map
);
5552 if ((version
.main_timestamp
+ 1) != src_map
->timestamp
) {
5553 if (!vm_map_lookup_entry(src_map
, src_start
,
5555 RETURN(KERN_INVALID_ADDRESS
);
5557 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5558 continue; /* restart w/ new tmp_entry */
5563 * Verify that the region can be read.
5565 if (((src_entry
->protection
& VM_PROT_READ
) == VM_PROT_NONE
&&
5567 (src_entry
->max_protection
& VM_PROT_READ
) == 0)
5568 RETURN(KERN_PROTECTION_FAILURE
);
5571 * Clip against the endpoints of the entire region.
5574 vm_map_clip_end(src_map
, src_entry
, src_end
);
5576 src_size
= src_entry
->vme_end
- src_start
;
5577 src_object
= src_entry
->object
.vm_object
;
5578 src_offset
= src_entry
->offset
;
5579 was_wired
= (src_entry
->wired_count
!= 0);
5581 vm_map_entry_copy(new_entry
, src_entry
);
5582 new_entry
->use_pmap
= FALSE
; /* clr address space specifics */
5585 * Attempt non-blocking copy-on-write optimizations.
5589 (src_object
== VM_OBJECT_NULL
||
5590 (src_object
->internal
&& !src_object
->true_share
5593 * If we are destroying the source, and the object
5594 * is internal, we can move the object reference
5595 * from the source to the copy. The copy is
5596 * copy-on-write only if the source is.
5597 * We make another reference to the object, because
5598 * destroying the source entry will deallocate it.
5600 vm_object_reference(src_object
);
5603 * Copy is always unwired. vm_map_copy_entry
5604 * set its wired count to zero.
5607 goto CopySuccessful
;
5612 XPR(XPR_VM_MAP
, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
5613 src_object
, new_entry
, new_entry
->object
.vm_object
,
5615 if ((src_object
== VM_OBJECT_NULL
||
5616 (!was_wired
&& !map_share
&& !tmp_entry
->is_shared
)) &&
5617 vm_object_copy_quickly(
5618 &new_entry
->object
.vm_object
,
5622 &new_entry_needs_copy
)) {
5624 new_entry
->needs_copy
= new_entry_needs_copy
;
5627 * Handle copy-on-write obligations
5630 if (src_needs_copy
&& !tmp_entry
->needs_copy
) {
5631 vm_object_pmap_protect(
5635 (src_entry
->is_shared
?
5638 src_entry
->vme_start
,
5639 src_entry
->protection
&
5641 tmp_entry
->needs_copy
= TRUE
;
5645 * The map has never been unlocked, so it's safe
5646 * to move to the next entry rather than doing
5650 goto CopySuccessful
;
5654 * Take an object reference, so that we may
5655 * release the map lock(s).
5658 assert(src_object
!= VM_OBJECT_NULL
);
5659 vm_object_reference(src_object
);
5662 * Record the timestamp for later verification.
5666 version
.main_timestamp
= src_map
->timestamp
;
5667 vm_map_unlock(src_map
); /* Increments timestamp once! */
5675 vm_object_lock(src_object
);
5676 result
= vm_object_copy_slowly(
5681 &new_entry
->object
.vm_object
);
5682 new_entry
->offset
= 0;
5683 new_entry
->needs_copy
= FALSE
;
5686 else if (src_object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
&&
5687 (tmp_entry
->is_shared
|| map_share
)) {
5688 vm_object_t new_object
;
5690 vm_object_lock(src_object
);
5691 new_object
= vm_object_copy_delayed(
5695 if (new_object
== VM_OBJECT_NULL
)
5698 new_entry
->object
.vm_object
= new_object
;
5699 new_entry
->needs_copy
= TRUE
;
5700 result
= KERN_SUCCESS
;
5703 result
= vm_object_copy_strategically(src_object
,
5706 &new_entry
->object
.vm_object
,
5708 &new_entry_needs_copy
);
5710 new_entry
->needs_copy
= new_entry_needs_copy
;
5713 if (result
!= KERN_SUCCESS
&&
5714 result
!= KERN_MEMORY_RESTART_COPY
) {
5715 vm_map_lock(src_map
);
5720 * Throw away the extra reference
5723 vm_object_deallocate(src_object
);
5726 * Verify that the map has not substantially
5727 * changed while the copy was being made.
5730 vm_map_lock(src_map
);
5732 if ((version
.main_timestamp
+ 1) == src_map
->timestamp
)
5733 goto VerificationSuccessful
;
5736 * Simple version comparison failed.
5738 * Retry the lookup and verify that the
5739 * same object/offset are still present.
5741 * [Note: a memory manager that colludes with
5742 * the calling task can detect that we have
5743 * cheated. While the map was unlocked, the
5744 * mapping could have been changed and restored.]
5747 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
)) {
5748 RETURN(KERN_INVALID_ADDRESS
);
5751 src_entry
= tmp_entry
;
5752 vm_map_clip_start(src_map
, src_entry
, src_start
);
5754 if ((src_entry
->protection
& VM_PROT_READ
== VM_PROT_NONE
&&
5756 src_entry
->max_protection
& VM_PROT_READ
== 0)
5757 goto VerificationFailed
;
5759 if (src_entry
->vme_end
< new_entry
->vme_end
)
5760 src_size
= (new_entry
->vme_end
= src_entry
->vme_end
) - src_start
;
5762 if ((src_entry
->object
.vm_object
!= src_object
) ||
5763 (src_entry
->offset
!= src_offset
) ) {
5766 * Verification failed.
5768 * Start over with this top-level entry.
5771 VerificationFailed
: ;
5773 vm_object_deallocate(new_entry
->object
.vm_object
);
5774 tmp_entry
= src_entry
;
5779 * Verification succeeded.
5782 VerificationSuccessful
: ;
5784 if (result
== KERN_MEMORY_RESTART_COPY
)
5794 * Link in the new copy entry.
5797 vm_map_copy_entry_link(copy
, vm_map_copy_last_entry(copy
),
5801 * Determine whether the entire region
5804 src_start
= new_entry
->vme_end
;
5805 new_entry
= VM_MAP_ENTRY_NULL
;
5806 while ((src_start
>= src_end
) && (src_end
!= 0)) {
5807 if (src_map
!= base_map
) {
5811 assert(ptr
!= NULL
);
5812 parent_maps
= parent_maps
->next
;
5813 vm_map_unlock(src_map
);
5814 vm_map_deallocate(src_map
);
5815 vm_map_lock(ptr
->parent_map
);
5816 src_map
= ptr
->parent_map
;
5817 src_start
= ptr
->base_start
;
5818 src_end
= ptr
->base_end
;
5819 if ((src_end
> src_start
) &&
5820 !vm_map_lookup_entry(
5821 src_map
, src_start
, &tmp_entry
))
5822 RETURN(KERN_INVALID_ADDRESS
);
5823 kfree((vm_offset_t
)ptr
, sizeof(submap_map_t
));
5824 if(parent_maps
== NULL
)
5826 src_entry
= tmp_entry
->vme_prev
;
5830 if ((src_start
>= src_end
) && (src_end
!= 0))
5834 * Verify that there are no gaps in the region
5837 tmp_entry
= src_entry
->vme_next
;
5838 if ((tmp_entry
->vme_start
!= src_start
) ||
5839 (tmp_entry
== vm_map_to_entry(src_map
)))
5840 RETURN(KERN_INVALID_ADDRESS
);
5844 * If the source should be destroyed, do it now, since the
5845 * copy was successful.
5848 (void) vm_map_delete(src_map
,
5849 trunc_page_32(src_addr
),
5851 (src_map
== kernel_map
) ?
5852 VM_MAP_REMOVE_KUNWIRE
:
5856 vm_map_unlock(src_map
);
5858 /* Fix-up start and end points in copy. This is necessary */
5859 /* when the various entries in the copy object were picked */
5860 /* up from different sub-maps */
5862 tmp_entry
= vm_map_copy_first_entry(copy
);
5863 while (tmp_entry
!= vm_map_copy_to_entry(copy
)) {
5864 tmp_entry
->vme_end
= copy_addr
+
5865 (tmp_entry
->vme_end
- tmp_entry
->vme_start
);
5866 tmp_entry
->vme_start
= copy_addr
;
5867 copy_addr
+= tmp_entry
->vme_end
- tmp_entry
->vme_start
;
5868 tmp_entry
= (struct vm_map_entry
*)tmp_entry
->vme_next
;
5871 *copy_result
= copy
;
5872 return(KERN_SUCCESS
);
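
/*
 * Illustrative sketch (not part of the build): the usual life cycle of a
 * copy object produced above.  A successful vm_map_copyin_common() hands
 * back a vm_map_copy_t that is either consumed by vm_map_copyout() (or
 * vm_map_copy_overwrite()) or explicitly destroyed with
 * vm_map_copy_discard().  All variable names here are hypothetical.
 */
#if 0
	vm_map_copy_t	copy;
	vm_offset_t	dst_addr;
	kern_return_t	kr;

	kr = vm_map_copyin_common(src_map, src_addr, len,
				  FALSE,	/* src_destroy */
				  FALSE,	/* src_volatile */
				  &copy,
				  FALSE);	/* use_maxprot */
	if (kr == KERN_SUCCESS) {
		kr = vm_map_copyout(dst_map, &dst_addr, copy);
		if (kr != KERN_SUCCESS)
			vm_map_copy_discard(copy);	/* copy was not consumed */
	}
#endif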
/*
 *	vm_map_copyin_object:
 *
 *	Create a copy object from an object.
 *	Our caller donates an object reference.
 */

kern_return_t
vm_map_copyin_object(
	vm_object_t		object,
	vm_object_offset_t	offset,		/* offset of region in object */
	vm_object_size_t	size,		/* size of region in object */
	vm_map_copy_t		*copy_result)	/* OUT */
{
	vm_map_copy_t	copy;			/* Resulting copy */

	/*
	 *	We drop the object into a special copy object
	 *	that contains the object directly.
	 */

	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	copy->type = VM_MAP_COPY_OBJECT;
	copy->cpy_object = object;
	copy->cpy_index = 0;
	copy->offset = offset;
	copy->size = size;

	*copy_result = copy;
	return(KERN_SUCCESS);
}
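
/*
 * Illustrative sketch (not part of the build): wrapping an object in a
 * copy and mapping it.  The caller donates its object reference to the
 * copy; a later vm_map_copyout() of a VM_MAP_COPY_OBJECT copy simply
 * enters the object into the destination map.  Locals are hypothetical.
 */
#if 0
	vm_map_copy_t	copy;
	vm_offset_t	dst_addr;

	if (vm_map_copyin_object(object, offset, size, &copy) == KERN_SUCCESS)
		(void) vm_map_copyout(dst_map, &dst_addr, copy);
#endif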
5912 vm_map_entry_t old_entry
,
5916 vm_map_entry_t new_entry
;
5917 kern_return_t result
;
5920 * New sharing code. New map entry
5921 * references original object. Internal
5922 * objects use asynchronous copy algorithm for
5923 * future copies. First make sure we have
5924 * the right object. If we need a shadow,
5925 * or someone else already has one, then
5926 * make a new shadow and share it.
5929 object
= old_entry
->object
.vm_object
;
5930 if (old_entry
->is_sub_map
) {
5931 assert(old_entry
->wired_count
== 0);
5933 if(old_entry
->use_pmap
) {
5934 result
= pmap_nest(new_map
->pmap
,
5935 (old_entry
->object
.sub_map
)->pmap
,
5936 (addr64_t
)old_entry
->vme_start
,
5937 (addr64_t
)old_entry
->vme_start
,
5938 (uint64_t)(old_entry
->vme_end
- old_entry
->vme_start
));
5940 panic("vm_map_fork_share: pmap_nest failed!");
5943 } else if (object
== VM_OBJECT_NULL
) {
5944 object
= vm_object_allocate((vm_size_t
)(old_entry
->vme_end
-
5945 old_entry
->vme_start
));
5946 old_entry
->offset
= 0;
5947 old_entry
->object
.vm_object
= object
;
5948 assert(!old_entry
->needs_copy
);
5949 } else if (object
->copy_strategy
!=
5950 MEMORY_OBJECT_COPY_SYMMETRIC
) {
5953 * We are already using an asymmetric
5954 * copy, and therefore we already have
5958 assert(! old_entry
->needs_copy
);
5960 else if (old_entry
->needs_copy
|| /* case 1 */
5961 object
->shadowed
|| /* case 2 */
5962 (!object
->true_share
&& /* case 3 */
5963 !old_entry
->is_shared
&&
5965 (vm_size_t
)(old_entry
->vme_end
-
5966 old_entry
->vme_start
)))) {
5969 * We need to create a shadow.
5970 * There are three cases here.
5971 * In the first case, we need to
5972 * complete a deferred symmetrical
5973 * copy that we participated in.
5974 * In the second and third cases,
5975 * we need to create the shadow so
5976 * that changes that we make to the
5977 * object do not interfere with
5978 * any symmetrical copies which
		 * have occurred (case 2) or which
5980 * might occur (case 3).
5982 * The first case is when we had
5983 * deferred shadow object creation
5984 * via the entry->needs_copy mechanism.
5985 * This mechanism only works when
5986 * only one entry points to the source
5987 * object, and we are about to create
5988 * a second entry pointing to the
5989 * same object. The problem is that
5990 * there is no way of mapping from
5991 * an object to the entries pointing
5992 * to it. (Deferred shadow creation
		 * works with one entry because it occurs
5994 * at fault time, and we walk from the
5995 * entry to the object when handling
5998 * The second case is when the object
5999 * to be shared has already been copied
6000 * with a symmetric copy, but we point
6001 * directly to the object without
6002 * needs_copy set in our entry. (This
6003 * can happen because different ranges
6004 * of an object can be pointed to by
6005 * different entries. In particular,
6006 * a single entry pointing to an object
6007 * can be split by a call to vm_inherit,
6008 * which, combined with task_create, can
6009 * result in the different entries
6010 * having different needs_copy values.)
6011 * The shadowed flag in the object allows
6012 * us to detect this case. The problem
6013 * with this case is that if this object
6014 * has or will have shadows, then we
6015 * must not perform an asymmetric copy
6016 * of this object, since such a copy
6017 * allows the object to be changed, which
6018 * will break the previous symmetrical
6019 * copies (which rely upon the object
6020 * not changing). In a sense, the shadowed
6021 * flag says "don't change this object".
6022 * We fix this by creating a shadow
6023 * object for this object, and sharing
6024 * that. This works because we are free
6025 * to change the shadow object (and thus
6026 * to use an asymmetric copy strategy);
6027 * this is also semantically correct,
6028 * since this object is temporary, and
6029 * therefore a copy of the object is
6030 * as good as the object itself. (This
6031 * is not true for permanent objects,
6032 * since the pager needs to see changes,
6033 * which won't happen if the changes
6034 * are made to a copy.)
6036 * The third case is when the object
6037 * to be shared has parts sticking
6038 * outside of the entry we're working
6039 * with, and thus may in the future
6040 * be subject to a symmetrical copy.
6041 * (This is a preemptive version of
6045 assert(!(object
->shadowed
&& old_entry
->is_shared
));
6046 vm_object_shadow(&old_entry
->object
.vm_object
,
6048 (vm_size_t
) (old_entry
->vme_end
-
6049 old_entry
->vme_start
));
6052 * If we're making a shadow for other than
6053 * copy on write reasons, then we have
6054 * to remove write permission.
6057 if (!old_entry
->needs_copy
&&
6058 (old_entry
->protection
& VM_PROT_WRITE
)) {
6059 if(old_map
->mapped
) {
6060 vm_object_pmap_protect(
6061 old_entry
->object
.vm_object
,
6063 (old_entry
->vme_end
-
6064 old_entry
->vme_start
),
6066 old_entry
->vme_start
,
6067 old_entry
->protection
& ~VM_PROT_WRITE
);
6069 pmap_protect(old_map
->pmap
,
6070 old_entry
->vme_start
,
6072 old_entry
->protection
& ~VM_PROT_WRITE
);
6076 old_entry
->needs_copy
= FALSE
;
6077 object
= old_entry
->object
.vm_object
;
6081 * If object was using a symmetric copy strategy,
6082 * change its copy strategy to the default
6083 * asymmetric copy strategy, which is copy_delay
6084 * in the non-norma case and copy_call in the
6085 * norma case. Bump the reference count for the
6089 if(old_entry
->is_sub_map
) {
6090 vm_map_lock(old_entry
->object
.sub_map
);
6091 vm_map_reference(old_entry
->object
.sub_map
);
6092 vm_map_unlock(old_entry
->object
.sub_map
);
6094 vm_object_lock(object
);
6095 object
->ref_count
++;
6096 vm_object_res_reference(object
);
6097 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) {
6098 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
6100 vm_object_unlock(object
);
6104 * Clone the entry, using object ref from above.
6105 * Mark both entries as shared.
6108 new_entry
= vm_map_entry_create(new_map
);
6109 vm_map_entry_copy(new_entry
, old_entry
);
6110 old_entry
->is_shared
= TRUE
;
6111 new_entry
->is_shared
= TRUE
;
6114 * Insert the entry into the new map -- we
6115 * know we're inserting at the end of the new
6119 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
), new_entry
);
6122 * Update the physical map
6125 if (old_entry
->is_sub_map
) {
6126 /* Bill Angell pmap support goes here */
6128 pmap_copy(new_map
->pmap
, old_map
->pmap
, new_entry
->vme_start
,
6129 old_entry
->vme_end
- old_entry
->vme_start
,
6130 old_entry
->vme_start
);
boolean_t
vm_map_fork_copy(
	vm_map_t	old_map,
	vm_map_entry_t	*old_entry_p,
	vm_map_t	new_map)
{
	vm_map_entry_t	old_entry = *old_entry_p;
	vm_size_t	entry_size = old_entry->vme_end - old_entry->vme_start;
	vm_offset_t	start = old_entry->vme_start;
	vm_map_copy_t	copy;
	vm_map_entry_t	last = vm_map_last_entry(new_map);

	vm_map_unlock(old_map);
	/*
	 *	Use maxprot version of copyin because we
	 *	care about whether this memory can ever
	 *	be accessed, not just whether it's accessible
	 *	right now.
	 */
	if (vm_map_copyin_maxprot(old_map, start, entry_size, FALSE, &copy)
	    != KERN_SUCCESS) {
		/*
		 *	The map might have changed while it
		 *	was unlocked, check it again.  Skip
		 *	any blank space or permanently
		 *	unreadable region.
		 */
		vm_map_lock(old_map);
		if (!vm_map_lookup_entry(old_map, start, &last) ||
		    (last->max_protection & VM_PROT_READ) == VM_PROT_NONE) {
			last = last->vme_next;
		}
		*old_entry_p = last;

		/*
		 * XXX	For some error returns, want to
		 * XXX	skip to the next element.  Note
		 *	that INVALID_ADDRESS and
		 *	PROTECTION_FAILURE are handled above.
		 */

		return FALSE;
	}

	/*
	 *	Insert the copy into the new map
	 */

	vm_map_copy_insert(new_map, last, copy);

	/*
	 *	Pick up the traversal at the end of
	 *	the copied region.
	 */

	vm_map_lock(old_map);
	start += entry_size;
	if (! vm_map_lookup_entry(old_map, start, &last)) {
		last = last->vme_next;
	} else {
		vm_map_clip_start(old_map, last, start);
	}
	*old_entry_p = last;

	return TRUE;
}
6204 * Create and return a new map based on the old
6205 * map, according to the inheritance values on the
6206 * regions in that map.
6208 * The source map must not be locked.
6214 pmap_t new_pmap
= pmap_create((vm_size_t
) 0);
6216 vm_map_entry_t old_entry
;
6217 vm_size_t new_size
= 0, entry_size
;
6218 vm_map_entry_t new_entry
;
6219 boolean_t src_needs_copy
;
6220 boolean_t new_entry_needs_copy
;
6222 vm_map_reference_swap(old_map
);
6223 vm_map_lock(old_map
);
6225 new_map
= vm_map_create(new_pmap
,
6226 old_map
->min_offset
,
6227 old_map
->max_offset
,
6228 old_map
->hdr
.entries_pageable
);
6231 old_entry
= vm_map_first_entry(old_map
);
6232 old_entry
!= vm_map_to_entry(old_map
);
6235 entry_size
= old_entry
->vme_end
- old_entry
->vme_start
;
6237 switch (old_entry
->inheritance
) {
6238 case VM_INHERIT_NONE
:
6241 case VM_INHERIT_SHARE
:
6242 vm_map_fork_share(old_map
, old_entry
, new_map
);
6243 new_size
+= entry_size
;
6246 case VM_INHERIT_COPY
:
6249 * Inline the copy_quickly case;
6250 * upon failure, fall back on call
6251 * to vm_map_fork_copy.
6254 if(old_entry
->is_sub_map
)
6256 if ((old_entry
->wired_count
!= 0) ||
6257 ((old_entry
->object
.vm_object
!= NULL
) &&
6258 (old_entry
->object
.vm_object
->true_share
))) {
6259 goto slow_vm_map_fork_copy
;
6262 new_entry
= vm_map_entry_create(new_map
);
6263 vm_map_entry_copy(new_entry
, old_entry
);
6264 /* clear address space specifics */
6265 new_entry
->use_pmap
= FALSE
;
6267 if (! vm_object_copy_quickly(
6268 &new_entry
->object
.vm_object
,
6270 (old_entry
->vme_end
-
6271 old_entry
->vme_start
),
6273 &new_entry_needs_copy
)) {
6274 vm_map_entry_dispose(new_map
, new_entry
);
6275 goto slow_vm_map_fork_copy
;
6279 * Handle copy-on-write obligations
6282 if (src_needs_copy
&& !old_entry
->needs_copy
) {
6283 vm_object_pmap_protect(
6284 old_entry
->object
.vm_object
,
6286 (old_entry
->vme_end
-
6287 old_entry
->vme_start
),
6288 ((old_entry
->is_shared
6292 old_entry
->vme_start
,
6293 old_entry
->protection
& ~VM_PROT_WRITE
);
6295 old_entry
->needs_copy
= TRUE
;
6297 new_entry
->needs_copy
= new_entry_needs_copy
;
6300 * Insert the entry at the end
6304 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
),
6306 new_size
+= entry_size
;
6309 slow_vm_map_fork_copy
:
6310 if (vm_map_fork_copy(old_map
, &old_entry
, new_map
)) {
6311 new_size
+= entry_size
;
6315 old_entry
= old_entry
->vme_next
;
6318 new_map
->size
= new_size
;
6319 vm_map_unlock(old_map
);
6320 vm_map_deallocate(old_map
);
/*
 *	vm_map_lookup_locked:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Returns the (object, offset, protection) for
 *	this address, whether it is wired down, and whether
 *	this map has the only reference to the data in question.
 *	In order to later verify this lookup, a "version"
 *	is returned.
 *
 *	The map MUST be locked by the caller and WILL be
 *	locked on exit.  In order to guarantee the
 *	existence of the returned object, it is returned
 *	locked.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
6351 vm_map_lookup_locked(
6352 vm_map_t
*var_map
, /* IN/OUT */
6353 register vm_offset_t vaddr
,
6354 register vm_prot_t fault_type
,
6355 vm_map_version_t
*out_version
, /* OUT */
6356 vm_object_t
*object
, /* OUT */
6357 vm_object_offset_t
*offset
, /* OUT */
6358 vm_prot_t
*out_prot
, /* OUT */
6359 boolean_t
*wired
, /* OUT */
6360 int *behavior
, /* OUT */
6361 vm_object_offset_t
*lo_offset
, /* OUT */
6362 vm_object_offset_t
*hi_offset
, /* OUT */
6365 vm_map_entry_t entry
;
6366 register vm_map_t map
= *var_map
;
6367 vm_map_t old_map
= *var_map
;
6368 vm_map_t cow_sub_map_parent
= VM_MAP_NULL
;
6369 vm_offset_t cow_parent_vaddr
;
6370 vm_offset_t old_start
;
6371 vm_offset_t old_end
;
6372 register vm_prot_t prot
;
6378 * If the map has an interesting hint, try it before calling
6379 * full blown lookup routine.
6382 mutex_lock(&map
->s_lock
);
6384 mutex_unlock(&map
->s_lock
);
6386 if ((entry
== vm_map_to_entry(map
)) ||
6387 (vaddr
< entry
->vme_start
) || (vaddr
>= entry
->vme_end
)) {
6388 vm_map_entry_t tmp_entry
;
6391 * Entry was either not a valid hint, or the vaddr
6392 * was not contained in the entry, so do a full lookup.
6394 if (!vm_map_lookup_entry(map
, vaddr
, &tmp_entry
)) {
6395 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
))
6396 vm_map_unlock(cow_sub_map_parent
);
6397 if((*pmap_map
!= map
)
6398 && (*pmap_map
!= cow_sub_map_parent
))
6399 vm_map_unlock(*pmap_map
);
6400 return KERN_INVALID_ADDRESS
;
6405 if(map
== old_map
) {
6406 old_start
= entry
->vme_start
;
6407 old_end
= entry
->vme_end
;
6411 * Handle submaps. Drop lock on upper map, submap is
6416 if (entry
->is_sub_map
) {
6417 vm_offset_t local_vaddr
;
6418 vm_offset_t end_delta
;
6419 vm_offset_t start_delta
;
6420 vm_offset_t object_start_delta
;
6421 vm_map_entry_t submap_entry
;
6422 boolean_t mapped_needs_copy
=FALSE
;
6424 local_vaddr
= vaddr
;
6426 if ((!entry
->needs_copy
) && (entry
->use_pmap
)) {
6427 /* if pmap_map equals map we unlock below */
6428 if ((*pmap_map
!= map
) &&
6429 (*pmap_map
!= cow_sub_map_parent
))
6430 vm_map_unlock(*pmap_map
);
6431 *pmap_map
= entry
->object
.sub_map
;
6434 if(entry
->needs_copy
) {
6435 if (!mapped_needs_copy
) {
6436 if (vm_map_lock_read_to_write(map
)) {
6437 vm_map_lock_read(map
);
6438 if(*pmap_map
== entry
->object
.sub_map
)
6442 vm_map_lock_read(entry
->object
.sub_map
);
6443 cow_sub_map_parent
= map
;
6444 /* reset base to map before cow object */
6445 /* this is the map which will accept */
6446 /* the new cow object */
6447 old_start
= entry
->vme_start
;
6448 old_end
= entry
->vme_end
;
6449 cow_parent_vaddr
= vaddr
;
6450 mapped_needs_copy
= TRUE
;
6452 vm_map_lock_read(entry
->object
.sub_map
);
6453 if((cow_sub_map_parent
!= map
) &&
6458 vm_map_lock_read(entry
->object
.sub_map
);
6459 /* leave map locked if it is a target */
6460 /* cow sub_map above otherwise, just */
6461 /* follow the maps down to the object */
6462 /* here we unlock knowing we are not */
6463 /* revisiting the map. */
6464 if((*pmap_map
!= map
) && (map
!= cow_sub_map_parent
))
6465 vm_map_unlock_read(map
);
6468 *var_map
= map
= entry
->object
.sub_map
;
6470 /* calculate the offset in the submap for vaddr */
6471 local_vaddr
= (local_vaddr
- entry
->vme_start
) + entry
->offset
;
6474 if(!vm_map_lookup_entry(map
, local_vaddr
, &submap_entry
)) {
6475 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
)){
6476 vm_map_unlock(cow_sub_map_parent
);
6478 if((*pmap_map
!= map
)
6479 && (*pmap_map
!= cow_sub_map_parent
)) {
6480 vm_map_unlock(*pmap_map
);
6483 return KERN_INVALID_ADDRESS
;
6485 /* find the attenuated shadow of the underlying object */
6486 /* on our target map */
6488 /* in english the submap object may extend beyond the */
6489 /* region mapped by the entry or, may only fill a portion */
6490 /* of it. For our purposes, we only care if the object */
6491 /* doesn't fill. In this case the area which will */
6492 /* ultimately be clipped in the top map will only need */
6493 /* to be as big as the portion of the underlying entry */
6494 /* which is mapped */
6495 start_delta
= submap_entry
->vme_start
> entry
->offset
?
6496 submap_entry
->vme_start
- entry
->offset
: 0;
6499 (entry
->offset
+ start_delta
+ (old_end
- old_start
)) <=
6500 submap_entry
->vme_end
?
6501 0 : (entry
->offset
+
6502 (old_end
- old_start
))
6503 - submap_entry
->vme_end
;
6505 old_start
+= start_delta
;
6506 old_end
-= end_delta
;
6508 if(submap_entry
->is_sub_map
) {
6509 entry
= submap_entry
;
6510 vaddr
= local_vaddr
;
6511 goto submap_recurse
;
6514 if(((fault_type
& VM_PROT_WRITE
) && cow_sub_map_parent
)) {
6516 vm_object_t copy_object
;
6517 vm_offset_t local_start
;
6518 vm_offset_t local_end
;
6519 boolean_t copied_slowly
= FALSE
;
6521 if (vm_map_lock_read_to_write(map
)) {
6522 vm_map_lock_read(map
);
6523 old_start
-= start_delta
;
6524 old_end
+= end_delta
;
6529 if (submap_entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6530 submap_entry
->object
.vm_object
=
6533 (submap_entry
->vme_end
6534 - submap_entry
->vme_start
));
6535 submap_entry
->offset
= 0;
6537 local_start
= local_vaddr
-
6538 (cow_parent_vaddr
- old_start
);
6539 local_end
= local_vaddr
+
6540 (old_end
- cow_parent_vaddr
);
6541 vm_map_clip_start(map
, submap_entry
, local_start
);
6542 vm_map_clip_end(map
, submap_entry
, local_end
);
6544 /* This is the COW case, lets connect */
6545 /* an entry in our space to the underlying */
6546 /* object in the submap, bypassing the */
6550 if(submap_entry
->wired_count
!= 0) {
6552 submap_entry
->object
.vm_object
);
6553 vm_object_copy_slowly(
6554 submap_entry
->object
.vm_object
,
6555 submap_entry
->offset
,
6556 submap_entry
->vme_end
-
6557 submap_entry
->vme_start
,
6560 copied_slowly
= TRUE
;
6563 /* set up shadow object */
6564 copy_object
= submap_entry
->object
.vm_object
;
6565 vm_object_reference(copy_object
);
6566 submap_entry
->object
.vm_object
->shadowed
= TRUE
;
6567 submap_entry
->needs_copy
= TRUE
;
6568 vm_object_pmap_protect(
6569 submap_entry
->object
.vm_object
,
6570 submap_entry
->offset
,
6571 submap_entry
->vme_end
-
6572 submap_entry
->vme_start
,
6573 (submap_entry
->is_shared
6575 PMAP_NULL
: map
->pmap
,
6576 submap_entry
->vme_start
,
6577 submap_entry
->protection
&
			/* This works differently than the  */
			/* normal submap case.  We go back  */
			/* to the parent of the cow map and */
			/* clip out the target portion of   */
			/* the sub_map, substituting in the */
			/* new copy object.                 */
6590 local_start
= old_start
;
6591 local_end
= old_end
;
6592 map
= cow_sub_map_parent
;
6593 *var_map
= cow_sub_map_parent
;
6594 vaddr
= cow_parent_vaddr
;
6595 cow_sub_map_parent
= NULL
;
6597 if(!vm_map_lookup_entry(map
,
6599 vm_object_deallocate(
6601 vm_map_lock_write_to_read(map
);
6602 return KERN_INVALID_ADDRESS
;
6605 /* clip out the portion of space */
6606 /* mapped by the sub map which */
6607 /* corresponds to the underlying */
6609 vm_map_clip_start(map
, entry
, local_start
);
6610 vm_map_clip_end(map
, entry
, local_end
);
6613 /* substitute copy object for */
6614 /* shared map entry */
6615 vm_map_deallocate(entry
->object
.sub_map
);
6616 entry
->is_sub_map
= FALSE
;
6617 entry
->object
.vm_object
= copy_object
;
6619 entry
->protection
|= VM_PROT_WRITE
;
6620 entry
->max_protection
|= VM_PROT_WRITE
;
6623 entry
->needs_copy
= FALSE
;
6624 entry
->is_shared
= FALSE
;
6626 entry
->offset
= submap_entry
->offset
;
6627 entry
->needs_copy
= TRUE
;
6628 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6629 entry
->inheritance
= VM_INHERIT_COPY
;
6631 entry
->is_shared
= TRUE
;
6633 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6634 entry
->inheritance
= VM_INHERIT_COPY
;
6636 vm_map_lock_write_to_read(map
);
6638 if((cow_sub_map_parent
)
6639 && (cow_sub_map_parent
!= *pmap_map
)
6640 && (cow_sub_map_parent
!= map
)) {
6641 vm_map_unlock(cow_sub_map_parent
);
6643 entry
= submap_entry
;
6644 vaddr
= local_vaddr
;
6649 * Check whether this task is allowed to have
6653 prot
= entry
->protection
;
6654 if ((fault_type
& (prot
)) != fault_type
) {
6655 if (*pmap_map
!= map
) {
6656 vm_map_unlock(*pmap_map
);
6659 return KERN_PROTECTION_FAILURE
;
6663 * If this page is not pageable, we have to get
6664 * it for all possible accesses.
6667 if (*wired
= (entry
->wired_count
!= 0))
6668 prot
= fault_type
= entry
->protection
;
6671 * If the entry was copy-on-write, we either ...
6674 if (entry
->needs_copy
) {
6676 * If we want to write the page, we may as well
6677 * handle that now since we've got the map locked.
6679 * If we don't need to write the page, we just
6680 * demote the permissions allowed.
6683 if (fault_type
& VM_PROT_WRITE
|| *wired
) {
6685 * Make a new object, and place it in the
6686 * object chain. Note that no new references
6687 * have appeared -- one just moved from the
6688 * map to the new object.
6691 if (vm_map_lock_read_to_write(map
)) {
6692 vm_map_lock_read(map
);
6695 vm_object_shadow(&entry
->object
.vm_object
,
6697 (vm_size_t
) (entry
->vme_end
-
6700 entry
->object
.vm_object
->shadowed
= TRUE
;
6701 entry
->needs_copy
= FALSE
;
6702 vm_map_lock_write_to_read(map
);
6706 * We're attempting to read a copy-on-write
6707 * page -- don't allow writes.
6710 prot
&= (~VM_PROT_WRITE
);
6715 * Create an object if necessary.
6717 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6719 if (vm_map_lock_read_to_write(map
)) {
6720 vm_map_lock_read(map
);
6724 entry
->object
.vm_object
= vm_object_allocate(
6725 (vm_size_t
)(entry
->vme_end
- entry
->vme_start
));
6727 vm_map_lock_write_to_read(map
);
6731 * Return the object/offset from this entry. If the entry
6732 * was copy-on-write or empty, it has been fixed up. Also
6733 * return the protection.
6736 *offset
= (vaddr
- entry
->vme_start
) + entry
->offset
;
6737 *object
= entry
->object
.vm_object
;
6739 *behavior
= entry
->behavior
;
6740 *lo_offset
= entry
->offset
;
6741 *hi_offset
= (entry
->vme_end
- entry
->vme_start
) + entry
->offset
;
6744 * Lock the object to prevent it from disappearing
6747 vm_object_lock(*object
);
6750 * Save the version number
6753 out_version
->main_timestamp
= map
->timestamp
;
6755 return KERN_SUCCESS
;
/*
 *	vm_map_verify:
 *
 *	Verifies that the map in question has not changed
 *	since the given version.  If successful, the map
 *	will not change until vm_map_verify_done() is called.
 */
boolean_t
vm_map_verify(
	register vm_map_t		map,
	register vm_map_version_t	*version)	/* REF */
{
	boolean_t	result;

	vm_map_lock_read(map);
	result = (map->timestamp == version->main_timestamp);

	if (!result)
		vm_map_unlock_read(map);

	return(result);
}

/*
 *	vm_map_verify_done:
 *
 *	Releases locks acquired by a vm_map_verify.
 *
 *	This is now a macro in vm/vm_map.h.  It does a
 *	vm_map_unlock_read on the map.
 */
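
/*
 * Illustrative sketch (not part of the build): the lookup/verify pattern.
 * A fault handler records the map version filled in by
 * vm_map_lookup_locked(), drops the map lock while it works on the object,
 * and later calls vm_map_verify(); on success the read lock is held again
 * and is released with vm_map_verify_done() (a macro in vm/vm_map.h, per
 * the comment above).  "map" and "version" are hypothetical locals.
 */
#if 0
	vm_map_version_t	version;

	/* ... vm_map_lookup_locked() filled in "version" ... */
	/* ... map lock dropped while the fault is serviced ... */
	if (vm_map_verify(map, &version)) {
		/* map unchanged: safe to reuse the earlier lookup results */
		vm_map_verify_done(map, &version);
	} else {
		/* map changed: redo the lookup */
	}
#endif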
6795 * User call to obtain information about a region in
6796 * a task's address map. Currently, only one flavor is
6799 * XXX The reserved and behavior fields cannot be filled
6800 * in until the vm merge from the IK is completed, and
6801 * vm_reserve is implemented.
6803 * XXX Dependency: syscall_vm_region() also supports only one flavor.
6809 vm_offset_t
*address
, /* IN/OUT */
6810 vm_size_t
*size
, /* OUT */
6811 vm_region_flavor_t flavor
, /* IN */
6812 vm_region_info_t info
, /* OUT */
6813 mach_msg_type_number_t
*count
, /* IN/OUT */
6814 ipc_port_t
*object_name
) /* OUT */
6816 vm_map_entry_t tmp_entry
;
6818 vm_map_entry_t entry
;
6821 vm_region_basic_info_t basic
;
6822 vm_region_extended_info_t extended
;
6823 vm_region_top_info_t top
;
6825 if (map
== VM_MAP_NULL
)
6826 return(KERN_INVALID_ARGUMENT
);
6830 case VM_REGION_BASIC_INFO
:
6832 if (*count
< VM_REGION_BASIC_INFO_COUNT
)
6833 return(KERN_INVALID_ARGUMENT
);
6835 basic
= (vm_region_basic_info_t
) info
;
6836 *count
= VM_REGION_BASIC_INFO_COUNT
;
6838 vm_map_lock_read(map
);
6841 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6842 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6843 vm_map_unlock_read(map
);
6844 return(KERN_INVALID_ADDRESS
);
6850 start
= entry
->vme_start
;
6852 basic
->offset
= entry
->offset
;
6853 basic
->protection
= entry
->protection
;
6854 basic
->inheritance
= entry
->inheritance
;
6855 basic
->max_protection
= entry
->max_protection
;
6856 basic
->behavior
= entry
->behavior
;
6857 basic
->user_wired_count
= entry
->user_wired_count
;
6858 basic
->reserved
= entry
->is_sub_map
;
6860 *size
= (entry
->vme_end
- start
);
6862 if (object_name
) *object_name
= IP_NULL
;
6863 if (entry
->is_sub_map
) {
6864 basic
->shared
= FALSE
;
6866 basic
->shared
= entry
->is_shared
;
6869 vm_map_unlock_read(map
);
6870 return(KERN_SUCCESS
);
6872 case VM_REGION_EXTENDED_INFO
:
6875 if (*count
< VM_REGION_EXTENDED_INFO_COUNT
)
6876 return(KERN_INVALID_ARGUMENT
);
6878 extended
= (vm_region_extended_info_t
) info
;
6879 *count
= VM_REGION_EXTENDED_INFO_COUNT
;
6881 vm_map_lock_read(map
);
6884 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6885 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6886 vm_map_unlock_read(map
);
6887 return(KERN_INVALID_ADDRESS
);
6892 start
= entry
->vme_start
;
6894 extended
->protection
= entry
->protection
;
6895 extended
->user_tag
= entry
->alias
;
6896 extended
->pages_resident
= 0;
6897 extended
->pages_swapped_out
= 0;
6898 extended
->pages_shared_now_private
= 0;
6899 extended
->pages_dirtied
= 0;
6900 extended
->external_pager
= 0;
6901 extended
->shadow_depth
= 0;
6903 vm_region_walk(entry
, extended
, entry
->offset
, entry
->vme_end
- start
, map
, start
);
6905 if (extended
->external_pager
&& extended
->ref_count
== 2 && extended
->share_mode
== SM_SHARED
)
6906 extended
->share_mode
= SM_PRIVATE
;
6909 *object_name
= IP_NULL
;
6911 *size
= (entry
->vme_end
- start
);
6913 vm_map_unlock_read(map
);
6914 return(KERN_SUCCESS
);
6916 case VM_REGION_TOP_INFO
:
6919 if (*count
< VM_REGION_TOP_INFO_COUNT
)
6920 return(KERN_INVALID_ARGUMENT
);
6922 top
= (vm_region_top_info_t
) info
;
6923 *count
= VM_REGION_TOP_INFO_COUNT
;
6925 vm_map_lock_read(map
);
6928 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6929 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6930 vm_map_unlock_read(map
);
6931 return(KERN_INVALID_ADDRESS
);
6937 start
= entry
->vme_start
;
6939 top
->private_pages_resident
= 0;
6940 top
->shared_pages_resident
= 0;
6942 vm_region_top_walk(entry
, top
);
6945 *object_name
= IP_NULL
;
6947 *size
= (entry
->vme_end
- start
);
6949 vm_map_unlock_read(map
);
6950 return(KERN_SUCCESS
);
6953 return(KERN_INVALID_ARGUMENT
);
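
/*
 * Illustrative sketch (not part of the build): querying a region with the
 * VM_REGION_BASIC_INFO flavor.  On return the address is adjusted to the
 * start of the containing (or next) entry and *size covers that entry.
 * "map" and "some_addr" are hypothetical; the info types come from
 * mach/vm_region.h.
 */
#if 0
	vm_offset_t			address = some_addr;
	vm_size_t			size;
	vm_region_basic_info_data_t	info;
	mach_msg_type_number_t		count = VM_REGION_BASIC_INFO_COUNT;
	ipc_port_t			object_name;
	kern_return_t			kr;

	kr = vm_region(map, &address, &size, VM_REGION_BASIC_INFO,
		       (vm_region_info_t) &info, &count, &object_name);
	if (kr == KERN_SUCCESS) {
		/* info.protection, info.inheritance, etc. describe the entry */
	}
#endif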
6958 * vm_region_recurse: A form of vm_region which follows the
6959 * submaps in a target map
6966 vm_offset_t
*address
, /* IN/OUT */
6967 vm_size_t
*size
, /* OUT */
6968 natural_t
*nesting_depth
, /* IN/OUT */
6969 vm_region_recurse_info_t info
, /* IN/OUT */
6970 mach_msg_type_number_t
*count
) /* IN/OUT */
6972 vm_map_entry_t tmp_entry
;
6974 vm_map_entry_t entry
;
6978 unsigned int recurse_count
;
6981 vm_map_entry_t base_entry
;
6982 vm_offset_t base_next
;
6983 vm_offset_t base_addr
;
6984 vm_offset_t baddr_start_delta
;
6985 vm_region_submap_info_t submap_info
;
6986 vm_region_extended_info_data_t extended
;
6988 if (map
== VM_MAP_NULL
)
6989 return(KERN_INVALID_ARGUMENT
);
6991 submap_info
= (vm_region_submap_info_t
) info
;
6992 *count
= VM_REGION_SUBMAP_INFO_COUNT
;
6994 if (*count
< VM_REGION_SUBMAP_INFO_COUNT
)
6995 return(KERN_INVALID_ARGUMENT
);
6999 recurse_count
= *nesting_depth
;
7001 LOOKUP_NEXT_BASE_ENTRY
:
7002 vm_map_lock_read(map
);
7003 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7004 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
7005 vm_map_unlock_read(map
);
7006 return(KERN_INVALID_ADDRESS
);
7011 *size
= entry
->vme_end
- entry
->vme_start
;
7012 start
= entry
->vme_start
;
7014 baddr_start_delta
= *address
- start
;
7015 base_next
= entry
->vme_end
;
7018 while(entry
->is_sub_map
&& recurse_count
) {
7020 vm_map_lock_read(entry
->object
.sub_map
);
7023 if(entry
== base_entry
) {
7024 start
= entry
->offset
;
7025 start
+= *address
- entry
->vme_start
;
7028 submap
= entry
->object
.sub_map
;
7029 vm_map_unlock_read(map
);
7032 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7033 if ((entry
= tmp_entry
->vme_next
)
7034 == vm_map_to_entry(map
)) {
7035 vm_map_unlock_read(map
);
7040 goto LOOKUP_NEXT_BASE_ENTRY
;
7046 if(start
<= entry
->vme_start
) {
7047 vm_offset_t old_start
= start
;
7048 if(baddr_start_delta
) {
7049 base_addr
+= (baddr_start_delta
);
7050 *size
-= baddr_start_delta
;
7051 baddr_start_delta
= 0;
7054 (base_addr
+= (entry
->vme_start
- start
))) {
7055 vm_map_unlock_read(map
);
7060 goto LOOKUP_NEXT_BASE_ENTRY
;
7062 *size
-= entry
->vme_start
- start
;
7063 if (*size
> (entry
->vme_end
- entry
->vme_start
)) {
7064 *size
= entry
->vme_end
- entry
->vme_start
;
7068 if(baddr_start_delta
) {
7069 if((start
- entry
->vme_start
)
7070 < baddr_start_delta
) {
7071 base_addr
+= start
- entry
->vme_start
;
7072 *size
-= start
- entry
->vme_start
;
7074 base_addr
+= baddr_start_delta
;
7075 *size
+= baddr_start_delta
;
7077 baddr_start_delta
= 0;
7079 base_addr
+= entry
->vme_start
;
7080 if(base_addr
>= base_next
) {
7081 vm_map_unlock_read(map
);
7086 goto LOOKUP_NEXT_BASE_ENTRY
;
7088 if (*size
> (entry
->vme_end
- start
))
7089 *size
= entry
->vme_end
- start
;
7091 start
= entry
->vme_start
- start
;
7094 start
+= entry
->offset
;
7097 *nesting_depth
-= recurse_count
;
7098 if(entry
!= base_entry
) {
7099 start
= entry
->vme_start
+ (start
- entry
->offset
);
7103 submap_info
->user_tag
= entry
->alias
;
7104 submap_info
->offset
= entry
->offset
;
7105 submap_info
->protection
= entry
->protection
;
7106 submap_info
->inheritance
= entry
->inheritance
;
7107 submap_info
->max_protection
= entry
->max_protection
;
7108 submap_info
->behavior
= entry
->behavior
;
7109 submap_info
->user_wired_count
= entry
->user_wired_count
;
7110 submap_info
->is_submap
= entry
->is_sub_map
;
7111 submap_info
->object_id
= (vm_offset_t
)entry
->object
.vm_object
;
7112 *address
= base_addr
;
7115 extended
.pages_resident
= 0;
7116 extended
.pages_swapped_out
= 0;
7117 extended
.pages_shared_now_private
= 0;
7118 extended
.pages_dirtied
= 0;
7119 extended
.external_pager
= 0;
7120 extended
.shadow_depth
= 0;
7122 if(!entry
->is_sub_map
) {
7123 vm_region_walk(entry
, &extended
, entry
->offset
,
7124 entry
->vme_end
- start
, map
, start
);
7125 submap_info
->share_mode
= extended
.share_mode
;
7126 if (extended
.external_pager
&& extended
.ref_count
== 2
7127 && extended
.share_mode
== SM_SHARED
)
7128 submap_info
->share_mode
= SM_PRIVATE
;
7129 submap_info
->ref_count
= extended
.ref_count
;
7132 submap_info
->share_mode
= SM_TRUESHARED
;
7134 submap_info
->share_mode
= SM_PRIVATE
;
7135 submap_info
->ref_count
= entry
->object
.sub_map
->ref_count
;
7138 submap_info
->pages_resident
= extended
.pages_resident
;
7139 submap_info
->pages_swapped_out
= extended
.pages_swapped_out
;
7140 submap_info
->pages_shared_now_private
=
7141 extended
.pages_shared_now_private
;
7142 submap_info
->pages_dirtied
= extended
.pages_dirtied
;
7143 submap_info
->external_pager
= extended
.external_pager
;
7144 submap_info
->shadow_depth
= extended
.shadow_depth
;
7146 vm_map_unlock_read(map
);
7147 return(KERN_SUCCESS
);
7151 * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
7152 * Goes away after regular vm_region_recurse function migrates to
7154 * vm_region_recurse: A form of vm_region which follows the
7155 * submaps in a target map
7160 vm_region_recurse_64(
7162 vm_offset_t
*address
,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	natural_t		*nesting_depth,		/* IN/OUT */
	vm_region_recurse_info_t info,			/* IN/OUT */
	mach_msg_type_number_t	*count)			/* IN/OUT */
{
	vm_map_entry_t			tmp_entry;
	vm_map_entry_t			entry;
	vm_offset_t			start;
	unsigned int			recurse_count;
	vm_map_t			submap;
	vm_map_entry_t			base_entry;
	vm_offset_t			base_next;
	vm_offset_t			base_addr;
	vm_offset_t			baddr_start_delta;
	vm_region_submap_info_64_t	submap_info;
	vm_region_extended_info_data_t	extended;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	submap_info = (vm_region_submap_info_64_t) info;
	*count = VM_REGION_SUBMAP_INFO_COUNT;

	if (*count < VM_REGION_SUBMAP_INFO_COUNT)
		return(KERN_INVALID_ARGUMENT);

	start = *address;
	recurse_count = *nesting_depth;

LOOKUP_NEXT_BASE_ENTRY:
	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			return(KERN_INVALID_ADDRESS);
		}
	} else {
		entry = tmp_entry;
	}
	*size = entry->vme_end - entry->vme_start;
	start = entry->vme_start;
	base_addr = start;
	baddr_start_delta = *address - start;
	base_next = entry->vme_end;
	base_entry = entry;

	while(entry->is_sub_map && recurse_count) {
		recurse_count--;
		vm_map_lock_read(entry->object.sub_map);

		if(entry == base_entry) {
			start = entry->offset;
			start += *address - entry->vme_start;
		}

		submap = entry->object.sub_map;
		vm_map_unlock_read(map);
		map = submap;

		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next)
					== vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				/* ... move on to the next base entry ... */
				goto LOOKUP_NEXT_BASE_ENTRY;
			}
		} else {
			entry = tmp_entry;
		}

		if(start <= entry->vme_start) {
			vm_offset_t old_start = start;
			if(baddr_start_delta) {
				base_addr += (baddr_start_delta);
				*size -= baddr_start_delta;
				baddr_start_delta = 0;
			}
			if(base_next <=
			   (base_addr += (entry->vme_start - start))) {
				vm_map_unlock_read(map);
				/* ... move on to the next base entry ... */
				goto LOOKUP_NEXT_BASE_ENTRY;
			}
			*size -= entry->vme_start - start;
			if (*size > (entry->vme_end - entry->vme_start)) {
				*size = entry->vme_end - entry->vme_start;
			}
			start = 0;
		} else {
			if(baddr_start_delta) {
				if((start - entry->vme_start)
						< baddr_start_delta) {
					base_addr += start - entry->vme_start;
					*size -= start - entry->vme_start;
				} else {
					base_addr += baddr_start_delta;
					*size += baddr_start_delta;
				}
				baddr_start_delta = 0;
			}
			base_addr += entry->vme_start;
			if(base_addr >= base_next) {
				vm_map_unlock_read(map);
				/* ... move on to the next base entry ... */
				goto LOOKUP_NEXT_BASE_ENTRY;
			}
			if (*size > (entry->vme_end - start))
				*size = entry->vme_end - start;
			start = entry->vme_start - start;
		}
		start += entry->offset;
	}
	*nesting_depth -= recurse_count;
	if(entry != base_entry) {
		start = entry->vme_start + (start - entry->offset);
	}

	submap_info->user_tag = entry->alias;
	submap_info->offset = entry->offset;
	submap_info->protection = entry->protection;
	submap_info->inheritance = entry->inheritance;
	submap_info->max_protection = entry->max_protection;
	submap_info->behavior = entry->behavior;
	submap_info->user_wired_count = entry->user_wired_count;
	submap_info->is_submap = entry->is_sub_map;
	submap_info->object_id = (vm_offset_t)entry->object.vm_object;
	*address = base_addr;

	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	if(!entry->is_sub_map) {
		vm_region_walk(entry, &extended, entry->offset,
			       entry->vme_end - start, map, start);
		submap_info->share_mode = extended.share_mode;
		if (extended.external_pager && extended.ref_count == 2
		    && extended.share_mode == SM_SHARED)
			submap_info->share_mode = SM_PRIVATE;
		submap_info->ref_count = extended.ref_count;
	} else {
		if(entry->use_pmap)
			submap_info->share_mode = SM_TRUESHARED;
		else
			submap_info->share_mode = SM_PRIVATE;
		submap_info->ref_count = entry->object.sub_map->ref_count;
	}

	submap_info->pages_resident = extended.pages_resident;
	submap_info->pages_swapped_out = extended.pages_swapped_out;
	submap_info->pages_shared_now_private =
			extended.pages_shared_now_private;
	submap_info->pages_dirtied = extended.pages_dirtied;
	submap_info->external_pager = extended.external_pager;
	submap_info->shadow_depth = extended.shadow_depth;

	vm_map_unlock_read(map);
	return(KERN_SUCCESS);
}
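
/*
 * Illustrative sketch (not part of the original source): a kernel caller can
 * walk a task's regions, descending into submaps, with the recursing entry
 * point above (assumed here to be vm_region_recurse_64).  The variable names
 * and starting address are hypothetical.
 *
 *	vm_region_submap_info_data_64_t	info;
 *	mach_msg_type_number_t		count = VM_REGION_SUBMAP_INFO_COUNT;
 *	natural_t			depth = 16;
 *	vm_offset_t			addr = 0;
 *	vm_size_t			size;
 *	kern_return_t			kr;
 *
 *	kr = vm_region_recurse_64(map, &addr, &size, &depth,
 *				  (vm_region_recurse_info_t) &info, &count);
 */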
/*
 * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 * Goes away after regular vm_region function migrates to
 * 64 bits.
 */

kern_return_t
vm_region_64(
	vm_map_t		 map,
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	ipc_port_t		*object_name)		/* OUT */
{
	vm_map_entry_t			tmp_entry;
	vm_map_entry_t			entry;
	vm_offset_t			start;
	vm_region_basic_info_64_t	basic;
	vm_region_extended_info_t	extended;
	vm_region_top_info_t		top;
	vm_region_object_info_64_t	object_info_64;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case VM_REGION_BASIC_INFO:
		if (*count < VM_REGION_BASIC_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		basic = (vm_region_basic_info_64_t) info;
		*count = VM_REGION_BASIC_INFO_COUNT;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}

		start = entry->vme_start;

		basic->offset = entry->offset;
		basic->protection = entry->protection;
		basic->inheritance = entry->inheritance;
		basic->max_protection = entry->max_protection;
		basic->behavior = entry->behavior;
		basic->user_wired_count = entry->user_wired_count;
		basic->reserved = entry->is_sub_map;
		*address = start;
		*size = (entry->vme_end - start);

		if (object_name) *object_name = IP_NULL;
		if (entry->is_sub_map) {
			basic->shared = FALSE;
		} else {
			basic->shared = entry->is_shared;
		}

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);

	case VM_REGION_EXTENDED_INFO:
		if (*count < VM_REGION_EXTENDED_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		extended = (vm_region_extended_info_t) info;
		*count = VM_REGION_EXTENDED_INFO_COUNT;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}
		start = entry->vme_start;

		extended->protection = entry->protection;
		extended->user_tag = entry->alias;
		extended->pages_resident = 0;
		extended->pages_swapped_out = 0;
		extended->pages_shared_now_private = 0;
		extended->pages_dirtied = 0;
		extended->external_pager = 0;
		extended->shadow_depth = 0;

		vm_region_walk(entry, extended, entry->offset, entry->vme_end - start, map, start);

		if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
			extended->share_mode = SM_PRIVATE;

		if (object_name)
			*object_name = IP_NULL;
		*address = start;
		*size = (entry->vme_end - start);

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);

	case VM_REGION_TOP_INFO:
		if (*count < VM_REGION_TOP_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		top = (vm_region_top_info_t) info;
		*count = VM_REGION_TOP_INFO_COUNT;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}
		start = entry->vme_start;

		top->private_pages_resident = 0;
		top->shared_pages_resident = 0;

		vm_region_top_walk(entry, top);

		if (object_name)
			*object_name = IP_NULL;
		*address = start;
		*size = (entry->vme_end - start);

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);

	case VM_REGION_OBJECT_INFO_64:
		if (*count < VM_REGION_OBJECT_INFO_COUNT_64)
			return(KERN_INVALID_ARGUMENT);

		object_info_64 = (vm_region_object_info_64_t) info;
		*count = VM_REGION_OBJECT_INFO_COUNT_64;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}

		start = entry->vme_start;

		object_info_64->offset = entry->offset;
		object_info_64->protection = entry->protection;
		object_info_64->inheritance = entry->inheritance;
		object_info_64->max_protection = entry->max_protection;
		object_info_64->behavior = entry->behavior;
		object_info_64->user_wired_count = entry->user_wired_count;
		object_info_64->is_sub_map = entry->is_sub_map;
		*address = start;
		*size = (entry->vme_end - start);

		if (object_name) *object_name = IP_NULL;
		if (entry->is_sub_map) {
			object_info_64->shared = FALSE;
			object_info_64->object_id = 0;
		} else {
			object_info_64->shared = entry->is_shared;
			object_info_64->object_id =
				(vm_offset_t) entry->object.vm_object;
		}

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);

	default:
		return(KERN_INVALID_ARGUMENT);
	}
}
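
/*
 * Illustrative sketch (not part of the original source): querying the basic
 * attributes of whatever region contains a given address.  The variable
 * names and the starting address are hypothetical.
 *
 *	vm_region_basic_info_data_64_t	binfo;
 *	mach_msg_type_number_t		count = VM_REGION_BASIC_INFO_COUNT;
 *	vm_offset_t			addr = some_address;
 *	vm_size_t			size;
 *	ipc_port_t			name;
 *	kern_return_t			kr;
 *
 *	kr = vm_region_64(map, &addr, &size, VM_REGION_BASIC_INFO,
 *			  (vm_region_info_t) &binfo, &count, &name);
 *
 * On success, addr and size describe the containing entry and
 * binfo.protection holds its current protection.
 */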

void
vm_region_top_walk(
	vm_map_entry_t		entry,
	vm_region_top_info_t	top)
{
	register struct vm_object *obj, *tmp_obj;
	register int		   ref_count;

	if (entry->object.vm_object == 0 || entry->is_sub_map) {
		top->share_mode = SM_EMPTY;
		top->ref_count = 0;
		top->obj_id = 0;
		return;
	}

	obj = entry->object.vm_object;

	vm_object_lock(obj);

	if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
		ref_count--;

	if (obj->shadow) {
		if (ref_count == 1)
			top->private_pages_resident = obj->resident_page_count;
		else
			top->shared_pages_resident = obj->resident_page_count;
		top->ref_count = ref_count;
		top->share_mode = SM_COW;

		while (tmp_obj = obj->shadow) {
			vm_object_lock(tmp_obj);
			vm_object_unlock(obj);
			obj = tmp_obj;

			if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
				ref_count--;

			top->shared_pages_resident += obj->resident_page_count;
			top->ref_count += ref_count - 1;
		}
	} else {
		if (entry->needs_copy) {
			top->share_mode = SM_COW;
			top->shared_pages_resident = obj->resident_page_count;
		} else {
			if (ref_count == 1 ||
			    (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
				top->share_mode = SM_PRIVATE;
				top->private_pages_resident = obj->resident_page_count;
			} else {
				top->share_mode = SM_SHARED;
				top->shared_pages_resident = obj->resident_page_count;
			}
		}
		top->ref_count = ref_count;
	}
	top->obj_id = (int)obj;

	vm_object_unlock(obj);
}
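
/*
 * Illustrative sketch (not part of the original source): vm_region_top_walk()
 * is called with the map read-locked, on a single entry, exactly as the
 * VM_REGION_TOP_INFO case above does.
 *
 *	vm_region_top_info_data_t	top;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		top.private_pages_resident = 0;
 *		top.shared_pages_resident = 0;
 *		vm_region_top_walk(entry, &top);
 *	}
 *	vm_map_unlock_read(map);
 */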

void
vm_region_walk(
	vm_map_entry_t		  entry,
	vm_region_extended_info_t extended,
	vm_object_offset_t	  offset,
	vm_offset_t		  range,
	vm_map_t		  map,
	vm_offset_t		  va)
{
	register struct vm_object *obj, *tmp_obj;
	register vm_offset_t	   last_offset;
	register int		   i;
	register int		   ref_count;
	void vm_region_look_for_page();

	if ((entry->object.vm_object == 0) ||
	    (entry->is_sub_map) ||
	    (entry->object.vm_object->phys_contiguous)) {
		extended->share_mode = SM_EMPTY;
		extended->ref_count = 0;
		return;
	}

	obj = entry->object.vm_object;

	vm_object_lock(obj);

	if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
		ref_count--;

	for (last_offset = offset + range; offset < last_offset; offset += PAGE_SIZE_64, va += PAGE_SIZE)
		vm_region_look_for_page(obj, extended, offset, ref_count, 0, map, va);

	if (extended->shadow_depth || entry->needs_copy)
		extended->share_mode = SM_COW;
	else {
		if (ref_count == 1)
			extended->share_mode = SM_PRIVATE;
		else {
			if (obj->true_share)
				extended->share_mode = SM_TRUESHARED;
			else
				extended->share_mode = SM_SHARED;
		}
	}
	extended->ref_count = ref_count - extended->shadow_depth;

	for (i = 0; i < extended->shadow_depth; i++) {
		if ((tmp_obj = obj->shadow) == 0)
			break;
		vm_object_lock(tmp_obj);
		vm_object_unlock(obj);

		if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress)
			ref_count--;

		extended->ref_count += ref_count;
		obj = tmp_obj;
	}
	vm_object_unlock(obj);

	if (extended->share_mode == SM_SHARED) {
		register vm_map_entry_t	cur;
		register vm_map_entry_t	last;
		int			my_refs;

		obj = entry->object.vm_object;
		last = vm_map_to_entry(map);
		my_refs = 0;

		if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
			ref_count--;
		for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
			my_refs += vm_region_count_obj_refs(cur, obj);

		if (my_refs == ref_count)
			extended->share_mode = SM_PRIVATE_ALIASED;
		else if (my_refs > 1)
			extended->share_mode = SM_SHARED_ALIASED;
	}
}
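
/*
 * Illustrative sketch (not part of the original source): vm_region_walk()
 * accumulates per-page statistics over one entry's range; callers zero the
 * counters first and hold the map read lock, as the callers above do.
 *
 *	vm_region_extended_info_data_t	extended;
 *
 *	extended.pages_resident = 0;
 *	extended.pages_swapped_out = 0;
 *	extended.pages_shared_now_private = 0;
 *	extended.pages_dirtied = 0;
 *	extended.external_pager = 0;
 *	extended.shadow_depth = 0;
 *
 *	vm_region_walk(entry, &extended, entry->offset,
 *		       entry->vme_end - entry->vme_start,
 *		       map, entry->vme_start);
 */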
/* object is locked on entry and locked on return */

void
vm_region_look_for_page(
	vm_object_t		  object,
	vm_region_extended_info_t extended,
	vm_object_offset_t	  offset,
	int			  max_refcnt,
	int			  depth,
	vm_map_t		  map,
	vm_offset_t		  va)
{
	register vm_page_t	p;
	register vm_object_t	shadow;
	register int		ref_count;
	vm_object_t		caller_object;

	shadow = object->shadow;
	caller_object = object;

	while (TRUE) {

		if ( !(object->pager_trusted) && !(object->internal))
			extended->external_pager = 1;

		if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
			if (shadow && (max_refcnt == 1))
				extended->pages_shared_now_private++;

			if (!p->fictitious &&
			    (p->dirty || pmap_is_modified(p->phys_page)))
				extended->pages_dirtied++;
			extended->pages_resident++;

			if(object != caller_object)
				vm_object_unlock(object);

			return;
		}
		if (object->existence_map) {
			if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) {

				extended->pages_swapped_out++;

				if(object != caller_object)
					vm_object_unlock(object);

				return;
			}
		}
		if (shadow) {
			vm_object_lock(shadow);

			if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
				ref_count--;

			if (++depth > extended->shadow_depth)
				extended->shadow_depth = depth;

			if (ref_count > max_refcnt)
				max_refcnt = ref_count;

			if(object != caller_object)
				vm_object_unlock(object);

			object = shadow;
			shadow = object->shadow;
			offset = offset + object->shadow_offset;
			continue;
		}
		if(object != caller_object)
			vm_object_unlock(object);
		break;
	}
}

int
vm_region_count_obj_refs(
	vm_map_entry_t	entry,
	vm_object_t	object)
{
	register int		ref_count;
	register vm_object_t	chk_obj;
	register vm_object_t	tmp_obj;

	if (entry->object.vm_object == 0)
		return(0);

	if (entry->is_sub_map)
		return(0);
	else {
		ref_count = 0;

		chk_obj = entry->object.vm_object;
		vm_object_lock(chk_obj);

		while (chk_obj) {
			if (chk_obj == object)
				ref_count++;
			if (tmp_obj = chk_obj->shadow)
				vm_object_lock(tmp_obj);
			vm_object_unlock(chk_obj);

			chk_obj = tmp_obj;
		}
	}
	return(ref_count);
}
/*
 *	Routine:	vm_map_simplify
 *
 *	Description:
 *		Attempt to simplify the map representation in
 *		the vicinity of the given starting address.
 *	Note:
 *		This routine is intended primarily to keep the
 *		kernel maps more compact -- they generally don't
 *		benefit from the "expand a map entry" technology
 *		at allocation time because the adjacent entry
 *		is often wired down.
 */
void
vm_map_simplify_entry(
	vm_map_t	map,
	vm_map_entry_t	this_entry)
{
	vm_map_entry_t	prev_entry;

	prev_entry = this_entry->vme_prev;

	if ((this_entry != vm_map_to_entry(map)) &&
	    (prev_entry != vm_map_to_entry(map)) &&

	    (prev_entry->vme_end == this_entry->vme_start) &&

	    (prev_entry->is_sub_map == FALSE) &&
	    (this_entry->is_sub_map == FALSE) &&

	    (prev_entry->object.vm_object == this_entry->object.vm_object) &&
	    ((prev_entry->offset + (prev_entry->vme_end -
				    prev_entry->vme_start))
	     == this_entry->offset) &&

	    (prev_entry->inheritance == this_entry->inheritance) &&
	    (prev_entry->protection == this_entry->protection) &&
	    (prev_entry->max_protection == this_entry->max_protection) &&
	    (prev_entry->behavior == this_entry->behavior) &&
	    (prev_entry->alias == this_entry->alias) &&
	    (prev_entry->wired_count == this_entry->wired_count) &&
	    (prev_entry->user_wired_count == this_entry->user_wired_count) &&
	    (prev_entry->needs_copy == this_entry->needs_copy) &&

	    (prev_entry->use_pmap == FALSE) &&
	    (this_entry->use_pmap == FALSE) &&
	    (prev_entry->in_transition == FALSE) &&
	    (this_entry->in_transition == FALSE) &&
	    (prev_entry->needs_wakeup == FALSE) &&
	    (this_entry->needs_wakeup == FALSE) &&
	    (prev_entry->is_shared == FALSE) &&
	    (this_entry->is_shared == FALSE)
	   ) {
		_vm_map_entry_unlink(&map->hdr, prev_entry);
		this_entry->vme_start = prev_entry->vme_start;
		this_entry->offset = prev_entry->offset;
		vm_object_deallocate(prev_entry->object.vm_object);
		vm_map_entry_dispose(map, prev_entry);
		SAVE_HINT(map, this_entry);
		counter(c_vm_map_entry_simplified++);
	}
	counter(c_vm_map_simplify_entry_called++);
}

void
vm_map_simplify(
	vm_map_t	map,
	vm_offset_t	start)
{
	vm_map_entry_t	this_entry;

	vm_map_lock(map);
	if (vm_map_lookup_entry(map, start, &this_entry)) {
		vm_map_simplify_entry(map, this_entry);
		vm_map_simplify_entry(map, this_entry->vme_next);
	}
	counter(c_vm_map_simplify_called++);
	vm_map_unlock(map);
}
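
/*
 * Illustrative sketch (not part of the original source): after creating or
 * removing mappings around an address in a kernel map, a caller may attempt
 * to coalesce the neighbouring entries.  vm_map_simplify() takes the map
 * lock itself, so the caller must not already hold it.
 *
 *	vm_map_simplify(kernel_map, addr);
 */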
/*
 *	Routine:	vm_map_machine_attribute
 *	Purpose:
 *		Provide machine-specific attributes to mappings,
 *		such as cachability etc. for machines that provide
 *		them.  NUMA architectures and machines with big/strange
 *		caches will use this.
 *	Note:
 *		Responsibilities for locking and checking are handled here,
 *		everything else in the pmap module. If any non-volatile
 *		information must be kept, the pmap module should handle
 *		it itself. [This assumes that attributes do not
 *		need to be inherited, which seems ok to me]
 */
kern_return_t
vm_map_machine_attribute(
	vm_map_t			map,
	vm_offset_t			address,
	vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t*	value)		/* IN/OUT */
{
	kern_return_t	ret;
	vm_size_t	sync_size;
	vm_offset_t	start;
	vm_map_entry_t	entry;

	if (address < vm_map_min(map) ||
	    (address + size) > vm_map_max(map))
		return KERN_INVALID_ADDRESS;

	vm_map_lock(map);

	if (attribute != MATTR_CACHE) {
		/* If we don't have to find physical addresses, we */
		/* don't have to do an explicit traversal here.    */
		ret = pmap_attribute(map->pmap,
				address, size, attribute, value);
		vm_map_unlock(map);
		return ret;
	}

	/* Get the starting address */
	start = trunc_page_32(address);
	/* Figure how much memory we need to flush (in page increments) */
	sync_size = round_page_32(start + size) - start;

	ret = KERN_SUCCESS;		/* Assume it all worked */

	while(sync_size) {
		if (vm_map_lookup_entry(map, start, &entry)) {
			vm_size_t	sub_size;
			if((entry->vme_end - start) > sync_size) {
				sub_size = sync_size;
				sync_size = 0;
			} else {
				sub_size = entry->vme_end - start;
				sync_size -= sub_size;
			}
			if(entry->is_sub_map) {
				vm_map_machine_attribute(
					entry->object.sub_map,
					(start - entry->vme_start)
						+ entry->offset,
					sub_size,
					attribute, value);
			} else {
				if(entry->object.vm_object) {
					vm_page_t		m;
					vm_object_t		object;
					vm_object_t		base_object;
					vm_object_offset_t	offset;
					vm_object_offset_t	base_offset;
					vm_size_t		range;

					range = sub_size;
					offset = (start - entry->vme_start)
							+ entry->offset;
					base_offset = offset;
					object = entry->object.vm_object;
					base_object = object;

					while(range) {
						m = vm_page_lookup(object,
								   offset);
						if(m && !m->fictitious) {
							ret =
							  pmap_attribute_cache_sync(
								m->phys_page,
								PAGE_SIZE,
								attribute, value);
						} else if (object->shadow) {
							offset = offset +
							    object->shadow_offset;
							object = object->shadow;
							continue;
						}
						range -= PAGE_SIZE;
						/* Bump to the next page */
						base_offset += PAGE_SIZE;
						offset = base_offset;
						object = base_object;
					}
				}
			}
			start += sub_size;
		} else {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}
	}

	vm_map_unlock(map);

	return ret;
}
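
/*
 * Illustrative sketch (not part of the original source): flushing the data
 * cache for a buffer that was written through the kernel before handing it
 * to hardware.  The value argument is both input and output.
 *
 *	vm_machine_attribute_val_t	val = MATTR_VAL_CACHE_FLUSH;
 *	kern_return_t			kr;
 *
 *	kr = vm_map_machine_attribute(map, buf_addr, buf_size,
 *				      MATTR_CACHE, &val);
 */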
/*
 *	vm_map_behavior_set:
 *
 *	Sets the paging reference behavior of the specified address
 *	range in the target map.  Paging reference behavior affects
 *	how pagein operations resulting from faults on the map will be
 *	clustered.
 */
kern_return_t
vm_map_behavior_set(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end,
	vm_behavior_t	new_behavior)
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	XPR(XPR_VM_MAP,
	    "vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d",
	    (integer_t)map, start, end, new_behavior, 0);

	switch (new_behavior) {
	case VM_BEHAVIOR_DEFAULT:
	case VM_BEHAVIOR_RANDOM:
	case VM_BEHAVIOR_SEQUENTIAL:
	case VM_BEHAVIOR_RSEQNTL:
		break;
	case VM_BEHAVIOR_WILLNEED:
	case VM_BEHAVIOR_DONTNEED:
		new_behavior = VM_BEHAVIOR_DEFAULT;
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	/*
	 * The entire address range must be valid for the map.
	 * Note that vm_map_range_check() does a
	 * vm_map_lookup_entry() internally and returns the
	 * entry containing the start of the address range if
	 * the entire range is valid.
	 */
	if (vm_map_range_check(map, start, end, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->behavior = new_behavior;

		entry = entry->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
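
/*
 * Illustrative sketch (not part of the original source): advising the pager
 * that a range will be touched sequentially, so pageins can be clustered
 * accordingly.
 *
 *	kr = vm_map_behavior_set(map, trunc_page_32(addr),
 *				 round_page_32(addr + len),
 *				 VM_BEHAVIOR_SEQUENTIAL);
 */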
#include <mach_kdb.h>
#if	MACH_KDB
#include <ddb/db_output.h>
#include <vm/vm_print.h>

#define	printf	db_printf

/*
 * Forward declarations for internal functions.
 */
extern void vm_map_links_print(
		struct vm_map_links	*links);

extern void vm_map_header_print(
		struct vm_map_header	*header);

extern void vm_map_entry_print(
		vm_map_entry_t		entry);

extern void vm_follow_entry(
		vm_map_entry_t		entry);

extern void vm_follow_map(
		vm_map_t		map);

/*
 *	vm_map_links_print:	[ debug ]
 */
void
vm_map_links_print(
	struct vm_map_links	*links)
{
	iprintf("prev = %08X next = %08X start = %08X end = %08X\n",
		links->prev,
		links->next,
		links->start,
		links->end);
}

/*
 *	vm_map_header_print:	[ debug ]
 */
void
vm_map_header_print(
	struct vm_map_header	*header)
{
	vm_map_links_print(&header->links);
	iprintf("nentries = %08X, %sentries_pageable\n",
		header->nentries,
		(header->entries_pageable ? "" : "!"));
}

/*
 *	vm_follow_entry:	[ debug ]
 */
void
vm_follow_entry(
	vm_map_entry_t	entry)
{
	extern int db_indent;
	int shadows;

	iprintf("map entry %08X\n", entry);

	db_indent += 2;

	shadows = vm_follow_object(entry->object.vm_object);
	iprintf("Total objects : %d\n",shadows);

	db_indent -= 2;
}
/*
 *	vm_map_entry_print:	[ debug ]
 */
void
vm_map_entry_print(
	register vm_map_entry_t	entry)
{
	extern int db_indent;
	static char *inheritance_name[4] = { "share", "copy", "none", "?"};
	static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };

	iprintf("map entry %08X n", entry);

	db_indent += 2;

	vm_map_links_print(&entry->links);

	iprintf("start = %08X end = %08X, prot=%x/%x/%s\n",
		entry->vme_start,
		entry->vme_end,
		entry->protection,
		entry->max_protection,
		inheritance_name[(entry->inheritance & 0x3)]);

	iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n",
		behavior_name[(entry->behavior & 0x3)],
		entry->wired_count,
		entry->user_wired_count);

	iprintf("%sin_transition, %sneeds_wakeup\n",
		(entry->in_transition ? "" : "!"),
		(entry->needs_wakeup ? "" : "!"));

	if (entry->is_sub_map) {
		iprintf("submap = %08X - offset=%08X\n",
			entry->object.sub_map,
			entry->offset);
	} else {
		iprintf("object=%08X, offset=%08X, ",
			entry->object.vm_object,
			entry->offset);
		printf("%sis_shared, %sneeds_copy\n",
			(entry->is_shared ? "" : "!"),
			(entry->needs_copy ? "" : "!"));
	}

	db_indent -= 2;
}
/*
 *	vm_follow_map:	[ debug ]
 */
void
vm_follow_map(
	vm_map_t map)
{
	register vm_map_entry_t	entry;
	extern int db_indent;

	iprintf("task map %08X\n", map);

	db_indent += 2;

	for (entry = vm_map_first_entry(map);
	     entry && entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_follow_entry(entry);
	}

	db_indent -= 2;
}

/*
 *	vm_map_print:	[ debug ]
 */
void
vm_map_print(
	db_addr_t inmap)
{
	register vm_map_entry_t	entry;
	vm_map_t map;
#if	TASK_SWAPPER
	char *swstate;
#endif	/* TASK_SWAPPER */
	extern int db_indent;

	map = (vm_map_t)inmap;		/* Make sure we have the right type */

	iprintf("task map %08X\n", map);

	db_indent += 2;

	vm_map_header_print(&map->hdr);

	iprintf("pmap = %08X, size = %08X, ref = %d, hint = %08X, first_free = %08X\n",
		map->pmap,
		map->size,
		map->ref_count,
		map->hint,
		map->first_free);

	iprintf("%swait_for_space, %swiring_required, timestamp = %d\n",
		(map->wait_for_space ? "" : "!"),
		(map->wiring_required ? "" : "!"),
		map->timestamp);

#if	TASK_SWAPPER
	switch (map->sw_state) {
	case MAP_SW_IN:
		swstate = "SW_IN";
		break;
	case MAP_SW_OUT:
		swstate = "SW_OUT";
		break;
	default:
		swstate = "????";
		break;
	}
	iprintf("res = %d, sw_state = %s\n", map->res_count, swstate);
#endif	/* TASK_SWAPPER */

	for (entry = vm_map_first_entry(map);
	     entry && entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_map_entry_print(entry);
	}

	db_indent -= 2;
}
/*
 *	Routine:	vm_map_copy_print
 *	Purpose:
 *		Pretty-print a copy object for ddb.
 */
void
vm_map_copy_print(
	db_addr_t	incopy)
{
	extern int db_indent;
	vm_map_copy_t copy;
	vm_map_entry_t entry;

	copy = (vm_map_copy_t)incopy;	/* Make sure we have the right type */

	printf("copy object 0x%x\n", copy);

	db_indent += 2;

	iprintf("type=%d", copy->type);
	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		printf("[entry_list]");
		break;

	case VM_MAP_COPY_OBJECT:
		printf("[object]");
		break;

	case VM_MAP_COPY_KERNEL_BUFFER:
		printf("[kernel_buffer]");
		break;

	default:
		printf("[bad type]");
		break;
	}
	printf(", offset=0x%x", copy->offset);
	printf(", size=0x%x\n", copy->size);

	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		vm_map_header_print(&copy->cpy_hdr);
		for (entry = vm_map_copy_first_entry(copy);
		     entry && entry != vm_map_copy_to_entry(copy);
		     entry = entry->vme_next) {
			vm_map_entry_print(entry);
		}
		break;

	case VM_MAP_COPY_OBJECT:
		iprintf("object=0x%x\n", copy->cpy_object);
		break;

	case VM_MAP_COPY_KERNEL_BUFFER:
		iprintf("kernel buffer=0x%x", copy->cpy_kdata);
		printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);
		break;
	}

	db_indent -= 2;
}
/*
 *	db_vm_map_total_size(map)	[ debug ]
 *
 *	return the total virtual size (in bytes) of the map
 */
vm_size_t
db_vm_map_total_size(
	db_addr_t	inmap)
{
	vm_map_entry_t	entry;
	vm_size_t	total;
	vm_map_t	map;

	map = (vm_map_t)inmap;		/* Make sure we have the right type */

	total = 0;
	for (entry = vm_map_first_entry(map);
	     entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		total += entry->vme_end - entry->vme_start;
	}

	return total;
}

#endif	/* MACH_KDB */
/*
 *	Routine:	vm_map_entry_insert
 *
 *	Description:	This routine inserts a new vm_entry in a locked map.
 */
vm_map_entry_t
vm_map_entry_insert(
	vm_map_t		map,
	vm_map_entry_t		insp_entry,
	vm_offset_t		start,
	vm_offset_t		end,
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	boolean_t		is_shared,
	boolean_t		in_transition,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_behavior_t		behavior,
	vm_inherit_t		inheritance,
	unsigned		wired_count)
{
	vm_map_entry_t	new_entry;

	assert(insp_entry != (vm_map_entry_t)0);

	new_entry = vm_map_entry_create(map);

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->is_shared = is_shared;
	new_entry->is_sub_map = FALSE;
	new_entry->needs_copy = needs_copy;
	new_entry->in_transition = in_transition;
	new_entry->needs_wakeup = FALSE;
	new_entry->inheritance = inheritance;
	new_entry->protection = cur_protection;
	new_entry->max_protection = max_protection;
	new_entry->behavior = behavior;
	new_entry->wired_count = wired_count;
	new_entry->user_wired_count = 0;
	new_entry->use_pmap = FALSE;

	/*
	 *	Insert the new entry into the list.
	 */

	vm_map_entry_link(map, insp_entry, new_entry);
	map->size += end - start;

	/*
	 *	Update the free space hint and the lookup hint.
	 */

	SAVE_HINT(map, new_entry);

	return new_entry;
}
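
/*
 * Illustrative sketch (not part of the original source): inserting a fresh
 * entry after a lookup, with the map write-locked by the caller.  The
 * object, bounds and protections shown are hypothetical.
 *
 *	vm_map_lock(map);
 *	(void) vm_map_lookup_entry(map, start, &insp_entry);
 *	new_entry = vm_map_entry_insert(map, insp_entry, start, end,
 *					object, 0, FALSE, FALSE, FALSE,
 *					VM_PROT_DEFAULT, VM_PROT_ALL,
 *					VM_BEHAVIOR_DEFAULT,
 *					VM_INHERIT_DEFAULT, 0);
 *	vm_map_unlock(map);
 */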
/*
 *	Routine:	vm_remap_extract
 *
 *	Description:	This routine returns a vm_entry list from a map.
 */
kern_return_t
vm_remap_extract(
	vm_map_t		map,
	vm_offset_t		addr,
	vm_size_t		size,
	boolean_t		copy,
	struct vm_map_header	*map_header,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	/* What, no behavior? */
	vm_inherit_t		inheritance,
	boolean_t		pageable)
{
	kern_return_t		result;
	vm_size_t		mapped_size;
	vm_size_t		tmp_size;
	vm_map_entry_t		src_entry;	/* result of last map lookup */
	vm_map_entry_t		new_entry;
	vm_object_offset_t	offset;
	vm_offset_t		map_address;
	vm_offset_t		src_start;	/* start of entry to map */
	vm_offset_t		src_end;	/* end of region to be mapped */
	vm_object_t		object;
	vm_map_version_t	version;
	boolean_t		src_needs_copy;
	boolean_t		new_entry_needs_copy;

	assert(map != VM_MAP_NULL);
	assert(size != 0 && size == round_page_32(size));
	assert(inheritance == VM_INHERIT_NONE ||
	       inheritance == VM_INHERIT_COPY ||
	       inheritance == VM_INHERIT_SHARE);

	/*
	 *	Compute start and end of region.
	 */
	src_start = trunc_page_32(addr);
	src_end = round_page_32(src_start + size);

	/*
	 *	Initialize map_header.
	 */
	map_header->links.next = (struct vm_map_entry *)&map_header->links;
	map_header->links.prev = (struct vm_map_entry *)&map_header->links;
	map_header->nentries = 0;
	map_header->entries_pageable = pageable;

	*cur_protection = VM_PROT_ALL;
	*max_protection = VM_PROT_ALL;

	map_address = 0;
	mapped_size = 0;
	result = KERN_SUCCESS;

	/*
	 *	The specified source virtual space might correspond to
	 *	multiple map entries, need to loop on them.
	 */
	vm_map_lock(map);
	while (mapped_size != size) {
		vm_size_t	entry_size;

		/*
		 *	Find the beginning of the region.
		 */
		if (! vm_map_lookup_entry(map, src_start, &src_entry)) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		if (src_start < src_entry->vme_start ||
		    (mapped_size && src_start != src_entry->vme_start)) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		if(src_entry->is_sub_map) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		tmp_size = size - mapped_size;
		if (src_end > src_entry->vme_end)
			tmp_size -= (src_end - src_entry->vme_end);

		entry_size = (vm_size_t)(src_entry->vme_end -
					 src_entry->vme_start);

		if(src_entry->is_sub_map) {
			vm_map_reference(src_entry->object.sub_map);
		} else {
			object = src_entry->object.vm_object;

			if (object == VM_OBJECT_NULL) {
				object = vm_object_allocate(entry_size);
				src_entry->offset = 0;
				src_entry->object.vm_object = object;
			} else if (object->copy_strategy !=
				   MEMORY_OBJECT_COPY_SYMMETRIC) {
				/*
				 *	We are already using an asymmetric
				 *	copy, and therefore we already have
				 *	the right object.
				 */
				assert(!src_entry->needs_copy);
			} else if (src_entry->needs_copy || object->shadowed ||
				   (object->internal && !object->true_share &&
				    !src_entry->is_shared &&
				    object->size > entry_size)) {

				vm_object_shadow(&src_entry->object.vm_object,
						 &src_entry->offset,
						 entry_size);

				if (!src_entry->needs_copy &&
				    (src_entry->protection & VM_PROT_WRITE)) {
					if(map->mapped) {
						vm_object_pmap_protect(
							src_entry->object.vm_object,
							src_entry->offset,
							entry_size,
							PMAP_NULL,
							src_entry->vme_start,
							src_entry->protection &
								~VM_PROT_WRITE);
					} else {
						pmap_protect(vm_map_pmap(map),
							src_entry->vme_start,
							src_entry->vme_end,
							src_entry->protection &
								~VM_PROT_WRITE);
					}
				}

				object = src_entry->object.vm_object;
				src_entry->needs_copy = FALSE;
			}

			vm_object_lock(object);
			object->ref_count++;	/* object ref. for new entry */
			VM_OBJ_RES_INCR(object);
			if (object->copy_strategy ==
					MEMORY_OBJECT_COPY_SYMMETRIC) {
				object->copy_strategy =
					MEMORY_OBJECT_COPY_DELAY;
			}
			vm_object_unlock(object);
		}

		offset = src_entry->offset + (src_start - src_entry->vme_start);

		new_entry = _vm_map_entry_create(map_header);
		vm_map_entry_copy(new_entry, src_entry);
		new_entry->use_pmap = FALSE; /* clr address space specifics */

		new_entry->vme_start = map_address;
		new_entry->vme_end = map_address + tmp_size;
		new_entry->inheritance = inheritance;
		new_entry->offset = offset;

		/*
		 *	The new region has to be copied now if required.
		 */
	RestartCopy:
		if (!copy) {
			src_entry->is_shared = TRUE;
			new_entry->is_shared = TRUE;
			if (!(new_entry->is_sub_map))
				new_entry->needs_copy = FALSE;

		} else if (src_entry->is_sub_map) {
			/* make this a COW sub_map if not already */
			new_entry->needs_copy = TRUE;
		} else if (src_entry->wired_count == 0 &&
			 vm_object_copy_quickly(&new_entry->object.vm_object,
						new_entry->offset,
						(new_entry->vme_end -
						 new_entry->vme_start),
						&src_needs_copy,
						&new_entry_needs_copy)) {

			new_entry->needs_copy = new_entry_needs_copy;
			new_entry->is_shared = FALSE;

			/*
			 *	Handle copy_on_write semantics.
			 */
			if (src_needs_copy && !src_entry->needs_copy) {
				vm_object_pmap_protect(object,
						       offset,
						       entry_size,
						       ((src_entry->is_shared
							 || map->mapped) ?
							PMAP_NULL : map->pmap),
						       src_entry->vme_start,
						       src_entry->protection &
							~VM_PROT_WRITE);

				src_entry->needs_copy = TRUE;
			}
			/*
			 *	Throw away the old object reference of the new entry.
			 */
			vm_object_deallocate(object);

		} else {
			new_entry->is_shared = FALSE;

			/*
			 *	The map can be safely unlocked since we
			 *	already hold a reference on the object.
			 *
			 *	Record the timestamp of the map for later
			 *	verification, and unlock the map.
			 */
			version.main_timestamp = map->timestamp;
			vm_map_unlock(map);	/* Increments timestamp once! */

			/*
			 *	Perform the copy.
			 */
			if (src_entry->wired_count > 0) {
				vm_object_lock(object);
				result = vm_object_copy_slowly(
						object,
						offset,
						entry_size,
						THREAD_UNINT,
						&new_entry->object.vm_object);

				new_entry->offset = 0;
				new_entry->needs_copy = FALSE;
			} else {
				result = vm_object_copy_strategically(
						object,
						offset,
						entry_size,
						&new_entry->object.vm_object,
						&new_entry->offset,
						&new_entry_needs_copy);

				new_entry->needs_copy = new_entry_needs_copy;
			}

			/*
			 *	Throw away the old object reference of the new entry.
			 */
			vm_object_deallocate(object);

			if (result != KERN_SUCCESS &&
			    result != KERN_MEMORY_RESTART_COPY) {
				_vm_map_entry_dispose(map_header, new_entry);
				break;
			}

			/*
			 *	Verify that the map has not substantially
			 *	changed while the copy was being made.
			 */

			vm_map_lock(map);
			if (version.main_timestamp + 1 != map->timestamp) {
				/*
				 *	Simple version comparison failed.
				 *
				 *	Retry the lookup and verify that the
				 *	same object/offset are still present.
				 */
				vm_object_deallocate(new_entry->
						     object.vm_object);
				_vm_map_entry_dispose(map_header, new_entry);
				if (result == KERN_MEMORY_RESTART_COPY)
					result = KERN_SUCCESS;
				continue;
			}

			if (result == KERN_MEMORY_RESTART_COPY) {
				vm_object_reference(object);
				goto RestartCopy;
			}
		}

		_vm_map_entry_link(map_header,
				   map_header->links.prev, new_entry);

		*cur_protection &= src_entry->protection;
		*max_protection &= src_entry->max_protection;

		map_address += tmp_size;
		mapped_size += tmp_size;
		src_start += tmp_size;

	} /* end while */

	vm_map_unlock(map);
	if (result != KERN_SUCCESS) {
		/*
		 *	Free all allocated elements.
		 */
		for (src_entry = map_header->links.next;
		     src_entry != (struct vm_map_entry *)&map_header->links;
		     src_entry = new_entry) {
			new_entry = src_entry->vme_next;
			_vm_map_entry_unlink(map_header, src_entry);
			vm_object_deallocate(src_entry->object.vm_object);
			_vm_map_entry_dispose(map_header, src_entry);
		}
	}
	return result;
}
/*
 *	Routine:	vm_remap
 *
 *		Map portion of a task's address space.
 *		Mapped region must not overlap more than
 *		one vm memory object. Protections and
 *		inheritance attributes remain the same
 *		as in the original task and are out parameters.
 *		Source and Target task can be identical
 *		Other attributes are identical as for vm_map()
 */
kern_return_t
vm_remap(
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_size_t	size,
	vm_offset_t	mask,
	boolean_t	anywhere,
	vm_map_t	src_map,
	vm_offset_t	memory_address,
	boolean_t	copy,
	vm_prot_t	*cur_protection,
	vm_prot_t	*max_protection,
	vm_inherit_t	inheritance)
{
	kern_return_t		result;
	vm_map_entry_t		entry;
	vm_map_entry_t		insp_entry;
	vm_map_entry_t		new_entry;
	struct vm_map_header	map_header;

	if (target_map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	switch (inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		if (size != 0 && src_map != VM_MAP_NULL)
			break;
		/*FALL THRU*/
	default:
		return KERN_INVALID_ARGUMENT;
	}

	size = round_page_32(size);

	result = vm_remap_extract(src_map, memory_address,
				  size, copy, &map_header,
				  cur_protection,
				  max_protection,
				  inheritance,
				  target_map->hdr.entries_pageable);

	if (result != KERN_SUCCESS) {
		return result;
	}

	/*
	 * Allocate/check a range of free virtual address
	 * space for the target
	 */
	*address = trunc_page_32(*address);
	vm_map_lock(target_map);
	result = vm_remap_range_allocate(target_map, address, size,
					 mask, anywhere, &insp_entry);

	for (entry = map_header.links.next;
	     entry != (struct vm_map_entry *)&map_header.links;
	     entry = new_entry) {
		new_entry = entry->vme_next;
		_vm_map_entry_unlink(&map_header, entry);
		if (result == KERN_SUCCESS) {
			entry->vme_start += *address;
			entry->vme_end += *address;
			vm_map_entry_link(target_map, insp_entry, entry);
			insp_entry = entry;
		} else {
			if (!entry->is_sub_map) {
				vm_object_deallocate(entry->object.vm_object);
			} else {
				vm_map_deallocate(entry->object.sub_map);
			}
			_vm_map_entry_dispose(&map_header, entry);
		}
	}

	if (result == KERN_SUCCESS) {
		target_map->size += size;
		SAVE_HINT(target_map, insp_entry);
	}
	vm_map_unlock(target_map);

	if (result == KERN_SUCCESS && target_map->wiring_required)
		result = vm_map_wire(target_map, *address,
				     *address + size, *cur_protection, TRUE);
	return result;
}
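
/*
 * Illustrative sketch (not part of the original source): sharing (rather
 * than copying) a page-aligned range of src_map into target_map at an
 * address chosen by the kernel.  The surrounding variables are hypothetical.
 *
 *	vm_offset_t	target_addr = 0;
 *	vm_prot_t	cur_prot, max_prot;
 *	kern_return_t	kr;
 *
 *	kr = vm_remap(target_map, &target_addr, size,
 *		      (vm_offset_t) 0, TRUE,		(mask, anywhere)
 *		      src_map, src_addr, FALSE,		(FALSE = share, not copy)
 *		      &cur_prot, &max_prot, VM_INHERIT_SHARE);
 */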
/*
 *	Routine:	vm_remap_range_allocate
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		returns the address and the map entry just before the allocated
 *		range
 *
 *	Map must be locked.
 */

kern_return_t
vm_remap_range_allocate(
	vm_map_t	map,
	vm_offset_t	*address,	/* IN/OUT */
	vm_size_t	size,
	vm_offset_t	mask,
	boolean_t	anywhere,
	vm_map_entry_t	*map_entry)	/* OUT */
{
	register vm_map_entry_t	entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	kern_return_t		result = KERN_SUCCESS;

StartAgain: ;

	start = *address;

	if (anywhere) {
		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset)
			return(KERN_NO_SPACE);

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		assert(first_free_is_valid(map));
		if (start == map->min_offset) {
			if ((entry = map->first_free) != vm_map_to_entry(map))
				start = entry->vme_end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->vme_end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = ((start + mask) & ~mask);
			if (end < start)
				return(KERN_NO_SPACE);
			start = end;
			end += size;

			if ((end > map->max_offset) || (end < start)) {
				if (map->wait_for_space) {
					if (size <= (map->max_offset -
						     map->min_offset)) {
						assert_wait((event_t) map, THREAD_INTERRUPTIBLE);
						vm_map_unlock(map);
						thread_block((void (*)(void))0);
						vm_map_lock(map);
						goto StartAgain;
					}
				}

				return(KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->vme_next;
			if (next == vm_map_to_entry(map))
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->vme_start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->vme_end;
		}
		*address = start;
	} else {
		vm_map_entry_t	temp_entry;

		/*
		 *	Verify that:
		 *		the address doesn't itself violate
		 *		the mask requirement.
		 */

		if ((start & mask) != 0)
			return(KERN_NO_SPACE);

		/*
		 *	...	the address is within bounds
		 */

		end = start + size;

		if ((start < map->min_offset) ||
		    (end > map->max_offset) ||
		    (start >= end)) {
			return(KERN_INVALID_ADDRESS);
		}

		/*
		 *	...	the starting address isn't allocated
		 */

		if (vm_map_lookup_entry(map, start, &temp_entry))
			return(KERN_NO_SPACE);

		entry = temp_entry;

		/*
		 *	...	the next region doesn't overlap the
		 *		end point.
		 */

		if ((entry->vme_next != vm_map_to_entry(map)) &&
		    (entry->vme_next->vme_start < end))
			return(KERN_NO_SPACE);
	}
	*map_entry = entry;
	return(KERN_SUCCESS);
}
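
/*
 * Illustrative sketch (not part of the original source): the caller holds
 * the target map lock and asks for "anywhere" placement, exactly as
 * vm_remap() does above.
 *
 *	vm_map_lock(target_map);
 *	result = vm_remap_range_allocate(target_map, &addr, size,
 *					 (vm_offset_t) 0, TRUE, &insp_entry);
 *
 * On KERN_SUCCESS, addr is set, insp_entry precedes the chosen range, and
 * the map is still locked.
 */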
/*
 *	Set the address map for the current thr_act to the specified map
 */
vm_map_t
vm_map_switch(
	vm_map_t	map)
{
	int		mycpu;
	thread_act_t	thr_act = current_act();
	vm_map_t	oldmap = thr_act->map;

	mp_disable_preemption();
	mycpu = cpu_number();

	/*
	 *	Deactivate the current map and activate the requested map
	 */
	PMAP_SWITCH_USER(thr_act, map, mycpu);

	mp_enable_preemption();
	return(oldmap);
}
/*
 *	Routine:	vm_map_write_user
 *
 *	Description:
 *		Copy out data from a kernel space into space in the
 *		destination map. The space must already exist in the
 *		destination map.
 *	NOTE:  This routine should only be called by threads
 *	which can block on a page fault. i.e. kernel mode user
 *	threads.
 *
 */
kern_return_t
vm_map_write_user(
	vm_map_t	map,
	vm_offset_t	src_addr,
	vm_offset_t	dst_addr,
	vm_size_t	size)
{
	thread_act_t	thr_act = current_act();
	kern_return_t	kr = KERN_SUCCESS;

	if(thr_act->map == map) {
		if (copyout((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
	} else {
		vm_map_t	oldmap;

		/* take on the identity of the target map while doing */
		/* the transfer */

		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyout((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
/*
 *	Routine:	vm_map_read_user
 *
 *	Description:
 *		Copy in data from a user space source map into the
 *		kernel map. The space must already exist in the
 *		kernel map.
 *	NOTE:  This routine should only be called by threads
 *	which can block on a page fault. i.e. kernel mode user
 *	threads.
 *
 */
kern_return_t
vm_map_read_user(
	vm_map_t	map,
	vm_offset_t	src_addr,
	vm_offset_t	dst_addr,
	vm_size_t	size)
{
	thread_act_t	thr_act = current_act();
	kern_return_t	kr = KERN_SUCCESS;

	if(thr_act->map == map) {
		if (copyin((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
	} else {
		vm_map_t	oldmap;

		/* take on the identity of the target map while doing */
		/* the transfer */

		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
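
/*
 * Illustrative sketch (not part of the original source): moving data between
 * the kernel and a (possibly non-current) user map; both calls may fault and
 * so must be made from a context that can block.
 *
 *	Copy a kernel buffer out to the user map:
 *	kr = vm_map_write_user(user_map, (vm_offset_t) kbuf, uaddr, len);
 *
 *	Copy from the user map into a kernel buffer:
 *	kr = vm_map_read_user(user_map, uaddr, (vm_offset_t) kbuf, len);
 */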
/* Takes existing source and destination sub-maps and clones the contents of */
/* the source map */

kern_return_t
vm_region_clone(
	ipc_port_t	src_region,
	ipc_port_t	dst_region)
{
	vm_named_entry_t	src_object;
	vm_named_entry_t	dst_object;
	vm_map_t		src_map;
	vm_map_t		dst_map;
	vm_offset_t		addr;
	vm_offset_t		max_off;
	vm_map_entry_t		entry;
	vm_map_entry_t		new_entry;
	vm_map_entry_t		insert_point;

	src_object = (vm_named_entry_t)src_region->ip_kobject;
	dst_object = (vm_named_entry_t)dst_region->ip_kobject;
	if((!src_object->is_sub_map) || (!dst_object->is_sub_map)) {
		return KERN_INVALID_ARGUMENT;
	}
	src_map = (vm_map_t)src_object->backing.map;
	dst_map = (vm_map_t)dst_object->backing.map;
	/* destination map is assumed to be unavailable to any other */
	/* activity.  i.e. it is new */
	vm_map_lock(src_map);
	if((src_map->min_offset != dst_map->min_offset)
			|| (src_map->max_offset != dst_map->max_offset)) {
		vm_map_unlock(src_map);
		return KERN_INVALID_ARGUMENT;
	}
	addr = src_map->min_offset;
	vm_map_lookup_entry(dst_map, addr, &entry);
	if(entry == vm_map_to_entry(dst_map)) {
		entry = entry->vme_next;
	}
	if(entry == vm_map_to_entry(dst_map)) {
		max_off = src_map->max_offset;
	} else {
		max_off = entry->vme_start;
	}
	vm_map_lookup_entry(src_map, addr, &entry);
	if(entry == vm_map_to_entry(src_map)) {
		entry = entry->vme_next;
	}
	vm_map_lookup_entry(dst_map, addr, &insert_point);
	while((entry != vm_map_to_entry(src_map)) &&
	      (entry->vme_end <= max_off)) {
		addr = entry->vme_start;
		new_entry = vm_map_entry_create(dst_map);
		vm_map_entry_copy(new_entry, entry);
		vm_map_entry_link(dst_map, insert_point, new_entry);
		insert_point = new_entry;
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (new_entry->is_sub_map) {
				vm_map_reference(new_entry->object.sub_map);
			} else {
				vm_object_reference(
					new_entry->object.vm_object);
			}
		}
		dst_map->size += new_entry->vme_end - new_entry->vme_start;
		entry = entry->vme_next;
	}
	vm_map_unlock(src_map);
	return KERN_SUCCESS;
}
/*
 * Export routines to other components for the things we access locally through
 * macros.
 */
#undef current_map
vm_map_t
current_map(void)
{
	return (current_map_fast());
}
/*
 *	vm_map_check_protection:
 *
 *	Assert that the target map allows the specified
 *	privilege on the entire address region given.
 *	The entire region must be allocated.
 */
boolean_t vm_map_check_protection(map, start, end, protection)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_prot_t	protection;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		tmp_entry;

	vm_map_lock(map);

	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) {
		vm_map_unlock(map);
		return (FALSE);
	}

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		vm_map_unlock(map);
		return(FALSE);
	}

	entry = tmp_entry;

	while (start < end) {
		if (entry == vm_map_to_entry(map)) {
			vm_map_unlock(map);
			return(FALSE);
		}

		/*
		 *	No holes allowed!
		 */

		if (start < entry->vme_start) {
			vm_map_unlock(map);
			return(FALSE);
		}

		/*
		 * Check protection associated with entry.
		 */

		if ((entry->protection & protection) != protection) {
			vm_map_unlock(map);
			return(FALSE);
		}

		/* go to next entry */

		start = entry->vme_end;
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(TRUE);
}
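
/*
 * Illustrative sketch (not part of the original source): verifying that an
 * entire user range is readable before starting a transfer.
 *
 *	if (!vm_map_check_protection(map, trunc_page_32(addr),
 *				     round_page_32(addr + len),
 *				     VM_PROT_READ))
 *		return KERN_PROTECTION_FAILURE;
 */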
/*
 *	This routine is obsolete, but included for backward
 *	compatibility for older drivers.
 */
void
kernel_vm_map_reference(
	vm_map_t map)
{
	vm_map_reference(map);
}
/*
 *	vm_map_reference:
 *
 *	Most code internal to the osfmk will go through a
 *	macro defining this.  This is always here for the
 *	use of other kernel components.
 */
#undef vm_map_reference
void
vm_map_reference(
	register vm_map_t	map)
{
	if (map == VM_MAP_NULL)
		return;

	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count > 0);
	assert(map->ref_count >= map->res_count);
	map->res_count++;
#endif
	map->ref_count++;
	mutex_unlock(&map->s_lock);
}
/*
 *	vm_map_deallocate:
 *
 *	Removes a reference from the specified map,
 *	destroying it if no references remain.
 *	The map should not be locked.
 */
void
vm_map_deallocate(
	register vm_map_t	map)
{
	unsigned int		ref;

	if (map == VM_MAP_NULL)
		return;

	mutex_lock(&map->s_lock);
	ref = --map->ref_count;
	if (ref > 0) {
		vm_map_res_deallocate(map);
		mutex_unlock(&map->s_lock);
		return;
	}
	assert(map->ref_count == 0);
	mutex_unlock(&map->s_lock);

#if	TASK_SWAPPER
	/*
	 * The map residence count isn't decremented here because
	 * the vm_map_delete below will traverse the entire map,
	 * deleting entries, and the residence counts on objects
	 * and sharing maps will go away then.
	 */
#endif

	vm_map_destroy(map);
}
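
/*
 * Illustrative sketch (not part of the original source): a component that
 * holds a map across a blocking operation takes and drops its own reference,
 * as vm_map_write_user() above does.
 *
 *	vm_map_reference(map);
 *	... use the map, possibly blocking ...
 *	vm_map_deallocate(map);		drops the reference; destroys the
 *					map if it was the last one
 */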
);